Project import
diff --git a/linux-imx/drivers/gpu/Makefile b/linux-imx/drivers/gpu/Makefile
new file mode 100644
index 0000000..d8a22c2
--- /dev/null
+++ b/linux-imx/drivers/gpu/Makefile
@@ -0,0 +1,2 @@
+obj-y			+= drm/ vga/
+obj-$(CONFIG_TEGRA_HOST1X)	+= host1x/
diff --git a/linux-imx/drivers/gpu/drm/Kconfig b/linux-imx/drivers/gpu/drm/Kconfig
new file mode 100644
index 0000000..018964e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/Kconfig
@@ -0,0 +1,229 @@
+#
+# Drm device configuration
+#
+# This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+#
+menuconfig DRM
+	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
+	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
+	select HDMI
+	select I2C
+	select I2C_ALGOBIT
+	select DMA_SHARED_BUFFER
+	help
+	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
+	  introduced in XFree86 4.0. If you say Y here, you need to select
+	  the module that's right for your graphics card from the list below.
+	  These modules provide support for synchronization, security, and
+	  DMA transfers. Please see <http://dri.sourceforge.net/> for more
+	  details.  You should also select and configure AGP
+	  (/dev/agpgart) support if it is available for your platform.
+
+config DRM_USB
+	tristate
+	depends on DRM
+	depends on USB_SUPPORT && USB_ARCH_HAS_HCD
+	select USB
+
+config DRM_KMS_HELPER
+	tristate
+	depends on DRM
+	select FB
+	select FRAMEBUFFER_CONSOLE if !EXPERT
+	select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
+	help
+	  FB and CRTC helpers for KMS drivers.
+
+config DRM_LOAD_EDID_FIRMWARE
+	bool "Allow specifying an EDID data set instead of probing for it"
+	depends on DRM_KMS_HELPER
+	help
+	  Say Y here if you want EDID data to be loaded from the
+	  /lib/firmware directory or from one of the provided built-in
+	  data sets. This may be necessary if the graphics adapter or
+	  monitor is unable to provide appropriate EDID data. Since this
+	  feature is provided as a workaround for broken hardware, the
+	  default is N. Details and instructions on how to build your
+	  own EDID data are given in Documentation/EDID/HOWTO.txt.
+
+config DRM_TTM
+	tristate
+	depends on DRM
+	help
+	  GPU memory management subsystem for devices with multiple
+	  GPU memory types. Will be enabled automatically if a device driver
+	  uses it.
+
+config DRM_GEM_CMA_HELPER
+	bool
+	depends on DRM
+	help
+	  Choose this if you need the GEM CMA helper functions
+
+config DRM_KMS_CMA_HELPER
+	bool
+	select DRM_GEM_CMA_HELPER
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	help
+	  Choose this if you need the KMS CMA helper functions
+
+source "drivers/gpu/drm/i2c/Kconfig"
+
+config DRM_TDFX
+	tristate "3dfx Banshee/Voodoo3+"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later)
+	  graphics card.  If M is selected, the module will be called tdfx.
+
+config DRM_R128
+	tristate "ATI Rage 128"
+	depends on DRM && PCI
+	select FW_LOADER
+	help
+	  Choose this option if you have an ATI Rage 128 graphics card.  If M
+	  is selected, the module will be called r128.  AGP support for
+	  this card is strongly suggested (unless you have a PCI version).
+
+config DRM_RADEON
+	tristate "ATI Radeon"
+	depends on DRM && PCI
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select FW_LOADER
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	select POWER_SUPPLY
+	select HWMON
+	select BACKLIGHT_CLASS_DEVICE
+	help
+	  Choose this option if you have an ATI Radeon graphics card.  There
+	  are both PCI and AGP versions.  You don't need to choose this to
+	  run the Radeon in plain VGA mode.
+
+	  If M is selected, the module will be called radeon.
+
+source "drivers/gpu/drm/radeon/Kconfig"
+
+source "drivers/gpu/drm/nouveau/Kconfig"
+
+config DRM_I810
+	tristate "Intel I810"
+	# !PREEMPT because of missing ioctl locking
+	depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
+	help
+	  Choose this option if you have an Intel I810 graphics card.  If M is
+	  selected, the module will be called i810.  AGP support is required
+	  for this driver to work.
+
+config DRM_I915
+	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+	depends on DRM
+	depends on AGP
+	depends on AGP_INTEL
+	# we need shmfs for the swappable backing store, and in particular
+	# the shmem_readpage() which depends upon tmpfs
+	select SHMEM
+	select TMPFS
+	select DRM_KMS_HELPER
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	# i915 depends on ACPI_VIDEO when ACPI is enabled
+	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select BACKLIGHT_LCD_SUPPORT if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select VIDEO_OUTPUT_CONTROL if ACPI
+	select INPUT if ACPI
+	select ACPI_VIDEO if ACPI
+	select ACPI_BUTTON if ACPI
+	help
+	  Choose this option if you have a system that has "Intel Graphics
+	  Media Accelerator" or "HD Graphics" integrated graphics,
+	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+	  If M is selected, the module will be called i915.  AGP support
+	  is required for this driver to work. This driver is used by
+	  the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+	  replaces the older i830 module that supported a subset of the
+	  hardware in older X.org releases.
+
+	  Note that the older i810/i815 chipsets require the use of the
+	  i810 driver instead, and the Atom z5xx series has an entirely
+	  different implementation.
+
+config DRM_I915_KMS
+	bool "Enable modesetting on intel by default"
+	depends on DRM_I915
+	help
+	  Choose this option if you want kernel modesetting enabled by default,
+	  and you have a new enough userspace to support this. Running old
+	  userspaces with this enabled will cause pain.  Note that this causes
+	  the driver to bind to PCI devices, which precludes loading things
+	  like intelfb.
+
+config DRM_MGA
+	tristate "Matrox g200/g400"
+	depends on DRM && PCI
+	select FW_LOADER
+	help
+	  Choose this option if you have a Matrox G200, G400 or G450 graphics
+	  card.  If M is selected, the module will be called mga.  AGP
+	  support is required for this driver to work.
+
+config DRM_SIS
+	tristate "SiS video cards"
+	depends on DRM && AGP
+	depends on FB_SIS || FB_SIS=n
+	help
+	  Choose this option if you have a SiS 630 or compatible video
+	  chipset. If M is selected the module will be called sis. AGP
+	  support is required for this driver to work.
+
+config DRM_VIA
+	tristate "Via unichrome video cards"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have a Via unichrome or compatible video
+	  chipset. If M is selected the module will be called via.
+
+config DRM_SAVAGE
+	tristate "Savage video cards"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+	  chipset. If M is selected the module will be called savage.
+
+config DRM_VIVANTE
+	tristate "Vivante GCCore"
+	depends on DRM
+	help
+	  Choose this option if you have a Vivante GC graphics core.
+	  If M is selected, the module will be called vivante.
+
+source "drivers/gpu/drm/exynos/Kconfig"
+
+source "drivers/gpu/drm/vmwgfx/Kconfig"
+
+source "drivers/gpu/drm/gma500/Kconfig"
+
+source "drivers/gpu/drm/udl/Kconfig"
+
+source "drivers/gpu/drm/ast/Kconfig"
+
+source "drivers/gpu/drm/mgag200/Kconfig"
+
+source "drivers/gpu/drm/cirrus/Kconfig"
+
+source "drivers/gpu/drm/shmobile/Kconfig"
+
+source "drivers/gpu/drm/omapdrm/Kconfig"
+
+source "drivers/gpu/drm/tilcdc/Kconfig"
+
+source "drivers/gpu/drm/qxl/Kconfig"
diff --git a/linux-imx/drivers/gpu/drm/Makefile b/linux-imx/drivers/gpu/drm/Makefile
new file mode 100644
index 0000000..24b70a7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/Makefile
@@ -0,0 +1,76 @@
+##############################################################################
+#
+#    Copyright (C) 2005 - 2013 by Vivante Corp.
+#
+#    This program is free software; you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation; either version 2 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program; if not, write to the Free Software
+#    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+#
+##############################################################################
+
+
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+drm-y       :=	drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
+		drm_context.o drm_dma.o \
+		drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
+		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+		drm_agpsupport.o drm_scatter.o drm_pci.o \
+		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
+		drm_crtc.o drm_modes.o drm_edid.o \
+		drm_info.o drm_debugfs.o drm_encoder_slave.o \
+		drm_trace_points.o drm_global.o drm_prime.o
+
+drm-$(CONFIG_COMPAT) += drm_ioc32.o
+drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_PCI) += ati_pcigart.o
+
+drm-usb-y   := drm_usb.o
+
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
+drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
+
+obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
+
+CFLAGS_drm_trace_points.o := -I$(src)
+
+obj-$(CONFIG_DRM)	+= drm.o
+obj-$(CONFIG_DRM_TTM)	+= ttm/
+obj-$(CONFIG_DRM_VIVANTE)	+= vivante/
+obj-$(CONFIG_DRM_TDFX)	+= tdfx/
+obj-$(CONFIG_DRM_R128)	+= r128/
+obj-$(CONFIG_DRM_RADEON)	+= radeon/
+obj-$(CONFIG_DRM_MGA)	+= mga/
+obj-$(CONFIG_DRM_I810)	+= i810/
+obj-$(CONFIG_DRM_I915)  += i915/
+obj-$(CONFIG_DRM_MGAG200) += mgag200/
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
+obj-$(CONFIG_DRM_SIS)   += sis/
+obj-$(CONFIG_DRM_SAVAGE)	+= savage/
+obj-$(CONFIG_DRM_VMWGFX)	+= vmwgfx/
+obj-$(CONFIG_DRM_VIA)		+= via/
+obj-$(CONFIG_DRM_NOUVEAU)	+= nouveau/
+obj-$(CONFIG_DRM_EXYNOS)	+= exynos/
+obj-$(CONFIG_DRM_GMA500) += gma500/
+obj-$(CONFIG_DRM_UDL) += udl/
+obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_SHMOBILE)	+= shmobile/
+obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
+obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
+obj-$(CONFIG_DRM_QXL) += qxl/
+obj-y			+= i2c/
diff --git a/linux-imx/drivers/gpu/drm/README.drm b/linux-imx/drivers/gpu/drm/README.drm
new file mode 100644
index 0000000..b5b3327
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/README.drm
@@ -0,0 +1,43 @@
+************************************************************
+* For the very latest on DRI development, please see:      *
+*     http://dri.freedesktop.org/                          *
+************************************************************
+
+The Direct Rendering Manager (drm) is a device-independent kernel-level
+device driver that provides support for the XFree86 Direct Rendering
+Infrastructure (DRI).
+
+The DRM supports the Direct Rendering Infrastructure (DRI) in four major
+ways:
+
+    1. The DRM provides synchronized access to the graphics hardware via
+       the use of an optimized two-tiered lock.
+
+    2. The DRM enforces the DRI security policy for access to the graphics
+       hardware by only allowing authenticated X11 clients access to
+       restricted regions of memory.
+
+    3. The DRM provides a generic DMA engine, complete with multiple
+       queues and the ability to detect the need for an OpenGL context
+       switch.
+
+    4. The DRM is extensible via the use of small device-specific modules
+       that rely extensively on the API exported by the DRM module.
+
+
+Documentation on the DRI is available from:
+    http://dri.freedesktop.org/wiki/Documentation
+    http://sourceforge.net/project/showfiles.php?group_id=387
+    http://dri.sourceforge.net/doc/
+
+For specific information about kernel-level support, see:
+
+    The Direct Rendering Manager, Kernel Support for the Direct Rendering
+    Infrastructure
+    http://dri.sourceforge.net/doc/drm_low_level.html
+
+    Hardware Locking for the Direct Rendering Infrastructure
+    http://dri.sourceforge.net/doc/hardware_locking_low_level.html
+
+    A Security Analysis of the Direct Rendering Infrastructure
+    http://dri.sourceforge.net/doc/security_low_level.html
diff --git a/linux-imx/drivers/gpu/drm/ast/Kconfig b/linux-imx/drivers/gpu/drm/ast/Kconfig
new file mode 100644
index 0000000..da4a51e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ast/Kconfig
@@ -0,0 +1,16 @@
+config DRM_AST
+	tristate "AST server chips"
+	depends on DRM && PCI
+	select DRM_TTM
+	select FB_SYS_COPYAREA
+	select FB_SYS_FILLRECT
+	select FB_SYS_IMAGEBLIT
+	select DRM_KMS_HELPER
+	help
+	  Say yes for the experimental AST GPU driver. Do not enable
+	  this driver without a working -modesetting userspace driver,
+	  and a version of the AST X.org driver that knows to fail if
+	  KMS is bound to the kernel driver. These GPUs are commonly
+	  found in server chipsets.
+
diff --git a/linux-imx/drivers/gpu/drm/ast/Makefile b/linux-imx/drivers/gpu/drm/ast/Makefile
new file mode 100644
index 0000000..8df4f28
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ast/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o
+
+obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/linux-imx/drivers/gpu/drm/ast/ast_dram_tables.h b/linux-imx/drivers/gpu/drm/ast/ast_dram_tables.h
new file mode 100644
index 0000000..cc04539
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ast/ast_dram_tables.h
@@ -0,0 +1,144 @@
+#ifndef AST_DRAM_TABLES_H
+#define AST_DRAM_TABLES_H
+
+/* DRAM timing tables */
+struct ast_dramstruct {
+	u16 index;
+	u32 data;
+};
+
+static const struct ast_dramstruct ast2000_dram_table_data[] = {
+	{ 0x0108, 0x00000000 },
+	{ 0x0120, 0x00004a21 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0000, 0xFFFFFFFF },
+	{ 0x0004, 0x00000089 },
+	{ 0x0008, 0x22331353 },
+	{ 0x000C, 0x0d07000b },
+	{ 0x0010, 0x11113333 },
+	{ 0x0020, 0x00110350 },
+	{ 0x0028, 0x1e0828f0 },
+	{ 0x0024, 0x00000001 },
+	{ 0x001C, 0x00000000 },
+	{ 0x0014, 0x00000003 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0018, 0x00000131 },
+	{ 0x0014, 0x00000001 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0018, 0x00000031 },
+	{ 0x0014, 0x00000001 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0028, 0x1e0828f1 },
+	{ 0x0024, 0x00000003 },
+	{ 0x002C, 0x1f0f28fb },
+	{ 0x0030, 0xFFFFFE01 },
+	{ 0xFFFF, 0xFFFFFFFF }
+};
+
+static const struct ast_dramstruct ast1100_dram_table_data[] = {
+	{ 0x2000, 0x1688a8a8 },
+	{ 0x2020, 0x000041f0 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0000, 0xfc600309 },
+	{ 0x006C, 0x00909090 },
+	{ 0x0064, 0x00050000 },
+	{ 0x0004, 0x00000585 },
+	{ 0x0008, 0x0011030f },
+	{ 0x0010, 0x22201724 },
+	{ 0x0018, 0x1e29011a },
+	{ 0x0020, 0x00c82222 },
+	{ 0x0014, 0x01001523 },
+	{ 0x001C, 0x1024010d },
+	{ 0x0024, 0x00cb2522 },
+	{ 0x0038, 0xffffff82 },
+	{ 0x003C, 0x00000000 },
+	{ 0x0040, 0x00000000 },
+	{ 0x0044, 0x00000000 },
+	{ 0x0048, 0x00000000 },
+	{ 0x004C, 0x00000000 },
+	{ 0x0050, 0x00000000 },
+	{ 0x0054, 0x00000000 },
+	{ 0x0058, 0x00000000 },
+	{ 0x005C, 0x00000000 },
+	{ 0x0060, 0x032aa02a },
+	{ 0x0064, 0x002d3000 },
+	{ 0x0068, 0x00000000 },
+	{ 0x0070, 0x00000000 },
+	{ 0x0074, 0x00000000 },
+	{ 0x0078, 0x00000000 },
+	{ 0x007C, 0x00000000 },
+	{ 0x0034, 0x00000001 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x002C, 0x00000732 },
+	{ 0x0030, 0x00000040 },
+	{ 0x0028, 0x00000005 },
+	{ 0x0028, 0x00000007 },
+	{ 0x0028, 0x00000003 },
+	{ 0x0028, 0x00000001 },
+	{ 0x000C, 0x00005a08 },
+	{ 0x002C, 0x00000632 },
+	{ 0x0028, 0x00000001 },
+	{ 0x0030, 0x000003c0 },
+	{ 0x0028, 0x00000003 },
+	{ 0x0030, 0x00000040 },
+	{ 0x0028, 0x00000003 },
+	{ 0x000C, 0x00005a21 },
+	{ 0x0034, 0x00007c03 },
+	{ 0x0120, 0x00004c41 },
+	{ 0xffff, 0xffffffff },
+};
+
+static const struct ast_dramstruct ast2100_dram_table_data[] = {
+	{ 0x2000, 0x1688a8a8 },
+	{ 0x2020, 0x00004120 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x0000, 0xfc600309 },
+	{ 0x006C, 0x00909090 },
+	{ 0x0064, 0x00070000 },
+	{ 0x0004, 0x00000489 },
+	{ 0x0008, 0x0011030f },
+	{ 0x0010, 0x32302926 },
+	{ 0x0018, 0x274c0122 },
+	{ 0x0020, 0x00ce2222 },
+	{ 0x0014, 0x01001523 },
+	{ 0x001C, 0x1024010d },
+	{ 0x0024, 0x00cb2522 },
+	{ 0x0038, 0xffffff82 },
+	{ 0x003C, 0x00000000 },
+	{ 0x0040, 0x00000000 },
+	{ 0x0044, 0x00000000 },
+	{ 0x0048, 0x00000000 },
+	{ 0x004C, 0x00000000 },
+	{ 0x0050, 0x00000000 },
+	{ 0x0054, 0x00000000 },
+	{ 0x0058, 0x00000000 },
+	{ 0x005C, 0x00000000 },
+	{ 0x0060, 0x0f2aa02a },
+	{ 0x0064, 0x003f3005 },
+	{ 0x0068, 0x02020202 },
+	{ 0x0070, 0x00000000 },
+	{ 0x0074, 0x00000000 },
+	{ 0x0078, 0x00000000 },
+	{ 0x007C, 0x00000000 },
+	{ 0x0034, 0x00000001 },
+	{ 0xFF00, 0x00000043 },
+	{ 0x002C, 0x00000942 },
+	{ 0x0030, 0x00000040 },
+	{ 0x0028, 0x00000005 },
+	{ 0x0028, 0x00000007 },
+	{ 0x0028, 0x00000003 },
+	{ 0x0028, 0x00000001 },
+	{ 0x000C, 0x00005a08 },
+	{ 0x002C, 0x00000842 },
+	{ 0x0028, 0x00000001 },
+	{ 0x0030, 0x000003c0 },
+	{ 0x0028, 0x00000003 },
+	{ 0x0030, 0x00000040 },
+	{ 0x0028, 0x00000003 },
+	{ 0x000C, 0x00005a21 },
+	{ 0x0034, 0x00007c03 },
+	{ 0x0120, 0x00005061 },
+	{ 0xffff, 0xffffffff },
+};
+
+#endif
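
The tables above are consumed by the chip POST code (ast_post.c, which is
not part of this hunk). A minimal sketch of the walk, on the assumption
that an index of 0xFF00 marks a delay entry and 0xFFFF terminates the
table (the offsets line up with the 0x10000-based register writes seen in
ast_main.c):

    #include <linux/delay.h>

    /* Sketch only; the real consumer is ast_post.c and may differ. */
    static void ast_walk_dram_table(struct ast_private *ast,
                                    const struct ast_dramstruct *entry)
    {
        for (; entry->index != 0xffff; entry++) {
            if (entry->index == 0xff00) {   /* assumed delay marker */
                udelay(entry->data & 0xff);
                continue;
            }
            ast_write32(ast, 0x10000 + entry->index, entry->data);
        }
    }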
diff --git a/linux-imx/drivers/gpu/drm/ast/ast_drv.c b/linux-imx/drivers/gpu/drm/ast/ast_drv.c
new file mode 100644
index 0000000..df0d0a0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ast/ast_drv.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "ast_drv.h"
+
+int ast_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, ast_modeset, int, 0400);
+
+#define PCI_VENDOR_ASPEED 0x1a03
+
+static struct drm_driver driver;
+
+#define AST_VGA_DEVICE(id, info) {		\
+	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
+	.class_mask = 0xff0000,			\
+	.vendor = PCI_VENDOR_ASPEED,			\
+	.device = id,				\
+	.subvendor = PCI_ANY_ID,		\
+	.subdevice = PCI_ANY_ID,		\
+	.driver_data = (unsigned long) info }
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+	AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
+	AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
+	/*	AST_VGA_DEVICE(PCI_CHIP_AST1180, NULL), - don't bind to 1180 for now */
+	{0, 0, 0},
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void
+ast_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+
+
+static int ast_drm_freeze(struct drm_device *dev)
+{
+	drm_kms_helper_poll_disable(dev);
+
+	pci_save_state(dev->pdev);
+
+	console_lock();
+	ast_fbdev_set_suspend(dev, 1);
+	console_unlock();
+	return 0;
+}
+
+static int ast_drm_thaw(struct drm_device *dev)
+{
+	int error = 0;
+
+	ast_post_gpu(dev);
+
+	drm_mode_config_reset(dev);
+	drm_modeset_lock_all(dev);
+	drm_helper_resume_force_mode(dev);
+	drm_modeset_unlock_all(dev);
+
+	console_lock();
+	ast_fbdev_set_suspend(dev, 0);
+	console_unlock();
+	return error;
+}
+
+static int ast_drm_resume(struct drm_device *dev)
+{
+	int ret;
+
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	ret = ast_drm_thaw(dev);
+	if (ret)
+		return ret;
+
+	drm_kms_helper_poll_enable(dev);
+	return 0;
+}
+
+static int ast_pm_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+	int error;
+
+	error = ast_drm_freeze(ddev);
+	if (error)
+		return error;
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+	return 0;
+}
+
+static int ast_pm_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+	return ast_drm_resume(ddev);
+}
+
+static int ast_pm_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+
+	if (!ddev || !ddev->dev_private)
+		return -ENODEV;
+	return ast_drm_freeze(ddev);
+
+}
+
+static int ast_pm_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+	return ast_drm_thaw(ddev);
+}
+
+static int ast_pm_poweroff(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *ddev = pci_get_drvdata(pdev);
+
+	return ast_drm_freeze(ddev);
+}
+
+static const struct dev_pm_ops ast_pm_ops = {
+	.suspend = ast_pm_suspend,
+	.resume = ast_pm_resume,
+	.freeze = ast_pm_freeze,
+	.thaw = ast_pm_thaw,
+	.poweroff = ast_pm_poweroff,
+	.restore = ast_pm_resume,
+};
+
+static struct pci_driver ast_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = ast_pci_probe,
+	.remove = ast_pci_remove,
+	.driver.pm = &ast_pm_ops,
+};
+
+static const struct file_operations ast_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = ast_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.read = drm_read,
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM,
+	.dev_priv_size = 0,
+
+	.load = ast_driver_load,
+	.unload = ast_driver_unload,
+
+	.fops = &ast_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+
+	.gem_init_object = ast_gem_init_object,
+	.gem_free_object = ast_gem_free_object,
+	.dumb_create = ast_dumb_create,
+	.dumb_map_offset = ast_dumb_mmap_offset,
+	.dumb_destroy = ast_dumb_destroy,
+
+};
+
+static int __init ast_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
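+	/* "nomodeset" on the kernel command line forces the VGA text
+	 * console; honour it unless the user explicitly set modeset=1.
+	 */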
+	if (vgacon_text_force() && ast_modeset == -1)
+		return -EINVAL;
+#endif
+
+	if (ast_modeset == 0)
+		return -EINVAL;
+	return drm_pci_init(&driver, &ast_pci_driver);
+}
+static void __exit ast_exit(void)
+{
+	drm_pci_exit(&driver, &ast_pci_driver);
+}
+
+module_init(ast_init);
+module_exit(ast_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/linux-imx/drivers/gpu/drm/ast/ast_drv.h b/linux-imx/drivers/gpu/drm/ast/ast_drv.h
new file mode 100644
index 0000000..b6b7d70
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ast/ast_drv.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#ifndef __AST_DRV_H__
+#define __AST_DRV_H__
+
+#include <drm/drm_fb_helper.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#define DRIVER_AUTHOR		"Dave Airlie"
+
+#define DRIVER_NAME		"ast"
+#define DRIVER_DESC		"AST"
+#define DRIVER_DATE		"20120228"
+
+#define DRIVER_MAJOR		0
+#define DRIVER_MINOR		1
+#define DRIVER_PATCHLEVEL	0
+
+#define PCI_CHIP_AST2000 0x2000
+#define PCI_CHIP_AST2100 0x2010
+#define PCI_CHIP_AST1180 0x1180
+
+
+enum ast_chip {
+	AST2000,
+	AST2100,
+	AST1100,
+	AST2200,
+	AST2150,
+	AST2300,
+	AST1180,
+};
+
+#define AST_DRAM_512Mx16 0
+#define AST_DRAM_1Gx16   1
+#define AST_DRAM_512Mx32 2
+#define AST_DRAM_1Gx32   3
+#define AST_DRAM_2Gx16   6
+#define AST_DRAM_4Gx16   7
+
+struct ast_fbdev;
+
+struct ast_private {
+	struct drm_device *dev;
+
+	void __iomem *regs;
+	void __iomem *ioregs;
+
+	enum ast_chip chip;
+	bool vga2_clone;
+	uint32_t dram_bus_width;
+	uint32_t dram_type;
+	uint32_t mclk;
+	uint32_t vram_size;
+
+	struct ast_fbdev *fbdev;
+
+	int fb_mtrr;
+
+	struct {
+		struct drm_global_reference mem_global_ref;
+		struct ttm_bo_global_ref bo_global_ref;
+		struct ttm_bo_device bdev;
+	} ttm;
+
+	struct drm_gem_object *cursor_cache;
+	uint64_t cursor_cache_gpu_addr;
+	/* Access to this cache is protected by the crtc->mutex of the only crtc
+	 * we have. */
+	struct ttm_bo_kmap_obj cache_kmap;
+	int next_cursor;
+};
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags);
+int ast_driver_unload(struct drm_device *dev);
+
+struct ast_gem_object;
+
+#define AST_IO_AR_PORT_WRITE		(0x40)
+#define AST_IO_MISC_PORT_WRITE		(0x42)
+#define AST_IO_SEQ_PORT			(0x44)
+#define AST_DAC_INDEX_READ		(0x3c7)
+#define AST_IO_DAC_INDEX_WRITE		(0x48)
+#define AST_IO_DAC_DATA		        (0x49)
+#define AST_IO_GR_PORT			(0x4E)
+#define AST_IO_CRTC_PORT		(0x54)
+#define AST_IO_INPUT_STATUS1_READ	(0x5A)
+#define AST_IO_MISC_PORT_READ		(0x4C)
+
+#define __ast_read(x) \
+static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->regs + reg); \
+return val;\
+}
+
+__ast_read(8);
+__ast_read(16);
+__ast_read(32);
+
+#define __ast_io_read(x) \
+static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->ioregs + reg); \
+return val;\
+}
+
+__ast_io_read(8);
+__ast_io_read(16);
+__ast_io_read(32);
+
+#define __ast_write(x) \
+static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+	iowrite##x(val, ast->regs + reg);\
+	}
+
+__ast_write(8);
+__ast_write(16);
+__ast_write(32);
+
+#define __ast_io_write(x) \
+static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+	iowrite##x(val, ast->ioregs + reg);\
+	}
+
+__ast_io_write(8);
+__ast_io_write(16);
+#undef __ast_io_write
+
+static inline void ast_set_index_reg(struct ast_private *ast,
+				     uint32_t base, uint8_t index,
+				     uint8_t val)
+{
+	ast_io_write16(ast, base, ((u16)val << 8) | index);
+}
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+			    uint32_t base, uint8_t index,
+			    uint8_t mask, uint8_t val);
+uint8_t ast_get_index_reg(struct ast_private *ast,
+			  uint32_t base, uint8_t index);
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+			       uint32_t base, uint8_t index, uint8_t mask);
+
+static inline void ast_open_key(struct ast_private *ast)
+{
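+	/* Writing 0xA8 to CRTC index 0x80 appears to be the vendor key
+	 * that unlocks the extended CRTC registers.
+	 */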
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
+}
+
+#define AST_VIDMEM_SIZE_8M    0x00800000
+#define AST_VIDMEM_SIZE_16M   0x01000000
+#define AST_VIDMEM_SIZE_32M   0x02000000
+#define AST_VIDMEM_SIZE_64M   0x04000000
+#define AST_VIDMEM_SIZE_128M  0x08000000
+
+#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
+
+#define AST_MAX_HWC_WIDTH 64
+#define AST_MAX_HWC_HEIGHT 64
+
+#define AST_HWC_SIZE                (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2)
+#define AST_HWC_SIGNATURE_SIZE      32
+
+#define AST_DEFAULT_HWC_NUM 2
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM  0x00
+#define AST_HWC_SIGNATURE_SizeX     0x04
+#define AST_HWC_SIGNATURE_SizeY     0x08
+#define AST_HWC_SIGNATURE_X         0x0C
+#define AST_HWC_SIGNATURE_Y         0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX  0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY  0x18
+
+
+struct ast_i2c_chan {
+	struct i2c_adapter adapter;
+	struct drm_device *dev;
+	struct i2c_algo_bit_data bit;
+};
+
+struct ast_connector {
+	struct drm_connector base;
+	struct ast_i2c_chan *i2c;
+};
+
+struct ast_crtc {
+	struct drm_crtc base;
+	u8 lut_r[256], lut_g[256], lut_b[256];
+	struct drm_gem_object *cursor_bo;
+	uint64_t cursor_addr;
+	int cursor_width, cursor_height;
+	u8 offset_x, offset_y;
+};
+
+struct ast_encoder {
+	struct drm_encoder base;
+};
+
+struct ast_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+struct ast_fbdev {
+	struct drm_fb_helper helper;
+	struct ast_framebuffer afb;
+	struct list_head fbdev_list;
+	void *sysram;
+	int size;
+	struct ttm_bo_kmap_obj mapping;
+	int x1, y1, x2, y2; /* dirty rect */
+	spinlock_t dirty_lock;
+};
+
+#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
+#define to_ast_connector(x) container_of(x, struct ast_connector, base)
+#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
+#define to_ast_framebuffer(x) container_of(x, struct ast_framebuffer, base)
+
+struct ast_vbios_stdtable {
+	u8 misc;
+	u8 seq[4];
+	u8 crtc[25];
+	u8 ar[20];
+	u8 gr[9];
+};
+
+struct ast_vbios_enhtable {
+	u32 ht;
+	u32 hde;
+	u32 hfp;
+	u32 hsync;
+	u32 vt;
+	u32 vde;
+	u32 vfp;
+	u32 vsync;
+	u32 dclk_index;
+	u32 flags;
+	u32 refresh_rate;
+	u32 refresh_rate_index;
+	u32 mode_id;
+};
+
+struct ast_vbios_dclk_info {
+	u8 param1;
+	u8 param2;
+	u8 param3;
+};
+
+struct ast_vbios_mode_info {
+	struct ast_vbios_stdtable *std_table;
+	struct ast_vbios_enhtable *enh_table;
+};
+
+extern int ast_mode_init(struct drm_device *dev);
+extern void ast_mode_fini(struct drm_device *dev);
+
+int ast_framebuffer_init(struct drm_device *dev,
+			 struct ast_framebuffer *ast_fb,
+			 struct drm_mode_fb_cmd2 *mode_cmd,
+			 struct drm_gem_object *obj);
+
+int ast_fbdev_init(struct drm_device *dev);
+void ast_fbdev_fini(struct drm_device *dev);
+void ast_fbdev_set_suspend(struct drm_device *dev, int state);
+
+struct ast_bo {
+	struct ttm_buffer_object bo;
+	struct ttm_placement placement;
+	struct ttm_bo_kmap_obj kmap;
+	struct drm_gem_object gem;
+	u32 placements[3];
+	int pin_count;
+};
+#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem)
+
+static inline struct ast_bo *
+ast_bo(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct ast_bo, bo);
+}
+
+
+#define to_ast_obj(x) container_of(x, struct ast_gem_object, base)
+
+#define AST_MM_ALIGN_SHIFT 4
+#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
+
+extern int ast_dumb_create(struct drm_file *file,
+			   struct drm_device *dev,
+			   struct drm_mode_create_dumb *args);
+extern int ast_dumb_destroy(struct drm_file *file,
+			    struct drm_device *dev,
+			    uint32_t handle);
+
+extern int ast_gem_init_object(struct drm_gem_object *obj);
+extern void ast_gem_free_object(struct drm_gem_object *obj);
+extern int ast_dumb_mmap_offset(struct drm_file *file,
+				struct drm_device *dev,
+				uint32_t handle,
+				uint64_t *offset);
+
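+/* Start TTM mmap offsets above 4 GiB so they stay clear of the legacy
+ * map offsets; this mirrors what other TTM-based drivers do.
+ */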
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+int ast_mm_init(struct ast_private *ast);
+void ast_mm_fini(struct ast_private *ast);
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+		  uint32_t flags, struct ast_bo **pastbo);
+
+int ast_gem_create(struct drm_device *dev,
+		   u32 size, bool iskernel,
+		   struct drm_gem_object **obj);
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int ast_bo_unpin(struct ast_bo *bo);
+
+int ast_bo_reserve(struct ast_bo *bo, bool no_wait);
+void ast_bo_unreserve(struct ast_bo *bo);
+void ast_ttm_placement(struct ast_bo *bo, int domain);
+int ast_bo_push_sysram(struct ast_bo *bo);
+int ast_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* ast post */
+void ast_post_gpu(struct drm_device *dev);
+#endif
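
For reference, the __ast_read(8) invocation above expands to an ordinary
MMIO accessor; the __ast_io_read/__ast_io_write variants are identical
except that they go through ast->ioregs, the second register BAR mapped
in ast_driver_load():

    static inline u8 ast_read8(struct ast_private *ast, u32 reg)
    {
        u8 val = 0;
        val = ioread8(ast->regs + reg);
        return val;
    }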
diff --git a/linux-imx/drivers/gpu/drm/ast/ast_fb.c b/linux-imx/drivers/gpu/drm/ast/ast_fb.c
new file mode 100644
index 0000000..fbc0823
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ast/ast_fb.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include "ast_drv.h"
+
+static void ast_dirty_update(struct ast_fbdev *afbdev,
+			     int x, int y, int width, int height)
+{
+	int i;
+	struct drm_gem_object *obj;
+	struct ast_bo *bo;
+	int src_offset, dst_offset;
+	int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
+	int ret;
+	bool unmap = false;
+	bool store_for_later = false;
+	int x2, y2;
+	unsigned long flags;
+
+	obj = afbdev->afb.obj;
+	bo = gem_to_ast_bo(obj);
+
+	/*
+	 * try and reserve the BO, if we fail with busy
+	 * then the BO is being moved and we should
+	 * store up the damage until later.
+	 */
+	ret = ast_bo_reserve(bo, true);
+	if (ret) {
+		if (ret != -EBUSY)
+			return;
+
+		store_for_later = true;
+	}
+
+	x2 = x + width - 1;
+	y2 = y + height - 1;
+	spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
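+	/* Grow this update to cover any damage recorded by earlier,
+	 * deferred updates.
+	 */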
+	if (afbdev->y1 < y)
+		y = afbdev->y1;
+	if (afbdev->y2 > y2)
+		y2 = afbdev->y2;
+	if (afbdev->x1 < x)
+		x = afbdev->x1;
+	if (afbdev->x2 > x2)
+		x2 = afbdev->x2;
+
+	if (store_for_later) {
+		afbdev->x1 = x;
+		afbdev->x2 = x2;
+		afbdev->y1 = y;
+		afbdev->y2 = y2;
+		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+		return;
+	}
+
+	afbdev->x1 = afbdev->y1 = INT_MAX;
+	afbdev->x2 = afbdev->y2 = 0;
+	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
+	if (!bo->kmap.virtual) {
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret) {
+			DRM_ERROR("failed to kmap fb updates\n");
+			ast_bo_unreserve(bo);
+			return;
+		}
+		unmap = true;
+	}
+	for (i = y; i <= y2; i++) {
+		/* assume equal stride for now */
+		src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
+		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
+
+	}
+	if (unmap)
+		ttm_bo_kunmap(&bo->kmap);
+
+	ast_bo_unreserve(bo);
+}
+
+static void ast_fillrect(struct fb_info *info,
+			 const struct fb_fillrect *rect)
+{
+	struct ast_fbdev *afbdev = info->par;
+	sys_fillrect(info, rect);
+	ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+			 rect->height);
+}
+
+static void ast_copyarea(struct fb_info *info,
+			 const struct fb_copyarea *area)
+{
+	struct ast_fbdev *afbdev = info->par;
+	sys_copyarea(info, area);
+	ast_dirty_update(afbdev, area->dx, area->dy, area->width,
+			 area->height);
+}
+
+static void ast_imageblit(struct fb_info *info,
+			  const struct fb_image *image)
+{
+	struct ast_fbdev *afbdev = info->par;
+	sys_imageblit(info, image);
+	ast_dirty_update(afbdev, image->dx, image->dy, image->width,
+			 image->height);
+}
+
+static struct fb_ops astfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = ast_fillrect,
+	.fb_copyarea = ast_copyarea,
+	.fb_imageblit = ast_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int astfb_create_object(struct ast_fbdev *afbdev,
+			       struct drm_mode_fb_cmd2 *mode_cmd,
+			       struct drm_gem_object **gobj_p)
+{
+	struct drm_device *dev = afbdev->helper.dev;
+	u32 bpp, depth;
+	u32 size;
+	struct drm_gem_object *gobj;
+
+	int ret = 0;
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+	size = mode_cmd->pitches[0] * mode_cmd->height;
+	ret = ast_gem_create(dev, size, true, &gobj);
+	if (ret)
+		return ret;
+
+	*gobj_p = gobj;
+	return ret;
+}
+
+static int astfb_create(struct drm_fb_helper *helper,
+			struct drm_fb_helper_surface_size *sizes)
+{
+	struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
+	struct drm_device *dev = afbdev->helper.dev;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_framebuffer *fb;
+	struct fb_info *info;
+	int size, ret;
+	struct device *device = &dev->pdev->dev;
+	void *sysram;
+	struct drm_gem_object *gobj = NULL;
+	struct ast_bo *bo = NULL;
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8);
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+
+	ret = astfb_create_object(afbdev, &mode_cmd, &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+		return ret;
+	}
+	bo = gem_to_ast_bo(gobj);
+
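+	/* fbcon renders into this system-RAM shadow; ast_dirty_update()
+	 * copies the damaged scanlines into the VRAM object.
+	 */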
+	sysram = vmalloc(size);
+	if (!sysram)
+		return -ENOMEM;
+
+	info = framebuffer_alloc(0, device);
+	if (!info) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	info->par = afbdev;
+
+	ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
+	if (ret)
+		goto out;
+
+	afbdev->sysram = sysram;
+	afbdev->size = size;
+
+	fb = &afbdev->afb.base;
+	afbdev->helper.fb = fb;
+	afbdev->helper.fbdev = info;
+
+	strcpy(info->fix.id, "astdrmfb");
+
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &astfb_ops;
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
+	info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
+
+	info->screen_base = sysram;
+	info->screen_size = size;
+
+	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+	DRM_DEBUG_KMS("allocated %dx%d\n",
+		      fb->width, fb->height);
+
+	return 0;
+out:
+	return ret;
+}
+
+static void ast_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			       u16 blue, int regno)
+{
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	ast_crtc->lut_r[regno] = red >> 8;
+	ast_crtc->lut_g[regno] = green >> 8;
+	ast_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			       u16 *blue, int regno)
+{
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	*red = ast_crtc->lut_r[regno] << 8;
+	*green = ast_crtc->lut_g[regno] << 8;
+	*blue = ast_crtc->lut_b[regno] << 8;
+}
+
+static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
+	.gamma_set = ast_fb_gamma_set,
+	.gamma_get = ast_fb_gamma_get,
+	.fb_probe = astfb_create,
+};
+
+static void ast_fbdev_destroy(struct drm_device *dev,
+			      struct ast_fbdev *afbdev)
+{
+	struct fb_info *info;
+	struct ast_framebuffer *afb = &afbdev->afb;
+	if (afbdev->helper.fbdev) {
+		info = afbdev->helper.fbdev;
+		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	if (afb->obj) {
+		drm_gem_object_unreference_unlocked(afb->obj);
+		afb->obj = NULL;
+	}
+	drm_fb_helper_fini(&afbdev->helper);
+
+	vfree(afbdev->sysram);
+	drm_framebuffer_unregister_private(&afb->base);
+	drm_framebuffer_cleanup(&afb->base);
+}
+
+int ast_fbdev_init(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	struct ast_fbdev *afbdev;
+	int ret;
+
+	afbdev = kzalloc(sizeof(struct ast_fbdev), GFP_KERNEL);
+	if (!afbdev)
+		return -ENOMEM;
+
+	ast->fbdev = afbdev;
+	afbdev->helper.funcs = &ast_fb_helper_funcs;
+	spin_lock_init(&afbdev->dirty_lock);
+	ret = drm_fb_helper_init(dev, &afbdev->helper,
+				 1, 1);
+	if (ret) {
+		kfree(afbdev);
+		return ret;
+	}
+
+	drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
+	drm_fb_helper_initial_config(&afbdev->helper, 32);
+	return 0;
+}
+
+void ast_fbdev_fini(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+
+	if (!ast->fbdev)
+		return;
+
+	ast_fbdev_destroy(dev, ast->fbdev);
+	kfree(ast->fbdev);
+	ast->fbdev = NULL;
+}
+
+void ast_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+	struct ast_private *ast = dev->dev_private;
+
+	if (!ast->fbdev)
+		return;
+
+	fb_set_suspend(ast->fbdev->helper.fbdev, state);
+}
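
ast_dirty_update() above accumulates damage as one bounding rectangle.
Distilled to its core, and with hypothetical names (this helper is not
part of the driver), the bookkeeping is:

    #include <linux/kernel.h>   /* min(), max() */

    struct damage {
        int x1, y1, x2, y2;     /* reset to INT_MAX/0 after each flush */
    };

    static void damage_merge(struct damage *d, int x, int y, int w, int h)
    {
        d->x1 = min(d->x1, x);
        d->y1 = min(d->y1, y);
        d->x2 = max(d->x2, x + w - 1);
        d->y2 = max(d->y2, y + h - 1);
    }

When the BO cannot be reserved the merged rectangle is stored, and the
next update that does reserve it copies the whole union from the vmalloc
shadow into VRAM in one pass.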
diff --git a/linux-imx/drivers/gpu/drm/ast/ast_main.c b/linux-imx/drivers/gpu/drm/ast/ast_main.c
new file mode 100644
index 0000000..f60fd7b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ast/ast_main.c
@@ -0,0 +1,519 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "ast_drv.h"
+
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "ast_dram_tables.h"
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+			    uint32_t base, uint8_t index,
+			    uint8_t mask, uint8_t val)
+{
+	u8 tmp;
+	ast_io_write8(ast, base, index);
+	tmp = (ast_io_read8(ast, base + 1) & mask) | val;
+	ast_set_index_reg(ast, base, index, tmp);
+}
+
+uint8_t ast_get_index_reg(struct ast_private *ast,
+			  uint32_t base, uint8_t index)
+{
+	uint8_t ret;
+	ast_io_write8(ast, base, index);
+	ret = ast_io_read8(ast, base + 1);
+	return ret;
+}
+
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+			       uint32_t base, uint8_t index, uint8_t mask)
+{
+	uint8_t ret;
+	ast_io_write8(ast, base, index);
+	ret = ast_io_read8(ast, base + 1) & mask;
+	return ret;
+}
+
+
+static int ast_detect_chip(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+
+	if (dev->pdev->device == PCI_CHIP_AST1180) {
+		ast->chip = AST1100;
+		DRM_INFO("AST 1180 detected\n");
+	} else {
+		if (dev->pdev->revision >= 0x20) {
+			ast->chip = AST2300;
+			DRM_INFO("AST 2300 detected\n");
+		} else if (dev->pdev->revision >= 0x10) {
+			uint32_t data;
+			ast_write32(ast, 0xf004, 0x1e6e0000);
+			ast_write32(ast, 0xf000, 0x1);
+
+			data = ast_read32(ast, 0x1207c);
+			switch (data & 0x0300) {
+			case 0x0200:
+				ast->chip = AST1100;
+				DRM_INFO("AST 1100 detected\n");
+				break;
+			case 0x0100:
+				ast->chip = AST2200;
+				DRM_INFO("AST 2200 detected\n");
+				break;
+			case 0x0000:
+				ast->chip = AST2150;
+				DRM_INFO("AST 2150 detected\n");
+				break;
+			default:
+				ast->chip = AST2100;
+				DRM_INFO("AST 2100 detected\n");
+				break;
+			}
+			ast->vga2_clone = false;
+		} else {
+			ast->chip = AST2000;
+			DRM_INFO("AST 2000 detected\n");
+		}
+	}
+	return 0;
+}
+
+static int ast_get_dram_info(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	uint32_t data, data2;
+	uint32_t denum, num, div, ref_pll;
+
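+	/* The 0xf004/0xf000 writes appear to open a window onto the
+	 * memory controller at 0x1e6e0000 before it is programmed and
+	 * polled below.
+	 */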
+	ast_write32(ast, 0xf004, 0x1e6e0000);
+	ast_write32(ast, 0xf000, 0x1);
+
+
+	ast_write32(ast, 0x10000, 0xfc600309);
+
+	while (ast_read32(ast, 0x10000) != 0x01)
+		;
+	data = ast_read32(ast, 0x10004);
+
+	if (data & 0x400)
+		ast->dram_bus_width = 16;
+	else
+		ast->dram_bus_width = 32;
+
+	if (ast->chip == AST2300) {
+		switch (data & 0x03) {
+		case 0:
+			ast->dram_type = AST_DRAM_512Mx16;
+			break;
+		default:
+		case 1:
+			ast->dram_type = AST_DRAM_1Gx16;
+			break;
+		case 2:
+			ast->dram_type = AST_DRAM_2Gx16;
+			break;
+		case 3:
+			ast->dram_type = AST_DRAM_4Gx16;
+			break;
+		}
+	} else {
+		switch (data & 0x0c) {
+		case 0:
+		case 4:
+			ast->dram_type = AST_DRAM_512Mx16;
+			break;
+		case 8:
+			if (data & 0x40)
+				ast->dram_type = AST_DRAM_1Gx16;
+			else
+				ast->dram_type = AST_DRAM_512Mx32;
+			break;
+		case 0xc:
+			ast->dram_type = AST_DRAM_1Gx32;
+			break;
+		}
+	}
+
+	data = ast_read32(ast, 0x10120);
+	data2 = ast_read32(ast, 0x10170);
+	if (data2 & 0x2000)
+		ref_pll = 14318;
+	else
+		ref_pll = 12000;
+
+	denum = data & 0x1f;
+	num = (data & 0x3fe0) >> 5;
+	data = (data & 0xc000) >> 14;
+	switch (data) {
+	case 3:
+		div = 0x4;
+		break;
+	case 2:
+	case 1:
+		div = 0x2;
+		break;
+	default:
+		div = 0x1;
+		break;
+	}
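+	/* ref_pll is in kHz (14318 or 12000); num, denum and div were
+	 * decoded from register 0x10120 above.
+	 */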
+	ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+	return 0;
+}
+
+uint32_t ast_get_max_dclk(struct drm_device *dev, int bpp)
+{
+	struct ast_private *ast = dev->dev_private;
+	uint32_t dclk, jreg;
+	uint32_t dram_bus_width, mclk, dram_bandwidth, actual_dram_bandwidth, dram_efficiency = 500;
+
+	dram_bus_width = ast->dram_bus_width;
+	mclk = ast->mclk;
+
+	if (ast->chip == AST2100 ||
+	    ast->chip == AST1100 ||
+	    ast->chip == AST2200 ||
+	    ast->chip == AST2150 ||
+	    ast->dram_bus_width == 16)
+		dram_efficiency = 600;
+	else if (ast->chip == AST2300)
+		dram_efficiency = 400;
+
+	dram_bandwidth = mclk * dram_bus_width * 2 / 8;
+	actual_dram_bandwidth = dram_bandwidth * dram_efficiency / 1000;
+
+	if (ast->chip == AST1180)
+		dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
+	else {
+		jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+		if ((jreg & 0x08) && (ast->chip == AST2000))
+			dclk = actual_dram_bandwidth / ((bpp + 1 + 16) / 8);
+		else if ((jreg & 0x08) && (bpp == 8))
+			dclk = actual_dram_bandwidth / ((bpp + 1 + 24) / 8);
+		else
+			dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
+	}
+
+	if (ast->chip == AST2100 ||
+	    ast->chip == AST2200 ||
+	    ast->chip == AST2300 ||
+	    ast->chip == AST1180) {
+		if (dclk > 200)
+			dclk = 200;
+	} else {
+		if (dclk > 165)
+			dclk = 165;
+	}
+
+	return dclk;
+}
+
+static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
+	if (ast_fb->obj)
+		drm_gem_object_unreference_unlocked(ast_fb->obj);
+
+	drm_framebuffer_cleanup(fb);
+	kfree(fb);
+}
+
+static const struct drm_framebuffer_funcs ast_fb_funcs = {
+	.destroy = ast_user_framebuffer_destroy,
+};
+
+
+int ast_framebuffer_init(struct drm_device *dev,
+			 struct ast_framebuffer *ast_fb,
+			 struct drm_mode_fb_cmd2 *mode_cmd,
+			 struct drm_gem_object *obj)
+{
+	int ret;
+
+	drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
+	ast_fb->obj = obj;
+	ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
+	if (ret) {
+		DRM_ERROR("framebuffer init failed %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+static struct drm_framebuffer *
+ast_user_framebuffer_create(struct drm_device *dev,
+	       struct drm_file *filp,
+	       struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct ast_framebuffer *ast_fb;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+	if (obj == NULL)
+		return ERR_PTR(-ENOENT);
+
+	ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
+	if (!ast_fb) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		kfree(ast_fb);
+		return ERR_PTR(ret);
+	}
+	return &ast_fb->base;
+}
+
+static const struct drm_mode_config_funcs ast_mode_funcs = {
+	.fb_create = ast_user_framebuffer_create,
+};
+
+static u32 ast_get_vram_info(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	u8 jreg;
+
+	ast_open_key(ast);
+
+	jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
+	switch (jreg & 3) {
+	case 0: return AST_VIDMEM_SIZE_8M;
+	case 1: return AST_VIDMEM_SIZE_16M;
+	case 2: return AST_VIDMEM_SIZE_32M;
+	case 3: return AST_VIDMEM_SIZE_64M;
+	}
+	return AST_VIDMEM_DEFAULT_SIZE;
+}
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct ast_private *ast;
+	int ret = 0;
+
+	ast = kzalloc(sizeof(struct ast_private), GFP_KERNEL);
+	if (!ast)
+		return -ENOMEM;
+
+	dev->dev_private = ast;
+	ast->dev = dev;
+
+	ast->regs = pci_iomap(dev->pdev, 1, 0);
+	if (!ast->regs) {
+		ret = -EIO;
+		goto out_free;
+	}
+	ast->ioregs = pci_iomap(dev->pdev, 2, 0);
+	if (!ast->ioregs) {
+		ret = -EIO;
+		goto out_free;
+	}
+
+	ast_detect_chip(dev);
+
+	if (ast->chip != AST1180) {
+		ast_get_dram_info(dev);
+		ast->vram_size = ast_get_vram_info(dev);
+		DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
+	}
+
+	ret = ast_mm_init(ast);
+	if (ret)
+		goto out_free;
+
+	drm_mode_config_init(dev);
+
+	dev->mode_config.funcs = (void *)&ast_mode_funcs;
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.preferred_depth = 24;
+	dev->mode_config.prefer_shadow = 1;
+
+	if (ast->chip == AST2100 ||
+	    ast->chip == AST2200 ||
+	    ast->chip == AST2300 ||
+	    ast->chip == AST1180) {
+		dev->mode_config.max_width = 1920;
+		dev->mode_config.max_height = 2048;
+	} else {
+		dev->mode_config.max_width = 1600;
+		dev->mode_config.max_height = 1200;
+	}
+
+	ret = ast_mode_init(dev);
+	if (ret)
+		goto out_free;
+
+	ret = ast_fbdev_init(dev);
+	if (ret)
+		goto out_free;
+
+	return 0;
+out_free:
+	kfree(ast);
+	dev->dev_private = NULL;
+	return ret;
+}
+
+int ast_driver_unload(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+
+	ast_mode_fini(dev);
+	ast_fbdev_fini(dev);
+	drm_mode_config_cleanup(dev);
+
+	ast_mm_fini(ast);
+	pci_iounmap(dev->pdev, ast->ioregs);
+	pci_iounmap(dev->pdev, ast->regs);
+	kfree(ast);
+	return 0;
+}
+
+int ast_gem_create(struct drm_device *dev,
+		   u32 size, bool iskernel,
+		   struct drm_gem_object **obj)
+{
+	struct ast_bo *astbo;
+	int ret;
+
+	*obj = NULL;
+
+	size = roundup(size, PAGE_SIZE);
+	if (size == 0)
+		return -EINVAL;
+
+	ret = ast_bo_create(dev, size, 0, 0, &astbo);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("failed to allocate GEM object\n");
+		return ret;
+	}
+	*obj = &astbo->gem;
+	return 0;
+}
+
+int ast_dumb_create(struct drm_file *file,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args)
+{
+	int ret;
+	struct drm_gem_object *gobj;
+	u32 handle;
+
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
+
+	ret = ast_gem_create(dev, args->size, false,
+			     &gobj);
+	if (ret)
+		return ret;
+
+	ret = drm_gem_handle_create(file, gobj, &handle);
+	drm_gem_object_unreference_unlocked(gobj);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	return 0;
+}
+
+int ast_dumb_destroy(struct drm_file *file,
+		     struct drm_device *dev,
+		     uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+
+int ast_gem_init_object(struct drm_gem_object *obj)
+{
+	BUG();
+	return 0;
+}
+
+void ast_bo_unref(struct ast_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+
+	if ((*bo) == NULL)
+		return;
+
+	tbo = &((*bo)->bo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
+
+}
+void ast_gem_free_object(struct drm_gem_object *obj)
+{
+	struct ast_bo *ast_bo = gem_to_ast_bo(obj);
+
+	if (!ast_bo)
+		return;
+	ast_bo_unref(&ast_bo);
+}
+
+
+static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
+{
+	return bo->bo.addr_space_offset;
+}
+int
+ast_dumb_mmap_offset(struct drm_file *file,
+		     struct drm_device *dev,
+		     uint32_t handle,
+		     uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int ret;
+	struct ast_bo *bo;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file, handle);
+	if (obj == NULL) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
+	bo = gem_to_ast_bo(obj);
+	*offset = ast_bo_mmap_offset(bo);
+
+	drm_gem_object_unreference(obj);
+	ret = 0;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+
+}
+
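
ast_dumb_create() and ast_dumb_mmap_offset() back the generic dumb-buffer
ioctls, so a KMS client can obtain a CPU-mappable scanout buffer without
any driver-specific API. A sketch of the userspace side (error handling
omitted, the 1024x768x32 geometry is arbitrary, and header paths vary
with the libdrm installation):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/drm.h>

    /* fd is an open handle on the card node, e.g. /dev/dri/card0. */
    static void *map_dumb_buffer(int fd, struct drm_mode_create_dumb *creq)
    {
        struct drm_mode_map_dumb mreq;

        memset(creq, 0, sizeof(*creq));
        creq->width  = 1024;
        creq->height = 768;
        creq->bpp    = 32;
        ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, creq);  /* -> ast_dumb_create() */

        memset(&mreq, 0, sizeof(mreq));
        mreq.handle = creq->handle;
        ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);    /* -> ast_dumb_mmap_offset() */

        return mmap(NULL, creq->size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, mreq.offset);
    }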
diff --git a/linux-imx/drivers/gpu/drm/ast/ast_mode.c b/linux-imx/drivers/gpu/drm/ast/ast_mode.c
new file mode 100644
index 0000000..7fc9f72
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ast/ast_mode.c
@@ -0,0 +1,1160 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ * Parts based on xf86-video-ast
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "ast_drv.h"
+
+#include "ast_tables.h"
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c);
+static int ast_cursor_set(struct drm_crtc *crtc,
+			  struct drm_file *file_priv,
+			  uint32_t handle,
+			  uint32_t width,
+			  uint32_t height);
+static int ast_cursor_move(struct drm_crtc *crtc,
+			   int x, int y);
+
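+/*
+ * Load one palette entry: write the DAC index, then the three color
+ * components; the interleaved SEQ-port reads act as I/O delays between
+ * the DAC writes.
+ */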
+static inline void ast_load_palette_index(struct ast_private *ast,
+				     u8 index, u8 red, u8 green,
+				     u8 blue)
+{
+	ast_io_write8(ast, AST_IO_DAC_INDEX_WRITE, index);
+	ast_io_read8(ast, AST_IO_SEQ_PORT);
+	ast_io_write8(ast, AST_IO_DAC_DATA, red);
+	ast_io_read8(ast, AST_IO_SEQ_PORT);
+	ast_io_write8(ast, AST_IO_DAC_DATA, green);
+	ast_io_read8(ast, AST_IO_SEQ_PORT);
+	ast_io_write8(ast, AST_IO_DAC_DATA, blue);
+	ast_io_read8(ast, AST_IO_SEQ_PORT);
+}
+
+static void ast_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	int i;
+
+	if (!crtc->enabled)
+		return;
+
+	for (i = 0; i < 256; i++)
+		ast_load_palette_index(ast, i, ast_crtc->lut_r[i],
+				       ast_crtc->lut_g[i], ast_crtc->lut_b[i]);
+}
+
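+/*
+ * Map the requested mode onto the VBIOS tables: pick the standard table
+ * by color depth and the enhanced table by active resolution, walk to the
+ * closest refresh-rate entry, derive the adjusted CRTC timings from it,
+ * and record the selection in the 0x8c-0x97 scratch registers.
+ */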
+static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode,
+				    struct ast_vbios_mode_info *vbios_mode)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
+	u32 hborder, vborder;
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
+		color_index = VGAModeIndex - 1;
+		break;
+	case 16:
+		vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
+		color_index = HiCModeIndex;
+		break;
+	case 24:
+	case 32:
+		vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
+		color_index = TrueCModeIndex;
+		break;
+	default:
+		return false;
+	}
+
+	switch (crtc->mode.crtc_hdisplay) {
+	case 640:
+		vbios_mode->enh_table = &res_640x480[refresh_rate_index];
+		break;
+	case 800:
+		vbios_mode->enh_table = &res_800x600[refresh_rate_index];
+		break;
+	case 1024:
+		vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
+		break;
+	case 1280:
+		if (crtc->mode.crtc_vdisplay == 800)
+			vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
+		else
+			vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
+		break;
+	case 1440:
+		vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
+		break;
+	case 1600:
+		vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
+		break;
+	case 1680:
+		vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
+		break;
+	case 1920:
+		if (crtc->mode.crtc_vdisplay == 1080)
+			vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
+		else
+			vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
+		break;
+	default:
+		return false;
+	}
+
+	refresh_rate = drm_mode_vrefresh(mode);
+	while (vbios_mode->enh_table->refresh_rate < refresh_rate) {
+		vbios_mode->enh_table++;
+		if ((vbios_mode->enh_table->refresh_rate > refresh_rate) ||
+		    (vbios_mode->enh_table->refresh_rate == 0xff)) {
+			vbios_mode->enh_table--;
+			break;
+		}
+	}
+
+	hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
+	vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
+
+	adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
+	adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
+	adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
+	adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder +
+		vbios_mode->enh_table->hfp;
+	adjusted_mode->crtc_hsync_end = (vbios_mode->enh_table->hde + hborder +
+					 vbios_mode->enh_table->hfp +
+					 vbios_mode->enh_table->hsync);
+
+	adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
+	adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
+	adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
+	adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder +
+		vbios_mode->enh_table->vfp;
+	adjusted_mode->crtc_vsync_end = (vbios_mode->enh_table->vde + vborder +
+					 vbios_mode->enh_table->vfp +
+					 vbios_mode->enh_table->vsync);
+
+	refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
+	mode_id = vbios_mode->enh_table->mode_id;
+
+	if (ast->chip == AST1180) {
+		/* TODO 1180 */
+	} else {
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0xf) << 4));
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
+
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->fb->bits_per_pixel);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
+
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
+	}
+
+	return true;
+}
+
+static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			    struct ast_vbios_mode_info *vbios_mode)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	struct ast_vbios_stdtable *stdtable;
+	u32 i;
+	u8 jreg;
+
+	stdtable = vbios_mode->std_table;
+
+	jreg = stdtable->misc;
+	ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+
+	/* Set SEQ */
+	ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03);
+	for (i = 0; i < 4; i++) {
+		jreg = stdtable->seq[i];
+		if (!i)
+			jreg |= 0x20;
+		ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1) , jreg);
+	}
+
+	/* Set CRTC */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+	for (i = 0; i < 25; i++)
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
+
+	/* set AR */
+	jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+	for (i = 0; i < 20; i++) {
+		jreg = stdtable->ar[i];
+		ast_io_write8(ast, AST_IO_AR_PORT_WRITE, (u8)i);
+		ast_io_write8(ast, AST_IO_AR_PORT_WRITE, jreg);
+	}
+	ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x14);
+	ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x00);
+
+	jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+	ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x20);
+
+	/* Set GR */
+	for (i = 0; i < 9; i++)
+		ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
+}
+
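+/*
+ * Program the CRTC timings.  The standard VGA registers only hold 8 bits
+ * of each value; the overflow bits collected in jreg05/07/09 and the AST
+ * extended regs 0xAC/0xAD/0xAE supply the high bits.
+ */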
+static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			     struct ast_vbios_mode_info *vbios_mode)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
+	u16 temp;
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+
+	temp = (mode->crtc_htotal >> 3) - 5;
+	if (temp & 0x100)
+		jregAC |= 0x01; /* HT D[8] */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x00, 0x00, temp);
+
+	temp = (mode->crtc_hdisplay >> 3) - 1;
+	if (temp & 0x100)
+		jregAC |= 0x04; /* HDE D[8] */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x01, 0x00, temp);
+
+	temp = (mode->crtc_hblank_start >> 3) - 1;
+	if (temp & 0x100)
+		jregAC |= 0x10; /* HBS D[8] */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x02, 0x00, temp);
+
+	temp = ((mode->crtc_hblank_end >> 3) - 1) & 0x7f;
+	if (temp & 0x20)
+		jreg05 |= 0x80;  /* HBE D[5] */
+	if (temp & 0x40)
+		jregAD |= 0x01;  /* HBE D[6] */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, (temp & 0x1f));
+
+	temp = (mode->crtc_hsync_start >> 3) - 1;
+	if (temp & 0x100)
+		jregAC |= 0x40; /* HRS D[8] */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp);
+
+	temp = ((mode->crtc_hsync_end >> 3) - 1) & 0x3f;
+	if (temp & 0x20)
+		jregAD |= 0x04; /* HRE D[5] */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
+
+	/* vert timings */
+	temp = (mode->crtc_vtotal) - 2;
+	if (temp & 0x100)
+		jreg07 |= 0x01;
+	if (temp & 0x200)
+		jreg07 |= 0x20;
+	if (temp & 0x400)
+		jregAE |= 0x01;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x06, 0x00, temp);
+
+	temp = (mode->crtc_vsync_start) - 1;
+	if (temp & 0x100)
+		jreg07 |= 0x04;
+	if (temp & 0x200)
+		jreg07 |= 0x80;
+	if (temp & 0x400)
+		jregAE |= 0x08;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x10, 0x00, temp);
+
+	temp = (mode->crtc_vsync_end - 1) & 0x3f;
+	if (temp & 0x10)
+		jregAE |= 0x20;
+	if (temp & 0x20)
+		jregAE |= 0x40;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x70, temp & 0xf);
+
+	temp = mode->crtc_vdisplay - 1;
+	if (temp & 0x100)
+		jreg07 |= 0x02;
+	if (temp & 0x200)
+		jreg07 |= 0x40;
+	if (temp & 0x400)
+		jregAE |= 0x02;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x12, 0x00, temp);
+
+	temp = mode->crtc_vblank_start - 1;
+	if (temp & 0x100)
+		jreg07 |= 0x08;
+	if (temp & 0x200)
+		jreg09 |= 0x20;
+	if (temp & 0x400)
+		jregAE |= 0x04;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x15, 0x00, temp);
+
+	temp = mode->crtc_vblank_end - 1;
+	if (temp & 0x100)
+		jregAE |= 0x10;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x16, 0x00, temp);
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x07, 0x00, jreg07);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, (jregAE | 0x80));
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
+}
+
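+/* The scanline offset is programmed in 8-byte units: low byte in CRTC
+ * reg 0x13, upper 6 bits in extended reg 0xb0. */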
+static void ast_set_offset_reg(struct drm_crtc *crtc)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+
+	u16 offset;
+
+	offset = crtc->fb->pitches[0] >> 3;
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff));
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
+}
+
+static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mode,
+			     struct ast_vbios_mode_info *vbios_mode)
+{
+	struct ast_private *ast = dev->dev_private;
+	struct ast_vbios_dclk_info *clk_info;
+
+	clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, clk_info->param1);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, clk_info->param2);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f,
+			       (clk_info->param3 & 0x80) | ((clk_info->param3 & 0x3) << 4));
+}
+
+static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			     struct ast_vbios_mode_info *vbios_mode)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		jregA0 = 0x70;
+		jregA3 = 0x01;
+		jregA8 = 0x00;
+		break;
+	case 15:
+	case 16:
+		jregA0 = 0x70;
+		jregA3 = 0x04;
+		jregA8 = 0x02;
+		break;
+	case 32:
+		jregA0 = 0x70;
+		jregA3 = 0x08;
+		jregA8 = 0x02;
+		break;
+	}
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
+
+	/* Set Threshold */
+	if (ast->chip == AST2300) {
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
+	} else if (ast->chip == AST2100 ||
+		   ast->chip == AST1100 ||
+		   ast->chip == AST2200 ||
+		   ast->chip == AST2150) {
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f);
+	} else {
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x2f);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x1f);
+	}
+}
+
+void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
+		      struct ast_vbios_mode_info *vbios_mode)
+{
+	struct ast_private *ast = dev->dev_private;
+	u8 jreg;
+
+	jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
+	jreg |= (vbios_mode->enh_table->flags & SyncNN);
+	ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+}
+
+bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+		     struct ast_vbios_mode_info *vbios_mode)
+{
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
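+/* The scanout start address is programmed in dword units (offset >> 2),
+ * split across CRTC regs 0x0d, 0x0c and extended reg 0xaf. */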
+void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	u32 addr;
+
+	addr = offset >> 2;
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0d, (u8)(addr & 0xff));
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0c, (u8)((addr >> 8) & 0xff));
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xaf, (u8)((addr >> 16) & 0xff));
+}
+
+static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+
+	if (ast->chip == AST1180)
+		return;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+		ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+		ast_crtc_load_lut(crtc);
+		break;
+	case DRM_MODE_DPMS_OFF:
+		ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
+		break;
+	}
+}
+
+static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+/* ast is different - we will force move buffers out of VRAM */
+static int ast_crtc_do_set_base(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				int x, int y, int atomic)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	struct drm_gem_object *obj;
+	struct ast_framebuffer *ast_fb;
+	struct ast_bo *bo;
+	int ret;
+	u64 gpu_addr;
+
+	/* push the previous fb to system ram */
+	if (!atomic && fb) {
+		ast_fb = to_ast_framebuffer(fb);
+		obj = ast_fb->obj;
+		bo = gem_to_ast_bo(obj);
+		ret = ast_bo_reserve(bo, false);
+		if (ret)
+			return ret;
+		ast_bo_push_sysram(bo);
+		ast_bo_unreserve(bo);
+	}
+
+	ast_fb = to_ast_framebuffer(crtc->fb);
+	obj = ast_fb->obj;
+	bo = gem_to_ast_bo(obj);
+
+	ret = ast_bo_reserve(bo, false);
+	if (ret)
+		return ret;
+
+	ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+	if (ret) {
+		ast_bo_unreserve(bo);
+		return ret;
+	}
+
+	if (&ast->fbdev->afb == ast_fb) {
+		/* if pushing the console fb, kmap it */
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret)
+			DRM_ERROR("failed to kmap fbcon\n");
+	}
+	ast_bo_unreserve(bo);
+
+	ast_set_start_address_crt1(crtc, (u32)gpu_addr);
+
+	return 0;
+}
+
+static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+			     struct drm_framebuffer *old_fb)
+{
+	return ast_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int ast_crtc_mode_set(struct drm_crtc *crtc,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode,
+			     int x, int y,
+			     struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct ast_private *ast = crtc->dev->dev_private;
+	struct ast_vbios_mode_info vbios_mode;
+	bool ret;
+	if (ast->chip == AST1180) {
+		DRM_ERROR("AST 1180 modesetting not supported\n");
+		return -EINVAL;
+	}
+
+	ret = ast_get_vbios_mode_info(crtc, mode, adjusted_mode, &vbios_mode);
+	if (ret == false)
+		return -EINVAL;
+	ast_open_key(ast);
+
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+
+	ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
+	ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
+	ast_set_offset_reg(crtc);
+	ast_set_dclk_reg(dev, adjusted_mode, &vbios_mode);
+	ast_set_ext_reg(crtc, adjusted_mode, &vbios_mode);
+	ast_set_sync_reg(dev, adjusted_mode, &vbios_mode);
+	ast_set_dac_reg(crtc, adjusted_mode, &vbios_mode);
+
+	ast_crtc_mode_set_base(crtc, x, y, old_fb);
+
+	return 0;
+}
+
+static void ast_crtc_disable(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_prepare(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_commit(struct drm_crtc *crtc)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+}
+
+
+static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
+	.dpms = ast_crtc_dpms,
+	.mode_fixup = ast_crtc_mode_fixup,
+	.mode_set = ast_crtc_mode_set,
+	.mode_set_base = ast_crtc_mode_set_base,
+	.disable = ast_crtc_disable,
+	.load_lut = ast_crtc_load_lut,
+	.prepare = ast_crtc_prepare,
+	.commit = ast_crtc_commit,
+};
+
+static void ast_crtc_reset(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				 u16 *blue, uint32_t start, uint32_t size)
+{
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	int end = (start + size > 256) ? 256 : start + size, i;
+
+	/* userspace palettes are always correct as is */
+	for (i = start; i < end; i++) {
+		ast_crtc->lut_r[i] = red[i] >> 8;
+		ast_crtc->lut_g[i] = green[i] >> 8;
+		ast_crtc->lut_b[i] = blue[i] >> 8;
+	}
+	ast_crtc_load_lut(crtc);
+}
+
+
+static void ast_crtc_destroy(struct drm_crtc *crtc)
+{
+	drm_crtc_cleanup(crtc);
+	kfree(crtc);
+}
+
+static const struct drm_crtc_funcs ast_crtc_funcs = {
+	.cursor_set = ast_cursor_set,
+	.cursor_move = ast_cursor_move,
+	.reset = ast_crtc_reset,
+	.set_config = drm_crtc_helper_set_config,
+	.gamma_set = ast_crtc_gamma_set,
+	.destroy = ast_crtc_destroy,
+};
+
+int ast_crtc_init(struct drm_device *dev)
+{
+	struct ast_crtc *crtc;
+	int i;
+
+	crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL);
+	if (!crtc)
+		return -ENOMEM;
+
+	drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs);
+	drm_mode_crtc_set_gamma_size(&crtc->base, 256);
+	drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs);
+
+	for (i = 0; i < 256; i++) {
+		crtc->lut_r[i] = i;
+		crtc->lut_g[i] = i;
+		crtc->lut_b[i] = i;
+	}
+	return 0;
+}
+
+static void ast_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+
+static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	/* pick the encoder ids */
+	if (enc_id) {
+		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			return NULL;
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	return NULL;
+}
+
+
+static const struct drm_encoder_funcs ast_enc_funcs = {
+	.destroy = ast_encoder_destroy,
+};
+
+static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool ast_mode_fixup(struct drm_encoder *encoder,
+			   const struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void ast_encoder_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void ast_encoder_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void ast_encoder_commit(struct drm_encoder *encoder)
+{
+
+}
+
+
+static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
+	.dpms = ast_encoder_dpms,
+	.mode_fixup = ast_mode_fixup,
+	.prepare = ast_encoder_prepare,
+	.commit = ast_encoder_commit,
+	.mode_set = ast_encoder_mode_set,
+};
+
+int ast_encoder_init(struct drm_device *dev)
+{
+	struct ast_encoder *ast_encoder;
+
+	ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL);
+	if (!ast_encoder)
+		return -ENOMEM;
+
+	drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
+			 DRM_MODE_ENCODER_DAC);
+	drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
+
+	ast_encoder->base.possible_crtcs = 1;
+	return 0;
+}
+
+static int ast_get_modes(struct drm_connector *connector)
+{
+	struct ast_connector *ast_connector = to_ast_connector(connector);
+	struct edid *edid;
+	int ret;
+
+	edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
+	if (edid) {
+		drm_mode_connector_update_edid_property(&ast_connector->base, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+		return ret;
+	} else
+		drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
+	return 0;
+}
+
+static int ast_mode_valid(struct drm_connector *connector,
+			  struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static void ast_connector_destroy(struct drm_connector *connector)
+{
+	struct ast_connector *ast_connector = to_ast_connector(connector);
+	ast_i2c_destroy(ast_connector->i2c);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static enum drm_connector_status
+ast_connector_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
+	.mode_valid = ast_mode_valid,
+	.get_modes = ast_get_modes,
+	.best_encoder = ast_best_single_encoder,
+};
+
+static const struct drm_connector_funcs ast_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = ast_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = ast_connector_destroy,
+};
+
+int ast_connector_init(struct drm_device *dev)
+{
+	struct ast_connector *ast_connector;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	ast_connector = kzalloc(sizeof(struct ast_connector), GFP_KERNEL);
+	if (!ast_connector)
+		return -ENOMEM;
+
+	connector = &ast_connector->base;
+	drm_connector_init(dev, connector, &ast_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+	drm_connector_helper_add(connector, &ast_connector_helper_funcs);
+
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	drm_sysfs_connector_add(connector);
+
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+	encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	ast_connector->i2c = ast_i2c_create(dev);
+	if (!ast_connector->i2c)
+		DRM_ERROR("failed to add ddc bus for connector\n");
+
+	return 0;
+}
+
+/* allocate cursor cache and pin at start of VRAM */
+int ast_cursor_init(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	int size;
+	int ret;
+	struct drm_gem_object *obj;
+	struct ast_bo *bo;
+	uint64_t gpu_addr;
+
+	size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
+
+	ret = ast_gem_create(dev, size, true, &obj);
+	if (ret)
+		return ret;
+	bo = gem_to_ast_bo(obj);
+	ret = ast_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		goto fail;
+
+	ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+	ast_bo_unreserve(bo);
+	if (ret)
+		goto fail;
+
+	/* kmap the object */
+	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap);
+	if (ret)
+		goto fail;
+
+	ast->cursor_cache = obj;
+	ast->cursor_cache_gpu_addr = gpu_addr;
+	DRM_DEBUG_KMS("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
+	return 0;
+fail:
+	return ret;
+}
+
+void ast_cursor_fini(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	ttm_bo_kunmap(&ast->cache_kmap);
+	drm_gem_object_unreference_unlocked(ast->cursor_cache);
+}
+
+int ast_mode_init(struct drm_device *dev)
+{
+	ast_cursor_init(dev);
+	ast_crtc_init(dev);
+	ast_encoder_init(dev);
+	ast_connector_init(dev);
+	return 0;
+}
+
+void ast_mode_fini(struct drm_device *dev)
+{
+	ast_cursor_fini(dev);
+}
+
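+/*
+ * DDC is bit-banged through CRTC reg 0xb7: SCL is driven on bit 0 and
+ * sensed on bit 4, SDA driven on bit 2 and sensed on bit 5.  Each write
+ * is retried until the value reads back, in case it fails to latch.
+ */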
+static int get_clock(void *i2c_priv)
+{
+	struct ast_i2c_chan *i2c = i2c_priv;
+	struct ast_private *ast = i2c->dev->dev_private;
+	uint32_t val;
+
+	val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
+	return val & 1 ? 1 : 0;
+}
+
+static int get_data(void *i2c_priv)
+{
+	struct ast_i2c_chan *i2c = i2c_priv;
+	struct ast_private *ast = i2c->dev->dev_private;
+	uint32_t val;
+
+	val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
+	return val & 1 ? 1 : 0;
+}
+
+static void set_clock(void *i2c_priv, int clock)
+{
+	struct ast_i2c_chan *i2c = i2c_priv;
+	struct ast_private *ast = i2c->dev->dev_private;
+	int i;
+	u8 ujcrb7, jtemp;
+
+	for (i = 0; i < 0x10000; i++) {
+		ujcrb7 = ((clock & 0x01) ? 0 : 1);
+		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+		jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
+		if (ujcrb7 == jtemp)
+			break;
+	}
+}
+
+static void set_data(void *i2c_priv, int data)
+{
+	struct ast_i2c_chan *i2c = i2c_priv;
+	struct ast_private *ast = i2c->dev->dev_private;
+	int i;
+	u8 ujcrb7, jtemp;
+
+	for (i = 0; i < 0x10000; i++) {
+		ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
+		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+		jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
+		if (ujcrb7 == jtemp)
+			break;
+	}
+}
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
+{
+	struct ast_i2c_chan *i2c;
+	int ret;
+
+	i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
+	if (!i2c)
+		return NULL;
+
+	i2c->adapter.owner = THIS_MODULE;
+	i2c->adapter.class = I2C_CLASS_DDC;
+	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->dev = dev;
+	i2c_set_adapdata(&i2c->adapter, i2c);
+	snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+		 "AST i2c bit bus");
+	i2c->adapter.algo_data = &i2c->bit;
+
+	i2c->bit.udelay = 20;
+	i2c->bit.timeout = 2;
+	i2c->bit.data = i2c;
+	i2c->bit.setsda = set_data;
+	i2c->bit.setscl = set_clock;
+	i2c->bit.getsda = get_data;
+	i2c->bit.getscl = get_clock;
+	ret = i2c_bit_add_bus(&i2c->adapter);
+	if (ret) {
+		DRM_ERROR("Failed to register bit i2c\n");
+		goto out_free;
+	}
+
+	return i2c;
+out_free:
+	kfree(i2c);
+	return NULL;
+}
+
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
+{
+	if (!i2c)
+		return;
+	i2c_del_adapter(&i2c->adapter);
+	kfree(i2c);
+}
+
+void ast_show_cursor(struct drm_crtc *crtc)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	u8 jreg;
+
+	jreg = 0x2;
+	/* enable ARGB cursor */
+	jreg |= 1;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+}
+
+void ast_hide_cursor(struct drm_crtc *crtc)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
+}
+
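+/*
+ * Convert a user ARGB8888 cursor image to the hardware's packed ARGB4444
+ * layout (only the top nibble of each component is kept), bottom-right
+ * justified within the fixed-size HWC buffer.  Returns a checksum over
+ * the data written, used for the cursor signature.
+ */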
+static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
+{
+	union {
+		u32 ul;
+		u8 b[4];
+	} srcdata32[2], data32;
+	union {
+		u16 us;
+		u8 b[2];
+	} data16;
+	u32 csum = 0;
+	s32 alpha_dst_delta, last_alpha_dst_delta;
+	u8 *srcxor, *dstxor;
+	int i, j;
+	u32 per_pixel_copy, two_pixel_copy;
+
+	alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
+	last_alpha_dst_delta = alpha_dst_delta - (width << 1);
+
+	srcxor = src;
+	dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
+	per_pixel_copy = width & 1;
+	two_pixel_copy = width >> 1;
+
+	for (j = 0; j < height; j++) {
+		for (i = 0; i < two_pixel_copy; i++) {
+			srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+			srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
+			data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+			data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+			data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
+			data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
+
+			writel(data32.ul, dstxor);
+			csum += data32.ul;
+
+			dstxor += 4;
+			srcxor += 8;
+
+		}
+
+		for (i = 0; i < per_pixel_copy; i++) {
+			srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+			data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+			data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+			writew(data16.us, dstxor);
+			csum += (u32)data16.us;
+
+			dstxor += 2;
+			srcxor += 4;
+		}
+		dstxor += last_alpha_dst_delta;
+	}
+	return csum;
+}
+
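+/*
+ * Copy the user's cursor bo into the next slot of the pinned cursor
+ * cache, write the signature block (checksum, size, hotspot) behind the
+ * image, and point the hardware at the new slot (pattern address in
+ * 8-byte units).  Slots are cycled so the image being displayed is not
+ * rewritten in place.
+ */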
+static int ast_cursor_set(struct drm_crtc *crtc,
+			  struct drm_file *file_priv,
+			  uint32_t handle,
+			  uint32_t width,
+			  uint32_t height)
+{
+	struct ast_private *ast = crtc->dev->dev_private;
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	struct drm_gem_object *obj;
+	struct ast_bo *bo;
+	uint64_t gpu_addr;
+	u32 csum;
+	int ret;
+	struct ttm_bo_kmap_obj uobj_map;
+	u8 *src, *dst;
+	bool src_isiomem, dst_isiomem;
+	if (!handle) {
+		ast_hide_cursor(crtc);
+		return 0;
+	}
+
+	if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT)
+		return -EINVAL;
+
+	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
+		return -ENOENT;
+	}
+	bo = gem_to_ast_bo(obj);
+
+	ret = ast_bo_reserve(bo, false);
+	if (ret)
+		goto fail;
+
+	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
+	if (ret) {
+		ast_bo_unreserve(bo);
+		goto fail;
+	}
+
+	src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
+	dst = ttm_kmap_obj_virtual(&ast->cache_kmap, &dst_isiomem);
+
+	if (src_isiomem == true)
+		DRM_ERROR("src cursor bo should be in main memory\n");
+	if (dst_isiomem == false)
+		DRM_ERROR("dst bo should be in VRAM\n");
+
+	dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+
+	/* do data transfer to cursor cache */
+	csum = copy_cursor_image(src, dst, width, height);
+
+	/* write checksum + signature */
+	ttm_bo_kunmap(&uobj_map);
+	ast_bo_unreserve(bo);
+	{
+		u8 *dst = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+		writel(csum, dst);
+		writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+		writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+		writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+		writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+
+		/* set pattern offset */
+		gpu_addr = ast->cursor_cache_gpu_addr;
+		gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+		gpu_addr >>= 3;
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
+	}
+	ast_crtc->cursor_width = width;
+	ast_crtc->cursor_height = height;
+	ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
+	ast_crtc->offset_y = AST_MAX_HWC_HEIGHT - height;
+
+	ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM;
+
+	ast_show_cursor(crtc);
+
+	drm_gem_object_unreference_unlocked(obj);
+	return 0;
+fail:
+	drm_gem_object_unreference_unlocked(obj);
+	return ret;
+}
+
+static int ast_cursor_move(struct drm_crtc *crtc,
+			   int x, int y)
+{
+	struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+	struct ast_private *ast = crtc->dev->dev_private;
+	int x_offset, y_offset;
+	u8 *sig;
+
+	sig = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+	writel(x, sig + AST_HWC_SIGNATURE_X);
+	writel(y, sig + AST_HWC_SIGNATURE_Y);
+
+	x_offset = ast_crtc->offset_x;
+	y_offset = ast_crtc->offset_y;
+	if (x < 0) {
+		x_offset = (-x) + ast_crtc->offset_x;
+		x = 0;
+	}
+
+	if (y < 0) {
+		y_offset = (-y) + ast_crtc->offset_y;
+		y = 0;
+	}
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, (x & 0xff));
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, ((x >> 8) & 0x0f));
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, (y & 0xff));
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
+
+	/* dummy write to fire HWC */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/ast/ast_post.c b/linux-imx/drivers/gpu/drm/ast/ast_post.c
new file mode 100644
index 0000000..977cfb3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ast/ast_post.c
@@ -0,0 +1,1780 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <drm/drmP.h>
+#include "ast_drv.h"
+
+#include "ast_dram_tables.h"
+
+static void ast_init_dram_2300(struct drm_device *dev);
+
+static void
+ast_enable_vga(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+
+	ast_io_write8(ast, 0x43, 0x01);
+	ast_io_write8(ast, 0x42, 0x01);
+}
+
+#if 0 /* will use later */
+static bool
+ast_is_vga_enabled(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	u8 ch;
+
+	if (ast->chip == AST1180) {
+		/* TODO 1180 */
+	} else {
+		ch = ast_io_read8(ast, 0x43);
+		if (ch) {
+			ast_open_key(ast);
+			ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
+			return ch & 0x04;
+		}
+	}
+	return 0;
+}
+#endif
+
+static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
+
+static void
+ast_set_def_ext_reg(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	u8 i, index, reg;
+	const u8 *ext_reg_info;
+
+	/* reset scratch */
+	for (i = 0x81; i <= 0x8f; i++)
+		ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
+
+	if (ast->chip == AST2300) {
+		if (dev->pdev->revision >= 0x20)
+			ext_reg_info = extreginfo_ast2300;
+		else
+			ext_reg_info = extreginfo_ast2300a0;
+	} else
+		ext_reg_info = extreginfo;
+
+	index = 0xa0;
+	while (*ext_reg_info != 0xff) {
+		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00, *ext_reg_info);
+		index++;
+		ext_reg_info++;
+	}
+
+	/* disable standard IO/MEM decode if secondary */
+	/* ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */
+
+	/* Set Ext. Default */
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01);
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00);
+
+	/* Enable RAMDAC for A1 */
+	reg = 0x04;
+	if (ast->chip == AST2300)
+		reg |= 0x20;
+	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
+}
+
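+/*
+ * mindwm()/moutdwm() access SOC registers through the PCI relocation
+ * window: 0xf004 selects the 64k window base, 0xf000 bit 0 enables it,
+ * and the register is then reachable at 0x10000 plus its low 16 bits.
+ */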
+static inline u32 mindwm(struct ast_private *ast, u32 r)
+{
+	ast_write32(ast, 0xf004, r & 0xffff0000);
+	ast_write32(ast, 0xf000, 0x1);
+
+	return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
+}
+
+static inline void moutdwm(struct ast_private *ast, u32 r, u32 v)
+{
+	ast_write32(ast, 0xf004, r & 0xffff0000);
+	ast_write32(ast, 0xf000, 0x1);
+	ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
+}
+
+/*
+ * AST2100/2150 DLL CBR Setting
+ */
+#define CBR_SIZE_AST2150	     ((16 << 10) - 1)
+#define CBR_PASSNUM_AST2150          5
+#define CBR_THRESHOLD_AST2150        10
+#define CBR_THRESHOLD2_AST2150       10
+#define TIMEOUT_AST2150              5000000
+
+#define CBR_PATNUM_AST2150           8
+
+static const u32 pattern_AST2150[14] = {
+	0xFF00FF00,
+	0xCC33CC33,
+	0xAA55AA55,
+	0xFFFE0001,
+	0x683501FE,
+	0x0F1929B0,
+	0x2D0B4346,
+	0x60767F02,
+	0x6FBE36A6,
+	0x3A253035,
+	0x3019686D,
+	0x41C6167E,
+	0x620152BF,
+	0x20F050E0
+};
+
+static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
+{
+	u32 data, timeout;
+
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x40;
+		if (++timeout > TIMEOUT_AST2150) {
+			moutdwm(ast, 0x1e6e0070, 0x00000000);
+			return 0xffffffff;
+		}
+	} while (!data);
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x40;
+		if (++timeout > TIMEOUT_AST2150) {
+			moutdwm(ast, 0x1e6e0070, 0x00000000);
+			return 0xffffffff;
+		}
+	} while (!data);
+	data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	return data;
+}
+
+#if 0 /* unused in DDX driver - here for completeness */
+static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
+{
+	u32 data, timeout;
+
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x40;
+		if (++timeout > TIMEOUT_AST2150) {
+			moutdwm(ast, 0x1e6e0070, 0x00000000);
+			return 0xffffffff;
+		}
+	} while (!data);
+	data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	return data;
+}
+#endif
+
+static int cbrtest_ast2150(struct ast_private *ast)
+{
+	int i;
+
+	for (i = 0; i < 8; i++)
+		if (mmctestburst2_ast2150(ast, i))
+			return 0;
+	return 1;
+}
+
+static int cbrscan_ast2150(struct ast_private *ast, int busw)
+{
+	u32 patcnt, loop;
+
+	for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
+		moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
+		for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
+			if (cbrtest_ast2150(ast))
+				break;
+		}
+		if (loop == CBR_PASSNUM_AST2150)
+			return 0;
+	}
+	return 1;
+}
+
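+/*
+ * AST2100/2150 DLL calibration: sweep the DQS delay, run the burst
+ * pattern tests at each step to find the passing window, then program a
+ * point inside it (7/16 of the way up from the window minimum).
+ */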
+static void cbrdlli_ast2150(struct ast_private *ast, int busw)
+{
+	u32 dll_min[4], dll_max[4], dlli, data, passcnt;
+
+cbr_start:
+	dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
+	dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
+	passcnt = 0;
+
+	for (dlli = 0; dlli < 100; dlli++) {
+		moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+		data = cbrscan_ast2150(ast, busw);
+		if (data != 0) {
+			if (data & 0x1) {
+				if (dll_min[0] > dlli)
+					dll_min[0] = dlli;
+				if (dll_max[0] < dlli)
+					dll_max[0] = dlli;
+			}
+			passcnt++;
+		} else if (passcnt >= CBR_THRESHOLD_AST2150)
+			goto cbr_start;
+	}
+	if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
+		goto cbr_start;
+
+	dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
+	moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+}
+
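+/*
+ * Bring the memory controller up from the per-chip register tables.  This
+ * only runs when scratch reg 0xd0 bit 7 is clear (the "VGA only" case);
+ * table entries with index 0xff00 encode delays rather than register
+ * writes.  The function finishes by polling bit 6 of the same scratch
+ * register until the controller reports ready.
+ */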
+static void ast_init_dram_reg(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	u8 j;
+	u32 data, temp, i;
+	const struct ast_dramstruct *dram_reg_info;
+
+	j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+
+	if ((j & 0x80) == 0) { /* VGA only */
+		if (ast->chip == AST2000) {
+			dram_reg_info = ast2000_dram_table_data;
+			ast_write32(ast, 0xf004, 0x1e6e0000);
+			ast_write32(ast, 0xf000, 0x1);
+			ast_write32(ast, 0x10100, 0xa8);
+
+			do {
+				;
+			} while (ast_read32(ast, 0x10100) != 0xa8);
+		} else {/* AST2100/1100 */
+			if (ast->chip == AST2100 || ast->chip == AST2200)
+				dram_reg_info = ast2100_dram_table_data;
+			else
+				dram_reg_info = ast1100_dram_table_data;
+
+			ast_write32(ast, 0xf004, 0x1e6e0000);
+			ast_write32(ast, 0xf000, 0x1);
+			ast_write32(ast, 0x12000, 0x1688A8A8);
+			do {
+				;
+			} while (ast_read32(ast, 0x12000) != 0x01);
+
+			ast_write32(ast, 0x10000, 0xfc600309);
+			do {
+				;
+			} while (ast_read32(ast, 0x10000) != 0x01);
+		}
+
+		while (dram_reg_info->index != 0xffff) {
+			if (dram_reg_info->index == 0xff00) {/* delay fn */
+				for (i = 0; i < 15; i++)
+					udelay(dram_reg_info->data);
+			} else if (dram_reg_info->index == 0x4 && ast->chip != AST2000) {
+				data = dram_reg_info->data;
+				if (ast->dram_type == AST_DRAM_1Gx16)
+					data = 0x00000d89;
+				else if (ast->dram_type == AST_DRAM_1Gx32)
+					data = 0x00000c8d;
+
+				temp = ast_read32(ast, 0x12070);
+				temp &= 0xc;
+				temp <<= 2;
+				ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
+			} else
+				ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
+			dram_reg_info++;
+		}
+
+		/* AST 2100/2150 DRAM calibration */
+		data = ast_read32(ast, 0x10120);
+		if (data == 0x5061) { /* 266 MHz */
+			data = ast_read32(ast, 0x10004);
+			if (data & 0x40)
+				cbrdlli_ast2150(ast, 16); /* 16 bits */
+			else
+				cbrdlli_ast2150(ast, 32); /* 32 bits */
+		}
+
+		switch (ast->chip) {
+		case AST2000:
+			temp = ast_read32(ast, 0x10140);
+			ast_write32(ast, 0x10140, temp | 0x40);
+			break;
+		case AST1100:
+		case AST2100:
+		case AST2200:
+		case AST2150:
+			temp = ast_read32(ast, 0x1200c);
+			ast_write32(ast, 0x1200c, temp & 0xfffffffd);
+			temp = ast_read32(ast, 0x12040);
+			ast_write32(ast, 0x12040, temp | 0x40);
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* wait ready */
+	do {
+		j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+	} while ((j & 0x40) == 0);
+}
+
+void ast_post_gpu(struct drm_device *dev)
+{
+	u32 reg;
+	struct ast_private *ast = dev->dev_private;
+
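+	/* make sure PCI I/O and memory decoding are enabled (PCI_COMMAND) */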
+	pci_read_config_dword(ast->dev->pdev, 0x04, &reg);
+	reg |= 0x3;
+	pci_write_config_dword(ast->dev->pdev, 0x04, reg);
+
+	ast_enable_vga(dev);
+	ast_open_key(ast);
+	ast_set_def_ext_reg(dev);
+
+	if (ast->chip == AST2300)
+		ast_init_dram_2300(dev);
+	else
+		ast_init_dram_reg(dev);
+}
+
+/* AST 2300 DRAM settings */
+#define AST_DDR3 0
+#define AST_DDR2 1
+
+struct ast2300_dram_param {
+	u32 dram_type;
+	u32 dram_chipid;
+	u32 dram_freq;
+	u32 vram_size;
+	u32 odt;
+	u32 wodt;
+	u32 rodt;
+	u32 dram_config;
+	u32 reg_PERIOD;
+	u32 reg_MADJ;
+	u32 reg_SADJ;
+	u32 reg_MRS;
+	u32 reg_EMRS;
+	u32 reg_AC1;
+	u32 reg_AC2;
+	u32 reg_DQSIC;
+	u32 reg_DRV;
+	u32 reg_IOZ;
+	u32 reg_DQIDLY;
+	u32 reg_FREQ;
+	u32 madj_max;
+	u32 dll2_finetune_step;
+};
+
+/*
+ * DQSI DLL CBR Setting
+ */
+#define CBR_SIZE1            ((4  << 10) - 1)
+#define CBR_SIZE2            ((64 << 10) - 1)
+#define CBR_PASSNUM          5
+#define CBR_PASSNUM2         5
+#define CBR_THRESHOLD        10
+#define CBR_THRESHOLD2       10
+#define TIMEOUT              5000000
+#define CBR_PATNUM           8
+
+static const u32 pattern[8] = {
+	0xFF00FF00,
+	0xCC33CC33,
+	0xAA55AA55,
+	0x88778877,
+	0x92CC4D6E,
+	0x543D3CDE,
+	0xF1E843C7,
+	0x7C61D253
+};
+
+#if 0 /* unused in DDX, included for completeness */
+static int mmc_test_burst(struct ast_private *ast, u32 datagen)
+{
+	u32 data, timeout;
+
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x3000;
+		if (data & 0x2000) {
+			return 0;
+		}
+		if (++timeout > TIMEOUT) {
+			moutdwm(ast, 0x1e6e0070, 0x00000000);
+			return 0;
+		}
+	} while (!data);
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	return 1;
+}
+#endif
+
+static int mmc_test_burst2(struct ast_private *ast, u32 datagen)
+{
+	u32 data, timeout;
+
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x1000;
+		if (++timeout > TIMEOUT) {
+			moutdwm(ast, 0x1e6e0070, 0x0);
+			return -1;
+		}
+	} while (!data);
+	data = mindwm(ast, 0x1e6e0078);
+	data = (data | (data >> 16)) & 0xffff;
+	moutdwm(ast, 0x1e6e0070, 0x0);
+	return data;
+}
+
+#if 0 /* unused in DDX - here for completeness */
+static int mmc_test_single(struct ast_private *ast, u32 datagen)
+{
+	u32 data, timeout;
+
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x3000;
+		if (data & 0x2000)
+			return 0;
+		if (++timeout > TIMEOUT) {
+			moutdwm(ast, 0x1e6e0070, 0x0);
+			return 0;
+		}
+	} while (!data);
+	moutdwm(ast, 0x1e6e0070, 0x0);
+	return 1;
+}
+#endif
+
+static int mmc_test_single2(struct ast_private *ast, u32 datagen)
+{
+	u32 data, timeout;
+
+	moutdwm(ast, 0x1e6e0070, 0x00000000);
+	moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+	timeout = 0;
+	do {
+		data = mindwm(ast, 0x1e6e0070) & 0x1000;
+		if (++timeout > TIMEOUT) {
+			moutdwm(ast, 0x1e6e0070, 0x0);
+			return -1;
+		}
+	} while (!data);
+	data = mindwm(ast, 0x1e6e0078);
+	data = (data | (data >> 16)) & 0xffff;
+	moutdwm(ast, 0x1e6e0070, 0x0);
+	return data;
+}
+
+static int cbr_test(struct ast_private *ast)
+{
+	u32 data;
+	int i;
+	data = mmc_test_single2(ast, 0);
+	if ((data & 0xff) && (data & 0xff00))
+		return 0;
+	for (i = 0; i < 8; i++) {
+		data = mmc_test_burst2(ast, i);
+		if ((data & 0xff) && (data & 0xff00))
+			return 0;
+	}
+	if (!data)
+		return 3;
+	else if (data & 0xff)
+		return 2;
+	return 1;
+}
+
+static int cbr_scan(struct ast_private *ast)
+{
+	u32 data, data2, patcnt, loop;
+
+	data2 = 3;
+	for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+		moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+		for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+			if ((data = cbr_test(ast)) != 0) {
+				data2 &= data;
+				if (!data2)
+					return 0;
+				break;
+			}
+		}
+		if (loop == CBR_PASSNUM2)
+			return 0;
+	}
+	return data2;
+}
+
+static u32 cbr_test2(struct ast_private *ast)
+{
+	u32 data;
+
+	data = mmc_test_burst2(ast, 0);
+	if (data == 0xffff)
+		return 0;
+	data |= mmc_test_single2(ast, 0);
+	if (data == 0xffff)
+		return 0;
+
+	return ~data & 0xffff;
+}
+
+static u32 cbr_scan2(struct ast_private *ast)
+{
+	u32 data, data2, patcnt, loop;
+
+	data2 = 0xffff;
+	for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+		moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+		for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+			if ((data = cbr_test2(ast)) != 0) {
+				data2 &= data;
+				if (!data)
+					return 0;
+				break;
+			}
+		}
+		if (loop == CBR_PASSNUM2)
+			return 0;
+	}
+	return data2;
+}
+
+#if 0 /* unused in DDX - added for completeness */
+static void finetuneDQI(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
+
+	gold_sadj[0] = (mindwm(ast, 0x1E6E0024) >> 16) & 0xffff;
+	gold_sadj[1] = gold_sadj[0] >> 8;
+	gold_sadj[0] = gold_sadj[0] & 0xff;
+	gold_sadj[0] = (gold_sadj[0] + gold_sadj[1]) >> 1;
+	gold_sadj[1] = gold_sadj[0];
+
+	for (cnt = 0; cnt < 16; cnt++) {
+		dllmin[cnt] = 0xff;
+		dllmax[cnt] = 0x0;
+	}
+	passcnt = 0;
+	for (dlli = 0; dlli < 76; dlli++) {
+		moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+		/* Wait DQSI latch phase calibration */
+		moutdwm(ast, 0x1E6E0074, 0x00000010);
+		moutdwm(ast, 0x1E6E0070, 0x00000003);
+		do {
+			data = mindwm(ast, 0x1E6E0070);
+		} while (!(data & 0x00001000));
+		moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+		moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+		data = cbr_scan2(ast);
+		if (data != 0) {
+			mask = 0x00010001;
+			for (cnt = 0; cnt < 16; cnt++) {
+				if (data & mask) {
+					if (dllmin[cnt] > dlli) {
+						dllmin[cnt] = dlli;
+					}
+					if (dllmax[cnt] < dlli) {
+						dllmax[cnt] = dlli;
+					}
+				}
+				mask <<= 1;
+			}
+			passcnt++;
+		} else if (passcnt >= CBR_THRESHOLD) {
+			break;
+		}
+	}
+	data = 0;
+	for (cnt = 0; cnt < 8; cnt++) {
+		data >>= 3;
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
+			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+			if (gold_sadj[0] >= dlli) {
+				dlli = (gold_sadj[0] - dlli) >> 1;
+				if (dlli > 3) {
+					dlli = 3;
+				}
+			} else {
+				dlli = (dlli - gold_sadj[0]) >> 1;
+				if (dlli > 4) {
+					dlli = 4;
+				}
+				dlli = (8 - dlli) & 0x7;
+			}
+			data |= dlli << 21;
+		}
+	}
+	moutdwm(ast, 0x1E6E0080, data);
+
+	data = 0;
+	for (cnt = 8; cnt < 16; cnt++) {
+		data >>= 3;
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
+			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+			if (gold_sadj[1] >= dlli) {
+				dlli = (gold_sadj[1] - dlli) >> 1;
+				if (dlli > 3) {
+					dlli = 3;
+				} else {
+					dlli = (dlli - 1) & 0x7;
+				}
+			} else {
+				dlli = (dlli - gold_sadj[1]) >> 1;
+				dlli += 1;
+				if (dlli > 4) {
+					dlli = 4;
+				}
+				dlli = (8 - dlli) & 0x7;
+			}
+			data |= dlli << 21;
+		}
+	}
+	moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI */
+#endif
+
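+/*
+ * Per-lane DQS input delay tuning: sweep the delay and record each DQ
+ * lane's passing window with the CBR pattern tests, average the window
+ * minimums into a "golden" target, then bias every lane toward it.  The
+ * whole sweep restarts unless all 16 lanes yield a usable window.
+ */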
+static void finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
+
+FINETUNE_START:
+	for (cnt = 0; cnt < 16; cnt++) {
+		dllmin[cnt] = 0xff;
+		dllmax[cnt] = 0x0;
+	}
+	passcnt = 0;
+	for (dlli = 0; dlli < 76; dlli++) {
+		moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+		/* Wait DQSI latch phase calibration */
+		moutdwm(ast, 0x1E6E0074, 0x00000010);
+		moutdwm(ast, 0x1E6E0070, 0x00000003);
+		do {
+			data = mindwm(ast, 0x1E6E0070);
+		} while (!(data & 0x00001000));
+		moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+		moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+		data = cbr_scan2(ast);
+		if (data != 0) {
+			mask = 0x00010001;
+			for (cnt = 0; cnt < 16; cnt++) {
+				if (data & mask) {
+					if (dllmin[cnt] > dlli) {
+						dllmin[cnt] = dlli;
+					}
+					if (dllmax[cnt] < dlli) {
+						dllmax[cnt] = dlli;
+					}
+				}
+				mask <<= 1;
+			}
+			passcnt++;
+		} else if (passcnt >= CBR_THRESHOLD2) {
+			break;
+		}
+	}
+	gold_sadj[0] = 0x0;
+	passcnt = 0;
+	for (cnt = 0; cnt < 16; cnt++) {
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			gold_sadj[0] += dllmin[cnt];
+			passcnt++;
+		}
+	}
+	if (passcnt != 16) {
+		goto FINETUNE_START;
+	}
+	gold_sadj[0] = gold_sadj[0] >> 4;
+	gold_sadj[1] = gold_sadj[0];
+
+	data = 0;
+	for (cnt = 0; cnt < 8; cnt++) {
+		data >>= 3;
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			dlli = dllmin[cnt];
+			if (gold_sadj[0] >= dlli) {
+				dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
+				if (dlli > 3) {
+					dlli = 3;
+				}
+			} else {
+				dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
+				if (dlli > 4) {
+					dlli = 4;
+				}
+				dlli = (8 - dlli) & 0x7;
+			}
+			data |= dlli << 21;
+		}
+	}
+	moutdwm(ast, 0x1E6E0080, data);
+
+	data = 0;
+	for (cnt = 8; cnt < 16; cnt++) {
+		data >>= 3;
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			dlli = dllmin[cnt];
+			if (gold_sadj[1] >= dlli) {
+				dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
+				if (dlli > 3) {
+					dlli = 3;
+				} else {
+					dlli = (dlli - 1) & 0x7;
+				}
+			} else {
+				dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
+				dlli += 1;
+				if (dlli > 4) {
+					dlli = 4;
+				}
+				dlli = (8 - dlli) & 0x7;
+			}
+			data |= dlli << 21;
+		}
+	}
+	moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI_L */
+
+static void finetuneDQI_L2(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, data2;
+
+	for (cnt = 0; cnt < 16; cnt++) {
+		dllmin[cnt] = 0xff;
+		dllmax[cnt] = 0x0;
+	}
+	passcnt = 0;
+	for (dlli = 0; dlli < 76; dlli++) {
+		moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+		/* Wait DQSI latch phase calibration */
+		moutdwm(ast, 0x1E6E0074, 0x00000010);
+		moutdwm(ast, 0x1E6E0070, 0x00000003);
+		do {
+			data = mindwm(ast, 0x1E6E0070);
+		} while (!(data & 0x00001000));
+		moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+		moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+		data = cbr_scan2(ast);
+		if (data != 0) {
+			mask = 0x00010001;
+			for (cnt = 0; cnt < 16; cnt++) {
+				if (data & mask) {
+					if (dllmin[cnt] > dlli) {
+						dllmin[cnt] = dlli;
+					}
+					if (dllmax[cnt] < dlli) {
+						dllmax[cnt] = dlli;
+					}
+				}
+				mask <<= 1;
+			}
+			passcnt++;
+		} else if (passcnt >= CBR_THRESHOLD2) {
+			break;
+		}
+	}
+	gold_sadj[0] = 0x0;
+	gold_sadj[1] = 0xFF;
+	for (cnt = 0; cnt < 8; cnt++) {
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			if (gold_sadj[0] < dllmin[cnt]) {
+				gold_sadj[0] = dllmin[cnt];
+			}
+			if (gold_sadj[1] > dllmax[cnt]) {
+				gold_sadj[1] = dllmax[cnt];
+			}
+		}
+	}
+	gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
+	gold_sadj[1] = mindwm(ast, 0x1E6E0080);
+
+	data = 0;
+	for (cnt = 0; cnt < 8; cnt++) {
+		data >>= 3;
+		data2 = gold_sadj[1] & 0x7;
+		gold_sadj[1] >>= 3;
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+			if (gold_sadj[0] >= dlli) {
+				dlli = (gold_sadj[0] - dlli) >> 1;
+				if (dlli > 0) {
+					dlli = 1;
+				}
+				if (data2 != 3) {
+					data2 = (data2 + dlli) & 0x7;
+				}
+			} else {
+				dlli = (dlli - gold_sadj[0]) >> 1;
+				if (dlli > 0) {
+					dlli = 1;
+				}
+				if (data2 != 4) {
+					data2 = (data2 - dlli) & 0x7;
+				}
+			}
+		}
+		data |= data2 << 21;
+	}
+	moutdwm(ast, 0x1E6E0080, data);
+
+	gold_sadj[0] = 0x0;
+	gold_sadj[1] = 0xFF;
+	for (cnt = 8; cnt < 16; cnt++) {
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			if (gold_sadj[0] < dllmin[cnt]) {
+				gold_sadj[0] = dllmin[cnt];
+			}
+			if (gold_sadj[1] > dllmax[cnt]) {
+				gold_sadj[1] = dllmax[cnt];
+			}
+		}
+	}
+	gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
+	gold_sadj[1] = mindwm(ast, 0x1E6E0084);
+
+	data = 0;
+	for (cnt = 8; cnt < 16; cnt++) {
+		data >>= 3;
+		data2 = gold_sadj[1] & 0x7;
+		gold_sadj[1] >>= 3;
+		if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+			dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+			if (gold_sadj[0] >= dlli) {
+				dlli = (gold_sadj[0] - dlli) >> 1;
+				if (dlli > 0) {
+					dlli = 1;
+				}
+				if (data2 != 3) {
+					data2 = (data2 + dlli) & 0x7;
+				}
+			} else {
+				dlli = (dlli - gold_sadj[0]) >> 1;
+				if (dlli > 0) {
+					dlli = 1;
+				}
+				if (data2 != 4) {
+					data2 = (data2 - dlli) & 0x7;
+				}
+			}
+		}
+		data |= data2 << 21;
+	}
+	moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI_L2 */
+
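+/*
+ * Final DLL2 calibration: with the per-lane fine tuning above applied,
+ * sweep the common DQS delay for both byte lanes, require a passing
+ * window of at least CBR_THRESHOLD, and program the window midpoints.
+ */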
+static void cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 dllmin[2], dllmax[2], dlli, data, data2, passcnt;
+
+
+	finetuneDQI_L(ast, param);
+	finetuneDQI_L2(ast, param);
+
+CBR_START2:
+	dllmin[0] = dllmin[1] = 0xff;
+	dllmax[0] = dllmax[1] = 0x0;
+	passcnt = 0;
+	for (dlli = 0; dlli < 76; dlli++) {
+		moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+		/* Wait DQSI latch phase calibration */
+		moutdwm(ast, 0x1E6E0074, 0x00000010);
+		moutdwm(ast, 0x1E6E0070, 0x00000003);
+		do {
+			data = mindwm(ast, 0x1E6E0070);
+		} while (!(data & 0x00001000));
+		moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+		moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+		data = cbr_scan(ast);
+		if (data != 0) {
+			if (data & 0x1) {
+				if (dllmin[0] > dlli) {
+					dllmin[0] = dlli;
+				}
+				if (dllmax[0] < dlli) {
+					dllmax[0] = dlli;
+				}
+			}
+			if (data & 0x2) {
+				if (dllmin[1] > dlli) {
+					dllmin[1] = dlli;
+				}
+				if (dllmax[1] < dlli) {
+					dllmax[1] = dlli;
+				}
+			}
+			passcnt++;
+		} else if (passcnt >= CBR_THRESHOLD) {
+			break;
+		}
+	}
+	if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
+		goto CBR_START2;
+	}
+	if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
+		goto CBR_START2;
+	}
+	dlli  = (dllmin[1] + dllmax[1]) >> 1;
+	dlli <<= 8;
+	dlli += (dllmin[0] + dllmax[0]) >> 1;
+	moutdwm(ast, 0x1E6E0068, (mindwm(ast, 0x1E6E0068) & 0xFFFF) | (dlli << 16));
+
+	data  = (mindwm(ast, 0x1E6E0080) >> 24) & 0x1F;
+	data2 = (mindwm(ast, 0x1E6E0018) & 0xff80ffff) | (data << 16);
+	moutdwm(ast, 0x1E6E0018, data2);
+	moutdwm(ast, 0x1E6E0024, 0x8001 | (data << 1) | (param->dll2_finetune_step << 8));
+
+	/* Wait DQSI latch phase calibration */
+	moutdwm(ast, 0x1E6E0074, 0x00000010);
+	moutdwm(ast, 0x1E6E0070, 0x00000003);
+	do {
+		data = mindwm(ast, 0x1E6E0070);
+	} while (!(data & 0x00001000));
+	moutdwm(ast, 0x1E6E0070, 0x00000000);
+	moutdwm(ast, 0x1E6E0070, 0x00000003);
+	do {
+		data = mindwm(ast, 0x1E6E0070);
+	} while (!(data & 0x00001000));
+	moutdwm(ast, 0x1E6E0070, 0x00000000);
+} /* CBRDLL2 */
+
+static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 trap, trap_AC2, trap_MRS;
+
+	moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+	/* Get trap info */
+	trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+	trap_AC2  = 0x00020000 + (trap << 16);
+	trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
+	trap_MRS  = 0x00000010 + (trap << 4);
+	trap_MRS |= ((trap & 0x2) << 18);
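+	/* The two strap bits select one of four board-level DRAM variants;
+	 * they are folded into the AC2 timing and MRS mode-register values
+	 * chosen per frequency below. */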
+
+	param->reg_MADJ       = 0x00034C4C;
+	param->reg_SADJ       = 0x00001800;
+	param->reg_DRV        = 0x000000F0;
+	param->reg_PERIOD     = param->dram_freq;
+	param->rodt           = 0;
+
+	switch (param->dram_freq) {
+	case 336:
+		moutdwm(ast, 0x1E6E2020, 0x0190);
+		param->wodt          = 0;
+		param->reg_AC1       = 0x22202725;
+		param->reg_AC2       = 0xAA007613 | trap_AC2;
+		param->reg_DQSIC     = 0x000000BA;
+		param->reg_MRS       = 0x04001400 | trap_MRS;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x00000074;
+		param->reg_FREQ      = 0x00004DC0;
+		param->madj_max      = 96;
+		param->dll2_finetune_step = 3;
+		break;
+	default:
+	case 396:
+		moutdwm(ast, 0x1E6E2020, 0x03F1);
+		param->wodt          = 1;
+		param->reg_AC1       = 0x33302825;
+		param->reg_AC2       = 0xCC009617 | trap_AC2;
+		param->reg_DQSIC     = 0x000000E2;
+		param->reg_MRS       = 0x04001600 | trap_MRS;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DRV       = 0x000000FA;
+		param->reg_DQIDLY    = 0x00000089;
+		param->reg_FREQ      = 0x000050C0;
+		param->madj_max      = 96;
+		param->dll2_finetune_step = 4;
+
+		switch (param->dram_chipid) {
+		default:
+		case AST_DRAM_512Mx16:
+		case AST_DRAM_1Gx16:
+			param->reg_AC2   = 0xCC009617 | trap_AC2;
+			break;
+		case AST_DRAM_2Gx16:
+			param->reg_AC2   = 0xCC009622 | trap_AC2;
+			break;
+		case AST_DRAM_4Gx16:
+			param->reg_AC2   = 0xCC00963F | trap_AC2;
+			break;
+		}
+		break;
+
+	case 408:
+		moutdwm(ast, 0x1E6E2020, 0x01F0);
+		param->wodt          = 1;
+		param->reg_AC1       = 0x33302825;
+		param->reg_AC2       = 0xCC009617 | trap_AC2;
+		param->reg_DQSIC     = 0x000000E2;
+		param->reg_MRS       = 0x04001600 | trap_MRS;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DRV       = 0x000000FA;
+		param->reg_DQIDLY    = 0x00000089;
+		param->reg_FREQ      = 0x000050C0;
+		param->madj_max      = 96;
+		param->dll2_finetune_step = 4;
+
+		switch (param->dram_chipid) {
+		default:
+		case AST_DRAM_512Mx16:
+		case AST_DRAM_1Gx16:
+			param->reg_AC2   = 0xCC009617 | trap_AC2;
+			break;
+		case AST_DRAM_2Gx16:
+			param->reg_AC2   = 0xCC009622 | trap_AC2;
+			break;
+		case AST_DRAM_4Gx16:
+			param->reg_AC2   = 0xCC00963F | trap_AC2;
+			break;
+		}
+
+		break;
+	case 456:
+		moutdwm(ast, 0x1E6E2020, 0x0230);
+		param->wodt          = 0;
+		param->reg_AC1       = 0x33302926;
+		param->reg_AC2       = 0xCD44961A;
+		param->reg_DQSIC     = 0x000000FC;
+		param->reg_MRS       = 0x00081830;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_IOZ       = 0x00000045;
+		param->reg_DQIDLY    = 0x00000097;
+		param->reg_FREQ      = 0x000052C0;
+		param->madj_max      = 88;
+		param->dll2_finetune_step = 4;
+		break;
+	case 504:
+		moutdwm(ast, 0x1E6E2020, 0x0270);
+		param->wodt          = 1;
+		param->reg_AC1       = 0x33302926;
+		param->reg_AC2       = 0xDE44A61D;
+		param->reg_DQSIC     = 0x00000117;
+		param->reg_MRS       = 0x00081A30;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_IOZ       = 0x070000BB;
+		param->reg_DQIDLY    = 0x000000A0;
+		param->reg_FREQ      = 0x000054C0;
+		param->madj_max      = 79;
+		param->dll2_finetune_step = 4;
+		break;
+	case 528:
+		moutdwm(ast, 0x1E6E2020, 0x0290);
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x33302926;
+		param->reg_AC2       = 0xEF44B61E;
+		param->reg_DQSIC     = 0x00000125;
+		param->reg_MRS       = 0x00081A30;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x000000F5;
+		param->reg_IOZ       = 0x00000023;
+		param->reg_DQIDLY    = 0x00000088;
+		param->reg_FREQ      = 0x000055C0;
+		param->madj_max      = 76;
+		param->dll2_finetune_step = 3;
+		break;
+	case 576:
+		moutdwm(ast, 0x1E6E2020, 0x0140);
+		param->reg_MADJ      = 0x00136868;
+		param->reg_SADJ      = 0x00004534;
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x33302A37;
+		param->reg_AC2       = 0xEF56B61E;
+		param->reg_DQSIC     = 0x0000013F;
+		param->reg_MRS       = 0x00101A50;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x000000FA;
+		param->reg_IOZ       = 0x00000023;
+		param->reg_DQIDLY    = 0x00000078;
+		param->reg_FREQ      = 0x000057C0;
+		param->madj_max      = 136;
+		param->dll2_finetune_step = 3;
+		break;
+	case 600:
+		moutdwm(ast, 0x1E6E2020, 0x02E1);
+		param->reg_MADJ      = 0x00136868;
+		param->reg_SADJ      = 0x00004534;
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x32302A37;
+		param->reg_AC2       = 0xDF56B61F;
+		param->reg_DQSIC     = 0x0000014D;
+		param->reg_MRS       = 0x00101A50;
+		param->reg_EMRS      = 0x00000004;
+		param->reg_DRV       = 0x000000F5;
+		param->reg_IOZ       = 0x00000023;
+		param->reg_DQIDLY    = 0x00000078;
+		param->reg_FREQ      = 0x000058C0;
+		param->madj_max      = 132;
+		param->dll2_finetune_step = 3;
+		break;
+	case 624:
+		moutdwm(ast, 0x1E6E2020, 0x0160);
+		param->reg_MADJ      = 0x00136868;
+		param->reg_SADJ      = 0x00004534;
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x32302A37;
+		param->reg_AC2       = 0xEF56B621;
+		param->reg_DQSIC     = 0x0000015A;
+		param->reg_MRS       = 0x02101A50;
+		param->reg_EMRS      = 0x00000004;
+		param->reg_DRV       = 0x000000F5;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x00000078;
+		param->reg_FREQ      = 0x000059C0;
+		param->madj_max      = 128;
+		param->dll2_finetune_step = 3;
+		break;
+	} /* switch freq */
+
+	switch (param->dram_chipid) {
+	case AST_DRAM_512Mx16:
+		param->dram_config = 0x130;
+		break;
+	default:
+	case AST_DRAM_1Gx16:
+		param->dram_config = 0x131;
+		break;
+	case AST_DRAM_2Gx16:
+		param->dram_config = 0x132;
+		break;
+	case AST_DRAM_4Gx16:
+		param->dram_config = 0x133;
+		break;
+	} /* switch size */
+
+	switch (param->vram_size) {
+	default:
+	case AST_VIDMEM_SIZE_8M:
+		param->dram_config |= 0x00;
+		break;
+	case AST_VIDMEM_SIZE_16M:
+		param->dram_config |= 0x04;
+		break;
+	case AST_VIDMEM_SIZE_32M:
+		param->dram_config |= 0x08;
+		break;
+	case AST_VIDMEM_SIZE_64M:
+		param->dram_config |= 0x0c;
+		break;
+	}
+
+}
+
+static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 data, data2;
+
+	moutdwm(ast, 0x1E6E0000, 0xFC600309);
+	moutdwm(ast, 0x1E6E0018, 0x00000100);
+	moutdwm(ast, 0x1E6E0024, 0x00000000);
+	moutdwm(ast, 0x1E6E0034, 0x00000000);
+	udelay(10);
+	moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+	moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+	udelay(10);
+	moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+	udelay(10);
+
+	moutdwm(ast, 0x1E6E0004, param->dram_config);
+	moutdwm(ast, 0x1E6E0008, 0x90040f);
+	moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+	moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+	moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+	moutdwm(ast, 0x1E6E0080, 0x00000000);
+	moutdwm(ast, 0x1E6E0084, 0x00000000);
+	moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+	moutdwm(ast, 0x1E6E0018, 0x4040A170);
+	moutdwm(ast, 0x1E6E0018, 0x20402370);
+	moutdwm(ast, 0x1E6E0038, 0x00000000);
+	moutdwm(ast, 0x1E6E0040, 0xFF444444);
+	moutdwm(ast, 0x1E6E0044, 0x22222222);
+	moutdwm(ast, 0x1E6E0048, 0x22222222);
+	moutdwm(ast, 0x1E6E004C, 0x00000002);
+	moutdwm(ast, 0x1E6E0050, 0x80000000);
+	moutdwm(ast, 0x1E6E0050, 0x00000000);
+	moutdwm(ast, 0x1E6E0054, 0);
+	moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+	moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+	moutdwm(ast, 0x1E6E0070, 0x00000000);
+	moutdwm(ast, 0x1E6E0074, 0x00000000);
+	moutdwm(ast, 0x1E6E0078, 0x00000000);
+	moutdwm(ast, 0x1E6E007C, 0x00000000);
+	/* Wait MCLK2X lock to MCLK */
+	do {
+		data = mindwm(ast, 0x1E6E001C);
+	} while (!(data & 0x08000000));
+	moutdwm(ast, 0x1E6E0034, 0x00000001);
+	moutdwm(ast, 0x1E6E000C, 0x00005C04);
+	udelay(10);
+	moutdwm(ast, 0x1E6E000C, 0x00000000);
+	moutdwm(ast, 0x1E6E0034, 0x00000000);
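+	/* Check the DLL lock status in 0x1E6E001C[15:8]; while it is out of
+	 * range, step the master delay setting (0x1E6E0064) by 4, rescale
+	 * the slave delay (0x1E6E0068) to match, and re-run the calibration
+	 * until the DLL locks or madj_max is exceeded. */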
+	data = mindwm(ast, 0x1E6E001C);
+	data = (data >> 8) & 0xff;
+	while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+		data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+		if ((data2 & 0xff) > param->madj_max) {
+			break;
+		}
+		moutdwm(ast, 0x1E6E0064, data2);
+		if (data2 & 0x00100000) {
+			data2 = ((data2 & 0xff) >> 3) + 3;
+		} else {
+			data2 = ((data2 & 0xff) >> 2) + 5;
+		}
+		data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+		data2 += data & 0xff;
+		data = data | (data2 << 8);
+		moutdwm(ast, 0x1E6E0068, data);
+		udelay(10);
+		moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+		udelay(10);
+		data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+		moutdwm(ast, 0x1E6E0018, data);
+		data = data | 0x200;
+		moutdwm(ast, 0x1E6E0018, data);
+		do {
+			data = mindwm(ast, 0x1E6E001C);
+		} while (!(data & 0x08000000));
+
+		moutdwm(ast, 0x1E6E0034, 0x00000001);
+		moutdwm(ast, 0x1E6E000C, 0x00005C04);
+		udelay(10);
+		moutdwm(ast, 0x1E6E000C, 0x00000000);
+		moutdwm(ast, 0x1E6E0034, 0x00000000);
+		data = mindwm(ast, 0x1E6E001C);
+		data = (data >> 8) & 0xff;
+	}
+	data = mindwm(ast, 0x1E6E0018) | 0xC00;
+	moutdwm(ast, 0x1E6E0018, data);
+
+	moutdwm(ast, 0x1E6E0034, 0x00000001);
+	moutdwm(ast, 0x1E6E000C, 0x00000040);
+	udelay(50);
+	/* Mode Register Setting */
+	moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+	moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+	moutdwm(ast, 0x1E6E0028, 0x00000005);
+	moutdwm(ast, 0x1E6E0028, 0x00000007);
+	moutdwm(ast, 0x1E6E0028, 0x00000003);
+	moutdwm(ast, 0x1E6E0028, 0x00000001);
+	moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+	moutdwm(ast, 0x1E6E000C, 0x00005C08);
+	moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+	moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+	data = 0;
+	if (param->wodt) {
+		data = 0x300;
+	}
+	if (param->rodt) {
+		data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+	}
+	moutdwm(ast, 0x1E6E0034, data | 0x3);
+
+	/* Wait DQI delay lock */
+	do {
+		data = mindwm(ast, 0x1E6E0080);
+	} while (!(data & 0x40000000));
+	/* Wait DQSI delay lock */
+	do {
+		data = mindwm(ast, 0x1E6E0020);
+	} while (!(data & 0x00000800));
+	/* Calibrate the DQSI delay */
+	cbr_dll2(ast, param);
+
+	moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+	/* ECC Memory Initialization */
+#ifdef ECC
+	moutdwm(ast, 0x1E6E007C, 0x00000000);
+	moutdwm(ast, 0x1E6E0070, 0x221);
+	do {
+		data = mindwm(ast, 0x1E6E0070);
+	} while (!(data & 0x00001000));
+	moutdwm(ast, 0x1E6E0070, 0x00000000);
+	moutdwm(ast, 0x1E6E0050, 0x80000000);
+	moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+
+}
+
+static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 trap, trap_AC2, trap_MRS;
+
+	moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+	/* Get trap info */
+	trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+	trap_AC2  = (trap << 20) | (trap << 16);
+	trap_AC2 += 0x00110000;
+	trap_MRS  = 0x00000040 | (trap << 4);
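+	/* As in the DDR3 path, the strap bits adjust the AC2 timing and MRS
+	 * defaults for the frequency selected below. */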
+
+	param->reg_MADJ       = 0x00034C4C;
+	param->reg_SADJ       = 0x00001800;
+	param->reg_DRV        = 0x000000F0;
+	param->reg_PERIOD     = param->dram_freq;
+	param->rodt           = 0;
+
+	switch (param->dram_freq) {
+	case 264:
+		moutdwm(ast, 0x1E6E2020, 0x0130);
+		param->wodt          = 0;
+		param->reg_AC1       = 0x11101513;
+		param->reg_AC2       = 0x78117011;
+		param->reg_DQSIC     = 0x00000092;
+		param->reg_MRS       = 0x00000842;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_DRV       = 0x000000F0;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x0000005A;
+		param->reg_FREQ      = 0x00004AC0;
+		param->madj_max      = 138;
+		param->dll2_finetune_step = 3;
+		break;
+	case 336:
+		moutdwm(ast, 0x1E6E2020, 0x0190);
+		param->wodt          = 1;
+		param->reg_AC1       = 0x22202613;
+		param->reg_AC2       = 0xAA009016 | trap_AC2;
+		param->reg_DQSIC     = 0x000000BA;
+		param->reg_MRS       = 0x00000A02 | trap_MRS;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x000000FA;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x00000074;
+		param->reg_FREQ      = 0x00004DC0;
+		param->madj_max      = 96;
+		param->dll2_finetune_step = 3;
+		break;
+	default:
+	case 396:
+		moutdwm(ast, 0x1E6E2020, 0x03F1);
+		param->wodt          = 1;
+		param->rodt          = 0;
+		param->reg_AC1       = 0x33302714;
+		param->reg_AC2       = 0xCC00B01B | trap_AC2;
+		param->reg_DQSIC     = 0x000000E2;
+		param->reg_MRS       = 0x00000C02 | trap_MRS;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x000000FA;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x00000089;
+		param->reg_FREQ      = 0x000050C0;
+		param->madj_max      = 96;
+		param->dll2_finetune_step = 4;
+
+		switch (param->dram_chipid) {
+		case AST_DRAM_512Mx16:
+			param->reg_AC2   = 0xCC00B016 | trap_AC2;
+			break;
+		default:
+		case AST_DRAM_1Gx16:
+			param->reg_AC2   = 0xCC00B01B | trap_AC2;
+			break;
+		case AST_DRAM_2Gx16:
+			param->reg_AC2   = 0xCC00B02B | trap_AC2;
+			break;
+		case AST_DRAM_4Gx16:
+			param->reg_AC2   = 0xCC00B03F | trap_AC2;
+			break;
+		}
+
+		break;
+
+	case 408:
+		moutdwm(ast, 0x1E6E2020, 0x01F0);
+		param->wodt          = 1;
+		param->rodt          = 0;
+		param->reg_AC1       = 0x33302714;
+		param->reg_AC2       = 0xCC00B01B | trap_AC2;
+		param->reg_DQSIC     = 0x000000E2;
+		param->reg_MRS       = 0x00000C02 | trap_MRS;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x000000FA;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x00000089;
+		param->reg_FREQ      = 0x000050C0;
+		param->madj_max      = 96;
+		param->dll2_finetune_step = 4;
+
+		switch (param->dram_chipid) {
+		case AST_DRAM_512Mx16:
+			param->reg_AC2   = 0xCC00B016 | trap_AC2;
+			break;
+		default:
+		case AST_DRAM_1Gx16:
+			param->reg_AC2   = 0xCC00B01B | trap_AC2;
+			break;
+		case AST_DRAM_2Gx16:
+			param->reg_AC2   = 0xCC00B02B | trap_AC2;
+			break;
+		case AST_DRAM_4Gx16:
+			param->reg_AC2   = 0xCC00B03F | trap_AC2;
+			break;
+		}
+
+		break;
+	case 456:
+		moutdwm(ast, 0x1E6E2020, 0x0230);
+		param->wodt          = 0;
+		param->reg_AC1       = 0x33302815;
+		param->reg_AC2       = 0xCD44B01E;
+		param->reg_DQSIC     = 0x000000FC;
+		param->reg_MRS       = 0x00000E72;
+		param->reg_EMRS      = 0x00000000;
+		param->reg_DRV       = 0x00000000;
+		param->reg_IOZ       = 0x00000034;
+		param->reg_DQIDLY    = 0x00000097;
+		param->reg_FREQ      = 0x000052C0;
+		param->madj_max      = 88;
+		param->dll2_finetune_step = 3;
+		break;
+	case 504:
+		moutdwm(ast, 0x1E6E2020, 0x0261);
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x33302815;
+		param->reg_AC2       = 0xDE44C022;
+		param->reg_DQSIC     = 0x00000117;
+		param->reg_MRS       = 0x00000E72;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x0000000A;
+		param->reg_IOZ       = 0x00000045;
+		param->reg_DQIDLY    = 0x000000A0;
+		param->reg_FREQ      = 0x000054C0;
+		param->madj_max      = 79;
+		param->dll2_finetune_step = 3;
+		break;
+	case 528:
+		moutdwm(ast, 0x1E6E2020, 0x0120);
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x33302815;
+		param->reg_AC2       = 0xEF44D024;
+		param->reg_DQSIC     = 0x00000125;
+		param->reg_MRS       = 0x00000E72;
+		param->reg_EMRS      = 0x00000004;
+		param->reg_DRV       = 0x000000F9;
+		param->reg_IOZ       = 0x00000045;
+		param->reg_DQIDLY    = 0x000000A7;
+		param->reg_FREQ      = 0x000055C0;
+		param->madj_max      = 76;
+		param->dll2_finetune_step = 3;
+		break;
+	case 552:
+		moutdwm(ast, 0x1E6E2020, 0x02A1);
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x43402915;
+		param->reg_AC2       = 0xFF44E025;
+		param->reg_DQSIC     = 0x00000132;
+		param->reg_MRS       = 0x00000E72;
+		param->reg_EMRS      = 0x00000040;
+		param->reg_DRV       = 0x0000000A;
+		param->reg_IOZ       = 0x00000045;
+		param->reg_DQIDLY    = 0x000000AD;
+		param->reg_FREQ      = 0x000056C0;
+		param->madj_max      = 76;
+		param->dll2_finetune_step = 3;
+		break;
+	case 576:
+		moutdwm(ast, 0x1E6E2020, 0x0140);
+		param->wodt          = 1;
+		param->rodt          = 1;
+		param->reg_AC1       = 0x43402915;
+		param->reg_AC2       = 0xFF44E027;
+		param->reg_DQSIC     = 0x0000013F;
+		param->reg_MRS       = 0x00000E72;
+		param->reg_EMRS      = 0x00000004;
+		param->reg_DRV       = 0x000000F5;
+		param->reg_IOZ       = 0x00000045;
+		param->reg_DQIDLY    = 0x000000B3;
+		param->reg_FREQ      = 0x000057C0;
+		param->madj_max      = 76;
+		param->dll2_finetune_step = 3;
+		break;
+	}
+
+	switch (param->dram_chipid) {
+	case AST_DRAM_512Mx16:
+		param->dram_config = 0x100;
+		break;
+	default:
+	case AST_DRAM_1Gx16:
+		param->dram_config = 0x121;
+		break;
+	case AST_DRAM_2Gx16:
+		param->dram_config = 0x122;
+		break;
+	case AST_DRAM_4Gx16:
+		param->dram_config = 0x123;
+		break;
+	} /* switch size */
+
+	switch (param->vram_size) {
+	default:
+	case AST_VIDMEM_SIZE_8M:
+		param->dram_config |= 0x00;
+		break;
+	case AST_VIDMEM_SIZE_16M:
+		param->dram_config |= 0x04;
+		break;
+	case AST_VIDMEM_SIZE_32M:
+		param->dram_config |= 0x08;
+		break;
+	case AST_VIDMEM_SIZE_64M:
+		param->dram_config |= 0x0c;
+		break;
+	}
+}
+
+static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+	u32 data, data2;
+
+	moutdwm(ast, 0x1E6E0000, 0xFC600309);
+	moutdwm(ast, 0x1E6E0018, 0x00000100);
+	moutdwm(ast, 0x1E6E0024, 0x00000000);
+	moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+	moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+	udelay(10);
+	moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+	udelay(10);
+
+	moutdwm(ast, 0x1E6E0004, param->dram_config);
+	moutdwm(ast, 0x1E6E0008, 0x90040f);
+	moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+	moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+	moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+	moutdwm(ast, 0x1E6E0080, 0x00000000);
+	moutdwm(ast, 0x1E6E0084, 0x00000000);
+	moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+	moutdwm(ast, 0x1E6E0018, 0x4040A130);
+	moutdwm(ast, 0x1E6E0018, 0x20402330);
+	moutdwm(ast, 0x1E6E0038, 0x00000000);
+	moutdwm(ast, 0x1E6E0040, 0xFF808000);
+	moutdwm(ast, 0x1E6E0044, 0x88848466);
+	moutdwm(ast, 0x1E6E0048, 0x44440008);
+	moutdwm(ast, 0x1E6E004C, 0x00000000);
+	moutdwm(ast, 0x1E6E0050, 0x80000000);
+	moutdwm(ast, 0x1E6E0050, 0x00000000);
+	moutdwm(ast, 0x1E6E0054, 0);
+	moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+	moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+	moutdwm(ast, 0x1E6E0070, 0x00000000);
+	moutdwm(ast, 0x1E6E0074, 0x00000000);
+	moutdwm(ast, 0x1E6E0078, 0x00000000);
+	moutdwm(ast, 0x1E6E007C, 0x00000000);
+
+	/* Wait MCLK2X lock to MCLK */
+	do {
+		data = mindwm(ast, 0x1E6E001C);
+	} while (!(data & 0x08000000));
+	moutdwm(ast, 0x1E6E0034, 0x00000001);
+	moutdwm(ast, 0x1E6E000C, 0x00005C04);
+	udelay(10);
+	moutdwm(ast, 0x1E6E000C, 0x00000000);
+	moutdwm(ast, 0x1E6E0034, 0x00000000);
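+	/* Same DLL lock test and MADJ/SADJ stepping loop as in ddr3_init(). */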
+	data = mindwm(ast, 0x1E6E001C);
+	data = (data >> 8) & 0xff;
+	while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+		data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+		if ((data2 & 0xff) > param->madj_max) {
+			break;
+		}
+		moutdwm(ast, 0x1E6E0064, data2);
+		if (data2 & 0x00100000) {
+			data2 = ((data2 & 0xff) >> 3) + 3;
+		} else {
+			data2 = ((data2 & 0xff) >> 2) + 5;
+		}
+		data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+		data2 += data & 0xff;
+		data = data | (data2 << 8);
+		moutdwm(ast, 0x1E6E0068, data);
+		udelay(10);
+		moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+		udelay(10);
+		data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+		moutdwm(ast, 0x1E6E0018, data);
+		data = data | 0x200;
+		moutdwm(ast, 0x1E6E0018, data);
+		do {
+			data = mindwm(ast, 0x1E6E001C);
+		} while (!(data & 0x08000000));
+
+		moutdwm(ast, 0x1E6E0034, 0x00000001);
+		moutdwm(ast, 0x1E6E000C, 0x00005C04);
+		udelay(10);
+		moutdwm(ast, 0x1E6E000C, 0x00000000);
+		moutdwm(ast, 0x1E6E0034, 0x00000000);
+		data = mindwm(ast, 0x1E6E001C);
+		data = (data >> 8) & 0xff;
+	}
+	data = mindwm(ast, 0x1E6E0018) | 0xC00;
+	moutdwm(ast, 0x1E6E0018, data);
+
+	moutdwm(ast, 0x1E6E0034, 0x00000001);
+	moutdwm(ast, 0x1E6E000C, 0x00000000);
+	udelay(50);
+	/* Mode Register Setting */
+	moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+	moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+	moutdwm(ast, 0x1E6E0028, 0x00000005);
+	moutdwm(ast, 0x1E6E0028, 0x00000007);
+	moutdwm(ast, 0x1E6E0028, 0x00000003);
+	moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+	moutdwm(ast, 0x1E6E000C, 0x00005C08);
+	moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+	moutdwm(ast, 0x1E6E0028, 0x00000001);
+	moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
+	moutdwm(ast, 0x1E6E0028, 0x00000003);
+	moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+	moutdwm(ast, 0x1E6E0028, 0x00000003);
+
+	moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+	data = 0;
+	if (param->wodt) {
+		data = 0x500;
+	}
+	if (param->rodt) {
+		data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+	}
+	moutdwm(ast, 0x1E6E0034, data | 0x3);
+	moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+
+	/* Wait DQI delay lock */
+	do {
+		data = mindwm(ast, 0x1E6E0080);
+	} while (!(data & 0x40000000));
+	/* Wait DQSI delay lock */
+	do {
+		data = mindwm(ast, 0x1E6E0020);
+	} while (!(data & 0x00000800));
+	/* Calibrate the DQSI delay */
+	cbr_dll2(ast, param);
+
+	/* ECC Memory Initialization */
+#ifdef ECC
+	moutdwm(ast, 0x1E6E007C, 0x00000000);
+	moutdwm(ast, 0x1E6E0070, 0x221);
+	do {
+		data = mindwm(ast, 0x1E6E0070);
+	} while (!(data & 0x00001000));
+	moutdwm(ast, 0x1E6E0070, 0x00000000);
+	moutdwm(ast, 0x1E6E0050, 0x80000000);
+	moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+
+}
+
+static void ast_init_dram_2300(struct drm_device *dev)
+{
+	struct ast_private *ast = dev->dev_private;
+	struct ast2300_dram_param param;
+	u32 temp;
+	u8 reg;
+
+	reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+	if ((reg & 0x80) == 0) { /* vga only */
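+		/* Point the backdoor window at the SCU/MCR base (0x1e6e0000)
+		 * and enable it, then write the SCU unlock key (0x1688a8a8)
+		 * and the DRAM-controller unlock key (0xfc600309); each poll
+		 * below waits for a readback of 0x1, i.e. unlocked.
+		 */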
+		ast_write32(ast, 0xf004, 0x1e6e0000);
+		ast_write32(ast, 0xf000, 0x1);
+		ast_write32(ast, 0x12000, 0x1688a8a8);
+		do {
+			;
+		} while (ast_read32(ast, 0x12000) != 0x1);
+
+		ast_write32(ast, 0x10000, 0xfc600309);
+		do {
+			;
+		} while (ast_read32(ast, 0x10000) != 0x1);
+
+		/* Slow down CPU/AHB CLK in VGA only mode */
+		temp = ast_read32(ast, 0x12008);
+		temp |= 0x73;
+		ast_write32(ast, 0x12008, temp);
+
+		param.dram_type = AST_DDR3;
+		if (temp & 0x01000000)
+			param.dram_type = AST_DDR2;
+		param.dram_chipid = ast->dram_type;
+		param.dram_freq = ast->mclk;
+		param.vram_size = ast->vram_size;
+
+		if (param.dram_type == AST_DDR3) {
+			get_ddr3_info(ast, &param);
+			ddr3_init(ast, &param);
+		} else {
+			get_ddr2_info(ast, &param);
+			ddr2_init(ast, &param);
+		}
+
+		temp = mindwm(ast, 0x1e6e2040);
+		moutdwm(ast, 0x1e6e2040, temp | 0x40);
+	}
+
+	/* wait ready */
+	do {
+		reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+	} while ((reg & 0x40) == 0);
+}
+
diff --git a/linux-imx/drivers/gpu/drm/ast/ast_tables.h b/linux-imx/drivers/gpu/drm/ast/ast_tables.h
new file mode 100644
index 0000000..95fa6ab
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ast/ast_tables.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of the authors not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  The authors makes no representations
+ * about the suitability of this software for any purpose.  It is provided
+ * "as is" without express or implied warranty.
+ *
+ * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Ported from xf86-video-ast driver */
+
+#ifndef AST_TABLES_H
+#define AST_TABLES_H
+
+/* Std. Table Index Definition */
+#define TextModeIndex		0
+#define EGAModeIndex		1
+#define VGAModeIndex		2
+#define HiCModeIndex		3
+#define TrueCModeIndex		4
+
+#define Charx8Dot               0x00000001
+#define HalfDCLK                0x00000002
+#define DoubleScanMode          0x00000004
+#define LineCompareOff          0x00000008
+#define SyncPP                  0x00000000
+#define SyncPN                  0x00000040
+#define SyncNP                  0x00000080
+#define SyncNN                  0x000000C0
+#define HBorder                 0x00000020
+#define VBorder                 0x00000010
+#define WideScreenMode		0x00000100
+
+/* DCLK Index */
+#define VCLK25_175     		0x00
+#define VCLK28_322     		0x01
+#define VCLK31_5       		0x02
+#define VCLK36         		0x03
+#define VCLK40         		0x04
+#define VCLK49_5       		0x05
+#define VCLK50         		0x06
+#define VCLK56_25      		0x07
+#define VCLK65		 	0x08
+#define VCLK75	        	0x09
+#define VCLK78_75      		0x0A
+#define VCLK94_5       		0x0B
+#define VCLK108        		0x0C
+#define VCLK135        		0x0D
+#define VCLK157_5      		0x0E
+#define VCLK162        		0x0F
+/* #define VCLK193_25     		0x10 */
+#define VCLK154     		0x10
+#define VCLK83_5    		0x11
+#define VCLK106_5   		0x12
+#define VCLK146_25  		0x13
+#define VCLK148_5   		0x14
+
+static struct ast_vbios_dclk_info dclk_table[] = {
+	{0x2C, 0xE7, 0x03},					/* 00: VCLK25_175	*/
+	{0x95, 0x62, 0x03},				        /* 01: VCLK28_322	*/
+	{0x67, 0x63, 0x01},				        /* 02: VCLK31_5         */
+	{0x76, 0x63, 0x01},				        /* 03: VCLK36         	*/
+	{0xEE, 0x67, 0x01},				        /* 04: VCLK40          	*/
+	{0x82, 0x62, 0x01}, 			        /* 05: VCLK49_5        	*/
+	{0xC6, 0x64, 0x01},                        	        /* 06: VCLK50          	*/
+	{0x94, 0x62, 0x01},                        	        /* 07: VCLK56_25       	*/
+	{0x80, 0x64, 0x00},                        	        /* 08: VCLK65		*/
+	{0x7B, 0x63, 0x00},                        	        /* 09: VCLK75	        */
+	{0x67, 0x62, 0x00},				        /* 0A: VCLK78_75       	*/
+	{0x7C, 0x62, 0x00},                        	        /* 0B: VCLK94_5        	*/
+	{0x8E, 0x62, 0x00},                        	        /* 0C: VCLK108         	*/
+	{0x85, 0x24, 0x00},                        	        /* 0D: VCLK135         	*/
+	{0x67, 0x22, 0x00},                        	        /* 0E: VCLK157_5       	*/
+	{0x6A, 0x22, 0x00},				        /* 0F: VCLK162         	*/
+	{0x4d, 0x4c, 0x80},				        /* 10: VCLK154      	*/
+	{0xa7, 0x78, 0x80},					/* 11: VCLK83.5         */
+	{0x28, 0x49, 0x80},					/* 12: VCLK106.5        */
+	{0x37, 0x49, 0x80},					/* 13: VCLK146.25       */
+	{0x1f, 0x45, 0x80},					/* 14: VCLK148.5        */
+};
+
+static struct ast_vbios_stdtable vbios_stdtable[] = {
+	/* MD_2_3_400 */
+	{
+		0x67,
+		{0x00,0x03,0x00,0x02},
+		{0x5f,0x4f,0x50,0x82,0x55,0x81,0xbf,0x1f,
+		 0x00,0x4f,0x0d,0x0e,0x00,0x00,0x00,0x00,
+		 0x9c,0x8e,0x8f,0x28,0x1f,0x96,0xb9,0xa3,
+		 0xff},
+		{0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+		 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+		 0x0c,0x00,0x0f,0x08},
+		{0x00,0x00,0x00,0x00,0x00,0x10,0x0e,0x00,
+		 0xff}
+	},
+	/* Mode12/ExtEGATable */
+	{
+		0xe3,
+		{0x01,0x0f,0x00,0x06},
+		{0x5f,0x4f,0x50,0x82,0x55,0x81,0x0b,0x3e,
+		 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+		 0xe9,0x8b,0xdf,0x28,0x00,0xe7,0x04,0xe3,
+		 0xff},
+		{0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+		 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+		 0x01,0x00,0x0f,0x00},
+		{0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+		 0xff}
+	},
+	/* ExtVGATable */
+	{
+		0x2f,
+		{0x01,0x0f,0x00,0x0e},
+		{0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+		 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+		 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+		 0xff},
+		{0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+		 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+		 0x01,0x00,0x00,0x00},
+		{0x00,0x00,0x00,0x00,0x00,0x40,0x05,0x0f,
+		 0xff}
+	},
+	/* ExtHiCTable */
+	{
+		0x2f,
+		{0x01,0x0f,0x00,0x0e},
+		{0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+		 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+		 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+		 0xff},
+		{0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+		 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+		 0x01,0x00,0x00,0x00},
+		{0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+		 0xff}
+	},
+	/* ExtTrueCTable */
+	{
+		0x2f,
+		{0x01,0x0f,0x00,0x0e},
+		{0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+		 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+		 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+		 0xff},
+		{0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+		 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+		 0x01,0x00,0x00,0x00},
+		{0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+		 0xff}
+	},
+};
+
+static struct ast_vbios_enhtable res_640x480[] = {
+	{ 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175,	/* 60Hz */
+	  (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E },
+	{ 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5,	/* 72Hz */
+	  (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E  },
+	{ 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5,	/* 75Hz */
+	  (SyncNN | Charx8Dot) , 75, 3, 0x2E },
+	{ 832, 640, 56, 56, 509, 480, 1, 3, VCLK36,		/* 85Hz */
+	  (SyncNN | Charx8Dot) , 85, 4, 0x2E },
+	{ 832, 640, 56, 56, 509, 480, 1, 3, VCLK36,		/* end */
+	  (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E },
+};
+
+static struct ast_vbios_enhtable res_800x600[] = {
+	{1024, 800, 24, 72, 625, 600, 1, 2, VCLK36,		/* 56Hz */
+	 (SyncPP | Charx8Dot), 56, 1, 0x30 },
+	{1056, 800, 40, 128, 628, 600, 1, 4, VCLK40,	/* 60Hz */
+	 (SyncPP | Charx8Dot), 60, 2, 0x30 },
+	{1040, 800, 56, 120, 666, 600, 37, 6, VCLK50,	/* 72Hz */
+	 (SyncPP | Charx8Dot), 72, 3, 0x30 },
+	{1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5,	/* 75Hz */
+	 (SyncPP | Charx8Dot), 75, 4, 0x30 },
+	{1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25,	/* 85Hz */
+	 (SyncPP | Charx8Dot), 84, 5, 0x30 },
+	{1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25,	/* end */
+	 (SyncPP | Charx8Dot), 0xFF, 5, 0x30 },
+};
+
+static struct ast_vbios_enhtable res_1024x768[] = {
+	{1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65,	/* 60Hz */
+	 (SyncNN | Charx8Dot), 60, 1, 0x31 },
+	{1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75,	/* 70Hz */
+	 (SyncNN | Charx8Dot), 70, 2, 0x31 },
+	{1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75,	/* 75Hz */
+	 (SyncPP | Charx8Dot), 75, 3, 0x31 },
+	{1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5,	/* 85Hz */
+	 (SyncPP | Charx8Dot), 84, 4, 0x31 },
+	{1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5,	/* end */
+	 (SyncPP | Charx8Dot), 0xFF, 4, 0x31 },
+};
+
+static struct ast_vbios_enhtable res_1280x1024[] = {
+	{1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108,	/* 60Hz */
+	 (SyncPP | Charx8Dot), 60, 1, 0x32 },
+	{1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135,	/* 75Hz */
+	 (SyncPP | Charx8Dot), 75, 2, 0x32 },
+	{1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5,	/* 85Hz */
+	 (SyncPP | Charx8Dot), 85, 3, 0x32 },
+	{1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5,	/* end */
+	 (SyncPP | Charx8Dot), 0xFF, 3, 0x32 },
+};
+
+static struct ast_vbios_enhtable res_1600x1200[] = {
+	{2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162,	/* 60Hz */
+	 (SyncPP | Charx8Dot), 60, 1, 0x33 },
+	{2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162,	/* end */
+	 (SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
+};
+
+static struct ast_vbios_enhtable res_1920x1200[] = {
+	{2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154,	/* 60Hz */
+	 (SyncNP | Charx8Dot), 60, 1, 0x34 },
+	{2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154,	/* end */
+	 (SyncNP | Charx8Dot), 0xFF, 1, 0x34 },
+};
+
+/* 16:10 */
+static struct ast_vbios_enhtable res_1280x800[] = {
+	{1680, 1280, 72,128,  831,  800, 3, 6, VCLK83_5,	/* 60Hz */
+	 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x35 },
+	{1680, 1280, 72,128,  831,  800, 3, 6, VCLK83_5,	/* end */
+	 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x35 },
+};
+
+static struct ast_vbios_enhtable res_1440x900[] = {
+	{1904, 1440, 80,152,  934,  900, 3, 6, VCLK106_5,	/* 60Hz */
+	 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x36 },
+	{1904, 1440, 80,152,  934,  900, 3, 6, VCLK106_5,	/* end */
+	 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x36 },
+};
+
+static struct ast_vbios_enhtable res_1680x1050[] = {
+	{2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25,	/* 60Hz */
+	 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x37 },
+	{2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25,	/* end */
+	 (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x37 },
+};
+
+/* HDTV */
+static struct ast_vbios_enhtable res_1920x1080[] = {
+	{2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5,	/* 60Hz */
+	 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x38 },
+	{2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5,	/* end */
+	 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x38 },
+};
+#endif
diff --git a/linux-imx/drivers/gpu/drm/ast/ast_ttm.c b/linux-imx/drivers/gpu/drm/ast/ast_ttm.c
new file mode 100644
index 0000000..d5902e2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ast/ast_ttm.c
@@ -0,0 +1,454 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "ast_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct ast_private *
+ast_bdev(struct ttm_bo_device *bd)
+{
+	return container_of(bd, struct ast_private, ttm.bdev);
+}
+
+static int
+ast_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void
+ast_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int ast_ttm_global_init(struct ast_private *ast)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	global_ref = &ast->ttm.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &ast_ttm_mem_global_init;
+	global_ref->release = &ast_ttm_mem_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	ast->ttm.bo_global_ref.mem_glob =
+		ast->ttm.mem_global_ref.object;
+	global_ref = &ast->ttm.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&ast->ttm.mem_global_ref);
+		return r;
+	}
+	return 0;
+}
+
+void
+ast_ttm_global_release(struct ast_private *ast)
+{
+	if (ast->ttm.mem_global_ref.release == NULL)
+		return;
+
+	drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
+	drm_global_item_unref(&ast->ttm.mem_global_ref);
+	ast->ttm.mem_global_ref.release = NULL;
+}
+
+static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+	struct ast_bo *bo;
+
+	bo = container_of(tbo, struct ast_bo, bo);
+
+	drm_gem_object_release(&bo->gem);
+	kfree(bo);
+}
+
+bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &ast_bo_ttm_destroy)
+		return true;
+	return false;
+}
+
+static int
+ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+		     struct ttm_mem_type_manager *man)
+{
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+		man->func = &ttm_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED |
+			TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+	struct ast_bo *astbo = ast_bo(bo);
+
+	if (!ast_ttm_bo_is_ast_bo(bo))
+		return;
+
+	ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM);
+	*pl = astbo->placement;
+}
+
+static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	return 0;
+}
+
+static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				  struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct ast_private *ast = ast_bdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = pci_resource_start(ast->dev->pdev, 0);
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int ast_bo_move(struct ttm_buffer_object *bo,
+		       bool evict, bool interruptible,
+		       bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
+{
+	int r;
+	r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+	return r;
+}
+
+static void ast_ttm_backend_destroy(struct ttm_tt *tt)
+{
+	ttm_tt_fini(tt);
+	kfree(tt);
+}
+
+static struct ttm_backend_func ast_tt_backend_func = {
+	.destroy = &ast_ttm_backend_destroy,
+};
+
+struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
+				 unsigned long size, uint32_t page_flags,
+				 struct page *dummy_read_page)
+{
+	struct ttm_tt *tt;
+
+	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+	if (tt == NULL)
+		return NULL;
+	tt->func = &ast_tt_backend_func;
+	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+		kfree(tt);
+		return NULL;
+	}
+	return tt;
+}
+
+static int ast_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	return ttm_pool_populate(ttm);
+}
+
+static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver ast_bo_driver = {
+	.ttm_tt_create = ast_ttm_tt_create,
+	.ttm_tt_populate = ast_ttm_tt_populate,
+	.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
+	.init_mem_type = ast_bo_init_mem_type,
+	.evict_flags = ast_bo_evict_flags,
+	.move = ast_bo_move,
+	.verify_access = ast_bo_verify_access,
+	.io_mem_reserve = &ast_ttm_io_mem_reserve,
+	.io_mem_free = &ast_ttm_io_mem_free,
+};
+
+int ast_mm_init(struct ast_private *ast)
+{
+	int ret;
+	struct drm_device *dev = ast->dev;
+	struct ttm_bo_device *bdev = &ast->ttm.bdev;
+
+	ret = ast_ttm_global_init(ast);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_device_init(&ast->ttm.bdev,
+				 ast->ttm.bo_global_ref.ref.object,
+				 &ast_bo_driver, DRM_FILE_PAGE_OFFSET,
+				 true);
+	if (ret) {
+		DRM_ERROR("Error initialising bo driver; %d\n", ret);
+		return ret;
+	}
+
+	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+			     ast->vram_size >> PAGE_SHIFT);
+	if (ret) {
+		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+		return ret;
+	}
+
+	ast->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+				    pci_resource_len(dev->pdev, 0),
+				    DRM_MTRR_WC);
+
+	return 0;
+}
+
+void ast_mm_fini(struct ast_private *ast)
+{
+	struct drm_device *dev = ast->dev;
+	ttm_bo_device_release(&ast->ttm.bdev);
+
+	ast_ttm_global_release(ast);
+
+	if (ast->fb_mtrr >= 0) {
+		drm_mtrr_del(ast->fb_mtrr,
+			     pci_resource_start(dev->pdev, 0),
+			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+		ast->fb_mtrr = -1;
+	}
+}
+
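+/* Build the TTM placement list for the requested domains; defaults to
+ * cached system memory when no supported domain bit is set. */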
+void ast_ttm_placement(struct ast_bo *bo, int domain)
+{
+	u32 c = 0;
+	bo->placement.fpfn = 0;
+	bo->placement.lpfn = 0;
+	bo->placement.placement = bo->placements;
+	bo->placement.busy_placement = bo->placements;
+	if (domain & TTM_PL_FLAG_VRAM)
+		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+	if (domain & TTM_PL_FLAG_SYSTEM)
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	if (!c)
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	bo->placement.num_placement = c;
+	bo->placement.num_busy_placement = c;
+}
+
+int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
+{
+	int ret;
+
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+	if (ret) {
+		if (ret != -ERESTARTSYS && ret != -EBUSY)
+			DRM_ERROR("reserve failed %p\n", bo);
+		return ret;
+	}
+	return 0;
+}
+
+void ast_bo_unreserve(struct ast_bo *bo)
+{
+	ttm_bo_unreserve(&bo->bo);
+}
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+		  uint32_t flags, struct ast_bo **pastbo)
+{
+	struct ast_private *ast = dev->dev_private;
+	struct ast_bo *astbo;
+	size_t acc_size;
+	int ret;
+
+	astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL);
+	if (!astbo)
+		return -ENOMEM;
+
+	ret = drm_gem_object_init(dev, &astbo->gem, size);
+	if (ret) {
+		kfree(astbo);
+		return ret;
+	}
+
+	astbo->gem.driver_private = NULL;
+	astbo->bo.bdev = &ast->ttm.bdev;
+	astbo->bo.bdev->dev_mapping = dev->dev_mapping;
+
+	ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+	acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size,
+				       sizeof(struct ast_bo));
+
+	ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
+			  ttm_bo_type_device, &astbo->placement,
+			  align >> PAGE_SHIFT, false, NULL, acc_size,
+			  NULL, ast_bo_ttm_destroy);
+	if (ret)
+		return ret;
+
+	*pastbo = astbo;
+	return 0;
+}
+
+static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
+{
+	return bo->bo.offset;
+}
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+	int i, ret;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = ast_bo_gpu_offset(bo);
+		return 0;
+	}
+
+	ast_ttm_placement(bo, pl_flag);
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret)
+		return ret;
+
+	bo->pin_count = 1;
+	if (gpu_addr)
+		*gpu_addr = ast_bo_gpu_offset(bo);
+	return 0;
+}
+
+int ast_bo_unpin(struct ast_bo *bo)
+{
+	int i, ret;
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	for (i = 0; i < bo->placement.num_placement ; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int ast_bo_push_sysram(struct ast_bo *bo)
+{
+	int i, ret;
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	if (bo->kmap.virtual)
+		ttm_bo_kunmap(&bo->kmap);
+
+	ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+	for (i = 0; i < bo->placement.num_placement ; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret) {
+		DRM_ERROR("pushing to VRAM failed\n");
+		return ret;
+	}
+	return 0;
+}
+
+int ast_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct ast_private *ast;
+
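+	/* Offsets below DRM_FILE_PAGE_OFFSET belong to legacy DRM maps; hand
+	 * those to drm_mmap(), everything above is a TTM object. */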
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+		return drm_mmap(filp, vma);
+
+	file_priv = filp->private_data;
+	ast = file_priv->minor->dev->dev_private;
+	return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
+}
diff --git a/linux-imx/drivers/gpu/drm/ati_pcigart.c b/linux-imx/drivers/gpu/drm/ati_pcigart.c
new file mode 100644
index 0000000..c399dea
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ati_pcigart.c
@@ -0,0 +1,202 @@
+/**
+ * \file ati_pcigart.c
+ * ATI PCI GART support
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+
+# define ATI_PCIGART_PAGE_SIZE		4096	/**< PCI GART page size */
+
+static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
+				       struct drm_ati_pcigart_info *gart_info)
+{
+	gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
+						PAGE_SIZE);
+	if (gart_info->table_handle == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void drm_ati_free_pcigart_table(struct drm_device *dev,
+				       struct drm_ati_pcigart_info *gart_info)
+{
+	drm_pci_free(dev, gart_info->table_handle);
+	gart_info->table_handle = NULL;
+}
+
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+	struct drm_sg_mem *entry = dev->sg;
+	unsigned long pages;
+	int i;
+	int max_pages;
+
+	/* we need to support large memory configurations */
+	if (!entry) {
+		DRM_ERROR("no scatter/gather memory!\n");
+		return 0;
+	}
+
+	if (gart_info->bus_addr) {
+		max_pages = (gart_info->table_size / sizeof(u32));
+		pages = (entry->pages <= max_pages)
+		  ? entry->pages : max_pages;
+
+		for (i = 0; i < pages; i++) {
+			if (!entry->busaddr[i])
+				break;
+			pci_unmap_page(dev->pdev, entry->busaddr[i],
+					 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		}
+
+		if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+			gart_info->bus_addr = 0;
+	}
+
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
+	    gart_info->table_handle) {
+		drm_ati_free_pcigart_table(dev, gart_info);
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
+
+int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+	struct drm_local_map *map = &gart_info->mapping;
+	struct drm_sg_mem *entry = dev->sg;
+	void *address = NULL;
+	unsigned long pages;
+	u32 *pci_gart = NULL, page_base, gart_idx;
+	dma_addr_t bus_address = 0;
+	int i, j, ret = 0;
+	int max_ati_pages, max_real_pages;
+
+	if (!entry) {
+		DRM_ERROR("no scatter/gather memory!\n");
+		goto done;
+	}
+
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+		DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
+
+		if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
+			DRM_ERROR("fail to set dma mask to 0x%Lx\n",
+				  (unsigned long long)gart_info->table_mask);
+			ret = 1;
+			goto done;
+		}
+
+		ret = drm_ati_alloc_pcigart_table(dev, gart_info);
+		if (ret) {
+			DRM_ERROR("cannot allocate PCI GART page!\n");
+			goto done;
+		}
+
+		pci_gart = gart_info->table_handle->vaddr;
+		address = gart_info->table_handle->vaddr;
+		bus_address = gart_info->table_handle->busaddr;
+	} else {
+		address = gart_info->addr;
+		bus_address = gart_info->bus_addr;
+		DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
+			  (unsigned long long)bus_address,
+			  (unsigned long)address);
+	}
+
+	max_ati_pages = (gart_info->table_size / sizeof(u32));
+	max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
+	pages = (entry->pages <= max_real_pages)
+	    ? entry->pages : max_real_pages;
+
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+		memset(pci_gart, 0, max_ati_pages * sizeof(u32));
+	} else {
+		memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
+	}
+
+	gart_idx = 0;
+	for (i = 0; i < pages; i++) {
+		/* we need to support large memory configurations */
+		entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
+						 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
+			DRM_ERROR("unable to map PCIGART pages!\n");
+			drm_ati_pcigart_cleanup(dev, gart_info);
+			address = NULL;
+			bus_address = 0;
+			goto done;
+		}
+		page_base = (u32) entry->busaddr[i];
+
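+		/* Each CPU page covers PAGE_SIZE/4096 GART entries; encode
+		 * the bus address per GART flavour: IGP entries carry flag
+		 * bits 0xc, PCIE entries store the address shifted right by
+		 * 8 with the same flags, plain PCI stores it unmodified. */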
+		for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+			u32 val;
+
+			switch(gart_info->gart_reg_if) {
+			case DRM_ATI_GART_IGP:
+				val = page_base | 0xc;
+				break;
+			case DRM_ATI_GART_PCIE:
+				val = (page_base >> 8) | 0xc;
+				break;
+			default:
+			case DRM_ATI_GART_PCI:
+				val = page_base;
+				break;
+			}
+			if (gart_info->gart_table_location ==
+			    DRM_ATI_GART_MAIN)
+				pci_gart[gart_idx] = cpu_to_le32(val);
+			else
+				DRM_WRITE32(map, gart_idx * sizeof(u32), val);
+			gart_idx++;
+			page_base += ATI_PCIGART_PAGE_SIZE;
+		}
+	}
+	ret = 1;
+
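+	/* Make the freshly written table visible to the device: flush the
+	 * CPU caches on x86, or issue a memory barrier elsewhere. */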
+#if defined(__i386__) || defined(__x86_64__)
+	wbinvd();
+#else
+	mb();
+#endif
+
+done:
+	gart_info->addr = address;
+	gart_info->bus_addr = bus_address;
+	return ret;
+}
+EXPORT_SYMBOL(drm_ati_pcigart_init);
diff --git a/linux-imx/drivers/gpu/drm/cirrus/Kconfig b/linux-imx/drivers/gpu/drm/cirrus/Kconfig
new file mode 100644
index 0000000..bf67b22
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/cirrus/Kconfig
@@ -0,0 +1,12 @@
+config DRM_CIRRUS_QEMU
+	tristate "Cirrus driver for QEMU emulated device"
+	depends on DRM && PCI
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	help
+	 This is a KMS driver for the Cirrus device emulated by QEMU.
+	 It is *NOT* intended for real Cirrus hardware. It requires the
+	 modesetting userspace X.org driver.
diff --git a/linux-imx/drivers/gpu/drm/cirrus/Makefile b/linux-imx/drivers/gpu/drm/cirrus/Makefile
new file mode 100644
index 0000000..69ffe70
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/cirrus/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+cirrus-y  := cirrus_main.o cirrus_mode.o \
+	cirrus_drv.o cirrus_fbdev.o cirrus_ttm.o
+
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/linux-imx/drivers/gpu/drm/cirrus/cirrus_drv.c b/linux-imx/drivers/gpu/drm/cirrus/cirrus_drv.c
new file mode 100644
index 0000000..64bfc23
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2012 Red Hat <mjg@redhat.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "cirrus_drv.h"
+
+int cirrus_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, cirrus_modeset, int, 0400);
+
+/*
+ * This is the generic driver code. This binds the driver to the drm core,
+ * which then performs further device association and calls our graphics init
+ * functions
+ */
+
+static struct drm_driver driver;
+
+/* only bind to the cirrus chip in qemu */
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+	{ PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
+	  0, 0 },
+	{0,}
+};
+
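+/* Evict any firmware framebuffer (e.g. vesafb or efifb) that has claimed
+ * the PCI aperture, so the cirrus KMS driver can take over cleanly. */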
+static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return -ENOMEM;
+
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
+	kfree(ap);
+
+	return 0;
+}
+
+static int cirrus_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	int ret;
+
+	ret = cirrus_kick_out_firmware_fb(pdev);
+	if (ret)
+		return ret;
+
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void cirrus_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static int cirrus_pm_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct cirrus_device *cdev = drm_dev->dev_private;
+
+	drm_kms_helper_poll_disable(drm_dev);
+
+	if (cdev->mode_info.gfbdev) {
+		console_lock();
+		fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1);
+		console_unlock();
+	}
+
+	return 0;
+}
+
+static int cirrus_pm_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	struct cirrus_device *cdev = drm_dev->dev_private;
+
+	drm_helper_resume_force_mode(drm_dev);
+
+	if (cdev->mode_info.gfbdev) {
+		console_lock();
+		fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0);
+		console_unlock();
+	}
+
+	drm_kms_helper_poll_enable(drm_dev);
+	return 0;
+}
+
+static const struct file_operations cirrus_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = cirrus_mmap,
+	.poll = drm_poll,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.fasync = drm_fasync,
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR,
+	.load = cirrus_driver_load,
+	.unload = cirrus_driver_unload,
+	.fops = &cirrus_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+	.gem_init_object = cirrus_gem_init_object,
+	.gem_free_object = cirrus_gem_free_object,
+	.dumb_create = cirrus_dumb_create,
+	.dumb_map_offset = cirrus_dumb_mmap_offset,
+	.dumb_destroy = cirrus_dumb_destroy,
+};
+
+static const struct dev_pm_ops cirrus_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(cirrus_pm_suspend,
+				cirrus_pm_resume)
+};
+
+static struct pci_driver cirrus_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = cirrus_pci_probe,
+	.remove = cirrus_pci_remove,
+	.driver.pm = &cirrus_pm_ops,
+};
+
+static int __init cirrus_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && cirrus_modeset == -1)
+		return -EINVAL;
+#endif
+
+	if (cirrus_modeset == 0)
+		return -EINVAL;
+	return drm_pci_init(&driver, &cirrus_pci_driver);
+}
+
+static void __exit cirrus_exit(void)
+{
+	drm_pci_exit(&driver, &cirrus_pci_driver);
+}
+
+module_init(cirrus_init);
+module_exit(cirrus_exit);
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/linux-imx/drivers/gpu/drm/cirrus/cirrus_drv.h b/linux-imx/drivers/gpu/drm/cirrus/cirrus_drv.h
new file mode 100644
index 0000000..7ca0595
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Dave Airlie
+ */
+#ifndef __CIRRUS_DRV_H__
+#define __CIRRUS_DRV_H__
+
+#include <video/vga.h>
+
+#include <drm/drm_fb_helper.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+
+#define DRIVER_AUTHOR		"Matthew Garrett"
+
+#define DRIVER_NAME		"cirrus"
+#define DRIVER_DESC		"qemu Cirrus emulation"
+#define DRIVER_DATE		"20110418"
+
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		0
+#define DRIVER_PATCHLEVEL	0
+
+#define CIRRUSFB_CONN_LIMIT 1
+
+#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg))
+
+#define SEQ_INDEX 4
+#define SEQ_DATA 5
+
+#define WREG_SEQ(reg, v)					\
+	do {							\
+		WREG8(SEQ_INDEX, reg);				\
+		WREG8(SEQ_DATA, v);				\
+	} while (0)
+
+#define CRT_INDEX 0x14
+#define CRT_DATA 0x15
+
+#define WREG_CRT(reg, v)					\
+	do {							\
+		WREG8(CRT_INDEX, reg);				\
+		WREG8(CRT_DATA, v);				\
+	} while (0)
+
+#define GFX_INDEX 0xe
+#define GFX_DATA 0xf
+
+#define WREG_GFX(reg, v)					\
+	do {							\
+		WREG8(GFX_INDEX, reg);				\
+		WREG8(GFX_DATA, v);				\
+	} while (0)
+
+/*
+ * Cirrus has a "hidden" DAC register that can be accessed by writing to
+ * the pixel mask register to reset the state, then reading from the register
+ * four times. The next write will then pass to the DAC
+ */
+#define VGA_DAC_MASK 0x6
+
+#define WREG_HDR(v)						\
+	do {							\
+		RREG8(VGA_DAC_MASK);					\
+		RREG8(VGA_DAC_MASK);					\
+		RREG8(VGA_DAC_MASK);					\
+		RREG8(VGA_DAC_MASK);					\
+		WREG8(VGA_DAC_MASK, v);					\
+	} while (0)
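+/*
+ * For example, cirrus_crtc_mode_set() in cirrus_mode.c programs the
+ * hidden DAC with WREG_HDR(0xc1) when switching to a 16bpp mode.
+ */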
+
+
+#define CIRRUS_MAX_FB_HEIGHT 4096
+#define CIRRUS_MAX_FB_WIDTH 4096
+
+#define CIRRUS_DPMS_CLEARED (-1)
+
+#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
+#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
+#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
+
+struct cirrus_crtc {
+	struct drm_crtc			base;
+	u8				lut_r[256], lut_g[256], lut_b[256];
+	int				last_dpms;
+	bool				enabled;
+};
+
+struct cirrus_fbdev;
+struct cirrus_mode_info {
+	bool				mode_config_initialized;
+	struct cirrus_crtc		*crtc;
+	/* pointer to fbdev info structure */
+	struct cirrus_fbdev		*gfbdev;
+};
+
+struct cirrus_encoder {
+	struct drm_encoder		base;
+	int				last_dpms;
+};
+
+struct cirrus_connector {
+	struct drm_connector		base;
+};
+
+struct cirrus_framebuffer {
+	struct drm_framebuffer		base;
+	struct drm_gem_object *obj;
+};
+
+struct cirrus_mc {
+	resource_size_t			vram_size;
+	resource_size_t			vram_base;
+};
+
+struct cirrus_device {
+	struct drm_device		*dev;
+	unsigned long			flags;
+
+	resource_size_t			rmmio_base;
+	resource_size_t			rmmio_size;
+	void __iomem			*rmmio;
+
+	struct cirrus_mc			mc;
+	struct cirrus_mode_info		mode_info;
+
+	int				num_crtc;
+	int fb_mtrr;
+
+	struct {
+		struct drm_global_reference mem_global_ref;
+		struct ttm_bo_global_ref bo_global_ref;
+		struct ttm_bo_device bdev;
+	} ttm;
+	bool mm_inited;
+};
+
+
+struct cirrus_fbdev {
+	struct drm_fb_helper helper;
+	struct cirrus_framebuffer gfb;
+	struct list_head fbdev_list;
+	void *sysram;
+	int size;
+	int x1, y1, x2, y2; /* dirty rect */
+	spinlock_t dirty_lock;
+};
+
+struct cirrus_bo {
+	struct ttm_buffer_object bo;
+	struct ttm_placement placement;
+	struct ttm_bo_kmap_obj kmap;
+	struct drm_gem_object gem;
+	u32 placements[3];
+	int pin_count;
+};
+#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
+
+static inline struct cirrus_bo *
+cirrus_bo(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct cirrus_bo, bo);
+}
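+/*
+ * Illustrative sketch: TTM callbacks hand back &cirrus_bo->bo and GEM
+ * code hands back &cirrus_bo->gem; either member pointer recovers the
+ * same wrapper via container_of():
+ *
+ *	struct cirrus_bo *a = cirrus_bo(&cbo->bo);	    // a == cbo
+ *	struct cirrus_bo *b = gem_to_cirrus_bo(&cbo->gem);  // b == cbo
+ */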
+
+
+#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+				/* cirrus_mode.c */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			     u16 blue, int regno);
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			     u16 *blue, int regno);
+
+
+				/* cirrus_main.c */
+int cirrus_device_init(struct cirrus_device *cdev,
+		      struct drm_device *ddev,
+		      struct pci_dev *pdev,
+		      uint32_t flags);
+void cirrus_device_fini(struct cirrus_device *cdev);
+int cirrus_gem_init_object(struct drm_gem_object *obj);
+void cirrus_gem_free_object(struct drm_gem_object *obj);
+int cirrus_dumb_mmap_offset(struct drm_file *file,
+			    struct drm_device *dev,
+			    uint32_t handle,
+			    uint64_t *offset);
+int cirrus_gem_create(struct drm_device *dev,
+		      u32 size, bool iskernel,
+		      struct drm_gem_object **obj);
+int cirrus_dumb_create(struct drm_file *file,
+		       struct drm_device *dev,
+		       struct drm_mode_create_dumb *args);
+int cirrus_dumb_destroy(struct drm_file *file,
+			struct drm_device *dev,
+			uint32_t handle);
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+			    struct cirrus_framebuffer *gfb,
+			    struct drm_mode_fb_cmd2 *mode_cmd,
+			    struct drm_gem_object *obj);
+
+				/* cirrus_display.c */
+int cirrus_modeset_init(struct cirrus_device *cdev);
+void cirrus_modeset_fini(struct cirrus_device *cdev);
+
+				/* cirrus_fbdev.c */
+int cirrus_fbdev_init(struct cirrus_device *cdev);
+void cirrus_fbdev_fini(struct cirrus_device *cdev);
+
+
+
+				/* cirrus_irq.c */
+void cirrus_driver_irq_preinstall(struct drm_device *dev);
+int cirrus_driver_irq_postinstall(struct drm_device *dev);
+void cirrus_driver_irq_uninstall(struct drm_device *dev);
+irqreturn_t cirrus_driver_irq_handler(DRM_IRQ_ARGS);
+
+				/* cirrus_kms.c */
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
+int cirrus_driver_unload(struct drm_device *dev);
+extern struct drm_ioctl_desc cirrus_ioctls[];
+extern int cirrus_max_ioctl;
+
+int cirrus_mm_init(struct cirrus_device *cirrus);
+void cirrus_mm_fini(struct cirrus_device *cirrus);
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+		     uint32_t flags, struct cirrus_bo **pcirrusbo);
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
+int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait);
+void cirrus_bo_unreserve(struct cirrus_bo *bo);
+int cirrus_bo_push_sysram(struct cirrus_bo *bo);
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+#endif				/* __CIRRUS_DRV_H__ */
diff --git a/linux-imx/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/linux-imx/drivers/gpu/drm/cirrus/cirrus_fbdev.c
new file mode 100644
index 0000000..3541b56
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Dave Airlie
+ */
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include <linux/fb.h>
+
+#include "cirrus_drv.h"
+
+static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
+			     int x, int y, int width, int height)
+{
+	int i;
+	struct drm_gem_object *obj;
+	struct cirrus_bo *bo;
+	int src_offset, dst_offset;
+	int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
+	int ret;
+	bool unmap = false;
+	bool store_for_later = false;
+	int x2, y2;
+	unsigned long flags;
+
+	obj = afbdev->gfb.obj;
+	bo = gem_to_cirrus_bo(obj);
+
+	/*
+	 * try and reserve the BO, if we fail with busy
+	 * then the BO is being moved and we should
+	 * store up the damage until later.
+	 */
+	ret = cirrus_bo_reserve(bo, true);
+	if (ret) {
+		if (ret != -EBUSY)
+			return;
+		store_for_later = true;
+	}
+
+	x2 = x + width - 1;
+	y2 = y + height - 1;
+	spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+	if (afbdev->y1 < y)
+		y = afbdev->y1;
+	if (afbdev->y2 > y2)
+		y2 = afbdev->y2;
+	if (afbdev->x1 < x)
+		x = afbdev->x1;
+	if (afbdev->x2 > x2)
+		x2 = afbdev->x2;
+
+	if (store_for_later) {
+		afbdev->x1 = x;
+		afbdev->x2 = x2;
+		afbdev->y1 = y;
+		afbdev->y2 = y2;
+		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+		return;
+	}
+
+	afbdev->x1 = afbdev->y1 = INT_MAX;
+	afbdev->x2 = afbdev->y2 = 0;
+	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
+	if (!bo->kmap.virtual) {
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret) {
+			DRM_ERROR("failed to kmap fb updates\n");
+			cirrus_bo_unreserve(bo);
+			return;
+		}
+		unmap = true;
+	}
+	for (i = y; i < y + height; i++) {
+		/* assume equal stride for now */
+		src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
+		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+
+	}
+	if (unmap)
+		ttm_bo_kunmap(&bo->kmap);
+
+	cirrus_bo_unreserve(bo);
+}
+
+static void cirrus_fillrect(struct fb_info *info,
+			 const struct fb_fillrect *rect)
+{
+	struct cirrus_fbdev *afbdev = info->par;
+	sys_fillrect(info, rect);
+	cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+			 rect->height);
+}
+
+static void cirrus_copyarea(struct fb_info *info,
+			 const struct fb_copyarea *area)
+{
+	struct cirrus_fbdev *afbdev = info->par;
+	sys_copyarea(info, area);
+	cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
+			 area->height);
+}
+
+static void cirrus_imageblit(struct fb_info *info,
+			  const struct fb_image *image)
+{
+	struct cirrus_fbdev *afbdev = info->par;
+	sys_imageblit(info, image);
+	cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
+			 image->height);
+}
+
+
+static struct fb_ops cirrusfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = cirrus_fillrect,
+	.fb_copyarea = cirrus_copyarea,
+	.fb_imageblit = cirrus_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
+			       struct drm_mode_fb_cmd2 *mode_cmd,
+			       struct drm_gem_object **gobj_p)
+{
+	struct drm_device *dev = afbdev->helper.dev;
+	u32 bpp, depth;
+	u32 size;
+	struct drm_gem_object *gobj;
+
+	int ret = 0;
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+	if (bpp > 24)
+		return -EINVAL;
+	size = mode_cmd->pitches[0] * mode_cmd->height;
+	ret = cirrus_gem_create(dev, size, true, &gobj);
+	if (ret)
+		return ret;
+
+	*gobj_p = gobj;
+	return ret;
+}
+
+static int cirrusfb_create(struct drm_fb_helper *helper,
+			   struct drm_fb_helper_surface_size *sizes)
+{
+	struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
+	struct drm_device *dev = gfbdev->helper.dev;
+	struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
+	struct fb_info *info;
+	struct drm_framebuffer *fb;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct device *device = &dev->pdev->dev;
+	void *sysram;
+	struct drm_gem_object *gobj = NULL;
+	struct cirrus_bo *bo = NULL;
+	int size, ret;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+
+	ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+		return ret;
+	}
+
+	bo = gem_to_cirrus_bo(gobj);
+
+	sysram = vmalloc(size);
+	if (!sysram)
+		return -ENOMEM;
+
+	info = framebuffer_alloc(0, device);
+	if (info == NULL)
+		return -ENOMEM;
+
+	info->par = gfbdev;
+
+	ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
+	if (ret)
+		return ret;
+
+	gfbdev->sysram = sysram;
+	gfbdev->size = size;
+
+	fb = &gfbdev->gfb.base;
+	if (!fb) {
+		DRM_INFO("fb is NULL\n");
+		return -EINVAL;
+	}
+
+	/* setup helper */
+	gfbdev->helper.fb = fb;
+	gfbdev->helper.fbdev = info;
+
+	strcpy(info->fix.id, "cirrusdrmfb");
+
+	info->flags = FBINFO_DEFAULT;
+	info->fbops = &cirrusfb_ops;
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
+			       sizes->fb_height);
+
+	/* setup aperture base/size for vesafb takeover */
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out_iounmap;
+	}
+	info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
+	info->apertures->ranges[0].size = cdev->mc.vram_size;
+
+	info->screen_base = sysram;
+	info->screen_size = size;
+
+	info->fix.mmio_start = 0;
+	info->fix.mmio_len = 0;
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+		ret = -ENOMEM;
+		goto out_iounmap;
+	}
+
+	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
+	DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
+	DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
+	DRM_INFO("fb depth is %d\n", fb->depth);
+	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
+
+	return 0;
+out_iounmap:
+	return ret;
+}
+
+static int cirrus_fbdev_destroy(struct drm_device *dev,
+				struct cirrus_fbdev *gfbdev)
+{
+	struct fb_info *info;
+	struct cirrus_framebuffer *gfb = &gfbdev->gfb;
+
+	if (gfbdev->helper.fbdev) {
+		info = gfbdev->helper.fbdev;
+
+		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	if (gfb->obj) {
+		drm_gem_object_unreference_unlocked(gfb->obj);
+		gfb->obj = NULL;
+	}
+
+	vfree(gfbdev->sysram);
+	drm_fb_helper_fini(&gfbdev->helper);
+	drm_framebuffer_unregister_private(&gfb->base);
+	drm_framebuffer_cleanup(&gfb->base);
+
+	return 0;
+}
+
+static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
+	.gamma_set = cirrus_crtc_fb_gamma_set,
+	.gamma_get = cirrus_crtc_fb_gamma_get,
+	.fb_probe = cirrusfb_create,
+};
+
+int cirrus_fbdev_init(struct cirrus_device *cdev)
+{
+	struct cirrus_fbdev *gfbdev;
+	int ret;
+	int bpp_sel = 24;
+
+	/*bpp_sel = 8;*/
+	gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
+	if (!gfbdev)
+		return -ENOMEM;
+
+	cdev->mode_info.gfbdev = gfbdev;
+	gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+	spin_lock_init(&gfbdev->dirty_lock);
+
+	ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
+				 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
+	if (ret) {
+		kfree(gfbdev);
+		return ret;
+	}
+	drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(cdev->dev);
+	drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
+
+	return 0;
+}
+
+void cirrus_fbdev_fini(struct cirrus_device *cdev)
+{
+	if (!cdev->mode_info.gfbdev)
+		return;
+
+	cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
+	kfree(cdev->mode_info.gfbdev);
+	cdev->mode_info.gfbdev = NULL;
+}
diff --git a/linux-imx/drivers/gpu/drm/cirrus/cirrus_main.c b/linux-imx/drivers/gpu/drm/cirrus/cirrus_main.c
new file mode 100644
index 0000000..35cbae8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Dave Airlie
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "cirrus_drv.h"
+
+
+static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
+	if (cirrus_fb->obj)
+		drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+	drm_framebuffer_cleanup(fb);
+	kfree(fb);
+}
+
+static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
+	.destroy = cirrus_user_framebuffer_destroy,
+};
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+			    struct cirrus_framebuffer *gfb,
+			    struct drm_mode_fb_cmd2 *mode_cmd,
+			    struct drm_gem_object *obj)
+{
+	int ret;
+
+	drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+	gfb->obj = obj;
+	ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
+	if (ret) {
+		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+static struct drm_framebuffer *
+cirrus_user_framebuffer_create(struct drm_device *dev,
+			       struct drm_file *filp,
+			       struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct cirrus_framebuffer *cirrus_fb;
+	int ret;
+	u32 bpp, depth;
+
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+	/* cirrus can't handle > 24bpp framebuffers at all */
+	if (bpp > 24)
+		return ERR_PTR(-EINVAL);
+
+	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+	if (obj == NULL)
+		return ERR_PTR(-ENOENT);
+
+	cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
+	if (!cirrus_fb) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		kfree(cirrus_fb);
+		return ERR_PTR(ret);
+	}
+	return &cirrus_fb->base;
+}
+
+static const struct drm_mode_config_funcs cirrus_mode_funcs = {
+	.fb_create = cirrus_user_framebuffer_create,
+};
+
+/* Unmap the framebuffer from the core and release the memory */
+static void cirrus_vram_fini(struct cirrus_device *cdev)
+{
+	iounmap(cdev->rmmio);
+	cdev->rmmio = NULL;
+	if (cdev->mc.vram_base)
+		release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
+}
+
+/* Map the framebuffer from the card and configure the core */
+static int cirrus_vram_init(struct cirrus_device *cdev)
+{
+	/* BAR 0 is VRAM */
+	cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
+	/* We have 4MB of VRAM */
+	cdev->mc.vram_size = 4 * 1024 * 1024;
+
+	if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
+				"cirrusdrmfb_vram")) {
+		DRM_ERROR("can't reserve VRAM\n");
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Our emulated hardware has two sets of memory. One is video RAM and can
+ * simply be used as a linear framebuffer - the other provides mmio access
+ * to the display registers. The latter can also be accessed via IO port
+ * access, but we map the range and use mmio to program them instead
+ */
+
+int cirrus_device_init(struct cirrus_device *cdev,
+		       struct drm_device *ddev,
+		       struct pci_dev *pdev, uint32_t flags)
+{
+	int ret;
+
+	cdev->dev = ddev;
+	cdev->flags = flags;
+
+	/* Hardcode the number of CRTCs to 1 */
+	cdev->num_crtc = 1;
+
+	/* BAR 0 is the framebuffer, BAR 1 contains registers */
+	cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
+	cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);
+
+	if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
+				"cirrusdrmfb_mmio")) {
+		DRM_ERROR("can't reserve mmio registers\n");
+		return -ENOMEM;
+	}
+
+	cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);
+
+	if (cdev->rmmio == NULL)
+		return -ENOMEM;
+
+	ret = cirrus_vram_init(cdev);
+	if (ret) {
+		release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+		return ret;
+	}
+
+	return 0;
+}
+
+void cirrus_device_fini(struct cirrus_device *cdev)
+{
+	release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+	cirrus_vram_fini(cdev);
+}
+
+/*
+ * Functions here will be called by the core once it's bound the driver to
+ * a PCI device
+ */
+
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct cirrus_device *cdev;
+	int r;
+
+	cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
+	if (cdev == NULL)
+		return -ENOMEM;
+	dev->dev_private = (void *)cdev;
+
+	r = cirrus_device_init(cdev, dev, dev->pdev, flags);
+	if (r) {
+		dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+		goto out;
+	}
+
+	r = cirrus_mm_init(cdev);
+	if (r) {
+		dev_err(&dev->pdev->dev, "Fatal error during mm init: %d\n", r);
+		goto out;
+	}
+
+	r = cirrus_modeset_init(cdev);
+	if (r)
+		dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+
+	dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
+out:
+	if (r)
+		cirrus_driver_unload(dev);
+	return r;
+}
+
+int cirrus_driver_unload(struct drm_device *dev)
+{
+	struct cirrus_device *cdev = dev->dev_private;
+
+	if (cdev == NULL)
+		return 0;
+	cirrus_modeset_fini(cdev);
+	cirrus_mm_fini(cdev);
+	cirrus_device_fini(cdev);
+	kfree(cdev);
+	dev->dev_private = NULL;
+	return 0;
+}
+
+int cirrus_gem_create(struct drm_device *dev,
+		   u32 size, bool iskernel,
+		   struct drm_gem_object **obj)
+{
+	struct cirrus_bo *cirrusbo;
+	int ret;
+
+	*obj = NULL;
+
+	size = roundup(size, PAGE_SIZE);
+	if (size == 0)
+		return -EINVAL;
+
+	ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("failed to allocate GEM object\n");
+		return ret;
+	}
+	*obj = &cirrusbo->gem;
+	return 0;
+}
+
+int cirrus_dumb_create(struct drm_file *file,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args)
+{
+	int ret;
+	struct drm_gem_object *gobj;
+	u32 handle;
+
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
+
+	ret = cirrus_gem_create(dev, args->size, false,
+			     &gobj);
+	if (ret)
+		return ret;
+
+	ret = drm_gem_handle_create(file, gobj, &handle);
+	drm_gem_object_unreference_unlocked(gobj);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	return 0;
+}
+
+int cirrus_dumb_destroy(struct drm_file *file,
+		     struct drm_device *dev,
+		     uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
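+/*
+ * Userspace sketch (assumes a typical KMS client; not part of this
+ * patch): the dumb-buffer entry points above are reached through the
+ * generic mode ioctls:
+ *
+ *	struct drm_mode_create_dumb creq = {
+ *		.width = 1024, .height = 768, .bpp = 24,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
+ *
+ *	struct drm_mode_map_dumb mreq = { .handle = creq.handle };
+ *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
+ *	void *map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
+ *			 MAP_SHARED, fd, mreq.offset);
+ *
+ * The MAP_DUMB ioctl is serviced by cirrus_dumb_mmap_offset() below.
+ */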
+
+int cirrus_gem_init_object(struct drm_gem_object *obj)
+{
+	BUG();
+	return 0;
+}
+
+void cirrus_bo_unref(struct cirrus_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+
+	if ((*bo) == NULL)
+		return;
+
+	tbo = &((*bo)->bo);
+	ttm_bo_unref(&tbo);
+	*bo = NULL;
+}
+
+void cirrus_gem_free_object(struct drm_gem_object *obj)
+{
+	struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
+
+	if (!cirrus_bo)
+		return;
+	cirrus_bo_unref(&cirrus_bo);
+}
+
+
+static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
+{
+	return bo->bo.addr_space_offset;
+}
+
+int
+cirrus_dumb_mmap_offset(struct drm_file *file,
+		     struct drm_device *dev,
+		     uint32_t handle,
+		     uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int ret;
+	struct cirrus_bo *bo;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file, handle);
+	if (obj == NULL) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
+	bo = gem_to_cirrus_bo(obj);
+	*offset = cirrus_bo_mmap_offset(bo);
+
+	drm_gem_object_unreference(obj);
+	ret = 0;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+
+}
diff --git a/linux-imx/drivers/gpu/drm/cirrus/cirrus_mode.c b/linux-imx/drivers/gpu/drm/cirrus/cirrus_mode.c
new file mode 100644
index 0000000..b86f68d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Dave Airlie
+ *
+ * Portions of this code derived from cirrusfb.c:
+ * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
+ *
+ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include <video/cirrus.h>
+
+#include "cirrus_drv.h"
+
+#define CIRRUS_LUT_SIZE 256
+
+#define PALETTE_INDEX 0x8
+#define PALETTE_DATA 0x9
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct cirrus_device *cdev = dev->dev_private;
+	int i;
+
+	if (!crtc->enabled)
+		return;
+
+	for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+		/* VGA registers */
+		WREG8(PALETTE_INDEX, i);
+		WREG8(PALETTE_DATA, cirrus_crtc->lut_r[i]);
+		WREG8(PALETTE_DATA, cirrus_crtc->lut_g[i]);
+		WREG8(PALETTE_DATA, cirrus_crtc->lut_b[i]);
+	}
+}
+
+/*
+ * The DRM core requires DPMS functions. Here the requested state is
+ * mapped onto the screen-blank bit in SR01 and the sync power-down
+ * bits in GR0E
+ */
+
+static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct cirrus_device *cdev = dev->dev_private;
+	u8 sr01, gr0e;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		sr01 = 0x00;
+		gr0e = 0x00;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		sr01 = 0x20;
+		gr0e = 0x02;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		sr01 = 0x20;
+		gr0e = 0x04;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		sr01 = 0x20;
+		gr0e = 0x06;
+		break;
+	default:
+		return;
+	}
+
+	WREG8(SEQ_INDEX, 0x1);
+	sr01 |= RREG8(SEQ_DATA) & ~0x20;
+	WREG_SEQ(0x1, sr01);
+
+	WREG8(GFX_INDEX, 0xe);
+	gr0e |= RREG8(GFX_DATA) & ~0x06;
+	WREG_GFX(0xe, gr0e);
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+	struct cirrus_device *cdev = crtc->dev->dev_private;
+	u32 addr;
+	u8 tmp;
+
+	addr = offset >> 2;
+	WREG_CRT(0x0c, (u8)((addr >> 8) & 0xff));
+	WREG_CRT(0x0d, (u8)(addr & 0xff));
+
+	WREG8(CRT_INDEX, 0x1b);
+	tmp = RREG8(CRT_DATA);
+	tmp &= 0xf2;
+	tmp |= (addr >> 16) & 0x01;
+	tmp |= (addr >> 15) & 0x0c;
+	WREG_CRT(0x1b, tmp);
+	WREG8(CRT_INDEX, 0x1d);
+	tmp = RREG8(CRT_DATA);
+	tmp &= 0x7f;
+	tmp |= (addr >> 12) & 0x80;
+	WREG_CRT(0x1d, tmp);
+}
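+/*
+ * Bit layout of the code above: with addr = offset >> 2, the start
+ * address is scattered as CR0D = addr[7:0], CR0C = addr[15:8],
+ * CR1B[0] = addr[16], CR1B[3:2] = addr[18:17] and CR1D[7] = addr[19].
+ */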
+
+/* cirrus is different - we will force move buffers out of VRAM */
+static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				int x, int y, int atomic)
+{
+	struct cirrus_device *cdev = crtc->dev->dev_private;
+	struct drm_gem_object *obj;
+	struct cirrus_framebuffer *cirrus_fb;
+	struct cirrus_bo *bo;
+	int ret;
+	u64 gpu_addr;
+
+	/* push the previous fb to system ram */
+	if (!atomic && fb) {
+		cirrus_fb = to_cirrus_framebuffer(fb);
+		obj = cirrus_fb->obj;
+		bo = gem_to_cirrus_bo(obj);
+		ret = cirrus_bo_reserve(bo, false);
+		if (ret)
+			return ret;
+		cirrus_bo_push_sysram(bo);
+		cirrus_bo_unreserve(bo);
+	}
+
+	cirrus_fb = to_cirrus_framebuffer(crtc->fb);
+	obj = cirrus_fb->obj;
+	bo = gem_to_cirrus_bo(obj);
+
+	ret = cirrus_bo_reserve(bo, false);
+	if (ret)
+		return ret;
+
+	ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+	if (ret) {
+		cirrus_bo_unreserve(bo);
+		return ret;
+	}
+
+	if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
+		/* if we're pushing the console fb, keep it kmapped */
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret)
+			DRM_ERROR("failed to kmap fbcon\n");
+	}
+	cirrus_bo_unreserve(bo);
+
+	cirrus_set_start_address(crtc, (u32)gpu_addr);
+	return 0;
+}
+
+static int cirrus_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+			     struct drm_framebuffer *old_fb)
+{
+	return cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+/*
+ * The meat of this driver. The core passes us a mode and we have to program
+ * it. The modesetting here is the bare minimum required to satisfy the qemu
+ * emulation of this hardware, and running this against a real device is
+ * likely to result in an inadequately programmed mode. We've already had
+ * the opportunity to modify the mode, so whatever we receive here should
+ * be something that can be correctly programmed and displayed
+ */
+static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode,
+				int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct cirrus_device *cdev = dev->dev_private;
+	int hsyncstart, hsyncend, htotal, hdispend;
+	int vtotal, vdispend;
+	int tmp;
+	int sr07 = 0, hdr = 0;
+
+	htotal = mode->htotal / 8;
+	hsyncend = mode->hsync_end / 8;
+	hsyncstart = mode->hsync_start / 8;
+	hdispend = mode->hdisplay / 8;
+
+	vtotal = mode->vtotal;
+	vdispend = mode->vdisplay;
+
+	vdispend -= 1;
+	vtotal -= 2;
+
+	htotal -= 5;
+	hdispend -= 1;
+	hsyncstart += 1;
+	hsyncend += 1;
+
+	WREG_CRT(VGA_CRTC_V_SYNC_END, 0x20);
+	WREG_CRT(VGA_CRTC_H_TOTAL, htotal);
+	WREG_CRT(VGA_CRTC_H_DISP, hdispend);
+	WREG_CRT(VGA_CRTC_H_SYNC_START, hsyncstart);
+	WREG_CRT(VGA_CRTC_H_SYNC_END, hsyncend);
+	WREG_CRT(VGA_CRTC_V_TOTAL, vtotal & 0xff);
+	WREG_CRT(VGA_CRTC_V_DISP_END, vdispend & 0xff);
+
+	tmp = 0x40;
+	if ((vdispend + 1) & 512)
+		tmp |= 0x20;
+	WREG_CRT(VGA_CRTC_MAX_SCAN, tmp);
+
+	/*
+	 * Overflow bits for values that don't fit in the standard registers
+	 */
+	tmp = 16;
+	if (vtotal & 256)
+		tmp |= 1;
+	if (vdispend & 256)
+		tmp |= 2;
+	if ((vdispend + 1) & 256)
+		tmp |= 8;
+	if (vtotal & 512)
+		tmp |= 32;
+	if (vdispend & 512)
+		tmp |= 64;
+	WREG_CRT(VGA_CRTC_OVERFLOW, tmp);
+
+	tmp = 0;
+
+	/* More overflow bits */
+
+	if ((htotal + 5) & 64)
+		tmp |= 16;
+	if ((htotal + 5) & 128)
+		tmp |= 32;
+	if (vtotal & 256)
+		tmp |= 64;
+	if (vtotal & 512)
+		tmp |= 128;
+
+	WREG_CRT(CL_CRT1A, tmp);
+
+	/* Disable Hercules/CGA compatibility */
+	WREG_CRT(VGA_CRTC_MODE, 0x03);
+
+	WREG8(SEQ_INDEX, 0x7);
+	sr07 = RREG8(SEQ_DATA);
+	sr07 &= 0xe0;
+	hdr = 0;
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		sr07 |= 0x11;
+		break;
+	case 16:
+		sr07 |= 0x17;
+		hdr = 0xc1;
+		break;
+	case 24:
+		sr07 |= 0x15;
+		hdr = 0xc5;
+		break;
+	case 32:
+		sr07 |= 0x19;
+		hdr = 0xc5;
+		break;
+	default:
+		return -1;
+	}
+
+	WREG_SEQ(0x7, sr07);
+
+	/* Program the pitch */
+	tmp = crtc->fb->pitches[0] / 8;
+	WREG_CRT(VGA_CRTC_OFFSET, tmp);
+
+	/* Enable extended blanking and pitch bits, and enable full memory */
+	tmp = 0x22;
+	tmp |= (crtc->fb->pitches[0] >> 7) & 0x10;
+	tmp |= (crtc->fb->pitches[0] >> 6) & 0x40;
+	WREG_CRT(0x1b, tmp);
+
+	/* Enable high-colour modes */
+	WREG_GFX(VGA_GFX_MODE, 0x40);
+
+	/* And set graphics mode */
+	WREG_GFX(VGA_GFX_MISC, 0x01);
+
+	WREG_HDR(hdr);
+	cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+
+	/* Unblank (needed on S3 resume, vgabios doesn't do it then) */
+	outb(0x20, 0x3c0);
+	return 0;
+}
+
+/*
+ * This is called before a mode is programmed. A typical use might be to
+ * enable DPMS during the programming to avoid seeing intermediate stages,
+ * but that's not relevant to us
+ */
+static void cirrus_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void cirrus_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode so can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				  u16 *blue, uint32_t start, uint32_t size)
+{
+	struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+	int i;
+
+	if (size != CIRRUS_LUT_SIZE)
+		return;
+
+	for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+		cirrus_crtc->lut_r[i] = red[i];
+		cirrus_crtc->lut_g[i] = green[i];
+		cirrus_crtc->lut_b[i] = blue[i];
+	}
+	cirrus_crtc_load_lut(crtc);
+}
+
+/* Simple cleanup function */
+static void cirrus_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	kfree(cirrus_crtc);
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs cirrus_crtc_funcs = {
+	.gamma_set = cirrus_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = cirrus_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
+	.dpms = cirrus_crtc_dpms,
+	.mode_fixup = cirrus_crtc_mode_fixup,
+	.mode_set = cirrus_crtc_mode_set,
+	.mode_set_base = cirrus_crtc_mode_set_base,
+	.prepare = cirrus_crtc_prepare,
+	.commit = cirrus_crtc_commit,
+	.load_lut = cirrus_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void cirrus_crtc_init(struct drm_device *dev)
+{
+	struct cirrus_device *cdev = dev->dev_private;
+	struct cirrus_crtc *cirrus_crtc;
+	int i;
+
+	cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
+			      (CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+			      GFP_KERNEL);
+
+	if (cirrus_crtc == NULL)
+		return;
+
+	drm_crtc_init(dev, &cirrus_crtc->base, &cirrus_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
+	cdev->mode_info.crtc = cirrus_crtc;
+
+	for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+		cirrus_crtc->lut_r[i] = i;
+		cirrus_crtc->lut_g[i] = i;
+		cirrus_crtc->lut_b[i] = i;
+	}
+
+	drm_crtc_helper_add(&cirrus_crtc->base, &cirrus_helper_funcs);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			      u16 blue, int regno)
+{
+	struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+	cirrus_crtc->lut_r[regno] = red;
+	cirrus_crtc->lut_g[regno] = green;
+	cirrus_crtc->lut_b[regno] = blue;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			      u16 *blue, int regno)
+{
+	struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+	*red = cirrus_crtc->lut_r[regno];
+	*green = cirrus_crtc->lut_g[regno];
+	*blue = cirrus_crtc->lut_b[regno];
+}
+
+
+static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
+				      const struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void cirrus_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+	return;
+}
+
+static void cirrus_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void cirrus_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+void cirrus_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
+	drm_encoder_cleanup(encoder);
+	kfree(cirrus_encoder);
+}
+
+static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = {
+	.dpms = cirrus_encoder_dpms,
+	.mode_fixup = cirrus_encoder_mode_fixup,
+	.mode_set = cirrus_encoder_mode_set,
+	.prepare = cirrus_encoder_prepare,
+	.commit = cirrus_encoder_commit,
+};
+
+static const struct drm_encoder_funcs cirrus_encoder_encoder_funcs = {
+	.destroy = cirrus_encoder_destroy,
+};
+
+static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+	struct cirrus_encoder *cirrus_encoder;
+
+	cirrus_encoder = kzalloc(sizeof(struct cirrus_encoder), GFP_KERNEL);
+	if (!cirrus_encoder)
+		return NULL;
+
+	encoder = &cirrus_encoder->base;
+	encoder->possible_crtcs = 0x1;
+
+	drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
+			 DRM_MODE_ENCODER_DAC);
+	drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
+
+	return encoder;
+}
+
+
+int cirrus_vga_get_modes(struct drm_connector *connector)
+{
+	/* Just add a static list of modes */
+	drm_add_modes_noedid(connector, 640, 480);
+	drm_add_modes_noedid(connector, 800, 600);
+	drm_add_modes_noedid(connector, 1024, 768);
+	drm_add_modes_noedid(connector, 1280, 1024);
+
+	return 4;
+}
+
+static int cirrus_vga_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
+	/* Any mode we've added is valid */
+	return MODE_OK;
+}
+
+struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
+						  *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	/* pick the first bound encoder id */
+	if (enc_id) {
+		obj =
+		    drm_mode_object_find(connector->dev, enc_id,
+					 DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			return NULL;
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	return NULL;
+}
+
+static enum drm_connector_status cirrus_vga_detect(struct drm_connector
+						   *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static void cirrus_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
+	.get_modes = cirrus_vga_get_modes,
+	.mode_valid = cirrus_vga_mode_valid,
+	.best_encoder = cirrus_connector_best_encoder,
+};
+
+struct drm_connector_funcs cirrus_vga_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = cirrus_vga_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = cirrus_connector_destroy,
+};
+
+static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct cirrus_connector *cirrus_connector;
+
+	cirrus_connector = kzalloc(sizeof(struct cirrus_connector), GFP_KERNEL);
+	if (!cirrus_connector)
+		return NULL;
+
+	connector = &cirrus_connector->base;
+
+	drm_connector_init(dev, connector,
+			   &cirrus_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+	drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
+
+	return connector;
+}
+
+
+int cirrus_modeset_init(struct cirrus_device *cdev)
+{
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int ret;
+
+	drm_mode_config_init(cdev->dev);
+	cdev->mode_info.mode_config_initialized = true;
+
+	cdev->dev->mode_config.max_width = CIRRUS_MAX_FB_WIDTH;
+	cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
+
+	cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
+	cdev->dev->mode_config.preferred_depth = 24;
+	/* don't prefer a shadow on virt GPU */
+	cdev->dev->mode_config.prefer_shadow = 0;
+
+	cirrus_crtc_init(cdev->dev);
+
+	encoder = cirrus_encoder_init(cdev->dev);
+	if (!encoder) {
+		DRM_ERROR("cirrus_encoder_init failed\n");
+		return -1;
+	}
+
+	connector = cirrus_vga_init(cdev->dev);
+	if (!connector) {
+		DRM_ERROR("cirrus_vga_init failed\n");
+		return -1;
+	}
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	ret = cirrus_fbdev_init(cdev);
+	if (ret) {
+		DRM_ERROR("cirrus_fbdev_init failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+void cirrus_modeset_fini(struct cirrus_device *cdev)
+{
+	cirrus_fbdev_fini(cdev);
+
+	if (cdev->mode_info.mode_config_initialized) {
+		drm_mode_config_cleanup(cdev->dev);
+		cdev->mode_info.mode_config_initialized = false;
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/cirrus/cirrus_ttm.c b/linux-imx/drivers/gpu/drm/cirrus/cirrus_ttm.c
new file mode 100644
index 0000000..c18faff
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "cirrus_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct cirrus_device *
+cirrus_bdev(struct ttm_bo_device *bd)
+{
+	return container_of(bd, struct cirrus_device, ttm.bdev);
+}
+
+static int
+cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void
+cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	global_ref = &cirrus->ttm.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &cirrus_ttm_mem_global_init;
+	global_ref->release = &cirrus_ttm_mem_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	cirrus->ttm.bo_global_ref.mem_glob =
+		cirrus->ttm.mem_global_ref.object;
+	global_ref = &cirrus->ttm.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+		return r;
+	}
+	return 0;
+}
+
+void
+cirrus_ttm_global_release(struct cirrus_device *cirrus)
+{
+	if (cirrus->ttm.mem_global_ref.release == NULL)
+		return;
+
+	drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
+	drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+	cirrus->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+	struct cirrus_bo *bo;
+
+	bo = container_of(tbo, struct cirrus_bo, bo);
+
+	drm_gem_object_release(&bo->gem);
+	kfree(bo);
+}
+
+bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &cirrus_bo_ttm_destroy)
+		return true;
+	return false;
+}
+
+static int
+cirrus_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+		     struct ttm_mem_type_manager *man)
+{
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+		man->func = &ttm_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED |
+			TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+	struct cirrus_bo *cirrusbo = cirrus_bo(bo);
+
+	if (!cirrus_ttm_bo_is_cirrus_bo(bo))
+		return;
+
+	cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_SYSTEM);
+	*pl = cirrusbo->placement;
+}
+
+static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	return 0;
+}
+
+static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				  struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct cirrus_device *cirrus = cirrus_bdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = pci_resource_start(cirrus->dev->pdev, 0);
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int cirrus_bo_move(struct ttm_buffer_object *bo,
+		       bool evict, bool interruptible,
+		       bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
+{
+	int r;
+	r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+	return r;
+}
+
+
+static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
+{
+	ttm_tt_fini(tt);
+	kfree(tt);
+}
+
+static struct ttm_backend_func cirrus_tt_backend_func = {
+	.destroy = &cirrus_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
+				 unsigned long size, uint32_t page_flags,
+				 struct page *dummy_read_page)
+{
+	struct ttm_tt *tt;
+
+	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+	if (tt == NULL)
+		return NULL;
+	tt->func = &cirrus_tt_backend_func;
+	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+		kfree(tt);
+		return NULL;
+	}
+	return tt;
+}
+
+static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	return ttm_pool_populate(ttm);
+}
+
+static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver cirrus_bo_driver = {
+	.ttm_tt_create = cirrus_ttm_tt_create,
+	.ttm_tt_populate = cirrus_ttm_tt_populate,
+	.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
+	.init_mem_type = cirrus_bo_init_mem_type,
+	.evict_flags = cirrus_bo_evict_flags,
+	.move = cirrus_bo_move,
+	.verify_access = cirrus_bo_verify_access,
+	.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
+	.io_mem_free = &cirrus_ttm_io_mem_free,
+};
+
+int cirrus_mm_init(struct cirrus_device *cirrus)
+{
+	int ret;
+	struct drm_device *dev = cirrus->dev;
+	struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
+
+	ret = cirrus_ttm_global_init(cirrus);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_device_init(&cirrus->ttm.bdev,
+				 cirrus->ttm.bo_global_ref.ref.object,
+				 &cirrus_bo_driver, DRM_FILE_PAGE_OFFSET,
+				 true);
+	if (ret) {
+		DRM_ERROR("Error initialising bo driver; %d\n", ret);
+		return ret;
+	}
+
+	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+			     cirrus->mc.vram_size >> PAGE_SHIFT);
+	if (ret) {
+		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+		return ret;
+	}
+
+	cirrus->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+				    pci_resource_len(dev->pdev, 0),
+				    DRM_MTRR_WC);
+
+	cirrus->mm_inited = true;
+	return 0;
+}
+
+void cirrus_mm_fini(struct cirrus_device *cirrus)
+{
+	struct drm_device *dev = cirrus->dev;
+
+	if (!cirrus->mm_inited)
+		return;
+
+	ttm_bo_device_release(&cirrus->ttm.bdev);
+
+	cirrus_ttm_global_release(cirrus);
+
+	if (cirrus->fb_mtrr >= 0) {
+		drm_mtrr_del(cirrus->fb_mtrr,
+			     pci_resource_start(dev->pdev, 0),
+			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+		cirrus->fb_mtrr = -1;
+	}
+}
+
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
+{
+	u32 c = 0;
+	bo->placement.fpfn = 0;
+	bo->placement.lpfn = 0;
+	bo->placement.placement = bo->placements;
+	bo->placement.busy_placement = bo->placements;
+	if (domain & TTM_PL_FLAG_VRAM)
+		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+	if (domain & TTM_PL_FLAG_SYSTEM)
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	if (!c)
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	bo->placement.num_placement = c;
+	bo->placement.num_busy_placement = c;
+}
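+/*
+ * Example: cirrus_bo_create() below requests VRAM | SYSTEM, giving
+ * placements[0] = WC | UNCACHED | VRAM as the preferred placement and
+ * placements[1] = MASK_CACHING | SYSTEM as the eviction fallback.
+ */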
+
+int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
+{
+	int ret;
+
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+	if (ret) {
+		if (ret != -ERESTARTSYS && ret != -EBUSY)
+			DRM_ERROR("reserve failed %p\n", bo);
+		return ret;
+	}
+	return 0;
+}
+
+void cirrus_bo_unreserve(struct cirrus_bo *bo)
+{
+	ttm_bo_unreserve(&bo->bo);
+}
+
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+		  uint32_t flags, struct cirrus_bo **pcirrusbo)
+{
+	struct cirrus_device *cirrus = dev->dev_private;
+	struct cirrus_bo *cirrusbo;
+	size_t acc_size;
+	int ret;
+
+	cirrusbo = kzalloc(sizeof(struct cirrus_bo), GFP_KERNEL);
+	if (!cirrusbo)
+		return -ENOMEM;
+
+	ret = drm_gem_object_init(dev, &cirrusbo->gem, size);
+	if (ret) {
+		kfree(cirrusbo);
+		return ret;
+	}
+
+	cirrusbo->gem.driver_private = NULL;
+	cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+	cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
+
+	cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+	acc_size = ttm_bo_dma_acc_size(&cirrus->ttm.bdev, size,
+				       sizeof(struct cirrus_bo));
+
+	ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
+			  ttm_bo_type_device, &cirrusbo->placement,
+			  align >> PAGE_SHIFT, false, NULL, acc_size,
+			  NULL, cirrus_bo_ttm_destroy);
+	if (ret)
+		return ret;
+
+	*pcirrusbo = cirrusbo;
+	return 0;
+}
+
+static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
+{
+	return bo->bo.offset;
+}
+
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+	int i, ret;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = cirrus_bo_gpu_offset(bo);
+		return 0;
+	}
+
+	cirrus_ttm_placement(bo, pl_flag);
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret)
+		return ret;
+
+	bo->pin_count = 1;
+	if (gpu_addr)
+		*gpu_addr = cirrus_bo_gpu_offset(bo);
+	return 0;
+}
+
+int cirrus_bo_unpin(struct cirrus_bo *bo)
+{
+	int i, ret;
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	for (i = 0; i < bo->placement.num_placement ; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int cirrus_bo_push_sysram(struct cirrus_bo *bo)
+{
+	int i, ret;
+	if (!bo->pin_count) {
+		DRM_ERROR("push_sysram: bo %p not pinned\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	if (bo->kmap.virtual)
+		ttm_bo_kunmap(&bo->kmap);
+
+	cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+	for (i = 0; i < bo->placement.num_placement ; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret) {
+		DRM_ERROR("pushing to system ram failed\n");
+		return ret;
+	}
+	return 0;
+}
+
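+/*
+ * mmap offsets below DRM_FILE_PAGE_OFFSET belong to legacy DRM maps
+ * and fall through to drm_mmap(); anything above it is a TTM object
+ * offset and is handled by ttm_bo_mmap().
+ */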
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct cirrus_device *cirrus;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+		return drm_mmap(filp, vma);
+
+	file_priv = filp->private_data;
+	cirrus = file_priv->minor->dev->dev_private;
+	return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
+}
diff --git a/linux-imx/drivers/gpu/drm/drm_agpsupport.c b/linux-imx/drivers/gpu/drm/drm_agpsupport.c
new file mode 100644
index 0000000..3d8fed1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_agpsupport.c
@@ -0,0 +1,469 @@
+/**
+ * \file drm_agpsupport.c
+ * DRM support for AGP/GART backend
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#if __OS_HAS_AGP
+
+#include <asm/agp.h>
+
+/**
+ * Get AGP information.
+ *
+ * \param dev DRM device.
+ * \param info pointer to a (output) drm_agp_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been initialized and acquired and fills in the
+ * drm_agp_info structure with the information in drm_agp_head::agp_info.
+ */
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
+{
+	DRM_AGP_KERN *kern;
+
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+
+	kern = &dev->agp->agp_info;
+	info->agp_version_major = kern->version.major;
+	info->agp_version_minor = kern->version.minor;
+	info->mode = kern->mode;
+	info->aperture_base = kern->aper_base;
+	info->aperture_size = kern->aper_size * 1024 * 1024;
+	info->memory_allowed = kern->max_memory << PAGE_SHIFT;
+	info->memory_used = kern->current_memory << PAGE_SHIFT;
+	info->id_vendor = kern->device->vendor;
+	info->id_device = kern->device->device;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_info);
+
+int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_agp_info *info = data;
+	int err;
+
+	err = drm_agp_info(dev, info);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/**
+ * Acquire the AGP device.
+ *
+ * \param dev DRM device that is to acquire AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire(struct drm_device * dev)
+{
+	if (!dev->agp)
+		return -ENODEV;
+	if (dev->agp->acquired)
+		return -EBUSY;
+	if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
+		return -ENODEV;
+	dev->agp->acquired = 1;
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_acquire);
+
+/**
+ * Acquire the AGP device (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
+}
+
+/**
+ * Release the AGP device.
+ *
+ * \param dev DRM device that is to release AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
+ */
+int drm_agp_release(struct drm_device * dev)
+{
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+	agp_backend_release(dev->agp->bridge);
+	dev->agp->acquired = 0;
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_release);
+
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	return drm_agp_release(dev);
+}
+
+/**
+ * Enable the AGP bus.
+ *
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired but not enabled, and calls
+ * \c agp_enable.
+ */
+int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
+{
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+
+	dev->agp->mode = mode.mode;
+	agp_enable(dev->agp->bridge, mode.mode);
+	dev->agp->enabled = 1;
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_enable);
+
+int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_agp_mode *mode = data;
+
+	return drm_agp_enable(dev, *mode);
+}
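+
+/*
+ * Illustrative sketch, not part of the original patch: the expected driver
+ * ordering is acquire first, then enable with the desired mode word.  The
+ * mode value below is a placeholder, not a recommendation:
+ *
+ *	struct drm_agp_mode mode = { .mode = 0x1f000209 };
+ *
+ *	if (!drm_agp_acquire(dev))
+ *		drm_agp_enable(dev, mode);
+ */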
+
+/**
+ * Allocate AGP memory.
+ *
+ * \param inode device inode.
+ * \param file_priv file private pointer.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired, allocates the
+ * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
+ */
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+	struct drm_agp_mem *entry;
+	DRM_AGP_MEM *memory;
+	unsigned long pages;
+	u32 type;
+
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+	type = (u32) request->type;
+	if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
+		kfree(entry);
+		return -ENOMEM;
+	}
+
+	entry->handle = (unsigned long)memory->key + 1;
+	entry->memory = memory;
+	entry->bound = 0;
+	entry->pages = pages;
+	list_add(&entry->head, &dev->agp->memory);
+
+	request->handle = entry->handle;
+	request->physical = memory->physical;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_alloc);
+
+
+int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_agp_buffer *request = data;
+
+	return drm_agp_alloc(dev, request);
+}
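+
+/*
+ * Illustrative sketch, not part of the original patch: allocating one
+ * megabyte of ordinary AGP memory (type 0) and recording the handle the
+ * core hands back.  Error handling is abbreviated:
+ *
+ *	struct drm_agp_buffer request = { .size = 1024 * 1024, .type = 0 };
+ *
+ *	if (!drm_agp_alloc(dev, &request))
+ *		DRM_DEBUG("AGP handle 0x%lx\n", request.handle);
+ */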
+
+/**
+ * Search for the AGP memory entry associated with a handle.
+ *
+ * \param dev DRM device structure.
+ * \param handle AGP memory handle.
+ * \return pointer to the drm_agp_mem structure associated with \p handle.
+ *
+ * Walks through drm_agp_head::memory until finding a matching handle.
+ */
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
+					   unsigned long handle)
+{
+	struct drm_agp_mem *entry;
+
+	list_for_each_entry(entry, &dev->agp->memory, head) {
+		if (entry->handle == handle)
+			return entry;
+	}
+	return NULL;
+}
+
+/**
+ * Unbind AGP memory from the GATT (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and acquired, looks-up the AGP memory
+ * entry and passes it to the unbind_agp() function.
+ */
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+	struct drm_agp_mem *entry;
+	int ret;
+
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+	if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+		return -EINVAL;
+	if (!entry->bound)
+		return -EINVAL;
+	ret = drm_unbind_agp(entry->memory);
+	if (ret == 0)
+		entry->bound = 0;
+	return ret;
+}
+EXPORT_SYMBOL(drm_agp_unbind);
+
+
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_agp_binding *request = data;
+
+	return drm_agp_unbind(dev, request);
+}
+
+/**
+ * Bind AGP memory into the GATT (ioctl)
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and that no memory
+ * is currently bound into the GATT. Looks-up the AGP memory entry and passes
+ * it to bind_agp() function.
+ */
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+	struct drm_agp_mem *entry;
+	int retcode;
+	int page;
+
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+	if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+		return -EINVAL;
+	if (entry->bound)
+		return -EINVAL;
+	page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
+	if ((retcode = drm_bind_agp(entry->memory, page)))
+		return retcode;
+	entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+	DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
+		  dev->agp->base, entry->bound);
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_bind);
+
+
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_agp_binding *request = data;
+
+	return drm_agp_bind(dev, request);
+}
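+
+/*
+ * Illustrative sketch, not part of the original patch: binding a previously
+ * allocated buffer at aperture offset 0 and unbinding it again.  The handle
+ * is the one returned by drm_agp_alloc() above:
+ *
+ *	struct drm_agp_binding bind = { .handle = handle, .offset = 0 };
+ *
+ *	if (!drm_agp_bind(dev, &bind))
+ *		drm_agp_unbind(dev, &bind);
+ */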
+
+/**
+ * Free AGP memory (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and looks up the
+ * AGP memory entry. If the memory is currently bound, unbinds it via
+ * unbind_agp(). Frees it via free_agp(), unlinks the entry from the doubly
+ * linked list it's inserted in, and frees the entry itself.
+ */
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+	struct drm_agp_mem *entry;
+
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+	if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+		return -EINVAL;
+	if (entry->bound)
+		drm_unbind_agp(entry->memory);
+
+	list_del(&entry->head);
+
+	drm_free_agp(entry->memory, entry->pages);
+	kfree(entry);
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_free);
+
+
+
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_agp_buffer *request = data;
+
+	return drm_agp_free(dev, request);
+}
+
+/**
+ * Initialize the AGP resources.
+ *
+ * \return pointer to a drm_agp_head structure.
+ *
+ * Gets the drm_agp_t structure which is made available by the agpgart module
+ * via the inter_module_* functions. Creates and initializes a drm_agp_head
+ * structure.
+ */
+struct drm_agp_head *drm_agp_init(struct drm_device *dev)
+{
+	struct drm_agp_head *head = NULL;
+
+	head = kzalloc(sizeof(*head), GFP_KERNEL);
+	if (!head)
+		return NULL;
+	head->bridge = agp_find_bridge(dev->pdev);
+	if (!head->bridge) {
+		if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
+			kfree(head);
+			return NULL;
+		}
+		agp_copy_info(head->bridge, &head->agp_info);
+		agp_backend_release(head->bridge);
+	} else {
+		agp_copy_info(head->bridge, &head->agp_info);
+	}
+	if (head->agp_info.chipset == NOT_SUPPORTED) {
+		kfree(head);
+		return NULL;
+	}
+	INIT_LIST_HEAD(&head->memory);
+	head->cant_use_aperture = head->agp_info.cant_use_aperture;
+	head->page_mask = head->agp_info.page_mask;
+	head->base = head->agp_info.aper_base;
+	return head;
+}
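+
+/*
+ * Illustrative sketch, not part of the original patch: drivers typically
+ * call drm_agp_init() once at load time and stash the result; a NULL return
+ * simply means no usable AGP bridge was found:
+ *
+ *	dev->agp = drm_agp_init(dev);
+ *	if (!dev->agp)
+ *		DRM_DEBUG("running without AGP\n");
+ */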
+
+/**
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ */
+DRM_AGP_MEM *
+drm_agp_bind_pages(struct drm_device *dev,
+		   struct page **pages,
+		   unsigned long num_pages,
+		   uint32_t gtt_offset,
+		   u32 type)
+{
+	DRM_AGP_MEM *mem;
+	int ret, i;
+
+	DRM_DEBUG("\n");
+
+	mem = agp_allocate_memory(dev->agp->bridge, num_pages,
+				      type);
+	if (mem == NULL) {
+		DRM_ERROR("Failed to allocate memory for %ld pages\n",
+			  num_pages);
+		return NULL;
+	}
+
+	for (i = 0; i < num_pages; i++)
+		mem->pages[i] = pages[i];
+	mem->page_count = num_pages;
+
+	mem->is_flushed = true;
+	ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+	if (ret != 0) {
+		DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
+		agp_free_memory(mem);
+		return NULL;
+	}
+
+	return mem;
+}
+EXPORT_SYMBOL(drm_agp_bind_pages);
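+
+/*
+ * Illustrative sketch, not part of the original patch: binding an array of
+ * already-allocated pages at a GTT offset, as a GEM-style driver might do.
+ * AGP_USER_MEMORY comes from <linux/agp_backend.h>:
+ *
+ *	DRM_AGP_MEM *mem = drm_agp_bind_pages(dev, pages, num_pages,
+ *					      gtt_offset, AGP_USER_MEMORY);
+ *	if (mem == NULL)
+ *		return -ENOMEM;
+ */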
+
+#endif /* __OS_HAS_AGP */
diff --git a/linux-imx/drivers/gpu/drm/drm_auth.c b/linux-imx/drivers/gpu/drm/drm_auth.c
new file mode 100644
index 0000000..3cedae1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_auth.c
@@ -0,0 +1,194 @@
+/**
+ * \file drm_auth.c
+ * IOCTLs for authentication
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+
+/**
+ * Find the file with the given magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches drm_device::magiclist for the entry with the matching magic
+ * number among all files with the same hash key, while holding the
+ * drm_device::struct_mutex lock.
+ */
+static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
+{
+	struct drm_file *retval = NULL;
+	struct drm_magic_entry *pt;
+	struct drm_hash_item *hash;
+	struct drm_device *dev = master->minor->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
+		pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+		retval = pt->priv;
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return retval;
+}
+
+/**
+ * Adds a magic number.
+ *
+ * \param dev DRM device.
+ * \param priv file private data.
+ * \param magic magic number.
+ *
+ * Creates a drm_magic_entry structure and appends it to the linked list
+ * associated with the magic number hash key in drm_device::magiclist, while
+ * holding the drm_device::struct_mutex lock.
+ */
+static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
+			 drm_magic_t magic)
+{
+	struct drm_magic_entry *entry;
+	struct drm_device *dev = master->minor->dev;
+	DRM_DEBUG("%d\n", magic);
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+	entry->priv = priv;
+	entry->hash_item.key = (unsigned long)magic;
+	mutex_lock(&dev->struct_mutex);
+	drm_ht_insert_item(&master->magiclist, &entry->hash_item);
+	list_add_tail(&entry->head, &master->magicfree);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/**
+ * Remove a magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches for and unlinks the entry in drm_device::magiclist with the magic
+ * number hash key, while holding the drm_device::struct_mutex lock.
+ */
+int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
+{
+	struct drm_magic_entry *pt;
+	struct drm_hash_item *hash;
+	struct drm_device *dev = master->minor->dev;
+
+	DRM_DEBUG("%d\n", magic);
+
+	mutex_lock(&dev->struct_mutex);
+	if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+	pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+	drm_ht_remove_item(&master->magiclist, hash);
+	list_del(&pt->head);
+	mutex_unlock(&dev->struct_mutex);
+
+	kfree(pt);
+
+	return 0;
+}
+
+/**
+ * Get a unique magic number (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a resulting drm_auth structure.
+ * \return zero on success, or a negative number on failure.
+ *
+ * If there is a magic number in drm_file::magic then use it, otherwise
+ * searches for a unique non-zero magic number and adds it, associating it
+ * with \p file_priv.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
+ */
+int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	static drm_magic_t sequence = 0;
+	static DEFINE_SPINLOCK(lock);
+	struct drm_auth *auth = data;
+
+	/* Find unique magic */
+	if (file_priv->magic) {
+		auth->magic = file_priv->magic;
+	} else {
+		do {
+			spin_lock(&lock);
+			if (!sequence)
+				++sequence;	/* reserve 0 */
+			auth->magic = sequence++;
+			spin_unlock(&lock);
+		} while (drm_find_file(file_priv->master, auth->magic));
+		file_priv->magic = auth->magic;
+		drm_add_magic(file_priv->master, file_priv, auth->magic);
+	}
+
+	DRM_DEBUG("%u\n", auth->magic);
+
+	return 0;
+}
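+
+/*
+ * Illustrative sketch, not part of the original patch: the classic DRI
+ * handshake pairs this ioctl with drm_authmagic() below.  The unprivileged
+ * client fetches a magic and hands it to the master, which authenticates it:
+ *
+ *	struct drm_auth auth;
+ *
+ *	ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth);
+ *	ioctl(master_fd, DRM_IOCTL_AUTH_MAGIC, &auth);
+ */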
+
+/**
+ * Authenticate with a magic.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_auth structure.
+ * \return zero if authentication succeeded, or a negative number otherwise.
+ *
+ * Checks if \p file_priv is associated with the magic number passed in \arg.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
+ */
+int drm_authmagic(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct drm_auth *auth = data;
+	struct drm_file *file;
+
+	DRM_DEBUG("%u\n", auth->magic);
+	if ((file = drm_find_file(file_priv->master, auth->magic))) {
+		file->authenticated = 1;
+		drm_remove_magic(file_priv->master, auth->magic);
+		return 0;
+	}
+	return -EINVAL;
+}
diff --git a/linux-imx/drivers/gpu/drm/drm_buffer.c b/linux-imx/drivers/gpu/drm/drm_buffer.c
new file mode 100644
index 0000000..39a7183
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_buffer.c
@@ -0,0 +1,185 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Pauli Nieminen.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Multipart buffer for copying data which is larger than the page size.
+ *
+ * Authors:
+ * Pauli Nieminen <suokkos-at-gmail-dot-com>
+ */
+
+#include <linux/export.h>
+#include <drm/drm_buffer.h>
+
+/**
+ * Allocate the drm buffer object.
+ *
+ *   buf: Pointer to a pointer where the object is stored.
+ *   size: The number of bytes to allocate.
+ */
+int drm_buffer_alloc(struct drm_buffer **buf, int size)
+{
+	int nr_pages = size / PAGE_SIZE + 1;
+	int idx;
+
+	/* Allocating the pointer table at the end of the structure makes
+	 * drm_buffer variable sized */
+	*buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
+			GFP_KERNEL);
+
+	if (*buf == NULL) {
+		DRM_ERROR("Failed to allocate drm buffer object to hold"
+				" %d bytes in %d pages.\n",
+				size, nr_pages);
+		return -ENOMEM;
+	}
+
+	(*buf)->size = size;
+
+	for (idx = 0; idx < nr_pages; ++idx) {
+
+		(*buf)->data[idx] =
+			kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
+				GFP_KERNEL);
+
+
+		if ((*buf)->data[idx] == NULL) {
+			DRM_ERROR("Failed to allocate %dth page for drm"
+					" buffer with %d bytes and %d pages.\n",
+					idx + 1, size, nr_pages);
+			goto error_out;
+		}
+
+	}
+
+	return 0;
+
+error_out:
+
+	/* Only last element can be null pointer so check for it first. */
+	if ((*buf)->data[idx])
+		kfree((*buf)->data[idx]);
+
+	for (--idx; idx >= 0; --idx)
+		kfree((*buf)->data[idx]);
+
+	kfree(*buf);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_buffer_alloc);
+
+/**
+ * Copy the user data to the begin of the buffer and reset the processing
+ * iterator.
+ *
+ *   user_data: A pointer to the data that is copied to the buffer.
+ *   size: The number of bytes to copy.
+ */
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
+			      void __user *user_data, int size)
+{
+	int nr_pages = size / PAGE_SIZE + 1;
+	int idx;
+
+	if (size > buf->size) {
+		DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
+				" %d bytes space\n",
+				size, buf->size);
+		return -EFAULT;
+	}
+
+	for (idx = 0; idx < nr_pages; ++idx) {
+
+		if (DRM_COPY_FROM_USER(buf->data[idx],
+			user_data + idx * PAGE_SIZE,
+			min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
+			DRM_ERROR("Failed to copy user data (%p) to drm buffer"
+					" (%p) %dth page.\n",
+					user_data, buf, idx);
+			return -EFAULT;
+
+		}
+	}
+	buf->iterator = 0;
+	return 0;
+}
+EXPORT_SYMBOL(drm_buffer_copy_from_user);
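+
+/*
+ * Illustrative sketch, not part of the original patch: the usual life cycle
+ * is allocate, fill from user space, consume, free.  cmdbuf and cmdsize are
+ * placeholder names:
+ *
+ *	struct drm_buffer *buf;
+ *
+ *	if (drm_buffer_alloc(&buf, cmdsize))
+ *		return -ENOMEM;
+ *	if (drm_buffer_copy_from_user(buf, cmdbuf, cmdsize)) {
+ *		drm_buffer_free(buf);
+ *		return -EFAULT;
+ *	}
+ */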
+
+/**
+ * Free the drm buffer object
+ */
+void drm_buffer_free(struct drm_buffer *buf)
+{
+
+	if (buf != NULL) {
+
+		int nr_pages = buf->size / PAGE_SIZE + 1;
+		int idx;
+		for (idx = 0; idx < nr_pages; ++idx)
+			kfree(buf->data[idx]);
+
+		kfree(buf);
+	}
+}
+EXPORT_SYMBOL(drm_buffer_free);
+
+/**
+ * Read an object from a buffer that may be split across multiple pages. If
+ * the object is not split, the function simply returns a pointer to the
+ * object inside the buffer. If the object is split, its data is copied into
+ * the stack object supplied by the caller, and that copy is returned.
+ *
+ * The processing location of the buffer is also advanced to the next byte
+ * after the object.
+ *
+ *   objsize: The size of the object in bytes.
+ *   stack_obj: A pointer to a memory location where the object can be copied.
+ */
+void *drm_buffer_read_object(struct drm_buffer *buf,
+		int objsize, void *stack_obj)
+{
+	int idx = drm_buffer_index(buf);
+	int page = drm_buffer_page(buf);
+	void *obj = NULL;
+
+	if (idx + objsize <= PAGE_SIZE) {
+		obj = &buf->data[page][idx];
+	} else {
+		/* The object is split, which forces a copy into the temporary object. */
+		int beginsz = PAGE_SIZE - idx;
+		memcpy(stack_obj, &buf->data[page][idx], beginsz);
+
+		memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
+				objsize - beginsz);
+
+		obj = stack_obj;
+	}
+
+	drm_buffer_advance(buf, objsize);
+	return obj;
+}
+EXPORT_SYMBOL(drm_buffer_read_object);
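+
+/*
+ * Illustrative sketch, not part of the original patch: reading a 32-bit
+ * word that may straddle a page boundary.  The temporary lives on the
+ * caller's stack and is only used when the object is actually split:
+ *
+ *	u32 tmp;
+ *	u32 *val = drm_buffer_read_object(buf, sizeof(tmp), &tmp);
+ */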
diff --git a/linux-imx/drivers/gpu/drm/drm_bufs.c b/linux-imx/drivers/gpu/drm/drm_bufs.c
new file mode 100644
index 0000000..0128147
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_bufs.c
@@ -0,0 +1,1614 @@
+/**
+ * \file drm_bufs.c
+ * Generic buffer template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/log2.h>
+#include <linux/export.h>
+#include <asm/shmparam.h>
+#include <drm/drmP.h>
+
+static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+						  struct drm_local_map *map)
+{
+	struct drm_map_list *entry;
+	list_for_each_entry(entry, &dev->maplist, head) {
+		/*
+		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
+		 * while PCI resources may live above that, we only compare the
+		 * lower 32 bits of the map offset for maps of type
+		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
+		 * It is assumed that if a driver has more than one resource
+		 * of each type, the lower 32 bits are different.
+		 */
+		if (!entry->map ||
+		    map->type != entry->map->type ||
+		    entry->master != dev->primary->master)
+			continue;
+		switch (map->type) {
+		case _DRM_SHM:
+			if (map->flags != _DRM_CONTAINS_LOCK)
+				break;
+			return entry;
+		case _DRM_REGISTERS:
+		case _DRM_FRAME_BUFFER:
+			if ((entry->map->offset & 0xffffffff) ==
+			    (map->offset & 0xffffffff))
+				return entry;
+		default: /* Make gcc happy */
+			;
+		}
+		if (entry->map->offset == map->offset)
+			return entry;
+	}
+
+	return NULL;
+}
+
+static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
+			  unsigned long user_token, int hashed_handle, int shm)
+{
+	int use_hashed_handle, shift;
+	unsigned long add;
+
+#if (BITS_PER_LONG == 64)
+	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
+#elif (BITS_PER_LONG == 32)
+	use_hashed_handle = hashed_handle;
+#else
+#error Unsupported long size. Neither 64 nor 32 bits.
+#endif
+
+	if (!use_hashed_handle) {
+		int ret;
+		hash->key = user_token >> PAGE_SHIFT;
+		ret = drm_ht_insert_item(&dev->map_hash, hash);
+		if (ret != -EINVAL)
+			return ret;
+	}
+
+	shift = 0;
+	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
+	if (shm && (SHMLBA > PAGE_SIZE)) {
+		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
+
+		/* For shared memory, we have to preserve the SHMLBA
+		 * bits of the eventual vma->vm_pgoff value during
+		 * mmap().  Otherwise we run into cache aliasing problems
+		 * on some platforms.  On these platforms, the pgoff of
+		 * a mmap() request is used to pick a suitable virtual
+		 * address for the mmap() region such that it will not
+		 * cause cache aliasing problems.
+		 *
+		 * Therefore, make sure the SHMLBA relevant bits of the
+		 * hash value we use are equal to those in the original
+		 * kernel virtual address.
+		 */
+		shift = bits;
+		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
+	}
+
+	return drm_ht_just_insert_please(&dev->map_hash, hash,
+					 user_token, 32 - PAGE_SHIFT - 3,
+					 shift, add);
+}
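+
+/*
+ * Illustrative worked example, not part of the original patch: with 4 KiB
+ * pages and a 16 KiB SHMLBA, bits = ilog2(16K >> 12) + 1 = 3, so the low
+ * three page bits of the original kernel virtual address are folded into
+ * the hash key and the eventual vma->vm_pgoff keeps the same cache colour.
+ */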
+
+/**
+ * Core function to create a range of memory available for mapping by a
+ * non-root process.
+ *
+ * Adjusts the memory offset to its absolute value according to the mapping
+ * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
+ * applicable and if supported by the kernel.
+ */
+static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
+			   unsigned int size, enum drm_map_type type,
+			   enum drm_map_flags flags,
+			   struct drm_map_list ** maplist)
+{
+	struct drm_local_map *map;
+	struct drm_map_list *list;
+	drm_dma_handle_t *dmah;
+	unsigned long user_token;
+	int ret;
+
+	map = kmalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return -ENOMEM;
+
+	map->offset = offset;
+	map->size = size;
+	map->flags = flags;
+	map->type = type;
+
+	/* Only allow shared memory to be removable since we only keep enough
+	 * book keeping information about shared memory to allow for removal
+	 * when processes fork.
+	 */
+	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
+		kfree(map);
+		return -EINVAL;
+	}
+	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
+		  (unsigned long long)map->offset, map->size, map->type);
+
+	/* page-align _DRM_SHM maps. They are allocated here so there is no security
+	 * hole created by that and it works around various broken drivers that use
+	 * a non-aligned quantity to map the SAREA. --BenH
+	 */
+	if (map->type == _DRM_SHM)
+		map->size = PAGE_ALIGN(map->size);
+
+	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
+		kfree(map);
+		return -EINVAL;
+	}
+	map->mtrr = -1;
+	map->handle = NULL;
+
+	switch (map->type) {
+	case _DRM_REGISTERS:
+	case _DRM_FRAME_BUFFER:
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
+		if (map->offset + (map->size-1) < map->offset ||
+		    map->offset < virt_to_phys(high_memory)) {
+			kfree(map);
+			return -EINVAL;
+		}
+#endif
+		/* Some drivers preinitialize some maps, without the X Server
+		 * needing to be aware of it.  Therefore, we just return success
+		 * when the server tries to create a duplicate map.
+		 */
+		list = drm_find_matching_map(dev, map);
+		if (list != NULL) {
+			if (list->map->size != map->size) {
+				DRM_DEBUG("Matching maps of type %d with "
+					  "mismatched sizes, (%ld vs %ld)\n",
+					  map->type, map->size,
+					  list->map->size);
+				list->map->size = map->size;
+			}
+
+			kfree(map);
+			*maplist = list;
+			return 0;
+		}
+
+		if (drm_core_has_MTRR(dev)) {
+			if (map->type == _DRM_FRAME_BUFFER ||
+			    (map->flags & _DRM_WRITE_COMBINING)) {
+				map->mtrr = mtrr_add(map->offset, map->size,
+						     MTRR_TYPE_WRCOMB, 1);
+			}
+		}
+		if (map->type == _DRM_REGISTERS) {
+			map->handle = ioremap(map->offset, map->size);
+			if (!map->handle) {
+				kfree(map);
+				return -ENOMEM;
+			}
+		}
+
+		break;
+	case _DRM_SHM:
+		list = drm_find_matching_map(dev, map);
+		if (list != NULL) {
+			if(list->map->size != map->size) {
+				DRM_DEBUG("Matching maps of type %d with "
+					  "mismatched sizes, (%ld vs %ld)\n",
+					  map->type, map->size, list->map->size);
+				list->map->size = map->size;
+			}
+
+			kfree(map);
+			*maplist = list;
+			return 0;
+		}
+		map->handle = vmalloc_user(map->size);
+		DRM_DEBUG("%lu %d %p\n",
+			  map->size, drm_order(map->size), map->handle);
+		if (!map->handle) {
+			kfree(map);
+			return -ENOMEM;
+		}
+		map->offset = (unsigned long)map->handle;
+		if (map->flags & _DRM_CONTAINS_LOCK) {
+			/* Prevent a 2nd X Server from creating a 2nd lock */
+			if (dev->primary->master->lock.hw_lock != NULL) {
+				vfree(map->handle);
+				kfree(map);
+				return -EBUSY;
+			}
+			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
+		}
+		break;
+	case _DRM_AGP: {
+		struct drm_agp_mem *entry;
+		int valid = 0;
+
+		if (!drm_core_has_AGP(dev)) {
+			kfree(map);
+			return -EINVAL;
+		}
+#ifdef __alpha__
+		map->offset += dev->hose->mem_space->start;
+#endif
+		/* In some cases (i810 driver), user space may have already
+		 * added the AGP base itself, because dev->agp->base previously
+		 * only got set during AGP enable.  So, only add the base
+		 * address if the map's offset isn't already within the
+		 * aperture.
+		 */
+		if (map->offset < dev->agp->base ||
+		    map->offset > dev->agp->base +
+		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
+			map->offset += dev->agp->base;
+		}
+		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
+
+		/* This assumes the DRM is in total control of AGP space.
+		 * It's not always the case as AGP can be in the control
+		 * of user space (i.e. i810 driver). So this loop will get
+		 * skipped and we double check that dev->agp->memory is
+		 * actually set as well as being invalid before EPERM'ing
+		 */
+		list_for_each_entry(entry, &dev->agp->memory, head) {
+			if ((map->offset >= entry->bound) &&
+			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
+				valid = 1;
+				break;
+			}
+		}
+		if (!list_empty(&dev->agp->memory) && !valid) {
+			kfree(map);
+			return -EPERM;
+		}
+		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
+			  (unsigned long long)map->offset, map->size);
+
+		break;
+	}
+	case _DRM_GEM:
+		DRM_ERROR("tried to addmap GEM object\n");
+		break;
+	case _DRM_SCATTER_GATHER:
+		if (!dev->sg) {
+			kfree(map);
+			return -EINVAL;
+		}
+		map->offset += (unsigned long)dev->sg->virtual;
+		break;
+	case _DRM_CONSISTENT:
+		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
+		 * As we're limiting the address to 2^32-1 (or less),
+		 * casting it down to 32 bits is no problem, but we
+		 * need to point to a 64bit variable first. */
+		dmah = drm_pci_alloc(dev, map->size, map->size);
+		if (!dmah) {
+			kfree(map);
+			return -ENOMEM;
+		}
+		map->handle = dmah->vaddr;
+		map->offset = (unsigned long)dmah->busaddr;
+		kfree(dmah);
+		break;
+	default:
+		kfree(map);
+		return -EINVAL;
+	}
+
+	list = kzalloc(sizeof(*list), GFP_KERNEL);
+	if (!list) {
+		if (map->type == _DRM_REGISTERS)
+			iounmap(map->handle);
+		kfree(map);
+		return -EINVAL;
+	}
+	list->map = map;
+
+	mutex_lock(&dev->struct_mutex);
+	list_add(&list->head, &dev->maplist);
+
+	/* Assign a 32-bit handle */
+	/* We do it here so that dev->struct_mutex protects the increment */
+	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
+		map->offset;
+	ret = drm_map_handle(dev, &list->hash, user_token, 0,
+			     (map->type == _DRM_SHM));
+	if (ret) {
+		if (map->type == _DRM_REGISTERS)
+			iounmap(map->handle);
+		kfree(map);
+		kfree(list);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	list->user_token = list->hash.key << PAGE_SHIFT;
+	mutex_unlock(&dev->struct_mutex);
+
+	if (!(map->flags & _DRM_DRIVER))
+		list->master = dev->primary->master;
+	*maplist = list;
+	return 0;
+}
+
+int drm_addmap(struct drm_device * dev, resource_size_t offset,
+	       unsigned int size, enum drm_map_type type,
+	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
+{
+	struct drm_map_list *list;
+	int rc;
+
+	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
+	if (!rc)
+		*map_ptr = list->map;
+	return rc;
+}
+
+EXPORT_SYMBOL(drm_addmap);
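+
+/*
+ * Illustrative sketch, not part of the original patch: mapping a 4 KiB
+ * register window from PCI BAR 0.  The BAR index is a placeholder:
+ *
+ *	struct drm_local_map *map;
+ *
+ *	if (drm_addmap(dev, pci_resource_start(dev->pdev, 0), 0x1000,
+ *		       _DRM_REGISTERS, 0, &map))
+ *		return -ENOMEM;
+ */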
+
+/**
+ * Ioctl to specify a range of memory that is available for mapping by a
+ * non-root process.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_map structure.
+ * \return zero on success or a negative value on error.
+ *
+ */
+int drm_addmap_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_map *map = data;
+	struct drm_map_list *maplist;
+	int err;
+
+	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
+		return -EPERM;
+
+	err = drm_addmap_core(dev, map->offset, map->size, map->type,
+			      map->flags, &maplist);
+
+	if (err)
+		return err;
+
+	/* avoid a warning on 64-bit; this casting isn't very nice, but the
+	 * API is already set, so it's too late to change */
+	map->handle = (void *)(unsigned long)maplist->user_token;
+	return 0;
+}
+
+/**
+ * Remove a map private from list and deallocate resources if the mapping
+ * isn't in use.
+ *
+ * Searches for the map on drm_device::maplist, removes it from the list,
+ * checks whether it's still being used, and frees any associated resources
+ * (such as MTRRs) if it's not in use.
+ *
+ * \sa drm_addmap
+ */
+int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
+{
+	struct drm_map_list *r_list = NULL, *list_t;
+	drm_dma_handle_t dmah;
+	int found = 0;
+	struct drm_master *master;
+
+	/* Find the list entry for the map and remove it */
+	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
+		if (r_list->map == map) {
+			master = r_list->master;
+			list_del(&r_list->head);
+			drm_ht_remove_key(&dev->map_hash,
+					  r_list->user_token >> PAGE_SHIFT);
+			kfree(r_list);
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	switch (map->type) {
+	case _DRM_REGISTERS:
+		iounmap(map->handle);
+		/* FALLTHROUGH */
+	case _DRM_FRAME_BUFFER:
+		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+			int retcode;
+			retcode = mtrr_del(map->mtrr, map->offset, map->size);
+			DRM_DEBUG("mtrr_del=%d\n", retcode);
+		}
+		break;
+	case _DRM_SHM:
+		vfree(map->handle);
+		if (master) {
+			if (dev->sigdata.lock == master->lock.hw_lock)
+				dev->sigdata.lock = NULL;
+			master->lock.hw_lock = NULL;   /* SHM removed */
+			master->lock.file_priv = NULL;
+			wake_up_interruptible_all(&master->lock.lock_queue);
+		}
+		break;
+	case _DRM_AGP:
+	case _DRM_SCATTER_GATHER:
+		break;
+	case _DRM_CONSISTENT:
+		dmah.vaddr = map->handle;
+		dmah.busaddr = map->offset;
+		dmah.size = map->size;
+		__drm_pci_free(dev, &dmah);
+		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to rmmap GEM object\n");
+		break;
+	}
+	kfree(map);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_rmmap_locked);
+
+int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
+{
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_rmmap_locked(dev, map);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_rmmap);
+
+/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly.  Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about.  This seems
+ * unlikely.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_map structure.
+ * \return zero on success or a negative value on error.
+ */
+int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_map *request = data;
+	struct drm_local_map *map = NULL;
+	struct drm_map_list *r_list;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		if (r_list->map &&
+		    r_list->user_token == (unsigned long)request->handle &&
+		    r_list->map->flags & _DRM_REMOVABLE) {
+			map = r_list->map;
+			break;
+		}
+	}
+
+	/* List has wrapped around to the head pointer, or it's empty and we
+	 * didn't find anything.
+	 */
+	if (list_empty(&dev->maplist) || !map) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	/* Register and framebuffer maps are permanent */
+	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+		mutex_unlock(&dev->struct_mutex);
+		return 0;
+	}
+
+	ret = drm_rmmap_locked(dev, map);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+/**
+ * Cleanup after an error on one of the addbufs() functions.
+ *
+ * \param dev DRM device.
+ * \param entry buffer entry where the error occurred.
+ *
+ * Frees any pages and buffers associated with the given entry.
+ */
+static void drm_cleanup_buf_error(struct drm_device * dev,
+				  struct drm_buf_entry * entry)
+{
+	int i;
+
+	if (entry->seg_count) {
+		for (i = 0; i < entry->seg_count; i++) {
+			if (entry->seglist[i]) {
+				drm_pci_free(dev, entry->seglist[i]);
+			}
+		}
+		kfree(entry->seglist);
+
+		entry->seg_count = 0;
+	}
+
+	if (entry->buf_count) {
+		for (i = 0; i < entry->buf_count; i++) {
+			kfree(entry->buflist[i].dev_private);
+		}
+		kfree(entry->buflist);
+
+		entry->buf_count = 0;
+	}
+}
+
+#if __OS_HAS_AGP
+/**
+ * Add AGP buffers for DMA transfers.
+ *
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
+ * \return zero on success or a negative number on failure.
+ *
+ * After some sanity checks creates a drm_buf structure for each buffer and
+ * reallocates the buffer list of the same size order to accommodate the new
+ * buffers.
+ */
+int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_entry *entry;
+	struct drm_agp_mem *agp_entry;
+	struct drm_buf *buf;
+	unsigned long offset;
+	unsigned long agp_offset;
+	int count;
+	int order;
+	int size;
+	int alignment;
+	int page_order;
+	int total;
+	int byte_count;
+	int i, valid;
+	struct drm_buf **temp_buflist;
+
+	if (!dma)
+		return -EINVAL;
+
+	count = request->count;
+	order = drm_order(request->size);
+	size = 1 << order;
+
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+
+	byte_count = 0;
+	agp_offset = dev->agp->base + request->agp_start;
+
+	DRM_DEBUG("count:      %d\n", count);
+	DRM_DEBUG("order:      %d\n", order);
+	DRM_DEBUG("size:       %d\n", size);
+	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
+	DRM_DEBUG("alignment:  %d\n", alignment);
+	DRM_DEBUG("page_order: %d\n", page_order);
+	DRM_DEBUG("total:      %d\n", total);
+
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+
+	/* Make sure buffers are located in AGP memory that we own */
+	valid = 0;
+	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
+		if ((agp_offset >= agp_entry->bound) &&
+		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
+			valid = 1;
+			break;
+		}
+	}
+	if (!list_empty(&dev->agp->memory) && !valid) {
+		DRM_DEBUG("zone invalid\n");
+		return -EINVAL;
+	}
+	spin_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	spin_unlock(&dev->count_lock);
+
+	mutex_lock(&dev->struct_mutex);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
+
+	if (count < 0 || count > 4096) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
+	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+	if (!entry->buflist) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+
+	offset = 0;
+
+	while (entry->buf_count < count) {
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
+
+		buf->offset = (dma->byte_count + offset);
+		buf->bus_address = agp_offset + offset;
+		buf->address = (void *)(agp_offset + offset);
+		buf->next = NULL;
+		buf->waiting = 0;
+		buf->pending = 0;
+		buf->file_priv = NULL;
+
+		buf->dev_priv_size = dev->driver->dev_priv_size;
+		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
+		if (!buf->dev_private) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			drm_cleanup_buf_error(dev, entry);
+			mutex_unlock(&dev->struct_mutex);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
+		}
+
+		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+		offset += alignment;
+		entry->buf_count++;
+		byte_count += PAGE_SIZE << page_order;
+	}
+
+	DRM_DEBUG("byte_count: %d\n", byte_count);
+
+	temp_buflist = krealloc(dma->buflist,
+				(dma->buf_count + entry->buf_count) *
+				sizeof(*dma->buflist), GFP_KERNEL);
+	if (!temp_buflist) {
+		/* Free the entry because it isn't valid */
+		drm_cleanup_buf_error(dev, entry);
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	dma->buflist = temp_buflist;
+
+	for (i = 0; i < entry->buf_count; i++) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+	}
+
+	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += byte_count >> PAGE_SHIFT;
+	dma->byte_count += byte_count;
+
+	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	request->count = entry->buf_count;
+	request->size = size;
+
+	dma->flags = _DRM_DMA_USE_AGP;
+
+	atomic_dec(&dev->buf_alloc);
+	return 0;
+}
+EXPORT_SYMBOL(drm_addbufs_agp);
+#endif				/* __OS_HAS_AGP */
+
+int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int count;
+	int order;
+	int size;
+	int total;
+	int page_order;
+	struct drm_buf_entry *entry;
+	drm_dma_handle_t *dmah;
+	struct drm_buf *buf;
+	int alignment;
+	unsigned long offset;
+	int i;
+	int byte_count;
+	int page_count;
+	unsigned long *temp_pagelist;
+	struct drm_buf **temp_buflist;
+
+	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	count = request->count;
+	order = drm_order(request->size);
+	size = 1 << order;
+
+	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
+		  request->count, request->size, size, order);
+
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+
+	spin_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	spin_unlock(&dev->count_lock);
+
+	mutex_lock(&dev->struct_mutex);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
+
+	if (count < 0 || count > 4096) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
+	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+	if (!entry->buflist) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+
+	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
+	if (!entry->seglist) {
+		kfree(entry->buflist);
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+
+	/* Keep the original pagelist until we know all the allocations
+	 * have succeeded
+	 */
+	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
+			       sizeof(*dma->pagelist), GFP_KERNEL);
+	if (!temp_pagelist) {
+		kfree(entry->buflist);
+		kfree(entry->seglist);
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	memcpy(temp_pagelist,
+	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
+	DRM_DEBUG("pagelist: %d entries\n",
+		  dma->page_count + (count << page_order));
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+	byte_count = 0;
+	page_count = 0;
+
+	while (entry->buf_count < count) {
+
+		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
+
+		if (!dmah) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			entry->seg_count = count;
+			drm_cleanup_buf_error(dev, entry);
+			kfree(temp_pagelist);
+			mutex_unlock(&dev->struct_mutex);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
+		}
+		entry->seglist[entry->seg_count++] = dmah;
+		for (i = 0; i < (1 << page_order); i++) {
+			DRM_DEBUG("page %d @ 0x%08lx\n",
+				  dma->page_count + page_count,
+				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
+			temp_pagelist[dma->page_count + page_count++]
+				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
+		}
+		for (offset = 0;
+		     offset + size <= total && entry->buf_count < count;
+		     offset += alignment, ++entry->buf_count) {
+			buf = &entry->buflist[entry->buf_count];
+			buf->idx = dma->buf_count + entry->buf_count;
+			buf->total = alignment;
+			buf->order = order;
+			buf->used = 0;
+			buf->offset = (dma->byte_count + byte_count + offset);
+			buf->address = (void *)(dmah->vaddr + offset);
+			buf->bus_address = dmah->busaddr + offset;
+			buf->next = NULL;
+			buf->waiting = 0;
+			buf->pending = 0;
+			buf->file_priv = NULL;
+
+			buf->dev_priv_size = dev->driver->dev_priv_size;
+			buf->dev_private = kzalloc(buf->dev_priv_size,
+						GFP_KERNEL);
+			if (!buf->dev_private) {
+				/* Set count correctly so we free the proper amount. */
+				entry->buf_count = count;
+				entry->seg_count = count;
+				drm_cleanup_buf_error(dev, entry);
+				kfree(temp_pagelist);
+				mutex_unlock(&dev->struct_mutex);
+				atomic_dec(&dev->buf_alloc);
+				return -ENOMEM;
+			}
+
+			DRM_DEBUG("buffer %d @ %p\n",
+				  entry->buf_count, buf->address);
+		}
+		byte_count += PAGE_SIZE << page_order;
+	}
+
+	temp_buflist = krealloc(dma->buflist,
+				(dma->buf_count + entry->buf_count) *
+				sizeof(*dma->buflist), GFP_KERNEL);
+	if (!temp_buflist) {
+		/* Free the entry because it isn't valid */
+		drm_cleanup_buf_error(dev, entry);
+		kfree(temp_pagelist);
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	dma->buflist = temp_buflist;
+
+	for (i = 0; i < entry->buf_count; i++) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+	}
+
+	/* No allocations failed, so now we can replace the original pagelist
+	 * with the new one.
+	 */
+	if (dma->page_count) {
+		kfree(dma->pagelist);
+	}
+	dma->pagelist = temp_pagelist;
+
+	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += entry->seg_count << page_order;
+	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	request->count = entry->buf_count;
+	request->size = size;
+
+	if (request->flags & _DRM_PCI_BUFFER_RO)
+		dma->flags = _DRM_DMA_USE_PCI_RO;
+
+	atomic_dec(&dev->buf_alloc);
+	return 0;
+
+}
+EXPORT_SYMBOL(drm_addbufs_pci);
+
+static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_entry *entry;
+	struct drm_buf *buf;
+	unsigned long offset;
+	unsigned long agp_offset;
+	int count;
+	int order;
+	int size;
+	int alignment;
+	int page_order;
+	int total;
+	int byte_count;
+	int i;
+	struct drm_buf **temp_buflist;
+
+	if (!drm_core_check_feature(dev, DRIVER_SG))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	count = request->count;
+	order = drm_order(request->size);
+	size = 1 << order;
+
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+
+	byte_count = 0;
+	agp_offset = request->agp_start;
+
+	DRM_DEBUG("count:      %d\n", count);
+	DRM_DEBUG("order:      %d\n", order);
+	DRM_DEBUG("size:       %d\n", size);
+	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+	DRM_DEBUG("alignment:  %d\n", alignment);
+	DRM_DEBUG("page_order: %d\n", page_order);
+	DRM_DEBUG("total:      %d\n", total);
+
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+
+	spin_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	spin_unlock(&dev->count_lock);
+
+	mutex_lock(&dev->struct_mutex);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
+
+	if (count < 0 || count > 4096) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
+	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
+				GFP_KERNEL);
+	if (!entry->buflist) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+
+	offset = 0;
+
+	while (entry->buf_count < count) {
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
+
+		buf->offset = (dma->byte_count + offset);
+		buf->bus_address = agp_offset + offset;
+		buf->address = (void *)(agp_offset + offset
+					+ (unsigned long)dev->sg->virtual);
+		buf->next = NULL;
+		buf->waiting = 0;
+		buf->pending = 0;
+		buf->file_priv = NULL;
+
+		buf->dev_priv_size = dev->driver->dev_priv_size;
+		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
+		if (!buf->dev_private) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			drm_cleanup_buf_error(dev, entry);
+			mutex_unlock(&dev->struct_mutex);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
+		}
+
+		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+		offset += alignment;
+		entry->buf_count++;
+		byte_count += PAGE_SIZE << page_order;
+	}
+
+	DRM_DEBUG("byte_count: %d\n", byte_count);
+
+	temp_buflist = krealloc(dma->buflist,
+				(dma->buf_count + entry->buf_count) *
+				sizeof(*dma->buflist), GFP_KERNEL);
+	if (!temp_buflist) {
+		/* Free the entry because it isn't valid */
+		drm_cleanup_buf_error(dev, entry);
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	dma->buflist = temp_buflist;
+
+	for (i = 0; i < entry->buf_count; i++) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+	}
+
+	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += byte_count >> PAGE_SHIFT;
+	dma->byte_count += byte_count;
+
+	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	request->count = entry->buf_count;
+	request->size = size;
+
+	dma->flags = _DRM_DMA_USE_SG;
+
+	atomic_dec(&dev->buf_alloc);
+	return 0;
+}
+
+static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_entry *entry;
+	struct drm_buf *buf;
+	unsigned long offset;
+	unsigned long agp_offset;
+	int count;
+	int order;
+	int size;
+	int alignment;
+	int page_order;
+	int total;
+	int byte_count;
+	int i;
+	struct drm_buf **temp_buflist;
+
+	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	count = request->count;
+	order = drm_order(request->size);
+	size = 1 << order;
+
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+
+	byte_count = 0;
+	agp_offset = request->agp_start;
+
+	DRM_DEBUG("count:      %d\n", count);
+	DRM_DEBUG("order:      %d\n", order);
+	DRM_DEBUG("size:       %d\n", size);
+	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+	DRM_DEBUG("alignment:  %d\n", alignment);
+	DRM_DEBUG("page_order: %d\n", page_order);
+	DRM_DEBUG("total:      %d\n", total);
+
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+
+	spin_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	spin_unlock(&dev->count_lock);
+
+	mutex_lock(&dev->struct_mutex);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
+
+	if (count < 0 || count > 4096) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
+	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
+				GFP_KERNEL);
+	if (!entry->buflist) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+
+	offset = 0;
+
+	while (entry->buf_count < count) {
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
+
+		buf->offset = (dma->byte_count + offset);
+		buf->bus_address = agp_offset + offset;
+		buf->address = (void *)(agp_offset + offset);
+		buf->next = NULL;
+		buf->waiting = 0;
+		buf->pending = 0;
+		buf->file_priv = NULL;
+
+		buf->dev_priv_size = dev->driver->dev_priv_size;
+		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
+		if (!buf->dev_private) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			drm_cleanup_buf_error(dev, entry);
+			mutex_unlock(&dev->struct_mutex);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
+		}
+
+		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+		offset += alignment;
+		entry->buf_count++;
+		byte_count += PAGE_SIZE << page_order;
+	}
+
+	DRM_DEBUG("byte_count: %d\n", byte_count);
+
+	temp_buflist = krealloc(dma->buflist,
+				(dma->buf_count + entry->buf_count) *
+				sizeof(*dma->buflist), GFP_KERNEL);
+	if (!temp_buflist) {
+		/* Free the entry because it isn't valid */
+		drm_cleanup_buf_error(dev, entry);
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	dma->buflist = temp_buflist;
+
+	for (i = 0; i < entry->buf_count; i++) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+	}
+
+	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += byte_count >> PAGE_SHIFT;
+	dma->byte_count += byte_count;
+
+	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	request->count = entry->buf_count;
+	request->size = size;
+
+	dma->flags = _DRM_DMA_USE_FB;
+
+	atomic_dec(&dev->buf_alloc);
+	return 0;
+}
+
+
+/**
+ * Add buffers for DMA transfers (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_buf_desc request.
+ * \return zero on success or a negative number on failure.
+ *
+ * According to the memory type specified in drm_buf_desc::flags and the
+ * build options, it dispatches the call either to addbufs_agp(),
+ * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
+ * PCI memory respectively.
+ */
+int drm_addbufs(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_buf_desc *request = data;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+#if __OS_HAS_AGP
+	if (request->flags & _DRM_AGP_BUFFER)
+		ret = drm_addbufs_agp(dev, request);
+	else
+#endif
+	if (request->flags & _DRM_SG_BUFFER)
+		ret = drm_addbufs_sg(dev, request);
+	else if (request->flags & _DRM_FB_BUFFER)
+		ret = drm_addbufs_fb(dev, request);
+	else
+		ret = drm_addbufs_pci(dev, request);
+
+	return ret;
+}
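+
+/*
+ * Illustrative sketch, not part of the original patch: a DRI client asking
+ * for 16 PCI DMA buffers of 64 KiB each through the ioctl interface:
+ *
+ *	struct drm_buf_desc desc = { .count = 16, .size = 65536, .flags = 0 };
+ *
+ *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
+ */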
+
+/**
+ * Get information about the buffer mappings.
+ *
+ * This was originally meant for debugging purposes, or for use by a
+ * sophisticated client library to determine how best to use the available
+ * buffers (e.g., large buffers can be used for image transfer).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * lock, preventing the allocation of more buffers after this call.
+ * Information about each requested buffer is then copied into user space.
+ */
+int drm_infobufs(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_info *request = data;
+	int i;
+	int count;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	spin_lock(&dev->count_lock);
+	if (atomic_read(&dev->buf_alloc)) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	++dev->buf_use;		/* Can't allocate more after this call */
+	spin_unlock(&dev->count_lock);
+
+	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+		if (dma->bufs[i].buf_count)
+			++count;
+	}
+
+	DRM_DEBUG("count = %d\n", count);
+
+	if (request->count >= count) {
+		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+			if (dma->bufs[i].buf_count) {
+				struct drm_buf_desc __user *to =
+				    &request->list[count];
+				struct drm_buf_entry *from = &dma->bufs[i];
+				struct drm_freelist *list = &dma->bufs[i].freelist;
+				if (copy_to_user(&to->count,
+						 &from->buf_count,
+						 sizeof(from->buf_count)) ||
+				    copy_to_user(&to->size,
+						 &from->buf_size,
+						 sizeof(from->buf_size)) ||
+				    copy_to_user(&to->low_mark,
+						 &list->low_mark,
+						 sizeof(list->low_mark)) ||
+				    copy_to_user(&to->high_mark,
+						 &list->high_mark,
+						 sizeof(list->high_mark)))
+					return -EFAULT;
+
+				DRM_DEBUG("%d %d %d %d %d\n",
+					  i,
+					  dma->bufs[i].buf_count,
+					  dma->bufs[i].buf_size,
+					  dma->bufs[i].freelist.low_mark,
+					  dma->bufs[i].freelist.high_mark);
+				++count;
+			}
+		}
+	}
+	request->count = count;
+
+	return 0;
+}
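+
+/*
+ * Usage sketch (illustrative only): userspace typically issues this ioctl
+ * twice - once with count == 0 to learn how many size classes are in use,
+ * then again with a suitably sized list:
+ *
+ *	struct drm_buf_info info = { .count = 0, .list = NULL };
+ *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);	(info.count now set)
+ *	info.list = calloc(info.count, sizeof(struct drm_buf_desc));
+ *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);	(entries filled in)
+ */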
+
+/**
+ * Specifies a low and high water mark for buffer allocation (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg a pointer to a drm_buf_desc structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies that the size order is bounded between the admissible orders and
+ * updates the respective drm_device_dma::bufs entry low and high water mark.
+ *
+ * \note This ioctl is deprecated and rarely, if ever, used.
+ */
+int drm_markbufs(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_desc *request = data;
+	int order;
+	struct drm_buf_entry *entry;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	DRM_DEBUG("%d, %d, %d\n",
+		  request->size, request->low_mark, request->high_mark);
+	order = drm_order(request->size);
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+	entry = &dma->bufs[order];
+
+	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
+		return -EINVAL;
+	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
+		return -EINVAL;
+
+	entry->freelist.low_mark = request->low_mark;
+	entry->freelist.high_mark = request->high_mark;
+
+	return 0;
+}
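+
+/*
+ * Usage sketch (illustrative only): the size class is selected by size, and
+ * the kernel derives the order via drm_order():
+ *
+ *	struct drm_buf_desc desc = {
+ *		.size      = 65536,	(selects dma->bufs[drm_order(65536)])
+ *		.low_mark  = 4,
+ *		.high_mark = 16,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_MARK_BUFS, &desc);
+ */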
+
+/**
+ * Unreserve the buffers in list, previously reserved using drmDMA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_free structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls drm_free_buffer() for each used buffer.
+ * This function is primarily used for debugging.
+ */
+int drm_freebufs(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_free *request = data;
+	int i;
+	int idx;
+	struct drm_buf *buf;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	DRM_DEBUG("%d\n", request->count);
+	for (i = 0; i < request->count; i++) {
+		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
+			return -EFAULT;
+		if (idx < 0 || idx >= dma->buf_count) {
+			DRM_ERROR("Index %d (of %d max)\n",
+				  idx, dma->buf_count - 1);
+			return -EINVAL;
+		}
+		buf = dma->buflist[idx];
+		if (buf->file_priv != file_priv) {
+			DRM_ERROR("Process %d freeing buffer not owned\n",
+				  task_pid_nr(current));
+			return -EINVAL;
+		}
+		drm_free_buffer(dev, buf);
+	}
+
+	return 0;
+}
+
+/**
+ * Maps all of the DMA buffers into client-virtual space (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
+ * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
+ * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
+ * drm_mmap_dma().
+ */
+int drm_mapbufs(struct drm_device *dev, void *data,
+	        struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int retcode = 0;
+	const int zero = 0;
+	unsigned long virtual;
+	unsigned long address;
+	struct drm_buf_map *request = data;
+	int i;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	spin_lock(&dev->count_lock);
+	if (atomic_read(&dev->buf_alloc)) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	dev->buf_use++;		/* Can't allocate more after this call */
+	spin_unlock(&dev->count_lock);
+
+	if (request->count >= dma->buf_count) {
+		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+		    || (drm_core_check_feature(dev, DRIVER_SG)
+			&& (dma->flags & _DRM_DMA_USE_SG))
+		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
+			&& (dma->flags & _DRM_DMA_USE_FB))) {
+			struct drm_local_map *map = dev->agp_buffer_map;
+			unsigned long token = dev->agp_buffer_token;
+
+			if (!map) {
+				retcode = -EINVAL;
+				goto done;
+			}
+			virtual = vm_mmap(file_priv->filp, 0, map->size,
+					  PROT_READ | PROT_WRITE,
+					  MAP_SHARED,
+					  token);
+		} else {
+			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
+					  PROT_READ | PROT_WRITE,
+					  MAP_SHARED, 0);
+		}
+		if (virtual > -1024UL) {
+			/* Real error */
+			retcode = (signed long)virtual;
+			goto done;
+		}
+		request->virtual = (void __user *)virtual;
+
+		for (i = 0; i < dma->buf_count; i++) {
+			if (copy_to_user(&request->list[i].idx,
+					 &dma->buflist[i]->idx,
+					 sizeof(request->list[0].idx))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+			if (copy_to_user(&request->list[i].total,
+					 &dma->buflist[i]->total,
+					 sizeof(request->list[0].total))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+			if (copy_to_user(&request->list[i].used,
+					 &zero, sizeof(zero))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+			address = virtual + dma->buflist[i]->offset;	/* *** */
+			if (copy_to_user(&request->list[i].address,
+					 &address, sizeof(address))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+		}
+	}
+      done:
+	request->count = dma->buf_count;
+	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
+
+	return retcode;
+}
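+
+/*
+ * Usage sketch (illustrative only): after the ioctl returns, each buffer's
+ * address is a client-virtual pointer into a single large mapping:
+ *
+ *	struct drm_buf_map map = { .count = n, .list = list };
+ *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &map);
+ *	(map.virtual is the base; list[i].address points inside it)
+ */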
+
+/**
+ * Compute size order.  Returns the exponent of the smallest power of two
+ * which is greater than or equal to the given number.
+ *
+ * \param size size.
+ * \return order.
+ *
+ * \todo Can be made faster.
+ */
+int drm_order(unsigned long size)
+{
+	int order;
+	unsigned long tmp;
+
+	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
+
+	if (size & (size - 1))
+		++order;
+
+	return order;
+}
+EXPORT_SYMBOL(drm_order);
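+
+/*
+ * Example values (a sketch of drm_order()'s contract):
+ *
+ *	drm_order(1)    == 0	(2^0  >= 1)
+ *	drm_order(4096) == 12	(2^12 == 4096, i.e. PAGE_SIZE on most arches)
+ *	drm_order(4097) == 13	(rounded up to the next power of two)
+ */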
diff --git a/linux-imx/drivers/gpu/drm/drm_cache.c b/linux-imx/drivers/gpu/drm/drm_cache.c
new file mode 100644
index 0000000..bb8f580
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_cache.c
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+
+#if defined(CONFIG_X86)
+static void
+drm_clflush_page(struct page *page)
+{
+	uint8_t *page_virtual;
+	unsigned int i;
+	const int size = boot_cpu_data.x86_clflush_size;
+
+	if (unlikely(page == NULL))
+		return;
+
+	page_virtual = kmap_atomic(page);
+	for (i = 0; i < PAGE_SIZE; i += size)
+		clflush(page_virtual + i);
+	kunmap_atomic(page_virtual);
+}
+
+static void drm_cache_flush_clflush(struct page *pages[],
+				    unsigned long num_pages)
+{
+	unsigned long i;
+
+	mb();
+	for (i = 0; i < num_pages; i++)
+		drm_clflush_page(*pages++);
+	mb();
+}
+
+static void
+drm_clflush_ipi_handler(void *null)
+{
+	wbinvd();
+}
+#endif
+
+void
+drm_clflush_pages(struct page *pages[], unsigned long num_pages)
+{
+
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		drm_cache_flush_clflush(pages, num_pages);
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+
+#elif defined(__powerpc__)
+	unsigned long i;
+	for (i = 0; i < num_pages; i++) {
+		struct page *page = pages[i];
+		void *page_virtual;
+
+		if (unlikely(page == NULL))
+			continue;
+
+		page_virtual = kmap_atomic(page);
+		flush_dcache_range((unsigned long)page_virtual,
+				   (unsigned long)page_virtual + PAGE_SIZE);
+		kunmap_atomic(page_virtual);
+	}
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_pages);
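+
+/*
+ * Usage sketch (illustrative, hypothetical driver code): flush CPU writes
+ * before handing pages to a device that does not snoop the CPU caches:
+ *
+ *	memcpy(vaddr, data, len);		(CPU fills the pages)
+ *	drm_clflush_pages(pages, num_pages);	(writes now visible to the GPU)
+ */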
+
+void
+drm_clflush_sg(struct sg_table *st)
+{
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		struct sg_page_iter sg_iter;
+
+		mb();
+		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+			drm_clflush_page(sg_page_iter_page(&sg_iter));
+		mb();
+
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_sg);
+
+void
+drm_clflush_virt_range(char *addr, unsigned long length)
+{
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		char *end = addr + length;
+		mb();
+		for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
+			clflush(addr);
+		clflush(end - 1);
+		mb();
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_virt_range);
diff --git a/linux-imx/drivers/gpu/drm/drm_context.c b/linux-imx/drivers/gpu/drm/drm_context.c
new file mode 100644
index 0000000..725968d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_context.c
@@ -0,0 +1,452 @@
+/**
+ * \file drm_context.c
+ * IOCTLs for generic contexts
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * ChangeLog:
+ *  2001-11-16	Torsten Duwe <duwe@caldera.de>
+ *		added context constructor/destructor hooks,
+ *		needed by SiS driver's memory management.
+ */
+
+#include <drm/drmP.h>
+
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
+/**
+ * Free a handle from the context bitmap.
+ *
+ * \param dev DRM device.
+ * \param ctx_handle context handle.
+ *
+ * Removes the entry specified by \p ctx_handle from drm_device::ctx_idr,
+ * while holding the drm_device::struct_mutex lock.
+ */
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+{
+	mutex_lock(&dev->struct_mutex);
+	idr_remove(&dev->ctx_idr, ctx_handle);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Context bitmap allocation.
+ *
+ * \param dev DRM device.
+ * \return (non-negative) context handle on success or a negative number on failure.
+ *
+ * Allocate a new idr from drm_device::ctx_idr while holding the
+ * drm_device::struct_mutex lock.
+ */
+static int drm_ctxbitmap_next(struct drm_device * dev)
+{
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
+			GFP_KERNEL);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * Context bitmap initialization.
+ *
+ * \param dev DRM device.
+ *
+ * Initialise the drm_device::ctx_idr
+ */
+int drm_ctxbitmap_init(struct drm_device * dev)
+{
+	idr_init(&dev->ctx_idr);
+	return 0;
+}
+
+/**
+ * Context bitmap cleanup.
+ *
+ * \param dev DRM device.
+ *
+ * Destroys drm_device::ctx_idr while holding the
+ * drm_device::struct_mutex lock.
+ */
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
+{
+	mutex_lock(&dev->struct_mutex);
+	idr_destroy(&dev->ctx_idr);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/*@}*/
+
+/******************************************************************/
+/** \name Per Context SAREA Support */
+/*@{*/
+
+/**
+ * Get per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Gets the map from drm_device::ctx_idr for the given context id and
+ * returns its user-space handle.
+ */
+int drm_getsareactx(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_ctx_priv_map *request = data;
+	struct drm_local_map *map;
+	struct drm_map_list *_entry;
+
+	mutex_lock(&dev->struct_mutex);
+
+	map = idr_find(&dev->ctx_idr, request->ctx_id);
+	if (!map) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	request->handle = NULL;
+	list_for_each_entry(_entry, &dev->maplist, head) {
+		if (_entry->map == map) {
+			request->handle =
+			    (void *)(unsigned long)_entry->user_token;
+			break;
+		}
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	if (request->handle == NULL)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * Set per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping specified in \p arg and updates the entry in
+ * drm_device::ctx_idr with it.
+ */
+int drm_setsareactx(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_ctx_priv_map *request = data;
+	struct drm_local_map *map = NULL;
+	struct drm_map_list *r_list = NULL;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		if (r_list->map
+		    && r_list->user_token == (unsigned long) request->handle)
+			goto found;
+	}
+      bad:
+	mutex_unlock(&dev->struct_mutex);
+	return -EINVAL;
+
+      found:
+	map = r_list->map;
+	if (!map)
+		goto bad;
+
+	if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
+		goto bad;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/*@}*/
+
+/******************************************************************/
+/** \name The actual DRM context handling routines */
+/*@{*/
+
+/**
+ * Switch context.
+ *
+ * \param dev DRM device.
+ * \param old old context handle.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to set drm_device::context_flag.
+ */
+static int drm_context_switch(struct drm_device * dev, int old, int new)
+{
+	if (test_and_set_bit(0, &dev->context_flag)) {
+		DRM_ERROR("Reentering -- FIXME\n");
+		return -EBUSY;
+	}
+
+	DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+	if (new == dev->last_context) {
+		clear_bit(0, &dev->context_flag);
+		return 0;
+	}
+
+	return 0;
+}
+
+/**
+ * Complete context switch.
+ *
+ * \param dev DRM device.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Updates drm_device::last_context and drm_device::last_switch. Verifies the
+ * hardware lock is held, clears the drm_device::context_flag and wakes up
+ * drm_device::context_wait.
+ */
+static int drm_context_switch_complete(struct drm_device *dev,
+				       struct drm_file *file_priv, int new)
+{
+	dev->last_context = new;	/* PRE/POST: This is the _only_ writer. */
+	dev->last_switch = jiffies;
+
+	if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
+		DRM_ERROR("Lock isn't held after context switch\n");
+	}
+
+	/* If a context switch is ever initiated
+	   when the kernel holds the lock, release
+	   that lock here. */
+	clear_bit(0, &dev->context_flag);
+	wake_up(&dev->context_wait);
+
+	return 0;
+}
+
+/**
+ * Reserve contexts.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_res structure.
+ * \return zero on success or a negative number on failure.
+ */
+int drm_resctx(struct drm_device *dev, void *data,
+	       struct drm_file *file_priv)
+{
+	struct drm_ctx_res *res = data;
+	struct drm_ctx ctx;
+	int i;
+
+	if (res->count >= DRM_RESERVED_CONTEXTS) {
+		memset(&ctx, 0, sizeof(ctx));
+		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+			ctx.handle = i;
+			if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
+				return -EFAULT;
+		}
+	}
+	res->count = DRM_RESERVED_CONTEXTS;
+
+	return 0;
+}
+
+/**
+ * Add context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Get a new handle for the context and copy to userspace.
+ */
+int drm_addctx(struct drm_device *dev, void *data,
+	       struct drm_file *file_priv)
+{
+	struct drm_ctx_list *ctx_entry;
+	struct drm_ctx *ctx = data;
+
+	ctx->handle = drm_ctxbitmap_next(dev);
+	if (ctx->handle == DRM_KERNEL_CONTEXT) {
+		/* Skip kernel's context and get a new one. */
+		ctx->handle = drm_ctxbitmap_next(dev);
+	}
+	DRM_DEBUG("%d\n", ctx->handle);
+	if (ctx->handle < 0) {
+		DRM_DEBUG("Not enough free contexts.\n");
+		/* Should this return -EBUSY instead? */
+		return -ENOMEM;
+	}
+
+	ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
+	if (!ctx_entry) {
+		DRM_DEBUG("out of memory\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&ctx_entry->head);
+	ctx_entry->handle = ctx->handle;
+	ctx_entry->tag = file_priv;
+
+	mutex_lock(&dev->ctxlist_mutex);
+	list_add(&ctx_entry->head, &dev->ctxlist);
+	++dev->ctx_count;
+	mutex_unlock(&dev->ctxlist_mutex);
+
+	return 0;
+}
+
+int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	/* This does nothing */
+	return 0;
+}
+
+/**
+ * Get context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ */
+int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_ctx *ctx = data;
+
+	/* This is 0, because we don't handle any context flags */
+	ctx->flags = 0;
+
+	return 0;
+}
+
+/**
+ * Switch context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls drm_context_switch().
+ */
+int drm_switchctx(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct drm_ctx *ctx = data;
+
+	DRM_DEBUG("%d\n", ctx->handle);
+	return drm_context_switch(dev, dev->last_context, ctx->handle);
+}
+
+/**
+ * New context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls drm_context_switch_complete().
+ */
+int drm_newctx(struct drm_device *dev, void *data,
+	       struct drm_file *file_priv)
+{
+	struct drm_ctx *ctx = data;
+
+	DRM_DEBUG("%d\n", ctx->handle);
+	drm_context_switch_complete(dev, file_priv, ctx->handle);
+
+	return 0;
+}
+
+/**
+ * Remove context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * If not the special kernel context, calls drm_ctxbitmap_free() to free the specified context.
+ */
+int drm_rmctx(struct drm_device *dev, void *data,
+	      struct drm_file *file_priv)
+{
+	struct drm_ctx *ctx = data;
+
+	DRM_DEBUG("%d\n", ctx->handle);
+	if (ctx->handle != DRM_KERNEL_CONTEXT) {
+		if (dev->driver->context_dtor)
+			dev->driver->context_dtor(dev, ctx->handle);
+		drm_ctxbitmap_free(dev, ctx->handle);
+	}
+
+	mutex_lock(&dev->ctxlist_mutex);
+	if (!list_empty(&dev->ctxlist)) {
+		struct drm_ctx_list *pos, *n;
+
+		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+			if (pos->handle == ctx->handle) {
+				list_del(&pos->head);
+				kfree(pos);
+				--dev->ctx_count;
+			}
+		}
+	}
+	mutex_unlock(&dev->ctxlist_mutex);
+
+	return 0;
+}
+
+/*@}*/
diff --git a/linux-imx/drivers/gpu/drm/drm_crtc.c b/linux-imx/drivers/gpu/drm/drm_crtc.c
new file mode 100644
index 0000000..8759d69
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_crtc.c
@@ -0,0 +1,3902 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2008 Red Hat Inc.
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ *      Keith Packard
+ *	Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fourcc.h>
+
+/**
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
+ *
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented.
+ */
+void drm_modeset_lock_all(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_modeset_lock_all);
+
+/**
+ * drm_modeset_unlock_all - drop all modeset locks
+ * @dev: device
+ */
+void drm_modeset_unlock_all(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		mutex_unlock(&crtc->mutex);
+
+	mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_modeset_unlock_all);
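+
+/*
+ * Usage sketch (illustrative only): the two functions above bracket any
+ * code path that touches modeset state across all CRTCs:
+ *
+ *	drm_modeset_lock_all(dev);
+ *	(... walk dev->mode_config.crtc_list and update state ...)
+ *	drm_modeset_unlock_all(dev);
+ */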
+
+/**
+ * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
+ * @dev: device
+ */
+void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
+	/* Locking is currently fubar in the panic handler. */
+	if (oops_in_progress)
+		return;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		WARN_ON(!mutex_is_locked(&crtc->mutex));
+
+	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+}
+EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
+
+/* Avoid boilerplate.  I'm tired of typing. */
+#define DRM_ENUM_NAME_FN(fnname, list)				\
+	char *fnname(int val)					\
+	{							\
+		int i;						\
+		for (i = 0; i < ARRAY_SIZE(list); i++) {	\
+			if (list[i].type == val)		\
+				return list[i].name;		\
+		}						\
+		return "(unknown)";				\
+	}
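+
+/*
+ * For example, the drm_dpms_enum_list instantiation below expands to
+ *
+ *	char *drm_get_dpms_name(int val);
+ *
+ * which maps DRM_MODE_DPMS_ON to "On" and unknown values to "(unknown)".
+ */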
+
+/*
+ * Global properties
+ */
+static struct drm_prop_enum_list drm_dpms_enum_list[] =
+{	{ DRM_MODE_DPMS_ON, "On" },
+	{ DRM_MODE_DPMS_STANDBY, "Standby" },
+	{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
+	{ DRM_MODE_DPMS_OFF, "Off" }
+};
+
+DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+
+/*
+ * Optional properties
+ */
+static struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
+{
+	{ DRM_MODE_SCALE_NONE, "None" },
+	{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
+	{ DRM_MODE_SCALE_CENTER, "Center" },
+	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
+};
+
+static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
+{
+	{ DRM_MODE_DITHERING_OFF, "Off" },
+	{ DRM_MODE_DITHERING_ON, "On" },
+	{ DRM_MODE_DITHERING_AUTO, "Automatic" },
+};
+
+/*
+ * Non-global properties, but "required" for certain connectors.
+ */
+static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] =
+{
+	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)
+
+static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] =
+{
+	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
+	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
+};
+
+DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
+		 drm_dvi_i_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_tv_select_enum_list[] =
+{
+	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
+	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)
+
+static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+{
+	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
+	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
+	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
+};
+
+DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+		 drm_tv_subconnector_enum_list)
+
+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+	{ DRM_MODE_DIRTY_OFF,      "Off"      },
+	{ DRM_MODE_DIRTY_ON,       "On"       },
+	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+struct drm_conn_prop_enum_list {
+	int type;
+	char *name;
+	int count;
+};
+
+/*
+ * Connector and encoder types.
+ */
+static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
+{	{ DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
+	{ DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
+	{ DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
+	{ DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
+	{ DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
+	{ DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
+	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
+	{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
+	{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
+	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
+	{ DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
+	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
+	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
+	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
+	{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
+	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
+};
+
+static struct drm_prop_enum_list drm_encoder_enum_list[] =
+{	{ DRM_MODE_ENCODER_NONE, "None" },
+	{ DRM_MODE_ENCODER_DAC, "DAC" },
+	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
+	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
+	{ DRM_MODE_ENCODER_TVDAC, "TV" },
+	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+};
+
+char *drm_get_encoder_name(struct drm_encoder *encoder)
+{
+	static char buf[32];
+
+	snprintf(buf, 32, "%s-%d",
+		 drm_encoder_enum_list[encoder->encoder_type].name,
+		 encoder->base.id);
+	return buf;
+}
+EXPORT_SYMBOL(drm_get_encoder_name);
+
+char *drm_get_connector_name(struct drm_connector *connector)
+{
+	static char buf[32];
+
+	snprintf(buf, 32, "%s-%d",
+		 drm_connector_enum_list[connector->connector_type].name,
+		 connector->connector_type_id);
+	return buf;
+}
+EXPORT_SYMBOL(drm_get_connector_name);
+
+char *drm_get_connector_status_name(enum drm_connector_status status)
+{
+	if (status == connector_status_connected)
+		return "connected";
+	else if (status == connector_status_disconnected)
+		return "disconnected";
+	else
+		return "unknown";
+}
+EXPORT_SYMBOL(drm_get_connector_status_name);
+
+/**
+ * drm_mode_object_get - allocate a new modeset identifier
+ * @dev: DRM device
+ * @obj: object pointer, used to generate unique ID
+ * @obj_type: object type
+ *
+ * Create a unique identifier based on @obj in @dev's identifier space.  Used
+ * for tracking modes, CRTCs and connectors.
+ *
+ * RETURNS:
+ * New unique (relative to other objects in @dev) integer identifier for the
+ * object.
+ */
+static int drm_mode_object_get(struct drm_device *dev,
+			       struct drm_mode_object *obj, uint32_t obj_type)
+{
+	int ret;
+
+	mutex_lock(&dev->mode_config.idr_mutex);
+	ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
+	if (ret >= 0) {
+		/*
+		 * Set up the object linking under the protection of the idr
+		 * lock so that other users can't see inconsistent state.
+		 */
+		obj->id = ret;
+		obj->type = obj_type;
+	}
+	mutex_unlock(&dev->mode_config.idr_mutex);
+
+	return ret < 0 ? ret : 0;
+}
+
+/**
+ * drm_mode_object_put - free a modeset identifier
+ * @dev: DRM device
+ * @object: object to free
+ *
+ * Free @object's identifier from @dev's unique identifier pool.
+ */
+static void drm_mode_object_put(struct drm_device *dev,
+				struct drm_mode_object *object)
+{
+	mutex_lock(&dev->mode_config.idr_mutex);
+	idr_remove(&dev->mode_config.crtc_idr, object->id);
+	mutex_unlock(&dev->mode_config.idr_mutex);
+}
+
+/**
+ * drm_mode_object_find - look up a drm object with static lifetime
+ * @dev: drm device
+ * @id: id of the mode object
+ * @type: type of the mode object
+ *
+ * Note that framebuffers cannot be looked up with this function - since those
+ * are reference counted, they need special treatment.
+ */
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+		uint32_t id, uint32_t type)
+{
+	struct drm_mode_object *obj = NULL;
+
+	/* Framebuffers are reference counted and need their own lookup
+	 * function.*/
+	WARN_ON(type == DRM_MODE_OBJECT_FB);
+
+	mutex_lock(&dev->mode_config.idr_mutex);
+	obj = idr_find(&dev->mode_config.crtc_idr, id);
+	if (!obj || (obj->type != type) || (obj->id != id))
+		obj = NULL;
+	mutex_unlock(&dev->mode_config.idr_mutex);
+
+	return obj;
+}
+EXPORT_SYMBOL(drm_mode_object_find);
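+
+/*
+ * Usage sketch (illustrative, hypothetical caller): look up a CRTC by the
+ * id userspace passed in, checking the object type at the same time:
+ *
+ *	struct drm_mode_object *obj =
+ *		drm_mode_object_find(dev, crtc_id, DRM_MODE_OBJECT_CRTC);
+ *	if (!obj)
+ *		return -EINVAL;
+ *	crtc = obj_to_crtc(obj);
+ */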
+
+/**
+ * drm_framebuffer_init - initialize a framebuffer
+ * @dev: DRM device
+ * @fb: framebuffer to be initialized
+ * @funcs: ... with these functions
+ *
+ * Allocates an ID for the framebuffer's parent mode object, sets its mode
+ * functions & device file and adds it to the master fd list.
+ *
+ * IMPORTANT:
+ * This function publishes the fb and makes it available for concurrent access
+ * by other users, which means that by this point the fb _must_ be fully set up -
+ * since all the fb attributes are invariant over its lifetime, no further
+ * locking but only correct reference counting is required.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+			 const struct drm_framebuffer_funcs *funcs)
+{
+	int ret;
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	kref_init(&fb->refcount);
+	INIT_LIST_HEAD(&fb->filp_head);
+	fb->dev = dev;
+	fb->funcs = funcs;
+
+	ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+	if (ret)
+		goto out;
+
+	/* Grab the idr reference. */
+	drm_framebuffer_reference(fb);
+
+	dev->mode_config.num_fb++;
+	list_add(&fb->head, &dev->mode_config.fb_list);
+out:
+	mutex_unlock(&dev->mode_config.fb_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_framebuffer_init);
+
+static void drm_framebuffer_free(struct kref *kref)
+{
+	struct drm_framebuffer *fb =
+			container_of(kref, struct drm_framebuffer, refcount);
+	fb->funcs->destroy(fb);
+}
+
+static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
+							uint32_t id)
+{
+	struct drm_mode_object *obj = NULL;
+	struct drm_framebuffer *fb;
+
+	mutex_lock(&dev->mode_config.idr_mutex);
+	obj = idr_find(&dev->mode_config.crtc_idr, id);
+	if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id))
+		fb = NULL;
+	else
+		fb = obj_to_fb(obj);
+	mutex_unlock(&dev->mode_config.idr_mutex);
+
+	return fb;
+}
+
+/**
+ * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
+ * @dev: drm device
+ * @id: id of the fb object
+ *
+ * If successful, this grabs an additional reference to the framebuffer -
+ * callers need to make sure to eventually unreference the returned framebuffer
+ * again.
+ */
+struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+					       uint32_t id)
+{
+	struct drm_framebuffer *fb;
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	fb = __drm_framebuffer_lookup(dev, id);
+	if (fb)
+		drm_framebuffer_reference(fb);
+	mutex_unlock(&dev->mode_config.fb_lock);
+
+	return fb;
+}
+EXPORT_SYMBOL(drm_framebuffer_lookup);
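+
+/*
+ * Usage sketch (illustrative only): the lookup returns with an extra
+ * reference held, which the caller must drop when done:
+ *
+ *	struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, fb_id);
+ *	if (!fb)
+ *		return -ENOENT;
+ *	(... use fb ...)
+ *	drm_framebuffer_unreference(fb);
+ */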
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ * @fb: framebuffer to unref
+ *
+ * This function decrements the fb's refcount and frees it if it drops to zero.
+ */
+void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+	DRM_DEBUG("FB ID: %d\n", fb->base.id);
+	kref_put(&fb->refcount, drm_framebuffer_free);
+}
+EXPORT_SYMBOL(drm_framebuffer_unreference);
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ * @fb: framebuffer
+ */
+void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+	DRM_DEBUG("FB ID: %d\n", fb->base.id);
+	kref_get(&fb->refcount);
+}
+EXPORT_SYMBOL(drm_framebuffer_reference);
+
+static void drm_framebuffer_free_bug(struct kref *kref)
+{
+	BUG();
+}
+
+static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+	DRM_DEBUG("FB ID: %d\n", fb->base.id);
+	kref_put(&fb->refcount, drm_framebuffer_free_bug);
+}
+
+/* dev->mode_config.fb_lock must be held! */
+static void __drm_framebuffer_unregister(struct drm_device *dev,
+					 struct drm_framebuffer *fb)
+{
+	mutex_lock(&dev->mode_config.idr_mutex);
+	idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
+	mutex_unlock(&dev->mode_config.idr_mutex);
+
+	fb->base.id = 0;
+
+	__drm_framebuffer_unreference(fb);
+}
+
+/**
+ * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
+ * @fb: fb to unregister
+ *
+ * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
+ * those used for fbdev. Note that the caller must hold a reference of its own,
+ * i.e. the object may not be destroyed through this call (since that would lead to a
+ * locking inversion).
+ */
+void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	/* Mark fb as reaped and drop idr ref. */
+	__drm_framebuffer_unregister(dev, fb);
+	mutex_unlock(&dev->mode_config.fb_lock);
+}
+EXPORT_SYMBOL(drm_framebuffer_unregister_private);
+
+/**
+ * drm_framebuffer_cleanup - remove a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Cleanup references to a user-created framebuffer. This function is intended
+ * to be used from the driver's ->destroy callback.
+ *
+ * Note that this function does not remove the fb from active usage - if it is
+ * still used anywhere, hilarity can ensue since userspace could call getfb on
+ * the id and get back -EINVAL. Obviously this is no concern at driver unload time.
+ *
+ * Also, the framebuffer will not be removed from the lookup idr - for
+ * user-created framebuffers this will happen in the rmfb ioctl. For
+ * driver-private objects (e.g. for fbdev) drivers need to explicitly call
+ * drm_framebuffer_unregister_private.
+ */
+void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	list_del(&fb->head);
+	dev->mode_config.num_fb--;
+	mutex_unlock(&dev->mode_config.fb_lock);
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * Scans all the CRTCs and planes in @dev's mode_config.  If they're
+ * using @fb, removes it, setting it to NULL. Then drops the reference to the
+ * passed-in framebuffer. Might take the modeset locks.
+ *
+ * Note that this function optimizes the cleanup away if the caller holds the
+ * last reference to the framebuffer. It is also guaranteed to not take the
+ * modeset locks in this case.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	struct drm_mode_set set;
+	int ret;
+
+	WARN_ON(!list_empty(&fb->filp_head));
+
+	/*
+	 * drm ABI mandates that we remove any deleted framebuffers from active
+	 * usage. But since most sane clients only remove framebuffers they no
+	 * longer need, try to optimize this away.
+	 *
+	 * Since we're holding a reference ourselves, observing a refcount of 1
+	 * means that we're the last holder and can skip it. Also, the refcount
+	 * can never increase from 1 again, so we don't need any barriers or
+	 * locks.
+	 *
+	 * Note that userspace could try to race with us and instate a new
+	 * usage _after_ we've cleared all current ones. End result will be an
+	 * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
+	 * in this manner.
+	 */
+	if (atomic_read(&fb->refcount.refcount) > 1) {
+		drm_modeset_lock_all(dev);
+		/* remove from any CRTC */
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			if (crtc->fb == fb) {
+				/* should turn off the crtc */
+				memset(&set, 0, sizeof(struct drm_mode_set));
+				set.crtc = crtc;
+				set.fb = NULL;
+				ret = drm_mode_set_config_internal(&set);
+				if (ret)
+					DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+			}
+		}
+
+		list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+			if (plane->fb == fb) {
+				/* should turn off the crtc */
+				ret = plane->funcs->disable_plane(plane);
+				if (ret)
+					DRM_ERROR("failed to disable plane with busy fb\n");
+				/* disconnect the plane from the fb and crtc: */
+				__drm_framebuffer_unreference(plane->fb);
+				plane->fb = NULL;
+				plane->crtc = NULL;
+			}
+		}
+		drm_modeset_unlock_all(dev);
+	}
+
+	drm_framebuffer_unreference(fb);
+}
+EXPORT_SYMBOL(drm_framebuffer_remove);
+
+/**
+ * drm_crtc_init - Initialise a new CRTC object
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @funcs: callbacks for the new CRTC
+ *
+ * Inits a new object created as the base part of a driver crtc object.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+		   const struct drm_crtc_funcs *funcs)
+{
+	int ret;
+
+	crtc->dev = dev;
+	crtc->funcs = funcs;
+	crtc->invert_dimensions = false;
+
+	drm_modeset_lock_all(dev);
+	mutex_init(&crtc->mutex);
+	mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
+
+	ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
+	if (ret)
+		goto out;
+
+	crtc->base.properties = &crtc->properties;
+
+	list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
+	dev->mode_config.num_crtc++;
+
+ out:
+	drm_modeset_unlock_all(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_crtc_init);
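+
+/*
+ * Usage sketch (illustrative; the "foo" names are hypothetical): a driver
+ * embeds struct drm_crtc in its own crtc object and initializes the base:
+ *
+ *	struct foo_crtc {
+ *		struct drm_crtc base;
+ *		(driver-private state ...)
+ *	};
+ *
+ *	ret = drm_crtc_init(dev, &foo->base, &foo_crtc_funcs);
+ *	if (ret)
+ *		return ret;
+ */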
+
+/**
+ * drm_crtc_cleanup - Cleans up the core crtc usage.
+ * @crtc: CRTC to cleanup
+ *
+ * Cleans up @crtc and removes it from the DRM mode setting core. Note that
+ * this does NOT free the object; the caller is responsible for that.
+ */
+void drm_crtc_cleanup(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+
+	kfree(crtc->gamma_store);
+	crtc->gamma_store = NULL;
+
+	drm_mode_object_put(dev, &crtc->base);
+	list_del(&crtc->head);
+	dev->mode_config.num_crtc--;
+}
+EXPORT_SYMBOL(drm_crtc_cleanup);
+
+/**
+ * drm_mode_probed_add - add a mode to a connector's probed mode list
+ * @connector: connector the new mode is added to
+ * @mode: mode data
+ *
+ * Add @mode to @connector's mode list for later use.
+ */
+void drm_mode_probed_add(struct drm_connector *connector,
+			 struct drm_display_mode *mode)
+{
+	list_add(&mode->head, &connector->probed_modes);
+}
+EXPORT_SYMBOL(drm_mode_probed_add);
+
+/**
+ * drm_mode_remove - remove and free a mode
+ * @connector: connector list to modify
+ * @mode: mode to remove
+ *
+ * Remove @mode from @connector's mode list, then free it.
+ */
+void drm_mode_remove(struct drm_connector *connector,
+		     struct drm_display_mode *mode)
+{
+	list_del(&mode->head);
+	drm_mode_destroy(connector->dev, mode);
+}
+EXPORT_SYMBOL(drm_mode_remove);
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_init(struct drm_device *dev,
+		       struct drm_connector *connector,
+		       const struct drm_connector_funcs *funcs,
+		       int connector_type)
+{
+	int ret;
+
+	drm_modeset_lock_all(dev);
+
+	ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+	if (ret)
+		goto out;
+
+	connector->base.properties = &connector->properties;
+	connector->dev = dev;
+	connector->funcs = funcs;
+	connector->connector_type = connector_type;
+	connector->connector_type_id =
+		++drm_connector_enum_list[connector_type].count; /* TODO */
+	INIT_LIST_HEAD(&connector->probed_modes);
+	INIT_LIST_HEAD(&connector->modes);
+	connector->edid_blob_ptr = NULL;
+	connector->status = connector_status_unknown;
+
+	list_add_tail(&connector->head, &dev->mode_config.connector_list);
+	dev->mode_config.num_connector++;
+
+	if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+		drm_object_attach_property(&connector->base,
+					      dev->mode_config.edid_property,
+					      0);
+
+	drm_object_attach_property(&connector->base,
+				      dev->mode_config.dpms_property, 0);
+
+ out:
+	drm_modeset_unlock_all(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_connector_init);
+
+/**
+ * drm_connector_cleanup - cleans up an initialised connector
+ * @connector: connector to cleanup
+ *
+ * Cleans up the connector but doesn't free the object.
+ */
+void drm_connector_cleanup(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode, *t;
+
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+		drm_mode_remove(connector, mode);
+
+	list_for_each_entry_safe(mode, t, &connector->modes, head)
+		drm_mode_remove(connector, mode);
+
+	drm_mode_object_put(dev, &connector->base);
+	list_del(&connector->head);
+	dev->mode_config.num_connector--;
+}
+EXPORT_SYMBOL(drm_connector_cleanup);
+
+void drm_connector_unplug_all(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+
+	/* taking the mode config mutex ends up in a clash with sysfs */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		drm_sysfs_connector_remove(connector);
+
+}
+EXPORT_SYMBOL(drm_connector_unplug_all);
+
+int drm_encoder_init(struct drm_device *dev,
+		      struct drm_encoder *encoder,
+		      const struct drm_encoder_funcs *funcs,
+		      int encoder_type)
+{
+	int ret;
+
+	drm_modeset_lock_all(dev);
+
+	ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
+	if (ret)
+		goto out;
+
+	encoder->dev = dev;
+	encoder->encoder_type = encoder_type;
+	encoder->funcs = funcs;
+
+	list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+	dev->mode_config.num_encoder++;
+
+ out:
+	drm_modeset_unlock_all(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_encoder_init);
+
+void drm_encoder_cleanup(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	drm_modeset_lock_all(dev);
+	drm_mode_object_put(dev, &encoder->base);
+	list_del(&encoder->head);
+	dev->mode_config.num_encoder--;
+	drm_modeset_unlock_all(dev);
+}
+EXPORT_SYMBOL(drm_encoder_cleanup);
+
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+		   unsigned long possible_crtcs,
+		   const struct drm_plane_funcs *funcs,
+		   const uint32_t *formats, uint32_t format_count,
+		   bool priv)
+{
+	int ret;
+
+	drm_modeset_lock_all(dev);
+
+	ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+	if (ret)
+		goto out;
+
+	plane->base.properties = &plane->properties;
+	plane->dev = dev;
+	plane->funcs = funcs;
+	plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
+				      GFP_KERNEL);
+	if (!plane->format_types) {
+		DRM_DEBUG_KMS("out of memory when allocating plane\n");
+		drm_mode_object_put(dev, &plane->base);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+	plane->format_count = format_count;
+	plane->possible_crtcs = possible_crtcs;
+
+	/* private planes are not exposed to userspace, but depending on the
+	 * display hardware it might be convenient to allow sharing the
+	 * scanout engine programming with the crtc implementation.
+	 */
+	if (!priv) {
+		list_add_tail(&plane->head, &dev->mode_config.plane_list);
+		dev->mode_config.num_plane++;
+	} else {
+		INIT_LIST_HEAD(&plane->head);
+	}
+
+ out:
+	drm_modeset_unlock_all(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_plane_init);
+
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+	struct drm_device *dev = plane->dev;
+
+	drm_modeset_lock_all(dev);
+	kfree(plane->format_types);
+	drm_mode_object_put(dev, &plane->base);
+	/* if not added to a list, it must be a private plane */
+	if (!list_empty(&plane->head)) {
+		list_del(&plane->head);
+		dev->mode_config.num_plane--;
+	}
+	drm_modeset_unlock_all(dev);
+}
+EXPORT_SYMBOL(drm_plane_cleanup);
+
+/**
+ * drm_mode_create - create a new display mode
+ * @dev: DRM device
+ *
+ * Create a new drm_display_mode, give it an ID, and return it.
+ *
+ * RETURNS:
+ * Pointer to new mode on success, NULL on error.
+ */
+struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+{
+	struct drm_display_mode *nmode;
+
+	nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL);
+	if (!nmode)
+		return NULL;
+
+	if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
+		kfree(nmode);
+		return NULL;
+	}
+
+	return nmode;
+}
+EXPORT_SYMBOL(drm_mode_create);
+
+/**
+ * drm_mode_destroy - remove a mode
+ * @dev: DRM device
+ * @mode: mode to remove
+ *
+ * Free @mode's unique identifier, then free it.
+ */
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+{
+	if (!mode)
+		return;
+
+	drm_mode_object_put(dev, &mode->base);
+
+	kfree(mode);
+}
+EXPORT_SYMBOL(drm_mode_destroy);
+
+static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+{
+	struct drm_property *edid;
+	struct drm_property *dpms;
+
+	/*
+	 * Standard properties (apply to all connectors)
+	 */
+	edid = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+				   DRM_MODE_PROP_IMMUTABLE,
+				   "EDID", 0);
+	dev->mode_config.edid_property = edid;
+
+	dpms = drm_property_create_enum(dev, 0,
+				   "DPMS", drm_dpms_enum_list,
+				   ARRAY_SIZE(drm_dpms_enum_list));
+	dev->mode_config.dpms_property = dpms;
+
+	return 0;
+}
+
+/**
+ * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties
+ * @dev: DRM device
+ *
+ * Called by a driver the first time a DVI-I connector is made.
+ */
+int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+{
+	struct drm_property *dvi_i_selector;
+	struct drm_property *dvi_i_subconnector;
+
+	if (dev->mode_config.dvi_i_select_subconnector_property)
+		return 0;
+
+	dvi_i_selector =
+		drm_property_create_enum(dev, 0,
+				    "select subconnector",
+				    drm_dvi_i_select_enum_list,
+				    ARRAY_SIZE(drm_dvi_i_select_enum_list));
+	dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+
+	dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+				    "subconnector",
+				    drm_dvi_i_subconnector_enum_list,
+				    ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+	dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
+
+/**
+ * drm_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @num_modes: number of different TV formats (modes) supported
+ * @modes: array of pointers to strings containing name of each format
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device.  Caller is
+ * responsible for allocating a list of format names and passing them to
+ * this routine.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+				  char *modes[])
+{
+	struct drm_property *tv_selector;
+	struct drm_property *tv_subconnector;
+	int i;
+
+	if (dev->mode_config.tv_select_subconnector_property)
+		return 0;
+
+	/*
+	 * Basic connector properties
+	 */
+	tv_selector = drm_property_create_enum(dev, 0,
+					  "select subconnector",
+					  drm_tv_select_enum_list,
+					  ARRAY_SIZE(drm_tv_select_enum_list));
+	dev->mode_config.tv_select_subconnector_property = tv_selector;
+
+	tv_subconnector =
+		drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+				    "subconnector",
+				    drm_tv_subconnector_enum_list,
+				    ARRAY_SIZE(drm_tv_subconnector_enum_list));
+	dev->mode_config.tv_subconnector_property = tv_subconnector;
+
+	/*
+	 * Other, TV specific properties: margins & TV modes.
+	 */
+	dev->mode_config.tv_left_margin_property =
+		drm_property_create_range(dev, 0, "left margin", 0, 100);
+
+	dev->mode_config.tv_right_margin_property =
+		drm_property_create_range(dev, 0, "right margin", 0, 100);
+
+	dev->mode_config.tv_top_margin_property =
+		drm_property_create_range(dev, 0, "top margin", 0, 100);
+
+	dev->mode_config.tv_bottom_margin_property =
+		drm_property_create_range(dev, 0, "bottom margin", 0, 100);
+
+	dev->mode_config.tv_mode_property =
+		drm_property_create(dev, DRM_MODE_PROP_ENUM,
+				    "mode", num_modes);
+	for (i = 0; i < num_modes; i++)
+		drm_property_add_enum(dev->mode_config.tv_mode_property, i,
+				      i, modes[i]);
+
+	dev->mode_config.tv_brightness_property =
+		drm_property_create_range(dev, 0, "brightness", 0, 100);
+
+	dev->mode_config.tv_contrast_property =
+		drm_property_create_range(dev, 0, "contrast", 0, 100);
+
+	dev->mode_config.tv_flicker_reduction_property =
+		drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
+
+	dev->mode_config.tv_overscan_property =
+		drm_property_create_range(dev, 0, "overscan", 0, 100);
+
+	dev->mode_config.tv_saturation_property =
+		drm_property_create_range(dev, 0, "saturation", 0, 100);
+
+	dev->mode_config.tv_hue_property =
+		drm_property_create_range(dev, 0, "hue", 0, 100);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_tv_properties);
+
+/**
+ * drm_mode_create_scaling_mode_property - create scaling mode property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed; the property must then be
+ * attached to the desired connectors.
+ */
+int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+{
+	struct drm_property *scaling_mode;
+
+	if (dev->mode_config.scaling_mode_property)
+		return 0;
+
+	scaling_mode =
+		drm_property_create_enum(dev, 0, "scaling mode",
+				drm_scaling_mode_enum_list,
+				    ARRAY_SIZE(drm_scaling_mode_enum_list));
+
+	dev->mode_config.scaling_mode_property = scaling_mode;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
+
+/**
+ * drm_mode_create_dithering_property - create dithering property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed; the property must then be
+ * attached to the desired connectors.
+ */
+int drm_mode_create_dithering_property(struct drm_device *dev)
+{
+	struct drm_property *dithering_mode;
+
+	if (dev->mode_config.dithering_mode_property)
+		return 0;
+
+	dithering_mode =
+		drm_property_create_enum(dev, 0, "dithering",
+				drm_dithering_mode_enum_list,
+				    ARRAY_SIZE(drm_dithering_mode_enum_list));
+	dev->mode_config.dithering_mode_property = dithering_mode;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dithering_property);
+
+/**
+ * drm_mode_create_dirty_info_property - create dirty info property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed; the property must then be
+ * attached to the desired connectors.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+	struct drm_property *dirty_info;
+
+	if (dev->mode_config.dirty_info_property)
+		return 0;
+
+	dirty_info =
+		drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+				    "dirty",
+				    drm_dirty_info_enum_list,
+				    ARRAY_SIZE(drm_dirty_info_enum_list));
+	dev->mode_config.dirty_info_property = dirty_info;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
+static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
+{
+	uint32_t total_objects = 0;
+
+	total_objects += dev->mode_config.num_crtc;
+	total_objects += dev->mode_config.num_connector;
+	total_objects += dev->mode_config.num_encoder;
+
+	group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
+	if (!group->id_list)
+		return -ENOMEM;
+
+	group->num_crtcs = 0;
+	group->num_connectors = 0;
+	group->num_encoders = 0;
+	return 0;
+}
+
+int drm_mode_group_init_legacy_group(struct drm_device *dev,
+				     struct drm_mode_group *group)
+{
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int ret;
+
+	ret = drm_mode_group_init(dev, group);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		group->id_list[group->num_crtcs++] = crtc->base.id;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		group->id_list[group->num_crtcs + group->num_encoders++] =
+			encoder->base.id;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		group->id_list[group->num_crtcs + group->num_encoders +
+			       group->num_connectors++] = connector->base.id;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
+
+/**
+ * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
+ * @out: drm_mode_modeinfo struct to return to the user
+ * @in: drm_display_mode to use
+ *
+ * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
+ * the user.
+ */
+static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+				      const struct drm_display_mode *in)
+{
+	WARN(in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
+	     in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
+	     in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
+	     in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
+	     in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX,
+	     "timing values too large for mode info\n");
+
+	out->clock = in->clock;
+	out->hdisplay = in->hdisplay;
+	out->hsync_start = in->hsync_start;
+	out->hsync_end = in->hsync_end;
+	out->htotal = in->htotal;
+	out->hskew = in->hskew;
+	out->vdisplay = in->vdisplay;
+	out->vsync_start = in->vsync_start;
+	out->vsync_end = in->vsync_end;
+	out->vtotal = in->vtotal;
+	out->vscan = in->vscan;
+	out->vrefresh = in->vrefresh;
+	out->flags = in->flags;
+	out->type = in->type;
+	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+}
+
+/**
+ * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
+ * @out: drm_display_mode to return to the caller
+ * @in: drm_mode_modeinfo to use
+ *
+ * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
+ * the caller.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+static int drm_crtc_convert_umode(struct drm_display_mode *out,
+				  const struct drm_mode_modeinfo *in)
+{
+	if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
+		return -ERANGE;
+
+	out->clock = in->clock;
+	out->hdisplay = in->hdisplay;
+	out->hsync_start = in->hsync_start;
+	out->hsync_end = in->hsync_end;
+	out->htotal = in->htotal;
+	out->hskew = in->hskew;
+	out->vdisplay = in->vdisplay;
+	out->vsync_start = in->vsync_start;
+	out->vsync_end = in->vsync_end;
+	out->vtotal = in->vtotal;
+	out->vscan = in->vscan;
+	out->vrefresh = in->vrefresh;
+	out->flags = in->flags;
+	out->type = in->type;
+	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+
+	return 0;
+}
+
+/**
+ * drm_mode_getresources - get graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Construct a set of configuration description structures and return
+ * them to the user, including CRTC, connector and framebuffer configuration.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getresources(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_mode_card_res *card_res = data;
+	struct list_head *lh;
+	struct drm_framebuffer *fb;
+	struct drm_connector *connector;
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	int ret = 0;
+	int connector_count = 0;
+	int crtc_count = 0;
+	int fb_count = 0;
+	int encoder_count = 0;
+	int copied = 0, i;
+	uint32_t __user *fb_id;
+	uint32_t __user *crtc_id;
+	uint32_t __user *connector_id;
+	uint32_t __user *encoder_id;
+	struct drm_mode_group *mode_group;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	mutex_lock(&file_priv->fbs_lock);
+	/*
+	 * For the non-control nodes we need to limit the list of resources
+	 * by IDs in the group list for this node
+	 */
+	list_for_each(lh, &file_priv->fbs)
+		fb_count++;
+
+	/* handle this in 4 parts */
+	/* FBs */
+	if (card_res->count_fbs >= fb_count) {
+		copied = 0;
+		fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
+		list_for_each_entry(fb, &file_priv->fbs, filp_head) {
+			if (put_user(fb->base.id, fb_id + copied)) {
+				mutex_unlock(&file_priv->fbs_lock);
+				return -EFAULT;
+			}
+			copied++;
+		}
+	}
+	card_res->count_fbs = fb_count;
+	mutex_unlock(&file_priv->fbs_lock);
+
+	drm_modeset_lock_all(dev);
+	mode_group = &file_priv->master->minor->mode_group;
+	if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+		list_for_each(lh, &dev->mode_config.crtc_list)
+			crtc_count++;
+
+		list_for_each(lh, &dev->mode_config.connector_list)
+			connector_count++;
+
+		list_for_each(lh, &dev->mode_config.encoder_list)
+			encoder_count++;
+	} else {
+		crtc_count = mode_group->num_crtcs;
+		connector_count = mode_group->num_connectors;
+		encoder_count = mode_group->num_encoders;
+	}
+
+	card_res->max_height = dev->mode_config.max_height;
+	card_res->min_height = dev->mode_config.min_height;
+	card_res->max_width = dev->mode_config.max_width;
+	card_res->min_width = dev->mode_config.min_width;
+
+	/* CRTCs */
+	if (card_res->count_crtcs >= crtc_count) {
+		copied = 0;
+		crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
+		if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+			list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+					    head) {
+				DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+				if (put_user(crtc->base.id, crtc_id + copied)) {
+					ret = -EFAULT;
+					goto out;
+				}
+				copied++;
+			}
+		} else {
+			for (i = 0; i < mode_group->num_crtcs; i++) {
+				if (put_user(mode_group->id_list[i],
+					     crtc_id + copied)) {
+					ret = -EFAULT;
+					goto out;
+				}
+				copied++;
+			}
+		}
+	}
+	card_res->count_crtcs = crtc_count;
+
+	/* Encoders */
+	if (card_res->count_encoders >= encoder_count) {
+		copied = 0;
+		encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
+		if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+			list_for_each_entry(encoder,
+					    &dev->mode_config.encoder_list,
+					    head) {
+				DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+						drm_get_encoder_name(encoder));
+				if (put_user(encoder->base.id, encoder_id +
+					     copied)) {
+					ret = -EFAULT;
+					goto out;
+				}
+				copied++;
+			}
+		} else {
+			for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) {
+				if (put_user(mode_group->id_list[i],
+					     encoder_id + copied)) {
+					ret = -EFAULT;
+					goto out;
+				}
+				copied++;
+			}
+		}
+	}
+	card_res->count_encoders = encoder_count;
+
+	/* Connectors */
+	if (card_res->count_connectors >= connector_count) {
+		copied = 0;
+		connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
+		if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
+			list_for_each_entry(connector,
+					    &dev->mode_config.connector_list,
+					    head) {
+				DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+					connector->base.id,
+					drm_get_connector_name(connector));
+				if (put_user(connector->base.id,
+					     connector_id + copied)) {
+					ret = -EFAULT;
+					goto out;
+				}
+				copied++;
+			}
+		} else {
+			int start = mode_group->num_crtcs +
+				mode_group->num_encoders;
+			for (i = start; i < start + mode_group->num_connectors; i++) {
+				if (put_user(mode_group->id_list[i],
+					     connector_id + copied)) {
+					ret = -EFAULT;
+					goto out;
+				}
+				copied++;
+			}
+		}
+	}
+	card_res->count_connectors = connector_count;
+
+	DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
+		  card_res->count_connectors, card_res->count_encoders);
+
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+/**
+ * drm_mode_getcrtc - get CRTC configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Construct a CRTC configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getcrtc(struct drm_device *dev,
+		     void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_crtc *crtc_resp = data;
+	struct drm_crtc *crtc;
+	struct drm_mode_object *obj;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+
+	obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
+				   DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		ret = -EINVAL;
+		goto out;
+	}
+	crtc = obj_to_crtc(obj);
+
+	crtc_resp->x = crtc->x;
+	crtc_resp->y = crtc->y;
+	crtc_resp->gamma_size = crtc->gamma_size;
+	if (crtc->fb)
+		crtc_resp->fb_id = crtc->fb->base.id;
+	else
+		crtc_resp->fb_id = 0;
+
+	if (crtc->enabled) {
+		drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+		crtc_resp->mode_valid = 1;
+	} else {
+		crtc_resp->mode_valid = 0;
+	}
+
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+/**
+ * drm_mode_getconnector - get connector configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Construct a connector configuration structure to return to the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getconnector(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_mode_get_connector *out_resp = data;
+	struct drm_mode_object *obj;
+	struct drm_connector *connector;
+	struct drm_display_mode *mode;
+	int mode_count = 0;
+	int props_count = 0;
+	int encoders_count = 0;
+	int ret = 0;
+	int copied = 0;
+	int i;
+	struct drm_mode_modeinfo u_mode;
+	struct drm_mode_modeinfo __user *mode_ptr;
+	uint32_t __user *prop_ptr;
+	uint64_t __user *prop_values;
+	uint32_t __user *encoder_ptr;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	obj = drm_mode_object_find(dev, out_resp->connector_id,
+				   DRM_MODE_OBJECT_CONNECTOR);
+	if (!obj) {
+		ret = -EINVAL;
+		goto out;
+	}
+	connector = obj_to_connector(obj);
+
+	props_count = connector->properties.count;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] != 0) {
+			encoders_count++;
+		}
+	}
+
+	if (out_resp->count_modes == 0) {
+		connector->funcs->fill_modes(connector,
+					     dev->mode_config.max_width,
+					     dev->mode_config.max_height);
+	}
+
+	/* delayed so we get modes regardless of pre-fill_modes state */
+	list_for_each_entry(mode, &connector->modes, head)
+		mode_count++;
+
+	out_resp->connector_id = connector->base.id;
+	out_resp->connector_type = connector->connector_type;
+	out_resp->connector_type_id = connector->connector_type_id;
+	out_resp->mm_width = connector->display_info.width_mm;
+	out_resp->mm_height = connector->display_info.height_mm;
+	out_resp->subpixel = connector->display_info.subpixel_order;
+	out_resp->connection = connector->status;
+	if (connector->encoder)
+		out_resp->encoder_id = connector->encoder->base.id;
+	else
+		out_resp->encoder_id = 0;
+
+	/*
+	 * This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it.
+	 */
+	if ((out_resp->count_modes >= mode_count) && mode_count) {
+		copied = 0;
+		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
+		list_for_each_entry(mode, &connector->modes, head) {
+			drm_crtc_convert_to_umode(&u_mode, mode);
+			if (copy_to_user(mode_ptr + copied,
+					 &u_mode, sizeof(u_mode))) {
+				ret = -EFAULT;
+				goto out;
+			}
+			copied++;
+		}
+	}
+	out_resp->count_modes = mode_count;
+
+	if ((out_resp->count_props >= props_count) && props_count) {
+		copied = 0;
+		prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
+		prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
+		for (i = 0; i < connector->properties.count; i++) {
+			if (put_user(connector->properties.ids[i],
+				     prop_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
+
+			if (put_user(connector->properties.values[i],
+				     prop_values + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			copied++;
+		}
+	}
+	out_resp->count_props = props_count;
+
+	if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+		copied = 0;
+		encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
+		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+			if (connector->encoder_ids[i] != 0) {
+				if (put_user(connector->encoder_ids[i],
+					     encoder_ptr + copied)) {
+					ret = -EFAULT;
+					goto out;
+				}
+				copied++;
+			}
+		}
+	}
+	out_resp->count_encoders = encoders_count;
+
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
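+
+/*
+ * Illustrative sketch (not part of the original source): the two-pass
+ * convention above as seen from userspace.  A first call with zeroed
+ * counts probes the connector and reports the array sizes; the second
+ * call fills the caller's buffers.  fd and connector_id are assumed to
+ * exist, and error handling is omitted.
+ */
+#if 0	/* example only, userspace style */
+	struct drm_mode_get_connector arg;
+
+	memset(&arg, 0, sizeof(arg));
+	arg.connector_id = connector_id;
+	ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &arg);	/* query sizes */
+
+	arg.modes_ptr = (uint64_t)(unsigned long)
+		malloc(arg.count_modes * sizeof(struct drm_mode_modeinfo));
+	ioctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &arg);	/* fill modes */
+#endif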
+
+int drm_mode_getencoder(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_mode_get_encoder *enc_resp = data;
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+	obj = drm_mode_object_find(dev, enc_resp->encoder_id,
+				   DRM_MODE_OBJECT_ENCODER);
+	if (!obj) {
+		ret = -EINVAL;
+		goto out;
+	}
+	encoder = obj_to_encoder(obj);
+
+	if (encoder->crtc)
+		enc_resp->crtc_id = encoder->crtc->base.id;
+	else
+		enc_resp->crtc_id = 0;
+	enc_resp->encoder_type = encoder->encoder_type;
+	enc_resp->encoder_id = encoder->base.id;
+	enc_resp->possible_crtcs = encoder->possible_crtcs;
+	enc_resp->possible_clones = encoder->possible_clones;
+
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+/**
+ * drm_mode_getplane_res - enumerate plane resources
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return a plane count and a set of IDs.
+ */
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	struct drm_mode_get_plane_res *plane_resp = data;
+	struct drm_mode_config *config;
+	struct drm_plane *plane;
+	uint32_t __user *plane_ptr;
+	int copied = 0, ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+	config = &dev->mode_config;
+
+	/*
+	 * This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it.
+	 */
+	if (config->num_plane &&
+	    (plane_resp->count_planes >= config->num_plane)) {
+		plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
+		list_for_each_entry(plane, &config->plane_list, head) {
+			if (put_user(plane->base.id, plane_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			copied++;
+		}
+	}
+	plane_resp->count_planes = config->num_plane;
+
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+/**
+ * drm_mode_getplane - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return plane info, including formats supported, gamma size, any
+ * current fb, etc.
+ */
+int drm_mode_getplane(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_mode_get_plane *plane_resp = data;
+	struct drm_mode_object *obj;
+	struct drm_plane *plane;
+	uint32_t __user *format_ptr;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+	obj = drm_mode_object_find(dev, plane_resp->plane_id,
+				   DRM_MODE_OBJECT_PLANE);
+	if (!obj) {
+		ret = -ENOENT;
+		goto out;
+	}
+	plane = obj_to_plane(obj);
+
+	if (plane->crtc)
+		plane_resp->crtc_id = plane->crtc->base.id;
+	else
+		plane_resp->crtc_id = 0;
+
+	if (plane->fb)
+		plane_resp->fb_id = plane->fb->base.id;
+	else
+		plane_resp->fb_id = 0;
+
+	plane_resp->plane_id = plane->base.id;
+	plane_resp->possible_crtcs = plane->possible_crtcs;
+	plane_resp->gamma_size = plane->gamma_size;
+
+	/*
+	 * This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it.
+	 */
+	if (plane->format_count &&
+	    (plane_resp->count_format_types >= plane->format_count)) {
+		format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+		if (copy_to_user(format_ptr,
+				 plane->format_types,
+				 sizeof(uint32_t) * plane->format_count)) {
+			ret = -EFAULT;
+			goto out;
+		}
+	}
+	plane_resp->count_format_types = plane->format_count;
+
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+/**
+ * drm_mode_setplane - set up or tear down a plane
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set plane info, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_mode_set_plane *plane_req = data;
+	struct drm_mode_object *obj;
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+	int ret = 0;
+	unsigned int fb_width, fb_height;
+	int i;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	/*
+	 * First, find the plane, crtc, and fb objects.  If not available,
+	 * we don't bother to call the driver.
+	 */
+	obj = drm_mode_object_find(dev, plane_req->plane_id,
+				   DRM_MODE_OBJECT_PLANE);
+	if (!obj) {
+		DRM_DEBUG_KMS("Unknown plane ID %d\n",
+			      plane_req->plane_id);
+		return -ENOENT;
+	}
+	plane = obj_to_plane(obj);
+
+	/* No fb means shut it down */
+	if (!plane_req->fb_id) {
+		drm_modeset_lock_all(dev);
+		old_fb = plane->fb;
+		plane->funcs->disable_plane(plane);
+		plane->crtc = NULL;
+		plane->fb = NULL;
+		drm_modeset_unlock_all(dev);
+		goto out;
+	}
+
+	obj = drm_mode_object_find(dev, plane_req->crtc_id,
+				   DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+			      plane_req->crtc_id);
+		ret = -ENOENT;
+		goto out;
+	}
+	crtc = obj_to_crtc(obj);
+
+	fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+	if (!fb) {
+		DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+			      plane_req->fb_id);
+		ret = -ENOENT;
+		goto out;
+	}
+
+	/* Check whether this plane supports the fb pixel format. */
+	for (i = 0; i < plane->format_count; i++)
+		if (fb->pixel_format == plane->format_types[i])
+			break;
+	if (i == plane->format_count) {
+		DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	fb_width = fb->width << 16;
+	fb_height = fb->height << 16;
+
+	/* Make sure source coordinates are inside the fb. */
+	if (plane_req->src_w > fb_width ||
+	    plane_req->src_x > fb_width - plane_req->src_w ||
+	    plane_req->src_h > fb_height ||
+	    plane_req->src_y > fb_height - plane_req->src_h) {
+		DRM_DEBUG_KMS("Invalid source coordinates "
+			      "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+			      plane_req->src_w >> 16,
+			      ((plane_req->src_w & 0xffff) * 15625) >> 10,
+			      plane_req->src_h >> 16,
+			      ((plane_req->src_h & 0xffff) * 15625) >> 10,
+			      plane_req->src_x >> 16,
+			      ((plane_req->src_x & 0xffff) * 15625) >> 10,
+			      plane_req->src_y >> 16,
+			      ((plane_req->src_y & 0xffff) * 15625) >> 10);
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	/* Give drivers some help against integer overflows */
+	if (plane_req->crtc_w > INT_MAX ||
+	    plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+	    plane_req->crtc_h > INT_MAX ||
+	    plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+		DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+			      plane_req->crtc_w, plane_req->crtc_h,
+			      plane_req->crtc_x, plane_req->crtc_y);
+		ret = -ERANGE;
+		goto out;
+	}
+
+	drm_modeset_lock_all(dev);
+	ret = plane->funcs->update_plane(plane, crtc, fb,
+					 plane_req->crtc_x, plane_req->crtc_y,
+					 plane_req->crtc_w, plane_req->crtc_h,
+					 plane_req->src_x, plane_req->src_y,
+					 plane_req->src_w, plane_req->src_h);
+	if (!ret) {
+		old_fb = plane->fb;
+		plane->crtc = crtc;
+		plane->fb = fb;
+		fb = NULL;
+	}
+	drm_modeset_unlock_all(dev);
+
+out:
+	if (fb)
+		drm_framebuffer_unreference(fb);
+	if (old_fb)
+		drm_framebuffer_unreference(old_fb);
+
+	return ret;
+}
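+
+/*
+ * Illustrative sketch (not part of the original source): the src_*
+ * members checked above are 16.16 fixed point while the crtc_* members
+ * are whole pixels, so a caller scanning out a full 640x480 fb would
+ * fill a hypothetical struct drm_mode_set_plane req like this.
+ */
+#if 0	/* example only */
+	req.crtc_x = 0;			/* CRTC coords: integer pixels */
+	req.crtc_y = 0;
+	req.crtc_w = 640;
+	req.crtc_h = 480;
+	req.src_x = 0;			/* source coords: 16.16 fixed point */
+	req.src_y = 0;
+	req.src_w = 640 << 16;		/* 640.0 */
+	req.src_h = 480 << 16;		/* 480.0 */
+#endif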
+
+/**
+ * drm_mode_set_config_internal - helper to call ->set_config
+ * @set: modeset config to set
+ *
+ * This is a little helper to wrap internal calls to the ->set_config driver
+ * interface. The only thing it adds is correct refcounting dance.
+ */
+int drm_mode_set_config_internal(struct drm_mode_set *set)
+{
+	struct drm_crtc *crtc = set->crtc;
+	struct drm_framebuffer *fb, *old_fb;
+	int ret;
+
+	old_fb = crtc->fb;
+	fb = set->fb;
+
+	ret = crtc->funcs->set_config(set);
+	if (ret == 0) {
+		if (old_fb)
+			drm_framebuffer_unreference(old_fb);
+		if (fb)
+			drm_framebuffer_reference(fb);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_mode_set_config_internal);
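+
+/*
+ * Illustrative sketch (not part of the original source): a kernel-internal
+ * caller (an fbdev helper, say) builds a drm_mode_set and routes it through
+ * the wrapper above so the framebuffer refcounts stay balanced.  crtc, fb,
+ * mode and connector are assumed to be set up already.
+ */
+#if 0	/* example only */
+	struct drm_mode_set set = {
+		.crtc = crtc,
+		.fb = fb,
+		.mode = mode,
+		.connectors = &connector,
+		.num_connectors = 1,
+	};
+	int ret;
+
+	ret = drm_mode_set_config_internal(&set);
+#endif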
+
+/**
+ * drm_mode_setcrtc - set CRTC configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Build a new CRTC configuration based on user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setcrtc(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_mode_crtc *crtc_req = data;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct drm_connector **connector_set = NULL, *connector;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_display_mode *mode = NULL;
+	struct drm_mode_set set;
+	uint32_t __user *set_connectors_ptr;
+	int ret;
+	int i;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	/* For some reason crtc x/y offsets are signed internally. */
+	if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+		return -ERANGE;
+
+	drm_modeset_lock_all(dev);
+	obj = drm_mode_object_find(dev, crtc_req->crtc_id,
+				   DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
+		ret = -EINVAL;
+		goto out;
+	}
+	crtc = obj_to_crtc(obj);
+	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+	if (crtc_req->mode_valid) {
+		int hdisplay, vdisplay;
+		/*
+		 * If we have a mode we need a framebuffer; an fb_id of -1
+		 * means "set the mode using the currently bound fb".
+		 */
+		if (crtc_req->fb_id == -1) {
+			if (!crtc->fb) {
+				DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
+				ret = -EINVAL;
+				goto out;
+			}
+			fb = crtc->fb;
+			/* Make refcounting symmetric with the lookup path. */
+			drm_framebuffer_reference(fb);
+		} else {
+			fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
+			if (!fb) {
+				DRM_DEBUG_KMS("Unknown FB ID%d\n",
+						crtc_req->fb_id);
+				ret = -EINVAL;
+				goto out;
+			}
+		}
+
+		mode = drm_mode_create(dev);
+		if (!mode) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ret = drm_crtc_convert_umode(mode, &crtc_req->mode);
+		if (ret) {
+			DRM_DEBUG_KMS("Invalid mode\n");
+			goto out;
+		}
+
+		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+
+		hdisplay = mode->hdisplay;
+		vdisplay = mode->vdisplay;
+
+		if (crtc->invert_dimensions)
+			swap(hdisplay, vdisplay);
+
+		if (hdisplay > fb->width ||
+		    vdisplay > fb->height ||
+		    crtc_req->x > fb->width - hdisplay ||
+		    crtc_req->y > fb->height - vdisplay) {
+			DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+				      fb->width, fb->height,
+				      hdisplay, vdisplay, crtc_req->x, crtc_req->y,
+				      crtc->invert_dimensions ? " (inverted)" : "");
+			ret = -ENOSPC;
+			goto out;
+		}
+	}
+
+	if (crtc_req->count_connectors == 0 && mode) {
+		DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
+		DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
+			  crtc_req->count_connectors);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (crtc_req->count_connectors > 0) {
+		u32 out_id;
+
+		/* Avoid unbounded kernel memory allocation */
+		if (crtc_req->count_connectors > config->num_connector) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		connector_set = kmalloc(crtc_req->count_connectors *
+					sizeof(struct drm_connector *),
+					GFP_KERNEL);
+		if (!connector_set) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		for (i = 0; i < crtc_req->count_connectors; i++) {
+			set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
+			if (get_user(out_id, &set_connectors_ptr[i])) {
+				ret = -EFAULT;
+				goto out;
+			}
+
+			obj = drm_mode_object_find(dev, out_id,
+						   DRM_MODE_OBJECT_CONNECTOR);
+			if (!obj) {
+				DRM_DEBUG_KMS("Connector id %d unknown\n",
+						out_id);
+				ret = -EINVAL;
+				goto out;
+			}
+			connector = obj_to_connector(obj);
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+					connector->base.id,
+					drm_get_connector_name(connector));
+
+			connector_set[i] = connector;
+		}
+	}
+
+	set.crtc = crtc;
+	set.x = crtc_req->x;
+	set.y = crtc_req->y;
+	set.mode = mode;
+	set.connectors = connector_set;
+	set.num_connectors = crtc_req->count_connectors;
+	set.fb = fb;
+	ret = drm_mode_set_config_internal(&set);
+
+out:
+	if (fb)
+		drm_framebuffer_unreference(fb);
+
+	kfree(connector_set);
+	drm_mode_destroy(dev, mode);
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+			void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_cursor *req = data;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
+		return -EINVAL;
+
+	obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
+		return -EINVAL;
+	}
+	crtc = obj_to_crtc(obj);
+
+	mutex_lock(&crtc->mutex);
+	if (req->flags & DRM_MODE_CURSOR_BO) {
+		if (!crtc->funcs->cursor_set) {
+			ret = -ENXIO;
+			goto out;
+		}
+		/* Turns off the cursor if handle is 0 */
+		ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
+					      req->width, req->height);
+	}
+
+	if (req->flags & DRM_MODE_CURSOR_MOVE) {
+		if (crtc->funcs->cursor_move) {
+			ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+		} else {
+			ret = -EFAULT;
+			goto out;
+		}
+	}
+out:
+	mutex_unlock(&crtc->mutex);
+
+	return ret;
+}
+
+/* Original addfb only supported RGB formats, so figure out which one */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+	uint32_t fmt;
+
+	switch (bpp) {
+	case 8:
+		fmt = DRM_FORMAT_C8;
+		break;
+	case 16:
+		if (depth == 15)
+			fmt = DRM_FORMAT_XRGB1555;
+		else
+			fmt = DRM_FORMAT_RGB565;
+		break;
+	case 24:
+		fmt = DRM_FORMAT_RGB888;
+		break;
+	case 32:
+		if (depth == 24)
+			fmt = DRM_FORMAT_XRGB8888;
+		else if (depth == 30)
+			fmt = DRM_FORMAT_XRGB2101010;
+		else
+			fmt = DRM_FORMAT_ARGB8888;
+		break;
+	default:
+		DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+		fmt = DRM_FORMAT_XRGB8888;
+		break;
+	}
+
+	return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
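+
+/*
+ * Illustrative sketch (not part of the original source): the legacy
+ * bpp/depth pairs and the FOURCC formats the helper above maps them to.
+ */
+#if 0	/* example only */
+	drm_mode_legacy_fb_format(16, 15);	/* DRM_FORMAT_XRGB1555 */
+	drm_mode_legacy_fb_format(16, 16);	/* DRM_FORMAT_RGB565 */
+	drm_mode_legacy_fb_format(32, 24);	/* DRM_FORMAT_XRGB8888 */
+	drm_mode_legacy_fb_format(32, 32);	/* DRM_FORMAT_ARGB8888 */
+#endif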
+
+/**
+ * drm_mode_addfb - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb(struct drm_device *dev,
+		   void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_fb_cmd *or = data;
+	struct drm_mode_fb_cmd2 r = {};
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_framebuffer *fb;
+	int ret = 0;
+
+	/* Use new struct with format internally */
+	r.fb_id = or->fb_id;
+	r.width = or->width;
+	r.height = or->height;
+	r.pitches[0] = or->pitch;
+	r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+	r.handles[0] = or->handle;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	if ((config->min_width > r.width) || (r.width > config->max_width))
+		return -EINVAL;
+
+	if ((config->min_height > r.height) || (r.height > config->max_height))
+		return -EINVAL;
+
+	fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
+	if (IS_ERR(fb)) {
+		DRM_DEBUG_KMS("could not create framebuffer\n");
+		return PTR_ERR(fb);
+	}
+
+	mutex_lock(&file_priv->fbs_lock);
+	or->fb_id = fb->base.id;
+	list_add(&fb->filp_head, &file_priv->fbs);
+	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+	mutex_unlock(&file_priv->fbs_lock);
+
+	return ret;
+}
+
+static int format_check(const struct drm_mode_fb_cmd2 *r)
+{
+	uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+
+	switch (format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_BGR233:
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_XBGR4444:
+	case DRM_FORMAT_RGBX4444:
+	case DRM_FORMAT_BGRX4444:
+	case DRM_FORMAT_ARGB4444:
+	case DRM_FORMAT_ABGR4444:
+	case DRM_FORMAT_RGBA4444:
+	case DRM_FORMAT_BGRA4444:
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_BGRA5551:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_BGR888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_BGRA8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_AYUV:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
+{
+	int ret, hsub, vsub, num_planes, i;
+
+	ret = format_check(r);
+	if (ret) {
+		DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
+		return ret;
+	}
+
+	hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
+	vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
+	num_planes = drm_format_num_planes(r->pixel_format);
+
+	if (r->width == 0 || r->width % hsub) {
+		DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
+		return -EINVAL;
+	}
+
+	if (r->height == 0 || r->height % vsub) {
+		DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_planes; i++) {
+		unsigned int width = r->width / (i != 0 ? hsub : 1);
+		unsigned int height = r->height / (i != 0 ? vsub : 1);
+		unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
+
+		if (!r->handles[i]) {
+			DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+			return -EINVAL;
+		}
+
+		if ((uint64_t) width * cpp > UINT_MAX)
+			return -ERANGE;
+
+		if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+			return -ERANGE;
+
+		if (r->pitches[i] < width * cpp) {
+			DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
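+
+/*
+ * Worked example (not part of the original source): for NV12 at
+ * 1920x1080 the format helpers above report two planes with 2x2 chroma
+ * subsampling.  The loop then checks plane 0 at 1920x1080 with cpp 1
+ * and plane 1 at 960x540 with cpp 2, so both pitches must be at least
+ * 1920 bytes.
+ */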
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request with format.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+		    void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_fb_cmd2 *r = data;
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_framebuffer *fb;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+		DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+		return -EINVAL;
+	}
+
+	if ((config->min_width > r->width) || (r->width > config->max_width)) {
+		DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
+			  r->width, config->min_width, config->max_width);
+		return -EINVAL;
+	}
+	if ((config->min_height > r->height) || (r->height > config->max_height)) {
+		DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
+			  r->height, config->min_height, config->max_height);
+		return -EINVAL;
+	}
+
+	ret = framebuffer_check(r);
+	if (ret)
+		return ret;
+
+	fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+	if (IS_ERR(fb)) {
+		DRM_DEBUG_KMS("could not create framebuffer\n");
+		return PTR_ERR(fb);
+	}
+
+	mutex_lock(&file_priv->fbs_lock);
+	r->fb_id = fb->base.id;
+	list_add(&fb->filp_head, &file_priv->fbs);
+	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+	mutex_unlock(&file_priv->fbs_lock);
+
+	return ret;
+}
+
+/**
+ * drm_mode_rmfb - remove an FB from the configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_rmfb(struct drm_device *dev,
+		   void *data, struct drm_file *file_priv)
+{
+	struct drm_framebuffer *fb = NULL;
+	struct drm_framebuffer *fbl = NULL;
+	uint32_t *id = data;
+	int found = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	mutex_lock(&file_priv->fbs_lock);
+	mutex_lock(&dev->mode_config.fb_lock);
+	fb = __drm_framebuffer_lookup(dev, *id);
+	if (!fb)
+		goto fail_lookup;
+
+	list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+		if (fb == fbl)
+			found = 1;
+	if (!found)
+		goto fail_lookup;
+
+	/* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+	__drm_framebuffer_unregister(dev, fb);
+
+	list_del_init(&fb->filp_head);
+	mutex_unlock(&dev->mode_config.fb_lock);
+	mutex_unlock(&file_priv->fbs_lock);
+
+	drm_framebuffer_remove(fb);
+
+	return 0;
+
+fail_lookup:
+	mutex_unlock(&dev->mode_config.fb_lock);
+	mutex_unlock(&file_priv->fbs_lock);
+
+	return -EINVAL;
+}
+
+/**
+ * drm_mode_getfb - get FB info
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_getfb(struct drm_device *dev,
+		   void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_fb_cmd *r = data;
+	struct drm_framebuffer *fb;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	fb = drm_framebuffer_lookup(dev, r->fb_id);
+	if (!fb)
+		return -EINVAL;
+
+	r->height = fb->height;
+	r->width = fb->width;
+	r->depth = fb->depth;
+	r->bpp = fb->bits_per_pixel;
+	r->pitch = fb->pitches[0];
+	if (fb->funcs->create_handle) {
+		if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
+			ret = fb->funcs->create_handle(fb, file_priv,
+						       &r->handle);
+		} else {
+			/* GET_FB() is an unprivileged ioctl so we must not
+			 * return a buffer-handle to non-master processes! For
+			 * backwards-compatibility reasons, we cannot make
+			 * GET_FB() privileged, so just return an invalid handle
+			 * for non-masters. */
+			r->handle = 0;
+			ret = 0;
+		}
+	} else {
+		ret = -ENODEV;
+	}
+
+	drm_framebuffer_unreference(fb);
+
+	return ret;
+}
+
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv)
+{
+	struct drm_clip_rect __user *clips_ptr;
+	struct drm_clip_rect *clips = NULL;
+	struct drm_mode_fb_dirty_cmd *r = data;
+	struct drm_framebuffer *fb;
+	unsigned flags;
+	int num_clips;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	fb = drm_framebuffer_lookup(dev, r->fb_id);
+	if (!fb)
+		return -EINVAL;
+
+	num_clips = r->num_clips;
+	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
+
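+	/* Either both num_clips and clips_ptr are supplied, or neither. */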
+	if (!num_clips != !clips_ptr) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+	/* If userspace annotates copy, clips must come in pairs */
+	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	if (num_clips && clips_ptr) {
+		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+			ret = -EINVAL;
+			goto out_err1;
+		}
+		clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+		if (!clips) {
+			ret = -ENOMEM;
+			goto out_err1;
+		}
+
+		ret = copy_from_user(clips, clips_ptr,
+				     num_clips * sizeof(*clips));
+		if (ret) {
+			ret = -EFAULT;
+			goto out_err2;
+		}
+	}
+
+	if (fb->funcs->dirty) {
+		drm_modeset_lock_all(dev);
+		ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+				       clips, num_clips);
+		drm_modeset_unlock_all(dev);
+	} else {
+		ret = -ENOSYS;
+	}
+
+out_err2:
+	kfree(clips);
+out_err1:
+	drm_framebuffer_unreference(fb);
+
+	return ret;
+}
+
+/**
+ * drm_fb_release - remove and free the FBs on this file
+ * @priv: drm file whose framebuffers should be released
+ *
+ * Destroy all the FBs associated with @priv. Called when the drm file is
+ * closed, so there is nothing to return to userspace.
+ */
+void drm_fb_release(struct drm_file *priv)
+{
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_framebuffer *fb, *tfb;
+
+	mutex_lock(&priv->fbs_lock);
+	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+
+		mutex_lock(&dev->mode_config.fb_lock);
+		/* Mark fb as reaped, we still have a ref from fpriv->fbs. */
+		__drm_framebuffer_unregister(dev, fb);
+		mutex_unlock(&dev->mode_config.fb_lock);
+
+		list_del_init(&fb->filp_head);
+
+		/* This will also drop the fpriv->fbs reference. */
+		drm_framebuffer_remove(fb);
+	}
+	mutex_unlock(&priv->fbs_lock);
+}
+
+struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+					 const char *name, int num_values)
+{
+	struct drm_property *property = NULL;
+	int ret;
+
+	property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+	if (!property)
+		return NULL;
+
+	if (num_values) {
+		property->values = kzalloc(sizeof(uint64_t) * num_values, GFP_KERNEL);
+		if (!property->values)
+			goto fail;
+	}
+
+	ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
+	if (ret)
+		goto fail;
+
+	property->flags = flags;
+	property->num_values = num_values;
+	INIT_LIST_HEAD(&property->enum_blob_list);
+
+	if (name) {
+		strncpy(property->name, name, DRM_PROP_NAME_LEN);
+		property->name[DRM_PROP_NAME_LEN-1] = '\0';
+	}
+
+	list_add_tail(&property->head, &dev->mode_config.property_list);
+	return property;
+fail:
+	kfree(property->values);
+	kfree(property);
+	return NULL;
+}
+EXPORT_SYMBOL(drm_property_create);
+
+struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+					 const char *name,
+					 const struct drm_prop_enum_list *props,
+					 int num_values)
+{
+	struct drm_property *property;
+	int i, ret;
+
+	flags |= DRM_MODE_PROP_ENUM;
+
+	property = drm_property_create(dev, flags, name, num_values);
+	if (!property)
+		return NULL;
+
+	for (i = 0; i < num_values; i++) {
+		ret = drm_property_add_enum(property, i,
+				      props[i].type,
+				      props[i].name);
+		if (ret) {
+			drm_property_destroy(dev, property);
+			return NULL;
+		}
+	}
+
+	return property;
+}
+EXPORT_SYMBOL(drm_property_create_enum);
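+
+/*
+ * Illustrative sketch (not part of the original source): creating an
+ * enum property from a static list, the same pattern the TV and scaling
+ * properties above use.  foo_panel_types and "panel type" are
+ * hypothetical names.
+ */
+#if 0	/* example only */
+	static const struct drm_prop_enum_list foo_panel_types[] = {
+		{ 0, "LVDS" },
+		{ 1, "eDP" },
+	};
+	struct drm_property *prop;
+
+	prop = drm_property_create_enum(dev, 0, "panel type",
+					foo_panel_types,
+					ARRAY_SIZE(foo_panel_types));
+#endif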
+
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+					 int flags, const char *name,
+					 const struct drm_prop_enum_list *props,
+					 int num_values)
+{
+	struct drm_property *property;
+	int i, ret;
+
+	flags |= DRM_MODE_PROP_BITMASK;
+
+	property = drm_property_create(dev, flags, name, num_values);
+	if (!property)
+		return NULL;
+
+	for (i = 0; i < num_values; i++) {
+		ret = drm_property_add_enum(property, i,
+				      props[i].type,
+				      props[i].name);
+		if (ret) {
+			drm_property_destroy(dev, property);
+			return NULL;
+		}
+	}
+
+	return property;
+}
+EXPORT_SYMBOL(drm_property_create_bitmask);
+
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+					 const char *name,
+					 uint64_t min, uint64_t max)
+{
+	struct drm_property *property;
+
+	flags |= DRM_MODE_PROP_RANGE;
+
+	property = drm_property_create(dev, flags, name, 2);
+	if (!property)
+		return NULL;
+
+	property->values[0] = min;
+	property->values[1] = max;
+
+	return property;
+}
+EXPORT_SYMBOL(drm_property_create_range);
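+
+/*
+ * Illustrative sketch (not part of the original source): a range
+ * property stores min in values[0] and max in values[1], which
+ * drm_property_change_is_valid() later enforces.  The property name
+ * here is hypothetical.
+ */
+#if 0	/* example only */
+	struct drm_property *prop;
+
+	prop = drm_property_create_range(dev, 0, "rotation angle", 0, 359);
+#endif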
+
+int drm_property_add_enum(struct drm_property *property, int index,
+			  uint64_t value, const char *name)
+{
+	struct drm_property_enum *prop_enum;
+
+	if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
+		return -EINVAL;
+
+	/*
+	 * Bitmask enum properties have the additional constraint of values
+	 * from 0 to 63
+	 */
+	if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
+		return -EINVAL;
+
+	if (!list_empty(&property->enum_blob_list)) {
+		list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+			if (prop_enum->value == value) {
+				strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+				prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+				return 0;
+			}
+		}
+	}
+
+	prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+	if (!prop_enum)
+		return -ENOMEM;
+
+	strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+	prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+	prop_enum->value = value;
+
+	property->values[index] = value;
+	list_add_tail(&prop_enum->head, &property->enum_blob_list);
+	return 0;
+}
+EXPORT_SYMBOL(drm_property_add_enum);
+
+void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
+{
+	struct drm_property_enum *prop_enum, *pt;
+
+	list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
+		list_del(&prop_enum->head);
+		kfree(prop_enum);
+	}
+
+	if (property->num_values)
+		kfree(property->values);
+	drm_mode_object_put(dev, &property->base);
+	list_del(&property->head);
+	kfree(property);
+}
+EXPORT_SYMBOL(drm_property_destroy);
+
+void drm_object_attach_property(struct drm_mode_object *obj,
+				struct drm_property *property,
+				uint64_t init_val)
+{
+	int count = obj->properties->count;
+
+	if (count == DRM_OBJECT_MAX_PROPERTY) {
+		WARN(1, "Failed to attach object property (type: 0x%x). Please "
+			"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+			"you see this message on the same object type.\n",
+			obj->type);
+		return;
+	}
+
+	obj->properties->ids[count] = property->base.id;
+	obj->properties->values[count] = init_val;
+	obj->properties->count++;
+}
+EXPORT_SYMBOL(drm_object_attach_property);
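+
+/*
+ * Illustrative sketch (not part of the original source): attaching a
+ * core property with its initial value.  Note the WARN above means an
+ * overflowing attach is silently dropped rather than unwound.
+ */
+#if 0	/* example only */
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.dpms_property,
+				   DRM_MODE_DPMS_ON);
+#endif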
+
+int drm_object_property_set_value(struct drm_mode_object *obj,
+				  struct drm_property *property, uint64_t val)
+{
+	int i;
+
+	for (i = 0; i < obj->properties->count; i++) {
+		if (obj->properties->ids[i] == property->base.id) {
+			obj->properties->values[i] = val;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(drm_object_property_set_value);
+
+int drm_object_property_get_value(struct drm_mode_object *obj,
+				  struct drm_property *property, uint64_t *val)
+{
+	int i;
+
+	for (i = 0; i < obj->properties->count; i++) {
+		if (obj->properties->ids[i] == property->base.id) {
+			*val = obj->properties->values[i];
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(drm_object_property_get_value);
+
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+			       void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_object *obj;
+	struct drm_mode_get_property *out_resp = data;
+	struct drm_property *property;
+	int enum_count = 0;
+	int blob_count = 0;
+	int value_count = 0;
+	int ret = 0, i;
+	int copied;
+	struct drm_property_enum *prop_enum;
+	struct drm_mode_property_enum __user *enum_ptr;
+	struct drm_property_blob *prop_blob;
+	uint32_t __user *blob_id_ptr;
+	uint64_t __user *values_ptr;
+	uint32_t __user *blob_length_ptr;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+	obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
+	if (!obj) {
+		ret = -EINVAL;
+		goto done;
+	}
+	property = obj_to_property(obj);
+
+	if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+		list_for_each_entry(prop_enum, &property->enum_blob_list, head)
+			enum_count++;
+	} else if (property->flags & DRM_MODE_PROP_BLOB) {
+		list_for_each_entry(prop_blob, &property->enum_blob_list, head)
+			blob_count++;
+	}
+
+	value_count = property->num_values;
+
+	strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
+	out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+	out_resp->flags = property->flags;
+
+	if ((out_resp->count_values >= value_count) && value_count) {
+		values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
+		for (i = 0; i < value_count; i++) {
+			if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+				ret = -EFAULT;
+				goto done;
+			}
+		}
+	}
+	out_resp->count_values = value_count;
+
+	if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
+		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+			copied = 0;
+			enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
+			list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+
+				if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+					ret = -EFAULT;
+					goto done;
+				}
+
+				if (copy_to_user(&enum_ptr[copied].name,
+						 &prop_enum->name, DRM_PROP_NAME_LEN)) {
+					ret = -EFAULT;
+					goto done;
+				}
+				copied++;
+			}
+		}
+		out_resp->count_enum_blobs = enum_count;
+	}
+
+	if (property->flags & DRM_MODE_PROP_BLOB) {
+		if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
+			copied = 0;
+			blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
+			blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
+
+			list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
+				if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
+					ret = -EFAULT;
+					goto done;
+				}
+
+				if (put_user(prop_blob->length, blob_length_ptr + copied)) {
+					ret = -EFAULT;
+					goto done;
+				}
+
+				copied++;
+			}
+		}
+		out_resp->count_enum_blobs = blob_count;
+	}
+done:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
+							  void *data)
+{
+	struct drm_property_blob *blob;
+	int ret;
+
+	if (!length || !data)
+		return NULL;
+
+	blob = kzalloc(sizeof(struct drm_property_blob) + length, GFP_KERNEL);
+	if (!blob)
+		return NULL;
+
+	ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+	if (ret) {
+		kfree(blob);
+		return NULL;
+	}
+
+	blob->length = length;
+
+	memcpy(blob->data, data, length);
+
+	list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
+	return blob;
+}
+
+static void drm_property_destroy_blob(struct drm_device *dev,
+			       struct drm_property_blob *blob)
+{
+	drm_mode_object_put(dev, &blob->base);
+	list_del(&blob->head);
+	kfree(blob);
+}
+
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_object *obj;
+	struct drm_mode_get_blob *out_resp = data;
+	struct drm_property_blob *blob;
+	int ret = 0;
+	void __user *blob_ptr;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+	obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
+	if (!obj) {
+		ret = -EINVAL;
+		goto done;
+	}
+	blob = obj_to_blob(obj);
+
+	if (out_resp->length == blob->length) {
+		blob_ptr = (void __user *)(unsigned long)out_resp->data;
+		if (copy_to_user(blob_ptr, blob->data, blob->length)) {
+			ret = -EFAULT;
+			goto done;
+		}
+	}
+	out_resp->length = blob->length;
+
+done:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+					    struct edid *edid)
+{
+	struct drm_device *dev = connector->dev;
+	int ret, size;
+
+	if (connector->edid_blob_ptr)
+		drm_property_destroy_blob(dev, connector->edid_blob_ptr);
+
+	/* Delete edid, when there is none. */
+	if (!edid) {
+		connector->edid_blob_ptr = NULL;
+		ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
+		return ret;
+	}
+
+	size = EDID_LENGTH * (1 + edid->extensions);
+	connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
+							    size, edid);
+	if (!connector->edid_blob_ptr)
+		return -EINVAL;
+
+	ret = drm_object_property_set_value(&connector->base,
+					       dev->mode_config.edid_property,
+					       connector->edid_blob_ptr->base.id);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
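+
+/*
+ * Illustrative sketch (not part of the original source): typical use in
+ * a connector's ->get_modes() hook after fetching the EDID over an
+ * assumed i2c adapter.
+ */
+#if 0	/* example only */
+	struct edid *edid;
+
+	edid = drm_get_edid(connector, adapter);
+	drm_mode_connector_update_edid_property(connector, edid);
+#endif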
+
+static bool drm_property_change_is_valid(struct drm_property *property,
+					 uint64_t value)
+{
+	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+		return false;
+	if (property->flags & DRM_MODE_PROP_RANGE) {
+		if (value < property->values[0] || value > property->values[1])
+			return false;
+		return true;
+	} else if (property->flags & DRM_MODE_PROP_BITMASK) {
+		int i;
+		uint64_t valid_mask = 0;
+		for (i = 0; i < property->num_values; i++)
+			valid_mask |= (1ULL << property->values[i]);
+		return !(value & ~valid_mask);
+	} else if (property->flags & DRM_MODE_PROP_BLOB) {
+		/* Only the driver knows */
+		return true;
+	} else {
+		int i;
+		for (i = 0; i < property->num_values; i++)
+			if (property->values[i] == value)
+				return true;
+		return false;
+	}
+}
+
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+				       void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_connector_set_property *conn_set_prop = data;
+	struct drm_mode_obj_set_property obj_set_prop = {
+		.value = conn_set_prop->value,
+		.prop_id = conn_set_prop->prop_id,
+		.obj_id = conn_set_prop->connector_id,
+		.obj_type = DRM_MODE_OBJECT_CONNECTOR
+	};
+
+	/* It does all the locking and checking we need */
+	return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
+static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+					   struct drm_property *property,
+					   uint64_t value)
+{
+	int ret = -EINVAL;
+	struct drm_connector *connector = obj_to_connector(obj);
+
+	/* Do DPMS ourselves */
+	if (property == connector->dev->mode_config.dpms_property) {
+		if (connector->funcs->dpms)
+			(*connector->funcs->dpms)(connector, (int)value);
+		ret = 0;
+	} else if (connector->funcs->set_property)
+		ret = connector->funcs->set_property(connector, property, value);
+
+	/* store the property value if successful */
+	if (!ret)
+		drm_object_property_set_value(&connector->base, property, value);
+	return ret;
+}
+
+static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+				      struct drm_property *property,
+				      uint64_t value)
+{
+	int ret = -EINVAL;
+	struct drm_crtc *crtc = obj_to_crtc(obj);
+
+	if (crtc->funcs->set_property)
+		ret = crtc->funcs->set_property(crtc, property, value);
+	if (!ret)
+		drm_object_property_set_value(obj, property, value);
+
+	return ret;
+}
+
+static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
+				      struct drm_property *property,
+				      uint64_t value)
+{
+	int ret = -EINVAL;
+	struct drm_plane *plane = obj_to_plane(obj);
+
+	if (plane->funcs->set_property)
+		ret = plane->funcs->set_property(plane, property, value);
+	if (!ret)
+		drm_object_property_set_value(obj, property, value);
+
+	return ret;
+}
+
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
+{
+	struct drm_mode_obj_get_properties *arg = data;
+	struct drm_mode_object *obj;
+	int ret = 0;
+	int i;
+	int copied = 0;
+	int props_count = 0;
+	uint32_t __user *props_ptr;
+	uint64_t __user *prop_values_ptr;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+
+	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+	if (!obj) {
+		ret = -EINVAL;
+		goto out;
+	}
+	if (!obj->properties) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	props_count = obj->properties->count;
+
+	/* This ioctl is called twice, once to determine how much space is
+	 * needed, and the 2nd time to fill it. */
+	if ((arg->count_props >= props_count) && props_count) {
+		copied = 0;
+		props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+		prop_values_ptr = (uint64_t __user *)(unsigned long)
+				  (arg->prop_values_ptr);
+		for (i = 0; i < props_count; i++) {
+			if (put_user(obj->properties->ids[i],
+				     props_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			if (put_user(obj->properties->values[i],
+				     prop_values_ptr + copied)) {
+				ret = -EFAULT;
+				goto out;
+			}
+			copied++;
+		}
+	}
+	arg->count_props = props_count;
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv)
+{
+	struct drm_mode_obj_set_property *arg = data;
+	struct drm_mode_object *arg_obj;
+	struct drm_mode_object *prop_obj;
+	struct drm_property *property;
+	int ret = -EINVAL;
+	int i;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+
+	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+	if (!arg_obj)
+		goto out;
+	if (!arg_obj->properties)
+		goto out;
+
+	for (i = 0; i < arg_obj->properties->count; i++)
+		if (arg_obj->properties->ids[i] == arg->prop_id)
+			break;
+
+	if (i == arg_obj->properties->count)
+		goto out;
+
+	prop_obj = drm_mode_object_find(dev, arg->prop_id,
+					DRM_MODE_OBJECT_PROPERTY);
+	if (!prop_obj)
+		goto out;
+	property = obj_to_property(prop_obj);
+
+	if (!drm_property_change_is_valid(property, arg->value))
+		goto out;
+
+	switch (arg_obj->type) {
+	case DRM_MODE_OBJECT_CONNECTOR:
+		ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+						      arg->value);
+		break;
+	case DRM_MODE_OBJECT_CRTC:
+		ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+		break;
+	case DRM_MODE_OBJECT_PLANE:
+		ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
+		break;
+	}
+
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+				      struct drm_encoder *encoder)
+{
+	int i;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0) {
+			connector->encoder_ids[i] = encoder->base.id;
+			return 0;
+		}
+	}
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
+
+void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+				    struct drm_encoder *encoder)
+{
+	int i;
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == encoder->base.id) {
+			connector->encoder_ids[i] = 0;
+			if (connector->encoder == encoder)
+				connector->encoder = NULL;
+			break;
+		}
+	}
+}
+EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
+
+int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+				  int gamma_size)
+{
+	crtc->gamma_size = gamma_size;
+
+	crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
+	if (!crtc->gamma_store) {
+		crtc->gamma_size = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
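+
+/*
+ * Illustrative sketch, not part of the original patch: a driver sizes the
+ * legacy gamma table once when it registers a CRTC. The 256-entry LUT is
+ * the common choice but is an assumption here.
+ */
+#if 0
+	/* 256 entries of 16-bit red, green and blue, stored back to back */
+	ret = drm_mode_crtc_set_gamma_size(crtc, 256);
+#endif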
+
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_crtc_lut *crtc_lut = data;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	void *r_base, *g_base, *b_base;
+	int size;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+	obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		ret = -EINVAL;
+		goto out;
+	}
+	crtc = obj_to_crtc(obj);
+
+	if (crtc->funcs->gamma_set == NULL) {
+		ret = -ENOSYS;
+		goto out;
+	}
+
+	/* memcpy into gamma store */
+	if (crtc_lut->gamma_size != crtc->gamma_size) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	size = crtc_lut->gamma_size * (sizeof(uint16_t));
+	r_base = crtc->gamma_store;
+	if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	g_base = r_base + size;
+	if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	b_base = g_base + size;
+	if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+
+}
+
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_crtc_lut *crtc_lut = data;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	void *r_base, *g_base, *b_base;
+	int size;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+	obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		ret = -EINVAL;
+		goto out;
+	}
+	crtc = obj_to_crtc(obj);
+
+	/* memcpy into gamma store */
+	if (crtc_lut->gamma_size != crtc->gamma_size) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	size = crtc_lut->gamma_size * (sizeof(uint16_t));
+	r_base = crtc->gamma_store;
+	if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	g_base = r_base + size;
+	if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	b_base = g_base + size;
+	if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_crtc_page_flip *page_flip = data;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+	struct drm_pending_vblank_event *e = NULL;
+	unsigned long flags;
+	int hdisplay, vdisplay;
+	int ret = -EINVAL;
+
+	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+	    page_flip->reserved != 0)
+		return -EINVAL;
+
+	obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj)
+		return -EINVAL;
+	crtc = obj_to_crtc(obj);
+
+	mutex_lock(&crtc->mutex);
+	if (crtc->fb == NULL) {
+		/* The framebuffer is currently unbound, presumably
+		 * due to a hotplug event that userspace has not
+		 * yet discovered.
+		 */
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (crtc->funcs->page_flip == NULL)
+		goto out;
+
+	fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
+	if (!fb)
+		goto out;
+
+	hdisplay = crtc->mode.hdisplay;
+	vdisplay = crtc->mode.vdisplay;
+
+	if (crtc->invert_dimensions)
+		swap(hdisplay, vdisplay);
+
+	if (hdisplay > fb->width ||
+	    vdisplay > fb->height ||
+	    crtc->x > fb->width - hdisplay ||
+	    crtc->y > fb->height - vdisplay) {
+		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+			      fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
+			      crtc->invert_dimensions ? " (inverted)" : "");
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	if (crtc->fb->pixel_format != fb->pixel_format) {
+		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+		ret = -ENOMEM;
+		spin_lock_irqsave(&dev->event_lock, flags);
+		if (file_priv->event_space < sizeof e->event) {
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			goto out;
+		}
+		file_priv->event_space -= sizeof e->event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+
+		e = kzalloc(sizeof *e, GFP_KERNEL);
+		if (e == NULL) {
+			spin_lock_irqsave(&dev->event_lock, flags);
+			file_priv->event_space += sizeof e->event;
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			goto out;
+		}
+
+		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+		e->event.base.length = sizeof e->event;
+		e->event.user_data = page_flip->user_data;
+		e->base.event = &e->event.base;
+		e->base.file_priv = file_priv;
+		e->base.destroy =
+			(void (*) (struct drm_pending_event *)) kfree;
+	}
+
+	old_fb = crtc->fb;
+	ret = crtc->funcs->page_flip(crtc, fb, e);
+	if (ret) {
+		if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+			spin_lock_irqsave(&dev->event_lock, flags);
+			file_priv->event_space += sizeof e->event;
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			kfree(e);
+		}
+		/* Keep the old fb, don't unref it. */
+		old_fb = NULL;
+	} else {
+		/*
+		 * Warn if the driver hasn't properly updated the crtc->fb
+		 * field to reflect that the new framebuffer is now used.
+		 * Failing to do so will screw with the reference counting
+		 * on framebuffers.
+		 */
+		WARN_ON(crtc->fb != fb);
+		/* Unref only the old framebuffer. */
+		fb = NULL;
+	}
+
+out:
+	if (fb)
+		drm_framebuffer_unreference(fb);
+	if (old_fb)
+		drm_framebuffer_unreference(old_fb);
+	mutex_unlock(&crtc->mutex);
+
+	return ret;
+}
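+
+/*
+ * Userspace-side sketch, for illustration only (client code, not part of
+ * this file, hence the #if 0): request a flip with an event and wait for
+ * the DRM_EVENT_FLIP_COMPLETE queued by the code above. The user_data
+ * value is an arbitrary assumption; error handling is elided.
+ */
+#if 0
+#include <stdint.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <drm/drm.h>
+
+static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
+{
+	struct drm_mode_crtc_page_flip flip = {
+		.crtc_id = crtc_id,
+		.fb_id = fb_id,
+		.flags = DRM_MODE_PAGE_FLIP_EVENT,
+		.user_data = 0x12345678,	/* echoed back in the event */
+	};
+	struct drm_event_vblank ev;
+
+	if (ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip))
+		return -1;
+
+	/* Flip-complete events are read from the DRM fd itself. */
+	if (read(fd, &ev, sizeof(ev)) < (ssize_t)sizeof(ev))
+		return -1;
+	return ev.base.type == DRM_EVENT_FLIP_COMPLETE ? 0 : -1;
+}
+#endif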
+
+void drm_mode_config_reset(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		if (crtc->funcs->reset)
+			crtc->funcs->reset(crtc);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->funcs->reset)
+			encoder->funcs->reset(encoder);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		connector->status = connector_status_unknown;
+
+		if (connector->funcs->reset)
+			connector->funcs->reset(connector);
+	}
+}
+EXPORT_SYMBOL(drm_mode_config_reset);
+
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+			       void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_create_dumb *args = data;
+
+	if (!dev->driver->dumb_create)
+		return -ENOSYS;
+	return dev->driver->dumb_create(file_priv, dev, args);
+}
+
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_map_dumb *args = data;
+
+	/* call driver ioctl to get mmap offset */
+	if (!dev->driver->dumb_map_offset)
+		return -ENOSYS;
+
+	return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+}
+
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+				void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_destroy_dumb *args = data;
+
+	if (!dev->driver->dumb_destroy)
+		return -ENOSYS;
+
+	return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+}
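+
+/*
+ * Userspace-side sketch of the dumb-buffer flow served by the three thin
+ * wrappers above, for illustration only (client code, hence the #if 0).
+ * The 640x480, 32 bpp geometry is an arbitrary assumption and most error
+ * handling is elided.
+ */
+#if 0
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <drm/drm.h>
+
+static void *map_dumb_buffer(int fd, uint32_t *handle, uint64_t *size)
+{
+	struct drm_mode_create_dumb create = {
+		.width = 640, .height = 480, .bpp = 32,
+	};
+	struct drm_mode_map_dumb map = {0};
+	void *ptr;
+
+	/* 1) Ask the driver to allocate a CPU-mappable scanout buffer. */
+	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
+		return NULL;
+
+	/* 2) Ask for the fake mmap offset of that buffer... */
+	map.handle = create.handle;
+	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
+		return NULL;
+
+	/* 3) ...and map it through the DRM fd. */
+	*handle = create.handle;
+	*size = create.size;
+	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
+		   MAP_SHARED, fd, map.offset);
+	return ptr == MAP_FAILED ? NULL : ptr;
+}
+#endif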
+
+/*
+ * Just need to support RGB formats here for compat with code that doesn't
+ * use pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+			  int *bpp)
+{
+	switch (format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_BGR233:
+		*depth = 8;
+		*bpp = 8;
+		break;
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_BGRA5551:
+		*depth = 15;
+		*bpp = 16;
+		break;
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
+		*depth = 16;
+		*bpp = 16;
+		break;
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_BGR888:
+		*depth = 24;
+		*bpp = 24;
+		break;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_BGRX8888:
+		*depth = 24;
+		*bpp = 32;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_BGRA1010102:
+		*depth = 30;
+		*bpp = 32;
+		break;
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_BGRA8888:
+		*depth = 32;
+		*bpp = 32;
+		break;
+	default:
+		DRM_DEBUG_KMS("unsupported pixel format\n");
+		*depth = 0;
+		*bpp = 0;
+		break;
+	}
+}
+EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 3;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(drm_format_num_planes);
+
+/**
+ * drm_format_plane_cpp - determine the bytes per pixel value
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+ * RETURNS:
+ * The bytes per pixel value for the specified plane.
+ */
+int drm_format_plane_cpp(uint32_t format, int plane)
+{
+	unsigned int depth;
+	int bpp;
+
+	if (plane >= drm_format_num_planes(format))
+		return 0;
+
+	switch (format) {
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+		return 2;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
+		return plane ? 2 : 1;
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 1;
+	default:
+		drm_fb_get_bpp_depth(format, &depth, &bpp);
+		return bpp >> 3;
+	}
+}
+EXPORT_SYMBOL(drm_format_plane_cpp);
+
+/**
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_horz_chroma_subsampling(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+		return 4;
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
+
+/**
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_vert_chroma_subsampling(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+		return 4;
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
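+
+/*
+ * Illustrative sketch, not part of the original patch: the helpers above
+ * combine to give per-plane sizes. Assuming a tightly packed layout
+ * (pitch == width * cpp) and even dimensions, NV12 at 1920x1080 yields
+ * 1920*1080 bytes for Y and 960*540*2 bytes for the interleaved CbCr.
+ */
+#if 0
+static size_t example_plane_size(uint32_t format, int plane,
+				 unsigned int width, unsigned int height)
+{
+	int cpp = drm_format_plane_cpp(format, plane);
+
+	if (plane > 0) {
+		/* Chroma planes are subsampled in one or both directions. */
+		width /= drm_format_horz_chroma_subsampling(format);
+		height /= drm_format_vert_chroma_subsampling(format);
+	}
+
+	return (size_t)width * cpp * height;
+}
+#endif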
+
+/**
+ * drm_mode_config_init - initialize DRM mode_configuration structure
+ * @dev: DRM device
+ *
+ * Initialize @dev's mode_config structure, used for tracking the graphics
+ * configuration of @dev.
+ *
+ * Since this initializes the modeset locks themselves, no locking is possible
+ * here. That is no problem, since this should happen single-threaded at init
+ * time. It is the driver's job to ensure this guarantee.
+ *
+ */
+void drm_mode_config_init(struct drm_device *dev)
+{
+	mutex_init(&dev->mode_config.mutex);
+	mutex_init(&dev->mode_config.idr_mutex);
+	mutex_init(&dev->mode_config.fb_lock);
+	INIT_LIST_HEAD(&dev->mode_config.fb_list);
+	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
+	INIT_LIST_HEAD(&dev->mode_config.connector_list);
+	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+	INIT_LIST_HEAD(&dev->mode_config.property_list);
+	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+	INIT_LIST_HEAD(&dev->mode_config.plane_list);
+	idr_init(&dev->mode_config.crtc_idr);
+
+	drm_modeset_lock_all(dev);
+	drm_mode_create_standard_connector_properties(dev);
+	drm_modeset_unlock_all(dev);
+
+	/* Just to be sure */
+	dev->mode_config.num_fb = 0;
+	dev->mode_config.num_connector = 0;
+	dev->mode_config.num_crtc = 0;
+	dev->mode_config.num_encoder = 0;
+}
+EXPORT_SYMBOL(drm_mode_config_init);
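+
+/*
+ * Illustrative sketch, not part of the original patch: drivers typically
+ * call this early in their load path and then fill in the size limits and
+ * mode_config funcs. The "foo_" name and the 4096 limits are assumptions.
+ */
+#if 0
+	drm_mode_config_init(dev);
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = 4096;
+	dev->mode_config.max_height = 4096;
+	dev->mode_config.funcs = &foo_mode_config_funcs;
+#endif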
+
+/**
+ * drm_mode_config_cleanup - free up DRM mode_config info
+ * @dev: DRM device
+ *
+ * Free up all the connectors and CRTCs associated with this DRM device, then
+ * free up the framebuffers and associated buffer objects.
+ *
+ * Note that since this /should/ happen single-threaded at driver/device
+ * teardown time, no locking is required. It's the driver's job to ensure that
+ * this guarantee actually holds true.
+ *
+ * FIXME: cleanup any dangling user buffer objects too
+ */
+void drm_mode_config_cleanup(struct drm_device *dev)
+{
+	struct drm_connector *connector, *ot;
+	struct drm_crtc *crtc, *ct;
+	struct drm_encoder *encoder, *enct;
+	struct drm_framebuffer *fb, *fbt;
+	struct drm_property *property, *pt;
+	struct drm_property_blob *blob, *bt;
+	struct drm_plane *plane, *plt;
+
+	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+				 head) {
+		encoder->funcs->destroy(encoder);
+	}
+
+	list_for_each_entry_safe(connector, ot,
+				 &dev->mode_config.connector_list, head) {
+		connector->funcs->destroy(connector);
+	}
+
+	list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
+				 head) {
+		drm_property_destroy(dev, property);
+	}
+
+	list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
+				 head) {
+		drm_property_destroy_blob(dev, blob);
+	}
+
+	/*
+	 * Single-threaded teardown context, so it's not required to grab the
+	 * fb_lock to protect against concurrent fb_list access. Contrary, it
+	 * would actually deadlock with the drm_framebuffer_cleanup function.
+	 *
+	 * Also, if there are any framebuffers left, that's a driver leak now,
+	 * so politely WARN about this.
+	 */
+	WARN_ON(!list_empty(&dev->mode_config.fb_list));
+	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+		drm_framebuffer_remove(fb);
+	}
+
+	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+				 head) {
+		plane->funcs->destroy(plane);
+	}
+
+	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+		crtc->funcs->destroy(crtc);
+	}
+
+	idr_destroy(&dev->mode_config.crtc_idr);
+}
+EXPORT_SYMBOL(drm_mode_config_cleanup);
diff --git a/linux-imx/drivers/gpu/drm/drm_crtc_helper.c b/linux-imx/drivers/gpu/drm/drm_crtc_helper.c
new file mode 100644
index 0000000..ed1334e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_crtc_helper.c
@@ -0,0 +1,1110 @@
+/*
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM core CRTC related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ *      Keith Packard
+ *	Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/export.h>
+#include <linux/moduleparam.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_edid.h>
+
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
+ * 						connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+	struct drm_connector *connector, *tmp;
+	struct list_head panel_list;
+
+	INIT_LIST_HEAD(&panel_list);
+
+	list_for_each_entry_safe(connector, tmp,
+				 &dev->mode_config.connector_list, head) {
+		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+			list_move_tail(&connector->head, &panel_list);
+	}
+
+	list_splice(&panel_list, &dev->mode_config.connector_list);
+}
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
+
+static bool drm_kms_helper_poll = true;
+module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+
+static void drm_mode_validate_flag(struct drm_connector *connector,
+				   int flags)
+{
+	struct drm_display_mode *mode;
+
+	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+		return;
+
+	list_for_each_entry(mode, &connector->modes, head) {
+		if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+				!(flags & DRM_MODE_FLAG_INTERLACE))
+			mode->status = MODE_NO_INTERLACE;
+		if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
+				!(flags & DRM_MODE_FLAG_DBLSCAN))
+			mode->status = MODE_NO_DBLESCAN;
+	}
+}
+
+/**
+ * drm_helper_probe_single_connector_modes - get complete set of display modes
+ * @connector: connector to probe
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Based on the helper callbacks implemented by @connector try to detect all
+ * valid modes.  Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
+ *
+ * Intended to be used as a generic implementation of the ->fill_modes()
+ * @connector callback for drivers that use the crtc helpers for output mode
+ * filtering and detection.
+ *
+ * RETURNS:
+ * Number of modes found on @connector.
+ */
+int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+					    uint32_t maxX, uint32_t maxY)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode;
+	struct drm_connector_helper_funcs *connector_funcs =
+		connector->helper_private;
+	int count = 0;
+	int mode_flags = 0;
+	bool verbose_prune = true;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+			drm_get_connector_name(connector));
+	/* set all modes to the unverified state */
+	list_for_each_entry(mode, &connector->modes, head)
+		mode->status = MODE_UNVERIFIED;
+
+	if (connector->force) {
+		if (connector->force == DRM_FORCE_ON)
+			connector->status = connector_status_connected;
+		else
+			connector->status = connector_status_disconnected;
+		if (connector->funcs->force)
+			connector->funcs->force(connector);
+	} else {
+		connector->status = connector->funcs->detect(connector, true);
+	}
+
+	/* Re-enable polling in case the global poll config changed. */
+	if (drm_kms_helper_poll != dev->mode_config.poll_running)
+		drm_kms_helper_poll_enable(dev);
+
+	dev->mode_config.poll_running = drm_kms_helper_poll;
+
+	if (connector->status == connector_status_disconnected) {
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
+			connector->base.id, drm_get_connector_name(connector));
+		drm_mode_connector_update_edid_property(connector, NULL);
+		verbose_prune = false;
+		goto prune;
+	}
+
+#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
+	count = drm_load_edid_firmware(connector);
+	if (count == 0)
+#endif
+		count = (*connector_funcs->get_modes)(connector);
+
+	if (count == 0 && connector->status == connector_status_connected)
+		count = drm_add_modes_noedid(connector, 1024, 768);
+	if (count == 0)
+		goto prune;
+
+	drm_mode_connector_list_update(connector);
+
+	if (maxX && maxY)
+		drm_mode_validate_size(dev, &connector->modes, maxX,
+				       maxY, 0);
+
+	if (connector->interlace_allowed)
+		mode_flags |= DRM_MODE_FLAG_INTERLACE;
+	if (connector->doublescan_allowed)
+		mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+	drm_mode_validate_flag(connector, mode_flags);
+
+	list_for_each_entry(mode, &connector->modes, head) {
+		if (mode->status == MODE_OK)
+			mode->status = connector_funcs->mode_valid(connector,
+								   mode);
+	}
+
+prune:
+	drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
+
+	if (list_empty(&connector->modes))
+		return 0;
+
+	drm_mode_sort(&connector->modes);
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
+			drm_get_connector_name(connector));
+	list_for_each_entry(mode, &connector->modes, head) {
+		mode->vrefresh = drm_mode_vrefresh(mode);
+
+		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+		drm_mode_debug_printmodeline(mode);
+	}
+
+	return count;
+}
+EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
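+
+/*
+ * Illustrative sketch, not part of the original patch: this helper is
+ * usually consumed as the ->fill_modes() vfunc, together with the helper
+ * callbacks it relies on. All "foo_" callbacks are hypothetical.
+ */
+#if 0
+static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
+	.get_modes	= foo_connector_get_modes,
+	.mode_valid	= foo_connector_mode_valid,
+	.best_encoder	= foo_connector_best_encoder,
+};
+
+static const struct drm_connector_funcs foo_connector_funcs = {
+	.dpms		= drm_helper_connector_dpms,
+	.detect		= foo_connector_detect,
+	.fill_modes	= drm_helper_probe_single_connector_modes,
+	.destroy	= foo_connector_destroy,
+};
+
+	/* at connector init time: */
+	drm_connector_helper_add(connector, &foo_connector_helper_funcs);
+#endif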
+
+/**
+ * drm_helper_encoder_in_use - check if a given encoder is in use
+ * @encoder: encoder to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @encoder's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @encoder is part of the mode_config, false otherwise.
+ */
+bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
+{
+	struct drm_connector *connector;
+	struct drm_device *dev = encoder->dev;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder)
+			return true;
+	return false;
+}
+EXPORT_SYMBOL(drm_helper_encoder_in_use);
+
+/**
+ * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
+ * @crtc: CRTC to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @crtc's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @crtc is part of the mode_config, false otherwise.
+ */
+bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev = crtc->dev;
+	/* FIXME: Locking around list access? */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
+			return true;
+	return false;
+}
+EXPORT_SYMBOL(drm_helper_crtc_in_use);
+
+static void
+drm_encoder_disable(struct drm_encoder *encoder)
+{
+	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
+	if (encoder_funcs->disable)
+		(*encoder_funcs->disable)(encoder);
+	else
+		(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+}
+
+/**
+ * drm_helper_disable_unused_functions - disable unused objects
+ * @dev: DRM device
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * If a connector or CRTC isn't part of @dev's mode_config, it can be disabled
+ * by calling its dpms function, which should power it off.
+ */
+void drm_helper_disable_unused_functions(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (!connector->encoder)
+			continue;
+		if (connector->status == connector_status_disconnected)
+			connector->encoder = NULL;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (!drm_helper_encoder_in_use(encoder)) {
+			drm_encoder_disable(encoder);
+			/* disconnect the encoder from its CRTC */
+			encoder->crtc = NULL;
+		}
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+		crtc->enabled = drm_helper_crtc_in_use(crtc);
+		if (!crtc->enabled) {
+			if (crtc_funcs->disable)
+				(*crtc_funcs->disable)(crtc);
+			else
+				(*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
+			crtc->fb = NULL;
+		}
+	}
+}
+EXPORT_SYMBOL(drm_helper_disable_unused_functions);
+
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Return false if @encoder can't be driven by @crtc, true otherwise.
+ */
+static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+				struct drm_crtc *crtc)
+{
+	struct drm_device *dev;
+	struct drm_crtc *tmp;
+	int crtc_mask = 1;
+
+	WARN(!crtc, "checking null crtc?\n");
+
+	dev = crtc->dev;
+
+	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+		if (tmp == crtc)
+			break;
+		crtc_mask <<= 1;
+	}
+
+	if (encoder->possible_crtcs & crtc_mask)
+		return true;
+	return false;
+}
+
+/*
+ * Check the CRTC we're going to map each output to vs. its current
+ * CRTC.  If they don't match, we have to disable the output and the CRTC
+ * since the driver will have to re-route things.
+ */
+static void
+drm_crtc_prepare_encoders(struct drm_device *dev)
+{
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct drm_encoder *encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		encoder_funcs = encoder->helper_private;
+		/* Disable unused encoders */
+		if (encoder->crtc == NULL)
+			drm_encoder_disable(encoder);
+		/* Disable encoders whose CRTC is about to change */
+		if (encoder_funcs->get_crtc &&
+		    encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
+			drm_encoder_disable(encoder);
+	}
+}
+
+/**
+ * drm_crtc_helper_set_mode - internal helper to set a mode
+ * @crtc: CRTC to program
+ * @mode: mode to use
+ * @x: horizontal offset into the surface
+ * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Try to set @mode on @crtc.  Give @crtc and its associated connectors a chance
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration. For
+ * example for changing whether audio is enabled on an HDMI link or for changing
+ * panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
+ *
+ * RETURNS:
+ * True if the mode was set successfully, or false otherwise.
+ */
+bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+			      struct drm_display_mode *mode,
+			      int x, int y,
+			      struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	int saved_x, saved_y;
+	struct drm_encoder *encoder;
+	bool ret = true;
+
+	crtc->enabled = drm_helper_crtc_in_use(crtc);
+	if (!crtc->enabled)
+		return true;
+
+	adjusted_mode = drm_mode_duplicate(dev, mode);
+	if (!adjusted_mode)
+		return false;
+
+	saved_hwmode = crtc->hwmode;
+	saved_mode = crtc->mode;
+	saved_x = crtc->x;
+	saved_y = crtc->y;
+
+	/* Update crtc values up front so the driver can rely on them for mode
+	 * setting.
+	 */
+	crtc->mode = *mode;
+	crtc->x = x;
+	crtc->y = y;
+
+	/* Pass our mode to the connectors and the CRTC to give them a chance to
+	 * adjust it according to limitations or connector properties, and also
+	 * a chance to reject the mode entirely.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+		if (encoder->crtc != crtc)
+			continue;
+		encoder_funcs = encoder->helper_private;
+		if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
+						      adjusted_mode))) {
+			DRM_DEBUG_KMS("Encoder fixup failed\n");
+			goto done;
+		}
+	}
+
+	if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
+		DRM_DEBUG_KMS("CRTC fixup failed\n");
+		goto done;
+	}
+	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+	/* Prepare the encoders and CRTCs before setting the mode. */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+		if (encoder->crtc != crtc)
+			continue;
+		encoder_funcs = encoder->helper_private;
+		/* Disable the encoders as the first thing we do. */
+		encoder_funcs->prepare(encoder);
+	}
+
+	drm_crtc_prepare_encoders(dev);
+
+	crtc_funcs->prepare(crtc);
+
+	/* Set up the DPLL and any encoders state that needs to adjust or depend
+	 * on the DPLL.
+	 */
+	ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+	if (!ret)
+		goto done;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+			encoder->base.id, drm_get_encoder_name(encoder),
+			mode->base.id, mode->name);
+		encoder_funcs = encoder->helper_private;
+		encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+	}
+
+	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
+	crtc_funcs->commit(crtc);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		encoder_funcs = encoder->helper_private;
+		encoder_funcs->commit(encoder);
+
+	}
+
+	/* Store real post-adjustment hardware mode. */
+	crtc->hwmode = *adjusted_mode;
+
+	/* Calculate and store various constants which
+	 * are later needed by vblank and swap-completion
+	 * timestamping. They are derived from true hwmode.
+	 */
+	drm_calc_timestamping_constants(crtc);
+
+	/* FIXME: add subpixel order */
+done:
+	drm_mode_destroy(dev, adjusted_mode);
+	if (!ret) {
+		crtc->hwmode = saved_hwmode;
+		crtc->mode = saved_mode;
+		crtc->x = saved_x;
+		crtc->y = saved_y;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_crtc_helper_set_mode);
+
+
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	/* Decouple all encoders and their attached connectors from this crtc */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			if (connector->encoder != encoder)
+				continue;
+
+			connector->encoder = NULL;
+		}
+	}
+
+	drm_helper_disable_unused_functions(dev);
+	return 0;
+}
+
+/**
+ * drm_crtc_helper_set_config - set a new config from userspace
+ * @set: mode set configuration
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->mode_set() and ->commit() helper callbacks.
+ *
+ * RETURNS:
+ * Returns 0 on success, -ERRNO on failure.
+ */
+int drm_crtc_helper_set_config(struct drm_mode_set *set)
+{
+	struct drm_device *dev;
+	struct drm_crtc *save_crtcs, *new_crtc, *crtc;
+	struct drm_encoder *save_encoders, *new_encoder, *encoder;
+	struct drm_framebuffer *old_fb = NULL;
+	bool mode_changed = false; /* if true do a full mode set */
+	bool fb_changed = false; /* if true and !mode_changed just do a flip */
+	struct drm_connector *save_connectors, *connector;
+	int count = 0, ro, fail = 0;
+	struct drm_crtc_helper_funcs *crtc_funcs;
+	struct drm_mode_set save_set;
+	int ret;
+	int i;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (!set)
+		return -EINVAL;
+
+	if (!set->crtc)
+		return -EINVAL;
+
+	if (!set->crtc->helper_private)
+		return -EINVAL;
+
+	crtc_funcs = set->crtc->helper_private;
+
+	if (!set->mode)
+		set->fb = NULL;
+
+	if (set->fb) {
+		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+				set->crtc->base.id, set->fb->base.id,
+				(int)set->num_connectors, set->x, set->y);
+	} else {
+		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+		return drm_crtc_helper_disable(set->crtc);
+	}
+
+	dev = set->crtc->dev;
+
+	/* Allocate space for the backup of all (non-pointer) crtc, encoder and
+	 * connector data. */
+	save_crtcs = kzalloc(dev->mode_config.num_crtc *
+			     sizeof(struct drm_crtc), GFP_KERNEL);
+	if (!save_crtcs)
+		return -ENOMEM;
+
+	save_encoders = kzalloc(dev->mode_config.num_encoder *
+				sizeof(struct drm_encoder), GFP_KERNEL);
+	if (!save_encoders) {
+		kfree(save_crtcs);
+		return -ENOMEM;
+	}
+
+	save_connectors = kzalloc(dev->mode_config.num_connector *
+				sizeof(struct drm_connector), GFP_KERNEL);
+	if (!save_connectors) {
+		kfree(save_crtcs);
+		kfree(save_encoders);
+		return -ENOMEM;
+	}
+
+	/* Copy data. Note that driver private data is not affected.
+	 * Should anything bad happen only the expected state is
+	 * restored, not the driver's private bookkeeping.
+	 */
+	count = 0;
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		save_crtcs[count++] = *crtc;
+	}
+
+	count = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		save_encoders[count++] = *encoder;
+	}
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		save_connectors[count++] = *connector;
+	}
+
+	save_set.crtc = set->crtc;
+	save_set.mode = &set->crtc->mode;
+	save_set.x = set->crtc->x;
+	save_set.y = set->crtc->y;
+	save_set.fb = set->crtc->fb;
+
+	/* We should be able to check here if the fb has the same properties
+	 * and then just flip_or_move it */
+	if (set->crtc->fb != set->fb) {
+		/* If we have no fb then treat it as a full mode set */
+		if (set->crtc->fb == NULL) {
+			DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+			mode_changed = true;
+		} else if (set->fb == NULL) {
+			mode_changed = true;
+		} else if (set->fb->depth != set->crtc->fb->depth) {
+			mode_changed = true;
+		} else if (set->fb->bits_per_pixel !=
+			   set->crtc->fb->bits_per_pixel) {
+			mode_changed = true;
+		} else if (set->fb->pixel_format !=
+			   set->crtc->fb->pixel_format) {
+			mode_changed = true;
+		} else
+			fb_changed = true;
+	}
+
+	if (set->x != set->crtc->x || set->y != set->crtc->y)
+		fb_changed = true;
+
+	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+		DRM_DEBUG_KMS("modes are different, full mode set\n");
+		drm_mode_debug_printmodeline(&set->crtc->mode);
+		drm_mode_debug_printmodeline(set->mode);
+		mode_changed = true;
+	}
+
+	/* a) traverse passed in connector list and get encoders for them */
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct drm_connector_helper_funcs *connector_funcs =
+			connector->helper_private;
+		new_encoder = connector->encoder;
+		for (ro = 0; ro < set->num_connectors; ro++) {
+			if (set->connectors[ro] == connector) {
+				new_encoder = connector_funcs->best_encoder(connector);
+				/* If we can't get an encoder for a connector
+				 * we are setting up now, fail. */
+				if (new_encoder == NULL)
+					/* don't break so the fail path works correctly */
+					fail = 1;
+				break;
+			}
+		}
+
+		if (new_encoder != connector->encoder) {
+			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+			mode_changed = true;
+			/* If the encoder is reused for another connector, then
+			 * the appropriate crtc will be set later.
+			 */
+			if (connector->encoder)
+				connector->encoder->crtc = NULL;
+			connector->encoder = new_encoder;
+		}
+	}
+
+	if (fail) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (!connector->encoder)
+			continue;
+
+		if (connector->encoder->crtc == set->crtc)
+			new_crtc = NULL;
+		else
+			new_crtc = connector->encoder->crtc;
+
+		for (ro = 0; ro < set->num_connectors; ro++) {
+			if (set->connectors[ro] == connector)
+				new_crtc = set->crtc;
+		}
+
+		/* Make sure the new CRTC will work with the encoder */
+		if (new_crtc &&
+		    !drm_encoder_crtc_ok(connector->encoder, new_crtc)) {
+			ret = -EINVAL;
+			goto fail;
+		}
+		if (new_crtc != connector->encoder->crtc) {
+			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+			mode_changed = true;
+			connector->encoder->crtc = new_crtc;
+		}
+		if (new_crtc) {
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+				connector->base.id, drm_get_connector_name(connector),
+				new_crtc->base.id);
+		} else {
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+				connector->base.id, drm_get_connector_name(connector));
+		}
+	}
+
+	/* mode_set_base is not a required function */
+	if (fb_changed && !crtc_funcs->mode_set_base)
+		mode_changed = true;
+
+	if (mode_changed) {
+		set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
+		if (set->crtc->enabled) {
+			DRM_DEBUG_KMS("attempting to set mode from"
+					" userspace\n");
+			drm_mode_debug_printmodeline(set->mode);
+			old_fb = set->crtc->fb;
+			set->crtc->fb = set->fb;
+			if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
+						      set->x, set->y,
+						      old_fb)) {
+				DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+					  set->crtc->base.id);
+				set->crtc->fb = old_fb;
+				ret = -EINVAL;
+				goto fail;
+			}
+			DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+			for (i = 0; i < set->num_connectors; i++) {
+				DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+					      drm_get_connector_name(set->connectors[i]));
+				set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
+			}
+		}
+		drm_helper_disable_unused_functions(dev);
+	} else if (fb_changed) {
+		set->crtc->x = set->x;
+		set->crtc->y = set->y;
+
+		old_fb = set->crtc->fb;
+		if (set->crtc->fb != set->fb)
+			set->crtc->fb = set->fb;
+		ret = crtc_funcs->mode_set_base(set->crtc,
+						set->x, set->y, old_fb);
+		if (ret != 0) {
+			set->crtc->fb = old_fb;
+			goto fail;
+		}
+	}
+
+	kfree(save_connectors);
+	kfree(save_encoders);
+	kfree(save_crtcs);
+	return 0;
+
+fail:
+	/* Restore all previous data. */
+	count = 0;
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		*crtc = save_crtcs[count++];
+	}
+
+	count = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		*encoder = save_encoders[count++];
+	}
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		*connector = save_connectors[count++];
+	}
+
+	/* Try to restore the config */
+	if (mode_changed &&
+	    !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
+				      save_set.y, save_set.fb))
+		DRM_ERROR("failed to restore config after modeset failure\n");
+
+	kfree(save_connectors);
+	kfree(save_encoders);
+	kfree(save_crtcs);
+	return ret;
+}
+EXPORT_SYMBOL(drm_crtc_helper_set_config);
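+
+/*
+ * Illustrative sketch, not part of the original patch: in-kernel callers
+ * (for example an fbdev restore path) drive this helper with a filled-in
+ * struct drm_mode_set. The single-connector setup is an assumption.
+ */
+#if 0
+	struct drm_mode_set set = {
+		.crtc = crtc,
+		.mode = &crtc->mode,
+		.x = 0,
+		.y = 0,
+		.fb = fb,
+		.connectors = &connector,
+		.num_connectors = 1,
+	};
+
+	ret = drm_crtc_helper_set_config(&set);
+#endif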
+
+static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
+{
+	int dpms = DRM_MODE_DPMS_OFF;
+	struct drm_connector *connector;
+	struct drm_device *dev = encoder->dev;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder)
+			if (connector->dpms < dpms)
+				dpms = connector->dpms;
+	return dpms;
+}
+
+static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
+{
+	int dpms = DRM_MODE_DPMS_OFF;
+	struct drm_connector *connector;
+	struct drm_device *dev = crtc->dev;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder && connector->encoder->crtc == crtc)
+			if (connector->dpms < dpms)
+				dpms = connector->dpms;
+	return dpms;
+}
+
+/**
+ * drm_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
+ *
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
+ */
+void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+{
+	struct drm_encoder *encoder = connector->encoder;
+	struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+	int old_dpms;
+
+	if (mode == connector->dpms)
+		return;
+
+	old_dpms = connector->dpms;
+	connector->dpms = mode;
+
+	/* from off to on, do crtc then encoder */
+	if (mode < old_dpms) {
+		if (crtc) {
+			struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+			if (crtc_funcs->dpms)
+				(*crtc_funcs->dpms) (crtc,
+						     drm_helper_choose_crtc_dpms(crtc));
+		}
+		if (encoder) {
+			struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+			if (encoder_funcs->dpms)
+				(*encoder_funcs->dpms) (encoder,
+							drm_helper_choose_encoder_dpms(encoder));
+		}
+	}
+
+	/* from on to off, do encoder then crtc */
+	if (mode > old_dpms) {
+		if (encoder) {
+			struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+			if (encoder_funcs->dpms)
+				(*encoder_funcs->dpms) (encoder,
+							drm_helper_choose_encoder_dpms(encoder));
+		}
+		if (crtc) {
+			struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+			if (crtc_funcs->dpms)
+				(*crtc_funcs->dpms) (crtc,
+						     drm_helper_choose_crtc_dpms(crtc));
+		}
+	}
+}
+EXPORT_SYMBOL(drm_helper_connector_dpms);
+
+int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+				   struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	int i;
+
+	fb->width = mode_cmd->width;
+	fb->height = mode_cmd->height;
+	for (i = 0; i < 4; i++) {
+		fb->pitches[i] = mode_cmd->pitches[i];
+		fb->offsets[i] = mode_cmd->offsets[i];
+	}
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+				    &fb->bits_per_pixel);
+	fb->pixel_format = mode_cmd->pixel_format;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
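+
+/*
+ * Illustrative sketch, not part of the original patch: drivers typically
+ * call this from their framebuffer-create path before doing their own
+ * bookkeeping; "foo_fb" is a hypothetical driver framebuffer wrapper.
+ */
+#if 0
+	ret = drm_helper_mode_fill_fb_struct(&foo_fb->base, mode_cmd);
+	if (ret)
+		return ret;
+#endif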
+
+int drm_helper_resume_force_mode(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct drm_crtc_helper_funcs *crtc_funcs;
+	int ret;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+		if (!crtc->enabled)
+			continue;
+
+		ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
+					       crtc->x, crtc->y, crtc->fb);
+
+		if (!ret)
+			DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+
+		/* Turn off outputs that were already powered off */
+		if (drm_helper_choose_crtc_dpms(crtc)) {
+			list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+				if (encoder->crtc != crtc)
+					continue;
+
+				encoder_funcs = encoder->helper_private;
+				if (encoder_funcs->dpms)
+					(*encoder_funcs->dpms) (encoder,
+								drm_helper_choose_encoder_dpms(encoder));
+			}
+
+			crtc_funcs = crtc->helper_private;
+			if (crtc_funcs->dpms)
+				(*crtc_funcs->dpms) (crtc,
+						     drm_helper_choose_crtc_dpms(crtc));
+		}
+	}
+	/* disable the unused connectors while restoring the modesetting */
+	drm_helper_disable_unused_functions(dev);
+	return 0;
+}
+EXPORT_SYMBOL(drm_helper_resume_force_mode);
+
+void drm_kms_helper_hotplug_event(struct drm_device *dev)
+{
+	/* send a uevent + call fbdev */
+	drm_sysfs_hotplug_event(dev);
+	if (dev->mode_config.funcs->output_poll_changed)
+		dev->mode_config.funcs->output_poll_changed(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
+
+#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void output_poll_execute(struct work_struct *work)
+{
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
+	struct drm_connector *connector;
+	enum drm_connector_status old_status;
+	bool repoll = false, changed = false;
+
+	if (!drm_kms_helper_poll)
+		return;
+
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+		/* Ignore forced connectors. */
+		if (connector->force)
+			continue;
+
+		/* Ignore HPD capable connectors and connectors where we don't
+		 * want any hotplug detection at all for polling. */
+		if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+			continue;
+
+		repoll = true;
+
+		old_status = connector->status;
+		/* if we are connected and don't want to poll for disconnect,
+		   skip it */
+		if (old_status == connector_status_connected &&
+		    !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
+			continue;
+
+		connector->status = connector->funcs->detect(connector, false);
+		if (old_status != connector->status) {
+			const char *old, *new;
+
+			old = drm_get_connector_status_name(old_status);
+			new = drm_get_connector_status_name(connector->status);
+
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
+				      "status updated from %s to %s\n",
+				      connector->base.id,
+				      drm_get_connector_name(connector),
+				      old, new);
+
+			changed = true;
+		}
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	if (changed)
+		drm_kms_helper_hotplug_event(dev);
+
+	if (repoll)
+		schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
+}
+
+void drm_kms_helper_poll_disable(struct drm_device *dev)
+{
+	if (!dev->mode_config.poll_enabled)
+		return;
+	cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_disable);
+
+void drm_kms_helper_poll_enable(struct drm_device *dev)
+{
+	bool poll = false;
+	struct drm_connector *connector;
+
+	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+		return;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+					 DRM_CONNECTOR_POLL_DISCONNECT))
+			poll = true;
+	}
+
+	if (poll)
+		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+
+void drm_kms_helper_poll_init(struct drm_device *dev)
+{
+	INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
+	dev->mode_config.poll_enabled = true;
+
+	drm_kms_helper_poll_enable(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_init);
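+
+/*
+ * Illustrative sketch, not part of the original patch: a driver opts each
+ * connector into polling by setting connector->polled before calling
+ * drm_kms_helper_poll_init(). Which flags apply is hardware-specific.
+ */
+#if 0
+	/* no HPD line on this connector: poll for both edges */
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+			    DRM_CONNECTOR_POLL_DISCONNECT;
+
+	/* after all connectors are registered: */
+	drm_kms_helper_poll_init(dev);
+#endif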
+
+void drm_kms_helper_poll_fini(struct drm_device *dev)
+{
+	drm_kms_helper_poll_disable(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+
+void drm_helper_hpd_irq_event(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	enum drm_connector_status old_status;
+	bool changed = false;
+
+	if (!dev->mode_config.poll_enabled)
+		return;
+
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+		/* Only handle HPD capable connectors. */
+		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+			continue;
+
+		old_status = connector->status;
+
+		connector->status = connector->funcs->detect(connector, false);
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
+			      connector->base.id,
+			      drm_get_connector_name(connector),
+			      drm_get_connector_status_name(old_status),
+			      drm_get_connector_status_name(connector->status));
+		if (old_status != connector->status)
+			changed = true;
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	if (changed)
+		drm_kms_helper_hotplug_event(dev);
+}
+EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/linux-imx/drivers/gpu/drm/drm_debugfs.c b/linux-imx/drivers/gpu/drm/drm_debugfs.c
new file mode 100644
index 0000000..a05087c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_debugfs.c
@@ -0,0 +1,241 @@
+/**
+ * \file drm_debugfs.c
+ * debugfs support for DRM
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+
+#if defined(CONFIG_DEBUG_FS)
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
+
+static struct drm_info_list drm_debugfs_list[] = {
+	{"name", drm_name_info, 0},
+	{"vm", drm_vm_info, 0},
+	{"clients", drm_clients_info, 0},
+	{"bufs", drm_bufs_info, 0},
+	{"gem_names", drm_gem_name_info, DRIVER_GEM},
+#if DRM_DEBUG_CODE
+	{"vma", drm_vma_info, 0},
+#endif
+};
+#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
+
+
+static int drm_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct drm_info_node *node = inode->i_private;
+
+	return single_open(file, node->info_ent->show, node);
+}
+
+
+static const struct file_operations drm_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+
+/**
+ * Initialize a given set of debugfs files for a device
+ *
+ * \param files The array of files to create
+ * \param count The number of files given
+ * \param root DRI debugfs dir entry.
+ * \param minor DRM minor the files belong to
+ * \return Zero on success, non-zero on failure
+ *
+ * Create a given set of debugfs files represented by an array of
+ * struct drm_info_list entries in the given root directory.
+ */
+int drm_debugfs_create_files(struct drm_info_list *files, int count,
+			     struct dentry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+	struct drm_info_node *tmp;
+	int i, ret;
+
+	for (i = 0; i < count; i++) {
+		u32 features = files[i].driver_features;
+
+		if (features != 0 &&
+		    (dev->driver->driver_features & features) != features)
+			continue;
+
+		tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+		if (tmp == NULL) {
+			ret = -1;
+			goto fail;
+		}
+		ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
+					  root, tmp, &drm_debugfs_fops);
+		if (!ent) {
+			DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
+				  root->d_name.name, files[i].name);
+			kfree(tmp);
+			ret = -1;
+			goto fail;
+		}
+
+		tmp->minor = minor;
+		tmp->dent = ent;
+		tmp->info_ent = &files[i];
+
+		mutex_lock(&minor->debugfs_lock);
+		list_add(&tmp->list, &minor->debugfs_list);
+		mutex_unlock(&minor->debugfs_lock);
+	}
+	return 0;
+
+fail:
+	drm_debugfs_remove_files(files, count, minor);
+	return ret;
+}
+EXPORT_SYMBOL(drm_debugfs_create_files);
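+
+/*
+ * Illustrative sketch, not part of the original patch: a driver registers
+ * its own entries from its ->debugfs_init() hook. The seq_file private
+ * data is the drm_info_node set up above, which gives the show callback
+ * its minor and device. All "foo_" names are hypothetical.
+ */
+#if 0
+static int foo_state_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+
+	seq_printf(m, "driver state for %s\n", dev->driver->name);
+	return 0;
+}
+
+static struct drm_info_list foo_debugfs_list[] = {
+	{"foo_state", foo_state_info, 0},
+};
+
+static int foo_debugfs_init(struct drm_minor *minor)
+{
+	return drm_debugfs_create_files(foo_debugfs_list,
+					ARRAY_SIZE(foo_debugfs_list),
+					minor->debugfs_root, minor);
+}
+#endif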
+
+/**
+ * Initialize the DRI debugfs filesystem for a device
+ *
+ * \param dev DRM device
+ * \param minor device minor number
+ * \param root DRI debugfs dir entry.
+ *
+ * Create the DRI debugfs root entry "/sys/kernel/debug/dri", the device debugfs root entry
+ * "/sys/kernel/debug/dri/%minor%/", and each entry in debugfs_list as
+ * "/sys/kernel/debug/dri/%minor%/%name%".
+ */
+int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+		     struct dentry *root)
+{
+	struct drm_device *dev = minor->dev;
+	char name[64];
+	int ret;
+
+	INIT_LIST_HEAD(&minor->debugfs_list);
+	mutex_init(&minor->debugfs_lock);
+	sprintf(name, "%d", minor_id);
+	minor->debugfs_root = debugfs_create_dir(name, root);
+	if (!minor->debugfs_root) {
+		DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s\n", name);
+		return -1;
+	}
+
+	ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
+				       minor->debugfs_root, minor);
+	if (ret) {
+		debugfs_remove(minor->debugfs_root);
+		minor->debugfs_root = NULL;
+		DRM_ERROR("Failed to create core drm debugfs files\n");
+		return ret;
+	}
+
+	if (dev->driver->debugfs_init) {
+		ret = dev->driver->debugfs_init(minor);
+		if (ret) {
+			DRM_ERROR("DRM: Driver failed to initialize "
+				  "/sys/kernel/debug/dri.\n");
+			return ret;
+		}
+	}
+	return 0;
+}
+
+
+/**
+ * Remove a list of debugfs files
+ *
+ * \param files The list of files
+ * \param count The number of files
+ * \param minor The DRM minor from which the files should be removed
+ * \return always zero.
+ *
+ * Remove the debugfs entries registered for @files by drm_debugfs_create_files().
+ */
+int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+			     struct drm_minor *minor)
+{
+	struct list_head *pos, *q;
+	struct drm_info_node *tmp;
+	int i;
+
+	mutex_lock(&minor->debugfs_lock);
+	for (i = 0; i < count; i++) {
+		list_for_each_safe(pos, q, &minor->debugfs_list) {
+			tmp = list_entry(pos, struct drm_info_node, list);
+			if (tmp->info_ent == &files[i]) {
+				debugfs_remove(tmp->dent);
+				list_del(pos);
+				kfree(tmp);
+			}
+		}
+	}
+	mutex_unlock(&minor->debugfs_lock);
+	return 0;
+}
+EXPORT_SYMBOL(drm_debugfs_remove_files);
+
+/**
+ * Cleanup the debugfs filesystem resources.
+ *
+ * \param minor DRM minor to clean up.
+ * \return always zero.
+ *
+ * Remove all debugfs entries created by debugfs_init().
+ */
+int drm_debugfs_cleanup(struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+
+	if (!minor->debugfs_root)
+		return 0;
+
+	if (dev->driver->debugfs_cleanup)
+		dev->driver->debugfs_cleanup(minor);
+
+	drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
+
+	debugfs_remove(minor->debugfs_root);
+	minor->debugfs_root = NULL;
+
+	return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/linux-imx/drivers/gpu/drm/drm_dma.c b/linux-imx/drivers/gpu/drm/drm_dma.c
new file mode 100644
index 0000000..495b5fd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_dma.c
@@ -0,0 +1,157 @@
+/**
+ * \file drm_dma.c
+ * DMA IOCTL and function support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+
+/**
+ * Initialize the DMA data.
+ *
+ * \param dev DRM device.
+ * \return zero on success or a negative value on failure.
+ *
+ * Allocate and initialize a drm_device_dma structure.
+ */
+int drm_dma_setup(struct drm_device *dev)
+{
+	int i;
+
+	dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
+	if (!dev->dma)
+		return -ENOMEM;
+
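+	/* kzalloc() already zeroed dev->dma, so the loop below is redundant
+	 * but harmless; it makes the per-order initialization explicit. */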
+	for (i = 0; i <= DRM_MAX_ORDER; i++)
+		memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
+
+	return 0;
+}
+
+/**
+ * Cleanup the DMA resources.
+ *
+ * \param dev DRM device.
+ *
+ * Free all pages associated with DMA buffers, the buffers and pages lists, and
+ * finally the drm_device::dma structure itself.
+ */
+void drm_dma_takedown(struct drm_device *dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int i, j;
+
+	if (!dma)
+		return;
+
+	/* Clear dma buffers */
+	for (i = 0; i <= DRM_MAX_ORDER; i++) {
+		if (dma->bufs[i].seg_count) {
+			DRM_DEBUG("order %d: buf_count = %d,"
+				  " seg_count = %d\n",
+				  i,
+				  dma->bufs[i].buf_count,
+				  dma->bufs[i].seg_count);
+			for (j = 0; j < dma->bufs[i].seg_count; j++) {
+				if (dma->bufs[i].seglist[j]) {
+					drm_pci_free(dev, dma->bufs[i].seglist[j]);
+				}
+			}
+			kfree(dma->bufs[i].seglist);
+		}
+		if (dma->bufs[i].buf_count) {
+			for (j = 0; j < dma->bufs[i].buf_count; j++) {
+				kfree(dma->bufs[i].buflist[j].dev_private);
+			}
+			kfree(dma->bufs[i].buflist);
+		}
+	}
+
+	kfree(dma->buflist);
+	kfree(dma->pagelist);
+	kfree(dev->dma);
+	dev->dma = NULL;
+}
+
+/**
+ * Free a buffer.
+ *
+ * \param dev DRM device.
+ * \param buf buffer to free.
+ *
+ * Resets the fields of \p buf.
+ */
+void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
+{
+	if (!buf)
+		return;
+
+	buf->waiting = 0;
+	buf->pending = 0;
+	buf->file_priv = NULL;
+	buf->used = 0;
+}
+
+/**
+ * Reclaim the buffers.
+ *
+ * \param dev DRM device.
+ * \param file_priv DRM file private.
+ *
+ * Frees each buffer associated with \p file_priv not already on the hardware.
+ */
+void drm_core_reclaim_buffers(struct drm_device *dev,
+			      struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int i;
+
+	if (!dma)
+		return;
+	for (i = 0; i < dma->buf_count; i++) {
+		if (dma->buflist[i]->file_priv == file_priv) {
+			switch (dma->buflist[i]->list) {
+			case DRM_LIST_NONE:
+				drm_free_buffer(dev, dma->buflist[i]);
+				break;
+			case DRM_LIST_WAIT:
+				dma->buflist[i]->list = DRM_LIST_RECLAIM;
+				break;
+			default:
+				/* Buffer already on hardware. */
+				break;
+			}
+		}
+	}
+}
+EXPORT_SYMBOL(drm_core_reclaim_buffers);
diff --git a/linux-imx/drivers/gpu/drm/drm_dp_helper.c b/linux-imx/drivers/gpu/drm/drm_dp_helper.c
new file mode 100644
index 0000000..89e1966
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_dp_helper.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/i2c.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drmP.h>
+
+/**
+ * DOC: dp helpers
+ *
+ * These functions contain some common logic and helpers at various abstraction
+ * levels to deal with Display Port sink devices and related things like DP aux
+ * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
+ * blocks, ...
+ */
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+static int
+i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+			    uint8_t write_byte, uint8_t *read_byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int ret;
+
+	ret = (*algo_data->aux_ch)(adapter, mode,
+				   write_byte, read_byte);
+	return ret;
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address. If the I2C link is running, this 'restarts'
+ * the connection with the new address; this is used for doing
+ * a write followed by a read (as needed for DDC).
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int mode = MODE_I2C_START;
+	int ret;
+
+	if (reading)
+		mode |= MODE_I2C_READ;
+	else
+		mode |= MODE_I2C_WRITE;
+	algo_data->address = address;
+	algo_data->running = true;
+	ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+	return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int mode = MODE_I2C_STOP;
+
+	if (reading)
+		mode |= MODE_I2C_READ;
+	else
+		mode |= MODE_I2C_WRITE;
+	if (algo_data->running) {
+		(void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+		algo_data->running = false;
+	}
+}
+
+/*
+ * Write a single byte to the current I2C address; the
+ * I2C link must be running or this returns -EIO.
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int ret;
+
+	if (!algo_data->running)
+		return -EIO;
+
+	ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+	return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address; the
+ * I2C link must be running or this returns -EIO.
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int ret;
+
+	if (!algo_data->running)
+		return -EIO;
+
+	ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+	return ret;
+}
+
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+		     struct i2c_msg *msgs,
+		     int num)
+{
+	int ret = 0;
+	bool reading = false;
+	int m;
+	int b;
+
+	for (m = 0; m < num; m++) {
+		u16 len = msgs[m].len;
+		u8 *buf = msgs[m].buf;
+		reading = (msgs[m].flags & I2C_M_RD) != 0;
+		ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+		if (ret < 0)
+			break;
+		if (reading) {
+			for (b = 0; b < len; b++) {
+				ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+				if (ret < 0)
+					break;
+			}
+		} else {
+			for (b = 0; b < len; b++) {
+				ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+				if (ret < 0)
+					break;
+			}
+		}
+		if (ret < 0)
+			break;
+	}
+	if (ret >= 0)
+		ret = num;
+	i2c_algo_dp_aux_stop(adapter, reading);
+	DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
+	return ret;
+}
+
+static u32
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+	       I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm i2c_dp_aux_algo = {
+	.master_xfer	= i2c_algo_dp_aux_xfer,
+	.functionality	= i2c_algo_dp_aux_functionality,
+};
+
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+	(void) i2c_algo_dp_aux_address(adapter, 0, false);
+	(void) i2c_algo_dp_aux_stop(adapter, false);
+}
+
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+	adapter->algo = &i2c_dp_aux_algo;
+	adapter->retries = 3;
+	i2c_dp_aux_reset_bus(adapter);
+	return 0;
+}
+
+/**
+ * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
+ * @adapter: i2c adapter to register
+ *
+ * This registers an i2c adapter that uses the dp aux channel as its underlying
+ * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
+ * and store it in the algo_data member of the @adapter argument. This will be
+ * used by the i2c over dp aux algorithm to drive the hardware.
+ *
+ * RETURNS:
+ * 0 on success, -ERRNO on failure.
+ */
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+{
+	int error;
+
+	error = i2c_dp_aux_prepare_bus(adapter);
+	if (error)
+		return error;
+	error = i2c_add_adapter(adapter);
+	return error;
+}
+EXPORT_SYMBOL(i2c_dp_aux_add_bus);
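+
+/*
+ * Registration sketch (illustrative only, not part of this import): a driver
+ * embeds an i2c_algo_dp_aux_data in its output state, points
+ * adapter->algo_data at it, and supplies the aux_ch callback.  All "foo"
+ * names are hypothetical.
+ *
+ * \code
+ * static int foo_dp_register_ddc(struct foo_dp *dp)
+ * {
+ *	dp->algo.running = false;
+ *	dp->algo.address = 0;
+ *	dp->algo.aux_ch = foo_dp_aux_ch;
+ *
+ *	dp->adapter.owner = THIS_MODULE;
+ *	dp->adapter.class = I2C_CLASS_DDC;
+ *	strlcpy(dp->adapter.name, "foo-dp-aux", sizeof(dp->adapter.name));
+ *	dp->adapter.algo_data = &dp->algo;
+ *
+ *	return i2c_dp_aux_add_bus(&dp->adapter);
+ * }
+ * \endcode
+ */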
+
+/* Helpers for DP link training */
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+	return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+			     int lane)
+{
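+	/* Two lanes share each status byte; odd lanes use the high nibble. */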
+	int i = DP_LANE0_1_STATUS + (lane >> 1);
+	int s = (lane & 1) * 4;
+	u8 l = dp_link_status(link_status, i);
+	return (l >> s) & 0xf;
+}
+
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			  int lane_count)
+{
+	u8 lane_align;
+	u8 lane_status;
+	int lane;
+
+	lane_align = dp_link_status(link_status,
+				    DP_LANE_ALIGN_STATUS_UPDATED);
+	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+		return false;
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = dp_get_lane_status(link_status, lane);
+		if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+			return false;
+	}
+	return true;
+}
+EXPORT_SYMBOL(drm_dp_channel_eq_ok);
+
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			      int lane_count)
+{
+	int lane;
+	u8 lane_status;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = dp_get_lane_status(link_status, lane);
+		if ((lane_status & DP_LANE_CR_DONE) == 0)
+			return false;
+	}
+	return true;
+}
+EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
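+
+/*
+ * Training-loop sketch (illustrative only, not part of this import): callers
+ * read DP_LANE0_1_STATUS and the following bytes into a DP_LINK_STATUS_SIZE
+ * buffer over the AUX channel, then poll these predicates.  Here dpcd holds
+ * the cached receiver capabilities; foo_dp_aux_read() and
+ * foo_dp_adjust_training() are hypothetical driver helpers.
+ *
+ * \code
+ *	u8 link_status[DP_LINK_STATUS_SIZE];
+ *
+ *	drm_dp_link_train_clock_recovery_delay(dpcd);
+ *	foo_dp_aux_read(dp, DP_LANE0_1_STATUS,
+ *			link_status, DP_LINK_STATUS_SIZE);
+ *	if (!drm_dp_clock_recovery_ok(link_status, lane_count))
+ *		foo_dp_adjust_training(dp, link_status);
+ * \endcode
+ */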
+
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+				     int lane)
+{
+	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int s = ((lane & 1) ?
+		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+	u8 l = dp_link_status(link_status, i);
+
+	return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
+
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+					  int lane)
+{
+	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int s = ((lane & 1) ?
+		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+	u8 l = dp_link_status(link_status, i);
+
+	return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
+
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+		udelay(100);
+	else
+		mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
+
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+		udelay(400);
+	else
+		mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate)
+{
+	switch (link_rate) {
+	case 162000:
+	default:
+		return DP_LINK_BW_1_62;
+	case 270000:
+		return DP_LINK_BW_2_7;
+	case 540000:
+		return DP_LINK_BW_5_4;
+	}
+}
+EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
+
+int drm_dp_bw_code_to_link_rate(u8 link_bw)
+{
+	switch (link_bw) {
+	case DP_LINK_BW_1_62:
+	default:
+		return 162000;
+	case DP_LINK_BW_2_7:
+		return 270000;
+	case DP_LINK_BW_5_4:
+		return 540000;
+	}
+}
+EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
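+
+/*
+ * Conversion sketch (illustrative only, not part of this import): a driver
+ * picks the bw code for the rate it wants, programs the sink's
+ * DP_LINK_BW_SET register over AUX, and converts back when reading state.
+ * foo_dp_aux_write() is a hypothetical DPCD write helper.
+ *
+ * \code
+ *	u8 bw_code = drm_dp_link_rate_to_bw_code(270000);
+ *
+ *	foo_dp_aux_write(dp, DP_LINK_BW_SET, &bw_code, 1);
+ *	link_rate = drm_dp_bw_code_to_link_rate(bw_code);
+ * \endcode
+ */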
diff --git a/linux-imx/drivers/gpu/drm/drm_drv.c b/linux-imx/drivers/gpu/drm/drm_drv.c
new file mode 100644
index 0000000..2ab782c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_drv.c
@@ -0,0 +1,509 @@
+/**
+ * \file drm_drv.c
+ * Generic driver template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ *
+ * To use this template, you must at least define the following (samples
+ * given for the MGA driver):
+ *
+ * \code
+ * #define DRIVER_AUTHOR	"VA Linux Systems, Inc."
+ *
+ * #define DRIVER_NAME		"mga"
+ * #define DRIVER_DESC		"Matrox G200/G400"
+ * #define DRIVER_DATE		"20001127"
+ *
+ * #define drm_x		mga_##x
+ * \endcode
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_core.h>
+
+static int drm_version(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
+
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+
+/** Ioctl table */
+static const struct drm_ioctl_desc drm_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
+	/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
+	DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+#if __OS_HAS_AGP
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+#endif
+
+	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+};
+
+#define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE(drm_ioctls)
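+
+/*
+ * Driver-side sketch (illustrative only, not part of this import): device
+ * ioctls occupy the DRM_COMMAND_BASE..DRM_COMMAND_END range and are declared
+ * with the driver-private DRM_IOCTL_DEF_DRV() macro from drmP.h.  FOO_SUBMIT
+ * and foo_submit_ioctl are hypothetical names.
+ *
+ * \code
+ * static const struct drm_ioctl_desc foo_ioctls[] = {
+ *	DRM_IOCTL_DEF_DRV(FOO_SUBMIT, foo_submit_ioctl,
+ *			  DRM_AUTH|DRM_UNLOCKED),
+ * };
+ * \endcode
+ *
+ * The driver then sets .ioctls = foo_ioctls and
+ * .num_ioctls = ARRAY_SIZE(foo_ioctls) in its struct drm_driver.
+ */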
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+	struct drm_vma_entry *vma, *vma_temp;
+
+	DRM_DEBUG("\n");
+
+	if (dev->driver->lastclose)
+		dev->driver->lastclose(dev);
+	DRM_DEBUG("driver lastclose completed\n");
+
+	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_irq_uninstall(dev);
+
+	mutex_lock(&dev->struct_mutex);
+
+	/* Clear AGP information */
+	if (drm_core_has_AGP(dev) && dev->agp &&
+			!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		struct drm_agp_mem *entry, *tempe;
+
+		/* Remove AGP resources, but leave dev->agp
+		   intact until drv_cleanup is called. */
+		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+			if (entry->bound)
+				drm_unbind_agp(entry->memory);
+			drm_free_agp(entry->memory, entry->pages);
+			kfree(entry);
+		}
+		INIT_LIST_HEAD(&dev->agp->memory);
+
+		if (dev->agp->acquired)
+			drm_agp_release(dev);
+
+		dev->agp->acquired = 0;
+		dev->agp->enabled = 0;
+	}
+	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
+		drm_sg_cleanup(dev->sg);
+		dev->sg = NULL;
+	}
+
+	/* Clear vma list (only built for debugging) */
+	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+		list_del(&vma->head);
+		kfree(vma);
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_dma_takedown(dev);
+
+	dev->dev_mapping = NULL;
+	mutex_unlock(&dev->struct_mutex);
+
+	DRM_DEBUG("lastclose completed\n");
+	return 0;
+}
+
+/** File operations structure */
+static const struct file_operations drm_stub_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_stub_open,
+	.llseek = noop_llseek,
+};
+
+static int __init drm_core_init(void)
+{
+	int ret = -ENOMEM;
+
+	drm_global_init();
+	idr_init(&drm_minors_idr);
+
+	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
+		goto err_p1;
+
+	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
+	if (IS_ERR(drm_class)) {
+		printk(KERN_ERR "DRM: Error creating drm class.\n");
+		ret = PTR_ERR(drm_class);
+		goto err_p2;
+	}
+
+	drm_proc_root = proc_mkdir("dri", NULL);
+	if (!drm_proc_root) {
+		DRM_ERROR("Cannot create /proc/dri\n");
+		ret = -ENOMEM;
+		goto err_p3;
+	}
+
+	drm_debugfs_root = debugfs_create_dir("dri", NULL);
+	if (!drm_debugfs_root) {
+		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
+		ret = -ENOMEM;
+		goto err_p3;
+	}
+
+	DRM_INFO("Initialized %s %d.%d.%d %s\n",
+		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+	return 0;
+err_p3:
+	drm_sysfs_destroy();
+err_p2:
+	unregister_chrdev(DRM_MAJOR, "drm");
+
+	idr_destroy(&drm_minors_idr);
+err_p1:
+	return ret;
+}
+
+static void __exit drm_core_exit(void)
+{
+	remove_proc_entry("dri", NULL);
+	debugfs_remove(drm_debugfs_root);
+	drm_sysfs_destroy();
+
+	unregister_chrdev(DRM_MAJOR, "drm");
+
+	idr_destroy(&drm_minors_idr);
+}
+
+module_init(drm_core_init);
+module_exit(drm_core_exit);
+
+/**
+ * Copy an IOCTL return string to user space
+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+{
+	int len;
+
+	/* don't overflow userbuf */
+	len = strlen(value);
+	if (len > *buf_len)
+		len = *buf_len;
+
+	/* let userspace know the exact length of the driver value (which could be
+	 * larger than the userspace-supplied buffer) */
+	*buf_len = strlen(value);
+
+	/* finally, try filling in the userbuf */
+	if (len && buf)
+		if (copy_to_user(buf, value, len))
+			return -EFAULT;
+	return 0;
+}
+
+/**
+ * Get version information
+ *
+ * \param dev DRM device.
+ * \param data user argument, pointing to a drm_version structure.
+ * \param file_priv DRM file private.
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p data.
+ */
+static int drm_version(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_version *version = data;
+	int err;
+
+	version->version_major = dev->driver->major;
+	version->version_minor = dev->driver->minor;
+	version->version_patchlevel = dev->driver->patchlevel;
+	err = drm_copy_field(version->name, &version->name_len,
+			dev->driver->name);
+	if (!err)
+		err = drm_copy_field(version->date, &version->date_len,
+				dev->driver->date);
+	if (!err)
+		err = drm_copy_field(version->desc, &version->desc_len,
+				dev->driver->desc);
+
+	return err;
+}
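+
+/*
+ * Userspace sketch (illustrative only, not part of this import): because
+ * drm_copy_field() reports the full string length even when the buffer is
+ * too small (or NULL), callers typically issue DRM_IOCTL_VERSION twice:
+ * once to learn the lengths, then again with buffers allocated to fit.
+ * This is the pattern libdrm's drmGetVersion() follows.
+ *
+ * \code
+ *	struct drm_version v;
+ *
+ *	memset(&v, 0, sizeof(v));
+ *	ioctl(fd, DRM_IOCTL_VERSION, &v);
+ *	v.name = malloc(v.name_len + 1);
+ *	v.date = malloc(v.date_len + 1);
+ *	v.desc = malloc(v.desc_len + 1);
+ *	ioctl(fd, DRM_IOCTL_VERSION, &v);
+ * \endcode
+ */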
+
+/**
+ * Called whenever a process performs an ioctl on /dev/drm.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ *
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * privileges if so required, and dispatches to the respective function.
+ */
+long drm_ioctl(struct file *filp,
+	      unsigned int cmd, unsigned long arg)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev;
+	const struct drm_ioctl_desc *ioctl = NULL;
+	drm_ioctl_t *func;
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	int retcode = -EINVAL;
+	char stack_kdata[128];
+	char *kdata = NULL;
+	unsigned int usize, asize;
+
+	dev = file_priv->minor->dev;
+
+	if (drm_device_is_unplugged(dev))
+		return -ENODEV;
+
+	atomic_inc(&dev->ioctl_count);
+	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
+	++file_priv->ioctl_count;
+
+	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+		goto err_i1;
+	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+	    (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+		u32 drv_size;
+		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+		drv_size = _IOC_SIZE(ioctl->cmd_drv);
+		usize = asize = _IOC_SIZE(cmd);
+		if (drv_size > asize)
+			asize = drv_size;
+		cmd = ioctl->cmd_drv;
+	} else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+		u32 drv_size;
+
+		ioctl = &drm_ioctls[nr];
+
+		drv_size = _IOC_SIZE(ioctl->cmd);
+		usize = asize = _IOC_SIZE(cmd);
+		if (drv_size > asize)
+			asize = drv_size;
+
+		cmd = ioctl->cmd;
+	} else
+		goto err_i1;
+
+	DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+		  task_pid_nr(current),
+		  (long)old_encode_dev(file_priv->minor->device),
+		  file_priv->authenticated, ioctl->name);
+
+	/* Do not trust userspace, use our own definition */
+	func = ioctl->func;
+	/* is there a local override? */
+	if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
+		func = dev->driver->dma_ioctl;
+
+	if (!func) {
+		DRM_DEBUG("no function\n");
+		retcode = -EINVAL;
+	} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
+		   ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+		   ((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
+		   (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
+		retcode = -EACCES;
+	} else {
+		if (cmd & (IOC_IN | IOC_OUT)) {
+			if (asize <= sizeof(stack_kdata)) {
+				kdata = stack_kdata;
+			} else {
+				kdata = kmalloc(asize, GFP_KERNEL);
+				if (!kdata) {
+					retcode = -ENOMEM;
+					goto err_i1;
+				}
+			}
+			if (asize > usize)
+				memset(kdata + usize, 0, asize - usize);
+		}
+
+		if (cmd & IOC_IN) {
+			if (copy_from_user(kdata, (void __user *)arg,
+					   usize) != 0) {
+				retcode = -EFAULT;
+				goto err_i1;
+			}
+		} else
+			memset(kdata, 0, usize);
+
+		if (ioctl->flags & DRM_UNLOCKED)
+			retcode = func(dev, kdata, file_priv);
+		else {
+			mutex_lock(&drm_global_mutex);
+			retcode = func(dev, kdata, file_priv);
+			mutex_unlock(&drm_global_mutex);
+		}
+
+		if (cmd & IOC_OUT) {
+			if (copy_to_user((void __user *)arg, kdata,
+					 usize) != 0)
+				retcode = -EFAULT;
+		}
+	}
+
+err_i1:
+	if (!ioctl)
+		DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+			  task_pid_nr(current),
+			  (long)old_encode_dev(file_priv->minor->device),
+			  file_priv->authenticated, cmd, nr);
+
+	if (kdata != stack_kdata)
+		kfree(kdata);
+	atomic_dec(&dev->ioctl_count);
+	if (retcode)
+		DRM_DEBUG("ret = %d\n", retcode);
+	return retcode;
+}
+EXPORT_SYMBOL(drm_ioctl);
+
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
+{
+	struct drm_map_list *entry;
+
+	list_for_each_entry(entry, &dev->maplist, head) {
+		if (entry->map && entry->map->type == _DRM_SHM &&
+		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+			return entry->map;
+		}
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(drm_getsarea);
diff --git a/linux-imx/drivers/gpu/drm/drm_edid.c b/linux-imx/drivers/gpu/drm/drm_edid.c
new file mode 100644
index 0000000..83f0ba5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_edid.c
@@ -0,0 +1,3048 @@
+/*
+ * Copyright (c) 2006 Luc Verhaegen (quirks list)
+ * Copyright (c) 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
+ * FB layer.
+ *   Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+
+#define version_greater(edid, maj, min) \
+	(((edid)->version > (maj)) || \
+	 ((edid)->version == (maj) && (edid)->revision > (min)))
+
+#define EDID_EST_TIMINGS 16
+#define EDID_STD_TIMINGS 8
+#define EDID_DETAILED_TIMINGS 4
+
+/*
+ * EDID blocks out in the wild have a variety of bugs, try to collect
+ * them here (note that userspace may work around broken monitors first,
+ * but fixes should make their way here so that the kernel "just works"
+ * on as many displays as possible).
+ */
+
+/* First detailed mode wrong, use largest 60Hz mode */
+#define EDID_QUIRK_PREFER_LARGE_60		(1 << 0)
+/* Reported 135MHz pixel clock is too high, needs adjustment */
+#define EDID_QUIRK_135_CLOCK_TOO_HIGH		(1 << 1)
+/* Prefer the largest mode at 75 Hz */
+#define EDID_QUIRK_PREFER_LARGE_75		(1 << 2)
+/* Detail timing is in cm not mm */
+#define EDID_QUIRK_DETAILED_IN_CM		(1 << 3)
+/* Detailed timing descriptors have bogus size values, so just take the
+ * maximum size and use that.
+ */
+#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE	(1 << 4)
+/* Monitor forgot to set the first detailed is preferred bit. */
+#define EDID_QUIRK_FIRST_DETAILED_PREFERRED	(1 << 5)
+/* use +hsync +vsync for detailed mode */
+#define EDID_QUIRK_DETAILED_SYNC_PP		(1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING	(1 << 7)
+/* Force 8bpc */
+#define EDID_QUIRK_FORCE_8BPC			(1 << 8)
+
+struct detailed_mode_closure {
+	struct drm_connector *connector;
+	struct edid *edid;
+	bool preferred;
+	u32 quirks;
+	int modes;
+};
+
+#define LEVEL_DMT	0
+#define LEVEL_GTF	1
+#define LEVEL_GTF2	2
+#define LEVEL_CVT	3
+
+static struct edid_quirk {
+	char vendor[4];
+	int product_id;
+	u32 quirks;
+} edid_quirk_list[] = {
+	/* Acer AL1706 */
+	{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+	/* Acer F51 */
+	{ "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
+	/* Unknown Acer */
+	{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+	/* Belinea 10 15 55 */
+	{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
+	{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+
+	/* Envision Peripherals, Inc. EN-7100e */
+	{ "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+	/* Envision EN2028 */
+	{ "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
+
+	/* Funai Electronics PM36B */
+	{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
+	  EDID_QUIRK_DETAILED_IN_CM },
+
+	/* LG Philips LCD LP154W01-A5 */
+	{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+	{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+
+	/* Philips 107p5 CRT */
+	{ "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+	/* Proview AY765C */
+	{ "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+
+	/* Samsung SyncMaster 205BW.  Note: irony */
+	{ "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
+	/* Samsung SyncMaster 22[5-6]BW */
+	{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
+	{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+	/* ViewSonic VA2026w */
+	{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+
+	/* Medion MD 30217 PG */
+	{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+
+	/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+	{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+};
+
+/*
+ * Autogenerated from the DMT spec.
+ * This table is copied from xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode drm_dmt_modes[] = {
+	/* 640x350@85Hz */
+	{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+		   736, 832, 0, 350, 382, 385, 445, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 640x400@85Hz */
+	{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+		   736, 832, 0, 400, 401, 404, 445, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 720x400@85Hz */
+	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
+		   828, 936, 0, 400, 401, 404, 446, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 640x480@60Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+		   752, 800, 0, 480, 489, 492, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 640x480@72Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+		   704, 832, 0, 480, 489, 492, 520, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 640x480@75Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+		   720, 840, 0, 480, 481, 484, 500, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 640x480@85Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
+		   752, 832, 0, 480, 481, 484, 509, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 800x600@56Hz */
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+		   896, 1024, 0, 600, 601, 603, 625, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 800x600@60Hz */
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+		   968, 1056, 0, 600, 601, 605, 628, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 800x600@72Hz */
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+		   976, 1040, 0, 600, 637, 643, 666, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 800x600@75Hz */
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+		   896, 1056, 0, 600, 601, 604, 625, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 800x600@85Hz */
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
+		   896, 1048, 0, 600, 601, 604, 631, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 800x600@120Hz RB */
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
+		   880, 960, 0, 600, 603, 607, 636, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 848x480@60Hz */
+	{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
+		   976, 1088, 0, 480, 486, 494, 517, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1024x768@43Hz, interlace */
+	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+		   1208, 1264, 0, 768, 768, 772, 817, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+			DRM_MODE_FLAG_INTERLACE) },
+	/* 1024x768@60Hz */
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+		   1184, 1344, 0, 768, 771, 777, 806, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1024x768@70Hz */
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+		   1184, 1328, 0, 768, 771, 777, 806, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1024x768@75Hz */
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
+		   1136, 1312, 0, 768, 769, 772, 800, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1024x768@85Hz */
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
+		   1168, 1376, 0, 768, 769, 772, 808, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1024x768@120Hz RB */
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
+		   1104, 1184, 0, 768, 771, 775, 813, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1152x864@75Hz */
+	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+		   1344, 1600, 0, 864, 865, 868, 900, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x768@60Hz RB */
+	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
+		   1360, 1440, 0, 768, 771, 778, 790, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1280x768@60Hz */
+	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+		   1472, 1664, 0, 768, 771, 778, 798, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x768@75Hz */
+	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
+		   1488, 1696, 0, 768, 771, 778, 805, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1280x768@85Hz */
+	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
+		   1496, 1712, 0, 768, 771, 778, 809, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x768@120Hz RB */
+	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
+		   1360, 1440, 0, 768, 771, 778, 813, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1280x800@60Hz RB */
+	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
+		   1360, 1440, 0, 800, 803, 809, 823, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1280x800@60Hz */
+	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+		   1480, 1680, 0, 800, 803, 809, 831, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1280x800@75Hz */
+	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
+		   1488, 1696, 0, 800, 803, 809, 838, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x800@85Hz */
+	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
+		   1496, 1712, 0, 800, 803, 809, 843, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x800@120Hz RB */
+	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
+		   1360, 1440, 0, 800, 803, 809, 847, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1280x960@60Hz */
+	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+		   1488, 1800, 0, 960, 961, 964, 1000, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x960@85Hz */
+	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
+		   1504, 1728, 0, 960, 961, 964, 1011, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x960@120Hz RB */
+	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
+		   1360, 1440, 0, 960, 963, 967, 1017, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1280x1024@60Hz */
+	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x1024@75Hz */
+	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x1024@85Hz */
+	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
+		   1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x1024@120Hz RB */
+	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
+		   1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1360x768@60Hz */
+	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+		   1536, 1792, 0, 768, 771, 777, 795, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1360x768@120Hz RB */
+	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
+		   1440, 1520, 0, 768, 771, 776, 813, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1400x1050@60Hz RB */
+	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
+		   1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1400x1050@60Hz */
+	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1400x1050@75Hz */
+	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
+		   1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1400x1050@85Hz */
+	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
+		   1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1400x1050@120Hz RB */
+	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
+		   1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x900@60Hz RB */
+	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
+		   1520, 1600, 0, 900, 903, 909, 926, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1440x900@60Hz */
+	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+		   1672, 1904, 0, 900, 903, 909, 934, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1440x900@75Hz */
+	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
+		   1688, 1936, 0, 900, 903, 909, 942, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1440x900@85Hz */
+	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
+		   1696, 1952, 0, 900, 903, 909, 948, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1440x900@120Hz RB */
+	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
+		   1520, 1600, 0, 900, 903, 909, 953, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1600x1200@60Hz */
+	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1600x1200@65Hz */
+	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
+		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1600x1200@70Hz */
+	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
+		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1600x1200@75Hz */
+	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
+		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1600x1200@85Hz */
+	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
+		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1600x1200@120Hz RB */
+	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
+		   1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1680x1050@60Hz RB */
+	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
+		   1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1680x1050@60Hz */
+	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1680x1050@75Hz */
+	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
+		   1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1680x1050@85Hz */
+	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
+		   1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1680x1050@120Hz RB */
+	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
+		   1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1792x1344@60Hz */
+	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1792x1344@75Hz */
+	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
+		   2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1792x1344@120Hz RB */
+	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
+		   1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1856x1392@60Hz */
+	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1856x1392@75Hz */
+	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
+		   2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1856x1392@120Hz RB */
+	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
+		   1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1920x1200@60Hz RB */
+	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
+		   2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1920x1200@60Hz */
+	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1200@75Hz */
+	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
+		   2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1200@85Hz */
+	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
+		   2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1200@120Hz RB */
+	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
+		   2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1920x1440@60Hz */
+	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1440@75Hz */
+	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
+		   2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1440@120Hz RB */
+	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
+		   2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 2560x1600@60Hz RB */
+	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
+		   2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 2560x1600@60Hz */
+	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 2560x1600@75Hz */
+	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
+		   3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 2560x1600@85Hz */
+	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
+		   3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 2560x1600@120Hz RB */
+	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
+		   2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+};
+
+static const struct drm_display_mode edid_est_modes[] = {
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+		   968, 1056, 0, 600, 601, 605, 628, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+		   896, 1024, 0, 600, 601, 603,  625, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+		   720, 840, 0, 480, 481, 484, 500, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+		   704,  832, 0, 480, 489, 491, 520, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+		   768,  864, 0, 480, 483, 486, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+		   752, 800, 0, 480, 490, 492, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+		   846, 900, 0, 400, 421, 423,  449, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+		   846,  900, 0, 400, 412, 414, 449, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+		   1136, 1312, 0,  768, 769, 772, 800, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+		   1184, 1328, 0,  768, 771, 777, 806, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+		   1184, 1344, 0,  768, 771, 777, 806, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+	{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+		   1208, 1264, 0, 768, 768, 776, 817, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+	{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+		   928, 1152, 0, 624, 625, 628, 667, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+		   896, 1056, 0, 600, 601, 604,  625, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+		   976, 1040, 0, 600, 637, 643, 666, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+		   1344, 1600, 0,  864, 865, 868, 900, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+struct minimode {
+	short w;
+	short h;
+	short r;
+	short rb;
+};
+
+static const struct minimode est3_modes[] = {
+	/* byte 6 */
+	{ 640, 350, 85, 0 },
+	{ 640, 400, 85, 0 },
+	{ 720, 400, 85, 0 },
+	{ 640, 480, 85, 0 },
+	{ 848, 480, 60, 0 },
+	{ 800, 600, 85, 0 },
+	{ 1024, 768, 85, 0 },
+	{ 1152, 864, 75, 0 },
+	/* byte 7 */
+	{ 1280, 768, 60, 1 },
+	{ 1280, 768, 60, 0 },
+	{ 1280, 768, 75, 0 },
+	{ 1280, 768, 85, 0 },
+	{ 1280, 960, 60, 0 },
+	{ 1280, 960, 85, 0 },
+	{ 1280, 1024, 60, 0 },
+	{ 1280, 1024, 85, 0 },
+	/* byte 8 */
+	{ 1360, 768, 60, 0 },
+	{ 1440, 900, 60, 1 },
+	{ 1440, 900, 60, 0 },
+	{ 1440, 900, 75, 0 },
+	{ 1440, 900, 85, 0 },
+	{ 1400, 1050, 60, 1 },
+	{ 1400, 1050, 60, 0 },
+	{ 1400, 1050, 75, 0 },
+	/* byte 9 */
+	{ 1400, 1050, 85, 0 },
+	{ 1680, 1050, 60, 1 },
+	{ 1680, 1050, 60, 0 },
+	{ 1680, 1050, 75, 0 },
+	{ 1680, 1050, 85, 0 },
+	{ 1600, 1200, 60, 0 },
+	{ 1600, 1200, 65, 0 },
+	{ 1600, 1200, 70, 0 },
+	/* byte 10 */
+	{ 1600, 1200, 75, 0 },
+	{ 1600, 1200, 85, 0 },
+	{ 1792, 1344, 60, 0 },
+	{ 1792, 1344, 85, 0 },
+	{ 1856, 1392, 60, 0 },
+	{ 1856, 1392, 75, 0 },
+	{ 1920, 1200, 60, 1 },
+	{ 1920, 1200, 60, 0 },
+	/* byte 11 */
+	{ 1920, 1200, 75, 0 },
+	{ 1920, 1200, 85, 0 },
+	{ 1920, 1440, 60, 0 },
+	{ 1920, 1440, 75, 0 },
+};
+
+static const struct minimode extra_modes[] = {
+	{ 1024, 576,  60, 0 },
+	{ 1366, 768,  60, 0 },
+	{ 1600, 900,  60, 0 },
+	{ 1680, 945,  60, 0 },
+	{ 1920, 1080, 60, 0 },
+	{ 2048, 1152, 60, 0 },
+	{ 2048, 1536, 60, 0 },
+};
+
+/*
+ * Probably taken from CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+	/* 1 - 640x480@60Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+		   752, 800, 0, 480, 490, 492, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
+	/* 2 - 720x480@60Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
+	/* 3 - 720x480@60Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
+	/* 4 - 1280x720@60Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, },
+	/* 5 - 1920x1080i@60Hz */
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+			DRM_MODE_FLAG_INTERLACE),
+	  .vrefresh = 60, },
+	/* 6 - 1440x480i@60Hz */
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 60, },
+	/* 7 - 1440x480i@60Hz */
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 60, },
+	/* 8 - 1440x240@60Hz */
+	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+		   1602, 1716, 0, 240, 244, 247, 262, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 60, },
+	/* 9 - 1440x240@60Hz */
+	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+		   1602, 1716, 0, 240, 244, 247, 262, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 60, },
+	/* 10 - 2880x480i@60Hz */
+	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+		   3204, 3432, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE),
+	  .vrefresh = 60, },
+	/* 11 - 2880x480i@60Hz */
+	{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+		   3204, 3432, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE),
+	  .vrefresh = 60, },
+	/* 12 - 2880x240@60Hz */
+	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+		   3204, 3432, 0, 240, 244, 247, 262, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
+	/* 13 - 2880x240@60Hz */
+	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+		   3204, 3432, 0, 240, 244, 247, 262, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
+	/* 14 - 1440x480@60Hz */
+	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+		   1596, 1716, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
+	/* 15 - 1440x480@60Hz */
+	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+		   1596, 1716, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
+	/* 16 - 1920x1080@60Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, },
+	/* 17 - 720x576@50Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 50, },
+	/* 18 - 720x576@50Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 50, },
+	/* 19 - 1280x720@50Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, },
+	/* 20 - 1920x1080i@50Hz */
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+			DRM_MODE_FLAG_INTERLACE),
+	  .vrefresh = 50, },
+	/* 21 - 1440x576i@50Hz */
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 50, },
+	/* 22 - 1440x576i@50Hz */
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 50, },
+	/* 23 - 1440x288@50Hz */
+	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+		   1590, 1728, 0, 288, 290, 293, 312, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 50, },
+	/* 24 - 1440x288@50Hz */
+	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+		   1590, 1728, 0, 288, 290, 293, 312, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 50, },
+	/* 25 - 2880x576i@50Hz */
+	{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+		   3180, 3456, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE),
+	  .vrefresh = 50, },
+	/* 26 - 2880x576i@50Hz */
+	{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+		   3180, 3456, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE),
+	  .vrefresh = 50, },
+	/* 27 - 2880x288@50Hz */
+	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+		   3180, 3456, 0, 288, 290, 293, 312, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 50, },
+	/* 28 - 2880x288@50Hz */
+	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+		   3180, 3456, 0, 288, 290, 293, 312, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 50, },
+	/* 29 - 1440x576@50Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+		   1592, 1728, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 50, },
+	/* 30 - 1440x576@50Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+		   1592, 1728, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 50, },
+	/* 31 - 1920x1080@50Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, },
+	/* 32 - 1920x1080@24Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+		   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, },
+	/* 33 - 1920x1080@25Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, },
+	/* 34 - 1920x1080@30Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, },
+	/* 35 - 2880x480@60Hz */
+	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+		   3192, 3432, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
+	/* 36 - 2880x480@60Hz */
+	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+		   3192, 3432, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 60, },
+	/* 37 - 2880x576@50Hz */
+	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+		   3184, 3456, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 50, },
+	/* 38 - 2880x576@50Hz */
+	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+		   3184, 3456, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 50, },
+	/* 39 - 1920x1080i@50Hz */
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+		   2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE),
+	  .vrefresh = 50, },
+	/* 40 - 1920x1080i@100Hz */
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+			DRM_MODE_FLAG_INTERLACE),
+	  .vrefresh = 100, },
+	/* 41 - 1280x720@100Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 100, },
+	/* 42 - 720x576@100Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 100, },
+	/* 43 - 720x576@100Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 100, },
+	/* 44 - 1440x576i@100Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 100, },
+	/* 45 - 1440x576i@100Hz */
+	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 100, },
+	/* 46 - 1920x1080i@120Hz */
+	{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+			DRM_MODE_FLAG_INTERLACE),
+	  .vrefresh = 120, },
+	/* 47 - 1280x720@120Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, },
+	/* 48 - 720x480@120Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 120, },
+	/* 49 - 720x480@120Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 120, },
+	/* 50 - 1440x480i@120Hz */
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 120, },
+	/* 51 - 1440x480i@120Hz */
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 120, },
+	/* 52 - 720x576@200Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 200, },
+	/* 53 - 720x576@200Hz */
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+		   796, 864, 0, 576, 581, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 200, },
+	/* 54 - 1440x576i@200Hz */
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 200, },
+	/* 55 - 1440x576i@200Hz */
+	{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+		   1590, 1728, 0, 576, 580, 586, 625, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 200, },
+	/* 56 - 720x480@240Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 240, },
+	/* 57 - 720x480@240Hz */
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+		   798, 858, 0, 480, 489, 495, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+	  .vrefresh = 240, },
+	/* 58 - 1440x480i@240Hz */
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 240, },
+	/* 59 - 1440x480i@240Hz */
+	{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+		   1602, 1716, 0, 480, 488, 494, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+			DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+	  .vrefresh = 240, },
+	/* 60 - 1280x720@24Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, },
+	/* 61 - 1280x720@25Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+		   3740, 3960, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, },
+	/* 62 - 1280x720@30Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, },
+	/* 63 - 1920x1080@120Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, },
+	/* 64 - 1920x1080@100Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 100, },
+};
+
+/*** DDC fetch and block validation ***/
+
+static const u8 edid_header[] = {
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+};
+
+/*
+ * Sanity check the header of the base EDID block.  Return 8 if the header
+ * is perfect, down to 0 if it's totally wrong.
+ */
+int drm_edid_header_is_valid(const u8 *raw_edid)
+{
+	int i, score = 0;
+
+	for (i = 0; i < sizeof(edid_header); i++)
+		if (raw_edid[i] == edid_header[i])
+			score++;
+
+	return score;
+}
+EXPORT_SYMBOL(drm_edid_header_is_valid);
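+
+/*
+ * Example (illustrative): a header of 00 ff ff ff ff ff bf 00, i.e. one
+ * flipped bit in byte 6, scores 7.  That still clears the default
+ * edid_fixup threshold of 6 below, so drm_edid_block_valid() will rewrite
+ * the header rather than reject the block.
+ */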
+
+static int edid_fixup __read_mostly = 6;
+module_param_named(edid_fixup, edid_fixup, int, 0400);
+MODULE_PARM_DESC(edid_fixup,
+		 "Minimum number of valid EDID header bytes (0-8, default 6)");
+
+/*
+ * Sanity check the EDID block (base or extension).  Return 0 if the block
+ * doesn't check out, or 1 if it's valid.
+ */
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
+{
+	int i;
+	u8 csum = 0;
+	struct edid *edid = (struct edid *)raw_edid;
+
+	if (edid_fixup > 8 || edid_fixup < 0)
+		edid_fixup = 6;
+
+	if (block == 0) {
+		int score = drm_edid_header_is_valid(raw_edid);
+		if (score == 8) {
+			/* perfect header, nothing to fix */
+		} else if (score >= edid_fixup) {
+			DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+			memcpy(raw_edid, edid_header, sizeof(edid_header));
+		} else {
+			goto bad;
+		}
+	}
+
+	for (i = 0; i < EDID_LENGTH; i++)
+		csum += raw_edid[i];
+	if (csum) {
+		if (print_bad_edid) {
+			DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+		}
+
+		/* allow CEA to slide through, switches mangle this */
+		if (raw_edid[0] != 0x02)
+			goto bad;
+	}
+
+	/* per-block-type checks */
+	switch (raw_edid[0]) {
+	case 0: /* base */
+		if (edid->version != 1) {
+			DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+			goto bad;
+		}
+
+		if (edid->revision > 4)
+			DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+		break;
+
+	default:
+		break;
+	}
+
+	return 1;
+
+bad:
+	if (raw_edid && print_bad_edid) {
+		printk(KERN_ERR "Raw EDID:\n");
+		print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
+			       raw_edid, EDID_LENGTH, false);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_edid_block_valid);
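+
+/*
+ * Checksum example (illustrative): byte 127 of every block is chosen by
+ * the vendor so that all 128 bytes sum to 0 mod 256.  The checksum loop
+ * in drm_edid_block_valid() is therefore equivalent to:
+ *
+ *	u8 sum = 0;
+ *	for (i = 0; i < EDID_LENGTH; i++)
+ *		sum += raw_edid[i];
+ *	valid = (sum == 0);
+ */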
+
+/**
+ * drm_edid_is_valid - sanity check EDID data
+ * @edid: EDID data
+ *
+ * Sanity-check an entire EDID record (including extensions)
+ */
+bool drm_edid_is_valid(struct edid *edid)
+{
+	int i;
+	u8 *raw = (u8 *)edid;
+
+	if (!edid)
+		return false;
+
+	for (i = 0; i <= edid->extensions; i++)
+		if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
+			return false;
+
+	return true;
+}
+EXPORT_SYMBOL(drm_edid_is_valid);
+
+#define DDC_SEGMENT_ADDR 0x30
+/**
+ * drm_do_probe_ddc_edid - get EDID information via I2C
+ * @adapter: i2c device adaptor
+ * @buf: EDID data buffer to be filled
+ * @block: EDID block number to fetch
+ * @len: EDID data buffer length
+ *
+ * Try to fetch EDID information by calling the i2c driver function.
+ *
+ * Return: 0 on success or -1 on failure.
+ */
+static int
+drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+		      int block, int len)
+{
+	unsigned char start = block * EDID_LENGTH;
+	unsigned char segment = block >> 1;
+	unsigned char xfers = segment ? 3 : 2;
+	int ret, retries = 5;
+
+	/* The core i2c driver will automatically retry the transfer if the
+	 * adapter reports EAGAIN. However, we find that bit-banging transfers
+	 * are susceptible to errors on a heavily loaded machine and
+	 * generate spurious NAKs and timeouts. Retrying the transfer
+	 * of the individual block a few times seems to overcome this.
+	 */
+	do {
+		struct i2c_msg msgs[] = {
+			{
+				.addr	= DDC_SEGMENT_ADDR,
+				.flags	= 0,
+				.len	= 1,
+				.buf	= &segment,
+			}, {
+				.addr	= DDC_ADDR,
+				.flags	= 0,
+				.len	= 1,
+				.buf	= &start,
+			}, {
+				.addr	= DDC_ADDR,
+				.flags	= I2C_M_RD,
+				.len	= len,
+				.buf	= buf,
+			}
+		};
+
+		/*
+		 * Avoid sending the segment addr to not upset non-compliant
+		 * ddc monitors.
+		 */
+		ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
+
+		if (ret == -ENXIO) {
+			DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
+					adapter->name);
+			break;
+		}
+	} while (ret != xfers && --retries);
+
+	return ret == xfers ? 0 : -1;
+}
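+
+/*
+ * E-DDC addressing example (illustrative): fetching block 3 gives
+ * segment = 3 >> 1 = 1 and start = (3 * EDID_LENGTH) & 0xff = 0x80, so
+ * the transfer writes 0x01 to the segment pointer at 0x30, writes 0x80
+ * to 0x50, then reads len bytes from 0x50.  Blocks 0 and 1 sit in
+ * segment 0 and skip the segment write entirely (xfers == 2).
+ */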
+
+static bool drm_edid_is_zero(u8 *in_edid, int length)
+{
+	if (memchr_inv(in_edid, 0, length))
+		return false;
+
+	return true;
+}
+
+static u8 *
+drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+	int i, j = 0, valid_extensions = 0;
+	u8 *block, *new;
+	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+
+	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+		return NULL;
+
+	/* base block fetch */
+	for (i = 0; i < 4; i++) {
+		if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
+			goto out;
+		if (drm_edid_block_valid(block, 0, print_bad_edid))
+			break;
+		if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
+			connector->null_edid_counter++;
+			goto carp;
+		}
+	}
+	if (i == 4)
+		goto carp;
+
+	/* if there's no extensions, we're done */
+	if (block[0x7e] == 0)
+		return block;
+
+	new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+	if (!new)
+		goto out;
+	block = new;
+
+	for (j = 1; j <= block[0x7e]; j++) {
+		for (i = 0; i < 4; i++) {
+			if (drm_do_probe_ddc_edid(adapter,
+				  block + (valid_extensions + 1) * EDID_LENGTH,
+				  j, EDID_LENGTH))
+				goto out;
+			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
+				valid_extensions++;
+				break;
+			}
+		}
+
+		if (i == 4 && print_bad_edid) {
+			dev_warn(connector->dev->dev,
+			 "%s: Ignoring invalid EDID block %d.\n",
+			 drm_get_connector_name(connector), j);
+
+			connector->bad_edid_counter++;
+		}
+	}
+
+	if (valid_extensions != block[0x7e]) {
+		block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+		block[0x7e] = valid_extensions;
+		new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+		if (!new)
+			goto out;
+		block = new;
+	}
+
+	return block;
+
+carp:
+	if (print_bad_edid) {
+		dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
+			 drm_get_connector_name(connector), j);
+	}
+	connector->bad_edid_counter++;
+
+out:
+	kfree(block);
+	return NULL;
+}
+
+/**
+ * drm_probe_ddc - probe DDC presence
+ * @adapter: i2c device adaptor
+ *
+ * Return: true if a sink responds on the DDC address, false otherwise.
+ */
+bool
+drm_probe_ddc(struct i2c_adapter *adapter)
+{
+	unsigned char out;
+
+	return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
+}
+EXPORT_SYMBOL(drm_probe_ddc);
+
+/**
+ * drm_get_edid - get EDID data, if available
+ * @connector: connector we're probing
+ * @adapter: i2c adapter to use for DDC
+ *
+ * Poke the given i2c channel to grab EDID data if possible.  If found,
+ * attach it to the connector.
+ *
+ * Return edid data or NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid(struct drm_connector *connector,
+			  struct i2c_adapter *adapter)
+{
+	struct edid *edid = NULL;
+
+	if (drm_probe_ddc(adapter))
+		edid = (struct edid *)drm_do_get_edid(connector, adapter);
+
+	return edid;
+}
+EXPORT_SYMBOL(drm_get_edid);
+
+/*** EDID parsing ***/
+
+/**
+ * edid_vendor - match a string against EDID's obfuscated vendor field
+ * @edid: EDID to match
+ * @vendor: vendor string
+ *
+ * Returns true if @vendor is in @edid, false otherwise
+ */
+static bool edid_vendor(struct edid *edid, char *vendor)
+{
+	char edid_vendor[3];
+
+	edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
+	edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
+			  ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
+	edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
+
+	return !strncmp(edid_vendor, vendor, 3);
+}
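+
+/*
+ * Example: mfg_id = { 0x4c, 0x2d } unpacks to three 5-bit letters with
+ * 'A' == 1: (0x4c & 0x7c) >> 2 = 19 -> 'S', (0x4c & 0x3) << 3 |
+ * (0x2d & 0xe0) >> 5 = 1 -> 'A', and 0x2d & 0x1f = 13 -> 'M', so
+ * edid_vendor(edid, "SAM") matches.
+ */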
+
+/**
+ * edid_get_quirks - return quirk flags for a given EDID
+ * @edid: EDID to process
+ *
+ * This tells subsequent routines what fixes they need to apply.
+ */
+static u32 edid_get_quirks(struct edid *edid)
+{
+	struct edid_quirk *quirk;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+		quirk = &edid_quirk_list[i];
+
+		if (edid_vendor(edid, quirk->vendor) &&
+		    (EDID_PRODUCT_ID(edid) == quirk->product_id))
+			return quirk->quirks;
+	}
+
+	return 0;
+}
+
+#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
+#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - (r)))
+
+/**
+ * edid_fixup_preferred - set preferred modes based on quirk list
+ * @connector: has mode list to fix up
+ * @quirks: quirks list
+ *
+ * Walk the mode list for @connector, clearing the preferred status
+ * on existing modes and setting it anew for the right mode ala @quirks.
+ */
+static void edid_fixup_preferred(struct drm_connector *connector,
+				 u32 quirks)
+{
+	struct drm_display_mode *t, *cur_mode, *preferred_mode;
+	int target_refresh = 0;
+
+	if (list_empty(&connector->probed_modes))
+		return;
+
+	if (quirks & EDID_QUIRK_PREFER_LARGE_60)
+		target_refresh = 60;
+	if (quirks & EDID_QUIRK_PREFER_LARGE_75)
+		target_refresh = 75;
+
+	preferred_mode = list_first_entry(&connector->probed_modes,
+					  struct drm_display_mode, head);
+
+	list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
+		cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+
+		if (cur_mode == preferred_mode)
+			continue;
+
+		/* Largest mode is preferred */
+		if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
+			preferred_mode = cur_mode;
+
+		/* At a given size, try to get closest to target refresh */
+		if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
+		    MODE_REFRESH_DIFF(cur_mode, target_refresh) <
+		    MODE_REFRESH_DIFF(preferred_mode, target_refresh)) {
+			preferred_mode = cur_mode;
+		}
+	}
+
+	preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
+}
+
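+/*
+ * CVT reduced-blanking timings use fixed horizontal blanking: 160 pixels
+ * of total blank, 80 pixels from end of active to end of sync (48 front
+ * porch + 32 sync pulse), and a 3-line vertical front porch.  Matching on
+ * exactly those constants identifies an RB mode.
+ */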
+static bool
+mode_is_rb(const struct drm_display_mode *mode)
+{
+	return (mode->htotal - mode->hdisplay == 160) &&
+	       (mode->hsync_end - mode->hdisplay == 80) &&
+	       (mode->hsync_end - mode->hsync_start == 32) &&
+	       (mode->vsync_start - mode->vdisplay == 3);
+}
+
+/*
+ * drm_mode_find_dmt - Create a copy of a mode if present in DMT
+ * @dev: Device to duplicate against
+ * @hsize: Mode width
+ * @vsize: Mode height
+ * @fresh: Mode refresh rate
+ * @rb: Mode reduced-blanking-ness
+ *
+ * Walk the DMT mode list looking for a match for the given parameters.
+ * Return a newly allocated copy of the mode, or NULL if not found.
+ */
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+					   int hsize, int vsize, int fresh,
+					   bool rb)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
+		const struct drm_display_mode *ptr = &drm_dmt_modes[i];
+		if (hsize != ptr->hdisplay)
+			continue;
+		if (vsize != ptr->vdisplay)
+			continue;
+		if (fresh != drm_mode_vrefresh(ptr))
+			continue;
+		if (rb != mode_is_rb(ptr))
+			continue;
+
+		return drm_mode_duplicate(dev, ptr);
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(drm_mode_find_dmt);
+
+typedef void detailed_cb(struct detailed_timing *timing, void *closure);
+
+static void
+cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+	int i, n = 0;
+	u8 d = ext[0x02];
+	u8 *det_base = ext + d;
+
+	n = (127 - d) / 18;
+	for (i = 0; i < n; i++)
+		cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
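+
+/*
+ * Example: a CEA extension with d = ext[0x02] = 0x54 puts the first DTD
+ * at offset 84, leaving (127 - 84) / 18 = 2 complete 18-byte detailed
+ * timings before the checksum byte.
+ */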
+
+static void
+vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+	unsigned int i, n = min((int)ext[0x02], 6);
+	u8 *det_base = ext + 5;
+
+	if (ext[0x01] != 1)
+		return; /* unknown version */
+
+	for (i = 0; i < n; i++)
+		cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+static void
+drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
+{
+	int i;
+	struct edid *edid = (struct edid *)raw_edid;
+
+	if (edid == NULL)
+		return;
+
+	for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
+		cb(&(edid->detailed_timings[i]), closure);
+
+	for (i = 1; i <= raw_edid[0x7e]; i++) {
+		u8 *ext = raw_edid + (i * EDID_LENGTH);
+		switch (*ext) {
+		case CEA_EXT:
+			cea_for_each_detailed_block(ext, cb, closure);
+			break;
+		case VTB_EXT:
+			vtb_for_each_detailed_block(ext, cb, closure);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static void
+is_rb(struct detailed_timing *t, void *data)
+{
+	u8 *r = (u8 *)t;
+	if (r[3] == EDID_DETAIL_MONITOR_RANGE)
+		if (r[15] & 0x10)
+			*(bool *)data = true;
+}
+
+/* EDID 1.4 defines this explicitly.  For EDID 1.3, we guess, badly. */
+static bool
+drm_monitor_supports_rb(struct edid *edid)
+{
+	if (edid->revision >= 4) {
+		bool ret = false;
+		drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+		return ret;
+	}
+
+	return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0);
+}
+
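+/*
+ * A range descriptor that advertises a secondary GTF curve (r[10] == 0x02)
+ * carries the curve parameters in bytes 12-17: start frequency / 2, 2C,
+ * M (LSB then MSB), K, and 2J.  The helpers below hand them back in the
+ * form drm_gtf_mode_complex() expects.
+ */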
+static void
+find_gtf2(struct detailed_timing *t, void *data)
+{
+	u8 *r = (u8 *)t;
+	if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+		*(u8 **)data = r;
+}
+
+/* Secondary GTF curve kicks in above some break frequency */
+static int
+drm_gtf2_hbreak(struct edid *edid)
+{
+	u8 *r = NULL;
+	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+	return r ? (r[12] * 2) : 0;
+}
+
+static int
+drm_gtf2_2c(struct edid *edid)
+{
+	u8 *r = NULL;
+	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+	return r ? r[13] : 0;
+}
+
+static int
+drm_gtf2_m(struct edid *edid)
+{
+	u8 *r = NULL;
+	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+	return r ? (r[15] << 8) + r[14] : 0;
+}
+
+static int
+drm_gtf2_k(struct edid *edid)
+{
+	u8 *r = NULL;
+	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+	return r ? r[16] : 0;
+}
+
+static int
+drm_gtf2_2j(struct edid *edid)
+{
+	u8 *r = NULL;
+	drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r);
+	return r ? r[17] : 0;
+}
+
+/**
+ * standard_timing_level - get std. timing level (CVT/GTF/DMT)
+ * @edid: EDID block to scan
+ */
+static int standard_timing_level(struct edid *edid)
+{
+	if (edid->revision >= 2) {
+		if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
+			return LEVEL_CVT;
+		if (drm_gtf2_hbreak(edid))
+			return LEVEL_GTF2;
+		return LEVEL_GTF;
+	}
+	return LEVEL_DMT;
+}
+
+/*
+ * 0 is reserved.  The spec says 0x01 fill for unused timings.  Some old
+ * monitors fill with ascii space (0x20) instead.
+ */
+static int
+bad_std_timing(u8 a, u8 b)
+{
+	return (a == 0x00 && b == 0x00) ||
+	       (a == 0x01 && b == 0x01) ||
+	       (a == 0x20 && b == 0x20);
+}
+
+/**
+ * drm_mode_std - convert standard mode info (width, height, refresh) into mode
+ * @connector: connector we're probing
+ * @edid: EDID block being parsed
+ * @t: standard timing params
+ * @revision: EDID revision of the base block
+ *
+ * Take the standard timing params (in this case width, aspect, and refresh)
+ * and convert them into a real mode using CVT/GTF/DMT.
+ */
+static struct drm_display_mode *
+drm_mode_std(struct drm_connector *connector, struct edid *edid,
+	     struct std_timing *t, int revision)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *m, *mode = NULL;
+	int hsize, vsize;
+	int vrefresh_rate;
+	unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK)
+		>> EDID_TIMING_ASPECT_SHIFT;
+	unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
+		>> EDID_TIMING_VFREQ_SHIFT;
+	int timing_level = standard_timing_level(edid);
+
+	if (bad_std_timing(t->hsize, t->vfreq_aspect))
+		return NULL;
+
+	/* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
+	hsize = t->hsize * 8 + 248;
+	/* vrefresh_rate = vfreq + 60 */
+	vrefresh_rate = vfreq + 60;
+	/* the vdisplay is calculated based on the aspect ratio */
+	if (aspect_ratio == 0) {
+		if (revision < 3)
+			vsize = hsize;
+		else
+			vsize = (hsize * 10) / 16;
+	} else if (aspect_ratio == 1)
+		vsize = (hsize * 3) / 4;
+	else if (aspect_ratio == 2)
+		vsize = (hsize * 4) / 5;
+	else
+		vsize = (hsize * 9) / 16;
+
+	/* HDTV hack, part 1 */
+	if (vrefresh_rate == 60 &&
+	    ((hsize == 1360 && vsize == 765) ||
+	     (hsize == 1368 && vsize == 769))) {
+		hsize = 1366;
+		vsize = 768;
+	}
+
+	/*
+	 * If this connector already has a mode for this size and refresh
+	 * rate (because it came from detailed or CVT info), use that
+	 * instead.  This way we don't have to guess at interlace or
+	 * reduced blanking.
+	 */
+	list_for_each_entry(m, &connector->probed_modes, head)
+		if (m->hdisplay == hsize && m->vdisplay == vsize &&
+		    drm_mode_vrefresh(m) == vrefresh_rate)
+			return NULL;
+
+	/* HDTV hack, part 2 */
+	if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) {
+		mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0,
+				    false);
+		if (!mode)
+			return NULL;
+		mode->hdisplay = 1366;
+		mode->hsync_start = mode->hsync_start - 1;
+		mode->hsync_end = mode->hsync_end - 1;
+		return mode;
+	}
+
+	/* check whether it can be found in default mode table */
+	if (drm_monitor_supports_rb(edid)) {
+		mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
+					 true);
+		if (mode)
+			return mode;
+	}
+	mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
+	if (mode)
+		return mode;
+
+	/* okay, generate it */
+	switch (timing_level) {
+	case LEVEL_DMT:
+		break;
+	case LEVEL_GTF:
+		mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+		break;
+	case LEVEL_GTF2:
+		/*
+		 * This is potentially wrong if there's ever a monitor with
+		 * more than one ranges section, each claiming a different
+		 * secondary GTF curve.  Please don't do that.
+		 */
+		mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+		if (!mode)
+			return NULL;
+		if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+			drm_mode_destroy(dev, mode);
+			mode = drm_gtf_mode_complex(dev, hsize, vsize,
+						    vrefresh_rate, 0, 0,
+						    drm_gtf2_m(edid),
+						    drm_gtf2_2c(edid),
+						    drm_gtf2_k(edid),
+						    drm_gtf2_2j(edid));
+		}
+		break;
+	case LEVEL_CVT:
+		mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
+				    false);
+		break;
+	}
+	return mode;
+}
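+
+/*
+ * Standard timing example (illustrative): { .hsize = 0x81,
+ * .vfreq_aspect = 0x40 } decodes as hsize = 129 * 8 + 248 = 1280, aspect
+ * code 1 (4:3) so vsize = 960, and vrefresh = 0 + 60 = 60, i.e.
+ * 1280x960@60Hz.
+ */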
+
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded.  Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size.  Technically we
+ * should be checking refresh rate too.  Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+			    struct detailed_pixel_timing *pt)
+{
+	int i;
+	static const struct {
+		int w, h;
+	} cea_interlaced[] = {
+		{ 1920, 1080 },
+		{  720,  480 },
+		{ 1440,  480 },
+		{ 2880,  480 },
+		{  720,  576 },
+		{ 1440,  576 },
+		{ 2880,  576 },
+	};
+
+	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
+		if ((mode->hdisplay == cea_interlaced[i].w) &&
+		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
+			mode->vdisplay *= 2;
+			mode->vsync_start *= 2;
+			mode->vsync_end *= 2;
+			mode->vtotal *= 2;
+			mode->vtotal |= 1;
+		}
+	}
+
+	mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
+/**
+ * drm_mode_detailed - create a new mode from an EDID detailed timing section
+ * @dev: DRM device (needed to create new mode)
+ * @edid: EDID block
+ * @timing: EDID detailed timing info
+ * @quirks: quirks to apply
+ *
+ * An EDID detailed timing block contains enough info for us to create and
+ * return a new struct drm_display_mode.
+ */
+static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+						  struct edid *edid,
+						  struct detailed_timing *timing,
+						  u32 quirks)
+{
+	struct drm_display_mode *mode;
+	struct detailed_pixel_timing *pt = &timing->data.pixel_data;
+	unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
+	unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
+	unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
+	unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
+	unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+	unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+	unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
+	unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
+
+	/* ignore tiny modes */
+	if (hactive < 64 || vactive < 64)
+		return NULL;
+
+	if (pt->misc & DRM_EDID_PT_STEREO) {
+		printk(KERN_WARNING "stereo mode not supported\n");
+		return NULL;
+	}
+	if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
+		printk(KERN_WARNING "composite sync not supported\n");
+	}
+
+	/* it is incorrect if hsync/vsync width is zero */
+	if (!hsync_pulse_width || !vsync_pulse_width) {
+		DRM_DEBUG_KMS("Incorrect Detailed timing. "
+				"Wrong Hsync/Vsync pulse width\n");
+		return NULL;
+	}
+
+	if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+		mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+		if (!mode)
+			return NULL;
+
+		goto set_size;
+	}
+
+	mode = drm_mode_create(dev);
+	if (!mode)
+		return NULL;
+
+	if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+		timing->pixel_clock = cpu_to_le16(1088);
+
+	mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
+
+	mode->hdisplay = hactive;
+	mode->hsync_start = mode->hdisplay + hsync_offset;
+	mode->hsync_end = mode->hsync_start + hsync_pulse_width;
+	mode->htotal = mode->hdisplay + hblank;
+
+	mode->vdisplay = vactive;
+	mode->vsync_start = mode->vdisplay + vsync_offset;
+	mode->vsync_end = mode->vsync_start + vsync_pulse_width;
+	mode->vtotal = mode->vdisplay + vblank;
+
+	/* Some EDIDs have bogus h/vtotal values */
+	if (mode->hsync_end > mode->htotal)
+		mode->htotal = mode->hsync_end + 1;
+	if (mode->vsync_end > mode->vtotal)
+		mode->vtotal = mode->vsync_end + 1;
+
+	drm_mode_do_interlace_quirk(mode, pt);
+
+	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
+	}
+
+	mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+		DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+	mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+		DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+
+set_size:
+	mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+	mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
+
+	if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
+		mode->width_mm *= 10;
+		mode->height_mm *= 10;
+	}
+
+	if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+		mode->width_mm = edid->width_cm * 10;
+		mode->height_mm = edid->height_cm * 10;
+	}
+
+	mode->type = DRM_MODE_TYPE_DRIVER;
+	mode->vrefresh = drm_mode_vrefresh(mode);
+	drm_mode_set_name(mode);
+
+	return mode;
+}
+
+static bool
+mode_in_hsync_range(const struct drm_display_mode *mode,
+		    struct edid *edid, u8 *t)
+{
+	int hsync, hmin, hmax;
+
+	hmin = t[7];
+	if (edid->revision >= 4)
+	    hmin += ((t[4] & 0x04) ? 255 : 0);
+	hmax = t[8];
+	if (edid->revision >= 4)
+	    hmax += ((t[4] & 0x08) ? 255 : 0);
+	hsync = drm_mode_hsync(mode);
+
+	return (hsync <= hmax && hsync >= hmin);
+}
+
+static bool
+mode_in_vsync_range(const struct drm_display_mode *mode,
+		    struct edid *edid, u8 *t)
+{
+	int vsync, vmin, vmax;
+
+	vmin = t[5];
+	if (edid->revision >= 4)
+	    vmin += ((t[4] & 0x01) ? 255 : 0);
+	vmax = t[6];
+	if (edid->revision >= 4)
+	    vmax += ((t[4] & 0x02) ? 255 : 0);
+	vsync = drm_mode_vrefresh(mode);
+
+	return (vsync <= vmax && vsync >= vmin);
+}
+
+static u32
+range_pixel_clock(struct edid *edid, u8 *t)
+{
+	/* unspecified */
+	if (t[9] == 0 || t[9] == 255)
+		return 0;
+
+	/* 1.4 with CVT support gives us real precision, yay */
+	if (edid->revision >= 4 && t[10] == 0x04)
+		return (t[9] * 10000) - ((t[12] >> 2) * 250);
+
+	/* 1.3 is pathetic, so fuzz up a bit */
+	return t[9] * 10000 + 5001;
+}
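+
+/*
+ * Example: with EDID 1.4 CVT data (t[10] == 0x04), t[9] = 30 and
+ * t[12] = 0x08 give 30 * 10000 - (0x08 >> 2) * 250 = 299500 kHz.  The
+ * same t[9] in a 1.3 descriptor would be fuzzed up to 305001 kHz.
+ */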
+
+static bool
+mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
+	      struct detailed_timing *timing)
+{
+	u32 max_clock;
+	u8 *t = (u8 *)timing;
+
+	if (!mode_in_hsync_range(mode, edid, t))
+		return false;
+
+	if (!mode_in_vsync_range(mode, edid, t))
+		return false;
+
+	if ((max_clock = range_pixel_clock(edid, t)))
+		if (mode->clock > max_clock)
+			return false;
+
+	/* 1.4 max horizontal check */
+	if (edid->revision >= 4 && t[10] == 0x04)
+		if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
+			return false;
+
+	if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid))
+		return false;
+
+	return true;
+}
+
+static bool valid_inferred_mode(const struct drm_connector *connector,
+				const struct drm_display_mode *mode)
+{
+	struct drm_display_mode *m;
+	bool ok = false;
+
+	list_for_each_entry(m, &connector->probed_modes, head) {
+		if (mode->hdisplay == m->hdisplay &&
+		    mode->vdisplay == m->vdisplay &&
+		    drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+			return false; /* duplicated */
+		if (mode->hdisplay <= m->hdisplay &&
+		    mode->vdisplay <= m->vdisplay)
+			ok = true;
+	}
+	return ok;
+}
+
+static int
+drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+			struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
+		if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+		    valid_inferred_mode(connector, drm_dmt_modes + i)) {
+			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+	}
+
+	return modes;
+}
+
+/*
+ * Fix up the 1366x768 mode from 1368x768: GTF/CVT can't express a
+ * 1366-pixel width because it isn't divisible by 8.
+ */
+static void fixup_mode_1366x768(struct drm_display_mode *mode)
+{
+	if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
+		mode->hdisplay = 1366;
+		mode->hsync_start--;
+		mode->hsync_end--;
+		drm_mode_set_name(mode);
+	}
+}
+
+static int
+drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
+			struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
+		const struct minimode *m = &extra_modes[i];
+		newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
+		if (!newmode)
+			return modes;
+
+		fixup_mode_1366x768(newmode);
+		if (!mode_in_range(newmode, edid, timing) ||
+		    !valid_inferred_mode(connector, newmode)) {
+			drm_mode_destroy(dev, newmode);
+			continue;
+		}
+
+		drm_mode_probed_add(connector, newmode);
+		modes++;
+	}
+
+	return modes;
+}
+
+static int
+drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
+			struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+	bool rb = drm_monitor_supports_rb(edid);
+
+	for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
+		const struct minimode *m = &extra_modes[i];
+		newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
+		if (!newmode)
+			return modes;
+
+		fixup_mode_1366x768(newmode);
+		if (!mode_in_range(newmode, edid, timing) ||
+		    !valid_inferred_mode(connector, newmode)) {
+			drm_mode_destroy(dev, newmode);
+			continue;
+		}
+
+		drm_mode_probed_add(connector, newmode);
+		modes++;
+	}
+
+	return modes;
+}
+
+static void
+do_inferred_modes(struct detailed_timing *timing, void *c)
+{
+	struct detailed_mode_closure *closure = c;
+	struct detailed_non_pixel *data = &timing->data.other_data;
+	struct detailed_data_monitor_range *range = &data->data.range;
+
+	if (data->type != EDID_DETAIL_MONITOR_RANGE)
+		return;
+
+	closure->modes += drm_dmt_modes_for_range(closure->connector,
+						  closure->edid,
+						  timing);
+
+	if (!version_greater(closure->edid, 1, 1))
+		return; /* GTF not defined yet */
+
+	switch (range->flags) {
+	case 0x02: /* secondary gtf, XXX could do more */
+	case 0x00: /* default gtf */
+		closure->modes += drm_gtf_modes_for_range(closure->connector,
+							  closure->edid,
+							  timing);
+		break;
+	case 0x04: /* cvt, only in 1.4+ */
+		if (!version_greater(closure->edid, 1, 3))
+			break;
+
+		closure->modes += drm_cvt_modes_for_range(closure->connector,
+							  closure->edid,
+							  timing);
+		break;
+	case 0x01: /* just the ranges, no formula */
+	default:
+		break;
+	}
+}
+
+static int
+add_inferred_modes(struct drm_connector *connector, struct edid *edid)
+{
+	struct detailed_mode_closure closure = {
+		connector, edid, 0, 0, 0
+	};
+
+	if (version_greater(edid, 1, 0))
+		drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
+					    &closure);
+
+	return closure.modes;
+}
+
+static int
+drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
+{
+	int i, j, m, modes = 0;
+	struct drm_display_mode *mode;
+	u8 *est = ((u8 *)timing) + 5;
+
+	for (i = 0; i < 6; i++) {
+		for (j = 7; j >= 0; j--) {
+			m = (i * 8) + (7 - j);
+			if (m >= ARRAY_SIZE(est3_modes))
+				break;
+			if (est[i] & (1 << j)) {
+				mode = drm_mode_find_dmt(connector->dev,
+							 est3_modes[m].w,
+							 est3_modes[m].h,
+							 est3_modes[m].r,
+							 est3_modes[m].rb);
+				if (mode) {
+					drm_mode_probed_add(connector, mode);
+					modes++;
+				}
+			}
+		}
+	}
+
+	return modes;
+}
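+
+/*
+ * Example: bit 7 of est[0] (byte 6 of the descriptor) is m = 0, i.e.
+ * 640x350@85; bit 0 of the same byte is m = 7, 1152x864@75.  Bits past
+ * the 44 defined entries are skipped by the bounds check above.
+ */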
+
+static void
+do_established_modes(struct detailed_timing *timing, void *c)
+{
+	struct detailed_mode_closure *closure = c;
+	struct detailed_non_pixel *data = &timing->data.other_data;
+
+	if (data->type == EDID_DETAIL_EST_TIMINGS)
+		closure->modes += drm_est3_modes(closure->connector, timing);
+}
+
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @connector: connector to add the modes to
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+ * (defined above).  Tease them out and add them to the global modes list.
+ */
+static int
+add_established_modes(struct drm_connector *connector, struct edid *edid)
+{
+	struct drm_device *dev = connector->dev;
+	unsigned long est_bits = edid->established_timings.t1 |
+		(edid->established_timings.t2 << 8) |
+		((edid->established_timings.mfg_rsvd & 0x80) << 9);
+	int i, modes = 0;
+	struct detailed_mode_closure closure = {
+		connector, edid, 0, 0, 0
+	};
+
+	for (i = 0; i <= EDID_EST_TIMINGS; i++) {
+		if (est_bits & (1<<i)) {
+			struct drm_display_mode *newmode;
+			newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+	}
+
+	if (version_greater(edid, 1, 0))
+		    drm_for_each_detailed_block((u8 *)edid,
+						do_established_modes, &closure);
+
+	return modes + closure.modes;
+}
+
+static void
+do_standard_modes(struct detailed_timing *timing, void *c)
+{
+	struct detailed_mode_closure *closure = c;
+	struct detailed_non_pixel *data = &timing->data.other_data;
+	struct drm_connector *connector = closure->connector;
+	struct edid *edid = closure->edid;
+
+	if (data->type == EDID_DETAIL_STD_MODES) {
+		int i;
+		for (i = 0; i < 6; i++) {
+			struct std_timing *std;
+			struct drm_display_mode *newmode;
+
+			std = &data->data.timings[i];
+			newmode = drm_mode_std(connector, edid, std,
+					       edid->revision);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				closure->modes++;
+			}
+		}
+	}
+}
+
+/**
+ * add_standard_modes - get std. modes from EDID and add them
+ * @connector: connector to add the modes to
+ * @edid: EDID block to scan
+ *
+ * Standard modes can be calculated using the appropriate standard (DMT,
+ * GTF or CVT). Grab them from @edid and add them to the list.
+ */
+static int
+add_standard_modes(struct drm_connector *connector, struct edid *edid)
+{
+	int i, modes = 0;
+	struct detailed_mode_closure closure = {
+		connector, edid, 0, 0, 0
+	};
+
+	for (i = 0; i < EDID_STD_TIMINGS; i++) {
+		struct drm_display_mode *newmode;
+
+		newmode = drm_mode_std(connector, edid,
+				       &edid->standard_timings[i],
+				       edid->revision);
+		if (newmode) {
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+
+	if (version_greater(edid, 1, 0))
+		drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
+					    &closure);
+
+	/* XXX should also look for standard codes in VTB blocks */
+
+	return modes + closure.modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+			 struct detailed_timing *timing)
+{
+	int i, j, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+	struct cvt_timing *cvt;
+	const int rates[] = { 60, 85, 75, 60, 50 };
+	const u8 empty[3] = { 0, 0, 0 };
+
+	for (i = 0; i < 4; i++) {
+		int uninitialized_var(width), height;
+		cvt = &(timing->data.other_data.data.cvt[i]);
+
+		if (!memcmp(cvt->code, empty, 3))
+			continue;
+
+		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+		switch (cvt->code[1] & 0x0c) {
+		case 0x00:
+			width = height * 4 / 3;
+			break;
+		case 0x04:
+			width = height * 16 / 9;
+			break;
+		case 0x08:
+			width = height * 16 / 10;
+			break;
+		case 0x0c:
+			width = height * 15 / 9;
+			break;
+		}
+
+		for (j = 1; j < 5; j++) {
+			if (cvt->code[2] & (1 << j)) {
+				newmode = drm_cvt_mode(dev, width, height,
+						       rates[j], j == 0,
+						       false, false);
+				if (newmode) {
+					drm_mode_probed_add(connector, newmode);
+					modes++;
+				}
+			}
+		}
+	}
+
+	return modes;
+}
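+
+/*
+ * CVT 3-byte code example (illustrative): { 0x1b, 0x24, 0x10 } decodes
+ * as height = (0x1b + (0x2 << 8) + 1) * 2 = 1080, aspect bits 0x04 for
+ * 16:9 so width = 1920, and bit 4 of code[2] selects rates[4] = 50Hz.
+ */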
+
+static void
+do_cvt_mode(struct detailed_timing *timing, void *c)
+{
+	struct detailed_mode_closure *closure = c;
+	struct detailed_non_pixel *data = &timing->data.other_data;
+
+	if (data->type == EDID_DETAIL_CVT_3BYTE)
+		closure->modes += drm_cvt_modes(closure->connector, timing);
+}
+
+static int
+add_cvt_modes(struct drm_connector *connector, struct edid *edid)
+{
+	struct detailed_mode_closure closure = {
+		connector, edid, 0, 0, 0
+	};
+
+	if (version_greater(edid, 1, 2))
+		drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
+
+	/* XXX should also look for CVT codes in VTB blocks */
+
+	return closure.modes;
+}
+
+static void
+do_detailed_mode(struct detailed_timing *timing, void *c)
+{
+	struct detailed_mode_closure *closure = c;
+	struct drm_display_mode *newmode;
+
+	if (timing->pixel_clock) {
+		newmode = drm_mode_detailed(closure->connector->dev,
+					    closure->edid, timing,
+					    closure->quirks);
+		if (!newmode)
+			return;
+
+		if (closure->preferred)
+			newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+		drm_mode_probed_add(closure->connector, newmode);
+		closure->modes++;
+		closure->preferred = 0;
+	}
+}
+
+/*
+ * add_detailed_modes - Add modes from detailed timings
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+ */
+static int
+add_detailed_modes(struct drm_connector *connector, struct edid *edid,
+		   u32 quirks)
+{
+	struct detailed_mode_closure closure = {
+		connector,
+		edid,
+		1,
+		quirks,
+		0
+	};
+
+	if (closure.preferred && !version_greater(edid, 1, 3))
+		closure.preferred =
+		    (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+
+	drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure);
+
+	return closure.modes;
+}
+
+#define HDMI_IDENTIFIER 0x000C03
+#define AUDIO_BLOCK	0x01
+#define VIDEO_BLOCK     0x02
+#define VENDOR_BLOCK    0x03
+#define SPEAKER_BLOCK	0x04
+#define VIDEO_CAPABILITY_BLOCK	0x07
+#define EDID_BASIC_AUDIO	(1 << 6)
+#define EDID_CEA_YCRCB444	(1 << 5)
+#define EDID_CEA_YCRCB422	(1 << 4)
+#define EDID_CEA_VCDB_QS	(1 << 6)
+
+/**
+ * drm_find_cea_extension - search EDID for a CEA extension block
+ * @edid: EDID to search
+ *
+ * Return: pointer to the CEA extension block, or NULL if none is present.
+ */
+u8 *drm_find_cea_extension(struct edid *edid)
+{
+	u8 *edid_ext = NULL;
+	int i;
+
+	/* No EDID or EDID extensions */
+	if (edid == NULL || edid->extensions == 0)
+		return NULL;
+
+	/* Find CEA extension */
+	for (i = 0; i < edid->extensions; i++) {
+		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+		if (edid_ext[0] == CEA_EXT)
+			break;
+	}
+
+	if (i == edid->extensions)
+		return NULL;
+
+	return edid_ext;
+}
+EXPORT_SYMBOL(drm_find_cea_extension);
+
+/**
+ * drm_match_cea_mode - look for a CEA mode matching given mode
+ * @to_match: display mode
+ *
+ * Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
+ * mode.
+ */
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
+{
+	u8 mode;
+
+	if (!to_match->clock)
+		return 0;
+
+	for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
+		const struct drm_display_mode *cea_mode = &edid_cea_modes[mode];
+		unsigned int clock1, clock2;
+
+		clock1 = clock2 = cea_mode->clock;
+
+		/* Check both 60Hz and 59.94Hz */
+		if (cea_mode->vrefresh % 6 == 0) {
+			/*
+			 * edid_cea_modes contains the 59.94Hz
+			 * variant for 240 and 480 line modes,
+			 * and the 60Hz variant otherwise.
+			 */
+			if (cea_mode->vdisplay == 240 ||
+			    cea_mode->vdisplay == 480)
+				clock1 = clock1 * 1001 / 1000;
+			else
+				clock2 = DIV_ROUND_UP(clock2 * 1000, 1001);
+		}
+
+		if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
+		     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
+		    drm_mode_equal_no_clocks(to_match, cea_mode))
+			return mode + 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_match_cea_mode);
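+
+/*
+ * Example: the 720x480 entries store the 59.94Hz clock (27000 kHz); the
+ * scaling above also accepts the exact 60Hz variant at
+ * 27000 * 1001 / 1000 = 27027 kHz.  1080-line entries store the 60Hz
+ * clock instead, and the 59.94Hz variant is derived by rounding up.
+ */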
+
+static int
+do_cea_modes(struct drm_connector *connector, u8 *db, u8 len)
+{
+	struct drm_device *dev = connector->dev;
+	u8 *mode, cea_mode;
+	int modes = 0;
+
+	for (mode = db; mode < db + len; mode++) {
+		cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
+		if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
+			struct drm_display_mode *newmode;
+			newmode = drm_mode_duplicate(dev,
+						     &edid_cea_modes[cea_mode]);
+			if (newmode) {
+				newmode->vrefresh = 0;
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+	}
+
+	return modes;
+}
+
+static int
+cea_db_payload_len(const u8 *db)
+{
+	return db[0] & 0x1f;
+}
+
+static int
+cea_db_tag(const u8 *db)
+{
+	return db[0] >> 5;
+}
+
+static int
+cea_revision(const u8 *cea)
+{
+	return cea[1];
+}
+
+static int
+cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+	/* Data block offset in CEA extension block */
+	*start = 4;
+	*end = cea[2];
+	if (*end == 0)
+		*end = 127;
+	if (*end < 4 || *end > 127)
+		return -ERANGE;
+	return 0;
+}
+
+#define for_each_cea_db(cea, i, start, end) \
+	for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
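+
+/*
+ * Example: a data block header byte of 0x42 decodes as tag 0x42 >> 5 = 2
+ * (VIDEO_BLOCK) with a 0x42 & 0x1f = 2 byte payload, so the iterator
+ * advances 3 bytes (header plus payload) to the next block.
+ */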
+
+static int
+add_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+	u8 *cea = drm_find_cea_extension(edid);
+	u8 *db, dbl;
+	int modes = 0;
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return 0;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			dbl = cea_db_payload_len(db);
+
+			if (cea_db_tag(db) == VIDEO_BLOCK)
+				modes += do_cea_modes(connector, db + 1, dbl);
+		}
+	}
+
+	return modes;
+}
+
+static void
+parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
+{
+	u8 len = cea_db_payload_len(db);
+
+	if (len >= 6) {
+		connector->eld[5] |= (db[6] >> 7) << 1;  /* Supports_AI */
+		connector->dvi_dual = db[6] & 1;
+	}
+	if (len >= 7)
+		connector->max_tmds_clock = db[7] * 5;
+	if (len >= 8) {
+		connector->latency_present[0] = db[8] >> 7;
+		connector->latency_present[1] = (db[8] >> 6) & 1;
+	}
+	if (len >= 9)
+		connector->video_latency[0] = db[9];
+	if (len >= 10)
+		connector->audio_latency[0] = db[10];
+	if (len >= 11)
+		connector->video_latency[1] = db[11];
+	if (len >= 12)
+		connector->audio_latency[1] = db[12];
+
+	DRM_DEBUG_KMS("HDMI: DVI dual %d, "
+		    "max TMDS clock %d, "
+		    "latency present %d %d, "
+		    "video latency %d %d, "
+		    "audio latency %d %d\n",
+		    connector->dvi_dual,
+		    connector->max_tmds_clock,
+	      (int) connector->latency_present[0],
+	      (int) connector->latency_present[1],
+		    connector->video_latency[0],
+		    connector->video_latency[1],
+		    connector->audio_latency[0],
+		    connector->audio_latency[1]);
+}
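+
+/*
+ * Example: a VSDB with db[7] = 0x28 (40) advertises a maximum TMDS clock
+ * of 40 * 5 = 200 MHz.  A latency byte of 0 means "unknown" and 255 means
+ * the sink does not support that stream type; both are handled in
+ * drm_av_sync_delay() below.
+ */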
+
+static void
+monitor_name(struct detailed_timing *t, void *data)
+{
+	if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
+		*(u8 **)data = t->data.other_data.data.str.str;
+}
+
+static bool cea_db_is_hdmi_vsdb(const u8 *db)
+{
+	int hdmi_id;
+
+	if (cea_db_tag(db) != VENDOR_BLOCK)
+		return false;
+
+	if (cea_db_payload_len(db) < 5)
+		return false;
+
+	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+	return hdmi_id == HDMI_IDENTIFIER;
+}
+
+/**
+ * drm_edid_to_eld - build ELD from EDID
+ * @connector: connector corresponding to the HDMI/DP sink
+ * @edid: EDID to parse
+ *
+ * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
+ * Some ELD fields are left to the graphics driver caller:
+ * - Conn_Type
+ * - HDCP
+ * - Port_ID
+ */
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+{
+	uint8_t *eld = connector->eld;
+	u8 *cea;
+	u8 *name;
+	u8 *db;
+	int sad_count = 0;
+	int mnl;
+	int dbl;
+
+	memset(eld, 0, sizeof(connector->eld));
+
+	cea = drm_find_cea_extension(edid);
+	if (!cea) {
+		DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
+		return;
+	}
+
+	name = NULL;
+	drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
+	for (mnl = 0; name && mnl < 13; mnl++) {
+		if (name[mnl] == 0x0a)
+			break;
+		eld[20 + mnl] = name[mnl];
+	}
+	eld[4] = (cea[1] << 5) | mnl;
+	DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
+
+	eld[0] = 2 << 3;		/* ELD version: 2 */
+
+	eld[16] = edid->mfg_id[0];
+	eld[17] = edid->mfg_id[1];
+	eld[18] = edid->prod_code[0];
+	eld[19] = edid->prod_code[1];
+
+	if (cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end)) {
+			start = 0;
+			end = 0;
+		}
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			dbl = cea_db_payload_len(db);
+
+			switch (cea_db_tag(db)) {
+			case AUDIO_BLOCK:
+				/* Audio Data Block, contains SADs */
+				sad_count = dbl / 3;
+				if (dbl >= 1)
+					memcpy(eld + 20 + mnl, &db[1], dbl);
+				break;
+			case SPEAKER_BLOCK:
+				/* Speaker Allocation Data Block */
+				if (dbl >= 1)
+					eld[7] = db[1];
+				break;
+			case VENDOR_BLOCK:
+				/* HDMI Vendor-Specific Data Block */
+				if (cea_db_is_hdmi_vsdb(db))
+					parse_hdmi_vsdb(connector, db);
+				break;
+			default:
+				break;
+			}
+		}
+	}
+	eld[5] |= sad_count << 4;
+	eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
+
+	DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+}
+EXPORT_SYMBOL(drm_edid_to_eld);
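+
+/*
+ * Usage sketch (hypothetical driver code): the KMS driver typically
+ * fills the ELD while probing modes, and the audio side later reads
+ * connector->eld:
+ *
+ *	struct edid *edid = drm_get_edid(connector, adapter);
+ *
+ *	if (edid) {
+ *		drm_edid_to_eld(connector, edid);
+ *		...
+ *	}
+ */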
+
+/**
+ * drm_edid_to_sad - extracts SADs from EDID
+ * @edid: EDID to parse
+ * @sads: pointer that will be set to the extracted SADs
+ *
+ * Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it.
+ * Note: returned pointer needs to be kfreed
+ *
+ * Return number of found SADs or negative number on error.
+ */
+int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
+{
+	int count = 0;
+	int i, start, end, dbl;
+	u8 *cea;
+
+	cea = drm_find_cea_extension(edid);
+	if (!cea) {
+		DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
+		return -ENOENT;
+	}
+
+	if (cea_revision(cea) < 3) {
+		DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
+		return -ENOTSUPP;
+	}
+
+	if (cea_db_offsets(cea, &start, &end)) {
+		DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
+		return -EPROTO;
+	}
+
+	for_each_cea_db(cea, i, start, end) {
+		u8 *db = &cea[i];
+
+		if (cea_db_tag(db) == AUDIO_BLOCK) {
+			int j;
+			dbl = cea_db_payload_len(db);
+
+			count = dbl / 3; /* SAD is 3B */
+			*sads = kcalloc(count, sizeof(**sads), GFP_KERNEL);
+			if (!*sads)
+				return -ENOMEM;
+			for (j = 0; j < count; j++) {
+				u8 *sad = &db[1 + j * 3];
+
+				(*sads)[j].format = (sad[0] & 0x78) >> 3;
+				(*sads)[j].channels = sad[0] & 0x7;
+				(*sads)[j].freq = sad[1] & 0x7F;
+				(*sads)[j].byte2 = sad[2];
+			}
+			break;
+		}
+	}
+
+	return count;
+}
+EXPORT_SYMBOL(drm_edid_to_sad);
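+
+/*
+ * Usage sketch (hypothetical caller; note the kfree() requirement
+ * stated in the kerneldoc above):
+ *
+ *	struct cea_sad *sads;
+ *	int i, count = drm_edid_to_sad(edid, &sads);
+ *
+ *	for (i = 0; i < count; i++)
+ *		... inspect sads[i].format, sads[i].channels, sads[i].freq ...
+ *	if (count > 0)
+ *		kfree(sads);
+ */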
+
+/**
+ * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in milliseconds
+ * @connector: connector associated with the HDMI/DP sink
+ * @mode: the display mode
+ */
+int drm_av_sync_delay(struct drm_connector *connector,
+		      struct drm_display_mode *mode)
+{
+	int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+	int a, v;
+
+	if (!connector->latency_present[0])
+		return 0;
+	if (!connector->latency_present[1])
+		i = 0;
+
+	a = connector->audio_latency[i];
+	v = connector->video_latency[i];
+
+	/*
+	 * HDMI/DP sink doesn't support audio or video?
+	 */
+	if (a == 255 || v == 255)
+		return 0;
+
+	/*
+	 * Convert raw EDID values to milliseconds.
+	 * Treat unknown latency as 0 ms.
+	 */
+	if (a)
+		a = min(2 * (a - 1), 500);
+	if (v)
+		v = min(2 * (v - 1), 500);
+
+	return max(v - a, 0);
+}
+EXPORT_SYMBOL(drm_av_sync_delay);
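+
+/*
+ * Worked example of the conversion above: a raw video latency of 6 and
+ * a raw audio latency of 1 decode to 2 * (6 - 1) = 10 ms and 0 ms
+ * respectively, so drm_av_sync_delay() returns 10.
+ */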
+
+/**
+ * drm_select_eld - select one ELD from multiple HDMI/DP sinks
+ * @encoder: the encoder just changed display mode
+ * @mode: the adjusted display mode
+ *
+ * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
+ * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
+ */
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+				     struct drm_display_mode *mode)
+{
+	struct drm_connector *connector;
+	struct drm_device *dev = encoder->dev;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder && connector->eld[0])
+			return connector;
+
+	return NULL;
+}
+EXPORT_SYMBOL(drm_select_eld);
+
+/**
+ * drm_detect_hdmi_monitor - detect whether monitor is HDMI.
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI, false if not or unknown.
+ */
+bool drm_detect_hdmi_monitor(struct edid *edid)
+{
+	u8 *edid_ext;
+	int i;
+	int start_offset, end_offset;
+
+	edid_ext = drm_find_cea_extension(edid);
+	if (!edid_ext)
+		return false;
+
+	if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+		return false;
+
+	/*
+	 * Because HDMI identifier is in Vendor Specific Block,
+	 * search it from all data blocks of CEA extension.
+	 */
+	for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+		if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
+			return true;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(drm_detect_hdmi_monitor);
+
+/**
+ * drm_detect_monitor_audio - check monitor audio capability
+ * @edid: monitor EDID information
+ *
+ * Monitor should have a CEA extension block.
+ * If the monitor has 'basic audio' but no CEA audio blocks, it's 'basic
+ * audio' only. If there is any audio extension block and a supported
+ * audio format, assume at least 'basic audio' support, even if 'basic
+ * audio' is not defined in the EDID.
+ */
+bool drm_detect_monitor_audio(struct edid *edid)
+{
+	u8 *edid_ext;
+	int i, j;
+	bool has_audio = false;
+	int start_offset, end_offset;
+
+	edid_ext = drm_find_cea_extension(edid);
+	if (!edid_ext)
+		goto end;
+
+	has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
+
+	if (has_audio) {
+		DRM_DEBUG_KMS("Monitor has basic audio support\n");
+		goto end;
+	}
+
+	if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
+		goto end;
+
+	for_each_cea_db(edid_ext, i, start_offset, end_offset) {
+		if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
+			has_audio = true;
+			for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
+				DRM_DEBUG_KMS("CEA audio format %d\n",
+					      (edid_ext[i + j] >> 3) & 0xf);
+			goto end;
+		}
+	}
+end:
+	return has_audio;
+}
+EXPORT_SYMBOL(drm_detect_monitor_audio);
+
+/**
+ * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ * @edid: EDID block to check
+ *
+ * Check whether the monitor reports the RGB quantization range selection
+ * as supported. The AVI infoframe can then be used to inform the monitor
+ * which quantization range (full or limited) is used.
+ */
+bool drm_rgb_quant_range_selectable(struct edid *edid)
+{
+	u8 *edid_ext;
+	int i, start, end;
+
+	edid_ext = drm_find_cea_extension(edid);
+	if (!edid_ext)
+		return false;
+
+	if (cea_db_offsets(edid_ext, &start, &end))
+		return false;
+
+	for_each_cea_db(edid_ext, i, start, end) {
+		if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
+		    cea_db_payload_len(&edid_ext[i]) == 2) {
+			DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
+			return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
+		}
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
+
+/**
+ * drm_add_display_info - pull display info out if present
+ * @edid: EDID data
+ * @info: display info (attached to connector)
+ *
+ * Grab any available display info and stuff it into the drm_display_info
+ * structure that's part of the connector.  Useful for tracking bpp and
+ * color spaces.
+ */
+static void drm_add_display_info(struct edid *edid,
+				 struct drm_display_info *info)
+{
+	u8 *edid_ext;
+
+	info->width_mm = edid->width_cm * 10;
+	info->height_mm = edid->height_cm * 10;
+
+	/* driver figures it out in this case */
+	info->bpc = 0;
+	info->color_formats = 0;
+
+	if (edid->revision < 3)
+		return;
+
+	if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
+		return;
+
+	/* Get data from CEA blocks if present */
+	edid_ext = drm_find_cea_extension(edid);
+	if (edid_ext) {
+		info->cea_rev = edid_ext[1];
+
+		/* The existence of a CEA block should imply RGB support */
+		info->color_formats = DRM_COLOR_FORMAT_RGB444;
+		if (edid_ext[3] & EDID_CEA_YCRCB444)
+			info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+		if (edid_ext[3] & EDID_CEA_YCRCB422)
+			info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+	}
+
+	/* Only defined for 1.4 with digital displays */
+	if (edid->revision < 4)
+		return;
+
+	switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
+	case DRM_EDID_DIGITAL_DEPTH_6:
+		info->bpc = 6;
+		break;
+	case DRM_EDID_DIGITAL_DEPTH_8:
+		info->bpc = 8;
+		break;
+	case DRM_EDID_DIGITAL_DEPTH_10:
+		info->bpc = 10;
+		break;
+	case DRM_EDID_DIGITAL_DEPTH_12:
+		info->bpc = 12;
+		break;
+	case DRM_EDID_DIGITAL_DEPTH_14:
+		info->bpc = 14;
+		break;
+	case DRM_EDID_DIGITAL_DEPTH_16:
+		info->bpc = 16;
+		break;
+	case DRM_EDID_DIGITAL_DEPTH_UNDEF:
+	default:
+		info->bpc = 0;
+		break;
+	}
+
+	info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+	if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+		info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+	if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
+		info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+}
+
+/**
+ * drm_add_edid_modes - add modes from EDID data, if available
+ * @connector: connector we're probing
+ * @edid: edid data
+ *
+ * Add the specified modes to the connector's mode list.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+{
+	int num_modes = 0;
+	u32 quirks;
+
+	if (edid == NULL)
+		return 0;
+	if (!drm_edid_is_valid(edid)) {
+		dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
+			 drm_get_connector_name(connector));
+		return 0;
+	}
+
+	quirks = edid_get_quirks(edid);
+
+	/*
+	 * EDID spec says modes should be preferred in this order:
+	 * - preferred detailed mode
+	 * - other detailed modes from base block
+	 * - detailed modes from extension blocks
+	 * - CVT 3-byte code modes
+	 * - standard timing codes
+	 * - established timing codes
+	 * - modes inferred from GTF or CVT range information
+	 *
+	 * We get this pretty much right.
+	 *
+	 * XXX order for additional mode types in extension blocks?
+	 */
+	num_modes += add_detailed_modes(connector, edid, quirks);
+	num_modes += add_cvt_modes(connector, edid);
+	num_modes += add_standard_modes(connector, edid);
+	num_modes += add_established_modes(connector, edid);
+	if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+		num_modes += add_inferred_modes(connector, edid);
+	num_modes += add_cea_modes(connector, edid);
+
+	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+		edid_fixup_preferred(connector, quirks);
+
+	drm_add_display_info(edid, &connector->display_info);
+
+	if (quirks & EDID_QUIRK_FORCE_8BPC)
+		connector->display_info.bpc = 8;
+
+	return num_modes;
+}
+EXPORT_SYMBOL(drm_add_edid_modes);
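+
+/*
+ * Typical use from a connector's ->get_modes() hook (hypothetical
+ * driver code; obtaining the i2c adapter is driver specific):
+ *
+ *	static int example_get_modes(struct drm_connector *connector)
+ *	{
+ *		struct edid *edid = drm_get_edid(connector, adapter);
+ *		int count = 0;
+ *
+ *		if (edid) {
+ *			drm_mode_connector_update_edid_property(connector, edid);
+ *			count = drm_add_edid_modes(connector, edid);
+ *			kfree(edid);
+ *		}
+ *		return count;
+ *	}
+ */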
+
+/**
+ * drm_add_modes_noedid - add modes for the connectors without EDID
+ * @connector: connector we're probing
+ * @hdisplay: the horizontal display limit
+ * @vdisplay: the vertical display limit
+ *
+ * Add the specified modes to the connector's mode list. A mode is added
+ * only if its hdisplay/vdisplay do not exceed the given limits.
+ *
+ * Return number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_modes_noedid(struct drm_connector *connector,
+			int hdisplay, int vdisplay)
+{
+	int i, count, num_modes = 0;
+	struct drm_display_mode *mode;
+	struct drm_device *dev = connector->dev;
+
+	count = ARRAY_SIZE(drm_dmt_modes);
+	if (hdisplay < 0)
+		hdisplay = 0;
+	if (vdisplay < 0)
+		vdisplay = 0;
+
+	for (i = 0; i < count; i++) {
+		const struct drm_display_mode *ptr = &drm_dmt_modes[i];
+		if (hdisplay && vdisplay) {
+			/*
+			 * Only when both limits are valid are they used to
+			 * decide whether the mode should be added to the
+			 * connector's mode list.
+			 */
+			if (ptr->hdisplay > hdisplay ||
+					ptr->vdisplay > vdisplay)
+				continue;
+		}
+		if (drm_mode_vrefresh(ptr) > 61)
+			continue;
+		mode = drm_mode_duplicate(dev, ptr);
+		if (mode) {
+			drm_mode_probed_add(connector, mode);
+			num_modes++;
+		}
+	}
+	return num_modes;
+}
+EXPORT_SYMBOL(drm_add_modes_noedid);
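+
+/*
+ * Usage sketch: a driver whose connector has no reachable EDID might
+ * fall back to DMT modes up to a known panel limit, e.g.
+ *
+ *	count = drm_add_modes_noedid(connector, 1024, 768);
+ */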
+
+/**
+ * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
+ *                                              data from a DRM display mode
+ * @frame: HDMI AVI infoframe
+ * @mode: DRM display mode
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int
+drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
+					 const struct drm_display_mode *mode)
+{
+	int err;
+
+	if (!frame || !mode)
+		return -EINVAL;
+
+	err = hdmi_avi_infoframe_init(frame);
+	if (err < 0)
+		return err;
+
+	frame->video_code = drm_match_cea_mode(mode);
+	if (!frame->video_code)
+		return 0;
+
+	frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+	frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
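+
+/*
+ * Usage sketch (hypothetical; 17 bytes covers the packed AVI infoframe,
+ * i.e. header plus payload):
+ *
+ *	struct hdmi_avi_infoframe frame;
+ *	u8 buf[17];
+ *
+ *	if (!drm_hdmi_avi_infoframe_from_display_mode(&frame, mode))
+ *		hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
+ */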
diff --git a/linux-imx/drivers/gpu/drm/drm_edid_load.c b/linux-imx/drivers/gpu/drm/drm_edid_load.c
new file mode 100644
index 0000000..fa445dd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_edid_load.c
@@ -0,0 +1,276 @@
+/*
+   drm_edid_load.c: use a built-in EDID data set or load it via the firmware
+		    interface
+
+   Copyright (C) 2012 Carsten Emde <C.Emde@osadl.org>
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License
+   as published by the Free Software Foundation; either version 2
+   of the License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
+*/
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+static char edid_firmware[PATH_MAX];
+module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
+MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
+	"from built-in data or /lib/firmware instead. ");
+
+#define GENERIC_EDIDS 5
+static char *generic_edid_name[GENERIC_EDIDS] = {
+	"edid/1024x768.bin",
+	"edid/1280x1024.bin",
+	"edid/1600x1200.bin",
+	"edid/1680x1050.bin",
+	"edid/1920x1080.bin",
+};
+
+static u8 generic_edid[GENERIC_EDIDS][128] = {
+	{
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+	0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x05, 0x16, 0x01, 0x03, 0x6d, 0x23, 0x1a, 0x78,
+	0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+	0x20, 0x50, 0x54, 0x00, 0x08, 0x00, 0x61, 0x40,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x64, 0x19,
+	0x00, 0x40, 0x41, 0x00, 0x26, 0x30, 0x08, 0x90,
+	0x36, 0x00, 0x63, 0x0a, 0x11, 0x00, 0x00, 0x18,
+	0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+	0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+	0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+	0x3d, 0x2f, 0x31, 0x07, 0x00, 0x0a, 0x20, 0x20,
+	0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+	0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x58,
+	0x47, 0x41, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x55,
+	},
+	{
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+	0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x05, 0x16, 0x01, 0x03, 0x6d, 0x2c, 0x23, 0x78,
+	0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+	0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0x81, 0x80,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x30, 0x2a,
+	0x00, 0x98, 0x51, 0x00, 0x2a, 0x40, 0x30, 0x70,
+	0x13, 0x00, 0xbc, 0x63, 0x11, 0x00, 0x00, 0x1e,
+	0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+	0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+	0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+	0x3d, 0x3e, 0x40, 0x0b, 0x00, 0x0a, 0x20, 0x20,
+	0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+	0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
+	0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xa0,
+	},
+	{
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+	0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x05, 0x16, 0x01, 0x03, 0x6d, 0x37, 0x29, 0x78,
+	0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+	0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xa9, 0x40,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x48, 0x3f,
+	0x40, 0x30, 0x62, 0xb0, 0x32, 0x40, 0x40, 0xc0,
+	0x13, 0x00, 0x2b, 0xa0, 0x21, 0x00, 0x00, 0x1e,
+	0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+	0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+	0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+	0x3d, 0x4a, 0x4c, 0x11, 0x00, 0x0a, 0x20, 0x20,
+	0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+	0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x55,
+	0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0x9d,
+	},
+	{
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+	0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78,
+	0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+	0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x21, 0x39,
+	0x90, 0x30, 0x62, 0x1a, 0x27, 0x40, 0x68, 0xb0,
+	0x36, 0x00, 0xb5, 0x11, 0x11, 0x00, 0x00, 0x1e,
+	0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+	0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+	0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+	0x3d, 0x40, 0x42, 0x0f, 0x00, 0x0a, 0x20, 0x20,
+	0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+	0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x57,
+	0x53, 0x58, 0x47, 0x41, 0x0a, 0x20, 0x00, 0x26,
+	},
+	{
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+	0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x05, 0x16, 0x01, 0x03, 0x6d, 0x32, 0x1c, 0x78,
+	0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
+	0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xd1, 0xc0,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a,
+	0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+	0x45, 0x00, 0xf4, 0x19, 0x11, 0x00, 0x00, 0x1e,
+	0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
+	0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
+	0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
+	0x3d, 0x42, 0x44, 0x0f, 0x00, 0x0a, 0x20, 0x20,
+	0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+	0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x46,
+	0x48, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x05,
+	},
+};
+
+static u8 *edid_load(struct drm_connector *connector, char *name,
+			char *connector_name)
+{
+	const struct firmware *fw;
+	struct platform_device *pdev;
+	u8 *fwdata = NULL, *edid, *new_edid;
+	int fwsize, expected;
+	int builtin = 0, err = 0;
+	int i, valid_extensions = 0;
+	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+
+	pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
+	if (IS_ERR(pdev)) {
+		DRM_ERROR("Failed to register EDID firmware platform device "
+		    "for connector \"%s\"\n", connector_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = request_firmware(&fw, name, &pdev->dev);
+	platform_device_unregister(pdev);
+
+	if (err) {
+		i = 0;
+		while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
+			i++;
+		if (i < GENERIC_EDIDS) {
+			err = 0;
+			builtin = 1;
+			fwdata = generic_edid[i];
+			fwsize = sizeof(generic_edid[i]);
+		}
+	}
+
+	if (err) {
+		DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
+		    name, err);
+		goto out;
+	}
+
+	if (fwdata == NULL) {
+		fwdata = (u8 *) fw->data;
+		fwsize = fw->size;
+	}
+
+	expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
+	if (expected != fwsize) {
+		DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
+		    "(expected %d, got %d)\n", name, expected, (int) fwsize);
+		err = -EINVAL;
+		goto relfw_out;
+	}
+
+	edid = kmalloc(fwsize, GFP_KERNEL);
+	if (edid == NULL) {
+		err = -ENOMEM;
+		goto relfw_out;
+	}
+	memcpy(edid, fwdata, fwsize);
+
+	if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
+		connector->bad_edid_counter++;
+		DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
+		    name);
+		kfree(edid);
+		err = -EINVAL;
+		goto relfw_out;
+	}
+
+	for (i = 1; i <= edid[0x7e]; i++) {
+		if (i != valid_extensions + 1)
+			memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
+			    edid + i * EDID_LENGTH, EDID_LENGTH);
+		if (drm_edid_block_valid(edid + i * EDID_LENGTH, i, print_bad_edid))
+			valid_extensions++;
+	}
+
+	if (valid_extensions != edid[0x7e]) {
+		edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
+		DRM_INFO("Found %d valid extensions instead of %d in EDID data "
+		    "\"%s\" for connector \"%s\"\n", valid_extensions,
+		    edid[0x7e], name, connector_name);
+		edid[0x7e] = valid_extensions;
+		new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
+		    GFP_KERNEL);
+		if (new_edid == NULL) {
+			err = -ENOMEM;
+			kfree(edid);
+			goto relfw_out;
+		}
+		edid = new_edid;
+	}
+
+	DRM_INFO("Got %s EDID base block and %d extension%s from "
+	    "\"%s\" for connector \"%s\"\n", builtin ? "built-in" :
+	    "external", valid_extensions, valid_extensions == 1 ? "" : "s",
+	    name, connector_name);
+
+relfw_out:
+	release_firmware(fw);
+
+out:
+	if (err)
+		return ERR_PTR(err);
+
+	return edid;
+}
+
+int drm_load_edid_firmware(struct drm_connector *connector)
+{
+	char *connector_name = drm_get_connector_name(connector);
+	char *edidname = edid_firmware, *last, *colon;
+	int ret;
+	struct edid *edid;
+
+	if (*edidname == '\0')
+		return 0;
+
+	colon = strchr(edidname, ':');
+	if (colon != NULL) {
+		if (strncmp(connector_name, edidname, colon - edidname))
+			return 0;
+		edidname = colon + 1;
+		if (*edidname == '\0')
+			return 0;
+	}
+
+	last = edidname + strlen(edidname) - 1;
+	if (*last == '\n')
+		*last = '\0';
+
+	edid = (struct edid *) edid_load(connector, edidname, connector_name);
+	if (IS_ERR_OR_NULL(edid))
+		return 0;
+
+	drm_mode_connector_update_edid_property(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+	kfree(edid);
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/drm_encoder_slave.c b/linux-imx/drivers/gpu/drm/drm_encoder_slave.c
new file mode 100644
index 0000000..0cfb60f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_encoder_slave.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+
+#include <drm/drm_encoder_slave.h>
+
+/**
+ * drm_i2c_encoder_init - Initialize an I2C slave encoder
+ * @dev:	DRM device.
+ * @encoder:    Encoder to be attached to the I2C device. You aren't
+ *		required to have called drm_encoder_init() before.
+ * @adap:	I2C adapter that will be used to communicate with
+ *		the device.
+ * @info:	Information that will be used to create the I2C device.
+ *		Required fields are @addr and @type.
+ *
+ * Create an I2C device on the specified bus (the module containing its
+ * driver is transparently loaded) and attach it to the specified
+ * &drm_encoder_slave. The @slave_funcs field will be initialized with
+ * the hooks provided by the slave driver.
+ *
+ * If @info->platform_data is non-NULL it will be used as the initial
+ * slave config.
+ *
+ * Returns 0 on success or a negative errno on failure; in particular,
+ * -ENODEV is returned when no matching driver is found.
+ */
+int drm_i2c_encoder_init(struct drm_device *dev,
+			 struct drm_encoder_slave *encoder,
+			 struct i2c_adapter *adap,
+			 const struct i2c_board_info *info)
+{
+	struct module *module = NULL;
+	struct i2c_client *client;
+	struct drm_i2c_encoder_driver *encoder_drv;
+	int err = 0;
+
+	request_module("%s%s", I2C_MODULE_PREFIX, info->type);
+
+	client = i2c_new_device(adap, info);
+	if (!client) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	if (!client->driver) {
+		err = -ENODEV;
+		goto fail_unregister;
+	}
+
+	module = client->driver->driver.owner;
+	if (!try_module_get(module)) {
+		err = -ENODEV;
+		goto fail_unregister;
+	}
+
+	encoder->bus_priv = client;
+
+	encoder_drv = to_drm_i2c_encoder_driver(client->driver);
+
+	err = encoder_drv->encoder_init(client, dev, encoder);
+	if (err)
+		goto fail_unregister;
+
+	if (info->platform_data)
+		encoder->slave_funcs->set_config(&encoder->base,
+						 info->platform_data);
+
+	return 0;
+
+fail_unregister:
+	i2c_unregister_device(client);
+	module_put(module);
+fail:
+	return err;
+}
+EXPORT_SYMBOL(drm_i2c_encoder_init);
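+
+/*
+ * Usage sketch (hypothetical driver code; the chip name and address are
+ * illustrative only):
+ *
+ *	static const struct i2c_board_info info = {
+ *		I2C_BOARD_INFO("tfp410", 0x38),
+ *	};
+ *
+ *	err = drm_i2c_encoder_init(dev, slave_encoder, adapter, &info);
+ */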
+
+/**
+ * drm_i2c_encoder_destroy - Unregister the I2C device backing an encoder
+ * @drm_encoder:	Encoder to be unregistered.
+ *
+ * This should be called from the @destroy method of an I2C slave
+ * encoder driver once I2C access is no longer needed.
+ */
+void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
+{
+	struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
+	struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
+	struct module *module = client->driver->driver.owner;
+
+	i2c_unregister_device(client);
+	encoder->bus_priv = NULL;
+
+	module_put(module);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_destroy);
+
+/*
+ * Wrapper functions which can be plugged into drm_encoder_helper_funcs:
+ */
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct drm_encoder *enc)
+{
+	return to_encoder_slave(enc)->slave_funcs;
+}
+
+void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	get_slave_funcs(encoder)->dpms(encoder, mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_dpms);
+
+bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup);
+
+void drm_i2c_encoder_prepare(struct drm_encoder *encoder)
+{
+	drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_prepare);
+
+void drm_i2c_encoder_commit(struct drm_encoder *encoder)
+{
+	drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_commit);
+
+void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_mode_set);
+
+enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
+	    struct drm_connector *connector)
+{
+	return get_slave_funcs(encoder)->detect(encoder, connector);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_detect);
+
+void drm_i2c_encoder_save(struct drm_encoder *encoder)
+{
+	get_slave_funcs(encoder)->save(encoder);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_save);
+
+void drm_i2c_encoder_restore(struct drm_encoder *encoder)
+{
+	get_slave_funcs(encoder)->restore(encoder);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_restore);
diff --git a/linux-imx/drivers/gpu/drm/drm_fb_cma_helper.c b/linux-imx/drivers/gpu/drm/drm_fb_cma_helper.c
new file mode 100644
index 0000000..0b5af7d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -0,0 +1,455 @@
+/*
+ * drm kms/fb cma (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Analog Device Inc.
+ *   Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Based on udl_fbdev.c
+ *  Copyright (C) 2012 Red Hat
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <linux/module.h>
+
+struct drm_fb_cma {
+	struct drm_framebuffer		fb;
+	struct drm_gem_cma_object	*obj[4];
+};
+
+struct drm_fbdev_cma {
+	struct drm_fb_helper	fb_helper;
+	struct drm_fb_cma	*fb;
+};
+
+static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
+{
+	return container_of(helper, struct drm_fbdev_cma, fb_helper);
+}
+
+static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
+{
+	return container_of(fb, struct drm_fb_cma, fb);
+}
+
+static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
+{
+	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if (fb_cma->obj[i])
+			drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
+	}
+
+	drm_framebuffer_cleanup(fb);
+	kfree(fb_cma);
+}
+
+static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+	struct drm_file *file_priv, unsigned int *handle)
+{
+	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+	return drm_gem_handle_create(file_priv,
+			&fb_cma->obj[0]->base, handle);
+}
+
+static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
+	.destroy	= drm_fb_cma_destroy,
+	.create_handle	= drm_fb_cma_create_handle,
+};
+
+static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
+	struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj,
+	unsigned int num_planes)
+{
+	struct drm_fb_cma *fb_cma;
+	int ret;
+	int i;
+
+	fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
+	if (!fb_cma)
+		return ERR_PTR(-ENOMEM);
+
+	drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
+
+	for (i = 0; i < num_planes; i++)
+		fb_cma->obj[i] = obj[i];
+
+	ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
+	if (ret) {
+		dev_err(dev->dev, "Failed to initalize framebuffer: %d\n", ret);
+		kfree(fb_cma);
+		return ERR_PTR(ret);
+	}
+
+	return fb_cma;
+}
+
+/**
+ * drm_fb_cma_create() - (struct drm_mode_config_funcs *)->fb_create callback function
+ *
+ * If your hardware has special alignment or pitch requirements these should be
+ * checked before calling this function.
+ */
+struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
+	struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_fb_cma *fb_cma;
+	struct drm_gem_cma_object *objs[4];
+	struct drm_gem_object *obj;
+	unsigned int hsub;
+	unsigned int vsub;
+	int ret;
+	int i;
+
+	hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+	vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+
+	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+		unsigned int width = mode_cmd->width / (i ? hsub : 1);
+		unsigned int height = mode_cmd->height / (i ? vsub : 1);
+		unsigned int min_size;
+
+		obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]);
+		if (!obj) {
+			dev_err(dev->dev, "Failed to lookup GEM object\n");
+			ret = -ENXIO;
+			goto err_gem_object_unreference;
+		}
+
+		min_size = (height - 1) * mode_cmd->pitches[i]
+			 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+			 + mode_cmd->offsets[i];
+
+		if (obj->size < min_size) {
+			drm_gem_object_unreference_unlocked(obj);
+			ret = -EINVAL;
+			goto err_gem_object_unreference;
+		}
+		objs[i] = to_drm_gem_cma_obj(obj);
+	}
+
+	fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i);
+	if (IS_ERR(fb_cma)) {
+		ret = PTR_ERR(fb_cma);
+		goto err_gem_object_unreference;
+	}
+
+	return &fb_cma->fb;
+
+err_gem_object_unreference:
+	for (i--; i >= 0; i--)
+		drm_gem_object_unreference_unlocked(&objs[i]->base);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_create);
+
+/**
+ * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
+ * @fb: The framebuffer
+ * @plane: Which plane
+ *
+ * Return the CMA GEM object for given framebuffer.
+ *
+ * This function will usually be called from the CRTC callback functions.
+ */
+struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+	unsigned int plane)
+{
+	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+	if (plane >= 4)
+		return NULL;
+
+	return fb_cma->obj[plane];
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
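+
+/*
+ * Usage sketch from a hypothetical CRTC/plane update path, computing
+ * the DMA address to scan out from:
+ *
+ *	struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, 0);
+ *	dma_addr_t start = gem->paddr + fb->offsets[0] +
+ *			   y * fb->pitches[0] +
+ *			   x * (fb->bits_per_pixel / 8);
+ */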
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * drm_fb_cma_describe() - Helper to dump information about a single
+ * CMA framebuffer object
+ */
+void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+	int i, n = drm_format_num_planes(fb->pixel_format);
+
+	seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
+			(char *)&fb->pixel_format);
+
+	for (i = 0; i < n; i++) {
+		seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
+				i, fb->offsets[i], fb->pitches[i]);
+		drm_gem_cma_describe(fb_cma->obj[i], m);
+	}
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_describe);
+
+/**
+ * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
+ * in debugfs.
+ */
+int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_framebuffer *fb;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	if (ret)
+		return ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret) {
+		mutex_unlock(&dev->mode_config.mutex);
+		return ret;
+	}
+
+	list_for_each_entry(fb, &dev->mode_config.fb_list, head)
+		drm_fb_cma_describe(fb, m);
+
+	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
+#endif
+
+static struct fb_ops drm_fbdev_cma_ops = {
+	.owner		= THIS_MODULE,
+	.fb_fillrect	= sys_fillrect,
+	.fb_copyarea	= sys_copyarea,
+	.fb_imageblit	= sys_imageblit,
+	.fb_check_var	= drm_fb_helper_check_var,
+	.fb_set_par	= drm_fb_helper_set_par,
+	.fb_blank	= drm_fb_helper_blank,
+	.fb_pan_display	= drm_fb_helper_pan_display,
+	.fb_setcmap	= drm_fb_helper_setcmap,
+};
+
+static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+	struct drm_device *dev = helper->dev;
+	struct drm_gem_cma_object *obj;
+	struct drm_framebuffer *fb;
+	unsigned int bytes_per_pixel;
+	unsigned long offset;
+	struct fb_info *fbi;
+	size_t size;
+	int ret;
+
+	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+			sizes->surface_width, sizes->surface_height,
+			sizes->surface_bpp);
+
+	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+		sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	obj = drm_gem_cma_create(dev, size);
+	if (IS_ERR(obj))
+		return -ENOMEM;
+
+	fbi = framebuffer_alloc(0, dev->dev);
+	if (!fbi) {
+		dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
+		ret = -ENOMEM;
+		goto err_drm_gem_cma_free_object;
+	}
+
+	fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
+	if (IS_ERR(fbdev_cma->fb)) {
+		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+		ret = PTR_ERR(fbdev_cma->fb);
+		goto err_framebuffer_release;
+	}
+
+	fb = &fbdev_cma->fb->fb;
+	helper->fb = fb;
+	helper->fbdev = fbi;
+
+	fbi->par = helper;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->fbops = &drm_fbdev_cma_ops;
+
+	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (ret) {
+		dev_err(dev->dev, "Failed to allocate color map.\n");
+		goto err_drm_fb_cma_destroy;
+	}
+
+	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+	offset = fbi->var.xoffset * bytes_per_pixel;
+	offset += fbi->var.yoffset * fb->pitches[0];
+
+	dev->mode_config.fb_base = (resource_size_t)obj->paddr;
+	fbi->screen_base = obj->vaddr + offset;
+	fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+	fbi->screen_size = size;
+	fbi->fix.smem_len = size;
+
+	return 0;
+
+err_drm_fb_cma_destroy:
+	drm_framebuffer_unregister_private(fb);
+	drm_fb_cma_destroy(fb);
+err_framebuffer_release:
+	framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+	drm_gem_cma_free_object(&obj->base);
+	return ret;
+}
+
+static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+	.fb_probe = drm_fbdev_cma_create,
+};
+
+/**
+ * drm_fbdev_cma_init() - Allocate and initialize a drm_fbdev_cma struct
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device
+ * @num_crtc: Number of CRTCs
+ * @max_conn_count: Maximum number of connectors
+ *
+ * Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
+ */
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+	unsigned int preferred_bpp, unsigned int num_crtc,
+	unsigned int max_conn_count)
+{
+	struct drm_fbdev_cma *fbdev_cma;
+	struct drm_fb_helper *helper;
+	int ret;
+
+	fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
+	if (!fbdev_cma) {
+		dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
+	helper = &fbdev_cma->fb_helper;
+
+	ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
+	if (ret < 0) {
+		dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
+		goto err_free;
+	}
+
+	ret = drm_fb_helper_single_add_all_connectors(helper);
+	if (ret < 0) {
+		dev_err(dev->dev, "Failed to add connectors.\n");
+		goto err_drm_fb_helper_fini;
+	}
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
+	ret = drm_fb_helper_initial_config(helper, preferred_bpp);
+	if (ret < 0) {
+		dev_err(dev->dev, "Failed to set inital hw configuration.\n");
+		goto err_drm_fb_helper_fini;
+	}
+
+	return fbdev_cma;
+
+err_drm_fb_helper_fini:
+	drm_fb_helper_fini(helper);
+err_free:
+	kfree(fbdev_cma);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
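+
+/*
+ * Typical driver load sequence (hypothetical; one CRTC, one connector,
+ * 32 bpp preferred):
+ *
+ *	priv->fbdev = drm_fbdev_cma_init(drm, 32, 1, 1);
+ *	if (IS_ERR(priv->fbdev))
+ *		priv->fbdev = NULL;
+ */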
+
+/**
+ * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
+ * @fbdev_cma: The drm_fbdev_cma struct
+ */
+void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
+{
+	if (fbdev_cma->fb_helper.fbdev) {
+		struct fb_info *info;
+		int ret;
+
+		info = fbdev_cma->fb_helper.fbdev;
+		ret = unregister_framebuffer(info);
+		if (ret < 0)
+			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+
+		framebuffer_release(info);
+	}
+
+	if (fbdev_cma->fb) {
+		drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
+		drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+	}
+
+	drm_fb_helper_fini(&fbdev_cma->fb_helper);
+	kfree(fbdev_cma);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
+
+/**
+ * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's lastclose callback.
+ */
+void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
+{
+	if (fbdev_cma) {
+		struct drm_device *dev = fbdev_cma->fb_helper.dev;
+
+		drm_modeset_lock_all(dev);
+		drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
+		drm_modeset_unlock_all(dev);
+	}
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
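+
+/*
+ * Typical hookup (hypothetical driver code):
+ *
+ *	static void example_lastclose(struct drm_device *dev)
+ *	{
+ *		struct example_priv *priv = dev->dev_private;
+ *
+ *		drm_fbdev_cma_restore_mode(priv->fbdev);
+ *	}
+ */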
+
+/**
+ * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's output_poll_changed
+ * callback.
+ */
+void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
+{
+	if (fbdev_cma)
+		drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
diff --git a/linux-imx/drivers/gpu/drm/drm_fb_helper.c b/linux-imx/drivers/gpu/drm/drm_fb_helper.c
new file mode 100644
index 0000000..b78cbe7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_fb_helper.c
@@ -0,0 +1,1599 @@
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/sysrq.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
+static LIST_HEAD(kernel_fb_helper_list);
+
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful to provide an fbdev on top of a drm kernel
+ * mode setting driver. They can be used mostly independently of the crtc
+ * helper functions used by many drivers to implement the kernel mode setting
+ * interfaces.
+ *
+ * Initialization is done as a three-step process with drm_fb_helper_init(),
+ * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
+ * Drivers with fancier requirements than the default behaviour can override the
+ * second step with their own code.  Teardown is done with drm_fb_helper_fini().
+ *
+ * At runtime drivers should restore the fbdev console by calling
+ * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
+ * should also notify the fb helper code of updates to the output
+ * configuration by calling drm_fb_helper_hotplug_event(). For easier
+ * integration with the output polling code in drm_crtc_helper.c the modeset
+ * code provides a ->output_poll_changed callback.
+ *
+ * All other functions exported by the fb helper library can be used to
+ * implement the fbdev driver interface by the driver.
+ */
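+
+/*
+ * Minimal setup sketch following the three steps above (hypothetical
+ * driver code, error handling elided):
+ *
+ *	drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
+ *	drm_fb_helper_single_add_all_connectors(helper);
+ *	drm_fb_helper_initial_config(helper, preferred_bpp);
+ *	...
+ *	drm_fb_helper_fini(helper);
+ */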
+
+/**
+ * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
+ * 					       emulation helper
+ * @fb_helper: fbdev initialized with drm_fb_helper_init
+ *
+ * This function adds all the available connectors for use with the given
+ * fb_helper. This is a separate step to allow drivers to freely assign
+ * connectors to the fbdev, e.g. if some are reserved for special purposes or
+ * not adequate to be used for the fbcon.
+ *
+ * Since this is part of the initial setup before the fbdev is published, no
+ * locking is required.
+ */
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_connector *connector;
+	int i;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct drm_fb_helper_connector *fb_helper_connector;
+
+		fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+		if (!fb_helper_connector)
+			goto fail;
+
+		fb_helper_connector->connector = connector;
+		fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+	}
+	return 0;
+fail:
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		kfree(fb_helper->connector_info[i]);
+		fb_helper->connector_info[i] = NULL;
+	}
+	fb_helper->connector_count = 0;
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
+
+static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
+{
+	struct drm_fb_helper_connector *fb_helper_conn;
+	int i;
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		struct drm_cmdline_mode *mode;
+		struct drm_connector *connector;
+		char *option = NULL;
+
+		fb_helper_conn = fb_helper->connector_info[i];
+		connector = fb_helper_conn->connector;
+		mode = &fb_helper_conn->cmdline_mode;
+
+		/* do something on return - turn off connector maybe */
+		if (fb_get_options(drm_get_connector_name(connector), &option))
+			continue;
+
+		if (drm_mode_parse_command_line_for_connector(option,
+							      connector,
+							      mode)) {
+			if (mode->force) {
+				const char *s;
+				switch (mode->force) {
+				case DRM_FORCE_OFF:
+					s = "OFF";
+					break;
+				case DRM_FORCE_ON_DIGITAL:
+					s = "ON - dig";
+					break;
+				default:
+				case DRM_FORCE_ON:
+					s = "ON";
+					break;
+				}
+
+				DRM_INFO("forcing %s connector %s\n",
+					 drm_get_connector_name(connector), s);
+				connector->force = mode->force;
+			}
+
+			DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+				      drm_get_connector_name(connector),
+				      mode->xres, mode->yres,
+				      mode->refresh_specified ? mode->refresh : 60,
+				      mode->rb ? " reduced blanking" : "",
+				      mode->margins ? " with margins" : "",
+				      mode->interlace ?  " interlaced" : "");
+		}
+	}
+	return 0;
+}
+
+static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+{
+	uint16_t *r_base, *g_base, *b_base;
+	int i;
+
+	r_base = crtc->gamma_store;
+	g_base = r_base + crtc->gamma_size;
+	b_base = g_base + crtc->gamma_size;
+
+	for (i = 0; i < crtc->gamma_size; i++)
+		helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
+}
+
+static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
+{
+	uint16_t *r_base, *g_base, *b_base;
+
+	if (crtc->funcs->gamma_set == NULL)
+		return;
+
+	r_base = crtc->gamma_store;
+	g_base = r_base + crtc->gamma_size;
+	b_base = g_base + crtc->gamma_size;
+
+	crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+}
+
+/**
+ * drm_fb_helper_debug_enter - implementation for ->fb_debug_enter
+ * @info: fbdev registered by the helper
+ */
+int drm_fb_helper_debug_enter(struct fb_info *info)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct drm_crtc_helper_funcs *funcs;
+	int i;
+
+	if (list_empty(&kernel_fb_helper_list))
+		return 0;
+
+	list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+		for (i = 0; i < helper->crtc_count; i++) {
+			struct drm_mode_set *mode_set =
+				&helper->crtc_info[i].mode_set;
+
+			if (!mode_set->crtc->enabled)
+				continue;
+
+			funcs =	mode_set->crtc->helper_private;
+			drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
+			funcs->mode_set_base_atomic(mode_set->crtc,
+						    mode_set->fb,
+						    mode_set->x,
+						    mode_set->y,
+						    ENTER_ATOMIC_MODE_SET);
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_debug_enter);
+
+/* Find the real fb for a given fb helper CRTC */
+static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *c;
+
+	list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+		if (crtc->base.id == c->base.id)
+			return c->fb;
+	}
+
+	return NULL;
+}
+
+/**
+ * drm_fb_helper_debug_leave - implementation for ->fb_debug_leave
+ * @info: fbdev registered by the helper
+ */
+int drm_fb_helper_debug_leave(struct fb_info *info)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct drm_crtc *crtc;
+	struct drm_crtc_helper_funcs *funcs;
+	struct drm_framebuffer *fb;
+	int i;
+
+	for (i = 0; i < helper->crtc_count; i++) {
+		struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
+		crtc = mode_set->crtc;
+		funcs = crtc->helper_private;
+		fb = drm_mode_config_fb(crtc);
+
+		if (!crtc->enabled)
+			continue;
+
+		if (!fb) {
+			DRM_ERROR("no fb to restore??\n");
+			continue;
+		}
+
+		drm_fb_helper_restore_lut_atomic(mode_set->crtc);
+		funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
+					    crtc->y, LEAVE_ATOMIC_MODE_SET);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_debug_leave);
+
+/**
+ * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
+ * @fb_helper: fbcon to restore
+ *
+ * This should be called from the driver's ->lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ */
+bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+{
+	bool error = false;
+	int i, ret;
+
+	drm_warn_on_modeset_not_all_locked(fb_helper->dev);
+
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+		ret = drm_mode_set_config_internal(mode_set);
+		if (ret)
+			error = true;
+	}
+	return error;
+}
+EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
+
+/*
+ * restore fbcon display for all kms drivers using this helper, used for sysrq
+ * and panic handling.
+ */
+static bool drm_fb_helper_force_kernel_mode(void)
+{
+	bool ret, error = false;
+	struct drm_fb_helper *helper;
+
+	if (list_empty(&kernel_fb_helper_list))
+		return false;
+
+	list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+		if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+			continue;
+
+		ret = drm_fb_helper_restore_fbdev_mode(helper);
+		if (ret)
+			error = true;
+	}
+	return error;
+}
+
+static int drm_fb_helper_panic(struct notifier_block *n, unsigned long unused,
+			void *panic_str)
+{
+	/*
+	 * It's a waste of time and effort to switch back to text console
+	 * if the kernel should reboot before panic messages can be seen.
+	 */
+	if (panic_timeout < 0)
+		return 0;
+
+	pr_err("panic occurred, switching back to text console\n");
+	return drm_fb_helper_force_kernel_mode();
+}
+
+static struct notifier_block paniced = {
+	.notifier_call = drm_fb_helper_panic,
+};
+
+static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_crtc *crtc;
+	int bound = 0, crtcs_bound = 0;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc->fb)
+			crtcs_bound++;
+		if (crtc->fb == fb_helper->fb)
+			bound++;
+	}
+
+	if (bound < crtcs_bound)
+		return false;
+	return true;
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
+{
+	bool ret;
+	ret = drm_fb_helper_force_kernel_mode();
+	if (ret)
+		DRM_ERROR("Failed to restore crtc configuration\n");
+}
+static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
+
+static void drm_fb_helper_sysrq(int dummy1)
+{
+	schedule_work(&drm_fb_helper_restore_work);
+}
+
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
+	.handler = drm_fb_helper_sysrq,
+	.help_msg = "force-fb(V)",
+	.action_msg = "Restore framebuffer console",
+};
+#else
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
+#endif
+
+static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	int i, j;
+
+	/*
+	 * fbdev->blank can be called from irq context in case of a panic.
+	 * Since we already have our own special panic handler which will
+	 * restore the fbdev console mode completely, just bail out early.
+	 */
+	if (oops_in_progress)
+		return;
+
+	/*
+	 * For each CRTC in this fb, turn the connectors on/off.
+	 */
+	drm_modeset_lock_all(dev);
+	if (!drm_fb_helper_is_bound(fb_helper)) {
+		drm_modeset_unlock_all(dev);
+		return;
+	}
+
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+		if (!crtc->enabled)
+			continue;
+
+		/* Walk the connectors & encoders on this fb turning them on/off */
+		for (j = 0; j < fb_helper->connector_count; j++) {
+			connector = fb_helper->connector_info[j]->connector;
+			connector->funcs->dpms(connector, dpms_mode);
+			drm_object_property_set_value(&connector->base,
+				dev->mode_config.dpms_property, dpms_mode);
+		}
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+/**
+ * drm_fb_helper_blank - implementation for ->fb_blank
+ * @blank: desired blanking state
+ * @info: fbdev registered by the helper
+ */
+int drm_fb_helper_blank(int blank, struct fb_info *info)
+{
+	switch (blank) {
+	/* Display: On; HSync: On, VSync: On */
+	case FB_BLANK_UNBLANK:
+		drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON);
+		break;
+	/* Display: Off; HSync: On, VSync: On */
+	case FB_BLANK_NORMAL:
+		drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
+		break;
+	/* Display: Off; HSync: Off, VSync: On */
+	case FB_BLANK_HSYNC_SUSPEND:
+		drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
+		break;
+	/* Display: Off; HSync: On, VSync: Off */
+	case FB_BLANK_VSYNC_SUSPEND:
+		drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND);
+		break;
+	/* Display: Off; HSync: Off, VSync: Off */
+	case FB_BLANK_POWERDOWN:
+		drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF);
+		break;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_blank);
+
+static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
+{
+	int i;
+
+	for (i = 0; i < helper->connector_count; i++)
+		kfree(helper->connector_info[i]);
+	kfree(helper->connector_info);
+	for (i = 0; i < helper->crtc_count; i++) {
+		kfree(helper->crtc_info[i].mode_set.connectors);
+		if (helper->crtc_info[i].mode_set.mode)
+			drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+	}
+	kfree(helper->crtc_info);
+}
+
+/**
+ * drm_fb_helper_init - initialize a drm_fb_helper structure
+ * @dev: drm device
+ * @fb_helper: driver-allocated fbdev helper structure to initialize
+ * @crtc_count: maximum number of crtcs to support in this fbdev emulation
+ * @max_conn_count: max connector count
+ *
+ * This allocates the structures for the fbdev helper with the given limits.
+ * Note that this won't yet touch the hardware (through the driver interfaces)
+ * nor register the fbdev. This is only done in drm_fb_helper_initial_config()
+ * to allow driver writers more control over the exact init sequence.
+ *
+ * Drivers must set fb_helper->funcs before calling
+ * drm_fb_helper_initial_config().
+ *
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+int drm_fb_helper_init(struct drm_device *dev,
+		       struct drm_fb_helper *fb_helper,
+		       int crtc_count, int max_conn_count)
+{
+	struct drm_crtc *crtc;
+	int i;
+
+	fb_helper->dev = dev;
+
+	INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
+
+	fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
+	if (!fb_helper->crtc_info)
+		return -ENOMEM;
+
+	fb_helper->crtc_count = crtc_count;
+	fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
+	if (!fb_helper->connector_info) {
+		kfree(fb_helper->crtc_info);
+		return -ENOMEM;
+	}
+	fb_helper->connector_count = 0;
+
+	for (i = 0; i < crtc_count; i++) {
+		fb_helper->crtc_info[i].mode_set.connectors =
+			kcalloc(max_conn_count,
+				sizeof(struct drm_connector *),
+				GFP_KERNEL);
+
+		if (!fb_helper->crtc_info[i].mode_set.connectors)
+			goto out_free;
+		fb_helper->crtc_info[i].mode_set.num_connectors = 0;
+	}
+
+	i = 0;
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		fb_helper->crtc_info[i].mode_set.crtc = crtc;
+		i++;
+	}
+
+	return 0;
+out_free:
+	drm_fb_helper_crtc_free(fb_helper);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_fb_helper_init);
+
+void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
+{
+	if (!list_empty(&fb_helper->kernel_fb_list)) {
+		list_del(&fb_helper->kernel_fb_list);
+		if (list_empty(&kernel_fb_helper_list)) {
+			pr_info("drm: unregistered panic notifier\n");
+			atomic_notifier_chain_unregister(&panic_notifier_list,
+							 &paniced);
+			unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+		}
+	}
+
+	drm_fb_helper_crtc_free(fb_helper);
+}
+EXPORT_SYMBOL(drm_fb_helper_fini);
+
+static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
+		     u16 blue, u16 regno, struct fb_info *info)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_framebuffer *fb = fb_helper->fb;
+	int pindex;
+
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+		u32 *palette;
+		u32 value;
+		/* place color in the pseudo palette */
+		if (regno > 16)
+			return -EINVAL;
+		palette = (u32 *)info->pseudo_palette;
+		red >>= (16 - info->var.red.length);
+		green >>= (16 - info->var.green.length);
+		blue >>= (16 - info->var.blue.length);
+		value = (red << info->var.red.offset) |
+			(green << info->var.green.offset) |
+			(blue << info->var.blue.offset);
+		if (info->var.transp.length > 0) {
+			u32 mask = (1 << info->var.transp.length) - 1;
+			mask <<= info->var.transp.offset;
+			value |= mask;
+		}
+		palette[regno] = value;
+		return 0;
+	}
+
+	pindex = regno;
+
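+	/*
+	 * The hardware gamma table still has 256 slots at 16bpp, so the
+	 * 5-bit red/blue palette entries are replicated across 8 slots each
+	 * and the 6-bit green entries across 4 slots each below.
+	 */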
+	if (fb->bits_per_pixel == 16) {
+		pindex = regno << 3;
+
+		if (fb->depth == 16 && regno > 63)
+			return -EINVAL;
+		if (fb->depth == 15 && regno > 31)
+			return -EINVAL;
+
+		if (fb->depth == 16) {
+			u16 r, g, b;
+			int i;
+			if (regno < 32) {
+				for (i = 0; i < 8; i++)
+					fb_helper->funcs->gamma_set(crtc, red,
+						green, blue, pindex + i);
+			}
+
+			fb_helper->funcs->gamma_get(crtc, &r,
+						    &g, &b,
+						    pindex >> 1);
+
+			for (i = 0; i < 4; i++)
+				fb_helper->funcs->gamma_set(crtc, r,
+							    green, b,
+							    (pindex >> 1) + i);
+		}
+	}
+
+	if (fb->depth != 16)
+		fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
+	return 0;
+}
+
+/**
+ * drm_fb_helper_setcmap - implementation for ->fb_setcmap
+ * @cmap: cmap to set
+ * @info: fbdev registered by the helper
+ */
+int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_crtc_helper_funcs *crtc_funcs;
+	u16 *red, *green, *blue, *transp;
+	struct drm_crtc *crtc;
+	int i, j, rc = 0;
+	int start;
+
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		crtc = fb_helper->crtc_info[i].mode_set.crtc;
+		crtc_funcs = crtc->helper_private;
+
+		red = cmap->red;
+		green = cmap->green;
+		blue = cmap->blue;
+		transp = cmap->transp;
+		start = cmap->start;
+
+		for (j = 0; j < cmap->len; j++) {
+			u16 hred, hgreen, hblue, htransp = 0xffff;
+
+			hred = *red++;
+			hgreen = *green++;
+			hblue = *blue++;
+
+			if (transp)
+				htransp = *transp++;
+
+			rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
+			if (rc)
+				return rc;
+		}
+		crtc_funcs->load_lut(crtc);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(drm_fb_helper_setcmap);
+
+/**
+ * drm_fb_helper_check_var - implementation for ->fb_check_var
+ * @var: screeninfo to check
+ * @info: fbdev registered by the helper
+ */
+int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+			    struct fb_info *info)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_framebuffer *fb = fb_helper->fb;
+	int depth;
+
+	if (var->pixclock != 0 || in_dbg_master())
+		return -EINVAL;
+
+	/* Need to resize the fb object !!! */
+	if (var->bits_per_pixel > fb->bits_per_pixel ||
+	    var->xres > fb->width || var->yres > fb->height ||
+	    var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
+		DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+			  "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
+			  var->xres, var->yres, var->bits_per_pixel,
+			  var->xres_virtual, var->yres_virtual,
+			  fb->width, fb->height, fb->bits_per_pixel);
+		return -EINVAL;
+	}
+
+	switch (var->bits_per_pixel) {
+	case 16:
+		depth = (var->green.length == 6) ? 16 : 15;
+		break;
+	case 32:
+		depth = (var->transp.length > 0) ? 32 : 24;
+		break;
+	default:
+		depth = var->bits_per_pixel;
+		break;
+	}
+
+	switch (depth) {
+	case 8:
+		var->red.offset = 0;
+		var->green.offset = 0;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.length = 0;
+		var->transp.offset = 0;
+		break;
+	case 15:
+		var->red.offset = 10;
+		var->green.offset = 5;
+		var->blue.offset = 0;
+		var->red.length = 5;
+		var->green.length = 5;
+		var->blue.length = 5;
+		var->transp.length = 1;
+		var->transp.offset = 15;
+		break;
+	case 16:
+		var->red.offset = 11;
+		var->green.offset = 5;
+		var->blue.offset = 0;
+		var->red.length = 5;
+		var->green.length = 6;
+		var->blue.length = 5;
+		var->transp.length = 0;
+		var->transp.offset = 0;
+		break;
+	case 24:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.length = 0;
+		var->transp.offset = 0;
+		break;
+	case 32:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.length = 8;
+		var->transp.offset = 24;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_check_var);
+
+/**
+ * drm_fb_helper_set_par - implementation for ->fb_set_par
+ * @info: fbdev registered by the helper
+ *
+ * This will let fbcon do the mode init and is called at initialization time by
+ * the fbdev core when registering the driver, and later on through the hotplug
+ * callback.
+ */
+int drm_fb_helper_set_par(struct fb_info *info)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_device *dev = fb_helper->dev;
+	struct fb_var_screeninfo *var = &info->var;
+	int ret;
+	int i;
+
+	if (var->pixclock != 0) {
+		DRM_ERROR("PIXEL CLOCK SET\n");
+		return -EINVAL;
+	}
+
+	drm_modeset_lock_all(dev);
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set);
+		if (ret) {
+			drm_modeset_unlock_all(dev);
+			return ret;
+		}
+	}
+	drm_modeset_unlock_all(dev);
+
+	if (fb_helper->delayed_hotplug) {
+		fb_helper->delayed_hotplug = false;
+		drm_fb_helper_hotplug_event(fb_helper);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_set_par);
+
+/**
+ * drm_fb_helper_pan_display - implementation for ->fb_pan_display
+ * @var: updated screen information
+ * @info: fbdev registered by the helper
+ */
+int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+			      struct fb_info *info)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_mode_set *modeset;
+	struct drm_crtc *crtc;
+	int ret = 0;
+	int i;
+
+	drm_modeset_lock_all(dev);
+	if (!drm_fb_helper_is_bound(fb_helper)) {
+		drm_modeset_unlock_all(dev);
+		return -EBUSY;
+	}
+
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+		modeset = &fb_helper->crtc_info[i].mode_set;
+
+		modeset->x = var->xoffset;
+		modeset->y = var->yoffset;
+
+		if (modeset->num_connectors) {
+			ret = drm_mode_set_config_internal(modeset);
+			if (!ret) {
+				info->var.xoffset = var->xoffset;
+				info->var.yoffset = var->yoffset;
+			}
+		}
+	}
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_pan_display);
+
+/*
+ * Allocates the backing storage and sets up the fbdev info structure through
+ * the ->fb_probe callback and then registers the fbdev and sets up the panic
+ * notifier.
+ */
+static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+					 int preferred_bpp)
+{
+	int ret = 0;
+	int crtc_count = 0;
+	int i;
+	struct fb_info *info;
+	struct drm_fb_helper_surface_size sizes;
+	int gamma_size = 0;
+
+	memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
+	sizes.surface_depth = 24;
+	sizes.surface_bpp = 32;
+	sizes.fb_width = (unsigned)-1;
+	sizes.fb_height = (unsigned)-1;
+
+	/* if the driver picked a non-default bpp (e.g. 8 or 16), use that
+	   value for both depth and bpp */
+	if (preferred_bpp != sizes.surface_bpp)
+		sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
+
+	/* first up get a count of crtcs now in use and new min/maxes width/heights */
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
+		struct drm_cmdline_mode *cmdline_mode;
+
+		cmdline_mode = &fb_helper_conn->cmdline_mode;
+
+		if (cmdline_mode->bpp_specified) {
+			switch (cmdline_mode->bpp) {
+			case 8:
+				sizes.surface_depth = sizes.surface_bpp = 8;
+				break;
+			case 15:
+				sizes.surface_depth = 15;
+				sizes.surface_bpp = 16;
+				break;
+			case 16:
+				sizes.surface_depth = sizes.surface_bpp = 16;
+				break;
+			case 24:
+				sizes.surface_depth = sizes.surface_bpp = 24;
+				break;
+			case 32:
+				sizes.surface_depth = 24;
+				sizes.surface_bpp = 32;
+				break;
+			}
+			break;
+		}
+	}
+
+	crtc_count = 0;
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		struct drm_display_mode *desired_mode;
+		desired_mode = fb_helper->crtc_info[i].desired_mode;
+
+		if (desired_mode) {
+			if (gamma_size == 0)
+				gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+			if (desired_mode->hdisplay < sizes.fb_width)
+				sizes.fb_width = desired_mode->hdisplay;
+			if (desired_mode->vdisplay < sizes.fb_height)
+				sizes.fb_height = desired_mode->vdisplay;
+			if (desired_mode->hdisplay > sizes.surface_width)
+				sizes.surface_width = desired_mode->hdisplay;
+			if (desired_mode->vdisplay > sizes.surface_height)
+				sizes.surface_height = desired_mode->vdisplay;
+			crtc_count++;
+		}
+	}
+
+	if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
+		/* hmm everyone went away - assume VGA cable just fell out
+		   and will come back later. */
+		DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
+		sizes.fb_width = sizes.surface_width = 1024;
+		sizes.fb_height = sizes.surface_height = 768;
+	}
+
+	/* push down into drivers */
+	ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
+	if (ret < 0)
+		return ret;
+
+	info = fb_helper->fbdev;
+
+	/*
+	 * Set the fb pointer - usually drm_setup_crtcs does this for hotplug
+	 * events, but at init time drm_setup_crtcs needs to be called before
+	 * the fb is allocated (since we need to figure out the desired size of
+	 * the fb before we can allocate it ...). Hence we need to fix things up
+	 * here again.
+	 */
+	for (i = 0; i < fb_helper->crtc_count; i++)
+		if (fb_helper->crtc_info[i].mode_set.num_connectors)
+			fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+
+	info->var.pixclock = 0;
+	if (register_framebuffer(info) < 0)
+		return -EINVAL;
+
+	dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+			info->node, info->fix.id);
+
+	/*
+	 * Switch back to the kernel console on panic.  All fb helpers are
+	 * kept on one global list so multi-card setups can be restored too.
+	 */
+	if (list_empty(&kernel_fb_helper_list)) {
+		dev_info(fb_helper->dev->dev, "registered panic notifier\n");
+		atomic_notifier_chain_register(&panic_notifier_list,
+					       &paniced);
+		register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+	}
+
+	list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+
+	return 0;
+}
+
+/**
+ * drm_fb_helper_fill_fix - initializes fixed fbdev information
+ * @info: fbdev registered by the helper
+ * @pitch: desired pitch
+ * @depth: desired depth
+ *
+ * Helper to fill in the fixed fbdev information, useful for non-accelerated
+ * fbdev emulation. Drivers which support acceleration methods that impose
+ * additional constraints need to set up their own limits.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * ->fb_probe callback.
+ */
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+			    uint32_t depth)
+{
+	info->fix.type = FB_TYPE_PACKED_PIXELS;
+	info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+		FB_VISUAL_TRUECOLOR;
+	info->fix.mmio_start = 0;
+	info->fix.mmio_len = 0;
+	info->fix.type_aux = 0;
+	info->fix.xpanstep = 1; /* doing it in hw */
+	info->fix.ypanstep = 1; /* doing it in hw */
+	info->fix.ywrapstep = 0;
+	info->fix.accel = FB_ACCEL_NONE;
+
+	info->fix.line_length = pitch;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+
+/**
+ * drm_fb_helper_fill_var - initializes variable fbdev information
+ * @info: fbdev instance to set up
+ * @fb_helper: fb helper instance to use as template
+ * @fb_width: desired fb width
+ * @fb_height: desired fb height
+ *
+ * Sets up the variable fbdev metainformation from the given fb helper instance
+ * and the drm framebuffer allocated in fb_helper->fb.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * ->fb_probe callback after having allocated the fbdev backing
+ * storage framebuffer.
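+ *
+ * A typical ->fb_probe implementation would, after creating the backing
+ * drm_framebuffer, do something like this (sketch only):
+ *
+ *	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ *	drm_fb_helper_fill_var(info, fb_helper,
+ *			       sizes->fb_width, sizes->fb_height);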
+ */
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
+			    uint32_t fb_width, uint32_t fb_height)
+{
+	struct drm_framebuffer *fb = fb_helper->fb;
+	info->pseudo_palette = fb_helper->pseudo_palette;
+	info->var.xres_virtual = fb->width;
+	info->var.yres_virtual = fb->height;
+	info->var.bits_per_pixel = fb->bits_per_pixel;
+	info->var.accel_flags = FB_ACCELF_TEXT;
+	info->var.xoffset = 0;
+	info->var.yoffset = 0;
+	info->var.activate = FB_ACTIVATE_NOW;
+	info->var.height = -1;
+	info->var.width = -1;
+
+	switch (fb->depth) {
+	case 8:
+		info->var.red.offset = 0;
+		info->var.green.offset = 0;
+		info->var.blue.offset = 0;
+		info->var.red.length = 8; /* 8bit DAC */
+		info->var.green.length = 8;
+		info->var.blue.length = 8;
+		info->var.transp.offset = 0;
+		info->var.transp.length = 0;
+		break;
+	case 15:
+		info->var.red.offset = 10;
+		info->var.green.offset = 5;
+		info->var.blue.offset = 0;
+		info->var.red.length = 5;
+		info->var.green.length = 5;
+		info->var.blue.length = 5;
+		info->var.transp.offset = 15;
+		info->var.transp.length = 1;
+		break;
+	case 16:
+		info->var.red.offset = 11;
+		info->var.green.offset = 5;
+		info->var.blue.offset = 0;
+		info->var.red.length = 5;
+		info->var.green.length = 6;
+		info->var.blue.length = 5;
+		info->var.transp.offset = 0;
+		info->var.transp.length = 0;
+		break;
+	case 24:
+		info->var.red.offset = 16;
+		info->var.green.offset = 8;
+		info->var.blue.offset = 0;
+		info->var.red.length = 8;
+		info->var.green.length = 8;
+		info->var.blue.length = 8;
+		info->var.transp.offset = 0;
+		info->var.transp.length = 0;
+		break;
+	case 32:
+		info->var.red.offset = 16;
+		info->var.green.offset = 8;
+		info->var.blue.offset = 0;
+		info->var.red.length = 8;
+		info->var.green.length = 8;
+		info->var.blue.length = 8;
+		info->var.transp.offset = 24;
+		info->var.transp.length = 8;
+		break;
+	default:
+		break;
+	}
+
+	info->var.xres = fb_width;
+	info->var.yres = fb_height;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_var);
+
+static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
+					       uint32_t maxX,
+					       uint32_t maxY)
+{
+	struct drm_connector *connector;
+	int count = 0;
+	int i;
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		connector = fb_helper->connector_info[i]->connector;
+		count += connector->funcs->fill_modes(connector, maxX, maxY);
+	}
+
+	return count;
+}
+
+static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
+{
+	struct drm_display_mode *mode;
+
+	list_for_each_entry(mode, &fb_connector->connector->modes, head) {
+		if (drm_mode_width(mode) > width ||
+		    drm_mode_height(mode) > height)
+			continue;
+		if (mode->type & DRM_MODE_TYPE_PREFERRED)
+			return mode;
+	}
+	return NULL;
+}
+
+static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
+{
+	struct drm_cmdline_mode *cmdline_mode;
+	cmdline_mode = &fb_connector->cmdline_mode;
+	return cmdline_mode->specified;
+}
+
+static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+						      int width, int height)
+{
+	struct drm_cmdline_mode *cmdline_mode;
+	struct drm_display_mode *mode = NULL;
+
+	cmdline_mode = &fb_helper_conn->cmdline_mode;
+	if (!cmdline_mode->specified)
+		return mode;
+
+	/* attempt to find a matching mode in the list of modes we have
+	 * gotten so far; if none matches, add a CVT mode that conforms
+	 */
+	if (cmdline_mode->rb || cmdline_mode->margins)
+		goto create_mode;
+
+	list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+		/* check width/height */
+		if (mode->hdisplay != cmdline_mode->xres ||
+		    mode->vdisplay != cmdline_mode->yres)
+			continue;
+
+		if (cmdline_mode->refresh_specified) {
+			if (mode->vrefresh != cmdline_mode->refresh)
+				continue;
+		}
+
+		if (cmdline_mode->interlace) {
+			if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+				continue;
+		}
+		return mode;
+	}
+
+create_mode:
+	mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
+						 cmdline_mode);
+	if (mode)
+		list_add(&mode->head, &fb_helper_conn->connector->modes);
+	return mode;
+}
+
+static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
+{
+	bool enable;
+
+	if (strict)
+		enable = connector->status == connector_status_connected;
+	else
+		enable = connector->status != connector_status_disconnected;
+
+	return enable;
+}
+
+static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
+				  bool *enabled)
+{
+	bool any_enabled = false;
+	struct drm_connector *connector;
+	int i = 0;
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		connector = fb_helper->connector_info[i]->connector;
+		enabled[i] = drm_connector_enabled(connector, true);
+		DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
+			  enabled[i] ? "yes" : "no");
+		any_enabled |= enabled[i];
+	}
+
+	if (any_enabled)
+		return;
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		connector = fb_helper->connector_info[i]->connector;
+		enabled[i] = drm_connector_enabled(connector, false);
+	}
+}
+
+static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
+			      struct drm_display_mode **modes,
+			      bool *enabled, int width, int height)
+{
+	int count, i, j;
+	bool can_clone = false;
+	struct drm_fb_helper_connector *fb_helper_conn;
+	struct drm_display_mode *dmt_mode, *mode;
+
+	/* only contemplate cloning in the single crtc case */
+	if (fb_helper->crtc_count > 1)
+		return false;
+
+	count = 0;
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		if (enabled[i])
+			count++;
+	}
+
+	/* only contemplate cloning if more than one connector is enabled */
+	if (count <= 1)
+		return false;
+
+	/* check the command line or if nothing common pick 1024x768 */
+	can_clone = true;
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		if (!enabled[i])
+			continue;
+		fb_helper_conn = fb_helper->connector_info[i];
+		modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+		if (!modes[i]) {
+			can_clone = false;
+			break;
+		}
+		for (j = 0; j < i; j++) {
+			if (!enabled[j])
+				continue;
+			if (!drm_mode_equal(modes[j], modes[i]))
+				can_clone = false;
+		}
+	}
+
+	if (can_clone) {
+		DRM_DEBUG_KMS("can clone using command line\n");
+		return true;
+	}
+
+	/* try and find a 1024x768 mode on each connector */
+	can_clone = true;
+	dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+
+		if (!enabled[i])
+			continue;
+
+		fb_helper_conn = fb_helper->connector_info[i];
+		list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
+			if (drm_mode_equal(mode, dmt_mode))
+				modes[i] = mode;
+		}
+		if (!modes[i])
+			can_clone = false;
+	}
+
+	if (can_clone) {
+		DRM_DEBUG_KMS("can clone using 1024x768\n");
+		return true;
+	}
+	DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
+	return false;
+}
+
+static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
+				 struct drm_display_mode **modes,
+				 bool *enabled, int width, int height)
+{
+	struct drm_fb_helper_connector *fb_helper_conn;
+	int i;
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		fb_helper_conn = fb_helper->connector_info[i];
+
+		if (!enabled[i])
+			continue;
+
+		DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
+			      fb_helper_conn->connector->base.id);
+
+		/* go for the command line mode first */
+		modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
+		if (!modes[i]) {
+			DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
+				      fb_helper_conn->connector->base.id);
+			modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
+		}
+		/* No preferred modes, pick one off the list */
+		if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
+			list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
+				break;
+		}
+		DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
+			  "none");
+	}
+	return true;
+}
+
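+/*
+ * Recursively walk connectors n..connector_count-1, trying each usable crtc
+ * for connector n and scoring the resulting assignment; the best-scoring
+ * combination is returned in best_crtcs.  Connected status, a command line
+ * mode and a preferred mode each add one point per connector.
+ */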
+static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+			  struct drm_fb_helper_crtc **best_crtcs,
+			  struct drm_display_mode **modes,
+			  int n, int width, int height)
+{
+	int c, o;
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_connector *connector;
+	struct drm_connector_helper_funcs *connector_funcs;
+	struct drm_encoder *encoder;
+	struct drm_fb_helper_crtc *best_crtc;
+	int my_score, best_score, score;
+	struct drm_fb_helper_crtc **crtcs, *crtc;
+	struct drm_fb_helper_connector *fb_helper_conn;
+
+	if (n == fb_helper->connector_count)
+		return 0;
+
+	fb_helper_conn = fb_helper->connector_info[n];
+	connector = fb_helper_conn->connector;
+
+	best_crtcs[n] = NULL;
+	best_crtc = NULL;
+	best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
+	if (modes[n] == NULL)
+		return best_score;
+
+	crtcs = kcalloc(dev->mode_config.num_connector,
+			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+	if (!crtcs)
+		return best_score;
+
+	my_score = 1;
+	if (connector->status == connector_status_connected)
+		my_score++;
+	if (drm_has_cmdline_mode(fb_helper_conn))
+		my_score++;
+	if (drm_has_preferred_mode(fb_helper_conn, width, height))
+		my_score++;
+
+	connector_funcs = connector->helper_private;
+	encoder = connector_funcs->best_encoder(connector);
+	if (!encoder)
+		goto out;
+
+	/* select a crtc for this connector and then attempt to configure
+	   remaining connectors */
+	for (c = 0; c < fb_helper->crtc_count; c++) {
+		crtc = &fb_helper->crtc_info[c];
+
+		if ((encoder->possible_crtcs & (1 << c)) == 0)
+			continue;
+
+		for (o = 0; o < n; o++)
+			if (best_crtcs[o] == crtc)
+				break;
+
+		if (o < n) {
+			/* ignore cloning unless only a single crtc */
+			if (fb_helper->crtc_count > 1)
+				continue;
+
+			if (!drm_mode_equal(modes[o], modes[n]))
+				continue;
+		}
+
+		crtcs[n] = crtc;
+		memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
+		score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
+						  width, height);
+		if (score > best_score) {
+			best_crtc = crtc;
+			best_score = score;
+			memcpy(best_crtcs, crtcs,
+			       dev->mode_config.num_connector *
+			       sizeof(struct drm_fb_helper_crtc *));
+		}
+	}
+out:
+	kfree(crtcs);
+	return best_score;
+}
+
+static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_fb_helper_crtc **crtcs;
+	struct drm_display_mode **modes;
+	struct drm_mode_set *modeset;
+	bool *enabled;
+	int width, height;
+	int i;
+
+	DRM_DEBUG_KMS("\n");
+
+	width = dev->mode_config.max_width;
+	height = dev->mode_config.max_height;
+
+	crtcs = kcalloc(dev->mode_config.num_connector,
+			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+	modes = kcalloc(dev->mode_config.num_connector,
+			sizeof(struct drm_display_mode *), GFP_KERNEL);
+	enabled = kcalloc(dev->mode_config.num_connector,
+			  sizeof(bool), GFP_KERNEL);
+	if (!crtcs || !modes || !enabled) {
+		DRM_ERROR("Memory allocation failed\n");
+		goto out;
+	}
+
+	drm_enable_connectors(fb_helper, enabled);
+
+	if (!(fb_helper->funcs->initial_config &&
+	      fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
+					       enabled, width, height))) {
+		memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
+		memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
+
+		if (!drm_target_cloned(fb_helper,
+				       modes, enabled, width, height) &&
+		    !drm_target_preferred(fb_helper,
+					  modes, enabled, width, height))
+			DRM_ERROR("Unable to find initial modes\n");
+
+		DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
+			      width, height);
+
+		drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
+	}
+
+	/* need to set the modesets up here for use later */
+	/* fill out the connector<->crtc mappings into the modesets */
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		modeset = &fb_helper->crtc_info[i].mode_set;
+		modeset->num_connectors = 0;
+		modeset->fb = NULL;
+	}
+
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		struct drm_display_mode *mode = modes[i];
+		struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+
+		if (mode && fb_crtc) {
+			modeset = &fb_crtc->mode_set;
+			DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
+				      mode->name, fb_crtc->mode_set.crtc->base.id);
+			fb_crtc->desired_mode = mode;
+			if (modeset->mode)
+				drm_mode_destroy(dev, modeset->mode);
+			modeset->mode = drm_mode_duplicate(dev,
+							   fb_crtc->desired_mode);
+			modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+			modeset->fb = fb_helper->fb;
+		}
+	}
+
+	/* Clear out any old modes if there are no more connected outputs. */
+	for (i = 0; i < fb_helper->crtc_count; i++) {
+		modeset = &fb_helper->crtc_info[i].mode_set;
+		if (modeset->num_connectors == 0) {
+			BUG_ON(modeset->fb);
+			BUG_ON(modeset->num_connectors);
+			if (modeset->mode)
+				drm_mode_destroy(dev, modeset->mode);
+			modeset->mode = NULL;
+		}
+	}
+out:
+	kfree(crtcs);
+	kfree(modes);
+	kfree(enabled);
+}
+
+/**
+ * drm_fb_helper_initial_config - setup a sane initial connector configuration
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
+ *
+ * Scans the CRTCs and connectors and tries to put together an initial setup.
+ * At the moment, this is a cloned configuration across all heads with
+ * a new framebuffer object as the backing store.
+ *
+ * Note that this also registers the fbdev and so allows userspace to call into
+ * the driver through the fbdev interfaces.
+ *
+ * This function will call down into the ->fb_probe callback to let
+ * the driver allocate and initialize the fbdev info structure and the drm
+ * framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
+ * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
+ * values for the fbdev info structure.
+ *
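+ * Example init flow from a driver's load path (sketch; error handling is
+ * elided and drm_fb_helper_single_add_all_connectors() is provided
+ * elsewhere by this helper library):
+ *
+ *	drm_fb_helper_single_add_all_connectors(fb_helper);
+ *	drm_fb_helper_initial_config(fb_helper, 32);
+ *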
+ * RETURNS:
+ * Zero if everything went ok, nonzero otherwise.
+ */
+bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+{
+	struct drm_device *dev = fb_helper->dev;
+	int count = 0;
+
+	drm_fb_helper_parse_command_line(fb_helper);
+
+	count = drm_fb_helper_probe_connector_modes(fb_helper,
+						    dev->mode_config.max_width,
+						    dev->mode_config.max_height);
+	/*
+	 * we shouldn't end up with no modes here.
+	 */
+	if (count == 0)
+		dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
+
+	drm_setup_crtcs(fb_helper);
+
+	return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+}
+EXPORT_SYMBOL(drm_fb_helper_initial_config);
+
+/**
+ * drm_fb_helper_hotplug_event - respond to a hotplug notification by
+ *                               probing all the outputs attached to the fb
+ * @fb_helper: the drm_fb_helper
+ *
+ * Scan the connectors attached to the fb_helper and try to put together a
+ * setup after notification of a change in output configuration.
+ *
+ * Called at runtime, takes the mode config locks to be able to check/change the
+ * modeset configuration. Must be run from process context (which usually means
+ * either the output polling work or a work item launched from the driver's
+ * hotplug interrupt).
+ *
+ * Note that the driver must ensure that this is only called _after_ the fb has
+ * been fully set up, i.e. after the call to drm_fb_helper_initial_config.
+ *
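+ * Typically wired up via the mode_config ->output_poll_changed hook, e.g.
+ * (sketch; the foo_* names are illustrative):
+ *
+ *	static void foo_output_poll_changed(struct drm_device *dev)
+ *	{
+ *		struct foo_private *priv = dev->dev_private;
+ *
+ *		drm_fb_helper_hotplug_event(&priv->fbdev->fb_helper);
+ *	}
+ *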
+ * RETURNS:
+ * 0 on success and a non-zero error code otherwise.
+ */
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+	struct drm_device *dev = fb_helper->dev;
+	int count = 0;
+	u32 max_width, max_height, bpp_sel;
+
+	if (!fb_helper->fb)
+		return 0;
+
+	mutex_lock(&fb_helper->dev->mode_config.mutex);
+	if (!drm_fb_helper_is_bound(fb_helper)) {
+		fb_helper->delayed_hotplug = true;
+		mutex_unlock(&fb_helper->dev->mode_config.mutex);
+		return 0;
+	}
+	DRM_DEBUG_KMS("\n");
+
+	max_width = fb_helper->fb->width;
+	max_height = fb_helper->fb->height;
+	bpp_sel = fb_helper->fb->bits_per_pixel;
+
+	count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
+						    max_height);
+	mutex_unlock(&fb_helper->dev->mode_config.mutex);
+
+	drm_modeset_lock_all(dev);
+	drm_setup_crtcs(fb_helper);
+	drm_modeset_unlock_all(dev);
+	drm_fb_helper_set_par(fb_helper->fbdev);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+
+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
+ * but the module doesn't depend on any fb console symbols.  At least
+ * attempt to load fbcon to avoid leaving the system without a usable console.
+ */
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
+static int __init drm_fb_helper_modinit(void)
+{
+	const char *name = "fbcon";
+	struct module *fbcon;
+
+	mutex_lock(&module_mutex);
+	fbcon = find_module(name);
+	mutex_unlock(&module_mutex);
+
+	if (!fbcon)
+		request_module_nowait(name);
+	return 0;
+}
+
+module_init(drm_fb_helper_modinit);
+#endif
diff --git a/linux-imx/drivers/gpu/drm/drm_fops.c b/linux-imx/drivers/gpu/drm/drm_fops.c
new file mode 100644
index 0000000..429e07d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_fops.c
@@ -0,0 +1,640 @@
+/**
+ * \file drm_fops.c
+ * File operations for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Daryll Strauss <daryll@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+/* from BKL pushdown: note that nothing else serializes idr_find() */
+DEFINE_MUTEX(drm_global_mutex);
+EXPORT_SYMBOL(drm_global_mutex);
+
+static int drm_open_helper(struct inode *inode, struct file *filp,
+			   struct drm_device * dev);
+
+static int drm_setup(struct drm_device * dev)
+{
+	int i;
+	int ret;
+
+	if (dev->driver->firstopen) {
+		ret = dev->driver->firstopen(dev);
+		if (ret != 0)
+			return ret;
+	}
+
+	atomic_set(&dev->ioctl_count, 0);
+	atomic_set(&dev->vma_count, 0);
+
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
+		dev->buf_use = 0;
+		atomic_set(&dev->buf_alloc, 0);
+
+		i = drm_dma_setup(dev);
+		if (i < 0)
+			return i;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+		atomic_set(&dev->counts[i], 0);
+
+	dev->sigdata.lock = NULL;
+
+	dev->context_flag = 0;
+	dev->interrupt_flag = 0;
+	dev->dma_flag = 0;
+	dev->last_context = 0;
+	dev->last_switch = 0;
+	dev->last_checked = 0;
+	init_waitqueue_head(&dev->context_wait);
+	dev->if_version = 0;
+
+	dev->ctx_start = 0;
+	dev->lck_start = 0;
+
+	dev->buf_async = NULL;
+	init_waitqueue_head(&dev->buf_readers);
+	init_waitqueue_head(&dev->buf_writers);
+
+	DRM_DEBUG("\n");
+
+	/*
+	 * The kernel's context could be created here, but is now created
+	 * in drm_dma_enqueue.  This is more resource-efficient for
+	 * hardware that does not do DMA, but may mean that
+	 * drm_select_queue fails between the time the interrupt is
+	 * initialized and the time the queues are initialized.
+	 */
+
+	return 0;
+}
+
+/**
+ * Open file.
+ *
+ * \param inode device inode
+ * \param filp file pointer.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the DRM device with the same minor number, calls
+ * open_helper(), and increments the device open count. If the open count
+ * was previously zero, i.e., this is the first time the device is opened,
+ * then calls setup().
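+ *
+ * Drivers normally reach this via their file_operations table, e.g.
+ * (illustrative; foo_driver_fops is hypothetical):
+ *
+ *	static const struct file_operations foo_driver_fops = {
+ *		.owner		= THIS_MODULE,
+ *		.open		= drm_open,
+ *		.release	= drm_release,
+ *		.unlocked_ioctl	= drm_ioctl,
+ *		.mmap		= drm_mmap,
+ *		.poll		= drm_poll,
+ *		.read		= drm_read,
+ *		.fasync		= drm_fasync,
+ *	};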
+ */
+int drm_open(struct inode *inode, struct file *filp)
+{
+	struct drm_device *dev = NULL;
+	int minor_id = iminor(inode);
+	struct drm_minor *minor;
+	int retcode = 0;
+	int need_setup = 0;
+	struct address_space *old_mapping;
+	struct address_space *old_imapping;
+
+	minor = idr_find(&drm_minors_idr, minor_id);
+	if (!minor)
+		return -ENODEV;
+
+	if (!(dev = minor->dev))
+		return -ENODEV;
+
+	if (drm_device_is_unplugged(dev))
+		return -ENODEV;
+
+	if (!dev->open_count++)
+		need_setup = 1;
+	mutex_lock(&dev->struct_mutex);
+	old_imapping = inode->i_mapping;
+	old_mapping = dev->dev_mapping;
+	if (old_mapping == NULL)
+		dev->dev_mapping = &inode->i_data;
+	/* ihold ensures nobody can remove inode with our i_data */
+	ihold(container_of(dev->dev_mapping, struct inode, i_data));
+	inode->i_mapping = dev->dev_mapping;
+	filp->f_mapping = dev->dev_mapping;
+	mutex_unlock(&dev->struct_mutex);
+
+	retcode = drm_open_helper(inode, filp, dev);
+	if (retcode)
+		goto err_undo;
+	atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+	if (need_setup) {
+		retcode = drm_setup(dev);
+		if (retcode)
+			goto err_undo;
+	}
+	return 0;
+
+err_undo:
+	mutex_lock(&dev->struct_mutex);
+	filp->f_mapping = old_imapping;
+	inode->i_mapping = old_imapping;
+	iput(container_of(dev->dev_mapping, struct inode, i_data));
+	dev->dev_mapping = old_mapping;
+	mutex_unlock(&dev->struct_mutex);
+	dev->open_count--;
+	return retcode;
+}
+EXPORT_SYMBOL(drm_open);
+
+/**
+ * File \c open operation.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ *
+ * Puts the dev->fops corresponding to the device minor number into
+ * \p filp, calls the \c open method, and restores the file operations.
+ */
+int drm_stub_open(struct inode *inode, struct file *filp)
+{
+	struct drm_device *dev = NULL;
+	struct drm_minor *minor;
+	int minor_id = iminor(inode);
+	int err = -ENODEV;
+	const struct file_operations *old_fops;
+
+	DRM_DEBUG("\n");
+
+	mutex_lock(&drm_global_mutex);
+	minor = idr_find(&drm_minors_idr, minor_id);
+	if (!minor)
+		goto out;
+
+	if (!(dev = minor->dev))
+		goto out;
+
+	if (drm_device_is_unplugged(dev))
+		goto out;
+
+	old_fops = filp->f_op;
+	filp->f_op = fops_get(dev->driver->fops);
+	if (filp->f_op == NULL) {
+		filp->f_op = old_fops;
+		goto out;
+	}
+	if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
+		fops_put(filp->f_op);
+		filp->f_op = fops_get(old_fops);
+	}
+	fops_put(old_fops);
+
+out:
+	mutex_unlock(&drm_global_mutex);
+	return err;
+}
+
+/**
+ * Check whether DRI will run on this CPU.
+ *
+ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
+ */
+static int drm_cpu_valid(void)
+{
+#if defined(__i386__)
+	if (boot_cpu_data.x86 == 3)
+		return 0;	/* No cmpxchg on a 386 */
+#endif
+#if defined(__sparc__) && !defined(__sparc_v9__)
+	return 0;		/* No cmpxchg before v9 sparc. */
+#endif
+	return 1;
+}
+
+/**
+ * Called whenever a process opens /dev/drm.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param dev device.
+ * \return zero on success or a negative number on failure.
+ *
+ * Creates and initializes a drm_file structure for the file private data in
+ * \p filp and adds it into the doubly linked list in \p dev.
+ */
+static int drm_open_helper(struct inode *inode, struct file *filp,
+			   struct drm_device * dev)
+{
+	int minor_id = iminor(inode);
+	struct drm_file *priv;
+	int ret;
+
+	if (filp->f_flags & O_EXCL)
+		return -EBUSY;	/* No exclusive opens */
+	if (!drm_cpu_valid())
+		return -EINVAL;
+	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+		return -EINVAL;
+
+	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	filp->private_data = priv;
+	priv->filp = filp;
+	priv->uid = current_euid();
+	priv->pid = get_pid(task_pid(current));
+	priv->minor = idr_find(&drm_minors_idr, minor_id);
+	priv->ioctl_count = 0;
+	/* for compatibility root is always authenticated */
+	priv->authenticated = capable(CAP_SYS_ADMIN);
+	priv->lock_count = 0;
+
+	INIT_LIST_HEAD(&priv->lhead);
+	INIT_LIST_HEAD(&priv->fbs);
+	mutex_init(&priv->fbs_lock);
+	INIT_LIST_HEAD(&priv->event_list);
+	init_waitqueue_head(&priv->event_wait);
+	priv->event_space = 4096; /* set aside 4k for event buffer */
+
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_open(dev, priv);
+
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_prime_init_file_private(&priv->prime);
+
+	if (dev->driver->open) {
+		ret = dev->driver->open(dev, priv);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	/* if there is no current master, make this fd the master */
+	mutex_lock(&dev->struct_mutex);
+	if (!priv->minor->master) {
+		/* create a new master */
+		priv->minor->master = drm_master_create(priv->minor);
+		if (!priv->minor->master) {
+			mutex_unlock(&dev->struct_mutex);
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		priv->is_master = 1;
+		/* take another reference for the copy in the local file priv */
+		priv->master = drm_master_get(priv->minor->master);
+
+		priv->authenticated = 1;
+
+		mutex_unlock(&dev->struct_mutex);
+		if (dev->driver->master_create) {
+			ret = dev->driver->master_create(dev, priv->master);
+			if (ret) {
+				mutex_lock(&dev->struct_mutex);
+				/* drop both references if this fails */
+				drm_master_put(&priv->minor->master);
+				drm_master_put(&priv->master);
+				mutex_unlock(&dev->struct_mutex);
+				goto out_free;
+			}
+		}
+		mutex_lock(&dev->struct_mutex);
+		if (dev->driver->master_set) {
+			ret = dev->driver->master_set(dev, priv, true);
+			if (ret) {
+				/* drop both references if this fails */
+				drm_master_put(&priv->minor->master);
+				drm_master_put(&priv->master);
+				mutex_unlock(&dev->struct_mutex);
+				goto out_free;
+			}
+		}
+		mutex_unlock(&dev->struct_mutex);
+	} else {
+		/* get a reference to the master */
+		priv->master = drm_master_get(priv->minor->master);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	list_add(&priv->lhead, &dev->filelist);
+	mutex_unlock(&dev->struct_mutex);
+
+#ifdef __alpha__
+	/*
+	 * Default the hose
+	 */
+	if (!dev->hose) {
+		struct pci_dev *pci_dev;
+		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
+		if (pci_dev) {
+			dev->hose = pci_dev->sysdata;
+			pci_dev_put(pci_dev);
+		}
+		if (!dev->hose) {
+			struct pci_bus *b = pci_bus_b(pci_root_buses.next);
+			if (b)
+				dev->hose = b->sysdata;
+		}
+	}
+#endif
+
+	return 0;
+out_free:
+	kfree(priv);
+	filp->private_data = NULL;
+	return ret;
+}
+
+/** Set up SIGIO notification for this DRM file via fasync_helper(). */
+int drm_fasync(int fd, struct file *filp, int on)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+
+	DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
+		  (long)old_encode_dev(priv->minor->device));
+	return fasync_helper(fd, filp, on, &dev->buf_async);
+}
+EXPORT_SYMBOL(drm_fasync);
+
+static void drm_master_release(struct drm_device *dev, struct file *filp)
+{
+	struct drm_file *file_priv = filp->private_data;
+
+	if (drm_i_have_hw_lock(dev, file_priv)) {
+		DRM_DEBUG("File %p released, freeing lock for context %d\n",
+			  filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+		drm_lock_free(&file_priv->master->lock,
+			      _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+	}
+}
+
+static void drm_events_release(struct drm_file *file_priv)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_pending_event *e, *et;
+	struct drm_pending_vblank_event *v, *vt;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	/* Remove pending flips */
+	list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
+		if (v->base.file_priv == file_priv) {
+			list_del(&v->base.link);
+			drm_vblank_put(dev, v->pipe);
+			v->base.destroy(&v->base);
+		}
+
+	/* Remove unconsumed events */
+	list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+		e->destroy(e);
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/**
+ * Release file.
+ *
+ * \param inode device inode
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the hardware lock is held then free it, and take it again for the kernel
+ * context since it's necessary to reclaim buffers. Unlink the file private
+ * data from its list and free it. Decreases the open count and if it reaches
+ * zero calls drm_lastclose().
+ */
+int drm_release(struct inode *inode, struct file *filp)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev = file_priv->minor->dev;
+	int retcode = 0;
+
+	mutex_lock(&drm_global_mutex);
+
+	DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+	if (dev->driver->preclose)
+		dev->driver->preclose(dev, file_priv);
+
+	/* ========================================================
+	 * Begin inline drm_release
+	 */
+
+	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+		  task_pid_nr(current),
+		  (long)old_encode_dev(file_priv->minor->device),
+		  dev->open_count);
+
+	/* Release any auth tokens that might point to this file_priv,
+	   (do that under the drm_global_mutex) */
+	if (file_priv->magic)
+		(void) drm_remove_magic(file_priv->master, file_priv->magic);
+
+	/* if the master has gone away we can't do anything with the lock */
+	if (file_priv->minor->master)
+		drm_master_release(dev, filp);
+
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		drm_core_reclaim_buffers(dev, file_priv);
+
+	drm_events_release(file_priv);
+
+	if (dev->driver->driver_features & DRIVER_MODESET)
+		drm_fb_release(file_priv);
+
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_release(dev, file_priv);
+
+	mutex_lock(&dev->ctxlist_mutex);
+	if (!list_empty(&dev->ctxlist)) {
+		struct drm_ctx_list *pos, *n;
+
+		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+			if (pos->tag == file_priv &&
+			    pos->handle != DRM_KERNEL_CONTEXT) {
+				if (dev->driver->context_dtor)
+					dev->driver->context_dtor(dev,
+								  pos->handle);
+
+				drm_ctxbitmap_free(dev, pos->handle);
+
+				list_del(&pos->head);
+				kfree(pos);
+				--dev->ctx_count;
+			}
+		}
+	}
+	mutex_unlock(&dev->ctxlist_mutex);
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (file_priv->is_master) {
+		struct drm_master *master = file_priv->master;
+		struct drm_file *temp;
+		list_for_each_entry(temp, &dev->filelist, lhead) {
+			if ((temp->master == file_priv->master) &&
+			    (temp != file_priv))
+				temp->authenticated = 0;
+		}
+
+		/*
+		 * Since the master is going away, so is the
+		 * ability to take the hardware lock.
+		 */
+
+		if (master->lock.hw_lock) {
+			if (dev->sigdata.lock == master->lock.hw_lock)
+				dev->sigdata.lock = NULL;
+			master->lock.hw_lock = NULL;
+			master->lock.file_priv = NULL;
+			wake_up_interruptible_all(&master->lock.lock_queue);
+		}
+
+		if (file_priv->minor->master == file_priv->master) {
+			/* drop the reference held by the minor */
+			if (dev->driver->master_drop)
+				dev->driver->master_drop(dev, file_priv, true);
+			drm_master_put(&file_priv->minor->master);
+		}
+	}
+
+	BUG_ON(dev->dev_mapping == NULL);
+	iput(container_of(dev->dev_mapping, struct inode, i_data));
+
+	/* drop the reference held by the file priv */
+	drm_master_put(&file_priv->master);
+	file_priv->is_master = 0;
+	list_del(&file_priv->lhead);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (dev->driver->postclose)
+		dev->driver->postclose(dev, file_priv);
+
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_prime_destroy_file_private(&file_priv->prime);
+
+	put_pid(file_priv->pid);
+	kfree(file_priv);
+
+	/* ========================================================
+	 * End inline drm_release
+	 */
+
+	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+	if (!--dev->open_count) {
+		if (atomic_read(&dev->ioctl_count)) {
+			DRM_ERROR("Device busy: %d\n",
+				  atomic_read(&dev->ioctl_count));
+			retcode = -EBUSY;
+		} else
+			retcode = drm_lastclose(dev);
+		if (drm_device_is_unplugged(dev))
+			drm_put_dev(dev);
+	}
+	mutex_unlock(&drm_global_mutex);
+
+	return retcode;
+}
+EXPORT_SYMBOL(drm_release);
+
+static bool
+drm_dequeue_event(struct drm_file *file_priv,
+		  size_t total, size_t max, struct drm_pending_event **out)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_pending_event *e;
+	unsigned long flags;
+	bool ret = false;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	*out = NULL;
+	if (list_empty(&file_priv->event_list))
+		goto out;
+	e = list_first_entry(&file_priv->event_list,
+			     struct drm_pending_event, link);
+	if (e->event->length + total > max)
+		goto out;
+
+	file_priv->event_space += e->event->length;
+	list_del(&e->link);
+	*out = e;
+	ret = true;
+
+out:
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return ret;
+}
+
+ssize_t drm_read(struct file *filp, char __user *buffer,
+		 size_t count, loff_t *offset)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_pending_event *e;
+	size_t total;
+	ssize_t ret;
+
+	ret = wait_event_interruptible(file_priv->event_wait,
+				       !list_empty(&file_priv->event_list));
+	if (ret < 0)
+		return ret;
+
+	total = 0;
+	while (drm_dequeue_event(file_priv, total, count, &e)) {
+		if (copy_to_user(buffer + total,
+				 e->event, e->event->length)) {
+			total = -EFAULT;
+			break;
+		}
+
+		total += e->event->length;
+		e->destroy(e);
+	}
+
+	return total;
+}
+EXPORT_SYMBOL(drm_read);
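+
+/*
+ * Userspace consumes these events by read()ing the DRM fd.  A sketch of the
+ * consumer side (userspace code; handle_vblank is illustrative):
+ *
+ *	char buf[4096];
+ *	ssize_t len = read(drm_fd, buf, sizeof(buf));
+ *	char *p = buf;
+ *	while (p < buf + len) {
+ *		struct drm_event *e = (struct drm_event *)p;
+ *		if (e->type == DRM_EVENT_VBLANK)
+ *			handle_vblank((struct drm_event_vblank *)e);
+ *		p += e->length;
+ *	}
+ */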
+
+unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+{
+	struct drm_file *file_priv = filp->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(filp, &file_priv->event_wait, wait);
+
+	if (!list_empty(&file_priv->event_list))
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
+}
+EXPORT_SYMBOL(drm_poll);
diff --git a/linux-imx/drivers/gpu/drm/drm_gem.c b/linux-imx/drivers/gpu/drm/drm_gem.c
new file mode 100644
index 0000000..239ef30
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_gem.c
@@ -0,0 +1,717 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls.  However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file.  However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ *   default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ *   handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls.  The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+
+/* pgoff in mmap is an unsigned long, so we need to make sure that
+ * the faked up offset will fit
+ */
+
+#if BITS_PER_LONG == 64
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#else
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
+#endif
+
+/**
+ * Initialize the GEM device fields
+ */
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+	struct drm_gem_mm *mm;
+
+	spin_lock_init(&dev->object_name_lock);
+	idr_init(&dev->object_name_idr);
+
+	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
+	if (!mm) {
+		DRM_ERROR("out of memory\n");
+		return -ENOMEM;
+	}
+
+	dev->mm_private = mm;
+
+	if (drm_ht_create(&mm->offset_hash, 12)) {
+		kfree(mm);
+		return -ENOMEM;
+	}
+
+	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+			DRM_FILE_PAGE_OFFSET_SIZE)) {
+		drm_ht_remove(&mm->offset_hash);
+		kfree(mm);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void
+drm_gem_destroy(struct drm_device *dev)
+{
+	struct drm_gem_mm *mm = dev->mm_private;
+
+	drm_mm_takedown(&mm->offset_manager);
+	drm_ht_remove(&mm->offset_hash);
+	kfree(mm);
+	dev->mm_private = NULL;
+}
+
+/**
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
+ */
+int drm_gem_object_init(struct drm_device *dev,
+			struct drm_gem_object *obj, size_t size)
+{
+	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+	obj->dev = dev;
+	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+	if (IS_ERR(obj->filp))
+		return PTR_ERR(obj->filp);
+
+	kref_init(&obj->refcount);
+	atomic_set(&obj->handle_count, 0);
+	obj->size = size;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_object_init);
+
+/**
+ * Initialize an already allocated GEM object of the specified size with
+ * no GEM provided backing store. Instead the caller is responsible for
+ * backing the object and handling it.
+ */
+int drm_gem_private_object_init(struct drm_device *dev,
+			struct drm_gem_object *obj, size_t size)
+{
+	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+	obj->dev = dev;
+	obj->filp = NULL;
+
+	kref_init(&obj->refcount);
+	atomic_set(&obj->handle_count, 0);
+	obj->size = size;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_private_object_init);
+
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+	struct drm_gem_object *obj;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		goto free;
+
+	if (drm_gem_object_init(dev, obj, size) != 0)
+		goto free;
+
+	if (dev->driver->gem_init_object != NULL &&
+	    dev->driver->gem_init_object(obj) != 0) {
+		goto fput;
+	}
+	return obj;
+fput:
+	/* Object_init mangles the global counters - readjust them. */
+	fput(obj->filp);
+free:
+	kfree(obj);
+	return NULL;
+}
+EXPORT_SYMBOL(drm_gem_object_alloc);
+
+static void
+drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+{
+	if (obj->import_attach) {
+		drm_prime_remove_buf_handle(&filp->prime,
+				obj->import_attach->dmabuf);
+	}
+	if (obj->export_dma_buf) {
+		drm_prime_remove_buf_handle(&filp->prime,
+				obj->export_dma_buf);
+	}
+}
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ */
+int
+drm_gem_handle_delete(struct drm_file *filp, u32 handle)
+{
+	struct drm_device *dev;
+	struct drm_gem_object *obj;
+
+	/* This is gross. The idr system doesn't let us try a delete and
+	 * return an error code.  It just spews if you fail at deleting.
+	 * So, we have to grab a lock around finding the object and then
+	 * doing the delete on it and dropping the refcount, or the user
+	 * could race us to double-decrement the refcount and cause a
+	 * use-after-free later.  Given the frequency of our handle lookups,
+	 * we may want to use ida for number allocation and a hash table
+	 * for the pointers, anyway.
+	 */
+	spin_lock(&filp->table_lock);
+
+	/* Check if we currently have a reference on the object */
+	obj = idr_find(&filp->object_idr, handle);
+	if (obj == NULL) {
+		spin_unlock(&filp->table_lock);
+		return -EINVAL;
+	}
+	dev = obj->dev;
+
+	/* Release reference and decrement refcount. */
+	idr_remove(&filp->object_idr, handle);
+	spin_unlock(&filp->table_lock);
+
+	drm_gem_remove_prime_handles(obj, filp);
+
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, filp);
+	drm_gem_object_handle_unreference_unlocked(obj);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_handle_delete);
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
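+ *
+ * A typical create-ioctl flow (sketch; foo_gem_object_create is
+ * hypothetical):
+ *
+ *	obj = foo_gem_object_create(dev, args->size);
+ *	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
+ *	drm_gem_object_unreference_unlocked(obj);
+ *
+ * where the final unreference drops the caller's local reference; the
+ * handle retains its own.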
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+		       struct drm_gem_object *obj,
+		       u32 *handlep)
+{
+	struct drm_device *dev = obj->dev;
+	int ret;
+
+	/*
+	 * Get the user-visible handle using idr.  Preload and perform
+	 * allocation under our spinlock.
+	 */
+	idr_preload(GFP_KERNEL);
+	spin_lock(&file_priv->table_lock);
+
+	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
+
+	spin_unlock(&file_priv->table_lock);
+	idr_preload_end();
+	if (ret < 0)
+		return ret;
+	*handlep = ret;
+
+	drm_gem_object_handle_reference(obj);
+
+	if (dev->driver->gem_open_object) {
+		ret = dev->driver->gem_open_object(obj, file_priv);
+		if (ret) {
+			drm_gem_handle_delete(file_priv, *handlep);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
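+
+/*
+ * Usage sketch (illustrative only, not part of this file): a driver's
+ * buffer-create ioctl typically allocates an object, wraps it in a
+ * handle, and then drops its own reference so the handle owns the
+ * object.  my_driver_create_ioctl and struct my_args are hypothetical:
+ *
+ *	static int my_driver_create_ioctl(struct drm_device *dev, void *data,
+ *					  struct drm_file *file_priv)
+ *	{
+ *		struct my_args *args = data;
+ *		struct drm_gem_object *obj;
+ *		u32 handle;
+ *		int ret;
+ *
+ *		obj = drm_gem_object_alloc(dev, PAGE_ALIGN(args->size));
+ *		if (!obj)
+ *			return -ENOMEM;
+ *
+ *		ret = drm_gem_handle_create(file_priv, obj, &handle);
+ *		drm_gem_object_unreference_unlocked(obj);
+ *		if (ret)
+ *			return ret;
+ *
+ *		args->handle = handle;
+ *		return 0;
+ *	}
+ */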
+
+
+/**
+ * drm_gem_free_mmap_offset - release a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
+ */
+void
+drm_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list = &obj->map_list;
+
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+	drm_mm_put_block(list->file_offset_node);
+	kfree(list->map);
+	list->map = NULL;
+}
+EXPORT_SYMBOL(drm_gem_free_mmap_offset);
+
+/**
+ * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call.  The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+int
+drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list;
+	struct drm_local_map *map;
+	int ret;
+
+	/* Set the object up for mmap'ing */
+	list = &obj->map_list;
+	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+	if (!list->map)
+		return -ENOMEM;
+
+	map = list->map;
+	map->type = _DRM_GEM;
+	map->size = obj->size;
+	map->handle = obj;
+
+	/* Get a DRM GEM mmap offset allocated... */
+	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+			obj->size / PAGE_SIZE, 0, false);
+
+	if (!list->file_offset_node) {
+		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+		ret = -ENOSPC;
+		goto out_free_list;
+	}
+
+	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+			obj->size / PAGE_SIZE, 0);
+	if (!list->file_offset_node) {
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
+	list->hash.key = list->file_offset_node->start;
+	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+	if (ret) {
+		DRM_ERROR("failed to add to map hash\n");
+		goto out_free_mm;
+	}
+
+	return 0;
+
+out_free_mm:
+	drm_mm_put_block(list->file_offset_node);
+out_free_list:
+	kfree(list->map);
+	list->map = NULL;
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
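+
+/*
+ * Usage sketch (illustrative; "args" is a hypothetical ioctl payload):
+ * the driver hands the fake offset back to userspace, which then
+ * mmap()s the DRM fd at that offset:
+ *
+ *	ret = drm_gem_create_mmap_offset(obj);
+ *	if (ret)
+ *		return ret;
+ *	args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+ *
+ * Userspace side:
+ *
+ *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		   drm_fd, args->offset);
+ */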
+
+/** Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+		      u32 handle)
+{
+	struct drm_gem_object *obj;
+
+	spin_lock(&filp->table_lock);
+
+	/* Check if we currently have a reference on the object */
+	obj = idr_find(&filp->object_idr, handle);
+	if (obj == NULL) {
+		spin_unlock(&filp->table_lock);
+		return NULL;
+	}
+
+	drm_gem_object_reference(obj);
+
+	spin_unlock(&filp->table_lock);
+
+	return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * Releases the handle to an mm object.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_gem_close *args = data;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	ret = drm_gem_handle_delete(file_priv, args->handle);
+
+	return ret;
+}
+
+/**
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_gem_flink *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -ENOENT;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&dev->object_name_lock);
+	if (!obj->name) {
+		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
+		if (ret < 0)
+			goto err;
+
+		obj->name = ret;
+
+		/* Allocate a reference for the name table.  */
+		drm_gem_object_reference(obj);
+	}
+
+	args->name = (uint64_t) obj->name;
+	ret = 0;
+
+err:
+	spin_unlock(&dev->object_name_lock);
+	idr_preload_end();
+	drm_gem_object_unreference_unlocked(obj);
+	return ret;
+}
+
+/**
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_gem_open *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+	u32 handle;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	spin_lock(&dev->object_name_lock);
+	obj = idr_find(&dev->object_name_idr, (int) args->name);
+	if (obj)
+		drm_gem_object_reference(obj);
+	spin_unlock(&dev->object_name_lock);
+	if (!obj)
+		return -ENOENT;
+
+	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	drm_gem_object_unreference_unlocked(obj);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	args->size = obj->size;
+
+	return 0;
+}
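+
+/*
+ * Illustrative flink/open flow between two DRM clients (userspace
+ * pseudocode; struct layouts per drm.h):
+ *
+ *	exporting client:
+ *		struct drm_gem_flink flink = { .handle = handle };
+ *		ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
+ *		send flink.name to the other process
+ *
+ *	importing client:
+ *		struct drm_gem_open op = { .name = name };
+ *		ioctl(fd, DRM_IOCTL_GEM_OPEN, &op);
+ *		op.handle and op.size now describe the shared object
+ */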
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+	idr_init(&file_private->object_idr);
+	spin_lock_init(&file_private->table_lock);
+}
+
+/**
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+	struct drm_file *file_priv = data;
+	struct drm_gem_object *obj = ptr;
+	struct drm_device *dev = obj->dev;
+
+	drm_gem_remove_prime_handles(obj, file_priv);
+
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, file_priv);
+
+	drm_gem_object_handle_unreference_unlocked(obj);
+
+	return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+	idr_for_each(&file_private->object_idr,
+		     &drm_gem_object_release_handle, file_private);
+	idr_destroy(&file_private->object_idr);
+}
+
+void
+drm_gem_object_release(struct drm_gem_object *obj)
+{
+	if (obj->filp)
+	    fput(obj->filp);
+}
+EXPORT_SYMBOL(drm_gem_object_release);
+
+/**
+ * Called after the last reference to the object has been lost.
+ * Must be called with struct_mutex held.
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free(struct kref *kref)
+{
+	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+	struct drm_device *dev = obj->dev;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (dev->driver->gem_free_object != NULL)
+		dev->driver->gem_free_object(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free);
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+	BUG();
+}
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+void drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	/* Remove any name for this object */
+	spin_lock(&dev->object_name_lock);
+	if (obj->name) {
+		idr_remove(&dev->object_name_idr, obj->name);
+		obj->name = 0;
+		spin_unlock(&dev->object_name_lock);
+		/*
+		 * The object name held a reference to this object, drop
+		 * that now.
+		 *
+		 * This cannot be the last reference, since the handle
+		 * holds one too.
+		 */
+		kref_put(&obj->refcount, drm_gem_object_ref_bug);
+	} else
+		spin_unlock(&dev->object_name_lock);
+}
+EXPORT_SYMBOL(drm_gem_object_handle_free);
+
+void drm_gem_vm_open(struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+
+	drm_gem_object_reference(obj);
+
+	mutex_lock(&obj->dev->struct_mutex);
+	drm_vm_open_locked(obj->dev, vma);
+	mutex_unlock(&obj->dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_vm_open);
+
+void drm_gem_vm_close(struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_vm_close_locked(obj->dev, vma);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_vm_close);
+
+
+/**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created when the GTT map ioctl was called on
+ * the object), we set up the driver fault handler so that any accesses
+ * to the object can be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring.
+ */
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_local_map *map = NULL;
+	struct drm_gem_object *obj;
+	struct drm_hash_item *hash;
+	int ret = 0;
+
+	if (drm_device_is_unplugged(dev))
+		return -ENODEV;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+		mutex_unlock(&dev->struct_mutex);
+		return drm_mmap(filp, vma);
+	}
+
+	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+	if (!map ||
+	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
+		ret = -EPERM;
+		goto out_unlock;
+	}
+
+	/* Check for valid size. */
+	if (map->size < vma->vm_end - vma->vm_start) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	obj = map->handle;
+	if (!obj->dev->driver->gem_vm_ops) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_ops = obj->dev->driver->gem_vm_ops;
+	vma->vm_private_data = map->handle;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	drm_gem_object_reference(obj);
+
+	drm_vm_open_locked(dev, vma);
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_mmap);
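+
+/*
+ * Note that drm_gem_mmap() does not populate the mapping; it only
+ * installs the driver's vm_ops.  A driver fault handler then resolves
+ * pages on first access.  Illustrative sketch (my_gem_fault and the
+ * pfn lookup are hypothetical):
+ *
+ *	static int my_gem_fault(struct vm_area_struct *vma,
+ *				struct vm_fault *vmf)
+ *	{
+ *		struct drm_gem_object *obj = vma->vm_private_data;
+ *		unsigned long pfn = my_driver_lookup_pfn(obj, vmf);
+ *
+ *		switch (vm_insert_pfn(vma,
+ *				(unsigned long)vmf->virtual_address, pfn)) {
+ *		case 0:
+ *		case -EBUSY:
+ *			return VM_FAULT_NOPAGE;
+ *		case -ENOMEM:
+ *			return VM_FAULT_OOM;
+ *		default:
+ *			return VM_FAULT_SIGBUS;
+ *		}
+ *	}
+ */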
diff --git a/linux-imx/drivers/gpu/drm/drm_gem_cma_helper.c b/linux-imx/drivers/gpu/drm/drm_gem_cma_helper.c
new file mode 100644
index 0000000..0a7e011
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -0,0 +1,272 @@
+/*
+ * drm gem CMA (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ *
+ * Based on Samsung Exynos code
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_gem_cma_helper.h>
+
+static u64 get_gem_mmap_offset(struct drm_gem_object *obj)
+{
+	/* Widen before shifting so offsets above 4 GiB are not truncated. */
+	return (u64)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+static void drm_gem_cma_buf_destroy(struct drm_device *drm,
+		struct drm_gem_cma_object *cma_obj)
+{
+	dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
+			cma_obj->paddr);
+}
+
+/*
+ * drm_gem_cma_create - allocate an object with the given size
+ *
+ * Returns a struct drm_gem_cma_object * on success or an ERR_PTR()
+ * value on failure.
+ */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+		unsigned int size)
+{
+	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	size = round_up(size, PAGE_SIZE);
+
+	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+	if (!cma_obj)
+		return ERR_PTR(-ENOMEM);
+
+	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
+			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
+	if (!cma_obj->vaddr) {
+		dev_err(drm->dev, "failed to allocate buffer with size %u\n", size);
+		ret = -ENOMEM;
+		goto err_dma_alloc;
+	}
+
+	gem_obj = &cma_obj->base;
+
+	ret = drm_gem_object_init(drm, gem_obj, size);
+	if (ret)
+		goto err_obj_init;
+
+	ret = drm_gem_create_mmap_offset(gem_obj);
+	if (ret)
+		goto err_create_mmap_offset;
+
+	return cma_obj;
+
+err_create_mmap_offset:
+	drm_gem_object_release(gem_obj);
+
+err_obj_init:
+	drm_gem_cma_buf_destroy(drm, cma_obj);
+
+err_dma_alloc:
+	kfree(cma_obj);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_create);
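+
+/*
+ * Usage sketch (illustrative; priv, pitch and LCDC_FB_BASE are
+ * hypothetical driver names): allocate a contiguous scanout buffer and
+ * point the display controller at its physical address:
+ *
+ *	cma_obj = drm_gem_cma_create(drm, pitch * mode->vdisplay);
+ *	if (IS_ERR(cma_obj))
+ *		return PTR_ERR(cma_obj);
+ *	writel(cma_obj->paddr, priv->regs + LCDC_FB_BASE);
+ */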
+
+/*
+ * drm_gem_cma_create_with_handle - allocate an object with the given
+ * size and create a gem handle on it
+ *
+ * Returns a struct drm_gem_cma_object * on success or an ERR_PTR()
+ * value on failure.
+ */
+static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
+		struct drm_file *file_priv,
+		struct drm_device *drm, unsigned int size,
+		unsigned int *handle)
+{
+	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	cma_obj = drm_gem_cma_create(drm, size);
+	if (IS_ERR(cma_obj))
+		return cma_obj;
+
+	gem_obj = &cma_obj->base;
+
+	/*
+	 * Allocate an id in the idr table where the object is
+	 * registered; the handle holds the id that userspace sees.
+	 */
+	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
+	if (ret)
+		goto err_handle_create;
+
+	/* drop reference from allocate - handle holds it now. */
+	drm_gem_object_unreference_unlocked(gem_obj);
+
+	return cma_obj;
+
+err_handle_create:
+	drm_gem_cma_free_object(gem_obj);
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
+ * function
+ */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+{
+	struct drm_gem_cma_object *cma_obj;
+
+	if (gem_obj->map_list.map)
+		drm_gem_free_mmap_offset(gem_obj);
+
+	drm_gem_object_release(gem_obj);
+
+	cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+	drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
+
+	kfree(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
+
+/*
+ * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * function
+ *
+ * This aligns the pitch and size arguments to the minimum required. Wrap
+ * this in your own function if you need bigger alignment.
+ */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+		struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+	struct drm_gem_cma_object *cma_obj;
+	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+	if (args->pitch < min_pitch)
+		args->pitch = min_pitch;
+
+	if (args->size < args->pitch * args->height)
+		args->size = args->pitch * args->height;
+
+	cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
+			args->size, &args->handle);
+	if (IS_ERR(cma_obj))
+		return PTR_ERR(cma_obj);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
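+
+/*
+ * Worked example: for a 1024x768 XRGB8888 (bpp = 32) dumb buffer,
+ * min_pitch = DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes, so args->size
+ * is raised to at least 4096 * 768 = 3145728 bytes before the backing
+ * CMA object is allocated.
+ */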
+
+/*
+ * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
+ * function
+ */
+int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+		struct drm_device *drm, uint32_t handle, uint64_t *offset)
+{
+	struct drm_gem_object *gem_obj;
+
+	mutex_lock(&drm->struct_mutex);
+
+	gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
+	if (!gem_obj) {
+		dev_err(drm->dev, "failed to lookup gem object\n");
+		mutex_unlock(&drm->struct_mutex);
+		return -EINVAL;
+	}
+
+	*offset = get_gem_mmap_offset(gem_obj);
+
+	drm_gem_object_unreference(gem_obj);
+
+	mutex_unlock(&drm->struct_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
+
+const struct vm_operations_struct drm_gem_cma_vm_ops = {
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
+
+/*
+ * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
+ */
+int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_gem_object *gem_obj;
+	struct drm_gem_cma_object *cma_obj;
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	gem_obj = vma->vm_private_data;
+	cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+	ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
+			vma->vm_end - vma->vm_start, vma->vm_page_prot);
+	if (ret)
+		drm_gem_vm_close(vma);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
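+
+/*
+ * Unlike the fault-driven mappings set up by the generic drm_gem_mmap()
+ * path, the CMA helper pre-populates the whole contiguous buffer with
+ * remap_pfn_range(), so drm_gem_cma_vm_ops needs no fault handler.
+ */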
+
+/*
+ * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
+ */
+int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
+		struct drm_device *drm, unsigned int handle)
+{
+	return drm_gem_handle_delete(file_priv, handle);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
+
+#ifdef CONFIG_DEBUG_FS
+void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
+{
+	struct drm_gem_object *obj = &cma_obj->base;
+	struct drm_device *dev = obj->dev;
+	uint64_t off = 0;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (obj->map_list.map)
+		off = (uint64_t)obj->map_list.hash.key;
+
+	seq_printf(m, "%2d (%2d) %08llx %08Zx %p %zu",
+			obj->name, obj->refcount.refcount.counter,
+			off, cma_obj->paddr, cma_obj->vaddr, obj->size);
+
+	seq_printf(m, "\n");
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
+#endif
diff --git a/linux-imx/drivers/gpu/drm/drm_global.c b/linux-imx/drivers/gpu/drm/drm_global.c
new file mode 100644
index 0000000..f731116
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_global.c
@@ -0,0 +1,112 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drm_global.h>
+
+struct drm_global_item {
+	struct mutex mutex;
+	void *object;
+	int refcount;
+};
+
+static struct drm_global_item glob[DRM_GLOBAL_NUM];
+
+void drm_global_init(void)
+{
+	int i;
+
+	for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+		struct drm_global_item *item = &glob[i];
+		mutex_init(&item->mutex);
+		item->object = NULL;
+		item->refcount = 0;
+	}
+}
+
+void drm_global_release(void)
+{
+	int i;
+	for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+		struct drm_global_item *item = &glob[i];
+		BUG_ON(item->object != NULL);
+		BUG_ON(item->refcount != 0);
+	}
+}
+
+int drm_global_item_ref(struct drm_global_reference *ref)
+{
+	int ret;
+	struct drm_global_item *item = &glob[ref->global_type];
+
+	mutex_lock(&item->mutex);
+	if (item->refcount == 0) {
+		item->object = kzalloc(ref->size, GFP_KERNEL);
+		if (unlikely(item->object == NULL)) {
+			ret = -ENOMEM;
+			goto out_err;
+		}
+
+		ref->object = item->object;
+		ret = ref->init(ref);
+		if (unlikely(ret != 0)) {
+			kfree(item->object);
+			goto out_err;
+		}
+	}
+	++item->refcount;
+	ref->object = item->object;
+	mutex_unlock(&item->mutex);
+	return 0;
+out_err:
+	/* Reset the slot under the mutex so concurrent callers see it clean. */
+	item->object = NULL;
+	mutex_unlock(&item->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(drm_global_item_ref);
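+
+/*
+ * Usage sketch (the pattern used by TTM-based drivers; the init and
+ * release callbacks here are hypothetical):
+ *
+ *	struct drm_global_reference *ref = &priv->mem_global_ref;
+ *
+ *	ref->global_type = DRM_GLOBAL_TTM_MEM;
+ *	ref->size = sizeof(struct ttm_mem_global);
+ *	ref->init = &my_ttm_mem_global_init;
+ *	ref->release = &my_ttm_mem_global_release;
+ *	ret = drm_global_item_ref(ref);
+ *
+ * and a matching drm_global_item_unref(&priv->mem_global_ref) on
+ * teardown.
+ */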
+
+void drm_global_item_unref(struct drm_global_reference *ref)
+{
+	struct drm_global_item *item = &glob[ref->global_type];
+
+	mutex_lock(&item->mutex);
+	BUG_ON(item->refcount == 0);
+	BUG_ON(ref->object != item->object);
+	if (--item->refcount == 0) {
+		ref->release(ref);
+		item->object = NULL;
+	}
+	mutex_unlock(&item->mutex);
+}
+EXPORT_SYMBOL(drm_global_item_unref);
+
diff --git a/linux-imx/drivers/gpu/drm/drm_hashtab.c b/linux-imx/drivers/gpu/drm/drm_hashtab.c
new file mode 100644
index 0000000..7e4bae7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_hashtab.c
@@ -0,0 +1,208 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple open hash table implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_hashtab.h>
+#include <linux/hash.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
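+/*
+ * Create a hash table with 1 << order buckets.  Tables that fit within
+ * a page are allocated with kcalloc(); larger ones fall back to
+ * vzalloc().
+ */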
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
+{
+	unsigned int size = 1 << order;
+
+	ht->order = order;
+	ht->table = NULL;
+	if (size <= PAGE_SIZE / sizeof(*ht->table))
+		ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
+	else
+		ht->table = vzalloc(size * sizeof(*ht->table));
+	if (!ht->table) {
+		DRM_ERROR("Out of memory for hash table\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_ht_create);
+
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
+{
+	struct drm_hash_item *entry;
+	struct hlist_head *h_list;
+	unsigned int hashed_key;
+	int count = 0;
+
+	hashed_key = hash_long(key, ht->order);
+	DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
+	h_list = &ht->table[hashed_key];
+	hlist_for_each_entry(entry, h_list, head)
+		DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
+}
+
+static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
+					  unsigned long key)
+{
+	struct drm_hash_item *entry;
+	struct hlist_head *h_list;
+	unsigned int hashed_key;
+
+	hashed_key = hash_long(key, ht->order);
+	h_list = &ht->table[hashed_key];
+	hlist_for_each_entry(entry, h_list, head) {
+		if (entry->key == key)
+			return &entry->head;
+		if (entry->key > key)
+			break;
+	}
+	return NULL;
+}
+
+static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
+					      unsigned long key)
+{
+	struct drm_hash_item *entry;
+	struct hlist_head *h_list;
+	unsigned int hashed_key;
+
+	hashed_key = hash_long(key, ht->order);
+	h_list = &ht->table[hashed_key];
+	hlist_for_each_entry_rcu(entry, h_list, head) {
+		if (entry->key == key)
+			return &entry->head;
+		if (entry->key > key)
+			break;
+	}
+	return NULL;
+}
+
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+	struct drm_hash_item *entry;
+	struct hlist_head *h_list;
+	struct hlist_node *parent;
+	unsigned int hashed_key;
+	unsigned long key = item->key;
+
+	hashed_key = hash_long(key, ht->order);
+	h_list = &ht->table[hashed_key];
+	parent = NULL;
+	hlist_for_each_entry(entry, h_list, head) {
+		if (entry->key == key)
+			return -EINVAL;
+		if (entry->key > key)
+			break;
+		parent = &entry->head;
+	}
+	if (parent) {
+		hlist_add_after_rcu(parent, &item->head);
+	} else {
+		hlist_add_head_rcu(&item->head, h_list);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_ht_insert_item);
+
+/*
+ * Insert an item and assign it a "bits"-bit key that hasn't been used
+ * before.  The key is built from a hashed seed, shifted left by @shift
+ * with @add added on, and is stored in item->key.
+ */
+int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+			      unsigned long seed, int bits, int shift,
+			      unsigned long add)
+{
+	int ret;
+	unsigned long mask = (1 << bits) - 1;
+	unsigned long first, unshifted_key;
+
+	unshifted_key = hash_long(seed, bits);
+	first = unshifted_key;
+	do {
+		item->key = (unshifted_key << shift) + add;
+		ret = drm_ht_insert_item(ht, item);
+		if (ret)
+			unshifted_key = (unshifted_key + 1) & mask;
+	} while (ret && (unshifted_key != first));
+
+	if (ret) {
+		DRM_ERROR("Available key bit space exhausted\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_ht_just_insert_please);
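+
+/*
+ * Illustrative call (not from this file): allocate a page-aligned,
+ * previously unused token keyed off the file pointer:
+ *
+ *	ret = drm_ht_just_insert_please(&mm->offset_hash, &list->hash,
+ *					(unsigned long)filp, 31, PAGE_SHIFT, 0);
+ */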
+
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
+		     struct drm_hash_item **item)
+{
+	struct hlist_node *list;
+
+	list = drm_ht_find_key_rcu(ht, key);
+	if (!list)
+		return -EINVAL;
+
+	*item = hlist_entry(list, struct drm_hash_item, head);
+	return 0;
+}
+EXPORT_SYMBOL(drm_ht_find_item);
+
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
+{
+	struct hlist_node *list;
+
+	list = drm_ht_find_key(ht, key);
+	if (list) {
+		hlist_del_init_rcu(list);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+	hlist_del_init_rcu(&item->head);
+	return 0;
+}
+EXPORT_SYMBOL(drm_ht_remove_item);
+
+void drm_ht_remove(struct drm_open_hash *ht)
+{
+	if (ht->table) {
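+		/*
+		 * Mirror the size check in drm_ht_create(): tables that
+		 * fit in a page came from kcalloc(), larger ones from
+		 * vzalloc().
+		 */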
+		if ((PAGE_SIZE / sizeof(*ht->table)) >> ht->order)
+			kfree(ht->table);
+		else
+			vfree(ht->table);
+		ht->table = NULL;
+	}
+}
+EXPORT_SYMBOL(drm_ht_remove);
diff --git a/linux-imx/drivers/gpu/drm/drm_info.c b/linux-imx/drivers/gpu/drm/drm_info.c
new file mode 100644
index 0000000..d4b20ce
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_info.c
@@ -0,0 +1,278 @@
+/**
+ * \file drm_info.c
+ * DRM info file implementations
+ *
+ * \author Ben Gamari <bgamari@gmail.com>
+ */
+
+/*
+ * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2008 Ben Gamari <bgamari@gmail.com>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+
+/**
+ * Called when "/proc/dri/.../name" is read.
+ *
+ * Prints the device name together with the bus id if available.
+ */
+int drm_name_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_minor *minor = node->minor;
+	struct drm_device *dev = minor->dev;
+	struct drm_master *master = minor->master;
+	const char *bus_name;
+
+	if (!master)
+		return 0;
+
+	bus_name = dev->driver->bus->get_name(dev);
+	if (master->unique) {
+		seq_printf(m, "%s %s %s\n",
+			   bus_name,
+			   dev_name(dev->dev), master->unique);
+	} else {
+		seq_printf(m, "%s %s\n",
+			   bus_name, dev_name(dev->dev));
+	}
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vm" is read.
+ *
+ * Prints information about all mappings in drm_device::maplist.
+ */
+int drm_vm_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_local_map *map;
+	struct drm_map_list *r_list;
+
+	/* Hardcoded from _DRM_FRAME_BUFFER,
+	   _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
+	   _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+	const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
+	const char *type;
+	int i;
+
+	mutex_lock(&dev->struct_mutex);
+	seq_printf(m, "slot	 offset	      size type flags	 address mtrr\n\n");
+	i = 0;
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		map = r_list->map;
+		if (!map)
+			continue;
+		if (map->type < 0 || map->type > 5)
+			type = "??";
+		else
+			type = types[map->type];
+
+		seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s  0x%02x 0x%08lx ",
+			   i,
+			   (unsigned long long)map->offset,
+			   map->size, type, map->flags,
+			   (unsigned long) r_list->user_token);
+		if (map->mtrr < 0)
+			seq_printf(m, "none\n");
+		else
+			seq_printf(m, "%4d\n", map->mtrr);
+		i++;
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../bufs" is read.
+ */
+int drm_bufs_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_device_dma *dma;
+	int i, seg_pages;
+
+	mutex_lock(&dev->struct_mutex);
+	dma = dev->dma;
+	if (!dma) {
+		mutex_unlock(&dev->struct_mutex);
+		return 0;
+	}
+
+	seq_printf(m, " o     size count  free	 segs pages    kB\n\n");
+	for (i = 0; i <= DRM_MAX_ORDER; i++) {
+		if (dma->bufs[i].buf_count) {
+			seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
+			seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
+				   i,
+				   dma->bufs[i].buf_size,
+				   dma->bufs[i].buf_count,
+				   atomic_read(&dma->bufs[i].freelist.count),
+				   dma->bufs[i].seg_count,
+				   seg_pages,
+				   seg_pages * PAGE_SIZE / 1024);
+		}
+	}
+	seq_printf(m, "\n");
+	for (i = 0; i < dma->buf_count; i++) {
+		if (i && !(i % 32))
+			seq_printf(m, "\n");
+		seq_printf(m, " %d", dma->buflist[i]->list);
+	}
+	seq_printf(m, "\n");
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../vblank" is read.
+ */
+int drm_vblank_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	int crtc;
+
+	mutex_lock(&dev->struct_mutex);
+	for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
+		seq_printf(m, "CRTC %d enable:     %d\n",
+			   crtc, atomic_read(&dev->vblank_refcount[crtc]));
+		seq_printf(m, "CRTC %d counter:    %d\n",
+			   crtc, drm_vblank_count(dev, crtc));
+		seq_printf(m, "CRTC %d last wait:  %d\n",
+			   crtc, dev->last_vblank_wait[crtc]);
+		seq_printf(m, "CRTC %d in modeset: %d\n",
+			   crtc, dev->vblank_inmodeset[crtc]);
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../clients" is read.
+ *
+ */
+int drm_clients_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_file *priv;
+
+	mutex_lock(&dev->struct_mutex);
+	seq_printf(m, "a dev	pid    uid	magic	  ioctls\n\n");
+	list_for_each_entry(priv, &dev->filelist, lhead) {
+		seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
+			   priv->authenticated ? 'y' : 'n',
+			   priv->minor->index,
+			   pid_vnr(priv->pid),
+			   from_kuid_munged(seq_user_ns(m), priv->uid),
+			   priv->magic, priv->ioctl_count);
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+	struct drm_gem_object *obj = ptr;
+	struct seq_file *m = data;
+
+	seq_printf(m, "%6d %8zd %7d %8d\n",
+		   obj->name, obj->size,
+		   atomic_read(&obj->handle_count),
+		   atomic_read(&obj->refcount.refcount));
+	return 0;
+}
+
+int drm_gem_name_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+
+	seq_printf(m, "  name     size handles refcount\n");
+	idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+	return 0;
+}
+
+#if DRM_DEBUG_CODE
+
+int drm_vma_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_vma_entry *pt;
+	struct vm_area_struct *vma;
+#if defined(__i386__)
+	unsigned int pgprot;
+#endif
+
+	mutex_lock(&dev->struct_mutex);
+	seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
+		   atomic_read(&dev->vma_count),
+		   high_memory, (void *)(unsigned long)virt_to_phys(high_memory));
+
+	list_for_each_entry(pt, &dev->vmalist, head) {
+		vma = pt->vma;
+		if (!vma)
+			continue;
+		seq_printf(m,
+			   "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
+			   pt->pid,
+			   (void *)vma->vm_start, (void *)vma->vm_end,
+			   vma->vm_flags & VM_READ ? 'r' : '-',
+			   vma->vm_flags & VM_WRITE ? 'w' : '-',
+			   vma->vm_flags & VM_EXEC ? 'x' : '-',
+			   vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+			   vma->vm_flags & VM_LOCKED ? 'l' : '-',
+			   vma->vm_flags & VM_IO ? 'i' : '-',
+			   vma->vm_pgoff);
+
+#if defined(__i386__)
+		pgprot = pgprot_val(vma->vm_page_prot);
+		seq_printf(m, " %c%c%c%c%c%c%c%c%c",
+			   pgprot & _PAGE_PRESENT ? 'p' : '-',
+			   pgprot & _PAGE_RW ? 'w' : 'r',
+			   pgprot & _PAGE_USER ? 'u' : 's',
+			   pgprot & _PAGE_PWT ? 't' : 'b',
+			   pgprot & _PAGE_PCD ? 'u' : 'c',
+			   pgprot & _PAGE_ACCESSED ? 'a' : '-',
+			   pgprot & _PAGE_DIRTY ? 'd' : '-',
+			   pgprot & _PAGE_PSE ? 'm' : 'k',
+			   pgprot & _PAGE_GLOBAL ? 'g' : 'l');
+#endif
+		seq_printf(m, "\n");
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+#endif
+
diff --git a/linux-imx/drivers/gpu/drm/drm_ioc32.c b/linux-imx/drivers/gpu/drm/drm_ioc32.c
new file mode 100644
index 0000000..2f4c434
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_ioc32.c
@@ -0,0 +1,1085 @@
+/**
+ * \file drm_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the DRM.
+ *
+ * \author Paul Mackerras <paulus@samba.org>
+ *
+ * Copyright (C) Paul Mackerras 2005.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+#include <linux/ratelimit.h>
+#include <linux/export.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_core.h>
+
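+/*
+ * Every compat handler below follows the same pattern: copy the packed
+ * 32-bit structure in from userspace, rebuild the native structure on
+ * the compat user stack via compat_alloc_user_space(), forward it to
+ * drm_ioctl(), and copy any outputs back into the 32-bit layout.
+ */
+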
+#define DRM_IOCTL_VERSION32		DRM_IOWR(0x00, drm_version32_t)
+#define DRM_IOCTL_GET_UNIQUE32		DRM_IOWR(0x01, drm_unique32_t)
+#define DRM_IOCTL_GET_MAP32		DRM_IOWR(0x04, drm_map32_t)
+#define DRM_IOCTL_GET_CLIENT32		DRM_IOWR(0x05, drm_client32_t)
+#define DRM_IOCTL_GET_STATS32		DRM_IOR( 0x06, drm_stats32_t)
+
+#define DRM_IOCTL_SET_UNIQUE32		DRM_IOW( 0x10, drm_unique32_t)
+#define DRM_IOCTL_ADD_MAP32		DRM_IOWR(0x15, drm_map32_t)
+#define DRM_IOCTL_ADD_BUFS32		DRM_IOWR(0x16, drm_buf_desc32_t)
+#define DRM_IOCTL_MARK_BUFS32		DRM_IOW( 0x17, drm_buf_desc32_t)
+#define DRM_IOCTL_INFO_BUFS32		DRM_IOWR(0x18, drm_buf_info32_t)
+#define DRM_IOCTL_MAP_BUFS32		DRM_IOWR(0x19, drm_buf_map32_t)
+#define DRM_IOCTL_FREE_BUFS32		DRM_IOW( 0x1a, drm_buf_free32_t)
+
+#define DRM_IOCTL_RM_MAP32		DRM_IOW( 0x1b, drm_map32_t)
+
+#define DRM_IOCTL_SET_SAREA_CTX32	DRM_IOW( 0x1c, drm_ctx_priv_map32_t)
+#define DRM_IOCTL_GET_SAREA_CTX32	DRM_IOWR(0x1d, drm_ctx_priv_map32_t)
+
+#define DRM_IOCTL_RES_CTX32		DRM_IOWR(0x26, drm_ctx_res32_t)
+#define DRM_IOCTL_DMA32			DRM_IOWR(0x29, drm_dma32_t)
+
+#define DRM_IOCTL_AGP_ENABLE32		DRM_IOW( 0x32, drm_agp_mode32_t)
+#define DRM_IOCTL_AGP_INFO32		DRM_IOR( 0x33, drm_agp_info32_t)
+#define DRM_IOCTL_AGP_ALLOC32		DRM_IOWR(0x34, drm_agp_buffer32_t)
+#define DRM_IOCTL_AGP_FREE32		DRM_IOW( 0x35, drm_agp_buffer32_t)
+#define DRM_IOCTL_AGP_BIND32		DRM_IOW( 0x36, drm_agp_binding32_t)
+#define DRM_IOCTL_AGP_UNBIND32		DRM_IOW( 0x37, drm_agp_binding32_t)
+
+#define DRM_IOCTL_SG_ALLOC32		DRM_IOW( 0x38, drm_scatter_gather32_t)
+#define DRM_IOCTL_SG_FREE32		DRM_IOW( 0x39, drm_scatter_gather32_t)
+
+#define DRM_IOCTL_UPDATE_DRAW32		DRM_IOW( 0x3f, drm_update_draw32_t)
+
+#define DRM_IOCTL_WAIT_VBLANK32		DRM_IOWR(0x3a, drm_wait_vblank32_t)
+
+typedef struct drm_version_32 {
+	int version_major;	  /**< Major version */
+	int version_minor;	  /**< Minor version */
+	int version_patchlevel;	   /**< Patch level */
+	u32 name_len;		  /**< Length of name buffer */
+	u32 name;		  /**< Name of driver */
+	u32 date_len;		  /**< Length of date buffer */
+	u32 date;		  /**< User-space buffer to hold date */
+	u32 desc_len;		  /**< Length of desc buffer */
+	u32 desc;		  /**< User-space buffer to hold desc */
+} drm_version32_t;
+
+static int compat_drm_version(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	drm_version32_t v32;
+	struct drm_version __user *version;
+	int err;
+
+	if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
+		return -EFAULT;
+
+	version = compat_alloc_user_space(sizeof(*version));
+	if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
+		return -EFAULT;
+	if (__put_user(v32.name_len, &version->name_len)
+	    || __put_user((void __user *)(unsigned long)v32.name,
+			  &version->name)
+	    || __put_user(v32.date_len, &version->date_len)
+	    || __put_user((void __user *)(unsigned long)v32.date,
+			  &version->date)
+	    || __put_user(v32.desc_len, &version->desc_len)
+	    || __put_user((void __user *)(unsigned long)v32.desc,
+			  &version->desc))
+		return -EFAULT;
+
+	err = drm_ioctl(file,
+			DRM_IOCTL_VERSION, (unsigned long)version);
+	if (err)
+		return err;
+
+	if (__get_user(v32.version_major, &version->version_major)
+	    || __get_user(v32.version_minor, &version->version_minor)
+	    || __get_user(v32.version_patchlevel, &version->version_patchlevel)
+	    || __get_user(v32.name_len, &version->name_len)
+	    || __get_user(v32.date_len, &version->date_len)
+	    || __get_user(v32.desc_len, &version->desc_len))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &v32, sizeof(v32)))
+		return -EFAULT;
+	return 0;
+}
+
+typedef struct drm_unique32 {
+	u32 unique_len;	/**< Length of unique */
+	u32 unique;	/**< Unique name for driver instantiation */
+} drm_unique32_t;
+
+static int compat_drm_getunique(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_unique32_t uq32;
+	struct drm_unique __user *u;
+	int err;
+
+	if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
+		return -EFAULT;
+
+	u = compat_alloc_user_space(sizeof(*u));
+	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+		return -EFAULT;
+	if (__put_user(uq32.unique_len, &u->unique_len)
+	    || __put_user((void __user *)(unsigned long)uq32.unique,
+			  &u->unique))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
+	if (err)
+		return err;
+
+	if (__get_user(uq32.unique_len, &u->unique_len))
+		return -EFAULT;
+	if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32)))
+		return -EFAULT;
+	return 0;
+}
+
+static int compat_drm_setunique(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_unique32_t uq32;
+	struct drm_unique __user *u;
+
+	if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
+		return -EFAULT;
+
+	u = compat_alloc_user_space(sizeof(*u));
+	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+		return -EFAULT;
+	if (__put_user(uq32.unique_len, &u->unique_len)
+	    || __put_user((void __user *)(unsigned long)uq32.unique,
+			  &u->unique))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
+}
+
+typedef struct drm_map32 {
+	u32 offset;		/**< Requested physical address (0 for SAREA)*/
+	u32 size;		/**< Requested physical size (bytes) */
+	enum drm_map_type type;	/**< Type of memory to map */
+	enum drm_map_flags flags;	/**< Flags */
+	u32 handle;		/**< User-space: "Handle" to pass to mmap() */
+	int mtrr;		/**< MTRR slot used */
+} drm_map32_t;
+
+static int compat_drm_getmap(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	drm_map32_t __user *argp = (void __user *)arg;
+	drm_map32_t m32;
+	struct drm_map __user *map;
+	int idx, err;
+	void *handle;
+
+	if (get_user(idx, &argp->offset))
+		return -EFAULT;
+
+	map = compat_alloc_user_space(sizeof(*map));
+	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+		return -EFAULT;
+	if (__put_user(idx, &map->offset))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map);
+	if (err)
+		return err;
+
+	if (__get_user(m32.offset, &map->offset)
+	    || __get_user(m32.size, &map->size)
+	    || __get_user(m32.type, &map->type)
+	    || __get_user(m32.flags, &map->flags)
+	    || __get_user(handle, &map->handle)
+	    || __get_user(m32.mtrr, &map->mtrr))
+		return -EFAULT;
+
+	m32.handle = (unsigned long)handle;
+	if (copy_to_user(argp, &m32, sizeof(m32)))
+		return -EFAULT;
+	return 0;
+
+}
+
+static int compat_drm_addmap(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	drm_map32_t __user *argp = (void __user *)arg;
+	drm_map32_t m32;
+	struct drm_map __user *map;
+	int err;
+	void *handle;
+
+	if (copy_from_user(&m32, argp, sizeof(m32)))
+		return -EFAULT;
+
+	map = compat_alloc_user_space(sizeof(*map));
+	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+		return -EFAULT;
+	if (__put_user(m32.offset, &map->offset)
+	    || __put_user(m32.size, &map->size)
+	    || __put_user(m32.type, &map->type)
+	    || __put_user(m32.flags, &map->flags))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
+	if (err)
+		return err;
+
+	if (__get_user(m32.offset, &map->offset)
+	    || __get_user(m32.mtrr, &map->mtrr)
+	    || __get_user(handle, &map->handle))
+		return -EFAULT;
+
+	m32.handle = (unsigned long)handle;
+	if (m32.handle != (unsigned long)handle)
+		printk_ratelimited(KERN_ERR "compat_drm_addmap truncated handle"
+				   " %p for type %d offset %x\n",
+				   handle, m32.type, m32.offset);
+
+	if (copy_to_user(argp, &m32, sizeof(m32)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int compat_drm_rmmap(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	drm_map32_t __user *argp = (void __user *)arg;
+	struct drm_map __user *map;
+	u32 handle;
+
+	if (get_user(handle, &argp->handle))
+		return -EFAULT;
+
+	map = compat_alloc_user_space(sizeof(*map));
+	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+		return -EFAULT;
+	if (__put_user((void *)(unsigned long)handle, &map->handle))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map);
+}
+
+typedef struct drm_client32 {
+	int idx;	/**< Which client desired? */
+	int auth;	/**< Is client authenticated? */
+	u32 pid;	/**< Process ID */
+	u32 uid;	/**< User ID */
+	u32 magic;	/**< Magic */
+	u32 iocs;	/**< Ioctl count */
+} drm_client32_t;
+
+static int compat_drm_getclient(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_client32_t c32;
+	drm_client32_t __user *argp = (void __user *)arg;
+	struct drm_client __user *client;
+	int idx, err;
+
+	if (get_user(idx, &argp->idx))
+		return -EFAULT;
+
+	client = compat_alloc_user_space(sizeof(*client));
+	if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
+		return -EFAULT;
+	if (__put_user(idx, &client->idx))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client);
+	if (err)
+		return err;
+
+	if (__get_user(c32.idx, &client->idx)
+	    || __get_user(c32.auth, &client->auth)
+	    || __get_user(c32.pid, &client->pid)
+	    || __get_user(c32.uid, &client->uid)
+	    || __get_user(c32.magic, &client->magic)
+	    || __get_user(c32.iocs, &client->iocs))
+		return -EFAULT;
+
+	if (copy_to_user(argp, &c32, sizeof(c32)))
+		return -EFAULT;
+	return 0;
+}
+
+typedef struct drm_stats32 {
+	u32 count;
+	struct {
+		u32 value;
+		enum drm_stat_type type;
+	} data[15];
+} drm_stats32_t;
+
+static int compat_drm_getstats(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_stats32_t s32;
+	drm_stats32_t __user *argp = (void __user *)arg;
+	struct drm_stats __user *stats;
+	int i, err;
+
+	stats = compat_alloc_user_space(sizeof(*stats));
+	if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
+	if (err)
+		return err;
+
+	if (__get_user(s32.count, &stats->count))
+		return -EFAULT;
+	for (i = 0; i < 15; ++i)
+		if (__get_user(s32.data[i].value, &stats->data[i].value)
+		    || __get_user(s32.data[i].type, &stats->data[i].type))
+			return -EFAULT;
+
+	if (copy_to_user(argp, &s32, sizeof(s32)))
+		return -EFAULT;
+	return 0;
+}
+
+typedef struct drm_buf_desc32 {
+	int count;		 /**< Number of buffers of this size */
+	int size;		 /**< Size in bytes */
+	int low_mark;		 /**< Low water mark */
+	int high_mark;		 /**< High water mark */
+	int flags;
+	u32 agp_start;		 /**< Start address in the AGP aperture */
+} drm_buf_desc32_t;
+
+static int compat_drm_addbufs(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	drm_buf_desc32_t __user *argp = (void __user *)arg;
+	struct drm_buf_desc __user *buf;
+	int err;
+	unsigned long agp_start;
+
+	buf = compat_alloc_user_space(sizeof(*buf));
+	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
+	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
+		return -EFAULT;
+
+	if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
+	    || __get_user(agp_start, &argp->agp_start)
+	    || __put_user(agp_start, &buf->agp_start))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
+	if (err)
+		return err;
+
+	if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start))
+	    || __get_user(agp_start, &buf->agp_start)
+	    || __put_user(agp_start, &argp->agp_start))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int compat_drm_markbufs(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_buf_desc32_t b32;
+	drm_buf_desc32_t __user *argp = (void __user *)arg;
+	struct drm_buf_desc __user *buf;
+
+	if (copy_from_user(&b32, argp, sizeof(b32)))
+		return -EFAULT;
+
+	buf = compat_alloc_user_space(sizeof(*buf));
+	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
+		return -EFAULT;
+
+	if (__put_user(b32.size, &buf->size)
+	    || __put_user(b32.low_mark, &buf->low_mark)
+	    || __put_user(b32.high_mark, &buf->high_mark))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
+}
+
+typedef struct drm_buf_info32 {
+	int count;		/**< Entries in list */
+	u32 list;
+} drm_buf_info32_t;
+
+static int compat_drm_infobufs(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_buf_info32_t req32;
+	drm_buf_info32_t __user *argp = (void __user *)arg;
+	drm_buf_desc32_t __user *to;
+	struct drm_buf_info __user *request;
+	struct drm_buf_desc __user *list;
+	size_t nbytes;
+	int i, err;
+	int count, actual;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+
+	count = req32.count;
+	to = (drm_buf_desc32_t __user *) (unsigned long)req32.list;
+	if (count < 0)
+		count = 0;
+	if (count > 0
+	    && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
+		return -EFAULT;
+
+	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
+	request = compat_alloc_user_space(nbytes);
+	if (!access_ok(VERIFY_WRITE, request, nbytes))
+		return -EFAULT;
+	list = (struct drm_buf_desc *) (request + 1);
+
+	if (__put_user(count, &request->count)
+	    || __put_user(list, &request->list))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request);
+	if (err)
+		return err;
+
+	if (__get_user(actual, &request->count))
+		return -EFAULT;
+	if (count >= actual)
+		for (i = 0; i < actual; ++i)
+			if (__copy_in_user(&to[i], &list[i],
+					   offsetof(struct drm_buf_desc, flags)))
+				return -EFAULT;
+
+	if (__put_user(actual, &argp->count))
+		return -EFAULT;
+
+	return 0;
+}
+
+typedef struct drm_buf_pub32 {
+	int idx;		/**< Index into the master buffer list */
+	int total;		/**< Buffer size */
+	int used;		/**< Amount of buffer in use (for DMA) */
+	u32 address;		/**< Address of buffer */
+} drm_buf_pub32_t;
+
+typedef struct drm_buf_map32 {
+	int count;		/**< Length of the buffer list */
+	u32 virtual;		/**< Mmap'd area in user-virtual */
+	u32 list;		/**< Buffer information */
+} drm_buf_map32_t;
+
+static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	drm_buf_map32_t __user *argp = (void __user *)arg;
+	drm_buf_map32_t req32;
+	drm_buf_pub32_t __user *list32;
+	struct drm_buf_map __user *request;
+	struct drm_buf_pub __user *list;
+	int i, err;
+	int count, actual;
+	size_t nbytes;
+	void __user *addr;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+	count = req32.count;
+	list32 = (void __user *)(unsigned long)req32.list;
+
+	if (count < 0)
+		return -EINVAL;
+	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
+	request = compat_alloc_user_space(nbytes);
+	if (!access_ok(VERIFY_WRITE, request, nbytes))
+		return -EFAULT;
+	list = (struct drm_buf_pub *) (request + 1);
+
+	if (__put_user(count, &request->count)
+	    || __put_user(list, &request->list))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request);
+	if (err)
+		return err;
+
+	if (__get_user(actual, &request->count))
+		return -EFAULT;
+	if (count >= actual)
+		for (i = 0; i < actual; ++i)
+			if (__copy_in_user(&list32[i], &list[i],
+					   offsetof(struct drm_buf_pub, address))
+			    || __get_user(addr, &list[i].address)
+			    || __put_user((unsigned long)addr,
+					  &list32[i].address))
+				return -EFAULT;
+
+	if (__put_user(actual, &argp->count)
+	    || __get_user(addr, &request->virtual)
+	    || __put_user((unsigned long)addr, &argp->virtual))
+		return -EFAULT;
+
+	return 0;
+}
+
+typedef struct drm_buf_free32 {
+	int count;
+	u32 list;
+} drm_buf_free32_t;
+
+static int compat_drm_freebufs(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_buf_free32_t req32;
+	struct drm_buf_free __user *request;
+	drm_buf_free32_t __user *argp = (void __user *)arg;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+		return -EFAULT;
+	if (__put_user(req32.count, &request->count)
+	    || __put_user((int __user *)(unsigned long)req32.list,
+			  &request->list))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request);
+}
+
+typedef struct drm_ctx_priv_map32 {
+	unsigned int ctx_id;	 /**< Context requesting private mapping */
+	u32 handle;		/**< Handle of map */
+} drm_ctx_priv_map32_t;
+
+static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	drm_ctx_priv_map32_t req32;
+	struct drm_ctx_priv_map __user *request;
+	drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+		return -EFAULT;
+	if (__put_user(req32.ctx_id, &request->ctx_id)
+	    || __put_user((void *)(unsigned long)req32.handle,
+			  &request->handle))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
+}
+
+static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	struct drm_ctx_priv_map __user *request;
+	drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
+	int err;
+	unsigned int ctx_id;
+	void *handle;
+
+	if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+	    || __get_user(ctx_id, &argp->ctx_id))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+		return -EFAULT;
+	if (__put_user(ctx_id, &request->ctx_id))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
+	if (err)
+		return err;
+
+	if (__get_user(handle, &request->handle)
+	    || __put_user((unsigned long)handle, &argp->handle))
+		return -EFAULT;
+
+	return 0;
+}
+
+typedef struct drm_ctx_res32 {
+	int count;
+	u32 contexts;
+} drm_ctx_res32_t;
+
+static int compat_drm_resctx(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	drm_ctx_res32_t __user *argp = (void __user *)arg;
+	drm_ctx_res32_t res32;
+	struct drm_ctx_res __user *res;
+	int err;
+
+	if (copy_from_user(&res32, argp, sizeof(res32)))
+		return -EFAULT;
+
+	res = compat_alloc_user_space(sizeof(*res));
+	if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
+		return -EFAULT;
+	if (__put_user(res32.count, &res->count)
+	    || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
+			  &res->contexts))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res);
+	if (err)
+		return err;
+
+	if (__get_user(res32.count, &res->count)
+	    || __put_user(res32.count, &argp->count))
+		return -EFAULT;
+
+	return 0;
+}
+
+typedef struct drm_dma32 {
+	int context;		  /**< Context handle */
+	int send_count;		  /**< Number of buffers to send */
+	u32 send_indices;	  /**< List of handles to buffers */
+	u32 send_sizes;		  /**< Lengths of data to send */
+	enum drm_dma_flags flags;		  /**< Flags */
+	int request_count;	  /**< Number of buffers requested */
+	int request_size;	  /**< Desired size for buffers */
+	u32 request_indices;	  /**< Buffer information */
+	u32 request_sizes;
+	int granted_count;	  /**< Number of buffers granted */
+} drm_dma32_t;
+
+static int compat_drm_dma(struct file *file, unsigned int cmd,
+			  unsigned long arg)
+{
+	drm_dma32_t d32;
+	drm_dma32_t __user *argp = (void __user *)arg;
+	struct drm_dma __user *d;
+	int err;
+
+	if (copy_from_user(&d32, argp, sizeof(d32)))
+		return -EFAULT;
+
+	d = compat_alloc_user_space(sizeof(*d));
+	if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
+		return -EFAULT;
+
+	if (__put_user(d32.context, &d->context)
+	    || __put_user(d32.send_count, &d->send_count)
+	    || __put_user((int __user *)(unsigned long)d32.send_indices,
+			  &d->send_indices)
+	    || __put_user((int __user *)(unsigned long)d32.send_sizes,
+			  &d->send_sizes)
+	    || __put_user(d32.flags, &d->flags)
+	    || __put_user(d32.request_count, &d->request_count)
+	    || __put_user((int __user *)(unsigned long)d32.request_indices,
+			  &d->request_indices)
+	    || __put_user((int __user *)(unsigned long)d32.request_sizes,
+			  &d->request_sizes))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d);
+	if (err)
+		return err;
+
+	if (__get_user(d32.request_size, &d->request_size)
+	    || __get_user(d32.granted_count, &d->granted_count)
+	    || __put_user(d32.request_size, &argp->request_size)
+	    || __put_user(d32.granted_count, &argp->granted_count))
+		return -EFAULT;
+
+	return 0;
+}
+
+#if __OS_HAS_AGP
+typedef struct drm_agp_mode32 {
+	u32 mode;	/**< AGP mode */
+} drm_agp_mode32_t;
+
+static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
+				 unsigned long arg)
+{
+	drm_agp_mode32_t __user *argp = (void __user *)arg;
+	drm_agp_mode32_t m32;
+	struct drm_agp_mode __user *mode;
+
+	if (get_user(m32.mode, &argp->mode))
+		return -EFAULT;
+
+	mode = compat_alloc_user_space(sizeof(*mode));
+	if (put_user(m32.mode, &mode->mode))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
+}
+
+typedef struct drm_agp_info32 {
+	int agp_version_major;
+	int agp_version_minor;
+	u32 mode;
+	u32 aperture_base;	/* physical address */
+	u32 aperture_size;	/* bytes */
+	u32 memory_allowed;	/* bytes */
+	u32 memory_used;
+
+	/* PCI information */
+	unsigned short id_vendor;
+	unsigned short id_device;
+} drm_agp_info32_t;
+
+static int compat_drm_agp_info(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_agp_info32_t __user *argp = (void __user *)arg;
+	drm_agp_info32_t i32;
+	struct drm_agp_info __user *info;
+	int err;
+
+	info = compat_alloc_user_space(sizeof(*info));
+	if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
+	if (err)
+		return err;
+
+	if (__get_user(i32.agp_version_major, &info->agp_version_major)
+	    || __get_user(i32.agp_version_minor, &info->agp_version_minor)
+	    || __get_user(i32.mode, &info->mode)
+	    || __get_user(i32.aperture_base, &info->aperture_base)
+	    || __get_user(i32.aperture_size, &info->aperture_size)
+	    || __get_user(i32.memory_allowed, &info->memory_allowed)
+	    || __get_user(i32.memory_used, &info->memory_used)
+	    || __get_user(i32.id_vendor, &info->id_vendor)
+	    || __get_user(i32.id_device, &info->id_device))
+		return -EFAULT;
+
+	if (copy_to_user(argp, &i32, sizeof(i32)))
+		return -EFAULT;
+
+	return 0;
+}
+
+typedef struct drm_agp_buffer32 {
+	u32 size;	/**< In bytes -- will round to page boundary */
+	u32 handle;	/**< Used for binding / unbinding */
+	u32 type;	/**< Type of memory to allocate */
+	u32 physical;	/**< Physical used by i810 */
+} drm_agp_buffer32_t;
+
+static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_agp_buffer32_t __user *argp = (void __user *)arg;
+	drm_agp_buffer32_t req32;
+	struct drm_agp_buffer __user *request;
+	int err;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.size, &request->size)
+	    || __put_user(req32.type, &request->type))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
+	if (err)
+		return err;
+
+	if (__get_user(req32.handle, &request->handle)
+	    || __get_user(req32.physical, &request->physical)
+	    || copy_to_user(argp, &req32, sizeof(req32))) {
+		drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int compat_drm_agp_free(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_agp_buffer32_t __user *argp = (void __user *)arg;
+	struct drm_agp_buffer __user *request;
+	u32 handle;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || get_user(handle, &argp->handle)
+	    || __put_user(handle, &request->handle))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
+}
+
+typedef struct drm_agp_binding32 {
+	u32 handle;	/**< From drm_agp_buffer */
+	u32 offset;	/**< In bytes -- will round to page boundary */
+} drm_agp_binding32_t;
+
+static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_agp_binding32_t __user *argp = (void __user *)arg;
+	drm_agp_binding32_t req32;
+	struct drm_agp_binding __user *request;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.handle, &request->handle)
+	    || __put_user(req32.offset, &request->offset))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request);
+}
+
+static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
+				 unsigned long arg)
+{
+	drm_agp_binding32_t __user *argp = (void __user *)arg;
+	struct drm_agp_binding __user *request;
+	u32 handle;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || get_user(handle, &argp->handle)
+	    || __put_user(handle, &request->handle))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
+}
+#endif				/* __OS_HAS_AGP */
+
+typedef struct drm_scatter_gather32 {
+	u32 size;	/**< In bytes -- will round to page boundary */
+	u32 handle;	/**< Used for mapping / unmapping */
+} drm_scatter_gather32_t;
+
+static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_scatter_gather32_t __user *argp = (void __user *)arg;
+	struct drm_scatter_gather __user *request;
+	int err;
+	unsigned long x;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+	    || __get_user(x, &argp->size)
+	    || __put_user(x, &request->size))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request);
+	if (err)
+		return err;
+
+	/* XXX not sure about the handle conversion here... */
+	if (__get_user(x, &request->handle)
+	    || __put_user(x >> PAGE_SHIFT, &argp->handle))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int compat_drm_sg_free(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	drm_scatter_gather32_t __user *argp = (void __user *)arg;
+	struct drm_scatter_gather __user *request;
+	unsigned long x;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+	    || __get_user(x, &argp->handle)
+	    || __put_user(x << PAGE_SHIFT, &request->handle))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request);
+}
+
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+typedef struct drm_update_draw32 {
+	drm_drawable_t handle;
+	unsigned int type;
+	unsigned int num;
+	/* 64-bit version has a 32-bit pad here */
+	u64 data;	/**< Pointer */
+} __attribute__((packed)) drm_update_draw32_t;
+
+static int compat_drm_update_draw(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	drm_update_draw32_t update32;
+	struct drm_update_draw __user *request;
+
+	if (copy_from_user(&update32, (void __user *)arg, sizeof(update32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
+	    __put_user(update32.handle, &request->handle) ||
+	    __put_user(update32.type, &request->type) ||
+	    __put_user(update32.num, &request->num) ||
+	    __put_user(update32.data, &request->data))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
+}
+#endif
+
+struct drm_wait_vblank_request32 {
+	enum drm_vblank_seq_type type;
+	unsigned int sequence;
+	u32 signal;
+};
+
+struct drm_wait_vblank_reply32 {
+	enum drm_vblank_seq_type type;
+	unsigned int sequence;
+	s32 tval_sec;
+	s32 tval_usec;
+};
+
+typedef union drm_wait_vblank32 {
+	struct drm_wait_vblank_request32 request;
+	struct drm_wait_vblank_reply32 reply;
+} drm_wait_vblank32_t;
+
+static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	drm_wait_vblank32_t __user *argp = (void __user *)arg;
+	drm_wait_vblank32_t req32;
+	union drm_wait_vblank __user *request;
+	int err;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.request.type, &request->request.type)
+	    || __put_user(req32.request.sequence, &request->request.sequence)
+	    || __put_user(req32.request.signal, &request->request.signal))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
+	if (err)
+		return err;
+
+	if (__get_user(req32.reply.type, &request->reply.type)
+	    || __get_user(req32.reply.sequence, &request->reply.sequence)
+	    || __get_user(req32.reply.tval_sec, &request->reply.tval_sec)
+	    || __get_user(req32.reply.tval_usec, &request->reply.tval_usec))
+		return -EFAULT;
+
+	if (copy_to_user(argp, &req32, sizeof(req32)))
+		return -EFAULT;
+
+	return 0;
+}
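+
+/*
+ * For reference, a 32-bit client reaches this thunk through the normal
+ * wait-vblank ioctl; an illustrative (hypothetical) userspace sketch:
+ *
+ *	union drm_wait_vblank vbl = { .request = {
+ *		.type = _DRM_VBLANK_RELATIVE, .sequence = 1 } };
+ *	ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
+ *
+ * On a 64-bit kernel the 32-bit union layout differs (the signal field
+ * is a 32-bit pointer there), which is why the request and reply fields
+ * are repacked above.
+ */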
+
+drm_ioctl_compat_t *drm_compat_ioctls[] = {
+	[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient,
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats,
+	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique,
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap,
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs,
+	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs,
+	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs,
+	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs,
+	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs,
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap,
+	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx,
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
+	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
+	[DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
+#if __OS_HAS_AGP
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free,
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind,
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind,
+#endif
+	[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
+	[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+	[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
+#endif
+	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/drm.
+ *
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn;
+	int ret;
+
+	/* Assume that ioctls without an explicit compat routine will just
+	 * work.  This may not always be a good assumption, but it's better
+	 * than always failing.
+	 */
+	if (nr >= ARRAY_SIZE(drm_compat_ioctls))
+		return drm_ioctl(filp, cmd, arg);
+
+	fn = drm_compat_ioctls[nr];
+
+	if (fn != NULL)
+		ret = (*fn) (filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp, cmd, arg);
+
+	return ret;
+}
+
+EXPORT_SYMBOL(drm_compat_ioctl);
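+
+/*
+ * Illustrative wiring (sketch, hypothetical driver): drivers hook this
+ * translation layer up via their file_operations so that ioctls from
+ * 32-bit processes are routed through the thunks above, e.g.:
+ *
+ *	static const struct file_operations foo_driver_fops = {
+ *		.owner		= THIS_MODULE,
+ *		.unlocked_ioctl	= drm_ioctl,
+ *		.compat_ioctl	= drm_compat_ioctl,
+ *		...
+ *	};
+ */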
diff --git a/linux-imx/drivers/gpu/drm/drm_ioctl.c b/linux-imx/drivers/gpu/drm/drm_ioctl.c
new file mode 100644
index 0000000..e77bd8b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_ioctl.c
@@ -0,0 +1,363 @@
+/**
+ * \file drm_ioctl.c
+ * IOCTL processing for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan  8 09:01:26 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_core.h>
+
+#include <linux/pci.h>
+#include <linux/export.h>
+
+/**
+ * Get the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from drm_device::unique into user space.
+ */
+int drm_getunique(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct drm_unique *u = data;
+	struct drm_master *master = file_priv->master;
+
+	if (u->unique_len >= master->unique_len) {
+		if (copy_to_user(u->unique, master->unique, master->unique_len))
+			return -EFAULT;
+	}
+	u->unique_len = master->unique_len;
+
+	return 0;
+}
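+
+/*
+ * Illustrative userspace usage (hypothetical sketch): because the copy
+ * above only happens when the supplied buffer is large enough, callers
+ * typically probe the length first, then fetch the busid:
+ *
+ *	struct drm_unique u = { .unique_len = 0, .unique = NULL };
+ *	ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);	-- learns unique_len
+ *	u.unique = malloc(u.unique_len + 1);
+ *	ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);	-- copies the busid
+ */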
+
+static void
+drm_unset_busid(struct drm_device *dev,
+		struct drm_master *master)
+{
+	kfree(dev->devname);
+	dev->devname = NULL;
+
+	kfree(master->unique);
+	master->unique = NULL;
+	master->unique_len = 0;
+	master->unique_size = 0;
+}
+
+/**
+ * Set the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from userspace into drm_device::unique, and verifies that
+ * it matches the device this DRM is attached to (EINVAL otherwise).  Deprecated
+ * in interface version 1.1 and will return EBUSY when setversion has requested
+ * version 1.1 or greater.
+ */
+int drm_setunique(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct drm_unique *u = data;
+	struct drm_master *master = file_priv->master;
+	int ret;
+
+	if (master->unique_len || master->unique)
+		return -EBUSY;
+
+	if (!u->unique_len || u->unique_len > 1024)
+		return -EINVAL;
+
+	if (!dev->driver->bus->set_unique)
+		return -EINVAL;
+
+	ret = dev->driver->bus->set_unique(dev, master, u);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	drm_unset_busid(dev, master);
+	return ret;
+}
+
+static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_master *master = file_priv->master;
+	int ret;
+
+	if (master->unique != NULL)
+		drm_unset_busid(dev, master);
+
+	ret = dev->driver->bus->set_busid(dev, master);
+	if (ret)
+		goto err;
+	return 0;
+err:
+	drm_unset_busid(dev, master);
+	return ret;
+}
+
+/**
+ * Get mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping with the specified offset and copies its information
+ * into userspace.
+ */
+int drm_getmap(struct drm_device *dev, void *data,
+	       struct drm_file *file_priv)
+{
+	struct drm_map *map = data;
+	struct drm_map_list *r_list = NULL;
+	struct list_head *list;
+	int idx;
+	int i;
+
+	idx = map->offset;
+	if (idx < 0)
+		return -EINVAL;
+
+	i = 0;
+	mutex_lock(&dev->struct_mutex);
+	list_for_each(list, &dev->maplist) {
+		if (i == idx) {
+			r_list = list_entry(list, struct drm_map_list, head);
+			break;
+		}
+		i++;
+	}
+	if (!r_list || !r_list->map) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	map->offset = r_list->map->offset;
+	map->size = r_list->map->size;
+	map->type = r_list->map->type;
+	map->flags = r_list->map->flags;
+	map->handle = (void *)(unsigned long) r_list->user_token;
+	map->mtrr = r_list->map->mtrr;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
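+
+/*
+ * Note that on input map->offset is (mis)used as a list index, not a
+ * byte offset. An illustrative (hypothetical) userspace enumeration:
+ *
+ *	struct drm_map map;
+ *	for (int i = 0; ; i++) {
+ *		memset(&map, 0, sizeof(map));
+ *		map.offset = i;
+ *		if (ioctl(fd, DRM_IOCTL_GET_MAP, &map) < 0)
+ *			break;	-- fails with EINVAL past the last map
+ *		...
+ *	}
+ */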
+
+/**
+ * Get client information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_client structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the client with the specified index and copies its information
+ * into userspace.
+ */
+int drm_getclient(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct drm_client *client = data;
+	struct drm_file *pt;
+	int idx;
+	int i;
+
+	idx = client->idx;
+	i = 0;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry(pt, &dev->filelist, lhead) {
+		if (i++ >= idx) {
+			client->auth = pt->authenticated;
+			client->pid = pid_vnr(pt->pid);
+			client->uid = from_kuid_munged(current_user_ns(), pt->uid);
+			client->magic = pt->magic;
+			client->iocs = pt->ioctl_count;
+			mutex_unlock(&dev->struct_mutex);
+
+			return 0;
+		}
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return -EINVAL;
+}
+
+/**
+ * Get statistics information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_stats structure.
+ *
+ * \return zero on success or a negative number on failure.
+ */
+int drm_getstats(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
+{
+	struct drm_stats *stats = data;
+	int i;
+
+	memset(stats, 0, sizeof(*stats));
+
+	for (i = 0; i < dev->counters; i++) {
+		if (dev->types[i] == _DRM_STAT_LOCK)
+			stats->data[i].value =
+			    (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
+		else
+			stats->data[i].value = atomic_read(&dev->counts[i]);
+		stats->data[i].type = dev->types[i];
+	}
+
+	stats->count = dev->counters;
+
+	return 0;
+}
+
+/**
+ * Get device/driver capabilities
+ */
+int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_get_cap *req = data;
+
+	req->value = 0;
+	switch (req->capability) {
+	case DRM_CAP_DUMB_BUFFER:
+		if (dev->driver->dumb_create)
+			req->value = 1;
+		break;
+	case DRM_CAP_VBLANK_HIGH_CRTC:
+		req->value = 1;
+		break;
+	case DRM_CAP_DUMB_PREFERRED_DEPTH:
+		req->value = dev->mode_config.preferred_depth;
+		break;
+	case DRM_CAP_DUMB_PREFER_SHADOW:
+		req->value = dev->mode_config.prefer_shadow;
+		break;
+	case DRM_CAP_PRIME:
+		req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
+		req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
+		break;
+	case DRM_CAP_TIMESTAMP_MONOTONIC:
+		req->value = drm_timestamp_monotonic;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
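+
+/*
+ * Illustrative userspace query (sketch): struct drm_get_cap carries the
+ * capability in and the value out, e.g.:
+ *
+ *	struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };
+ *	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value)
+ *		-- the driver supports dumb buffer allocation
+ */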
+
+/**
+ * Setversion ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Sets the requested interface version
+ */
+int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_set_version *sv = data;
+	int if_version, retcode = 0;
+
+	if (sv->drm_di_major != -1) {
+		if (sv->drm_di_major != DRM_IF_MAJOR ||
+		    sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
+			retcode = -EINVAL;
+			goto done;
+		}
+		if_version = DRM_IF_VERSION(sv->drm_di_major,
+					    sv->drm_di_minor);
+		dev->if_version = max(if_version, dev->if_version);
+		if (sv->drm_di_minor >= 1) {
+			/*
+			 * Version 1.1 includes tying of DRM to specific device
+			 * Version 1.4 has proper PCI domain support
+			 */
+			retcode = drm_set_busid(dev, file_priv);
+			if (retcode)
+				goto done;
+		}
+	}
+
+	if (sv->drm_dd_major != -1) {
+		if (sv->drm_dd_major != dev->driver->major ||
+		    sv->drm_dd_minor < 0 || sv->drm_dd_minor >
+		    dev->driver->minor) {
+			retcode = -EINVAL;
+			goto done;
+		}
+
+		if (dev->driver->set_version)
+			dev->driver->set_version(dev, sv);
+	}
+
+done:
+	sv->drm_di_major = DRM_IF_MAJOR;
+	sv->drm_di_minor = DRM_IF_MINOR;
+	sv->drm_dd_major = dev->driver->major;
+	sv->drm_dd_minor = dev->driver->minor;
+
+	return retcode;
+}
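+
+/*
+ * Illustrative userspace handshake (sketch): asking for interface 1.4
+ * opts in to proper PCI-domain busids via drm_set_busid() above:
+ *
+ *	struct drm_set_version sv = {
+ *		.drm_di_major = 1, .drm_di_minor = 4,
+ *		.drm_dd_major = -1, .drm_dd_minor = -1,	-- leave unchanged
+ *	};
+ *	ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
+ *	-- sv now reports the versions actually supported
+ */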
+
+/** No-op ioctl. */
+int drm_noop(struct drm_device *dev, void *data,
+	     struct drm_file *file_priv)
+{
+	DRM_DEBUG("\n");
+	return 0;
+}
+EXPORT_SYMBOL(drm_noop);
diff --git a/linux-imx/drivers/gpu/drm/drm_irq.c b/linux-imx/drivers/gpu/drm/drm_irq.c
new file mode 100644
index 0000000..f92da0a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_irq.c
@@ -0,0 +1,1417 @@
+/**
+ * \file drm_irq.c
+ * IRQ support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include "drm_trace.h"
+
+#include <linux/interrupt.h>	/* For task queue support */
+#include <linux/slab.h>
+
+#include <linux/vgaarb.h>
+#include <linux/export.h>
+
+/* Access macro for slots in vblank timestamp ringbuffer. */
+#define vblanktimestamp(dev, crtc, count) ( \
+	(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
+	((count) % DRM_VBLANKTIME_RBSIZE)])
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
+ */
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
+/**
+ * Get interrupt from bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_irq_busid structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Finds the PCI device with the specified bus id and gets its IRQ number.
+ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+ * to that of the device that this DRM instance is attached to.
+ */
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_irq_busid *p = data;
+
+	if (!dev->driver->bus->irq_by_busid)
+		return -EINVAL;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+		return -EINVAL;
+
+	return dev->driver->bus->irq_by_busid(dev, p);
+}
+
+/*
+ * Clear vblank timestamp buffer for a crtc.
+ */
+static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
+{
+	memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
+		DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+}
+
+/*
+ * Disable vblank irq's on a crtc, making sure that the last hardware
+ * vblank count and the corresponding software vblank counter stay
+ * consistent and are preserved, even if spurious vblank irq's arrive
+ * after the disable.
+ */
+static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+	u32 vblcount;
+	s64 diff_ns;
+	int vblrc;
+	struct timeval tvblank;
+	int count = DRM_TIMESTAMP_MAXRETRIES;
+
+	/* Prevent vblank irq processing while disabling vblank irqs,
+	 * so no updates of timestamps or count can happen after we've
+	 * disabled. Needed to prevent races in case of delayed irq's.
+	 */
+	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+	dev->driver->disable_vblank(dev, crtc);
+	dev->vblank_enabled[crtc] = 0;
+
+	/* No further vblank irq's will be processed after
+	 * this point. Get current hardware vblank count and
+	 * vblank timestamp, repeat until they are consistent.
+	 *
+	 * FIXME: There is still a race condition here and in
+	 * drm_update_vblank_count() which can cause off-by-one
+	 * reinitialization of software vblank counter. If gpu
+	 * vblank counter doesn't increment exactly at the leading
+	 * edge of a vblank interval, then we can lose 1 count if
+	 * we happen to execute between start of vblank and the
+	 * delayed gpu counter increment.
+	 */
+	do {
+		dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+
+	if (!count)
+		vblrc = 0;
+
+	/* Compute time difference to stored timestamp of last vblank
+	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
+	 */
+	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	diff_ns = timeval_to_ns(&tvblank) -
+		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+	/* If there is at least 1 msec difference between the last stored
+	 * timestamp and tvblank, then we are currently executing our
+	 * disable inside a new vblank interval; the tvblank timestamp
+	 * corresponds to this new vblank interval, and the irq handler
+	 * for this vblank didn't run yet and won't run due to our disable.
+	 * Therefore we need to do the job of drm_handle_vblank() and
+	 * increment the vblank counter by one to account for this vblank.
+	 *
+	 * Skip this step if there isn't any high precision timestamp
+	 * available. In that case we can't account for this and just
+	 * hope for the best.
+	 */
+	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
+		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
+	}
+
+	/* Invalidate all timestamps while vblank irq's are off. */
+	clear_vblank_timestamps(dev, crtc);
+
+	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+}
+
+static void vblank_disable_fn(unsigned long arg)
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	unsigned long irqflags;
+	int i;
+
+	if (!dev->vblank_disable_allowed)
+		return;
+
+	for (i = 0; i < dev->num_crtcs; i++) {
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
+		    dev->vblank_enabled[i]) {
+			DRM_DEBUG("disabling vblank on crtc %d\n", i);
+			vblank_disable_and_save(dev, i);
+		}
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+	}
+}
+
+void drm_vblank_cleanup(struct drm_device *dev)
+{
+	/* Bail if the driver didn't call drm_vblank_init() */
+	if (dev->num_crtcs == 0)
+		return;
+
+	del_timer_sync(&dev->vblank_disable_timer);
+
+	vblank_disable_fn((unsigned long)dev);
+
+	kfree(dev->vbl_queue);
+	kfree(dev->_vblank_count);
+	kfree(dev->vblank_refcount);
+	kfree(dev->vblank_enabled);
+	kfree(dev->last_vblank);
+	kfree(dev->last_vblank_wait);
+	kfree(dev->vblank_inmodeset);
+	kfree(dev->_vblank_time);
+
+	dev->num_crtcs = 0;
+}
+EXPORT_SYMBOL(drm_vblank_cleanup);
+
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+	int i, ret = -ENOMEM;
+
+	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
+		    (unsigned long)dev);
+	spin_lock_init(&dev->vbl_lock);
+	spin_lock_init(&dev->vblank_time_lock);
+
+	dev->num_crtcs = num_crtcs;
+
+	dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
+				 GFP_KERNEL);
+	if (!dev->vbl_queue)
+		goto err;
+
+	dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
+	if (!dev->_vblank_count)
+		goto err;
+
+	dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
+				       GFP_KERNEL);
+	if (!dev->vblank_refcount)
+		goto err;
+
+	dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
+	if (!dev->vblank_enabled)
+		goto err;
+
+	dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
+	if (!dev->last_vblank)
+		goto err;
+
+	dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
+	if (!dev->last_vblank_wait)
+		goto err;
+
+	dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
+	if (!dev->vblank_inmodeset)
+		goto err;
+
+	dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
+				    sizeof(struct timeval), GFP_KERNEL);
+	if (!dev->_vblank_time)
+		goto err;
+
+	DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+
+	/* Driver specific high-precision vblank timestamping supported? */
+	if (dev->driver->get_vblank_timestamp)
+		DRM_INFO("Driver supports precise vblank timestamp query.\n");
+	else
+		DRM_INFO("No driver support for vblank timestamp query.\n");
+
+	/* Zero per-crtc vblank stuff */
+	for (i = 0; i < num_crtcs; i++) {
+		init_waitqueue_head(&dev->vbl_queue[i]);
+		atomic_set(&dev->_vblank_count[i], 0);
+		atomic_set(&dev->vblank_refcount[i], 0);
+	}
+
+	dev->vblank_disable_allowed = 0;
+	return 0;
+
+err:
+	drm_vblank_cleanup(dev);
+	return ret;
+}
+EXPORT_SYMBOL(drm_vblank_init);
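+
+/*
+ * Illustrative driver-load sketch (hypothetical driver): vblank state is
+ * sized per crtc up front, and torn down with drm_vblank_cleanup() on
+ * unload. On failure the function has already cleaned up after itself:
+ *
+ *	ret = drm_vblank_init(dev, 2);	-- two crtcs
+ *	if (ret)
+ *		return ret;
+ */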
+
+static void drm_irq_vgaarb_nokms(void *cookie, bool state)
+{
+	struct drm_device *dev = cookie;
+
+	if (dev->driver->vgaarb_irq) {
+		dev->driver->vgaarb_irq(dev, state);
+		return;
+	}
+
+	if (!dev->irq_enabled)
+		return;
+
+	if (state) {
+		if (dev->driver->irq_uninstall)
+			dev->driver->irq_uninstall(dev);
+	} else {
+		if (dev->driver->irq_preinstall)
+			dev->driver->irq_preinstall(dev);
+		if (dev->driver->irq_postinstall)
+			dev->driver->irq_postinstall(dev);
+	}
+}
+
+/**
+ * Install IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Initializes the IRQ related data. Installs the handler, calling the driver
+ * \c irq_preinstall() and \c irq_postinstall() functions
+ * before and after the installation.
+ */
+int drm_irq_install(struct drm_device *dev)
+{
+	int ret;
+	unsigned long sh_flags = 0;
+	char *irqname;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+		return -EINVAL;
+
+	if (drm_dev_to_irq(dev) == 0)
+		return -EINVAL;
+
+	mutex_lock(&dev->struct_mutex);
+
+	/* Driver must have been initialized */
+	if (!dev->dev_private) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	if (dev->irq_enabled) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EBUSY;
+	}
+	dev->irq_enabled = 1;
+	mutex_unlock(&dev->struct_mutex);
+
+	DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
+
+	/* Before installing handler */
+	if (dev->driver->irq_preinstall)
+		dev->driver->irq_preinstall(dev);
+
+	/* Install handler */
+	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
+		sh_flags = IRQF_SHARED;
+
+	if (dev->devname)
+		irqname = dev->devname;
+	else
+		irqname = dev->driver->name;
+
+	ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
+			  sh_flags, irqname, dev);
+
+	if (ret < 0) {
+		mutex_lock(&dev->struct_mutex);
+		dev->irq_enabled = 0;
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
+
+	/* After installing handler */
+	if (dev->driver->irq_postinstall)
+		ret = dev->driver->irq_postinstall(dev);
+
+	if (ret < 0) {
+		mutex_lock(&dev->struct_mutex);
+		dev->irq_enabled = 0;
+		mutex_unlock(&dev->struct_mutex);
+		if (!drm_core_check_feature(dev, DRIVER_MODESET))
+			vga_client_register(dev->pdev, NULL, NULL, NULL);
+		free_irq(drm_dev_to_irq(dev), dev);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_irq_install);
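+
+/*
+ * Illustrative call site (sketch, hypothetical driver): a DRIVER_HAVE_IRQ
+ * driver typically installs its handler near the end of its load hook,
+ * after dev_private is set up, and pairs it with drm_irq_uninstall() on
+ * unload:
+ *
+ *	ret = drm_irq_install(dev);
+ *	if (ret)
+ *		goto err_vblank_cleanup;
+ */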
+
+/**
+ * Uninstall the IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Calls the driver's \c irq_uninstall() function, and stops the irq.
+ */
+int drm_irq_uninstall(struct drm_device *dev)
+{
+	unsigned long irqflags;
+	int irq_enabled, i;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+		return -EINVAL;
+
+	mutex_lock(&dev->struct_mutex);
+	irq_enabled = dev->irq_enabled;
+	dev->irq_enabled = 0;
+	mutex_unlock(&dev->struct_mutex);
+
+	/*
+	 * Wake up any waiters so they don't hang.
+	 */
+	if (dev->num_crtcs) {
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+		for (i = 0; i < dev->num_crtcs; i++) {
+			DRM_WAKEUP(&dev->vbl_queue[i]);
+			dev->vblank_enabled[i] = 0;
+			dev->last_vblank[i] =
+				dev->driver->get_vblank_counter(dev, i);
+		}
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+	}
+
+	if (!irq_enabled)
+		return -EINVAL;
+
+	DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		vga_client_register(dev->pdev, NULL, NULL, NULL);
+
+	if (dev->driver->irq_uninstall)
+		dev->driver->irq_uninstall(dev);
+
+	free_irq(drm_dev_to_irq(dev), dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_irq_uninstall);
+
+/**
+ * IRQ control ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_control structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls irq_install() or irq_uninstall() according to \p arg.
+ */
+int drm_control(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_control *ctl = data;
+
+	/* If we don't have an irq we fall back for compatibility reasons -
+	 * this used to be a separate function in drm_dma.h.
+	 */
+
+	switch (ctl->func) {
+	case DRM_INST_HANDLER:
+		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+			return 0;
+		if (drm_core_check_feature(dev, DRIVER_MODESET))
+			return 0;
+		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+		    ctl->irq != drm_dev_to_irq(dev))
+			return -EINVAL;
+		return drm_irq_install(dev);
+	case DRM_UNINST_HANDLER:
+		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+			return 0;
+		if (drm_core_check_feature(dev, DRIVER_MODESET))
+			return 0;
+		return drm_irq_uninstall(dev);
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * drm_calc_timestamping_constants - Calculate and
+ * store various constants which are later needed by
+ * vblank and swap-completion timestamping, e.g, by
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ * They are derived from crtc's true scanout timing,
+ * so they take things like panel scaling or other
+ * adjustments into account.
+ *
+ * @crtc: drm_crtc whose timestamp constants should be updated.
+ *
+ */
+void drm_calc_timestamping_constants(struct drm_crtc *crtc)
+{
+	s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+	u64 dotclock;
+
+	/* Dot clock in Hz: */
+	dotclock = (u64) crtc->hwmode.clock * 1000;
+
+	/* Fields of interlaced scanout modes last only half a frame duration.
+	 * Double the dotclock to get half the frame-/line-/pixel duration.
+	 */
+	if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+		dotclock *= 2;
+
+	/* Valid dotclock? */
+	if (dotclock > 0) {
+		int frame_size;
+		/* Convert scanline length in pixels and video dot clock to
+		 * line duration, frame duration and pixel duration in
+		 * nanoseconds:
+		 */
+		pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
+		linedur_ns  = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
+					      1000000000), dotclock);
+		frame_size = crtc->hwmode.crtc_htotal *
+				crtc->hwmode.crtc_vtotal;
+		framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000,
+					      dotclock);
+	} else
+		DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+			  crtc->base.id);
+
+	crtc->pixeldur_ns = pixeldur_ns;
+	crtc->linedur_ns  = linedur_ns;
+	crtc->framedur_ns = framedur_ns;
+
+	DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+		  crtc->base.id, crtc->hwmode.crtc_htotal,
+		  crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+	DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+		  crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
+		  (int) linedur_ns, (int) pixeldur_ns);
+}
+EXPORT_SYMBOL(drm_calc_timestamping_constants);
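+
+/*
+ * Worked example of the constants above (mode values assumed for
+ * illustration, not taken from any specific device): a 1920x1080@60 mode
+ * with crtc_htotal=2200, crtc_vtotal=1125 and clock=148500 kHz gives a
+ * dotclock of 148.5 MHz, so with the integer divisions above:
+ *
+ *	pixeldur_ns = 10^9 / 148500000                  ~ 6 ns
+ *	linedur_ns  = 2200 * 10^9 / 148500000           ~ 14814 ns
+ *	framedur_ns = 2200 * 1125 * 10^9 / 148500000    ~ 16666666 ns (60 Hz)
+ */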
+
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
+ * drivers. Implements calculation of exact vblank timestamps from
+ * given drm_display_mode timings and current video scanout position
+ * of a crtc. This can be called from within get_vblank_timestamp()
+ * implementation of a kms driver to implement the actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as no operation if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+ * @dev: DRM device.
+ * @crtc: Which crtc's vblank timestamp to retrieve.
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs).
+ *             On return contains true maximum error of timestamp.
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp.
+ * @flags: Flags to pass to driver:
+ *         0 = Default.
+ *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ *
+ * Returns negative value on error, failure or if not supported in current
+ * video mode:
+ *
+ * -EINVAL   - Invalid crtc.
+ * -EAGAIN   - Temporary unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO      - Failed, e.g., due to failed scanout position query.
+ *
+ * Returns or'ed positive status flags on success:
+ *
+ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
+ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
+ *
+ */
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+					  int *max_error,
+					  struct timeval *vblank_time,
+					  unsigned flags,
+					  struct drm_crtc *refcrtc)
+{
+	ktime_t stime, etime, mono_time_offset;
+	struct timeval tv_etime;
+	struct drm_display_mode *mode;
+	int vbl_status, vtotal, vdisplay;
+	int vpos, hpos, i;
+	s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+	bool invbl;
+
+	if (crtc < 0 || crtc >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	/* Scanout position query not supported? Should not happen. */
+	if (!dev->driver->get_scanout_position) {
+		DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+		return -EIO;
+	}
+
+	mode = &refcrtc->hwmode;
+	vtotal = mode->crtc_vtotal;
+	vdisplay = mode->crtc_vdisplay;
+
+	/* Durations of frames, lines, pixels in nanoseconds. */
+	framedur_ns = refcrtc->framedur_ns;
+	linedur_ns  = refcrtc->linedur_ns;
+	pixeldur_ns = refcrtc->pixeldur_ns;
+
+	/* If the mode timing is undefined, just return as a no-op:
+	 * this happens during initial modesetting of a crtc.
+	 */
+	if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+		DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+		return -EAGAIN;
+	}
+
+	/* Get current scanout position with system timestamp.
+	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+	 * if a single query takes longer than max_error nanoseconds.
+	 *
+	 * This guarantees a tight bound on maximum error if
+	 * code gets preempted or delayed for some reason.
+	 */
+	for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+		/* Disable preemption to make it very likely to
+		 * succeed in the first iteration even on a PREEMPT_RT kernel.
+		 */
+		preempt_disable();
+
+		/* Get system timestamp before query. */
+		stime = ktime_get();
+
+		/* Get vertical and horizontal scanout pos. vpos, hpos. */
+		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
+
+		/* Get system timestamp after query. */
+		etime = ktime_get();
+		if (!drm_timestamp_monotonic)
+			mono_time_offset = ktime_get_monotonic_offset();
+
+		preempt_enable();
+
+		/* Return as no-op if scanout query unsupported or failed. */
+		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+				  crtc, vbl_status);
+			return -EIO;
+		}
+
+		duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
+
+		/* Accept result with <  max_error nsecs timing uncertainty. */
+		if (duration_ns <= (s64) *max_error)
+			break;
+	}
+
+	/* Noisy system timing? */
+	if (i == DRM_TIMESTAMP_MAXRETRIES) {
+		DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
+			  crtc, (int) duration_ns/1000, *max_error/1000, i);
+	}
+
+	/* Return upper bound of timestamp precision error. */
+	*max_error = (int) duration_ns;
+
+	/* Check if in vblank area:
+	 * vpos is >=0 in video scanout area, but negative
+	 * within vblank area, counting down the number of lines until
+	 * start of scanout.
+	 */
+	invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+
+	/* Convert scanout position into elapsed time at raw_time query
+	 * since start of scanout at first display scanline. delta_ns
+	 * can be negative if start of scanout hasn't happened yet.
+	 */
+	delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
+
+	/* Is vpos outside nominal vblank area, but less than
+	 * 1/100 of a frame height away from start of vblank?
+	 * If so, assume this isn't a massively delayed vblank
+	 * interrupt, but a vblank interrupt that fired a few
+	 * microseconds before true start of vblank. Compensate
+	 * by adding a full frame duration to the final timestamp.
+	 * Happens, e.g., on ATI R500, R600.
+	 *
+	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
+	 */
+	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
+	    ((vdisplay - vpos) < vtotal / 100)) {
+		delta_ns = delta_ns - framedur_ns;
+
+		/* Signal this correction as "applied". */
+		vbl_status |= 0x8;
+	}
+
+	if (!drm_timestamp_monotonic)
+		etime = ktime_sub(etime, mono_time_offset);
+
+	/* save this only for debugging purposes */
+	tv_etime = ktime_to_timeval(etime);
+	/* Subtract time delta from raw timestamp to get final
+	 * vblank_time timestamp for end of vblank.
+	 */
+	if (delta_ns < 0)
+		etime = ktime_add_ns(etime, -delta_ns);
+	else
+		etime = ktime_sub_ns(etime, delta_ns);
+	*vblank_time = ktime_to_timeval(etime);
+
+	DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+		  crtc, (int)vbl_status, hpos, vpos,
+		  (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
+		  (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
+		  (int)duration_ns/1000, i);
+
+	vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+	if (invbl)
+		vbl_status |= DRM_VBLANKTIME_INVBL;
+
+	return vbl_status;
+}
+EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
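+
+/*
+ * Illustrative sketch (hypothetical driver): a kms driver's
+ * get_vblank_timestamp() hook is usually a thin wrapper around this
+ * helper, supplying the drm_crtc that defines the true scanout timing:
+ *
+ *	static int foo_get_vblank_timestamp(struct drm_device *dev, int crtc,
+ *					    int *max_error,
+ *					    struct timeval *vblank_time,
+ *					    unsigned flags)
+ *	{
+ *		struct drm_crtc *refcrtc = foo_crtc_for_pipe(dev, crtc);
+ *
+ *		return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc,
+ *				max_error, vblank_time, flags, refcrtc);
+ *	}
+ */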
+
+static struct timeval get_drm_timestamp(void)
+{
+	ktime_t now;
+
+	now = ktime_get();
+	if (!drm_timestamp_monotonic)
+		now = ktime_sub(now, ktime_get_monotonic_offset());
+
+	return ktime_to_timeval(now);
+}
+
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval.
+ *
+ * @dev: DRM device
+ * @crtc: which crtc's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ *         0 = Default.
+ *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on specified crtc. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if timestamp originates from uncorrected do_gettimeofday()
+ * call, i.e., it isn't very precisely locked to the true vblank.
+ *
+ * Returns non-zero if timestamp is considered to be very precise.
+ */
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+			      struct timeval *tvblank, unsigned flags)
+{
+	int ret;
+
+	/* Define requested maximum error on timestamps (nanoseconds). */
+	int max_error = (int) drm_timestamp_precision * 1000;
+
+	/* Query driver if possible and precision timestamping enabled. */
+	if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
+		ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+							tvblank, flags);
+		if (ret > 0)
+			return (u32) ret;
+	}
+
+	/* GPU high precision timestamp query unsupported or failed.
+	 * Return current monotonic/gettimeofday timestamp as best estimate.
+	 */
+	*tvblank = get_drm_timestamp();
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_get_last_vbltimestamp);
+
+/**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ */
+u32 drm_vblank_count(struct drm_device *dev, int crtc)
+{
+	return atomic_read(&dev->_vblank_count[crtc]);
+}
+EXPORT_SYMBOL(drm_vblank_count);
+
+/**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value.
+ *
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Also returns the system timestamp of the time of
+ * the vblank interval that corresponds to the current vblank counter value.
+ */
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+			      struct timeval *vblanktime)
+{
+	u32 cur_vblank;
+
+	/* Read timestamp from slot of _vblank_time ringbuffer
+	 * that corresponds to current vblank count. Retry if
+	 * count has incremented during readout. This works like
+	 * a seqlock.
+	 */
+	do {
+		cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+		smp_rmb();
+	} while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+
+	return cur_vblank;
+}
+EXPORT_SYMBOL(drm_vblank_count_and_time);
+
+static void send_vblank_event(struct drm_device *dev,
+		struct drm_pending_vblank_event *e,
+		unsigned long seq, struct timeval *now)
+{
+	WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+	e->event.sequence = seq;
+	e->event.tv_sec = now->tv_sec;
+	e->event.tv_usec = now->tv_usec;
+
+	list_add_tail(&e->base.link,
+		      &e->base.file_priv->event_list);
+	wake_up_interruptible(&e->base.file_priv->event_wait);
+	trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+					 e->event.sequence);
+}
+
+/**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ */
+void drm_send_vblank_event(struct drm_device *dev, int crtc,
+		struct drm_pending_vblank_event *e)
+{
+	struct timeval now;
+	unsigned int seq;
+	if (crtc >= 0) {
+		seq = drm_vblank_count_and_time(dev, crtc, &now);
+	} else {
+		seq = 0;
+
+		now = get_drm_timestamp();
+	}
+	e->pipe = crtc;
+	send_vblank_event(dev, e, seq, &now);
+}
+EXPORT_SYMBOL(drm_send_vblank_event);
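+
+/*
+ * Illustrative pageflip-completion sketch (hypothetical driver fields),
+ * showing the required event_lock around the call:
+ *
+ *	spin_lock_irqsave(&dev->event_lock, flags);
+ *	if (work->event)
+ *		drm_send_vblank_event(dev, pipe, work->event);
+ *	spin_unlock_irqrestore(&dev->event_lock, flags);
+ */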
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc).  Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ *
+ * Only necessary when going from off->on, to account for frames we
+ * didn't get an interrupt for.
+ *
+ * Note: caller must hold dev->vbl_lock since this reads & writes
+ * device vblank fields.
+ */
+static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+{
+	u32 cur_vblank, diff, tslot, rc;
+	struct timeval t_vblank;
+
+	/*
+	 * Interrupts were disabled prior to this call, so deal with counter
+	 * wrap if needed.
+	 * NOTE!  It's possible we lost a full dev->max_vblank_count events
+	 * here if the register is small or we had vblank interrupts off for
+	 * a long time.
+	 *
+	 * We repeat the hardware vblank counter & timestamp query until
+	 * we get consistent results. This prevents races where the gpu
+	 * updates its hardware counter while we are retrieving the
+	 * corresponding vblank timestamp.
+	 */
+	do {
+		cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+		rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+	} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+	/* Deal with counter wrap */
+	diff = cur_vblank - dev->last_vblank[crtc];
+	if (cur_vblank < dev->last_vblank[crtc]) {
+		diff += dev->max_vblank_count;
+
+		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+			  crtc, dev->last_vblank[crtc], cur_vblank, diff);
+	}
+
+	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+		  crtc, diff);
+
+	/* Reinitialize corresponding vblank timestamp if high-precision query
+	 * available. Skip this step if query unsupported or failed. Will
+	 * reinitialize delayed at next vblank interrupt in that case.
+	 */
+	if (rc) {
+		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+		vblanktimestamp(dev, crtc, tslot) = t_vblank;
+	}
+
+	smp_mb__before_atomic_inc();
+	atomic_add(diff, &dev->_vblank_count[crtc]);
+	smp_mb__after_atomic_inc();
+}
+
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * RETURNS
+ * Zero on success, nonzero on failure.
+ */
+int drm_vblank_get(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags, irqflags2;
+	int ret = 0;
+
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	/* Going from 0->1 means we have to enable interrupts again */
+	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+		spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
+		if (!dev->vblank_enabled[crtc]) {
+			/* Enable vblank irqs under vblank_time_lock protection.
+			 * All vblank count & timestamp updates are held off
+			 * until we are done reinitializing master counter and
+			 * timestamps. Filter code in drm_handle_vblank() will
+			 * prevent double-accounting of the same vblank interval.
+			 */
+			ret = dev->driver->enable_vblank(dev, crtc);
+			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
+				  crtc, ret);
+			if (ret)
+				atomic_dec(&dev->vblank_refcount[crtc]);
+			else {
+				dev->vblank_enabled[crtc] = 1;
+				drm_update_vblank_count(dev, crtc);
+			}
+		}
+		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+	} else {
+		if (!dev->vblank_enabled[crtc]) {
+			atomic_dec(&dev->vblank_refcount[crtc]);
+			ret = -EINVAL;
+		}
+	}
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_vblank_get);
+
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ */
+void drm_vblank_put(struct drm_device *dev, int crtc)
+{
+	BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
+
+	/* Last user schedules interrupt disable */
+	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+	    (drm_vblank_offdelay > 0))
+		mod_timer(&dev->vblank_disable_timer,
+			  jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
+}
+EXPORT_SYMBOL(drm_vblank_put);
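+
+/*
+ * Illustrative get/put pairing (sketch): a driver holds a vblank
+ * reference for as long as it needs the counter ticking, e.g. while a
+ * pageflip is outstanding:
+ *
+ *	ret = drm_vblank_get(dev, pipe);	-- keeps vblank irqs enabled
+ *	if (ret)
+ *		return ret;
+ *	... queue the flip, wait for the completion irq ...
+ *	drm_vblank_put(dev, pipe);	-- disable deferred by offdelay timer
+ */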
+
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Caller must hold event lock.
+ */
+void drm_vblank_off(struct drm_device *dev, int crtc)
+{
+	struct drm_pending_vblank_event *e, *t;
+	struct timeval now;
+	unsigned long irqflags;
+	unsigned int seq;
+
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	vblank_disable_and_save(dev, crtc);
+	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+
+	/* Send any queued vblank events, lest the natives grow disquiet */
+	seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+	spin_lock(&dev->event_lock);
+	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+		if (e->pipe != crtc)
+			continue;
+		DRM_DEBUG("Sending premature vblank event on disable: \
+			  wanted %d, current %d\n",
+			  e->event.sequence, seq);
+		list_del(&e->base.link);
+		drm_vblank_put(dev, e->pipe);
+		send_vblank_event(dev, e, seq, &now);
+	}
+	spin_unlock(&dev->event_lock);
+
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_vblank_off);
+
+/**
+ * drm_vblank_pre_modeset - account for vblanks across mode sets
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Account for vblank events across mode setting events, which will likely
+ * reset the hardware frame counter.
+ */
+void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+{
+	/* vblank is not initialized (IRQ not installed?), or has been freed */
+	if (!dev->num_crtcs)
+		return;
+	/*
+	 * To avoid all the problems that might happen if interrupts
+	 * were enabled/disabled around or between these calls, we just
+	 * have the kernel take a reference on the CRTC (just once though,
+	 * to avoid corrupting the count if multiple, mismatched calls occur),
+	 * so that interrupts remain enabled in the interim.
+	 */
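+	/*
+	 * vblank_inmodeset is used as a two-bit flag: bit 0 records that a
+	 * modeset is in progress, bit 1 records that we actually took a
+	 * vblank reference and so must drop it in drm_vblank_post_modeset().
+	 */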
+	if (!dev->vblank_inmodeset[crtc]) {
+		dev->vblank_inmodeset[crtc] = 0x1;
+		if (drm_vblank_get(dev, crtc) == 0)
+			dev->vblank_inmodeset[crtc] |= 0x2;
+	}
+}
+EXPORT_SYMBOL(drm_vblank_pre_modeset);
+
+void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+
+	/* vblank is not initialized (IRQ not installed?), or has been freed */
+	if (!dev->num_crtcs)
+		return;
+
+	if (dev->vblank_inmodeset[crtc]) {
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+		dev->vblank_disable_allowed = 1;
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+		if (dev->vblank_inmodeset[crtc] & 0x2)
+			drm_vblank_put(dev, crtc);
+
+		dev->vblank_inmodeset[crtc] = 0;
+	}
+}
+EXPORT_SYMBOL(drm_vblank_post_modeset);
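+
+/*
+ * Usage sketch (illustrative; my_driver_set_mode() is a hypothetical
+ * driver function). UMS drivers bracket the hardware mode switch, which
+ * may reset the hardware frame counter:
+ *
+ *	drm_vblank_pre_modeset(dev, crtc);
+ *	my_driver_set_mode(dev, crtc, mode);
+ *	drm_vblank_post_modeset(dev, crtc);
+ */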
+
+/**
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Applications should issue %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * commands via this ioctl around modesetting so that any lost vblank
+ * events are accounted for.
+ *
+ * Generally the counter will reset across mode sets.  If interrupts are
+ * enabled around this call, we don't have to do anything since the counter
+ * will have already been incremented.
+ */
+int drm_modeset_ctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_modeset_ctl *modeset = data;
+	unsigned int crtc;
+
+	/* If drm_vblank_init() hasn't been called yet, just no-op */
+	if (!dev->num_crtcs)
+		return 0;
+
+	/* KMS drivers handle this internally */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return 0;
+
+	crtc = modeset->crtc;
+	if (crtc >= dev->num_crtcs)
+		return -EINVAL;
+
+	switch (modeset->cmd) {
+	case _DRM_PRE_MODESET:
+		drm_vblank_pre_modeset(dev, crtc);
+		break;
+	case _DRM_POST_MODESET:
+		drm_vblank_post_modeset(dev, crtc);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
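+
+/*
+ * Userspace side, sketched under the assumption that the standard
+ * DRM_IOCTL_MODESET_CTL ioctl number from the uapi headers is used and
+ * that fd is an open DRM file descriptor:
+ *
+ *	struct drm_modeset_ctl ctl = { .crtc = 0, .cmd = _DRM_PRE_MODESET };
+ *	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
+ *	// ... program the new mode ...
+ *	ctl.cmd = _DRM_POST_MODESET;
+ *	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
+ */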
+
+static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+				  union drm_wait_vblank *vblwait,
+				  struct drm_file *file_priv)
+{
+	struct drm_pending_vblank_event *e;
+	struct timeval now;
+	unsigned long flags;
+	unsigned int seq;
+	int ret;
+
+	e = kzalloc(sizeof *e, GFP_KERNEL);
+	if (e == NULL) {
+		ret = -ENOMEM;
+		goto err_put;
+	}
+
+	e->pipe = pipe;
+	e->base.pid = current->pid;
+	e->event.base.type = DRM_EVENT_VBLANK;
+	e->event.base.length = sizeof e->event;
+	e->event.user_data = vblwait->request.signal;
+	e->base.event = &e->event.base;
+	e->base.file_priv = file_priv;
+	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	if (file_priv->event_space < sizeof e->event) {
+		ret = -EBUSY;
+		goto err_unlock;
+	}
+
+	file_priv->event_space -= sizeof e->event;
+	seq = drm_vblank_count_and_time(dev, pipe, &now);
+
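+	/*
+	 * Note on the wrap-safe comparisons used below: the vblank counter
+	 * is a free-running 32-bit value, so "seq is at or past the request"
+	 * is tested as an unsigned difference. E.g. with seq = 5 and a
+	 * requested sequence of 0xfffffffe, seq - request wraps to 7, which
+	 * is <= (1 << 23), so the request counts as already passed even
+	 * though the counter wrapped in between.
+	 */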
+	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+	    (seq - vblwait->request.sequence) <= (1 << 23)) {
+		vblwait->request.sequence = seq + 1;
+		vblwait->reply.sequence = vblwait->request.sequence;
+	}
+
+	DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+		  vblwait->request.sequence, seq, pipe);
+
+	trace_drm_vblank_event_queued(current->pid, pipe,
+				      vblwait->request.sequence);
+
+	e->event.sequence = vblwait->request.sequence;
+	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+		drm_vblank_put(dev, pipe);
+		send_vblank_event(dev, e, seq, &now);
+		vblwait->reply.sequence = seq;
+	} else {
+		/* drm_handle_vblank_events will call drm_vblank_put */
+		list_add_tail(&e->base.link, &dev->vblank_event_list);
+		vblwait->reply.sequence = vblwait->request.sequence;
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	return 0;
+
+err_unlock:
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	kfree(e);
+err_put:
+	drm_vblank_put(dev, pipe);
+	return ret;
+}
+
+/**
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param data user argument, pointing to a drm_wait_vblank structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * This function enables the vblank interrupt on the pipe requested, then
+ * sleeps waiting for the requested sequence number to occur, and drops
+ * the vblank interrupt refcount afterwards. (The vblank IRQ is disabled
+ * after a timeout if no further vblank waits are scheduled.)
+ */
+int drm_wait_vblank(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	union drm_wait_vblank *vblwait = data;
+	int ret;
+	unsigned int flags, seq, crtc, high_crtc;
+
+	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+		if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
+			return -EINVAL;
+
+	if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
+		return -EINVAL;
+
+	if (vblwait->request.type &
+	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+	      _DRM_VBLANK_HIGH_CRTC_MASK)) {
+		DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+			  vblwait->request.type,
+			  (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+			   _DRM_VBLANK_HIGH_CRTC_MASK));
+		return -EINVAL;
+	}
+
+	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+	high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+	if (high_crtc)
+		crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+	else
+		crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+	if (crtc >= dev->num_crtcs)
+		return -EINVAL;
+
+	ret = drm_vblank_get(dev, crtc);
+	if (ret) {
+		DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
+		return ret;
+	}
+	seq = drm_vblank_count(dev, crtc);
+
+	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
+	case _DRM_VBLANK_RELATIVE:
+		vblwait->request.sequence += seq;
+		vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+		/* fall through to the absolute case */
+	case _DRM_VBLANK_ABSOLUTE:
+		break;
+	default:
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (flags & _DRM_VBLANK_EVENT) {
+		/* Must hold on to the vblank ref until the event fires;
+		 * drm_vblank_put() will then be called asynchronously.
+		 */
+		return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+	}
+
+	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+	    (seq - vblwait->request.sequence) <= (1<<23)) {
+		vblwait->request.sequence = seq + 1;
+	}
+
+	DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+		  vblwait->request.sequence, crtc);
+	dev->last_vblank_wait[crtc] = vblwait->request.sequence;
+	DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+		    (((drm_vblank_count(dev, crtc) -
+		       vblwait->request.sequence) <= (1 << 23)) ||
+		     !dev->irq_enabled));
+
+	if (ret != -EINTR) {
+		struct timeval now;
+
+		vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
+		vblwait->reply.tval_sec = now.tv_sec;
+		vblwait->reply.tval_usec = now.tv_usec;
+
+		DRM_DEBUG("returning %d to client\n",
+			  vblwait->reply.sequence);
+	} else {
+		DRM_DEBUG("vblank wait interrupted by signal\n");
+	}
+
+done:
+	drm_vblank_put(dev, crtc);
+	return ret;
+}
+
+static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+{
+	struct drm_pending_vblank_event *e, *t;
+	struct timeval now;
+	unsigned long flags;
+	unsigned int seq;
+
+	seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+		if (e->pipe != crtc)
+			continue;
+		if ((seq - e->event.sequence) > (1<<23))
+			continue;
+
+		DRM_DEBUG("vblank event on %d, current %d\n",
+			  e->event.sequence, seq);
+
+		list_del(&e->base.link);
+		drm_vblank_put(dev, e->pipe);
+		send_vblank_event(dev, e, seq, &now);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	trace_drm_vblank_event(crtc, seq);
+}
+
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+bool drm_handle_vblank(struct drm_device *dev, int crtc)
+{
+	u32 vblcount;
+	s64 diff_ns;
+	struct timeval tvblank;
+	unsigned long irqflags;
+
+	if (!dev->num_crtcs)
+		return false;
+
+	/* Need timestamp lock to prevent concurrent execution with
+	 * vblank enable/disable, as this would cause inconsistent
+	 * or corrupted timestamps and vblank counts.
+	 */
+	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+	/* Vblank irq handling disabled. Nothing to do. */
+	if (!dev->vblank_enabled[crtc]) {
+		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+		return false;
+	}
+
+	/* Fetch corresponding timestamp for this vblank interval from
+	 * driver and store it in proper slot of timestamp ringbuffer.
+	 */
+
+	/* Get current timestamp and count. */
+	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+	/* Compute time difference to timestamp of last vblank */
+	diff_ns = timeval_to_ns(&tvblank) -
+		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+	/* Update the vblank timestamp and count only if there is at least a
+	 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanosecond difference between the
+	 * last stored timestamp and the current timestamp. A smaller
+	 * difference means basically identical timestamps, which happens if
+	 * this vblank has already been processed and this is a redundant
+	 * call, e.g., due to spurious vblank interrupts. We need to ignore
+	 * those for accounting.
+	 */
+	if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+		/* Store new timestamp in ringbuffer. */
+		vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+
+		/* Increment cooked vblank count. This also atomically commits
+		 * the timestamp computed above.
+		 */
+		smp_mb__before_atomic_inc();
+		atomic_inc(&dev->_vblank_count[crtc]);
+		smp_mb__after_atomic_inc();
+	} else {
+		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+			  crtc, (int) diff_ns);
+	}
+
+	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	drm_handle_vblank_events(dev, crtc);
+
+	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+	return true;
+}
+EXPORT_SYMBOL(drm_handle_vblank);
diff --git a/linux-imx/drivers/gpu/drm/drm_lock.c b/linux-imx/drivers/gpu/drm/drm_lock.c
new file mode 100644
index 0000000..d752c96
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_lock.c
@@ -0,0 +1,373 @@
+/**
+ * \file drm_lock.c
+ * IOCTLs for locking
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+
+static int drm_notifier(void *priv);
+
+static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+
+/**
+ * Lock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Add the current task to the lock wait queue, and attempt to take the lock.
+ */
+int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	DECLARE_WAITQUEUE(entry, current);
+	struct drm_lock *lock = data;
+	struct drm_master *master = file_priv->master;
+	int ret = 0;
+
+	++file_priv->lock_count;
+
+	if (lock->context == DRM_KERNEL_CONTEXT) {
+		DRM_ERROR("Process %d using kernel context %d\n",
+			  task_pid_nr(current), lock->context);
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+		  lock->context, task_pid_nr(current),
+		  master->lock.hw_lock->lock, lock->flags);
+
+	add_wait_queue(&master->lock.lock_queue, &entry);
+	spin_lock_bh(&master->lock.spinlock);
+	master->lock.user_waiters++;
+	spin_unlock_bh(&master->lock.spinlock);
+
+	for (;;) {
+		__set_current_state(TASK_INTERRUPTIBLE);
+		if (!master->lock.hw_lock) {
+			/* Device has been unregistered */
+			send_sig(SIGTERM, current, 0);
+			ret = -EINTR;
+			break;
+		}
+		if (drm_lock_take(&master->lock, lock->context)) {
+			master->lock.file_priv = file_priv;
+			master->lock.lock_time = jiffies;
+			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+			break;	/* Got lock */
+		}
+
+		/* Contention */
+		mutex_unlock(&drm_global_mutex);
+		schedule();
+		mutex_lock(&drm_global_mutex);
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+	}
+	spin_lock_bh(&master->lock.spinlock);
+	master->lock.user_waiters--;
+	spin_unlock_bh(&master->lock.spinlock);
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&master->lock.lock_queue, &entry);
+
+	DRM_DEBUG("%d %s\n", lock->context,
+		  ret ? "interrupted" : "has lock");
+	if (ret)
+		return ret;
+
+	/* Don't block all signals on the master process for now.
+	 * This is probably not the correct answer, but it lets us debug
+	 * the xkb / X server interaction for now.
+	 */
+	if (!file_priv->is_master) {
+		sigemptyset(&dev->sigmask);
+		sigaddset(&dev->sigmask, SIGSTOP);
+		sigaddset(&dev->sigmask, SIGTSTP);
+		sigaddset(&dev->sigmask, SIGTTIN);
+		sigaddset(&dev->sigmask, SIGTTOU);
+		dev->sigdata.context = lock->context;
+		dev->sigdata.lock = master->lock.hw_lock;
+		block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+	}
+
+	if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) {
+		if (dev->driver->dma_quiescent(dev)) {
+			DRM_DEBUG("%d waiting for DMA quiescent\n",
+				  lock->context);
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Unlock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Transfer and free the lock.
+ */
+int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_lock *lock = data;
+	struct drm_master *master = file_priv->master;
+
+	if (lock->context == DRM_KERNEL_CONTEXT) {
+		DRM_ERROR("Process %d using kernel context %d\n",
+			  task_pid_nr(current), lock->context);
+		return -EINVAL;
+	}
+
+	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+	if (drm_lock_free(&master->lock, lock->context)) {
+		/* FIXME: Should really bail out here. */
+	}
+
+	unblock_all_signals();
+	return 0;
+}
+
+/**
+ * Take the heavyweight lock.
+ *
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return one if the lock is held, or zero otherwise.
+ *
+ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static
+int drm_lock_take(struct drm_lock_data *lock_data,
+		  unsigned int context)
+{
+	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	spin_lock_bh(&lock_data->spinlock);
+	do {
+		old = *lock;
+		if (old & _DRM_LOCK_HELD)
+			new = old | _DRM_LOCK_CONT;
+		else {
+			new = context | _DRM_LOCK_HELD |
+				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+				 _DRM_LOCK_CONT : 0);
+		}
+		prev = cmpxchg(lock, old, new);
+	} while (prev != old);
+	spin_unlock_bh(&lock_data->spinlock);
+
+	if (_DRM_LOCKING_CONTEXT(old) == context) {
+		if (old & _DRM_LOCK_HELD) {
+			if (context != DRM_KERNEL_CONTEXT) {
+				DRM_ERROR("%d holds heavyweight lock\n",
+					  context);
+			}
+			return 0;
+		}
+	}
+
+	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
+		/* Have lock */
+		return 1;
+	}
+	return 0;
+}
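+
+/*
+ * The loop above is the classic lock-free read-modify-write pattern:
+ * read the old value, compute the desired new value, and retry if some
+ * other CPU changed the word in the meantime. A generic sketch of the
+ * idiom (transform() stands for any pure function of the old value):
+ *
+ *	do {
+ *		old = *word;
+ *		new = transform(old);
+ *	} while (cmpxchg(word, old, new) != old);
+ */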
+
+/**
+ * This takes a lock forcibly and hands it to context. Should ONLY be used
+ * inside *_unlock to give the lock to the kernel before calling *_dma_schedule.
+ *
+ * \param dev DRM device.
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return always one.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
+			     unsigned int context)
+{
+	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	lock_data->file_priv = NULL;
+	do {
+		old = *lock;
+		new = context | _DRM_LOCK_HELD;
+		prev = cmpxchg(lock, old, new);
+	} while (prev != old);
+	return 1;
+}
+
+/**
+ * Free lock.
+ *
+ * \param dev DRM device.
+ * \param lock lock.
+ * \param context context.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
+ * waiting on the lock queue.
+ */
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
+{
+	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	spin_lock_bh(&lock_data->spinlock);
+	if (lock_data->kernel_waiters != 0) {
+		drm_lock_transfer(lock_data, 0);
+		lock_data->idle_has_lock = 1;
+		spin_unlock_bh(&lock_data->spinlock);
+		return 1;
+	}
+	spin_unlock_bh(&lock_data->spinlock);
+
+	do {
+		old = *lock;
+		new = _DRM_LOCKING_CONTEXT(old);
+		prev = cmpxchg(lock, old, new);
+	} while (prev != old);
+
+	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+		DRM_ERROR("%d freed heavyweight lock held by %d\n",
+			  context, _DRM_LOCKING_CONTEXT(old));
+		return 1;
+	}
+	wake_up_interruptible(&lock_data->lock_queue);
+	return 0;
+}
+
+/**
+ * If we get here, it means that the process has called DRM_IOCTL_LOCK
+ * without calling DRM_IOCTL_UNLOCK.
+ *
+ * If the lock is not held, then let the signal proceed as usual.  If the lock
+ * is held, then set the contended flag and keep the signal blocked.
+ *
+ * \param priv pointer to a drm_sigdata structure.
+ * \return one if the signal should be delivered normally, or zero if the
+ * signal should be blocked.
+ */
+static int drm_notifier(void *priv)
+{
+	struct drm_sigdata *s = (struct drm_sigdata *) priv;
+	unsigned int old, new, prev;
+
+	/* Allow signal delivery if lock isn't held */
+	if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
+	    || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
+		return 1;
+
+	/* Otherwise, set the flag to force a call to drmUnlock */
+	do {
+		old = s->lock->lock;
+		new = old | _DRM_LOCK_CONT;
+		prev = cmpxchg(&s->lock->lock, old, new);
+	} while (prev != old);
+	return 0;
+}
+
+/**
+ * This function returns immediately and takes the hw lock with the kernel
+ * context if it is free; otherwise it gets the highest priority when and
+ * if the lock is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ get the lock _unless_
+ * it is held by a blocked process. (In the latter case an explicit wait
+ * for the hardware lock would cause a deadlock, which is why the
+ * "idlelock" was invented.)
+ *
+ * This should be sufficient to wait for GPU idle without having to worry
+ * about starvation.
+ */
+
+void drm_idlelock_take(struct drm_lock_data *lock_data)
+{
+	int ret;
+
+	spin_lock_bh(&lock_data->spinlock);
+	lock_data->kernel_waiters++;
+	if (!lock_data->idle_has_lock) {
+
+		spin_unlock_bh(&lock_data->spinlock);
+		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+		spin_lock_bh(&lock_data->spinlock);
+
+		if (ret == 1)
+			lock_data->idle_has_lock = 1;
+	}
+	spin_unlock_bh(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
+
+void drm_idlelock_release(struct drm_lock_data *lock_data)
+{
+	unsigned int old, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	spin_lock_bh(&lock_data->spinlock);
+	if (--lock_data->kernel_waiters == 0) {
+		if (lock_data->idle_has_lock) {
+			do {
+				old = *lock;
+				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+			} while (prev != old);
+			wake_up_interruptible(&lock_data->lock_queue);
+			lock_data->idle_has_lock = 0;
+		}
+	}
+	spin_unlock_bh(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_release);
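+
+/*
+ * Usage sketch (illustrative; wait_for_engine_idle() is a hypothetical
+ * driver helper). The idlelock keeps userspace from re-acquiring the
+ * hardware lock while the driver waits for the GPU to drain:
+ *
+ *	drm_idlelock_take(&master->lock);
+ *	wait_for_engine_idle(dev);
+ *	drm_idlelock_release(&master->lock);
+ */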
+
+int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_master *master = file_priv->master;
+	return (file_priv->lock_count && master->lock.hw_lock &&
+		_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
+		master->lock.file_priv == file_priv);
+}
diff --git a/linux-imx/drivers/gpu/drm/drm_memory.c b/linux-imx/drivers/gpu/drm/drm_memory.c
new file mode 100644
index 0000000..126d50e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_memory.c
@@ -0,0 +1,144 @@
+/**
+ * \file drm_memory.c
+ * Memory management wrappers for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/highmem.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+
+#if __OS_HAS_AGP
+static void *agp_remap(unsigned long offset, unsigned long size,
+		       struct drm_device * dev)
+{
+	unsigned long i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE;
+	struct drm_agp_mem *agpmem;
+	struct page **page_map;
+	struct page **phys_page_map;
+	void *addr;
+
+	size = PAGE_ALIGN(size);
+
+#ifdef __alpha__
+	offset -= dev->hose->mem_space->start;
+#endif
+
+	list_for_each_entry(agpmem, &dev->agp->memory, head)
+		if (agpmem->bound <= offset
+		    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
+		    (offset + size))
+			break;
+	if (&agpmem->head == &dev->agp->memory)
+		return NULL;
+
+	/*
+	 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+	 * the CPU do not get remapped by the GART.  We fix this by using the kernel's
+	 * page-table instead (that's probably faster anyhow...).
+	 */
+	/* note: use vmalloc() because num_pages could be large... */
+	page_map = vmalloc(num_pages * sizeof(struct page *));
+	if (!page_map)
+		return NULL;
+
+	phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
+	for (i = 0; i < num_pages; ++i)
+		page_map[i] = phys_page_map[i];
+	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+	vfree(page_map);
+
+	return addr;
+}
+
+/** Wrapper around agp_free_memory() */
+void drm_free_agp(DRM_AGP_MEM * handle, int pages)
+{
+	agp_free_memory(handle);
+}
+EXPORT_SYMBOL(drm_free_agp);
+
+/** Wrapper around agp_bind_memory() */
+int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+{
+	return agp_bind_memory(handle, start);
+}
+
+/** Wrapper around agp_unbind_memory() */
+int drm_unbind_agp(DRM_AGP_MEM * handle)
+{
+	return agp_unbind_memory(handle);
+}
+EXPORT_SYMBOL(drm_unbind_agp);
+
+#else  /*  __OS_HAS_AGP  */
+static inline void *agp_remap(unsigned long offset, unsigned long size,
+			      struct drm_device * dev)
+{
+	return NULL;
+}
+
+#endif				/* __OS_HAS_AGP */
+
+void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
+{
+	if (drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+		map->handle = agp_remap(map->offset, map->size, dev);
+	else
+		map->handle = ioremap(map->offset, map->size);
+}
+EXPORT_SYMBOL(drm_core_ioremap);
+
+void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
+{
+	if (drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+		map->handle = agp_remap(map->offset, map->size, dev);
+	else
+		map->handle = ioremap_wc(map->offset, map->size);
+}
+EXPORT_SYMBOL(drm_core_ioremap_wc);
+
+void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
+{
+	if (!map->handle || !map->size)
+		return;
+
+	if (drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+		vunmap(map->handle);
+	else
+		iounmap(map->handle);
+}
+EXPORT_SYMBOL(drm_core_ioremapfree);
diff --git a/linux-imx/drivers/gpu/drm/drm_mm.c b/linux-imx/drivers/gpu/drm/drm_mm.c
new file mode 100644
index 0000000..07cf99c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_mm.c
@@ -0,0 +1,794 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Generic simple memory manager implementation. Intended to be used as a base
+ * class implementation for more advanced memory managers.
+ *
+ * Note that the algorithm used is quite simple and there might be substantial
+ * performance gains if a smarter free list is implemented. Currently it is
+ * just an unordered stack of free regions. This could easily be improved by
+ * using an RB-tree instead, at least if we expect heavy fragmentation.
+ *
+ * Aligned allocations can also see improvement.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mm.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/export.h>
+
+#define MM_UNUSED_TARGET 4
+
+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
+{
+	struct drm_mm_node *child;
+
+	if (atomic)
+		child = kzalloc(sizeof(*child), GFP_ATOMIC);
+	else
+		child = kzalloc(sizeof(*child), GFP_KERNEL);
+
+	if (unlikely(child == NULL)) {
+		spin_lock(&mm->unused_lock);
+		if (list_empty(&mm->unused_nodes))
+			child = NULL;
+		else {
+			child =
+			    list_entry(mm->unused_nodes.next,
+				       struct drm_mm_node, node_list);
+			list_del(&child->node_list);
+			--mm->num_unused;
+		}
+		spin_unlock(&mm->unused_lock);
+	}
+	return child;
+}
+
+/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
+ * @mm:	memory manager struct we are pre-allocating for
+ *
+ * Tops up the cache of unused nodes to MM_UNUSED_TARGET entries.
+ * Returns 0 on success or -ENOMEM if allocation fails.
+ */
+int drm_mm_pre_get(struct drm_mm *mm)
+{
+	struct drm_mm_node *node;
+
+	spin_lock(&mm->unused_lock);
+	while (mm->num_unused < MM_UNUSED_TARGET) {
+		spin_unlock(&mm->unused_lock);
+		node = kzalloc(sizeof(*node), GFP_KERNEL);
+		spin_lock(&mm->unused_lock);
+
+		if (unlikely(node == NULL)) {
+			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
+			spin_unlock(&mm->unused_lock);
+			return ret;
+		}
+		++mm->num_unused;
+		list_add_tail(&node->node_list, &mm->unused_nodes);
+	}
+	spin_unlock(&mm->unused_lock);
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_pre_get);
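+
+/*
+ * Usage sketch (illustrative; my_lock is a hypothetical caller-side
+ * spinlock, and drm_mm_get_block_atomic() is assumed to be the atomic
+ * wrapper from drm_mm.h). Callers that must allocate while holding a
+ * spinlock top up the node cache first:
+ *
+ *	if (drm_mm_pre_get(mm))
+ *		return -ENOMEM;
+ *	spin_lock(&my_lock);
+ *	node = drm_mm_get_block_atomic(hole_node, size, alignment);
+ *	spin_unlock(&my_lock);
+ */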
+
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+				 struct drm_mm_node *node,
+				 unsigned long size, unsigned alignment,
+				 unsigned long color)
+{
+	struct drm_mm *mm = hole_node->mm;
+	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+	unsigned long adj_start = hole_start;
+	unsigned long adj_end = hole_end;
+
+	BUG_ON(node->allocated);
+
+	if (mm->color_adjust)
+		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+
+	if (alignment) {
+		unsigned tmp = adj_start % alignment;
+		if (tmp)
+			adj_start += alignment - tmp;
+	}
+
+	if (adj_start == hole_start) {
+		hole_node->hole_follows = 0;
+		list_del(&hole_node->hole_stack);
+	}
+
+	node->start = adj_start;
+	node->size = size;
+	node->mm = mm;
+	node->color = color;
+	node->allocated = 1;
+
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole_node->node_list);
+
+	BUG_ON(node->start + node->size > adj_end);
+
+	node->hole_follows = 0;
+	if (__drm_mm_hole_node_start(node) < hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
+	}
+}
+
+struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+					unsigned long start,
+					unsigned long size,
+					bool atomic)
+{
+	struct drm_mm_node *hole, *node;
+	unsigned long end = start + size;
+	unsigned long hole_start;
+	unsigned long hole_end;
+
+	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+		if (hole_start > start || hole_end < end)
+			continue;
+
+		node = drm_mm_kmalloc(mm, atomic);
+		if (unlikely(node == NULL))
+			return NULL;
+
+		node->start = start;
+		node->size = size;
+		node->mm = mm;
+		node->allocated = 1;
+
+		INIT_LIST_HEAD(&node->hole_stack);
+		list_add(&node->node_list, &hole->node_list);
+
+		if (start == hole_start) {
+			hole->hole_follows = 0;
+			list_del_init(&hole->hole_stack);
+		}
+
+		node->hole_follows = 0;
+		if (end != hole_end) {
+			list_add(&node->hole_stack, &mm->hole_stack);
+			node->hole_follows = 1;
+		}
+
+		return node;
+	}
+
+	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
+	return NULL;
+}
+EXPORT_SYMBOL(drm_mm_create_block);
+
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
+					     unsigned long size,
+					     unsigned alignment,
+					     unsigned long color,
+					     int atomic)
+{
+	struct drm_mm_node *node;
+
+	node = drm_mm_kmalloc(hole_node->mm, atomic);
+	if (unlikely(node == NULL))
+		return NULL;
+
+	drm_mm_insert_helper(hole_node, node, size, alignment, color);
+
+	return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_generic);
+
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+			       unsigned long size, unsigned alignment,
+			       unsigned long color)
+{
+	struct drm_mm_node *hole_node;
+
+	hole_node = drm_mm_search_free_generic(mm, size, alignment,
+					       color, 0);
+	if (!hole_node)
+		return -ENOSPC;
+
+	drm_mm_insert_helper(hole_node, node, size, alignment, color);
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node_generic);
+
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+		       unsigned long size, unsigned alignment)
+{
+	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
+}
+EXPORT_SYMBOL(drm_mm_insert_node);
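+
+/*
+ * Usage sketch for the embedded-node API (illustrative; struct my_buffer
+ * is hypothetical). The node is embedded in the driver object, zeroed
+ * before insertion, and removed again on free:
+ *
+ *	struct my_buffer {
+ *		struct drm_mm_node node;
+ *		...
+ *	};
+ *
+ *	ret = drm_mm_insert_node(mm, &buf->node, buf->size, alignment);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	drm_mm_remove_node(&buf->node);
+ */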
+
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+				       struct drm_mm_node *node,
+				       unsigned long size, unsigned alignment,
+				       unsigned long color,
+				       unsigned long start, unsigned long end)
+{
+	struct drm_mm *mm = hole_node->mm;
+	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+	unsigned long adj_start = hole_start;
+	unsigned long adj_end = hole_end;
+
+	BUG_ON(!hole_node->hole_follows || node->allocated);
+
+	if (adj_start < start)
+		adj_start = start;
+	if (adj_end > end)
+		adj_end = end;
+
+	if (mm->color_adjust)
+		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+
+	if (alignment) {
+		unsigned tmp = adj_start % alignment;
+		if (tmp)
+			adj_start += alignment - tmp;
+	}
+
+	if (adj_start == hole_start) {
+		hole_node->hole_follows = 0;
+		list_del(&hole_node->hole_stack);
+	}
+
+	node->start = adj_start;
+	node->size = size;
+	node->mm = mm;
+	node->color = color;
+	node->allocated = 1;
+
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole_node->node_list);
+
+	BUG_ON(node->start + node->size > adj_end);
+	BUG_ON(node->start + node->size > end);
+
+	node->hole_follows = 0;
+	if (__drm_mm_hole_node_start(node) < hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
+	}
+}
+
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long color,
+						unsigned long start,
+						unsigned long end,
+						int atomic)
+{
+	struct drm_mm_node *node;
+
+	node = drm_mm_kmalloc(hole_node->mm, atomic);
+	if (unlikely(node == NULL))
+		return NULL;
+
+	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
+				   start, end);
+
+	return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for range
+ * restricted allocations. The preallocated memory node must be cleared.
+ */
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+					unsigned long size, unsigned alignment, unsigned long color,
+					unsigned long start, unsigned long end)
+{
+	struct drm_mm_node *hole_node;
+
+	hole_node = drm_mm_search_free_in_range_generic(mm,
+							size, alignment, color,
+							start, end, 0);
+	if (!hole_node)
+		return -ENOSPC;
+
+	drm_mm_insert_helper_range(hole_node, node,
+				   size, alignment, color,
+				   start, end);
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
+
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+				unsigned long size, unsigned alignment,
+				unsigned long start, unsigned long end)
+{
+	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
+}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
+
+/**
+ * Remove a memory node from the allocator.
+ */
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
+	struct drm_mm *mm = node->mm;
+	struct drm_mm_node *prev_node;
+
+	BUG_ON(node->scanned_block || node->scanned_prev_free
+				   || node->scanned_next_free);
+
+	prev_node =
+	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+
+	if (node->hole_follows) {
+		BUG_ON(__drm_mm_hole_node_start(node) ==
+		       __drm_mm_hole_node_end(node));
+		list_del(&node->hole_stack);
+	} else
+		BUG_ON(__drm_mm_hole_node_start(node) !=
+		       __drm_mm_hole_node_end(node));
+
+
+	if (!prev_node->hole_follows) {
+		prev_node->hole_follows = 1;
+		list_add(&prev_node->hole_stack, &mm->hole_stack);
+	} else
+		list_move(&prev_node->hole_stack, &mm->hole_stack);
+
+	list_del(&node->node_list);
+	node->allocated = 0;
+}
+EXPORT_SYMBOL(drm_mm_remove_node);
+
+/*
+ * Remove a memory node from the allocator and free the allocated struct
+ * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
+ * drm_mm_get_block functions.
+ */
+void drm_mm_put_block(struct drm_mm_node *node)
+{
+
+	struct drm_mm *mm = node->mm;
+
+	drm_mm_remove_node(node);
+
+	spin_lock(&mm->unused_lock);
+	if (mm->num_unused < MM_UNUSED_TARGET) {
+		list_add(&node->node_list, &mm->unused_nodes);
+		++mm->num_unused;
+	} else
+		kfree(node);
+	spin_unlock(&mm->unused_lock);
+}
+EXPORT_SYMBOL(drm_mm_put_block);
+
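+/*
+ * Worked example for the alignment fixup in check_free_hole() below:
+ * with start = 0x1003, end = 0x2000, size = 0x800 and alignment = 0x100,
+ * start % alignment is 3, so the candidate start is rounded up by
+ * 0x100 - 3 = 0xfd to 0x1100; the hole still fits, since
+ * 0x1100 + 0x800 = 0x1900 <= 0x2000.
+ */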
+static int check_free_hole(unsigned long start, unsigned long end,
+			   unsigned long size, unsigned alignment)
+{
+	if (end - start < size)
+		return 0;
+
+	if (alignment) {
+		unsigned tmp = start % alignment;
+		if (tmp)
+			start += alignment - tmp;
+	}
+
+	return end >= start + size;
+}
+
+struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+					       unsigned long size,
+					       unsigned alignment,
+					       unsigned long color,
+					       bool best_match)
+{
+	struct drm_mm_node *entry;
+	struct drm_mm_node *best;
+	unsigned long adj_start;
+	unsigned long adj_end;
+	unsigned long best_size;
+
+	BUG_ON(mm->scanned_blocks);
+
+	best = NULL;
+	best_size = ~0UL;
+
+	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+		if (mm->color_adjust) {
+			mm->color_adjust(entry, color, &adj_start, &adj_end);
+			if (adj_end <= adj_start)
+				continue;
+		}
+
+		if (!check_free_hole(adj_start, adj_end, size, alignment))
+			continue;
+
+		if (!best_match)
+			return entry;
+
+		if (entry->size < best_size) {
+			best = entry;
+			best_size = entry->size;
+		}
+	}
+
+	return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_generic);
+
+struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+							unsigned long size,
+							unsigned alignment,
+							unsigned long color,
+							unsigned long start,
+							unsigned long end,
+							bool best_match)
+{
+	struct drm_mm_node *entry;
+	struct drm_mm_node *best;
+	unsigned long adj_start;
+	unsigned long adj_end;
+	unsigned long best_size;
+
+	BUG_ON(mm->scanned_blocks);
+
+	best = NULL;
+	best_size = ~0UL;
+
+	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+		if (adj_start < start)
+			adj_start = start;
+		if (adj_end > end)
+			adj_end = end;
+
+		if (mm->color_adjust) {
+			mm->color_adjust(entry, color, &adj_start, &adj_end);
+			if (adj_end <= adj_start)
+				continue;
+		}
+
+		if (!check_free_hole(adj_start, adj_end, size, alignment))
+			continue;
+
+		if (!best_match)
+			return entry;
+
+		if (entry->size < best_size) {
+			best = entry;
+			best_size = entry->size;
+		}
+	}
+
+	return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
+
+/**
+ * Moves an allocation. To be used with embedded struct drm_mm_node.
+ */
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+	list_replace(&old->node_list, &new->node_list);
+	list_replace(&old->hole_stack, &new->hole_stack);
+	new->hole_follows = old->hole_follows;
+	new->mm = old->mm;
+	new->start = old->start;
+	new->size = old->size;
+	new->color = old->color;
+
+	old->allocated = 0;
+	new->allocated = 1;
+}
+EXPORT_SYMBOL(drm_mm_replace_node);
+
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan(struct drm_mm *mm,
+		      unsigned long size,
+		      unsigned alignment,
+		      unsigned long color)
+{
+	mm->scan_color = color;
+	mm->scan_alignment = alignment;
+	mm->scan_size = size;
+	mm->scanned_blocks = 0;
+	mm->scan_hit_start = 0;
+	mm->scan_hit_end = 0;
+	mm->scan_check_range = 0;
+	mm->prev_scanned_node = NULL;
+}
+EXPORT_SYMBOL(drm_mm_init_scan);
+
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. This version is for range-restricted scans.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+				 unsigned long size,
+				 unsigned alignment,
+				 unsigned long color,
+				 unsigned long start,
+				 unsigned long end)
+{
+	mm->scan_color = color;
+	mm->scan_alignment = alignment;
+	mm->scan_size = size;
+	mm->scanned_blocks = 0;
+	mm->scan_hit_start = 0;
+	mm->scan_hit_end = 0;
+	mm->scan_start = start;
+	mm->scan_end = end;
+	mm->scan_check_range = 1;
+	mm->prev_scanned_node = NULL;
+}
+EXPORT_SYMBOL(drm_mm_init_scan_with_range);
+
+/**
+ * Add a node to the scan list that might be freed to make space for the
+ * desired hole.
+ *
+ * Returns non-zero if a hole has been found, zero otherwise.
+ */
+int drm_mm_scan_add_block(struct drm_mm_node *node)
+{
+	struct drm_mm *mm = node->mm;
+	struct drm_mm_node *prev_node;
+	unsigned long hole_start, hole_end;
+	unsigned long adj_start, adj_end;
+
+	mm->scanned_blocks++;
+
+	BUG_ON(node->scanned_block);
+	node->scanned_block = 1;
+
+	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+			       node_list);
+
+	node->scanned_preceeds_hole = prev_node->hole_follows;
+	prev_node->hole_follows = 1;
+	list_del(&node->node_list);
+	node->node_list.prev = &prev_node->node_list;
+	node->node_list.next = &mm->prev_scanned_node->node_list;
+	mm->prev_scanned_node = node;
+
+	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
+	adj_end = hole_end = drm_mm_hole_node_end(prev_node);
+
+	if (mm->scan_check_range) {
+		if (adj_start < mm->scan_start)
+			adj_start = mm->scan_start;
+		if (adj_end > mm->scan_end)
+			adj_end = mm->scan_end;
+	}
+
+	if (mm->color_adjust)
+		mm->color_adjust(prev_node, mm->scan_color,
+				 &adj_start, &adj_end);
+
+	if (check_free_hole(adj_start, adj_end,
+			    mm->scan_size, mm->scan_alignment)) {
+		mm->scan_hit_start = hole_start;
+		mm->scan_hit_end = hole_end;
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_scan_add_block);
+
+/**
+ * Remove a node from the scan list.
+ *
+ * Nodes _must_ be removed in exactly the reverse order from the scan list as
+ * they have been added, otherwise the internal state of the memory manager
+ * will be corrupted.
+ *
+ * When the scan list is empty, the selected memory nodes can be freed. An
+ * immediately following drm_mm_search_free with best_match = 0 will then
+ * return the just freed block (because it's at the top of the hole_stack
+ * list).
+ *
+ * Returns one if this block should be evicted, zero otherwise. Will always
+ * return zero when no hole has been found.
+ */
+int drm_mm_scan_remove_block(struct drm_mm_node *node)
+{
+	struct drm_mm *mm = node->mm;
+	struct drm_mm_node *prev_node;
+
+	mm->scanned_blocks--;
+
+	BUG_ON(!node->scanned_block);
+	node->scanned_block = 0;
+
+	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+			       node_list);
+
+	prev_node->hole_follows = node->scanned_preceeds_hole;
+	list_add(&node->node_list, &prev_node->node_list);
+
+	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
+		node->start < mm->scan_hit_end);
+}
+EXPORT_SYMBOL(drm_mm_scan_remove_block);
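+
+/*
+ * Eviction scan usage sketch (illustrative; the lru list, scan_link and
+ * evict() are hypothetical driver-side constructs). Blocks are added in
+ * LRU order until a hole is found; since list_add() prepends, walking
+ * scan_list forwards afterwards removes them in exactly the reverse
+ * order of addition, as required:
+ *
+ *	drm_mm_init_scan(mm, size, alignment, color);
+ *	found = 0;
+ *	list_for_each_entry(obj, &lru, lru_link) {
+ *		list_add(&obj->scan_link, &scan_list);
+ *		if (drm_mm_scan_add_block(&obj->node)) {
+ *			found = 1;
+ *			break;
+ *		}
+ *	}
+ *	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
+ *		if (drm_mm_scan_remove_block(&obj->node) && found)
+ *			evict(obj);
+ *	}
+ */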
+
+int drm_mm_clean(struct drm_mm * mm)
+{
+	struct list_head *head = &mm->head_node.node_list;
+
+	return (head->next->next == head);
+}
+EXPORT_SYMBOL(drm_mm_clean);
+
+int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+{
+	INIT_LIST_HEAD(&mm->hole_stack);
+	INIT_LIST_HEAD(&mm->unused_nodes);
+	mm->num_unused = 0;
+	mm->scanned_blocks = 0;
+	spin_lock_init(&mm->unused_lock);
+
+	/* Clever trick to avoid a special case in the free hole tracking. */
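+	/*
+	 * Concretely: with start = 0 and size = 0x1000, the head node gets
+	 * start = 0x1000 and size = (unsigned long)-0x1000, so the hole
+	 * "following" it begins at 0x1000 + (-0x1000) = 0, and ends at the
+	 * next node's start (the head itself once the circular list wraps,
+	 * i.e. 0x1000). The whole managed range is thus one ordinary hole,
+	 * and the allocation paths need no empty-manager special case.
+	 */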
+	INIT_LIST_HEAD(&mm->head_node.node_list);
+	INIT_LIST_HEAD(&mm->head_node.hole_stack);
+	mm->head_node.hole_follows = 1;
+	mm->head_node.scanned_block = 0;
+	mm->head_node.scanned_prev_free = 0;
+	mm->head_node.scanned_next_free = 0;
+	mm->head_node.mm = mm;
+	mm->head_node.start = start + size;
+	mm->head_node.size = start - mm->head_node.start;
+	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+
+	mm->color_adjust = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_init);
+
+void drm_mm_takedown(struct drm_mm * mm)
+{
+	struct drm_mm_node *entry, *next;
+
+	if (!list_empty(&mm->head_node.node_list)) {
+		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
+		return;
+	}
+
+	spin_lock(&mm->unused_lock);
+	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
+		list_del(&entry->node_list);
+		kfree(entry);
+		--mm->num_unused;
+	}
+	spin_unlock(&mm->unused_lock);
+
+	BUG_ON(mm->num_unused != 0);
+}
+EXPORT_SYMBOL(drm_mm_takedown);
+
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+	struct drm_mm_node *entry;
+	unsigned long total_used = 0, total_free = 0, total = 0;
+	unsigned long hole_start, hole_end, hole_size;
+
+	hole_start = drm_mm_hole_node_start(&mm->head_node);
+	hole_end = drm_mm_hole_node_end(&mm->head_node);
+	hole_size = hole_end - hole_start;
+	if (hole_size)
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+			prefix, hole_start, hole_end,
+			hole_size);
+	total_free += hole_size;
+
+	drm_mm_for_each_node(entry, mm) {
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
+			prefix, entry->start, entry->start + entry->size,
+			entry->size);
+		total_used += entry->size;
+
+		if (entry->hole_follows) {
+			hole_start = drm_mm_hole_node_start(entry);
+			hole_end = drm_mm_hole_node_end(entry);
+			hole_size = hole_end - hole_start;
+			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+				prefix, hole_start, hole_end,
+				hole_size);
+			total_free += hole_size;
+		}
+	}
+	total = total_free + total_used;
+
+	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
+		total_used, total_free);
+}
+EXPORT_SYMBOL(drm_mm_debug_table);
+
+#if defined(CONFIG_DEBUG_FS)
+static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
+{
+	unsigned long hole_start, hole_end, hole_size;
+
+	if (entry->hole_follows) {
+		hole_start = drm_mm_hole_node_start(entry);
+		hole_end = drm_mm_hole_node_end(entry);
+		hole_size = hole_end - hole_start;
+		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+				hole_start, hole_end, hole_size);
+		return hole_size;
+	}
+
+	return 0;
+}
+
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+{
+	struct drm_mm_node *entry;
+	unsigned long total_used = 0, total_free = 0, total = 0;
+
+	total_free += drm_mm_dump_hole(m, &mm->head_node);
+
+	drm_mm_for_each_node(entry, mm) {
+		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
+				entry->start, entry->start + entry->size,
+				entry->size);
+		total_used += entry->size;
+		total_free += drm_mm_dump_hole(m, entry);
+	}
+	total = total_free + total_used;
+
+	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_dump_table);
+#endif
diff --git a/linux-imx/drivers/gpu/drm/drm_modes.c b/linux-imx/drivers/gpu/drm/drm_modes.c
new file mode 100644
index 0000000..a371ff8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_modes.c
@@ -0,0 +1,1269 @@
+/*
+ * Copyright © 1997-2003 by The XFree86 Project, Inc.
+ * Copyright © 2007 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2005-2006 Luc Verhaegen
+ * Copyright (c) 2001, Andy Ritger  aritger@nvidia.com
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the name of the copyright holder(s)
+ * and author(s) shall not be used in advertising or otherwise to promote
+ * the sale, use or other dealings in this Software without prior written
+ * authorization from the copyright holder(s) and author(s).
+ */
+
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+/**
+ * drm_mode_debug_printmodeline - debug print a mode
+ * @dev: DRM device
+ * @mode: mode to print
+ *
+ * LOCKING:
+ * None.
+ *
+ * Describe @mode using DRM_DEBUG.
+ */
+void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
+{
+	DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
+			"0x%x 0x%x\n",
+		mode->base.id, mode->name, mode->vrefresh, mode->clock,
+		mode->hdisplay, mode->hsync_start,
+		mode->hsync_end, mode->htotal,
+		mode->vdisplay, mode->vsync_start,
+		mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+}
+EXPORT_SYMBOL(drm_mode_debug_printmodeline);
+
+/**
+ * drm_cvt_mode - create a modeline based on the CVT algorithm
+ * @dev: DRM device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @reduced: whether to use reduced blanking (the simplified GTF calculation)
+ * @interlaced: whether interlace is supported
+ * @margins: whether to add margins (borders)
+ *
+ * LOCKING:
+ * none.
+ *
+ * Return the modeline based on the CVT algorithm.
+ *
+ * This function generates the modeline based on the CVT algorithm according
+ * to the hdisplay, vdisplay and vrefresh values.
+ * It is based on the VESA(TM) Coordinated Video Timing Generator by
+ * Graham Loveridge, April 9, 2003, available at
+ * http://www.elo.utfsm.cl/~elo212/docs/CVTd6r1.xls
+ *
+ * It is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c.
+ * What I have done is translate it to use integer calculation.
+ */
+#define HV_FACTOR			1000
+struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
+				      int vdisplay, int vrefresh,
+				      bool reduced, bool interlaced, bool margins)
+{
+	/* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define	CVT_MARGIN_PERCENTAGE		18
+	/* 2) character cell horizontal granularity (pixels) - default 8 */
+#define	CVT_H_GRANULARITY		8
+	/* 3) Minimum vertical porch (lines) - default 3 */
+#define	CVT_MIN_V_PORCH			3
+	/* 4) Minimum number of vertical back porch lines - default 6 */
+#define	CVT_MIN_V_BPORCH		6
+	/* Pixel Clock step (kHz) */
+#define CVT_CLOCK_STEP			250
+	struct drm_display_mode *drm_mode;
+	unsigned int vfieldrate, hperiod;
+	int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
+	int interlace;
+
+	/* allocate the drm_display_mode structure. On failure, return
+	 * directly.
+	 */
+	drm_mode = drm_mode_create(dev);
+	if (!drm_mode)
+		return NULL;
+
+	/* the CVT default refresh rate is 60Hz */
+	if (!vrefresh)
+		vrefresh = 60;
+
+	/* the required field refresh rate */
+	if (interlaced)
+		vfieldrate = vrefresh * 2;
+	else
+		vfieldrate = vrefresh;
+
+	/* horizontal pixels */
+	hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY);
+
+	/* determine the left & right borders */
+	hmargin = 0;
+	if (margins) {
+		hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+		hmargin -= hmargin % CVT_H_GRANULARITY;
+	}
+	/* find the total active pixels */
+	drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin;
+
+	/* find the number of lines per field */
+	if (interlaced)
+		vdisplay_rnd = vdisplay / 2;
+	else
+		vdisplay_rnd = vdisplay;
+
+	/* find the top & bottom borders */
+	vmargin = 0;
+	if (margins)
+		vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000;
+
+	drm_mode->vdisplay = vdisplay + 2 * vmargin;
+
+	/* Interlaced */
+	if (interlaced)
+		interlace = 1;
+	else
+		interlace = 0;
+
+	/* Determine VSync Width from aspect ratio */
+	if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay))
+		vsync = 4;
+	else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay))
+		vsync = 5;
+	else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay))
+		vsync = 6;
+	else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay))
+		vsync = 7;
+	else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay))
+		vsync = 7;
+	else /* custom */
+		vsync = 10;
+
+	if (!reduced) {
+		/* simplify the GTF calculation */
+		/* 4) Minimum time of vertical sync + back porch interval (µs)
+		 * default 550.0
+		 */
+		int tmp1, tmp2;
+#define CVT_MIN_VSYNC_BP	550
+		/* 3) Nominal HSync width (% of line period) - default 8 */
+#define CVT_HSYNC_PERCENTAGE	8
+		unsigned int hblank_percentage;
+		int vsyncandback_porch, vback_porch, hblank;
+
+		/* estimated the horizontal period */
+		tmp1 = HV_FACTOR * 1000000  -
+				CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate;
+		tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 +
+				interlace;
+		hperiod = tmp1 * 2 / (tmp2 * vfieldrate);
+
+		tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1;
+		/* 9. Find number of lines in sync + backporch */
+		if (tmp1 < (vsync + CVT_MIN_V_PORCH))
+			vsyncandback_porch = vsync + CVT_MIN_V_PORCH;
+		else
+			vsyncandback_porch = tmp1;
+		/* 10. Find number of lines in back porch */
+		vback_porch = vsyncandback_porch - vsync;
+		drm_mode->vtotal = vdisplay_rnd + 2 * vmargin +
+				vsyncandback_porch + CVT_MIN_V_PORCH;
+		/* 5) Definition of Horizontal blanking time limitation */
+		/* Gradient (%/kHz) - default 600 */
+#define CVT_M_FACTOR	600
+		/* Offset (%) - default 40 */
+#define CVT_C_FACTOR	40
+		/* Blanking time scaling factor - default 128 */
+#define CVT_K_FACTOR	128
+		/* Scaling factor weighting - default 20 */
+#define CVT_J_FACTOR	20
+#define CVT_M_PRIME	(CVT_M_FACTOR * CVT_K_FACTOR / 256)
+#define CVT_C_PRIME	((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \
+			 CVT_J_FACTOR)
+		/* 12. Find ideal blanking duty cycle from formula */
+		hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME *
+					hperiod / 1000;
+		/* 13. Blanking time */
+		if (hblank_percentage < 20 * HV_FACTOR)
+			hblank_percentage = 20 * HV_FACTOR;
+		hblank = drm_mode->hdisplay * hblank_percentage /
+			 (100 * HV_FACTOR - hblank_percentage);
+		hblank -= hblank % (2 * CVT_H_GRANULARITY);
+		/* 14. Find the total pixels per line */
+		drm_mode->htotal = drm_mode->hdisplay + hblank;
+		drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
+		drm_mode->hsync_start = drm_mode->hsync_end -
+			(drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100;
+		drm_mode->hsync_start += CVT_H_GRANULARITY -
+			drm_mode->hsync_start % CVT_H_GRANULARITY;
+		/* fill the Vsync values */
+		drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH;
+		drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+	} else {
+		/* Reduced blanking */
+		/* Minimum vertical blanking interval time (µs)- default 460 */
+#define CVT_RB_MIN_VBLANK	460
+		/* Fixed number of clocks for horizontal sync */
+#define CVT_RB_H_SYNC		32
+		/* Fixed number of clocks for horizontal blanking */
+#define CVT_RB_H_BLANK		160
+		/* Fixed number of lines for vertical front porch - default 3*/
+#define CVT_RB_VFPORCH		3
+		int vbilines;
+		int tmp1, tmp2;
+		/* 8. Estimate Horizontal period. */
+		tmp1 = HV_FACTOR * 1000000 -
+			CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate;
+		tmp2 = vdisplay_rnd + 2 * vmargin;
+		hperiod = tmp1 / (tmp2 * vfieldrate);
+		/* 9. Find number of lines in vertical blanking */
+		vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1;
+		/* 10. Check if vertical blanking is sufficient */
+		if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH))
+			vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH;
+		/* 11. Find total number of lines in vertical field */
+		drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines;
+		/* 12. Find total number of pixels in a line */
+		drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
+		/* Fill in HSync values */
+		drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
+		drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+		/* Fill in VSync values */
+		drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
+		drm_mode->vsync_end = drm_mode->vsync_start + vsync;
+	}
+	/* 15/13. Find pixel clock frequency (kHz for xf86) */
+	drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
+	drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
+	/* 18/16. Find actual vertical frame frequency */
+	/* ignore - just set the mode flag for interlaced */
+	if (interlaced) {
+		drm_mode->vtotal *= 2;
+		drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	}
+	/* Fill the mode line name */
+	drm_mode_set_name(drm_mode);
+	if (reduced)
+		drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC |
+					DRM_MODE_FLAG_NVSYNC);
+	else
+		drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC |
+					DRM_MODE_FLAG_NHSYNC);
+
+	return drm_mode;
+}
+EXPORT_SYMBOL(drm_cvt_mode);
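
A minimal usage sketch for the helper above; the 1920x1080@60 reduced-blanking
parameters are illustrative and error handling is reduced to a NULL check:

	/* hdisplay, vdisplay, vrefresh, reduced blanking, progressive,
	 * no margins */
	struct drm_display_mode *mode =
		drm_cvt_mode(dev, 1920, 1080, 60, true, false, false);
	if (!mode)
		return -ENOMEM;	/* drm_mode_create() failed */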
+
+/**
+ * drm_gtf_mode_complex - create the modeline based on the full GTF algorithm
+ *
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @interlaced: whether to compute an interlaced mode
+ * @margins: desired margin size
+ * @GTF_[MCKJ]: extended GTF formula parameters
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return the modeline based on the full GTF algorithm.
+ *
+ * GTF feature blocks specify C and J in multiples of 0.5, so we pass them
+ * in here multiplied by two.  For a C of 40, pass in 80.
+ */
+struct drm_display_mode *
+drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
+		     int vrefresh, bool interlaced, int margins,
+		     int GTF_M, int GTF_2C, int GTF_K, int GTF_2J)
+{	/* 1) top/bottom margin size (% of height) - default: 1.8, */
+#define	GTF_MARGIN_PERCENTAGE		18
+	/* 2) character cell horizontal granularity (pixels) - default 8 */
+#define	GTF_CELL_GRAN			8
+	/* 3) Minimum vertical porch (lines) - default 3 */
+#define	GTF_MIN_V_PORCH			1
+	/* width of vsync in lines */
+#define V_SYNC_RQD			3
+	/* width of hsync as % of total line */
+#define H_SYNC_PERCENT			8
+	/* min time of vsync + back porch (microsec) */
+#define MIN_VSYNC_PLUS_BP		550
+	/* C' and M' are part of the Blanking Duty Cycle computation */
+#define GTF_C_PRIME	((((GTF_2C - GTF_2J) * GTF_K / 256) + GTF_2J) / 2)
+#define GTF_M_PRIME	(GTF_K * GTF_M / 256)
+	struct drm_display_mode *drm_mode;
+	unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
+	int top_margin, bottom_margin;
+	int interlace;
+	unsigned int hfreq_est;
+	int vsync_plus_bp, vback_porch;
+	unsigned int vtotal_lines, vfieldrate_est, hperiod;
+	unsigned int vfield_rate, vframe_rate;
+	int left_margin, right_margin;
+	unsigned int total_active_pixels, ideal_duty_cycle;
+	unsigned int hblank, total_pixels, pixel_freq;
+	int hsync, hfront_porch, vodd_front_porch_lines;
+	unsigned int tmp1, tmp2;
+
+	drm_mode = drm_mode_create(dev);
+	if (!drm_mode)
+		return NULL;
+
+	/* 1. In order to give correct results, the number of horizontal
+	 * pixels requested is first processed to ensure that it is divisible
+	 * by the character size, by rounding it to the nearest character
+	 * cell boundary:
+	 */
+	hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+	hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
+
+	/* 2. If interlace is requested, the number of vertical lines assumed
+	 * by the calculation must be halved, as the computation calculates
+	 * the number of vertical lines per field.
+	 */
+	if (interlaced)
+		vdisplay_rnd = vdisplay / 2;
+	else
+		vdisplay_rnd = vdisplay;
+
+	/* 3. Find the frame rate required: */
+	if (interlaced)
+		vfieldrate_rqd = vrefresh * 2;
+	else
+		vfieldrate_rqd = vrefresh;
+
+	/* 4. Find number of lines in Top margin: */
+	top_margin = 0;
+	if (margins)
+		top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+				1000;
+	/* 5. Find number of lines in bottom margin: */
+	bottom_margin = top_margin;
+
+	/* 6. If interlace is required, then set variable interlace: */
+	if (interlaced)
+		interlace = 1;
+	else
+		interlace = 0;
+
+	/* 7. Estimate the Horizontal frequency */
+	{
+		tmp1 = (1000000  - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
+		tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
+				2 + interlace;
+		hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
+	}
+
+	/* 8. Find the number of lines in V sync + back porch */
+	/* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
+	vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
+	vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
+	/*  9. Find the number of lines in V back porch alone: */
+	vback_porch = vsync_plus_bp - V_SYNC_RQD;
+	/*  10. Find the total number of lines in Vertical field period: */
+	vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
+			vsync_plus_bp + GTF_MIN_V_PORCH;
+	/*  11. Estimate the Vertical field frequency: */
+	vfieldrate_est = hfreq_est / vtotal_lines;
+	/*  12. Find the actual horizontal period: */
+	hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
+
+	/*  13. Find the actual Vertical field frequency: */
+	vfield_rate = hfreq_est / vtotal_lines;
+	/*  14. Find the Vertical frame frequency: */
+	if (interlaced)
+		vframe_rate = vfield_rate / 2;
+	else
+		vframe_rate = vfield_rate;
+	/*  15. Find number of pixels in left margin: */
+	if (margins)
+		left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
+				1000;
+	else
+		left_margin = 0;
+
+	/* 16.Find number of pixels in right margin: */
+	right_margin = left_margin;
+	/* 17.Find total number of active pixels in image and left and right */
+	total_active_pixels = hdisplay_rnd + left_margin + right_margin;
+	/* 18.Find the ideal blanking duty cycle from the blanking duty cycle
+	 * equation: */
+	ideal_duty_cycle = GTF_C_PRIME * 1000 -
+				(GTF_M_PRIME * 1000000 / hfreq_est);
+	/* 19.Find the number of pixels in the blanking time to the nearest
+	 * double character cell: */
+	hblank = total_active_pixels * ideal_duty_cycle /
+			(100000 - ideal_duty_cycle);
+	hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
+	hblank = hblank * 2 * GTF_CELL_GRAN;
+	/* 20.Find total number of pixels: */
+	total_pixels = total_active_pixels + hblank;
+	/* 21.Find pixel clock frequency: */
+	pixel_freq = total_pixels * hfreq_est / 1000;
+	/* Stage 1 computations are now complete; I should really pass
+	 * the results to another function and do the Stage 2 computations,
+	 * but I only need a few more values so I'll just append the
+	 * computations here for now */
+	/* 17. Find the number of pixels in the horizontal sync period: */
+	hsync = H_SYNC_PERCENT * total_pixels / 100;
+	hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
+	hsync = hsync * GTF_CELL_GRAN;
+	/* 18. Find the number of pixels in horizontal front porch period */
+	hfront_porch = hblank / 2 - hsync;
+	/*  36. Find the number of lines in the odd front porch period: */
+	vodd_front_porch_lines = GTF_MIN_V_PORCH;
+
+	/* finally, pack the results in the mode struct */
+	drm_mode->hdisplay = hdisplay_rnd;
+	drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
+	drm_mode->hsync_end = drm_mode->hsync_start + hsync;
+	drm_mode->htotal = total_pixels;
+	drm_mode->vdisplay = vdisplay_rnd;
+	drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
+	drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
+	drm_mode->vtotal = vtotal_lines;
+
+	drm_mode->clock = pixel_freq;
+
+	if (interlaced) {
+		drm_mode->vtotal *= 2;
+		drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	}
+
+	drm_mode_set_name(drm_mode);
+	if (GTF_M == 600 && GTF_2C == 80 && GTF_K == 128 && GTF_2J == 40)
+		drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;
+	else
+		drm_mode->flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC;
+
+	return drm_mode;
+}
+EXPORT_SYMBOL(drm_gtf_mode_complex);
+
+/**
+ * drm_gtf_mode - create the modeline based on the GTF algorithm
+ *
+ * @dev: drm device
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @vrefresh: vrefresh rate
+ * @interlaced: whether to compute an interlaced mode
+ * @margins: whether margins are requested
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return the modeline based on the GTF algorithm.
+ *
+ * This function creates the modeline based on the GTF algorithm.
+ * The Generalized Timing Formula is derived from:
+ *	GTF Spreadsheet by Andy Morrish (1/5/97)
+ *	available at http://www.vesa.org
+ *
+ * It is a translation of xserver/hw/xfree86/modes/xf86gtf.c into integer
+ * arithmetic, and also draws on fb_get_mode() in drivers/video/fbmon.c.
+ *
+ * Standard GTF parameters:
+ * M = 600
+ * C = 40
+ * K = 128
+ * J = 20
+ */
+struct drm_display_mode *
+drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
+	     bool lace, int margins)
+{
+	return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
+				    margins, 600, 40 * 2, 128, 20 * 2);
+}
+EXPORT_SYMBOL(drm_gtf_mode);
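
A short sketch contrasting the two entry points; the secondary-curve M/C/K/J
values are illustrative, and note that C and J are passed doubled, per the
comment on drm_gtf_mode_complex():

	/* Standard GTF, as the wrapper above does internally: */
	mode = drm_gtf_mode(dev, 1024, 768, 75, false, 0);

	/* Hypothetical secondary GTF curve (e.g. from an EDID feature
	 * block): M = 300, C = 30, K = 128, J = 20. */
	mode = drm_gtf_mode_complex(dev, 1024, 768, 75, false, 0,
				    300, 30 * 2, 128, 20 * 2);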
+
+#ifdef CONFIG_VIDEOMODE_HELPERS
+int drm_display_mode_from_videomode(const struct videomode *vm,
+				    struct drm_display_mode *dmode)
+{
+	dmode->hdisplay = vm->hactive;
+	dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
+	dmode->hsync_end = dmode->hsync_start + vm->hsync_len;
+	dmode->htotal = dmode->hsync_end + vm->hback_porch;
+
+	dmode->vdisplay = vm->vactive;
+	dmode->vsync_start = dmode->vdisplay + vm->vfront_porch;
+	dmode->vsync_end = dmode->vsync_start + vm->vsync_len;
+	dmode->vtotal = dmode->vsync_end + vm->vback_porch;
+
+	dmode->clock = vm->pixelclock / 1000;
+
+	dmode->flags = 0;
+	if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
+		dmode->flags |= DRM_MODE_FLAG_PHSYNC;
+	else if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
+		dmode->flags |= DRM_MODE_FLAG_NHSYNC;
+	if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
+		dmode->flags |= DRM_MODE_FLAG_PVSYNC;
+	else if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
+		dmode->flags |= DRM_MODE_FLAG_NVSYNC;
+	if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+		dmode->flags |= DRM_MODE_FLAG_INTERLACE;
+	if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN)
+		dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
+	drm_mode_set_name(dmode);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
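
As a sketch of the conversion, a hand-filled struct videomode with roughly
VESA 640x480@60 numbers (illustrative) and the fields it yields:

	struct videomode vm = {
		.pixelclock = 25175000,			/* Hz */
		.hactive = 640, .hfront_porch = 16,
		.hsync_len = 96, .hback_porch = 48,
		.vactive = 480, .vfront_porch = 10,
		.vsync_len = 2, .vback_porch = 33,
		.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
	};
	struct drm_display_mode dmode = { };

	drm_display_mode_from_videomode(&vm, &dmode);
	/* dmode.clock = 25175 kHz, dmode.htotal = 800, dmode.vtotal = 525,
	 * dmode.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC */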
+
+#ifdef CONFIG_OF
+/**
+ * of_get_drm_display_mode - get a drm_display_mode from devicetree
+ * @np: device_node with the timing specification
+ * @dmode: will be set to the return value
+ * @index: index into the list of display timings in devicetree
+ *
+ * This function is expensive and should only be used, if only one mode is to be
+ * read from DT. To get multiple modes start with of_get_display_timings and
+ * work with that instead.
+ */
+int of_get_drm_display_mode(struct device_node *np,
+			    struct drm_display_mode *dmode, int index)
+{
+	struct videomode vm;
+	int ret;
+
+	ret = of_get_videomode(np, &vm, index);
+	if (ret)
+		return ret;
+
+	drm_display_mode_from_videomode(&vm, dmode);
+
+	pr_debug("%s: got %dx%d display mode from %s\n",
+		of_node_full_name(np), vm.hactive, vm.vactive, np->name);
+	drm_mode_debug_printmodeline(dmode);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
+#endif /* CONFIG_OF */
+#endif /* CONFIG_VIDEOMODE_HELPERS */
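
On the device-tree side this pairs with a display-timings node; a sketch with
illustrative values (the binding lives in
Documentation/devicetree/bindings/video/display-timing.txt):

	/* DT fragment (illustrative):
	 *	display-timings {
	 *		timing0 {
	 *			clock-frequency = <25175000>;
	 *			hactive = <640>;  vactive = <480>;
	 *			hfront-porch = <16>; hsync-len = <96>;
	 *			hback-porch = <48>;
	 *			vfront-porch = <10>; vsync-len = <2>;
	 *			vback-porch = <33>;
	 *		};
	 *	};
	 */
	struct drm_display_mode mode;
	int ret = of_get_drm_display_mode(np, &mode, 0); /* first timing */
	if (ret)
		return ret;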
+
+/**
+ * drm_mode_set_name - set the name on a mode
+ * @mode: name will be set in this mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Set the name of @mode to a standard format.
+ */
+void drm_mode_set_name(struct drm_display_mode *mode)
+{
+	bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+	snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+		 mode->hdisplay, mode->vdisplay,
+		 interlaced ? "i" : "");
+}
+EXPORT_SYMBOL(drm_mode_set_name);
+
+/**
+ * drm_mode_list_concat - move modes from one list to another
+ * @head: source list
+ * @new: dst list
+ *
+ * LOCKING:
+ * Caller must ensure both lists are locked.
+ *
+ * Move all the modes from @head to @new.
+ */
+void drm_mode_list_concat(struct list_head *head, struct list_head *new)
+{
+	struct list_head *entry, *tmp;
+
+	list_for_each_safe(entry, tmp, head) {
+		list_move_tail(entry, new);
+	}
+}
+EXPORT_SYMBOL(drm_mode_list_concat);
+
+/**
+ * drm_mode_width - get the width of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's width (hdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->hdisplay
+ */
+int drm_mode_width(const struct drm_display_mode *mode)
+{
+	return mode->hdisplay;
+}
+EXPORT_SYMBOL(drm_mode_width);
+
+/**
+ * drm_mode_height - get the height of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's height (vdisplay) value.
+ *
+ * FIXME: is this needed?
+ *
+ * RETURNS:
+ * @mode->vdisplay
+ */
+int drm_mode_height(const struct drm_display_mode *mode)
+{
+	return mode->vdisplay;
+}
+EXPORT_SYMBOL(drm_mode_height);
+
+/**
+ * drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's hsync rate in kHz, rounded to the nearest integer.
+ */
+int drm_mode_hsync(const struct drm_display_mode *mode)
+{
+	unsigned int calc_val;
+
+	if (mode->hsync)
+		return mode->hsync;
+
+	if (mode->htotal <= 0)
+		return 0;
+
+	calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+	calc_val += 500;				/* round to 1000Hz */
+	calc_val /= 1000;				/* truncate to kHz */
+
+	return calc_val;
+}
+EXPORT_SYMBOL(drm_mode_hsync);
+
+/**
+ * drm_mode_vrefresh - get the vrefresh of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
+ *
+ * FIXME: why is this needed?  shouldn't vrefresh be set already?
+ *
+ * RETURNS:
+ * Vertical refresh rate, rounded to the nearest integer: a computed
+ * 70.288 Hz returns 70, and 59.6 Hz returns 60.
+ */
+int drm_mode_vrefresh(const struct drm_display_mode *mode)
+{
+	int refresh = 0;
+	unsigned int calc_val;
+
+	if (mode->vrefresh > 0)
+		refresh = mode->vrefresh;
+	else if (mode->htotal > 0 && mode->vtotal > 0) {
+		int vtotal;
+		vtotal = mode->vtotal;
+		/* work out vrefresh the value will be x1000 */
+		calc_val = (mode->clock * 1000);
+		calc_val /= mode->htotal;
+		refresh = (calc_val + vtotal / 2) / vtotal;
+
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			refresh *= 2;
+		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+			refresh /= 2;
+		if (mode->vscan > 1)
+			refresh /= mode->vscan;
+	}
+	return refresh;
+}
+EXPORT_SYMBOL(drm_mode_vrefresh);
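
Worked example of the computation above, using common 1080p timings
(illustrative): clock = 148500 kHz, htotal = 2200, vtotal = 1125.

	/* calc_val = 148500 * 1000 / 2200    = 67500 (line rate, Hz)
	 * refresh  = (67500 + 1125/2) / 1125 = 60 Hz                 */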
+
+/**
+ * drm_mode_set_crtcinfo - set CRTC modesetting parameters
+ * @p: mode
+ * @adjust_flags: adjustment flags, e.g. CRTC_INTERLACE_HALVE_V
+ *
+ * LOCKING:
+ * None.
+ *
+ * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ */
+void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
+{
+	if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
+		return;
+
+	p->crtc_hdisplay = p->hdisplay;
+	p->crtc_hsync_start = p->hsync_start;
+	p->crtc_hsync_end = p->hsync_end;
+	p->crtc_htotal = p->htotal;
+	p->crtc_hskew = p->hskew;
+	p->crtc_vdisplay = p->vdisplay;
+	p->crtc_vsync_start = p->vsync_start;
+	p->crtc_vsync_end = p->vsync_end;
+	p->crtc_vtotal = p->vtotal;
+
+	if (p->flags & DRM_MODE_FLAG_INTERLACE) {
+		if (adjust_flags & CRTC_INTERLACE_HALVE_V) {
+			p->crtc_vdisplay /= 2;
+			p->crtc_vsync_start /= 2;
+			p->crtc_vsync_end /= 2;
+			p->crtc_vtotal /= 2;
+		}
+	}
+
+	if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+		p->crtc_vdisplay *= 2;
+		p->crtc_vsync_start *= 2;
+		p->crtc_vsync_end *= 2;
+		p->crtc_vtotal *= 2;
+	}
+
+	if (p->vscan > 1) {
+		p->crtc_vdisplay *= p->vscan;
+		p->crtc_vsync_start *= p->vscan;
+		p->crtc_vsync_end *= p->vscan;
+		p->crtc_vtotal *= p->vscan;
+	}
+
+	p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
+	p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
+	p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
+	p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
+}
+EXPORT_SYMBOL(drm_mode_set_crtcinfo);
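
A sketch of the interlace adjustment: for a 1080i-style mode (vdisplay 1080,
vtotal 1125, numbers illustrative) the CRTC_INTERLACE_HALVE_V pass leaves
per-field CRTC values:

	struct drm_display_mode *m = drm_mode_duplicate(dev, probed_mode);

	if (m) {
		drm_mode_set_crtcinfo(m, CRTC_INTERLACE_HALVE_V);
		/* crtc_vdisplay = 540, crtc_vtotal = 562: one field,
		 * while the user-visible vdisplay/vtotal are untouched. */
	}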
+
+
+/**
+ * drm_mode_copy - copy the mode
+ * @dst: mode to overwrite
+ * @src: mode to copy
+ *
+ * LOCKING:
+ * None.
+ *
+ * Copy an existing mode into another mode, preserving the object id
+ * of the destination mode.
+ */
+void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
+{
+	int id = dst->base.id;
+
+	*dst = *src;
+	dst->base.id = id;
+	INIT_LIST_HEAD(&dst->head);
+}
+EXPORT_SYMBOL(drm_mode_copy);
+
+/**
+ * drm_mode_duplicate - allocate and duplicate an existing mode
+ * @m: mode to duplicate
+ *
+ * LOCKING:
+ * None.
+ *
+ * Just allocate a new mode, copy the existing mode into it, and return
+ * a pointer to it.  Used to create new instances of established modes.
+ */
+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+					    const struct drm_display_mode *mode)
+{
+	struct drm_display_mode *nmode;
+
+	nmode = drm_mode_create(dev);
+	if (!nmode)
+		return NULL;
+
+	drm_mode_copy(nmode, mode);
+
+	return nmode;
+}
+EXPORT_SYMBOL(drm_mode_duplicate);
+
+/**
+ * drm_mode_equal - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+{
+	/* do clock check convert to PICOS so fb modes get matched
+	 * the same */
+	if (mode1->clock && mode2->clock) {
+		if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock))
+			return false;
+	} else if (mode1->clock != mode2->clock)
+		return false;
+
+	return drm_mode_equal_no_clocks(mode1, mode2);
+}
+EXPORT_SYMBOL(drm_mode_equal);
+
+/**
+ * drm_mode_equal_no_clocks - test modes for equality
+ * @mode1: first mode
+ * @mode2: second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Check to see if @mode1 and @mode2 are equivalent, but
+ * don't check the pixel clocks.
+ *
+ * RETURNS:
+ * True if the modes are equal, false otherwise.
+ */
+bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+{
+	if (mode1->hdisplay == mode2->hdisplay &&
+	    mode1->hsync_start == mode2->hsync_start &&
+	    mode1->hsync_end == mode2->hsync_end &&
+	    mode1->htotal == mode2->htotal &&
+	    mode1->hskew == mode2->hskew &&
+	    mode1->vdisplay == mode2->vdisplay &&
+	    mode1->vsync_start == mode2->vsync_start &&
+	    mode1->vsync_end == mode2->vsync_end &&
+	    mode1->vtotal == mode2->vtotal &&
+	    mode1->vscan == mode2->vscan &&
+	    mode1->flags == mode2->flags)
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL(drm_mode_equal_no_clocks);
+
+/**
+ * drm_mode_validate_size - make sure modes adhere to size constraints
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @maxX: maximum width
+ * @maxY: maximum height
+ * @maxPitch: max pitch
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * The DRM device (@dev) has size and pitch limits.  Here we validate the
+ * modes we probed for @dev against those limits and set their status as
+ * necessary.
+ */
+void drm_mode_validate_size(struct drm_device *dev,
+			    struct list_head *mode_list,
+			    int maxX, int maxY, int maxPitch)
+{
+	struct drm_display_mode *mode;
+
+	list_for_each_entry(mode, mode_list, head) {
+		if (maxPitch > 0 && mode->hdisplay > maxPitch)
+			mode->status = MODE_BAD_WIDTH;
+
+		if (maxX > 0 && mode->hdisplay > maxX)
+			mode->status = MODE_VIRTUAL_X;
+
+		if (maxY > 0 && mode->vdisplay > maxY)
+			mode->status = MODE_VIRTUAL_Y;
+	}
+}
+EXPORT_SYMBOL(drm_mode_validate_size);
+
+/**
+ * drm_mode_validate_clocks - validate modes against clock limits
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @min: minimum clock rate array
+ * @max: maximum clock rate array
+ * @n_ranges: number of clock ranges (size of arrays)
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Some code may need to check a mode list against the clock limits of the
+ * device in question.  This function walks the mode list, testing to make
+ * sure each mode falls within a given range (defined by @min and @max
+ * arrays) and sets @mode->status as needed.
+ */
+void drm_mode_validate_clocks(struct drm_device *dev,
+			      struct list_head *mode_list,
+			      int *min, int *max, int n_ranges)
+{
+	struct drm_display_mode *mode;
+	int i;
+
+	list_for_each_entry(mode, mode_list, head) {
+		bool good = false;
+		for (i = 0; i < n_ranges; i++) {
+			if (mode->clock >= min[i] && mode->clock <= max[i]) {
+				good = true;
+				break;
+			}
+		}
+		if (!good)
+			mode->status = MODE_CLOCK_RANGE;
+	}
+}
+EXPORT_SYMBOL(drm_mode_validate_clocks);
+
+/**
+ * drm_mode_prune_invalid - remove invalid modes from mode list
+ * @dev: DRM device
+ * @mode_list: list of modes to check
+ * @verbose: be verbose about it
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Once mode list generation is complete, a caller can use this routine to
+ * remove invalid modes from a mode list.  If any of the modes have a
+ * status other than %MODE_OK, they are removed from @mode_list and freed.
+ */
+void drm_mode_prune_invalid(struct drm_device *dev,
+			    struct list_head *mode_list, bool verbose)
+{
+	struct drm_display_mode *mode, *t;
+
+	list_for_each_entry_safe(mode, t, mode_list, head) {
+		if (mode->status != MODE_OK) {
+			list_del(&mode->head);
+			if (verbose) {
+				drm_mode_debug_printmodeline(mode);
+				DRM_DEBUG_KMS("Not using %s mode %d\n",
+					mode->name, mode->status);
+			}
+			drm_mode_destroy(dev, mode);
+		}
+	}
+}
+EXPORT_SYMBOL(drm_mode_prune_invalid);
+
+/**
+ * drm_mode_compare - compare modes for favorability
+ * @priv: unused
+ * @lh_a: list_head for first mode
+ * @lh_b: list_head for second mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Compare two modes, given by @lh_a and @lh_b, returning a value indicating
+ * which is better.
+ *
+ * RETURNS:
+ * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
+ * positive if @lh_b is better than @lh_a.
+ */
+static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
+{
+	struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
+	struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
+	int diff;
+
+	diff = ((b->type & DRM_MODE_TYPE_PREFERRED) != 0) -
+		((a->type & DRM_MODE_TYPE_PREFERRED) != 0);
+	if (diff)
+		return diff;
+	diff = b->hdisplay * b->vdisplay - a->hdisplay * a->vdisplay;
+	if (diff)
+		return diff;
+	diff = b->clock - a->clock;
+	return diff;
+}
+
+/**
+ * drm_mode_sort - sort mode list
+ * @mode_list: list to sort
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * Sort @mode_list by favorability, putting good modes first.
+ */
+void drm_mode_sort(struct list_head *mode_list)
+{
+	list_sort(NULL, mode_list, drm_mode_compare);
+}
+EXPORT_SYMBOL(drm_mode_sort);
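
The comparator above yields a deterministic favorability order; sketched on an
illustrative list:

	drm_mode_sort(&connector->modes);
	/* e.g. 1920x1080 (PREFERRED), 1920x1080@50, 1600x1200, 1024x768:
	 * preferred bit first, then descending hdisplay*vdisplay area,
	 * ties broken by the higher pixel clock. */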
+
+/**
+ * drm_mode_connector_list_update - update the mode list for the connector
+ * @connector: the connector to update
+ *
+ * LOCKING:
+ * Caller must hold a lock protecting @mode_list.
+ *
+ * This moves the modes from the @connector probed_modes list
+ * to the actual mode list. It compares each probed mode against the
+ * current list and only adds different modes. Any modes left unverified
+ * after this point will be removed by the invalid-mode pruning pass.
+ */
+void drm_mode_connector_list_update(struct drm_connector *connector)
+{
+	struct drm_display_mode *mode;
+	struct drm_display_mode *pmode, *pt;
+	int found_it;
+
+	list_for_each_entry_safe(pmode, pt, &connector->probed_modes,
+				 head) {
+		found_it = 0;
+		/* go through current modes checking for the new probed mode */
+		list_for_each_entry(mode, &connector->modes, head) {
+			if (drm_mode_equal(pmode, mode)) {
+				found_it = 1;
+				/* if equal delete the probed mode */
+				mode->status = pmode->status;
+				/* Merge type bits together */
+				mode->type |= pmode->type;
+				list_del(&pmode->head);
+				drm_mode_destroy(connector->dev, pmode);
+				break;
+			}
+		}
+
+		if (!found_it) {
+			list_move_tail(&pmode->head, &connector->modes);
+		}
+	}
+}
+EXPORT_SYMBOL(drm_mode_connector_list_update);
+
+/**
+ * drm_mode_parse_command_line_for_connector - parse command line for connector
+ * @mode_option: per connector mode option
+ * @connector: connector to parse line for
+ * @mode: drm_cmdline_mode structure to fill in
+ *
+ * This parses the connector-specific, then the generic, command line for
+ * modes and options to configure the connector.
+ *
+ * This uses the same parameter format as fb modedb.c:
+ *	<xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
+ * with the extra trailing enable ('e') / enable digital ('D') /
+ * disable ('d') force flag at the end.
+ */
+bool drm_mode_parse_command_line_for_connector(const char *mode_option,
+					       struct drm_connector *connector,
+					       struct drm_cmdline_mode *mode)
+{
+	const char *name;
+	unsigned int namelen;
+	bool res_specified = false, bpp_specified = false, refresh_specified = false;
+	unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
+	bool yres_specified = false, cvt = false, rb = false;
+	bool interlace = false, margins = false, was_digit = false;
+	int i;
+	enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
+
+#ifdef CONFIG_FB
+	if (!mode_option)
+		mode_option = fb_mode_option;
+#endif
+
+	if (!mode_option) {
+		mode->specified = false;
+		return false;
+	}
+
+	name = mode_option;
+	namelen = strlen(name);
+	for (i = namelen-1; i >= 0; i--) {
+		switch (name[i]) {
+		case '@':
+			if (!refresh_specified && !bpp_specified &&
+			    !yres_specified && !cvt && !rb && was_digit) {
+				refresh = simple_strtol(&name[i+1], NULL, 10);
+				refresh_specified = true;
+				was_digit = false;
+			} else
+				goto done;
+			break;
+		case '-':
+			if (!bpp_specified && !yres_specified && !cvt &&
+			    !rb && was_digit) {
+				bpp = simple_strtol(&name[i+1], NULL, 10);
+				bpp_specified = true;
+				was_digit = false;
+			} else
+				goto done;
+			break;
+		case 'x':
+			if (!yres_specified && was_digit) {
+				yres = simple_strtol(&name[i+1], NULL, 10);
+				yres_specified = true;
+				was_digit = false;
+			} else
+				goto done;
+			break;
+		case '0' ... '9':
+			was_digit = true;
+			break;
+		case 'M':
+			if (yres_specified || cvt || was_digit)
+				goto done;
+			cvt = true;
+			break;
+		case 'R':
+			if (yres_specified || cvt || rb || was_digit)
+				goto done;
+			rb = true;
+			break;
+		case 'm':
+			if (cvt || yres_specified || was_digit)
+				goto done;
+			margins = true;
+			break;
+		case 'i':
+			if (cvt || yres_specified || was_digit)
+				goto done;
+			interlace = true;
+			break;
+		case 'e':
+			if (yres_specified || bpp_specified || refresh_specified ||
+			    was_digit || (force != DRM_FORCE_UNSPECIFIED))
+				goto done;
+
+			force = DRM_FORCE_ON;
+			break;
+		case 'D':
+			if (yres_specified || bpp_specified || refresh_specified ||
+			    was_digit || (force != DRM_FORCE_UNSPECIFIED))
+				goto done;
+
+			if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
+			    (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
+				force = DRM_FORCE_ON;
+			else
+				force = DRM_FORCE_ON_DIGITAL;
+			break;
+		case 'd':
+			if (yres_specified || bpp_specified || refresh_specified ||
+			    was_digit || (force != DRM_FORCE_UNSPECIFIED))
+				goto done;
+
+			force = DRM_FORCE_OFF;
+			break;
+		default:
+			goto done;
+		}
+	}
+
+	if (i < 0 && yres_specified) {
+		char *ch;
+		xres = simple_strtol(name, &ch, 10);
+		if ((ch != NULL) && (*ch == 'x'))
+			res_specified = true;
+		else
+			i = ch - name;
+	} else if (!yres_specified && was_digit) {
+		/* catch mode that begins with digits but has no 'x' */
+		i = 0;
+	}
+done:
+	if (i >= 0) {
+		printk(KERN_WARNING
+			"parse error at position %i in video mode '%s'\n",
+			i, name);
+		mode->specified = false;
+		return false;
+	}
+
+	if (res_specified) {
+		mode->specified = true;
+		mode->xres = xres;
+		mode->yres = yres;
+	}
+
+	if (refresh_specified) {
+		mode->refresh_specified = true;
+		mode->refresh = refresh;
+	}
+
+	if (bpp_specified) {
+		mode->bpp_specified = true;
+		mode->bpp = bpp;
+	}
+	mode->rb = rb;
+	mode->cvt = cvt;
+	mode->interlace = interlace;
+	mode->margins = margins;
+	mode->force = force;
+
+	return true;
+}
+EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
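
A sketch of what the parser extracts from a typical per-connector option
string (the string itself is illustrative):

	struct drm_cmdline_mode cmdline = { };

	/* video=HDMI-A-1:1920x1080M@60me arrives here as "1920x1080M@60me" */
	if (drm_mode_parse_command_line_for_connector("1920x1080M@60me",
						      connector, &cmdline)) {
		/* cmdline.xres = 1920, cmdline.yres = 1080,
		 * cmdline.cvt = true, cmdline.refresh = 60,
		 * cmdline.margins = true, cmdline.force = DRM_FORCE_ON */
	}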
+
+struct drm_display_mode *
+drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+				  struct drm_cmdline_mode *cmd)
+{
+	struct drm_display_mode *mode;
+
+	if (cmd->cvt)
+		mode = drm_cvt_mode(dev,
+				    cmd->xres, cmd->yres,
+				    cmd->refresh_specified ? cmd->refresh : 60,
+				    cmd->rb, cmd->interlace,
+				    cmd->margins);
+	else
+		mode = drm_gtf_mode(dev,
+				    cmd->xres, cmd->yres,
+				    cmd->refresh_specified ? cmd->refresh : 60,
+				    cmd->interlace,
+				    cmd->margins);
+	if (!mode)
+		return NULL;
+
+	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+	return mode;
+}
+EXPORT_SYMBOL(drm_mode_create_from_cmdline_mode);
diff --git a/linux-imx/drivers/gpu/drm/drm_pci.c b/linux-imx/drivers/gpu/drm/drm_pci.c
new file mode 100644
index 0000000..14194b6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_pci.c
@@ -0,0 +1,505 @@
+/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
+/**
+ * \file drm_pci.c
+ * \brief Functions and ioctls to manage PCI memory
+ *
+ * \warning These interfaces aren't stable yet.
+ *
+ * \todo Implement the remaining ioctl's for the PCI pools.
+ * \todo The wrappers here are so thin that they would be better off inlined.
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ * \author Leif Delgass <ldelgass@retinalburn.net>
+ */
+
+/*
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+
+/**********************************************************************/
+/** \name PCI memory */
+/*@{*/
+
+/**
+ * \brief Allocate a PCI consistent memory block, for DMA.
+ */
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
+{
+	drm_dma_handle_t *dmah;
+#if 1
+	unsigned long addr;
+	size_t sz;
+#endif
+
+	/* pci_alloc_consistent only guarantees alignment to the smallest
+	 * PAGE_SIZE order which is greater than or equal to the requested size.
+	 * Return NULL here for now to make sure nobody tries for larger alignment
+	 */
+	if (align > size)
+		return NULL;
+
+	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
+	if (!dmah)
+		return NULL;
+
+	dmah->size = size;
+	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
+
+	if (dmah->vaddr == NULL) {
+		kfree(dmah);
+		return NULL;
+	}
+
+	memset(dmah->vaddr, 0, size);
+
+	/* XXX - Is virt_to_page() legal for consistent mem? */
+	/* Reserve */
+	for (addr = (unsigned long)dmah->vaddr, sz = size;
+	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+		SetPageReserved(virt_to_page(addr));
+	}
+
+	return dmah;
+}
+
+EXPORT_SYMBOL(drm_pci_alloc);
+
+/**
+ * \brief Free a PCI consistent memory block without freeing its descriptor.
+ *
+ * This function is for internal use in the Linux-specific DRM core code.
+ */
+void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+{
+#if 1
+	unsigned long addr;
+	size_t sz;
+#endif
+
+	if (dmah->vaddr) {
+		/* XXX - Is virt_to_page() legal for consistent mem? */
+		/* Unreserve */
+		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
+		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+			ClearPageReserved(virt_to_page(addr));
+		}
+		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+				  dmah->busaddr);
+	}
+}
+
+/**
+ * \brief Free a PCI consistent memory block
+ */
+void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+{
+	__drm_pci_free(dev, dmah);
+	kfree(dmah);
+}
+
+EXPORT_SYMBOL(drm_pci_free);
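
A sketch of the alloc/free pairing above; the 64 KiB size and page alignment
are illustrative:

	drm_dma_handle_t *dmah;

	dmah = drm_pci_alloc(dev, 64 * 1024, PAGE_SIZE);
	if (!dmah)
		return -ENOMEM;

	/* dmah->vaddr is the zeroed kernel mapping, dmah->busaddr the
	 * bus address to hand to the device; release with: */
	drm_pci_free(dev, dmah);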
+
+#ifdef CONFIG_PCI
+
+static int drm_get_pci_domain(struct drm_device *dev)
+{
+#ifndef __alpha__
+	/* For historical reasons, drm_get_pci_domain() is busticated
+	 * on most archs and has to remain so for userspace interface
+	 * < 1.4, except on alpha which was right from the beginning
+	 */
+	if (dev->if_version < 0x10004)
+		return 0;
+#endif /* __alpha__ */
+
+	return pci_domain_nr(dev->pdev->bus);
+}
+
+static int drm_pci_get_irq(struct drm_device *dev)
+{
+	return dev->pdev->irq;
+}
+
+static const char *drm_pci_get_name(struct drm_device *dev)
+{
+	struct pci_driver *pdriver = dev->driver->kdriver.pci;
+	return pdriver->name;
+}
+
+static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+	int len, ret;
+	struct pci_driver *pdriver = dev->driver->kdriver.pci;
+	master->unique_len = 40;
+	master->unique_size = master->unique_len;
+	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+	if (master->unique == NULL)
+		return -ENOMEM;
+
+
+	len = snprintf(master->unique, master->unique_len,
+		       "pci:%04x:%02x:%02x.%d",
+		       drm_get_pci_domain(dev),
+		       dev->pdev->bus->number,
+		       PCI_SLOT(dev->pdev->devfn),
+		       PCI_FUNC(dev->pdev->devfn));
+
+	if (len >= master->unique_len) {
+		DRM_ERROR("buffer overflow");
+		ret = -EINVAL;
+		goto err;
+	} else
+		master->unique_len = len;
+
+	dev->devname =
+		kmalloc(strlen(pdriver->name) +
+			master->unique_len + 2, GFP_KERNEL);
+
+	if (dev->devname == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	sprintf(dev->devname, "%s@%s", pdriver->name,
+		master->unique);
+
+	return 0;
+err:
+	return ret;
+}
+
+static int drm_pci_set_unique(struct drm_device *dev,
+			      struct drm_master *master,
+			      struct drm_unique *u)
+{
+	int domain, bus, slot, func, ret;
+	const char *bus_name;
+
+	master->unique_len = u->unique_len;
+	master->unique_size = u->unique_len + 1;
+	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+	if (!master->unique) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
+		ret = -EFAULT;
+		goto err;
+	}
+
+	master->unique[master->unique_len] = '\0';
+
+	bus_name = dev->driver->bus->get_name(dev);
+	dev->devname = kmalloc(strlen(bus_name) +
+			       strlen(master->unique) + 2, GFP_KERNEL);
+	if (!dev->devname) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	sprintf(dev->devname, "%s@%s", bus_name,
+		master->unique);
+
+	/* Return error if the busid submitted doesn't match the device's actual
+	 * busid.
+	 */
+	ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
+	if (ret != 3) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	domain = bus >> 8;
+	bus &= 0xff;
+
+	if ((domain != drm_get_pci_domain(dev)) ||
+	    (bus != dev->pdev->bus->number) ||
+	    (slot != PCI_SLOT(dev->pdev->devfn)) ||
+	    (func != PCI_FUNC(dev->pdev->devfn))) {
+		ret = -EINVAL;
+		goto err;
+	}
+	return 0;
+err:
+	return ret;
+}
+
+
+static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
+{
+	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+	    (p->busnum & 0xff) != dev->pdev->bus->number ||
+	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+		return -EINVAL;
+
+	p->irq = dev->pdev->irq;
+
+	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+		  p->irq);
+	return 0;
+}
+
+static int drm_pci_agp_init(struct drm_device *dev)
+{
+	if (drm_core_has_AGP(dev)) {
+		if (drm_pci_device_is_agp(dev))
+			dev->agp = drm_agp_init(dev);
+		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+		    && (dev->agp == NULL)) {
+			DRM_ERROR("Cannot initialize the agpgart module.\n");
+			return -EINVAL;
+		}
+		if (drm_core_has_MTRR(dev)) {
+			if (dev->agp)
+				dev->agp->agp_mtrr =
+					mtrr_add(dev->agp->agp_info.aper_base,
+						 dev->agp->agp_info.aper_size *
+						 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
+		}
+	}
+	return 0;
+}
+
+static struct drm_bus drm_pci_bus = {
+	.bus_type = DRIVER_BUS_PCI,
+	.get_irq = drm_pci_get_irq,
+	.get_name = drm_pci_get_name,
+	.set_busid = drm_pci_set_busid,
+	.set_unique = drm_pci_set_unique,
+	.irq_by_busid = drm_pci_irq_by_busid,
+	.agp_init = drm_pci_agp_init,
+};
+
+/**
+ * Register.
+ *
+ * \param pdev PCI device structure
+ * \param ent entry from the PCI ID table with device type flags
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to get inter module "drm" information. If we are first
+ * then register the character device and inter module information.
+ * Try to register; if we fail, back out the previous work.
+ */
+int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+		    struct drm_driver *driver)
+{
+	struct drm_device *dev;
+	int ret;
+
+	DRM_DEBUG("\n");
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto err_g1;
+
+	dev->pdev = pdev;
+	dev->dev = &pdev->dev;
+
+	dev->pci_device = pdev->device;
+	dev->pci_vendor = pdev->vendor;
+
+#ifdef __alpha__
+	dev->hose = pdev->sysdata;
+#endif
+
+	mutex_lock(&drm_global_mutex);
+
+	if ((ret = drm_fill_in_dev(dev, ent, driver))) {
+		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+		goto err_g2;
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		pci_set_drvdata(pdev, dev);
+		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+		if (ret)
+			goto err_g2;
+	}
+
+	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
+		goto err_g3;
+
+	if (dev->driver->load) {
+		ret = dev->driver->load(dev, ent->driver_data);
+		if (ret)
+			goto err_g4;
+	}
+
+	/* setup the grouping for the legacy output */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_mode_group_init_legacy_group(dev,
+						&dev->primary->mode_group);
+		if (ret)
+			goto err_g4;
+	}
+
+	list_add_tail(&dev->driver_item, &driver->device_list);
+
+	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+		 driver->name, driver->major, driver->minor, driver->patchlevel,
+		 driver->date, pci_name(pdev), dev->primary->index);
+
+	mutex_unlock(&drm_global_mutex);
+	return 0;
+
+err_g4:
+	drm_put_minor(&dev->primary);
+err_g3:
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_put_minor(&dev->control);
+err_g2:
+	pci_disable_device(pdev);
+err_g1:
+	kfree(dev);
+	mutex_unlock(&drm_global_mutex);
+	return ret;
+}
+EXPORT_SYMBOL(drm_get_pci_dev);
+
+/**
+ * PCI device initialization. Called direct from modules at load time.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Initializes a drm_device structure, registering the
+ * stubs and initializing the AGP device.
+ *
+ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+ * after the initialization for driver customization.
+ */
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+	struct pci_dev *pdev = NULL;
+	const struct pci_device_id *pid;
+	int i;
+
+	DRM_DEBUG("\n");
+
+	INIT_LIST_HEAD(&driver->device_list);
+	driver->kdriver.pci = pdriver;
+	driver->bus = &drm_pci_bus;
+
+	if (driver->driver_features & DRIVER_MODESET)
+		return pci_register_driver(pdriver);
+
+	/* If not using KMS, fall back to stealth mode manual scanning. */
+	for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
+		pid = &pdriver->id_table[i];
+
+		/* Loop around setting up a DRM device for each PCI device
+		 * matching our ID and device class.  If we had the internal
+		 * function that pci_get_subsys and pci_get_class used, we'd
+		 * be able to just pass pid in instead of doing a two-stage
+		 * thing.
+		 */
+		pdev = NULL;
+		while ((pdev =
+			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
+				       pid->subdevice, pdev)) != NULL) {
+			if ((pdev->class & pid->class_mask) != pid->class)
+				continue;
+
+			/* stealth mode requires a manual probe */
+			pci_dev_get(pdev);
+			drm_get_pci_dev(pdev, pid, driver);
+		}
+	}
+	return 0;
+}
+
+int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
+{
+	struct pci_dev *root;
+	u32 lnkcap, lnkcap2;
+
+	*mask = 0;
+	if (!dev->pdev)
+		return -EINVAL;
+
+	root = dev->pdev->bus->self;
+
+	/* we've been informed VIA and ServerWorks bridges don't make the cut */
+	if (root->vendor == PCI_VENDOR_ID_VIA ||
+	    root->vendor == PCI_VENDOR_ID_SERVERWORKS)
+		return -EINVAL;
+
+	pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+	pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);
+
+	if (lnkcap2) {	/* PCIe r3.0-compliant */
+		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+			*mask |= DRM_PCIE_SPEED_25;
+		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+			*mask |= DRM_PCIE_SPEED_50;
+		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+			*mask |= DRM_PCIE_SPEED_80;
+	} else {	/* pre-r3.0 */
+		if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
+			*mask |= DRM_PCIE_SPEED_25;
+		if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
+			*mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
+	}
+
+	DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
+	return 0;
+}
+EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
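
A sketch of a consumer deciding a link-training target from the mask; the
gen-selection logic is illustrative:

	u32 mask;
	int target_gen = 1;

	if (drm_pcie_get_speed_cap_mask(dev, &mask) == 0) {
		if (mask & DRM_PCIE_SPEED_80)
			target_gen = 3;	/* 8.0 GT/s capable */
		else if (mask & DRM_PCIE_SPEED_50)
			target_gen = 2;	/* 5.0 GT/s capable */
	}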
+
+#else
+
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+	return -1;
+}
+
+#endif
+
+EXPORT_SYMBOL(drm_pci_init);
+
+/*@}*/
+void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+	struct drm_device *dev, *tmp;
+	DRM_DEBUG("\n");
+
+	if (driver->driver_features & DRIVER_MODESET) {
+		pci_unregister_driver(pdriver);
+	} else {
+		list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+			drm_put_dev(dev);
+	}
+	DRM_INFO("Module unloaded\n");
+}
+EXPORT_SYMBOL(drm_pci_exit);
diff --git a/linux-imx/drivers/gpu/drm/drm_platform.c b/linux-imx/drivers/gpu/drm/drm_platform.c
new file mode 100644
index 0000000..b8a282e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_platform.c
@@ -0,0 +1,205 @@
+/*
+ * Derived from drm_pci.c
+ *
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
+ * Copyright (c) 2009, Code Aurora Forum.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+
+/**
+ * Register.
+ *
+ * \param platdev Platform device structure
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to get inter module "drm" information. If we are first
+ * then register the character device and inter module information.
+ * Try to register; if we fail, back out the previous work.
+ */
+
+int drm_get_platform_dev(struct platform_device *platdev,
+			 struct drm_driver *driver)
+{
+	struct drm_device *dev;
+	int ret;
+
+	DRM_DEBUG("\n");
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->platformdev = platdev;
+	dev->dev = &platdev->dev;
+
+	mutex_lock(&drm_global_mutex);
+
+	ret = drm_fill_in_dev(dev, NULL, driver);
+
+	if (ret) {
+		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+		goto err_g1;
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+		if (ret)
+			goto err_g1;
+	}
+
+	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+	if (ret)
+		goto err_g2;
+
+	if (dev->driver->load) {
+		ret = dev->driver->load(dev, 0);
+		if (ret)
+			goto err_g3;
+	}
+
+	/* setup the grouping for the legacy output */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_mode_group_init_legacy_group(dev,
+				&dev->primary->mode_group);
+		if (ret)
+			goto err_g3;
+	}
+
+	list_add_tail(&dev->driver_item, &driver->device_list);
+
+	mutex_unlock(&drm_global_mutex);
+
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+		 driver->name, driver->major, driver->minor, driver->patchlevel,
+		 driver->date, dev->primary->index);
+
+	return 0;
+
+err_g3:
+	drm_put_minor(&dev->primary);
+err_g2:
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_put_minor(&dev->control);
+err_g1:
+	kfree(dev);
+	mutex_unlock(&drm_global_mutex);
+	return ret;
+}
+EXPORT_SYMBOL(drm_get_platform_dev);
+
+static int drm_platform_get_irq(struct drm_device *dev)
+{
+	return platform_get_irq(dev->platformdev, 0);
+}
+
+static const char *drm_platform_get_name(struct drm_device *dev)
+{
+	return dev->platformdev->name;
+}
+
+static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+	int len, ret, id;
+
+	master->unique_len = 13 + strlen(dev->platformdev->name);
+	master->unique_size = master->unique_len;
+	master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
+
+	if (master->unique == NULL)
+		return -ENOMEM;
+
+	id = dev->platformdev->id;
+
+	/* if only a single instance of the platform device, id will be
+	 * set to -1.. use 0 instead to avoid a funny looking bus-id:
+	 */
+	if (id == -1)
+		id = 0;
+
+	len = snprintf(master->unique, master->unique_len,
+			"platform:%s:%02d", dev->platformdev->name, id);
+
+	if (len > master->unique_len) {
+		DRM_ERROR("Unique buffer overflowed\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	dev->devname =
+		kmalloc(strlen(dev->platformdev->name) +
+			master->unique_len + 2, GFP_KERNEL);
+
+	if (dev->devname == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	sprintf(dev->devname, "%s@%s", dev->platformdev->name,
+		master->unique);
+	return 0;
+err:
+	return ret;
+}
+
+static struct drm_bus drm_platform_bus = {
+	.bus_type = DRIVER_BUS_PLATFORM,
+	.get_irq = drm_platform_get_irq,
+	.get_name = drm_platform_get_name,
+	.set_busid = drm_platform_set_busid,
+};
+
+/**
+ * Platform device initialization. Called direct from modules.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Initializes a drm_device structure, registering the
+ * stubs.
+ *
+ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+ * after the initialization for driver customization.
+ */
+
+int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
+{
+	DRM_DEBUG("\n");
+
+	driver->kdriver.platform_device = platform_device;
+	driver->bus = &drm_platform_bus;
+	INIT_LIST_HEAD(&driver->device_list);
+	return drm_get_platform_dev(platform_device, driver);
+}
+EXPORT_SYMBOL(drm_platform_init);
+
+void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device)
+{
+	struct drm_device *dev, *tmp;
+	DRM_DEBUG("\n");
+
+	list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+		drm_put_dev(dev);
+	DRM_INFO("Module unloaded\n");
+}
+EXPORT_SYMBOL(drm_platform_exit);
diff --git a/linux-imx/drivers/gpu/drm/drm_prime.c b/linux-imx/drivers/gpu/drm/drm_prime.c
new file mode 100644
index 0000000..5b7b911
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_prime.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright © 2012 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *      Dave Airlie <airlied@redhat.com>
+ *      Rob Clark <rob.clark@linaro.org>
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+
+/*
+ * DMA-BUF/GEM Object references and lifetime overview:
+ *
+ * On the export the dma_buf holds a reference to the exporting GEM
+ * object. It takes this reference in handle_to_fd_ioctl, when it
+ * first calls .prime_export and stores the exporting GEM object in
+ * the dma_buf priv. This reference is released when the dma_buf
+ * object goes away in the driver .release function.
+ *
+ * On the import the importing GEM object holds a reference to the
+ * dma_buf (which in turn holds a ref to the exporting GEM object).
+ * It takes that reference in the fd_to_handle ioctl.
+ * It calls dma_buf_get, creates an attachment to it and stores the
+ * attachment in the GEM object. When this attachment is destroyed
+ * when the imported object is destroyed, we remove the attachment
+ * and drop the reference to the dma_buf.
+ *
+ * Thus the chain of references always flows in one direction
+ * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
+ *
+ * Self-importing: if userspace is using PRIME as a replacement for flink
+ * then it will get a fd->handle request for a GEM object that it created.
+ * Drivers should detect this situation and return back the gem object
+ * from the dma-buf private.  Prime will do this automatically for drivers that
+ * use the drm_gem_prime_{import,export} helpers.
+ */
+
+struct drm_prime_member {
+	struct list_head entry;
+	struct dma_buf *dma_buf;
+	uint32_t handle;
+};
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
+
+static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+		enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct sg_table *sgt;
+
+	mutex_lock(&obj->dev->struct_mutex);
+
+	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+
+	if (!IS_ERR_OR_NULL(sgt))
+		dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+	mutex_unlock(&obj->dev->struct_mutex);
+	return sgt;
+}
+
+static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+		struct sg_table *sgt, enum dma_data_direction dir)
+{
+	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+	sg_free_table(sgt);
+	kfree(sgt);
+}
+
+static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	if (obj->export_dma_buf == dma_buf) {
+		/* drop the reference on the export fd holds */
+		obj->export_dma_buf = NULL;
+		drm_gem_object_unreference_unlocked(obj);
+	}
+}
+
+static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+
+	return dev->driver->gem_prime_vmap(obj);
+}
+
+static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+
+	dev->driver->gem_prime_vunmap(obj, vaddr);
+}
+
+static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+		unsigned long page_num)
+{
+	return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+		unsigned long page_num, void *addr)
+{
+
+}
+static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
+		unsigned long page_num)
+{
+	return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
+		unsigned long page_num, void *addr)
+{
+
+}
+
+static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+		struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
+	.map_dma_buf = drm_gem_map_dma_buf,
+	.unmap_dma_buf = drm_gem_unmap_dma_buf,
+	.release = drm_gem_dmabuf_release,
+	.kmap = drm_gem_dmabuf_kmap,
+	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
+	.kunmap = drm_gem_dmabuf_kunmap,
+	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+	.mmap = drm_gem_dmabuf_mmap,
+	.vmap = drm_gem_dmabuf_vmap,
+	.vunmap = drm_gem_dmabuf_vunmap,
+};
+
+/**
+ * DOC: PRIME Helpers
+ *
+ * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
+ * simpler APIs by using the helper functions @drm_gem_prime_export and
+ * @drm_gem_prime_import.  These functions implement dma-buf support in terms of
+ * five lower-level driver callbacks:
+ *
+ * Export callbacks:
+ *
+ *  - @gem_prime_pin (optional): prepare a GEM object for exporting
+ *
+ *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
+ *
+ *  - @gem_prime_vmap: vmap a buffer exported by your driver
+ *
+ *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
+ *
+ * Import callback:
+ *
+ *  - @gem_prime_import_sg_table (import): produce a GEM object from another
+ *    driver's scatter/gather table
+ */
+
+struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+				     struct drm_gem_object *obj, int flags)
+{
+	if (dev->driver->gem_prime_pin) {
+		int ret = dev->driver->gem_prime_pin(obj);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
+}
+EXPORT_SYMBOL(drm_gem_prime_export);
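
A sketch of the wiring the PRIME Helpers DOC above describes: a hypothetical
driver (all example_* names are assumptions) plugs the generic helpers plus
its own low-level callbacks into its drm_driver:

	static struct drm_driver example_driver = {
		.driver_features = DRIVER_GEM | DRIVER_PRIME /* | ... */,

		/* generic PRIME glue */
		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
		.gem_prime_export	= drm_gem_prime_export,
		.gem_prime_import	= drm_gem_prime_import,

		/* driver-specific callbacks the helpers call back into */
		.gem_prime_pin		= example_gem_prime_pin, /* optional */
		.gem_prime_get_sg_table	= example_gem_prime_get_sg_table,
		.gem_prime_import_sg_table = example_gem_prime_import_sg_table,
		.gem_prime_vmap		= example_gem_prime_vmap,
		.gem_prime_vunmap	= example_gem_prime_vunmap,
	};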
+
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+		int *prime_fd)
+{
+	struct drm_gem_object *obj;
+	void *buf;
+	int ret = 0;
+	struct dma_buf *dmabuf;
+
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (!obj)
+		return -ENOENT;
+
+	mutex_lock(&file_priv->prime.lock);
+	/* re-export the original imported object */
+	if (obj->import_attach) {
+		dmabuf = obj->import_attach->dmabuf;
+		goto out_have_obj;
+	}
+
+	if (obj->export_dma_buf) {
+		dmabuf = obj->export_dma_buf;
+		goto out_have_obj;
+	}
+
+	buf = dev->driver->gem_prime_export(dev, obj, flags);
+	if (IS_ERR(buf)) {
+		/* normally the created dma-buf takes ownership of the ref,
+		 * but if that fails then drop the ref
+		 */
+		ret = PTR_ERR(buf);
+		goto out;
+	}
+	obj->export_dma_buf = buf;
+
+	/* if we've exported this buffer then cheat and add it to the import
+	 * list so we get the correct handle back
+	 */
+	ret = drm_prime_add_buf_handle(&file_priv->prime,
+				       obj->export_dma_buf, handle);
+	if (ret)
+		goto out;
+
+	*prime_fd = dma_buf_fd(buf, flags);
+	mutex_unlock(&file_priv->prime.lock);
+	return 0;
+
+out_have_obj:
+	get_dma_buf(dmabuf);
+	*prime_fd = dma_buf_fd(dmabuf, flags);
+out:
+	drm_gem_object_unreference_unlocked(obj);
+	mutex_unlock(&file_priv->prime.lock);
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+
+struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+					    struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (!dev->driver->gem_prime_import_sg_table)
+		return ERR_PTR(-EINVAL);
+
+	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
+		obj = dma_buf->priv;
+		if (obj->dev == dev) {
+			/*
+			 * Importing a dmabuf exported from our own GEM object
+			 * increases the refcount on the GEM object itself
+			 * instead of the f_count of the dmabuf.
+			 */
+			drm_gem_object_reference(obj);
+			return obj;
+		}
+	}
+
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	get_dma_buf(dma_buf);
+
+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(sgt)) {
+		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
+		goto fail_detach;
+	}
+
+	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		goto fail_unmap;
+	}
+
+	obj->import_attach = attach;
+
+	return obj;
+
+fail_unmap:
+	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	dma_buf_put(dma_buf);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_gem_prime_import);
+
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
+{
+	struct dma_buf *dma_buf;
+	struct drm_gem_object *obj;
+	int ret;
+
+	dma_buf = dma_buf_get(prime_fd);
+	if (IS_ERR(dma_buf))
+		return PTR_ERR(dma_buf);
+
+	mutex_lock(&file_priv->prime.lock);
+
+	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
+			dma_buf, handle);
+	if (!ret)
+		goto out_put;
+
+	/* never seen this one, need to import */
+	obj = dev->driver->gem_prime_import(dev, dma_buf);
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		goto out_put;
+	}
+
+	ret = drm_gem_handle_create(file_priv, obj, handle);
+	drm_gem_object_unreference_unlocked(obj);
+	if (ret)
+		goto out_put;
+
+	ret = drm_prime_add_buf_handle(&file_priv->prime,
+			dma_buf, *handle);
+	if (ret)
+		goto fail;
+
+	mutex_unlock(&file_priv->prime.lock);
+
+	dma_buf_put(dma_buf);
+
+	return 0;
+
+fail:
+	/* if the driver is attached, we rely on the free-object path to
+	 * detach, which should be safe here
+	 */
+	drm_gem_object_handle_unreference_unlocked(obj);
+out_put:
+	dma_buf_put(dma_buf);
+	mutex_unlock(&file_priv->prime.lock);
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
+
+int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_prime_handle *args = data;
+	uint32_t flags;
+
+	if (!drm_core_check_feature(dev, DRIVER_PRIME))
+		return -EINVAL;
+
+	if (!dev->driver->prime_handle_to_fd)
+		return -ENOSYS;
+
+	/* check flags are valid */
+	if (args->flags & ~DRM_CLOEXEC)
+		return -EINVAL;
+
+	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
+	flags = args->flags & DRM_CLOEXEC;
+
+	return dev->driver->prime_handle_to_fd(dev, file_priv,
+			args->handle, flags, &args->fd);
+}
+
+int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_prime_handle *args = data;
+
+	if (!drm_core_check_feature(dev, DRIVER_PRIME))
+		return -EINVAL;
+
+	if (!dev->driver->prime_fd_to_handle)
+		return -ENOSYS;
+
+	return dev->driver->prime_fd_to_handle(dev, file_priv,
+			args->fd, &args->handle);
+}
+
+/*
+ * drm_prime_pages_to_sg
+ *
+ * This helper creates an sg_table from a set of pages; the driver is
+ * responsible for mapping the pages into the importer's address space.
+ */
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
+{
+	struct sg_table *sg = NULL;
+	int ret;
+
+	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!sg)
+		goto out;
+
+	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+				nr_pages << PAGE_SHIFT, GFP_KERNEL);
+	if (ret)
+		goto out;
+
+	return sg;
+out:
+	kfree(sg);
+	return NULL;
+}
+EXPORT_SYMBOL(drm_prime_pages_to_sg);
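+
+/*
+ * For illustration, a typical caller is a driver's gem_prime_get_sg_table
+ * hook; a minimal sketch (foo_obj and its pages array are hypothetical):
+ *
+ *	static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
+ *	{
+ *		struct foo_gem_object *foo_obj = to_foo_obj(obj);
+ *
+ *		return drm_prime_pages_to_sg(foo_obj->pages,
+ *					     obj->size >> PAGE_SHIFT);
+ *	}
+ */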
+
+/*
+ * Export an sg table into an array of pages and addresses.
+ * This is currently required by the TTM driver in order to do
+ * correct fault handling.
+ */
+int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+				     dma_addr_t *addrs, int max_pages)
+{
+	unsigned count;
+	struct scatterlist *sg;
+	struct page *page;
+	u32 len, offset;
+	int pg_index;
+	dma_addr_t addr;
+
+	pg_index = 0;
+	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+		len = sg->length;
+		offset = sg->offset;
+		page = sg_page(sg);
+		addr = sg_dma_address(sg);
+
+		while (len > 0) {
+			if (WARN_ON(pg_index >= max_pages))
+				return -1;
+			pages[pg_index] = page;
+			if (addrs)
+				addrs[pg_index] = addr;
+
+			page++;
+			addr += PAGE_SIZE;
+			len -= PAGE_SIZE;
+			pg_index++;
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
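+
+/*
+ * For illustration, a TTM-based importer might expand a received sg table
+ * into per-page arrays like this (a sketch; the names are hypothetical):
+ *
+ *	struct page *pages[FOO_NUM_PAGES];
+ *	dma_addr_t addrs[FOO_NUM_PAGES];
+ *
+ *	if (drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs,
+ *					     FOO_NUM_PAGES))
+ *		return -ENOMEM;	/* sgt covered more pages than expected */
+ */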
+
+/* Helper function to clean up a GEM/prime object. */
+void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
+{
+	struct dma_buf_attachment *attach;
+	struct dma_buf *dma_buf;
+
+	attach = obj->import_attach;
+	if (sg)
+		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+	dma_buf = attach->dmabuf;
+	dma_buf_detach(attach->dmabuf, attach);
+	/* remove the reference */
+	dma_buf_put(dma_buf);
+}
+EXPORT_SYMBOL(drm_prime_gem_destroy);
+
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+	INIT_LIST_HEAD(&prime_fpriv->head);
+	mutex_init(&prime_fpriv->lock);
+}
+EXPORT_SYMBOL(drm_prime_init_file_private);
+
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+	/* by now drm_gem_release should've made sure the list is empty */
+	WARN_ON(!list_empty(&prime_fpriv->head));
+}
+EXPORT_SYMBOL(drm_prime_destroy_file_private);
+
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+{
+	struct drm_prime_member *member;
+
+	member = kmalloc(sizeof(*member), GFP_KERNEL);
+	if (!member)
+		return -ENOMEM;
+
+	get_dma_buf(dma_buf);
+	member->dma_buf = dma_buf;
+	member->handle = handle;
+	list_add(&member->entry, &prime_fpriv->head);
+	return 0;
+}
+
+int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+{
+	struct drm_prime_member *member;
+
+	list_for_each_entry(member, &prime_fpriv->head, entry) {
+		if (member->dma_buf == dma_buf) {
+			*handle = member->handle;
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
+
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+{
+	struct drm_prime_member *member, *safe;
+
+	mutex_lock(&prime_fpriv->lock);
+	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+		if (member->dma_buf == dma_buf) {
+			dma_buf_put(dma_buf);
+			list_del(&member->entry);
+			kfree(member);
+		}
+	}
+	mutex_unlock(&prime_fpriv->lock);
+}
+EXPORT_SYMBOL(drm_prime_remove_buf_handle);
diff --git a/linux-imx/drivers/gpu/drm/drm_proc.c b/linux-imx/drivers/gpu/drm/drm_proc.c
new file mode 100644
index 0000000..d7f2324
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_proc.c
@@ -0,0 +1,209 @@
+/**
+ * \file drm_proc.c
+ * /proc support for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ *
+ * \par Acknowledgements:
+ *    Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
+ *    the problem with the proc files not outputting all their information.
+ */
+
+/*
+ * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+
+/***************************************************
+ * Initialization, etc.
+ **************************************************/
+
+/**
+ * Proc file list.
+ */
+static const struct drm_info_list drm_proc_list[] = {
+	{"name", drm_name_info, 0},
+	{"vm", drm_vm_info, 0},
+	{"clients", drm_clients_info, 0},
+	{"bufs", drm_bufs_info, 0},
+	{"gem_names", drm_gem_name_info, DRIVER_GEM},
+#if DRM_DEBUG_CODE
+	{"vma", drm_vma_info, 0},
+#endif
+};
+#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
+
+static int drm_proc_open(struct inode *inode, struct file *file)
+{
+	struct drm_info_node *node = PDE_DATA(inode);
+
+	return single_open(file, node->info_ent->show, node);
+}
+
+static const struct file_operations drm_proc_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_proc_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+
+/**
+ * Initialize a given set of proc files for a device
+ *
+ * \param files The array of files to create
+ * \param count The number of files given
+ * \param root DRI proc dir entry.
+ * \param minor DRM minor the files belong to
+ * \return Zero on success, non-zero on failure
+ *
+ * Create a given set of proc files represented by an array of
+ * drm_info_list entries in the given root directory.
+ */
+static int drm_proc_create_files(const struct drm_info_list *files, int count,
+			  struct proc_dir_entry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct proc_dir_entry *ent;
+	struct drm_info_node *tmp;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		u32 features = files[i].driver_features;
+
+		if (features != 0 &&
+		    (dev->driver->driver_features & features) != features)
+			continue;
+
+		tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+		if (!tmp)
+			return -1;
+
+		tmp->minor = minor;
+		tmp->info_ent = &files[i];
+		list_add(&tmp->list, &minor->proc_nodes.list);
+
+		ent = proc_create_data(files[i].name, S_IRUGO, root,
+				       &drm_proc_fops, tmp);
+		if (!ent) {
+			DRM_ERROR("Cannot create /proc/dri/%u/%s\n",
+				  minor->index, files[i].name);
+			list_del(&tmp->list);
+			kfree(tmp);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+/**
+ * Initialize the DRI proc filesystem for a device
+ *
+ * \param dev DRM device
+ * \param root DRI proc dir entry.
+ * \param dev_root resulting DRI device proc dir entry.
+ * \return zero on success, non-zero on failure.
+ *
+ * Create the DRI proc root entry "/proc/dri", the device proc root entry
+ * "/proc/dri/%minor%/", and each entry in proc_list as
+ * "/proc/dri/%minor%/%name%".
+ */
+int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root)
+{
+	char name[12];
+	int ret;
+
+	INIT_LIST_HEAD(&minor->proc_nodes.list);
+	sprintf(name, "%u", minor->index);
+	minor->proc_root = proc_mkdir(name, root);
+	if (!minor->proc_root) {
+		DRM_ERROR("Cannot create /proc/dri/%s\n", name);
+		return -1;
+	}
+
+	ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
+				    minor->proc_root, minor);
+	if (ret) {
+		remove_proc_subtree(name, root);
+		minor->proc_root = NULL;
+		DRM_ERROR("Failed to create core drm proc files\n");
+		return ret;
+	}
+
+	return 0;
+}
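+
+/*
+ * After drm_proc_init() runs for minor 0, the proc tree typically contains
+ * (the exact set depends on the driver's feature flags):
+ *
+ *	/proc/dri/0/name
+ *	/proc/dri/0/vm
+ *	/proc/dri/0/clients
+ *	/proc/dri/0/bufs
+ *	/proc/dri/0/gem_names
+ */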
+
+static int drm_proc_remove_files(const struct drm_info_list *files, int count,
+			  struct drm_minor *minor)
+{
+	struct list_head *pos, *q;
+	struct drm_info_node *tmp;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		list_for_each_safe(pos, q, &minor->proc_nodes.list) {
+			tmp = list_entry(pos, struct drm_info_node, list);
+			if (tmp->info_ent == &files[i]) {
+				remove_proc_entry(files[i].name,
+						  minor->proc_root);
+				list_del(pos);
+				kfree(tmp);
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * Cleanup the proc filesystem resources.
+ *
+ * \param minor DRM minor to clean up.
+ * \param root DRI proc dir entry.
+ * \return always zero.
+ *
+ * Remove all proc entries created by proc_init().
+ */
+int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
+{
+	char name[64];
+
+	if (!root || !minor->proc_root)
+		return 0;
+
+	drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
+
+	sprintf(name, "%d", minor->index);
+	remove_proc_subtree(name, root);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/drm_scatter.c b/linux-imx/drivers/gpu/drm/drm_scatter.c
new file mode 100644
index 0000000..d87f60b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_scatter.c
@@ -0,0 +1,213 @@
+/**
+ * \file drm_scatter.c
+ * IOCTLs to manage scatter/gather memory
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+
+#define DEBUG_SCATTER 0
+
+static inline void *drm_vmalloc_dma(unsigned long size)
+{
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+#else
+	return vmalloc_32(size);
+#endif
+}
+
+void drm_sg_cleanup(struct drm_sg_mem * entry)
+{
+	struct page *page;
+	int i;
+
+	for (i = 0; i < entry->pages; i++) {
+		page = entry->pagelist[i];
+		if (page)
+			ClearPageReserved(page);
+	}
+
+	vfree(entry->virtual);
+
+	kfree(entry->busaddr);
+	kfree(entry->pagelist);
+	kfree(entry);
+}
+
+#ifdef _LP64
+# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
+#else
+# define ScatterHandle(x) (unsigned int)(x)
+#endif
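+
+/*
+ * Worked example: on an _LP64 kernel a mapping address such as
+ * x = 0xffff880012345000 yields ScatterHandle(x) =
+ * (unsigned int)(0xffff8800 + 0x12345000) = 0x1233d800; the upper and
+ * lower 32-bit halves are summed and truncated so the handle fits the
+ * 32-bit field userspace sees.
+ */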
+
+int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+{
+	struct drm_sg_mem *entry;
+	unsigned long pages, i, j;
+
+	DRM_DEBUG("\n");
+
+	if (!drm_core_check_feature(dev, DRIVER_SG))
+		return -EINVAL;
+
+	if (dev->sg)
+		return -EINVAL;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+	DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
+
+	entry->pages = pages;
+	entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
+	if (!entry->pagelist) {
+		kfree(entry);
+		return -ENOMEM;
+	}
+
+	entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
+	if (!entry->busaddr) {
+		kfree(entry->pagelist);
+		kfree(entry);
+		return -ENOMEM;
+	}
+
+	entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
+	if (!entry->virtual) {
+		kfree(entry->busaddr);
+		kfree(entry->pagelist);
+		kfree(entry);
+		return -ENOMEM;
+	}
+
+	/* This also forces the mapping of COW pages, so our page list
+	 * will be valid.  Please don't remove it...
+	 */
+	memset(entry->virtual, 0, pages << PAGE_SHIFT);
+
+	entry->handle = ScatterHandle((unsigned long)entry->virtual);
+
+	DRM_DEBUG("handle  = %08lx\n", entry->handle);
+	DRM_DEBUG("virtual = %p\n", entry->virtual);
+
+	for (i = (unsigned long)entry->virtual, j = 0; j < pages;
+	     i += PAGE_SIZE, j++) {
+		entry->pagelist[j] = vmalloc_to_page((void *)i);
+		if (!entry->pagelist[j])
+			goto failed;
+		SetPageReserved(entry->pagelist[j]);
+	}
+
+	request->handle = entry->handle;
+
+	dev->sg = entry;
+
+#if DEBUG_SCATTER
+	/* Verify that each page points to its virtual address, and vice
+	 * versa.
+	 */
+	{
+		int error = 0;
+
+		for (i = 0; i < pages; i++) {
+			unsigned long *tmp;
+
+			tmp = page_address(entry->pagelist[i]);
+			for (j = 0;
+			     j < PAGE_SIZE / sizeof(unsigned long);
+			     j++, tmp++) {
+				*tmp = 0xcafebabe;
+			}
+			tmp = (unsigned long *)((u8 *) entry->virtual +
+						(PAGE_SIZE * i));
+			for (j = 0;
+			     j < PAGE_SIZE / sizeof(unsigned long);
+			     j++, tmp++) {
+				if (*tmp != 0xcafebabe && error == 0) {
+					error = 1;
+					DRM_ERROR("Scatter allocation error, "
+						  "pagelist does not match "
+						  "virtual mapping\n");
+				}
+			}
+			tmp = page_address(entry->pagelist[i]);
+			for (j = 0;
+			     j < PAGE_SIZE / sizeof(unsigned long);
+			     j++, tmp++) {
+				*tmp = 0;
+			}
+		}
+		if (error == 0)
+			DRM_ERROR("Scatter allocation matches pagelist\n");
+	}
+#endif
+
+	return 0;
+
+failed:
+	drm_sg_cleanup(entry);
+	return -ENOMEM;
+}
+
+int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_scatter_gather *request = data;
+
+	return drm_sg_alloc(dev, request);
+}
+
+int drm_sg_free(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_scatter_gather *request = data;
+	struct drm_sg_mem *entry;
+
+	if (!drm_core_check_feature(dev, DRIVER_SG))
+		return -EINVAL;
+
+	entry = dev->sg;
+	dev->sg = NULL;
+
+	if (!entry || entry->handle != request->handle)
+		return -EINVAL;
+
+	DRM_DEBUG("virtual  = %p\n", entry->virtual);
+
+	drm_sg_cleanup(entry);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/drm_stub.c b/linux-imx/drivers/gpu/drm/drm_stub.c
new file mode 100644
index 0000000..16f3ec5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_stub.c
@@ -0,0 +1,509 @@
+/**
+ * \file drm_stub.c
+ * Stub support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
+ *
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_core.h>
+
+unsigned int drm_debug = 0;	/* 1 to enable debug output */
+EXPORT_SYMBOL(drm_debug);
+
+unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
+EXPORT_SYMBOL(drm_vblank_offdelay);
+
+unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
+EXPORT_SYMBOL(drm_timestamp_precision);
+
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
+
+MODULE_AUTHOR(CORE_AUTHOR);
+MODULE_DESCRIPTION(CORE_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
+
+module_param_named(debug, drm_debug, int, 0600);
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
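+
+/*
+ * Usage sketch: since the parameters above are registered with mode 0600,
+ * they can be set at load time or toggled at runtime via sysfs, e.g.:
+ *
+ *	modprobe drm debug=1
+ *	echo 1 > /sys/module/drm/parameters/debug
+ */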
+
+struct idr drm_minors_idr;
+
+struct class *drm_class;
+struct proc_dir_entry *drm_proc_root;
+struct dentry *drm_debugfs_root;
+
+int drm_err(const char *func, const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	int r;
+
+	va_start(args, format);
+
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
+
+	va_end(args);
+
+	return r;
+}
+EXPORT_SYMBOL(drm_err);
+
+void drm_ut_debug_printk(unsigned int request_level,
+			 const char *prefix,
+			 const char *function_name,
+			 const char *format, ...)
+{
+	va_list args;
+
+	if (drm_debug & request_level) {
+		if (function_name)
+			printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
+		va_start(args, format);
+		vprintk(format, args);
+		va_end(args);
+	}
+}
+EXPORT_SYMBOL(drm_ut_debug_printk);
+
+static int drm_minor_get_id(struct drm_device *dev, int type)
+{
+	int ret;
+	int base = 0, limit = 63;
+
+	if (type == DRM_MINOR_CONTROL) {
+		base += 64;
+		limit = base + 127;
+	} else if (type == DRM_MINOR_RENDER) {
+		base += 128;
+		limit = base + 255;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret == -ENOSPC ? -EINVAL : ret;
+}
+
+struct drm_master *drm_master_create(struct drm_minor *minor)
+{
+	struct drm_master *master;
+
+	master = kzalloc(sizeof(*master), GFP_KERNEL);
+	if (!master)
+		return NULL;
+
+	kref_init(&master->refcount);
+	spin_lock_init(&master->lock.spinlock);
+	init_waitqueue_head(&master->lock.lock_queue);
+	drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
+	INIT_LIST_HEAD(&master->magicfree);
+	master->minor = minor;
+
+	list_add_tail(&master->head, &minor->master_list);
+
+	return master;
+}
+
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+	kref_get(&master->refcount);
+	return master;
+}
+EXPORT_SYMBOL(drm_master_get);
+
+static void drm_master_destroy(struct kref *kref)
+{
+	struct drm_master *master = container_of(kref, struct drm_master, refcount);
+	struct drm_magic_entry *pt, *next;
+	struct drm_device *dev = master->minor->dev;
+	struct drm_map_list *r_list, *list_temp;
+
+	list_del(&master->head);
+
+	if (dev->driver->master_destroy)
+		dev->driver->master_destroy(dev, master);
+
+	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+		if (r_list->master == master) {
+			drm_rmmap_locked(dev, r_list->map);
+			r_list = NULL;
+		}
+	}
+
+	if (master->unique) {
+		kfree(master->unique);
+		master->unique = NULL;
+		master->unique_len = 0;
+	}
+
+	kfree(dev->devname);
+	dev->devname = NULL;
+
+	list_for_each_entry_safe(pt, next, &master->magicfree, head) {
+		list_del(&pt->head);
+		drm_ht_remove_item(&master->magiclist, &pt->hash_item);
+		kfree(pt);
+	}
+
+	drm_ht_remove(&master->magiclist);
+
+	kfree(master);
+}
+
+void drm_master_put(struct drm_master **master)
+{
+	kref_put(&(*master)->refcount, drm_master_destroy);
+	*master = NULL;
+}
+EXPORT_SYMBOL(drm_master_put);
+
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	int ret;
+
+	if (file_priv->is_master)
+		return 0;
+
+	if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
+		return -EINVAL;
+
+	if (!file_priv->master)
+		return -EINVAL;
+
+	if (file_priv->minor->master)
+		return -EINVAL;
+
+	mutex_lock(&dev->struct_mutex);
+	file_priv->minor->master = drm_master_get(file_priv->master);
+	file_priv->is_master = 1;
+	if (dev->driver->master_set) {
+		ret = dev->driver->master_set(dev, file_priv, false);
+		if (unlikely(ret != 0)) {
+			file_priv->is_master = 0;
+			drm_master_put(&file_priv->minor->master);
+		}
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	if (!file_priv->is_master)
+		return -EINVAL;
+
+	if (!file_priv->minor->master)
+		return -EINVAL;
+
+	mutex_lock(&dev->struct_mutex);
+	if (dev->driver->master_drop)
+		dev->driver->master_drop(dev, file_priv, false);
+	drm_master_put(&file_priv->minor->master);
+	file_priv->is_master = 0;
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+int drm_fill_in_dev(struct drm_device *dev,
+			   const struct pci_device_id *ent,
+			   struct drm_driver *driver)
+{
+	int retcode;
+
+	INIT_LIST_HEAD(&dev->filelist);
+	INIT_LIST_HEAD(&dev->ctxlist);
+	INIT_LIST_HEAD(&dev->vmalist);
+	INIT_LIST_HEAD(&dev->maplist);
+	INIT_LIST_HEAD(&dev->vblank_event_list);
+
+	spin_lock_init(&dev->count_lock);
+	spin_lock_init(&dev->event_lock);
+	mutex_init(&dev->struct_mutex);
+	mutex_init(&dev->ctxlist_mutex);
+
+	if (drm_ht_create(&dev->map_hash, 12)) {
+		return -ENOMEM;
+	}
+
+	/* the DRM has 6 basic counters */
+	dev->counters = 6;
+	dev->types[0] = _DRM_STAT_LOCK;
+	dev->types[1] = _DRM_STAT_OPENS;
+	dev->types[2] = _DRM_STAT_CLOSES;
+	dev->types[3] = _DRM_STAT_IOCTLS;
+	dev->types[4] = _DRM_STAT_LOCKS;
+	dev->types[5] = _DRM_STAT_UNLOCKS;
+
+	dev->driver = driver;
+
+	if (dev->driver->bus->agp_init) {
+		retcode = dev->driver->bus->agp_init(dev);
+		if (retcode)
+			goto error_out_unreg;
+	}
+
+	retcode = drm_ctxbitmap_init(dev);
+	if (retcode) {
+		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+		goto error_out_unreg;
+	}
+
+	if (driver->driver_features & DRIVER_GEM) {
+		retcode = drm_gem_init(dev);
+		if (retcode) {
+			DRM_ERROR("Cannot initialize graphics execution "
+				  "manager (GEM)\n");
+			goto error_out_unreg;
+		}
+	}
+
+	return 0;
+
+error_out_unreg:
+	drm_lastclose(dev);
+	return retcode;
+}
+EXPORT_SYMBOL(drm_fill_in_dev);
+
+
+/**
+ * Get a secondary minor number.
+ *
+ * \param dev device data structure
+ * \param minor structure to hold the assigned minor
+ * \return negative number on failure.
+ *
+ * Search an empty entry and initialize it to the given parameters, and
+ * create the proc init entry via proc_init(). This routine assigns
+ * minor numbers to secondary heads of multi-headed cards
+ */
+int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+{
+	struct drm_minor *new_minor;
+	int ret;
+	int minor_id;
+
+	DRM_DEBUG("\n");
+
+	minor_id = drm_minor_get_id(dev, type);
+	if (minor_id < 0)
+		return minor_id;
+
+	new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
+	if (!new_minor) {
+		ret = -ENOMEM;
+		goto err_idr;
+	}
+
+	new_minor->type = type;
+	new_minor->device = MKDEV(DRM_MAJOR, minor_id);
+	new_minor->dev = dev;
+	new_minor->index = minor_id;
+	INIT_LIST_HEAD(&new_minor->master_list);
+
+	idr_replace(&drm_minors_idr, new_minor, minor_id);
+
+	if (type == DRM_MINOR_LEGACY) {
+		ret = drm_proc_init(new_minor, drm_proc_root);
+		if (ret) {
+			DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
+			goto err_mem;
+		}
+	} else
+		new_minor->proc_root = NULL;
+
+#if defined(CONFIG_DEBUG_FS)
+	ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
+	if (ret) {
+		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
+		goto err_g2;
+	}
+#endif
+
+	ret = drm_sysfs_device_add(new_minor);
+	if (ret) {
+		printk(KERN_ERR
+		       "DRM: Error sysfs_device_add.\n");
+		goto err_g2;
+	}
+	*minor = new_minor;
+
+	DRM_DEBUG("new minor assigned %d\n", minor_id);
+	return 0;
+
+err_g2:
+	if (new_minor->type == DRM_MINOR_LEGACY)
+		drm_proc_cleanup(new_minor, drm_proc_root);
+err_mem:
+	kfree(new_minor);
+err_idr:
+	idr_remove(&drm_minors_idr, minor_id);
+	*minor = NULL;
+	return ret;
+}
+EXPORT_SYMBOL(drm_get_minor);
+
+/**
+ * Put a secondary minor number.
+ *
+ * \param minor_p pointer to the minor to release
+ * \return always zero
+ *
+ * Cleans up the proc resources. Not legal for this to be the
+ * last minor released.
+ *
+ */
+int drm_put_minor(struct drm_minor **minor_p)
+{
+	struct drm_minor *minor = *minor_p;
+
+	DRM_DEBUG("release secondary minor %d\n", minor->index);
+
+	if (minor->type == DRM_MINOR_LEGACY)
+		drm_proc_cleanup(minor, drm_proc_root);
+#if defined(CONFIG_DEBUG_FS)
+	drm_debugfs_cleanup(minor);
+#endif
+
+	drm_sysfs_device_remove(minor);
+
+	idr_remove(&drm_minors_idr, minor->index);
+
+	kfree(minor);
+	*minor_p = NULL;
+	return 0;
+}
+EXPORT_SYMBOL(drm_put_minor);
+
+static void drm_unplug_minor(struct drm_minor *minor)
+{
+	drm_sysfs_device_remove(minor);
+}
+
+/**
+ * Called via drm_exit() at module unload time or when pci device is
+ * unplugged.
+ *
+ * Cleans up the DRM device, calling drm_lastclose().
+ *
+ */
+void drm_put_dev(struct drm_device *dev)
+{
+	struct drm_driver *driver;
+	struct drm_map_list *r_list, *list_temp;
+
+	DRM_DEBUG("\n");
+
+	if (!dev) {
+		DRM_ERROR("cleanup called no dev\n");
+		return;
+	}
+	driver = dev->driver;
+
+	drm_lastclose(dev);
+
+	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->agp_mtrr >= 0) {
+		int retval;
+		retval = mtrr_del(dev->agp->agp_mtrr,
+				  dev->agp->agp_info.aper_base,
+				  dev->agp->agp_info.aper_size * 1024 * 1024);
+		DRM_DEBUG("mtrr_del=%d\n", retval);
+	}
+
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
+
+	if (drm_core_has_AGP(dev) && dev->agp) {
+		kfree(dev->agp);
+		dev->agp = NULL;
+	}
+
+	drm_vblank_cleanup(dev);
+
+	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+		drm_rmmap(dev, r_list->map);
+	drm_ht_remove(&dev->map_hash);
+
+	drm_ctxbitmap_cleanup(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_put_minor(&dev->control);
+
+	if (driver->driver_features & DRIVER_GEM)
+		drm_gem_destroy(dev);
+
+	drm_put_minor(&dev->primary);
+
+	list_del(&dev->driver_item);
+	kfree(dev->devname);
+	kfree(dev);
+}
+EXPORT_SYMBOL(drm_put_dev);
+
+void drm_unplug_dev(struct drm_device *dev)
+{
+	/* for a USB device */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_unplug_minor(dev->control);
+	drm_unplug_minor(dev->primary);
+
+	mutex_lock(&drm_global_mutex);
+
+	drm_device_set_unplugged(dev);
+
+	if (dev->open_count == 0) {
+		drm_put_dev(dev);
+	}
+	mutex_unlock(&drm_global_mutex);
+}
+EXPORT_SYMBOL(drm_unplug_dev);
diff --git a/linux-imx/drivers/gpu/drm/drm_sysfs.c b/linux-imx/drivers/gpu/drm/drm_sysfs.c
new file mode 100644
index 0000000..0229665
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_sysfs.c
@@ -0,0 +1,570 @@
+/*
+ * drm_sysfs.c - Modifications to drm_sysfs_class.c to support
+ *               extra sysfs attribute from DRM. Normal drm_sysfs_class
+ *               does not allow adding attributes.
+ *
+ * Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2003-2004 IBM Corp.
+ *
+ * This file is released under the GPLv2
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <drm/drm_sysfs.h>
+#include <drm/drm_core.h>
+#include <drm/drmP.h>
+
+#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
+#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
+
+static struct device_type drm_sysfs_device_minor = {
+	.name = "drm_minor"
+};
+
+/**
+ * drm_class_suspend - DRM class suspend hook
+ * @dev: Linux device to suspend
+ * @state: power state to enter
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its suspend hook, if present.
+ */
+static int drm_class_suspend(struct device *dev, pm_message_t state)
+{
+	if (dev->type == &drm_sysfs_device_minor) {
+		struct drm_minor *drm_minor = to_drm_minor(dev);
+		struct drm_device *drm_dev = drm_minor->dev;
+
+		if (drm_minor->type == DRM_MINOR_LEGACY &&
+		    !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
+		    drm_dev->driver->suspend)
+			return drm_dev->driver->suspend(drm_dev, state);
+	}
+	return 0;
+}
+
+/**
+ * drm_class_resume - DRM class resume hook
+ * @dev: Linux device to resume
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its resume hook, if present.
+ */
+static int drm_class_resume(struct device *dev)
+{
+	if (dev->type == &drm_sysfs_device_minor) {
+		struct drm_minor *drm_minor = to_drm_minor(dev);
+		struct drm_device *drm_dev = drm_minor->dev;
+
+		if (drm_minor->type == DRM_MINOR_LEGACY &&
+		    !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
+		    drm_dev->driver->resume)
+			return drm_dev->driver->resume(drm_dev);
+	}
+	return 0;
+}
+
+static char *drm_devnode(struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
+}
+
+static CLASS_ATTR_STRING(version, S_IRUGO,
+		CORE_NAME " "
+		__stringify(CORE_MAJOR) "."
+		__stringify(CORE_MINOR) "."
+		__stringify(CORE_PATCHLEVEL) " "
+		CORE_DATE);
+
+/**
+ * drm_sysfs_create - create a struct drm_sysfs_class structure
+ * @owner: pointer to the module that is to "own" this struct drm_sysfs_class
+ * @name: pointer to a string for the name of this class.
+ *
+ * This is used to create DRM class pointer that can then be used
+ * in calls to drm_sysfs_device_add().
+ *
+ * Note, the pointer created here is to be destroyed when finished by making a
+ * call to drm_sysfs_destroy().
+ */
+struct class *drm_sysfs_create(struct module *owner, char *name)
+{
+	struct class *class;
+	int err;
+
+	class = class_create(owner, name);
+	if (IS_ERR(class)) {
+		err = PTR_ERR(class);
+		goto err_out;
+	}
+
+	class->suspend = drm_class_suspend;
+	class->resume = drm_class_resume;
+
+	err = class_create_file(class, &class_attr_version.attr);
+	if (err)
+		goto err_out_class;
+
+	class->devnode = drm_devnode;
+
+	return class;
+
+err_out_class:
+	class_destroy(class);
+err_out:
+	return ERR_PTR(err);
+}
+
+/**
+ * drm_sysfs_destroy - destroys DRM class
+ *
+ * Destroy the DRM device class.
+ */
+void drm_sysfs_destroy(void)
+{
+	if ((drm_class == NULL) || (IS_ERR(drm_class)))
+		return;
+	class_remove_file(drm_class, &class_attr_version.attr);
+	class_destroy(drm_class);
+	drm_class = NULL;
+}
+
+/**
+ * drm_sysfs_device_release - do nothing
+ * @dev: Linux device
+ *
+ * Normally, this would free the DRM device associated with @dev, along
+ * with cleaning up any other stuff.  But we do that in the DRM core, so
+ * this function can just return and hope that the core does its job.
+ */
+static void drm_sysfs_device_release(struct device *dev)
+{
+	memset(dev, 0, sizeof(struct device));
+	return;
+}
+
+/*
+ * Connector properties
+ */
+static ssize_t status_show(struct device *device,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct drm_connector *connector = to_drm_connector(device);
+	enum drm_connector_status status;
+	int ret;
+
+	ret = mutex_lock_interruptible(&connector->dev->mode_config.mutex);
+	if (ret)
+		return ret;
+
+	status = connector->funcs->detect(connector, true);
+	mutex_unlock(&connector->dev->mode_config.mutex);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			drm_get_connector_status_name(status));
+}
+
+static ssize_t dpms_show(struct device *device,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct drm_connector *connector = to_drm_connector(device);
+	struct drm_device *dev = connector->dev;
+	uint64_t dpms_status;
+	int ret;
+
+	ret = drm_object_property_get_value(&connector->base,
+					    dev->mode_config.dpms_property,
+					    &dpms_status);
+	if (ret)
+		return 0;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			drm_get_dpms_name((int)dpms_status));
+}
+
+static ssize_t enabled_show(struct device *device,
+			    struct device_attribute *attr,
+			   char *buf)
+{
+	struct drm_connector *connector = to_drm_connector(device);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
+			"disabled");
+}
+
+static ssize_t edid_show(struct file *filp, struct kobject *kobj,
+			 struct bin_attribute *attr, char *buf, loff_t off,
+			 size_t count)
+{
+	struct device *connector_dev = container_of(kobj, struct device, kobj);
+	struct drm_connector *connector = to_drm_connector(connector_dev);
+	unsigned char *edid;
+	size_t size;
+
+	if (!connector->edid_blob_ptr)
+		return 0;
+
+	edid = connector->edid_blob_ptr->data;
+	size = connector->edid_blob_ptr->length;
+	if (!edid)
+		return 0;
+
+	if (off >= size)
+		return 0;
+
+	if (off + count > size)
+		count = size - off;
+	memcpy(buf, edid + off, count);
+
+	return count;
+}
+
+static ssize_t modes_show(struct device *device,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct drm_connector *connector = to_drm_connector(device);
+	struct drm_display_mode *mode;
+	int written = 0;
+
+	list_for_each_entry(mode, &connector->modes, head) {
+		written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
+				    mode->name);
+	}
+
+	return written;
+}
+
+static ssize_t subconnector_show(struct device *device,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct drm_connector *connector = to_drm_connector(device);
+	struct drm_device *dev = connector->dev;
+	struct drm_property *prop = NULL;
+	uint64_t subconnector;
+	int is_tv = 0;
+	int ret;
+
+	switch (connector->connector_type) {
+		case DRM_MODE_CONNECTOR_DVII:
+			prop = dev->mode_config.dvi_i_subconnector_property;
+			break;
+		case DRM_MODE_CONNECTOR_Composite:
+		case DRM_MODE_CONNECTOR_SVIDEO:
+		case DRM_MODE_CONNECTOR_Component:
+		case DRM_MODE_CONNECTOR_TV:
+			prop = dev->mode_config.tv_subconnector_property;
+			is_tv = 1;
+			break;
+		default:
+			DRM_ERROR("Wrong connector type for this property\n");
+			return 0;
+	}
+
+	if (!prop) {
+		DRM_ERROR("Unable to find subconnector property\n");
+		return 0;
+	}
+
+	ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
+	if (ret)
+		return 0;
+
+	return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
+			drm_get_tv_subconnector_name((int)subconnector) :
+			drm_get_dvi_i_subconnector_name((int)subconnector));
+}
+
+static ssize_t select_subconnector_show(struct device *device,
+			   struct device_attribute *attr,
+			   char *buf)
+{
+	struct drm_connector *connector = to_drm_connector(device);
+	struct drm_device *dev = connector->dev;
+	struct drm_property *prop = NULL;
+	uint64_t subconnector;
+	int is_tv = 0;
+	int ret;
+
+	switch (connector->connector_type) {
+		case DRM_MODE_CONNECTOR_DVII:
+			prop = dev->mode_config.dvi_i_select_subconnector_property;
+			break;
+		case DRM_MODE_CONNECTOR_Composite:
+		case DRM_MODE_CONNECTOR_SVIDEO:
+		case DRM_MODE_CONNECTOR_Component:
+		case DRM_MODE_CONNECTOR_TV:
+			prop = dev->mode_config.tv_select_subconnector_property;
+			is_tv = 1;
+			break;
+		default:
+			DRM_ERROR("Wrong connector type for this property\n");
+			return 0;
+	}
+
+	if (!prop) {
+		DRM_ERROR("Unable to find select subconnector property\n");
+		return 0;
+	}
+
+	ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
+	if (ret)
+		return 0;
+
+	return snprintf(buf, PAGE_SIZE, "%s", is_tv ?
+			drm_get_tv_select_name((int)subconnector) :
+			drm_get_dvi_i_select_name((int)subconnector));
+}
+
+static struct device_attribute connector_attrs[] = {
+	__ATTR_RO(status),
+	__ATTR_RO(enabled),
+	__ATTR_RO(dpms),
+	__ATTR_RO(modes),
+};
+
+/* These attributes are for both DVI-I connectors and all types of tv-out. */
+static struct device_attribute connector_attrs_opt1[] = {
+	__ATTR_RO(subconnector),
+	__ATTR_RO(select_subconnector),
+};
+
+static struct bin_attribute edid_attr = {
+	.attr.name = "edid",
+	.attr.mode = 0444,
+	.size = 0,
+	.read = edid_show,
+};
+
+/**
+ * drm_sysfs_connector_add - add a connector to sysfs
+ * @connector: connector to add
+ *
+ * Create a connector device in sysfs, along with its associated connector
+ * properties (so far, connection status, dpms, mode list & edid) and
+ * generate a hotplug event so userspace knows there's a new connector
+ * available.
+ *
+ * Note:
+ * This routine should only be called *once* for each registered connector.
+ * A second call for an already registered connector will trigger the BUG_ON
+ * below.
+ */
+int drm_sysfs_connector_add(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	int attr_cnt = 0;
+	int opt_cnt = 0;
+	int i;
+	int ret;
+
+	/* We shouldn't get called more than once for the same connector */
+	BUG_ON(device_is_registered(&connector->kdev));
+
+	connector->kdev.parent = &dev->primary->kdev;
+	connector->kdev.class = drm_class;
+	connector->kdev.release = drm_sysfs_device_release;
+
+	DRM_DEBUG("adding \"%s\" to sysfs\n",
+		  drm_get_connector_name(connector));
+
+	dev_set_name(&connector->kdev, "card%d-%s",
+		     dev->primary->index, drm_get_connector_name(connector));
+	ret = device_register(&connector->kdev);
+
+	if (ret) {
+		DRM_ERROR("failed to register connector device: %d\n", ret);
+		goto out;
+	}
+
+	/* Standard attributes */
+
+	for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
+		ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
+		if (ret)
+			goto err_out_files;
+	}
+
+	/* Optional attributes */
+	/*
+	 * In the long run it may be a good idea to make one set of
+	 * optionals per connector type.
+	 */
+	switch (connector->connector_type) {
+		case DRM_MODE_CONNECTOR_DVII:
+		case DRM_MODE_CONNECTOR_Composite:
+		case DRM_MODE_CONNECTOR_SVIDEO:
+		case DRM_MODE_CONNECTOR_Component:
+		case DRM_MODE_CONNECTOR_TV:
+			for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
+				ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
+				if (ret)
+					goto err_out_files;
+			}
+			break;
+		default:
+			break;
+	}
+
+	ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
+	if (ret)
+		goto err_out_files;
+
+	/* Let userspace know we have a new connector */
+	drm_sysfs_hotplug_event(dev);
+
+	return 0;
+
+err_out_files:
+	for (i = 0; i < opt_cnt; i++)
+		device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
+	for (i = 0; i < attr_cnt; i++)
+		device_remove_file(&connector->kdev, &connector_attrs[i]);
+	device_unregister(&connector->kdev);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(drm_sysfs_connector_add);
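+
+/*
+ * For a connector named "HDMI-A-1" on minor 0, a successful call above
+ * typically results in sysfs entries such as:
+ *
+ *	/sys/class/drm/card0-HDMI-A-1/status
+ *	/sys/class/drm/card0-HDMI-A-1/enabled
+ *	/sys/class/drm/card0-HDMI-A-1/dpms
+ *	/sys/class/drm/card0-HDMI-A-1/modes
+ *	/sys/class/drm/card0-HDMI-A-1/edid
+ */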
+
+/**
+ * drm_sysfs_connector_remove - remove a connector device from sysfs
+ * @connector: connector to remove
+ *
+ * Remove @connector and its associated attributes from sysfs.  Note that
+ * the device model core will take care of sending the "remove" uevent
+ * at this time, so we don't need to do it.
+ *
+ * Note:
+ * This routine should only be called if the connector was previously
+ * successfully registered.  If @connector hasn't been registered yet,
+ * you'll likely see a panic somewhere deep in sysfs code when called.
+ */
+void drm_sysfs_connector_remove(struct drm_connector *connector)
+{
+	int i;
+
+	if (!connector->kdev.parent)
+		return;
+	DRM_DEBUG("removing \"%s\" from sysfs\n",
+		  drm_get_connector_name(connector));
+
+	for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
+		device_remove_file(&connector->kdev, &connector_attrs[i]);
+	sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
+	device_unregister(&connector->kdev);
+	connector->kdev.parent = NULL;
+}
+EXPORT_SYMBOL(drm_sysfs_connector_remove);
+
+/**
+ * drm_sysfs_hotplug_event - generate a DRM uevent
+ * @dev: DRM device
+ *
+ * Send a uevent for the DRM device specified by @dev.  Currently we only
+ * set HOTPLUG=1 in the uevent environment, but this could be expanded to
+ * deal with other types of events.
+ */
+void drm_sysfs_hotplug_event(struct drm_device *dev)
+{
+	char *event_string = "HOTPLUG=1";
+	char *envp[] = { event_string, NULL };
+
+	DRM_DEBUG("generating hotplug event\n");
+
+	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(drm_sysfs_hotplug_event);
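+
+/*
+ * Userspace can react to this uevent with a udev rule; a minimal sketch
+ * (the script path is hypothetical; display servers usually listen for
+ * the event directly):
+ *
+ *	ACTION=="change", SUBSYSTEM=="drm", ENV{HOTPLUG}=="1", \
+ *		RUN+="/usr/local/bin/drm-hotplug.sh"
+ */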
+
+/**
+ * drm_sysfs_device_add - adds a class device to sysfs for a character driver
+ * @minor: minor to add
+ *
+ * Add a DRM minor to the DRM device model class.  We use the minor's parent
+ * device as the parent for the Linux device, and make sure it has a file
+ * containing the driver we're using (for userspace compatibility).
+ */
+int drm_sysfs_device_add(struct drm_minor *minor)
+{
+	int err;
+	char *minor_str;
+
+	minor->kdev.parent = minor->dev->dev;
+
+	minor->kdev.class = drm_class;
+	minor->kdev.release = drm_sysfs_device_release;
+	minor->kdev.devt = minor->device;
+	minor->kdev.type = &drm_sysfs_device_minor;
+	if (minor->type == DRM_MINOR_CONTROL)
+		minor_str = "controlD%d";
+	else if (minor->type == DRM_MINOR_RENDER)
+		minor_str = "renderD%d";
+	else
+		minor_str = "card%d";
+
+	dev_set_name(&minor->kdev, minor_str, minor->index);
+
+	err = device_register(&minor->kdev);
+	if (err) {
+		DRM_ERROR("device add failed: %d\n", err);
+		goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	return err;
+}
+
+/**
+ * drm_sysfs_device_remove - remove a DRM minor from sysfs
+ * @minor: minor to remove
+ *
+ * This call unregisters and cleans up a class device that was created with a
+ * call to drm_sysfs_device_add()
+ */
+void drm_sysfs_device_remove(struct drm_minor *minor)
+{
+	if (minor->kdev.parent)
+		device_unregister(&minor->kdev);
+	minor->kdev.parent = NULL;
+}
+
+/**
+ * drm_class_device_register - Register a struct device in the drm class.
+ *
+ * @dev: pointer to struct device to register.
+ *
+ * @dev should have all relevant members pre-filled with the exception
+ * of the class member. In particular, the device_type member must
+ * be set.
+ */
+
+int drm_class_device_register(struct device *dev)
+{
+	if (!drm_class || IS_ERR(drm_class))
+		return -ENOENT;
+
+	dev->class = drm_class;
+	return device_register(dev);
+}
+EXPORT_SYMBOL_GPL(drm_class_device_register);
+
+void drm_class_device_unregister(struct device *dev)
+{
+	return device_unregister(dev);
+}
+EXPORT_SYMBOL_GPL(drm_class_device_unregister);
diff --git a/linux-imx/drivers/gpu/drm/drm_trace.h b/linux-imx/drivers/gpu/drm/drm_trace.h
new file mode 100644
index 0000000..03ea964
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_trace.h
@@ -0,0 +1,66 @@
+#if !defined(_DRM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DRM_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE drm_trace
+
+TRACE_EVENT(drm_vblank_event,
+	    TP_PROTO(int crtc, unsigned int seq),
+	    TP_ARGS(crtc, seq),
+	    TP_STRUCT__entry(
+		    __field(int, crtc)
+		    __field(unsigned int, seq)
+		    ),
+	    TP_fast_assign(
+		    __entry->crtc = crtc;
+		    __entry->seq = seq;
+		    ),
+	    TP_printk("crtc=%d, seq=%d", __entry->crtc, __entry->seq)
+);
+
+TRACE_EVENT(drm_vblank_event_queued,
+	    TP_PROTO(pid_t pid, int crtc, unsigned int seq),
+	    TP_ARGS(pid, crtc, seq),
+	    TP_STRUCT__entry(
+		    __field(pid_t, pid)
+		    __field(int, crtc)
+		    __field(unsigned int, seq)
+		    ),
+	    TP_fast_assign(
+		    __entry->pid = pid;
+		    __entry->crtc = crtc;
+		    __entry->seq = seq;
+		    ),
+	    TP_printk("pid=%d, crtc=%d, seq=%d", __entry->pid, __entry->crtc, \
+		      __entry->seq)
+);
+
+TRACE_EVENT(drm_vblank_event_delivered,
+	    TP_PROTO(pid_t pid, int crtc, unsigned int seq),
+	    TP_ARGS(pid, crtc, seq),
+	    TP_STRUCT__entry(
+		    __field(pid_t, pid)
+		    __field(int, crtc)
+		    __field(unsigned int, seq)
+		    ),
+	    TP_fast_assign(
+		    __entry->pid = pid;
+		    __entry->crtc = crtc;
+		    __entry->seq = seq;
+		    ),
+	    TP_printk("pid=%d, crtc=%d, seq=%d", __entry->pid, __entry->crtc, \
+		      __entry->seq)
+);
+
+#endif /* _DRM_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
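+
+/*
+ * Once the module is loaded, the events above can be observed through
+ * tracefs, e.g.:
+ *
+ *	echo 1 > /sys/kernel/debug/tracing/events/drm/drm_vblank_event/enable
+ *	cat /sys/kernel/debug/tracing/trace
+ */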
diff --git a/linux-imx/drivers/gpu/drm/drm_trace_points.c b/linux-imx/drivers/gpu/drm/drm_trace_points.c
new file mode 100644
index 0000000..3bbc4de
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_trace_points.c
@@ -0,0 +1,4 @@
+#include <drm/drmP.h>
+
+#define CREATE_TRACE_POINTS
+#include "drm_trace.h"
diff --git a/linux-imx/drivers/gpu/drm/drm_usb.c b/linux-imx/drivers/gpu/drm/drm_usb.c
new file mode 100644
index 0000000..34a156f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_usb.c
@@ -0,0 +1,120 @@
+#include <drm/drmP.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+
+int drm_get_usb_dev(struct usb_interface *interface,
+		    const struct usb_device_id *id,
+		    struct drm_driver *driver)
+{
+	struct drm_device *dev;
+	struct usb_device *usbdev;
+	int ret;
+
+	DRM_DEBUG("\n");
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	usbdev = interface_to_usbdev(interface);
+	dev->usbdev = usbdev;
+	dev->dev = &interface->dev;
+
+	mutex_lock(&drm_global_mutex);
+
+	ret = drm_fill_in_dev(dev, NULL, driver);
+	if (ret) {
+		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+		goto err_g1;
+	}
+
+	usb_set_intfdata(interface, dev);
+	ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+	if (ret)
+		goto err_g1;
+
+	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+	if (ret)
+		goto err_g2;
+
+	if (dev->driver->load) {
+		ret = dev->driver->load(dev, 0);
+		if (ret)
+			goto err_g3;
+	}
+
+	/* setup the grouping for the legacy output */
+	ret = drm_mode_group_init_legacy_group(dev,
+					       &dev->primary->mode_group);
+	if (ret)
+		goto err_g3;
+
+	list_add_tail(&dev->driver_item, &driver->device_list);
+
+	mutex_unlock(&drm_global_mutex);
+
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+		 driver->name, driver->major, driver->minor, driver->patchlevel,
+		 driver->date, dev->primary->index);
+
+	return 0;
+
+err_g3:
+	drm_put_minor(&dev->primary);
+err_g2:
+	drm_put_minor(&dev->control);
+err_g1:
+	kfree(dev);
+	mutex_unlock(&drm_global_mutex);
+	return ret;
+}
+EXPORT_SYMBOL(drm_get_usb_dev);
+
+static int drm_usb_get_irq(struct drm_device *dev)
+{
+	return 0;
+}
+
+static const char *drm_usb_get_name(struct drm_device *dev)
+{
+	return "USB";
+}
+
+static int drm_usb_set_busid(struct drm_device *dev,
+			       struct drm_master *master)
+{
+	return 0;
+}
+
+static struct drm_bus drm_usb_bus = {
+	.bus_type = DRIVER_BUS_USB,
+	.get_irq = drm_usb_get_irq,
+	.get_name = drm_usb_get_name,
+	.set_busid = drm_usb_set_busid,
+};
+
+int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
+{
+	int res;
+	DRM_DEBUG("\n");
+
+	INIT_LIST_HEAD(&driver->device_list);
+	driver->kdriver.usb = udriver;
+	driver->bus = &drm_usb_bus;
+
+	res = usb_register(udriver);
+	return res;
+}
+EXPORT_SYMBOL(drm_usb_init);
+
+void drm_usb_exit(struct drm_driver *driver,
+		  struct usb_driver *udriver)
+{
+	usb_deregister(udriver);
+}
+EXPORT_SYMBOL(drm_usb_exit);
+
+MODULE_AUTHOR("David Airlie");
+MODULE_DESCRIPTION("USB DRM support");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/linux-imx/drivers/gpu/drm/drm_vm.c b/linux-imx/drivers/gpu/drm/drm_vm.c
new file mode 100644
index 0000000..1d4f7c9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/drm_vm.c
@@ -0,0 +1,676 @@
+/**
+ * \file drm_vm.c
+ * Memory mapping for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <linux/export.h>
+#if defined(__ia64__)
+#include <linux/efi.h>
+#include <linux/slab.h>
+#endif
+
+static void drm_vm_open(struct vm_area_struct *vma);
+static void drm_vm_close(struct vm_area_struct *vma);
+
+static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+		pgprot_val(tmp) |= _PAGE_PCD;
+		pgprot_val(tmp) &= ~_PAGE_PWT;
+	}
+#elif defined(__powerpc__)
+	pgprot_val(tmp) |= _PAGE_NO_CACHE;
+	if (map_type == _DRM_REGISTERS)
+		pgprot_val(tmp) |= _PAGE_GUARDED;
+#elif defined(__ia64__)
+	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+				    vma->vm_start))
+		tmp = pgprot_writecombine(tmp);
+	else
+		tmp = pgprot_noncached(tmp);
+#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
+	tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
+
+static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+	tmp |= _PAGE_NO_CACHE;
+#endif
+	return tmp;
+}
+
+/**
+ * \c fault method for AGP virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information.
+ * \return 0 on success, or VM_FAULT_SIGBUS on failure.
+ *
+ * Find the right map and if it's AGP memory find the real physical page to
+ * map, get the page, increment the use count and return it.
+ */
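+
+/*
+ * Illustrative sketch of the address translation below (all values are
+ * hypothetical):
+ *
+ *	offset = fault_address - vma->vm_start;		e.g. 0x3000
+ *	baddr  = map->offset + offset;			e.g. 0xd0000000 + 0x3000
+ *	// find the agpmem entry with
+ *	//   agpmem->bound <= baddr < agpmem->bound + agpmem->pages * PAGE_SIZE
+ *	page = agpmem->memory->pages[(baddr - agpmem->bound) >> PAGE_SHIFT];
+ *	// with 4 KiB pages and bound == 0xd0000000 this selects page index 3
+ */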
+#if __OS_HAS_AGP
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_local_map *map = NULL;
+	struct drm_map_list *r_list;
+	struct drm_hash_item *hash;
+
+	/*
+	 * Find the right map
+	 */
+	if (!drm_core_has_AGP(dev))
+		goto vm_fault_error;
+
+	if (!dev->agp || !dev->agp->cant_use_aperture)
+		goto vm_fault_error;
+
+	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
+		goto vm_fault_error;
+
+	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
+	map = r_list->map;
+
+	if (map && map->type == _DRM_AGP) {
+		/*
+		 * Using vm_pgoff as a selector forces us to use this unusual
+		 * addressing scheme.
+		 */
+		resource_size_t offset = (unsigned long)vmf->virtual_address -
+			vma->vm_start;
+		resource_size_t baddr = map->offset + offset;
+		struct drm_agp_mem *agpmem;
+		struct page *page;
+
+#ifdef __alpha__
+		/*
+		 * Adjust to a bus-relative address
+		 */
+		baddr -= dev->hose->mem_space->start;
+#endif
+
+		/*
+		 * It's AGP memory - find the real physical page to map
+		 */
+		list_for_each_entry(agpmem, &dev->agp->memory, head) {
+			if (agpmem->bound <= baddr &&
+			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
+				break;
+		}
+
+		if (&agpmem->head == &dev->agp->memory)
+			goto vm_fault_error;
+
+		/*
+		 * Get the page, inc the use count, and return it
+		 */
+		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
+		page = agpmem->memory->pages[offset];
+		get_page(page);
+		vmf->page = page;
+
+		DRM_DEBUG
+		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
+		     (unsigned long long)baddr,
+		     agpmem->memory->pages[offset],
+		     (unsigned long long)offset,
+		     page_count(page));
+		return 0;
+	}
+vm_fault_error:
+	return VM_FAULT_SIGBUS;	/* Disallow mremap */
+}
+#else				/* __OS_HAS_AGP */
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+#endif				/* __OS_HAS_AGP */
+
+/**
+ * \c fault method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information.
+ * \return 0 on success, or VM_FAULT_SIGBUS on failure.
+ *
+ * Get the mapping, find the real physical page to map, get the page, and
+ * return it.
+ */
+static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_local_map *map = vma->vm_private_data;
+	unsigned long offset;
+	unsigned long i;
+	struct page *page;
+
+	if (!map)
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
+
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+	i = (unsigned long)map->handle + offset;
+	page = vmalloc_to_page((void *)i);
+	if (!page)
+		return VM_FAULT_SIGBUS;
+	get_page(page);
+	vmf->page = page;
+
+	DRM_DEBUG("shm_fault 0x%lx\n", offset);
+	return 0;
+}
+
+/**
+ * \c close method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ *
+ * Deletes map information if we are the last
+ * person to close a mapping and it's not in the global maplist.
+ */
+static void drm_vm_shm_close(struct vm_area_struct *vma)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_vma_entry *pt, *temp;
+	struct drm_local_map *map;
+	struct drm_map_list *r_list;
+	int found_maps = 0;
+
+	DRM_DEBUG("0x%08lx,0x%08lx\n",
+		  vma->vm_start, vma->vm_end - vma->vm_start);
+	atomic_dec(&dev->vma_count);
+
+	map = vma->vm_private_data;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+		if (pt->vma->vm_private_data == map)
+			found_maps++;
+		if (pt->vma == vma) {
+			list_del(&pt->head);
+			kfree(pt);
+		}
+	}
+
+	/* This vma was the only one still referencing the map */
+	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
+		/* Check to see if we are in the maplist; if we are not,
+		 * then we delete this mapping's information.
+		 */
+		found_maps = 0;
+		list_for_each_entry(r_list, &dev->maplist, head) {
+			if (r_list->map == map)
+				found_maps++;
+		}
+
+		if (!found_maps) {
+			drm_dma_handle_t dmah;
+
+			switch (map->type) {
+			case _DRM_REGISTERS:
+			case _DRM_FRAME_BUFFER:
+				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+					int retcode;
+					retcode = mtrr_del(map->mtrr,
+							   map->offset,
+							   map->size);
+					DRM_DEBUG("mtrr_del = %d\n", retcode);
+				}
+				iounmap(map->handle);
+				break;
+			case _DRM_SHM:
+				vfree(map->handle);
+				break;
+			case _DRM_AGP:
+			case _DRM_SCATTER_GATHER:
+				break;
+			case _DRM_CONSISTENT:
+				dmah.vaddr = map->handle;
+				dmah.busaddr = map->offset;
+				dmah.size = map->size;
+				__drm_pci_free(dev, &dmah);
+				break;
+			case _DRM_GEM:
+				DRM_ERROR("tried to rmmap GEM object\n");
+				break;
+			}
+			kfree(map);
+		}
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * \c fault method for DMA virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information.
+ * \return 0 on success, or VM_FAULT_SIGBUS on failure.
+ *
+ * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
+ */
+static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_device_dma *dma = dev->dma;
+	unsigned long offset;
+	unsigned long page_nr;
+	struct page *page;
+
+	if (!dma)
+		return VM_FAULT_SIGBUS;	/* Error */
+	if (!dma->pagelist)
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
+
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
+	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
+	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
+
+	get_page(page);
+	vmf->page = page;
+
+	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
+	return 0;
+}
+
+/**
+ * \c fault method for scatter-gather virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param vmf fault information.
+ * \return 0 on success, or VM_FAULT_SIGBUS on failure.
+ *
+ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
+ */
+static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_local_map *map = vma->vm_private_data;
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_sg_mem *entry = dev->sg;
+	unsigned long offset;
+	unsigned long map_offset;
+	unsigned long page_offset;
+	struct page *page;
+
+	if (!entry)
+		return VM_FAULT_SIGBUS;	/* Error */
+	if (!entry->pagelist)
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
+
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+	map_offset = map->offset - (unsigned long)dev->sg->virtual;
+	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
+	page = entry->pagelist[page_offset];
+	get_page(page);
+	vmf->page = page;
+
+	return 0;
+}
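+
+/*
+ * Worked example for the scatter-gather offset math above (hypothetical
+ * numbers): with dev->sg->virtual = 0x10000000, map->offset = 0x10040000
+ * and a fault 0x2000 bytes into the vma, map_offset = 0x40000 and
+ * page_offset = (0x2000 >> 12) + (0x40000 >> 12) = 2 + 64 = 66, so the
+ * 67th entry of entry->pagelist is mapped (assuming 4 KiB pages).
+ */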
+
+static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return drm_do_vm_fault(vma, vmf);
+}
+
+static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return drm_do_vm_shm_fault(vma, vmf);
+}
+
+static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return drm_do_vm_dma_fault(vma, vmf);
+}
+
+static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return drm_do_vm_sg_fault(vma, vmf);
+}
+
+/** AGP virtual memory operations */
+static const struct vm_operations_struct drm_vm_ops = {
+	.fault = drm_vm_fault,
+	.open = drm_vm_open,
+	.close = drm_vm_close,
+};
+
+/** Shared virtual memory operations */
+static const struct vm_operations_struct drm_vm_shm_ops = {
+	.fault = drm_vm_shm_fault,
+	.open = drm_vm_open,
+	.close = drm_vm_shm_close,
+};
+
+/** DMA virtual memory operations */
+static const struct vm_operations_struct drm_vm_dma_ops = {
+	.fault = drm_vm_dma_fault,
+	.open = drm_vm_open,
+	.close = drm_vm_close,
+};
+
+/** Scatter-gather virtual memory operations */
+static const struct vm_operations_struct drm_vm_sg_ops = {
+	.fault = drm_vm_sg_fault,
+	.open = drm_vm_open,
+	.close = drm_vm_close,
+};
+
+/**
+ * \c open method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Create a new drm_vma_entry structure as the \p vma private data entry and
+ * add it to drm_device::vmalist.
+ */
+void drm_vm_open_locked(struct drm_device *dev,
+		struct vm_area_struct *vma)
+{
+	struct drm_vma_entry *vma_entry;
+
+	DRM_DEBUG("0x%08lx,0x%08lx\n",
+		  vma->vm_start, vma->vm_end - vma->vm_start);
+	atomic_inc(&dev->vma_count);
+
+	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
+	if (vma_entry) {
+		vma_entry->vma = vma;
+		vma_entry->pid = current->pid;
+		list_add(&vma_entry->head, &dev->vmalist);
+	}
+}
+EXPORT_SYMBOL_GPL(drm_vm_open_locked);
+
+static void drm_vm_open(struct vm_area_struct *vma)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_vm_open_locked(dev, vma);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+void drm_vm_close_locked(struct drm_device *dev,
+		struct vm_area_struct *vma)
+{
+	struct drm_vma_entry *pt, *temp;
+
+	DRM_DEBUG("0x%08lx,0x%08lx\n",
+		  vma->vm_start, vma->vm_end - vma->vm_start);
+	atomic_dec(&dev->vma_count);
+
+	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+		if (pt->vma == vma) {
+			list_del(&pt->head);
+			kfree(pt);
+			break;
+		}
+	}
+}
+
+/**
+ * \c close method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
+ * free it.
+ */
+static void drm_vm_close(struct vm_area_struct *vma)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_vm_close_locked(dev, vma);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * mmap DMA memory.
+ *
+ * \param file_priv DRM file private.
+ * \param vma virtual memory area.
+ * \return zero on success or a negative number on failure.
+ *
+ * Sets the virtual memory area operations structure to drm_vm_dma_ops and
+ * calls drm_vm_open_locked().
+ */
+static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev;
+	struct drm_device_dma *dma;
+	unsigned long length = vma->vm_end - vma->vm_start;
+
+	dev = priv->minor->dev;
+	dma = dev->dma;
+	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
+
+	/* Length must match exact page count */
+	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
+		return -EINVAL;
+	}
+
+	if (!capable(CAP_SYS_ADMIN) &&
+	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
+		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+		/* Ye gads this is ugly.  With more thought
+		   we could move this up higher and use
+		   `protection_map' instead.  */
+		vma->vm_page_prot =
+		    __pgprot(pte_val
+			     (pte_wrprotect
+			      (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+	}
+
+	vma->vm_ops = &drm_vm_dma_ops;
+
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+
+	drm_vm_open_locked(dev, vma);
+	return 0;
+}
+
+static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
+{
+#ifdef __alpha__
+	return dev->hose->dense_mem_base;
+#else
+	return 0;
+#endif
+}
+
+/**
+ * mmap a DRM memory map.
+ *
+ * \param file_priv DRM file private.
+ * \param vma virtual memory area.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the virtual memory area has no offset associated with it then it's a DMA
+ * area, so drm_mmap_dma() is called. Otherwise the map is looked up in
+ * drm_device::maplist, the restricted flag is checked, the virtual memory
+ * operations are set according to the mapping type and the pages are
+ * remapped. Finally drm_vm_open_locked() is called.
+ */
+int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_local_map *map = NULL;
+	resource_size_t offset = 0;
+	struct drm_hash_item *hash;
+
+	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
+
+	if (!priv->authenticated)
+		return -EACCES;
+
+	/* We check for "dma". On Apple's UniNorth, it's valid to have
+	 * the AGP mapped at physical address 0
+	 * --BenH.
+	 */
+	if (!vma->vm_pgoff
+#if __OS_HAS_AGP
+	    && (!dev->agp
+		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
+#endif
+	    )
+		return drm_mmap_dma(filp, vma);
+
+	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
+		DRM_ERROR("Could not find map\n");
+		return -EINVAL;
+	}
+
+	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
+		return -EPERM;
+
+	/* Check for valid size. */
+	if (map->size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
+		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+		/* Ye gads this is ugly.  With more thought
+		   we could move this up higher and use
+		   `protection_map' instead.  */
+		vma->vm_page_prot =
+		    __pgprot(pte_val
+			     (pte_wrprotect
+			      (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+	}
+
+	switch (map->type) {
+#if !defined(__arm__)
+	case _DRM_AGP:
+		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
+			/*
+			 * On some platforms we can't talk to bus dma address from the CPU, so for
+			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
+			 * pages and mappings in fault()
+			 */
+#if defined(__powerpc__)
+			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+#endif
+			vma->vm_ops = &drm_vm_ops;
+			break;
+		}
+		/* fall through to _DRM_FRAME_BUFFER... */
+#endif
+	case _DRM_FRAME_BUFFER:
+	case _DRM_REGISTERS:
+		offset = drm_core_get_reg_ofs(dev);
+		vma->vm_flags |= VM_IO;	/* not in core dump */
+		vma->vm_page_prot = drm_io_prot(map->type, vma);
+		if (io_remap_pfn_range(vma, vma->vm_start,
+				       (map->offset + offset) >> PAGE_SHIFT,
+				       vma->vm_end - vma->vm_start,
+				       vma->vm_page_prot))
+			return -EAGAIN;
+		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
+			  " offset = 0x%llx\n",
+			  map->type,
+			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
+
+		vma->vm_ops = &drm_vm_ops;
+		break;
+	case _DRM_CONSISTENT:
+		/* Consistent memory is really like shared memory. But
+		 * it's allocated in a different way, so avoid fault */
+		if (remap_pfn_range(vma, vma->vm_start,
+		    page_to_pfn(virt_to_page(map->handle)),
+		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
+			return -EAGAIN;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
+	/* fall through to _DRM_SHM */
+	case _DRM_SHM:
+		vma->vm_ops = &drm_vm_shm_ops;
+		vma->vm_private_data = (void *)map;
+		break;
+	case _DRM_SCATTER_GATHER:
+		vma->vm_ops = &drm_vm_sg_ops;
+		vma->vm_private_data = (void *)map;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
+		break;
+	default:
+		return -EINVAL;	/* This should never happen. */
+	}
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+
+	drm_vm_open_locked(dev, vma);
+	return 0;
+}
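+
+/*
+ * Userspace sketch of how a legacy map ends up here (illustrative only;
+ * the map token comes back from DRM_IOCTL_ADD_MAP / DRM_IOCTL_GET_MAP in
+ * drm_map.handle):
+ *
+ *	int fd = open("/dev/dri/card0", O_RDWR);
+ *	// the returned handle is the mmap offset token, not a byte offset
+ *	void *regs = mmap(NULL, map.size, PROT_READ | PROT_WRITE,
+ *			  MAP_SHARED, fd, (off_t)map.handle);
+ *
+ * The offset selects the map via dev->map_hash, which is why vm_pgoff is
+ * used as a lookup key above rather than as an offset into the mapping.
+ */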
+
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	int ret;
+
+	if (drm_device_is_unplugged(dev))
+		return -ENODEV;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_mmap_locked(filp, vma);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_mmap);
diff --git a/linux-imx/drivers/gpu/drm/exynos/Kconfig b/linux-imx/drivers/gpu/drm/exynos/Kconfig
new file mode 100644
index 0000000..772c62a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/Kconfig
@@ -0,0 +1,73 @@
+config DRM_EXYNOS
+	tristate "DRM Support for Samsung SoC EXYNOS Series"
+	depends on DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
+	select DRM_KMS_HELPER
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+	help
+	  Choose this option if you have a Samsung SoC EXYNOS chipset.
+	  If M is selected, the module will be called exynosdrm.
+
+config DRM_EXYNOS_IOMMU
+	bool "EXYNOS DRM IOMMU Support"
+	depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+	help
+	  Choose this option if you want to use the IOMMU feature with DRM.
+
+config DRM_EXYNOS_DMABUF
+	bool "EXYNOS DRM DMABUF"
+	depends on DRM_EXYNOS
+	help
+	  Choose this option if you want to use the DMABUF feature with DRM.
+
+config DRM_EXYNOS_FIMD
+	bool "Exynos DRM FIMD"
+	depends on OF && DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
+	select FB_MODE_HELPERS
+	select VIDEOMODE_HELPERS
+	help
+	  Choose this option if you want to use Exynos FIMD for DRM.
+
+config DRM_EXYNOS_HDMI
+	bool "Exynos DRM HDMI"
+	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV
+	help
+	  Choose this option if you want to use Exynos HDMI for DRM.
+
+config DRM_EXYNOS_VIDI
+	bool "Exynos DRM Virtual Display"
+	depends on DRM_EXYNOS
+	help
+	  Choose this option if you want to use Exynos VIDI for DRM.
+
+config DRM_EXYNOS_G2D
+	bool "Exynos DRM G2D"
+	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
+	help
+	  Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+	bool "Exynos DRM IPP"
+	depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
+	help
+	  Choose this option if you want to use the IPP feature with DRM.
+
+config DRM_EXYNOS_FIMC
+	bool "Exynos DRM FIMC"
+	depends on DRM_EXYNOS_IPP && MFD_SYSCON && OF
+	help
+	  Choose this option if you want to use Exynos FIMC for DRM.
+
+config DRM_EXYNOS_ROTATOR
+	bool "Exynos DRM Rotator"
+	depends on DRM_EXYNOS_IPP
+	help
+	  Choose this option if you want to use Exynos Rotator for DRM.
+
+config DRM_EXYNOS_GSC
+	bool "Exynos DRM GSC"
+	depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+	help
+	  Choose this option if you want to use Exynos GSC for DRM.
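+
+# Illustrative .config fragment (not part of this file): a minimal setup
+# enabling the core driver with the FIMD and HDMI paths might look like
+#
+#   CONFIG_DRM=y
+#   CONFIG_DRM_EXYNOS=y
+#   CONFIG_DRM_EXYNOS_FIMD=y
+#   CONFIG_DRM_EXYNOS_HDMI=y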
diff --git a/linux-imx/drivers/gpu/drm/exynos/Makefile b/linux-imx/drivers/gpu/drm/exynos/Makefile
new file mode 100644
index 0000000..639b49e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/Makefile
@@ -0,0 +1,24 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
+exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
+		exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
+		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
+		exynos_drm_plane.o
+
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o exynos_mixer.o \
+					   exynos_ddc.o exynos_hdmiphy.o \
+					   exynos_drm_hdmi.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI)	+= exynos_drm_vidi.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_G2D)	+= exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP)	+= exynos_drm_ipp.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC)	+= exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR)	+= exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC)	+= exynos_drm_gsc.o
+
+obj-$(CONFIG_DRM_EXYNOS)		+= exynosdrm.o
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_ddc.c b/linux-imx/drivers/gpu/drm/exynos/exynos_ddc.c
new file mode 100644
index 0000000..4e9b5ba
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *	Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_hdmi.h"
+
+static int s5p_ddc_probe(struct i2c_client *client,
+			const struct i2c_device_id *dev_id)
+{
+	hdmi_attach_ddc_client(client);
+
+	dev_info(&client->adapter->dev,
+		"attached %s into i2c adapter successfully\n",
+		client->name);
+
+	return 0;
+}
+
+static int s5p_ddc_remove(struct i2c_client *client)
+{
+	dev_info(&client->adapter->dev,
+		"detached %s from i2c adapter successfully\n",
+		client->name);
+
+	return 0;
+}
+
+static struct i2c_device_id ddc_idtable[] = {
+	{"s5p_ddc", 0},
+	{"exynos5-hdmiddc", 0},
+	{ },
+};
+
+#ifdef CONFIG_OF
+static struct of_device_id hdmiddc_match_types[] = {
+	{
+		.compatible = "samsung,exynos5-hdmiddc",
+	}, {
+		/* end node */
+	}
+};
+#endif
+
+struct i2c_driver ddc_driver = {
+	.driver = {
+		.name = "exynos-hdmiddc",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(hdmiddc_match_types),
+	},
+	.id_table	= ddc_idtable,
+	.probe		= s5p_ddc_probe,
+	.remove		= s5p_ddc_remove,
+	.command		= NULL,
+};
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_buf.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_buf.c
new file mode 100644
index 0000000..57affae
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -0,0 +1,200 @@
+/* exynos_drm_buf.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_buf.h"
+#include "exynos_drm_iommu.h"
+
+static int lowlevel_buffer_allocate(struct drm_device *dev,
+		unsigned int flags, struct exynos_drm_gem_buf *buf)
+{
+	int ret = 0;
+	enum dma_attr attr;
+	unsigned int nr_pages;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (buf->dma_addr) {
+		DRM_DEBUG_KMS("already allocated.\n");
+		return 0;
+	}
+
+	init_dma_attrs(&buf->dma_attrs);
+
+	/*
+	 * If EXYNOS_BO_CONTIG is requested (i.e. EXYNOS_BO_NONCONTIG is not
+	 * set), a fully physically contiguous memory region is allocated;
+	 * otherwise the allocation is only as contiguous as possible.
+	 */
+	if (!(flags & EXYNOS_BO_NONCONTIG))
+		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
+
+	/*
+	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
+	 * write-combined mapping; otherwise use a cacheable mapping.
+	 */
+	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
+		attr = DMA_ATTR_WRITE_COMBINE;
+	else
+		attr = DMA_ATTR_NON_CONSISTENT;
+
+	dma_set_attr(attr, &buf->dma_attrs);
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
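+
+	/*
+	 * Example (illustrative): flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC
+	 * results in { WRITE_COMBINE, NO_KERNEL_MAPPING } with
+	 * FORCE_CONTIGUOUS clear, while flags = EXYNOS_BO_CONTIG |
+	 * EXYNOS_BO_CACHABLE yields { NON_CONSISTENT, NO_KERNEL_MAPPING,
+	 * FORCE_CONTIGUOUS }.
+	 */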
+
+	nr_pages = buf->size >> PAGE_SHIFT;
+
+	if (!is_drm_iommu_supported(dev)) {
+		dma_addr_t start_addr;
+		unsigned int i = 0;
+
+		buf->pages = kzalloc(sizeof(struct page *) * nr_pages,
+					GFP_KERNEL);
+		if (!buf->pages) {
+			DRM_ERROR("failed to allocate pages.\n");
+			return -ENOMEM;
+		}
+
+		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+					&buf->dma_addr, GFP_KERNEL,
+					&buf->dma_attrs);
+		if (!buf->kvaddr) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			kfree(buf->pages);
+			return -ENOMEM;
+		}
+
+		start_addr = buf->dma_addr;
+		while (i < nr_pages) {
+			buf->pages[i] = phys_to_page(start_addr);
+			start_addr += PAGE_SIZE;
+			i++;
+		}
+	} else {
+
+		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+					&buf->dma_addr, GFP_KERNEL,
+					&buf->dma_attrs);
+		if (!buf->pages) {
+			DRM_ERROR("failed to allocate buffer.\n");
+			return -ENOMEM;
+		}
+	}
+
+	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
+	if (!buf->sgt) {
+		DRM_ERROR("failed to get sg table.\n");
+		ret = -ENOMEM;
+		goto err_free_attrs;
+	}
+
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)buf->dma_addr,
+			buf->size);
+
+	return ret;
+
+err_free_attrs:
+	if (!is_drm_iommu_supported(dev)) {
+		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+		kfree(buf->pages);
+	} else {
+		dma_free_attrs(dev->dev, buf->size, buf->pages,
+				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+	}
+	buf->dma_addr = (dma_addr_t)NULL;
+
+	return ret;
+}
+
+static void lowlevel_buffer_deallocate(struct drm_device *dev,
+		unsigned int flags, struct exynos_drm_gem_buf *buf)
+{
+	DRM_DEBUG_KMS("%s.\n", __FILE__);
+
+	if (!buf->dma_addr) {
+		DRM_DEBUG_KMS("dma_addr is invalid.\n");
+		return;
+	}
+
+	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)buf->dma_addr,
+			buf->size);
+
+	sg_free_table(buf->sgt);
+
+	kfree(buf->sgt);
+	buf->sgt = NULL;
+
+	if (!is_drm_iommu_supported(dev)) {
+		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+		kfree(buf->pages);
+	} else {
+		dma_free_attrs(dev->dev, buf->size, buf->pages,
+				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+	}
+
+	buf->dma_addr = (dma_addr_t)NULL;
+}
+
+struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
+						unsigned int size)
+{
+	struct exynos_drm_gem_buf *buffer;
+
+	DRM_DEBUG_KMS("%s.\n", __FILE__);
+	DRM_DEBUG_KMS("desired size = 0x%x\n", size);
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer) {
+		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
+		return NULL;
+	}
+
+	buffer->size = size;
+	return buffer;
+}
+
+void exynos_drm_fini_buf(struct drm_device *dev,
+				struct exynos_drm_gem_buf *buffer)
+{
+	DRM_DEBUG_KMS("%s.\n", __FILE__);
+
+	if (!buffer) {
+		DRM_DEBUG_KMS("buffer is null.\n");
+		return;
+	}
+
+	kfree(buffer);
+}
+
+int exynos_drm_alloc_buf(struct drm_device *dev,
+		struct exynos_drm_gem_buf *buf, unsigned int flags)
+{
+	/*
+	 * allocate memory region and set the memory information
+	 * to vaddr and dma_addr of a buffer object.
+	 */
+	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
+		return -ENOMEM;
+
+	return 0;
+}
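+
+/*
+ * Typical call sequence (sketch; assumes a caller such as the gem code):
+ *
+ *	buf = exynos_drm_init_buf(dev, size);		// metadata only
+ *	ret = exynos_drm_alloc_buf(dev, buf, flags);	// backing memory + sgt
+ *	...
+ *	exynos_drm_free_buf(dev, flags, buf);
+ *	exynos_drm_fini_buf(dev, buf);
+ */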
+
+void exynos_drm_free_buf(struct drm_device *dev,
+		unsigned int flags, struct exynos_drm_gem_buf *buffer)
+{
+	lowlevel_buffer_deallocate(dev, flags, buffer);
+}
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_buf.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_buf.h
new file mode 100644
index 0000000..a6412f1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -0,0 +1,33 @@
+/* exynos_drm_buf.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_BUF_H_
+#define _EXYNOS_DRM_BUF_H_
+
+/* create and initialize buffer object. */
+struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
+						unsigned int size);
+
+/* destroy buffer object. */
+void exynos_drm_fini_buf(struct drm_device *dev,
+				struct exynos_drm_gem_buf *buffer);
+
+/* allocate physical memory region and setup sgt. */
+int exynos_drm_alloc_buf(struct drm_device *dev,
+				struct exynos_drm_gem_buf *buf,
+				unsigned int flags);
+
+/* release physical memory region, and sgt. */
+void exynos_drm_free_buf(struct drm_device *dev,
+				unsigned int flags,
+				struct exynos_drm_gem_buf *buffer);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_connector.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_connector.c
new file mode 100644
index 0000000..8bcc13a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+
+#define to_exynos_connector(x)	container_of(x, struct exynos_drm_connector,\
+				drm_connector)
+
+struct exynos_drm_connector {
+	struct drm_connector	drm_connector;
+	uint32_t		encoder_id;
+	struct exynos_drm_manager *manager;
+	uint32_t		dpms;
+};
+
+/* convert exynos_video_timings to drm_display_mode */
+static inline void
+convert_to_display_mode(struct drm_display_mode *mode,
+			struct exynos_drm_panel_info *panel)
+{
+	struct fb_videomode *timing = &panel->timing;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	mode->clock = timing->pixclock / 1000;
+	mode->vrefresh = timing->refresh;
+
+	mode->hdisplay = timing->xres;
+	mode->hsync_start = mode->hdisplay + timing->right_margin;
+	mode->hsync_end = mode->hsync_start + timing->hsync_len;
+	mode->htotal = mode->hsync_end + timing->left_margin;
+
+	mode->vdisplay = timing->yres;
+	mode->vsync_start = mode->vdisplay + timing->lower_margin;
+	mode->vsync_end = mode->vsync_start + timing->vsync_len;
+	mode->vtotal = mode->vsync_end + timing->upper_margin;
+	mode->width_mm = panel->width_mm;
+	mode->height_mm = panel->height_mm;
+
+	if (timing->vmode & FB_VMODE_INTERLACED)
+		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+	if (timing->vmode & FB_VMODE_DOUBLE)
+		mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+}
+
+/* convert drm_display_mode to exynos_video_timings */
+static inline void
+convert_to_video_timing(struct fb_videomode *timing,
+			struct drm_display_mode *mode)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	memset(timing, 0, sizeof(*timing));
+
+	timing->pixclock = mode->clock * 1000;
+	timing->refresh = drm_mode_vrefresh(mode);
+
+	timing->xres = mode->hdisplay;
+	timing->right_margin = mode->hsync_start - mode->hdisplay;
+	timing->hsync_len = mode->hsync_end - mode->hsync_start;
+	timing->left_margin = mode->htotal - mode->hsync_end;
+
+	timing->yres = mode->vdisplay;
+	timing->lower_margin = mode->vsync_start - mode->vdisplay;
+	timing->vsync_len = mode->vsync_end - mode->vsync_start;
+	timing->upper_margin = mode->vtotal - mode->vsync_end;
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		timing->vmode = FB_VMODE_INTERLACED;
+	else
+		timing->vmode = FB_VMODE_NONINTERLACED;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		timing->vmode |= FB_VMODE_DOUBLE;
+}
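+
+/*
+ * Worked example (standard VESA 1024x768 timings): with hdisplay = 1024,
+ * hsync_start = 1048, hsync_end = 1184 and htotal = 1344, the conversion
+ * yields right_margin = 24, hsync_len = 136 and left_margin = 160; the
+ * vertical fields are derived the same way. Note that, unlike standard
+ * fbdev, pixclock is treated here as a frequency in Hz (mode->clock is in
+ * kHz), matching convert_to_display_mode() above.
+ */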
+
+static int exynos_drm_connector_get_modes(struct drm_connector *connector)
+{
+	struct exynos_drm_connector *exynos_connector =
+					to_exynos_connector(connector);
+	struct exynos_drm_manager *manager = exynos_connector->manager;
+	struct exynos_drm_display_ops *display_ops = manager->display_ops;
+	struct edid *edid = NULL;
+	unsigned int count = 0;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!display_ops) {
+		DRM_DEBUG_KMS("display_ops is null.\n");
+		return 0;
+	}
+
+	/*
+	 * If get_edid() exists, the get_edid() callback of the hdmi side is
+	 * called to fetch EDID data over the i2c interface; otherwise the
+	 * timing is taken from the FIMD driver (display controller).
+	 *
+	 * Note: for an LCD panel, count is always 1 on success because an
+	 * LCD panel has only one mode.
+	 */
+	if (display_ops->get_edid) {
+		edid = display_ops->get_edid(manager->dev, connector);
+		if (IS_ERR_OR_NULL(edid)) {
+			ret = PTR_ERR(edid);
+			edid = NULL;
+			DRM_ERROR("Panel operation get_edid failed %d\n", ret);
+			goto out;
+		}
+
+		count = drm_add_edid_modes(connector, edid);
+		if (!count) {
+			DRM_ERROR("Add edid modes failed %d\n", count);
+			goto out;
+		}
+
+		drm_mode_connector_update_edid_property(connector, edid);
+	} else {
+		struct exynos_drm_panel_info *panel;
+		struct drm_display_mode *mode = drm_mode_create(connector->dev);
+		if (!mode) {
+			DRM_ERROR("failed to create a new display mode.\n");
+			return 0;
+		}
+
+		if (display_ops->get_panel) {
+			panel = display_ops->get_panel(manager->dev);
+		} else {
+			drm_mode_destroy(connector->dev, mode);
+			return 0;
+		}
+
+		convert_to_display_mode(mode, panel);
+		connector->display_info.width_mm = mode->width_mm;
+		connector->display_info.height_mm = mode->height_mm;
+
+		mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+		drm_mode_set_name(mode);
+		drm_mode_probed_add(connector, mode);
+
+		count = 1;
+	}
+
+out:
+	kfree(edid);
+	return count;
+}
+
+static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
+					    struct drm_display_mode *mode)
+{
+	struct exynos_drm_connector *exynos_connector =
+					to_exynos_connector(connector);
+	struct exynos_drm_manager *manager = exynos_connector->manager;
+	struct exynos_drm_display_ops *display_ops = manager->display_ops;
+	struct fb_videomode timing;
+	int ret = MODE_BAD;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	convert_to_video_timing(&timing, mode);
+
+	if (display_ops && display_ops->check_timing)
+		if (!display_ops->check_timing(manager->dev, (void *)&timing))
+			ret = MODE_OK;
+
+	return ret;
+}
+
+struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct exynos_drm_connector *exynos_connector =
+					to_exynos_connector(connector);
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
+				   DRM_MODE_OBJECT_ENCODER);
+	if (!obj) {
+		DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
+				exynos_connector->encoder_id);
+		return NULL;
+	}
+
+	encoder = obj_to_encoder(obj);
+
+	return encoder;
+}
+
+static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
+	.get_modes	= exynos_drm_connector_get_modes,
+	.mode_valid	= exynos_drm_connector_mode_valid,
+	.best_encoder	= exynos_drm_best_encoder,
+};
+
+void exynos_drm_display_power(struct drm_connector *connector, int mode)
+{
+	struct drm_encoder *encoder = exynos_drm_best_encoder(connector);
+	struct exynos_drm_connector *exynos_connector;
+	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+	struct exynos_drm_display_ops *display_ops = manager->display_ops;
+
+	exynos_connector = to_exynos_connector(connector);
+
+	if (exynos_connector->dpms == mode) {
+		DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+		return;
+	}
+
+	if (display_ops && display_ops->power_on)
+		display_ops->power_on(manager->dev, mode);
+
+	exynos_connector->dpms = mode;
+}
+
+static void exynos_drm_connector_dpms(struct drm_connector *connector,
+					int mode)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * When drm_crtc_helper_set_mode() has been called, the
+	 * encoder/crtc->funcs->dpms() callbacks return immediately because
+	 * they are already in DRM_MODE_DPMS_ON, so effectively only
+	 * exynos_drm_display_power() takes effect here.
+	 */
+	drm_helper_connector_dpms(connector, mode);
+
+	exynos_drm_display_power(connector, mode);
+}
+
+static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
+				unsigned int max_width, unsigned int max_height)
+{
+	struct exynos_drm_connector *exynos_connector =
+					to_exynos_connector(connector);
+	struct exynos_drm_manager *manager = exynos_connector->manager;
+	struct exynos_drm_manager_ops *ops = manager->ops;
+	unsigned int width, height;
+
+	width = max_width;
+	height = max_height;
+
+	/*
+	 * If the specific driver wants to find the desired mode using the
+	 * maximum resolution, get the max width and height from that driver.
+	 */
+	if (ops && ops->get_max_resol)
+		ops->get_max_resol(manager->dev, &width, &height);
+
+	return drm_helper_probe_single_connector_modes(connector, width,
+							height);
+}
+
+/* get detection status of display device. */
+static enum drm_connector_status
+exynos_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct exynos_drm_connector *exynos_connector =
+					to_exynos_connector(connector);
+	struct exynos_drm_manager *manager = exynos_connector->manager;
+	struct exynos_drm_display_ops *display_ops =
+					manager->display_ops;
+	enum drm_connector_status status = connector_status_disconnected;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (display_ops && display_ops->is_connected) {
+		if (display_ops->is_connected(manager->dev))
+			status = connector_status_connected;
+		else
+			status = connector_status_disconnected;
+	}
+
+	return status;
+}
+
+static void exynos_drm_connector_destroy(struct drm_connector *connector)
+{
+	struct exynos_drm_connector *exynos_connector =
+		to_exynos_connector(connector);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(exynos_connector);
+}
+
+static struct drm_connector_funcs exynos_connector_funcs = {
+	.dpms		= exynos_drm_connector_dpms,
+	.fill_modes	= exynos_drm_connector_fill_modes,
+	.detect		= exynos_drm_connector_detect,
+	.destroy	= exynos_drm_connector_destroy,
+};
+
+struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
+						   struct drm_encoder *encoder)
+{
+	struct exynos_drm_connector *exynos_connector;
+	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+	struct drm_connector *connector;
+	int type;
+	int err;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
+	if (!exynos_connector) {
+		DRM_ERROR("failed to allocate connector\n");
+		return NULL;
+	}
+
+	connector = &exynos_connector->drm_connector;
+
+	switch (manager->display_ops->type) {
+	case EXYNOS_DISPLAY_TYPE_HDMI:
+		type = DRM_MODE_CONNECTOR_HDMIA;
+		connector->interlace_allowed = true;
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+		break;
+	case EXYNOS_DISPLAY_TYPE_VIDI:
+		type = DRM_MODE_CONNECTOR_VIRTUAL;
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+		break;
+	default:
+		type = DRM_MODE_CONNECTOR_Unknown;
+		break;
+	}
+
+	drm_connector_init(dev, connector, &exynos_connector_funcs, type);
+	drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
+
+	err = drm_sysfs_connector_add(connector);
+	if (err)
+		goto err_connector;
+
+	exynos_connector->encoder_id = encoder->base.id;
+	exynos_connector->manager = manager;
+	exynos_connector->dpms = DRM_MODE_DPMS_OFF;
+	connector->dpms = DRM_MODE_DPMS_OFF;
+	connector->encoder = encoder;
+
+	err = drm_mode_connector_attach_encoder(connector, encoder);
+	if (err) {
+		DRM_ERROR("failed to attach a connector to a encoder\n");
+		goto err_sysfs;
+	}
+
+	DRM_DEBUG_KMS("connector has been created\n");
+
+	return connector;
+
+err_sysfs:
+	drm_sysfs_connector_remove(connector);
+err_connector:
+	drm_connector_cleanup(connector);
+	kfree(exynos_connector);
+	return NULL;
+}
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_connector.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_connector.h
new file mode 100644
index 0000000..547c6b5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_connector.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_CONNECTOR_H_
+#define _EXYNOS_DRM_CONNECTOR_H_
+
+struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
+						   struct drm_encoder *encoder);
+
+struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector);
+
+void exynos_drm_display_power(struct drm_connector *connector, int mode);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_core.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_core.c
new file mode 100644
index 0000000..4667c9f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -0,0 +1,236 @@
+/* exynos_drm_core.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_connector.h"
+#include "exynos_drm_fbdev.h"
+
+static LIST_HEAD(exynos_drm_subdrv_list);
+
+static int exynos_drm_create_enc_conn(struct drm_device *dev,
+					struct exynos_drm_subdrv *subdrv)
+{
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int ret;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	subdrv->manager->dev = subdrv->dev;
+
+	/* create and initialize an encoder for this sub driver. */
+	encoder = exynos_drm_encoder_create(dev, subdrv->manager,
+			(1 << MAX_CRTC) - 1);
+	if (!encoder) {
+		DRM_ERROR("failed to create encoder\n");
+		return -EFAULT;
+	}
+
+	/*
+	 * create and initialize a connector for this sub driver and
+	 * attach the encoder created above to the connector.
+	 */
+	connector = exynos_drm_connector_create(dev, encoder);
+	if (!connector) {
+		DRM_ERROR("failed to create connector\n");
+		ret = -EFAULT;
+		goto err_destroy_encoder;
+	}
+
+	subdrv->encoder = encoder;
+	subdrv->connector = connector;
+
+	return 0;
+
+err_destroy_encoder:
+	encoder->funcs->destroy(encoder);
+	return ret;
+}
+
+static void exynos_drm_destroy_enc_conn(struct exynos_drm_subdrv *subdrv)
+{
+	if (subdrv->encoder) {
+		struct drm_encoder *encoder = subdrv->encoder;
+		encoder->funcs->destroy(encoder);
+		subdrv->encoder = NULL;
+	}
+
+	if (subdrv->connector) {
+		struct drm_connector *connector = subdrv->connector;
+		connector->funcs->destroy(connector);
+		subdrv->connector = NULL;
+	}
+}
+
+static int exynos_drm_subdrv_probe(struct drm_device *dev,
+					struct exynos_drm_subdrv *subdrv)
+{
+	if (subdrv->probe) {
+		int ret;
+
+		subdrv->drm_dev = dev;
+
+		/*
+		 * This probe callback is called either by the sub driver
+		 * itself, once all of its resources (clock, irq, register
+		 * map) have been set up, or by the load() of the exynos drm
+		 * driver.
+		 *
+		 * Note that this driver is considered for modularization.
+		 */
+		ret = subdrv->probe(dev, subdrv->dev);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void exynos_drm_subdrv_remove(struct drm_device *dev,
+				      struct exynos_drm_subdrv *subdrv)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (subdrv->remove)
+		subdrv->remove(dev, subdrv->dev);
+}
+
+int exynos_drm_device_register(struct drm_device *dev)
+{
+	struct exynos_drm_subdrv *subdrv, *n;
+	unsigned int fine_cnt = 0;
+	int err;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (!dev)
+		return -EINVAL;
+
+	list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
+		err = exynos_drm_subdrv_probe(dev, subdrv);
+		if (err) {
+			DRM_DEBUG("exynos drm subdrv probe failed.\n");
+			list_del(&subdrv->list);
+			continue;
+		}
+
+		/*
+		 * If manager is null, this sub driver doesn't need an
+		 * encoder and a connector.
+		 */
+		if (!subdrv->manager) {
+			fine_cnt++;
+			continue;
+		}
+
+		err = exynos_drm_create_enc_conn(dev, subdrv);
+		if (err) {
+			DRM_DEBUG("failed to create encoder and connector.\n");
+			exynos_drm_subdrv_remove(dev, subdrv);
+			list_del(&subdrv->list);
+			continue;
+		}
+
+		fine_cnt++;
+	}
+
+	if (!fine_cnt)
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_device_register);
+
+int exynos_drm_device_unregister(struct drm_device *dev)
+{
+	struct exynos_drm_subdrv *subdrv;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (!dev) {
+		WARN(1, "Unexpected drm device unregister!\n");
+		return -EINVAL;
+	}
+
+	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
+		exynos_drm_subdrv_remove(dev, subdrv);
+		exynos_drm_destroy_enc_conn(subdrv);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
+
+int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (!subdrv)
+		return -EINVAL;
+
+	list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
+
+int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (!subdrv)
+		return -EINVAL;
+
+	list_del(&subdrv->list);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
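+
+/*
+ * Registration sketch for a hypothetical sub driver (field names follow
+ * struct exynos_drm_subdrv; the callbacks and platform device shown are
+ * placeholders):
+ *
+ *	static struct exynos_drm_subdrv foo_subdrv = {
+ *		.dev	 = &pdev->dev,
+ *		.manager = &foo_manager,	// NULL if no encoder/connector
+ *		.probe	 = foo_subdrv_probe,
+ *		.remove	 = foo_subdrv_remove,
+ *	};
+ *
+ *	exynos_drm_subdrv_register(&foo_subdrv);
+ *
+ * exynos_drm_device_register() then walks the subdrv list, calling probe()
+ * and creating an encoder/connector pair when manager is set.
+ */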
+
+int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct exynos_drm_subdrv *subdrv;
+	int ret;
+
+	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
+		if (subdrv->open) {
+			ret = subdrv->open(dev, subdrv->dev, file);
+			if (ret)
+				goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+		if (subdrv->close)
+			subdrv->close(dev, subdrv->dev, file);
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
+
+void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
+{
+	struct exynos_drm_subdrv *subdrv;
+
+	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
+		if (subdrv->close)
+			subdrv->close(dev, subdrv->dev, file);
+	}
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_crtc.c
new file mode 100644
index 0000000..c200e4d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -0,0 +1,434 @@
+/* exynos_drm_crtc.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_plane.h"
+
+#define to_exynos_crtc(x)	container_of(x, struct exynos_drm_crtc,\
+				drm_crtc)
+
+enum exynos_crtc_mode {
+	CRTC_MODE_NORMAL,	/* normal mode */
+	CRTC_MODE_BLANK,	/* The private plane of crtc is blank */
+};
+
+/*
+ * Exynos specific crtc structure.
+ *
+ * @drm_crtc: crtc object.
+ * @plane: pointer to the private plane object for this crtc.
+ * @pipe: a crtc index assigned at load() when a new crtc object is created.
+ *	The crtc object is stored in the private->crtc array so that the crtc
+ *	corresponding to this pipe can be looked up when an irq occurs. This
+ *	pipe is used because the drm framework doesn't support multiple irqs
+ *	yet; it lets us refer back to the crtc for the current hardware
+ *	interrupt.
+ * @dpms: stores the crtc dpms value.
+ * @mode: stores the crtc mode value.
+ * @pending_flip_queue: waitqueue for a pending page flip to complete.
+ * @pending_flip: whether a page flip is currently pending.
+ */
+struct exynos_drm_crtc {
+	struct drm_crtc			drm_crtc;
+	struct drm_plane		*plane;
+	unsigned int			pipe;
+	unsigned int			dpms;
+	enum exynos_crtc_mode		mode;
+	wait_queue_head_t		pending_flip_queue;
+	atomic_t			pending_flip;
+};
+
+static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+	DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+	if (exynos_crtc->dpms == mode) {
+		DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+		return;
+	}
+
+	if (mode > DRM_MODE_DPMS_ON) {
+		/* wait for the completion of page flip. */
+		wait_event(exynos_crtc->pending_flip_queue,
+				atomic_read(&exynos_crtc->pending_flip) == 0);
+		drm_vblank_off(crtc->dev, exynos_crtc->pipe);
+	}
+
+	exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms);
+	exynos_crtc->dpms = mode;
+}
+
+static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* drm framework doesn't check NULL. */
+}
+
+static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
+{
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	exynos_plane_commit(exynos_crtc->plane);
+	exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON);
+}
+
+static bool
+exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+			    const struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* drm framework doesn't check NULL */
+	return true;
+}
+
+static int
+exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			  struct drm_display_mode *adjusted_mode, int x, int y,
+			  struct drm_framebuffer *old_fb)
+{
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_plane *plane = exynos_crtc->plane;
+	unsigned int crtc_w;
+	unsigned int crtc_h;
+	int pipe = exynos_crtc->pipe;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * copy the mode data adjusted by mode_fixup() into crtc->mode
+	 * so that the hardware can be set to the proper mode.
+	 */
+	memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
+
+	crtc_w = crtc->fb->width - x;
+	crtc_h = crtc->fb->height - y;
+
+	ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h,
+				    x, y, crtc_w, crtc_h);
+	if (ret)
+		return ret;
+
+	plane->crtc = crtc;
+	plane->fb = crtc->fb;
+
+	exynos_drm_fn_encoder(crtc, &pipe, exynos_drm_encoder_crtc_pipe);
+
+	return 0;
+}
+
+static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+					  struct drm_framebuffer *old_fb)
+{
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_plane *plane = exynos_crtc->plane;
+	unsigned int crtc_w;
+	unsigned int crtc_h;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* when a framebuffer change is requested, the crtc's dpms should be on */
+	if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
+		DRM_ERROR("framebuffer change request failed.\n");
+		return -EPERM;
+	}
+
+	crtc_w = crtc->fb->width - x;
+	crtc_h = crtc->fb->height - y;
+
+	ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h,
+				    x, y, crtc_w, crtc_h);
+	if (ret)
+		return ret;
+
+	exynos_drm_crtc_commit(crtc);
+
+	return 0;
+}
+
+static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+	/* drm framework doesn't check NULL */
+}
+
+static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
+{
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_OFF);
+	exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
+	.dpms		= exynos_drm_crtc_dpms,
+	.prepare	= exynos_drm_crtc_prepare,
+	.commit		= exynos_drm_crtc_commit,
+	.mode_fixup	= exynos_drm_crtc_mode_fixup,
+	.mode_set	= exynos_drm_crtc_mode_set,
+	.mode_set_base	= exynos_drm_crtc_mode_set_base,
+	.load_lut	= exynos_drm_crtc_load_lut,
+	.disable	= exynos_drm_crtc_disable,
+};
+
+static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
+				      struct drm_framebuffer *fb,
+				      struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct exynos_drm_private *dev_priv = dev->dev_private;
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_framebuffer *old_fb = crtc->fb;
+	int ret = -EINVAL;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* when a page flip is requested, the crtc's dpms should be on */
+	if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
+		DRM_ERROR("page flip request failed.\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (event) {
+		/*
+		 * the pipe from userspace is always 0, so we set the pipe
+		 * number of the current owner in the event.
+		 */
+		event->pipe = exynos_crtc->pipe;
+
+		ret = drm_vblank_get(dev, exynos_crtc->pipe);
+		if (ret) {
+			DRM_DEBUG("failed to acquire vblank counter\n");
+
+			goto out;
+		}
+
+		spin_lock_irq(&dev->event_lock);
+		list_add_tail(&event->base.link,
+				&dev_priv->pageflip_event_list);
+		atomic_set(&exynos_crtc->pending_flip, 1);
+		spin_unlock_irq(&dev->event_lock);
+
+		crtc->fb = fb;
+		ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
+						    NULL);
+		if (ret) {
+			crtc->fb = old_fb;
+
+			spin_lock_irq(&dev->event_lock);
+			drm_vblank_put(dev, exynos_crtc->pipe);
+			list_del(&event->base.link);
+			spin_unlock_irq(&dev->event_lock);
+
+			goto out;
+		}
+	}
+out:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
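+
+/*
+ * Page flip lifecycle (summary of the code above and of
+ * exynos_drm_crtc_finish_pageflip() below): the event is queued on
+ * dev_priv->pageflip_event_list and pending_flip is set here; once the
+ * display controller latches the new framebuffer and raises its vblank
+ * interrupt, finish_pageflip() sends the event, drops the vblank
+ * reference and wakes pending_flip_queue.
+ */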
+
+static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct exynos_drm_private *private = crtc->dev->dev_private;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	private->crtc[exynos_crtc->pipe] = NULL;
+
+	drm_crtc_cleanup(crtc);
+	kfree(exynos_crtc);
+}
+
+static int exynos_drm_crtc_set_property(struct drm_crtc *crtc,
+					struct drm_property *property,
+					uint64_t val)
+{
+	struct drm_device *dev = crtc->dev;
+	struct exynos_drm_private *dev_priv = dev->dev_private;
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (property == dev_priv->crtc_mode_property) {
+		enum exynos_crtc_mode mode = val;
+
+		if (mode == exynos_crtc->mode)
+			return 0;
+
+		exynos_crtc->mode = mode;
+
+		switch (mode) {
+		case CRTC_MODE_NORMAL:
+			exynos_drm_crtc_commit(crtc);
+			break;
+		case CRTC_MODE_BLANK:
+			exynos_plane_dpms(exynos_crtc->plane,
+					  DRM_MODE_DPMS_OFF);
+			break;
+		default:
+			break;
+		}
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static struct drm_crtc_funcs exynos_crtc_funcs = {
+	.set_config	= drm_crtc_helper_set_config,
+	.page_flip	= exynos_drm_crtc_page_flip,
+	.destroy	= exynos_drm_crtc_destroy,
+	.set_property	= exynos_drm_crtc_set_property,
+};
+
+static const struct drm_prop_enum_list mode_names[] = {
+	{ CRTC_MODE_NORMAL, "normal" },
+	{ CRTC_MODE_BLANK, "blank" },
+};
+
+static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct exynos_drm_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	prop = dev_priv->crtc_mode_property;
+	if (!prop) {
+		prop = drm_property_create_enum(dev, 0, "mode", mode_names,
+						ARRAY_SIZE(mode_names));
+		if (!prop)
+			return;
+
+		dev_priv->crtc_mode_property = prop;
+	}
+
+	drm_object_attach_property(&crtc->base, prop, 0);
+}
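+
+/*
+ * Userspace sketch (illustrative): the "mode" enum attached above can be
+ * toggled with the generic property ioctl, e.g. via libdrm, after looking
+ * up prop_id with drmModeObjectGetProperties():
+ *
+ *	drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
+ *				 prop_id, 1);	// 1 == CRTC_MODE_BLANK
+ */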
+
+int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
+{
+	struct exynos_drm_crtc *exynos_crtc;
+	struct exynos_drm_private *private = dev->dev_private;
+	struct drm_crtc *crtc;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
+	if (!exynos_crtc) {
+		DRM_ERROR("failed to allocate exynos crtc\n");
+		return -ENOMEM;
+	}
+
+	exynos_crtc->pipe = nr;
+	exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
+	init_waitqueue_head(&exynos_crtc->pending_flip_queue);
+	atomic_set(&exynos_crtc->pending_flip, 0);
+	exynos_crtc->plane = exynos_plane_init(dev, 1 << nr, true);
+	if (!exynos_crtc->plane) {
+		kfree(exynos_crtc);
+		return -ENOMEM;
+	}
+
+	crtc = &exynos_crtc->drm_crtc;
+
+	private->crtc[nr] = crtc;
+
+	drm_crtc_init(dev, crtc, &exynos_crtc_funcs);
+	drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
+
+	exynos_drm_crtc_attach_mode_property(crtc);
+
+	return 0;
+}
+
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+	struct exynos_drm_crtc *exynos_crtc =
+		to_exynos_crtc(private->crtc[crtc]);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+		return -EPERM;
+
+	exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
+			exynos_drm_enable_vblank);
+
+	return 0;
+}
+
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+	struct exynos_drm_crtc *exynos_crtc =
+		to_exynos_crtc(private->crtc[crtc]);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+		return;
+
+	exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
+			exynos_drm_disable_vblank);
+}
+
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
+{
+	struct exynos_drm_private *dev_priv = dev->dev_private;
+	struct drm_pending_vblank_event *e, *t;
+	struct drm_crtc *drm_crtc = dev_priv->crtc[crtc];
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(drm_crtc);
+	unsigned long flags;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
+			base.link) {
+		/* if the event's pipe isn't the same as the crtc, ignore it. */
+		if (crtc != e->pipe)
+			continue;
+
+		list_del(&e->base.link);
+		drm_send_vblank_event(dev, -1, e);
+		drm_vblank_put(dev, crtc);
+		atomic_set(&exynos_crtc->pending_flip, 0);
+		wake_up(&exynos_crtc->pending_flip_queue);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
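+
+/*
+ * Call-site sketch (illustrative, hypothetical names): a crtc driver's
+ * vblank interrupt handler would complete any queued flip by calling
+ * this helper, e.g.:
+ *
+ *	static irqreturn_t foo_irq_handler(int irq, void *dev_id)
+ *	{
+ *		struct foo_context *ctx = dev_id;
+ *
+ *		drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ *		exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ *
+ *		return IRQ_HANDLED;
+ *	}
+ */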
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_crtc.h
new file mode 100644
index 0000000..3e197e6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -0,0 +1,23 @@
+/* exynos_drm_crtc.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_CRTC_H_
+#define _EXYNOS_DRM_CRTC_H_
+
+int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
new file mode 100644
index 0000000..ff7f2a8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -0,0 +1,308 @@
+/* exynos_drm_dmabuf.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+
+#include <linux/dma-buf.h>
+
+struct exynos_drm_dmabuf_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+	bool is_mapped;
+};
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+					struct device *dev,
+					struct dma_buf_attachment *attach)
+{
+	struct exynos_drm_dmabuf_attachment *exynos_attach;
+
+	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+	if (!exynos_attach)
+		return -ENOMEM;
+
+	exynos_attach->dir = DMA_NONE;
+	attach->priv = exynos_attach;
+
+	return 0;
+}
+
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+					struct dma_buf_attachment *attach)
+{
+	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+	struct sg_table *sgt;
+
+	if (!exynos_attach)
+		return;
+
+	sgt = &exynos_attach->sgt;
+
+	if (exynos_attach->dir != DMA_NONE)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+				exynos_attach->dir);
+
+	sg_free_table(sgt);
+	kfree(exynos_attach);
+	attach->priv = NULL;
+}
+
+static struct sg_table *
+		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
+					enum dma_data_direction dir)
+{
+	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+	struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
+	struct drm_device *dev = gem_obj->base.dev;
+	struct exynos_drm_gem_buf *buf;
+	struct scatterlist *rd, *wr;
+	struct sg_table *sgt = NULL;
+	unsigned int i;
+	int nents, ret;
+
+	DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+	/* just return the current sgt if it was already mapped with this direction. */
+	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
+		return &exynos_attach->sgt;
+
+	buf = gem_obj->buffer;
+	if (!buf) {
+		DRM_ERROR("buffer is null.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sgt = &exynos_attach->sgt;
+
+	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sgt.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	rd = buf->sgt->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	if (dir != DMA_NONE) {
+		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+		if (!nents) {
+			DRM_ERROR("failed to map sgl with iommu.\n");
+			sg_free_table(sgt);
+			sgt = ERR_PTR(-EIO);
+			goto err_unlock;
+		}
+	}
+
+	exynos_attach->is_mapped = true;
+	exynos_attach->dir = dir;
+	attach->priv = exynos_attach;
+
+	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
+
+err_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return sgt;
+}
+
+static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+						struct sg_table *sgt,
+						enum dma_data_direction dir)
+{
+	/* Nothing to do. */
+}
+
+static void exynos_dmabuf_release(struct dma_buf *dmabuf)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
+
+	DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+	/*
+	 * exynos_dmabuf_release() being called means that the file
+	 * object's f_count has dropped to 0, so drop the gem object
+	 * reference that was taken at drm_prime_handle_to_fd() time.
+	 */
+	if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
+		exynos_gem_obj->base.export_dma_buf = NULL;
+
+		/*
+		 * drop this gem object refcount to release allocated buffer
+		 * and resources.
+		 */
+		drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
+	}
+}
+
+static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+						unsigned long page_num)
+{
+	/* TODO */
+
+	return NULL;
+}
+
+static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+						unsigned long page_num,
+						void *addr)
+{
+	/* TODO */
+}
+
+static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
+					unsigned long page_num)
+{
+	/* TODO */
+
+	return NULL;
+}
+
+static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
+					unsigned long page_num, void *addr)
+{
+	/* TODO */
+}
+
+static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+	struct vm_area_struct *vma)
+{
+	return -ENOTTY;
+}
+
+static struct dma_buf_ops exynos_dmabuf_ops = {
+	.attach			= exynos_gem_attach_dma_buf,
+	.detach			= exynos_gem_detach_dma_buf,
+	.map_dma_buf		= exynos_gem_map_dma_buf,
+	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
+	.kmap			= exynos_gem_dmabuf_kmap,
+	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
+	.kunmap			= exynos_gem_dmabuf_kunmap,
+	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
+	.mmap			= exynos_gem_dmabuf_mmap,
+	.release		= exynos_dmabuf_release,
+};
+
+struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
+				struct drm_gem_object *obj, int flags)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
+				exynos_gem_obj->base.size, flags);
+}
+
+struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
+				struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+	struct scatterlist *sgl;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct exynos_drm_gem_buf *buffer;
+	int ret;
+
+	DRM_DEBUG_PRIME("%s\n", __FILE__);
+
+	/* is this one of our own objects? */
+	if (dma_buf->ops == &exynos_dmabuf_ops) {
+		struct drm_gem_object *obj;
+
+		exynos_gem_obj = dma_buf->priv;
+		obj = &exynos_gem_obj->base;
+
+		/* is it from our device? */
+		if (obj->dev == drm_dev) {
+			/*
+			 * Importing a dmabuf exported from our own gem
+			 * increases the refcount on the gem itself instead
+			 * of the f_count of the dmabuf.
+			 */
+			drm_gem_object_reference(obj);
+			return obj;
+		}
+	}
+
+	attach = dma_buf_attach(dma_buf, drm_dev->dev);
+	if (IS_ERR(attach))
+		return ERR_PTR(-EINVAL);
+
+	get_dma_buf(dma_buf);
+
+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(sgt)) {
+		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
+		goto err_buf_detach;
+	}
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer) {
+		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
+		ret = -ENOMEM;
+		goto err_unmap_attach;
+	}
+
+	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
+	if (!exynos_gem_obj) {
+		ret = -ENOMEM;
+		goto err_free_buffer;
+	}
+
+	sgl = sgt->sgl;
+
+	buffer->size = dma_buf->size;
+	buffer->dma_addr = sg_dma_address(sgl);
+
+	if (sgt->nents == 1) {
+		/* always physically contiguous memory if sgt->nents is 1. */
+		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
+	} else {
+		/*
+		 * this case could be either CONTIG or NONCONTIG type but
+		 * for now assume NONCONTIG.
+		 * TODO: find a way for the exporter to notify the importer
+		 * of the type of its own buffer.
+		 */
+		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
+	}
+
+	exynos_gem_obj->buffer = buffer;
+	buffer->sgt = sgt;
+	exynos_gem_obj->base.import_attach = attach;
+
+	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
+								buffer->size);
+
+	return &exynos_gem_obj->base;
+
+err_free_buffer:
+	kfree(buffer);
+	buffer = NULL;
+err_unmap_attach:
+	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+err_buf_detach:
+	dma_buf_detach(dma_buf, attach);
+	dma_buf_put(dma_buf);
+
+	return ERR_PTR(ret);
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
+MODULE_LICENSE("GPL");
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
new file mode 100644
index 0000000..49acfaf
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
@@ -0,0 +1,25 @@
+/* exynos_drm_dmabuf.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_DMABUF_H_
+#define _EXYNOS_DRM_DMABUF_H_
+
+#ifdef CONFIG_DRM_EXYNOS_DMABUF
+struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
+				struct drm_gem_object *obj, int flags);
+
+struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
+						struct dma_buf *dma_buf);
+#else
+#define exynos_dmabuf_prime_export		NULL
+#define exynos_dmabuf_prime_import		NULL
+#endif
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_drv.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_drv.c
new file mode 100644
index 0000000..ba6d995
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_fbdev.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
+#include "exynos_drm_vidi.h"
+#include "exynos_drm_dmabuf.h"
+#include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
+
+#define DRIVER_NAME	"exynos"
+#define DRIVER_DESC	"Samsung SoC DRM"
+#define DRIVER_DATE	"20110530"
+#define DRIVER_MAJOR	1
+#define DRIVER_MINOR	0
+
+#define VBLANK_OFF_DELAY	50000
+
+/* platform device pointer for the exynos drm device. */
+static struct platform_device *exynos_drm_pdev;
+
+static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
+{
+	struct exynos_drm_private *private;
+	int ret;
+	int nr;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
+	if (!private) {
+		DRM_ERROR("failed to allocate private\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&private->pageflip_event_list);
+	dev->dev_private = (void *)private;
+
+	/*
+	 * create a mapping to manage the iommu table, and store a pointer
+	 * to the iommu mapping structure in iommu_mapping of the private
+	 * data. this iommu_mapping can also be used to check whether iommu
+	 * is supported or not.
+	 */
+	ret = drm_create_iommu_mapping(dev);
+	if (ret < 0) {
+		DRM_ERROR("failed to create iommu mapping.\n");
+		goto err_crtc;
+	}
+
+	drm_mode_config_init(dev);
+
+	/* init kms poll for handling hpd */
+	drm_kms_helper_poll_init(dev);
+
+	exynos_drm_mode_config_init(dev);
+
+	/*
+	 * EXYNOS4 supports up to two CRTCs, and each crtc can be used
+	 * independently of the underlying hardware.
+	 */
+	for (nr = 0; nr < MAX_CRTC; nr++) {
+		ret = exynos_drm_crtc_create(dev, nr);
+		if (ret)
+			goto err_release_iommu_mapping;
+	}
+
+	for (nr = 0; nr < MAX_PLANE; nr++) {
+		struct drm_plane *plane;
+		unsigned int possible_crtcs = (1 << MAX_CRTC) - 1;
+
+		plane = exynos_plane_init(dev, possible_crtcs, false);
+		if (!plane) {
+			ret = -ENOMEM;
+			goto err_release_iommu_mapping;
+		}
+	}
+
+	ret = drm_vblank_init(dev, MAX_CRTC);
+	if (ret)
+		goto err_release_iommu_mapping;
+
+	/*
+	 * probe sub drivers such as the display controller and hdmi driver
+	 * that were registered at probe() of the platform driver, and
+	 * create an encoder and connector for each of them.
+	 */
+	ret = exynos_drm_device_register(dev);
+	if (ret)
+		goto err_vblank;
+
+	/* setup possible_clones. */
+	exynos_drm_encoder_setup(dev);
+
+	/*
+	 * create and configure the fb helper and also the exynos specific
+	 * fbdev object.
+	 */
+	ret = exynos_drm_fbdev_init(dev);
+	if (ret) {
+		DRM_ERROR("failed to initialize drm fbdev\n");
+		goto err_drm_device;
+	}
+
+	drm_vblank_offdelay = VBLANK_OFF_DELAY;
+
+	return 0;
+
+err_drm_device:
+	exynos_drm_device_unregister(dev);
+err_vblank:
+	drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+	drm_release_iommu_mapping(dev);
+err_crtc:
+	drm_mode_config_cleanup(dev);
+	kfree(private);
+
+	return ret;
+}
+
+static int exynos_drm_unload(struct drm_device *dev)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	exynos_drm_fbdev_fini(dev);
+	exynos_drm_device_unregister(dev);
+	drm_vblank_cleanup(dev);
+	drm_kms_helper_poll_fini(dev);
+	drm_mode_config_cleanup(dev);
+
+	drm_release_iommu_mapping(dev);
+	kfree(dev->dev_private);
+
+	dev->dev_private = NULL;
+
+	return 0;
+}
+
+static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+
+	return exynos_drm_subdrv_open(dev, file);
+}
+
+static void exynos_drm_preclose(struct drm_device *dev,
+					struct drm_file *file)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+	struct drm_pending_vblank_event *e, *t;
+	unsigned long flags;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	/* release events of current file */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_for_each_entry_safe(e, t, &private->pageflip_event_list,
+			base.link) {
+		if (e->base.file_priv == file) {
+			list_del(&e->base.link);
+			e->base.destroy(&e->base);
+		}
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	exynos_drm_subdrv_close(dev, file);
+}
+
+static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (!file->driver_priv)
+		return;
+
+	kfree(file->driver_priv);
+	file->driver_priv = NULL;
+}
+
+static void exynos_drm_lastclose(struct drm_device *dev)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	exynos_drm_fbdev_restore_mode(dev);
+}
+
+static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
+	.fault = exynos_drm_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static struct drm_ioctl_desc exynos_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
+			DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
+			exynos_drm_gem_map_offset_ioctl, DRM_UNLOCKED |
+			DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
+			exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
+			exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
+			vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
+			exynos_g2d_get_ver_ioctl, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST,
+			exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
+			exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+			exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+			exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+			exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+			exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
+};
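+
+/*
+ * Usage sketch (illustrative, not part of the driver): userspace reaches
+ * these handlers through libdrm; e.g. allocating a gem buffer, where
+ * "fd", "size" and "flags" are placeholders:
+ *
+ *	struct drm_exynos_gem_create req = { .size = size, .flags = flags };
+ *
+ *	if (drmCommandWriteRead(fd, DRM_EXYNOS_GEM_CREATE,
+ *				&req, sizeof(req)) == 0)
+ *		handle = req.handle;
+ */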
+
+static const struct file_operations exynos_drm_driver_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.mmap		= exynos_drm_gem_mmap,
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.release	= drm_release,
+};
+
+static struct drm_driver exynos_drm_driver = {
+	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_MODESET |
+					DRIVER_GEM | DRIVER_PRIME,
+	.load			= exynos_drm_load,
+	.unload			= exynos_drm_unload,
+	.open			= exynos_drm_open,
+	.preclose		= exynos_drm_preclose,
+	.lastclose		= exynos_drm_lastclose,
+	.postclose		= exynos_drm_postclose,
+	.get_vblank_counter	= drm_vblank_count,
+	.enable_vblank		= exynos_drm_crtc_enable_vblank,
+	.disable_vblank		= exynos_drm_crtc_disable_vblank,
+	.gem_init_object	= exynos_drm_gem_init_object,
+	.gem_free_object	= exynos_drm_gem_free_object,
+	.gem_vm_ops		= &exynos_drm_gem_vm_ops,
+	.dumb_create		= exynos_drm_gem_dumb_create,
+	.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
+	.dumb_destroy		= exynos_drm_gem_dumb_destroy,
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+	.gem_prime_export	= exynos_dmabuf_prime_export,
+	.gem_prime_import	= exynos_dmabuf_prime_import,
+	.ioctls			= exynos_ioctls,
+	.fops			= &exynos_drm_driver_fops,
+	.name	= DRIVER_NAME,
+	.desc	= DRIVER_DESC,
+	.date	= DRIVER_DATE,
+	.major	= DRIVER_MAJOR,
+	.minor	= DRIVER_MINOR,
+};
+
+static int exynos_drm_platform_probe(struct platform_device *pdev)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
+
+	return drm_platform_init(&exynos_drm_driver, pdev);
+}
+
+static int exynos_drm_platform_remove(struct platform_device *pdev)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	drm_platform_exit(&exynos_drm_driver, pdev);
+
+	return 0;
+}
+
+static struct platform_driver exynos_drm_platform_driver = {
+	.probe		= exynos_drm_platform_probe,
+	.remove		= exynos_drm_platform_remove,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "exynos-drm",
+	},
+};
+
+static int __init exynos_drm_init(void)
+{
+	int ret;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+#ifdef CONFIG_DRM_EXYNOS_FIMD
+	ret = platform_driver_register(&fimd_driver);
+	if (ret < 0)
+		goto out_fimd;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_HDMI
+	ret = platform_driver_register(&hdmi_driver);
+	if (ret < 0)
+		goto out_hdmi;
+	ret = platform_driver_register(&mixer_driver);
+	if (ret < 0)
+		goto out_mixer;
+	ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
+	if (ret < 0)
+		goto out_common_hdmi;
+
+	ret = exynos_platform_device_hdmi_register();
+	if (ret < 0)
+		goto out_common_hdmi_dev;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+	ret = platform_driver_register(&vidi_driver);
+	if (ret < 0)
+		goto out_vidi;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_G2D
+	ret = platform_driver_register(&g2d_driver);
+	if (ret < 0)
+		goto out_g2d;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+	ret = platform_driver_register(&fimc_driver);
+	if (ret < 0)
+		goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+	ret = platform_driver_register(&rotator_driver);
+	if (ret < 0)
+		goto out_rotator;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+	ret = platform_driver_register(&gsc_driver);
+	if (ret < 0)
+		goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	ret = platform_driver_register(&ipp_driver);
+	if (ret < 0)
+		goto out_ipp;
+
+	ret = exynos_platform_device_ipp_register();
+	if (ret < 0)
+		goto out_ipp_dev;
+#endif
+
+	ret = platform_driver_register(&exynos_drm_platform_driver);
+	if (ret < 0)
+		goto out_drm;
+
+	exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+				NULL, 0);
+	if (IS_ERR(exynos_drm_pdev)) {
+		ret = PTR_ERR(exynos_drm_pdev);
+		goto out;
+	}
+
+	return 0;
+
+out:
+	platform_driver_unregister(&exynos_drm_platform_driver);
+
+out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	exynos_platform_device_ipp_unregister();
+out_ipp_dev:
+	platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+	platform_driver_unregister(&gsc_driver);
+out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+	platform_driver_unregister(&rotator_driver);
+out_rotator:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+	platform_driver_unregister(&fimc_driver);
+out_fimc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_G2D
+	platform_driver_unregister(&g2d_driver);
+out_g2d:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+	platform_driver_unregister(&vidi_driver);
+out_vidi:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_HDMI
+	exynos_platform_device_hdmi_unregister();
+out_common_hdmi_dev:
+	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
+out_common_hdmi:
+	platform_driver_unregister(&mixer_driver);
+out_mixer:
+	platform_driver_unregister(&hdmi_driver);
+out_hdmi:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMD
+	platform_driver_unregister(&fimd_driver);
+out_fimd:
+#endif
+	return ret;
+}
+
+static void __exit exynos_drm_exit(void)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	platform_device_unregister(exynos_drm_pdev);
+
+	platform_driver_unregister(&exynos_drm_platform_driver);
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	exynos_platform_device_ipp_unregister();
+	platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+	platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+	platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+	platform_driver_unregister(&fimc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_G2D
+	platform_driver_unregister(&g2d_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_HDMI
+	exynos_platform_device_hdmi_unregister();
+	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
+	platform_driver_unregister(&mixer_driver);
+	platform_driver_unregister(&hdmi_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+	platform_driver_unregister(&vidi_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMD
+	platform_driver_unregister(&fimd_driver);
+#endif
+}
+
+module_init(exynos_drm_init);
+module_exit(exynos_drm_exit);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_drv.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_drv.h
new file mode 100644
index 0000000..680a7c1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -0,0 +1,352 @@
+/* exynos_drm_drv.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_DRV_H_
+#define _EXYNOS_DRM_DRV_H_
+
+#include <linux/module.h>
+
+#define MAX_CRTC	3
+#define MAX_PLANE	5
+#define MAX_FB_BUFFER	4
+#define DEFAULT_ZPOS	-1
+
+#define _wait_for(COND, MS) ({ \
+	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
+	int ret__ = 0;							\
+	while (!(COND)) {						\
+		if (time_after(jiffies, timeout__)) {			\
+			ret__ = -ETIMEDOUT;				\
+			break;						\
+		}							\
+	}								\
+	ret__;								\
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS)
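+
+/*
+ * Usage sketch (illustrative, hypothetical register names): poll a
+ * status bit for up to 100 ms, e.g. in a driver callback:
+ *
+ *	if (wait_for(readl(ctx->regs + FOO_STATUS) & FOO_READY, 100))
+ *		DRM_ERROR("timed out waiting for FOO_READY\n");
+ *
+ * note that the condition is re-evaluated in a tight busy loop until it
+ * becomes true or MS milliseconds have elapsed.
+ */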
+
+struct drm_device;
+struct exynos_drm_overlay;
+struct drm_connector;
+
+extern unsigned int drm_vblank_offdelay;
+
+/* this enumerates display type. */
+enum exynos_drm_output_type {
+	EXYNOS_DISPLAY_TYPE_NONE,
+	/* RGB or CPU Interface. */
+	EXYNOS_DISPLAY_TYPE_LCD,
+	/* HDMI Interface. */
+	EXYNOS_DISPLAY_TYPE_HDMI,
+	/* Virtual Display Interface. */
+	EXYNOS_DISPLAY_TYPE_VIDI,
+};
+
+/*
+ * Exynos drm overlay ops structure.
+ *
+ * @mode_set: copy drm overlay info to hw specific overlay info.
+ * @commit: apply hardware specific overlay data to registers.
+ * @enable: enable hardware specific overlay.
+ * @disable: disable hardware specific overlay.
+ */
+struct exynos_drm_overlay_ops {
+	void (*mode_set)(struct device *subdrv_dev,
+			 struct exynos_drm_overlay *overlay);
+	void (*commit)(struct device *subdrv_dev, int zpos);
+	void (*enable)(struct device *subdrv_dev, int zpos);
+	void (*disable)(struct device *subdrv_dev, int zpos);
+};
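+
+/*
+ * Wiring sketch (illustrative, hypothetical names): a sub driver such as
+ * fimd would typically provide these callbacks like so:
+ *
+ *	static struct exynos_drm_overlay_ops foo_overlay_ops = {
+ *		.mode_set	= foo_win_mode_set,
+ *		.commit		= foo_win_commit,
+ *		.enable		= foo_win_enable,
+ *		.disable	= foo_win_disable,
+ *	};
+ *
+ * the exynos drm core then drives the hardware window through these
+ * callbacks from the plane and encoder code.
+ */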
+
+/*
+ * Exynos drm common overlay structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displayed.
+ *	- the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed.
+ *	- the unit is screen coordinates.
+ * @fb_width: width of a framebuffer.
+ * @fb_height: height of a framebuffer.
+ * @src_width: width of a partial image to be displayed from framebuffer.
+ * @src_height: height of a partial image to be displayed from framebuffer.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_width: window width to be displayed (hardware screen).
+ * @crtc_height: window height to be displayed (hardware screen).
+ * @mode_width: width of screen mode.
+ * @mode_height: height of screen mode.
+ * @refresh: refresh rate.
+ * @scan_flag: interlaced or progressive scan.
+ *	(one of the DRM_MODE_FLAG_* values)
+ * @bpp: pixel size in bits.
+ * @pitch: stride of a framebuffer line in bytes.
+ * @pixel_format: fourcc pixel format of this overlay.
+ * @dma_addr: array of bus (dma) addresses of the memory regions
+ *	      allocated for the overlay.
+ * @zpos: order of the overlay layer (z position).
+ * @default_win: whether this is the default window.
+ * @color_key: color key on or off.
+ * @index_color: index color used when the color key feature is enabled.
+ * @local_path: for the lcd type, local path mode on or off.
+ * @transparency: transparency on or off.
+ * @activated: activated or not.
+ *
+ * this structure is common to the exynos SoCs and its contents are copied
+ * to the hardware specific overlay info.
+ */
+struct exynos_drm_overlay {
+	unsigned int fb_x;
+	unsigned int fb_y;
+	unsigned int fb_width;
+	unsigned int fb_height;
+	unsigned int src_width;
+	unsigned int src_height;
+	unsigned int crtc_x;
+	unsigned int crtc_y;
+	unsigned int crtc_width;
+	unsigned int crtc_height;
+	unsigned int mode_width;
+	unsigned int mode_height;
+	unsigned int refresh;
+	unsigned int scan_flag;
+	unsigned int bpp;
+	unsigned int pitch;
+	uint32_t pixel_format;
+	dma_addr_t dma_addr[MAX_FB_BUFFER];
+	int zpos;
+
+	bool default_win;
+	bool color_key;
+	unsigned int index_color;
+	bool local_path;
+	bool transparency;
+	bool activated;
+};
+
+/*
+ * Exynos DRM Display Structure.
+ *	- this structure is common to analog tv, digital tv and lcd panel.
+ *
+ * @type: one of EXYNOS_DISPLAY_TYPE_LCD, HDMI and VIDI.
+ * @is_connected: check whether the display is connected or not.
+ * @get_edid: get edid modes from the display driver.
+ * @get_panel: get the panel object from the display driver.
+ * @check_timing: check whether the timing is valid or not.
+ * @power_on: turn the display device on or off.
+ */
+struct exynos_drm_display_ops {
+	enum exynos_drm_output_type type;
+	bool (*is_connected)(struct device *dev);
+	struct edid *(*get_edid)(struct device *dev,
+			struct drm_connector *connector);
+	void *(*get_panel)(struct device *dev);
+	int (*check_timing)(struct device *dev, void *timing);
+	int (*power_on)(struct device *dev, int mode);
+};
+
+/*
+ * Exynos drm manager ops
+ *
+ * @dpms: control device power.
+ * @apply: set timing, vblank and overlay data to registers.
+ * @mode_fixup: fix mode data comparing to hw specific display mode.
+ * @mode_set: convert drm_display_mode to hw specific display mode and
+ *	      would be called by encoder->mode_set().
+ * @get_max_resol: get maximum resolution to specific hardware.
+ * @commit: set current hw specific display mode to hw.
+ * @enable_vblank: specific driver callback for enabling vblank interrupt.
+ * @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ *	hardware overlay is updated.
+ */
+struct exynos_drm_manager_ops {
+	void (*dpms)(struct device *subdrv_dev, int mode);
+	void (*apply)(struct device *subdrv_dev);
+	void (*mode_fixup)(struct device *subdrv_dev,
+				struct drm_connector *connector,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode);
+	void (*mode_set)(struct device *subdrv_dev, void *mode);
+	void (*get_max_resol)(struct device *subdrv_dev, unsigned int *width,
+				unsigned int *height);
+	void (*commit)(struct device *subdrv_dev);
+	int (*enable_vblank)(struct device *subdrv_dev);
+	void (*disable_vblank)(struct device *subdrv_dev);
+	void (*wait_for_vblank)(struct device *subdrv_dev);
+};
+
+/*
+ * Exynos drm common manager structure.
+ *
+ * @dev: pointer to device object for subdrv device driver.
+ *	sub drivers such as display controller or hdmi driver,
+ *	have their own device object.
+ * @pipe: hardware pipe (crtc index) this manager is bound to, or -1
+ *	when it is detached.
+ * @ops: pointer to manager callbacks. these callbacks should be set by
+ *	specific drivers such as the fimd or hdmi driver and are used to
+ *	control hardware global registers.
+ * @overlay_ops: pointer to overlay callbacks. these callbacks should be
+ *	set by specific drivers such as the fimd or hdmi driver and are
+ *	used to control hardware overlay registers.
+ * @display_ops: pointer to display callbacks. these callbacks should be
+ *	set by specific drivers such as the fimd or hdmi driver and are
+ *	used to control display devices such as analog tv, digital tv and
+ *	lcd panel, and also to get timing data for them.
+ */
+struct exynos_drm_manager {
+	struct device *dev;
+	int pipe;
+	struct exynos_drm_manager_ops *ops;
+	struct exynos_drm_overlay_ops *overlay_ops;
+	struct exynos_drm_display_ops *display_ops;
+};
+
+struct exynos_drm_g2d_private {
+	struct device		*dev;
+	struct list_head	inuse_cmdlist;
+	struct list_head	event_list;
+	struct list_head	userptr_list;
+};
+
+struct exynos_drm_ipp_private {
+	struct device	*dev;
+	struct list_head	event_list;
+};
+
+struct drm_exynos_file_private {
+	struct exynos_drm_g2d_private	*g2d_priv;
+	struct exynos_drm_ipp_private	*ipp_priv;
+};
+
+/*
+ * Exynos drm private structure.
+ *
+ * @da_start: start address of the device address space.
+ *	with iommu, the device address space starts from this address,
+ *	otherwise the default one is used.
+ * @da_space_size: size of the device address space.
+ *	if 0, a default value is used instead.
+ * @da_space_order: order of the device address space.
+ */
+struct exynos_drm_private {
+	struct drm_fb_helper *fb_helper;
+
+	/* list head for new event to be added. */
+	struct list_head pageflip_event_list;
+
+	/*
+	 * created crtc objects are contained in this array, and the array
+	 * is used to figure out which crtc requested a vblank.
+	 */
+	struct drm_crtc *crtc[MAX_CRTC];
+	struct drm_property *plane_zpos_property;
+	struct drm_property *crtc_mode_property;
+
+	unsigned long da_start;
+	unsigned long da_space_size;
+	unsigned long da_space_order;
+};
+
+/*
+ * Exynos drm sub driver structure.
+ *
+ * @list: list head used to register this sub driver with the exynos drm
+ *	driver.
+ * @dev: pointer to device object for subdrv device driver.
+ * @drm_dev: pointer to drm_device and this pointer would be set
+ *	when sub driver calls exynos_drm_subdrv_register().
+ * @manager: manager owned by this subdrv, used to control its hardware
+ *	and to draw to it.
+ * @probe: this callback would be called by exynos drm driver after
+ *	subdrv is registered to it.
+ * @remove: this callback is used to release resources created
+ *	by probe callback.
+ * @open: called when the drm device file is opened.
+ * @close: called when the drm device file is closed.
+ * @encoder: encoder object owned by this sub driver.
+ * @connector: connector object owned by this sub driver.
+ */
+struct exynos_drm_subdrv {
+	struct list_head list;
+	struct device *dev;
+	struct drm_device *drm_dev;
+	struct exynos_drm_manager *manager;
+
+	int (*probe)(struct drm_device *drm_dev, struct device *dev);
+	void (*remove)(struct drm_device *drm_dev, struct device *dev);
+	int (*open)(struct drm_device *drm_dev, struct device *dev,
+			struct drm_file *file);
+	void (*close)(struct drm_device *drm_dev, struct device *dev,
+			struct drm_file *file);
+
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+};
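+
+/*
+ * Registration sketch (illustrative, hypothetical names): a sub driver
+ * usually fills in a static instance and registers it from its platform
+ * probe():
+ *
+ *	static struct exynos_drm_subdrv foo_subdrv = {
+ *		.manager	= &foo_manager,
+ *		.probe		= foo_subdrv_probe,
+ *	};
+ *
+ *	foo_subdrv.dev = &pdev->dev;
+ *	exynos_drm_subdrv_register(&foo_subdrv);
+ */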
+
+/*
+ * this function calls the probe callback of every sub driver registered
+ * in the sub driver list, creates an encoder and connector for each, and
+ * then sets the drm_device object on them.
+ */
+int exynos_drm_device_register(struct drm_device *dev);
+/*
+ * this function calls the remove callback of every sub driver registered
+ * in the sub driver list and destroys their encoders and connectors.
+ */
+int exynos_drm_device_unregister(struct drm_device *dev);
+
+/*
+ * this function is called by sub drivers such as the display controller
+ * or hdmi driver to register their sub driver object with the exynos drm
+ * driver. when a sub driver is registered, its probe callback is called,
+ * which creates the sub driver's encoder and connector.
+ */
+int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
+
+/* this function removes the subdrv from the exynos drm driver's list */
+int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
+
+int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
+void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
+
+/*
+ * this function registers exynos drm hdmi platform device. It ensures only one
+ * instance of the device is created.
+ */
+int exynos_platform_device_hdmi_register(void);
+
+/*
+ * this function unregisters exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
+/*
+ * this function registers exynos drm ipp platform device.
+ */
+int exynos_platform_device_ipp_register(void);
+
+/*
+ * this function unregisters exynos drm ipp platform device if it exists.
+ */
+void exynos_platform_device_ipp_unregister(void);
+
+extern struct platform_driver fimd_driver;
+extern struct platform_driver hdmi_driver;
+extern struct platform_driver mixer_driver;
+extern struct platform_driver exynos_drm_common_hdmi_driver;
+extern struct platform_driver vidi_driver;
+extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_encoder.c
new file mode 100644
index 0000000..c63721f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -0,0 +1,520 @@
+/* exynos_drm_encoder.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_connector.h"
+
+#define to_exynos_encoder(x)	container_of(x, struct exynos_drm_encoder,\
+				drm_encoder)
+
+/*
+ * exynos specific encoder structure.
+ *
+ * @old_crtc: crtc the encoder was previously attached to.
+ * @drm_encoder: encoder object.
+ * @manager: manager owned by this encoder, used to control its hardware
+ *	and to draw to it.
+ * @dpms: store the encoder dpms value.
+ * @updated: whether the overlay data has already been applied to the
+ *	hardware.
+ */
+struct exynos_drm_encoder {
+	struct drm_crtc			*old_crtc;
+	struct drm_encoder		drm_encoder;
+	struct exynos_drm_manager	*manager;
+	int				dpms;
+	bool				updated;
+};
+
+static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (exynos_drm_best_encoder(connector) == encoder) {
+			DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+					connector->base.id, mode);
+
+			exynos_drm_display_power(connector, mode);
+		}
+	}
+}
+
+static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+	struct exynos_drm_manager_ops *manager_ops = manager->ops;
+	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+
+	DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
+
+	if (exynos_encoder->dpms == mode) {
+		DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+		return;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		if (manager_ops && manager_ops->apply)
+			if (!exynos_encoder->updated)
+				manager_ops->apply(manager->dev);
+
+		exynos_drm_connector_power(encoder, mode);
+		exynos_encoder->dpms = mode;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		exynos_drm_connector_power(encoder, mode);
+		exynos_encoder->dpms = mode;
+		exynos_encoder->updated = false;
+		break;
+	default:
+		DRM_ERROR("unspecified mode %d\n", mode);
+		break;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+}
+
+static bool
+exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+			       const struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
+	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+	struct exynos_drm_manager_ops *manager_ops = manager->ops;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder)
+			if (manager_ops && manager_ops->mode_fixup)
+				manager_ops->mode_fixup(manager->dev, connector,
+							mode, adjusted_mode);
+	}
+
+	return true;
+}
+
+static void disable_plane_to_crtc(struct drm_device *dev,
+						struct drm_crtc *old_crtc,
+						struct drm_crtc *new_crtc)
+{
+	struct drm_plane *plane;
+
+	/*
+	 * if old_crtc isn't the same as encoder->crtc, the user changed the
+	 * crtc id to another one, so any plane on old_crtc should be
+	 * disabled and plane->crtc should be set to new_crtc
+	 * (encoder->crtc).
+	 */
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		if (plane->crtc == old_crtc) {
+			/*
+			 * do not change the call order below.
+			 *
+			 * the plane->funcs->disable_plane call checks
+			 * whether encoder->crtc is the same as plane->crtc;
+			 * if so, the overlay_ops->disable callback is called
+			 * to disable the current hw overlay. so plane->crtc
+			 * must already hold new_crtc, because new_crtc was
+			 * set to encoder->crtc in advance.
+			 */
+			plane->crtc = new_crtc;
+			plane->funcs->disable_plane(plane);
+		}
+	}
+}
+
+static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
+					 struct drm_display_mode *mode,
+					 struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
+	struct exynos_drm_manager *manager;
+	struct exynos_drm_manager_ops *manager_ops;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			struct exynos_drm_encoder *exynos_encoder;
+
+			exynos_encoder = to_exynos_encoder(encoder);
+
+			if (exynos_encoder->old_crtc != encoder->crtc &&
+					exynos_encoder->old_crtc) {
+
+				/*
+				 * disable any plane on the old crtc and
+				 * move the plane to the new one.
+				 */
+				disable_plane_to_crtc(dev,
+						exynos_encoder->old_crtc,
+						encoder->crtc);
+			}
+
+			manager = exynos_drm_get_manager(encoder);
+			manager_ops = manager->ops;
+
+			if (manager_ops && manager_ops->mode_set)
+				manager_ops->mode_set(manager->dev,
+							adjusted_mode);
+
+			exynos_encoder->old_crtc = encoder->crtc;
+		}
+	}
+}
+
+static void exynos_drm_encoder_prepare(struct drm_encoder *encoder)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* the drm framework doesn't check for a NULL callback, so keep this stub. */
+}
+
+static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
+{
+	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+	struct exynos_drm_manager *manager = exynos_encoder->manager;
+	struct exynos_drm_manager_ops *manager_ops = manager->ops;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (manager_ops && manager_ops->commit)
+		manager_ops->commit(manager->dev);
+
+	/*
+	 * this avoids the issue of the overlay data being written to the
+	 * real hardware twice.
+	 * the variable is also used by the exynos_drm_encoder_dpms
+	 * function to check whether the data has already been updated.
+	 */
+	exynos_encoder->updated = true;
+
+	/*
+	 * In case of setcrtc, there is no way to update encoder's dpms
+	 * so update it here.
+	 */
+	exynos_encoder->dpms = DRM_MODE_DPMS_ON;
+}
+
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+	struct exynos_drm_encoder *exynos_encoder;
+	struct exynos_drm_manager_ops *ops;
+	struct drm_device *dev = fb->dev;
+	struct drm_encoder *encoder;
+
+	/*
+	 * make sure that overlay data are updated to real hardware
+	 * for all encoders.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		exynos_encoder = to_exynos_encoder(encoder);
+		ops = exynos_encoder->manager->ops;
+
+		/*
+		 * wait for vblank interrupt
+		 * - this makes sure that overlay data are updated to
+		 *	real hardware.
+		 */
+		if (ops->wait_for_vblank)
+			ops->wait_for_vblank(exynos_encoder->manager->dev);
+	}
+}
+
+
+static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
+{
+	struct drm_plane *plane;
+	struct drm_device *dev = encoder->dev;
+
+	exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	/* all planes connected to this encoder should be also disabled. */
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		if (plane->crtc == encoder->crtc)
+			plane->funcs->disable_plane(plane);
+	}
+}
+
+static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
+	.dpms		= exynos_drm_encoder_dpms,
+	.mode_fixup	= exynos_drm_encoder_mode_fixup,
+	.mode_set	= exynos_drm_encoder_mode_set,
+	.prepare	= exynos_drm_encoder_prepare,
+	.commit		= exynos_drm_encoder_commit,
+	.disable	= exynos_drm_encoder_disable,
+};
+
+static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct exynos_drm_encoder *exynos_encoder =
+		to_exynos_encoder(encoder);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_encoder->manager->pipe = -1;
+
+	drm_encoder_cleanup(encoder);
+	kfree(exynos_encoder);
+}
+
+static struct drm_encoder_funcs exynos_encoder_funcs = {
+	.destroy = exynos_drm_encoder_destroy,
+};
+
+static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder)
+{
+	struct drm_encoder *clone;
+	struct drm_device *dev = encoder->dev;
+	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+	struct exynos_drm_display_ops *display_ops =
+				exynos_encoder->manager->display_ops;
+	unsigned int clone_mask = 0;
+	int cnt = 0;
+
+	list_for_each_entry(clone, &dev->mode_config.encoder_list, head) {
+		switch (display_ops->type) {
+		case EXYNOS_DISPLAY_TYPE_LCD:
+		case EXYNOS_DISPLAY_TYPE_HDMI:
+		case EXYNOS_DISPLAY_TYPE_VIDI:
+			clone_mask |= (1 << (cnt++));
+			break;
+		default:
+			continue;
+		}
+	}
+
+	return clone_mask;
+}
+
+void exynos_drm_encoder_setup(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		encoder->possible_clones = exynos_drm_encoder_clones(encoder);
+}
+
+struct drm_encoder *
+exynos_drm_encoder_create(struct drm_device *dev,
+			   struct exynos_drm_manager *manager,
+			   unsigned int possible_crtcs)
+{
+	struct drm_encoder *encoder;
+	struct exynos_drm_encoder *exynos_encoder;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!manager || !possible_crtcs)
+		return NULL;
+
+	if (!manager->dev)
+		return NULL;
+
+	exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
+	if (!exynos_encoder) {
+		DRM_ERROR("failed to allocate encoder\n");
+		return NULL;
+	}
+
+	exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
+	exynos_encoder->manager = manager;
+	encoder = &exynos_encoder->drm_encoder;
+	encoder->possible_crtcs = possible_crtcs;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(dev, encoder, &exynos_encoder_funcs,
+			DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs);
+
+	DRM_DEBUG_KMS("encoder has been created\n");
+
+	return encoder;
+}
+
+struct exynos_drm_manager *exynos_drm_get_manager(struct drm_encoder *encoder)
+{
+	return to_exynos_encoder(encoder)->manager;
+}
+
+void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
+			    void (*fn)(struct drm_encoder *, void *))
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+	struct exynos_drm_private *private = dev->dev_private;
+	struct exynos_drm_manager *manager;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		/*
+		 * if crtc is detached from encoder, check pipe,
+		 * otherwise check crtc attached to encoder
+		 */
+		if (!encoder->crtc) {
+			manager = to_exynos_encoder(encoder)->manager;
+			if (manager->pipe < 0 ||
+					private->crtc[manager->pipe] != crtc)
+				continue;
+		} else {
+			if (encoder->crtc != crtc)
+				continue;
+		}
+
+		fn(encoder, data);
+	}
+}
+
+void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_manager *manager =
+		to_exynos_encoder(encoder)->manager;
+	struct exynos_drm_manager_ops *manager_ops = manager->ops;
+	int crtc = *(int *)data;
+
+	if (manager->pipe != crtc)
+		return;
+
+	if (manager_ops->enable_vblank)
+		manager_ops->enable_vblank(manager->dev);
+}
+
+void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_manager *manager =
+		to_exynos_encoder(encoder)->manager;
+	struct exynos_drm_manager_ops *manager_ops = manager->ops;
+	int crtc = *(int *)data;
+
+	if (manager->pipe != crtc)
+		return;
+
+	if (manager_ops->disable_vblank)
+		manager_ops->disable_vblank(manager->dev);
+}
+
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+	struct exynos_drm_manager *manager = exynos_encoder->manager;
+	struct exynos_drm_manager_ops *manager_ops = manager->ops;
+	int mode = *(int *)data;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (manager_ops && manager_ops->dpms)
+		manager_ops->dpms(manager->dev, mode);
+
+	/*
+	 * if this condition holds, the crtc has already been detached from
+	 * the encoder and the final detach step completed properly, so
+	 * clear the pipe from the manager to prevent repeated calls.
+	 */
+	if (mode > DRM_MODE_DPMS_ON) {
+		if (!encoder->crtc)
+			manager->pipe = -1;
+	}
+}
+
+void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_manager *manager =
+		to_exynos_encoder(encoder)->manager;
+	int pipe = *(int *)data;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * when the crtc is detached from the encoder, this pipe is used
+	 * to select the manager operation.
+	 */
+	manager->pipe = pipe;
+}
+
+void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_manager *manager =
+		to_exynos_encoder(encoder)->manager;
+	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+	struct exynos_drm_overlay *overlay = data;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (overlay_ops && overlay_ops->mode_set)
+		overlay_ops->mode_set(manager->dev, overlay);
+}
+
+void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_manager *manager =
+		to_exynos_encoder(encoder)->manager;
+	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+	int zpos = DEFAULT_ZPOS;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (data)
+		zpos = *(int *)data;
+
+	if (overlay_ops && overlay_ops->commit)
+		overlay_ops->commit(manager->dev, zpos);
+}
+
+void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_manager *manager =
+		to_exynos_encoder(encoder)->manager;
+	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+	int zpos = DEFAULT_ZPOS;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (data)
+		zpos = *(int *)data;
+
+	if (overlay_ops && overlay_ops->enable)
+		overlay_ops->enable(manager->dev, zpos);
+}
+
+void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_manager *manager =
+		to_exynos_encoder(encoder)->manager;
+	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+	int zpos = DEFAULT_ZPOS;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (data)
+		zpos = *(int *)data;
+
+	if (overlay_ops && overlay_ops->disable)
+		overlay_ops->disable(manager->dev, zpos);
+}
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_encoder.h
new file mode 100644
index 0000000..89e2fb0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_ENCODER_H_
+#define _EXYNOS_DRM_ENCODER_H_
+
+struct exynos_drm_manager;
+
+void exynos_drm_encoder_setup(struct drm_device *dev);
+struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
+					       struct exynos_drm_manager *mgr,
+					       unsigned int possible_crtcs);
+struct exynos_drm_manager *
+exynos_drm_get_manager(struct drm_encoder *encoder);
+void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
+			    void (*fn)(struct drm_encoder *, void *));
+void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
+void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fb.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fb.c
new file mode 100644
index 0000000..0e04f4e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -0,0 +1,338 @@
+/* exynos_drm_fb.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
+
+#define to_exynos_fb(x)	container_of(x, struct exynos_drm_fb, fb)
+
+/*
+ * exynos specific framebuffer structure.
+ *
+ * @fb: drm framebuffer object.
+ * @buf_cnt: number of buffers attached to this framebuffer.
+ * @exynos_gem_obj: array of exynos specific gem objects backing this
+ *	framebuffer.
+ */
+struct exynos_drm_fb {
+	struct drm_framebuffer		fb;
+	unsigned int			buf_cnt;
+	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER];
+};
+
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+				struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+	unsigned int flags;
+
+	/*
+	 * if the exynos drm driver supports iommu, the framebuffer can use
+	 * all of the buffer types.
+	 */
+	if (is_drm_iommu_supported(drm_dev))
+		return 0;
+
+	flags = exynos_gem_obj->flags;
+
+	/*
+	 * without iommu support, physically non-contiguous memory cannot
+	 * be used for a framebuffer.
+	 */
+	if (IS_NONCONTIG_BUFFER(flags)) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
+{
+	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+	unsigned int i;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* make sure that overlay data are updated before releasing the fb. */
+	exynos_drm_encoder_complete_scanout(fb);
+
+	drm_framebuffer_cleanup(fb);
+
+	for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
+		struct drm_gem_object *obj;
+
+		if (exynos_fb->exynos_gem_obj[i] == NULL)
+			continue;
+
+		obj = &exynos_fb->exynos_gem_obj[i]->base;
+		drm_gem_object_unreference_unlocked(obj);
+	}
+
+	kfree(exynos_fb);
+}
+
+static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
+					struct drm_file *file_priv,
+					unsigned int *handle)
+{
+	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* This fb should have only one gem object. */
+	if (WARN_ON(exynos_fb->buf_cnt != 1))
+		return -EINVAL;
+
+	return drm_gem_handle_create(file_priv,
+			&exynos_fb->exynos_gem_obj[0]->base, handle);
+}
+
+static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
+				struct drm_file *file_priv, unsigned flags,
+				unsigned color, struct drm_clip_rect *clips,
+				unsigned num_clips)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO */
+
+	return 0;
+}
+
+static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
+	.destroy	= exynos_drm_fb_destroy,
+	.create_handle	= exynos_drm_fb_create_handle,
+	.dirty		= exynos_drm_fb_dirty,
+};
+
+void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
+						unsigned int cnt)
+{
+	struct exynos_drm_fb *exynos_fb;
+
+	exynos_fb = to_exynos_fb(fb);
+
+	exynos_fb->buf_cnt = cnt;
+}
+
+unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb)
+{
+	struct exynos_drm_fb *exynos_fb;
+
+	exynos_fb = to_exynos_fb(fb);
+
+	return exynos_fb->buf_cnt;
+}
+
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+			    struct drm_mode_fb_cmd2 *mode_cmd,
+			    struct drm_gem_object *obj)
+{
+	struct exynos_drm_fb *exynos_fb;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	int ret;
+
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+	if (ret < 0) {
+		DRM_ERROR("cannot use this gem memory type for fb.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+	if (!exynos_fb) {
+		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+	exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
+	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+	if (ret) {
+		DRM_ERROR("failed to initialize framebuffer\n");
+		kfree(exynos_fb);
+		return ERR_PTR(ret);
+	}
+
+	return &exynos_fb->fb;
+}
+
+static u32 exynos_drm_format_num_buffers(struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	unsigned int cnt = 0;
+
+	if (mode_cmd->pixel_format != DRM_FORMAT_NV12)
+		return drm_format_num_planes(mode_cmd->pixel_format);
+
+	while (cnt != MAX_FB_BUFFER) {
+		if (!mode_cmd->handles[cnt])
+			break;
+		cnt++;
+	}
+
+	/*
+	 * check if NV12 or NV12M.
+	 *
+	 * NV12
+	 * handles[0] = base1, offsets[0] = 0
+	 * handles[1] = base1, offsets[1] = Y_size
+	 *
+	 * NV12M
+	 * handles[0] = base1, offsets[0] = 0
+	 * handles[1] = base2, offsets[1] = 0
+	 */
+	if (cnt == 2) {
+		/*
+		 * in case of NV12 format, offsets[1] is not 0 and
+		 * handles[0] is same as handles[1].
+		 */
+		if (mode_cmd->offsets[1] &&
+			mode_cmd->handles[0] == mode_cmd->handles[1])
+			cnt = 1;
+	}
+
+	return cnt;
+}
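
The handle/offset convention documented above can be restated as a
standalone check. A small self-contained sketch mirroring the
classification rule (names are illustrative):

/*
 * Restatement of the NV12 vs. NV12M rule above: two planes sharing
 * one handle with a non-zero second offset form single-buffer NV12;
 * two distinct handles form two-buffer NV12M.
 */
#include <stdio.h>

struct fb_cmd {
	unsigned int handles[4];
	unsigned int offsets[4];
};

static unsigned int nv12_buf_count(const struct fb_cmd *cmd)
{
	unsigned int cnt = 0;

	while (cnt < 4 && cmd->handles[cnt])
		cnt++;

	if (cnt == 2 && cmd->offsets[1] &&
	    cmd->handles[0] == cmd->handles[1])
		cnt = 1;	/* NV12: both planes in one buffer */

	return cnt;
}

int main(void)
{
	struct fb_cmd nv12  = { .handles = { 3, 3 }, .offsets = { 0, 4096 } };
	struct fb_cmd nv12m = { .handles = { 3, 4 } };

	printf("NV12  -> %u\n", nv12_buf_count(&nv12));		/* 1 */
	printf("NV12M -> %u\n", nv12_buf_count(&nv12m));	/* 2 */
	return 0;
}
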
+
+static struct drm_framebuffer *
+exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+		      struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct exynos_drm_fb *exynos_fb;
+	int i, ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+	if (!exynos_fb) {
+		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object\n");
+		ret = -ENOENT;
+		goto err_free;
+	}
+
+	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
+	exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
+
+	DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
+
+	for (i = 1; i < exynos_fb->buf_cnt; i++) {
+		obj = drm_gem_object_lookup(dev, file_priv,
+				mode_cmd->handles[i]);
+		if (!obj) {
+			DRM_ERROR("failed to lookup gem object\n");
+			ret = -ENOENT;
+			exynos_fb->buf_cnt = i;
+			goto err_unreference;
+		}
+
+		exynos_gem_obj = to_exynos_gem_obj(obj);
+		exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
+
+		ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+		if (ret < 0) {
+			DRM_ERROR("cannot use this gem memory type for fb.\n");
+			goto err_unreference;
+		}
+	}
+
+	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+	if (ret) {
+		DRM_ERROR("failed to init framebuffer.\n");
+		goto err_unreference;
+	}
+
+	return &exynos_fb->fb;
+
+err_unreference:
+	for (i = 0; i < exynos_fb->buf_cnt; i++) {
+		struct drm_gem_object *obj;
+
+		if (!exynos_fb->exynos_gem_obj[i])
+			continue;
+
+		obj = &exynos_fb->exynos_gem_obj[i]->base;
+		drm_gem_object_unreference_unlocked(obj);
+	}
+err_free:
+	kfree(exynos_fb);
+	return ERR_PTR(ret);
+}
+
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+						int index)
+{
+	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+	struct exynos_drm_gem_buf *buffer;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (index >= MAX_FB_BUFFER)
+		return NULL;
+
+	buffer = exynos_fb->exynos_gem_obj[index]->buffer;
+	if (!buffer)
+		return NULL;
+
+	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
+
+	return buffer;
+}
+
+static void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+	struct drm_fb_helper *fb_helper = private->fb_helper;
+
+	if (fb_helper)
+		drm_fb_helper_hotplug_event(fb_helper);
+}
+
+static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
+	.fb_create = exynos_user_fb_create,
+	.output_poll_changed = exynos_drm_output_poll_changed,
+};
+
+void exynos_drm_mode_config_init(struct drm_device *dev)
+{
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	/*
+	 * set max width and height as default value(4096x4096).
+	 * this value would be used to check framebuffer size limitation
+	 * at drm_mode_addfb().
+	 */
+	dev->mode_config.max_width = 4096;
+	dev->mode_config.max_height = 4096;
+
+	dev->mode_config.funcs = &exynos_drm_mode_config_funcs;
+}
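
As the comment above says, the min/max values only take effect when a
framebuffer is added; the core rejects sizes outside the range. A
simplified, self-contained sketch of that bounds check (a stand-in, not
the actual DRM core code):

/* Simplified stand-in for the drm_mode_addfb() size validation
 * driven by the mode_config limits set above.
 */
#include <stdio.h>
#include <errno.h>

struct mode_limits {
	unsigned int min_w, min_h, max_w, max_h;
};

static int check_fb_size(const struct mode_limits *lim,
			 unsigned int w, unsigned int h)
{
	if (w < lim->min_w || w > lim->max_w ||
	    h < lim->min_h || h > lim->max_h)
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct mode_limits lim = { 0, 0, 4096, 4096 };

	printf("4096x4096 -> %d\n", check_fb_size(&lim, 4096, 4096)); /* 0 */
	printf("8192x2160 -> %d\n", check_fb_size(&lim, 8192, 2160)); /* -22 */
	return 0;
}
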
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fb.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fb.h
new file mode 100644
index 0000000..517471b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_FB_H_
+#define _EXYNOS_DRM_FB_H_
+
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+			    struct drm_mode_fb_cmd2 *mode_cmd,
+			    struct drm_gem_object *obj);
+
+/* get memory information of a drm framebuffer */
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+						 int index);
+
+void exynos_drm_mode_config_init(struct drm_device *dev);
+
+/* set the buffer count of a drm framebuffer. */
+void exynos_drm_fb_set_buf_cnt(struct drm_framebuffer *fb,
+						unsigned int cnt);
+
+/* get the buffer count of a drm framebuffer. */
+unsigned int exynos_drm_fb_get_buf_cnt(struct drm_framebuffer *fb);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
new file mode 100644
index 0000000..8f007aa
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -0,0 +1,357 @@
+/* exynos_drm_fbdev.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+
+#define MAX_CONNECTOR		4
+#define PREFERRED_BPP		32
+
+#define to_exynos_fbdev(x)	container_of(x, struct exynos_drm_fbdev,\
+				drm_fb_helper)
+
+struct exynos_drm_fbdev {
+	struct drm_fb_helper		drm_fb_helper;
+	struct exynos_drm_gem_obj	*exynos_gem_obj;
+};
+
+static int exynos_drm_fb_mmap(struct fb_info *info,
+			struct vm_area_struct *vma)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+	unsigned long vm_size;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+	vm_size = vma->vm_end - vma->vm_start;
+
+	if (vm_size > buffer->size)
+		return -EINVAL;
+
+	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
+		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+	if (ret < 0) {
+		DRM_ERROR("failed to mmap.\n");
+		return ret;
+	}
+
+	return 0;
+}
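
From user space, the only requirement the handler above imposes is that
the requested length not exceed the backing buffer. A hypothetical usage
sketch against the emulated fbdev node (device path and geometry are
examples, not taken from this tree):

/* Hedged user-space sketch: map the fbdev node backed by the
 * fb_mmap handler above; len must not exceed buffer->size.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1920 * 1080 * 4;		/* example geometry */
	int fd = open("/dev/fb0", O_RDWR);	/* example node */
	void *fb;

	if (fd < 0)
		return 1;

	fb = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED) {		/* e.g. len > buffer->size */
		close(fd);
		return 1;
	}

	/* ... draw into fb ... */
	munmap(fb, len);
	close(fd);
	return 0;
}
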
+
+static struct fb_ops exynos_drm_fb_ops = {
+	.owner		= THIS_MODULE,
+	.fb_mmap        = exynos_drm_fb_mmap,
+	.fb_fillrect	= cfb_fillrect,
+	.fb_copyarea	= cfb_copyarea,
+	.fb_imageblit	= cfb_imageblit,
+	.fb_check_var	= drm_fb_helper_check_var,
+	.fb_set_par	= drm_fb_helper_set_par,
+	.fb_blank	= drm_fb_helper_blank,
+	.fb_pan_display	= drm_fb_helper_pan_display,
+	.fb_setcmap	= drm_fb_helper_setcmap,
+};
+
+static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
+				     struct drm_framebuffer *fb)
+{
+	struct fb_info *fbi = helper->fbdev;
+	struct drm_device *dev = helper->dev;
+	struct exynos_drm_gem_buf *buffer;
+	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
+	unsigned long offset;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+	/* RGB formats use only one buffer */
+	buffer = exynos_drm_fb_buffer(fb, 0);
+	if (!buffer) {
+		DRM_LOG_KMS("buffer is null.\n");
+		return -EFAULT;
+	}
+
+	/* map pages with kernel virtual space. */
+	if (!buffer->kvaddr) {
+		if (is_drm_iommu_supported(dev)) {
+			unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
+
+			buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+					pgprot_writecombine(PAGE_KERNEL));
+		} else {
+			phys_addr_t dma_addr = buffer->dma_addr;
+			if (dma_addr)
+				buffer->kvaddr = phys_to_virt(dma_addr);
+			else
+				buffer->kvaddr = (void __iomem *)NULL;
+		}
+		if (!buffer->kvaddr) {
+			DRM_ERROR("failed to map pages to kernel space.\n");
+			return -EIO;
+		}
+	}
+
+	/* the buffer count of the framebuffer is always 1 at boot time. */
+	exynos_drm_fb_set_buf_cnt(fb, 1);
+
+	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
+	offset += fbi->var.yoffset * fb->pitches[0];
+
+	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
+	fbi->screen_base = buffer->kvaddr + offset;
+	if (is_drm_iommu_supported(dev))
+		fbi->fix.smem_start = (unsigned long)
+			(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
+	else
+		fbi->fix.smem_start = (unsigned long)buffer->dma_addr;
+
+	fbi->screen_size = size;
+	fbi->fix.smem_len = size;
+
+	return 0;
+}
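
The screen_base arithmetic above is the standard linear-framebuffer pan
rule; spelled out as a helper for clarity (illustrative):

/* Pan offset rule used above: bytes = xoffset * bytes-per-pixel
 * plus yoffset * pitch (bytes per line).
 * e.g. x=16, y=2, 32bpp, pitch=4096 -> 16*4 + 2*4096 = 8256 bytes.
 */
static unsigned long fb_pan_offset(unsigned int xoffset,
				   unsigned int yoffset,
				   unsigned int bits_per_pixel,
				   unsigned int pitch)
{
	return xoffset * (bits_per_pixel >> 3) + yoffset * pitch;
}
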
+
+static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
+				    struct drm_fb_helper_surface_size *sizes)
+{
+	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct drm_device *dev = helper->dev;
+	struct fb_info *fbi;
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+	struct platform_device *pdev = dev->platformdev;
+	unsigned long size;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
+			sizes->surface_width, sizes->surface_height,
+			sizes->surface_bpp);
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	mutex_lock(&dev->struct_mutex);
+
+	fbi = framebuffer_alloc(0, &pdev->dev);
+	if (!fbi) {
+		DRM_ERROR("failed to allocate fb info.\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+
+	/* 0 means to allocate physically contiguous memory */
+	exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
+	if (IS_ERR(exynos_gem_obj)) {
+		ret = PTR_ERR(exynos_gem_obj);
+		goto err_release_framebuffer;
+	}
+
+	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+
+	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
+			&exynos_gem_obj->base);
+	if (IS_ERR(helper->fb)) {
+		DRM_ERROR("failed to create drm framebuffer.\n");
+		ret = PTR_ERR(helper->fb);
+		goto err_destroy_gem;
+	}
+
+	helper->fbdev = fbi;
+
+	fbi->par = helper;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->fbops = &exynos_drm_fb_ops;
+
+	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (ret) {
+		DRM_ERROR("failed to allocate cmap.\n");
+		goto err_destroy_framebuffer;
+	}
+
+	ret = exynos_drm_fbdev_update(helper, helper->fb);
+	if (ret < 0)
+		goto err_dealloc_cmap;
+
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+
+err_dealloc_cmap:
+	fb_dealloc_cmap(&fbi->cmap);
+err_destroy_framebuffer:
+	drm_framebuffer_cleanup(helper->fb);
+err_destroy_gem:
+	exynos_drm_gem_destroy(exynos_gem_obj);
+err_release_framebuffer:
+	framebuffer_release(fbi);
+
+/*
+ * if this fails, all resources allocated above are released by
+ * drm_mode_config_cleanup(), since drm_load() was called before any
+ * specific driver such as the fimd or hdmi driver.
+ */
+out:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
+	.fb_probe =	exynos_drm_fbdev_create,
+};
+
+int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+	struct exynos_drm_fbdev *fbdev;
+	struct exynos_drm_private *private = dev->dev_private;
+	struct drm_fb_helper *helper;
+	unsigned int num_crtc;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+		return 0;
+
+	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+	if (!fbdev) {
+		DRM_ERROR("failed to allocate drm fbdev.\n");
+		return -ENOMEM;
+	}
+
+	private->fb_helper = helper = &fbdev->drm_fb_helper;
+	helper->funcs = &exynos_drm_fb_helper_funcs;
+
+	num_crtc = dev->mode_config.num_crtc;
+
+	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
+	if (ret < 0) {
+		DRM_ERROR("failed to initialize drm fb helper.\n");
+		goto err_init;
+	}
+
+	ret = drm_fb_helper_single_add_all_connectors(helper);
+	if (ret < 0) {
+		DRM_ERROR("failed to register drm_fb_helper_connector.\n");
+		goto err_setup;
+	}
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
+	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
+	if (ret < 0) {
+		DRM_ERROR("failed to set up hw configuration.\n");
+		goto err_setup;
+	}
+
+	return 0;
+
+err_setup:
+	drm_fb_helper_fini(helper);
+
+err_init:
+	private->fb_helper = NULL;
+	kfree(fbdev);
+
+	return ret;
+}
+
+static void exynos_drm_fbdev_destroy(struct drm_device *dev,
+				      struct drm_fb_helper *fb_helper)
+{
+	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
+	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+	struct drm_framebuffer *fb;
+
+	if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
+		vunmap(exynos_gem_obj->buffer->kvaddr);
+
+	/* release drm framebuffer and real buffer */
+	if (fb_helper->fb && fb_helper->fb->funcs) {
+		fb = fb_helper->fb;
+		drm_framebuffer_unregister_private(fb);
+		drm_framebuffer_remove(fb);
+	}
+
+	/* release linux framebuffer */
+	if (fb_helper->fbdev) {
+		struct fb_info *info;
+		int ret;
+
+		info = fb_helper->fbdev;
+		ret = unregister_framebuffer(info);
+		if (ret < 0)
+			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+
+		framebuffer_release(info);
+	}
+
+	drm_fb_helper_fini(fb_helper);
+}
+
+void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+	struct exynos_drm_fbdev *fbdev;
+
+	if (!private || !private->fb_helper)
+		return;
+
+	fbdev = to_exynos_fbdev(private->fb_helper);
+
+	if (fbdev->exynos_gem_obj)
+		exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
+
+	exynos_drm_fbdev_destroy(dev, private->fb_helper);
+	kfree(fbdev);
+	private->fb_helper = NULL;
+}
+
+void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+
+	if (!private || !private->fb_helper)
+		return;
+
+	drm_modeset_lock_all(dev);
+	drm_fb_helper_restore_fbdev_mode(private->fb_helper);
+	drm_modeset_unlock_all(dev);
+}
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
new file mode 100644
index 0000000..e16d7f0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_FBDEV_H_
+#define _EXYNOS_DRM_FBDEV_H_
+
+int exynos_drm_fbdev_init(struct drm_device *dev);
+int exynos_drm_fbdev_reinit(struct drm_device *dev);
+void exynos_drm_fbdev_fini(struct drm_device *dev);
+void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644
index 0000000..4a1616a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -0,0 +1,1982 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *	Jinyoung Jeon <jy0.jeon@samsung.com>
+ *	Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-fimc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_fimc.h"
+
+/*
+ * FIMC stands for Fully Interactive Mobile Camera and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from memory.
+ * output DMA writes image data to memory.
+ * FIMC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> FIMC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> FIMC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> FIMC H/W ----> FIMD.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size (width and height).
+ * 4. add a check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define FIMC_MAX_DEVS	4
+#define FIMC_MAX_SRC	2
+#define FIMC_MAX_DST	32
+#define FIMC_SHFACTOR	10
+#define FIMC_BUF_STOP	1
+#define FIMC_BUF_START	2
+#define FIMC_REG_SZ		32
+#define FIMC_WIDTH_ITU_709	1280
+#define FIMC_REFRESH_MAX	60
+#define FIMC_REFRESH_MIN	12
+#define FIMC_CROP_MAX	8192
+#define FIMC_CROP_MIN	32
+#define FIMC_SCALE_MAX	4224
+#define FIMC_SCALE_MIN	32
+
+#define get_fimc_context(dev)	platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
+					struct fimc_context, ippdrv)
+#define fimc_read(offset)		readl(ctx->regs + (offset))
+#define fimc_write(cfg, offset)	writel(cfg, ctx->regs + (offset))
+
+enum fimc_wb {
+	FIMC_WB_NONE,
+	FIMC_WB_A,
+	FIMC_WB_B,
+};
+
+enum {
+	FIMC_CLK_LCLK,
+	FIMC_CLK_GATE,
+	FIMC_CLK_WB_A,
+	FIMC_CLK_WB_B,
+	FIMC_CLK_MUX,
+	FIMC_CLK_PARENT,
+	FIMC_CLKS_MAX
+};
+
+static const char * const fimc_clock_names[] = {
+	[FIMC_CLK_LCLK]   = "sclk_fimc",
+	[FIMC_CLK_GATE]   = "fimc",
+	[FIMC_CLK_WB_A]   = "pxl_async0",
+	[FIMC_CLK_WB_B]   = "pxl_async1",
+	[FIMC_CLK_MUX]    = "mux",
+	[FIMC_CLK_PARENT] = "parent",
+};
+
+#define FIMC_DEFAULT_LCLK_FREQUENCY 133000000UL
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @bypass: scaler bypass mode (scaler path unused).
+ * @up_h: horizontal scale up.
+ * @up_v: vertical scale up.
+ * @hratio: horizontal ratio.
+ * @vratio: vertical ratio.
+ */
+struct fimc_scaler {
+	bool	range;
+	bool bypass;
+	bool up_h;
+	bool up_v;
+	u32 hratio;
+	u32 vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see user manual table 43-1.
+ * @in_hori: scaler input horizontal size.
+ * @bypass: scaler bypass mode.
+ * @dst_h_wo_rot: target horizontal size without output rotation.
+ * @dst_h_rot: target horizontal size with output rotation.
+ * @rl_w_wo_rot: real width without input rotation.
+ * @rl_h_rot: real height without output rotation.
+ */
+struct fimc_capability {
+	/* scaler */
+	u32	in_hori;
+	u32	bypass;
+	/* output rotator */
+	u32	dst_h_wo_rot;
+	u32	dst_h_rot;
+	/* input rotator */
+	u32	rl_w_wo_rot;
+	u32	rl_h_rot;
+};
+
+/*
+ * A structure of fimc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @clocks: fimc clocks.
+ * @clk_frequency: LCLK clock frequency.
+ * @sysreg: handle to SYSREG block regmap.
+ * @sc: scaler information.
+ * @pol: polarity of writeback.
+ * @id: fimc id.
+ * @irq: irq number.
+ * @suspended: whether the device is suspended (clocks gated).
+ */
+struct fimc_context {
+	struct exynos_drm_ippdrv	ippdrv;
+	struct resource	*regs_res;
+	void __iomem	*regs;
+	struct mutex	lock;
+	struct clk	*clocks[FIMC_CLKS_MAX];
+	u32		clk_frequency;
+	struct regmap	*sysreg;
+	struct fimc_scaler	sc;
+	struct exynos_drm_ipp_pol	pol;
+	int	id;
+	int	irq;
+	bool	suspended;
+};
+
+static void fimc_sw_reset(struct fimc_context *ctx)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* stop dma operation */
+	cfg = fimc_read(EXYNOS_CISTATUS);
+	if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) {
+		cfg = fimc_read(EXYNOS_MSCTRL);
+		cfg &= ~EXYNOS_MSCTRL_ENVID;
+		fimc_write(cfg, EXYNOS_MSCTRL);
+	}
+
+	cfg = fimc_read(EXYNOS_CISRCFMT);
+	cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
+	fimc_write(cfg, EXYNOS_CISRCFMT);
+
+	/* disable image capture */
+	cfg = fimc_read(EXYNOS_CIIMGCPT);
+	cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+	fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+	/* s/w reset */
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg |= (EXYNOS_CIGCTRL_SWRST);
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+
+	/* s/w reset complete */
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg &= ~EXYNOS_CIGCTRL_SWRST;
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+
+	/* reset sequence */
+	fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+}
+
+static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK,
+				  SYSREG_FIMD0WB_DEST_MASK,
+				  ctx->id << SYSREG_FIMD0WB_DEST_SHIFT);
+}
+
+static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
+
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
+		EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
+		EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
+		EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
+		EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
+		EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
+
+	switch (wb) {
+	case FIMC_WB_A:
+		cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
+			EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+		break;
+	case FIMC_WB_B:
+		cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
+			EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+		break;
+	case FIMC_WB_NONE:
+	default:
+		cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+			EXYNOS_CIGCTRL_SELWRITEBACK_A |
+			EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+			EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
+		break;
+	}
+
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_set_polarity(struct fimc_context *ctx,
+		struct exynos_drm_ipp_pol *pol)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
+		__func__, pol->inv_pclk, pol->inv_vsync);
+	DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
+		__func__, pol->inv_href, pol->inv_hsync);
+
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
+		 EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
+
+	if (pol->inv_pclk)
+		cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
+	if (pol->inv_vsync)
+		cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
+	if (pol->inv_href)
+		cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
+	if (pol->inv_hsync)
+		cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
+
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	if (enable)
+		cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
+	else
+		cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
+
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
+		bool overflow, bool level)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+			enable, overflow, level);
+
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	if (enable) {
+		cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
+		cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
+		if (overflow)
+			cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
+		if (level)
+			cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
+	} else
+		cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
+
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_clear_irq(struct fimc_context *ctx)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static bool fimc_check_ovf(struct fimc_context *ctx)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg, status, flag;
+
+	status = fimc_read(EXYNOS_CISTATUS);
+	flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
+		EXYNOS_CISTATUS_OVFICR;
+
+	DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
+
+	if (status & flag) {
+		cfg = fimc_read(EXYNOS_CIWDOFST);
+		cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+			EXYNOS_CIWDOFST_CLROVFICR);
+
+		fimc_write(cfg, EXYNOS_CIWDOFST);
+
+		cfg = fimc_read(EXYNOS_CIWDOFST);
+		cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+			EXYNOS_CIWDOFST_CLROVFICR);
+
+		fimc_write(cfg, EXYNOS_CIWDOFST);
+
+		dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+			ctx->id, status);
+		return true;
+	}
+
+	return false;
+}
+
+static bool fimc_check_frame_end(struct fimc_context *ctx)
+{
+	u32 cfg;
+
+	cfg = fimc_read(EXYNOS_CISTATUS);
+
+	DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
+
+	if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
+		return false;
+
+	cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
+	fimc_write(cfg, EXYNOS_CISTATUS);
+
+	return true;
+}
+
+static int fimc_get_buf_id(struct fimc_context *ctx)
+{
+	u32 cfg;
+	int frame_cnt, buf_id;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	cfg = fimc_read(EXYNOS_CISTATUS2);
+	frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
+
+	if (frame_cnt == 0)
+		frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
+
+	DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
+		EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+		EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+
+	if (frame_cnt == 0) {
+		DRM_ERROR("failed to get frame count.\n");
+		return -EIO;
+	}
+
+	buf_id = frame_cnt - 1;
+	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+	return buf_id;
+}
+
+static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+	cfg = fimc_read(EXYNOS_CIOCTRL);
+	if (enable)
+		cfg |= EXYNOS_CIOCTRL_LASTENDEN;
+	else
+		cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
+
+	fimc_write(cfg, EXYNOS_CIOCTRL);
+}
+
+
+static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	/* RGB */
+	cfg = fimc_read(EXYNOS_CISCCTRL);
+	cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+	switch (fmt) {
+	case DRM_FORMAT_RGB565:
+		cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
+		fimc_write(cfg, EXYNOS_CISCCTRL);
+		return 0;
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_XRGB8888:
+		cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
+		fimc_write(cfg, EXYNOS_CISCCTRL);
+		return 0;
+	default:
+		/* bypass */
+		break;
+	}
+
+	/* YUV */
+	cfg = fimc_read(EXYNOS_MSCTRL);
+	cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
+		EXYNOS_MSCTRL_C_INT_IN_2PLANE |
+		EXYNOS_MSCTRL_ORDER422_YCBYCR);
+
+	switch (fmt) {
+	case DRM_FORMAT_YUYV:
+		cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
+		break;
+	case DRM_FORMAT_YVYU:
+		cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
+		break;
+	case DRM_FORMAT_UYVY:
+		cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
+		break;
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_YUV444:
+		cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
+		break;
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+		cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
+			EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+		break;
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV12MT:
+	case DRM_FORMAT_NV16:
+		cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
+			EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg, EXYNOS_MSCTRL);
+
+	return 0;
+}
+
+static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	cfg = fimc_read(EXYNOS_MSCTRL);
+	cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
+
+	switch (fmt) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_XRGB8888:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
+		break;
+	case DRM_FORMAT_YUV444:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+		break;
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+		break;
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_YUV422:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
+		break;
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV12MT:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg, EXYNOS_MSCTRL);
+
+	cfg = fimc_read(EXYNOS_CIDMAPARAM);
+	cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
+
+	if (fmt == DRM_FORMAT_NV12MT)
+		cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+	else
+		cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+
+	fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+	return fimc_src_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_src_set_transf(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip, bool *swap)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg1, cfg2;
+
+	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+		degree, flip);
+
+	cfg1 = fimc_read(EXYNOS_MSCTRL);
+	cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
+		EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+
+	cfg2 = fimc_read(EXYNOS_CITRGFMT);
+	cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+
+	switch (degree) {
+	case EXYNOS_DRM_DEGREE_0:
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_90:
+		cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_180:
+		cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+			EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_270:
+		cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+			EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+		cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg1, EXYNOS_MSCTRL);
+	fimc_write(cfg2, EXYNOS_CITRGFMT);
+	*swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
+
+	return 0;
+}
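
One way to read the four cases above: 90 degrees sets the rotator bit,
180 degrees is emulated as X+Y mirror, and a requested flip toggles the
corresponding mirror, so a flip on top of 180 degrees cancels one of
them. A compact restatement of that rule (an illustrative abstraction,
not the driver's register layout):

/* Rotation/flip composition used above: hardware mirrors are the
 * base mirrors implied by the angle XOR the requested flips, and
 * the 90-degree rotator bit is set for 90/270.
 */
enum { FLIP_V = 1 << 0, FLIP_H = 1 << 1 };

static void transf_to_hw(unsigned int degree, unsigned int flip,
			 unsigned int *mirrors, int *rot90)
{
	unsigned int base = (degree == 180 || degree == 270)
				? (FLIP_V | FLIP_H) : 0;

	*rot90 = (degree == 90 || degree == 270);
	*mirrors = base ^ (flip & (FLIP_V | FLIP_H));
}
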
+
+static int fimc_set_window(struct fimc_context *ctx,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	u32 cfg, h1, h2, v1, v2;
+
+	/* cropped image */
+	h1 = pos->x;
+	h2 = sz->hsize - pos->w - pos->x;
+	v1 = pos->y;
+	v2 = sz->vsize - pos->h - pos->y;
+
+	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+	__func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+	DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
+		h1, h2, v1, v2);
+
+	/*
+	 * set window offset 1, 2 size
+	 * check figure 43-21 in user manual
+	 */
+	cfg = fimc_read(EXYNOS_CIWDOFST);
+	cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
+		EXYNOS_CIWDOFST_WINVEROFST_MASK);
+	cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
+		EXYNOS_CIWDOFST_WINVEROFST(v1));
+	cfg |= EXYNOS_CIWDOFST_WINOFSEN;
+	fimc_write(cfg, EXYNOS_CIWDOFST);
+
+	cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
+		EXYNOS_CIWDOFST2_WINVEROFST2(v2));
+	fimc_write(cfg, EXYNOS_CIWDOFST2);
+
+	return 0;
+}
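
Note the window is programmed as four margins rather than x/y/w/h; h2
and v2 are the right and bottom gaps. A worked example of the conversion
(illustrative):

/* Margin form of the crop rectangle used above:
 * left = x, right = hsize - w - x, top = y, bottom = vsize - h - y.
 * e.g. a 1280x720 crop at (320,180) in 1920x1080 gives
 * h1=320, h2=320, v1=180, v2=180.
 */
struct crop_margins {
	unsigned int h1, h2, v1, v2;
};

static struct crop_margins window_margins(unsigned int x, unsigned int y,
					  unsigned int w, unsigned int h,
					  unsigned int hsize,
					  unsigned int vsize)
{
	struct crop_margins m = {
		.h1 = x, .h2 = hsize - w - x,
		.v1 = y, .v2 = vsize - h - y,
	};

	return m;
}
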
+
+static int fimc_src_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct drm_exynos_pos img_pos = *pos;
+	struct drm_exynos_sz img_sz = *sz;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+		__func__, swap, sz->hsize, sz->vsize);
+
+	/* original size */
+	cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
+		EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+
+	fimc_write(cfg, EXYNOS_ORGISIZE);
+
+	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
+		pos->x, pos->y, pos->w, pos->h);
+
+	if (swap) {
+		img_pos.w = pos->h;
+		img_pos.h = pos->w;
+		img_sz.hsize = sz->vsize;
+		img_sz.vsize = sz->hsize;
+	}
+
+	/* set input DMA image size */
+	cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
+	cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
+		EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
+	cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
+		EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
+	fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
+
+	/*
+	 * set input FIFO image size
+	 * for now, we support only ITU601 8 bit mode
+	 */
+	cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
+		EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
+		EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+	fimc_write(cfg, EXYNOS_CISRCFMT);
+
+	/* offset Y(RGB), Cb, Cr */
+	cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIIYOFF);
+	cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIICBOFF);
+	cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIICROFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIICROFF);
+
+	return fimc_set_window(ctx, &img_pos, &img_sz);
+}
+
+static int fimc_src_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+	struct drm_exynos_ipp_property *property;
+	struct drm_exynos_ipp_config *config;
+
+	if (!c_node) {
+		DRM_ERROR("failed to get c_node.\n");
+		return -EINVAL;
+	}
+
+	property = &c_node->property;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+		property->prop_id, buf_id, buf_type);
+
+	if (buf_id > FIMC_MAX_SRC) {
+		dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+		return -ENOMEM;
+	}
+
+	/* address register set */
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		config = &property->config[EXYNOS_DRM_OPS_SRC];
+		fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+			EXYNOS_CIIYSA(buf_id));
+
+		if (config->fmt == DRM_FORMAT_YVU420) {
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+				EXYNOS_CIICBSA(buf_id));
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+				EXYNOS_CIICRSA(buf_id));
+		} else {
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+				EXYNOS_CIICBSA(buf_id));
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+				EXYNOS_CIICRSA(buf_id));
+		}
+		break;
+	case IPP_BUF_DEQUEUE:
+		fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
+		fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
+		fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
+		break;
+	default:
+		/* bypass */
+		break;
+	}
+
+	return 0;
+}
+
+static struct exynos_drm_ipp_ops fimc_src_ops = {
+	.set_fmt = fimc_src_set_fmt,
+	.set_transf = fimc_src_set_transf,
+	.set_size = fimc_src_set_size,
+	.set_addr = fimc_src_set_addr,
+};
+
+static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	/* RGB */
+	cfg = fimc_read(EXYNOS_CISCCTRL);
+	cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+	switch (fmt) {
+	case DRM_FORMAT_RGB565:
+		cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
+		fimc_write(cfg, EXYNOS_CISCCTRL);
+		return 0;
+	case DRM_FORMAT_RGB888:
+		cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
+		fimc_write(cfg, EXYNOS_CISCCTRL);
+		return 0;
+	case DRM_FORMAT_XRGB8888:
+		cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
+			EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
+		fimc_write(cfg, EXYNOS_CISCCTRL);
+		break;
+	default:
+		/* bypass */
+		break;
+	}
+
+	/* YUV */
+	cfg = fimc_read(EXYNOS_CIOCTRL);
+	cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
+		EXYNOS_CIOCTRL_ORDER422_MASK |
+		EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
+
+	switch (fmt) {
+	case DRM_FORMAT_XRGB8888:
+		cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
+		break;
+	case DRM_FORMAT_YUYV:
+		cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
+		break;
+	case DRM_FORMAT_YVYU:
+		cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
+		break;
+	case DRM_FORMAT_UYVY:
+		cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
+		break;
+	case DRM_FORMAT_VYUY:
+		cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
+		break;
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+		cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
+		cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+		break;
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV12MT:
+	case DRM_FORMAT_NV16:
+		cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
+		cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg, EXYNOS_CIOCTRL);
+
+	return 0;
+}
+
+static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	cfg = fimc_read(EXYNOS_CIEXTEN);
+
+	if (fmt == DRM_FORMAT_AYUV) {
+		cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
+		fimc_write(cfg, EXYNOS_CIEXTEN);
+	} else {
+		cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
+		fimc_write(cfg, EXYNOS_CIEXTEN);
+
+		cfg = fimc_read(EXYNOS_CITRGFMT);
+		cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
+
+		switch (fmt) {
+		case DRM_FORMAT_RGB565:
+		case DRM_FORMAT_RGB888:
+		case DRM_FORMAT_XRGB8888:
+			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
+			break;
+		case DRM_FORMAT_YUYV:
+		case DRM_FORMAT_YVYU:
+		case DRM_FORMAT_UYVY:
+		case DRM_FORMAT_VYUY:
+			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+			break;
+		case DRM_FORMAT_NV16:
+		case DRM_FORMAT_NV61:
+		case DRM_FORMAT_YUV422:
+			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
+			break;
+		case DRM_FORMAT_YUV420:
+		case DRM_FORMAT_YVU420:
+		case DRM_FORMAT_NV12:
+		case DRM_FORMAT_NV12MT:
+		case DRM_FORMAT_NV21:
+			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
+			break;
+		default:
+			dev_err(ippdrv->dev, "inavlid target format 0x%x.\n",
+				fmt);
+			return -EINVAL;
+		}
+
+		fimc_write(cfg, EXYNOS_CITRGFMT);
+	}
+
+	cfg = fimc_read(EXYNOS_CIDMAPARAM);
+	cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
+
+	if (fmt == DRM_FORMAT_NV12MT)
+		cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+	else
+		cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+
+	fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+	return fimc_dst_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_dst_set_transf(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip, bool *swap)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+		degree, flip);
+
+	cfg = fimc_read(EXYNOS_CITRGFMT);
+	cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
+	cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+
+	switch (degree) {
+	case EXYNOS_DRM_DEGREE_0:
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_90:
+		cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_180:
+		cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+			EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_270:
+		cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
+			EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+			EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg, EXYNOS_CITRGFMT);
+	*swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
+
+	return 0;
+}
+
+static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
+{
+	DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+	if (src >= dst * 64) {
+		DRM_ERROR("failed to make ratio and shift.\n");
+		return -EINVAL;
+	} else if (src >= dst * 32) {
+		*ratio = 32;
+		*shift = 5;
+	} else if (src >= dst * 16) {
+		*ratio = 16;
+		*shift = 4;
+	} else if (src >= dst * 8) {
+		*ratio = 8;
+		*shift = 3;
+	} else if (src >= dst * 4) {
+		*ratio = 4;
+		*shift = 2;
+	} else if (src >= dst * 2) {
+		*ratio = 2;
+		*shift = 1;
+	} else {
+		*ratio = 1;
+		*shift = 0;
+	}
+
+	return 0;
+}
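
The if/else ladder above selects the largest power-of-two pre-scale
ratio with src >= dst * ratio, capped at 32 (shift 5), and rejects
downscales of 64x or more. The same selection written as a loop, for
comparison (an equivalent restatement):

/* Loop form of the pre-scaler selection above: ratio = 1 << shift is
 * the largest power of two such that src >= dst << shift, shift <= 5.
 */
static int ratio_shift(unsigned int src, unsigned int dst,
		       unsigned int *ratio, unsigned int *shift)
{
	unsigned int s;

	if (src >= dst * 64)
		return -1;	/* total downscale of 64x+ unsupported */

	for (s = 5; s > 0; s--)
		if (src >= (dst << s))
			break;

	*shift = s;
	*ratio = 1u << s;	/* e.g. src=1000, dst=120 -> 8, shift 3 */
	return 0;
}
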
+
+static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
+		struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg, cfg_ext, shfactor;
+	u32 pre_dst_width, pre_dst_height;
+	u32 pre_hratio, hfactor, pre_vratio, vfactor;
+	int ret = 0;
+	u32 src_w, src_h, dst_w, dst_h;
+
+	cfg_ext = fimc_read(EXYNOS_CITRGFMT);
+	if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
+		src_w = src->h;
+		src_h = src->w;
+	} else {
+		src_w = src->w;
+		src_h = src->h;
+	}
+
+	if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
+		dst_w = dst->h;
+		dst_h = dst->w;
+	} else {
+		dst_w = dst->w;
+		dst_h = dst->h;
+	}
+
+	ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
+	if (ret) {
+		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+		return ret;
+	}
+
+	ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
+	if (ret) {
+		dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+		return ret;
+	}
+
+	pre_dst_width = src_w / pre_hratio;
+	pre_dst_height = src_h / pre_vratio;
+	DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
+		pre_dst_width, pre_dst_height);
+	DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
+		__func__, pre_hratio, hfactor, pre_vratio, vfactor);
+
+	sc->hratio = (src_w << 14) / (dst_w << hfactor);
+	sc->vratio = (src_h << 14) / (dst_h << vfactor);
+	sc->up_h = (dst_w >= src_w);
+	sc->up_v = (dst_h >= src_h);
+	DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+	__func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+
+	shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
+	DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
+
+	cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
+		EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
+		EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
+	fimc_write(cfg, EXYNOS_CISCPRERATIO);
+
+	cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
+		EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
+	fimc_write(cfg, EXYNOS_CISCPREDST);
+
+	return ret;
+}
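
The main-scaler ratio above is Q14 fixed point: (src << 14) /
(dst << factor), where factor is the pre-scaler shift just computed;
fimc_set_scaler() below then splits it into a coarse field (ratio >> 6)
and an extension field. A worked numeric check, assuming the low-6-bit
split implied by the >> 6 in that function:

/* Worked Q14 example for the main scaler ratio above:
 * src=1920, dst=1280, factor=0:
 *   ratio = (1920 << 14) / 1280 = 24576, i.e. 1.5 in Q14
 *   coarse (CISCCTRL) = 24576 >> 6 = 384, extension (CIEXTEN) = 0
 */
#include <stdio.h>

int main(void)
{
	unsigned int src = 1920, dst = 1280, factor = 0;
	unsigned int ratio = (src << 14) / (dst << factor);

	printf("ratio=%u (%.3f), coarse=%u, ext=%u\n",
	       ratio, ratio / 16384.0, ratio >> 6, ratio & 0x3f);
	return 0;
}
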
+
+static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
+{
+	u32 cfg, cfg_ext;
+
+	DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+		__func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
+	DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
+		__func__, sc->hratio, sc->vratio);
+
+	cfg = fimc_read(EXYNOS_CISCCTRL);
+	cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
+		EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
+		EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
+		EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
+		EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+		EXYNOS_CISCCTRL_CSCY2R_WIDE);
+
+	if (sc->range)
+		cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+			EXYNOS_CISCCTRL_CSCY2R_WIDE);
+	if (sc->bypass)
+		cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
+	if (sc->up_h)
+		cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
+	if (sc->up_v)
+		cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
+
+	cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
+		EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
+	fimc_write(cfg, EXYNOS_CISCCTRL);
+
+	cfg_ext = fimc_read(EXYNOS_CIEXTEN);
+	cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
+	cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
+	cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
+		EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
+	fimc_write(cfg_ext, EXYNOS_CIEXTEN);
+}
+
+static int fimc_dst_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct drm_exynos_pos img_pos = *pos;
+	struct drm_exynos_sz img_sz = *sz;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+		__func__, swap, sz->hsize, sz->vsize);
+
+	/* original size */
+	cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
+		EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
+
+	fimc_write(cfg, EXYNOS_ORGOSIZE);
+
+	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
+		__func__, pos->x, pos->y, pos->w, pos->h);
+
+	/* CSC ITU */
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
+
+	if (sz->hsize >= FIMC_WIDTH_ITU_709)
+		cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
+	else
+		cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
+
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+
+	if (swap) {
+		img_pos.w = pos->h;
+		img_pos.h = pos->w;
+		img_sz.hsize = sz->vsize;
+		img_sz.vsize = sz->hsize;
+	}
+
+	/* target image size */
+	cfg = fimc_read(EXYNOS_CITRGFMT);
+	cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
+		EXYNOS_CITRGFMT_TARGETV_MASK);
+	cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
+		EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
+	fimc_write(cfg, EXYNOS_CITRGFMT);
+
+	/* target area */
+	cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
+	fimc_write(cfg, EXYNOS_CITAREA);
+
+	/* offset Y(RGB), Cb, Cr */
+	cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIOYOFF);
+	cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIOCBOFF);
+	cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIOCROFF);
+
+	return 0;
+}
+
+static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
+{
+	u32 cfg, i, buf_num = 0;
+	u32 mask = 0x00000001;
+
+	cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+	for (i = 0; i < FIMC_REG_SZ; i++)
+		if (cfg & (mask << i))
+			buf_num++;
+
+	DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+	return buf_num;
+}
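
fimc_dst_get_buf_seq() is a population count over the 32 per-buffer
enable bits in CIFCNTSEQ; in-kernel, hweight32() expresses the same
thing. A portable standalone equivalent:

/* Equivalent of the bit-count loop above (kernel: hweight32(cfg)). */
static unsigned int enabled_buf_count(unsigned int cfg)
{
	unsigned int n = 0;

	while (cfg) {
		cfg &= cfg - 1;	/* clear lowest set bit */
		n++;
	}

	return n;	/* e.g. cfg=0b1011 -> 3 enqueued buffers */
}
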
+
+static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	bool enable;
+	u32 cfg;
+	u32 mask = 0x00000001 << buf_id;
+	int ret = 0;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+		buf_id, buf_type);
+
+	mutex_lock(&ctx->lock);
+
+	/* mask register set */
+	cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		enable = true;
+		break;
+	case IPP_BUF_DEQUEUE:
+		enable = false;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+
+	/* sequence id */
+	cfg &= ~mask;
+	cfg |= (enable << buf_id);
+	fimc_write(cfg, EXYNOS_CIFCNTSEQ);
+
+	/* interrupt enable */
+	if (buf_type == IPP_BUF_ENQUEUE &&
+	    fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
+		fimc_handle_irq(ctx, true, false, true);
+
+	/* interrupt disable */
+	if (buf_type == IPP_BUF_DEQUEUE &&
+	    fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
+		fimc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+	mutex_unlock(&ctx->lock);
+	return ret;
+}
+
+static int fimc_dst_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+	struct drm_exynos_ipp_property *property;
+	struct drm_exynos_ipp_config *config;
+
+	if (!c_node) {
+		DRM_ERROR("failed to get c_node.\n");
+		return -EINVAL;
+	}
+
+	property = &c_node->property;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+		property->prop_id, buf_id, buf_type);
+
+	if (buf_id > FIMC_MAX_DST) {
+		dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
+		return -ENOMEM;
+	}
+
+	/* address register set */
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		config = &property->config[EXYNOS_DRM_OPS_DST];
+
+		fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+			EXYNOS_CIOYSA(buf_id));
+
+		if (config->fmt == DRM_FORMAT_YVU420) {
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+				EXYNOS_CIOCBSA(buf_id));
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+				EXYNOS_CIOCRSA(buf_id));
+		} else {
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+				EXYNOS_CIOCBSA(buf_id));
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+				EXYNOS_CIOCRSA(buf_id));
+		}
+		break;
+	case IPP_BUF_DEQUEUE:
+		fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
+		fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
+		fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
+		break;
+	default:
+		/* bypass */
+		break;
+	}
+
+	return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops fimc_dst_ops = {
+	.set_fmt = fimc_dst_set_fmt,
+	.set_transf = fimc_dst_set_transf,
+	.set_size = fimc_dst_set_size,
+	.set_addr = fimc_dst_set_addr,
+};
+
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+	if (enable) {
+		clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
+		clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
+		ctx->suspended = false;
+	} else {
+		clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
+		clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
+		ctx->suspended = true;
+	}
+
+	return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
+{
+	struct fimc_context *ctx = dev_id;
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+	struct drm_exynos_ipp_event_work *event_work =
+		c_node->event_work;
+	int buf_id;
+
+	DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
+
+	fimc_clear_irq(ctx);
+	if (fimc_check_ovf(ctx))
+		return IRQ_NONE;
+
+	if (!fimc_check_frame_end(ctx))
+		return IRQ_NONE;
+
+	buf_id = fimc_get_buf_id(ctx);
+	if (buf_id < 0)
+		return IRQ_HANDLED;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+	if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
+		DRM_ERROR("failed to dequeue.\n");
+		return IRQ_HANDLED;
+	}
+
+	event_work->ippdrv = ippdrv;
+	event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+	queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+
+	return IRQ_HANDLED;
+}
+
+static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+	struct drm_exynos_ipp_prop_list *prop_list;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+	if (!prop_list) {
+		DRM_ERROR("failed to alloc property list.\n");
+		return -ENOMEM;
+	}
+
+	prop_list->version = 1;
+	prop_list->writeback = 1;
+	prop_list->refresh_min = FIMC_REFRESH_MIN;
+	prop_list->refresh_max = FIMC_REFRESH_MAX;
+	prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
+				(1 << EXYNOS_DRM_FLIP_VERTICAL) |
+				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+				(1 << EXYNOS_DRM_DEGREE_90) |
+				(1 << EXYNOS_DRM_DEGREE_180) |
+				(1 << EXYNOS_DRM_DEGREE_270);
+	prop_list->csc = 1;
+	prop_list->crop = 1;
+	prop_list->crop_max.hsize = FIMC_CROP_MAX;
+	prop_list->crop_max.vsize = FIMC_CROP_MAX;
+	prop_list->crop_min.hsize = FIMC_CROP_MIN;
+	prop_list->crop_min.vsize = FIMC_CROP_MIN;
+	prop_list->scale = 1;
+	prop_list->scale_max.hsize = FIMC_SCALE_MAX;
+	prop_list->scale_max.vsize = FIMC_SCALE_MAX;
+	prop_list->scale_min.hsize = FIMC_SCALE_MIN;
+	prop_list->scale_min.vsize = FIMC_SCALE_MIN;
+
+	ippdrv->prop_list = prop_list;
+
+	return 0;
+}
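
The flip and degree capabilities above are bitmasks indexed by the enum
values, so a consumer checks support with one shift and mask. A tiny
sketch (illustrative):

/* Testing the capability masks built above: each supported flip or
 * degree sets bit (1 << enum_value) in the property list.
 */
static int supports_degree(unsigned int degree_mask, unsigned int degree)
{
	return !!(degree_mask & (1u << degree));
}
/* e.g. supports_degree(prop_list->degree, EXYNOS_DRM_DEGREE_90) -> 1 */
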
+
+static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
+{
+	switch (flip) {
+	case EXYNOS_DRM_FLIP_NONE:
+	case EXYNOS_DRM_FLIP_VERTICAL:
+	case EXYNOS_DRM_FLIP_HORIZONTAL:
+	case EXYNOS_DRM_FLIP_BOTH:
+		return true;
+	default:
+		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+		return false;
+	}
+}
+
+static int fimc_ippdrv_check_property(struct device *dev,
+		struct drm_exynos_ipp_property *property)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+	struct drm_exynos_ipp_config *config;
+	struct drm_exynos_pos *pos;
+	struct drm_exynos_sz *sz;
+	bool swap;
+	int i;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	for_each_ipp_ops(i) {
+		if ((i == EXYNOS_DRM_OPS_SRC) &&
+			(property->cmd == IPP_CMD_WB))
+			continue;
+
+		config = &property->config[i];
+		pos = &config->pos;
+		sz = &config->sz;
+
+		/* check for flip */
+		if (!fimc_check_drm_flip(config->flip)) {
+			DRM_ERROR("invalid flip.\n");
+			goto err_property;
+		}
+
+		/* check for degree */
+		switch (config->degree) {
+		case EXYNOS_DRM_DEGREE_90:
+		case EXYNOS_DRM_DEGREE_270:
+			swap = true;
+			break;
+		case EXYNOS_DRM_DEGREE_0:
+		case EXYNOS_DRM_DEGREE_180:
+			swap = false;
+			break;
+		default:
+			DRM_ERROR("invalid degree.\n");
+			goto err_property;
+		}
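+
+		/*
+		 * With 90/270 degree rotation the image axes are transposed,
+		 * so the crop and scale limits below are checked against the
+		 * swapped width/height.
+		 */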
+
+		/* check for buffer bound */
+		if ((pos->x + pos->w > sz->hsize) ||
+			(pos->y + pos->h > sz->vsize)) {
+			DRM_ERROR("out of buf bound.\n");
+			goto err_property;
+		}
+
+		/* check for crop */
+		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+			if (swap) {
+				if ((pos->h < pp->crop_min.hsize) ||
+					(sz->vsize > pp->crop_max.hsize) ||
+					(pos->w < pp->crop_min.vsize) ||
+					(sz->hsize > pp->crop_max.vsize)) {
+					DRM_ERROR("out of crop size.\n");
+					goto err_property;
+				}
+			} else {
+				if ((pos->w < pp->crop_min.hsize) ||
+					(sz->hsize > pp->crop_max.hsize) ||
+					(pos->h < pp->crop_min.vsize) ||
+					(sz->vsize > pp->crop_max.vsize)) {
+					DRM_ERROR("out of crop size.\n");
+					goto err_property;
+				}
+			}
+		}
+
+		/* check for scale */
+		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+			if (swap) {
+				if ((pos->h < pp->scale_min.hsize) ||
+					(sz->vsize > pp->scale_max.hsize) ||
+					(pos->w < pp->scale_min.vsize) ||
+					(sz->hsize > pp->scale_max.vsize)) {
+					DRM_ERROR("out of scale size.\n");
+					goto err_property;
+				}
+			} else {
+				if ((pos->w < pp->scale_min.hsize) ||
+					(sz->hsize > pp->scale_max.hsize) ||
+					(pos->h < pp->scale_min.vsize) ||
+					(sz->vsize > pp->scale_max.vsize)) {
+					DRM_ERROR("out of scale size.\n");
+					goto err_property;
+				}
+			}
+		}
+	}
+
+	return 0;
+
+err_property:
+	for_each_ipp_ops(i) {
+		if ((i == EXYNOS_DRM_OPS_SRC) &&
+			(property->cmd == IPP_CMD_WB))
+			continue;
+
+		config = &property->config[i];
+		pos = &config->pos;
+		sz = &config->sz;
+
+		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+			i ? "dst" : "src", config->flip, config->degree,
+			pos->x, pos->y, pos->w, pos->h,
+			sz->hsize, sz->vsize);
+	}
+
+	return -EINVAL;
+}
+
+static void fimc_clear_addr(struct fimc_context *ctx)
+{
+	int i;
+
+	DRM_DEBUG_KMS("%s:\n", __func__);
+
+	for (i = 0; i < FIMC_MAX_SRC; i++) {
+		fimc_write(0, EXYNOS_CIIYSA(i));
+		fimc_write(0, EXYNOS_CIICBSA(i));
+		fimc_write(0, EXYNOS_CIICRSA(i));
+	}
+
+	for (i = 0; i < FIMC_MAX_DST; i++) {
+		fimc_write(0, EXYNOS_CIOYSA(i));
+		fimc_write(0, EXYNOS_CIOCBSA(i));
+		fimc_write(0, EXYNOS_CIOCRSA(i));
+	}
+}
+
+static int fimc_ippdrv_reset(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* reset h/w block */
+	fimc_sw_reset(ctx);
+
+	/* reset scaler capability */
+	memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+	fimc_clear_addr(ctx);
+
+	return 0;
+}
+
+static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+	struct drm_exynos_ipp_property *property;
+	struct drm_exynos_ipp_config *config;
+	struct drm_exynos_pos	img_pos[EXYNOS_DRM_OPS_MAX];
+	struct drm_exynos_ipp_set_wb set_wb;
+	int ret, i;
+	u32 cfg0, cfg1;
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+	if (!c_node) {
+		DRM_ERROR("failed to get c_node.\n");
+		return -EINVAL;
+	}
+
+	property = &c_node->property;
+
+	fimc_handle_irq(ctx, true, false, true);
+
+	for_each_ipp_ops(i) {
+		config = &property->config[i];
+		img_pos[i] = config->pos;
+	}
+
+	ret = fimc_set_prescaler(ctx, &ctx->sc,
+		&img_pos[EXYNOS_DRM_OPS_SRC],
+		&img_pos[EXYNOS_DRM_OPS_DST]);
+	if (ret) {
+		dev_err(dev, "failed to set prescaler.\n");
+		return ret;
+	}
+
+	/* If set true, the screen contents can be saved as a jpeg image */
+	fimc_handle_jpeg(ctx, false);
+	fimc_set_scaler(ctx, &ctx->sc);
+	fimc_set_polarity(ctx, &ctx->pol);
+
+	switch (cmd) {
+	case IPP_CMD_M2M:
+		fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
+		fimc_handle_lastend(ctx, false);
+
+		/* setup dma */
+		cfg0 = fimc_read(EXYNOS_MSCTRL);
+		cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+		cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+		fimc_write(cfg0, EXYNOS_MSCTRL);
+		break;
+	case IPP_CMD_WB:
+		fimc_set_type_ctrl(ctx, FIMC_WB_A);
+		fimc_handle_lastend(ctx, true);
+
+		/* setup FIMD */
+		ret = fimc_set_camblk_fimd0_wb(ctx);
+		if (ret < 0) {
+			dev_err(dev, "camblk setup failed.\n");
+			return ret;
+		}
+
+		set_wb.enable = 1;
+		set_wb.refresh = property->refresh_rate;
+		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+		break;
+	case IPP_CMD_OUTPUT:
+	default:
+		ret = -EINVAL;
+		dev_err(dev, "invalid operation.\n");
+		return ret;
+	}
+
+	/* Reset status */
+	fimc_write(0x0, EXYNOS_CISTATUS);
+
+	cfg0 = fimc_read(EXYNOS_CIIMGCPT);
+	cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+	cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+
+	/* Scaler */
+	cfg1 = fimc_read(EXYNOS_CISCCTRL);
+	cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
+	cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
+		EXYNOS_CISCCTRL_SCALERSTART);
+
+	fimc_write(cfg1, EXYNOS_CISCCTRL);
+
+	/* Enable image capture */
+	cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
+	fimc_write(cfg0, EXYNOS_CIIMGCPT);
+
+	/* Enable frame end irq (clear the disable bit) */
+	cfg0 = fimc_read(EXYNOS_CIGCTRL);
+	cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+	fimc_write(cfg0, EXYNOS_CIGCTRL);
+
+	cfg0 = fimc_read(EXYNOS_CIOCTRL);
+	cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
+	fimc_write(cfg0, EXYNOS_CIOCTRL);
+
+	if (cmd == IPP_CMD_M2M) {
+		cfg0 = fimc_read(EXYNOS_MSCTRL);
+		cfg0 |= EXYNOS_MSCTRL_ENVID;
+		fimc_write(cfg0, EXYNOS_MSCTRL);
+
+		cfg0 = fimc_read(EXYNOS_MSCTRL);
+		cfg0 |= EXYNOS_MSCTRL_ENVID;
+		fimc_write(cfg0, EXYNOS_MSCTRL);
+	}
+
+	return 0;
+}
+
+static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+	switch (cmd) {
+	case IPP_CMD_M2M:
+		/* Source clear */
+		cfg = fimc_read(EXYNOS_MSCTRL);
+		cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
+		cfg &= ~EXYNOS_MSCTRL_ENVID;
+		fimc_write(cfg, EXYNOS_MSCTRL);
+		break;
+	case IPP_CMD_WB:
+		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+		break;
+	case IPP_CMD_OUTPUT:
+	default:
+		dev_err(dev, "invalid operation.\n");
+		break;
+	}
+
+	fimc_handle_irq(ctx, false, false, true);
+
+	/* reset sequence */
+	fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+
+	/* Scaler disable */
+	cfg = fimc_read(EXYNOS_CISCCTRL);
+	cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
+	fimc_write(cfg, EXYNOS_CISCCTRL);
+
+	/* Disable image capture */
+	cfg = fimc_read(EXYNOS_CIIMGCPT);
+	cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+	fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+	/* Disable frame end irq (set the disable bit) */
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_put_clocks(struct fimc_context *ctx)
+{
+	int i;
+
+	for (i = 0; i < FIMC_CLKS_MAX; i++) {
+		if (IS_ERR(ctx->clocks[i]))
+			continue;
+		clk_put(ctx->clocks[i]);
+		ctx->clocks[i] = ERR_PTR(-EINVAL);
+	}
+}
+
+static int fimc_setup_clocks(struct fimc_context *ctx)
+{
+	struct device *fimc_dev = ctx->ippdrv.dev;
+	struct device *dev;
+	int ret, i;
+
+	for (i = 0; i < FIMC_CLKS_MAX; i++)
+		ctx->clocks[i] = ERR_PTR(-EINVAL);
+
+	for (i = 0; i < FIMC_CLKS_MAX; i++) {
+		if (i == FIMC_CLK_WB_A || i == FIMC_CLK_WB_B)
+			dev = fimc_dev->parent;
+		else
+			dev = fimc_dev;
+
+		ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]);
+		if (IS_ERR(ctx->clocks[i])) {
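+			/* clocks from FIMC_CLK_MUX onwards are optional */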
+			if (i >= FIMC_CLK_MUX)
+				break;
+			ret = PTR_ERR(ctx->clocks[i]);
+			dev_err(fimc_dev, "failed to get clock: %s\n",
+						fimc_clock_names[i]);
+			goto e_clk_free;
+		}
+	}
+
+	/* Optional FIMC LCLK parent clock setting */
+	if (!IS_ERR(ctx->clocks[FIMC_CLK_PARENT])) {
+		ret = clk_set_parent(ctx->clocks[FIMC_CLK_MUX],
+				     ctx->clocks[FIMC_CLK_PARENT]);
+		if (ret < 0) {
+			dev_err(fimc_dev, "failed to set parent.\n");
+			goto e_clk_free;
+		}
+	}
+
+	ret = clk_set_rate(ctx->clocks[FIMC_CLK_LCLK], ctx->clk_frequency);
+	if (ret < 0)
+		goto e_clk_free;
+
+	ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]);
+	if (!ret)
+		return ret;
+e_clk_free:
+	fimc_put_clocks(ctx);
+	return ret;
+}
+
+static int fimc_parse_dt(struct fimc_context *ctx)
+{
+	struct device_node *node = ctx->ippdrv.dev->of_node;
+
+	/* Handle only devices that support the LCD Writeback data path */
+	if (!of_property_read_bool(node, "samsung,lcd-wb"))
+		return -ENODEV;
+
+	if (of_property_read_u32(node, "clock-frequency",
+					&ctx->clk_frequency))
+		ctx->clk_frequency = FIMC_DEFAULT_LCLK_FREQUENCY;
+
+	ctx->id = of_alias_get_id(node, "fimc");
+
+	if (ctx->id < 0) {
+		dev_err(ctx->ippdrv.dev, "failed to get node alias id.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fimc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct fimc_context *ctx;
+	struct resource *res;
+	struct exynos_drm_ippdrv *ippdrv;
+	int ret;
+
+	if (!dev->of_node) {
+		dev_err(dev, "device tree node not found.\n");
+		return -ENODEV;
+	}
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->ippdrv.dev = dev;
+
+	ret = fimc_parse_dt(ctx);
+	if (ret < 0)
+		return ret;
+
+	ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+						"samsung,sysreg");
+	if (IS_ERR(ctx->sysreg)) {
+		dev_err(dev, "syscon regmap lookup failed.\n");
+		return PTR_ERR(ctx->sysreg);
+	}
+
+	/* resource memory */
+	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+	if (IS_ERR(ctx->regs))
+		return PTR_ERR(ctx->regs);
+
+	/* resource irq */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(dev, "failed to request irq resource.\n");
+		return -ENOENT;
+	}
+
+	ctx->irq = res->start;
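+	/*
+	 * No hardirq handler is installed, so fimc_irq_handler runs as the
+	 * threaded handler; IRQF_ONESHOT keeps the interrupt line masked
+	 * until the thread completes.
+	 */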
+	ret = devm_request_threaded_irq(dev, ctx->irq, NULL, fimc_irq_handler,
+		IRQF_ONESHOT, "drm_fimc", ctx);
+	if (ret < 0) {
+		dev_err(dev, "failed to request irq.\n");
+		return ret;
+	}
+
+	ret = fimc_setup_clocks(ctx);
+	if (ret < 0)
+		return ret;
+
+	ippdrv = &ctx->ippdrv;
+	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
+	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
+	ippdrv->check_property = fimc_ippdrv_check_property;
+	ippdrv->reset = fimc_ippdrv_reset;
+	ippdrv->start = fimc_ippdrv_start;
+	ippdrv->stop = fimc_ippdrv_stop;
+	ret = fimc_init_prop_list(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to init property list.\n");
+		goto err_put_clk;
+	}
+
+	DRM_DEBUG_KMS("%s:id[%d]ippdrv[%p]\n", __func__, ctx->id, ippdrv);
+
+	mutex_init(&ctx->lock);
+	platform_set_drvdata(pdev, ctx);
+
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	ret = exynos_drm_ippdrv_register(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to register drm fimc device.\n");
+		goto err_pm_dis;
+	}
+
+	dev_info(dev, "drm fimc registered successfully.\n");
+
+	return 0;
+
+err_pm_dis:
+	pm_runtime_disable(dev);
+err_put_clk:
+	fimc_put_clocks(ctx);
+
+	return ret;
+}
+
+static int fimc_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+	exynos_drm_ippdrv_unregister(ippdrv);
+	mutex_destroy(&ctx->lock);
+
+	fimc_put_clocks(ctx);
+	pm_runtime_set_suspended(dev);
+	pm_runtime_disable(dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_suspend(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_resume(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	if (!pm_runtime_suspended(dev))
+		return fimc_clk_ctrl(ctx, true);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_runtime_suspend(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	return  fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_runtime_resume(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	return  fimc_clk_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops fimc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+	SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+static const struct of_device_id fimc_of_match[] = {
+	{ .compatible = "samsung,exynos4210-fimc" },
+	{ .compatible = "samsung,exynos4212-fimc" },
+	{ },
+};
+
+struct platform_driver fimc_driver = {
+	.probe		= fimc_probe,
+	.remove		= fimc_remove,
+	.driver		= {
+		.of_match_table = fimc_of_match,
+		.name	= "exynos-drm-fimc",
+		.owner	= THIS_MODULE,
+		.pm	= &fimc_pm_ops,
+	},
+};
+
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644
index 0000000..127a424
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *	Jinyoung Jeon <jy0.jeon@samsung.com>
+ *	Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_FIMC_H_
+#define _EXYNOS_DRM_FIMC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_FIMC_H_ */
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fimd.c
new file mode 100644
index 0000000..97c61db
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -0,0 +1,1100 @@
+/* exynos_drm_fimd.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <drm/drmP.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+
+#include <video/of_display_timing.h>
+#include <video/samsung_fimd.h>
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_fbdev.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * FIMD stands for Fully Interactive Mobile Display. As a display
+ * controller, it transfers content drawn in memory to an LCD panel
+ * through display interfaces such as RGB or the CPU interface.
+ */
+
+/* position control register for hardware window 0, 2 ~ 4.*/
+#define VIDOSD_A(win)		(VIDOSD_BASE + 0x00 + (win) * 16)
+#define VIDOSD_B(win)		(VIDOSD_BASE + 0x04 + (win) * 16)
+/*
+ * size control register for hardware windows 0 and alpha control register
+ * for hardware windows 1 ~ 4
+ */
+#define VIDOSD_C(win)		(VIDOSD_BASE + 0x08 + (win) * 16)
+/* size control register for hardware windows 1 ~ 2. */
+#define VIDOSD_D(win)		(VIDOSD_BASE + 0x0C + (win) * 16)
+
+#define VIDWx_BUF_START(win, buf)	(VIDW_BUF_START(buf) + (win) * 8)
+#define VIDWx_BUF_END(win, buf)		(VIDW_BUF_END(buf) + (win) * 8)
+#define VIDWx_BUF_SIZE(win, buf)	(VIDW_BUF_SIZE(buf) + (win) * 4)
+
+/* color key control register for hardware window 1 ~ 4. */
+#define WKEYCON0_BASE(x)		((WKEYCON0 + 0x140) + ((x - 1) * 8))
+/* color key value register for hardware window 1 ~ 4. */
+#define WKEYCON1_BASE(x)		((WKEYCON1 + 0x140) + ((x - 1) * 8))
+
+/* FIMD has five hardware windows in total. */
+#define WINDOWS_NR	5
+
+#define get_fimd_context(dev)	platform_get_drvdata(to_platform_device(dev))
+
+struct fimd_driver_data {
+	unsigned int timing_base;
+};
+
+static struct fimd_driver_data exynos4_fimd_driver_data = {
+	.timing_base = 0x0,
+};
+
+static struct fimd_driver_data exynos5_fimd_driver_data = {
+	.timing_base = 0x20000,
+};
+
+struct fimd_win_data {
+	unsigned int		offset_x;
+	unsigned int		offset_y;
+	unsigned int		ovl_width;
+	unsigned int		ovl_height;
+	unsigned int		fb_width;
+	unsigned int		fb_height;
+	unsigned int		bpp;
+	dma_addr_t		dma_addr;
+	unsigned int		buf_offsize;
+	unsigned int		line_size;	/* bytes */
+	bool			enabled;
+	bool			resume;
+};
+
+struct fimd_context {
+	struct exynos_drm_subdrv	subdrv;
+	int				irq;
+	struct drm_crtc			*crtc;
+	struct clk			*bus_clk;
+	struct clk			*lcd_clk;
+	void __iomem			*regs;
+	struct fimd_win_data		win_data[WINDOWS_NR];
+	unsigned int			clkdiv;
+	unsigned int			default_win;
+	unsigned long			irq_flags;
+	u32				vidcon0;
+	u32				vidcon1;
+	bool				suspended;
+	struct mutex			lock;
+	wait_queue_head_t		wait_vsync_queue;
+	atomic_t			wait_vsync_event;
+
+	struct exynos_drm_panel_info *panel;
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id fimd_driver_dt_match[] = {
+	{ .compatible = "samsung,exynos4210-fimd",
+	  .data = &exynos4_fimd_driver_data },
+	{ .compatible = "samsung,exynos5250-fimd",
+	  .data = &exynos5_fimd_driver_data },
+	{},
+};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
+#endif
+
+static inline struct fimd_driver_data *drm_fimd_get_driver_data(
+	struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+	const struct of_device_id *of_id =
+			of_match_device(fimd_driver_dt_match, &pdev->dev);
+
+	if (of_id)
+		return (struct fimd_driver_data *)of_id->data;
+#endif
+
+	return (struct fimd_driver_data *)
+		platform_get_device_id(pdev)->driver_data;
+}
+
+static bool fimd_display_is_connected(struct device *dev)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO. */
+
+	return true;
+}
+
+static void *fimd_get_panel(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	return ctx->panel;
+}
+
+static int fimd_check_timing(struct device *dev, void *timing)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO. */
+
+	return 0;
+}
+
+static int fimd_display_power_on(struct device *dev, int mode)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO */
+
+	return 0;
+}
+
+static struct exynos_drm_display_ops fimd_display_ops = {
+	.type = EXYNOS_DISPLAY_TYPE_LCD,
+	.is_connected = fimd_display_is_connected,
+	.get_panel = fimd_get_panel,
+	.check_timing = fimd_check_timing,
+	.power_on = fimd_display_power_on,
+};
+
+static void fimd_dpms(struct device *subdrv_dev, int mode)
+{
+	struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+
+	DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
+
+	mutex_lock(&ctx->lock);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		/*
+		 * Enable the fimd hardware only if it is currently suspended.
+		 *
+		 * Note: fimd_dpms can also be called at boot time, so without
+		 * this check clk_enable could end up being called twice.
+		 */
+		if (ctx->suspended)
+			pm_runtime_get_sync(subdrv_dev);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (!ctx->suspended)
+			pm_runtime_put_sync(subdrv_dev);
+		break;
+	default:
+		DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+		break;
+	}
+
+	mutex_unlock(&ctx->lock);
+}
+
+static void fimd_apply(struct device *subdrv_dev)
+{
+	struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+	struct exynos_drm_manager *mgr = ctx->subdrv.manager;
+	struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
+	struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
+	struct fimd_win_data *win_data;
+	int i;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	for (i = 0; i < WINDOWS_NR; i++) {
+		win_data = &ctx->win_data[i];
+		if (win_data->enabled && (ovl_ops && ovl_ops->commit))
+			ovl_ops->commit(subdrv_dev, i);
+	}
+
+	if (mgr_ops && mgr_ops->commit)
+		mgr_ops->commit(subdrv_dev);
+}
+
+static void fimd_commit(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct exynos_drm_panel_info *panel = ctx->panel;
+	struct fb_videomode *timing = &panel->timing;
+	struct fimd_driver_data *driver_data;
+	struct platform_device *pdev = to_platform_device(dev);
+	u32 val;
+
+	driver_data = drm_fimd_get_driver_data(pdev);
+	if (ctx->suspended)
+		return;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* setup polarity values from machine code. */
+	writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
+
+	/* setup vertical timing values. */
+	val = VIDTCON0_VBPD(timing->upper_margin - 1) |
+	       VIDTCON0_VFPD(timing->lower_margin - 1) |
+	       VIDTCON0_VSPW(timing->vsync_len - 1);
+	writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
+
+	/* setup horizontal timing values.  */
+	val = VIDTCON1_HBPD(timing->left_margin - 1) |
+	       VIDTCON1_HFPD(timing->right_margin - 1) |
+	       VIDTCON1_HSPW(timing->hsync_len - 1);
+	writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
+
+	/* setup horizontal and vertical display size. */
+	val = VIDTCON2_LINEVAL(timing->yres - 1) |
+	       VIDTCON2_HOZVAL(timing->xres - 1) |
+	       VIDTCON2_LINEVAL_E(timing->yres - 1) |
+	       VIDTCON2_HOZVAL_E(timing->xres - 1);
+	writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
+
+	/* setup clock source, clock divider, enable dma. */
+	val = ctx->vidcon0;
+	val &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
+
+	if (ctx->clkdiv > 1)
+		val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR;
+	else
+		val &= ~VIDCON0_CLKDIR;	/* 1:1 clock */
+
+	/*
+	 * Register fields with the '_F' suffix are updated at vsync
+	 * (i.e. at dma start).
+	 */
+	val |= VIDCON0_ENVID | VIDCON0_ENVID_F;
+	writel(val, ctx->regs + VIDCON0);
+}
+
+static int fimd_enable_vblank(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	u32 val;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (ctx->suspended)
+		return -EPERM;
+
+	if (!test_and_set_bit(0, &ctx->irq_flags)) {
+		val = readl(ctx->regs + VIDINTCON0);
+
+		val |= VIDINTCON0_INT_ENABLE;
+		val |= VIDINTCON0_INT_FRAME;
+
+		val &= ~VIDINTCON0_FRAMESEL0_MASK;
+		val |= VIDINTCON0_FRAMESEL0_VSYNC;
+		val &= ~VIDINTCON0_FRAMESEL1_MASK;
+		val |= VIDINTCON0_FRAMESEL1_NONE;
+
+		writel(val, ctx->regs + VIDINTCON0);
+	}
+
+	return 0;
+}
+
+static void fimd_disable_vblank(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	u32 val;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (ctx->suspended)
+		return;
+
+	if (test_and_clear_bit(0, &ctx->irq_flags)) {
+		val = readl(ctx->regs + VIDINTCON0);
+
+		val &= ~VIDINTCON0_INT_FRAME;
+		val &= ~VIDINTCON0_INT_ENABLE;
+
+		writel(val, ctx->regs + VIDINTCON0);
+	}
+}
+
+static void fimd_wait_for_vblank(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+
+	if (ctx->suspended)
+		return;
+
+	atomic_set(&ctx->wait_vsync_event, 1);
+
+	/*
+	 * Wait for FIMD to signal the VSYNC interrupt, or time out
+	 * after 50ms (the period of a 20Hz refresh rate).
+	 */
+	if (!wait_event_timeout(ctx->wait_vsync_queue,
+				!atomic_read(&ctx->wait_vsync_event),
+				DRM_HZ/20))
+		DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
+static struct exynos_drm_manager_ops fimd_manager_ops = {
+	.dpms = fimd_dpms,
+	.apply = fimd_apply,
+	.commit = fimd_commit,
+	.enable_vblank = fimd_enable_vblank,
+	.disable_vblank = fimd_disable_vblank,
+	.wait_for_vblank = fimd_wait_for_vblank,
+};
+
+static void fimd_win_mode_set(struct device *dev,
+			      struct exynos_drm_overlay *overlay)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data;
+	int win;
+	unsigned long offset;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!overlay) {
+		dev_err(dev, "overlay is NULL\n");
+		return;
+	}
+
+	win = overlay->zpos;
+	if (win == DEFAULT_ZPOS)
+		win = ctx->default_win;
+
+	if (win < 0 || win >= WINDOWS_NR)
+		return;
+
+	offset = overlay->fb_x * (overlay->bpp >> 3);
+	offset += overlay->fb_y * overlay->pitch;
+
+	DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
+
+	win_data = &ctx->win_data[win];
+
+	win_data->offset_x = overlay->crtc_x;
+	win_data->offset_y = overlay->crtc_y;
+	win_data->ovl_width = overlay->crtc_width;
+	win_data->ovl_height = overlay->crtc_height;
+	win_data->fb_width = overlay->fb_width;
+	win_data->fb_height = overlay->fb_height;
+	win_data->dma_addr = overlay->dma_addr[0] + offset;
+	win_data->bpp = overlay->bpp;
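+	/*
+	 * line_size is the length of one visible line in bytes; buf_offsize
+	 * is the number of bytes to skip between the end of one visible
+	 * line and the start of the next one in the framebuffer.
+	 */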
+	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
+				(overlay->bpp >> 3);
+	win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
+
+	DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
+			win_data->offset_x, win_data->offset_y);
+	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+			win_data->ovl_width, win_data->ovl_height);
+	DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
+	DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
+			overlay->fb_width, overlay->crtc_width);
+}
+
+static void fimd_win_set_pixfmt(struct device *dev, unsigned int win)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data = &ctx->win_data[win];
+	unsigned long val;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	val = WINCONx_ENWIN;
+
+	switch (win_data->bpp) {
+	case 1:
+		val |= WINCON0_BPPMODE_1BPP;
+		val |= WINCONx_BITSWP;
+		val |= WINCONx_BURSTLEN_4WORD;
+		break;
+	case 2:
+		val |= WINCON0_BPPMODE_2BPP;
+		val |= WINCONx_BITSWP;
+		val |= WINCONx_BURSTLEN_8WORD;
+		break;
+	case 4:
+		val |= WINCON0_BPPMODE_4BPP;
+		val |= WINCONx_BITSWP;
+		val |= WINCONx_BURSTLEN_8WORD;
+		break;
+	case 8:
+		val |= WINCON0_BPPMODE_8BPP_PALETTE;
+		val |= WINCONx_BURSTLEN_8WORD;
+		val |= WINCONx_BYTSWP;
+		break;
+	case 16:
+		val |= WINCON0_BPPMODE_16BPP_565;
+		val |= WINCONx_HAWSWP;
+		val |= WINCONx_BURSTLEN_16WORD;
+		break;
+	case 24:
+		val |= WINCON0_BPPMODE_24BPP_888;
+		val |= WINCONx_WSWP;
+		val |= WINCONx_BURSTLEN_16WORD;
+		break;
+	case 32:
+		val |= WINCON1_BPPMODE_28BPP_A4888
+			| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
+		val |= WINCONx_WSWP;
+		val |= WINCONx_BURSTLEN_16WORD;
+		break;
+	default:
+		DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
+
+		val |= WINCON0_BPPMODE_24BPP_888;
+		val |= WINCONx_WSWP;
+		val |= WINCONx_BURSTLEN_16WORD;
+		break;
+	}
+
+	DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
+
+	writel(val, ctx->regs + WINCON(win));
+}
+
+static void fimd_win_set_colkey(struct device *dev, unsigned int win)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	unsigned int keycon0 = 0, keycon1 = 0;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
+			WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
+
+	keycon1 = WxKEYCON1_COLVAL(0xffffffff);
+
+	writel(keycon0, ctx->regs + WKEYCON0_BASE(win));
+	writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
+}
+
+static void fimd_win_commit(struct device *dev, int zpos)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data;
+	int win = zpos;
+	unsigned long val, alpha, size;
+	unsigned int last_x;
+	unsigned int last_y;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (ctx->suspended)
+		return;
+
+	if (win == DEFAULT_ZPOS)
+		win = ctx->default_win;
+
+	if (win < 0 || win >= WINDOWS_NR)
+		return;
+
+	win_data = &ctx->win_data[win];
+
+	/*
+	 * The SHADOWCON register controls when window register updates
+	 * actually take effect.
+	 *
+	 * For example, if dma started while only the width value of a
+	 * register had been set, the fimd hardware could malfunction. With
+	 * window protection enabled, register fields with the '_F' suffix
+	 * are not applied at vsync either; they are applied only once the
+	 * window is unprotected again.
+	 */
+
+	/* protect windows */
+	val = readl(ctx->regs + SHADOWCON);
+	val |= SHADOWCON_WINx_PROTECT(win);
+	writel(val, ctx->regs + SHADOWCON);
+
+	/* buffer start address */
+	val = (unsigned long)win_data->dma_addr;
+	writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
+
+	/* buffer end address */
+	size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
+	val = (unsigned long)(win_data->dma_addr + size);
+	writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
+
+	DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
+			(unsigned long)win_data->dma_addr, val, size);
+	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+			win_data->ovl_width, win_data->ovl_height);
+
+	/* buffer size */
+	val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
+		VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
+		VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
+		VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
+	writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
+
+	/* OSD position */
+	val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
+		VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
+		VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
+		VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
+	writel(val, ctx->regs + VIDOSD_A(win));
+
+	last_x = win_data->offset_x + win_data->ovl_width;
+	if (last_x)
+		last_x--;
+	last_y = win_data->offset_y + win_data->ovl_height;
+	if (last_y)
+		last_y--;
+
+	val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
+		VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
+
+	writel(val, ctx->regs + VIDOSD_B(win));
+
+	DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
+			win_data->offset_x, win_data->offset_y, last_x, last_y);
+
+	/* hardware window 0 doesn't support alpha channel. */
+	if (win != 0) {
+		/* OSD alpha */
+		alpha = VIDISD14C_ALPHA1_R(0xf) |
+			VIDISD14C_ALPHA1_G(0xf) |
+			VIDISD14C_ALPHA1_B(0xf);
+
+		writel(alpha, ctx->regs + VIDOSD_C(win));
+	}
+
+	/* OSD size */
+	if (win != 3 && win != 4) {
+		u32 offset = VIDOSD_D(win);
+		if (win == 0)
+			offset = VIDOSD_C(win);
+		val = win_data->ovl_width * win_data->ovl_height;
+		writel(val, ctx->regs + offset);
+
+		DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
+	}
+
+	fimd_win_set_pixfmt(dev, win);
+
+	/* hardware window 0 doesn't support color key. */
+	if (win != 0)
+		fimd_win_set_colkey(dev, win);
+
+	/* wincon */
+	val = readl(ctx->regs + WINCON(win));
+	val |= WINCONx_ENWIN;
+	writel(val, ctx->regs + WINCON(win));
+
+	/* Enable DMA channel and unprotect windows */
+	val = readl(ctx->regs + SHADOWCON);
+	val |= SHADOWCON_CHx_ENABLE(win);
+	val &= ~SHADOWCON_WINx_PROTECT(win);
+	writel(val, ctx->regs + SHADOWCON);
+
+	win_data->enabled = true;
+}
+
+static void fimd_win_disable(struct device *dev, int zpos)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data;
+	int win = zpos;
+	u32 val;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (win == DEFAULT_ZPOS)
+		win = ctx->default_win;
+
+	if (win < 0 || win >= WINDOWS_NR)
+		return;
+
+	win_data = &ctx->win_data[win];
+
+	if (ctx->suspended) {
+		/* do not resume this window */
+		win_data->resume = false;
+		return;
+	}
+
+	/* protect windows */
+	val = readl(ctx->regs + SHADOWCON);
+	val |= SHADOWCON_WINx_PROTECT(win);
+	writel(val, ctx->regs + SHADOWCON);
+
+	/* wincon */
+	val = readl(ctx->regs + WINCON(win));
+	val &= ~WINCONx_ENWIN;
+	writel(val, ctx->regs + WINCON(win));
+
+	/* unprotect windows */
+	val = readl(ctx->regs + SHADOWCON);
+	val &= ~SHADOWCON_CHx_ENABLE(win);
+	val &= ~SHADOWCON_WINx_PROTECT(win);
+	writel(val, ctx->regs + SHADOWCON);
+
+	win_data->enabled = false;
+}
+
+static struct exynos_drm_overlay_ops fimd_overlay_ops = {
+	.mode_set = fimd_win_mode_set,
+	.commit = fimd_win_commit,
+	.disable = fimd_win_disable,
+};
+
+static struct exynos_drm_manager fimd_manager = {
+	.pipe		= -1,
+	.ops		= &fimd_manager_ops,
+	.overlay_ops	= &fimd_overlay_ops,
+	.display_ops	= &fimd_display_ops,
+};
+
+static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
+{
+	struct fimd_context *ctx = (struct fimd_context *)dev_id;
+	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+	struct drm_device *drm_dev = subdrv->drm_dev;
+	struct exynos_drm_manager *manager = subdrv->manager;
+	u32 val;
+
+	val = readl(ctx->regs + VIDINTCON1);
+
+	if (val & VIDINTCON1_INT_FRAME)
+		/* VSYNC interrupt */
+		writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+
+	/* check the crtc is detached already from encoder */
+	if (manager->pipe < 0)
+		goto out;
+
+	drm_handle_vblank(drm_dev, manager->pipe);
+	exynos_drm_crtc_finish_pageflip(drm_dev, manager->pipe);
+
+	/* set wait vsync event to zero and wake up queue. */
+	if (atomic_read(&ctx->wait_vsync_event)) {
+		atomic_set(&ctx->wait_vsync_event, 0);
+		DRM_WAKEUP(&ctx->wait_vsync_queue);
+	}
+out:
+	return IRQ_HANDLED;
+}
+
+static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * Enable drm irq mode.
+	 * - with irq_enabled = 1, the vblank feature can be used.
+	 *
+	 * Note: the generic drm irq handler is not used here; this driver
+	 * installs its own handler instead, because the drm framework
+	 * supports only one irq handler.
+	 */
+	drm_dev->irq_enabled = 1;
+
+	/*
+	 * With vblank_disable_allowed = 1, the vblank interrupt is disabled
+	 * by the drm timer once the current process gives up ownership of
+	 * the vblank event (i.e. after drm_vblank_put has been called).
+	 */
+	drm_dev->vblank_disable_allowed = 1;
+
+	/* attach this sub driver to iommu mapping if supported. */
+	if (is_drm_iommu_supported(drm_dev))
+		drm_iommu_attach_device(drm_dev, dev);
+
+	return 0;
+}
+
+static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* detach this sub driver from iommu mapping if supported. */
+	if (is_drm_iommu_supported(drm_dev))
+		drm_iommu_detach_device(drm_dev, dev);
+}
+
+static int fimd_calc_clkdiv(struct fimd_context *ctx,
+			    struct fb_videomode *timing)
+{
+	unsigned long clk = clk_get_rate(ctx->lcd_clk);
+	u32 retrace;
+	u32 clkdiv;
+	u32 best_framerate = 0;
+	u32 framerate;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	retrace = timing->left_margin + timing->hsync_len +
+				timing->right_margin + timing->xres;
+	retrace *= timing->upper_margin + timing->vsync_len +
+				timing->lower_margin + timing->yres;
+
+	/* default framerate is 60Hz */
+	if (!timing->refresh)
+		timing->refresh = 60;
+
+	clk /= retrace;
+
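+	/*
+	 * clk now holds the frame rate achievable with a divider of 1;
+	 * search for the smallest divider whose resulting frame rate does
+	 * not exceed the requested refresh rate.
+	 */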
+	for (clkdiv = 1; clkdiv < 0x100; clkdiv++) {
+		int tmp;
+
+		/* get best framerate */
+		framerate = clk / clkdiv;
+		tmp = timing->refresh - framerate;
+		if (tmp < 0) {
+			best_framerate = framerate;
+			continue;
+		} else {
+			if (!best_framerate)
+				best_framerate = framerate;
+			else if (tmp < (best_framerate - framerate))
+				best_framerate = framerate;
+			break;
+		}
+	}
+
+	return clkdiv;
+}
+
+static void fimd_clear_win(struct fimd_context *ctx, int win)
+{
+	u32 val;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	writel(0, ctx->regs + WINCON(win));
+	writel(0, ctx->regs + VIDOSD_A(win));
+	writel(0, ctx->regs + VIDOSD_B(win));
+	writel(0, ctx->regs + VIDOSD_C(win));
+
+	if (win == 1 || win == 2)
+		writel(0, ctx->regs + VIDOSD_D(win));
+
+	val = readl(ctx->regs + SHADOWCON);
+	val &= ~SHADOWCON_WINx_PROTECT(win);
+	writel(val, ctx->regs + SHADOWCON);
+}
+
+static int fimd_clock(struct fimd_context *ctx, bool enable)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (enable) {
+		int ret;
+
+		ret = clk_prepare_enable(ctx->bus_clk);
+		if (ret < 0)
+			return ret;
+
+		ret = clk_prepare_enable(ctx->lcd_clk);
+		if  (ret < 0) {
+			clk_disable_unprepare(ctx->bus_clk);
+			return ret;
+		}
+	} else {
+		clk_disable_unprepare(ctx->lcd_clk);
+		clk_disable_unprepare(ctx->bus_clk);
+	}
+
+	return 0;
+}
+
+static void fimd_window_suspend(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data;
+	int i;
+
+	for (i = 0; i < WINDOWS_NR; i++) {
+		win_data = &ctx->win_data[i];
+		win_data->resume = win_data->enabled;
+		fimd_win_disable(dev, i);
+	}
+	fimd_wait_for_vblank(dev);
+}
+
+static void fimd_window_resume(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data;
+	int i;
+
+	for (i = 0; i < WINDOWS_NR; i++) {
+		win_data = &ctx->win_data[i];
+		win_data->enabled = win_data->resume;
+		win_data->resume = false;
+	}
+}
+
+static int fimd_activate(struct fimd_context *ctx, bool enable)
+{
+	struct device *dev = ctx->subdrv.dev;
+	if (enable) {
+		int ret;
+
+		ret = fimd_clock(ctx, true);
+		if (ret < 0)
+			return ret;
+
+		ctx->suspended = false;
+
+		/* if vblank was enabled status, enable it again. */
+		if (test_and_clear_bit(0, &ctx->irq_flags))
+			fimd_enable_vblank(dev);
+
+		fimd_window_resume(dev);
+	} else {
+		fimd_window_suspend(dev);
+
+		fimd_clock(ctx, false);
+		ctx->suspended = true;
+	}
+
+	return 0;
+}
+
+static int fimd_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct fimd_context *ctx;
+	struct exynos_drm_subdrv *subdrv;
+	struct exynos_drm_fimd_pdata *pdata;
+	struct exynos_drm_panel_info *panel;
+	struct resource *res;
+	int win;
+	int ret = -EINVAL;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (dev->of_node) {
+		pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+		if (!pdata) {
+			DRM_ERROR("memory allocation for pdata failed\n");
+			return -ENOMEM;
+		}
+
+		ret = of_get_fb_videomode(dev->of_node, &pdata->panel.timing,
+					OF_USE_NATIVE_MODE);
+		if (ret) {
+			DRM_ERROR("failed: of_get_fb_videomode() : %d\n", ret);
+			return ret;
+		}
+	} else {
+		pdata = dev->platform_data;
+		if (!pdata) {
+			DRM_ERROR("no platform data specified\n");
+			return -EINVAL;
+		}
+	}
+
+	panel = &pdata->panel;
+	if (!panel) {
+		dev_err(dev, "panel is null.\n");
+		return -EINVAL;
+	}
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->bus_clk = devm_clk_get(dev, "fimd");
+	if (IS_ERR(ctx->bus_clk)) {
+		dev_err(dev, "failed to get bus clock\n");
+		return PTR_ERR(ctx->bus_clk);
+	}
+
+	ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
+	if (IS_ERR(ctx->lcd_clk)) {
+		dev_err(dev, "failed to get lcd clock\n");
+		return PTR_ERR(ctx->lcd_clk);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	ctx->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ctx->regs))
+		return PTR_ERR(ctx->regs);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
+	if (!res) {
+		dev_err(dev, "failed to get vsync irq resource.\n");
+		return -ENXIO;
+	}
+
+	ctx->irq = res->start;
+
+	ret = devm_request_irq(dev, ctx->irq, fimd_irq_handler,
+							0, "drm_fimd", ctx);
+	if (ret) {
+		dev_err(dev, "irq request failed.\n");
+		return ret;
+	}
+
+	ctx->vidcon0 = pdata->vidcon0;
+	ctx->vidcon1 = pdata->vidcon1;
+	ctx->default_win = pdata->default_win;
+	ctx->panel = panel;
+	DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+	atomic_set(&ctx->wait_vsync_event, 0);
+
+	subdrv = &ctx->subdrv;
+
+	subdrv->dev = dev;
+	subdrv->manager = &fimd_manager;
+	subdrv->probe = fimd_subdrv_probe;
+	subdrv->remove = fimd_subdrv_remove;
+
+	mutex_init(&ctx->lock);
+
+	platform_set_drvdata(pdev, ctx);
+
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+
+	ctx->clkdiv = fimd_calc_clkdiv(ctx, &panel->timing);
+	panel->timing.pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
+
+	DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n",
+			panel->timing.pixclock, ctx->clkdiv);
+
+	for (win = 0; win < WINDOWS_NR; win++)
+		fimd_clear_win(ctx, win);
+
+	exynos_drm_subdrv_register(subdrv);
+
+	return 0;
+}
+
+static int fimd_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct fimd_context *ctx = platform_get_drvdata(pdev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+	if (ctx->suspended)
+		goto out;
+
+	pm_runtime_set_suspended(dev);
+	pm_runtime_put_sync(dev);
+
+out:
+	pm_runtime_disable(dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fimd_suspend(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+
+	/*
+	 * Do not use pm_runtime_suspend() here: it would return an error
+	 * because the pm runtime usage_count is greater than 1 at this
+	 * point.
+	 */
+	if (!pm_runtime_suspended(dev))
+		return fimd_activate(ctx, false);
+
+	return 0;
+}
+
+static int fimd_resume(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+
+	/*
+	 * If the system entered sleep while the lcd panel was on, the pm
+	 * runtime usage_count is still 1, so in this case the fimd driver
+	 * should be powered on directly instead of through the pm runtime
+	 * interface.
+	 */
+	if (!pm_runtime_suspended(dev)) {
+		int ret;
+
+		ret = fimd_activate(ctx, true);
+		if (ret < 0)
+			return ret;
+
+		/*
+		 * In the dpms on (standby) case, fimd_apply is called by the
+		 * encoder's dpms callback to update fimd's registers, but on
+		 * wakeup from sleep it is not, so fimd_apply has to be called
+		 * here.
+		 */
+		fimd_apply(dev);
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimd_runtime_suspend(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	return fimd_activate(ctx, false);
+}
+
+static int fimd_runtime_resume(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	return fimd_activate(ctx, true);
+}
+#endif
+
+static struct platform_device_id fimd_driver_ids[] = {
+	{
+		.name		= "exynos4-fb",
+		.driver_data	= (unsigned long)&exynos4_fimd_driver_data,
+	}, {
+		.name		= "exynos5-fb",
+		.driver_data	= (unsigned long)&exynos5_fimd_driver_data,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(platform, fimd_driver_ids);
+
+static const struct dev_pm_ops fimd_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
+	SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
+};
+
+struct platform_driver fimd_driver = {
+	.probe		= fimd_probe,
+	.remove		= fimd_remove,
+	.id_table       = fimd_driver_ids,
+	.driver		= {
+		.name	= "exynos4-fb",
+		.owner	= THIS_MODULE,
+		.pm	= &fimd_pm_ops,
+		.of_match_table = of_match_ptr(fimd_driver_dt_match),
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_g2d.c
new file mode 100644
index 0000000..af75434
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -0,0 +1,1540 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-attrs.h>
+#include <linux/of.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+
+#define G2D_HW_MAJOR_VER		4
+#define G2D_HW_MINOR_VER		1
+
+/* valid register range settable from user space: 0x0104 ~ 0x0880 */
+#define G2D_VALID_START			0x0104
+#define G2D_VALID_END			0x0880
+
+/* general registers */
+#define G2D_SOFT_RESET			0x0000
+#define G2D_INTEN			0x0004
+#define G2D_INTC_PEND			0x000C
+#define G2D_DMA_SFR_BASE_ADDR		0x0080
+#define G2D_DMA_COMMAND			0x0084
+#define G2D_DMA_STATUS			0x008C
+#define G2D_DMA_HOLD_CMD		0x0090
+
+/* command registers */
+#define G2D_BITBLT_START		0x0100
+
+/* registers for base address */
+#define G2D_SRC_BASE_ADDR		0x0304
+#define G2D_SRC_COLOR_MODE		0x030C
+#define G2D_SRC_LEFT_TOP		0x0310
+#define G2D_SRC_RIGHT_BOTTOM		0x0314
+#define G2D_SRC_PLANE2_BASE_ADDR	0x0318
+#define G2D_DST_BASE_ADDR		0x0404
+#define G2D_DST_COLOR_MODE		0x040C
+#define G2D_DST_LEFT_TOP		0x0410
+#define G2D_DST_RIGHT_BOTTOM		0x0414
+#define G2D_DST_PLANE2_BASE_ADDR	0x0418
+#define G2D_PAT_BASE_ADDR		0x0500
+#define G2D_MSK_BASE_ADDR		0x0520
+
+/* G2D_SOFT_RESET */
+#define G2D_SFRCLEAR			(1 << 1)
+#define G2D_R				(1 << 0)
+
+/* G2D_INTEN */
+#define G2D_INTEN_ACF			(1 << 3)
+#define G2D_INTEN_UCF			(1 << 2)
+#define G2D_INTEN_GCF			(1 << 1)
+#define G2D_INTEN_SCF			(1 << 0)
+
+/* G2D_INTC_PEND */
+#define G2D_INTP_ACMD_FIN		(1 << 3)
+#define G2D_INTP_UCMD_FIN		(1 << 2)
+#define G2D_INTP_GCMD_FIN		(1 << 1)
+#define G2D_INTP_SCMD_FIN		(1 << 0)
+
+/* G2D_DMA_COMMAND */
+#define G2D_DMA_HALT			(1 << 2)
+#define G2D_DMA_CONTINUE		(1 << 1)
+#define G2D_DMA_START			(1 << 0)
+
+/* G2D_DMA_STATUS */
+#define G2D_DMA_LIST_DONE_COUNT		(0xFF << 17)
+#define G2D_DMA_BITBLT_DONE_COUNT	(0xFFFF << 1)
+#define G2D_DMA_DONE			(1 << 0)
+#define G2D_DMA_LIST_DONE_COUNT_OFFSET	17
+
+/* G2D_DMA_HOLD_CMD */
+#define G2D_USER_HOLD			(1 << 2)
+#define G2D_LIST_HOLD			(1 << 1)
+#define G2D_BITBLT_HOLD			(1 << 0)
+
+/* G2D_BITBLT_START */
+#define G2D_START_CASESEL		(1 << 2)
+#define G2D_START_NHOLT			(1 << 1)
+#define G2D_START_BITBLT		(1 << 0)
+
+/* buffer color format */
+#define G2D_FMT_XRGB8888		0
+#define G2D_FMT_ARGB8888		1
+#define G2D_FMT_RGB565			2
+#define G2D_FMT_XRGB1555		3
+#define G2D_FMT_ARGB1555		4
+#define G2D_FMT_XRGB4444		5
+#define G2D_FMT_ARGB4444		6
+#define G2D_FMT_PACKED_RGB888		7
+#define G2D_FMT_A8			11
+#define G2D_FMT_L8			12
+
+/* buffer valid length */
+#define G2D_LEN_MIN			1
+#define G2D_LEN_MAX			8000
+
+#define G2D_CMDLIST_SIZE		(PAGE_SIZE / 4)
+#define G2D_CMDLIST_NUM			64
+#define G2D_CMDLIST_POOL_SIZE		(G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
+#define G2D_CMDLIST_DATA_NUM		(G2D_CMDLIST_SIZE / sizeof(u32) - 2)
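+/*
+ * Each cmdlist reserves two u32-sized slots for the head and last fields
+ * of struct g2d_cmdlist below; the rest is command data.
+ */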
+
+/* maximum buffer pool size of userptr is 64MB as default */
+#define MAX_POOL		(64 * 1024 * 1024)
+
+enum {
+	BUF_TYPE_GEM = 1,
+	BUF_TYPE_USERPTR,
+};
+
+enum g2d_reg_type {
+	REG_TYPE_NONE = -1,
+	REG_TYPE_SRC,
+	REG_TYPE_SRC_PLANE2,
+	REG_TYPE_DST,
+	REG_TYPE_DST_PLANE2,
+	REG_TYPE_PAT,
+	REG_TYPE_MSK,
+	MAX_REG_TYPE_NR
+};
+
+/* cmdlist data structure */
+struct g2d_cmdlist {
+	u32		head;
+	unsigned long	data[G2D_CMDLIST_DATA_NUM];
+	u32		last;	/* last data offset */
+};
+
+/*
+ * A structure of buffer description
+ *
+ * @format: color format
+ * @left_x: the x coordinate of the top-left corner
+ * @top_y: the y coordinate of the top-left corner
+ * @right_x: the x coordinate of the bottom-right corner
+ * @bottom_y: the y coordinate of the bottom-right corner
+ *
+ */
+struct g2d_buf_desc {
+	unsigned int	format;
+	unsigned int	left_x;
+	unsigned int	top_y;
+	unsigned int	right_x;
+	unsigned int	bottom_y;
+};
+
+/*
+ * A structure of buffer information
+ *
+ * @map_nr: manages the number of mapped buffers
+ * @reg_types: stores the register type in the order of the requested commands
+ * @handles: stores buffer handle in its reg_type position
+ * @types: stores buffer type in its reg_type position
+ * @descs: stores buffer description in its reg_type position
+ *
+ */
+struct g2d_buf_info {
+	unsigned int		map_nr;
+	enum g2d_reg_type	reg_types[MAX_REG_TYPE_NR];
+	unsigned long		handles[MAX_REG_TYPE_NR];
+	unsigned int		types[MAX_REG_TYPE_NR];
+	struct g2d_buf_desc	descs[MAX_REG_TYPE_NR];
+};
+
+struct drm_exynos_pending_g2d_event {
+	struct drm_pending_event	base;
+	struct drm_exynos_g2d_event	event;
+};
+
+struct g2d_cmdlist_userptr {
+	struct list_head	list;
+	dma_addr_t		dma_addr;
+	unsigned long		userptr;
+	unsigned long		size;
+	struct page		**pages;
+	unsigned int		npages;
+	struct sg_table		*sgt;
+	struct vm_area_struct	*vma;
+	atomic_t		refcount;
+	bool			in_pool;
+	bool			out_of_list;
+};
+struct g2d_cmdlist_node {
+	struct list_head	list;
+	struct g2d_cmdlist	*cmdlist;
+	dma_addr_t		dma_addr;
+	struct g2d_buf_info	buf_info;
+
+	struct drm_exynos_pending_g2d_event	*event;
+};
+
+struct g2d_runqueue_node {
+	struct list_head	list;
+	struct list_head	run_cmdlist;
+	struct list_head	event_list;
+	struct drm_file		*filp;
+	pid_t			pid;
+	struct completion	complete;
+	int			async;
+};
+
+struct g2d_data {
+	struct device			*dev;
+	struct clk			*gate_clk;
+	void __iomem			*regs;
+	int				irq;
+	struct workqueue_struct		*g2d_workq;
+	struct work_struct		runqueue_work;
+	struct exynos_drm_subdrv	subdrv;
+	bool				suspended;
+
+	/* cmdlist */
+	struct g2d_cmdlist_node		*cmdlist_node;
+	struct list_head		free_cmdlist;
+	struct mutex			cmdlist_mutex;
+	dma_addr_t			cmdlist_pool;
+	void				*cmdlist_pool_virt;
+	struct dma_attrs		cmdlist_dma_attrs;
+
+	/* runqueue */
+	struct g2d_runqueue_node	*runqueue_node;
+	struct list_head		runqueue;
+	struct mutex			runqueue_mutex;
+	struct kmem_cache		*runqueue_slab;
+
+	unsigned long			current_pool;
+	unsigned long			max_pool;
+};
+
+static int g2d_init_cmdlist(struct g2d_data *g2d)
+{
+	struct device *dev = g2d->dev;
+	struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+	int nr;
+	int ret;
+	struct g2d_buf_info *buf_info;
+
+	init_dma_attrs(&g2d->cmdlist_dma_attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+
+	g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+						G2D_CMDLIST_POOL_SIZE,
+						&g2d->cmdlist_pool, GFP_KERNEL,
+						&g2d->cmdlist_dma_attrs);
+	if (!g2d->cmdlist_pool_virt) {
+		dev_err(dev, "failed to allocate dma memory\n");
+		return -ENOMEM;
+	}
+
+	node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
+	if (!node) {
+		dev_err(dev, "failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
+		unsigned int i;
+
+		node[nr].cmdlist =
+			g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
+		node[nr].dma_addr =
+			g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
+
+		buf_info = &node[nr].buf_info;
+		for (i = 0; i < MAX_REG_TYPE_NR; i++)
+			buf_info->reg_types[i] = REG_TYPE_NONE;
+
+		list_add_tail(&node[nr].list, &g2d->free_cmdlist);
+	}
+
+	return 0;
+
+err:
+	dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+			g2d->cmdlist_pool_virt,
+			g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+	return ret;
+}
+
+static void g2d_fini_cmdlist(struct g2d_data *g2d)
+{
+	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+
+	kfree(g2d->cmdlist_node);
+	dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+			g2d->cmdlist_pool_virt,
+			g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+}
+
+static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
+{
+	struct device *dev = g2d->dev;
+	struct g2d_cmdlist_node *node;
+
+	mutex_lock(&g2d->cmdlist_mutex);
+	if (list_empty(&g2d->free_cmdlist)) {
+		dev_err(dev, "there is no free cmdlist\n");
+		mutex_unlock(&g2d->cmdlist_mutex);
+		return NULL;
+	}
+
+	node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
+				list);
+	list_del_init(&node->list);
+	mutex_unlock(&g2d->cmdlist_mutex);
+
+	return node;
+}
+
+static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
+{
+	mutex_lock(&g2d->cmdlist_mutex);
+	list_move_tail(&node->list, &g2d->free_cmdlist);
+	mutex_unlock(&g2d->cmdlist_mutex);
+}
+
+static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
+				     struct g2d_cmdlist_node *node)
+{
+	struct g2d_cmdlist_node *lnode;
+
+	if (list_empty(&g2d_priv->inuse_cmdlist))
+		goto add_to_list;
+
+	/*
+	 * Link the previous in-use cmdlist to the base address of the new
+	 * cmdlist so the hardware processes them as a single chain.
+	 */
+	lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
+				struct g2d_cmdlist_node, list);
+	lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
+
+add_to_list:
+	list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
+
+	if (node->event)
+		list_add_tail(&node->event->base.link, &g2d_priv->event_list);
+}
+
+static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
+					unsigned long obj,
+					bool force)
+{
+	struct g2d_cmdlist_userptr *g2d_userptr =
+					(struct g2d_cmdlist_userptr *)obj;
+
+	if (!obj)
+		return;
+
+	if (force)
+		goto out;
+
+	atomic_dec(&g2d_userptr->refcount);
+
+	if (atomic_read(&g2d_userptr->refcount) > 0)
+		return;
+
+	if (g2d_userptr->in_pool)
+		return;
+
+out:
+	exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
+					DMA_BIDIRECTIONAL);
+
+	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+					g2d_userptr->npages,
+					g2d_userptr->vma);
+
+	if (!g2d_userptr->out_of_list)
+		list_del_init(&g2d_userptr->list);
+
+	sg_free_table(g2d_userptr->sgt);
+	kfree(g2d_userptr->sgt);
+	g2d_userptr->sgt = NULL;
+
+	kfree(g2d_userptr->pages);
+	g2d_userptr->pages = NULL;
+	kfree(g2d_userptr);
+	g2d_userptr = NULL;
+}
+
+static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+					unsigned long userptr,
+					unsigned long size,
+					struct drm_file *filp,
+					unsigned long *obj)
+{
+	struct drm_exynos_file_private *file_priv = filp->driver_priv;
+	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+	struct g2d_cmdlist_userptr *g2d_userptr;
+	struct g2d_data *g2d;
+	struct page **pages;
+	struct sg_table	*sgt;
+	struct vm_area_struct *vma;
+	unsigned long start, end;
+	unsigned int npages, offset;
+	int ret;
+
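+	/*
+	 * Overall flow: reuse a cached mapping when userptr and size match
+	 * an entry in userptr_list; otherwise pin the user pages, build a
+	 * scatter-gather table for them and map it for device dma.
+	 */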
+	if (!size) {
+		DRM_ERROR("invalid userptr size.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	g2d = dev_get_drvdata(g2d_priv->dev);
+
+	/* check if userptr already exists in userptr_list. */
+	list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+		if (g2d_userptr->userptr == userptr) {
+			/*
+			 * Also check the size, because the same address
+			 * could have been registered with a different size.
+			 */
+			if (g2d_userptr->size == size) {
+				atomic_inc(&g2d_userptr->refcount);
+				*obj = (unsigned long)g2d_userptr;
+
+				return &g2d_userptr->dma_addr;
+			}
+
+			/*
+			 * The g2d dma engine may still be accessing this
+			 * g2d_userptr memory region, so remove the object
+			 * from userptr_list so it cannot be referenced
+			 * again, and exclude it from the userptr pool so it
+			 * is released only after the dma access completes.
+			 */
+			g2d_userptr->out_of_list = true;
+			g2d_userptr->in_pool = false;
+			list_del_init(&g2d_userptr->list);
+
+			break;
+		}
+	}
+
+	g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
+	if (!g2d_userptr) {
+		DRM_ERROR("failed to allocate g2d_userptr.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	atomic_set(&g2d_userptr->refcount, 1);
+
+	start = userptr & PAGE_MASK;
+	offset = userptr & ~PAGE_MASK;
+	end = PAGE_ALIGN(userptr + size);
+	npages = (end - start) >> PAGE_SHIFT;
+	g2d_userptr->npages = npages;
+
+	pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
+	if (!pages) {
+		DRM_ERROR("failed to allocate pages.\n");
+		kfree(g2d_userptr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	vma = find_vma(current->mm, userptr);
+	if (!vma) {
+		DRM_ERROR("failed to get vm region.\n");
+		ret = -EFAULT;
+		goto err_free_pages;
+	}
+
+	if (vma->vm_end < userptr + size) {
+		DRM_ERROR("vma is too small.\n");
+		ret = -EFAULT;
+		goto err_free_pages;
+	}
+
+	g2d_userptr->vma = exynos_gem_get_vma(vma);
+	if (!g2d_userptr->vma) {
+		DRM_ERROR("failed to copy vma.\n");
+		ret = -ENOMEM;
+		goto err_free_pages;
+	}
+
+	g2d_userptr->size = size;
+
+	ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
+						npages, pages, vma);
+	if (ret < 0) {
+		DRM_ERROR("failed to get user pages from userptr.\n");
+		goto err_put_vma;
+	}
+
+	g2d_userptr->pages = pages;
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		DRM_ERROR("failed to allocate sg table.\n");
+		ret = -ENOMEM;
+		goto err_free_userptr;
+	}
+
+	ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
+					size, GFP_KERNEL);
+	if (ret < 0) {
+		DRM_ERROR("failed to get sgt from pages.\n");
+		goto err_free_sgt;
+	}
+
+	g2d_userptr->sgt = sgt;
+
+	ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
+						DMA_BIDIRECTIONAL);
+	if (ret < 0) {
+		DRM_ERROR("failed to map sgt with dma region.\n");
+		goto err_sg_free_table;
+	}
+
+	g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
+	g2d_userptr->userptr = userptr;
+
+	list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+
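+	/*
+	 * Keep this mapping cached (in_pool) as long as the total pinned
+	 * size stays below max_pool; pooled entries are unmapped only when
+	 * explicitly forced, see g2d_userptr_free_all.
+	 */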
+	if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
+		g2d->current_pool += npages << PAGE_SHIFT;
+		g2d_userptr->in_pool = true;
+	}
+
+	*obj = (unsigned long)g2d_userptr;
+
+	return &g2d_userptr->dma_addr;
+
+err_sg_free_table:
+	sg_free_table(sgt);
+
+err_free_sgt:
+	kfree(sgt);
+	sgt = NULL;
+
+err_free_userptr:
+	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+					g2d_userptr->npages,
+					g2d_userptr->vma);
+
+err_put_vma:
+	exynos_gem_put_vma(g2d_userptr->vma);
+
+err_free_pages:
+	kfree(pages);
+	kfree(g2d_userptr);
+	pages = NULL;
+	g2d_userptr = NULL;
+
+	return ERR_PTR(ret);
+}
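+
+/*
+ * Reuse semantics of g2d_userptr_get_dma_addr() above, as a minimal
+ * illustrative sketch (not executed; names mirror the function's own
+ * parameters):
+ *
+ *	dma_addr_t *a, *b;
+ *	unsigned long obj_a, obj_b;
+ *
+ *	a = g2d_userptr_get_dma_addr(drm_dev, uptr, 4096, file, &obj_a);
+ *	b = g2d_userptr_get_dma_addr(drm_dev, uptr, 4096, file, &obj_b);
+ *	// same userptr and size: b == a, the refcount becomes 2 and no
+ *	// new mapping is created.
+ *
+ *	b = g2d_userptr_get_dma_addr(drm_dev, uptr, 8192, file, &obj_b);
+ *	// same address but different size: the old entry is taken off
+ *	// userptr_list (out_of_list = true) and a fresh mapping is built.
+ */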
+
+static void g2d_userptr_free_all(struct drm_device *drm_dev,
+					struct g2d_data *g2d,
+					struct drm_file *filp)
+{
+	struct drm_exynos_file_private *file_priv = filp->driver_priv;
+	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+	struct g2d_cmdlist_userptr *g2d_userptr, *n;
+
+	list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+		if (g2d_userptr->in_pool)
+			g2d_userptr_put_dma_addr(drm_dev,
+						(unsigned long)g2d_userptr,
+						true);
+
+	g2d->current_pool = 0;
+}
+
+static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
+{
+	enum g2d_reg_type reg_type;
+
+	switch (reg_offset) {
+	case G2D_SRC_BASE_ADDR:
+	case G2D_SRC_COLOR_MODE:
+	case G2D_SRC_LEFT_TOP:
+	case G2D_SRC_RIGHT_BOTTOM:
+		reg_type = REG_TYPE_SRC;
+		break;
+	case G2D_SRC_PLANE2_BASE_ADDR:
+		reg_type = REG_TYPE_SRC_PLANE2;
+		break;
+	case G2D_DST_BASE_ADDR:
+	case G2D_DST_COLOR_MODE:
+	case G2D_DST_LEFT_TOP:
+	case G2D_DST_RIGHT_BOTTOM:
+		reg_type = REG_TYPE_DST;
+		break;
+	case G2D_DST_PLANE2_BASE_ADDR:
+		reg_type = REG_TYPE_DST_PLANE2;
+		break;
+	case G2D_PAT_BASE_ADDR:
+		reg_type = REG_TYPE_PAT;
+		break;
+	case G2D_MSK_BASE_ADDR:
+		reg_type = REG_TYPE_MSK;
+		break;
+	default:
+		reg_type = REG_TYPE_NONE;
+		DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
+		break;
+	}
+
+	return reg_type;
+}
+
+static unsigned long g2d_get_buf_bpp(unsigned int format)
+{
+	unsigned long bpp;
+
+	switch (format) {
+	case G2D_FMT_XRGB8888:
+	case G2D_FMT_ARGB8888:
+		bpp = 4;
+		break;
+	case G2D_FMT_RGB565:
+	case G2D_FMT_XRGB1555:
+	case G2D_FMT_ARGB1555:
+	case G2D_FMT_XRGB4444:
+	case G2D_FMT_ARGB4444:
+		bpp = 2;
+		break;
+	case G2D_FMT_PACKED_RGB888:
+		bpp = 3;
+		break;
+	default:
+		bpp = 1;
+		break;
+	}
+
+	return bpp;
+}
+
+static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
+						enum g2d_reg_type reg_type,
+						unsigned long size)
+{
+	unsigned int width, height;
+	unsigned long area;
+
+	/*
+	 * Check source and destination buffers only;
+	 * all other buffer types are always considered valid.
+	 */
+	if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
+		return true;
+
+	width = buf_desc->right_x - buf_desc->left_x;
+	if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
+		DRM_ERROR("width[%u] is out of range!\n", width);
+		return false;
+	}
+
+	height = buf_desc->bottom_y - buf_desc->top_y;
+	if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
+		DRM_ERROR("height[%u] is out of range!\n", height);
+		return false;
+	}
+
+	area = (unsigned long)width * (unsigned long)height *
+					g2d_get_buf_bpp(buf_desc->format);
+	if (area > size) {
+		DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size);
+		return false;
+	}
+
+	return true;
+}
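+
+/*
+ * A worked example of the check above (values illustrative): a
+ * 1920x1080 G2D_FMT_ARGB8888 area needs 1920 * 1080 * 4 = 8294400
+ * bytes, so any source/destination buffer smaller than that fails
+ * this validation and the callers bail out with -EFAULT.
+ */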
+
+static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
+				struct g2d_cmdlist_node *node,
+				struct drm_device *drm_dev,
+				struct drm_file *file)
+{
+	struct g2d_cmdlist *cmdlist = node->cmdlist;
+	struct g2d_buf_info *buf_info = &node->buf_info;
+	int offset;
+	int ret;
+	int i;
+
+	for (i = 0; i < buf_info->map_nr; i++) {
+		struct g2d_buf_desc *buf_desc;
+		enum g2d_reg_type reg_type;
+		int reg_pos;
+		unsigned long handle;
+		dma_addr_t *addr;
+
+		reg_pos = cmdlist->last - 2 * (i + 1);
+
+		offset = cmdlist->data[reg_pos];
+		handle = cmdlist->data[reg_pos + 1];
+
+		reg_type = g2d_get_reg_type(offset);
+		if (reg_type == REG_TYPE_NONE) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		buf_desc = &buf_info->descs[reg_type];
+
+		if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
+			unsigned long size;
+
+			size = exynos_drm_gem_get_size(drm_dev, handle, file);
+			if (!size) {
+				ret = -EFAULT;
+				goto err;
+			}
+
+			if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
+									size)) {
+				ret = -EFAULT;
+				goto err;
+			}
+
+			addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+								file);
+			if (IS_ERR(addr)) {
+				ret = -EFAULT;
+				goto err;
+			}
+		} else {
+			struct drm_exynos_g2d_userptr g2d_userptr;
+
+			if (copy_from_user(&g2d_userptr, (void __user *)handle,
+				sizeof(struct drm_exynos_g2d_userptr))) {
+				ret = -EFAULT;
+				goto err;
+			}
+
+			if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
+							g2d_userptr.size)) {
+				ret = -EFAULT;
+				goto err;
+			}
+
+			addr = g2d_userptr_get_dma_addr(drm_dev,
+							g2d_userptr.userptr,
+							g2d_userptr.size,
+							file,
+							&handle);
+			if (IS_ERR(addr)) {
+				ret = -EFAULT;
+				goto err;
+			}
+		}
+
+		cmdlist->data[reg_pos + 1] = *addr;
+		buf_info->reg_types[i] = reg_type;
+		buf_info->handles[reg_type] = handle;
+	}
+
+	return 0;
+
+err:
+	buf_info->map_nr = i;
+	return ret;
+}
+
+static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+				  struct g2d_cmdlist_node *node,
+				  struct drm_file *filp)
+{
+	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+	struct g2d_buf_info *buf_info = &node->buf_info;
+	int i;
+
+	for (i = 0; i < buf_info->map_nr; i++) {
+		struct g2d_buf_desc *buf_desc;
+		enum g2d_reg_type reg_type;
+		unsigned long handle;
+
+		reg_type = buf_info->reg_types[i];
+
+		buf_desc = &buf_info->descs[reg_type];
+		handle = buf_info->handles[reg_type];
+
+		if (buf_info->types[reg_type] == BUF_TYPE_GEM)
+			exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+							filp);
+		else
+			g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+							false);
+
+		buf_info->reg_types[i] = REG_TYPE_NONE;
+		buf_info->handles[reg_type] = 0;
+		buf_info->types[reg_type] = 0;
+		memset(buf_desc, 0x00, sizeof(*buf_desc));
+	}
+
+	buf_info->map_nr = 0;
+}
+
+static void g2d_dma_start(struct g2d_data *g2d,
+			  struct g2d_runqueue_node *runqueue_node)
+{
+	struct g2d_cmdlist_node *node =
+				list_first_entry(&runqueue_node->run_cmdlist,
+						struct g2d_cmdlist_node, list);
+
+	pm_runtime_get_sync(g2d->dev);
+	clk_enable(g2d->gate_clk);
+
+	writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
+	writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
+}
+
+static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
+{
+	struct g2d_runqueue_node *runqueue_node;
+
+	if (list_empty(&g2d->runqueue))
+		return NULL;
+
+	runqueue_node = list_first_entry(&g2d->runqueue,
+					 struct g2d_runqueue_node, list);
+	list_del_init(&runqueue_node->list);
+	return runqueue_node;
+}
+
+static void g2d_free_runqueue_node(struct g2d_data *g2d,
+				   struct g2d_runqueue_node *runqueue_node)
+{
+	struct g2d_cmdlist_node *node;
+
+	if (!runqueue_node)
+		return;
+
+	mutex_lock(&g2d->cmdlist_mutex);
+	/*
+	 * The commands in run_cmdlist have completed, so unmap all gem
+	 * objects in each command node to drop their references.
+	 */
+	list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
+		g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
+	list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
+	mutex_unlock(&g2d->cmdlist_mutex);
+
+	kmem_cache_free(g2d->runqueue_slab, runqueue_node);
+}
+
+static void g2d_exec_runqueue(struct g2d_data *g2d)
+{
+	g2d->runqueue_node = g2d_get_runqueue_node(g2d);
+	if (g2d->runqueue_node)
+		g2d_dma_start(g2d, g2d->runqueue_node);
+}
+
+static void g2d_runqueue_worker(struct work_struct *work)
+{
+	struct g2d_data *g2d = container_of(work, struct g2d_data,
+					    runqueue_work);
+
+	mutex_lock(&g2d->runqueue_mutex);
+	clk_disable(g2d->gate_clk);
+	pm_runtime_put_sync(g2d->dev);
+
+	complete(&g2d->runqueue_node->complete);
+	if (g2d->runqueue_node->async)
+		g2d_free_runqueue_node(g2d, g2d->runqueue_node);
+
+	if (g2d->suspended)
+		g2d->runqueue_node = NULL;
+	else
+		g2d_exec_runqueue(g2d);
+	mutex_unlock(&g2d->runqueue_mutex);
+}
+
+static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
+{
+	struct drm_device *drm_dev = g2d->subdrv.drm_dev;
+	struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
+	struct drm_exynos_pending_g2d_event *e;
+	struct timeval now;
+	unsigned long flags;
+
+	if (list_empty(&runqueue_node->event_list))
+		return;
+
+	e = list_first_entry(&runqueue_node->event_list,
+			     struct drm_exynos_pending_g2d_event, base.link);
+
+	do_gettimeofday(&now);
+	e->event.tv_sec = now.tv_sec;
+	e->event.tv_usec = now.tv_usec;
+	e->event.cmdlist_no = cmdlist_no;
+
+	spin_lock_irqsave(&drm_dev->event_lock, flags);
+	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+	wake_up_interruptible(&e->base.file_priv->event_wait);
+	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+}
+
+static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
+{
+	struct g2d_data *g2d = dev_id;
+	u32 pending;
+
+	pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
+	if (pending)
+		writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);
+
+	if (pending & G2D_INTP_GCMD_FIN) {
+		u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);
+
+		cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
+						G2D_DMA_LIST_DONE_COUNT_OFFSET;
+
+		g2d_finish_event(g2d, cmdlist_no);
+
+		writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
+		if (!(pending & G2D_INTP_ACMD_FIN)) {
+			writel_relaxed(G2D_DMA_CONTINUE,
+					g2d->regs + G2D_DMA_COMMAND);
+		}
+	}
+
+	if (pending & G2D_INTP_ACMD_FIN)
+		queue_work(g2d->g2d_workq, &g2d->runqueue_work);
+
+	return IRQ_HANDLED;
+}
+
+static int g2d_check_reg_offset(struct device *dev,
+				struct g2d_cmdlist_node *node,
+				int nr, bool for_addr)
+{
+	struct g2d_cmdlist *cmdlist = node->cmdlist;
+	int reg_offset;
+	int index;
+	int i;
+
+	for (i = 0; i < nr; i++) {
+		struct g2d_buf_info *buf_info = &node->buf_info;
+		struct g2d_buf_desc *buf_desc;
+		enum g2d_reg_type reg_type;
+		unsigned long value;
+
+		index = cmdlist->last - 2 * (i + 1);
+
+		reg_offset = cmdlist->data[index] & ~0xfffff000;
+		if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
+			goto err;
+		if (reg_offset % 4)
+			goto err;
+
+		switch (reg_offset) {
+		case G2D_SRC_BASE_ADDR:
+		case G2D_SRC_PLANE2_BASE_ADDR:
+		case G2D_DST_BASE_ADDR:
+		case G2D_DST_PLANE2_BASE_ADDR:
+		case G2D_PAT_BASE_ADDR:
+		case G2D_MSK_BASE_ADDR:
+			if (!for_addr)
+				goto err;
+
+			reg_type = g2d_get_reg_type(reg_offset);
+			if (reg_type == REG_TYPE_NONE)
+				goto err;
+
+			/* check userptr buffer type. */
+			if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
+				buf_info->types[reg_type] = BUF_TYPE_USERPTR;
+				cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+			} else
+				buf_info->types[reg_type] = BUF_TYPE_GEM;
+			break;
+		case G2D_SRC_COLOR_MODE:
+		case G2D_DST_COLOR_MODE:
+			if (for_addr)
+				goto err;
+
+			reg_type = g2d_get_reg_type(reg_offset);
+			if (reg_type == REG_TYPE_NONE)
+				goto err;
+
+			buf_desc = &buf_info->descs[reg_type];
+			value = cmdlist->data[index + 1];
+
+			buf_desc->format = value & 0xf;
+			break;
+		case G2D_SRC_LEFT_TOP:
+		case G2D_DST_LEFT_TOP:
+			if (for_addr)
+				goto err;
+
+			reg_type = g2d_get_reg_type(reg_offset);
+			if (reg_type == REG_TYPE_NONE)
+				goto err;
+
+			buf_desc = &buf_info->descs[reg_type];
+			value = cmdlist->data[index + 1];
+
+			buf_desc->left_x = value & 0x1fff;
+			buf_desc->top_y = (value & 0x1fff0000) >> 16;
+			break;
+		case G2D_SRC_RIGHT_BOTTOM:
+		case G2D_DST_RIGHT_BOTTOM:
+			if (for_addr)
+				goto err;
+
+			reg_type = g2d_get_reg_type(reg_offset);
+			if (reg_type == REG_TYPE_NONE)
+				goto err;
+
+			buf_desc = &buf_info->descs[reg_type];
+			value = cmdlist->data[index + 1];
+
+			buf_desc->right_x = value & 0x1fff;
+			buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
+			break;
+		default:
+			if (for_addr)
+				goto err;
+			break;
+		}
+	}
+
+	return 0;
+
+err:
+	dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
+	return -EINVAL;
+}
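+
+/*
+ * Cmdlist layout assumed by the parser above: cmdlist->data[] holds
+ * (register offset, value) pairs, scanned backwards from
+ * cmdlist->last. For nr == 2 the tail looks like:
+ *
+ *	data[last - 4] = G2D_SRC_BASE_ADDR;  data[last - 3] = gem handle;
+ *	data[last - 2] = G2D_DST_BASE_ADDR;  data[last - 1] = gem handle;
+ *
+ * Bit 31 of the offset word (G2D_BUF_USERPTR) selects the userptr
+ * path instead of a GEM handle for that base address.
+ */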
+
+/* ioctl functions */
+int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
+			     struct drm_file *file)
+{
+	struct drm_exynos_g2d_get_ver *ver = data;
+
+	ver->major = G2D_HW_MAJOR_VER;
+	ver->minor = G2D_HW_MINOR_VER;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
+
+int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
+				 struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+	struct device *dev = g2d_priv->dev;
+	struct g2d_data *g2d;
+	struct drm_exynos_g2d_set_cmdlist *req = data;
+	struct drm_exynos_g2d_cmd *cmd;
+	struct drm_exynos_pending_g2d_event *e;
+	struct g2d_cmdlist_node *node;
+	struct g2d_cmdlist *cmdlist;
+	unsigned long flags;
+	int size;
+	int ret;
+
+	if (!dev)
+		return -ENODEV;
+
+	g2d = dev_get_drvdata(dev);
+	if (!g2d)
+		return -EFAULT;
+
+	node = g2d_get_cmdlist(g2d);
+	if (!node)
+		return -ENOMEM;
+
+	node->event = NULL;
+
+	if (req->event_type != G2D_EVENT_NOT) {
+		spin_lock_irqsave(&drm_dev->event_lock, flags);
+		if (file->event_space < sizeof(e->event)) {
+			spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+			ret = -ENOMEM;
+			goto err;
+		}
+		file->event_space -= sizeof(e->event);
+		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+		e = kzalloc(sizeof(*node->event), GFP_KERNEL);
+		if (!e) {
+			dev_err(dev, "failed to allocate event\n");
+
+			spin_lock_irqsave(&drm_dev->event_lock, flags);
+			file->event_space += sizeof(e->event);
+			spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		e->event.base.type = DRM_EXYNOS_G2D_EVENT;
+		e->event.base.length = sizeof(e->event);
+		e->event.user_data = req->user_data;
+		e->base.event = &e->event.base;
+		e->base.file_priv = file;
+		e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+		node->event = e;
+	}
+
+	cmdlist = node->cmdlist;
+
+	cmdlist->last = 0;
+
+	/*
+	 * If the SFR registers are not cleared, the cmdlist is affected by
+	 * register values left over from the previous cmdlist. The G2D
+	 * hardware executes the SFR clear command and the next command at
+	 * the same time, so that next command is ignored and execution
+	 * resumes at the command after it. A dummy command is therefore
+	 * needed right after the SFR clear command.
+	 */
+	cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
+	cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
+	cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
+	cmdlist->data[cmdlist->last++] = 0;
+
+	/*
+	 * The 'LIST_HOLD' command should be written to the DMA_HOLD_CMD
+	 * register and the GCF bit set in the INTEN register if the user
+	 * wants a G2D interrupt event once the current command list has
+	 * finished executing.
+	 * Otherwise only the ACF bit should be set in the INTEN register,
+	 * so that a single interrupt occurs after all command lists have
+	 * completed.
+	 */
+	if (node->event) {
+		cmdlist->data[cmdlist->last++] = G2D_INTEN;
+		cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
+		cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
+		cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
+	} else {
+		cmdlist->data[cmdlist->last++] = G2D_INTEN;
+		cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
+	}
+
+	/* Check the cmdlist size; the final two entries are for G2D_BITBLT_START. */
+	size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
+	if (size > G2D_CMDLIST_DATA_NUM) {
+		dev_err(dev, "cmdlist size is too big\n");
+		ret = -EINVAL;
+		goto err_free_event;
+	}
+
+	cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd;
+
+	if (copy_from_user(cmdlist->data + cmdlist->last,
+				(void __user *)cmd,
+				sizeof(*cmd) * req->cmd_nr)) {
+		ret = -EFAULT;
+		goto err_free_event;
+	}
+	cmdlist->last += req->cmd_nr * 2;
+
+	ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
+	if (ret < 0)
+		goto err_free_event;
+
+	node->buf_info.map_nr = req->cmd_buf_nr;
+	if (req->cmd_buf_nr) {
+		struct drm_exynos_g2d_cmd *cmd_buf;
+
+		cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
+
+		if (copy_from_user(cmdlist->data + cmdlist->last,
+					(void __user *)cmd_buf,
+					sizeof(*cmd_buf) * req->cmd_buf_nr)) {
+			ret = -EFAULT;
+			goto err_free_event;
+		}
+		cmdlist->last += req->cmd_buf_nr * 2;
+
+		ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
+		if (ret < 0)
+			goto err_free_event;
+
+		ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
+		if (ret < 0)
+			goto err_unmap;
+	}
+
+	cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
+	cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;
+
+	/* head */
+	cmdlist->head = cmdlist->last / 2;
+
+	/* tail */
+	cmdlist->data[cmdlist->last] = 0;
+
+	g2d_add_cmdlist_to_inuse(g2d_priv, node);
+
+	return 0;
+
+err_unmap:
+	g2d_unmap_cmdlist_gem(g2d, node, file);
+err_free_event:
+	if (node->event) {
+		spin_lock_irqsave(&drm_dev->event_lock, flags);
+		file->event_space += sizeof(e->event);
+		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+		kfree(node->event);
+	}
+err:
+	g2d_put_cmdlist(g2d, node);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
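+
+/*
+ * Minimal userspace sketch of this ioctl, assuming the uapi structs
+ * from drm/exynos_drm.h and the G2D register offsets as used by this
+ * driver; error handling omitted and values illustrative:
+ *
+ *	struct drm_exynos_g2d_cmd cmd[2] = {
+ *		{ G2D_SRC_COLOR_MODE, G2D_FMT_ARGB8888 },
+ *		{ G2D_DST_COLOR_MODE, G2D_FMT_ARGB8888 },
+ *	};
+ *	struct drm_exynos_g2d_set_cmdlist req = {
+ *		.cmd		= (uint64_t)(uintptr_t)cmd,
+ *		.cmd_nr		= 2,
+ *		.event_type	= G2D_EVENT_NOT,
+ *	};
+ *
+ *	ioctl(fd, DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST, &req);
+ */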
+
+int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
+			  struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+	struct device *dev = g2d_priv->dev;
+	struct g2d_data *g2d;
+	struct drm_exynos_g2d_exec *req = data;
+	struct g2d_runqueue_node *runqueue_node;
+	struct list_head *run_cmdlist;
+	struct list_head *event_list;
+
+	if (!dev)
+		return -ENODEV;
+
+	g2d = dev_get_drvdata(dev);
+	if (!g2d)
+		return -EFAULT;
+
+	runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
+	if (!runqueue_node) {
+		dev_err(dev, "failed to allocate memory\n");
+		return -ENOMEM;
+	}
+	run_cmdlist = &runqueue_node->run_cmdlist;
+	event_list = &runqueue_node->event_list;
+	INIT_LIST_HEAD(run_cmdlist);
+	INIT_LIST_HEAD(event_list);
+	init_completion(&runqueue_node->complete);
+	runqueue_node->async = req->async;
+
+	list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
+	list_splice_init(&g2d_priv->event_list, event_list);
+
+	if (list_empty(run_cmdlist)) {
+		dev_err(dev, "there is no inuse cmdlist\n");
+		kmem_cache_free(g2d->runqueue_slab, runqueue_node);
+		return -EPERM;
+	}
+
+	mutex_lock(&g2d->runqueue_mutex);
+	runqueue_node->pid = current->pid;
+	runqueue_node->filp = file;
+	list_add_tail(&runqueue_node->list, &g2d->runqueue);
+	if (!g2d->runqueue_node)
+		g2d_exec_runqueue(g2d);
+	mutex_unlock(&g2d->runqueue_mutex);
+
+	if (runqueue_node->async)
+		goto out;
+
+	wait_for_completion(&runqueue_node->complete);
+	g2d_free_runqueue_node(g2d, runqueue_node);
+
+out:
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
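+
+/*
+ * A matching userspace sketch for execution (illustrative, same
+ * assumptions as the set_cmdlist example above):
+ *
+ *	struct drm_exynos_g2d_exec exec = { .async = 0 };
+ *
+ *	ioctl(fd, DRM_IOCTL_EXYNOS_G2D_EXEC, &exec);
+ *	// async == 0: the ioctl blocks on runqueue_node->complete until
+ *	// the runqueue worker signals it.
+ *	// async == 1: the ioctl returns immediately; completion can be
+ *	// observed via the DRM event queue (read() on the fd) when an
+ *	// event_type other than G2D_EVENT_NOT was set at cmdlist time.
+ */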
+
+static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+	struct g2d_data *g2d;
+	int ret;
+
+	g2d = dev_get_drvdata(dev);
+	if (!g2d)
+		return -EFAULT;
+
+	/* allocate dma-aware cmdlist buffer. */
+	ret = g2d_init_cmdlist(g2d);
+	if (ret < 0) {
+		dev_err(dev, "cmdlist init failed\n");
+		return ret;
+	}
+
+	if (!is_drm_iommu_supported(drm_dev))
+		return 0;
+
+	ret = drm_iommu_attach_device(drm_dev, dev);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable iommu.\n");
+		g2d_fini_cmdlist(g2d);
+	}
+
+	return ret;
+}
+
+static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	if (!is_drm_iommu_supported(drm_dev))
+		return;
+
+	drm_iommu_detach_device(drm_dev, dev);
+}
+
+static int g2d_open(struct drm_device *drm_dev, struct device *dev,
+			struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_g2d_private *g2d_priv;
+
+	g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
+	if (!g2d_priv) {
+		dev_err(dev, "failed to allocate g2d private data\n");
+		return -ENOMEM;
+	}
+
+	g2d_priv->dev = dev;
+	file_priv->g2d_priv = g2d_priv;
+
+	INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
+	INIT_LIST_HEAD(&g2d_priv->event_list);
+	INIT_LIST_HEAD(&g2d_priv->userptr_list);
+
+	return 0;
+}
+
+static void g2d_close(struct drm_device *drm_dev, struct device *dev,
+			struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+	struct g2d_data *g2d;
+	struct g2d_cmdlist_node *node, *n;
+
+	if (!dev)
+		return;
+
+	g2d = dev_get_drvdata(dev);
+	if (!g2d)
+		return;
+
+	mutex_lock(&g2d->cmdlist_mutex);
+	list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+		/*
+		 * Unmap all gem objects that have not completed.
+		 *
+		 * P.S. if the current process was terminated forcibly,
+		 * there may still be commands in inuse_cmdlist, so unmap
+		 * them here.
+		 */
+		g2d_unmap_cmdlist_gem(g2d, node, file);
+		list_move_tail(&node->list, &g2d->free_cmdlist);
+	}
+	mutex_unlock(&g2d->cmdlist_mutex);
+
+	/* release all g2d_userptr in pool. */
+	g2d_userptr_free_all(drm_dev, g2d, file);
+
+	kfree(file_priv->g2d_priv);
+}
+
+static int g2d_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct g2d_data *g2d;
+	struct exynos_drm_subdrv *subdrv;
+	int ret;
+
+	g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
+	if (!g2d) {
+		dev_err(dev, "failed to allocate driver data\n");
+		return -ENOMEM;
+	}
+
+	g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
+			sizeof(struct g2d_runqueue_node), 0, 0, NULL);
+	if (!g2d->runqueue_slab)
+		return -ENOMEM;
+
+	g2d->dev = dev;
+
+	g2d->g2d_workq = create_singlethread_workqueue("g2d");
+	if (!g2d->g2d_workq) {
+		dev_err(dev, "failed to create workqueue\n");
+		ret = -EINVAL;
+		goto err_destroy_slab;
+	}
+
+	INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
+	INIT_LIST_HEAD(&g2d->free_cmdlist);
+	INIT_LIST_HEAD(&g2d->runqueue);
+
+	mutex_init(&g2d->cmdlist_mutex);
+	mutex_init(&g2d->runqueue_mutex);
+
+	g2d->gate_clk = devm_clk_get(dev, "fimg2d");
+	if (IS_ERR(g2d->gate_clk)) {
+		dev_err(dev, "failed to get gate clock\n");
+		ret = PTR_ERR(g2d->gate_clk);
+		goto err_destroy_workqueue;
+	}
+
+	pm_runtime_enable(dev);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	g2d->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(g2d->regs)) {
+		ret = PTR_ERR(g2d->regs);
+		goto err_put_clk;
+	}
+
+	g2d->irq = platform_get_irq(pdev, 0);
+	if (g2d->irq < 0) {
+		dev_err(dev, "failed to get irq\n");
+		ret = g2d->irq;
+		goto err_put_clk;
+	}
+
+	ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0,
+								"drm_g2d", g2d);
+	if (ret < 0) {
+		dev_err(dev, "irq request failed\n");
+		goto err_put_clk;
+	}
+
+	g2d->max_pool = MAX_POOL;
+
+	platform_set_drvdata(pdev, g2d);
+
+	subdrv = &g2d->subdrv;
+	subdrv->dev = dev;
+	subdrv->probe = g2d_subdrv_probe;
+	subdrv->remove = g2d_subdrv_remove;
+	subdrv->open = g2d_open;
+	subdrv->close = g2d_close;
+
+	ret = exynos_drm_subdrv_register(subdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to register drm g2d device\n");
+		goto err_put_clk;
+	}
+
+	dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
+			G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
+
+	return 0;
+
+err_put_clk:
+	pm_runtime_disable(dev);
+err_destroy_workqueue:
+	destroy_workqueue(g2d->g2d_workq);
+err_destroy_slab:
+	kmem_cache_destroy(g2d->runqueue_slab);
+	return ret;
+}
+
+static int g2d_remove(struct platform_device *pdev)
+{
+	struct g2d_data *g2d = platform_get_drvdata(pdev);
+
+	cancel_work_sync(&g2d->runqueue_work);
+	exynos_drm_subdrv_unregister(&g2d->subdrv);
+
+	while (g2d->runqueue_node) {
+		g2d_free_runqueue_node(g2d, g2d->runqueue_node);
+		g2d->runqueue_node = g2d_get_runqueue_node(g2d);
+	}
+
+	pm_runtime_disable(&pdev->dev);
+
+	g2d_fini_cmdlist(g2d);
+	destroy_workqueue(g2d->g2d_workq);
+	kmem_cache_destroy(g2d->runqueue_slab);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int g2d_suspend(struct device *dev)
+{
+	struct g2d_data *g2d = dev_get_drvdata(dev);
+
+	mutex_lock(&g2d->runqueue_mutex);
+	g2d->suspended = true;
+	mutex_unlock(&g2d->runqueue_mutex);
+
+	while (g2d->runqueue_node)
+		/* FIXME: good range? */
+		usleep_range(500, 1000);
+
+	flush_work(&g2d->runqueue_work);
+
+	return 0;
+}
+
+static int g2d_resume(struct device *dev)
+{
+	struct g2d_data *g2d = dev_get_drvdata(dev);
+
+	g2d->suspended = false;
+	g2d_exec_runqueue(g2d);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id exynos_g2d_match[] = {
+	{ .compatible = "samsung,exynos5250-g2d" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, exynos_g2d_match);
+#endif
+
+struct platform_driver g2d_driver = {
+	.probe		= g2d_probe,
+	.remove		= g2d_remove,
+	.driver		= {
+		.name	= "s5p-g2d",
+		.owner	= THIS_MODULE,
+		.pm	= &g2d_pm_ops,
+		.of_match_table = of_match_ptr(exynos_g2d_match),
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_g2d.h
new file mode 100644
index 0000000..1a9c7ca
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_g2d.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_DRM_EXYNOS_G2D
+extern int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+extern int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file_priv);
+extern int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+#else
+static inline int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
+					   struct drm_file *file_priv)
+{
+	return -ENODEV;
+}
+
+static inline int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev,
+					       void *data,
+					       struct drm_file *file_priv)
+{
+	return -ENODEV;
+}
+
+static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file_priv)
+{
+	return -ENODEV;
+}
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_gem.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_gem.c
new file mode 100644
index 0000000..cf4543f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -0,0 +1,816 @@
+/* exynos_drm_gem.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/shmem_fs.h>
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_buf.h"
+
+static unsigned int convert_to_vm_err_msg(int msg)
+{
+	unsigned int out_msg;
+
+	switch (msg) {
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+		out_msg = VM_FAULT_NOPAGE;
+		break;
+
+	case -ENOMEM:
+		out_msg = VM_FAULT_OOM;
+		break;
+
+	default:
+		out_msg = VM_FAULT_SIGBUS;
+		break;
+	}
+
+	return out_msg;
+}
+
+static int check_gem_flags(unsigned int flags)
+{
+	if (flags & ~(EXYNOS_BO_MASK)) {
+		DRM_ERROR("invalid flags.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
+					struct vm_area_struct *vma)
+{
+	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
+
+	/* non-cacheable by default. */
+	if (obj->flags & EXYNOS_BO_CACHABLE)
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	else if (obj->flags & EXYNOS_BO_WC)
+		vma->vm_page_prot =
+			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	else
+		vma->vm_page_prot =
+			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+}
+
+static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
+{
+	/* TODO */
+
+	return roundup(size, PAGE_SIZE);
+}
+
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
+					struct vm_area_struct *vma,
+					unsigned long f_vaddr,
+					pgoff_t page_offset)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+	struct scatterlist *sgl;
+	unsigned long pfn;
+	int i;
+
+	if (!buf->sgt)
+		return -EINTR;
+
+	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+		DRM_ERROR("invalid page offset\n");
+		return -EINVAL;
+	}
+
+	sgl = buf->sgt->sgl;
+	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+		if (page_offset < (sgl->length >> PAGE_SHIFT))
+			break;
+		page_offset -=	(sgl->length >> PAGE_SHIFT);
+	}
+
+	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+
+	return vm_insert_mixed(vma, f_vaddr, pfn);
+}
+
+static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
+					struct drm_file *file_priv,
+					unsigned int *handle)
+{
+	int ret;
+
+	/*
+	 * allocate an id in the idr table where the obj is registered;
+	 * the handle holds the id that userspace sees.
+	 */
+	ret = drm_gem_handle_create(file_priv, obj, handle);
+	if (ret)
+		return ret;
+
+	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+
+	/* drop reference from allocate - handle holds it now. */
+	drm_gem_object_unreference_unlocked(obj);
+
+	return 0;
+}
+
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+	struct drm_gem_object *obj;
+	struct exynos_drm_gem_buf *buf;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	obj = &exynos_gem_obj->base;
+	buf = exynos_gem_obj->buffer;
+
+	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+
+	/*
+	 * do not release a memory region owned by the exporter.
+	 *
+	 * the region will be released by the exporter
+	 * once the dmabuf's refcount becomes 0.
+	 */
+	if (obj->import_attach)
+		goto out;
+
+	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+
+out:
+	exynos_drm_fini_buf(obj->dev, buf);
+	exynos_gem_obj->buffer = NULL;
+
+	if (obj->map_list.map)
+		drm_gem_free_mmap_offset(obj);
+
+	/* release file pointer to gem object. */
+	drm_gem_object_release(obj);
+
+	kfree(exynos_gem_obj);
+	exynos_gem_obj = NULL;
+}
+
+unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
+						unsigned int gem_handle,
+						struct drm_file *file_priv)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct drm_gem_object *obj;
+
+	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object.\n");
+		return 0;
+	}
+
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	drm_gem_object_unreference_unlocked(obj);
+
+	return exynos_gem_obj->buffer->size;
+}
+
+struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+						      unsigned long size)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct drm_gem_object *obj;
+	int ret;
+
+	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
+	if (!exynos_gem_obj) {
+		DRM_ERROR("failed to allocate exynos gem object\n");
+		return NULL;
+	}
+
+	exynos_gem_obj->size = size;
+	obj = &exynos_gem_obj->base;
+
+	ret = drm_gem_object_init(dev, obj, size);
+	if (ret < 0) {
+		DRM_ERROR("failed to initialize gem object\n");
+		kfree(exynos_gem_obj);
+		return NULL;
+	}
+
+	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
+
+	return exynos_gem_obj;
+}
+
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+						unsigned int flags,
+						unsigned long size)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct exynos_drm_gem_buf *buf;
+	int ret;
+
+	if (!size) {
+		DRM_ERROR("invalid size.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	size = roundup_gem_size(size, flags);
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	ret = check_gem_flags(flags);
+	if (ret)
+		return ERR_PTR(ret);
+
+	buf = exynos_drm_init_buf(dev, size);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	exynos_gem_obj = exynos_drm_gem_init(dev, size);
+	if (!exynos_gem_obj) {
+		ret = -ENOMEM;
+		goto err_fini_buf;
+	}
+
+	exynos_gem_obj->buffer = buf;
+
+	/* set memory type and cache attribute from user side. */
+	exynos_gem_obj->flags = flags;
+
+	ret = exynos_drm_alloc_buf(dev, buf, flags);
+	if (ret < 0) {
+		drm_gem_object_release(&exynos_gem_obj->base);
+		goto err_fini_buf;
+	}
+
+	return exynos_gem_obj;
+
+err_fini_buf:
+	exynos_drm_fini_buf(dev, buf);
+	return ERR_PTR(ret);
+}
+
+int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct drm_exynos_gem_create *args = data;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
+	if (IS_ERR(exynos_gem_obj))
+		return PTR_ERR(exynos_gem_obj);
+
+	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+			&args->handle);
+	if (ret) {
+		exynos_drm_gem_destroy(exynos_gem_obj);
+		return ret;
+	}
+
+	return 0;
+}
+
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+					unsigned int gem_handle,
+					struct drm_file *filp)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct drm_gem_object *obj;
+
+	obj = drm_gem_object_lookup(dev, filp, gem_handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	return &exynos_gem_obj->buffer->dma_addr;
+}
+
+void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
+					unsigned int gem_handle,
+					struct drm_file *filp)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct drm_gem_object *obj;
+
+	obj = drm_gem_object_lookup(dev, filp, gem_handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object.\n");
+		return;
+	}
+
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	drm_gem_object_unreference_unlocked(obj);
+
+	/*
+	 * decrease obj->refcount one more time because we have already
+	 * increased it in exynos_drm_gem_get_dma_addr().
+	 */
+	drm_gem_object_unreference_unlocked(obj);
+}
+
+int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv)
+{
+	struct drm_exynos_gem_map_off *args = data;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
+			args->handle, (unsigned long)args->offset);
+
+	if (!(dev->driver->driver_features & DRIVER_GEM)) {
+		DRM_ERROR("does not support GEM.\n");
+		return -ENODEV;
+	}
+
+	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+			&args->offset);
+}
+
+static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
+							struct file *filp)
+{
+	struct drm_file *file_priv;
+
+	/* find current process's drm_file from filelist. */
+	list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
+		if (file_priv->filp == filp)
+			return file_priv;
+
+	WARN_ON(1);
+
+	return ERR_PTR(-EFAULT);
+}
+
+static int exynos_drm_gem_mmap_buffer(struct file *filp,
+				      struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = filp->private_data;
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	struct drm_device *drm_dev = obj->dev;
+	struct exynos_drm_gem_buf *buffer;
+	struct drm_file *file_priv;
+	unsigned long vm_size;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_private_data = obj;
+	vma->vm_ops = drm_dev->driver->gem_vm_ops;
+
+	/* restore it to driver's fops. */
+	filp->f_op = fops_get(drm_dev->driver->fops);
+
+	file_priv = exynos_drm_find_drm_file(drm_dev, filp);
+	if (IS_ERR(file_priv))
+		return PTR_ERR(file_priv);
+
+	/* restore it to drm_file. */
+	filp->private_data = file_priv;
+
+	update_vm_cache_attr(exynos_gem_obj, vma);
+
+	vm_size = vma->vm_end - vma->vm_start;
+
+	/*
+	 * the buffer describes physically contiguous memory allocated
+	 * at user request or at framebuffer creation.
+	 */
+	buffer = exynos_gem_obj->buffer;
+
+	/* check if user-requested size is valid. */
+	if (vm_size > buffer->size)
+		return -EINVAL;
+
+	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
+				buffer->dma_addr, buffer->size,
+				&buffer->dma_attrs);
+	if (ret < 0) {
+		DRM_ERROR("failed to mmap.\n");
+		return ret;
+	}
+
+	/*
+	 * take a reference to this mapping of the object. And this reference
+	 * is unreferenced by the corresponding vm_close call.
+	 */
+	drm_gem_object_reference(obj);
+
+	drm_vm_open_locked(drm_dev, vma);
+
+	return 0;
+}
+
+static const struct file_operations exynos_drm_gem_fops = {
+	.mmap = exynos_drm_gem_mmap_buffer,
+};
+
+int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_exynos_gem_mmap *args = data;
+	struct drm_gem_object *obj;
+	unsigned int addr;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!(dev->driver->driver_features & DRIVER_GEM)) {
+		DRM_ERROR("does not support GEM.\n");
+		return -ENODEV;
+	}
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * We have to use the gem object and its fops for the specific
+	 * mmapper, but vm_mmap() can deliver only a filp. So we change
+	 * filp->f_op and filp->private_data temporarily, then restore
+	 * them. It is important to hold the lock until the settings are
+	 * restored, to prevent others from misusing filp->f_op or
+	 * filp->private_data.
+	 */
+	mutex_lock(&dev->struct_mutex);
+
+	/*
+	 * Set the specific mmapper's fops; they will be restored by
+	 * exynos_drm_gem_mmap_buffer to dev->driver->fops.
+	 * This is used to call the specific mapper temporarily.
+	 */
+	file_priv->filp->f_op = &exynos_drm_gem_fops;
+
+	/*
+	 * Store the gem object in private_data so that the specific
+	 * mmapper can retrieve it; private_data will be restored by
+	 * exynos_drm_gem_mmap_buffer to the drm_file.
+	 */
+	file_priv->filp->private_data = obj;
+
+	addr = vm_mmap(file_priv->filp, 0, args->size,
+			PROT_READ | PROT_WRITE, MAP_SHARED, 0);
+
+	drm_gem_object_unreference(obj);
+
+	if (IS_ERR((void *)addr)) {
+		/* check filp->f_op, filp->private_data are restored */
+		if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
+			file_priv->filp->f_op = fops_get(dev->driver->fops);
+			file_priv->filp->private_data = file_priv;
+		}
+		mutex_unlock(&dev->struct_mutex);
+		return PTR_ERR((void *)addr);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	args->mapped = addr;
+
+	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
+
+	return 0;
+}
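+
+/*
+ * Userspace view of the f_op juggling above, as a hedged sketch
+ * (assumes the exynos uapi header; error handling omitted):
+ *
+ *	struct drm_exynos_gem_mmap req = {
+ *		.handle	= handle,
+ *		.size	= size,
+ *	};
+ *
+ *	ioctl(fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &req);
+ *	void *ptr = (void *)(unsigned long)req.mapped;
+ *	// the kernel already performed vm_mmap() on the caller's
+ *	// behalf, so no separate mmap() call is needed.
+ */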
+
+int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct drm_exynos_gem_info *args = data;
+	struct drm_gem_object *obj;
+
+	mutex_lock(&dev->struct_mutex);
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object.\n");
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	args->flags = exynos_gem_obj->flags;
+	args->size = exynos_gem_obj->size;
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+	struct vm_area_struct *vma_copy;
+
+	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+	if (!vma_copy)
+		return NULL;
+
+	if (vma->vm_ops && vma->vm_ops->open)
+		vma->vm_ops->open(vma);
+
+	if (vma->vm_file)
+		get_file(vma->vm_file);
+
+	memcpy(vma_copy, vma, sizeof(*vma));
+
+	vma_copy->vm_mm = NULL;
+	vma_copy->vm_next = NULL;
+	vma_copy->vm_prev = NULL;
+
+	return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+	if (!vma)
+		return;
+
+	if (vma->vm_ops && vma->vm_ops->close)
+		vma->vm_ops->close(vma);
+
+	if (vma->vm_file)
+		fput(vma->vm_file);
+
+	kfree(vma);
+}
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+						unsigned int npages,
+						struct page **pages,
+						struct vm_area_struct *vma)
+{
+	int get_npages;
+
+	/* the memory region was mmapped with VM_PFNMAP. */
+	if (vma_is_io(vma)) {
+		unsigned int i;
+
+		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+			unsigned long pfn;
+			int ret = follow_pfn(vma, start, &pfn);
+			if (ret)
+				return ret;
+
+			pages[i] = pfn_to_page(pfn);
+		}
+
+		if (i != npages) {
+			DRM_ERROR("failed to get user_pages.\n");
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	get_npages = get_user_pages(current, current->mm, start,
+					npages, 1, 1, pages, NULL);
+	get_npages = max(get_npages, 0);
+	if (get_npages != npages) {
+		DRM_ERROR("failed to get user_pages.\n");
+		while (get_npages)
+			put_page(pages[--get_npages]);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+					unsigned int npages,
+					struct vm_area_struct *vma)
+{
+	if (!vma_is_io(vma)) {
+		unsigned int i;
+
+		for (i = 0; i < npages; i++) {
+			set_page_dirty_lock(pages[i]);
+
+			/*
+			 * undo the reference we took when populating
+			 * the table.
+			 */
+			put_page(pages[i]);
+		}
+	}
+}
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir)
+{
+	int nents;
+
+	mutex_lock(&drm_dev->struct_mutex);
+
+	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with dma.\n");
+		mutex_unlock(&drm_dev->struct_mutex);
+		return nents;
+	}
+
+	mutex_unlock(&drm_dev->struct_mutex);
+	return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir)
+{
+	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
+
+int exynos_drm_gem_init_object(struct drm_gem_object *obj)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	return 0;
+}
+
+void exynos_drm_gem_free_object(struct drm_gem_object *obj)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct exynos_drm_gem_buf *buf;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+	buf = exynos_gem_obj->buffer;
+
+	if (obj->import_attach)
+		drm_prime_gem_destroy(obj, buf->sgt);
+
+	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
+}
+
+int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
+			       struct drm_device *dev,
+			       struct drm_mode_create_dumb *args)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * allocate memory to be used for the framebuffer.
+	 * - this callback is called by a user application
+	 *	via the DRM_IOCTL_MODE_CREATE_DUMB command.
+	 */
+
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
+
+	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
+						EXYNOS_BO_WC, args->size);
+	if (IS_ERR(exynos_gem_obj))
+		return PTR_ERR(exynos_gem_obj);
+
+	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+			&args->handle);
+	if (ret) {
+		exynos_drm_gem_destroy(exynos_gem_obj);
+		return ret;
+	}
+
+	return 0;
+}
+
+int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+				   struct drm_device *dev, uint32_t handle,
+				   uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int ret = 0;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	mutex_lock(&dev->struct_mutex);
+
+	/*
+	 * get the offset of the memory allocated for the drm framebuffer.
+	 * - this callback is called by a user application
+	 *	via the DRM_IOCTL_MODE_MAP_DUMB command.
+	 */
+
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object.\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if (!obj->map_list.map) {
+		ret = drm_gem_create_mmap_offset(obj);
+		if (ret)
+			goto out;
+	}
+
+	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
+
+out:
+	drm_gem_object_unreference(obj);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
+				struct drm_device *dev,
+				unsigned int handle)
+{
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * obj->refcount and obj->handle_count are decreased, and
+	 * if both of them reach 0 then exynos_drm_gem_free_object()
+	 * is called via the callback to release the resources.
+	 */
+	ret = drm_gem_handle_delete(file_priv, handle);
+	if (ret < 0) {
+		DRM_ERROR("failed to delete drm_gem_handle.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+	unsigned long f_vaddr;
+	pgoff_t page_offset;
+	int ret;
+
+	page_offset = ((unsigned long)vmf->virtual_address -
+			vma->vm_start) >> PAGE_SHIFT;
+	f_vaddr = (unsigned long)vmf->virtual_address;
+
+	mutex_lock(&dev->struct_mutex);
+
+	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
+	if (ret < 0)
+		DRM_ERROR("failed to map a buffer with user.\n");
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return convert_to_vm_err_msg(ret);
+}
+
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct drm_gem_object *obj;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* set vm_area_struct. */
+	ret = drm_gem_mmap(filp, vma);
+	if (ret < 0) {
+		DRM_ERROR("failed to mmap.\n");
+		return ret;
+	}
+
+	obj = vma->vm_private_data;
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	ret = check_gem_flags(exynos_gem_obj->flags);
+	if (ret) {
+		drm_gem_vm_close(vma);
+		drm_gem_free_mmap_offset(obj);
+		return ret;
+	}
+
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+
+	update_vm_cache_attr(exynos_gem_obj, vma);
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_gem.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_gem.h
new file mode 100644
index 0000000..468766b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -0,0 +1,201 @@
+/* exynos_drm_gem.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_GEM_H_
+#define _EXYNOS_DRM_GEM_H_
+
+#define to_exynos_gem_obj(x)	container_of(x,\
+			struct exynos_drm_gem_obj, base)
+
+#define IS_NONCONTIG_BUFFER(f)		((f) & EXYNOS_BO_NONCONTIG)
+
+/*
+ * exynos drm gem buffer structure.
+ *
+ * @kvaddr: kernel virtual address of the allocated memory region.
+ * @userptr: user space address.
+ * @dma_addr: bus address (accessed by dma) of the allocated memory region.
+ *	- this is the physical address without an IOMMU and
+ *	the device address with an IOMMU.
+ * @dma_attrs: dma attributes used to allocate and map the region.
+ * @write: whether pages will be written to by the caller.
+ * @pages: Array of backing pages.
+ * @sgt: sg table to transfer page data.
+ * @size: size of the allocated memory region.
+ * @pfnmap: indicates whether the memory region from userptr is mmapped
+ *	with VM_PFNMAP or not.
+ */
+struct exynos_drm_gem_buf {
+	void __iomem		*kvaddr;
+	unsigned long		userptr;
+	dma_addr_t		dma_addr;
+	struct dma_attrs	dma_attrs;
+	unsigned int		write;
+	struct page		**pages;
+	struct sg_table		*sgt;
+	unsigned long		size;
+	bool			pfnmap;
+};
+
+/*
+ * exynos drm buffer structure.
+ *
+ * @base: a gem object.
+ *	- a new handle to this gem object is created
+ *	by drm_gem_handle_create().
+ * @buffer: a pointer to an exynos_drm_gem_buffer object.
+ *	- contains the information about the contiguous memory region
+ *	allocated at user request or at framebuffer creation.
+ * @size: size requested by the user, in bytes, aligned to page size.
+ * @vma: a pointer to the vm_area.
+ * @flags: indicates the memory type and cache attribute of the
+ *	allocated buffer.
+ *
+ * P.S. this object is handed over to userspace as kms_bo.handle so
+ *	userspace can access the buffer through kms_bo.handle.
+ */
+struct exynos_drm_gem_obj {
+	struct drm_gem_object		base;
+	struct exynos_drm_gem_buf	*buffer;
+	unsigned long			size;
+	struct vm_area_struct		*vma;
+	unsigned int			flags;
+};
+
+struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+
+/* destroy a buffer with gem object */
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
+
+/* create a private gem object and initialize it. */
+struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+						      unsigned long size);
+
+/* create a new buffer with gem object */
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+						unsigned int flags,
+						unsigned long size);
+
+/*
+ * request gem object creation and buffer allocation as the size
+ * that it is calculated with framebuffer information such as width,
+ * height and bpp.
+ */
+int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+
+/*
+ * get dma address from gem handle and this function could be used for
+ * other drivers such as 2d/3d acceleration drivers.
+ * with this function call, gem object reference count would be increased.
+ */
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+					unsigned int gem_handle,
+					struct drm_file *filp);
+
+/*
+ * put dma address from gem handle and this function could be used for
+ * other drivers such as 2d/3d acceleration drivers.
+ * with this function call, gem object reference count would be decreased.
+ */
+void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
+					unsigned int gem_handle,
+					struct drm_file *filp);
+
+/* get buffer offset to map to user space. */
+int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+
+/*
+ * mmap the physically continuous memory that a gem object contains
+ * to user space.
+ */
+int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+
+/* map user space allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv);
+
+/* get buffer information to memory region allocated by gem. */
+int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv);
+
+/* get buffer size to gem handle. */
+unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
+						unsigned int gem_handle,
+						struct drm_file *file_priv);
+
+/* initialize gem object. */
+int exynos_drm_gem_init_object(struct drm_gem_object *obj);
+
+/* free gem object. */
+void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
+
+/* create memory region for drm framebuffer. */
+int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
+			       struct drm_device *dev,
+			       struct drm_mode_create_dumb *args);
+
+/* map memory region for drm framebuffer to user space. */
+int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+				   struct drm_device *dev, uint32_t handle,
+				   uint64_t *offset);
+
+/*
+ * destroy memory region allocated.
+ *	- a gem handle and physical memory region pointed by a gem object
+ *	would be released by drm_gem_handle_delete().
+ */
+int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
+				struct drm_device *dev,
+				unsigned int handle);
+
+/* page fault handler: maps the faulting (virtual) address to physical memory. */
+int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+/* set vm_flags; the vm attributes can be changed here. */
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+/* get a copy of a virtual memory region. */
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
+
+/* release a userspace virtual memory area. */
+void exynos_gem_put_vma(struct vm_area_struct *vma);
+
+/* get pages from user space. */
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+						unsigned int npages,
+						struct page **pages,
+						struct vm_area_struct *vma);
+
+/* drop the reference to pages. */
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+					unsigned int npages,
+					struct vm_area_struct *vma);
+
+/* map sgt with dma region. */
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir);
+
+/* unmap sgt from dma region. */
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644
index 0000000..762f40d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -0,0 +1,1830 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *	Jinyoung Jeon <jy0.jeon@samsung.com>
+ *	Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-gsc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_gsc.h"
+
+/*
+ * GSC stands for General SCaler and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from the memory.
+ * output DMA writes image data to memory.
+ * GSC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> GSC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> GSC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> GSC H/W ----> FIMD, Mixer.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check the use case for platform_device_id.
+ * 3. check src/dst sizes (width, height).
+ * 4. add a check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define GSC_MAX_DEVS	4
+#define GSC_MAX_SRC		4
+#define GSC_MAX_DST		16
+#define GSC_RESET_TIMEOUT	50
+#define GSC_BUF_STOP	1
+#define GSC_BUF_START	2
+#define GSC_REG_SZ		16
+#define GSC_WIDTH_ITU_709	1280
+#define GSC_SC_UP_MAX_RATIO		65536
+#define GSC_SC_DOWN_RATIO_7_8		74898
+#define GSC_SC_DOWN_RATIO_6_8		87381
+#define GSC_SC_DOWN_RATIO_5_8		104857
+#define GSC_SC_DOWN_RATIO_4_8		131072
+#define GSC_SC_DOWN_RATIO_3_8		174762
+#define GSC_SC_DOWN_RATIO_2_8		262144
+#define GSC_REFRESH_MIN	12
+#define GSC_REFRESH_MAX	60
+#define GSC_CROP_MAX	8192
+#define GSC_CROP_MIN	32
+#define GSC_SCALE_MAX	4224
+#define GSC_SCALE_MIN	32
+#define GSC_COEF_RATIO	7
+#define GSC_COEF_PHASE	9
+#define GSC_COEF_ATTR	16
+#define GSC_COEF_H_8T	8
+#define GSC_COEF_V_4T	4
+#define GSC_COEF_DEPTH	3
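+
+/*
+ * The GSC_SC_* ratios above are src/dst in 16.16 fixed point, i.e.
+ * ratio = (src << 16) / dst. As a quick illustrative check,
+ * downscaling 8 pixels to 7 gives (8 << 16) / 7 = 74898, which is
+ * GSC_SC_DOWN_RATIO_7_8.
+ */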
+
+#define get_gsc_context(dev)	platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
+					struct gsc_context, ippdrv)
+#define gsc_read(offset)		readl(ctx->regs + (offset))
+#define gsc_write(cfg, offset)	writel(cfg, ctx->regs + (offset))
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @pre_shfactor: pre-scaler shift factor.
+ * @pre_hratio: horizontal ratio of the prescaler.
+ * @pre_vratio: vertical ratio of the prescaler.
+ * @main_hratio: the main scaler's horizontal ratio.
+ * @main_vratio: the main scaler's vertical ratio.
+ */
+struct gsc_scaler {
+	bool	range;
+	u32	pre_shfactor;
+	u32	pre_hratio;
+	u32	pre_vratio;
+	unsigned long main_hratio;
+	unsigned long main_vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see section 49.2 of the user manual for the supported features.
+ * @tile_w: tile mode or rotation width.
+ * @tile_h: tile mode or rotation height.
+ * @w: other cases width.
+ * @h: other cases height.
+ */
+struct gsc_capability {
+	/* tile or rotation */
+	u32	tile_w;
+	u32	tile_h;
+	/* other cases */
+	u32	w;
+	u32	h;
+};
+
+/*
+ * A structure of gsc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @gsc_clk: gsc gate clock.
+ * @sc: scaler information.
+ * @id: gsc id.
+ * @irq: irq number.
+ * @rotation: supports rotation of src.
+ * @suspended: qos operations.
+ */
+struct gsc_context {
+	struct exynos_drm_ippdrv	ippdrv;
+	struct resource	*regs_res;
+	void __iomem	*regs;
+	struct mutex	lock;
+	struct clk	*gsc_clk;
+	struct gsc_scaler	sc;
+	int	id;
+	int	irq;
+	bool	rotation;
+	bool	suspended;
+};
+
+/* 8-tap Filter Coefficient */
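+/* the eight taps of each phase sum to 128, i.e. unity gain in 7-bit precision */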
+static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
+	{	/* Ratio <= 65536 (~8:8) */
+		{  0,  0,   0, 128,   0,   0,  0,  0 },
+		{ -1,  2,  -6, 127,   7,  -2,  1,  0 },
+		{ -1,  4, -12, 125,  16,  -5,  1,  0 },
+		{ -1,  5, -15, 120,  25,  -8,  2,  0 },
+		{ -1,  6, -18, 114,  35, -10,  3, -1 },
+		{ -1,  6, -20, 107,  46, -13,  4, -1 },
+		{ -2,  7, -21,  99,  57, -16,  5, -1 },
+		{ -1,  6, -20,  89,  68, -18,  5, -1 },
+		{ -1,  6, -20,  79,  79, -20,  6, -1 },
+		{ -1,  5, -18,  68,  89, -20,  6, -1 },
+		{ -1,  5, -16,  57,  99, -21,  7, -2 },
+		{ -1,  4, -13,  46, 107, -20,  6, -1 },
+		{ -1,  3, -10,  35, 114, -18,  6, -1 },
+		{  0,  2,  -8,  25, 120, -15,  5, -1 },
+		{  0,  1,  -5,  16, 125, -12,  4, -1 },
+		{  0,  1,  -2,   7, 127,  -6,  2, -1 }
+	}, {	/* 65536 < Ratio <= 74898 (~8:7) */
+		{  3, -8,  14, 111,  13,  -8,  3,  0 },
+		{  2, -6,   7, 112,  21, -10,  3, -1 },
+		{  2, -4,   1, 110,  28, -12,  4, -1 },
+		{  1, -2,  -3, 106,  36, -13,  4, -1 },
+		{  1, -1,  -7, 103,  44, -15,  4, -1 },
+		{  1,  1, -11,  97,  53, -16,  4, -1 },
+		{  0,  2, -13,  91,  61, -16,  4, -1 },
+		{  0,  3, -15,  85,  69, -17,  4, -1 },
+		{  0,  3, -16,  77,  77, -16,  3,  0 },
+		{ -1,  4, -17,  69,  85, -15,  3,  0 },
+		{ -1,  4, -16,  61,  91, -13,  2,  0 },
+		{ -1,  4, -16,  53,  97, -11,  1,  1 },
+		{ -1,  4, -15,  44, 103,  -7, -1,  1 },
+		{ -1,  4, -13,  36, 106,  -3, -2,  1 },
+		{ -1,  4, -12,  28, 110,   1, -4,  2 },
+		{ -1,  3, -10,  21, 112,   7, -6,  2 }
+	}, {	/* 74898 < Ratio <= 87381 (~8:6) */
+		{ 2, -11,  25,  96, 25, -11,   2,  0 },
+		{ 2, -10,  19,  96, 31, -12,   2,  0 },
+		{ 2,  -9,  14,  94, 37, -12,   2,  0 },
+		{ 2,  -8,  10,  92, 43, -12,   1,  0 },
+		{ 2,  -7,   5,  90, 49, -12,   1,  0 },
+		{ 2,  -5,   1,  86, 55, -12,   0,  1 },
+		{ 2,  -4,  -2,  82, 61, -11,  -1,  1 },
+		{ 1,  -3,  -5,  77, 67,  -9,  -1,  1 },
+		{ 1,  -2,  -7,  72, 72,  -7,  -2,  1 },
+		{ 1,  -1,  -9,  67, 77,  -5,  -3,  1 },
+		{ 1,  -1, -11,  61, 82,  -2,  -4,  2 },
+		{ 1,   0, -12,  55, 86,   1,  -5,  2 },
+		{ 0,   1, -12,  49, 90,   5,  -7,  2 },
+		{ 0,   1, -12,  43, 92,  10,  -8,  2 },
+		{ 0,   2, -12,  37, 94,  14,  -9,  2 },
+		{ 0,   2, -12,  31, 96,  19, -10,  2 }
+	}, {	/* 87381 < Ratio <= 104857 (~8:5) */
+		{ -1,  -8, 33,  80, 33,  -8,  -1,  0 },
+		{ -1,  -8, 28,  80, 37,  -7,  -2,  1 },
+		{  0,  -8, 24,  79, 41,  -7,  -2,  1 },
+		{  0,  -8, 20,  78, 46,  -6,  -3,  1 },
+		{  0,  -8, 16,  76, 50,  -4,  -3,  1 },
+		{  0,  -7, 13,  74, 54,  -3,  -4,  1 },
+		{  1,  -7, 10,  71, 58,  -1,  -5,  1 },
+		{  1,  -6,  6,  68, 62,   1,  -5,  1 },
+		{  1,  -6,  4,  65, 65,   4,  -6,  1 },
+		{  1,  -5,  1,  62, 68,   6,  -6,  1 },
+		{  1,  -5, -1,  58, 71,  10,  -7,  1 },
+		{  1,  -4, -3,  54, 74,  13,  -7,  0 },
+		{  1,  -3, -4,  50, 76,  16,  -8,  0 },
+		{  1,  -3, -6,  46, 78,  20,  -8,  0 },
+		{  1,  -2, -7,  41, 79,  24,  -8,  0 },
+		{  1,  -2, -7,  37, 80,  28,  -8, -1 }
+	}, {	/* 104857 < Ratio <= 131072 (~8:4) */
+		{ -3,   0, 35,  64, 35,   0,  -3,  0 },
+		{ -3,  -1, 32,  64, 38,   1,  -3,  0 },
+		{ -2,  -2, 29,  63, 41,   2,  -3,  0 },
+		{ -2,  -3, 27,  63, 43,   4,  -4,  0 },
+		{ -2,  -3, 24,  61, 46,   6,  -4,  0 },
+		{ -2,  -3, 21,  60, 49,   7,  -4,  0 },
+		{ -1,  -4, 19,  59, 51,   9,  -4, -1 },
+		{ -1,  -4, 16,  57, 53,  12,  -4, -1 },
+		{ -1,  -4, 14,  55, 55,  14,  -4, -1 },
+		{ -1,  -4, 12,  53, 57,  16,  -4, -1 },
+		{ -1,  -4,  9,  51, 59,  19,  -4, -1 },
+		{  0,  -4,  7,  49, 60,  21,  -3, -2 },
+		{  0,  -4,  6,  46, 61,  24,  -3, -2 },
+		{  0,  -4,  4,  43, 63,  27,  -3, -2 },
+		{  0,  -3,  2,  41, 63,  29,  -2, -2 },
+		{  0,  -3,  1,  38, 64,  32,  -1, -3 }
+	}, {	/* 131072 < Ratio <= 174762 (~8:3) */
+		{ -1,   8, 33,  48, 33,   8,  -1,  0 },
+		{ -1,   7, 31,  49, 35,   9,  -1, -1 },
+		{ -1,   6, 30,  49, 36,  10,  -1, -1 },
+		{ -1,   5, 28,  48, 38,  12,  -1, -1 },
+		{ -1,   4, 26,  48, 39,  13,   0, -1 },
+		{ -1,   3, 24,  47, 41,  15,   0, -1 },
+		{ -1,   2, 23,  47, 42,  16,   0, -1 },
+		{ -1,   2, 21,  45, 43,  18,   1, -1 },
+		{ -1,   1, 19,  45, 45,  19,   1, -1 },
+		{ -1,   1, 18,  43, 45,  21,   2, -1 },
+		{ -1,   0, 16,  42, 47,  23,   2, -1 },
+		{ -1,   0, 15,  41, 47,  24,   3, -1 },
+		{ -1,   0, 13,  39, 48,  26,   4, -1 },
+		{ -1,  -1, 12,  38, 48,  28,   5, -1 },
+		{ -1,  -1, 10,  36, 49,  30,   6, -1 },
+		{ -1,  -1,  9,  35, 49,  31,   7, -1 }
+	}, {	/* 174762 < Ratio <= 262144 (~8:2) */
+		{  2,  13, 30,  38, 30,  13,   2,  0 },
+		{  2,  12, 29,  38, 30,  14,   3,  0 },
+		{  2,  11, 28,  38, 31,  15,   3,  0 },
+		{  2,  10, 26,  38, 32,  16,   4,  0 },
+		{  1,  10, 26,  37, 33,  17,   4,  0 },
+		{  1,   9, 24,  37, 34,  18,   5,  0 },
+		{  1,   8, 24,  37, 34,  19,   5,  0 },
+		{  1,   7, 22,  36, 35,  20,   6,  1 },
+		{  1,   6, 21,  36, 36,  21,   6,  1 },
+		{  1,   6, 20,  35, 36,  22,   7,  1 },
+		{  0,   5, 19,  34, 37,  24,   8,  1 },
+		{  0,   5, 18,  34, 37,  24,   9,  1 },
+		{  0,   4, 17,  33, 37,  26,  10,  1 },
+		{  0,   4, 16,  32, 38,  26,  10,  2 },
+		{  0,   3, 15,  31, 38,  28,  11,  2 },
+		{  0,   3, 14,  30, 38,  29,  12,  2 }
+	}
+};
+
+/* 4-tap Filter Coefficient */
+static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
+	{	/* Ratio <= 65536 (~8:8) */
+		{  0, 128,   0,  0 },
+		{ -4, 127,   5,  0 },
+		{ -6, 124,  11, -1 },
+		{ -8, 118,  19, -1 },
+		{ -8, 111,  27, -2 },
+		{ -8, 102,  37, -3 },
+		{ -8,  92,  48, -4 },
+		{ -7,  81,  59, -5 },
+		{ -6,  70,  70, -6 },
+		{ -5,  59,  81, -7 },
+		{ -4,  48,  92, -8 },
+		{ -3,  37, 102, -8 },
+		{ -2,  27, 111, -8 },
+		{ -1,  19, 118, -8 },
+		{ -1,  11, 124, -6 },
+		{  0,   5, 127, -4 }
+	}, {	/* 65536 < Ratio <= 74898 (~8:7) */
+		{  8, 112,   8,  0 },
+		{  4, 111,  14, -1 },
+		{  1, 109,  20, -2 },
+		{ -2, 105,  27, -2 },
+		{ -3, 100,  34, -3 },
+		{ -5,  93,  43, -3 },
+		{ -5,  86,  51, -4 },
+		{ -5,  77,  60, -4 },
+		{ -5,  69,  69, -5 },
+		{ -4,  60,  77, -5 },
+		{ -4,  51,  86, -5 },
+		{ -3,  43,  93, -5 },
+		{ -3,  34, 100, -3 },
+		{ -2,  27, 105, -2 },
+		{ -2,  20, 109,  1 },
+		{ -1,  14, 111,  4 }
+	}, {	/* 74898 < Ratio <= 87381 (~8:6) */
+		{ 16,  96,  16,  0 },
+		{ 12,  97,  21, -2 },
+		{  8,  96,  26, -2 },
+		{  5,  93,  32, -2 },
+		{  2,  89,  39, -2 },
+		{  0,  84,  46, -2 },
+		{ -1,  79,  53, -3 },
+		{ -2,  73,  59, -2 },
+		{ -2,  66,  66, -2 },
+		{ -2,  59,  73, -2 },
+		{ -3,  53,  79, -1 },
+		{ -2,  46,  84,  0 },
+		{ -2,  39,  89,  2 },
+		{ -2,  32,  93,  5 },
+		{ -2,  26,  96,  8 },
+		{ -2,  21,  97, 12 }
+	}, {	/* 87381 < Ratio <= 104857 (~8:5) */
+		{ 22,  84,  22,  0 },
+		{ 18,  85,  26, -1 },
+		{ 14,  84,  31, -1 },
+		{ 11,  82,  36, -1 },
+		{  8,  79,  42, -1 },
+		{  6,  76,  47, -1 },
+		{  4,  72,  52,  0 },
+		{  2,  68,  58,  0 },
+		{  1,  63,  63,  1 },
+		{  0,  58,  68,  2 },
+		{  0,  52,  72,  4 },
+		{ -1,  47,  76,  6 },
+		{ -1,  42,  79,  8 },
+		{ -1,  36,  82, 11 },
+		{ -1,  31,  84, 14 },
+		{ -1,  26,  85, 18 }
+	}, {	/* 104857 < Ratio <= 131072 (~8:4) */
+		{ 26,  76,  26,  0 },
+		{ 22,  76,  30,  0 },
+		{ 19,  75,  34,  0 },
+		{ 16,  73,  38,  1 },
+		{ 13,  71,  43,  1 },
+		{ 10,  69,  47,  2 },
+		{  8,  66,  51,  3 },
+		{  6,  63,  55,  4 },
+		{  5,  59,  59,  5 },
+		{  4,  55,  63,  6 },
+		{  3,  51,  66,  8 },
+		{  2,  47,  69, 10 },
+		{  1,  43,  71, 13 },
+		{  1,  38,  73, 16 },
+		{  0,  34,  75, 19 },
+		{  0,  30,  76, 22 }
+	}, {	/* 131072 < Ratio <= 174762 (~8:3) */
+		{ 29,  70,  29,  0 },
+		{ 26,  68,  32,  2 },
+		{ 23,  67,  36,  2 },
+		{ 20,  66,  39,  3 },
+		{ 17,  65,  43,  3 },
+		{ 15,  63,  46,  4 },
+		{ 12,  61,  50,  5 },
+		{ 10,  58,  53,  7 },
+		{  8,  56,  56,  8 },
+		{  7,  53,  58, 10 },
+		{  5,  50,  61, 12 },
+		{  4,  46,  63, 15 },
+		{  3,  43,  65, 17 },
+		{  3,  39,  66, 20 },
+		{  2,  36,  67, 23 },
+		{  2,  32,  68, 26 }
+	}, {	/* 174762 < Ratio <= 262144 (~8:2) */
+		{ 32,  64,  32,  0 },
+		{ 28,  63,  34,  3 },
+		{ 25,  62,  37,  4 },
+		{ 22,  62,  40,  4 },
+		{ 19,  61,  43,  5 },
+		{ 17,  59,  46,  6 },
+		{ 15,  58,  48,  7 },
+		{ 13,  55,  51,  9 },
+		{ 11,  53,  53, 11 },
+		{  9,  51,  55, 13 },
+		{  7,  48,  58, 15 },
+		{  6,  46,  59, 17 },
+		{  5,  43,  61, 19 },
+		{  4,  40,  62, 22 },
+		{  4,  37,  62, 25 },
+		{  3,  34,  63, 28 }
+	}
+};
+
+static int gsc_sw_reset(struct gsc_context *ctx)
+{
+	u32 cfg;
+	int count = GSC_RESET_TIMEOUT;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* s/w reset */
+	cfg = (GSC_SW_RESET_SRESET);
+	gsc_write(cfg, GSC_SW_RESET);
+
+	/* wait for the s/w reset to complete */
+	while (count--) {
+		cfg = gsc_read(GSC_SW_RESET);
+		if (!cfg)
+			break;
+		usleep_range(1000, 2000);
+	}
+
+	if (cfg) {
+		DRM_ERROR("failed to reset gsc h/w.\n");
+		return -EBUSY;
+	}
+
+	/* reset sequence */
+	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+	cfg |= (GSC_IN_BASE_ADDR_MASK |
+		GSC_IN_BASE_ADDR_PINGPONG(0));
+	gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+	gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+	gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+	cfg |= (GSC_OUT_BASE_ADDR_MASK |
+		GSC_OUT_BASE_ADDR_PINGPONG(0));
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+	return 0;
+}
+
+static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
+{
+	u32 gscblk_cfg;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
+
+	if (enable)
+		gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
+				GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
+				GSC_BLK_SW_RESET_WB_DEST(ctx->id);
+	else
+		gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
+
+	writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
+}
+
+static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
+		bool overflow, bool done)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+			enable, overflow, done);
+
+	cfg = gsc_read(GSC_IRQ);
+	cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
+
+	if (enable)
+		cfg |= GSC_IRQ_ENABLE;
+	else
+		cfg &= ~GSC_IRQ_ENABLE;
+
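+	/* a mask bit set above suppresses that interrupt; clear it to deliver */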
+	if (overflow)
+		cfg &= ~GSC_IRQ_OR_MASK;
+	else
+		cfg |= GSC_IRQ_OR_MASK;
+
+	if (done)
+		cfg &= ~GSC_IRQ_FRMDONE_MASK;
+	else
+		cfg |= GSC_IRQ_FRMDONE_MASK;
+
+	gsc_write(cfg, GSC_IRQ);
+}
+
+
+static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	cfg = gsc_read(GSC_IN_CON);
+	cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+		 GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+		 GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
+		 GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
+
+	switch (fmt) {
+	case DRM_FORMAT_RGB565:
+		cfg |= GSC_IN_RGB565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		cfg |= GSC_IN_XRGB8888;
+		break;
+	case DRM_FORMAT_BGRX8888:
+		cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
+		break;
+	case DRM_FORMAT_YUYV:
+		cfg |= (GSC_IN_YUV422_1P |
+			GSC_IN_YUV422_1P_ORDER_LSB_Y |
+			GSC_IN_CHROMA_ORDER_CBCR);
+		break;
+	case DRM_FORMAT_YVYU:
+		cfg |= (GSC_IN_YUV422_1P |
+			GSC_IN_YUV422_1P_ORDER_LSB_Y |
+			GSC_IN_CHROMA_ORDER_CRCB);
+		break;
+	case DRM_FORMAT_UYVY:
+		cfg |= (GSC_IN_YUV422_1P |
+			GSC_IN_YUV422_1P_OEDER_LSB_C |
+			GSC_IN_CHROMA_ORDER_CBCR);
+		break;
+	case DRM_FORMAT_VYUY:
+		cfg |= (GSC_IN_YUV422_1P |
+			GSC_IN_YUV422_1P_OEDER_LSB_C |
+			GSC_IN_CHROMA_ORDER_CRCB);
+		break;
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+		cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
+			GSC_IN_YUV420_2P);
+		break;
+	case DRM_FORMAT_YUV422:
+		cfg |= GSC_IN_YUV422_3P;
+		break;
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		cfg |= GSC_IN_YUV420_3P;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+		cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
+			GSC_IN_YUV420_2P);
+		break;
+	case DRM_FORMAT_NV12MT:
+		cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+		return -EINVAL;
+	}
+
+	gsc_write(cfg, GSC_IN_CON);
+
+	return 0;
+}
+
+static int gsc_src_set_transf(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip, bool *swap)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+		degree, flip);
+
+	cfg = gsc_read(GSC_IN_CON);
+	cfg &= ~GSC_IN_ROT_MASK;
+
+	switch (degree) {
+	case EXYNOS_DRM_DEGREE_0:
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= GSC_IN_ROT_YFLIP;
+		break;
+	case EXYNOS_DRM_DEGREE_90:
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= GSC_IN_ROT_90_XFLIP;
+		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= GSC_IN_ROT_90_YFLIP;
+		else
+			cfg |= GSC_IN_ROT_90;
+		break;
+	case EXYNOS_DRM_DEGREE_180:
+		cfg |= GSC_IN_ROT_180;
+		break;
+	case EXYNOS_DRM_DEGREE_270:
+		cfg |= GSC_IN_ROT_270;
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+		return -EINVAL;
+	}
+
+	gsc_write(cfg, GSC_IN_CON);
+
+	ctx->rotation = cfg &
+		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+	*swap = ctx->rotation;
+
+	return 0;
+}
+
+static int gsc_src_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct drm_exynos_pos img_pos = *pos;
+	struct gsc_scaler *sc = &ctx->sc;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+		__func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+	if (swap) {
+		img_pos.w = pos->h;
+		img_pos.h = pos->w;
+	}
+
+	/* pixel offset */
+	cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
+		GSC_SRCIMG_OFFSET_Y(img_pos.y));
+	gsc_write(cfg, GSC_SRCIMG_OFFSET);
+
+	/* cropped size */
+	cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
+		GSC_CROPPED_HEIGHT(img_pos.h));
+	gsc_write(cfg, GSC_CROPPED_SIZE);
+
+	DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+		__func__, sz->hsize, sz->vsize);
+
+	/* original size */
+	cfg = gsc_read(GSC_SRCIMG_SIZE);
+	cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
+		GSC_SRCIMG_WIDTH_MASK);
+
+	cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
+		GSC_SRCIMG_HEIGHT(sz->vsize));
+
+	gsc_write(cfg, GSC_SRCIMG_SIZE);
+
+	cfg = gsc_read(GSC_IN_CON);
+	cfg &= ~GSC_IN_RGB_TYPE_MASK;
+
+	DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+		__func__, pos->w, sc->range);
+
+	if (pos->w >= GSC_WIDTH_ITU_709) {
+		if (sc->range)
+			cfg |= GSC_IN_RGB_HD_WIDE;
+		else
+			cfg |= GSC_IN_RGB_HD_NARROW;
+	} else {
+		if (sc->range)
+			cfg |= GSC_IN_RGB_SD_WIDE;
+		else
+			cfg |= GSC_IN_RGB_SD_NARROW;
+	}
+
+	gsc_write(cfg, GSC_IN_CON);
+
+	return 0;
+}
+
+static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	bool masked;
+	u32 cfg;
+	u32 mask = 0x00000001 << buf_id;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+		buf_id, buf_type);
+
+	/* mask register set */
+	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		masked = false;
+		break;
+	case IPP_BUF_DEQUEUE:
+		masked = true;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+		return -EINVAL;
+	}
+
+	/* sequence id */
+	cfg &= ~mask;
+	cfg |= masked << buf_id;
+	gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+	gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+	gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+	return 0;
+}
+
+static int gsc_src_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+	struct drm_exynos_ipp_property *property;
+
+	if (!c_node) {
+		DRM_ERROR("failed to get c_node.\n");
+		return -EFAULT;
+	}
+
+	property = &c_node->property;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+		property->prop_id, buf_id, buf_type);
+
+	if (buf_id >= GSC_MAX_SRC) {
+		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+		return -EINVAL;
+	}
+
+	/* address register set */
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+			GSC_IN_BASE_ADDR_Y(buf_id));
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+			GSC_IN_BASE_ADDR_CB(buf_id));
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+			GSC_IN_BASE_ADDR_CR(buf_id));
+		break;
+	case IPP_BUF_DEQUEUE:
+		gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
+		gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
+		gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
+		break;
+	default:
+		/* bypass */
+		break;
+	}
+
+	return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_src_ops = {
+	.set_fmt = gsc_src_set_fmt,
+	.set_transf = gsc_src_set_transf,
+	.set_size = gsc_src_set_size,
+	.set_addr = gsc_src_set_addr,
+};
+
+static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	cfg = gsc_read(GSC_OUT_CON);
+	cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+		 GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+		 GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
+		 GSC_OUT_GLOBAL_ALPHA_MASK);
+
+	switch (fmt) {
+	case DRM_FORMAT_RGB565:
+		cfg |= GSC_OUT_RGB565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		cfg |= GSC_OUT_XRGB8888;
+		break;
+	case DRM_FORMAT_BGRX8888:
+		cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
+		break;
+	case DRM_FORMAT_YUYV:
+		cfg |= (GSC_OUT_YUV422_1P |
+			GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+			GSC_OUT_CHROMA_ORDER_CBCR);
+		break;
+	case DRM_FORMAT_YVYU:
+		cfg |= (GSC_OUT_YUV422_1P |
+			GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+			GSC_OUT_CHROMA_ORDER_CRCB);
+		break;
+	case DRM_FORMAT_UYVY:
+		cfg |= (GSC_OUT_YUV422_1P |
+			GSC_OUT_YUV422_1P_OEDER_LSB_C |
+			GSC_OUT_CHROMA_ORDER_CBCR);
+		break;
+	case DRM_FORMAT_VYUY:
+		cfg |= (GSC_OUT_YUV422_1P |
+			GSC_OUT_YUV422_1P_OEDER_LSB_C |
+			GSC_OUT_CHROMA_ORDER_CRCB);
+		break;
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+		cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
+		break;
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		cfg |= GSC_OUT_YUV420_3P;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
+			GSC_OUT_YUV420_2P);
+		break;
+	case DRM_FORMAT_NV12MT:
+		cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+		return -EINVAL;
+	}
+
+	gsc_write(cfg, GSC_OUT_CON);
+
+	return 0;
+}
+
+static int gsc_dst_set_transf(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip, bool *swap)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+		degree, flip);
+
+	cfg = gsc_read(GSC_IN_CON);
+	cfg &= ~GSC_IN_ROT_MASK;
+
+	switch (degree) {
+	case EXYNOS_DRM_DEGREE_0:
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= GSC_IN_ROT_YFLIP;
+		break;
+	case EXYNOS_DRM_DEGREE_90:
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= GSC_IN_ROT_90_XFLIP;
+		else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= GSC_IN_ROT_90_YFLIP;
+		else
+			cfg |= GSC_IN_ROT_90;
+		break;
+	case EXYNOS_DRM_DEGREE_180:
+		cfg |= GSC_IN_ROT_180;
+		break;
+	case EXYNOS_DRM_DEGREE_270:
+		cfg |= GSC_IN_ROT_270;
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+		return -EINVAL;
+	}
+
+	gsc_write(cfg, GSC_IN_CON);
+
+	ctx->rotation = cfg &
+		(GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+	*swap = ctx->rotation;
+
+	return 0;
+}
+
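+/*
+ * The pre-scaler can only decimate by a factor of 1, 2 or 4, so a total
+ * down-scale of 8:1 or more is rejected here.
+ */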
+static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+{
+	DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+	if (src >= dst * 8) {
+		DRM_ERROR("failed to make ratio and shift.\n");
+		return -EINVAL;
+	} else if (src >= dst * 4)
+		*ratio = 4;
+	else if (src >= dst * 2)
+		*ratio = 2;
+	else
+		*ratio = 1;
+
+	return 0;
+}
+
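+/* the shift factor is log2(pre_hratio * pre_vratio), e.g. 4 x 4 gives 4 */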
+static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
+{
+	if (hratio == 4 && vratio == 4)
+		*shfactor = 4;
+	else if ((hratio == 4 && vratio == 2) ||
+		 (hratio == 2 && vratio == 4))
+		*shfactor = 3;
+	else if ((hratio == 4 && vratio == 1) ||
+		 (hratio == 1 && vratio == 4) ||
+		 (hratio == 2 && vratio == 2))
+		*shfactor = 2;
+	else if (hratio == 1 && vratio == 1)
+		*shfactor = 0;
+	else
+		*shfactor = 1;
+}
+
+static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
+		struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+	u32 src_w, src_h, dst_w, dst_h;
+	int ret = 0;
+
+	src_w = src->w;
+	src_h = src->h;
+
+	if (ctx->rotation) {
+		dst_w = dst->h;
+		dst_h = dst->w;
+	} else {
+		dst_w = dst->w;
+		dst_h = dst->h;
+	}
+
+	ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+	if (ret) {
+		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+		return ret;
+	}
+
+	ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+	if (ret) {
+		dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+		return ret;
+	}
+
+	DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
+		__func__, sc->pre_hratio, sc->pre_vratio);
+
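+	/*
+	 * main ratios are 16.16 fixed point: e.g. scaling 1920 down to 1280
+	 * gives (1920 << 16) / 1280 = 98304, i.e. 1.5.
+	 */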
+	sc->main_hratio = (src_w << 16) / dst_w;
+	sc->main_vratio = (src_h << 16) / dst_h;
+
+	DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+		__func__, sc->main_hratio, sc->main_vratio);
+
+	gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
+		&sc->pre_shfactor);
+
+	DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
+		sc->pre_shfactor);
+
+	cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
+		GSC_PRESC_H_RATIO(sc->pre_hratio) |
+		GSC_PRESC_V_RATIO(sc->pre_vratio));
+	gsc_write(cfg, GSC_PRE_SCALE_RATIO);
+
+	return ret;
+}
+
+static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
+{
+	int i, j, k, sc_ratio;
+
+	if (main_hratio <= GSC_SC_UP_MAX_RATIO)
+		sc_ratio = 0;
+	else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
+		sc_ratio = 1;
+	else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
+		sc_ratio = 2;
+	else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
+		sc_ratio = 3;
+	else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
+		sc_ratio = 4;
+	else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
+		sc_ratio = 5;
+	else
+		sc_ratio = 6;
+
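+	/* the same tap value is written to each of the GSC_COEF_DEPTH banks */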
+	for (i = 0; i < GSC_COEF_PHASE; i++)
+		for (j = 0; j < GSC_COEF_H_8T; j++)
+			for (k = 0; k < GSC_COEF_DEPTH; k++)
+				gsc_write(h_coef_8t[sc_ratio][i][j],
+					GSC_HCOEF(i, j, k));
+}
+
+static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
+{
+	int i, j, k, sc_ratio;
+
+	if (main_vratio <= GSC_SC_UP_MAX_RATIO)
+		sc_ratio = 0;
+	else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
+		sc_ratio = 1;
+	else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
+		sc_ratio = 2;
+	else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
+		sc_ratio = 3;
+	else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
+		sc_ratio = 4;
+	else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
+		sc_ratio = 5;
+	else
+		sc_ratio = 6;
+
+	for (i = 0; i < GSC_COEF_PHASE; i++)
+		for (j = 0; j < GSC_COEF_V_4T; j++)
+			for (k = 0; k < GSC_COEF_DEPTH; k++)
+				gsc_write(v_coef_4t[sc_ratio][i][j],
+					GSC_VCOEF(i, j, k));
+}
+
+static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
+{
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+		__func__, sc->main_hratio, sc->main_vratio);
+
+	gsc_set_h_coef(ctx, sc->main_hratio);
+	cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
+	gsc_write(cfg, GSC_MAIN_H_RATIO);
+
+	gsc_set_v_coef(ctx, sc->main_vratio);
+	cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
+	gsc_write(cfg, GSC_MAIN_V_RATIO);
+}
+
+static int gsc_dst_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct drm_exynos_pos img_pos = *pos;
+	struct gsc_scaler *sc = &ctx->sc;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+		__func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+	if (swap) {
+		img_pos.w = pos->h;
+		img_pos.h = pos->w;
+	}
+
+	/* pixel offset */
+	cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
+		GSC_DSTIMG_OFFSET_Y(pos->y));
+	gsc_write(cfg, GSC_DSTIMG_OFFSET);
+
+	/* scaled size */
+	cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
+	gsc_write(cfg, GSC_SCALED_SIZE);
+
+	DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+		__func__, sz->hsize, sz->vsize);
+
+	/* original size */
+	cfg = gsc_read(GSC_DSTIMG_SIZE);
+	cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
+		GSC_DSTIMG_WIDTH_MASK);
+	cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
+		GSC_DSTIMG_HEIGHT(sz->vsize));
+	gsc_write(cfg, GSC_DSTIMG_SIZE);
+
+	cfg = gsc_read(GSC_OUT_CON);
+	cfg &= ~GSC_OUT_RGB_TYPE_MASK;
+
+	DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+		__func__, pos->w, sc->range);
+
+	if (pos->w >= GSC_WIDTH_ITU_709) {
+		if (sc->range)
+			cfg |= GSC_OUT_RGB_HD_WIDE;
+		else
+			cfg |= GSC_OUT_RGB_HD_NARROW;
+	} else {
+		if (sc->range)
+			cfg |= GSC_OUT_RGB_SD_WIDE;
+		else
+			cfg |= GSC_OUT_RGB_SD_NARROW;
+	}
+
+	gsc_write(cfg, GSC_OUT_CON);
+
+	return 0;
+}
+
+static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
+{
+	u32 cfg, i, buf_num = GSC_REG_SZ;
+	u32 mask = 0x00000001;
+
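+	/* a set mask bit marks a dequeued slot; count the slots still enqueued */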
+	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+	for (i = 0; i < GSC_REG_SZ; i++)
+		if (cfg & (mask << i))
+			buf_num--;
+
+	DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+	return buf_num;
+}
+
+static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	bool masked;
+	u32 cfg;
+	u32 mask = 0x00000001 << buf_id;
+	int ret = 0;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+		buf_id, buf_type);
+
+	mutex_lock(&ctx->lock);
+
+	/* mask register set */
+	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		masked = false;
+		break;
+	case IPP_BUF_DEQUEUE:
+		masked = true;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+
+	/* sequence id */
+	cfg &= ~mask;
+	cfg |= masked << buf_id;
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+	/* interrupt enable */
+	if (buf_type == IPP_BUF_ENQUEUE &&
+	    gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
+		gsc_handle_irq(ctx, true, false, true);
+
+	/* interrupt disable */
+	if (buf_type == IPP_BUF_DEQUEUE &&
+	    gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
+		gsc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+	mutex_unlock(&ctx->lock);
+	return ret;
+}
+
+static int gsc_dst_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+	struct drm_exynos_ipp_property *property;
+
+	if (!c_node) {
+		DRM_ERROR("failed to get c_node.\n");
+		return -EFAULT;
+	}
+
+	property = &c_node->property;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+		property->prop_id, buf_id, buf_type);
+
+	if (buf_id >= GSC_MAX_DST) {
+		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+		return -EINVAL;
+	}
+
+	/* address register set */
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+			GSC_OUT_BASE_ADDR_Y(buf_id));
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+			GSC_OUT_BASE_ADDR_CB(buf_id));
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+			GSC_OUT_BASE_ADDR_CR(buf_id));
+		break;
+	case IPP_BUF_DEQUEUE:
+		gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
+		gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
+		gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
+		break;
+	default:
+		/* bypass */
+		break;
+	}
+
+	return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_dst_ops = {
+	.set_fmt = gsc_dst_set_fmt,
+	.set_transf = gsc_dst_set_transf,
+	.set_size = gsc_dst_set_size,
+	.set_addr = gsc_dst_set_addr,
+};
+
+static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
+{
+	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+	if (enable) {
+		clk_enable(ctx->gsc_clk);
+		ctx->suspended = false;
+	} else {
+		clk_disable(ctx->gsc_clk);
+		ctx->suspended = true;
+	}
+
+	return 0;
+}
+
+static int gsc_get_src_buf_index(struct gsc_context *ctx)
+{
+	u32 cfg, curr_index, i;
+	u32 buf_id = GSC_MAX_SRC;
+	int ret;
+
+	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+	curr_index = GSC_IN_CURR_GET_INDEX(cfg);
+
+	for (i = curr_index; i < GSC_MAX_SRC; i++) {
+		if (!((cfg >> i) & 0x1)) {
+			buf_id = i;
+			break;
+		}
+	}
+
+	if (buf_id == GSC_MAX_SRC) {
+		DRM_ERROR("failed to get in buffer index.\n");
+		return -EINVAL;
+	}
+
+	ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+	if (ret < 0) {
+		DRM_ERROR("failed to dequeue.\n");
+		return ret;
+	}
+
+	DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+		curr_index, buf_id);
+
+	return buf_id;
+}
+
+static int gsc_get_dst_buf_index(struct gsc_context *ctx)
+{
+	u32 cfg, curr_index, i;
+	u32 buf_id = GSC_MAX_DST;
+	int ret;
+
+	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+	curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
+
+	for (i = curr_index; i < GSC_MAX_DST; i++) {
+		if (!((cfg >> i) & 0x1)) {
+			buf_id = i;
+			break;
+		}
+	}
+
+	if (buf_id == GSC_MAX_DST) {
+		DRM_ERROR("failed to get out buffer index.\n");
+		return -EINVAL;
+	}
+
+	ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+	if (ret < 0) {
+		DRM_ERROR("failed to dequeue.\n");
+		return ret;
+	}
+
+	DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+		curr_index, buf_id);
+
+	return buf_id;
+}
+
+static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
+{
+	struct gsc_context *ctx = dev_id;
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+	struct drm_exynos_ipp_event_work *event_work =
+		c_node->event_work;
+	u32 status;
+	int buf_id[EXYNOS_DRM_OPS_MAX];
+
+	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+	status = gsc_read(GSC_IRQ);
+	if (status & GSC_IRQ_STATUS_OR_IRQ) {
+		dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+			ctx->id, status);
+		return IRQ_NONE;
+	}
+
+	if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
+		dev_dbg(ippdrv->dev, "occured frame done at %d, status 0x%x.\n",
+			ctx->id, status);
+
+		buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
+		if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
+			return IRQ_HANDLED;
+
+		buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
+		if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
+			return IRQ_HANDLED;
+
+		DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
+			buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
+
+		event_work->ippdrv = ippdrv;
+		event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
+			buf_id[EXYNOS_DRM_OPS_SRC];
+		event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+			buf_id[EXYNOS_DRM_OPS_DST];
+		queue_work(ippdrv->event_workq,
+			(struct work_struct *)event_work);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+	struct drm_exynos_ipp_prop_list *prop_list;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+	if (!prop_list) {
+		DRM_ERROR("failed to alloc property list.\n");
+		return -ENOMEM;
+	}
+
+	prop_list->version = 1;
+	prop_list->writeback = 1;
+	prop_list->refresh_min = GSC_REFRESH_MIN;
+	prop_list->refresh_max = GSC_REFRESH_MAX;
+	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+				(1 << EXYNOS_DRM_DEGREE_90) |
+				(1 << EXYNOS_DRM_DEGREE_180) |
+				(1 << EXYNOS_DRM_DEGREE_270);
+	prop_list->csc = 1;
+	prop_list->crop = 1;
+	prop_list->crop_max.hsize = GSC_CROP_MAX;
+	prop_list->crop_max.vsize = GSC_CROP_MAX;
+	prop_list->crop_min.hsize = GSC_CROP_MIN;
+	prop_list->crop_min.vsize = GSC_CROP_MIN;
+	prop_list->scale = 1;
+	prop_list->scale_max.hsize = GSC_SCALE_MAX;
+	prop_list->scale_max.vsize = GSC_SCALE_MAX;
+	prop_list->scale_min.hsize = GSC_SCALE_MIN;
+	prop_list->scale_min.vsize = GSC_SCALE_MIN;
+
+	ippdrv->prop_list = prop_list;
+
+	return 0;
+}
+
+static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
+{
+	switch (flip) {
+	case EXYNOS_DRM_FLIP_NONE:
+	case EXYNOS_DRM_FLIP_VERTICAL:
+	case EXYNOS_DRM_FLIP_HORIZONTAL:
+	case EXYNOS_DRM_FLIP_BOTH:
+		return true;
+	default:
+		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+		return false;
+	}
+}
+
+static int gsc_ippdrv_check_property(struct device *dev,
+		struct drm_exynos_ipp_property *property)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+	struct drm_exynos_ipp_config *config;
+	struct drm_exynos_pos *pos;
+	struct drm_exynos_sz *sz;
+	bool swap;
+	int i;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	for_each_ipp_ops(i) {
+		if ((i == EXYNOS_DRM_OPS_SRC) &&
+			(property->cmd == IPP_CMD_WB))
+			continue;
+
+		config = &property->config[i];
+		pos = &config->pos;
+		sz = &config->sz;
+
+		/* check for flip */
+		if (!gsc_check_drm_flip(config->flip)) {
+			DRM_ERROR("invalid flip.\n");
+			goto err_property;
+		}
+
+		/* check for degree */
+		switch (config->degree) {
+		case EXYNOS_DRM_DEGREE_90:
+		case EXYNOS_DRM_DEGREE_270:
+			swap = true;
+			break;
+		case EXYNOS_DRM_DEGREE_0:
+		case EXYNOS_DRM_DEGREE_180:
+			swap = false;
+			break;
+		default:
+			DRM_ERROR("invalid degree.\n");
+			goto err_property;
+		}
+
+		/* check for buffer bound */
+		if ((pos->x + pos->w > sz->hsize) ||
+			(pos->y + pos->h > sz->vsize)) {
+			DRM_ERROR("out of buf bound.\n");
+			goto err_property;
+		}
+
+		/* check for crop */
+		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+			if (swap) {
+				if ((pos->h < pp->crop_min.hsize) ||
+					(sz->vsize > pp->crop_max.hsize) ||
+					(pos->w < pp->crop_min.vsize) ||
+					(sz->hsize > pp->crop_max.vsize)) {
+					DRM_ERROR("out of crop size.\n");
+					goto err_property;
+				}
+			} else {
+				if ((pos->w < pp->crop_min.hsize) ||
+					(sz->hsize > pp->crop_max.hsize) ||
+					(pos->h < pp->crop_min.vsize) ||
+					(sz->vsize > pp->crop_max.vsize)) {
+					DRM_ERROR("out of crop size.\n");
+					goto err_property;
+				}
+			}
+		}
+
+		/* check for scale */
+		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+			if (swap) {
+				if ((pos->h < pp->scale_min.hsize) ||
+					(sz->vsize > pp->scale_max.hsize) ||
+					(pos->w < pp->scale_min.vsize) ||
+					(sz->hsize > pp->scale_max.vsize)) {
+					DRM_ERROR("out of scale size.\n");
+					goto err_property;
+				}
+			} else {
+				if ((pos->w < pp->scale_min.hsize) ||
+					(sz->hsize > pp->scale_max.hsize) ||
+					(pos->h < pp->scale_min.vsize) ||
+					(sz->vsize > pp->scale_max.vsize)) {
+					DRM_ERROR("out of scale size.\n");
+					goto err_property;
+				}
+			}
+		}
+	}
+
+	return 0;
+
+err_property:
+	for_each_ipp_ops(i) {
+		if ((i == EXYNOS_DRM_OPS_SRC) &&
+			(property->cmd == IPP_CMD_WB))
+			continue;
+
+		config = &property->config[i];
+		pos = &config->pos;
+		sz = &config->sz;
+
+		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+			i ? "dst" : "src", config->flip, config->degree,
+			pos->x, pos->y, pos->w, pos->h,
+			sz->hsize, sz->vsize);
+	}
+
+	return -EINVAL;
+}
+
+
+static int gsc_ippdrv_reset(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct gsc_scaler *sc = &ctx->sc;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* reset h/w block */
+	ret = gsc_sw_reset(ctx);
+	if (ret < 0) {
+		dev_err(dev, "failed to reset hardware.\n");
+		return ret;
+	}
+
+	/* scaler setting */
+	memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+	sc->range = true;
+
+	return 0;
+}
+
+static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+	struct drm_exynos_ipp_property *property;
+	struct drm_exynos_ipp_config *config;
+	struct drm_exynos_pos	img_pos[EXYNOS_DRM_OPS_MAX];
+	struct drm_exynos_ipp_set_wb set_wb;
+	u32 cfg;
+	int ret, i;
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+	if (!c_node) {
+		DRM_ERROR("failed to get c_node.\n");
+		return -EINVAL;
+	}
+
+	property = &c_node->property;
+
+	gsc_handle_irq(ctx, true, false, true);
+
+	for_each_ipp_ops(i) {
+		config = &property->config[i];
+		img_pos[i] = config->pos;
+	}
+
+	switch (cmd) {
+	case IPP_CMD_M2M:
+		/* enable one shot */
+		cfg = gsc_read(GSC_ENABLE);
+		cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
+			GSC_ENABLE_CLK_GATE_MODE_MASK);
+		cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
+		gsc_write(cfg, GSC_ENABLE);
+
+		/* src dma memory */
+		cfg = gsc_read(GSC_IN_CON);
+		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+		cfg |= GSC_IN_PATH_MEMORY;
+		gsc_write(cfg, GSC_IN_CON);
+
+		/* dst dma memory */
+		cfg = gsc_read(GSC_OUT_CON);
+		cfg |= GSC_OUT_PATH_MEMORY;
+		gsc_write(cfg, GSC_OUT_CON);
+		break;
+	case IPP_CMD_WB:
+		set_wb.enable = 1;
+		set_wb.refresh = property->refresh_rate;
+		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+
+		/* src local path */
+		cfg = gsc_read(GSC_IN_CON);
+		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+		cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
+		gsc_write(cfg, GSC_IN_CON);
+
+		/* dst dma memory */
+		cfg = gsc_read(GSC_OUT_CON);
+		cfg |= GSC_OUT_PATH_MEMORY;
+		gsc_write(cfg, GSC_OUT_CON);
+		break;
+	case IPP_CMD_OUTPUT:
+		/* src dma memory */
+		cfg = gsc_read(GSC_IN_CON);
+		cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+		cfg |= GSC_IN_PATH_MEMORY;
+		gsc_write(cfg, GSC_IN_CON);
+
+		/* dst local path */
+		cfg = gsc_read(GSC_OUT_CON);
+		cfg |= GSC_OUT_PATH_MEMORY;
+		gsc_write(cfg, GSC_OUT_CON);
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(dev, "invalid operations.\n");
+		return ret;
+	}
+
+	ret = gsc_set_prescaler(ctx, &ctx->sc,
+		&img_pos[EXYNOS_DRM_OPS_SRC],
+		&img_pos[EXYNOS_DRM_OPS_DST]);
+	if (ret) {
+		dev_err(dev, "failed to set precalser.\n");
+		return ret;
+	}
+
+	gsc_set_scaler(ctx, &ctx->sc);
+
+	cfg = gsc_read(GSC_ENABLE);
+	cfg |= GSC_ENABLE_ON;
+	gsc_write(cfg, GSC_ENABLE);
+
+	return 0;
+}
+
+static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+	switch (cmd) {
+	case IPP_CMD_M2M:
+		/* bypass */
+		break;
+	case IPP_CMD_WB:
+		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+		break;
+	case IPP_CMD_OUTPUT:
+	default:
+		dev_err(dev, "invalid operations.\n");
+		break;
+	}
+
+	gsc_handle_irq(ctx, false, false, true);
+
+	/* reset sequence */
+	gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
+	gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
+	gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
+
+	cfg = gsc_read(GSC_ENABLE);
+	cfg &= ~GSC_ENABLE_ON;
+	gsc_write(cfg, GSC_ENABLE);
+}
+
+static int gsc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct gsc_context *ctx;
+	struct resource *res;
+	struct exynos_drm_ippdrv *ippdrv;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	/* clock control */
+	ctx->gsc_clk = devm_clk_get(dev, "gscl");
+	if (IS_ERR(ctx->gsc_clk)) {
+		dev_err(dev, "failed to get gsc clock.\n");
+		return PTR_ERR(ctx->gsc_clk);
+	}
+
+	/* resource memory */
+	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+	if (IS_ERR(ctx->regs))
+		return PTR_ERR(ctx->regs);
+
+	/* resource irq */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(dev, "failed to request irq resource.\n");
+		return -ENOENT;
+	}
+
+	ctx->irq = res->start;
+	ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler,
+		IRQF_ONESHOT, "drm_gsc", ctx);
+	if (ret < 0) {
+		dev_err(dev, "failed to request irq.\n");
+		return ret;
+	}
+
+	/* context initialization */
+	ctx->id = pdev->id;
+
+	ippdrv = &ctx->ippdrv;
+	ippdrv->dev = dev;
+	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
+	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
+	ippdrv->check_property = gsc_ippdrv_check_property;
+	ippdrv->reset = gsc_ippdrv_reset;
+	ippdrv->start = gsc_ippdrv_start;
+	ippdrv->stop = gsc_ippdrv_stop;
+	ret = gsc_init_prop_list(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to init property list.\n");
+		return ret;
+	}
+
+	DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+		(int)ippdrv);
+
+	mutex_init(&ctx->lock);
+	platform_set_drvdata(pdev, ctx);
+
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	ret = exynos_drm_ippdrv_register(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to register drm gsc device.\n");
+		goto err_ippdrv_register;
+	}
+
+	dev_info(dev, "drm gsc registered successfully.\n");
+
+	return 0;
+
+err_ippdrv_register:
+	pm_runtime_disable(dev);
+	return ret;
+}
+
+static int gsc_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+	exynos_drm_ippdrv_unregister(ippdrv);
+	mutex_destroy(&ctx->lock);
+
+	pm_runtime_set_suspended(dev);
+	pm_runtime_disable(dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gsc_suspend(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_resume(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	if (!pm_runtime_suspended(dev))
+		return gsc_clk_ctrl(ctx, true);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int gsc_runtime_suspend(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_runtime_resume(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __FILE__, ctx->id);
+
+	return gsc_clk_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops gsc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+	SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
+};
+
+struct platform_driver gsc_driver = {
+	.probe		= gsc_probe,
+	.remove		= gsc_remove,
+	.driver		= {
+		.name	= "exynos-drm-gsc",
+		.owner	= THIS_MODULE,
+		.pm	= &gsc_pm_ops,
+	},
+};
+
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644
index 0000000..29ec1c5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_gsc.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *	Jinyoung Jeon <jy0.jeon@samsung.com>
+ *	Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_GSC_H_
+#define _EXYNOS_DRM_GSC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ * Mixer output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_GSC_H_ */
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
new file mode 100644
index 0000000..437fb94
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+
+#define to_context(dev)		platform_get_drvdata(to_platform_device(dev))
+#define to_subdrv(dev)		to_context(dev)
+#define get_ctx_from_subdrv(subdrv)	container_of(subdrv,\
+					struct drm_hdmi_context, subdrv)
+
+/* platform device pointer for common drm hdmi device. */
+static struct platform_device *exynos_drm_hdmi_pdev;
+
+/* Common hdmi subdrv needs to access the hdmi and mixer through context.
+ * These should be initialized by the respective drivers */
+static struct exynos_drm_hdmi_context *hdmi_ctx;
+static struct exynos_drm_hdmi_context *mixer_ctx;
+
+/* these callback points should be set by specific drivers. */
+static struct exynos_hdmi_ops *hdmi_ops;
+static struct exynos_mixer_ops *mixer_ops;
+
+struct drm_hdmi_context {
+	struct exynos_drm_subdrv	subdrv;
+	struct exynos_drm_hdmi_context	*hdmi_ctx;
+	struct exynos_drm_hdmi_context	*mixer_ctx;
+
+	bool	enabled[MIXER_WIN_NR];
+};
+
+int exynos_platform_device_hdmi_register(void)
+{
+	struct platform_device *pdev;
+
+	if (exynos_drm_hdmi_pdev)
+		return -EEXIST;
+
+	pdev = platform_device_register_simple(
+			"exynos-drm-hdmi", -1, NULL, 0);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
+
+	exynos_drm_hdmi_pdev = pdev;
+
+	return 0;
+}
+
+void exynos_platform_device_hdmi_unregister(void)
+{
+	if (exynos_drm_hdmi_pdev) {
+		platform_device_unregister(exynos_drm_hdmi_pdev);
+		exynos_drm_hdmi_pdev = NULL;
+	}
+}
+
+void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
+{
+	if (ctx)
+		hdmi_ctx = ctx;
+}
+
+void exynos_mixer_drv_attach(struct exynos_drm_hdmi_context *ctx)
+{
+	if (ctx)
+		mixer_ctx = ctx;
+}
+
+void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (ops)
+		hdmi_ops = ops;
+}
+
+void exynos_mixer_ops_register(struct exynos_mixer_ops *ops)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (ops)
+		mixer_ops = ops;
+}
+
+static bool drm_hdmi_is_connected(struct device *dev)
+{
+	struct drm_hdmi_context *ctx = to_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (hdmi_ops && hdmi_ops->is_connected)
+		return hdmi_ops->is_connected(ctx->hdmi_ctx->ctx);
+
+	return false;
+}
+
+static struct edid *drm_hdmi_get_edid(struct device *dev,
+			struct drm_connector *connector)
+{
+	struct drm_hdmi_context *ctx = to_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (hdmi_ops && hdmi_ops->get_edid)
+		return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector);
+
+	return NULL;
+}
+
+static int drm_hdmi_check_timing(struct device *dev, void *timing)
+{
+	struct drm_hdmi_context *ctx = to_context(dev);
+	int ret = 0;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * Both the mixer and hdmi should be able to handle the requested
+	 * mode. If either of the two fails, return the mode as BAD.
+	 */
+
+	if (mixer_ops && mixer_ops->check_timing)
+		ret = mixer_ops->check_timing(ctx->mixer_ctx->ctx, timing);
+
+	if (ret)
+		return ret;
+
+	if (hdmi_ops && hdmi_ops->check_timing)
+		return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing);
+
+	return 0;
+}
+
+static int drm_hdmi_power_on(struct device *dev, int mode)
+{
+	struct drm_hdmi_context *ctx = to_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (hdmi_ops && hdmi_ops->power_on)
+		return hdmi_ops->power_on(ctx->hdmi_ctx->ctx, mode);
+
+	return 0;
+}
+
+static struct exynos_drm_display_ops drm_hdmi_display_ops = {
+	.type = EXYNOS_DISPLAY_TYPE_HDMI,
+	.is_connected = drm_hdmi_is_connected,
+	.get_edid = drm_hdmi_get_edid,
+	.check_timing = drm_hdmi_check_timing,
+	.power_on = drm_hdmi_power_on,
+};
+
+static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+	struct exynos_drm_manager *manager = subdrv->manager;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (mixer_ops && mixer_ops->enable_vblank)
+		return mixer_ops->enable_vblank(ctx->mixer_ctx->ctx,
+						manager->pipe);
+
+	return 0;
+}
+
+static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (mixer_ops && mixer_ops->disable_vblank)
+		return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
+}
+
+static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (mixer_ops && mixer_ops->wait_for_vblank)
+		mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
+}
+
+static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
+				struct drm_connector *connector,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct drm_display_mode *m;
+	int mode_ok;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+	mode_ok = drm_hdmi_check_timing(subdrv_dev, adjusted_mode);
+
+	/* just return if the user-desired mode exists. */
+	if (mode_ok == 0)
+		return;
+
+	/*
+	 * otherwise, find the most suitable mode among modes and change it
+	 * to adjusted_mode.
+	 */
+	list_for_each_entry(m, &connector->modes, head) {
+		mode_ok = drm_hdmi_check_timing(subdrv_dev, m);
+
+		if (mode_ok == 0) {
+			struct drm_mode_object base;
+			struct list_head head;
+
+			DRM_INFO("desired mode doesn't exist so\n");
+			DRM_INFO("use the most suitable mode among modes.\n");
+
+			DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
+				m->hdisplay, m->vdisplay, m->vrefresh);
+
+			/* preserve display mode header while copying. */
+			head = adjusted_mode->head;
+			base = adjusted_mode->base;
+			memcpy(adjusted_mode, m, sizeof(*m));
+			adjusted_mode->head = head;
+			adjusted_mode->base = base;
+			break;
+		}
+	}
+}
+
+static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (hdmi_ops && hdmi_ops->mode_set)
+		hdmi_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
+}
+
+static void drm_hdmi_get_max_resol(struct device *subdrv_dev,
+				unsigned int *width, unsigned int *height)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (hdmi_ops && hdmi_ops->get_max_resol)
+		hdmi_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, height);
+}
+
+static void drm_hdmi_commit(struct device *subdrv_dev)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (hdmi_ops && hdmi_ops->commit)
+		hdmi_ops->commit(ctx->hdmi_ctx->ctx);
+}
+
+static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (mixer_ops && mixer_ops->dpms)
+		mixer_ops->dpms(ctx->mixer_ctx->ctx, mode);
+
+	if (hdmi_ops && hdmi_ops->dpms)
+		hdmi_ops->dpms(ctx->hdmi_ctx->ctx, mode);
+}
+
+static void drm_hdmi_apply(struct device *subdrv_dev)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+	int i;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	for (i = 0; i < MIXER_WIN_NR; i++) {
+		if (!ctx->enabled[i])
+			continue;
+		if (mixer_ops && mixer_ops->win_commit)
+			mixer_ops->win_commit(ctx->mixer_ctx->ctx, i);
+	}
+
+	if (hdmi_ops && hdmi_ops->commit)
+		hdmi_ops->commit(ctx->hdmi_ctx->ctx);
+}
+
+static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
+	.dpms = drm_hdmi_dpms,
+	.apply = drm_hdmi_apply,
+	.enable_vblank = drm_hdmi_enable_vblank,
+	.disable_vblank = drm_hdmi_disable_vblank,
+	.wait_for_vblank = drm_hdmi_wait_for_vblank,
+	.mode_fixup = drm_hdmi_mode_fixup,
+	.mode_set = drm_hdmi_mode_set,
+	.get_max_resol = drm_hdmi_get_max_resol,
+	.commit = drm_hdmi_commit,
+};
+
+static void drm_mixer_mode_set(struct device *subdrv_dev,
+		struct exynos_drm_overlay *overlay)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (mixer_ops && mixer_ops->win_mode_set)
+		mixer_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
+}
+
+static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+	int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (win < 0 || win >= MIXER_WIN_NR) {
+		DRM_ERROR("mixer window[%d] is wrong\n", win);
+		return;
+	}
+
+	if (mixer_ops && mixer_ops->win_commit)
+		mixer_ops->win_commit(ctx->mixer_ctx->ctx, win);
+
+	ctx->enabled[win] = true;
+}
+
+static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+	int win = (zpos == DEFAULT_ZPOS) ? MIXER_DEFAULT_WIN : zpos;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (win < 0 || win >= MIXER_WIN_NR) {
+		DRM_ERROR("mixer window[%d] is wrong\n", win);
+		return;
+	}
+
+	if (mixer_ops && mixer_ops->win_disable)
+		mixer_ops->win_disable(ctx->mixer_ctx->ctx, win);
+
+	ctx->enabled[win] = false;
+}
+
+static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
+	.mode_set = drm_mixer_mode_set,
+	.commit = drm_mixer_commit,
+	.disable = drm_mixer_disable,
+};
+
+static struct exynos_drm_manager hdmi_manager = {
+	.pipe		= -1,
+	.ops		= &drm_hdmi_manager_ops,
+	.overlay_ops	= &drm_hdmi_overlay_ops,
+	.display_ops	= &drm_hdmi_display_ops,
+};
+
+static int hdmi_subdrv_probe(struct drm_device *drm_dev,
+		struct device *dev)
+{
+	struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+	struct drm_hdmi_context *ctx;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!hdmi_ctx) {
+		DRM_ERROR("hdmi context not initialized.\n");
+		return -EFAULT;
+	}
+
+	if (!mixer_ctx) {
+		DRM_ERROR("mixer context not initialized.\n");
+		return -EFAULT;
+	}
+
+	ctx = get_ctx_from_subdrv(subdrv);
+
+	if (!ctx) {
+		DRM_ERROR("no drm hdmi context.\n");
+		return -EFAULT;
+	}
+
+	ctx->hdmi_ctx = hdmi_ctx;
+	ctx->mixer_ctx = mixer_ctx;
+
+	ctx->hdmi_ctx->drm_dev = drm_dev;
+	ctx->mixer_ctx->drm_dev = drm_dev;
+
+	if (mixer_ops && mixer_ops->iommu_on)
+		mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
+
+	return 0;
+}
+
+static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	struct drm_hdmi_context *ctx;
+	struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+
+	ctx = get_ctx_from_subdrv(subdrv);
+
+	if (mixer_ops && mixer_ops->iommu_on)
+		mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
+}
+
+static int exynos_drm_hdmi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct exynos_drm_subdrv *subdrv;
+	struct drm_hdmi_context *ctx;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		DRM_LOG_KMS("failed to alloc common hdmi context.\n");
+		return -ENOMEM;
+	}
+
+	subdrv = &ctx->subdrv;
+
+	subdrv->dev = dev;
+	subdrv->manager = &hdmi_manager;
+	subdrv->probe = hdmi_subdrv_probe;
+	subdrv->remove = hdmi_subdrv_remove;
+
+	platform_set_drvdata(pdev, subdrv);
+
+	exynos_drm_subdrv_register(subdrv);
+
+	return 0;
+}
+
+static int exynos_drm_hdmi_remove(struct platform_device *pdev)
+{
+	struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+	return 0;
+}
+
+struct platform_driver exynos_drm_common_hdmi_driver = {
+	.probe		= exynos_drm_hdmi_probe,
+	.remove		= exynos_drm_hdmi_remove,
+	.driver		= {
+		.name	= "exynos-drm-hdmi",
+		.owner	= THIS_MODULE,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
new file mode 100644
index 0000000..6b70944
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -0,0 +1,67 @@
+/* exynos_drm_hdmi.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_HDMI_H_
+#define _EXYNOS_DRM_HDMI_H_
+
+#define MIXER_WIN_NR		3
+#define MIXER_DEFAULT_WIN	0
+
+/*
+ * exynos hdmi common context structure.
+ *
+ * @drm_dev: pointer to drm_device.
+ * @ctx: pointer to the context of the specific device driver.
+ *	This should be either hdmi_context or mixer_context.
+ */
+struct exynos_drm_hdmi_context {
+	struct drm_device	*drm_dev;
+	void			*ctx;
+};
+
+struct exynos_hdmi_ops {
+	/* display */
+	bool (*is_connected)(void *ctx);
+	struct edid *(*get_edid)(void *ctx,
+			struct drm_connector *connector);
+	int (*check_timing)(void *ctx, struct fb_videomode *timing);
+	int (*power_on)(void *ctx, int mode);
+
+	/* manager */
+	void (*mode_set)(void *ctx, void *mode);
+	void (*get_max_resol)(void *ctx, unsigned int *width,
+				unsigned int *height);
+	void (*commit)(void *ctx);
+	void (*dpms)(void *ctx, int mode);
+};
+
+struct exynos_mixer_ops {
+	/* manager */
+	int (*iommu_on)(void *ctx, bool enable);
+	int (*enable_vblank)(void *ctx, int pipe);
+	void (*disable_vblank)(void *ctx);
+	void (*wait_for_vblank)(void *ctx);
+	void (*dpms)(void *ctx, int mode);
+
+	/* overlay */
+	void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
+	void (*win_commit)(void *ctx, int zpos);
+	void (*win_disable)(void *ctx, int zpos);
+
+	/* display */
+	int (*check_timing)(void *ctx, struct fb_videomode *timing);
+};
+
+void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx);
+void exynos_mixer_drv_attach(struct exynos_drm_hdmi_context *ctx);
+void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops);
+void exynos_mixer_ops_register(struct exynos_mixer_ops *ops);
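+
+/*
+ * Usage sketch: a mixer driver would typically register its ops and
+ * attach its context before the common hdmi driver probes, along these
+ * lines (the my_* names are hypothetical):
+ *
+ *	static struct exynos_mixer_ops my_mixer_ops = {
+ *		.win_mode_set	= my_win_mode_set,
+ *		.win_commit	= my_win_commit,
+ *		.win_disable	= my_win_disable,
+ *	};
+ *
+ *	exynos_mixer_ops_register(&my_mixer_ops);
+ *	exynos_mixer_drv_attach(my_mixer_hdmi_ctx);
+ */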
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644
index 0000000..3799d5c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,136 @@
+/* exynos_drm_iommu.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <drmP.h>
+#include <drm/exynos_drm.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+
+#include <asm/dma-iommu.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * drm_create_iommu_mapping - create a mapping structure
+ *
+ * @drm_dev: DRM device
+ */
+int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+	struct dma_iommu_mapping *mapping = NULL;
+	struct exynos_drm_private *priv = drm_dev->dev_private;
+	struct device *dev = drm_dev->dev;
+
+	if (!priv->da_start)
+		priv->da_start = EXYNOS_DEV_ADDR_START;
+	if (!priv->da_space_size)
+		priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
+	if (!priv->da_space_order)
+		priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
+
+	mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
+						priv->da_space_size,
+						priv->da_space_order);
+	if (IS_ERR(mapping))
+		return PTR_ERR(mapping);
+
+	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+					GFP_KERNEL);
+	if (!dev->dma_parms) {
+		arm_iommu_release_mapping(mapping);
+		return -ENOMEM;
+	}
+	dma_set_max_seg_size(dev, 0xffffffffu);
+	dev->archdata.mapping = mapping;
+
+	return 0;
+}
+
+/*
+ * drm_release_iommu_mapping - release iommu mapping structure
+ *
+ * @drm_dev: DRM device
+ *
+ * If mapping->kref drops to zero, everything related to the iommu
+ * mapping is released.
+ */
+void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+	struct device *dev = drm_dev->dev;
+
+	arm_iommu_release_mapping(dev->archdata.mapping);
+}
+
+/*
+ * drm_iommu_attach_device- attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach themselves to
+ * the iommu mapping.
+ */
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev)
+{
+	struct device *dev = drm_dev->dev;
+	int ret;
+
+	if (!dev->archdata.mapping) {
+		DRM_ERROR("iommu_mapping is null.\n");
+		return -EFAULT;
+	}
+
+	subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
+					sizeof(*subdrv_dev->dma_parms),
+					GFP_KERNEL);
+	if (!subdrv_dev->dma_parms)
+		return -ENOMEM;
+	dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
+
+	ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("failed iommu attach.\n");
+		return ret;
+	}
+
+	/*
+	 * Set dma_ops to drm_device just one time.
+	 *
+	 * The dma mapping api needs a device object to allocate physical
+	 * memory and map it through the iommu table.
+	 * If the iommu attach succeeded, the sub driver has iommu-aware
+	 * dma_ops, and all sub drivers share the same dma_ops.
+	 */
+	if (!dev->archdata.dma_ops)
+		dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
+
+	return 0;
+}
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach themselves from
+ * the iommu mapping.
+ */
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev)
+{
+	struct device *dev = drm_dev->dev;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+	if (!mapping || !mapping->domain)
+		return;
+
+	iommu_detach_device(mapping->domain, subdrv_dev);
+	drm_release_iommu_mapping(drm_dev);
+}
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644
index 0000000..598e60f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,71 @@
+/* exynos_drm_iommu.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_IOMMU_H_
+#define _EXYNOS_DRM_IOMMU_H_
+
+#define EXYNOS_DEV_ADDR_START	0x20000000
+#define EXYNOS_DEV_ADDR_SIZE	0x40000000
+#define EXYNOS_DEV_ADDR_ORDER	0x0
+
+#ifdef CONFIG_DRM_EXYNOS_IOMMU
+
+int drm_create_iommu_mapping(struct drm_device *drm_dev);
+
+void drm_release_iommu_mapping(struct drm_device *drm_dev);
+
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev);
+
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+				struct device *subdrv_dev);
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+	struct device *dev = drm_dev->dev;
+
+	return dev->archdata.mapping ? true : false;
+#else
+	return false;
+#endif
+}
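+
+/*
+ * Typical call pattern: sub drivers guard the attach with
+ * is_drm_iommu_supported(), as ipp_subdrv_probe() does:
+ *
+ *	if (is_drm_iommu_supported(drm_dev)) {
+ *		ret = drm_iommu_attach_device(drm_dev, subdrv_dev);
+ *		if (ret)
+ *			return ret;
+ *	}
+ */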
+
+#else
+
+struct dma_iommu_mapping;
+static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+	return 0;
+}
+
+static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+}
+
+static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
+						struct device *subdrv_dev)
+{
+	return 0;
+}
+
+static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
+						struct device *subdrv_dev)
+{
+}
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+	return false;
+}
+
+#endif
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 0000000..be1e884
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,2067 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *	Jinyoung Jeon <jy0.jeon@samsung.com>
+ *	Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * IPP stands for Image Post Processing and supports image scaler/rotator
+ * and input/output DMA operations, using FIMC, GSC, Rotator, and so on.
+ * IPP is an integration driver for hardware blocks sharing these
+ * attributes.
+ */
+
+/*
+ * TODO
+ * 1. expand command control id.
+ * 2. integrate property and config.
+ * 3. remove the send_event id check routine.
+ * 4. compare send_event id if needed.
+ * 5. free the subdrv_remove notifier callback list if needed.
+ * 6. check subdrv_open for multi-open handling.
+ * 7. implement power and sysmmu control in power_on.
+ */
+
+#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
+#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)
+
+/* platform device pointer for ipp device. */
+static struct platform_device *exynos_drm_ipp_pdev;
+
+/*
+ * A structure of event.
+ *
+ * @base: base of event.
+ * @event: ipp event.
+ */
+struct drm_exynos_ipp_send_event {
+	struct drm_pending_event	base;
+	struct drm_exynos_ipp_event	event;
+};
+
+/*
+ * A structure of memory node.
+ *
+ * @list: list head to memory queue information.
+ * @ops_id: id of operations.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @buf_info: gem objects and dma address, size.
+ * @filp: a pointer to drm_file.
+ */
+struct drm_exynos_ipp_mem_node {
+	struct list_head	list;
+	enum drm_exynos_ops_id	ops_id;
+	u32	prop_id;
+	u32	buf_id;
+	struct drm_exynos_ipp_buf_info	buf_info;
+	struct drm_file		*filp;
+};
+
+/*
+ * A structure of ipp context.
+ *
+ * @subdrv: prepare initialization using subdrv.
+ * @ipp_lock: lock for synchronization of access to ipp_idr.
+ * @prop_lock: lock for synchronization of access to prop_idr.
+ * @ipp_idr: ipp driver idr.
+ * @prop_idr: property idr.
+ * @event_workq: event work queue.
+ * @cmd_workq: command work queue.
+ */
+struct ipp_context {
+	struct exynos_drm_subdrv	subdrv;
+	struct mutex	ipp_lock;
+	struct mutex	prop_lock;
+	struct idr	ipp_idr;
+	struct idr	prop_idr;
+	struct workqueue_struct	*event_workq;
+	struct workqueue_struct	*cmd_workq;
+};
+
+static LIST_HEAD(exynos_drm_ippdrv_list);
+static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
+static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+
+int exynos_platform_device_ipp_register(void)
+{
+	struct platform_device *pdev;
+
+	if (exynos_drm_ipp_pdev)
+		return -EEXIST;
+
+	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
+
+	exynos_drm_ipp_pdev = pdev;
+
+	return 0;
+}
+
+void exynos_platform_device_ipp_unregister(void)
+{
+	if (exynos_drm_ipp_pdev) {
+		platform_device_unregister(exynos_drm_ipp_pdev);
+		exynos_drm_ipp_pdev = NULL;
+	}
+}
+
+int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ippdrv)
+		return -EINVAL;
+
+	mutex_lock(&exynos_drm_ippdrv_lock);
+	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
+	mutex_unlock(&exynos_drm_ippdrv_lock);
+
+	return 0;
+}
+
+int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ippdrv)
+		return -EINVAL;
+
+	mutex_lock(&exynos_drm_ippdrv_lock);
+	list_del(&ippdrv->drv_list);
+	mutex_unlock(&exynos_drm_ippdrv_lock);
+
+	return 0;
+}
+
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
+		u32 *idp)
+{
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* do the allocation under our mutexlock */
+	mutex_lock(lock);
+	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
+	mutex_unlock(lock);
+	if (ret < 0)
+		return ret;
+
+	*idp = ret;
+	return 0;
+}
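+
+/*
+ * Note: idr_alloc() above hands out ids starting at 1, so an id of 0 can
+ * safely mean "not set"; ipp_find_driver() relies on this by treating
+ * ipp_id == 0 as a request to search the whole driver list.
+ */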
+
+static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+	void *obj;
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
+
+	mutex_lock(lock);
+
+	/* find object using handle */
+	obj = idr_find(id_idr, id);
+	if (!obj) {
+		DRM_ERROR("failed to find object.\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-ENODEV);
+	}
+
+	mutex_unlock(lock);
+
+	return obj;
+}
+
+static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
+		enum drm_exynos_ipp_cmd	cmd)
+{
+	/*
+	 * Check the dedicated flag, and for WB/OUTPUT operations also
+	 * whether the device is powered on.
+	 */
+	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
+	    !pm_runtime_suspended(ippdrv->dev)))
+		return true;
+
+	return false;
+}
+
+static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
+		struct drm_exynos_ipp_property *property)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	u32 ipp_id = property->ipp_id;
+
+	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
+
+	if (ipp_id) {
+		/* find ipp driver using idr */
+		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+			ipp_id);
+		if (IS_ERR(ippdrv)) {
+			DRM_ERROR("ipp%d driver not found.\n", ipp_id);
+			return ippdrv;
+		}
+
+		/*
+		 * WB and OUTPUT operations do not support multi-operation,
+		 * so mark the driver dedicated in the set-property ioctl.
+		 * The dedicated flag is cleared once the ipp driver has
+		 * finished its operations.
+		 */
+		if (ipp_check_dedicated(ippdrv, property->cmd)) {
+			DRM_ERROR("device is already in use.\n");
+			return ERR_PTR(-EBUSY);
+		}
+
+		/*
+		 * This check is necessary to pick the correct device:
+		 * ipp drivers have different capabilities, so the
+		 * requested property must be validated.
+		 */
+		if (ippdrv->check_property &&
+		    ippdrv->check_property(ippdrv->dev, property)) {
+			DRM_ERROR("property not supported.\n");
+			return ERR_PTR(-EINVAL);
+		}
+
+		return ippdrv;
+	} else {
+		/*
+		 * The user application did not set an ipp_id, so the
+		 * ipp subsystem searches the whole driver list for a
+		 * suitable driver.
+		 */
+		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+			if (ipp_check_dedicated(ippdrv, property->cmd)) {
+				DRM_DEBUG_KMS("%s:device already in use.\n",
+					__func__);
+				continue;
+			}
+
+			if (ippdrv->check_property &&
+			    ippdrv->check_property(ippdrv->dev, property)) {
+				DRM_DEBUG_KMS("%s:property not supported.\n",
+					__func__);
+				continue;
+			}
+
+			return ippdrv;
+		}
+
+		DRM_ERROR("no ipp driver supports the requested operations.\n");
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+	if (list_empty(&exynos_drm_ippdrv_list)) {
+		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+		return ERR_PTR(-ENODEV);
+	}
+
+	/*
+	 * Search for the ipp driver owning the given prop_id handle.
+	 * The ipp subsystem sometimes needs to find a driver by prop_id,
+	 * e.g. for the PAUSE state, queue buf, or command control.
+	 */
+	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
+			count++, (int)ippdrv);
+
+		if (!list_empty(&ippdrv->cmd_list)) {
+			list_for_each_entry(c_node, &ippdrv->cmd_list, list)
+				if (c_node->property.prop_id == prop_id)
+					return ippdrv;
+		}
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct device *dev = priv->dev;
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct drm_exynos_ipp_prop_list *prop_list = data;
+	struct exynos_drm_ippdrv *ippdrv;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ctx) {
+		DRM_ERROR("invalid context.\n");
+		return -EINVAL;
+	}
+
+	if (!prop_list) {
+		DRM_ERROR("invalid property parameter.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
+
+	if (!prop_list->ipp_id) {
+		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+			count++;
+		/*
+		 * Report the ippdrv count to the user application: in a
+		 * first step the application queries the ippdrv count,
+		 * and in a second step it queries each driver's
+		 * capability using its ipp_id.
+		 */
+		prop_list->count = count;
+	} else {
+		/*
+		 * Report the ippdrv capability for the given ipp_id.
+		 * Some devices do not support the wb or output interface,
+		 * so the user application uses this ioctl to detect the
+		 * right ipp driver.
+		 */
+		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+						prop_list->ipp_id);
+		if (IS_ERR(ippdrv)) {
+			DRM_ERROR("ipp%d driver not found.\n",
+					prop_list->ipp_id);
+			return PTR_ERR(ippdrv);
+		}
+
+		*prop_list = *ippdrv->prop_list;
+	}
+
+	return 0;
+}
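+
+/*
+ * Usage sketch of the two-step query above, assuming the
+ * DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY wrapper from the exynos_drm uapi
+ * header:
+ *
+ *	struct drm_exynos_ipp_prop_list list = { .ipp_id = 0 };
+ *
+ *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &list);
+ *	for (id = 1; id <= list.count; id++) {
+ *		struct drm_exynos_ipp_prop_list caps = { .ipp_id = id };
+ *
+ *		ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &caps);
+ *	}
+ */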
+
+static void ipp_print_property(struct drm_exynos_ipp_property *property,
+		int idx)
+{
+	struct drm_exynos_ipp_config *config = &property->config[idx];
+	struct drm_exynos_pos *pos = &config->pos;
+	struct drm_exynos_sz *sz = &config->sz;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
+		__func__, property->prop_id, idx ? "dst" : "src", config->fmt);
+
+	DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
+		__func__, pos->x, pos->y, pos->w, pos->h,
+		sz->hsize, sz->vsize, config->flip, config->degree);
+}
+
+static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	u32 prop_id = property->prop_id;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+	ippdrv = ipp_find_drv_by_handle(prop_id);
+	if (IS_ERR(ippdrv)) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Find the command node in the ippdrv command list that matches
+	 * this prop_id and is in the STOP state, and store the new
+	 * property information in that command node.
+	 */
+	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+		if ((c_node->property.prop_id == prop_id) &&
+		    (c_node->state == IPP_STATE_STOP)) {
+			DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
+				__func__, property->cmd, (int)ippdrv);
+
+			c_node->property = *property;
+			return 0;
+		}
+	}
+
+	DRM_ERROR("failed to search property.\n");
+
+	return -EINVAL;
+}
+
+static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
+{
+	struct drm_exynos_ipp_cmd_work *cmd_work;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
+	if (!cmd_work) {
+		DRM_ERROR("failed to alloc cmd_work.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
+
+	return cmd_work;
+}
+
+static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
+{
+	struct drm_exynos_ipp_event_work *event_work;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
+	if (!event_work) {
+		DRM_ERROR("failed to alloc event_work.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+
+	return event_work;
+}
+
+int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct device *dev = priv->dev;
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct drm_exynos_ipp_property *property = data;
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	int ret, i;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ctx) {
+		DRM_ERROR("invalid context.\n");
+		return -EINVAL;
+	}
+
+	if (!property) {
+		DRM_ERROR("invalid property parameter.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Log the various properties set by the user application,
+	 * for debugging.
+	 */
+	for_each_ipp_ops(i)
+		ipp_print_property(property, i);
+
+	/*
+	 * The set-property ioctl normally generates a new prop_id, but a
+	 * prop_id may already have been assigned by an earlier set-property
+	 * call (e.g. in the PAUSE state). In that case find the existing
+	 * prop_id and reuse it instead of allocating a new one.
+	 */
+	if (property->prop_id) {
+		DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+		return ipp_find_and_set_property(property);
+	}
+
+	/* find ipp driver using ipp id */
+	ippdrv = ipp_find_driver(ctx, property);
+	if (IS_ERR(ippdrv)) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return -EINVAL;
+	}
+
+	/* allocate command node */
+	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
+	if (!c_node) {
+		DRM_ERROR("failed to allocate command node.\n");
+		return -ENOMEM;
+	}
+
+	/* create property id */
+	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
+		&property->prop_id);
+	if (ret) {
+		DRM_ERROR("failed to create id.\n");
+		goto err_clear;
+	}
+
+	DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
+		__func__, property->prop_id, property->cmd, (int)ippdrv);
+
+	/* stored property information and ippdrv in private data */
+	c_node->priv = priv;
+	c_node->property = *property;
+	c_node->state = IPP_STATE_IDLE;
+
+	c_node->start_work = ipp_create_cmd_work();
+	if (IS_ERR(c_node->start_work)) {
+		DRM_ERROR("failed to create start work.\n");
+		ret = PTR_ERR(c_node->start_work);
+		goto err_clear;
+	}
+
+	c_node->stop_work = ipp_create_cmd_work();
+	if (IS_ERR(c_node->stop_work)) {
+		DRM_ERROR("failed to create stop work.\n");
+		ret = PTR_ERR(c_node->stop_work);
+		goto err_free_start;
+	}
+
+	c_node->event_work = ipp_create_event_work();
+	if (IS_ERR(c_node->event_work)) {
+		DRM_ERROR("failed to create event work.\n");
+		ret = PTR_ERR(c_node->event_work);
+		goto err_free_stop;
+	}
+
+	mutex_init(&c_node->cmd_lock);
+	mutex_init(&c_node->mem_lock);
+	mutex_init(&c_node->event_lock);
+
+	init_completion(&c_node->start_complete);
+	init_completion(&c_node->stop_complete);
+
+	for_each_ipp_ops(i)
+		INIT_LIST_HEAD(&c_node->mem_list[i]);
+
+	INIT_LIST_HEAD(&c_node->event_list);
+	list_splice_init(&priv->event_list, &c_node->event_list);
+	list_add_tail(&c_node->list, &ippdrv->cmd_list);
+
+	/* make dedicated state without m2m */
+	if (!ipp_is_m2m_cmd(property->cmd))
+		ippdrv->dedicated = true;
+
+	return 0;
+
+err_free_stop:
+	kfree(c_node->stop_work);
+err_free_start:
+	kfree(c_node->start_work);
+err_clear:
+	kfree(c_node);
+	return ret;
+}
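+
+/*
+ * Note: a command node created above stays alive until either an
+ * IPP_CTRL_STOP cmd ctrl or the closing of the drm file tears it down
+ * through ipp_clean_cmd_node().
+ */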
+
+static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* delete list */
+	list_del(&c_node->list);
+
+	/* destroy mutex */
+	mutex_destroy(&c_node->cmd_lock);
+	mutex_destroy(&c_node->mem_lock);
+	mutex_destroy(&c_node->event_lock);
+
+	/* free command node */
+	kfree(c_node->start_work);
+	kfree(c_node->stop_work);
+	kfree(c_node->event_work);
+	kfree(c_node);
+}
+
+static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+{
+	struct drm_exynos_ipp_property *property = &c_node->property;
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct list_head *head;
+	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	mutex_lock(&c_node->mem_lock);
+
+	for_each_ipp_ops(i) {
+		/* source/destination memory list */
+		head = &c_node->mem_list[i];
+
+		if (list_empty(head)) {
+			DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
+				i ? "dst" : "src");
+			continue;
+		}
+
+		/* find memory node entry */
+		list_for_each_entry(m_node, head, list) {
+			DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
+				i ? "dst" : "src", count[i], (int)m_node);
+			count[i]++;
+		}
+	}
+
+	DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
+		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
+		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
+
+	/*
+	 * M2M operations need paired src/dst memory addresses, so check
+	 * the minimum of the src and dst counts. Other cases do not use
+	 * paired memory, so use the maximum count instead.
+	 */
+	if (ipp_is_m2m_cmd(property->cmd))
+		ret = min(count[EXYNOS_DRM_OPS_SRC],
+			count[EXYNOS_DRM_OPS_DST]);
+	else
+		ret = max(count[EXYNOS_DRM_OPS_SRC],
+			count[EXYNOS_DRM_OPS_DST]);
+
+	mutex_unlock(&c_node->mem_lock);
+
+	return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct list_head *head;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
+
+	/* source/destination memory list */
+	head = &c_node->mem_list[qbuf->ops_id];
+
+	/* find memory node from memory list */
+	list_for_each_entry(m_node, head, list) {
+		DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
+			__func__, count++, (int)m_node);
+
+		/* compare buffer id */
+		if (m_node->buf_id == qbuf->buf_id)
+			return m_node;
+	}
+
+	return NULL;
+}
+
+static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_mem_node *m_node)
+{
+	struct exynos_drm_ipp_ops *ops = NULL;
+	int ret = 0;
+
+	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+	if (!m_node) {
+		DRM_ERROR("invalid queue node.\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&c_node->mem_lock);
+
+	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+	/* get operations callback */
+	ops = ippdrv->ops[m_node->ops_id];
+	if (!ops) {
+		DRM_ERROR("not support ops.\n");
+		ret = -EFAULT;
+		goto err_unlock;
+	}
+
+	/* set address and enable irq */
+	if (ops->set_addr) {
+		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
+			m_node->buf_id, IPP_BUF_ENQUEUE);
+		if (ret) {
+			DRM_ERROR("failed to set addr.\n");
+			goto err_unlock;
+		}
+	}
+
+err_unlock:
+	mutex_unlock(&c_node->mem_lock);
+	return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+		*ipp_get_mem_node(struct drm_device *drm_dev,
+		struct drm_file *file,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct drm_exynos_ipp_buf_info buf_info;
+	void *addr;
+	int i;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	mutex_lock(&c_node->mem_lock);
+
+	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+	if (!m_node) {
+		DRM_ERROR("failed to allocate queue node.\n");
+		goto err_unlock;
+	}
+
+	/* clear base address for error handling */
+	memset(&buf_info, 0x0, sizeof(buf_info));
+
+	/* operations, buffer id */
+	m_node->ops_id = qbuf->ops_id;
+	m_node->prop_id = qbuf->prop_id;
+	m_node->buf_id = qbuf->buf_id;
+
+	DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
+		(int)m_node, qbuf->ops_id);
+	DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
+		qbuf->prop_id, m_node->buf_id);
+
+	for_each_ipp_planar(i) {
+		DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
+			i, qbuf->handle[i]);
+
+		/* get dma address by handle */
+		if (qbuf->handle[i]) {
+			addr = exynos_drm_gem_get_dma_addr(drm_dev,
+					qbuf->handle[i], file);
+			if (IS_ERR(addr)) {
+				DRM_ERROR("failed to get addr.\n");
+				goto err_clear;
+			}
+
+			buf_info.handles[i] = qbuf->handle[i];
+			buf_info.base[i] = *(dma_addr_t *) addr;
+			DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
+				__func__, i, buf_info.base[i],
+				(int)buf_info.handles[i]);
+		}
+	}
+
+	m_node->filp = file;
+	m_node->buf_info = buf_info;
+	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+
+	mutex_unlock(&c_node->mem_lock);
+	return m_node;
+
+err_clear:
+	kfree(m_node);
+err_unlock:
+	mutex_unlock(&c_node->mem_lock);
+	return ERR_PTR(-EFAULT);
+}
+
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_mem_node *m_node)
+{
+	int i;
+
+	DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+	if (!m_node) {
+		DRM_ERROR("invalid dequeue node.\n");
+		return -EFAULT;
+	}
+
+	if (list_empty(&m_node->list)) {
+		DRM_ERROR("empty memory node.\n");
+		return -ENOMEM;
+	}
+
+	mutex_lock(&c_node->mem_lock);
+
+	DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+	/* put gem buffer */
+	for_each_ipp_planar(i) {
+		unsigned long handle = m_node->buf_info.handles[i];
+		if (handle)
+			exynos_drm_gem_put_dma_addr(drm_dev, handle,
+							m_node->filp);
+	}
+
+	/* delete list in queue */
+	list_del(&m_node->list);
+	kfree(m_node);
+
+	mutex_unlock(&c_node->mem_lock);
+
+	return 0;
+}
+
+static void ipp_free_event(struct drm_pending_event *event)
+{
+	kfree(event);
+}
+
+static int ipp_get_event(struct drm_device *drm_dev,
+		struct drm_file *file,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_send_event *e;
+	unsigned long flags;
+
+	DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
+		qbuf->ops_id, qbuf->buf_id);
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+	if (!e) {
+		DRM_ERROR("failed to allocate event.\n");
+		spin_lock_irqsave(&drm_dev->event_lock, flags);
+		file->event_space += sizeof(e->event);
+		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+		return -ENOMEM;
+	}
+
+	/* make event */
+	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+	e->event.base.length = sizeof(e->event);
+	e->event.user_data = qbuf->user_data;
+	e->event.prop_id = qbuf->prop_id;
+	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
+	e->base.event = &e->event.base;
+	e->base.file_priv = file;
+	e->base.destroy = ipp_free_event;
+	list_add_tail(&e->base.link, &c_node->event_list);
+
+	return 0;
+}
+
+static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_send_event *e, *te;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (list_empty(&c_node->event_list)) {
+		DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
+		return;
+	}
+
+	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
+		DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
+			__func__, count++, (int)e);
+
+		/*
+		 * qbuf == NULL means delete all events: stop operations
+		 * want to flush the whole event list. Otherwise delete
+		 * only the event with the matching buf id.
+		 */
+		if (!qbuf) {
+			/* delete list */
+			list_del(&e->base.link);
+			kfree(e);
+		}
+
+		/* compare buffer id */
+		if (qbuf && (qbuf->buf_id ==
+		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
+			/* delete list */
+			list_del(&e->base.link);
+			kfree(e);
+			return;
+		}
+	}
+}
+
+static void ipp_handle_cmd_work(struct device *dev,
+		struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_work *cmd_work,
+		struct drm_exynos_ipp_cmd_node *c_node)
+{
+	struct ipp_context *ctx = get_ipp_context(dev);
+
+	cmd_work->ippdrv = ippdrv;
+	cmd_work->c_node = c_node;
+	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
+}
+
+static int ipp_queue_buf_with_run(struct device *dev,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_mem_node *m_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_property *property;
+	struct exynos_drm_ipp_ops *ops;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
+	if (IS_ERR(ippdrv)) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return -EFAULT;
+	}
+
+	ops = ippdrv->ops[qbuf->ops_id];
+	if (!ops) {
+		DRM_ERROR("failed to get ops.\n");
+		return -EFAULT;
+	}
+
+	property = &c_node->property;
+
+	if (c_node->state != IPP_STATE_START) {
+		DRM_DEBUG_KMS("%s:bypass for invalid state.\n" , __func__);
+		return 0;
+	}
+
+	if (!ipp_check_mem_list(c_node)) {
+		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+		return 0;
+	}
+
+	/*
+	 * Once the destination buffer is set and the clock enabled,
+	 * m2m operations need to be started from queue_buf.
+	 */
+	if (ipp_is_m2m_cmd(property->cmd)) {
+		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
+
+		cmd_work->ctrl = IPP_CTRL_PLAY;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+	} else {
+		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+		if (ret) {
+			DRM_ERROR("failed to set m node.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void ipp_clean_queue_buf(struct drm_device *drm_dev,
+		struct drm_exynos_ipp_cmd_node *c_node,
+		struct drm_exynos_ipp_queue_buf *qbuf)
+{
+	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
+		/* delete list */
+		list_for_each_entry_safe(m_node, tm_node,
+			&c_node->mem_list[qbuf->ops_id], list) {
+			if (m_node->buf_id == qbuf->buf_id &&
+			    m_node->ops_id == qbuf->ops_id)
+				ipp_put_mem_node(drm_dev, c_node, m_node);
+		}
+	}
+}
+
+int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct device *dev = priv->dev;
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct drm_exynos_ipp_queue_buf *qbuf = data;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	struct drm_exynos_ipp_mem_node *m_node;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!qbuf) {
+		DRM_ERROR("invalid buf parameter.\n");
+		return -EINVAL;
+	}
+
+	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
+		DRM_ERROR("invalid ops parameter.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
+		__func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
+		qbuf->buf_id, qbuf->buf_type);
+
+	/* find command node */
+	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+		qbuf->prop_id);
+	if (IS_ERR(c_node)) {
+		DRM_ERROR("failed to get command node.\n");
+		return PTR_ERR(c_node);
+	}
+
+	/* buffer control */
+	switch (qbuf->buf_type) {
+	case IPP_BUF_ENQUEUE:
+		/* get memory node */
+		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
+		if (IS_ERR(m_node)) {
+			DRM_ERROR("failed to get m_node.\n");
+			return PTR_ERR(m_node);
+		}
+
+		/*
+		 * First get an event for the destination buffer; then,
+		 * in the M2M case, run with the destination buffer if
+		 * needed.
+		 */
+		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
+			/* get event for destination buffer */
+			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
+			if (ret) {
+				DRM_ERROR("failed to get event.\n");
+				goto err_clean_node;
+			}
+
+			/*
+			 * In the M2M case, run play control for the
+			 * streaming feature; in other cases just set
+			 * the address and wait.
+			 */
+			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
+			if (ret) {
+				DRM_ERROR("failed to run command.\n");
+				goto err_clean_node;
+			}
+		}
+		break;
+	case IPP_BUF_DEQUEUE:
+		mutex_lock(&c_node->cmd_lock);
+
+		/* put event for destination buffer */
+		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
+			ipp_put_event(c_node, qbuf);
+
+		ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+
+		mutex_unlock(&c_node->cmd_lock);
+		break;
+	default:
+		DRM_ERROR("invalid buffer control.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+
+err_clean_node:
+	DRM_ERROR("clean memory nodes.\n");
+
+	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+	return ret;
+}
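+
+/*
+ * Sketch of the userland flow implied by the ioctls in this file (ioctl
+ * wrapper names assumed from the exynos_drm uapi header):
+ *
+ *	1. set property          DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY
+ *	2. enqueue src/dst bufs  DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF (IPP_BUF_ENQUEUE)
+ *	3. start                 DRM_IOCTL_EXYNOS_IPP_CMD_CTRL (IPP_CTRL_PLAY)
+ *	4. wait for a DRM_EXYNOS_IPP_EVENT on the drm fd
+ *	5. dequeue bufs          DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF (IPP_BUF_DEQUEUE)
+ *	6. stop                  DRM_IOCTL_EXYNOS_IPP_CMD_CTRL (IPP_CTRL_STOP)
+ */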
+
+static bool exynos_drm_ipp_check_valid(struct device *dev,
+		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (ctrl != IPP_CTRL_PLAY) {
+		if (pm_runtime_suspended(dev)) {
+			DRM_ERROR("pm:runtime_suspended.\n");
+			goto err_status;
+		}
+	}
+
+	switch (ctrl) {
+	case IPP_CTRL_PLAY:
+		if (state != IPP_STATE_IDLE)
+			goto err_status;
+		break;
+	case IPP_CTRL_STOP:
+		if (state == IPP_STATE_STOP)
+			goto err_status;
+		break;
+	case IPP_CTRL_PAUSE:
+		if (state != IPP_STATE_START)
+			goto err_status;
+		break;
+	case IPP_CTRL_RESUME:
+		if (state != IPP_STATE_STOP)
+			goto err_status;
+		break;
+	default:
+		DRM_ERROR("invalid ctrl.\n");
+		goto err_status;
+	}
+
+	return true;
+
+err_status:
+	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
+	return false;
+}
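+
+/*
+ * Summary of the transitions accepted above:
+ *
+ *	PLAY:   only from IDLE
+ *	STOP:   from any state except STOP
+ *	PAUSE:  only from START
+ *	RESUME: only from STOP
+ *
+ * Any ctrl other than PLAY additionally requires the device not to be
+ * runtime suspended.
+ */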
+
+int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct exynos_drm_ippdrv *ippdrv = NULL;
+	struct device *dev = priv->dev;
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
+	struct drm_exynos_ipp_cmd_work *cmd_work;
+	struct drm_exynos_ipp_cmd_node *c_node;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!ctx) {
+		DRM_ERROR("invalid context.\n");
+		return -EINVAL;
+	}
+
+	if (!cmd_ctrl) {
+		DRM_ERROR("invalid control parameter.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
+		cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
+	if (IS_ERR(ippdrv)) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return PTR_ERR(ippdrv);
+	}
+
+	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+		cmd_ctrl->prop_id);
+	if (IS_ERR(c_node)) {
+		DRM_ERROR("invalid command node list.\n");
+		return PTR_ERR(c_node);
+	}
+
+	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
+	    c_node->state)) {
+		DRM_ERROR("invalid state.\n");
+		return -EINVAL;
+	}
+
+	switch (cmd_ctrl->ctrl) {
+	case IPP_CTRL_PLAY:
+		if (pm_runtime_suspended(ippdrv->dev))
+			pm_runtime_get_sync(ippdrv->dev);
+		c_node->state = IPP_STATE_START;
+
+		cmd_work = c_node->start_work;
+		cmd_work->ctrl = cmd_ctrl->ctrl;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+		break;
+	case IPP_CTRL_STOP:
+		cmd_work = c_node->stop_work;
+		cmd_work->ctrl = cmd_ctrl->ctrl;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+		if (!wait_for_completion_timeout(&c_node->stop_complete,
+		    msecs_to_jiffies(300))) {
+			DRM_ERROR("timeout stop:prop_id[%d]\n",
+				c_node->property.prop_id);
+		}
+
+		c_node->state = IPP_STATE_STOP;
+		ippdrv->dedicated = false;
+		ipp_clean_cmd_node(c_node);
+
+		if (list_empty(&ippdrv->cmd_list))
+			pm_runtime_put_sync(ippdrv->dev);
+		break;
+	case IPP_CTRL_PAUSE:
+		cmd_work = c_node->stop_work;
+		cmd_work->ctrl = cmd_ctrl->ctrl;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+		if (!wait_for_completion_timeout(&c_node->stop_complete,
+		    msecs_to_jiffies(200))) {
+			DRM_ERROR("timeout stop:prop_id[%d]\n",
+				c_node->property.prop_id);
+		}
+
+		c_node->state = IPP_STATE_STOP;
+		break;
+	case IPP_CTRL_RESUME:
+		c_node->state = IPP_STATE_START;
+		cmd_work = c_node->start_work;
+		cmd_work->ctrl = cmd_ctrl->ctrl;
+		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+		break;
+	default:
+		DRM_ERROR("unsupported control type.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
+		cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+	return 0;
+}
+
+int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(
+		&exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(
+		&exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+	return blocking_notifier_call_chain(
+		&exynos_drm_ippnb_list, val, v);
+}
+
+static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_property *property)
+{
+	struct exynos_drm_ipp_ops *ops = NULL;
+	bool swap = false;
+	int ret, i;
+
+	if (!property) {
+		DRM_ERROR("invalid property parameter.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+	/* reset h/w block */
+	if (ippdrv->reset &&
+	    ippdrv->reset(ippdrv->dev)) {
+		DRM_ERROR("failed to reset.\n");
+		return -EINVAL;
+	}
+
+	/* set source,destination operations */
+	for_each_ipp_ops(i) {
+		struct drm_exynos_ipp_config *config =
+			&property->config[i];
+
+		ops = ippdrv->ops[i];
+		if (!ops || !config) {
+			DRM_ERROR("missing ops or config.\n");
+			return -EINVAL;
+		}
+
+		/* set format */
+		if (ops->set_fmt) {
+			ret = ops->set_fmt(ippdrv->dev, config->fmt);
+			if (ret) {
+				DRM_ERROR("format not supported.\n");
+				return ret;
+			}
+		}
+
+		/* set transform for rotation, flip */
+		if (ops->set_transf) {
+			ret = ops->set_transf(ippdrv->dev, config->degree,
+				config->flip, &swap);
+			if (ret) {
+				DRM_ERROR("transform not supported.\n");
+				return -EINVAL;
+			}
+		}
+
+		/* set size */
+		if (ops->set_size) {
+			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
+				&config->sz);
+			if (ret) {
+				DRM_ERROR("size not supported.\n");
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
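+
+/*
+ * Note: ipp_set_property() programs each side of the operation in a
+ * fixed order: set_fmt, then set_transf (which may report a width/height
+ * swap through *swap, e.g. for rotated output), then set_size using that
+ * swap.
+ */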
+
+static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_node *c_node)
+{
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct drm_exynos_ipp_property *property = &c_node->property;
+	struct list_head *head;
+	int ret, i;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+	/* store command info in ippdrv */
+	ippdrv->c_node = c_node;
+
+	if (!ipp_check_mem_list(c_node)) {
+		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* set current property in ippdrv */
+	ret = ipp_set_property(ippdrv, property);
+	if (ret) {
+		DRM_ERROR("failed to set property.\n");
+		ippdrv->c_node = NULL;
+		return ret;
+	}
+
+	/* check command */
+	switch (property->cmd) {
+	case IPP_CMD_M2M:
+		for_each_ipp_ops(i) {
+			/* source/destination memory list */
+			head = &c_node->mem_list[i];
+
+			m_node = list_first_entry(head,
+				struct drm_exynos_ipp_mem_node, list);
+			if (!m_node) {
+				DRM_ERROR("failed to get node.\n");
+				ret = -EFAULT;
+				return ret;
+			}
+
+			DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
+				__func__, (int)m_node);
+
+			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to set m node.\n");
+				return ret;
+			}
+		}
+		break;
+	case IPP_CMD_WB:
+		/* destination memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+		list_for_each_entry(m_node, head, list) {
+			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to set m node.\n");
+				return ret;
+			}
+		}
+		break;
+	case IPP_CMD_OUTPUT:
+		/* source memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+		list_for_each_entry(m_node, head, list) {
+			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to set m node.\n");
+				return ret;
+			}
+		}
+		break;
+	default:
+		DRM_ERROR("invalid operations.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
+
+	/* start operations */
+	if (ippdrv->start) {
+		ret = ippdrv->start(ippdrv->dev, property->cmd);
+		if (ret) {
+			DRM_ERROR("failed to start ops.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ipp_stop_property(struct drm_device *drm_dev,
+		struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_node *c_node)
+{
+	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+	struct drm_exynos_ipp_property *property = &c_node->property;
+	struct list_head *head;
+	int ret = 0, i;
+
+	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+	/* put event */
+	ipp_put_event(c_node, NULL);
+
+	/* check command */
+	switch (property->cmd) {
+	case IPP_CMD_M2M:
+		for_each_ipp_ops(i) {
+			/* source/destination memory list */
+			head = &c_node->mem_list[i];
+
+			if (list_empty(head)) {
+				DRM_DEBUG_KMS("%s:mem_list is empty.\n",
+					__func__);
+				break;
+			}
+
+			list_for_each_entry_safe(m_node, tm_node,
+				head, list) {
+				ret = ipp_put_mem_node(drm_dev, c_node,
+					m_node);
+				if (ret) {
+					DRM_ERROR("failed to put m_node.\n");
+					goto err_clear;
+				}
+			}
+		}
+		break;
+	case IPP_CMD_WB:
+		/* destination memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+		if (list_empty(head)) {
+			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+			break;
+		}
+
+		list_for_each_entry_safe(m_node, tm_node, head, list) {
+			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to put m_node.\n");
+				goto err_clear;
+			}
+		}
+		break;
+	case IPP_CMD_OUTPUT:
+		/* source memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+		if (list_empty(head)) {
+			DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+			break;
+		}
+
+		list_for_each_entry_safe(m_node, tm_node, head, list) {
+			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+			if (ret) {
+				DRM_ERROR("failed to put m_node.\n");
+				goto err_clear;
+			}
+		}
+		break;
+	default:
+		DRM_ERROR("invalid operations.\n");
+		ret = -EINVAL;
+		goto err_clear;
+	}
+
+err_clear:
+	/* stop operations */
+	if (ippdrv->stop)
+		ippdrv->stop(ippdrv->dev, property->cmd);
+
+	return ret;
+}
+
+void ipp_sched_cmd(struct work_struct *work)
+{
+	struct drm_exynos_ipp_cmd_work *cmd_work =
+		(struct drm_exynos_ipp_cmd_work *)work;
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	struct drm_exynos_ipp_property *property;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	ippdrv = cmd_work->ippdrv;
+	if (!ippdrv) {
+		DRM_ERROR("invalid ippdrv list.\n");
+		return;
+	}
+
+	c_node = cmd_work->c_node;
+	if (!c_node) {
+		DRM_ERROR("invalid command node list.\n");
+		return;
+	}
+
+	mutex_lock(&c_node->cmd_lock);
+
+	property = &c_node->property;
+
+	switch (cmd_work->ctrl) {
+	case IPP_CTRL_PLAY:
+	case IPP_CTRL_RESUME:
+		ret = ipp_start_property(ippdrv, c_node);
+		if (ret) {
+			DRM_ERROR("failed to start property:prop_id[%d]\n",
+				c_node->property.prop_id);
+			goto err_unlock;
+		}
+
+		/*
+		 * The M2M case waits for completion of the transfer:
+		 * M2M performs a single unit of work at a time over
+		 * multiple queued buffers, so it must wait for each
+		 * data transfer to finish.
+		 */
+		if (ipp_is_m2m_cmd(property->cmd)) {
+			if (!wait_for_completion_timeout
+			    (&c_node->start_complete, msecs_to_jiffies(200))) {
+				DRM_ERROR("timeout event:prop_id[%d]\n",
+					c_node->property.prop_id);
+				goto err_unlock;
+			}
+		}
+		break;
+	case IPP_CTRL_STOP:
+	case IPP_CTRL_PAUSE:
+		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
+			c_node);
+		if (ret) {
+			DRM_ERROR("failed to stop property.\n");
+			goto err_unlock;
+		}
+
+		complete(&c_node->stop_complete);
+		break;
+	default:
+		DRM_ERROR("unknown control type\n");
+		break;
+	}
+
+	DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
+
+err_unlock:
+	mutex_unlock(&c_node->cmd_lock);
+}
+
+static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
+		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
+{
+	struct drm_device *drm_dev = ippdrv->drm_dev;
+	struct drm_exynos_ipp_property *property = &c_node->property;
+	struct drm_exynos_ipp_mem_node *m_node;
+	struct drm_exynos_ipp_queue_buf qbuf;
+	struct drm_exynos_ipp_send_event *e;
+	struct list_head *head;
+	struct timeval now;
+	unsigned long flags;
+	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
+	int ret, i;
+
+	for_each_ipp_ops(i)
+		DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+			i ? "dst" : "src", buf_id[i]);
+
+	if (!drm_dev) {
+		DRM_ERROR("failed to get drm_dev.\n");
+		return -EINVAL;
+	}
+
+	if (!property) {
+		DRM_ERROR("failed to get property.\n");
+		return -EINVAL;
+	}
+
+	if (list_empty(&c_node->event_list)) {
+		DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
+		return 0;
+	}
+
+	if (!ipp_check_mem_list(c_node)) {
+		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+		return 0;
+	}
+
+	/* check command */
+	switch (property->cmd) {
+	case IPP_CMD_M2M:
+		for_each_ipp_ops(i) {
+			/* source/destination memory list */
+			head = &c_node->mem_list[i];
+
+			m_node = list_first_entry(head,
+				struct drm_exynos_ipp_mem_node, list);
+			if (!m_node) {
+				DRM_ERROR("empty memory node.\n");
+				return -ENOMEM;
+			}
+
+			tbuf_id[i] = m_node->buf_id;
+			DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+				i ? "dst" : "src", tbuf_id[i]);
+
+			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+			if (ret)
+				DRM_ERROR("failed to put m_node.\n");
+		}
+		break;
+	case IPP_CMD_WB:
+		/* clear buf for finding */
+		memset(&qbuf, 0x0, sizeof(qbuf));
+		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
+		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
+
+		/* get memory node entry */
+		m_node = ipp_find_mem_node(c_node, &qbuf);
+		if (!m_node) {
+			DRM_ERROR("empty memory node.\n");
+			return -ENOMEM;
+		}
+
+		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
+
+		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+		if (ret)
+			DRM_ERROR("failed to put m_node.\n");
+		break;
+	case IPP_CMD_OUTPUT:
+		/* source memory list */
+		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+		m_node = list_first_entry(head,
+			struct drm_exynos_ipp_mem_node, list);
+		if (!m_node) {
+			DRM_ERROR("empty memory node.\n");
+			return -ENOMEM;
+		}
+
+		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
+
+		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+		if (ret)
+			DRM_ERROR("failed to put m_node.\n");
+		break;
+	default:
+		DRM_ERROR("invalid operations.\n");
+		return -EINVAL;
+	}
+
+	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
+		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
+			tbuf_id[1], buf_id[1], property->prop_id);
+
+	/*
+	 * The command node keeps an event list for destination buffers.
+	 * When a destination buffer is enqueued on the mem list, an
+	 * event is created and linked to the tail of the event list,
+	 * so the first event corresponds to the first enqueued buffer.
+	 */
+	e = list_first_entry(&c_node->event_list,
+		struct drm_exynos_ipp_send_event, base.link);
+
+	if (!e) {
+		DRM_ERROR("empty event.\n");
+		return -EINVAL;
+	}
+
+	do_gettimeofday(&now);
+	DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
+		, __func__, now.tv_sec, now.tv_usec);
+	e->event.tv_sec = now.tv_sec;
+	e->event.tv_usec = now.tv_usec;
+	e->event.prop_id = property->prop_id;
+
+	/* set buffer id about source destination */
+	for_each_ipp_ops(i)
+		e->event.buf_id[i] = tbuf_id[i];
+
+	spin_lock_irqsave(&drm_dev->event_lock, flags);
+	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+	wake_up_interruptible(&e->base.file_priv->event_wait);
+	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+	DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
+		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
+
+	return 0;
+}
+
+void ipp_sched_event(struct work_struct *work)
+{
+	struct drm_exynos_ipp_event_work *event_work =
+		(struct drm_exynos_ipp_event_work *)work;
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	int ret;
+
+	if (!event_work) {
+		DRM_ERROR("failed to get event_work.\n");
+		return;
+	}
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
+		event_work->buf_id[EXYNOS_DRM_OPS_DST]);
+
+	ippdrv = event_work->ippdrv;
+	if (!ippdrv) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return;
+	}
+
+	c_node = ippdrv->c_node;
+	if (!c_node) {
+		DRM_ERROR("failed to get command node.\n");
+		return;
+	}
+
+	/*
+	 * IPP synchronizes the command thread with the event thread.
+	 * If userland closes the device immediately, the event thread
+	 * still signals start_complete before bailing out, so that the
+	 * command thread is not left waiting.
+	 */
+	if (c_node->state != IPP_STATE_START) {
+		DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
+			__func__, c_node->state, c_node->property.prop_id);
+		goto err_completion;
+	}
+
+	mutex_lock(&c_node->event_lock);
+
+	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
+	if (ret) {
+		DRM_ERROR("failed to send event.\n");
+		goto err_completion;
+	}
+
+err_completion:
+	if (ipp_is_m2m_cmd(c_node->property.cmd))
+		complete(&c_node->start_complete);
+
+	mutex_unlock(&c_node->event_lock);
+}
+
+static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+	struct ipp_context *ctx = get_ipp_context(dev);
+	struct exynos_drm_ippdrv *ippdrv;
+	int ret, count = 0;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* get ipp driver entry */
+	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+		ippdrv->drm_dev = drm_dev;
+
+		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
+			&ippdrv->ipp_id);
+		if (ret) {
+			DRM_ERROR("failed to create id.\n");
+			goto err_idr;
+		}
+
+		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
+			count++, (int)ippdrv, ippdrv->ipp_id);
+
+		if (ippdrv->ipp_id == 0) {
+			DRM_ERROR("failed to get ipp_id[%d]\n",
+				ippdrv->ipp_id);
+			ret = -EINVAL;
+			goto err_idr;
+		}
+
+		/* store parent device for node */
+		ippdrv->parent_dev = dev;
+
+		/* store event work queue and handler */
+		ippdrv->event_workq = ctx->event_workq;
+		ippdrv->sched_event = ipp_sched_event;
+		INIT_LIST_HEAD(&ippdrv->cmd_list);
+
+		if (is_drm_iommu_supported(drm_dev)) {
+			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+			if (ret) {
+				DRM_ERROR("failed to activate iommu\n");
+				goto err_iommu;
+			}
+		}
+	}
+
+	return 0;
+
+err_iommu:
+	/* get ipp driver entry */
+	list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+		if (is_drm_iommu_supported(drm_dev))
+			drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+err_idr:
+	idr_destroy(&ctx->ipp_idr);
+	idr_destroy(&ctx->prop_idr);
+	return ret;
+}
+
+static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* get ipp driver entry */
+	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+		if (is_drm_iommu_supported(drm_dev))
+			drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+		ippdrv->drm_dev = NULL;
+		exynos_drm_ippdrv_unregister(ippdrv);
+	}
+}
+
+static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		DRM_ERROR("failed to allocate priv.\n");
+		return -ENOMEM;
+	}
+	priv->dev = dev;
+	file_priv->ipp_priv = priv;
+
+	INIT_LIST_HEAD(&priv->event_list);
+
+	DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
+
+	return 0;
+}
+
+static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct exynos_drm_ippdrv *ippdrv = NULL;
+	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
+
+	if (list_empty(&exynos_drm_ippdrv_list)) {
+		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+		goto err_clear;
+	}
+
+	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+		if (list_empty(&ippdrv->cmd_list))
+			continue;
+
+		list_for_each_entry_safe(c_node, tc_node,
+			&ippdrv->cmd_list, list) {
+			DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
+				__func__, count++, (int)ippdrv);
+
+			if (c_node->priv == priv) {
+				/*
+				 * Userland went away abnormally (e.g. the
+				 * process was killed and the file closed)
+				 * without issuing a stop cmd ctrl, so
+				 * perform the stop operation on its
+				 * behalf here.
+				 */
+				if (c_node->state == IPP_STATE_START) {
+					ipp_stop_property(drm_dev, ippdrv,
+						c_node);
+					c_node->state = IPP_STATE_STOP;
+				}
+
+				ippdrv->dedicated = false;
+				ipp_clean_cmd_node(c_node);
+				if (list_empty(&ippdrv->cmd_list))
+					pm_runtime_put_sync(ippdrv->dev);
+			}
+		}
+	}
+
+err_clear:
+	kfree(priv);
+	return;
+}
+
+static int ipp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ipp_context *ctx;
+	struct exynos_drm_subdrv *subdrv;
+	int ret;
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	mutex_init(&ctx->ipp_lock);
+	mutex_init(&ctx->prop_lock);
+
+	idr_init(&ctx->ipp_idr);
+	idr_init(&ctx->prop_idr);
+
+	/*
+	 * Create a single-threaded workqueue for IPP events.
+	 * IPP drivers queue their event_work here, and this thread
+	 * delivers the resulting events to the user process.
+	 */
+	ctx->event_workq = create_singlethread_workqueue("ipp_event");
+	if (!ctx->event_workq) {
+		dev_err(dev, "failed to create event workqueue\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Create a single-threaded workqueue for IPP commands.
+	 * A user process creates a command node via the set-property
+	 * ioctl and queues its start_work here; this thread then
+	 * starts the property operation.
+	 */
+	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
+	if (!ctx->cmd_workq) {
+		dev_err(dev, "failed to create cmd workqueue\n");
+		ret = -EINVAL;
+		goto err_event_workq;
+	}
+
+	/* set sub driver information */
+	subdrv = &ctx->subdrv;
+	subdrv->dev = dev;
+	subdrv->probe = ipp_subdrv_probe;
+	subdrv->remove = ipp_subdrv_remove;
+	subdrv->open = ipp_subdrv_open;
+	subdrv->close = ipp_subdrv_close;
+
+	platform_set_drvdata(pdev, ctx);
+
+	ret = exynos_drm_subdrv_register(subdrv);
+	if (ret < 0) {
+		DRM_ERROR("failed to register drm ipp device.\n");
+		goto err_cmd_workq;
+	}
+
+	dev_info(dev, "drm ipp registered successfully.\n");
+
+	return 0;
+
+err_cmd_workq:
+	destroy_workqueue(ctx->cmd_workq);
+err_event_workq:
+	destroy_workqueue(ctx->event_workq);
+	return ret;
+}
+
+static int ipp_remove(struct platform_device *pdev)
+{
+	struct ipp_context *ctx = platform_get_drvdata(pdev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* unregister sub driver */
+	exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+	/* destroy ipp and property idr tables */
+	idr_destroy(&ctx->ipp_idr);
+	idr_destroy(&ctx->prop_idr);
+
+	mutex_destroy(&ctx->ipp_lock);
+	mutex_destroy(&ctx->prop_lock);
+
+	/* destroy command, event work queue */
+	destroy_workqueue(ctx->cmd_workq);
+	destroy_workqueue(ctx->event_workq);
+
+	return 0;
+}
+
+static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
+{
+	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ipp_suspend(struct device *dev)
+{
+	struct ipp_context *ctx = get_ipp_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_resume(struct device *dev)
+{
+	struct ipp_context *ctx = get_ipp_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!pm_runtime_suspended(dev))
+		return ipp_power_ctrl(ctx, true);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int ipp_runtime_suspend(struct device *dev)
+{
+	struct ipp_context *ctx = get_ipp_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_runtime_resume(struct device *dev)
+{
+	struct ipp_context *ctx = get_ipp_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	return ipp_power_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops ipp_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
+	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
+};
+
+struct platform_driver ipp_driver = {
+	.probe		= ipp_probe,
+	.remove		= ipp_remove,
+	.driver		= {
+		.name	= "exynos-drm-ipp",
+		.owner	= THIS_MODULE,
+		.pm	= &ipp_pm_ops,
+	},
+};
+
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 0000000..4cadbea
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *	Jinyoung Jeon <jy0.jeon@samsung.com>
+ *	Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_IPP_H_
+#define _EXYNOS_DRM_IPP_H_
+
+#define for_each_ipp_ops(pos)	\
+	for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
+#define for_each_ipp_planar(pos)	\
+	for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
+
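+/*
+ * Example: both helpers expand to plain for-loops over the fixed index
+ * range, e.g. as used by the rotator driver below:
+ *
+ *	int i;
+ *
+ *	for_each_ipp_planar(i)
+ *		addr[i] = buf_info->base[i];
+ *
+ * iterates i over [0, EXYNOS_DRM_PLANAR_MAX).
+ */
+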
+#define IPP_GET_LCD_WIDTH	_IOR('F', 302, int)
+#define IPP_GET_LCD_HEIGHT	_IOR('F', 303, int)
+#define IPP_SET_WRITEBACK	_IOW('F', 304, u32)
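+
+/*
+ * For reference: _IOR('F', 302, int) encodes a read-direction ioctl with
+ * magic 'F', command number 302 and a sizeof(int) payload; user space
+ * must use the same encoding to match these commands.
+ */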
+
+/* definition of state */
+enum drm_exynos_ipp_state {
+	IPP_STATE_IDLE,
+	IPP_STATE_START,
+	IPP_STATE_STOP,
+};
+
+/*
+ * A structure of command work information.
+ *
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @c_node: command node information.
+ * @ctrl: command control.
+ */
+struct drm_exynos_ipp_cmd_work {
+	struct work_struct	work;
+	struct exynos_drm_ippdrv	*ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	enum drm_exynos_ipp_ctrl	ctrl;
+};
+
+/*
+ * A structure of command node.
+ *
+ * @priv: IPP private information.
+ * @list: list head to command queue information.
+ * @event_list: list head of event.
+ * @mem_list: list heads for the source and destination memory queues.
+ * @cmd_lock: lock for synchronization of access to ioctl.
+ * @mem_lock: lock for synchronization of access to memory nodes.
+ * @event_lock: lock for synchronization of access to scheduled event.
+ * @start_complete: completion of start of command.
+ * @stop_complete: completion of stop of command.
+ * @property: property information.
+ * @start_work: start command work structure.
+ * @stop_work: stop command work structure.
+ * @event_work: event work structure.
+ * @state: state of command node.
+ */
+struct drm_exynos_ipp_cmd_node {
+	struct exynos_drm_ipp_private *priv;
+	struct list_head	list;
+	struct list_head	event_list;
+	struct list_head	mem_list[EXYNOS_DRM_OPS_MAX];
+	struct mutex	cmd_lock;
+	struct mutex	mem_lock;
+	struct mutex	event_lock;
+	struct completion	start_complete;
+	struct completion	stop_complete;
+	struct drm_exynos_ipp_property	property;
+	struct drm_exynos_ipp_cmd_work *start_work;
+	struct drm_exynos_ipp_cmd_work *stop_work;
+	struct drm_exynos_ipp_event_work *event_work;
+	enum drm_exynos_ipp_state	state;
+};
+
+/*
+ * A structure of buffer information.
+ *
+ * @handles: gem object handle for each of the Y, Cb and Cr planes.
+ * @base: DMA address of each of the Y, Cb and Cr planes.
+ */
+struct drm_exynos_ipp_buf_info {
+	unsigned long	handles[EXYNOS_DRM_PLANAR_MAX];
+	dma_addr_t	base[EXYNOS_DRM_PLANAR_MAX];
+};
+
+/*
+ * A structure of wb setting information.
+ *
+ * @enable: enable flag for wb.
+ * @refresh: HZ of the refresh rate.
+ */
+struct drm_exynos_ipp_set_wb {
+	__u32	enable;
+	__u32	refresh;
+};
+
+/*
+ * A structure of event work information.
+ *
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @buf_id: id of src, dst buffer.
+ */
+struct drm_exynos_ipp_event_work {
+	struct work_struct	work;
+	struct exynos_drm_ippdrv *ippdrv;
+	u32	buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
+/*
+ * A structure of source/destination operations.
+ *
+ * @set_fmt: set format of image.
+ * @set_transf: set transform(rotations, flip).
+ * @set_size: set size of region.
+ * @set_addr: set address for dma.
+ */
+struct exynos_drm_ipp_ops {
+	int (*set_fmt)(struct device *dev, u32 fmt);
+	int (*set_transf)(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip, bool *swap);
+	int (*set_size)(struct device *dev, int swap,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
+	int (*set_addr)(struct device *dev,
+		 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_type buf_type);
+};
+
+/*
+ * A structure of ipp driver.
+ *
+ * @drv_list: list head for registered sub driver information.
+ * @parent_dev: parent device information.
+ * @dev: platform device.
+ * @drm_dev: drm device.
+ * @ipp_id: id of ipp driver.
+ * @dedicated: dedicated ipp device.
+ * @ops: source, destination operations.
+ * @event_workq: event work queue.
+ * @c_node: current command information.
+ * @cmd_list: list head for command information.
+ * @prop_list: property information of the current ipp driver.
+ * @check_property: check property about format, size, buffer.
+ * @reset: reset ipp block.
+ * @start: ipp each device start.
+ * @stop: ipp each device stop.
+ * @sched_event: work schedule handler.
+ */
+struct exynos_drm_ippdrv {
+	struct list_head	drv_list;
+	struct device	*parent_dev;
+	struct device	*dev;
+	struct drm_device	*drm_dev;
+	u32	ipp_id;
+	bool	dedicated;
+	struct exynos_drm_ipp_ops	*ops[EXYNOS_DRM_OPS_MAX];
+	struct workqueue_struct	*event_workq;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	struct list_head	cmd_list;
+	struct drm_exynos_ipp_prop_list *prop_list;
+
+	int (*check_property)(struct device *dev,
+		struct drm_exynos_ipp_property *property);
+	int (*reset)(struct device *dev);
+	int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+	void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+	void (*sched_event)(struct work_struct *work);
+};
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+					 struct drm_file *file);
+extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+					 struct drm_file *file);
+extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+					 struct drm_file *file);
+extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+					 struct drm_file *file);
+extern int exynos_drm_ippnb_register(struct notifier_block *nb);
+extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
+extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
+extern void ipp_sched_cmd(struct work_struct *work);
+extern void ipp_sched_event(struct work_struct *work);
+
+#else
+static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+	return -ENODEV;
+}
+
+static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+	return -ENODEV;
+}
+
+static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
+						void *data,
+						struct drm_file *file_priv)
+{
+	return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
+						void *data,
+						struct drm_file *file_priv)
+{
+	return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
+						void *data,
+						struct drm_file *file)
+{
+	return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
+						void *data,
+						struct drm_file *file)
+{
+	return -ENOTTY;
+}
+
+static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+	return -ENOTTY;
+}
+#endif
+
+#endif /* _EXYNOS_DRM_IPP_H_ */
+
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_plane.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_plane.c
new file mode 100644
index 0000000..83efc66
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
+
+#define to_exynos_plane(x)	container_of(x, struct exynos_plane, base)
+
+struct exynos_plane {
+	struct drm_plane		base;
+	struct exynos_drm_overlay	overlay;
+	bool				enabled;
+};
+
+static const uint32_t formats[] = {
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV12MT,
+};
+
+/*
+ * This function returns the visible X or Y size on the screen, given the
+ * length and start position of the CRTC.
+ *
+ *      <--- length --->
+ * CRTC ----------------
+ *      ^ start        ^ end
+ *
+ * There are six cases from a to f.
+ *
+ *             <----- SCREEN ----->
+ *             0                 last
+ *   ----------|------------------|----------
+ * CRTCs
+ * a -------
+ *        b -------
+ *        c --------------------------
+ *                 d --------
+ *                           e -------
+ *                                  f -------
+ */
+static int exynos_plane_get_size(int start, unsigned length, unsigned last)
+{
+	int end = start + length;
+	int size = 0;
+
+	if (start <= 0) {
+		if (end > 0)
+			size = min_t(unsigned, end, last);
+	} else if (start <= last) {
+		size = min_t(unsigned, last - start, length);
+	}
+
+	return size;
+}
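+
+/*
+ * Worked example for the cases above: for case b, start = -16,
+ * length = 64 and last = 720 give end = 48, so size = min(48, 720) = 48
+ * (the 16 off-screen pixels are clipped). For case e, start = 700,
+ * length = 64 and last = 720 give size = min(720 - 700, 64) = 20.
+ */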
+
+int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+			  unsigned int crtc_w, unsigned int crtc_h,
+			  uint32_t src_x, uint32_t src_y,
+			  uint32_t src_w, uint32_t src_h)
+{
+	struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+	struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+	unsigned int actual_w;
+	unsigned int actual_h;
+	int nr;
+	int i;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	nr = exynos_drm_fb_get_buf_cnt(fb);
+	for (i = 0; i < nr; i++) {
+		struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i);
+
+		if (!buffer) {
+			DRM_LOG_KMS("buffer is null\n");
+			return -EFAULT;
+		}
+
+		overlay->dma_addr[i] = buffer->dma_addr;
+
+		DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
+				i, (unsigned long)overlay->dma_addr[i]);
+	}
+
+	actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
+	actual_h = exynos_plane_get_size(crtc_y, crtc_h, crtc->mode.vdisplay);
+
+	if (crtc_x < 0) {
+		if (actual_w)
+			src_x -= crtc_x;
+		crtc_x = 0;
+	}
+
+	if (crtc_y < 0) {
+		if (actual_h)
+			src_y -= crtc_y;
+		crtc_y = 0;
+	}
+
+	/* set drm framebuffer data. */
+	overlay->fb_x = src_x;
+	overlay->fb_y = src_y;
+	overlay->fb_width = fb->width;
+	overlay->fb_height = fb->height;
+	overlay->src_width = src_w;
+	overlay->src_height = src_h;
+	overlay->bpp = fb->bits_per_pixel;
+	overlay->pitch = fb->pitches[0];
+	overlay->pixel_format = fb->pixel_format;
+
+	/* set overlay range to be displayed. */
+	overlay->crtc_x = crtc_x;
+	overlay->crtc_y = crtc_y;
+	overlay->crtc_width = actual_w;
+	overlay->crtc_height = actual_h;
+
+	/* set drm mode data. */
+	overlay->mode_width = crtc->mode.hdisplay;
+	overlay->mode_height = crtc->mode.vdisplay;
+	overlay->refresh = crtc->mode.vrefresh;
+	overlay->scan_flag = crtc->mode.flags;
+
+	DRM_DEBUG_KMS("overlay: offset_x/y(%d,%d), width/height(%d,%d)\n",
+			overlay->crtc_x, overlay->crtc_y,
+			overlay->crtc_width, overlay->crtc_height);
+
+	exynos_drm_fn_encoder(crtc, overlay, exynos_drm_encoder_plane_mode_set);
+
+	return 0;
+}
+
+void exynos_plane_commit(struct drm_plane *plane)
+{
+	struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+	struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+
+	exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+			exynos_drm_encoder_plane_commit);
+}
+
+void exynos_plane_dpms(struct drm_plane *plane, int mode)
+{
+	struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+	struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (mode == DRM_MODE_DPMS_ON) {
+		if (exynos_plane->enabled)
+			return;
+
+		exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+				exynos_drm_encoder_plane_enable);
+
+		exynos_plane->enabled = true;
+	} else {
+		if (!exynos_plane->enabled)
+			return;
+
+		exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+				exynos_drm_encoder_plane_disable);
+
+		exynos_plane->enabled = false;
+	}
+}
+
+static int
+exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+		     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+		     unsigned int crtc_w, unsigned int crtc_h,
+		     uint32_t src_x, uint32_t src_y,
+		     uint32_t src_w, uint32_t src_h)
+{
+	int ret;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
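+	/*
+	 * The src_* coordinates from the setplane ioctl are in 16.16
+	 * fixed point; shift out the fractional part for the hardware.
+	 */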
+	ret = exynos_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y,
+			crtc_w, crtc_h, src_x >> 16, src_y >> 16,
+			src_w >> 16, src_h >> 16);
+	if (ret < 0)
+		return ret;
+
+	plane->crtc = crtc;
+
+	exynos_plane_commit(plane);
+	exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
+
+	return 0;
+}
+
+static int exynos_disable_plane(struct drm_plane *plane)
+{
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	exynos_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+
+	return 0;
+}
+
+static void exynos_plane_destroy(struct drm_plane *plane)
+{
+	struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	exynos_disable_plane(plane);
+	drm_plane_cleanup(plane);
+	kfree(exynos_plane);
+}
+
+static int exynos_plane_set_property(struct drm_plane *plane,
+				     struct drm_property *property,
+				     uint64_t val)
+{
+	struct drm_device *dev = plane->dev;
+	struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+	struct exynos_drm_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (property == dev_priv->plane_zpos_property) {
+		exynos_plane->overlay.zpos = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static struct drm_plane_funcs exynos_plane_funcs = {
+	.update_plane	= exynos_update_plane,
+	.disable_plane	= exynos_disable_plane,
+	.destroy	= exynos_plane_destroy,
+	.set_property	= exynos_plane_set_property,
+};
+
+static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
+{
+	struct drm_device *dev = plane->dev;
+	struct exynos_drm_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	prop = dev_priv->plane_zpos_property;
+	if (!prop) {
+		prop = drm_property_create_range(dev, 0, "zpos", 0,
+						 MAX_PLANE - 1);
+		if (!prop)
+			return;
+
+		dev_priv->plane_zpos_property = prop;
+	}
+
+	drm_object_attach_property(&plane->base, prop, 0);
+}
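+
+/*
+ * A minimal user-space sketch (assuming libdrm; plane_id and
+ * zpos_prop_id would be discovered via drmModeGetPlaneResources() and
+ * drmModeObjectGetProperties()) for setting the zpos property attached
+ * above:
+ *
+ *	drmModeObjectSetProperty(fd, plane_id, DRM_MODE_OBJECT_PLANE,
+ *				 zpos_prop_id, 1);
+ */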
+
+struct drm_plane *exynos_plane_init(struct drm_device *dev,
+				    unsigned int possible_crtcs, bool priv)
+{
+	struct exynos_plane *exynos_plane;
+	int err;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
+	if (!exynos_plane) {
+		DRM_ERROR("failed to allocate plane\n");
+		return NULL;
+	}
+
+	err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
+			      &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
+			      priv);
+	if (err) {
+		DRM_ERROR("failed to initialize plane\n");
+		kfree(exynos_plane);
+		return NULL;
+	}
+
+	if (priv)
+		exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+	else
+		exynos_plane_attach_zpos_property(&exynos_plane->base);
+
+	return &exynos_plane->base;
+}
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_plane.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_plane.h
new file mode 100644
index 0000000..8831245
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+			  unsigned int crtc_w, unsigned int crtc_h,
+			  uint32_t src_x, uint32_t src_y,
+			  uint32_t src_w, uint32_t src_h);
+void exynos_plane_commit(struct drm_plane *plane);
+void exynos_plane_dpms(struct drm_plane *plane, int mode);
+struct drm_plane *exynos_plane_init(struct drm_device *dev,
+				    unsigned int possible_crtcs, bool priv);
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644
index 0000000..9b6c709
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -0,0 +1,830 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *	YoungJun Cho <yj44.cho@samsung.com>
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-rotator.h"
+#include "exynos_drm.h"
+#include "exynos_drm_ipp.h"
+
+/*
+ * The rotator supports image crop/rotation and input/output DMA operations:
+ * the input DMA reads image data from memory and the output DMA writes the
+ * result back to memory.
+ *
+ * M2M operation: Memory ----> Rotator H/W ----> Memory
+ * (crop and rotation; scaling and CSC are rejected in
+ * rotator_ippdrv_check_property()).
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check the platform_device_id use case.
+ * 3. check src/dst size (width, height).
+ * 4. need to add the supported list to prop_list.
+ */
+
+#define get_rot_context(dev)	platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
+					struct rot_context, ippdrv)
+#define rot_read(offset)		readl(rot->regs + (offset))
+#define rot_write(cfg, offset)	writel(cfg, rot->regs + (offset))
+
+enum rot_irq_status {
+	ROT_IRQ_STATUS_COMPLETE	= 8,
+	ROT_IRQ_STATUS_ILLEGAL	= 9,
+};
+
+/*
+ * A structure of limitation.
+ *
+ * @min_w: minimum width.
+ * @min_h: minimum height.
+ * @max_w: maximum width.
+ * @max_h: maximum height.
+ * @align: align size.
+ */
+struct rot_limit {
+	u32	min_w;
+	u32	min_h;
+	u32	max_w;
+	u32	max_h;
+	u32	align;
+};
+
+/*
+ * A structure of limitation table.
+ *
+ * @ycbcr420_2p: case of YUV.
+ * @rgb888: case of RGB.
+ */
+struct rot_limit_table {
+	struct rot_limit	ycbcr420_2p;
+	struct rot_limit	rgb888;
+};
+
+/*
+ * A structure of rotator context.
+ *
+ * @ippdrv: ipp driver structure used to register this rotator.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @clock: rotator gate clock.
+ * @limit_tbl: limitation of rotator.
+ * @irq: irq number.
+ * @cur_buf_id: current operation buffer id.
+ * @suspended: suspended state.
+ */
+struct rot_context {
+	struct exynos_drm_ippdrv	ippdrv;
+	struct resource	*regs_res;
+	void __iomem	*regs;
+	struct clk	*clock;
+	struct rot_limit_table	*limit_tbl;
+	int	irq;
+	int	cur_buf_id[EXYNOS_DRM_OPS_MAX];
+	bool	suspended;
+};
+
+static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
+{
+	u32 val = rot_read(ROT_CONFIG);
+
+	if (enable)
+		val |= ROT_CONFIG_IRQ;
+	else
+		val &= ~ROT_CONFIG_IRQ;
+
+	rot_write(val, ROT_CONFIG);
+}
+
+static u32 rotator_reg_get_fmt(struct rot_context *rot)
+{
+	u32 val = rot_read(ROT_CONTROL);
+
+	val &= ROT_CONTROL_FMT_MASK;
+
+	return val;
+}
+
+static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
+{
+	u32 val = rot_read(ROT_STATUS);
+
+	val = ROT_STATUS_IRQ(val);
+
+	if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
+		return ROT_IRQ_STATUS_COMPLETE;
+
+	return ROT_IRQ_STATUS_ILLEGAL;
+}
+
+static irqreturn_t rotator_irq_handler(int irq, void *arg)
+{
+	struct rot_context *rot = arg;
+	struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
+	struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
+	enum rot_irq_status irq_status;
+	u32 val;
+
+	/* Get execution result */
+	irq_status = rotator_reg_get_irq_status(rot);
+
+	/* acknowledge the interrupt by writing its pending status bit back */
+	val = rot_read(ROT_STATUS);
+	val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
+	rot_write(val, ROT_STATUS);
+
+	if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
+		event_work->ippdrv = ippdrv;
+		event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+			rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
+		queue_work(ippdrv->event_workq,
+			(struct work_struct *)event_work);
+	} else {
+		DRM_ERROR("the SFR is set illegally\n");
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
+		u32 *vsize)
+{
+	struct rot_limit_table *limit_tbl = rot->limit_tbl;
+	struct rot_limit *limit;
+	u32 mask, val;
+
+	/* Get size limit */
+	if (fmt == ROT_CONTROL_FMT_RGB888)
+		limit = &limit_tbl->rgb888;
+	else
+		limit = &limit_tbl->ycbcr420_2p;
+
+	/* Get mask for rounding to nearest aligned val */
+	mask = ~((1 << limit->align) - 1);
+
+	/* Set aligned width */
+	val = ROT_ALIGN(*hsize, limit->align, mask);
+	if (val < limit->min_w)
+		*hsize = ROT_MIN(limit->min_w, mask);
+	else if (val > limit->max_w)
+		*hsize = ROT_MAX(limit->max_w, mask);
+	else
+		*hsize = val;
+
+	/* Set aligned height */
+	val = ROT_ALIGN(*vsize, limit->align, mask);
+	if (val < limit->min_h)
+		*vsize = ROT_MIN(limit->min_h, mask);
+	else if (val > limit->max_h)
+		*vsize = ROT_MAX(limit->max_h, mask);
+	else
+		*vsize = val;
+}
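+
+/*
+ * Worked example: with align = 3 the mask is ~((1 << 3) - 1) = ~7, so
+ * sizes are constrained to multiples of 8; align = 2 gives multiples of
+ * 4. Values outside [min_w, max_w] or [min_h, max_h] are clamped to the
+ * aligned minimum or maximum.
+ */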
+
+static int rotator_src_set_fmt(struct device *dev, u32 fmt)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	u32 val;
+
+	val = rot_read(ROT_CONTROL);
+	val &= ~ROT_CONTROL_FMT_MASK;
+
+	switch (fmt) {
+	case DRM_FORMAT_NV12:
+		val |= ROT_CONTROL_FMT_YCBCR420_2P;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		val |= ROT_CONTROL_FMT_RGB888;
+		break;
+	default:
+		DRM_ERROR("invalid image format\n");
+		return -EINVAL;
+	}
+
+	rot_write(val, ROT_CONTROL);
+
+	return 0;
+}
+
+static inline bool rotator_check_reg_fmt(u32 fmt)
+{
+	if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
+	    (fmt == ROT_CONTROL_FMT_RGB888))
+		return true;
+
+	return false;
+}
+
+static int rotator_src_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos,
+		struct drm_exynos_sz *sz)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	u32 fmt, hsize, vsize;
+	u32 val;
+
+	/* Get format */
+	fmt = rotator_reg_get_fmt(rot);
+	if (!rotator_check_reg_fmt(fmt)) {
+		DRM_ERROR("%s:invalid format.\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Align buffer size */
+	hsize = sz->hsize;
+	vsize = sz->vsize;
+	rotator_align_size(rot, fmt, &hsize, &vsize);
+
+	/* Set buffer size configuration */
+	val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+	rot_write(val, ROT_SRC_BUF_SIZE);
+
+	/* Set crop image position configuration */
+	val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+	rot_write(val, ROT_SRC_CROP_POS);
+	val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
+	rot_write(val, ROT_SRC_CROP_SIZE);
+
+	return 0;
+}
+
+static int rotator_src_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info,
+		u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+	u32 val, fmt, hsize, vsize;
+	int i;
+
+	/* Set current buf_id */
+	rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
+
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		/* Set address configuration */
+		for_each_ipp_planar(i)
+			addr[i] = buf_info->base[i];
+
+		/* Get format */
+		fmt = rotator_reg_get_fmt(rot);
+		if (!rotator_check_reg_fmt(fmt)) {
+			DRM_ERROR("%s:invalid format.\n", __func__);
+			return -EINVAL;
+		}
+
+		/* Re-set cb planar for NV12 format */
+		if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+		    !addr[EXYNOS_DRM_PLANAR_CB]) {
+
+			val = rot_read(ROT_SRC_BUF_SIZE);
+			hsize = ROT_GET_BUF_SIZE_W(val);
+			vsize = ROT_GET_BUF_SIZE_H(val);
+
+			/* Set cb planar */
+			addr[EXYNOS_DRM_PLANAR_CB] =
+				addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+		}
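+		/*
+		 * Example: for a 64x64 NV12 buffer the Y plane takes
+		 * 64 * 64 = 4096 bytes, so the interleaved CbCr plane
+		 * defaults to base + 4096 when no explicit Cb address
+		 * is supplied.
+		 */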
+
+		for_each_ipp_planar(i)
+			rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
+		break;
+	case IPP_BUF_DEQUEUE:
+		for_each_ipp_planar(i)
+			rot_write(0x0, ROT_SRC_BUF_ADDR(i));
+		break;
+	default:
+		/* Nothing to do */
+		break;
+	}
+
+	return 0;
+}
+
+static int rotator_dst_set_transf(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip, bool *swap)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	u32 val;
+
+	/* Set transform configuration */
+	val = rot_read(ROT_CONTROL);
+	val &= ~ROT_CONTROL_FLIP_MASK;
+
+	switch (flip) {
+	case EXYNOS_DRM_FLIP_VERTICAL:
+		val |= ROT_CONTROL_FLIP_VERTICAL;
+		break;
+	case EXYNOS_DRM_FLIP_HORIZONTAL:
+		val |= ROT_CONTROL_FLIP_HORIZONTAL;
+		break;
+	default:
+		/* Flip None */
+		break;
+	}
+
+	val &= ~ROT_CONTROL_ROT_MASK;
+
+	switch (degree) {
+	case EXYNOS_DRM_DEGREE_90:
+		val |= ROT_CONTROL_ROT_90;
+		break;
+	case EXYNOS_DRM_DEGREE_180:
+		val |= ROT_CONTROL_ROT_180;
+		break;
+	case EXYNOS_DRM_DEGREE_270:
+		val |= ROT_CONTROL_ROT_270;
+		break;
+	default:
+		/* Rotation 0 Degree */
+		break;
+	}
+
+	rot_write(val, ROT_CONTROL);
+
+	/* Check degree for setting buffer size swap */
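+	/*
+	 * e.g. rotating a 720x480 source by 90 or 270 degrees produces
+	 * a 480x720 destination, so width and height are swapped for
+	 * the caller.
+	 */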
+	if ((degree == EXYNOS_DRM_DEGREE_90) ||
+	    (degree == EXYNOS_DRM_DEGREE_270))
+		*swap = true;
+	else
+		*swap = false;
+
+	return 0;
+}
+
+static int rotator_dst_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos,
+		struct drm_exynos_sz *sz)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	u32 val, fmt, hsize, vsize;
+
+	/* Get format */
+	fmt = rotator_reg_get_fmt(rot);
+	if (!rotator_check_reg_fmt(fmt)) {
+		DRM_ERROR("%s:invalid format.\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Align buffer size */
+	hsize = sz->hsize;
+	vsize = sz->vsize;
+	rotator_align_size(rot, fmt, &hsize, &vsize);
+
+	/* Set buffer size configuration */
+	val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+	rot_write(val, ROT_DST_BUF_SIZE);
+
+	/* Set crop image position configuration */
+	val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+	rot_write(val, ROT_DST_CROP_POS);
+
+	return 0;
+}
+
+static int rotator_dst_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info,
+		u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+	u32 val, fmt, hsize, vsize;
+	int i;
+
+	/* Set current buf_id */
+	rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+
+	switch (buf_type) {
+	case IPP_BUF_ENQUEUE:
+		/* Set address configuration */
+		for_each_ipp_planar(i)
+			addr[i] = buf_info->base[i];
+
+		/* Get format */
+		fmt = rotator_reg_get_fmt(rot);
+		if (!rotator_check_reg_fmt(fmt)) {
+			DRM_ERROR("%s:invalid format.\n", __func__);
+			return -EINVAL;
+		}
+
+		/* Re-set cb planar for NV12 format */
+		if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+		    !addr[EXYNOS_DRM_PLANAR_CB]) {
+			/* Get buf size */
+			val = rot_read(ROT_DST_BUF_SIZE);
+
+			hsize = ROT_GET_BUF_SIZE_W(val);
+			vsize = ROT_GET_BUF_SIZE_H(val);
+
+			/* Set cb planar */
+			addr[EXYNOS_DRM_PLANAR_CB] =
+				addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+		}
+
+		for_each_ipp_planar(i)
+			rot_write(addr[i], ROT_DST_BUF_ADDR(i));
+		break;
+	case IPP_BUF_DEQUEUE:
+		for_each_ipp_planar(i)
+			rot_write(0x0, ROT_DST_BUF_ADDR(i));
+		break;
+	default:
+		/* Nothing to do */
+		break;
+	}
+
+	return 0;
+}
+
+static struct exynos_drm_ipp_ops rot_src_ops = {
+	.set_fmt	=	rotator_src_set_fmt,
+	.set_size	=	rotator_src_set_size,
+	.set_addr	=	rotator_src_set_addr,
+};
+
+static struct exynos_drm_ipp_ops rot_dst_ops = {
+	.set_transf	=	rotator_dst_set_transf,
+	.set_size	=	rotator_dst_set_size,
+	.set_addr	=	rotator_dst_set_addr,
+};
+
+static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+	struct drm_exynos_ipp_prop_list *prop_list;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+	if (!prop_list) {
+		DRM_ERROR("failed to alloc property list.\n");
+		return -ENOMEM;
+	}
+
+	prop_list->version = 1;
+	prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+				(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+	prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+				(1 << EXYNOS_DRM_DEGREE_90) |
+				(1 << EXYNOS_DRM_DEGREE_180) |
+				(1 << EXYNOS_DRM_DEGREE_270);
+	prop_list->csc = 0;
+	prop_list->crop = 0;
+	prop_list->scale = 0;
+
+	ippdrv->prop_list = prop_list;
+
+	return 0;
+}
+
+static inline bool rotator_check_drm_fmt(u32 fmt)
+{
+	switch (fmt) {
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_NV12:
+		return true;
+	default:
+		DRM_DEBUG_KMS("%s:unsupported format\n", __func__);
+		return false;
+	}
+}
+
+static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
+{
+	switch (flip) {
+	case EXYNOS_DRM_FLIP_NONE:
+	case EXYNOS_DRM_FLIP_VERTICAL:
+	case EXYNOS_DRM_FLIP_HORIZONTAL:
+	case EXYNOS_DRM_FLIP_BOTH:
+		return true;
+	default:
+		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+		return false;
+	}
+}
+
+static int rotator_ippdrv_check_property(struct device *dev,
+		struct drm_exynos_ipp_property *property)
+{
+	struct drm_exynos_ipp_config *src_config =
+					&property->config[EXYNOS_DRM_OPS_SRC];
+	struct drm_exynos_ipp_config *dst_config =
+					&property->config[EXYNOS_DRM_OPS_DST];
+	struct drm_exynos_pos *src_pos = &src_config->pos;
+	struct drm_exynos_pos *dst_pos = &dst_config->pos;
+	struct drm_exynos_sz *src_sz = &src_config->sz;
+	struct drm_exynos_sz *dst_sz = &dst_config->sz;
+	bool swap = false;
+
+	/* Check format configuration */
+	if (src_config->fmt != dst_config->fmt) {
+		DRM_DEBUG_KMS("%s:csc feature not supported\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!rotator_check_drm_fmt(dst_config->fmt)) {
+		DRM_DEBUG_KMS("%s:invalid format\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Check transform configuration */
+	if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
+		DRM_DEBUG_KMS("%s:source-side rotation not supported\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	switch (dst_config->degree) {
+	case EXYNOS_DRM_DEGREE_90:
+	case EXYNOS_DRM_DEGREE_270:
+		swap = true;
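+		/* fall through */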
+	case EXYNOS_DRM_DEGREE_0:
+	case EXYNOS_DRM_DEGREE_180:
+		/* No problem */
+		break;
+	default:
+		DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
+		return -EINVAL;
+	}
+
+	if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
+		DRM_DEBUG_KMS("%s:source-side flip not supported\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!rotator_check_drm_flip(dst_config->flip)) {
+		DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Check size configuration */
+	if ((src_pos->x + src_pos->w > src_sz->hsize) ||
+		(src_pos->y + src_pos->h > src_sz->vsize)) {
+		DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
+		return -EINVAL;
+	}
+
+	if (swap) {
+		if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
+			(dst_pos->y + dst_pos->w > dst_sz->hsize)) {
+			DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+				__func__);
+			return -EINVAL;
+		}
+
+		if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
+			DRM_DEBUG_KMS("%s:scale feature not supported\n",
+				__func__);
+			return -EINVAL;
+		}
+	} else {
+		if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
+			(dst_pos->y + dst_pos->h > dst_sz->vsize)) {
+			DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+				__func__);
+			return -EINVAL;
+		}
+
+		if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
+			DRM_DEBUG_KMS("%s:scale feature not supported\n",
+				__func__);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+	u32 val;
+
+	if (rot->suspended) {
+		DRM_ERROR("suspended state\n");
+		return -EPERM;
+	}
+
+	if (cmd != IPP_CMD_M2M) {
+		DRM_ERROR("unsupported cmd: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* Set interrupt enable */
+	rotator_reg_set_irq(rot, true);
+
+	val = rot_read(ROT_CONTROL);
+	val |= ROT_CONTROL_START;
+
+	rot_write(val, ROT_CONTROL);
+
+	return 0;
+}
+
+static int rotator_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rot_context *rot;
+	struct exynos_drm_ippdrv *ippdrv;
+	int ret;
+
+	rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
+	if (!rot) {
+		dev_err(dev, "failed to allocate rot\n");
+		return -ENOMEM;
+	}
+
+	rot->limit_tbl = (struct rot_limit_table *)
+				platform_get_device_id(pdev)->driver_data;
+
+	rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rot->regs = devm_ioremap_resource(dev, rot->regs_res);
+	if (IS_ERR(rot->regs))
+		return PTR_ERR(rot->regs);
+
+	rot->irq = platform_get_irq(pdev, 0);
+	if (rot->irq < 0) {
+		dev_err(dev, "failed to get irq\n");
+		return rot->irq;
+	}
+
+	ret = devm_request_threaded_irq(dev, rot->irq, NULL,
+			rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot);
+	if (ret < 0) {
+		dev_err(dev, "failed to request irq\n");
+		return ret;
+	}
+
+	rot->clock = devm_clk_get(dev, "rotator");
+	if (IS_ERR(rot->clock)) {
+		dev_err(dev, "failed to get clock\n");
+		return PTR_ERR(rot->clock);
+	}
+
+	pm_runtime_enable(dev);
+
+	ippdrv = &rot->ippdrv;
+	ippdrv->dev = dev;
+	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
+	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
+	ippdrv->check_property = rotator_ippdrv_check_property;
+	ippdrv->start = rotator_ippdrv_start;
+	ret = rotator_init_prop_list(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to init property list.\n");
+		goto err_ippdrv_register;
+	}
+
+	DRM_DEBUG_KMS("%s:ippdrv[0x%x]\n", __func__, (int)ippdrv);
+
+	platform_set_drvdata(pdev, rot);
+
+	ret = exynos_drm_ippdrv_register(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to register drm rotator device\n");
+		goto err_ippdrv_register;
+	}
+
+	dev_info(dev, "The exynos rotator is probed successfully\n");
+
+	return 0;
+
+err_ippdrv_register:
+	pm_runtime_disable(dev);
+	return ret;
+}
+
+static int rotator_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct rot_context *rot = dev_get_drvdata(dev);
+	struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+
+	exynos_drm_ippdrv_unregister(ippdrv);
+
+	pm_runtime_disable(dev);
+
+	return 0;
+}
+
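+/*
+ * Hardware size limits. Note that .align is a power-of-two exponent
+ * (see rotator_align_size()): 3 means 8-pixel alignment for the
+ * YCbCr420 2-plane format and 2 means 4-pixel alignment for RGB888.
+ */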
+static struct rot_limit_table rot_limit_tbl = {
+	.ycbcr420_2p = {
+		.min_w = 32,
+		.min_h = 32,
+		.max_w = SZ_32K,
+		.max_h = SZ_32K,
+		.align = 3,
+	},
+	.rgb888 = {
+		.min_w = 8,
+		.min_h = 8,
+		.max_w = SZ_8K,
+		.max_h = SZ_8K,
+		.align = 2,
+	},
+};
+
+static struct platform_device_id rotator_driver_ids[] = {
+	{
+		.name		= "exynos-rot",
+		.driver_data	= (unsigned long)&rot_limit_tbl,
+	},
+	{},
+};
+
+static int rotator_clk_ctrl(struct rot_context *rot, bool enable)
+{
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (enable) {
+		clk_enable(rot->clock);
+		rot->suspended = false;
+	} else {
+		clk_disable(rot->clock);
+		rot->suspended = true;
+	}
+
+	return 0;
+}
+
+
+#ifdef CONFIG_PM_SLEEP
+static int rotator_suspend(struct device *dev)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_resume(struct device *dev)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!pm_runtime_suspended(dev))
+		return rotator_clk_ctrl(rot, true);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int rotator_runtime_suspend(struct device *dev)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_runtime_resume(struct device *dev)
+{
+	struct rot_context *rot = dev_get_drvdata(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	return rotator_clk_ctrl(rot, true);
+}
+#endif
+
+static const struct dev_pm_ops rotator_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+	SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
+									NULL)
+};
+
+struct platform_driver rotator_driver = {
+	.probe		= rotator_probe,
+	.remove		= rotator_remove,
+	.id_table	= rotator_driver_ids,
+	.driver		= {
+		.name	= "exynos-rot",
+		.owner	= THIS_MODULE,
+		.pm	= &rotator_pm_ops,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_rotator.h
new file mode 100644
index 0000000..71a0b4c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_rotator.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *	YoungJun Cho <yj44.cho@samsung.com>
+ *	Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef	_EXYNOS_DRM_ROTATOR_H_
+#define	_EXYNOS_DRM_ROTATOR_H_
+
+/* TODO */
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_vidi.c
new file mode 100644
index 0000000..24376c1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -0,0 +1,668 @@
+/* exynos_drm_vidi.c
+ *
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <drm/drmP.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <drm/exynos_drm.h>
+
+#include <drm/drm_edid.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_encoder.h"
+
+/* vidi has three virtual windows in total. */
+#define WINDOWS_NR		3
+
+#define get_vidi_context(dev)	platform_get_drvdata(to_platform_device(dev))
+
+struct vidi_win_data {
+	unsigned int		offset_x;
+	unsigned int		offset_y;
+	unsigned int		ovl_width;
+	unsigned int		ovl_height;
+	unsigned int		fb_width;
+	unsigned int		fb_height;
+	unsigned int		bpp;
+	dma_addr_t		dma_addr;
+	unsigned int		buf_offsize;
+	unsigned int		line_size;	/* bytes */
+	bool			enabled;
+};
+
+struct vidi_context {
+	struct exynos_drm_subdrv	subdrv;
+	struct drm_crtc			*crtc;
+	struct vidi_win_data		win_data[WINDOWS_NR];
+	struct edid			*raw_edid;
+	unsigned int			clkdiv;
+	unsigned int			default_win;
+	unsigned long			irq_flags;
+	unsigned int			connected;
+	bool				vblank_on;
+	bool				suspended;
+	bool				direct_vblank;
+	struct work_struct		work;
+	struct mutex			lock;
+};
+
+static const char fake_edid_info[] = {
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05,
+	0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78,
+	0x0a, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0xbd,
+	0xee, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x66, 0x21, 0x50, 0xb0, 0x51, 0x00,
+	0x1b, 0x30, 0x40, 0x70, 0x36, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e,
+	0x01, 0x1d, 0x00, 0x72, 0x51, 0xd0, 0x1e, 0x20, 0x6e, 0x28, 0x55, 0x00,
+	0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
+	0x4b, 0x1a, 0x44, 0x17, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+	0x00, 0x00, 0x00, 0xfc, 0x00, 0x53, 0x41, 0x4d, 0x53, 0x55, 0x4e, 0x47,
+	0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0xbc, 0x02, 0x03, 0x1e, 0xf1,
+	0x46, 0x84, 0x05, 0x03, 0x10, 0x20, 0x22, 0x23, 0x09, 0x07, 0x07, 0x83,
+	0x01, 0x00, 0x00, 0xe2, 0x00, 0x0f, 0x67, 0x03, 0x0c, 0x00, 0x10, 0x00,
+	0xb8, 0x2d, 0x01, 0x1d, 0x80, 0x18, 0x71, 0x1c, 0x16, 0x20, 0x58, 0x2c,
+	0x25, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x9e, 0x8c, 0x0a, 0xd0, 0x8a,
+	0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00, 0xa0, 0x5a, 0x00, 0x00,
+	0x00, 0x18, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+	0x45, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x06
+};
+
+static bool vidi_display_is_connected(struct device *dev)
+{
+	struct vidi_context *ctx = get_vidi_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * The connection state is controlled from user space, which
+	 * triggers hotplug through a dedicated ioctl.
+	 */
+	return ctx->connected ? true : false;
+}
+
+static struct edid *vidi_get_edid(struct device *dev,
+			struct drm_connector *connector)
+{
+	struct vidi_context *ctx = get_vidi_context(dev);
+	struct edid *edid;
+	int edid_len;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * The EDID data comes from user space; it is stored in
+	 * ctx->raw_edid through a dedicated ioctl.
+	 */
+	if (!ctx->raw_edid) {
+		DRM_DEBUG_KMS("raw_edid is null.\n");
+		return ERR_PTR(-EFAULT);
+	}
+
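+	/*
+	 * An EDID block is EDID_LENGTH (128) bytes, so e.g. a base
+	 * block plus one extension gives (1 + 1) * 128 = 256 bytes
+	 * to copy.
+	 */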
+	edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
+	edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
+	if (!edid) {
+		DRM_DEBUG_KMS("failed to allocate edid\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return edid;
+}
+
+static void *vidi_get_panel(struct device *dev)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO. */
+
+	return NULL;
+}
+
+static int vidi_check_timing(struct device *dev, void *timing)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO. */
+
+	return 0;
+}
+
+static int vidi_display_power_on(struct device *dev, int mode)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO */
+
+	return 0;
+}
+
+static struct exynos_drm_display_ops vidi_display_ops = {
+	.type = EXYNOS_DISPLAY_TYPE_VIDI,
+	.is_connected = vidi_display_is_connected,
+	.get_edid = vidi_get_edid,
+	.get_panel = vidi_get_panel,
+	.check_timing = vidi_check_timing,
+	.power_on = vidi_display_power_on,
+};
+
+static void vidi_dpms(struct device *subdrv_dev, int mode)
+{
+	struct vidi_context *ctx = get_vidi_context(subdrv_dev);
+
+	DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
+
+	mutex_lock(&ctx->lock);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		/* TODO. */
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		/* TODO. */
+		break;
+	default:
+		DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+		break;
+	}
+
+	mutex_unlock(&ctx->lock);
+}
+
+static void vidi_apply(struct device *subdrv_dev)
+{
+	struct vidi_context *ctx = get_vidi_context(subdrv_dev);
+	struct exynos_drm_manager *mgr = ctx->subdrv.manager;
+	struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
+	struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
+	struct vidi_win_data *win_data;
+	int i;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	for (i = 0; i < WINDOWS_NR; i++) {
+		win_data = &ctx->win_data[i];
+		if (win_data->enabled && (ovl_ops && ovl_ops->commit))
+			ovl_ops->commit(subdrv_dev, i);
+	}
+
+	if (mgr_ops && mgr_ops->commit)
+		mgr_ops->commit(subdrv_dev);
+}
+
+static void vidi_commit(struct device *dev)
+{
+	struct vidi_context *ctx = get_vidi_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (ctx->suspended)
+		return;
+}
+
+static int vidi_enable_vblank(struct device *dev)
+{
+	struct vidi_context *ctx = get_vidi_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (ctx->suspended)
+		return -EPERM;
+
+	if (!test_and_set_bit(0, &ctx->irq_flags))
+		ctx->vblank_on = true;
+
+	ctx->direct_vblank = true;
+
+	/*
+	 * In case of a page flip request, vidi_finish_pageflip() is not
+	 * called here because direct_vblank is true; it is invoked later
+	 * from the overlay_ops->commit callback instead.
+	 */
+	schedule_work(&ctx->work);
+
+	return 0;
+}
+
+static void vidi_disable_vblank(struct device *dev)
+{
+	struct vidi_context *ctx = get_vidi_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (ctx->suspended)
+		return;
+
+	if (test_and_clear_bit(0, &ctx->irq_flags))
+		ctx->vblank_on = false;
+}
+
+static struct exynos_drm_manager_ops vidi_manager_ops = {
+	.dpms = vidi_dpms,
+	.apply = vidi_apply,
+	.commit = vidi_commit,
+	.enable_vblank = vidi_enable_vblank,
+	.disable_vblank = vidi_disable_vblank,
+};
+
+static void vidi_win_mode_set(struct device *dev,
+			      struct exynos_drm_overlay *overlay)
+{
+	struct vidi_context *ctx = get_vidi_context(dev);
+	struct vidi_win_data *win_data;
+	int win;
+	unsigned long offset;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!overlay) {
+		dev_err(dev, "overlay is NULL\n");
+		return;
+	}
+
+	win = overlay->zpos;
+	if (win == DEFAULT_ZPOS)
+		win = ctx->default_win;
+
+	if (win < 0 || win >= WINDOWS_NR)
+		return;
+
+	offset = overlay->fb_x * (overlay->bpp >> 3);
+	offset += overlay->fb_y * overlay->pitch;
+
+	DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
+
+	win_data = &ctx->win_data[win];
+
+	win_data->offset_x = overlay->crtc_x;
+	win_data->offset_y = overlay->crtc_y;
+	win_data->ovl_width = overlay->crtc_width;
+	win_data->ovl_height = overlay->crtc_height;
+	win_data->fb_width = overlay->fb_width;
+	win_data->fb_height = overlay->fb_height;
+	win_data->dma_addr = overlay->dma_addr[0] + offset;
+	win_data->bpp = overlay->bpp;
+	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
+				(overlay->bpp >> 3);
+	win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
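+
+	/*
+	 * Example: at 32 bpp with fb_width = 1024 and crtc_width = 720,
+	 * line_size = 720 * 4 = 2880 bytes and buf_offsize =
+	 * (1024 - 720) * 4 = 1216 bytes of padding per line.
+	 */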
+
+	/*
+	 * Some parts of win_data are meant to be handed over to user
+	 * space through a dedicated ioctl.
+	 */
+
+	DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
+			win_data->offset_x, win_data->offset_y);
+	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+			win_data->ovl_width, win_data->ovl_height);
+	DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
+	DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
+			overlay->fb_width, overlay->crtc_width);
+}
+
+static void vidi_win_commit(struct device *dev, int zpos)
+{
+	struct vidi_context *ctx = get_vidi_context(dev);
+	struct vidi_win_data *win_data;
+	int win = zpos;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (ctx->suspended)
+		return;
+
+	if (win == DEFAULT_ZPOS)
+		win = ctx->default_win;
+
+	if (win < 0 || win >= WINDOWS_NR)
+		return;
+
+	win_data = &ctx->win_data[win];
+
+	win_data->enabled = true;
+
+	DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)win_data->dma_addr);
+
+	if (ctx->vblank_on)
+		schedule_work(&ctx->work);
+}
+
+static void vidi_win_disable(struct device *dev, int zpos)
+{
+	struct vidi_context *ctx = get_vidi_context(dev);
+	struct vidi_win_data *win_data;
+	int win = zpos;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (win == DEFAULT_ZPOS)
+		win = ctx->default_win;
+
+	if (win < 0 || win >= WINDOWS_NR)
+		return;
+
+	win_data = &ctx->win_data[win];
+	win_data->enabled = false;
+
+	/* TODO. */
+}
+
+static struct exynos_drm_overlay_ops vidi_overlay_ops = {
+	.mode_set = vidi_win_mode_set,
+	.commit = vidi_win_commit,
+	.disable = vidi_win_disable,
+};
+
+static struct exynos_drm_manager vidi_manager = {
+	.pipe		= -1,
+	.ops		= &vidi_manager_ops,
+	.overlay_ops	= &vidi_overlay_ops,
+	.display_ops	= &vidi_display_ops,
+};
+
+static void vidi_fake_vblank_handler(struct work_struct *work)
+{
+	struct vidi_context *ctx = container_of(work, struct vidi_context,
+					work);
+	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+	struct exynos_drm_manager *manager = subdrv->manager;
+
+	if (manager->pipe < 0)
+		return;
+
+	/* emulate a refresh rate of roughly 50-60Hz (16-20ms period). */
+	usleep_range(16000, 20000);
+
+	mutex_lock(&ctx->lock);
+
+	if (ctx->direct_vblank) {
+		drm_handle_vblank(subdrv->drm_dev, manager->pipe);
+		ctx->direct_vblank = false;
+		mutex_unlock(&ctx->lock);
+		return;
+	}
+
+	mutex_unlock(&ctx->lock);
+
+	exynos_drm_crtc_finish_pageflip(subdrv->drm_dev, manager->pipe);
+}
+
+static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * Enable drm irq mode: with irq_enabled = 1 the vblank feature
+	 * can be used.
+	 *
+	 * Note that the generic drm irq handler is not used here; each
+	 * driver installs its own handler instead, because the drm
+	 * framework supports only one irq handler.
+	 */
+	drm_dev->irq_enabled = 1;
+
+	/*
+	 * With vblank_disable_allowed = 1, the vblank interrupt is
+	 * disabled by the drm timer once the current process gives up
+	 * ownership of the vblank event (after drm_vblank_put() is called).
+	 */
+	drm_dev->vblank_disable_allowed = 1;
+
+	return 0;
+}
+
+static void vidi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO. */
+}
+
+static int vidi_power_on(struct vidi_context *ctx, bool enable)
+{
+	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+	struct device *dev = subdrv->dev;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (enable) {
+		ctx->suspended = false;
+
+		/* if vblank was enabled before, enable it again. */
+		if (test_and_clear_bit(0, &ctx->irq_flags))
+			vidi_enable_vblank(dev);
+
+		vidi_apply(dev);
+	} else {
+		ctx->suspended = true;
+	}
+
+	return 0;
+}
+
+static ssize_t vidi_show_connection(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int rc;
+	struct vidi_context *ctx = get_vidi_context(dev);
+
+	mutex_lock(&ctx->lock);
+
+	rc = sprintf(buf, "%d\n", ctx->connected);
+
+	mutex_unlock(&ctx->lock);
+
+	return rc;
+}
+
+static ssize_t vidi_store_connection(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	struct vidi_context *ctx = get_vidi_context(dev);
+	unsigned int connected;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	ret = kstrtouint(buf, 0, &connected);
+	if (ret)
+		return ret;
+
+	/* validate before updating the context state */
+	if (connected > 1)
+		return -EINVAL;
+
+	ctx->connected = connected;
+
+	/* use fake edid data for test. */
+	if (!ctx->raw_edid)
+		ctx->raw_edid = (struct edid *)fake_edid_info;
+
+	/* if raw_edid is not the fake data, this path cannot be tested. */
+	if (ctx->raw_edid != (struct edid *)fake_edid_info) {
+		DRM_DEBUG_KMS("edid data is not fake data.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("requested connection.\n");
+
+	drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);
+
+	return len;
+}
+
+static DEVICE_ATTR(connection, 0644, vidi_show_connection,
+			vidi_store_connection);
+
+int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct vidi_context *ctx = NULL;
+	struct drm_encoder *encoder;
+	struct exynos_drm_manager *manager;
+	struct exynos_drm_display_ops *display_ops;
+	struct drm_exynos_vidi_connection *vidi = data;
+	int edid_len;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!vidi) {
+		DRM_DEBUG_KMS("user data for vidi is null.\n");
+		return -EINVAL;
+	}
+
+	if (vidi->connection > 1) {
+		DRM_DEBUG_KMS("connection should be 0 or 1.\n");
+		return -EINVAL;
+	}
+
+	list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list,
+								head) {
+		manager = exynos_drm_get_manager(encoder);
+		display_ops = manager->display_ops;
+
+		if (display_ops->type == EXYNOS_DISPLAY_TYPE_VIDI) {
+			ctx = get_vidi_context(manager->dev);
+			break;
+		}
+	}
+
+	if (!ctx) {
+		DRM_DEBUG_KMS("virtual device type encoder not found.\n");
+		return -EINVAL;
+	}
+
+	if (ctx->connected == vidi->connection) {
+		DRM_DEBUG_KMS("same connection request.\n");
+		return -EINVAL;
+	}
+
+	if (vidi->connection) {
+		struct edid *raw_edid = (struct edid *)(unsigned long)vidi->edid;
+		if (!drm_edid_is_valid(raw_edid)) {
+			DRM_DEBUG_KMS("edid data is invalid.\n");
+			return -EINVAL;
+		}
+		edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
+		ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
+		if (!ctx->raw_edid) {
+			DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
+			return -ENOMEM;
+		}
+	} else {
+		/*
+		 * With connection = 0, free raw_edid only if it is not
+		 * the built-in fake EDID data.
+		 */
+		if (ctx->raw_edid && ctx->raw_edid !=
+				(struct edid *)fake_edid_info) {
+			kfree(ctx->raw_edid);
+			ctx->raw_edid = NULL;
+		}
+	}
+
+	ctx->connected = vidi->connection;
+	drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);
+
+	return 0;
+}
+
+static int vidi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct vidi_context *ctx;
+	struct exynos_drm_subdrv *subdrv;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->default_win = 0;
+
+	INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
+
+	subdrv = &ctx->subdrv;
+	subdrv->dev = dev;
+	subdrv->manager = &vidi_manager;
+	subdrv->probe = vidi_subdrv_probe;
+	subdrv->remove = vidi_subdrv_remove;
+
+	mutex_init(&ctx->lock);
+
+	platform_set_drvdata(pdev, ctx);
+
+	ret = device_create_file(dev, &dev_attr_connection);
+	if (ret < 0)
+		DRM_INFO("failed to create connection sysfs.\n");
+
+	exynos_drm_subdrv_register(subdrv);
+
+	return 0;
+}
+
+static int vidi_remove(struct platform_device *pdev)
+{
+	struct vidi_context *ctx = platform_get_drvdata(pdev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+	if (ctx->raw_edid != (struct edid *)fake_edid_info) {
+		kfree(ctx->raw_edid);
+		ctx->raw_edid = NULL;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vidi_suspend(struct device *dev)
+{
+	struct vidi_context *ctx = get_vidi_context(dev);
+
+	return vidi_power_on(ctx, false);
+}
+
+static int vidi_resume(struct device *dev)
+{
+	struct vidi_context *ctx = get_vidi_context(dev);
+
+	return vidi_power_on(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops vidi_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(vidi_suspend, vidi_resume)
+};
+
+struct platform_driver vidi_driver = {
+	.probe		= vidi_probe,
+	.remove		= vidi_remove,
+	.driver		= {
+		.name	= "exynos-drm-vidi",
+		.owner	= THIS_MODULE,
+		.pm	= &vidi_pm_ops,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_drm_vidi.h b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_vidi.h
new file mode 100644
index 0000000..1e5fdaa
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_drm_vidi.h
@@ -0,0 +1,22 @@
+/* exynos_drm_vidi.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_VIDI_H_
+#define _EXYNOS_DRM_VIDI_H_
+
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
+				struct drm_file *file_priv);
+#else
+#define vidi_connection_ioctl	NULL
+#endif
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_hdmi.c b/linux-imx/drivers/gpu/drm/exynos/exynos_hdmi.c
new file mode 100644
index 0000000..fd1426d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -0,0 +1,2171 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/hdmi_drv.c
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "regs-hdmi.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+
+#include "exynos_hdmi.h"
+
+#include <linux/gpio.h>
+#include <media/s5p_hdmi.h>
+
+#define MAX_WIDTH		1920
+#define MAX_HEIGHT		1080
+#define get_hdmi_context(dev)	platform_get_drvdata(to_platform_device(dev))
+
+/* AVI header and aspect ratio */
+#define HDMI_AVI_VERSION		0x02
+#define HDMI_AVI_LENGTH		0x0D
+#define AVI_PIC_ASPECT_RATIO_16_9	(2 << 4)
+#define AVI_SAME_AS_PIC_ASPECT_RATIO	8
+
+/* AUI header info */
+#define HDMI_AUI_VERSION	0x01
+#define HDMI_AUI_LENGTH	0x0A
+
+/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
+enum HDMI_PACKET_TYPE {
+	/* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
+	/* InfoFrame packet type */
+	HDMI_PACKET_TYPE_INFOFRAME = 0x80,
+	/* Vendor-Specific InfoFrame */
+	HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
+	/* Auxiliary Video information InfoFrame */
+	HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
+	/* Audio information InfoFrame */
+	HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
+};
+
+enum hdmi_type {
+	HDMI_TYPE13,
+	HDMI_TYPE14,
+};
+
+struct hdmi_resources {
+	struct clk			*hdmi;
+	struct clk			*sclk_hdmi;
+	struct clk			*sclk_pixel;
+	struct clk			*sclk_hdmiphy;
+	struct clk			*hdmiphy;
+	struct regulator_bulk_data	*regul_bulk;
+	int				regul_count;
+};
+
+struct hdmi_tg_regs {
+	u8 cmd[1];
+	u8 h_fsz[2];
+	u8 hact_st[2];
+	u8 hact_sz[2];
+	u8 v_fsz[2];
+	u8 vsync[2];
+	u8 vsync2[2];
+	u8 vact_st[2];
+	u8 vact_sz[2];
+	u8 field_chg[2];
+	u8 vact_st2[2];
+	u8 vact_st3[2];
+	u8 vact_st4[2];
+	u8 vsync_top_hdmi[2];
+	u8 vsync_bot_hdmi[2];
+	u8 field_top_hdmi[2];
+	u8 field_bot_hdmi[2];
+	u8 tg_3d[1];
+};
+
+struct hdmi_v13_core_regs {
+	u8 h_blank[2];
+	u8 v_blank[3];
+	u8 h_v_line[3];
+	u8 vsync_pol[1];
+	u8 int_pro_mode[1];
+	u8 v_blank_f[3];
+	u8 h_sync_gen[3];
+	u8 v_sync_gen1[3];
+	u8 v_sync_gen2[3];
+	u8 v_sync_gen3[3];
+};
+
+struct hdmi_v14_core_regs {
+	u8 h_blank[2];
+	u8 v2_blank[2];
+	u8 v1_blank[2];
+	u8 v_line[2];
+	u8 h_line[2];
+	u8 hsync_pol[1];
+	u8 vsync_pol[1];
+	u8 int_pro_mode[1];
+	u8 v_blank_f0[2];
+	u8 v_blank_f1[2];
+	u8 h_sync_start[2];
+	u8 h_sync_end[2];
+	u8 v_sync_line_bef_2[2];
+	u8 v_sync_line_bef_1[2];
+	u8 v_sync_line_aft_2[2];
+	u8 v_sync_line_aft_1[2];
+	u8 v_sync_line_aft_pxl_2[2];
+	u8 v_sync_line_aft_pxl_1[2];
+	u8 v_blank_f2[2]; /* for 3D mode */
+	u8 v_blank_f3[2]; /* for 3D mode */
+	u8 v_blank_f4[2]; /* for 3D mode */
+	u8 v_blank_f5[2]; /* for 3D mode */
+	u8 v_sync_line_aft_3[2];
+	u8 v_sync_line_aft_4[2];
+	u8 v_sync_line_aft_5[2];
+	u8 v_sync_line_aft_6[2];
+	u8 v_sync_line_aft_pxl_3[2];
+	u8 v_sync_line_aft_pxl_4[2];
+	u8 v_sync_line_aft_pxl_5[2];
+	u8 v_sync_line_aft_pxl_6[2];
+	u8 vact_space_1[2];
+	u8 vact_space_2[2];
+	u8 vact_space_3[2];
+	u8 vact_space_4[2];
+	u8 vact_space_5[2];
+	u8 vact_space_6[2];
+};
+
+struct hdmi_v13_conf {
+	struct hdmi_v13_core_regs core;
+	struct hdmi_tg_regs tg;
+};
+
+struct hdmi_v14_conf {
+	struct hdmi_v14_core_regs core;
+	struct hdmi_tg_regs tg;
+};
+
+struct hdmi_conf_regs {
+	int pixel_clock;
+	int cea_video_id;
+	union {
+		struct hdmi_v13_conf v13_conf;
+		struct hdmi_v14_conf v14_conf;
+	} conf;
+};
+
+struct hdmi_context {
+	struct device			*dev;
+	struct drm_device		*drm_dev;
+	bool				hpd;
+	bool				powered;
+	bool				dvi_mode;
+	struct mutex			hdmi_mutex;
+
+	void __iomem			*regs;
+	void				*parent_ctx;
+	int				irq;
+
+	struct i2c_client		*ddc_port;
+	struct i2c_client		*hdmiphy_port;
+
+	/* current hdmiphy conf regs */
+	struct hdmi_conf_regs		mode_conf;
+
+	struct hdmi_resources		res;
+
+	int				hpd_gpio;
+
+	enum hdmi_type			type;
+};
+
+struct hdmiphy_config {
+	int pixel_clock;
+	u8 conf[32];
+};
+
+/* list of phy config settings */
+static const struct hdmiphy_config hdmiphy_v13_configs[] = {
+	{
+		.pixel_clock = 27000000,
+		.conf = {
+			0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+			0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+			0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+			0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+		},
+	},
+	{
+		.pixel_clock = 27027000,
+		.conf = {
+			0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+			0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+			0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+			0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+		},
+	},
+	{
+		.pixel_clock = 74176000,
+		.conf = {
+			0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
+			0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
+			0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+			0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
+		},
+	},
+	{
+		.pixel_clock = 74250000,
+		.conf = {
+			0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
+			0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
+			0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
+			0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
+		},
+	},
+	{
+		.pixel_clock = 148500000,
+		.conf = {
+			0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
+			0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
+			0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+			0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
+		},
+	},
+};
+
+static const struct hdmiphy_config hdmiphy_v14_configs[] = {
+	{
+		.pixel_clock = 25200000,
+		.conf = {
+			0x01, 0x51, 0x2A, 0x75, 0x40, 0x01, 0x00, 0x08,
+			0x82, 0x80, 0xfc, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+			0x54, 0xf4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+		},
+	},
+	{
+		.pixel_clock = 27000000,
+		.conf = {
+			0x01, 0xd1, 0x22, 0x51, 0x40, 0x08, 0xfc, 0x20,
+			0x98, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+			0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+			0x54, 0xe4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+		},
+	},
+	{
+		.pixel_clock = 27027000,
+		.conf = {
+			0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
+			0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+			0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+		},
+	},
+	{
+		.pixel_clock = 36000000,
+		.conf = {
+			0x01, 0x51, 0x2d, 0x55, 0x40, 0x01, 0x00, 0x08,
+			0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+			0x54, 0xab, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+		},
+	},
+	{
+		.pixel_clock = 40000000,
+		.conf = {
+			0x01, 0x51, 0x32, 0x55, 0x40, 0x01, 0x00, 0x08,
+			0x82, 0x80, 0x2c, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+			0x54, 0x9a, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
+		},
+	},
+	{
+		.pixel_clock = 65000000,
+		.conf = {
+			0x01, 0xd1, 0x36, 0x34, 0x40, 0x1e, 0x0a, 0x08,
+			0x82, 0xa0, 0x45, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+			0x54, 0xbd, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+		},
+	},
+	{
+		.pixel_clock = 74176000,
+		.conf = {
+			0x01, 0xd1, 0x3e, 0x35, 0x40, 0x5b, 0xde, 0x08,
+			0x82, 0xa0, 0x73, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+			0x56, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+			0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+		},
+	},
+	{
+		.pixel_clock = 74250000,
+		.conf = {
+			0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
+			0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+			0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+			0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+		},
+	},
+	{
+		.pixel_clock = 83500000,
+		.conf = {
+			0x01, 0xd1, 0x23, 0x11, 0x40, 0x0c, 0xfb, 0x08,
+			0x85, 0xa0, 0xd1, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+			0x54, 0x93, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+		},
+	},
+	{
+		.pixel_clock = 106500000,
+		.conf = {
+			0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08,
+			0x84, 0xa0, 0x0a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+			0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+		},
+	},
+	{
+		.pixel_clock = 108000000,
+		.conf = {
+			0x01, 0x51, 0x2d, 0x15, 0x40, 0x01, 0x00, 0x08,
+			0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+			0x54, 0xc7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+		},
+	},
+	{
+		.pixel_clock = 146250000,
+		.conf = {
+			0x01, 0xd1, 0x3d, 0x15, 0x40, 0x18, 0xfd, 0x08,
+			0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+			0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+			0x54, 0x50, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
+		},
+	},
+	{
+		.pixel_clock = 148500000,
+		.conf = {
+			0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
+			0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+			0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+			0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+		},
+	},
+};
+
+struct hdmi_infoframe {
+	enum HDMI_PACKET_TYPE type;
+	u8 ver;
+	u8 len;
+};
+
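+/* low-level accessors for the memory-mapped HDMI registers */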
+static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
+{
+	return readl(hdata->regs + reg_id);
+}
+
+static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
+				 u32 reg_id, u8 value)
+{
+	writeb(value, hdata->regs + reg_id);
+}
+
+static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
+				 u32 reg_id, u32 value, u32 mask)
+{
+	u32 old = readl(hdata->regs + reg_id);
+	value = (value & mask) | (old & ~mask);
+	writel(value, hdata->regs + reg_id);
+}
+
+static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix)
+{
+#define DUMPREG(reg_id) \
+	DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
+	readl(hdata->regs + reg_id))
+	DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
+	DUMPREG(HDMI_INTC_FLAG);
+	DUMPREG(HDMI_INTC_CON);
+	DUMPREG(HDMI_HPD_STATUS);
+	DUMPREG(HDMI_V13_PHY_RSTOUT);
+	DUMPREG(HDMI_V13_PHY_VPLL);
+	DUMPREG(HDMI_V13_PHY_CMU);
+	DUMPREG(HDMI_V13_CORE_RSTOUT);
+
+	DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
+	DUMPREG(HDMI_CON_0);
+	DUMPREG(HDMI_CON_1);
+	DUMPREG(HDMI_CON_2);
+	DUMPREG(HDMI_SYS_STATUS);
+	DUMPREG(HDMI_V13_PHY_STATUS);
+	DUMPREG(HDMI_STATUS_EN);
+	DUMPREG(HDMI_HPD);
+	DUMPREG(HDMI_MODE_SEL);
+	DUMPREG(HDMI_V13_HPD_GEN);
+	DUMPREG(HDMI_V13_DC_CONTROL);
+	DUMPREG(HDMI_V13_VIDEO_PATTERN_GEN);
+
+	DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
+	DUMPREG(HDMI_H_BLANK_0);
+	DUMPREG(HDMI_H_BLANK_1);
+	DUMPREG(HDMI_V13_V_BLANK_0);
+	DUMPREG(HDMI_V13_V_BLANK_1);
+	DUMPREG(HDMI_V13_V_BLANK_2);
+	DUMPREG(HDMI_V13_H_V_LINE_0);
+	DUMPREG(HDMI_V13_H_V_LINE_1);
+	DUMPREG(HDMI_V13_H_V_LINE_2);
+	DUMPREG(HDMI_VSYNC_POL);
+	DUMPREG(HDMI_INT_PRO_MODE);
+	DUMPREG(HDMI_V13_V_BLANK_F_0);
+	DUMPREG(HDMI_V13_V_BLANK_F_1);
+	DUMPREG(HDMI_V13_V_BLANK_F_2);
+	DUMPREG(HDMI_V13_H_SYNC_GEN_0);
+	DUMPREG(HDMI_V13_H_SYNC_GEN_1);
+	DUMPREG(HDMI_V13_H_SYNC_GEN_2);
+	DUMPREG(HDMI_V13_V_SYNC_GEN_1_0);
+	DUMPREG(HDMI_V13_V_SYNC_GEN_1_1);
+	DUMPREG(HDMI_V13_V_SYNC_GEN_1_2);
+	DUMPREG(HDMI_V13_V_SYNC_GEN_2_0);
+	DUMPREG(HDMI_V13_V_SYNC_GEN_2_1);
+	DUMPREG(HDMI_V13_V_SYNC_GEN_2_2);
+	DUMPREG(HDMI_V13_V_SYNC_GEN_3_0);
+	DUMPREG(HDMI_V13_V_SYNC_GEN_3_1);
+	DUMPREG(HDMI_V13_V_SYNC_GEN_3_2);
+
+	DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
+	DUMPREG(HDMI_TG_CMD);
+	DUMPREG(HDMI_TG_H_FSZ_L);
+	DUMPREG(HDMI_TG_H_FSZ_H);
+	DUMPREG(HDMI_TG_HACT_ST_L);
+	DUMPREG(HDMI_TG_HACT_ST_H);
+	DUMPREG(HDMI_TG_HACT_SZ_L);
+	DUMPREG(HDMI_TG_HACT_SZ_H);
+	DUMPREG(HDMI_TG_V_FSZ_L);
+	DUMPREG(HDMI_TG_V_FSZ_H);
+	DUMPREG(HDMI_TG_VSYNC_L);
+	DUMPREG(HDMI_TG_VSYNC_H);
+	DUMPREG(HDMI_TG_VSYNC2_L);
+	DUMPREG(HDMI_TG_VSYNC2_H);
+	DUMPREG(HDMI_TG_VACT_ST_L);
+	DUMPREG(HDMI_TG_VACT_ST_H);
+	DUMPREG(HDMI_TG_VACT_SZ_L);
+	DUMPREG(HDMI_TG_VACT_SZ_H);
+	DUMPREG(HDMI_TG_FIELD_CHG_L);
+	DUMPREG(HDMI_TG_FIELD_CHG_H);
+	DUMPREG(HDMI_TG_VACT_ST2_L);
+	DUMPREG(HDMI_TG_VACT_ST2_H);
+	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
+	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
+	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
+	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
+	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
+	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
+	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
+	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+#undef DUMPREG
+}
+
+static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix)
+{
+	int i;
+
+#define DUMPREG(reg_id) \
+	DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
+	readl(hdata->regs + reg_id))
+
+	DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
+	DUMPREG(HDMI_INTC_CON);
+	DUMPREG(HDMI_INTC_FLAG);
+	DUMPREG(HDMI_HPD_STATUS);
+	DUMPREG(HDMI_INTC_CON_1);
+	DUMPREG(HDMI_INTC_FLAG_1);
+	DUMPREG(HDMI_PHY_STATUS_0);
+	DUMPREG(HDMI_PHY_STATUS_PLL);
+	DUMPREG(HDMI_PHY_CON_0);
+	DUMPREG(HDMI_PHY_RSTOUT);
+	DUMPREG(HDMI_PHY_VPLL);
+	DUMPREG(HDMI_PHY_CMU);
+	DUMPREG(HDMI_CORE_RSTOUT);
+
+	DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
+	DUMPREG(HDMI_CON_0);
+	DUMPREG(HDMI_CON_1);
+	DUMPREG(HDMI_CON_2);
+	DUMPREG(HDMI_SYS_STATUS);
+	DUMPREG(HDMI_PHY_STATUS_0);
+	DUMPREG(HDMI_STATUS_EN);
+	DUMPREG(HDMI_HPD);
+	DUMPREG(HDMI_MODE_SEL);
+	DUMPREG(HDMI_ENC_EN);
+	DUMPREG(HDMI_DC_CONTROL);
+	DUMPREG(HDMI_VIDEO_PATTERN_GEN);
+
+	DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
+	DUMPREG(HDMI_H_BLANK_0);
+	DUMPREG(HDMI_H_BLANK_1);
+	DUMPREG(HDMI_V2_BLANK_0);
+	DUMPREG(HDMI_V2_BLANK_1);
+	DUMPREG(HDMI_V1_BLANK_0);
+	DUMPREG(HDMI_V1_BLANK_1);
+	DUMPREG(HDMI_V_LINE_0);
+	DUMPREG(HDMI_V_LINE_1);
+	DUMPREG(HDMI_H_LINE_0);
+	DUMPREG(HDMI_H_LINE_1);
+	DUMPREG(HDMI_HSYNC_POL);
+
+	DUMPREG(HDMI_VSYNC_POL);
+	DUMPREG(HDMI_INT_PRO_MODE);
+	DUMPREG(HDMI_V_BLANK_F0_0);
+	DUMPREG(HDMI_V_BLANK_F0_1);
+	DUMPREG(HDMI_V_BLANK_F1_0);
+	DUMPREG(HDMI_V_BLANK_F1_1);
+
+	DUMPREG(HDMI_H_SYNC_START_0);
+	DUMPREG(HDMI_H_SYNC_START_1);
+	DUMPREG(HDMI_H_SYNC_END_0);
+	DUMPREG(HDMI_H_SYNC_END_1);
+
+	DUMPREG(HDMI_V_SYNC_LINE_BEF_2_0);
+	DUMPREG(HDMI_V_SYNC_LINE_BEF_2_1);
+	DUMPREG(HDMI_V_SYNC_LINE_BEF_1_0);
+	DUMPREG(HDMI_V_SYNC_LINE_BEF_1_1);
+
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_2_0);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_2_1);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_1_0);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_1_1);
+
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_0);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_1);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_0);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_1);
+
+	DUMPREG(HDMI_V_BLANK_F2_0);
+	DUMPREG(HDMI_V_BLANK_F2_1);
+	DUMPREG(HDMI_V_BLANK_F3_0);
+	DUMPREG(HDMI_V_BLANK_F3_1);
+	DUMPREG(HDMI_V_BLANK_F4_0);
+	DUMPREG(HDMI_V_BLANK_F4_1);
+	DUMPREG(HDMI_V_BLANK_F5_0);
+	DUMPREG(HDMI_V_BLANK_F5_1);
+
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_3_0);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_3_1);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_4_0);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_4_1);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_5_0);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_5_1);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_6_0);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_6_1);
+
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_0);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_1);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_0);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_1);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_0);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_1);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_0);
+	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_1);
+
+	DUMPREG(HDMI_VACT_SPACE_1_0);
+	DUMPREG(HDMI_VACT_SPACE_1_1);
+	DUMPREG(HDMI_VACT_SPACE_2_0);
+	DUMPREG(HDMI_VACT_SPACE_2_1);
+	DUMPREG(HDMI_VACT_SPACE_3_0);
+	DUMPREG(HDMI_VACT_SPACE_3_1);
+	DUMPREG(HDMI_VACT_SPACE_4_0);
+	DUMPREG(HDMI_VACT_SPACE_4_1);
+	DUMPREG(HDMI_VACT_SPACE_5_0);
+	DUMPREG(HDMI_VACT_SPACE_5_1);
+	DUMPREG(HDMI_VACT_SPACE_6_0);
+	DUMPREG(HDMI_VACT_SPACE_6_1);
+
+	DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
+	DUMPREG(HDMI_TG_CMD);
+	DUMPREG(HDMI_TG_H_FSZ_L);
+	DUMPREG(HDMI_TG_H_FSZ_H);
+	DUMPREG(HDMI_TG_HACT_ST_L);
+	DUMPREG(HDMI_TG_HACT_ST_H);
+	DUMPREG(HDMI_TG_HACT_SZ_L);
+	DUMPREG(HDMI_TG_HACT_SZ_H);
+	DUMPREG(HDMI_TG_V_FSZ_L);
+	DUMPREG(HDMI_TG_V_FSZ_H);
+	DUMPREG(HDMI_TG_VSYNC_L);
+	DUMPREG(HDMI_TG_VSYNC_H);
+	DUMPREG(HDMI_TG_VSYNC2_L);
+	DUMPREG(HDMI_TG_VSYNC2_H);
+	DUMPREG(HDMI_TG_VACT_ST_L);
+	DUMPREG(HDMI_TG_VACT_ST_H);
+	DUMPREG(HDMI_TG_VACT_SZ_L);
+	DUMPREG(HDMI_TG_VACT_SZ_H);
+	DUMPREG(HDMI_TG_FIELD_CHG_L);
+	DUMPREG(HDMI_TG_FIELD_CHG_H);
+	DUMPREG(HDMI_TG_VACT_ST2_L);
+	DUMPREG(HDMI_TG_VACT_ST2_H);
+	DUMPREG(HDMI_TG_VACT_ST3_L);
+	DUMPREG(HDMI_TG_VACT_ST3_H);
+	DUMPREG(HDMI_TG_VACT_ST4_L);
+	DUMPREG(HDMI_TG_VACT_ST4_H);
+	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
+	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
+	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
+	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
+	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
+	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
+	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
+	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+	DUMPREG(HDMI_TG_3D);
+
+	DRM_DEBUG_KMS("%s: ---- PACKET REGISTERS ----\n", prefix);
+	DUMPREG(HDMI_AVI_CON);
+	DUMPREG(HDMI_AVI_HEADER0);
+	DUMPREG(HDMI_AVI_HEADER1);
+	DUMPREG(HDMI_AVI_HEADER2);
+	DUMPREG(HDMI_AVI_CHECK_SUM);
+	DUMPREG(HDMI_VSI_CON);
+	DUMPREG(HDMI_VSI_HEADER0);
+	DUMPREG(HDMI_VSI_HEADER1);
+	DUMPREG(HDMI_VSI_HEADER2);
+	for (i = 0; i < 7; ++i)
+		DUMPREG(HDMI_VSI_DATA(i));
+
+#undef DUMPREG
+}
+
+static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
+{
+	if (hdata->type == HDMI_TYPE13)
+		hdmi_v13_regs_dump(hdata, prefix);
+	else
+		hdmi_v14_regs_dump(hdata, prefix);
+}
+
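+/*
+ * Compute an InfoFrame checksum: the two's complement of the 8-bit sum
+ * of the header bytes (hdr_sum) and the payload bytes read back from
+ * the packet registers.
+ */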
+static u8 hdmi_chksum(struct hdmi_context *hdata,
+			u32 start, u8 len, u32 hdr_sum)
+{
+	int i;
+
+	/*
+	 * hdr_sum : header0 + header1 + header2
+	 * start   : start address of packet byte1
+	 * len     : packet bytes - 1
+	 */
+	for (i = 0; i < len; ++i)
+		hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
+
+	/* return 2's complement of 8 bit hdr_sum */
+	return (u8)(~(hdr_sum & 0xff) + 1);
+}
+
+static void hdmi_reg_infoframe(struct hdmi_context *hdata,
+			struct hdmi_infoframe *infoframe)
+{
+	u32 hdr_sum;
+	u8 chksum;
+	u32 aspect_ratio;
+	u32 mod;
+	u32 vic;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
+	if (hdata->dvi_mode) {
+		hdmi_reg_writeb(hdata, HDMI_VSI_CON,
+				HDMI_VSI_CON_DO_NOT_TRANSMIT);
+		hdmi_reg_writeb(hdata, HDMI_AVI_CON,
+				HDMI_AVI_CON_DO_NOT_TRANSMIT);
+		hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
+		return;
+	}
+
+	switch (infoframe->type) {
+	case HDMI_PACKET_TYPE_AVI:
+		hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
+		hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
+		hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+
+		/* Output format hardcoded to zero (RGB/YCbCr selection) */
+		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
+			AVI_ACTIVE_FORMAT_VALID |
+			AVI_UNDERSCANNED_DISPLAY_VALID);
+
+		aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
+
+		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
+				AVI_SAME_AS_PIC_ASPECT_RATIO);
+
+		vic = hdata->mode_conf.cea_video_id;
+		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+
+		chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
+					infoframe->len, hdr_sum);
+		DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
+		hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
+		break;
+	case HDMI_PACKET_TYPE_AUI:
+		hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
+		hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
+		hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+		chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
+					infoframe->len, hdr_sum);
+		DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
+		hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
+		break;
+	default:
+		break;
+	}
+}
+
+static bool hdmi_is_connected(void *ctx)
+{
+	struct hdmi_context *hdata = ctx;
+
+	return hdata->hpd;
+}
+
+static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)
+{
+	struct edid *raw_edid;
+	struct hdmi_context *hdata = ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (!hdata->ddc_port)
+		return ERR_PTR(-ENODEV);
+
+	raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
+	if (!raw_edid)
+		return ERR_PTR(-ENODEV);
+
+	hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
+	DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
+		(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
+		raw_edid->width_cm, raw_edid->height_cm);
+
+	return raw_edid;
+}
+
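+/* look up the PHY configuration entry matching the requested pixel clock */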
+static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
+{
+	const struct hdmiphy_config *confs;
+	int count, i;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (hdata->type == HDMI_TYPE13) {
+		confs = hdmiphy_v13_configs;
+		count = ARRAY_SIZE(hdmiphy_v13_configs);
+	} else if (hdata->type == HDMI_TYPE14) {
+		confs = hdmiphy_v14_configs;
+		count = ARRAY_SIZE(hdmiphy_v14_configs);
+	} else {
+		return -EINVAL;
+	}
+
+	for (i = 0; i < count; i++)
+		if (confs[i].pixel_clock == pixel_clock)
+			return i;
+
+	DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
+	return -EINVAL;
+}
+
+static int hdmi_check_timing(void *ctx, struct fb_videomode *timing)
+{
+	struct hdmi_context *hdata = ctx;
+	int ret;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", timing->xres,
+			timing->yres, timing->refresh,
+			timing->vmode);
+
+	ret = hdmi_find_phy_conf(hdata, timing->pixclock);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
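+/*
+ * Select the Audio Clock Regeneration N/CTS pair for the given audio
+ * sample rate; the N values follow the recommended tables in the HDMI
+ * specification, and the fixed CTS values here correspond to a 27 MHz
+ * TMDS clock.
+ */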
+static void hdmi_set_acr(u32 freq, u8 *acr)
+{
+	u32 n, cts;
+
+	switch (freq) {
+	case 32000:
+		n = 4096;
+		cts = 27000;
+		break;
+	case 44100:
+		n = 6272;
+		cts = 30000;
+		break;
+	case 88200:
+		n = 12544;
+		cts = 30000;
+		break;
+	case 176400:
+		n = 25088;
+		cts = 30000;
+		break;
+	case 48000:
+		n = 6144;
+		cts = 27000;
+		break;
+	case 96000:
+		n = 12288;
+		cts = 27000;
+		break;
+	case 192000:
+		n = 24576;
+		cts = 27000;
+		break;
+	default:
+		n = 0;
+		cts = 0;
+		break;
+	}
+
+	acr[1] = cts >> 16;
+	acr[2] = cts >> 8 & 0xff;
+	acr[3] = cts & 0xff;
+
+	acr[4] = n >> 16;
+	acr[5] = n >> 8 & 0xff;
+	acr[6] = n & 0xff;
+}
+
+static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
+{
+	hdmi_reg_writeb(hdata, HDMI_ACR_N0, acr[6]);
+	hdmi_reg_writeb(hdata, HDMI_ACR_N1, acr[5]);
+	hdmi_reg_writeb(hdata, HDMI_ACR_N2, acr[4]);
+	hdmi_reg_writeb(hdata, HDMI_ACR_MCTS0, acr[3]);
+	hdmi_reg_writeb(hdata, HDMI_ACR_MCTS1, acr[2]);
+	hdmi_reg_writeb(hdata, HDMI_ACR_MCTS2, acr[1]);
+	hdmi_reg_writeb(hdata, HDMI_ACR_CTS0, acr[3]);
+	hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]);
+	hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]);
+
+	if (hdata->type == HDMI_TYPE13)
+		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4);
+	else
+		hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
+}
+
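+/* set up the I2S audio input path for 16-bit 44.1 kHz PCM */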
+static void hdmi_audio_init(struct hdmi_context *hdata)
+{
+	u32 sample_rate, bits_per_sample, frame_size_code;
+	u32 data_num, bit_ch, sample_frq;
+	u32 val;
+	u8 acr[7];
+
+	sample_rate = 44100;
+	bits_per_sample = 16;
+	frame_size_code = 0;
+
+	switch (bits_per_sample) {
+	case 20:
+		data_num = 2;
+		bit_ch  = 1;
+		break;
+	case 24:
+		data_num = 3;
+		bit_ch  = 1;
+		break;
+	default:
+		data_num = 1;
+		bit_ch  = 0;
+		break;
+	}
+
+	hdmi_set_acr(sample_rate, acr);
+	hdmi_reg_acr(hdata, acr);
+
+	hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE
+				| HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE
+				| HDMI_I2S_MUX_ENABLE);
+
+	hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CH, HDMI_I2S_CH0_EN
+			| HDMI_I2S_CH1_EN | HDMI_I2S_CH2_EN);
+
+	hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CUV, HDMI_I2S_CUV_RL_EN);
+
+	sample_frq = (sample_rate == 44100) ? 0 :
+			(sample_rate == 48000) ? 2 :
+			(sample_rate == 32000) ? 3 :
+			(sample_rate == 96000) ? 0xa : 0x0;
+
+	hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_DIS);
+	hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_EN);
+
+	val = hdmi_reg_read(hdata, HDMI_I2S_DSD_CON) | 0x01;
+	hdmi_reg_writeb(hdata, HDMI_I2S_DSD_CON, val);
+
+	/* Configure the I2S input ports (I2S_PIN_SEL_0~4) */
+	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5)
+			| HDMI_I2S_SEL_LRCK(6));
+	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1)
+			| HDMI_I2S_SEL_SDATA2(4));
+	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1)
+			| HDMI_I2S_SEL_SDATA2(2));
+	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0));
+
+	/* I2S_CON_1 & 2 */
+	hdmi_reg_writeb(hdata, HDMI_I2S_CON_1, HDMI_I2S_SCLK_FALLING_EDGE
+			| HDMI_I2S_L_CH_LOW_POL);
+	hdmi_reg_writeb(hdata, HDMI_I2S_CON_2, HDMI_I2S_MSB_FIRST_MODE
+			| HDMI_I2S_SET_BIT_CH(bit_ch)
+			| HDMI_I2S_SET_SDATA_BIT(data_num)
+			| HDMI_I2S_BASIC_FORMAT);
+
+	/* Configure register related to CUV information */
+	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_0, HDMI_I2S_CH_STATUS_MODE_0
+			| HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH
+			| HDMI_I2S_COPYRIGHT
+			| HDMI_I2S_LINEAR_PCM
+			| HDMI_I2S_CONSUMER_FORMAT);
+	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_1, HDMI_I2S_CD_PLAYER);
+	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_2, HDMI_I2S_SET_SOURCE_NUM(0));
+	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_3, HDMI_I2S_CLK_ACCUR_LEVEL_2
+			| HDMI_I2S_SET_SMP_FREQ(sample_frq));
+	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_4,
+			HDMI_I2S_ORG_SMP_FREQ_44_1
+			| HDMI_I2S_WORD_LEN_MAX24_24BITS
+			| HDMI_I2S_WORD_LEN_MAX_24BITS);
+
+	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_CON, HDMI_I2S_CH_STATUS_RELOAD);
+}
+
+static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
+{
+	if (hdata->dvi_mode)
+		return;
+
+	hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0);
+	hdmi_reg_writemask(hdata, HDMI_CON_0, onoff ?
+			HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK);
+}
+
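+/* pulse the software reset bit of the HDMI core block */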
+static void hdmi_conf_reset(struct hdmi_context *hdata)
+{
+	u32 reg;
+
+	if (hdata->type == HDMI_TYPE13)
+		reg = HDMI_V13_CORE_RSTOUT;
+	else
+		reg = HDMI_CORE_RSTOUT;
+
+	/* resetting HDMI core */
+	hdmi_reg_writemask(hdata, reg,  0, HDMI_CORE_SW_RSTOUT);
+	usleep_range(10000, 12000);
+	hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
+	usleep_range(10000, 12000);
+}
+
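+/*
+ * Apply the mode-independent configuration: route HPD handling to the
+ * external GPIO, select HDMI or DVI mode and program the AVI/AUI
+ * InfoFrames (v1.4) or their fixed register equivalents (v1.3).
+ */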
+static void hdmi_conf_init(struct hdmi_context *hdata)
+{
+	struct hdmi_infoframe infoframe;
+
+	/* disable HPD interrupts from HDMI IP block, use GPIO instead */
+	hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
+		HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+
+	/* choose HDMI mode */
+	hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
+		HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
+	/* disable bluescreen */
+	hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
+
+	if (hdata->dvi_mode) {
+		/* choose DVI mode */
+		hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
+				HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
+		hdmi_reg_writeb(hdata, HDMI_CON_2,
+				HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS);
+	}
+
+	if (hdata->type == HDMI_TYPE13) {
+		/* choose the bluescreen color */
+		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
+		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34);
+		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_2, 0x56);
+
+		/* enable AVI packet every vsync, fixes purple line problem */
+		hdmi_reg_writeb(hdata, HDMI_V13_AVI_CON, 0x02);
+		/* force RGB, look to CEA-861-D, table 7 for more detail */
+		hdmi_reg_writeb(hdata, HDMI_V13_AVI_BYTE(0), 0 << 5);
+		hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5);
+
+		hdmi_reg_writeb(hdata, HDMI_V13_SPD_CON, 0x02);
+		hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
+		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
+	} else {
+		infoframe.type = HDMI_PACKET_TYPE_AVI;
+		infoframe.ver = HDMI_AVI_VERSION;
+		infoframe.len = HDMI_AVI_LENGTH;
+		hdmi_reg_infoframe(hdata, &infoframe);
+
+		infoframe.type = HDMI_PACKET_TYPE_AUI;
+		infoframe.ver = HDMI_AUI_VERSION;
+		infoframe.len = HDMI_AUI_LENGTH;
+		hdmi_reg_infoframe(hdata, &infoframe);
+
+		/* enable AVI packet every vsync, fixes purple line problem */
+		hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
+	}
+}
+
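+/*
+ * Program the precomputed v1.3 core and timing-generator registers,
+ * wait for the PHY PLL to lock, reparent sclk_hdmi to the PHY clock and
+ * finally enable the HDMI output and the timing generator.
+ */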
+static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
+{
+	const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
+	const struct hdmi_v13_core_regs *core =
+		&hdata->mode_conf.conf.v13_conf.core;
+	int tries;
+
+	/* setting core registers */
+	hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
+	hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]);
+	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]);
+	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]);
+	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]);
+	hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
+	hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]);
+	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]);
+	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]);
+	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
+	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+	/* Timing generator registers */
+	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
+
+	/* waiting for HDMIPHY's PLL to get to steady state */
+	for (tries = 100; tries; --tries) {
+		u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS);
+		if (val & HDMI_PHY_STATUS_READY)
+			break;
+		usleep_range(1000, 2000);
+	}
+	/* steady state not achieved */
+	if (tries == 0) {
+		DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
+		hdmi_regs_dump(hdata, "timing apply");
+	}
+
+	clk_disable(hdata->res.sclk_hdmi);
+	clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy);
+	clk_enable(hdata->res.sclk_hdmi);
+
+	/* enable HDMI and timing generator */
+	hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
+	if (core->int_pro_mode[0])
+		hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
+				HDMI_FIELD_EN);
+	else
+		hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
+}
+
+static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
+{
+	const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
+	const struct hdmi_v14_core_regs *core =
+		&hdata->mode_conf.conf.v14_conf.core;
+	int tries;
+
+	/* setting core registers */
+	hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
+	hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
+	hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]);
+	hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]);
+	hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]);
+	hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]);
+	hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]);
+	hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]);
+	hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]);
+	hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
+	hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]);
+	hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]);
+	hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]);
+	hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]);
+	hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0,
+			core->v_sync_line_bef_2[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1,
+			core->v_sync_line_bef_2[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0,
+			core->v_sync_line_bef_1[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1,
+			core->v_sync_line_bef_1[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0,
+			core->v_sync_line_aft_2[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1,
+			core->v_sync_line_aft_2[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0,
+			core->v_sync_line_aft_1[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1,
+			core->v_sync_line_aft_1[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0,
+			core->v_sync_line_aft_pxl_2[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1,
+			core->v_sync_line_aft_pxl_2[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0,
+			core->v_sync_line_aft_pxl_1[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1,
+			core->v_sync_line_aft_pxl_1[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0,
+			core->v_sync_line_aft_3[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1,
+			core->v_sync_line_aft_3[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0,
+			core->v_sync_line_aft_4[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1,
+			core->v_sync_line_aft_4[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0,
+			core->v_sync_line_aft_5[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1,
+			core->v_sync_line_aft_5[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0,
+			core->v_sync_line_aft_6[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1,
+			core->v_sync_line_aft_6[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0,
+			core->v_sync_line_aft_pxl_3[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1,
+			core->v_sync_line_aft_pxl_3[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0,
+			core->v_sync_line_aft_pxl_4[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1,
+			core->v_sync_line_aft_pxl_4[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0,
+			core->v_sync_line_aft_pxl_5[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1,
+			core->v_sync_line_aft_pxl_5[1]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0,
+			core->v_sync_line_aft_pxl_6[0]);
+	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1,
+			core->v_sync_line_aft_pxl_6[1]);
+	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]);
+	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]);
+	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]);
+	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]);
+	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]);
+	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]);
+	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]);
+	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]);
+	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]);
+	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]);
+	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]);
+	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]);
+
+	/* Timing generator registers */
+	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
+	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
+	hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]);
+
+	/* waiting for HDMIPHY's PLL to get to steady state */
+	for (tries = 100; tries; --tries) {
+		u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0);
+		if (val & HDMI_PHY_STATUS_READY)
+			break;
+		usleep_range(1000, 2000);
+	}
+	/* steady state not achieved */
+	if (tries == 0) {
+		DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
+		hdmi_regs_dump(hdata, "timing apply");
+	}
+
+	clk_disable(hdata->res.sclk_hdmi);
+	clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy);
+	clk_enable(hdata->res.sclk_hdmi);
+
+	/* enable HDMI and timing generator */
+	hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
+	if (core->int_pro_mode[0])
+		hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
+				HDMI_FIELD_EN);
+	else
+		hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
+}
+
+static void hdmi_timing_apply(struct hdmi_context *hdata)
+{
+	if (hdata->type == HDMI_TYPE13)
+		hdmi_v13_timing_apply(hdata);
+	else
+		hdmi_v14_timing_apply(hdata);
+}
+
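+/*
+ * Reparent sclk_hdmi back to the pixel clock, take the PHY out of
+ * operation mode over I2C and pulse its software reset line.
+ */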
+static void hdmiphy_conf_reset(struct hdmi_context *hdata)
+{
+	u8 buffer[2];
+	u32 reg;
+
+	clk_disable(hdata->res.sclk_hdmi);
+	clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel);
+	clk_enable(hdata->res.sclk_hdmi);
+
+	/* operation mode */
+	buffer[0] = 0x1f;
+	buffer[1] = 0x00;
+
+	if (hdata->hdmiphy_port)
+		i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+
+	if (hdata->type == HDMI_TYPE13)
+		reg = HDMI_V13_PHY_RSTOUT;
+	else
+		reg = HDMI_PHY_RSTOUT;
+
+	/* reset hdmiphy */
+	hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT);
+	usleep_range(10000, 12000);
+	hdmi_reg_writemask(hdata, reg,  0, HDMI_PHY_SW_RSTOUT);
+	usleep_range(10000, 12000);
+}
+
+static void hdmiphy_poweron(struct hdmi_context *hdata)
+{
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (hdata->type == HDMI_TYPE14)
+		hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
+			HDMI_PHY_POWER_OFF_EN);
+}
+
+static void hdmiphy_poweroff(struct hdmi_context *hdata)
+{
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (hdata->type == HDMI_TYPE14)
+		hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
+			HDMI_PHY_POWER_OFF_EN);
+}
+
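+/*
+ * Send the 32-byte PHY configuration matching the current pixel clock
+ * over I2C, switch the PHY into operation mode and read the
+ * configuration back for debugging.
+ */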
+static void hdmiphy_conf_apply(struct hdmi_context *hdata)
+{
+	const u8 *hdmiphy_data;
+	u8 buffer[32];
+	u8 operation[2];
+	u8 read_buffer[32] = {0, };
+	int ret;
+	int i;
+
+	if (!hdata->hdmiphy_port) {
+		DRM_ERROR("hdmiphy is not attached\n");
+		return;
+	}
+
+	/* pixel clock */
+	i = hdmi_find_phy_conf(hdata, hdata->mode_conf.pixel_clock);
+	if (i < 0) {
+		DRM_ERROR("failed to find hdmiphy conf\n");
+		return;
+	}
+
+	if (hdata->type == HDMI_TYPE13)
+		hdmiphy_data = hdmiphy_v13_configs[i].conf;
+	else
+		hdmiphy_data = hdmiphy_v14_configs[i].conf;
+
+	memcpy(buffer, hdmiphy_data, 32);
+	ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
+	if (ret != 32) {
+		DRM_ERROR("failed to configure HDMIPHY via I2C\n");
+		return;
+	}
+
+	usleep_range(10000, 12000);
+
+	/* operation mode */
+	operation[0] = 0x1f;
+	operation[1] = 0x80;
+
+	ret = i2c_master_send(hdata->hdmiphy_port, operation, 2);
+	if (ret != 2) {
+		DRM_ERROR("failed to enable hdmiphy\n");
+		return;
+	}
+
+	ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32);
+	if (ret < 0) {
+		DRM_ERROR("failed to read hdmiphy config\n");
+		return;
+	}
+
+	for (i = 0; i < ret; i++)
+		DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - "
+			"recv [0x%02x]\n", i, buffer[i], read_buffer[i]);
+}
+
+static void hdmi_conf_apply(struct hdmi_context *hdata)
+{
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	hdmiphy_conf_reset(hdata);
+	hdmiphy_conf_apply(hdata);
+
+	mutex_lock(&hdata->hdmi_mutex);
+	hdmi_conf_reset(hdata);
+	hdmi_conf_init(hdata);
+	mutex_unlock(&hdata->hdmi_mutex);
+
+	hdmi_audio_init(hdata);
+
+	/* setting core registers */
+	hdmi_timing_apply(hdata);
+	hdmi_audio_control(hdata, true);
+
+	hdmi_regs_dump(hdata, "start");
+}
+
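+/* split a register value into little-endian bytes for the write helpers */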
+static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
+{
+	int i;
+	BUG_ON(num_bytes > 4);
+	for (i = 0; i < num_bytes; i++)
+		reg_pair[i] = (value >> (8 * i)) & 0xff;
+}
+
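+/* translate a DRM display mode into the v1.3 core/TG register values */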
+static void hdmi_v13_mode_set(struct hdmi_context *hdata,
+			struct drm_display_mode *m)
+{
+	struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core;
+	struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg;
+	unsigned int val;
+
+	hdata->mode_conf.cea_video_id =
+		drm_match_cea_mode((struct drm_display_mode *)m);
+	hdata->mode_conf.pixel_clock = m->clock * 1000;
+
+	hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
+	hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal);
+
+	val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+	hdmi_set_reg(core->vsync_pol, 1, val);
+
+	val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0;
+	hdmi_set_reg(core->int_pro_mode, 1, val);
+
+	val = (m->hsync_start - m->hdisplay - 2);
+	val |= ((m->hsync_end - m->hdisplay - 2) << 10);
+	val |= ((m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0)<<20;
+	hdmi_set_reg(core->h_sync_gen, 3, val);
+
+	/*
+	 * Quirk requirement for exynos HDMI IP design,
+	 * 2 pixels less than the actual calculation for hsync_start
+	 * and end.
+	 */
+
+	/* Following values & calculations differ for different type of modes */
+	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+		/* Interlaced Mode */
+		val = ((m->vsync_end - m->vdisplay) / 2);
+		val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
+		hdmi_set_reg(core->v_sync_gen1, 3, val);
+
+		val = m->vtotal / 2;
+		val |= ((m->vtotal - m->vdisplay) / 2) << 11;
+		hdmi_set_reg(core->v_blank, 3, val);
+
+		val = (m->vtotal +
+			((m->vsync_end - m->vsync_start) * 4) + 5) / 2;
+		val |= m->vtotal << 11;
+		hdmi_set_reg(core->v_blank_f, 3, val);
+
+		val = ((m->vtotal / 2) + 7);
+		val |= ((m->vtotal / 2) + 2) << 12;
+		hdmi_set_reg(core->v_sync_gen2, 3, val);
+
+		val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay));
+		val |= ((m->htotal / 2) +
+			(m->hsync_start - m->hdisplay)) << 12;
+		hdmi_set_reg(core->v_sync_gen3, 3, val);
+
+		hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
+		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
+
+		hdmi_set_reg(tg->vact_st2, 2, 0x249); /* Reset value + 1 */
+	} else {
+		/* Progressive Mode */
+
+		val = m->vtotal;
+		val |= (m->vtotal - m->vdisplay) << 11;
+		hdmi_set_reg(core->v_blank, 3, val);
+
+		hdmi_set_reg(core->v_blank_f, 3, 0);
+
+		val = (m->vsync_end - m->vdisplay);
+		val |= ((m->vsync_start - m->vdisplay) << 12);
+		hdmi_set_reg(core->v_sync_gen1, 3, val);
+
+		hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value  */
+		hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value  */
+		hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
+		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
+		hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
+	}
+
+	/* Timing generator registers */
+	hdmi_set_reg(tg->cmd, 1, 0x0);
+	hdmi_set_reg(tg->h_fsz, 2, m->htotal);
+	hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
+	hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
+	hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
+	hdmi_set_reg(tg->vsync, 2, 0x1);
+	hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
+	hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
+	hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
+	hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
+	hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
+	hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
+	hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */
+}
+
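+/* translate a DRM display mode into the v1.4 core/TG register values */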
+static void hdmi_v14_mode_set(struct hdmi_context *hdata,
+			struct drm_display_mode *m)
+{
+	struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg;
+	struct hdmi_v14_core_regs *core =
+		&hdata->mode_conf.conf.v14_conf.core;
+
+	hdata->mode_conf.cea_video_id =
+		drm_match_cea_mode((struct drm_display_mode *)m);
+	hdata->mode_conf.pixel_clock = m->clock * 1000;
+
+	hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
+	hdmi_set_reg(core->v_line, 2, m->vtotal);
+	hdmi_set_reg(core->h_line, 2, m->htotal);
+	hdmi_set_reg(core->hsync_pol, 1,
+			(m->flags & DRM_MODE_FLAG_NHSYNC)  ? 1 : 0);
+	hdmi_set_reg(core->vsync_pol, 1,
+			(m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
+	hdmi_set_reg(core->int_pro_mode, 1,
+			(m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
+
+	/*
+	 * Quirk requirement for exynos 5 HDMI IP design,
+	 * 2 pixels less than the actual calculation for hsync_start
+	 * and end.
+	 */
+
+	/* Following values & calculations differ for different type of modes */
+	if (m->flags & DRM_MODE_FLAG_INTERLACE) {
+		/* Interlaced Mode */
+		hdmi_set_reg(core->v_sync_line_bef_2, 2,
+			(m->vsync_end - m->vdisplay) / 2);
+		hdmi_set_reg(core->v_sync_line_bef_1, 2,
+			(m->vsync_start - m->vdisplay) / 2);
+		hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2);
+		hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2);
+		hdmi_set_reg(core->v_blank_f0, 2, (m->vtotal +
+			((m->vsync_end - m->vsync_start) * 4) + 5) / 2);
+		hdmi_set_reg(core->v_blank_f1, 2, m->vtotal);
+		hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7);
+		hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2);
+		hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2,
+			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
+		hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2,
+			(m->htotal / 2) + (m->hsync_start - m->hdisplay));
+		hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
+		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
+		hdmi_set_reg(tg->vact_st2, 2, 0x249); /* Reset value + 1 */
+		hdmi_set_reg(tg->vact_st3, 2, 0x0);
+		hdmi_set_reg(tg->vact_st4, 2, 0x0);
+	} else {
+		/* Progressive Mode */
+		hdmi_set_reg(core->v_sync_line_bef_2, 2,
+			m->vsync_end - m->vdisplay);
+		hdmi_set_reg(core->v_sync_line_bef_1, 2,
+			m->vsync_start - m->vdisplay);
+		hdmi_set_reg(core->v2_blank, 2, m->vtotal);
+		hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay);
+		hdmi_set_reg(core->v_blank_f0, 2, 0xffff);
+		hdmi_set_reg(core->v_blank_f1, 2, 0xffff);
+		hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff);
+		hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff);
+		hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff);
+		hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff);
+		hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
+		hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
+		hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
+		hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */
+		hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */
+	}
+
+	/* The following values and calculations are the same for both mode types */
+	hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2);
+	hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2);
+	hdmi_set_reg(core->vact_space_1, 2, 0xffff);
+	hdmi_set_reg(core->vact_space_2, 2, 0xffff);
+	hdmi_set_reg(core->vact_space_3, 2, 0xffff);
+	hdmi_set_reg(core->vact_space_4, 2, 0xffff);
+	hdmi_set_reg(core->vact_space_5, 2, 0xffff);
+	hdmi_set_reg(core->vact_space_6, 2, 0xffff);
+	hdmi_set_reg(core->v_blank_f2, 2, 0xffff);
+	hdmi_set_reg(core->v_blank_f3, 2, 0xffff);
+	hdmi_set_reg(core->v_blank_f4, 2, 0xffff);
+	hdmi_set_reg(core->v_blank_f5, 2, 0xffff);
+	hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff);
+	hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff);
+	hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff);
+	hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff);
+	hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff);
+	hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff);
+	hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff);
+	hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff);
+
+	/* Timing generator registers */
+	hdmi_set_reg(tg->cmd, 1, 0x0);
+	hdmi_set_reg(tg->h_fsz, 2, m->htotal);
+	hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
+	hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
+	hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
+	hdmi_set_reg(tg->vsync, 2, 0x1);
+	hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
+	hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
+	hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
+	hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
+	hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
+	hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
+	hdmi_set_reg(tg->tg_3d, 1, 0x0);
+}
+
+static void hdmi_mode_set(void *ctx, void *mode)
+{
+	struct hdmi_context *hdata = ctx;
+	struct drm_display_mode *m = mode;
+
+	DRM_DEBUG_KMS("[%s]: xres=%d, yres=%d, refresh=%d, intl=%s\n",
+		__func__, m->hdisplay, m->vdisplay,
+		m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
+		"INTERLACED" : "PROGERESSIVE");
+
+	if (hdata->type == HDMI_TYPE13)
+		hdmi_v13_mode_set(hdata, mode);
+	else
+		hdmi_v14_mode_set(hdata, mode);
+}
+
+static void hdmi_get_max_resol(void *ctx, unsigned int *width,
+					unsigned int *height)
+{
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	*width = MAX_WIDTH;
+	*height = MAX_HEIGHT;
+}
+
+static void hdmi_commit(void *ctx)
+{
+	struct hdmi_context *hdata = ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mutex_lock(&hdata->hdmi_mutex);
+	if (!hdata->powered) {
+		mutex_unlock(&hdata->hdmi_mutex);
+		return;
+	}
+	mutex_unlock(&hdata->hdmi_mutex);
+
+	hdmi_conf_apply(hdata);
+}
+
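+/* enable the regulators and clocks, then power up the PHY */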
+static void hdmi_poweron(struct hdmi_context *hdata)
+{
+	struct hdmi_resources *res = &hdata->res;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mutex_lock(&hdata->hdmi_mutex);
+	if (hdata->powered) {
+		mutex_unlock(&hdata->hdmi_mutex);
+		return;
+	}
+
+	hdata->powered = true;
+
+	mutex_unlock(&hdata->hdmi_mutex);
+
+	regulator_bulk_enable(res->regul_count, res->regul_bulk);
+	clk_enable(res->hdmiphy);
+	clk_enable(res->hdmi);
+	clk_enable(res->sclk_hdmi);
+
+	hdmiphy_poweron(hdata);
+}
+
+static void hdmi_poweroff(struct hdmi_context *hdata)
+{
+	struct hdmi_resources *res = &hdata->res;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mutex_lock(&hdata->hdmi_mutex);
+	if (!hdata->powered)
+		goto out;
+	mutex_unlock(&hdata->hdmi_mutex);
+
+	/*
+	 * Before the TV power domain can be turned off, the hdmiphy must be
+	 * in a suitable state; its reset state seems to satisfy that.
+	 */
+	hdmiphy_conf_reset(hdata);
+	hdmiphy_poweroff(hdata);
+
+	clk_disable(res->sclk_hdmi);
+	clk_disable(res->hdmi);
+	clk_disable(res->hdmiphy);
+	regulator_bulk_disable(res->regul_count, res->regul_bulk);
+
+	mutex_lock(&hdata->hdmi_mutex);
+
+	hdata->powered = false;
+
+out:
+	mutex_unlock(&hdata->hdmi_mutex);
+}
+
+static void hdmi_dpms(void *ctx, int mode)
+{
+	struct hdmi_context *hdata = ctx;
+
+	DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		if (pm_runtime_suspended(hdata->dev))
+			pm_runtime_get_sync(hdata->dev);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (!pm_runtime_suspended(hdata->dev))
+			pm_runtime_put_sync(hdata->dev);
+		break;
+	default:
+		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+		break;
+	}
+}
+
+static struct exynos_hdmi_ops hdmi_ops = {
+	/* display */
+	.is_connected	= hdmi_is_connected,
+	.get_edid	= hdmi_get_edid,
+	.check_timing	= hdmi_check_timing,
+
+	/* manager */
+	.mode_set	= hdmi_mode_set,
+	.get_max_resol	= hdmi_get_max_resol,
+	.commit		= hdmi_commit,
+	.dpms		= hdmi_dpms,
+};
+
+static irqreturn_t hdmi_irq_thread(int irq, void *arg)
+{
+	struct exynos_drm_hdmi_context *ctx = arg;
+	struct hdmi_context *hdata = ctx->ctx;
+
+	mutex_lock(&hdata->hdmi_mutex);
+	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+	mutex_unlock(&hdata->hdmi_mutex);
+
+	if (ctx->drm_dev)
+		drm_helper_hpd_irq_event(ctx->drm_dev);
+
+	return IRQ_HANDLED;
+}
+
+static int hdmi_resources_init(struct hdmi_context *hdata)
+{
+	struct device *dev = hdata->dev;
+	struct hdmi_resources *res = &hdata->res;
+	static char *supply[] = {
+		"hdmi-en",
+		"vdd",
+		"vdd_osc",
+		"vdd_pll",
+	};
+	int i, ret;
+
+	DRM_DEBUG_KMS("HDMI resource init\n");
+
+	memset(res, 0, sizeof(*res));
+
+	/* get clocks, power */
+	res->hdmi = devm_clk_get(dev, "hdmi");
+	if (IS_ERR(res->hdmi)) {
+		DRM_ERROR("failed to get clock 'hdmi'\n");
+		goto fail;
+	}
+	res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
+	if (IS_ERR(res->sclk_hdmi)) {
+		DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
+		goto fail;
+	}
+	res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
+	if (IS_ERR(res->sclk_pixel)) {
+		DRM_ERROR("failed to get clock 'sclk_pixel'\n");
+		goto fail;
+	}
+	res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
+	if (IS_ERR(res->sclk_hdmiphy)) {
+		DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
+		goto fail;
+	}
+	res->hdmiphy = devm_clk_get(dev, "hdmiphy");
+	if (IS_ERR(res->hdmiphy)) {
+		DRM_ERROR("failed to get clock 'hdmiphy'\n");
+		goto fail;
+	}
+
+	clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
+
+	res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
+		sizeof(res->regul_bulk[0]), GFP_KERNEL);
+	if (!res->regul_bulk) {
+		DRM_ERROR("failed to get memory for regulators\n");
+		goto fail;
+	}
+	for (i = 0; i < ARRAY_SIZE(supply); ++i) {
+		res->regul_bulk[i].supply = supply[i];
+		res->regul_bulk[i].consumer = NULL;
+	}
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+	if (ret) {
+		DRM_ERROR("failed to get regulators\n");
+		goto fail;
+	}
+	res->regul_count = ARRAY_SIZE(supply);
+
+	return 0;
+fail:
+	DRM_ERROR("HDMI resource init - failed\n");
+	return -ENODEV;
+}
+
+static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
+
+void hdmi_attach_ddc_client(struct i2c_client *ddc)
+{
+	if (ddc)
+		hdmi_ddc = ddc;
+}
+
+void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
+{
+	if (hdmiphy)
+		hdmi_hdmiphy = hdmiphy;
+}
+
+#ifdef CONFIG_OF
+static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
+					(struct device *dev)
+{
+	struct device_node *np = dev->of_node;
+	struct s5p_hdmi_platform_data *pd;
+	enum of_gpio_flags flags;
+	u32 value;
+
+	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+	if (!pd) {
+		DRM_ERROR("memory allocation for pdata failed\n");
+		goto err_data;
+	}
+
+	if (!of_find_property(np, "hpd-gpio", &value)) {
+		DRM_ERROR("no hpd gpio property found\n");
+		goto err_data;
+	}
+
+	pd->hpd_gpio = of_get_named_gpio_flags(np, "hpd-gpio", 0, &flags);
+
+	return pd;
+
+err_data:
+	return NULL;
+}
+#else
+static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
+					(struct device *dev)
+{
+	return NULL;
+}
+#endif
+
+static struct platform_device_id hdmi_driver_types[] = {
+	{
+		.name		= "s5pv210-hdmi",
+		.driver_data    = HDMI_TYPE13,
+	}, {
+		.name		= "exynos4-hdmi",
+		.driver_data    = HDMI_TYPE13,
+	}, {
+		.name		= "exynos4-hdmi14",
+		.driver_data	= HDMI_TYPE14,
+	}, {
+		.name		= "exynos5-hdmi",
+		.driver_data	= HDMI_TYPE14,
+	}, {
+		/* end node */
+	}
+};
+
+#ifdef CONFIG_OF
+static struct of_device_id hdmi_match_types[] = {
+	{
+		.compatible = "samsung,exynos5-hdmi",
+		.data	= (void	*)HDMI_TYPE14,
+	}, {
+		/* end node */
+	}
+};
+#endif
+
+static int hdmi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+	struct hdmi_context *hdata;
+	struct s5p_hdmi_platform_data *pdata;
+	struct resource *res;
+	int ret;
+
+	DRM_DEBUG_KMS("[%d]\n", __LINE__);
+
+	if (dev->of_node) {
+		pdata = drm_hdmi_dt_parse_pdata(dev);
+		if (!pdata) {
+			DRM_ERROR("failed to parse dt\n");
+			return -EINVAL;
+		}
+	} else {
+		pdata = dev->platform_data;
+	}
+
+	if (!pdata) {
+		DRM_ERROR("no platform data specified\n");
+		return -EINVAL;
+	}
+
+	drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
+								GFP_KERNEL);
+	if (!drm_hdmi_ctx) {
+		DRM_ERROR("failed to allocate common hdmi context.\n");
+		return -ENOMEM;
+	}
+
+	hdata = devm_kzalloc(dev, sizeof(struct hdmi_context),
+								GFP_KERNEL);
+	if (!hdata) {
+		DRM_ERROR("out of memory\n");
+		return -ENOMEM;
+	}
+
+	mutex_init(&hdata->hdmi_mutex);
+
+	drm_hdmi_ctx->ctx = (void *)hdata;
+	hdata->parent_ctx = (void *)drm_hdmi_ctx;
+
+	platform_set_drvdata(pdev, drm_hdmi_ctx);
+
+	if (dev->of_node) {
+		const struct of_device_id *match;
+		match = of_match_node(of_match_ptr(hdmi_match_types),
+					dev->of_node);
+		if (match == NULL)
+			return -ENODEV;
+		hdata->type = (enum hdmi_type)match->data;
+	} else {
+		hdata->type = (enum hdmi_type)platform_get_device_id
+					(pdev)->driver_data;
+	}
+
+	hdata->hpd_gpio = pdata->hpd_gpio;
+	hdata->dev = dev;
+
+	ret = hdmi_resources_init(hdata);
+	if (ret) {
+		DRM_ERROR("hdmi_resources_init failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hdata->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(hdata->regs))
+		return PTR_ERR(hdata->regs);
+
+	ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD");
+	if (ret) {
+		DRM_ERROR("failed to request HPD gpio\n");
+		return ret;
+	}
+
+	/* DDC i2c driver */
+	if (i2c_add_driver(&ddc_driver)) {
+		DRM_ERROR("failed to register ddc i2c driver\n");
+		return -ENOENT;
+	}
+
+	hdata->ddc_port = hdmi_ddc;
+
+	/* hdmiphy i2c driver */
+	if (i2c_add_driver(&hdmiphy_driver)) {
+		DRM_ERROR("failed to register hdmiphy i2c driver\n");
+		ret = -ENOENT;
+		goto err_ddc;
+	}
+
+	hdata->hdmiphy_port = hdmi_hdmiphy;
+
+	hdata->irq = gpio_to_irq(hdata->hpd_gpio);
+	if (hdata->irq < 0) {
+		DRM_ERROR("failed to get GPIO irq\n");
+		ret = hdata->irq;
+		goto err_hdmiphy;
+	}
+
+	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
+	ret = devm_request_threaded_irq(dev, hdata->irq, NULL,
+			hdmi_irq_thread, IRQF_TRIGGER_RISING |
+			IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+			"hdmi", drm_hdmi_ctx);
+	if (ret) {
+		DRM_ERROR("failed to register hdmi interrupt\n");
+		goto err_hdmiphy;
+	}
+
+	/* Attach HDMI Driver to common hdmi. */
+	exynos_hdmi_drv_attach(drm_hdmi_ctx);
+
+	/* register specific callbacks to common hdmi. */
+	exynos_hdmi_ops_register(&hdmi_ops);
+
+	pm_runtime_enable(dev);
+
+	return 0;
+
+err_hdmiphy:
+	i2c_del_driver(&hdmiphy_driver);
+err_ddc:
+	i2c_del_driver(&ddc_driver);
+	return ret;
+}
+
+static int hdmi_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	pm_runtime_disable(dev);
+
+	/* hdmiphy i2c driver */
+	i2c_del_driver(&hdmiphy_driver);
+	/* DDC i2c driver */
+	i2c_del_driver(&ddc_driver);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int hdmi_suspend(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+	struct hdmi_context *hdata = ctx->ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	disable_irq(hdata->irq);
+
+	hdata->hpd = false;
+	if (ctx->drm_dev)
+		drm_helper_hpd_irq_event(ctx->drm_dev);
+
+	if (pm_runtime_suspended(dev)) {
+		DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+		return 0;
+	}
+
+	hdmi_poweroff(hdata);
+
+	return 0;
+}
+
+static int hdmi_resume(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+	struct hdmi_context *hdata = ctx->ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
+	enable_irq(hdata->irq);
+
+	if (!pm_runtime_suspended(dev)) {
+		DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+		return 0;
+	}
+
+	hdmi_poweron(hdata);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int hdmi_runtime_suspend(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+	struct hdmi_context *hdata = ctx->ctx;
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	hdmi_poweroff(hdata);
+
+	return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+	struct hdmi_context *hdata = ctx->ctx;
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	hdmi_poweron(hdata);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
+	SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
+};
+
+struct platform_driver hdmi_driver = {
+	.probe		= hdmi_probe,
+	.remove		= hdmi_remove,
+	.id_table	= hdmi_driver_types,
+	.driver		= {
+		.name	= "exynos-hdmi",
+		.owner	= THIS_MODULE,
+		.pm	= &hdmi_pm_ops,
+		.of_match_table = of_match_ptr(hdmi_match_types),
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_hdmi.h b/linux-imx/drivers/gpu/drm/exynos/exynos_hdmi.h
new file mode 100644
index 0000000..0ddf395
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_hdmi.h
@@ -0,0 +1,23 @@
+/*
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_HDMI_H_
+#define _EXYNOS_HDMI_H_
+
+void hdmi_attach_ddc_client(struct i2c_client *ddc);
+void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy);
+
+extern struct i2c_driver hdmiphy_driver;
+extern struct i2c_driver ddc_driver;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/linux-imx/drivers/gpu/drm/exynos/exynos_hdmiphy.c
new file mode 100644
index 0000000..ea49d13
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *	Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_hdmi.h"
+
+
+static int hdmiphy_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	hdmi_attach_hdmiphy_client(client);
+
+	dev_info(&client->adapter->dev, "attached s5p_hdmiphy "
+		"into i2c adapter successfully\n");
+
+	return 0;
+}
+
+static int hdmiphy_remove(struct i2c_client *client)
+{
+	dev_info(&client->adapter->dev, "detached s5p_hdmiphy "
+		"from i2c adapter successfully\n");
+
+	return 0;
+}
+
+static const struct i2c_device_id hdmiphy_id[] = {
+	{ "s5p_hdmiphy", 0 },
+	{ "exynos5-hdmiphy", 0 },
+	{ },
+};
+
+#ifdef CONFIG_OF
+static struct of_device_id hdmiphy_match_types[] = {
+	{
+		.compatible = "samsung,exynos5-hdmiphy",
+	}, {
+		/* end node */
+	}
+};
+#endif
+
+struct i2c_driver hdmiphy_driver = {
+	.driver = {
+		.name	= "exynos-hdmiphy",
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(hdmiphy_match_types),
+	},
+	.id_table = hdmiphy_id,
+	.probe		= hdmiphy_probe,
+	.remove		= hdmiphy_remove,
+	.command		= NULL,
+};
+EXPORT_SYMBOL(hdmiphy_driver);
diff --git a/linux-imx/drivers/gpu/drm/exynos/exynos_mixer.c b/linux-imx/drivers/gpu/drm/exynos/exynos_mixer.c
new file mode 100644
index 0000000..7c197d3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -0,0 +1,1325 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/mixer_reg.c
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include "regs-mixer.h"
+#include "regs-vp.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_hdmi.h"
+#include "exynos_drm_iommu.h"
+
+#define get_mixer_context(dev)	platform_get_drvdata(to_platform_device(dev))
+
+struct hdmi_win_data {
+	dma_addr_t		dma_addr;
+	dma_addr_t		chroma_dma_addr;
+	uint32_t		pixel_format;
+	unsigned int		bpp;
+	unsigned int		crtc_x;
+	unsigned int		crtc_y;
+	unsigned int		crtc_width;
+	unsigned int		crtc_height;
+	unsigned int		fb_x;
+	unsigned int		fb_y;
+	unsigned int		fb_width;
+	unsigned int		fb_height;
+	unsigned int		src_width;
+	unsigned int		src_height;
+	unsigned int		mode_width;
+	unsigned int		mode_height;
+	unsigned int		scan_flags;
+	bool			enabled;
+	bool			resume;
+};
+
+struct mixer_resources {
+	int			irq;
+	void __iomem		*mixer_regs;
+	void __iomem		*vp_regs;
+	spinlock_t		reg_slock;
+	struct clk		*mixer;
+	struct clk		*vp;
+	struct clk		*sclk_mixer;
+	struct clk		*sclk_hdmi;
+	struct clk		*sclk_dac;
+};
+
+enum mixer_version_id {
+	MXR_VER_0_0_0_16,
+	MXR_VER_16_0_33_0,
+};
+
+struct mixer_context {
+	struct device		*dev;
+	struct drm_device	*drm_dev;
+	int			pipe;
+	bool			interlace;
+	bool			powered;
+	bool			vp_enabled;
+	u32			int_en;
+
+	struct mutex		mixer_mutex;
+	struct mixer_resources	mixer_res;
+	struct hdmi_win_data	win_data[MIXER_WIN_NR];
+	enum mixer_version_id	mxr_ver;
+	void			*parent_ctx;
+	wait_queue_head_t	wait_vsync_queue;
+	atomic_t		wait_vsync_event;
+};
+
+struct mixer_drv_data {
+	enum mixer_version_id	version;
+	bool			is_vp_enabled;
+};
+
+static const u8 filter_y_horiz_tap8[] = {
+	0,	-1,	-1,	-1,	-1,	-1,	-1,	-1,
+	-1,	-1,	-1,	-1,	-1,	0,	0,	0,
+	0,	2,	4,	5,	6,	6,	6,	6,
+	6,	5,	5,	4,	3,	2,	1,	1,
+	0,	-6,	-12,	-16,	-18,	-20,	-21,	-20,
+	-20,	-18,	-16,	-13,	-10,	-8,	-5,	-2,
+	127,	126,	125,	121,	114,	107,	99,	89,
+	79,	68,	57,	46,	35,	25,	16,	8,
+};
+
+static const u8 filter_y_vert_tap4[] = {
+	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
+	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
+	127,	126,	124,	118,	111,	102,	92,	81,
+	70,	59,	48,	37,	27,	19,	11,	5,
+	0,	5,	11,	19,	27,	37,	48,	59,
+	70,	81,	92,	102,	111,	118,	124,	126,
+	0,	0,	-1,	-1,	-2,	-3,	-4,	-5,
+	-6,	-7,	-8,	-8,	-8,	-8,	-6,	-3,
+};
+
+static const u8 filter_cr_horiz_tap4[] = {
+	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
+	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
+	127,	126,	124,	118,	111,	102,	92,	81,
+	70,	59,	48,	37,	27,	19,	11,	5,
+};
+
+static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+	return readl(res->vp_regs + reg_id);
+}
+
+static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id,
+				 u32 val)
+{
+	writel(val, res->vp_regs + reg_id);
+}
+
+static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id,
+				 u32 val, u32 mask)
+{
+	u32 old = vp_reg_read(res, reg_id);
+
+	val = (val & mask) | (old & ~mask);
+	writel(val, res->vp_regs + reg_id);
+}
+
+static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+	return readl(res->mixer_regs + reg_id);
+}
+
+static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id,
+				 u32 val)
+{
+	writel(val, res->mixer_regs + reg_id);
+}
+
+static inline void mixer_reg_writemask(struct mixer_resources *res,
+				 u32 reg_id, u32 val, u32 mask)
+{
+	u32 old = mixer_reg_read(res, reg_id);
+
+	val = (val & mask) | (old & ~mask);
+	writel(val, res->mixer_regs + reg_id);
+}
+
+static void mixer_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+	DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+		(u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \
+} while (0)
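+/* the do { ... } while (0) wrapper makes DUMPREG expand to a single
+ * statement, so it can sit in an unbraced if/else without surprises */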
+
+	DUMPREG(MXR_STATUS);
+	DUMPREG(MXR_CFG);
+	DUMPREG(MXR_INT_EN);
+	DUMPREG(MXR_INT_STATUS);
+
+	DUMPREG(MXR_LAYER_CFG);
+	DUMPREG(MXR_VIDEO_CFG);
+
+	DUMPREG(MXR_GRAPHIC0_CFG);
+	DUMPREG(MXR_GRAPHIC0_BASE);
+	DUMPREG(MXR_GRAPHIC0_SPAN);
+	DUMPREG(MXR_GRAPHIC0_WH);
+	DUMPREG(MXR_GRAPHIC0_SXY);
+	DUMPREG(MXR_GRAPHIC0_DXY);
+
+	DUMPREG(MXR_GRAPHIC1_CFG);
+	DUMPREG(MXR_GRAPHIC1_BASE);
+	DUMPREG(MXR_GRAPHIC1_SPAN);
+	DUMPREG(MXR_GRAPHIC1_WH);
+	DUMPREG(MXR_GRAPHIC1_SXY);
+	DUMPREG(MXR_GRAPHIC1_DXY);
+#undef DUMPREG
+}
+
+static void vp_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+	DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+		(u32) readl(ctx->mixer_res.vp_regs + reg_id)); \
+} while (0)
+
+	DUMPREG(VP_ENABLE);
+	DUMPREG(VP_SRESET);
+	DUMPREG(VP_SHADOW_UPDATE);
+	DUMPREG(VP_FIELD_ID);
+	DUMPREG(VP_MODE);
+	DUMPREG(VP_IMG_SIZE_Y);
+	DUMPREG(VP_IMG_SIZE_C);
+	DUMPREG(VP_PER_RATE_CTRL);
+	DUMPREG(VP_TOP_Y_PTR);
+	DUMPREG(VP_BOT_Y_PTR);
+	DUMPREG(VP_TOP_C_PTR);
+	DUMPREG(VP_BOT_C_PTR);
+	DUMPREG(VP_ENDIAN_MODE);
+	DUMPREG(VP_SRC_H_POSITION);
+	DUMPREG(VP_SRC_V_POSITION);
+	DUMPREG(VP_SRC_WIDTH);
+	DUMPREG(VP_SRC_HEIGHT);
+	DUMPREG(VP_DST_H_POSITION);
+	DUMPREG(VP_DST_V_POSITION);
+	DUMPREG(VP_DST_WIDTH);
+	DUMPREG(VP_DST_HEIGHT);
+	DUMPREG(VP_H_RATIO);
+	DUMPREG(VP_V_RATIO);
+
+#undef DUMPREG
+}
+
+static inline void vp_filter_set(struct mixer_resources *res,
+		int reg_id, const u8 *data, unsigned int size)
+{
+	/* the filter tables must be a multiple of 4 bytes in size */
+	BUG_ON(size & 3);
+	for (; size; size -= 4, reg_id += 4, data += 4) {
+		u32 val = (data[0] << 24) | (data[1] << 16) |
+			(data[2] << 8) | data[3];
+		vp_reg_write(res, reg_id, val);
+	}
+}
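+
+/*
+ * Example: vp_filter_set() packs each group of four table bytes into one
+ * 32-bit register value with data[0] in the most significant byte, so
+ * { 0x00, 0x02, 0x04, 0x05 } is written as 0x00020405 to reg_id, the
+ * next four bytes to reg_id + 4, and so on.
+ */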
+
+static void vp_default_filter(struct mixer_resources *res)
+{
+	vp_filter_set(res, VP_POLY8_Y0_LL,
+		filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
+	vp_filter_set(res, VP_POLY4_Y0_LL,
+		filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
+	vp_filter_set(res, VP_POLY4_C0_LL,
+		filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
+}
+
+static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+
+	/* latch register updates at vsync (shadow update) when enabled */
+	mixer_reg_writemask(res, MXR_STATUS, enable ?
+			MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
+
+	if (ctx->vp_enabled)
+		vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
+			VP_SHADOW_UPDATE_ENABLE : 0);
+}
+
+static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+	u32 val;
+
+	/* choosing between interlace and progressive mode */
+	val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
+				MXR_CFG_SCAN_PROGRASSIVE);
+
+	/* choosing between proper HD and SD mode */
+	if (height <= 480)
+		val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
+	else if (height <= 576)
+		val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
+	else if (height <= 720)
+		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+	else if (height <= 1080)
+		val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
+	else
+		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+
+	mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
+}
+
+static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+	u32 val;
+
+	if (height == 480 || height == 576) {
+		val = MXR_CFG_RGB601_0_255;
+	} else {
+		val = MXR_CFG_RGB709_16_235;
+		mixer_reg_write(res, MXR_CM_COEFF_Y,
+				(1 << 30) | (94 << 20) | (314 << 10) |
+				(32 << 0));
+		mixer_reg_write(res, MXR_CM_COEFF_CB,
+				(972 << 20) | (851 << 10) | (225 << 0));
+		mixer_reg_write(res, MXR_CM_COEFF_CR,
+				(225 << 20) | (820 << 10) | (1004 << 0));
+	}
+
+	mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
+}
+
+static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+	u32 val = enable ? ~0 : 0;
+
+	switch (win) {
+	case 0:
+		mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
+		break;
+	case 1:
+		mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
+		break;
+	case 2:
+		if (ctx->vp_enabled) {
+			vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
+			mixer_reg_writemask(res, MXR_CFG, val,
+				MXR_CFG_VP_ENABLE);
+		}
+		break;
+	}
+}
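+
+/*
+ * Window numbering used above: windows 0 and 1 are the mixer's graphics
+ * layers (GRP0/GRP1), while window 2 is routed through the video
+ * processor and is only usable when ctx->vp_enabled is set.
+ */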
+
+static void mixer_run(struct mixer_context *ctx)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+
+	mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+
+	mixer_regs_dump(ctx);
+}
+
+static void vp_video_buffer(struct mixer_context *ctx, int win)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+	unsigned long flags;
+	struct hdmi_win_data *win_data;
+	unsigned int x_ratio, y_ratio;
+	unsigned int buf_num;
+	dma_addr_t luma_addr[2], chroma_addr[2];
+	bool tiled_mode = false;
+	bool crcb_mode = false;
+	u32 val;
+
+	win_data = &ctx->win_data[win];
+
+	switch (win_data->pixel_format) {
+	case DRM_FORMAT_NV12MT:
+		tiled_mode = true;
+		/* fall through - NV12MT shares the two-plane NV12 layout */
+	case DRM_FORMAT_NV12:
+		crcb_mode = false;
+		buf_num = 2;
+		break;
+	/* TODO: single buffer format NV12, NV21 */
+	default:
+		/* ignore pixel format at disable time */
+		if (!win_data->dma_addr)
+			break;
+
+		DRM_ERROR("pixel format for vp is wrong [%d].\n",
+				win_data->pixel_format);
+		return;
+	}
+
+	/* scaling feature: (src << 16) / dst */
+	x_ratio = (win_data->src_width << 16) / win_data->crtc_width;
+	y_ratio = (win_data->src_height << 16) / win_data->crtc_height;
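+	/*
+	 * e.g. a 1920-pixel-wide source on a 1280-pixel-wide CRTC gives
+	 * (1920 << 16) / 1280 = 0x18000, i.e. 1.5 in 16.16 fixed point.
+	 */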
+
+	if (buf_num == 2) {
+		luma_addr[0] = win_data->dma_addr;
+		chroma_addr[0] = win_data->chroma_dma_addr;
+	} else {
+		luma_addr[0] = win_data->dma_addr;
+		chroma_addr[0] = win_data->dma_addr
+			+ (win_data->fb_width * win_data->fb_height);
+	}
+
+	if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
+		ctx->interlace = true;
+		if (tiled_mode) {
+			luma_addr[1] = luma_addr[0] + 0x40;
+			chroma_addr[1] = chroma_addr[0] + 0x40;
+		} else {
+			luma_addr[1] = luma_addr[0] + win_data->fb_width;
+			chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
+		}
+	} else {
+		ctx->interlace = false;
+		luma_addr[1] = 0;
+		chroma_addr[1] = 0;
+	}
+
+	spin_lock_irqsave(&res->reg_slock, flags);
+	mixer_vsync_set_update(ctx, false);
+
+	/* interlace or progressive scan mode */
+	val = (ctx->interlace ? ~0 : 0);
+	vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
+
+	/* setup format */
+	val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
+	val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
+	vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
+
+	/* setting size of input image */
+	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
+		VP_IMG_VSIZE(win_data->fb_height));
+	/* chroma height has to be halved to avoid chroma distortions */
+	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
+		VP_IMG_VSIZE(win_data->fb_height / 2));
+
+	vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
+	vp_reg_write(res, VP_SRC_HEIGHT, win_data->src_height);
+	vp_reg_write(res, VP_SRC_H_POSITION,
+			VP_SRC_H_POSITION_VAL(win_data->fb_x));
+	vp_reg_write(res, VP_SRC_V_POSITION, win_data->fb_y);
+
+	vp_reg_write(res, VP_DST_WIDTH, win_data->crtc_width);
+	vp_reg_write(res, VP_DST_H_POSITION, win_data->crtc_x);
+	if (ctx->interlace) {
+		vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height / 2);
+		vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y / 2);
+	} else {
+		vp_reg_write(res, VP_DST_HEIGHT, win_data->crtc_height);
+		vp_reg_write(res, VP_DST_V_POSITION, win_data->crtc_y);
+	}
+
+	vp_reg_write(res, VP_H_RATIO, x_ratio);
+	vp_reg_write(res, VP_V_RATIO, y_ratio);
+
+	vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
+
+	/* set buffer address to vp */
+	vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]);
+	vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]);
+	vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
+	vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
+
+	mixer_cfg_scan(ctx, win_data->mode_height);
+	mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
+	mixer_cfg_layer(ctx, win, true);
+	mixer_run(ctx);
+
+	mixer_vsync_set_update(ctx, true);
+	spin_unlock_irqrestore(&res->reg_slock, flags);
+
+	vp_regs_dump(ctx);
+}
+
+static void mixer_layer_update(struct mixer_context *ctx)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+	u32 val;
+
+	val = mixer_reg_read(res, MXR_CFG);
+
+	/* allow one update per vsync only */
+	if (!(val & MXR_CFG_LAYER_UPDATE_COUNT_MASK))
+		mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+}
+
+static void mixer_graph_buffer(struct mixer_context *ctx, int win)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+	unsigned long flags;
+	struct hdmi_win_data *win_data;
+	unsigned int x_ratio, y_ratio;
+	unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+	dma_addr_t dma_addr;
+	unsigned int fmt;
+	u32 val;
+
+	win_data = &ctx->win_data[win];
+
+	#define RGB565 4
+	#define ARGB1555 5
+	#define ARGB4444 6
+	#define ARGB8888 7
+
+	switch (win_data->bpp) {
+	case 16:
+		fmt = ARGB4444;
+		break;
+	case 32:
+		fmt = ARGB8888;
+		break;
+	default:
+		fmt = ARGB8888;
+	}
+
+	/* the hardware's 2x scaling feature is not used here */
+	x_ratio = 0;
+	y_ratio = 0;
+
+	dst_x_offset = win_data->crtc_x;
+	dst_y_offset = win_data->crtc_y;
+
+	/* compute the DMA start address from the base and the fb_x/fb_y offset */
+	dma_addr = win_data->dma_addr
+		+ (win_data->fb_x * win_data->bpp >> 3)
+		+ (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
+	src_x_offset = 0;
+	src_y_offset = 0;
+
+	if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE)
+		ctx->interlace = true;
+	else
+		ctx->interlace = false;
+
+	spin_lock_irqsave(&res->reg_slock, flags);
+	mixer_vsync_set_update(ctx, false);
+
+	/* setup format */
+	mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
+		MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
+
+	/* setup geometry */
+	mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
+
+	val  = MXR_GRP_WH_WIDTH(win_data->crtc_width);
+	val |= MXR_GRP_WH_HEIGHT(win_data->crtc_height);
+	val |= MXR_GRP_WH_H_SCALE(x_ratio);
+	val |= MXR_GRP_WH_V_SCALE(y_ratio);
+	mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
+
+	/* setup offsets in source image */
+	val  = MXR_GRP_SXY_SX(src_x_offset);
+	val |= MXR_GRP_SXY_SY(src_y_offset);
+	mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);
+
+	/* setup offsets in display image */
+	val  = MXR_GRP_DXY_DX(dst_x_offset);
+	val |= MXR_GRP_DXY_DY(dst_y_offset);
+	mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val);
+
+	/* set buffer address to mixer */
+	mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
+
+	mixer_cfg_scan(ctx, win_data->mode_height);
+	mixer_cfg_rgb_fmt(ctx, win_data->mode_height);
+	mixer_cfg_layer(ctx, win, true);
+
+	/* layer update mandatory for mixer 16.0.33.0 */
+	if (ctx->mxr_ver == MXR_VER_16_0_33_0)
+		mixer_layer_update(ctx);
+
+	mixer_run(ctx);
+
+	mixer_vsync_set_update(ctx, true);
+	spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static void vp_win_reset(struct mixer_context *ctx)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+	int tries = 100;
+
+	vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
+	for (tries = 100; tries; --tries) {
+		/* waiting until VP_SRESET_PROCESSING is 0 */
+		if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
+			break;
+		usleep_range(10000, 12000);
+	}
+	WARN(tries == 0, "failed to reset Video Processor\n");
+}
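+
+/*
+ * With up to 100 polls of usleep_range(10000, 12000) each, vp_win_reset()
+ * gives the Video Processor roughly 1.0-1.2 seconds to leave soft reset
+ * before the WARN fires.
+ */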
+
+static void mixer_win_reset(struct mixer_context *ctx)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+	unsigned long flags;
+	u32 val; /* value stored to register */
+
+	spin_lock_irqsave(&res->reg_slock, flags);
+	mixer_vsync_set_update(ctx, false);
+
+	mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
+
+	/* set output in RGB888 mode */
+	mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
+
+	/* 16 beat burst in DMA */
+	mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
+		MXR_STATUS_BURST_MASK);
+
+	/* setting default layer priority: layer1 > layer0 > video
+	 * because typical usage scenario would be
+	 * layer1 - OSD
+	 * layer0 - framebuffer
+	 * video - video overlay
+	 */
+	val = MXR_LAYER_CFG_GRP1_VAL(3);
+	val |= MXR_LAYER_CFG_GRP0_VAL(2);
+	if (ctx->vp_enabled)
+		val |= MXR_LAYER_CFG_VP_VAL(1);
+	mixer_reg_write(res, MXR_LAYER_CFG, val);
+
+	/* setting background color */
+	mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
+	mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
+	mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
+
+	/* setting graphical layers */
+	val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* color keying disabled */
+	val |= MXR_GRP_CFG_WIN_BLEND_EN;
+	val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
+
+	/* Don't blend layer 0 onto the mixer background */
+	mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
+
+	/* Blend layer 1 into layer 0 */
+	val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+	val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+	mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
+
+	/* setting video layers */
+	val = MXR_GRP_CFG_ALPHA_VAL(0);
+	mixer_reg_write(res, MXR_VIDEO_CFG, val);
+
+	if (ctx->vp_enabled) {
+		/* configuration of Video Processor Registers */
+		vp_win_reset(ctx);
+		vp_default_filter(res);
+	}
+
+	/* disable all layers */
+	mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
+	mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
+	if (ctx->vp_enabled)
+		mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+
+	mixer_vsync_set_update(ctx, true);
+	spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static int mixer_iommu_on(void *ctx, bool enable)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+	struct mixer_context *mdata = ctx;
+	struct drm_device *drm_dev;
+
+	drm_hdmi_ctx = mdata->parent_ctx;
+	drm_dev = drm_hdmi_ctx->drm_dev;
+
+	if (is_drm_iommu_supported(drm_dev)) {
+		if (enable)
+			return drm_iommu_attach_device(drm_dev, mdata->dev);
+
+		drm_iommu_detach_device(drm_dev, mdata->dev);
+	}
+	return 0;
+}
+
+static int mixer_enable_vblank(void *ctx, int pipe)
+{
+	struct mixer_context *mixer_ctx = ctx;
+	struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mixer_ctx->pipe = pipe;
+
+	/* enable vsync interrupt */
+	mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
+			MXR_INT_EN_VSYNC);
+
+	return 0;
+}
+
+static void mixer_disable_vblank(void *ctx)
+{
+	struct mixer_context *mixer_ctx = ctx;
+	struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	/* disable vsync interrupt */
+	mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
+}
+
+static void mixer_win_mode_set(void *ctx,
+			      struct exynos_drm_overlay *overlay)
+{
+	struct mixer_context *mixer_ctx = ctx;
+	struct hdmi_win_data *win_data;
+	int win;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (!overlay) {
+		DRM_ERROR("overlay is NULL\n");
+		return;
+	}
+
+	DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
+				 overlay->fb_width, overlay->fb_height,
+				 overlay->fb_x, overlay->fb_y,
+				 overlay->crtc_width, overlay->crtc_height,
+				 overlay->crtc_x, overlay->crtc_y);
+
+	win = overlay->zpos;
+	if (win == DEFAULT_ZPOS)
+		win = MIXER_DEFAULT_WIN;
+
+	if (win < 0 || win >= MIXER_WIN_NR) {
+		DRM_ERROR("mixer window[%d] is wrong\n", win);
+		return;
+	}
+
+	win_data = &mixer_ctx->win_data[win];
+
+	win_data->dma_addr = overlay->dma_addr[0];
+	win_data->chroma_dma_addr = overlay->dma_addr[1];
+	win_data->pixel_format = overlay->pixel_format;
+	win_data->bpp = overlay->bpp;
+
+	win_data->crtc_x = overlay->crtc_x;
+	win_data->crtc_y = overlay->crtc_y;
+	win_data->crtc_width = overlay->crtc_width;
+	win_data->crtc_height = overlay->crtc_height;
+
+	win_data->fb_x = overlay->fb_x;
+	win_data->fb_y = overlay->fb_y;
+	win_data->fb_width = overlay->fb_width;
+	win_data->fb_height = overlay->fb_height;
+	win_data->src_width = overlay->src_width;
+	win_data->src_height = overlay->src_height;
+
+	win_data->mode_width = overlay->mode_width;
+	win_data->mode_height = overlay->mode_height;
+
+	win_data->scan_flags = overlay->scan_flag;
+}
+
+static void mixer_win_commit(void *ctx, int win)
+{
+	struct mixer_context *mixer_ctx = ctx;
+
+	DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+
+	mutex_lock(&mixer_ctx->mixer_mutex);
+	if (!mixer_ctx->powered) {
+		mutex_unlock(&mixer_ctx->mixer_mutex);
+		return;
+	}
+	mutex_unlock(&mixer_ctx->mixer_mutex);
+
+	if (win > 1 && mixer_ctx->vp_enabled)
+		vp_video_buffer(mixer_ctx, win);
+	else
+		mixer_graph_buffer(mixer_ctx, win);
+
+	mixer_ctx->win_data[win].enabled = true;
+}
+
+static void mixer_win_disable(void *ctx, int win)
+{
+	struct mixer_context *mixer_ctx = ctx;
+	struct mixer_resources *res = &mixer_ctx->mixer_res;
+	unsigned long flags;
+
+	DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+
+	mutex_lock(&mixer_ctx->mixer_mutex);
+	if (!mixer_ctx->powered) {
+		mutex_unlock(&mixer_ctx->mixer_mutex);
+		mixer_ctx->win_data[win].resume = false;
+		return;
+	}
+	mutex_unlock(&mixer_ctx->mixer_mutex);
+
+	spin_lock_irqsave(&res->reg_slock, flags);
+	mixer_vsync_set_update(mixer_ctx, false);
+
+	mixer_cfg_layer(mixer_ctx, win, false);
+
+	mixer_vsync_set_update(mixer_ctx, true);
+	spin_unlock_irqrestore(&res->reg_slock, flags);
+
+	mixer_ctx->win_data[win].enabled = false;
+}
+
+static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
+{
+	u32 w, h;
+
+	w = timing->xres;
+	h = timing->yres;
+
+	DRM_DEBUG_KMS("%s : xres=%d, yres=%d, refresh=%d, intl=%d\n",
+		__func__, timing->xres, timing->yres,
+		timing->refresh, (timing->vmode &
+		FB_VMODE_INTERLACED) ? true : false);
+
+	if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
+		(w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
+		(w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
+		return 0;
+
+	return -EINVAL;
+}
+
+static void mixer_wait_for_vblank(void *ctx)
+{
+	struct mixer_context *mixer_ctx = ctx;
+
+	mutex_lock(&mixer_ctx->mixer_mutex);
+	if (!mixer_ctx->powered) {
+		mutex_unlock(&mixer_ctx->mixer_mutex);
+		return;
+	}
+	mutex_unlock(&mixer_ctx->mixer_mutex);
+
+	atomic_set(&mixer_ctx->wait_vsync_event, 1);
+
+	/*
+	 * wait for the mixer to signal the VSYNC interrupt, or give up
+	 * after the 50ms timeout (one frame period at a 20Hz refresh rate).
+	 */
+	if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
+				!atomic_read(&mixer_ctx->wait_vsync_event),
+				DRM_HZ/20))
+		DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
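+
+/*
+ * The handshake above pairs with mixer_irq_handler() below: this side
+ * sets wait_vsync_event and sleeps, the VSYNC interrupt clears the flag
+ * and wakes wait_vsync_queue, so wait_event_timeout() returns on the
+ * next vblank or after the ~50ms (DRM_HZ/20) timeout.
+ */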
+
+static void mixer_window_suspend(struct mixer_context *ctx)
+{
+	struct hdmi_win_data *win_data;
+	int i;
+
+	for (i = 0; i < MIXER_WIN_NR; i++) {
+		win_data = &ctx->win_data[i];
+		win_data->resume = win_data->enabled;
+		mixer_win_disable(ctx, i);
+	}
+	mixer_wait_for_vblank(ctx);
+}
+
+static void mixer_window_resume(struct mixer_context *ctx)
+{
+	struct hdmi_win_data *win_data;
+	int i;
+
+	for (i = 0; i < MIXER_WIN_NR; i++) {
+		win_data = &ctx->win_data[i];
+		win_data->enabled = win_data->resume;
+		win_data->resume = false;
+	}
+}
+
+static void mixer_poweron(struct mixer_context *ctx)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mutex_lock(&ctx->mixer_mutex);
+	if (ctx->powered) {
+		mutex_unlock(&ctx->mixer_mutex);
+		return;
+	}
+	ctx->powered = true;
+	mutex_unlock(&ctx->mixer_mutex);
+
+	clk_enable(res->mixer);
+	if (ctx->vp_enabled) {
+		clk_enable(res->vp);
+		clk_enable(res->sclk_mixer);
+	}
+
+	mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+	mixer_win_reset(ctx);
+
+	mixer_window_resume(ctx);
+}
+
+static void mixer_poweroff(struct mixer_context *ctx)
+{
+	struct mixer_resources *res = &ctx->mixer_res;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mutex_lock(&ctx->mixer_mutex);
+	if (!ctx->powered)
+		goto out;
+	mutex_unlock(&ctx->mixer_mutex);
+
+	mixer_window_suspend(ctx);
+
+	ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+
+	clk_disable(res->mixer);
+	if (ctx->vp_enabled) {
+		clk_disable(res->vp);
+		clk_disable(res->sclk_mixer);
+	}
+
+	mutex_lock(&ctx->mixer_mutex);
+	ctx->powered = false;
+
+out:
+	mutex_unlock(&ctx->mixer_mutex);
+}
+
+static void mixer_dpms(void *ctx, int mode)
+{
+	struct mixer_context *mixer_ctx = ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		if (pm_runtime_suspended(mixer_ctx->dev))
+			pm_runtime_get_sync(mixer_ctx->dev);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (!pm_runtime_suspended(mixer_ctx->dev))
+			pm_runtime_put_sync(mixer_ctx->dev);
+		break;
+	default:
+		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+		break;
+	}
+}
+
+static struct exynos_mixer_ops mixer_ops = {
+	/* manager */
+	.iommu_on		= mixer_iommu_on,
+	.enable_vblank		= mixer_enable_vblank,
+	.disable_vblank		= mixer_disable_vblank,
+	.wait_for_vblank	= mixer_wait_for_vblank,
+	.dpms			= mixer_dpms,
+
+	/* overlay */
+	.win_mode_set		= mixer_win_mode_set,
+	.win_commit		= mixer_win_commit,
+	.win_disable		= mixer_win_disable,
+
+	/* display */
+	.check_timing		= mixer_check_timing,
+};
+
+static irqreturn_t mixer_irq_handler(int irq, void *arg)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+	struct mixer_resources *res = &ctx->mixer_res;
+	u32 val, base, shadow;
+
+	spin_lock(&res->reg_slock);
+
+	/* read interrupt status for handling and clearing flags for VSYNC */
+	val = mixer_reg_read(res, MXR_INT_STATUS);
+
+	/* handling VSYNC */
+	if (val & MXR_INT_STATUS_VSYNC) {
+		/* in interlaced mode the shadow registers must be checked */
+		if (ctx->interlace) {
+			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
+			shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+			if (base != shadow)
+				goto out;
+
+			base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
+			shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+			if (base != shadow)
+				goto out;
+		}
+
+		drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
+		exynos_drm_crtc_finish_pageflip(drm_hdmi_ctx->drm_dev,
+				ctx->pipe);
+
+		/* set wait vsync event to zero and wake up queue. */
+		if (atomic_read(&ctx->wait_vsync_event)) {
+			atomic_set(&ctx->wait_vsync_event, 0);
+			DRM_WAKEUP(&ctx->wait_vsync_queue);
+		}
+	}
+
+out:
+	/* clear interrupts */
+	if (~val & MXR_INT_EN_VSYNC) {
+		/* the vsync interrupt uses different bits for status and clear */
+		val &= ~MXR_INT_EN_VSYNC;
+		val |= MXR_INT_CLEAR_VSYNC;
+	}
+	mixer_reg_write(res, MXR_INT_STATUS, val);
+
+	spin_unlock(&res->reg_slock);
+
+	return IRQ_HANDLED;
+}
+
+static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
+				struct platform_device *pdev)
+{
+	struct mixer_context *mixer_ctx = ctx->ctx;
+	struct device *dev = &pdev->dev;
+	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
+	struct resource *res;
+	int ret;
+
+	spin_lock_init(&mixer_res->reg_slock);
+
+	mixer_res->mixer = devm_clk_get(dev, "mixer");
+	if (IS_ERR(mixer_res->mixer)) {
+		dev_err(dev, "failed to get clock 'mixer'\n");
+		return -ENODEV;
+	}
+
+	mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
+	if (IS_ERR(mixer_res->sclk_hdmi)) {
+		dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
+		return -ENODEV;
+	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(dev, "get memory resource failed.\n");
+		return -ENXIO;
+	}
+
+	mixer_res->mixer_regs = devm_ioremap(dev, res->start,
+							resource_size(res));
+	if (mixer_res->mixer_regs == NULL) {
+		dev_err(dev, "register mapping failed.\n");
+		return -ENXIO;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res == NULL) {
+		dev_err(dev, "get interrupt resource failed.\n");
+		return -ENXIO;
+	}
+
+	ret = devm_request_irq(dev, res->start, mixer_irq_handler,
+							0, "drm_mixer", ctx);
+	if (ret) {
+		dev_err(dev, "request interrupt failed.\n");
+		return ret;
+	}
+	mixer_res->irq = res->start;
+
+	return 0;
+}
+
+static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
+			     struct platform_device *pdev)
+{
+	struct mixer_context *mixer_ctx = ctx->ctx;
+	struct device *dev = &pdev->dev;
+	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
+	struct resource *res;
+
+	mixer_res->vp = devm_clk_get(dev, "vp");
+	if (IS_ERR(mixer_res->vp)) {
+		dev_err(dev, "failed to get clock 'vp'\n");
+		return -ENODEV;
+	}
+	mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
+	if (IS_ERR(mixer_res->sclk_mixer)) {
+		dev_err(dev, "failed to get clock 'sclk_mixer'\n");
+		return -ENODEV;
+	}
+	mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
+	if (IS_ERR(mixer_res->sclk_dac)) {
+		dev_err(dev, "failed to get clock 'sclk_dac'\n");
+		return -ENODEV;
+	}
+
+	if (mixer_res->sclk_hdmi)
+		clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res == NULL) {
+		dev_err(dev, "get memory resource failed.\n");
+		return -ENXIO;
+	}
+
+	mixer_res->vp_regs = devm_ioremap(dev, res->start,
+							resource_size(res));
+	if (mixer_res->vp_regs == NULL) {
+		dev_err(dev, "register mapping failed.\n");
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static struct mixer_drv_data exynos5_mxr_drv_data = {
+	.version = MXR_VER_16_0_33_0,
+	.is_vp_enabled = 0,
+};
+
+static struct mixer_drv_data exynos4_mxr_drv_data = {
+	.version = MXR_VER_0_0_0_16,
+	.is_vp_enabled = 1,
+};
+
+static struct platform_device_id mixer_driver_types[] = {
+	{
+		.name		= "s5p-mixer",
+		.driver_data	= (unsigned long)&exynos4_mxr_drv_data,
+	}, {
+		.name		= "exynos5-mixer",
+		.driver_data	= (unsigned long)&exynos5_mxr_drv_data,
+	}, {
+		/* end node */
+	}
+};
+
+static struct of_device_id mixer_match_types[] = {
+	{
+		.compatible = "samsung,exynos5-mixer",
+		.data	= &exynos5_mxr_drv_data,
+	}, {
+		/* end node */
+	}
+};
+
+static int mixer_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+	struct mixer_context *ctx;
+	struct mixer_drv_data *drv;
+	int ret;
+
+	dev_info(dev, "probe start\n");
+
+	drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
+								GFP_KERNEL);
+	if (!drm_hdmi_ctx) {
+		DRM_ERROR("failed to allocate common hdmi context.\n");
+		return -ENOMEM;
+	}
+
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		DRM_ERROR("failed to alloc mixer context.\n");
+		return -ENOMEM;
+	}
+
+	mutex_init(&ctx->mixer_mutex);
+
+	if (dev->of_node) {
+		const struct of_device_id *match;
+		match = of_match_node(of_match_ptr(mixer_match_types),
+							  dev->of_node);
+		if (!match)
+			return -ENODEV;
+		drv = (struct mixer_drv_data *)match->data;
+	} else {
+		drv = (struct mixer_drv_data *)
+			platform_get_device_id(pdev)->driver_data;
+	}
+
+	ctx->dev = dev;
+	ctx->parent_ctx = (void *)drm_hdmi_ctx;
+	drm_hdmi_ctx->ctx = (void *)ctx;
+	ctx->vp_enabled = drv->is_vp_enabled;
+	ctx->mxr_ver = drv->version;
+	DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+	atomic_set(&ctx->wait_vsync_event, 0);
+
+	platform_set_drvdata(pdev, drm_hdmi_ctx);
+
+	/* acquire resources: regs, irqs, clocks */
+	ret = mixer_resources_init(drm_hdmi_ctx, pdev);
+	if (ret) {
+		DRM_ERROR("mixer_resources_init failed\n");
+		goto fail;
+	}
+
+	if (ctx->vp_enabled) {
+		/* acquire vp resources: regs, irqs, clocks */
+		ret = vp_resources_init(drm_hdmi_ctx, pdev);
+		if (ret) {
+			DRM_ERROR("vp_resources_init failed\n");
+			goto fail;
+		}
+	}
+
+	/* attach mixer driver to common hdmi. */
+	exynos_mixer_drv_attach(drm_hdmi_ctx);
+
+	/* register specific callback point to common hdmi. */
+	exynos_mixer_ops_register(&mixer_ops);
+
+	pm_runtime_enable(dev);
+
+	return 0;
+
+fail:
+	dev_info(dev, "probe failed\n");
+	return ret;
+}
+
+static int mixer_remove(struct platform_device *pdev)
+{
+	dev_info(&pdev->dev, "remove successful\n");
+
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mixer_suspend(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (pm_runtime_suspended(dev)) {
+		DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+		return 0;
+	}
+
+	mixer_poweroff(ctx);
+
+	return 0;
+}
+
+static int mixer_resume(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (!pm_runtime_suspended(dev)) {
+		DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+		return 0;
+	}
+
+	mixer_poweron(ctx);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int mixer_runtime_suspend(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mixer_poweroff(ctx);
+
+	return 0;
+}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+	struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	mixer_poweron(ctx);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops mixer_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
+	SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
+};
+
+struct platform_driver mixer_driver = {
+	.driver = {
+		.name = "exynos-mixer",
+		.owner = THIS_MODULE,
+		.pm = &mixer_pm_ops,
+		.of_match_table = mixer_match_types,
+	},
+	.probe = mixer_probe,
+	.remove = mixer_remove,
+	.id_table	= mixer_driver_types,
+};
diff --git a/linux-imx/drivers/gpu/drm/exynos/regs-fimc.h b/linux-imx/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644
index 0000000..3049613
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/regs-fimc.h
@@ -0,0 +1,668 @@
+/* drivers/gpu/drm/exynos/regs-fimc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * Register definition file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_FIMC_H
+#define EXYNOS_REGS_FIMC_H
+
+/*
+ * Register part
+*/
+/* Input source format */
+#define EXYNOS_CISRCFMT		(0x00)
+/* Window offset */
+#define EXYNOS_CIWDOFST		(0x04)
+/* Global control */
+#define EXYNOS_CIGCTRL		(0x08)
+/* Window offset 2 */
+#define EXYNOS_CIWDOFST2	(0x14)
+/* Y 1st frame start address for output DMA */
+#define EXYNOS_CIOYSA1		(0x18)
+/* Y 2nd frame start address for output DMA */
+#define EXYNOS_CIOYSA2		(0x1c)
+/* Y 3rd frame start address for output DMA */
+#define EXYNOS_CIOYSA3		(0x20)
+/* Y 4th frame start address for output DMA */
+#define EXYNOS_CIOYSA4		(0x24)
+/* Cb 1st frame start address for output DMA */
+#define EXYNOS_CIOCBSA1		(0x28)
+/* Cb 2nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA2		(0x2c)
+/* Cb 3rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA3		(0x30)
+/* Cb 4th frame start address for output DMA */
+#define EXYNOS_CIOCBSA4		(0x34)
+/* Cr 1st frame start address for output DMA */
+#define EXYNOS_CIOCRSA1		(0x38)
+/* Cr 2nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA2		(0x3c)
+/* Cr 3rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA3		(0x40)
+/* Cr 4th frame start address for output DMA */
+#define EXYNOS_CIOCRSA4		(0x44)
+/* Target image format */
+#define EXYNOS_CITRGFMT		(0x48)
+/* Output DMA control */
+#define EXYNOS_CIOCTRL		(0x4c)
+/* Pre-scaler control 1 */
+#define EXYNOS_CISCPRERATIO	(0x50)
+/* Pre-scaler control 2 */
+#define EXYNOS_CISCPREDST		(0x54)
+/* Main scaler control */
+#define EXYNOS_CISCCTRL		(0x58)
+/* Target area */
+#define EXYNOS_CITAREA		(0x5c)
+/* Status */
+#define EXYNOS_CISTATUS		(0x64)
+/* Status2 */
+#define EXYNOS_CISTATUS2		(0x68)
+/* Image capture enable command */
+#define EXYNOS_CIIMGCPT		(0xc0)
+/* Capture sequence */
+#define EXYNOS_CICPTSEQ		(0xc4)
+/* Image effects */
+#define EXYNOS_CIIMGEFF		(0xd0)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA0		(0xd4)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA0		(0xd8)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA0		(0xdc)
+/* Input DMA Y Line Skip */
+#define EXYNOS_CIILINESKIP_Y	(0xec)
+/* Input DMA Cb Line Skip */
+#define EXYNOS_CIILINESKIP_CB	(0xf0)
+/* Input DMA Cr Line Skip */
+#define EXYNOS_CIILINESKIP_CR	(0xf4)
+/* Real input DMA image size */
+#define EXYNOS_CIREAL_ISIZE	(0xf8)
+/* Input DMA control */
+#define EXYNOS_MSCTRL		(0xfc)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA1		(0x144)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA1		(0x148)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA1		(0x14c)
+/* Output DMA Y offset */
+#define EXYNOS_CIOYOFF		(0x168)
+/* Output DMA CB offset */
+#define EXYNOS_CIOCBOFF		(0x16c)
+/* Output DMA CR offset */
+#define EXYNOS_CIOCROFF		(0x170)
+/* Input DMA Y offset */
+#define EXYNOS_CIIYOFF		(0x174)
+/* Input DMA CB offset */
+#define EXYNOS_CIICBOFF		(0x178)
+/* Input DMA CR offset */
+#define EXYNOS_CIICROFF		(0x17c)
+/* Input DMA original image size */
+#define EXYNOS_ORGISIZE		(0x180)
+/* Output DMA original image size */
+#define EXYNOS_ORGOSIZE		(0x184)
+/* Real output DMA image size */
+#define EXYNOS_CIEXTEN		(0x188)
+/* DMA parameter */
+#define EXYNOS_CIDMAPARAM		(0x18c)
+/* MIPI CSI image format */
+#define EXYNOS_CSIIMGFMT		(0x194)
+/* FIMC Clock Source Select */
+#define EXYNOS_MISC_FIMC		(0x198)
+
+/* Add for FIMC v5.1 */
+/* Output Frame Buffer Sequence */
+#define EXYNOS_CIFCNTSEQ		(0x1fc)
+/* Y 5th frame start address for output DMA */
+#define EXYNOS_CIOYSA5		(0x200)
+/* Y 6th frame start address for output DMA */
+#define EXYNOS_CIOYSA6		(0x204)
+/* Y 7th frame start address for output DMA */
+#define EXYNOS_CIOYSA7		(0x208)
+/* Y 8th frame start address for output DMA */
+#define EXYNOS_CIOYSA8		(0x20c)
+/* Y 9th frame start address for output DMA */
+#define EXYNOS_CIOYSA9		(0x210)
+/* Y 10th frame start address for output DMA */
+#define EXYNOS_CIOYSA10		(0x214)
+/* Y 11th frame start address for output DMA */
+#define EXYNOS_CIOYSA11		(0x218)
+/* Y 12th frame start address for output DMA */
+#define EXYNOS_CIOYSA12		(0x21c)
+/* Y 13th frame start address for output DMA */
+#define EXYNOS_CIOYSA13		(0x220)
+/* Y 14th frame start address for output DMA */
+#define EXYNOS_CIOYSA14		(0x224)
+/* Y 15th frame start address for output DMA */
+#define EXYNOS_CIOYSA15		(0x228)
+/* Y 16th frame start address for output DMA */
+#define EXYNOS_CIOYSA16		(0x22c)
+/* Y 17th frame start address for output DMA */
+#define EXYNOS_CIOYSA17		(0x230)
+/* Y 18th frame start address for output DMA */
+#define EXYNOS_CIOYSA18		(0x234)
+/* Y 19th frame start address for output DMA */
+#define EXYNOS_CIOYSA19		(0x238)
+/* Y 20th frame start address for output DMA */
+#define EXYNOS_CIOYSA20		(0x23c)
+/* Y 21st frame start address for output DMA */
+#define EXYNOS_CIOYSA21		(0x240)
+/* Y 22nd frame start address for output DMA */
+#define EXYNOS_CIOYSA22		(0x244)
+/* Y 23rd frame start address for output DMA */
+#define EXYNOS_CIOYSA23		(0x248)
+/* Y 24th frame start address for output DMA */
+#define EXYNOS_CIOYSA24		(0x24c)
+/* Y 25th frame start address for output DMA */
+#define EXYNOS_CIOYSA25		(0x250)
+/* Y 26th frame start address for output DMA */
+#define EXYNOS_CIOYSA26		(0x254)
+/* Y 27th frame start address for output DMA */
+#define EXYNOS_CIOYSA27		(0x258)
+/* Y 28th frame start address for output DMA */
+#define EXYNOS_CIOYSA28		(0x25c)
+/* Y 29th frame start address for output DMA */
+#define EXYNOS_CIOYSA29		(0x260)
+/* Y 30th frame start address for output DMA */
+#define EXYNOS_CIOYSA30		(0x264)
+/* Y 31st frame start address for output DMA */
+#define EXYNOS_CIOYSA31		(0x268)
+/* Y 32nd frame start address for output DMA */
+#define EXYNOS_CIOYSA32		(0x26c)
+
+/* CB 5th frame start address for output DMA */
+#define EXYNOS_CIOCBSA5		(0x270)
+/* CB 6th frame start address for output DMA */
+#define EXYNOS_CIOCBSA6		(0x274)
+/* CB 7th frame start address for output DMA */
+#define EXYNOS_CIOCBSA7		(0x278)
+/* CB 8th frame start address for output DMA */
+#define EXYNOS_CIOCBSA8		(0x27c)
+/* CB 9th frame start address for output DMA */
+#define EXYNOS_CIOCBSA9		(0x280)
+/* CB 10th frame start address for output DMA */
+#define EXYNOS_CIOCBSA10		(0x284)
+/* CB 11th frame start address for output DMA */
+#define EXYNOS_CIOCBSA11		(0x288)
+/* CB 12th frame start address for output DMA */
+#define EXYNOS_CIOCBSA12		(0x28c)
+/* CB 13th frame start address for output DMA */
+#define EXYNOS_CIOCBSA13		(0x290)
+/* CB 14th frame start address for output DMA */
+#define EXYNOS_CIOCBSA14		(0x294)
+/* CB 15th frame start address for output DMA */
+#define EXYNOS_CIOCBSA15		(0x298)
+/* CB 16th frame start address for output DMA */
+#define EXYNOS_CIOCBSA16		(0x29c)
+/* CB 17th frame start address for output DMA */
+#define EXYNOS_CIOCBSA17		(0x2a0)
+/* CB 18th frame start address for output DMA */
+#define EXYNOS_CIOCBSA18		(0x2a4)
+/* CB 19th frame start address for output DMA */
+#define EXYNOS_CIOCBSA19		(0x2a8)
+/* CB 20th frame start address for output DMA */
+#define EXYNOS_CIOCBSA20		(0x2ac)
+/* CB 21st frame start address for output DMA */
+#define EXYNOS_CIOCBSA21		(0x2b0)
+/* CB 22nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA22		(0x2b4)
+/* CB 23rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA23		(0x2b8)
+/* CB 24th frame start address for output DMA */
+#define EXYNOS_CIOCBSA24		(0x2bc)
+/* CB 25th frame start address for output DMA */
+#define EXYNOS_CIOCBSA25		(0x2c0)
+/* CB 26th frame start address for output DMA */
+#define EXYNOS_CIOCBSA26		(0x2c4)
+/* CB 27th frame start address for output DMA */
+#define EXYNOS_CIOCBSA27		(0x2c8)
+/* CB 28th frame start address for output DMA */
+#define EXYNOS_CIOCBSA28		(0x2cc)
+/* CB 29th frame start address for output DMA */
+#define EXYNOS_CIOCBSA29		(0x2d0)
+/* CB 30th frame start address for output DMA */
+#define EXYNOS_CIOCBSA30		(0x2d4)
+/* CB 31st frame start address for output DMA */
+#define EXYNOS_CIOCBSA31		(0x2d8)
+/* CB 32nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA32		(0x2dc)
+
+/* CR 5th frame start address for output DMA */
+#define EXYNOS_CIOCRSA5		(0x2e0)
+/* CR 6th frame start address for output DMA */
+#define EXYNOS_CIOCRSA6		(0x2e4)
+/* CR 7th frame start address for output DMA */
+#define EXYNOS_CIOCRSA7		(0x2e8)
+/* CR 8th frame start address for output DMA */
+#define EXYNOS_CIOCRSA8		(0x2ec)
+/* CR 9th frame start address for output DMA */
+#define EXYNOS_CIOCRSA9		(0x2f0)
+/* CR 10th frame start address for output DMA */
+#define EXYNOS_CIOCRSA10		(0x2f4)
+/* CR 11th frame start address for output DMA */
+#define EXYNOS_CIOCRSA11		(0x2f8)
+/* CR 12th frame start address for output DMA */
+#define EXYNOS_CIOCRSA12		(0x2fc)
+/* CR 13th frame start address for output DMA */
+#define EXYNOS_CIOCRSA13		(0x300)
+/* CR 14th frame start address for output DMA */
+#define EXYNOS_CIOCRSA14		(0x304)
+/* CR 15th frame start address for output DMA */
+#define EXYNOS_CIOCRSA15		(0x308)
+/* CR 16th frame start address for output DMA */
+#define EXYNOS_CIOCRSA16		(0x30c)
+/* CR 17th frame start address for output DMA */
+#define EXYNOS_CIOCRSA17		(0x310)
+/* CR 18th frame start address for output DMA */
+#define EXYNOS_CIOCRSA18		(0x314)
+/* CR 19th frame start address for output DMA */
+#define EXYNOS_CIOCRSA19		(0x318)
+/* CR 20th frame start address for output DMA */
+#define EXYNOS_CIOCRSA20		(0x31c)
+/* CR 21st frame start address for output DMA */
+#define EXYNOS_CIOCRSA21		(0x320)
+/* CR 22nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA22		(0x324)
+/* CR 23rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA23		(0x328)
+/* CR 24th frame start address for output DMA */
+#define EXYNOS_CIOCRSA24		(0x32c)
+/* CR 25th frame start address for output DMA */
+#define EXYNOS_CIOCRSA25		(0x330)
+/* CR 26th frame start address for output DMA */
+#define EXYNOS_CIOCRSA26		(0x334)
+/* CR 27th frame start address for output DMA */
+#define EXYNOS_CIOCRSA27		(0x338)
+/* CR 28th frame start address for output DMA */
+#define EXYNOS_CIOCRSA28		(0x33c)
+/* CR 29th frame start address for output DMA */
+#define EXYNOS_CIOCRSA29		(0x340)
+/* CR 30th frame start address for output DMA */
+#define EXYNOS_CIOCRSA30		(0x344)
+/* CR 31st frame start address for output DMA */
+#define EXYNOS_CIOCRSA31		(0x348)
+/* CR 32nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA32		(0x34c)
+
+/*
+ * Macro part
+ */
+/* frame start address 1 ~ 4, 5 ~ 32 */
+/* Number of Default PingPong Memory for output DMA */
+#define DEF_PP		4
+#define EXYNOS_CIOYSA(__x)		\
+	(((__x) < DEF_PP) ?	\
+	 (EXYNOS_CIOYSA1  + (__x) * 4) : \
+	(EXYNOS_CIOYSA5  + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCBSA(__x)	\
+	(((__x) < DEF_PP) ?	\
+	 (EXYNOS_CIOCBSA1 + (__x) * 4) : \
+	(EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCRSA(__x)	\
+	(((__x) < DEF_PP) ?	\
+	 (EXYNOS_CIOCRSA1 + (__x) * 4) : \
+	(EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
+/* Number of Default PingPong Memory for input DMA */
+#define DEF_IPP		1
+#define EXYNOS_CIIYSA(__x)		\
+	(((__x) < DEF_IPP) ?	\
+	 (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
+#define EXYNOS_CIICBSA(__x)	\
+	(((__x) < DEF_IPP) ?	\
+	 (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
+#define EXYNOS_CIICRSA(__x)	\
+	(((__x) < DEF_IPP) ?	\
+	 (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
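+/*
+ * Index mapping implied by the macros above: output buffer indices
+ * 0..3 select the contiguous CIOYSA1..CIOYSA4 bank, while indices
+ * 4..31 continue at CIOYSA5..CIOYSA32, e.g. EXYNOS_CIOYSA(0) ==
+ * EXYNOS_CIOYSA1 and EXYNOS_CIOYSA(4) == EXYNOS_CIOYSA5. The input
+ * side only ping-pongs between the 0 and 1 registers.
+ */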
+
+#define EXYNOS_CISRCFMT_SOURCEHSIZE(x)		((x) << 16)
+#define EXYNOS_CISRCFMT_SOURCEVSIZE(x)		((x) << 0)
+
+#define EXYNOS_CIWDOFST_WINHOROFST(x)		((x) << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST(x)		((x) << 0)
+
+#define EXYNOS_CIWDOFST2_WINHOROFST2(x)		((x) << 16)
+#define EXYNOS_CIWDOFST2_WINVEROFST2(x)		((x) << 0)
+
+#define EXYNOS_CITRGFMT_TARGETHSIZE(x)		(((x) & 0x1fff) << 16)
+#define EXYNOS_CITRGFMT_TARGETVSIZE(x)		(((x) & 0x1fff) << 0)
+
+#define EXYNOS_CISCPRERATIO_SHFACTOR(x)		((x) << 28)
+#define EXYNOS_CISCPRERATIO_PREHORRATIO(x)		((x) << 16)
+#define EXYNOS_CISCPRERATIO_PREVERRATIO(x)		((x) << 0)
+
+#define EXYNOS_CISCPREDST_PREDSTWIDTH(x)		((x) << 16)
+#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x)		((x) << 0)
+
+#define EXYNOS_CISCCTRL_MAINHORRATIO(x)		((x) << 16)
+#define EXYNOS_CISCCTRL_MAINVERRATIO(x)		((x) << 0)
+
+#define EXYNOS_CITAREA_TARGET_AREA(x)		((x) << 0)
+
+#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x)		(((x) >> 26) & 0x3)
+#define EXYNOS_CISTATUS_GET_FRAME_END(x)		(((x) >> 17) & 0x1)
+#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x)	(((x) >> 16) & 0x1)
+#define EXYNOS_CISTATUS_GET_LCD_STATUS(x)		(((x) >> 9) & 0x1)
+#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x)	(((x) >> 8) & 0x1)
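+/*
+ * These helpers decode a raw EXYNOS_CISTATUS readback, e.g.
+ * EXYNOS_CISTATUS_GET_FRAME_END(v) tests the same bit (17) that
+ * EXYNOS_CISTATUS_FRAMEEND in the bit-definition part below sets.
+ */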
+
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x)	(((x) >> 7) & 0x3f)
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x)	((x) & 0x3f)
+
+#define EXYNOS_CIIMGEFF_FIN(x)			(((x) & 0x7) << 26)
+#define EXYNOS_CIIMGEFF_PAT_CB(x)			((x) << 13)
+#define EXYNOS_CIIMGEFF_PAT_CR(x)			((x) << 0)
+
+#define EXYNOS_CIILINESKIP(x)			(((x) & 0xf) << 24)
+
+#define EXYNOS_CIREAL_ISIZE_HEIGHT(x)		((x) << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH(x)		((x) << 0)
+
+#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x)		((x) << 24)
+#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x)		((x) & 0x1)
+
+#define EXYNOS_CIOYOFF_VERTICAL(x)			((x) << 16)
+#define EXYNOS_CIOYOFF_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_CIOCBOFF_VERTICAL(x)		((x) << 16)
+#define EXYNOS_CIOCBOFF_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_CIOCROFF_VERTICAL(x)		((x) << 16)
+#define EXYNOS_CIOCROFF_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_CIIYOFF_VERTICAL(x)			((x) << 16)
+#define EXYNOS_CIIYOFF_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_CIICBOFF_VERTICAL(x)		((x) << 16)
+#define EXYNOS_CIICBOFF_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_CIICROFF_VERTICAL(x)		((x) << 16)
+#define EXYNOS_CIICROFF_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_ORGISIZE_VERTICAL(x)		((x) << 16)
+#define EXYNOS_ORGISIZE_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_ORGOSIZE_VERTICAL(x)		((x) << 16)
+#define EXYNOS_ORGOSIZE_HORIZONTAL(x)		((x) << 0)
+
+#define EXYNOS_CIEXTEN_TARGETH_EXT(x)		((((x) & 0x2000) >> 13) << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT(x)		((((x) & 0x2000) >> 13) << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x)		(((x) & 0x3F) << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x)		((x) & 0x3F)
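+/*
+ * Worked example: a 14-bit target size keeps its low 13 bits in
+ * EXYNOS_CITRGFMT_TARGETHSIZE()/TARGETVSIZE() above, while bit 13 is
+ * routed here, e.g. EXYNOS_CIEXTEN_TARGETH_EXT(0x2000) == (1 << 26).
+ */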
+
+/*
+ * Bit definition part
+ */
+/* Source format register */
+#define EXYNOS_CISRCFMT_ITU601_8BIT		(1 << 31)
+#define EXYNOS_CISRCFMT_ITU656_8BIT		(0 << 31)
+#define EXYNOS_CISRCFMT_ITU601_16BIT		(1 << 29)
+#define EXYNOS_CISRCFMT_ORDER422_YCBYCR		(0 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_YCRYCB		(1 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CBYCRY		(2 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CRYCBY		(3 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR	(0 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB	(1 << 14)
+
+/* Window offset register */
+#define EXYNOS_CIWDOFST_WINOFSEN			(1 << 31)
+#define EXYNOS_CIWDOFST_CLROVFIY			(1 << 30)
+#define EXYNOS_CIWDOFST_CLROVRLB			(1 << 29)
+#define EXYNOS_CIWDOFST_WINHOROFST_MASK		(0x7ff << 16)
+#define EXYNOS_CIWDOFST_CLROVFICB			(1 << 15)
+#define EXYNOS_CIWDOFST_CLROVFICR			(1 << 14)
+#define EXYNOS_CIWDOFST_WINVEROFST_MASK		(0xfff << 0)
+
+/* Global control register */
+#define EXYNOS_CIGCTRL_SWRST			(1 << 31)
+#define EXYNOS_CIGCTRL_CAMRST_A			(1 << 30)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_B		(0 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_A		(1 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK		(1 << 29)
+#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL		(0 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR	(1 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC		(2 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC		(3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_MASK		(3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT		(27)
+#define EXYNOS_CIGCTRL_INVPOLPCLK			(1 << 26)
+#define EXYNOS_CIGCTRL_INVPOLVSYNC			(1 << 25)
+#define EXYNOS_CIGCTRL_INVPOLHREF			(1 << 24)
+#define EXYNOS_CIGCTRL_IRQ_OVFEN			(1 << 22)
+#define EXYNOS_CIGCTRL_HREF_MASK			(1 << 21)
+#define EXYNOS_CIGCTRL_IRQ_EDGE			(0 << 20)
+#define EXYNOS_CIGCTRL_IRQ_LEVEL			(1 << 20)
+#define EXYNOS_CIGCTRL_IRQ_CLR			(1 << 19)
+#define EXYNOS_CIGCTRL_IRQ_END_DISABLE		(1 << 18)
+#define EXYNOS_CIGCTRL_IRQ_DISABLE			(0 << 16)
+#define EXYNOS_CIGCTRL_IRQ_ENABLE			(1 << 16)
+#define EXYNOS_CIGCTRL_SHADOW_DISABLE		(1 << 12)
+#define EXYNOS_CIGCTRL_CAM_JPEG			(1 << 8)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_B		(0 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_A		(1 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK		(1 << 7)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA	(0 << 6)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK	(1 << 6)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK		(1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_A		(1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_B		(0 << 10)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK		(1 << 6)
+#define EXYNOS_CIGCTRL_CSC_ITU601			(0 << 5)
+#define EXYNOS_CIGCTRL_CSC_ITU709			(1 << 5)
+#define EXYNOS_CIGCTRL_CSC_MASK			(1 << 5)
+#define EXYNOS_CIGCTRL_INVPOLHSYNC			(1 << 4)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU		(0 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI		(1 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK		(1 << 3)
+#define EXYNOS_CIGCTRL_PROGRESSIVE			(0 << 0)
+#define EXYNOS_CIGCTRL_INTERLACE			(1 << 0)
+
+/* Window offset2 register */
+#define EXYNOS_CIWDOFST_WINHOROFST2_MASK		(0xfff << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST2_MASK		(0xfff << 0)
+
+/* Target format register */
+#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE		(1 << 31)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420		(0 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422		(1 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE	(2 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_RGB		(3 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_MASK		(3 << 29)
+#define EXYNOS_CITRGFMT_FLIP_SHIFT			(14)
+#define EXYNOS_CITRGFMT_FLIP_NORMAL		(0 << 14)
+#define EXYNOS_CITRGFMT_FLIP_X_MIRROR		(1 << 14)
+#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR		(2 << 14)
+#define EXYNOS_CITRGFMT_FLIP_180			(3 << 14)
+#define EXYNOS_CITRGFMT_FLIP_MASK			(3 << 14)
+#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE		(1 << 13)
+#define EXYNOS_CITRGFMT_TARGETV_MASK		(0x1fff << 0)
+#define EXYNOS_CITRGFMT_TARGETH_MASK		(0x1fff << 16)
+
+/* Output DMA control register */
+#define EXYNOS_CIOCTRL_WEAVE_OUT			(1 << 31)
+#define EXYNOS_CIOCTRL_WEAVE_MASK			(1 << 31)
+#define EXYNOS_CIOCTRL_LASTENDEN			(1 << 30)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR		(0 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB		(1 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB		(2 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR		(3 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_SHIFT		(24)
+#define EXYNOS_CIOCTRL_ORDER2P_MASK		(3 << 24)
+#define EXYNOS_CIOCTRL_YCBCR_3PLANE		(0 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_2PLANE		(1 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK		(1 << 3)
+#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE		(1 << 2)
+#define EXYNOS_CIOCTRL_ALPHA_OUT			(0xff << 4)
+#define EXYNOS_CIOCTRL_ORDER422_YCBYCR		(0 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_YCRYCB		(1 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CBYCRY		(2 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CRYCBY		(3 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_MASK		(3 << 0)
+
+/* Main scaler control register */
+#define EXYNOS_CISCCTRL_SCALERBYPASS		(1 << 31)
+#define EXYNOS_CISCCTRL_SCALEUP_H			(1 << 30)
+#define EXYNOS_CISCCTRL_SCALEUP_V			(1 << 29)
+#define EXYNOS_CISCCTRL_CSCR2Y_NARROW		(0 << 28)
+#define EXYNOS_CISCCTRL_CSCR2Y_WIDE		(1 << 28)
+#define EXYNOS_CISCCTRL_CSCY2R_NARROW		(0 << 27)
+#define EXYNOS_CISCCTRL_CSCY2R_WIDE		(1 << 27)
+#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO		(1 << 26)
+#define EXYNOS_CISCCTRL_PROGRESSIVE		(0 << 25)
+#define EXYNOS_CISCCTRL_INTERLACE			(1 << 25)
+#define EXYNOS_CISCCTRL_SCAN_MASK			(1 << 25)
+#define EXYNOS_CISCCTRL_SCALERSTART		(1 << 15)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565		(0 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666		(1 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888		(2 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK		(3 << 13)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565		(0 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666		(1 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888		(2 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK	(3 << 11)
+#define EXYNOS_CISCCTRL_EXTRGB_NORMAL		(0 << 10)
+#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION		(1 << 10)
+#define EXYNOS_CISCCTRL_ONE2ONE			(1 << 9)
+#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK		(0x1ff << 0)
+#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK		(0x1ff << 16)
+
+/* Status register */
+#define EXYNOS_CISTATUS_OVFIY			(1 << 31)
+#define EXYNOS_CISTATUS_OVFICB			(1 << 30)
+#define EXYNOS_CISTATUS_OVFICR			(1 << 29)
+#define EXYNOS_CISTATUS_VSYNC			(1 << 28)
+#define EXYNOS_CISTATUS_SCALERSTART		(1 << 26)
+#define EXYNOS_CISTATUS_WINOFSTEN			(1 << 25)
+#define EXYNOS_CISTATUS_IMGCPTEN			(1 << 22)
+#define EXYNOS_CISTATUS_IMGCPTENSC			(1 << 21)
+#define EXYNOS_CISTATUS_VSYNC_A			(1 << 20)
+#define EXYNOS_CISTATUS_VSYNC_B			(1 << 19)
+#define EXYNOS_CISTATUS_OVRLB			(1 << 18)
+#define EXYNOS_CISTATUS_FRAMEEND			(1 << 17)
+#define EXYNOS_CISTATUS_LASTCAPTUREEND		(1 << 16)
+#define EXYNOS_CISTATUS_VVALID_A			(1 << 15)
+#define EXYNOS_CISTATUS_VVALID_B			(1 << 14)
+
+/* Image capture enable register */
+#define EXYNOS_CIIMGCPT_IMGCPTEN			(1 << 31)
+#define EXYNOS_CIIMGCPT_IMGCPTEN_SC		(1 << 30)
+#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE		(1 << 25)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN		(0 << 18)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT		(1 << 18)
+
+/* Image effects register */
+#define EXYNOS_CIIMGEFF_IE_DISABLE			(0 << 30)
+#define EXYNOS_CIIMGEFF_IE_ENABLE			(1 << 30)
+#define EXYNOS_CIIMGEFF_IE_SC_BEFORE		(0 << 29)
+#define EXYNOS_CIIMGEFF_IE_SC_AFTER		(1 << 29)
+#define EXYNOS_CIIMGEFF_FIN_BYPASS			(0 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARBITRARY		(1 << 26)
+#define EXYNOS_CIIMGEFF_FIN_NEGATIVE		(2 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE		(3 << 26)
+#define EXYNOS_CIIMGEFF_FIN_EMBOSSING		(4 << 26)
+#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE		(5 << 26)
+#define EXYNOS_CIIMGEFF_FIN_MASK			(7 << 26)
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK		((0xff << 13) | (0xff << 0))
+
+/* Real input DMA size register */
+#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE	(1 << 31)
+#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE	(1 << 30)
+#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK		(0x3FFF << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK		(0x3FFF << 0)
+
+/* Input DMA control register */
+#define EXYNOS_MSCTRL_FIELD_MASK			(1 << 31)
+#define EXYNOS_MSCTRL_FIELD_WEAVE			(1 << 31)
+#define EXYNOS_MSCTRL_FIELD_NORMAL			(0 << 31)
+#define EXYNOS_MSCTRL_BURST_CNT			(24)
+#define EXYNOS_MSCTRL_BURST_CNT_MASK		(0xf << 24)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR		(0 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB		(1 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB		(2 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR		(3 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT		(16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK		(0x3 << 16)
+#define EXYNOS_MSCTRL_C_INT_IN_3PLANE		(0 << 15)
+#define EXYNOS_MSCTRL_C_INT_IN_2PLANE		(1 << 15)
+#define EXYNOS_MSCTRL_FLIP_SHIFT			(13)
+#define EXYNOS_MSCTRL_FLIP_NORMAL			(0 << 13)
+#define EXYNOS_MSCTRL_FLIP_X_MIRROR		(1 << 13)
+#define EXYNOS_MSCTRL_FLIP_Y_MIRROR		(2 << 13)
+#define EXYNOS_MSCTRL_FLIP_180			(3 << 13)
+#define EXYNOS_MSCTRL_FLIP_MASK			(3 << 13)
+#define EXYNOS_MSCTRL_ORDER422_CRYCBY		(0 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCRYCB		(1 << 4)
+#define EXYNOS_MSCTRL_ORDER422_CBYCRY		(2 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCBYCR		(3 << 4)
+#define EXYNOS_MSCTRL_INPUT_EXTCAM			(0 << 3)
+#define EXYNOS_MSCTRL_INPUT_MEMORY			(1 << 3)
+#define EXYNOS_MSCTRL_INPUT_MASK			(1 << 3)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR420		(0 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422		(1 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE	(2 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_RGB			(3 << 1)
+#define EXYNOS_MSCTRL_ENVID			(1 << 0)
+
+/* DMA parameter register */
+#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR		(0 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE		(1 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_16X16		(2 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_64X32		(3 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_MASK		(3 << 29)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64		(0 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128		(1 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256		(2 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512		(3 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024	(4 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048	(5 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096	(6 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1		(0 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2		(1 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4		(2 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8		(3 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16		(4 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32		(5 << 20)
+#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR		(0 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE		(1 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_16X16		(2 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_64X32		(3 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_MASK		(3 << 13)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64		(0 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128		(1 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256		(2 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512		(3 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024	(4 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048	(5 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096	(6 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1		(0 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2		(1 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4		(2 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8		(3 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16		(4 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32		(5 << 4)
+
+/* Gathering Extension register */
+#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK		(1 << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK		(1 << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK	(0x3F << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK	(0x3F)
+#define EXYNOS_CIEXTEN_YUV444_OUT			(1 << 22)
+
+/* FIMC Clock Source Select register */
+#define EXYNOS_CLKSRC_HCLK				(0 << 1)
+#define EXYNOS_CLKSRC_HCLK_MASK			(1 << 1)
+#define EXYNOS_CLKSRC_SCLK				(1 << 1)
+
+/* SYSREG for FIMC writeback */
+#define SYSREG_CAMERA_BLK			(0x0218)
+#define SYSREG_FIMD0WB_DEST_MASK		(0x3 << 23)
+#define SYSREG_FIMD0WB_DEST_SHIFT		23
+
+#endif /* EXYNOS_REGS_FIMC_H */
diff --git a/linux-imx/drivers/gpu/drm/exynos/regs-gsc.h b/linux-imx/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644
index 0000000..9ad5927
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/regs-gsc.h
@@ -0,0 +1,284 @@
+/* linux/drivers/gpu/drm/exynos/regs-gsc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Register definition file for Samsung G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_GSC_H_
+#define EXYNOS_REGS_GSC_H_
+
+/* G-Scaler enable */
+#define GSC_ENABLE			0x00
+#define GSC_ENABLE_PP_UPDATE_TIME_MASK	(1 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_CURR	(0 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS	(1 << 9)
+#define GSC_ENABLE_CLK_GATE_MODE_MASK	(1 << 8)
+#define GSC_ENABLE_CLK_GATE_MODE_FREE	(1 << 8)
+#define GSC_ENABLE_IPC_MODE_MASK	(1 << 7)
+#define GSC_ENABLE_NORM_MODE		(0 << 7)
+#define GSC_ENABLE_IPC_MODE		(1 << 7)
+#define GSC_ENABLE_PP_UPDATE_MODE_MASK	(1 << 6)
+#define GSC_ENABLE_PP_UPDATE_FIRE_MODE	(1 << 6)
+#define GSC_ENABLE_IN_PP_UPDATE		(1 << 5)
+#define GSC_ENABLE_ON_CLEAR_MASK	(1 << 4)
+#define GSC_ENABLE_ON_CLEAR_ONESHOT	(1 << 4)
+#define GSC_ENABLE_QOS_ENABLE		(1 << 3)
+#define GSC_ENABLE_OP_STATUS		(1 << 2)
+#define GSC_ENABLE_SFR_UPDATE		(1 << 1)
+#define GSC_ENABLE_ON			(1 << 0)
+
+/* G-Scaler S/W reset */
+#define GSC_SW_RESET			0x04
+#define GSC_SW_RESET_SRESET		(1 << 0)
+
+/* G-Scaler IRQ */
+#define GSC_IRQ				0x08
+#define GSC_IRQ_STATUS_OR_IRQ		(1 << 17)
+#define GSC_IRQ_STATUS_OR_FRM_DONE	(1 << 16)
+#define GSC_IRQ_OR_MASK			(1 << 2)
+#define GSC_IRQ_FRMDONE_MASK		(1 << 1)
+#define GSC_IRQ_ENABLE			(1 << 0)
+
+/* G-Scaler input control */
+#define GSC_IN_CON			0x10
+#define GSC_IN_CHROM_STRIDE_SEL_MASK	(1 << 20)
+#define GSC_IN_CHROM_STRIDE_SEPAR	(1 << 20)
+#define GSC_IN_RB_SWAP_MASK		(1 << 19)
+#define GSC_IN_RB_SWAP			(1 << 19)
+#define GSC_IN_ROT_MASK			(7 << 16)
+#define GSC_IN_ROT_270			(7 << 16)
+#define GSC_IN_ROT_90_YFLIP		(6 << 16)
+#define GSC_IN_ROT_90_XFLIP		(5 << 16)
+#define GSC_IN_ROT_90			(4 << 16)
+#define GSC_IN_ROT_180			(3 << 16)
+#define GSC_IN_ROT_YFLIP		(2 << 16)
+#define GSC_IN_ROT_XFLIP		(1 << 16)
+#define GSC_IN_RGB_TYPE_MASK		(3 << 14)
+#define GSC_IN_RGB_HD_WIDE		(3 << 14)
+#define GSC_IN_RGB_HD_NARROW		(2 << 14)
+#define GSC_IN_RGB_SD_WIDE		(1 << 14)
+#define GSC_IN_RGB_SD_NARROW		(0 << 14)
+#define GSC_IN_YUV422_1P_ORDER_MASK	(1 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_Y	(0 << 13)
+#define GSC_IN_YUV422_1P_OEDER_LSB_C	(1 << 13)
+#define GSC_IN_CHROMA_ORDER_MASK	(1 << 12)
+#define GSC_IN_CHROMA_ORDER_CBCR	(0 << 12)
+#define GSC_IN_CHROMA_ORDER_CRCB	(1 << 12)
+#define GSC_IN_FORMAT_MASK		(7 << 8)
+#define GSC_IN_XRGB8888			(0 << 8)
+#define GSC_IN_RGB565			(1 << 8)
+#define GSC_IN_YUV420_2P		(2 << 8)
+#define GSC_IN_YUV420_3P		(3 << 8)
+#define GSC_IN_YUV422_1P		(4 << 8)
+#define GSC_IN_YUV422_2P		(5 << 8)
+#define GSC_IN_YUV422_3P		(6 << 8)
+#define GSC_IN_TILE_TYPE_MASK		(1 << 4)
+#define GSC_IN_TILE_C_16x8		(0 << 4)
+#define GSC_IN_TILE_C_16x16		(1 << 4)
+#define GSC_IN_TILE_MODE		(1 << 3)
+#define GSC_IN_LOCAL_SEL_MASK		(3 << 1)
+#define GSC_IN_LOCAL_CAM3		(3 << 1)
+#define GSC_IN_LOCAL_FIMD_WB		(2 << 1)
+#define GSC_IN_LOCAL_CAM1		(1 << 1)
+#define GSC_IN_LOCAL_CAM0		(0 << 1)
+#define GSC_IN_PATH_MASK		(1 << 0)
+#define GSC_IN_PATH_LOCAL		(1 << 0)
+#define GSC_IN_PATH_MEMORY		(0 << 0)
+
+/* G-Scaler source image size */
+#define GSC_SRCIMG_SIZE			0x14
+#define GSC_SRCIMG_HEIGHT_MASK		(0x1fff << 16)
+#define GSC_SRCIMG_HEIGHT(x)		((x) << 16)
+#define GSC_SRCIMG_WIDTH_MASK		(0x3fff << 0)
+#define GSC_SRCIMG_WIDTH(x)		((x) << 0)
+
+/* G-Scaler source image offset */
+#define GSC_SRCIMG_OFFSET		0x18
+#define GSC_SRCIMG_OFFSET_Y_MASK	(0x1fff << 16)
+#define GSC_SRCIMG_OFFSET_Y(x)		((x) << 16)
+#define GSC_SRCIMG_OFFSET_X_MASK	(0x1fff << 0)
+#define GSC_SRCIMG_OFFSET_X(x)		((x) << 0)
+
+/* G-Scaler cropped source image size */
+#define GSC_CROPPED_SIZE		0x1C
+#define GSC_CROPPED_HEIGHT_MASK		(0x1fff << 16)
+#define GSC_CROPPED_HEIGHT(x)		((x) << 16)
+#define GSC_CROPPED_WIDTH_MASK		(0x1fff << 0)
+#define GSC_CROPPED_WIDTH(x)		((x) << 0)
+
+/* G-Scaler output control */
+#define GSC_OUT_CON			0x20
+#define GSC_OUT_GLOBAL_ALPHA_MASK	(0xff << 24)
+#define GSC_OUT_GLOBAL_ALPHA(x)		((x) << 24)
+#define GSC_OUT_CHROM_STRIDE_SEL_MASK	(1 << 13)
+#define GSC_OUT_CHROM_STRIDE_SEPAR	(1 << 13)
+#define GSC_OUT_RB_SWAP_MASK		(1 << 12)
+#define GSC_OUT_RB_SWAP			(1 << 12)
+#define GSC_OUT_RGB_TYPE_MASK		(3 << 10)
+#define GSC_OUT_RGB_HD_NARROW		(3 << 10)
+#define GSC_OUT_RGB_HD_WIDE		(2 << 10)
+#define GSC_OUT_RGB_SD_NARROW		(1 << 10)
+#define GSC_OUT_RGB_SD_WIDE		(0 << 10)
+#define GSC_OUT_YUV422_1P_ORDER_MASK	(1 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_Y	(0 << 9)
+#define GSC_OUT_YUV422_1P_OEDER_LSB_C	(1 << 9)
+#define GSC_OUT_CHROMA_ORDER_MASK	(1 << 8)
+#define GSC_OUT_CHROMA_ORDER_CBCR	(0 << 8)
+#define GSC_OUT_CHROMA_ORDER_CRCB	(1 << 8)
+#define GSC_OUT_FORMAT_MASK		(7 << 4)
+#define GSC_OUT_XRGB8888		(0 << 4)
+#define GSC_OUT_RGB565			(1 << 4)
+#define GSC_OUT_YUV420_2P		(2 << 4)
+#define GSC_OUT_YUV420_3P		(3 << 4)
+#define GSC_OUT_YUV422_1P		(4 << 4)
+#define GSC_OUT_YUV422_2P		(5 << 4)
+#define GSC_OUT_YUV444			(7 << 4)
+#define GSC_OUT_TILE_TYPE_MASK		(1 << 2)
+#define GSC_OUT_TILE_C_16x8		(0 << 2)
+#define GSC_OUT_TILE_C_16x16		(1 << 2)
+#define GSC_OUT_TILE_MODE		(1 << 1)
+#define GSC_OUT_PATH_MASK		(1 << 0)
+#define GSC_OUT_PATH_LOCAL		(1 << 0)
+#define GSC_OUT_PATH_MEMORY		(0 << 0)
+
+/* G-Scaler scaled destination image size */
+#define GSC_SCALED_SIZE			0x24
+#define GSC_SCALED_HEIGHT_MASK		(0x1fff << 16)
+#define GSC_SCALED_HEIGHT(x)		((x) << 16)
+#define GSC_SCALED_WIDTH_MASK		(0x1fff << 0)
+#define GSC_SCALED_WIDTH(x)		((x) << 0)
+
+/* G-Scaler pre scale ratio */
+#define GSC_PRE_SCALE_RATIO		0x28
+#define GSC_PRESC_SHFACTOR_MASK		(7 << 28)
+#define GSC_PRESC_SHFACTOR(x)		((x) << 28)
+#define GSC_PRESC_V_RATIO_MASK		(7 << 16)
+#define GSC_PRESC_V_RATIO(x)		((x) << 16)
+#define GSC_PRESC_H_RATIO_MASK		(7 << 0)
+#define GSC_PRESC_H_RATIO(x)		((x) << 0)
+
+/* G-Scaler main scale horizontal ratio */
+#define GSC_MAIN_H_RATIO		0x2C
+#define GSC_MAIN_H_RATIO_MASK		(0xfffff << 0)
+#define GSC_MAIN_H_RATIO_VALUE(x)	((x) << 0)
+
+/* G-Scaler main scale vertical ratio */
+#define GSC_MAIN_V_RATIO		0x30
+#define GSC_MAIN_V_RATIO_MASK		(0xfffff << 0)
+#define GSC_MAIN_V_RATIO_VALUE(x)	((x) << 0)
+
+/* G-Scaler input chrominance stride */
+#define GSC_IN_CHROM_STRIDE		0x3C
+#define GSC_IN_CHROM_STRIDE_MASK	(0x3fff << 0)
+#define GSC_IN_CHROM_STRIDE_VALUE(x)	((x) << 0)
+
+/* G-Scaler destination image size */
+#define GSC_DSTIMG_SIZE			0x40
+#define GSC_DSTIMG_HEIGHT_MASK		(0x1fff << 16)
+#define GSC_DSTIMG_HEIGHT(x)		((x) << 16)
+#define GSC_DSTIMG_WIDTH_MASK		(0x1fff << 0)
+#define GSC_DSTIMG_WIDTH(x)		((x) << 0)
+
+/* G-Scaler destination image offset */
+#define GSC_DSTIMG_OFFSET		0x44
+#define GSC_DSTIMG_OFFSET_Y_MASK	(0x1fff << 16)
+#define GSC_DSTIMG_OFFSET_Y(x)		((x) << 16)
+#define GSC_DSTIMG_OFFSET_X_MASK	(0x1fff << 0)
+#define GSC_DSTIMG_OFFSET_X(x)		((x) << 0)
+
+/* G-Scaler output chrominance stride */
+#define GSC_OUT_CHROM_STRIDE		0x48
+#define GSC_OUT_CHROM_STRIDE_MASK	(0x3fff << 0)
+#define GSC_OUT_CHROM_STRIDE_VALUE(x)	((x) << 0)
+
+/* G-Scaler input y address mask */
+#define GSC_IN_BASE_ADDR_Y_MASK		0x4C
+/* G-Scaler input y base address */
+#define GSC_IN_BASE_ADDR_Y(n)		(0x50 + (n) * 0x4)
+/* G-Scaler input y base current address */
+#define GSC_IN_BASE_ADDR_Y_CUR(n)	(0x60 + (n) * 0x4)
+
+/* G-Scaler input cb address mask */
+#define GSC_IN_BASE_ADDR_CB_MASK	0x7C
+/* G-Scaler input cb base address */
+#define GSC_IN_BASE_ADDR_CB(n)		(0x80 + (n) * 0x4)
+/* G-Scaler input cb base current address */
+#define GSC_IN_BASE_ADDR_CB_CUR(n)	(0x90 + (n) * 0x4)
+
+/* G-Scaler input cr address mask */
+#define GSC_IN_BASE_ADDR_CR_MASK	0xAC
+/* G-Scaler input cr base address */
+#define GSC_IN_BASE_ADDR_CR(n)		(0xB0 + (n) * 0x4)
+/* G-Scaler input cr base current address */
+#define GSC_IN_BASE_ADDR_CR_CUR(n)	(0xC0 + (n) * 0x4)
+
+/* G-Scaler input address mask */
+#define GSC_IN_CURR_ADDR_INDEX	(0xf << 24)
+#define GSC_IN_CURR_GET_INDEX(x)	((x) >> 24)
+#define GSC_IN_BASE_ADDR_PINGPONG(x)	((x) << 16)
+#define GSC_IN_BASE_ADDR_MASK		(0xff << 0)
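+/*
+ * Note: GSC_IN_CURR_GET_INDEX() only shifts bits 31..24 down; since the
+ * index field proper is bits 27..24 (GSC_IN_CURR_ADDR_INDEX), callers
+ * presumably mask the result with 0xf themselves.
+ */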
+
+/* G-Scaler output y address mask */
+#define GSC_OUT_BASE_ADDR_Y_MASK	0x10C
+/* G-Scaler output y base address */
+#define GSC_OUT_BASE_ADDR_Y(n)		(0x110 + (n) * 0x4)
+
+/* G-Scaler output cb address mask */
+#define GSC_OUT_BASE_ADDR_CB_MASK	0x15C
+/* G-Scaler output cb base address */
+#define GSC_OUT_BASE_ADDR_CB(n)		(0x160 + (n) * 0x4)
+
+/* G-Scaler output cr address mask */
+#define GSC_OUT_BASE_ADDR_CR_MASK	0x1AC
+/* G-Scaler output cr base address */
+#define GSC_OUT_BASE_ADDR_CR(n)		(0x1B0 + (n) * 0x4)
+
+/* G-Scaler output address mask */
+#define GSC_OUT_CURR_ADDR_INDEX		(0xf << 24)
+#define GSC_OUT_CURR_GET_INDEX(x)	((x) >> 24)
+#define GSC_OUT_BASE_ADDR_PINGPONG(x)	((x) << 16)
+#define GSC_OUT_BASE_ADDR_MASK		(0xffff << 0)
+
+/* G-Scaler horizontal scaling filter */
+#define GSC_HCOEF(n, s, x)	(0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler vertical scaling filter */
+#define GSC_VCOEF(n, s, x)	(0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
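+/*
+ * Offset arithmetic: consecutive (n) entries are 4 bytes apart, (s)
+ * banks 0x30 apart and (x) banks 0x300 apart, e.g.
+ * GSC_VCOEF(1, 2, 0) == 0x200 + 0x4 + 0x60 == 0x264.
+ */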
+
+/* G-Scaler BUS control */
+#define GSC_BUSCON			0xA78
+#define GSC_BUSCON_INT_TIME_MASK	(1 << 8)
+#define GSC_BUSCON_INT_DATA_TRANS	(0 << 8)
+#define GSC_BUSCON_INT_AXI_RESPONSE	(1 << 8)
+#define GSC_BUSCON_AWCACHE(x)		((x) << 4)
+#define GSC_BUSCON_ARCACHE(x)		((x) << 0)
+
+/* G-Scaler V position */
+#define GSC_VPOSITION			0xA7C
+#define GSC_VPOS_F(x)			((x) << 0)
+
+
+/* G-Scaler clock initial count */
+#define GSC_CLK_INIT_COUNT		0xC00
+#define GSC_CLK_GATE_MODE_INIT_CNT(x)	((x) << 0)
+
+/* G-Scaler clock snoop count */
+#define GSC_CLK_SNOOP_COUNT		0xC04
+#define GSC_CLK_GATE_MODE_SNOOP_CNT(x)	((x) << 0)
+
+/* SYSCON. GSCBLK_CFG */
+#define SYSREG_GSCBLK_CFG1		(S3C_VA_SYS + 0x0224)
+#define GSC_BLK_DISP1WB_DEST(x)		((x) << 10)
+#define GSC_BLK_SW_RESET_WB_DEST(x)	(1 << (18 + (x)))
+#define GSC_BLK_PXLASYNC_LO_MASK_WB(x)	(0 << (14 + (x)))
+#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x)	(1 << (2 * (x)))
+#define SYSREG_GSCBLK_CFG2		(S3C_VA_SYS + 0x2000)
+#define PXLASYNC_LO_MASK_CAMIF_GSCL(x)	(1 << (x))
+
+#endif /* EXYNOS_REGS_GSC_H_ */
diff --git a/linux-imx/drivers/gpu/drm/exynos/regs-hdmi.h b/linux-imx/drivers/gpu/drm/exynos/regs-hdmi.h
new file mode 100644
index 0000000..ef1b3eb
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -0,0 +1,581 @@
+/*
+ *
+ *  Cloned from drivers/media/video/s5p-tv/regs-hdmi.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * HDMI register header file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_HDMI_H
+#define SAMSUNG_REGS_HDMI_H
+
+/*
+ * Register part
+ */
+
+/* HDMI Version 1.3 & Common */
+#define HDMI_CTRL_BASE(x)		((x) + 0x00000000)
+#define HDMI_CORE_BASE(x)		((x) + 0x00010000)
+#define HDMI_I2S_BASE(x)		((x) + 0x00040000)
+#define HDMI_TG_BASE(x)			((x) + 0x00050000)
+
+/* Control registers */
+#define HDMI_INTC_CON			HDMI_CTRL_BASE(0x0000)
+#define HDMI_INTC_FLAG			HDMI_CTRL_BASE(0x0004)
+#define HDMI_HPD_STATUS			HDMI_CTRL_BASE(0x000C)
+#define HDMI_V13_PHY_RSTOUT		HDMI_CTRL_BASE(0x0014)
+#define HDMI_V13_PHY_VPLL		HDMI_CTRL_BASE(0x0018)
+#define HDMI_V13_PHY_CMU		HDMI_CTRL_BASE(0x001C)
+#define HDMI_V13_CORE_RSTOUT		HDMI_CTRL_BASE(0x0020)
+
+/* Core registers */
+#define HDMI_CON_0			HDMI_CORE_BASE(0x0000)
+#define HDMI_CON_1			HDMI_CORE_BASE(0x0004)
+#define HDMI_CON_2			HDMI_CORE_BASE(0x0008)
+#define HDMI_SYS_STATUS			HDMI_CORE_BASE(0x0010)
+#define HDMI_V13_PHY_STATUS		HDMI_CORE_BASE(0x0014)
+#define HDMI_STATUS_EN			HDMI_CORE_BASE(0x0020)
+#define HDMI_HPD			HDMI_CORE_BASE(0x0030)
+#define HDMI_MODE_SEL			HDMI_CORE_BASE(0x0040)
+#define HDMI_ENC_EN			HDMI_CORE_BASE(0x0044)
+#define HDMI_V13_BLUE_SCREEN_0		HDMI_CORE_BASE(0x0050)
+#define HDMI_V13_BLUE_SCREEN_1		HDMI_CORE_BASE(0x0054)
+#define HDMI_V13_BLUE_SCREEN_2		HDMI_CORE_BASE(0x0058)
+#define HDMI_H_BLANK_0			HDMI_CORE_BASE(0x00A0)
+#define HDMI_H_BLANK_1			HDMI_CORE_BASE(0x00A4)
+#define HDMI_V13_V_BLANK_0		HDMI_CORE_BASE(0x00B0)
+#define HDMI_V13_V_BLANK_1		HDMI_CORE_BASE(0x00B4)
+#define HDMI_V13_V_BLANK_2		HDMI_CORE_BASE(0x00B8)
+#define HDMI_V13_H_V_LINE_0		HDMI_CORE_BASE(0x00C0)
+#define HDMI_V13_H_V_LINE_1		HDMI_CORE_BASE(0x00C4)
+#define HDMI_V13_H_V_LINE_2		HDMI_CORE_BASE(0x00C8)
+#define HDMI_VSYNC_POL			HDMI_CORE_BASE(0x00E4)
+#define HDMI_INT_PRO_MODE		HDMI_CORE_BASE(0x00E8)
+#define HDMI_V13_V_BLANK_F_0		HDMI_CORE_BASE(0x0110)
+#define HDMI_V13_V_BLANK_F_1		HDMI_CORE_BASE(0x0114)
+#define HDMI_V13_V_BLANK_F_2		HDMI_CORE_BASE(0x0118)
+#define HDMI_V13_H_SYNC_GEN_0		HDMI_CORE_BASE(0x0120)
+#define HDMI_V13_H_SYNC_GEN_1		HDMI_CORE_BASE(0x0124)
+#define HDMI_V13_H_SYNC_GEN_2		HDMI_CORE_BASE(0x0128)
+#define HDMI_V13_V_SYNC_GEN_1_0		HDMI_CORE_BASE(0x0130)
+#define HDMI_V13_V_SYNC_GEN_1_1		HDMI_CORE_BASE(0x0134)
+#define HDMI_V13_V_SYNC_GEN_1_2		HDMI_CORE_BASE(0x0138)
+#define HDMI_V13_V_SYNC_GEN_2_0		HDMI_CORE_BASE(0x0140)
+#define HDMI_V13_V_SYNC_GEN_2_1		HDMI_CORE_BASE(0x0144)
+#define HDMI_V13_V_SYNC_GEN_2_2		HDMI_CORE_BASE(0x0148)
+#define HDMI_V13_V_SYNC_GEN_3_0		HDMI_CORE_BASE(0x0150)
+#define HDMI_V13_V_SYNC_GEN_3_1		HDMI_CORE_BASE(0x0154)
+#define HDMI_V13_V_SYNC_GEN_3_2		HDMI_CORE_BASE(0x0158)
+#define HDMI_V13_ACR_CON		HDMI_CORE_BASE(0x0180)
+#define HDMI_V13_AVI_CON		HDMI_CORE_BASE(0x0300)
+#define HDMI_V13_AVI_BYTE(n)		HDMI_CORE_BASE(0x0320 + 4 * (n))
+#define HDMI_V13_DC_CONTROL		HDMI_CORE_BASE(0x05C0)
+#define HDMI_V13_VIDEO_PATTERN_GEN	HDMI_CORE_BASE(0x05C4)
+#define HDMI_V13_HPD_GEN		HDMI_CORE_BASE(0x05C8)
+#define HDMI_V13_AUI_CON		HDMI_CORE_BASE(0x0360)
+#define HDMI_V13_SPD_CON		HDMI_CORE_BASE(0x0400)
+
+/* Timing generator registers */
+#define HDMI_TG_CMD			HDMI_TG_BASE(0x0000)
+#define HDMI_TG_H_FSZ_L			HDMI_TG_BASE(0x0018)
+#define HDMI_TG_H_FSZ_H			HDMI_TG_BASE(0x001C)
+#define HDMI_TG_HACT_ST_L		HDMI_TG_BASE(0x0020)
+#define HDMI_TG_HACT_ST_H		HDMI_TG_BASE(0x0024)
+#define HDMI_TG_HACT_SZ_L		HDMI_TG_BASE(0x0028)
+#define HDMI_TG_HACT_SZ_H		HDMI_TG_BASE(0x002C)
+#define HDMI_TG_V_FSZ_L			HDMI_TG_BASE(0x0030)
+#define HDMI_TG_V_FSZ_H			HDMI_TG_BASE(0x0034)
+#define HDMI_TG_VSYNC_L			HDMI_TG_BASE(0x0038)
+#define HDMI_TG_VSYNC_H			HDMI_TG_BASE(0x003C)
+#define HDMI_TG_VSYNC2_L		HDMI_TG_BASE(0x0040)
+#define HDMI_TG_VSYNC2_H		HDMI_TG_BASE(0x0044)
+#define HDMI_TG_VACT_ST_L		HDMI_TG_BASE(0x0048)
+#define HDMI_TG_VACT_ST_H		HDMI_TG_BASE(0x004C)
+#define HDMI_TG_VACT_SZ_L		HDMI_TG_BASE(0x0050)
+#define HDMI_TG_VACT_SZ_H		HDMI_TG_BASE(0x0054)
+#define HDMI_TG_FIELD_CHG_L		HDMI_TG_BASE(0x0058)
+#define HDMI_TG_FIELD_CHG_H		HDMI_TG_BASE(0x005C)
+#define HDMI_TG_VACT_ST2_L		HDMI_TG_BASE(0x0060)
+#define HDMI_TG_VACT_ST2_H		HDMI_TG_BASE(0x0064)
+#define HDMI_TG_VSYNC_TOP_HDMI_L	HDMI_TG_BASE(0x0078)
+#define HDMI_TG_VSYNC_TOP_HDMI_H	HDMI_TG_BASE(0x007C)
+#define HDMI_TG_VSYNC_BOT_HDMI_L	HDMI_TG_BASE(0x0080)
+#define HDMI_TG_VSYNC_BOT_HDMI_H	HDMI_TG_BASE(0x0084)
+#define HDMI_TG_FIELD_TOP_HDMI_L	HDMI_TG_BASE(0x0088)
+#define HDMI_TG_FIELD_TOP_HDMI_H	HDMI_TG_BASE(0x008C)
+#define HDMI_TG_FIELD_BOT_HDMI_L	HDMI_TG_BASE(0x0090)
+#define HDMI_TG_FIELD_BOT_HDMI_H	HDMI_TG_BASE(0x0094)
+
+/*
+ * Bit definition part
+ */
+
+/* HDMI_INTC_CON */
+#define HDMI_INTC_EN_GLOBAL		(1 << 6)
+#define HDMI_INTC_EN_HPD_PLUG		(1 << 3)
+#define HDMI_INTC_EN_HPD_UNPLUG		(1 << 2)
+
+/* HDMI_INTC_FLAG */
+#define HDMI_INTC_FLAG_HPD_PLUG		(1 << 3)
+#define HDMI_INTC_FLAG_HPD_UNPLUG	(1 << 2)
+
+/* HDMI_PHY_RSTOUT */
+#define HDMI_PHY_SW_RSTOUT		(1 << 0)
+
+/* HDMI_CORE_RSTOUT */
+#define HDMI_CORE_SW_RSTOUT		(1 << 0)
+
+/* HDMI_CON_0 */
+#define HDMI_BLUE_SCR_EN		(1 << 5)
+#define HDMI_ASP_EN			(1 << 2)
+#define HDMI_ASP_DIS			(0 << 2)
+#define HDMI_ASP_MASK			(1 << 2)
+#define HDMI_EN				(1 << 0)
+
+/* HDMI_CON_2 */
+#define HDMI_VID_PREAMBLE_DIS		(1 << 5)
+#define HDMI_GUARD_BAND_DIS		(1 << 1)
+
+/* HDMI_PHY_STATUS */
+#define HDMI_PHY_STATUS_READY		(1 << 0)
+
+/* HDMI_MODE_SEL */
+#define HDMI_MODE_HDMI_EN		(1 << 1)
+#define HDMI_MODE_DVI_EN		(1 << 0)
+#define HDMI_MODE_MASK			(3 << 0)
+
+/* HDMI_TG_CMD */
+#define HDMI_TG_EN			(1 << 0)
+#define HDMI_FIELD_EN			(1 << 1)
+
+
+/* HDMI Version 1.4 */
+/* Control registers */
+/* #define HDMI_INTC_CON		HDMI_CTRL_BASE(0x0000) */
+/* #define HDMI_INTC_FLAG		HDMI_CTRL_BASE(0x0004) */
+#define HDMI_HDCP_KEY_LOAD		HDMI_CTRL_BASE(0x0008)
+/* #define HDMI_HPD_STATUS		HDMI_CTRL_BASE(0x000C) */
+#define HDMI_INTC_CON_1			HDMI_CTRL_BASE(0x0010)
+#define HDMI_INTC_FLAG_1		HDMI_CTRL_BASE(0x0014)
+#define HDMI_PHY_STATUS_0		HDMI_CTRL_BASE(0x0020)
+#define HDMI_PHY_STATUS_CMU		HDMI_CTRL_BASE(0x0024)
+#define HDMI_PHY_STATUS_PLL		HDMI_CTRL_BASE(0x0028)
+#define HDMI_PHY_CON_0			HDMI_CTRL_BASE(0x0030)
+#define HDMI_HPD_CTRL			HDMI_CTRL_BASE(0x0040)
+#define HDMI_HPD_ST			HDMI_CTRL_BASE(0x0044)
+#define HDMI_HPD_TH_X			HDMI_CTRL_BASE(0x0050)
+#define HDMI_AUDIO_CLKSEL		HDMI_CTRL_BASE(0x0070)
+#define HDMI_PHY_RSTOUT			HDMI_CTRL_BASE(0x0074)
+#define HDMI_PHY_VPLL			HDMI_CTRL_BASE(0x0078)
+#define HDMI_PHY_CMU			HDMI_CTRL_BASE(0x007C)
+#define HDMI_CORE_RSTOUT		HDMI_CTRL_BASE(0x0080)
+
+/* PHY Control bit definition */
+
+/* HDMI_PHY_CON_0 */
+#define HDMI_PHY_POWER_OFF_EN		(1 << 0)
+
+/* Video related registers */
+#define HDMI_YMAX			HDMI_CORE_BASE(0x0060)
+#define HDMI_YMIN			HDMI_CORE_BASE(0x0064)
+#define HDMI_CMAX			HDMI_CORE_BASE(0x0068)
+#define HDMI_CMIN			HDMI_CORE_BASE(0x006C)
+
+#define HDMI_V2_BLANK_0			HDMI_CORE_BASE(0x00B0)
+#define HDMI_V2_BLANK_1			HDMI_CORE_BASE(0x00B4)
+#define HDMI_V1_BLANK_0			HDMI_CORE_BASE(0x00B8)
+#define HDMI_V1_BLANK_1			HDMI_CORE_BASE(0x00BC)
+
+#define HDMI_V_LINE_0			HDMI_CORE_BASE(0x00C0)
+#define HDMI_V_LINE_1			HDMI_CORE_BASE(0x00C4)
+#define HDMI_H_LINE_0			HDMI_CORE_BASE(0x00C8)
+#define HDMI_H_LINE_1			HDMI_CORE_BASE(0x00CC)
+
+#define HDMI_HSYNC_POL			HDMI_CORE_BASE(0x00E0)
+
+#define HDMI_V_BLANK_F0_0		HDMI_CORE_BASE(0x0110)
+#define HDMI_V_BLANK_F0_1		HDMI_CORE_BASE(0x0114)
+#define HDMI_V_BLANK_F1_0		HDMI_CORE_BASE(0x0118)
+#define HDMI_V_BLANK_F1_1		HDMI_CORE_BASE(0x011C)
+
+#define HDMI_H_SYNC_START_0		HDMI_CORE_BASE(0x0120)
+#define HDMI_H_SYNC_START_1		HDMI_CORE_BASE(0x0124)
+#define HDMI_H_SYNC_END_0		HDMI_CORE_BASE(0x0128)
+#define HDMI_H_SYNC_END_1		HDMI_CORE_BASE(0x012C)
+
+#define HDMI_V_SYNC_LINE_BEF_2_0	HDMI_CORE_BASE(0x0130)
+#define HDMI_V_SYNC_LINE_BEF_2_1	HDMI_CORE_BASE(0x0134)
+#define HDMI_V_SYNC_LINE_BEF_1_0	HDMI_CORE_BASE(0x0138)
+#define HDMI_V_SYNC_LINE_BEF_1_1	HDMI_CORE_BASE(0x013C)
+
+#define HDMI_V_SYNC_LINE_AFT_2_0	HDMI_CORE_BASE(0x0140)
+#define HDMI_V_SYNC_LINE_AFT_2_1	HDMI_CORE_BASE(0x0144)
+#define HDMI_V_SYNC_LINE_AFT_1_0	HDMI_CORE_BASE(0x0148)
+#define HDMI_V_SYNC_LINE_AFT_1_1	HDMI_CORE_BASE(0x014C)
+
+#define HDMI_V_SYNC_LINE_AFT_PXL_2_0	HDMI_CORE_BASE(0x0150)
+#define HDMI_V_SYNC_LINE_AFT_PXL_2_1	HDMI_CORE_BASE(0x0154)
+#define HDMI_V_SYNC_LINE_AFT_PXL_1_0	HDMI_CORE_BASE(0x0158)
+#define HDMI_V_SYNC_LINE_AFT_PXL_1_1	HDMI_CORE_BASE(0x015C)
+
+#define HDMI_V_BLANK_F2_0		HDMI_CORE_BASE(0x0160)
+#define HDMI_V_BLANK_F2_1		HDMI_CORE_BASE(0x0164)
+#define HDMI_V_BLANK_F3_0		HDMI_CORE_BASE(0x0168)
+#define HDMI_V_BLANK_F3_1		HDMI_CORE_BASE(0x016C)
+#define HDMI_V_BLANK_F4_0		HDMI_CORE_BASE(0x0170)
+#define HDMI_V_BLANK_F4_1		HDMI_CORE_BASE(0x0174)
+#define HDMI_V_BLANK_F5_0		HDMI_CORE_BASE(0x0178)
+#define HDMI_V_BLANK_F5_1		HDMI_CORE_BASE(0x017C)
+
+#define HDMI_V_SYNC_LINE_AFT_3_0	HDMI_CORE_BASE(0x0180)
+#define HDMI_V_SYNC_LINE_AFT_3_1	HDMI_CORE_BASE(0x0184)
+#define HDMI_V_SYNC_LINE_AFT_4_0	HDMI_CORE_BASE(0x0188)
+#define HDMI_V_SYNC_LINE_AFT_4_1	HDMI_CORE_BASE(0x018C)
+#define HDMI_V_SYNC_LINE_AFT_5_0	HDMI_CORE_BASE(0x0190)
+#define HDMI_V_SYNC_LINE_AFT_5_1	HDMI_CORE_BASE(0x0194)
+#define HDMI_V_SYNC_LINE_AFT_6_0	HDMI_CORE_BASE(0x0198)
+#define HDMI_V_SYNC_LINE_AFT_6_1	HDMI_CORE_BASE(0x019C)
+
+#define HDMI_V_SYNC_LINE_AFT_PXL_3_0	HDMI_CORE_BASE(0x01A0)
+#define HDMI_V_SYNC_LINE_AFT_PXL_3_1	HDMI_CORE_BASE(0x01A4)
+#define HDMI_V_SYNC_LINE_AFT_PXL_4_0	HDMI_CORE_BASE(0x01A8)
+#define HDMI_V_SYNC_LINE_AFT_PXL_4_1	HDMI_CORE_BASE(0x01AC)
+#define HDMI_V_SYNC_LINE_AFT_PXL_5_0	HDMI_CORE_BASE(0x01B0)
+#define HDMI_V_SYNC_LINE_AFT_PXL_5_1	HDMI_CORE_BASE(0x01B4)
+#define HDMI_V_SYNC_LINE_AFT_PXL_6_0	HDMI_CORE_BASE(0x01B8)
+#define HDMI_V_SYNC_LINE_AFT_PXL_6_1	HDMI_CORE_BASE(0x01BC)
+
+#define HDMI_VACT_SPACE_1_0		HDMI_CORE_BASE(0x01C0)
+#define HDMI_VACT_SPACE_1_1		HDMI_CORE_BASE(0x01C4)
+#define HDMI_VACT_SPACE_2_0		HDMI_CORE_BASE(0x01C8)
+#define HDMI_VACT_SPACE_2_1		HDMI_CORE_BASE(0x01CC)
+#define HDMI_VACT_SPACE_3_0		HDMI_CORE_BASE(0x01D0)
+#define HDMI_VACT_SPACE_3_1		HDMI_CORE_BASE(0x01D4)
+#define HDMI_VACT_SPACE_4_0		HDMI_CORE_BASE(0x01D8)
+#define HDMI_VACT_SPACE_4_1		HDMI_CORE_BASE(0x01DC)
+#define HDMI_VACT_SPACE_5_0		HDMI_CORE_BASE(0x01E0)
+#define HDMI_VACT_SPACE_5_1		HDMI_CORE_BASE(0x01E4)
+#define HDMI_VACT_SPACE_6_0		HDMI_CORE_BASE(0x01E8)
+#define HDMI_VACT_SPACE_6_1		HDMI_CORE_BASE(0x01EC)
+
+#define HDMI_GCP_CON			HDMI_CORE_BASE(0x0200)
+#define HDMI_GCP_BYTE1			HDMI_CORE_BASE(0x0210)
+#define HDMI_GCP_BYTE2			HDMI_CORE_BASE(0x0214)
+#define HDMI_GCP_BYTE3			HDMI_CORE_BASE(0x0218)
+
+/* Audio related registers */
+#define HDMI_ASP_CON			HDMI_CORE_BASE(0x0300)
+#define HDMI_ASP_SP_FLAT		HDMI_CORE_BASE(0x0304)
+#define HDMI_ASP_CHCFG0			HDMI_CORE_BASE(0x0310)
+#define HDMI_ASP_CHCFG1			HDMI_CORE_BASE(0x0314)
+#define HDMI_ASP_CHCFG2			HDMI_CORE_BASE(0x0318)
+#define HDMI_ASP_CHCFG3			HDMI_CORE_BASE(0x031C)
+
+#define HDMI_ACR_CON			HDMI_CORE_BASE(0x0400)
+#define HDMI_ACR_MCTS0			HDMI_CORE_BASE(0x0410)
+#define HDMI_ACR_MCTS1			HDMI_CORE_BASE(0x0414)
+#define HDMI_ACR_MCTS2			HDMI_CORE_BASE(0x0418)
+#define HDMI_ACR_CTS0			HDMI_CORE_BASE(0x0420)
+#define HDMI_ACR_CTS1			HDMI_CORE_BASE(0x0424)
+#define HDMI_ACR_CTS2			HDMI_CORE_BASE(0x0428)
+#define HDMI_ACR_N0			HDMI_CORE_BASE(0x0430)
+#define HDMI_ACR_N1			HDMI_CORE_BASE(0x0434)
+#define HDMI_ACR_N2			HDMI_CORE_BASE(0x0438)
+
+/* Packet related registers */
+#define HDMI_ACP_CON			HDMI_CORE_BASE(0x0500)
+#define HDMI_ACP_TYPE			HDMI_CORE_BASE(0x0514)
+#define HDMI_ACP_DATA(n)		HDMI_CORE_BASE(0x0520 + 4 * (n))
+
+#define HDMI_ISRC_CON			HDMI_CORE_BASE(0x0600)
+#define HDMI_ISRC1_HEADER1		HDMI_CORE_BASE(0x0614)
+#define HDMI_ISRC1_DATA(n)		HDMI_CORE_BASE(0x0620 + 4 * (n))
+#define HDMI_ISRC2_DATA(n)		HDMI_CORE_BASE(0x06A0 + 4 * (n))
+
+#define HDMI_AVI_CON			HDMI_CORE_BASE(0x0700)
+#define HDMI_AVI_HEADER0		HDMI_CORE_BASE(0x0710)
+#define HDMI_AVI_HEADER1		HDMI_CORE_BASE(0x0714)
+#define HDMI_AVI_HEADER2		HDMI_CORE_BASE(0x0718)
+#define HDMI_AVI_CHECK_SUM		HDMI_CORE_BASE(0x071C)
+#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0720 + 4 * (n-1))
+
+#define HDMI_AUI_CON			HDMI_CORE_BASE(0x0800)
+#define HDMI_AUI_HEADER0		HDMI_CORE_BASE(0x0810)
+#define HDMI_AUI_HEADER1		HDMI_CORE_BASE(0x0814)
+#define HDMI_AUI_HEADER2		HDMI_CORE_BASE(0x0818)
+#define HDMI_AUI_CHECK_SUM		HDMI_CORE_BASE(0x081C)
+#define HDMI_AUI_BYTE(n)		HDMI_CORE_BASE(0x0820 + 4 * (n-1))
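+/*
+ * Unlike the other packet-data macros, HDMI_AVI_BYTE() and
+ * HDMI_AUI_BYTE() take a 1-based n: byte 1 lives at core offset
+ * 0x0720 (resp. 0x0820).
+ */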
+
+#define HDMI_MPG_CON			HDMI_CORE_BASE(0x0900)
+#define HDMI_MPG_CHECK_SUM		HDMI_CORE_BASE(0x091C)
+#define HDMI_MPG_DATA(n)		HDMI_CORE_BASE(0x0920 + 4 * (n))
+
+#define HDMI_SPD_CON			HDMI_CORE_BASE(0x0A00)
+#define HDMI_SPD_HEADER0		HDMI_CORE_BASE(0x0A10)
+#define HDMI_SPD_HEADER1		HDMI_CORE_BASE(0x0A14)
+#define HDMI_SPD_HEADER2		HDMI_CORE_BASE(0x0A18)
+#define HDMI_SPD_DATA(n)		HDMI_CORE_BASE(0x0A20 + 4 * (n))
+
+#define HDMI_GAMUT_CON			HDMI_CORE_BASE(0x0B00)
+#define HDMI_GAMUT_HEADER0		HDMI_CORE_BASE(0x0B10)
+#define HDMI_GAMUT_HEADER1		HDMI_CORE_BASE(0x0B14)
+#define HDMI_GAMUT_HEADER2		HDMI_CORE_BASE(0x0B18)
+#define HDMI_GAMUT_METADATA(n)		HDMI_CORE_BASE(0x0B20 + 4 * (n))
+
+#define HDMI_VSI_CON			HDMI_CORE_BASE(0x0C00)
+#define HDMI_VSI_HEADER0		HDMI_CORE_BASE(0x0C10)
+#define HDMI_VSI_HEADER1		HDMI_CORE_BASE(0x0C14)
+#define HDMI_VSI_HEADER2		HDMI_CORE_BASE(0x0C18)
+#define HDMI_VSI_DATA(n)		HDMI_CORE_BASE(0x0C20 + 4 * (n))
+
+#define HDMI_DC_CONTROL			HDMI_CORE_BASE(0x0D00)
+#define HDMI_VIDEO_PATTERN_GEN		HDMI_CORE_BASE(0x0D04)
+
+#define HDMI_AN_SEED_SEL		HDMI_CORE_BASE(0x0E48)
+#define HDMI_AN_SEED_0			HDMI_CORE_BASE(0x0E58)
+#define HDMI_AN_SEED_1			HDMI_CORE_BASE(0x0E5C)
+#define HDMI_AN_SEED_2			HDMI_CORE_BASE(0x0E60)
+#define HDMI_AN_SEED_3			HDMI_CORE_BASE(0x0E64)
+
+/* AVI bit definition */
+#define HDMI_AVI_CON_DO_NOT_TRANSMIT	(0 << 1)
+#define HDMI_AVI_CON_EVERY_VSYNC	(1 << 1)
+
+#define AVI_ACTIVE_FORMAT_VALID	(1 << 4)
+#define AVI_UNDERSCANNED_DISPLAY_VALID	(1 << 1)
+
+/* AUI bit definition */
+#define HDMI_AUI_CON_NO_TRAN		(0 << 0)
+
+/* VSI bit definition */
+#define HDMI_VSI_CON_DO_NOT_TRANSMIT	(0 << 0)
+
+/* HDCP related registers */
+#define HDMI_HDCP_SHA1(n)		HDMI_CORE_BASE(0x7000 + 4 * (n))
+#define HDMI_HDCP_KSV_LIST(n)		HDMI_CORE_BASE(0x7050 + 4 * (n))
+
+#define HDMI_HDCP_KSV_LIST_CON		HDMI_CORE_BASE(0x7064)
+#define HDMI_HDCP_SHA_RESULT		HDMI_CORE_BASE(0x7070)
+#define HDMI_HDCP_CTRL1			HDMI_CORE_BASE(0x7080)
+#define HDMI_HDCP_CTRL2			HDMI_CORE_BASE(0x7084)
+#define HDMI_HDCP_CHECK_RESULT		HDMI_CORE_BASE(0x7090)
+#define HDMI_HDCP_BKSV(n)		HDMI_CORE_BASE(0x70A0 + 4 * (n))
+#define HDMI_HDCP_AKSV(n)		HDMI_CORE_BASE(0x70C0 + 4 * (n))
+#define HDMI_HDCP_AN(n)			HDMI_CORE_BASE(0x70E0 + 4 * (n))
+
+#define HDMI_HDCP_BCAPS			HDMI_CORE_BASE(0x7100)
+#define HDMI_HDCP_BSTATUS_0		HDMI_CORE_BASE(0x7110)
+#define HDMI_HDCP_BSTATUS_1		HDMI_CORE_BASE(0x7114)
+#define HDMI_HDCP_RI_0			HDMI_CORE_BASE(0x7140)
+#define HDMI_HDCP_RI_1			HDMI_CORE_BASE(0x7144)
+#define HDMI_HDCP_I2C_INT		HDMI_CORE_BASE(0x7180)
+#define HDMI_HDCP_AN_INT		HDMI_CORE_BASE(0x7190)
+#define HDMI_HDCP_WDT_INT		HDMI_CORE_BASE(0x71A0)
+#define HDMI_HDCP_RI_INT		HDMI_CORE_BASE(0x71B0)
+#define HDMI_HDCP_RI_COMPARE_0		HDMI_CORE_BASE(0x71D0)
+#define HDMI_HDCP_RI_COMPARE_1		HDMI_CORE_BASE(0x71D4)
+#define HDMI_HDCP_FRAME_COUNT		HDMI_CORE_BASE(0x71E0)
+
+#define HDMI_RGB_ROUND_EN		HDMI_CORE_BASE(0xD500)
+#define HDMI_VACT_SPACE_R_0		HDMI_CORE_BASE(0xD504)
+#define HDMI_VACT_SPACE_R_1		HDMI_CORE_BASE(0xD508)
+#define HDMI_VACT_SPACE_G_0		HDMI_CORE_BASE(0xD50C)
+#define HDMI_VACT_SPACE_G_1		HDMI_CORE_BASE(0xD510)
+#define HDMI_VACT_SPACE_B_0		HDMI_CORE_BASE(0xD514)
+#define HDMI_VACT_SPACE_B_1		HDMI_CORE_BASE(0xD518)
+
+#define HDMI_BLUE_SCREEN_B_0		HDMI_CORE_BASE(0xD520)
+#define HDMI_BLUE_SCREEN_B_1		HDMI_CORE_BASE(0xD524)
+#define HDMI_BLUE_SCREEN_G_0		HDMI_CORE_BASE(0xD528)
+#define HDMI_BLUE_SCREEN_G_1		HDMI_CORE_BASE(0xD52C)
+#define HDMI_BLUE_SCREEN_R_0		HDMI_CORE_BASE(0xD530)
+#define HDMI_BLUE_SCREEN_R_1		HDMI_CORE_BASE(0xD534)
+
+/* HDMI I2S register */
+#define HDMI_I2S_CLK_CON		HDMI_I2S_BASE(0x000)
+#define HDMI_I2S_CON_1			HDMI_I2S_BASE(0x004)
+#define HDMI_I2S_CON_2			HDMI_I2S_BASE(0x008)
+#define HDMI_I2S_PIN_SEL_0		HDMI_I2S_BASE(0x00c)
+#define HDMI_I2S_PIN_SEL_1		HDMI_I2S_BASE(0x010)
+#define HDMI_I2S_PIN_SEL_2		HDMI_I2S_BASE(0x014)
+#define HDMI_I2S_PIN_SEL_3		HDMI_I2S_BASE(0x018)
+#define HDMI_I2S_DSD_CON		HDMI_I2S_BASE(0x01c)
+#define HDMI_I2S_MUX_CON		HDMI_I2S_BASE(0x020)
+#define HDMI_I2S_CH_ST_CON		HDMI_I2S_BASE(0x024)
+#define HDMI_I2S_CH_ST_0		HDMI_I2S_BASE(0x028)
+#define HDMI_I2S_CH_ST_1		HDMI_I2S_BASE(0x02c)
+#define HDMI_I2S_CH_ST_2		HDMI_I2S_BASE(0x030)
+#define HDMI_I2S_CH_ST_3		HDMI_I2S_BASE(0x034)
+#define HDMI_I2S_CH_ST_4		HDMI_I2S_BASE(0x038)
+#define HDMI_I2S_CH_ST_SH_0		HDMI_I2S_BASE(0x03c)
+#define HDMI_I2S_CH_ST_SH_1		HDMI_I2S_BASE(0x040)
+#define HDMI_I2S_CH_ST_SH_2		HDMI_I2S_BASE(0x044)
+#define HDMI_I2S_CH_ST_SH_3		HDMI_I2S_BASE(0x048)
+#define HDMI_I2S_CH_ST_SH_4		HDMI_I2S_BASE(0x04c)
+#define HDMI_I2S_MUX_CH			HDMI_I2S_BASE(0x054)
+#define HDMI_I2S_MUX_CUV		HDMI_I2S_BASE(0x058)
+
+/* I2S bit definition */
+
+/* I2S_CLK_CON */
+#define HDMI_I2S_CLK_DIS		(0)
+#define HDMI_I2S_CLK_EN			(1)
+
+/* I2S_CON_1 */
+#define HDMI_I2S_SCLK_FALLING_EDGE	(0 << 1)
+#define HDMI_I2S_SCLK_RISING_EDGE	(1 << 1)
+#define HDMI_I2S_L_CH_LOW_POL		(0)
+#define HDMI_I2S_L_CH_HIGH_POL		(1)
+
+/* I2S_CON_2 */
+#define HDMI_I2S_MSB_FIRST_MODE		(0 << 6)
+#define HDMI_I2S_LSB_FIRST_MODE		(1 << 6)
+#define HDMI_I2S_BIT_CH_32FS		(0 << 4)
+#define HDMI_I2S_BIT_CH_48FS		(1 << 4)
+#define HDMI_I2S_BIT_CH_RESERVED	(2 << 4)
+#define HDMI_I2S_SDATA_16BIT		(1 << 2)
+#define HDMI_I2S_SDATA_20BIT		(2 << 2)
+#define HDMI_I2S_SDATA_24BIT		(3 << 2)
+#define HDMI_I2S_BASIC_FORMAT		(0)
+#define HDMI_I2S_L_JUST_FORMAT		(2)
+#define HDMI_I2S_R_JUST_FORMAT		(3)
+#define HDMI_I2S_CON_2_CLR		(~(0xFF))
+#define HDMI_I2S_SET_BIT_CH(x)		(((x) & 0x7) << 4)
+#define HDMI_I2S_SET_SDATA_BIT(x)	(((x) & 0x7) << 2)
+
+/* I2S_PIN_SEL_0 */
+#define HDMI_I2S_SEL_SCLK(x)		(((x) & 0x7) << 4)
+#define HDMI_I2S_SEL_LRCK(x)		((x) & 0x7)
+
+/* I2S_PIN_SEL_1 */
+#define HDMI_I2S_SEL_SDATA1(x)		(((x) & 0x7) << 4)
+#define HDMI_I2S_SEL_SDATA2(x)		((x) & 0x7)
+
+/* I2S_PIN_SEL_2 */
+#define HDMI_I2S_SEL_SDATA3(x)		(((x) & 0x7) << 4)
+#define HDMI_I2S_SEL_SDATA2(x)		((x) & 0x7)
+
+/* I2S_PIN_SEL_3 */
+#define HDMI_I2S_SEL_DSD(x)		((x) & 0x7)
+
+/* I2S_DSD_CON */
+#define HDMI_I2S_DSD_CLK_RI_EDGE	(1 << 1)
+#define HDMI_I2S_DSD_CLK_FA_EDGE	(0 << 1)
+#define HDMI_I2S_DSD_ENABLE		(1)
+#define HDMI_I2S_DSD_DISABLE		(0)
+
+/* I2S_MUX_CON */
+#define HDMI_I2S_NOISE_FILTER_ZERO	(0 << 5)
+#define HDMI_I2S_NOISE_FILTER_2_STAGE	(1 << 5)
+#define HDMI_I2S_NOISE_FILTER_3_STAGE	(2 << 5)
+#define HDMI_I2S_NOISE_FILTER_4_STAGE	(3 << 5)
+#define HDMI_I2S_NOISE_FILTER_5_STAGE	(4 << 5)
+#define HDMI_I2S_IN_DISABLE		(1 << 4)
+#define HDMI_I2S_IN_ENABLE		(0 << 4)
+#define HDMI_I2S_AUD_SPDIF		(0 << 2)
+#define HDMI_I2S_AUD_I2S		(1 << 2)
+#define HDMI_I2S_AUD_DSD		(2 << 2)
+#define HDMI_I2S_CUV_SPDIF_ENABLE	(0 << 1)
+#define HDMI_I2S_CUV_I2S_ENABLE		(1 << 1)
+#define HDMI_I2S_MUX_DISABLE		(0)
+#define HDMI_I2S_MUX_ENABLE		(1)
+#define HDMI_I2S_MUX_CON_CLR		(~(0xFF))
+
+/* I2S_CH_ST_CON */
+#define HDMI_I2S_CH_STATUS_RELOAD	(1)
+#define HDMI_I2S_CH_ST_CON_CLR		(~(1))
+
+/* I2S_CH_ST_0 / I2S_CH_ST_SH_0 */
+#define HDMI_I2S_CH_STATUS_MODE_0	(0 << 6)
+#define HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH	(0 << 3)
+#define HDMI_I2S_2AUD_CH_WITH_PREEMPH	(1 << 3)
+#define HDMI_I2S_DEFAULT_EMPHASIS	(0 << 3)
+#define HDMI_I2S_COPYRIGHT		(0 << 2)
+#define HDMI_I2S_NO_COPYRIGHT		(1 << 2)
+#define HDMI_I2S_LINEAR_PCM		(0 << 1)
+#define HDMI_I2S_NO_LINEAR_PCM		(1 << 1)
+#define HDMI_I2S_CONSUMER_FORMAT	(0)
+#define HDMI_I2S_PROF_FORMAT		(1)
+#define HDMI_I2S_CH_ST_0_CLR		(~(0xFF))
+
+/* I2S_CH_ST_1 / I2S_CH_ST_SH_1 */
+#define HDMI_I2S_CD_PLAYER		(0x00)
+#define HDMI_I2S_DAT_PLAYER		(0x03)
+#define HDMI_I2S_DCC_PLAYER		(0x43)
+#define HDMI_I2S_MINI_DISC_PLAYER	(0x49)
+
+/* I2S_CH_ST_2 / I2S_CH_ST_SH_2 */
+#define HDMI_I2S_CHANNEL_NUM_MASK	(0xF << 4)
+#define HDMI_I2S_SOURCE_NUM_MASK	(0xF)
+#define HDMI_I2S_SET_CHANNEL_NUM(x)	(((x) & (0xF)) << 4)
+#define HDMI_I2S_SET_SOURCE_NUM(x)	((x) & (0xF))
+
+/* I2S_CH_ST_3 / I2S_CH_ST_SH_3 */
+#define HDMI_I2S_CLK_ACCUR_LEVEL_1	(1 << 4)
+#define HDMI_I2S_CLK_ACCUR_LEVEL_2	(0 << 4)
+#define HDMI_I2S_CLK_ACCUR_LEVEL_3	(2 << 4)
+#define HDMI_I2S_SMP_FREQ_44_1		(0x0)
+#define HDMI_I2S_SMP_FREQ_48		(0x2)
+#define HDMI_I2S_SMP_FREQ_32		(0x3)
+#define HDMI_I2S_SMP_FREQ_96		(0xA)
+#define HDMI_I2S_SET_SMP_FREQ(x)	((x) & (0xF))
+
+/* I2S_CH_ST_4 / I2S_CH_ST_SH_4 */
+#define HDMI_I2S_ORG_SMP_FREQ_44_1	(0xF << 4)
+#define HDMI_I2S_ORG_SMP_FREQ_88_2	(0x7 << 4)
+#define HDMI_I2S_ORG_SMP_FREQ_22_05	(0xB << 4)
+#define HDMI_I2S_ORG_SMP_FREQ_176_4	(0x3 << 4)
+#define HDMI_I2S_WORD_LEN_NOT_DEFINE	(0x0 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_20BITS	(0x1 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_22BITS	(0x2 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_23BITS	(0x4 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_24BITS	(0x5 << 1)
+#define HDMI_I2S_WORD_LEN_MAX24_21BITS	(0x6 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_16BITS	(0x1 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_18BITS	(0x2 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_19BITS	(0x4 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_20BITS	(0x5 << 1)
+#define HDMI_I2S_WORD_LEN_MAX20_17BITS	(0x6 << 1)
+#define HDMI_I2S_WORD_LEN_MAX_24BITS	(1)
+#define HDMI_I2S_WORD_LEN_MAX_20BITS	(0)
+
+/* I2S_MUX_CH */
+#define HDMI_I2S_CH3_R_EN		(1 << 7)
+#define HDMI_I2S_CH3_L_EN		(1 << 6)
+#define HDMI_I2S_CH3_EN			(3 << 6)
+#define HDMI_I2S_CH2_R_EN		(1 << 5)
+#define HDMI_I2S_CH2_L_EN		(1 << 4)
+#define HDMI_I2S_CH2_EN			(3 << 4)
+#define HDMI_I2S_CH1_R_EN		(1 << 3)
+#define HDMI_I2S_CH1_L_EN		(1 << 2)
+#define HDMI_I2S_CH1_EN			(3 << 2)
+#define HDMI_I2S_CH0_R_EN		(1 << 1)
+#define HDMI_I2S_CH0_L_EN		(1)
+#define HDMI_I2S_CH0_EN			(3)
+#define HDMI_I2S_CH_ALL_EN		(0xFF)
+#define HDMI_I2S_MUX_CH_CLR		(~HDMI_I2S_CH_ALL_EN)
+
+/* I2S_MUX_CUV */
+#define HDMI_I2S_CUV_R_EN		(1 << 1)
+#define HDMI_I2S_CUV_L_EN		(1)
+#define HDMI_I2S_CUV_RL_EN		(0x03)
+
+/* I2S_CUV_L_R */
+#define HDMI_I2S_CUV_R_DATA_MASK	(0x7 << 4)
+#define HDMI_I2S_CUV_L_DATA_MASK	(0x7)
+
+/* Timing generator registers */
+/* TG configure/status registers */
+#define HDMI_TG_VACT_ST3_L		HDMI_TG_BASE(0x0068)
+#define HDMI_TG_VACT_ST3_H		HDMI_TG_BASE(0x006c)
+#define HDMI_TG_VACT_ST4_L		HDMI_TG_BASE(0x0070)
+#define HDMI_TG_VACT_ST4_H		HDMI_TG_BASE(0x0074)
+#define HDMI_TG_3D			HDMI_TG_BASE(0x00F0)
+
+#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/linux-imx/drivers/gpu/drm/exynos/regs-mixer.h b/linux-imx/drivers/gpu/drm/exynos/regs-mixer.h
new file mode 100644
index 0000000..5d8dbc0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/regs-mixer.h
@@ -0,0 +1,144 @@
+/*
+ *
+ *  Cloned from drivers/media/video/s5p-tv/regs-mixer.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Mixer register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef SAMSUNG_REGS_MIXER_H
+#define SAMSUNG_REGS_MIXER_H
+
+/*
+ * Register part
+ */
+#define MXR_STATUS			0x0000
+#define MXR_CFG				0x0004
+#define MXR_INT_EN			0x0008
+#define MXR_INT_STATUS			0x000C
+#define MXR_LAYER_CFG			0x0010
+#define MXR_VIDEO_CFG			0x0014
+#define MXR_GRAPHIC0_CFG		0x0020
+#define MXR_GRAPHIC0_BASE		0x0024
+#define MXR_GRAPHIC0_SPAN		0x0028
+#define MXR_GRAPHIC0_SXY		0x002C
+#define MXR_GRAPHIC0_WH			0x0030
+#define MXR_GRAPHIC0_DXY		0x0034
+#define MXR_GRAPHIC0_BLANK		0x0038
+#define MXR_GRAPHIC1_CFG		0x0040
+#define MXR_GRAPHIC1_BASE		0x0044
+#define MXR_GRAPHIC1_SPAN		0x0048
+#define MXR_GRAPHIC1_SXY		0x004C
+#define MXR_GRAPHIC1_WH			0x0050
+#define MXR_GRAPHIC1_DXY		0x0054
+#define MXR_GRAPHIC1_BLANK		0x0058
+#define MXR_BG_CFG			0x0060
+#define MXR_BG_COLOR0			0x0064
+#define MXR_BG_COLOR1			0x0068
+#define MXR_BG_COLOR2			0x006C
+#define MXR_CM_COEFF_Y			0x0080
+#define MXR_CM_COEFF_CB			0x0084
+#define MXR_CM_COEFF_CR			0x0088
+#define MXR_GRAPHIC0_BASE_S		0x2024
+#define MXR_GRAPHIC1_BASE_S		0x2044
+
+/* for parametrized access to layer registers */
+#define MXR_GRAPHIC_CFG(i)		(0x0020 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE(i)		(0x0024 + (i) * 0x20)
+#define MXR_GRAPHIC_SPAN(i)		(0x0028 + (i) * 0x20)
+#define MXR_GRAPHIC_SXY(i)		(0x002C + (i) * 0x20)
+#define MXR_GRAPHIC_WH(i)		(0x0030 + (i) * 0x20)
+#define MXR_GRAPHIC_DXY(i)		(0x0034 + (i) * 0x20)
+#define MXR_GRAPHIC_BLANK(i)		(0x0038 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE_S(i)		(0x2024 + (i) * 0x20)
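+/*
+ * E.g. MXR_GRAPHIC_CFG(1) == 0x0040 == MXR_GRAPHIC1_CFG, so the
+ * parametrized and the fixed-name forms address the same registers.
+ */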
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+#define MXR_MASK(high_bit, low_bit) \
+	(((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define MXR_MASK_VAL(val, high_bit, low_bit) \
+	(((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
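+/*
+ * For example, MXR_MASK(11, 8) expands to ((2 << 3) - 1) << 8 == 0xf00,
+ * and MXR_MASK_VAL(v, 11, 8) places v into bits 11..8, discarding any
+ * bits of v outside that range.
+ */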
+
+/* bits for MXR_STATUS */
+#define MXR_STATUS_SOFT_RESET		(1 << 8)
+#define MXR_STATUS_16_BURST		(1 << 7)
+#define MXR_STATUS_BURST_MASK		(1 << 7)
+#define MXR_STATUS_BIG_ENDIAN		(1 << 3)
+#define MXR_STATUS_ENDIAN_MASK		(1 << 3)
+#define MXR_STATUS_SYNC_ENABLE		(1 << 2)
+#define MXR_STATUS_REG_RUN		(1 << 0)
+
+/* bits for MXR_CFG */
+#define MXR_CFG_LAYER_UPDATE		(1 << 31)
+#define MXR_CFG_LAYER_UPDATE_COUNT_MASK (3 << 29)
+#define MXR_CFG_RGB601_0_255		(0 << 9)
+#define MXR_CFG_RGB601_16_235		(1 << 9)
+#define MXR_CFG_RGB709_0_255		(2 << 9)
+#define MXR_CFG_RGB709_16_235		(3 << 9)
+#define MXR_CFG_RGB_FMT_MASK		0x600
+#define MXR_CFG_OUT_YUV444		(0 << 8)
+#define MXR_CFG_OUT_RGB888		(1 << 8)
+#define MXR_CFG_OUT_MASK		(1 << 8)
+#define MXR_CFG_DST_SDO			(0 << 7)
+#define MXR_CFG_DST_HDMI		(1 << 7)
+#define MXR_CFG_DST_MASK		(1 << 7)
+#define MXR_CFG_SCAN_HD_720		(0 << 6)
+#define MXR_CFG_SCAN_HD_1080		(1 << 6)
+#define MXR_CFG_GRP1_ENABLE		(1 << 5)
+#define MXR_CFG_GRP0_ENABLE		(1 << 4)
+#define MXR_CFG_VP_ENABLE		(1 << 3)
+#define MXR_CFG_SCAN_INTERLACE		(0 << 2)
+#define MXR_CFG_SCAN_PROGRASSIVE	(1 << 2)
+#define MXR_CFG_SCAN_NTSC		(0 << 1)
+#define MXR_CFG_SCAN_PAL		(1 << 1)
+#define MXR_CFG_SCAN_SD			(0 << 0)
+#define MXR_CFG_SCAN_HD			(1 << 0)
+#define MXR_CFG_SCAN_MASK		0x47
+
+/* bits for MXR_GRAPHICn_CFG */
+#define MXR_GRP_CFG_COLOR_KEY_DISABLE	(1 << 21)
+#define MXR_GRP_CFG_BLEND_PRE_MUL	(1 << 20)
+#define MXR_GRP_CFG_WIN_BLEND_EN	(1 << 17)
+#define MXR_GRP_CFG_PIXEL_BLEND_EN	(1 << 16)
+#define MXR_GRP_CFG_FORMAT_VAL(x)	MXR_MASK_VAL(x, 11, 8)
+#define MXR_GRP_CFG_FORMAT_MASK		MXR_GRP_CFG_FORMAT_VAL(~0)
+#define MXR_GRP_CFG_ALPHA_VAL(x)	MXR_MASK_VAL(x, 7, 0)
+
+/* bits for MXR_GRAPHICn_WH */
+#define MXR_GRP_WH_H_SCALE(x)		MXR_MASK_VAL(x, 28, 28)
+#define MXR_GRP_WH_V_SCALE(x)		MXR_MASK_VAL(x, 12, 12)
+#define MXR_GRP_WH_WIDTH(x)		MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_WH_HEIGHT(x)		MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_SXY */
+#define MXR_GRP_SXY_SX(x)		MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_SXY_SY(x)		MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_DXY */
+#define MXR_GRP_DXY_DX(x)		MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_DXY_DY(x)		MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_INT_EN */
+#define MXR_INT_EN_VSYNC		(1 << 11)
+#define MXR_INT_EN_ALL			(0x0f << 8)
+
+/* bit for MXR_INT_STATUS */
+#define MXR_INT_CLEAR_VSYNC		(1 << 11)
+#define MXR_INT_STATUS_VSYNC		(1 << 0)
+
+/* bit for MXR_LAYER_CFG */
+#define MXR_LAYER_CFG_GRP1_VAL(x)	MXR_MASK_VAL(x, 11, 8)
+#define MXR_LAYER_CFG_GRP0_VAL(x)	MXR_MASK_VAL(x, 7, 4)
+#define MXR_LAYER_CFG_VP_VAL(x)		MXR_MASK_VAL(x, 3, 0)
+
+#endif /* SAMSUNG_REGS_MIXER_H */
+
diff --git a/linux-imx/drivers/gpu/drm/exynos/regs-rotator.h b/linux-imx/drivers/gpu/drm/exynos/regs-rotator.h
new file mode 100644
index 0000000..a09ac6e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/regs-rotator.h
@@ -0,0 +1,73 @@
+/* drivers/gpu/drm/exynos/regs-rotator.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * Register definition file for Samsung Rotator Interface (Rotator) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_ROTATOR_H
+#define EXYNOS_REGS_ROTATOR_H
+
+/* Configuration */
+#define ROT_CONFIG			0x00
+#define ROT_CONFIG_IRQ			(3 << 8)
+
+/* Image Control */
+#define ROT_CONTROL			0x10
+#define ROT_CONTROL_PATTERN_WRITE	(1 << 16)
+#define ROT_CONTROL_FMT_YCBCR420_2P	(1 << 8)
+#define ROT_CONTROL_FMT_RGB888		(6 << 8)
+#define ROT_CONTROL_FMT_MASK		(7 << 8)
+#define ROT_CONTROL_FLIP_VERTICAL	(2 << 6)
+#define ROT_CONTROL_FLIP_HORIZONTAL	(3 << 6)
+#define ROT_CONTROL_FLIP_MASK		(3 << 6)
+#define ROT_CONTROL_ROT_90		(1 << 4)
+#define ROT_CONTROL_ROT_180		(2 << 4)
+#define ROT_CONTROL_ROT_270		(3 << 4)
+#define ROT_CONTROL_ROT_MASK		(3 << 4)
+#define ROT_CONTROL_START		(1 << 0)
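+
+/*
+ * Illustration: kicking off a 90-degree rotation of an RGB888 buffer
+ * amounts to writing
+ *
+ *	ROT_CONTROL_FMT_RGB888 | ROT_CONTROL_ROT_90 | ROT_CONTROL_START
+ *
+ * (i.e. 0x611) to ROT_CONTROL.
+ */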
+
+/* Status */
+#define ROT_STATUS			0x20
+#define ROT_STATUS_IRQ_PENDING(x)	(1 << (x))
+#define ROT_STATUS_IRQ(x)		(((x) >> 8) & 0x3)
+#define ROT_STATUS_IRQ_VAL_COMPLETE	1
+#define ROT_STATUS_IRQ_VAL_ILLEGAL	2
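+
+/*
+ * Illustration: after an interrupt, ROT_STATUS_IRQ(val) extracts bits 9:8
+ * of the status word, which compare against the *_IRQ_VAL_* codes above,
+ * e.g. ROT_STATUS_IRQ(val) == ROT_STATUS_IRQ_VAL_COMPLETE.
+ */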
+
+/* Buffer Address */
+#define ROT_SRC_BUF_ADDR(n)		(0x30 + ((n) << 2))
+#define ROT_DST_BUF_ADDR(n)		(0x50 + ((n) << 2))
+
+/* Buffer Size */
+#define ROT_SRC_BUF_SIZE		0x3c
+#define ROT_DST_BUF_SIZE		0x5c
+#define ROT_SET_BUF_SIZE_H(x)		((x) << 16)
+#define ROT_SET_BUF_SIZE_W(x)		((x) << 0)
+#define ROT_GET_BUF_SIZE_H(x)		((x) >> 16)
+#define ROT_GET_BUF_SIZE_W(x)		((x) & 0xffff)
+
+/* Crop Position */
+#define ROT_SRC_CROP_POS		0x40
+#define ROT_DST_CROP_POS		0x60
+#define ROT_CROP_POS_Y(x)		((x) << 16)
+#define ROT_CROP_POS_X(x)		((x) << 0)
+
+/* Source Crop Size */
+#define ROT_SRC_CROP_SIZE		0x44
+#define ROT_SRC_CROP_SIZE_H(x)		((x) << 16)
+#define ROT_SRC_CROP_SIZE_W(x)		((x) << 0)
+
+/* Round to nearest aligned value */
+#define ROT_ALIGN(x, align, mask)	(((x) + (1 << ((align) - 1))) & (mask))
+/* Minimum limit value */
+#define ROT_MIN(min, mask)		(((min) + ~(mask)) & (mask))
+/* Maximum limit value */
+#define ROT_MAX(max, mask)		((max) & (mask))
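+
+/*
+ * Worked example with align = 2 and mask = ~3 (4-byte granularity):
+ * ROT_ALIGN(5, 2, ~3) = (5 + 2) & ~3 = 4 and ROT_ALIGN(6, 2, ~3) = 8,
+ * i.e. round to the nearest multiple of four. ROT_MIN(5, ~3) = 8 rounds
+ * the limit up, while ROT_MAX(10, ~3) = 8 rounds it down.
+ */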
+
+#endif /* EXYNOS_REGS_ROTATOR_H */
+
diff --git a/linux-imx/drivers/gpu/drm/exynos/regs-vp.h b/linux-imx/drivers/gpu/drm/exynos/regs-vp.h
new file mode 100644
index 0000000..10b737a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/exynos/regs-vp.h
@@ -0,0 +1,91 @@
+/*
+ *
+ *  Cloned from drivers/media/video/s5p-tv/regs-vp.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * Video processor register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_VP_H
+#define SAMSUNG_REGS_VP_H
+
+/*
+ * Register part
+ */
+
+#define VP_ENABLE			0x0000
+#define VP_SRESET			0x0004
+#define VP_SHADOW_UPDATE		0x0008
+#define VP_FIELD_ID			0x000C
+#define VP_MODE				0x0010
+#define VP_IMG_SIZE_Y			0x0014
+#define VP_IMG_SIZE_C			0x0018
+#define VP_PER_RATE_CTRL		0x001C
+#define VP_TOP_Y_PTR			0x0028
+#define VP_BOT_Y_PTR			0x002C
+#define VP_TOP_C_PTR			0x0030
+#define VP_BOT_C_PTR			0x0034
+#define VP_ENDIAN_MODE			0x03CC
+#define VP_SRC_H_POSITION		0x0044
+#define VP_SRC_V_POSITION		0x0048
+#define VP_SRC_WIDTH			0x004C
+#define VP_SRC_HEIGHT			0x0050
+#define VP_DST_H_POSITION		0x0054
+#define VP_DST_V_POSITION		0x0058
+#define VP_DST_WIDTH			0x005C
+#define VP_DST_HEIGHT			0x0060
+#define VP_H_RATIO			0x0064
+#define VP_V_RATIO			0x0068
+#define VP_POLY8_Y0_LL			0x006C
+#define VP_POLY4_Y0_LL			0x00EC
+#define VP_POLY4_C0_LL			0x012C
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+
+#define VP_MASK(high_bit, low_bit) \
+	(((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define VP_MASK_VAL(val, high_bit, low_bit) \
+	(((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
+
+ /* VP_ENABLE */
+#define VP_ENABLE_ON			(1 << 0)
+
+/* VP_SRESET */
+#define VP_SRESET_PROCESSING		(1 << 0)
+
+/* VP_SHADOW_UPDATE */
+#define VP_SHADOW_UPDATE_ENABLE		(1 << 0)
+
+/* VP_MODE */
+#define VP_MODE_NV12			(0 << 6)
+#define VP_MODE_NV21			(1 << 6)
+#define VP_MODE_LINE_SKIP		(1 << 5)
+#define VP_MODE_MEM_LINEAR		(0 << 4)
+#define VP_MODE_MEM_TILED		(1 << 4)
+#define VP_MODE_FMT_MASK		(5 << 4)
+#define VP_MODE_FIELD_ID_AUTO_TOGGLING	(1 << 2)
+#define VP_MODE_2D_IPC			(1 << 1)
+
+/* VP_IMG_SIZE_Y */
+/* VP_IMG_SIZE_C */
+#define VP_IMG_HSIZE(x)			VP_MASK_VAL(x, 29, 16)
+#define VP_IMG_VSIZE(x)			VP_MASK_VAL(x, 13, 0)
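+
+/*
+ * For example, a 1920x1080 luma plane is described by
+ * VP_IMG_HSIZE(1920) | VP_IMG_VSIZE(1080).
+ */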
+
+/* VP_SRC_H_POSITION */
+#define VP_SRC_H_POSITION_VAL(x)	VP_MASK_VAL(x, 14, 4)
+
+/* VP_ENDIAN_MODE */
+#define VP_ENDIAN_MODE_LITTLE		(1 << 0)
+
+#endif /* SAMSUNG_REGS_VP_H */
diff --git a/linux-imx/drivers/gpu/drm/gma500/Kconfig b/linux-imx/drivers/gpu/drm/gma500/Kconfig
new file mode 100644
index 0000000..1f6e2df
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/Kconfig
@@ -0,0 +1,38 @@
+config DRM_GMA500
+	tristate "Intel GMA5/600 KMS Framebuffer"
+	depends on DRM && PCI && X86
+	select FB_CFB_COPYAREA
+	select FB_CFB_FILLRECT
+	select FB_CFB_IMAGEBLIT
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
+	select ACPI_VIDEO if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select VIDEO_OUTPUT_CONTROL if ACPI
+	select INPUT if ACPI
+	help
+	  Say yes for an experimental 2D KMS framebuffer driver for the
+	  Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
+	  devices.
+
+config DRM_GMA600
+	bool "Intel GMA600 support (Experimental)"
+	depends on DRM_GMA500
+	help
+	  Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
+	  platforms with LVDS ports. MIPI is not currently supported.
+
+config DRM_GMA3600
+	bool "Intel GMA3600/3650 support (Experimental)"
+	depends on DRM_GMA500
+	help
+	  Say yes to include basic support for Intel GMA3600/3650 (Intel
+	  Cedar Trail) platforms.
+
+config DRM_MEDFIELD
+	bool "Intel Medfield support (Experimental)"
+	depends on DRM_GMA500 && X86_INTEL_MID
+	help
+	  Say yes to include support for the Intel Medfield platform.
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/Makefile b/linux-imx/drivers/gpu/drm/gma500/Makefile
new file mode 100644
index 0000000..7a2d40a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/Makefile
@@ -0,0 +1,52 @@
+#
+#	KMS driver for the GMA500
+#
+ccflags-y += -I$(srctree)/include/drm
+
+gma500_gfx-y += \
+	  accel_2d.o \
+	  backlight.o \
+	  framebuffer.o \
+	  gem.o \
+	  gtt.o \
+	  intel_bios.o \
+	  intel_i2c.o \
+	  intel_gmbus.o \
+	  mmu.o \
+	  power.o \
+	  psb_drv.o \
+	  psb_intel_display.o \
+	  psb_intel_lvds.o \
+	  psb_intel_modes.o \
+	  psb_intel_sdvo.o \
+	  psb_lid.o \
+	  psb_irq.o \
+	  psb_device.o \
+	  mid_bios.o
+
+gma500_gfx-$(CONFIG_ACPI) += opregion.o
+
+gma500_gfx-$(CONFIG_DRM_GMA3600) +=  cdv_device.o \
+	  cdv_intel_crt.o \
+	  cdv_intel_display.o \
+	  cdv_intel_hdmi.o \
+	  cdv_intel_lvds.o \
+	  cdv_intel_dp.o
+
+gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
+	  oaktrail_crtc.o \
+	  oaktrail_lvds.o \
+	  oaktrail_hdmi.o \
+	  oaktrail_hdmi_i2c.o
+
+gma500_gfx-$(CONFIG_DRM_MEDFIELD) += mdfld_device.o \
+	  mdfld_output.o \
+	  mdfld_intel_display.o \
+	  mdfld_dsi_output.o \
+	  mdfld_dsi_dpi.o \
+	  mdfld_dsi_pkg_sender.o \
+	  mdfld_tpo_vid.o \
+	  mdfld_tmd_vid.o \
+	  tc35876x-dsi-lvds.o
+
+obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/linux-imx/drivers/gpu/drm/gma500/accel_2d.c b/linux-imx/drivers/gpu/drm/gma500/accel_2d.c
new file mode 100644
index 0000000..d5ef1a5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/accel_2d.c
@@ -0,0 +1,364 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "framebuffer.h"
+
+/**
+ *	psb_spank		-	reset the 2D engine
+ *	@dev_priv: our PSB DRM device
+ *
+ *	Soft reset the graphics engine and then reload the necessary registers.
+ *	We use this at initialisation time but it will become relevant for
+ *	accelerated X later
+ */
+void psb_spank(struct drm_psb_private *dev_priv)
+{
+	PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
+		_PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
+		_PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
+		_PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
+	PSB_RSGX32(PSB_CR_SOFT_RESET);
+
+	msleep(1);
+
+	PSB_WSGX32(0, PSB_CR_SOFT_RESET);
+	wmb();
+	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
+		   PSB_CR_BIF_CTRL);
+	wmb();
+	(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+
+	msleep(1);
+	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
+		   PSB_CR_BIF_CTRL);
+	(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+	PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+}
+
+/**
+ *	psb_2d_wait_available	-	wait for FIFO room
+ *	@dev_priv: our DRM device
+ *	@size: size (in dwords) of the command we want to issue
+ *
+ *	Wait until there is room to load the FIFO with our data. If the
+ *	device is not responding then reset it
+ */
+static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
+			  unsigned size)
+{
+	uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+	unsigned long t = jiffies + HZ;
+
+	while (avail < size) {
+		avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+		if (time_after(jiffies, t)) {
+			psb_spank(dev_priv);
+			return -EIO;
+		}
+	}
+	return 0;
+}
+
+/**
+ *	psbfb_2d_submit		-	submit a 2D command
+ *	@dev_priv: our DRM device
+ *	@cmdbuf: command to issue
+ *	@size: length (in dwords)
+ *
+ *	Issue one or more 2D commands to the accelerator. This needs to be
+ *	serialized later when we add the GEM interfaces for acceleration
+ */
+static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
+								unsigned size)
+{
+	int ret = 0;
+	int i;
+	unsigned submit_size;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->lock_2d, flags);
+	while (size > 0) {
+		submit_size = (size < 0x60) ? size : 0x60;
+		size -= submit_size;
+		ret = psb_2d_wait_available(dev_priv, submit_size);
+		if (ret)
+			break;
+
+		submit_size <<= 2;
+
+		for (i = 0; i < submit_size; i += 4)
+			PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
+
+		(void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
+	}
+	spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+	return ret;
+}
+
+
+/**
+ *	psb_accel_2d_copy_direction	-	compute blit order
+ *	@xdir: X direction of move
+ *	@ydir: Y direction of move
+ *
+ *	Compute the correct order settings to ensure that an overlapping blit
+ *	copies all the pixels correctly: e.g. when the destination lies to
+ *	the right of the source (xdir < 0) the copy must start from the
+ *	right-hand edge.
+ */
+static u32 psb_accel_2d_copy_direction(int xdir, int ydir)
+{
+	if (xdir < 0)
+		return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
+						PSB_2D_COPYORDER_TR2BL;
+	else
+		return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
+						PSB_2D_COPYORDER_TL2BR;
+}
+
+/**
+ *	psb_accel_2d_copy		-	accelerated 2D copy
+ *	@dev_priv: our DRM device
+ *	@src_offset: source offset in bytes
+ *	@src_stride: source stride in bytes
+ *	@src_format: psb 2D format defines
+ *	@dst_offset: destination offset in bytes
+ *	@dst_stride: destination stride in bytes
+ *	@dst_format: psb 2D format defines
+ *	@src_x: source X offset in pixels
+ *	@src_y: source Y offset in pixels
+ *	@dst_x: destination X offset in pixels
+ *	@dst_y: destination Y offset in pixels
+ *	@size_x: width of the copied area
+ *	@size_y: height of the copied area
+ *
+ *	Format and issue a 2D accelerated copy command.
+ */
+static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
+			     uint32_t src_offset, uint32_t src_stride,
+			     uint32_t src_format, uint32_t dst_offset,
+			     uint32_t dst_stride, uint32_t dst_format,
+			     uint16_t src_x, uint16_t src_y,
+			     uint16_t dst_x, uint16_t dst_y,
+			     uint16_t size_x, uint16_t size_y)
+{
+	uint32_t blit_cmd;
+	uint32_t buffer[10];
+	uint32_t *buf;
+	uint32_t direction;
+
+	buf = buffer;
+
+	direction =
+	    psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
+
+	if (direction == PSB_2D_COPYORDER_BR2TL ||
+	    direction == PSB_2D_COPYORDER_TR2BL) {
+		src_x += size_x - 1;
+		dst_x += size_x - 1;
+	}
+	if (direction == PSB_2D_COPYORDER_BR2TL ||
+	    direction == PSB_2D_COPYORDER_BL2TR) {
+		src_y += size_y - 1;
+		dst_y += size_y - 1;
+	}
+
+	blit_cmd =
+	    PSB_2D_BLIT_BH |
+	    PSB_2D_ROT_NONE |
+	    PSB_2D_DSTCK_DISABLE |
+	    PSB_2D_SRCCK_DISABLE |
+	    PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
+
+	*buf++ = PSB_2D_FENCE_BH;
+	*buf++ =
+	    PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
+					       PSB_2D_DST_STRIDE_SHIFT);
+	*buf++ = dst_offset;
+	*buf++ =
+	    PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
+					       PSB_2D_SRC_STRIDE_SHIFT);
+	*buf++ = src_offset;
+	*buf++ =
+	    PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
+	    (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
+	*buf++ = blit_cmd;
+	*buf++ =
+	    (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
+						  PSB_2D_DST_YSTART_SHIFT);
+	*buf++ =
+	    (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
+						  PSB_2D_DST_YSIZE_SHIFT);
+	*buf++ = PSB_2D_FLUSH_BH;
+
+	return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
+}
+
+/**
+ *	psbfb_copyarea_accel	-	copyarea acceleration for /dev/fb
+ *	@info: our framebuffer
+ *	@a: copyarea parameters from the framebuffer core
+ *
+ *	Perform a 2D copy via the accelerator
+ */
+static void psbfb_copyarea_accel(struct fb_info *info,
+				 const struct fb_copyarea *a)
+{
+	struct psb_fbdev *fbdev = info->par;
+	struct psb_framebuffer *psbfb = &fbdev->pfb;
+	struct drm_device *dev = psbfb->base.dev;
+	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t offset;
+	uint32_t stride;
+	uint32_t src_format;
+	uint32_t dst_format;
+
+	if (!fb)
+		return;
+
+	offset = psbfb->gtt->offset;
+	stride = fb->pitches[0];
+
+	switch (fb->depth) {
+	case 8:
+		src_format = PSB_2D_SRC_332RGB;
+		dst_format = PSB_2D_DST_332RGB;
+		break;
+	case 15:
+		src_format = PSB_2D_SRC_555RGB;
+		dst_format = PSB_2D_DST_555RGB;
+		break;
+	case 16:
+		src_format = PSB_2D_SRC_565RGB;
+		dst_format = PSB_2D_DST_565RGB;
+		break;
+	case 24:
+	case 32:
+		/* this is wrong, but since we don't do blending it's okay */
+		src_format = PSB_2D_SRC_8888ARGB;
+		dst_format = PSB_2D_DST_8888ARGB;
+		break;
+	default:
+		/* software fallback */
+		cfb_copyarea(info, a);
+		return;
+	}
+
+	if (!gma_power_begin(dev, false)) {
+		cfb_copyarea(info, a);
+		return;
+	}
+	psb_accel_2d_copy(dev_priv,
+			  offset, stride, src_format,
+			  offset, stride, dst_format,
+			  a->sx, a->sy, a->dx, a->dy, a->width, a->height);
+	gma_power_end(dev);
+}
+
+/**
+ *	psbfb_copyarea	-	2D copy interface
+ *	@info: our framebuffer
+ *	@region: region to copy
+ *
+ *	Copy an area of the framebuffer console either by the accelerator
+ *	or directly using the cfb helpers according to the request
+ */
+void psbfb_copyarea(struct fb_info *info,
+			   const struct fb_copyarea *region)
+{
+	if (unlikely(info->state != FBINFO_STATE_RUNNING))
+		return;
+
+	/* Avoid the 8 pixel erratum */
+	if (region->width == 8 || region->height == 8 ||
+		(info->flags & FBINFO_HWACCEL_DISABLED))
+		return cfb_copyarea(info, region);
+
+	psbfb_copyarea_accel(info, region);
+}
+
+/**
+ *	psbfb_sync	-	synchronize 2D
+ *	@info: our framebuffer
+ *
+ *	Wait for the 2D engine to quiesce so that we can do CPU
+ *	access to the framebuffer again
+ */
+int psbfb_sync(struct fb_info *info)
+{
+	struct psb_fbdev *fbdev = info->par;
+	struct psb_framebuffer *psbfb = &fbdev->pfb;
+	struct drm_device *dev = psbfb->base.dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned long _end = jiffies + DRM_HZ;
+	int busy = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->lock_2d, flags);
+	/*
+	 * First idle the 2D engine.
+	 */
+
+	if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
+	    ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
+		goto out;
+
+	do {
+		busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+		cpu_relax();
+	} while (busy && !time_after_eq(jiffies, _end));
+
+	if (busy)
+		busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+	if (busy)
+		goto out;
+
+	do {
+		busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+						_PSB_C2B_STATUS_BUSY) != 0);
+		cpu_relax();
+	} while (busy && !time_after_eq(jiffies, _end));
+	if (busy)
+		busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+					_PSB_C2B_STATUS_BUSY) != 0);
+
+out:
+	spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+	return (busy) ? -EBUSY : 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/backlight.c b/linux-imx/drivers/gpu/drm/gma500/backlight.c
new file mode 100644
index 0000000..143eba3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/backlight.c
@@ -0,0 +1,94 @@
+/*
+ * GMA500 Backlight Interface
+ *
+ * Copyright (c) 2009-2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Eric Knopp
+ *
+ */
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "intel_bios.h"
+#include "power.h"
+
+static void do_gma_backlight_set(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	backlight_update_status(dev_priv->backlight_device);
+#endif
+}
+
+void gma_backlight_enable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_enabled = true;
+	if (dev_priv->backlight_device) {
+		dev_priv->backlight_device->props.brightness = dev_priv->backlight_level;
+		do_gma_backlight_set(dev);
+	}
+#endif
+}
+
+void gma_backlight_disable(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_enabled = false;
+	if (dev_priv->backlight_device) {
+		dev_priv->backlight_device->props.brightness = 0;
+		do_gma_backlight_set(dev);
+	}
+#endif
+}
+
+void gma_backlight_set(struct drm_device *dev, int v)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_level = v;
+	if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
+		dev_priv->backlight_device->props.brightness = v;
+		do_gma_backlight_set(dev);
+	}
+#endif
+}
+
+int gma_backlight_init(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->backlight_enabled = true;
+	return dev_priv->ops->backlight_init(dev);
+#else
+	return 0;
+#endif
+}
+
+void gma_backlight_exit(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	if (dev_priv->backlight_device) {
+		dev_priv->backlight_device->props.brightness = 0;
+		backlight_update_status(dev_priv->backlight_device);
+		backlight_device_unregister(dev_priv->backlight_device);
+	}
+#endif
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/cdv_device.c b/linux-imx/drivers/gpu/drm/gma500/cdv_device.c
new file mode 100644
index 0000000..23e14e9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/cdv_device.c
@@ -0,0 +1,658 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "cdv_device.h"
+
+#define VGA_SR_INDEX		0x3c4
+#define VGA_SR_DATA		0x3c5
+
+static void cdv_disable_vga(struct drm_device *dev)
+{
+	u8 sr1;
+	u32 vga_reg;
+
+	vga_reg = VGACNTRL;
+
+	outb(1, VGA_SR_INDEX);
+	sr1 = inb(VGA_SR_DATA);
+	outb(sr1 | 1<<5, VGA_SR_DATA);
+	udelay(300);
+
+	REG_WRITE(vga_reg, VGA_DISP_DISABLE);
+	REG_READ(vga_reg);
+}
+
+static int cdv_output_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	drm_mode_create_scaling_mode_property(dev);
+
+	cdv_disable_vga(dev);
+
+	cdv_intel_crt_init(dev, &dev_priv->mode_dev);
+	cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
+
+	/* These bits indicate HDMI not SDVO on CDV */
+	if (REG_READ(SDVOB) & SDVO_DETECTED) {
+		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
+		if (REG_READ(DP_B) & DP_DETECTED)
+			cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_B);
+	}
+
+	if (REG_READ(SDVOC) & SDVO_DETECTED) {
+		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
+		if (REG_READ(DP_C) & DP_DETECTED)
+			cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_C);
+	}
+	return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ *	Cedar Trail Backlight Interfaces
+ */
+
+static struct backlight_device *cdv_backlight_device;
+
+static int cdv_backlight_combination_mode(struct drm_device *dev)
+{
+	return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
+}
+
+static u32 cdv_get_max_backlight(struct drm_device *dev)
+{
+	u32 max = REG_READ(BLC_PWM_CTL);
+
+	if (max == 0) {
+		DRM_DEBUG_KMS("LVDS Panel PWM value is 0!\n");
+		/* i915 does this too; I believe it means that we should not
+		 * smash the PWM control, as firmware will take control of it. */
+		return 1;
+	}
+
+	max >>= 16;
+	if (cdv_backlight_combination_mode(dev))
+		max *= 0xff;
+	return max;
+}
+
+static int cdv_get_brightness(struct backlight_device *bd)
+{
+	struct drm_device *dev = bl_get_data(bd);
+	u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+
+	if (cdv_backlight_combination_mode(dev)) {
+		u8 lbpc;
+
+		val &= ~1;
+		pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+		val *= lbpc;
+	}
+	return (val * 100) / cdv_get_max_backlight(dev);
+}
+
+static int cdv_set_brightness(struct backlight_device *bd)
+{
+	struct drm_device *dev = bl_get_data(bd);
+	int level = bd->props.brightness;
+	u32 blc_pwm_ctl;
+
+	/* Brightness is a percentage; only 1-100% is valid, so clamp the low end */
+	if (level < 1)
+		level = 1;
+
+	level *= cdv_get_max_backlight(dev);
+	level /= 100;
+
+	if (cdv_backlight_combination_mode(dev)) {
+		u32 max = cdv_get_max_backlight(dev);
+		u8 lbpc;
+
+		lbpc = level * 0xfe / max + 1;
+		level /= lbpc;
+
+		pci_write_config_byte(dev->pdev, 0xF4, lbpc);
+	}
+
+	blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
+				(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+	return 0;
+}
+
+static const struct backlight_ops cdv_ops = {
+	.get_brightness = cdv_get_brightness,
+	.update_status  = cdv_set_brightness,
+};
+
+static int cdv_backlight_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct backlight_properties props;
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 100;
+	props.type = BACKLIGHT_PLATFORM;
+
+	cdv_backlight_device = backlight_device_register("psb-bl",
+					NULL, (void *)dev, &cdv_ops, &props);
+	if (IS_ERR(cdv_backlight_device))
+		return PTR_ERR(cdv_backlight_device);
+
+	cdv_backlight_device->props.brightness =
+			cdv_get_brightness(cdv_backlight_device);
+	backlight_update_status(cdv_backlight_device);
+	dev_priv->backlight_device = cdv_backlight_device;
+	dev_priv->backlight_enabled = true;
+	return 0;
+}
+
+#endif
+
+/*
+ *	Provide the Cedarview specific chip logic and low level methods
+ *	for power management
+ *
+ *	FIXME: we need to implement the apm/ospm base management bits
+ *	for this and the MID devices.
+ */
+
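+/*
+ * Helpers for the message bus reached through the config space of the
+ * PCI root bridge (device 0:00.0): 0xD0 appears to be the message
+ * control register (opcode 0x10 = read, 0x11 = write, port in bits
+ * 23:16, offset in bits 15:8) and 0xD4 the corresponding data register.
+ */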
+static inline u32 CDV_MSG_READ32(uint port, uint offset)
+{
+	int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+	uint32_t ret_val = 0;
+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	pci_write_config_dword(pci_root, 0xD0, mcr);
+	pci_read_config_dword(pci_root, 0xD4, &ret_val);
+	pci_dev_put(pci_root);
+	return ret_val;
+}
+
+static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+	int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	pci_write_config_dword(pci_root, 0xD4, value);
+	pci_write_config_dword(pci_root, 0xD0, mcr);
+	pci_dev_put(pci_root);
+}
+
+#define PSB_PM_SSC			0x20
+#define PSB_PM_SSS			0x30
+#define PSB_PWRGT_GFX_ON		0x02
+#define PSB_PWRGT_GFX_OFF		0x01
+#define PSB_PWRGT_GFX_D0		0x00
+#define PSB_PWRGT_GFX_D3		0x03
+
+static void cdv_init_pm(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 pwr_cnt;
+	int i;
+
+	dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+							PSB_APMBA) & 0xFFFF;
+	dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+							PSB_OSPMBA) & 0xFFFF;
+
+	/* Power status */
+	pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+
+	/* Enable the GPU */
+	pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+	pwr_cnt |= PSB_PWRGT_GFX_ON;
+	outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+
+	/* Wait for the GPU power */
+	for (i = 0; i < 5; i++) {
+		u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+		if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0)
+			return;
+		udelay(10);
+	}
+	dev_err(dev->dev, "GPU: power management timed out.\n");
+}
+
+static void cdv_errata(struct drm_device *dev)
+{
+	/* Disable bonus launch.
+	 *	The CPU and GPU compete for memory, so the display misses
+	 *	updates and flickers. Worst with dual core, dual displays.
+	 *
+	 *	The Windows 7 graphics driver was fixed to work around the
+	 *	issue by disabling a feature called Bonus Launch, at the
+	 *	cost of some performance.
+	 */
+	CDV_MSG_WRITE32(3, 0x30, 0x08027108);
+}
+
+/**
+ *	cdv_save_display_registers	-	save registers lost on suspend
+ *	@dev: our DRM device
+ *
+ *	Save the state we need in order to be able to restore the interface
+ *	upon resume from suspend
+ */
+static int cdv_save_display_registers(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_save_area *regs = &dev_priv->regs;
+	struct drm_connector *connector;
+
+	dev_dbg(dev->dev, "Saving GPU registers.\n");
+
+	pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
+
+	regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D);
+	regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D);
+
+	regs->cdv.saveDSPARB = REG_READ(DSPARB);
+	regs->cdv.saveDSPFW[0] = REG_READ(DSPFW1);
+	regs->cdv.saveDSPFW[1] = REG_READ(DSPFW2);
+	regs->cdv.saveDSPFW[2] = REG_READ(DSPFW3);
+	regs->cdv.saveDSPFW[3] = REG_READ(DSPFW4);
+	regs->cdv.saveDSPFW[4] = REG_READ(DSPFW5);
+	regs->cdv.saveDSPFW[5] = REG_READ(DSPFW6);
+
+	regs->cdv.saveADPA = REG_READ(ADPA);
+
+	regs->cdv.savePP_CONTROL = REG_READ(PP_CONTROL);
+	regs->cdv.savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
+	regs->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+	regs->saveBLC_PWM_CTL2 = REG_READ(BLC_PWM_CTL2);
+	regs->cdv.saveLVDS = REG_READ(LVDS);
+
+	regs->cdv.savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
+
+	regs->cdv.savePP_ON_DELAYS = REG_READ(PP_ON_DELAYS);
+	regs->cdv.savePP_OFF_DELAYS = REG_READ(PP_OFF_DELAYS);
+	regs->cdv.savePP_CYCLE = REG_READ(PP_CYCLE);
+
+	regs->cdv.saveVGACNTRL = REG_READ(VGACNTRL);
+
+	regs->cdv.saveIER = REG_READ(PSB_INT_ENABLE_R);
+	regs->cdv.saveIMR = REG_READ(PSB_INT_MASK_R);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+
+	return 0;
+}
+
+/**
+ *	cdv_restore_display_registers	-	restore lost register state
+ *	@dev: our DRM device
+ *
+ *	Restore register state that was lost during suspend and resume.
+ *
+ *	FIXME: review
+ */
+static int cdv_restore_display_registers(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_save_area *regs = &dev_priv->regs;
+	struct drm_connector *connector;
+	u32 temp;
+
+	pci_write_config_byte(dev->pdev, 0xF4, regs->cdv.saveLBB);
+
+	REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D);
+	REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D);
+
+	/* BIOS does below anyway */
+	REG_WRITE(DPIO_CFG, 0);
+	REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
+
+	temp = REG_READ(DPLL_A);
+	if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) {
+		REG_WRITE(DPLL_A, temp | DPLL_SYNCLOCK_ENABLE);
+		REG_READ(DPLL_A);
+	}
+
+	temp = REG_READ(DPLL_B);
+	if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) {
+		REG_WRITE(DPLL_B, temp | DPLL_SYNCLOCK_ENABLE);
+		REG_READ(DPLL_B);
+	}
+
+	udelay(500);
+
+	REG_WRITE(DSPFW1, regs->cdv.saveDSPFW[0]);
+	REG_WRITE(DSPFW2, regs->cdv.saveDSPFW[1]);
+	REG_WRITE(DSPFW3, regs->cdv.saveDSPFW[2]);
+	REG_WRITE(DSPFW4, regs->cdv.saveDSPFW[3]);
+	REG_WRITE(DSPFW5, regs->cdv.saveDSPFW[4]);
+	REG_WRITE(DSPFW6, regs->cdv.saveDSPFW[5]);
+
+	REG_WRITE(DSPARB, regs->cdv.saveDSPARB);
+	REG_WRITE(ADPA, regs->cdv.saveADPA);
+
+	REG_WRITE(BLC_PWM_CTL2, regs->saveBLC_PWM_CTL2);
+	REG_WRITE(LVDS, regs->cdv.saveLVDS);
+	REG_WRITE(PFIT_CONTROL, regs->cdv.savePFIT_CONTROL);
+	REG_WRITE(PFIT_PGM_RATIOS, regs->cdv.savePFIT_PGM_RATIOS);
+	REG_WRITE(BLC_PWM_CTL, regs->saveBLC_PWM_CTL);
+	REG_WRITE(PP_ON_DELAYS, regs->cdv.savePP_ON_DELAYS);
+	REG_WRITE(PP_OFF_DELAYS, regs->cdv.savePP_OFF_DELAYS);
+	REG_WRITE(PP_CYCLE, regs->cdv.savePP_CYCLE);
+	REG_WRITE(PP_CONTROL, regs->cdv.savePP_CONTROL);
+
+	REG_WRITE(VGACNTRL, regs->cdv.saveVGACNTRL);
+
+	REG_WRITE(PSB_INT_ENABLE_R, regs->cdv.saveIER);
+	REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR);
+
+	/* Fix arbitration bug */
+	cdv_errata(dev);
+
+	drm_mode_config_reset(dev);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
+
+	/* Resume the modeset for every activated CRTC */
+	drm_helper_resume_force_mode(dev);
+	return 0;
+}
+
+static int cdv_power_down(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 pwr_cnt, pwr_mask, pwr_sts;
+	int tries = 5;
+
+	pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+	pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+	pwr_cnt |= PSB_PWRGT_GFX_OFF;
+	pwr_mask = PSB_PWRGT_GFX_MASK;
+
+	outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+
+	while (tries--) {
+		pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+		if ((pwr_sts & pwr_mask) == PSB_PWRGT_GFX_D3)
+			return 0;
+		udelay(10);
+	}
+	return 0;
+}
+
+static int cdv_power_up(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 pwr_cnt, pwr_mask, pwr_sts;
+	int tries = 5;
+
+	pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+	pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+	pwr_cnt |= PSB_PWRGT_GFX_ON;
+	pwr_mask = PSB_PWRGT_GFX_MASK;
+
+	outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+
+	while (tries--) {
+		pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+		if ((pwr_sts & pwr_mask) == PSB_PWRGT_GFX_D0)
+			return 0;
+		udelay(10);
+	}
+	return 0;
+}
+
+/* FIXME ? - shared with Poulsbo */
+static void cdv_get_core_freq(struct drm_device *dev)
+{
+	uint32_t clock;
+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+	pci_read_config_dword(pci_root, 0xD4, &clock);
+	pci_dev_put(pci_root);
+
+	switch (clock & 0x07) {
+	case 0:
+		dev_priv->core_freq = 100;
+		break;
+	case 1:
+		dev_priv->core_freq = 133;
+		break;
+	case 2:
+		dev_priv->core_freq = 150;
+		break;
+	case 3:
+		dev_priv->core_freq = 178;
+		break;
+	case 4:
+		dev_priv->core_freq = 200;
+		break;
+	case 5:
+	case 6:
+	case 7:
+		dev_priv->core_freq = 266;
+		break;
+	default:
+		dev_priv->core_freq = 0;
+	}
+}
+
+static void cdv_hotplug_work_func(struct work_struct *work)
+{
+	struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
+							hotplug_work);
+	struct drm_device *dev = dev_priv->dev;
+
+	/* Just fire off a uevent and let userspace tell us what to do */
+	drm_helper_hpd_irq_event(dev);
+}
+
+/*
+ * The core driver has received a hotplug IRQ. We are in IRQ context,
+ * so extract the needed information and kick off queued processing.
+ */
+
+static int cdv_hotplug_event(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	schedule_work(&dev_priv->hotplug_work);
+	REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+	return 1;
+}
+
+static void cdv_hotplug_enable(struct drm_device *dev, bool on)
+{
+	if (on) {
+		u32 hotplug = REG_READ(PORT_HOTPLUG_EN);
+		hotplug |= HDMIB_HOTPLUG_INT_EN | HDMIC_HOTPLUG_INT_EN |
+			   HDMID_HOTPLUG_INT_EN | CRT_HOTPLUG_INT_EN;
+		REG_WRITE(PORT_HOTPLUG_EN, hotplug);
+	} else {
+		REG_WRITE(PORT_HOTPLUG_EN, 0);
+		REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+	}
+}
+
+static const char *force_audio_names[] = {
+	"off",
+	"auto",
+	"on",
+};
+
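+/*
+ * Note the "i - 1" when the enum is built below: the property values
+ * exposed to userspace are off = -1, auto = 0, on = 1.
+ */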
+void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+	int i;
+
+	prop = dev_priv->force_audio_property;
+	if (prop == NULL) {
+		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+					   "audio",
+					   ARRAY_SIZE(force_audio_names));
+		if (prop == NULL)
+			return;
+
+		for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
+			drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
+
+		dev_priv->force_audio_property = prop;
+	}
+	drm_object_attach_property(&connector->base, prop, 0);
+}
+
+
+static const char *broadcast_rgb_names[] = {
+	"Full",
+	"Limited 16:235",
+};
+
+void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+	int i;
+
+	prop = dev_priv->broadcast_rgb_property;
+	if (prop == NULL) {
+		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+					   "Broadcast RGB",
+					   ARRAY_SIZE(broadcast_rgb_names));
+		if (prop == NULL)
+			return;
+
+		for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
+			drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+
+		dev_priv->broadcast_rgb_property = prop;
+	}
+
+	drm_object_attach_property(&connector->base, prop, 0);
+}
+
+/* Cedarview */
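+/* Per-pipe register map, indexed by pipe: e.g. cdv_regmap[1].dpll is DPLL_B */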
+static const struct psb_offset cdv_regmap[2] = {
+	{
+		.fp0 = FPA0,
+		.fp1 = FPA1,
+		.cntr = DSPACNTR,
+		.conf = PIPEACONF,
+		.src = PIPEASRC,
+		.dpll = DPLL_A,
+		.dpll_md = DPLL_A_MD,
+		.htotal = HTOTAL_A,
+		.hblank = HBLANK_A,
+		.hsync = HSYNC_A,
+		.vtotal = VTOTAL_A,
+		.vblank = VBLANK_A,
+		.vsync = VSYNC_A,
+		.stride = DSPASTRIDE,
+		.size = DSPASIZE,
+		.pos = DSPAPOS,
+		.base = DSPABASE,
+		.surf = DSPASURF,
+		.addr = DSPABASE,
+		.status = PIPEASTAT,
+		.linoff = DSPALINOFF,
+		.tileoff = DSPATILEOFF,
+		.palette = PALETTE_A,
+	},
+	{
+		.fp0 = FPB0,
+		.fp1 = FPB1,
+		.cntr = DSPBCNTR,
+		.conf = PIPEBCONF,
+		.src = PIPEBSRC,
+		.dpll = DPLL_B,
+		.dpll_md = DPLL_B_MD,
+		.htotal = HTOTAL_B,
+		.hblank = HBLANK_B,
+		.hsync = HSYNC_B,
+		.vtotal = VTOTAL_B,
+		.vblank = VBLANK_B,
+		.vsync = VSYNC_B,
+		.stride = DSPBSTRIDE,
+		.size = DSPBSIZE,
+		.pos = DSPBPOS,
+		.base = DSPBBASE,
+		.surf = DSPBSURF,
+		.addr = DSPBBASE,
+		.status = PIPEBSTAT,
+		.linoff = DSPBLINOFF,
+		.tileoff = DSPBTILEOFF,
+		.palette = PALETTE_B,
+	}
+};
+
+static int cdv_chip_setup(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
+
+	if (pci_enable_msi(dev->pdev))
+		dev_warn(dev->dev, "Enabling MSI failed!\n");
+	dev_priv->regmap = cdv_regmap;
+	cdv_get_core_freq(dev);
+	psb_intel_opregion_init(dev);
+	psb_intel_init_bios(dev);
+	cdv_hotplug_enable(dev, false);
+	return 0;
+}
+
+/* CDV is much like Poulsbo but has MID like SGX offsets and PM */
+
+const struct psb_ops cdv_chip_ops = {
+	.name = "GMA3600/3650",
+	.accel_2d = 0,
+	.pipes = 2,
+	.crtcs = 2,
+	.hdmi_mask = (1 << 0) | (1 << 1),
+	.lvds_mask = (1 << 1),
+	.cursor_needs_phys = 0,
+	.sgx_offset = MRST_SGX_OFFSET,
+	.chip_setup = cdv_chip_setup,
+	.errata = cdv_errata,
+
+	.crtc_helper = &cdv_intel_helper_funcs,
+	.crtc_funcs = &cdv_intel_crtc_funcs,
+
+	.output_init = cdv_output_init,
+	.hotplug = cdv_hotplug_event,
+	.hotplug_enable = cdv_hotplug_enable,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	.backlight_init = cdv_backlight_init,
+#endif
+
+	.init_pm = cdv_init_pm,
+	.save_regs = cdv_save_display_registers,
+	.restore_regs = cdv_restore_display_registers,
+	.power_down = cdv_power_down,
+	.power_up = cdv_power_up,
+};
diff --git a/linux-imx/drivers/gpu/drm/gma500/cdv_device.h b/linux-imx/drivers/gpu/drm/gma500/cdv_device.h
new file mode 100644
index 0000000..9561e17
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/cdv_device.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
+extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
+extern void cdv_intel_crt_init(struct drm_device *dev,
+			struct psb_intel_mode_device *mode_dev);
+extern void cdv_intel_lvds_init(struct drm_device *dev,
+			struct psb_intel_mode_device *mode_dev);
+extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev,
+			int reg);
+extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+					     struct drm_crtc *crtc);
+
+static inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
+{
+	/* Wait for 20ms, i.e. one cycle at 50 Hz. */
+	/* FIXME: msleep ?? */
+	mdelay(20);
+}
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/cdv_intel_crt.c b/linux-imx/drivers/gpu/drm/gma500/cdv_intel_crt.c
new file mode 100644
index 0000000..7b8386f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include "cdv_device.h"
+#include <linux/pm_runtime.h>
+
+
+static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	u32 temp, reg;
+	reg = ADPA;
+
+	temp = REG_READ(reg);
+	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+	temp &= ~ADPA_DAC_ENABLE;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		temp |= ADPA_DAC_ENABLE;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+		break;
+	}
+
+	REG_WRITE(reg, temp);
+}
+
+static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	/* The lowest clock for CDV is 20000 kHz (20 MHz) */
+	if (mode->clock < 20000)
+		return MODE_CLOCK_LOW;
+
+	/* The max clock for CDV is 355 MHz instead of 400 MHz */
+	if (mode->clock > 355000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+
+	struct drm_device *dev = encoder->dev;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct psb_intel_crtc *psb_intel_crtc =
+					to_psb_intel_crtc(crtc);
+	int dpll_md_reg;
+	u32 adpa, dpll_md;
+	u32 adpa_reg;
+
+	if (psb_intel_crtc->pipe == 0)
+		dpll_md_reg = DPLL_A_MD;
+	else
+		dpll_md_reg = DPLL_B_MD;
+
+	adpa_reg = ADPA;
+
+	/*
+	 * Disable separate mode multiplier used when cloning SDVO to CRT
+	 * XXX this needs to be adjusted when we really are cloning
+	 */
+	{
+		dpll_md = REG_READ(dpll_md_reg);
+		REG_WRITE(dpll_md_reg,
+			   dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+	}
+
+	adpa = 0;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+	if (psb_intel_crtc->pipe == 0)
+		adpa |= ADPA_PIPE_A_SELECT;
+	else
+		adpa |= ADPA_PIPE_B_SELECT;
+
+	REG_WRITE(adpa_reg, adpa);
+}
+
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Returns true if CRT is connected, false if disconnected.
+ */
+static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
+								bool force)
+{
+	struct drm_device *dev = connector->dev;
+	u32 hotplug_en;
+	int i, tries = 0, ret = false;
+	u32 orig;
+
+	/*
+	 * On CDV the CRT detect sequence needs to be done twice
+	 * to get a reliable result.
+	 */
+	tries = 2;
+
+	orig = hotplug_en = REG_READ(PORT_HOTPLUG_EN);
+	hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
+	hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+	hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+	for (i = 0; i < tries ; i++) {
+		unsigned long timeout;
+		/* turn on the FORCE_DETECT */
+		REG_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+		timeout = jiffies + msecs_to_jiffies(1000);
+		/* wait for FORCE_DETECT to go off */
+		do {
+			if (!(REG_READ(PORT_HOTPLUG_EN) &
+					CRT_HOTPLUG_FORCE_DETECT))
+				break;
+			msleep(1);
+		} while (time_after(timeout, jiffies));
+	}
+
+	if ((REG_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
+	    CRT_HOTPLUG_MONITOR_NONE)
+		ret = true;
+
+	 /* clear the interrupt we just generated, if any */
+	REG_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+
+	/* and put the bits back */
+	REG_WRITE(PORT_HOTPLUG_EN, orig);
+	return ret;
+}
+
+static enum drm_connector_status cdv_intel_crt_detect(
+				struct drm_connector *connector, bool force)
+{
+	if (cdv_intel_crt_detect_hotplug(connector, force))
+		return connector_status_connected;
+	else
+		return connector_status_disconnected;
+}
+
+static void cdv_intel_crt_destroy(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+
+	psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static int cdv_intel_crt_get_modes(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *psb_intel_encoder =
+				psb_intel_attached_encoder(connector);
+	return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter);
+}
+
+static int cdv_intel_crt_set_property(struct drm_connector *connector,
+				  struct drm_property *property,
+				  uint64_t value)
+{
+	return 0;
+}
+
+/*
+ * Routines for controlling stuff on the analog port
+ */
+
+static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
+	.dpms = cdv_intel_crt_dpms,
+	.mode_fixup = cdv_intel_crt_mode_fixup,
+	.prepare = psb_intel_encoder_prepare,
+	.commit = psb_intel_encoder_commit,
+	.mode_set = cdv_intel_crt_mode_set,
+};
+
+static const struct drm_connector_funcs cdv_intel_crt_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = cdv_intel_crt_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = cdv_intel_crt_destroy,
+	.set_property = cdv_intel_crt_set_property,
+};
+
+static const struct drm_connector_helper_funcs
+				cdv_intel_crt_connector_helper_funcs = {
+	.mode_valid = cdv_intel_crt_mode_valid,
+	.get_modes = cdv_intel_crt_get_modes,
+	.best_encoder = psb_intel_best_encoder,
+};
+
+static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
+	.destroy = cdv_intel_crt_enc_destroy,
+};
+
+void cdv_intel_crt_init(struct drm_device *dev,
+			struct psb_intel_mode_device *mode_dev)
+{
+
+	struct psb_intel_connector *psb_intel_connector;
+	struct psb_intel_encoder *psb_intel_encoder;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	u32 i2c_reg;
+
+	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+	if (!psb_intel_encoder)
+		return;
+
+	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+	if (!psb_intel_connector)
+		goto failed_connector;
+
+	connector = &psb_intel_connector->base;
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+	drm_connector_init(dev, connector,
+		&cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+	encoder = &psb_intel_encoder->base;
+	drm_encoder_init(dev, encoder,
+		&cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
+
+	psb_intel_connector_attach_encoder(psb_intel_connector,
+					   psb_intel_encoder);
+
+	/* Set up the DDC bus. */
+	i2c_reg = GPIOA;
+	/* Remove the following code for CDV */
+	/*
+	if (dev_priv->crt_ddc_bus != 0)
+		i2c_reg = dev_priv->crt_ddc_bus;
+	*/
+	psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+							  i2c_reg, "CRTDDC_A");
+	if (!psb_intel_encoder->ddc_bus) {
+		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+			   "failed.\n");
+		goto failed_ddc;
+	}
+
+	psb_intel_encoder->type = INTEL_OUTPUT_ANALOG;
+	/*
+	psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
+	psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
+	*/
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	drm_encoder_helper_add(encoder, &cdv_intel_crt_helper_funcs);
+	drm_connector_helper_add(connector,
+					&cdv_intel_crt_connector_helper_funcs);
+
+	drm_sysfs_connector_add(connector);
+
+	return;
+failed_ddc:
+	drm_encoder_cleanup(&psb_intel_encoder->base);
+	drm_connector_cleanup(&psb_intel_connector->base);
+	kfree(psb_intel_connector);
+failed_connector:
+	kfree(psb_intel_encoder);
+	return;
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/cdv_intel_display.c b/linux-imx/drivers/gpu/drm/gma500/cdv_intel_display.c
new file mode 100644
index 0000000..82430ad
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -0,0 +1,1790 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+#include "cdv_device.h"
+
+
+struct cdv_intel_range_t {
+	int min, max;
+};
+
+struct cdv_intel_p2_t {
+	int dot_limit;
+	int p2_slow, p2_fast;
+};
+
+struct cdv_intel_clock_t {
+	/* given values */
+	int n;
+	int m1, m2;
+	int p1, p2;
+	/* derived values */
+	int dot;
+	int vco;
+	int m;
+	int p;
+};
+
+#define INTEL_P2_NUM		      2
+
+struct cdv_intel_limit_t {
+	struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
+	struct cdv_intel_p2_t p2;
+	bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *,
+			int, int, struct cdv_intel_clock_t *);
+};
+
+static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
+	struct drm_crtc *crtc, int target, int refclk,
+	struct cdv_intel_clock_t *best_clock);
+static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
+				int refclk,
+				struct cdv_intel_clock_t *best_clock);
+
+#define CDV_LIMIT_SINGLE_LVDS_96	0
+#define CDV_LIMIT_SINGLE_LVDS_100	1
+#define CDV_LIMIT_DAC_HDMI_27		2
+#define CDV_LIMIT_DAC_HDMI_96		3
+#define CDV_LIMIT_DP_27			4
+#define CDV_LIMIT_DP_100		5
+
+static const struct cdv_intel_limit_t cdv_intel_limits[] = {
+	{			/* CDV_SINGLE_LVDS_96MHz */
+	 .dot = {.min = 20000, .max = 115500},
+	 .vco = {.min = 1800000, .max = 3600000},
+	 .n = {.min = 2, .max = 6},
+	 .m = {.min = 60, .max = 160},
+	 .m1 = {.min = 0, .max = 0},
+	 .m2 = {.min = 58, .max = 158},
+	 .p = {.min = 28, .max = 140},
+	 .p1 = {.min = 2, .max = 10},
+	 .p2 = {.dot_limit = 200000,
+		.p2_slow = 14, .p2_fast = 14},
+		.find_pll = cdv_intel_find_best_PLL,
+	 },
+	{			/* CDV_SINGLE_LVDS_100MHz */
+	 .dot = {.min = 20000, .max = 115500},
+	 .vco = {.min = 1800000, .max = 3600000},
+	 .n = {.min = 2, .max = 6},
+	 .m = {.min = 60, .max = 160},
+	 .m1 = {.min = 0, .max = 0},
+	 .m2 = {.min = 58, .max = 158},
+	 .p = {.min = 28, .max = 140},
+	 .p1 = {.min = 2, .max = 10},
+	 /* The single-channel range is 25-112 MHz, and dual-channel
+	  * is 80-224 MHz.  Prefer single channel as much as possible.
+	  */
+	 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+	.find_pll = cdv_intel_find_best_PLL,
+	 },
+	{			/* CDV_DAC_HDMI_27MHz */
+	 .dot = {.min = 20000, .max = 400000},
+	 .vco = {.min = 1809000, .max = 3564000},
+	 .n = {.min = 1, .max = 1},
+	 .m = {.min = 67, .max = 132},
+	 .m1 = {.min = 0, .max = 0},
+	 .m2 = {.min = 65, .max = 130},
+	 .p = {.min = 5, .max = 90},
+	 .p1 = {.min = 1, .max = 9},
+	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+	.find_pll = cdv_intel_find_best_PLL,
+	 },
+	{			/* CDV_DAC_HDMI_96MHz */
+	 .dot = {.min = 20000, .max = 400000},
+	 .vco = {.min = 1800000, .max = 3600000},
+	 .n = {.min = 2, .max = 6},
+	 .m = {.min = 60, .max = 160},
+	 .m1 = {.min = 0, .max = 0},
+	 .m2 = {.min = 58, .max = 158},
+	 .p = {.min = 5, .max = 100},
+	 .p1 = {.min = 1, .max = 10},
+	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+	.find_pll = cdv_intel_find_best_PLL,
+	 },
+	{			/* CDV_DP_27MHz */
+	 .dot = {.min = 160000, .max = 272000},
+	 .vco = {.min = 1809000, .max = 3564000},
+	 .n = {.min = 1, .max = 1},
+	 .m = {.min = 67, .max = 132},
+	 .m1 = {.min = 0, .max = 0},
+	 .m2 = {.min = 65, .max = 130},
+	 .p = {.min = 5, .max = 90},
+	 .p1 = {.min = 1, .max = 9},
+	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+	 .find_pll = cdv_intel_find_dp_pll,
+	 },
+	{			/* CDV_DP_100MHz */
+	 .dot = {.min = 160000, .max = 272000},
+	 .vco = {.min = 1800000, .max = 3600000},
+	 .n = {.min = 2, .max = 6},
+	 .m = {.min = 60, .max = 164},
+	 .m1 = {.min = 0, .max = 0},
+	 .m2 = {.min = 58, .max = 162},
+	 .p = {.min = 5, .max = 100},
+	 .p1 = {.min = 1, .max = 10},
+	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
+	 .find_pll = cdv_intel_find_dp_pll,
+	 }
+};
+
+#define _wait_for(COND, MS, W) ({ \
+	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
+	int ret__ = 0;							\
+	while (!(COND)) {						\
+		if (time_after(jiffies, timeout__)) {			\
+			ret__ = -ETIMEDOUT;				\
+			break;						\
+		}							\
+		if (W && !in_dbg_master())				\
+			msleep(W);					\
+	}								\
+	ret__;								\
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
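+
+/*
+ * wait_for() polls COND roughly every millisecond for up to MS
+ * milliseconds and evaluates to 0 on success or -ETIMEDOUT on
+ * timeout, e.g. (BUSY_BIT here is purely illustrative):
+ *
+ *	if (wait_for((REG_READ(reg) & BUSY_BIT) == 0, 50))
+ *		return -ETIMEDOUT;
+ */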
+
+
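+/*
+ * Sideband (SB) bus access: each transaction loads SB_ADDR (and
+ * SB_DATA for writes), then kicks off a packet via SB_PCKT with an
+ * opcode, destination and byte-enable mask, polling SB_BUSY before
+ * and after to ensure the bus is idle.
+ */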
+int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
+{
+	int ret;
+
+	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+	if (ret) {
+		DRM_ERROR("timeout waiting for SB to idle before read\n");
+		return ret;
+	}
+
+	REG_WRITE(SB_ADDR, reg);
+	REG_WRITE(SB_PCKT,
+		   SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
+		   SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+		   SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+	if (ret) {
+		DRM_ERROR("timeout waiting for SB to idle after read\n");
+		return ret;
+	}
+
+	*val = REG_READ(SB_DATA);
+
+	return 0;
+}
+
+int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
+{
+	int ret;
+	static bool dpio_debug = true;
+	u32 temp;
+
+	if (dpio_debug) {
+		if (cdv_sb_read(dev, reg, &temp) == 0)
+			DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
+		DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
+	}
+
+	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+	if (ret) {
+		DRM_ERROR("timeout waiting for SB to idle before write\n");
+		return ret;
+	}
+
+	REG_WRITE(SB_ADDR, reg);
+	REG_WRITE(SB_DATA, val);
+	REG_WRITE(SB_PCKT,
+		   SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
+		   SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+		   SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+	if (ret) {
+		DRM_ERROR("timeout waiting for SB to idle after write\n");
+		return ret;
+	}
+
+	if (dpio_debug) {
+		if (cdv_sb_read(dev, reg, &temp) == 0)
+			DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
+	}
+
+	return 0;
+}
+
+/* Reset the DPIO configuration register.  The BIOS does this at every
+ * mode set.
+ */
+void cdv_sb_reset(struct drm_device *dev)
+{
+
+	REG_WRITE(DPIO_CFG, 0);
+	REG_READ(DPIO_CFG);
+	REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
+}
+
+/* Unlike most Intel display engines, on Cedarview the DPLL registers
+ * are behind this sideband bus.  They must be programmed while the
+ * DPLL reference clock is on in the DPLL control register, but before
+ * the DPLL is enabled in the DPLL control register.
+ */
+static int
+cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
+			       struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select)
+{
+	struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_crtc->pipe;
+	u32 m, n_vco, p;
+	int ret = 0;
+	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+	int ref_sfr = (pipe == 0) ? SB_REF_DPLLA : SB_REF_DPLLB;
+	u32 ref_value;
+	u32 lane_reg, lane_value;
+
+	cdv_sb_reset(dev);
+
+	REG_WRITE(dpll_reg, DPLL_SYNCLOCK_ENABLE | DPLL_VGA_MODE_DIS);
+
+	udelay(100);
+
+	/* Follow the BIOS and write the REF/SFR register (hardcoded value) */
+	ref_value = 0x68A701;
+
+	cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);
+
+	/* We don't know what the other fields of these regs are, so
+	 * leave them in place.
+	 */
+	/*
+	 * Bits 14:13 of 0x8010/0x8030 select the reference clock for
+	 * pipe A/B. Display spec 1.06 has the wrong definition; the
+	 * correct one is:
+	 *
+	 * refclka means a pipe uses the clock from its own PLL.
+	 *
+	 * If DPLLA is set to 01 and DPLLB to 01, each uses the clock
+	 * from its own PLL.
+	 *
+	 * If DPLLA is set to 01 and DPLLB to 02, both use the clock
+	 * from DPLLA.
+	 */
+	ret = cdv_sb_read(dev, ref_sfr, &ref_value);
+	if (ret)
+		return ret;
+	ref_value &= ~(REF_CLK_MASK);
+
+	/* use DPLL_A for pipeB on CRT/HDMI */
+	if (pipe == 1 && !is_lvds && !(ddi_select & DP_MASK)) {
+		DRM_DEBUG_KMS("use DPLLA for pipe B\n");
+		ref_value |= REF_CLK_DPLLA;
+	} else {
+		DRM_DEBUG_KMS("use their DPLL for pipe A/B\n");
+		ref_value |= REF_CLK_DPLL;
+	}
+	ret = cdv_sb_write(dev, ref_sfr, ref_value);
+	if (ret)
+		return ret;
+
+	ret = cdv_sb_read(dev, SB_M(pipe), &m);
+	if (ret)
+		return ret;
+	m &= ~SB_M_DIVIDER_MASK;
+	m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
+	ret = cdv_sb_write(dev, SB_M(pipe), m);
+	if (ret)
+		return ret;
+
+	ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
+	if (ret)
+		return ret;
+
+	/* Follow the BIOS to program the N_DIVIDER REG */
+	n_vco &= 0xFFFF;
+	n_vco |= 0x107;
+	n_vco &= ~(SB_N_VCO_SEL_MASK |
+		   SB_N_DIVIDER_MASK |
+		   SB_N_CB_TUNE_MASK);
+
+	n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);
+
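+	/* Select the VCO operating band and charge-pump (CB) tuning
+	 * based on the target VCO frequency (values in kHz). */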
+	if (clock->vco < 2250000) {
+		n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
+		n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
+	} else if (clock->vco < 2750000) {
+		n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
+		n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
+	} else if (clock->vco < 3300000) {
+		n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+		n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
+	} else {
+		n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+		n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
+	}
+
+	ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
+	if (ret)
+		return ret;
+
+	ret = cdv_sb_read(dev, SB_P(pipe), &p);
+	if (ret)
+		return ret;
+	p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
+	p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
+	switch (clock->p2) {
+	case 5:
+		p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
+		break;
+	case 10:
+		p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
+		break;
+	case 14:
+		p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
+		break;
+	case 7:
+		p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
+		break;
+	default:
+		DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
+		return -EINVAL;
+	}
+	ret = cdv_sb_write(dev, SB_P(pipe), p);
+	if (ret)
+		return ret;
+
+	if (ddi_select) {
+		if ((ddi_select & DDI_MASK) == DDI0_SELECT) {
+			lane_reg = PSB_LANE0;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
+
+			lane_reg = PSB_LANE1;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
+		} else {
+			lane_reg = PSB_LANE2;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
+
+			lane_reg = PSB_LANE3;
+			cdv_sb_read(dev, lane_reg, &lane_value);
+			lane_value &= ~(LANE_PLL_MASK);
+			lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
+			cdv_sb_write(dev, lane_reg, lane_value);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Returns whether any encoder on the specified pipe is of the specified type
+ */
+static bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *l_entry;
+
+	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+			struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(l_entry);
+			if (psb_intel_encoder->type == type)
+				return true;
+		}
+	}
+	return false;
+}
+
+static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
+							int refclk)
+{
+	const struct cdv_intel_limit_t *limit;
+	if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		/*
+		 * Now only single-channel LVDS is supported on CDV. If it is
+		 * incorrect, please add the dual-channel LVDS.
+		 */
+		if (refclk == 96000)
+			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
+		else
+			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
+	} else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+			psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+		if (refclk == 27000)
+			limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
+		else
+			limit = &cdv_intel_limits[CDV_LIMIT_DP_100];
+	} else {
+		if (refclk == 27000)
+			limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
+		else
+			limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_96];
+	}
+	return limit;
+}
+
+/* m1 is reserved as 0 in CDV, n is a ring counter */
+static void cdv_intel_clock(struct drm_device *dev,
+			int refclk, struct cdv_intel_clock_t *clock)
+{
+	clock->m = clock->m2 + 2;
+	clock->p = clock->p1 * clock->p2;
+	clock->vco = (refclk * clock->m) / clock->n;
+	clock->dot = clock->vco / clock->p;
+}
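+
+/*
+ * Worked example (values chosen for illustration, within the 96MHz
+ * LVDS limits above): refclk = 96000 kHz, n = 3, m2 = 100, p1 = 4,
+ * p2 = 14 gives m = 102, vco = 96000 * 102 / 3 = 3264000 kHz and
+ * dot = 3264000 / (4 * 14) = 58285 kHz, i.e. ~58.3 MHz.
+ */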
+
+
+#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
+				const struct cdv_intel_limit_t *limit,
+			       struct cdv_intel_clock_t *clock)
+{
+	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+		INTELPllInvalid("p1 out of range\n");
+	if (clock->p < limit->p.min || limit->p.max < clock->p)
+		INTELPllInvalid("p out of range\n");
+	/* unnecessary to check the range of m (m1/m2) and n again */
+	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+		INTELPllInvalid("vco out of range\n");
+	/* XXX: We may need to be checking "Dot clock"
+	 * depending on the multiplier, connector, etc.,
+	 * rather than just a single range.
+	 */
+	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+		INTELPllInvalid("dot out of range\n");
+
+	return true;
+}
+
+static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
+	struct drm_crtc *crtc, int target, int refclk,
+	struct cdv_intel_clock_t *best_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	struct cdv_intel_clock_t clock;
+	int err = target;
+
+
+	if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+		/*
+		 * For LVDS, if the panel is on, just rely on its current
+		 * settings for dual-channel.  We haven't figured out how to
+		 * reliably set up different single/dual channel state, if we
+		 * even can.
+		 */
+		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+		    LVDS_CLKB_POWER_UP)
+			clock.p2 = limit->p2.p2_fast;
+		else
+			clock.p2 = limit->p2.p2_slow;
+	} else {
+		if (target < limit->p2.dot_limit)
+			clock.p2 = limit->p2.p2_slow;
+		else
+			clock.p2 = limit->p2.p2_fast;
+	}
+
+	memset(best_clock, 0, sizeof(*best_clock));
+	clock.m1 = 0;
+	/* m1 is reserved as 0 in CDV, n is a ring counter.
+	   So skip the m1 loop */
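+	/* Exhaustive search over n, m2 and p1: err starts at the target
+	 * clock itself, so each valid divider set whose dot clock is
+	 * closer to the target replaces the current best. */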
+	for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
+		for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
+					     clock.m2++) {
+			for (clock.p1 = limit->p1.min;
+					clock.p1 <= limit->p1.max;
+					clock.p1++) {
+				int this_err;
+
+				cdv_intel_clock(dev, refclk, &clock);
+
+				if (!cdv_intel_PLL_is_valid(crtc,
+								limit, &clock))
+						continue;
+
+				this_err = abs(clock.dot - target);
+				if (this_err < err) {
+					*best_clock = clock;
+					err = this_err;
+				}
+			}
+		}
+	}
+
+	return err != target;
+}
+
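+/*
+ * DP links run at fixed rates, so the dividers are table-driven
+ * rather than searched. For example, with refclk = 27000 kHz,
+ * n = 1, m2 = 118 (m = 120), p1 = 2, p2 = 10: vco = 27000 * 120 =
+ * 3240000 kHz and dot = 3240000 / 20 = 162000 kHz, the 1.62 Gbps
+ * link rate; m2 = 98 with p = 10 yields 270000 kHz (2.7 Gbps).
+ */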
+static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
+				int refclk,
+				struct cdv_intel_clock_t *best_clock)
+{
+	struct cdv_intel_clock_t clock;
+	if (refclk == 27000) {
+		if (target < 200000) {
+			clock.p1 = 2;
+			clock.p2 = 10;
+			clock.n = 1;
+			clock.m1 = 0;
+			clock.m2 = 118;
+		} else {
+			clock.p1 = 1;
+			clock.p2 = 10;
+			clock.n = 1;
+			clock.m1 = 0;
+			clock.m2 = 98;
+		}
+	} else if (refclk == 100000) {
+		if (target < 200000) {
+			clock.p1 = 2;
+			clock.p2 = 10;
+			clock.n = 5;
+			clock.m1 = 0;
+			clock.m2 = 160;
+		} else {
+			clock.p1 = 1;
+			clock.p2 = 10;
+			clock.n = 5;
+			clock.m1 = 0;
+			clock.m2 = 133;
+		}
+	} else
+		return false;
+	clock.m = clock.m2 + 2;
+	clock.p = clock.p1 * clock.p2;
+	clock.vco = (refclk * clock.m) / clock.n;
+	clock.dot = clock.vco / clock.p;
+	memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t));
+	return true;
+}
+
+static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
+			    int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	unsigned long start, offset;
+	u32 dspcntr;
+	int ret = 0;
+
+	if (!gma_power_begin(dev, true))
+		return 0;
+
+	/* no fb bound */
+	if (!crtc->fb) {
+		dev_err(dev->dev, "No FB bound\n");
+		goto psb_intel_pipe_cleaner;
+	}
+
+
+	/* We are displaying this buffer, make sure it is actually loaded
+	   into the GTT */
+	ret = psb_gtt_pin(psbfb->gtt);
+	if (ret < 0)
+		goto psb_intel_pipe_set_base_exit;
+	start = psbfb->gtt->offset;
+	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+	REG_WRITE(map->stride, crtc->fb->pitches[0]);
+
+	dspcntr = REG_READ(map->cntr);
+	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		dspcntr |= DISPPLANE_8BPP;
+		break;
+	case 16:
+		if (crtc->fb->depth == 15)
+			dspcntr |= DISPPLANE_15_16BPP;
+		else
+			dspcntr |= DISPPLANE_16BPP;
+		break;
+	case 24:
+	case 32:
+		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+		break;
+	default:
+		dev_err(dev->dev, "Unknown color depth\n");
+		ret = -EINVAL;
+		goto psb_intel_pipe_set_base_exit;
+	}
+	REG_WRITE(map->cntr, dspcntr);
+
+	dev_dbg(dev->dev,
+		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);
+
+	REG_WRITE(map->base, offset);
+	REG_READ(map->base);
+	REG_WRITE(map->surf, start);
+	REG_READ(map->surf);
+
+psb_intel_pipe_cleaner:
+	/* If there was a previous display we can now unpin it */
+	if (old_fb)
+		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+psb_intel_pipe_set_base_exit:
+	gma_power_end(dev);
+	return ret;
+}
+
+#define		FIFO_PIPEA		(1 << 0)
+#define		FIFO_PIPEB		(1 << 1)
+
+static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
+{
+	struct drm_crtc *crtc;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = NULL;
+
+	crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+	if (crtc->fb == NULL || !psb_intel_crtc->active)
+		return false;
+	return true;
+}
+
+static bool cdv_intel_single_pipe_active(struct drm_device *dev)
+{
+	uint32_t pipe_enabled = 0;
+
+	if (cdv_intel_pipe_enabled(dev, 0))
+		pipe_enabled |= FIFO_PIPEA;
+
+	if (cdv_intel_pipe_enabled(dev, 1))
+		pipe_enabled |= FIFO_PIPEB;
+
+
+	DRM_DEBUG_KMS("pipe enabled %x\n", pipe_enabled);
+
+	if (pipe_enabled == FIFO_PIPEA || pipe_enabled == FIFO_PIPEB)
+		return true;
+	else
+		return false;
+}
+
+static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
+{
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+
+	if (psb_intel_crtc->pipe != 1)
+		return false;
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+
+		if (!connector->encoder
+		    || connector->encoder->crtc != crtc)
+			continue;
+
+		if (psb_intel_encoder->type == INTEL_OUTPUT_LVDS)
+			return true;
+	}
+
+	return false;
+}
+
+static void cdv_intel_disable_self_refresh(struct drm_device *dev)
+{
+	if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
+
+		/* Disable self-refresh before adjust WM */
+		REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN));
+		REG_READ(FW_BLC_SELF);
+
+		cdv_intel_wait_for_vblank(dev);
+
+		/* Cedarview workaround: write the overlay plane, which
+		 * forces the hardware to leave the MAX_FIFO state.
+		 */
+		REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/);
+		REG_READ(OV_OVADD);
+
+		cdv_intel_wait_for_vblank(dev);
+	}
+
+}
+
+static void cdv_intel_update_watermark(struct drm_device *dev, struct drm_crtc *crtc)
+{
+
+	if (cdv_intel_single_pipe_active(dev)) {
+		u32 fw;
+
+		fw = REG_READ(DSPFW1);
+		fw &= ~DSP_FIFO_SR_WM_MASK;
+		fw |= (0x7e << DSP_FIFO_SR_WM_SHIFT);
+		fw &= ~CURSOR_B_FIFO_WM_MASK;
+		fw |= (0x4 << CURSOR_B_FIFO_WM_SHIFT);
+		REG_WRITE(DSPFW1, fw);
+
+		fw = REG_READ(DSPFW2);
+		fw &= ~CURSOR_A_FIFO_WM_MASK;
+		fw |= (0x6 << CURSOR_A_FIFO_WM_SHIFT);
+		fw &= ~DSP_PLANE_C_FIFO_WM_MASK;
+		fw |= (0x8 << DSP_PLANE_C_FIFO_WM_SHIFT);
+		REG_WRITE(DSPFW2, fw);
+
+		REG_WRITE(DSPFW3, 0x36000000);
+
+		/* ignore FW4 */
+
+		if (is_pipeb_lvds(dev, crtc)) {
+			REG_WRITE(DSPFW5, 0x00040330);
+		} else {
+			fw = (3 << DSP_PLANE_B_FIFO_WM1_SHIFT) |
+			     (4 << DSP_PLANE_A_FIFO_WM1_SHIFT) |
+			     (3 << CURSOR_B_FIFO_WM1_SHIFT) |
+			     (4 << CURSOR_FIFO_SR_WM1_SHIFT);
+			REG_WRITE(DSPFW5, fw);
+		}
+
+		REG_WRITE(DSPFW6, 0x10);
+
+		cdv_intel_wait_for_vblank(dev);
+
+		/* enable self-refresh for single pipe active */
+		REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+		REG_READ(FW_BLC_SELF);
+		cdv_intel_wait_for_vblank(dev);
+
+	} else {
+
+		/* HW team suggested values... */
+		REG_WRITE(DSPFW1, 0x3f880808);
+		REG_WRITE(DSPFW2, 0x0b020202);
+		REG_WRITE(DSPFW3, 0x24000000);
+		REG_WRITE(DSPFW4, 0x08030202);
+		REG_WRITE(DSPFW5, 0x01010101);
+		REG_WRITE(DSPFW6, 0x1d0);
+
+		cdv_intel_wait_for_vblank(dev);
+
+		cdv_intel_disable_self_refresh(dev);
+	}
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int palreg = PALETTE_A;
+	int i;
+
+	/* The clocks have to be on to load the palette. */
+	if (!crtc->enabled)
+		return;
+
+	switch (psb_intel_crtc->pipe) {
+	case 0:
+		break;
+	case 1:
+		palreg = PALETTE_B;
+		break;
+	case 2:
+		palreg = PALETTE_C;
+		break;
+	default:
+		dev_err(dev->dev, "Illegal Pipe Number.\n");
+		return;
+	}
+
+	if (gma_power_begin(dev, false)) {
+		for (i = 0; i < 256; i++) {
+			REG_WRITE(palreg + 4 * i,
+				  ((psb_intel_crtc->lut_r[i] +
+				  psb_intel_crtc->lut_adj[i]) << 16) |
+				  ((psb_intel_crtc->lut_g[i] +
+				  psb_intel_crtc->lut_adj[i]) << 8) |
+				  (psb_intel_crtc->lut_b[i] +
+				  psb_intel_crtc->lut_adj[i]));
+		}
+		gma_power_end(dev);
+	} else {
+		for (i = 0; i < 256; i++) {
+			dev_priv->regs.pipe[0].palette[i] =
+				  ((psb_intel_crtc->lut_r[i] +
+				  psb_intel_crtc->lut_adj[i]) << 16) |
+				  ((psb_intel_crtc->lut_g[i] +
+				  psb_intel_crtc->lut_adj[i]) << 8) |
+				  (psb_intel_crtc->lut_b[i] +
+				  psb_intel_crtc->lut_adj[i]);
+		}
+
+	}
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	u32 temp;
+
+	/* XXX: When our outputs are all unaware of DPMS modes other than off
+	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+	 */
+	cdv_intel_disable_self_refresh(dev);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+		if (psb_intel_crtc->active)
+			break;
+
+		psb_intel_crtc->active = true;
+
+		/* Enable the DPLL */
+		temp = REG_READ(map->dpll);
+		if ((temp & DPLL_VCO_ENABLE) == 0) {
+			REG_WRITE(map->dpll, temp);
+			REG_READ(map->dpll);
+			/* Wait for the clocks to stabilize. */
+			udelay(150);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
+			/* Wait for the clocks to stabilize. */
+			udelay(150);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
+			/* Wait for the clocks to stabilize. */
+			udelay(150);
+		}
+
+		/* Jim Bish - switch plane and pipe per Scott */
+		/* Enable the plane */
+		temp = REG_READ(map->cntr);
+		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+			REG_WRITE(map->cntr,
+				  temp | DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(map->base, REG_READ(map->base));
+		}
+
+		udelay(150);
+
+		/* Enable the pipe */
+		temp = REG_READ(map->conf);
+		if ((temp & PIPEACONF_ENABLE) == 0)
+			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
+
+		temp = REG_READ(map->status);
+		temp &= ~(0xFFFF);
+		temp |= PIPE_FIFO_UNDERRUN;
+		REG_WRITE(map->status, temp);
+		REG_READ(map->status);
+
+		cdv_intel_crtc_load_lut(crtc);
+
+		/* Give the overlay scaler a chance to enable
+		 * if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+		break;
+	case DRM_MODE_DPMS_OFF:
+		if (!psb_intel_crtc->active)
+			break;
+
+		psb_intel_crtc->active = false;
+
+		/* Give the overlay scaler a chance to disable
+		 * if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+		/* Disable the VGA plane that we never use */
+		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+		/* Jim Bish - changed pipe/plane here as well. */
+
+		drm_vblank_off(dev, pipe);
+		/* Wait for vblank for the disable to take effect */
+		cdv_intel_wait_for_vblank(dev);
+
+		/* Next, disable display pipes */
+		temp = REG_READ(map->conf);
+		if ((temp & PIPEACONF_ENABLE) != 0) {
+			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+			REG_READ(map->conf);
+		}
+
+		/* Wait for vblank for the disable to take effect. */
+		cdv_intel_wait_for_vblank(dev);
+
+		udelay(150);
+
+		/* Disable display plane */
+		temp = REG_READ(map->cntr);
+		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+			REG_WRITE(map->cntr,
+				  temp & ~DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(map->base, REG_READ(map->base));
+			REG_READ(map->base);
+		}
+
+		temp = REG_READ(map->dpll);
+		if ((temp & DPLL_VCO_ENABLE) != 0) {
+			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
+		}
+
+		/* Wait for the clocks to turn off. */
+		udelay(150);
+		break;
+	}
+	cdv_intel_update_watermark(dev, crtc);
+	/*Set FIFO Watermarks*/
+	REG_WRITE(DSPARB, 0x3F3E);
+}
+
+static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+	u32 pfit_control;
+
+	pfit_control = REG_READ(PFIT_CONTROL);
+
+	/* See if the panel fitter is in use */
+	if ((pfit_control & PFIT_ENABLE) == 0)
+		return -1;
+	return (pfit_control >> 29) & 0x3;
+}
+
+static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode,
+			       int x, int y,
+			       struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	int refclk;
+	struct cdv_intel_clock_t clock;
+	u32 dpll = 0, dspcntr, pipeconf;
+	bool ok;
+	bool is_crt = false, is_lvds = false, is_tv = false;
+	bool is_hdmi = false, is_dp = false;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+	const struct cdv_intel_limit_t *limit;
+	u32 ddi_select = 0;
+	bool is_edp = false;
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+
+		if (!connector->encoder
+		    || connector->encoder->crtc != crtc)
+			continue;
+
+		ddi_select = psb_intel_encoder->ddi_select;
+		switch (psb_intel_encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_TVOUT:
+			is_tv = true;
+			break;
+		case INTEL_OUTPUT_ANALOG:
+			is_crt = true;
+			break;
+		case INTEL_OUTPUT_HDMI:
+			is_hdmi = true;
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			is_dp = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			is_edp = true;
+			break;
+		default:
+			DRM_ERROR("invalid output type.\n");
+			return 0;
+		}
+	}
+
+	if (dev_priv->dplla_96mhz)
+		/* low-end sku, 96/100 MHz */
+		refclk = 96000;
+	else
+		/* high-end sku, 27/100 MHz */
+		refclk = 27000;
+	if (is_dp || is_edp) {
+		/*
+		 * Based on the spec the low-end SKU has only CRT/LVDS, so it
+		 * is unnecessary to consider it for DP/eDP. The high-end SKU
+		 * uses the 27/100MHz reference clock for DP/eDP: with the
+		 * SSC clock the reference is 100MHz, otherwise 27MHz. From
+		 * the VBIOS code it seems that pipe A chooses 27MHz for
+		 * DP/eDP while pipe B chooses 100MHz.
+		 */
+		if (pipe == 0)
+			refclk = 27000;
+		else
+			refclk = 100000;
+	}
+
+	if (is_lvds && dev_priv->lvds_use_ssc) {
+		refclk = dev_priv->lvds_ssc_freq * 1000;
+		DRM_DEBUG_KMS("Use SSC reference clock %d Mhz\n", dev_priv->lvds_ssc_freq);
+	}
+
+	drm_mode_debug_printmodeline(adjusted_mode);
+
+	limit = cdv_intel_limit(crtc, refclk);
+
+	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
+				 &clock);
+	if (!ok) {
+		dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+		return 0;
+	}
+
+	dpll = DPLL_VGA_MODE_DIS;
+	if (is_tv) {
+		/* XXX: just matching BIOS for now */
+/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
+		dpll |= 3;
+	}
+/*		dpll |= PLL_REF_INPUT_DREFCLK; */
+
+	if (is_dp || is_edp) {
+		cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode);
+	} else {
+		REG_WRITE(PIPE_GMCH_DATA_M(pipe), 0);
+		REG_WRITE(PIPE_GMCH_DATA_N(pipe), 0);
+		REG_WRITE(PIPE_DP_LINK_M(pipe), 0);
+		REG_WRITE(PIPE_DP_LINK_N(pipe), 0);
+	}
+
+	dpll |= DPLL_SYNCLOCK_ENABLE;
+/*	if (is_lvds)
+		dpll |= DPLLB_MODE_LVDS;
+	else
+		dpll |= DPLLB_MODE_DAC_SERIAL; */
+	/* dpll |= (2 << 11); */
+
+	/* setup pipeconf */
+	pipeconf = REG_READ(map->conf);
+
+	pipeconf &= ~(PIPE_BPC_MASK);
+	if (is_edp) {
+		switch (dev_priv->edp.bpp) {
+		case 24:
+			pipeconf |= PIPE_8BPC;
+			break;
+		case 18:
+			pipeconf |= PIPE_6BPC;
+			break;
+		case 30:
+			pipeconf |= PIPE_10BPC;
+			break;
+		default:
+			pipeconf |= PIPE_8BPC;
+			break;
+		}
+	} else if (is_lvds) {
+		/* the BPC will be 6 if it is an 18-bit LVDS panel */
+		if ((REG_READ(LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+			pipeconf |= PIPE_8BPC;
+		else
+			pipeconf |= PIPE_6BPC;
+	} else
+		pipeconf |= PIPE_8BPC;
+
+	/* Set up the display plane register */
+	dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+	if (pipe == 0)
+		dspcntr |= DISPPLANE_SEL_PIPE_A;
+	else
+		dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+	dspcntr |= DISPLAY_PLANE_ENABLE;
+	pipeconf |= PIPEACONF_ENABLE;
+
+	REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
+	REG_READ(map->dpll);
+
+	cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds, ddi_select);
+
+	udelay(150);
+
+
+	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
+	 * This is an exception to the general rule that mode_set doesn't turn
+	 * things on.
+	 */
+	if (is_lvds) {
+		u32 lvds = REG_READ(LVDS);
+
+		lvds |=
+		    LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
+		    LVDS_PIPEB_SELECT;
+		/* Set the B0-B3 data pairs corresponding to
+		 * whether we're going to
+		 * set the DPLLs for dual-channel mode or not.
+		 */
+		if (clock.p2 == 7)
+			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+		else
+			lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+		 * appropriately here, but we need to look more
+		 * thoroughly into how panels behave in the two modes.
+		 */
+
+		REG_WRITE(LVDS, lvds);
+		REG_READ(LVDS);
+	}
+
+	dpll |= DPLL_VCO_ENABLE;
+
+	/* Disable the panel fitter if it was on our pipe */
+	if (cdv_intel_panel_fitter_pipe(dev) == pipe)
+		REG_WRITE(PFIT_CONTROL, 0);
+
+	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+	drm_mode_debug_printmodeline(mode);
+
+	REG_WRITE(map->dpll,
+		(REG_READ(map->dpll) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
+	REG_READ(map->dpll);
+	/* Wait for the clocks to stabilize. */
+	udelay(150); /* 42 usec w/o calibration, 110 with.  rounded up. */
+
+	if (!(REG_READ(map->dpll) & DPLL_LOCK)) {
+		dev_err(dev->dev, "Failed to get DPLL lock\n");
+		return -EBUSY;
+	}
+
+	{
+		int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+		REG_WRITE(map->dpll_md, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+	}
+
+	REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
+		  ((adjusted_mode->crtc_htotal - 1) << 16));
+	REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
+		  ((adjusted_mode->crtc_hblank_end - 1) << 16));
+	REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
+		  ((adjusted_mode->crtc_hsync_end - 1) << 16));
+	REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
+		  ((adjusted_mode->crtc_vtotal - 1) << 16));
+	REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
+		  ((adjusted_mode->crtc_vblank_end - 1) << 16));
+	REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
+		  ((adjusted_mode->crtc_vsync_end - 1) << 16));
+	/* pipesrc and dspsize control the size that is scaled from,
+	 * which should always be the user's requested size.
+	 */
+	REG_WRITE(map->size,
+		  ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+	REG_WRITE(map->pos, 0);
+	REG_WRITE(map->src,
+		  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+	REG_WRITE(map->conf, pipeconf);
+	REG_READ(map->conf);
+
+	cdv_intel_wait_for_vblank(dev);
+
+	REG_WRITE(map->cntr, dspcntr);
+
+	/* Flush the plane changes */
+	{
+		struct drm_crtc_helper_funcs *crtc_funcs =
+		    crtc->helper_private;
+		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+	}
+
+	cdv_intel_wait_for_vblank(dev);
+
+	return 0;
+}
+
+
+/**
+ * Save the HW state of the given CRTC
+ */
+static void cdv_intel_crtc_save(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+	const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
+	uint32_t paletteReg;
+	int i;
+
+	if (!crtc_state) {
+		dev_dbg(dev->dev, "No CRTC state found\n");
+		return;
+	}
+
+	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+	crtc_state->savePIPECONF = REG_READ(map->conf);
+	crtc_state->savePIPESRC = REG_READ(map->src);
+	crtc_state->saveFP0 = REG_READ(map->fp0);
+	crtc_state->saveFP1 = REG_READ(map->fp1);
+	crtc_state->saveDPLL = REG_READ(map->dpll);
+	crtc_state->saveHTOTAL = REG_READ(map->htotal);
+	crtc_state->saveHBLANK = REG_READ(map->hblank);
+	crtc_state->saveHSYNC = REG_READ(map->hsync);
+	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+	crtc_state->saveVBLANK = REG_READ(map->vblank);
+	crtc_state->saveVSYNC = REG_READ(map->vsync);
+	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
+
+	/*NOTE: DSPSIZE DSPPOS only for psb*/
+	crtc_state->saveDSPSIZE = REG_READ(map->size);
+	crtc_state->saveDSPPOS = REG_READ(map->pos);
+
+	crtc_state->saveDSPBASE = REG_READ(map->base);
+
+	DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+			crtc_state->saveDSPCNTR,
+			crtc_state->savePIPECONF,
+			crtc_state->savePIPESRC,
+			crtc_state->saveFP0,
+			crtc_state->saveFP1,
+			crtc_state->saveDPLL,
+			crtc_state->saveHTOTAL,
+			crtc_state->saveHBLANK,
+			crtc_state->saveHSYNC,
+			crtc_state->saveVTOTAL,
+			crtc_state->saveVBLANK,
+			crtc_state->saveVSYNC,
+			crtc_state->saveDSPSTRIDE,
+			crtc_state->saveDSPSIZE,
+			crtc_state->saveDSPPOS,
+			crtc_state->saveDSPBASE
+		);
+
+	paletteReg = map->palette;
+	for (i = 0; i < 256; ++i)
+		crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+}
+
+/**
+ * Restore the HW state of the given CRTC
+ */
+static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc =  to_psb_intel_crtc(crtc);
+	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+	const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
+	uint32_t paletteReg;
+	int i;
+
+	if (!crtc_state) {
+		dev_dbg(dev->dev, "No crtc state\n");
+		return;
+	}
+
+	DRM_DEBUG(
+		"current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+		REG_READ(map->cntr),
+		REG_READ(map->conf),
+		REG_READ(map->src),
+		REG_READ(map->fp0),
+		REG_READ(map->fp1),
+		REG_READ(map->dpll),
+		REG_READ(map->htotal),
+		REG_READ(map->hblank),
+		REG_READ(map->hsync),
+		REG_READ(map->vtotal),
+		REG_READ(map->vblank),
+		REG_READ(map->vsync),
+		REG_READ(map->stride),
+		REG_READ(map->size),
+		REG_READ(map->pos),
+		REG_READ(map->base)
+	);
+
+	DRM_DEBUG(
+		"saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+		crtc_state->saveDSPCNTR,
+		crtc_state->savePIPECONF,
+		crtc_state->savePIPESRC,
+		crtc_state->saveFP0,
+		crtc_state->saveFP1,
+		crtc_state->saveDPLL,
+		crtc_state->saveHTOTAL,
+		crtc_state->saveHBLANK,
+		crtc_state->saveHSYNC,
+		crtc_state->saveVTOTAL,
+		crtc_state->saveVBLANK,
+		crtc_state->saveVSYNC,
+		crtc_state->saveDSPSTRIDE,
+		crtc_state->saveDSPSIZE,
+		crtc_state->saveDSPPOS,
+		crtc_state->saveDSPBASE
+	);
+
+
+	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+		REG_WRITE(map->dpll,
+				crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+		REG_READ(map->dpll);
+		DRM_DEBUG("write dpll: %x\n",
+				REG_READ(map->dpll));
+		udelay(150);
+	}
+
+	REG_WRITE(map->fp0, crtc_state->saveFP0);
+	REG_READ(map->fp0);
+
+	REG_WRITE(map->fp1, crtc_state->saveFP1);
+	REG_READ(map->fp1);
+
+	REG_WRITE(map->dpll, crtc_state->saveDPLL);
+	REG_READ(map->dpll);
+	udelay(150);
+
+	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
+
+	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+	REG_WRITE(map->pos, crtc_state->saveDSPPOS);
+
+	REG_WRITE(map->src, crtc_state->savePIPESRC);
+	REG_WRITE(map->base, crtc_state->saveDSPBASE);
+	REG_WRITE(map->conf, crtc_state->savePIPECONF);
+
+	cdv_intel_wait_for_vblank(dev);
+
+	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+	REG_WRITE(map->base, crtc_state->saveDSPBASE);
+
+	cdv_intel_wait_for_vblank(dev);
+
+	paletteReg = map->palette;
+	for (i = 0; i < 256; ++i)
+		REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+}
+
+static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
+				 struct drm_file *file_priv,
+				 uint32_t handle,
+				 uint32_t width, uint32_t height)
+{
+	struct drm_device *dev = crtc->dev;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+	uint32_t temp;
+	size_t addr = 0;
+	struct gtt_range *gt;
+	struct drm_gem_object *obj;
+	int ret = 0;
+
+	/* if we want to turn off the cursor, ignore width and height */
+	if (!handle) {
+		/* turn off the cursor */
+		temp = CURSOR_MODE_DISABLE;
+
+		if (gma_power_begin(dev, false)) {
+			REG_WRITE(control, temp);
+			REG_WRITE(base, 0);
+			gma_power_end(dev);
+		}
+
+		/* unpin the old GEM object */
+		if (psb_intel_crtc->cursor_obj) {
+			gt = container_of(psb_intel_crtc->cursor_obj,
+							struct gtt_range, gem);
+			psb_gtt_unpin(gt);
+			drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+			psb_intel_crtc->cursor_obj = NULL;
+		}
+
+		return 0;
+	}
+
+	/* Currently we only support 64x64 cursors */
+	if (width != 64 || height != 64) {
+		dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
+		return -EINVAL;
+	}
+
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (!obj)
+		return -ENOENT;
+
+	if (obj->size < width * height * 4) {
+		dev_dbg(dev->dev, "buffer is to small\n");
+		ret = -ENOMEM;
+		goto unref_cursor;
+	}
+
+	gt = container_of(obj, struct gtt_range, gem);
+
+	/* Pin the memory into the GTT */
+	ret = psb_gtt_pin(gt);
+	if (ret) {
+		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+		goto unref_cursor;
+	}
+
+	addr = gt->offset;	/* Or resource.start ??? */
+
+	psb_intel_crtc->cursor_addr = addr;
+
+	temp = 0;
+	/* set the pipe for the cursor */
+	temp |= (pipe << 28);
+	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+	if (gma_power_begin(dev, false)) {
+		REG_WRITE(control, temp);
+		REG_WRITE(base, addr);
+		gma_power_end(dev);
+	}
+
+	/* unpin the old GEM object */
+	if (psb_intel_crtc->cursor_obj) {
+		gt = container_of(psb_intel_crtc->cursor_obj,
+							struct gtt_range, gem);
+		psb_gtt_unpin(gt);
+		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+	}
+
+	psb_intel_crtc->cursor_obj = obj;
+	return ret;
+
+unref_cursor:
+	drm_gem_object_unreference(obj);
+	return ret;
+}
+
+static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	uint32_t temp = 0;
+	uint32_t adder;
+
+
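+	/* The cursor position registers use sign-magnitude encoding:
+	 * a per-axis sign bit plus the absolute coordinate. */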
+	if (x < 0) {
+		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+		x = -x;
+	}
+	if (y < 0) {
+		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+		y = -y;
+	}
+
+	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+	adder = psb_intel_crtc->cursor_addr;
+
+	if (gma_power_begin(dev, false)) {
+		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
+		gma_power_end(dev);
+	}
+	return 0;
+}
+
+static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+			 u16 *green, u16 *blue, uint32_t start, uint32_t size)
+{
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int i;
+	int end = (start + size > 256) ? 256 : start + size;
+
+	for (i = start; i < end; i++) {
+		psb_intel_crtc->lut_r[i] = red[i] >> 8;
+		psb_intel_crtc->lut_g[i] = green[i] >> 8;
+		psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+	}
+
+	cdv_intel_crtc_load_lut(crtc);
+}
+
+static int cdv_crtc_set_config(struct drm_mode_set *set)
+{
+	int ret = 0;
+	struct drm_device *dev = set->crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->rpm_enabled)
+		return drm_crtc_helper_set_config(set);
+
+	pm_runtime_forbid(&dev->pdev->dev);
+
+	ret = drm_crtc_helper_set_config(set);
+
+	pm_runtime_allow(&dev->pdev->dev);
+
+	return ret;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+
+/* FIXME: why are we using this, should it be cdv_ in this tree? */
+
+static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
+{
+	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+	clock->p = clock->p1 * clock->p2;
+	clock->vco = refclk * clock->m / (clock->n + 2);
+	clock->dot = clock->vco / clock->p;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int cdv_intel_crtc_clock_get(struct drm_device *dev,
+				struct drm_crtc *crtc)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	u32 dpll;
+	u32 fp;
+	struct cdv_intel_clock_t clock;
+	bool is_lvds;
+	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+
+	if (gma_power_begin(dev, false)) {
+		dpll = REG_READ(map->dpll);
+		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+			fp = REG_READ(map->fp0);
+		else
+			fp = REG_READ(map->fp1);
+		is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+		gma_power_end(dev);
+	} else {
+		dpll = p->dpll;
+		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+			fp = p->fp0;
+		else
+			fp = p->fp1;
+
+		is_lvds = (pipe == 1) &&
+				(dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN);
+	}
+
+	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+	clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+	clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+
+	if (is_lvds) {
+		clock.p1 =
+		    ffs((dpll &
+			 DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+			DPLL_FPA01_P1_POST_DIV_SHIFT);
+		if (clock.p1 == 0) {
+			clock.p1 = 4;
+			dev_err(dev->dev, "PLL %d\n", dpll);
+		}
+		clock.p2 = 14;
+
+		if ((dpll & PLL_REF_INPUT_MASK) ==
+		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+			/* XXX: might not be 66MHz */
+			i8xx_clock(66000, &clock);
+		} else
+			i8xx_clock(48000, &clock);
+	} else {
+		if (dpll & PLL_P1_DIVIDE_BY_TWO)
+			clock.p1 = 2;
+		else {
+			clock.p1 =
+			    ((dpll &
+			      DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+			     DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+		}
+		if (dpll & PLL_P2_DIVIDE_BY_4)
+			clock.p2 = 4;
+		else
+			clock.p2 = 2;
+
+		i8xx_clock(48000, &clock);
+	}
+
+	/* XXX: It would be nice to validate the clocks, but we can't reuse
+	 * i830PllIsValid() because it relies on the xf86_config connector
+	 * configuration being accurate, which it isn't necessarily.
+	 */
+
+	return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+					     struct drm_crtc *crtc)
+{
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	struct drm_display_mode *mode;
+	int htot;
+	int hsync;
+	int vtot;
+	int vsync;
+
+	if (gma_power_begin(dev, false)) {
+		htot = REG_READ(map->htotal);
+		hsync = REG_READ(map->hsync);
+		vtot = REG_READ(map->vtotal);
+		vsync = REG_READ(map->vsync);
+		gma_power_end(dev);
+	} else {
+		htot = p->htotal;
+		hsync = p->hsync;
+		vtot = p->vtotal;
+		vsync = p->vsync;
+	}
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
+	mode->hdisplay = (htot & 0xffff) + 1;
+	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+	mode->hsync_start = (hsync & 0xffff) + 1;
+	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+	mode->vdisplay = (vtot & 0xffff) + 1;
+	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+	mode->vsync_start = (vsync & 0xffff) + 1;
+	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	return mode;
+}
+
+static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+	kfree(psb_intel_crtc->crtc_state);
+	drm_crtc_cleanup(crtc);
+	kfree(psb_intel_crtc);
+}
+
+static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
+{
+	struct gtt_range *gt;
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+	if (crtc->fb) {
+		gt = to_psb_fb(crtc->fb)->gtt;
+		psb_gtt_unpin(gt);
+	}
+}
+
+const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
+	.dpms = cdv_intel_crtc_dpms,
+	.mode_fixup = cdv_intel_crtc_mode_fixup,
+	.mode_set = cdv_intel_crtc_mode_set,
+	.mode_set_base = cdv_intel_pipe_set_base,
+	.prepare = cdv_intel_crtc_prepare,
+	.commit = cdv_intel_crtc_commit,
+	.disable = cdv_intel_crtc_disable,
+};
+
+const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
+	.save = cdv_intel_crtc_save,
+	.restore = cdv_intel_crtc_restore,
+	.cursor_set = cdv_intel_crtc_cursor_set,
+	.cursor_move = cdv_intel_crtc_cursor_move,
+	.gamma_set = cdv_intel_crtc_gamma_set,
+	.set_config = cdv_crtc_set_config,
+	.destroy = cdv_intel_crtc_destroy,
+};
diff --git a/linux-imx/drivers/gpu/drm/gma500/cdv_intel_dp.c b/linux-imx/drivers/gpu/drm/gma500/cdv_intel_dp.c
new file mode 100644
index 0000000..88d9ef6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -0,0 +1,1951 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include <drm/drm_dp_helper.h>
+
+#define _wait_for(COND, MS, W) ({ \
+	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
+	int ret__ = 0;							\
+	while (!(COND)) {						\
+		if (time_after(jiffies, timeout__)) {			\
+			ret__ = -ETIMEDOUT;				\
+			break;						\
+		}							\
+		if (W && !in_dbg_master())				\
+			msleep(W);					\
+	}								\
+	ret__;								\
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+
+#define DP_LINK_STATUS_SIZE	6
+#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
+
+#define DP_LINK_CONFIGURATION_SIZE	9
+
+#define CDV_FAST_LINK_TRAIN	1
+
+struct cdv_intel_dp {
+	uint32_t output_reg;
+	uint32_t DP;
+	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
+	bool has_audio;
+	int force_audio;
+	uint32_t color_range;
+	uint8_t link_bw;
+	uint8_t lane_count;
+	uint8_t dpcd[4];
+	struct psb_intel_encoder *encoder;
+	struct i2c_adapter adapter;
+	struct i2c_algo_dp_aux_data algo;
+	uint8_t	train_set[4];
+	uint8_t link_status[DP_LINK_STATUS_SIZE];
+	int panel_power_up_delay;
+	int panel_power_down_delay;
+	int panel_power_cycle_delay;
+	int backlight_on_delay;
+	int backlight_off_delay;
+	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
+	bool panel_on;
+};
+
+struct ddi_regoff {
+	uint32_t	PreEmph1;
+	uint32_t	PreEmph2;
+	uint32_t	VSwing1;
+	uint32_t	VSwing2;
+	uint32_t	VSwing3;
+	uint32_t	VSwing4;
+	uint32_t	VSwing5;
+};
+
+static struct ddi_regoff ddi_DP_train_table[] = {
+	{.PreEmph1 = 0x812c, .PreEmph2 = 0x8124, .VSwing1 = 0x8154,
+	.VSwing2 = 0x8148, .VSwing3 = 0x814C, .VSwing4 = 0x8150,
+	.VSwing5 = 0x8158,},
+	{.PreEmph1 = 0x822c, .PreEmph2 = 0x8224, .VSwing1 = 0x8254,
+	.VSwing2 = 0x8248, .VSwing3 = 0x824C, .VSwing4 = 0x8250,
+	.VSwing5 = 0x8258,},
+};
+
+static uint32_t dp_vswing_premph_table[] = {
+        0x55338954,	0x4000,
+        0x554d8954,	0x2000,
+        0x55668954,	0,
+        0x559ac0d4,	0x6000,
+};
+/**
+ * is_edp - is the given encoder driving an eDP panel
+ * @encoder: the encoder to check
+ *
+ * Returns true if the encoder is an eDP output, false otherwise.
+ */
+static bool is_edp(struct psb_intel_encoder *encoder)
+{
+	return encoder->type == INTEL_OUTPUT_EDP;
+}
+
+
+static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder);
+
+static int
+cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int max_lane_count = 4;
+
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+		max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+		switch (max_lane_count) {
+		case 1: case 2: case 4:
+			break;
+		default:
+			max_lane_count = 4;
+		}
+	}
+	return max_lane_count;
+}
+
+static int
+cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+
+	switch (max_link_bw) {
+	case DP_LINK_BW_1_62:
+	case DP_LINK_BW_2_7:
+		break;
+	default:
+		max_link_bw = DP_LINK_BW_1_62;
+		break;
+	}
+	return max_link_bw;
+}
+
+static int
+cdv_intel_dp_link_clock(uint8_t link_bw)
+{
+	if (link_bw == DP_LINK_BW_2_7)
+		return 270000;
+	else
+		return 162000;
+}
+
+static int
+cdv_intel_dp_link_required(int pixel_clock, int bpp)
+{
+	return (pixel_clock * bpp + 7) / 8;
+}
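+
+/*
+ * cdv_intel_dp_link_required() converts a pixel clock (kHz) and bpp
+ * into a bandwidth in kilobytes/s, rounding up: e.g. a 148500 kHz
+ * (1080p60) mode at 24 bpp needs (148500 * 24 + 7) / 8 = 445500 kB/s.
+ */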
+
+static int
+cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+	return (max_link_clock * max_lanes * 19) / 20;
+}
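+
+/*
+ * After 8b/10b coding each lane delivers one data byte per link
+ * clock, so link_clock (kHz) * lanes is the raw payload capacity in
+ * kB/s; the 19/20 factor appears to leave a 5% margin. E.g. 270000
+ * kHz on 4 lanes gives (270000 * 4 * 19) / 20 = 1026000 kB/s.
+ */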
+
+static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	u32 pp;
+
+	if (intel_dp->panel_on) {
+		DRM_DEBUG_KMS("Skip VDD on because of panel on\n");
+		return;
+	}
+	DRM_DEBUG_KMS("\n");
+
+	pp = REG_READ(PP_CONTROL);
+
+	pp |= EDP_FORCE_VDD;
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+	msleep(intel_dp->panel_power_up_delay);
+}
+
+static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp;
+
+	DRM_DEBUG_KMS("\n");
+	pp = REG_READ(PP_CONTROL);
+
+	pp &= ~EDP_FORCE_VDD;
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+
+}
+
+/* Returns true if the panel was already on when called */
+static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_NONE;
+
+	if (intel_dp->panel_on)
+		return true;
+
+	DRM_DEBUG_KMS("\n");
+	pp = REG_READ(PP_CONTROL);
+	pp &= ~PANEL_UNLOCK_MASK;
+
+	pp |= (PANEL_UNLOCK_REGS | POWER_TARGET_ON);
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+
+	if (wait_for(((REG_READ(PP_STATUS) & idle_on_mask) == idle_on_mask), 1000)) {
+		DRM_DEBUG_KMS("Error in Powering up eDP panel, status %x\n", REG_READ(PP_STATUS));
+		intel_dp->panel_on = false;
+	} else
+		intel_dp->panel_on = true;
+	msleep(intel_dp->panel_power_up_delay);
+
+	return false;
+}
+
+static void cdv_intel_edp_panel_off(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp, idle_off_mask = PP_ON;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+
+	DRM_DEBUG_KMS("\n");
+
+	pp = REG_READ(PP_CONTROL);
+
+	if ((pp & POWER_TARGET_ON) == 0) 
+		return;
+
+	intel_dp->panel_on = false;
+	pp &= ~PANEL_UNLOCK_MASK;
+	/* ILK workaround: disable reset around power sequence */
+
+	pp &= ~POWER_TARGET_ON;
+	pp &= ~EDP_FORCE_VDD;
+	pp &= ~EDP_BLC_ENABLE;
+	REG_WRITE(PP_CONTROL, pp);
+	REG_READ(PP_CONTROL);
+	DRM_DEBUG_KMS("PP_STATUS %x\n", REG_READ(PP_STATUS));
+
+	if (wait_for((REG_READ(PP_STATUS) & idle_off_mask) == 0, 1000)) {
+		DRM_DEBUG_KMS("Error in turning off Panel\n");	
+	}
+
+	msleep(intel_dp->panel_power_cycle_delay);
+	DRM_DEBUG_KMS("Over\n");
+}
+
+static void cdv_intel_edp_backlight_on(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	u32 pp;
+
+	DRM_DEBUG_KMS("\n");
+	/*
+	 * If we enable the backlight right away following a panel power
+	 * on, we may see slight flicker as the panel syncs with the eDP
+	 * link.  So delay a bit to make sure the image is solid before
+	 * allowing it to appear.
+	 */
+	msleep(300);
+	pp = REG_READ(PP_CONTROL);
+
+	pp |= EDP_BLC_ENABLE;
+	REG_WRITE(PP_CONTROL, pp);
+	gma_backlight_enable(dev);
+}
+
+static void cdv_intel_edp_backlight_off(struct psb_intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	u32 pp;
+
+	DRM_DEBUG_KMS("\n");
+	gma_backlight_disable(dev);
+	msleep(10);
+	pp = REG_READ(PP_CONTROL);
+
+	pp &= ~EDP_BLC_ENABLE;
+	REG_WRITE(PP_CONTROL, pp);
+	msleep(intel_dp->backlight_off_delay);
+}
+
+static int
+cdv_intel_dp_mode_valid(struct drm_connector *connector,
+		    struct drm_display_mode *mode)
+{
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
+	int max_lanes = cdv_intel_dp_max_lane_count(encoder);
+	struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+	if (is_edp(encoder) && intel_dp->panel_fixed_mode) {
+		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+
+	/* Only refuse the mode on non-eDP, since we have seen some weird eDP
+	   panels which are outside spec tolerances but somehow work by magic */
+	if (!is_edp(encoder) &&
+	    (cdv_intel_dp_link_required(mode->clock, dev_priv->edp.bpp)
+	     > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes)))
+		return MODE_CLOCK_HIGH;
+
+	if (is_edp(encoder)) {
+		if (cdv_intel_dp_link_required(mode->clock, 24)
+		    > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes))
+			return MODE_CLOCK_HIGH;
+	}
+	if (mode->clock < 10000)
+		return MODE_CLOCK_LOW;
+
+	return MODE_OK;
+}
+
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+	int	i;
+	uint32_t v = 0;
+
+	if (src_bytes > 4)
+		src_bytes = 4;
+	for (i = 0; i < src_bytes; i++)
+		v |= ((uint32_t) src[i]) << ((3-i) * 8);
+	return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+	int i;
+	if (dst_bytes > 4)
+		dst_bytes = 4;
+	for (i = 0; i < dst_bytes; i++)
+		dst[i] = src >> ((3-i) * 8);
+}
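+
+/*
+ * The AUX data registers are big-endian: pack_aux() places the first
+ * byte in the top of the word, e.g. pack_aux({0x12, 0x34}, 2) yields
+ * 0x12340000, and unpack_aux() reverses the transformation.
+ */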
+
+static int
+cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
+		uint8_t *send, int send_bytes,
+		uint8_t *recv, int recv_size)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint32_t output_reg = intel_dp->output_reg;
+	struct drm_device *dev = encoder->base.dev;
+	uint32_t ch_ctl = output_reg + 0x10;
+	uint32_t ch_data = ch_ctl + 4;
+	int i;
+	int recv_bytes;
+	uint32_t status;
+	uint32_t aux_clock_divider;
+	int try, precharge;
+
+	/* The clock divider is based off the hrawclk and would like to
+	 * run at 2MHz, so take the hrawclk value and divide it by 2.
+	 * On the CDV platform the hrawclk is 200MHz.
+	 */
+	aux_clock_divider = 200 / 2;
+
+	precharge = 4;
+	if (is_edp(encoder))
+		precharge = 10;
+
+	if (REG_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
+		DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+			  REG_READ(ch_ctl));
+		return -EBUSY;
+	}
+
+	/* Must try at least 3 times according to DP spec */
+	for (try = 0; try < 5; try++) {
+		/* Load the send data into the aux channel data registers */
+		for (i = 0; i < send_bytes; i += 4)
+			REG_WRITE(ch_data + i,
+				   pack_aux(send + i, send_bytes - i));
+
+		/* Send the command and wait for it to complete */
+		REG_WRITE(ch_ctl,
+			   DP_AUX_CH_CTL_SEND_BUSY |
+			   DP_AUX_CH_CTL_TIME_OUT_400us |
+			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+			   DP_AUX_CH_CTL_DONE |
+			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+			   DP_AUX_CH_CTL_RECEIVE_ERROR);
+		/* Busy-wait until the hardware clears SEND_BUSY; it should
+		 * do so within its 400us transaction timeout */
+		for (;;) {
+			status = REG_READ(ch_ctl);
+			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+				break;
+			udelay(100);
+		}
+
+		/* Clear done status and any errors */
+		REG_WRITE(ch_ctl,
+			   status |
+			   DP_AUX_CH_CTL_DONE |
+			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+			   DP_AUX_CH_CTL_RECEIVE_ERROR);
+		if (status & DP_AUX_CH_CTL_DONE)
+			break;
+	}
+
+	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+		return -EBUSY;
+	}
+
+	/* Check for timeout or receive error.
+	 * Timeouts occur when the sink is not connected
+	 */
+	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+		return -EIO;
+	}
+
+	/* Timeouts occur when the device isn't connected, so they're
+	 * "normal" -- don't fill the kernel log with these */
+	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+		return -ETIMEDOUT;
+	}
+
+	/* Unload any bytes sent back from the other side */
+	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+	if (recv_bytes > recv_size)
+		recv_bytes = recv_size;
+
+	for (i = 0; i < recv_bytes; i += 4)
+		unpack_aux(REG_READ(ch_data + i),
+			   recv + i, recv_bytes - i);
+
+	return recv_bytes;
+}
+
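+/*
+ * Per the DP spec, a native AUX request carries a four byte header ahead
+ * of the payload: the command in the top nibble of byte 0 (the low nibble
+ * holds address bits 19:16, always zero here since we use 16-bit
+ * addresses), the address high/low bytes, then the transfer length
+ * minus one.
+ */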
+/* Write data to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
+			  uint16_t address, uint8_t *send, int send_bytes)
+{
+	int ret;
+	uint8_t	msg[20];
+	int msg_bytes;
+	uint8_t	ack;
+
+	if (send_bytes > 16)
+		return -1;
+	msg[0] = AUX_NATIVE_WRITE << 4;
+	msg[1] = address >> 8;
+	msg[2] = address & 0xff;
+	msg[3] = send_bytes - 1;
+	memcpy(&msg[4], send, send_bytes);
+	msg_bytes = send_bytes + 4;
+	for (;;) {
+		ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1);
+		if (ret < 0)
+			return ret;
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+			break;
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(100);
+		else
+			return -EIO;
+	}
+	return send_bytes;
+}
+
+/* Write a single byte to the aux channel in native mode */
+static int
+cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
+			    uint16_t address, uint8_t byte)
+{
+	return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder,
+			 uint16_t address, uint8_t *recv, int recv_bytes)
+{
+	uint8_t msg[4];
+	int msg_bytes;
+	uint8_t reply[20];
+	int reply_bytes;
+	uint8_t ack;
+	int ret;
+
+	msg[0] = AUX_NATIVE_READ << 4;
+	msg[1] = address >> 8;
+	msg[2] = address & 0xff;
+	msg[3] = recv_bytes - 1;
+
+	msg_bytes = 4;
+	reply_bytes = recv_bytes + 1;
+
+	for (;;) {
+		ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes,
+				      reply, reply_bytes);
+		if (ret == 0)
+			return -EPROTO;
+		if (ret < 0)
+			return ret;
+		ack = reply[0];
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+			memcpy(recv, reply + 1, ret - 1);
+			return ret - 1;
+		}
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(100);
+		else
+			return -EIO;
+	}
+}
+
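+/*
+ * I2C-over-AUX transfer: each i2c byte is wrapped in its own AUX
+ * transaction.  The MOT (Middle-Of-Transaction) bit keeps the sink's i2c
+ * transfer open between bytes; it is left clear on the final message so
+ * that the sink generates a STOP on its i2c bus.
+ */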
+static int
+cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+		    uint8_t write_byte, uint8_t *read_byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	struct cdv_intel_dp *intel_dp = container_of(adapter,
+						struct cdv_intel_dp,
+						adapter);
+	struct psb_intel_encoder *encoder = intel_dp->encoder;
+	uint16_t address = algo_data->address;
+	uint8_t msg[5];
+	uint8_t reply[2];
+	unsigned retry;
+	int msg_bytes;
+	int reply_bytes;
+	int ret;
+
+	/* Set up the command byte */
+	if (mode & MODE_I2C_READ)
+		msg[0] = AUX_I2C_READ << 4;
+	else
+		msg[0] = AUX_I2C_WRITE << 4;
+
+	if (!(mode & MODE_I2C_STOP))
+		msg[0] |= AUX_I2C_MOT << 4;
+
+	msg[1] = address >> 8;
+	msg[2] = address;
+
+	switch (mode) {
+	case MODE_I2C_WRITE:
+		msg[3] = 0;
+		msg[4] = write_byte;
+		msg_bytes = 5;
+		reply_bytes = 1;
+		break;
+	case MODE_I2C_READ:
+		msg[3] = 0;
+		msg_bytes = 4;
+		reply_bytes = 2;
+		break;
+	default:
+		msg_bytes = 3;
+		reply_bytes = 1;
+		break;
+	}
+
+	for (retry = 0; retry < 5; retry++) {
+		ret = cdv_intel_dp_aux_ch(encoder,
+				      msg, msg_bytes,
+				      reply, reply_bytes);
+		if (ret < 0) {
+			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+			return ret;
+		}
+
+		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+		case AUX_NATIVE_REPLY_ACK:
+			/* I2C-over-AUX Reply field is only valid
+			 * when paired with AUX ACK.
+			 */
+			break;
+		case AUX_NATIVE_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_ch native nack\n");
+			return -EREMOTEIO;
+		case AUX_NATIVE_REPLY_DEFER:
+			udelay(100);
+			continue;
+		default:
+			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+				  reply[0]);
+			return -EREMOTEIO;
+		}
+
+		switch (reply[0] & AUX_I2C_REPLY_MASK) {
+		case AUX_I2C_REPLY_ACK:
+			if (mode == MODE_I2C_READ) {
+				*read_byte = reply[1];
+			}
+			return reply_bytes - 1;
+		case AUX_I2C_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_i2c nack\n");
+			return -EREMOTEIO;
+		case AUX_I2C_REPLY_DEFER:
+			DRM_DEBUG_KMS("aux_i2c defer\n");
+			udelay(100);
+			break;
+		default:
+			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
+			return -EREMOTEIO;
+		}
+	}
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EREMOTEIO;
+}
+
+static int
+cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret;
+
+	DRM_DEBUG_KMS("i2c_init %s\n", name);
+
+	intel_dp->algo.running = false;
+	intel_dp->algo.address = 0;
+	intel_dp->algo.aux_ch = cdv_intel_dp_i2c_aux_ch;
+
+	memset(&intel_dp->adapter, 0, sizeof(intel_dp->adapter));
+	intel_dp->adapter.owner = THIS_MODULE;
+	intel_dp->adapter.class = I2C_CLASS_DDC;
+	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+	intel_dp->adapter.algo_data = &intel_dp->algo;
+	intel_dp->adapter.dev.parent = &connector->base.kdev;
+
+	if (is_edp(encoder))
+		cdv_intel_edp_panel_vdd_on(encoder);
+	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
+	if (is_edp(encoder))
+		cdv_intel_edp_panel_vdd_off(encoder);
+
+	return ret;
+}
+
+void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+	struct drm_display_mode *adjusted_mode)
+{
+	adjusted_mode->hdisplay = fixed_mode->hdisplay;
+	adjusted_mode->hsync_start = fixed_mode->hsync_start;
+	adjusted_mode->hsync_end = fixed_mode->hsync_end;
+	adjusted_mode->htotal = fixed_mode->htotal;
+
+	adjusted_mode->vdisplay = fixed_mode->vdisplay;
+	adjusted_mode->vsync_start = fixed_mode->vsync_start;
+	adjusted_mode->vsync_end = fixed_mode->vsync_end;
+	adjusted_mode->vtotal = fixed_mode->vtotal;
+
+	adjusted_mode->clock = fixed_mode->clock;
+
+	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+}
+
+static bool
+cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
+		    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_psb_private *dev_priv = encoder->dev->dev_private;
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	int lane_count, clock;
+	int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
+	int max_clock = cdv_intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
+	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+	int refclock = mode->clock;
+	int bpp = 24;
+
+	if (is_edp(intel_encoder) && intel_dp->panel_fixed_mode) {
+		cdv_intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
+		refclock = intel_dp->panel_fixed_mode->clock;
+		bpp = dev_priv->edp.bpp;
+	}
+
+	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+		for (clock = max_clock; clock >= 0; clock--) {
+			int link_avail = cdv_intel_dp_max_data_rate(cdv_intel_dp_link_clock(bws[clock]), lane_count);
+
+			if (cdv_intel_dp_link_required(refclock, bpp) <= link_avail) {
+				intel_dp->link_bw = bws[clock];
+				intel_dp->lane_count = lane_count;
+				adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+				DRM_DEBUG_KMS("Display port link bw %02x lane "
+						"count %d clock %d\n",
+				       intel_dp->link_bw, intel_dp->lane_count,
+				       adjusted_mode->clock);
+				return true;
+			}
+		}
+	}
+	if (is_edp(intel_encoder)) {
+		/* okay we failed just pick the highest */
+		intel_dp->lane_count = max_lane_count;
+		intel_dp->link_bw = bws[max_clock];
+		adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
+		DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+			      "count %d clock %d\n",
+			      intel_dp->link_bw, intel_dp->lane_count,
+			      adjusted_mode->clock);
+
+		return true;
+	}
+	return false;
+}
+
+struct cdv_intel_dp_m_n {
+	uint32_t	tu;
+	uint32_t	gmch_m;
+	uint32_t	gmch_n;
+	uint32_t	link_m;
+	uint32_t	link_n;
+};
+
+static void
+cdv_intel_reduce_ratio(uint32_t *num, uint32_t *den)
+{
+	/* Rescale the ratio to use a fixed power-of-two denominator, since
+	 * the M/N registers are only 24 bits wide: num/den becomes
+	 * (num * 2^23 / den) / 2^23. */
+	uint64_t value = (uint64_t)*num * 0x800000;
+
+	do_div(value, *den);
+	*num = value;
+	*den = 0x800000;
+}
+
+static void
+cdv_intel_dp_compute_m_n(int bpp,
+		     int nlanes,
+		     int pixel_clock,
+		     int link_clock,
+		     struct cdv_intel_dp_m_n *m_n)
+{
+	m_n->tu = 64;
+	m_n->gmch_m = (pixel_clock * bpp + 7) >> 3;
+	m_n->gmch_n = link_clock * nlanes;
+	cdv_intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+	m_n->link_m = pixel_clock;
+	m_n->link_n = link_clock;
+	cdv_intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
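+
+/*
+ * Worked example for the ratios above: a 148.5MHz mode at 24bpp over
+ * four 2.7GHz lanes gives gmch_m/gmch_n = (148500 * 24 / 8) /
+ * (270000 * 4) = 445500 / 1080000 (about 0.4125), which
+ * cdv_intel_reduce_ratio() rescales to 3460300 / 0x800000.
+ */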
+
+void
+cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+		 struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_encoder *encoder;
+	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+	int lane_count = 4, bpp = 24;
+	struct cdv_intel_dp_m_n m_n;
+	int pipe = intel_crtc->pipe;
+
+	/*
+	 * Find the lane count in the intel_encoder private
+	 */
+	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+		struct psb_intel_encoder *intel_encoder;
+		struct cdv_intel_dp *intel_dp;
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		intel_encoder = to_psb_intel_encoder(encoder);
+		intel_dp = intel_encoder->dev_priv;
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+			lane_count = intel_dp->lane_count;
+			break;
+		} else if (is_edp(intel_encoder)) {
+			lane_count = intel_dp->lane_count;
+			bpp = dev_priv->edp.bpp;
+			break;
+		}
+	}
+
+	/*
+	 * Compute the GMCH and Link ratios.  The GMCH ratio compares the
+	 * pixel payload (pixel clock times bytes per pixel) against the
+	 * total link bandwidth (link clock times lane count).
+	 */
+	cdv_intel_dp_compute_m_n(bpp, lane_count,
+			     mode->clock, adjusted_mode->clock, &m_n);
+
+	REG_WRITE(PIPE_GMCH_DATA_M(pipe),
+		   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+		   m_n.gmch_m);
+	REG_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
+	REG_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
+	REG_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
+}
+
+static void
+cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+		  struct drm_display_mode *adjusted_mode)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct drm_crtc *crtc = encoder->crtc;
+	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	struct drm_device *dev = encoder->dev;
+
+	intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+	intel_dp->DP |= intel_dp->color_range;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		intel_dp->DP |= DP_SYNC_HS_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		intel_dp->DP |= DP_SYNC_VS_HIGH;
+
+	intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+	switch (intel_dp->lane_count) {
+	case 1:
+		intel_dp->DP |= DP_PORT_WIDTH_1;
+		break;
+	case 2:
+		intel_dp->DP |= DP_PORT_WIDTH_2;
+		break;
+	case 4:
+		intel_dp->DP |= DP_PORT_WIDTH_4;
+		break;
+	}
+	if (intel_dp->has_audio)
+		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+	intel_dp->link_configuration[0] = intel_dp->link_bw;
+	intel_dp->link_configuration[1] = intel_dp->lane_count;
+
+	/*
+	 * Check for DPCD 1.1 or newer and enhanced framing support
+	 */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+		intel_dp->DP |= DP_ENHANCED_FRAMING;
+	}
+
+	/* Route the port to pipe B when driving the second pipe */
+	if (intel_crtc->pipe == 1)
+		intel_dp->DP |= DP_PIPEB_SELECT;
+
+	REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
+	DRM_DEBUG_KMS("DP expected reg is %x\n", intel_dp->DP);
+	if (is_edp(intel_encoder)) {
+		uint32_t pfit_control;
+		cdv_intel_edp_panel_on(intel_encoder);
+
+		if (mode->hdisplay != adjusted_mode->hdisplay ||
+			    mode->vdisplay != adjusted_mode->vdisplay)
+			pfit_control = PFIT_ENABLE;
+		else
+			pfit_control = 0;
+
+		pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
+
+		REG_WRITE(PFIT_CONTROL, pfit_control);
+	}
+}
+
+
+/* If the sink supports it, try to set the power state appropriately */
+static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret, i;
+
+	/* Should have a valid DPCD by this point */
+	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+		return;
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		ret = cdv_intel_dp_aux_native_write_1(encoder, DP_SET_POWER,
+						  DP_SET_POWER_D3);
+		if (ret != 1)
+			DRM_DEBUG_DRIVER("failed to write sink power state\n");
+	} else {
+		/*
+		 * When turning on, retry a few times with a 1ms wait between
+		 * attempts to give the sink time to wake up.
+		 */
+		for (i = 0; i < 3; i++) {
+			ret = cdv_intel_dp_aux_native_write_1(encoder,
+							  DP_SET_POWER,
+							  DP_SET_POWER_D0);
+			if (ret == 1)
+				break;
+			udelay(1000);
+		}
+	}
+}
+
+static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	int edp = is_edp(intel_encoder);
+
+	if (edp) {
+		cdv_intel_edp_backlight_off(intel_encoder);
+		cdv_intel_edp_panel_off(intel_encoder);
+		cdv_intel_edp_panel_vdd_on(intel_encoder);
+	}
+	/* Wake up the sink first */
+	cdv_intel_dp_sink_dpms(intel_encoder, DRM_MODE_DPMS_ON);
+	cdv_intel_dp_link_down(intel_encoder);
+	if (edp)
+		cdv_intel_edp_panel_vdd_off(intel_encoder);
+}
+
+static void cdv_intel_dp_commit(struct drm_encoder *encoder)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	int edp = is_edp(intel_encoder);
+
+	if (edp)
+		cdv_intel_edp_panel_on(intel_encoder);
+	cdv_intel_dp_start_link_train(intel_encoder);
+	cdv_intel_dp_complete_link_train(intel_encoder);
+	if (edp)
+		cdv_intel_edp_backlight_on(intel_encoder);
+}
+
+static void
+cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	struct drm_device *dev = encoder->dev;
+	uint32_t dp_reg = REG_READ(intel_dp->output_reg);
+	int edp = is_edp(intel_encoder);
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		if (edp) {
+			cdv_intel_edp_backlight_off(intel_encoder);
+			cdv_intel_edp_panel_vdd_on(intel_encoder);
+		}
+		cdv_intel_dp_sink_dpms(intel_encoder, mode);
+		cdv_intel_dp_link_down(intel_encoder);
+		if (edp) {
+			cdv_intel_edp_panel_vdd_off(intel_encoder);
+			cdv_intel_edp_panel_off(intel_encoder);
+		}
+	} else {
+		if (edp)
+			cdv_intel_edp_panel_on(intel_encoder);
+		cdv_intel_dp_sink_dpms(intel_encoder, mode);
+		if (!(dp_reg & DP_PORT_EN)) {
+			cdv_intel_dp_start_link_train(intel_encoder);
+			cdv_intel_dp_complete_link_train(intel_encoder);
+		}
+		if (edp)
+			cdv_intel_edp_backlight_on(intel_encoder);
+	}
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address,
+			       uint8_t *recv, int recv_bytes)
+{
+	int ret, i;
+
+	/*
+	 * Sinks are *supposed* to come up within 1ms from an off state,
+	 * but we're also supposed to retry 3 times per the spec.
+	 */
+	for (i = 0; i < 3; i++) {
+		ret = cdv_intel_dp_aux_native_read(encoder, address, recv,
+					       recv_bytes);
+		if (ret == recv_bytes)
+			return true;
+		udelay(1000);
+	}
+
+	return false;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	return cdv_intel_dp_aux_native_read_retry(encoder,
+					      DP_LANE0_1_STATUS,
+					      intel_dp->link_status,
+					      DP_LINK_STATUS_SIZE);
+}
+
+static uint8_t
+cdv_intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+		     int r)
+{
+	return link_status[r - DP_LANE0_1_STATUS];
+}
+
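+/*
+ * The DPCD ADJUST_REQUEST registers pack two lanes per byte: two bits of
+ * requested voltage swing and two bits of requested pre-emphasis per
+ * lane, with the odd-numbered lane in the upper nibble.
+ */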
+static uint8_t
+cdv_intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+				 int lane)
+{
+	int	    i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int	    s = ((lane & 1) ?
+			 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+			 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+	uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static uint8_t
+cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+				      int lane)
+{
+	int	    i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int	    s = ((lane & 1) ?
+			 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+			 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+	uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+
+#if 0
+static char	*voltage_names[] = {
+	"0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char	*pre_emph_names[] = {
+	"0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char	*link_train_names[] = {
+	"pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+#define CDV_DP_VOLTAGE_MAX	    DP_TRAIN_VOLTAGE_SWING_1200
+/*
+static uint8_t
+cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+{
+	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+	case DP_TRAIN_VOLTAGE_SWING_400:
+		return DP_TRAIN_PRE_EMPHASIS_6;
+	case DP_TRAIN_VOLTAGE_SWING_600:
+		return DP_TRAIN_PRE_EMPHASIS_6;
+	case DP_TRAIN_VOLTAGE_SWING_800:
+		return DP_TRAIN_PRE_EMPHASIS_3_5;
+	case DP_TRAIN_VOLTAGE_SWING_1200:
+	default:
+		return DP_TRAIN_PRE_EMPHASIS_0;
+	}
+}
+*/
+static void
+cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint8_t v = 0;
+	uint8_t p = 0;
+	int lane;
+
+	for (lane = 0; lane < intel_dp->lane_count; lane++) {
+		uint8_t this_v = cdv_intel_get_adjust_request_voltage(intel_dp->link_status, lane);
+		uint8_t this_p = cdv_intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+
+		if (this_v > v)
+			v = this_v;
+		if (this_p > p)
+			p = this_p;
+	}
+
+	if (v >= CDV_DP_VOLTAGE_MAX)
+		v = CDV_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+	if (p == DP_TRAIN_PRE_EMPHASIS_MASK)
+		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+	for (lane = 0; lane < 4; lane++)
+		intel_dp->train_set[lane] = v | p;
+}
+
+
+static uint8_t
+cdv_intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+		      int lane)
+{
+	int i = DP_LANE0_1_STATUS + (lane >> 1);
+	int s = (lane & 1) * 4;
+	uint8_t l = cdv_intel_dp_link_status(link_status, i);
+
+	return (l >> s) & 0xf;
+}
+
+/* Check whether clock recovery is done on all lanes */
+static bool
+cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+	int lane;
+	uint8_t lane_status;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = cdv_intel_get_lane_status(link_status, lane);
+		if ((lane_status & DP_LANE_CR_DONE) == 0)
+			return false;
+	}
+	return true;
+}
+
+/* Check whether channel eq is done on all lanes */
+#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
+			 DP_LANE_CHANNEL_EQ_DONE|\
+			 DP_LANE_SYMBOL_LOCKED)
+static bool
+cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint8_t lane_align;
+	uint8_t lane_status;
+	int lane;
+
+	lane_align = cdv_intel_dp_link_status(intel_dp->link_status,
+					  DP_LANE_ALIGN_STATUS_UPDATED);
+	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+		return false;
+	for (lane = 0; lane < intel_dp->lane_count; lane++) {
+		lane_status = cdv_intel_get_lane_status(intel_dp->link_status, lane);
+		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+			return false;
+	}
+	return true;
+}
+
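+/*
+ * Program a training pattern: the source's port register and the sink's
+ * DP_TRAINING_PATTERN_SET DPCD register must be set to the same pattern
+ * for training to make progress.
+ */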
+static bool
+cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
+			uint32_t dp_reg_value,
+			uint8_t dp_train_pat)
+{
+	struct drm_device *dev = encoder->base.dev;
+	int ret;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+
+	REG_WRITE(intel_dp->output_reg, dp_reg_value);
+	REG_READ(intel_dp->output_reg);
+
+	ret = cdv_intel_dp_aux_native_write_1(encoder,
+				    DP_TRAINING_PATTERN_SET,
+				    dp_train_pat);
+
+	if (ret != 1) {
+		DRM_DEBUG_KMS("Failure in setting link pattern %x\n",
+				dp_train_pat);
+		return false;
+	}
+
+	return true;
+}
+
+
+static bool
+cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder)
+{
+	int ret;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+
+	/* Write the per-lane voltage swing / pre-emphasis levels to the sink */
+	ret = cdv_intel_dp_aux_native_write(encoder,
+					DP_TRAINING_LANE0_SET,
+					intel_dp->train_set,
+					intel_dp->lane_count);
+
+	if (ret != intel_dp->lane_count) {
+		DRM_DEBUG_KMS("Failure in setting level %d, lane count %d\n",
+				intel_dp->train_set[0], intel_dp->lane_count);
+		return false;
+	}
+	return true;
+}
+
+static void
+cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	struct ddi_regoff *ddi_reg;
+	int vswing, premph, index;
+
+	if (intel_dp->output_reg == DP_B)
+		ddi_reg = &ddi_DP_train_table[0];
+	else
+		ddi_reg = &ddi_DP_train_table[1];
+
+	vswing = (signal_level & DP_TRAIN_VOLTAGE_SWING_MASK);
+	premph = ((signal_level & DP_TRAIN_PRE_EMPHASIS_MASK)) >>
+				DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+	if (vswing + premph > 3)
+		return;
+#ifdef CDV_FAST_LINK_TRAIN
+	return;
+#endif
+	DRM_DEBUG_KMS("Test2\n");
+	//return ;
+	cdv_sb_reset(dev);
+	/* ;Swing voltage programming
+        ;gfx_dpio_set_reg(0xc058, 0x0505313A) */
+	cdv_sb_write(dev, ddi_reg->VSwing5, 0x0505313A);
+
+	/* ;gfx_dpio_set_reg(0x8154, 0x43406055) */
+	cdv_sb_write(dev, ddi_reg->VSwing1, 0x43406055);
+
+	/* ;gfx_dpio_set_reg(0x8148, 0x55338954)
+	 * The VSwing_PreEmph table is also considered based on the vswing/premp
+	 */
+	index = (vswing + premph) * 2;
+	if (premph == 1 && vswing == 1)
+		cdv_sb_write(dev, ddi_reg->VSwing2, 0x055738954);
+	else
+		cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]);
+
+	/* ;gfx_dpio_set_reg(0x814c, 0x40802040) */
+	if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200)
+		cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040);
+	else
+		cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040);
+
+	/* ;gfx_dpio_set_reg(0x8150, 0x2b405555) */
+	/* cdv_sb_write(dev, ddi_reg->VSwing4, 0x2b405555); */
+
+	/* ;gfx_dpio_set_reg(0x8154, 0xc3406055) */
+	cdv_sb_write(dev, ddi_reg->VSwing1, 0xc3406055);
+
+	/* ;Pre emphasis programming
+	 * ;gfx_dpio_set_reg(0xc02c, 0x1f030040)
+	 */
+	cdv_sb_write(dev, ddi_reg->PreEmph1, 0x1f030040);
+
+	/* ;gfx_dpio_set_reg(0x8124, 0x00004000) */
+	index = 2 * premph + 1;
+	cdv_sb_write(dev, ddi_reg->PreEmph2, dp_vswing_premph_table[index]);
+}
+
+
+/* Enable corresponding port and start training pattern 1 */
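+/*
+ * Clock-recovery loop: transmit pattern 1 and adjust the drive levels as
+ * requested by the sink until every lane reports CR_DONE, the sink asks
+ * for the same voltage five times in a row, or all lanes already run at
+ * maximum swing.
+ */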
+static void
+cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int i;
+	uint8_t voltage;
+	bool clock_recovery = false;
+	int tries;
+	u32 reg;
+	uint32_t DP = intel_dp->DP;
+
+	DP |= DP_PORT_EN;
+	DP &= ~DP_LINK_TRAIN_MASK;
+
+	reg = DP | DP_LINK_TRAIN_PAT_1;
+	/* Enable output, wait for it to become active */
+	REG_WRITE(intel_dp->output_reg, reg);
+	REG_READ(intel_dp->output_reg);
+	psb_intel_wait_for_vblank(dev);
+
+	DRM_DEBUG_KMS("Link config\n");
+	/* Write the link configuration data */
+	cdv_intel_dp_aux_native_write(encoder, DP_LINK_BW_SET,
+				  intel_dp->link_configuration,
+				  2);
+
+	memset(intel_dp->train_set, 0, 4);
+	voltage = 0;
+	tries = 0;
+	clock_recovery = false;
+
+	DRM_DEBUG_KMS("Start train\n");
+	reg = DP | DP_LINK_TRAIN_PAT_1;
+
+	for (;;) {
+		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+		DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+				intel_dp->train_set[0],
+				intel_dp->link_configuration[0],
+				intel_dp->link_configuration[1]);
+
+		if (!cdv_intel_dp_set_link_train(encoder, reg, DP_TRAINING_PATTERN_1)) {
+			DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 1\n");
+		}
+		cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+
+		/* Tell the sink which drive levels we are using */
+		cdv_intel_dplink_set_level(encoder);
+
+		udelay(200);
+		if (!cdv_intel_dp_get_link_status(encoder))
+			break;
+
+		DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+				intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+				intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+		if (cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			DRM_DEBUG_KMS("PT1 train is done\n");
+			clock_recovery = true;
+			break;
+		}
+
+		/* Check to see if we've tried the max voltage */
+		for (i = 0; i < intel_dp->lane_count; i++)
+			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+				break;
+		if (i == intel_dp->lane_count)
+			break;
+
+		/* Check to see if we've tried the same voltage 5 times */
+		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++tries;
+			if (tries == 5)
+				break;
+		} else
+			tries = 0;
+		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* Compute new intel_dp->train_set as requested by target */
+		cdv_intel_get_adjust_train(encoder);
+
+	}
+
+	if (!clock_recovery)
+		DRM_DEBUG_KMS("failure in DP pattern 1 training, train set %x\n",
+			      intel_dp->train_set[0]);
+
+	intel_dp->DP = DP;
+}
+
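+/*
+ * Channel-equalization phase: transmit pattern 2, dropping back to a
+ * fresh clock-recovery pass whenever CR is lost, until every lane
+ * reports EQ done, symbol lock and interlane alignment.
+ */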
+static void
+cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	bool channel_eq = false;
+	int tries, cr_tries;
+	u32 reg;
+	uint32_t DP = intel_dp->DP;
+
+	/* channel equalization */
+	tries = 0;
+	cr_tries = 0;
+	channel_eq = false;
+
+	DRM_DEBUG_KMS("\n");
+	reg = DP | DP_LINK_TRAIN_PAT_2;
+
+	for (;;) {
+
+		DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
+				intel_dp->train_set[0],
+				intel_dp->link_configuration[0],
+				intel_dp->link_configuration[1]);
+		/* channel eq pattern */
+
+		if (!cdv_intel_dp_set_link_train(encoder, reg,
+					     DP_TRAINING_PATTERN_2)) {
+			DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 2\n");
+		}
+		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+
+		if (cr_tries > 5) {
+			DRM_ERROR("failed to train DP, aborting\n");
+			cdv_intel_dp_link_down(encoder);
+			break;
+		}
+
+		cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
+
+		cdv_intel_dplink_set_level(encoder);
+
+		udelay(1000);
+		if (!cdv_intel_dp_get_link_status(encoder))
+			break;
+
+		DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
+				intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
+				intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
+
+		/* Make sure clock is still ok */
+		if (!cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			cdv_intel_dp_start_link_train(encoder);
+			cr_tries++;
+			continue;
+		}
+
+		if (cdv_intel_channel_eq_ok(encoder)) {
+			DRM_DEBUG_KMS("PT2 train is done\n");
+			channel_eq = true;
+			break;
+		}
+
+		/* Try 5 times, then try clock recovery if that fails */
+		if (tries > 5) {
+			cdv_intel_dp_link_down(encoder);
+			cdv_intel_dp_start_link_train(encoder);
+			tries = 0;
+			cr_tries++;
+			continue;
+		}
+
+		/* Compute new intel_dp->train_set as requested by target */
+		cdv_intel_get_adjust_train(encoder);
+		++tries;
+
+	}
+
+	reg = DP | DP_LINK_TRAIN_OFF;
+
+	REG_WRITE(intel_dp->output_reg, reg);
+	REG_READ(intel_dp->output_reg);
+	cdv_intel_dp_aux_native_write_1(encoder,
+				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	uint32_t DP = intel_dp->DP;
+
+	if ((REG_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+		return;
+
+	DRM_DEBUG_KMS("\n");
+
+	DP &= ~DP_LINK_TRAIN_MASK;
+	REG_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+	REG_READ(intel_dp->output_reg);
+
+	/* Wait roughly one frame for the idle pattern to take effect */
+	msleep(17);
+
+	REG_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+	REG_READ(intel_dp->output_reg);
+}
+
+static enum drm_connector_status
+cdv_dp_detect(struct psb_intel_encoder *encoder)
+{
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	enum drm_connector_status status;
+
+	status = connector_status_disconnected;
+	if (cdv_intel_dp_aux_native_read(encoder, 0x000, intel_dp->dpcd,
+				     sizeof(intel_dp->dpcd)) == sizeof(intel_dp->dpcd)) {
+		if (intel_dp->dpcd[DP_DPCD_REV] != 0)
+			status = connector_status_connected;
+	}
+	if (status == connector_status_connected)
+		DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+			intel_dp->dpcd[0], intel_dp->dpcd[1],
+			intel_dp->dpcd[2], intel_dp->dpcd[3]);
+	return status;
+}
+
+/**
+ * cdv_intel_dp_detect - report DP/eDP connector status
+ *
+ * Probes the sink by reading its DPCD over the AUX channel.
+ *
+ * \return connector_status_connected if a sink responded with a valid DPCD.
+ * \return connector_status_disconnected otherwise.
+ */
+static enum drm_connector_status
+cdv_intel_dp_detect(struct drm_connector *connector, bool force)
+{
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	enum drm_connector_status status;
+	struct edid *edid = NULL;
+	int edp = is_edp(encoder);
+
+	intel_dp->has_audio = false;
+
+	if (edp)
+		cdv_intel_edp_panel_vdd_on(encoder);
+	status = cdv_dp_detect(encoder);
+	if (status != connector_status_connected) {
+		if (edp)
+			cdv_intel_edp_panel_vdd_off(encoder);
+		return status;
+	}
+
+	if (intel_dp->force_audio) {
+		intel_dp->has_audio = intel_dp->force_audio > 0;
+	} else {
+		edid = drm_get_edid(connector, &intel_dp->adapter);
+		if (edid) {
+			intel_dp->has_audio = drm_detect_monitor_audio(edid);
+			kfree(edid);
+		}
+	}
+	if (edp)
+		cdv_intel_edp_panel_vdd_off(encoder);
+
+	return connector_status_connected;
+}
+
+static int cdv_intel_dp_get_modes(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
+	struct edid *edid = NULL;
+	int ret = 0;
+	int edp = is_edp(intel_encoder);
+
+
+	edid = drm_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		drm_mode_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+
+	if (is_edp(intel_encoder)) {
+		struct drm_device *dev = connector->dev;
+		struct drm_psb_private *dev_priv = dev->dev_private;
+
+		cdv_intel_edp_panel_vdd_off(intel_encoder);
+		if (ret) {
+			if (!intel_dp->panel_fixed_mode) {
+				struct drm_display_mode *newmode;
+				list_for_each_entry(newmode, &connector->probed_modes,
+					    head) {
+					if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+						intel_dp->panel_fixed_mode =
+							drm_mode_duplicate(dev, newmode);
+						break;
+					}
+				}
+			}
+
+			return ret;
+		}
+		if (!intel_dp->panel_fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+			intel_dp->panel_fixed_mode =
+				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+			if (intel_dp->panel_fixed_mode) {
+				intel_dp->panel_fixed_mode->type |=
+					DRM_MODE_TYPE_PREFERRED;
+			}
+		}
+		if (intel_dp->panel_fixed_mode != NULL) {
+			struct drm_display_mode *mode;
+			mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+			drm_mode_probed_add(connector, mode);
+			return 1;
+		}
+	}
+
+	return ret;
+}
+
+static bool
+cdv_intel_dp_detect_audio(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	struct edid *edid;
+	bool has_audio = false;
+	int edp = is_edp(encoder);
+
+	if (edp)
+		cdv_intel_edp_panel_vdd_on(encoder);
+
+	edid = drm_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		has_audio = drm_detect_monitor_audio(edid);
+		kfree(edid);
+	}
+	if (edp)
+		cdv_intel_edp_panel_vdd_off(encoder);
+
+	return has_audio;
+}
+
+static int
+cdv_intel_dp_set_property(struct drm_connector *connector,
+		      struct drm_property *property,
+		      uint64_t val)
+{
+	struct drm_psb_private *dev_priv = connector->dev->dev_private;
+	struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = encoder->dev_priv;
+	int ret;
+
+	ret = drm_object_property_set_value(&connector->base, property, val);
+	if (ret)
+		return ret;
+
+	if (property == dev_priv->force_audio_property) {
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_dp->force_audio)
+			return 0;
+
+		intel_dp->force_audio = i;
+
+		if (i == 0)
+			has_audio = cdv_intel_dp_detect_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == intel_dp->has_audio)
+			return 0;
+
+		intel_dp->has_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		if (val == !!intel_dp->color_range)
+			return 0;
+
+		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+		goto done;
+	}
+
+	return -EINVAL;
+
+done:
+	if (encoder->base.crtc) {
+		struct drm_crtc *crtc = encoder->base.crtc;
+		drm_crtc_helper_set_mode(crtc, &crtc->mode,
+					 crtc->x, crtc->y,
+					 crtc->fb);
+	}
+
+	return 0;
+}
+
+static void
+cdv_intel_dp_destroy(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
+
+	if (is_edp(psb_intel_encoder)) {
+	/*	cdv_intel_panel_destroy_backlight(connector->dev); */
+		if (intel_dp->panel_fixed_mode) {
+			kfree(intel_dp->panel_fixed_mode);
+			intel_dp->panel_fixed_mode = NULL;
+		}
+	}
+	i2c_del_adapter(&intel_dp->adapter);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
+	.dpms = cdv_intel_dp_dpms,
+	.mode_fixup = cdv_intel_dp_mode_fixup,
+	.prepare = cdv_intel_dp_prepare,
+	.mode_set = cdv_intel_dp_mode_set,
+	.commit = cdv_intel_dp_commit,
+};
+
+static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = cdv_intel_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = cdv_intel_dp_set_property,
+	.destroy = cdv_intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
+	.get_modes = cdv_intel_dp_get_modes,
+	.mode_valid = cdv_intel_dp_mode_valid,
+	.best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
+	.destroy = cdv_intel_dp_encoder_destroy,
+};
+
+
+static void cdv_intel_dp_add_properties(struct drm_connector *connector)
+{
+	cdv_intel_attach_force_audio_property(connector);
+	cdv_intel_attach_broadcast_rgb_property(connector);
+}
+
+/* check the VBT to see whether the eDP is on the DP-C port */
+static bool cdv_intel_dpc_is_edp(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i;
+
+	if (!dev_priv->child_dev_num)
+		return false;
+
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+
+		if (p_child->dvo_port == PORT_IDPC &&
+		    p_child->device_type == DEVICE_TYPE_eDP)
+			return true;
+	}
+	return false;
+}
+
+/* Cedarview display clock gating
+
+   We need clock gating disabled to get correct behaviour while enabling
+   DP/eDP. TODO - investigate whether we can turn it back on again after
+   enabling */
+static void cdv_disable_intel_clock_gating(struct drm_device *dev)
+{
+	u32 reg_value;
+	reg_value = REG_READ(DSPCLK_GATE_D);
+
+	reg_value |= (DPUNIT_PIPEB_GATE_DISABLE |
+			DPUNIT_PIPEA_GATE_DISABLE |
+			DPCUNIT_CLOCK_GATE_DISABLE |
+			DPLSUNIT_CLOCK_GATE_DISABLE |
+			DPOUNIT_CLOCK_GATE_DISABLE |
+			DPIOUNIT_CLOCK_GATE_DISABLE);
+
+	REG_WRITE(DSPCLK_GATE_D, reg_value);
+
+	udelay(500);
+}
+
+void
+cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
+{
+	struct psb_intel_encoder *psb_intel_encoder;
+	struct psb_intel_connector *psb_intel_connector;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct cdv_intel_dp *intel_dp;
+	const char *name = NULL;
+	int type = DRM_MODE_CONNECTOR_DisplayPort;
+
+	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+	if (!psb_intel_encoder)
+		return;
+	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+	if (!psb_intel_connector)
+		goto err_connector;
+	intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
+	if (!intel_dp)
+		goto err_priv;
+
+	if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
+		type = DRM_MODE_CONNECTOR_eDP;
+
+	connector = &psb_intel_connector->base;
+	encoder = &psb_intel_encoder->base;
+
+	drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
+	drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
+
+	psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder);
+
+	if (type == DRM_MODE_CONNECTOR_DisplayPort)
+		psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+	else
+		psb_intel_encoder->type = INTEL_OUTPUT_EDP;
+
+	psb_intel_encoder->dev_priv = intel_dp;
+	intel_dp->encoder = psb_intel_encoder;
+	intel_dp->output_reg = output_reg;
+
+	drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
+	drm_connector_helper_add(connector, &cdv_intel_dp_connector_helper_funcs);
+
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	drm_sysfs_connector_add(connector);
+
+	/* Set up the DDC bus. */
+	switch (output_reg) {
+	case DP_B:
+		name = "DPDDC-B";
+		psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
+		break;
+	case DP_C:
+		name = "DPDDC-C";
+		psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
+		break;
+	}
+
+	cdv_disable_intel_clock_gating(dev);
+
+	cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name);
+	/* FIXME: check for failure */
+	cdv_intel_dp_add_properties(connector);
+
+	if (is_edp(psb_intel_encoder)) {
+		int ret;
+		struct edp_power_seq cur;
+		u32 pp_on, pp_off, pp_div;
+		u32 pwm_ctrl;
+
+		pp_on = REG_READ(PP_CONTROL);
+		pp_on &= ~PANEL_UNLOCK_MASK;
+		pp_on |= PANEL_UNLOCK_REGS;
+
+		REG_WRITE(PP_CONTROL, pp_on);
+
+		pwm_ctrl = REG_READ(BLC_PWM_CTL2);
+		pwm_ctrl |= PWM_PIPE_B;
+		REG_WRITE(BLC_PWM_CTL2, pwm_ctrl);
+
+		pp_on = REG_READ(PP_ON_DELAYS);
+		pp_off = REG_READ(PP_OFF_DELAYS);
+		pp_div = REG_READ(PP_DIVISOR);
+
+		/* Pull timing values out of registers */
+		cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+			PANEL_POWER_UP_DELAY_SHIFT;
+
+		cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+			PANEL_LIGHT_ON_DELAY_SHIFT;
+
+		cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+			PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+		cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+			PANEL_POWER_DOWN_DELAY_SHIFT;
+
+		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+			       PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+		DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+			      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
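+		/*
+		 * The register fields appear to count in units of 100us
+		 * (hence the divide by ten to get milliseconds below), and
+		 * the power-cycle field in units of 100ms with an offset
+		 * of one.
+		 */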
+		intel_dp->panel_power_up_delay = cur.t1_t3 / 10;
+		intel_dp->backlight_on_delay = cur.t8 / 10;
+		intel_dp->backlight_off_delay = cur.t9 / 10;
+		intel_dp->panel_power_down_delay = cur.t10 / 10;
+		intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100;
+
+		DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+			      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+			      intel_dp->panel_power_cycle_delay);
+
+		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+		cdv_intel_edp_panel_vdd_on(psb_intel_encoder);
+		ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV,
+					       intel_dp->dpcd,
+					       sizeof(intel_dp->dpcd));
+		cdv_intel_edp_panel_vdd_off(psb_intel_encoder);
+		if (ret == 0) {
+			/* if this fails, presume the device is a ghost */
+			DRM_INFO("failed to retrieve link info, disabling eDP\n");
+			cdv_intel_dp_encoder_destroy(encoder);
+			cdv_intel_dp_destroy(connector);
+			/* cdv_intel_dp_destroy() has already freed the
+			 * connector, so don't take the error path that would
+			 * free it a second time; just release the remaining
+			 * allocations and bail out. */
+			kfree(intel_dp);
+			kfree(psb_intel_encoder);
+			return;
+		}
+		DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
+			intel_dp->dpcd[0], intel_dp->dpcd[1],
+			intel_dp->dpcd[2], intel_dp->dpcd[3]);
+		/* The CDV reference driver moves panel backlight setup into the
+		   displays that have a backlight: this is a good idea and one we
+		   should probably adopt, however we need to migrate all the
+		   drivers before we can do that */
+		/* cdv_intel_panel_setup_backlight(dev); */
+	}
+	return;
+
+err_priv:
+	kfree(psb_intel_connector);
+err_connector:
+	kfree(psb_intel_encoder);
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/linux-imx/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
new file mode 100644
index 0000000..464153d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	jim liu <jim.liu@intel.com>
+ *
+ * FIXME:
+ *	We should probably make this generic and share it with Medfield
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "psb_intel_drv.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "cdv_device.h"
+#include <linux/pm_runtime.h>
+
+/* hdmi control bits */
+#define HDMI_NULL_PACKETS_DURING_VSYNC	(1 << 9)
+#define HDMI_BORDER_ENABLE		(1 << 7)
+#define HDMI_AUDIO_ENABLE		(1 << 6)
+#define HDMI_VSYNC_ACTIVE_HIGH		(1 << 4)
+#define HDMI_HSYNC_ACTIVE_HIGH		(1 << 3)
+/* hdmi-b control bits */
+#define	HDMIB_PIPE_B_SELECT		(1 << 30)
+
+
+struct mid_intel_hdmi_priv {
+	u32 hdmi_reg;
+	u32 save_HDMIB;
+	bool has_hdmi_sink;
+	bool has_hdmi_audio;
+	/* Set when hotplug is detected */
+	bool hdmi_device_connected;
+	struct mdfld_hdmi_i2c *i2c_bus;
+	struct i2c_adapter *hdmi_i2c_adapter;	/* for control functions */
+	struct drm_device *dev;
+};
+
+static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
+			struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder);
+	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+	u32 hdmib;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+
+	/* Bits 11:10 select HDMI (rather than DVI) encoding */
+	hdmib = (2 << 10);
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		hdmib |= HDMI_VSYNC_ACTIVE_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
+
+	if (intel_crtc->pipe == 1)
+		hdmib |= HDMIB_PIPE_B_SELECT;
+
+	if (hdmi_priv->has_hdmi_audio) {
+		hdmib |= HDMI_AUDIO_ENABLE;
+		hdmib |= HDMI_NULL_PACKETS_DURING_VSYNC;
+	}
+
+	REG_WRITE(hdmi_priv->hdmi_reg, hdmib);
+	REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct psb_intel_encoder *psb_intel_encoder =
+						to_psb_intel_encoder(encoder);
+	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+	u32 hdmib;
+
+	hdmib = REG_READ(hdmi_priv->hdmi_reg);
+
+	if (mode != DRM_MODE_DPMS_ON)
+		REG_WRITE(hdmi_priv->hdmi_reg, hdmib & ~HDMIB_PORT_EN);
+	else
+		REG_WRITE(hdmi_priv->hdmi_reg, hdmib | HDMIB_PORT_EN);
+	REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static void cdv_hdmi_save(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+
+	hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static void cdv_hdmi_restore(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+
+	REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
+	REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static enum drm_connector_status cdv_hdmi_detect(
+				struct drm_connector *connector, bool force)
+{
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+	struct edid *edid = NULL;
+	enum drm_connector_status status = connector_status_disconnected;
+
+	edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+
+	hdmi_priv->has_hdmi_sink = false;
+	hdmi_priv->has_hdmi_audio = false;
+	if (edid) {
+		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+			status = connector_status_connected;
+			hdmi_priv->has_hdmi_sink =
+						drm_detect_hdmi_monitor(edid);
+			hdmi_priv->has_hdmi_audio =
+						drm_detect_monitor_audio(edid);
+		}
+		kfree(edid);
+	}
+	return status;
+}
+
+static int cdv_hdmi_set_property(struct drm_connector *connector,
+				       struct drm_property *property,
+				       uint64_t value)
+{
+	struct drm_encoder *encoder = connector->encoder;
+
+	if (!strcmp(property->name, "scaling mode") && encoder) {
+		struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc);
+		bool centre;
+		uint64_t curValue;
+
+		if (!crtc)
+			return -1;
+
+		switch (value) {
+		case DRM_MODE_SCALE_FULLSCREEN:
+			break;
+		case DRM_MODE_SCALE_NO_SCALE:
+			break;
+		case DRM_MODE_SCALE_ASPECT:
+			break;
+		default:
+			return -1;
+		}
+
+		if (drm_object_property_get_value(&connector->base,
+							property, &curValue))
+			return -1;
+
+		if (curValue == value)
+			return 0;
+
+		if (drm_object_property_set_value(&connector->base,
+							property, value))
+			return -1;
+
+		centre = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
+			(value == DRM_MODE_SCALE_NO_SCALE);
+
+		if (crtc->saved_mode.hdisplay != 0 &&
+		    crtc->saved_mode.vdisplay != 0) {
+			if (centre) {
+				if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode,
+					    encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
+					return -1;
+			} else {
+				struct drm_encoder_helper_funcs *helpers
+						    = encoder->helper_private;
+				helpers->mode_set(encoder, &crtc->saved_mode,
+					     &crtc->saved_adjusted_mode);
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * Return the list of HDMI DDC modes if available.
+ */
+static int cdv_hdmi_get_modes(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct edid *edid = NULL;
+	int ret = 0;
+
+	edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+	if (edid) {
+		drm_mode_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+	return ret;
+}
+
+static int cdv_hdmi_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
+	if (mode->clock > 165000)
+		return MODE_CLOCK_HIGH;
+	if (mode->clock < 20000)
+		return MODE_CLOCK_LOW;
+
+	/* just in case */
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	/* just in case */
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return MODE_NO_INTERLACE;
+
+	return MODE_OK;
+}
+
+static void cdv_hdmi_destroy(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+
+	if (psb_intel_encoder->i2c_bus)
+		psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
+	.dpms = cdv_hdmi_dpms,
+	.mode_fixup = cdv_hdmi_mode_fixup,
+	.prepare = psb_intel_encoder_prepare,
+	.mode_set = cdv_hdmi_mode_set,
+	.commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_helper_funcs
+					cdv_hdmi_connector_helper_funcs = {
+	.get_modes = cdv_hdmi_get_modes,
+	.mode_valid = cdv_hdmi_mode_valid,
+	.best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.save = cdv_hdmi_save,
+	.restore = cdv_hdmi_restore,
+	.detect = cdv_hdmi_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = cdv_hdmi_set_property,
+	.destroy = cdv_hdmi_destroy,
+};
+
+void cdv_hdmi_init(struct drm_device *dev,
+			struct psb_intel_mode_device *mode_dev, int reg)
+{
+	struct psb_intel_encoder *psb_intel_encoder;
+	struct psb_intel_connector *psb_intel_connector;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct mid_intel_hdmi_priv *hdmi_priv;
+	int ddc_bus;
+
+	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+				    GFP_KERNEL);
+
+	if (!psb_intel_encoder)
+		return;
+
+	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+				      GFP_KERNEL);
+
+	if (!psb_intel_connector)
+		goto err_connector;
+
+	hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
+
+	if (!hdmi_priv)
+		goto err_priv;
+
+	connector = &psb_intel_connector->base;
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+	encoder = &psb_intel_encoder->base;
+	drm_connector_init(dev, connector,
+			   &cdv_hdmi_connector_funcs,
+			   DRM_MODE_CONNECTOR_DVID);
+
+	drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	psb_intel_connector_attach_encoder(psb_intel_connector,
+					   psb_intel_encoder);
+	psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+	hdmi_priv->hdmi_reg = reg;
+	hdmi_priv->has_hdmi_sink = false;
+	psb_intel_encoder->dev_priv = hdmi_priv;
+
+	drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
+	drm_connector_helper_add(connector,
+				 &cdv_hdmi_connector_helper_funcs);
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	drm_object_attach_property(&connector->base,
+				      dev->mode_config.scaling_mode_property,
+				      DRM_MODE_SCALE_FULLSCREEN);
+
+	switch (reg) {
+	case SDVOB:
+		ddc_bus = GPIOE;
+		psb_intel_encoder->ddi_select = DDI0_SELECT;
+		break;
+	case SDVOC:
+		ddc_bus = GPIOD;
+		psb_intel_encoder->ddi_select = DDI1_SELECT;
+		break;
+	default:
+		DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
+		goto failed_ddc;
+	}
+
+	psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+				ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
+
+	if (!psb_intel_encoder->i2c_bus) {
+		dev_err(dev->dev, "No ddc adapter available!\n");
+		goto failed_ddc;
+	}
+
+	hdmi_priv->hdmi_i2c_adapter =
+				&(psb_intel_encoder->i2c_bus->adapter);
+	hdmi_priv->dev = dev;
+	drm_sysfs_connector_add(connector);
+	return;
+
+failed_ddc:
+	drm_encoder_cleanup(encoder);
+	drm_connector_cleanup(connector);
+err_priv:
+	kfree(psb_intel_connector);
+err_connector:
+	kfree(psb_intel_encoder);
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/linux-imx/drivers/gpu/drm/gma500/cdv_intel_lvds.c
new file mode 100644
index 0000000..d81dbc3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -0,0 +1,800 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *	Dave Airlie <airlied@linux.ie>
+ *	Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+#include "cdv_device.h"
+
+/**
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE	0x01
+#define BLC_PWM_TYPT	0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ       (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ	(0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR	(10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
+struct cdv_intel_lvds_priv {
+	/**
+	 * Saved LVDS output states
+	 */
+	uint32_t savePP_ON;
+	uint32_t savePP_OFF;
+	uint32_t saveLVDS;
+	uint32_t savePP_CONTROL;
+	uint32_t savePP_CYCLE;
+	uint32_t savePFIT_CONTROL;
+	uint32_t savePFIT_PGM_RATIOS;
+	uint32_t saveBLC_PWM_CTL;
+};
+
+/*
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 retval;
+
+	if (gma_power_begin(dev, false)) {
+		retval = ((REG_READ(BLC_PWM_CTL) &
+			  BACKLIGHT_MODULATION_FREQ_MASK) >>
+			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+		gma_power_end(dev);
+	} else {
+		retval = ((dev_priv->regs.saveBLC_PWM_CTL &
+			  BACKLIGHT_MODULATION_FREQ_MASK) >>
+			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+	}
+
+	return retval;
+}
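+/*
+ * Illustration (not part of the original driver; numbers hypothetical):
+ * BLC_PWM_CTL keeps the PWM modulation frequency in its upper field and
+ * the duty cycle in its lower field. If the frequency field reads 0x1000,
+ * the helper above reports a maximum backlight level of 0x1000 * 2 =
+ * 0x2000, and a 50% backlight is then a duty cycle value of 0x1000.
+ */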
+
+#if 0
+/*
+ * Set LVDS backlight level by I2C command
+ */
+static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
+					unsigned int level)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+	u8 out_buf[2];
+	unsigned int blc_i2c_brightness;
+
+	struct i2c_msg msgs[] = {
+		{
+			.addr = lvds_i2c_bus->slave_addr,
+			.flags = 0,
+			.len = 2,
+			.buf = out_buf,
+		}
+	};
+
+	blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+			     BRIGHTNESS_MASK /
+			     BRIGHTNESS_MAX_LEVEL);
+
+	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+		blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+
+	out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+	out_buf[1] = (u8)blc_i2c_brightness;
+
+	if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
+		return 0;
+
+	DRM_ERROR("I2C transfer error\n");
+	return -1;
+}
+
+
+static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	u32 max_pwm_blc;
+	u32 blc_pwm_duty_cycle;
+
+	max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
+
+	/* BLC_PWM_CTL should have been initialised during backlight device init */
+	BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
+
+	blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+
+	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+		blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+
+	blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+	REG_WRITE(BLC_PWM_CTL,
+		  (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+		  (blc_pwm_duty_cycle));
+
+	return 0;
+}
+
+/*
+ * Set LVDS backlight level either by I2C or PWM
+ */
+void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->lvds_bl) {
+		DRM_ERROR("NO LVDS Backlight Info\n");
+		return;
+	}
+
+	if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+		cdv_lvds_i2c_set_brightness(dev, level);
+	else
+		cdv_lvds_pwm_set_brightness(dev, level);
+}
+#endif
+
+/**
+ * cdv_intel_lvds_set_backlight - set the backlight level
+ * @dev: drm device
+ * @level: backlight level, from 0 to cdv_intel_lvds_get_max_backlight()
+ */
+static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 blc_pwm_ctl;
+
+	if (gma_power_begin(dev, false)) {
+		blc_pwm_ctl =
+			REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+		REG_WRITE(BLC_PWM_CTL,
+				(blc_pwm_ctl |
+				(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+		gma_power_end(dev);
+	} else {
+		blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL &
+				~BACKLIGHT_DUTY_CYCLE_MASK;
+		dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
+					(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+	}
+}
+
+/**
+ * cdv_intel_lvds_set_power - set the power state for the panel
+ * @dev: drm device
+ * @encoder: the LVDS encoder
+ * @on: true to power the panel up, false to power it down
+ */
+static void cdv_intel_lvds_set_power(struct drm_device *dev,
+				     struct drm_encoder *encoder, bool on)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 pp_status;
+
+	if (!gma_power_begin(dev, true))
+		return;
+
+	if (on) {
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+			  POWER_TARGET_ON);
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while ((pp_status & PP_ON) == 0);
+
+		cdv_intel_lvds_set_backlight(dev,
+				dev_priv->mode_dev.backlight_duty_cycle);
+	} else {
+		cdv_intel_lvds_set_backlight(dev, 0);
+
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+			  ~POWER_TARGET_ON);
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while (pp_status & PP_ON);
+	}
+	gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	if (mode == DRM_MODE_DPMS_ON)
+		cdv_intel_lvds_set_power(dev, encoder, true);
+	else
+		cdv_intel_lvds_set_power(dev, encoder, false);
+	/* XXX: We never power down the LVDS pairs. */
+}
+
+static void cdv_intel_lvds_save(struct drm_connector *connector)
+{
+}
+
+static void cdv_intel_lvds_restore(struct drm_connector *connector)
+{
+}
+
+static int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
+			      struct drm_display_mode *mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *fixed_mode =
+					dev_priv->mode_dev.panel_fixed_mode;
+
+	/* just in case */
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	/* just in case */
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return MODE_NO_INTERLACE;
+
+	if (fixed_mode) {
+		if (mode->hdisplay > fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+	return MODE_OK;
+}
+
+static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+	struct drm_encoder *tmp_encoder;
+	struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+
+	/* Should never happen!! */
+	list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+			    head) {
+		if (tmp_encoder != encoder
+		    && tmp_encoder->crtc == encoder->crtc) {
+			printk(KERN_ERR "Can't enable LVDS and another "
+			       "encoder on the same pipe\n");
+			return false;
+		}
+	}
+
+	/*
+	 * If we have timings from the BIOS for the panel, put them in
+	 * to the adjusted mode.  The CRTC will be set up for this mode,
+	 * with the panel scaling set up to source from the H/VDisplay
+	 * of the original mode.
+	 */
+	if (panel_fixed_mode != NULL) {
+		adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+		adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+		adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+		adjusted_mode->htotal = panel_fixed_mode->htotal;
+		adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+		adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+		adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+		adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+		adjusted_mode->clock = panel_fixed_mode->clock;
+		drm_mode_set_crtcinfo(adjusted_mode,
+				      CRTC_INTERLACE_HALVE_V);
+	}
+
+	/*
+	 * XXX: It would be nice to support lower refresh rates on the
+	 * panels to reduce power consumption, and perhaps match the
+	 * user's requested refresh rate.
+	 */
+
+	return true;
+}
+
+static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+	if (!gma_power_begin(dev, true))
+		return;
+
+	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+					  BACKLIGHT_DUTY_CYCLE_MASK);
+
+	cdv_intel_lvds_set_power(dev, encoder, false);
+
+	gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+	if (mode_dev->backlight_duty_cycle == 0)
+		mode_dev->backlight_duty_cycle =
+		    cdv_intel_lvds_get_max_backlight(dev);
+
+	cdv_intel_lvds_set_power(dev, encoder, true);
+}
+
+static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(
+							encoder->crtc);
+	u32 pfit_control;
+
+	/*
+	 * The LVDS pin pair will already have been turned on in the
+	 * cdv_intel_crtc_mode_set since it has a large impact on the DPLL
+	 * settings.
+	 */
+
+	/*
+	 * Enable automatic panel scaling so that non-native modes fill the
+	 * screen.  Should be enabled before the pipe is enabled, according to
+	 * register description and PRM.
+	 */
+	if (mode->hdisplay != adjusted_mode->hdisplay ||
+	    mode->vdisplay != adjusted_mode->vdisplay)
+		pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+				HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+				HORIZ_INTERP_BILINEAR);
+	else
+		pfit_control = 0;
+
+	pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
+
+	if (dev_priv->lvds_dither)
+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+	REG_WRITE(PFIT_CONTROL, pfit_control);
+}
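+/*
+ * Worked example (illustrative, values hypothetical): scaling a 1024x768
+ * mode onto a 1366x768 native panel makes the hdisplay values differ, so
+ * pfit_control above becomes
+ *
+ *	PFIT_ENABLE | VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+ *	VERT_INTERP_BILINEAR | HORIZ_INTERP_BILINEAR |
+ *	(pipe << PFIT_PIPE_SHIFT)
+ *
+ * while driving the panel at its native mode leaves only the pipe select
+ * (plus PANEL_8TO6_DITHER_ENABLE if dithering is configured).
+ */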
+
+/*
+ * Detect the LVDS connection.
+ *
+ * This always returns connector_status_connected; the connector should
+ * only have been set up if the LVDS was actually connected anyway.
+ */
+static enum drm_connector_status cdv_intel_lvds_detect(
+				struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+	int ret;
+
+	ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter);
+
+	if (ret)
+		return ret;
+
+	/*
+	 * Didn't get an EDID, so set wide sync ranges to ensure all modes
+	 * are handed to mode_valid for checking.
+	 */
+	connector->display_info.min_vfreq = 0;
+	connector->display_info.max_vfreq = 200;
+	connector->display_info.min_hfreq = 0;
+	connector->display_info.max_hfreq = 200;
+	if (mode_dev->panel_fixed_mode != NULL) {
+		struct drm_display_mode *mode =
+		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+		drm_mode_probed_add(connector, mode);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * cdv_intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+static void cdv_intel_lvds_destroy(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+
+	if (psb_intel_encoder->i2c_bus)
+		psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static int cdv_intel_lvds_set_property(struct drm_connector *connector,
+				       struct drm_property *property,
+				       uint64_t value)
+{
+	struct drm_encoder *encoder = connector->encoder;
+
+	if (!strcmp(property->name, "scaling mode") && encoder) {
+		struct psb_intel_crtc *crtc =
+					to_psb_intel_crtc(encoder->crtc);
+		uint64_t curValue;
+
+		if (!crtc)
+			return -1;
+
+		switch (value) {
+		case DRM_MODE_SCALE_FULLSCREEN:
+			break;
+		case DRM_MODE_SCALE_NO_SCALE:
+			break;
+		case DRM_MODE_SCALE_ASPECT:
+			break;
+		default:
+			return -1;
+		}
+
+		if (drm_object_property_get_value(&connector->base,
+						     property,
+						     &curValue))
+			return -1;
+
+		if (curValue == value)
+			return 0;
+
+		if (drm_object_property_set_value(&connector->base,
+							property,
+							value))
+			return -1;
+
+		if (crtc->saved_mode.hdisplay != 0 &&
+		    crtc->saved_mode.vdisplay != 0) {
+			if (!drm_crtc_helper_set_mode(encoder->crtc,
+						      &crtc->saved_mode,
+						      encoder->crtc->x,
+						      encoder->crtc->y,
+						      encoder->crtc->fb))
+				return -1;
+		}
+	} else if (!strcmp(property->name, "backlight") && encoder) {
+		if (drm_object_property_set_value(&connector->base,
+							property,
+							value))
+			return -1;
+		else
+			gma_backlight_set(encoder->dev, value);
+	} else if (!strcmp(property->name, "DPMS") && encoder) {
+		struct drm_encoder_helper_funcs *helpers =
+					encoder->helper_private;
+		helpers->dpms(encoder, value);
+	}
+	return 0;
+}
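+/*
+ * Usage sketch (illustrative, not part of the driver): a userspace client
+ * reaches this handler through the legacy connector-property ioctl, e.g.
+ * with libdrm:
+ *
+ *	drmModeConnectorSetProperty(fd, connector_id, scaling_prop_id,
+ *				    DRM_MODE_SCALE_ASPECT);
+ *
+ * where connector_id and scaling_prop_id would first be discovered via
+ * drmModeGetConnector() and drmModeGetProperty().
+ */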
+
+static const struct drm_encoder_helper_funcs
+					cdv_intel_lvds_helper_funcs = {
+	.dpms = cdv_intel_lvds_encoder_dpms,
+	.mode_fixup = cdv_intel_lvds_mode_fixup,
+	.prepare = cdv_intel_lvds_prepare,
+	.mode_set = cdv_intel_lvds_mode_set,
+	.commit = cdv_intel_lvds_commit,
+};
+
+static const struct drm_connector_helper_funcs
+				cdv_intel_lvds_connector_helper_funcs = {
+	.get_modes = cdv_intel_lvds_get_modes,
+	.mode_valid = cdv_intel_lvds_mode_valid,
+	.best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.save = cdv_intel_lvds_save,
+	.restore = cdv_intel_lvds_restore,
+	.detect = cdv_intel_lvds_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = cdv_intel_lvds_set_property,
+	.destroy = cdv_intel_lvds_destroy,
+};
+
+
+static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
+	.destroy = cdv_intel_lvds_enc_destroy,
+};
+
+/*
+ * Enumerate the child devices parsed from the VBT to check whether the
+ * LVDS is present. Returns true if it is and false if it is not. If no
+ * child devices were parsed from the VBT, assume the LVDS is present.
+ */
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+				   u8 *i2c_pin)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int i;
+
+	if (!dev_priv->child_dev_num)
+		return true;
+
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		struct child_device_config *child = dev_priv->child_dev + i;
+
+		/* If the device type is not LFP, continue.
+		 * We have to check both the new identifiers as well as the
+		 * old for compatibility with some BIOSes.
+		 */
+		if (child->device_type != DEVICE_TYPE_INT_LFP &&
+		    child->device_type != DEVICE_TYPE_LFP)
+			continue;
+
+		if (child->i2c_pin)
+			*i2c_pin = child->i2c_pin;
+
+		/* However, we cannot trust the BIOS writers to populate
+		 * the VBT correctly.  Since LVDS requires additional
+		 * information from AIM blocks, a non-zero addin offset is
+		 * a good indicator that the LVDS is actually present.
+		 */
+		if (child->addin_offset)
+			return true;
+
+		/* But even then some BIOS writers perform some black magic
+		 * and instantiate the device without reference to any
+		 * additional data.  Trust that if the VBT was written into
+		 * the OpRegion then they have validated the LVDS's existence.
+		 */
+		if (dev_priv->opregion.vbt)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * cdv_intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void cdv_intel_lvds_init(struct drm_device *dev,
+		     struct psb_intel_mode_device *mode_dev)
+{
+	struct psb_intel_encoder *psb_intel_encoder;
+	struct psb_intel_connector *psb_intel_connector;
+	struct cdv_intel_lvds_priv *lvds_priv;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_display_mode *scan;
+	struct drm_crtc *crtc;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 lvds;
+	int pipe;
+	u8 pin;
+
+	pin = GMBUS_PORT_PANEL;
+	if (!lvds_is_present_in_vbt(dev, &pin)) {
+		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+		return;
+	}
+
+	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+				    GFP_KERNEL);
+	if (!psb_intel_encoder)
+		return;
+
+	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+				      GFP_KERNEL);
+	if (!psb_intel_connector)
+		goto failed_connector;
+
+	lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
+	if (!lvds_priv)
+		goto failed_lvds_priv;
+
+	psb_intel_encoder->dev_priv = lvds_priv;
+
+	connector = &psb_intel_connector->base;
+	encoder = &psb_intel_encoder->base;
+
+
+	drm_connector_init(dev, connector,
+			   &cdv_intel_lvds_connector_funcs,
+			   DRM_MODE_CONNECTOR_LVDS);
+
+	drm_encoder_init(dev, encoder,
+			 &cdv_intel_lvds_enc_funcs,
+			 DRM_MODE_ENCODER_LVDS);
+
+
+	psb_intel_connector_attach_encoder(psb_intel_connector,
+					   psb_intel_encoder);
+	psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+	drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
+	drm_connector_helper_add(connector,
+				 &cdv_intel_lvds_connector_helper_funcs);
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	/* Attach connector properties */
+	drm_object_attach_property(&connector->base,
+				      dev->mode_config.scaling_mode_property,
+				      DRM_MODE_SCALE_FULLSCREEN);
+	drm_object_attach_property(&connector->base,
+				      dev_priv->backlight_property,
+				      BRIGHTNESS_MAX_LEVEL);
+
+	/*
+	 * Set up the I2C bus
+	 * FIXME: destroy the i2c_bus on exit
+	 */
+	psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+							 GPIOB,
+							 "LVDSBLC_B");
+	if (!psb_intel_encoder->i2c_bus) {
+		dev_printk(KERN_ERR,
+			&dev->pdev->dev, "I2C bus registration failed.\n");
+		goto failed_blc_i2c;
+	}
+	psb_intel_encoder->i2c_bus->slave_addr = 0x2C;
+	dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus;
+
+	/*
+	 * LVDS discovery:
+	 * 1) check for EDID on DDC
+	 * 2) check for VBT data
+	 * 3) check to see if LVDS is already on
+	 *    if none of the above, no panel
+	 * 4) make sure lid is open
+	 *    if closed, act like it's not there for now
+	 */
+
+	/* Set up the DDC bus. */
+	psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+							 GPIOC,
+							 "LVDSDDC_C");
+	if (!psb_intel_encoder->ddc_bus) {
+		dev_printk(KERN_ERR, &dev->pdev->dev,
+			   "DDC bus registration " "failed.\n");
+		goto failed_ddc;
+	}
+
+	/*
+	 * Attempt to get the fixed panel mode from DDC.  Assume that the
+	 * preferred mode is the right one.
+	 */
+	psb_intel_ddc_get_modes(connector,
+				&psb_intel_encoder->ddc_bus->adapter);
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+			mode_dev->panel_fixed_mode =
+			    drm_mode_duplicate(dev, scan);
+			goto out;	/* FIXME: check for quirks */
+		}
+	}
+
+	/* Failed to get EDID, what about the VBT? Do we need this? */
+	if (dev_priv->lfp_lvds_vbt_mode) {
+		mode_dev->panel_fixed_mode =
+			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+		if (mode_dev->panel_fixed_mode) {
+			mode_dev->panel_fixed_mode->type |=
+				DRM_MODE_TYPE_PREFERRED;
+			goto out;	/* FIXME: check for quirks */
+		}
+	}
+	/*
+	 * If we didn't get EDID, try checking if the panel is already turned
+	 * on.	If so, assume that whatever is currently programmed is the
+	 * correct mode.
+	 */
+	lvds = REG_READ(LVDS);
+	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+	crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+
+	if (crtc && (lvds & LVDS_PORT_EN)) {
+		mode_dev->panel_fixed_mode =
+		    cdv_intel_crtc_mode_get(dev, crtc);
+		if (mode_dev->panel_fixed_mode) {
+			mode_dev->panel_fixed_mode->type |=
+			    DRM_MODE_TYPE_PREFERRED;
+			goto out;	/* FIXME: check for quirks */
+		}
+	}
+
+	/* If we still don't have a mode after all that, give up. */
+	if (!mode_dev->panel_fixed_mode) {
+		DRM_DEBUG("Found no modes on the lvds, ignoring the LVDS\n");
+		goto failed_find;
+	}
+
+	/* setup PWM */
+	{
+		u32 pwm;
+
+		pwm = REG_READ(BLC_PWM_CTL2);
+		if (pipe == 1)
+			pwm |= PWM_PIPE_B;
+		else
+			pwm &= ~PWM_PIPE_B;
+		pwm |= PWM_ENABLE;
+		REG_WRITE(BLC_PWM_CTL2, pwm);
+	}
+
+out:
+	drm_sysfs_connector_add(connector);
+	return;
+
+failed_find:
+	printk(KERN_ERR "Failed find\n");
+	if (psb_intel_encoder->ddc_bus)
+		psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+failed_ddc:
+	printk(KERN_ERR "Failed DDC\n");
+	if (psb_intel_encoder->i2c_bus)
+		psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+failed_blc_i2c:
+	printk(KERN_ERR "Failed BLC\n");
+	drm_encoder_cleanup(encoder);
+	drm_connector_cleanup(connector);
+	kfree(lvds_priv);
+failed_lvds_priv:
+	kfree(psb_intel_connector);
+failed_connector:
+	kfree(psb_intel_encoder);
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/framebuffer.c b/linux-imx/drivers/gpu/drm/gma500/framebuffer.c
new file mode 100644
index 0000000..8b1b6d9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/framebuffer.c
@@ -0,0 +1,798 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "framebuffer.h"
+#include "gtt.h"
+
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+					      struct drm_file *file_priv,
+					      unsigned int *handle);
+
+static const struct drm_framebuffer_funcs psb_fb_funcs = {
+	.destroy = psb_user_framebuffer_destroy,
+	.create_handle = psb_user_framebuffer_create_handle,
+};
+
+#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
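+/*
+ * CMAP_TOHW rescales a 16-bit colour component to a _width-bit hardware
+ * field with rounding. Worked example (illustrative): for _val = 0xFFFF
+ * and _width = 5, (0xFFFF << 5) + 0x7FFF - 0xFFFF = 0x1F7FE0 and
+ * 0x1F7FE0 >> 16 = 0x1F, i.e. full intensity in a 5-bit field, while
+ * _val = 0 maps to 0.
+ */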
+
+static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+			   unsigned blue, unsigned transp,
+			   struct fb_info *info)
+{
+	struct psb_fbdev *fbdev = info->par;
+	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+	uint32_t v;
+
+	if (!fb)
+		return -ENOMEM;
+
+	if (regno > 255)
+		return 1;
+
+	red = CMAP_TOHW(red, info->var.red.length);
+	blue = CMAP_TOHW(blue, info->var.blue.length);
+	green = CMAP_TOHW(green, info->var.green.length);
+	transp = CMAP_TOHW(transp, info->var.transp.length);
+
+	v = (red << info->var.red.offset) |
+	    (green << info->var.green.offset) |
+	    (blue << info->var.blue.offset) |
+	    (transp << info->var.transp.offset);
+
+	if (regno < 16) {
+		switch (fb->bits_per_pixel) {
+		case 16:
+			((uint32_t *) info->pseudo_palette)[regno] = v;
+			break;
+		case 24:
+		case 32:
+			((uint32_t *) info->pseudo_palette)[regno] = v;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+	struct psb_fbdev *fbdev = info->par;
+	struct psb_framebuffer *psbfb = &fbdev->pfb;
+	struct drm_device *dev = psbfb->base.dev;
+
+	/*
+	 *	We have to poke our nose in here. The core fb code assumes
+	 *	panning is part of the hardware that can be invoked before
+	 *	the actual fb is mapped. In our case that isn't quite true.
+	 */
+	if (psbfb->gtt->npage) {
+		/* GTT roll shifts in 4K pages, we need to shift the right
+		   number of pages */
+		int pages = info->fix.line_length >> 12;
+		psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
+	}
+	return 0;
+}
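+/*
+ * Worked example (illustrative): with a 4096-byte pitch, line_length >> 12
+ * yields one GTT page per scanline, so panning to var->yoffset = 100 rolls
+ * the mapping by 100 pages; an 8192-byte pitch would roll two pages per
+ * line of yoffset instead.
+ */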
+
+static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct psb_framebuffer *psbfb = vma->vm_private_data;
+	struct drm_device *dev = psbfb->base.dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int page_num;
+	int i;
+	unsigned long address;
+	int ret;
+	unsigned long pfn;
+	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
+				  psbfb->gtt->offset;
+
+	page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	for (i = 0; i < page_num; i++) {
+		pfn = (phys_addr >> PAGE_SHIFT);
+
+		ret = vm_insert_mixed(vma, address, pfn);
+		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+			break;
+		else if (unlikely(ret != 0)) {
+			ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+			return ret;
+		}
+		address += PAGE_SIZE;
+		phys_addr += PAGE_SIZE;
+	}
+	return VM_FAULT_NOPAGE;
+}
+
+static void psbfb_vm_open(struct vm_area_struct *vma)
+{
+}
+
+static void psbfb_vm_close(struct vm_area_struct *vma)
+{
+}
+
+static const struct vm_operations_struct psbfb_vm_ops = {
+	.fault	= psbfb_vm_fault,
+	.open	= psbfb_vm_open,
+	.close	= psbfb_vm_close
+};
+
+static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct psb_fbdev *fbdev = info->par;
+	struct psb_framebuffer *psbfb = &fbdev->pfb;
+
+	if (vma->vm_pgoff != 0)
+		return -EINVAL;
+	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+		return -EINVAL;
+
+	if (!psbfb->addr_space)
+		psbfb->addr_space = vma->vm_file->f_mapping;
+	/*
+	 * If this is a GEM object then info->screen_base is the virtual
+	 * kernel remapping of the object. FIXME: Review if this is
+	 * suitable for our mmap work
+	 */
+	vma->vm_ops = &psbfb_vm_ops;
+	vma->vm_private_data = (void *)psbfb;
+	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	return 0;
+}
+
+static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
+						unsigned long arg)
+{
+	return -ENOTTY;
+}
+
+static struct fb_ops psbfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcolreg = psbfb_setcolreg,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = psbfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
+	.fb_mmap = psbfb_mmap,
+	.fb_sync = psbfb_sync,
+	.fb_ioctl = psbfb_ioctl,
+};
+
+static struct fb_ops psbfb_roll_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcolreg = psbfb_setcolreg,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = cfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
+	.fb_pan_display = psbfb_pan,
+	.fb_mmap = psbfb_mmap,
+	.fb_ioctl = psbfb_ioctl,
+};
+
+static struct fb_ops psbfb_unaccel_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcolreg = psbfb_setcolreg,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = cfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
+	.fb_mmap = psbfb_mmap,
+	.fb_ioctl = psbfb_ioctl,
+};
+
+/**
+ *	psb_framebuffer_init	-	initialize a framebuffer
+ *	@dev: our DRM device
+ *	@fb: framebuffer to set up
+ *	@mode_cmd: mode description
+ *	@gt: backing object
+ *
+ *	Configure and fill in the boilerplate for our frame buffer. Return
+ *	0 on success or an error code if we fail.
+ */
+static int psb_framebuffer_init(struct drm_device *dev,
+					struct psb_framebuffer *fb,
+					struct drm_mode_fb_cmd2 *mode_cmd,
+					struct gtt_range *gt)
+{
+	u32 bpp, depth;
+	int ret;
+
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+	if (mode_cmd->pitches[0] & 63)
+		return -EINVAL;
+	switch (bpp) {
+	case 8:
+	case 16:
+	case 24:
+	case 32:
+		break;
+	default:
+		return -EINVAL;
+	}
+	drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+	fb->gtt = gt;
+	ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
+	if (ret) {
+		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
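+/*
+ * Worked example (illustrative): a 1024-wide XRGB8888 buffer has a pitch
+ * of 1024 * 4 = 4096 bytes, which passes the 64-byte alignment test above
+ * (4096 & 63 == 0); a tightly packed 1366-wide buffer (5464 bytes) would
+ * be rejected since 5464 & 63 == 24.
+ */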
+
+/**
+ *	psb_framebuffer_create	-	create a framebuffer backed by gt
+ *	@dev: our DRM device
+ *	@mode_cmd: the description of the requested mode
+ *	@gt: the backing object
+ *
+ *	Create a framebuffer object backed by the gt, and fill in the
+ *	boilerplate required
+ *
+ *	TODO: review object references
+ */
+
+static struct drm_framebuffer *psb_framebuffer_create
+			(struct drm_device *dev,
+			 struct drm_mode_fb_cmd2 *mode_cmd,
+			 struct gtt_range *gt)
+{
+	struct psb_framebuffer *fb;
+	int ret;
+
+	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+	if (!fb)
+		return ERR_PTR(-ENOMEM);
+
+	ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
+	if (ret) {
+		kfree(fb);
+		return ERR_PTR(ret);
+	}
+	return &fb->base;
+}
+
+/**
+ *	psbfb_alloc		-	allocate frame buffer memory
+ *	@dev: the DRM device
+ *	@aligned_size: space needed
+ *
+ *	Allocate the frame buffer. In the usual case we get a GTT range that
+ *	is stolen memory backed and life is simple. If there isn't sufficient
+ *	stolen memory we fail, as we don't have the virtual mapping space to
+ *	really vmap it and the kernel console code can't handle non-linear
+ *	framebuffers.
+ *
+ *	Re-address this as and if the framebuffer layer grows this ability.
+ */
+static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
+{
+	struct gtt_range *backing;
+	/* Begin by trying to use stolen memory backing */
+	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
+	if (backing) {
+		if (drm_gem_private_object_init(dev,
+					&backing->gem, aligned_size) == 0)
+			return backing;
+		psb_gtt_free_range(dev, backing);
+	}
+	return NULL;
+}
+
+/**
+ *	psbfb_create		-	create a framebuffer
+ *	@fbdev: the framebuffer device
+ *	@sizes: specification of the layout
+ *
+ *	Create a framebuffer to the specifications provided
+ */
+static int psbfb_create(struct psb_fbdev *fbdev,
+				struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_device *dev = fbdev->psb_fb_helper.dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct fb_info *info;
+	struct drm_framebuffer *fb;
+	struct psb_framebuffer *psbfb = &fbdev->pfb;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct device *device = &dev->pdev->dev;
+	int size;
+	int ret;
+	struct gtt_range *backing;
+	u32 bpp, depth;
+	int gtt_roll = 0;
+	int pitch_lines = 0;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	bpp = sizes->surface_bpp;
+	depth = sizes->surface_depth;
+
+	/* No 24bit packed */
+	if (bpp == 24)
+		bpp = 32;
+
+	do {
+		/*
+		 * Acceleration via the GTT requires pitch to be
+		 * power of two aligned. Preferably page but less
+		 * is ok with some fonts
+		 */
+		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
+
+		size = mode_cmd.pitches[0] * mode_cmd.height;
+		size = ALIGN(size, PAGE_SIZE);
+
+		/* Allocate the fb in the GTT with stolen page backing */
+		backing = psbfb_alloc(dev, size);
+
+		if (pitch_lines)
+			pitch_lines *= 2;
+		else
+			pitch_lines = 1;
+		gtt_roll++;
+	} while (backing == NULL && pitch_lines <= 16);
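+	/*
+	 * Illustration of the loop above: pitch_lines doubles on each pass
+	 * (1, 2, 4, 8, 16), so the requested pitch alignment walks down
+	 * through 4096, 2048, 1024, 256 and 16 bytes before we give up on
+	 * a GTT accelerated layout and fall back to the 64-byte minimum
+	 * below.
+	 */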
+
+	/* The final pitch we accepted if we succeeded */
+	pitch_lines /= 2;
+
+	if (backing == NULL) {
+		/*
+		 *	We couldn't get the space we wanted, fall back to the
+		 *	display engine requirement instead.  The HW requires
+		 *	the pitch to be 64 byte aligned
+		 */
+
+		gtt_roll = 0;	/* Don't use GTT accelerated scrolling */
+		pitch_lines = 64;
+
+		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
+
+		size = mode_cmd.pitches[0] * mode_cmd.height;
+		size = ALIGN(size, PAGE_SIZE);
+
+		/* Allocate the framebuffer in the GTT with stolen page backing */
+		backing = psbfb_alloc(dev, size);
+		if (backing == NULL)
+			return -ENOMEM;
+	}
+
+	memset(dev_priv->vram_addr + backing->offset, 0, size);
+
+	mutex_lock(&dev->struct_mutex);
+
+	info = framebuffer_alloc(0, device);
+	if (!info) {
+		ret = -ENOMEM;
+		goto out_err1;
+	}
+	info->par = fbdev;
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+	ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
+	if (ret)
+		goto out_unref;
+
+	fb = &psbfb->base;
+	psbfb->fbdev = info;
+
+	fbdev->psb_fb_helper.fb = fb;
+	fbdev->psb_fb_helper.fbdev = info;
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	strcpy(info->fix.id, "psbdrmfb");
+
+	info->flags = FBINFO_DEFAULT;
+	if (dev_priv->ops->accel_2d && pitch_lines > 8)	/* 2D engine */
+		info->fbops = &psbfb_ops;
+	else if (gtt_roll) {	/* GTT rolling seems best */
+		info->fbops = &psbfb_roll_ops;
+		info->flags |= FBINFO_HWACCEL_YPAN;
+	} else	/* Software */
+		info->fbops = &psbfb_unaccel_ops;
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
+	info->fix.smem_start = dev->mode_config.fb_base;
+	info->fix.smem_len = size;
+	info->fix.ywrapstep = gtt_roll;
+	info->fix.ypanstep = 0;
+
+	/* Accessed stolen memory directly */
+	info->screen_base = dev_priv->vram_addr + backing->offset;
+	info->screen_size = size;
+
+	if (dev_priv->gtt.stolen_size) {
+		info->apertures = alloc_apertures(1);
+		if (!info->apertures) {
+			ret = -ENOMEM;
+			goto out_unref;
+		}
+		info->apertures->ranges[0].base = dev->mode_config.fb_base;
+		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
+	}
+
+	drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
+				sizes->fb_width, sizes->fb_height);
+
+	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
+	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
+
+	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+	dev_dbg(dev->dev, "allocated %dx%d fb\n",
+					psbfb->base.width, psbfb->base.height);
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+out_unref:
+	if (backing->stolen)
+		psb_gtt_free_range(dev, backing);
+	else
+		drm_gem_object_unreference(&backing->gem);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+out_err1:
+	mutex_unlock(&dev->struct_mutex);
+	psb_gtt_free_range(dev, backing);
+	return ret;
+}
+
+/**
+ *	psb_user_framebuffer_create	-	create framebuffer
+ *	@dev: our DRM device
+ *	@filp: client file
+ *	@cmd: mode request
+ *
+ *	Create a new framebuffer backed by a userspace GEM object
+ */
+static struct drm_framebuffer *psb_user_framebuffer_create
+			(struct drm_device *dev, struct drm_file *filp,
+			 struct drm_mode_fb_cmd2 *cmd)
+{
+	struct gtt_range *r;
+	struct drm_gem_object *obj;
+
+	/*
+	 *	Find the GEM object and thus the gtt range object that is
+	 *	to back this space
+	 */
+	obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
+	if (obj == NULL)
+		return ERR_PTR(-ENOENT);
+
+	/* Let the core code do all the work */
+	r = container_of(obj, struct gtt_range, gem);
+	return psb_framebuffer_create(dev, cmd, r);
+}
+
+static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+							u16 blue, int regno)
+{
+	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+
+	intel_crtc->lut_r[regno] = red >> 8;
+	intel_crtc->lut_g[regno] = green >> 8;
+	intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
+					u16 *green, u16 *blue, int regno)
+{
+	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+
+	*red = intel_crtc->lut_r[regno] << 8;
+	*green = intel_crtc->lut_g[regno] << 8;
+	*blue = intel_crtc->lut_b[regno] << 8;
+}
+
+static int psbfb_probe(struct drm_fb_helper *helper,
+				struct drm_fb_helper_surface_size *sizes)
+{
+	struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
+	struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int bytespp;
+
+	bytespp = sizes->surface_bpp / 8;
+	if (bytespp == 3)	/* no 24bit packed */
+		bytespp = 4;
+
+	/* If the mode will not fit in 32bit then switch to 16bit to get
+	   a console on full resolution. The X mode setting server will
+	   allocate its own 32bit GEM framebuffer */
+	if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
+	    dev_priv->vram_stolen_size) {
+		sizes->surface_bpp = 16;
+		sizes->surface_depth = 16;
+	}
+
+	return psbfb_create(psb_fbdev, sizes);
+}
+
+static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+	.gamma_set = psbfb_gamma_set,
+	.gamma_get = psbfb_gamma_get,
+	.fb_probe = psbfb_probe,
+};
+
+static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
+{
+	struct fb_info *info;
+	struct psb_framebuffer *psbfb = &fbdev->pfb;
+
+	if (fbdev->psb_fb_helper.fbdev) {
+		info = fbdev->psb_fb_helper.fbdev;
+		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+	drm_fb_helper_fini(&fbdev->psb_fb_helper);
+	drm_framebuffer_unregister_private(&psbfb->base);
+	drm_framebuffer_cleanup(&psbfb->base);
+
+	if (psbfb->gtt)
+		drm_gem_object_unreference(&psbfb->gtt->gem);
+	return 0;
+}
+
+int psb_fbdev_init(struct drm_device *dev)
+{
+	struct psb_fbdev *fbdev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
+	if (!fbdev) {
+		dev_err(dev->dev, "no memory\n");
+		return -ENOMEM;
+	}
+
+	dev_priv->fbdev = fbdev;
+	fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
+
+	drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
+							INTELFB_CONN_LIMIT);
+
+	drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
+	drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+	return 0;
+}
+
+static void psb_fbdev_fini(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->fbdev)
+		return;
+
+	psb_fbdev_destroy(dev, dev_priv->fbdev);
+	kfree(dev_priv->fbdev);
+	dev_priv->fbdev = NULL;
+}
+
+static void psbfb_output_poll_changed(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
+	drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
+}
+
+/**
+ *	psb_user_framebuffer_create_handle - add a handle to a framebuffer
+ *	@fb: framebuffer
+ *	@file_priv: our DRM file
+ *	@handle: returned handle
+ *
+ *	Our framebuffer object is a GTT range which also contains a GEM
+ *	object. We need to turn it into a handle for userspace. GEM will do
+ *	the work for us
+ */
+static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+					      struct drm_file *file_priv,
+					      unsigned int *handle)
+{
+	struct psb_framebuffer *psbfb = to_psb_fb(fb);
+	struct gtt_range *r = psbfb->gtt;
+	return drm_gem_handle_create(file_priv, &r->gem, handle);
+}
+
+/**
+ *	psb_user_framebuffer_destroy	-	destruct user created fb
+ *	@fb: framebuffer
+ *
+ *	User framebuffers are backed by GEM objects so all we have to do is
+ *	clean up a bit and drop the reference, GEM will handle the fallout
+ */
+static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct psb_framebuffer *psbfb = to_psb_fb(fb);
+	struct gtt_range *r = psbfb->gtt;
+
+	/* Let DRM do its clean up */
+	drm_framebuffer_cleanup(fb);
+	/*  We are no longer using the resource in GEM */
+	drm_gem_object_unreference_unlocked(&r->gem);
+	kfree(fb);
+}
+
+static const struct drm_mode_config_funcs psb_mode_funcs = {
+	.fb_create = psb_user_framebuffer_create,
+	.output_poll_changed = psbfb_output_poll_changed,
+};
+
+static int psb_create_backlight_property(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_property *backlight;
+
+	if (dev_priv->backlight_property)
+		return 0;
+
+	backlight = drm_property_create_range(dev, 0, "backlight", 0, 100);
+
+	dev_priv->backlight_property = backlight;
+
+	return 0;
+}
+
+static void psb_setup_outputs(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_connector *connector;
+
+	drm_mode_create_scaling_mode_property(dev);
+	psb_create_backlight_property(dev);
+
+	dev_priv->ops->output_init(dev);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    head) {
+		struct psb_intel_encoder *psb_intel_encoder =
+			psb_intel_attached_encoder(connector);
+		struct drm_encoder *encoder = &psb_intel_encoder->base;
+		int crtc_mask = 0, clone_mask = 0;
+
+		/* valid crtcs */
+		switch (psb_intel_encoder->type) {
+		case INTEL_OUTPUT_ANALOG:
+			crtc_mask = (1 << 0);
+			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
+			break;
+		case INTEL_OUTPUT_SDVO:
+			crtc_mask = ((1 << 0) | (1 << 1));
+			clone_mask = (1 << INTEL_OUTPUT_SDVO);
+			break;
+		case INTEL_OUTPUT_LVDS:
+			crtc_mask = dev_priv->ops->lvds_mask;
+			clone_mask = (1 << INTEL_OUTPUT_LVDS);
+			break;
+		case INTEL_OUTPUT_MIPI:
+			crtc_mask = (1 << 0);
+			clone_mask = (1 << INTEL_OUTPUT_MIPI);
+			break;
+		case INTEL_OUTPUT_MIPI2:
+			crtc_mask = (1 << 2);
+			clone_mask = (1 << INTEL_OUTPUT_MIPI2);
+			break;
+		case INTEL_OUTPUT_HDMI:
+			crtc_mask = dev_priv->ops->hdmi_mask;
+			clone_mask = (1 << INTEL_OUTPUT_HDMI);
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			crtc_mask = (1 << 0) | (1 << 1);
+			clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+			break;
+		case INTEL_OUTPUT_EDP:
+			crtc_mask = (1 << 1);
+			clone_mask = (1 << INTEL_OUTPUT_EDP);
+		}
+		encoder->possible_crtcs = crtc_mask;
+		encoder->possible_clones =
+		    psb_intel_connector_clones(dev, clone_mask);
+	}
+}
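+/*
+ * Worked example (illustrative): an SDVO encoder ends up with
+ * possible_crtcs = 0x3 (pipes 0 and 1) and a clone mask of
+ * (1 << INTEL_OUTPUT_SDVO), so psb_intel_connector_clones() reports only
+ * other SDVO encoders as valid clones for it.
+ */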
+
+void psb_modeset_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+	int i;
+
+	drm_mode_config_init(dev);
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	dev->mode_config.funcs = &psb_mode_funcs;
+
+	/* set memory base */
+	/* Oaktrail and Poulsbo should use BAR 2*/
+	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
+					&(dev->mode_config.fb_base));
+
+	/* num pipes is 2 for PSB but 1 for Mrst */
+	for (i = 0; i < dev_priv->num_pipe; i++)
+		psb_intel_crtc_init(dev, i, mode_dev);
+
+	dev->mode_config.max_width = 4096;
+	dev->mode_config.max_height = 4096;
+
+	psb_setup_outputs(dev);
+
+	if (dev_priv->ops->errata)
+		dev_priv->ops->errata(dev);
+
+	dev_priv->modeset = true;
+}
+
+void psb_modeset_cleanup(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	if (dev_priv->modeset) {
+		mutex_lock(&dev->struct_mutex);
+
+		drm_kms_helper_poll_fini(dev);
+		psb_fbdev_fini(dev);
+		drm_mode_config_cleanup(dev);
+
+		mutex_unlock(&dev->struct_mutex);
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/framebuffer.h b/linux-imx/drivers/gpu/drm/gma500/framebuffer.h
new file mode 100644
index 0000000..989558a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/framebuffer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2008-2011, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *      Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _FRAMEBUFFER_H_
+#define _FRAMEBUFFER_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+
+#include "psb_drv.h"
+
+struct psb_framebuffer {
+	struct drm_framebuffer base;
+	struct address_space *addr_space;
+	struct fb_info *fbdev;
+	struct gtt_range *gtt;
+};
+
+struct psb_fbdev {
+	struct drm_fb_helper psb_fb_helper;
+	struct psb_framebuffer pfb;
+};
+
+#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
+
+extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
+
+#endif
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/gem.c b/linux-imx/drivers/gpu/drm/gma500/gem.c
new file mode 100644
index 0000000..eefd6cc
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/gem.c
@@ -0,0 +1,299 @@
+/*
+ *  psb GEM interface
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Alan Cox
+ *
+ * TODO:
+ *	-	we need to work out if the MMU is relevant (eg for
+ *		accelerated operations on a GEM object)
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+
+int psb_gem_init_object(struct drm_gem_object *obj)
+{
+	return -EINVAL;
+}
+
+void psb_gem_free_object(struct drm_gem_object *obj)
+{
+	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
+
+	/* Remove the list map if one is present */
+	if (obj->map_list.map)
+		drm_gem_free_mmap_offset(obj);
+	drm_gem_object_release(obj);
+
+	/* This must occur last as it frees up the memory of the GEM object */
+	psb_gtt_free_range(obj->dev, gtt);
+}
+
+int psb_gem_get_aperture(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	return -EINVAL;
+}
+
+/**
+ *	psb_gem_dumb_map_gtt	-	buffer mapping for dumb interface
+ *	@file: our drm client file
+ *	@dev: drm device
+ *	@handle: GEM handle to the object (from dumb_create)
+ *
+ *	Do the necessary setup to allow the mapping of the frame buffer
+ *	into user memory. We don't have to do much here at the moment.
+ */
+int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+			 uint32_t handle, uint64_t *offset)
+{
+	int ret = 0;
+	struct drm_gem_object *obj;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	mutex_lock(&dev->struct_mutex);
+
+	/* GEM does all our handle to object mapping */
+	obj = drm_gem_object_lookup(dev, file, handle);
+	if (obj == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+	/* What validation is needed here? */
+
+	/* Make it mmapable */
+	if (!obj->map_list.map) {
+		ret = drm_gem_create_mmap_offset(obj);
+		if (ret)
+			goto out;
+	}
+	/* GEM should really work out the hash offsets for us */
+	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+out:
+	drm_gem_object_unreference(obj);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ *	psb_gem_create		-	create a mappable object
+ *	@file: the DRM file of the client
+ *	@dev: our device
+ *	@size: the size requested
+ *	@handlep: returned handle (opaque number)
+ *
+ *	Create a GEM object, fill in the boilerplate and attach a handle to
+ *	it so that userspace can speak about it. This does the core work
+ *	for the various methods that do/will create GEM objects for things
+ */
+static int psb_gem_create(struct drm_file *file,
+	struct drm_device *dev, uint64_t size, uint32_t *handlep)
+{
+	struct gtt_range *r;
+	int ret;
+	u32 handle;
+
+	size = roundup(size, PAGE_SIZE);
+
+	/* Allocate our object - for now a direct gtt range which is not
+	   stolen memory backed */
+	r = psb_gtt_alloc_range(dev, size, "gem", 0);
+	if (r == NULL) {
+		dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
+		return -ENOSPC;
+	}
+	/* Initialize the extra goodies GEM needs to do all the hard work */
+	if (drm_gem_object_init(dev, &r->gem, size) != 0) {
+		psb_gtt_free_range(dev, r);
+		/* GEM doesn't give an error code so use -ENOMEM */
+		dev_err(dev->dev, "GEM init failed for %lld\n", size);
+		return -ENOMEM;
+	}
+	/* Limit the object to 32bit mappings */
+	mapping_set_gfp_mask(r->gem.filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
+	/* Give the object a handle so we can carry it more easily */
+	ret = drm_gem_handle_create(file, &r->gem, &handle);
+	if (ret) {
+		dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
+							&r->gem, size);
+		drm_gem_object_release(&r->gem);
+		psb_gtt_free_range(dev, r);
+		return ret;
+	}
+	/* We have the initial and handle reference but need only one now */
+	drm_gem_object_unreference(&r->gem);
+	*handlep = handle;
+	return 0;
+}
+
+/**
+ *	psb_gem_dumb_create	-	create a dumb buffer
+ *	@file: our client file
+ *	@dev: our device
+ *	@args: the requested arguments copied from userspace
+ *
+ *	Allocate a buffer suitable for use for a frame buffer of the
+ *	form described by user space. Give userspace a handle by which
+ *	to reference it.
+ */
+int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+			struct drm_mode_create_dumb *args)
+{
+	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+	args->size = args->pitch * args->height;
+	return psb_gem_create(file, dev, args->size, &args->handle);
+}
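+/*
+ * Usage sketch (illustrative, not part of the driver): userspace obtains
+ * such a buffer through the generic dumb-buffer ioctl, e.g.:
+ *
+ *	struct drm_mode_create_dumb creq = {
+ *		.width = 1024, .height = 768, .bpp = 32,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
+ *
+ * after which creq.pitch and creq.size hold the values computed above and
+ * creq.handle names the GEM object.
+ */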
+
+/**
+ *	psb_gem_dumb_destroy	-	destroy a dumb buffer
+ *	@file: client file
+ *	@dev: our DRM device
+ *	@handle: the object handle
+ *
+ *	Destroy a handle that was created via psb_gem_dumb_create, at least
+ *	we hope it was created that way. i915 seems to assume the caller
+ *	does the checking but that might be worth review! FIXME
+ */
+int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+			uint32_t handle)
+{
+	/* No special work needed, drop the reference and see what falls out */
+	return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ *	psb_gem_fault		-	pagefault handler for GEM objects
+ *	@vma: the VMA of the GEM object
+ *	@vmf: fault detail
+ *
+ *	Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+ *	does most of the work for us including the actual map/unmap calls
+ *	but we need to do the actual page work.
+ *
+ *	This code eventually needs to handle faulting objects in and out
+ *	of the GTT and repacking it when we run out of space. We can put
+ *	that off for now given our simple uses.
+ *
+ *	The VMA was set up by GEM. In doing so it also ensured that the
+ *	vma->vm_private_data points to the GEM object that is backing this
+ *	mapping.
+ */
+int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj;
+	struct gtt_range *r;
+	int ret;
+	unsigned long pfn;
+	pgoff_t page_offset;
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+
+	obj = vma->vm_private_data;	/* GEM object */
+	dev = obj->dev;
+	dev_priv = dev->dev_private;
+
+	r = container_of(obj, struct gtt_range, gem);	/* Get the gtt range */
+
+	/* Make sure we don't parallel update on a fault, nor move or remove
+	   something from beneath our feet */
+	mutex_lock(&dev->struct_mutex);
+
+	/* For now the mmap pins the object and it stays pinned. As things
+	   stand that will do us no harm */
+	if (r->mmapping == 0) {
+		ret = psb_gtt_pin(r);
+		if (ret < 0) {
+			dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
+			goto fail;
+		}
+		r->mmapping = 1;
+	}
+
+	/* Page relative to the VMA start - we must calculate this ourselves
+	   because vmf->pgoff is the fake GEM offset */
+	page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
+				>> PAGE_SHIFT;
+
+	/* CPU view of the page, don't go via the GART for CPU writes */
+	if (r->stolen)
+		pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
+	else
+		pfn = page_to_pfn(r->pages[page_offset]);
+	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+fail:
+	mutex_unlock(&dev->struct_mutex);
+	switch (ret) {
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
+}
+
+static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
+						int size, u32 *handle)
+{
+	struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
+	if (gtt == NULL)
+		return -ENOMEM;
+	if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
+		goto free_gtt;
+	if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
+		return 0;
+free_gtt:
+	psb_gtt_free_range(dev, gtt);
+	return -ENOMEM;
+}
+
+/*
+ *	GEM interfaces for our specific client
+ */
+int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file)
+{
+	struct drm_psb_gem_create *args = data;
+	int ret;
+	if (args->flags & GMA_GEM_CREATE_STOLEN) {
+		ret = psb_gem_create_stolen(file, dev, args->size,
+							&args->handle);
+		if (ret == 0)
+			return 0;
+		/* Fall through */
+		args->flags &= ~GMA_GEM_CREATE_STOLEN;
+	}
+	return psb_gem_create(file, dev, args->size, &args->handle);
+}
+
+int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file)
+{
+	struct drm_psb_gem_mmap *args = data;
+	return dev->driver->dumb_map_offset(file, dev,
+						args->handle, &args->offset);
+}
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/gtt.c b/linux-imx/drivers/gpu/drm/gma500/gtt.c
new file mode 100644
index 0000000..1f82183
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/gtt.c
@@ -0,0 +1,587 @@
+/*
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+ *	    Alan Cox <alan@linux.intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <linux/shmem_fs.h>
+#include "psb_drv.h"
+
+
+/*
+ *	GTT resource allocator - manage page mappings in GTT space
+ */
+
+/**
+ *	psb_gtt_mask_pte	-	generate GTT pte entry
+ *	@pfn: page number to encode
+ *	@type: type of memory in the GTT
+ *
+ *	Set the GTT entry for the appropriate memory type.
+ */
+static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
+{
+	uint32_t mask = PSB_PTE_VALID;
+
+	/* Ensure we explode rather than put an invalid low mapping of
+	   a high mapping page into the gtt */
+	BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));
+
+	if (type & PSB_MMU_CACHED_MEMORY)
+		mask |= PSB_PTE_CACHED;
+	if (type & PSB_MMU_RO_MEMORY)
+		mask |= PSB_PTE_RO;
+	if (type & PSB_MMU_WO_MEMORY)
+		mask |= PSB_PTE_WO;
+
+	return (pfn << PAGE_SHIFT) | mask;
+}
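+/*
+ * Worked example (illustrative): for pfn 0x1234 and PSB_MMU_CACHED_MEMORY
+ * the entry is (0x1234 << PAGE_SHIFT) | PSB_PTE_VALID | PSB_PTE_CACHED,
+ * i.e. 0x1234000 plus the two flag bits with 4K pages.
+ */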
+
+/**
+ *	psb_gtt_entry		-	find the GTT entries for a gtt_range
+ *	@dev: our DRM device
+ *	@r: our GTT range
+ *
+ *	Given a gtt_range object return the GTT offset of the page table
+ *	entries for this gtt_range
+ */
+static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned long offset;
+
+	offset = r->resource.start - dev_priv->gtt_mem->start;
+
+	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
+}
+
+/**
+ *	psb_gtt_insert	-	put an object into the GTT
+ *	@dev: our DRM device
+ *	@r: our GTT range
+ *
+ *	Take our preallocated GTT range and insert the GEM object into
+ *	the GTT. This is protected via the gtt mutex which the caller
+ *	must hold.
+ */
+static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
+			  int resume)
+{
+	u32 __iomem *gtt_slot;
+	u32 pte;
+	struct page **pages;
+	int i;
+
+	if (r->pages == NULL) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	WARN_ON(r->stolen);	/* refcount these maybe ? */
+
+	gtt_slot = psb_gtt_entry(dev, r);
+	pages = r->pages;
+
+	if (!resume) {
+		/* Make sure changes are visible to the GPU */
+		set_pages_array_wc(pages, r->npage);
+	}
+
+	/* Write our page entries into the GTT itself */
+	for (i = r->roll; i < r->npage; i++) {
+		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+		iowrite32(pte, gtt_slot++);
+	}
+	for (i = 0; i < r->roll; i++) {
+		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+		iowrite32(pte, gtt_slot++);
+	}
+	/* Make sure all the entries are set before we return */
+	ioread32(gtt_slot - 1);
+
+	return 0;
+}
+
+/**
+ *	psb_gtt_remove	-	remove an object from the GTT
+ *	@dev: our DRM device
+ *	@r: our GTT range
+ *
+ *	Remove a preallocated GTT range from the GTT. Overwrite all the
+ *	page table entries with the dummy page. This is protected via the gtt
+ *	mutex which the caller must hold.
+ */
+static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 __iomem *gtt_slot;
+	u32 pte;
+	int i;
+
+	WARN_ON(r->stolen);
+
+	gtt_slot = psb_gtt_entry(dev, r);
+	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
+
+	for (i = 0; i < r->npage; i++)
+		iowrite32(pte, gtt_slot++);
+	ioread32(gtt_slot - 1);
+	set_pages_array_wb(r->pages, r->npage);
+}
+
+/**
+ *	psb_gtt_roll	-	set scrolling position
+ *	@dev: our DRM device
+ *	@r: the gtt mapping we are using
+ *	@roll: roll offset
+ *
+ *	Roll an existing pinned mapping by moving the pages through the GTT.
+ *	This allows us to implement hardware scrolling on the consoles without
+ *	a 2D engine
+ */
+void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
+{
+	u32 __iomem *gtt_slot;
+	u32 pte;
+	int i;
+
+	if (roll >= r->npage) {
+		WARN_ON(1);
+		return;
+	}
+
+	r->roll = roll;
+
+	/* Not currently in the GTT - no worry we will write the mapping at
+	   the right position when it gets pinned */
+	if (!r->stolen && !r->in_gart)
+		return;
+
+	gtt_slot = psb_gtt_entry(dev, r);
+
+	for (i = r->roll; i < r->npage; i++) {
+		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+		iowrite32(pte, gtt_slot++);
+	}
+	for (i = 0; i < r->roll; i++) {
+		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+		iowrite32(pte, gtt_slot++);
+	}
+	ioread32(gtt_slot - 1);
+}
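+/*
+ * Usage sketch (hypothetical caller): to scroll a pinned console
+ * framebuffer by a whole number of pages without copying pixels,
+ * advance the roll and let the GTT remap the pages:
+ *
+ *	psb_gtt_roll(dev, fb_gt, (fb_gt->roll + pages) % fb_gt->npage);
+ */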
+
+/**
+ *	psb_gtt_attach_pages	-	attach and pin GEM pages
+ *	@gt: the gtt range
+ *
+ *	Pin and build an in kernel list of the pages that back our GEM object.
+ *	While we hold this the pages cannot be swapped out. This is protected
+ *	via the gtt mutex which the caller must hold.
+ */
+static int psb_gtt_attach_pages(struct gtt_range *gt)
+{
+	struct inode *inode;
+	struct address_space *mapping;
+	int i;
+	struct page *p;
+	int pages = gt->gem.size / PAGE_SIZE;
+
+	WARN_ON(gt->pages);
+
+	/* This is the shared memory object that backs the GEM resource */
+	inode = file_inode(gt->gem.filp);
+	mapping = inode->i_mapping;
+
+	gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
+	if (gt->pages == NULL)
+		return -ENOMEM;
+	gt->npage = pages;
+
+	for (i = 0; i < pages; i++) {
+		p = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(p))
+			goto err;
+		gt->pages[i] = p;
+	}
+	return 0;
+
+err:
+	while (i--)
+		page_cache_release(gt->pages[i]);
+	kfree(gt->pages);
+	gt->pages = NULL;
+	return PTR_ERR(p);
+}
+
+/**
+ *	psb_gtt_detach_pages	-	detach and unpin GEM pages
+ *	@gt: the gtt range
+ *
+ *	Undo the effect of psb_gtt_attach_pages. At this point the pages
+ *	must have been removed from the GTT as they could now be paged out
+ *	and move bus address. This is protected via the gtt mutex which the
+ *	caller must hold.
+ */
+static void psb_gtt_detach_pages(struct gtt_range *gt)
+{
+	int i;
+	for (i = 0; i < gt->npage; i++) {
+		/* FIXME: do we need to force dirty */
+		set_page_dirty(gt->pages[i]);
+		page_cache_release(gt->pages[i]);
+	}
+	kfree(gt->pages);
+	gt->pages = NULL;
+}
+
+/**
+ *	psb_gtt_pin		-	pin pages into the GTT
+ *	@gt: range to pin
+ *
+ *	Pin a set of pages into the GTT. The pins are refcounted so that
+ *	multiple pins need multiple unpins to undo.
+ *
+ *	Non GEM backed objects treat this as a no-op as they are always GTT
+ *	backed objects.
+ */
+int psb_gtt_pin(struct gtt_range *gt)
+{
+	int ret = 0;
+	struct drm_device *dev = gt->gem.dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev_priv->gtt_mutex);
+
+	if (gt->in_gart == 0 && gt->stolen == 0) {
+		ret = psb_gtt_attach_pages(gt);
+		if (ret < 0)
+			goto out;
+		ret = psb_gtt_insert(dev, gt, 0);
+		if (ret < 0) {
+			psb_gtt_detach_pages(gt);
+			goto out;
+		}
+	}
+	gt->in_gart++;
+out:
+	mutex_unlock(&dev_priv->gtt_mutex);
+	return ret;
+}
+
+/**
+ *	psb_gtt_unpin		-	Drop a GTT pin requirement
+ *	@gt: range to pin
+ *
+ *	Undoes the effect of psb_gtt_pin. On the last drop the GEM object
+ *	will be removed from the GTT which will also drop the page references
+ *	and allow the VM to clean up or page the memory out.
+ *
+ *	Non GEM backed objects treat this as a no-op as they are always GTT
+ *	backed objects.
+ */
+void psb_gtt_unpin(struct gtt_range *gt)
+{
+	struct drm_device *dev = gt->gem.dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev_priv->gtt_mutex);
+
+	WARN_ON(!gt->in_gart);
+
+	gt->in_gart--;
+	if (gt->in_gart == 0 && gt->stolen == 0) {
+		psb_gtt_remove(dev, gt);
+		psb_gtt_detach_pages(gt);
+	}
+	mutex_unlock(&dev_priv->gtt_mutex);
+}
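+/*
+ * Typical pin/unpin pattern (sketch; gt is any GEM backed gtt_range
+ * and program_hw() stands in for whatever consumes the GTT offset):
+ *
+ *	ret = psb_gtt_pin(gt);
+ *	if (ret == 0) {
+ *		program_hw(gt->offset);
+ *		...
+ *		psb_gtt_unpin(gt);
+ *	}
+ */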
+
+/*
+ *	GTT resource allocator - allocate and manage GTT address space
+ */
+
+/**
+ *	psb_gtt_alloc_range	-	allocate GTT address space
+ *	@dev: Our DRM device
+ *	@len: length (bytes) of address space required
+ *	@name: resource name
+ *	@backed: resource should be backed by stolen pages
+ *
+ *	Ask the kernel core to find us a suitable range of addresses
+ *	to use for a GTT mapping.
+ *
+ *	Returns a gtt_range structure describing the object, or NULL on
+ *	error. On successful return the resource is both allocated and marked
+ *	as in use.
+ */
+struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+						const char *name, int backed)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct gtt_range *gt;
+	struct resource *r = dev_priv->gtt_mem;
+	int ret;
+	unsigned long start, end;
+
+	if (backed) {
+		/* The start of the GTT is the stolen pages */
+		start = r->start;
+		end = r->start + dev_priv->gtt.stolen_size - 1;
+	} else {
+		/* The rest we will use for GEM backed objects */
+		start = r->start + dev_priv->gtt.stolen_size;
+		end = r->end;
+	}
+
+	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
+	if (gt == NULL)
+		return NULL;
+	gt->resource.name = name;
+	gt->stolen = backed;
+	gt->in_gart = backed;
+	gt->roll = 0;
+	/* Ensure this is set for non GEM objects */
+	gt->gem.dev = dev;
+	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
+				len, start, end, PAGE_SIZE, NULL, NULL);
+	if (ret == 0) {
+		gt->offset = gt->resource.start - r->start;
+		return gt;
+	}
+	kfree(gt);
+	return NULL;
+}
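+/*
+ * Example (sketch): reserve a 4MB GEM backed region above the stolen
+ * area (backed = 0) and release it again:
+ *
+ *	struct gtt_range *gt = psb_gtt_alloc_range(dev, 4 * 1024 * 1024,
+ *						   "example", 0);
+ *	if (gt)
+ *		psb_gtt_free_range(dev, gt);
+ */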
+
+/**
+ *	psb_gtt_free_range	-	release GTT address space
+ *	@dev: our DRM device
+ *	@gt: a mapping created with psb_gtt_alloc_range
+ *
+ *	Release a resource that was allocated with psb_gtt_alloc_range. If the
+ *	object has been pinned by mmap users we currently clean that up here.
+ */
+void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
+{
+	/* Undo the mmap pin if we are destroying the object */
+	if (gt->mmapping) {
+		psb_gtt_unpin(gt);
+		gt->mmapping = 0;
+	}
+	WARN_ON(gt->in_gart && !gt->stolen);
+	release_resource(&gt->resource);
+	kfree(gt);
+}
+
+static void psb_gtt_alloc(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	init_rwsem(&dev_priv->gtt.sem);
+}
+
+void psb_gtt_takedown(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->gtt_map) {
+		iounmap(dev_priv->gtt_map);
+		dev_priv->gtt_map = NULL;
+	}
+	if (dev_priv->gtt_initialized) {
+		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+				      dev_priv->gmch_ctrl);
+		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
+		(void) PSB_RVDC32(PSB_PGETBL_CTL);
+	}
+	if (dev_priv->vram_addr)
+		iounmap(dev_priv->vram_addr);
+}
+
+int psb_gtt_init(struct drm_device *dev, int resume)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned gtt_pages;
+	unsigned long stolen_size, vram_stolen_size;
+	unsigned i, num_pages;
+	unsigned pfn_base;
+	struct psb_gtt *pg;
+
+	int ret = 0;
+	uint32_t pte;
+
+	if (!resume) {
+		mutex_init(&dev_priv->gtt_mutex);
+		psb_gtt_alloc(dev);
+	}
+
+	pg = &dev_priv->gtt;
+
+	/* Enable the GTT */
+	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
+	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
+	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+	(void) PSB_RVDC32(PSB_PGETBL_CTL);
+
+	/* The root resource we allocate address space from */
+	dev_priv->gtt_initialized = 1;
+
+	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
+
+	/*
+	 *	The video mmu has a hw bug when accessing 0xD0000000.
+	 *	Make the GATT start at 0xE0000000. This doesn't actually
+	 *	matter for us now, but may do if the video acceleration
+	 *	ever gets opened up.
+	 */
+	pg->mmu_gatt_start = 0xE0000000;
+
+	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
+	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
+								>> PAGE_SHIFT;
+	/* CDV doesn't report this; in that case the system has 64 GTT pages */
+	if (pg->gtt_start == 0 || gtt_pages == 0) {
+		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
+		gtt_pages = 64;
+		pg->gtt_start = dev_priv->pge_ctl;
+	}
+
+	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
+	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+								>> PAGE_SHIFT;
+	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
+
+	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
+		static struct resource fudge;	/* Preferably peppermint */
+		/* This can occur on CDV systems. Fudge it in this case.
+		   We really don't care what imaginary space is being allocated
+		   at this point */
+		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
+		pg->gatt_start = 0x40000000;
+		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
+		/* This is a little confusing but in fact the GTT is providing
+		   a view from the GPU into memory and not vice versa. As such
+		   this is really allocating space that is not the same as the
+		   CPU address space on CDV */
+		fudge.start = 0x40000000;
+		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
+		fudge.name = "fudge";
+		fudge.flags = IORESOURCE_MEM;
+		dev_priv->gtt_mem = &fudge;
+	}
+
+	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
+	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
+								- PAGE_SIZE;
+
+	stolen_size = vram_stolen_size;
+
+	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
+			dev_priv->stolen_base, vram_stolen_size / 1024);
+
+	if (resume && (gtt_pages != pg->gtt_pages) &&
+	    (stolen_size != pg->stolen_size)) {
+		dev_err(dev->dev, "GTT resume error.\n");
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	pg->gtt_pages = gtt_pages;
+	pg->stolen_size = stolen_size;
+	dev_priv->vram_stolen_size = vram_stolen_size;
+
+	/*
+	 *	Map the GTT and the stolen memory area
+	 */
+	if (!resume)
+		dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+						gtt_pages << PAGE_SHIFT);
+	if (!dev_priv->gtt_map) {
+		dev_err(dev->dev, "Failure to map gtt.\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	if (!resume)
+		dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
+						 stolen_size);
+	if (!dev_priv->vram_addr) {
+		dev_err(dev->dev, "Failure to map stolen base.\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	/*
+	 * Insert vram stolen pages into the GTT
+	 */
+
+	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
+	num_pages = vram_stolen_size >> PAGE_SHIFT;
+	dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+		num_pages, pfn_base << PAGE_SHIFT, 0);
+	for (i = 0; i < num_pages; ++i) {
+		pte = psb_gtt_mask_pte(pfn_base + i, 0);
+		iowrite32(pte, dev_priv->gtt_map + i);
+	}
+
+	/*
+	 * Init rest of GTT to the scratch page to avoid accidents or scribbles
+	 */
+
+	pfn_base = page_to_pfn(dev_priv->scratch_page);
+	pte = psb_gtt_mask_pte(pfn_base, 0);
+	for (; i < gtt_pages; ++i)
+		iowrite32(pte, dev_priv->gtt_map + i);
+
+	(void) ioread32(dev_priv->gtt_map + i - 1);
+	return 0;
+
+out_err:
+	psb_gtt_takedown(dev);
+	return ret;
+}
+
+int psb_gtt_restore(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct resource *r = dev_priv->gtt_mem->child;
+	struct gtt_range *range;
+	unsigned int restored = 0, total = 0, size = 0;
+
+	/* On resume, the gtt_mutex is already initialized */
+	mutex_lock(&dev_priv->gtt_mutex);
+	psb_gtt_init(dev, 1);
+
+	while (r != NULL) {
+		range = container_of(r, struct gtt_range, resource);
+		if (range->pages) {
+			psb_gtt_insert(dev, range, 1);
+			size += range->resource.end - range->resource.start;
+			restored++;
+		}
+		r = r->sibling;
+		total++;
+	}
+	mutex_unlock(&dev_priv->gtt_mutex);
+	DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)\n", restored,
+			 total, (size / 1024));
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/gtt.h b/linux-imx/drivers/gpu/drm/gma500/gtt.h
new file mode 100644
index 0000000..6191d10
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/gtt.h
@@ -0,0 +1,64 @@
+/**************************************************************************
+ * Copyright (c) 2007-2008, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_GTT_H_
+#define _PSB_GTT_H_
+
+#include <drm/drmP.h>
+
+/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
+struct psb_gtt {
+	uint32_t gatt_start;
+	uint32_t mmu_gatt_start;
+	uint32_t gtt_start;
+	uint32_t gtt_phys_start;
+	unsigned gtt_pages;
+	unsigned gatt_pages;
+	unsigned long stolen_size;
+	unsigned long vram_stolen_size;
+	struct rw_semaphore sem;
+};
+
+/* Exported functions */
+extern int psb_gtt_init(struct drm_device *dev, int resume);
+extern void psb_gtt_takedown(struct drm_device *dev);
+
+/* Each gtt_range describes an allocation in the GTT area */
+struct gtt_range {
+	struct resource resource;	/* Resource for our allocation */
+	u32 offset;			/* GTT offset of our object */
+	struct drm_gem_object gem;	/* GEM high level stuff */
+	int in_gart;			/* Currently in the GART (ref ct) */
+	bool stolen;			/* Backed from stolen RAM */
+	bool mmapping;			/* Is mmappable */
+	struct page **pages;		/* Backing pages if present */
+	int npage;			/* Number of backing pages */
+	int roll;			/* Roll applied to the GTT entries */
+};
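+/*
+ * Typical lifecycle of a GEM backed range (sketch): psb_gtt_alloc_range()
+ * reserves address space, psb_gtt_pin() faults the shmem pages in and
+ * writes the GTT entries, the hardware is then pointed at the range's
+ * offset, and psb_gtt_unpin() followed by psb_gtt_free_range() undoes
+ * it all. Stolen backed ranges skip the page attach/detach steps as
+ * they are permanently resident.
+ */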
+
+extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+						const char *name, int backed);
+extern void psb_gtt_kref_put(struct gtt_range *gt);
+extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
+extern int psb_gtt_pin(struct gtt_range *gt);
+extern void psb_gtt_unpin(struct gtt_range *gt);
+extern void psb_gtt_roll(struct drm_device *dev,
+					struct gtt_range *gt, int roll);
+extern int psb_gtt_restore(struct drm_device *dev);
+#endif
diff --git a/linux-imx/drivers/gpu/drm/gma500/intel_bios.c b/linux-imx/drivers/gpu/drm/gma500/intel_bios.c
new file mode 100644
index 0000000..d349734
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/intel_bios.c
@@ -0,0 +1,597 @@
+/*
+ * Copyright (c) 2006 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+
+#define	SLAVE_ADDR1	0x70
+#define	SLAVE_ADDR2	0x72
+
+static void *find_section(struct bdb_header *bdb, int section_id)
+{
+	u8 *base = (u8 *)bdb;
+	int index = 0;
+	u16 total, current_size;
+	u8 current_id;
+
+	/* skip to first section */
+	index += bdb->header_size;
+	total = bdb->bdb_size;
+
+	/* walk the sections looking for section_id */
+	while (index < total) {
+		current_id = *(base + index);
+		index++;
+		current_size = *((u16 *)(base + index));
+		index += 2;
+		if (current_id == section_id)
+			return base + index;
+		index += current_size;
+	}
+
+	return NULL;
+}
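+/*
+ * Layout walked above (sketch of one BDB section):
+ *
+ *	offset 0: u8  section id
+ *	offset 1: u16 section size, little-endian, excluding this header
+ *	offset 3: payload (section size bytes)
+ *
+ * find_section() returns a pointer to the payload, which is why
+ * get_blocksize() below reads the u16 sitting two bytes before it.
+ */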
+
+static void
+parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
+{
+	struct bdb_edp *edp;
+	struct edp_power_seq *edp_pps;
+	struct edp_link_params *edp_link_params;
+	uint8_t	panel_type;
+
+	edp = find_section(bdb, BDB_EDP);
+
+	dev_priv->edp.bpp = 18;
+	if (!edp) {
+		if (dev_priv->edp.support) {
+			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported, assume %dbpp panel color depth.\n",
+				      dev_priv->edp.bpp);
+		}
+		return;
+	}
+
+	panel_type = dev_priv->panel_type;
+	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+	case EDP_18BPP:
+		dev_priv->edp.bpp = 18;
+		break;
+	case EDP_24BPP:
+		dev_priv->edp.bpp = 24;
+		break;
+	case EDP_30BPP:
+		dev_priv->edp.bpp = 30;
+		break;
+	}
+
+	/* Get the eDP sequencing and link info */
+	edp_pps = &edp->power_seqs[panel_type];
+	edp_link_params = &edp->link_params[panel_type];
+
+	dev_priv->edp.pps = *edp_pps;
+
+	DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+				dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8, 
+				dev_priv->edp.pps.t9, dev_priv->edp.pps.t10,
+				dev_priv->edp.pps.t11_t12);
+
+	dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+		DP_LINK_BW_1_62;
+	switch (edp_link_params->lanes) {
+	case 0:
+		dev_priv->edp.lanes = 1;
+		break;
+	case 1:
+		dev_priv->edp.lanes = 2;
+		break;
+	case 3:
+	default:
+		dev_priv->edp.lanes = 4;
+		break;
+	}
+	DRM_DEBUG_KMS("VBT reports EDP: Lane_count %d, Lane_rate %d, Bpp %d\n",
+			dev_priv->edp.lanes, dev_priv->edp.rate, dev_priv->edp.bpp);
+
+	switch (edp_link_params->preemphasis) {
+	case 0:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+		break;
+	case 1:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+		break;
+	case 2:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+		break;
+	case 3:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+		break;
+	}
+	switch (edp_link_params->vswing) {
+	case 0:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+		break;
+	case 1:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+		break;
+	case 2:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+		break;
+	case 3:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+		break;
+	}
+	DRM_DEBUG_KMS("VBT reports EDP: VSwing  %d, Preemph %d\n",
+			dev_priv->edp.vswing, dev_priv->edp.preemphasis);
+}
+
+static u16
+get_blocksize(void *p)
+{
+	u16 *block_ptr, block_size;
+
+	block_ptr = (u16 *)((char *)p - 2);
+	block_size = *block_ptr;
+	return block_size;
+}
+
+static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+			struct lvds_dvo_timing *dvo_timing)
+{
+	panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+		dvo_timing->hactive_lo;
+	panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+		((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+	panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+		dvo_timing->hsync_pulse_width;
+	panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+		((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+	panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+		dvo_timing->vactive_lo;
+	panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+		dvo_timing->vsync_off;
+	panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+		dvo_timing->vsync_pulse_width;
+	panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+		((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+	panel_fixed_mode->clock = dvo_timing->clock * 10;
+	panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+	if (dvo_timing->hsync_positive)
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (dvo_timing->vsync_positive)
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+	/* Some VBTs have bogus h/vtotal values */
+	if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+		panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+	if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+		panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+	drm_mode_set_name(panel_fixed_mode);
+}
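+/*
+ * Worked example of the hi/lo assembly above: a 1280 pixel wide panel
+ * stores hactive_hi = 0x5 and hactive_lo = 0x00, giving
+ * (0x5 << 8) | 0x00 = 0x500 = 1280. The VBT clock field is in 10kHz
+ * units, so a 65MHz dot clock arrives as 6500 and becomes 65000 kHz.
+ */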
+
+static void parse_backlight_data(struct drm_psb_private *dev_priv,
+				struct bdb_header *bdb)
+{
+	struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
+	struct bdb_lvds_backlight *lvds_bl;
+	u8 p_type = 0;
+	void *bl_start = NULL;
+	struct bdb_lvds_options *lvds_opts
+				= find_section(bdb, BDB_LVDS_OPTIONS);
+
+	dev_priv->lvds_bl = NULL;
+
+	if (lvds_opts)
+		p_type = lvds_opts->panel_type;
+	else
+		return;
+
+	bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
+	if (!bl_start)
+		return;
+	vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
+
+	lvds_bl = kmemdup(vbt_lvds_bl, sizeof(*vbt_lvds_bl), GFP_KERNEL);
+	if (!lvds_bl) {
+		dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
+		return;
+	}
+	dev_priv->lvds_bl = lvds_bl;
+}
+
+/* Try to find integrated panel data */
+static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
+			    struct bdb_header *bdb)
+{
+	struct bdb_lvds_options *lvds_options;
+	struct bdb_lvds_lfp_data *lvds_lfp_data;
+	struct bdb_lvds_lfp_data_entry *entry;
+	struct lvds_dvo_timing *dvo_timing;
+	struct drm_display_mode *panel_fixed_mode;
+
+	/* Defaults if we can't find VBT info */
+	dev_priv->lvds_dither = 0;
+	dev_priv->lvds_vbt = 0;
+
+	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+	if (!lvds_options)
+		return;
+
+	dev_priv->lvds_dither = lvds_options->pixel_dither;
+	dev_priv->panel_type = lvds_options->panel_type;
+
+	if (lvds_options->panel_type == 0xff)
+		return;
+
+	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+	if (!lvds_lfp_data)
+		return;
+
+
+	entry = &lvds_lfp_data->data[lvds_options->panel_type];
+	dvo_timing = &entry->dvo_timing;
+
+	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
+				      GFP_KERNEL);
+	if (panel_fixed_mode == NULL) {
+		dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
+		return;
+	}
+
+	dev_priv->lvds_vbt = 1;
+	fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+
+	if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
+		dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+		drm_mode_debug_printmodeline(panel_fixed_mode);
+	} else {
+		dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
+		dev_priv->lvds_vbt = 0;
+		kfree(panel_fixed_mode);
+	}
+}
+
+/* Try to find sdvo panel data */
+static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
+		      struct bdb_header *bdb)
+{
+	struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+	struct lvds_dvo_timing *dvo_timing;
+	struct drm_display_mode *panel_fixed_mode;
+
+	dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+	sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+	if (!sdvo_lvds_options)
+		return;
+
+	dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+	if (!dvo_timing)
+		return;
+
+	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+
+	if (!panel_fixed_mode)
+		return;
+
+	fill_detail_timing_data(panel_fixed_mode,
+			dvo_timing + sdvo_lvds_options->panel_type);
+
+	dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
+}
+
+static void parse_general_features(struct drm_psb_private *dev_priv,
+		       struct bdb_header *bdb)
+{
+	struct bdb_general_features *general;
+
+	/* Set sensible defaults in case we can't find the general block */
+	dev_priv->int_tv_support = 1;
+	dev_priv->int_crt_support = 1;
+
+	general = find_section(bdb, BDB_GENERAL_FEATURES);
+	if (general) {
+		dev_priv->int_tv_support = general->int_tv_support;
+		dev_priv->int_crt_support = general->int_crt_support;
+		dev_priv->lvds_use_ssc = general->enable_ssc;
+
+		if (dev_priv->lvds_use_ssc) {
+			dev_priv->lvds_ssc_freq
+				= general->ssc_freq ? 100 : 96;
+		}
+	}
+}
+
+static void
+parse_sdvo_device_mapping(struct drm_psb_private *dev_priv,
+			  struct bdb_header *bdb)
+{
+	struct sdvo_device_mapping *p_mapping;
+	struct bdb_general_definitions *p_defs;
+	struct child_device_config *p_child;
+	int i, child_device_num, count;
+	u16	block_size;
+
+	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+	if (!p_defs) {
+		DRM_DEBUG_KMS("No general definition block found, unable to construct sdvo mapping.\n");
+		return;
+	}
+	/* Check that the child device size meets our expectations.
+	 * If the child device size recorded in the general definition
+	 * block differs from sizeof(struct child_device_config), skip
+	 * parsing the SDVO device info.
+	 */
+	if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* Mismatched child device size - ignore the block */
+		DRM_DEBUG_KMS("Mismatched child device size, invalid.\n");
+		return;
+	}
+	/* get the block size of general definitions */
+	block_size = get_blocksize(p_defs);
+	/* get the number of child device */
+	child_device_num = (block_size - sizeof(*p_defs)) /
+				sizeof(*p_child);
+	count = 0;
+	for (i = 0; i < child_device_num; i++) {
+		p_child = &(p_defs->devices[i]);
+		if (!p_child->device_type) {
+			/* skip the device block if device type is invalid */
+			continue;
+		}
+		if (p_child->slave_addr != SLAVE_ADDR1 &&
+			p_child->slave_addr != SLAVE_ADDR2) {
+			/*
+			 * If the slave address is neither 0x70 nor 0x72,
+			 * it is not a SDVO device. Skip it.
+			 */
+			continue;
+		}
+		if (p_child->dvo_port != DEVICE_PORT_DVOB &&
+			p_child->dvo_port != DEVICE_PORT_DVOC) {
+			/* skip the incorrect SDVO port */
+			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+			continue;
+		}
+		DRM_DEBUG_KMS("Found SDVO device with slave addr %2x on"
+				" %s port\n",
+				p_child->slave_addr,
+				(p_child->dvo_port == DEVICE_PORT_DVOB) ?
+					"SDVOB" : "SDVOC");
+		p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+		if (!p_mapping->initialized) {
+			p_mapping->dvo_port = p_child->dvo_port;
+			p_mapping->slave_addr = p_child->slave_addr;
+			p_mapping->dvo_wiring = p_child->dvo_wiring;
+			p_mapping->ddc_pin = p_child->ddc_pin;
+			p_mapping->i2c_pin = p_child->i2c_pin;
+			p_mapping->initialized = 1;
+			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+				      p_mapping->dvo_port,
+				      p_mapping->slave_addr,
+				      p_mapping->dvo_wiring,
+				      p_mapping->ddc_pin,
+				      p_mapping->i2c_pin);
+		} else {
+			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
+					 "two SDVO devices.\n");
+		}
+		if (p_child->slave2_addr) {
+			/* Possibly an SDVO device with multiple inputs;
+			   the mapping info for it is not added */
+			DRM_DEBUG_KMS("slave2_addr is set; this may be an"
+				" SDVO device with multiple inputs.\n");
+		}
+		count++;
+	}
+
+	if (!count) {
+		/* No SDVO device info found */
+		DRM_DEBUG_KMS("No SDVO device info found in VBT\n");
+	}
+}
+
+
+static void
+parse_driver_features(struct drm_psb_private *dev_priv,
+		      struct bdb_header *bdb)
+{
+	struct bdb_driver_features *driver;
+
+	driver = find_section(bdb, BDB_DRIVER_FEATURES);
+	if (!driver)
+		return;
+
+	if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+		dev_priv->edp.support = 1;
+
+	/* This bit means to use 96Mhz for DPLL_A or not */
+	if (driver->primary_lfp_id)
+		dev_priv->dplla_96mhz = true;
+	else
+		dev_priv->dplla_96mhz = false;
+}
+
+static void
+parse_device_mapping(struct drm_psb_private *dev_priv,
+		       struct bdb_header *bdb)
+{
+	struct bdb_general_definitions *p_defs;
+	struct child_device_config *p_child, *child_dev_ptr;
+	int i, child_device_num, count;
+	u16	block_size;
+
+	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+	if (!p_defs) {
+		DRM_DEBUG_KMS("No general definition block found, no devices defined.\n");
+		return;
+	}
+	/* Check that the child device size meets our expectations.
+	 * If the child device size recorded in the general definition
+	 * block differs from sizeof(struct child_device_config), skip
+	 * parsing the device info.
+	 */
+	if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* Mismatched child device size - ignore the block */
+		DRM_DEBUG_KMS("Mismatched child device size, invalid.\n");
+		return;
+	}
+	/* get the block size of general definitions */
+	block_size = get_blocksize(p_defs);
+	/* get the number of child device */
+	child_device_num = (block_size - sizeof(*p_defs)) /
+				sizeof(*p_child);
+	count = 0;
+	/* get the number of child devices that are present */
+	for (i = 0; i < child_device_num; i++) {
+		p_child = &(p_defs->devices[i]);
+		if (!p_child->device_type) {
+			/* skip the device block if device type is invalid */
+			continue;
+		}
+		count++;
+	}
+	if (!count) {
+		DRM_DEBUG_KMS("no child devices parsed from VBT\n");
+		return;
+	}
+	dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+	if (!dev_priv->child_dev) {
+		DRM_DEBUG_KMS("No memory space for child devices\n");
+		return;
+	}
+
+	dev_priv->child_dev_num = count;
+	count = 0;
+	for (i = 0; i < child_device_num; i++) {
+		p_child = &(p_defs->devices[i]);
+		if (!p_child->device_type) {
+			/* skip the device block if device type is invalid */
+			continue;
+		}
+		child_dev_ptr = dev_priv->child_dev + count;
+		count++;
+		memcpy((void *)child_dev_ptr, (void *)p_child,
+					sizeof(*p_child));
+	}
+}
+
+
+/**
+ * psb_intel_init_bios - initialize VBIOS settings & find VBT
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
+ * to appropriate values.
+ *
+ * VBT existence is a sanity check that is relied on by other i830_bios.c code.
+ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
+ * feed an updated VBT back through that, compared to what we'll fetch using
+ * this method of groping around in the BIOS data.
+ *
+ * Returns 0 on success, nonzero on failure.
+ */
+int psb_intel_init_bios(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = dev->pdev;
+	struct vbt_header *vbt = NULL;
+	struct bdb_header *bdb = NULL;
+	u8 __iomem *bios = NULL;
+	size_t size;
+	int i;
+
+
+	dev_priv->panel_type = 0xff;
+
+	/* XXX Should this validation be moved to intel_opregion.c? */
+	if (dev_priv->opregion.vbt) {
+		struct vbt_header *vbt = dev_priv->opregion.vbt;
+		if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+			DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
+					 vbt->signature);
+			bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
+		} else
+			dev_priv->opregion.vbt = NULL;
+	}
+
+	if (bdb == NULL) {
+		bios = pci_map_rom(pdev, &size);
+		if (!bios)
+			return -1;
+
+		/* Scour memory looking for the VBT signature */
+		for (i = 0; i + 4 < size; i++) {
+			if (!memcmp(bios + i, "$VBT", 4)) {
+				vbt = (struct vbt_header *)(bios + i);
+				break;
+			}
+		}
+
+		if (!vbt) {
+			dev_err(dev->dev, "VBT signature missing\n");
+			pci_unmap_rom(pdev, bios);
+			return -1;
+		}
+		bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+	}
+
+	/* Grab useful general definitions */
+	parse_general_features(dev_priv, bdb);
+	parse_driver_features(dev_priv, bdb);
+	parse_lfp_panel_data(dev_priv, bdb);
+	parse_sdvo_panel_data(dev_priv, bdb);
+	parse_sdvo_device_mapping(dev_priv, bdb);
+	parse_device_mapping(dev_priv, bdb);
+	parse_backlight_data(dev_priv, bdb);
+	parse_edp(dev_priv, bdb);
+
+	if (bios)
+		pci_unmap_rom(pdev, bios);
+
+	return 0;
+}
+
+/**
+ * Destroy and free VBT data
+ */
+void psb_intel_destroy_bios(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	kfree(dev_priv->sdvo_lvds_vbt_mode);
+	kfree(dev_priv->lfp_lvds_vbt_mode);
+	kfree(dev_priv->lvds_bl);
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/intel_bios.h b/linux-imx/drivers/gpu/drm/gma500/intel_bios.h
new file mode 100644
index 0000000..978ae4b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/intel_bios.h
@@ -0,0 +1,621 @@
+/*
+ * Copyright (c) 2006 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _INTEL_BIOS_H_
+#define _INTEL_BIOS_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_dp_helper.h>
+
+struct vbt_header {
+	u8 signature[20];		/**< Always starts with "$VBT" */
+	u16 version;			/**< decimal */
+	u16 header_size;		/**< in bytes */
+	u16 vbt_size;			/**< in bytes */
+	u8 vbt_checksum;
+	u8 reserved0;
+	u32 bdb_offset;			/**< from beginning of VBT */
+	u32 aim_offset[4];		/**< from beginning of VBT */
+} __attribute__((packed));
+
+
+struct bdb_header {
+	u8 signature[16];		/**< Always 'BIOS_DATA_BLOCK' */
+	u16 version;			/**< decimal */
+	u16 header_size;		/**< in bytes */
+	u16 bdb_size;			/**< in bytes */
+};
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+	u8 type; /* 0 == desktop, 1 == mobile */
+	u8 relstage;
+	u8 chipset;
+	u8 lvds_present:1;
+	u8 tv_present:1;
+	u8 rsvd2:6; /* finish byte */
+	u8 rsvd3[4];
+	u8 signon[155];
+	u8 copyright[61];
+	u16 code_segment;
+	u8 dos_boot_mode;
+	u8 bandwidth_percent;
+	u8 rsvd4; /* popup memory size */
+	u8 resize_pci_bios;
+	u8 rsvd5; /* is crt already on ddc2 */
+} __attribute__((packed));
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES	  1
+#define BDB_GENERAL_DEFINITIONS	  2
+#define BDB_OLD_TOGGLE_LIST	  3
+#define BDB_MODE_SUPPORT_LIST	  4
+#define BDB_GENERIC_MODE_TABLE	  5
+#define BDB_EXT_MMIO_REGS	  6
+#define BDB_SWF_IO		  7
+#define BDB_SWF_MMIO		  8
+#define BDB_DOT_CLOCK_TABLE	  9
+#define BDB_MODE_REMOVAL_TABLE	 10
+#define BDB_CHILD_DEVICE_TABLE	 11
+#define BDB_DRIVER_FEATURES	 12
+#define BDB_DRIVER_PERSISTENCE	 13
+#define BDB_EXT_TABLE_PTRS	 14
+#define BDB_DOT_CLOCK_OVERRIDE	 15
+#define BDB_DISPLAY_SELECT	 16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION	 18
+#define BDB_DISPLAY_REMOVE	 19
+#define BDB_OEM_CUSTOM		 20
+#define BDB_EFP_LIST		 21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS	 22
+#define BDB_SDVO_PANEL_DTDS	 23
+#define BDB_SDVO_LVDS_PNP_IDS	 24
+#define BDB_SDVO_LVDS_POWER_SEQ	 25
+#define BDB_TV_OPTIONS		 26
+#define BDB_EDP			 27
+#define BDB_LVDS_OPTIONS	 40
+#define BDB_LVDS_LFP_DATA_PTRS	 41
+#define BDB_LVDS_LFP_DATA	 42
+#define BDB_LVDS_BACKLIGHT	 43
+#define BDB_LVDS_POWER		 44
+#define BDB_SKIP		254 /* VBIOS private block, ignore */
+
+struct bdb_general_features {
+	/* bits 1 */
+	u8 panel_fitting:2;
+	u8 flexaim:1;
+	u8 msg_enable:1;
+	u8 clear_screen:3;
+	u8 color_flip:1;
+
+	/* bits 2 */
+	u8 download_ext_vbt:1;
+	u8 enable_ssc:1;
+	u8 ssc_freq:1;
+	u8 enable_lfp_on_override:1;
+	u8 disable_ssc_ddt:1;
+	u8 rsvd8:3; /* finish byte */
+
+	/* bits 3 */
+	u8 disable_smooth_vision:1;
+	u8 single_dvi:1;
+	u8 rsvd9:6; /* finish byte */
+
+	/* bits 4 */
+	u8 legacy_monitor_detect;
+
+	/* bits 5 */
+	u8 int_crt_support:1;
+	u8 int_tv_support:1;
+	u8 int_efp_support:1;
+	u8 dp_ssc_enb:1;	/* PCH attached eDP supports SSC */
+	u8 dp_ssc_freq:1;	/* SSC freq for PCH attached eDP */
+	u8 rsvd11:3; /* finish byte */
+} __attribute__((packed));
+
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS	0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C	0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC	0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C	0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE	0x00
+#define DEVICE_TYPE_CRT		0x01
+#define DEVICE_TYPE_TV		0x09
+#define DEVICE_TYPE_EFP		0x12
+#define DEVICE_TYPE_LFP		0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS		0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG	0x4001
+#define DEVICE_TYPE_TV_COMPOSITE	0x0209
+#define DEVICE_TYPE_TV_MACROVISION	0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE	0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE	0x0609
+#define DEVICE_TYPE_TV_SCART		0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR	0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR	0x6052
+#define DEVICE_TYPE_EFP_DVI_I		0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL	0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP	0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR	0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX	0x6162
+#define DEVICE_TYPE_LFP_PANELLINK	0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR	0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR	0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL	0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP	0x51e2
+
+#define DEVICE_CFG_NONE		0x00
+#define DEVICE_CFG_12BIT_DVOB	0x01
+#define DEVICE_CFG_12BIT_DVOC	0x02
+#define DEVICE_CFG_24BIT_DVOBC	0x09
+#define DEVICE_CFG_24BIT_DVOCB	0x0a
+#define DEVICE_CFG_DUAL_DVOB	0x11
+#define DEVICE_CFG_DUAL_DVOC	0x12
+#define DEVICE_CFG_DUAL_DVOBC	0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC	0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB	0x1a
+
+#define DEVICE_WIRE_NONE	0x00
+#define DEVICE_WIRE_DVOB	0x01
+#define DEVICE_WIRE_DVOC	0x02
+#define DEVICE_WIRE_DVOBC	0x03
+#define DEVICE_WIRE_DVOBB	0x05
+#define DEVICE_WIRE_DVOCC	0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA	0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB	0x01
+#define DEVICE_PORT_DVOC	0x02
+
+struct child_device_config {
+	u16 handle;
+	u16 device_type;
+	u8  device_id[10]; /* ascii string */
+	u16 addin_offset;
+	u8  dvo_port; /* See Device_PORT_* above */
+	u8  i2c_pin;
+	u8  slave_addr;
+	u8  ddc_pin;
+	u16 edid_ptr;
+	u8  dvo_cfg; /* See DEVICE_CFG_* above */
+	u8  dvo2_port;
+	u8  i2c2_pin;
+	u8  slave2_addr;
+	u8  ddc2_pin;
+	u8  capabilities;
+	u8  dvo_wiring;/* See DEVICE_WIRE_* above */
+	u8  dvo2_wiring;
+	u16 extended_type;
+	u8  dvo_function;
+} __attribute__((packed));
+
+
+struct bdb_general_definitions {
+	/* DDC GPIO */
+	u8 crt_ddc_gmbus_pin;
+
+	/* DPMS bits */
+	u8 dpms_acpi:1;
+	u8 skip_boot_crt_detect:1;
+	u8 dpms_aim:1;
+	u8 rsvd1:5; /* finish byte */
+
+	/* boot device bits */
+	u8 boot_display[2];
+	u8 child_dev_size;
+
+	/*
+	 * Device info:
+	 * If TV is present, it'll be at devices[0].
+	 * LVDS will be next, either devices[0] or [1], if present.
+	 * On some platforms the number of devices is 6, but it could be as
+	 * few as 4 if both TV and LVDS are missing.
+	 * The device count depends on the size of the general definition
+	 * block and is obtained with the following formula:
+	 * number = (block_size - sizeof(bdb_general_definitions))/
+	 *	     sizeof(child_device_config);
+	 */
+	struct child_device_config devices[0];
+};
+
+struct bdb_lvds_options {
+	u8 panel_type;
+	u8 rsvd1;
+	/* LVDS capabilities, stored in a dword */
+	u8 pfit_mode:2;
+	u8 pfit_text_mode_enhanced:1;
+	u8 pfit_gfx_mode_enhanced:1;
+	u8 pfit_ratio_auto:1;
+	u8 pixel_dither:1;
+	u8 lvds_edid:1;
+	u8 rsvd2:1;
+	u8 rsvd4;
+} __attribute__((packed));
+
+struct bdb_lvds_backlight {
+	u8 type:2;
+	u8 pol:1;
+	u8 gpio:3;
+	u8 gmbus:2;
+	u16 freq;
+	u8 minbrightness;
+	u8 i2caddr;
+	u8 brightnesscmd;
+	/*FIXME: more...*/
+} __attribute__((packed));
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+	u16 fp_timing_offset; /* offsets are from start of bdb */
+	u8 fp_table_size;
+	u16 dvo_timing_offset;
+	u8 dvo_table_size;
+	u16 panel_pnp_id_offset;
+	u8 pnp_table_size;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_ptrs {
+	u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+	struct bdb_lvds_lfp_data_ptr ptr[16];
+} __attribute__((packed));
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+	u16 x_res;
+	u16 y_res;
+	u32 lvds_reg;
+	u32 lvds_reg_val;
+	u32 pp_on_reg;
+	u32 pp_on_reg_val;
+	u32 pp_off_reg;
+	u32 pp_off_reg_val;
+	u32 pp_cycle_reg;
+	u32 pp_cycle_reg_val;
+	u32 pfit_reg;
+	u32 pfit_reg_val;
+	u16 terminator;
+} __attribute__((packed));
+
+struct lvds_dvo_timing {
+	u16 clock;		/**< In 10khz */
+	u8 hactive_lo;
+	u8 hblank_lo;
+	u8 hblank_hi:4;
+	u8 hactive_hi:4;
+	u8 vactive_lo;
+	u8 vblank_lo;
+	u8 vblank_hi:4;
+	u8 vactive_hi:4;
+	u8 hsync_off_lo;
+	u8 hsync_pulse_width;
+	u8 vsync_pulse_width:4;
+	u8 vsync_off:4;
+	u8 rsvd0:6;
+	u8 hsync_off_hi:2;
+	u8 h_image;
+	u8 v_image;
+	u8 max_hv;
+	u8 h_border;
+	u8 v_border;
+	u8 rsvd1:3;
+	u8 digital:2;
+	u8 vsync_positive:1;
+	u8 hsync_positive:1;
+	u8 rsvd2:1;
+} __attribute__((packed));
+
+struct lvds_pnp_id {
+	u16 mfg_name;
+	u16 product_code;
+	u32 serial;
+	u8 mfg_week;
+	u8 mfg_year;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_entry {
+	struct lvds_fp_timing fp_timing;
+	struct lvds_dvo_timing dvo_timing;
+	struct lvds_pnp_id pnp_id;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data {
+	struct bdb_lvds_lfp_data_entry data[16];
+} __attribute__((packed));
+
+struct aimdb_header {
+	char signature[16];
+	char oem_device[20];
+	u16 aimdb_version;
+	u16 aimdb_header_size;
+	u16 aimdb_size;
+} __attribute__((packed));
+
+struct aimdb_block {
+	u8 aimdb_id;
+	u16 aimdb_size;
+} __attribute__((packed));
+
+struct vch_panel_data {
+	u16 fp_timing_offset;
+	u8 fp_timing_size;
+	u16 dvo_timing_offset;
+	u8 dvo_timing_size;
+	u16 text_fitting_offset;
+	u8 text_fitting_size;
+	u16 graphics_fitting_offset;
+	u8 graphics_fitting_size;
+} __attribute__((packed));
+
+struct vch_bdb_22 {
+	struct aimdb_block aimdb_block;
+	struct vch_panel_data panels[16];
+} __attribute__((packed));
+
+struct bdb_sdvo_lvds_options {
+	u8 panel_backlight;
+	u8 h40_set_panel_type;
+	u8 panel_type;
+	u8 ssc_clk_freq;
+	u16 als_low_trip;
+	u16 als_high_trip;
+	u8 sclalarcoeff_tab_row_num;
+	u8 sclalarcoeff_tab_row_size;
+	u8 coefficient[8];
+	u8 panel_misc_bits_1;
+	u8 panel_misc_bits_2;
+	u8 panel_misc_bits_3;
+	u8 panel_misc_bits_4;
+} __attribute__((packed));
+
+#define BDB_DRIVER_FEATURE_NO_LVDS		0
+#define BDB_DRIVER_FEATURE_INT_LVDS		1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS		2
+#define BDB_DRIVER_FEATURE_EDP			3
+
+struct bdb_driver_features {
+	u8 boot_dev_algorithm:1;
+	u8 block_display_switch:1;
+	u8 allow_display_switch:1;
+	u8 hotplug_dvo:1;
+	u8 dual_view_zoom:1;
+	u8 int15h_hook:1;
+	u8 sprite_in_clone:1;
+	u8 primary_lfp_id:1;
+
+	u16 boot_mode_x;
+	u16 boot_mode_y;
+	u8 boot_mode_bpp;
+	u8 boot_mode_refresh;
+
+	u16 enable_lfp_primary:1;
+	u16 selective_mode_pruning:1;
+	u16 dual_frequency:1;
+	u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+	u16 nt_clone_support:1;
+	u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+	u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+	u16 cui_aspect_scaling:1;
+	u16 preserve_aspect_ratio:1;
+	u16 sdvo_device_power_down:1;
+	u16 crt_hotplug:1;
+	u16 lvds_config:2;
+	u16 tv_hotplug:1;
+	u16 hdmi_config:2;
+
+	u8 static_display:1;
+	u8 reserved2:7;
+	u16 legacy_crt_max_x;
+	u16 legacy_crt_max_y;
+	u8 legacy_crt_max_refresh;
+
+	u8 hdmi_termination;
+	u8 custom_vbt_version;
+} __attribute__((packed));
+
+#define EDP_18BPP	0
+#define EDP_24BPP	1
+#define EDP_30BPP	2
+#define EDP_RATE_1_62	0
+#define EDP_RATE_2_7	1
+#define EDP_LANE_1	0
+#define EDP_LANE_2	1
+#define EDP_LANE_4	3
+#define EDP_PREEMPHASIS_NONE	0
+#define EDP_PREEMPHASIS_3_5dB	1
+#define EDP_PREEMPHASIS_6dB	2
+#define EDP_PREEMPHASIS_9_5dB	3
+#define EDP_VSWING_0_4V		0
+#define EDP_VSWING_0_6V		1
+#define EDP_VSWING_0_8V		2
+#define EDP_VSWING_1_2V		3
+
+struct edp_power_seq {
+	u16 t1_t3;
+	u16 t8;
+	u16 t9;
+	u16 t10;
+	u16 t11_t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+	u8 rate:4;
+	u8 lanes:4;
+	u8 preemphasis:4;
+	u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
+	struct edp_power_seq power_seqs[16];
+	u32 color_depth;
+	u32 sdrrs_msa_timing_delay;
+	struct edp_link_params link_params[16];
+} __attribute__ ((packed));
+
+extern int psb_intel_init_bios(struct drm_device *dev);
+extern void psb_intel_destroy_bios(struct drm_device *dev);
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN	(1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK	0x78 /* See also SWF4 15:0 */
+#define   GR18_HK_NONE		(0x0<<3)
+#define   GR18_HK_LFP_STRETCH	(0x1<<3)
+#define   GR18_HK_TOGGLE_DISP	(0x2<<3)
+#define   GR18_HK_DISP_SWITCH	(0x4<<3) /* see SWF14 15:0 for what to enable */
+#define   GR18_HK_POPUP_DISABLED (0x6<<3)
+#define   GR18_HK_POPUP_ENABLED	(0x7<<3)
+#define   GR18_HK_PFIT		(0x8<<3)
+#define   GR18_HK_APM_CHANGE	(0xa<<3)
+#define   GR18_HK_MULTIPLE	(0xc<<3)
+#define GR18_USER_INT_EN	(1<<2)
+#define GR18_A0000_FLUSH_EN	(1<<1)
+#define GR18_SMM_EN		(1<<0)
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT	16
+#define SWF00_XRES_SHIFT	0
+#define SWF00_RES_MASK		0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT	8
+#define SWF01_TV1_FORMAT_SHIFT	0
+#define SWF01_TV_FORMAT_MASK	0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN	(1<<29)
+#define SWF10_GTT_OVERRIDE_EN	(1<<28)
+#define SWF10_LFP_DPMS_OVR	(1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define   SWF10_OLD_TOGGLE	0x0
+#define   SWF10_TOGGLE_LIST_1	0x1
+#define   SWF10_TOGGLE_LIST_2	0x2
+#define   SWF10_TOGGLE_LIST_3	0x3
+#define   SWF10_TOGGLE_LIST_4	0x4
+#define SWF10_PANNING_EN	(1<<23)
+#define SWF10_DRIVER_LOADED	(1<<22)
+#define SWF10_EXTENDED_DESKTOP	(1<<21)
+#define SWF10_EXCLUSIVE_MODE	(1<<20)
+#define SWF10_OVERLAY_EN	(1<<19)
+#define SWF10_PLANEB_HOLDOFF	(1<<18)
+#define SWF10_PLANEA_HOLDOFF	(1<<17)
+#define SWF10_VGA_HOLDOFF	(1<<16)
+#define SWF10_ACTIVE_DISP_MASK	0xffff
+#define   SWF10_PIPEB_LFP2	(1<<15)
+#define   SWF10_PIPEB_EFP2	(1<<14)
+#define   SWF10_PIPEB_TV2	(1<<13)
+#define   SWF10_PIPEB_CRT2	(1<<12)
+#define   SWF10_PIPEB_LFP	(1<<11)
+#define   SWF10_PIPEB_EFP	(1<<10)
+#define   SWF10_PIPEB_TV	(1<<9)
+#define   SWF10_PIPEB_CRT	(1<<8)
+#define   SWF10_PIPEA_LFP2	(1<<7)
+#define   SWF10_PIPEA_EFP2	(1<<6)
+#define   SWF10_PIPEA_TV2	(1<<5)
+#define   SWF10_PIPEA_CRT2	(1<<4)
+#define   SWF10_PIPEA_LFP	(1<<3)
+#define   SWF10_PIPEA_EFP	(1<<2)
+#define   SWF10_PIPEA_TV	(1<<1)
+#define   SWF10_PIPEA_CRT	(1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT	16
+#define SWF11_SV_TEST_EN	(1<<15)
+#define SWF11_IS_AGP		(1<<14)
+#define SWF11_DISPLAY_HOLDOFF	(1<<13)
+#define SWF11_DPMS_REDUCED	(1<<12)
+#define SWF11_IS_VBE_MODE	(1<<11)
+#define SWF11_PIPEB_ACCESS	(1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK		0x07
+#define   SWF11_DPMS_OFF	(1<<2)
+#define   SWF11_DPMS_SUSPEND	(1<<1)
+#define   SWF11_DPMS_STANDBY	(1<<0)
+#define   SWF11_DPMS_ON		0
+
+#define SWF14_GFX_PFIT_EN	(1<<31)
+#define SWF14_TEXT_PFIT_EN	(1<<30)
+#define SWF14_LID_STATUS_CLOSED	(1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN		(1<<28)
+#define SWF14_DISPLAY_HOLDOFF	(1<<27)
+#define SWF14_DISP_DETECT_EN	(1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS	(1<<24)
+#define SWF14_OS_TYPE_WIN9X	(1<<23)
+#define SWF14_OS_TYPE_WINNT	(1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK	0x00070000
+#define   SWF14_PM_ACPI_VIDEO	(0x4 << 16)
+#define   SWF14_PM_ACPI		(0x3 << 16)
+#define   SWF14_PM_APM_12	(0x2 << 16)
+#define   SWF14_PM_APM_11	(0x1 << 16)
+#define SWF14_HK_REQUEST_MASK	0x0000ffff /* see GR18 6:3 for event type */
+	  /* if GR18 indicates a display switch */
+#define   SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define   SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define   SWF14_DS_PIPEB_TV2_EN  (1<<13)
+#define   SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define   SWF14_DS_PIPEB_LFP_EN  (1<<11)
+#define   SWF14_DS_PIPEB_EFP_EN  (1<<10)
+#define   SWF14_DS_PIPEB_TV_EN	 (1<<9)
+#define   SWF14_DS_PIPEB_CRT_EN  (1<<8)
+#define   SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define   SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define   SWF14_DS_PIPEA_TV2_EN  (1<<5)
+#define   SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define   SWF14_DS_PIPEA_LFP_EN  (1<<3)
+#define   SWF14_DS_PIPEA_EFP_EN  (1<<2)
+#define   SWF14_DS_PIPEA_TV_EN	 (1<<1)
+#define   SWF14_DS_PIPEA_CRT_EN  (1<<0)
+	  /* if GR18 indicates a panel fitting request */
+#define   SWF14_PFIT_EN		(1<<0) /* 0 means disable */
+	  /* if GR18 indicates an APM change request */
+#define   SWF14_APM_HIBERNATE	0x4
+#define   SWF14_APM_SUSPEND	0x3
+#define   SWF14_APM_STANDBY	0x1
+#define   SWF14_APM_RESTORE	0x0
+
+/* Add the device class for LFP, TV, HDMI */
+#define	 DEVICE_TYPE_INT_LFP	0x1022
+#define	 DEVICE_TYPE_INT_TV	0x1009
+#define	 DEVICE_TYPE_HDMI	0x60D2
+#define	 DEVICE_TYPE_DP		0x68C6
+#define	 DEVICE_TYPE_eDP	0x78C6
+
+/* define the DVO port for HDMI output type */
+#define		DVO_B		1
+#define		DVO_C		2
+#define		DVO_D		3
+
+/* define the PORT for DP output type */
+#define		PORT_IDPB	7
+#define		PORT_IDPC	8
+#define		PORT_IDPD	9
+
+#endif /* _INTEL_BIOS_H_ */
diff --git a/linux-imx/drivers/gpu/drm/gma500/intel_gmbus.c b/linux-imx/drivers/gpu/drm/gma500/intel_gmbus.c
new file mode 100644
index 0000000..62cd42e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2008,2010 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *	Chris Wilson <chris@chris-wilson.co.uk>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/drmP.h>
+#include "psb_intel_drv.h"
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+#define _wait_for(COND, MS, W) ({ \
+	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
+	int ret__ = 0;							\
+	while (! (COND)) {						\
+		if (time_after(jiffies, timeout__)) {			\
+			ret__ = -ETIMEDOUT;				\
+			break;						\
+		}							\
+		if (W && !(in_atomic() || in_dbg_master())) msleep(W);	\
+	}								\
+	ret__;								\
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
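+/*
+ * Example (sketch): poll a GMBUS status register for up to 50ms,
+ * sleeping roughly 1ms between reads, much as gmbus_xfer() below does:
+ *
+ *	if (wait_for(REG_READ(GMBUS2) & GMBUS_HW_RDY, 50))
+ *		return -ETIMEDOUT;
+ */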
+
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 20
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+	return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+struct intel_gpio {
+	struct i2c_adapter adapter;
+	struct i2c_algo_bit_data algo;
+	struct drm_psb_private *dev_priv;
+	u32 reg;
+};
+
+void
+gma_intel_i2c_reset(struct drm_device *dev)
+{
+	REG_WRITE(GMBUS0, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
+{
+	/* When using bit bashing for I2C, this bit needs to be set to 1 */
+	/* FIXME: We are never Pineview, right?
+
+	u32 val;
+
+	if (!IS_PINEVIEW(dev_priv->dev))
+		return;
+
+	val = REG_READ(DSPCLK_GATE_D);
+	if (enable)
+		val |= DPCUNIT_CLOCK_GATE_DISABLE;
+	else
+		val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+	REG_WRITE(DSPCLK_GATE_D, val);
+
+	return;
+	*/
+}
+
+static u32 get_reserved(struct intel_gpio *gpio)
+{
+	struct drm_psb_private *dev_priv = gpio->dev_priv;
+	struct drm_device *dev = dev_priv->dev;
+	u32 reserved = 0;
+
+	/* On most chips, these bits must be preserved in software. */
+	reserved = REG_READ(gpio->reg) &
+				     (GPIO_DATA_PULLUP_DISABLE |
+				      GPIO_CLOCK_PULLUP_DISABLE);
+
+	return reserved;
+}
+
+static int get_clock(void *data)
+{
+	struct intel_gpio *gpio = data;
+	struct drm_psb_private *dev_priv = gpio->dev_priv;
+	struct drm_device *dev = dev_priv->dev;
+	u32 reserved = get_reserved(gpio);
+	REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+	REG_WRITE(gpio->reg, reserved);
+	return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+	struct intel_gpio *gpio = data;
+	struct drm_psb_private *dev_priv = gpio->dev_priv;
+	struct drm_device *dev = dev_priv->dev;
+	u32 reserved = get_reserved(gpio);
+	REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+	REG_WRITE(gpio->reg, reserved);
+	return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+	struct intel_gpio *gpio = data;
+	struct drm_psb_private *dev_priv = gpio->dev_priv;
+	struct drm_device *dev = dev_priv->dev;
+	u32 reserved = get_reserved(gpio);
+	u32 clock_bits;
+
+	if (state_high)
+		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+	else
+		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+			GPIO_CLOCK_VAL_MASK;
+
+	REG_WRITE(gpio->reg, reserved | clock_bits);
+	REG_READ(gpio->reg); /* Posting */
+}
+
+static void set_data(void *data, int state_high)
+{
+	struct intel_gpio *gpio = data;
+	struct drm_psb_private *dev_priv = gpio->dev_priv;
+	struct drm_device *dev = dev_priv->dev;
+	u32 reserved = get_reserved(gpio);
+	u32 data_bits;
+
+	if (state_high)
+		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+	else
+		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+			GPIO_DATA_VAL_MASK;
+
+	REG_WRITE(gpio->reg, reserved | data_bits);
+	REG_READ(gpio->reg);
+}
+
+static struct i2c_adapter *
+intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
+{
+	static const int map_pin_to_reg[] = {
+		0,
+		GPIOB,
+		GPIOA,
+		GPIOC,
+		GPIOD,
+		GPIOE,
+		0,
+		GPIOF,
+	};
+	struct intel_gpio *gpio;
+
+	if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
+		return NULL;
+
+	gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+	if (gpio == NULL)
+		return NULL;
+
+	gpio->reg = map_pin_to_reg[pin];
+	gpio->dev_priv = dev_priv;
+
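+	/*
+	 * The "?BACDE?F" lookup below mirrors map_pin_to_reg[] above:
+	 * pin 1 is GPIOB, pin 2 is GPIOA, and so on, with '?' marking
+	 * the two unused slots.
+	 */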
+	snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+		 "gma500 GPIO%c", "?BACDE?F"[pin]);
+	gpio->adapter.owner = THIS_MODULE;
+	gpio->adapter.algo_data	= &gpio->algo;
+	gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+	gpio->algo.setsda = set_data;
+	gpio->algo.setscl = set_clock;
+	gpio->algo.getsda = get_data;
+	gpio->algo.getscl = get_clock;
+	gpio->algo.udelay = I2C_RISEFALL_TIME;
+	gpio->algo.timeout = usecs_to_jiffies(2200);
+	gpio->algo.data = gpio;
+
+	if (i2c_bit_add_bus(&gpio->adapter))
+		goto out_free;
+
+	return &gpio->adapter;
+
+out_free:
+	kfree(gpio);
+	return NULL;
+}
+
+static int
+intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv,
+		     struct i2c_adapter *adapter,
+		     struct i2c_msg *msgs,
+		     int num)
+{
+	struct intel_gpio *gpio = container_of(adapter,
+					       struct intel_gpio,
+					       adapter);
+	int ret;
+
+	gma_intel_i2c_reset(dev_priv->dev);
+
+	intel_i2c_quirk_set(dev_priv, true);
+	set_data(gpio, 1);
+	set_clock(gpio, 1);
+	udelay(I2C_RISEFALL_TIME);
+
+	ret = adapter->algo->master_xfer(adapter, msgs, num);
+
+	set_data(gpio, 1);
+	set_clock(gpio, 1);
+	intel_i2c_quirk_set(dev_priv, false);
+
+	return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+	   struct i2c_msg *msgs,
+	   int num)
+{
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_psb_private *dev_priv = adapter->algo_data;
+	struct drm_device *dev = dev_priv->dev;
+	int i, reg_offset;
+
+	if (bus->force_bit)
+		return intel_i2c_quirk_xfer(dev_priv,
+					    bus->force_bit, msgs, num);
+
+	reg_offset = 0;
+
+	REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+	for (i = 0; i < num; i++) {
+		u16 len = msgs[i].len;
+		u8 *buf = msgs[i].buf;
+
+		if (msgs[i].flags & I2C_M_RD) {
+			REG_WRITE(GMBUS1 + reg_offset,
+				   GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+				   (len << GMBUS_BYTE_COUNT_SHIFT) |
+				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+				   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+			REG_READ(GMBUS2+reg_offset);
+			do {
+				u32 val, loop = 0;
+
+				if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+					goto timeout;
+				if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+					goto clear_err;
+
+				val = REG_READ(GMBUS3 + reg_offset);
+				do {
+					*buf++ = val & 0xff;
+					val >>= 8;
+				} while (--len && ++loop < 4);
+			} while (len);
+		} else {
+			u32 val, loop;
+
+			val = loop = 0;
+			do {
+				val |= *buf++ << (8 * loop);
+			} while (--len && ++loop < 4);
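+			/*
+			 * GMBUS3 takes up to four bytes, LSB first; e.g. a
+			 * two byte message {0x12, 0x34} packs above as
+			 * val = 0x00003412.
+			 */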
+
+			REG_WRITE(GMBUS3 + reg_offset, val);
+			REG_WRITE(GMBUS1 + reg_offset,
+				   (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+				   (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+				   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+			REG_READ(GMBUS2+reg_offset);
+
+			while (len) {
+				if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+					goto timeout;
+				if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+					goto clear_err;
+
+				val = loop = 0;
+				do {
+					val |= *buf++ << (8 * loop);
+				} while (--len && ++loop < 4);
+
+				REG_WRITE(GMBUS3 + reg_offset, val);
+				REG_READ(GMBUS2+reg_offset);
+			}
+		}
+
+		if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+			goto timeout;
+		if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+			goto clear_err;
+	}
+
+	goto done;
+
+clear_err:
+	/* Toggle the Software Clear Interrupt bit. This has the effect
+	 * of resetting the GMBUS controller and so clearing the
+	 * BUS_ERROR raised by the slave's NAK.
+	 */
+	REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+	REG_WRITE(GMBUS1 + reg_offset, 0);
+
+done:
+	/* Mark the GMBUS interface as disabled. We will re-enable it at the
+	 * start of the next xfer, till then let it sleep.
+	 */
+	REG_WRITE(GMBUS0 + reg_offset, 0);
+	return i;
+
+timeout:
+	DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+		 bus->reg0 & 0xff, bus->adapter.name);
+	REG_WRITE(GMBUS0 + reg_offset, 0);
+
+	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+	bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+	if (!bus->force_bit)
+		return -ENOMEM;
+
+	return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
+}
+
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+
+	/* If we have fallen back to bit banging, report its capabilities */
+	if (bus->force_bit)
+		return bus->force_bit->algo->functionality(bus->force_bit);
+
+	return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+		/* I2C_FUNC_10BIT_ADDR | */
+		I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+		I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+	.master_xfer	= gmbus_xfer,
+	.functionality	= gmbus_func
+};
+
+/**
+ * gma_intel_setup_gmbus - instantiate all Intel i2c GMBuses
+ * @dev: DRM device
+ */
+int gma_intel_setup_gmbus(struct drm_device *dev)
+{
+	static const char *names[GMBUS_NUM_PORTS] = {
+		"disabled",
+		"ssc",
+		"vga",
+		"panel",
+		"dpc",
+		"dpb",
+		"reserved",
+		"dpd",
+	};
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int ret, i;
+
+	dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
+				  GFP_KERNEL);
+	if (dev_priv->gmbus == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+		struct intel_gmbus *bus = &dev_priv->gmbus[i];
+
+		bus->adapter.owner = THIS_MODULE;
+		bus->adapter.class = I2C_CLASS_DDC;
+		snprintf(bus->adapter.name,
+			 sizeof(bus->adapter.name),
+			 "gma500 gmbus %s",
+			 names[i]);
+
+		bus->adapter.dev.parent = &dev->pdev->dev;
+		bus->adapter.algo_data	= dev_priv;
+
+		bus->adapter.algo = &gmbus_algorithm;
+		ret = i2c_add_adapter(&bus->adapter);
+		if (ret)
+			goto err;
+
+		/* By default use a conservative clock rate */
+		bus->reg0 = i | GMBUS_RATE_100KHZ;
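+		/*
+		 * reg0 is written to GMBUS0 at the start of each transfer;
+		 * e.g. port 3 ("panel") at the default rate is encoded as
+		 * 3 | GMBUS_RATE_100KHZ.
+		 */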
+
+		/* XXX force bit banging until GMBUS is fully debugged */
+		bus->force_bit = intel_gpio_create(dev_priv, i);
+	}
+
+	gma_intel_i2c_reset(dev_priv->dev);
+
+	return 0;
+
+err:
+	while (i--) {
+		struct intel_gmbus *bus = &dev_priv->gmbus[i];
+		i2c_del_adapter(&bus->adapter);
+	}
+	kfree(dev_priv->gmbus);
+	dev_priv->gmbus = NULL;
+	return ret;
+}
+
+void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+	struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+	/* speed:
+	 * 0x0 = 100 KHz
+	 * 0x1 = 50 KHz
+	 * 0x2 = 400 KHz
+	 * 0x3 = 1000 KHz
+	 */
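+	/*
+	 * e.g. gma_intel_gmbus_set_speed(adapter, 0x2) programs bits 9:8
+	 * of reg0, selecting 400 KHz for the next transfer.
+	 */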
+	bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
+}
+
+void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+	struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+	if (force_bit) {
+		if (bus->force_bit == NULL) {
+			struct drm_psb_private *dev_priv = adapter->algo_data;
+			bus->force_bit = intel_gpio_create(dev_priv,
+							   bus->reg0 & 0xff);
+		}
+	} else {
+		if (bus->force_bit) {
+			i2c_del_adapter(bus->force_bit);
+			kfree(bus->force_bit);
+			bus->force_bit = NULL;
+		}
+	}
+}
+
+void gma_intel_teardown_gmbus(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int i;
+
+	if (dev_priv->gmbus == NULL)
+		return;
+
+	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+		struct intel_gmbus *bus = &dev_priv->gmbus[i];
+		if (bus->force_bit) {
+			i2c_del_adapter(bus->force_bit);
+			kfree(bus->force_bit);
+		}
+		i2c_del_adapter(&bus->adapter);
+	}
+
+	kfree(dev_priv->gmbus);
+	dev_priv->gmbus = NULL;
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/intel_i2c.c b/linux-imx/drivers/gpu/drm/gma500/intel_i2c.c
new file mode 100644
index 0000000..98a28c2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/intel_i2c.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+/*
+ * Intel GPIO access functions
+ */
+
+#define I2C_RISEFALL_TIME 20
+
+static int get_clock(void *data)
+{
+	struct psb_intel_i2c_chan *chan = data;
+	struct drm_device *dev = chan->drm_dev;
+	u32 val;
+
+	val = REG_READ(chan->reg);
+	return (val & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+	struct psb_intel_i2c_chan *chan = data;
+	struct drm_device *dev = chan->drm_dev;
+	u32 val;
+
+	val = REG_READ(chan->reg);
+	return (val & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+	struct psb_intel_i2c_chan *chan = data;
+	struct drm_device *dev = chan->drm_dev;
+	u32 reserved = 0, clock_bits;
+
+	/* On most chips, these bits must be preserved in software. */
+	reserved =
+		    REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+					   GPIO_CLOCK_PULLUP_DISABLE);
+
+	if (state_high)
+		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+	else
+		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+		    GPIO_CLOCK_VAL_MASK;
+	REG_WRITE(chan->reg, reserved | clock_bits);
+	udelay(I2C_RISEFALL_TIME);	/* wait for the line to change state */
+}
+
+static void set_data(void *data, int state_high)
+{
+	struct psb_intel_i2c_chan *chan = data;
+	struct drm_device *dev = chan->drm_dev;
+	u32 reserved = 0, data_bits;
+
+	/* On most chips, these bits must be preserved in software. */
+	reserved =
+		    REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+					   GPIO_CLOCK_PULLUP_DISABLE);
+
+	if (state_high)
+		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+	else
+		data_bits =
+		    GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+		    GPIO_DATA_VAL_MASK;
+
+	REG_WRITE(chan->reg, reserved | data_bits);
+	udelay(I2C_RISEFALL_TIME);	/* wait for the line to change state */
+}
+
+/**
+ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * @dev: DRM device
+ * @reg: GPIO reg to use
+ * @name: name for this bus
+ *
+ * Creates and registers a new i2c bus with the Linux i2c layer, for use
+ * in output probing and control (e.g. DDC or SDVO control functions).
+ *
+ * Possible values for @reg include:
+ *   %GPIOA
+ *   %GPIOB
+ *   %GPIOC
+ *   %GPIOD
+ *   %GPIOE
+ *   %GPIOF
+ *   %GPIOG
+ *   %GPIOH
+ * see PRM for details on how these different busses are used.
+ */
+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+					const u32 reg, const char *name)
+{
+	struct psb_intel_i2c_chan *chan;
+
+	chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
+	if (!chan)
+		goto out_free;
+
+	chan->drm_dev = dev;
+	chan->reg = reg;
+	snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
+	chan->adapter.owner = THIS_MODULE;
+	chan->adapter.algo_data = &chan->algo;
+	chan->adapter.dev.parent = &dev->pdev->dev;
+	chan->algo.setsda = set_data;
+	chan->algo.setscl = set_clock;
+	chan->algo.getsda = get_data;
+	chan->algo.getscl = get_clock;
+	chan->algo.udelay = I2C_RISEFALL_TIME;
+	chan->algo.timeout = usecs_to_jiffies(2200);
+	chan->algo.data = chan;
+
+	i2c_set_adapdata(&chan->adapter, chan);
+
+	if (i2c_bit_add_bus(&chan->adapter))
+		goto out_free;
+
+	/* JJJ:  raise SCL and SDA? */
+	set_data(chan, 1);
+	set_clock(chan, 1);
+	udelay(20);
+
+	return chan;
+
+out_free:
+	kfree(chan);
+	return NULL;
+}
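+
+/*
+ * Typical use (the register and bus name here are only illustrative):
+ *
+ *	struct psb_intel_i2c_chan *ddc;
+ *
+ *	ddc = psb_intel_i2c_create(dev, GPIOA, "CRTDDC_A");
+ *	if (ddc)
+ *		psb_intel_i2c_destroy(ddc);
+ */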
+
+/**
+ * psb_intel_i2c_destroy - unregister and free i2c bus resources
+ * @chan: channel to free
+ *
+ * Unregister the adapter from the i2c layer, then free the structure.
+ */
+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
+{
+	if (!chan)
+		return;
+
+	i2c_del_adapter(&chan->adapter);
+	kfree(chan);
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/mdfld_device.c b/linux-imx/drivers/gpu/drm/gma500/mdfld_device.c
new file mode 100644
index 0000000..265ad0d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mdfld_device.c
@@ -0,0 +1,551 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include "psb_drv.h"
+#include "mid_bios.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_output.h"
+#include "tc35876x-dsi-lvds.h"
+
+#include <asm/intel_scu_ipc.h>
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+#define MRST_BLC_MAX_PWM_REG_FREQ	    0xFFFF
+#define BLC_PWM_PRECISION_FACTOR 100	/* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+#define BRIGHTNESS_MIN_LEVEL 1
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK	0xFF
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+#define BLC_ADJUSTMENT_MAX 100
+
+#define MDFLD_BLC_PWM_PRECISION_FACTOR    10
+#define MDFLD_BLC_MAX_PWM_REG_FREQ        0xFFFE
+#define MDFLD_BLC_MIN_PWM_REG_FREQ        0x2
+
+#define MDFLD_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define MDFLD_BACKLIGHT_PWM_CTL_SHIFT	(16)
+
+static struct backlight_device *mdfld_backlight_device;
+
+int mdfld_set_brightness(struct backlight_device *bd)
+{
+	struct drm_device *dev = bl_get_data(bd);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int level = bd->props.brightness;
+
+	DRM_DEBUG_DRIVER("backlight level set to %d\n", level);
+
+	/* Perform value bounds checking */
+	if (level < BRIGHTNESS_MIN_LEVEL)
+		level = BRIGHTNESS_MIN_LEVEL;
+
+	if (gma_power_begin(dev, false)) {
+		u32 adjusted_level = 0;
+
+		/*
+		 * Adjust the backlight level with the percent in
+		 * dev_priv->blc_adj2
+		 */
+		adjusted_level = level * dev_priv->blc_adj2;
+		adjusted_level = adjusted_level / BLC_ADJUSTMENT_MAX;
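+		/* e.g. level 80 with blc_adj2 == 50 yields 80 * 50 / 100 = 40 */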
+		dev_priv->brightness_adjusted = adjusted_level;
+
+		if (mdfld_get_panel_type(dev, 0) == TC35876X) {
+			if (dev_priv->dpi_panel_on[0] ||
+					dev_priv->dpi_panel_on[2])
+				tc35876x_brightness_control(dev,
+						dev_priv->brightness_adjusted);
+		} else {
+			if (dev_priv->dpi_panel_on[0])
+				mdfld_dsi_brightness_control(dev, 0,
+						dev_priv->brightness_adjusted);
+		}
+
+		if (dev_priv->dpi_panel_on[2])
+			mdfld_dsi_brightness_control(dev, 2,
+					dev_priv->brightness_adjusted);
+		gma_power_end(dev);
+	}
+
+	/* cache the brightness for later use */
+	dev_priv->brightness = level;
+	return 0;
+}
+
+static int mdfld_get_brightness(struct backlight_device *bd)
+{
+	struct drm_device *dev = bl_get_data(bd);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_DRIVER("brightness = 0x%x\n", dev_priv->brightness);
+
+	/* return locally cached var instead of HW read (due to DPST etc.) */
+	return dev_priv->brightness;
+}
+
+static const struct backlight_ops mdfld_ops = {
+	.get_brightness = mdfld_get_brightness,
+	.update_status  = mdfld_set_brightness,
+};
+
+static int device_backlight_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = (struct drm_psb_private *)
+		dev->dev_private;
+
+	dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
+	dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
+
+	return 0;
+}
+
+static int mdfld_backlight_init(struct drm_device *dev)
+{
+	struct backlight_properties props;
+	int ret = 0;
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = BRIGHTNESS_MAX_LEVEL;
+	props.type = BACKLIGHT_PLATFORM;
+	mdfld_backlight_device = backlight_device_register("mdfld-bl",
+				NULL, (void *)dev, &mdfld_ops, &props);
+
+	if (IS_ERR(mdfld_backlight_device))
+		return PTR_ERR(mdfld_backlight_device);
+
+	ret = device_backlight_init(dev);
+	if (ret) {
+		backlight_device_unregister(mdfld_backlight_device);
+		mdfld_backlight_device = NULL;
+		return ret;
+	}
+
+	mdfld_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL;
+	mdfld_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
+	backlight_update_status(mdfld_backlight_device);
+	return 0;
+}
+#endif
+
+struct backlight_device *mdfld_get_backlight_device(void)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	return mdfld_backlight_device;
+#else
+	return NULL;
+#endif
+}
+
+/*
+ * mdfld_save_display_registers
+ *
+ * Description: We are going to suspend so save current display
+ * register state.
+ *
+ * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
+ */
+static int mdfld_save_display_registers(struct drm_device *dev, int pipenum)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct medfield_state *regs = &dev_priv->regs.mdfld;
+	struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+	const struct psb_offset *map = &dev_priv->regmap[pipenum];
+	int i;
+	u32 *mipi_val;
+
+	/* register */
+	u32 mipi_reg = MIPI;
+
+	switch (pipenum) {
+	case 0:
+		mipi_val = &regs->saveMIPI;
+		break;
+	case 1:
+		/* pipe 1 is HDMI and has no MIPI port; the assignment
+		 * only silences the compiler, the value is never used */
+		mipi_val = &regs->saveMIPI;
+		break;
+	case 2:
+		/* register */
+		mipi_reg = MIPI_C;
+		/* pointer to values */
+		mipi_val = &regs->saveMIPI_C;
+		break;
+	default:
+		DRM_ERROR("%s, invalid pipe number.\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Pipe & plane A info */
+	pipe->dpll = PSB_RVDC32(map->dpll);
+	pipe->fp0 = PSB_RVDC32(map->fp0);
+	pipe->conf = PSB_RVDC32(map->conf);
+	pipe->htotal = PSB_RVDC32(map->htotal);
+	pipe->hblank = PSB_RVDC32(map->hblank);
+	pipe->hsync = PSB_RVDC32(map->hsync);
+	pipe->vtotal = PSB_RVDC32(map->vtotal);
+	pipe->vblank = PSB_RVDC32(map->vblank);
+	pipe->vsync = PSB_RVDC32(map->vsync);
+	pipe->src = PSB_RVDC32(map->src);
+	pipe->stride = PSB_RVDC32(map->stride);
+	pipe->linoff = PSB_RVDC32(map->linoff);
+	pipe->tileoff = PSB_RVDC32(map->tileoff);
+	pipe->size = PSB_RVDC32(map->size);
+	pipe->pos = PSB_RVDC32(map->pos);
+	pipe->surf = PSB_RVDC32(map->surf);
+	pipe->cntr = PSB_RVDC32(map->cntr);
+	pipe->status = PSB_RVDC32(map->status);
+
+	/*save palette (gamma) */
+	for (i = 0; i < 256; i++)
+		pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2));
+
+	if (pipenum == 1) {
+		regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+		regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+
+		regs->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL);
+		regs->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL);
+		return 0;
+	}
+
+	*mipi_val = PSB_RVDC32(mipi_reg);
+	return 0;
+}
+
+/*
+ * mdfld_restore_display_registers
+ *
+ * Description: We are going to resume so restore display register state.
+ *
+ * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
+ */
+static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum)
+{
+	/* To get the panel out of ULPS mode. */
+	u32 temp = 0;
+	u32 device_ready_reg = DEVICE_READY_REG;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct mdfld_dsi_config *dsi_config = NULL;
+	struct medfield_state *regs = &dev_priv->regs.mdfld;
+	struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+	const struct psb_offset *map = &dev_priv->regmap[pipenum];
+	u32 i;
+	u32 dpll;
+	u32 timeout = 0;
+
+	/* register */
+	u32 mipi_reg = MIPI;
+
+	/* values */
+	u32 dpll_val = pipe->dpll;
+	u32 mipi_val = regs->saveMIPI;
+
+	switch (pipenum) {
+	case 0:
+		dpll_val &= ~DPLL_VCO_ENABLE;
+		dsi_config = dev_priv->dsi_configs[0];
+		break;
+	case 1:
+		dpll_val &= ~DPLL_VCO_ENABLE;
+		break;
+	case 2:
+		mipi_reg = MIPI_C;
+		mipi_val = regs->saveMIPI_C;
+		dsi_config = dev_priv->dsi_configs[1];
+		break;
+	default:
+		DRM_ERROR("%s, invalid pipe number.\n", __func__);
+		return -EINVAL;
+	}
+
+	/*make sure VGA plane is off. it initializes to on after reset!*/
+	PSB_WVDC32(0x80000000, VGACNTRL);
+
+	if (pipenum == 1) {
+		PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, map->dpll);
+		PSB_RVDC32(map->dpll);
+
+		PSB_WVDC32(pipe->fp0, map->fp0);
+	} else {
+
+		dpll = PSB_RVDC32(map->dpll);
+
+		if (!(dpll & DPLL_VCO_ENABLE)) {
+
+			/* When ungating the DPLL power, wait 0.5us before
+			   enabling the VCO */
+			if (dpll & MDFLD_PWR_GATE_EN) {
+				dpll &= ~MDFLD_PWR_GATE_EN;
+				PSB_WVDC32(dpll, map->dpll);
+				/* FIXME_MDFLD PO - change 500 to 1 after PO */
+				udelay(500);
+			}
+
+			PSB_WVDC32(pipe->fp0, map->fp0);
+			PSB_WVDC32(dpll_val, map->dpll);
+			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+			udelay(500);
+
+			dpll_val |= DPLL_VCO_ENABLE;
+			PSB_WVDC32(dpll_val, map->dpll);
+			PSB_RVDC32(map->dpll);
+
+			/* wait for DSI PLL to lock */
+			while (timeout < 20000 &&
+			  !(PSB_RVDC32(map->conf) & PIPECONF_DSIPLL_LOCK)) {
+				udelay(150);
+				timeout++;
+			}
+
+			if (timeout == 20000) {
+				DRM_ERROR("%s, can't lock DSIPLL.\n",
+								__func__);
+				return -EINVAL;
+			}
+		}
+	}
+	/* Restore mode */
+	PSB_WVDC32(pipe->htotal, map->htotal);
+	PSB_WVDC32(pipe->hblank, map->hblank);
+	PSB_WVDC32(pipe->hsync, map->hsync);
+	PSB_WVDC32(pipe->vtotal, map->vtotal);
+	PSB_WVDC32(pipe->vblank, map->vblank);
+	PSB_WVDC32(pipe->vsync, map->vsync);
+	PSB_WVDC32(pipe->src, map->src);
+	PSB_WVDC32(pipe->status, map->status);
+
+	/*set up the plane*/
+	PSB_WVDC32(pipe->stride, map->stride);
+	PSB_WVDC32(pipe->linoff, map->linoff);
+	PSB_WVDC32(pipe->tileoff, map->tileoff);
+	PSB_WVDC32(pipe->size, map->size);
+	PSB_WVDC32(pipe->pos, map->pos);
+	PSB_WVDC32(pipe->surf, map->surf);
+
+	if (pipenum == 1) {
+		/* restore palette (gamma) */
+		/*DRM_UDELAY(50000); */
+		for (i = 0; i < 256; i++)
+			PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
+
+		PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL);
+		PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+
+		/*TODO: resume HDMI port */
+
+		/*TODO: resume pipe*/
+
+		/*enable the plane*/
+		PSB_WVDC32(pipe->cntr & ~DISPLAY_PLANE_ENABLE, map->cntr);
+
+		return 0;
+	}
+
+	/*set up pipe related registers*/
+	PSB_WVDC32(mipi_val, mipi_reg);
+
+	/*setup MIPI adapter + MIPI IP registers*/
+	if (dsi_config)
+		mdfld_dsi_controller_init(dsi_config, pipenum);
+
+	if (in_atomic() || in_interrupt())
+		mdelay(20);
+	else
+		msleep(20);
+
+	/*enable the plane*/
+	PSB_WVDC32(pipe->cntr, map->cntr);
+
+	if (in_atomic() || in_interrupt())
+		mdelay(20);
+	else
+		msleep(20);
+
+	/* LP Hold Release */
+	temp = REG_READ(mipi_reg);
+	temp |= LP_OUTPUT_HOLD_RELEASE;
+	REG_WRITE(mipi_reg, temp);
+	mdelay(1);
+
+	/* Set DSI host to exit from Ultra Low Power State */
+	temp = REG_READ(device_ready_reg);
+	temp &= ~ULPS_MASK;
+	temp |= 0x3;
+	temp |= EXIT_ULPS_DEV_READY;
+	REG_WRITE(device_ready_reg, temp);
+	mdelay(1);
+
+	temp = REG_READ(device_ready_reg);
+	temp &= ~ULPS_MASK;
+	temp |= EXITING_ULPS;
+	REG_WRITE(device_ready_reg, temp);
+	mdelay(1);
+
+	/*enable the pipe*/
+	PSB_WVDC32(pipe->conf, map->conf);
+
+	/* restore palette (gamma) */
+	/*DRM_UDELAY(50000); */
+	for (i = 0; i < 256; i++)
+		PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
+
+	return 0;
+}
+
+static int mdfld_save_registers(struct drm_device *dev)
+{
+	/* mdfld_save_cursor_overlay_registers(dev); */
+	mdfld_save_display_registers(dev, 0);
+	mdfld_save_display_registers(dev, 2);
+	mdfld_disable_crtc(dev, 0);
+	mdfld_disable_crtc(dev, 2);
+
+	return 0;
+}
+
+static int mdfld_restore_registers(struct drm_device *dev)
+{
+	mdfld_restore_display_registers(dev, 2);
+	mdfld_restore_display_registers(dev, 0);
+	/* mdfld_restore_cursor_overlay_registers(dev); */
+
+	return 0;
+}
+
+static int mdfld_power_down(struct drm_device *dev)
+{
+	/* FIXME */
+	return 0;
+}
+
+static int mdfld_power_up(struct drm_device *dev)
+{
+	/* FIXME */
+	return 0;
+}
+
+/* Medfield  */
+static const struct psb_offset mdfld_regmap[3] = {
+	{
+		.fp0 = MRST_FPA0,
+		.fp1 = MRST_FPA1,
+		.cntr = DSPACNTR,
+		.conf = PIPEACONF,
+		.src = PIPEASRC,
+		.dpll = MRST_DPLL_A,
+		.htotal = HTOTAL_A,
+		.hblank = HBLANK_A,
+		.hsync = HSYNC_A,
+		.vtotal = VTOTAL_A,
+		.vblank = VBLANK_A,
+		.vsync = VSYNC_A,
+		.stride = DSPASTRIDE,
+		.size = DSPASIZE,
+		.pos = DSPAPOS,
+		.surf = DSPASURF,
+		.addr = MRST_DSPABASE,
+		.status = PIPEASTAT,
+		.linoff = DSPALINOFF,
+		.tileoff = DSPATILEOFF,
+		.palette = PALETTE_A,
+	},
+	{
+		.fp0 = MDFLD_DPLL_DIV0,
+		.cntr = DSPBCNTR,
+		.conf = PIPEBCONF,
+		.src = PIPEBSRC,
+		.dpll = MDFLD_DPLL_B,
+		.htotal = HTOTAL_B,
+		.hblank = HBLANK_B,
+		.hsync = HSYNC_B,
+		.vtotal = VTOTAL_B,
+		.vblank = VBLANK_B,
+		.vsync = VSYNC_B,
+		.stride = DSPBSTRIDE,
+		.size = DSPBSIZE,
+		.pos = DSPBPOS,
+		.surf = DSPBSURF,
+		.addr = MRST_DSPBBASE,
+		.status = PIPEBSTAT,
+		.linoff = DSPBLINOFF,
+		.tileoff = DSPBTILEOFF,
+		.palette = PALETTE_B,
+	},
+	{
+		.fp0 = MRST_FPA0,	/* This is what the old code did ?? */
+		.cntr = DSPCCNTR,
+		.conf = PIPECCONF,
+		.src = PIPECSRC,
+		/* No DPLL_C */
+		.dpll = MRST_DPLL_A,
+		.htotal = HTOTAL_C,
+		.hblank = HBLANK_C,
+		.hsync = HSYNC_C,
+		.vtotal = VTOTAL_C,
+		.vblank = VBLANK_C,
+		.vsync = VSYNC_C,
+		.stride = DSPCSTRIDE,
+		.size = DSPBSIZE,	/* FIXME: DSPCSIZE? kept as the old code had it */
+		.pos = DSPCPOS,
+		.surf = DSPCSURF,
+		.addr = MDFLD_DSPCBASE,
+		.status = PIPECSTAT,
+		.linoff = DSPCLINOFF,
+		.tileoff = DSPCTILEOFF,
+		.palette = PALETTE_C,
+	},
+};
+
+static int mdfld_chip_setup(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	if (pci_enable_msi(dev->pdev))
+		dev_warn(dev->dev, "Enabling MSI failed!\n");
+	dev_priv->regmap = mdfld_regmap;
+	return mid_chip_setup(dev);
+}
+
+const struct psb_ops mdfld_chip_ops = {
+	.name = "mdfld",
+	.accel_2d = 0,
+	.pipes = 3,
+	.crtcs = 3,
+	.lvds_mask = (1 << 1),
+	.hdmi_mask = (1 << 1),
+	.cursor_needs_phys = 0,
+	.sgx_offset = MRST_SGX_OFFSET,
+
+	.chip_setup = mdfld_chip_setup,
+	.crtc_helper = &mdfld_helper_funcs,
+	.crtc_funcs = &psb_intel_crtc_funcs,
+
+	.output_init = mdfld_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	.backlight_init = mdfld_backlight_init,
+#endif
+
+	.save_regs = mdfld_save_registers,
+	.restore_regs = mdfld_restore_registers,
+	.power_down = mdfld_power_down,
+	.power_up = mdfld_power_up,
+};
diff --git a/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
new file mode 100644
index 0000000..d4813e0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -0,0 +1,1016 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "psb_drv.h"
+#include "tc35876x-dsi-lvds.h"
+
+static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
+								int pipe);
+
+static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe)
+{
+	u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
+	int timeout = 0;
+
+	udelay(500);
+
+	/* This will time out after approximately 2+ seconds */
+	while ((timeout < 20000) &&
+		(REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) {
+		udelay(100);
+		timeout++;
+	}
+
+	if (timeout == 20000)
+		DRM_INFO("MIPI: HS Data FIFO was never cleared!\n");
+}
+
+static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe)
+{
+	u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
+	int timeout = 0;
+
+	udelay(500);
+
+	/* This will time out after approximately 2+ seconds */
+	while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg)
+					& DSI_FIFO_GEN_HS_CTRL_FULL)) {
+		udelay(100);
+		timeout++;
+	}
+	if (timeout == 20000)
+		DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n");
+}
+
+static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe)
+{
+	u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
+	int timeout = 0;
+
+	udelay(500);
+
+	/* This will time out after approximately 2+ seconds */
+	while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) &
+					DPI_FIFO_EMPTY) != DPI_FIFO_EMPTY)) {
+		udelay(100);
+		timeout++;
+	}
+
+	if (timeout == 20000)
+		DRM_ERROR("MIPI: DPI FIFO was never cleared\n");
+}
+
+static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe)
+{
+	u32 intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
+	int timeout = 0;
+
+	udelay(500);
+
+	/* This will time out after approximately 2+ seconds */
+	while ((timeout < 20000) && (!(REG_READ(intr_stat_reg)
+					& DSI_INTR_STATE_SPL_PKG_SENT))) {
+		udelay(100);
+		timeout++;
+	}
+
+	if (timeout == 20000)
+		DRM_ERROR("MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n");
+}
+
+/* For TC35876X */
+
+static void dsi_set_device_ready_state(struct drm_device *dev, int state,
+				int pipe)
+{
+	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), !!state, 0, 0);
+}
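+
+/*
+ * REG_FLD_MOD(reg, val, start, end) read-modify-writes bits start..end
+ * of reg, so the call above just sets or clears bit 0 (device ready) of
+ * MIPI_DEVICE_READY_REG for the given pipe.
+ */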
+
+static void dsi_set_pipe_plane_enable_state(struct drm_device *dev,
+							int state, int pipe)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 pipeconf_reg = PIPEACONF;
+	u32 dspcntr_reg = DSPACNTR;
+
+	u32 dspcntr = dev_priv->dspcntr[pipe];
+	u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+
+	if (pipe) {
+		pipeconf_reg = PIPECCONF;
+		dspcntr_reg = DSPCCNTR;
+	} else
+		mipi &= (~0x03);
+
+	if (state) {
+		/*Set up pipe */
+		REG_WRITE(pipeconf_reg, BIT(31));
+
+		if (REG_BIT_WAIT(pipeconf_reg, 1, 30))
+			dev_err(&dev->pdev->dev, "%s: Pipe enable timeout\n",
+				__func__);
+
+		/*Set up display plane */
+		REG_WRITE(dspcntr_reg, dspcntr);
+	} else {
+		u32 dspbase_reg = pipe ? MDFLD_DSPCBASE : MRST_DSPABASE;
+
+		/* Put DSI lanes to ULPS to disable pipe */
+		REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 2, 2, 1);
+		REG_READ(MIPI_DEVICE_READY_REG(pipe)); /* posted write? */
+
+		/* LP Hold */
+		REG_FLD_MOD(MIPI_PORT_CONTROL(pipe), 0, 16, 16);
+		REG_READ(MIPI_PORT_CONTROL(pipe)); /* posted write? */
+
+		/* Disable display plane */
+		REG_FLD_MOD(dspcntr_reg, 0, 31, 31);
+
+		/* Flush the plane changes ??? posted write? */
+		REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+		REG_READ(dspbase_reg);
+
+		/* Disable PIPE */
+		REG_FLD_MOD(pipeconf_reg, 0, 31, 31);
+
+		if (REG_BIT_WAIT(pipeconf_reg, 0, 30))
+			dev_err(&dev->pdev->dev, "%s: Pipe disable timeout\n",
+				__func__);
+
+		if (REG_BIT_WAIT(MIPI_GEN_FIFO_STAT_REG(pipe), 1, 28))
+			dev_err(&dev->pdev->dev, "%s: FIFO not empty\n",
+				__func__);
+	}
+}
+
+static void mdfld_dsi_configure_down(struct mdfld_dsi_encoder *dsi_encoder,
+								int pipe)
+{
+	struct mdfld_dsi_dpi_output *dpi_output =
+				MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+	struct mdfld_dsi_config *dsi_config =
+				mdfld_dsi_encoder_get_config(dsi_encoder);
+	struct drm_device *dev = dsi_config->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->dpi_panel_on[pipe]) {
+		dev_err(dev->dev, "DPI panel is already off\n");
+		return;
+	}
+	tc35876x_toshiba_bridge_panel_off(dev);
+	tc35876x_set_bridge_reset_state(dev, 1);
+	dsi_set_pipe_plane_enable_state(dev, 0, pipe);
+	mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+	dsi_set_device_ready_state(dev, 0, pipe);
+}
+
+static void mdfld_dsi_configure_up(struct mdfld_dsi_encoder *dsi_encoder,
+								int pipe)
+{
+	struct mdfld_dsi_dpi_output *dpi_output =
+				MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+	struct mdfld_dsi_config *dsi_config =
+				mdfld_dsi_encoder_get_config(dsi_encoder);
+	struct drm_device *dev = dsi_config->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->dpi_panel_on[pipe]) {
+		dev_err(dev->dev, "DPI panel is already on\n");
+		return;
+	}
+
+	/* For resume path sequence */
+	mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+	dsi_set_device_ready_state(dev, 0, pipe);
+
+	dsi_set_device_ready_state(dev, 1, pipe);
+	tc35876x_set_bridge_reset_state(dev, 0);
+	tc35876x_configure_lvds_bridge(dev);
+	mdfld_dsi_dpi_turn_on(dpi_output, pipe);  /* Send turn on command */
+	dsi_set_pipe_plane_enable_state(dev, 1, pipe);
+}
+/* End for TC35876X */
+
+/* ************************************************************************* *\
+ * FUNCTION: mdfld_dsi_tpo_ic_init
+ *
+ * DESCRIPTION:  This function is called only by mrst_dsi_mode_set and
+ *               restore_display_registers.  since this function does not
+ *               acquire the mutex, it is important that the calling function
+ *               does!
+\* ************************************************************************* */
+static void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe)
+{
+	struct drm_device *dev = dsi_config->dev;
+	u32 dcsChannelNumber = dsi_config->channel_num;
+	u32 gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
+	u32 gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
+	u32 gen_ctrl_val = GEN_LONG_WRITE;
+
+	DRM_INFO("Initializing TPO MIPI display\n");
+
+	gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
+
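+	/*
+	 * Each packet below ORs its payload length into the control word;
+	 * e.g. a three byte write goes out as
+	 * gen_ctrl_val | (0x03 << WORD_COUNTS_POS).
+	 */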
+	/* Flip page order */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x00008036);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
+
+	/* 0xF0 */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x005a5af0);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+	/* Write protection key */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x005a5af1);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+	/* 0xFC */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x005a5afc);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+	/* 0xB7 */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x770000b7);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x00000044);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS));
+
+	/* 0xB6 */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x000a0ab6);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+	/* 0xF2 */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x081010f2);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x4a070708);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x000000c5);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+
+	/* 0xF8 */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x024003f8);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x01030a04);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x0e020220);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x00000004);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS));
+
+	/* 0xE2 */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x398fc3e2);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x0000916f);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS));
+
+	/* 0xB0 */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x000000b0);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
+
+	/* 0xF4 */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x240242f4);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x78ee2002);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x2a071050);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x507fee10);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x10300710);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS));
+
+	/* 0xBA */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x19fe07ba);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x101c0a31);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x00000010);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+
+	/* 0xBB */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x28ff07bb);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x24280a31);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x00000034);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+
+	/* 0xFB */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x535d05fb);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x1b1a2130);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x221e180e);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x131d2120);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x535d0508);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x1c1a2131);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x231f160d);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x111b2220);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x535c2008);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x1f1d2433);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x2c251a10);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x2c34372d);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x00000023);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
+
+	/* 0xFA */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x525c0bfa);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x1c1c232f);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x2623190e);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x18212625);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x545d0d0e);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x1e1d2333);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x26231a10);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x1a222725);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x545d280f);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x21202635);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x31292013);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x31393d33);
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x00000029);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
+
+	/* Set DM */
+	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+	REG_WRITE(gen_data_reg, 0x000100f7);
+	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+}
+
+static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count,
+						int num_lane, int bpp)
+{
+	return (u16)((pixel_clock_count * bpp) / (num_lane * 8));
+}
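+
+/*
+ * e.g. a 48 pixel-clock HSYNC at 24 bpp across 2 lanes becomes
+ * 48 * 24 / (2 * 8) = 72 byte clocks.
+ */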
+
+/*
+ * Calculate the DPI timing based on a given drm mode @mode;
+ * return 0 on success.
+ * FIXME: I was using proposed mode value for calculation, may need to
+ * use crtc mode values later
+ */
+int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
+				struct mdfld_dsi_dpi_timing *dpi_timing,
+				int num_lane, int bpp)
+{
+	int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
+	int pclk_vsync, pclk_vfp, pclk_vbp;
+
+	pclk_hactive = mode->hdisplay;
+	pclk_hfp = mode->hsync_start - mode->hdisplay;
+	pclk_hsync = mode->hsync_end - mode->hsync_start;
+	pclk_hbp = mode->htotal - mode->hsync_end;
+
+	pclk_vfp = mode->vsync_start - mode->vdisplay;
+	pclk_vsync = mode->vsync_end - mode->vsync_start;
+	pclk_vbp = mode->vtotal - mode->vsync_end;
+
+	/*
+	 * byte clock counts were calculated by following formula
+	 * bclock_count = pclk_count * bpp / num_lane / 8
+	 */
+	dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count(
+						pclk_hsync, num_lane, bpp);
+	dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count(
+						pclk_hbp, num_lane, bpp);
+	dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count(
+						pclk_hfp, num_lane, bpp);
+	dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count(
+						pclk_hactive, num_lane, bpp);
+	dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count(
+						pclk_vsync, num_lane, bpp);
+	dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count(
+						pclk_vbp, num_lane, bpp);
+	dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count(
+						pclk_vfp, num_lane, bpp);
+
+	return 0;
+}
+
+void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
+								int pipe)
+{
+	struct drm_device *dev = dsi_config->dev;
+	int lane_count = dsi_config->lane_count;
+	struct mdfld_dsi_dpi_timing dpi_timing;
+	struct drm_display_mode *mode = dsi_config->mode;
+	u32 val;
+
+	/*un-ready device*/
+	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 0, 0, 0);
+
+	/*init dsi adapter before kicking off*/
+	REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
+
+	/*enable all interrupts*/
+	REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
+
+	/*set up func_prg*/
+	val = lane_count;
+	val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
+
+	switch (dsi_config->bpp) {
+	case 16:
+		val |= DSI_DPI_COLOR_FORMAT_RGB565;
+		break;
+	case 18:
+		val |= DSI_DPI_COLOR_FORMAT_RGB666;
+		break;
+	case 24:
+		val |= DSI_DPI_COLOR_FORMAT_RGB888;
+		break;
+	default:
+		DRM_ERROR("unsupported color format, bpp = %d\n",
+							dsi_config->bpp);
+	}
+	REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), val);
+
+	REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe),
+			(mode->vtotal * mode->htotal * dsi_config->bpp /
+				(8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
+	REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe),
+				0xffff & DSI_LP_RX_TIMEOUT_MASK);
+
+	/*max value: 20 clock cycles of txclkesc*/
+	REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe),
+				0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
+
+	/*min 21 txclkesc, max: ffffh*/
+	REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe),
+				0xffff & DSI_RESET_TIMER_MASK);
+
+	REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
+				mode->vdisplay << 16 | mode->hdisplay);
+
+	/*set DPI timing registers*/
+	mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
+				dsi_config->lane_count, dsi_config->bpp);
+
+	REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
+			dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
+	REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
+			dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
+	REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
+			dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
+	REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
+			dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
+	REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
+			dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
+	REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
+			dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
+	REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
+			dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
+
+	REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x46);
+
+	/*min: 7d0 max: 4e20*/
+	REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0x000007d0);
+
+	/*set up video mode*/
+	val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
+	REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), val);
+
+	REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
+
+	REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
+
+	/*TODO: figure out how to setup these registers*/
+	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
+	else
+		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150c3408);
+
+	REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
+
+	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+		tc35876x_set_bridge_reset_state(dev, 0);  /*Pull High Reset */
+
+	/*set device ready*/
+	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 1, 0, 0);
+}
+
+void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe)
+{
+	struct drm_device *dev = output->dev;
+
+	/* clear special packet sent bit */
+	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
+		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
+					DSI_INTR_STATE_SPL_PKG_SENT);
+
+	/*send turn on package*/
+	REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_TURN_ON);
+
+	/*wait for SPL_PKG_SENT interrupt*/
+	mdfld_wait_for_SPL_PKG_SENT(dev, pipe);
+
+	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
+		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
+					DSI_INTR_STATE_SPL_PKG_SENT);
+
+	output->panel_on = 1;
+
+	/* FIXME the following is disabled to WA the X slow start issue
+	   for TMD panel
+	if (pipe == 2)
+		dev_priv->dpi_panel_on2 = true;
+	else if (pipe == 0)
+		dev_priv->dpi_panel_on = true; */
+}
+
+static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
+								int pipe)
+{
+	struct drm_device *dev = output->dev;
+
+	/* If the output is already off, or mode setting never happened,
+	 * there is nothing to shut down */
+	if ((!output->panel_on) || output->first_boot) {
+		output->first_boot = 0;
+		return;
+	}
+
+	/* Wait for dpi fifo to empty */
+	mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe);
+
+	/* Clear the special packet interrupt bit if set */
+	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
+		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
+					DSI_INTR_STATE_SPL_PKG_SENT);
+
+	if (REG_READ(MIPI_DPI_CONTROL_REG(pipe)) == DSI_DPI_CTRL_HS_SHUTDOWN)
+		goto shutdown_out;
+
+	REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_SHUTDOWN);
+
+shutdown_out:
+	output->panel_on = 0;
+	output->first_boot = 0;
+
+	/* FIXME the following is disabled to WA the X slow start issue
+	   for TMD panel
+	if (pipe == 2)
+		dev_priv->dpi_panel_on2 = false;
+	else if (pipe == 0)
+		dev_priv->dpi_panel_on = false;	 */
+}
+
+static void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
+{
+	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
+	struct mdfld_dsi_dpi_output *dpi_output =
+				MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+	struct mdfld_dsi_config *dsi_config =
+				mdfld_dsi_encoder_get_config(dsi_encoder);
+	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+	struct drm_device *dev = dsi_config->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	/*start up display island if it was shutdown*/
+	if (!gma_power_begin(dev, true))
+		return;
+
+	if (on) {
+		if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
+			mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+		else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+			mdfld_dsi_configure_up(dsi_encoder, pipe);
+		else {
+			/*enable mipi port*/
+			REG_WRITE(MIPI_PORT_CONTROL(pipe),
+				REG_READ(MIPI_PORT_CONTROL(pipe)) | BIT(31));
+			REG_READ(MIPI_PORT_CONTROL(pipe));
+
+			mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+			mdfld_dsi_tpo_ic_init(dsi_config, pipe);
+		}
+		dev_priv->dpi_panel_on[pipe] = true;
+	} else {
+		if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
+			mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+		else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+			mdfld_dsi_configure_down(dsi_encoder, pipe);
+		else {
+			mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+
+			/*disable mipi port*/
+			REG_WRITE(MIPI_PORT_CONTROL(pipe),
+				REG_READ(MIPI_PORT_CONTROL(pipe)) & ~BIT(31));
+			REG_READ(MIPI_PORT_CONTROL(pipe));
+		}
+		dev_priv->dpi_panel_on[pipe] = false;
+	}
+	gma_power_end(dev);
+}
+
+void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
+{
+	mdfld_dsi_dpi_set_power(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
+	struct mdfld_dsi_config *dsi_config =
+				mdfld_dsi_encoder_get_config(dsi_encoder);
+	struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
+
+	if (fixed_mode) {
+		adjusted_mode->hdisplay = fixed_mode->hdisplay;
+		adjusted_mode->hsync_start = fixed_mode->hsync_start;
+		adjusted_mode->hsync_end = fixed_mode->hsync_end;
+		adjusted_mode->htotal = fixed_mode->htotal;
+		adjusted_mode->vdisplay = fixed_mode->vdisplay;
+		adjusted_mode->vsync_start = fixed_mode->vsync_start;
+		adjusted_mode->vsync_end = fixed_mode->vsync_end;
+		adjusted_mode->vtotal = fixed_mode->vtotal;
+		adjusted_mode->clock = fixed_mode->clock;
+		drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+	}
+	return true;
+}
+
+void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder)
+{
+	mdfld_dsi_dpi_set_power(encoder, false);
+}
+
+void mdfld_dsi_dpi_commit(struct drm_encoder *encoder)
+{
+	mdfld_dsi_dpi_set_power(encoder, true);
+}
+
+/* For TC35876X */
+/*
+ * This functionality was implemented in firmware in iCDK, but was
+ * removed in DV0 and later, so it must be done here.
+ */
+static void mipi_set_properties(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+	struct drm_device *dev = dsi_config->dev;
+
+	REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
+	REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
+	REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe), 0xffffff);
+	REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe), 0xffffff);
+	REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe), 0x14);
+	REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe), 0xff);
+	REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x25);
+	REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0xf0);
+	REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
+	REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
+	REG_WRITE(MIPI_DBI_BW_CTRL_REG(pipe), 0x00000820);
+	REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
+}
+
+static void mdfld_mipi_set_video_timing(struct mdfld_dsi_config *dsi_config,
+					int pipe)
+{
+	struct drm_device *dev = dsi_config->dev;
+	struct mdfld_dsi_dpi_timing dpi_timing;
+	struct drm_display_mode *mode = dsi_config->mode;
+
+	mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
+					dsi_config->lane_count,
+					dsi_config->bpp);
+
+	REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
+		mode->vdisplay << 16 | mode->hdisplay);
+	REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
+		dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
+	REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
+		dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
+	REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
+		dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
+	REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
+		dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
+	REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
+		dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
+	REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
+		dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
+	REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
+		dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
+}
+
+static void mdfld_mipi_config(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+	struct drm_device *dev = dsi_config->dev;
+	int lane_count = dsi_config->lane_count;
+
+	if (pipe) {
+		REG_WRITE(MIPI_PORT_CONTROL(0), 0x00000002);
+		REG_WRITE(MIPI_PORT_CONTROL(2), 0x80000000);
+	} else {
+		REG_WRITE(MIPI_PORT_CONTROL(0), 0x80010000);
+		REG_WRITE(MIPI_PORT_CONTROL(2), 0x00);
+	}
+
+	REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150A600F);
+	REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), 0x0000000F);
+
+	/* lane_count = 3 */
+	REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), 0x00000200 | lane_count);
+
+	mdfld_mipi_set_video_timing(dsi_config, pipe);
+}
+
+static void mdfld_set_pipe_timing(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+	struct drm_device *dev = dsi_config->dev;
+	struct drm_display_mode *mode = dsi_config->mode;
+
+	REG_WRITE(HTOTAL_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
+	REG_WRITE(HBLANK_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
+	REG_WRITE(HSYNC_A,
+		((mode->hsync_end - 1) << 16) | (mode->hsync_start - 1));
+
+	REG_WRITE(VTOTAL_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
+	REG_WRITE(VBLANK_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
+	REG_WRITE(VSYNC_A,
+		((mode->vsync_end - 1) << 16) | (mode->vsync_start - 1));
+
+	REG_WRITE(PIPEASRC,
+		((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+}
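+
+/*
+ * The timing registers hold (value - 1); e.g. an 864x480 mode with
+ * htotal 1056 would program HTOTAL_A = (1055 << 16) | 863.
+ */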
+/* End for TC35876X */
+
+void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
+	struct mdfld_dsi_dpi_output *dpi_output =
+					MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+	struct mdfld_dsi_config *dsi_config =
+				mdfld_dsi_encoder_get_config(dsi_encoder);
+	struct drm_device *dev = dsi_config->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+
+	u32 pipeconf_reg = PIPEACONF;
+	u32 dspcntr_reg = DSPACNTR;
+
+	u32 pipeconf = dev_priv->pipeconf[pipe];
+	u32 dspcntr = dev_priv->dspcntr[pipe];
+	u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+
+	if (pipe) {
+		pipeconf_reg = PIPECCONF;
+		dspcntr_reg = DSPCCNTR;
+	} else {
+		if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+			mipi &= (~0x03); /* Use all four lanes */
+		else
+			mipi |= 2;
+	}
+
+	/*start up display island if it was shutdown*/
+	if (!gma_power_begin(dev, true))
+		return;
+
+	if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
+		/*
+		 * The following logic is required to reset the bridge and
+		 * configure. This also starts the DSI clock at 200MHz.
+		 */
+		tc35876x_set_bridge_reset_state(dev, 0);  /*Pull High Reset */
+		tc35876x_toshiba_bridge_panel_on(dev);
+		udelay(100);
+		/* Now start the DSI clock */
+		REG_WRITE(MRST_DPLL_A, 0x00);
+		REG_WRITE(MRST_FPA0, 0xC1);
+		REG_WRITE(MRST_DPLL_A, 0x00800000);
+		udelay(500);
+		REG_WRITE(MRST_DPLL_A, 0x80800000);
+
+		if (REG_BIT_WAIT(pipeconf_reg, 1, 29))
+			dev_err(&dev->pdev->dev, "%s: DSI PLL lock timeout\n",
+				__func__);
+
+		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
+
+		mipi_set_properties(dsi_config, pipe);
+		mdfld_mipi_config(dsi_config, pipe);
+		mdfld_set_pipe_timing(dsi_config, pipe);
+
+		REG_WRITE(DSPABASE, 0x00);
+		REG_WRITE(DSPASIZE,
+			((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+
+		REG_WRITE(DSPACNTR, 0x98000000);
+		REG_WRITE(DSPASURF, 0x00);
+
+		REG_WRITE(VGACNTRL, 0x80000000);
+		REG_WRITE(DEVICE_READY_REG, 0x00000001);
+
+		REG_WRITE(MIPI_PORT_CONTROL(pipe), 0x80810000);
+	} else {
+		/*set up mipi port FIXME: do at init time */
+		REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi);
+	}
+	REG_READ(MIPI_PORT_CONTROL(pipe));
+
+	if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
+		/* NOP */
+	} else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
+		/* set up DSI controller DPI interface */
+		mdfld_dsi_dpi_controller_init(dsi_config, pipe);
+
+		/* Configure MIPI Bridge and Panel */
+		tc35876x_configure_lvds_bridge(dev);
+		dev_priv->dpi_panel_on[pipe] = true;
+	} else {
+		/*turn on DPI interface*/
+		mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+	}
+
+	/*set up pipe*/
+	REG_WRITE(pipeconf_reg, pipeconf);
+	REG_READ(pipeconf_reg);
+
+	/*set up display plane*/
+	REG_WRITE(dspcntr_reg, dspcntr);
+	REG_READ(dspcntr_reg);
+
+	msleep(20); /* FIXME: this should wait for vblank */
+
+	if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
+		/* NOP */
+	} else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
+		mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+	} else {
+		/* init driver ic */
+		mdfld_dsi_tpo_ic_init(dsi_config, pipe);
+		/*init backlight*/
+		mdfld_dsi_brightness_init(dsi_config, pipe);
+	}
+
+	gma_power_end(dev);
+}
+
+/*
+ * Init the DSI DPI encoder.
+ * Allocate an mdfld_dsi_encoder and attach it to the given @dsi_connector.
+ * Return a pointer to the newly allocated DPI encoder, or NULL on error.
+ */
+struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
+				struct mdfld_dsi_connector *dsi_connector,
+				const struct panel_funcs *p_funcs)
+{
+	struct mdfld_dsi_dpi_output *dpi_output = NULL;
+	struct mdfld_dsi_config *dsi_config;
+	struct drm_connector *connector = NULL;
+	struct drm_encoder *encoder = NULL;
+	int pipe;
+	u32 data;
+	int ret;
+
+	pipe = dsi_connector->pipe;
+
+	if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
+		dsi_config = mdfld_dsi_get_config(dsi_connector);
+
+		/* panel hard-reset */
+		if (p_funcs->reset) {
+			ret = p_funcs->reset(pipe);
+			if (ret) {
+				DRM_ERROR("Panel %d hard-reset failed\n", pipe);
+				return NULL;
+			}
+		}
+
+		/* panel drvIC init */
+		if (p_funcs->drv_ic_init)
+			p_funcs->drv_ic_init(dsi_config, pipe);
+
+		/* panel power mode detect */
+		ret = mdfld_dsi_get_power_mode(dsi_config, &data, false);
+		if (ret) {
+			DRM_ERROR("Panel %d get power mode failed\n", pipe);
+			dsi_connector->status = connector_status_disconnected;
+		} else {
+			DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
+			dsi_connector->status = connector_status_connected;
+		}
+	}
+
+	dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
+	if (!dpi_output) {
+		DRM_ERROR("No memory\n");
+		return NULL;
+	}
+
+	/* The panel starts powered off on either pipe */
+	dpi_output->panel_on = 0;
+
+	dpi_output->dev = dev;
+	if (mdfld_get_panel_type(dev, pipe) != TC35876X)
+		dpi_output->p_funcs = p_funcs;
+	dpi_output->first_boot = 1;
+
+	/*get the DSI config (carries the fixed mode)*/
+	dsi_config = mdfld_dsi_get_config(dsi_connector);
+
+	/*create drm encoder object*/
+	connector = &dsi_connector->base.base;
+	encoder = &dpi_output->base.base.base;
+	drm_encoder_init(dev,
+			encoder,
+			p_funcs->encoder_funcs,
+			DRM_MODE_ENCODER_LVDS);
+	drm_encoder_helper_add(encoder,
+				p_funcs->encoder_helper_funcs);
+
+	/*attach to given connector*/
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	/*set possible crtcs and clones*/
+	if (dsi_connector->pipe) {
+		encoder->possible_crtcs = (1 << 2);
+		encoder->possible_clones = (1 << 1);
+	} else {
+		encoder->possible_crtcs = (1 << 0);
+		encoder->possible_clones = (1 << 0);
+	}
+
+	dsi_connector->base.encoder = &dpi_output->base.base;
+
+	return &dpi_output->base;
+}
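+/*
+ * Editor's sketch (illustrative, not part of the driver): the drm_encoder
+ * possible_crtcs and possible_clones fields are bitmasks indexed by CRTC
+ * and encoder number, so the assignments above mean:
+ *
+ *	pipe 2 (MIPI C): possible_crtcs  = 1 << 2  -> may drive CRTC 2 only
+ *	                 possible_clones = 1 << 1  -> may clone encoder 1
+ *	pipe 0 (MIPI A): possible_crtcs  = 1 << 0  -> may drive CRTC 0 only
+ *	                 possible_clones = 1 << 0  -> may clone encoder 0
+ */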
diff --git a/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h b/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
new file mode 100644
index 0000000..2b40663
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_DPI_H__
+#define __MDFLD_DSI_DPI_H__
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_output.h"
+
+struct mdfld_dsi_dpi_timing {
+	u16 hsync_count;
+	u16 hbp_count;
+	u16 hfp_count;
+	u16 hactive_count;
+	u16 vsync_count;
+	u16 vbp_count;
+	u16 vfp_count;
+};
+
+struct mdfld_dsi_dpi_output {
+	struct mdfld_dsi_encoder base;
+	struct drm_device *dev;
+
+	int panel_on;
+	int first_boot;
+
+	const struct panel_funcs *p_funcs;
+};
+
+#define MDFLD_DSI_DPI_OUTPUT(dsi_encoder)\
+	container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base)
+
+/* Export functions */
+extern int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
+				struct mdfld_dsi_dpi_timing *dpi_timing,
+				int num_lane, int bpp);
+extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
+				struct mdfld_dsi_connector *dsi_connector,
+				const struct panel_funcs *p_funcs);
+
+/* MDFLD DPI helper functions */
+extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
+extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode);
+extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
+extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
+extern void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode);
+extern void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output,
+				int pipe);
+extern void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
+				int pipe);
+#endif /*__MDFLD_DSI_DPI_H__*/
diff --git a/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_output.c
new file mode 100644
index 0000000..3abf831
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -0,0 +1,615 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include <linux/module.h>
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "tc35876x-dsi-lvds.h"
+#include <linux/pm_runtime.h>
+#include <asm/intel_scu_ipc.h>
+
+/* Get the LABC setting from the command line. */
+static int LABC_control = 1;
+
+#ifdef MODULE
+module_param(LABC_control, int, 0644);
+#else
+
+static int __init parse_LABC_control(char *arg)
+{
+	/* LABC control can be passed in as a cmdline parameter */
+	/* to enable this feature add LABC=1 to cmdline */
+	/* to disable this feature add LABC=0 to cmdline */
+	if (!arg)
+		return -EINVAL;
+
+	if (!strcasecmp(arg, "0"))
+		LABC_control = 0;
+	else if (!strcasecmp(arg, "1"))
+		LABC_control = 1;
+
+	return 0;
+}
+early_param("LABC", parse_LABC_control);
+#endif
+
+/**
+ * Check and see if the generic control or data buffer is empty and ready.
+ */
+void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg,
+							u32 fifo_stat)
+{
+	u32 GEN_BF_time_out_count;
+
+	/* Check MIPI Adapter command registers */
+	for (GEN_BF_time_out_count = 0;
+			GEN_BF_time_out_count < GEN_FB_TIME_OUT;
+			GEN_BF_time_out_count++) {
+		if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
+			break;
+		udelay(100);
+	}
+
+	if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
+		DRM_ERROR("mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x.\n",
+					gen_fifo_stat_reg);
+}
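+/*
+ * Editor's sketch (illustrative, not part of the driver): callers poll the
+ * "empty" bits of MIPI_GEN_FIFO_STAT_REG before queueing more generic
+ * packets, using the DSI_FIFO_GEN_* masks from mdfld_dsi_output.h:
+ */
+#if 0	/* example only */
+	mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
+			DSI_FIFO_GEN_HS_CTRL_EMPTY | DSI_FIFO_GEN_HS_DATA_EMPTY);
+#endif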
+
+/*
+ * Manage the DSI MIPI keyboard and display brightness.
+ * FIXME: this is exported to OSPM code; we should work out a specific
+ * display interface to OSPM.
+ */
+
+void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+	struct mdfld_dsi_pkg_sender *sender =
+				mdfld_dsi_get_pkg_sender(dsi_config);
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	u32 gen_ctrl_val;
+
+	if (!sender) {
+		DRM_ERROR("No sender found\n");
+		return;
+	}
+
+	dev = sender->dev;
+	dev_priv = dev->dev_private;
+
+	/* Set default display backlight value to 85% (0xd8)*/
+	mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1,
+				true);
+
+	/* Set minimum brightness setting of CABC function to 20% (0x33)*/
+	mdfld_dsi_send_mcs_short(sender, write_cabc_min_bright, 0x33, 1, true);
+
+	/* Enable backlight or/and LABC */
+	gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | DISPLAY_DIMMING_ON |
+								BACKLIGHT_ON;
+	if (LABC_control == 1)
+		gen_ctrl_val |= DISPLAY_DIMMING_ON | DISPLAY_BRIGHTNESS_AUTO
+								| GAMMA_AUTO;
+
+	if (LABC_control == 1)
+		gen_ctrl_val |= AMBIENT_LIGHT_SENSE_ON;
+
+	dev_priv->mipi_ctrl_display = gen_ctrl_val;
+
+	mdfld_dsi_send_mcs_short(sender, write_ctrl_display, (u8)gen_ctrl_val,
+				1, true);
+
+	mdfld_dsi_send_mcs_short(sender, write_ctrl_cabc, UI_IMAGE, 1, true);
+}
+
+void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level)
+{
+	struct mdfld_dsi_pkg_sender *sender;
+	struct drm_psb_private *dev_priv;
+	struct mdfld_dsi_config *dsi_config;
+	u32 gen_ctrl_val = 0;
+	int p_type = TMD_VID;
+
+	if (!dev || (pipe != 0 && pipe != 2)) {
+		DRM_ERROR("Invalid parameter\n");
+		return;
+	}
+
+	p_type = mdfld_get_panel_type(dev, 0);
+
+	dev_priv = dev->dev_private;
+
+	if (pipe)
+		dsi_config = dev_priv->dsi_configs[1];
+	else
+		dsi_config = dev_priv->dsi_configs[0];
+
+	sender = mdfld_dsi_get_pkg_sender(dsi_config);
+
+	if (!sender) {
+		DRM_ERROR("No sender found\n");
+		return;
+	}
+
+	gen_ctrl_val = (level * 0xff / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff;
+
+	dev_dbg(sender->dev->dev, "pipe = %d, gen_ctrl_val = %d.\n",
+							pipe, gen_ctrl_val);
+
+	if (p_type == TMD_VID) {
+		/* Set display backlight value */
+		mdfld_dsi_send_mcs_short(sender, tmd_write_display_brightness,
+					(u8)gen_ctrl_val, 1, true);
+	} else {
+		/* Set display backlight value */
+		mdfld_dsi_send_mcs_short(sender, write_display_brightness,
+					(u8)gen_ctrl_val, 1, true);
+
+		/* Enable backlight control */
+		if (level == 0)
+			gen_ctrl_val = 0;
+		else
+			gen_ctrl_val = dev_priv->mipi_ctrl_display;
+
+		mdfld_dsi_send_mcs_short(sender, write_ctrl_display,
+					(u8)gen_ctrl_val, 1, true);
+	}
+}
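+/*
+ * Editor's note: the scaling above maps the 0..MDFLD_DSI_BRIGHTNESS_MAX_LEVEL
+ * (0..100) interface range onto the 0..255 DCS brightness range, so e.g.
+ * level 50 becomes (50 * 0xff / 100) & 0xff = 0x7f.
+ */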
+
+static int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config,
+				u8 dcs, u32 *data, bool hs)
+{
+	struct mdfld_dsi_pkg_sender *sender
+		= mdfld_dsi_get_pkg_sender(dsi_config);
+
+	if (!sender || !data) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_read_mcs(sender, dcs, data, 1, hs);
+}
+
+int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config, u32 *mode,
+			bool hs)
+{
+	if (!dsi_config || !mode) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	return mdfld_dsi_get_panel_status(dsi_config, 0x0a, mode, hs);
+}
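+/*
+ * Editor's sketch (illustrative, not part of the driver): the byte returned
+ * through @mode is the DCS get_power_mode (0x0a) response and can be tested
+ * against the DSI_POWER_MODE_* bits from mdfld_dsi_output.h:
+ */
+#if 0	/* example only */
+	u32 power_mode;
+
+	if (!mdfld_dsi_get_power_mode(dsi_config, &power_mode, false) &&
+	    (power_mode & DSI_POWER_MODE_DISPLAY_ON))
+		DRM_INFO("panel reports display on\n");
+#endif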
+
+/*
+ * NOTE: this function was used by OSPM.
+ * TODO: will be removed later, should work out display interfaces for OSPM
+ */
+void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+	if (!dsi_config || ((pipe != 0) && (pipe != 2))) {
+		DRM_ERROR("Invalid parameters\n");
+		return;
+	}
+
+	mdfld_dsi_dpi_controller_init(dsi_config, pipe);
+}
+
+static void mdfld_dsi_connector_save(struct drm_connector *connector)
+{
+}
+
+static void mdfld_dsi_connector_restore(struct drm_connector *connector)
+{
+}
+
+/* FIXME: start using the force parameter */
+static enum drm_connector_status
+mdfld_dsi_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct mdfld_dsi_connector *dsi_connector
+		= mdfld_dsi_connector(connector);
+
+	dsi_connector->status = connector_status_connected;
+
+	return dsi_connector->status;
+}
+
+static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
+				struct drm_property *property,
+				uint64_t value)
+{
+	struct drm_encoder *encoder = connector->encoder;
+
+	if (!strcmp(property->name, "scaling mode") && encoder) {
+		struct psb_intel_crtc *psb_crtc =
+					to_psb_intel_crtc(encoder->crtc);
+		bool center_change;
+		uint64_t val;
+
+		if (!psb_crtc)
+			goto set_prop_error;
+
+		switch (value) {
+		case DRM_MODE_SCALE_FULLSCREEN:
+			break;
+		case DRM_MODE_SCALE_NO_SCALE:
+			break;
+		case DRM_MODE_SCALE_ASPECT:
+			break;
+		default:
+			goto set_prop_error;
+		}
+
+		if (drm_object_property_get_value(&connector->base, property, &val))
+			goto set_prop_error;
+
+		if (val == value)
+			goto set_prop_done;
+
+		if (drm_object_property_set_value(&connector->base,
+							property, value))
+			goto set_prop_error;
+
+		center_change = (val == DRM_MODE_SCALE_NO_SCALE) ||
+			(value == DRM_MODE_SCALE_NO_SCALE);
+
+		if (psb_crtc->saved_mode.hdisplay != 0 &&
+		    psb_crtc->saved_mode.vdisplay != 0) {
+			if (center_change) {
+				if (!drm_crtc_helper_set_mode(encoder->crtc,
+						&psb_crtc->saved_mode,
+						encoder->crtc->x,
+						encoder->crtc->y,
+						encoder->crtc->fb))
+					goto set_prop_error;
+			} else {
+				struct drm_encoder_helper_funcs *funcs =
+						encoder->helper_private;
+				funcs->mode_set(encoder,
+					&psb_crtc->saved_mode,
+					&psb_crtc->saved_adjusted_mode);
+			}
+		}
+	} else if (!strcmp(property->name, "backlight") && encoder) {
+		if (drm_object_property_set_value(&connector->base, property,
+									value))
+			goto set_prop_error;
+		else
+			gma_backlight_set(encoder->dev, value);
+	}
+set_prop_done:
+	return 0;
+set_prop_error:
+	return -1;
+}
+
+static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
+{
+	struct mdfld_dsi_connector *dsi_connector =
+					mdfld_dsi_connector(connector);
+	struct mdfld_dsi_pkg_sender *sender;
+
+	if (!dsi_connector)
+		return;
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	sender = dsi_connector->pkg_sender;
+	mdfld_dsi_pkg_sender_destroy(sender);
+	kfree(dsi_connector);
+}
+
+static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
+{
+	struct mdfld_dsi_connector *dsi_connector =
+				mdfld_dsi_connector(connector);
+	struct mdfld_dsi_config *dsi_config =
+				mdfld_dsi_get_config(dsi_connector);
+	struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
+	struct drm_display_mode *dup_mode = NULL;
+	struct drm_device *dev = connector->dev;
+
+	connector->display_info.min_vfreq = 0;
+	connector->display_info.max_vfreq = 200;
+	connector->display_info.min_hfreq = 0;
+	connector->display_info.max_hfreq = 200;
+
+	if (fixed_mode) {
+		dev_dbg(dev->dev, "fixed_mode %dx%d\n",
+				fixed_mode->hdisplay, fixed_mode->vdisplay);
+		dup_mode = drm_mode_duplicate(dev, fixed_mode);
+		drm_mode_probed_add(connector, dup_mode);
+		return 1;
+	}
+	DRM_ERROR("Didn't get any modes!\n");
+	return 0;
+}
+
+static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
+						struct drm_display_mode *mode)
+{
+	struct mdfld_dsi_connector *dsi_connector =
+					mdfld_dsi_connector(connector);
+	struct mdfld_dsi_config *dsi_config =
+					mdfld_dsi_get_config(dsi_connector);
+	struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return MODE_NO_INTERLACE;
+
+	/*
+	 * FIXME: the current display controller has no panel-fitting unit,
+	 * so reject any mode that does not match the fixed mode.
+	 * Will figure out a way to do up-scaling (panel fitting) later.
+	 */
+	if (fixed_mode) {
+		if (mode->hdisplay != fixed_mode->hdisplay)
+			return MODE_PANEL;
+
+		if (mode->vdisplay != fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+
+	return MODE_OK;
+}
+
+static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
+{
+	if (mode == connector->dpms)
+		return;
+
+	/*first, execute dpms*/
+
+	drm_helper_connector_dpms(connector, mode);
+}
+
+static struct drm_encoder *mdfld_dsi_connector_best_encoder(
+				struct drm_connector *connector)
+{
+	struct mdfld_dsi_connector *dsi_connector =
+				mdfld_dsi_connector(connector);
+	struct mdfld_dsi_config *dsi_config =
+				mdfld_dsi_get_config(dsi_connector);
+	return &dsi_config->encoder->base.base;
+}
+
+/*DSI connector funcs*/
+static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
+	.dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
+	.save = mdfld_dsi_connector_save,
+	.restore = mdfld_dsi_connector_restore,
+	.detect = mdfld_dsi_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = mdfld_dsi_connector_set_property,
+	.destroy = mdfld_dsi_connector_destroy,
+};
+
+/*DSI connector helper funcs*/
+static const struct drm_connector_helper_funcs
+	mdfld_dsi_connector_helper_funcs = {
+	.get_modes = mdfld_dsi_connector_get_modes,
+	.mode_valid = mdfld_dsi_connector_mode_valid,
+	.best_encoder = mdfld_dsi_connector_best_encoder,
+};
+
+static int mdfld_dsi_get_default_config(struct drm_device *dev,
+				struct mdfld_dsi_config *config, int pipe)
+{
+	if (!dev || !config) {
+		DRM_ERROR("Invalid parameters");
+		return -EINVAL;
+	}
+
+	config->bpp = 24;
+	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+		config->lane_count = 4;
+	else
+		config->lane_count = 2;
+	config->channel_num = 0;
+
+	if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
+		config->video_mode = MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE;
+	else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
+		config->video_mode =
+				MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS;
+	else
+		config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE;
+
+	return 0;
+}
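+/*
+ * Editor's note: the defaults above resolve per panel type as follows
+ * (bpp is 24 in every case):
+ *
+ *	TC35876X: 4 lanes, non-burst video mode with sync events
+ *	TMD_VID:  2 lanes, non-burst video mode with sync pulses
+ *	others:   2 lanes, burst video mode
+ */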
+
+int mdfld_dsi_panel_reset(int pipe)
+{
+	unsigned gpio;
+	int ret = 0;
+
+	switch (pipe) {
+	case 0:
+		gpio = 128;
+		break;
+	case 2:
+		gpio = 34;
+		break;
+	default:
+		DRM_ERROR("Invalid output\n");
+		return -EINVAL;
+	}
+
+	ret = gpio_request(gpio, "gfx");
+	if (ret) {
+		DRM_ERROR("gpio_rqueset failed\n");
+		return ret;
+	}
+
+	ret = gpio_direction_output(gpio, 1);
+	if (ret) {
+		DRM_ERROR("gpio_direction_output failed\n");
+		goto gpio_error;
+	}
+
+	gpio_get_value(gpio);	/* read back the reset line; value unused */
+
+gpio_error:
+	if (gpio_is_valid(gpio))
+		gpio_free(gpio);
+
+	return ret;
+}
+
+/*
+ * MIPI output init
+ * @dev: drm device
+ * @pipe: pipe number, 0 or 2
+ * @p_vid_funcs: panel callbacks
+ *
+ * Initialize a MIPI output, including creating the DRM mode objects and
+ * initializing the DSI output on @pipe.
+ */
+void mdfld_dsi_output_init(struct drm_device *dev,
+			   int pipe,
+			   const struct panel_funcs *p_vid_funcs)
+{
+	struct mdfld_dsi_config *dsi_config;
+	struct mdfld_dsi_connector *dsi_connector;
+	struct drm_connector *connector;
+	struct mdfld_dsi_encoder *encoder;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct panel_info dsi_panel_info;
+	u32 width_mm, height_mm;
+
+	dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
+
+	if (pipe != 0 && pipe != 2) {
+		DRM_ERROR("Invalid parameter\n");
+		return;
+	}
+
+	/*create a new connector*/
+	dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
+	if (!dsi_connector) {
+		DRM_ERROR("No memory");
+		return;
+	}
+
+	dsi_connector->pipe = pipe;
+
+	dsi_config = kzalloc(sizeof(struct mdfld_dsi_config),
+			GFP_KERNEL);
+	if (!dsi_config) {
+		DRM_ERROR("cannot allocate memory for DSI config\n");
+		goto dsi_init_err0;
+	}
+	mdfld_dsi_get_default_config(dev, dsi_config, pipe);
+
+	dsi_connector->private = dsi_config;
+
+	dsi_config->changed = 1;
+	dsi_config->dev = dev;
+
+	dsi_config->fixed_mode = p_vid_funcs->get_config_mode(dev);
+	if (p_vid_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
+		goto dsi_init_err0;
+
+	width_mm = dsi_panel_info.width_mm;
+	height_mm = dsi_panel_info.height_mm;
+
+	dsi_config->mode = dsi_config->fixed_mode;
+	dsi_config->connector = dsi_connector;
+
+	if (!dsi_config->fixed_mode) {
+		DRM_ERROR("No pannel fixed mode was found\n");
+		goto dsi_init_err0;
+	}
+
+	if (pipe && dev_priv->dsi_configs[0]) {
+		dsi_config->dvr_ic_inited = 0;
+		dev_priv->dsi_configs[1] = dsi_config;
+	} else if (pipe == 0) {
+		dsi_config->dvr_ic_inited = 1;
+		dev_priv->dsi_configs[0] = dsi_config;
+	} else {
+		DRM_ERROR("Trying to init MIPI1 before MIPI0\n");
+		goto dsi_init_err0;
+	}
+
+	connector = &dsi_connector->base.base;
+	drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
+						DRM_MODE_CONNECTOR_LVDS);
+	drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
+
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->display_info.width_mm = width_mm;
+	connector->display_info.height_mm = height_mm;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	/*attach properties*/
+	drm_object_attach_property(&connector->base,
+				dev->mode_config.scaling_mode_property,
+				DRM_MODE_SCALE_FULLSCREEN);
+	drm_object_attach_property(&connector->base,
+				dev_priv->backlight_property,
+				MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
+
+	/*init DSI package sender on this output*/
+	if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) {
+		DRM_ERROR("Package Sender initialization failed on pipe %d\n",
+									pipe);
+		goto dsi_init_err0;
+	}
+
+	encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_vid_funcs);
+	if (!encoder) {
+		DRM_ERROR("Create DPI encoder failed\n");
+		goto dsi_init_err1;
+	}
+	encoder->private = dsi_config;
+	dsi_config->encoder = encoder;
+	encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI :
+		INTEL_OUTPUT_MIPI2;
+	drm_sysfs_connector_add(connector);
+	return;
+
+	/*TODO: add code to destroy outputs on error*/
+dsi_init_err1:
+	/*destroy sender*/
+	mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender);
+
+	drm_connector_cleanup(connector);
+
+	kfree(dsi_config->fixed_mode);
+	kfree(dsi_config);
+dsi_init_err0:
+	kfree(dsi_connector);
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_output.h
new file mode 100644
index 0000000..36eb074
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -0,0 +1,377 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_OUTPUT_H__
+#define __MDFLD_DSI_OUTPUT_H__
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "mdfld_output.h"
+
+#include <asm/mrst.h>
+
+#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
+#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
+#define FLD_MOD(orig, val, start, end) \
+	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
+
+#define REG_FLD_MOD(reg, val, start, end) \
+	REG_WRITE(reg, FLD_MOD(REG_READ(reg), val, start, end))
+
+static inline int REGISTER_FLD_WAIT(struct drm_device *dev, u32 reg,
+		u32 val, int start, int end)
+{
+	int t = 100000;
+
+	while (FLD_GET(REG_READ(reg), start, end) != val) {
+		if (--t == 0)
+			return 1;
+	}
+
+	return 0;
+}
+
+#define REG_FLD_WAIT(reg, val, start, end) \
+	REGISTER_FLD_WAIT(dev, reg, val, start, end)
+
+#define REG_BIT_WAIT(reg, val, bitnum) \
+	REGISTER_FLD_WAIT(dev, reg, val, bitnum, bitnum)
+
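+/*
+ * Editor's sketch (illustrative, not part of the driver): the FLD_* helpers
+ * take an inclusive bit range with start >= end.  For the 8-bit field in
+ * bits 15..8, for instance:
+ *
+ *	FLD_MASK(15, 8)                  = 0x0000ff00
+ *	FLD_VAL(0x2c, 15, 8)             = 0x00002c00
+ *	FLD_GET(0x12345678, 15, 8)       = 0x56
+ *	FLD_MOD(0x12345678, 0xaa, 15, 8) = 0x1234aa78
+ */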
+#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
+
+#ifdef DEBUG
+#define CHECK_PIPE(pipe) ({			\
+	const typeof(pipe) __pipe = (pipe);	\
+	BUG_ON(__pipe != 0 && __pipe != 2);	\
+	__pipe;	})
+#else
+#define CHECK_PIPE(pipe) (pipe)
+#endif
+
+/*
+ * The actual MIPIA -> MIPIC register offset is 0x800; multiplying the pipe
+ * number by 0x400 works because only pipes 0 and 2 are valid.
+ */
+#define REG_OFFSET(pipe) (CHECK_PIPE(pipe) * 0x400)
+
+/* mdfld DSI controller registers */
+#define MIPI_DEVICE_READY_REG(pipe)		(0xb000 + REG_OFFSET(pipe))
+#define MIPI_INTR_STAT_REG(pipe)		(0xb004 + REG_OFFSET(pipe))
+#define MIPI_INTR_EN_REG(pipe)			(0xb008 + REG_OFFSET(pipe))
+#define MIPI_DSI_FUNC_PRG_REG(pipe)		(0xb00c + REG_OFFSET(pipe))
+#define MIPI_HS_TX_TIMEOUT_REG(pipe)		(0xb010 + REG_OFFSET(pipe))
+#define MIPI_LP_RX_TIMEOUT_REG(pipe)		(0xb014 + REG_OFFSET(pipe))
+#define MIPI_TURN_AROUND_TIMEOUT_REG(pipe)	(0xb018 + REG_OFFSET(pipe))
+#define MIPI_DEVICE_RESET_TIMER_REG(pipe)	(0xb01c + REG_OFFSET(pipe))
+#define MIPI_DPI_RESOLUTION_REG(pipe)		(0xb020 + REG_OFFSET(pipe))
+#define MIPI_DBI_FIFO_THROTTLE_REG(pipe)	(0xb024 + REG_OFFSET(pipe))
+#define MIPI_HSYNC_COUNT_REG(pipe)		(0xb028 + REG_OFFSET(pipe))
+#define MIPI_HBP_COUNT_REG(pipe)		(0xb02c + REG_OFFSET(pipe))
+#define MIPI_HFP_COUNT_REG(pipe)		(0xb030 + REG_OFFSET(pipe))
+#define MIPI_HACTIVE_COUNT_REG(pipe)		(0xb034 + REG_OFFSET(pipe))
+#define MIPI_VSYNC_COUNT_REG(pipe)		(0xb038 + REG_OFFSET(pipe))
+#define MIPI_VBP_COUNT_REG(pipe)		(0xb03c + REG_OFFSET(pipe))
+#define MIPI_VFP_COUNT_REG(pipe)		(0xb040 + REG_OFFSET(pipe))
+#define MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe)	(0xb044 + REG_OFFSET(pipe))
+#define MIPI_DPI_CONTROL_REG(pipe)		(0xb048 + REG_OFFSET(pipe))
+#define MIPI_DPI_DATA_REG(pipe)			(0xb04c + REG_OFFSET(pipe))
+#define MIPI_INIT_COUNT_REG(pipe)		(0xb050 + REG_OFFSET(pipe))
+#define MIPI_MAX_RETURN_PACK_SIZE_REG(pipe)	(0xb054 + REG_OFFSET(pipe))
+#define MIPI_VIDEO_MODE_FORMAT_REG(pipe)	(0xb058 + REG_OFFSET(pipe))
+#define MIPI_EOT_DISABLE_REG(pipe)		(0xb05c + REG_OFFSET(pipe))
+#define MIPI_LP_BYTECLK_REG(pipe)		(0xb060 + REG_OFFSET(pipe))
+#define MIPI_LP_GEN_DATA_REG(pipe)		(0xb064 + REG_OFFSET(pipe))
+#define MIPI_HS_GEN_DATA_REG(pipe)		(0xb068 + REG_OFFSET(pipe))
+#define MIPI_LP_GEN_CTRL_REG(pipe)		(0xb06c + REG_OFFSET(pipe))
+#define MIPI_HS_GEN_CTRL_REG(pipe)		(0xb070 + REG_OFFSET(pipe))
+#define MIPI_GEN_FIFO_STAT_REG(pipe)		(0xb074 + REG_OFFSET(pipe))
+#define MIPI_HS_LS_DBI_ENABLE_REG(pipe)		(0xb078 + REG_OFFSET(pipe))
+#define MIPI_DPHY_PARAM_REG(pipe)		(0xb080 + REG_OFFSET(pipe))
+#define MIPI_DBI_BW_CTRL_REG(pipe)		(0xb084 + REG_OFFSET(pipe))
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe)	(0xb088 + REG_OFFSET(pipe))
+
+#define MIPI_CTRL_REG(pipe)			(0xb104 + REG_OFFSET(pipe))
+#define MIPI_DATA_ADD_REG(pipe)			(0xb108 + REG_OFFSET(pipe))
+#define MIPI_DATA_LEN_REG(pipe)			(0xb10c + REG_OFFSET(pipe))
+#define MIPI_CMD_ADD_REG(pipe)			(0xb110 + REG_OFFSET(pipe))
+#define MIPI_CMD_LEN_REG(pipe)			(0xb114 + REG_OFFSET(pipe))
+
+/* non-uniform reg offset */
+#define MIPI_PORT_CONTROL(pipe)		(CHECK_PIPE(pipe) ? MIPI_C : MIPI)
+
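+/*
+ * Editor's note: with REG_OFFSET(pipe) = pipe * 0x400 and only pipes 0 and 2
+ * valid, the MIPIA block sits at 0xb000 and the MIPIC block at 0xb800; e.g.
+ * MIPI_DEVICE_READY_REG(0) = 0xb000 and MIPI_DEVICE_READY_REG(2) = 0xb800.
+ */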
+#define DSI_DEVICE_READY				(0x1)
+#define DSI_POWER_STATE_ULPS_ENTER			(0x2 << 1)
+#define DSI_POWER_STATE_ULPS_EXIT			(0x1 << 1)
+#define DSI_POWER_STATE_ULPS_OFFSET			(0x1)
+
+
+#define DSI_ONE_DATA_LANE					(0x1)
+#define DSI_TWO_DATA_LANE					(0x2)
+#define DSI_THREE_DATA_LANE					(0x3)
+#define DSI_FOUR_DATA_LANE					(0x4)
+#define DSI_DPI_VIRT_CHANNEL_OFFSET			(0x3)
+#define DSI_DBI_VIRT_CHANNEL_OFFSET			(0x5)
+#define DSI_DPI_COLOR_FORMAT_RGB565			(0x01 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB666			(0x02 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK		(0x03 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB888			(0x04 << 7)
+#define DSI_DBI_COLOR_FORMAT_OPTION2			(0x05 << 13)
+
+#define DSI_INTR_STATE_RXSOTERROR			BIT(0)
+
+#define DSI_INTR_STATE_SPL_PKG_SENT			BIT(30)
+#define DSI_INTR_STATE_TE				BIT(31)
+
+#define DSI_HS_TX_TIMEOUT_MASK				(0xffffff)
+
+#define DSI_LP_RX_TIMEOUT_MASK				(0xffffff)
+
+#define DSI_TURN_AROUND_TIMEOUT_MASK		(0x3f)
+
+#define DSI_RESET_TIMER_MASK				(0xffff)
+
+#define DSI_DBI_FIFO_WM_HALF				(0x0)
+#define DSI_DBI_FIFO_WM_QUARTER				(0x1)
+#define DSI_DBI_FIFO_WM_LOW					(0x2)
+
+#define DSI_DPI_TIMING_MASK					(0xffff)
+
+#define DSI_INIT_TIMER_MASK					(0xffff)
+
+#define DSI_DBI_RETURN_PACK_SIZE_MASK		(0x3ff)
+
+#define DSI_LP_BYTECLK_MASK					(0x0ffff)
+
+#define DSI_HS_CTRL_GEN_SHORT_W0			(0x03)
+#define DSI_HS_CTRL_GEN_SHORT_W1			(0x13)
+#define DSI_HS_CTRL_GEN_SHORT_W2			(0x23)
+#define DSI_HS_CTRL_GEN_R0					(0x04)
+#define DSI_HS_CTRL_GEN_R1					(0x14)
+#define DSI_HS_CTRL_GEN_R2					(0x24)
+#define DSI_HS_CTRL_GEN_LONG_W				(0x29)
+#define DSI_HS_CTRL_MCS_SHORT_W0			(0x05)
+#define DSI_HS_CTRL_MCS_SHORT_W1			(0x15)
+#define DSI_HS_CTRL_MCS_R0					(0x06)
+#define DSI_HS_CTRL_MCS_LONG_W				(0x39)
+#define DSI_HS_CTRL_VC_OFFSET				(0x06)
+#define DSI_HS_CTRL_WC_OFFSET				(0x08)
+
+#define	DSI_FIFO_GEN_HS_DATA_FULL			BIT(0)
+#define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY		BIT(1)
+#define DSI_FIFO_GEN_HS_DATA_EMPTY			BIT(2)
+#define DSI_FIFO_GEN_LP_DATA_FULL			BIT(8)
+#define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY		BIT(9)
+#define DSI_FIFO_GEN_LP_DATA_EMPTY			BIT(10)
+#define DSI_FIFO_GEN_HS_CTRL_FULL			BIT(16)
+#define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY		BIT(17)
+#define DSI_FIFO_GEN_HS_CTRL_EMPTY			BIT(18)
+#define DSI_FIFO_GEN_LP_CTRL_FULL			BIT(24)
+#define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY		BIT(25)
+#define DSI_FIFO_GEN_LP_CTRL_EMPTY			BIT(26)
+#define DSI_FIFO_DBI_EMPTY					BIT(27)
+#define DSI_FIFO_DPI_EMPTY					BIT(28)
+
+#define DSI_DBI_HS_LP_SWITCH_MASK			(0x1)
+
+#define DSI_HS_LP_SWITCH_COUNTER_OFFSET		(0x0)
+#define DSI_LP_HS_SWITCH_COUNTER_OFFSET		(0x16)
+
+#define DSI_DPI_CTRL_HS_SHUTDOWN			(0x00000001)
+#define DSI_DPI_CTRL_HS_TURN_ON				(0x00000002)
+
+/*dsi power modes*/
+#define DSI_POWER_MODE_DISPLAY_ON	BIT(2)
+#define DSI_POWER_MODE_NORMAL_ON	BIT(3)
+#define DSI_POWER_MODE_SLEEP_OUT	BIT(4)
+#define DSI_POWER_MODE_PARTIAL_ON	BIT(5)
+#define DSI_POWER_MODE_IDLE_ON		BIT(6)
+
+enum {
+	MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1,
+	MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2,
+	MDFLD_DSI_VIDEO_BURST_MODE = 3,
+};
+
+#define DSI_DPI_COMPLETE_LAST_LINE			BIT(2)
+#define DSI_DPI_DISABLE_BTA					BIT(3)
+
+struct mdfld_dsi_connector {
+	struct psb_intel_connector base;
+
+	int pipe;
+	void *private;
+	void *pkg_sender;
+
+	/* Connection status */
+	enum drm_connector_status status;
+};
+
+struct mdfld_dsi_encoder {
+	struct psb_intel_encoder base;
+	void *private;
+};
+
+/*
+ * DSI config: ties one DSI connector to its DSI encoder.
+ * DRM picks up the right DSI encoder based on the per-connector config.
+ */
+struct mdfld_dsi_config {
+	struct drm_device *dev;
+	struct drm_display_mode *fixed_mode;
+	struct drm_display_mode *mode;
+
+	struct mdfld_dsi_connector *connector;
+	struct mdfld_dsi_encoder *encoder;
+
+	int changed;
+
+	int bpp;
+	int lane_count;
+	/*Virtual channel number for this encoder*/
+	int channel_num;
+	/*video mode configure*/
+	int video_mode;
+
+	int dvr_ic_inited;
+};
+
+static inline struct mdfld_dsi_connector *mdfld_dsi_connector(
+		struct drm_connector *connector)
+{
+	struct psb_intel_connector *psb_connector;
+
+	psb_connector = to_psb_intel_connector(connector);
+
+	return container_of(psb_connector, struct mdfld_dsi_connector, base);
+}
+
+static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder(
+		struct drm_encoder *encoder)
+{
+	struct psb_intel_encoder *psb_encoder;
+
+	psb_encoder = to_psb_intel_encoder(encoder);
+
+	return container_of(psb_encoder, struct mdfld_dsi_encoder, base);
+}
+
+static inline struct mdfld_dsi_config *
+	mdfld_dsi_get_config(struct mdfld_dsi_connector *connector)
+{
+	if (!connector)
+		return NULL;
+	return (struct mdfld_dsi_config *)connector->private;
+}
+
+static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config)
+{
+	struct mdfld_dsi_connector *dsi_connector;
+
+	if (!config)
+		return NULL;
+
+	dsi_connector = config->connector;
+
+	if (!dsi_connector)
+		return NULL;
+
+	return dsi_connector->pkg_sender;
+}
+
+static inline struct mdfld_dsi_config *
+	mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder)
+{
+	if (!encoder)
+		return NULL;
+	return (struct mdfld_dsi_config *)encoder->private;
+}
+
+static inline struct mdfld_dsi_connector *
+	mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder)
+{
+	struct mdfld_dsi_config *config;
+
+	if (!encoder)
+		return NULL;
+
+	config = mdfld_dsi_encoder_get_config(encoder);
+	if (!config)
+		return NULL;
+
+	return config->connector;
+}
+
+static inline void *mdfld_dsi_encoder_get_pkg_sender(
+				struct mdfld_dsi_encoder *encoder)
+{
+	struct mdfld_dsi_config *dsi_config;
+
+	dsi_config = mdfld_dsi_encoder_get_config(encoder);
+	if (!dsi_config)
+		return NULL;
+
+	return mdfld_dsi_get_pkg_sender(dsi_config);
+}
+
+static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder)
+{
+	struct mdfld_dsi_connector *connector;
+
+	if (!encoder)
+		return -1;
+
+	connector = mdfld_dsi_encoder_get_connector(encoder);
+	if (!connector)
+		return -1;
+	return connector->pipe;
+}
+
+/* Export functions */
+extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev,
+					u32 gen_fifo_stat_reg, u32 fifo_stat);
+extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config,
+					int pipe);
+extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe,
+					int level);
+extern void mdfld_dsi_output_init(struct drm_device *dev,
+					int pipe,
+					const struct panel_funcs *p_vid_funcs);
+extern void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config,
+					int pipe);
+
+extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
+					u32 *mode, bool hs);
+extern int mdfld_dsi_panel_reset(int pipe);
+
+#endif /*__MDFLD_DSI_OUTPUT_H__*/
diff --git a/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
new file mode 100644
index 0000000..489ffd2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -0,0 +1,688 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include <linux/freezer.h>
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "mdfld_dsi_dpi.h"
+
+#define MDFLD_DSI_READ_MAX_COUNT		5000
+
+enum data_type {
+	DSI_DT_GENERIC_SHORT_WRITE_0	= 0x03,
+	DSI_DT_GENERIC_SHORT_WRITE_1	= 0x13,
+	DSI_DT_GENERIC_SHORT_WRITE_2	= 0x23,
+	DSI_DT_GENERIC_READ_0		= 0x04,
+	DSI_DT_GENERIC_READ_1		= 0x14,
+	DSI_DT_GENERIC_READ_2		= 0x24,
+	DSI_DT_GENERIC_LONG_WRITE	= 0x29,
+	DSI_DT_DCS_SHORT_WRITE_0	= 0x05,
+	DSI_DT_DCS_SHORT_WRITE_1	= 0x15,
+	DSI_DT_DCS_READ			= 0x06,
+	DSI_DT_DCS_LONG_WRITE		= 0x39,
+};
+
+enum {
+	MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
+};
+
+enum {
+	MDFLD_DSI_PKG_SENDER_FREE = 0x0,
+	MDFLD_DSI_PKG_SENDER_BUSY = 0x1,
+};
+
+static const char *const dsi_errors[] = {
+	"RX SOT Error",
+	"RX SOT Sync Error",
+	"RX EOT Sync Error",
+	"RX Escape Mode Entry Error",
+	"RX LP TX Sync Error",
+	"RX HS Receive Timeout Error",
+	"RX False Control Error",
+	"RX ECC Single Bit Error",
+	"RX ECC Multibit Error",
+	"RX Checksum Error",
+	"RX DSI Data Type Not Recognised",
+	"RX DSI VC ID Invalid",
+	"TX False Control Error",
+	"TX ECC Single Bit Error",
+	"TX ECC Multibit Error",
+	"TX Checksum Error",
+	"TX DSI Data Type Not Recognised",
+	"TX DSI VC ID invalid",
+	"High Contention",
+	"Low contention",
+	"DPI FIFO Under run",
+	"HS TX Timeout",
+	"LP RX Timeout",
+	"Turn Around ACK Timeout",
+	"ACK With No Error",
+	"RX Invalid TX Length",
+	"RX Prot Violation",
+	"HS Generic Write FIFO Full",
+	"LP Generic Write FIFO Full",
+	"Generic Read Data Avail"
+	"Special Packet Sent",
+	"Tearing Effect",
+};
+
+static inline int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender,
+						u32 mask)
+{
+	struct drm_device *dev = sender->dev;
+	u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg;
+	int retry = 0xffff;
+
+	while (retry--) {
+		if ((mask & REG_READ(gen_fifo_stat_reg)) == mask)
+			return 0;
+		udelay(100);
+	}
+	DRM_ERROR("fifo is NOT empty 0x%08x\n", REG_READ(gen_fifo_stat_reg));
+	return -EIO;
+}
+
+static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+	return wait_for_gen_fifo_empty(sender,
+		DSI_FIFO_GEN_HS_DATA_EMPTY | DSI_FIFO_GEN_LP_DATA_EMPTY |
+		DSI_FIFO_GEN_HS_CTRL_EMPTY | DSI_FIFO_GEN_LP_CTRL_EMPTY |
+		DSI_FIFO_DBI_EMPTY | DSI_FIFO_DPI_EMPTY);
+}
+
+static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+	return wait_for_gen_fifo_empty(sender,
+		DSI_FIFO_GEN_LP_DATA_EMPTY | DSI_FIFO_GEN_LP_CTRL_EMPTY);
+}
+
+static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+	return wait_for_gen_fifo_empty(sender,
+		DSI_FIFO_GEN_HS_DATA_EMPTY | DSI_FIFO_GEN_HS_CTRL_EMPTY);
+}
+
+static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
+{
+	u32 intr_stat_reg = sender->mipi_intr_stat_reg;
+	struct drm_device *dev = sender->dev;
+
+	dev_dbg(sender->dev->dev, "Handling error 0x%08x\n", mask);
+
+	switch (mask) {
+	case BIT(0):
+	case BIT(1):
+	case BIT(2):
+	case BIT(3):
+	case BIT(4):
+	case BIT(5):
+	case BIT(6):
+	case BIT(7):
+	case BIT(8):
+	case BIT(9):
+	case BIT(10):
+	case BIT(11):
+	case BIT(12):
+	case BIT(13):
+		dev_dbg(sender->dev->dev, "No Action required\n");
+		break;
+	case BIT(14):
+		/*wait for all fifo empty*/
+		/* wait_for_all_fifos_empty(sender); */
+		break;
+	case BIT(15):
+		dev_dbg(sender->dev->dev, "No Action required\n");
+		break;
+	case BIT(16):
+		break;
+	case BIT(17):
+		break;
+	case BIT(18):
+	case BIT(19):
+		dev_dbg(sender->dev->dev, "High/Low contention detected\n");
+		/*wait for contention recovery time*/
+		/*mdelay(10);*/
+		/*wait for all fifo empty*/
+		if (0)
+			wait_for_all_fifos_empty(sender);
+		break;
+	case BIT(20):
+		dev_dbg(sender->dev->dev, "No Action required\n");
+		break;
+	case BIT(21):
+		/*wait for all fifo empty*/
+		/*wait_for_all_fifos_empty(sender);*/
+		break;
+	case BIT(22):
+		break;
+	case BIT(23):
+	case BIT(24):
+	case BIT(25):
+	case BIT(26):
+	case BIT(27):
+		dev_dbg(sender->dev->dev, "HS Gen fifo full\n");
+		REG_WRITE(intr_stat_reg, mask);
+		wait_for_hs_fifos_empty(sender);
+		break;
+	case BIT(28):
+		dev_dbg(sender->dev->dev, "LP Gen fifo full\n");
+		REG_WRITE(intr_stat_reg, mask);
+		wait_for_lp_fifos_empty(sender);
+		break;
+	case BIT(29):
+	case BIT(30):
+	case BIT(31):
+		dev_dbg(sender->dev->dev, "No Action required\n");
+		break;
+	}
+
+	if (mask & REG_READ(intr_stat_reg))
+		dev_dbg(sender->dev->dev,
+				"Cannot clean interrupt 0x%08x\n", mask);
+	return 0;
+}
+
+static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender)
+{
+	struct drm_device *dev = sender->dev;
+	u32 intr_stat_reg = sender->mipi_intr_stat_reg;
+	u32 mask;
+	u32 intr_stat;
+	int i;
+	int err = 0;
+
+	intr_stat = REG_READ(intr_stat_reg);
+
+	for (i = 0; i < 32; i++) {
+		mask = BIT(i);
+		if (intr_stat & mask) {
+			dev_dbg(sender->dev->dev, "[DSI]: %s\n", dsi_errors[i]);
+			err = handle_dsi_error(sender, mask);
+			if (err)
+				DRM_ERROR("Cannot handle error\n");
+		}
+	}
+	return err;
+}
+
+static int send_short_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+			u8 cmd, u8 param, bool hs)
+{
+	struct drm_device *dev = sender->dev;
+	u32 ctrl_reg;
+	u32 val;
+	u8 virtual_channel = 0;
+
+	if (hs) {
+		ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
+
+		/* FIXME: wait_for_hs_fifos_empty(sender); */
+	} else {
+		ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
+
+		/* FIXME: wait_for_lp_fifos_empty(sender); */
+	}
+
+	val = FLD_VAL(param, 23, 16) | FLD_VAL(cmd, 15, 8) |
+		FLD_VAL(virtual_channel, 7, 6) | FLD_VAL(data_type, 5, 0);
+
+	REG_WRITE(ctrl_reg, val);
+
+	return 0;
+}
+
+static int send_long_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+			u8 *data, int len, bool hs)
+{
+	struct drm_device *dev = sender->dev;
+	u32 ctrl_reg;
+	u32 data_reg;
+	u32 val;
+	u8 *p;
+	u8 b1, b2, b3, b4;
+	u8 virtual_channel = 0;
+	int i;
+
+	if (hs) {
+		ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
+		data_reg = sender->mipi_hs_gen_data_reg;
+
+		/* FIXME: wait_for_hs_fifos_empty(sender); */
+	} else {
+		ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
+		data_reg = sender->mipi_lp_gen_data_reg;
+
+		/* FIXME: wait_for_lp_fifos_empty(sender); */
+	}
+
+	p = data;
+	for (i = 0; i < len / 4; i++) {
+		b1 = *p++;
+		b2 = *p++;
+		b3 = *p++;
+		b4 = *p++;
+
+		REG_WRITE(data_reg, b4 << 24 | b3 << 16 | b2 << 8 | b1);
+	}
+
+	i = len % 4;
+	if (i) {
+		b1 = 0; b2 = 0; b3 = 0;
+
+		switch (i) {
+		case 3:
+			b1 = *p++;
+			b2 = *p++;
+			b3 = *p++;
+			break;
+		case 2:
+			b1 = *p++;
+			b2 = *p++;
+			break;
+		case 1:
+			b1 = *p++;
+			break;
+		}
+
+		REG_WRITE(data_reg, b3 << 16 | b2 << 8 | b1);
+	}
+
+	val = FLD_VAL(len, 23, 8) | FLD_VAL(virtual_channel, 7, 6) |
+		FLD_VAL(data_type, 5, 0);
+
+	REG_WRITE(ctrl_reg, val);
+
+	return 0;
+}
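+/*
+ * Editor's sketch (illustrative, not part of the driver): the payload is
+ * written to the generic data register four bytes at a time, least
+ * significant byte first, so the byte stream {0x11, 0x22, 0x33, 0x44, 0x55}
+ * becomes two writes:
+ *
+ *	REG_WRITE(data_reg, 0x44332211);	full word
+ *	REG_WRITE(data_reg, 0x00000055);	1-byte remainder
+ */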
+
+static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+			u8 *data, u16 len)
+{
+	u8 cmd;
+
+	switch (data_type) {
+	case DSI_DT_DCS_SHORT_WRITE_0:
+	case DSI_DT_DCS_SHORT_WRITE_1:
+	case DSI_DT_DCS_LONG_WRITE:
+		cmd = *data;
+		break;
+	default:
+		return 0;
+	}
+
+	/*this prevents other packages from being sent while doing msleep*/
+	sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
+
+	/*wait 120 milliseconds in case an enter/exit_sleep_mode was just sent*/
+	if (unlikely(cmd == DCS_ENTER_SLEEP_MODE ||
+		     cmd == DCS_EXIT_SLEEP_MODE)) {
+		/*TODO: replace with msleep later*/
+		mdelay(120);
+	}
+	return 0;
+}
+
+static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+			u8 *data, u16 len)
+{
+	u8 cmd;
+
+	switch (data_type) {
+	case DSI_DT_DCS_SHORT_WRITE_0:
+	case DSI_DT_DCS_SHORT_WRITE_1:
+	case DSI_DT_DCS_LONG_WRITE:
+		cmd = *data;
+		break;
+	default:
+		return 0;
+	}
+
+	/*update panel status*/
+	if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
+		sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
+		/*TODO: replace it with msleep later*/
+		mdelay(120);
+	} else if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
+		sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
+		/*TODO: replace it with msleep later*/
+		mdelay(120);
+	} else if (unlikely(cmd == DCS_SOFT_RESET)) {
+		/*TODO: replace it with msleep later*/
+		mdelay(5);
+	}
+
+	sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+
+	return 0;
+}
+
+static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+		u8 *data, u16 len, bool hs)
+{
+	int ret;
+
+	/*handle DSI error*/
+	ret = dsi_error_handler(sender);
+	if (ret) {
+		DRM_ERROR("Error handling failed\n");
+		return -EAGAIN;
+	}
+
+	/* send pkg */
+	if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) {
+		DRM_ERROR("sender is busy\n");
+		return -EAGAIN;
+	}
+
+	ret = send_pkg_prepare(sender, data_type, data, len);
+	if (ret) {
+		DRM_ERROR("send_pkg_prepare error\n");
+		return ret;
+	}
+
+	switch (data_type) {
+	case DSI_DT_GENERIC_SHORT_WRITE_0:
+	case DSI_DT_GENERIC_SHORT_WRITE_1:
+	case DSI_DT_GENERIC_SHORT_WRITE_2:
+	case DSI_DT_GENERIC_READ_0:
+	case DSI_DT_GENERIC_READ_1:
+	case DSI_DT_GENERIC_READ_2:
+	case DSI_DT_DCS_SHORT_WRITE_0:
+	case DSI_DT_DCS_SHORT_WRITE_1:
+	case DSI_DT_DCS_READ:
+		ret = send_short_pkg(sender, data_type, data[0], data[1], hs);
+		break;
+	case DSI_DT_GENERIC_LONG_WRITE:
+	case DSI_DT_DCS_LONG_WRITE:
+		ret = send_long_pkg(sender, data_type, data, len, hs);
+		break;
+	default:
+		DRM_ERROR("Invalid data type 0x%x\n", data_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	send_pkg_done(sender, data_type, data, len);
+
+	/*FIXME: should I query complete and fifo empty here?*/
+
+	return ret;
+}
+
+int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
+			u32 len, bool hs)
+{
+	unsigned long flags;
+
+	if (!sender || !data || !len) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&sender->lock, flags);
+	send_pkg(sender, DSI_DT_DCS_LONG_WRITE, data, len, hs);
+	spin_unlock_irqrestore(&sender->lock, flags);
+
+	return 0;
+}
+
+int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
+			u8 param, u8 param_num, bool hs)
+{
+	u8 data[2];
+	unsigned long flags;
+	u8 data_type;
+
+	if (!sender) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	data[0] = cmd;
+
+	if (param_num) {
+		data_type = DSI_DT_DCS_SHORT_WRITE_1;
+		data[1] = param;
+	} else {
+		data_type = DSI_DT_DCS_SHORT_WRITE_0;
+		data[1] = 0;
+	}
+
+	spin_lock_irqsave(&sender->lock, flags);
+	send_pkg(sender, data_type, data, sizeof(data), hs);
+	spin_unlock_irqrestore(&sender->lock, flags);
+
+	return 0;
+}
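+/*
+ * Editor's sketch (illustrative, not part of the driver): a parameterless
+ * DCS command such as set_display_on (0x29) is sent with param_num = 0,
+ * which selects DSI_DT_DCS_SHORT_WRITE_0 internally:
+ */
+#if 0	/* example only */
+	mdfld_dsi_send_mcs_short(sender, DCS_SET_DISPLAY_ON, 0, 0, true);
+#endif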
+
+int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
+			u8 param1, u8 param_num, bool hs)
+{
+	u8 data[2];
+	unsigned long flags;
+	u8 data_type;
+
+	if (!sender || param_num > 2) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	switch (param_num) {
+	case 0:
+		data_type = DSI_DT_GENERIC_SHORT_WRITE_0;
+		data[0] = 0;
+		data[1] = 0;
+		break;
+	case 1:
+		data_type = DSI_DT_GENERIC_SHORT_WRITE_1;
+		data[0] = param0;
+		data[1] = 0;
+		break;
+	case 2:
+		data_type = DSI_DT_GENERIC_SHORT_WRITE_2;
+		data[0] = param0;
+		data[1] = param1;
+		break;
+	}
+
+	spin_lock_irqsave(&sender->lock, flags);
+	send_pkg(sender, data_type, data, sizeof(data), hs);
+	spin_unlock_irqrestore(&sender->lock, flags);
+
+	return 0;
+}
+
+int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
+			u32 len, bool hs)
+{
+	unsigned long flags;
+
+	if (!sender || !data || !len) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&sender->lock, flags);
+	send_pkg(sender, DSI_DT_GENERIC_LONG_WRITE, data, len, hs);
+	spin_unlock_irqrestore(&sender->lock, flags);
+
+	return 0;
+}
+
+static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
+			u8 *data, u16 len, u32 *data_out, u16 len_out, bool hs)
+{
+	unsigned long flags;
+	struct drm_device *dev;
+	int i;
+	u32 gen_data_reg;
+	int retry = MDFLD_DSI_READ_MAX_COUNT;
+
+	if (!sender || !data_out || !len_out) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	dev = sender->dev;
+
+	/*
+	 * Do the read:
+	 * 1) send out the generic read request
+	 * 2) poll the read data avail interrupt
+	 * 3) read the data
+	 */
+	spin_lock_irqsave(&sender->lock, flags);
+
+	REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
+
+	if ((REG_READ(sender->mipi_intr_stat_reg) & BIT(29)))
+		DRM_ERROR("Can NOT clean read data valid interrupt\n");
+
+	/*send out read request*/
+	send_pkg(sender, data_type, data, len, hs);
+
+	/*polling read data avail interrupt*/
+	while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & BIT(29))) {
+		udelay(100);
+		retry--;
+	}
+
+	if (!retry) {
+		spin_unlock_irqrestore(&sender->lock, flags);
+		return -ETIMEDOUT;
+	}
+
+	REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
+
+	/*read data*/
+	if (hs)
+		gen_data_reg = sender->mipi_hs_gen_data_reg;
+	else
+		gen_data_reg = sender->mipi_lp_gen_data_reg;
+
+	for (i = 0; i < len_out; i++)
+		*(data_out + i) = REG_READ(gen_data_reg);
+
+	spin_unlock_irqrestore(&sender->lock, flags);
+
+	return 0;
+}
+
+int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
+		u32 *data, u16 len, bool hs)
+{
+	if (!sender || !data || !len) {
+		DRM_ERROR("Invalid parameters\n");
+		return -EINVAL;
+	}
+
+	return __read_panel_data(sender, DSI_DT_DCS_READ, &cmd, 1,
+				data, len, hs);
+}
+
+int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
+								int pipe)
+{
+	struct mdfld_dsi_pkg_sender *pkg_sender;
+	struct mdfld_dsi_config *dsi_config;
+	struct drm_device *dev;
+	struct drm_psb_private *dev_priv;
+	const struct psb_offset *map;
+	u32 mipi_val = 0;
+
+	if (!dsi_connector) {
+		DRM_ERROR("Invalid parameter\n");
+		return -EINVAL;
+	}
+
+	dsi_config = mdfld_dsi_get_config(dsi_connector);
+	dev = dsi_config->dev;
+	dev_priv = dev->dev_private;
+	map = &dev_priv->regmap[pipe];
+
+	pkg_sender = dsi_connector->pkg_sender;
+
+	if (!pkg_sender || IS_ERR(pkg_sender)) {
+		pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender),
+								GFP_KERNEL);
+		if (!pkg_sender) {
+			DRM_ERROR("Create DSI pkg sender failed\n");
+			return -ENOMEM;
+		}
+		dsi_connector->pkg_sender = (void *)pkg_sender;
+	}
+
+	pkg_sender->dev = dev;
+	pkg_sender->dsi_connector = dsi_connector;
+	pkg_sender->pipe = pipe;
+	pkg_sender->pkg_num = 0;
+	pkg_sender->panel_mode = 0;
+	pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+
+	/*init regs*/
+	/* FIXME: should just copy the regmap ptr ? */
+	pkg_sender->dpll_reg = map->dpll;
+	pkg_sender->dspcntr_reg = map->cntr;
+	pkg_sender->pipeconf_reg = map->conf;
+	pkg_sender->dsplinoff_reg = map->linoff;
+	pkg_sender->dspsurf_reg = map->surf;
+	pkg_sender->pipestat_reg = map->status;
+
+	pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
+	pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe);
+	pkg_sender->mipi_hs_gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
+	pkg_sender->mipi_lp_gen_ctrl_reg = MIPI_LP_GEN_CTRL_REG(pipe);
+	pkg_sender->mipi_hs_gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
+	pkg_sender->mipi_gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
+	pkg_sender->mipi_data_addr_reg = MIPI_DATA_ADD_REG(pipe);
+	pkg_sender->mipi_data_len_reg = MIPI_DATA_LEN_REG(pipe);
+	pkg_sender->mipi_cmd_addr_reg = MIPI_CMD_ADD_REG(pipe);
+	pkg_sender->mipi_cmd_len_reg = MIPI_CMD_LEN_REG(pipe);
+
+	/*init lock*/
+	spin_lock_init(&pkg_sender->lock);
+
+	if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
+		/**
+		 * For video mode, don't enable DPI timing output here,
+		 * will init the DPI timing output during mode setting.
+		 */
+		mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+
+		if (pipe == 0)
+			mipi_val |= 0x2;
+
+		REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi_val);
+		REG_READ(MIPI_PORT_CONTROL(pipe));
+
+		/* do dsi controller init */
+		mdfld_dsi_controller_init(dsi_config, pipe);
+	}
+
+	return 0;
+}
+
+void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender)
+{
+	if (!sender || IS_ERR(sender))
+		return;
+
+	/*free*/
+	kfree(sender);
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h b/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
new file mode 100644
index 0000000..459cd7e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+#ifndef __MDFLD_DSI_PKG_SENDER_H__
+#define __MDFLD_DSI_PKG_SENDER_H__
+
+#include <linux/kthread.h>
+
+#define MDFLD_MAX_DCS_PARAM	8
+
+struct mdfld_dsi_pkg_sender {
+	struct drm_device *dev;
+	struct mdfld_dsi_connector *dsi_connector;
+	u32 status;
+	u32 panel_mode;
+
+	int pipe;
+
+	spinlock_t lock;
+
+	u32 pkg_num;
+
+	/* Registers */
+	u32 dpll_reg;
+	u32 dspcntr_reg;
+	u32 pipeconf_reg;
+	u32 pipestat_reg;
+	u32 dsplinoff_reg;
+	u32 dspsurf_reg;
+
+	u32 mipi_intr_stat_reg;
+	u32 mipi_lp_gen_data_reg;
+	u32 mipi_hs_gen_data_reg;
+	u32 mipi_lp_gen_ctrl_reg;
+	u32 mipi_hs_gen_ctrl_reg;
+	u32 mipi_gen_fifo_stat_reg;
+	u32 mipi_data_addr_reg;
+	u32 mipi_data_len_reg;
+	u32 mipi_cmd_addr_reg;
+	u32 mipi_cmd_len_reg;
+};
+
+/* DCS definitions */
+#define DCS_SOFT_RESET			0x01
+#define DCS_ENTER_SLEEP_MODE		0x10
+#define DCS_EXIT_SLEEP_MODE		0x11
+#define DCS_SET_DISPLAY_OFF		0x28
+#define DCS_SET_DISPLAY_ON		0x29
+#define DCS_SET_COLUMN_ADDRESS		0x2a
+#define DCS_SET_PAGE_ADDRESS		0x2b
+#define DCS_WRITE_MEM_START		0x2c
+#define DCS_SET_TEAR_OFF		0x34
+#define DCS_SET_TEAR_ON			0x35
+
+extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
+					int pipe);
+extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
+int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
+					u8 param, u8 param_num, bool hs);
+int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
+					u32 len, bool hs);
+int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
+					u8 param1, u8 param_num, bool hs);
+int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
+					u32 len, bool hs);
+/* Read interfaces */
+int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
+		u32 *data, u16 len, bool hs);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/gma500/mdfld_intel_display.c b/linux-imx/drivers/gpu/drm/gma500/mdfld_intel_display.c
new file mode 100644
index 0000000..74485dc
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -0,0 +1,1054 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "framebuffer.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_output.h"
+
+/* Hardcoded currently */
+static int ksel = KSEL_CRYSTAL_19;
+
+struct psb_intel_range_t {
+	int min, max;
+};
+
+struct mrst_limit_t {
+	struct psb_intel_range_t dot, m, p1;
+};
+
+struct mrst_clock_t {
+	/* derived values */
+	int dot;
+	int m;
+	int p1;
+};
+
+#define COUNT_MAX 0x10000000
+
+void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	int count, temp;
+
+	switch (pipe) {
+	case 0:
+	case 1:
+	case 2:
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number.\n");
+		return;
+	}
+
+	/* FIXME JLIU7_PO: a vblank wait is used for power-on bring-up; the
+	 * register polling loop below is currently bypassed. */
+	psb_intel_wait_for_vblank(dev);
+	return;
+
+	/* Wait for the pipe disable to take effect. */
+	for (count = 0; count < COUNT_MAX; count++) {
+		temp = REG_READ(map->conf);
+		if ((temp & PIPEACONF_PIPE_STATE) == 0)
+			break;
+	}
+}
+
+void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	int count, temp;
+
+	switch (pipe) {
+	case 0:
+	case 1:
+	case 2:
+		break;
+	default:
+		DRM_ERROR("Illegal Pipe Number.\n");
+		return;
+	}
+
+	/* FIXME JLIU7_PO: a vblank wait is used for power-on bring-up; the
+	 * register polling loop below is currently bypassed. */
+	psb_intel_wait_for_vblank(dev);
+	return;
+
+	/* Wait for the pipe enable to take effect. */
+	for (count = 0; count < COUNT_MAX; count++) {
+		temp = REG_READ(map->conf);
+		if ((temp & PIPEACONF_PIPE_STATE) != 0)
+			break;
+	}
+}
+
+static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void psb_intel_crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+	u32 pfit_control;
+
+	pfit_control = REG_READ(PFIT_CONTROL);
+
+	/* See if the panel fitter is in use */
+	if ((pfit_control & PFIT_ENABLE) == 0)
+		return -1;
+
+	/* 965 can place panel fitter on either pipe */
+	return (pfit_control >> 29) & 0x3;
+}
+
+static struct drm_device global_dev;
+
+void mdfld__intel_plane_set_alpha(int enable)
+{
+	struct drm_device *dev = &global_dev;
+	int dspcntr_reg = DSPACNTR;
+	u32 dspcntr;
+
+	dspcntr = REG_READ(dspcntr_reg);
+
+	if (enable) {
+		dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
+		dspcntr |= DISPPLANE_32BPP;
+	} else {
+		dspcntr &= ~DISPPLANE_32BPP;
+		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+	}
+
+	REG_WRITE(dspcntr_reg, dspcntr);
+}
+
+static int check_fb(struct drm_framebuffer *fb)
+{
+	if (!fb)
+		return 0;
+
+	switch (fb->bits_per_pixel) {
+	case 8:
+	case 16:
+	case 24:
+	case 32:
+		return 0;
+	default:
+		DRM_ERROR("Unknown color depth\n");
+		return -EINVAL;
+	}
+}
+
+static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+				struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	unsigned long start, offset;
+	u32 dspcntr;
+	int ret;
+
+	memcpy(&global_dev, dev, sizeof(struct drm_device));
+
+	dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
+
+	/* no fb bound */
+	if (!crtc->fb) {
+		dev_dbg(dev->dev, "No FB bound\n");
+		return 0;
+	}
+
+	ret = check_fb(crtc->fb);
+	if (ret)
+		return ret;
+
+	if (pipe > 2) {
+		DRM_ERROR("Illegal Pipe Number.\n");
+		return -EINVAL;
+	}
+
+	if (!gma_power_begin(dev, true))
+		return 0;
+
+	start = psbfb->gtt->offset;
+	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+	REG_WRITE(map->stride, crtc->fb->pitches[0]);
+	dspcntr = REG_READ(map->cntr);
+	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		dspcntr |= DISPPLANE_8BPP;
+		break;
+	case 16:
+		if (crtc->fb->depth == 15)
+			dspcntr |= DISPPLANE_15_16BPP;
+		else
+			dspcntr |= DISPPLANE_16BPP;
+		break;
+	case 24:
+	case 32:
+		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+		break;
+	}
+	REG_WRITE(map->cntr, dspcntr);
+
+	dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
+						start, offset, x, y);
+	REG_WRITE(map->linoff, offset);
+	REG_READ(map->linoff);
+	REG_WRITE(map->surf, start);
+	REG_READ(map->surf);
+
+	gma_power_end(dev);
+
+	return 0;
+}
+
+/*
+ * Disable the pipe, plane and PLL.
+ */
+void mdfld_disable_crtc(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	u32 temp;
+
+	dev_dbg(dev->dev, "pipe = %d\n", pipe);
+
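+	/* Pipe 1 is HDMI; only the MIPI pipes (0 and 2) have a DSI FIFO
+	 * to drain. */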
+	if (pipe != 1)
+		mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
+				HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+
+	/* Disable display plane */
+	temp = REG_READ(map->cntr);
+	if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+		REG_WRITE(map->cntr,
+			  temp & ~DISPLAY_PLANE_ENABLE);
+		/* Flush the plane changes */
+		REG_WRITE(map->base, REG_READ(map->base));
+		REG_READ(map->base);
+	}
+
+	/* FIXME_JLIU7 MDFLD_PO revisit */
+
+	/* Next, disable display pipes */
+	temp = REG_READ(map->conf);
+	if ((temp & PIPEACONF_ENABLE) != 0) {
+		temp &= ~PIPEACONF_ENABLE;
+		temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+		REG_WRITE(map->conf, temp);
+		REG_READ(map->conf);
+
+		/* Wait for the pipe disable to take effect. */
+		mdfldWaitForPipeDisable(dev, pipe);
+	}
+
+	temp = REG_READ(map->dpll);
+	if (temp & DPLL_VCO_ENABLE) {
+		if ((pipe != 1 &&
+			!((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
+				& PIPEACONF_ENABLE)) || pipe == 1) {
+			temp &= ~(DPLL_VCO_ENABLE);
+			REG_WRITE(map->dpll, temp);
+			REG_READ(map->dpll);
+			/* Wait for the clocks to turn off. */
+			/* FIXME_MDFLD PO may need more delay */
+			udelay(500);
+
+			if (!(temp & MDFLD_PWR_GATE_EN)) {
+				/* gating power of DPLL */
+				REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN);
+				/* FIXME_MDFLD PO - change 500 to 1 after PO */
+				udelay(5000);
+			}
+		}
+	}
+
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	u32 pipeconf = dev_priv->pipeconf[pipe];
+	u32 temp;
+	int timeout = 0;
+
+	dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);
+
+	/* Note: The old code used the pipe A status register for pipe B, but
+	   that appears to be a bug */
+
+	if (!gma_power_begin(dev, true))
+		return;
+
+	/* XXX: When our outputs are all unaware of DPMS modes other than off
+	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+	 */
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+		/* Enable the DPLL */
+		temp = REG_READ(map->dpll);
+
+		if ((temp & DPLL_VCO_ENABLE) == 0) {
+			/* When ungating the DPLL power, wait 0.5us before
+			   enabling the VCO */
+			if (temp & MDFLD_PWR_GATE_EN) {
+				temp &= ~MDFLD_PWR_GATE_EN;
+				REG_WRITE(map->dpll, temp);
+				/* FIXME_MDFLD PO - change 500 to 1 after PO */
+				udelay(500);
+			}
+
+			REG_WRITE(map->dpll, temp);
+			REG_READ(map->dpll);
+			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+			udelay(500);
+
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
+
+			/*
+			 * Wait for the DSI PLL to lock.
+			 * NOTE: only pipe 0 and pipe 1 status needs polling,
+			 * since both MIPI pipes share the same PLL.
+			 */
+			while ((pipe != 2) && (timeout < 20000) &&
+			  !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
+				udelay(150);
+				timeout++;
+			}
+		}
+
+		/* Enable the plane */
+		temp = REG_READ(map->cntr);
+		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+			REG_WRITE(map->cntr,
+				temp | DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(map->base, REG_READ(map->base));
+		}
+
+		/* Enable the pipe */
+		temp = REG_READ(map->conf);
+		if ((temp & PIPEACONF_ENABLE) == 0) {
+			REG_WRITE(map->conf, pipeconf);
+
+			/* Wait for the pipe enable to take effect. */
+			mdfldWaitForPipeEnable(dev, pipe);
+		}
+
+		/* Workaround for sighting 3741701: random X blank display. */
+		/* Perform the workaround in video mode only, on pipe A or C. */
+		if (pipe == 0 || pipe == 2) {
+			REG_WRITE(map->status, REG_READ(map->status));
+			msleep(100);
+			if (PIPE_VBLANK_STATUS & REG_READ(map->status))
+				dev_dbg(dev->dev, "OK");
+			else {
+				dev_dbg(dev->dev, "STUCK!!!!");
+				/* shut down the controller */
+				temp = REG_READ(map->cntr);
+				REG_WRITE(map->cntr,
+						temp & ~DISPLAY_PLANE_ENABLE);
+				REG_WRITE(map->base, REG_READ(map->base));
+				/*mdfld_dsi_dpi_shut_down(dev, pipe);*/
+				REG_WRITE(0xb048, 1);
+				msleep(100);
+				temp = REG_READ(map->conf);
+				temp &= ~PIPEACONF_ENABLE;
+				REG_WRITE(map->conf, temp);
+				msleep(100); /* wait for pipe disable */
+				REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
+				msleep(100);
+				REG_WRITE(0xb004, REG_READ(0xb004));
+				/* try to bring the controller back up again */
+				REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
+				temp = REG_READ(map->cntr);
+				REG_WRITE(map->cntr,
+						temp | DISPLAY_PLANE_ENABLE);
+				REG_WRITE(map->base, REG_READ(map->base));
+				/*mdfld_dsi_dpi_turn_on(dev, pipe);*/
+				REG_WRITE(0xb048, 2);
+				msleep(100);
+				temp = REG_READ(map->conf);
+				temp |= PIPEACONF_ENABLE;
+				REG_WRITE(map->conf, temp);
+			}
+		}
+
+		psb_intel_crtc_load_lut(crtc);
+
+		/* Give the overlay scaler a chance to enable
+		   if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+
+		break;
+	case DRM_MODE_DPMS_OFF:
+		/* Give the overlay scaler a chance to disable
+		 * if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+		if (pipe != 1)
+			mdfld_dsi_gen_fifo_ready(dev,
+				MIPI_GEN_FIFO_STAT_REG(pipe),
+				HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+
+		/* Disable the VGA plane that we never use */
+		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+		/* Disable display plane */
+		temp = REG_READ(map->cntr);
+		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+			REG_WRITE(map->cntr,
+				  temp & ~DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(map->base, REG_READ(map->base));
+			REG_READ(map->base);
+		}
+
+		/* Next, disable display pipes */
+		temp = REG_READ(map->conf);
+		if ((temp & PIPEACONF_ENABLE) != 0) {
+			temp &= ~PIPEACONF_ENABLE;
+			temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+			REG_WRITE(map->conf, temp);
+			REG_READ(map->conf);
+
+			/* Wait for the pipe disable to take effect. */
+			mdfldWaitForPipeDisable(dev, pipe);
+		}
+
+		temp = REG_READ(map->dpll);
+		if (temp & DPLL_VCO_ENABLE) {
+			if ((pipe != 1 && !((REG_READ(PIPEACONF)
+				| REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
+					|| pipe == 1) {
+				temp &= ~(DPLL_VCO_ENABLE);
+				REG_WRITE(map->dpll, temp);
+				REG_READ(map->dpll);
+				/* Wait for the clocks to turn off. */
+				/* FIXME_MDFLD PO may need more delay */
+				udelay(500);
+			}
+		}
+		break;
+	}
+	gma_power_end(dev);
+}
+
+
+#define MDFLD_LIMT_DPLL_19	    0
+#define MDFLD_LIMT_DPLL_25	    1
+#define MDFLD_LIMT_DPLL_83	    2
+#define MDFLD_LIMT_DPLL_100	    3
+#define MDFLD_LIMT_DSIPLL_19	    4
+#define MDFLD_LIMT_DSIPLL_25	    5
+#define MDFLD_LIMT_DSIPLL_83	    6
+#define MDFLD_LIMT_DSIPLL_100	    7
+
+#define MDFLD_DOT_MIN		  19750
+#define MDFLD_DOT_MAX		  120000
+#define MDFLD_DPLL_M_MIN_19	    113
+#define MDFLD_DPLL_M_MAX_19	    155
+#define MDFLD_DPLL_P1_MIN_19	    2
+#define MDFLD_DPLL_P1_MAX_19	    10
+#define MDFLD_DPLL_M_MIN_25	    101
+#define MDFLD_DPLL_M_MAX_25	    130
+#define MDFLD_DPLL_P1_MIN_25	    2
+#define MDFLD_DPLL_P1_MAX_25	    10
+#define MDFLD_DPLL_M_MIN_83	    64
+#define MDFLD_DPLL_M_MAX_83	    64
+#define MDFLD_DPLL_P1_MIN_83	    2
+#define MDFLD_DPLL_P1_MAX_83	    2
+#define MDFLD_DPLL_M_MIN_100	    64
+#define MDFLD_DPLL_M_MAX_100	    64
+#define MDFLD_DPLL_P1_MIN_100	    2
+#define MDFLD_DPLL_P1_MAX_100	    2
+#define MDFLD_DSIPLL_M_MIN_19	    131
+#define MDFLD_DSIPLL_M_MAX_19	    175
+#define MDFLD_DSIPLL_P1_MIN_19	    3
+#define MDFLD_DSIPLL_P1_MAX_19	    8
+#define MDFLD_DSIPLL_M_MIN_25	    97
+#define MDFLD_DSIPLL_M_MAX_25	    140
+#define MDFLD_DSIPLL_P1_MIN_25	    3
+#define MDFLD_DSIPLL_P1_MAX_25	    9
+#define MDFLD_DSIPLL_M_MIN_83	    33
+#define MDFLD_DSIPLL_M_MAX_83	    92
+#define MDFLD_DSIPLL_P1_MIN_83	    2
+#define MDFLD_DSIPLL_P1_MAX_83	    3
+#define MDFLD_DSIPLL_M_MIN_100	    97
+#define MDFLD_DSIPLL_M_MAX_100	    140
+#define MDFLD_DSIPLL_P1_MIN_100	    3
+#define MDFLD_DSIPLL_P1_MAX_100	    9
+
+static const struct mrst_limit_t mdfld_limits[] = {
+	{			/* MDFLD_LIMT_DPLL_19 */
+	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19},
+	 .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19},
+	 },
+	{			/* MDFLD_LIMT_DPLL_25 */
+	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25},
+	 .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25},
+	 },
+	{			/* MDFLD_LIMT_DPLL_83 */
+	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83},
+	 .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83},
+	 },
+	{			/* MDFLD_LIMT_DPLL_100 */
+	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100},
+	 .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100},
+	 },
+	{			/* MDFLD_LIMT_DSIPLL_19 */
+	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19},
+	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19},
+	 },
+	{			/* MDFLD_LIMT_DSIPLL_25 */
+	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25},
+	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25},
+	 },
+	{			/* MDFLD_LIMT_DSIPLL_83 */
+	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83},
+	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83},
+	 },
+	{			/* MDFLD_LIMT_DSIPLL_100 */
+	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+	 .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100},
+	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100},
+	 },
+};
+
+#define MDFLD_M_MIN	    21
+#define MDFLD_M_MAX	    180
+static const u32 mdfld_m_converts[] = {
+/* M configuration table from 9-bit LFSR table */
+	224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
+	173, 342, 171, 85, 298, 149, 74, 37, 18, 265,   /* 31 - 40 */
+	388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
+	83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
+	341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
+	461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
+	106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
+	71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
+	253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
+	478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
+	477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
+	210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
+	145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
+	380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
+	103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
+	396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
+};
+
+static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
+{
+	const struct mrst_limit_t *limit = NULL;
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
+	    || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
+		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
+			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
+		else if (ksel == KSEL_BYPASS_25)
+			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
+		else if ((ksel == KSEL_BYPASS_83_100) &&
+				(dev_priv->core_freq == 166))
+			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
+		else if ((ksel == KSEL_BYPASS_83_100) &&
+			 (dev_priv->core_freq == 100 ||
+				dev_priv->core_freq == 200))
+			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
+	} else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
+			limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
+		else if (ksel == KSEL_BYPASS_25)
+			limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
+		else if ((ksel == KSEL_BYPASS_83_100) &&
+				(dev_priv->core_freq == 166))
+			limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
+		else if ((ksel == KSEL_BYPASS_83_100) &&
+				 (dev_priv->core_freq == 100 ||
+				 dev_priv->core_freq == 200))
+			limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
+	} else {
+		limit = NULL;
+		dev_dbg(dev->dev, "mdfld_limit Wrong display type.\n");
+	}
+
+	return limit;
+}
+
+/* Derive the pixel clock for the given refclk and divisors. */
+static void mdfld_clock(int refclk, struct mrst_clock_t *clock)
+{
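+	/* A minimal worked example (illustrative values): refclk = 19200 kHz,
+	 * m = 150, p1 = 3 gives dot = 19200 * 150 / 3 = 960000 kHz. */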
+	clock->dot = (refclk * clock->m) / clock->p1;
+}
+
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false.  Divisor values are the actual divisors for the
+ * computed dot clock.
+ */
+static bool
+mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
+		struct mrst_clock_t *best_clock)
+{
+	struct mrst_clock_t clock;
+	const struct mrst_limit_t *limit = mdfld_limit(crtc);
+	int err = target;
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+		for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+		     clock.p1++) {
+			int this_err;
+
+			mdfld_clock(refclk, &clock);
+
+			this_err = abs(clock.dot - target);
+			if (this_err < err) {
+				*best_clock = clock;
+				err = this_err;
+			}
+		}
+	}
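+	/* err starts at the raw target, so returning false means no (m, p1)
+	 * pair in the limit ranges produced a dot clock closer than that. */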
+	return err != target;
+}
+
+static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
+			      struct drm_display_mode *mode,
+			      struct drm_display_mode *adjusted_mode,
+			      int x, int y,
+			      struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	int refclk = 0;
+	int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
+								clk_tmp = 0;
+	struct mrst_clock_t clock;
+	bool ok;
+	u32 dpll = 0, fp = 0;
+	bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct psb_intel_encoder *psb_intel_encoder = NULL;
+	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int timeout = 0;
+	int ret;
+
+	dev_dbg(dev->dev, "pipe = 0x%x\n", pipe);
+
+#if 0
+	if (pipe == 1) {
+		if (!gma_power_begin(dev, true))
+			return 0;
+		android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode,
+			x, y, old_fb);
+		goto mrst_crtc_mode_set_exit;
+	}
+#endif
+
+	ret = check_fb(crtc->fb);
+	if (ret)
+		return ret;
+
+	dev_dbg(dev->dev, "adjusted_hdisplay = %d\n",
+		 adjusted_mode->hdisplay);
+	dev_dbg(dev->dev, "adjusted_vdisplay = %d\n",
+		 adjusted_mode->vdisplay);
+	dev_dbg(dev->dev, "adjusted_hsync_start = %d\n",
+		 adjusted_mode->hsync_start);
+	dev_dbg(dev->dev, "adjusted_hsync_end = %d\n",
+		 adjusted_mode->hsync_end);
+	dev_dbg(dev->dev, "adjusted_htotal = %d\n",
+		 adjusted_mode->htotal);
+	dev_dbg(dev->dev, "adjusted_vsync_start = %d\n",
+		 adjusted_mode->vsync_start);
+	dev_dbg(dev->dev, "adjusted_vsync_end = %d\n",
+		 adjusted_mode->vsync_end);
+	dev_dbg(dev->dev, "adjusted_vtotal = %d\n",
+		 adjusted_mode->vtotal);
+	dev_dbg(dev->dev, "adjusted_clock = %d\n",
+		 adjusted_mode->clock);
+	dev_dbg(dev->dev, "hdisplay = %d\n",
+		 mode->hdisplay);
+	dev_dbg(dev->dev, "vdisplay = %d\n",
+		 mode->vdisplay);
+
+	if (!gma_power_begin(dev, true))
+		return 0;
+
+	memcpy(&psb_intel_crtc->saved_mode, mode,
+					sizeof(struct drm_display_mode));
+	memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode,
+					sizeof(struct drm_display_mode));
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		if (!connector)
+			continue;
+
+		encoder = connector->encoder;
+
+		if (!encoder)
+			continue;
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+		switch (psb_intel_encoder->type) {
+		case INTEL_OUTPUT_MIPI:
+			is_mipi = true;
+			break;
+		case INTEL_OUTPUT_MIPI2:
+			is_mipi2 = true;
+			break;
+		case INTEL_OUTPUT_HDMI:
+			is_hdmi = true;
+			break;
+		}
+	}
+
+	/* Disable the VGA plane that we never use */
+	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+	/* Disable the panel fitter if it was on our pipe */
+	if (psb_intel_panel_fitter_pipe(dev) == pipe)
+		REG_WRITE(PFIT_CONTROL, 0);
+
+	/* pipesrc and dspsize control the size that is scaled from,
+	 * which should always be the user's requested size.
+	 */
+	if (pipe == 1) {
+		/* FIXME: To make HDMI display with 864x480 (TPO), 480x864
+		 * (PYR) or 480x854 (TMD), set the sprite width/height and
+		 * source image size registers with the adjusted mode for
+		 * pipe B.
+		 */
+
+		/*
+		 * The defined sprite rectangle must always be completely
+		 * contained within the displayable area of the screen image
+		 * (frame buffer).
+		 */
+		REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
+				| (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
+		/* Set the CRTC with encoder mode. */
+		REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16)
+				 | (mode->crtc_vdisplay - 1));
+	} else {
+		REG_WRITE(map->size,
+				((mode->crtc_vdisplay - 1) << 16) |
+						(mode->crtc_hdisplay - 1));
+		REG_WRITE(map->src,
+				((mode->crtc_hdisplay - 1) << 16) |
+						(mode->crtc_vdisplay - 1));
+	}
+
+	REG_WRITE(map->pos, 0);
+
+	if (psb_intel_encoder)
+		drm_object_property_get_value(&connector->base,
+			dev->mode_config.scaling_mode_property, &scalingType);
+
+	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+		/* Medfield doesn't have register support for centering so we
+		 * need to mess with the h/vblank and h/vsync start and ends
+		 * to get centering
+		 */
+		int offsetX = 0, offsetY = 0;
+
+		offsetX = (adjusted_mode->crtc_hdisplay -
+					mode->crtc_hdisplay) / 2;
+		offsetY = (adjusted_mode->crtc_vdisplay -
+					mode->crtc_vdisplay) / 2;
+
+		REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
+			((adjusted_mode->crtc_htotal - 1) << 16));
+		REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
+			((adjusted_mode->crtc_vtotal - 1) << 16));
+		REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start -
+								offsetX - 1) |
+			((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
+		REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start -
+								offsetX - 1) |
+			((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
+		REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start -
+								offsetY - 1) |
+			((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
+		REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start -
+								offsetY - 1) |
+			((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+	} else {
+		REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
+			((adjusted_mode->crtc_htotal - 1) << 16));
+		REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
+			((adjusted_mode->crtc_vtotal - 1) << 16));
+		REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
+			((adjusted_mode->crtc_hblank_end - 1) << 16));
+		REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
+			((adjusted_mode->crtc_hsync_end - 1) << 16));
+		REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
+			((adjusted_mode->crtc_vblank_end - 1) << 16));
+		REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
+			((adjusted_mode->crtc_vsync_end - 1) << 16));
+	}
+
+	/* Flush the plane changes */
+	{
+		struct drm_crtc_helper_funcs *crtc_funcs =
+		    crtc->helper_private;
+		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+	}
+
+	/* setup pipeconf */
+	dev_priv->pipeconf[pipe] = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
+
+	/* Set up the display plane register */
+	dev_priv->dspcntr[pipe] = REG_READ(map->cntr);
+	dev_priv->dspcntr[pipe] |= pipe << DISPPLANE_SEL_PIPE_POS;
+	dev_priv->dspcntr[pipe] |= DISPLAY_PLANE_ENABLE;
+
+	if (is_mipi2)
+		goto mrst_crtc_mode_set_exit;
+	clk = adjusted_mode->clock;
+
+	if (is_hdmi) {
+		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) {
+			refclk = 19200;
+
+			if (is_mipi || is_mipi2)
+				clk_n = 1, clk_p2 = 8;
+			else if (is_hdmi)
+				clk_n = 1, clk_p2 = 10;
+		} else if (ksel == KSEL_BYPASS_25) {
+			refclk = 25000;
+
+			if (is_mipi || is_mipi2)
+				clk_n = 1, clk_p2 = 8;
+			else if (is_hdmi)
+				clk_n = 1, clk_p2 = 10;
+		} else if ((ksel == KSEL_BYPASS_83_100) &&
+					dev_priv->core_freq == 166) {
+			refclk = 83000;
+
+			if (is_mipi || is_mipi2)
+				clk_n = 4, clk_p2 = 8;
+			else if (is_hdmi)
+				clk_n = 4, clk_p2 = 10;
+		} else if ((ksel == KSEL_BYPASS_83_100) &&
+					(dev_priv->core_freq == 100 ||
+					dev_priv->core_freq == 200)) {
+			refclk = 100000;
+			if (is_mipi || is_mipi2)
+				clk_n = 4, clk_p2 = 8;
+			else if (is_hdmi)
+				clk_n = 4, clk_p2 = 10;
+		}
+
+		if (is_mipi)
+			clk_byte = dev_priv->bpp / 8;
+		else if (is_mipi2)
+			clk_byte = dev_priv->bpp2 / 8;
+
+		clk_tmp = clk * clk_n * clk_p2 * clk_byte;
+
+		dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d.\n",
+					clk, clk_n, clk_p2);
+		dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d.\n",
+					adjusted_mode->clock, clk_tmp);
+
+		ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
+
+		if (!ok) {
+			DRM_ERROR
+			    ("mdfldFindBestPLL fail in mdfld_crtc_mode_set.\n");
+		} else {
+			m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
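+			/* e.g. clock.m == MDFLD_M_MIN (21) picks index 0,
+			 * i.e. the 9-bit LFSR encoding 224, which is written
+			 * into the FP0 M field below. */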
+
+			dev_dbg(dev->dev, "dot clock = %d,"
+				 "m = %d, p1 = %d, m_conv = %d.\n",
+					clock.dot, clock.m,
+					clock.p1, m_conv);
+		}
+
+		dpll = REG_READ(map->dpll);
+
+		if (dpll & DPLL_VCO_ENABLE) {
+			dpll &= ~DPLL_VCO_ENABLE;
+			REG_WRITE(map->dpll, dpll);
+			REG_READ(map->dpll);
+
+			/* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
+			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+			udelay(500);
+
+			/* reset M1, N1 & P1 */
+			REG_WRITE(map->fp0, 0);
+			dpll &= ~MDFLD_P1_MASK;
+			REG_WRITE(map->dpll, dpll);
+			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+			udelay(500);
+		}
+
+		/* When ungating the DPLL power, wait 0.5us before enabling
+		 * the VCO */
+		if (dpll & MDFLD_PWR_GATE_EN) {
+			dpll &= ~MDFLD_PWR_GATE_EN;
+			REG_WRITE(map->dpll, dpll);
+			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+			udelay(500);
+		}
+		dpll = 0;
+
+#if 0 /* FIXME revisit later */
+		if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19 ||
+						ksel == KSEL_BYPASS_25)
+			dpll &= ~MDFLD_INPUT_REF_SEL;
+		else if (ksel == KSEL_BYPASS_83_100)
+			dpll |= MDFLD_INPUT_REF_SEL;
+#endif /* FIXME revisit later */
+
+		if (is_hdmi)
+			dpll |= MDFLD_VCO_SEL;
+
+		fp = (clk_n / 2) << 16;
+		fp |= m_conv;
+
+		/* compute bitmask from p1 value */
+		dpll |= (1 << (clock.p1 - 2)) << 17;
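+		/* e.g. p1 == 2 sets bit 17, p1 == 3 sets bit 18, and so on:
+		 * a one-hot encoding of the P1 post divisor. */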
+
+#if 0 /* 1080p30 & 720p */
+		dpll = 0x00050000;
+		fp = 0x000001be;
+#endif
+#if 0 /* 480p */
+		dpll = 0x02010000;
+		fp = 0x000000d2;
+#endif
+	} else {
+#if 0 /*DBI_TPO_480x864*/
+		dpll = 0x00020000;
+		fp = 0x00000156;
+#endif /* DBI_TPO_480x864 */ /* get from spec. */
+
+		dpll = 0x00800000;
+		fp = 0x000000c1;
+	}
+
+	REG_WRITE(map->fp0, fp);
+	REG_WRITE(map->dpll, dpll);
+	/* FIXME_MDFLD PO - change 500 to 1 after PO */
+	udelay(500);
+
+	dpll |= DPLL_VCO_ENABLE;
+	REG_WRITE(map->dpll, dpll);
+	REG_READ(map->dpll);
+
+	/* wait for DSI PLL to lock */
+	while (timeout < 20000 &&
+			!(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
+		udelay(150);
+		timeout++;
+	}
+
+	if (is_mipi)
+		goto mrst_crtc_mode_set_exit;
+
+	dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);
+
+	REG_WRITE(map->conf, dev_priv->pipeconf[pipe]);
+	REG_READ(map->conf);
+
+	/* Wait for the pipe enable to take effect. */
+	REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
+	psb_intel_wait_for_vblank(dev);
+
+mrst_crtc_mode_set_exit:
+
+	gma_power_end(dev);
+
+	return 0;
+}
+
+const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
+	.dpms = mdfld_crtc_dpms,
+	.mode_fixup = psb_intel_crtc_mode_fixup,
+	.mode_set = mdfld_crtc_mode_set,
+	.mode_set_base = mdfld__intel_pipe_set_base,
+	.prepare = psb_intel_crtc_prepare,
+	.commit = psb_intel_crtc_commit,
+};
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/mdfld_output.c b/linux-imx/drivers/gpu/drm/gma500/mdfld_output.c
new file mode 100644
index 0000000..c95966b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mdfld_output.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c)  2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+#include "mdfld_output.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_output.h"
+
+#include "tc35876x-dsi-lvds.h"
+
+int mdfld_get_panel_type(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	return dev_priv->mdfld_panel_id;
+}
+
+static void mdfld_init_panel(struct drm_device *dev, int mipi_pipe,
+								int p_type)
+{
+	switch (p_type) {
+	case TPO_VID:
+		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tpo_vid_funcs);
+		break;
+	case TC35876X:
+		tc35876x_init(dev);
+		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tc35876x_funcs);
+		break;
+	case TMD_VID:
+		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tmd_vid_funcs);
+		break;
+	case HDMI:
+/*		if (dev_priv->mdfld_hdmi_present)
+			mdfld_hdmi_init(dev, &dev_priv->mode_dev); */
+		break;
+	}
+}
+
+
+int mdfld_output_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	/* FIXME: hardcoded for now */
+	dev_priv->mdfld_panel_id = TC35876X;
+	/* MIPI panel 1 */
+	mdfld_init_panel(dev, 0, dev_priv->mdfld_panel_id);
+	/* HDMI panel */
+	mdfld_init_panel(dev, 1, HDMI);
+	return 0;
+}
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/mdfld_output.h b/linux-imx/drivers/gpu/drm/gma500/mdfld_output.h
new file mode 100644
index 0000000..ab2b27c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mdfld_output.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c)  2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+#ifndef MDFLD_OUTPUT_H
+#define MDFLD_OUTPUT_H
+
+#include "psb_drv.h"
+
+#define TPO_PANEL_WIDTH		84
+#define TPO_PANEL_HEIGHT	46
+#define TMD_PANEL_WIDTH		39
+#define TMD_PANEL_HEIGHT	71
+
+struct mdfld_dsi_config;
+
+enum panel_type {
+	TPO_VID,
+	TMD_VID,
+	HDMI,
+	TC35876X,
+};
+
+struct panel_info {
+	u32 width_mm;
+	u32 height_mm;
+	/* Other info */
+};
+
+struct panel_funcs {
+	const struct drm_encoder_funcs *encoder_funcs;
+	const struct drm_encoder_helper_funcs *encoder_helper_funcs;
+	struct drm_display_mode * (*get_config_mode)(struct drm_device *);
+	int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
+	int (*reset)(int pipe);
+	void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe);
+};
+
+int mdfld_output_init(struct drm_device *dev);
+
+struct backlight_device *mdfld_get_backlight_device(void);
+int mdfld_set_brightness(struct backlight_device *bd);
+
+int mdfld_get_panel_type(struct drm_device *dev, int pipe);
+
+extern const struct drm_crtc_helper_funcs mdfld_helper_funcs;
+
+extern const struct panel_funcs mdfld_tmd_vid_funcs;
+extern const struct panel_funcs mdfld_tpo_vid_funcs;
+
+extern void mdfld_disable_crtc(struct drm_device *dev, int pipe);
+extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe);
+extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe);
+#endif
diff --git a/linux-imx/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/linux-imx/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
new file mode 100644
index 0000000..dc0c6c3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jim Liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ * Gideon Eaton <eaton.
+ * Scott Rowe <scott.m.rowe@intel.com>
+ */
+
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+static struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev)
+{
+	struct drm_display_mode *mode;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
+	bool use_gct = false; /* Disable GCT for now */
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	if (use_gct) {
+		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+		mode->hsync_start = mode->hdisplay + \
+				((ti->hsync_offset_hi << 8) | \
+				ti->hsync_offset_lo);
+		mode->hsync_end = mode->hsync_start + \
+				((ti->hsync_pulse_width_hi << 8) | \
+				ti->hsync_pulse_width_lo);
+		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
+								ti->hblank_lo);
+		mode->vsync_start = \
+			mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
+						ti->vsync_offset_lo);
+		mode->vsync_end = \
+			mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
+						ti->vsync_pulse_width_lo);
+		mode->vtotal = mode->vdisplay + \
+				((ti->vblank_hi << 8) | ti->vblank_lo);
+		mode->clock = ti->pixel_clock * 10;
+
+		dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+		dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+		dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+		dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+		dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+		dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+		dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+		dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+		dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+	} else {
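+		/* Fall back to fixed 480x854 TMD panel timings when GCT
+		 * data is not used. */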
+		mode->hdisplay = 480;
+		mode->vdisplay = 854;
+		mode->hsync_start = 487;
+		mode->hsync_end = 490;
+		mode->htotal = 499;
+		mode->vsync_start = 861;
+		mode->vsync_end = 865;
+		mode->vtotal = 873;
+		mode->clock = 33264;
+	}
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	return mode;
+}
+
+static int tmd_vid_get_panel_info(struct drm_device *dev,
+				int pipe,
+				struct panel_info *pi)
+{
+	if (!dev || !pi)
+		return -EINVAL;
+
+	pi->width_mm = TMD_PANEL_WIDTH;
+	pi->height_mm = TMD_PANEL_HEIGHT;
+
+	return 0;
+}
+
+/* ************************************************************************* *\
+ * FUNCTION: mdfld_init_TMD_MIPI
+ *
+ * DESCRIPTION:  This function is called only by mrst_dsi_mode_set and
+ *               restore_display_registers.  Since this function does not
+ *               acquire the mutex, it is important that the calling function
+ *               does!
+\* ************************************************************************* */
+
+/* FIXME: make the below data u8 instead of u32; note byte order! */
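+/* On this little-endian platform each u32 below is emitted low byte first,
+ * so the command byte (e.g. 0xb2 for mcap_off) is the LSB and the higher
+ * bytes are its parameters/padding. */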
+static u32 tmd_cmd_mcap_off[] = {0x000000b2};
+static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef};
+static u32 tmd_cmd_set_lane_num[] = {0x006360ef};
+static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef};
+static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef};
+static u32 tmd_cmd_set_mode[] = {0x000000b3};
+static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef};
+static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df};
+static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055};
+static u32 tmd_cmd_set_video_mode[] = {0x00000153};
+/* No auto-backlight support yet; to be added in the future */
+static u32 tmd_cmd_enable_backlight[] = {0x00005ab4};
+static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd};
+
+static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config,
+				      int pipe)
+{
+	struct mdfld_dsi_pkg_sender *sender
+			= mdfld_dsi_get_pkg_sender(dsi_config);
+
+	DRM_INFO("Enter mdfld init TMD MIPI display.\n");
+
+	if (!sender) {
+		DRM_ERROR("Cannot get sender\n");
+		return;
+	}
+
+	if (dsi_config->dvr_ic_inited)
+		return;
+
+	msleep(3);
+
+	/* FIXME: make the below data u8 instead of u32; note byte order! */
+
+	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_mcap_off,
+				sizeof(tmd_cmd_mcap_off), false);
+	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_lane_switch,
+				sizeof(tmd_cmd_enable_lane_switch), false);
+	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_lane_num,
+				sizeof(tmd_cmd_set_lane_num), false);
+	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock0,
+				sizeof(tmd_cmd_pushing_clock0), false);
+	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock1,
+				sizeof(tmd_cmd_pushing_clock1), false);
+	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_mode,
+				sizeof(tmd_cmd_set_mode), false);
+	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_sync_pulse_mode,
+				sizeof(tmd_cmd_set_sync_pulse_mode), false);
+	mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_column,
+				sizeof(tmd_cmd_set_column), false);
+	mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_page,
+				sizeof(tmd_cmd_set_page), false);
+	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_video_mode,
+				sizeof(tmd_cmd_set_video_mode), false);
+	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_backlight,
+				sizeof(tmd_cmd_enable_backlight), false);
+	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_backlight_dimming,
+				sizeof(tmd_cmd_set_backlight_dimming), false);
+
+	dsi_config->dvr_ic_inited = 1;
+}
+
+/*TPO DPI encoder helper funcs*/
+static const struct drm_encoder_helper_funcs
+				mdfld_tpo_dpi_encoder_helper_funcs = {
+	.dpms = mdfld_dsi_dpi_dpms,
+	.mode_fixup = mdfld_dsi_dpi_mode_fixup,
+	.prepare = mdfld_dsi_dpi_prepare,
+	.mode_set = mdfld_dsi_dpi_mode_set,
+	.commit = mdfld_dsi_dpi_commit,
+};
+
+/*TPO DPI encoder funcs*/
+static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+const struct panel_funcs mdfld_tmd_vid_funcs = {
+	.encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
+	.encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
+	.get_config_mode = &tmd_vid_get_config_mode,
+	.get_panel_info = tmd_vid_get_panel_info,
+	.reset = mdfld_dsi_panel_reset,
+	.drv_ic_init = mdfld_dsi_tmd_drv_ic_init,
+};
diff --git a/linux-imx/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/linux-imx/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
new file mode 100644
index 0000000..d8d4170
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_dpi.h"
+
+static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev)
+{
+	struct drm_display_mode *mode;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
+	bool use_gct = false;
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	if (use_gct) {
+		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+		mode->hsync_start = mode->hdisplay +
+				((ti->hsync_offset_hi << 8) |
+				ti->hsync_offset_lo);
+		mode->hsync_end = mode->hsync_start +
+				((ti->hsync_pulse_width_hi << 8) |
+				ti->hsync_pulse_width_lo);
+		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) |
+								ti->hblank_lo);
+		mode->vsync_start =
+			mode->vdisplay + ((ti->vsync_offset_hi << 8) |
+						ti->vsync_offset_lo);
+		mode->vsync_end =
+			mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) |
+						ti->vsync_pulse_width_lo);
+		mode->vtotal = mode->vdisplay +
+				((ti->vblank_hi << 8) | ti->vblank_lo);
+		mode->clock = ti->pixel_clock * 10;
+
+		dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+		dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+		dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+		dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+		dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+		dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+		dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+		dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+		dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+	} else {
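+		/* Fall back to fixed 864x480 TPO panel timings when GCT
+		 * data is not used. */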
+		mode->hdisplay = 864;
+		mode->vdisplay = 480;
+		mode->hsync_start = 873;
+		mode->hsync_end = 876;
+		mode->htotal = 887;
+		mode->vsync_start = 487;
+		mode->vsync_end = 490;
+		mode->vtotal = 499;
+		mode->clock = 33264;
+	}
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	return mode;
+}
+
+static int tpo_vid_get_panel_info(struct drm_device *dev,
+				int pipe,
+				struct panel_info *pi)
+{
+	if (!dev || !pi)
+		return -EINVAL;
+
+	pi->width_mm = TPO_PANEL_WIDTH;
+	pi->height_mm = TPO_PANEL_HEIGHT;
+
+	return 0;
+}
+
+/*TPO DPI encoder helper funcs*/
+static const struct drm_encoder_helper_funcs
+				mdfld_tpo_dpi_encoder_helper_funcs = {
+	.dpms = mdfld_dsi_dpi_dpms,
+	.mode_fixup = mdfld_dsi_dpi_mode_fixup,
+	.prepare = mdfld_dsi_dpi_prepare,
+	.mode_set = mdfld_dsi_dpi_mode_set,
+	.commit = mdfld_dsi_dpi_commit,
+};
+
+/*TPO DPI encoder funcs*/
+static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+const struct panel_funcs mdfld_tpo_vid_funcs = {
+	.encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
+	.encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
+	.get_config_mode = &tpo_vid_get_config_mode,
+	.get_panel_info = tpo_vid_get_panel_info,
+};
diff --git a/linux-imx/drivers/gpu/drm/gma500/mid_bios.c b/linux-imx/drivers/gpu/drm/gma500/mid_bios.c
new file mode 100644
index 0000000..a97e38e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mid_bios.c
@@ -0,0 +1,338 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* TODO
+ * - Split functions by vbt type
+ * - Make them all take drm_device
+ * - Check ioremap failures
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "mid_bios.h"
+
+static void mid_get_fuse_settings(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	uint32_t fuse_value = 0;
+	uint32_t fuse_value_tmp = 0;
+
+#define FB_REG06 0xD0810600
+#define FB_MIPI_DISABLE  (1 << 11)
+#define FB_REG09 0xD0810900
+#define FB_SKU_MASK  0x7000
+#define FB_SKU_SHIFT 12
+#define FB_SKU_100 0
+#define FB_SKU_100L 1
+#define FB_SKU_83 2
+	if (pci_root == NULL) {
+		WARN_ON(1);
+		return;
+	}
+
+
+	pci_write_config_dword(pci_root, 0xD0, FB_REG06);
+	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+	/* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
+	if (IS_MRST(dev))
+		dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
+
+	DRM_INFO("internal display is %s\n",
+		 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
+
+	/* Prevent runtime suspend at start */
+	if (dev_priv->iLVDS_enable) {
+		dev_priv->is_lvds_on = true;
+		dev_priv->is_mipi_on = false;
+	} else {
+		dev_priv->is_mipi_on = true;
+		dev_priv->is_lvds_on = false;
+	}
+
+	dev_priv->video_device_fuse = fuse_value;
+
+	pci_write_config_dword(pci_root, 0xD0, FB_REG09);
+	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+	dev_dbg(dev->dev, "SKU values is 0x%x.\n", fuse_value);
+	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
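+	/* e.g. an illustrative fuse_value of 0x2000: (0x2000 & 0x7000) >> 12
+	 * yields 2 == FB_SKU_83, selecting the 166 MHz core clock below. */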
+
+	dev_priv->fuse_reg_value = fuse_value;
+
+	switch (fuse_value_tmp) {
+	case FB_SKU_100:
+		dev_priv->core_freq = 200;
+		break;
+	case FB_SKU_100L:
+		dev_priv->core_freq = 100;
+		break;
+	case FB_SKU_83:
+		dev_priv->core_freq = 166;
+		break;
+	default:
+		dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
+								fuse_value_tmp);
+		dev_priv->core_freq = 0;
+	}
+	dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
+	pci_dev_put(pci_root);
+}
+
+/*
+ *	Get the revision ID, B0:D2:F0;0x08
+ */
+static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
+{
+	uint32_t platform_rev_id = 0;
+	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+
+	if (pci_gfx_root == NULL) {
+		WARN_ON(1);
+		return;
+	}
+	pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
+	dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
+	pci_dev_put(pci_gfx_root);
+	dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
+					dev_priv->platform_rev_id);
+}
+
+struct mid_vbt_header {
+	u32 signature;
+	u8 revision;
+} __packed;
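+/* 5 bytes when packed: the 4-byte "$GCT" signature followed by a one-byte
+ * revision that selects which vbt/gct layout below applies. */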
+
+/* The same for r0 and r1 */
+struct vbt_r0 {
+	struct mid_vbt_header vbt_header;
+	u8 size;
+	u8 checksum;
+} __packed;
+
+struct vbt_r10 {
+	struct mid_vbt_header vbt_header;
+	u8 checksum;
+	u16 size;
+	u8 panel_count;
+	u8 primary_panel_idx;
+	u8 secondary_panel_idx;
+	u8 __reserved[5];
+} __packed;
+
+static int read_vbt_r0(u32 addr, struct vbt_r0 *vbt)
+{
+	void __iomem *vbt_virtual;
+
+	vbt_virtual = ioremap(addr, sizeof(*vbt));
+	if (vbt_virtual == NULL)
+		return -1;
+
+	memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
+	iounmap(vbt_virtual);
+
+	return 0;
+}
+
+static int read_vbt_r10(u32 addr, struct vbt_r10 *vbt)
+{
+	void __iomem *vbt_virtual;
+
+	vbt_virtual = ioremap(addr, sizeof(*vbt));
+	if (!vbt_virtual)
+		return -1;
+
+	memcpy_fromio(vbt, vbt_virtual, sizeof(*vbt));
+	iounmap(vbt_virtual);
+
+	return 0;
+}
+
+static int mid_get_vbt_data_r0(struct drm_psb_private *dev_priv, u32 addr)
+{
+	struct vbt_r0 vbt;
+	void __iomem *gct_virtual;
+	struct gct_r0 gct;
+	u8 bpi;
+
+	if (read_vbt_r0(addr, &vbt))
+		return -1;
+
+	gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
+	if (!gct_virtual)
+		return -1;
+	memcpy_fromio(&gct, gct_virtual, sizeof(gct));
+	iounmap(gct_virtual);
+
+	bpi = gct.PD.BootPanelIndex;
+	dev_priv->gct_data.bpi = bpi;
+	dev_priv->gct_data.pt = gct.PD.PanelType;
+	dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
+	dev_priv->gct_data.Panel_Port_Control =
+		gct.panel[bpi].Panel_Port_Control;
+	dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+		gct.panel[bpi].Panel_MIPI_Display_Descriptor;
+
+	return 0;
+}
+
+static int mid_get_vbt_data_r1(struct drm_psb_private *dev_priv, u32 addr)
+{
+	struct vbt_r0 vbt;
+	void __iomem *gct_virtual;
+	struct gct_r1 gct;
+	u8 bpi;
+
+	if (read_vbt_r0(addr, &vbt))
+		return -1;
+
+	gct_virtual = ioremap(addr + sizeof(vbt), vbt.size - sizeof(vbt));
+	if (!gct_virtual)
+		return -1;
+	memcpy_fromio(&gct, gct_virtual, sizeof(gct));
+	iounmap(gct_virtual);
+
+	bpi = gct.PD.BootPanelIndex;
+	dev_priv->gct_data.bpi = bpi;
+	dev_priv->gct_data.pt = gct.PD.PanelType;
+	dev_priv->gct_data.DTD = gct.panel[bpi].DTD;
+	dev_priv->gct_data.Panel_Port_Control =
+		gct.panel[bpi].Panel_Port_Control;
+	dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+		gct.panel[bpi].Panel_MIPI_Display_Descriptor;
+
+	return 0;
+}
+
+static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
+{
+	struct vbt_r10 vbt;
+	void __iomem *gct_virtual;
+	struct gct_r10 *gct;
+	struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
+	struct gct_r10_timing_info *ti;
+	int ret = -1;
+
+	if (read_vbt_r10(addr, &vbt))
+		return -1;
+
+	gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
+	if (!gct)
+		return -1;
+
+	gct_virtual = ioremap(addr + sizeof(vbt),
+			sizeof(*gct) * vbt.panel_count);
+	if (!gct_virtual)
+		goto out;
+	/* Copy the whole panel array; primary_panel_idx may be non-zero */
+	memcpy_fromio(gct, gct_virtual, sizeof(*gct) * vbt.panel_count);
+	iounmap(gct_virtual);
+
+	dev_priv->gct_data.bpi = vbt.primary_panel_idx;
+	dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+		gct[vbt.primary_panel_idx].Panel_MIPI_Display_Descriptor;
+
+	ti = &gct[vbt.primary_panel_idx].DTD;
+	dp_ti->pixel_clock = ti->pixel_clock;
+	dp_ti->hactive_hi = ti->hactive_hi;
+	dp_ti->hactive_lo = ti->hactive_lo;
+	dp_ti->hblank_hi = ti->hblank_hi;
+	dp_ti->hblank_lo = ti->hblank_lo;
+	dp_ti->hsync_offset_hi = ti->hsync_offset_hi;
+	dp_ti->hsync_offset_lo = ti->hsync_offset_lo;
+	dp_ti->hsync_pulse_width_hi = ti->hsync_pulse_width_hi;
+	dp_ti->hsync_pulse_width_lo = ti->hsync_pulse_width_lo;
+	dp_ti->vactive_hi = ti->vactive_hi;
+	dp_ti->vactive_lo = ti->vactive_lo;
+	dp_ti->vblank_hi = ti->vblank_hi;
+	dp_ti->vblank_lo = ti->vblank_lo;
+	dp_ti->vsync_offset_hi = ti->vsync_offset_hi;
+	dp_ti->vsync_offset_lo = ti->vsync_offset_lo;
+	dp_ti->vsync_pulse_width_hi = ti->vsync_pulse_width_hi;
+	dp_ti->vsync_pulse_width_lo = ti->vsync_pulse_width_lo;
+
+	ret = 0;
+out:
+	kfree(gct);
+	return ret;
+}
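+
+/*
+ * Note on the copy above: the r10 DTD is transferred field by field
+ * rather than with a single memcpy because struct gct_r10_timing_info
+ * and struct oaktrail_timing_info pack the same logical values into
+ * different bitfield layouts (see oaktrail.h).
+ */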
+
+static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	u32 addr;
+	u8 __iomem *vbt_virtual;
+	struct mid_vbt_header vbt_header;
+	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+	int ret = -1;
+
+	/* Get the address of the platform config vbt */
+	pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
+	pci_dev_put(pci_gfx_root);
+
+	dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
+
+	if (!addr)
+		goto out;
+
+	/* get the virtual address of the vbt */
+	vbt_virtual = ioremap(addr, sizeof(vbt_header));
+	if (!vbt_virtual)
+		goto out;
+
+	memcpy_fromio(&vbt_header, vbt_virtual, sizeof(vbt_header));
+	iounmap(vbt_virtual);
+
+	if (memcmp(&vbt_header.signature, "$GCT", 4))
+		goto out;
+
+	dev_dbg(dev->dev, "GCT revision is %02x\n", vbt_header.revision);
+
+	switch (vbt_header.revision) {
+	case 0x00:
+		ret = mid_get_vbt_data_r0(dev_priv, addr);
+		break;
+	case 0x01:
+		ret = mid_get_vbt_data_r1(dev_priv, addr);
+		break;
+	case 0x10:
+		ret = mid_get_vbt_data_r10(dev_priv, addr);
+		break;
+	default:
+		dev_err(dev->dev, "Unknown revision of GCT!\n");
+	}
+
+out:
+	if (ret)
+		dev_err(dev->dev, "Unable to read GCT!");
+	else
+		dev_priv->has_gct = true;
+}
+
+int mid_chip_setup(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	mid_get_fuse_settings(dev);
+	mid_get_vbt_data(dev_priv);
+	mid_get_pci_revID(dev_priv);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/mid_bios.h b/linux-imx/drivers/gpu/drm/gma500/mid_bios.h
new file mode 100644
index 0000000..00e7d56
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mid_bios.h
@@ -0,0 +1,21 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+extern int mid_chip_setup(struct drm_device *dev);
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/mmu.c b/linux-imx/drivers/gpu/drm/gma500/mmu.c
new file mode 100644
index 0000000..49bac41
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/mmu.c
@@ -0,0 +1,849 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+
+/*
+ * Code for the SGX MMU:
+ */
+
+/*
+ * clflush on one processor only:
+ * clflush should apparently flush the cache line on all processors in an
+ * SMP system.
+ */
+
+/*
+ * kmap atomic:
+ * The usage of the slots must be completely encapsulated within a spinlock, and
+ * no other functions that may be using the locks for other purposes may be
+ * called from within the locked region.
+ * Since the slots are per processor, this will guarantee that we are the only
+ * user.
+ */
+
+/*
+ * TODO: Inserting ptes from an interrupt handler:
+ * This may be desirable for some SGX functionality where the GPU can fault in
+ * needed pages. For that, we need to make an atomic insert_pages function, that
+ * may fail.
+ * If it fails, the caller needs to insert the page using a workqueue function,
+ * but on average it should be fast.
+ */
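+
+/*
+ * Lock ordering summary (inferred from the functions below): the
+ * driver rw-semaphore is always taken first, in read mode for page
+ * table updates and in write mode for page directory changes; the
+ * driver spinlock nests inside it and guards the actual table words.
+ * A typical update therefore looks like:
+ *
+ *	down_read(&driver->sem);
+ *	spin_lock(&driver->lock);
+ *	... edit PTEs through kmap_atomic() mappings ...
+ *	spin_unlock(&driver->lock);
+ *	up_read(&driver->sem);
+ */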
+
+struct psb_mmu_driver {
+	/* protects driver and pd structures. Always take in read mode
+	 * before taking the page table spinlock.
+	 */
+	struct rw_semaphore sem;
+
+	/* protects page tables, page directory tables
+	 * and pt structures.
+	 */
+	spinlock_t lock;
+
+	atomic_t needs_tlbflush;
+
+	uint8_t __iomem *register_map;
+	struct psb_mmu_pd *default_pd;
+	/*uint32_t bif_ctrl;*/
+	int has_clflush;
+	int clflush_add;
+	unsigned long clflush_mask;
+
+	struct drm_psb_private *dev_priv;
+};
+
+struct psb_mmu_pd;
+
+struct psb_mmu_pt {
+	struct psb_mmu_pd *pd;
+	uint32_t index;
+	uint32_t count;
+	struct page *p;
+	uint32_t *v;
+};
+
+struct psb_mmu_pd {
+	struct psb_mmu_driver *driver;
+	int hw_context;
+	struct psb_mmu_pt **tables;
+	struct page *p;
+	struct page *dummy_pt;
+	struct page *dummy_page;
+	uint32_t pd_mask;
+	uint32_t invalid_pde;
+	uint32_t invalid_pte;
+};
+
+static inline uint32_t psb_mmu_pt_index(uint32_t offset)
+{
+	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
+}
+
+static inline uint32_t psb_mmu_pd_index(uint32_t offset)
+{
+	return offset >> PSB_PDE_SHIFT;
+}
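+
+/*
+ * Worked example for the two helpers above, assuming the usual
+ * PSB_PTE_SHIFT of 12 and PSB_PDE_SHIFT of 22 (4 KiB pages, 1024
+ * entries per table): for offset 0x00c03000,
+ *
+ *	psb_mmu_pd_index() = 0x00c03000 >> 22           = 3
+ *	psb_mmu_pt_index() = (0x00c03000 >> 12) & 0x3FF = 3
+ */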
+
+static inline void psb_clflush(void *addr)
+{
+	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
+}
+
+static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
+				   void *addr)
+{
+	if (!driver->has_clflush)
+		return;
+
+	mb();
+	psb_clflush(addr);
+	mb();
+}
+
+static void psb_page_clflush(struct psb_mmu_driver *driver, struct page *page)
+{
+	uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
+	uint32_t clflush_count = PAGE_SIZE / clflush_add;
+	int i;
+	uint8_t *clf;
+
+	clf = kmap_atomic(page);
+	mb();
+	for (i = 0; i < clflush_count; ++i) {
+		psb_clflush(clf);
+		clf += clflush_add;
+	}
+	mb();
+	kunmap_atomic(clf);
+}
+
+static void psb_pages_clflush(struct psb_mmu_driver *driver,
+				struct page *page[], unsigned long num_pages)
+{
+	int i;
+
+	if (!driver->has_clflush)
+		return;
+
+	for (i = 0; i < num_pages; i++)
+		psb_page_clflush(driver, *page++);
+}
+
+static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
+				    int force)
+{
+	atomic_set(&driver->needs_tlbflush, 0);
+}
+
+static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
+{
+	down_write(&driver->sem);
+	psb_mmu_flush_pd_locked(driver, force);
+	up_write(&driver->sem);
+}
+
+void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
+{
+	if (rc_prot)
+		down_write(&driver->sem);
+	if (rc_prot)
+		up_write(&driver->sem);
+}
+
+void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
+{
+	/*ttm_tt_cache_flush(&pd->p, 1);*/
+	psb_pages_clflush(pd->driver, &pd->p, 1);
+	down_write(&pd->driver->sem);
+	wmb();
+	psb_mmu_flush_pd_locked(pd->driver, 1);
+	pd->hw_context = hw_context;
+	up_write(&pd->driver->sem);
+
+}
+
+static inline unsigned long psb_pd_addr_end(unsigned long addr,
+					    unsigned long end)
+{
+
+	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
+	return (addr < end) ? addr : end;
+}
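+
+/*
+ * Example (assuming PSB_PDE_MASK is (1 << 22) - 1): psb_pd_addr_end()
+ * rounds the address up to the next page directory boundary, clamped
+ * to end, so for addr = 0x00123000 and end = 0x00800000 it returns
+ * 0x00400000.
+ */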
+
+static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
+{
+	uint32_t mask = PSB_PTE_VALID;
+
+	if (type & PSB_MMU_CACHED_MEMORY)
+		mask |= PSB_PTE_CACHED;
+	if (type & PSB_MMU_RO_MEMORY)
+		mask |= PSB_PTE_RO;
+	if (type & PSB_MMU_WO_MEMORY)
+		mask |= PSB_PTE_WO;
+
+	return (pfn << PAGE_SHIFT) | mask;
+}
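+
+/*
+ * Example (sketch): mapping pfn 0x1234 cached and read-only,
+ *
+ *	psb_mmu_mask_pte(0x1234, PSB_MMU_CACHED_MEMORY | PSB_MMU_RO_MEMORY)
+ *
+ * yields (0x1234 << PAGE_SHIFT) | PSB_PTE_VALID | PSB_PTE_CACHED |
+ * PSB_PTE_RO: the physical page address in the high bits, attribute
+ * flags in the low bits.
+ */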
+
+struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+				    int trap_pagefaults, int invalid_type)
+{
+	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+	uint32_t *v;
+	int i;
+
+	if (!pd)
+		return NULL;
+
+	pd->p = alloc_page(GFP_DMA32);
+	if (!pd->p)
+		goto out_err1;
+	pd->dummy_pt = alloc_page(GFP_DMA32);
+	if (!pd->dummy_pt)
+		goto out_err2;
+	pd->dummy_page = alloc_page(GFP_DMA32);
+	if (!pd->dummy_page)
+		goto out_err3;
+
+	if (!trap_pagefaults) {
+		pd->invalid_pde =
+		    psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+				     invalid_type);
+		pd->invalid_pte =
+		    psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+				     invalid_type);
+	} else {
+		pd->invalid_pde = 0;
+		pd->invalid_pte = 0;
+	}
+
+	v = kmap(pd->dummy_pt);
+	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+		v[i] = pd->invalid_pte;
+
+	kunmap(pd->dummy_pt);
+
+	v = kmap(pd->p);
+	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+		v[i] = pd->invalid_pde;
+
+	kunmap(pd->p);
+
+	clear_page(kmap(pd->dummy_page));
+	kunmap(pd->dummy_page);
+
+	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
+	if (!pd->tables)
+		goto out_err4;
+
+	pd->hw_context = -1;
+	pd->pd_mask = PSB_PTE_VALID;
+	pd->driver = driver;
+
+	return pd;
+
+out_err4:
+	__free_page(pd->dummy_page);
+out_err3:
+	__free_page(pd->dummy_pt);
+out_err2:
+	__free_page(pd->p);
+out_err1:
+	kfree(pd);
+	return NULL;
+}
+
+static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
+{
+	__free_page(pt->p);
+	kfree(pt);
+}
+
+void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
+{
+	struct psb_mmu_driver *driver = pd->driver;
+	struct psb_mmu_pt *pt;
+	int i;
+
+	down_write(&driver->sem);
+	if (pd->hw_context != -1)
+		psb_mmu_flush_pd_locked(driver, 1);
+
+	/* Should take the spinlock here, but we don't need to do that
+	   since we have the semaphore in write mode. */
+
+	for (i = 0; i < 1024; ++i) {
+		pt = pd->tables[i];
+		if (pt)
+			psb_mmu_free_pt(pt);
+	}
+
+	vfree(pd->tables);
+	__free_page(pd->dummy_page);
+	__free_page(pd->dummy_pt);
+	__free_page(pd->p);
+	kfree(pd);
+	up_write(&driver->sem);
+}
+
+static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
+{
+	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
+	void *v;
+	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
+	uint32_t clflush_count = PAGE_SIZE / clflush_add;
+	spinlock_t *lock = &pd->driver->lock;
+	uint8_t *clf;
+	uint32_t *ptes;
+	int i;
+
+	if (!pt)
+		return NULL;
+
+	pt->p = alloc_page(GFP_DMA32);
+	if (!pt->p) {
+		kfree(pt);
+		return NULL;
+	}
+
+	spin_lock(lock);
+
+	v = kmap_atomic(pt->p);
+	clf = (uint8_t *) v;
+	ptes = (uint32_t *) v;
+	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+		*ptes++ = pd->invalid_pte;
+
+
+	if (pd->driver->has_clflush && pd->hw_context != -1) {
+		mb();
+		for (i = 0; i < clflush_count; ++i) {
+			psb_clflush(clf);
+			clf += clflush_add;
+		}
+		mb();
+	}
+
+	kunmap_atomic(v);
+	spin_unlock(lock);
+
+	pt->count = 0;
+	pt->pd = pd;
+	pt->index = 0;
+
+	return pt;
+}
+
+static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+					     unsigned long addr)
+{
+	uint32_t index = psb_mmu_pd_index(addr);
+	struct psb_mmu_pt *pt;
+	uint32_t *v;
+	spinlock_t *lock = &pd->driver->lock;
+
+	spin_lock(lock);
+	pt = pd->tables[index];
+	while (!pt) {
+		spin_unlock(lock);
+		pt = psb_mmu_alloc_pt(pd);
+		if (!pt)
+			return NULL;
+		spin_lock(lock);
+
+		if (pd->tables[index]) {
+			spin_unlock(lock);
+			psb_mmu_free_pt(pt);
+			spin_lock(lock);
+			pt = pd->tables[index];
+			continue;
+		}
+
+		v = kmap_atomic(pd->p);
+		pd->tables[index] = pt;
+		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
+		pt->index = index;
+		kunmap_atomic((void *) v);
+
+		if (pd->hw_context != -1) {
+			psb_mmu_clflush(pd->driver, (void *) &v[index]);
+			atomic_set(&pd->driver->needs_tlbflush, 1);
+		}
+	}
+	pt->v = kmap_atomic(pt->p);
+	return pt;
+}
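+
+/*
+ * The loop above is the classic drop-lock-and-allocate pattern: the
+ * spinlock cannot be held across alloc_page(), so it is released while
+ * a new page table is allocated and the slot is re-checked after
+ * relocking.  If another thread installed a table in the meantime, the
+ * freshly allocated one is freed and the winner's table is used.
+ */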
+
+static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
+					      unsigned long addr)
+{
+	uint32_t index = psb_mmu_pd_index(addr);
+	struct psb_mmu_pt *pt;
+	spinlock_t *lock = &pd->driver->lock;
+
+	spin_lock(lock);
+	pt = pd->tables[index];
+	if (!pt) {
+		spin_unlock(lock);
+		return NULL;
+	}
+	pt->v = kmap_atomic(pt->p);
+	return pt;
+}
+
+static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
+{
+	struct psb_mmu_pd *pd = pt->pd;
+	uint32_t *v;
+
+	kunmap_atomic(pt->v);
+	if (pt->count == 0) {
+		v = kmap_atomic(pd->p);
+		v[pt->index] = pd->invalid_pde;
+		pd->tables[pt->index] = NULL;
+
+		if (pd->hw_context != -1) {
+			psb_mmu_clflush(pd->driver,
+					(void *) &v[pt->index]);
+			atomic_set(&pd->driver->needs_tlbflush, 1);
+		}
+		kunmap_atomic(v);
+		spin_unlock(&pd->driver->lock);
+		psb_mmu_free_pt(pt);
+		return;
+	}
+	spin_unlock(&pd->driver->lock);
+}
+
+static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
+				   unsigned long addr, uint32_t pte)
+{
+	pt->v[psb_mmu_pt_index(addr)] = pte;
+}
+
+static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
+					  unsigned long addr)
+{
+	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
+}
+
+
+void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
+			uint32_t mmu_offset, uint32_t gtt_start,
+			uint32_t gtt_pages)
+{
+	uint32_t *v;
+	uint32_t start = psb_mmu_pd_index(mmu_offset);
+	struct psb_mmu_driver *driver = pd->driver;
+	int num_pages = gtt_pages;
+
+	down_read(&driver->sem);
+	spin_lock(&driver->lock);
+
+	v = kmap_atomic(pd->p);
+	v += start;
+
+	while (gtt_pages--) {
+		*v++ = gtt_start | pd->pd_mask;
+		gtt_start += PAGE_SIZE;
+	}
+
+	/*ttm_tt_cache_flush(&pd->p, num_pages);*/
+	psb_pages_clflush(pd->driver, &pd->p, num_pages);
+	kunmap_atomic(v);
+	spin_unlock(&driver->lock);
+
+	if (pd->hw_context != -1)
+		atomic_set(&pd->driver->needs_tlbflush, 1);
+
+	up_read(&pd->driver->sem);
+	psb_mmu_flush_pd(pd->driver, 0);
+}
+
+struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
+{
+	struct psb_mmu_pd *pd;
+
+	/* down_read(&driver->sem); */
+	pd = driver->default_pd;
+	/* up_read(&driver->sem); */
+
+	return pd;
+}
+
+void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
+{
+	psb_mmu_free_pagedir(driver->default_pd);
+	kfree(driver);
+}
+
+struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+					int trap_pagefaults,
+					int invalid_type,
+					struct drm_psb_private *dev_priv)
+{
+	struct psb_mmu_driver *driver;
+
+	driver = kmalloc(sizeof(*driver), GFP_KERNEL);
+
+	if (!driver)
+		return NULL;
+	driver->dev_priv = dev_priv;
+
+	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
+					      invalid_type);
+	if (!driver->default_pd)
+		goto out_err1;
+
+	spin_lock_init(&driver->lock);
+	init_rwsem(&driver->sem);
+	down_write(&driver->sem);
+	driver->register_map = registers;
+	atomic_set(&driver->needs_tlbflush, 1);
+
+	driver->has_clflush = 0;
+
+	if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
+		uint32_t tfms, misc, cap0, cap4, clflush_size;
+
+		/*
+		 * clflush size is determined at kernel setup for x86_64
+		 *  but not for i386. We have to do it here.
+		 */
+
+		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
+		clflush_size = ((misc >> 8) & 0xff) * 8;
+		driver->has_clflush = 1;
+		driver->clflush_add =
+		    PAGE_SIZE * clflush_size / sizeof(uint32_t);
+		driver->clflush_mask = driver->clflush_add - 1;
+		driver->clflush_mask = ~driver->clflush_mask;
+	}
+
+	up_write(&driver->sem);
+	return driver;
+
+out_err1:
+	kfree(driver);
+	return NULL;
+}
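+
+/*
+ * Worked example for the clflush bookkeeping above (illustrative
+ * numbers): CPUID leaf 1 reports the flush line size in EBX[15:8] in
+ * 8-byte units, so a 64-byte cache line gives clflush_size = 8 * 8 =
+ * 64.  clflush_add then becomes PAGE_SIZE * 64 / 4 = 16 * PAGE_SIZE;
+ * the per-page flush helpers shift this back down by PAGE_SHIFT to a
+ * 16-byte stride, which is conservative (each 64-byte line is flushed
+ * four times) but correct.
+ */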
+
+static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
+			       unsigned long address, uint32_t num_pages,
+			       uint32_t desired_tile_stride,
+			       uint32_t hw_tile_stride)
+{
+	struct psb_mmu_pt *pt;
+	uint32_t rows = 1;
+	uint32_t i;
+	unsigned long addr;
+	unsigned long end;
+	unsigned long next;
+	unsigned long add;
+	unsigned long row_add;
+	unsigned long clflush_add = pd->driver->clflush_add;
+	unsigned long clflush_mask = pd->driver->clflush_mask;
+
+	if (!pd->driver->has_clflush) {
+		/*ttm_tt_cache_flush(&pd->p, num_pages);*/
+		psb_pages_clflush(pd->driver, &pd->p, num_pages);
+		return;
+	}
+
+	if (hw_tile_stride)
+		rows = num_pages / desired_tile_stride;
+	else
+		desired_tile_stride = num_pages;
+
+	add = desired_tile_stride << PAGE_SHIFT;
+	row_add = hw_tile_stride << PAGE_SHIFT;
+	mb();
+	for (i = 0; i < rows; ++i) {
+
+		addr = address;
+		end = addr + add;
+
+		do {
+			next = psb_pd_addr_end(addr, end);
+			pt = psb_mmu_pt_map_lock(pd, addr);
+			if (!pt)
+				continue;
+			do {
+				psb_clflush(&pt->v
+					    [psb_mmu_pt_index(addr)]);
+			} while (addr +=
+				 clflush_add,
+				 (addr & clflush_mask) < next);
+
+			psb_mmu_pt_unmap_unlock(pt);
+		} while (addr = next, next != end);
+		address += row_add;
+	}
+	mb();
+}
+
+void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+				 unsigned long address, uint32_t num_pages)
+{
+	struct psb_mmu_pt *pt;
+	unsigned long addr;
+	unsigned long end;
+	unsigned long next;
+	unsigned long f_address = address;
+
+	down_read(&pd->driver->sem);
+
+	addr = address;
+	end = addr + (num_pages << PAGE_SHIFT);
+
+	do {
+		next = psb_pd_addr_end(addr, end);
+		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+		if (!pt)
+			goto out;
+		do {
+			psb_mmu_invalidate_pte(pt, addr);
+			--pt->count;
+		} while (addr += PAGE_SIZE, addr < next);
+		psb_mmu_pt_unmap_unlock(pt);
+
+	} while (addr = next, next != end);
+
+out:
+	if (pd->hw_context != -1)
+		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+	up_read(&pd->driver->sem);
+
+	if (pd->hw_context != -1)
+		psb_mmu_flush(pd->driver, 0);
+
+	return;
+}
+
+void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
+			  uint32_t num_pages, uint32_t desired_tile_stride,
+			  uint32_t hw_tile_stride)
+{
+	struct psb_mmu_pt *pt;
+	uint32_t rows = 1;
+	uint32_t i;
+	unsigned long addr;
+	unsigned long end;
+	unsigned long next;
+	unsigned long add;
+	unsigned long row_add;
+	unsigned long f_address = address;
+
+	if (hw_tile_stride)
+		rows = num_pages / desired_tile_stride;
+	else
+		desired_tile_stride = num_pages;
+
+	add = desired_tile_stride << PAGE_SHIFT;
+	row_add = hw_tile_stride << PAGE_SHIFT;
+
+	/* down_read(&pd->driver->sem); */
+
+	/* Make sure we only need to flush this processor's cache */
+
+	for (i = 0; i < rows; ++i) {
+
+		addr = address;
+		end = addr + add;
+
+		do {
+			next = psb_pd_addr_end(addr, end);
+			pt = psb_mmu_pt_map_lock(pd, addr);
+			if (!pt)
+				continue;
+			do {
+				psb_mmu_invalidate_pte(pt, addr);
+				--pt->count;
+
+			} while (addr += PAGE_SIZE, addr < next);
+			psb_mmu_pt_unmap_unlock(pt);
+
+		} while (addr = next, next != end);
+		address += row_add;
+	}
+	if (pd->hw_context != -1)
+		psb_mmu_flush_ptes(pd, f_address, num_pages,
+				   desired_tile_stride, hw_tile_stride);
+
+	/* up_read(&pd->driver->sem); */
+
+	if (pd->hw_context != -1)
+		psb_mmu_flush(pd->driver, 0);
+}
+
+int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
+				unsigned long address, uint32_t num_pages,
+				int type)
+{
+	struct psb_mmu_pt *pt;
+	uint32_t pte;
+	unsigned long addr;
+	unsigned long end;
+	unsigned long next;
+	unsigned long f_address = address;
+	int ret = 0;
+
+	down_read(&pd->driver->sem);
+
+	addr = address;
+	end = addr + (num_pages << PAGE_SHIFT);
+
+	do {
+		next = psb_pd_addr_end(addr, end);
+		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+		if (!pt) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		do {
+			pte = psb_mmu_mask_pte(start_pfn++, type);
+			psb_mmu_set_pte(pt, addr, pte);
+			pt->count++;
+		} while (addr += PAGE_SIZE, addr < next);
+		psb_mmu_pt_unmap_unlock(pt);
+
+	} while (addr = next, next != end);
+
+out:
+	if (pd->hw_context != -1)
+		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+
+	up_read(&pd->driver->sem);
+
+	if (pd->hw_context != -1)
+		psb_mmu_flush(pd->driver, 1);
+
+	return ret;
+}
+
+int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+			 unsigned long address, uint32_t num_pages,
+			 uint32_t desired_tile_stride,
+			 uint32_t hw_tile_stride, int type)
+{
+	struct psb_mmu_pt *pt;
+	uint32_t rows = 1;
+	uint32_t i;
+	uint32_t pte;
+	unsigned long addr;
+	unsigned long end;
+	unsigned long next;
+	unsigned long add;
+	unsigned long row_add;
+	unsigned long f_address = address;
+	int ret = 0;
+
+	if (hw_tile_stride) {
+		if (num_pages % desired_tile_stride != 0)
+			return -EINVAL;
+		rows = num_pages / desired_tile_stride;
+	} else {
+		desired_tile_stride = num_pages;
+	}
+
+	add = desired_tile_stride << PAGE_SHIFT;
+	row_add = hw_tile_stride << PAGE_SHIFT;
+
+	down_read(&pd->driver->sem);
+
+	for (i = 0; i < rows; ++i) {
+
+		addr = address;
+		end = addr + add;
+
+		do {
+			next = psb_pd_addr_end(addr, end);
+			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+			if (!pt) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			do {
+				pte =
+				    psb_mmu_mask_pte(page_to_pfn(*pages++),
+						     type);
+				psb_mmu_set_pte(pt, addr, pte);
+				pt->count++;
+			} while (addr += PAGE_SIZE, addr < next);
+			psb_mmu_pt_unmap_unlock(pt);
+
+		} while (addr = next, next != end);
+
+		address += row_add;
+	}
+out:
+	if (pd->hw_context != -1)
+		psb_mmu_flush_ptes(pd, f_address, num_pages,
+				   desired_tile_stride, hw_tile_stride);
+
+	up_read(&pd->driver->sem);
+
+	if (pd->hw_context != -1)
+		psb_mmu_flush(pd->driver, 1);
+
+	return ret;
+}
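+
+/*
+ * Illustrative call (hypothetical values): inserting num_pages = 16
+ * with desired_tile_stride = 4 and hw_tile_stride = 8 walks four rows;
+ * each row maps four consecutive pages, and successive rows start
+ * hw_tile_stride pages (8 << PAGE_SHIFT bytes) apart in the GPU
+ * virtual address space, leaving a four-page hole per row for tiling.
+ */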
+
+int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+			   unsigned long *pfn)
+{
+	int ret;
+	struct psb_mmu_pt *pt;
+	uint32_t tmp;
+	spinlock_t *lock = &pd->driver->lock;
+
+	down_read(&pd->driver->sem);
+	pt = psb_mmu_pt_map_lock(pd, virtual);
+	if (!pt) {
+		uint32_t *v;
+
+		spin_lock(lock);
+		v = kmap_atomic(pd->p);
+		tmp = v[psb_mmu_pd_index(virtual)];
+		kunmap_atomic(v);
+		spin_unlock(lock);
+
+		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
+		    !(pd->invalid_pte & PSB_PTE_VALID)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		ret = 0;
+		*pfn = pd->invalid_pte >> PAGE_SHIFT;
+		goto out;
+	}
+	tmp = pt->v[psb_mmu_pt_index(virtual)];
+	if (!(tmp & PSB_PTE_VALID)) {
+		ret = -EINVAL;
+	} else {
+		ret = 0;
+		*pfn = tmp >> PAGE_SHIFT;
+	}
+	psb_mmu_pt_unmap_unlock(pt);
+out:
+	up_read(&pd->driver->sem);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/oaktrail.h b/linux-imx/drivers/gpu/drm/gma500/oaktrail.h
new file mode 100644
index 0000000..30adbbe
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/oaktrail.h
@@ -0,0 +1,257 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* MID device specific descriptors */
+
+struct oaktrail_timing_info {
+	u16 pixel_clock;
+	u8 hactive_lo;
+	u8 hblank_lo;
+	u8 hblank_hi:4;
+	u8 hactive_hi:4;
+	u8 vactive_lo;
+	u8 vblank_lo;
+	u8 vblank_hi:4;
+	u8 vactive_hi:4;
+	u8 hsync_offset_lo;
+	u8 hsync_pulse_width_lo;
+	u8 vsync_pulse_width_lo:4;
+	u8 vsync_offset_lo:4;
+	u8 vsync_pulse_width_hi:2;
+	u8 vsync_offset_hi:2;
+	u8 hsync_pulse_width_hi:2;
+	u8 hsync_offset_hi:2;
+	u8 width_mm_lo;
+	u8 height_mm_lo;
+	u8 height_mm_hi:4;
+	u8 width_mm_hi:4;
+	u8 hborder;
+	u8 vborder;
+	u8 unknown0:1;
+	u8 hsync_positive:1;
+	u8 vsync_positive:1;
+	u8 separate_sync:2;
+	u8 stereo:1;
+	u8 unknown6:1;
+	u8 interlaced:1;
+} __packed;
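+
+/*
+ * Most counts in the DTD are split into lo/hi bitfields; a consumer
+ * reassembles them along these lines (illustrative sketch, "dtd" is a
+ * hypothetical pointer to this struct):
+ *
+ *	u16 hactive = ((u16)dtd->hactive_hi << 8) | dtd->hactive_lo;
+ *	u16 vblank  = ((u16)dtd->vblank_hi << 8) | dtd->vblank_lo;
+ */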
+
+struct gct_r10_timing_info {
+	u16 pixel_clock;
+	u32 hactive_lo:8;
+	u32 hactive_hi:4;
+	u32 hblank_lo:8;
+	u32 hblank_hi:4;
+	u32 hsync_offset_lo:8;
+	u16 hsync_offset_hi:2;
+	u16 hsync_pulse_width_lo:8;
+	u16 hsync_pulse_width_hi:2;
+	u16 hsync_positive:1;
+	u16 rsvd_1:3;
+	u8  vactive_lo:8;
+	u16 vactive_hi:4;
+	u16 vblank_lo:8;
+	u16 vblank_hi:4;
+	u16 vsync_offset_lo:4;
+	u16 vsync_offset_hi:2;
+	u16 vsync_pulse_width_lo:4;
+	u16 vsync_pulse_width_hi:2;
+	u16 vsync_positive:1;
+	u16 rsvd_2:3;
+} __packed;
+
+struct oaktrail_panel_descriptor_v1 {
+	u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+				/* 0x61190 if MIPI */
+	u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+	u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+	u32 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
+						/* Register 0x61210 */
+	struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
+	u16 Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
+				/* Bit 0, Frequency, 15 bits,0 - 32767Hz */
+			/* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
+	u16 Panel_MIPI_Display_Descriptor;
+			/*16 bits, Defined as follows: */
+			/* if MIPI, 0x0000 if LVDS */
+			/* Bit 0, Type, 2 bits, */
+			/* 0: Type-1, */
+			/* 1: Type-2, */
+			/* 2: Type-3, */
+			/* 3: Type-4 */
+			/* Bit 2, Pixel Format, 4 bits */
+			/* Bit0: 16bpp (not supported in LNC), */
+			/* Bit1: 18bpp loosely packed, */
+			/* Bit2: 18bpp packed, */
+			/* Bit3: 24bpp */
+			/* Bit 6, Reserved, 2 bits, 00b */
+			/* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+			/* Bit 14, Reserved, 2 bits, 00b */
+} __packed;
+
+struct oaktrail_panel_descriptor_v2 {
+	u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+				/* 0x61190 if MIPI */
+	u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+	u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+	u8 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
+						/* Register 0x61210 */
+	struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
+	u16 Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
+				/*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
+	u8 Panel_Initial_Brightness;/* [7:0] 0 - 100% */
+			/*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
+	u16 Panel_MIPI_Display_Descriptor;
+			/*16 bits, Defined as follows: */
+			/* if MIPI, 0x0000 if LVDS */
+			/* Bit 0, Type, 2 bits, */
+			/* 0: Type-1, */
+			/* 1: Type-2, */
+			/* 2: Type-3, */
+			/* 3: Type-4 */
+			/* Bit 2, Pixel Format, 4 bits */
+			/* Bit0: 16bpp (not supported in LNC), */
+			/* Bit1: 18bpp loosely packed, */
+			/* Bit2: 18bpp packed, */
+			/* Bit3: 24bpp */
+			/* Bit 6, Reserved, 2 bits, 00b */
+			/* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+			/* Bit 14, Reserved, 2 bits, 00b */
+} __packed;
+
+union oaktrail_panel_rx {
+	struct {
+		u16 NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
+			/* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
+		u16 MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
+		/*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
+		u16 SupportedVideoTransferMode:2; /*0: Non-burst only */
+					/* 1: Burst and non-burst */
+					/* 2/3: Reserved */
+		u16 HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
+		u16 DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
+		u16 ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
+		u16 BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
+		u16 Rsvd:5;/*5 bits,00000b */
+	} panelrx;
+	u16 panel_receiver;
+} __packed;
+
+struct gct_r0 {
+	union { /*8 bits,Defined as follows: */
+		struct {
+			u8 PanelType:4; /*4 bits, Bit field for panels*/
+					/* 0 - 3: 0 = LVDS, 1 = MIPI*/
+					/*2 bits,Specifies which of the*/
+			u8 BootPanelIndex:2;
+					/* 4 panels to use by default*/
+			u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+					/* the 4 MIPI DSI receivers to use*/
+		} PD;
+		u8 PanelDescriptor;
+	};
+	struct oaktrail_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
+	union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
+} __packed;
+
+struct gct_r1 {
+	union { /*8 bits,Defined as follows: */
+		struct {
+			u8 PanelType:4; /*4 bits, Bit field for panels*/
+					/* 0 - 3: 0 = LVDS, 1 = MIPI*/
+					/*2 bits,Specifies which of the*/
+			u8 BootPanelIndex:2;
+					/* 4 panels to use by default*/
+			u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+					/* the 4 MIPI DSI receivers to use*/
+		} PD;
+		u8 PanelDescriptor;
+	};
+	struct oaktrail_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
+	union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
+} __packed;
+
+struct gct_r10 {
+	struct gct_r10_timing_info DTD;
+	u16 Panel_MIPI_Display_Descriptor;
+	u16 Panel_MIPI_Receiver_Descriptor;
+	u16 Panel_Backlight_Inverter_Descriptor;
+	u8 Panel_Initial_Brightness;
+	u32 MIPI_Ctlr_Init_ptr;
+	u32 MIPI_Panel_Init_ptr;
+} __packed;
+
+struct oaktrail_gct_data {
+	u8 bpi; /* boot panel index, number of panel used during boot */
+	u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
+	struct oaktrail_timing_info DTD; /* timing info for the selected panel */
+	u32 Panel_Port_Control;
+	u32 PP_On_Sequencing;/*1 dword,Register 0x61208,*/
+	u32 PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+	u32 PP_Cycle_Delay;
+	u16 Panel_Backlight_Inverter_Descriptor;
+	u16 Panel_MIPI_Display_Descriptor;
+} __packed;
+
+#define MODE_SETTING_IN_CRTC		0x1
+#define MODE_SETTING_IN_ENCODER		0x2
+#define MODE_SETTING_ON_GOING		0x3
+#define MODE_SETTING_IN_DSR		0x4
+#define MODE_SETTING_ENCODER_DONE	0x8
+
+/*
+ *	Moorestown HDMI interfaces
+ */
+
+struct oaktrail_hdmi_dev {
+	struct pci_dev *dev;
+	void __iomem *regs;
+	unsigned int mmio, mmio_len;
+	int dpms_mode;
+	struct hdmi_i2c_dev *i2c_dev;
+
+	/* register state */
+	u32 saveDPLL_CTRL;
+	u32 saveDPLL_DIV_CTRL;
+	u32 saveDPLL_ADJUST;
+	u32 saveDPLL_UPDATE;
+	u32 saveDPLL_CLK_ENABLE;
+	u32 savePCH_HTOTAL_B;
+	u32 savePCH_HBLANK_B;
+	u32 savePCH_HSYNC_B;
+	u32 savePCH_VTOTAL_B;
+	u32 savePCH_VBLANK_B;
+	u32 savePCH_VSYNC_B;
+	u32 savePCH_PIPEBCONF;
+	u32 savePCH_PIPEBSRC;
+};
+
+extern void oaktrail_hdmi_setup(struct drm_device *dev);
+extern void oaktrail_hdmi_teardown(struct drm_device *dev);
+extern int  oaktrail_hdmi_i2c_init(struct pci_dev *dev);
+extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
+extern void oaktrail_hdmi_save(struct drm_device *dev);
+extern void oaktrail_hdmi_restore(struct drm_device *dev);
+extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
+extern int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+						struct drm_display_mode *adjusted_mode, int x, int y,
+						struct drm_framebuffer *old_fb);
+extern void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode);
+
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/oaktrail_crtc.c b/linux-imx/drivers/gpu/drm/gma500/oaktrail_crtc.c
new file mode 100644
index 0000000..3071526
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -0,0 +1,586 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+
+struct psb_intel_range_t {
+	int min, max;
+};
+
+struct oaktrail_limit_t {
+	struct psb_intel_range_t dot, m, p1;
+};
+
+struct oaktrail_clock_t {
+	/* derived values */
+	int dot;
+	int m;
+	int p1;
+};
+
+#define MRST_LIMIT_LVDS_100L	    0
+#define MRST_LIMIT_LVDS_83	    1
+#define MRST_LIMIT_LVDS_100	    2
+
+#define MRST_DOT_MIN		  19750
+#define MRST_DOT_MAX		  120000
+#define MRST_M_MIN_100L		    20
+#define MRST_M_MIN_100		    10
+#define MRST_M_MIN_83		    12
+#define MRST_M_MAX_100L		    34
+#define MRST_M_MAX_100		    17
+#define MRST_M_MAX_83		    20
+#define MRST_P1_MIN		    2
+#define MRST_P1_MAX_0		    7
+#define MRST_P1_MAX_1		    8
+
+static const struct oaktrail_limit_t oaktrail_limits[] = {
+	{			/* MRST_LIMIT_LVDS_100L */
+	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+	 .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
+	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+	 },
+	{			/* MRST_LIMIT_LVDS_83 */
+	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+	 .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
+	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
+	 },
+	{			/* MRST_LIMIT_LVDS_100 */
+	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+	 .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
+	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+	 },
+};
+
+#define MRST_M_MIN	    10
+static const u32 oaktrail_m_converts[] = {
+	0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
+	0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
+	0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
+};
+
+static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
+{
+	const struct oaktrail_limit_t *limit = NULL;
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+	    || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
+		switch (dev_priv->core_freq) {
+		case 100:
+			limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
+			break;
+		case 166:
+			limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
+			break;
+		case 200:
+			limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
+			break;
+		}
+	} else {
+		limit = NULL;
+		dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
+	}
+
+	return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
+{
+	clock->dot = (refclk * clock->m) / (14 * clock->p1);
+}
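+
+/*
+ * Example (sketch): with a 200 MHz core clock the caller passes
+ * refclk = 200000 (kHz), so m = 10 and p1 = 2 give
+ * dot = 200000 * 10 / (14 * 2) = 71428 kHz, roughly a 71.4 MHz pixel
+ * clock.
+ */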
+
+static void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
+{
+	pr_debug("%s: dotclock = %d,  m = %d, p1 = %d.\n",
+	     prefix, clock->dot, clock->m, clock->p1);
+}
+
+/**
+ * Returns true if a set of divisors for the desired target clock could be
+ * found with the given refclk, false otherwise.  The divisors written to
+ * *best_clock are the actual hardware divisor values.
+ */
+static bool
+mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
+		struct oaktrail_clock_t *best_clock)
+{
+	struct oaktrail_clock_t clock;
+	const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
+	int err = target;
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+		for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+		     clock.p1++) {
+			int this_err;
+
+			oaktrail_clock(refclk, &clock);
+
+			this_err = abs(clock.dot - target);
+			if (this_err < err) {
+				*best_clock = clock;
+				err = this_err;
+			}
+		}
+	}
+	dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
+	return err != target;
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	u32 temp;
+
+	if (pipe == 1) {
+		oaktrail_crtc_hdmi_dpms(crtc, mode);
+		return;
+	}
+
+	if (!gma_power_begin(dev, true))
+		return;
+
+	/* XXX: When our outputs are all unaware of DPMS modes other than off
+	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+	 */
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+		/* Enable the DPLL */
+		temp = REG_READ(map->dpll);
+		if ((temp & DPLL_VCO_ENABLE) == 0) {
+			REG_WRITE(map->dpll, temp);
+			REG_READ(map->dpll);
+			/* Wait for the clocks to stabilize. */
+			udelay(150);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
+			/* Wait for the clocks to stabilize. */
+			udelay(150);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
+			/* Wait for the clocks to stabilize. */
+			udelay(150);
+		}
+		/* Enable the pipe */
+		temp = REG_READ(map->conf);
+		if ((temp & PIPEACONF_ENABLE) == 0)
+			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
+		/* Enable the plane */
+		temp = REG_READ(map->cntr);
+		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+			REG_WRITE(map->cntr,
+				  temp | DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(map->base, REG_READ(map->base));
+		}
+
+		psb_intel_crtc_load_lut(crtc);
+
+		/* Give the overlay scaler a chance to enable
+		   if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+		break;
+	case DRM_MODE_DPMS_OFF:
+		/* Give the overlay scaler a chance to disable
+		 * if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+		/* Disable the VGA plane that we never use */
+		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+		/* Disable display plane */
+		temp = REG_READ(map->cntr);
+		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+			REG_WRITE(map->cntr,
+				  temp & ~DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(map->base, REG_READ(map->base));
+			REG_READ(map->base);
+		}
+
+		/* Next, disable display pipes */
+		temp = REG_READ(map->conf);
+		if ((temp & PIPEACONF_ENABLE) != 0) {
+			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+			REG_READ(map->conf);
+		}
+		/* Wait for the pipe disable to take effect. */
+		psb_intel_wait_for_vblank(dev);
+
+		temp = REG_READ(map->dpll);
+		if ((temp & DPLL_VCO_ENABLE) != 0) {
+			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
+		}
+
+		/* Wait for the clocks to turn off. */
+		udelay(150);
+		break;
+	}
+
+	/*Set FIFO Watermarks*/
+	REG_WRITE(DSPARB, 0x3FFF);
+	REG_WRITE(DSPFW1, 0x3F88080A);
+	REG_WRITE(DSPFW2, 0x0b060808);
+	REG_WRITE(DSPFW3, 0x0);
+	REG_WRITE(DSPFW4, 0x08030404);
+	REG_WRITE(DSPFW5, 0x04040404);
+	REG_WRITE(DSPFW6, 0x78);
+	/* Must write Bit 14 of the Chicken Bit Register */
+	REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
+
+	gma_power_end(dev);
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int oaktrail_panel_fitter_pipe(struct drm_device *dev)
+{
+	u32 pfit_control;
+
+	pfit_control = REG_READ(PFIT_CONTROL);
+
+	/* See if the panel fitter is in use */
+	if ((pfit_control & PFIT_ENABLE) == 0)
+		return -1;
+	return (pfit_control >> 29) & 3;
+}
+
+static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
+			      struct drm_display_mode *mode,
+			      struct drm_display_mode *adjusted_mode,
+			      int x, int y,
+			      struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	int refclk = 0;
+	struct oaktrail_clock_t clock;
+	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+	bool ok, is_sdvo = false;
+	bool is_lvds = false;
+	bool is_mipi = false;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct psb_intel_encoder *psb_intel_encoder = NULL;
+	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+	struct drm_connector *connector;
+
+	if (pipe == 1)
+		return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+
+	if (!gma_power_begin(dev, true))
+		return 0;
+
+	memcpy(&psb_intel_crtc->saved_mode,
+		mode,
+		sizeof(struct drm_display_mode));
+	memcpy(&psb_intel_crtc->saved_adjusted_mode,
+		adjusted_mode,
+		sizeof(struct drm_display_mode));
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		if (!connector->encoder || connector->encoder->crtc != crtc)
+			continue;
+
+		psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+		switch (psb_intel_encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_SDVO:
+			is_sdvo = true;
+			break;
+		case INTEL_OUTPUT_MIPI:
+			is_mipi = true;
+			break;
+		}
+	}
+
+	/* Disable the VGA plane that we never use */
+	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+	/* Disable the panel fitter if it was on our pipe */
+	if (oaktrail_panel_fitter_pipe(dev) == pipe)
+		REG_WRITE(PFIT_CONTROL, 0);
+
+	REG_WRITE(map->src,
+		  ((mode->crtc_hdisplay - 1) << 16) |
+		  (mode->crtc_vdisplay - 1));
+
+	if (psb_intel_encoder)
+		drm_object_property_get_value(&connector->base,
+			dev->mode_config.scaling_mode_property, &scalingType);
+
+	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+		/* Moorestown doesn't have register support for centering so
+		 * we need to mess with the h/vblank and h/vsync start and
+		 * ends to get centering */
+		int offsetX = 0, offsetY = 0;
+
+		offsetX = (adjusted_mode->crtc_hdisplay -
+			   mode->crtc_hdisplay) / 2;
+		offsetY = (adjusted_mode->crtc_vdisplay -
+			   mode->crtc_vdisplay) / 2;
+
+		REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
+			((adjusted_mode->crtc_htotal - 1) << 16));
+		REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
+			((adjusted_mode->crtc_vtotal - 1) << 16));
+		REG_WRITE(map->hblank,
+			(adjusted_mode->crtc_hblank_start - offsetX - 1) |
+			((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
+		REG_WRITE(map->hsync,
+			(adjusted_mode->crtc_hsync_start - offsetX - 1) |
+			((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
+		REG_WRITE(map->vblank,
+			(adjusted_mode->crtc_vblank_start - offsetY - 1) |
+			((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
+		REG_WRITE(map->vsync,
+			(adjusted_mode->crtc_vsync_start - offsetY - 1) |
+			((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+	} else {
+		REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
+			((adjusted_mode->crtc_htotal - 1) << 16));
+		REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
+			((adjusted_mode->crtc_vtotal - 1) << 16));
+		REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
+			((adjusted_mode->crtc_hblank_end - 1) << 16));
+		REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
+			((adjusted_mode->crtc_hsync_end - 1) << 16));
+		REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
+			((adjusted_mode->crtc_vblank_end - 1) << 16));
+		REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
+			((adjusted_mode->crtc_vsync_end - 1) << 16));
+	}
+
+	/* Flush the plane changes */
+	{
+		struct drm_crtc_helper_funcs *crtc_funcs =
+		    crtc->helper_private;
+		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+	}
+
+	/* setup pipeconf */
+	pipeconf = REG_READ(map->conf);
+
+	/* Set up the display plane register */
+	dspcntr = REG_READ(map->cntr);
+	dspcntr |= DISPPLANE_GAMMA_ENABLE;
+
+	if (pipe == 0)
+		dspcntr |= DISPPLANE_SEL_PIPE_A;
+	else
+		dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+	if (is_mipi)
+		goto oaktrail_crtc_mode_set_exit;
+
+	refclk = dev_priv->core_freq * 1000;
+
+	dpll = 0;		/*BIT16 = 0 for 100MHz reference */
+
+	ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
+
+	if (!ok) {
+		dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
+	} else {
+		dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d,"
+			 "m = %x, p1 = %x.\n", clock.dot, clock.m,
+			 clock.p1);
+	}
+
+	fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
+
+	dpll |= DPLL_VGA_MODE_DIS;
+
+
+	dpll |= DPLL_VCO_ENABLE;
+
+	if (is_lvds)
+		dpll |= DPLLA_MODE_LVDS;
+	else
+		dpll |= DPLLB_MODE_DAC_SERIAL;
+
+	if (is_sdvo) {
+		int sdvo_pixel_multiply =
+		    adjusted_mode->clock / mode->clock;
+
+		dpll |= DPLL_DVO_HIGH_SPEED;
+		dpll |=
+		    (sdvo_pixel_multiply -
+		     1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+	}
+
+
+	/* compute bitmask from p1 value */
+	dpll |= (1 << (clock.p1 - 2)) << 17;
+
+	dpll |= DPLL_VCO_ENABLE;
+
+	mrstPrintPll("chosen", &clock);
+
+	if (dpll & DPLL_VCO_ENABLE) {
+		REG_WRITE(map->fp0, fp);
+		REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
+		REG_READ(map->dpll);
+		/* Check the DPLLA lock bit PIPEACONF[29] */
+		udelay(150);
+	}
+
+	REG_WRITE(map->fp0, fp);
+	REG_WRITE(map->dpll, dpll);
+	REG_READ(map->dpll);
+	/* Wait for the clocks to stabilize. */
+	udelay(150);
+
+	/* write it again -- the BIOS does, after all */
+	REG_WRITE(map->dpll, dpll);
+	REG_READ(map->dpll);
+	/* Wait for the clocks to stabilize. */
+	udelay(150);
+
+	REG_WRITE(map->conf, pipeconf);
+	REG_READ(map->conf);
+	psb_intel_wait_for_vblank(dev);
+
+	REG_WRITE(map->cntr, dspcntr);
+	psb_intel_wait_for_vblank(dev);
+
+oaktrail_crtc_mode_set_exit:
+	gma_power_end(dev);
+	return 0;
+}
+
+static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
+			    int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	unsigned long start, offset;
+
+	u32 dspcntr;
+	int ret = 0;
+
+	/* no fb bound */
+	if (!crtc->fb) {
+		dev_dbg(dev->dev, "No FB bound\n");
+		return 0;
+	}
+
+	if (!gma_power_begin(dev, true))
+		return 0;
+
+	start = psbfb->gtt->offset;
+	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+	REG_WRITE(map->stride, crtc->fb->pitches[0]);
+
+	dspcntr = REG_READ(map->cntr);
+	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		dspcntr |= DISPPLANE_8BPP;
+		break;
+	case 16:
+		if (crtc->fb->depth == 15)
+			dspcntr |= DISPPLANE_15_16BPP;
+		else
+			dspcntr |= DISPPLANE_16BPP;
+		break;
+	case 24:
+	case 32:
+		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+		break;
+	default:
+		dev_err(dev->dev, "Unknown color depth\n");
+		ret = -EINVAL;
+		goto pipe_set_base_exit;
+	}
+	REG_WRITE(map->cntr, dspcntr);
+
+	REG_WRITE(map->base, offset);
+	REG_READ(map->base);
+	REG_WRITE(map->surf, start);
+	REG_READ(map->surf);
+
+pipe_set_base_exit:
+	gma_power_end(dev);
+	return ret;
+}
+
+static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void oaktrail_crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
+	.dpms = oaktrail_crtc_dpms,
+	.mode_fixup = oaktrail_crtc_mode_fixup,
+	.mode_set = oaktrail_crtc_mode_set,
+	.mode_set_base = oaktrail_pipe_set_base,
+	.prepare = oaktrail_crtc_prepare,
+	.commit = oaktrail_crtc_commit,
+};
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/oaktrail_device.c b/linux-imx/drivers/gpu/drm/gma500/oaktrail_device.c
new file mode 100644
index 0000000..08747fd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -0,0 +1,569 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <asm/mrst.h>
+#include <asm/intel_scu_ipc.h>
+#include "mid_bios.h"
+#include "intel_bios.h"
+
+static int oaktrail_output_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	if (dev_priv->iLVDS_enable)
+		oaktrail_lvds_init(dev, &dev_priv->mode_dev);
+	else
+		dev_err(dev->dev, "DSI is not supported\n");
+	if (dev_priv->hdmi_priv)
+		oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
+	return 0;
+}
+
+/*
+ *	Provide the low level interfaces for the Moorestown backlight
+ */
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+#define MRST_BLC_MAX_PWM_REG_FREQ	    0xFFFF
+#define BLC_PWM_PRECISION_FACTOR 100	/* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+#define BLC_ADJUSTMENT_MAX 100
+
+static struct backlight_device *oaktrail_backlight_device;
+static int oaktrail_brightness;
+
+static int oaktrail_set_brightness(struct backlight_device *bd)
+{
+	struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int level = bd->props.brightness;
+	u32 blc_pwm_ctl;
+	u32 max_pwm_blc;
+
+	/* Clamp to the valid 1-100% percentage range */
+	if (level < 1)
+		level = 1;
+
+	if (gma_power_begin(dev, 0)) {
+		/* Calculate and set the brightness value */
+		max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
+		blc_pwm_ctl = level * max_pwm_blc / 100;
+
+		/* Adjust the backlight level with the percent in
+		 * dev_priv->blc_adj1;
+		 */
+		blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
+		blc_pwm_ctl = blc_pwm_ctl / 100;
+
+		/* Adjust the backlight level with the percent in
+		 * dev_priv->blc_adj2;
+		 */
+		blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
+		blc_pwm_ctl = blc_pwm_ctl / 100;
+
+		/* force PWM bit on */
+		REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+		REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
+		gma_power_end(dev);
+	}
+	oaktrail_brightness = level;
+	return 0;
+}
+
+static int oaktrail_get_brightness(struct backlight_device *bd)
+{
+	/* return locally cached var instead of HW read (due to DPST etc.) */
+	/* FIXME: ideally return actual value in case firmware fiddled with
+	   it */
+	return oaktrail_brightness;
+}
+
+static int device_backlight_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned long core_clock;
+	u16 bl_max_freq;
+	uint32_t value;
+	uint32_t blc_pwm_precision_factor;
+
+	dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
+	dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
+	bl_max_freq = 256;
+	/* this needs to be set elsewhere */
+	blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
+
+	core_clock = dev_priv->core_freq;
+
+	value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+	value *= blc_pwm_precision_factor;
+	value /= bl_max_freq;
+	value /= blc_pwm_precision_factor;
+
+	if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
+			return -ERANGE;
+
+	if (gma_power_begin(dev, false)) {
+		REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+		REG_WRITE(BLC_PWM_CTL, value | (value << 16));
+		gma_power_end(dev);
+	}
+	return 0;
+}
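+
+/*
+ * Worked example for the computation above (illustrative numbers):
+ * with a 200 MHz core clock, value = (200 * 1000000) / 32 = 6250000,
+ * then scaled by the precision factor, divided by bl_max_freq (256,
+ * presumably the target PWM frequency in Hz) and unscaled again:
+ * 6250000 * 100 / 256 / 100 = 24414, comfortably below the 0xFFFF
+ * register limit.  Writing (value << 16) | value programs both the PWM
+ * period and a 100% duty cycle.
+ */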
+
+static const struct backlight_ops oaktrail_ops = {
+	.get_brightness = oaktrail_get_brightness,
+	.update_status  = oaktrail_set_brightness,
+};
+
+static int oaktrail_backlight_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int ret;
+	struct backlight_properties props;
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 100;
+	props.type = BACKLIGHT_PLATFORM;
+
+	oaktrail_backlight_device = backlight_device_register("oaktrail-bl",
+				NULL, (void *)dev, &oaktrail_ops, &props);
+
+	if (IS_ERR(oaktrail_backlight_device))
+		return PTR_ERR(oaktrail_backlight_device);
+
+	ret = device_backlight_init(dev);
+	if (ret < 0) {
+		backlight_device_unregister(oaktrail_backlight_device);
+		return ret;
+	}
+	oaktrail_backlight_device->props.brightness = 100;
+	oaktrail_backlight_device->props.max_brightness = 100;
+	backlight_update_status(oaktrail_backlight_device);
+	dev_priv->backlight_device = oaktrail_backlight_device;
+	return 0;
+}
+
+#endif
+
+/*
+ *	Provide the Moorestown specific chip logic and low level methods
+ *	for power management
+ */
+
+/**
+ *	oaktrail_save_display_registers	-	save registers lost on suspend
+ *	@dev: our DRM device
+ *
+ *	Save the state we need in order to be able to restore the interface
+ *	upon resume from suspend
+ */
+static int oaktrail_save_display_registers(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_save_area *regs = &dev_priv->regs;
+	struct psb_pipe *p = &regs->pipe[0];
+	int i;
+	u32 pp_stat;
+
+	/* Display arbitration control + watermarks */
+	regs->psb.saveDSPARB = PSB_RVDC32(DSPARB);
+	regs->psb.saveDSPFW1 = PSB_RVDC32(DSPFW1);
+	regs->psb.saveDSPFW2 = PSB_RVDC32(DSPFW2);
+	regs->psb.saveDSPFW3 = PSB_RVDC32(DSPFW3);
+	regs->psb.saveDSPFW4 = PSB_RVDC32(DSPFW4);
+	regs->psb.saveDSPFW5 = PSB_RVDC32(DSPFW5);
+	regs->psb.saveDSPFW6 = PSB_RVDC32(DSPFW6);
+	regs->psb.saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+	/* Pipe & plane A info */
+	p->conf = PSB_RVDC32(PIPEACONF);
+	p->src = PSB_RVDC32(PIPEASRC);
+	p->fp0 = PSB_RVDC32(MRST_FPA0);
+	p->fp1 = PSB_RVDC32(MRST_FPA1);
+	p->dpll = PSB_RVDC32(MRST_DPLL_A);
+	p->htotal = PSB_RVDC32(HTOTAL_A);
+	p->hblank = PSB_RVDC32(HBLANK_A);
+	p->hsync = PSB_RVDC32(HSYNC_A);
+	p->vtotal = PSB_RVDC32(VTOTAL_A);
+	p->vblank = PSB_RVDC32(VBLANK_A);
+	p->vsync = PSB_RVDC32(VSYNC_A);
+	regs->psb.saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
+	p->cntr = PSB_RVDC32(DSPACNTR);
+	p->stride = PSB_RVDC32(DSPASTRIDE);
+	p->addr = PSB_RVDC32(DSPABASE);
+	p->surf = PSB_RVDC32(DSPASURF);
+	p->linoff = PSB_RVDC32(DSPALINOFF);
+	p->tileoff = PSB_RVDC32(DSPATILEOFF);
+
+	/* Save cursor regs */
+	regs->psb.saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
+	regs->psb.saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
+	regs->psb.saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
+
+	/* Save palette (gamma) */
+	for (i = 0; i < 256; i++)
+		p->palette[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+
+	if (dev_priv->hdmi_priv)
+		oaktrail_hdmi_save(dev);
+
+	/* Save performance state */
+	regs->psb.savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
+
+	/* LVDS state */
+	regs->psb.savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
+	regs->psb.savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+	regs->psb.savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
+	regs->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
+	regs->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
+	regs->psb.saveLVDS = PSB_RVDC32(LVDS);
+	regs->psb.savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+	regs->psb.savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
+	regs->psb.savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
+	regs->psb.savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
+
+	/* HW overlay */
+	regs->psb.saveOV_OVADD = PSB_RVDC32(OV_OVADD);
+	regs->psb.saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+	regs->psb.saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+	regs->psb.saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+	regs->psb.saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+	regs->psb.saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+	regs->psb.saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+
+	/* DPST registers */
+	regs->psb.saveHISTOGRAM_INT_CONTROL_REG =
+					PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+	regs->psb.saveHISTOGRAM_LOGIC_CONTROL_REG =
+					PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+	regs->psb.savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+	if (dev_priv->iLVDS_enable) {
+		/* Shut down the panel */
+		PSB_WVDC32(0, PP_CONTROL);
+
+		do {
+			pp_stat = PSB_RVDC32(PP_STATUS);
+		} while (pp_stat & 0x80000000);
+
+		/* Turn off the plane */
+		PSB_WVDC32(0x58000000, DSPACNTR);
+		/* Trigger the plane disable */
+		PSB_WVDC32(0, DSPASURF);
+
+		/* Wait ~4 ms */
+		msleep(4);
+
+		/* Turn off pipe */
+		PSB_WVDC32(0x0, PIPEACONF);
+		/* Wait ~8 ms */
+		msleep(8);
+
+		/* Turn off PLLs */
+		PSB_WVDC32(0, MRST_DPLL_A);
+	}
+	return 0;
+}
+
+/**
+ *	oaktrail_restore_display_registers	-	restore lost register state
+ *	@dev: our DRM device
+ *
+ *	Restore register state that was lost during suspend and resume.
+ */
+static int oaktrail_restore_display_registers(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_save_area *regs = &dev_priv->regs;
+	struct psb_pipe *p = &regs->pipe[0];
+	u32 pp_stat;
+	int i;
+
+	/* Display arbitration + watermarks */
+	PSB_WVDC32(regs->psb.saveDSPARB, DSPARB);
+	PSB_WVDC32(regs->psb.saveDSPFW1, DSPFW1);
+	PSB_WVDC32(regs->psb.saveDSPFW2, DSPFW2);
+	PSB_WVDC32(regs->psb.saveDSPFW3, DSPFW3);
+	PSB_WVDC32(regs->psb.saveDSPFW4, DSPFW4);
+	PSB_WVDC32(regs->psb.saveDSPFW5, DSPFW5);
+	PSB_WVDC32(regs->psb.saveDSPFW6, DSPFW6);
+	PSB_WVDC32(regs->psb.saveCHICKENBIT, DSPCHICKENBIT);
+
+	/* Make sure the VGA plane is off; it initializes to on after reset! */
+	PSB_WVDC32(0x80000000, VGACNTRL);
+
+	/* set the plls */
+	PSB_WVDC32(p->fp0, MRST_FPA0);
+	PSB_WVDC32(p->fp1, MRST_FPA1);
+
+	/* Actually enable it */
+	PSB_WVDC32(p->dpll, MRST_DPLL_A);
+	DRM_UDELAY(150);
+
+	/* Restore mode */
+	PSB_WVDC32(p->htotal, HTOTAL_A);
+	PSB_WVDC32(p->hblank, HBLANK_A);
+	PSB_WVDC32(p->hsync, HSYNC_A);
+	PSB_WVDC32(p->vtotal, VTOTAL_A);
+	PSB_WVDC32(p->vblank, VBLANK_A);
+	PSB_WVDC32(p->vsync, VSYNC_A);
+	PSB_WVDC32(p->src, PIPEASRC);
+	PSB_WVDC32(regs->psb.saveBCLRPAT_A, BCLRPAT_A);
+
+	/* Restore performance mode */
+	PSB_WVDC32(regs->psb.savePERF_MODE, MRST_PERF_MODE);
+
+	/* Enable the pipe */
+	if (dev_priv->iLVDS_enable)
+		PSB_WVDC32(p->conf, PIPEACONF);
+
+	/* Set up the plane */
+	PSB_WVDC32(p->linoff, DSPALINOFF);
+	PSB_WVDC32(p->stride, DSPASTRIDE);
+	PSB_WVDC32(p->tileoff, DSPATILEOFF);
+
+	/* Enable the plane */
+	PSB_WVDC32(p->cntr, DSPACNTR);
+	PSB_WVDC32(p->surf, DSPASURF);
+
+	/* Enable Cursor A */
+	PSB_WVDC32(regs->psb.saveDSPACURSOR_CTRL, CURACNTR);
+	PSB_WVDC32(regs->psb.saveDSPACURSOR_POS, CURAPOS);
+	PSB_WVDC32(regs->psb.saveDSPACURSOR_BASE, CURABASE);
+
+	/* Restore palette (gamma) */
+	for (i = 0; i < 256; i++)
+		PSB_WVDC32(p->palette[i], PALETTE_A + (i << 2));
+
+	if (dev_priv->hdmi_priv)
+		oaktrail_hdmi_restore(dev);
+
+	if (dev_priv->iLVDS_enable) {
+		PSB_WVDC32(regs->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
+		PSB_WVDC32(regs->psb.saveLVDS, LVDS); /*port 61180h*/
+		PSB_WVDC32(regs->psb.savePFIT_CONTROL, PFIT_CONTROL);
+		PSB_WVDC32(regs->psb.savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+		PSB_WVDC32(regs->psb.savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
+		PSB_WVDC32(regs->saveBLC_PWM_CTL, BLC_PWM_CTL);
+		PSB_WVDC32(regs->psb.savePP_ON_DELAYS, LVDSPP_ON);
+		PSB_WVDC32(regs->psb.savePP_OFF_DELAYS, LVDSPP_OFF);
+		PSB_WVDC32(regs->psb.savePP_DIVISOR, PP_CYCLE);
+		PSB_WVDC32(regs->psb.savePP_CONTROL, PP_CONTROL);
+	}
+
+	/* Wait for cycle delay */
+	do {
+		pp_stat = PSB_RVDC32(PP_STATUS);
+	} while (pp_stat & 0x08000000);
+
+	/* Wait for panel power up */
+	do {
+		pp_stat = PSB_RVDC32(PP_STATUS);
+	} while (pp_stat & 0x10000000);
+
+	/* Restore HW overlay */
+	PSB_WVDC32(regs->psb.saveOV_OVADD, OV_OVADD);
+	PSB_WVDC32(regs->psb.saveOV_OGAMC0, OV_OGAMC0);
+	PSB_WVDC32(regs->psb.saveOV_OGAMC1, OV_OGAMC1);
+	PSB_WVDC32(regs->psb.saveOV_OGAMC2, OV_OGAMC2);
+	PSB_WVDC32(regs->psb.saveOV_OGAMC3, OV_OGAMC3);
+	PSB_WVDC32(regs->psb.saveOV_OGAMC4, OV_OGAMC4);
+	PSB_WVDC32(regs->psb.saveOV_OGAMC5, OV_OGAMC5);
+
+	/* DPST registers */
+	PSB_WVDC32(regs->psb.saveHISTOGRAM_INT_CONTROL_REG,
+						HISTOGRAM_INT_CONTROL);
+	PSB_WVDC32(regs->psb.saveHISTOGRAM_LOGIC_CONTROL_REG,
+						HISTOGRAM_LOGIC_CONTROL);
+	PSB_WVDC32(regs->psb.savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);
+
+	return 0;
+}
+
+/**
+ *	oaktrail_power_down	-	power down the display island
+ *	@dev: our DRM device
+ *
+ *	Power down the display interface of our device
+ */
+static int oaktrail_power_down(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 pwr_mask;
+	u32 pwr_sts;
+
+	pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+	outl(pwr_mask, dev_priv->ospm_base + PSB_PM_SSC);
+
+	while (true) {
+		pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+		if ((pwr_sts & pwr_mask) == pwr_mask)
+			break;
+		else
+			udelay(10);
+	}
+	return 0;
+}
+
+/**
+ *	oaktrail_power_up	-	restore power to the display island
+ *	@dev: our DRM device
+ *
+ *	Restore power to the specified island(s) (powergating)
+ */
+static int oaktrail_power_up(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+	u32 pwr_sts, pwr_cnt;
+
+	pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+	pwr_cnt &= ~pwr_mask;
+	outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
+
+	while (true) {
+		pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+		if ((pwr_sts & pwr_mask) == 0)
+			break;
+		else
+			udelay(10);
+	}
+	return 0;
+}
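+
+/*
+ * Reading oaktrail_power_down() and oaktrail_power_up() together:
+ * PSB_PM_SSC is the power-gate control register and PSB_PM_SSS the
+ * matching status register.  Set bits mean "island gated (off)", which
+ * is why power_down polls for the mask to appear in SSS while power_up
+ * polls for it to clear.
+ */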
+
+/* Oaktrail */
+static const struct psb_offset oaktrail_regmap[2] = {
+	{
+		.fp0 = MRST_FPA0,
+		.fp1 = MRST_FPA1,
+		.cntr = DSPACNTR,
+		.conf = PIPEACONF,
+		.src = PIPEASRC,
+		.dpll = MRST_DPLL_A,
+		.htotal = HTOTAL_A,
+		.hblank = HBLANK_A,
+		.hsync = HSYNC_A,
+		.vtotal = VTOTAL_A,
+		.vblank = VBLANK_A,
+		.vsync = VSYNC_A,
+		.stride = DSPASTRIDE,
+		.size = DSPASIZE,
+		.pos = DSPAPOS,
+		.surf = DSPASURF,
+		.addr = MRST_DSPABASE,
+		.base = MRST_DSPABASE,
+		.status = PIPEASTAT,
+		.linoff = DSPALINOFF,
+		.tileoff = DSPATILEOFF,
+		.palette = PALETTE_A,
+	},
+	{
+		.fp0 = FPB0,
+		.fp1 = FPB1,
+		.cntr = DSPBCNTR,
+		.conf = PIPEBCONF,
+		.src = PIPEBSRC,
+		.dpll = DPLL_B,
+		.htotal = HTOTAL_B,
+		.hblank = HBLANK_B,
+		.hsync = HSYNC_B,
+		.vtotal = VTOTAL_B,
+		.vblank = VBLANK_B,
+		.vsync = VSYNC_B,
+		.stride = DSPBSTRIDE,
+		.size = DSPBSIZE,
+		.pos = DSPBPOS,
+		.surf = DSPBSURF,
+		.addr = DSPBBASE,
+		.base = DSPBBASE,
+		.status = PIPEBSTAT,
+		.linoff = DSPBLINOFF,
+		.tileoff = DSPBTILEOFF,
+		.palette = PALETTE_B,
+	},
+};
+
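+/*
+ * The regmap above lets the shared, pipe-agnostic helpers elsewhere in
+ * the driver address registers by pipe index rather than hard-coding
+ * the A/B register names.  A hedged usage sketch (illustrative, not
+ * verbatim from this file):
+ *
+ *	const struct psb_offset *map = &dev_priv->regmap[pipe];
+ *
+ *	REG_WRITE(map->htotal, ...);
+ *	REG_WRITE(map->dpll, ...);
+ */
+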
+static int oaktrail_chip_setup(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (pci_enable_msi(dev->pdev))
+		dev_warn(dev->dev, "Enabling MSI failed!\n");
+
+	dev_priv->regmap = oaktrail_regmap;
+
+	ret = mid_chip_setup(dev);
+	if (ret < 0)
+		return ret;
+	if (!dev_priv->has_gct) {
+		/* Now pull the BIOS data */
+		psb_intel_opregion_init(dev);
+		psb_intel_init_bios(dev);
+	}
+	oaktrail_hdmi_setup(dev);
+	return 0;
+}
+
+static void oaktrail_teardown(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	oaktrail_hdmi_teardown(dev);
+	if (!dev_priv->has_gct)
+		psb_intel_destroy_bios(dev);
+}
+
+const struct psb_ops oaktrail_chip_ops = {
+	.name = "Oaktrail",
+	.accel_2d = 1,
+	.pipes = 2,
+	.crtcs = 2,
+	.hdmi_mask = (1 << 1),
+	.lvds_mask = (1 << 0),
+	.cursor_needs_phys = 0,
+	.sgx_offset = MRST_SGX_OFFSET,
+
+	.chip_setup = oaktrail_chip_setup,
+	.chip_teardown = oaktrail_teardown,
+	.crtc_helper = &oaktrail_helper_funcs,
+	.crtc_funcs = &psb_intel_crtc_funcs,
+
+	.output_init = oaktrail_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	.backlight_init = oaktrail_backlight_init,
+#endif
+
+	.save_regs = oaktrail_save_display_registers,
+	.restore_regs = oaktrail_restore_display_registers,
+	.power_down = oaktrail_power_down,
+	.power_up = oaktrail_power_up,
+
+	.i2c_bus = 1,
+};
diff --git a/linux-imx/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/linux-imx/drivers/gpu/drm/gma500/oaktrail_hdmi.c
new file mode 100644
index 0000000..f036f1f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -0,0 +1,870 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Li Peng <peng.li@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_drv.h"
+
+#define HDMI_READ(reg)		readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val)	writel(val, hdmi_dev->regs + (reg))
+
+#define HDMI_HCR	0x1000
+#define HCR_ENABLE_HDCP		(1 << 5)
+#define HCR_ENABLE_AUDIO	(1 << 2)
+#define HCR_ENABLE_PIXEL	(1 << 1)
+#define HCR_ENABLE_TMDS		(1 << 0)
+
+#define HDMI_HICR	0x1004
+#define HDMI_HSR	0x1008
+#define HDMI_HISR	0x100C
+#define HDMI_DETECT_HDP		(1 << 0)
+
+#define HDMI_VIDEO_REG	0x3000
+#define HDMI_UNIT_EN		(1 << 7)
+#define HDMI_MODE_OUTPUT	(1 << 0)
+#define HDMI_HBLANK_A	0x3100
+
+#define HDMI_AUDIO_CTRL	0x4000
+#define HDMI_ENABLE_AUDIO	(1 << 0)
+
+#define PCH_HTOTAL_B	0x3100
+#define PCH_HBLANK_B	0x3104
+#define PCH_HSYNC_B	0x3108
+#define PCH_VTOTAL_B	0x310C
+#define PCH_VBLANK_B	0x3110
+#define PCH_VSYNC_B	0x3114
+#define PCH_PIPEBSRC	0x311C
+
+#define PCH_PIPEB_DSL	0x3800
+#define PCH_PIPEB_SLC	0x3804
+#define PCH_PIPEBCONF	0x3808
+#define PCH_PIPEBSTAT	0x3824
+
+#define CDVO_DFT	0x5000
+#define CDVO_SLEWRATE	0x5004
+#define CDVO_STRENGTH	0x5008
+#define CDVO_RCOMP	0x500C
+
+#define DPLL_CTRL       0x6000
+#define DPLL_PDIV_SHIFT		16
+#define DPLL_PDIV_MASK		(0xf << 16)
+#define DPLL_PWRDN		(1 << 4)
+#define DPLL_RESET		(1 << 3)
+#define DPLL_FASTEN		(1 << 2)
+#define DPLL_ENSTAT		(1 << 1)
+#define DPLL_DITHEN		(1 << 0)
+
+#define DPLL_DIV_CTRL   0x6004
+#define DPLL_CLKF_MASK		0xffffffc0
+#define DPLL_CLKR_MASK		(0x3f)
+
+#define DPLL_CLK_ENABLE 0x6008
+#define DPLL_EN_DISP		(1 << 31)
+#define DPLL_SEL_HDMI		(1 << 8)
+#define DPLL_EN_HDMI		(1 << 1)
+#define DPLL_EN_VGA		(1 << 0)
+
+#define DPLL_ADJUST     0x600C
+#define DPLL_STATUS     0x6010
+#define DPLL_UPDATE     0x6014
+#define DPLL_DFT        0x6020
+
+struct intel_range {
+	int	min, max;
+};
+
+struct oaktrail_hdmi_limit {
+	struct intel_range vco, np, nr, nf;
+};
+
+struct oaktrail_hdmi_clock {
+	int np;
+	int nr;
+	int nf;
+	int dot;
+};
+
+#define VCO_MIN		320000
+#define VCO_MAX		1650000
+#define	NP_MIN		1
+#define	NP_MAX		15
+#define	NR_MIN		1
+#define	NR_MAX		64
+#define NF_MIN		2
+#define NF_MAX		4095
+
+static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
+	.vco = { .min = VCO_MIN,		.max = VCO_MAX },
+	.np  = { .min = NP_MIN,			.max = NP_MAX  },
+	.nr  = { .min = NR_MIN,			.max = NR_MAX  },
+	.nf  = { .min = NF_MIN,			.max = NF_MAX  },
+};
+
+static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+	HDMI_WRITE(HDMI_HCR, 0x67);
+	HDMI_READ(HDMI_HCR);
+
+	HDMI_WRITE(0x51a8, 0x10);
+	HDMI_READ(0x51a8);
+
+	HDMI_WRITE(HDMI_AUDIO_CTRL, 0x1);
+	HDMI_READ(HDMI_AUDIO_CTRL);
+}
+
+static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+	HDMI_WRITE(0x51a8, 0x0);
+	HDMI_READ(0x51a8);
+
+	HDMI_WRITE(HDMI_AUDIO_CTRL, 0x0);
+	HDMI_READ(HDMI_AUDIO_CTRL);
+
+	HDMI_WRITE(HDMI_HCR, 0x47);
+	HDMI_READ(HDMI_HCR);
+}
+
+static void wait_for_vblank(struct drm_device *dev)
+{
+	/* Wait for 20 ms, i.e. one cycle at 50 Hz. */
+	mdelay(20);
+}
+
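+/*
+ * Commentary (an inference from the arithmetic, not from any public
+ * datasheet): new_crtc_htotal below re-expresses the line duration in
+ * cycles of what appears to be a fixed 200 MHz reference clock, since
+ * (crtc_htotal - 1) * 200 * 1000 / clock is exactly line-time * 200 MHz
+ * for a pixel clock given in kHz.  Worked example for the 65 MHz
+ * 1024x768 mode (crtc_htotal = 1344, clock = 65000):
+ * 1343 * 200000 / 65000 = 4132 = 0x1024, matching the reference value
+ * quoted inside the function.
+ */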
+static unsigned int htotal_calculate(struct drm_display_mode *mode)
+{
+	u32 new_crtc_htotal;
+
+	/*
+	 * 1024 x 768  new_crtc_htotal = 0x1024;
+	 * 1280 x 1024 new_crtc_htotal = 0x0c34;
+	 */
+	new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
+
+	DRM_DEBUG_KMS("new crtc htotal 0x%4x\n", new_crtc_htotal);
+	return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
+}
+
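+/*
+ * Worked example for the values quoted in the function body (an
+ * illustration, assuming refclk = 25000 and target = 65000 for the
+ * 65 MHz 1024x768 mode): np_max = 1650000 / 650000 = 2, nr_max =
+ * DIV_ROUND_UP(25000000, 650000) = 39, giving np = 1, nr = 39 and
+ * nf = DIV_ROUND_CLOSEST(25350000, 25000) = 1014.  These are stored as
+ * np = 1, nr - 1 = 0x26 and nf << 14 = 0x0fd8000.
+ */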
+static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+				int refclk, struct oaktrail_hdmi_clock *best_clock)
+{
+	int np_min, np_max, nr_min, nr_max;
+	int np, nr, nf;
+
+	np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
+	np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
+	if (np_min < oaktrail_hdmi_limit.np.min)
+		np_min = oaktrail_hdmi_limit.np.min;
+	if (np_max > oaktrail_hdmi_limit.np.max)
+		np_max = oaktrail_hdmi_limit.np.max;
+
+	nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
+	nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
+	if (nr_min < oaktrail_hdmi_limit.nr.min)
+		nr_min = oaktrail_hdmi_limit.nr.min;
+	if (nr_max > oaktrail_hdmi_limit.nr.max)
+		nr_max = oaktrail_hdmi_limit.nr.max;
+
+	np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
+	nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
+	nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
+	DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
+
+	/*
+	 * 1024 x 768  np = 1; nr = 0x26; nf = 0x0fd8000;
+	 * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
+	 */
+	best_clock->np = np;
+	best_clock->nr = nr - 1;
+	best_clock->nf = (nf << 14);
+}
+
+static void scu_busy_loop(void __iomem *scu_base)
+{
+	u32 status = 0;
+	u32 loop_count = 0;
+
+	status = readl(scu_base + 0x04);
+	while (status & 1) {
+		udelay(1); /* SCU processing time is a few microseconds */
+		status = readl(scu_base + 0x04);
+		loop_count++;
+		/* bail out if the SCU doesn't clear the busy bit after many retries */
+		if (loop_count > 1000) {
+			DRM_DEBUG_KMS("SCU IPC timed out");
+			return;
+		}
+	}
+}
+
+/*
+ *	You don't want to know, you really really don't want to know....
+ *
+ *	This is magic. However it's safe magic because of the way the platform
+ *	works and it is necessary magic.
+ */
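+/*
+ * A best-effort reading of the sequence (the SCU IPC registers are not
+ * documented here, so treat this as an assumption): offset 0x0c takes
+ * the target register address, 0x80 the value, 0x00 the command word
+ * (0x42005), and 0x04 is the busy bit polled by scu_busy_loop().
+ */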
+static void oaktrail_hdmi_reset(struct drm_device *dev)
+{
+	void __iomem *base;
+	unsigned long scu_ipc_mmio = 0xff11c000UL;
+	int scu_len = 1024;
+
+	base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
+	if (base == NULL) {
+		DRM_ERROR("failed to map scu mmio\n");
+		return;
+	}
+
+	/* scu ipc: assert hdmi controller reset */
+	writel(0xff11d118, base + 0x0c);
+	writel(0x7fffffdf, base + 0x80);
+	writel(0x42005, base + 0x0);
+	scu_busy_loop(base);
+
+	/* scu ipc: de-assert hdmi controller reset */
+	writel(0xff11d118, base + 0x0c);
+	writel(0x7fffffff, base + 0x80);
+	writel(0x42005, base + 0x0);
+	scu_busy_loop(base);
+
+	iounmap(base);
+}
+
+int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+			    struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode,
+			    int x, int y,
+			    struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+	int pipe = 1;
+	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+	int refclk;
+	struct oaktrail_hdmi_clock clock;
+	u32 dspcntr, pipeconf, dpll, temp;
+	int dspcntr_reg = DSPBCNTR;
+
+	if (!gma_power_begin(dev, true))
+		return 0;
+
+	/* Disable the VGA plane that we never use */
+	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+	/* Disable dpll if necessary */
+	dpll = REG_READ(DPLL_CTRL);
+	if ((dpll & DPLL_PWRDN) == 0) {
+		REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
+		REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
+		REG_WRITE(DPLL_STATUS, 0x1);
+	}
+	udelay(150);
+
+	/* Reset controller */
+	oaktrail_hdmi_reset(dev);
+
+	/* program and enable dpll */
+	refclk = 25000;
+	oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+
+	/* Set the DPLL */
+	dpll = REG_READ(DPLL_CTRL);
+	dpll &= ~DPLL_PDIV_MASK;
+	dpll &= ~(DPLL_PWRDN | DPLL_RESET);
+	REG_WRITE(DPLL_CTRL, 0x00000008);
+	REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
+	REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
+	REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
+	REG_WRITE(DPLL_UPDATE, 0x80000000);
+	REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
+	udelay(150);
+
+	/* configure HDMI */
+	HDMI_WRITE(0x1004, 0x1fd);
+	HDMI_WRITE(0x2000, 0x1);
+	HDMI_WRITE(0x2008, 0x0);
+	HDMI_WRITE(0x3130, 0x8);
+	HDMI_WRITE(0x101c, 0x1800810);
+
+	temp = htotal_calculate(adjusted_mode);
+	REG_WRITE(htot_reg, temp);
+	REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+	REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+	REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+	REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+	REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+	REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) |  (mode->crtc_vdisplay - 1));
+
+	REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+	REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+	REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+	REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+	REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+	REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+	REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) |  (mode->crtc_vdisplay - 1));
+
+	temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+	HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) |  temp);
+
+	REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+	REG_WRITE(dsppos_reg, 0);
+
+	/* Flush the plane changes */
+	{
+		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+	}
+
+	/* Set up the display plane register */
+	dspcntr = REG_READ(dspcntr_reg);
+	dspcntr |= DISPPLANE_GAMMA_ENABLE;
+	dspcntr |= DISPPLANE_SEL_PIPE_B;
+	dspcntr |= DISPLAY_PLANE_ENABLE;
+
+	/* setup pipeconf */
+	pipeconf = REG_READ(pipeconf_reg);
+	pipeconf |= PIPEACONF_ENABLE;
+
+	REG_WRITE(pipeconf_reg, pipeconf);
+	REG_READ(pipeconf_reg);
+
+	REG_WRITE(PCH_PIPEBCONF, pipeconf);
+	REG_READ(PCH_PIPEBCONF);
+	wait_for_vblank(dev);
+
+	REG_WRITE(dspcntr_reg, dspcntr);
+	wait_for_vblank(dev);
+
+	gma_power_end(dev);
+
+	return 0;
+}
+
+void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	u32 temp;
+
+	DRM_DEBUG_KMS("%s %d\n", __func__, mode);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_OFF:
+		REG_WRITE(VGACNTRL, 0x80000000);
+
+		/* Disable plane */
+		temp = REG_READ(DSPBCNTR);
+		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+			REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
+			REG_READ(DSPBCNTR);
+			/* Flush the plane changes */
+			REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+			REG_READ(DSPBSURF);
+		}
+
+		/* Disable pipe B */
+		temp = REG_READ(PIPEBCONF);
+		if ((temp & PIPEACONF_ENABLE) != 0) {
+			REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+			REG_READ(PIPEBCONF);
+		}
+
+		/* Disable LNW Pipes, etc */
+		temp = REG_READ(PCH_PIPEBCONF);
+		if ((temp & PIPEACONF_ENABLE) != 0) {
+			REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+			REG_READ(PCH_PIPEBCONF);
+		}
+
+		/* wait for pipe off */
+		udelay(150);
+
+		/* Disable dpll */
+		temp = REG_READ(DPLL_CTRL);
+		if ((temp & DPLL_PWRDN) == 0) {
+			REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
+			REG_WRITE(DPLL_STATUS, 0x1);
+		}
+
+		/* wait for dpll off */
+		udelay(150);
+
+		break;
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+		/* Enable dpll */
+		temp = REG_READ(DPLL_CTRL);
+		if ((temp & DPLL_PWRDN) != 0) {
+			REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
+			temp = REG_READ(DPLL_CLK_ENABLE);
+			REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
+			REG_READ(DPLL_CLK_ENABLE);
+		}
+		/* wait for dpll warm up */
+		udelay(150);
+
+		/* Enable pipe B */
+		temp = REG_READ(PIPEBCONF);
+		if ((temp & PIPEACONF_ENABLE) == 0) {
+			REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
+			REG_READ(PIPEBCONF);
+		}
+
+		/* Enable LNW Pipe B */
+		temp = REG_READ(PCH_PIPEBCONF);
+		if ((temp & PIPEACONF_ENABLE) == 0) {
+			REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
+			REG_READ(PCH_PIPEBCONF);
+		}
+
+		wait_for_vblank(dev);
+
+		/* Enable plane */
+		temp = REG_READ(DSPBCNTR);
+		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+			REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+			REG_READ(DSPBSURF);
+		}
+
+		psb_intel_crtc_load_lut(crtc);
+	}
+
+	/* DSPARB */
+	REG_WRITE(DSPARB, 0x00003fbf);
+
+	/* FW1 */
+	REG_WRITE(0x70034, 0x3f880a0a);
+
+	/* FW2 */
+	REG_WRITE(0x70038, 0x0b060808);
+
+	/* FW4 */
+	REG_WRITE(0x70050, 0x08030404);
+
+	/* FW5 */
+	REG_WRITE(0x70054, 0x04040404);
+
+	/* LNC Chicken Bits - Squawk! */
+	REG_WRITE(0x70400, 0x4000);
+
+	return;
+}
+
+static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+	static int dpms_mode = -1;
+
+	struct drm_device *dev = encoder->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+	u32 temp;
+
+	if (dpms_mode == mode)
+		return;
+
+	if (mode != DRM_MODE_DPMS_ON)
+		temp = 0x0;
+	else
+		temp = 0x99;
+
+	dpms_mode = mode;
+	HDMI_WRITE(HDMI_VIDEO_REG, temp);
+}
+
+static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	if (mode->clock > 165000)
+		return MODE_CLOCK_HIGH;
+	if (mode->clock < 20000)
+		return MODE_CLOCK_LOW;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	return MODE_OK;
+}
+
+static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static enum drm_connector_status
+oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
+{
+	enum drm_connector_status status;
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+	u32 temp;
+
+	temp = HDMI_READ(HDMI_HSR);
+	DRM_DEBUG_KMS("HDMI_HSR %x\n", temp);
+
+	if ((temp & HDMI_DETECT_HDP) != 0)
+		status = connector_status_connected;
+	else
+		status = connector_status_disconnected;
+
+	return status;
+}
+
+static const unsigned char raw_edid[] = {
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0xac, 0x2f, 0xa0,
+	0x53, 0x55, 0x33, 0x30, 0x16, 0x13, 0x01, 0x03, 0x0e, 0x3a, 0x24, 0x78,
+	0xea, 0xe9, 0xf5, 0xac, 0x51, 0x30, 0xb4, 0x25, 0x11, 0x50, 0x54, 0xa5,
+	0x4b, 0x00, 0x81, 0x80, 0xa9, 0x40, 0x71, 0x4f, 0xb3, 0x00, 0x01, 0x01,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x46, 0x6c, 0x21, 0x00, 0x00, 0x1a,
+	0x00, 0x00, 0x00, 0xff, 0x00, 0x47, 0x4e, 0x37, 0x32, 0x31, 0x39, 0x35,
+	0x52, 0x30, 0x33, 0x55, 0x53, 0x0a, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x44,
+	0x45, 0x4c, 0x4c, 0x20, 0x32, 0x37, 0x30, 0x39, 0x57, 0x0a, 0x20, 0x20,
+	0x00, 0x00, 0x00, 0xfd, 0x00, 0x38, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a,
+	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d
+};
+
+static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
+{
+	struct i2c_adapter *i2c_adap;
+	struct edid *edid;
+	int ret = 0;
+
+	/*
+	 *	FIXME: We need to figure this lot out. In theory we can
+	 *	read the EDID somehow but I've yet to find working reference
+	 *	code.
+	 */
+	i2c_adap = i2c_get_adapter(3);
+	if (i2c_adap == NULL) {
+		DRM_ERROR("No ddc adapter available!\n");
+		edid = (struct edid *)raw_edid;
+	} else {
+		edid = (struct edid *)raw_edid;
+		/* FIXME ? edid = drm_get_edid(connector, i2c_adap); */
+	}
+
+	if (edid) {
+		drm_mode_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+	}
+	return ret;
+}
+
+static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+
+	oaktrail_hdmi_audio_enable(dev);
+	return;
+}
+
+static void oaktrail_hdmi_destroy(struct drm_connector *connector)
+{
+	return;
+}
+
+static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
+	.dpms = oaktrail_hdmi_dpms,
+	.mode_fixup = oaktrail_hdmi_mode_fixup,
+	.prepare = psb_intel_encoder_prepare,
+	.mode_set = oaktrail_hdmi_mode_set,
+	.commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_helper_funcs
+					oaktrail_hdmi_connector_helper_funcs = {
+	.get_modes = oaktrail_hdmi_get_modes,
+	.mode_valid = oaktrail_hdmi_mode_valid,
+	.best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = oaktrail_hdmi_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = oaktrail_hdmi_destroy,
+};
+
+static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
+	.destroy = oaktrail_hdmi_enc_destroy,
+};
+
+void oaktrail_hdmi_init(struct drm_device *dev,
+					struct psb_intel_mode_device *mode_dev)
+{
+	struct psb_intel_encoder *psb_intel_encoder;
+	struct psb_intel_connector *psb_intel_connector;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+	if (!psb_intel_encoder)
+		return;
+
+	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+	if (!psb_intel_connector)
+		goto failed_connector;
+
+	connector = &psb_intel_connector->base;
+	encoder = &psb_intel_encoder->base;
+	drm_connector_init(dev, connector,
+			   &oaktrail_hdmi_connector_funcs,
+			   DRM_MODE_CONNECTOR_DVID);
+
+	drm_encoder_init(dev, encoder,
+			 &oaktrail_hdmi_enc_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
+	psb_intel_connector_attach_encoder(psb_intel_connector,
+					   psb_intel_encoder);
+
+	psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+	drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
+	drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
+
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+	drm_sysfs_connector_add(connector);
+	dev_info(dev->dev, "HDMI initialised.\n");
+
+	return;
+
+failed_connector:
+	kfree(psb_intel_encoder);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
+	{ 0 }
+};
+
+void oaktrail_hdmi_setup(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev;
+	struct oaktrail_hdmi_dev *hdmi_dev;
+	int ret;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL);
+	if (!pdev)
+		return;
+
+	hdmi_dev = kzalloc(sizeof(struct oaktrail_hdmi_dev), GFP_KERNEL);
+	if (!hdmi_dev) {
+		dev_err(dev->dev, "failed to allocate memory\n");
+		goto out;
+	}
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(dev->dev, "failed to enable hdmi controller\n");
+		goto free;
+	}
+
+	hdmi_dev->mmio = pci_resource_start(pdev, 0);
+	hdmi_dev->mmio_len = pci_resource_len(pdev, 0);
+	hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+	if (!hdmi_dev->regs) {
+		dev_err(dev->dev, "failed to map hdmi mmio\n");
+		goto free;
+	}
+
+	hdmi_dev->dev = pdev;
+	pci_set_drvdata(pdev, hdmi_dev);
+
+	/* Initialize i2c controller */
+	ret = oaktrail_hdmi_i2c_init(hdmi_dev->dev);
+	if (ret)
+		dev_err(dev->dev, "HDMI I2C initialization failed\n");
+
+	dev_priv->hdmi_priv = hdmi_dev;
+	oaktrail_hdmi_audio_disable(dev);
+
+	dev_info(dev->dev, "HDMI hardware present.\n");
+
+	return;
+
+free:
+	kfree(hdmi_dev);
+out:
+	return;
+}
+
+void oaktrail_hdmi_teardown(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+	struct pci_dev *pdev;
+
+	if (hdmi_dev) {
+		pdev = hdmi_dev->dev;
+		pci_set_drvdata(pdev, NULL);
+		oaktrail_hdmi_i2c_exit(pdev);
+		iounmap(hdmi_dev->regs);
+		kfree(hdmi_dev);
+		pci_dev_put(pdev);
+	}
+}
+
+/* save HDMI register state */
+void oaktrail_hdmi_save(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+	struct psb_state *regs = &dev_priv->regs.psb;
+	struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
+	int i;
+
+	/* dpll */
+	hdmi_dev->saveDPLL_CTRL = PSB_RVDC32(DPLL_CTRL);
+	hdmi_dev->saveDPLL_DIV_CTRL = PSB_RVDC32(DPLL_DIV_CTRL);
+	hdmi_dev->saveDPLL_ADJUST = PSB_RVDC32(DPLL_ADJUST);
+	hdmi_dev->saveDPLL_UPDATE = PSB_RVDC32(DPLL_UPDATE);
+	hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
+
+	/* pipe B */
+	pipeb->conf = PSB_RVDC32(PIPEBCONF);
+	pipeb->src = PSB_RVDC32(PIPEBSRC);
+	pipeb->htotal = PSB_RVDC32(HTOTAL_B);
+	pipeb->hblank = PSB_RVDC32(HBLANK_B);
+	pipeb->hsync = PSB_RVDC32(HSYNC_B);
+	pipeb->vtotal = PSB_RVDC32(VTOTAL_B);
+	pipeb->vblank = PSB_RVDC32(VBLANK_B);
+	pipeb->vsync = PSB_RVDC32(VSYNC_B);
+
+	hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
+	hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
+	hdmi_dev->savePCH_HTOTAL_B = PSB_RVDC32(PCH_HTOTAL_B);
+	hdmi_dev->savePCH_HBLANK_B = PSB_RVDC32(PCH_HBLANK_B);
+	hdmi_dev->savePCH_HSYNC_B  = PSB_RVDC32(PCH_HSYNC_B);
+	hdmi_dev->savePCH_VTOTAL_B = PSB_RVDC32(PCH_VTOTAL_B);
+	hdmi_dev->savePCH_VBLANK_B = PSB_RVDC32(PCH_VBLANK_B);
+	hdmi_dev->savePCH_VSYNC_B  = PSB_RVDC32(PCH_VSYNC_B);
+
+	/* plane */
+	pipeb->cntr = PSB_RVDC32(DSPBCNTR);
+	pipeb->stride = PSB_RVDC32(DSPBSTRIDE);
+	pipeb->addr = PSB_RVDC32(DSPBBASE);
+	pipeb->surf = PSB_RVDC32(DSPBSURF);
+	pipeb->linoff = PSB_RVDC32(DSPBLINOFF);
+	pipeb->tileoff = PSB_RVDC32(DSPBTILEOFF);
+
+	/* cursor B */
+	regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
+	regs->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
+	regs->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
+
+	/* save palette */
+	for (i = 0; i < 256; i++)
+		pipeb->palette[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+}
+
+/* restore HDMI register state */
+void oaktrail_hdmi_restore(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+	struct psb_state *regs = &dev_priv->regs.psb;
+	struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
+	int i;
+
+	/* dpll */
+	PSB_WVDC32(hdmi_dev->saveDPLL_CTRL, DPLL_CTRL);
+	PSB_WVDC32(hdmi_dev->saveDPLL_DIV_CTRL, DPLL_DIV_CTRL);
+	PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST);
+	PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE);
+	PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE);
+	DRM_UDELAY(150);
+
+	/* pipe */
+	PSB_WVDC32(pipeb->src, PIPEBSRC);
+	PSB_WVDC32(pipeb->htotal, HTOTAL_B);
+	PSB_WVDC32(pipeb->hblank, HBLANK_B);
+	PSB_WVDC32(pipeb->hsync,  HSYNC_B);
+	PSB_WVDC32(pipeb->vtotal, VTOTAL_B);
+	PSB_WVDC32(pipeb->vblank, VBLANK_B);
+	PSB_WVDC32(pipeb->vsync,  VSYNC_B);
+
+	PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
+	PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
+	PSB_WVDC32(hdmi_dev->savePCH_HBLANK_B, PCH_HBLANK_B);
+	PSB_WVDC32(hdmi_dev->savePCH_HSYNC_B,  PCH_HSYNC_B);
+	PSB_WVDC32(hdmi_dev->savePCH_VTOTAL_B, PCH_VTOTAL_B);
+	PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
+	PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B,  PCH_VSYNC_B);
+
+	PSB_WVDC32(pipeb->conf, PIPEBCONF);
+	PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
+
+	/* plane */
+	PSB_WVDC32(pipeb->linoff, DSPBLINOFF);
+	PSB_WVDC32(pipeb->stride, DSPBSTRIDE);
+	PSB_WVDC32(pipeb->tileoff, DSPBTILEOFF);
+	PSB_WVDC32(pipeb->cntr, DSPBCNTR);
+	PSB_WVDC32(pipeb->surf, DSPBSURF);
+
+	/* cursor B */
+	PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR);
+	PSB_WVDC32(regs->saveDSPBCURSOR_POS, CURBPOS);
+	PSB_WVDC32(regs->saveDSPBCURSOR_BASE, CURBBASE);
+
+	/* restore palette */
+	for (i = 0; i < 256; i++)
+		PSB_WVDC32(pipeb->palette[i], PALETTE_B + (i << 2));
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/linux-imx/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
new file mode 100644
index 0000000..1eb86c7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Li Peng <peng.li@intel.com>
+ */
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include "psb_drv.h"
+
+#define HDMI_READ(reg)		readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val)	writel(val, hdmi_dev->regs + (reg))
+
+#define HDMI_HCR	0x1000
+#define HCR_DETECT_HDP		(1 << 6)
+#define HCR_ENABLE_HDCP		(1 << 5)
+#define HCR_ENABLE_AUDIO	(1 << 2)
+#define HCR_ENABLE_PIXEL	(1 << 1)
+#define HCR_ENABLE_TMDS		(1 << 0)
+#define HDMI_HICR	0x1004
+#define HDMI_INTR_I2C_ERROR	(1 << 4)
+#define HDMI_INTR_I2C_FULL	(1 << 3)
+#define HDMI_INTR_I2C_DONE	(1 << 2)
+#define HDMI_INTR_HPD		(1 << 0)
+#define HDMI_HSR	0x1008
+#define HDMI_HISR	0x100C
+#define HDMI_HI2CRDB0	0x1200
+#define HDMI_HI2CHCR	0x1240
+#define HI2C_HDCP_WRITE		(0 << 2)
+#define HI2C_HDCP_RI_READ	(1 << 2)
+#define HI2C_HDCP_READ		(2 << 2)
+#define HI2C_EDID_READ		(3 << 2)
+#define HI2C_READ_CONTINUE	(1 << 1)
+#define HI2C_ENABLE_TRANSACTION	(1 << 0)
+
+#define HDMI_ICRH	0x1100
+#define HDMI_HI2CTDR0	0x1244
+#define HDMI_HI2CTDR1	0x1248
+
+#define I2C_STAT_INIT		0
+#define I2C_READ_DONE		1
+#define I2C_TRANSACTION_DONE	2
+
+struct hdmi_i2c_dev {
+	struct i2c_adapter *adap;
+	struct mutex i2c_lock;
+	struct completion complete;
+	int status;
+	struct i2c_msg *msg;
+	int buf_offset;
+};
+
+static void hdmi_i2c_irq_enable(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+	u32 temp;
+
+	temp = HDMI_READ(HDMI_HICR);
+	temp |= (HDMI_INTR_I2C_ERROR | HDMI_INTR_I2C_FULL | HDMI_INTR_I2C_DONE);
+	HDMI_WRITE(HDMI_HICR, temp);
+	HDMI_READ(HDMI_HICR);
+}
+
+static void hdmi_i2c_irq_disable(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+	HDMI_WRITE(HDMI_HICR, 0x0);
+	HDMI_READ(HDMI_HICR);
+}
+
+static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+	struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+	u32 temp;
+
+	i2c_dev->status = I2C_STAT_INIT;
+	i2c_dev->msg = pmsg;
+	i2c_dev->buf_offset = 0;
+	INIT_COMPLETION(i2c_dev->complete);
+
+	/* Enable I2C transaction */
+	temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
+	HDMI_WRITE(HDMI_HI2CHCR, temp);
+	HDMI_READ(HDMI_HI2CHCR);
+
+	while (i2c_dev->status != I2C_TRANSACTION_DONE)
+		wait_for_completion_interruptible_timeout(&i2c_dev->complete,
+								10 * HZ);
+
+	return 0;
+}
+
+static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+	/*
+	 * XXX: i2c writes don't seem to be needed for the EDID probe, so do nothing
+	 */
+	return 0;
+}
+
+static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap,
+				struct i2c_msg *pmsg,
+				int num)
+{
+	struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+	int i;
+
+	mutex_lock(&i2c_dev->i2c_lock);
+
+	/* Enable i2c unit */
+	HDMI_WRITE(HDMI_ICRH, 0x00008760);
+
+	/* Enable irq */
+	hdmi_i2c_irq_enable(hdmi_dev);
+	for (i = 0; i < num; i++) {
+		if (pmsg->len && pmsg->buf) {
+			if (pmsg->flags & I2C_M_RD)
+				xfer_read(adap, pmsg);
+			else
+				xfer_write(adap, pmsg);
+		}
+		pmsg++;         /* next message */
+	}
+
+	/* Disable irq */
+	hdmi_i2c_irq_disable(hdmi_dev);
+
+	mutex_unlock(&i2c_dev->i2c_lock);
+
+	return i;
+}
+
+static u32 oaktrail_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm oaktrail_hdmi_i2c_algorithm = {
+	.master_xfer	= oaktrail_hdmi_i2c_access,
+	.functionality  = oaktrail_hdmi_i2c_func,
+};
+
+static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
+	.name		= "oaktrail_hdmi_i2c",
+	.nr		= 3,
+	.owner		= THIS_MODULE,
+	.class		= I2C_CLASS_DDC,
+	.algo		= &oaktrail_hdmi_i2c_algorithm,
+};
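+
+/*
+ * Note that .nr = 3 above is what allows oaktrail_hdmi_get_modes() in
+ * oaktrail_hdmi.c to reach this bus via i2c_get_adapter(3).
+ */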
+
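+/*
+ * Drain the 16-dword (64-byte) hardware read buffer.  A standard
+ * 128-byte EDID block therefore arrives as two "buffer full"
+ * interrupts followed by the transaction-done interrupt.
+ */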
+static void hdmi_i2c_read(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+	struct i2c_msg *msg = i2c_dev->msg;
+	u8 *buf = msg->buf;
+	u32 temp;
+	int i, offset;
+
+	offset = i2c_dev->buf_offset;
+	for (i = 0; i < 0x10; i++) {
+		temp = HDMI_READ(HDMI_HI2CRDB0 + (i * 4));
+		memcpy(buf + (offset + i * 4), &temp, 4);
+	}
+	i2c_dev->buf_offset += (0x10 * 4);
+
+	/* clearing read buffer full intr */
+	temp = HDMI_READ(HDMI_HISR);
+	HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_FULL);
+	HDMI_READ(HDMI_HISR);
+
+	/* continue read transaction */
+	temp = HDMI_READ(HDMI_HI2CHCR);
+	HDMI_WRITE(HDMI_HI2CHCR, temp | HI2C_READ_CONTINUE);
+	HDMI_READ(HDMI_HI2CHCR);
+
+	i2c_dev->status = I2C_READ_DONE;
+	return;
+}
+
+static void hdmi_i2c_transaction_done(struct oaktrail_hdmi_dev *hdmi_dev)
+{
+	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+	u32 temp;
+
+	/* clear transaction done intr */
+	temp = HDMI_READ(HDMI_HISR);
+	HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_DONE);
+	HDMI_READ(HDMI_HISR);
+
+	temp = HDMI_READ(HDMI_HI2CHCR);
+	HDMI_WRITE(HDMI_HI2CHCR, temp & ~HI2C_ENABLE_TRANSACTION);
+	HDMI_READ(HDMI_HI2CHCR);
+
+	i2c_dev->status = I2C_TRANSACTION_DONE;
+	return;
+}
+
+static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
+{
+	struct oaktrail_hdmi_dev *hdmi_dev = dev;
+	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+	u32 stat;
+
+	stat = HDMI_READ(HDMI_HISR);
+
+	if (stat & HDMI_INTR_HPD) {
+		HDMI_WRITE(HDMI_HISR, stat | HDMI_INTR_HPD);
+		HDMI_READ(HDMI_HISR);
+	}
+
+	if (stat & HDMI_INTR_I2C_FULL)
+		hdmi_i2c_read(hdmi_dev);
+
+	if (stat & HDMI_INTR_I2C_DONE)
+		hdmi_i2c_transaction_done(hdmi_dev);
+
+	complete(&i2c_dev->complete);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * choose alternate function 2 of GPIO pin 52, 53,
+ * which is used by HDMI I2C logic
+ */
+static void oaktrail_hdmi_i2c_gpio_fix(void)
+{
+	void __iomem *base;
+	unsigned int gpio_base = 0xff12c000;
+	int gpio_len = 0x1000;
+	u32 temp;
+
+	base = ioremap((resource_size_t)gpio_base, gpio_len);
+	if (base == NULL) {
+		DRM_ERROR("gpio ioremap fail\n");
+		return;
+	}
+
+	temp = readl(base + 0x44);
+	DRM_DEBUG_DRIVER("old gpio val %x\n", temp);
+	writel((temp | 0x00000a00), (base +  0x44));
+	temp = readl(base + 0x44);
+	DRM_DEBUG_DRIVER("new gpio val %x\n", temp);
+
+	iounmap(base);
+}
+
+int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
+{
+	struct oaktrail_hdmi_dev *hdmi_dev;
+	struct hdmi_i2c_dev *i2c_dev;
+	int ret;
+
+	hdmi_dev = pci_get_drvdata(dev);
+
+	i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
+	if (i2c_dev == NULL) {
+		DRM_ERROR("Can't allocate interface\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
+	i2c_dev->status = I2C_STAT_INIT;
+	init_completion(&i2c_dev->complete);
+	mutex_init(&i2c_dev->i2c_lock);
+	i2c_set_adapdata(&oaktrail_hdmi_i2c_adapter, hdmi_dev);
+	hdmi_dev->i2c_dev = i2c_dev;
+
+	/* Enable HDMI I2C function on gpio */
+	oaktrail_hdmi_i2c_gpio_fix();
+
+	/* request irq */
+	ret = request_irq(dev->irq, oaktrail_hdmi_i2c_handler, IRQF_SHARED,
+			  oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
+	if (ret) {
+		DRM_ERROR("Failed to request IRQ for I2C controller\n");
+		goto err;
+	}
+
+	/* Adapter registration */
+	ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
+	return ret;
+
+err:
+	kfree(i2c_dev);
+exit:
+	return ret;
+}
+
+void oaktrail_hdmi_i2c_exit(struct pci_dev *dev)
+{
+	struct oaktrail_hdmi_dev *hdmi_dev;
+	struct hdmi_i2c_dev *i2c_dev;
+
+	hdmi_dev = pci_get_drvdata(dev);
+	i2c_del_adapter(&oaktrail_hdmi_i2c_adapter);
+
+	i2c_dev = hdmi_dev->i2c_dev;
+	kfree(i2c_dev);
+	free_irq(dev->irq, hdmi_dev);
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/oaktrail_lvds.c b/linux-imx/drivers/gpu/drm/gma500/oaktrail_lvds.c
new file mode 100644
index 0000000..325013a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -0,0 +1,448 @@
+/*
+ * Copyright © 2006-2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *	Dave Airlie <airlied@linux.ie>
+ *	Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <asm/mrst.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+/*
+ * The max/min PWM frequency is in BPCR[31:17].  The smallest value that
+ * fits in the 15-bit field is 1 (not 0); the hardware shifts it left by
+ * one bit to obtain the actual 16-bit value that the 15 bits correspond
+ * to.
+ */
+#define MRST_BLC_MAX_PWM_REG_FREQ	    0xFFFF
+#define BRIGHTNESS_MAX_LEVEL 100
+
+/**
+ * oaktrail_lvds_set_power - set the power state for the panel
+ * @dev: drm device
+ * @psb_intel_encoder: our encoder
+ * @on: true to power the panel on, false to power it off
+ */
+static void oaktrail_lvds_set_power(struct drm_device *dev,
+				struct psb_intel_encoder *psb_intel_encoder,
+				bool on)
+{
+	u32 pp_status;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (!gma_power_begin(dev, true))
+		return;
+
+	if (on) {
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+			  POWER_TARGET_ON);
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
+		dev_priv->is_lvds_on = true;
+		if (dev_priv->ops->lvds_bl_power)
+			dev_priv->ops->lvds_bl_power(dev, true);
+	} else {
+		if (dev_priv->ops->lvds_bl_power)
+			dev_priv->ops->lvds_bl_power(dev, false);
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+			  ~POWER_TARGET_ON);
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while (pp_status & PP_ON);
+		dev_priv->is_lvds_on = false;
+		pm_request_idle(&dev->pdev->dev);
+	}
+	gma_power_end(dev);
+}
+
+static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct psb_intel_encoder *psb_intel_encoder =
+						to_psb_intel_encoder(encoder);
+
+	if (mode == DRM_MODE_DPMS_ON)
+		oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+	else
+		oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+
+	/* XXX: We never power down the LVDS pairs. */
+}
+
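+/*
+ * For DRM_MODE_SCALE_ASPECT the mode_set below compares the cross
+ * products crtc_hdisplay * vdisplay and hdisplay * crtc_vdisplay to
+ * pick a panel fitter mode.  For example, an 800x600 (4:3) mode on a
+ * 1024x600 panel gives 1024 * 600 > 800 * 600, so the fitter is put
+ * into pillarbox mode.
+ */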
+static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector = NULL;
+	struct drm_crtc *crtc = encoder->crtc;
+	u32 lvds_port;
+	uint64_t v = DRM_MODE_SCALE_FULLSCREEN;
+
+	if (!gma_power_begin(dev, true))
+		return;
+
+	/*
+	 * The LVDS pin pair will already have been turned on in the
+	 * psb_intel_crtc_mode_set since it has a large impact on the DPLL
+	 * settings.
+	 */
+	lvds_port = (REG_READ(LVDS) &
+		    (~LVDS_PIPEB_SELECT)) |
+		    LVDS_PORT_EN |
+		    LVDS_BORDER_EN;
+
+	/* If the firmware says dither on Moorestown, or the BIOS does
+	   on Oaktrail, then enable dithering */
+	if (mode_dev->panel_wants_dither || dev_priv->lvds_dither)
+		lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
+
+	REG_WRITE(LVDS, lvds_port);
+
+	/* Find the connector we're trying to set up */
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		if (connector->encoder && connector->encoder->crtc == crtc)
+			break;
+	}
+
+	/* list_for_each_entry() never leaves connector NULL, so detect the
+	 * "not found" case by checking whether we walked off the list head;
+	 * also drop the power reference taken by gma_power_begin() above. */
+	if (&connector->head == &mode_config->connector_list) {
+		DRM_ERROR("Couldn't find connector when setting mode");
+		gma_power_end(dev);
+		return;
+	}
+
+	drm_object_property_get_value(
+		&connector->base,
+		dev->mode_config.scaling_mode_property,
+		&v);
+
+	if (v == DRM_MODE_SCALE_NO_SCALE)
+		REG_WRITE(PFIT_CONTROL, 0);
+	else if (v == DRM_MODE_SCALE_ASPECT) {
+		if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
+		    (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
+			if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
+			    (mode->hdisplay * adjusted_mode->crtc_vdisplay))
+				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+			else if ((adjusted_mode->crtc_hdisplay *
+				mode->vdisplay) > (mode->hdisplay *
+				adjusted_mode->crtc_vdisplay))
+				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+					  PFIT_SCALING_MODE_PILLARBOX);
+			else
+				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+					  PFIT_SCALING_MODE_LETTERBOX);
+		} else
+			REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+	} else /*(v == DRM_MODE_SCALE_FULLSCREEN)*/
+		REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+
+	gma_power_end(dev);
+}
+
+static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_encoder *psb_intel_encoder =
+						to_psb_intel_encoder(encoder);
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+	if (!gma_power_begin(dev, true))
+		return;
+
+	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+					  BACKLIGHT_DUTY_CYCLE_MASK);
+	oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+	gma_power_end(dev);
+}
+
+static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 ret;
+
+	if (gma_power_begin(dev, false)) {
+		ret = ((REG_READ(BLC_PWM_CTL) &
+			  BACKLIGHT_MODULATION_FREQ_MASK) >>
+			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+		gma_power_end(dev);
+	} else
+		ret = ((dev_priv->regs.saveBLC_PWM_CTL &
+			  BACKLIGHT_MODULATION_FREQ_MASK) >>
+			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+	return ret;
+}
+
+static void oaktrail_lvds_commit(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_encoder *psb_intel_encoder =
+						to_psb_intel_encoder(encoder);
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+	if (mode_dev->backlight_duty_cycle == 0)
+		mode_dev->backlight_duty_cycle =
+					oaktrail_lvds_get_max_backlight(dev);
+	oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+}
+
+static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
+	.dpms = oaktrail_lvds_dpms,
+	.mode_fixup = psb_intel_lvds_mode_fixup,
+	.prepare = oaktrail_lvds_prepare,
+	.mode_set = oaktrail_lvds_mode_set,
+	.commit = oaktrail_lvds_commit,
+};
+
+static struct drm_display_mode lvds_configuration_modes[] = {
+	/* hard coded fixed mode for TPO LTPS LPJ040K001A */
+	{ DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
+		   846, 1056, 0, 480, 489, 491, 525, 0, 0) },
+	/* hard coded fixed mode for LVDS 800x480 */
+	{ DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
+		   802, 1024, 0, 480, 481, 482, 525, 0, 0) },
+	/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
+	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
+		   1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
+	/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
+	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
+		   1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
+	/* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
+	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
+		   1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
+	/* hard coded fixed mode for LVDS 1024x768 */
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+		   1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
+	/* hard coded fixed mode for LVDS 1366x768 */
+	{ DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
+		   1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
+};
+
+/* Returns the panel fixed mode from configuration. */
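+/* Presumably the GCT DTD stores 8-bit high parts for the horizontal
+ * and blanking values, but only 4-bit high nibbles for the vsync
+ * offset and pulse width, hence the differing shifts below. */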
+
+static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
+					struct psb_intel_mode_device *mode_dev)
+{
+	struct drm_display_mode *mode = NULL;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
+
+	mode_dev->panel_fixed_mode = NULL;
+
+	/* Use the firmware provided data on Moorestown */
+	if (dev_priv->has_gct) {
+		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+		if (!mode)
+			return;
+
+		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+		mode->hsync_start = mode->hdisplay +
+				((ti->hsync_offset_hi << 8) |
+				ti->hsync_offset_lo);
+		mode->hsync_end = mode->hsync_start +
+				((ti->hsync_pulse_width_hi << 8) |
+				ti->hsync_pulse_width_lo);
+		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) |
+							ti->hblank_lo);
+		mode->vsync_start =
+			mode->vdisplay + ((ti->vsync_offset_hi << 4) |
+						ti->vsync_offset_lo);
+		mode->vsync_end =
+			mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) |
+						ti->vsync_pulse_width_lo);
+		mode->vtotal = mode->vdisplay +
+				((ti->vblank_hi << 8) | ti->vblank_lo);
+		mode->clock = ti->pixel_clock * 10;
+#if 0
+		printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
+		printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
+		printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
+		printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
+		printk(KERN_INFO "htotal is %d\n", mode->htotal);
+		printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
+		printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
+		printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
+		printk(KERN_INFO "clock is %d\n", mode->clock);
+#endif
+		mode_dev->panel_fixed_mode = mode;
+	}
+
+	/* Use the BIOS VBT mode if available */
+	if (mode_dev->panel_fixed_mode == NULL && mode_dev->vbt_mode)
+		mode_dev->panel_fixed_mode = drm_mode_duplicate(dev,
+						mode_dev->vbt_mode);
+
+	/* Then try the LVDS VBT mode */
+	if (mode_dev->panel_fixed_mode == NULL)
+		if (dev_priv->lfp_lvds_vbt_mode)
+			mode_dev->panel_fixed_mode =
+				drm_mode_duplicate(dev,
+					dev_priv->lfp_lvds_vbt_mode);
+	/* Then guess */
+	if (mode_dev->panel_fixed_mode == NULL)
+		mode_dev->panel_fixed_mode
+			= drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
+
+	drm_mode_set_name(mode_dev->panel_fixed_mode);
+	drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
+}
+
+/**
+ * oaktrail_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void oaktrail_lvds_init(struct drm_device *dev,
+		    struct psb_intel_mode_device *mode_dev)
+{
+	struct psb_intel_encoder *psb_intel_encoder;
+	struct psb_intel_connector *psb_intel_connector;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct edid *edid;
+	struct i2c_adapter *i2c_adap;
+	struct drm_display_mode *scan;	/* *modes, *bios_mode; */
+
+	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+	if (!psb_intel_encoder)
+		return;
+
+	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+	if (!psb_intel_connector)
+		goto failed_connector;
+
+	connector = &psb_intel_connector->base;
+	encoder = &psb_intel_encoder->base;
+	dev_priv->is_lvds_on = true;
+	drm_connector_init(dev, connector,
+			   &psb_intel_lvds_connector_funcs,
+			   DRM_MODE_CONNECTOR_LVDS);
+
+	drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
+			 DRM_MODE_ENCODER_LVDS);
+
+	psb_intel_connector_attach_encoder(psb_intel_connector,
+					   psb_intel_encoder);
+	psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+	drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
+	drm_connector_helper_add(connector,
+				 &psb_intel_lvds_connector_helper_funcs);
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	drm_object_attach_property(&connector->base,
+					dev->mode_config.scaling_mode_property,
+					DRM_MODE_SCALE_FULLSCREEN);
+	drm_object_attach_property(&connector->base,
+					dev_priv->backlight_property,
+					BRIGHTNESS_MAX_LEVEL);
+
+	mode_dev->panel_wants_dither = false;
+	if (dev_priv->has_gct)
+		mode_dev->panel_wants_dither = (dev_priv->gct_data.
+			Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
+	if (dev_priv->lvds_dither)
+		mode_dev->panel_wants_dither = 1;
+
+	/*
+	 * LVDS discovery:
+	 * 1) check for EDID on DDC
+	 * 2) check for VBT data
+	 * 3) check to see if LVDS is already on
+	 *    if none of the above, no panel
+	 * 4) make sure lid is open
+	 *    if closed, act like it's not there for now
+	 */
+
+	i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
+	if (i2c_adap == NULL)
+		dev_err(dev->dev, "No ddc adapter available!\n");
+	/*
+	 * Attempt to get the fixed panel mode from DDC.  Assume that the
+	 * preferred mode is the right one.
+	 */
+	if (i2c_adap) {
+		edid = drm_get_edid(connector, i2c_adap);
+		if (edid) {
+			drm_mode_connector_update_edid_property(connector,
+									edid);
+			drm_add_edid_modes(connector, edid);
+			kfree(edid);
+		}
+
+		list_for_each_entry(scan, &connector->probed_modes, head) {
+			if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+				mode_dev->panel_fixed_mode =
+				    drm_mode_duplicate(dev, scan);
+				goto out;	/* FIXME: check for quirks */
+			}
+		}
+	}
+	/*
+	 * If we didn't get EDID, try getting the panel timing
+	 * from the configuration data
+	 */
+	oaktrail_lvds_get_configuration_mode(dev, mode_dev);
+
+	if (mode_dev->panel_fixed_mode) {
+		mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+		goto out;	/* FIXME: check for quirks */
+	}
+
+	/* If we still don't have a mode after all that, give up. */
+	if (!mode_dev->panel_fixed_mode) {
+		dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
+		goto failed_find;
+	}
+
+out:
+	drm_sysfs_connector_add(connector);
+	return;
+
+failed_find:
+	dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
+	if (psb_intel_encoder->ddc_bus)
+		psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+
+/* failed_ddc: */
+
+	drm_encoder_cleanup(encoder);
+	drm_connector_cleanup(connector);
+	kfree(psb_intel_connector);
+failed_connector:
+	kfree(psb_intel_encoder);
+}
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/opregion.c b/linux-imx/drivers/gpu/drm/gma500/opregion.c
new file mode 100644
index 0000000..ad0d6de
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/opregion.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+#define PCI_ASLE 0xe4
+#define PCI_ASLS 0xfc
+
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET   0x100
+#define   ACPI_CLID 0x01ac /* current lid state indicator */
+#define   ACPI_CDCK 0x01b0 /* current docking state indicator */
+#define OPREGION_SWSCI_OFFSET  0x200
+#define OPREGION_ASLE_OFFSET   0x300
+#define OPREGION_VBT_OFFSET    0x400
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI      (1<<0)
+#define MBOX_SWSCI     (1<<1)
+#define MBOX_ASLE      (1<<2)
+
+struct opregion_header {
+	u8 signature[16];
+	u32 size;
+	u32 opregion_ver;
+	u8 bios_ver[32];
+	u8 vbios_ver[16];
+	u8 driver_ver[16];
+	u32 mboxes;
+	u8 reserved[164];
+} __packed;
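+/* 16 + 4 + 4 + 32 + 16 + 16 + 4 + 164 = 256 bytes, so the ACPI mailbox
+   below lands exactly at OPREGION_ACPI_OFFSET (0x100) */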
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+	u32 drdy;	/* driver readiness */
+	u32 csts;	/* notification status */
+	u32 cevt;	/* current event */
+	u8 rsvd1[20];
+	u32 didl[8];	/* supported display devices ID list */
+	u32 cpdl[8];	/* currently presented display list */
+	u32 cadl[8];	/* currently active display list */
+	u32 nadl[8];	/* next active devices list */
+	u32 aslp;	/* ASL sleep time-out */
+	u32 tidx;	/* toggle table index */
+	u32 chpd;	/* current hotplug enable indicator */
+	u32 clid;	/* current lid state*/
+	u32 cdck;	/* current docking state */
+	u32 sxsw;	/* Sx state resume */
+	u32 evts;	/* ASL supported events */
+	u32 cnot;	/* current OS notification */
+	u32 nrdy;	/* driver status */
+	u8 rsvd2[60];
+} __packed;
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+	/* FIXME: add it later */
+} __packed;
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+	u32 ardy;	/* driver readiness */
+	u32 aslc;	/* ASLE interrupt command */
+	u32 tche;	/* technology enabled indicator */
+	u32 alsi;	/* current ALS illuminance reading */
+	u32 bclp;	/* backlight brightness to set */
+	u32 pfit;	/* panel fitting state */
+	u32 cblv;	/* current brightness level */
+	u16 bclm[20];	/* backlight level duty cycle mapping table */
+	u32 cpfm;	/* current panel fitting mode */
+	u32 epfm;	/* enabled panel fitting modes */
+	u8 plut[74];	/* panel LUT and identifier */
+	u32 pfmb;	/* PWM freq and min brightness */
+	u8 rsvd[102];
+} __packed;
+
+/* ASLE irq request bits */
+#define ASLE_SET_ALS_ILLUM     (1 << 0)
+#define ASLE_SET_BACKLIGHT     (1 << 1)
+#define ASLE_SET_PFIT          (1 << 2)
+#define ASLE_SET_PWM_FREQ      (1 << 3)
+#define ASLE_REQ_MSK           0xf
+
+/* response bits of ASLE irq request */
+#define ASLE_ALS_ILLUM_FAILED   (1<<10)
+#define ASLE_BACKLIGHT_FAILED   (1<<12)
+#define ASLE_PFIT_FAILED        (1<<14)
+#define ASLE_PWM_FREQ_FAILED    (1<<16)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID                (1<<31)
+#define ASLE_BCLP_MSK          (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID         (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID         (1<<31)
+
+static struct psb_intel_opregion *system_opregion;
+
+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+	struct backlight_device *bd = dev_priv->backlight_device;
+
+	DRM_DEBUG_DRIVER("asle set backlight %x\n", bclp);
+
+	if (!(bclp & ASLE_BCLP_VALID))
+		return ASLE_BACKLIGHT_FAILED;
+
+	if (bd == NULL)
+		return ASLE_BACKLIGHT_FAILED;
+
+	bclp &= ASLE_BCLP_MSK;
+	if (bclp > 255)
+		return ASLE_BACKLIGHT_FAILED;
+
+	if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
+		int max = bd->props.max_brightness;
+		gma_backlight_set(dev, bclp * max / 255);
+	}
+
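+	/*
+	 * Report the result back as a percentage: bclp is 0-255, so
+	 * (bclp * 0x64) / 0xff maps it onto 0-100, and the valid bit
+	 * tells the firmware the field is current.
+	 */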
+	asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
+
+	return 0;
+}
+
+void psb_intel_opregion_asle_intr(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+	u32 asle_stat = 0;
+	u32 asle_req;
+
+	if (!asle)
+		return;
+
+	asle_req = asle->aslc & ASLE_REQ_MSK;
+	if (!asle_req) {
+		DRM_DEBUG_DRIVER("non asle set request??\n");
+		return;
+	}
+
+	if (asle_req & ASLE_SET_BACKLIGHT)
+		asle_stat |= asle_set_backlight(dev, asle->bclp);
+
+	asle->aslc = asle_stat;
+}
+
+#define ASLE_ALS_EN    (1<<0)
+#define ASLE_BLC_EN    (1<<1)
+#define ASLE_PFIT_EN   (1<<2)
+#define ASLE_PFMB_EN   (1<<3)
+
+void psb_intel_opregion_enable_asle(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+
+	if (asle && system_opregion) {
+		/* Don't do this on Medfield or other non-PC-like devices;
+		   they use the bit for something different altogether */
+		psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
+		psb_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
+
+		asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN
+								| ASLE_PFMB_EN;
+		asle->ardy = 1;
+	}
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID            (1<<1)
+#define ACPI_EV_DOCK           (1<<2)
+
+
+static int psb_intel_opregion_video_event(struct notifier_block *nb,
+					  unsigned long val, void *data)
+{
+	/* The only video events relevant to opregion are 0x80. These indicate
+	   either a docking event, lid switch or display switch request. In
+	   Linux, these are handled by the dock, button and video drivers.
+	   We might want to fix the video driver to be opregion-aware in
+	   future, but right now we just indicate to the firmware that the
+	   request has been handled */
+
+	struct opregion_acpi *acpi;
+
+	if (!system_opregion)
+		return NOTIFY_DONE;
+
+	acpi = system_opregion->acpi;
+	acpi->csts = 0;
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block psb_intel_opregion_notifier = {
+	.notifier_call = psb_intel_opregion_video_event,
+};
+
+void psb_intel_opregion_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_opregion *opregion = &dev_priv->opregion;
+
+	if (!opregion->header)
+		return;
+
+	if (opregion->acpi) {
+		/* Notify BIOS we are ready to handle ACPI video ext notifs.
+		 * Right now, all the events are handled by the ACPI video
+		 * module. We don't actually need to do anything with them. */
+		opregion->acpi->csts = 0;
+		opregion->acpi->drdy = 1;
+
+		system_opregion = opregion;
+		register_acpi_notifier(&psb_intel_opregion_notifier);
+	}
+}
+
+void psb_intel_opregion_fini(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_opregion *opregion = &dev_priv->opregion;
+
+	if (!opregion->header)
+		return;
+
+	if (opregion->acpi) {
+		opregion->acpi->drdy = 0;
+
+		system_opregion = NULL;
+		unregister_acpi_notifier(&psb_intel_opregion_notifier);
+	}
+
+	/* just clear all opregion memory pointers now */
+	iounmap(opregion->header);
+	opregion->header = NULL;
+	opregion->acpi = NULL;
+	opregion->swsci = NULL;
+	opregion->asle = NULL;
+	opregion->vbt = NULL;
+}
+
+int psb_intel_opregion_setup(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_opregion *opregion = &dev_priv->opregion;
+	u32 opregion_phy, mboxes;
+	void __iomem *base;
+	int err = 0;
+
+	pci_read_config_dword(dev->pdev, PCI_ASLS, &opregion_phy);
+	if (opregion_phy == 0) {
+		DRM_DEBUG_DRIVER("ACPI Opregion not supported\n");
+		return -ENOTSUPP;
+	}
+	DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
+	base = acpi_os_ioremap(opregion_phy, 8*1024);
+	if (!base)
+		return -ENOMEM;
+
+	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	opregion->header = base;
+	opregion->vbt = base + OPREGION_VBT_OFFSET;
+
+	opregion->lid_state = base + ACPI_CLID;
+
+	mboxes = opregion->header->mboxes;
+	if (mboxes & MBOX_ACPI) {
+		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+		opregion->acpi = base + OPREGION_ACPI_OFFSET;
+	}
+
+	if (mboxes & MBOX_ASLE) {
+		DRM_DEBUG_DRIVER("ASLE supported\n");
+		opregion->asle = base + OPREGION_ASLE_OFFSET;
+	}
+
+	return 0;
+
+err_out:
+	iounmap(base);
+	return err;
+}
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/opregion.h b/linux-imx/drivers/gpu/drm/gma500/opregion.h
new file mode 100644
index 0000000..4a90f8b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/opregion.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#if defined(CONFIG_ACPI)
+extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
+extern void psb_intel_opregion_init(struct drm_device *dev);
+extern void psb_intel_opregion_fini(struct drm_device *dev);
+extern int psb_intel_opregion_setup(struct drm_device *dev);
+extern void psb_intel_opregion_enable_asle(struct drm_device *dev);
+
+#else
+
+static inline void psb_intel_opregion_asle_intr(struct drm_device *dev)
+{
+}
+
+static inline void psb_intel_opregion_init(struct drm_device *dev)
+{
+}
+
+static inline void psb_intel_opregion_fini(struct drm_device *dev)
+{
+}
+
+static inline int psb_intel_opregion_setup(struct drm_device *dev)
+{
+	return 0;
+}
+
+static inline void psb_intel_opregion_enable_asle(struct drm_device *dev)
+{
+}
+#endif
diff --git a/linux-imx/drivers/gpu/drm/gma500/power.c b/linux-imx/drivers/gpu/drm/gma500/power.c
new file mode 100644
index 0000000..b6b135f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/power.c
@@ -0,0 +1,332 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Benjamin Defnet <benjamin.r.defnet@intel.com>
+ *    Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ * Massively reworked
+ *    Alan Cox <alan@linux.intel.com>
+ */
+
+#include "power.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+
+static struct mutex power_mutex;	/* Serialize power ops */
+static spinlock_t power_ctrl_lock;	/* Serialize power claim */
+
+/**
+ *	gma_power_init		-	initialise power manager
+ *	@dev: our device
+ *
+ *	Set up for power management tracking of our hardware.
+ */
+void gma_power_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	/* FIXME: Move APM/OSPM base into relevant device code */
+	dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
+	dev_priv->ospm_base &= 0xffff;
+
+	dev_priv->display_power = true;	/* We start active */
+	dev_priv->display_count = 0;	/* Currently no users */
+	dev_priv->suspended = false;	/* And not suspended */
+	spin_lock_init(&power_ctrl_lock);
+	mutex_init(&power_mutex);
+
+	if (dev_priv->ops->init_pm)
+		dev_priv->ops->init_pm(dev);
+}
+
+/**
+ *	gma_power_uninit	-	end power manager
+ *	@dev: device to end for
+ *
+ *	Undo the effects of gma_power_init
+ */
+void gma_power_uninit(struct drm_device *dev)
+{
+	pm_runtime_disable(&dev->pdev->dev);
+	pm_runtime_set_suspended(&dev->pdev->dev);
+}
+
+/**
+ *	gma_suspend_display	-	suspend the display logic
+ *	@dev: our DRM device
+ *
+ *	Suspend the display logic of the graphics interface
+ */
+static void gma_suspend_display(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->suspended)
+		return;
+	dev_priv->ops->save_regs(dev);
+	dev_priv->ops->power_down(dev);
+	dev_priv->display_power = false;
+}
+
+/**
+ *	gma_resume_display	-	resume display side logic
+ *	@pdev: our PCI device
+ *
+ *	Resume the display hardware restoring state and enabling
+ *	as necessary.
+ */
+static void gma_resume_display(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	/* turn on the display power island */
+	dev_priv->ops->power_up(dev);
+	dev_priv->suspended = false;
+	dev_priv->display_power = true;
+
+	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+	pci_write_config_word(pdev, PSB_GMCH_CTRL,
+			dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+
+	psb_gtt_restore(dev); /* Rebuild our GTT mappings */
+	dev_priv->ops->restore_regs(dev);
+}
+
+/**
+ *	gma_suspend_pci		-	suspend PCI side
+ *	@pdev: PCI device
+ *
+ *	Perform the suspend processing on our PCI device state
+ */
+static void gma_suspend_pci(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int bsm, vbt;
+
+	if (dev_priv->suspended)
+		return;
+
+	pci_save_state(pdev);
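+	/* 0x5C is the stolen memory base (BSM) and 0xFC the OpRegion
+	   pointer (ASLS); both are rewritten by hand on resume */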
+	pci_read_config_dword(pdev, 0x5C, &bsm);
+	dev_priv->regs.saveBSM = bsm;
+	pci_read_config_dword(pdev, 0xFC, &vbt);
+	dev_priv->regs.saveVBT = vbt;
+	pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
+	pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	dev_priv->suspended = true;
+}
+
+/**
+ *	gma_resume_pci		-	resume helper
+ *	@pdev: our PCI device
+ *
+ *	Perform the resume processing on our PCI device state - rewrite
+ *	register state and re-enable the PCI device
+ */
+static bool gma_resume_pci(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!dev_priv->suspended)
+		return true;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM);
+	pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT);
+	/* restoring MSI address and data in PCIx space */
+	pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
+	pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
+	ret = pci_enable_device(pdev);
+
+	if (ret != 0)
+		dev_err(&pdev->dev, "pci_enable failed: %d\n", ret);
+	else
+		dev_priv->suspended = false;
+	return !dev_priv->suspended;
+}
+
+/**
+ *	gma_power_suspend		-	bus callback for suspend
+ *	@_dev: our device
+ *
+ *	Called back by the PCI layer during a suspend of the system. We
+ *	perform the necessary shut down steps and save enough state that
+ *	we can undo this when resume is called.
+ */
+int gma_power_suspend(struct device *_dev)
+{
+	struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	mutex_lock(&power_mutex);
+	if (!dev_priv->suspended) {
+		if (dev_priv->display_count) {
+			mutex_unlock(&power_mutex);
+			dev_err(dev->dev, "GPU hardware busy, cannot suspend\n");
+			return -EBUSY;
+		}
+		psb_irq_uninstall(dev);
+		gma_suspend_display(dev);
+		gma_suspend_pci(pdev);
+	}
+	mutex_unlock(&power_mutex);
+	return 0;
+}
+
+/**
+ *	gma_power_resume		-	resume power
+ *	@_dev: our device
+ *
+ *	Resume the PCI side of the graphics and then the displays
+ */
+int gma_power_resume(struct device *_dev)
+{
+	struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	mutex_lock(&power_mutex);
+	gma_resume_pci(pdev);
+	gma_resume_display(pdev);
+	psb_irq_preinstall(dev);
+	psb_irq_postinstall(dev);
+	mutex_unlock(&power_mutex);
+	return 0;
+}
+
+/**
+ *	gma_power_is_on		-	return true if power is on
+ *	@dev: our DRM device
+ *
+ *	Returns true if the display island power is on at this moment
+ */
+bool gma_power_is_on(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	return dev_priv->display_power;
+}
+
+/**
+ *	gma_power_begin		-	begin requiring power
+ *	@dev: our DRM device
+ *	@force_on: true to force power on
+ *
+ *	Begin an action that requires the display power island is enabled.
+ *	We refcount the islands.
+ */
+bool gma_power_begin(struct drm_device *dev, bool force_on)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&power_ctrl_lock, flags);
+	/* Power already on? */
+	if (dev_priv->display_power) {
+		dev_priv->display_count++;
+		pm_runtime_get(&dev->pdev->dev);
+		spin_unlock_irqrestore(&power_ctrl_lock, flags);
+		return true;
+	}
+	if (!force_on)
+		goto out_false;
+
+	/* Ok power up needed */
+	ret = gma_resume_pci(dev->pdev);
+	if (ret == 0) {
+		psb_irq_preinstall(dev);
+		psb_irq_postinstall(dev);
+		pm_runtime_get(&dev->pdev->dev);
+		dev_priv->display_count++;
+		spin_unlock_irqrestore(&power_ctrl_lock, flags);
+		return true;
+	}
+out_false:
+	spin_unlock_irqrestore(&power_ctrl_lock, flags);
+	return false;
+}
+
+/**
+ *	gma_power_end		-	end use of power
+ *	@dev: Our DRM device
+ *
+ *	Indicate that one of our gma_power_begin() requested periods when
+ *	the display island power is needed has completed.
+ */
+void gma_power_end(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+	spin_lock_irqsave(&power_ctrl_lock, flags);
+	dev_priv->display_count--;
+	WARN_ON(dev_priv->display_count < 0);
+	spin_unlock_irqrestore(&power_ctrl_lock, flags);
+	pm_runtime_put(&dev->pdev->dev);
+}
+
+int psb_runtime_suspend(struct device *dev)
+{
+	return gma_power_suspend(dev);
+}
+
+int psb_runtime_resume(struct device *dev)
+{
+	return gma_power_resume(dev);
+}
+
+int psb_runtime_idle(struct device *dev)
+{
+	struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
+	struct drm_psb_private *dev_priv = drmdev->dev_private;
+	if (dev_priv->display_count)
+		return 0;
+	else
+		return 1;
+}
+
+int gma_power_thaw(struct device *_dev)
+{
+	return gma_power_resume(_dev);
+}
+
+int gma_power_freeze(struct device *_dev)
+{
+	return gma_power_suspend(_dev);
+}
+
+int gma_power_restore(struct device *_dev)
+{
+	return gma_power_resume(_dev);
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/power.h b/linux-imx/drivers/gpu/drm/gma500/power.h
new file mode 100644
index 0000000..56d8708
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/power.h
@@ -0,0 +1,70 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Benjamin Defnet <benjamin.r.defnet@intel.com>
+ *    Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ * Massively reworked
+ *    Alan Cox <alan@linux.intel.com>
+ */
+#ifndef _PSB_POWERMGMT_H_
+#define _PSB_POWERMGMT_H_
+
+#include <linux/pci.h>
+#include <drm/drmP.h>
+
+void gma_power_init(struct drm_device *dev);
+void gma_power_uninit(struct drm_device *dev);
+
+/*
+ * The kernel bus power management will call these functions
+ */
+int gma_power_suspend(struct device *dev);
+int gma_power_resume(struct device *dev);
+int gma_power_thaw(struct device *dev);
+int gma_power_freeze(struct device *dev);
+int gma_power_restore(struct device *dev);
+
+/*
+ * These are the functions the driver should use to wrap all hw access
+ * (i.e. register reads and writes)
+ */
+bool gma_power_begin(struct drm_device *dev, bool force);
+void gma_power_end(struct drm_device *dev);
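+/*
+ * Minimal usage sketch (illustrative only, not lifted from a caller):
+ *
+ *	if (gma_power_begin(dev, false)) {
+ *		val = PSB_RVDC32(reg);
+ *		gma_power_end(dev);
+ *	}
+ */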
+
+/*
+ * Use this function for an instantaneous check that the hw is on.
+ * Only use it in cases where you know the mutex is already held, such
+ * as in irq install/uninstall, and you need to avoid a deadlock.
+ * Otherwise use gma_power_begin().
+ */
+bool gma_power_is_on(struct drm_device *dev);
+
+/*
+ * GFX-Runtime PM callbacks
+ */
+int psb_runtime_suspend(struct device *dev);
+int psb_runtime_resume(struct device *dev);
+int psb_runtime_idle(struct device *dev);
+
+#endif /*_PSB_POWERMGMT_H_*/
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_device.c b/linux-imx/drivers/gpu/drm/gma500/psb_device.c
new file mode 100644
index 0000000..f6f534b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_device.c
@@ -0,0 +1,396 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+
+
+static int psb_output_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	psb_intel_lvds_init(dev, &dev_priv->mode_dev);
+	psb_intel_sdvo_init(dev, SDVOB);
+	return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ *	Poulsbo Backlight Interfaces
+ */
+
+#define BLC_PWM_PRECISION_FACTOR 100	/* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+
+#define PSB_BLC_PWM_PRECISION_FACTOR    10
+#define PSB_BLC_MAX_PWM_REG_FREQ        0xFFFE
+#define PSB_BLC_MIN_PWM_REG_FREQ        0x2
+
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
+
+static int psb_brightness;
+static struct backlight_device *psb_backlight_device;
+
+static int psb_get_brightness(struct backlight_device *bd)
+{
+	/* return locally cached var instead of HW read (due to DPST etc.) */
+	/* FIXME: ideally return actual value in case firmware fiddled with
+	   it */
+	return psb_brightness;
+}
+
+
+static int psb_backlight_setup(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned long core_clock;
+	/* u32 bl_max_freq; */
+	/* unsigned long value; */
+	u16 bl_max_freq;
+	uint32_t value;
+	uint32_t blc_pwm_precision_factor;
+
+	/* get bl_max_freq and pol from dev_priv */
+	if (!dev_priv->lvds_bl) {
+		dev_err(dev->dev, "No valid LVDS backlight info\n");
+		return -ENOENT;
+	}
+	bl_max_freq = dev_priv->lvds_bl->freq;
+	blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+
+	core_clock = dev_priv->core_freq;
+
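+	/*
+	 * Derive the PWM cycle count as (core clock in Hz / 32) / PWM
+	 * frequency.  Note the multiply and divide by the precision
+	 * factor cancel out here apart from integer truncation.
+	 */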
+	value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+	value *= blc_pwm_precision_factor;
+	value /= bl_max_freq;
+	value /= blc_pwm_precision_factor;
+
+	if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+	    value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+		return -ERANGE;
+
+	value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+	REG_WRITE(BLC_PWM_CTL,
+		  (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
+	return 0;
+}
+
+static int psb_set_brightness(struct backlight_device *bd)
+{
+	struct drm_device *dev = bl_get_data(psb_backlight_device);
+	int level = bd->props.brightness;
+
+	/* Percentage 1-100% being valid */
+	if (level < 1)
+		level = 1;
+
+	psb_intel_lvds_set_brightness(dev, level);
+	psb_brightness = level;
+	return 0;
+}
+
+static const struct backlight_ops psb_ops = {
+	.get_brightness = psb_get_brightness,
+	.update_status  = psb_set_brightness,
+};
+
+static int psb_backlight_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int ret;
+	struct backlight_properties props;
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 100;
+	props.type = BACKLIGHT_PLATFORM;
+
+	psb_backlight_device = backlight_device_register("psb-bl",
+					NULL, (void *)dev, &psb_ops, &props);
+	if (IS_ERR(psb_backlight_device))
+		return PTR_ERR(psb_backlight_device);
+
+	ret = psb_backlight_setup(dev);
+	if (ret < 0) {
+		backlight_device_unregister(psb_backlight_device);
+		psb_backlight_device = NULL;
+		return ret;
+	}
+	psb_backlight_device->props.brightness = 100;
+	psb_backlight_device->props.max_brightness = 100;
+	backlight_update_status(psb_backlight_device);
+	dev_priv->backlight_device = psb_backlight_device;
+
+	/* This must occur after the backlight is properly initialised */
+	psb_lid_timer_init(dev_priv);
+
+	return 0;
+}
+
+#endif
+
+/*
+ *	Provide the Poulsbo specific chip logic and low level methods
+ *	for power management
+ */
+
+static void psb_init_pm(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
+	gating &= ~3;	/* Disable 2D clock gating */
+	gating |= 1;
+	PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
+	PSB_RSGX32(PSB_CR_CLKGATECTL);
+}
+
+/**
+ *	psb_save_display_registers	-	save registers lost on suspend
+ *	@dev: our DRM device
+ *
+ *	Save the state we need in order to be able to restore the interface
+ *	upon resume from suspend
+ */
+static int psb_save_display_registers(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	struct psb_state *regs = &dev_priv->regs.psb;
+
+	/* Display arbitration control + watermarks */
+	regs->saveDSPARB = PSB_RVDC32(DSPARB);
+	regs->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+	regs->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+	regs->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+	regs->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+	regs->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+	regs->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+	regs->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+	/* Save crtc and output state */
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (drm_helper_crtc_in_use(crtc))
+			crtc->funcs->save(crtc);
+	}
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->funcs->save)
+			connector->funcs->save(connector);
+
+	drm_modeset_unlock_all(dev);
+	return 0;
+}
+
+/**
+ *	psb_restore_display_registers	-	restore lost register state
+ *	@dev: our DRM device
+ *
+ *	Restore register state that was lost during suspend and resume.
+ */
+static int psb_restore_display_registers(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	struct psb_state *regs = &dev_priv->regs.psb;
+
+	/* Display arbitration + watermarks */
+	PSB_WVDC32(regs->saveDSPARB, DSPARB);
+	PSB_WVDC32(regs->saveDSPFW1, DSPFW1);
+	PSB_WVDC32(regs->saveDSPFW2, DSPFW2);
+	PSB_WVDC32(regs->saveDSPFW3, DSPFW3);
+	PSB_WVDC32(regs->saveDSPFW4, DSPFW4);
+	PSB_WVDC32(regs->saveDSPFW5, DSPFW5);
+	PSB_WVDC32(regs->saveDSPFW6, DSPFW6);
+	PSB_WVDC32(regs->saveCHICKENBIT, DSPCHICKENBIT);
+
+	/* Make sure the VGA plane is off; it initializes to on after reset! */
+	PSB_WVDC32(0x80000000, VGACNTRL);
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		if (drm_helper_crtc_in_use(crtc))
+			crtc->funcs->restore(crtc);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->funcs->restore)
+			connector->funcs->restore(connector);
+
+	drm_modeset_unlock_all(dev);
+	return 0;
+}
+
+static int psb_power_down(struct drm_device *dev)
+{
+	return 0;
+}
+
+static int psb_power_up(struct drm_device *dev)
+{
+	return 0;
+}
+
+static void psb_get_core_freq(struct drm_device *dev)
+{
+	uint32_t clock;
+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
+	/*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
+
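+	/* 0xD0/0xD4 appear to act as a message control/data register pair
+	   on the Poulsbo north complex; this request reads back the fuse
+	   word whose low three bits encode the core clock (assumed from
+	   the decode below) */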
+	pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+	pci_read_config_dword(pci_root, 0xD4, &clock);
+	pci_dev_put(pci_root);
+
+	switch (clock & 0x07) {
+	case 0:
+		dev_priv->core_freq = 100;
+		break;
+	case 1:
+		dev_priv->core_freq = 133;
+		break;
+	case 2:
+		dev_priv->core_freq = 150;
+		break;
+	case 3:
+		dev_priv->core_freq = 178;
+		break;
+	case 4:
+		dev_priv->core_freq = 200;
+		break;
+	case 5:
+	case 6:
+	case 7:
+		dev_priv->core_freq = 266;
+		break;
+	default:
+		dev_priv->core_freq = 0;
+	}
+}
+
+/* Poulsbo */
+static const struct psb_offset psb_regmap[2] = {
+	{
+		.fp0 = FPA0,
+		.fp1 = FPA1,
+		.cntr = DSPACNTR,
+		.conf = PIPEACONF,
+		.src = PIPEASRC,
+		.dpll = DPLL_A,
+		.htotal = HTOTAL_A,
+		.hblank = HBLANK_A,
+		.hsync = HSYNC_A,
+		.vtotal = VTOTAL_A,
+		.vblank = VBLANK_A,
+		.vsync = VSYNC_A,
+		.stride = DSPASTRIDE,
+		.size = DSPASIZE,
+		.pos = DSPAPOS,
+		.base = DSPABASE,
+		.surf = DSPASURF,
+		.addr = DSPABASE,
+		.status = PIPEASTAT,
+		.linoff = DSPALINOFF,
+		.tileoff = DSPATILEOFF,
+		.palette = PALETTE_A,
+	},
+	{
+		.fp0 = FPB0,
+		.fp1 = FPB1,
+		.cntr = DSPBCNTR,
+		.conf = PIPEBCONF,
+		.src = PIPEBSRC,
+		.dpll = DPLL_B,
+		.htotal = HTOTAL_B,
+		.hblank = HBLANK_B,
+		.hsync = HSYNC_B,
+		.vtotal = VTOTAL_B,
+		.vblank = VBLANK_B,
+		.vsync = VSYNC_B,
+		.stride = DSPBSTRIDE,
+		.size = DSPBSIZE,
+		.pos = DSPBPOS,
+		.base = DSPBBASE,
+		.surf = DSPBSURF,
+		.addr = DSPBBASE,
+		.status = PIPEBSTAT,
+		.linoff = DSPBLINOFF,
+		.tileoff = DSPBTILEOFF,
+		.palette = PALETTE_B,
+	}
+};
+
+static int psb_chip_setup(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	dev_priv->regmap = psb_regmap;
+	psb_get_core_freq(dev);
+	gma_intel_setup_gmbus(dev);
+	psb_intel_opregion_init(dev);
+	psb_intel_init_bios(dev);
+	return 0;
+}
+
+static void psb_chip_teardown(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	psb_lid_timer_takedown(dev_priv);
+	gma_intel_teardown_gmbus(dev);
+}
+
+const struct psb_ops psb_chip_ops = {
+	.name = "Poulsbo",
+	.accel_2d = 1,
+	.pipes = 2,
+	.crtcs = 2,
+	.hdmi_mask = (1 << 0),
+	.lvds_mask = (1 << 1),
+	.cursor_needs_phys = 1,
+	.sgx_offset = PSB_SGX_OFFSET,
+	.chip_setup = psb_chip_setup,
+	.chip_teardown = psb_chip_teardown,
+
+	.crtc_helper = &psb_intel_helper_funcs,
+	.crtc_funcs = &psb_intel_crtc_funcs,
+
+	.output_init = psb_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	.backlight_init = psb_backlight_init,
+#endif
+
+	.init_pm = psb_init_pm,
+	.save_regs = psb_save_display_registers,
+	.restore_regs = psb_restore_display_registers,
+	.power_down = psb_power_down,
+	.power_up = psb_power_up,
+};
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_drv.c b/linux-imx/drivers/gpu/drm/gma500/psb_drv.c
new file mode 100644
index 0000000..bddea58
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_drv.c
@@ -0,0 +1,695 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "framebuffer.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "mid_bios.h"
+#include <drm/drm_pciids.h>
+#include "power.h"
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
+#include <acpi/video.h>
+#include <linux/module.h>
+
+static int drm_psb_trap_pagefaults;
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
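+/* e.g. loading the module with trap_pagefaults=1 turns MMU pagefaults
+   into errors followed by a reset (illustrative invocation) */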
+
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+	{ 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+	{ 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+#if defined(CONFIG_DRM_GMA600)
+	{ 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+	{ 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+	{ 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+	{ 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+	{ 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+	{ 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+	{ 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+	{ 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+	/* Atom E620 */
+	{ 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+#endif
+#if defined(CONFIG_DRM_MEDFIELD)
+	{0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+	{0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+	{0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+	{0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+	{0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+	{0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+	{0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+	{0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+#endif
+#if defined(CONFIG_DRM_GMA3600)
+	{ 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+	{ 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+#endif
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+/*
+ * Standard IOCTLs.
+ */
+
+#define DRM_IOCTL_GMA_ADB	\
+		DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_GMA_MODE_OPERATION	\
+		DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \
+			 struct drm_psb_mode_operation_arg)
+#define DRM_IOCTL_GMA_STOLEN_MEMORY	\
+		DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \
+			 struct drm_psb_stolen_memory_arg)
+#define DRM_IOCTL_GMA_GAMMA	\
+		DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \
+			 struct drm_psb_dpst_lut_arg)
+#define DRM_IOCTL_GMA_DPST_BL	\
+		DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \
+			 uint32_t)
+#define DRM_IOCTL_GMA_GET_PIPE_FROM_CRTC_ID	\
+		DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
+			 struct drm_psb_get_pipe_from_crtc_id_arg)
+#define DRM_IOCTL_GMA_GEM_CREATE	\
+		DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \
+			 struct drm_psb_gem_create)
+#define DRM_IOCTL_GMA_GEM_MMAP	\
+		DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \
+			 struct drm_psb_gem_mmap)
+
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv);
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+
+static struct drm_ioctl_desc psb_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
+		      DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(GMA_STOLEN_MEMORY, psb_stolen_memory_ioctl,
+		      DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(GMA_GAMMA, psb_gamma_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(GMA_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(GMA_GET_PIPE_FROM_CRTC_ID,
+					psb_intel_get_pipe_from_crtc_id, 0),
+	DRM_IOCTL_DEF_DRV(GMA_GEM_CREATE, psb_gem_create_ioctl,
+						DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(GMA_GEM_MMAP, psb_gem_mmap_ioctl,
+						DRM_UNLOCKED | DRM_AUTH),
+};
+
+static void psb_lastclose(struct drm_device *dev)
+{
+	int ret;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_fbdev *fbdev = dev_priv->fbdev;
+
+	drm_modeset_lock_all(dev);
+	ret = drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
+	if (ret)
+		DRM_DEBUG("failed to restore crtc mode\n");
+	drm_modeset_unlock_all(dev);
+}
+
+static int psb_do_init(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_gtt *pg = &dev_priv->gtt;
+
+	uint32_t stolen_gtt;
+
+	int ret = -ENOMEM;
+
+	if (pg->mmu_gatt_start & 0x0FFFFFFF) {
+		dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+
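+	/* Each GTT entry is 4 bytes; work out how many whole pages of GTT
+	   describe the stolen area, capped at the GTT size */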
+	stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
+	stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	stolen_gtt =
+	    (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
+
+	dev_priv->gatt_free_offset = pg->mmu_gatt_start +
+	    (stolen_gtt << PAGE_SHIFT) * 1024;
+
+	spin_lock_init(&dev_priv->irqmask_lock);
+	spin_lock_init(&dev_priv->lock_2d);
+
+	PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
+	PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
+	PSB_RSGX32(PSB_CR_BIF_BANK1);
+	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
+							PSB_CR_BIF_CTRL);
+	psb_spank(dev_priv);
+
+	/* mmu_gatt ?? */
+	PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+	return 0;
+out_err:
+	return ret;
+}
+
+static int psb_driver_unload(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	/* Kill vblank etc here */
+
+
+	if (dev_priv) {
+		if (dev_priv->backlight_device)
+			gma_backlight_exit(dev);
+		psb_modeset_cleanup(dev);
+
+		if (dev_priv->ops->chip_teardown)
+			dev_priv->ops->chip_teardown(dev);
+
+		psb_intel_opregion_fini(dev);
+
+		if (dev_priv->pf_pd) {
+			psb_mmu_free_pagedir(dev_priv->pf_pd);
+			dev_priv->pf_pd = NULL;
+		}
+		if (dev_priv->mmu) {
+			struct psb_gtt *pg = &dev_priv->gtt;
+
+			down_read(&pg->sem);
+			psb_mmu_remove_pfn_sequence(
+				psb_mmu_get_default_pd
+				(dev_priv->mmu),
+				pg->mmu_gatt_start,
+				dev_priv->vram_stolen_size >> PAGE_SHIFT);
+			up_read(&pg->sem);
+			psb_mmu_driver_takedown(dev_priv->mmu);
+			dev_priv->mmu = NULL;
+		}
+		psb_gtt_takedown(dev);
+		if (dev_priv->scratch_page) {
+			set_pages_wb(dev_priv->scratch_page, 1);
+			__free_page(dev_priv->scratch_page);
+			dev_priv->scratch_page = NULL;
+		}
+		if (dev_priv->vdc_reg) {
+			iounmap(dev_priv->vdc_reg);
+			dev_priv->vdc_reg = NULL;
+		}
+		if (dev_priv->sgx_reg) {
+			iounmap(dev_priv->sgx_reg);
+			dev_priv->sgx_reg = NULL;
+		}
+
+		/* Destroy VBT data */
+		psb_intel_destroy_bios(dev);
+
+		kfree(dev_priv);
+		dev->dev_private = NULL;
+	}
+	gma_power_uninit(dev);
+	return 0;
+}
+
+
+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+	struct drm_psb_private *dev_priv;
+	unsigned long resource_start;
+	unsigned long irqflags;
+	int ret = -ENOMEM;
+	struct drm_connector *connector;
+	struct psb_intel_encoder *psb_intel_encoder;
+
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	dev_priv->ops = (struct psb_ops *)chipset;
+	dev_priv->dev = dev;
+	dev->dev_private = (void *) dev_priv;
+
+	pci_set_master(dev->pdev);
+
+	dev_priv->num_pipe = dev_priv->ops->pipes;
+
+	resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+
+	dev_priv->vdc_reg =
+	    ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
+	if (!dev_priv->vdc_reg)
+		goto out_err;
+
+	dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
+							PSB_SGX_SIZE);
+	if (!dev_priv->sgx_reg)
+		goto out_err;
+
+	psb_intel_opregion_setup(dev);
+
+	ret = dev_priv->ops->chip_setup(dev);
+	if (ret)
+		goto out_err;
+
+	/* Init OSPM support */
+	gma_power_init(dev);
+
+	ret = -ENOMEM;
+
+	dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
+	if (!dev_priv->scratch_page)
+		goto out_err;
+
+	set_pages_uc(dev_priv->scratch_page, 1);
+
+	ret = psb_gtt_init(dev, 0);
+	if (ret)
+		goto out_err;
+
+	dev_priv->mmu = psb_mmu_driver_init((void *)0,
+					drm_psb_trap_pagefaults, 0,
+					dev_priv);
+	if (!dev_priv->mmu)
+		goto out_err;
+
+	dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
+	if (!dev_priv->pf_pd)
+		goto out_err;
+
+	psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+	psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+
+	ret = psb_do_init(dev);
+	if (ret)
+		return ret;
+
+	PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
+	PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
+
+	acpi_video_register();
+
+	ret = drm_vblank_init(dev, dev_priv->num_pipe);
+	if (ret)
+		goto out_err;
+
+	/*
+	 * Install interrupt handlers prior to powering off SGX or else we will
+	 * crash.
+	 */
+	dev_priv->vdc_irq_mask = 0;
+	dev_priv->pipestat[0] = 0;
+	dev_priv->pipestat[1] = 0;
+	dev_priv->pipestat[2] = 0;
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+	PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+	PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	drm_irq_install(dev);
+
+	dev->vblank_disable_allowed = 1;
+
+	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
+	dev->driver->get_vblank_counter = psb_get_vblank_counter;
+
+	psb_modeset_init(dev);
+	psb_fbdev_init(dev);
+	drm_kms_helper_poll_init(dev);
+
+	/* Only add backlight support if we have LVDS output */
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    head) {
+		psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+		switch (psb_intel_encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+		case INTEL_OUTPUT_MIPI:
+			ret = gma_backlight_init(dev);
+			break;
+		}
+	}
+
+	if (ret)
+		return ret;
+	psb_intel_opregion_enable_asle(dev);
+#if 0
+	/* enable runtime pm at last */
+	pm_runtime_enable(&dev->pdev->dev);
+	pm_runtime_set_active(&dev->pdev->dev);
+#endif
+	/* Intel drm driver load is done, continue doing pvr load */
+	return 0;
+out_err:
+	psb_driver_unload(dev);
+	return ret;
+}
+
+static int psb_driver_device_is_agp(struct drm_device *dev)
+{
+	return 0;
+}
+
+static inline void get_brightness(struct backlight_device *bd)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	if (bd) {
+		bd->props.brightness = bd->ops->get_brightness(bd);
+		backlight_update_status(bd);
+	}
+#endif
+}
+
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t *arg = data;
+
+	dev_priv->blc_adj2 = *arg;
+	get_brightness(dev_priv->backlight_device);
+	return 0;
+}
+
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	uint32_t *arg = data;
+
+	dev_priv->blc_adj1 = *arg;
+	get_brightness(dev_priv->backlight_device);
+	return 0;
+}
+
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_psb_dpst_lut_arg *lut_arg = data;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	struct psb_intel_crtc *psb_intel_crtc;
+	int i = 0;
+	int32_t obj_id;
+
+	obj_id = lut_arg->output_id;
+	obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
+	if (!obj) {
+		dev_dbg(dev->dev, "Invalid Connector object.\n");
+		return -EINVAL;
+	}
+
+	connector = obj_to_connector(obj);
+	crtc = connector->encoder->crtc;
+	psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+	for (i = 0; i < 256; i++)
+		psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
+
+	psb_intel_crtc_load_lut(crtc);
+
+	return 0;
+}
+
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	uint32_t obj_id;
+	uint16_t op;
+	struct drm_mode_modeinfo *umode;
+	struct drm_display_mode *mode = NULL;
+	struct drm_psb_mode_operation_arg *arg;
+	struct drm_mode_object *obj;
+	struct drm_connector *connector;
+	struct drm_connector_helper_funcs *connector_funcs;
+	int ret = 0;
+	int resp = MODE_OK;
+
+	arg = (struct drm_psb_mode_operation_arg *)data;
+	obj_id = arg->obj_id;
+	op = arg->operation;
+
+	switch (op) {
+	case PSB_MODE_OPERATION_MODE_VALID:
+		umode = &arg->mode;
+
+		drm_modeset_lock_all(dev);
+
+		obj = drm_mode_object_find(dev, obj_id,
+					DRM_MODE_OBJECT_CONNECTOR);
+		if (!obj) {
+			ret = -EINVAL;
+			goto mode_op_out;
+		}
+
+		connector = obj_to_connector(obj);
+
+		mode = drm_mode_create(dev);
+		if (!mode) {
+			ret = -ENOMEM;
+			goto mode_op_out;
+		}
+
+		/* drm_crtc_convert_umode(mode, umode); */
+		{
+			mode->clock = umode->clock;
+			mode->hdisplay = umode->hdisplay;
+			mode->hsync_start = umode->hsync_start;
+			mode->hsync_end = umode->hsync_end;
+			mode->htotal = umode->htotal;
+			mode->hskew = umode->hskew;
+			mode->vdisplay = umode->vdisplay;
+			mode->vsync_start = umode->vsync_start;
+			mode->vsync_end = umode->vsync_end;
+			mode->vtotal = umode->vtotal;
+			mode->vscan = umode->vscan;
+			mode->vrefresh = umode->vrefresh;
+			mode->flags = umode->flags;
+			mode->type = umode->type;
+			strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
+			mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+		}
+
+		connector_funcs = (struct drm_connector_helper_funcs *)
+				   connector->helper_private;
+
+		if (connector_funcs->mode_valid) {
+			resp = connector_funcs->mode_valid(connector, mode);
+			arg->data = resp;
+		}
+
+		/* Clean up; mode is known non-NULL at this point */
+		drm_mode_destroy(dev, mode);
+mode_op_out:
+		drm_modeset_unlock_all(dev);
+		return ret;
+
+	default:
+		dev_dbg(dev->dev, "Unsupported psb mode operation\n");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = psb_priv(dev);
+	struct drm_psb_stolen_memory_arg *arg = data;
+
+	arg->base = dev_priv->stolen_base;
+	arg->size = dev_priv->vram_stolen_size;
+
+	return 0;
+}
+
+static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
+{
+	return 0;
+}
+
+static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
+			       unsigned long arg)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	static unsigned int runtime_allowed;
+
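+	/*
+	 * Note: runtime_allowed starts at zero, so as written this branch
+	 * never fires and runtime PM stays gated (matching the disabled
+	 * pm_runtime_enable in driver load).
+	 */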
+	if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
+		runtime_allowed++;
+		pm_runtime_allow(&dev->pdev->dev);
+		dev_priv->rpm_enabled = 1;
+	}
+	return drm_ioctl(filp, cmd, arg);
+	/* FIXME: do we need to wrap the other side of this */
+}
+
+
+/* When a client dies:
+ *    - Check for and clean up flipped page state
+ */
+static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+static void psb_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	drm_put_dev(dev);
+}
+
+static const struct dev_pm_ops psb_pm_ops = {
+	.resume = gma_power_resume,
+	.suspend = gma_power_suspend,
+	.thaw = gma_power_thaw,
+	.freeze = gma_power_freeze,
+	.restore = gma_power_restore,
+	.runtime_suspend = psb_runtime_suspend,
+	.runtime_resume = psb_runtime_resume,
+	.runtime_idle = psb_runtime_idle,
+};
+
+static const struct vm_operations_struct psb_gem_vm_ops = {
+	.fault = psb_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static const struct file_operations psb_gem_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = psb_unlocked_ioctl,
+	.mmap = drm_gem_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
+			   DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM,
+	.load = psb_driver_load,
+	.unload = psb_driver_unload,
+
+	.ioctls = psb_ioctls,
+	.num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
+	.device_is_agp = psb_driver_device_is_agp,
+	.irq_preinstall = psb_irq_preinstall,
+	.irq_postinstall = psb_irq_postinstall,
+	.irq_uninstall = psb_irq_uninstall,
+	.irq_handler = psb_irq_handler,
+	.enable_vblank = psb_enable_vblank,
+	.disable_vblank = psb_disable_vblank,
+	.get_vblank_counter = psb_get_vblank_counter,
+	.lastclose = psb_lastclose,
+	.open = psb_driver_open,
+	.preclose = psb_driver_preclose,
+	.postclose = psb_driver_close,
+
+	.gem_init_object = psb_gem_init_object,
+	.gem_free_object = psb_gem_free_object,
+	.gem_vm_ops = &psb_gem_vm_ops,
+	.dumb_create = psb_gem_dumb_create,
+	.dumb_map_offset = psb_gem_dumb_map_gtt,
+	.dumb_destroy = psb_gem_dumb_destroy,
+	.fops = &psb_gem_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = PSB_DRM_DRIVER_DATE,
+	.major = PSB_DRM_DRIVER_MAJOR,
+	.minor = PSB_DRM_DRIVER_MINOR,
+	.patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
+};
+
+static struct pci_driver psb_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = psb_probe,
+	.remove = psb_remove,
+	.driver = {
+		.pm = &psb_pm_ops,
+	}
+};
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static int __init psb_init(void)
+{
+	return drm_pci_init(&driver, &psb_pci_driver);
+}
+
+static void __exit psb_exit(void)
+{
+	drm_pci_exit(&driver, &psb_pci_driver);
+}
+
+late_initcall(psb_init);
+module_exit(psb_exit);
+
+MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_drv.h b/linux-imx/drivers/gpu/drm/gma500/psb_drv.h
new file mode 100644
index 0000000..6053b8a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_drv.h
@@ -0,0 +1,982 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_DRV_H_
+#define _PSB_DRV_H_
+
+#include <linux/kref.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_global.h>
+#include <drm/gma_drm.h>
+#include "psb_reg.h"
+#include "psb_intel_drv.h"
+#include "intel_bios.h"
+#include "gtt.h"
+#include "power.h"
+#include "opregion.h"
+#include "oaktrail.h"
+
+/* Append new drm mode definition here, align with libdrm definition */
+#define DRM_MODE_SCALE_NO_SCALE   	2
+
+enum {
+	CHIP_PSB_8108 = 0,		/* Poulsbo */
+	CHIP_PSB_8109 = 1,		/* Poulsbo */
+	CHIP_MRST_4100 = 2,		/* Moorestown/Oaktrail */
+	CHIP_MFLD_0130 = 3,		/* Medfield */
+};
+
+#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
+#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
+#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
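+
+/*
+ * The masks fold each PCI device ID range into a single compare: 0x8108
+ * and 0x8109 differ only in bit 0, so (id & 0xfffe) == 0x8108 matches
+ * both Poulsbo IDs; 0xfffc likewise covers 0x4100-0x4103 (Moorestown)
+ * and 0xfff8 covers 0x0130-0x0137 (Medfield).
+ */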
+
+/*
+ * Driver definitions
+ */
+
+#define DRIVER_NAME "gma500"
+#define DRIVER_DESC "DRM driver for the Intel GMA500"
+
+#define PSB_DRM_DRIVER_DATE "2011-06-06"
+#define PSB_DRM_DRIVER_MAJOR 1
+#define PSB_DRM_DRIVER_MINOR 0
+#define PSB_DRM_DRIVER_PATCHLEVEL 0
+
+/*
+ *	Hardware offsets
+ */
+#define PSB_VDC_OFFSET		 0x00000000
+#define PSB_VDC_SIZE		 0x000080000
+#define MRST_MMIO_SIZE		 0x0000C0000
+#define MDFLD_MMIO_SIZE          0x000100000
+#define PSB_SGX_SIZE		 0x8000
+#define PSB_SGX_OFFSET		 0x00040000
+#define MRST_SGX_OFFSET		 0x00080000
+/*
+ *	PCI resource identifiers
+ */
+#define PSB_MMIO_RESOURCE	 0
+#define PSB_GATT_RESOURCE	 2
+#define PSB_GTT_RESOURCE	 3
+/*
+ *	PCI configuration
+ */
+#define PSB_GMCH_CTRL		 0x52
+#define PSB_BSM			 0x5C
+#define _PSB_GMCH_ENABLED	 0x4
+#define PSB_PGETBL_CTL		 0x2020
+#define _PSB_PGETBL_ENABLED	 0x00000001
+#define PSB_SGX_2D_SLAVE_PORT	 0x4000
+
+/* To get rid of */
+#define PSB_TT_PRIV0_LIMIT	 (256*1024*1024)
+#define PSB_TT_PRIV0_PLIMIT	 (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
+
+/*
+ *	SGX side MMU definitions (these can probably go)
+ */
+
+/*
+ *	Flags for external memory type field.
+ */
+#define PSB_MMU_CACHED_MEMORY	  0x0001	/* Bind to MMU only */
+#define PSB_MMU_RO_MEMORY	  0x0002	/* MMU RO memory */
+#define PSB_MMU_WO_MEMORY	  0x0004	/* MMU WO memory */
+/*
+ *	PTE's and PDE's
+ */
+#define PSB_PDE_MASK		  0x003FFFFF
+#define PSB_PDE_SHIFT		  22
+#define PSB_PTE_SHIFT		  12
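+/*
+ * A 32-bit SGX virtual address therefore decomposes as below (a sketch
+ * implied by the shifts above, not a helper that exists in this driver):
+ *
+ *	pde = addr >> PSB_PDE_SHIFT;			(bits 31:22)
+ *	pte = (addr & PSB_PDE_MASK) >> PSB_PTE_SHIFT;	(bits 21:12)
+ *	off = addr & (PAGE_SIZE - 1);			(bits 11:0)
+ */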
+/*
+ *	Cache control
+ */
+#define PSB_PTE_VALID		  0x0001	/* PTE / PDE valid */
+#define PSB_PTE_WO		  0x0002	/* Write only */
+#define PSB_PTE_RO		  0x0004	/* Read only */
+#define PSB_PTE_CACHED		  0x0008	/* CPU cache coherent */
+
+/*
+ *	VDC registers and bits
+ */
+#define PSB_MSVDX_CLOCKGATING	  0x2064
+#define PSB_TOPAZ_CLOCKGATING	  0x2068
+#define PSB_HWSTAM		  0x2098
+#define PSB_INSTPM		  0x20C0
+#define PSB_INT_IDENTITY_R        0x20A4
+#define _PSB_IRQ_ASLE		  (1<<0)
+#define _MDFLD_PIPEC_EVENT_FLAG   (1<<2)
+#define _MDFLD_PIPEC_VBLANK_FLAG  (1<<3)
+#define _PSB_DPST_PIPEB_FLAG      (1<<4)
+#define _MDFLD_PIPEB_EVENT_FLAG   (1<<4)
+#define _PSB_VSYNC_PIPEB_FLAG	  (1<<5)
+#define _PSB_DPST_PIPEA_FLAG      (1<<6)
+#define _PSB_PIPEA_EVENT_FLAG     (1<<6)
+#define _PSB_VSYNC_PIPEA_FLAG	  (1<<7)
+#define _MDFLD_MIPIA_FLAG	  (1<<16)
+#define _MDFLD_MIPIC_FLAG	  (1<<17)
+#define _PSB_IRQ_DISP_HOTSYNC	  (1<<17)
+#define _PSB_IRQ_SGX_FLAG	  (1<<18)
+#define _PSB_IRQ_MSVDX_FLAG	  (1<<19)
+#define _LNC_IRQ_TOPAZ_FLAG	  (1<<20)
+
+#define _PSB_PIPE_EVENT_FLAG	(_PSB_VSYNC_PIPEA_FLAG | \
+				 _PSB_VSYNC_PIPEB_FLAG)
+
+/* This flag includes all the display IRQ bits except the vblank IRQs. */
+#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
+				  _MDFLD_PIPEB_EVENT_FLAG | \
+				  _PSB_PIPEA_EVENT_FLAG | \
+				  _PSB_VSYNC_PIPEA_FLAG | \
+				  _MDFLD_MIPIA_FLAG | \
+				  _MDFLD_MIPIC_FLAG)
+#define PSB_INT_MASK_R		  0x20A8
+#define PSB_INT_ENABLE_R	  0x20A0
+
+#define _PSB_MMU_ER_MASK      0x0001FF00
+#define _PSB_MMU_ER_HOST      (1 << 16)
+#define GPIOA			0x5010
+#define GPIOB			0x5014
+#define GPIOC			0x5018
+#define GPIOD			0x501c
+#define GPIOE			0x5020
+#define GPIOF			0x5024
+#define GPIOG			0x5028
+#define GPIOH			0x502c
+#define GPIO_CLOCK_DIR_MASK		(1 << 0)
+#define GPIO_CLOCK_DIR_IN		(0 << 1)
+#define GPIO_CLOCK_DIR_OUT		(1 << 1)
+#define GPIO_CLOCK_VAL_MASK		(1 << 2)
+#define GPIO_CLOCK_VAL_OUT		(1 << 3)
+#define GPIO_CLOCK_VAL_IN		(1 << 4)
+#define GPIO_CLOCK_PULLUP_DISABLE	(1 << 5)
+#define GPIO_DATA_DIR_MASK		(1 << 8)
+#define GPIO_DATA_DIR_IN		(0 << 9)
+#define GPIO_DATA_DIR_OUT		(1 << 9)
+#define GPIO_DATA_VAL_MASK		(1 << 10)
+#define GPIO_DATA_VAL_OUT		(1 << 11)
+#define GPIO_DATA_VAL_IN		(1 << 12)
+#define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
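+
+/*
+ * Each GPIO field above pairs a write-enable ("MASK") bit with its value
+ * bits, in the usual i9xx convention, so updates need no read-modify-write:
+ * e.g. driving the bit-banged I2C data line low is a single write of
+ * GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT | GPIO_DATA_VAL_MASK (with the
+ * value bit itself left clear).
+ */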
+
+#define VCLK_DIVISOR_VGA0   0x6000
+#define VCLK_DIVISOR_VGA1   0x6004
+#define VCLK_POST_DIV	    0x6010
+
+#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
+#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
+#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
+#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
+#define PSB_COMM_USER_IRQ (1024 >> 2)
+#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
+#define PSB_COMM_FW (2048 >> 2)
+
+#define PSB_UIRQ_VISTEST	       1
+#define PSB_UIRQ_OOM_REPLY	       2
+#define PSB_UIRQ_FIRE_TA_REPLY	       3
+#define PSB_UIRQ_FIRE_RASTER_REPLY     4
+
+#define PSB_2D_SIZE (256*1024*1024)
+#define PSB_MAX_RELOC_PAGES 1024
+
+#define PSB_LOW_REG_OFFS 0x0204
+#define PSB_HIGH_REG_OFFS 0x0600
+
+#define PSB_NUM_VBLANKS 2
+#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
+#define PSB_LID_DELAY (DRM_HZ / 10)
+
+#define MDFLD_PNW_B0 0x04
+#define MDFLD_PNW_C0 0x08
+
+#define MDFLD_DSR_2D_3D_0 	(1 << 0)
+#define MDFLD_DSR_2D_3D_2 	(1 << 1)
+#define MDFLD_DSR_CURSOR_0 	(1 << 2)
+#define MDFLD_DSR_CURSOR_2	(1 << 3)
+#define MDFLD_DSR_OVERLAY_0 	(1 << 4)
+#define MDFLD_DSR_OVERLAY_2 	(1 << 5)
+#define MDFLD_DSR_MIPI_CONTROL	(1 << 6)
+#define MDFLD_DSR_DAMAGE_MASK_0	((1 << 0) | (1 << 2) | (1 << 4))
+#define MDFLD_DSR_DAMAGE_MASK_2	((1 << 1) | (1 << 3) | (1 << 5))
+#define MDFLD_DSR_2D_3D 	(MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
+
+#define MDFLD_DSR_RR		45
+#define MDFLD_DPU_ENABLE 	(1 << 31)
+#define MDFLD_DSR_FULLSCREEN 	(1 << 30)
+#define MDFLD_DSR_DELAY		(DRM_HZ / MDFLD_DSR_RR)
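+/* i.e. one display self-refresh pass roughly every 1/45 s (~22 ms) */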
+
+#define PSB_PWR_STATE_ON		1
+#define PSB_PWR_STATE_OFF		2
+
+#define PSB_PMPOLICY_NOPM		0
+#define PSB_PMPOLICY_CLOCKGATING	1
+#define PSB_PMPOLICY_POWERDOWN		2
+
+#define PSB_PMSTATE_POWERUP		0
+#define PSB_PMSTATE_CLOCKGATED		1
+#define PSB_PMSTATE_POWERDOWN		2
+#define PSB_PCIx_MSI_ADDR_LOC		0x94
+#define PSB_PCIx_MSI_DATA_LOC		0x98
+
+/* Medfield crystal settings */
+#define KSEL_CRYSTAL_19 1
+#define KSEL_BYPASS_19 5
+#define KSEL_BYPASS_25 6
+#define KSEL_BYPASS_83_100 7
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct psb_intel_opregion {
+	struct opregion_header *header;
+	struct opregion_acpi *acpi;
+	struct opregion_swsci *swsci;
+	struct opregion_asle *asle;
+	void *vbt;
+	u32 __iomem *lid_state;
+};
+
+struct sdvo_device_mapping {
+	u8 initialized;
+	u8 dvo_port;
+	u8 slave_addr;
+	u8 dvo_wiring;
+	u8 i2c_pin;
+	u8 i2c_speed;
+	u8 ddc_pin;
+};
+
+struct intel_gmbus {
+	struct i2c_adapter adapter;
+	struct i2c_adapter *force_bit;
+	u32 reg0;
+};
+
+/*
+ *	Register offset maps
+ */
+
+struct psb_offset {
+	u32	fp0;
+	u32	fp1;
+	u32	cntr;
+	u32	conf;
+	u32	src;
+	u32	dpll;
+	u32	dpll_md;
+	u32	htotal;
+	u32	hblank;
+	u32	hsync;
+	u32	vtotal;
+	u32	vblank;
+	u32	vsync;
+	u32	stride;
+	u32	size;
+	u32	pos;
+	u32	surf;
+	u32	addr;
+	u32	base;
+	u32	status;
+	u32	linoff;
+	u32	tileoff;
+	u32	palette;
+};
+
+/*
+ *	Register save state. This is used to hold the context when the
+ *	device is powered off. In the case of Oaktrail this can (but does not
+ *	yet) include screen blank. Operations occurring during the save
+ *	update the register cache instead.
+ */
+
+/*
+ *	Common status for pipes.
+ */
+struct psb_pipe {
+	u32	fp0;
+	u32	fp1;
+	u32	cntr;
+	u32	conf;
+	u32	src;
+	u32	dpll;
+	u32	dpll_md;
+	u32	htotal;
+	u32	hblank;
+	u32	hsync;
+	u32	vtotal;
+	u32	vblank;
+	u32	vsync;
+	u32	stride;
+	u32	size;
+	u32	pos;
+	u32	base;
+	u32	surf;
+	u32	addr;
+	u32	status;
+	u32	linoff;
+	u32	tileoff;
+	u32	palette[256];
+};
+
+struct psb_state {
+	uint32_t saveVCLK_DIVISOR_VGA0;
+	uint32_t saveVCLK_DIVISOR_VGA1;
+	uint32_t saveVCLK_POST_DIV;
+	uint32_t saveVGACNTRL;
+	uint32_t saveADPA;
+	uint32_t saveLVDS;
+	uint32_t saveDVOA;
+	uint32_t saveDVOB;
+	uint32_t saveDVOC;
+	uint32_t savePP_ON;
+	uint32_t savePP_OFF;
+	uint32_t savePP_CONTROL;
+	uint32_t savePP_CYCLE;
+	uint32_t savePFIT_CONTROL;
+	uint32_t saveCLOCKGATING;
+	uint32_t saveDSPARB;
+	uint32_t savePFIT_AUTO_RATIOS;
+	uint32_t savePFIT_PGM_RATIOS;
+	uint32_t savePP_ON_DELAYS;
+	uint32_t savePP_OFF_DELAYS;
+	uint32_t savePP_DIVISOR;
+	uint32_t saveBCLRPAT_A;
+	uint32_t saveBCLRPAT_B;
+	uint32_t savePERF_MODE;
+	uint32_t saveDSPFW1;
+	uint32_t saveDSPFW2;
+	uint32_t saveDSPFW3;
+	uint32_t saveDSPFW4;
+	uint32_t saveDSPFW5;
+	uint32_t saveDSPFW6;
+	uint32_t saveCHICKENBIT;
+	uint32_t saveDSPACURSOR_CTRL;
+	uint32_t saveDSPBCURSOR_CTRL;
+	uint32_t saveDSPACURSOR_BASE;
+	uint32_t saveDSPBCURSOR_BASE;
+	uint32_t saveDSPACURSOR_POS;
+	uint32_t saveDSPBCURSOR_POS;
+	uint32_t saveOV_OVADD;
+	uint32_t saveOV_OGAMC0;
+	uint32_t saveOV_OGAMC1;
+	uint32_t saveOV_OGAMC2;
+	uint32_t saveOV_OGAMC3;
+	uint32_t saveOV_OGAMC4;
+	uint32_t saveOV_OGAMC5;
+	uint32_t saveOVC_OVADD;
+	uint32_t saveOVC_OGAMC0;
+	uint32_t saveOVC_OGAMC1;
+	uint32_t saveOVC_OGAMC2;
+	uint32_t saveOVC_OGAMC3;
+	uint32_t saveOVC_OGAMC4;
+	uint32_t saveOVC_OGAMC5;
+
+	/* DPST register save */
+	uint32_t saveHISTOGRAM_INT_CONTROL_REG;
+	uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
+	uint32_t savePWM_CONTROL_LOGIC;
+};
+
+struct medfield_state {
+	uint32_t saveMIPI;
+	uint32_t saveMIPI_C;
+
+	uint32_t savePFIT_CONTROL;
+	uint32_t savePFIT_PGM_RATIOS;
+	uint32_t saveHDMIPHYMISCCTL;
+	uint32_t saveHDMIB_CONTROL;
+};
+
+struct cdv_state {
+	uint32_t saveDSPCLK_GATE_D;
+	uint32_t saveRAMCLK_GATE_D;
+	uint32_t saveDSPARB;
+	uint32_t saveDSPFW[6];
+	uint32_t saveADPA;
+	uint32_t savePP_CONTROL;
+	uint32_t savePFIT_PGM_RATIOS;
+	uint32_t saveLVDS;
+	uint32_t savePFIT_CONTROL;
+	uint32_t savePP_ON_DELAYS;
+	uint32_t savePP_OFF_DELAYS;
+	uint32_t savePP_CYCLE;
+	uint32_t saveVGACNTRL;
+	uint32_t saveIER;
+	uint32_t saveIMR;
+	u8	 saveLBB;
+};
+
+struct psb_save_area {
+	struct psb_pipe pipe[3];
+	uint32_t saveBSM;
+	uint32_t saveVBT;
+	union {
+		struct psb_state psb;
+		struct medfield_state mdfld;
+		struct cdv_state cdv;
+	};
+	uint32_t saveBLC_PWM_CTL2;
+	uint32_t saveBLC_PWM_CTL;
+};
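+
+/*
+ * Only one union member above is live at a time, keyed to the chip the
+ * driver bound to: the per-chip save_regs()/restore_regs() hooks in
+ * struct psb_ops fill in regs.psb, regs.mdfld or regs.cdv respectively.
+ */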
+
+struct psb_ops;
+
+#define PSB_NUM_PIPE		3
+
+struct drm_psb_private {
+	struct drm_device *dev;
+	const struct psb_ops *ops;
+	const struct psb_offset *regmap;
+
+	struct child_device_config *child_dev;
+	int child_dev_num;
+
+	struct psb_gtt gtt;
+
+	/* GTT Memory manager */
+	struct psb_gtt_mm *gtt_mm;
+	struct page *scratch_page;
+	u32 __iomem *gtt_map;
+	uint32_t stolen_base;
+	u8 __iomem *vram_addr;
+	unsigned long vram_stolen_size;
+	int gtt_initialized;
+	u16 gmch_ctrl;		/* Saved GTT setup */
+	u32 pge_ctl;
+
+	struct mutex gtt_mutex;
+	struct resource *gtt_mem;	/* Our PCI resource */
+
+	struct psb_mmu_driver *mmu;
+	struct psb_mmu_pd *pf_pd;
+
+	/*
+	 * Register base
+	 */
+
+	uint8_t __iomem *sgx_reg;
+	uint8_t __iomem *vdc_reg;
+	uint32_t gatt_free_offset;
+
+	/*
+	 * Fencing / irq.
+	 */
+
+	uint32_t vdc_irq_mask;
+	uint32_t pipestat[PSB_NUM_PIPE];
+
+	spinlock_t irqmask_lock;
+
+	/*
+	 * Power
+	 */
+
+	bool suspended;
+	bool display_power;
+	int display_count;
+
+	/*
+	 * Modesetting
+	 */
+	struct psb_intel_mode_device mode_dev;
+	bool modeset;	/* true if we have done the mode_device setup */
+
+	struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
+	struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
+	uint32_t num_pipe;
+
+	/*
+	 * OSPM info (Power management base) (can go ?)
+	 */
+	uint32_t ospm_base;
+
+	/*
+	 * Sizes info
+	 */
+
+	u32 fuse_reg_value;
+	u32 video_device_fuse;
+
+	/* PCI revision ID for B0:D2:F0 */
+	uint8_t platform_rev_id;
+
+	/* gmbus */
+	struct intel_gmbus *gmbus;
+
+	/* Used by SDVO */
+	int crt_ddc_pin;
+	/* FIXME: The mappings should be parsed from bios but for now we can
+		  pretend there are no mappings available */
+	struct sdvo_device_mapping sdvo_mappings[2];
+	u32 hotplug_supported_mask;
+	struct drm_property *broadcast_rgb_property;
+	struct drm_property *force_audio_property;
+
+	/*
+	 * LVDS info
+	 */
+	int backlight_duty_cycle;	/* restore backlight to this value */
+	bool panel_wants_dither;
+	struct drm_display_mode *panel_fixed_mode;
+	struct drm_display_mode *lfp_lvds_vbt_mode;
+	struct drm_display_mode *sdvo_lvds_vbt_mode;
+
+	struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
+	struct psb_intel_i2c_chan *lvds_i2c_bus; /* FIXME: Remove this? */
+
+	/* Feature bits from the VBIOS */
+	unsigned int int_tv_support:1;
+	unsigned int lvds_dither:1;
+	unsigned int lvds_vbt:1;
+	unsigned int int_crt_support:1;
+	unsigned int lvds_use_ssc:1;
+	int lvds_ssc_freq;
+	bool is_lvds_on;
+	bool is_mipi_on;
+	u32 mipi_ctrl_display;
+
+	unsigned int core_freq;
+	uint32_t iLVDS_enable;
+
+	/* Runtime PM state */
+	int rpm_enabled;
+
+	/* MID specific */
+	bool has_gct;
+	struct oaktrail_gct_data gct_data;
+
+	/* Oaktrail HDMI state */
+	struct oaktrail_hdmi_dev *hdmi_priv;
+
+	/*
+	 * Register state
+	 */
+
+	struct psb_save_area regs;
+
+	/* MSI reg save */
+	uint32_t msi_addr;
+	uint32_t msi_data;
+
+	/*
+	 * Hotplug handling
+	 */
+
+	struct work_struct hotplug_work;
+
+	/*
+	 * LID-Switch
+	 */
+	spinlock_t lid_lock;
+	struct timer_list lid_timer;
+	struct psb_intel_opregion opregion;
+	u32 lid_last_state;
+
+	/*
+	 * Watchdog
+	 */
+
+	uint32_t apm_reg;
+	uint16_t apm_base;
+
+	/*
+	 * Used for modifying backlight from
+	 * xrandr -- consider removing and using HAL instead
+	 */
+	struct backlight_device *backlight_device;
+	struct drm_property *backlight_property;
+	bool backlight_enabled;
+	int backlight_level;
+	uint32_t blc_adj1;
+	uint32_t blc_adj2;
+
+	void *fbdev;
+
+	/* 2D acceleration */
+	spinlock_t lock_2d;
+
+	/*
+	 * Panel brightness
+	 */
+	int brightness;
+	int brightness_adjusted;
+
+	bool dsr_enable;
+	u32 dsr_fb_update;
+	bool dpi_panel_on[3];
+	void *dsi_configs[2];
+	u32 bpp;
+	u32 bpp2;
+
+	u32 pipeconf[3];
+	u32 dspcntr[3];
+
+	int mdfld_panel_id;
+
+	bool dplla_96mhz;	/* DPLL data from the VBT */
+
+	struct {
+		int rate;
+		int lanes;
+		int preemphasis;
+		int vswing;
+
+		bool initialized;
+		bool support;
+		int bpp;
+		struct edp_power_seq pps;
+	} edp;
+	uint8_t panel_type;
+};
+
+
+/*
+ *	Operations for each board type
+ */
+
+struct psb_ops {
+	const char *name;
+	unsigned int accel_2d:1;
+	int pipes;		/* Number of output pipes */
+	int crtcs;		/* Number of CRTCs */
+	int sgx_offset;		/* Base offset of SGX device */
+	int hdmi_mask;		/* Mask of HDMI CRTCs */
+	int lvds_mask;		/* Mask of LVDS CRTCs */
+	int cursor_needs_phys;  /* Whether the cursor base register needs a physical address */
+
+	/* Sub functions */
+	struct drm_crtc_helper_funcs const *crtc_helper;
+	struct drm_crtc_funcs const *crtc_funcs;
+
+	/* Setup hooks */
+	int (*chip_setup)(struct drm_device *dev);
+	void (*chip_teardown)(struct drm_device *dev);
+	/* Optional helper called after modeset */
+	void (*errata)(struct drm_device *dev);
+
+	/* Display management hooks */
+	int (*output_init)(struct drm_device *dev);
+	int (*hotplug)(struct drm_device *dev);
+	void (*hotplug_enable)(struct drm_device *dev, bool on);
+	/* Power management hooks */
+	void (*init_pm)(struct drm_device *dev);
+	int (*save_regs)(struct drm_device *dev);
+	int (*restore_regs)(struct drm_device *dev);
+	int (*power_up)(struct drm_device *dev);
+	int (*power_down)(struct drm_device *dev);
+
+	void (*lvds_bl_power)(struct drm_device *dev, bool on);
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+	/* Backlight */
+	int (*backlight_init)(struct drm_device *dev);
+#endif
+	int i2c_bus;		/* I2C bus identifier for Moorestown */
+};
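+
+/*
+ * A board file instantiates one of these tables; an illustrative sketch
+ * only (the field values and the psb_chip_setup/psb_output_init names
+ * here are placeholders -- the real tables live in psb_device.c,
+ * cdv_device.c, oaktrail_device.c and mdfld_device.c):
+ *
+ *	const struct psb_ops psb_chip_ops = {
+ *		.name		= "GMA500",
+ *		.accel_2d	= 1,
+ *		.pipes		= 2,
+ *		.crtcs		= 2,
+ *		.sgx_offset	= PSB_SGX_OFFSET,
+ *		.chip_setup	= psb_chip_setup,
+ *		.output_init	= psb_output_init,
+ *	};
+ */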
+
+
+
+struct psb_mmu_driver;
+
+extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
+extern int drm_pick_crtcs(struct drm_device *dev);
+
+static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
+{
+	return (struct drm_psb_private *) dev->dev_private;
+}
+
+/*
+ * MMU stuff.
+ */
+
+extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+					int trap_pagefaults,
+					int invalid_type,
+					struct drm_psb_private *dev_priv);
+extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
+extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
+						 *driver);
+extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
+			       uint32_t gtt_start, uint32_t gtt_pages);
+extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+					   int trap_pagefaults,
+					   int invalid_type);
+extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
+extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
+extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+					unsigned long address,
+					uint32_t num_pages);
+extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
+				       uint32_t start_pfn,
+				       unsigned long address,
+				       uint32_t num_pages, int type);
+extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+				  unsigned long *pfn);
+
+/*
+ * Enable / disable MMU for different requestors.
+ */
+
+
+extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
+extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+				unsigned long address, uint32_t num_pages,
+				uint32_t desired_tile_stride,
+				uint32_t hw_tile_stride, int type);
+extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
+				 unsigned long address, uint32_t num_pages,
+				 uint32_t desired_tile_stride,
+				 uint32_t hw_tile_stride);
+/*
+ * psb_irq.c
+ */
+
+extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+extern int psb_irq_enable_dpst(struct drm_device *dev);
+extern int psb_irq_disable_dpst(struct drm_device *dev);
+extern void psb_irq_preinstall(struct drm_device *dev);
+extern int psb_irq_postinstall(struct drm_device *dev);
+extern void psb_irq_uninstall(struct drm_device *dev);
+extern void psb_irq_turn_on_dpst(struct drm_device *dev);
+extern void psb_irq_turn_off_dpst(struct drm_device *dev);
+
+extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int psb_enable_vblank(struct drm_device *dev, int crtc);
+extern void psb_disable_vblank(struct drm_device *dev, int crtc);
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+
+extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
+
+/*
+ * framebuffer.c
+ */
+extern int psbfb_probed(struct drm_device *dev);
+extern int psbfb_remove(struct drm_device *dev,
+			struct drm_framebuffer *fb);
+/*
+ * accel_2d.c
+ */
+extern void psbfb_copyarea(struct fb_info *info,
+					const struct fb_copyarea *region);
+extern int psbfb_sync(struct fb_info *info);
+extern void psb_spank(struct drm_psb_private *dev_priv);
+
+/*
+ * psb_reset.c
+ */
+
+extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
+extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
+extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
+
+/* modesetting */
+extern void psb_modeset_init(struct drm_device *dev);
+extern void psb_modeset_cleanup(struct drm_device *dev);
+extern int psb_fbdev_init(struct drm_device *dev);
+
+/* backlight.c */
+int gma_backlight_init(struct drm_device *dev);
+void gma_backlight_exit(struct drm_device *dev);
+void gma_backlight_disable(struct drm_device *dev);
+void gma_backlight_enable(struct drm_device *dev);
+void gma_backlight_set(struct drm_device *dev, int v);
+
+/* oaktrail_crtc.c */
+extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
+
+/* oaktrail_lvds.c */
+extern void oaktrail_lvds_init(struct drm_device *dev,
+		    struct psb_intel_mode_device *mode_dev);
+
+/* psb_intel_display.c */
+extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
+extern const struct drm_crtc_funcs psb_intel_crtc_funcs;
+
+/* psb_intel_lvds.c */
+extern const struct drm_connector_helper_funcs
+					psb_intel_lvds_connector_helper_funcs;
+extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
+
+/* gem.c */
+extern int psb_gem_init_object(struct drm_gem_object *obj);
+extern void psb_gem_free_object(struct drm_gem_object *obj);
+extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
+			struct drm_file *file);
+extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+			struct drm_mode_create_dumb *args);
+extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+			uint32_t handle);
+extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+			uint32_t handle, uint64_t *offset);
+extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file);
+extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file);
+
+/* psb_device.c */
+extern const struct psb_ops psb_chip_ops;
+
+/* oaktrail_device.c */
+extern const struct psb_ops oaktrail_chip_ops;
+
+/* mdfld_device.c */
+extern const struct psb_ops mdfld_chip_ops;
+
+/* cdv_device.c */
+extern const struct psb_ops cdv_chip_ops;
+
+/*
+ * Debug print bits setting
+ */
+#define PSB_D_GENERAL (1 << 0)
+#define PSB_D_INIT    (1 << 1)
+#define PSB_D_IRQ     (1 << 2)
+#define PSB_D_ENTRY   (1 << 3)
+/* debug the H/V back/front porch count read-out */
+#define PSB_D_HV      (1 << 4)
+#define PSB_D_DBI_BF  (1 << 5)
+#define PSB_D_PM      (1 << 6)
+#define PSB_D_RENDER  (1 << 7)
+#define PSB_D_REG     (1 << 8)
+#define PSB_D_MSVDX   (1 << 9)
+#define PSB_D_TOPAZ   (1 << 10)
+
+extern int drm_idle_check_interval;
+
+/*
+ *	Utilities
+ */
+
+static inline u32 MRST_MSG_READ32(uint port, uint offset)
+{
+	int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
+	uint32_t ret_val = 0;
+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	pci_write_config_dword(pci_root, 0xD0, mcr);
+	pci_read_config_dword(pci_root, 0xD4, &ret_val);
+	pci_dev_put(pci_root);
+	return ret_val;
+}
+static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+	int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	pci_write_config_dword(pci_root, 0xD4, value);
+	pci_write_config_dword(pci_root, 0xD0, mcr);
+	pci_dev_put(pci_root);
+}
+static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
+{
+	int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+	uint32_t ret_val = 0;
+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	pci_write_config_dword(pci_root, 0xD0, mcr);
+	pci_read_config_dword(pci_root, 0xD4, &ret_val);
+	pci_dev_put(pci_root);
+	return ret_val;
+}
+static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+	int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	pci_write_config_dword(pci_root, 0xD4, value);
+	pci_write_config_dword(pci_root, 0xD0, mcr);
+	pci_dev_put(pci_root);
+}
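+
+/*
+ * The four helpers above drive the Intel MID message bus through the PCI
+ * root complex: config offset 0xD0 is the message control register
+ * (opcode in bits 31:24, port in 23:16, register offset in 15:8, byte
+ * enables in 7:4) and 0xD4 is the data register. Writes post the data
+ * word first so that the command write to 0xD0 triggers the transaction.
+ */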
+
+static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	return ioread32(dev_priv->vdc_reg + reg);
+}
+
+#define REG_READ(reg)	       REGISTER_READ(dev, (reg))
+
+static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
+				      uint32_t val)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	iowrite32((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE(reg, val)	REGISTER_WRITE(dev, (reg), (val))
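+
+/*
+ * Both wrappers assume a "dev" variable is in scope at the call site; a
+ * typical read-modify-write, as used throughout psb_intel_display.c:
+ *
+ *	u32 temp = REG_READ(map->cntr);
+ *	REG_WRITE(map->cntr, temp | DISPLAY_PLANE_ENABLE);
+ */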
+
+static inline void REGISTER_WRITE16(struct drm_device *dev,
+					uint32_t reg, uint32_t val)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	iowrite16((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE16(reg, val)	  REGISTER_WRITE16(dev, (reg), (val))
+
+static inline void REGISTER_WRITE8(struct drm_device *dev,
+				       uint32_t reg, uint32_t val)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	iowrite8((val), dev_priv->vdc_reg + (reg));
+}
+
+#define REG_WRITE8(reg, val)		REGISTER_WRITE8(dev, (reg), (val))
+
+#define PSB_WVDC32(_val, _offs)		iowrite32(_val, dev_priv->vdc_reg + (_offs))
+#define PSB_RVDC32(_offs)		ioread32(dev_priv->vdc_reg + (_offs))
+
+/* #define TRAP_SGX_PM_FAULT 1 */
+#ifdef TRAP_SGX_PM_FAULT
+#define PSB_RSGX32(_offs)						\
+({									\
+	if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) {		\
+		printk(KERN_ERR						\
+			"access sgx when it's off!! (READ) %s, %d\n",	\
+	       __FILE__, __LINE__);					\
+		melay(1000);						\
+	}								\
+	ioread32(dev_priv->sgx_reg + (_offs));				\
+})
+#else
+#define PSB_RSGX32(_offs)		ioread32(dev_priv->sgx_reg + (_offs))
+#endif
+#define PSB_WSGX32(_val, _offs)		iowrite32(_val, dev_priv->sgx_reg + (_offs))
+
+#define MSVDX_REG_DUMP 0
+
+#define PSB_WMSVDX32(_val, _offs)	iowrite32(_val, dev_priv->msvdx_reg + (_offs))
+#define PSB_RMSVDX32(_offs)		ioread32(dev_priv->msvdx_reg + (_offs))
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_intel_display.c b/linux-imx/drivers/gpu/drm/gma500/psb_intel_display.c
new file mode 100644
index 0000000..6666493
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -0,0 +1,1368 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+
+struct psb_intel_clock_t {
+	/* given values */
+	int n;
+	int m1, m2;
+	int p1, p2;
+	/* derived values */
+	int dot;
+	int vco;
+	int m;
+	int p;
+};
+
+struct psb_intel_range_t {
+	int min, max;
+};
+
+struct psb_intel_p2_t {
+	int dot_limit;
+	int p2_slow, p2_fast;
+};
+
+struct psb_intel_limit_t {
+	struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
+	struct psb_intel_p2_t p2;
+};
+
+#define INTEL_LIMIT_I9XX_SDVO_DAC   0
+#define INTEL_LIMIT_I9XX_LVDS	    1
+
+static const struct psb_intel_limit_t psb_intel_limits[] = {
+	{			/* INTEL_LIMIT_I9XX_SDVO_DAC */
+	 .dot = {.min = 20000, .max = 400000},
+	 .vco = {.min = 1400000, .max = 2800000},
+	 .n = {.min = 1, .max = 6},
+	 .m = {.min = 70, .max = 120},
+	 .m1 = {.min = 8, .max = 18},
+	 .m2 = {.min = 3, .max = 7},
+	 .p = {.min = 5, .max = 80},
+	 .p1 = {.min = 1, .max = 8},
+	 .p2 = {.dot_limit = 200000,
+		.p2_slow = 10, .p2_fast = 5},
+	 },
+	{			/* INTEL_LIMIT_I9XX_LVDS */
+	 .dot = {.min = 20000, .max = 400000},
+	 .vco = {.min = 1400000, .max = 2800000},
+	 .n = {.min = 1, .max = 6},
+	 .m = {.min = 70, .max = 120},
+	 .m1 = {.min = 8, .max = 18},
+	 .m2 = {.min = 3, .max = 7},
+	 .p = {.min = 7, .max = 98},
+	 .p1 = {.min = 1, .max = 8},
+	 /* The single-channel range is 25-112MHz, and dual-channel
+	  * is 80-224MHz.  Prefer single channel as much as possible.
+	  */
+	 .p2 = {.dot_limit = 112000,
+		.p2_slow = 14, .p2_fast = 7},
+	 },
+};
+
+static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
+{
+	const struct psb_intel_limit_t *limit;
+
+	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+		limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
+	else
+		limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+	return limit;
+}
+
+static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock)
+{
+	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+	clock->p = clock->p1 * clock->p2;
+	clock->vco = refclk * clock->m / (clock->n + 2);
+	clock->dot = clock->vco / clock->p;
+}
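+
+/*
+ * Worked example with illustrative divisors: refclk = 96000 kHz, n = 4,
+ * m1 = 10, m2 = 5, p1 = 2, p2 = 5 gives m = 5 * 12 + 7 = 67, p = 10,
+ * vco = 96000 * 67 / 6 = 1072000 kHz and dot = 1072000 / 10 = 107200 kHz.
+ */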
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *l_entry;
+
+	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+			struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(l_entry);
+			if (psb_intel_encoder->type == type)
+				return true;
+		}
+	}
+	return false;
+}
+
+#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+/**
+ * Returns whether the given set of divisors are valid for a given refclk with
+ * the given connectors.
+ */
+
+static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
+			       struct psb_intel_clock_t *clock)
+{
+	const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
+
+	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+		INTELPllInvalid("p1 out of range\n");
+	if (clock->p < limit->p.min || limit->p.max < clock->p)
+		INTELPllInvalid("p out of range\n");
+	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+		INTELPllInvalid("m2 out of range\n");
+	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+		INTELPllInvalid("m1 out of range\n");
+	if (clock->m1 <= clock->m2)
+		INTELPllInvalid("m1 <= m2\n");
+	if (clock->m < limit->m.min || limit->m.max < clock->m)
+		INTELPllInvalid("m out of range\n");
+	if (clock->n < limit->n.min || limit->n.max < clock->n)
+		INTELPllInvalid("n out of range\n");
+	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+		INTELPllInvalid("vco out of range\n");
+	/* XXX: We may need to be checking "Dot clock"
+	 * depending on the multiplier, connector, etc.,
+	 * rather than just a single range.
+	 */
+	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+		INTELPllInvalid("dot out of range\n");
+
+	return true;
+}
+
+/**
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or false.  The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
+static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
+				int refclk,
+				struct psb_intel_clock_t *best_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	struct psb_intel_clock_t clock;
+	const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
+	int err = target;
+
+	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+		/*
+		 * For LVDS, if the panel is on, just rely on its current
+		 * settings for dual-channel.  We haven't figured out how to
+		 * reliably set up different single/dual channel state, if we
+		 * even can.
+		 */
+		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+		    LVDS_CLKB_POWER_UP)
+			clock.p2 = limit->p2.p2_fast;
+		else
+			clock.p2 = limit->p2.p2_slow;
+	} else {
+		if (target < limit->p2.dot_limit)
+			clock.p2 = limit->p2.p2_slow;
+		else
+			clock.p2 = limit->p2.p2_fast;
+	}
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
+	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+	     clock.m1++) {
+		for (clock.m2 = limit->m2.min;
+		     clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
+		     clock.m2++) {
+			for (clock.n = limit->n.min;
+			     clock.n <= limit->n.max; clock.n++) {
+				for (clock.p1 = limit->p1.min;
+				     clock.p1 <= limit->p1.max;
+				     clock.p1++) {
+					int this_err;
+
+					psb_intel_clock(refclk, &clock);
+
+					if (!psb_intel_PLL_is_valid
+					    (crtc, &clock))
+						continue;
+
+					this_err = abs(clock.dot - target);
+					if (this_err < err) {
+						*best_clock = clock;
+						err = this_err;
+					}
+				}
+			}
+		}
+	}
+
+	return err != target;
+}
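+
+/*
+ * Note the return convention above: "err" starts out equal to the target
+ * clock, so the function returns true only if at least one valid divisor
+ * set reduced the error, i.e. only if *best_clock was actually written.
+ */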
+
+void psb_intel_wait_for_vblank(struct drm_device *dev)
+{
+	/* Wait for 20 ms, i.e. one cycle at 50 Hz. */
+	mdelay(20);
+}
+
+static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
+			    int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	unsigned long start, offset;
+	u32 dspcntr;
+	int ret = 0;
+
+	if (!gma_power_begin(dev, true))
+		return 0;
+
+	/* no fb bound */
+	if (!crtc->fb) {
+		dev_dbg(dev->dev, "No FB bound\n");
+		goto psb_intel_pipe_cleaner;
+	}
+
+	/* We are displaying this buffer, make sure it is actually loaded
+	   into the GTT */
+	ret = psb_gtt_pin(psbfb->gtt);
+	if (ret < 0)
+		goto psb_intel_pipe_set_base_exit;
+	start = psbfb->gtt->offset;
+
+	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
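+	/* e.g. x = 8, y = 4 on a 32bpp framebuffer with a 4096-byte pitch:
+	 * offset = 4 * 4096 + 8 * 4 = 16416 bytes into the scanout buffer */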
+
+	REG_WRITE(map->stride, crtc->fb->pitches[0]);
+
+	dspcntr = REG_READ(map->cntr);
+	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		dspcntr |= DISPPLANE_8BPP;
+		break;
+	case 16:
+		if (crtc->fb->depth == 15)
+			dspcntr |= DISPPLANE_15_16BPP;
+		else
+			dspcntr |= DISPPLANE_16BPP;
+		break;
+	case 24:
+	case 32:
+		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+		break;
+	default:
+		dev_err(dev->dev, "Unknown color depth\n");
+		ret = -EINVAL;
+		psb_gtt_unpin(psbfb->gtt);
+		goto psb_intel_pipe_set_base_exit;
+	}
+	REG_WRITE(map->cntr, dspcntr);
+
+	REG_WRITE(map->base, start + offset);
+	REG_READ(map->base);
+
+psb_intel_pipe_cleaner:
+	/* If there was a previous display we can now unpin it */
+	if (old_fb)
+		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+psb_intel_pipe_set_base_exit:
+	gma_power_end(dev);
+	return ret;
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	u32 temp;
+
+	/* XXX: When our outputs are all unaware of DPMS modes other than off
+	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+	 */
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+		/* Enable the DPLL */
+		temp = REG_READ(map->dpll);
+		if ((temp & DPLL_VCO_ENABLE) == 0) {
+			REG_WRITE(map->dpll, temp);
+			REG_READ(map->dpll);
+			/* Wait for the clocks to stabilize. */
+			udelay(150);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
+			/* Wait for the clocks to stabilize. */
+			udelay(150);
+			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
+			/* Wait for the clocks to stabilize. */
+			udelay(150);
+		}
+
+		/* Enable the pipe */
+		temp = REG_READ(map->conf);
+		if ((temp & PIPEACONF_ENABLE) == 0)
+			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
+
+		/* Enable the plane */
+		temp = REG_READ(map->cntr);
+		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+			REG_WRITE(map->cntr,
+				  temp | DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(map->base, REG_READ(map->base));
+		}
+
+		psb_intel_crtc_load_lut(crtc);
+
+		/* Give the overlay scaler a chance to enable
+		 * if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+		break;
+	case DRM_MODE_DPMS_OFF:
+		/* Give the overlay scaler a chance to disable
+		 * if it's on this pipe */
+		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+		/* Disable the VGA plane that we never use */
+		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+		/* Disable display plane */
+		temp = REG_READ(map->cntr);
+		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+			REG_WRITE(map->cntr,
+				  temp & ~DISPLAY_PLANE_ENABLE);
+			/* Flush the plane changes */
+			REG_WRITE(map->base, REG_READ(map->base));
+			REG_READ(map->base);
+		}
+
+		/* Next, disable display pipes */
+		temp = REG_READ(map->conf);
+		if ((temp & PIPEACONF_ENABLE) != 0) {
+			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+			REG_READ(map->conf);
+		}
+
+		/* Wait for vblank for the disable to take effect. */
+		psb_intel_wait_for_vblank(dev);
+
+		temp = REG_READ(map->dpll);
+		if ((temp & DPLL_VCO_ENABLE) != 0) {
+			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+			REG_READ(map->dpll);
+		}
+
+		/* Wait for the clocks to turn off. */
+		udelay(150);
+		break;
+	}
+
+	/* Set FIFO watermarks */
+	REG_WRITE(DSPARB, 0x3F3E);
+}
+
+static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void psb_intel_crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void psb_intel_encoder_prepare(struct drm_encoder *encoder)
+{
+	struct drm_encoder_helper_funcs *encoder_funcs =
+	    encoder->helper_private;
+	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
+	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void psb_intel_encoder_commit(struct drm_encoder *encoder)
+{
+	struct drm_encoder_helper_funcs *encoder_funcs =
+	    encoder->helper_private;
+	/* lvds has its own version of commit see psb_intel_lvds_commit */
+	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+void psb_intel_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+
+	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
+}
+
+static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+	u32 pfit_control;
+
+	pfit_control = REG_READ(PFIT_CONTROL);
+
+	/* See if the panel fitter is in use */
+	if ((pfit_control & PFIT_ENABLE) == 0)
+		return -1;
+	/* Must be on PIPE 1 for PSB */
+	return 1;
+}
+
+static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode,
+			       int x, int y,
+			       struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	int refclk;
+	struct psb_intel_clock_t clock;
+	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+	bool ok, is_sdvo = false;
+	bool is_lvds = false, is_tv = false;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+
+	/* No scan out no play */
+	if (crtc->fb == NULL) {
+		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+		return 0;
+	}
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+
+		if (!connector->encoder
+		    || connector->encoder->crtc != crtc)
+			continue;
+
+		switch (psb_intel_encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_SDVO:
+			is_sdvo = true;
+			break;
+		case INTEL_OUTPUT_TVOUT:
+			is_tv = true;
+			break;
+		}
+	}
+
+	refclk = 96000;
+
+	ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+				 &clock);
+	if (!ok) {
+		dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+		return 0;
+	}
+
+	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+
+	dpll = DPLL_VGA_MODE_DIS;
+	if (is_lvds) {
+		dpll |= DPLLB_MODE_LVDS;
+		dpll |= DPLL_DVO_HIGH_SPEED;
+	} else
+		dpll |= DPLLB_MODE_DAC_SERIAL;
+	if (is_sdvo) {
+		int sdvo_pixel_multiply =
+			    adjusted_mode->clock / mode->clock;
+		dpll |= DPLL_DVO_HIGH_SPEED;
+		dpll |=
+		    (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+	}
+
+	/* compute bitmask from p1 value */
+	dpll |= (1 << (clock.p1 - 1)) << 16;
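+	/* e.g. clock.p1 == 2 sets bit 17: (1 << 1) << 16 == 0x00020000 */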
+	switch (clock.p2) {
+	case 5:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+		break;
+	case 7:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+		break;
+	case 10:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+		break;
+	case 14:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+		break;
+	}
+
+	if (is_tv) {
+		/* XXX: just matching BIOS for now */
+/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
+		dpll |= 3;
+	}
+	dpll |= PLL_REF_INPUT_DREFCLK;
+
+	/* setup pipeconf */
+	pipeconf = REG_READ(map->conf);
+
+	/* Set up the display plane register */
+	dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+	if (pipe == 0)
+		dspcntr |= DISPPLANE_SEL_PIPE_A;
+	else
+		dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+	dspcntr |= DISPLAY_PLANE_ENABLE;
+	pipeconf |= PIPEACONF_ENABLE;
+	dpll |= DPLL_VCO_ENABLE;
+
+
+	/* Disable the panel fitter if it was on our pipe */
+	if (psb_intel_panel_fitter_pipe(dev) == pipe)
+		REG_WRITE(PFIT_CONTROL, 0);
+
+	drm_mode_debug_printmodeline(mode);
+
+	if (dpll & DPLL_VCO_ENABLE) {
+		REG_WRITE(map->fp0, fp);
+		REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
+		REG_READ(map->dpll);
+		udelay(150);
+	}
+
+	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
+	 * This is an exception to the general rule that mode_set doesn't turn
+	 * things on.
+	 */
+	if (is_lvds) {
+		u32 lvds = REG_READ(LVDS);
+
+		lvds &= ~LVDS_PIPEB_SELECT;
+		if (pipe == 1)
+			lvds |= LVDS_PIPEB_SELECT;
+
+		lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+		/* Set the B0-B3 data pairs corresponding to
+		 * whether we're going to
+		 * set the DPLLs for dual-channel mode or not.
+		 */
+		lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+		if (clock.p2 == 7)
+			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+
+		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+		 * appropriately here, but we need to look more
+		 * thoroughly into how panels behave in the two modes.
+		 */
+
+		REG_WRITE(LVDS, lvds);
+		REG_READ(LVDS);
+	}
+
+	REG_WRITE(map->fp0, fp);
+	REG_WRITE(map->dpll, dpll);
+	REG_READ(map->dpll);
+	/* Wait for the clocks to stabilize. */
+	udelay(150);
+
+	/* write it again -- the BIOS does, after all */
+	REG_WRITE(map->dpll, dpll);
+
+	REG_READ(map->dpll);
+	/* Wait for the clocks to stabilize. */
+	udelay(150);
+
+	REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
+		  ((adjusted_mode->crtc_htotal - 1) << 16));
+	REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
+		  ((adjusted_mode->crtc_hblank_end - 1) << 16));
+	REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
+		  ((adjusted_mode->crtc_hsync_end - 1) << 16));
+	REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
+		  ((adjusted_mode->crtc_vtotal - 1) << 16));
+	REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
+		  ((adjusted_mode->crtc_vblank_end - 1) << 16));
+	REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
+		  ((adjusted_mode->crtc_vsync_end - 1) << 16));
+	/* pipesrc and dspsize control the size that is scaled from,
+	 * which should always be the user's requested size.
+	 */
+	REG_WRITE(map->size,
+		  ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+	REG_WRITE(map->pos, 0);
+	REG_WRITE(map->src,
+		  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+	REG_WRITE(map->conf, pipeconf);
+	REG_READ(map->conf);
+
+	psb_intel_wait_for_vblank(dev);
+
+	REG_WRITE(map->cntr, dspcntr);
+
+	/* Flush the plane changes */
+	crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+
+	psb_intel_wait_for_vblank(dev);
+
+	return 0;
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
+	int palreg = map->palette;
+	int i;
+
+	/* The clocks have to be on to load the palette. */
+	if (!crtc->enabled)
+		return;
+
+	switch (psb_intel_crtc->pipe) {
+	case 0:
+	case 1:
+		break;
+	default:
+		dev_err(dev->dev, "Illegal Pipe Number.\n");
+		return;
+	}
+
+	if (gma_power_begin(dev, false)) {
+		for (i = 0; i < 256; i++) {
+			REG_WRITE(palreg + 4 * i,
+				  ((psb_intel_crtc->lut_r[i] +
+				  psb_intel_crtc->lut_adj[i]) << 16) |
+				  ((psb_intel_crtc->lut_g[i] +
+				  psb_intel_crtc->lut_adj[i]) << 8) |
+				  (psb_intel_crtc->lut_b[i] +
+				  psb_intel_crtc->lut_adj[i]));
+		}
+		gma_power_end(dev);
+	} else {
+		for (i = 0; i < 256; i++) {
+			dev_priv->regs.pipe[0].palette[i] =
+				  ((psb_intel_crtc->lut_r[i] +
+				  psb_intel_crtc->lut_adj[i]) << 16) |
+				  ((psb_intel_crtc->lut_g[i] +
+				  psb_intel_crtc->lut_adj[i]) << 8) |
+				  (psb_intel_crtc->lut_b[i] +
+				  psb_intel_crtc->lut_adj[i]);
+		}
+
+	}
+}
+
+/**
+ * Save the hardware state of the given CRTC
+ */
+static void psb_intel_crtc_save(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+	const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
+	uint32_t paletteReg;
+	int i;
+
+	if (!crtc_state) {
+		dev_err(dev->dev, "No CRTC state found\n");
+		return;
+	}
+
+	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+	crtc_state->savePIPECONF = REG_READ(map->conf);
+	crtc_state->savePIPESRC = REG_READ(map->src);
+	crtc_state->saveFP0 = REG_READ(map->fp0);
+	crtc_state->saveFP1 = REG_READ(map->fp1);
+	crtc_state->saveDPLL = REG_READ(map->dpll);
+	crtc_state->saveHTOTAL = REG_READ(map->htotal);
+	crtc_state->saveHBLANK = REG_READ(map->hblank);
+	crtc_state->saveHSYNC = REG_READ(map->hsync);
+	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+	crtc_state->saveVBLANK = REG_READ(map->vblank);
+	crtc_state->saveVSYNC = REG_READ(map->vsync);
+	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
+
+	/* NOTE: DSPSIZE and DSPPOS are PSB-only */
+	crtc_state->saveDSPSIZE = REG_READ(map->size);
+	crtc_state->saveDSPPOS = REG_READ(map->pos);
+
+	crtc_state->saveDSPBASE = REG_READ(map->base);
+
+	paletteReg = map->palette;
+	for (i = 0; i < 256; ++i)
+		crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+}
+
+/**
+ * Restore the hardware state of the given CRTC
+ */
+static void psb_intel_crtc_restore(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc =  to_psb_intel_crtc(crtc);
+	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+	const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
+	uint32_t paletteReg;
+	int i;
+
+	if (!crtc_state) {
+		dev_err(dev->dev, "No crtc state\n");
+		return;
+	}
+
+	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+		REG_WRITE(map->dpll,
+			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+		REG_READ(map->dpll);
+		udelay(150);
+	}
+
+	REG_WRITE(map->fp0, crtc_state->saveFP0);
+	REG_READ(map->fp0);
+
+	REG_WRITE(map->fp1, crtc_state->saveFP1);
+	REG_READ(map->fp1);
+
+	REG_WRITE(map->dpll, crtc_state->saveDPLL);
+	REG_READ(map->dpll);
+	udelay(150);
+
+	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
+
+	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+	REG_WRITE(map->pos, crtc_state->saveDSPPOS);
+
+	REG_WRITE(map->src, crtc_state->savePIPESRC);
+	REG_WRITE(map->base, crtc_state->saveDSPBASE);
+	REG_WRITE(map->conf, crtc_state->savePIPECONF);
+
+	psb_intel_wait_for_vblank(dev);
+
+	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+	REG_WRITE(map->base, crtc_state->saveDSPBASE);
+
+	psb_intel_wait_for_vblank(dev);
+
+	paletteReg = map->palette;
+	for (i = 0; i < 256; ++i)
+		REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+}
+
+static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
+				 struct drm_file *file_priv,
+				 uint32_t handle,
+				 uint32_t width, uint32_t height)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+	uint32_t temp;
+	size_t addr = 0;
+	struct gtt_range *gt;
+	struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
+	struct drm_gem_object *obj;
+	void *tmp_dst, *tmp_src;
+	int ret = 0, i, cursor_pages;
+
+	/* If we want to turn off the cursor, ignore width and height */
+	if (!handle) {
+		/* turn off the cursor */
+		temp = CURSOR_MODE_DISABLE;
+
+		if (gma_power_begin(dev, false)) {
+			REG_WRITE(control, temp);
+			REG_WRITE(base, 0);
+			gma_power_end(dev);
+		}
+
+		/* Unpin the old GEM object */
+		if (psb_intel_crtc->cursor_obj) {
+			gt = container_of(psb_intel_crtc->cursor_obj,
+							struct gtt_range, gem);
+			psb_gtt_unpin(gt);
+			drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+			psb_intel_crtc->cursor_obj = NULL;
+		}
+
+		return 0;
+	}
+
+	/* Currently we only support 64x64 cursors */
+	if (width != 64 || height != 64) {
+		dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
+		return -EINVAL;
+	}
+
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (!obj)
+		return -ENOENT;
+
+	if (obj->size < width * height * 4) {
+		dev_dbg(dev->dev, "buffer is to small\n");
+		ret = -ENOMEM;
+		goto unref_cursor;
+	}
+
+	gt = container_of(obj, struct gtt_range, gem);
+
+	/* Pin the memory into the GTT */
+	ret = psb_gtt_pin(gt);
+	if (ret) {
+		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+		goto unref_cursor;
+	}
+
+	if (dev_priv->ops->cursor_needs_phys) {
+		if (cursor_gt == NULL) {
+			dev_err(dev->dev, "No hardware cursor mem available");
+			ret = -ENOMEM;
+			goto unref_cursor;
+		}
+
+		/* Prevent overflow */
+		if (gt->npage > 4)
+			cursor_pages = 4;
+		else
+			cursor_pages = gt->npage;
+
+		/* Copy the cursor to cursor mem */
+		tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
+		for (i = 0; i < cursor_pages; i++) {
+			tmp_src = kmap(gt->pages[i]);
+			memcpy(tmp_dst, tmp_src, PAGE_SIZE);
+			kunmap(gt->pages[i]);
+			tmp_dst += PAGE_SIZE;
+		}
+
+		addr = psb_intel_crtc->cursor_addr;
+	} else {
+		addr = gt->offset;      /* Or resource.start ??? */
+		psb_intel_crtc->cursor_addr = addr;
+	}
+
+	temp = 0;
+	/* set the pipe for the cursor */
+	temp |= (pipe << 28);
+	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+	if (gma_power_begin(dev, false)) {
+		REG_WRITE(control, temp);
+		REG_WRITE(base, addr);
+		gma_power_end(dev);
+	}
+
+	/* unpin the old bo */
+	if (psb_intel_crtc->cursor_obj) {
+		gt = container_of(psb_intel_crtc->cursor_obj,
+							struct gtt_range, gem);
+		psb_gtt_unpin(gt);
+		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+	}
+
+	psb_intel_crtc->cursor_obj = obj;
+	return ret;
+
+unref_cursor:
+	drm_gem_object_unreference(obj);
+	return ret;
+}
+
+static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	uint32_t temp = 0;
+	uint32_t addr;
+
+
+	if (x < 0) {
+		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+		x = -x;
+	}
+	if (y < 0) {
+		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+		y = -y;
+	}
+
+	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
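+	/* The cursor position registers take sign-magnitude coordinates:
+	 * e.g. x = -12 becomes CURSOR_POS_SIGN at CURSOR_X_SHIFT plus a
+	 * magnitude of 12 in the low bits. */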
+
+	addr = psb_intel_crtc->cursor_addr;
+
+	if (gma_power_begin(dev, false)) {
+		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
+		gma_power_end(dev);
+	}
+	return 0;
+}
+
+static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+			 u16 *green, u16 *blue, uint32_t type, uint32_t size)
+{
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int i;
+
+	if (size != 256)
+		return;
+
+	for (i = 0; i < 256; i++) {
+		psb_intel_crtc->lut_r[i] = red[i] >> 8;
+		psb_intel_crtc->lut_g[i] = green[i] >> 8;
+		psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+	}
+
+	psb_intel_crtc_load_lut(crtc);
+}
+
+static int psb_crtc_set_config(struct drm_mode_set *set)
+{
+	int ret;
+	struct drm_device *dev = set->crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->rpm_enabled)
+		return drm_crtc_helper_set_config(set);
+
+	pm_runtime_forbid(&dev->pdev->dev);
+	ret = drm_crtc_helper_set_config(set);
+	pm_runtime_allow(&dev->pdev->dev);
+	return ret;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int psb_intel_crtc_clock_get(struct drm_device *dev,
+				struct drm_crtc *crtc)
+{
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	int pipe = psb_intel_crtc->pipe;
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+	u32 dpll;
+	u32 fp;
+	struct psb_intel_clock_t clock;
+	bool is_lvds;
+	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+
+	if (gma_power_begin(dev, false)) {
+		dpll = REG_READ(map->dpll);
+		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+			fp = REG_READ(map->fp0);
+		else
+			fp = REG_READ(map->fp1);
+		is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+		gma_power_end(dev);
+	} else {
+		dpll = p->dpll;
+
+		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+			fp = p->fp0;
+		else
+			fp = p->fp1;
+
+		is_lvds = (pipe == 1) && (dev_priv->regs.psb.saveLVDS &
+								LVDS_PORT_EN);
+	}
+
+	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+	clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+	clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+
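+	/*
+	 * psb_intel_clock() turns these dividers into a dot clock using
+	 * the classic i8xx PLL arithmetic (sketched here for reference):
+	 *   m   = 5 * (m1 + 2) + (m2 + 2)
+	 *   vco = refclk * m / (n + 2)
+	 *   dot = vco / (p1 * p2)
+	 */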
+	if (is_lvds) {
+		clock.p1 =
+		    ffs((dpll &
+			 DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+			DPLL_FPA01_P1_POST_DIV_SHIFT);
+		clock.p2 = 14;
+
+		if ((dpll & PLL_REF_INPUT_MASK) ==
+		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+			/* XXX: might not be 66MHz */
+			psb_intel_clock(66000, &clock);
+		} else
+			psb_intel_clock(48000, &clock);
+	} else {
+		if (dpll & PLL_P1_DIVIDE_BY_TWO)
+			clock.p1 = 2;
+		else {
+			clock.p1 =
+			    ((dpll &
+			      DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+			     DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+		}
+		if (dpll & PLL_P2_DIVIDE_BY_4)
+			clock.p2 = 4;
+		else
+			clock.p2 = 2;
+
+		psb_intel_clock(48000, &clock);
+	}
+
+	/* XXX: It would be nice to validate the clocks, but we can't reuse
+	 * i830PllIsValid() because it relies on the xf86_config connector
+	 * configuration being accurate, which it isn't necessarily.
+	 */
+
+	return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+					     struct drm_crtc *crtc)
+{
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	int pipe = psb_intel_crtc->pipe;
+	struct drm_display_mode *mode;
+	int htot;
+	int hsync;
+	int vtot;
+	int vsync;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+	const struct psb_offset *map = &dev_priv->regmap[pipe];
+
+	if (gma_power_begin(dev, false)) {
+		htot = REG_READ(map->htotal);
+		hsync = REG_READ(map->hsync);
+		vtot = REG_READ(map->vtotal);
+		vsync = REG_READ(map->vsync);
+		gma_power_end(dev);
+	} else {
+		htot = p->htotal;
+		hsync = p->hsync;
+		vtot = p->vtotal;
+		vsync = p->vsync;
+	}
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->clock = psb_intel_crtc_clock_get(dev, crtc);
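+	/* The timing registers store each value minus one: the active or
+	 * start count in the low 16 bits and the total or end count in the
+	 * high 16 bits, hence the masks and +1 adjustments below. */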
+	mode->hdisplay = (htot & 0xffff) + 1;
+	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+	mode->hsync_start = (hsync & 0xffff) + 1;
+	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+	mode->vdisplay = (vtot & 0xffff) + 1;
+	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+	mode->vsync_start = (vsync & 0xffff) + 1;
+	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	return mode;
+}
+
+static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct gtt_range *gt;
+
+	/* Unpin the old GEM object */
+	if (psb_intel_crtc->cursor_obj) {
+		gt = container_of(psb_intel_crtc->cursor_obj,
+						struct gtt_range, gem);
+		psb_gtt_unpin(gt);
+		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+		psb_intel_crtc->cursor_obj = NULL;
+	}
+
+	if (psb_intel_crtc->cursor_gt != NULL)
+		psb_gtt_free_range(crtc->dev, psb_intel_crtc->cursor_gt);
+	kfree(psb_intel_crtc->crtc_state);
+	drm_crtc_cleanup(crtc);
+	kfree(psb_intel_crtc);
+}
+
+static void psb_intel_crtc_disable(struct drm_crtc *crtc)
+{
+	struct gtt_range *gt;
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+	if (crtc->fb) {
+		gt = to_psb_fb(crtc->fb)->gtt;
+		psb_gtt_unpin(gt);
+	}
+}
+
+const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
+	.dpms = psb_intel_crtc_dpms,
+	.mode_fixup = psb_intel_crtc_mode_fixup,
+	.mode_set = psb_intel_crtc_mode_set,
+	.mode_set_base = psb_intel_pipe_set_base,
+	.prepare = psb_intel_crtc_prepare,
+	.commit = psb_intel_crtc_commit,
+	.disable = psb_intel_crtc_disable,
+};
+
+const struct drm_crtc_funcs psb_intel_crtc_funcs = {
+	.save = psb_intel_crtc_save,
+	.restore = psb_intel_crtc_restore,
+	.cursor_set = psb_intel_crtc_cursor_set,
+	.cursor_move = psb_intel_crtc_cursor_move,
+	.gamma_set = psb_intel_crtc_gamma_set,
+	.set_config = psb_crtc_set_config,
+	.destroy = psb_intel_crtc_destroy,
+};
+
+/*
+ * Set the default values of the cursor control and base registers
+ * to zero. This is a workaround for a h/w defect on Oaktrail.
+ */
+static void psb_intel_cursor_init(struct drm_device *dev,
+				  struct psb_intel_crtc *psb_intel_crtc)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
+	u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
+	struct gtt_range *cursor_gt;
+
+	if (dev_priv->ops->cursor_needs_phys) {
+		/* Allocate 4 pages of stolen mem for a hardware cursor. That
+		 * is enough for the 64 x 64 ARGB cursors we support.
+		 */
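+		/* 64 x 64 pixels x 4 bytes/pixel = 16384 bytes = 4 pages */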
+		cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1);
+		if (!cursor_gt) {
+			psb_intel_crtc->cursor_gt = NULL;
+			goto out;
+		}
+		psb_intel_crtc->cursor_gt = cursor_gt;
+		psb_intel_crtc->cursor_addr = dev_priv->stolen_base +
+							cursor_gt->offset;
+	} else {
+		psb_intel_crtc->cursor_gt = NULL;
+	}
+
+out:
+	REG_WRITE(control[psb_intel_crtc->pipe], 0);
+	REG_WRITE(base[psb_intel_crtc->pipe], 0);
+}
+
+void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+		     struct psb_intel_mode_device *mode_dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_crtc *psb_intel_crtc;
+	int i;
+	uint16_t *r_base, *g_base, *b_base;
+
+	/* We allocate an extra array of drm_connector pointers
+	 * for fbdev after the crtc */
+	psb_intel_crtc =
+	    kzalloc(sizeof(struct psb_intel_crtc) +
+		    (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+		    GFP_KERNEL);
+	if (psb_intel_crtc == NULL)
+		return;
+
+	psb_intel_crtc->crtc_state =
+		kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
+	if (!psb_intel_crtc->crtc_state) {
+		dev_err(dev->dev, "Crtc state error: No memory\n");
+		kfree(psb_intel_crtc);
+		return;
+	}
+
+	/* Set the CRTC operations from the chip specific data */
+	drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
+	psb_intel_crtc->pipe = pipe;
+	psb_intel_crtc->plane = pipe;
+
+	r_base = psb_intel_crtc->base.gamma_store;
+	g_base = r_base + 256;
+	b_base = g_base + 256;
+	for (i = 0; i < 256; i++) {
+		psb_intel_crtc->lut_r[i] = i;
+		psb_intel_crtc->lut_g[i] = i;
+		psb_intel_crtc->lut_b[i] = i;
+		r_base[i] = i << 8;
+		g_base[i] = i << 8;
+		b_base[i] = i << 8;
+
+		psb_intel_crtc->lut_adj[i] = 0;
+	}
+
+	psb_intel_crtc->mode_dev = mode_dev;
+	psb_intel_crtc->cursor_addr = 0;
+
+	drm_crtc_helper_add(&psb_intel_crtc->base,
+						dev_priv->ops->crtc_helper);
+
+	/* Set up the array of drm_connector pointers */
+	psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
+	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+	       dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
+	dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] =
+							&psb_intel_crtc->base;
+	dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] =
+							&psb_intel_crtc->base;
+	psb_intel_crtc->mode_set.connectors =
+	    (struct drm_connector **) (psb_intel_crtc + 1);
+	psb_intel_crtc->mode_set.num_connectors = 0;
+	psb_intel_cursor_init(dev, psb_intel_crtc);
+
+	/* Set to true so that the pipe is forced off on initial config. */
+	psb_intel_crtc->active = true;
+}
+
+int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
+	struct drm_mode_object *drmmode_obj;
+	struct psb_intel_crtc *crtc;
+
+	if (!dev_priv) {
+		dev_err(dev->dev, "called with no initialization\n");
+		return -EINVAL;
+	}
+
+	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
+			DRM_MODE_OBJECT_CRTC);
+
+	if (!drmmode_obj) {
+		dev_err(dev->dev, "no such CRTC id\n");
+		return -EINVAL;
+	}
+
+	crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
+	pipe_from_crtc_id->pipe = crtc->pipe;
+
+	return 0;
+}
+
+struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+{
+	struct drm_crtc *crtc = NULL;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+		if (psb_intel_crtc->pipe == pipe)
+			break;
+	}
+	return crtc;
+}
+
+int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
+{
+	int index_mask = 0;
+	struct drm_connector *connector;
+	int entry = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    head) {
+		struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+		if (type_mask & (1 << psb_intel_encoder->type))
+			index_mask |= (1 << entry);
+		entry++;
+	}
+	return index_mask;
+}
+
+/* The current Intel driver doesn't take advantage of encoders, so
+   always give back the encoder for the connector.
+*/
+struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+
+	return &psb_intel_encoder->base;
+}
+
+void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector,
+					struct psb_intel_encoder *encoder)
+{
+	connector->encoder = encoder;
+	drm_mode_connector_attach_encoder(&connector->base,
+					  &encoder->base);
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_intel_display.h b/linux-imx/drivers/gpu/drm/gma500/psb_intel_display.h
new file mode 100644
index 0000000..3724b97
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_intel_display.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2008, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_intel_drv.h b/linux-imx/drivers/gpu/drm/gma500/psb_intel_drv.h
new file mode 100644
index 0000000..4dcae42
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2009-2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/gpio.h>
+
+/*
+ * Display related stuff
+ */
+
+/* maximum number of connectors per CRTC in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+/* Intel Pipe Clone Bit */
+#define INTEL_HDMIB_CLONE_BIT 1
+#define INTEL_HDMIC_CLONE_BIT 2
+#define INTEL_HDMID_CLONE_BIT 3
+#define INTEL_HDMIE_CLONE_BIT 4
+#define INTEL_HDMIF_CLONE_BIT 5
+#define INTEL_SDVO_NON_TV_CLONE_BIT 6
+#define INTEL_SDVO_TV_CLONE_BIT 7
+#define INTEL_SDVO_LVDS_CLONE_BIT 8
+#define INTEL_ANALOG_CLONE_BIT 9
+#define INTEL_TV_CLONE_BIT 10
+#define INTEL_DP_B_CLONE_BIT 11
+#define INTEL_DP_C_CLONE_BIT 12
+#define INTEL_DP_D_CLONE_BIT 13
+#define INTEL_LVDS_CLONE_BIT 14
+#define INTEL_DVO_TMDS_CLONE_BIT 15
+#define INTEL_DVO_LVDS_CLONE_BIT 16
+#define INTEL_EDP_CLONE_BIT 17
+
+/* These are the integrated outputs of the chip;
+ * external chips are attached via DVO or SDVO output. */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_MIPI 7
+#define INTEL_OUTPUT_MIPI2 8
+#define INTEL_OUTPUT_DISPLAYPORT 9
+#define INTEL_OUTPUT_EDP 10
+
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+
+static inline void
+psb_intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+				int multiplier)
+{
+	mode->clock *= multiplier;
+	mode->private_flags |= multiplier;
+}
+
+static inline int
+psb_intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+	return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK)
+	       >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
+
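+/*
+ * Example: psb_intel_mode_set_pixel_multiplier(mode, 2) doubles
+ * mode->clock and records the factor in mode->private_flags, so that
+ * psb_intel_mode_get_pixel_multiplier(mode) will later return 2.
+ */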
+
+/*
+ * Holds information usually kept in the device driver's private data,
+ * since it needs to be shared across multiple drivers' private data.
+ */
+struct psb_intel_mode_device {
+
+	/*
+	 * Abstracted memory manager operations
+	 */
+	 size_t(*bo_offset) (struct drm_device *dev, void *bo);
+
+	/*
+	 * LVDS info
+	 */
+	int backlight_duty_cycle;	/* restore backlight to this value */
+	bool panel_wants_dither;
+	struct drm_display_mode *panel_fixed_mode;
+	struct drm_display_mode *panel_fixed_mode2;
+	struct drm_display_mode *vbt_mode;	/* if any */
+
+	uint32_t saveBLC_PWM_CTL;
+};
+
+struct psb_intel_i2c_chan {
+	/* for getting at dev. private (mmio etc.) */
+	struct drm_device *drm_dev;
+	u32 reg;		/* GPIO reg */
+	struct i2c_adapter adapter;
+	struct i2c_algo_bit_data algo;
+	u8 slave_addr;
+};
+
+struct psb_intel_encoder {
+	struct drm_encoder base;
+	int type;
+	bool needs_tv_clock;
+	void (*hot_plug)(struct psb_intel_encoder *);
+	int crtc_mask;
+	int clone_mask;
+	u32 ddi_select;	/* Channel info */
+#define DDI0_SELECT	0x01
+#define DDI1_SELECT	0x02
+#define DP_MASK		0x8000
+#define DDI_MASK	0x03
+	void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
+
+	/* FIXME: Either make SDVO and LVDS store their i2c here or give CDV
+	   its own set of output privates */
+	struct psb_intel_i2c_chan *i2c_bus;
+	struct psb_intel_i2c_chan *ddc_bus;
+};
+
+struct psb_intel_connector {
+	struct drm_connector base;
+	struct psb_intel_encoder *encoder;
+};
+
+struct psb_intel_crtc_state {
+	uint32_t saveDSPCNTR;
+	uint32_t savePIPECONF;
+	uint32_t savePIPESRC;
+	uint32_t saveDPLL;
+	uint32_t saveFP0;
+	uint32_t saveFP1;
+	uint32_t saveHTOTAL;
+	uint32_t saveHBLANK;
+	uint32_t saveHSYNC;
+	uint32_t saveVTOTAL;
+	uint32_t saveVBLANK;
+	uint32_t saveVSYNC;
+	uint32_t saveDSPSTRIDE;
+	uint32_t saveDSPSIZE;
+	uint32_t saveDSPPOS;
+	uint32_t saveDSPBASE;
+	uint32_t savePalette[256];
+};
+
+struct psb_intel_crtc {
+	struct drm_crtc base;
+	int pipe;
+	int plane;
+	uint32_t cursor_addr;
+	struct gtt_range *cursor_gt;
+	u8 lut_r[256], lut_g[256], lut_b[256];
+	u8 lut_adj[256];
+	struct psb_intel_framebuffer *fbdev_fb;
+	/* a mode_set for fbdev users on this crtc */
+	struct drm_mode_set mode_set;
+
+	/* GEM object that holds our cursor */
+	struct drm_gem_object *cursor_obj;
+
+	struct drm_display_mode saved_mode;
+	struct drm_display_mode saved_adjusted_mode;
+
+	struct psb_intel_mode_device *mode_dev;
+
+	/*crtc mode setting flags*/
+	u32 mode_flags;
+
+	bool active;
+
+	/* Saved Crtc HW states */
+	struct psb_intel_crtc_state *crtc_state;
+};
+
+#define to_psb_intel_crtc(x)	\
+		container_of(x, struct psb_intel_crtc, base)
+#define to_psb_intel_connector(x) \
+		container_of(x, struct psb_intel_connector, base)
+#define to_psb_intel_encoder(x)	\
+		container_of(x, struct psb_intel_encoder, base)
+#define to_psb_intel_framebuffer(x)	\
+		container_of(x, struct psb_intel_framebuffer, base)
+
+struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+					const u32 reg, const char *name);
+void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+			    struct i2c_adapter *adapter);
+extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
+
+extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+			    struct psb_intel_mode_device *mode_dev);
+extern void psb_intel_crt_init(struct drm_device *dev);
+extern bool psb_intel_sdvo_init(struct drm_device *dev, int output_device);
+extern void psb_intel_dvo_init(struct drm_device *dev);
+extern void psb_intel_tv_init(struct drm_device *dev);
+extern void psb_intel_lvds_init(struct drm_device *dev,
+			    struct psb_intel_mode_device *mode_dev);
+extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
+extern void oaktrail_lvds_init(struct drm_device *dev,
+			   struct psb_intel_mode_device *mode_dev);
+extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
+extern void oaktrail_dsi_init(struct drm_device *dev,
+			   struct psb_intel_mode_device *mode_dev);
+extern void mid_dsi_init(struct drm_device *dev,
+		    struct psb_intel_mode_device *mode_dev, int dsi_num);
+
+extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
+extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
+extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
+extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
+
+static inline struct psb_intel_encoder *psb_intel_attached_encoder(
+						struct drm_connector *connector)
+{
+	return to_psb_intel_connector(connector)->encoder;
+}
+
+extern void psb_intel_connector_attach_encoder(
+					struct psb_intel_connector *connector,
+					struct psb_intel_encoder *encoder);
+
+extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
+					      *connector);
+
+extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+						    struct drm_crtc *crtc);
+extern void psb_intel_wait_for_vblank(struct drm_device *dev);
+extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
+						 int pipe);
+extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
+					     int sdvoB);
+extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
+extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
+				   int enable);
+extern int intelfb_probe(struct drm_device *dev);
+extern int intelfb_remove(struct drm_device *dev,
+			  struct drm_framebuffer *fb);
+extern struct drm_framebuffer *psb_intel_framebuffer_create(
+						struct drm_device *dev,
+						struct drm_mode_fb_cmd *mode_cmd,
+						void *mm_private);
+extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+				      const struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode);
+extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+				     struct drm_display_mode *mode);
+extern int psb_intel_lvds_set_property(struct drm_connector *connector,
+					struct drm_property *property,
+					uint64_t value);
+extern void psb_intel_lvds_destroy(struct drm_connector *connector);
+extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
+
+/* intel_gmbus.c */
+extern void gma_intel_i2c_reset(struct drm_device *dev);
+extern int gma_intel_setup_gmbus(struct drm_device *dev);
+extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern void gma_intel_teardown_gmbus(struct drm_device *dev);
+
+/* DP support */
+extern void cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg);
+extern void cdv_intel_dp_set_m_n(struct drm_crtc *crtc,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode);
+
+extern void psb_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void psb_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+extern int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val);
+extern int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val);
+extern void cdv_sb_reset(struct drm_device *dev);
+
+extern void cdv_intel_attach_force_audio_property(struct drm_connector *connector);
+extern void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+#endif				/* __INTEL_DRV_H__ */
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_intel_lvds.c b/linux-imx/drivers/gpu/drm/gma500/psb_intel_lvds.c
new file mode 100644
index 0000000..9fa5fa2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *	Dave Airlie <airlied@linux.ie>
+ *	Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+/*
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE	0x01
+#define BLC_PWM_TYPT	0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ       (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ	(0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR	(10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
+struct psb_intel_lvds_priv {
+	/*
+	 * Saved LVDS output states
+	 */
+	uint32_t savePP_ON;
+	uint32_t savePP_OFF;
+	uint32_t saveLVDS;
+	uint32_t savePP_CONTROL;
+	uint32_t savePP_CYCLE;
+	uint32_t savePFIT_CONTROL;
+	uint32_t savePFIT_PGM_RATIOS;
+	uint32_t saveBLC_PWM_CTL;
+
+	struct psb_intel_i2c_chan *i2c_bus;
+	struct psb_intel_i2c_chan *ddc_bus;
+};
+
+
+/*
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 ret;
+
+	if (gma_power_begin(dev, false)) {
+		ret = REG_READ(BLC_PWM_CTL);
+		gma_power_end(dev);
+	} else /* Powered off, use the saved value */
+		ret = dev_priv->regs.saveBLC_PWM_CTL;
+
+	/* The top 15 bits hold the modulation frequency */
+	ret = (ret &  BACKLIGHT_MODULATION_FREQ_MASK) >>
+					BACKLIGHT_MODULATION_FREQ_SHIFT;
+
+	ret *= 2;	/* Return a 16-bit range as needed for setting */
+	if (ret == 0)
+		dev_err(dev->dev, "BL bug: Reg %08x save %08X\n",
+			REG_READ(BLC_PWM_CTL), dev_priv->regs.saveBLC_PWM_CTL);
+	return ret;
+}
+
+/*
+ * Set LVDS backlight level by I2C command
+ *
+ * FIXME: at some point we need to both track this for PM and also
+ * disable runtime pm on MRST if the brightness is nil (ie blanked)
+ */
+static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
+					unsigned int level)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+
+	struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+	u8 out_buf[2];
+	unsigned int blc_i2c_brightness;
+
+	struct i2c_msg msgs[] = {
+		{
+			.addr = lvds_i2c_bus->slave_addr,
+			.flags = 0,
+			.len = 2,
+			.buf = out_buf,
+		}
+	};
+
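+	/* Scale the 0..BRIGHTNESS_MAX_LEVEL (100) input into the 8-bit
+	 * 0..255 range that the backlight controller expects. */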
+	blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+			     BRIGHTNESS_MASK /
+			     BRIGHTNESS_MAX_LEVEL);
+
+	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+		blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+
+	out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+	out_buf[1] = (u8)blc_i2c_brightness;
+
+	if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
+		dev_dbg(dev->dev, "I2C set brightness.(command, value) (%d, %d)\n",
+			dev_priv->lvds_bl->brightnesscmd,
+			blc_i2c_brightness);
+		return 0;
+	}
+
+	dev_err(dev->dev, "I2C transfer error\n");
+	return -1;
+}
+
+
+static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+	struct drm_psb_private *dev_priv =
+			(struct drm_psb_private *)dev->dev_private;
+
+	u32 max_pwm_blc;
+	u32 blc_pwm_duty_cycle;
+
+	max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
+
+	/* BLC_PWM_CTL should have been initialized during backlight device init */
+	BUG_ON(max_pwm_blc == 0);
+
+	blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+
+	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+		blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+
+	blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
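+	/* Pack the modulation period into the top 16 bits of BLC_PWM_CTL
+	 * and the on-time (duty cycle) into the bottom 16 bits. */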
+	REG_WRITE(BLC_PWM_CTL,
+		  (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+		  (blc_pwm_duty_cycle));
+
+	dev_info(dev->dev, "Backlight lvds set brightness %08x\n",
+		  (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+		  (blc_pwm_duty_cycle));
+
+	return 0;
+}
+
+/*
+ * Set LVDS backlight level either by I2C or PWM
+ */
+void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	dev_dbg(dev->dev, "backlight level is %d\n", level);
+
+	if (!dev_priv->lvds_bl) {
+		dev_err(dev->dev, "NO LVDS backlight info\n");
+		return;
+	}
+
+	if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+		psb_lvds_i2c_set_brightness(dev, level);
+	else
+		psb_lvds_pwm_set_brightness(dev, level);
+}
+
+/*
+ * Sets the backlight level.
+ *
+ * level: backlight level, from 0 to psb_intel_lvds_get_max_backlight().
+ */
+static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 blc_pwm_ctl;
+
+	if (gma_power_begin(dev, false)) {
+		blc_pwm_ctl = REG_READ(BLC_PWM_CTL);
+		blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+		REG_WRITE(BLC_PWM_CTL,
+				(blc_pwm_ctl |
+				(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+		dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
+					(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+		gma_power_end(dev);
+	} else {
+		blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL &
+				~BACKLIGHT_DUTY_CYCLE_MASK;
+		dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
+					(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+	}
+}
+
+/*
+ * Sets the power state for the panel.
+ */
+static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+	u32 pp_status;
+
+	if (!gma_power_begin(dev, true)) {
+		dev_err(dev->dev, "set power, chip off!\n");
+		return;
+	}
+
+	if (on) {
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+			  POWER_TARGET_ON);
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while ((pp_status & PP_ON) == 0);
+
+		psb_intel_lvds_set_backlight(dev,
+					     mode_dev->backlight_duty_cycle);
+	} else {
+		psb_intel_lvds_set_backlight(dev, 0);
+
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+			  ~POWER_TARGET_ON);
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while (pp_status & PP_ON);
+	}
+
+	gma_power_end(dev);
+}
+
+static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+
+	if (mode == DRM_MODE_DPMS_ON)
+		psb_intel_lvds_set_power(dev, true);
+	else
+		psb_intel_lvds_set_power(dev, false);
+
+	/* XXX: We never power down the LVDS pairs. */
+}
+
+static void psb_intel_lvds_save(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *)dev->dev_private;
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct psb_intel_lvds_priv *lvds_priv =
+		(struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+
+	lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
+	lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
+	lvds_priv->saveLVDS = REG_READ(LVDS);
+	lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
+	lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
+	/*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
+	lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+	lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
+	lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
+
+	/* TODO: move backlight_duty_cycle to psb_intel_lvds_priv */
+	dev_priv->backlight_duty_cycle = (dev_priv->regs.saveBLC_PWM_CTL &
+						BACKLIGHT_DUTY_CYCLE_MASK);
+
+	/*
+	 * If the light is off at server startup,
+	 * just make it full brightness
+	 */
+	if (dev_priv->backlight_duty_cycle == 0)
+		dev_priv->backlight_duty_cycle =
+		psb_intel_lvds_get_max_backlight(dev);
+
+	dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+			lvds_priv->savePP_ON,
+			lvds_priv->savePP_OFF,
+			lvds_priv->saveLVDS,
+			lvds_priv->savePP_CONTROL,
+			lvds_priv->savePP_CYCLE,
+			lvds_priv->saveBLC_PWM_CTL);
+}
+
+static void psb_intel_lvds_restore(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	u32 pp_status;
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct psb_intel_lvds_priv *lvds_priv =
+		(struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+
+	dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+			lvds_priv->savePP_ON,
+			lvds_priv->savePP_OFF,
+			lvds_priv->saveLVDS,
+			lvds_priv->savePP_CONTROL,
+			lvds_priv->savePP_CYCLE,
+			lvds_priv->saveBLC_PWM_CTL);
+
+	REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
+	REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
+	REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
+	REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
+	REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
+	/*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
+	REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
+	REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
+	REG_WRITE(LVDS, lvds_priv->saveLVDS);
+
+	if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+			POWER_TARGET_ON);
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while ((pp_status & PP_ON) == 0);
+	} else {
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+			~POWER_TARGET_ON);
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while (pp_status & PP_ON);
+	}
+}
+
+int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
+	struct drm_psb_private *dev_priv = connector->dev->dev_private;
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct drm_display_mode *fixed_mode =
+					dev_priv->mode_dev.panel_fixed_mode;
+
+	if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+		fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
+
+	/* just in case */
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	/* just in case */
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		return MODE_NO_INTERLACE;
+
+	if (fixed_mode) {
+		if (mode->hdisplay > fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+	return MODE_OK;
+}
+
+bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+	struct psb_intel_crtc *psb_intel_crtc =
+				to_psb_intel_crtc(encoder->crtc);
+	struct drm_encoder *tmp_encoder;
+	struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+	struct psb_intel_encoder *psb_intel_encoder =
+						to_psb_intel_encoder(encoder);
+
+	if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+		panel_fixed_mode = mode_dev->panel_fixed_mode2;
+
+	/* PSB requires LVDS to be on pipe B; MRST has only one pipe anyway */
+	if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
+		printk(KERN_ERR "Can't support LVDS on pipe A\n");
+		return false;
+	}
+	if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
+		printk(KERN_ERR "Must use PIPE A\n");
+		return false;
+	}
+	/* Should never happen!! */
+	list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+			    head) {
+		if (tmp_encoder != encoder
+		    && tmp_encoder->crtc == encoder->crtc) {
+			printk(KERN_ERR "Can't enable LVDS and another "
+			       "encoder on the same pipe\n");
+			return false;
+		}
+	}
+
+	/*
+	 * If we have timings from the BIOS for the panel, put them in
+	 * to the adjusted mode.  The CRTC will be set up for this mode,
+	 * with the panel scaling set up to source from the H/VDisplay
+	 * of the original mode.
+	 */
+	if (panel_fixed_mode != NULL) {
+		adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+		adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+		adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+		adjusted_mode->htotal = panel_fixed_mode->htotal;
+		adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+		adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+		adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+		adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+		adjusted_mode->clock = panel_fixed_mode->clock;
+		drm_mode_set_crtcinfo(adjusted_mode,
+				      CRTC_INTERLACE_HALVE_V);
+	}
+
+	/*
+	 * XXX: It would be nice to support lower refresh rates on the
+	 * panels to reduce power consumption, and perhaps match the
+	 * user's requested refresh rate.
+	 */
+
+	return true;
+}
+
+static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+	if (!gma_power_begin(dev, true))
+		return;
+
+	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+					  BACKLIGHT_DUTY_CYCLE_MASK);
+
+	psb_intel_lvds_set_power(dev, false);
+
+	gma_power_end(dev);
+}
+
+static void psb_intel_lvds_commit(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+
+	if (mode_dev->backlight_duty_cycle == 0)
+		mode_dev->backlight_duty_cycle =
+		    psb_intel_lvds_get_max_backlight(dev);
+
+	psb_intel_lvds_set_power(dev, true);
+}
+
+static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 pfit_control;
+
+	/*
+	 * The LVDS pin pair will already have been turned on in the
+	 * psb_intel_crtc_mode_set since it has a large impact on the DPLL
+	 * settings.
+	 */
+
+	/*
+	 * Enable automatic panel scaling so that non-native modes fill the
+	 * screen.  Should be enabled before the pipe is enabled, according to
+	 * register description and PRM.
+	 */
+	if (mode->hdisplay != adjusted_mode->hdisplay ||
+	    mode->vdisplay != adjusted_mode->vdisplay)
+		pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+				HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+				HORIZ_INTERP_BILINEAR);
+	else
+		pfit_control = 0;
+
+	if (dev_priv->lvds_dither)
+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+	REG_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/*
+ * Detect the LVDS connection.
+ *
+ * This always returns CONNECTOR_STATUS_CONNECTED. This connector
+ * should only have been set up if the LVDS was actually connected
+ * anyway.
+ */
+static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
+						   *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+/*
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int psb_intel_lvds_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+	int ret = 0;
+
+	if (!IS_MRST(dev))
+		ret = psb_intel_ddc_get_modes(connector, &lvds_priv->i2c_bus->adapter);
+
+	if (ret)
+		return ret;
+
+	/* Didn't get an EDID, so set wide sync ranges to ensure that all
+	 * modes are handed to valid_mode for checking.
+	 */
+	connector->display_info.min_vfreq = 0;
+	connector->display_info.max_vfreq = 200;
+	connector->display_info.min_hfreq = 0;
+	connector->display_info.max_hfreq = 200;
+
+	if (mode_dev->panel_fixed_mode != NULL) {
+		struct drm_display_mode *mode =
+		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+		drm_mode_probed_add(connector, mode);
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * psb_intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+void psb_intel_lvds_destroy(struct drm_connector *connector)
+{
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+
+	if (lvds_priv->ddc_bus)
+		psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+int psb_intel_lvds_set_property(struct drm_connector *connector,
+				       struct drm_property *property,
+				       uint64_t value)
+{
+	struct drm_encoder *encoder = connector->encoder;
+
+	if (!encoder)
+		return -1;
+
+	if (!strcmp(property->name, "scaling mode")) {
+		struct psb_intel_crtc *crtc =
+					to_psb_intel_crtc(encoder->crtc);
+		uint64_t curval;
+
+		if (!crtc)
+			goto set_prop_error;
+
+		switch (value) {
+		case DRM_MODE_SCALE_FULLSCREEN:
+			break;
+		case DRM_MODE_SCALE_NO_SCALE:
+			break;
+		case DRM_MODE_SCALE_ASPECT:
+			break;
+		default:
+			goto set_prop_error;
+		}
+
+		if (drm_object_property_get_value(&connector->base,
+						     property,
+						     &curval))
+			goto set_prop_error;
+
+		if (curval == value)
+			goto set_prop_done;
+
+		if (drm_object_property_set_value(&connector->base,
+							property,
+							value))
+			goto set_prop_error;
+
+		if (crtc->saved_mode.hdisplay != 0 &&
+		    crtc->saved_mode.vdisplay != 0) {
+			if (!drm_crtc_helper_set_mode(encoder->crtc,
+						      &crtc->saved_mode,
+						      encoder->crtc->x,
+						      encoder->crtc->y,
+						      encoder->crtc->fb))
+				goto set_prop_error;
+		}
+	} else if (!strcmp(property->name, "backlight")) {
+		if (drm_object_property_set_value(&connector->base,
+							property,
+							value))
+			goto set_prop_error;
+		else
+			gma_backlight_set(encoder->dev, value);
+	} else if (!strcmp(property->name, "DPMS")) {
+		struct drm_encoder_helper_funcs *hfuncs
+						= encoder->helper_private;
+		hfuncs->dpms(encoder, value);
+	}
+
+set_prop_done:
+	return 0;
+set_prop_error:
+	return -1;
+}
+
+static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
+	.dpms = psb_intel_lvds_encoder_dpms,
+	.mode_fixup = psb_intel_lvds_mode_fixup,
+	.prepare = psb_intel_lvds_prepare,
+	.mode_set = psb_intel_lvds_mode_set,
+	.commit = psb_intel_lvds_commit,
+};
+
+const struct drm_connector_helper_funcs
+				psb_intel_lvds_connector_helper_funcs = {
+	.get_modes = psb_intel_lvds_get_modes,
+	.mode_valid = psb_intel_lvds_mode_valid,
+	.best_encoder = psb_intel_best_encoder,
+};
+
+const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.save = psb_intel_lvds_save,
+	.restore = psb_intel_lvds_restore,
+	.detect = psb_intel_lvds_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = psb_intel_lvds_set_property,
+	.destroy = psb_intel_lvds_destroy,
+};
+
+
+static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
+	.destroy = psb_intel_lvds_enc_destroy,
+};
+
+
+
+/**
+ * psb_intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void psb_intel_lvds_init(struct drm_device *dev,
+			 struct psb_intel_mode_device *mode_dev)
+{
+	struct psb_intel_encoder *psb_intel_encoder;
+	struct psb_intel_connector *psb_intel_connector;
+	struct psb_intel_lvds_priv *lvds_priv;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_display_mode *scan;	/* *modes, *bios_mode; */
+	struct drm_crtc *crtc;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 lvds;
+	int pipe;
+
+	psb_intel_encoder =
+			kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+	if (!psb_intel_encoder) {
+		dev_err(dev->dev, "psb_intel_encoder allocation error\n");
+		return;
+	}
+
+	psb_intel_connector =
+		kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+	if (!psb_intel_connector) {
+		dev_err(dev->dev, "psb_intel_connector allocation error\n");
+		goto failed_encoder;
+	}
+
+	lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
+	if (!lvds_priv) {
+		dev_err(dev->dev, "LVDS private allocation error\n");
+		goto failed_connector;
+	}
+
+	psb_intel_encoder->dev_priv = lvds_priv;
+
+	connector = &psb_intel_connector->base;
+	encoder = &psb_intel_encoder->base;
+	drm_connector_init(dev, connector,
+			   &psb_intel_lvds_connector_funcs,
+			   DRM_MODE_CONNECTOR_LVDS);
+
+	drm_encoder_init(dev, encoder,
+			 &psb_intel_lvds_enc_funcs,
+			 DRM_MODE_ENCODER_LVDS);
+
+	psb_intel_connector_attach_encoder(psb_intel_connector,
+					   psb_intel_encoder);
+	psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+	drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
+	drm_connector_helper_add(connector,
+				 &psb_intel_lvds_connector_helper_funcs);
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	/*Attach connector properties*/
+	drm_object_attach_property(&connector->base,
+				      dev->mode_config.scaling_mode_property,
+				      DRM_MODE_SCALE_FULLSCREEN);
+	drm_object_attach_property(&connector->base,
+				      dev_priv->backlight_property,
+				      BRIGHTNESS_MAX_LEVEL);
+
+	/*
+	 * Set up the I2C bus
+	 * FIXME: destroy i2c_bus on exit
+	 */
+	lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
+	if (!lvds_priv->i2c_bus) {
+		dev_printk(KERN_ERR,
+			&dev->pdev->dev, "I2C bus registration failed.\n");
+		goto failed_blc_i2c;
+	}
+	lvds_priv->i2c_bus->slave_addr = 0x2C;
+	dev_priv->lvds_i2c_bus =  lvds_priv->i2c_bus;
+
+	/*
+	 * LVDS discovery:
+	 * 1) check for EDID on DDC
+	 * 2) check for VBT data
+	 * 3) check to see if LVDS is already on
+	 *    if none of the above, no panel
+	 * 4) make sure lid is open
+	 *    if closed, act like it's not there for now
+	 */
+
+	/* Set up the DDC bus. */
+	lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
+	if (!lvds_priv->ddc_bus) {
+		dev_printk(KERN_ERR, &dev->pdev->dev,
+			   "DDC bus registration " "failed.\n");
+		goto failed_ddc;
+	}
+
+	/*
+	 * Attempt to get the fixed panel mode from DDC.  Assume that the
+	 * preferred mode is the right one.
+	 */
+	psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter);
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+			mode_dev->panel_fixed_mode =
+			    drm_mode_duplicate(dev, scan);
+			goto out;	/* FIXME: check for quirks */
+		}
+	}
+
+	/* Failed to get EDID; what about VBT? Do we need this? */
+	if (mode_dev->vbt_mode)
+		mode_dev->panel_fixed_mode =
+		    drm_mode_duplicate(dev, mode_dev->vbt_mode);
+
+	if (!mode_dev->panel_fixed_mode)
+		if (dev_priv->lfp_lvds_vbt_mode)
+			mode_dev->panel_fixed_mode =
+				drm_mode_duplicate(dev,
+					dev_priv->lfp_lvds_vbt_mode);
+
+	/*
+	 * If we didn't get EDID, try checking if the panel is already turned
+	 * on. If so, assume that whatever is currently programmed is the
+	 * correct mode.
+	 */
+	lvds = REG_READ(LVDS);
+	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+	crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+
+	if (crtc && (lvds & LVDS_PORT_EN)) {
+		mode_dev->panel_fixed_mode =
+		    psb_intel_crtc_mode_get(dev, crtc);
+		if (mode_dev->panel_fixed_mode) {
+			mode_dev->panel_fixed_mode->type |=
+			    DRM_MODE_TYPE_PREFERRED;
+			goto out;	/* FIXME: check for quirks */
+		}
+	}
+
+	/* If we still don't have a mode after all that, give up. */
+	if (!mode_dev->panel_fixed_mode) {
+		dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
+		goto failed_find;
+	}
+
+	/*
+	 * Blacklist machines with BIOSes that list an LVDS panel without
+	 * actually having one.
+	 */
+out:
+	drm_sysfs_connector_add(connector);
+	return;
+
+failed_find:
+	if (lvds_priv->ddc_bus)
+		psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+failed_ddc:
+	if (lvds_priv->i2c_bus)
+		psb_intel_i2c_destroy(lvds_priv->i2c_bus);
+failed_blc_i2c:
+	drm_encoder_cleanup(encoder);
+	drm_connector_cleanup(connector);
+failed_connector:
+	kfree(psb_intel_connector);
+failed_encoder:
+	kfree(psb_intel_encoder);
+}
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_intel_modes.c b/linux-imx/drivers/gpu/drm/gma500/psb_intel_modes.c
new file mode 100644
index 0000000..4fca0d6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2007 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <drm/drmP.h>
+#include "psb_intel_drv.h"
+
+/**
+ * psb_intel_ddc_probe - check for a DDC/EDID device
+ * @adapter: I2C adapter to probe
+ *
+ * Writes a zero offset to the standard EDID address (0x50) and reads a
+ * byte back. Returns true if the device responds to both transfers.
+ */
+bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
+{
+	u8 out_buf[] = { 0x0, 0x0 };
+	u8 buf[2];
+	int ret;
+	struct i2c_msg msgs[] = {
+		{
+		 .addr = 0x50,
+		 .flags = 0,
+		 .len = 1,
+		 .buf = out_buf,
+		 },
+		{
+		 .addr = 0x50,
+		 .flags = I2C_M_RD,
+		 .len = 1,
+		 .buf = buf,
+		 }
+	};
+
+	ret = i2c_transfer(adapter, msgs, 2);
+	if (ret == 2)
+		return true;
+
+	return false;
+}
+
+/**
+ * psb_intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
+ */
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+			    struct i2c_adapter *adapter)
+{
+	struct edid *edid;
+	int ret = 0;
+
+	edid = drm_get_edid(connector, adapter);
+	if (edid) {
+		drm_mode_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_intel_reg.h b/linux-imx/drivers/gpu/drm/gma500/psb_intel_reg.h
new file mode 100644
index 0000000..0be30e4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -0,0 +1,1545 @@
+/*
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __PSB_INTEL_REG_H__
+#define __PSB_INTEL_REG_H__
+
+/*
+ * GPIO regs
+ */
+#define GPIOA			0x5010
+#define GPIOB			0x5014
+#define GPIOC			0x5018
+#define GPIOD			0x501c
+#define GPIOE			0x5020
+#define GPIOF			0x5024
+#define GPIOG			0x5028
+#define GPIOH			0x502c
+# define GPIO_CLOCK_DIR_MASK		(1 << 0)
+# define GPIO_CLOCK_DIR_IN		(0 << 1)
+# define GPIO_CLOCK_DIR_OUT		(1 << 1)
+# define GPIO_CLOCK_VAL_MASK		(1 << 2)
+# define GPIO_CLOCK_VAL_OUT		(1 << 3)
+# define GPIO_CLOCK_VAL_IN		(1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE	(1 << 5)
+# define GPIO_DATA_DIR_MASK		(1 << 8)
+# define GPIO_DATA_DIR_IN		(0 << 9)
+# define GPIO_DATA_DIR_OUT		(1 << 9)
+# define GPIO_DATA_VAL_MASK		(1 << 10)
+# define GPIO_DATA_VAL_OUT		(1 << 11)
+# define GPIO_DATA_VAL_IN		(1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
+
+#define GMBUS0			0x5100 /* clock/port select */
+#define   GMBUS_RATE_100KHZ	(0<<8)
+#define   GMBUS_RATE_50KHZ	(1<<8)
+#define   GMBUS_RATE_400KHZ	(2<<8) /* reserved on Pineview */
+#define   GMBUS_RATE_1MHZ	(3<<8) /* reserved on Pineview */
+#define   GMBUS_HOLD_EXT	(1<<7) /* 300ns hold time, rsvd on Pineview */
+#define   GMBUS_PORT_DISABLED	0
+#define   GMBUS_PORT_SSC	1
+#define   GMBUS_PORT_VGADDC	2
+#define   GMBUS_PORT_PANEL	3
+#define   GMBUS_PORT_DPC	4 /* HDMIC */
+#define   GMBUS_PORT_DPB	5 /* SDVO, HDMIB */
+				  /* 6 reserved */
+#define   GMBUS_PORT_DPD	7 /* HDMID */
+#define   GMBUS_NUM_PORTS       8
+#define GMBUS1			0x5104 /* command/status */
+#define   GMBUS_SW_CLR_INT	(1<<31)
+#define   GMBUS_SW_RDY		(1<<30)
+#define   GMBUS_ENT		(1<<29) /* enable timeout */
+#define   GMBUS_CYCLE_NONE	(0<<25)
+#define   GMBUS_CYCLE_WAIT	(1<<25)
+#define   GMBUS_CYCLE_INDEX	(2<<25)
+#define   GMBUS_CYCLE_STOP	(4<<25)
+#define   GMBUS_BYTE_COUNT_SHIFT 16
+#define   GMBUS_SLAVE_INDEX_SHIFT 8
+#define   GMBUS_SLAVE_ADDR_SHIFT 1
+#define   GMBUS_SLAVE_READ	(1<<0)
+#define   GMBUS_SLAVE_WRITE	(0<<0)
+#define GMBUS2			0x5108 /* status */
+#define   GMBUS_INUSE		(1<<15)
+#define   GMBUS_HW_WAIT_PHASE	(1<<14)
+#define   GMBUS_STALL_TIMEOUT	(1<<13)
+#define   GMBUS_INT		(1<<12)
+#define   GMBUS_HW_RDY		(1<<11)
+#define   GMBUS_SATOER		(1<<10)
+#define   GMBUS_ACTIVE		(1<<9)
+#define GMBUS3			0x510c /* data buffer bytes 3-0 */
+#define GMBUS4			0x5110 /* interrupt mask (Pineview+) */
+#define   GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define   GMBUS_NAK_EN		(1<<3)
+#define   GMBUS_IDLE_EN		(1<<2)
+#define   GMBUS_HW_WAIT_EN	(1<<1)
+#define   GMBUS_HW_RDY_EN	(1<<0)
+#define GMBUS5			0x5120 /* byte index */
+#define   GMBUS_2BYTE_INDEX_EN	(1<<31)
+
+#define BLC_PWM_CTL		0x61254
+#define BLC_PWM_CTL2		0x61250
+#define  PWM_ENABLE		(1 << 31)
+#define  PWM_LEGACY_MODE	(1 << 30)
+#define  PWM_PIPE_B		(1 << 29)
+#define BLC_PWM_CTL_C		0x62254
+#define BLC_PWM_CTL2_C		0x62250
+#define BACKLIGHT_MODULATION_FREQ_SHIFT		(17)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK	(0x7fff << 17)
+#define BLM_LEGACY_MODE			(1 << 16)
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT	(0)
+#define BACKLIGHT_DUTY_CYCLE_MASK	(0xffff)
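+/*
+ * Worked example: a frequency field of 100 describes a modulation
+ * period of 2 * 100 = 200 units, so writing 100 into the duty cycle
+ * field would give a 50% duty cycle, i.e. roughly half brightness.
+ */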
+
+#define I915_GCFGC			0xf0
+#define I915_LOW_FREQUENCY_ENABLE	(1 << 7)
+#define I915_DISPLAY_CLOCK_190_200_MHZ	(0 << 4)
+#define I915_DISPLAY_CLOCK_333_MHZ	(4 << 4)
+#define I915_DISPLAY_CLOCK_MASK		(7 << 4)
+
+#define I855_HPLLCC			0xc0
+#define I855_CLOCK_CONTROL_MASK		(3 << 0)
+#define I855_CLOCK_133_200		(0 << 0)
+#define I855_CLOCK_100_200		(1 << 0)
+#define I855_CLOCK_100_133		(2 << 0)
+#define I855_CLOCK_166_250		(3 << 0)
+
+/* I830 CRTC registers */
+#define HTOTAL_A		0x60000
+#define HBLANK_A		0x60004
+#define HSYNC_A			0x60008
+#define VTOTAL_A		0x6000c
+#define VBLANK_A		0x60010
+#define VSYNC_A			0x60014
+#define PIPEASRC		0x6001c
+#define BCLRPAT_A		0x60020
+#define VSYNCSHIFT_A		0x60028
+
+#define HTOTAL_B		0x61000
+#define HBLANK_B		0x61004
+#define HSYNC_B			0x61008
+#define VTOTAL_B		0x6100c
+#define VBLANK_B		0x61010
+#define VSYNC_B			0x61014
+#define PIPEBSRC		0x6101c
+#define BCLRPAT_B		0x61020
+#define VSYNCSHIFT_B		0x61028
+
+#define HTOTAL_C		0x62000
+#define HBLANK_C		0x62004
+#define HSYNC_C			0x62008
+#define VTOTAL_C		0x6200c
+#define VBLANK_C		0x62010
+#define VSYNC_C			0x62014
+#define PIPECSRC		0x6201c
+#define BCLRPAT_C		0x62020
+#define VSYNCSHIFT_C		0x62028
+
+#define PP_STATUS		0x61200
+# define PP_ON				(1 << 31)
+/*
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define PP_READY			(1 << 30)
+#define PP_SEQUENCE_NONE		(0 << 28)
+#define PP_SEQUENCE_ON			(1 << 28)
+#define PP_SEQUENCE_OFF			(2 << 28)
+#define PP_SEQUENCE_MASK		0x30000000
+#define	PP_CYCLE_DELAY_ACTIVE		(1 << 27)
+#define	PP_SEQUENCE_STATE_ON_IDLE	(1 << 3)
+#define	PP_SEQUENCE_STATE_MASK		0x0000000f
+
+#define PP_CONTROL		0x61204
+#define POWER_TARGET_ON			(1 << 0)
+#define	PANEL_UNLOCK_REGS		(0xabcd << 16)
+#define	PANEL_UNLOCK_MASK		(0xffff << 16)
+#define	EDP_FORCE_VDD			(1 << 3)
+#define	EDP_BLC_ENABLE			(1 << 2)
+#define	PANEL_POWER_RESET		(1 << 1)
+#define	PANEL_POWER_OFF			(0 << 0)
+#define	PANEL_POWER_ON			(1 << 0)
+
+/* Poulsbo/Oaktrail */
+#define LVDSPP_ON		0x61208
+#define LVDSPP_OFF		0x6120c
+#define PP_CYCLE		0x61210
+
+/* Cedartrail */
+#define PP_ON_DELAYS		0x61208		/* Cedartrail */
+#define PANEL_PORT_SELECT_MASK 		(3 << 30)
+#define PANEL_PORT_SELECT_LVDS 		(0 << 30)
+#define PANEL_PORT_SELECT_EDP		(1 << 30)
+#define PANEL_POWER_UP_DELAY_MASK	(0x1fff0000)
+#define PANEL_POWER_UP_DELAY_SHIFT	16
+#define PANEL_LIGHT_ON_DELAY_MASK	(0x1fff)
+#define PANEL_LIGHT_ON_DELAY_SHIFT	0
+
+#define PP_OFF_DELAYS		0x6120c		/* Cedartrail */
+#define PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000)
+#define PANEL_POWER_DOWN_DELAY_SHIFT	16
+#define PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff)
+#define PANEL_LIGHT_OFF_DELAY_SHIFT	0
+
+#define PP_DIVISOR		0x61210		/* Cedartrail */
+#define  PP_REFERENCE_DIVIDER_MASK	(0xffffff00)
+#define  PP_REFERENCE_DIVIDER_SHIFT	8
+#define  PANEL_POWER_CYCLE_DELAY_MASK	(0x1f)
+#define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
+
+#define PFIT_CONTROL		0x61230
+#define PFIT_ENABLE			(1 << 31)
+#define PFIT_PIPE_MASK			(3 << 29)
+#define PFIT_PIPE_SHIFT			29
+#define PFIT_SCALING_MODE_PILLARBOX	(1 << 27)
+#define PFIT_SCALING_MODE_LETTERBOX	(3 << 26)
+#define VERT_INTERP_DISABLE		(0 << 10)
+#define VERT_INTERP_BILINEAR		(1 << 10)
+#define VERT_INTERP_MASK		(3 << 10)
+#define VERT_AUTO_SCALE			(1 << 9)
+#define HORIZ_INTERP_DISABLE		(0 << 6)
+#define HORIZ_INTERP_BILINEAR		(1 << 6)
+#define HORIZ_INTERP_MASK		(3 << 6)
+#define HORIZ_AUTO_SCALE		(1 << 5)
+#define PANEL_8TO6_DITHER_ENABLE	(1 << 3)
+
+#define PFIT_PGM_RATIOS		0x61234
+#define PFIT_VERT_SCALE_MASK			0xfff00000
+#define PFIT_HORIZ_SCALE_MASK			0x0000fff0
+
+#define PFIT_AUTO_RATIOS	0x61238
+
+#define DPLL_A			0x06014
+#define DPLL_B			0x06018
+#define DPLL_VCO_ENABLE			(1 << 31)
+#define DPLL_DVO_HIGH_SPEED		(1 << 30)
+#define DPLL_SYNCLOCK_ENABLE		(1 << 29)
+#define DPLL_VGA_MODE_DIS		(1 << 28)
+#define DPLLB_MODE_DAC_SERIAL		(1 << 26)	/* i915 */
+#define DPLLB_MODE_LVDS			(2 << 26)	/* i915 */
+#define DPLL_MODE_MASK			(3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10	(0 << 24)	/* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5	(1 << 24)	/* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14	(0 << 24)	/* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7	(1 << 24)	/* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK		0x03000000	/* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000	/* i915 */
+#define DPLL_LOCK			(1 << 15)	/* CDV */
+
+/*
+ *  The i830 generation, in DAC/serial mode, defines p1 as two plus this
+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT	16
+#define PLL_P2_DIVIDE_BY_4		(1 << 23)	/* i830, required
+							 * in DVO non-gang */
+# define PLL_P1_DIVIDE_BY_TWO		(1 << 21)	/* i830 */
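+
+/*
+ * Illustrative decode (editor's sketch, hypothetical helper): recovering
+ * the i830 P1 divider from a DPLL value per the two encodings described
+ * above -- "two plus the bitfield" for DAC/serial mode, "position of the
+ * single set bit" for LVDS mode.
+ */
+static inline int dpll_example_decode_i830_p1(uint32_t dpll, int is_lvds)
+{
+	uint32_t fp;
+	int p1 = 1;
+
+	if (is_lvds) {
+		fp = (dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+		     DPLL_FPA01_P1_POST_DIV_SHIFT;
+		while (fp > 1) {	/* find the one set bit (1-based) */
+			fp >>= 1;
+			p1++;
+		}
+		return p1;
+	}
+	if (dpll & PLL_P1_DIVIDE_BY_TWO)
+		return 2;
+	fp = (dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+	     DPLL_FPA01_P1_POST_DIV_SHIFT;
+	return fp + 2;
+}
+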
+#define PLL_REF_INPUT_DREFCLK		(0 << 13)
+#define PLL_REF_INPUT_TVCLKINA		(1 << 13)	/* i830 */
+#define PLL_REF_INPUT_TVCLKINBC		(2 << 13)	/* SDVO
+								 * TVCLKIN */
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN	(3 << 13)
+#define PLL_REF_INPUT_MASK		(3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT	9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+#define PLL_LOAD_PULSE_PHASE_MASK	(0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define DISPLAY_RATE_SELECT_FPA1	(1 << 8)
+
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ *
+ * DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_MULTIPLIER_MASK		0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES	4
+#define SDVO_MULTIPLIER_SHIFT_VGA	0
+
+/*
+ * PLL_MD
+ */
+/* Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_A_MD		0x0601c
+/* Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_B_MD		0x06020
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
+ */
+#define DPLL_MD_UDI_DIVIDER_MASK	0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT	24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK	0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT	16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides
+ * of the link knowing how many bytes are filler.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the
+ * clock rate to 130 MHz to get a bus rate of 1.30 GHz.  The DPLL clock rate
+ * would be set to 130 MHz, and the SDVO multiplier set to 2x in this
+ * register and through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define DPLL_MD_UDI_MULTIPLIER_MASK	0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT	8
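+
+/*
+ * Worked example (editor's sketch, hypothetical helper): picking the SDVO
+ * pixel multiplier from a dot clock in kHz so the 10x bus clock lands in
+ * the 1-2 GHz window described above.  The chosen factor minus 1 is what
+ * would be programmed at DPLL_MD_UDI_MULTIPLIER_SHIFT.
+ */
+static inline int dpll_example_sdvo_multiplier(int dotclock_khz)
+{
+	if (dotclock_khz >= 100000)	/* >= 100 MHz: bus already >= 1 GHz */
+		return 1;
+	if (dotclock_khz >= 50000)	/* 50-100 MHz: double the clock */
+		return 2;
+	return 4;			/* below 50 MHz: quadruple */
+}
+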
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This is best set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+
+#define DPLL_TEST		0x606c
+#define DPLLB_TEST_SDVO_DIV_1		(0 << 22)
+#define DPLLB_TEST_SDVO_DIV_2		(1 << 22)
+#define DPLLB_TEST_SDVO_DIV_4		(2 << 22)
+#define DPLLB_TEST_SDVO_DIV_MASK	(3 << 22)
+#define DPLLB_TEST_N_BYPASS		(1 << 19)
+#define DPLLB_TEST_M_BYPASS		(1 << 18)
+#define DPLLB_INPUT_BUFFER_ENABLE	(1 << 16)
+#define DPLLA_TEST_N_BYPASS		(1 << 3)
+#define DPLLA_TEST_M_BYPASS		(1 << 2)
+#define DPLLA_INPUT_BUFFER_ENABLE	(1 << 0)
+
+#define ADPA			0x61100
+#define ADPA_DAC_ENABLE			(1 << 31)
+#define ADPA_DAC_DISABLE		0
+#define ADPA_PIPE_SELECT_MASK		(1 << 30)
+#define ADPA_PIPE_A_SELECT		0
+#define ADPA_PIPE_B_SELECT		(1 << 30)
+#define ADPA_USE_VGA_HVPOLARITY		(1 << 15)
+#define ADPA_SETS_HVPOLARITY		0
+#define ADPA_VSYNC_CNTL_DISABLE		(1 << 11)
+#define ADPA_VSYNC_CNTL_ENABLE		0
+#define ADPA_HSYNC_CNTL_DISABLE		(1 << 10)
+#define ADPA_HSYNC_CNTL_ENABLE		0
+#define ADPA_VSYNC_ACTIVE_HIGH		(1 << 4)
+#define ADPA_VSYNC_ACTIVE_LOW		0
+#define ADPA_HSYNC_ACTIVE_HIGH		(1 << 3)
+#define ADPA_HSYNC_ACTIVE_LOW		0
+
+#define FPA0			0x06040
+#define FPA1			0x06044
+#define FPB0			0x06048
+#define FPB1			0x0604c
+#define FP_N_DIV_MASK			0x003f0000
+#define FP_N_DIV_SHIFT			16
+#define FP_M1_DIV_MASK			0x00003f00
+#define FP_M1_DIV_SHIFT			8
+#define FP_M2_DIV_MASK			0x0000003f
+#define FP_M2_DIV_SHIFT			0
+
+#define PORT_HOTPLUG_EN		0x61110
+#define HDMIB_HOTPLUG_INT_EN		(1 << 29)
+#define HDMIC_HOTPLUG_INT_EN		(1 << 28)
+#define HDMID_HOTPLUG_INT_EN		(1 << 27)
+#define SDVOB_HOTPLUG_INT_EN		(1 << 26)
+#define SDVOC_HOTPLUG_INT_EN		(1 << 25)
+#define TV_HOTPLUG_INT_EN		(1 << 18)
+#define CRT_HOTPLUG_INT_EN		(1 << 9)
+#define CRT_HOTPLUG_FORCE_DETECT	(1 << 3)
+/* CDV */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64	(1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M		(0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M		(1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40		(0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50		(1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60		(2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70		(3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK	(3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G		(0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G		(1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV	(0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
+#define CRT_HOTPLUG_DETECT_MASK			0x000000F8
+
+#define PORT_HOTPLUG_STAT	0x61114
+#define CRT_HOTPLUG_INT_STATUS		(1 << 11)
+#define TV_HOTPLUG_INT_STATUS		(1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK	(3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR	(3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO	(2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE	(0 << 8)
+#define SDVOC_HOTPLUG_INT_STATUS	(1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS	(1 << 6)
+
+#define SDVOB			0x61140
+#define SDVOC			0x61160
+#define SDVO_ENABLE			(1 << 31)
+#define SDVO_PIPE_B_SELECT		(1 << 30)
+#define SDVO_STALL_SELECT		(1 << 29)
+#define SDVO_INTERRUPT_ENABLE		(1 << 26)
+#define SDVO_COLOR_RANGE_16_235		(1 << 8)
+#define SDVO_AUDIO_ENABLE		(1 << 6)
+
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK		(7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT	23
+#define SDVO_PHASE_SELECT_MASK		(15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT	(6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT	(1 << 18)
+#define SDVOC_GANG_MODE			(1 << 16)
+#define SDVO_BORDER_ENABLE		(1 << 7)
+#define SDVOB_PCIE_CONCURRENCY		(1 << 3)
+#define SDVO_DETECTED			(1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK		((1 << 17) | (1 << 16) | (1 << 14))
+#define SDVOC_PRESERVE_MASK		(1 << 17)
+
+/*
+ * This register controls the LVDS output enable, pipe selection, and data
+ * format selection.
+ *
+ * All of the clock/data pairs are force powered down by power sequencing.
+ */
+#define LVDS			0x61180
+/*
+ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define LVDS_PORT_EN			(1 << 31)
+/* Selects pipe B for LVDS data.  Must be set on pre-965. */
+#define LVDS_PIPEB_SELECT		(1 << 30)
+
+/* Turns on border drawing to allow centered display. */
+#define LVDS_BORDER_EN			(1 << 15)
+
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
+#define LVDS_A0A2_CLKA_POWER_UP		(3 << 8)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define LVDS_A3_POWER_MASK		(3 << 6)
+#define LVDS_A3_POWER_DOWN		(0 << 6)
+#define LVDS_A3_POWER_UP		(3 << 6)
+/*
+ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define LVDS_CLKB_POWER_MASK		(3 << 4)
+#define LVDS_CLKB_POWER_DOWN		(0 << 4)
+#define LVDS_CLKB_POWER_UP		(3 << 4)
+/*
+ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode.  The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define LVDS_B0B3_POWER_MASK		(3 << 2)
+#define LVDS_B0B3_POWER_DOWN		(0 << 2)
+#define LVDS_B0B3_POWER_UP		(3 << 2)
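+
+/*
+ * Illustrative sketch (editor's addition, hypothetical helper): composing
+ * a dual-channel, 24-bit LVDS value from the pair controls above.  Only
+ * the register value is built here; the power sequencing rules still apply.
+ */
+static inline uint32_t lvds_example_dual_channel_24bpp(uint32_t lvds)
+{
+	lvds &= ~(LVDS_A0A2_CLKA_POWER_MASK | LVDS_A3_POWER_MASK |
+		  LVDS_CLKB_POWER_MASK | LVDS_B0B3_POWER_MASK);
+	lvds |= LVDS_A0A2_CLKA_POWER_UP;	/* 18-bit base pairs + CLKA */
+	lvds |= LVDS_A3_POWER_UP;		/* extra LSBs for 24 bpp */
+	lvds |= LVDS_CLKB_POWER_UP | LVDS_B0B3_POWER_UP; /* second channel */
+	return lvds | LVDS_PORT_EN;
+}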
+
+#define PIPEACONF		0x70008
+#define PIPEACONF_ENABLE		(1 << 31)
+#define PIPEACONF_DISABLE		0
+#define PIPEACONF_DOUBLE_WIDE		(1 << 30)
+#define PIPECONF_ACTIVE			(1 << 30)
+#define PIPECONF_DSIPLL_LOCK		(1 << 29)
+#define PIPEACONF_SINGLE_WIDE		0
+#define PIPEACONF_PIPE_UNLOCKED		0
+#define PIPEACONF_DSR			(1 << 26)
+#define PIPEACONF_PIPE_LOCKED		(1 << 25)
+#define PIPEACONF_PALETTE		0
+#define PIPECONF_FORCE_BORDER		(1 << 25)
+#define PIPEACONF_GAMMA			(1 << 24)
+#define PIPECONF_PROGRESSIVE		(0 << 21)
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
+#define PIPECONF_PLANE_OFF		(1 << 19)
+#define PIPECONF_CURSOR_OFF		(1 << 18)
+
+#define PIPEBCONF		0x71008
+#define PIPEBCONF_ENABLE		(1 << 31)
+#define PIPEBCONF_DISABLE		0
+#define PIPEBCONF_DOUBLE_WIDE		(1 << 30)
+#define PIPEBCONF_GAMMA			(1 << 24)
+#define PIPEBCONF_PALETTE		0
+
+#define PIPECCONF		0x72008
+
+#define PIPEBGCMAXRED		0x71010
+#define PIPEBGCMAXGREEN		0x71014
+#define PIPEBGCMAXBLUE		0x71018
+
+#define PIPEASTAT		0x70024
+#define PIPEBSTAT		0x71024
+#define PIPECSTAT		0x72024
+#define PIPE_VBLANK_INTERRUPT_STATUS		(1UL << 1)
+#define PIPE_START_VBLANK_INTERRUPT_STATUS	(1UL << 2)
+#define PIPE_VBLANK_CLEAR			(1 << 1)
+#define PIPE_VBLANK_STATUS			(1 << 1)
+#define PIPE_TE_STATUS				(1UL << 6)
+#define PIPE_DPST_EVENT_STATUS			(1UL << 7)
+#define PIPE_VSYNC_CLEAR			(1UL << 9)
+#define PIPE_VSYNC_STATUS			(1UL << 9)
+#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS		(1UL << 10)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS	(1UL << 11)
+#define PIPE_VBLANK_INTERRUPT_ENABLE		(1UL << 17)
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE	(1UL << 18)
+#define PIPE_TE_ENABLE				(1UL << 22)
+#define PIPE_LEGACY_BLC_EVENT_ENABLE		(1UL << 22)
+#define PIPE_DPST_EVENT_ENABLE			(1UL << 23)
+#define PIPE_VSYNC_ENABL			(1UL << 25)
+#define PIPE_HDMI_AUDIO_UNDERRUN		(1UL << 26)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE		(1UL << 27)
+#define PIPE_FIFO_UNDERRUN			(1UL << 31)
+#define PIPE_HDMI_AUDIO_INT_MASK		(PIPE_HDMI_AUDIO_UNDERRUN | \
+						PIPE_HDMI_AUDIO_BUFFER_DONE)
+#define PIPE_EVENT_MASK		((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)| \
+				 (1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)| \
+				 (1 << 20)|(1 << 16))
+#define PIPE_VBLANK_MASK	((1 << 25)|(1 << 24)|(1 << 18)|(1 << 17))
+#define HISTOGRAM_INT_CONTROL		0x61268
+#define HISTOGRAM_BIN_DATA		0x61264
+#define HISTOGRAM_LOGIC_CONTROL		0x61260
+#define PWM_CONTROL_LOGIC		0x61250
+#define PIPE_HOTPLUG_INTERRUPT_STATUS		(1UL << 10)
+#define HISTOGRAM_INTERRUPT_ENABLE		(1UL << 31)
+#define HISTOGRAM_LOGIC_ENABLE			(1UL << 31)
+#define PWM_LOGIC_ENABLE			(1UL << 31)
+#define PWM_PHASEIN_ENABLE			(1UL << 25)
+#define PWM_PHASEIN_INT_ENABLE			(1UL << 24)
+#define PWM_PHASEIN_VB_COUNT			0x00001f00
+#define PWM_PHASEIN_INC				0x0000001f
+#define HISTOGRAM_INT_CTRL_CLEAR		(1UL << 30)
+#define DPST_YUV_LUMA_MODE			0
+
+struct dpst_ie_histogram_control {
+	union {
+		uint32_t data;
+		struct {
+			uint32_t bin_reg_index:7;
+			uint32_t reserved:4;
+			uint32_t bin_reg_func_select:1;
+			uint32_t sync_to_phase_in:1;
+			uint32_t alt_enhancement_mode:2;
+			uint32_t reserved1:1;
+			uint32_t sync_to_phase_in_count:8;
+			uint32_t histogram_mode_select:1;
+			uint32_t reserved2:4;
+			uint32_t ie_pipe_assignment:1;
+			uint32_t ie_mode_table_enabled:1;
+			uint32_t ie_histogram_enable:1;
+		};
+	};
+};
+
+struct dpst_guardband {
+	union {
+		uint32_t data;
+		struct {
+			uint32_t guardband:22;
+			uint32_t guardband_interrupt_delay:8;
+			uint32_t interrupt_status:1;
+			uint32_t interrupt_enable:1;
+		};
+	};
+};
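+
+/*
+ * Example use of the union above (editor's sketch, hypothetical helper):
+ * enabling the image-enhancement histogram by patching a readout of
+ * HISTOGRAM_LOGIC_CONTROL through the bitfield view and returning the
+ * value to write back.
+ */
+static inline uint32_t dpst_example_enable_histogram(uint32_t reg_val)
+{
+	struct dpst_ie_histogram_control ie;
+
+	ie.data = reg_val;
+	ie.histogram_mode_select = DPST_YUV_LUMA_MODE;
+	ie.ie_histogram_enable = 1;
+	return ie.data;
+}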
+
+#define PIPEAFRAMEHIGH		0x70040
+#define PIPEAFRAMEPIXEL		0x70044
+#define PIPEBFRAMEHIGH		0x71040
+#define PIPEBFRAMEPIXEL		0x71044
+#define PIPECFRAMEHIGH		0x72040
+#define PIPECFRAMEPIXEL		0x72044
+#define PIPE_FRAME_HIGH_MASK	0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT	0
+#define PIPE_FRAME_LOW_MASK	0xff000000
+#define PIPE_FRAME_LOW_SHIFT	24
+#define PIPE_PIXEL_MASK		0x00ffffff
+#define PIPE_PIXEL_SHIFT	0
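+
+/*
+ * Illustrative sketch (editor's addition, hypothetical helper): the frame
+ * counter is split across the two registers above, so the 24-bit count is
+ * assembled from the high field of PIPE*FRAMEHIGH and the low field of
+ * PIPE*FRAMEPIXEL (rollover between the two reads is ignored for brevity).
+ */
+static inline uint32_t pipe_example_frame_count(uint32_t high, uint32_t low)
+{
+	return (((high & PIPE_FRAME_HIGH_MASK) >> PIPE_FRAME_HIGH_SHIFT) << 8) |
+	       ((low & PIPE_FRAME_LOW_MASK) >> PIPE_FRAME_LOW_SHIFT);
+}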
+
+#define FW_BLC_SELF		0x20e0
+#define FW_BLC_SELF_EN		(1 << 15)
+
+#define DSPARB			0x70030
+#define DSPFW1			0x70034
+#define DSP_FIFO_SR_WM_MASK		0xFF800000
+#define DSP_FIFO_SR_WM_SHIFT		23
+#define CURSOR_B_FIFO_WM_MASK		0x003F0000
+#define CURSOR_B_FIFO_WM_SHIFT		16
+#define DSPFW2			0x70038
+#define CURSOR_A_FIFO_WM_MASK		0x3F00
+#define CURSOR_A_FIFO_WM_SHIFT		8
+#define DSP_PLANE_C_FIFO_WM_MASK	0x7F
+#define DSP_PLANE_C_FIFO_WM_SHIFT	0
+#define DSPFW3			0x7003c
+#define DSPFW4			0x70050
+#define DSPFW5			0x70054
+#define DSP_PLANE_B_FIFO_WM1_SHIFT	24
+#define DSP_PLANE_A_FIFO_WM1_SHIFT	16
+#define CURSOR_B_FIFO_WM1_SHIFT		8
+#define CURSOR_FIFO_SR_WM1_SHIFT	0
+#define DSPFW6			0x70058
+#define DSPCHICKENBIT		0x70400
+#define DSPACNTR		0x70180
+#define DSPBCNTR		0x71180
+#define DSPCCNTR		0x72180
+#define DISPLAY_PLANE_ENABLE			(1 << 31)
+#define DISPLAY_PLANE_DISABLE			0
+#define DISPPLANE_GAMMA_ENABLE			(1 << 30)
+#define DISPPLANE_GAMMA_DISABLE			0
+#define DISPPLANE_PIXFORMAT_MASK		(0xf << 26)
+#define DISPPLANE_8BPP				(0x2 << 26)
+#define DISPPLANE_15_16BPP			(0x4 << 26)
+#define DISPPLANE_16BPP				(0x5 << 26)
+#define DISPPLANE_32BPP_NO_ALPHA		(0x6 << 26)
+#define DISPPLANE_32BPP				(0x7 << 26)
+#define DISPPLANE_STEREO_ENABLE			(1 << 25)
+#define DISPPLANE_STEREO_DISABLE		0
+#define DISPPLANE_SEL_PIPE_MASK			(1 << 24)
+#define DISPPLANE_SEL_PIPE_POS			24
+#define DISPPLANE_SEL_PIPE_A			0
+#define DISPPLANE_SEL_PIPE_B			(1 << 24)
+#define DISPPLANE_SRC_KEY_ENABLE		(1 << 22)
+#define DISPPLANE_SRC_KEY_DISABLE		0
+#define DISPPLANE_LINE_DOUBLE			(1 << 20)
+#define DISPPLANE_NO_LINE_DOUBLE		0
+#define DISPPLANE_STEREO_POLARITY_FIRST		0
+#define DISPPLANE_STEREO_POLARITY_SECOND	(1 << 18)
+/* plane B only */
+#define DISPPLANE_ALPHA_TRANS_ENABLE		(1 << 15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE		0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA		0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY		(1)
+#define DISPPLANE_BOTTOM			(4)
+
+#define DSPABASE		0x70184
+#define DSPALINOFF		0x70184
+#define DSPASTRIDE		0x70188
+
+#define DSPBBASE		0x71184
+#define DSPBLINOFF		0x71184
+#define DSPBADDR		DSPBBASE
+#define DSPBSTRIDE		0x71188
+
+#define DSPCBASE		0x72184
+#define DSPCLINOFF		0x72184
+#define DSPCSTRIDE		0x72188
+
+#define DSPAKEYVAL		0x70194
+#define DSPAKEYMASK		0x70198
+
+#define DSPAPOS			0x7018C	/* reserved */
+#define DSPASIZE		0x70190
+#define DSPBPOS			0x7118C
+#define DSPBSIZE		0x71190
+#define DSPCPOS			0x7218C
+#define DSPCSIZE		0x72190
+
+#define DSPASURF		0x7019C
+#define DSPATILEOFF		0x701A4
+
+#define DSPBSURF		0x7119C
+#define DSPBTILEOFF		0x711A4
+
+#define DSPCSURF		0x7219C
+#define DSPCTILEOFF		0x721A4
+#define DSPCKEYMAXVAL		0x721A0
+#define DSPCKEYMINVAL		0x72194
+#define DSPCKEYMSK		0x72198
+
+#define VGACNTRL		0x71400
+#define VGA_DISP_DISABLE		(1 << 31)
+#define VGA_2X_MODE			(1 << 30)
+#define VGA_PIPE_B_SELECT		(1 << 29)
+
+/*
+ * Overlay registers
+ */
+#define OV_C_OFFSET		0x08000
+#define OV_OVADD		0x30000
+#define OV_DOVASTA		0x30008
+# define OV_PIPE_SELECT			((1 << 6)|(1 << 7))
+# define OV_PIPE_SELECT_POS		6
+# define OV_PIPE_A			0
+# define OV_PIPE_C			1
+#define OV_OGAMC5		0x30010
+#define OV_OGAMC4		0x30014
+#define OV_OGAMC3		0x30018
+#define OV_OGAMC2		0x3001C
+#define OV_OGAMC1		0x30020
+#define OV_OGAMC0		0x30024
+#define OVC_OVADD		0x38000
+#define OVC_DOVCSTA		0x38008
+#define OVC_OGAMC5		0x38010
+#define OVC_OGAMC4		0x38014
+#define OVC_OGAMC3		0x38018
+#define OVC_OGAMC2		0x3801C
+#define OVC_OGAMC1		0x38020
+#define OVC_OGAMC0		0x38024
+
+/*
+ * Some BIOS scratch area registers.  The 845 (and 830?) store the amount
+ * of video memory available to the BIOS in SWF1.
+ */
+#define SWF0			0x71410
+#define SWF1			0x71414
+#define SWF2			0x71418
+#define SWF3			0x7141c
+#define SWF4			0x71420
+#define SWF5			0x71424
+#define SWF6			0x71428
+
+/*
+ * 855 scratch registers.
+ */
+#define SWF00			0x70410
+#define SWF01			0x70414
+#define SWF02			0x70418
+#define SWF03			0x7041c
+#define SWF04			0x70420
+#define SWF05			0x70424
+#define SWF06			0x70428
+
+#define SWF10			SWF0
+#define SWF11			SWF1
+#define SWF12			SWF2
+#define SWF13			SWF3
+#define SWF14			SWF4
+#define SWF15			SWF5
+#define SWF16			SWF6
+
+#define SWF30			0x72414
+#define SWF31			0x72418
+#define SWF32			0x7241c
+
+
+/*
+ * Palette registers
+ */
+#define PALETTE_A		0x0a000
+#define PALETTE_B		0x0a800
+#define PALETTE_C		0x0ac00
+
+/* Cursor A & B regs */
+#define CURACNTR		0x70080
+#define CURSOR_MODE_DISABLE		0x00
+#define CURSOR_MODE_64_32B_AX		0x07
+#define CURSOR_MODE_64_ARGB_AX		((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_GAMMA_ENABLE		(1 << 26)
+#define CURABASE		0x70084
+#define CURAPOS			0x70088
+#define CURSOR_POS_MASK			0x007FF
+#define CURSOR_POS_SIGN			0x8000
+#define CURSOR_X_SHIFT			0
+#define CURSOR_Y_SHIFT			16
+#define CURBCNTR		0x700c0
+#define CURBBASE		0x700c4
+#define CURBPOS			0x700c8
+#define CURCCNTR		0x700e0
+#define CURCBASE		0x700e4
+#define CURCPOS			0x700e8
+
+/*
+ * Interrupt Registers
+ */
+#define IER			0x020a0
+#define IIR			0x020a4
+#define IMR			0x020a8
+#define ISR			0x020ac
+
+/*
+ * MOORESTOWN delta registers
+ */
+#define MRST_DPLL_A		0x0f014
+#define MDFLD_DPLL_B		0x0f018
+#define MDFLD_INPUT_REF_SEL		(1 << 14)
+#define MDFLD_VCO_SEL			(1 << 16)
+#define DPLLA_MODE_LVDS			(2 << 26)	/* mrst */
+#define MDFLD_PLL_LATCHEN		(1 << 28)
+#define MDFLD_PWR_GATE_EN		(1 << 30)
+#define MDFLD_P1_MASK			(0x1FF << 17)
+#define MRST_FPA0		0x0f040
+#define MRST_FPA1		0x0f044
+#define MDFLD_DPLL_DIV0		0x0f048
+#define MDFLD_DPLL_DIV1		0x0f04c
+#define MRST_PERF_MODE		0x020f4
+
+/*
+ * MEDFIELD HDMI registers
+ */
+#define HDMIPHYMISCCTL		0x61134
+#define HDMI_PHY_POWER_DOWN		0x7f
+#define HDMIB_CONTROL		0x61140
+#define HDMIB_PORT_EN			(1 << 31)
+#define HDMIB_PIPE_B_SELECT		(1 << 30)
+#define HDMIB_NULL_PACKET		(1 << 9)
+#define HDMIB_HDCP_PORT			(1 << 5)
+
+/* #define LVDS			0x61180 */
+#define MRST_PANEL_8TO6_DITHER_ENABLE	(1 << 25)
+#define MRST_PANEL_24_DOT_1_FORMAT	(1 << 24)
+#define LVDS_A3_POWER_UP_0_OUTPUT	(1 << 6)
+
+#define MIPI			0x61190
+#define MIPI_C			0x62190
+#define MIPI_PORT_EN			(1 << 31)
+#define SEL_FLOPPED_HSTX		(1 << 23)
+#define PASS_FROM_SPHY_TO_AFE		(1 << 16)
+/* Turns on border drawing to allow centered display. */
+#define MIPI_BORDER_EN			(1 << 15)
+#define MIPIA_3LANE_MIPIC_1LANE		0x1
+#define MIPIA_2LANE_MIPIC_2LANE		0x2
+#define TE_TRIGGER_DSI_PROTOCOL		(1 << 2)
+#define TE_TRIGGER_GPIO_PIN		(1 << 3)
+#define MIPI_TE_COUNT		0x61194
+
+/* #define PP_CONTROL	0x61204 */
+#define POWER_DOWN_ON_RESET		(1 << 1)
+
+/* #define PFIT_CONTROL	0x61230 */
+#define PFIT_PIPE_SELECT		(3 << 29)
+#define PFIT_PIPE_SELECT_SHIFT		(29)
+
+/* #define BLC_PWM_CTL		0x61254 */
+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT	(16)
+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK	(0xffff << 16)
+
+/* #define PIPEACONF 0x70008 */
+#define PIPEACONF_PIPE_STATE		(1 << 30)
+/* #define DSPACNTR		0x70180 */
+
+#define MRST_DSPABASE		0x7019c
+#define MRST_DSPBBASE		0x7119c
+#define MDFLD_DSPCBASE		0x7219c
+
+/*
+ * Moorestown registers.
+ */
+
+/*
+ *	MIPI IP registers
+ */
+#define MIPIC_REG_OFFSET		0x800
+
+#define DEVICE_READY_REG		0xb000
+#define LP_OUTPUT_HOLD				(1 << 16)
+#define EXIT_ULPS_DEV_READY			0x3
+#define LP_OUTPUT_HOLD_RELEASE			0x810000
+# define ENTERING_ULPS				(2 << 1)
+# define EXITING_ULPS				(1 << 1)
+# define ULPS_MASK				(3 << 1)
+# define BUS_POSSESSION				(1 << 3)
+#define INTR_STAT_REG			0xb004
+#define RX_SOT_ERROR				(1 << 0)
+#define RX_SOT_SYNC_ERROR			(1 << 1)
+#define RX_ESCAPE_MODE_ENTRY_ERROR		(1 << 3)
+#define RX_LP_TX_SYNC_ERROR			(1 << 4)
+#define RX_HS_RECEIVE_TIMEOUT_ERROR		(1 << 5)
+#define RX_FALSE_CONTROL_ERROR			(1 << 6)
+#define RX_ECC_SINGLE_BIT_ERROR			(1 << 7)
+#define RX_ECC_MULTI_BIT_ERROR			(1 << 8)
+#define RX_CHECKSUM_ERROR			(1 << 9)
+#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED		(1 << 10)
+#define RX_DSI_VC_ID_INVALID			(1 << 11)
+#define TX_FALSE_CONTROL_ERROR			(1 << 12)
+#define TX_ECC_SINGLE_BIT_ERROR			(1 << 13)
+#define TX_ECC_MULTI_BIT_ERROR			(1 << 14)
+#define TX_CHECKSUM_ERROR			(1 << 15)
+#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED		(1 << 16)
+#define TX_DSI_VC_ID_INVALID			(1 << 17)
+#define HIGH_CONTENTION				(1 << 18)
+#define LOW_CONTENTION				(1 << 19)
+#define DPI_FIFO_UNDER_RUN			(1 << 20)
+#define HS_TX_TIMEOUT				(1 << 21)
+#define LP_RX_TIMEOUT				(1 << 22)
+#define TURN_AROUND_ACK_TIMEOUT			(1 << 23)
+#define ACK_WITH_NO_ERROR			(1 << 24)
+#define HS_GENERIC_WR_FIFO_FULL			(1 << 27)
+#define LP_GENERIC_WR_FIFO_FULL			(1 << 28)
+#define SPL_PKT_SENT				(1 << 30)
+#define INTR_EN_REG			0xb008
+#define DSI_FUNC_PRG_REG		0xb00c
+#define DPI_CHANNEL_NUMBER_POS			0x03
+#define DBI_CHANNEL_NUMBER_POS			0x05
+#define FMT_DPI_POS				0x07
+#define FMT_DBI_POS				0x0A
+#define DBI_DATA_WIDTH_POS			0x0D
+
+/* DPI PIXEL FORMATS */
+#define RGB_565_FMT				0x01	/* RGB 565 FORMAT */
+#define RGB_666_FMT				0x02	/* RGB 666 FORMAT */
+#define LRGB_666_FMT				0x03	/* RGB LOOSELY PACKED
+							 * 666 FORMAT
+							 */
+#define RGB_888_FMT				0x04	/* RGB 888 FORMAT */
+#define VIRTUAL_CHANNEL_NUMBER_0		0x00	/* Virtual channel 0 */
+#define VIRTUAL_CHANNEL_NUMBER_1		0x01	/* Virtual channel 1 */
+#define VIRTUAL_CHANNEL_NUMBER_2		0x02	/* Virtual channel 2 */
+#define VIRTUAL_CHANNEL_NUMBER_3		0x03	/* Virtual channel 3 */
+
+#define DBI_NOT_SUPPORTED			0x00	/* command mode
+							 * is not supported
+							 */
+#define DBI_DATA_WIDTH_16BIT			0x01	/* 16 bit data */
+#define DBI_DATA_WIDTH_9BIT			0x02	/* 9 bit data */
+#define DBI_DATA_WIDTH_8BIT			0x03	/* 8 bit data */
+#define DBI_DATA_WIDTH_OPT1			0x04	/* option 1 */
+#define DBI_DATA_WIDTH_OPT2			0x05	/* option 2 */
+
+#define HS_TX_TIMEOUT_REG		0xb010
+#define LP_RX_TIMEOUT_REG		0xb014
+#define TURN_AROUND_TIMEOUT_REG		0xb018
+#define DEVICE_RESET_REG		0xb01C
+#define DPI_RESOLUTION_REG		0xb020
+#define RES_V_POS				0x10
+#define DBI_RESOLUTION_REG		0xb024 /* Reserved for MDFLD */
+#define HORIZ_SYNC_PAD_COUNT_REG	0xb028
+#define HORIZ_BACK_PORCH_COUNT_REG	0xb02C
+#define HORIZ_FRONT_PORCH_COUNT_REG	0xb030
+#define HORIZ_ACTIVE_AREA_COUNT_REG	0xb034
+#define VERT_SYNC_PAD_COUNT_REG		0xb038
+#define VERT_BACK_PORCH_COUNT_REG	0xb03c
+#define VERT_FRONT_PORCH_COUNT_REG	0xb040
+#define HIGH_LOW_SWITCH_COUNT_REG	0xb044
+#define DPI_CONTROL_REG			0xb048
+#define DPI_SHUT_DOWN				(1 << 0)
+#define DPI_TURN_ON				(1 << 1)
+#define DPI_COLOR_MODE_ON			(1 << 2)
+#define DPI_COLOR_MODE_OFF			(1 << 3)
+#define DPI_BACK_LIGHT_ON			(1 << 4)
+#define DPI_BACK_LIGHT_OFF			(1 << 5)
+#define DPI_LP					(1 << 6)
+#define DPI_DATA_REG			0xb04c
+#define DPI_BACK_LIGHT_ON_DATA			0x07
+#define DPI_BACK_LIGHT_OFF_DATA			0x17
+#define INIT_COUNT_REG			0xb050
+#define MAX_RET_PAK_REG			0xb054
+#define VIDEO_FMT_REG			0xb058
+#define COMPLETE_LAST_PCKT			(1 << 2)
+#define EOT_DISABLE_REG			0xb05c
+#define ENABLE_CLOCK_STOPPING			(1 << 1)
+#define LP_BYTECLK_REG			0xb060
+#define LP_GEN_DATA_REG			0xb064
+#define HS_GEN_DATA_REG			0xb068
+#define LP_GEN_CTRL_REG			0xb06C
+#define HS_GEN_CTRL_REG			0xb070
+#define DCS_CHANNEL_NUMBER_POS		0x6
+#define MCS_COMMANDS_POS		0x8
+#define WORD_COUNTS_POS			0x8
+#define MCS_PARAMETER_POS			0x10
+#define GEN_FIFO_STAT_REG		0xb074
+#define HS_DATA_FIFO_FULL			(1 << 0)
+#define HS_DATA_FIFO_HALF_EMPTY			(1 << 1)
+#define HS_DATA_FIFO_EMPTY			(1 << 2)
+#define LP_DATA_FIFO_FULL			(1 << 8)
+#define LP_DATA_FIFO_HALF_EMPTY			(1 << 9)
+#define LP_DATA_FIFO_EMPTY			(1 << 10)
+#define HS_CTRL_FIFO_FULL			(1 << 16)
+#define HS_CTRL_FIFO_HALF_EMPTY			(1 << 17)
+#define HS_CTRL_FIFO_EMPTY			(1 << 18)
+#define LP_CTRL_FIFO_FULL			(1 << 24)
+#define LP_CTRL_FIFO_HALF_EMPTY			(1 << 25)
+#define LP_CTRL_FIFO_EMPTY			(1 << 26)
+#define DBI_FIFO_EMPTY				(1 << 27)
+#define DPI_FIFO_EMPTY				(1 << 28)
+#define HS_LS_DBI_ENABLE_REG		0xb078
+#define TXCLKESC_REG			0xb07c
+#define DPHY_PARAM_REG			0xb080
+#define DBI_BW_CTRL_REG			0xb084
+#define CLK_LANE_SWT_REG		0xb088
+
+/*
+ * MIPI Adapter registers
+ */
+#define MIPI_CONTROL_REG		0xb104
+#define MIPI_2X_CLOCK_BITS			((1 << 0) | (1 << 1))
+#define MIPI_DATA_ADDRESS_REG		0xb108
+#define MIPI_DATA_LENGTH_REG		0xb10C
+#define MIPI_COMMAND_ADDRESS_REG	0xb110
+#define MIPI_COMMAND_LENGTH_REG		0xb114
+#define MIPI_READ_DATA_RETURN_REG0	0xb118
+#define MIPI_READ_DATA_RETURN_REG1	0xb11C
+#define MIPI_READ_DATA_RETURN_REG2	0xb120
+#define MIPI_READ_DATA_RETURN_REG3	0xb124
+#define MIPI_READ_DATA_RETURN_REG4	0xb128
+#define MIPI_READ_DATA_RETURN_REG5	0xb12C
+#define MIPI_READ_DATA_RETURN_REG6	0xb130
+#define MIPI_READ_DATA_RETURN_REG7	0xb134
+#define MIPI_READ_DATA_VALID_REG	0xb138
+
+/* DBI COMMANDS */
+#define soft_reset			0x01
+/*
+ *	The display module performs a software reset.
+ *	Registers are written with their SW Reset default values.
+ */
+#define get_power_mode			0x0a
+/*
+ *	The display module returns the current power mode.
+ */
+#define get_address_mode		0x0b
+/*
+ *	The display module returns the current address mode status.
+ */
+#define get_pixel_format		0x0c
+/*
+ *	This command gets the pixel format for the RGB image data
+ *	used by the interface.
+ */
+#define get_display_mode		0x0d
+/*
+ *	The display module returns the Display Image Mode status.
+ */
+#define get_signal_mode			0x0e
+/*
+ *	The display module returns the Display Signal Mode.
+ */
+#define get_diagnostic_result		0x0f
+/*
+ *	The display module returns the self-diagnostic results following
+ *	a Sleep Out command.
+ */
+#define enter_sleep_mode		0x10
+/*
+ *	This command causes the display module to enter the Sleep mode.
+ *	In this mode, all unnecessary blocks inside the display module are
+ *	disabled except interface communication. This is the lowest power
+ *	mode the display module supports.
+ */
+#define exit_sleep_mode			0x11
+/*
+ *	This command causes the display module to exit Sleep mode.
+ *	All blocks inside the display module are enabled.
+ */
+#define enter_partial_mode		0x12
+/*
+ *	This command causes the display module to enter the Partial Display
+ *	Mode. The Partial Display Mode window is described by the
+ *	set_partial_area command.
+ */
+#define enter_normal_mode		0x13
+/*
+ *	This command causes the display module to enter the Normal mode.
+ *	Normal Mode is defined as both Partial Display mode and Scroll mode
+ *	being off.
+ */
+#define exit_invert_mode		0x20
+/*
+ *	This command causes the display module to stop inverting the image
+ *	data on the display device. The frame memory contents remain unchanged.
+ *	No status bits are changed.
+ */
+#define enter_invert_mode		0x21
+/*
+ *	This command causes the display module to invert the image data only on
+ *	the display device. The frame memory contents remain unchanged.
+ *	No status bits are changed.
+ */
+#define set_gamma_curve			0x26
+/*
+ *	This command selects the desired gamma curve for the display device.
+ *	Four fixed gamma curves are defined in the DCS specification.
+ */
+#define set_display_off			0x28
+/*
+ *	This command causes the display module to stop displaying the image
+ *	data on the display device. The frame memory contents remain
+ *	unchanged. No status bits are changed.
+ */
+#define set_display_on			0x29
+/*
+ *	This command causes the display module to start displaying the image
+ *	data on the display device. The frame memory contents remain
+ *	unchanged. No status bits are changed.
+ */
+#define set_column_address		0x2a
+/*
+ *	This command defines the column extent of the frame memory accessed by
+ *	the host processor with the read_memory_continue and
+ *	write_memory_continue commands.
+ *	No status bits are changed.
+ */
+#define set_page_addr			0x2b
+/*
+ *	This command defines the page extent of the frame memory accessed by
+ *	the host processor with the write_memory_continue and
+ *	read_memory_continue commands.
+ *	No status bits are changed.
+ */
+#define write_mem_start			0x2c
+/*
+ *	This command transfers image data from the host processor to the
+ *	display module's frame memory starting at the pixel location
+ *	specified by the preceding set_column_address and set_page_address
+ *	commands.
+ */
+#define set_partial_area		0x30
+/*
+ *	This command defines the Partial Display mode's display area.
+ *	There are two parameters associated with this command, the first
+ *	defines the Start Row (SR) and the second the End Row (ER). SR and ER
+ *	refer to the Frame Memory Line Pointer.
+ */
+#define set_scroll_area			0x33
+/*
+ *	This command defines the display module's Vertical Scrolling Area.
+ */
+#define set_tear_off			0x34
+/*
+ *	This command turns off the display module's Tearing Effect output
+ *	signal on the TE signal line.
+ */
+#define set_tear_on			0x35
+/*
+ *	This command turns on the display module's Tearing Effect output
+ *	signal on the TE signal line.
+ */
+#define set_address_mode		0x36
+/*
+ *	This command sets the data order for transfers from the host
+ *	processor to the display module's frame memory, bits B[7:5] and B3,
+ *	and from the display module's frame memory to the display device,
+ *	bits B[2:0] and B4.
+ */
+#define set_scroll_start		0x37
+/*
+ *	This command sets the start of the vertical scrolling area in the frame
+ *	memory. The vertical scrolling area is fully defined when this command
+ *	is used with the set_scroll_area command. The set_scroll_start
+ *	command has one parameter, the Vertical Scroll Pointer. The VSP
+ *	defines the
+ *	line in the frame memory that is written to the display device as the
+ *	first line of the vertical scroll area.
+ */
+#define exit_idle_mode			0x38
+/*
+ *	This command causes the display module to exit Idle mode.
+ */
+#define enter_idle_mode			0x39
+/*
+ *	This command causes the display module to enter Idle Mode.
+ *	In Idle Mode, color expression is reduced. Colors are shown on the
+ *	display device using the MSB of each of the R, G and B color
+ *	components in the frame memory.
+ */
+#define set_pixel_format		0x3a
+/*
+ *	This command sets the pixel format for the RGB image data used by the
+ *	interface.
+ *	Bits D[6:4]: DPI Pixel Format Definition
+ *	Bits D[2:0]: DBI Pixel Format Definition
+ *	Bits D7 and D3 are not used.
+ */
+#define DCS_PIXEL_FORMAT_3bpp		0x1
+#define DCS_PIXEL_FORMAT_8bpp		0x2
+#define DCS_PIXEL_FORMAT_12bpp		0x3
+#define DCS_PIXEL_FORMAT_16bpp		0x5
+#define DCS_PIXEL_FORMAT_18bpp		0x6
+#define DCS_PIXEL_FORMAT_24bpp		0x7
+
+#define write_mem_cont			0x3c
+
+/*
+ *	This command transfers image data from the host processor to the
+ *	display module's frame memory continuing from the pixel location
+ *	following the previous write_memory_continue or write_memory_start
+ *	command.
+ */
+#define set_tear_scanline		0x44
+/*
+ *	This command turns on the display module's Tearing Effect output
+ *	signal on the TE signal line when the display module reaches line N.
+ */
+#define get_scanline			0x45
+/*
+ *	The display module returns the current scanline, N, used to update
+ *	the display device. The total number of scanlines on a display device
+ *	is defined as VSYNC + VBP + VACT + VFP. The first scanline is defined
+ *	as the first line of V Sync and is denoted as Line 0.
+ *	When in Sleep Mode, the value returned by get_scanline is undefined.
+ */
+
+/* MCS or Generic COMMANDS */
+/* MCS/generic data type */
+#define GEN_SHORT_WRITE_0	0x03  /* generic short write, no parameters */
+#define GEN_SHORT_WRITE_1	0x13  /* generic short write, 1 parameter */
+#define GEN_SHORT_WRITE_2	0x23  /* generic short write, 2 parameters */
+#define GEN_READ_0		0x04  /* generic read, no parameters */
+#define GEN_READ_1		0x14  /* generic read, 1 parameter */
+#define GEN_READ_2		0x24  /* generic read, 2 parameters */
+#define GEN_LONG_WRITE		0x29  /* generic long write */
+#define MCS_SHORT_WRITE_0	0x05  /* MCS short write, no parameters */
+#define MCS_SHORT_WRITE_1	0x15  /* MCS short write, 1 parameter */
+#define MCS_READ		0x06  /* MCS read, no parameters */
+#define MCS_LONG_WRITE		0x39  /* MCS long write */
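+
+/*
+ * Illustrative packing (editor's sketch; the exact layout is an assumption
+ * based on the *_POS defines above, with the data type in the low byte):
+ * building an MCS short write with one parameter for HS_GEN_CTRL_REG.
+ */
+static inline uint32_t dsi_example_mcs_short_write_1(uint8_t cmd,
+						     uint8_t param)
+{
+	return MCS_SHORT_WRITE_1 |
+	       ((uint32_t)cmd << MCS_COMMANDS_POS) |
+	       ((uint32_t)param << MCS_PARAMETER_POS);
+}
+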
+/* MCS/generic commands */
+/* TPO MCS */
+#define write_display_profile		0x50
+#define write_display_brightness	0x51
+#define write_ctrl_display		0x53
+#define write_ctrl_cabc			0x55
+  #define UI_IMAGE		0x01
+  #define STILL_IMAGE		0x02
+  #define MOVING_IMAGE		0x03
+#define write_hysteresis		0x57
+#define write_gamma_setting		0x58
+#define write_cabc_min_bright		0x5e
+#define write_kbbc_profile		0x60
+/* TMD MCS */
+#define tmd_write_display_brightness 0x8c
+
+/*
+ *	This command is used to control ambient light, panel backlight
+ *	brightness and gamma settings.
+ */
+#define BRIGHT_CNTL_BLOCK_ON	(1 << 5)
+#define AMBIENT_LIGHT_SENSE_ON	(1 << 4)
+#define DISPLAY_DIMMING_ON	(1 << 3)
+#define BACKLIGHT_ON		(1 << 2)
+#define DISPLAY_BRIGHTNESS_AUTO	(1 << 1)
+#define GAMMA_AUTO		(1 << 0)
+
+/* DCS Interface Pixel Formats */
+#define DCS_PIXEL_FORMAT_3BPP	0x1
+#define DCS_PIXEL_FORMAT_8BPP	0x2
+#define DCS_PIXEL_FORMAT_12BPP	0x3
+#define DCS_PIXEL_FORMAT_16BPP	0x5
+#define DCS_PIXEL_FORMAT_18BPP	0x6
+#define DCS_PIXEL_FORMAT_24BPP	0x7
+/* ONE PARAMETER READ DATA */
+#define addr_mode_data		0xfc
+#define diag_res_data		0x00
+#define disp_mode_data		0x23
+#define pxl_fmt_data		0x77
+#define pwr_mode_data		0x74
+#define sig_mode_data		0x00
+/* TWO PARAMETERS READ DATA */
+#define scanline_data1		0xff
+#define scanline_data2		0xff
+#define NON_BURST_MODE_SYNC_PULSE	0x01	/* Non Burst Mode
+						 * with Sync Pulse
+						 */
+#define NON_BURST_MODE_SYNC_EVENTS	0x02	/* Non Burst Mode
+						 * with Sync events
+						 */
+#define BURST_MODE			0x03	/* Burst Mode */
+#define DBI_COMMAND_BUFFER_SIZE		0x240	/* Allocate at least
+						 * 0x100 bytes with 32
+						 * byte alignment
+						 */
+#define DBI_DATA_BUFFER_SIZE		0x120	/* Allocate at least
+						 * 0x100 bytes with 32
+						 * byte alignment
+						 */
+#define DBI_CB_TIME_OUT			0xFFFF
+
+#define GEN_FB_TIME_OUT			2000
+
+#define SKU_83				0x01
+#define SKU_100				0x02
+#define SKU_100L			0x04
+#define SKU_BYPASS			0x08
+
+/* Some handy macros for playing with bitfields. */
+#define PSB_MASK(high, low) (((1<<((high)-(low)+1))-1)<<(low))
+#define SET_FIELD(value, field) (((value) << field ## _SHIFT) & field ## _MASK)
+#define GET_FIELD(word, field) (((word)  & field ## _MASK) >> field ## _SHIFT)
+
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+
+/* PCI config space */
+
+#define SB_PCKT         0x02100 /* cedarview */
+# define SB_OPCODE_MASK                         PSB_MASK(31, 16)
+# define SB_OPCODE_SHIFT                        16
+# define SB_OPCODE_READ                         0
+# define SB_OPCODE_WRITE                        1
+# define SB_DEST_MASK                           PSB_MASK(15, 8)
+# define SB_DEST_SHIFT                          8
+# define SB_DEST_DPLL                           0x88
+# define SB_BYTE_ENABLE_MASK                    PSB_MASK(7, 4)
+# define SB_BYTE_ENABLE_SHIFT                   4
+# define SB_BUSY                                (1 << 0)
+
+#define DSPCLK_GATE_D		0x6200
+# define VRHUNIT_CLOCK_GATE_DISABLE		(1 << 28) /* Fixed value on CDV */
+# define DPOUNIT_CLOCK_GATE_DISABLE		(1 << 11)
+# define DPIOUNIT_CLOCK_GATE_DISABLE		(1 << 6)
+# define DPUNIT_PIPEB_GATE_DISABLE		(1 << 30)
+# define DPUNIT_PIPEA_GATE_DISABLE		(1 << 25)
+# define DPCUNIT_CLOCK_GATE_DISABLE		(1 << 24)
+# define DPLSUNIT_CLOCK_GATE_DISABLE		(1 << 13)
+
+#define RAMCLK_GATE_D		0x6210
+
+/* 32-bit value read/written from the DPIO reg. */
+#define SB_DATA		0x02104 /* cedarview */
+/* 32-bit address of the DPIO reg to be read/written. */
+#define SB_ADDR		0x02108 /* cedarview */
+#define DPIO_CFG	0x02110 /* cedarview */
+# define DPIO_MODE_SELECT_1			(1 << 3)
+# define DPIO_MODE_SELECT_0			(1 << 2)
+# define DPIO_SFR_BYPASS			(1 << 1)
+/* reset is active low */
+# define DPIO_CMN_RESET_N			(1 << 0)
+
+/* Cedarview sideband registers */
+#define _SB_M_A			0x8008
+#define _SB_M_B			0x8028
+#define SB_M(pipe) _PIPE(pipe, _SB_M_A, _SB_M_B)
+# define SB_M_DIVIDER_MASK			(0xFF << 24)
+# define SB_M_DIVIDER_SHIFT			24
+
+#define _SB_N_VCO_A		0x8014
+#define _SB_N_VCO_B		0x8034
+#define SB_N_VCO(pipe) _PIPE(pipe, _SB_N_VCO_A, _SB_N_VCO_B)
+#define SB_N_VCO_SEL_MASK			PSB_MASK(31, 30)
+#define SB_N_VCO_SEL_SHIFT			30
+#define SB_N_DIVIDER_MASK			PSB_MASK(29, 26)
+#define SB_N_DIVIDER_SHIFT			26
+#define SB_N_CB_TUNE_MASK			PSB_MASK(25, 24)
+#define SB_N_CB_TUNE_SHIFT			24
+
+/* Bits 14:13 select between the different reference clocks for pipe A/B */
+#define SB_REF_DPLLA		0x8010
+#define SB_REF_DPLLB		0x8030
+#define	REF_CLK_MASK		(0x3 << 13)
+#define REF_CLK_CORE		(0 << 13)
+#define REF_CLK_DPLL		(1 << 13)
+#define REF_CLK_DPLLA		(2 << 13)
+/* DPLL B uses the reference clock from DPLL A when (2 << 13) is selected */
+
+#define _SB_REF_A		0x8018
+#define _SB_REF_B		0x8038
+#define SB_REF_SFR(pipe)	_PIPE(pipe, _SB_REF_A, _SB_REF_B)
+
+#define _SB_P_A			0x801c
+#define _SB_P_B			0x803c
+#define SB_P(pipe) _PIPE(pipe, _SB_P_A, _SB_P_B)
+#define SB_P2_DIVIDER_MASK			PSB_MASK(31, 30)
+#define SB_P2_DIVIDER_SHIFT			30
+#define SB_P2_10				0 /* HDMI, DP, DAC */
+#define SB_P2_5				1 /* DAC */
+#define SB_P2_14				2 /* LVDS single */
+#define SB_P2_7				3 /* LVDS double */
+#define SB_P1_DIVIDER_MASK			PSB_MASK(15, 12)
+#define SB_P1_DIVIDER_SHIFT			12
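+
+/*
+ * Example use of the SET_FIELD helper above (editor's sketch, hypothetical
+ * helper): composing a sideband P-divider value from P1 and a P2 selector
+ * such as SB_P2_14.  The result would go to the per-pipe register that the
+ * _PIPE()-based SB_P(pipe) macro picks.
+ */
+static inline uint32_t sb_example_pack_p(uint32_t p1, uint32_t p2_sel)
+{
+	return SET_FIELD(p1, SB_P1_DIVIDER) |
+	       SET_FIELD(p2_sel, SB_P2_DIVIDER);
+}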
+
+#define PSB_LANE0		0x120
+#define PSB_LANE1		0x220
+#define PSB_LANE2		0x2320
+#define PSB_LANE3		0x2420
+
+#define LANE_PLL_MASK		(0x7 << 20)
+#define LANE_PLL_ENABLE		(0x3 << 20)
+#define LANE_PLL_PIPE(p)	(((p) == 0) ? (1 << 21) : (0 << 21))
+
+#define DP_B				0x64100
+#define DP_C				0x64200
+
+#define   DP_PORT_EN			(1 << 31)
+#define   DP_PIPEB_SELECT		(1 << 30)
+#define   DP_PIPE_MASK			(1 << 30)
+
+/* Link training mode - select a suitable mode for each stage */
+#define   DP_LINK_TRAIN_PAT_1		(0 << 28)
+#define   DP_LINK_TRAIN_PAT_2		(1 << 28)
+#define   DP_LINK_TRAIN_PAT_IDLE	(2 << 28)
+#define   DP_LINK_TRAIN_OFF		(3 << 28)
+#define   DP_LINK_TRAIN_MASK		(3 << 28)
+#define   DP_LINK_TRAIN_SHIFT		28
+
+/* Signal voltages. These are mostly controlled by the other end */
+#define   DP_VOLTAGE_0_4		(0 << 25)
+#define   DP_VOLTAGE_0_6		(1 << 25)
+#define   DP_VOLTAGE_0_8		(2 << 25)
+#define   DP_VOLTAGE_1_2		(3 << 25)
+#define   DP_VOLTAGE_MASK		(7 << 25)
+#define   DP_VOLTAGE_SHIFT		25
+
+/* Signal pre-emphasis levels; as with the voltages, the other end tells us
+ * what it wants
+ */
+#define   DP_PRE_EMPHASIS_0		(0 << 22)
+#define   DP_PRE_EMPHASIS_3_5		(1 << 22)
+#define   DP_PRE_EMPHASIS_6		(2 << 22)
+#define   DP_PRE_EMPHASIS_9_5		(3 << 22)
+#define   DP_PRE_EMPHASIS_MASK		(7 << 22)
+#define   DP_PRE_EMPHASIS_SHIFT		22
+
+/* How many wires to use. I guess 3 was too hard */
+#define   DP_PORT_WIDTH_1		(0 << 19)
+#define   DP_PORT_WIDTH_2		(1 << 19)
+#define   DP_PORT_WIDTH_4		(3 << 19)
+#define   DP_PORT_WIDTH_MASK		(7 << 19)
+
+/* Mystic DPCD version 1.1 special mode */
+#define   DP_ENHANCED_FRAMING		(1 << 18)
+
+/** locked once port is enabled */
+#define   DP_PORT_REVERSAL		(1 << 15)
+
+/** sends the clock on lane 15 of the PEG for debug */
+#define   DP_CLOCK_OUTPUT_ENABLE	(1 << 13)
+
+#define   DP_SCRAMBLING_DISABLE		(1 << 12)
+#define   DP_SCRAMBLING_DISABLE_IRONLAKE	(1 << 7)
+
+/** limit RGB values to avoid confusing TVs */
+#define   DP_COLOR_RANGE_16_235		(1 << 8)
+
+/** Turn on the audio link */
+#define   DP_AUDIO_OUTPUT_ENABLE	(1 << 6)
+
+/** vs and hs sync polarity */
+#define   DP_SYNC_VS_HIGH		(1 << 4)
+#define   DP_SYNC_HS_HIGH		(1 << 3)
+
+/** A fantasy */
+#define   DP_DETECTED			(1 << 2)
+
+/** The aux channel provides a way to talk to the
+ * signal sink for DDC etc. Max packet size supported
+ * is 20 bytes in each direction, hence the 5 fixed
+ * data registers
+ */
+#define DPB_AUX_CH_CTL			0x64110
+#define DPB_AUX_CH_DATA1		0x64114
+#define DPB_AUX_CH_DATA2		0x64118
+#define DPB_AUX_CH_DATA3		0x6411c
+#define DPB_AUX_CH_DATA4		0x64120
+#define DPB_AUX_CH_DATA5		0x64124
+
+#define DPC_AUX_CH_CTL			0x64210
+#define DPC_AUX_CH_DATA1		0x64214
+#define DPC_AUX_CH_DATA2		0x64218
+#define DPC_AUX_CH_DATA3		0x6421c
+#define DPC_AUX_CH_DATA4		0x64220
+#define DPC_AUX_CH_DATA5		0x64224
+
+#define   DP_AUX_CH_CTL_SEND_BUSY	    (1 << 31)
+#define   DP_AUX_CH_CTL_DONE		    (1 << 30)
+#define   DP_AUX_CH_CTL_INTERRUPT	    (1 << 29)
+#define   DP_AUX_CH_CTL_TIME_OUT_ERROR	    (1 << 28)
+#define   DP_AUX_CH_CTL_TIME_OUT_400us	    (0 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_600us	    (1 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_800us	    (2 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_1600us	    (3 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_MASK	    (3 << 26)
+#define   DP_AUX_CH_CTL_RECEIVE_ERROR	    (1 << 25)
+#define   DP_AUX_CH_CTL_MESSAGE_SIZE_MASK    (0x1f << 20)
+#define   DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT   20
+#define   DP_AUX_CH_CTL_PRECHARGE_2US_MASK   (0xf << 16)
+#define   DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT  16
+#define   DP_AUX_CH_CTL_AUX_AKSV_SELECT	    (1 << 15)
+#define   DP_AUX_CH_CTL_MANCHESTER_TEST	    (1 << 14)
+#define   DP_AUX_CH_CTL_SYNC_TEST	    (1 << 13)
+#define   DP_AUX_CH_CTL_DEGLITCH_TEST	    (1 << 12)
+#define   DP_AUX_CH_CTL_PRECHARGE_TEST	    (1 << 11)
+#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK    (0x7ff)
+#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT   0
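+
+/*
+ * Illustrative sketch (editor's addition): the AUX data registers above
+ * carry up to 20 bytes as five 32-bit words.  This packs one word of an
+ * outgoing message MSB-first, which is an assumption borrowed from the
+ * similar i915 hardware rather than something this file documents.
+ */
+static inline uint32_t dp_aux_example_pack(const uint8_t *src, int src_bytes)
+{
+	uint32_t v = 0;
+	int i;
+
+	for (i = 0; i < src_bytes && i < 4; i++)
+		v |= ((uint32_t)src[i]) << ((3 - i) * 8);
+	return v;
+}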
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = (dot clock * bytes per pixel) / (ls_clk * # of lanes)
+ *
+ * ls_clk (we assume) is the DP link clock, corresponding to the 1.62 or
+ * 2.7 Gbps link rates
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
+
+#define _PIPEA_GMCH_DATA_M			0x70050
+#define _PIPEB_GMCH_DATA_M			0x71050
+
+/* Transfer unit size for display port, minus 1; default is 0x3f (TU size 64) */
+#define   PIPE_GMCH_DATA_M_TU_SIZE_MASK		(0x3f << 25)
+#define   PIPE_GMCH_DATA_M_TU_SIZE_SHIFT	25
+
+#define   PIPE_GMCH_DATA_M_MASK			(0xffffff)
+
+#define _PIPEA_GMCH_DATA_N			0x70054
+#define _PIPEB_GMCH_DATA_N			0x71054
+#define   PIPE_GMCH_DATA_N_MASK			(0xffffff)
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
+
+#define _PIPEA_DP_LINK_M				0x70060
+#define _PIPEB_DP_LINK_M				0x71060
+#define   PIPEA_DP_LINK_M_MASK			(0xffffff)
+
+#define _PIPEA_DP_LINK_N				0x70064
+#define _PIPEB_DP_LINK_N				0x71064
+#define   PIPEA_DP_LINK_N_MASK			(0xffffff)
+
+#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
+#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
+#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
+#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
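+
+/*
+ * Worked example (editor's sketch, hypothetical helper): for a 65,000 kHz
+ * mode at 3 bytes/pixel on a 2-lane 1.62 Gbps link (ls_clk 162,000 kHz),
+ * the data ratio is 195000/324000.  The crude halving below only ensures
+ * both values fit the 24-bit fields; a real driver would reduce the
+ * fraction properly.
+ */
+static inline void dp_example_compute_data_mn(uint32_t dotclock_khz,
+					      uint32_t bytes_per_pixel,
+					      uint32_t ls_clk_khz,
+					      uint32_t lanes,
+					      uint32_t *m, uint32_t *n)
+{
+	uint64_t num = (uint64_t)dotclock_khz * bytes_per_pixel;
+	uint64_t den = (uint64_t)ls_clk_khz * lanes;
+
+	while (num > PIPE_GMCH_DATA_M_MASK || den > PIPE_GMCH_DATA_N_MASK) {
+		num >>= 1;
+		den >>= 1;
+	}
+	*m = (uint32_t)num;
+	*n = (uint32_t)den;
+}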
+
+#define   PIPE_BPC_MASK				(7 << 5)
+#define   PIPE_8BPC				(0 << 5)
+#define   PIPE_10BPC				(1 << 5)
+#define   PIPE_6BPC				(2 << 5)
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/linux-imx/drivers/gpu/drm/gma500/psb_intel_sdvo.c
new file mode 100644
index 0000000..19e3660
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -0,0 +1,2628 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "psb_intel_drv.h"
+#include <drm/gma_drm.h>
+#include "psb_drv.h"
+#include "psb_intel_sdvo_regs.h"
+#include "psb_intel_reg.h"
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK  (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+                         SDVO_TV_MASK)
+
+#define IS_TV(c)	(c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c)	(c->output_flag & SDVO_TMDS_MASK)
+#define IS_LVDS(c)	(c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+
+
+static const char *tv_format_names[] = {
+	"NTSC_M"   , "NTSC_J"  , "NTSC_443",
+	"PAL_B"    , "PAL_D"   , "PAL_G"   ,
+	"PAL_H"    , "PAL_I"   , "PAL_M"   ,
+	"PAL_N"    , "PAL_NC"  , "PAL_60"  ,
+	"SECAM_B"  , "SECAM_D" , "SECAM_G" ,
+	"SECAM_K"  , "SECAM_K1", "SECAM_L" ,
+	"SECAM_60"
+};
+
+#define TV_FORMAT_NUM  (sizeof(tv_format_names) / sizeof(*tv_format_names))
+
+struct psb_intel_sdvo {
+	struct psb_intel_encoder base;
+
+	struct i2c_adapter *i2c;
+	u8 slave_addr;
+
+	struct i2c_adapter ddc;
+
+	/* Register for the SDVO device: SDVOB or SDVOC */
+	int sdvo_reg;
+
+	/* Active outputs controlled by this SDVO output */
+	uint16_t controlled_output;
+
+	/*
+	 * Capabilities of the SDVO device returned by
+	 * i830_sdvo_get_capabilities()
+	 */
+	struct psb_intel_sdvo_caps caps;
+
+	/* Pixel clock limitations reported by the SDVO device, in kHz */
+	int pixel_clock_min, pixel_clock_max;
+
+	/*
+	 * For a multifunction SDVO device, this tracks the
+	 * currently attached outputs.
+	 */
+	uint16_t attached_output;
+
+	/**
+	 * This is used to select the color range of RGB outputs in HDMI mode.
+	 * It is only valid when using TMDS encoding and 8 bits per color mode.
+	 */
+	uint32_t color_range;
+
+	/**
+	 * This is set if we're going to treat the device as TV-out.
+	 *
+	 * While we have these nice friendly flags for output types that ought
+	 * to decide this for us, the S-Video output on our HDMI+S-Video card
+	 * shows up as RGB1 (VGA).
+	 */
+	bool is_tv;
+
+	/* Index of the current TV format name */
+	int tv_format_index;
+
+	/**
+	 * This is set if we treat the device as HDMI, instead of DVI.
+	 */
+	bool is_hdmi;
+	bool has_hdmi_monitor;
+	bool has_hdmi_audio;
+
+	/**
+	 * This is set if we detect the output of the SDVO device as LVDS and
+	 * have a valid fixed mode to use with the panel.
+	 */
+	bool is_lvds;
+
+	/**
+	 * This is the SDVO fixed panel mode pointer
+	 */
+	struct drm_display_mode *sdvo_lvds_fixed_mode;
+
+	/* DDC bus used by this SDVO encoder */
+	uint8_t ddc_bus;
+
+	/* Input timings for adjusted_mode */
+	struct psb_intel_sdvo_dtd input_dtd;
+
+	/* Saved SDVO output states */
+	uint32_t saveSDVO; /* Can be SDVOB or SDVOC depending on sdvo_reg */
+};
+
+struct psb_intel_sdvo_connector {
+	struct psb_intel_connector base;
+
+	/* Mark the type of connector */
+	uint16_t output_flag;
+
+	int force_audio;
+
+	/* This contains all currently supported TV formats */
+	u8 tv_format_supported[TV_FORMAT_NUM];
+	int   format_supported_num;
+	struct drm_property *tv_format;
+
+	/* Properties for the SDVO-TV */
+	struct drm_property *left;
+	struct drm_property *right;
+	struct drm_property *top;
+	struct drm_property *bottom;
+	struct drm_property *hpos;
+	struct drm_property *vpos;
+	struct drm_property *contrast;
+	struct drm_property *saturation;
+	struct drm_property *hue;
+	struct drm_property *sharpness;
+	struct drm_property *flicker_filter;
+	struct drm_property *flicker_filter_adaptive;
+	struct drm_property *flicker_filter_2d;
+	struct drm_property *tv_chroma_filter;
+	struct drm_property *tv_luma_filter;
+	struct drm_property *dot_crawl;
+
+	/* Property for the SDVO-TV/LVDS */
+	struct drm_property *brightness;
+
+	/* Variables recording the current settings for the above properties */
+	u32	left_margin, right_margin, top_margin, bottom_margin;
+
+	/* This is to get the range of the margins. */
+	u32	max_hscan,  max_vscan;
+	u32	max_hpos, cur_hpos;
+	u32	max_vpos, cur_vpos;
+	u32	cur_brightness, max_brightness;
+	u32	cur_contrast,	max_contrast;
+	u32	cur_saturation, max_saturation;
+	u32	cur_hue,	max_hue;
+	u32	cur_sharpness,	max_sharpness;
+	u32	cur_flicker_filter,		max_flicker_filter;
+	u32	cur_flicker_filter_adaptive,	max_flicker_filter_adaptive;
+	u32	cur_flicker_filter_2d,		max_flicker_filter_2d;
+	u32	cur_tv_chroma_filter,	max_tv_chroma_filter;
+	u32	cur_tv_luma_filter,	max_tv_luma_filter;
+	u32	cur_dot_crawl,	max_dot_crawl;
+};
+
+static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct psb_intel_sdvo, base.base);
+}
+
+static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+{
+	return container_of(psb_intel_attached_encoder(connector),
+			    struct psb_intel_sdvo, base);
+}
+
+static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
+{
+	return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base);
+}
+
+static bool
+psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags);
+static bool
+psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
+			      struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+			      int type);
+static bool
+psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
+				   struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u32 val)
+{
+	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+	u32 bval = val, cval = val;
+	int i;
+
+	if (psb_intel_sdvo->sdvo_reg == SDVOB) {
+		cval = REG_READ(SDVOC);
+	} else {
+		bval = REG_READ(SDVOB);
+	}
+	/*
+	 * Write the registers twice for luck. Sometimes,
+	 * writing them only once doesn't appear to 'stick'.
+	 * The BIOS does this too. Yay, magic
+	 */
+	for (i = 0; i < 2; i++) {
+		REG_WRITE(SDVOB, bval);
+		REG_READ(SDVOB);
+		REG_WRITE(SDVOC, cval);
+		REG_READ(SDVOC);
+	}
+}
+
+static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 addr, u8 *ch)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr = psb_intel_sdvo->slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &addr,
+		},
+		{
+			.addr = psb_intel_sdvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = ch,
+		}
+	};
+	int ret;
+
+	ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, 2);
+	if (ret == 2)
+		return true;
+
+	DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+	return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+	u8 cmd;
+	const char *name;
+} sdvo_cmd_names[] = {
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+    /* Add the op code for SDVO enhancements */
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+    /* HDMI op code */
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+};
+
+#define IS_SDVOB(reg)	((reg) == SDVOB)
+#define SDVO_NAME(sdvo) (IS_SDVOB((sdvo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+
+static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+				   const void *args, int args_len)
+{
+	int i;
+
+	DRM_DEBUG_KMS("%s: W: %02X ",
+				SDVO_NAME(psb_intel_sdvo), cmd);
+	for (i = 0; i < args_len; i++)
+		DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
+	for (; i < 8; i++)
+		DRM_LOG_KMS("   ");
+	for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+		if (cmd == sdvo_cmd_names[i].cmd) {
+			DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(sdvo_cmd_names))
+		DRM_LOG_KMS("(%02X)", cmd);
+	DRM_LOG_KMS("\n");
+}
+
+static const char *cmd_status_names[] = {
+	"Power on",
+	"Success",
+	"Not supported",
+	"Invalid arg",
+	"Pending",
+	"Target not specified",
+	"Scaling not supported"
+};
+
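+/*
+ * Write a command to the SDVO device: each argument byte goes into its own
+ * register write (SDVO_I2C_ARG_0 counting down), the opcode goes into
+ * SDVO_I2C_OPCODE, and the status register is read back within the same
+ * i2c_transfer() so the whole sequence stays atomic on the bus.
+ */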
+static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+				 const void *args, int args_len)
+{
+	u8 buf[args_len*2 + 2], status;
+	struct i2c_msg msgs[args_len + 3];
+	int i, ret;
+
+	psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
+
+	for (i = 0; i < args_len; i++) {
+		msgs[i].addr = psb_intel_sdvo->slave_addr;
+		msgs[i].flags = 0;
+		msgs[i].len = 2;
+		msgs[i].buf = buf + 2*i;
+		buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+		buf[2*i + 1] = ((u8 *)args)[i];
+	}
+	msgs[i].addr = psb_intel_sdvo->slave_addr;
+	msgs[i].flags = 0;
+	msgs[i].len = 2;
+	msgs[i].buf = buf + 2*i;
+	buf[2*i + 0] = SDVO_I2C_OPCODE;
+	buf[2*i + 1] = cmd;
+
+	/* the following two are to read the response */
+	status = SDVO_I2C_CMD_STATUS;
+	msgs[i+1].addr = psb_intel_sdvo->slave_addr;
+	msgs[i+1].flags = 0;
+	msgs[i+1].len = 1;
+	msgs[i+1].buf = &status;
+
+	msgs[i+2].addr = psb_intel_sdvo->slave_addr;
+	msgs[i+2].flags = I2C_M_RD;
+	msgs[i+2].len = 1;
+	msgs[i+2].buf = &status;
+
+	ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, i+3);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+		return false;
+	}
+	if (ret != i+3) {
+		/* failure in I2C transfer */
+		DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+		return false;
+	}
+
+	return true;
+}
+
+static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
+				     void *response, int response_len)
+{
+	u8 retry = 5;
+	u8 status;
+	int i;
+
+	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(psb_intel_sdvo));
+
+	/*
+	 * The documentation states that all commands will be
+	 * processed within 15µs, and that we need only poll
+	 * the status byte a maximum of 3 times in order for the
+	 * command to be complete.
+	 *
+	 * Check 5 times in case the hardware failed to read the docs.
+	 */
+	if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+				  SDVO_I2C_CMD_STATUS,
+				  &status))
+		goto log_fail;
+
+	while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+		udelay(15);
+		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+					  SDVO_I2C_CMD_STATUS,
+					  &status))
+			goto log_fail;
+	}
+
+	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+		DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+	else
+		DRM_LOG_KMS("(??? %d)", status);
+
+	if (status != SDVO_CMD_STATUS_SUCCESS)
+		goto log_fail;
+
+	/* Read the command response */
+	for (i = 0; i < response_len; i++) {
+		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+					  SDVO_I2C_RETURN_0 + i,
+					  &((u8 *)response)[i]))
+			goto log_fail;
+		DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+	}
+	DRM_LOG_KMS("\n");
+	return true;
+
+log_fail:
+	DRM_LOG_KMS("... failed\n");
+	return false;
+}
+
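+/*
+ * Pick a pixel multiplier (1x/2x/4x) so the effective dot clock stays in
+ * the range the SDVO bus supports; low-clock modes are sent at a multiple
+ * of their native clock and the encoder divides it back out (see
+ * mode_fixup below).
+ */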
+static int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
+	if (mode->clock >= 100000)
+		return 1;
+	else if (mode->clock >= 50000)
+		return 2;
+	else
+		return 4;
+}
+
+static bool psb_intel_sdvo_set_control_bus_switch(struct psb_intel_sdvo *psb_intel_sdvo,
+					      u8 ddc_bus)
+{
+	/* This must be the immediately preceding write before the i2c xfer */
+	return psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+				    SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+				    &ddc_bus, 1);
+}
+
+static bool psb_intel_sdvo_set_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, const void *data, int len)
+{
+	if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, data, len))
+		return false;
+
+	return psb_intel_sdvo_read_response(psb_intel_sdvo, NULL, 0);
+}
+
+static bool
+psb_intel_sdvo_get_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, void *value, int len)
+{
+	if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, NULL, 0))
+		return false;
+
+	return psb_intel_sdvo_read_response(psb_intel_sdvo, value, len);
+}
+
+static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+	struct psb_intel_sdvo_set_target_input_args targets = {0};
+	return psb_intel_sdvo_set_value(psb_intel_sdvo,
+				    SDVO_CMD_SET_TARGET_INPUT,
+				    &targets, sizeof(targets));
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_sdvo *psb_intel_sdvo, bool *input_1, bool *input_2)
+{
+	struct psb_intel_sdvo_get_trained_inputs_response response;
+
+	BUILD_BUG_ON(sizeof(response) != 1);
+	if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+				  &response, sizeof(response)))
+		return false;
+
+	*input_1 = response.input0_trained;
+	*input_2 = response.input1_trained;
+	return true;
+}
+
+static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_sdvo *psb_intel_sdvo,
+					  u16 outputs)
+{
+	return psb_intel_sdvo_set_value(psb_intel_sdvo,
+				    SDVO_CMD_SET_ACTIVE_OUTPUTS,
+				    &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_sdvo *psb_intel_sdvo,
+					       int mode)
+{
+	u8 state = SDVO_ENCODER_STATE_ON;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		state = SDVO_ENCODER_STATE_ON;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		state = SDVO_ENCODER_STATE_STANDBY;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		state = SDVO_ENCODER_STATE_SUSPEND;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		state = SDVO_ENCODER_STATE_OFF;
+		break;
+	}
+
+	return psb_intel_sdvo_set_value(psb_intel_sdvo,
+				    SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+}
+
+static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_sdvo *psb_intel_sdvo,
+						   int *clock_min,
+						   int *clock_max)
+{
+	struct psb_intel_sdvo_pixel_clock_range clocks;
+
+	BUILD_BUG_ON(sizeof(clocks) != 4);
+	if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+				  SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+				  &clocks, sizeof(clocks)))
+		return false;
+
+	/* Convert the values from units of 10 kHz to kHz. */
+	*clock_min = clocks.min * 10;
+	*clock_max = clocks.max * 10;
+	return true;
+}
+
+static bool psb_intel_sdvo_set_target_output(struct psb_intel_sdvo *psb_intel_sdvo,
+					 u16 outputs)
+{
+	return psb_intel_sdvo_set_value(psb_intel_sdvo,
+				    SDVO_CMD_SET_TARGET_OUTPUT,
+				    &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_timing(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+				  struct psb_intel_sdvo_dtd *dtd)
+{
+	return psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+		psb_intel_sdvo_set_value(psb_intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+					 struct psb_intel_sdvo_dtd *dtd)
+{
+	return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+				     SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool psb_intel_sdvo_set_output_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+					 struct psb_intel_sdvo_dtd *dtd)
+{
+	return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+				     SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool
+psb_intel_sdvo_create_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+					 uint16_t clock,
+					 uint16_t width,
+					 uint16_t height)
+{
+	struct psb_intel_sdvo_preferred_input_timing_args args;
+
+	memset(&args, 0, sizeof(args));
+	args.clock = clock;
+	args.width = width;
+	args.height = height;
+	args.interlace = 0;
+
+	if (psb_intel_sdvo->is_lvds &&
+	   (psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+	    psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+		args.scaled = 1;
+
+	return psb_intel_sdvo_set_value(psb_intel_sdvo,
+				    SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+				    &args, sizeof(args));
+}
+
+static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+						  struct psb_intel_sdvo_dtd *dtd)
+{
+	BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+	BUILD_BUG_ON(sizeof(dtd->part2) != 8);
+	return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+				    &dtd->part1, sizeof(dtd->part1)) &&
+		psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+				     &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_sdvo *psb_intel_sdvo, u8 val)
+{
+	return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+}
+
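+/*
+ * Pack a drm_display_mode into an SDVO DTD. The layout mirrors an EDID
+ * detailed timing descriptor: the low 8 bits of each field get their own
+ * byte, with the overflow bits collected into shared "high" nibble bytes.
+ */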
+static void psb_intel_sdvo_get_dtd_from_mode(struct psb_intel_sdvo_dtd *dtd,
+					 const struct drm_display_mode *mode)
+{
+	uint16_t width, height;
+	uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+	uint16_t h_sync_offset, v_sync_offset;
+
+	width = mode->crtc_hdisplay;
+	height = mode->crtc_vdisplay;
+
+	/* do some mode translations */
+	h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+	h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+	v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+	v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+
+	h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+	v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
+	dtd->part1.clock = mode->clock / 10;
+	dtd->part1.h_active = width & 0xff;
+	dtd->part1.h_blank = h_blank_len & 0xff;
+	dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+		((h_blank_len >> 8) & 0xf);
+	dtd->part1.v_active = height & 0xff;
+	dtd->part1.v_blank = v_blank_len & 0xff;
+	dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
+		((v_blank_len >> 8) & 0xf);
+
+	dtd->part2.h_sync_off = h_sync_offset & 0xff;
+	dtd->part2.h_sync_width = h_sync_len & 0xff;
+	dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+		(v_sync_len & 0xf);
+	dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+		((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+		((v_sync_len & 0x30) >> 4);
+
+	dtd->part2.dtd_flags = 0x18;
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		dtd->part2.dtd_flags |= 0x2;
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		dtd->part2.dtd_flags |= 0x4;
+
+	dtd->part2.sdvo_flags = 0;
+	dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+	dtd->part2.reserved = 0;
+}
+
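+/* The inverse of the above: unpack an SDVO DTD into a drm_display_mode. */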
+static void psb_intel_sdvo_get_mode_from_dtd(struct drm_display_mode *mode,
+					 const struct psb_intel_sdvo_dtd *dtd)
+{
+	mode->hdisplay = dtd->part1.h_active;
+	mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+	mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
+	mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+	mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
+	mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+	mode->htotal = mode->hdisplay + dtd->part1.h_blank;
+	mode->htotal += (dtd->part1.h_high & 0xf) << 8;
+
+	mode->vdisplay = dtd->part1.v_active;
+	mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+	mode->vsync_start = mode->vdisplay;
+	mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+	mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+	mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+	mode->vsync_end = mode->vsync_start +
+		(dtd->part2.v_sync_off_width & 0xf);
+	mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+	mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
+	mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+
+	mode->clock = dtd->part1.clock * 10;
+
+	mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+	if (dtd->part2.dtd_flags & 0x2)
+		mode->flags |= DRM_MODE_FLAG_PHSYNC;
+	if (dtd->part2.dtd_flags & 0x4)
+		mode->flags |= DRM_MODE_FLAG_PVSYNC;
+}
+
+static bool psb_intel_sdvo_check_supp_encode(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+	struct psb_intel_sdvo_encode encode;
+
+	BUILD_BUG_ON(sizeof(encode) != 2);
+	return psb_intel_sdvo_get_value(psb_intel_sdvo,
+				  SDVO_CMD_GET_SUPP_ENCODE,
+				  &encode, sizeof(encode));
+}
+
+static bool psb_intel_sdvo_set_encode(struct psb_intel_sdvo *psb_intel_sdvo,
+				  uint8_t mode)
+{
+	return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+}
+
+static bool psb_intel_sdvo_set_colorimetry(struct psb_intel_sdvo *psb_intel_sdvo,
+				       uint8_t mode)
+{
+	return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+}
+
+#if 0
+static void psb_intel_sdvo_dump_hdmi_buf(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+	int i, j;
+	uint8_t set_buf_index[2];
+	uint8_t av_split;
+	uint8_t buf_size;
+	uint8_t buf[48];
+	uint8_t *pos;
+
+	psb_intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+
+	for (i = 0; i <= av_split; i++) {
+		set_buf_index[0] = i; set_buf_index[1] = 0;
+		psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
+				     set_buf_index, 2);
+		psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+		psb_intel_sdvo_read_response(encoder, &buf_size, 1);
+
+		pos = buf;
+		for (j = 0; j <= buf_size; j += 8) {
+			psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
+					     NULL, 0);
+			psb_intel_sdvo_read_response(encoder, pos, 8);
+			pos += 8;
+		}
+	}
+}
+#endif
+
+static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+	DRM_INFO("HDMI is not supported yet");
+
+	return false;
+#if 0
+	struct dip_infoframe avi_if = {
+		.type = DIP_TYPE_AVI,
+		.ver = DIP_VERSION_AVI,
+		.len = DIP_LEN_AVI,
+	};
+	uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+	uint8_t set_buf_index[2] = { 1, 0 };
+	uint64_t *data = (uint64_t *)&avi_if;
+	unsigned i;
+
+	intel_dip_infoframe_csum(&avi_if);
+
+	if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+				  SDVO_CMD_SET_HBUF_INDEX,
+				  set_buf_index, 2))
+		return false;
+
+	for (i = 0; i < sizeof(avi_if); i += 8) {
+		if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+					  SDVO_CMD_SET_HBUF_DATA,
+					  data, 8))
+			return false;
+		data++;
+	}
+
+	return psb_intel_sdvo_set_value(psb_intel_sdvo,
+				    SDVO_CMD_SET_HBUF_TXRATE,
+				    &tx_rate, 1);
+#endif
+}
+
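+/*
+ * The TV format is a 6-byte mask with exactly one bit set, chosen by
+ * tv_format_index; only the low 32 bits can be selected this way, the
+ * remaining bytes stay zeroed.
+ */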
+static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+	struct psb_intel_sdvo_tv_format format;
+	uint32_t format_map;
+
+	format_map = 1 << psb_intel_sdvo->tv_format_index;
+	memset(&format, 0, sizeof(format));
+	memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+	BUILD_BUG_ON(sizeof(format) != 6);
+	return psb_intel_sdvo_set_value(psb_intel_sdvo,
+				    SDVO_CMD_SET_TV_FORMAT,
+				    &format, sizeof(format));
+}
+
+static bool
+psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+					const struct drm_display_mode *mode)
+{
+	struct psb_intel_sdvo_dtd output_dtd;
+
+	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+					  psb_intel_sdvo->attached_output))
+		return false;
+
+	psb_intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+	if (!psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &output_dtd))
+		return false;
+
+	return true;
+}
+
+static bool
+psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+					const struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode)
+{
+	/* Reset the input timing to the screen. Assume always input 0. */
+	if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+		return false;
+
+	if (!psb_intel_sdvo_create_preferred_input_timing(psb_intel_sdvo,
+						      mode->clock / 10,
+						      mode->hdisplay,
+						      mode->vdisplay))
+		return false;
+
+	if (!psb_intel_sdvo_get_preferred_input_timing(psb_intel_sdvo,
+						   &psb_intel_sdvo->input_dtd))
+		return false;
+
+	psb_intel_sdvo_get_mode_from_dtd(adjusted_mode, &psb_intel_sdvo->input_dtd);
+
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+	return true;
+}
+
+static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+	int multiplier;
+
+	/* We need to construct preferred input timings based on our
+	 * output timings.  To do that, we have to set the output
+	 * timings, even though this isn't really the right place in
+	 * the sequence to do it. Oh well.
+	 */
+	if (psb_intel_sdvo->is_tv) {
+		if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo, mode))
+			return false;
+
+		(void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+							     mode,
+							     adjusted_mode);
+	} else if (psb_intel_sdvo->is_lvds) {
+		if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo,
+							     psb_intel_sdvo->sdvo_lvds_fixed_mode))
+			return false;
+
+		(void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+							     mode,
+							     adjusted_mode);
+	}
+
+	/* Make the CRTC code factor in the SDVO pixel multiplier.  The
+	 * SDVO device will factor out the multiplier during mode_set.
+	 */
+	multiplier = psb_intel_sdvo_get_pixel_multiplier(adjusted_mode);
+	psb_intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+
+	return true;
+}
+
+static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+	u32 sdvox;
+	struct psb_intel_sdvo_in_out_map in_out;
+	struct psb_intel_sdvo_dtd input_dtd;
+	int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
+	int rate;
+
+	if (!mode)
+		return;
+
+	/* First, set the input mapping for the first input to our controlled
+	 * output. This is only correct if we're a single-input device, in
+	 * which case the first input is the output from the appropriate SDVO
+	 * channel on the motherboard.  In a two-input device, the first input
+	 * will be SDVOB and the second SDVOC.
+	 */
+	in_out.in0 = psb_intel_sdvo->attached_output;
+	in_out.in1 = 0;
+
+	psb_intel_sdvo_set_value(psb_intel_sdvo,
+			     SDVO_CMD_SET_IN_OUT_MAP,
+			     &in_out, sizeof(in_out));
+
+	/* Set the output timings to the screen */
+	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+					  psb_intel_sdvo->attached_output))
+		return;
+
+	/* We already fetched the input timing in mode_fixup and filled it
+	 * into adjusted_mode.
+	 */
+	if (psb_intel_sdvo->is_tv || psb_intel_sdvo->is_lvds) {
+		input_dtd = psb_intel_sdvo->input_dtd;
+	} else {
+		/* Set the output timing to the screen */
+		if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+						  psb_intel_sdvo->attached_output))
+			return;
+
+		psb_intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+		(void) psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &input_dtd);
+	}
+
+	/* Set the input timing to the screen. Assume always input 0. */
+	if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+		return;
+
+	if (psb_intel_sdvo->has_hdmi_monitor) {
+		psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_HDMI);
+		psb_intel_sdvo_set_colorimetry(psb_intel_sdvo,
+					   SDVO_COLORIMETRY_RGB256);
+		psb_intel_sdvo_set_avi_infoframe(psb_intel_sdvo);
+	} else
+		psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_DVI);
+
+	if (psb_intel_sdvo->is_tv &&
+	    !psb_intel_sdvo_set_tv_format(psb_intel_sdvo))
+		return;
+
+	(void) psb_intel_sdvo_set_input_timing(psb_intel_sdvo, &input_dtd);
+
+	switch (pixel_multiplier) {
+	default:
+	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+	case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+	case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+	}
+	if (!psb_intel_sdvo_set_clock_rate_mult(psb_intel_sdvo, rate))
+		return;
+
+	/* Set the SDVO control regs. */
+	sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
+	switch (psb_intel_sdvo->sdvo_reg) {
+	case SDVOB:
+		sdvox &= SDVOB_PRESERVE_MASK;
+		break;
+	case SDVOC:
+		sdvox &= SDVOC_PRESERVE_MASK;
+		break;
+	}
+	sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+
+	if (psb_intel_crtc->pipe == 1)
+		sdvox |= SDVO_PIPE_B_SELECT;
+	if (psb_intel_sdvo->has_hdmi_audio)
+		sdvox |= SDVO_AUDIO_ENABLE;
+
+	/* FIXME: Check if this is needed for PSB
+	sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+	*/
+
+	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+		sdvox |= SDVO_STALL_SELECT;
+	psb_intel_sdvo_write_sdvox(psb_intel_sdvo, sdvox);
+}
+
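+/*
+ * DPMS: on the way down, deactivate the outputs before clearing the port
+ * enable bit; on the way up, enable the port first, wait two vblanks for
+ * input training, then activate the attached outputs.
+ */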
+static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+	u32 temp;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		DRM_DEBUG("DPMS_ON\n");
+		break;
+	case DRM_MODE_DPMS_OFF:
+		DRM_DEBUG("DPMS_OFF\n");
+		break;
+	default:
+		DRM_DEBUG("DPMS: %d\n", mode);
+	}
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, 0);
+		if (0)
+			psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+
+		if (mode == DRM_MODE_DPMS_OFF) {
+			temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+			if ((temp & SDVO_ENABLE) != 0) {
+				psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
+			}
+		}
+	} else {
+		bool input1, input2;
+		int i;
+		bool success;
+
+		temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+		if ((temp & SDVO_ENABLE) == 0)
+			psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
+		for (i = 0; i < 2; i++)
+			psb_intel_wait_for_vblank(dev);
+
+		success = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
+		/* Warn if the device reported failure to sync.
+		 * A lot of SDVO devices fail to notify of sync; given that
+		 * the query itself succeeded, we assume the output is
+		 * running anyway.
+		 */
+		if (success && !input1) {
+			DRM_DEBUG_KMS("First %s output reported failure to "
+					"sync\n", SDVO_NAME(psb_intel_sdvo));
+		}
+
+		if (0)
+			psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+		psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, psb_intel_sdvo->attached_output);
+	}
+}
+
+static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
+	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	if (psb_intel_sdvo->pixel_clock_min > mode->clock)
+		return MODE_CLOCK_LOW;
+
+	if (psb_intel_sdvo->pixel_clock_max < mode->clock)
+		return MODE_CLOCK_HIGH;
+
+	if (psb_intel_sdvo->is_lvds) {
+		if (mode->hdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+			return MODE_PANEL;
+
+		if (mode->vdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+
+	return MODE_OK;
+}
+
+static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdvo, struct psb_intel_sdvo_caps *caps)
+{
+	BUILD_BUG_ON(sizeof(*caps) != 8);
+	if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+				  SDVO_CMD_GET_DEVICE_CAPS,
+				  caps, sizeof(*caps)))
+		return false;
+
+	DRM_DEBUG_KMS("SDVO capabilities:\n"
+		      "  vendor_id: %d\n"
+		      "  device_id: %d\n"
+		      "  device_rev_id: %d\n"
+		      "  sdvo_version_major: %d\n"
+		      "  sdvo_version_minor: %d\n"
+		      "  sdvo_inputs_mask: %d\n"
+		      "  smooth_scaling: %d\n"
+		      "  sharp_scaling: %d\n"
+		      "  up_scaling: %d\n"
+		      "  down_scaling: %d\n"
+		      "  stall_support: %d\n"
+		      "  output_flags: %d\n",
+		      caps->vendor_id,
+		      caps->device_id,
+		      caps->device_rev_id,
+		      caps->sdvo_version_major,
+		      caps->sdvo_version_minor,
+		      caps->sdvo_inputs_mask,
+		      caps->smooth_scaling,
+		      caps->sharp_scaling,
+		      caps->up_scaling,
+		      caps->down_scaling,
+		      caps->stall_support,
+		      caps->output_flags);
+
+	return true;
+}
+
+/* Currently unused */
+#if 0
+struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
+{
+	struct drm_connector *connector = NULL;
+	struct psb_intel_sdvo *iout = NULL;
+	struct psb_intel_sdvo *sdvo;
+
+	/* find the sdvo connector */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		iout = to_psb_intel_sdvo(connector);
+
+		if (iout->type != INTEL_OUTPUT_SDVO)
+			continue;
+
+		sdvo = iout->dev_priv;
+
+		if (sdvo->sdvo_reg == SDVOB && sdvoB)
+			return connector;
+
+		if (sdvo->sdvo_reg == SDVOC && !sdvoB)
+			return connector;
+
+	}
+
+	return NULL;
+}
+
+int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
+{
+	u8 response[2];
+	u8 status;
+	struct psb_intel_sdvo *psb_intel_sdvo;
+	DRM_DEBUG_KMS("\n");
+
+	if (!connector)
+		return 0;
+
+	psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+	return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+				    &response, 2) && response[0];
+}
+
+void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+{
+	u8 response[2];
+	u8 status;
+	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+	psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+	psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+	if (on) {
+		psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+		status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+		psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+	} else {
+		response[0] = 0;
+		response[1] = 0;
+		psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+	}
+
+	psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+	psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+}
+#endif
+
+static bool
+psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+	/* Is there more than one type of output? */
+	int caps = psb_intel_sdvo->caps.output_flags & 0xf;
+
+	/* Clearing the lowest set bit leaves a nonzero value iff more than
+	 * one output bit was set.
+	 */
+	return caps & (caps - 1);
+}
+
+static struct edid *
+psb_intel_sdvo_get_edid(struct drm_connector *connector)
+{
+	struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);
+	return drm_get_edid(connector, &sdvo->ddc);
+}
+
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+	struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+	return drm_get_edid(connector,
+			    &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+}
+
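+/*
+ * Probe for a digital sink: try the current DDC bus, then (on a
+ * multifunction encoder) walk the remaining buses, finally falling back
+ * to the analog CRT DDC, before classifying the EDID as HDMI or DVI.
+ */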
+static enum drm_connector_status
+psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+{
+	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+	enum drm_connector_status status;
+	struct edid *edid;
+
+	edid = psb_intel_sdvo_get_edid(connector);
+
+	if (edid == NULL && psb_intel_sdvo_multifunc_encoder(psb_intel_sdvo)) {
+		u8 ddc, saved_ddc = psb_intel_sdvo->ddc_bus;
+
+		/*
+		 * Don't pass 1 as the DDC bus switch argument when fetching
+		 * the EDID; bus 1 is reserved for the SDVO SPD ROM.
+		 */
+		for (ddc = psb_intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+			psb_intel_sdvo->ddc_bus = ddc;
+			edid = psb_intel_sdvo_get_edid(connector);
+			if (edid)
+				break;
+		}
+		/*
+		 * If we found the EDID on another bus, keep using that bus;
+		 * otherwise restore the original DDC bus.
+		 */
+		if (edid == NULL)
+			psb_intel_sdvo->ddc_bus = saved_ddc;
+	}
+
+	/*
+	 * When there is no EDID and no monitor is connected to the VGA
+	 * port, try the CRT DDC to read the EDID for the DVI connector.
+	 */
+	if (edid == NULL)
+		edid = psb_intel_sdvo_get_analog_edid(connector);
+
+	status = connector_status_unknown;
+	if (edid != NULL) {
+		/* DDC bus is shared, match EDID to connector type */
+		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+			status = connector_status_connected;
+			if (psb_intel_sdvo->is_hdmi) {
+				psb_intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+				psb_intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+			}
+		} else
+			status = connector_status_disconnected;
+		kfree(edid);
+	}
+
+	if (status == connector_status_connected) {
+		struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+		if (psb_intel_sdvo_connector->force_audio)
+			psb_intel_sdvo->has_hdmi_audio = psb_intel_sdvo_connector->force_audio > 0;
+	}
+
+	return status;
+}
+
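+/*
+ * Connector detect: ask the encoder which displays are attached (allowing
+ * a 30ms settling delay for TV outputs) and match the response against
+ * this connector's output_flag before any EDID-based classification.
+ */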
+static enum drm_connector_status
+psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+	uint16_t response;
+	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+	enum drm_connector_status ret;
+
+	if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+				  SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+		return connector_status_unknown;
+
+	/* add 30ms delay when the output type might be TV */
+	if (psb_intel_sdvo->caps.output_flags &
+	    (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+		mdelay(30);
+
+	if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2))
+		return connector_status_unknown;
+
+	DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+		      response & 0xff, response >> 8,
+		      psb_intel_sdvo_connector->output_flag);
+
+	if (response == 0)
+		return connector_status_disconnected;
+
+	psb_intel_sdvo->attached_output = response;
+
+	psb_intel_sdvo->has_hdmi_monitor = false;
+	psb_intel_sdvo->has_hdmi_audio = false;
+
+	if ((psb_intel_sdvo_connector->output_flag & response) == 0)
+		ret = connector_status_disconnected;
+	else if (IS_TMDS(psb_intel_sdvo_connector))
+		ret = psb_intel_sdvo_hdmi_sink_detect(connector);
+	else {
+		struct edid *edid;
+
+		/* if we have an edid check it matches the connection */
+		edid = psb_intel_sdvo_get_edid(connector);
+		if (edid == NULL)
+			edid = psb_intel_sdvo_get_analog_edid(connector);
+		if (edid != NULL) {
+			if (edid->input & DRM_EDID_INPUT_DIGITAL)
+				ret = connector_status_disconnected;
+			else
+				ret = connector_status_connected;
+			kfree(edid);
+		} else
+			ret = connector_status_connected;
+	}
+
+	/* May need to update encoder flags, e.g. the TV clock for SDVO TV. */
+	if (ret == connector_status_connected) {
+		psb_intel_sdvo->is_tv = false;
+		psb_intel_sdvo->is_lvds = false;
+		psb_intel_sdvo->base.needs_tv_clock = false;
+
+		if (response & SDVO_TV_MASK) {
+			psb_intel_sdvo->is_tv = true;
+			psb_intel_sdvo->base.needs_tv_clock = true;
+		}
+		if (response & SDVO_LVDS_MASK)
+			psb_intel_sdvo->is_lvds = psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+	}
+
+	return ret;
+}
+
+static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+{
+	struct edid *edid;
+
+	/* set the bus switch and get the modes */
+	edid = psb_intel_sdvo_get_edid(connector);
+
+	/*
+	 * Mac mini hack.  On this device, the DVI-I connector shares one DDC
+	 * link between analog and digital outputs. So, if the regular SDVO
+	 * DDC fails, check to see if the analog output is disconnected, in
+	 * which case we'll look there for the digital DDC data.
+	 */
+	if (edid == NULL)
+		edid = psb_intel_sdvo_get_analog_edid(connector);
+
+	if (edid != NULL) {
+		struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+		bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+		bool connector_is_digital = !!IS_TMDS(psb_intel_sdvo_connector);
+
+		if (connector_is_digital == monitor_is_digital) {
+			drm_mode_connector_update_edid_property(connector, edid);
+			drm_add_edid_modes(connector, edid);
+		}
+
+		kfree(edid);
+	}
+}
+
+/*
+ * Set of SDVO TV modes.
+ * Note!  This is in reply order (see loop in get_tv_modes).
+ * XXX: all 60Hz refresh?
+ */
+static const struct drm_display_mode sdvo_tv_modes[] = {
+	{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
+		   416, 0, 200, 201, 232, 233, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
+		   416, 0, 240, 241, 272, 273, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
+		   496, 0, 300, 301, 332, 333, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
+		   736, 0, 350, 351, 382, 383, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
+		   736, 0, 400, 401, 432, 433, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
+		   736, 0, 480, 481, 512, 513, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
+		   800, 0, 480, 481, 512, 513, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
+		   800, 0, 576, 577, 608, 609, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
+		   816, 0, 350, 351, 382, 383, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
+		   816, 0, 400, 401, 432, 433, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
+		   816, 0, 480, 481, 512, 513, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
+		   816, 0, 540, 541, 572, 573, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
+		   816, 0, 576, 577, 608, 609, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
+		   864, 0, 576, 577, 608, 609, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
+		   896, 0, 600, 601, 632, 633, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
+		   928, 0, 624, 625, 656, 657, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
+		   1016, 0, 766, 767, 798, 799, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
+		   1120, 0, 768, 769, 800, 801, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
+		   1376, 0, 1024, 1025, 1056, 1057, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+	struct psb_intel_sdvo_sdtv_resolution_request tv_res;
+	uint32_t reply = 0, format_map = 0;
+	int i;
+
+	/* Read the list of supported input resolutions for the selected TV
+	 * format.
+	 */
+	format_map = 1 << psb_intel_sdvo->tv_format_index;
+	memcpy(&tv_res, &format_map,
+	       min(sizeof(format_map), sizeof(struct psb_intel_sdvo_sdtv_resolution_request)));
+
+	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, psb_intel_sdvo->attached_output))
+		return;
+
+	BUILD_BUG_ON(sizeof(tv_res) != 3);
+	if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+				  SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+				  &tv_res, sizeof(tv_res)))
+		return;
+	if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &reply, 3))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+		if (reply & (1 << i)) {
+			struct drm_display_mode *nmode;
+			nmode = drm_mode_duplicate(connector->dev,
+						   &sdvo_tv_modes[i]);
+			if (nmode)
+				drm_mode_probed_add(connector, nmode);
+		}
+}
+
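+/*
+ * LVDS mode list: prefer DDC, fall back to the VBT-provided mode, then
+ * remember whichever preferred mode wins as the fixed panel mode.
+ */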
+static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+	struct drm_psb_private *dev_priv = connector->dev->dev_private;
+	struct drm_display_mode *newmode;
+
+	/*
+	 * Attempt to get the mode list from DDC.
+	 * Assume that the preferred modes are
+	 * arranged in priority order.
+	 */
+	psb_intel_ddc_get_modes(connector, psb_intel_sdvo->i2c);
+	if (!list_empty(&connector->probed_modes))
+		goto end;
+
+	/* Fetch modes from VBT */
+	if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+		newmode = drm_mode_duplicate(connector->dev,
+					     dev_priv->sdvo_lvds_vbt_mode);
+		if (newmode != NULL) {
+			/* Guarantee the mode is preferred */
+			newmode->type = (DRM_MODE_TYPE_PREFERRED |
+					 DRM_MODE_TYPE_DRIVER);
+			drm_mode_probed_add(connector, newmode);
+		}
+	}
+
+end:
+	list_for_each_entry(newmode, &connector->probed_modes, head) {
+		if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+			psb_intel_sdvo->sdvo_lvds_fixed_mode =
+				drm_mode_duplicate(connector->dev, newmode);
+
+			drm_mode_set_crtcinfo(psb_intel_sdvo->sdvo_lvds_fixed_mode,
+					      0);
+
+			psb_intel_sdvo->is_lvds = true;
+			break;
+		}
+	}
+}
+
+static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
+{
+	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+
+	if (IS_TV(psb_intel_sdvo_connector))
+		psb_intel_sdvo_get_tv_modes(connector);
+	else if (IS_LVDS(psb_intel_sdvo_connector))
+		psb_intel_sdvo_get_lvds_modes(connector);
+	else
+		psb_intel_sdvo_get_ddc_modes(connector);
+
+	return !list_empty(&connector->probed_modes);
+}
+
+static void
+psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+{
+	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+	struct drm_device *dev = connector->dev;
+
+	if (psb_intel_sdvo_connector->left)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->left);
+	if (psb_intel_sdvo_connector->right)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->right);
+	if (psb_intel_sdvo_connector->top)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->top);
+	if (psb_intel_sdvo_connector->bottom)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->bottom);
+	if (psb_intel_sdvo_connector->hpos)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->hpos);
+	if (psb_intel_sdvo_connector->vpos)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->vpos);
+	if (psb_intel_sdvo_connector->saturation)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->saturation);
+	if (psb_intel_sdvo_connector->contrast)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->contrast);
+	if (psb_intel_sdvo_connector->hue)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->hue);
+	if (psb_intel_sdvo_connector->sharpness)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->sharpness);
+	if (psb_intel_sdvo_connector->flicker_filter)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter);
+	if (psb_intel_sdvo_connector->flicker_filter_2d)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_2d);
+	if (psb_intel_sdvo_connector->flicker_filter_adaptive)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_adaptive);
+	if (psb_intel_sdvo_connector->tv_luma_filter)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->tv_luma_filter);
+	if (psb_intel_sdvo_connector->tv_chroma_filter)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->tv_chroma_filter);
+	if (psb_intel_sdvo_connector->dot_crawl)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->dot_crawl);
+	if (psb_intel_sdvo_connector->brightness)
+		drm_property_destroy(dev, psb_intel_sdvo_connector->brightness);
+}
+
+static void psb_intel_sdvo_destroy(struct drm_connector *connector)
+{
+	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+
+	if (psb_intel_sdvo_connector->tv_format)
+		drm_property_destroy(connector->dev,
+				     psb_intel_sdvo_connector->tv_format);
+
+	psb_intel_sdvo_destroy_enhance_property(connector);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static bool psb_intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+	struct edid *edid;
+	bool has_audio = false;
+
+	if (!psb_intel_sdvo->is_hdmi)
+		return false;
+
+	edid = psb_intel_sdvo_get_edid(connector);
+	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+		has_audio = drm_detect_monitor_audio(edid);
+
+	return has_audio;
+}
+
+static int
+psb_intel_sdvo_set_property(struct drm_connector *connector,
+			struct drm_property *property,
+			uint64_t val)
+{
+	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+	struct drm_psb_private *dev_priv = connector->dev->dev_private;
+	uint16_t temp_value;
+	uint8_t cmd;
+	int ret;
+
+	ret = drm_object_property_set_value(&connector->base, property, val);
+	if (ret)
+		return ret;
+
+	if (property == dev_priv->force_audio_property) {
+		int i = val;
+		bool has_audio;
+
+		if (i == psb_intel_sdvo_connector->force_audio)
+			return 0;
+
+		psb_intel_sdvo_connector->force_audio = i;
+
+		if (i == 0)
+			has_audio = psb_intel_sdvo_detect_hdmi_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == psb_intel_sdvo->has_hdmi_audio)
+			return 0;
+
+		psb_intel_sdvo->has_hdmi_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		if (val == !!psb_intel_sdvo->color_range)
+			return 0;
+
+		psb_intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+		goto done;
+	}
+
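+/*
+ * For the simple enhancement controls below, setting a property reduces
+ * to a range check against the cached maximum followed by the matching
+ * SDVO_CMD_SET_* command; CHECK_PROPERTY expands that boilerplate.
+ */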
+#define CHECK_PROPERTY(name, NAME) \
+	if (psb_intel_sdvo_connector->name == property) { \
+		if (psb_intel_sdvo_connector->cur_##name == temp_value) return 0; \
+		if (psb_intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+		cmd = SDVO_CMD_SET_##NAME; \
+		psb_intel_sdvo_connector->cur_##name = temp_value; \
+		goto set_value; \
+	}
+
+	if (property == psb_intel_sdvo_connector->tv_format) {
+		if (val >= TV_FORMAT_NUM)
+			return -EINVAL;
+
+		if (psb_intel_sdvo->tv_format_index ==
+		    psb_intel_sdvo_connector->tv_format_supported[val])
+			return 0;
+
+		psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[val];
+		goto done;
+	} else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
+		temp_value = val;
+		if (psb_intel_sdvo_connector->left == property) {
+			drm_object_property_set_value(&connector->base,
+							 psb_intel_sdvo_connector->right, val);
+			if (psb_intel_sdvo_connector->left_margin == temp_value)
+				return 0;
+
+			psb_intel_sdvo_connector->left_margin = temp_value;
+			psb_intel_sdvo_connector->right_margin = temp_value;
+			temp_value = psb_intel_sdvo_connector->max_hscan -
+				psb_intel_sdvo_connector->left_margin;
+			cmd = SDVO_CMD_SET_OVERSCAN_H;
+			goto set_value;
+		} else if (psb_intel_sdvo_connector->right == property) {
+			drm_object_property_set_value(&connector->base,
+							 psb_intel_sdvo_connector->left, val);
+			if (psb_intel_sdvo_connector->right_margin == temp_value)
+				return 0;
+
+			psb_intel_sdvo_connector->left_margin = temp_value;
+			psb_intel_sdvo_connector->right_margin = temp_value;
+			temp_value = psb_intel_sdvo_connector->max_hscan -
+				psb_intel_sdvo_connector->left_margin;
+			cmd = SDVO_CMD_SET_OVERSCAN_H;
+			goto set_value;
+		} else if (psb_intel_sdvo_connector->top == property) {
+			drm_object_property_set_value(&connector->base,
+							 psb_intel_sdvo_connector->bottom, val);
+			if (psb_intel_sdvo_connector->top_margin == temp_value)
+				return 0;
+
+			psb_intel_sdvo_connector->top_margin = temp_value;
+			psb_intel_sdvo_connector->bottom_margin = temp_value;
+			temp_value = psb_intel_sdvo_connector->max_vscan -
+				psb_intel_sdvo_connector->top_margin;
+			cmd = SDVO_CMD_SET_OVERSCAN_V;
+			goto set_value;
+		} else if (psb_intel_sdvo_connector->bottom == property) {
+			drm_object_property_set_value(&connector->base,
+							 psb_intel_sdvo_connector->top, val);
+			if (psb_intel_sdvo_connector->bottom_margin == temp_value)
+				return 0;
+
+			psb_intel_sdvo_connector->top_margin = temp_value;
+			psb_intel_sdvo_connector->bottom_margin = temp_value;
+			temp_value = psb_intel_sdvo_connector->max_vscan -
+				psb_intel_sdvo_connector->top_margin;
+			cmd = SDVO_CMD_SET_OVERSCAN_V;
+			goto set_value;
+		}
+		CHECK_PROPERTY(hpos, HPOS)
+		CHECK_PROPERTY(vpos, VPOS)
+		CHECK_PROPERTY(saturation, SATURATION)
+		CHECK_PROPERTY(contrast, CONTRAST)
+		CHECK_PROPERTY(hue, HUE)
+		CHECK_PROPERTY(brightness, BRIGHTNESS)
+		CHECK_PROPERTY(sharpness, SHARPNESS)
+		CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+		CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+		CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+		CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+		CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+		CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
+	}
+
+	return -EINVAL; /* unknown property */
+
+set_value:
+	if (!psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &temp_value, 2))
+		return -EIO;
+
+done:
+	if (psb_intel_sdvo->base.base.crtc) {
+		struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc;
+		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
+					 crtc->y, crtc->fb);
+	}
+
+	return 0;
+#undef CHECK_PROPERTY
+}
+
+static void psb_intel_sdvo_save(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct psb_intel_encoder *psb_intel_encoder =
+					psb_intel_attached_encoder(connector);
+	struct psb_intel_sdvo *sdvo =
+				to_psb_intel_sdvo(&psb_intel_encoder->base);
+
+	sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
+}
+
+static void psb_intel_sdvo_restore(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_encoder *encoder =
+				&psb_intel_attached_encoder(connector)->base;
+	struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
+	struct drm_crtc *crtc = encoder->crtc;
+
+	REG_WRITE(sdvo->sdvo_reg, sdvo->saveSDVO);
+
+	/* Force a full mode set on the crtc. We're supposed to have the
+	   mode_config lock already. */
+	if (connector->status == connector_status_connected)
+		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+					 NULL);
+}
+
+static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
+	.dpms = psb_intel_sdvo_dpms,
+	.mode_fixup = psb_intel_sdvo_mode_fixup,
+	.prepare = psb_intel_encoder_prepare,
+	.mode_set = psb_intel_sdvo_mode_set,
+	.commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.save = psb_intel_sdvo_save,
+	.restore = psb_intel_sdvo_restore,
+	.detect = psb_intel_sdvo_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = psb_intel_sdvo_set_property,
+	.destroy = psb_intel_sdvo_destroy,
+};
+
+static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
+	.get_modes = psb_intel_sdvo_get_modes,
+	.mode_valid = psb_intel_sdvo_mode_valid,
+	.best_encoder = psb_intel_best_encoder,
+};
+
+static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+
+	if (psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+		drm_mode_destroy(encoder->dev,
+				 psb_intel_sdvo->sdvo_lvds_fixed_mode);
+
+	i2c_del_adapter(&psb_intel_sdvo->ddc);
+	psb_intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
+	.destroy = psb_intel_sdvo_enc_destroy,
+};
+
+static void
+psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
+{
+	/* FIXME: At the moment, ddc_bus = 2 is the only thing that works.
+	 * We need to figure out if this is true for all available Poulsbo
+	 * hardware, or if we need to fiddle with the guessing code below.
+	 * The problem might go away if we can parse the SDVO mappings from
+	 * the BIOS. */
+	sdvo->ddc_bus = 2;
+
+#if 0
+	uint16_t mask = 0;
+	unsigned int num_bits;
+
+	/* Make a mask of outputs less than or equal to our own priority in the
+	 * list.
+	 */
+	switch (sdvo->controlled_output) {
+	case SDVO_OUTPUT_LVDS1:
+		mask |= SDVO_OUTPUT_LVDS1;
+	case SDVO_OUTPUT_LVDS0:
+		mask |= SDVO_OUTPUT_LVDS0;
+	case SDVO_OUTPUT_TMDS1:
+		mask |= SDVO_OUTPUT_TMDS1;
+	case SDVO_OUTPUT_TMDS0:
+		mask |= SDVO_OUTPUT_TMDS0;
+	case SDVO_OUTPUT_RGB1:
+		mask |= SDVO_OUTPUT_RGB1;
+	case SDVO_OUTPUT_RGB0:
+		mask |= SDVO_OUTPUT_RGB0;
+		break;
+	}
+
+	/* Count bits to find what number we are in the priority list. */
+	mask &= sdvo->caps.output_flags;
+	num_bits = hweight16(mask);
+	/* If more than 3 outputs, default to DDC bus 3 for now. */
+	if (num_bits > 3)
+		num_bits = 3;
+
+	/* Corresponds to SDVO_CONTROL_BUS_DDCx */
+	sdvo->ddc_bus = 1 << num_bits;
+#endif
+}
+
+/**
+ * Choose the appropriate DDC bus for control bus switch command for this
+ * SDVO output based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+static void
+psb_intel_sdvo_select_ddc_bus(struct drm_psb_private *dev_priv,
+			  struct psb_intel_sdvo *sdvo, u32 reg)
+{
+	struct sdvo_device_mapping *mapping;
+
+	if (IS_SDVOB(reg))
+		mapping = &(dev_priv->sdvo_mappings[0]);
+	else
+		mapping = &(dev_priv->sdvo_mappings[1]);
+
+	if (mapping->initialized)
+		sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+	else
+		psb_intel_sdvo_guess_ddc_bus(sdvo);
+}
+
+static void
+psb_intel_sdvo_select_i2c_bus(struct drm_psb_private *dev_priv,
+			  struct psb_intel_sdvo *sdvo, u32 reg)
+{
+	struct sdvo_device_mapping *mapping;
+	u8 pin, speed;
+
+	if (IS_SDVOB(reg))
+		mapping = &dev_priv->sdvo_mappings[0];
+	else
+		mapping = &dev_priv->sdvo_mappings[1];
+
+	pin = GMBUS_PORT_DPB;
+	speed = GMBUS_RATE_1MHZ >> 8;
+	if (mapping->initialized) {
+		pin = mapping->i2c_pin;
+		speed = mapping->i2c_speed;
+	}
+
+	if (pin < GMBUS_NUM_PORTS) {
+		sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+		gma_intel_gmbus_set_speed(sdvo->i2c, speed);
+		gma_intel_gmbus_force_bit(sdvo->i2c, true);
+	} else
+		sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+}
+
+static bool
+psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+	return psb_intel_sdvo_check_supp_encode(psb_intel_sdvo);
+}
+
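+/*
+ * SDVOB devices conventionally answer at slave address 0x70 and SDVOC
+ * devices at 0x72; prefer whatever the BIOS reported and fall back to
+ * the convention only when nothing was described.
+ */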
+static u8
+psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct sdvo_device_mapping *my_mapping, *other_mapping;
+
+	if (IS_SDVOB(sdvo_reg)) {
+		my_mapping = &dev_priv->sdvo_mappings[0];
+		other_mapping = &dev_priv->sdvo_mappings[1];
+	} else {
+		my_mapping = &dev_priv->sdvo_mappings[1];
+		other_mapping = &dev_priv->sdvo_mappings[0];
+	}
+
+	/* If the BIOS described our SDVO device, take advantage of it. */
+	if (my_mapping->slave_addr)
+		return my_mapping->slave_addr;
+
+	/* If the BIOS only described a different SDVO device, use the
+	 * address that it isn't using.
+	 */
+	if (other_mapping->slave_addr) {
+		if (other_mapping->slave_addr == 0x70)
+			return 0x72;
+		else
+			return 0x70;
+	}
+
+	/* No SDVO device info was found for either DVO port, so fall back
+	 * to the mapping assumption we had before BIOS parsing.
+	 */
+	if (IS_SDVOB(sdvo_reg))
+		return 0x70;
+	else
+		return 0x72;
+}
+
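+/*
+ * Common connector setup: register the connector with DRM, attach the
+ * shared helper funcs, tie it to the encoder and expose it via sysfs.
+ */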
+static void
+psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
+			  struct psb_intel_sdvo *encoder)
+{
+	drm_connector_init(encoder->base.base.dev,
+			   &connector->base.base,
+			   &psb_intel_sdvo_connector_funcs,
+			   connector->base.base.connector_type);
+
+	drm_connector_helper_add(&connector->base.base,
+				 &psb_intel_sdvo_connector_helper_funcs);
+
+	connector->base.base.interlace_allowed = 0;
+	connector->base.base.doublescan_allowed = 0;
+	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+
+	psb_intel_connector_attach_encoder(&connector->base, &encoder->base);
+	drm_sysfs_connector_add(&connector->base.base);
+}
+
+static void
+psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
+{
+	/* FIXME: We don't support HDMI at the moment
+	struct drm_device *dev = connector->base.base.dev;
+
+	intel_attach_force_audio_property(&connector->base.base);
+	intel_attach_broadcast_rgb_property(&connector->base.base);
+	*/
+}
+
+static bool
+psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+	struct drm_connector *connector;
+	struct psb_intel_connector *intel_connector;
+	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+	psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+	if (!psb_intel_sdvo_connector)
+		return false;
+
+	if (device == 0) {
+		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+		psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+	} else if (device == 1) {
+		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+		psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+	}
+
+	intel_connector = &psb_intel_sdvo_connector->base;
+	connector = &intel_connector->base;
+	// connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+	if (psb_intel_sdvo_is_hdmi_connector(psb_intel_sdvo, device)) {
+		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+		psb_intel_sdvo->is_hdmi = true;
+	}
+	psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+				       (1 << INTEL_ANALOG_CLONE_BIT));
+
+	psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+	if (psb_intel_sdvo->is_hdmi)
+		psb_intel_sdvo_add_hdmi_properties(psb_intel_sdvo_connector);
+
+	return true;
+}
+
+static bool
+psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
+{
+	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+	struct drm_connector *connector;
+	struct psb_intel_connector *intel_connector;
+	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+	psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+	if (!psb_intel_sdvo_connector)
+		return false;
+
+	intel_connector = &psb_intel_sdvo_connector->base;
+	connector = &intel_connector->base;
+	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+	psb_intel_sdvo->controlled_output |= type;
+	psb_intel_sdvo_connector->output_flag = type;
+
+	psb_intel_sdvo->is_tv = true;
+	psb_intel_sdvo->base.needs_tv_clock = true;
+	psb_intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+	psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+
+	if (!psb_intel_sdvo_tv_create_property(psb_intel_sdvo, psb_intel_sdvo_connector, type))
+		goto err;
+
+	if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+		goto err;
+
+	return true;
+
+err:
+	psb_intel_sdvo_destroy(connector);
+	return false;
+}
+
+static bool
+psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+	struct drm_connector *connector;
+	struct psb_intel_connector *intel_connector;
+	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+	psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+	if (!psb_intel_sdvo_connector)
+		return false;
+
+	intel_connector = &psb_intel_sdvo_connector->base;
+	connector = &intel_connector->base;
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+	connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+	if (device == 0) {
+		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+		psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+	} else if (device == 1) {
+		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+		psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+	}
+
+	psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+				       (1 << INTEL_ANALOG_CLONE_BIT));
+
+	psb_intel_sdvo_connector_init(psb_intel_sdvo_connector,
+				  psb_intel_sdvo);
+	return true;
+}
+
+static bool
+psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+	struct drm_connector *connector;
+	struct psb_intel_connector *intel_connector;
+	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+	psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+	if (!psb_intel_sdvo_connector)
+		return false;
+
+	intel_connector = &psb_intel_sdvo_connector->base;
+	connector = &intel_connector->base;
+	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+	if (device == 0) {
+		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+		psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+	} else if (device == 1) {
+		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+		psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+	}
+
+	psb_intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+
+	psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+	if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+		goto err;
+
+	return true;
+
+err:
+	psb_intel_sdvo_destroy(connector);
+	return false;
+}
+
+static bool
+psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags)
+{
+	psb_intel_sdvo->is_tv = false;
+	psb_intel_sdvo->base.needs_tv_clock = false;
+	psb_intel_sdvo->is_lvds = false;
+
+	/* Per the SDVO spec, an XXX1 function block may only exist if the
+	 * device also has the corresponding XXX0 function block. */
+
+	if (flags & SDVO_OUTPUT_TMDS0)
+		if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 0))
+			return false;
+
+	if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+		if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 1))
+			return false;
+
+	/* TV has no XXX1 function block */
+	if (flags & SDVO_OUTPUT_SVID0)
+		if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_SVID0))
+			return false;
+
+	if (flags & SDVO_OUTPUT_CVBS0)
+		if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_CVBS0))
+			return false;
+
+	if (flags & SDVO_OUTPUT_RGB0)
+		if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 0))
+			return false;
+
+	if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+		if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 1))
+			return false;
+
+	if (flags & SDVO_OUTPUT_LVDS0)
+		if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 0))
+			return false;
+
+	if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+		if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 1))
+			return false;
+
+	if ((flags & SDVO_OUTPUT_MASK) == 0) {
+		unsigned char bytes[2];
+
+		psb_intel_sdvo->controlled_output = 0;
+		memcpy(bytes, &psb_intel_sdvo->caps.output_flags, 2);
+		DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+			      SDVO_NAME(psb_intel_sdvo),
+			      bytes[0], bytes[1]);
+		return false;
+	}
+	psb_intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
+
+	return true;
+}
+
+static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
+					  struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+					  int type)
+{
+	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+	struct psb_intel_sdvo_tv_format format;
+	uint32_t format_map, i;
+
+	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, type))
+		return false;
+
+	BUILD_BUG_ON(sizeof(format) != 6);
+	if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+				  SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+				  &format, sizeof(format)))
+		return false;
+
+	memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+
+	if (format_map == 0)
+		return false;
+
+	psb_intel_sdvo_connector->format_supported_num = 0;
+	for (i = 0; i < TV_FORMAT_NUM; i++)
+		if (format_map & (1 << i))
+			psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
+
+	psb_intel_sdvo_connector->tv_format =
+			drm_property_create(dev, DRM_MODE_PROP_ENUM,
+					    "mode", psb_intel_sdvo_connector->format_supported_num);
+	if (!psb_intel_sdvo_connector->tv_format)
+		return false;
+
+	for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++)
+		drm_property_add_enum(
+				psb_intel_sdvo_connector->tv_format, i,
+				i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
+
+	psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
+	drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
+				      psb_intel_sdvo_connector->tv_format, 0);
+	return true;
+}
+
+#define ENHANCEMENT(name, NAME) do { \
+	if (enhancements.name) { \
+		if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+		    !psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+			return false; \
+		psb_intel_sdvo_connector->max_##name = data_value[0]; \
+		psb_intel_sdvo_connector->cur_##name = response; \
+		psb_intel_sdvo_connector->name = \
+			drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
+		if (!psb_intel_sdvo_connector->name) return false; \
+		drm_object_attach_property(&connector->base, \
+					      psb_intel_sdvo_connector->name, \
+					      psb_intel_sdvo_connector->cur_##name); \
+		DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+			      data_value[0], data_value[1], response); \
+	} \
+} while(0)
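+
+/*
+ * For example, ENHANCEMENT(brightness, BRIGHTNESS) expands to code that
+ * queries SDVO_CMD_GET_MAX_BRIGHTNESS and SDVO_CMD_GET_BRIGHTNESS, then
+ * creates a "brightness" range property spanning [0, max] and attaches
+ * it to the connector with the current value as its initial setting.
+ */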
+
+static bool
+psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
+				      struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+				      struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+	struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+	uint16_t response, data_value[2];
+
+	/* When horizontal overscan is supported, add the left/right margin properties */
+	if (enhancements.overscan_h) {
+		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+					  SDVO_CMD_GET_MAX_OVERSCAN_H,
+					  &data_value, 4))
+			return false;
+
+		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+					  SDVO_CMD_GET_OVERSCAN_H,
+					  &response, 2))
+			return false;
+
+		psb_intel_sdvo_connector->max_hscan = data_value[0];
+		psb_intel_sdvo_connector->left_margin = data_value[0] - response;
+		psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin;
+		psb_intel_sdvo_connector->left =
+			drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
+		if (!psb_intel_sdvo_connector->left)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      psb_intel_sdvo_connector->left,
+					      psb_intel_sdvo_connector->left_margin);
+
+		psb_intel_sdvo_connector->right =
+			drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
+		if (!psb_intel_sdvo_connector->right)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      psb_intel_sdvo_connector->right,
+					      psb_intel_sdvo_connector->right_margin);
+		DRM_DEBUG_KMS("h_overscan: max %d, "
+			      "default %d, current %d\n",
+			      data_value[0], data_value[1], response);
+	}
+
+	if (enhancements.overscan_v) {
+		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+					  SDVO_CMD_GET_MAX_OVERSCAN_V,
+					  &data_value, 4))
+			return false;
+
+		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+					  SDVO_CMD_GET_OVERSCAN_V,
+					  &response, 2))
+			return false;
+
+		psb_intel_sdvo_connector->max_vscan = data_value[0];
+		psb_intel_sdvo_connector->top_margin = data_value[0] - response;
+		psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin;
+		psb_intel_sdvo_connector->top =
+			drm_property_create_range(dev, 0, "top_margin", 0, data_value[0]);
+		if (!psb_intel_sdvo_connector->top)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      psb_intel_sdvo_connector->top,
+					      psb_intel_sdvo_connector->top_margin);
+
+		psb_intel_sdvo_connector->bottom =
+			drm_property_create_range(dev, 0, "bottom_margin", 0, data_value[0]);
+		if (!psb_intel_sdvo_connector->bottom)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      psb_intel_sdvo_connector->bottom,
+					      psb_intel_sdvo_connector->bottom_margin);
+		DRM_DEBUG_KMS("v_overscan: max %d, "
+			      "default %d, current %d\n",
+			      data_value[0], data_value[1], response);
+	}
+
+	ENHANCEMENT(hpos, HPOS);
+	ENHANCEMENT(vpos, VPOS);
+	ENHANCEMENT(saturation, SATURATION);
+	ENHANCEMENT(contrast, CONTRAST);
+	ENHANCEMENT(hue, HUE);
+	ENHANCEMENT(sharpness, SHARPNESS);
+	ENHANCEMENT(brightness, BRIGHTNESS);
+	ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+	ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+	ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+	ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+	ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+	if (enhancements.dot_crawl) {
+		if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+			return false;
+
+		psb_intel_sdvo_connector->max_dot_crawl = 1;
+		psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+		psb_intel_sdvo_connector->dot_crawl =
+			drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
+		if (!psb_intel_sdvo_connector->dot_crawl)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      psb_intel_sdvo_connector->dot_crawl,
+					      psb_intel_sdvo_connector->cur_dot_crawl);
+		DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+	}
+
+	return true;
+}
+
+static bool
+psb_intel_sdvo_create_enhance_property_lvds(struct psb_intel_sdvo *psb_intel_sdvo,
+					struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+					struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+	struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+	uint16_t response, data_value[2];
+
+	ENHANCEMENT(brightness, BRIGHTNESS);
+
+	return true;
+}
+#undef ENHANCEMENT
+
+static bool psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
+					       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector)
+{
+	union {
+		struct psb_intel_sdvo_enhancements_reply reply;
+		uint16_t response;
+	} enhancements;
+
+	BUILD_BUG_ON(sizeof(enhancements) != 2);
+
+	enhancements.response = 0;
+	psb_intel_sdvo_get_value(psb_intel_sdvo,
+			     SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+			     &enhancements, sizeof(enhancements));
+	if (enhancements.response == 0) {
+		DRM_DEBUG_KMS("No enhancement is supported\n");
+		return true;
+	}
+
+	if (IS_TV(psb_intel_sdvo_connector))
+		return psb_intel_sdvo_create_enhance_property_tv(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+	else if(IS_LVDS(psb_intel_sdvo_connector))
+		return psb_intel_sdvo_create_enhance_property_lvds(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+	else
+		return true;
+}
+
+static int psb_intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+				     struct i2c_msg *msgs,
+				     int num)
+{
+	struct psb_intel_sdvo *sdvo = adapter->algo_data;
+
+	if (!psb_intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+		return -EIO;
+
+	return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 psb_intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+	struct psb_intel_sdvo *sdvo = adapter->algo_data;
+	return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm psb_intel_sdvo_ddc_proxy = {
+	.master_xfer	= psb_intel_sdvo_ddc_proxy_xfer,
+	.functionality	= psb_intel_sdvo_ddc_proxy_func
+};
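+
+/*
+ * The proxy makes the SDVO device's DDC channels look like an ordinary
+ * i2c_adapter: each transfer first issues SDVO_CMD_SET_CONTROL_BUS_SWITCH
+ * to route the DDC bus selected in sdvo->ddc_bus, then hands the messages
+ * to the underlying GMBUS adapter chosen by psb_intel_sdvo_select_i2c_bus().
+ */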
+
+static bool
+psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
+			  struct drm_device *dev)
+{
+	sdvo->ddc.owner = THIS_MODULE;
+	sdvo->ddc.class = I2C_CLASS_DDC;
+	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+	sdvo->ddc.dev.parent = &dev->pdev->dev;
+	sdvo->ddc.algo_data = sdvo;
+	sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
+
+	return i2c_add_adapter(&sdvo->ddc) == 0;
+}
+
+bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_encoder *psb_intel_encoder;
+	struct psb_intel_sdvo *psb_intel_sdvo;
+	int i;
+
+	psb_intel_sdvo = kzalloc(sizeof(struct psb_intel_sdvo), GFP_KERNEL);
+	if (!psb_intel_sdvo)
+		return false;
+
+	psb_intel_sdvo->sdvo_reg = sdvo_reg;
+	psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+	psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+	if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
+		kfree(psb_intel_sdvo);
+		return false;
+	}
+
+	/* encoder type will be decided later */
+	psb_intel_encoder = &psb_intel_sdvo->base;
+	psb_intel_encoder->type = INTEL_OUTPUT_SDVO;
+	drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
+
+	/* Read the regs to test if we can talk to the device */
+	for (i = 0; i < 0x40; i++) {
+		u8 byte;
+
+		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, i, &byte)) {
+			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
+				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+			goto err;
+		}
+	}
+
+	if (IS_SDVOB(sdvo_reg))
+		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
+	else
+		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+
+	drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs);
+
+	/* In default case sdvo lvds is false */
+	if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
+		goto err;
+
+	if (!psb_intel_sdvo_output_setup(psb_intel_sdvo,
+					 psb_intel_sdvo->caps.output_flags)) {
+		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+		goto err;
+	}
+
+	psb_intel_sdvo_select_ddc_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+
+	/* Set the input timing to the screen. Assume always input 0. */
+	if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+		goto err;
+
+	if (!psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_sdvo,
+						    &psb_intel_sdvo->pixel_clock_min,
+						    &psb_intel_sdvo->pixel_clock_max))
+		goto err;
+
+	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+			"clock range %dMHz - %dMHz, "
+			"input 1: %c, input 2: %c, "
+			"output 1: %c, output 2: %c\n",
+			SDVO_NAME(psb_intel_sdvo),
+			psb_intel_sdvo->caps.vendor_id, psb_intel_sdvo->caps.device_id,
+			psb_intel_sdvo->caps.device_rev_id,
+			psb_intel_sdvo->pixel_clock_min / 1000,
+			psb_intel_sdvo->pixel_clock_max / 1000,
+			(psb_intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+			(psb_intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+			/* check currently supported outputs */
+			psb_intel_sdvo->caps.output_flags &
+			(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+			psb_intel_sdvo->caps.output_flags &
+			(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+	return true;
+
+err:
+	drm_encoder_cleanup(&psb_intel_encoder->base);
+	i2c_del_adapter(&psb_intel_sdvo->ddc);
+	kfree(psb_intel_sdvo);
+
+	return false;
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h b/linux-imx/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
new file mode 100644
index 0000000..600e797
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
@@ -0,0 +1,723 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST   (0)
+#define SDVO_OUTPUT_TMDS0   (1 << 0)
+#define SDVO_OUTPUT_RGB0    (1 << 1)
+#define SDVO_OUTPUT_CVBS0   (1 << 2)
+#define SDVO_OUTPUT_SVID0   (1 << 3)
+#define SDVO_OUTPUT_YPRPB0  (1 << 4)
+#define SDVO_OUTPUT_SCART0  (1 << 5)
+#define SDVO_OUTPUT_LVDS0   (1 << 6)
+#define SDVO_OUTPUT_TMDS1   (1 << 8)
+#define SDVO_OUTPUT_RGB1    (1 << 9)
+#define SDVO_OUTPUT_CVBS1   (1 << 10)
+#define SDVO_OUTPUT_SVID1   (1 << 11)
+#define SDVO_OUTPUT_YPRPB1  (1 << 12)
+#define SDVO_OUTPUT_SCART1  (1 << 13)
+#define SDVO_OUTPUT_LVDS1   (1 << 14)
+#define SDVO_OUTPUT_LAST    (14)
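+
+/*
+ * Outputs on the first SDVO device occupy bits 0-6 and those on the second
+ * occupy bits 8-14, i.e. each XXX1 flag is its XXX0 flag shifted up by 8;
+ * bits 7 and 15 are unused.
+ */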
+
+struct psb_intel_sdvo_caps {
+    u8 vendor_id;
+    u8 device_id;
+    u8 device_rev_id;
+    u8 sdvo_version_major;
+    u8 sdvo_version_minor;
+    unsigned int sdvo_inputs_mask:2;
+    unsigned int smooth_scaling:1;
+    unsigned int sharp_scaling:1;
+    unsigned int up_scaling:1;
+    unsigned int down_scaling:1;
+    unsigned int stall_support:1;
+    unsigned int pad:1;
+    u16 output_flags;
+} __attribute__((packed));
+
+/** This matches the EDID DTD structure, more or less */
+struct psb_intel_sdvo_dtd {
+    struct {
+	u16 clock;		/**< pixel clock, in 10kHz units */
+	u8 h_active;		/**< lower 8 bits (pixels) */
+	u8 h_blank;		/**< lower 8 bits (pixels) */
+	u8 h_high;		/**< upper 4 bits each h_active, h_blank */
+	u8 v_active;		/**< lower 8 bits (lines) */
+	u8 v_blank;		/**< lower 8 bits (lines) */
+	u8 v_high;		/**< upper 4 bits each v_active, v_blank */
+    } part1;
+
+    struct {
+	u8 h_sync_off;	/**< lower 8 bits, from hblank start */
+	u8 h_sync_width;	/**< lower 8 bits (pixels) */
+	/** lower 4 bits each vsync offset, vsync width */
+	u8 v_sync_off_width;
+	/**
+	 * 2 high bits of hsync offset, 2 high bits of hsync width,
+	 * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+	 */
+	u8 sync_off_width_high;
+	u8 dtd_flags;
+	u8 sdvo_flags;
+	/** bits 6-7 of vsync offset at bits 6-7 */
+	u8 v_sync_off_high;
+	u8 reserved;
+    } part2;
+} __attribute__((packed));
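+
+/*
+ * Example: the full horizontal active count is reassembled as
+ * ((part1.h_high & 0xf0) << 4) | part1.h_active, and the blanking count
+ * as ((part1.h_high & 0x0f) << 8) | part1.h_blank; the vertical fields
+ * follow the same pattern.
+ */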
+
+struct psb_intel_sdvo_pixel_clock_range {
+    u16 min;			/**< pixel clock, in 10kHz units */
+    u16 max;			/**< pixel clock, in 10kHz units */
+} __attribute__((packed));
+
+struct psb_intel_sdvo_preferred_input_timing_args {
+    u16 clock;
+    u16 width;
+    u16 height;
+    u8	interlace:1;
+    u8	scaled:1;
+    u8	pad:6;
+} __attribute__((packed));
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0				0x07
+#define SDVO_I2C_ARG_1				0x06
+#define SDVO_I2C_ARG_2				0x05
+#define SDVO_I2C_ARG_3				0x04
+#define SDVO_I2C_ARG_4				0x03
+#define SDVO_I2C_ARG_5				0x02
+#define SDVO_I2C_ARG_6				0x01
+#define SDVO_I2C_ARG_7				0x00
+#define SDVO_I2C_OPCODE				0x08
+#define SDVO_I2C_CMD_STATUS			0x09
+#define SDVO_I2C_RETURN_0			0x0a
+#define SDVO_I2C_RETURN_1			0x0b
+#define SDVO_I2C_RETURN_2			0x0c
+#define SDVO_I2C_RETURN_3			0x0d
+#define SDVO_I2C_RETURN_4			0x0e
+#define SDVO_I2C_RETURN_5			0x0f
+#define SDVO_I2C_RETURN_6			0x10
+#define SDVO_I2C_RETURN_7			0x11
+#define SDVO_I2C_VENDOR_BEGIN			0x20
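+
+/*
+ * Command flow, roughly: write the arguments to the SDVO_I2C_ARG_*
+ * registers (note ARG_0 sits at the highest address, 0x07), write the
+ * opcode to SDVO_I2C_OPCODE, poll SDVO_I2C_CMD_STATUS until it leaves
+ * SDVO_CMD_STATUS_PENDING, then read any reply bytes starting at
+ * SDVO_I2C_RETURN_0.
+ */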
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON		0x0
+#define SDVO_CMD_STATUS_SUCCESS			0x1
+#define SDVO_CMD_STATUS_NOTSUPP			0x2
+#define SDVO_CMD_STATUS_INVALID_ARG		0x3
+#define SDVO_CMD_STATUS_PENDING			0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED	0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP	0x6
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET					0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS			0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV			0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR			SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR			SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH			SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS			0x03
+struct psb_intel_sdvo_get_trained_inputs_response {
+    unsigned int input0_trained:1;
+    unsigned int input1_trained:1;
+    unsigned int pad:6;
+} __attribute__((packed));
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS			0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags.  Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS			0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP				0x06
+struct psb_intel_sdvo_in_out_map {
+    u16 in0, in1;
+};
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP				0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS			0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT			0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG			0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG			0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE		0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+    u16 interrupt_status;
+    unsigned int ambient_light_interrupt:1;
+    unsigned int hdmi_audio_encrypt_change:1;
+    unsigned int pad:6;
+} __attribute__((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT			0x10
+struct psb_intel_sdvo_set_target_input_args {
+    unsigned int target_1:1;
+    unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT			0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1		0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2		0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1		0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2		0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1		0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2		0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1		0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2		0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW				SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH				SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE				SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK				SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH				SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE				SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK				SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH				SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF				SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH				SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH			SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH			SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS				SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED				(1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK				(3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK				(3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK				(3 << 1)
+# define SDVO_DTD_SDVO_FLAGS				SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL				(1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED				(0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT				(1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK			(3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE			(0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP			(1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH			(2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH			SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING		0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW		SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH		SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW		SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH		SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW		SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH	SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS		SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED		(1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED		(1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1	0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2	0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE		0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE		0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS		0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT			0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT			0x21
+# define SDVO_CLOCK_RATE_MULT_1X				(1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X				(1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X				(1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS		0x27
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+struct psb_intel_sdvo_tv_format {
+    unsigned int ntsc_m:1;
+    unsigned int ntsc_j:1;
+    unsigned int ntsc_443:1;
+    unsigned int pal_b:1;
+    unsigned int pal_d:1;
+    unsigned int pal_g:1;
+    unsigned int pal_h:1;
+    unsigned int pal_i:1;
+
+    unsigned int pal_m:1;
+    unsigned int pal_n:1;
+    unsigned int pal_nc:1;
+    unsigned int pal_60:1;
+    unsigned int secam_b:1;
+    unsigned int secam_d:1;
+    unsigned int secam_g:1;
+    unsigned int secam_k:1;
+
+    unsigned int secam_k1:1;
+    unsigned int secam_l:1;
+    unsigned int secam_60:1;
+    unsigned int hdtv_std_smpte_240m_1080i_59:1;
+    unsigned int hdtv_std_smpte_240m_1080i_60:1;
+    unsigned int hdtv_std_smpte_260m_1080i_59:1;
+    unsigned int hdtv_std_smpte_260m_1080i_60:1;
+    unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+    unsigned int hdtv_std_smpte_274m_1080i_59:1;
+    unsigned int hdtv_std_smpte_274m_1080i_60:1;
+    unsigned int hdtv_std_smpte_274m_1080p_23:1;
+    unsigned int hdtv_std_smpte_274m_1080p_24:1;
+    unsigned int hdtv_std_smpte_274m_1080p_25:1;
+    unsigned int hdtv_std_smpte_274m_1080p_29:1;
+    unsigned int hdtv_std_smpte_274m_1080p_30:1;
+    unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+    unsigned int hdtv_std_smpte_274m_1080p_59:1;
+    unsigned int hdtv_std_smpte_274m_1080p_60:1;
+    unsigned int hdtv_std_smpte_295m_1080i_50:1;
+    unsigned int hdtv_std_smpte_295m_1080p_50:1;
+    unsigned int hdtv_std_smpte_296m_720p_59:1;
+    unsigned int hdtv_std_smpte_296m_720p_60:1;
+    unsigned int hdtv_std_smpte_296m_720p_50:1;
+    unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+    unsigned int hdtv_std_smpte_170m_480i_59:1;
+    unsigned int hdtv_std_iturbt601_576i_50:1;
+    unsigned int hdtv_std_iturbt601_576p_50:1;
+    unsigned int hdtv_std_eia_7702a_480i_60:1;
+    unsigned int hdtv_std_eia_7702a_480p_60:1;
+    unsigned int pad:3;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_TV_FORMAT				0x28
+
+#define SDVO_CMD_SET_TV_FORMAT				0x29
+
+/** Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT		0x83
+struct psb_intel_sdvo_sdtv_resolution_request {
+    unsigned int ntsc_m:1;
+    unsigned int ntsc_j:1;
+    unsigned int ntsc_443:1;
+    unsigned int pal_b:1;
+    unsigned int pal_d:1;
+    unsigned int pal_g:1;
+    unsigned int pal_h:1;
+    unsigned int pal_i:1;
+
+    unsigned int pal_m:1;
+    unsigned int pal_n:1;
+    unsigned int pal_nc:1;
+    unsigned int pal_60:1;
+    unsigned int secam_b:1;
+    unsigned int secam_d:1;
+    unsigned int secam_g:1;
+    unsigned int secam_k:1;
+
+    unsigned int secam_k1:1;
+    unsigned int secam_l:1;
+    unsigned int secam_60:1;
+    unsigned int pad:5;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_sdtv_resolution_reply {
+    unsigned int res_320x200:1;
+    unsigned int res_320x240:1;
+    unsigned int res_400x300:1;
+    unsigned int res_640x350:1;
+    unsigned int res_640x400:1;
+    unsigned int res_640x480:1;
+    unsigned int res_704x480:1;
+    unsigned int res_704x576:1;
+
+    unsigned int res_720x350:1;
+    unsigned int res_720x400:1;
+    unsigned int res_720x480:1;
+    unsigned int res_720x540:1;
+    unsigned int res_720x576:1;
+    unsigned int res_768x576:1;
+    unsigned int res_800x600:1;
+    unsigned int res_832x624:1;
+
+    unsigned int res_920x766:1;
+    unsigned int res_1024x768:1;
+    unsigned int res_1280x1024:1;
+    unsigned int pad:5;
+} __attribute__((packed));
+
+/* Get the supported resolutions with a square pixel aspect ratio that
+   can be scaled for the requested HDTV format */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT		0x85
+
+struct psb_intel_sdvo_hdtv_resolution_request {
+    unsigned int hdtv_std_smpte_240m_1080i_59:1;
+    unsigned int hdtv_std_smpte_240m_1080i_60:1;
+    unsigned int hdtv_std_smpte_260m_1080i_59:1;
+    unsigned int hdtv_std_smpte_260m_1080i_60:1;
+    unsigned int hdtv_std_smpte_274m_1080i_50:1;
+    unsigned int hdtv_std_smpte_274m_1080i_59:1;
+    unsigned int hdtv_std_smpte_274m_1080i_60:1;
+    unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+    unsigned int hdtv_std_smpte_274m_1080p_24:1;
+    unsigned int hdtv_std_smpte_274m_1080p_25:1;
+    unsigned int hdtv_std_smpte_274m_1080p_29:1;
+    unsigned int hdtv_std_smpte_274m_1080p_30:1;
+    unsigned int hdtv_std_smpte_274m_1080p_50:1;
+    unsigned int hdtv_std_smpte_274m_1080p_59:1;
+    unsigned int hdtv_std_smpte_274m_1080p_60:1;
+    unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+    unsigned int hdtv_std_smpte_295m_1080p_50:1;
+    unsigned int hdtv_std_smpte_296m_720p_59:1;
+    unsigned int hdtv_std_smpte_296m_720p_60:1;
+    unsigned int hdtv_std_smpte_296m_720p_50:1;
+    unsigned int hdtv_std_smpte_293m_480p_59:1;
+    unsigned int hdtv_std_smpte_170m_480i_59:1;
+    unsigned int hdtv_std_iturbt601_576i_50:1;
+    unsigned int hdtv_std_iturbt601_576p_50:1;
+
+    unsigned int hdtv_std_eia_7702a_480i_60:1;
+    unsigned int hdtv_std_eia_7702a_480p_60:1;
+    unsigned int pad:6;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_hdtv_resolution_reply {
+    unsigned int res_640x480:1;
+    unsigned int res_800x600:1;
+    unsigned int res_1024x768:1;
+    unsigned int res_1280x960:1;
+    unsigned int res_1400x1050:1;
+    unsigned int res_1600x1200:1;
+    unsigned int res_1920x1440:1;
+    unsigned int res_2048x1536:1;
+
+    unsigned int res_2560x1920:1;
+    unsigned int res_3200x2400:1;
+    unsigned int res_3840x2880:1;
+    unsigned int pad1:5;
+
+    unsigned int res_848x480:1;
+    unsigned int res_1064x600:1;
+    unsigned int res_1280x720:1;
+    unsigned int res_1360x768:1;
+    unsigned int res_1704x960:1;
+    unsigned int res_1864x1050:1;
+    unsigned int res_1920x1080:1;
+    unsigned int res_2128x1200:1;
+
+    unsigned int res_2560x1400:1;
+    unsigned int res_2728x1536:1;
+    unsigned int res_3408x1920:1;
+    unsigned int res_4264x2400:1;
+    unsigned int res_5120x2880:1;
+    unsigned int pad2:3;
+
+    unsigned int res_768x480:1;
+    unsigned int res_960x600:1;
+    unsigned int res_1152x720:1;
+    unsigned int res_1124x768:1;
+    unsigned int res_1536x960:1;
+    unsigned int res_1680x1050:1;
+    unsigned int res_1728x1080:1;
+    unsigned int res_1920x1200:1;
+
+    unsigned int res_2304x1440:1;
+    unsigned int res_2456x1536:1;
+    unsigned int res_3072x1920:1;
+    unsigned int res_3840x2400:1;
+    unsigned int res_4608x2880:1;
+    unsigned int pad3:3;
+
+    unsigned int res_1280x1024:1;
+    unsigned int pad4:7;
+
+    unsigned int res_1280x768:1;
+    unsigned int pad5:7;
+} __attribute__((packed));
+
+/* Get supported power states; returns info for the encoder and monitor and
+   relies on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES		0x2a
+/* Get the power state; returns info for the encoder and monitor and relies
+   on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE			0x2b
+#define SDVO_CMD_GET_ENCODER_POWER_STATE		0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE		0x2c
+# define SDVO_ENCODER_STATE_ON					(1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY				(1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND				(1 << 2)
+# define SDVO_ENCODER_STATE_OFF					(1 << 3)
+# define SDVO_MONITOR_STATE_ON					(1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY				(1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND				(1 << 6)
+# define SDVO_MONITOR_STATE_OFF					(1 << 7)
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING		0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING		0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING		0x2f
+/**
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 8:9 of the 10-bit values.
+ */
+struct psb_sdvo_panel_power_sequencing {
+    u8 t0;
+    u8 t1;
+    u8 t2;
+    u8 t3;
+    u8 t4;
+
+    unsigned int t0_high:2;
+    unsigned int t1_high:2;
+    unsigned int t2_high:2;
+    unsigned int t3_high:2;
+
+    unsigned int t4_high:2;
+    unsigned int pad:6;
+} __attribute__((packed));
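+
+/*
+ * Example: the full 10-bit t0 delay in milliseconds is
+ * t0 | (t0_high << 8); the other four timings decode the same way.
+ */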
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL		0x30
+struct sdvo_max_backlight_reply {
+    u8 max_value;
+    u8 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL			0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL			0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT			0x33
+struct sdvo_get_ambient_light_reply {
+    u16 trip_low;
+    u16 trip_high;
+    u16 value;
+} __attribute__((packed));
+#define SDVO_CMD_SET_AMBIENT_LIGHT			0x34
+struct sdvo_set_ambient_light_reply {
+    u16 trip_low;
+    u16 trip_high;
+    unsigned int enable:1;
+    unsigned int pad:7;
+} __attribute__((packed));
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE		0x7d
+# define SDVO_DISPLAY_STATE_ON				(1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY			(1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND			(1 << 2)
+# define SDVO_DISPLAY_STATE_OFF				(1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS		0x84
+struct psb_intel_sdvo_enhancements_reply {
+    unsigned int flicker_filter:1;
+    unsigned int flicker_filter_adaptive:1;
+    unsigned int flicker_filter_2d:1;
+    unsigned int saturation:1;
+    unsigned int hue:1;
+    unsigned int brightness:1;
+    unsigned int contrast:1;
+    unsigned int overscan_h:1;
+
+    unsigned int overscan_v:1;
+    unsigned int hpos:1;
+    unsigned int vpos:1;
+    unsigned int sharpness:1;
+    unsigned int dot_crawl:1;
+    unsigned int dither:1;
+    unsigned int tv_chroma_filter:1;
+    unsigned int tv_luma_filter:1;
+} __attribute__((packed));
+
+/* Picture enhancement limits below are dependent on the current TV format,
+ * and thus need to be queried and set after it.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER			0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE	0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D		0x52
+#define SDVO_CMD_GET_MAX_SATURATION			0x55
+#define SDVO_CMD_GET_MAX_HUE				0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS			0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST			0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H			0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V			0x64
+#define SDVO_CMD_GET_MAX_HPOS				0x67
+#define SDVO_CMD_GET_MAX_VPOS				0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS			0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER		0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER			0x77
+struct psb_intel_sdvo_enhancement_limits_reply {
+    u16 max_value;
+    u16 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION		0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION		0x80
+# define SDVO_LVDS_COLOR_DEPTH_18			(0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24			(1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG			(0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI			(1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL			(0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL				(1 << 4)
+
+#define SDVO_CMD_GET_FLICKER_FILTER			0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER			0x4f
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE		0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE		0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D			0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D			0x54
+#define SDVO_CMD_GET_SATURATION				0x56
+#define SDVO_CMD_SET_SATURATION				0x57
+#define SDVO_CMD_GET_HUE				0x59
+#define SDVO_CMD_SET_HUE				0x5a
+#define SDVO_CMD_GET_BRIGHTNESS				0x5c
+#define SDVO_CMD_SET_BRIGHTNESS				0x5d
+#define SDVO_CMD_GET_CONTRAST				0x5f
+#define SDVO_CMD_SET_CONTRAST				0x60
+#define SDVO_CMD_GET_OVERSCAN_H				0x62
+#define SDVO_CMD_SET_OVERSCAN_H				0x63
+#define SDVO_CMD_GET_OVERSCAN_V				0x65
+#define SDVO_CMD_SET_OVERSCAN_V				0x66
+#define SDVO_CMD_GET_HPOS				0x68
+#define SDVO_CMD_SET_HPOS				0x69
+#define SDVO_CMD_GET_VPOS				0x6b
+#define SDVO_CMD_SET_VPOS				0x6c
+#define SDVO_CMD_GET_SHARPNESS				0x6e
+#define SDVO_CMD_SET_SHARPNESS				0x6f
+#define SDVO_CMD_GET_TV_CHROMA_FILTER			0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER			0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER			0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER			0x79
+struct psb_intel_sdvo_enhancements_arg {
+    u16 value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_DOT_CRAWL				0x70
+#define SDVO_CMD_SET_DOT_CRAWL				0x71
+# define SDVO_DOT_CRAWL_ON					(1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON				(1 << 1)
+
+#define SDVO_CMD_GET_DITHER				0x72
+#define SDVO_CMD_SET_DITHER				0x73
+# define SDVO_DITHER_ON						(1 << 0)
+# define SDVO_DITHER_DEFAULT_ON					(1 << 1)
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH			0x7a
+# define SDVO_CONTROL_BUS_PROM				(1 << 0)
+# define SDVO_CONTROL_BUS_DDC1				(1 << 1)
+# define SDVO_CONTROL_BUS_DDC2				(1 << 2)
+# define SDVO_CONTROL_BUS_DDC3				(1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE	0x9d
+#define SDVO_CMD_GET_ENCODE		0x9e
+#define SDVO_CMD_SET_ENCODE		0x9f
+  #define SDVO_ENCODE_DVI	0x0
+  #define SDVO_ENCODE_HDMI	0x1
+#define SDVO_CMD_SET_PIXEL_REPLI	0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI	0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP	0x8d
+#define SDVO_CMD_SET_COLORIMETRY	0x8e
+  #define SDVO_COLORIMETRY_RGB256   0x0
+  #define SDVO_COLORIMETRY_RGB220   0x1
+  #define SDVO_COLORIMETRY_YCrCb422 0x3
+  #define SDVO_COLORIMETRY_YCrCb444 0x4
+#define SDVO_CMD_GET_COLORIMETRY	0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT		0x91
+#define SDVO_CMD_GET_AUDIO_STAT		0x92
+#define SDVO_CMD_SET_HBUF_INDEX		0x93
+#define SDVO_CMD_GET_HBUF_INDEX		0x94
+#define SDVO_CMD_GET_HBUF_INFO		0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT	0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT	0x97
+#define SDVO_CMD_SET_HBUF_DATA		0x98
+#define SDVO_CMD_GET_HBUF_DATA		0x99
+#define SDVO_CMD_SET_HBUF_TXRATE	0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE	0x9b
+  #define SDVO_HBUF_TX_DISABLED	(0 << 6)
+  #define SDVO_HBUF_TX_ONCE	(2 << 6)
+  #define SDVO_HBUF_TX_VSYNC	(3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO	0x9c
+#define SDVO_NEED_TO_STALL  (1 << 7)
+
+struct psb_intel_sdvo_encode {
+    u8 dvi_rev;
+    u8 hdmi_rev;
+} __attribute__ ((packed));
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_irq.c b/linux-imx/drivers/gpu/drm/gma500/psb_irq.c
new file mode 100644
index 0000000..029eccf
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_irq.c
@@ -0,0 +1,642 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ * develop this driver.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include "psb_irq.h"
+#include "mdfld_output.h"
+
+/*
+ * inline functions
+ */
+
+static inline u32
+psb_pipestat(int pipe)
+{
+	if (pipe == 0)
+		return PIPEASTAT;
+	if (pipe == 1)
+		return PIPEBSTAT;
+	if (pipe == 2)
+		return PIPECSTAT;
+	BUG();
+}
+
+static inline u32
+mid_pipe_event(int pipe)
+{
+	if (pipe == 0)
+		return _PSB_PIPEA_EVENT_FLAG;
+	if (pipe == 1)
+		return _MDFLD_PIPEB_EVENT_FLAG;
+	if (pipe == 2)
+		return _MDFLD_PIPEC_EVENT_FLAG;
+	BUG();
+}
+
+static inline u32
+mid_pipe_vsync(int pipe)
+{
+	if (pipe == 0)
+		return _PSB_VSYNC_PIPEA_FLAG;
+	if (pipe == 1)
+		return _PSB_VSYNC_PIPEB_FLAG;
+	if (pipe == 2)
+		return _MDFLD_PIPEC_VBLANK_FLAG;
+	BUG();
+}
+
+static inline u32
+mid_pipeconf(int pipe)
+{
+	if (pipe == 0)
+		return PIPEACONF;
+	if (pipe == 1)
+		return PIPEBCONF;
+	if (pipe == 2)
+		return PIPECCONF;
+	BUG();
+}
+
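+/*
+ * dev_priv->pipestat[] mirrors the PIPExSTAT layout: interrupt enable bits
+ * live in the high 16 bits with their status bits 16 positions below, so
+ * writing mask | (mask >> 16) both sets the enable bit and clears any
+ * stale status.
+ */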
+void
+psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+	if ((dev_priv->pipestat[pipe] & mask) != mask) {
+		u32 reg = psb_pipestat(pipe);
+		dev_priv->pipestat[pipe] |= mask;
+		/* Enable the interrupt, clear any pending status */
+		if (gma_power_begin(dev_priv->dev, false)) {
+			u32 writeVal = PSB_RVDC32(reg);
+			writeVal |= (mask | (mask >> 16));
+			PSB_WVDC32(writeVal, reg);
+			(void) PSB_RVDC32(reg);
+			gma_power_end(dev_priv->dev);
+		}
+	}
+}
+
+void
+psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+{
+	if ((dev_priv->pipestat[pipe] & mask) != 0) {
+		u32 reg = psb_pipestat(pipe);
+		dev_priv->pipestat[pipe] &= ~mask;
+		if (gma_power_begin(dev_priv->dev, false)) {
+			u32 writeVal = PSB_RVDC32(reg);
+			writeVal &= ~mask;
+			PSB_WVDC32(writeVal, reg);
+			(void) PSB_RVDC32(reg);
+			gma_power_end(dev_priv->dev);
+		}
+	}
+}
+
+static void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+	if (gma_power_begin(dev_priv->dev, false)) {
+		u32 pipe_event = mid_pipe_event(pipe);
+		dev_priv->vdc_irq_mask |= pipe_event;
+		PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+		PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+		gma_power_end(dev_priv->dev);
+	}
+}
+
+static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+{
+	if (dev_priv->pipestat[pipe] == 0) {
+		if (gma_power_begin(dev_priv->dev, false)) {
+			u32 pipe_event = mid_pipe_event(pipe);
+			dev_priv->vdc_irq_mask &= ~pipe_event;
+			PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+			PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+			gma_power_end(dev_priv->dev);
+		}
+	}
+}
+
+/**
+ * Display controller interrupt handler for pipe events.
+ */
+static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+
+	uint32_t pipe_stat_val = 0;
+	uint32_t pipe_stat_reg = psb_pipestat(pipe);
+	uint32_t pipe_enable = dev_priv->pipestat[pipe];
+	uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
+	uint32_t pipe_clear;
+	uint32_t i = 0;
+
+	spin_lock(&dev_priv->irqmask_lock);
+
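+	/*
+	 * Reduce the register value to just the status bits (low 16) that
+	 * are enabled in dev_priv->pipestat[pipe] and whose enable bit,
+	 * 16 positions above, is also set in the hardware.
+	 */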
+	pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
+	pipe_stat_val &= pipe_enable | pipe_status;
+	pipe_stat_val &= pipe_stat_val >> 16;
+
+	spin_unlock(&dev_priv->irqmask_lock);
+
+	/* Clear the 2nd level interrupt status bits
+	 * Sometimes the bits are very sticky so we repeat until they unstick */
+	for (i = 0; i < 0xffff; i++) {
+		PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
+		pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
+
+		if (pipe_clear == 0)
+			break;
+	}
+
+	if (pipe_clear)
+		dev_err(dev->dev,
+		"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
+		__func__, pipe, PSB_RVDC32(pipe_stat_reg));
+
+	if (pipe_stat_val & PIPE_VBLANK_STATUS)
+		drm_handle_vblank(dev, pipe);
+
+	if (pipe_stat_val & PIPE_TE_STATUS)
+		drm_handle_vblank(dev, pipe);
+}
+
+/*
+ * Display controller interrupt handler.
+ */
+static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
+{
+	if (vdc_stat & _PSB_IRQ_ASLE)
+		psb_intel_opregion_asle_intr(dev);
+
+	if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
+		mid_pipe_event_handler(dev, 0);
+
+	if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
+		mid_pipe_event_handler(dev, 1);
+}
+
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = arg;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
+	int handled = 0;
+
+	spin_lock(&dev_priv->irqmask_lock);
+
+	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+
+	if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
+		dsp_int = 1;
+
+	/* FIXME: Handle Medfield
+	if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
+		dsp_int = 1;
+	*/
+
+	if (vdc_stat & _PSB_IRQ_SGX_FLAG)
+		sgx_int = 1;
+	if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
+		hotplug_int = 1;
+
+	vdc_stat &= dev_priv->vdc_irq_mask;
+	spin_unlock(&dev_priv->irqmask_lock);
+
+	if (dsp_int && gma_power_is_on(dev)) {
+		psb_vdc_interrupt(dev, vdc_stat);
+		handled = 1;
+	}
+
+	if (sgx_int) {
+		/* Not expected - we have it masked, shut it up */
+		u32 s, s2;
+		s = PSB_RSGX32(PSB_CR_EVENT_STATUS);
+		s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
+		PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR);
+		PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2);
+		/* If s & _PSB_CE_TWOD_COMPLETE then 2D is done, but
+		   we may as well poll even if we add that */
+		handled = 1;
+	}
+
+	/* Note: this bit has other meanings on some devices, so we will
+	   need to address that later if it ever matters */
+	if (hotplug_int && dev_priv->ops->hotplug) {
+		handled = dev_priv->ops->hotplug(dev);
+		REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
+	}
+
+	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
+	(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
+	DRM_READMEMORYBARRIER();
+
+	if (!handled)
+		return IRQ_NONE;
+
+	return IRQ_HANDLED;
+}
+
+void psb_irq_preinstall(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	if (gma_power_is_on(dev))
+		PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+	if (dev->vblank_enabled[0])
+		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+	if (dev->vblank_enabled[1])
+		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+	/* FIXME: Handle Medfield irq mask
+	if (dev->vblank_enabled[1])
+		dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
+	if (dev->vblank_enabled[2])
+		dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
+	*/
+
+	/* Revisit this area - do we want per-device masks? */
+	if (dev_priv->ops->hotplug)
+		dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
+	dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE;
+
+	/* This register is safe even if display island is off */
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+int psb_irq_postinstall(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	/* This register is safe even if display island is off */
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+	if (dev->vblank_enabled[0])
+		psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+	else
+		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	if (dev->vblank_enabled[1])
+		psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+	else
+		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	if (dev->vblank_enabled[2])
+		psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+	else
+		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	if (dev_priv->ops->hotplug_enable)
+		dev_priv->ops->hotplug_enable(dev, true);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	return 0;
+}
+
+void psb_irq_uninstall(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	if (dev_priv->ops->hotplug_enable)
+		dev_priv->ops->hotplug_enable(dev, false);
+
+	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+
+	if (dev->vblank_enabled[0])
+		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	if (dev->vblank_enabled[1])
+		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	if (dev->vblank_enabled[2])
+		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+				  _PSB_IRQ_MSVDX_FLAG |
+				  _LNC_IRQ_TOPAZ_FLAG;
+
+	/* These two registers are safe even if display island is off */
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+
+	wmb();
+
+	/* This register is safe even if display island is off */
+	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+void psb_irq_turn_on_dpst(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	u32 hist_reg;
+	u32 pwm_reg;
+
+	if (gma_power_begin(dev, false)) {
+		PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL);
+		hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+		PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL);
+		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+		PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE
+						| PWM_PHASEIN_INT_ENABLE,
+							   PWM_CONTROL_LOGIC);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+		psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+		PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
+							HISTOGRAM_INT_CONTROL);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
+							PWM_CONTROL_LOGIC);
+
+		gma_power_end(dev);
+	}
+}
+
+int psb_irq_enable_dpst(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	/* enable DPST */
+	mid_enable_pipe_event(dev_priv, 0);
+	psb_irq_turn_on_dpst(dev);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+	return 0;
+}
+
+void psb_irq_turn_off_dpst(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+	u32 hist_reg;
+	u32 pwm_reg;
+
+	if (gma_power_begin(dev, false)) {
+		PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
+		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+
+		psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+		PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
+							PWM_CONTROL_LOGIC);
+		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+		gma_power_end(dev);
+	}
+}
+
+int psb_irq_disable_dpst(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv =
+	    (struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	mid_disable_pipe_event(dev_priv, 0);
+	psb_irq_turn_off_dpst(dev);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	return 0;
+}
+
+#ifdef PSB_FIXME
+static int psb_vblank_do_wait(struct drm_device *dev,
+			      unsigned int *sequence, atomic_t *counter)
+{
+	unsigned int cur_vblank;
+	int ret = 0;
+	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+		    (((cur_vblank = atomic_read(counter))
+		      - *sequence) <= (1 << 23)));
+	*sequence = cur_vblank;
+
+	return ret;
+}
+#endif
+
+/*
+ * Enable the VBLANK interrupt on the given pipe
+ */
+int psb_enable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+	uint32_t reg_val = 0;
+	uint32_t pipeconf_reg = mid_pipeconf(pipe);
+
+	/* Medfield is different - we should perhaps extract out the vblank
+	   and backlight ops */
+	if (IS_MFLD(dev))
+		return mdfld_enable_te(dev, pipe);
+
+	if (gma_power_begin(dev, false)) {
+		reg_val = REG_READ(pipeconf_reg);
+		gma_power_end(dev);
+	}
+
+	if (!(reg_val & PIPEACONF_ENABLE))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	if (pipe == 0)
+		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+	else if (pipe == 1)
+		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+	psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	return 0;
+}
+
+/*
+ * Disable the VBLANK interrupt on the given pipe
+ */
+void psb_disable_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	if (IS_MFLD(dev))
+		mdfld_disable_te(dev, pipe);
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	if (pipe == 0)
+		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
+	else if (pipe == 1)
+		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
+
+	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+	psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+/*
+ * Enable the TE (tearing effect) interrupt on the given pipe
+ */
+int mdfld_enable_te(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+	uint32_t reg_val = 0;
+	uint32_t pipeconf_reg = mid_pipeconf(pipe);
+
+	if (gma_power_begin(dev, false)) {
+		reg_val = REG_READ(pipeconf_reg);
+		gma_power_end(dev);
+	}
+
+	if (!(reg_val & PIPEACONF_ENABLE))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	mid_enable_pipe_event(dev_priv, pipe);
+	psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+
+	return 0;
+}
+
+/*
+ * Disable the TE (tearing effect) interrupt on the given pipe
+ */
+void mdfld_disable_te(struct drm_device *dev, int pipe)
+{
+	struct drm_psb_private *dev_priv =
+		(struct drm_psb_private *) dev->dev_private;
+	unsigned long irqflags;
+
+	if (!dev_priv->dsr_enable)
+		return;
+
+	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+
+	mid_disable_pipe_event(dev_priv, pipe);
+	psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
+
+	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	uint32_t high_frame = PIPEAFRAMEHIGH;
+	uint32_t low_frame = PIPEAFRAMEPIXEL;
+	uint32_t pipeconf_reg = PIPEACONF;
+	uint32_t reg_val = 0;
+	uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
+
+	switch (pipe) {
+	case 0:
+		break;
+	case 1:
+		high_frame = PIPEBFRAMEHIGH;
+		low_frame = PIPEBFRAMEPIXEL;
+		pipeconf_reg = PIPEBCONF;
+		break;
+	case 2:
+		high_frame = PIPECFRAMEHIGH;
+		low_frame = PIPECFRAMEPIXEL;
+		pipeconf_reg = PIPECCONF;
+		break;
+	default:
+		dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
+		return 0;
+	}
+
+	if (!gma_power_begin(dev, false))
+		return 0;
+
+	reg_val = REG_READ(pipeconf_reg);
+
+	if (!(reg_val & PIPEACONF_ENABLE)) {
+		dev_err(dev->dev, "trying to get vblank count for disabled pipe %d\n",
+								pipe);
+		goto psb_get_vblank_counter_exit;
+	}
+
+	/*
+	 * High & low register fields aren't synchronized, so make sure
+	 * we get a low value that's stable across two reads of the high
+	 * register.
+	 */
+	do {
+		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+		low =  ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+			PIPE_FRAME_LOW_SHIFT);
+		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+	} while (high1 != high2);
+
+	count = (high1 << 8) | low;
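+	/* The high register supplies bits 23:8 and the low field bits 7:0,
+	 * so the result is a single 24-bit frame counter. */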
+
+psb_get_vblank_counter_exit:
+
+	gma_power_end(dev);
+
+	return count;
+}
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_irq.h b/linux-imx/drivers/gpu/drm/gma500/psb_irq.h
new file mode 100644
index 0000000..debb7f1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_irq.h
@@ -0,0 +1,47 @@
+/**************************************************************************
+ * Copyright (c) 2009-2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ *    Benjamin Defnet <benjamin.r.defnet@intel.com>
+ *    Rajesh Poornachandran <rajesh.poornachandran@intel.com>
+ *
+ **************************************************************************/
+
+#ifndef _PSB_IRQ_H_
+#define _PSB_IRQ_H_
+
+#include <drm/drmP.h>
+
+bool sysirq_init(struct drm_device *dev);
+void sysirq_uninit(struct drm_device *dev);
+
+void psb_irq_preinstall(struct drm_device *dev);
+int  psb_irq_postinstall(struct drm_device *dev);
+void psb_irq_uninstall(struct drm_device *dev);
+irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+
+int psb_irq_enable_dpst(struct drm_device *dev);
+int psb_irq_disable_dpst(struct drm_device *dev);
+void psb_irq_turn_on_dpst(struct drm_device *dev);
+void psb_irq_turn_off_dpst(struct drm_device *dev);
+int  psb_enable_vblank(struct drm_device *dev, int pipe);
+void psb_disable_vblank(struct drm_device *dev, int pipe);
+u32  psb_get_vblank_counter(struct drm_device *dev, int pipe);
+
+int mdfld_enable_te(struct drm_device *dev, int pipe);
+void mdfld_disable_te(struct drm_device *dev, int pipe);
+#endif /* _PSB_IRQ_H_ */
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_lid.c b/linux-imx/drivers/gpu/drm/gma500/psb_lid.c
new file mode 100644
index 0000000..1d2ebb5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_lid.c
@@ -0,0 +1,94 @@
+/**************************************************************************
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <linux/spinlock.h>
+
+static void psb_lid_timer_func(unsigned long data)
+{
+	struct drm_psb_private *dev_priv = (struct drm_psb_private *)data;
+	struct drm_device *dev = dev_priv->dev;
+	struct timer_list *lid_timer = &dev_priv->lid_timer;
+	unsigned long irq_flags;
+	u32 __iomem *lid_state = dev_priv->opregion.lid_state;
+	u32 pp_status;
+
+	if (readl(lid_state) == dev_priv->lid_last_state)
+		goto lid_timer_schedule;
+
+	if (readl(lid_state) & 0x01) {
+		/* lid state is open */
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while ((pp_status & PP_ON) == 0 &&
+			 (pp_status & PP_SEQUENCE_MASK) != 0);
+
+		if (REG_READ(PP_STATUS) & PP_ON) {
+			/* FIXME: should restore the previous backlight level */
+			psb_intel_lvds_set_brightness(dev, 100);
+		} else {
+			DRM_DEBUG("LVDS panel never powered up");
+			return;
+		}
+	} else {
+		psb_intel_lvds_set_brightness(dev, 0);
+
+		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
+		do {
+			pp_status = REG_READ(PP_STATUS);
+		} while (pp_status & PP_ON); /* wait until the panel reports off */
+	}
+	dev_priv->lid_last_state = readl(lid_state);
+
+lid_timer_schedule:
+	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+	if (!timer_pending(lid_timer)) {
+		lid_timer->expires = jiffies + PSB_LID_DELAY;
+		add_timer(lid_timer);
+	}
+	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+void psb_lid_timer_init(struct drm_psb_private *dev_priv)
+{
+	struct timer_list *lid_timer = &dev_priv->lid_timer;
+	unsigned long irq_flags;
+
+	spin_lock_init(&dev_priv->lid_lock);
+	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+
+	init_timer(lid_timer);
+
+	lid_timer->data = (unsigned long)dev_priv;
+	lid_timer->function = psb_lid_timer_func;
+	lid_timer->expires = jiffies + PSB_LID_DELAY;
+
+	add_timer(lid_timer);
+	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+}
+
+void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
+{
+	del_timer_sync(&dev_priv->lid_timer);
+}
+
diff --git a/linux-imx/drivers/gpu/drm/gma500/psb_reg.h b/linux-imx/drivers/gpu/drm/gma500/psb_reg.h
new file mode 100644
index 0000000..b81c7c1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/psb_reg.h
@@ -0,0 +1,582 @@
+/**************************************************************************
+ *
+ * Copyright (c) (2005-2007) Imagination Technologies Limited.
+ * Copyright (c) 2007, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _PSB_REG_H_
+#define _PSB_REG_H_
+
+#define PSB_CR_CLKGATECTL		0x0000
+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG		(1 << 24)
+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT	(20)
+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK		(0x3 << 20)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT	(16)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK		(0x3 << 16)
+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT		(12)
+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK		(0x3 << 12)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT	(8)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK		(0x3 << 8)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT	(4)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK		(0x3 << 4)
+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT		(0)
+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK		(0x3 << 0)
+#define _PSB_C_CLKGATECTL_CLKG_ENABLED		(0)
+#define _PSB_C_CLKGATECTL_CLKG_DISABLED		(1)
+#define _PSB_C_CLKGATECTL_CLKG_AUTO		(2)
+
+#define PSB_CR_CORE_ID			0x0010
+#define _PSB_CC_ID_ID_SHIFT			(16)
+#define _PSB_CC_ID_ID_MASK			(0xFFFF << 16)
+#define _PSB_CC_ID_CONFIG_SHIFT			(0)
+#define _PSB_CC_ID_CONFIG_MASK			(0xFFFF << 0)
+
+#define PSB_CR_CORE_REVISION		0x0014
+#define _PSB_CC_REVISION_DESIGNER_SHIFT		(24)
+#define _PSB_CC_REVISION_DESIGNER_MASK		(0xFF << 24)
+#define _PSB_CC_REVISION_MAJOR_SHIFT		(16)
+#define _PSB_CC_REVISION_MAJOR_MASK		(0xFF << 16)
+#define _PSB_CC_REVISION_MINOR_SHIFT		(8)
+#define _PSB_CC_REVISION_MINOR_MASK		(0xFF << 8)
+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT	(0)
+#define _PSB_CC_REVISION_MAINTENANCE_MASK	(0xFF << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD1	0x0018
+
+#define PSB_CR_SOFT_RESET		0x0080
+#define _PSB_CS_RESET_TSP_RESET		(1 << 6)
+#define _PSB_CS_RESET_ISP_RESET		(1 << 5)
+#define _PSB_CS_RESET_USE_RESET		(1 << 4)
+#define _PSB_CS_RESET_TA_RESET		(1 << 3)
+#define _PSB_CS_RESET_DPM_RESET		(1 << 2)
+#define _PSB_CS_RESET_TWOD_RESET	(1 << 1)
+#define _PSB_CS_RESET_BIF_RESET			(1 << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD2	0x001C
+
+#define PSB_CR_EVENT_HOST_ENABLE2	0x0110
+
+#define PSB_CR_EVENT_STATUS2		0x0118
+
+#define PSB_CR_EVENT_HOST_CLEAR2	0x0114
+#define _PSB_CE2_BIF_REQUESTER_FAULT		(1 << 4)
+
+#define PSB_CR_EVENT_STATUS		0x012C
+
+#define PSB_CR_EVENT_HOST_ENABLE	0x0130
+
+#define PSB_CR_EVENT_HOST_CLEAR		0x0134
+#define _PSB_CE_MASTER_INTERRUPT		(1 << 31)
+#define _PSB_CE_TA_DPM_FAULT			(1 << 28)
+#define _PSB_CE_TWOD_COMPLETE			(1 << 27)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS		(1 << 25)
+#define _PSB_CE_DPM_TA_MEM_FREE			(1 << 24)
+#define _PSB_CE_PIXELBE_END_RENDER		(1 << 18)
+#define _PSB_CE_SW_EVENT			(1 << 14)
+#define _PSB_CE_TA_FINISHED			(1 << 13)
+#define _PSB_CE_TA_TERMINATE			(1 << 12)
+#define _PSB_CE_DPM_REACHED_MEM_THRESH		(1 << 3)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL		(1 << 2)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT		(1 << 1)
+#define _PSB_CE_DPM_3D_MEM_FREE			(1 << 0)
+
+
+#define PSB_USE_OFFSET_MASK		0x0007FFFF
+#define PSB_USE_OFFSET_SIZE		(PSB_USE_OFFSET_MASK + 1)
+#define PSB_CR_USE_CODE_BASE0		0x0A0C
+#define PSB_CR_USE_CODE_BASE1		0x0A10
+#define PSB_CR_USE_CODE_BASE2		0x0A14
+#define PSB_CR_USE_CODE_BASE3		0x0A18
+#define PSB_CR_USE_CODE_BASE4		0x0A1C
+#define PSB_CR_USE_CODE_BASE5		0x0A20
+#define PSB_CR_USE_CODE_BASE6		0x0A24
+#define PSB_CR_USE_CODE_BASE7		0x0A28
+#define PSB_CR_USE_CODE_BASE8		0x0A2C
+#define PSB_CR_USE_CODE_BASE9		0x0A30
+#define PSB_CR_USE_CODE_BASE10		0x0A34
+#define PSB_CR_USE_CODE_BASE11		0x0A38
+#define PSB_CR_USE_CODE_BASE12		0x0A3C
+#define PSB_CR_USE_CODE_BASE13		0x0A40
+#define PSB_CR_USE_CODE_BASE14		0x0A44
+#define PSB_CR_USE_CODE_BASE15		0x0A48
+#define PSB_CR_USE_CODE_BASE(_i)	(0x0A0C + ((_i) << 2))
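+/* e.g. PSB_CR_USE_CODE_BASE(3) == 0x0A0C + (3 << 2) == 0x0A18, matching
+ * PSB_CR_USE_CODE_BASE3 above. */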
+#define _PSB_CUC_BASE_DM_SHIFT			(25)
+#define _PSB_CUC_BASE_DM_MASK			(0x3 << 25)
+#define _PSB_CUC_BASE_ADDR_SHIFT		(0)	/* 1024-bit aligned address? */
+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT		(7)
+#define _PSB_CUC_BASE_ADDR_MASK			(0x1FFFFFF << 0)
+#define _PSB_CUC_DM_VERTEX			(0)
+#define _PSB_CUC_DM_PIXEL			(1)
+#define _PSB_CUC_DM_RESERVED			(2)
+#define _PSB_CUC_DM_EDM				(3)
+
+#define PSB_CR_PDS_EXEC_BASE		0x0AB8
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT	(20)	/* 1MB aligned address */
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT	(20)
+
+#define PSB_CR_EVENT_KICKER		0x0AC4
+#define _PSB_CE_KICKER_ADDRESS_SHIFT		(4)	/* 128-bit aligned address */
+
+#define PSB_CR_EVENT_KICK		0x0AC8
+#define _PSB_CE_KICK_NOW			(1 << 0)
+
+#define PSB_CR_BIF_DIR_LIST_BASE1	0x0C38
+
+#define PSB_CR_BIF_CTRL			0x0C00
+#define _PSB_CB_CTRL_CLEAR_FAULT		(1 << 4)
+#define _PSB_CB_CTRL_INVALDC			(1 << 3)
+#define _PSB_CB_CTRL_FLUSH			(1 << 2)
+
+#define PSB_CR_BIF_INT_STAT		0x0C04
+
+#define PSB_CR_BIF_FAULT		0x0C08
+#define _PSB_CBI_STAT_PF_N_RW			(1 << 14)
+#define _PSB_CBI_STAT_FAULT_SHIFT		(0)
+#define _PSB_CBI_STAT_FAULT_MASK		(0x3FFF << 0)
+#define _PSB_CBI_STAT_FAULT_CACHE		(1 << 1)
+#define _PSB_CBI_STAT_FAULT_TA			(1 << 2)
+#define _PSB_CBI_STAT_FAULT_VDM			(1 << 3)
+#define _PSB_CBI_STAT_FAULT_2D			(1 << 4)
+#define _PSB_CBI_STAT_FAULT_PBE			(1 << 5)
+#define _PSB_CBI_STAT_FAULT_TSP			(1 << 6)
+#define _PSB_CBI_STAT_FAULT_ISP			(1 << 7)
+#define _PSB_CBI_STAT_FAULT_USSEPDS		(1 << 8)
+#define _PSB_CBI_STAT_FAULT_HOST		(1 << 9)
+
+#define PSB_CR_BIF_BANK0		0x0C78
+#define PSB_CR_BIF_BANK1		0x0C7C
+#define PSB_CR_BIF_DIR_LIST_BASE0	0x0C84
+#define PSB_CR_BIF_TWOD_REQ_BASE	0x0C88
+#define PSB_CR_BIF_3D_REQ_BASE		0x0CAC
+
+#define PSB_CR_2D_SOCIF			0x0E18
+#define _PSB_C2_SOCIF_FREESPACE_SHIFT		(0)
+#define _PSB_C2_SOCIF_FREESPACE_MASK		(0xFF << 0)
+#define _PSB_C2_SOCIF_EMPTY			(0x80 << 0)
+
+#define PSB_CR_2D_BLIT_STATUS		0x0E04
+#define _PSB_C2B_STATUS_BUSY			(1 << 24)
+#define _PSB_C2B_STATUS_COMPLETE_SHIFT		(0)
+#define _PSB_C2B_STATUS_COMPLETE_MASK		(0xFFFFFF << 0)
+
+/*
+ * 2D defs.
+ */
+
+/*
+ * 2D Slave Port Data : Block Header's Object Type
+ */
+
+#define	PSB_2D_CLIP_BH			(0x00000000)
+#define	PSB_2D_PAT_BH			(0x10000000)
+#define	PSB_2D_CTRL_BH			(0x20000000)
+#define	PSB_2D_SRC_OFF_BH		(0x30000000)
+#define	PSB_2D_MASK_OFF_BH		(0x40000000)
+#define	PSB_2D_RESERVED1_BH		(0x50000000)
+#define	PSB_2D_RESERVED2_BH		(0x60000000)
+#define	PSB_2D_FENCE_BH			(0x70000000)
+#define	PSB_2D_BLIT_BH			(0x80000000)
+#define	PSB_2D_SRC_SURF_BH		(0x90000000)
+#define	PSB_2D_DST_SURF_BH		(0xA0000000)
+#define	PSB_2D_PAT_SURF_BH		(0xB0000000)
+#define	PSB_2D_SRC_PAL_BH		(0xC0000000)
+#define	PSB_2D_PAT_PAL_BH		(0xD0000000)
+#define	PSB_2D_MASK_SURF_BH		(0xE0000000)
+#define	PSB_2D_FLUSH_BH			(0xF0000000)
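+/* The object type occupies the top nibble of the block header's first dword;
+ * e.g. PSB_2D_BLIT_BH >> 28 == 0x8 selects a blit block. */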
+
+/*
+ * Clip Definition block (PSB_2D_CLIP_BH)
+ */
+#define PSB_2D_CLIPCOUNT_MAX		(1)
+#define PSB_2D_CLIPCOUNT_MASK		(0x00000000)
+#define PSB_2D_CLIPCOUNT_CLRMASK	(0xFFFFFFFF)
+#define PSB_2D_CLIPCOUNT_SHIFT		(0)
+/* clip rectangle min & max */
+#define PSB_2D_CLIP_XMAX_MASK		(0x00FFF000)
+#define PSB_2D_CLIP_XMAX_CLRMASK	(0xFF000FFF)
+#define PSB_2D_CLIP_XMAX_SHIFT		(12)
+#define PSB_2D_CLIP_XMIN_MASK		(0x00000FFF)
+#define PSB_2D_CLIP_XMIN_CLRMASK	(0x00FFF000)
+#define PSB_2D_CLIP_XMIN_SHIFT		(0)
+/* clip rectangle offset */
+#define PSB_2D_CLIP_YMAX_MASK		(0x00FFF000)
+#define PSB_2D_CLIP_YMAX_CLRMASK	(0xFF000FFF)
+#define PSB_2D_CLIP_YMAX_SHIFT		(12)
+#define PSB_2D_CLIP_YMIN_MASK		(0x00000FFF)
+#define PSB_2D_CLIP_YMIN_CLRMASK	(0x00FFF000)
+#define PSB_2D_CLIP_YMIN_SHIFT		(0)
+
+/*
+ * Pattern Control (PSB_2D_PAT_BH)
+ */
+#define PSB_2D_PAT_HEIGHT_MASK		(0x0000001F)
+#define PSB_2D_PAT_HEIGHT_SHIFT		(0)
+#define PSB_2D_PAT_WIDTH_MASK		(0x000003E0)
+#define PSB_2D_PAT_WIDTH_SHIFT		(5)
+#define PSB_2D_PAT_YSTART_MASK		(0x00007C00)
+#define PSB_2D_PAT_YSTART_SHIFT		(10)
+#define PSB_2D_PAT_XSTART_MASK		(0x000F8000)
+#define PSB_2D_PAT_XSTART_SHIFT		(15)
+
+/*
+ * 2D Control block (PSB_2D_CTRL_BH)
+ */
+/* Present Flags */
+#define PSB_2D_SRCCK_CTRL		(0x00000001)
+#define PSB_2D_DSTCK_CTRL		(0x00000002)
+#define PSB_2D_ALPHA_CTRL		(0x00000004)
+/* Colour Key Colour (SRC/DST)*/
+#define PSB_2D_CK_COL_MASK		(0xFFFFFFFF)
+#define PSB_2D_CK_COL_CLRMASK		(0x00000000)
+#define PSB_2D_CK_COL_SHIFT		(0)
+/* Colour Key Mask (SRC/DST)*/
+#define PSB_2D_CK_MASK_MASK		(0xFFFFFFFF)
+#define PSB_2D_CK_MASK_CLRMASK		(0x00000000)
+#define PSB_2D_CK_MASK_SHIFT		(0)
+/* Alpha Control (Alpha/RGB)*/
+#define PSB_2D_GBLALPHA_MASK		(0x000FF000)
+#define PSB_2D_GBLALPHA_CLRMASK		(0xFFF00FFF)
+#define PSB_2D_GBLALPHA_SHIFT		(12)
+#define PSB_2D_SRCALPHA_OP_MASK		(0x00700000)
+#define PSB_2D_SRCALPHA_OP_CLRMASK	(0xFF8FFFFF)
+#define PSB_2D_SRCALPHA_OP_SHIFT	(20)
+#define PSB_2D_SRCALPHA_OP_ONE		(0x00000000)
+#define PSB_2D_SRCALPHA_OP_SRC		(0x00100000)
+#define PSB_2D_SRCALPHA_OP_DST		(0x00200000)
+#define PSB_2D_SRCALPHA_OP_SG		(0x00300000)
+#define PSB_2D_SRCALPHA_OP_DG		(0x00400000)
+#define PSB_2D_SRCALPHA_OP_GBL		(0x00500000)
+#define PSB_2D_SRCALPHA_OP_ZERO		(0x00600000)
+#define PSB_2D_SRCALPHA_INVERT		(0x00800000)
+#define PSB_2D_SRCALPHA_INVERT_CLR	(0xFF7FFFFF)
+#define PSB_2D_DSTALPHA_OP_MASK		(0x07000000)
+#define PSB_2D_DSTALPHA_OP_CLRMASK	(0xF8FFFFFF)
+#define PSB_2D_DSTALPHA_OP_SHIFT	(24)
+#define PSB_2D_DSTALPHA_OP_ONE		(0x00000000)
+#define PSB_2D_DSTALPHA_OP_SRC		(0x01000000)
+#define PSB_2D_DSTALPHA_OP_DST		(0x02000000)
+#define PSB_2D_DSTALPHA_OP_SG		(0x03000000)
+#define PSB_2D_DSTALPHA_OP_DG		(0x04000000)
+#define PSB_2D_DSTALPHA_OP_GBL		(0x05000000)
+#define PSB_2D_DSTALPHA_OP_ZERO		(0x06000000)
+#define PSB_2D_DSTALPHA_INVERT		(0x08000000)
+#define PSB_2D_DSTALPHA_INVERT_CLR	(0xF7FFFFFF)
+
+#define PSB_2D_PRE_MULTIPLICATION_ENABLE	(0x10000000)
+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK	(0xEFFFFFFF)
+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE		(0x20000000)
+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK	(0xDFFFFFFF)
+
+/*
+ *Source Offset (PSB_2D_SRC_OFF_BH)
+ */
+#define PSB_2D_SRCOFF_XSTART_MASK	((0x00000FFF) << 12)
+#define PSB_2D_SRCOFF_XSTART_SHIFT	(12)
+#define PSB_2D_SRCOFF_YSTART_MASK	(0x00000FFF)
+#define PSB_2D_SRCOFF_YSTART_SHIFT	(0)
+
+/*
+ * Mask Offset (PSB_2D_MASK_OFF_BH)
+ */
+#define PSB_2D_MASKOFF_XSTART_MASK	((0x00000FFF) << 12)
+#define PSB_2D_MASKOFF_XSTART_SHIFT	(12)
+#define PSB_2D_MASKOFF_YSTART_MASK	(0x00000FFF)
+#define PSB_2D_MASKOFF_YSTART_SHIFT	(0)
+
+/*
+ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
+ */
+
+/*
+ *Blit Rectangle (PSB_2D_BLIT_BH)
+ */
+
+#define PSB_2D_ROT_MASK			(3 << 25)
+#define PSB_2D_ROT_CLRMASK		(~PSB_2D_ROT_MASK)
+#define PSB_2D_ROT_NONE			(0 << 25)
+#define PSB_2D_ROT_90DEGS		(1 << 25)
+#define PSB_2D_ROT_180DEGS		(2 << 25)
+#define PSB_2D_ROT_270DEGS		(3 << 25)
+
+#define PSB_2D_COPYORDER_MASK		(3 << 23)
+#define PSB_2D_COPYORDER_CLRMASK	(~PSB_2D_COPYORDER_MASK)
+#define PSB_2D_COPYORDER_TL2BR		(0 << 23)
+#define PSB_2D_COPYORDER_BR2TL		(1 << 23)
+#define PSB_2D_COPYORDER_TR2BL		(2 << 23)
+#define PSB_2D_COPYORDER_BL2TR		(3 << 23)
+
+#define PSB_2D_DSTCK_CLRMASK		(0xFF9FFFFF)
+#define PSB_2D_DSTCK_DISABLE		(0x00000000)
+#define PSB_2D_DSTCK_PASS		(0x00200000)
+#define PSB_2D_DSTCK_REJECT		(0x00400000)
+
+#define PSB_2D_SRCCK_CLRMASK		(0xFFE7FFFF)
+#define PSB_2D_SRCCK_DISABLE		(0x00000000)
+#define PSB_2D_SRCCK_PASS		(0x00080000)
+#define PSB_2D_SRCCK_REJECT		(0x00100000)
+
+#define PSB_2D_CLIP_ENABLE		(0x00040000)
+
+#define PSB_2D_ALPHA_ENABLE		(0x00020000)
+
+#define PSB_2D_PAT_CLRMASK		(0xFFFEFFFF)
+#define PSB_2D_PAT_MASK			(0x00010000)
+#define PSB_2D_USE_PAT			(0x00010000)
+#define PSB_2D_USE_FILL			(0x00000000)
+/*
+ * Tungsten Graphics note on rop codes: If rop A and rop B are
+ * identical, the mask surface will not be read and need not be
+ * set up.
+ */
+
+#define PSB_2D_ROP3B_MASK		(0x0000FF00)
+#define PSB_2D_ROP3B_CLRMASK		(0xFFFF00FF)
+#define PSB_2D_ROP3B_SHIFT		(8)
+/* rop code A */
+#define PSB_2D_ROP3A_MASK		(0x000000FF)
+#define PSB_2D_ROP3A_CLRMASK		(0xFFFFFF00)
+#define PSB_2D_ROP3A_SHIFT		(0)
+
+#define PSB_2D_ROP4_MASK		(0x0000FFFF)
+/*
+ *	DWORD0:	(Only pass if Pattern control == Use Fill Colour)
+ *	Fill Colour RGBA8888
+ */
+#define PSB_2D_FILLCOLOUR_MASK		(0xFFFFFFFF)
+#define PSB_2D_FILLCOLOUR_SHIFT		(0)
+/*
+ *	DWORD1: (Always Present)
+ *	X Start (Dest)
+ *	Y Start (Dest)
+ */
+#define PSB_2D_DST_XSTART_MASK		(0x00FFF000)
+#define PSB_2D_DST_XSTART_CLRMASK	(0xFF000FFF)
+#define PSB_2D_DST_XSTART_SHIFT		(12)
+#define PSB_2D_DST_YSTART_MASK		(0x00000FFF)
+#define PSB_2D_DST_YSTART_CLRMASK	(0xFFFFF000)
+#define PSB_2D_DST_YSTART_SHIFT		(0)
+/*
+ *	DWORD2: (Always Present)
+ *	X Size (Dest)
+ *	Y Size (Dest)
+ */
+#define PSB_2D_DST_XSIZE_MASK		(0x00FFF000)
+#define PSB_2D_DST_XSIZE_CLRMASK	(0xFF000FFF)
+#define PSB_2D_DST_XSIZE_SHIFT		(12)
+#define PSB_2D_DST_YSIZE_MASK		(0x00000FFF)
+#define PSB_2D_DST_YSIZE_CLRMASK	(0xFFFFF000)
+#define PSB_2D_DST_YSIZE_SHIFT		(0)
+
+/*
+ * Source Surface (PSB_2D_SRC_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_SRC_FORMAT_MASK		(0x00078000)
+#define PSB_2D_SRC_1_PAL		(0x00000000)
+#define PSB_2D_SRC_2_PAL		(0x00008000)
+#define PSB_2D_SRC_4_PAL		(0x00010000)
+#define PSB_2D_SRC_8_PAL		(0x00018000)
+#define PSB_2D_SRC_8_ALPHA		(0x00020000)
+#define PSB_2D_SRC_4_ALPHA		(0x00028000)
+#define PSB_2D_SRC_332RGB		(0x00030000)
+#define PSB_2D_SRC_4444ARGB		(0x00038000)
+#define PSB_2D_SRC_555RGB		(0x00040000)
+#define PSB_2D_SRC_1555ARGB		(0x00048000)
+#define PSB_2D_SRC_565RGB		(0x00050000)
+#define PSB_2D_SRC_0888ARGB		(0x00058000)
+#define PSB_2D_SRC_8888ARGB		(0x00060000)
+#define PSB_2D_SRC_8888UYVY		(0x00068000)
+#define PSB_2D_SRC_RESERVED		(0x00070000)
+#define PSB_2D_SRC_1555ARGB_LOOKUP	(0x00078000)
+
+
+#define PSB_2D_SRC_STRIDE_MASK		(0x00007FFF)
+#define PSB_2D_SRC_STRIDE_CLRMASK	(0xFFFF8000)
+#define PSB_2D_SRC_STRIDE_SHIFT		(0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_SRC_ADDR_MASK		(0x0FFFFFFC)
+#define PSB_2D_SRC_ADDR_CLRMASK		(0x00000003)
+#define PSB_2D_SRC_ADDR_SHIFT		(2)
+#define PSB_2D_SRC_ADDR_ALIGNSHIFT	(2)
+
+/*
+ * Pattern Surface (PSB_2D_PAT_SURF_BH)
+ */
+/*
+ *  WORD 0
+ */
+
+#define PSB_2D_PAT_FORMAT_MASK		(0x00078000)
+#define PSB_2D_PAT_1_PAL		(0x00000000)
+#define PSB_2D_PAT_2_PAL		(0x00008000)
+#define PSB_2D_PAT_4_PAL		(0x00010000)
+#define PSB_2D_PAT_8_PAL		(0x00018000)
+#define PSB_2D_PAT_8_ALPHA		(0x00020000)
+#define PSB_2D_PAT_4_ALPHA		(0x00028000)
+#define PSB_2D_PAT_332RGB		(0x00030000)
+#define PSB_2D_PAT_4444ARGB		(0x00038000)
+#define PSB_2D_PAT_555RGB		(0x00040000)
+#define PSB_2D_PAT_1555ARGB		(0x00048000)
+#define PSB_2D_PAT_565RGB		(0x00050000)
+#define PSB_2D_PAT_0888ARGB		(0x00058000)
+#define PSB_2D_PAT_8888ARGB		(0x00060000)
+
+#define PSB_2D_PAT_STRIDE_MASK		(0x00007FFF)
+#define PSB_2D_PAT_STRIDE_CLRMASK	(0xFFFF8000)
+#define PSB_2D_PAT_STRIDE_SHIFT		(0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_PAT_ADDR_MASK		(0x0FFFFFFC)
+#define PSB_2D_PAT_ADDR_CLRMASK		(0x00000003)
+#define PSB_2D_PAT_ADDR_SHIFT		(2)
+#define PSB_2D_PAT_ADDR_ALIGNSHIFT	(2)
+
+/*
+ * Destination Surface (PSB_2D_DST_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+
+#define PSB_2D_DST_FORMAT_MASK		(0x00078000)
+#define PSB_2D_DST_332RGB		(0x00030000)
+#define PSB_2D_DST_4444ARGB		(0x00038000)
+#define PSB_2D_DST_555RGB		(0x00040000)
+#define PSB_2D_DST_1555ARGB		(0x00048000)
+#define PSB_2D_DST_565RGB		(0x00050000)
+#define PSB_2D_DST_0888ARGB		(0x00058000)
+#define PSB_2D_DST_8888ARGB		(0x00060000)
+#define PSB_2D_DST_8888AYUV		(0x00070000)
+
+#define PSB_2D_DST_STRIDE_MASK		(0x00007FFF)
+#define PSB_2D_DST_STRIDE_CLRMASK	(0xFFFF8000)
+#define PSB_2D_DST_STRIDE_SHIFT		(0)
+/*
+ * WORD 1 - Base Address
+ */
+#define PSB_2D_DST_ADDR_MASK		(0x0FFFFFFC)
+#define PSB_2D_DST_ADDR_CLRMASK		(0x00000003)
+#define PSB_2D_DST_ADDR_SHIFT		(2)
+#define PSB_2D_DST_ADDR_ALIGNSHIFT	(2)
+
+/*
+ * Mask Surface (PSB_2D_MASK_SURF_BH)
+ */
+/*
+ * WORD 0
+ */
+#define PSB_2D_MASK_STRIDE_MASK		(0x00007FFF)
+#define PSB_2D_MASK_STRIDE_CLRMASK	(0xFFFF8000)
+#define PSB_2D_MASK_STRIDE_SHIFT	(0)
+/*
+ *  WORD 1 - Base Address
+ */
+#define PSB_2D_MASK_ADDR_MASK		(0x0FFFFFFC)
+#define PSB_2D_MASK_ADDR_CLRMASK	(0x00000003)
+#define PSB_2D_MASK_ADDR_SHIFT		(2)
+#define PSB_2D_MASK_ADDR_ALIGNSHIFT	(2)
+
+/*
+ * Source Palette (PSB_2D_SRC_PAL_BH)
+ */
+
+#define PSB_2D_SRCPAL_ADDR_SHIFT	(0)
+#define PSB_2D_SRCPAL_ADDR_CLRMASK	(0xF0000007)
+#define PSB_2D_SRCPAL_ADDR_MASK		(0x0FFFFFF8)
+#define PSB_2D_SRCPAL_BYTEALIGN		(1024)
+
+/*
+ * Pattern Palette (PSB_2D_PAT_PAL_BH)
+ */
+
+#define PSB_2D_PATPAL_ADDR_SHIFT	(0)
+#define PSB_2D_PATPAL_ADDR_CLRMASK	(0xF0000007)
+#define PSB_2D_PATPAL_ADDR_MASK		(0x0FFFFFF8)
+#define PSB_2D_PATPAL_BYTEALIGN		(1024)
+
+/*
+ * Rop3 Codes (2 LS bytes)
+ */
+
+#define PSB_2D_ROP3_SRCCOPY		(0xCCCC)
+#define PSB_2D_ROP3_PATCOPY		(0xF0F0)
+#define PSB_2D_ROP3_WHITENESS		(0xFFFF)
+#define PSB_2D_ROP3_BLACKNESS		(0x0000)
+#define PSB_2D_ROP3_SRC			(0xCC)
+#define PSB_2D_ROP3_PAT			(0xF0)
+#define PSB_2D_ROP3_DST			(0xAA)
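+/* A ROP3 byte is the output column of a truth table over (pattern, src, dst):
+ * 0xCC copies src, 0xF0 copies pattern, and e.g.
+ * (PSB_2D_ROP3_SRC & PSB_2D_ROP3_PAT) == 0xC0 encodes src AND pattern. */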
+
+/*
+ * Sizes.
+ */
+
+#define PSB_SCENE_HW_COOKIE_SIZE	16
+#define PSB_TA_MEM_HW_COOKIE_SIZE	16
+
+/*
+ * Scene stuff.
+ */
+
+#define PSB_NUM_HW_SCENES		2
+
+/*
+ * Scheduler completion actions.
+ */
+
+#define PSB_RASTER_BLOCK		0
+#define PSB_RASTER			1
+#define PSB_RETURN			2
+#define PSB_TA				3
+
+/* Power management */
+#define PSB_PUNIT_PORT			0x04
+#define PSB_OSPMBA			0x78
+#define PSB_APMBA			0x7a
+#define PSB_APM_CMD			0x0
+#define PSB_APM_STS			0x04
+#define PSB_PWRGT_VID_ENC_MASK		0x30
+#define PSB_PWRGT_VID_DEC_MASK		0xc
+#define PSB_PWRGT_GL3_MASK		0xc0
+
+#define PSB_PM_SSC			0x20
+#define PSB_PM_SSS			0x30
+#define PSB_PWRGT_DISPLAY_MASK		0xc /* on a different BA than video/gfx */
+#define MDFLD_PWRGT_DISPLAY_A_CNTR	0x0000000c
+#define MDFLD_PWRGT_DISPLAY_B_CNTR	0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_CNTR	0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_CNTR	0x000c0000
+#define MDFLD_PWRGT_DISPLAY_CNTR    (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
+/* Display SSS register bits are different in A0 vs. B0 */
+#define PSB_PWRGT_GFX_MASK		0x3
+#define MDFLD_PWRGT_DISPLAY_A_STS	0x000000c0
+#define MDFLD_PWRGT_DISPLAY_B_STS	0x00000300
+#define MDFLD_PWRGT_DISPLAY_C_STS	0x00000c00
+#define PSB_PWRGT_GFX_MASK_B0		0xc3
+#define MDFLD_PWRGT_DISPLAY_A_STS_B0	0x0000000c
+#define MDFLD_PWRGT_DISPLAY_B_STS_B0	0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_STS_B0	0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_STS	0x000c0000
+#define MDFLD_PWRGT_DISPLAY_STS_A0    (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
+#define MDFLD_PWRGT_DISPLAY_STS_B0    (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
+#endif
diff --git a/linux-imx/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/linux-imx/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
new file mode 100644
index 0000000..771ff66
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -0,0 +1,829 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "tc35876x-dsi-lvds.h"
+#include <linux/i2c/tc35876x.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/intel_scu_ipc.h>
+
+static struct i2c_client *tc35876x_client;
+static struct i2c_client *cmi_lcd_i2c_client;
+
+#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
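+/* Worked example: FLD_MASK(10, 8) == 0x700 and FLD_VAL(5, 10, 8) == 0x500,
+ * i.e. the value 5 placed in bit field 10:8. */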
+
+/* DSI D-PHY Layer Registers */
+#define D0W_DPHYCONTTX		0x0004
+#define CLW_DPHYCONTRX		0x0020
+#define D0W_DPHYCONTRX		0x0024
+#define D1W_DPHYCONTRX		0x0028
+#define D2W_DPHYCONTRX		0x002C
+#define D3W_DPHYCONTRX		0x0030
+#define COM_DPHYCONTRX		0x0038
+#define CLW_CNTRL		0x0040
+#define D0W_CNTRL		0x0044
+#define D1W_CNTRL		0x0048
+#define D2W_CNTRL		0x004C
+#define D3W_CNTRL		0x0050
+#define DFTMODE_CNTRL		0x0054
+
+/* DSI PPI Layer Registers */
+#define PPI_STARTPPI		0x0104
+#define PPI_BUSYPPI		0x0108
+#define PPI_LINEINITCNT		0x0110
+#define PPI_LPTXTIMECNT		0x0114
+#define PPI_LANEENABLE		0x0134
+#define PPI_TX_RX_TA		0x013C
+#define PPI_CLS_ATMR		0x0140
+#define PPI_D0S_ATMR		0x0144
+#define PPI_D1S_ATMR		0x0148
+#define PPI_D2S_ATMR		0x014C
+#define PPI_D3S_ATMR		0x0150
+#define PPI_D0S_CLRSIPOCOUNT	0x0164
+#define PPI_D1S_CLRSIPOCOUNT	0x0168
+#define PPI_D2S_CLRSIPOCOUNT	0x016C
+#define PPI_D3S_CLRSIPOCOUNT	0x0170
+#define CLS_PRE			0x0180
+#define D0S_PRE			0x0184
+#define D1S_PRE			0x0188
+#define D2S_PRE			0x018C
+#define D3S_PRE			0x0190
+#define CLS_PREP		0x01A0
+#define D0S_PREP		0x01A4
+#define D1S_PREP		0x01A8
+#define D2S_PREP		0x01AC
+#define D3S_PREP		0x01B0
+#define CLS_ZERO		0x01C0
+#define D0S_ZERO		0x01C4
+#define D1S_ZERO		0x01C8
+#define D2S_ZERO		0x01CC
+#define D3S_ZERO		0x01D0
+#define PPI_CLRFLG		0x01E0
+#define PPI_CLRSIPO		0x01E4
+#define HSTIMEOUT		0x01F0
+#define HSTIMEOUTENABLE		0x01F4
+
+/* DSI Protocol Layer Registers */
+#define DSI_STARTDSI		0x0204
+#define DSI_BUSYDSI		0x0208
+#define DSI_LANEENABLE		0x0210
+#define DSI_LANESTATUS0		0x0214
+#define DSI_LANESTATUS1		0x0218
+#define DSI_INTSTATUS		0x0220
+#define DSI_INTMASK		0x0224
+#define DSI_INTCLR		0x0228
+#define DSI_LPTXTO		0x0230
+
+/* DSI General Registers */
+#define DSIERRCNT		0x0300
+
+/* DSI Application Layer Registers */
+#define APLCTRL			0x0400
+#define RDPKTLN			0x0404
+
+/* Video Path Registers */
+#define VPCTRL			0x0450
+#define HTIM1			0x0454
+#define HTIM2			0x0458
+#define VTIM1			0x045C
+#define VTIM2			0x0460
+#define VFUEN			0x0464
+
+/* LVDS Registers */
+#define LVMX0003		0x0480
+#define LVMX0407		0x0484
+#define LVMX0811		0x0488
+#define LVMX1215		0x048C
+#define LVMX1619		0x0490
+#define LVMX2023		0x0494
+#define LVMX2427		0x0498
+#define LVCFG			0x049C
+#define LVPHY0			0x04A0
+#define LVPHY1			0x04A4
+
+/* System Registers */
+#define SYSSTAT			0x0500
+#define SYSRST			0x0504
+
+/* GPIO Registers */
+/*#define GPIOC			0x0520*/
+#define GPIOO			0x0524
+#define GPIOI			0x0528
+
+/* I2C Registers */
+#define I2CTIMCTRL		0x0540
+#define I2CMADDR		0x0544
+#define WDATAQ			0x0548
+#define RDATAQ			0x054C
+
+/* Chip/Rev Registers */
+#define IDREG			0x0580
+
+/* Debug Registers */
+#define DEBUG00			0x05A0
+#define DEBUG01			0x05A4
+
+/* Panel CABC registers */
+#define PANEL_PWM_CONTROL	0x90
+#define PANEL_FREQ_DIVIDER_HI	0x91
+#define PANEL_FREQ_DIVIDER_LO	0x92
+#define PANEL_DUTY_CONTROL	0x93
+#define PANEL_MODIFY_RGB	0x94
+#define PANEL_FRAMERATE_CONTROL	0x96
+#define PANEL_PWM_MIN		0x97
+#define PANEL_PWM_REF		0x98
+#define PANEL_PWM_MAX		0x99
+#define PANEL_ALLOW_DISTORT	0x9A
+#define PANEL_BYPASS_PWMI	0x9B
+
+/* Panel color management registers */
+#define PANEL_CM_ENABLE		0x700
+#define PANEL_CM_HUE		0x701
+#define PANEL_CM_SATURATION	0x702
+#define PANEL_CM_INTENSITY	0x703
+#define PANEL_CM_BRIGHTNESS	0x704
+#define PANEL_CM_CE_ENABLE	0x705
+#define PANEL_CM_PEAK_EN	0x710
+#define PANEL_CM_GAIN		0x711
+#define PANEL_CM_HUETABLE_START	0x730
+#define PANEL_CM_HUETABLE_END	0x747 /* inclusive */
+
+/* Input muxing for registers LVMX0003...LVMX2427 */
+enum {
+	INPUT_R0,	/* 0 */
+	INPUT_R1,
+	INPUT_R2,
+	INPUT_R3,
+	INPUT_R4,
+	INPUT_R5,
+	INPUT_R6,
+	INPUT_R7,
+	INPUT_G0,	/* 8 */
+	INPUT_G1,
+	INPUT_G2,
+	INPUT_G3,
+	INPUT_G4,
+	INPUT_G5,
+	INPUT_G6,
+	INPUT_G7,
+	INPUT_B0,	/* 16 */
+	INPUT_B1,
+	INPUT_B2,
+	INPUT_B3,
+	INPUT_B4,
+	INPUT_B5,
+	INPUT_B6,
+	INPUT_B7,
+	INPUT_HSYNC,	/* 24 */
+	INPUT_VSYNC,
+	INPUT_DE,
+	LOGIC_0,
+	/* 28...31 undefined */
+};
+
+#define INPUT_MUX(lvmx03, lvmx02, lvmx01, lvmx00)		\
+	(FLD_VAL(lvmx03, 29, 24) | FLD_VAL(lvmx02, 20, 16) |	\
+	FLD_VAL(lvmx01, 12, 8) | FLD_VAL(lvmx00, 4, 0))
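+/* Example: INPUT_MUX(INPUT_R5, INPUT_R4, INPUT_R3, INPUT_R2) packs four
+ * selector codes into one LVMX register value, one per byte lane. */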
+
+/**
+ * tc35876x_regw - Write DSI-LVDS bridge register using I2C
+ * @client: struct i2c_client to use
+ * @reg: register address
+ * @value: value to write
+ *
+ * Returns 0 on success, or a negative error value.
+ */
+static int tc35876x_regw(struct i2c_client *client, u16 reg, u32 value)
+{
+	int r;
+	u8 tx_data[] = {
+		/* NOTE: Register address big-endian, data little-endian. */
+		(reg >> 8) & 0xff,
+		reg & 0xff,
+		value & 0xff,
+		(value >> 8) & 0xff,
+		(value >> 16) & 0xff,
+		(value >> 24) & 0xff,
+	};
+	struct i2c_msg msgs[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.buf = tx_data,
+			.len = ARRAY_SIZE(tx_data),
+		},
+	};
+
+	r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+	if (r < 0) {
+		dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x error %d\n",
+			__func__, reg, value, r);
+		return r;
+	}
+
+	if (r < ARRAY_SIZE(msgs)) {
+		dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x msgs %d\n",
+			__func__, reg, value, r);
+		return -EAGAIN;
+	}
+
+	dev_dbg(&client->dev, "%s: reg 0x%04x val 0x%08x\n",
+			__func__, reg, value);
+
+	return 0;
+}
+
+/**
+ * tc35876x_regr - Read DSI-LVDS bridge register using I2C
+ * @client: struct i2c_client to use
+ * @reg: register address
+ * @value: pointer for storing the value
+ *
+ * Returns 0 on success, or a negative error value.
+ */
+static int tc35876x_regr(struct i2c_client *client, u16 reg, u32 *value)
+{
+	int r;
+	u8 tx_data[] = {
+		(reg >> 8) & 0xff,
+		reg & 0xff,
+	};
+	u8 rx_data[4];
+	struct i2c_msg msgs[] = {
+		{
+			.addr = client->addr,
+			.flags = 0,
+			.buf = tx_data,
+			.len = ARRAY_SIZE(tx_data),
+		},
+		{
+			.addr = client->addr,
+			.flags = I2C_M_RD,
+			.buf = rx_data,
+			.len = ARRAY_SIZE(rx_data),
+		 },
+	};
+
+	r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+	if (r < 0) {
+		dev_err(&client->dev, "%s: reg 0x%04x error %d\n", __func__,
+			reg, r);
+		return r;
+	}
+
+	if (r < ARRAY_SIZE(msgs)) {
+		dev_err(&client->dev, "%s: reg 0x%04x msgs %d\n", __func__,
+			reg, r);
+		return -EAGAIN;
+	}
+
+	*value = rx_data[0] << 24 | rx_data[1] << 16 |
+		rx_data[2] << 8 | rx_data[3];
+
+	dev_dbg(&client->dev, "%s: reg 0x%04x value 0x%08x\n", __func__,
+		reg, *value);
+
+	return 0;
+}
+
+void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state)
+{
+	struct tc35876x_platform_data *pdata;
+
+	if (WARN(!tc35876x_client, "%s called before probe", __func__))
+		return;
+
+	dev_dbg(&tc35876x_client->dev, "%s: state %d\n", __func__, state);
+
+	pdata = dev_get_platdata(&tc35876x_client->dev);
+
+	if (pdata->gpio_bridge_reset == -1)
+		return;
+
+	if (state) {
+		gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
+		mdelay(10);
+	} else {
+		/* Pull MIPI Bridge reset pin to Low */
+		gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
+		mdelay(20);
+		/* Pull MIPI Bridge reset pin to High */
+		gpio_set_value_cansleep(pdata->gpio_bridge_reset, 1);
+		mdelay(40);
+	}
+}
+
+void tc35876x_configure_lvds_bridge(struct drm_device *dev)
+{
+	struct i2c_client *i2c = tc35876x_client;
+	u32 ppi_lptxtimecnt;
+	u32 txtagocnt;
+	u32 txtasurecnt;
+	u32 id;
+
+	if (WARN(!tc35876x_client, "%s called before probe", __func__))
+		return;
+
+	dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
+
+	if (!tc35876x_regr(i2c, IDREG, &id))
+		dev_info(&tc35876x_client->dev, "tc35876x ID 0x%08x\n", id);
+	else
+		dev_err(&tc35876x_client->dev, "Cannot read ID\n");
+
+	ppi_lptxtimecnt = 4;
+	txtagocnt = (5 * ppi_lptxtimecnt - 3) / 4;
+	txtasurecnt = 3 * ppi_lptxtimecnt / 2;
+	tc35876x_regw(i2c, PPI_TX_RX_TA, FLD_VAL(txtagocnt, 26, 16) |
+		FLD_VAL(txtasurecnt, 10, 0));
+	tc35876x_regw(i2c, PPI_LPTXTIMECNT, FLD_VAL(ppi_lptxtimecnt, 10, 0));
+
+	tc35876x_regw(i2c, PPI_D0S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
+	tc35876x_regw(i2c, PPI_D1S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
+	tc35876x_regw(i2c, PPI_D2S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
+	tc35876x_regw(i2c, PPI_D3S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
+
+	/* Enabling MIPI & PPI lanes, Enable 4 lanes */
+	tc35876x_regw(i2c, PPI_LANEENABLE,
+		BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
+	tc35876x_regw(i2c, DSI_LANEENABLE,
+		BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
+	tc35876x_regw(i2c, PPI_STARTPPI, BIT(0));
+	tc35876x_regw(i2c, DSI_STARTDSI, BIT(0));
+
+	/* Setting LVDS output frequency */
+	tc35876x_regw(i2c, LVPHY0, FLD_VAL(1, 20, 16) |
+		FLD_VAL(2, 15, 14) | FLD_VAL(6, 4, 0)); /* 0x00048006 */
+
+	/* Set the video panel control register, 0x00000120 (VTGen=ON?) */
+	tc35876x_regw(i2c, VPCTRL, BIT(8) | BIT(5));
+
+	/* Horizontal back porch and horizontal pulse width. 0x00280028 */
+	tc35876x_regw(i2c, HTIM1, FLD_VAL(40, 24, 16) | FLD_VAL(40, 8, 0));
+
+	/* Horizontal front porch and horizontal active video size. 0x00500500 */
+	tc35876x_regw(i2c, HTIM2, FLD_VAL(80, 24, 16) | FLD_VAL(1280, 10, 0));
+
+	/* Vertical back porch and vertical sync pulse width. 0x000e000a */
+	tc35876x_regw(i2c, VTIM1, FLD_VAL(14, 23, 16) | FLD_VAL(10, 7, 0));
+
+	/* Vertical front porch and vertical display size. 0x000e0320 */
+	tc35876x_regw(i2c, VTIM2, FLD_VAL(14, 23, 16) | FLD_VAL(800, 10, 0));
+
+	/* Set above HTIM1, HTIM2, VTIM1, and VTIM2 at next VSYNC. */
+	tc35876x_regw(i2c, VFUEN, BIT(0));
+
+	/* Soft reset LCD controller. */
+	tc35876x_regw(i2c, SYSRST, BIT(2));
+
+	/* LVDS-TX input muxing */
+	tc35876x_regw(i2c, LVMX0003,
+		INPUT_MUX(INPUT_R5, INPUT_R4, INPUT_R3, INPUT_R2));
+	tc35876x_regw(i2c, LVMX0407,
+		INPUT_MUX(INPUT_G2, INPUT_R7, INPUT_R1, INPUT_R6));
+	tc35876x_regw(i2c, LVMX0811,
+		INPUT_MUX(INPUT_G1, INPUT_G0, INPUT_G4, INPUT_G3));
+	tc35876x_regw(i2c, LVMX1215,
+		INPUT_MUX(INPUT_B2, INPUT_G7, INPUT_G6, INPUT_G5));
+	tc35876x_regw(i2c, LVMX1619,
+		INPUT_MUX(INPUT_B4, INPUT_B3, INPUT_B1, INPUT_B0));
+	tc35876x_regw(i2c, LVMX2023,
+		INPUT_MUX(LOGIC_0,  INPUT_B7, INPUT_B6, INPUT_B5));
+	tc35876x_regw(i2c, LVMX2427,
+		INPUT_MUX(INPUT_R0, INPUT_DE, INPUT_VSYNC, INPUT_HSYNC));
+
+	/* Enable LVDS transmitter. */
+	tc35876x_regw(i2c, LVCFG, BIT(0));
+
+	/* Clear notifications. Don't write reserved bits. An earlier version
+	 * wrote 0xffffffff to 0x0288, which must have been in error. */
+	tc35876x_regw(i2c, DSI_INTCLR, FLD_MASK(31, 30) | FLD_MASK(22, 0));
+}
+
+#define GPIOPWMCTRL	0x38F
+#define PWM0CLKDIV0	0x62 /* low byte */
+#define PWM0CLKDIV1	0x61 /* high byte */
+
+#define SYSTEMCLK	19200000UL /* 19.2 MHz */
+#define PWM_FREQUENCY	9600 /* Hz */
+
+/* f = baseclk / (clkdiv + 1) => clkdiv = (baseclk - f) / f */
+static inline u16 calc_clkdiv(unsigned long baseclk, unsigned int f)
+{
+	return (baseclk - f) / f;
+}
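+/* e.g. calc_clkdiv(19200000, 9600) == 1999, and indeed
+ * 19200000 / (1999 + 1) == 9600 Hz. */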
+
+static void tc35876x_brightness_init(struct drm_device *dev)
+{
+	int ret;
+	u8 pwmctrl;
+	u16 clkdiv;
+
+	/* Make sure the PWM reference is the 19.2 MHz system clock. Read first
+	 * instead of setting directly to catch potential conflicts between PWM
+	 * users. */
+	ret = intel_scu_ipc_ioread8(GPIOPWMCTRL, &pwmctrl);
+	if (ret || pwmctrl != 0x01) {
+		if (ret)
+			dev_err(&dev->pdev->dev, "GPIOPWMCTRL read failed\n");
+		else
+			dev_warn(&dev->pdev->dev, "GPIOPWMCTRL was not set to system clock (pwmctrl = 0x%02x)\n", pwmctrl);
+
+		ret = intel_scu_ipc_iowrite8(GPIOPWMCTRL, 0x01);
+		if (ret)
+			dev_err(&dev->pdev->dev, "GPIOPWMCTRL set failed\n");
+	}
+
+	clkdiv = calc_clkdiv(SYSTEMCLK, PWM_FREQUENCY);
+
+	ret = intel_scu_ipc_iowrite8(PWM0CLKDIV1, (clkdiv >> 8) & 0xff);
+	if (!ret)
+		ret = intel_scu_ipc_iowrite8(PWM0CLKDIV0, clkdiv & 0xff);
+
+	if (ret)
+		dev_err(&dev->pdev->dev, "PWM0CLKDIV set failed\n");
+	else
+		dev_dbg(&dev->pdev->dev, "PWM0CLKDIV set to 0x%04x (%d Hz)\n",
+			clkdiv, PWM_FREQUENCY);
+}
+
+#define PWM0DUTYCYCLE			0x67
+
+void tc35876x_brightness_control(struct drm_device *dev, int level)
+{
+	int ret;
+	u8 duty_val;
+	u8 panel_duty_val;
+
+	level = clamp(level, 0, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
+
+	/* PWM duty cycle 0x00...0x63 corresponds to 0...99% */
+	duty_val = level * 0x63 / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL;
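+	/* e.g. level == MDFLD_DSI_BRIGHTNESS_MAX_LEVEL gives duty_val == 0x63
+	 * (99%) and level == 0 gives 0x00. */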
+
+	/* I won't pretend to understand this formula. The English in the
+	 * panel spec is quite poor.
+	 */
+	panel_duty_val = (2 * level - 100) * 0xA9 /
+			 MDFLD_DSI_BRIGHTNESS_MAX_LEVEL + 0x56;
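+	/* Assuming MDFLD_DSI_BRIGHTNESS_MAX_LEVEL == 100, level 100 maps to
+	 * 0xA9 + 0x56 == 0xFF and level 50 maps to 0x56. */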
+
+	ret = intel_scu_ipc_iowrite8(PWM0DUTYCYCLE, duty_val);
+	if (ret)
+		dev_err(&tc35876x_client->dev, "%s: ipc write fail\n",
+			__func__);
+
+	if (cmi_lcd_i2c_client) {
+		ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
+						PANEL_PWM_MAX, panel_duty_val);
+		if (ret < 0)
+			dev_err(&cmi_lcd_i2c_client->dev, "%s: i2c write failed\n",
+				__func__);
+	}
+}
+
+void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev)
+{
+	struct tc35876x_platform_data *pdata;
+
+	if (WARN(!tc35876x_client, "%s called before probe", __func__))
+		return;
+
+	dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
+
+	pdata = dev_get_platdata(&tc35876x_client->dev);
+
+	if (pdata->gpio_panel_bl_en != -1)
+		gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 0);
+
+	if (pdata->gpio_panel_vadd != -1)
+		gpio_set_value_cansleep(pdata->gpio_panel_vadd, 0);
+}
+
+void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
+{
+	struct tc35876x_platform_data *pdata;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+
+	if (WARN(!tc35876x_client, "%s called before probe", __func__))
+		return;
+
+	dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
+
+	pdata = dev_get_platdata(&tc35876x_client->dev);
+
+	if (pdata->gpio_panel_vadd != -1) {
+		gpio_set_value_cansleep(pdata->gpio_panel_vadd, 1);
+		msleep(260);
+	}
+
+	if (cmi_lcd_i2c_client) {
+		int ret;
+		dev_dbg(&cmi_lcd_i2c_client->dev, "setting TCON\n");
+		/* Bit 4 is average_saving. Setting it to 1, the brightness is
+		 * referenced to the average of the frame content. 0 means
+		 * reference to the maximum of frame contents. Bits 3:0 are
+		 * allow_distort. When set to a nonzero value, all color values
+		 * between 255-allow_distort*2 and 255 are mapped to the
+		 * 255-allow_distort*2 value.
+		 */
+		ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
+						PANEL_ALLOW_DISTORT, 0x10);
+		if (ret < 0)
+			dev_err(&cmi_lcd_i2c_client->dev,
+				"i2c write failed (%d)\n", ret);
+		ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
+						PANEL_BYPASS_PWMI, 0);
+		if (ret < 0)
+			dev_err(&cmi_lcd_i2c_client->dev,
+				"i2c write failed (%d)\n", ret);
+		/* Set minimum brightness value - this is tunable */
+		ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
+						PANEL_PWM_MIN, 0x35);
+		if (ret < 0)
+			dev_err(&cmi_lcd_i2c_client->dev,
+				"i2c write failed (%d)\n", ret);
+	}
+
+	if (pdata->gpio_panel_bl_en != -1)
+		gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 1);
+
+	tc35876x_brightness_control(dev, dev_priv->brightness_adjusted);
+}
+
+static struct drm_display_mode *tc35876x_get_config_mode(struct drm_device *dev)
+{
+	struct drm_display_mode *mode;
+
+	dev_dbg(&dev->pdev->dev, "%s\n", __func__);
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	/* FIXME: do this properly. */
+	mode->hdisplay = 1280;
+	mode->vdisplay = 800;
+	mode->hsync_start = 1360;
+	mode->hsync_end = 1400;
+	mode->htotal = 1440;
+	mode->vsync_start = 814;
+	mode->vsync_end = 824;
+	mode->vtotal = 838;
+	mode->clock = 33324 << 1;
+
+	dev_info(&dev->pdev->dev, "hdisplay(w) = %d\n", mode->hdisplay);
+	dev_info(&dev->pdev->dev, "vdisplay(h) = %d\n", mode->vdisplay);
+	dev_info(&dev->pdev->dev, "HSS = %d\n", mode->hsync_start);
+	dev_info(&dev->pdev->dev, "HSE = %d\n", mode->hsync_end);
+	dev_info(&dev->pdev->dev, "htotal = %d\n", mode->htotal);
+	dev_info(&dev->pdev->dev, "VSS = %d\n", mode->vsync_start);
+	dev_info(&dev->pdev->dev, "VSE = %d\n", mode->vsync_end);
+	dev_info(&dev->pdev->dev, "vtotal = %d\n", mode->vtotal);
+	dev_info(&dev->pdev->dev, "clock = %d\n", mode->clock);
+
+	drm_mode_set_name(mode);
+	drm_mode_set_crtcinfo(mode, 0);
+
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	return mode;
+}
+
+/* DV1 Active area 216.96 x 135.6 mm */
+#define DV1_PANEL_WIDTH 217
+#define DV1_PANEL_HEIGHT 136
+
+static int tc35876x_get_panel_info(struct drm_device *dev, int pipe,
+				struct panel_info *pi)
+{
+	if (!dev || !pi)
+		return -EINVAL;
+
+	pi->width_mm = DV1_PANEL_WIDTH;
+	pi->height_mm = DV1_PANEL_HEIGHT;
+
+	return 0;
+}
+
+static int tc35876x_bridge_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	struct tc35876x_platform_data *pdata;
+
+	dev_info(&client->dev, "%s\n", __func__);
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	pdata = dev_get_platdata(&client->dev);
+	if (!pdata) {
+		dev_err(&client->dev, "%s: no platform data\n", __func__);
+		return -ENODEV;
+	}
+
+	if (pdata->gpio_bridge_reset != -1) {
+		gpio_request(pdata->gpio_bridge_reset, "tc35876x bridge reset");
+		gpio_direction_output(pdata->gpio_bridge_reset, 0);
+	}
+
+	if (pdata->gpio_panel_bl_en != -1) {
+		gpio_request(pdata->gpio_panel_bl_en, "tc35876x panel bl en");
+		gpio_direction_output(pdata->gpio_panel_bl_en, 0);
+	}
+
+	if (pdata->gpio_panel_vadd != -1) {
+		gpio_request(pdata->gpio_panel_vadd, "tc35876x panel vadd");
+		gpio_direction_output(pdata->gpio_panel_vadd, 0);
+	}
+
+	tc35876x_client = client;
+
+	return 0;
+}
+
+static int tc35876x_bridge_remove(struct i2c_client *client)
+{
+	struct tc35876x_platform_data *pdata = dev_get_platdata(&client->dev);
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	if (pdata->gpio_bridge_reset != -1)
+		gpio_free(pdata->gpio_bridge_reset);
+
+	if (pdata->gpio_panel_bl_en != -1)
+		gpio_free(pdata->gpio_panel_bl_en);
+
+	if (pdata->gpio_panel_vadd != -1)
+		gpio_free(pdata->gpio_panel_vadd);
+
+	tc35876x_client = NULL;
+
+	return 0;
+}
+
+static const struct i2c_device_id tc35876x_bridge_id[] = {
+	{ "i2c_disp_brig", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, tc35876x_bridge_id);
+
+static struct i2c_driver tc35876x_bridge_i2c_driver = {
+	.driver = {
+		.name = "i2c_disp_brig",
+	},
+	.id_table = tc35876x_bridge_id,
+	.probe = tc35876x_bridge_probe,
+	.remove = tc35876x_bridge_remove,
+};
+
+/* LCD panel I2C */
+static int cmi_lcd_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	dev_info(&client->dev, "%s\n", __func__);
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	cmi_lcd_i2c_client = client;
+
+	return 0;
+}
+
+static int cmi_lcd_i2c_remove(struct i2c_client *client)
+{
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	cmi_lcd_i2c_client = NULL;
+
+	return 0;
+}
+
+static const struct i2c_device_id cmi_lcd_i2c_id[] = {
+	{ "cmi-lcd", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, cmi_lcd_i2c_id);
+
+static struct i2c_driver cmi_lcd_i2c_driver = {
+	.driver = {
+		.name = "cmi-lcd",
+	},
+	.id_table = cmi_lcd_i2c_id,
+	.probe = cmi_lcd_i2c_probe,
+	.remove = cmi_lcd_i2c_remove,
+};
+
+/* HACK to create I2C device while it's not created by platform code */
+#define CMI_LCD_I2C_ADAPTER	2
+#define CMI_LCD_I2C_ADDR	0x60
+
+static int cmi_lcd_hack_create_device(void)
+{
+	struct i2c_adapter *adapter;
+	struct i2c_client *client;
+	struct i2c_board_info info = {
+		.type = "cmi-lcd",
+		.addr = CMI_LCD_I2C_ADDR,
+	};
+
+	pr_debug("%s\n", __func__);
+
+	adapter = i2c_get_adapter(CMI_LCD_I2C_ADAPTER);
+	if (!adapter) {
+		pr_err("%s: i2c_get_adapter(%d) failed\n", __func__,
+			CMI_LCD_I2C_ADAPTER);
+		return -EINVAL;
+	}
+
+	client = i2c_new_device(adapter, &info);
+	if (!client) {
+		pr_err("%s: i2c_new_device() failed\n", __func__);
+		i2c_put_adapter(adapter);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = {
+	.dpms = mdfld_dsi_dpi_dpms,
+	.mode_fixup = mdfld_dsi_dpi_mode_fixup,
+	.prepare = mdfld_dsi_dpi_prepare,
+	.mode_set = mdfld_dsi_dpi_mode_set,
+	.commit = mdfld_dsi_dpi_commit,
+};
+
+static const struct drm_encoder_funcs tc35876x_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+const struct panel_funcs mdfld_tc35876x_funcs = {
+	.encoder_funcs = &tc35876x_encoder_funcs,
+	.encoder_helper_funcs = &tc35876x_encoder_helper_funcs,
+	.get_config_mode = tc35876x_get_config_mode,
+	.get_panel_info = tc35876x_get_panel_info,
+};
+
+void tc35876x_init(struct drm_device *dev)
+{
+	int r;
+
+	dev_dbg(&dev->pdev->dev, "%s\n", __func__);
+
+	cmi_lcd_hack_create_device();
+
+	r = i2c_add_driver(&cmi_lcd_i2c_driver);
+	if (r < 0)
+		dev_err(&dev->pdev->dev,
+			"%s: i2c_add_driver() for %s failed (%d)\n",
+			__func__, cmi_lcd_i2c_driver.driver.name, r);
+
+	r = i2c_add_driver(&tc35876x_bridge_i2c_driver);
+	if (r < 0)
+		dev_err(&dev->pdev->dev,
+			"%s: i2c_add_driver() for %s failed (%d)\n",
+			__func__, tc35876x_bridge_i2c_driver.driver.name, r);
+
+	tc35876x_brightness_init(dev);
+}
+
+void tc35876x_exit(void)
+{
+	pr_debug("%s\n", __func__);
+
+	i2c_del_driver(&tc35876x_bridge_i2c_driver);
+
+	if (cmi_lcd_i2c_client)
+		i2c_del_driver(&cmi_lcd_i2c_driver);
+}
diff --git a/linux-imx/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h b/linux-imx/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
new file mode 100644
index 0000000..b14b7f9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MDFLD_DSI_LVDS_BRIDGE_H__
+#define __MDFLD_DSI_LVDS_BRIDGE_H__
+
+void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state);
+void tc35876x_configure_lvds_bridge(struct drm_device *dev);
+void tc35876x_brightness_control(struct drm_device *dev, int level);
+void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev);
+void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev);
+void tc35876x_init(struct drm_device *dev);
+void tc35876x_exit(void);
+
+extern const struct panel_funcs mdfld_tc35876x_funcs;
+
+#endif /*__MDFLD_DSI_LVDS_BRIDGE_H__*/
diff --git a/linux-imx/drivers/gpu/drm/i2c/Kconfig b/linux-imx/drivers/gpu/drm/i2c/Kconfig
new file mode 100644
index 0000000..4d341db
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i2c/Kconfig
@@ -0,0 +1,28 @@
+menu "I2C encoder or helper chips"
+     depends on DRM && DRM_KMS_HELPER && I2C
+
+config DRM_I2C_CH7006
+	tristate "Chrontel ch7006 TV encoder"
+	default m if DRM_NOUVEAU
+	help
+	  Support for Chrontel ch7006 and similar TV encoders, found
+	  on some nVidia video cards.
+
+	  This driver is currently only useful if you're also using
+	  the nouveau driver.
+
+config DRM_I2C_SIL164
+	tristate "Silicon Image sil164 TMDS transmitter"
+	default m if DRM_NOUVEAU
+	help
+	  Support for sil164 and similar single-link (or dual-link
+	  when used in pairs) TMDS transmitters, used in some nVidia
+	  video cards.
+
+config DRM_I2C_NXP_TDA998X
+	tristate "NXP Semiconductors TDA998X HDMI encoder"
+	default m if DRM_TILCDC
+	help
+	  Support for NXP Semiconductors TDA998X HDMI encoders.
+
+endmenu
diff --git a/linux-imx/drivers/gpu/drm/i2c/Makefile b/linux-imx/drivers/gpu/drm/i2c/Makefile
new file mode 100644
index 0000000..43aa33b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i2c/Makefile
@@ -0,0 +1,10 @@
+ccflags-y := -Iinclude/drm
+
+ch7006-y := ch7006_drv.o ch7006_mode.o
+obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
+
+sil164-y := sil164_drv.o
+obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o
+
+tda998x-y := tda998x_drv.o
+obj-$(CONFIG_DRM_I2C_NXP_TDA998X) += tda998x.o
diff --git a/linux-imx/drivers/gpu/drm/i2c/ch7006_drv.c b/linux-imx/drivers/gpu/drm/i2c/ch7006_drv.c
new file mode 100644
index 0000000..51fa323
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -0,0 +1,552 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+
+#include "ch7006_priv.h"
+
+/* DRM encoder functions */
+
+static void ch7006_encoder_set_config(struct drm_encoder *encoder,
+				      void *params)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+	priv->params = *(struct ch7006_encoder_params *)params;
+}
+
+static void ch7006_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+	drm_property_destroy(encoder->dev, priv->scale_property);
+
+	kfree(priv);
+	to_encoder_slave(encoder)->slave_priv = NULL;
+
+	drm_i2c_encoder_destroy(encoder);
+}
+
+static void ch7006_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_state *state = &priv->state;
+
+	ch7006_dbg(client, "\n");
+
+	if (mode == priv->last_dpms)
+		return;
+	priv->last_dpms = mode;
+
+	ch7006_setup_power_state(encoder);
+
+	ch7006_load_reg(client, state, CH7006_POWER);
+}
+
+static void ch7006_encoder_save(struct drm_encoder *encoder)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+	ch7006_dbg(client, "\n");
+
+	ch7006_state_save(client, &priv->saved_state);
+}
+
+static void ch7006_encoder_restore(struct drm_encoder *encoder)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+	ch7006_dbg(client, "\n");
+
+	ch7006_state_load(client, &priv->saved_state);
+}
+
+static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
+				      const struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+	/* The ch7006 is painfully picky about its input timings, so no
+	 * custom modes for now... */
+
+	priv->mode = ch7006_lookup_mode(encoder, mode);
+
+	return !!priv->mode;
+}
+
+static int ch7006_encoder_mode_valid(struct drm_encoder *encoder,
+				     struct drm_display_mode *mode)
+{
+	if (ch7006_lookup_mode(encoder, mode))
+		return MODE_OK;
+	else
+		return MODE_BAD;
+}
+
+static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
+				     struct drm_display_mode *drm_mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_encoder_params *params = &priv->params;
+	struct ch7006_state *state = &priv->state;
+	uint8_t *regs = state->regs;
+	struct ch7006_mode *mode = priv->mode;
+	struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+	int start_active;
+
+	ch7006_dbg(client, "\n");
+
+	regs[CH7006_DISPMODE] = norm->dispmode | mode->dispmode;
+	regs[CH7006_BWIDTH] = 0;
+	regs[CH7006_INPUT_FORMAT] = bitf(CH7006_INPUT_FORMAT_FORMAT,
+					 params->input_format);
+
+	regs[CH7006_CLKMODE] = CH7006_CLKMODE_SUBC_LOCK
+		| bitf(CH7006_CLKMODE_XCM, params->xcm)
+		| bitf(CH7006_CLKMODE_PCM, params->pcm);
+	if (params->clock_mode)
+		regs[CH7006_CLKMODE] |= CH7006_CLKMODE_MASTER;
+	if (params->clock_edge)
+		regs[CH7006_CLKMODE] |= CH7006_CLKMODE_POS_EDGE;
+
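+	/* Offset from the start of hsync to the start of active video,
+	 * with both edges rounded down to a multiple of 8 pixels. */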
+	start_active = (drm_mode->htotal & ~0x7) - (drm_mode->hsync_start & ~0x7);
+	regs[CH7006_POV] = bitf(CH7006_POV_START_ACTIVE_8, start_active);
+	regs[CH7006_START_ACTIVE] = bitf(CH7006_START_ACTIVE_0, start_active);
+
+	regs[CH7006_INPUT_SYNC] = 0;
+	if (params->sync_direction)
+		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_OUTPUT;
+	if (params->sync_encoding)
+		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_EMBEDDED;
+	if (drm_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PVSYNC;
+	if (drm_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PHSYNC;
+
+	regs[CH7006_DETECT] = 0;
+	regs[CH7006_BCLKOUT] = 0;
+
+	regs[CH7006_SUBC_INC3] = 0;
+	if (params->pout_level)
+		regs[CH7006_SUBC_INC3] |= CH7006_SUBC_INC3_POUT_3_3V;
+
+	regs[CH7006_SUBC_INC4] = 0;
+	if (params->active_detect)
+		regs[CH7006_SUBC_INC4] |= CH7006_SUBC_INC4_DS_INPUT;
+
+	regs[CH7006_PLL_CONTROL] = priv->saved_state.regs[CH7006_PLL_CONTROL];
+
+	ch7006_setup_levels(encoder);
+	ch7006_setup_subcarrier(encoder);
+	ch7006_setup_pll(encoder);
+	ch7006_setup_power_state(encoder);
+	ch7006_setup_properties(encoder);
+
+	ch7006_state_load(client, state);
+}
+
+static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encoder,
+						       struct drm_connector *connector)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_state *state = &priv->state;
+	int det;
+
+	ch7006_dbg(client, "\n");
+
+	ch7006_save_reg(client, state, CH7006_DETECT);
+	ch7006_save_reg(client, state, CH7006_POWER);
+	ch7006_save_reg(client, state, CH7006_CLKMODE);
+
+	ch7006_write(client, CH7006_POWER, CH7006_POWER_RESET |
+					   bitfs(CH7006_POWER_LEVEL, NORMAL));
+	ch7006_write(client, CH7006_CLKMODE, CH7006_CLKMODE_MASTER);
+
+	ch7006_write(client, CH7006_DETECT, CH7006_DETECT_SENSE);
+
+	ch7006_write(client, CH7006_DETECT, 0);
+
+	det = ch7006_read(client, CH7006_DETECT);
+
+	ch7006_load_reg(client, state, CH7006_CLKMODE);
+	ch7006_load_reg(client, state, CH7006_POWER);
+	ch7006_load_reg(client, state, CH7006_DETECT);
+
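+	/* Each *_TEST bit apparently reads back low when a load is
+	 * present on the corresponding DAC output: all three low means
+	 * SCART, Y and C low means S-Video, and CVBS low alone means
+	 * composite. */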
+	if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
+		    CH7006_DETECT_SVIDEO_C_TEST|
+		    CH7006_DETECT_CVBS_TEST)) == 0)
+		priv->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
+	else if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
+			 CH7006_DETECT_SVIDEO_C_TEST)) == 0)
+		priv->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
+	else if ((det & CH7006_DETECT_CVBS_TEST) == 0)
+		priv->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
+	else
+		priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+
+	drm_object_property_set_value(&connector->base,
+			encoder->dev->mode_config.tv_subconnector_property,
+							priv->subconnector);
+
+	return priv->subconnector ? connector_status_connected :
+					connector_status_disconnected;
+}
+
+static int ch7006_encoder_get_modes(struct drm_encoder *encoder,
+				    struct drm_connector *connector)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_mode *mode;
+	int n = 0;
+
+	for (mode = ch7006_modes; mode->mode.clock; mode++) {
+		if (~mode->valid_scales & 1<<priv->scale ||
+		    ~mode->valid_norms & 1<<priv->norm)
+			continue;
+
+		drm_mode_probed_add(connector,
+				drm_mode_duplicate(encoder->dev, &mode->mode));
+
+		n++;
+	}
+
+	return n;
+}
+
+static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
+					   struct drm_connector *connector)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct drm_mode_config *conf = &dev->mode_config;
+
+	drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
+
+	priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
+
+	drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
+				      priv->select_subconnector);
+	drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
+				      priv->subconnector);
+	drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
+				      priv->hmargin);
+	drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
+				      priv->vmargin);
+	drm_object_attach_property(&connector->base, conf->tv_mode_property,
+				      priv->norm);
+	drm_object_attach_property(&connector->base, conf->tv_brightness_property,
+				      priv->brightness);
+	drm_object_attach_property(&connector->base, conf->tv_contrast_property,
+				      priv->contrast);
+	drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
+				      priv->flicker);
+	drm_object_attach_property(&connector->base, priv->scale_property,
+				      priv->scale);
+
+	return 0;
+}
+
+static int ch7006_encoder_set_property(struct drm_encoder *encoder,
+				       struct drm_connector *connector,
+				       struct drm_property *property,
+				       uint64_t val)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_state *state = &priv->state;
+	struct drm_mode_config *conf = &encoder->dev->mode_config;
+	struct drm_crtc *crtc = encoder->crtc;
+	bool modes_changed = false;
+
+	ch7006_dbg(client, "\n");
+
+	if (property == conf->tv_select_subconnector_property) {
+		priv->select_subconnector = val;
+
+		ch7006_setup_power_state(encoder);
+
+		ch7006_load_reg(client, state, CH7006_POWER);
+
+	} else if (property == conf->tv_left_margin_property) {
+		priv->hmargin = val;
+
+		ch7006_setup_properties(encoder);
+
+		ch7006_load_reg(client, state, CH7006_POV);
+		ch7006_load_reg(client, state, CH7006_HPOS);
+
+	} else if (property == conf->tv_bottom_margin_property) {
+		priv->vmargin = val;
+
+		ch7006_setup_properties(encoder);
+
+		ch7006_load_reg(client, state, CH7006_POV);
+		ch7006_load_reg(client, state, CH7006_VPOS);
+
+	} else if (property == conf->tv_mode_property) {
+		if (connector->dpms != DRM_MODE_DPMS_OFF)
+			return -EINVAL;
+
+		priv->norm = val;
+
+		modes_changed = true;
+
+	} else if (property == conf->tv_brightness_property) {
+		priv->brightness = val;
+
+		ch7006_setup_levels(encoder);
+
+		ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
+
+	} else if (property == conf->tv_contrast_property) {
+		priv->contrast = val;
+
+		ch7006_setup_properties(encoder);
+
+		ch7006_load_reg(client, state, CH7006_CONTRAST);
+
+	} else if (property == conf->tv_flicker_reduction_property) {
+		priv->flicker = val;
+
+		ch7006_setup_properties(encoder);
+
+		ch7006_load_reg(client, state, CH7006_FFILTER);
+
+	} else if (property == priv->scale_property) {
+		if (connector->dpms != DRM_MODE_DPMS_OFF)
+			return -EINVAL;
+
+		priv->scale = val;
+
+		modes_changed = true;
+
+	} else {
+		return -EINVAL;
+	}
+
+	if (modes_changed) {
+		drm_helper_probe_single_connector_modes(connector, 0, 0);
+
+		/* Disable the crtc to ensure a full modeset is
+		 * performed whenever it's turned on again. */
+		if (crtc) {
+			struct drm_mode_set modeset = {
+				.crtc = crtc,
+			};
+
+			drm_mode_set_config_internal(&modeset);
+		}
+	}
+
+	return 0;
+}
+
+static struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
+	.set_config = ch7006_encoder_set_config,
+	.destroy = ch7006_encoder_destroy,
+	.dpms = ch7006_encoder_dpms,
+	.save = ch7006_encoder_save,
+	.restore = ch7006_encoder_restore,
+	.mode_fixup = ch7006_encoder_mode_fixup,
+	.mode_valid = ch7006_encoder_mode_valid,
+	.mode_set = ch7006_encoder_mode_set,
+	.detect = ch7006_encoder_detect,
+	.get_modes = ch7006_encoder_get_modes,
+	.create_resources = ch7006_encoder_create_resources,
+	.set_property = ch7006_encoder_set_property,
+};
+
+
+/* I2C driver functions */
+
+static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	uint8_t addr = CH7006_VERSION_ID;
+	uint8_t val;
+	int ret;
+
+	ch7006_dbg(client, "\n");
+
+	ret = i2c_master_send(client, &addr, sizeof(addr));
+	if (ret < 0)
+		goto fail;
+
+	ret = i2c_master_recv(client, &val, sizeof(val));
+	if (ret < 0)
+		goto fail;
+
+	ch7006_info(client, "Detected version ID: %x\n", val);
+
+	/* I don't know what this is for, but otherwise I get no
+	 * signal.
+	 */
+	ch7006_write(client, 0x3d, 0x0);
+
+	return 0;
+
+fail:
+	ch7006_err(client, "Error %d reading version ID\n", ret);
+
+	return -ENODEV;
+}
+
+static int ch7006_remove(struct i2c_client *client)
+{
+	ch7006_dbg(client, "\n");
+
+	return 0;
+}
+
+static int ch7006_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+
+	ch7006_dbg(client, "\n");
+
+	ch7006_write(client, 0x3d, 0x0);
+
+	return 0;
+}
+
+static int ch7006_encoder_init(struct i2c_client *client,
+			       struct drm_device *dev,
+			       struct drm_encoder_slave *encoder)
+{
+	struct ch7006_priv *priv;
+	int i;
+
+	ch7006_dbg(client, "\n");
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	encoder->slave_priv = priv;
+	encoder->slave_funcs = &ch7006_encoder_funcs;
+
+	priv->norm = TV_NORM_PAL;
+	priv->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
+	priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+	priv->scale = 1;
+	priv->contrast = 50;
+	priv->brightness = 50;
+	priv->flicker = 50;
+	priv->hmargin = 50;
+	priv->vmargin = 50;
+	priv->last_dpms = -1;
+	priv->chip_version = ch7006_read(client, CH7006_VERSION_ID);
+
+	if (ch7006_tv_norm) {
+		for (i = 0; i < NUM_TV_NORMS; i++) {
+			if (!strcmp(ch7006_tv_norm_names[i], ch7006_tv_norm)) {
+				priv->norm = i;
+				break;
+			}
+		}
+
+		if (i == NUM_TV_NORMS)
+			ch7006_err(client, "Invalid TV norm setting \"%s\".\n",
+				   ch7006_tv_norm);
+	}
+
+	if (ch7006_scale >= 0 && ch7006_scale <= 2)
+		priv->scale = ch7006_scale;
+	else
+		ch7006_err(client, "Invalid scale setting \"%d\".\n",
+			   ch7006_scale);
+
+	return 0;
+}
+
+static struct i2c_device_id ch7006_ids[] = {
+	{ "ch7006", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, ch7006_ids);
+
+static const struct dev_pm_ops ch7006_pm_ops = {
+	.resume = ch7006_resume,
+};
+
+static struct drm_i2c_encoder_driver ch7006_driver = {
+	.i2c_driver = {
+		.probe = ch7006_probe,
+		.remove = ch7006_remove,
+
+		.driver = {
+			.name = "ch7006",
+			.pm = &ch7006_pm_ops,
+		},
+
+		.id_table = ch7006_ids,
+	},
+
+	.encoder_init = ch7006_encoder_init,
+};
+
+
+/* Module initialization */
+
+static int __init ch7006_init(void)
+{
+	return drm_i2c_encoder_register(THIS_MODULE, &ch7006_driver);
+}
+
+static void __exit ch7006_exit(void)
+{
+	drm_i2c_encoder_unregister(&ch7006_driver);
+}
+
+int ch7006_debug;
+module_param_named(debug, ch7006_debug, int, 0600);
+MODULE_PARM_DESC(debug, "Enable debug output.");
+
+char *ch7006_tv_norm;
+module_param_named(tv_norm, ch7006_tv_norm, charp, 0600);
+MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
+		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, PAL-60, NTSC-M, NTSC-J.\n"
+		 "\t\tDefault: PAL");
+
+int ch7006_scale = 1;
+module_param_named(scale, ch7006_scale, int, 0600);
+MODULE_PARM_DESC(scale, "Default scale.\n"
+		 "\t\tSupported: 0 -> Select video modes with a higher blanking ratio.\n"
+		 "\t\t\t1 -> Select default video modes.\n"
+		 "\t\t\t2 -> Select video modes with a lower blanking ratio.");
+
+MODULE_AUTHOR("Francisco Jerez <currojerez@riseup.net>");
+MODULE_DESCRIPTION("Chrontel ch7006 TV encoder driver");
+MODULE_LICENSE("GPL and additional rights");
+
+module_init(ch7006_init);
+module_exit(ch7006_exit);
diff --git a/linux-imx/drivers/gpu/drm/i2c/ch7006_mode.c b/linux-imx/drivers/gpu/drm/i2c/ch7006_mode.c
new file mode 100644
index 0000000..9b83574
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ch7006_priv.h"
+
+char *ch7006_tv_norm_names[] = {
+	[TV_NORM_PAL] = "PAL",
+	[TV_NORM_PAL_M] = "PAL-M",
+	[TV_NORM_PAL_N] = "PAL-N",
+	[TV_NORM_PAL_NC] = "PAL-Nc",
+	[TV_NORM_PAL_60] = "PAL-60",
+	[TV_NORM_NTSC_M] = "NTSC-M",
+	[TV_NORM_NTSC_J] = "NTSC-J",
+};
+
+#define NTSC_LIKE_TIMINGS .vrefresh = 60 * fixed1/1.001,		\
+		.vdisplay = 480,					\
+		.vtotal = 525,						\
+		.hvirtual = 660
+
+#define PAL_LIKE_TIMINGS .vrefresh = 50 * fixed1,		\
+		.vdisplay = 576,				\
+		.vtotal = 625,					\
+		.hvirtual = 810
+
+struct ch7006_tv_norm_info ch7006_tv_norms[] = {
+	[TV_NORM_NTSC_M] = {
+		NTSC_LIKE_TIMINGS,
+		.black_level = 0.339 * fixed1,
+		.subc_freq = 3579545 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC),
+		.voffset = 0,
+	},
+	[TV_NORM_NTSC_J] = {
+		NTSC_LIKE_TIMINGS,
+		.black_level = 0.286 * fixed1,
+		.subc_freq = 3579545 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC_J),
+		.voffset = 0,
+	},
+	[TV_NORM_PAL] = {
+		PAL_LIKE_TIMINGS,
+		.black_level = 0.3 * fixed1,
+		.subc_freq = 4433618.75 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
+		.voffset = 0,
+	},
+	[TV_NORM_PAL_M] = {
+		NTSC_LIKE_TIMINGS,
+		.black_level = 0.339 * fixed1,
+		.subc_freq = 3575611.433 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
+		.voffset = 16,
+	},
+
+	/* The following modes seem to work correctly, but they're
+	 * undocumented. */
+
+	[TV_NORM_PAL_N] = {
+		PAL_LIKE_TIMINGS,
+		.black_level = 0.339 * fixed1,
+		.subc_freq = 4433618.75 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
+		.voffset = 0,
+	},
+	[TV_NORM_PAL_NC] = {
+		PAL_LIKE_TIMINGS,
+		.black_level = 0.3 * fixed1,
+		.subc_freq = 3582056.25 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
+		.voffset = 0,
+	},
+	[TV_NORM_PAL_60] = {
+		NTSC_LIKE_TIMINGS,
+		.black_level = 0.3 * fixed1,
+		.subc_freq = 4433618.75 * fixed1,
+		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
+		.voffset = 16,
+	},
+};
+
+#define __MODE(f, hd, vd, ht, vt, hsynp, vsynp,				\
+	       subc, scale, scale_mask, norm_mask, e_hd, e_vd) {	\
+		.mode = {						\
+			.name = #hd "x" #vd,				\
+			.status = 0,					\
+			.type = DRM_MODE_TYPE_DRIVER,			\
+			.clock = f,					\
+			.hdisplay = hd,					\
+			.hsync_start = e_hd + 16,			\
+			.hsync_end = e_hd + 80,				\
+			.htotal = ht,					\
+			.hskew = 0,					\
+			.vdisplay = vd,					\
+			.vsync_start = vd + 10,				\
+			.vsync_end = vd + 26,				\
+			.vtotal = vt,					\
+			.vscan = 0,					\
+			.flags = DRM_MODE_FLAG_##hsynp##HSYNC |		\
+				DRM_MODE_FLAG_##vsynp##VSYNC,		\
+			.vrefresh = 0,					\
+		},							\
+		.enc_hdisp = e_hd,					\
+		.enc_vdisp = e_vd,					\
+		.subc_coeff = subc * fixed1,				\
+		.dispmode = bitfs(CH7006_DISPMODE_SCALING_RATIO, scale) | \
+			    bitfs(CH7006_DISPMODE_INPUT_RES, e_hd##x##e_vd), \
+		.valid_scales = scale_mask,				\
+		.valid_norms = norm_mask				\
+	 }
+
+#define MODE(f, hd, vd, ht, vt, hsynp, vsynp,				\
+	     subc, scale, scale_mask, norm_mask)			\
+	__MODE(f, hd, vd, ht, vt, hsynp, vsynp, subc, scale,		\
+	       scale_mask, norm_mask, hd, vd)
+
+#define NTSC_LIKE (1 << TV_NORM_NTSC_M | 1 << TV_NORM_NTSC_J |		\
+		   1 << TV_NORM_PAL_M | 1 << TV_NORM_PAL_60)
+
+#define PAL_LIKE (1 << TV_NORM_PAL | 1 << TV_NORM_PAL_N | 1 << TV_NORM_PAL_NC)
+
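+/* Hardcoded mode table.  valid_scales and valid_norms are bitmasks
+ * indexed by the scale setting (0-2) and by enum ch7006_tv_norm
+ * respectively, so e.g. a scale mask of 0x6 accepts scales 1 and 2. */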
+struct ch7006_mode ch7006_modes[] = {
+	MODE(21000, 512, 384, 840, 500, N, N, 181.797557582, 5_4, 0x6, PAL_LIKE),
+	MODE(26250, 512, 384, 840, 625, N, N, 145.438046066, 1_1, 0x1, PAL_LIKE),
+	MODE(20140, 512, 384, 800, 420, N, N, 213.257083791, 5_4, 0x4, NTSC_LIKE),
+	MODE(24671, 512, 384, 784, 525, N, N, 174.0874153, 1_1, 0x3, NTSC_LIKE),
+	MODE(28125, 720, 400, 1125, 500, N, N, 135.742176298, 5_4, 0x6, PAL_LIKE),
+	MODE(34875, 720, 400, 1116, 625, N, N, 109.469496898, 1_1, 0x1, PAL_LIKE),
+	MODE(23790, 720, 400, 945, 420, N, N, 160.475642016, 5_4, 0x4, NTSC_LIKE),
+	MODE(29455, 720, 400, 936, 525, N, N, 129.614941843, 1_1, 0x3, NTSC_LIKE),
+	MODE(25000, 640, 400, 1000, 500, N, N, 152.709948279, 5_4, 0x6, PAL_LIKE),
+	MODE(31500, 640, 400, 1008, 625, N, N, 121.198371646, 1_1, 0x1, PAL_LIKE),
+	MODE(21147, 640, 400, 840, 420, N, N, 180.535097338, 5_4, 0x4, NTSC_LIKE),
+	MODE(26434, 640, 400, 840, 525, N, N, 144.42807787, 1_1, 0x2, NTSC_LIKE),
+	MODE(30210, 640, 400, 840, 600, N, N, 126.374568276, 7_8, 0x1, NTSC_LIKE),
+	MODE(21000, 640, 480, 840, 500, N, N, 181.797557582, 5_4, 0x4, PAL_LIKE),
+	MODE(26250, 640, 480, 840, 625, N, N, 145.438046066, 1_1, 0x2, PAL_LIKE),
+	MODE(31500, 640, 480, 840, 750, N, N, 121.198371646, 5_6, 0x1, PAL_LIKE),
+	MODE(24671, 640, 480, 784, 525, N, N, 174.0874153, 1_1, 0x4, NTSC_LIKE),
+	MODE(28196, 640, 480, 784, 600, N, N, 152.326488422, 7_8, 0x2, NTSC_LIKE),
+	MODE(30210, 640, 480, 800, 630, N, N, 142.171389101, 5_6, 0x1, NTSC_LIKE),
+	__MODE(29500, 720, 576, 944, 625, P, P, 145.592111636, 1_1, 0x7, PAL_LIKE, 800, 600),
+	MODE(36000, 800, 600, 960, 750, P, P, 119.304647022, 5_6, 0x6, PAL_LIKE),
+	MODE(39000, 800, 600, 936, 836, P, P, 110.127366499, 3_4, 0x1, PAL_LIKE),
+	MODE(39273, 800, 600, 1040, 630, P, P, 145.816809399, 5_6, 0x4, NTSC_LIKE),
+	MODE(43636, 800, 600, 1040, 700, P, P, 131.235128487, 3_4, 0x2, NTSC_LIKE),
+	MODE(47832, 800, 600, 1064, 750, P, P, 119.723275165, 7_10, 0x1, NTSC_LIKE),
+	{}
+};
+
+struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
+				       const struct drm_display_mode *drm_mode)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_mode *mode;
+
+	for (mode = ch7006_modes; mode->mode.clock; mode++) {
+
+		if (~mode->valid_norms & 1<<priv->norm)
+			continue;
+
+		if (mode->mode.hdisplay != drm_mode->hdisplay ||
+		    mode->mode.vdisplay != drm_mode->vdisplay ||
+		    mode->mode.vtotal != drm_mode->vtotal ||
+		    mode->mode.htotal != drm_mode->htotal ||
+		    mode->mode.clock != drm_mode->clock)
+			continue;
+
+		return mode;
+	}
+
+	return NULL;
+}
+
+/* Some common HW state calculation code */
+
+void ch7006_setup_levels(struct drm_encoder *encoder)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	uint8_t *regs = priv->state.regs;
+	struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+	int gain;
+	int black_level;
+
+	/* Set DAC_GAIN if the voltage drop between white and black is
+	 * high enough. */
+	if (norm->black_level < 339*fixed1/1000) {
+		gain = 76;
+
+		regs[CH7006_INPUT_FORMAT] |= CH7006_INPUT_FORMAT_DAC_GAIN;
+	} else {
+		gain = 71;
+
+		regs[CH7006_INPUT_FORMAT] &= ~CH7006_INPUT_FORMAT_DAC_GAIN;
+	}
+
+	black_level = round_fixed(norm->black_level*26625)/gain;
+
+	/* Correct it with the specified brightness. */
+	black_level = interpolate(90, black_level, 208, priv->brightness);
+
+	regs[CH7006_BLACK_LEVEL] = bitf(CH7006_BLACK_LEVEL_0, black_level);
+
+	ch7006_dbg(client, "black level: %d\n", black_level);
+}
+
+void ch7006_setup_subcarrier(struct drm_encoder *encoder)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_state *state = &priv->state;
+	struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+	struct ch7006_mode *mode = priv->mode;
+	uint32_t subc_inc;
+
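+	/* Both factors are 32.32 fixed point; shifting them down before
+	 * multiplying keeps the 64-bit intermediate from overflowing
+	 * while leaving the product in 32.32 form for round_fixed(). */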
+	subc_inc = round_fixed((mode->subc_coeff >> 8)
+			       * (norm->subc_freq >> 24));
+
+	setbitf(state, CH7006_SUBC_INC0, 28, subc_inc);
+	setbitf(state, CH7006_SUBC_INC1, 24, subc_inc);
+	setbitf(state, CH7006_SUBC_INC2, 20, subc_inc);
+	setbitf(state, CH7006_SUBC_INC3, 16, subc_inc);
+	setbitf(state, CH7006_SUBC_INC4, 12, subc_inc);
+	setbitf(state, CH7006_SUBC_INC5, 8, subc_inc);
+	setbitf(state, CH7006_SUBC_INC6, 4, subc_inc);
+	setbitf(state, CH7006_SUBC_INC7, 0, subc_inc);
+
+	ch7006_dbg(client, "subcarrier inc: %u\n", subc_inc);
+}
+
+void ch7006_setup_pll(struct drm_encoder *encoder)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	uint8_t *regs = priv->state.regs;
+	struct ch7006_mode *mode = priv->mode;
+	int n, best_n = 0;
+	int m, best_m = 0;
+	int freq, best_freq = 0;
+
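+	/* Exhaustive search for the divider pair whose output frequency
+	 * CH7006_FREQ0*(n+2)/(m+2) (both in kHz) comes closest to the
+	 * requested pixel clock. */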
+	for (n = 0; n < CH7006_MAXN; n++) {
+		for (m = 0; m < CH7006_MAXM; m++) {
+			freq = CH7006_FREQ0*(n+2)/(m+2);
+
+			if (abs(freq - mode->mode.clock) <
+			    abs(best_freq - mode->mode.clock)) {
+				best_freq = freq;
+				best_n = n;
+				best_m = m;
+			}
+		}
+	}
+
+	regs[CH7006_PLLOV] = bitf(CH7006_PLLOV_N_8, best_n) |
+		bitf(CH7006_PLLOV_M_8, best_m);
+
+	regs[CH7006_PLLM] = bitf(CH7006_PLLM_0, best_m);
+	regs[CH7006_PLLN] = bitf(CH7006_PLLN_0, best_n);
+
+	if (best_n < 108)
+		regs[CH7006_PLL_CONTROL] |= CH7006_PLL_CONTROL_CAPACITOR;
+	else
+		regs[CH7006_PLL_CONTROL] &= ~CH7006_PLL_CONTROL_CAPACITOR;
+
+	ch7006_dbg(client, "n=%d m=%d f=%d c=%d\n",
+		   best_n, best_m, best_freq, best_n < 108);
+}
+
+void ch7006_setup_power_state(struct drm_encoder *encoder)
+{
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	uint8_t *power = &priv->state.regs[CH7006_POWER];
+	int subconnector;
+
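+	/* An explicit user selection takes precedence over the
+	 * autodetected subconnector. */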
+	subconnector = priv->select_subconnector ? priv->select_subconnector :
+							priv->subconnector;
+
+	*power = CH7006_POWER_RESET;
+
+	if (priv->last_dpms == DRM_MODE_DPMS_ON) {
+		switch (subconnector) {
+		case DRM_MODE_SUBCONNECTOR_SVIDEO:
+			*power |= bitfs(CH7006_POWER_LEVEL, CVBS_OFF);
+			break;
+		case DRM_MODE_SUBCONNECTOR_Composite:
+			*power |= bitfs(CH7006_POWER_LEVEL, SVIDEO_OFF);
+			break;
+		case DRM_MODE_SUBCONNECTOR_SCART:
+			*power |= bitfs(CH7006_POWER_LEVEL, NORMAL) |
+				CH7006_POWER_SCART;
+			break;
+		}
+
+	} else {
+		if (priv->chip_version >= 0x20)
+			*power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF);
+		else
+			*power |= bitfs(CH7006_POWER_LEVEL, POWER_OFF);
+	}
+}
+
+void ch7006_setup_properties(struct drm_encoder *encoder)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	struct ch7006_priv *priv = to_ch7006_priv(encoder);
+	struct ch7006_state *state = &priv->state;
+	struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+	struct ch7006_mode *ch_mode = priv->mode;
+	struct drm_display_mode *mode = &ch_mode->mode;
+	uint8_t *regs = state->regs;
+	int flicker, contrast, hpos, vpos;
+	uint64_t scale, aspect;
+
+	flicker = interpolate(0, 2, 3, priv->flicker);
+	regs[CH7006_FFILTER] = bitf(CH7006_FFILTER_TEXT, flicker) |
+		bitf(CH7006_FFILTER_LUMA, flicker) |
+		bitf(CH7006_FFILTER_CHROMA, 1);
+
+	contrast = interpolate(0, 5, 7, priv->contrast);
+	regs[CH7006_CONTRAST] = bitf(CH7006_CONTRAST_0, contrast);
+
+	scale = norm->vtotal*fixed1;
+	do_div(scale, mode->vtotal);
+
+	aspect = ch_mode->enc_hdisp*fixed1;
+	do_div(aspect, ch_mode->enc_vdisp);
+
+	hpos = round_fixed((norm->hvirtual * aspect - mode->hdisplay * scale)
+			   * priv->hmargin * mode->vtotal) / norm->vtotal / 100 / 4;
+
+	setbitf(state, CH7006_POV, HPOS_8, hpos);
+	setbitf(state, CH7006_HPOS, 0, hpos);
+
+	vpos = max(0, norm->vdisplay - round_fixed(mode->vdisplay*scale)
+		   + norm->voffset) * priv->vmargin / 100 / 2;
+
+	setbitf(state, CH7006_POV, VPOS_8, vpos);
+	setbitf(state, CH7006_VPOS, 0, vpos);
+
+	ch7006_dbg(client, "hpos: %d, vpos: %d\n", hpos, vpos);
+}
+
+/* HW access functions */
+
+void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val)
+{
+	uint8_t buf[] = {addr, val};
+	int ret;
+
+	ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+	if (ret < 0)
+		ch7006_err(client, "Error %d writing to subaddress 0x%x\n",
+			   ret, addr);
+}
+
+uint8_t ch7006_read(struct i2c_client *client, uint8_t addr)
+{
+	uint8_t val;
+	int ret;
+
+	ret = i2c_master_send(client, &addr, sizeof(addr));
+	if (ret < 0)
+		goto fail;
+
+	ret = i2c_master_recv(client, &val, sizeof(val));
+	if (ret < 0)
+		goto fail;
+
+	return val;
+
+fail:
+	ch7006_err(client, "Error %d reading from subaddress 0x%x\n",
+		   ret, addr);
+	return 0;
+}
+
+void ch7006_state_load(struct i2c_client *client,
+		       struct ch7006_state *state)
+{
+	ch7006_load_reg(client, state, CH7006_POWER);
+
+	ch7006_load_reg(client, state, CH7006_DISPMODE);
+	ch7006_load_reg(client, state, CH7006_FFILTER);
+	ch7006_load_reg(client, state, CH7006_BWIDTH);
+	ch7006_load_reg(client, state, CH7006_INPUT_FORMAT);
+	ch7006_load_reg(client, state, CH7006_CLKMODE);
+	ch7006_load_reg(client, state, CH7006_START_ACTIVE);
+	ch7006_load_reg(client, state, CH7006_POV);
+	ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
+	ch7006_load_reg(client, state, CH7006_HPOS);
+	ch7006_load_reg(client, state, CH7006_VPOS);
+	ch7006_load_reg(client, state, CH7006_INPUT_SYNC);
+	ch7006_load_reg(client, state, CH7006_DETECT);
+	ch7006_load_reg(client, state, CH7006_CONTRAST);
+	ch7006_load_reg(client, state, CH7006_PLLOV);
+	ch7006_load_reg(client, state, CH7006_PLLM);
+	ch7006_load_reg(client, state, CH7006_PLLN);
+	ch7006_load_reg(client, state, CH7006_BCLKOUT);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC0);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC1);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC2);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC3);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC4);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC5);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC6);
+	ch7006_load_reg(client, state, CH7006_SUBC_INC7);
+	ch7006_load_reg(client, state, CH7006_PLL_CONTROL);
+	ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0);
+}
+
+void ch7006_state_save(struct i2c_client *client,
+		       struct ch7006_state *state)
+{
+	ch7006_save_reg(client, state, CH7006_POWER);
+
+	ch7006_save_reg(client, state, CH7006_DISPMODE);
+	ch7006_save_reg(client, state, CH7006_FFILTER);
+	ch7006_save_reg(client, state, CH7006_BWIDTH);
+	ch7006_save_reg(client, state, CH7006_INPUT_FORMAT);
+	ch7006_save_reg(client, state, CH7006_CLKMODE);
+	ch7006_save_reg(client, state, CH7006_START_ACTIVE);
+	ch7006_save_reg(client, state, CH7006_POV);
+	ch7006_save_reg(client, state, CH7006_BLACK_LEVEL);
+	ch7006_save_reg(client, state, CH7006_HPOS);
+	ch7006_save_reg(client, state, CH7006_VPOS);
+	ch7006_save_reg(client, state, CH7006_INPUT_SYNC);
+	ch7006_save_reg(client, state, CH7006_DETECT);
+	ch7006_save_reg(client, state, CH7006_CONTRAST);
+	ch7006_save_reg(client, state, CH7006_PLLOV);
+	ch7006_save_reg(client, state, CH7006_PLLM);
+	ch7006_save_reg(client, state, CH7006_PLLN);
+	ch7006_save_reg(client, state, CH7006_BCLKOUT);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC0);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC1);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC2);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC3);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC4);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC5);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC6);
+	ch7006_save_reg(client, state, CH7006_SUBC_INC7);
+	ch7006_save_reg(client, state, CH7006_PLL_CONTROL);
+	ch7006_save_reg(client, state, CH7006_CALC_SUBC_INC0);
+
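+	/* The luma and chroma flicker filter fields apparently read
+	 * back swapped relative to how they are written; swap them here
+	 * so the saved state loads back verbatim. */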
+	state->regs[CH7006_FFILTER] = (state->regs[CH7006_FFILTER] & 0xf0) |
+		(state->regs[CH7006_FFILTER] & 0x0c) >> 2 |
+		(state->regs[CH7006_FFILTER] & 0x03) << 2;
+}
diff --git a/linux-imx/drivers/gpu/drm/i2c/ch7006_priv.h b/linux-imx/drivers/gpu/drm/i2c/ch7006_priv.h
new file mode 100644
index 0000000..ce57784
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -0,0 +1,345 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DRM_I2C_CH7006_PRIV_H__
+#define __DRM_I2C_CH7006_PRIV_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/i2c/ch7006.h>
+
+typedef int64_t fixed;
+#define fixed1 (1LL << 32)
+
+enum ch7006_tv_norm {
+	TV_NORM_PAL,
+	TV_NORM_PAL_M,
+	TV_NORM_PAL_N,
+	TV_NORM_PAL_NC,
+	TV_NORM_PAL_60,
+	TV_NORM_NTSC_M,
+	TV_NORM_NTSC_J,
+	NUM_TV_NORMS
+};
+
+struct ch7006_tv_norm_info {
+	fixed vrefresh;
+	int vdisplay;
+	int vtotal;
+	int hvirtual;
+
+	fixed subc_freq;
+	fixed black_level;
+
+	uint32_t dispmode;
+	int voffset;
+};
+
+struct ch7006_mode {
+	struct drm_display_mode mode;
+
+	int enc_hdisp;
+	int enc_vdisp;
+
+	fixed subc_coeff;
+	uint32_t dispmode;
+
+	uint32_t valid_scales;
+	uint32_t valid_norms;
+};
+
+struct ch7006_state {
+	uint8_t regs[0x26];
+};
+
+struct ch7006_priv {
+	struct ch7006_encoder_params params;
+	struct ch7006_mode *mode;
+
+	struct ch7006_state state;
+	struct ch7006_state saved_state;
+
+	struct drm_property *scale_property;
+
+	int select_subconnector;
+	int subconnector;
+	int hmargin;
+	int vmargin;
+	enum ch7006_tv_norm norm;
+	int brightness;
+	int contrast;
+	int flicker;
+	int scale;
+
+	int chip_version;
+	int last_dpms;
+};
+
+#define to_ch7006_priv(x) \
+	((struct ch7006_priv *)to_encoder_slave(x)->slave_priv)
+
+extern int ch7006_debug;
+extern char *ch7006_tv_norm;
+extern int ch7006_scale;
+
+extern char *ch7006_tv_norm_names[];
+extern struct ch7006_tv_norm_info ch7006_tv_norms[];
+extern struct ch7006_mode ch7006_modes[];
+
+struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
+				       const struct drm_display_mode *drm_mode);
+
+void ch7006_setup_levels(struct drm_encoder *encoder);
+void ch7006_setup_subcarrier(struct drm_encoder *encoder);
+void ch7006_setup_pll(struct drm_encoder *encoder);
+void ch7006_setup_power_state(struct drm_encoder *encoder);
+void ch7006_setup_properties(struct drm_encoder *encoder);
+
+void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val);
+uint8_t ch7006_read(struct i2c_client *client, uint8_t addr);
+
+void ch7006_state_load(struct i2c_client *client,
+		       struct ch7006_state *state);
+void ch7006_state_save(struct i2c_client *client,
+		       struct ch7006_state *state);
+
+/* Some helper macros */
+
+#define ch7006_dbg(client, format, ...) do {				\
+		if (ch7006_debug)					\
+			dev_printk(KERN_DEBUG, &client->dev,		\
+				   "%s: " format, __func__, ## __VA_ARGS__); \
+	} while (0)
+#define ch7006_info(client, format, ...) \
+				dev_info(&client->dev, format, __VA_ARGS__)
+#define ch7006_err(client, format, ...) \
+				dev_err(&client->dev, format, __VA_ARGS__)
+
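+/*
+ * Register bitfields are encoded as "src, msb:lsb" triples, e.g.
+ * CH7006_DISPMODE_OUTPUT_STD below is "0, 4:3".  The two-level macros
+ * split off "src" (the offset of the value within its source word) and
+ * turn "msb:lsb" into a conditional: (1 ? 4:3) evaluates to the MSB (4)
+ * and (0 ? 4:3) to the LSB (3).  So, for example,
+ * bitf(CH7006_DISPMODE_OUTPUT_STD, 1) expands to a value of 1 << 3.
+ */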
+#define __mask(src, bitfield) \
+		(((2 << (1 ? bitfield)) - 1) & ~((1 << (0 ? bitfield)) - 1))
+#define mask(bitfield) __mask(bitfield)
+
+#define __bitf(src, bitfield, x) \
+		(((x) >> (src) << (0 ? bitfield)) & __mask(src, bitfield))
+#define bitf(bitfield, x) __bitf(bitfield, x)
+#define bitfs(bitfield, s) __bitf(bitfield, bitfield##_##s)
+#define setbitf(state, reg, bitfield, x)				\
+	state->regs[reg] = (state->regs[reg] & ~mask(reg##_##bitfield))	\
+		| bitf(reg##_##bitfield, x)
+
+#define __unbitf(src, bitfield, x) \
+		((x & __mask(src, bitfield)) >> (0 ? bitfield) << (src))
+#define unbitf(bitfield, x) __unbitf(bitfield, x)
+
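+/* Piecewise-linear interpolation through (0, y0), (50, y1) and
+ * (100, y2), used to map 0-100 property values onto register values
+ * with y1 as the midpoint default. */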
+static inline int interpolate(int y0, int y1, int y2, int x)
+{
+	return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
+}
+
+static inline int32_t round_fixed(fixed x)
+{
+	return (x + fixed1/2) >> 32;
+}
+
+#define ch7006_load_reg(client, state, reg) ch7006_write(client, reg, state->regs[reg])
+#define ch7006_save_reg(client, state, reg) state->regs[reg] = ch7006_read(client, reg)
+
+/* Fixed hardware specs */
+
+#define CH7006_FREQ0				14318
+#define CH7006_MAXN				650
+#define CH7006_MAXM				315
+
+/* Register definitions */
+
+#define CH7006_DISPMODE				0x00
+#define CH7006_DISPMODE_INPUT_RES		0, 7:5
+#define CH7006_DISPMODE_INPUT_RES_512x384	0x0
+#define CH7006_DISPMODE_INPUT_RES_720x400	0x1
+#define CH7006_DISPMODE_INPUT_RES_640x400	0x2
+#define CH7006_DISPMODE_INPUT_RES_640x480	0x3
+#define CH7006_DISPMODE_INPUT_RES_800x600	0x4
+#define CH7006_DISPMODE_INPUT_RES_NATIVE	0x5
+#define CH7006_DISPMODE_OUTPUT_STD		0, 4:3
+#define CH7006_DISPMODE_OUTPUT_STD_PAL		0x0
+#define CH7006_DISPMODE_OUTPUT_STD_NTSC		0x1
+#define CH7006_DISPMODE_OUTPUT_STD_PAL_M	0x2
+#define CH7006_DISPMODE_OUTPUT_STD_NTSC_J	0x3
+#define CH7006_DISPMODE_SCALING_RATIO		0, 2:0
+#define CH7006_DISPMODE_SCALING_RATIO_5_4	0x0
+#define CH7006_DISPMODE_SCALING_RATIO_1_1	0x1
+#define CH7006_DISPMODE_SCALING_RATIO_7_8	0x2
+#define CH7006_DISPMODE_SCALING_RATIO_5_6	0x3
+#define CH7006_DISPMODE_SCALING_RATIO_3_4	0x4
+#define CH7006_DISPMODE_SCALING_RATIO_7_10	0x5
+
+#define CH7006_FFILTER				0x01
+#define CH7006_FFILTER_TEXT			0, 5:4
+#define CH7006_FFILTER_LUMA			0, 3:2
+#define CH7006_FFILTER_CHROMA			0, 1:0
+#define CH7006_FFILTER_CHROMA_NO_DCRAWL		0x3
+
+#define CH7006_BWIDTH				0x03
+#define CH7006_BWIDTH_5L_FFILER			(1 << 7)
+#define CH7006_BWIDTH_CVBS_NO_CHROMA		(1 << 6)
+#define CH7006_BWIDTH_CHROMA			0, 5:4
+#define CH7006_BWIDTH_SVIDEO_YPEAK		(1 << 3)
+#define CH7006_BWIDTH_SVIDEO_LUMA		0, 2:1
+#define CH7006_BWIDTH_CVBS_LUMA			0, 0:0
+
+#define CH7006_INPUT_FORMAT			0x04
+#define CH7006_INPUT_FORMAT_DAC_GAIN		(1 << 6)
+#define CH7006_INPUT_FORMAT_RGB_PASS_THROUGH	(1 << 5)
+#define CH7006_INPUT_FORMAT_FORMAT		0, 3:0
+#define CH7006_INPUT_FORMAT_FORMAT_RGB16	0x0
+#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m16	0x1
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m16	0x2
+#define CH7006_INPUT_FORMAT_FORMAT_RGB15	0x3
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12C	0x4
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12I	0x5
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m8	0x6
+#define CH7006_INPUT_FORMAT_FORMAT_RGB16m8	0x7
+#define CH7006_INPUT_FORMAT_FORMAT_RGB15m8	0x8
+#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m8	0x9
+
+#define CH7006_CLKMODE				0x06
+#define CH7006_CLKMODE_SUBC_LOCK		(1 << 7)
+#define CH7006_CLKMODE_MASTER			(1 << 6)
+#define CH7006_CLKMODE_POS_EDGE			(1 << 4)
+#define CH7006_CLKMODE_XCM			0, 3:2
+#define CH7006_CLKMODE_PCM			0, 1:0
+
+#define CH7006_START_ACTIVE			0x07
+#define CH7006_START_ACTIVE_0			0, 7:0
+
+#define CH7006_POV				0x08
+#define CH7006_POV_START_ACTIVE_8		8, 2:2
+#define CH7006_POV_HPOS_8			8, 1:1
+#define CH7006_POV_VPOS_8			8, 0:0
+
+#define CH7006_BLACK_LEVEL			0x09
+#define CH7006_BLACK_LEVEL_0			0, 7:0
+
+#define CH7006_HPOS				0x0a
+#define CH7006_HPOS_0				0, 7:0
+
+#define CH7006_VPOS				0x0b
+#define CH7006_VPOS_0				0, 7:0
+
+#define CH7006_INPUT_SYNC			0x0d
+#define CH7006_INPUT_SYNC_EMBEDDED		(1 << 3)
+#define CH7006_INPUT_SYNC_OUTPUT		(1 << 2)
+#define CH7006_INPUT_SYNC_PVSYNC		(1 << 1)
+#define CH7006_INPUT_SYNC_PHSYNC		(1 << 0)
+
+#define CH7006_POWER				0x0e
+#define CH7006_POWER_SCART			(1 << 4)
+#define CH7006_POWER_RESET			(1 << 3)
+#define CH7006_POWER_LEVEL			0, 2:0
+#define CH7006_POWER_LEVEL_CVBS_OFF		0x0
+#define CH7006_POWER_LEVEL_POWER_OFF		0x1
+#define CH7006_POWER_LEVEL_SVIDEO_OFF		0x2
+#define CH7006_POWER_LEVEL_NORMAL		0x3
+#define CH7006_POWER_LEVEL_FULL_POWER_OFF	0x4
+
+#define CH7006_DETECT				0x10
+#define CH7006_DETECT_SVIDEO_Y_TEST		(1 << 3)
+#define CH7006_DETECT_SVIDEO_C_TEST		(1 << 2)
+#define CH7006_DETECT_CVBS_TEST			(1 << 1)
+#define CH7006_DETECT_SENSE			(1 << 0)
+
+#define CH7006_CONTRAST				0x11
+#define CH7006_CONTRAST_0			0, 2:0
+
+#define CH7006_PLLOV				0x13
+#define CH7006_PLLOV_N_8			8, 2:1
+#define CH7006_PLLOV_M_8			8, 0:0
+
+#define CH7006_PLLM				0x14
+#define CH7006_PLLM_0				0, 7:0
+
+#define CH7006_PLLN				0x15
+#define CH7006_PLLN_0				0, 7:0
+
+#define CH7006_BCLKOUT				0x17
+
+#define CH7006_SUBC_INC0			0x18
+#define CH7006_SUBC_INC0_28			28, 3:0
+
+#define CH7006_SUBC_INC1			0x19
+#define CH7006_SUBC_INC1_24			24, 3:0
+
+#define CH7006_SUBC_INC2			0x1a
+#define CH7006_SUBC_INC2_20			20, 3:0
+
+#define CH7006_SUBC_INC3			0x1b
+#define CH7006_SUBC_INC3_GPIO1_VAL		(1 << 7)
+#define CH7006_SUBC_INC3_GPIO0_VAL		(1 << 6)
+#define CH7006_SUBC_INC3_POUT_3_3V		(1 << 5)
+#define CH7006_SUBC_INC3_POUT_INV		(1 << 4)
+#define CH7006_SUBC_INC3_16			16, 3:0
+
+#define CH7006_SUBC_INC4			0x1c
+#define CH7006_SUBC_INC4_GPIO1_IN		(1 << 7)
+#define CH7006_SUBC_INC4_GPIO0_IN		(1 << 6)
+#define CH7006_SUBC_INC4_DS_INPUT		(1 << 4)
+#define CH7006_SUBC_INC4_12			12, 3:0
+
+#define CH7006_SUBC_INC5			0x1d
+#define CH7006_SUBC_INC5_8			8, 3:0
+
+#define CH7006_SUBC_INC6			0x1e
+#define CH7006_SUBC_INC6_4			4, 3:0
+
+#define CH7006_SUBC_INC7			0x1f
+#define CH7006_SUBC_INC7_0			0, 3:0
+
+#define CH7006_PLL_CONTROL			0x20
+#define CH7006_PLL_CONTROL_CPI			(1 << 5)
+#define CH7006_PLL_CONTROL_CAPACITOR		(1 << 4)
+#define CH7006_PLL_CONTROL_7STAGES		(1 << 3)
+#define CH7006_PLL_CONTROL_DIGITAL_5V		(1 << 2)
+#define CH7006_PLL_CONTROL_ANALOG_5V		(1 << 1)
+#define CH7006_PLL_CONTROL_MEMORY_5V		(1 << 0)
+
+#define CH7006_CALC_SUBC_INC0			0x21
+#define CH7006_CALC_SUBC_INC0_24		24, 4:3
+#define CH7006_CALC_SUBC_INC0_HYST		0, 2:1
+#define CH7006_CALC_SUBC_INC0_AUTO		(1 << 0)
+
+#define CH7006_CALC_SUBC_INC1			0x22
+#define CH7006_CALC_SUBC_INC1_16		16, 7:0
+
+#define CH7006_CALC_SUBC_INC2			0x23
+#define CH7006_CALC_SUBC_INC2_8			8, 7:0
+
+#define CH7006_CALC_SUBC_INC3			0x24
+#define CH7006_CALC_SUBC_INC3_0			0, 7:0
+
+#define CH7006_VERSION_ID			0x25
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/i2c/sil164_drv.c b/linux-imx/drivers/gpu/drm/i2c/sil164_drv.c
new file mode 100644
index 0000000..002ce78
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i2c/sil164_drv.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/i2c/sil164.h>
+
+struct sil164_priv {
+	struct sil164_encoder_params config;
+	struct i2c_client *duallink_slave;
+
+	uint8_t saved_state[0x10];
+	uint8_t saved_slave_state[0x10];
+};
+
+#define to_sil164_priv(x) \
+	((struct sil164_priv *)to_encoder_slave(x)->slave_priv)
+
+#define sil164_dbg(client, format, ...) do {				\
+		if (drm_debug & DRM_UT_KMS)				\
+			dev_printk(KERN_DEBUG, &client->dev,		\
+				   "%s: " format, __func__, ## __VA_ARGS__); \
+	} while (0)
+#define sil164_info(client, format, ...)		\
+	dev_info(&client->dev, format, __VA_ARGS__)
+#define sil164_err(client, format, ...)			\
+	dev_err(&client->dev, format, __VA_ARGS__)
+
+#define SIL164_I2C_ADDR_MASTER			0x38
+#define SIL164_I2C_ADDR_SLAVE			0x39
+
+/* HW register definitions */
+
+#define SIL164_VENDOR_LO			0x0
+#define SIL164_VENDOR_HI			0x1
+#define SIL164_DEVICE_LO			0x2
+#define SIL164_DEVICE_HI			0x3
+#define SIL164_REVISION				0x4
+#define SIL164_FREQ_MIN				0x6
+#define SIL164_FREQ_MAX				0x7
+#define SIL164_CONTROL0				0x8
+#  define SIL164_CONTROL0_POWER_ON		0x01
+#  define SIL164_CONTROL0_EDGE_RISING		0x02
+#  define SIL164_CONTROL0_INPUT_24BIT		0x04
+#  define SIL164_CONTROL0_DUAL_EDGE		0x08
+#  define SIL164_CONTROL0_HSYNC_ON		0x10
+#  define SIL164_CONTROL0_VSYNC_ON		0x20
+#define SIL164_DETECT				0x9
+#  define SIL164_DETECT_INTR_STAT		0x01
+#  define SIL164_DETECT_HOTPLUG_STAT		0x02
+#  define SIL164_DETECT_RECEIVER_STAT		0x04
+#  define SIL164_DETECT_INTR_MODE_RECEIVER	0x00
+#  define SIL164_DETECT_INTR_MODE_HOTPLUG	0x08
+#  define SIL164_DETECT_OUT_MODE_HIGH		0x00
+#  define SIL164_DETECT_OUT_MODE_INTR		0x10
+#  define SIL164_DETECT_OUT_MODE_RECEIVER	0x20
+#  define SIL164_DETECT_OUT_MODE_HOTPLUG	0x30
+#  define SIL164_DETECT_VSWING_STAT		0x80
+#define SIL164_CONTROL1				0xa
+#  define SIL164_CONTROL1_DESKEW_ENABLE		0x10
+#  define SIL164_CONTROL1_DESKEW_INCR_SHIFT	5
+#define SIL164_GPIO				0xb
+#define SIL164_CONTROL2				0xc
+#  define SIL164_CONTROL2_FILTER_ENABLE		0x01
+#  define SIL164_CONTROL2_FILTER_SETTING_SHIFT	1
+#  define SIL164_CONTROL2_DUALLINK_MASTER	0x40
+#  define SIL164_CONTROL2_SYNC_CONT		0x80
+#define SIL164_DUALLINK				0xd
+#  define SIL164_DUALLINK_ENABLE		0x10
+#  define SIL164_DUALLINK_SKEW_SHIFT		5
+#define SIL164_PLLZONE				0xe
+#  define SIL164_PLLZONE_STAT			0x08
+#  define SIL164_PLLZONE_FORCE_ON		0x10
+#  define SIL164_PLLZONE_FORCE_HIGH		0x20
+
+/* HW access functions */
+
+static void
+sil164_write(struct i2c_client *client, uint8_t addr, uint8_t val)
+{
+	uint8_t buf[] = {addr, val};
+	int ret;
+
+	ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+	if (ret < 0)
+		sil164_err(client, "Error %d writing to subaddress 0x%x\n",
+			   ret, addr);
+}
+
+static uint8_t
+sil164_read(struct i2c_client *client, uint8_t addr)
+{
+	uint8_t val;
+	int ret;
+
+	ret = i2c_master_send(client, &addr, sizeof(addr));
+	if (ret < 0)
+		goto fail;
+
+	ret = i2c_master_recv(client, &val, sizeof(val));
+	if (ret < 0)
+		goto fail;
+
+	return val;
+
+fail:
+	sil164_err(client, "Error %d reading from subaddress 0x%x\n",
+		   ret, addr);
+	return 0;
+}
+
+static void
+sil164_save_state(struct i2c_client *client, uint8_t *state)
+{
+	int i;
+
+	for (i = 0x8; i <= 0xe; i++)
+		state[i] = sil164_read(client, i);
+}
+
+static void
+sil164_restore_state(struct i2c_client *client, uint8_t *state)
+{
+	int i;
+
+	for (i = 0x8; i <= 0xe; i++)
+		sil164_write(client, i, state[i]);
+}
+
+static void
+sil164_set_power_state(struct i2c_client *client, bool on)
+{
+	uint8_t control0 = sil164_read(client, SIL164_CONTROL0);
+
+	if (on)
+		control0 |= SIL164_CONTROL0_POWER_ON;
+	else
+		control0 &= ~SIL164_CONTROL0_POWER_ON;
+
+	sil164_write(client, SIL164_CONTROL0, control0);
+}
+
+static void
+sil164_init_state(struct i2c_client *client,
+		  struct sil164_encoder_params *config,
+		  bool duallink)
+{
+	sil164_write(client, SIL164_CONTROL0,
+		     SIL164_CONTROL0_HSYNC_ON |
+		     SIL164_CONTROL0_VSYNC_ON |
+		     (config->input_edge ? SIL164_CONTROL0_EDGE_RISING : 0) |
+		     (config->input_width ? SIL164_CONTROL0_INPUT_24BIT : 0) |
+		     (config->input_dual ? SIL164_CONTROL0_DUAL_EDGE : 0));
+
+	sil164_write(client, SIL164_DETECT,
+		     SIL164_DETECT_INTR_STAT |
+		     SIL164_DETECT_OUT_MODE_RECEIVER);
+
+	sil164_write(client, SIL164_CONTROL1,
+		     (config->input_skew ? SIL164_CONTROL1_DESKEW_ENABLE : 0) |
+		     (((config->input_skew + 4) & 0x7)
+		      << SIL164_CONTROL1_DESKEW_INCR_SHIFT));
+
+	sil164_write(client, SIL164_CONTROL2,
+		     SIL164_CONTROL2_SYNC_CONT |
+		     (config->pll_filter ? 0 : SIL164_CONTROL2_FILTER_ENABLE) |
+		     (4 << SIL164_CONTROL2_FILTER_SETTING_SHIFT));
+
+	sil164_write(client, SIL164_PLLZONE, 0);
+
+	if (duallink)
+		sil164_write(client, SIL164_DUALLINK,
+			     SIL164_DUALLINK_ENABLE |
+			     (((config->duallink_skew + 4) & 0x7)
+			      << SIL164_DUALLINK_SKEW_SHIFT));
+	else
+		sil164_write(client, SIL164_DUALLINK, 0);
+}
+
+/* DRM encoder functions */
+
+static void
+sil164_encoder_set_config(struct drm_encoder *encoder, void *params)
+{
+	struct sil164_priv *priv = to_sil164_priv(encoder);
+
+	priv->config = *(struct sil164_encoder_params *)params;
+}
+
+static void
+sil164_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct sil164_priv *priv = to_sil164_priv(encoder);
+	bool on = (mode == DRM_MODE_DPMS_ON);
+	bool duallink = (on && encoder->crtc->mode.clock > 165000);
+
+	sil164_set_power_state(drm_i2c_encoder_get_client(encoder), on);
+
+	if (priv->duallink_slave)
+		sil164_set_power_state(priv->duallink_slave, duallink);
+}
+
+static void
+sil164_encoder_save(struct drm_encoder *encoder)
+{
+	struct sil164_priv *priv = to_sil164_priv(encoder);
+
+	sil164_save_state(drm_i2c_encoder_get_client(encoder),
+			  priv->saved_state);
+
+	if (priv->duallink_slave)
+		sil164_save_state(priv->duallink_slave,
+				  priv->saved_slave_state);
+}
+
+static void
+sil164_encoder_restore(struct drm_encoder *encoder)
+{
+	struct sil164_priv *priv = to_sil164_priv(encoder);
+
+	sil164_restore_state(drm_i2c_encoder_get_client(encoder),
+			     priv->saved_state);
+
+	if (priv->duallink_slave)
+		sil164_restore_state(priv->duallink_slave,
+				     priv->saved_slave_state);
+}
+
+static bool
+sil164_encoder_mode_fixup(struct drm_encoder *encoder,
+			  const struct drm_display_mode *mode,
+			  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
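+/* A single TMDS link tops out at a 165 MHz pixel clock; with a slave
+ * transmitter on the second link, clocks up to twice that can be
+ * driven in dual-link mode, hence the limits below. */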
+static int
+sil164_encoder_mode_valid(struct drm_encoder *encoder,
+			  struct drm_display_mode *mode)
+{
+	struct sil164_priv *priv = to_sil164_priv(encoder);
+
+	if (mode->clock < 32000)
+		return MODE_CLOCK_LOW;
+
+	if (mode->clock > 330000 ||
+	    (mode->clock > 165000 && !priv->duallink_slave))
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static void
+sil164_encoder_mode_set(struct drm_encoder *encoder,
+			struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode)
+{
+	struct sil164_priv *priv = to_sil164_priv(encoder);
+	bool duallink = adjusted_mode->clock > 165000;
+
+	sil164_init_state(drm_i2c_encoder_get_client(encoder),
+			  &priv->config, duallink);
+
+	if (priv->duallink_slave)
+		sil164_init_state(priv->duallink_slave,
+				  &priv->config, duallink);
+
+	sil164_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static enum drm_connector_status
+sil164_encoder_detect(struct drm_encoder *encoder,
+		      struct drm_connector *connector)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+
+	if (sil164_read(client, SIL164_DETECT) & SIL164_DETECT_HOTPLUG_STAT)
+		return connector_status_connected;
+	else
+		return connector_status_disconnected;
+}
+
+static int
+sil164_encoder_get_modes(struct drm_encoder *encoder,
+			 struct drm_connector *connector)
+{
+	return 0;
+}
+
+static int
+sil164_encoder_create_resources(struct drm_encoder *encoder,
+				struct drm_connector *connector)
+{
+	return 0;
+}
+
+static int
+sil164_encoder_set_property(struct drm_encoder *encoder,
+			    struct drm_connector *connector,
+			    struct drm_property *property,
+			    uint64_t val)
+{
+	return 0;
+}
+
+static void
+sil164_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct sil164_priv *priv = to_sil164_priv(encoder);
+
+	if (priv->duallink_slave)
+		i2c_unregister_device(priv->duallink_slave);
+
+	kfree(priv);
+	drm_i2c_encoder_destroy(encoder);
+}
+
+static struct drm_encoder_slave_funcs sil164_encoder_funcs = {
+	.set_config = sil164_encoder_set_config,
+	.destroy = sil164_encoder_destroy,
+	.dpms = sil164_encoder_dpms,
+	.save = sil164_encoder_save,
+	.restore = sil164_encoder_restore,
+	.mode_fixup = sil164_encoder_mode_fixup,
+	.mode_valid = sil164_encoder_mode_valid,
+	.mode_set = sil164_encoder_mode_set,
+	.detect = sil164_encoder_detect,
+	.get_modes = sil164_encoder_get_modes,
+	.create_resources = sil164_encoder_create_resources,
+	.set_property = sil164_encoder_set_property,
+};
+
+/* I2C driver functions */
+
+static int
+sil164_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	int vendor = sil164_read(client, SIL164_VENDOR_HI) << 8 |
+		sil164_read(client, SIL164_VENDOR_LO);
+	int device = sil164_read(client, SIL164_DEVICE_HI) << 8 |
+		sil164_read(client, SIL164_DEVICE_LO);
+	int rev = sil164_read(client, SIL164_REVISION);
+
+	if (vendor != 0x1 || device != 0x6) {
+		sil164_dbg(client, "Unknown device %x:%x.%x\n",
+			   vendor, device, rev);
+		return -ENODEV;
+	}
+
+	sil164_info(client, "Detected device %x:%x.%x\n",
+		    vendor, device, rev);
+
+	return 0;
+}
+
+static int
+sil164_remove(struct i2c_client *client)
+{
+	return 0;
+}
+
+static struct i2c_client *
+sil164_detect_slave(struct i2c_client *client)
+{
+	struct i2c_adapter *adap = client->adapter;
+	struct i2c_msg msg = {
+		.addr = SIL164_I2C_ADDR_SLAVE,
+		.len = 0,
+	};
+	const struct i2c_board_info info = {
+		I2C_BOARD_INFO("sil164", SIL164_I2C_ADDR_SLAVE)
+	};
+
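+	/* A zero-length write works as a cheap presence probe: it only
+	 * checks whether anything ACKs the slave address. */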
+	if (i2c_transfer(adap, &msg, 1) != 1) {
+		sil164_dbg(adap, "No dual-link slave found.");
+		return NULL;
+	}
+
+	return i2c_new_device(adap, &info);
+}
+
+static int
+sil164_encoder_init(struct i2c_client *client,
+		    struct drm_device *dev,
+		    struct drm_encoder_slave *encoder)
+{
+	struct sil164_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	encoder->slave_priv = priv;
+	encoder->slave_funcs = &sil164_encoder_funcs;
+
+	priv->duallink_slave = sil164_detect_slave(client);
+
+	return 0;
+}
+
+static struct i2c_device_id sil164_ids[] = {
+	{ "sil164", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, sil164_ids);
+
+static struct drm_i2c_encoder_driver sil164_driver = {
+	.i2c_driver = {
+		.probe = sil164_probe,
+		.remove = sil164_remove,
+		.driver = {
+			.name = "sil164",
+		},
+		.id_table = sil164_ids,
+	},
+	.encoder_init = sil164_encoder_init,
+};
+
+/* Module initialization */
+
+static int __init
+sil164_init(void)
+{
+	return drm_i2c_encoder_register(THIS_MODULE, &sil164_driver);
+}
+
+static void __exit
+sil164_exit(void)
+{
+	drm_i2c_encoder_unregister(&sil164_driver);
+}
+
+MODULE_AUTHOR("Francisco Jerez <currojerez@riseup.net>");
+MODULE_DESCRIPTION("Silicon Image sil164 TMDS transmitter driver");
+MODULE_LICENSE("GPL and additional rights");
+
+module_init(sil164_init);
+module_exit(sil164_exit);
diff --git a/linux-imx/drivers/gpu/drm/i2c/tda998x_drv.c b/linux-imx/drivers/gpu/drm/i2c/tda998x_drv.c
new file mode 100644
index 0000000..e68b58a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -0,0 +1,906 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_edid.h>
+
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+struct tda998x_priv {
+	struct i2c_client *cec;
+	uint16_t rev;
+	uint8_t current_page;
+	int dpms;
+};
+
+#define to_tda998x_priv(x)  ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
+
+/* The TDA998x series of devices use a paged register scheme.  To simplify
+ * things we encode the page # in the upper bits of the register #.  To
+ * read/write a given register, we need to make sure the CURPAGE register
+ * is set appropriately, which implies reads/writes are not atomic.  Fun!
+ */
+
+#define REG(page, addr) (((page) << 8) | (addr))
+#define REG2ADDR(reg)   ((reg) & 0xff)
+#define REG2PAGE(reg)   (((reg) >> 8) & 0xff)
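+
+/*
+ * For example, REG(0x02, 0x01) encodes page 02h, address 01h as 0x0201;
+ * REG2PAGE(0x0201) recovers 0x02 and REG2ADDR(0x0201) recovers 0x01.
+ * set_page() below writes REG_CURPAGE whenever the encoded page differs
+ * from the cached one.
+ */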
+
+#define REG_CURPAGE               0xff                /* write */
+
+
+/* Page 00h: General Control */
+#define REG_VERSION_LSB           REG(0x00, 0x00)     /* read */
+#define REG_MAIN_CNTRL0           REG(0x00, 0x01)     /* read/write */
+# define MAIN_CNTRL0_SR           (1 << 0)
+# define MAIN_CNTRL0_DECS         (1 << 1)
+# define MAIN_CNTRL0_DEHS         (1 << 2)
+# define MAIN_CNTRL0_CECS         (1 << 3)
+# define MAIN_CNTRL0_CEHS         (1 << 4)
+# define MAIN_CNTRL0_SCALER       (1 << 7)
+#define REG_VERSION_MSB           REG(0x00, 0x02)     /* read */
+#define REG_SOFTRESET             REG(0x00, 0x0a)     /* write */
+# define SOFTRESET_AUDIO          (1 << 0)
+# define SOFTRESET_I2C_MASTER     (1 << 1)
+#define REG_DDC_DISABLE           REG(0x00, 0x0b)     /* read/write */
+#define REG_CCLK_ON               REG(0x00, 0x0c)     /* read/write */
+#define REG_I2C_MASTER            REG(0x00, 0x0d)     /* read/write */
+# define I2C_MASTER_DIS_MM        (1 << 0)
+# define I2C_MASTER_DIS_FILT      (1 << 1)
+# define I2C_MASTER_APP_STRT_LAT  (1 << 2)
+#define REG_INT_FLAGS_0           REG(0x00, 0x0f)     /* read/write */
+#define REG_INT_FLAGS_1           REG(0x00, 0x10)     /* read/write */
+#define REG_INT_FLAGS_2           REG(0x00, 0x11)     /* read/write */
+# define INT_FLAGS_2_EDID_BLK_RD  (1 << 1)
+#define REG_ENA_VP_0              REG(0x00, 0x18)     /* read/write */
+#define REG_ENA_VP_1              REG(0x00, 0x19)     /* read/write */
+#define REG_ENA_VP_2              REG(0x00, 0x1a)     /* read/write */
+#define REG_ENA_AP                REG(0x00, 0x1e)     /* read/write */
+#define REG_VIP_CNTRL_0           REG(0x00, 0x20)     /* write */
+# define VIP_CNTRL_0_MIRR_A       (1 << 7)
+# define VIP_CNTRL_0_SWAP_A(x)    (((x) & 7) << 4)
+# define VIP_CNTRL_0_MIRR_B       (1 << 3)
+# define VIP_CNTRL_0_SWAP_B(x)    (((x) & 7) << 0)
+#define REG_VIP_CNTRL_1           REG(0x00, 0x21)     /* write */
+# define VIP_CNTRL_1_MIRR_C       (1 << 7)
+# define VIP_CNTRL_1_SWAP_C(x)    (((x) & 7) << 4)
+# define VIP_CNTRL_1_MIRR_D       (1 << 3)
+# define VIP_CNTRL_1_SWAP_D(x)    (((x) & 7) << 0)
+#define REG_VIP_CNTRL_2           REG(0x00, 0x22)     /* write */
+# define VIP_CNTRL_2_MIRR_E       (1 << 7)
+# define VIP_CNTRL_2_SWAP_E(x)    (((x) & 7) << 4)
+# define VIP_CNTRL_2_MIRR_F       (1 << 3)
+# define VIP_CNTRL_2_SWAP_F(x)    (((x) & 7) << 0)
+#define REG_VIP_CNTRL_3           REG(0x00, 0x23)     /* write */
+# define VIP_CNTRL_3_X_TGL        (1 << 0)
+# define VIP_CNTRL_3_H_TGL        (1 << 1)
+# define VIP_CNTRL_3_V_TGL        (1 << 2)
+# define VIP_CNTRL_3_EMB          (1 << 3)
+# define VIP_CNTRL_3_SYNC_DE      (1 << 4)
+# define VIP_CNTRL_3_SYNC_HS      (1 << 5)
+# define VIP_CNTRL_3_DE_INT       (1 << 6)
+# define VIP_CNTRL_3_EDGE         (1 << 7)
+#define REG_VIP_CNTRL_4           REG(0x00, 0x24)     /* write */
+# define VIP_CNTRL_4_BLC(x)       (((x) & 3) << 0)
+# define VIP_CNTRL_4_BLANKIT(x)   (((x) & 3) << 2)
+# define VIP_CNTRL_4_CCIR656      (1 << 4)
+# define VIP_CNTRL_4_656_ALT      (1 << 5)
+# define VIP_CNTRL_4_TST_656      (1 << 6)
+# define VIP_CNTRL_4_TST_PAT      (1 << 7)
+#define REG_VIP_CNTRL_5           REG(0x00, 0x25)     /* write */
+# define VIP_CNTRL_5_CKCASE       (1 << 0)
+# define VIP_CNTRL_5_SP_CNT(x)    (((x) & 3) << 1)
+#define REG_MAT_CONTRL            REG(0x00, 0x80)     /* write */
+# define MAT_CONTRL_MAT_SC(x)     (((x) & 3) << 0)
+# define MAT_CONTRL_MAT_BP        (1 << 2)
+#define REG_VIDFORMAT             REG(0x00, 0xa0)     /* write */
+#define REG_REFPIX_MSB            REG(0x00, 0xa1)     /* write */
+#define REG_REFPIX_LSB            REG(0x00, 0xa2)     /* write */
+#define REG_REFLINE_MSB           REG(0x00, 0xa3)     /* write */
+#define REG_REFLINE_LSB           REG(0x00, 0xa4)     /* write */
+#define REG_NPIX_MSB              REG(0x00, 0xa5)     /* write */
+#define REG_NPIX_LSB              REG(0x00, 0xa6)     /* write */
+#define REG_NLINE_MSB             REG(0x00, 0xa7)     /* write */
+#define REG_NLINE_LSB             REG(0x00, 0xa8)     /* write */
+#define REG_VS_LINE_STRT_1_MSB    REG(0x00, 0xa9)     /* write */
+#define REG_VS_LINE_STRT_1_LSB    REG(0x00, 0xaa)     /* write */
+#define REG_VS_PIX_STRT_1_MSB     REG(0x00, 0xab)     /* write */
+#define REG_VS_PIX_STRT_1_LSB     REG(0x00, 0xac)     /* write */
+#define REG_VS_LINE_END_1_MSB     REG(0x00, 0xad)     /* write */
+#define REG_VS_LINE_END_1_LSB     REG(0x00, 0xae)     /* write */
+#define REG_VS_PIX_END_1_MSB      REG(0x00, 0xaf)     /* write */
+#define REG_VS_PIX_END_1_LSB      REG(0x00, 0xb0)     /* write */
+#define REG_VS_PIX_STRT_2_MSB     REG(0x00, 0xb3)     /* write */
+#define REG_VS_PIX_STRT_2_LSB     REG(0x00, 0xb4)     /* write */
+#define REG_VS_PIX_END_2_MSB      REG(0x00, 0xb7)     /* write */
+#define REG_VS_PIX_END_2_LSB      REG(0x00, 0xb8)     /* write */
+#define REG_HS_PIX_START_MSB      REG(0x00, 0xb9)     /* write */
+#define REG_HS_PIX_START_LSB      REG(0x00, 0xba)     /* write */
+#define REG_HS_PIX_STOP_MSB       REG(0x00, 0xbb)     /* write */
+#define REG_HS_PIX_STOP_LSB       REG(0x00, 0xbc)     /* write */
+#define REG_VWIN_START_1_MSB      REG(0x00, 0xbd)     /* write */
+#define REG_VWIN_START_1_LSB      REG(0x00, 0xbe)     /* write */
+#define REG_VWIN_END_1_MSB        REG(0x00, 0xbf)     /* write */
+#define REG_VWIN_END_1_LSB        REG(0x00, 0xc0)     /* write */
+#define REG_DE_START_MSB          REG(0x00, 0xc5)     /* write */
+#define REG_DE_START_LSB          REG(0x00, 0xc6)     /* write */
+#define REG_DE_STOP_MSB           REG(0x00, 0xc7)     /* write */
+#define REG_DE_STOP_LSB           REG(0x00, 0xc8)     /* write */
+#define REG_TBG_CNTRL_0           REG(0x00, 0xca)     /* write */
+# define TBG_CNTRL_0_FRAME_DIS    (1 << 5)
+# define TBG_CNTRL_0_SYNC_MTHD    (1 << 6)
+# define TBG_CNTRL_0_SYNC_ONCE    (1 << 7)
+#define REG_TBG_CNTRL_1           REG(0x00, 0xcb)     /* write */
+# define TBG_CNTRL_1_VH_TGL_0     (1 << 0)
+# define TBG_CNTRL_1_VH_TGL_1     (1 << 1)
+# define TBG_CNTRL_1_VH_TGL_2     (1 << 2)
+# define TBG_CNTRL_1_VHX_EXT_DE   (1 << 3)
+# define TBG_CNTRL_1_VHX_EXT_HS   (1 << 4)
+# define TBG_CNTRL_1_VHX_EXT_VS   (1 << 5)
+# define TBG_CNTRL_1_DWIN_DIS     (1 << 6)
+#define REG_ENABLE_SPACE          REG(0x00, 0xd6)     /* write */
+#define REG_HVF_CNTRL_0           REG(0x00, 0xe4)     /* write */
+# define HVF_CNTRL_0_SM           (1 << 7)
+# define HVF_CNTRL_0_RWB          (1 << 6)
+# define HVF_CNTRL_0_PREFIL(x)    (((x) & 3) << 2)
+# define HVF_CNTRL_0_INTPOL(x)    (((x) & 3) << 0)
+#define REG_HVF_CNTRL_1           REG(0x00, 0xe5)     /* write */
+# define HVF_CNTRL_1_FOR          (1 << 0)
+# define HVF_CNTRL_1_YUVBLK       (1 << 1)
+# define HVF_CNTRL_1_VQR(x)       (((x) & 3) << 2)
+# define HVF_CNTRL_1_PAD(x)       (((x) & 3) << 4)
+# define HVF_CNTRL_1_SEMI_PLANAR  (1 << 6)
+#define REG_RPT_CNTRL             REG(0x00, 0xf0)     /* write */
+
+
+/* Page 02h: PLL settings */
+#define REG_PLL_SERIAL_1          REG(0x02, 0x00)     /* read/write */
+# define PLL_SERIAL_1_SRL_FDN     (1 << 0)
+# define PLL_SERIAL_1_SRL_IZ(x)   (((x) & 3) << 1)
+# define PLL_SERIAL_1_SRL_MAN_IZ  (1 << 6)
+#define REG_PLL_SERIAL_2          REG(0x02, 0x01)     /* read/write */
+# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
+# define PLL_SERIAL_2_SRL_PR(x)   (((x) & 0xf) << 4)
+#define REG_PLL_SERIAL_3          REG(0x02, 0x02)     /* read/write */
+# define PLL_SERIAL_3_SRL_CCIR    (1 << 0)
+# define PLL_SERIAL_3_SRL_DE      (1 << 2)
+# define PLL_SERIAL_3_SRL_PXIN_SEL (1 << 4)
+#define REG_SERIALIZER            REG(0x02, 0x03)     /* read/write */
+#define REG_BUFFER_OUT            REG(0x02, 0x04)     /* read/write */
+#define REG_PLL_SCG1              REG(0x02, 0x05)     /* read/write */
+#define REG_PLL_SCG2              REG(0x02, 0x06)     /* read/write */
+#define REG_PLL_SCGN1             REG(0x02, 0x07)     /* read/write */
+#define REG_PLL_SCGN2             REG(0x02, 0x08)     /* read/write */
+#define REG_PLL_SCGR1             REG(0x02, 0x09)     /* read/write */
+#define REG_PLL_SCGR2             REG(0x02, 0x0a)     /* read/write */
+#define REG_AUDIO_DIV             REG(0x02, 0x0e)     /* read/write */
+#define REG_SEL_CLK               REG(0x02, 0x11)     /* read/write */
+# define SEL_CLK_SEL_CLK1         (1 << 0)
+# define SEL_CLK_SEL_VRF_CLK(x)   (((x) & 3) << 1)
+# define SEL_CLK_ENA_SC_CLK       (1 << 3)
+#define REG_ANA_GENERAL           REG(0x02, 0x12)     /* read/write */
+
+
+/* Page 09h: EDID Control */
+#define REG_EDID_DATA_0           REG(0x09, 0x00)     /* read */
+/* next 127 successive registers are the EDID block */
+#define REG_EDID_CTRL             REG(0x09, 0xfa)     /* read/write */
+#define REG_DDC_ADDR              REG(0x09, 0xfb)     /* read/write */
+#define REG_DDC_OFFS              REG(0x09, 0xfc)     /* read/write */
+#define REG_DDC_SEGM_ADDR         REG(0x09, 0xfd)     /* read/write */
+#define REG_DDC_SEGM              REG(0x09, 0xfe)     /* read/write */
+
+
+/* Page 10h: information frames and packets */
+
+
+/* Page 11h: audio settings and content info packets */
+#define REG_AIP_CNTRL_0           REG(0x11, 0x00)     /* read/write */
+# define AIP_CNTRL_0_RST_FIFO     (1 << 0)
+# define AIP_CNTRL_0_SWAP         (1 << 1)
+# define AIP_CNTRL_0_LAYOUT       (1 << 2)
+# define AIP_CNTRL_0_ACR_MAN      (1 << 5)
+# define AIP_CNTRL_0_RST_CTS      (1 << 6)
+#define REG_ENC_CNTRL             REG(0x11, 0x0d)     /* read/write */
+# define ENC_CNTRL_RST_ENC        (1 << 0)
+# define ENC_CNTRL_RST_SEL        (1 << 1)
+# define ENC_CNTRL_CTL_CODE(x)    (((x) & 3) << 2)
+
+
+/* Page 12h: HDCP and OTP */
+#define REG_TX3                   REG(0x12, 0x9a)     /* read/write */
+#define REG_TX33                  REG(0x12, 0xb8)     /* read/write */
+# define TX33_HDMI                (1 << 1)
+
+
+/* Page 13h: Gamut related metadata packets */
+
+
+
+/* CEC registers: (not paged)
+ */
+#define REG_CEC_FRO_IM_CLK_CTRL   0xfb                /* read/write */
+# define CEC_FRO_IM_CLK_CTRL_GHOST_DIS (1 << 7)
+# define CEC_FRO_IM_CLK_CTRL_ENA_OTP   (1 << 6)
+# define CEC_FRO_IM_CLK_CTRL_IMCLK_SEL (1 << 1)
+# define CEC_FRO_IM_CLK_CTRL_FRO_DIV   (1 << 0)
+#define REG_CEC_RXSHPDLEV         0xfe                /* read */
+# define CEC_RXSHPDLEV_RXSENS     (1 << 0)
+# define CEC_RXSHPDLEV_HPD        (1 << 1)
+
+#define REG_CEC_ENAMODS           0xff                /* read/write */
+# define CEC_ENAMODS_DIS_FRO      (1 << 6)
+# define CEC_ENAMODS_DIS_CCLK     (1 << 5)
+# define CEC_ENAMODS_EN_RXSENS    (1 << 2)
+# define CEC_ENAMODS_EN_HDMI      (1 << 1)
+# define CEC_ENAMODS_EN_CEC       (1 << 0)
+
+
+/* Device versions: */
+#define TDA9989N2                 0x0101
+#define TDA19989                  0x0201
+#define TDA19989N2                0x0202
+#define TDA19988                  0x0301
+
+static void
+cec_write(struct drm_encoder *encoder, uint16_t addr, uint8_t val)
+{
+	struct i2c_client *client = to_tda998x_priv(encoder)->cec;
+	uint8_t buf[] = {addr, val};
+	int ret;
+
+	ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+	if (ret < 0)
+		dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
+}
+
+static uint8_t
+cec_read(struct drm_encoder *encoder, uint8_t addr)
+{
+	struct i2c_client *client = to_tda998x_priv(encoder)->cec;
+	uint8_t val;
+	int ret;
+
+	ret = i2c_master_send(client, &addr, sizeof(addr));
+	if (ret < 0)
+		goto fail;
+
+	ret = i2c_master_recv(client, &val, sizeof(val));
+	if (ret < 0)
+		goto fail;
+
+	return val;
+
+fail:
+	dev_err(&client->dev, "Error %d reading from cec:0x%x\n", ret, addr);
+	return 0;
+}
+
+static void
+set_page(struct drm_encoder *encoder, uint16_t reg)
+{
+	struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+	if (REG2PAGE(reg) != priv->current_page) {
+		struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+		uint8_t buf[] = {
+				REG_CURPAGE, REG2PAGE(reg)
+		};
+		int ret = i2c_master_send(client, buf, sizeof(buf));
+		if (ret < 0)
+			dev_err(&client->dev, "Error %d writing to REG_CURPAGE\n", ret);
+
+		priv->current_page = REG2PAGE(reg);
+	}
+}
+
+static int
+reg_read_range(struct drm_encoder *encoder, uint16_t reg, char *buf, int cnt)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	uint8_t addr = REG2ADDR(reg);
+	int ret;
+
+	set_page(encoder, reg);
+
+	ret = i2c_master_send(client, &addr, sizeof(addr));
+	if (ret < 0)
+		goto fail;
+
+	ret = i2c_master_recv(client, buf, cnt);
+	if (ret < 0)
+		goto fail;
+
+	return ret;
+
+fail:
+	dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+	return ret;
+}
+
+static uint8_t
+reg_read(struct drm_encoder *encoder, uint16_t reg)
+{
+	uint8_t val = 0;
+	reg_read_range(encoder, reg, &val, sizeof(val));
+	return val;
+}
+
+static void
+reg_write(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	uint8_t buf[] = {REG2ADDR(reg), val};
+	int ret;
+
+	set_page(encoder, reg);
+
+	ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+	if (ret < 0)
+		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
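+/* write a 16-bit value as two bytes to a consecutive MSB/LSB register pair */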
+static void
+reg_write16(struct drm_encoder *encoder, uint16_t reg, uint16_t val)
+{
+	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+	uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
+	int ret;
+
+	set_page(encoder, reg);
+
+	ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+	if (ret < 0)
+		dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
+
+static void
+reg_set(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+{
+	reg_write(encoder, reg, reg_read(encoder, reg) | val);
+}
+
+static void
+reg_clear(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
+{
+	reg_write(encoder, reg, reg_read(encoder, reg) & ~val);
+}
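+
+/* reg_set() and reg_clear() are read-modify-write and therefore not atomic */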
+
+static void
+tda998x_reset(struct drm_encoder *encoder)
+{
+	/* reset audio and i2c master: */
+	reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
+	msleep(50);
+	reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
+	msleep(50);
+
+	/* reset transmitter: */
+	reg_set(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+	reg_clear(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
+
+	/* PLL registers common configuration */
+	reg_write(encoder, REG_PLL_SERIAL_1, 0x00);
+	reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1));
+	reg_write(encoder, REG_PLL_SERIAL_3, 0x00);
+	reg_write(encoder, REG_SERIALIZER,   0x00);
+	reg_write(encoder, REG_BUFFER_OUT,   0x00);
+	reg_write(encoder, REG_PLL_SCG1,     0x00);
+	reg_write(encoder, REG_AUDIO_DIV,    0x03);
+	reg_write(encoder, REG_SEL_CLK,      SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
+	reg_write(encoder, REG_PLL_SCGN1,    0xfa);
+	reg_write(encoder, REG_PLL_SCGN2,    0x00);
+	reg_write(encoder, REG_PLL_SCGR1,    0x5b);
+	reg_write(encoder, REG_PLL_SCGR2,    0x00);
+	reg_write(encoder, REG_PLL_SCG2,     0x10);
+}
+
+/* DRM encoder functions */
+
+static void
+tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
+{
+}
+
+static void
+tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+	/* we only care about on or off: */
+	if (mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
+
+	if (mode == priv->dpms)
+		return;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		/* enable audio and video ports */
+		reg_write(encoder, REG_ENA_AP, 0xff);
+		reg_write(encoder, REG_ENA_VP_0, 0xff);
+		reg_write(encoder, REG_ENA_VP_1, 0xff);
+		reg_write(encoder, REG_ENA_VP_2, 0xff);
+		/* set muxing after enabling ports: */
+		reg_write(encoder, REG_VIP_CNTRL_0,
+				VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3));
+		reg_write(encoder, REG_VIP_CNTRL_1,
+				VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1));
+		reg_write(encoder, REG_VIP_CNTRL_2,
+				VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5));
+		break;
+	case DRM_MODE_DPMS_OFF:
+		/* disable audio and video ports */
+		reg_write(encoder, REG_ENA_AP, 0x00);
+		reg_write(encoder, REG_ENA_VP_0, 0x00);
+		reg_write(encoder, REG_ENA_VP_1, 0x00);
+		reg_write(encoder, REG_ENA_VP_2, 0x00);
+		break;
+	}
+
+	priv->dpms = mode;
+}
+
+static void
+tda998x_encoder_save(struct drm_encoder *encoder)
+{
+	DBG("");
+}
+
+static void
+tda998x_encoder_restore(struct drm_encoder *encoder)
+{
+	DBG("");
+}
+
+static bool
+tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
+			  const struct drm_display_mode *mode,
+			  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static int
+tda998x_encoder_mode_valid(struct drm_encoder *encoder,
+			  struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static void
+tda998x_encoder_mode_set(struct drm_encoder *encoder,
+			struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode)
+{
+	struct tda998x_priv *priv = to_tda998x_priv(encoder);
+	uint16_t hs_start, hs_end, line_start, line_end;
+	uint16_t vwin_start, vwin_end, de_start, de_end;
+	uint16_t ref_pix, ref_line, pix_start2;
+	uint8_t reg, div, rep;
+
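+	/* derive the chip's timing windows from the DRM mode: hs_start/hs_end
+	 * are offsets from the end of active video (the horizontal sync pulse
+	 * edges), and de_start/de_end bound the data-enable window:
+	 */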
+	hs_start   = mode->hsync_start - mode->hdisplay;
+	hs_end     = mode->hsync_end - mode->hdisplay;
+	line_start = 1;
+	line_end   = 1 + mode->vsync_end - mode->vsync_start;
+	vwin_start = mode->vtotal - mode->vsync_start;
+	vwin_end   = vwin_start + mode->vdisplay;
+	de_start   = mode->htotal - mode->hdisplay;
+	de_end     = mode->htotal;
+
+	pix_start2 = 0;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		pix_start2 = (mode->htotal / 2) + hs_start;
+
+	/* TODO: how is this value calculated?  It is 2 for all common
+	 * formats in the tables of the out-of-tree NXP driver (assuming
+	 * I've properly deciphered their byzantine table system).
+	 */
+	ref_line = 2;
+
+	/* this might change for other color formats from the CRTC: */
+	ref_pix = 3 + hs_start;
+
+	div = 148500 / mode->clock;
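+	/* e.g. a 148500 kHz (1080p60) clock gives div=1; 74250 kHz (720p60)
+	 * gives div=2 */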
+
+	DBG("clock=%d, div=%u", mode->clock, div);
+	DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u",
+			hs_start, hs_end, line_start, line_end);
+	DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u",
+			vwin_start, vwin_end, de_start, de_end);
+	DBG("ref_line=%u, ref_pix=%u, pix_start2=%u",
+			ref_line, ref_pix, pix_start2);
+
+	/* mute the audio FIFO: */
+	reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+
+	/* set HDMI HDCP mode off: */
+	reg_set(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
+	reg_clear(encoder, REG_TX33, TX33_HDMI);
+
+	reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(0));
+	/* no pre-filter or interpolator: */
+	reg_write(encoder, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
+			HVF_CNTRL_0_INTPOL(0));
+	reg_write(encoder, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
+	reg_write(encoder, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
+			VIP_CNTRL_4_BLC(0));
+	reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR);
+
+	reg_clear(encoder, REG_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IZ);
+	reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_DE);
+	reg_write(encoder, REG_SERIALIZER, 0);
+	reg_write(encoder, REG_HVF_CNTRL_1, HVF_CNTRL_1_VQR(0));
+
+	/* TODO enable pixel repeat for pixel rates less than 25Msamp/s */
+	rep = 0;
+	reg_write(encoder, REG_RPT_CNTRL, 0);
+	reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_VRF_CLK(0) |
+			SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
+
+	reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
+			PLL_SERIAL_2_SRL_PR(rep));
+
+	reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2);
+	reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2);
+
+	/* set color matrix bypass flag: */
+	reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
+
+	/* set BIAS tmds value: */
+	reg_write(encoder, REG_ANA_GENERAL, 0x09);
+
+	reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
+
+	reg_write(encoder, REG_VIP_CNTRL_3, 0);
+	reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
+
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
+
+	reg_write(encoder, REG_VIDFORMAT, 0x00);
+	reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1);
+	reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1);
+	reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start);
+	reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end);
+	reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start);
+	reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start);
+	reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start);
+	reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end);
+	reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start);
+	reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end);
+	reg_write16(encoder, REG_DE_START_MSB, de_start);
+	reg_write16(encoder, REG_DE_STOP_MSB, de_end);
+
+	if (priv->rev == TDA19988) {
+		/* let incoming pixels fill the active space (if any) */
+		reg_write(encoder, REG_ENABLE_SPACE, 0x01);
+	}
+
+	reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
+	reg_write16(encoder, REG_REFLINE_MSB, ref_line);
+
+	reg = TBG_CNTRL_1_VHX_EXT_DE |
+			TBG_CNTRL_1_VHX_EXT_HS |
+			TBG_CNTRL_1_VHX_EXT_VS |
+			TBG_CNTRL_1_DWIN_DIS | /* HDCP off */
+			TBG_CNTRL_1_VH_TGL_2;
+	if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC))
+		reg |= TBG_CNTRL_1_VH_TGL_0;
+	reg_set(encoder, REG_TBG_CNTRL_1, reg);
+
+	/* must be last register set: */
+	reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
+}
+
+static enum drm_connector_status
+tda998x_encoder_detect(struct drm_encoder *encoder,
+		      struct drm_connector *connector)
+{
+	uint8_t val = cec_read(encoder, REG_CEC_RXSHPDLEV);
+	return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
+			connector_status_disconnected;
+}
+
+static int
+read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
+{
+	uint8_t offset, segptr;
+	int ret, i;
+
+	/* enable EDID read irq: */
+	reg_set(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+	offset = (blk & 1) ? 128 : 0;
+	segptr = blk / 2;
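+	/* e.g. EDID block 3 is the second (odd) block of segment 1, so it is
+	 * read at offset 128 */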
+
+	reg_write(encoder, REG_DDC_ADDR, 0xa0);
+	reg_write(encoder, REG_DDC_OFFS, offset);
+	reg_write(encoder, REG_DDC_SEGM_ADDR, 0x60);
+	reg_write(encoder, REG_DDC_SEGM, segptr);
+
+	/* enable reading EDID: */
+	reg_write(encoder, REG_EDID_CTRL, 0x1);
+
+	/* flag must be cleared by sw: */
+	reg_write(encoder, REG_EDID_CTRL, 0x0);
+
+	/* wait for block read to complete: */
+	for (i = 100; i > 0; i--) {
+		uint8_t val = reg_read(encoder, REG_INT_FLAGS_2);
+		if (val & INT_FLAGS_2_EDID_BLK_RD)
+			break;
+		msleep(1);
+	}
+
+	if (i == 0)
+		return -ETIMEDOUT;
+
+	ret = reg_read_range(encoder, REG_EDID_DATA_0, buf, EDID_LENGTH);
+	if (ret != EDID_LENGTH) {
+		dev_err(encoder->dev->dev, "failed to read edid block %d: %d\n",
+				blk, ret);
+		return ret;
+	}
+
+	reg_clear(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+	return 0;
+}
+
+static uint8_t *
+do_get_edid(struct drm_encoder *encoder)
+{
+	int j = 0, valid_extensions = 0;
+	uint8_t *block, *new;
+	bool print_bad_edid = drm_debug & DRM_UT_KMS;
+
+	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+		return NULL;
+
+	/* base block fetch */
+	if (read_edid_block(encoder, block, 0))
+		goto fail;
+
+	if (!drm_edid_block_valid(block, 0, print_bad_edid))
+		goto fail;
+
+	/* if there are no extensions, we're done */
+	if (block[0x7e] == 0)
+		return block;
+
+	new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
+	if (!new)
+		goto fail;
+	block = new;
+
+	for (j = 1; j <= block[0x7e]; j++) {
+		uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH;
+		if (read_edid_block(encoder, ext_block, j))
+			goto fail;
+
+		if (!drm_edid_block_valid(ext_block, j, print_bad_edid))
+			goto fail;
+
+		valid_extensions++;
+	}
+
+	if (valid_extensions != block[0x7e]) {
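+		/* patch the base-block checksum to match the reduced
+		 * extension count before shrinking the allocation: */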
+		block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+		block[0x7e] = valid_extensions;
+		new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+		if (!new)
+			goto fail;
+		block = new;
+	}
+
+	return block;
+
+fail:
+	dev_warn(encoder->dev->dev, "failed to read EDID\n");
+	kfree(block);
+	return NULL;
+}
+
+static int
+tda998x_encoder_get_modes(struct drm_encoder *encoder,
+			 struct drm_connector *connector)
+{
+	struct edid *edid = (struct edid *)do_get_edid(encoder);
+	int n = 0;
+
+	if (edid) {
+		drm_mode_connector_update_edid_property(connector, edid);
+		n = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+
+	return n;
+}
+
+static int
+tda998x_encoder_create_resources(struct drm_encoder *encoder,
+				struct drm_connector *connector)
+{
+	DBG("");
+	return 0;
+}
+
+static int
+tda998x_encoder_set_property(struct drm_encoder *encoder,
+			    struct drm_connector *connector,
+			    struct drm_property *property,
+			    uint64_t val)
+{
+	DBG("");
+	return 0;
+}
+
+static void
+tda998x_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct tda998x_priv *priv = to_tda998x_priv(encoder);
+	drm_i2c_encoder_destroy(encoder);
+	kfree(priv);
+}
+
+static struct drm_encoder_slave_funcs tda998x_encoder_funcs = {
+	.set_config = tda998x_encoder_set_config,
+	.destroy = tda998x_encoder_destroy,
+	.dpms = tda998x_encoder_dpms,
+	.save = tda998x_encoder_save,
+	.restore = tda998x_encoder_restore,
+	.mode_fixup = tda998x_encoder_mode_fixup,
+	.mode_valid = tda998x_encoder_mode_valid,
+	.mode_set = tda998x_encoder_mode_set,
+	.detect = tda998x_encoder_detect,
+	.get_modes = tda998x_encoder_get_modes,
+	.create_resources = tda998x_encoder_create_resources,
+	.set_property = tda998x_encoder_set_property,
+};
+
+/* I2C driver functions */
+
+static int
+tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	return 0;
+}
+
+static int
+tda998x_remove(struct i2c_client *client)
+{
+	return 0;
+}
+
+static int
+tda998x_encoder_init(struct i2c_client *client,
+		    struct drm_device *dev,
+		    struct drm_encoder_slave *encoder_slave)
+{
+	struct drm_encoder *encoder = &encoder_slave->base;
+	struct tda998x_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->current_page = 0;
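+	/* the chip's CEC core sits behind a separate i2c address */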
+	priv->cec = i2c_new_dummy(client->adapter, 0x34);
+	priv->dpms = DRM_MODE_DPMS_OFF;
+
+	encoder_slave->slave_priv = priv;
+	encoder_slave->slave_funcs = &tda998x_encoder_funcs;
+
+	/* wake up the device: */
+	cec_write(encoder, REG_CEC_ENAMODS,
+			CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
+
+	tda998x_reset(encoder);
+
+	/* read version: */
+	priv->rev = reg_read(encoder, REG_VERSION_LSB) |
+			reg_read(encoder, REG_VERSION_MSB) << 8;
+
+	/* mask off feature bits: */
+	priv->rev &= ~0x30; /* not-hdcp and not-scaler bits */
+
+	switch (priv->rev) {
+	case TDA9989N2:  dev_info(dev->dev, "found TDA9989 n2");  break;
+	case TDA19989:   dev_info(dev->dev, "found TDA19989");    break;
+	case TDA19989N2: dev_info(dev->dev, "found TDA19989 n2"); break;
+	case TDA19988:   dev_info(dev->dev, "found TDA19988");    break;
+	default:
+		DBG("found unsupported device: %04x", priv->rev);
+		goto fail;
+	}
+
+	/* after reset, enable DDC: */
+	reg_write(encoder, REG_DDC_DISABLE, 0x00);
+
+	/* set clock on DDC channel: */
+	reg_write(encoder, REG_TX3, 39);
+
+	/* if necessary, disable multi-master: */
+	if (priv->rev == TDA19989)
+		reg_set(encoder, REG_I2C_MASTER, I2C_MASTER_DIS_MM);
+
+	cec_write(encoder, REG_CEC_FRO_IM_CLK_CTRL,
+			CEC_FRO_IM_CLK_CTRL_GHOST_DIS | CEC_FRO_IM_CLK_CTRL_IMCLK_SEL);
+
+	return 0;
+
+fail:
+	/* if encoder_init fails, the encoder slave is never registered,
+	 * so cleanup here:
+	 */
+	if (priv->cec)
+		i2c_unregister_device(priv->cec);
+	kfree(priv);
+	encoder_slave->slave_priv = NULL;
+	encoder_slave->slave_funcs = NULL;
+	return -ENXIO;
+}
+
+static struct i2c_device_id tda998x_ids[] = {
+	{ "tda998x", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, tda998x_ids);
+
+static struct drm_i2c_encoder_driver tda998x_driver = {
+	.i2c_driver = {
+		.probe = tda998x_probe,
+		.remove = tda998x_remove,
+		.driver = {
+			.name = "tda998x",
+		},
+		.id_table = tda998x_ids,
+	},
+	.encoder_init = tda998x_encoder_init,
+};
+
+/* Module initialization */
+
+static int __init
+tda998x_init(void)
+{
+	DBG("");
+	return drm_i2c_encoder_register(THIS_MODULE, &tda998x_driver);
+}
+
+static void __exit
+tda998x_exit(void)
+{
+	DBG("");
+	drm_i2c_encoder_unregister(&tda998x_driver);
+}
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
+MODULE_DESCRIPTION("NXP Semiconductors TDA998X HDMI Encoder");
+MODULE_LICENSE("GPL");
+
+module_init(tda998x_init);
+module_exit(tda998x_exit);
diff --git a/linux-imx/drivers/gpu/drm/i810/Makefile b/linux-imx/drivers/gpu/drm/i810/Makefile
new file mode 100644
index 0000000..43844ec
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i810/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+i810-y := i810_drv.o i810_dma.o
+
+obj-$(CONFIG_DRM_I810)	+= i810.o
diff --git a/linux-imx/drivers/gpu/drm/i810/i810_dma.c b/linux-imx/drivers/gpu/drm/i810/i810_dma.c
new file mode 100644
index 0000000..004ecdf
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i810/i810_dma.c
@@ -0,0 +1,1278 @@
+/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ *	    Jeff Hartmann <jhartmann@valinux.com>
+ *          Keith Whitwell <keith@tungstengraphics.com>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i810_drm.h>
+#include "i810_drv.h"
+#include <linux/interrupt.h>	/* For task queue support */
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+
+#define I810_BUF_FREE		2
+#define I810_BUF_CLIENT		1
+#define I810_BUF_HARDWARE	0
+
+#define I810_BUF_UNMAPPED 0
+#define I810_BUF_MAPPED   1
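+
+/*
+ * Buffer ownership moves FREE -> CLIENT in i810_freelist_get(), CLIENT ->
+ * HARDWARE when a batch is dispatched, and back to FREE either by the
+ * hardware (via a stored dword to the buffer's status-page slot) or by
+ * i810_freelist_put(); cmpxchg() on the status-page word arbitrates.
+ */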
+
+static struct drm_buf *i810_freelist_get(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int i;
+	int used;
+
+	/* Linear search might not be the best solution */
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+		/* In use is already a pointer */
+		used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
+			       I810_BUF_CLIENT);
+		if (used == I810_BUF_FREE)
+			return buf;
+	}
+	return NULL;
+}
+
+/* This should only be called if the buffer has not yet been sent to the
+ * hardware; the hardware updates "in use" for us once it's on the ring
+ * buffer.
+ */
+
+static int i810_freelist_put(struct drm_device *dev, struct drm_buf *buf)
+{
+	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+	int used;
+
+	/* In use is already a pointer */
+	used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
+	if (used != I810_BUF_CLIENT) {
+		DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev;
+	drm_i810_private_t *dev_priv;
+	struct drm_buf *buf;
+	drm_i810_buf_priv_t *buf_priv;
+
+	dev = priv->minor->dev;
+	dev_priv = dev->dev_private;
+	buf = dev_priv->mmap_buffer;
+	buf_priv = buf->dev_private;
+
+	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
+
+	buf_priv->currently_mapped = I810_BUF_MAPPED;
+
+	if (io_remap_pfn_range(vma, vma->vm_start,
+			       vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
+		return -EAGAIN;
+	return 0;
+}
+
+static const struct file_operations i810_buffer_fops = {
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = i810_mmap_buffers,
+	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	const struct file_operations *old_fops;
+	int retcode = 0;
+
+	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
+		return -EINVAL;
+
+	/* This is all entirely broken */
+	old_fops = file_priv->filp->f_op;
+	file_priv->filp->f_op = &i810_buffer_fops;
+	dev_priv->mmap_buffer = buf;
+	buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
+					    PROT_READ | PROT_WRITE,
+					    MAP_SHARED, buf->bus_address);
+	dev_priv->mmap_buffer = NULL;
+	file_priv->filp->f_op = old_fops;
+	if (IS_ERR(buf_priv->virtual)) {
+		/* Real error */
+		DRM_ERROR("mmap error\n");
+		retcode = PTR_ERR(buf_priv->virtual);
+		buf_priv->virtual = NULL;
+	}
+
+	return retcode;
+}
+
+static int i810_unmap_buffer(struct drm_buf *buf)
+{
+	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+	int retcode = 0;
+
+	if (buf_priv->currently_mapped != I810_BUF_MAPPED)
+		return -EINVAL;
+
+	retcode = vm_munmap((unsigned long)buf_priv->virtual,
+			    (size_t) buf->total);
+
+	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
+	buf_priv->virtual = NULL;
+
+	return retcode;
+}
+
+static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d,
+			       struct drm_file *file_priv)
+{
+	struct drm_buf *buf;
+	drm_i810_buf_priv_t *buf_priv;
+	int retcode = 0;
+
+	buf = i810_freelist_get(dev);
+	if (!buf) {
+		retcode = -ENOMEM;
+		DRM_DEBUG("retcode=%d\n", retcode);
+		return retcode;
+	}
+
+	retcode = i810_map_buffer(buf, file_priv);
+	if (retcode) {
+		i810_freelist_put(dev, buf);
+		DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
+		return retcode;
+	}
+	buf->file_priv = file_priv;
+	buf_priv = buf->dev_private;
+	d->granted = 1;
+	d->request_idx = buf->idx;
+	d->request_size = buf->total;
+	d->virtual = buf_priv->virtual;
+
+	return retcode;
+}
+
+static int i810_dma_cleanup(struct drm_device *dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+
+	/* Make sure interrupts are disabled here because the uninstall ioctl
+	 * may not have been called from userspace and after dev_private
+	 * is freed, it's too late.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+	if (dev->dev_private) {
+		int i;
+		drm_i810_private_t *dev_priv =
+		    (drm_i810_private_t *) dev->dev_private;
+
+		if (dev_priv->ring.virtual_start)
+			drm_core_ioremapfree(&dev_priv->ring.map, dev);
+		if (dev_priv->hw_status_page) {
+			pci_free_consistent(dev->pdev, PAGE_SIZE,
+					    dev_priv->hw_status_page,
+					    dev_priv->dma_status_page);
+		}
+		kfree(dev->dev_private);
+		dev->dev_private = NULL;
+
+		for (i = 0; i < dma->buf_count; i++) {
+			struct drm_buf *buf = dma->buflist[i];
+			drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+			if (buf_priv->kernel_virtual && buf->total)
+				drm_core_ioremapfree(&buf_priv->map, dev);
+		}
+	}
+	return 0;
+}
+
+static int i810_wait_ring(struct drm_device *dev, int n)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
+	int iters = 0;
+	unsigned long end;
+	unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+
+	end = jiffies + (HZ * 3);
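+	/* free space is head - (tail + 8) modulo the ring size; the 8-byte
+	 * slack keeps the tail from ever catching up with the head: */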
+	while (ring->space < n) {
+		ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->Size;
+
+		if (ring->head != last_head) {
+			end = jiffies + (HZ * 3);
+			last_head = ring->head;
+		}
+
+		iters++;
+		if (time_before(end, jiffies)) {
+			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
+			DRM_ERROR("lockup\n");
+			goto out_wait_ring;
+		}
+		udelay(1);
+	}
+
+out_wait_ring:
+	return iters;
+}
+
+static void i810_kernel_lost_context(struct drm_device *dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
+
+	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+	ring->tail = I810_READ(LP_RING + RING_TAIL);
+	ring->space = ring->head - (ring->tail + 8);
+	if (ring->space < 0)
+		ring->space += ring->Size;
+}
+
+static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int my_idx = 24;
+	u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
+	int i;
+
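+	/* the freelist lives in the status page: one 4-byte in-use word per
+	 * buffer, starting after a 24-byte header, which bounds its size */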
+	if (dma->buf_count > 1019) {
+		/* Not enough space in the status page for the freelist */
+		return -EINVAL;
+	}
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+		buf_priv->in_use = hw_status++;
+		buf_priv->my_use_idx = my_idx;
+		my_idx += 4;
+
+		*buf_priv->in_use = I810_BUF_FREE;
+
+		buf_priv->map.offset = buf->bus_address;
+		buf_priv->map.size = buf->total;
+		buf_priv->map.type = _DRM_AGP;
+		buf_priv->map.flags = 0;
+		buf_priv->map.mtrr = 0;
+
+		drm_core_ioremap(&buf_priv->map, dev);
+		buf_priv->kernel_virtual = buf_priv->map.handle;
+
+	}
+	return 0;
+}
+
+static int i810_dma_initialize(struct drm_device *dev,
+			       drm_i810_private_t *dev_priv,
+			       drm_i810_init_t *init)
+{
+	struct drm_map_list *r_list;
+	memset(dev_priv, 0, sizeof(drm_i810_private_t));
+
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		if (r_list->map &&
+		    r_list->map->type == _DRM_SHM &&
+		    r_list->map->flags & _DRM_CONTAINS_LOCK) {
+			dev_priv->sarea_map = r_list->map;
+			break;
+		}
+	}
+	if (!dev_priv->sarea_map) {
+		dev->dev_private = (void *)dev_priv;
+		i810_dma_cleanup(dev);
+		DRM_ERROR("can not find sarea!\n");
+		return -EINVAL;
+	}
+	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
+	if (!dev_priv->mmio_map) {
+		dev->dev_private = (void *)dev_priv;
+		i810_dma_cleanup(dev);
+		DRM_ERROR("can not find mmio map!\n");
+		return -EINVAL;
+	}
+	dev->agp_buffer_token = init->buffers_offset;
+	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+	if (!dev->agp_buffer_map) {
+		dev->dev_private = (void *)dev_priv;
+		i810_dma_cleanup(dev);
+		DRM_ERROR("can not find dma buffer map!\n");
+		return -EINVAL;
+	}
+
+	dev_priv->sarea_priv = (drm_i810_sarea_t *)
+	    ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
+
+	dev_priv->ring.Start = init->ring_start;
+	dev_priv->ring.End = init->ring_end;
+	dev_priv->ring.Size = init->ring_size;
+
+	dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
+	dev_priv->ring.map.size = init->ring_size;
+	dev_priv->ring.map.type = _DRM_AGP;
+	dev_priv->ring.map.flags = 0;
+	dev_priv->ring.map.mtrr = 0;
+
+	drm_core_ioremap(&dev_priv->ring.map, dev);
+
+	if (dev_priv->ring.map.handle == NULL) {
+		dev->dev_private = (void *)dev_priv;
+		i810_dma_cleanup(dev);
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+	dev_priv->w = init->w;
+	dev_priv->h = init->h;
+	dev_priv->pitch = init->pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->front_offset = init->front_offset;
+
+	dev_priv->overlay_offset = init->overlay_offset;
+	dev_priv->overlay_physical = init->overlay_physical;
+
+	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
+	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
+	dev_priv->zi1 = init->depth_offset | init->pitch_bits;
+
+	/* Program Hardware Status Page */
+	dev_priv->hw_status_page =
+	    pci_alloc_consistent(dev->pdev, PAGE_SIZE,
+				 &dev_priv->dma_status_page);
+	if (!dev_priv->hw_status_page) {
+		dev->dev_private = (void *)dev_priv;
+		i810_dma_cleanup(dev);
+		DRM_ERROR("Can not allocate hardware status page\n");
+		return -ENOMEM;
+	}
+	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
+
+	I810_WRITE(0x02080, dev_priv->dma_status_page);
+	DRM_DEBUG("Enabled hardware status page\n");
+
+	/* Now we need to init our freelist */
+	if (i810_freelist_init(dev, dev_priv) != 0) {
+		dev->dev_private = (void *)dev_priv;
+		i810_dma_cleanup(dev);
+		DRM_ERROR("Not enough space in the status page for"
+			  " the freelist\n");
+		return -ENOMEM;
+	}
+	dev->dev_private = (void *)dev_priv;
+
+	return 0;
+}
+
+static int i810_dma_init(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv;
+	drm_i810_init_t *init = data;
+	int retcode = 0;
+
+	switch (init->func) {
+	case I810_INIT_DMA_1_4:
+		DRM_INFO("Using v1.4 init.\n");
+		dev_priv = kmalloc(sizeof(drm_i810_private_t), GFP_KERNEL);
+		if (dev_priv == NULL)
+			return -ENOMEM;
+		retcode = i810_dma_initialize(dev, dev_priv, init);
+		break;
+
+	case I810_CLEANUP_DMA:
+		DRM_INFO("DMA Cleanup\n");
+		retcode = i810_dma_cleanup(dev);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return retcode;
+}
+
+/* Most efficient way to verify state for the i810 is as it is
+ * emitted.  Non-conformant state is silently dropped.
+ *
+ * Use 'volatile' & local var tmp to force the emitted values to be
+ * identical to the verified ones.
+ */
+static void i810EmitContextVerified(struct drm_device *dev,
+				    volatile unsigned int *code)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	int i, j = 0;
+	unsigned int tmp;
+	RING_LOCALS;
+
+	BEGIN_LP_RING(I810_CTX_SETUP_SIZE);
+
+	OUT_RING(GFX_OP_COLOR_FACTOR);
+	OUT_RING(code[I810_CTXREG_CF1]);
+
+	OUT_RING(GFX_OP_STIPPLE);
+	OUT_RING(code[I810_CTXREG_ST1]);
+
+	for (i = 4; i < I810_CTX_SETUP_SIZE; i++) {
+		tmp = code[i];
+
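+		/* pass only commands whose type field (bits 31:29) is 3 and
+		 * whose opcode (bits 28:24) is below 0x1d: */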
+		if ((tmp & (7 << 29)) == (3 << 29) &&
+		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
+			OUT_RING(tmp);
+			j++;
+		} else
+			printk("constext state dropped!!!\n");
+	}
+
+	if (j & 1)
+		OUT_RING(0);
+
+	ADVANCE_LP_RING();
+}
+
+static void i810EmitTexVerified(struct drm_device *dev, volatile unsigned int *code)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	int i, j = 0;
+	unsigned int tmp;
+	RING_LOCALS;
+
+	BEGIN_LP_RING(I810_TEX_SETUP_SIZE);
+
+	OUT_RING(GFX_OP_MAP_INFO);
+	OUT_RING(code[I810_TEXREG_MI1]);
+	OUT_RING(code[I810_TEXREG_MI2]);
+	OUT_RING(code[I810_TEXREG_MI3]);
+
+	for (i = 4; i < I810_TEX_SETUP_SIZE; i++) {
+		tmp = code[i];
+
+		if ((tmp & (7 << 29)) == (3 << 29) &&
+		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
+			OUT_RING(tmp);
+			j++;
+		} else
+			printk("texture state dropped!!!\n");
+	}
+
+	if (j & 1)
+		OUT_RING(0);
+
+	ADVANCE_LP_RING();
+}
+
+/* Need to do some additional checking when setting the dest buffer.
+ */
+static void i810EmitDestVerified(struct drm_device *dev,
+				 volatile unsigned int *code)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	unsigned int tmp;
+	RING_LOCALS;
+
+	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
+
+	tmp = code[I810_DESTREG_DI1];
+	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
+		OUT_RING(CMD_OP_DESTBUFFER_INFO);
+		OUT_RING(tmp);
+	} else
+		DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
+			  tmp, dev_priv->front_di1, dev_priv->back_di1);
+
+	/* invariant: */
+	OUT_RING(CMD_OP_Z_BUFFER_INFO);
+	OUT_RING(dev_priv->zi1);
+
+	OUT_RING(GFX_OP_DESTBUFFER_VARS);
+	OUT_RING(code[I810_DESTREG_DV1]);
+
+	OUT_RING(GFX_OP_DRAWRECT_INFO);
+	OUT_RING(code[I810_DESTREG_DR1]);
+	OUT_RING(code[I810_DESTREG_DR2]);
+	OUT_RING(code[I810_DESTREG_DR3]);
+	OUT_RING(code[I810_DESTREG_DR4]);
+	OUT_RING(0);
+
+	ADVANCE_LP_RING();
+}
+
+static void i810EmitState(struct drm_device *dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int dirty = sarea_priv->dirty;
+
+	DRM_DEBUG("%x\n", dirty);
+
+	if (dirty & I810_UPLOAD_BUFFERS) {
+		i810EmitDestVerified(dev, sarea_priv->BufferState);
+		sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
+	}
+
+	if (dirty & I810_UPLOAD_CTX) {
+		i810EmitContextVerified(dev, sarea_priv->ContextState);
+		sarea_priv->dirty &= ~I810_UPLOAD_CTX;
+	}
+
+	if (dirty & I810_UPLOAD_TEX0) {
+		i810EmitTexVerified(dev, sarea_priv->TexState[0]);
+		sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
+	}
+
+	if (dirty & I810_UPLOAD_TEX1) {
+		i810EmitTexVerified(dev, sarea_priv->TexState[1]);
+		sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
+	}
+}
+
+/* need to verify
+ */
+static void i810_dma_dispatch_clear(struct drm_device *dev, int flags,
+				    unsigned int clear_color,
+				    unsigned int clear_zval)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int pitch = dev_priv->pitch;
+	int cpp = 2;
+	int i;
+	RING_LOCALS;
+
+	if (dev_priv->current_page == 1) {
+		unsigned int tmp = flags;
+
+		flags &= ~(I810_FRONT | I810_BACK);
+		if (tmp & I810_FRONT)
+			flags |= I810_BACK;
+		if (tmp & I810_BACK)
+			flags |= I810_FRONT;
+	}
+
+	i810_kernel_lost_context(dev);
+
+	if (nbox > I810_NR_SAREA_CLIPRECTS)
+		nbox = I810_NR_SAREA_CLIPRECTS;
+
+	for (i = 0; i < nbox; i++, pbox++) {
+		unsigned int x = pbox->x1;
+		unsigned int y = pbox->y1;
+		unsigned int width = (pbox->x2 - x) * cpp;
+		unsigned int height = pbox->y2 - y;
+		unsigned int start = y * pitch + x * cpp;
+
+		if (pbox->x1 > pbox->x2 ||
+		    pbox->y1 > pbox->y2 ||
+		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
+			continue;
+
+		if (flags & I810_FRONT) {
+			BEGIN_LP_RING(6);
+			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
+			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
+			OUT_RING((height << 16) | width);
+			OUT_RING(start);
+			OUT_RING(clear_color);
+			OUT_RING(0);
+			ADVANCE_LP_RING();
+		}
+
+		if (flags & I810_BACK) {
+			BEGIN_LP_RING(6);
+			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
+			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
+			OUT_RING((height << 16) | width);
+			OUT_RING(dev_priv->back_offset + start);
+			OUT_RING(clear_color);
+			OUT_RING(0);
+			ADVANCE_LP_RING();
+		}
+
+		if (flags & I810_DEPTH) {
+			BEGIN_LP_RING(6);
+			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
+			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
+			OUT_RING((height << 16) | width);
+			OUT_RING(dev_priv->depth_offset + start);
+			OUT_RING(clear_zval);
+			OUT_RING(0);
+			ADVANCE_LP_RING();
+		}
+	}
+}
+
+static void i810_dma_dispatch_swap(struct drm_device *dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int pitch = dev_priv->pitch;
+	int cpp = 2;
+	int i;
+	RING_LOCALS;
+
+	DRM_DEBUG("swapbuffers\n");
+
+	i810_kernel_lost_context(dev);
+
+	if (nbox > I810_NR_SAREA_CLIPRECTS)
+		nbox = I810_NR_SAREA_CLIPRECTS;
+
+	for (i = 0; i < nbox; i++, pbox++) {
+		unsigned int w = pbox->x2 - pbox->x1;
+		unsigned int h = pbox->y2 - pbox->y1;
+		unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
+		unsigned int start = dst;
+
+		if (pbox->x1 > pbox->x2 ||
+		    pbox->y1 > pbox->y2 ||
+		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
+			continue;
+
+		BEGIN_LP_RING(6);
+		OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
+		OUT_RING(pitch | (0xCC << 16));
+		OUT_RING((h << 16) | (w * cpp));
+		if (dev_priv->current_page == 0)
+			OUT_RING(dev_priv->front_offset + start);
+		else
+			OUT_RING(dev_priv->back_offset + start);
+		OUT_RING(pitch);
+		if (dev_priv->current_page == 0)
+			OUT_RING(dev_priv->back_offset + start);
+		else
+			OUT_RING(dev_priv->front_offset + start);
+		ADVANCE_LP_RING();
+	}
+}
+
+static void i810_dma_dispatch_vertex(struct drm_device *dev,
+				     struct drm_buf *buf, int discard, int used)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	struct drm_clip_rect *box = sarea_priv->boxes;
+	int nbox = sarea_priv->nbox;
+	unsigned long address = (unsigned long)buf->bus_address;
+	unsigned long start = address - dev->agp->base;
+	int i = 0;
+	RING_LOCALS;
+
+	i810_kernel_lost_context(dev);
+
+	if (nbox > I810_NR_SAREA_CLIPRECTS)
+		nbox = I810_NR_SAREA_CLIPRECTS;
+
+	if (used > 4 * 1024)
+		used = 0;
+
+	if (sarea_priv->dirty)
+		i810EmitState(dev);
+
+	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
+		unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
+
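+		/* prepend the primitive command; its inline length field is
+		 * the dword count minus 2: */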
+		*(u32 *) buf_priv->kernel_virtual =
+		    ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));
+
+		if (used & 4) {
+			*(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
+			used += 4;
+		}
+
+		i810_unmap_buffer(buf);
+	}
+
+	if (used) {
+		do {
+			if (i < nbox) {
+				BEGIN_LP_RING(4);
+				OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
+					 SC_ENABLE);
+				OUT_RING(GFX_OP_SCISSOR_INFO);
+				OUT_RING(box[i].x1 | (box[i].y1 << 16));
+				OUT_RING((box[i].x2 -
+					  1) | ((box[i].y2 - 1) << 16));
+				ADVANCE_LP_RING();
+			}
+
+			BEGIN_LP_RING(4);
+			OUT_RING(CMD_OP_BATCH_BUFFER);
+			OUT_RING(start | BB1_PROTECTED);
+			OUT_RING(start + used - 4);
+			OUT_RING(0);
+			ADVANCE_LP_RING();
+
+		} while (++i < nbox);
+	}
+
+	if (discard) {
+		dev_priv->counter++;
+
+		(void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
+			      I810_BUF_HARDWARE);
+
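+		/* have the hardware store the new counter at status-page byte
+		 * offset 20 (hw_status[5]) and mark the buffer free once the
+		 * batch has been consumed: */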
+		BEGIN_LP_RING(8);
+		OUT_RING(CMD_STORE_DWORD_IDX);
+		OUT_RING(20);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(CMD_STORE_DWORD_IDX);
+		OUT_RING(buf_priv->my_use_idx);
+		OUT_RING(I810_BUF_FREE);
+		OUT_RING(CMD_REPORT_HEAD);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
+}
+
+static void i810_dma_dispatch_flip(struct drm_device *dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	int pitch = dev_priv->pitch;
+	RING_LOCALS;
+
+	DRM_DEBUG("page=%d pfCurrentPage=%d\n",
+		  dev_priv->current_page,
+		  dev_priv->sarea_priv->pf_current_page);
+
+	i810_kernel_lost_context(dev);
+
+	BEGIN_LP_RING(2);
+	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
+	/* On i815 at least ASYNC is buggy */
+	/* pitch<<5 is from 11.2.8 p158:
+	   it's the pitch / 8 then left shifted 8,
+	   so (pitch >> 3) << 8 */
+	OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ );
+	if (dev_priv->current_page == 0) {
+		OUT_RING(dev_priv->back_offset);
+		dev_priv->current_page = 1;
+	} else {
+		OUT_RING(dev_priv->front_offset);
+		dev_priv->current_page = 0;
+	}
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	BEGIN_LP_RING(2);
+	OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+
+}
+
+static void i810_dma_quiescent(struct drm_device *dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	i810_kernel_lost_context(dev);
+
+	BEGIN_LP_RING(4);
+	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
+	OUT_RING(CMD_REPORT_HEAD);
+	OUT_RING(0);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	i810_wait_ring(dev, dev_priv->ring.Size - 8);
+}
+
+static int i810_flush_queue(struct drm_device *dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	int i, ret = 0;
+	RING_LOCALS;
+
+	i810_kernel_lost_context(dev);
+
+	BEGIN_LP_RING(2);
+	OUT_RING(CMD_REPORT_HEAD);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	i810_wait_ring(dev, dev_priv->ring.Size - 8);
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+		int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
+				   I810_BUF_FREE);
+
+		if (used == I810_BUF_HARDWARE)
+			DRM_DEBUG("reclaimed from HARDWARE\n");
+		if (used == I810_BUF_CLIENT)
+			DRM_DEBUG("still on client\n");
+	}
+
+	return ret;
+}
+
+/* Must be called with the lock held */
+void i810_driver_reclaim_buffers(struct drm_device *dev,
+				 struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int i;
+
+	if (!dma)
+		return;
+	if (!dev->dev_private)
+		return;
+	if (!dma->buflist)
+		return;
+
+	i810_flush_queue(dev);
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+		if (buf->file_priv == file_priv && buf_priv) {
+			int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
+					   I810_BUF_FREE);
+
+			if (used == I810_BUF_CLIENT)
+				DRM_DEBUG("reclaimed from client\n");
+			if (buf_priv->currently_mapped == I810_BUF_MAPPED)
+				buf_priv->currently_mapped = I810_BUF_UNMAPPED;
+		}
+	}
+}
+
+static int i810_flush_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	i810_flush_queue(dev);
+	return 0;
+}
+
+static int i810_dma_vertex(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
+	    dev_priv->sarea_priv;
+	drm_i810_vertex_t *vertex = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("idx %d used %d discard %d\n",
+		  vertex->idx, vertex->used, vertex->discard);
+
+	if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
+		return -EINVAL;
+
+	i810_dma_dispatch_vertex(dev,
+				 dma->buflist[vertex->idx],
+				 vertex->discard, vertex->used);
+
+	atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
+	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
+	sarea_priv->last_enqueue = dev_priv->counter - 1;
+	sarea_priv->last_dispatch = (int)hw_status[5];
+
+	return 0;
+}
+
+static int i810_clear_bufs(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	drm_i810_clear_t *clear = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* GH: Someone's doing nasty things... */
+	if (!dev->dev_private)
+		return -EINVAL;
+
+	i810_dma_dispatch_clear(dev, clear->flags,
+				clear->clear_color, clear->clear_depth);
+	return 0;
+}
+
+static int i810_swap_bufs(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	i810_dma_dispatch_swap(dev);
+	return 0;
+}
+
+static int i810_getage(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
+	    dev_priv->sarea_priv;
+
+	sarea_priv->last_dispatch = (int)hw_status[5];
+	return 0;
+}
+
+static int i810_getbuf(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	int retcode = 0;
+	drm_i810_dma_t *d = data;
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
+	    dev_priv->sarea_priv;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	d->granted = 0;
+
+	retcode = i810_dma_get_buffer(dev, d, file_priv);
+
+	DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
+		  task_pid_nr(current), retcode, d->granted);
+
+	sarea_priv->last_dispatch = (int)hw_status[5];
+
+	return retcode;
+}
+
+static int i810_copybuf(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	/* Never copy - 2.4.x doesn't need it */
+	return 0;
+}
+
+static int i810_docopy(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	/* Never copy - 2.4.x doesn't need it */
+	return 0;
+}
+
+static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, int used,
+				 unsigned int last_render)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned long address = (unsigned long)buf->bus_address;
+	unsigned long start = address - dev->agp->base;
+	int u;
+	RING_LOCALS;
+
+	i810_kernel_lost_context(dev);
+
+	u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
+	if (u != I810_BUF_CLIENT)
+		DRM_DEBUG("MC found buffer that isn't mine!\n");
+
+	if (used > 4 * 1024)
+		used = 0;
+
+	sarea_priv->dirty = 0x7f;
+
+	DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
+
+	dev_priv->counter++;
+	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
+	DRM_DEBUG("start : %lx\n", start);
+	DRM_DEBUG("used : %d\n", used);
+	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
+
+	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
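+		/* The batch end address apparently must be qword aligned
+		 * (cf. BB2_END_ADDR_MASK in i810_drv.h), so pad with one
+		 * zero dword when the client left an odd dword count.
+		 */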
+		if (used & 4) {
+			*(u32 *) ((char *) buf_priv->virtual + used) = 0;
+			used += 4;
+		}
+
+		i810_unmap_buffer(buf);
+	}
+	BEGIN_LP_RING(4);
+	OUT_RING(CMD_OP_BATCH_BUFFER);
+	OUT_RING(start | BB1_PROTECTED);
+	OUT_RING(start + used - 4);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	BEGIN_LP_RING(8);
+	OUT_RING(CMD_STORE_DWORD_IDX);
+	OUT_RING(buf_priv->my_use_idx);
+	OUT_RING(I810_BUF_FREE);
+	OUT_RING(0);
+
+	OUT_RING(CMD_STORE_DWORD_IDX);
+	OUT_RING(16);
+	OUT_RING(last_render);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+}
+
+static int i810_dma_mc(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
+	    dev_priv->sarea_priv;
+	drm_i810_mc_t *mc = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (mc->idx >= dma->buf_count || mc->idx < 0)
+		return -EINVAL;
+
+	i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
+			     mc->last_render);
+
+	atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
+	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
+	sarea_priv->last_enqueue = dev_priv->counter - 1;
+	sarea_priv->last_dispatch = (int)hw_status[5];
+
+	return 0;
+}
+
+static int i810_rstatus(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+
+	return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
+}
+
+static int i810_ov0_info(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+	drm_i810_overlay_t *ov = data;
+
+	ov->offset = dev_priv->overlay_offset;
+	ov->physical = dev_priv->overlay_physical;
+
+	return 0;
+}
+
+static int i810_fstatus(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	return I810_READ(0x30008);
+}
+
+static int i810_ov0_flip(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* Tell the overlay to update */
+	I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
+
+	return 0;
+}
+
+/* Not sure why this isn't set all the time:
+ */
+static void i810_do_init_pageflip(struct drm_device *dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+	dev_priv->page_flipping = 1;
+	dev_priv->current_page = 0;
+	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+}
+
+static int i810_do_cleanup_pageflip(struct drm_device *dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+	if (dev_priv->current_page != 0)
+		i810_dma_dispatch_flip(dev);
+
+	dev_priv->page_flipping = 0;
+	return 0;
+}
+
+static int i810_flip_bufs(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv->page_flipping)
+		i810_do_init_pageflip(dev);
+
+	i810_dma_dispatch_flip(dev);
+	return 0;
+}
+
+int i810_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	/* i810 has 4 more counters */
+	dev->counters += 4;
+	dev->types[6] = _DRM_STAT_IRQ;
+	dev->types[7] = _DRM_STAT_PRIMARY;
+	dev->types[8] = _DRM_STAT_SECONDARY;
+	dev->types[9] = _DRM_STAT_DMA;
+
+	pci_set_master(dev->pdev);
+
+	return 0;
+}
+
+void i810_driver_lastclose(struct drm_device *dev)
+{
+	i810_dma_cleanup(dev);
+}
+
+void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+	if (dev->dev_private) {
+		drm_i810_private_t *dev_priv = dev->dev_private;
+		if (dev_priv->page_flipping)
+			i810_do_cleanup_pageflip(dev);
+	}
+
+	if (file_priv->master && file_priv->master->lock.hw_lock) {
+		drm_idlelock_take(&file_priv->master->lock);
+		i810_driver_reclaim_buffers(dev, file_priv);
+		drm_idlelock_release(&file_priv->master->lock);
+	} else {
+		/* master disappeared, clean up stuff anyway and hope nothing
+		 * goes wrong */
+		i810_driver_reclaim_buffers(dev, file_priv);
+	}
+
+}
+
+int i810_driver_dma_quiescent(struct drm_device *dev)
+{
+	i810_dma_quiescent(dev);
+	return 0;
+}
+
+struct drm_ioctl_desc i810_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+};
+
+int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ *
+ * \param dev   The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate that every i810 is AGP.
+ */
+int i810_driver_device_is_agp(struct drm_device *dev)
+{
+	return 1;
+}
diff --git a/linux-imx/drivers/gpu/drm/i810/i810_drv.c b/linux-imx/drivers/gpu/drm/i810/i810_drv.c
new file mode 100644
index 0000000..2e91fc3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i810/i810_drv.c
@@ -0,0 +1,104 @@
+/* i810_drv.c -- I810 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rickard E. (Rik) Faith <faith@valinux.com>
+ *    Jeff Hartmann <jhartmann@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/i810_drm.h>
+#include "i810_drv.h"
+
+#include <drm/drm_pciids.h>
+
+static struct pci_device_id pciidlist[] = {
+	i810_PCI_IDS
+};
+
+static const struct file_operations i810_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
+	    DRIVER_HAVE_DMA,
+	.dev_priv_size = sizeof(drm_i810_buf_priv_t),
+	.load = i810_driver_load,
+	.lastclose = i810_driver_lastclose,
+	.preclose = i810_driver_preclose,
+	.device_is_agp = i810_driver_device_is_agp,
+	.dma_quiescent = i810_driver_dma_quiescent,
+	.ioctls = i810_ioctls,
+	.fops = &i810_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct pci_driver i810_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+};
+
+static int __init i810_init(void)
+{
+	if (num_possible_cpus() > 1) {
+		pr_err("drm/i810 does not support SMP\n");
+		return -EINVAL;
+	}
+	driver.num_ioctls = i810_max_ioctl;
+	return drm_pci_init(&driver, &i810_pci_driver);
+}
+
+static void __exit i810_exit(void)
+{
+	drm_pci_exit(&driver, &i810_pci_driver);
+}
+
+module_init(i810_init);
+module_exit(i810_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/linux-imx/drivers/gpu/drm/i810/i810_drv.h b/linux-imx/drivers/gpu/drm/i810/i810_drv.h
new file mode 100644
index 0000000..6e0acad
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i810/i810_drv.h
@@ -0,0 +1,243 @@
+/* i810_drv.h -- Private header for the i810 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ *	    Jeff Hartmann <jhartmann@valinux.com>
+ *
+ */
+
+#ifndef _I810_DRV_H_
+#define _I810_DRV_H_
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"VA Linux Systems Inc."
+
+#define DRIVER_NAME		"i810"
+#define DRIVER_DESC		"Intel i810"
+#define DRIVER_DATE		"20030605"
+
+/* Interface history
+ *
+ * 1.1   - XFree86 4.1
+ * 1.2   - XvMC interfaces
+ *       - XFree86 4.2
+ * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility)
+ *       - Remove requirement for interrupt (leave stubs again)
+ * 1.3   - Add page flipping.
+ * 1.4   - fix DRM interface
+ */
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		4
+#define DRIVER_PATCHLEVEL	0
+
+typedef struct drm_i810_buf_priv {
+	u32 *in_use;
+	int my_use_idx;
+	int currently_mapped;
+	void *virtual;
+	void *kernel_virtual;
+	drm_local_map_t map;
+} drm_i810_buf_priv_t;
+
+typedef struct _drm_i810_ring_buffer {
+	int tail_mask;
+	unsigned long Start;
+	unsigned long End;
+	unsigned long Size;
+	u8 *virtual_start;
+	int head;
+	int tail;
+	int space;
+	drm_local_map_t map;
+} drm_i810_ring_buffer_t;
+
+typedef struct drm_i810_private {
+	struct drm_local_map *sarea_map;
+	struct drm_local_map *mmio_map;
+
+	drm_i810_sarea_t *sarea_priv;
+	drm_i810_ring_buffer_t ring;
+
+	void *hw_status_page;
+	unsigned long counter;
+
+	dma_addr_t dma_status_page;
+
+	struct drm_buf *mmap_buffer;
+
+	u32 front_di1, back_di1, zi1;
+
+	int back_offset;
+	int depth_offset;
+	int overlay_offset;
+	int overlay_physical;
+	int w, h;
+	int pitch;
+	int back_pitch;
+	int depth_pitch;
+
+	int do_boxes;
+	int dma_used;
+
+	int current_page;
+	int page_flipping;
+
+	wait_queue_head_t irq_queue;
+	atomic_t irq_received;
+	atomic_t irq_emitted;
+
+	int front_offset;
+} drm_i810_private_t;
+
+				/* i810_dma.c */
+extern int i810_driver_dma_quiescent(struct drm_device *dev);
+void i810_driver_reclaim_buffers(struct drm_device *dev,
+			         struct drm_file *file_priv);
+extern int i810_driver_load(struct drm_device *, unsigned long flags);
+extern void i810_driver_lastclose(struct drm_device *dev);
+extern void i810_driver_preclose(struct drm_device *dev,
+				 struct drm_file *file_priv);
+extern int i810_driver_device_is_agp(struct drm_device *dev);
+
+extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+extern struct drm_ioctl_desc i810_ioctls[];
+extern int i810_max_ioctl;
+
+#define I810_BASE(reg)		((unsigned long) \
+				dev_priv->mmio_map->handle)
+#define I810_ADDR(reg)		(I810_BASE(reg) + reg)
+#define I810_DEREF(reg)		(*(__volatile__ int *)I810_ADDR(reg))
+#define I810_READ(reg)		I810_DEREF(reg)
+#define I810_WRITE(reg, val)	do { I810_DEREF(reg) = val; } while (0)
+#define I810_DEREF16(reg)	(*(__volatile__ u16 *)I810_ADDR(reg))
+#define I810_READ16(reg)	I810_DEREF16(reg)
+#define I810_WRITE16(reg, val)	do { I810_DEREF16(reg) = val; } while (0)
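+
+/* Illustrative usage, mirroring i810_fstatus() and i810_ov0_flip() in
+ * i810_dma.c: registers are read and written as volatile accesses
+ * through the MMIO mapping, e.g.
+ *
+ *	status = I810_READ(0x30008);
+ *	I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
+ */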
+
+#define I810_VERBOSE 0
+#define RING_LOCALS	unsigned int outring, ringmask; \
+			volatile char *virt;
+
+#define BEGIN_LP_RING(n) do {					\
+	if (I810_VERBOSE)					\
+		DRM_DEBUG("BEGIN_LP_RING(%d)\n", n);		\
+	if (dev_priv->ring.space < n*4)				\
+		i810_wait_ring(dev, n*4);			\
+	dev_priv->ring.space -= n*4;				\
+	outring = dev_priv->ring.tail;				\
+	ringmask = dev_priv->ring.tail_mask;			\
+	virt = dev_priv->ring.virtual_start;			\
+} while (0)
+
+#define ADVANCE_LP_RING() do {					\
+	if (I810_VERBOSE)					\
+		DRM_DEBUG("ADVANCE_LP_RING\n");			\
+	dev_priv->ring.tail = outring;				\
+	I810_WRITE(LP_RING + RING_TAIL, outring);		\
+} while (0)
+
+#define OUT_RING(n) do {					\
+	if (I810_VERBOSE)					\
+		DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
+	*(volatile unsigned int *)(virt + outring) = n;		\
+	outring += 4;						\
+	outring &= ringmask;					\
+} while (0)
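+
+/* Typical emit sequence, mirroring i810_flush_queue() in i810_dma.c:
+ *
+ *	RING_LOCALS;
+ *
+ *	BEGIN_LP_RING(2);
+ *	OUT_RING(CMD_REPORT_HEAD);
+ *	OUT_RING(0);
+ *	ADVANCE_LP_RING();
+ *
+ * BEGIN_LP_RING(n) reserves n dwords, waiting in i810_wait_ring() if the
+ * ring is too full; OUT_RING() stores one dword at the tail, wrapping via
+ * the ring mask; ADVANCE_LP_RING() publishes the new tail to the LP_RING
+ * tail register so the hardware starts fetching.
+ */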
+
+#define GFX_OP_USER_INTERRUPT		((0<<29)|(2<<23))
+#define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))
+#define CMD_REPORT_HEAD			(7<<23)
+#define CMD_STORE_DWORD_IDX		((0x21<<23) | 0x1)
+#define CMD_OP_BATCH_BUFFER  ((0x0<<29)|(0x30<<23)|0x1)
+
+#define INST_PARSER_CLIENT   0x00000000
+#define INST_OP_FLUSH        0x02000000
+#define INST_FLUSH_MAP_CACHE 0x00000001
+
+#define BB1_START_ADDR_MASK   (~0x7)
+#define BB1_PROTECTED         (1<<0)
+#define BB1_UNPROTECTED       (0<<0)
+#define BB2_END_ADDR_MASK     (~0x7)
+
+#define I810REG_HWSTAM		0x02098
+#define I810REG_INT_IDENTITY_R	0x020a4
+#define I810REG_INT_MASK_R	0x020a8
+#define I810REG_INT_ENABLE_R	0x020a0
+
+#define LP_RING			0x2030
+#define HP_RING			0x2040
+#define RING_TAIL		0x00
+#define TAIL_ADDR		0x000FFFF8
+#define RING_HEAD		0x04
+#define HEAD_WRAP_COUNT		0xFFE00000
+#define HEAD_WRAP_ONE		0x00200000
+#define HEAD_ADDR		0x001FFFFC
+#define RING_START		0x08
+#define START_ADDR		0x00FFFFF8
+#define RING_LEN		0x0C
+#define RING_NR_PAGES		0x000FF000
+#define RING_REPORT_MASK	0x00000006
+#define RING_REPORT_64K		0x00000002
+#define RING_REPORT_128K	0x00000004
+#define RING_NO_REPORT		0x00000000
+#define RING_VALID_MASK		0x00000001
+#define RING_VALID		0x00000001
+#define RING_INVALID		0x00000000
+
+#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR       (0x1<<1)
+#define SC_ENABLE_MASK          (0x1<<0)
+#define SC_ENABLE               (0x1<<0)
+
+#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK      (0xffff<<16)
+#define SCI_XMIN_MASK      (0xffff<<0)
+#define SCI_YMAX_MASK      (0xffff<<16)
+#define SCI_XMAX_MASK      (0xffff<<0)
+
+#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x2)
+#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_PRIMITIVE         ((0x3<<29)|(0x1f<<24))
+
+#define CMD_OP_Z_BUFFER_INFO     ((0x0<<29)|(0x16<<23))
+#define CMD_OP_DESTBUFFER_INFO   ((0x0<<29)|(0x15<<23))
+#define CMD_OP_FRONTBUFFER_INFO  ((0x0<<29)|(0x14<<23))
+#define CMD_OP_WAIT_FOR_EVENT    ((0x0<<29)|(0x03<<23))
+
+#define BR00_BITBLT_CLIENT   0x40000000
+#define BR00_OP_COLOR_BLT    0x10000000
+#define BR00_OP_SRC_COPY_BLT 0x10C00000
+#define BR13_SOLID_PATTERN   0x80000000
+
+#define WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+#define WAIT_FOR_PLANE_A_FLIP      (1<<2)
+#define WAIT_FOR_VBLANK (1<<3)
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/i915/Makefile b/linux-imx/drivers/gpu/drm/i915/Makefile
new file mode 100644
index 0000000..91f3ac6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/Makefile
@@ -0,0 +1,53 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+i915-y := i915_drv.o i915_dma.o i915_irq.o \
+	  i915_debugfs.o \
+	  i915_suspend.o \
+	  i915_gem.o \
+	  i915_gem_context.o \
+	  i915_gem_debug.o \
+	  i915_gem_evict.o \
+	  i915_gem_execbuffer.o \
+	  i915_gem_gtt.o \
+	  i915_gem_stolen.o \
+	  i915_gem_tiling.o \
+	  i915_sysfs.o \
+	  i915_trace_points.o \
+	  i915_ums.o \
+	  intel_display.o \
+	  intel_crt.o \
+	  intel_lvds.o \
+	  intel_bios.o \
+	  intel_ddi.o \
+	  intel_dp.o \
+	  intel_hdmi.o \
+	  intel_sdvo.o \
+	  intel_modes.o \
+	  intel_panel.o \
+	  intel_pm.o \
+	  intel_i2c.o \
+	  intel_fb.o \
+	  intel_tv.o \
+	  intel_dvo.o \
+	  intel_ringbuffer.o \
+	  intel_overlay.o \
+	  intel_sprite.o \
+	  intel_opregion.o \
+	  dvo_ch7xxx.o \
+	  dvo_ch7017.o \
+	  dvo_ivch.o \
+	  dvo_tfp410.o \
+	  dvo_sil164.o \
+	  dvo_ns2501.o \
+	  i915_gem_dmabuf.o
+
+i915-$(CONFIG_COMPAT)   += i915_ioc32.o
+
+i915-$(CONFIG_ACPI)	+= intel_acpi.o
+
+obj-$(CONFIG_DRM_I915)  += i915.o
+
+CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/linux-imx/drivers/gpu/drm/i915/dvo.h b/linux-imx/drivers/gpu/drm/i915/dvo.h
new file mode 100644
index 0000000..33a62ad
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/dvo.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright © 2006 Eric Anholt
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _INTEL_DVO_H
+#define _INTEL_DVO_H
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "intel_drv.h"
+
+struct intel_dvo_device {
+	const char *name;
+	int type;
+	/* DVOA/B/C output register */
+	u32 dvo_reg;
+	/* GPIO register used for i2c bus to control this device */
+	u32 gpio;
+	int slave_addr;
+
+	const struct intel_dvo_dev_ops *dev_ops;
+	void *dev_priv;
+	struct i2c_adapter *i2c_bus;
+};
+
+struct intel_dvo_dev_ops {
+	/*
+	 * Initialize the device at startup time.
+	 * Returns false if the device does not exist.
+	 */
+	bool (*init)(struct intel_dvo_device *dvo,
+		     struct i2c_adapter *i2cbus);
+
+	/*
+	 * Called to allow the output a chance to create properties after the
+	 * RandR objects have been created.
+	 */
+	void (*create_resources)(struct intel_dvo_device *dvo);
+
+	/*
+	 * Turn on/off output.
+	 *
+	 * Because none of our dvo drivers support intermediate power levels,
+	 * we don't expose this in the interface.
+	 */
+	void (*dpms)(struct intel_dvo_device *dvo, bool enable);
+
+	/*
+	 * Callback for testing a video mode for a given output.
+	 *
+	 * This function should only check for cases where a mode can't
+	 * be supported on the output specifically, and not represent
+	 * generic CRTC limitations.
+	 *
+	 * \return MODE_OK if the mode is valid, or another MODE_* otherwise.
+	 */
+	int (*mode_valid)(struct intel_dvo_device *dvo,
+			  struct drm_display_mode *mode);
+
+	/*
+	 * Callback to adjust the mode to be set in the CRTC.
+	 *
+	 * This allows an output to adjust the clock or even the entire set of
+	 * timings, which is used for panels with fixed timings or for
+	 * buses with clock limitations.
+	 */
+	bool (*mode_fixup)(struct intel_dvo_device *dvo,
+			   const struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode);
+
+	/*
+	 * Callback for preparing mode changes on an output
+	 */
+	void (*prepare)(struct intel_dvo_device *dvo);
+
+	/*
+	 * Callback for committing mode changes on an output
+	 */
+	void (*commit)(struct intel_dvo_device *dvo);
+
+	/*
+	 * Callback for setting up a video mode after fixups have been made.
+	 *
+	 * This is only called while the output is disabled.  The dpms callback
+	 * must be all that's necessary to turn the output on after this
+	 * function is called.
+	 */
+	void (*mode_set)(struct intel_dvo_device *dvo,
+			 struct drm_display_mode *mode,
+			 struct drm_display_mode *adjusted_mode);
+
+	/*
+	 * Probe for a connected output, and return the connector status.
+	 */
+	enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
+
+	/*
+	 * Probe the current hw status, returning true if the connected output
+	 * is active.
+	 */
+	bool (*get_hw_state)(struct intel_dvo_device *dev);
+
+	/**
+	 * Query the device for the modes it provides.
+	 *
+	 * This function may also update MonInfo, mm_width, and mm_height.
+	 *
+	 * \return singly-linked list of modes or NULL if no modes found.
+	 */
+	struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
+
+	/**
+	 * Clean up driver-specific bits of the output
+	 */
+	void (*destroy) (struct intel_dvo_device *dvo);
+
+	/**
+	 * Debugging hook to dump device registers to log file
+	 */
+	void (*dump_regs)(struct intel_dvo_device *dvo);
+};
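+
+/* Each chip driver below fills in one of these tables (ch7017_ops,
+ * ch7xxx_ops, ...) and the DVO output code drives it through the
+ * callbacks.  A minimal consumer sketch -- flow illustrative only, not
+ * the actual i915 call sites:
+ *
+ *	if (dvo->dev_ops->init(dvo, i2cbus) &&
+ *	    dvo->dev_ops->mode_valid(dvo, mode) == MODE_OK) {
+ *		dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
+ *		dvo->dev_ops->dpms(dvo, true);
+ *	}
+ */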
+
+extern struct intel_dvo_dev_ops sil164_ops;
+extern struct intel_dvo_dev_ops ch7xxx_ops;
+extern struct intel_dvo_dev_ops ivch_ops;
+extern struct intel_dvo_dev_ops tfp410_ops;
+extern struct intel_dvo_dev_ops ch7017_ops;
+extern struct intel_dvo_dev_ops ns2501_ops;
+
+#endif /* _INTEL_DVO_H */
diff --git a/linux-imx/drivers/gpu/drm/i915/dvo_ch7017.c b/linux-imx/drivers/gpu/drm/i915/dvo_ch7017.c
new file mode 100644
index 0000000..86b27d1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "dvo.h"
+
+#define CH7017_TV_DISPLAY_MODE		0x00
+#define CH7017_FLICKER_FILTER		0x01
+#define CH7017_VIDEO_BANDWIDTH		0x02
+#define CH7017_TEXT_ENHANCEMENT		0x03
+#define CH7017_START_ACTIVE_VIDEO	0x04
+#define CH7017_HORIZONTAL_POSITION	0x05
+#define CH7017_VERTICAL_POSITION	0x06
+#define CH7017_BLACK_LEVEL		0x07
+#define CH7017_CONTRAST_ENHANCEMENT	0x08
+#define CH7017_TV_PLL			0x09
+#define CH7017_TV_PLL_M			0x0a
+#define CH7017_TV_PLL_N			0x0b
+#define CH7017_SUB_CARRIER_0		0x0c
+#define CH7017_CIV_CONTROL		0x10
+#define CH7017_CIV_0			0x11
+#define CH7017_CHROMA_BOOST		0x14
+#define CH7017_CLOCK_MODE		0x1c
+#define CH7017_INPUT_CLOCK		0x1d
+#define CH7017_GPIO_CONTROL		0x1e
+#define CH7017_INPUT_DATA_FORMAT	0x1f
+#define CH7017_CONNECTION_DETECT	0x20
+#define CH7017_DAC_CONTROL		0x21
+#define CH7017_BUFFERED_CLOCK_OUTPUT	0x22
+#define CH7017_DEFEAT_VSYNC		0x47
+#define CH7017_TEST_PATTERN		0x48
+
+#define CH7017_POWER_MANAGEMENT		0x49
+/** Enables the TV output path. */
+#define CH7017_TV_EN			(1 << 0)
+#define CH7017_DAC0_POWER_DOWN		(1 << 1)
+#define CH7017_DAC1_POWER_DOWN		(1 << 2)
+#define CH7017_DAC2_POWER_DOWN		(1 << 3)
+#define CH7017_DAC3_POWER_DOWN		(1 << 4)
+/** Powers down the TV out block, and DAC0-3 */
+#define CH7017_TV_POWER_DOWN_EN		(1 << 5)
+
+#define CH7017_VERSION_ID		0x4a
+
+#define CH7017_DEVICE_ID		0x4b
+#define CH7017_DEVICE_ID_VALUE		0x1b
+#define CH7018_DEVICE_ID_VALUE		0x1a
+#define CH7019_DEVICE_ID_VALUE		0x19
+
+#define CH7017_XCLK_D2_ADJUST		0x53
+#define CH7017_UP_SCALER_COEFF_0	0x55
+#define CH7017_UP_SCALER_COEFF_1	0x56
+#define CH7017_UP_SCALER_COEFF_2	0x57
+#define CH7017_UP_SCALER_COEFF_3	0x58
+#define CH7017_UP_SCALER_COEFF_4	0x59
+#define CH7017_UP_SCALER_VERTICAL_INC_0	0x5a
+#define CH7017_UP_SCALER_VERTICAL_INC_1	0x5b
+#define CH7017_GPIO_INVERT		0x5c
+#define CH7017_UP_SCALER_HORIZONTAL_INC_0	0x5d
+#define CH7017_UP_SCALER_HORIZONTAL_INC_1	0x5e
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT	0x5f
+/**< Low bits of horizontal active pixel input */
+
+#define CH7017_ACTIVE_INPUT_LINE_OUTPUT	0x60
+/** High bits of horizontal active pixel input */
+#define CH7017_LVDS_HAP_INPUT_MASK	(0x7 << 0)
+/** High bits of vertical active line output */
+#define CH7017_LVDS_VAL_HIGH_MASK	(0x7 << 3)
+
+#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT	0x61
+/**< Low bits of vertical active line output */
+
+#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT	0x62
+/**< Low bits of horizontal active pixel output */
+
+#define CH7017_LVDS_POWER_DOWN		0x63
+/** High bits of horizontal active pixel output */
+#define CH7017_LVDS_HAP_HIGH_MASK	(0x7 << 0)
+/** Enables the LVDS power down state transition */
+#define CH7017_LVDS_POWER_DOWN_EN	(1 << 6)
+/** Enables the LVDS upscaler */
+#define CH7017_LVDS_UPSCALER_EN		(1 << 7)
+#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08
+
+#define CH7017_LVDS_ENCODING		0x64
+#define CH7017_LVDS_DITHER_2D		(1 << 2)
+#define CH7017_LVDS_DITHER_DIS		(1 << 3)
+#define CH7017_LVDS_DUAL_CHANNEL_EN	(1 << 4)
+#define CH7017_LVDS_24_BIT		(1 << 5)
+
+#define CH7017_LVDS_ENCODING_2		0x65
+
+#define CH7017_LVDS_PLL_CONTROL		0x66
+/** Enables the LVDS panel output path */
+#define CH7017_LVDS_PANEN		(1 << 0)
+/** Enables the LVDS panel backlight */
+#define CH7017_LVDS_BKLEN		(1 << 3)
+
+#define CH7017_POWER_SEQUENCING_T1	0x67
+#define CH7017_POWER_SEQUENCING_T2	0x68
+#define CH7017_POWER_SEQUENCING_T3	0x69
+#define CH7017_POWER_SEQUENCING_T4	0x6a
+#define CH7017_POWER_SEQUENCING_T5	0x6b
+#define CH7017_GPIO_DRIVER_TYPE		0x6c
+#define CH7017_GPIO_DATA		0x6d
+#define CH7017_GPIO_DIRECTION_CONTROL	0x6e
+
+#define CH7017_LVDS_PLL_FEEDBACK_DIV	0x71
+# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4
+# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0
+# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80
+
+#define CH7017_LVDS_PLL_VCO_CONTROL	0x72
+# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80
+# define CH7017_LVDS_PLL_VCO_SHIFT	4
+# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0
+
+#define CH7017_OUTPUTS_ENABLE		0x73
+# define CH7017_CHARGE_PUMP_LOW		0x0
+# define CH7017_CHARGE_PUMP_HIGH	0x3
+# define CH7017_LVDS_CHANNEL_A		(1 << 3)
+# define CH7017_LVDS_CHANNEL_B		(1 << 4)
+# define CH7017_TV_DAC_A		(1 << 5)
+# define CH7017_TV_DAC_B		(1 << 6)
+# define CH7017_DDC_SELECT_DC2		(1 << 7)
+
+#define CH7017_LVDS_OUTPUT_AMPLITUDE	0x74
+#define CH7017_LVDS_PLL_EMI_REDUCTION	0x75
+#define CH7017_LVDS_POWER_DOWN_FLICKER	0x76
+
+#define CH7017_LVDS_CONTROL_2		0x78
+# define CH7017_LOOP_FILTER_SHIFT	5
+# define CH7017_PHASE_DETECTOR_SHIFT	0
+
+#define CH7017_BANG_LIMIT_CONTROL	0x7f
+
+struct ch7017_priv {
+	uint8_t dummy;
+};
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo);
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
+
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr = dvo->slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &addr,
+		},
+		{
+			.addr = dvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = val,
+		}
+	};
+	return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
+}
+
+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
+{
+	uint8_t buf[2] = { addr, val };
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = buf,
+	};
+	return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
+}
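+
+/* The two helpers above implement the chip's one-byte register protocol:
+ * a write is a single { addr, val } message, a read is an address write
+ * followed by a one-byte read.  Read-modify-write, as ch7017_dpms()
+ * does below, is then simply (illustrative):
+ *
+ *	u8 val;
+ *
+ *	if (ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val))
+ *		ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+ *			     val | CH7017_LVDS_POWER_DOWN_EN);
+ */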
+
+/** Probes for a CH7017 on the given bus and slave address. */
+static bool ch7017_init(struct intel_dvo_device *dvo,
+			struct i2c_adapter *adapter)
+{
+	struct ch7017_priv *priv;
+	const char *str;
+	u8 val;
+
+	priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
+	if (priv == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = priv;
+
+	if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
+		goto fail;
+
+	switch (val) {
+	case CH7017_DEVICE_ID_VALUE:
+		str = "ch7017";
+		break;
+	case CH7018_DEVICE_ID_VALUE:
+		str = "ch7018";
+		break;
+	case CH7019_DEVICE_ID_VALUE:
+		str = "ch7019";
+		break;
+	default:
+		DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
+			      "slave %d.\n",
+			      val, adapter->name, dvo->slave_addr);
+		goto fail;
+	}
+
+	DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+		      str, adapter->name, dvo->slave_addr);
+	return true;
+
+fail:
+	kfree(priv);
+	return false;
+}
+
+static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
+{
+	return connector_status_connected;
+}
+
+static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
+					      struct drm_display_mode *mode)
+{
+	if (mode->clock > 160000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static void ch7017_mode_set(struct intel_dvo_device *dvo,
+			    struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
+{
+	uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
+	uint8_t outputs_enable, lvds_control_2, lvds_power_down;
+	uint8_t horizontal_active_pixel_input;
+	uint8_t horizontal_active_pixel_output, vertical_active_line_output;
+	uint8_t active_input_line_output;
+
+	DRM_DEBUG_KMS("Registers before mode setting\n");
+	ch7017_dump_regs(dvo);
+
+	/* LVDS PLL settings from page 75 of 7017-7017ds.pdf */
+	if (mode->clock < 100000) {
+		outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW;
+		lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+			(2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+			(13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+		lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+			(2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+			(3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+		lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) |
+			(0 << CH7017_PHASE_DETECTOR_SHIFT);
+	} else {
+		outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH;
+		lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+			(2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
+			(3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
+		lvds_pll_feedback_div = 35;
+		lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) |
+			(0 << CH7017_PHASE_DETECTOR_SHIFT);
+		if (1) { /* XXX: dual channel panel detection.  Assume yes for now. */
+			outputs_enable |= CH7017_LVDS_CHANNEL_B;
+			lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+				(2 << CH7017_LVDS_PLL_VCO_SHIFT) |
+				(13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+		} else {
+			lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
+				(1 << CH7017_LVDS_PLL_VCO_SHIFT) |
+				(13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
+		}
+	}
+
+	horizontal_active_pixel_input = mode->hdisplay & 0x00ff;
+
+	vertical_active_line_output = mode->vdisplay & 0x00ff;
+	horizontal_active_pixel_output = mode->hdisplay & 0x00ff;
+
+	active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) |
+				   (((mode->vdisplay & 0x0700) >> 8) << 3);
+
+	lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
+			  (mode->hdisplay & 0x0700) >> 8;
+
+	ch7017_dpms(dvo, false);
+	ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
+			horizontal_active_pixel_input);
+	ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
+			horizontal_active_pixel_output);
+	ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT,
+			vertical_active_line_output);
+	ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT,
+			active_input_line_output);
+	ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control);
+	ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div);
+	ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2);
+	ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable);
+
+	/* Turn the LVDS back on with new settings. */
+	ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
+
+	DRM_DEBUG_KMS("Registers after mode setting\n");
+	ch7017_dump_regs(dvo);
+}
+
+/* set the CH7017 power state */
+static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	uint8_t val;
+
+	ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+	/* Turn off TV/VGA, and never turn it on since we don't support it. */
+	ch7017_write(dvo, CH7017_POWER_MANAGEMENT,
+			CH7017_DAC0_POWER_DOWN |
+			CH7017_DAC1_POWER_DOWN |
+			CH7017_DAC2_POWER_DOWN |
+			CH7017_DAC3_POWER_DOWN |
+			CH7017_TV_POWER_DOWN_EN);
+
+	if (enable) {
+		/* Turn on the LVDS */
+		ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+			     val & ~CH7017_LVDS_POWER_DOWN_EN);
+	} else {
+		/* Turn off the LVDS */
+		ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
+			     val | CH7017_LVDS_POWER_DOWN_EN);
+	}
+
+	/* XXX: Should actually wait for the power status to update somehow */
+	msleep(20);
+}
+
+static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
+{
+	uint8_t val;
+
+	ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
+
+	if (val & CH7017_LVDS_POWER_DOWN_EN)
+		return false;
+	else
+		return true;
+}
+
+static void ch7017_dump_regs(struct intel_dvo_device *dvo)
+{
+	uint8_t val;
+
+#define DUMP(reg)					\
+do {							\
+	ch7017_read(dvo, reg, &val);			\
+	DRM_DEBUG_KMS(#reg ": %02x\n", val);		\
+} while (0)
+
+	DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
+	DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT);
+	DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT);
+	DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT);
+	DUMP(CH7017_LVDS_PLL_VCO_CONTROL);
+	DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV);
+	DUMP(CH7017_LVDS_CONTROL_2);
+	DUMP(CH7017_OUTPUTS_ENABLE);
+	DUMP(CH7017_LVDS_POWER_DOWN);
+}
+
+static void ch7017_destroy(struct intel_dvo_device *dvo)
+{
+	struct ch7017_priv *priv = dvo->dev_priv;
+
+	if (priv) {
+		kfree(priv);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops ch7017_ops = {
+	.init = ch7017_init,
+	.detect = ch7017_detect,
+	.mode_valid = ch7017_mode_valid,
+	.mode_set = ch7017_mode_set,
+	.dpms = ch7017_dpms,
+	.get_hw_state = ch7017_get_hw_state,
+	.dump_regs = ch7017_dump_regs,
+	.destroy = ch7017_destroy,
+};
diff --git a/linux-imx/drivers/gpu/drm/i915/dvo_ch7xxx.c b/linux-imx/drivers/gpu/drm/i915/dvo_ch7xxx.c
new file mode 100644
index 0000000..3edd981
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -0,0 +1,344 @@
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "dvo.h"
+
+#define CH7xxx_REG_VID		0x4a
+#define CH7xxx_REG_DID		0x4b
+
+#define CH7011_VID		0x83 /* 7010 as well */
+#define CH7009A_VID		0x84
+#define CH7009B_VID		0x85
+#define CH7301_VID		0x95
+
+#define CH7xxx_VID		0x84
+#define CH7xxx_DID		0x17
+
+#define CH7xxx_NUM_REGS		0x4c
+
+#define CH7xxx_CM		0x1c
+#define CH7xxx_CM_XCM		(1<<0)
+#define CH7xxx_CM_MCP		(1<<2)
+#define CH7xxx_INPUT_CLOCK	0x1d
+#define CH7xxx_GPIO		0x1e
+#define CH7xxx_GPIO_HPIR	(1<<3)
+#define CH7xxx_IDF		0x1f
+
+#define CH7xxx_IDF_HSP		(1<<3)
+#define CH7xxx_IDF_VSP		(1<<4)
+
+#define CH7xxx_CONNECTION_DETECT 0x20
+#define CH7xxx_CDET_DVI		(1<<5)
+
+#define CH7301_DAC_CNTL		0x21
+#define CH7301_HOTPLUG		0x23
+#define CH7xxx_TCTL		0x31
+#define CH7xxx_TVCO		0x32
+#define CH7xxx_TPCP		0x33
+#define CH7xxx_TPD		0x34
+#define CH7xxx_TPVT		0x35
+#define CH7xxx_TLPF		0x36
+#define CH7xxx_TCT		0x37
+#define CH7301_TEST_PATTERN	0x48
+
+#define CH7xxx_PM		0x49
+#define CH7xxx_PM_FPD		(1<<0)
+#define CH7301_PM_DACPD0	(1<<1)
+#define CH7301_PM_DACPD1	(1<<2)
+#define CH7301_PM_DACPD2	(1<<3)
+#define CH7xxx_PM_DVIL		(1<<6)
+#define CH7xxx_PM_DVIP		(1<<7)
+
+#define CH7301_SYNC_POLARITY	0x56
+#define CH7301_SYNC_RGB_YUV	(1<<0)
+#define CH7301_SYNC_POL_DVI	(1<<5)
+
+/** @file
+ * driver for the Chrontel 7xxx DVI chip over DVO.
+ */
+
+static struct ch7xxx_id_struct {
+	uint8_t vid;
+	char *name;
+} ch7xxx_ids[] = {
+	{ CH7011_VID, "CH7011" },
+	{ CH7009A_VID, "CH7009A" },
+	{ CH7009B_VID, "CH7009B" },
+	{ CH7301_VID, "CH7301" },
+};
+
+struct ch7xxx_priv {
+	bool quiet;
+};
+
+static char *ch7xxx_get_id(uint8_t vid)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) {
+		if (ch7xxx_ids[i].vid == vid)
+			return ch7xxx_ids[i].name;
+	}
+
+	return NULL;
+}
+
+/** Reads an 8 bit register */
+static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[2];
+	u8 in_buf[2];
+
+	struct i2c_msg msgs[] = {
+		{
+			.addr = dvo->slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = dvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
+		*ch = in_buf[0];
+		return true;
+	}
+
+	if (!ch7xxx->quiet) {
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+	return false;
+}
+
+/** Writes an 8 bit register */
+static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	uint8_t out_buf[2];
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = ch;
+
+	if (i2c_transfer(adapter, &msg, 1) == 1)
+		return true;
+
+	if (!ch7xxx->quiet) {
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+static bool ch7xxx_init(struct intel_dvo_device *dvo,
+			struct i2c_adapter *adapter)
+{
+	/* this will detect the CH7xxx chip on the specified i2c bus */
+	struct ch7xxx_priv *ch7xxx;
+	uint8_t vendor, device;
+	char *name;
+
+	ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
+	if (ch7xxx == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = ch7xxx;
+	ch7xxx->quiet = true;
+
+	if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor))
+		goto out;
+
+	name = ch7xxx_get_id(vendor);
+	if (!name) {
+		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+				"slave %d.\n",
+			  vendor, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+
+	if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
+		goto out;
+
+	if (device != CH7xxx_DID) {
+		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+				"slave %d.\n",
+			  vendor, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+
+	ch7xxx->quiet = false;
+	DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+		  name, vendor, device);
+	return true;
+out:
+	kfree(ch7xxx);
+	return false;
+}
+
+static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
+{
+	uint8_t cdet, orig_pm, pm;
+
+	ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
+
+	pm = orig_pm;
+	pm &= ~CH7xxx_PM_FPD;
+	pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP;
+
+	ch7xxx_writeb(dvo, CH7xxx_PM, pm);
+
+	ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet);
+
+	ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm);
+
+	if (cdet & CH7xxx_CDET_DVI)
+		return connector_status_connected;
+	return connector_status_disconnected;
+}
+
+static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
+					      struct drm_display_mode *mode)
+{
+	if (mode->clock > 165000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
+			    struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
+{
+	uint8_t tvco, tpcp, tpd, tlpf, idf;
+
+	if (mode->clock <= 65000) {
+		tvco = 0x23;
+		tpcp = 0x08;
+		tpd = 0x16;
+		tlpf = 0x60;
+	} else {
+		tvco = 0x2d;
+		tpcp = 0x06;
+		tpd = 0x26;
+		tlpf = 0xa0;
+	}
+
+	ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00);
+	ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco);
+	ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp);
+	ch7xxx_writeb(dvo, CH7xxx_TPD, tpd);
+	ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30);
+	ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf);
+	ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00);
+
+	ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
+
+	idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		idf |= CH7xxx_IDF_HSP;
+
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		idf |= CH7xxx_IDF_VSP;
+
+	ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
+}
+
+/* set the CH7xxx power state */
+static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	if (enable)
+		ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
+	else
+		ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
+}
+
+static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
+{
+	u8 val;
+
+	ch7xxx_readb(dvo, CH7xxx_PM, &val);
+
+	if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
+		return true;
+	else
+		return false;
+}
+
+static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
+{
+	int i;
+
+	for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+		uint8_t val;
+		if ((i % 8) == 0)
+			DRM_LOG_KMS("\n %02X: ", i);
+		ch7xxx_readb(dvo, i, &val);
+		DRM_LOG_KMS("%02X ", val);
+	}
+}
+
+static void ch7xxx_destroy(struct intel_dvo_device *dvo)
+{
+	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
+
+	if (ch7xxx) {
+		kfree(ch7xxx);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops ch7xxx_ops = {
+	.init = ch7xxx_init,
+	.detect = ch7xxx_detect,
+	.mode_valid = ch7xxx_mode_valid,
+	.mode_set = ch7xxx_mode_set,
+	.dpms = ch7xxx_dpms,
+	.get_hw_state = ch7xxx_get_hw_state,
+	.dump_regs = ch7xxx_dump_regs,
+	.destroy = ch7xxx_destroy,
+};
diff --git a/linux-imx/drivers/gpu/drm/i915/dvo_ivch.c b/linux-imx/drivers/gpu/drm/i915/dvo_ivch.c
new file mode 100644
index 0000000..baaf65b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/dvo_ivch.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "dvo.h"
+
+/*
+ * register definitions for the i82807aa.
+ *
+ * Documentation on this chipset can be found in datasheet #29069001 at
+ * intel.com.
+ */
+
+/*
+ * VCH Revision & GMBus Base Addr
+ */
+#define VR00		0x00
+# define VR00_BASE_ADDRESS_MASK		0x007f
+
+/*
+ * Functionality Enable
+ */
+#define VR01		0x01
+
+/*
+ * Enable the panel fitter
+ */
+# define VR01_PANEL_FIT_ENABLE		(1 << 3)
+/*
+ * Enables the LCD display.
+ *
+ * This must not be set while VR01_DVO_BYPASS_ENABLE is set.
+ */
+# define VR01_LCD_ENABLE		(1 << 2)
+/** Enables the DVO repeater. */
+# define VR01_DVO_BYPASS_ENABLE		(1 << 1)
+/** Enables the DVO clock */
+# define VR01_DVO_ENABLE		(1 << 0)
+
+/*
+ * LCD Interface Format
+ */
+#define VR10		0x10
+/** Enables LVDS output instead of CMOS */
+# define VR10_LVDS_ENABLE		(1 << 4)
+/** Enables 18-bit LVDS output. */
+# define VR10_INTERFACE_1X18		(0 << 2)
+/** Enables 24-bit LVDS or CMOS output */
+# define VR10_INTERFACE_1X24		(1 << 2)
+/** Enables 2x18-bit LVDS or CMOS output. */
+# define VR10_INTERFACE_2X18		(2 << 2)
+/** Enables 2x24-bit LVDS output */
+# define VR10_INTERFACE_2X24		(3 << 2)
+
+/*
+ * VR20 LCD Horizontal Display Size
+ */
+#define VR20	0x20
+
+/*
+ * LCD Vertical Display Size
+ */
+#define VR21	0x21
+
+/*
+ * Panel power down status
+ */
+#define VR30		0x30
+/** Read only bit indicating that the panel is not in a safe poweroff state. */
+# define VR30_PANEL_ON			(1 << 15)
+
+#define VR40		0x40
+# define VR40_STALL_ENABLE		(1 << 13)
+# define VR40_VERTICAL_INTERP_ENABLE	(1 << 12)
+# define VR40_ENHANCED_PANEL_FITTING	(1 << 11)
+# define VR40_HORIZONTAL_INTERP_ENABLE	(1 << 10)
+# define VR40_AUTO_RATIO_ENABLE		(1 << 9)
+# define VR40_CLOCK_GATING_ENABLE	(1 << 8)
+
+/*
+ * Panel Fitting Vertical Ratio
+ * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2
+ */
+#define VR41		0x41
+
+/*
+ * Panel Fitting Horizontal Ratio
+ * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2
+ */
+#define VR42		0x42
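+
+/*
+ * Worked example (illustrative): fitting a 1024-pixel-wide image to a
+ * 1280-pixel panel gives ((1024 - 1) << 16) / (1280 - 1) = 52418, and
+ * 52418 >> 2 = 13104 (0x3330), which is the value ivch_mode_set()
+ * would write to VR42.
+ */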
+
+/*
+ * Horizontal Image Size
+ */
+#define VR43		0x43
+
+/* VR80 GPIO 0
+ */
+#define VR80	    0x80
+#define VR81	    0x81
+#define VR82	    0x82
+#define VR83	    0x83
+#define VR84	    0x84
+#define VR85	    0x85
+#define VR86	    0x86
+#define VR87	    0x87
+
+/* VR88 GPIO 8
+ */
+#define VR88	    0x88
+
+/* Graphics BIOS scratch 0
+ */
+#define VR8E	    0x8E
+# define VR8E_PANEL_TYPE_MASK		(0xf << 0)
+# define VR8E_PANEL_INTERFACE_CMOS	(0 << 4)
+# define VR8E_PANEL_INTERFACE_LVDS	(1 << 4)
+# define VR8E_FORCE_DEFAULT_PANEL	(1 << 5)
+
+/* Graphics BIOS scratch 1
+ */
+#define VR8F	    0x8F
+# define VR8F_VCH_PRESENT		(1 << 0)
+# define VR8F_DISPLAY_CONN		(1 << 1)
+# define VR8F_POWER_MASK		(0x3c)
+# define VR8F_POWER_POS			(2)
+
+struct ivch_priv {
+	bool quiet;
+
+	uint16_t width, height;
+};
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo);
+
+/**
+ * Reads a register on the ivch.
+ *
+ * Each of the 256 registers is 16 bits long.
+ */
+static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+{
+	struct ivch_priv *priv = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[1];
+	u8 in_buf[2];
+
+	struct i2c_msg msgs[] = {
+		{
+			.addr = dvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 0,
+		},
+		{
+			.addr = 0,
+			.flags = I2C_M_NOSTART,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = dvo->slave_addr,
+			.flags = I2C_M_RD | I2C_M_NOSTART,
+			.len = 2,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+
+	if (i2c_transfer(adapter, msgs, 3) == 3) {
+		*data = (in_buf[1] << 8) | in_buf[0];
+		return true;
+	}
+
+	if (!priv->quiet) {
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+				"%s:%02x.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+	return false;
+}
+
+/** Writes a 16-bit register on the ivch */
+static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+{
+	struct ivch_priv *priv = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[3];
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 3,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = data & 0xff;
+	out_buf[2] = data >> 8;
+
+	if (i2c_transfer(adapter, &msg, 1) == 1)
+		return true;
+
+	if (!priv->quiet) {
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/** Probes the given bus and slave address for an ivch */
+static bool ivch_init(struct intel_dvo_device *dvo,
+		      struct i2c_adapter *adapter)
+{
+	struct ivch_priv *priv;
+	uint16_t temp;
+
+	priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
+	if (priv == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = priv;
+	priv->quiet = true;
+
+	if (!ivch_read(dvo, VR00, &temp))
+		goto out;
+	priv->quiet = false;
+
+	/* Since the identification bits are probably zeroes, which doesn't seem
+	 * very unique, check that the value in the base address field matches
+	 * the address it's responding on.
+	 */
+	if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
+		DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
+			  "(%d vs %d)\n",
+			  (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
+		goto out;
+	}
+
+	ivch_read(dvo, VR20, &priv->width);
+	ivch_read(dvo, VR21, &priv->height);
+
+	return true;
+
+out:
+	kfree(priv);
+	return false;
+}
+
+static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
+{
+	return connector_status_connected;
+}
+
+static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
+					    struct drm_display_mode *mode)
+{
+	if (mode->clock > 112000)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+/** Sets the power state of the panel connected to the ivch */
+static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	int i;
+	uint16_t vr01, vr30, backlight;
+
+	/* Set the new power state of the panel. */
+	if (!ivch_read(dvo, VR01, &vr01))
+		return;
+
+	if (enable)
+		backlight = 1;
+	else
+		backlight = 0;
+	ivch_write(dvo, VR80, backlight);
+
+	if (enable)
+		vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
+	else
+		vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
+
+	ivch_write(dvo, VR01, vr01);
+
+	/* Wait for the panel to make its state transition */
+	for (i = 0; i < 100; i++) {
+		if (!ivch_read(dvo, VR30, &vr30))
+			break;
+
+		if (((vr30 & VR30_PANEL_ON) != 0) == enable)
+			break;
+		udelay(1000);
+	}
+	/* wait some more; vch may fail to resync sometimes without this */
+	udelay(16 * 1000);
+}
+
+static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
+{
+	uint16_t vr01;
+
+	/* Set the new power state of the panel. */
+	if (!ivch_read(dvo, VR01, &vr01))
+		return false;
+
+	if (vr01 & VR01_LCD_ENABLE)
+		return true;
+	else
+		return false;
+}
+
+static void ivch_mode_set(struct intel_dvo_device *dvo,
+			  struct drm_display_mode *mode,
+			  struct drm_display_mode *adjusted_mode)
+{
+	uint16_t vr40 = 0;
+	uint16_t vr01;
+
+	vr01 = 0;
+	vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
+		VR40_HORIZONTAL_INTERP_ENABLE);
+
+	if (mode->hdisplay != adjusted_mode->hdisplay ||
+	    mode->vdisplay != adjusted_mode->vdisplay) {
+		uint16_t x_ratio, y_ratio;
+
+		vr01 |= VR01_PANEL_FIT_ENABLE;
+		vr40 |= VR40_CLOCK_GATING_ENABLE;
+		x_ratio = (((mode->hdisplay - 1) << 16) /
+			   (adjusted_mode->hdisplay - 1)) >> 2;
+		y_ratio = (((mode->vdisplay - 1) << 16) /
+			   (adjusted_mode->vdisplay - 1)) >> 2;
+		ivch_write(dvo, VR42, x_ratio);
+		ivch_write(dvo, VR41, y_ratio);
+	} else {
+		vr01 &= ~VR01_PANEL_FIT_ENABLE;
+		vr40 &= ~VR40_CLOCK_GATING_ENABLE;
+	}
+	vr40 &= ~VR40_AUTO_RATIO_ENABLE;
+
+	ivch_write(dvo, VR01, vr01);
+	ivch_write(dvo, VR40, vr40);
+
+	ivch_dump_regs(dvo);
+}
+
+static void ivch_dump_regs(struct intel_dvo_device *dvo)
+{
+	uint16_t val;
+
+	ivch_read(dvo, VR00, &val);
+	DRM_LOG_KMS("VR00: 0x%04x\n", val);
+	ivch_read(dvo, VR01, &val);
+	DRM_LOG_KMS("VR01: 0x%04x\n", val);
+	ivch_read(dvo, VR30, &val);
+	DRM_LOG_KMS("VR30: 0x%04x\n", val);
+	ivch_read(dvo, VR40, &val);
+	DRM_LOG_KMS("VR40: 0x%04x\n", val);
+
+	/* GPIO registers */
+	ivch_read(dvo, VR80, &val);
+	DRM_LOG_KMS("VR80: 0x%04x\n", val);
+	ivch_read(dvo, VR81, &val);
+	DRM_LOG_KMS("VR81: 0x%04x\n", val);
+	ivch_read(dvo, VR82, &val);
+	DRM_LOG_KMS("VR82: 0x%04x\n", val);
+	ivch_read(dvo, VR83, &val);
+	DRM_LOG_KMS("VR83: 0x%04x\n", val);
+	ivch_read(dvo, VR84, &val);
+	DRM_LOG_KMS("VR84: 0x%04x\n", val);
+	ivch_read(dvo, VR85, &val);
+	DRM_LOG_KMS("VR85: 0x%04x\n", val);
+	ivch_read(dvo, VR86, &val);
+	DRM_LOG_KMS("VR86: 0x%04x\n", val);
+	ivch_read(dvo, VR87, &val);
+	DRM_LOG_KMS("VR87: 0x%04x\n", val);
+	ivch_read(dvo, VR88, &val);
+	DRM_LOG_KMS("VR88: 0x%04x\n", val);
+
+	/* Scratch register 0 - AIM Panel type */
+	ivch_read(dvo, VR8E, &val);
+	DRM_LOG_KMS("VR8E: 0x%04x\n", val);
+
+	/* Scratch register 1 - Status register */
+	ivch_read(dvo, VR8F, &val);
+	DRM_LOG_KMS("VR8F: 0x%04x\n", val);
+}
+
+static void ivch_destroy(struct intel_dvo_device *dvo)
+{
+	struct ivch_priv *priv = dvo->dev_priv;
+
+	if (priv) {
+		kfree(priv);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops ivch_ops = {
+	.init = ivch_init,
+	.dpms = ivch_dpms,
+	.get_hw_state = ivch_get_hw_state,
+	.mode_valid = ivch_mode_valid,
+	.mode_set = ivch_mode_set,
+	.detect = ivch_detect,
+	.dump_regs = ivch_dump_regs,
+	.destroy = ivch_destroy,
+};
diff --git a/linux-imx/drivers/gpu/drm/i915/dvo_ns2501.c b/linux-imx/drivers/gpu/drm/i915/dvo_ns2501.c
new file mode 100644
index 0000000..c4a255b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -0,0 +1,588 @@
+/*
+ *
+ * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "dvo.h"
+#include "i915_reg.h"
+#include "i915_drv.h"
+
+#define NS2501_VID 0x1305
+#define NS2501_DID 0x6726
+
+#define NS2501_VID_LO 0x00
+#define NS2501_VID_HI 0x01
+#define NS2501_DID_LO 0x02
+#define NS2501_DID_HI 0x03
+#define NS2501_REV 0x04
+#define NS2501_RSVD 0x05
+#define NS2501_FREQ_LO 0x06
+#define NS2501_FREQ_HI 0x07
+
+#define NS2501_REG8 0x08
+#define NS2501_8_VEN (1<<5)
+#define NS2501_8_HEN (1<<4)
+#define NS2501_8_DSEL (1<<3)
+#define NS2501_8_BPAS (1<<2)
+#define NS2501_8_RSVD (1<<1)
+#define NS2501_8_PD (1<<0)
+
+#define NS2501_REG9 0x09
+#define NS2501_9_VLOW (1<<7)
+#define NS2501_9_MSEL_MASK (0x7<<4)
+#define NS2501_9_TSEL (1<<3)
+#define NS2501_9_RSEN (1<<2)
+#define NS2501_9_RSVD (1<<1)
+#define NS2501_9_MDI (1<<0)
+
+#define NS2501_REGC 0x0c
+
+struct ns2501_priv {
+	/* I2CDevRec d; */
+	bool quiet;
+	int reg_8_shadow;
+	int reg_8_set;
+	/* Shadow registers for i915 */
+	int dvoc;
+	int pll_a;
+	int srcdim;
+	int fw_blc;
+};
+
+#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
+
+/*
+ * For reasons that remain unclear, the ns2501 (at least on the
+ * Fujitsu/Siemens laptops) does not respond on the i2c bus unless both
+ * the PLL is running and the display is configured in its native
+ * resolution.
+ * This function forces the DVO on and saves the i915 registers it
+ * touches; restore_dvo() below restores them afterwards.
+ *
+ * This is admittedly a hack, but it works: without it, ns2501_readb
+ * and ns2501_writeb fail when switching resolutions.
+ */
+
+static void enable_dvo(struct intel_dvo_device *dvo)
+{
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+
+	DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
+
+	ns->dvoc = I915_READ(DVO_C);
+	ns->pll_a = I915_READ(_DPLL_A);
+	ns->srcdim = I915_READ(DVOC_SRCDIM);
+	ns->fw_blc = I915_READ(FW_BLC);
+
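+	/* The magic values below are not documented anywhere; they were
+	 * presumably captured from a known-good register state (a forced
+	 * 1024x768 mode with the DVO enabled).
+	 */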
+	I915_WRITE(DVOC, 0x10004084);
+	I915_WRITE(_DPLL_A, 0xd0820000);
+	I915_WRITE(DVOC_SRCDIM, 0x400300);	/* 1024x768 */
+	I915_WRITE(FW_BLC, 0x1080304);
+
+	I915_WRITE(DVOC, 0x90004084);
+}
+
+/*
+ * Restore the I915 registers modified by the above
+ * trigger function.
+ */
+static void restore_dvo(struct intel_dvo_device *dvo)
+{
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+	I915_WRITE(DVOC, ns->dvoc);
+	I915_WRITE(_DPLL_A, ns->pll_a);
+	I915_WRITE(DVOC_SRCDIM, ns->srcdim);
+	I915_WRITE(FW_BLC, ns->fw_blc);
+}
+
+/*
+ * Read a register from the ns2501.
+ * Returns true if successful, false otherwise.
+ * If it returns false, it might be wise to enable the
+ * DVO with the above function.
+ */
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[2];
+	u8 in_buf[2];
+
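+	/* A register read is a two-message transaction: write the
+	 * register index, then read one byte back from the same slave.
+	 */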
+	struct i2c_msg msgs[] = {
+		{
+		 .addr = dvo->slave_addr,
+		 .flags = 0,
+		 .len = 1,
+		 .buf = out_buf,
+		 },
+		{
+		 .addr = dvo->slave_addr,
+		 .flags = I2C_M_RD,
+		 .len = 1,
+		 .buf = in_buf,
+		 }
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
+		*ch = in_buf[0];
+		return true;
+	}
+
+	if (!ns->quiet) {
+		DRM_DEBUG_KMS
+		    ("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
+		     adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/*
+ * Write a register to the ns2501.
+ * Returns true if successful, false otherwise.
+ * If it returns false, it might be wise to enable the
+ * DVO with the above function.
+ */
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	uint8_t out_buf[2];
+
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = ch;
+
+	if (i2c_transfer(adapter, &msg, 1) == 1)
+		return true;
+
+	if (!ns->quiet) {
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
+			      addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/* National Semiconductor 2501 driver for chip on i2c bus.
+ * Scan for the chip on the bus.
+ * This relies on the VBIOS having initialized the PLL so that we can
+ * talk to the chip; if it hasn't, the chip is not seen and goes
+ * undetected. Bummer!
+ */
+static bool ns2501_init(struct intel_dvo_device *dvo,
+			struct i2c_adapter *adapter)
+{
+	/* this will detect the NS2501 chip on the specified i2c bus */
+	struct ns2501_priv *ns;
+	unsigned char ch;
+
+	ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
+	if (ns == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = ns;
+	ns->quiet = true;
+
+	if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
+		goto out;
+
+	if (ch != (NS2501_VID & 0xff)) {
+		DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+			      ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+
+	if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
+		goto out;
+
+	if (ch != (NS2501_DID & 0xff)) {
+		DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
+			      ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+	ns->quiet = false;
+	ns->reg_8_set = 0;
+	ns->reg_8_shadow =
+	    NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
+
+	DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
+	return true;
+
+out:
+	kfree(ns);
+	return false;
+}
+
+static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
+{
+	/*
+	 * This is a laptop display and there is no hotplugging.
+	 * Even if there were, the detection bit of the 2501 is unreliable,
+	 * as it only works for some display types.
+	 * It is even more unreliable since the PLL must be active for
+	 * the chip to be read at all.
+	 */
+	return connector_status_connected;
+}
+
+static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
+					      struct drm_display_mode *mode)
+{
+	DRM_DEBUG_KMS
+	    ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
+	     __func__, mode->hdisplay, mode->htotal, mode->vdisplay,
+	     mode->vtotal);
+
+	/*
+	 * Currently these are the only modes I have data for.
+	 * More might exist. It is unclear how to find the native
+	 * resolution of the panel from here; knowing it would let us
+	 * always accept the native mode by disabling the scaler.
+	 */
+	if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
+	    (mode->hdisplay == 640 && mode->vdisplay == 480) ||
+	    (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
+		return MODE_OK;
+	} else {
+		return MODE_ONE_SIZE;	/* Is this a reasonable error? */
+	}
+}
+
+static void ns2501_mode_set(struct intel_dvo_device *dvo,
+			    struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
+{
+	bool ok;
+	bool restore = false;
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+
+	DRM_DEBUG_KMS
+	    ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
+	     __func__, mode->hdisplay, mode->htotal, mode->vdisplay,
+	     mode->vtotal);
+
+	/*
+	 * Where do I find the native resolution for which scaling is
+	 * not required?
+	 *
+	 * First force the DVO on, as otherwise the chip does not appear
+	 * on the i2c bus.
+	 */
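+	/* Retry the whole register sequence: on any i2c failure, force
+	 * the DVO on (enable_dvo) and start over.
+	 */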
+	do {
+		ok = true;
+
+		if (mode->hdisplay == 800 && mode->vdisplay == 600) {
+			/* mode 277 */
+			ns->reg_8_shadow &= ~NS2501_8_BPAS;
+			DRM_DEBUG_KMS("%s: switching to 800x600\n",
+				      __func__);
+
+			/*
+			 * No, I do not know where this data comes from.
+			 * It is simply what the video BIOS left in the
+			 * DVO, copied over verbatim.
+			 * This also means that no modes other than the
+			 * ones set up by the BIOS can be supported.
+			 */
+			ok &= ns2501_writeb(dvo, 0x11, 0xc8);	// 0xc7 also works.
+			ok &= ns2501_writeb(dvo, 0x1b, 0x19);
+			ok &= ns2501_writeb(dvo, 0x1c, 0x62);	// VBIOS left 0x64 here, but 0x62 works nicer
+			ok &= ns2501_writeb(dvo, 0x1d, 0x02);
+
+			ok &= ns2501_writeb(dvo, 0x34, 0x03);
+			ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+			ok &= ns2501_writeb(dvo, 0x80, 0x27);
+			ok &= ns2501_writeb(dvo, 0x81, 0x03);
+			ok &= ns2501_writeb(dvo, 0x82, 0x41);
+			ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+			ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+			ok &= ns2501_writeb(dvo, 0x8e, 0x04);
+			ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x90, 0xfe);	/* vertical. VBIOS left 0xff here, but 0xfe works better */
+			ok &= ns2501_writeb(dvo, 0x91, 0x07);
+			ok &= ns2501_writeb(dvo, 0x94, 0x00);
+			ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x96, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x99, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+			ok &= ns2501_writeb(dvo, 0x9c, 0x23);	/* Looks like first and last line of the image. */
+			ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+			ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+			ok &= ns2501_writeb(dvo, 0xa4, 0x80);
+
+			ok &= ns2501_writeb(dvo, 0xb6, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0xb9, 0xc8);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xba, 0x00);	/* horizontal? */
+
+			ok &= ns2501_writeb(dvo, 0xc0, 0x05);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
+
+			ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
+
+			ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+			ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
+
+			ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc7, 0x73);
+			ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+		} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
+			/* mode 274 */
+			DRM_DEBUG_KMS("%s: switching to 640x480\n",
+				      __func__);
+			/*
+			 * No, I do not know where this data comes from.
+			 * It is simply what the video BIOS left in the
+			 * DVO, copied over verbatim.
+			 * This also means that no modes other than the
+			 * ones set up by the BIOS can be supported.
+			 */
+			ns->reg_8_shadow &= ~NS2501_8_BPAS;
+
+			ok &= ns2501_writeb(dvo, 0x11, 0xa0);
+			ok &= ns2501_writeb(dvo, 0x1b, 0x11);
+			ok &= ns2501_writeb(dvo, 0x1c, 0x54);
+			ok &= ns2501_writeb(dvo, 0x1d, 0x03);
+
+			ok &= ns2501_writeb(dvo, 0x34, 0x03);
+			ok &= ns2501_writeb(dvo, 0x35, 0xff);
+
+			ok &= ns2501_writeb(dvo, 0x80, 0xff);
+			ok &= ns2501_writeb(dvo, 0x81, 0x07);
+			ok &= ns2501_writeb(dvo, 0x82, 0x3d);
+			ok &= ns2501_writeb(dvo, 0x83, 0x05);
+
+			ok &= ns2501_writeb(dvo, 0x8d, 0x02);
+			ok &= ns2501_writeb(dvo, 0x8e, 0x10);
+			ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x90, 0xff);	/* vertical */
+			ok &= ns2501_writeb(dvo, 0x91, 0x07);
+			ok &= ns2501_writeb(dvo, 0x94, 0x00);
+			ok &= ns2501_writeb(dvo, 0x95, 0x00);
+
+			ok &= ns2501_writeb(dvo, 0x96, 0x05);
+
+			ok &= ns2501_writeb(dvo, 0x99, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9a, 0x88);
+
+			ok &= ns2501_writeb(dvo, 0x9c, 0x24);
+			ok &= ns2501_writeb(dvo, 0x9d, 0x00);
+			ok &= ns2501_writeb(dvo, 0x9e, 0x25);
+			ok &= ns2501_writeb(dvo, 0x9f, 0x03);
+
+			ok &= ns2501_writeb(dvo, 0xa4, 0x84);
+
+			ok &= ns2501_writeb(dvo, 0xb6, 0x09);
+
+			ok &= ns2501_writeb(dvo, 0xb9, 0xa0);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xba, 0x00);	/* horizontal? */
+
+			ok &= ns2501_writeb(dvo, 0xc0, 0x05);	/* horizontal? */
+			ok &= ns2501_writeb(dvo, 0xc1, 0x90);
+
+			ok &= ns2501_writeb(dvo, 0xc2, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
+
+			ok &= ns2501_writeb(dvo, 0xc4, 0x03);
+			ok &= ns2501_writeb(dvo, 0xc5, 0x16);
+
+			ok &= ns2501_writeb(dvo, 0xc6, 0x00);
+			ok &= ns2501_writeb(dvo, 0xc7, 0x02);
+			ok &= ns2501_writeb(dvo, 0xc8, 0x02);
+
+		} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
+			/* mode 280 */
+			DRM_DEBUG_KMS("%s: switching to 1024x768\n",
+				      __func__);
+			/*
+			 * This may or may not work. It silently assumes
+			 * that the native panel resolution is 1024x768;
+			 * if it isn't, this leaves the scaler disabled
+			 * and the resulting picture is likely not the
+			 * expected one.
+			 *
+			 * The problem is that there is nowhere to take
+			 * the panel dimensions from.
+			 *
+			 * Enable the bypass: scaling is not required, so
+			 * the scaler registers are irrelevant here.
+			 */
+			ns->reg_8_shadow |= NS2501_8_BPAS;
+			ok &= ns2501_writeb(dvo, 0x37, 0x44);
+		} else {
+			/*
+			 * No data known for this mode. Bummer!
+			 * Hopefully the code never gets here, as
+			 * ns2501_mode_valid accepted no other modes.
+			 */
+			ns->reg_8_shadow |= NS2501_8_BPAS;
+		}
+		ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
+
+		if (!ok) {
+			if (restore)
+				restore_dvo(dvo);
+			enable_dvo(dvo);
+			restore = true;
+		}
+	} while (!ok);
+	/*
+	 * Restore the old i915 registers before
+	 * forcing the ns2501 on.
+	 */
+	if (restore)
+		restore_dvo(dvo);
+}
+
+/* read back the current NS2501 power state */
+static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
+{
+	unsigned char ch;
+
+	if (!ns2501_readb(dvo, NS2501_REG8, &ch))
+		return false;
+
+	if (ch & NS2501_8_PD)
+		return true;
+	else
+		return false;
+}
+
+/* set the NS2501 power state */
+static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	bool ok;
+	bool restore = false;
+	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+	unsigned char ch;
+
+	DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
+		      __func__, enable);
+
+	ch = ns->reg_8_shadow;
+
+	if (enable)
+		ch |= NS2501_8_PD;
+	else
+		ch &= ~NS2501_8_PD;
+
+	if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
+		ns->reg_8_set = 1;
+		ns->reg_8_shadow = ch;
+
+		do {
+			ok = true;
+			ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
+			ok &=
+			    ns2501_writeb(dvo, 0x34,
+					  enable ? 0x03 : 0x00);
+			ok &=
+			    ns2501_writeb(dvo, 0x35,
+					  enable ? 0xff : 0x00);
+			if (!ok) {
+				if (restore)
+					restore_dvo(dvo);
+				enable_dvo(dvo);
+				restore = true;
+			}
+		} while (!ok);
+
+		if (restore)
+			restore_dvo(dvo);
+	}
+}
+
+static void ns2501_dump_regs(struct intel_dvo_device *dvo)
+{
+	uint8_t val;
+
+	ns2501_readb(dvo, NS2501_FREQ_LO, &val);
+	DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_FREQ_HI, &val);
+	DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_REG8, &val);
+	DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_REG9, &val);
+	DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
+	ns2501_readb(dvo, NS2501_REGC, &val);
+	DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
+}
+
+static void ns2501_destroy(struct intel_dvo_device *dvo)
+{
+	struct ns2501_priv *ns = dvo->dev_priv;
+
+	if (ns) {
+		kfree(ns);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops ns2501_ops = {
+	.init = ns2501_init,
+	.detect = ns2501_detect,
+	.mode_valid = ns2501_mode_valid,
+	.mode_set = ns2501_mode_set,
+	.dpms = ns2501_dpms,
+	.get_hw_state = ns2501_get_hw_state,
+	.dump_regs = ns2501_dump_regs,
+	.destroy = ns2501_destroy,
+};
diff --git a/linux-imx/drivers/gpu/drm/i915/dvo_sil164.c b/linux-imx/drivers/gpu/drm/i915/dvo_sil164.c
new file mode 100644
index 0000000..4debd32
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/dvo_sil164.c
@@ -0,0 +1,279 @@
+/**************************************************************************
+
+Copyright © 2006 Dave Airlie
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "dvo.h"
+
+#define SIL164_VID 0x0001
+#define SIL164_DID 0x0006
+
+#define SIL164_VID_LO 0x00
+#define SIL164_VID_HI 0x01
+#define SIL164_DID_LO 0x02
+#define SIL164_DID_HI 0x03
+#define SIL164_REV    0x04
+#define SIL164_RSVD   0x05
+#define SIL164_FREQ_LO 0x06
+#define SIL164_FREQ_HI 0x07
+
+#define SIL164_REG8 0x08
+#define SIL164_8_VEN (1<<5)
+#define SIL164_8_HEN (1<<4)
+#define SIL164_8_DSEL (1<<3)
+#define SIL164_8_BSEL (1<<2)
+#define SIL164_8_EDGE (1<<1)
+#define SIL164_8_PD   (1<<0)
+
+#define SIL164_REG9 0x09
+#define SIL164_9_VLOW (1<<7)
+#define SIL164_9_MSEL_MASK (0x7<<4)
+#define SIL164_9_TSEL (1<<3)
+#define SIL164_9_RSEN (1<<2)
+#define SIL164_9_HTPLG (1<<1)
+#define SIL164_9_MDI (1<<0)
+
+#define SIL164_REGC 0x0c
+
+struct sil164_priv {
+	/* I2CDevRec d; */
+	bool quiet;
+};
+
+#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
+
+static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+	struct sil164_priv *sil = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[2];
+	u8 in_buf[2];
+
+	struct i2c_msg msgs[] = {
+		{
+			.addr = dvo->slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = dvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
+		*ch = in_buf[0];
+		return true;
+	}
+
+	if (!sil->quiet) {
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+	return false;
+}
+
+static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+	struct sil164_priv *sil = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	uint8_t out_buf[2];
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = ch;
+
+	if (i2c_transfer(adapter, &msg, 1) == 1)
+		return true;
+
+	if (!sil->quiet) {
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
+/* Silicon Image 164 driver for chip on i2c bus */
+static bool sil164_init(struct intel_dvo_device *dvo,
+			struct i2c_adapter *adapter)
+{
+	/* this will detect the SIL164 chip on the specified i2c bus */
+	struct sil164_priv *sil;
+	unsigned char ch;
+
+	sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL);
+	if (sil == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = sil;
+	sil->quiet = true;
+
+	if (!sil164_readb(dvo, SIL164_VID_LO, &ch))
+		goto out;
+
+	if (ch != (SIL164_VID & 0xff)) {
+		DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
+			  ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+
+	if (!sil164_readb(dvo, SIL164_DID_LO, &ch))
+		goto out;
+
+	if (ch != (SIL164_DID & 0xff)) {
+		DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
+			  ch, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+	sil->quiet = false;
+
+	DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
+	return true;
+
+out:
+	kfree(sil);
+	return false;
+}
+
+static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
+{
+	uint8_t reg9;
+
+	if (!sil164_readb(dvo, SIL164_REG9, &reg9))
+		return connector_status_unknown;
+
+	if (reg9 & SIL164_9_HTPLG)
+		return connector_status_connected;
+	else
+		return connector_status_disconnected;
+}
+
+static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
+					      struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static void sil164_mode_set(struct intel_dvo_device *dvo,
+			    struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
+{
+	/* As long as the basics are set up, since we don't have clock
+	 * dependencies in the mode setup, we can just leave the
+	 * registers alone and everything will work fine.
+	 */
+	/* recommended programming sequence from doc */
+	/*sil164_writeb(sil, 0x08, 0x30);
+	  sil164_writeb(sil, 0x09, 0x00);
+	  sil164_writeb(sil, 0x0a, 0x90);
+	  sil164_writeb(sil, 0x0c, 0x89);
+	  sil164_writeb(sil, 0x08, 0x31);*/
+	/* don't do much */
+	return;
+}
+
+/* set the SIL164 power state */
+static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	bool ret;
+	unsigned char ch;
+
+	ret = sil164_readb(dvo, SIL164_REG8, &ch);
+	if (!ret)
+		return;
+
+	if (enable)
+		ch |= SIL164_8_PD;
+	else
+		ch &= ~SIL164_8_PD;
+
+	sil164_writeb(dvo, SIL164_REG8, ch);
+	return;
+}
+
+static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
+{
+	bool ret;
+	unsigned char ch;
+
+	ret = sil164_readb(dvo, SIL164_REG8, &ch);
+	if (!ret)
+		return false;
+
+	if (ch & SIL164_8_PD)
+		return true;
+	else
+		return false;
+}
+
+static void sil164_dump_regs(struct intel_dvo_device *dvo)
+{
+	uint8_t val;
+
+	sil164_readb(dvo, SIL164_FREQ_LO, &val);
+	DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
+	sil164_readb(dvo, SIL164_FREQ_HI, &val);
+	DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
+	sil164_readb(dvo, SIL164_REG8, &val);
+	DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
+	sil164_readb(dvo, SIL164_REG9, &val);
+	DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
+	sil164_readb(dvo, SIL164_REGC, &val);
+	DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
+}
+
+static void sil164_destroy(struct intel_dvo_device *dvo)
+{
+	struct sil164_priv *sil = dvo->dev_priv;
+
+	if (sil) {
+		kfree(sil);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops sil164_ops = {
+	.init = sil164_init,
+	.detect = sil164_detect,
+	.mode_valid = sil164_mode_valid,
+	.mode_set = sil164_mode_set,
+	.dpms = sil164_dpms,
+	.get_hw_state = sil164_get_hw_state,
+	.dump_regs = sil164_dump_regs,
+	.destroy = sil164_destroy,
+};
diff --git a/linux-imx/drivers/gpu/drm/i915/dvo_tfp410.c b/linux-imx/drivers/gpu/drm/i915/dvo_tfp410.c
new file mode 100644
index 0000000..e17f1b0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright © 2007 Dave Mueller
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dave Mueller <dave.mueller@gmx.ch>
+ *
+ */
+
+#include "dvo.h"
+
+/* register definitions according to the TFP410 data sheet */
+#define TFP410_VID		0x014C
+#define TFP410_DID		0x0410
+
+#define TFP410_VID_LO		0x00
+#define TFP410_VID_HI		0x01
+#define TFP410_DID_LO		0x02
+#define TFP410_DID_HI		0x03
+#define TFP410_REV		0x04
+
+#define TFP410_CTL_1		0x08
+#define TFP410_CTL_1_TDIS	(1<<6)
+#define TFP410_CTL_1_VEN	(1<<5)
+#define TFP410_CTL_1_HEN	(1<<4)
+#define TFP410_CTL_1_DSEL	(1<<3)
+#define TFP410_CTL_1_BSEL	(1<<2)
+#define TFP410_CTL_1_EDGE	(1<<1)
+#define TFP410_CTL_1_PD		(1<<0)
+
+#define TFP410_CTL_2		0x09
+#define TFP410_CTL_2_VLOW	(1<<7)
+#define TFP410_CTL_2_MSEL_MASK	(0x7<<4)
+#define TFP410_CTL_2_MSEL	(1<<4)
+#define TFP410_CTL_2_TSEL	(1<<3)
+#define TFP410_CTL_2_RSEN	(1<<2)
+#define TFP410_CTL_2_HTPLG	(1<<1)
+#define TFP410_CTL_2_MDI	(1<<0)
+
+#define TFP410_CTL_3		0x0A
+#define TFP410_CTL_3_DK_MASK	(0x7<<5)
+#define TFP410_CTL_3_DK		(1<<5)
+#define TFP410_CTL_3_DKEN	(1<<4)
+#define TFP410_CTL_3_CTL_MASK	(0x7<<1)
+#define TFP410_CTL_3_CTL	(1<<1)
+
+#define TFP410_USERCFG		0x0B
+
+#define TFP410_DE_DLY		0x32
+
+#define TFP410_DE_CTL		0x33
+#define TFP410_DE_CTL_DEGEN	(1<<6)
+#define TFP410_DE_CTL_VSPOL	(1<<5)
+#define TFP410_DE_CTL_HSPOL	(1<<4)
+#define TFP410_DE_CTL_DEDLY8	(1<<0)
+
+#define TFP410_DE_TOP		0x34
+
+#define TFP410_DE_CNT_LO	0x36
+#define TFP410_DE_CNT_HI	0x37
+
+#define TFP410_DE_LIN_LO	0x38
+#define TFP410_DE_LIN_HI	0x39
+
+#define TFP410_H_RES_LO		0x3A
+#define TFP410_H_RES_HI		0x3B
+
+#define TFP410_V_RES_LO		0x3C
+#define TFP410_V_RES_HI		0x3D
+
+struct tfp410_priv {
+	bool quiet;
+};
+
+static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+{
+	struct tfp410_priv *tfp = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	u8 out_buf[2];
+	u8 in_buf[2];
+
+	struct i2c_msg msgs[] = {
+		{
+			.addr = dvo->slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = dvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
+		*ch = in_buf[0];
+		return true;
+	}
+
+	if (!tfp->quiet) {
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+	return false;
+}
+
+static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+{
+	struct tfp410_priv *tfp = dvo->dev_priv;
+	struct i2c_adapter *adapter = dvo->i2c_bus;
+	uint8_t out_buf[2];
+	struct i2c_msg msg = {
+		.addr = dvo->slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = ch;
+
+	if (i2c_transfer(adapter, &msg, 1) == 1)
+		return true;
+
+	if (!tfp->quiet) {
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+			  addr, adapter->name, dvo->slave_addr);
+	}
+
+	return false;
+}
+
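+/* Read a 16-bit little-endian ID (vendor or device) starting at @addr.
+ * Returns -1 if either byte read fails.
+ */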
+static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
+{
+	uint8_t ch1, ch2;
+
+	if (tfp410_readb(dvo, addr+0, &ch1) &&
+	    tfp410_readb(dvo, addr+1, &ch2))
+		return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF);
+
+	return -1;
+}
+
+/* Ti TFP410 driver for chip on i2c bus */
+static bool tfp410_init(struct intel_dvo_device *dvo,
+			struct i2c_adapter *adapter)
+{
+	/* this will detect the tfp410 chip on the specified i2c bus */
+	struct tfp410_priv *tfp;
+	int id;
+
+	tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL);
+	if (tfp == NULL)
+		return false;
+
+	dvo->i2c_bus = adapter;
+	dvo->dev_priv = tfp;
+	tfp->quiet = true;
+
+	if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
+		DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
+				"Slave %d.\n",
+			  id, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+
+	if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
+		DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
+				"Slave %d.\n",
+			  id, adapter->name, dvo->slave_addr);
+		goto out;
+	}
+	tfp->quiet = false;
+	return true;
+out:
+	kfree(tfp);
+	return false;
+}
+
+static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
+{
+	enum drm_connector_status ret = connector_status_disconnected;
+	uint8_t ctl2;
+
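+	/* Receiver sense (RSEN) is used as the connect indication here,
+	 * rather than the hot-plug (HTPLG) bit.
+	 */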
+	if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
+		if (ctl2 & TFP410_CTL_2_RSEN)
+			ret = connector_status_connected;
+		else
+			ret = connector_status_disconnected;
+	}
+
+	return ret;
+}
+
+static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
+					      struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static void tfp410_mode_set(struct intel_dvo_device *dvo,
+			    struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
+{
+	/* As long as the basics are set up, since we don't have clock
+	 * dependencies in the mode setup, we can just leave the
+	 * registers alone and everything will work fine.
+	 */
+	/* don't do much */
+	return;
+}
+
+/* set the tfp410 power state */
+static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
+{
+	uint8_t ctl1;
+
+	if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+		return;
+
+	if (enable)
+		ctl1 |= TFP410_CTL_1_PD;
+	else
+		ctl1 &= ~TFP410_CTL_1_PD;
+
+	tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
+}
+
+static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
+{
+	uint8_t ctl1;
+
+	if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
+		return false;
+
+	if (ctl1 & TFP410_CTL_1_PD)
+		return true;
+	else
+		return false;
+}
+
+static void tfp410_dump_regs(struct intel_dvo_device *dvo)
+{
+	uint8_t val, val2;
+
+	tfp410_readb(dvo, TFP410_REV, &val);
+	DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_CTL_1, &val);
+	DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_CTL_2, &val);
+	DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_CTL_3, &val);
+	DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_USERCFG, &val);
+	DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_DE_DLY, &val);
+	DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_DE_CTL, &val);
+	DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_DE_TOP, &val);
+	DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
+	tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
+	tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
+	DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+	tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
+	tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
+	DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+	tfp410_readb(dvo, TFP410_H_RES_LO, &val);
+	tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
+	DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+	tfp410_readb(dvo, TFP410_V_RES_LO, &val);
+	tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
+	DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+}
+
+static void tfp410_destroy(struct intel_dvo_device *dvo)
+{
+	struct tfp410_priv *tfp = dvo->dev_priv;
+
+	if (tfp) {
+		kfree(tfp);
+		dvo->dev_priv = NULL;
+	}
+}
+
+struct intel_dvo_dev_ops tfp410_ops = {
+	.init = tfp410_init,
+	.detect = tfp410_detect,
+	.mode_valid = tfp410_mode_valid,
+	.mode_set = tfp410_mode_set,
+	.dpms = tfp410_dpms,
+	.get_hw_state = tfp410_get_hw_state,
+	.dump_regs = tfp410_dump_regs,
+	.destroy = tfp410_destroy,
+};
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_debugfs.c b/linux-imx/drivers/gpu/drm/i915/i915_debugfs.c
new file mode 100644
index 0000000..e913d32
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_debugfs.c
@@ -0,0 +1,2165 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <generated/utsrelease.h>
+#include <drm/drmP.h>
+#include "intel_drv.h"
+#include "intel_ringbuffer.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#define DRM_I915_RING_DEBUG 1
+
+
+#if defined(CONFIG_DEBUG_FS)
+
+enum {
+	ACTIVE_LIST,
+	INACTIVE_LIST,
+	PINNED_LIST,
+};
+
+static const char *yesno(int v)
+{
+	return v ? "yes" : "no";
+}
+
+static int i915_capabilities(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	const struct intel_device_info *info = INTEL_INFO(dev);
+
+	seq_printf(m, "gen: %d\n", info->gen);
+	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
+#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+#define DEV_INFO_SEP ;
+	DEV_INFO_FLAGS;
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
+
+	return 0;
+}
+
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
+{
+	if (obj->user_pin_count > 0)
+		return "P";
+	else if (obj->pin_count > 0)
+		return "p";
+	else
+		return " ";
+}
+
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
+{
+	switch (obj->tiling_mode) {
+	default:
+	case I915_TILING_NONE: return " ";
+	case I915_TILING_X: return "X";
+	case I915_TILING_Y: return "Y";
+	}
+}
+
+static const char *cache_level_str(int type)
+{
+	switch (type) {
+	case I915_CACHE_NONE: return " uncached";
+	case I915_CACHE_LLC: return " snooped (LLC)";
+	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
+	default: return "";
+	}
+}
+
+static void
+describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+{
+	seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
+		   &obj->base,
+		   get_pin_flag(obj),
+		   get_tiling_flag(obj),
+		   obj->base.size / 1024,
+		   obj->base.read_domains,
+		   obj->base.write_domain,
+		   obj->last_read_seqno,
+		   obj->last_write_seqno,
+		   obj->last_fenced_seqno,
+		   cache_level_str(obj->cache_level),
+		   obj->dirty ? " dirty" : "",
+		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+	if (obj->base.name)
+		seq_printf(m, " (name: %d)", obj->base.name);
+	if (obj->pin_count)
+		seq_printf(m, " (pinned x %d)", obj->pin_count);
+	if (obj->fence_reg != I915_FENCE_REG_NONE)
+		seq_printf(m, " (fence: %d)", obj->fence_reg);
+	if (obj->gtt_space != NULL)
+		seq_printf(m, " (gtt offset: %08x, size: %08x)",
+			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+	if (obj->stolen)
+		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
+	if (obj->pin_mappable || obj->fault_mappable) {
+		char s[3], *t = s;
+		if (obj->pin_mappable)
+			*t++ = 'p';
+		if (obj->fault_mappable)
+			*t++ = 'f';
+		*t = '\0';
+		seq_printf(m, " (%s mappable)", s);
+	}
+	if (obj->ring != NULL)
+		seq_printf(m, " (%s)", obj->ring->name);
+}
+
+static int i915_gem_object_list_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	uintptr_t list = (uintptr_t) node->info_ent->data;
+	struct list_head *head;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	switch (list) {
+	case ACTIVE_LIST:
+		seq_printf(m, "Active:\n");
+		head = &dev_priv->mm.active_list;
+		break;
+	case INACTIVE_LIST:
+		seq_printf(m, "Inactive:\n");
+		head = &dev_priv->mm.inactive_list;
+		break;
+	default:
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj, head, mm_list) {
+		seq_printf(m, "   ");
+		describe_obj(m, obj);
+		seq_printf(m, "\n");
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
+		count++;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
+	return 0;
+}
+
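+/* Accumulate the number and GTT size of the objects on @list into the
+ * caller's local size/count/mappable_size/mappable_count variables;
+ * a macro so that the same locals can be reused for several lists.
+ */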
+#define count_objects(list, member) do { \
+	list_for_each_entry(obj, list, member) { \
+		size += obj->gtt_space->size; \
+		++count; \
+		if (obj->map_and_fenceable) { \
+			mappable_size += obj->gtt_space->size; \
+			++mappable_count; \
+		} \
+	} \
+} while (0)
+
+static int i915_gem_object_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 count, mappable_count, purgeable_count;
+	size_t size, mappable_size, purgeable_size;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "%u objects, %zu bytes\n",
+		   dev_priv->mm.object_count,
+		   dev_priv->mm.object_memory);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.bound_list, gtt_list);
+	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.active_list, mm_list);
+	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.inactive_list, mm_list);
+	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = purgeable_size = purgeable_count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
+		size += obj->base.size, ++count;
+		if (obj->madv == I915_MADV_DONTNEED)
+			purgeable_size += obj->base.size, ++purgeable_count;
+	}
+	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+
+	size = count = mappable_size = mappable_count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+		if (obj->fault_mappable) {
+			size += obj->gtt_space->size;
+			++count;
+		}
+		if (obj->pin_mappable) {
+			mappable_size += obj->gtt_space->size;
+			++mappable_count;
+		}
+		if (obj->madv == I915_MADV_DONTNEED) {
+			purgeable_size += obj->base.size;
+			++purgeable_count;
+		}
+	}
+	seq_printf(m, "%u purgeable objects, %zu bytes\n",
+		   purgeable_count, purgeable_size);
+	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+		   mappable_count, mappable_size);
+	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+		   count, size);
+
+	seq_printf(m, "%zu [%lu] gtt total\n",
+		   dev_priv->gtt.total,
+		   dev_priv->gtt.mappable_end - dev_priv->gtt.start);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_gem_gtt_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	uintptr_t list = (uintptr_t) node->info_ent->data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+		if (list == PINNED_LIST && obj->pin_count == 0)
+			continue;
+
+		seq_printf(m, "   ");
+		describe_obj(m, obj);
+		seq_printf(m, "\n");
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
+		count++;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
+
+	return 0;
+}
+
+static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	unsigned long flags;
+	struct intel_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+		const char pipe = pipe_name(crtc->pipe);
+		const char plane = plane_name(crtc->plane);
+		struct intel_unpin_work *work;
+
+		spin_lock_irqsave(&dev->event_lock, flags);
+		work = crtc->unpin_work;
+		if (work == NULL) {
+			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
+				   pipe, plane);
+		} else {
+			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
+				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
+					   pipe, plane);
+			} else {
+				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
+					   pipe, plane);
+			}
+			if (work->enable_stall_check)
+				seq_printf(m, "Stall check enabled, ");
+			else
+				seq_printf(m, "Stall check waiting for page flip ioctl, ");
+			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
+
+			if (work->old_fb_obj) {
+				struct drm_i915_gem_object *obj = work->old_fb_obj;
+				if (obj)
+					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+			}
+			if (work->pending_flip_obj) {
+				struct drm_i915_gem_object *obj = work->pending_flip_obj;
+				if (obj)
+					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+			}
+		}
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	return 0;
+}
+
+static int i915_gem_request_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	struct drm_i915_gem_request *gem_request;
+	int ret, count, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	count = 0;
+	for_each_ring(ring, dev_priv, i) {
+		if (list_empty(&ring->request_list))
+			continue;
+
+		seq_printf(m, "%s requests:\n", ring->name);
+		list_for_each_entry(gem_request,
+				    &ring->request_list,
+				    list) {
+			seq_printf(m, "    %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	if (count == 0)
+		seq_printf(m, "No requests\n");
+
+	return 0;
+}
+
+static void i915_ring_seqno_info(struct seq_file *m,
+				 struct intel_ring_buffer *ring)
+{
+	if (ring->get_seqno) {
+		seq_printf(m, "Current sequence (%s): %u\n",
+			   ring->name, ring->get_seqno(ring, false));
+	}
+}
+
+static int i915_gem_seqno_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int ret, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	for_each_ring(ring, dev_priv, i)
+		i915_ring_seqno_info(m, ring);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+
+static int i915_interrupt_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int ret, i, pipe;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (IS_VALLEYVIEW(dev)) {
+		seq_printf(m, "Display IER:\t%08x\n",
+			   I915_READ(VLV_IER));
+		seq_printf(m, "Display IIR:\t%08x\n",
+			   I915_READ(VLV_IIR));
+		seq_printf(m, "Display IIR_RW:\t%08x\n",
+			   I915_READ(VLV_IIR_RW));
+		seq_printf(m, "Display IMR:\t%08x\n",
+			   I915_READ(VLV_IMR));
+		for_each_pipe(pipe)
+			seq_printf(m, "Pipe %c stat:\t%08x\n",
+				   pipe_name(pipe),
+				   I915_READ(PIPESTAT(pipe)));
+
+		seq_printf(m, "Master IER:\t%08x\n",
+			   I915_READ(VLV_MASTER_IER));
+
+		seq_printf(m, "Render IER:\t%08x\n",
+			   I915_READ(GTIER));
+		seq_printf(m, "Render IIR:\t%08x\n",
+			   I915_READ(GTIIR));
+		seq_printf(m, "Render IMR:\t%08x\n",
+			   I915_READ(GTIMR));
+
+		seq_printf(m, "PM IER:\t\t%08x\n",
+			   I915_READ(GEN6_PMIER));
+		seq_printf(m, "PM IIR:\t\t%08x\n",
+			   I915_READ(GEN6_PMIIR));
+		seq_printf(m, "PM IMR:\t\t%08x\n",
+			   I915_READ(GEN6_PMIMR));
+
+		seq_printf(m, "Port hotplug:\t%08x\n",
+			   I915_READ(PORT_HOTPLUG_EN));
+		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
+			   I915_READ(VLV_DPFLIPSTAT));
+		seq_printf(m, "DPINVGTT:\t%08x\n",
+			   I915_READ(DPINVGTT));
+
+	} else if (!HAS_PCH_SPLIT(dev)) {
+		seq_printf(m, "Interrupt enable:    %08x\n",
+			   I915_READ(IER));
+		seq_printf(m, "Interrupt identity:  %08x\n",
+			   I915_READ(IIR));
+		seq_printf(m, "Interrupt mask:      %08x\n",
+			   I915_READ(IMR));
+		for_each_pipe(pipe)
+			seq_printf(m, "Pipe %c stat:         %08x\n",
+				   pipe_name(pipe),
+				   I915_READ(PIPESTAT(pipe)));
+	} else {
+		seq_printf(m, "North Display Interrupt enable:		%08x\n",
+			   I915_READ(DEIER));
+		seq_printf(m, "North Display Interrupt identity:	%08x\n",
+			   I915_READ(DEIIR));
+		seq_printf(m, "North Display Interrupt mask:		%08x\n",
+			   I915_READ(DEIMR));
+		seq_printf(m, "South Display Interrupt enable:		%08x\n",
+			   I915_READ(SDEIER));
+		seq_printf(m, "South Display Interrupt identity:	%08x\n",
+			   I915_READ(SDEIIR));
+		seq_printf(m, "South Display Interrupt mask:		%08x\n",
+			   I915_READ(SDEIMR));
+		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
+			   I915_READ(GTIER));
+		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
+			   I915_READ(GTIIR));
+		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
+			   I915_READ(GTIMR));
+	}
+	seq_printf(m, "Interrupts received: %d\n",
+		   atomic_read(&dev_priv->irq_received));
+	for_each_ring(ring, dev_priv, i) {
+		if (IS_GEN6(dev) || IS_GEN7(dev)) {
+			seq_printf(m,
+				   "Graphics Interrupt mask (%s):	%08x\n",
+				   ring->name, I915_READ_IMR(ring));
+		}
+		i915_ring_seqno_info(m, ring);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
+	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
+		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
+
+		seq_printf(m, "Fence %d, pin count = %d, object = ",
+			   i, dev_priv->fence_regs[i].pin_count);
+		if (obj == NULL)
+			seq_printf(m, "unused");
+		else
+			describe_obj(m, obj);
+		seq_printf(m, "\n");
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+static int i915_hws_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	const u32 *hws;
+	int i;
+
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	hws = ring->status_page.page_addr;
+	if (hws == NULL)
+		return 0;
+
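+	/* Dump the first quarter of the 4KiB status page, four dwords
+	 * per line.
+	 */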
+	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
+		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			   i * 4,
+			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
+	}
+	return 0;
+}
+
+static const char *ring_str(int ring)
+{
+	switch (ring) {
+	case RCS: return "render";
+	case VCS: return "bsd";
+	case BCS: return "blt";
+	default: return "";
+	}
+}
+
+static const char *pin_flag(int pinned)
+{
+	if (pinned > 0)
+		return " P";
+	else if (pinned < 0)
+		return " p";
+	else
+		return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+	switch (tiling) {
+	default:
+	case I915_TILING_NONE: return "";
+	case I915_TILING_X: return " X";
+	case I915_TILING_Y: return " Y";
+	}
+}
+
+static const char *dirty_flag(int dirty)
+{
+	return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+	return purgeable ? " purgeable" : "";
+}
+
+static void print_error_buffers(struct seq_file *m,
+				const char *name,
+				struct drm_i915_error_buffer *err,
+				int count)
+{
+	seq_printf(m, "%s [%d]:\n", name, count);
+
+	while (count--) {
+		seq_printf(m, "  %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
+			   err->gtt_offset,
+			   err->size,
+			   err->read_domains,
+			   err->write_domain,
+			   err->rseqno, err->wseqno,
+			   pin_flag(err->pinned),
+			   tiling_flag(err->tiling),
+			   dirty_flag(err->dirty),
+			   purgeable_flag(err->purgeable),
+			   err->ring != -1 ? " " : "",
+			   ring_str(err->ring),
+			   cache_level_str(err->cache_level));
+
+		if (err->name)
+			seq_printf(m, " (name: %d)", err->name);
+		if (err->fence_reg != I915_FENCE_REG_NONE)
+			seq_printf(m, " (fence: %d)", err->fence_reg);
+
+		seq_printf(m, "\n");
+		err++;
+	}
+}
+
+static void i915_ring_error_state(struct seq_file *m,
+				  struct drm_device *dev,
+				  struct drm_i915_error_state *error,
+				  unsigned ring)
+{
+	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
+	seq_printf(m, "%s command stream:\n", ring_str(ring));
+	seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
+	seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
+	seq_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
+	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
+	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
+	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
+	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
+	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
+		seq_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
+
+	if (INTEL_INFO(dev)->gen >= 4)
+		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
+	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
+	seq_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
+	if (INTEL_INFO(dev)->gen >= 6) {
+		seq_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
+		seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+		seq_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
+			   error->semaphore_mboxes[ring][0],
+			   error->semaphore_seqno[ring][0]);
+		seq_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
+			   error->semaphore_mboxes[ring][1],
+			   error->semaphore_seqno[ring][1]);
+	}
+	seq_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
+	seq_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
+	seq_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
+	seq_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+}
+
+struct i915_error_state_file_priv {
+	struct drm_device *dev;
+	struct drm_i915_error_state *error;
+};
+
+static int i915_error_state(struct seq_file *m, void *unused)
+{
+	struct i915_error_state_file_priv *error_priv = m->private;
+	struct drm_device *dev = error_priv->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_error_state *error = error_priv->error;
+	struct intel_ring_buffer *ring;
+	int i, j, page, offset, elt;
+
+	if (!error) {
+		seq_printf(m, "no error state collected\n");
+		return 0;
+	}
+
+	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+		   error->time.tv_usec);
+	seq_printf(m, "Kernel: " UTS_RELEASE "\n");
+	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+	seq_printf(m, "EIR: 0x%08x\n", error->eir);
+	seq_printf(m, "IER: 0x%08x\n", error->ier);
+	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+	seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+	seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
+	seq_printf(m, "CCID: 0x%08x\n", error->ccid);
+
+	for (i = 0; i < dev_priv->num_fence_regs; i++)
+		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
+
+	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+		seq_printf(m, "  INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		seq_printf(m, "ERROR: 0x%08x\n", error->error);
+		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+	}
+
+	if (INTEL_INFO(dev)->gen == 7)
+		seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
+	for_each_ring(ring, dev_priv, i)
+		i915_ring_error_state(m, dev, error, i);
+
+	if (error->active_bo)
+		print_error_buffers(m, "Active",
+				    error->active_bo,
+				    error->active_bo_count);
+
+	if (error->pinned_bo)
+		print_error_buffers(m, "Pinned",
+				    error->pinned_bo,
+				    error->pinned_bo_count);
+
+	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+		struct drm_i915_error_object *obj;
+
+		if ((obj = error->ring[i].batchbuffer)) {
+			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset);
+			offset = 0;
+			for (page = 0; page < obj->page_count; page++) {
+				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+					seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
+					offset += 4;
+				}
+			}
+		}
+
+		if (error->ring[i].num_requests) {
+			seq_printf(m, "%s --- %d requests\n",
+				   dev_priv->ring[i].name,
+				   error->ring[i].num_requests);
+			for (j = 0; j < error->ring[i].num_requests; j++) {
+				seq_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+					   error->ring[i].requests[j].seqno,
+					   error->ring[i].requests[j].jiffies,
+					   error->ring[i].requests[j].tail);
+			}
+		}
+
+		if ((obj = error->ring[i].ringbuffer)) {
+			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset);
+			offset = 0;
+			for (page = 0; page < obj->page_count; page++) {
+				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+					seq_printf(m, "%08x :  %08x\n",
+						   offset,
+						   obj->pages[page][elt]);
+					offset += 4;
+				}
+			}
+		}
+
+		obj = error->ring[i].ctx;
+		if (obj) {
+			seq_printf(m, "%s --- HW Context = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset);
+			offset = 0;
+			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+				seq_printf(m, "[%04x] %08x %08x %08x %08x\n",
+					   offset,
+					   obj->pages[0][elt],
+					   obj->pages[0][elt+1],
+					   obj->pages[0][elt+2],
+					   obj->pages[0][elt+3]);
+				offset += 16;
+			}
+		}
+	}
+
+	if (error->overlay)
+		intel_overlay_print_error_state(m, error->overlay);
+
+	if (error->display)
+		intel_display_print_error_state(m, dev, error->display);
+
+	return 0;
+}
+
+static ssize_t
+i915_error_state_write(struct file *filp,
+		       const char __user *ubuf,
+		       size_t cnt,
+		       loff_t *ppos)
+{
+	struct seq_file *m = filp->private_data;
+	struct i915_error_state_file_priv *error_priv = m->private;
+	struct drm_device *dev = error_priv->dev;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Resetting error state\n");
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	i915_destroy_error_state(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	return cnt;
+}
+
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct i915_error_state_file_priv *error_priv;
+	unsigned long flags;
+
+	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
+	if (!error_priv)
+		return -ENOMEM;
+
+	error_priv->dev = dev;
+
+	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+	error_priv->error = dev_priv->gpu_error.first_error;
+	if (error_priv->error)
+		kref_get(&error_priv->error->ref);
+	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+	return single_open(file, i915_error_state, error_priv);
+}
+
+static int i915_error_state_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *m = file->private_data;
+	struct i915_error_state_file_priv *error_priv = m->private;
+
+	if (error_priv->error)
+		kref_put(&error_priv->error->ref, i915_error_state_free);
+	kfree(error_priv);
+
+	return single_release(inode, file);
+}
+
+static const struct file_operations i915_error_state_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_error_state_open,
+	.read = seq_read,
+	.write = i915_error_state_write,
+	.llseek = default_llseek,
+	.release = i915_error_state_release,
+};
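+
+/*
+ * Usage sketch: reading this file dumps the most recently captured
+ * hang state, writing any value clears it.  Assuming debugfs is
+ * mounted in the usual place and the GPU is DRM minor 0:
+ *
+ *   cat /sys/kernel/debug/dri/0/i915_error_state > error.txt
+ *   echo 1 > /sys/kernel/debug/dri/0/i915_error_state
+ */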
+
+static int
+i915_next_seqno_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	*val = dev_priv->next_seqno;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int
+i915_next_seqno_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_set_seqno(dev, val);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
+			i915_next_seqno_get, i915_next_seqno_set,
+			"0x%llx\n");
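+
+/*
+ * DEFINE_SIMPLE_ATTRIBUTE() generates the i915_next_seqno_fops above:
+ * reads format the value returned by i915_next_seqno_get() with
+ * "0x%llx\n", writes parse user input back into a u64 and hand it to
+ * i915_next_seqno_set().  The wedged, ring_stop, drop_caches,
+ * max/min_freq and cache_sharing attributes below reuse the same
+ * pattern.
+ */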
+
+static int i915_rstdby_delays(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 crstanddelay;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	crstanddelay = I915_READ16(CRSTANDVID);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
+
+	return 0;
+}
+
+static int i915_cur_delayinfo(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	if (IS_GEN5(dev)) {
+		u16 rgvswctl = I915_READ16(MEMSWCTL);
+		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+
+		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+			   MEMSTAT_VID_SHIFT);
+		seq_printf(m, "Current P-state: %d\n",
+			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		u32 rpstat, cagf;
+		u32 rpupei, rpcurup, rpprevup;
+		u32 rpdownei, rpcurdown, rpprevdown;
+		int max_freq;
+
+		/* RPSTAT1 is in the GT power well */
+		ret = mutex_lock_interruptible(&dev->struct_mutex);
+		if (ret)
+			return ret;
+
+		gen6_gt_force_wake_get(dev_priv);
+
+		rpstat = I915_READ(GEN6_RPSTAT1);
+		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
+		rpcurup = I915_READ(GEN6_RP_CUR_UP);
+		rpprevup = I915_READ(GEN6_RP_PREV_UP);
+		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
+		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
+		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+		if (IS_HASWELL(dev))
+			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+		else
+			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+		cagf *= GT_FREQUENCY_MULTIPLIER;
+
+		gen6_gt_force_wake_put(dev_priv);
+		mutex_unlock(&dev->struct_mutex);
+
+		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
+		seq_printf(m, "Render p-state ratio: %d\n",
+			   (gt_perf_status & 0xff00) >> 8);
+		seq_printf(m, "Render p-state VID: %d\n",
+			   gt_perf_status & 0xff);
+		seq_printf(m, "Render p-state limit: %d\n",
+			   rp_state_limits & 0xff);
+		seq_printf(m, "CAGF: %dMHz\n", cagf);
+		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
+			   GEN6_CURICONT_MASK);
+		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
+			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
+			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
+			   GEN6_CURIAVG_MASK);
+		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
+			   GEN6_CURBSYTAVG_MASK);
+		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
+			   GEN6_CURBSYTAVG_MASK);
+
+		max_freq = (rp_state_cap & 0xff0000) >> 16;
+		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+			   max_freq * GT_FREQUENCY_MULTIPLIER);
+
+		max_freq = (rp_state_cap & 0xff00) >> 8;
+		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+			   max_freq * GT_FREQUENCY_MULTIPLIER);
+
+		max_freq = rp_state_cap & 0xff;
+		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+			   max_freq * GT_FREQUENCY_MULTIPLIER);
+
+		seq_printf(m, "Max overclocked frequency: %dMHz\n",
+			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
+	} else {
+		seq_printf(m, "no P-state info available\n");
+	}
+
+	return 0;
+}
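+
+/*
+ * For reference, RP_STATE_CAP packs three ratios into its low bytes
+ * (bits 23:16 = RPn, 15:8 = RP1, 7:0 = RP0), each in units of
+ * GT_FREQUENCY_MULTIPLIER MHz.  Assuming the usual 50 MHz multiplier,
+ * a raw value of 0x00090b16 decodes to RPn = 450 MHz, RP1 = 550 MHz
+ * and RP0 = 1100 MHz -- the three figures printed above.
+ */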
+
+static int i915_delayfreq_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 delayfreq;
+	int ret, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < 16; i++) {
+		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
+		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
+			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static inline int MAP_TO_MV(int map)
+{
+	return 1250 - (map * 25);
+}
+
+static int i915_inttoext_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 inttoext;
+	int ret, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	for (i = 1; i <= 32; i++) {
+		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
+		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int ironlake_drpc_info(struct seq_file *m)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 rgvmodectl, rstdbyctl;
+	u16 crstandvid;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	rgvmodectl = I915_READ(MEMMODECTL);
+	rstdbyctl = I915_READ(RSTDBYCTL);
+	crstandvid = I915_READ16(CRSTANDVID);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
+		   "yes" : "no");
+	seq_printf(m, "Boost freq: %d\n",
+		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+		   MEMMODE_BOOST_FREQ_SHIFT);
+	seq_printf(m, "HW control enabled: %s\n",
+		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
+	seq_printf(m, "SW control enabled: %s\n",
+		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
+	seq_printf(m, "Gated voltage change: %s\n",
+		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
+	seq_printf(m, "Starting frequency: P%d\n",
+		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+	seq_printf(m, "Max P-state: P%d\n",
+		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+	seq_printf(m, "Render standby enabled: %s\n",
+		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+	seq_printf(m, "Current RS state: ");
+	switch (rstdbyctl & RSX_STATUS_MASK) {
+	case RSX_STATUS_ON:
+		seq_printf(m, "on\n");
+		break;
+	case RSX_STATUS_RC1:
+		seq_printf(m, "RC1\n");
+		break;
+	case RSX_STATUS_RC1E:
+		seq_printf(m, "RC1E\n");
+		break;
+	case RSX_STATUS_RS1:
+		seq_printf(m, "RS1\n");
+		break;
+	case RSX_STATUS_RS2:
+		seq_printf(m, "RS2 (RC6)\n");
+		break;
+	case RSX_STATUS_RS3:
+		seq_printf(m, "RS3 (RC6+)\n");
+		break;
+	default:
+		seq_printf(m, "unknown\n");
+		break;
+	}
+
+	return 0;
+}
+
+static int gen6_drpc_info(struct seq_file *m)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
+	unsigned forcewake_count;
+	int count = 0, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	spin_lock_irq(&dev_priv->gt_lock);
+	forcewake_count = dev_priv->forcewake_count;
+	spin_unlock_irq(&dev_priv->gt_lock);
+
+	if (forcewake_count) {
+		seq_printf(m, "RC information inaccurate because somebody "
+			      "holds a forcewake reference\n");
+	} else {
+		/* NB: we cannot use forcewake, else we read the wrong values */
+		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+			udelay(10);
+		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
+	}
+
+	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
+	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
+
+	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+	rcctl1 = I915_READ(GEN6_RC_CONTROL);
+	mutex_unlock(&dev->struct_mutex);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	seq_printf(m, "Video Turbo Mode: %s\n",
+		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+	seq_printf(m, "HW control enabled: %s\n",
+		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
+	seq_printf(m, "SW control enabled: %s\n",
+		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+			  GEN6_RP_MEDIA_SW_MODE));
+	seq_printf(m, "RC1e Enabled: %s\n",
+		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+	seq_printf(m, "RC6 Enabled: %s\n",
+		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+	seq_printf(m, "Deep RC6 Enabled: %s\n",
+		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+	seq_printf(m, "Deepest RC6 Enabled: %s\n",
+		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+	seq_printf(m, "Current RC state: ");
+	switch (gt_core_status & GEN6_RCn_MASK) {
+	case GEN6_RC0:
+		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+			seq_printf(m, "Core Power Down\n");
+		else
+			seq_printf(m, "on\n");
+		break;
+	case GEN6_RC3:
+		seq_printf(m, "RC3\n");
+		break;
+	case GEN6_RC6:
+		seq_printf(m, "RC6\n");
+		break;
+	case GEN6_RC7:
+		seq_printf(m, "RC7\n");
+		break;
+	default:
+		seq_printf(m, "Unknown\n");
+		break;
+	}
+
+	seq_printf(m, "Core Power Down: %s\n",
+		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+
+	/* Not exactly sure what this is */
+	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
+		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
+	seq_printf(m, "RC6 residency since boot: %u\n",
+		   I915_READ(GEN6_GT_GFX_RC6));
+	seq_printf(m, "RC6+ residency since boot: %u\n",
+		   I915_READ(GEN6_GT_GFX_RC6p));
+	seq_printf(m, "RC6++ residency since boot: %u\n",
+		   I915_READ(GEN6_GT_GFX_RC6pp));
+
+	seq_printf(m, "RC6   voltage: %dmV\n",
+		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+	seq_printf(m, "RC6+  voltage: %dmV\n",
+		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+	seq_printf(m, "RC6++ voltage: %dmV\n",
+		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
+	return 0;
+}
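+
+/*
+ * The three RC6 voltages above arrive packed into a single PCODE
+ * mailbox word, one byte per state.  Assuming the usual gen6 encoding
+ * of mV = vid * 5 + 245 behind GEN6_DECODE_RC6_VID(), a raw byte of
+ * 0x32 (50) would be reported as 495 mV.
+ */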
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+
+	if (IS_GEN6(dev) || IS_GEN7(dev))
+		return gen6_drpc_info(m);
+	else
+		return ironlake_drpc_info(m);
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!I915_HAS_FBC(dev)) {
+		seq_printf(m, "FBC unsupported on this chipset\n");
+		return 0;
+	}
+
+	if (intel_fbc_enabled(dev)) {
+		seq_printf(m, "FBC enabled\n");
+	} else {
+		seq_printf(m, "FBC disabled: ");
+		switch (dev_priv->no_fbc_reason) {
+		case FBC_NO_OUTPUT:
+			seq_printf(m, "no outputs");
+			break;
+		case FBC_STOLEN_TOO_SMALL:
+			seq_printf(m, "not enough stolen memory");
+			break;
+		case FBC_UNSUPPORTED_MODE:
+			seq_printf(m, "mode not supported");
+			break;
+		case FBC_MODE_TOO_LARGE:
+			seq_printf(m, "mode too large");
+			break;
+		case FBC_BAD_PLANE:
+			seq_printf(m, "FBC unsupported on plane");
+			break;
+		case FBC_NOT_TILED:
+			seq_printf(m, "scanout buffer not tiled");
+			break;
+		case FBC_MULTIPLE_PIPES:
+			seq_printf(m, "multiple pipes are enabled");
+			break;
+		case FBC_MODULE_PARAM:
+			seq_printf(m, "disabled per module param (default off)");
+			break;
+		default:
+			seq_printf(m, "unknown reason");
+		}
+		seq_printf(m, "\n");
+	}
+	return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool sr_enabled = false;
+
+	if (HAS_PCH_SPLIT(dev))
+		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
+		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+	else if (IS_I915GM(dev))
+		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+	else if (IS_PINEVIEW(dev))
+		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+
+	seq_printf(m, "self-refresh: %s\n",
+		   sr_enabled ? "enabled" : "disabled");
+
+	return 0;
+}
+
+static int i915_emon_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long temp, chipset, gfx;
+	int ret;
+
+	if (!IS_GEN5(dev))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	temp = i915_mch_val(dev_priv);
+	chipset = i915_chipset_val(dev_priv);
+	gfx = i915_gfx_val(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "GMCH temp: %ld\n", temp);
+	seq_printf(m, "Chipset power: %ld\n", chipset);
+	seq_printf(m, "GFX power: %ld\n", gfx);
+	seq_printf(m, "Total power: %ld\n", chipset + gfx);
+
+	return 0;
+}
+
+static int i915_ring_freq_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	int gpu_freq, ia_freq;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+		seq_printf(m, "unsupported on this chipset\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+
+	for (gpu_freq = dev_priv->rps.min_delay;
+	     gpu_freq <= dev_priv->rps.max_delay;
+	     gpu_freq++) {
+		ia_freq = gpu_freq;
+		sandybridge_pcode_read(dev_priv,
+				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
+				       &ia_freq);
+		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
+			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
+			   ((ia_freq >> 0) & 0xff) * 100,
+			   ((ia_freq >> 8) & 0xff) * 100);
+	}
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return 0;
+}
+
+static int i915_gfxec(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
+	int ret;
+
+	if (data == NULL)
+		return -ENOMEM;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		goto out;
+
+	if (opregion->header) {
+		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
+		seq_write(m, data, OPREGION_SIZE);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+out:
+	kfree(data);
+	return ret;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_fbdev *ifbdev;
+	struct intel_framebuffer *fb;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	if (ret)
+		return ret;
+
+	ifbdev = dev_priv->fbdev;
+	fb = to_intel_framebuffer(ifbdev->helper.fb);
+
+	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+		   fb->base.width,
+		   fb->base.height,
+		   fb->base.depth,
+		   fb->base.bits_per_pixel,
+		   atomic_read(&fb->base.refcount.refcount));
+	describe_obj(m, fb->obj);
+	seq_printf(m, "\n");
+	mutex_unlock(&dev->mode_config.mutex);
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+		if (&fb->base == ifbdev->helper.fb)
+			continue;
+
+		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
+			   fb->base.width,
+			   fb->base.height,
+			   fb->base.depth,
+			   fb->base.bits_per_pixel,
+			   atomic_read(&fb->base.refcount.refcount));
+		describe_obj(m, fb->obj);
+		seq_printf(m, "\n");
+	}
+	mutex_unlock(&dev->mode_config.fb_lock);
+
+	return 0;
+}
+
+static int i915_context_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int ret, i;
+
+	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	if (ret)
+		return ret;
+
+	if (dev_priv->ips.pwrctx) {
+		seq_printf(m, "power context ");
+		describe_obj(m, dev_priv->ips.pwrctx);
+		seq_printf(m, "\n");
+	}
+
+	if (dev_priv->ips.renderctx) {
+		seq_printf(m, "render context ");
+		describe_obj(m, dev_priv->ips.renderctx);
+		seq_printf(m, "\n");
+	}
+
+	for_each_ring(ring, dev_priv, i) {
+		if (ring->default_context) {
+			seq_printf(m, "HW default context %s ring ", ring->name);
+			describe_obj(m, ring->default_context->obj);
+			seq_printf(m, "\n");
+		}
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+
+static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned forcewake_count;
+
+	spin_lock_irq(&dev_priv->gt_lock);
+	forcewake_count = dev_priv->forcewake_count;
+	spin_unlock_irq(&dev_priv->gt_lock);
+
+	seq_printf(m, "forcewake count = %u\n", forcewake_count);
+
+	return 0;
+}
+
+static const char *swizzle_string(unsigned swizzle)
+{
+	switch (swizzle) {
+	case I915_BIT_6_SWIZZLE_NONE:
+		return "none";
+	case I915_BIT_6_SWIZZLE_9:
+		return "bit9";
+	case I915_BIT_6_SWIZZLE_9_10:
+		return "bit9/bit10";
+	case I915_BIT_6_SWIZZLE_9_11:
+		return "bit9/bit11";
+	case I915_BIT_6_SWIZZLE_9_10_11:
+		return "bit9/bit10/bit11";
+	case I915_BIT_6_SWIZZLE_9_17:
+		return "bit9/bit17";
+	case I915_BIT_6_SWIZZLE_9_10_17:
+		return "bit9/bit10/bit17";
+	case I915_BIT_6_SWIZZLE_UNKNOWN:
+		return "unknown";
+	}
+
+	return "bug";
+}
+
+static int i915_swizzle_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
+		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
+		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+
+	if (IS_GEN3(dev) || IS_GEN4(dev)) {
+		seq_printf(m, "DCC = 0x%08x\n",
+			   I915_READ(DCC));
+		seq_printf(m, "C0DRB3 = 0x%04x\n",
+			   I915_READ16(C0DRB3));
+		seq_printf(m, "C1DRB3 = 0x%04x\n",
+			   I915_READ16(C1DRB3));
+	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
+			   I915_READ(MAD_DIMM_C0));
+		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
+			   I915_READ(MAD_DIMM_C1));
+		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
+			   I915_READ(MAD_DIMM_C2));
+		seq_printf(m, "TILECTL = 0x%08x\n",
+			   I915_READ(TILECTL));
+		seq_printf(m, "ARB_MODE = 0x%08x\n",
+			   I915_READ(ARB_MODE));
+		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
+			   I915_READ(DISP_ARB_CTL));
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_ppgtt_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int i, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	if (INTEL_INFO(dev)->gen == 6)
+		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
+
+	for_each_ring(ring, dev_priv, i) {
+		seq_printf(m, "%s\n", ring->name);
+		if (INTEL_INFO(dev)->gen == 7)
+			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
+		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
+		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
+		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+	}
+	if (dev_priv->mm.aliasing_ppgtt) {
+		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+		seq_printf(m, "aliasing PPGTT:\n");
+		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+	}
+	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_dpio_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!IS_VALLEYVIEW(dev)) {
+		seq_printf(m, "unsupported\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
+
+	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_DIV_A));
+	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_DIV_B));
+
+	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
+	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
+
+	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+
+	seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
+	seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
+
+	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
+		   intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	return 0;
+}
+
+static int
+i915_wedged_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
+
+	return 0;
+}
+
+static int
+i915_wedged_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+
+	DRM_INFO("Manually setting wedged to %llu\n", val);
+	i915_handle_error(dev, val);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
+			i915_wedged_get, i915_wedged_set,
+			"%llu\n");
+
+static int
+i915_ring_stop_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	*val = dev_priv->gpu_error.stop_rings;
+
+	return 0;
+}
+
+static int
+i915_ring_stop_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	dev_priv->gpu_error.stop_rings = val;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
+			i915_ring_stop_get, i915_ring_stop_set,
+			"0x%08llx\n");
+
+#define DROP_UNBOUND 0x1
+#define DROP_BOUND 0x2
+#define DROP_RETIRE 0x4
+#define DROP_ACTIVE 0x8
+#define DROP_ALL (DROP_UNBOUND | \
+		  DROP_BOUND | \
+		  DROP_RETIRE | \
+		  DROP_ACTIVE)
+static int
+i915_drop_caches_get(void *data, u64 *val)
+{
+	*val = DROP_ALL;
+
+	return 0;
+}
+
+static int
+i915_drop_caches_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj, *next;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
+
+	/* No need to check and wait for gpu resets; only libdrm auto-restarts
+	 * ioctls on -EAGAIN. */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (val & DROP_ACTIVE) {
+		ret = i915_gpu_idle(dev);
+		if (ret)
+			goto unlock;
+	}
+
+	if (val & (DROP_RETIRE | DROP_ACTIVE))
+		i915_gem_retire_requests(dev);
+
+	if (val & DROP_BOUND) {
+		list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
+			if (obj->pin_count == 0) {
+				ret = i915_gem_object_unbind(obj);
+				if (ret)
+					goto unlock;
+			}
+	}
+
+	if (val & DROP_UNBOUND) {
+		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+			if (obj->pages_pin_count == 0) {
+				ret = i915_gem_object_put_pages(obj);
+				if (ret)
+					goto unlock;
+			}
+	}
+
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
+			i915_drop_caches_get, i915_drop_caches_set,
+			"0x%08llx\n");
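+
+/*
+ * Usage sketch for the i915_gem_drop_caches file registered below:
+ * write a mask of DROP_* bits to force the matching reclaim paths,
+ * e.g. (assuming DRM minor 0)
+ *
+ *   echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
+ *
+ * idles the GPU, retires outstanding requests and unbinds/frees every
+ * idle object -- handy for putting the GTT into a known state before
+ * a test.
+ */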
+
+static int
+i915_max_freq_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	if (ret)
+		return ret;
+
+	*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return 0;
+}
+
+static int
+i915_max_freq_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
+
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	if (ret)
+		return ret;
+
+	/*
+	 * Turbo will still be enabled, but won't go above the set value.
+	 */
+	do_div(val, GT_FREQUENCY_MULTIPLIER);
+	dev_priv->rps.max_delay = val;
+	gen6_set_rps(dev, val);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
+			i915_max_freq_get, i915_max_freq_set,
+			"%llu\n");
+
+static int
+i915_min_freq_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	if (ret)
+		return ret;
+
+	*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return 0;
+}
+
+static int
+i915_min_freq_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
+
+	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	if (ret)
+		return ret;
+
+	/*
+	 * Turbo will still be enabled, but won't go below the set value.
+	 */
+	do_div(val, GT_FREQUENCY_MULTIPLIER);
+	dev_priv->rps.min_delay = val;
+	gen6_set_rps(dev, val);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
+			i915_min_freq_get, i915_min_freq_set,
+			"%llu\n");
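+
+/*
+ * Both frequency files speak MHz to userspace: the get paths multiply
+ * the internal rps delay by GT_FREQUENCY_MULTIPLIER and the set paths
+ * divide by it, so assuming the 50 MHz multiplier,
+ * "echo 450 > i915_min_freq" stores a delay of 9.  Values that are
+ * not a multiple of the multiplier are rounded down by do_div().
+ */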
+
+static int
+i915_cache_sharing_get(void *data, u64 *val)
+{
+	struct drm_device *dev = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 snpcr;
+	int ret;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+	mutex_unlock(&dev->struct_mutex);
+
+	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
+
+	return 0;
+}
+
+static int
+i915_cache_sharing_set(void *data, u64 val)
+{
+	struct drm_device *dev = data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 snpcr;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+		return -ENODEV;
+
+	if (val > 3)
+		return -EINVAL;
+
+	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
+
+	/* Update the cache sharing policy here as well */
+	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+	snpcr &= ~GEN6_MBC_SNPCR_MASK;
+	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
+	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
+			i915_cache_sharing_get, i915_cache_sharing_set,
+			"%llu\n");
+
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated, we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+		       struct dentry *ent,
+		       const void *key)
+{
+	struct drm_info_node *node;
+
+	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+	if (node == NULL) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *) key;
+
+	mutex_lock(&minor->debugfs_lock);
+	list_add(&node->list, &minor->debugfs_list);
+	mutex_unlock(&minor->debugfs_lock);
+
+	return 0;
+}
+
+static int i915_forcewake_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return 0;
+
+	gen6_gt_force_wake_get(dev_priv);
+
+	return 0;
+}
+
+static int i915_forcewake_release(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen < 6)
+		return 0;
+
+	gen6_gt_force_wake_put(dev_priv);
+
+	return 0;
+}
+
+static const struct file_operations i915_forcewake_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_forcewake_open,
+	.release = i915_forcewake_release,
+};
+
+static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+
+	ent = debugfs_create_file("i915_forcewake_user",
+				  S_IRUSR,
+				  root, dev,
+				  &i915_forcewake_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
+}
+
+static int i915_debugfs_create(struct dentry *root,
+			       struct drm_minor *minor,
+			       const char *name,
+			       const struct file_operations *fops)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+
+	ent = debugfs_create_file(name,
+				  S_IRUGO | S_IWUSR,
+				  root, dev,
+				  fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	return drm_add_fake_info_node(minor, ent, fops);
+}
+
+static struct drm_info_list i915_debugfs_list[] = {
+	{"i915_capabilities", i915_capabilities, 0},
+	{"i915_gem_objects", i915_gem_object_info, 0},
+	{"i915_gem_gtt", i915_gem_gtt_info, 0},
+	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
+	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
+	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
+	{"i915_gem_request", i915_gem_request_info, 0},
+	{"i915_gem_seqno", i915_gem_seqno_info, 0},
+	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
+	{"i915_gem_interrupt", i915_interrupt_info, 0},
+	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+	{"i915_rstdby_delays", i915_rstdby_delays, 0},
+	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
+	{"i915_delayfreq_table", i915_delayfreq_table, 0},
+	{"i915_inttoext_table", i915_inttoext_table, 0},
+	{"i915_drpc_info", i915_drpc_info, 0},
+	{"i915_emon_status", i915_emon_status, 0},
+	{"i915_ring_freq_table", i915_ring_freq_table, 0},
+	{"i915_gfxec", i915_gfxec, 0},
+	{"i915_fbc_status", i915_fbc_status, 0},
+	{"i915_sr_status", i915_sr_status, 0},
+	{"i915_opregion", i915_opregion, 0},
+	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
+	{"i915_context_status", i915_context_status, 0},
+	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
+	{"i915_swizzle_info", i915_swizzle_info, 0},
+	{"i915_ppgtt_info", i915_ppgtt_info, 0},
+	{"i915_dpio", i915_dpio_info, 0},
+};
+#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
+
+int i915_debugfs_init(struct drm_minor *minor)
+{
+	int ret;
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_wedged",
+				  &i915_wedged_fops);
+	if (ret)
+		return ret;
+
+	ret = i915_forcewake_create(minor->debugfs_root, minor);
+	if (ret)
+		return ret;
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_max_freq",
+				  &i915_max_freq_fops);
+	if (ret)
+		return ret;
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_min_freq",
+				  &i915_min_freq_fops);
+	if (ret)
+		return ret;
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_cache_sharing",
+				  &i915_cache_sharing_fops);
+	if (ret)
+		return ret;
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_ring_stop",
+				  &i915_ring_stop_fops);
+	if (ret)
+		return ret;
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_gem_drop_caches",
+				  &i915_drop_caches_fops);
+	if (ret)
+		return ret;
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_error_state",
+				  &i915_error_state_fops);
+	if (ret)
+		return ret;
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				 "i915_next_seqno",
+				 &i915_next_seqno_fops);
+	if (ret)
+		return ret;
+
+	return drm_debugfs_create_files(i915_debugfs_list,
+					I915_DEBUGFS_ENTRIES,
+					minor->debugfs_root, minor);
+}
+
+void i915_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(i915_debugfs_list,
+				 I915_DEBUGFS_ENTRIES, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
+				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
+				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
+				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
+				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
+				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
+				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
+				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
+				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
+				 1, minor);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_dma.c b/linux-imx/drivers/gpu/drm/i915/i915_dma.c
new file mode 100644
index 0000000..ccfc636
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_dma.c
@@ -0,0 +1,1933 @@
+/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include <linux/pci.h>
+#include <linux/vgaarb.h>
+#include <linux/acpi.h>
+#include <linux/pnp.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+#include <acpi/video.h>
+#include <asm/pat.h>
+
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+	intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+	intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+	intel_ring_advance(LP_RING(dev_priv))
+
+/**
+ * Lock test used when the lock is only needed to synchronize ring access.
+ *
+ * In that case we can skip the test once GEM is initialized, as nobody
+ * else has access to the ring.
+ */
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
+	if (LP_RING(dev->dev_private)->obj == NULL)			\
+		LOCK_TEST_WITH_RETURN(dev, file);			\
+} while (0)
+
+static inline u32
+intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
+{
+	if (I915_NEED_GFX_HWS(dev_priv->dev))
+		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
+	else
+		return intel_read_status_page(LP_RING(dev_priv), reg);
+}
+
+#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_BREADCRUMB_INDEX		0x21
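+
+/*
+ * The breadcrumb is a monotonically increasing counter that the ring
+ * writes into dword I915_BREADCRUMB_INDEX (0x21) of the status page
+ * via MI_STORE_DWORD_INDEX (see i915_emit_breadcrumb() below), so
+ * READ_BREADCRUMB() reports the last command sequence the GPU has
+ * actually completed.
+ */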
+
+void i915_update_dri1_breadcrumb(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv;
+
+	/*
+	 * The dri breadcrumb update races against the drm master disappearing.
+	 * Instead of trying to fix this (this is by far not the only ums issue)
+	 * just don't do the update in kms mode.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	if (dev->primary->master) {
+		master_priv = dev->primary->master->driver_priv;
+		if (master_priv->sarea_priv)
+			master_priv->sarea_priv->last_dispatch =
+				READ_BREADCRUMB(dev_priv);
+	}
+}
+
+static void i915_write_hws_pga(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 addr;
+
+	addr = dev_priv->status_page_dmah->busaddr;
+	if (INTEL_INFO(dev)->gen >= 4)
+		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+	I915_WRITE(HWS_PGA, addr);
+}
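+
+/*
+ * On gen4+ HWS_PGA takes a 36-bit physical address, with bits 35:32 of
+ * the page address folded into register bits 7:4 -- that is what the
+ * (busaddr >> 28) & 0xf0 above computes; older parts only take the
+ * low 32 bits.
+ */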
+
+/**
+ * Frees the hardware status page, whether it's a physical address or a virtual
+ * address set up by the X Server.
+ */
+static void i915_free_hws(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+	if (dev_priv->status_page_dmah) {
+		drm_pci_free(dev, dev_priv->status_page_dmah);
+		dev_priv->status_page_dmah = NULL;
+	}
+
+	if (ring->status_page.gfx_addr) {
+		ring->status_page.gfx_addr = 0;
+		iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
+	}
+
+	/* Need to rewrite hardware status page */
+	I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
+void i915_kernel_lost_context(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+	/*
+	 * We should never lose context on the ring with modesetting
+	 * as we don't expose it to userspace
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
+	if (ring->space < 0)
+		ring->space += ring->size;
+
+	if (!dev->primary->master)
+		return;
+
+	master_priv = dev->primary->master->driver_priv;
+	if (ring->head == ring->tail && master_priv->sarea_priv)
+		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
+}
+
+static int i915_dma_cleanup(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	/* Make sure interrupts are disabled here because the uninstall ioctl
+	 * may not have been called from userspace and after dev_private
+	 * is freed, it's too late.
+	 */
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+	mutex_lock(&dev->struct_mutex);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
+	mutex_unlock(&dev->struct_mutex);
+
+	/* Clear the HWS virtual address at teardown */
+	if (I915_NEED_GFX_HWS(dev))
+		i915_free_hws(dev);
+
+	return 0;
+}
+
+static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+	int ret;
+
+	master_priv->sarea = drm_getsarea(dev);
+	if (master_priv->sarea) {
+		master_priv->sarea_priv = (drm_i915_sarea_t *)
+			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
+	} else {
+		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
+	}
+
+	if (init->ring_size != 0) {
+		if (LP_RING(dev_priv)->obj != NULL) {
+			i915_dma_cleanup(dev);
+			DRM_ERROR("Client tried to initialize ringbuffer in "
+				  "GEM mode\n");
+			return -EINVAL;
+		}
+
+		ret = intel_render_ring_init_dri(dev,
+						 init->ring_start,
+						 init->ring_size);
+		if (ret) {
+			i915_dma_cleanup(dev);
+			return ret;
+		}
+	}
+
+	dev_priv->dri1.cpp = init->cpp;
+	dev_priv->dri1.back_offset = init->back_offset;
+	dev_priv->dri1.front_offset = init->front_offset;
+	dev_priv->dri1.current_page = 0;
+	if (master_priv->sarea_priv)
+		master_priv->sarea_priv->pf_current_page = 0;
+
+	/* Allow hardware batchbuffers unless told otherwise.
+	 */
+	dev_priv->dri1.allow_batchbuffer = 1;
+
+	return 0;
+}
+
+static int i915_dma_resume(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+	DRM_DEBUG_DRIVER("%s\n", __func__);
+
+	if (ring->virtual_start == NULL) {
+		DRM_ERROR("cannot ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Program Hardware Status Page */
+	if (!ring->status_page.page_addr) {
+		DRM_ERROR("Cannot find hardware status page\n");
+		return -EINVAL;
+	}
+	DRM_DEBUG_DRIVER("hw status page @ %p\n",
+				ring->status_page.page_addr);
+	if (ring->status_page.gfx_addr != 0)
+		intel_ring_setup_status_page(ring);
+	else
+		i915_write_hws_pga(dev);
+
+	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
+
+	return 0;
+}
+
+static int i915_dma_init(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_init_t *init = data;
+	int retcode = 0;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	switch (init->func) {
+	case I915_INIT_DMA:
+		retcode = i915_initialize(dev, init);
+		break;
+	case I915_CLEANUP_DMA:
+		retcode = i915_dma_cleanup(dev);
+		break;
+	case I915_RESUME_DMA:
+		retcode = i915_dma_resume(dev);
+		break;
+	default:
+		retcode = -EINVAL;
+		break;
+	}
+
+	return retcode;
+}
+
+/* Implement basically the same security restrictions as hardware does
+ * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
+ *
+ * Most of the calculations below involve calculating the size of a
+ * particular instruction.  It's important to get the size right as
+ * that tells us where the next instruction to check is.  Any illegal
+ * instruction detected will be given a size of zero, which is a
+ * signal to abort the rest of the buffer.
+ */
+static int validate_cmd(int cmd)
+{
+	switch (((cmd >> 29) & 0x7)) {
+	case 0x0:
+		switch ((cmd >> 23) & 0x3f) {
+		case 0x0:
+			return 1;	/* MI_NOOP */
+		case 0x4:
+			return 1;	/* MI_FLUSH */
+		default:
+			return 0;	/* disallow everything else */
+		}
+		break;
+	case 0x1:
+		return 0;	/* reserved */
+	case 0x2:
+		return (cmd & 0xff) + 2;	/* 2d commands */
+	case 0x3:
+		if (((cmd >> 24) & 0x1f) <= 0x18)
+			return 1;
+
+		switch ((cmd >> 24) & 0x1f) {
+		case 0x1c:
+			return 1;
+		case 0x1d:
+			switch ((cmd >> 16) & 0xff) {
+			case 0x3:
+				return (cmd & 0x1f) + 2;
+			case 0x4:
+				return (cmd & 0xf) + 2;
+			default:
+				return (cmd & 0xffff) + 2;
+			}
+		case 0x1e:
+			if (cmd & (1 << 23))
+				return (cmd & 0xffff) + 1;
+			else
+				return 1;
+		case 0x1f:
+			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
+				return (cmd & 0x1ffff) + 2;
+			else if (cmd & (1 << 17)) {	/* indirect random */
+				if ((cmd & 0xffff) == 0)
+					return 0;	/* unknown length, too hard */
+				else
+					return (((cmd & 0xffff) + 1) / 2) + 1;
+			} else
+				return 2;	/* indirect sequential */
+		default:
+			return 0;
+		}
+	default:
+		return 0;
+	}
+
+	return 0;
+}
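+
+/*
+ * Worked example: bits 31:29 select the opcode class, so a 2D blit
+ * header such as cmd = 0x54f00006 lands in the 0x2 branch and returns
+ * (0x06 + 2) = 8 dwords, which is where i915_emit_cmds() resumes its
+ * scan.  A reserved class (0x1) or an unrecognised MI command returns
+ * 0 and the whole buffer is rejected.
+ */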
+
+static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i, ret;
+
+	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
+		return -EINVAL;
+
+	for (i = 0; i < dwords;) {
+		int sz = validate_cmd(buffer[i]);
+		if (sz == 0 || i + sz > dwords)
+			return -EINVAL;
+		i += sz;
+	}
+
+	ret = BEGIN_LP_RING((dwords+1)&~1);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < dwords; i++)
+		OUT_RING(buffer[i]);
+	if (dwords & 1)
+		OUT_RING(0);
+
+	ADVANCE_LP_RING();
+
+	return 0;
+}
+
+int
+i915_emit_box(struct drm_device *dev,
+	      struct drm_clip_rect *box,
+	      int DR1, int DR4)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+	    box->y2 <= 0 || box->x2 <= 0) {
+		DRM_ERROR("Bad box %d,%d..%d,%d\n",
+			  box->x1, box->y1, box->x2, box->y2);
+		return -EINVAL;
+	}
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		ret = BEGIN_LP_RING(4);
+		if (ret)
+			return ret;
+
+		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
+		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
+		OUT_RING(DR4);
+	} else {
+		ret = BEGIN_LP_RING(6);
+		if (ret)
+			return ret;
+
+		OUT_RING(GFX_OP_DRAWRECT_INFO);
+		OUT_RING(DR1);
+		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
+		OUT_RING(DR4);
+		OUT_RING(0);
+	}
+	ADVANCE_LP_RING();
+
+	return 0;
+}
+
+/* XXX: Emitting the counter should really be moved to part of the IRQ
+ * emit. For now, do it in both places:
+ */
+
+static void i915_emit_breadcrumb(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+
+	dev_priv->dri1.counter++;
+	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+		dev_priv->dri1.counter = 0;
+	if (master_priv->sarea_priv)
+		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
+
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->dri1.counter);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
+}
+
+static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+				   drm_i915_cmdbuffer_t *cmd,
+				   struct drm_clip_rect *cliprects,
+				   void *cmdbuf)
+{
+	int nbox = cmd->num_cliprects;
+	int i = 0, count, ret;
+
+	if (cmd->sz & 0x3) {
+		DRM_ERROR("alignment");
+		return -EINVAL;
+	}
+
+	i915_kernel_lost_context(dev);
+
+	count = nbox ? nbox : 1;
+
+	for (i = 0; i < count; i++) {
+		if (i < nbox) {
+			ret = i915_emit_box(dev, &cliprects[i],
+					    cmd->DR1, cmd->DR4);
+			if (ret)
+				return ret;
+		}
+
+		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
+		if (ret)
+			return ret;
+	}
+
+	i915_emit_breadcrumb(dev);
+	return 0;
+}
+
+static int i915_dispatch_batchbuffer(struct drm_device * dev,
+				     drm_i915_batchbuffer_t * batch,
+				     struct drm_clip_rect *cliprects)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int nbox = batch->num_cliprects;
+	int i, count, ret;
+
+	if ((batch->start | batch->used) & 0x7) {
+		DRM_ERROR("alignment");
+		return -EINVAL;
+	}
+
+	i915_kernel_lost_context(dev);
+
+	count = nbox ? nbox : 1;
+	for (i = 0; i < count; i++) {
+		if (i < nbox) {
+			ret = i915_emit_box(dev, &cliprects[i],
+					    batch->DR1, batch->DR4);
+			if (ret)
+				return ret;
+		}
+
+		if (!IS_I830(dev) && !IS_845G(dev)) {
+			ret = BEGIN_LP_RING(2);
+			if (ret)
+				return ret;
+
+			if (INTEL_INFO(dev)->gen >= 4) {
+				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
+				OUT_RING(batch->start);
+			} else {
+				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
+				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+			}
+		} else {
+			ret = BEGIN_LP_RING(4);
+			if (ret)
+				return ret;
+
+			OUT_RING(MI_BATCH_BUFFER);
+			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+			OUT_RING(batch->start + batch->used - 4);
+			OUT_RING(0);
+		}
+		ADVANCE_LP_RING();
+	}
+
+	if (IS_G4X(dev) || IS_GEN5(dev)) {
+		if (BEGIN_LP_RING(2) == 0) {
+			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+			OUT_RING(MI_NOOP);
+			ADVANCE_LP_RING();
+		}
+	}
+
+	i915_emit_breadcrumb(dev);
+	return 0;
+}
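+
+/*
+ * Note that every legacy batch is dispatched non-secure: gen4+ sets
+ * the NON_SECURE bit in the MI_BATCH_BUFFER_START header, older parts
+ * OR it into the start address, and i830/845 must use the fixed-length
+ * MI_BATCH_BUFFER form with an explicit end address -- so DRI1
+ * userspace can never promote a batch to privileged execution.
+ */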
+
+static int i915_dispatch_flip(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv =
+		dev->primary->master->driver_priv;
+	int ret;
+
+	if (!master_priv->sarea_priv)
+		return -EINVAL;
+
+	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
+			 __func__,
+			 dev_priv->dri1.current_page,
+			 master_priv->sarea_priv->pf_current_page);
+
+	i915_kernel_lost_context(dev);
+
+	ret = BEGIN_LP_RING(10);
+	if (ret)
+		return ret;
+
+	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
+	OUT_RING(0);
+
+	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
+	OUT_RING(0);
+	if (dev_priv->dri1.current_page == 0) {
+		OUT_RING(dev_priv->dri1.back_offset);
+		dev_priv->dri1.current_page = 1;
+	} else {
+		OUT_RING(dev_priv->dri1.front_offset);
+		dev_priv->dri1.current_page = 0;
+	}
+	OUT_RING(0);
+
+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
+	OUT_RING(0);
+
+	ADVANCE_LP_RING();
+
+	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
+
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->dri1.counter);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
+
+	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
+	return 0;
+}
+
+static int i915_quiescent(struct drm_device *dev)
+{
+	i915_kernel_lost_context(dev);
+	return intel_ring_idle(LP_RING(dev->dev_private));
+}
+
+static int i915_flush_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	int ret;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_quiescent(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static int i915_batchbuffer(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+	    master_priv->sarea_priv;
+	drm_i915_batchbuffer_t *batch = data;
+	int ret;
+	struct drm_clip_rect *cliprects = NULL;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	if (!dev_priv->dri1.allow_batchbuffer) {
+		DRM_ERROR("Batchbuffer ioctl disabled\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
+			batch->start, batch->used, batch->num_cliprects);
+
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (batch->num_cliprects < 0)
+		return -EINVAL;
+
+	if (batch->num_cliprects) {
+		cliprects = kcalloc(batch->num_cliprects,
+				    sizeof(struct drm_clip_rect),
+				    GFP_KERNEL);
+		if (cliprects == NULL)
+			return -ENOMEM;
+
+		ret = copy_from_user(cliprects, batch->cliprects,
+				     batch->num_cliprects *
+				     sizeof(struct drm_clip_rect));
+		if (ret != 0) {
+			ret = -EFAULT;
+			goto fail_free;
+		}
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (sarea_priv)
+		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+fail_free:
+	kfree(cliprects);
+
+	return ret;
+}
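+
+/*
+ * Editor's sketch (illustrative only): how a legacy DRI1 client would
+ * drive the batchbuffer ioctl above, assuming the drm_i915_batchbuffer_t
+ * layout from <drm/i915_drm.h>. fd, batch_start and batch_bytes are
+ * hypothetical.
+ *
+ *	drm_i915_batchbuffer_t bb = {
+ *		.start = batch_start,	// GTT offset of the batch
+ *		.used = batch_bytes,	// length of valid commands, in bytes
+ *		.num_cliprects = 0,	// one dispatch, no per-rect replay
+ *		.cliprects = NULL,
+ *	};
+ *	if (ioctl(fd, DRM_IOCTL_I915_BATCHBUFFER, &bb))
+ *		perror("I915_BATCHBUFFER");
+ */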
+
+static int i915_cmdbuffer(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+	    master_priv->sarea_priv;
+	drm_i915_cmdbuffer_t *cmdbuf = data;
+	struct drm_clip_rect *cliprects = NULL;
+	void *batch_data;
+	int ret;
+
+	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (cmdbuf->num_cliprects < 0)
+		return -EINVAL;
+
+	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
+	if (batch_data == NULL)
+		return -ENOMEM;
+
+	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
+	if (ret != 0) {
+		ret = -EFAULT;
+		goto fail_batch_free;
+	}
+
+	if (cmdbuf->num_cliprects) {
+		cliprects = kcalloc(cmdbuf->num_cliprects,
+				    sizeof(struct drm_clip_rect), GFP_KERNEL);
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
+			goto fail_batch_free;
+		}
+
+		ret = copy_from_user(cliprects, cmdbuf->cliprects,
+				     cmdbuf->num_cliprects *
+				     sizeof(struct drm_clip_rect));
+		if (ret != 0) {
+			ret = -EFAULT;
+			goto fail_clip_free;
+		}
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret) {
+		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
+		goto fail_clip_free;
+	}
+
+	if (sarea_priv)
+		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+fail_clip_free:
+	kfree(cliprects);
+fail_batch_free:
+	kfree(batch_data);
+
+	return ret;
+}
+
+static int i915_emit_irq(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+
+	i915_kernel_lost_context(dev);
+
+	DRM_DEBUG_DRIVER("\n");
+
+	dev_priv->dri1.counter++;
+	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+		dev_priv->dri1.counter = 1;
+	if (master_priv->sarea_priv)
+		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
+
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->dri1.counter);
+		OUT_RING(MI_USER_INTERRUPT);
+		ADVANCE_LP_RING();
+	}
+
+	return dev_priv->dri1.counter;
+}
+
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+	int ret = 0;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
+	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
+		  READ_BREADCRUMB(dev_priv));
+
+	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+		if (master_priv->sarea_priv)
+			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+		return 0;
+	}
+
+	if (master_priv->sarea_priv)
+		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+	if (ring->irq_get(ring)) {
+		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+			    READ_BREADCRUMB(dev_priv) >= irq_nr);
+		ring->irq_put(ring);
+	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+		ret = -EBUSY;
+
+	if (ret == -EBUSY) {
+		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
+	}
+
+	return ret;
+}
+
+/* Needs the lock as it touches the ring.
+ */
+static int i915_irq_emit(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_irq_emit_t *emit = data;
+	int result;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	mutex_lock(&dev->struct_mutex);
+	result = i915_emit_irq(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+static int i915_irq_wait(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_irq_wait_t *irqwait = data;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	return i915_wait_irq(dev, irqwait->irq_seq);
+}
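+
+/*
+ * Editor's sketch (illustrative only): the DRI1 fencing pattern built on
+ * the two ioctls above -- emit a breadcrumb, keep going, then block until
+ * the GPU has written a value >= that breadcrumb to its status page.
+ * Assumes the drm_i915_irq_emit_t/drm_i915_irq_wait_t layouts from
+ * <drm/i915_drm.h>; fd is a hypothetical open DRM node.
+ *
+ *	int seq = 0;
+ *	drm_i915_irq_emit_t emit = { .irq_seq = &seq };
+ *	drm_i915_irq_wait_t wait;
+ *
+ *	ioctl(fd, DRM_IOCTL_I915_IRQ_EMIT, &emit);	// seq = new breadcrumb
+ *	// ... queue more work ...
+ *	wait.irq_seq = seq;
+ *	ioctl(fd, DRM_IOCTL_I915_IRQ_WAIT, &wait);	// blocks until passed
+ */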
+
+static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_vblank_pipe_t *pipe = data;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+
+	return 0;
+}
+
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+static int i915_vblank_swap(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	/* The delayed swap mechanism was fundamentally racy, and has been
+	 * removed.  The model was that the client requested a delayed flip/swap
+	 * from the kernel, then waited for vblank before continuing to perform
+	 * rendering.  The problem was that the kernel might wake the client
+	 * up before it dispatched the vblank swap (since the lock has to be
+	 * held while touching the ringbuffer), in which case the client would
+	 * clear and start the next frame before the swap occurred, and
+	 * flicker would occur in addition to likely missing the vblank.
+	 *
+	 * In the absence of this ioctl, userland falls back to a correct path
+	 * of waiting for a vblank, then dispatching the swap on its own.
+	 * Context switching to userland and back is plenty fast enough for
+	 * meeting the requirements of vblank swapping.
+	 */
+	return -EINVAL;
+}
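+
+/*
+ * Editor's sketch (illustrative only) of the correct userland fallback
+ * described above: wait for the vblank with the core DRM ioctl, then
+ * dispatch the swap yourself. Assumes the union drm_wait_vblank uapi
+ * from <drm/drm.h>; fd is a hypothetical open DRM node.
+ *
+ *	union drm_wait_vblank vbl;
+ *
+ *	memset(&vbl, 0, sizeof(vbl));
+ *	vbl.request.type = _DRM_VBLANK_RELATIVE;
+ *	vbl.request.sequence = 1;	// the next vblank
+ *	ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
+ *	// now emit the swap, e.g. via DRM_IOCTL_I915_FLIP
+ */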
+
+static int i915_flip_bufs(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	int ret;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	DRM_DEBUG_DRIVER("%s\n", __func__);
+
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_dispatch_flip(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static int i915_getparam(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_getparam_t *param = data;
+	int value;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	switch (param->param) {
+	case I915_PARAM_IRQ_ACTIVE:
+		value = dev->pdev->irq ? 1 : 0;
+		break;
+	case I915_PARAM_ALLOW_BATCHBUFFER:
+		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
+		break;
+	case I915_PARAM_LAST_DISPATCH:
+		value = READ_BREADCRUMB(dev_priv);
+		break;
+	case I915_PARAM_CHIPSET_ID:
+		value = dev->pci_device;
+		break;
+	case I915_PARAM_HAS_GEM:
+		value = 1;
+		break;
+	case I915_PARAM_NUM_FENCES_AVAIL:
+		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
+		break;
+	case I915_PARAM_HAS_OVERLAY:
+		value = dev_priv->overlay ? 1 : 0;
+		break;
+	case I915_PARAM_HAS_PAGEFLIPPING:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_EXECBUF2:
+		/* depends on GEM */
+		value = 1;
+		break;
+	case I915_PARAM_HAS_BSD:
+		value = intel_ring_initialized(&dev_priv->ring[VCS]);
+		break;
+	case I915_PARAM_HAS_BLT:
+		value = intel_ring_initialized(&dev_priv->ring[BCS]);
+		break;
+	case I915_PARAM_HAS_RELAXED_FENCING:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_COHERENT_RINGS:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_EXEC_CONSTANTS:
+		value = INTEL_INFO(dev)->gen >= 4;
+		break;
+	case I915_PARAM_HAS_RELAXED_DELTA:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_GEN7_SOL_RESET:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_LLC:
+		value = HAS_LLC(dev);
+		break;
+	case I915_PARAM_HAS_ALIASING_PPGTT:
+		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
+		break;
+	case I915_PARAM_HAS_WAIT_TIMEOUT:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_SEMAPHORES:
+		value = i915_semaphore_is_enabled(dev);
+		break;
+	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_SECURE_BATCHES:
+		value = capable(CAP_SYS_ADMIN);
+		break;
+	case I915_PARAM_HAS_PINNED_BATCHES:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_EXEC_NO_RELOC:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+		value = 1;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
+				 param->param);
+		return -EINVAL;
+	}
+
+	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+		DRM_ERROR("DRM_COPY_TO_USER failed\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
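+
+/*
+ * Editor's sketch (illustrative only): querying one of the parameters
+ * above from userspace. GETPARAM always stores a single int through the
+ * user pointer. Assumes the drm_i915_getparam_t layout from
+ * <drm/i915_drm.h>; fd is a hypothetical open DRM node and
+ * use_blt_ring() a hypothetical helper.
+ *
+ *	int has_blt = 0;
+ *	drm_i915_getparam_t gp = {
+ *		.param = I915_PARAM_HAS_BLT,
+ *		.value = &has_blt,
+ *	};
+ *
+ *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_blt)
+ *		use_blt_ring();		// safe to emit BLT-ring commands
+ */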
+
+static int i915_setparam(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_setparam_t *param = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	switch (param->param) {
+	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
+		break;
+	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
+		break;
+	case I915_SETPARAM_ALLOW_BATCHBUFFER:
+		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
+		break;
+	case I915_SETPARAM_NUM_USED_FENCES:
+		if (param->value > dev_priv->num_fence_regs ||
+		    param->value < 0)
+			return -EINVAL;
+		/* Userspace can use first N regs */
+		dev_priv->fence_reg_start = param->value;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown parameter %d\n",
+					param->param);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int i915_set_status_page(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_hws_addr_t *hws = data;
+	struct intel_ring_buffer *ring;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	if (!I915_NEED_GFX_HWS(dev))
+		return -EINVAL;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
+
+	ring = LP_RING(dev_priv);
+	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
+
+	dev_priv->dri1.gfx_hws_cpu_addr =
+		ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
+	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
+		i915_dma_cleanup(dev);
+		ring->status_page.gfx_addr = 0;
+		DRM_ERROR("can not ioremap virtual address for"
+				" G33 hw status page\n");
+		return -ENOMEM;
+	}
+
+	memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
+	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+
+	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
+			 ring->status_page.gfx_addr);
+	DRM_DEBUG_DRIVER("load hws at %p\n",
+			 ring->status_page.page_addr);
+	return 0;
+}
+
+static int i915_get_bridge_dev(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+	if (!dev_priv->bridge_dev) {
+		DRM_ERROR("bridge device not found\n");
+		return -1;
+	}
+	return 0;
+}
+
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4*4096)
+
+#define DEVEN_REG 0x54
+#define   DEVEN_MCHBAR_EN (1 << 28)
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp_lo, temp_hi = 0;
+	u64 mchbar_addr;
+	int ret;
+
+	if (INTEL_INFO(dev)->gen >= 4)
+		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+	if (mchbar_addr &&
+	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+		return 0;
+#endif
+
+	/* Get some space for it */
+	dev_priv->mch_res.name = "i915 MCHBAR";
+	dev_priv->mch_res.flags = IORESOURCE_MEM;
+	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+				     &dev_priv->mch_res,
+				     MCHBAR_SIZE, MCHBAR_SIZE,
+				     PCIBIOS_MIN_MEM,
+				     0, pcibios_align_resource,
+				     dev_priv->bridge_dev);
+	if (ret) {
+		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+		dev_priv->mch_res.start = 0;
+		return ret;
+	}
+
+	if (INTEL_INFO(dev)->gen >= 4)
+		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+				       upper_32_bits(dev_priv->mch_res.start));
+
+	pci_write_config_dword(dev_priv->bridge_dev, reg,
+			       lower_32_bits(dev_priv->mch_res.start));
+	return 0;
+}
+
+/* Set up MCHBAR if possible; remember whether to disable it at teardown */
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp;
+	bool enabled;
+
+	dev_priv->mchbar_need_disable = false;
+
+	if (IS_I915G(dev) || IS_I915GM(dev)) {
+		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+		enabled = !!(temp & DEVEN_MCHBAR_EN);
+	} else {
+		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+		enabled = temp & 1;
+	}
+
+	/* If it's already enabled, don't have to do anything */
+	if (enabled)
+		return;
+
+	if (intel_alloc_mchbar_resource(dev))
+		return;
+
+	dev_priv->mchbar_need_disable = true;
+
+	/* Space is allocated or reserved, so enable it. */
+	if (IS_I915G(dev) || IS_I915GM(dev)) {
+		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+				       temp | DEVEN_MCHBAR_EN);
+	} else {
+		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+	}
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+	u32 temp;
+
+	if (dev_priv->mchbar_need_disable) {
+		if (IS_I915G(dev) || IS_I915GM(dev)) {
+			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+			temp &= ~DEVEN_MCHBAR_EN;
+			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+		} else {
+			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+			temp &= ~1;
+			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
+		}
+	}
+
+	if (dev_priv->mch_res.start)
+		release_resource(&dev_priv->mch_res);
+}
+
+/* true = enable decode, false = disable decode */
+static unsigned int i915_vga_set_decode(void *cookie, bool state)
+{
+	struct drm_device *dev = cookie;
+
+	intel_modeset_vga_set_state(dev, state);
+	if (state)
+		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+	else
+		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+	if (state == VGA_SWITCHEROO_ON) {
+		pr_info("switched on\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+		/* i915 resume handler doesn't set to D0 */
+		pci_set_power_state(dev->pdev, PCI_D0);
+		i915_resume(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	} else {
+		pr_err("switched off\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+		i915_suspend(dev, pmm);
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+	}
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	bool can_switch;
+
+	spin_lock(&dev->count_lock);
+	can_switch = (dev->open_count == 0);
+	spin_unlock(&dev->count_lock);
+	return can_switch;
+}
+
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+	.set_gpu_state = i915_switcheroo_set_state,
+	.reprobe = NULL,
+	.can_switch = i915_switcheroo_can_switch,
+};
+
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = intel_parse_bios(dev);
+	if (ret)
+		DRM_INFO("failed to find VBIOS tables\n");
+
+	/* If we have more than one VGA card installed, we need to arbitrate
+	 * access to the common VGA resources.
+	 *
+	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+	 * then we do not take part in VGA arbitration and
+	 * vga_client_register() fails with -ENODEV.
+	 */
+	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+	if (ret && ret != -ENODEV)
+		goto out;
+
+	intel_register_dsm_handler();
+
+	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
+	if (ret)
+		goto cleanup_vga_client;
+
+	/* Initialise stolen first so that we may reserve preallocated
+	 * objects for the BIOS to KMS transition.
+	 */
+	ret = i915_gem_init_stolen(dev);
+	if (ret)
+		goto cleanup_vga_switcheroo;
+
+	ret = drm_irq_install(dev);
+	if (ret)
+		goto cleanup_gem_stolen;
+
+	/* Important: The output setup functions called by modeset_init need
+	 * working irqs for e.g. gmbus and dp aux transfers. */
+	intel_modeset_init(dev);
+
+	ret = i915_gem_init(dev);
+	if (ret)
+		goto cleanup_irq;
+
+	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
+
+	intel_modeset_gem_init(dev);
+
+	/* Always safe in the mode setting case. */
+	/* FIXME: do pre/post-mode set stuff in core KMS code */
+	dev->vblank_disable_allowed = 1;
+	if (INTEL_INFO(dev)->num_pipes == 0) {
+		dev_priv->mm.suspended = 0;
+		return 0;
+	}
+
+	ret = intel_fbdev_init(dev);
+	if (ret)
+		goto cleanup_gem;
+
+	/* Only enable hotplug handling once the fbdev is fully set up. */
+	intel_hpd_init(dev);
+
+	/*
+	 * Some ports need correctly set-up hpd registers for detection to
+	 * work properly (otherwise we get a ghost "connected" connector
+	 * status), e.g. VGA on gm45.  Hence we can only set up the initial
+	 * fbdev config after hpd irqs are fully enabled. Ideally we would
+	 * scan for the initial config only once hotplug handling is
+	 * enabled, but due to screwed-up locking around kms/fbdev init we
+	 * can't protect the fbdev initial config scanning against hotplug
+	 * events. Hence do this first and accept the tiny window where we
+	 * will lose hotplug notifications.
+	 */
+	intel_fbdev_initial_config(dev);
+
+	/* Only enable hotplug handling once the fbdev is fully set up. */
+	dev_priv->enable_hotplug_processing = true;
+
+	drm_kms_helper_poll_init(dev);
+
+	/* We're off and running w/KMS */
+	dev_priv->mm.suspended = 0;
+
+	return 0;
+
+cleanup_gem:
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_cleanup_ringbuffer(dev);
+	mutex_unlock(&dev->struct_mutex);
+	i915_gem_cleanup_aliasing_ppgtt(dev);
+cleanup_irq:
+	drm_irq_uninstall(dev);
+cleanup_gem_stolen:
+	i915_gem_cleanup_stolen(dev);
+cleanup_vga_switcheroo:
+	vga_switcheroo_unregister_client(dev->pdev);
+cleanup_vga_client:
+	vga_client_register(dev->pdev, NULL, NULL, NULL);
+out:
+	return ret;
+}
+
+int i915_master_create(struct drm_device *dev, struct drm_master *master)
+{
+	struct drm_i915_master_private *master_priv;
+
+	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
+	if (!master_priv)
+		return -ENOMEM;
+
+	master->driver_priv = master_priv;
+	return 0;
+}
+
+void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
+{
+	struct drm_i915_master_private *master_priv = master->driver_priv;
+
+	if (!master_priv)
+		return;
+
+	kfree(master_priv);
+
+	master->driver_priv = NULL;
+}
+
+static void
+i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
+		unsigned long size)
+{
+	dev_priv->mm.gtt_mtrr = -1;
+
+#if defined(CONFIG_X86_PAT)
+	if (cpu_has_pat)
+		return;
+#endif
+
+	/* Set up a WC MTRR for non-PAT systems.  This is more common than
+	 * one would think, because the kernel disables PAT on first
+	 * generation Core chips, where a WC PAT entry would be overridden
+	 * by a UC MTRR if one were present -- and it does so even when no
+	 * UC MTRR is present.
+	 */
+	dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
+	if (dev_priv->mm.gtt_mtrr < 0) {
+		DRM_INFO("MTRR allocation failed.  Graphics "
+			 "performance may suffer.\n");
+	}
+}
+
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+	struct apertures_struct *ap;
+	struct pci_dev *pdev = dev_priv->dev->pdev;
+	bool primary;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return;
+
+	ap->ranges[0].base = dev_priv->gtt.mappable_base;
+	ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start;
+
+	primary =
+		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+	kfree(ap);
+}
+
+static void i915_dump_device_info(struct drm_i915_private *dev_priv)
+{
+	const struct intel_device_info *info = dev_priv->info;
+
+#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
+#define DEV_INFO_SEP ,
+	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+			 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+			 info->gen,
+			 dev_priv->dev->pdev->device,
+			 DEV_INFO_FLAGS);
+#undef DEV_INFO_FLAG
+#undef DEV_INFO_SEP
+}
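+
+/*
+ * Editor's note: DEV_INFO_FLAGS above is an X-macro list -- a sequence of
+ * DEV_INFO_FLAG(name) entries joined by DEV_INFO_SEP -- so locally
+ * redefining those two macros turns the one flag list into a matching run
+ * of "%s" arguments. A self-contained miniature of the pattern
+ * (FLAG/SEP/FLAGS are illustrative names, not the driver's):
+ *
+ *	#define FLAGS	FLAG(is_mobile) SEP FLAG(has_fbc)
+ *	#define FLAG(x)	(info->x ? #x "," : "")
+ *	#define SEP	,
+ *	printk("flags=%s%s\n", FLAGS);	// prints e.g. "flags=is_mobile,"
+ *	#undef FLAG
+ *	#undef SEP
+ */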
+
+/**
+ * intel_early_sanitize_regs - clean up BIOS state
+ * @dev: DRM device
+ *
+ * This function must be called before we do any I915_READ or I915_WRITE. Its
+ * purpose is to clean up any state left by the BIOS that may affect us when
+ * reading and/or writing registers.
+ */
+static void intel_early_sanitize_regs(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_HASWELL(dev))
+		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ *   - drive output discovery via intel_modeset_init()
+ *   - initialize the memory manager
+ *   - allocate initial config memory
+ *   - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct drm_i915_private *dev_priv;
+	struct intel_device_info *info;
+	int ret = 0, mmio_bar, mmio_size;
+	uint32_t aperture_size;
+
+	info = (struct intel_device_info *) flags;
+
+	/* Refuse to load on gen6+ without kms enabled. */
+	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	/* i915 has 4 more counters */
+	dev->counters += 4;
+	dev->types[6] = _DRM_STAT_IRQ;
+	dev->types[7] = _DRM_STAT_PRIMARY;
+	dev->types[8] = _DRM_STAT_SECONDARY;
+	dev->types[9] = _DRM_STAT_DMA;
+
+	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	dev->dev_private = (void *)dev_priv;
+	dev_priv->dev = dev;
+	dev_priv->info = info;
+
+	spin_lock_init(&dev_priv->irq_lock);
+	spin_lock_init(&dev_priv->gpu_error.lock);
+	spin_lock_init(&dev_priv->rps.lock);
+	spin_lock_init(&dev_priv->gt_lock);
+	mutex_init(&dev_priv->dpio_lock);
+	mutex_init(&dev_priv->rps.hw_lock);
+	mutex_init(&dev_priv->modeset_restore_lock);
+
+	i915_dump_device_info(dev_priv);
+
+	if (i915_get_bridge_dev(dev)) {
+		ret = -EIO;
+		goto free_priv;
+	}
+
+	mmio_bar = IS_GEN2(dev) ? 1 : 0;
+	/* Before gen4, the registers and the GTT are behind different BARs.
+	 * However, from gen4 onwards, the registers and the GTT are shared
+	 * in the same BAR, so we want to restrict this ioremap from
+	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
+	 * the register BAR remains the same size for all the earlier
+	 * generations up to Ironlake.
+	 */
+	if (info->gen < 5)
+		mmio_size = 512*1024;
+	else
+		mmio_size = 2*1024*1024;
+
+	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+	if (!dev_priv->regs) {
+		DRM_ERROR("failed to map registers\n");
+		ret = -EIO;
+		goto put_bridge;
+	}
+
+	intel_early_sanitize_regs(dev);
+
+	ret = i915_gem_gtt_init(dev);
+	if (ret)
+		goto put_bridge;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		i915_kick_out_firmware_fb(dev_priv);
+
+	pci_set_master(dev->pdev);
+
+	/* overlay on gen2 is broken and can't address above 1G */
+	if (IS_GEN2(dev))
+		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+
+	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
+	 * using 32bit addressing, overwriting memory if HWS is located
+	 * above 4GB.
+	 *
+	 * The documentation also mentions an issue with undefined
+	 * behaviour if any general state is accessed within a page above 4GB,
+	 * which also needs to be handled carefully.
+	 */
+	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
+	aperture_size = dev_priv->gtt.mappable_end;
+
+	dev_priv->gtt.mappable =
+		io_mapping_create_wc(dev_priv->gtt.mappable_base,
+				     aperture_size);
+	if (dev_priv->gtt.mappable == NULL) {
+		ret = -EIO;
+		goto out_rmmap;
+	}
+
+	i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base,
+			aperture_size);
+
+	/* The i915 workqueue is primarily used for batched retirement of
+	 * requests (and thus managing bo) once the task has been completed
+	 * by the GPU. i915_gem_retire_requests() is called directly when we
+	 * need high-priority retirement, such as waiting for an explicit
+	 * bo.
+	 *
+	 * It is also used for periodic low-priority events, such as
+	 * idle-timers and recording error state.
+	 *
+	 * All tasks on the workqueue are expected to acquire the dev mutex
+	 * so there is no point in running more than one instance of the
+	 * workqueue at any time.  Use an ordered one.
+	 */
+	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+	if (dev_priv->wq == NULL) {
+		DRM_ERROR("Failed to create our workqueue.\n");
+		ret = -ENOMEM;
+		goto out_mtrrfree;
+	}
+
+	/* This must be called before any calls to HAS_PCH_* */
+	intel_detect_pch(dev);
+
+	intel_irq_init(dev);
+	intel_pm_init(dev);
+	intel_gt_sanitize(dev);
+	intel_gt_init(dev);
+
+	/* Try to make sure MCHBAR is enabled before poking at it */
+	intel_setup_mchbar(dev);
+	intel_setup_gmbus(dev);
+	intel_opregion_setup(dev);
+
+	intel_setup_bios(dev);
+
+	i915_gem_load(dev);
+
+	/* On the 945G/GM, the chipset reports the MSI capability on the
+	 * integrated graphics even though the support isn't actually there
+	 * according to the published specs.  It doesn't appear to function
+	 * correctly in testing on 945G.
+	 * This may be a side effect of MSI having been made available for PEG
+	 * and the registers being closely associated.
+	 *
+	 * According to chipset errata, on the 965GM, MSI interrupts may
+	 * be lost or delayed, but we use them anyways to avoid
+	 * stuck interrupts on some machines.
+	 */
+	if (!IS_I945G(dev) && !IS_I945GM(dev))
+		pci_enable_msi(dev->pdev);
+
+	dev_priv->num_plane = 1;
+	if (IS_VALLEYVIEW(dev))
+		dev_priv->num_plane = 2;
+
+	if (INTEL_INFO(dev)->num_pipes) {
+		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
+		if (ret)
+			goto out_gem_unload;
+	}
+
+	/* Start out suspended */
+	dev_priv->mm.suspended = 1;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = i915_load_modeset_init(dev);
+		if (ret < 0) {
+			DRM_ERROR("failed to init modeset\n");
+			goto out_gem_unload;
+		}
+	}
+
+	i915_setup_sysfs(dev);
+
+	if (INTEL_INFO(dev)->num_pipes) {
+		/* Must be done after probing outputs */
+		intel_opregion_init(dev);
+		acpi_video_register();
+	}
+
+	if (IS_GEN5(dev))
+		intel_gpu_ips_init(dev_priv);
+
+	return 0;
+
+out_gem_unload:
+	if (dev_priv->mm.inactive_shrinker.shrink)
+		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
+	if (dev->pdev->msi_enabled)
+		pci_disable_msi(dev->pdev);
+
+	intel_teardown_gmbus(dev);
+	intel_teardown_mchbar(dev);
+	pm_qos_remove_request(&dev_priv->pm_qos);
+	destroy_workqueue(dev_priv->wq);
+out_mtrrfree:
+	if (dev_priv->mm.gtt_mtrr >= 0) {
+		mtrr_del(dev_priv->mm.gtt_mtrr,
+			 dev_priv->gtt.mappable_base,
+			 aperture_size);
+		dev_priv->mm.gtt_mtrr = -1;
+	}
+	io_mapping_free(dev_priv->gtt.mappable);
+	dev_priv->gtt.gtt_remove(dev);
+out_rmmap:
+	pci_iounmap(dev->pdev, dev_priv->regs);
+put_bridge:
+	pci_dev_put(dev_priv->bridge_dev);
+free_priv:
+	kfree(dev_priv);
+	return ret;
+}
+
+int i915_driver_unload(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	intel_gpu_ips_teardown();
+
+	i915_teardown_sysfs(dev);
+
+	if (dev_priv->mm.inactive_shrinker.shrink)
+		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gpu_idle(dev);
+	if (ret)
+		DRM_ERROR("failed to idle hardware: %d\n", ret);
+	i915_gem_retire_requests(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	/* Cancel the retire work handler, which should be idle now. */
+	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+
+	io_mapping_free(dev_priv->gtt.mappable);
+	if (dev_priv->mm.gtt_mtrr >= 0) {
+		mtrr_del(dev_priv->mm.gtt_mtrr,
+			 dev_priv->gtt.mappable_base,
+			 dev_priv->gtt.mappable_end);
+		dev_priv->mm.gtt_mtrr = -1;
+	}
+
+	acpi_video_unregister();
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		intel_fbdev_fini(dev);
+		intel_modeset_cleanup(dev);
+		cancel_work_sync(&dev_priv->console_resume_work);
+
+		/*
+		 * free the memory space allocated for the child device
+		 * config parsed from VBT
+		 */
+		if (dev_priv->child_dev && dev_priv->child_dev_num) {
+			kfree(dev_priv->child_dev);
+			dev_priv->child_dev = NULL;
+			dev_priv->child_dev_num = 0;
+		}
+
+		vga_switcheroo_unregister_client(dev->pdev);
+		vga_client_register(dev->pdev, NULL, NULL, NULL);
+	}
+
+	/* Free error state after interrupts are fully disabled. */
+	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+	cancel_work_sync(&dev_priv->gpu_error.work);
+	i915_destroy_error_state(dev);
+
+	if (dev->pdev->msi_enabled)
+		pci_disable_msi(dev->pdev);
+
+	intel_opregion_fini(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/* Flush any outstanding unpin_work. */
+		flush_workqueue(dev_priv->wq);
+
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_free_all_phys_object(dev);
+		i915_gem_cleanup_ringbuffer(dev);
+		i915_gem_context_fini(dev);
+		mutex_unlock(&dev->struct_mutex);
+		i915_gem_cleanup_aliasing_ppgtt(dev);
+		i915_gem_cleanup_stolen(dev);
+
+		if (!I915_NEED_GFX_HWS(dev))
+			i915_free_hws(dev);
+	}
+
+	if (dev_priv->regs != NULL)
+		pci_iounmap(dev->pdev, dev_priv->regs);
+
+	intel_teardown_gmbus(dev);
+	intel_teardown_mchbar(dev);
+
+	destroy_workqueue(dev_priv->wq);
+	pm_qos_remove_request(&dev_priv->pm_qos);
+
+	if (dev_priv->slab)
+		kmem_cache_destroy(dev_priv->slab);
+
+	pci_dev_put(dev_priv->bridge_dev);
+	kfree(dev->dev_private);
+
+	return 0;
+}
+
+int i915_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv;
+
+	DRM_DEBUG_DRIVER("\n");
+	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+
+	spin_lock_init(&file_priv->mm.lock);
+	INIT_LIST_HEAD(&file_priv->mm.request_list);
+
+	idr_init(&file_priv->context_idr);
+
+	return 0;
+}
+
+/**
+ * i915_driver_lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited.  In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ *
+ * Additionally, in the non-mode setting case, we'll tear down the GTT
+ * and DMA structures, since the kernel won't be using them, and clean
+ * up any GEM state.
+ */
+void i915_driver_lastclose(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	/* On gen6+ we refuse to init without kms enabled, but then the drm core
+	 * goes right around and calls lastclose. Check for this and don't clean
+	 * up anything. */
+	if (!dev_priv)
+		return;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		intel_fb_restore_mode(dev);
+		vga_switcheroo_process_delayed_switch();
+		return;
+	}
+
+	i915_gem_lastclose(dev);
+
+	i915_dma_cleanup(dev);
+}
+
+void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+{
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_context_close(dev, file_priv);
+	i915_gem_release(dev, file_priv);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	kfree(file_priv);
+}
+
+struct drm_ioctl_desc i915_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
+};
+
+int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
+
+/*
+ * This is really ugly: because old userspace abused the linux agp interface to
+ * manage the gtt, we need to claim that all intel devices are agp; otherwise
+ * the drm core refuses to initialize the agp support code.
+ */
+int i915_driver_device_is_agp(struct drm_device * dev)
+{
+	return 1;
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_drv.c b/linux-imx/drivers/gpu/drm/i915/i915_drv.c
new file mode 100644
index 0000000..bc6cd31
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_drv.c
@@ -0,0 +1,1344 @@
+/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/device.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+#include <linux/console.h>
+#include <linux/module.h>
+#include <drm/drm_crtc_helper.h>
+
+static int i915_modeset __read_mostly = -1;
+module_param_named(modeset, i915_modeset, int, 0400);
+MODULE_PARM_DESC(modeset,
+		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+		"1=on, -1=force vga console preference [default])");
+
+unsigned int i915_fbpercrtc __always_unused = 0;
+module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
+
+int i915_panel_ignore_lid __read_mostly = 1;
+module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+MODULE_PARM_DESC(panel_ignore_lid,
+		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
+		"-1=force lid closed, -2=force lid open)");
+
+unsigned int i915_powersave __read_mostly = 1;
+module_param_named(powersave, i915_powersave, int, 0600);
+MODULE_PARM_DESC(powersave,
+		"Enable powersavings, fbc, downclocking, etc. (default: true)");
+
+int i915_semaphores __read_mostly = -1;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+MODULE_PARM_DESC(semaphores,
+		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
+
+int i915_enable_rc6 __read_mostly = -1;
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
+MODULE_PARM_DESC(i915_enable_rc6,
+		"Enable power-saving render C-state 6. "
+		"Different stages can be selected via bitmask values "
+		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
+		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
+		"default: -1 (use per-chip default)");
+
+int i915_enable_fbc __read_mostly = -1;
+module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
+MODULE_PARM_DESC(i915_enable_fbc,
+		"Enable frame buffer compression for power savings "
+		"(default: -1 (use per-chip default))");
+
+unsigned int i915_lvds_downclock __read_mostly = 0;
+module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+MODULE_PARM_DESC(lvds_downclock,
+		"Use panel (LVDS/eDP) downclocking for power savings "
+		"(default: false)");
+
+int i915_lvds_channel_mode __read_mostly;
+module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
+MODULE_PARM_DESC(lvds_channel_mode,
+		 "Specify LVDS channel mode "
+		 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
+int i915_panel_use_ssc __read_mostly = -1;
+module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
+MODULE_PARM_DESC(lvds_use_ssc,
+		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
+		"(default: auto from VBT)");
+
+int i915_vbt_sdvo_panel_type __read_mostly = -1;
+module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
+MODULE_PARM_DESC(vbt_sdvo_panel_type,
+		"Override/Ignore selection of SDVO panel mode in the VBT "
+		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
+
+static bool i915_try_reset __read_mostly = true;
+module_param_named(reset, i915_try_reset, bool, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+
+bool i915_enable_hangcheck __read_mostly = true;
+module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
+MODULE_PARM_DESC(enable_hangcheck,
+		"Periodically check GPU activity for detecting hangs. "
+		"WARNING: Disabling this can cause system wide hangs. "
+		"(default: true)");
+
+int i915_enable_ppgtt __read_mostly = -1;
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
+MODULE_PARM_DESC(i915_enable_ppgtt,
+		"Enable PPGTT (default: -1 (use per-chip default))");
+
+unsigned int i915_preliminary_hw_support __read_mostly = 0;
+module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
+MODULE_PARM_DESC(preliminary_hw_support,
+		"Enable preliminary hardware support. (default: false)");
+
+int i915_disable_power_well __read_mostly = 0;
+module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
+MODULE_PARM_DESC(disable_power_well,
+		 "Disable the power well when possible (default: false)");
+
+static struct drm_driver driver;
+extern int intel_agp_enabled;
+
+#define INTEL_VGA_DEVICE(id, info) {		\
+	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
+	.class_mask = 0xff0000,			\
+	.vendor = 0x8086,			\
+	.device = id,				\
+	.subvendor = PCI_ANY_ID,		\
+	.subdevice = PCI_ANY_ID,		\
+	.driver_data = (unsigned long) info }
+
+#define INTEL_QUANTA_VGA_DEVICE(info) {		\
+	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
+	.class_mask = 0xff0000,			\
+	.vendor = 0x8086,			\
+	.device = 0x16a,			\
+	.subvendor = 0x152d,			\
+	.subdevice = 0x8990,			\
+	.driver_data = (unsigned long) info }
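+
+/*
+ * For reference (editor's sketch): one entry of the device table below,
+ * INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), expands to a
+ * struct pci_device_id initializer roughly like:
+ *
+ *	{ .class = PCI_BASE_CLASS_DISPLAY << 16,
+ *	  .class_mask = 0xff0000,	// match the base class byte only
+ *	  .vendor = 0x8086,		// Intel
+ *	  .device = 0x0046,
+ *	  .subvendor = PCI_ANY_ID,
+ *	  .subdevice = PCI_ANY_ID,
+ *	  .driver_data = (unsigned long) &intel_ironlake_m_info }
+ */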
+
+static const struct intel_device_info intel_i830_info = {
+	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_845g_info = {
+	.gen = 2, .num_pipes = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i85x_info = {
+	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+	.cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i865g_info = {
+	.gen = 2, .num_pipes = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+};
+
+static const struct intel_device_info intel_i915g_info = {
+	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+};
+static const struct intel_device_info intel_i915gm_info = {
+	.gen = 3, .is_mobile = 1, .num_pipes = 2,
+	.cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.supports_tv = 1,
+};
+static const struct intel_device_info intel_i945g_info = {
+	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+};
+static const struct intel_device_info intel_i945gm_info = {
+	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+	.has_hotplug = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.supports_tv = 1,
+};
+
+static const struct intel_device_info intel_i965g_info = {
+	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
+	.has_hotplug = 1,
+	.has_overlay = 1,
+};
+
+static const struct intel_device_info intel_i965gm_info = {
+	.gen = 4, .is_crestline = 1, .num_pipes = 2,
+	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+	.has_overlay = 1,
+	.supports_tv = 1,
+};
+
+static const struct intel_device_info intel_g33_info = {
+	.gen = 3, .is_g33 = 1, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_overlay = 1,
+};
+
+static const struct intel_device_info intel_g45_info = {
+	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
+	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_gm45_info = {
+	.gen = 4, .is_g4x = 1, .num_pipes = 2,
+	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
+	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	.supports_tv = 1,
+	.has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_pineview_info = {
+	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_overlay = 1,
+};
+
+static const struct intel_device_info intel_ironlake_d_info = {
+	.gen = 5, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_ironlake_m_info = {
+	.gen = 5, .is_mobile = 1, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
+	.has_bsd_ring = 1,
+};
+
+static const struct intel_device_info intel_sandybridge_d_info = {
+	.gen = 6, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
+	.has_llc = 1,
+	.has_force_wake = 1,
+};
+
+static const struct intel_device_info intel_sandybridge_m_info = {
+	.gen = 6, .is_mobile = 1, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
+	.has_llc = 1,
+	.has_force_wake = 1,
+};
+
+#define GEN7_FEATURES  \
+	.gen = 7, .num_pipes = 3, \
+	.need_gfx_hws = 1, .has_hotplug = 1, \
+	.has_bsd_ring = 1, \
+	.has_blt_ring = 1, \
+	.has_llc = 1, \
+	.has_force_wake = 1
+
+static const struct intel_device_info intel_ivybridge_d_info = {
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_info = {
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
+	.is_mobile = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_q_info = {
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
+	.num_pipes = 0, /* legal, last one wins */
+};
+
+static const struct intel_device_info intel_valleyview_m_info = {
+	GEN7_FEATURES,
+	.is_mobile = 1,
+	.num_pipes = 2,
+	.is_valleyview = 1,
+	.display_mmio_offset = VLV_DISPLAY_BASE,
+	.has_llc = 0, /* legal, last one wins */
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+	GEN7_FEATURES,
+	.num_pipes = 2,
+	.is_valleyview = 1,
+	.display_mmio_offset = VLV_DISPLAY_BASE,
+	.has_llc = 0, /* legal, last one wins */
+};
+
+static const struct intel_device_info intel_haswell_d_info = {
+	GEN7_FEATURES,
+	.is_haswell = 1,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+	GEN7_FEATURES,
+	.is_haswell = 1,
+	.is_mobile = 1,
+};
+
+static const struct pci_device_id pciidlist[] = {		/* aka */
+	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
+	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
+	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
+	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
+	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
+	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
+	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
+	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
+	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
+	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
+	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
+	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
+	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
+	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
+	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
+	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
+	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
+	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
+	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
+	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
+	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
+	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
+	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
+	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
+	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
+	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
+	INTEL_VGA_DEVICE(0x2e92, &intel_g45_info),		/* B43_G.1 */
+	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
+	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
+	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
+	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
+	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
+	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
+	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
+	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
+	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
+	INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */
+	INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */
+	INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
+	INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
+	INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
+	INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */
+	INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
+	INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
+	INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
+	INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */
+	INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
+	INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
+	INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */
+	INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
+	INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
+	INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
+	INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */
+	INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */
+	INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */
+	INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */
+	INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */
+	INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */
+	INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
+	INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
+	INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */
+	INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
+	INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
+	INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */
+	INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
+	INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
+	INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */
+	INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */
+	INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */
+	INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */
+	INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */
+	INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */
+	INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */
+	INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
+	INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
+	INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */
+	INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
+	INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
+	INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */
+	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
+	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
+	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */
+	INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */
+	INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */
+	INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */
+	INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */
+	INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */
+	INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */
+	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
+	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
+	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */
+	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
+	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
+	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */
+	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
+	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
+	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */
+	INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */
+	INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */
+	INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */
+	INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */
+	INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */
+	INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */
+	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
+	INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info),
+	INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info),
+	INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info),
+	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
+	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
+	{0, 0, 0}
+};
+
+#if defined(CONFIG_DRM_I915_KMS)
+MODULE_DEVICE_TABLE(pci, pciidlist);
+#endif
+
+void intel_detect_pch(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct pci_dev *pch;
+
+	/* In all current cases, num_pipes == 0 corresponds to the PCH_NOP
+	 * setting (which really amounts to a PCH, but with no South Display).
+	 */
+	if (INTEL_INFO(dev)->num_pipes == 0) {
+		dev_priv->pch_type = PCH_NOP;
+		dev_priv->num_pch_pll = 0;
+		return;
+	}
+
+	/*
+	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+	 * make graphics device passthrough work easier for VMMs, which only
+	 * need to expose the ISA bridge to let the driver know the real
+	 * hardware underneath. This is a requirement from the
+	 * virtualization team.
+	 */
+	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+	if (pch) {
+		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+			unsigned short id;
+			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+			dev_priv->pch_id = id;
+
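+			/*
+			 * INTEL_PCH_DEVICE_ID_MASK keeps only the high byte
+			 * of the PCH device ID, so every SKU of a PCH family
+			 * compares equal below; e.g. a Cougar Point part
+			 * enumerating as 0x1c46 should mask down to 0x1c00,
+			 * the INTEL_PCH_CPT_DEVICE_ID_TYPE value (the example
+			 * IDs are illustrative, not taken from this tree).
+			 */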
+			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_IBX;
+				dev_priv->num_pch_pll = 2;
+				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+				WARN_ON(!IS_GEN5(dev));
+			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_CPT;
+				dev_priv->num_pch_pll = 2;
+				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
+				/* PantherPoint is CPT compatible */
+				dev_priv->pch_type = PCH_CPT;
+				dev_priv->num_pch_pll = 2;
+				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_LPT;
+				dev_priv->num_pch_pll = 0;
+				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+				WARN_ON(!IS_HASWELL(dev));
+				WARN_ON(IS_ULT(dev));
+			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_LPT;
+				dev_priv->num_pch_pll = 0;
+				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+				WARN_ON(!IS_HASWELL(dev));
+				WARN_ON(!IS_ULT(dev));
+			}
+			BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
+		}
+		pci_dev_put(pch);
+	}
+}
+
+bool i915_semaphore_is_enabled(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		return false;
+
+	if (i915_semaphores >= 0)
+		return i915_semaphores;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Disable semaphores on SNB when the IOMMU is remapping GFX DMA */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+#endif
+
+	return true;
+}
+
+static int i915_drm_freeze(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+
+	/* ignore lid events during suspend */
+	mutex_lock(&dev_priv->modeset_restore_lock);
+	dev_priv->modeset_restore = MODESET_SUSPENDED;
+	mutex_unlock(&dev_priv->modeset_restore_lock);
+
+	intel_set_power_well(dev, true);
+
+	drm_kms_helper_poll_disable(dev);
+
+	pci_save_state(dev->pdev);
+
+	/* If KMS is active, we do the leavevt stuff here */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		int error = i915_gem_idle(dev);
+		if (error) {
+			dev_err(&dev->pdev->dev,
+				"GEM idle failed, resume might fail\n");
+			return error;
+		}
+
+		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+
+		drm_irq_uninstall(dev);
+		dev_priv->enable_hotplug_processing = false;
+		/*
+		 * Disable CRTCs directly since we want to preserve sw state
+		 * for _thaw.
+		 */
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+			dev_priv->display.crtc_disable(crtc);
+	}
+
+	i915_save_state(dev);
+
+	intel_opregion_fini(dev);
+
+	console_lock();
+	intel_fbdev_set_suspend(dev, 1);
+	console_unlock();
+
+	return 0;
+}
+
+int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+	int error;
+
+	if (!dev || !dev->dev_private) {
+		DRM_ERROR("dev: %p\n", dev);
+		DRM_ERROR("DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (state.event == PM_EVENT_PRETHAW)
+		return 0;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	error = i915_drm_freeze(dev);
+	if (error)
+		return error;
+
+	if (state.event == PM_EVENT_SUSPEND) {
+		/* Shut down the device */
+		pci_disable_device(dev->pdev);
+		pci_set_power_state(dev->pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+
+void intel_console_resume(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private,
+			     console_resume_work);
+	struct drm_device *dev = dev_priv->dev;
+
+	console_lock();
+	intel_fbdev_set_suspend(dev, 0);
+	console_unlock();
+}
+
+static void intel_resume_hotplug(struct drm_device *dev)
+{
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+
+	mutex_lock(&mode_config->mutex);
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+		if (encoder->hot_plug)
+			encoder->hot_plug(encoder);
+
+	mutex_unlock(&mode_config->mutex);
+
+	/* Just fire off a uevent and let userspace tell us what to do */
+	drm_helper_hpd_irq_event(dev);
+}
+
+static int __i915_drm_thaw(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int error = 0;
+
+	i915_restore_state(dev);
+	intel_opregion_setup(dev);
+
+	/* KMS EnterVT equivalent */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		intel_init_pch_refclk(dev);
+
+		mutex_lock(&dev->struct_mutex);
+		dev_priv->mm.suspended = 0;
+
+		error = i915_gem_init_hw(dev);
+		mutex_unlock(&dev->struct_mutex);
+
+		/* We need working interrupts for modeset enabling ... */
+		drm_irq_install(dev);
+
+		intel_modeset_init_hw(dev);
+
+		drm_modeset_lock_all(dev);
+		intel_modeset_setup_hw_state(dev, true);
+		drm_modeset_unlock_all(dev);
+
+		/*
+		 * ... but also need to make sure that hotplug processing
+		 * doesn't cause havoc. Like in the driver load code we don't
+		 * bother with the tiny race here where we might lose hotplug
+		 * notifications.
+		 */
+		intel_hpd_init(dev);
+		dev_priv->enable_hotplug_processing = true;
+		/* Config may have changed between suspend and resume */
+		intel_resume_hotplug(dev);
+	}
+
+	intel_opregion_init(dev);
+
+	/*
+	 * The console lock can be pretty contended on resume due
+	 * to all the printk activity.  Try to keep it out of the hot
+	 * path of resume if possible.
+	 */
+	if (console_trylock()) {
+		intel_fbdev_set_suspend(dev, 0);
+		console_unlock();
+	} else {
+		schedule_work(&dev_priv->console_resume_work);
+	}
+
+	mutex_lock(&dev_priv->modeset_restore_lock);
+	dev_priv->modeset_restore = MODESET_DONE;
+	mutex_unlock(&dev_priv->modeset_restore_lock);
+	return error;
+}
+
+static int i915_drm_thaw(struct drm_device *dev)
+{
+	int error = 0;
+
+	intel_gt_sanitize(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_restore_gtt_mappings(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	__i915_drm_thaw(dev);
+
+	return error;
+}
+
+int i915_resume(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	pci_set_master(dev->pdev);
+
+	intel_gt_sanitize(dev);
+
+	/*
+	 * Platforms with opregion should have a sane BIOS; older ones (gen3 and
+	 * earlier) need this since the BIOS might clear all our scratch PTEs.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+	    !dev_priv->opregion.header) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_restore_gtt_mappings(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	ret = __i915_drm_thaw(dev);
+	if (ret)
+		return ret;
+
+	drm_kms_helper_poll_enable(dev);
+	return 0;
+}
+
+static int i8xx_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_I85X(dev))
+		return -ENODEV;
+
+	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+	POSTING_READ(D_STATE);
+
+	if (IS_I830(dev) || IS_845G(dev)) {
+		I915_WRITE(DEBUG_RESET_I830,
+			   DEBUG_RESET_DISPLAY |
+			   DEBUG_RESET_RENDER |
+			   DEBUG_RESET_FULL);
+		POSTING_READ(DEBUG_RESET_I830);
+		msleep(1);
+
+		I915_WRITE(DEBUG_RESET_I830, 0);
+		POSTING_READ(DEBUG_RESET_I830);
+	}
+
+	msleep(1);
+
+	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+	POSTING_READ(D_STATE);
+
+	return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+	u8 gdrst;
+	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+	return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+static int i965_do_reset(struct drm_device *dev)
+{
+	int ret;
+	u8 gdrst;
+
+	/*
+	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+	 * well as the reset bit (GR/bit 0).  Setting the GR bit
+	 * triggers the reset; when done, the hardware will clear it.
+	 */
+	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+	pci_write_config_byte(dev->pdev, I965_GDRST,
+			      gdrst | GRDOM_RENDER |
+			      GRDOM_RESET_ENABLE);
+	ret = wait_for(i965_reset_complete(dev), 500);
+	if (ret)
+		return ret;
+
+	/* We can't reset render&media without also resetting display ... */
+	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+	pci_write_config_byte(dev->pdev, I965_GDRST,
+			      gdrst | GRDOM_MEDIA |
+			      GRDOM_RESET_ENABLE);
+
+	return wait_for(i965_reset_complete(dev), 500);
+}
+
+static int ironlake_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 gdrst;
+	int ret;
+
+	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+	gdrst &= ~GRDOM_MASK;
+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+	if (ret)
+		return ret;
+
+	/* We can't reset render&media without also resetting display ... */
+	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+	gdrst &= ~GRDOM_MASK;
+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+}
+
+static int gen6_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+	unsigned long irqflags;
+
+	/* Hold gt_lock across reset to prevent any register access
+	 * with forcewake not set correctly
+	 */
+	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+
+	/* Reset the chip */
+
+	/* GEN6_GDRST is not in the gt power well, no need to check
+	 * for fifo space for the write or forcewake the chip for
+	 * the read
+	 */
+	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
+
+	/* Spin waiting for the device to ack the reset request */
+	ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+
+	/* If the reset ran while a userspace forcewake reference was held,
+	 * re-acquire it; otherwise make sure forcewake is released */
+	if (dev_priv->forcewake_count)
+		dev_priv->gt.force_wake_get(dev_priv);
+	else
+		dev_priv->gt.force_wake_put(dev_priv);
+
+	/* Restore fifo count */
+	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+
+	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+	return ret;
+}
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = -ENODEV;
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 7:
+	case 6:
+		ret = gen6_do_reset(dev);
+		break;
+	case 5:
+		ret = ironlake_do_reset(dev);
+		break;
+	case 4:
+		ret = i965_do_reset(dev);
+		break;
+	case 2:
+		ret = i8xx_do_reset(dev);
+		break;
+	}
+
+	/* Also reset the gpu hangman. */
+	if (dev_priv->gpu_error.stop_rings) {
+		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
+		dev_priv->gpu_error.stop_rings = 0;
+		if (ret == -ENODEV) {
+			DRM_ERROR("Reset not implemented, but ignoring "
+				  "error for simulated gpu hangs\n");
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * i915_reset - reset chip after a hang
+ * @dev: drm device to reset
+ *
+ * Reset the chip.  Useful if a hang is detected. Returns zero on successful
+ * reset or otherwise an error code.
+ *
+ * Procedure is fairly simple:
+ *   - reset the chip using the reset reg
+ *   - re-init context state
+ *   - re-init hardware status page
+ *   - re-init ring buffer
+ *   - re-init interrupt state
+ *   - re-init display
+ */
+int i915_reset(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!i915_try_reset)
+		return 0;
+
+	mutex_lock(&dev->struct_mutex);
+
+	i915_gem_reset(dev);
+
+	ret = -ENODEV;
+	if (get_seconds() - dev_priv->gpu_error.last_reset < 5)
+		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
+	else
+		ret = intel_gpu_reset(dev);
+
+	dev_priv->gpu_error.last_reset = get_seconds();
+	if (ret) {
+		DRM_ERROR("Failed to reset chip.\n");
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	/* Ok, now get things going again... */
+
+	/*
+	 * Everything depends on having the GTT running, so we need to start
+	 * there.  Fortunately we don't need to do this unless we reset the
+	 * chip at a PCI level.
+	 *
+	 * Next we need to restore the context, but we don't use those
+	 * yet either...
+	 *
+	 * Ring buffer needs to be re-initialized in the KMS case, or if X
+	 * was running at the time of the reset (i.e. we weren't VT
+	 * switched away).
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
+			!dev_priv->mm.suspended) {
+		struct intel_ring_buffer *ring;
+		int i;
+
+		dev_priv->mm.suspended = 0;
+
+		i915_gem_init_swizzling(dev);
+
+		for_each_ring(ring, dev_priv, i)
+			ring->init(ring);
+
+		i915_gem_context_init(dev);
+		if (dev_priv->mm.aliasing_ppgtt) {
+			ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
+			if (ret)
+				i915_gem_cleanup_aliasing_ppgtt(dev);
+		}
+
+		/*
+		 * It would make sense to re-init all the other hw state, at
+		 * least the rps/rc6/emon init done within modeset_init_hw. For
+		 * some unknown reason, this blows up my ilk, so don't.
+		 */
+
+		mutex_unlock(&dev->struct_mutex);
+
+		drm_irq_uninstall(dev);
+		drm_irq_install(dev);
+		intel_hpd_init(dev);
+	} else {
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	return 0;
+}
+
+static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct intel_device_info *intel_info =
+		(struct intel_device_info *) ent->driver_data;
+
+	if (intel_info->is_valleyview && !i915_preliminary_hw_support) {
+		DRM_ERROR("Preliminary hardware support disabled\n");
+		return -ENODEV;
+	}
+
+	/* Only bind to function 0 of the device. Early generations
+	 * used function 1 as a placeholder for multi-head. This causes
+	 * us confusion instead, especially on the systems where both
+	 * functions have the same PCI-ID!
+	 */
+	if (PCI_FUNC(pdev->devfn))
+		return -ENODEV;
+
+	/* We've managed to ship a kms-enabled ddx that shipped with an XvMC
+	 * implementation for gen3 (and only gen3) that used legacy drm maps
+	 * (gasp!) to share buffers between X and the client. Hence we need to
+	 * keep around the fake agp stuff for gen3, even when kms is enabled. */
+	if (intel_info->gen != 3) {
+		driver.driver_features &=
+			~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
+	} else if (!intel_agp_enabled) {
+		DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
+		return -ENODEV;
+	}
+
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void
+i915_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static int i915_pm_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
+
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	error = i915_drm_freeze(drm_dev);
+	if (error)
+		return error;
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+static int i915_pm_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_resume(drm_dev);
+}
+
+static int i915_pm_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	return i915_drm_freeze(drm_dev);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_thaw(drm_dev);
+}
+
+static int i915_pm_poweroff(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_freeze(drm_dev);
+}
+
+static const struct dev_pm_ops i915_pm_ops = {
+	.suspend = i915_pm_suspend,
+	.resume = i915_pm_resume,
+	.freeze = i915_pm_freeze,
+	.thaw = i915_pm_thaw,
+	.poweroff = i915_pm_poweroff,
+	.restore = i915_pm_resume,
+};
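+
+/*
+ * How the PM core is expected to use the callbacks above (a sketch based
+ * on the generic dev_pm_ops model rather than i915-specific docs):
+ *
+ *	suspend/resume - suspend-to-RAM (S3) entry and exit
+ *	freeze/thaw    - hibernation image creation and its unwind
+ *	poweroff       - final power-down once the hibernation image is saved
+ *	restore        - boot-time resume from a hibernation image
+ *
+ * Note that freeze and poweroff both funnel into i915_drm_freeze(); only
+ * the suspend path additionally drops the PCI device into D3hot.
+ */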
+
+static const struct vm_operations_struct i915_gem_vm_ops = {
+	.fault = i915_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static const struct file_operations i915_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_gem_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = i915_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+	/* Don't use MTRRs here; the Xserver or userspace app should
+	 * deal with them for Intel hardware.
+	 */
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
+	.load = i915_driver_load,
+	.unload = i915_driver_unload,
+	.open = i915_driver_open,
+	.lastclose = i915_driver_lastclose,
+	.preclose = i915_driver_preclose,
+	.postclose = i915_driver_postclose,
+
+	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
+	.suspend = i915_suspend,
+	.resume = i915_resume,
+
+	.device_is_agp = i915_driver_device_is_agp,
+	.master_create = i915_master_create,
+	.master_destroy = i915_master_destroy,
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = i915_debugfs_init,
+	.debugfs_cleanup = i915_debugfs_cleanup,
+#endif
+	.gem_init_object = i915_gem_init_object,
+	.gem_free_object = i915_gem_free_object,
+	.gem_vm_ops = &i915_gem_vm_ops,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = i915_gem_prime_export,
+	.gem_prime_import = i915_gem_prime_import,
+
+	.dumb_create = i915_gem_dumb_create,
+	.dumb_map_offset = i915_gem_mmap_gtt,
+	.dumb_destroy = i915_gem_dumb_destroy,
+	.ioctls = i915_ioctls,
+	.fops = &i915_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct pci_driver i915_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = i915_pci_probe,
+	.remove = i915_pci_remove,
+	.driver.pm = &i915_pm_ops,
+};
+
+static int __init i915_init(void)
+{
+	driver.num_ioctls = i915_max_ioctl;
+
+	/*
+	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
+	 * explicitly disabled with the module parameter.
+	 *
+	 * Otherwise, just follow the parameter (defaulting to off).
+	 *
+	 * Allow optional vga_text_mode_force boot option to override
+	 * the default behavior.
+	 */
+#if defined(CONFIG_DRM_I915_KMS)
+	if (i915_modeset != 0)
+		driver.driver_features |= DRIVER_MODESET;
+#endif
+	if (i915_modeset == 1)
+		driver.driver_features |= DRIVER_MODESET;
+
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && i915_modeset == -1)
+		driver.driver_features &= ~DRIVER_MODESET;
+#endif
+
+	if (!(driver.driver_features & DRIVER_MODESET))
+		driver.get_vblank_timestamp = NULL;
+
+	return drm_pci_init(&driver, &i915_pci_driver);
+}
+
+static void __exit i915_exit(void)
+{
+	drm_pci_exit(&driver, &i915_pci_driver);
+}
+
+module_init(i915_init);
+module_exit(i915_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+	 ((reg) < 0x40000) &&            \
+	 ((reg) != FORCEWAKE))
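+/*
+ * Reading of the check above: on parts with forcewake, registers below
+ * 0x40000 sit in the GT power well and can read back garbage unless the
+ * GT is awake; FORCEWAKE itself is excluded since it must stay reachable
+ * without first taking a forcewake reference.
+ */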
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+	/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
+	 * chip from rc6 before touching it for real. MI_MODE is masked, hence
+	 * harmless to write 0 into. */
+	I915_WRITE_NOTRACE(MI_MODE, 0);
+}
+
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+	if (IS_HASWELL(dev_priv->dev) &&
+	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
+			  reg);
+		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}
+
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+	if (IS_HASWELL(dev_priv->dev) &&
+	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+		DRM_ERROR("Unclaimed write to %x\n", reg);
+		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}
+
+#define __i915_read(x, y) \
+u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+	unsigned long irqflags; \
+	u##x val = 0; \
+	spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
+	if (IS_GEN5(dev_priv->dev)) \
+		ilk_dummy_write(dev_priv); \
+	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+		if (dev_priv->forcewake_count == 0) \
+			dev_priv->gt.force_wake_get(dev_priv); \
+		val = read##y(dev_priv->regs + reg); \
+		if (dev_priv->forcewake_count == 0) \
+			dev_priv->gt.force_wake_put(dev_priv); \
+	} else { \
+		val = read##y(dev_priv->regs + reg); \
+	} \
+	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
+	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
+	return val; \
+}
+
+__i915_read(8, b)
+__i915_read(16, w)
+__i915_read(32, l)
+__i915_read(64, q)
+#undef __i915_read
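+
+/*
+ * For reference, __i915_read(32, l) above expands (whitespace aside) to
+ *
+ *	u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg);
+ *
+ * implemented around readl(): take gt_lock, issue the ILK dummy write on
+ * gen5, and wrap the access in a temporary forcewake reference whenever
+ * NEEDS_FORCE_WAKE() says the register needs one.
+ */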
+
+#define __i915_write(x, y) \
+void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+	unsigned long irqflags; \
+	u32 __fifo_ret = 0; \
+	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+	spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
+	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+	} \
+	if (IS_GEN5(dev_priv->dev)) \
+		ilk_dummy_write(dev_priv); \
+	hsw_unclaimed_reg_clear(dev_priv, reg); \
+	write##y(val, dev_priv->regs + reg); \
+	if (unlikely(__fifo_ret)) { \
+		gen6_gt_check_fifodbg(dev_priv); \
+	} \
+	hsw_unclaimed_reg_check(dev_priv, reg); \
+	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
+}
+__i915_write(8, b)
+__i915_write(16, w)
+__i915_write(32, l)
+__i915_write(64, q)
+#undef __i915_write
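+
+/*
+ * The write-side expansion mirrors the read side with two differences:
+ * rather than taking forcewake it waits for free GT FIFO entries before
+ * posting the write (checking the FIFO debug state afterwards if that
+ * wait degraded), and on Haswell it brackets the access with the
+ * unclaimed-register clear/check helpers above.
+ */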
+
+static const struct register_whitelist {
+	uint64_t offset;
+	uint32_t size;
+	uint32_t gen_bitmask; /* supported gens: 0x10 for gen 4, 0x30 for gens 4-5, 0xF0 for gens 4-7, etc. */
+} whitelist[] = {
+	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+			void *data, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_reg_read *reg = data;
+	struct register_whitelist const *entry = whitelist;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+		if (entry->offset == reg->offset &&
+		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(whitelist))
+		return -EINVAL;
+
+	switch (entry->size) {
+	case 8:
+		reg->val = I915_READ64(reg->offset);
+		break;
+	case 4:
+		reg->val = I915_READ(reg->offset);
+		break;
+	case 2:
+		reg->val = I915_READ16(reg->offset);
+		break;
+	case 1:
+		reg->val = I915_READ8(reg->offset);
+		break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
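+
+/*
+ * A sketch of how userspace might drive the ioctl above, assuming
+ * libdrm's drmIoctl() wrapper and the DRM_IOCTL_I915_REG_READ and
+ * struct drm_i915_reg_read definitions from the uapi headers:
+ *
+ *	struct drm_i915_reg_read reg = {
+ *		.offset = 0x2358, /* assumed RING_TIMESTAMP(RENDER_RING_BASE) */
+ *	};
+ *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
+ *		printf("render ring timestamp: %llu\n",
+ *		       (unsigned long long)reg.val);
+ *
+ * Offsets not on the whitelist (or not valid for the running gen) fail
+ * with -EINVAL, as coded above.
+ */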
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_drv.h b/linux-imx/drivers/gpu/drm/i915/i915_drv.h
new file mode 100644
index 0000000..47d8b68
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_drv.h
@@ -0,0 +1,1965 @@
+/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_DRV_H_
+#define _I915_DRV_H_
+
+#include <uapi/drm/i915_drm.h>
+
+#include "i915_reg.h"
+#include "intel_bios.h"
+#include "intel_ringbuffer.h"
+#include <linux/io-mapping.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/intel-gtt.h>
+#include <linux/backlight.h>
+#include <linux/intel-iommu.h>
+#include <linux/kref.h>
+#include <linux/pm_qos.h>
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."
+
+#define DRIVER_NAME		"i915"
+#define DRIVER_DESC		"Intel Graphics"
+#define DRIVER_DATE		"20080730"
+
+enum pipe {
+	PIPE_A = 0,
+	PIPE_B,
+	PIPE_C,
+	I915_MAX_PIPES
+};
+#define pipe_name(p) ((p) + 'A')
+
+enum transcoder {
+	TRANSCODER_A = 0,
+	TRANSCODER_B,
+	TRANSCODER_C,
+	TRANSCODER_EDP = 0xF,
+};
+#define transcoder_name(t) ((t) + 'A')
+
+enum plane {
+	PLANE_A = 0,
+	PLANE_B,
+	PLANE_C,
+};
+#define plane_name(p) ((p) + 'A')
+
+enum port {
+	PORT_A = 0,
+	PORT_B,
+	PORT_C,
+	PORT_D,
+	PORT_E,
+	I915_MAX_PORTS
+};
+#define port_name(p) ((p) + 'A')
+
+enum hpd_pin {
+	HPD_NONE = 0,
+	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
+	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
+	HPD_CRT,
+	HPD_SDVO_B,
+	HPD_SDVO_C,
+	HPD_PORT_B,
+	HPD_PORT_C,
+	HPD_PORT_D,
+	HPD_NUM_PINS
+};
+
+#define I915_GEM_GPU_DOMAINS \
+	(I915_GEM_DOMAIN_RENDER | \
+	 I915_GEM_DOMAIN_SAMPLER | \
+	 I915_GEM_DOMAIN_COMMAND | \
+	 I915_GEM_DOMAIN_INSTRUCTION | \
+	 I915_GEM_DOMAIN_VERTEX)
+
+#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
+
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+		if ((intel_encoder)->base.crtc == (__crtc))
+
+struct intel_pch_pll {
+	int refcount; /* count of number of CRTCs sharing this PLL */
+	int active; /* count of number of active CRTCs (i.e. DPMS on) */
+	bool on; /* is the PLL actually active? Disabled during modeset */
+	int pll_reg;
+	int fp0_reg;
+	int fp1_reg;
+};
+#define I915_NUM_PLLS 2
+
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+	uint32_t	tu;
+	uint32_t	gmch_m;
+	uint32_t	gmch_n;
+	uint32_t	link_m;
+	uint32_t	link_n;
+};
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+			    int pixel_clock, int link_clock,
+			    struct intel_link_m_n *m_n);
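+/*
+ * Informally (the ratios involved, not the exact fixed-point math): the
+ * data M/N pair tracks (pixel_clock * bpp) : (link_clock * nlanes * 8)
+ * and the link M/N pair tracks pixel_clock : link_clock, which is how DP
+ * and FDI pace pixel data across the link.
+ */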
+
+struct intel_ddi_plls {
+	int spll_refcount;
+	int wrpll1_refcount;
+	int wrpll2_refcount;
+};
+
+/* Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
+ * 1.4: Fix cmdbuffer path, add heap destroy
+ * 1.5: Add vblank pipe configuration
+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
+ *      - Support vertical blank on secondary display pipe
+ */
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		6
+#define DRIVER_PATCHLEVEL	0
+
+#define WATCH_COHERENCY	0
+#define WATCH_LISTS	0
+#define WATCH_GTT	0
+
+#define I915_GEM_PHYS_CURSOR_0 1
+#define I915_GEM_PHYS_CURSOR_1 2
+#define I915_GEM_PHYS_OVERLAY_REGS 3
+#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
+
+struct drm_i915_gem_phys_object {
+	int id;
+	struct page **page_list;
+	drm_dma_handle_t *handle;
+	struct drm_i915_gem_object *cur_obj;
+};
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+struct drm_i915_private;
+
+struct intel_opregion {
+	struct opregion_header __iomem *header;
+	struct opregion_acpi __iomem *acpi;
+	struct opregion_swsci __iomem *swsci;
+	struct opregion_asle __iomem *asle;
+	void __iomem *vbt;
+	u32 __iomem *lid_state;
+};
+#define OPREGION_SIZE            (8*1024)
+
+struct intel_overlay;
+struct intel_overlay_error_state;
+
+struct drm_i915_master_private {
+	drm_local_map_t *sarea;
+	struct _drm_i915_sarea *sarea_priv;
+};
+#define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 32
+/* 32 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 6
+
+struct drm_i915_fence_reg {
+	struct list_head lru_list;
+	struct drm_i915_gem_object *obj;
+	int pin_count;
+};
+
+struct sdvo_device_mapping {
+	u8 initialized;
+	u8 dvo_port;
+	u8 slave_addr;
+	u8 dvo_wiring;
+	u8 i2c_pin;
+	u8 ddc_pin;
+};
+
+struct intel_display_error_state;
+
+struct drm_i915_error_state {
+	struct kref ref;
+	u32 eir;
+	u32 pgtbl_er;
+	u32 ier;
+	u32 ccid;
+	u32 derrmr;
+	u32 forcewake;
+	bool waiting[I915_NUM_RINGS];
+	u32 pipestat[I915_MAX_PIPES];
+	u32 tail[I915_NUM_RINGS];
+	u32 head[I915_NUM_RINGS];
+	u32 ctl[I915_NUM_RINGS];
+	u32 ipeir[I915_NUM_RINGS];
+	u32 ipehr[I915_NUM_RINGS];
+	u32 instdone[I915_NUM_RINGS];
+	u32 acthd[I915_NUM_RINGS];
+	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
+	/* our own tracking of ring head and tail */
+	u32 cpu_ring_head[I915_NUM_RINGS];
+	u32 cpu_ring_tail[I915_NUM_RINGS];
+	u32 error; /* gen6+ */
+	u32 err_int; /* gen7 */
+	u32 instpm[I915_NUM_RINGS];
+	u32 instps[I915_NUM_RINGS];
+	u32 extra_instdone[I915_NUM_INSTDONE_REG];
+	u32 seqno[I915_NUM_RINGS];
+	u64 bbaddr;
+	u32 fault_reg[I915_NUM_RINGS];
+	u32 done_reg;
+	u32 faddr[I915_NUM_RINGS];
+	u64 fence[I915_MAX_NUM_FENCES];
+	struct timeval time;
+	struct drm_i915_error_ring {
+		struct drm_i915_error_object {
+			int page_count;
+			u32 gtt_offset;
+			u32 *pages[0];
+		} *ringbuffer, *batchbuffer, *ctx;
+		struct drm_i915_error_request {
+			long jiffies;
+			u32 seqno;
+			u32 tail;
+		} *requests;
+		int num_requests;
+	} ring[I915_NUM_RINGS];
+	struct drm_i915_error_buffer {
+		u32 size;
+		u32 name;
+		u32 rseqno, wseqno;
+		u32 gtt_offset;
+		u32 read_domains;
+		u32 write_domain;
+		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
+		s32 pinned:2;
+		u32 tiling:2;
+		u32 dirty:1;
+		u32 purgeable:1;
+		s32 ring:4;
+		u32 cache_level:2;
+	} *active_bo, *pinned_bo;
+	u32 active_bo_count, pinned_bo_count;
+	struct intel_overlay_error_state *overlay;
+	struct intel_display_error_state *display;
+};
+
+struct intel_crtc_config;
+struct intel_crtc;
+
+struct drm_i915_display_funcs {
+	bool (*fbc_enabled)(struct drm_device *dev);
+	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
+	void (*disable_fbc)(struct drm_device *dev);
+	int (*get_display_clock_speed)(struct drm_device *dev);
+	int (*get_fifo_size)(struct drm_device *dev, int plane);
+	void (*update_wm)(struct drm_device *dev);
+	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+				 uint32_t sprite_width, int pixel_size);
+	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
+				 struct drm_display_mode *mode);
+	void (*modeset_global_resources)(struct drm_device *dev);
+	/* Returns the active state of the crtc, and if the crtc is active,
+	 * fills out the pipe-config with the hw state. */
+	bool (*get_pipe_config)(struct intel_crtc *,
+				struct intel_crtc_config *);
+	int (*crtc_mode_set)(struct drm_crtc *crtc,
+			     int x, int y,
+			     struct drm_framebuffer *old_fb);
+	void (*crtc_enable)(struct drm_crtc *crtc);
+	void (*crtc_disable)(struct drm_crtc *crtc);
+	void (*off)(struct drm_crtc *crtc);
+	void (*write_eld)(struct drm_connector *connector,
+			  struct drm_crtc *crtc);
+	void (*fdi_link_train)(struct drm_crtc *crtc);
+	void (*init_clock_gating)(struct drm_device *dev);
+	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb,
+			  struct drm_i915_gem_object *obj);
+	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			    int x, int y);
+	void (*hpd_irq_setup)(struct drm_device *dev);
+	/* clock updates for mode set */
+	/* cursor updates */
+	/* render clock increase/decrease */
+	/* display clock increase/decrease */
+	/* pll clock increase/decrease */
+};
+
+struct drm_i915_gt_funcs {
+	void (*force_wake_get)(struct drm_i915_private *dev_priv);
+	void (*force_wake_put)(struct drm_i915_private *dev_priv);
+};
+
+#define DEV_INFO_FLAGS \
+	DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
+	DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
+	DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
+	DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
+	DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
+	DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
+	DEV_INFO_FLAG(has_llc)
+
+struct intel_device_info {
+	u32 display_mmio_offset;
+	u8 num_pipes:3;
+	u8 gen;
+	u8 is_mobile:1;
+	u8 is_i85x:1;
+	u8 is_i915g:1;
+	u8 is_i945gm:1;
+	u8 is_g33:1;
+	u8 need_gfx_hws:1;
+	u8 is_g4x:1;
+	u8 is_pineview:1;
+	u8 is_broadwater:1;
+	u8 is_crestline:1;
+	u8 is_ivybridge:1;
+	u8 is_valleyview:1;
+	u8 has_force_wake:1;
+	u8 is_haswell:1;
+	u8 has_fbc:1;
+	u8 has_pipe_cxsr:1;
+	u8 has_hotplug:1;
+	u8 cursor_needs_physical:1;
+	u8 has_overlay:1;
+	u8 overlay_needs_physical:1;
+	u8 supports_tv:1;
+	u8 has_bsd_ring:1;
+	u8 has_blt_ring:1;
+	u8 has_llc:1;
+};
+
+enum i915_cache_level {
+	I915_CACHE_NONE = 0,
+	I915_CACHE_LLC,
+	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+	unsigned long start;		/* Start offset of used GTT */
+	size_t total;			/* Total size GTT can map */
+	size_t stolen_size;		/* Total size of stolen memory */
+
+	unsigned long mappable_end;	/* End offset that we can CPU map */
+	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
+	phys_addr_t mappable_base;	/* PA of our GMADR */
+
+	/** "Graphics Stolen Memory" holds the global PTEs */
+	void __iomem *gsm;
+
+	bool do_idle_maps;
+	dma_addr_t scratch_page_dma;
+	struct page *scratch_page;
+
+	/* global gtt ops */
+	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+			  size_t *stolen, phys_addr_t *mappable_base,
+			  unsigned long *mappable_end);
+	void (*gtt_remove)(struct drm_device *dev);
+	void (*gtt_clear_range)(struct drm_device *dev,
+				unsigned int first_entry,
+				unsigned int num_entries);
+	void (*gtt_insert_entries)(struct drm_device *dev,
+				   struct sg_table *st,
+				   unsigned int pg_start,
+				   enum i915_cache_level cache_level);
+};
+#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
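+/* e.g. with 4 KiB pages, a 256 MiB GTT comes to 65536 global PTEs */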
+
+#define I915_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES 1024
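+/*
+ * Sizing note, derived from the two constants above: 512 page-directory
+ * entries x 1024 page-table entries x 4 KiB pages covers 2 GiB of PPGTT
+ * address space.
+ */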
+struct i915_hw_ppgtt {
+	struct drm_device *dev;
+	unsigned num_pd_entries;
+	struct page **pt_pages;
+	uint32_t pd_offset;
+	dma_addr_t *pt_dma_addr;
+	dma_addr_t scratch_page_dma_addr;
+
+	/* pte functions, mirroring the interface of the global gtt. */
+	void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
+			    unsigned int first_entry,
+			    unsigned int num_entries);
+	void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
+			       struct sg_table *st,
+			       unsigned int pg_start,
+			       enum i915_cache_level cache_level);
+	int (*enable)(struct drm_device *dev);
+	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
+};
+
+
+/* This must match up with the value previously used for execbuf2.rsvd1. */
+#define DEFAULT_CONTEXT_ID 0
+struct i915_hw_context {
+	int id;
+	bool is_initialized;
+	struct drm_i915_file_private *file_priv;
+	struct intel_ring_buffer *ring;
+	struct drm_i915_gem_object *obj;
+};
+
+enum no_fbc_reason {
+	FBC_NO_OUTPUT, /* no outputs enabled to compress */
+	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
+	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+	FBC_MODE_TOO_LARGE, /* mode too large for compression */
+	FBC_BAD_PLANE, /* fbc not supported on plane */
+	FBC_NOT_TILED, /* buffer not tiled */
+	FBC_MULTIPLE_PIPES, /* more than one pipe active */
+	FBC_MODULE_PARAM,
+};
+
+enum intel_pch {
+	PCH_NONE = 0,	/* No PCH present */
+	PCH_IBX,	/* Ibexpeak PCH */
+	PCH_CPT,	/* Cougarpoint PCH */
+	PCH_LPT,	/* Lynxpoint PCH */
+	PCH_NOP,
+};
+
+enum intel_sbi_destination {
+	SBI_ICLK,
+	SBI_MPHY,
+};
+
+#define QUIRK_PIPEA_FORCE (1<<0)
+#define QUIRK_LVDS_SSC_DISABLE (1<<1)
+#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
+
+struct intel_fbdev;
+struct intel_fbc_work;
+
+struct intel_gmbus {
+	struct i2c_adapter adapter;
+	u32 force_bit;
+	u32 reg0;
+	u32 gpio_reg;
+	struct i2c_algo_bit_data bit_algo;
+	struct drm_i915_private *dev_priv;
+};
+
+struct i915_suspend_saved_registers {
+	u8 saveLBB;
+	u32 saveDSPACNTR;
+	u32 saveDSPBCNTR;
+	u32 saveDSPARB;
+	u32 savePIPEACONF;
+	u32 savePIPEBCONF;
+	u32 savePIPEASRC;
+	u32 savePIPEBSRC;
+	u32 saveFPA0;
+	u32 saveFPA1;
+	u32 saveDPLL_A;
+	u32 saveDPLL_A_MD;
+	u32 saveHTOTAL_A;
+	u32 saveHBLANK_A;
+	u32 saveHSYNC_A;
+	u32 saveVTOTAL_A;
+	u32 saveVBLANK_A;
+	u32 saveVSYNC_A;
+	u32 saveBCLRPAT_A;
+	u32 saveTRANSACONF;
+	u32 saveTRANS_HTOTAL_A;
+	u32 saveTRANS_HBLANK_A;
+	u32 saveTRANS_HSYNC_A;
+	u32 saveTRANS_VTOTAL_A;
+	u32 saveTRANS_VBLANK_A;
+	u32 saveTRANS_VSYNC_A;
+	u32 savePIPEASTAT;
+	u32 saveDSPASTRIDE;
+	u32 saveDSPASIZE;
+	u32 saveDSPAPOS;
+	u32 saveDSPAADDR;
+	u32 saveDSPASURF;
+	u32 saveDSPATILEOFF;
+	u32 savePFIT_PGM_RATIOS;
+	u32 saveBLC_HIST_CTL;
+	u32 saveBLC_PWM_CTL;
+	u32 saveBLC_PWM_CTL2;
+	u32 saveBLC_CPU_PWM_CTL;
+	u32 saveBLC_CPU_PWM_CTL2;
+	u32 saveFPB0;
+	u32 saveFPB1;
+	u32 saveDPLL_B;
+	u32 saveDPLL_B_MD;
+	u32 saveHTOTAL_B;
+	u32 saveHBLANK_B;
+	u32 saveHSYNC_B;
+	u32 saveVTOTAL_B;
+	u32 saveVBLANK_B;
+	u32 saveVSYNC_B;
+	u32 saveBCLRPAT_B;
+	u32 saveTRANSBCONF;
+	u32 saveTRANS_HTOTAL_B;
+	u32 saveTRANS_HBLANK_B;
+	u32 saveTRANS_HSYNC_B;
+	u32 saveTRANS_VTOTAL_B;
+	u32 saveTRANS_VBLANK_B;
+	u32 saveTRANS_VSYNC_B;
+	u32 savePIPEBSTAT;
+	u32 saveDSPBSTRIDE;
+	u32 saveDSPBSIZE;
+	u32 saveDSPBPOS;
+	u32 saveDSPBADDR;
+	u32 saveDSPBSURF;
+	u32 saveDSPBTILEOFF;
+	u32 saveVGA0;
+	u32 saveVGA1;
+	u32 saveVGA_PD;
+	u32 saveVGACNTRL;
+	u32 saveADPA;
+	u32 saveLVDS;
+	u32 savePP_ON_DELAYS;
+	u32 savePP_OFF_DELAYS;
+	u32 saveDVOA;
+	u32 saveDVOB;
+	u32 saveDVOC;
+	u32 savePP_ON;
+	u32 savePP_OFF;
+	u32 savePP_CONTROL;
+	u32 savePP_DIVISOR;
+	u32 savePFIT_CONTROL;
+	u32 save_palette_a[256];
+	u32 save_palette_b[256];
+	u32 saveDPFC_CB_BASE;
+	u32 saveFBC_CFB_BASE;
+	u32 saveFBC_LL_BASE;
+	u32 saveFBC_CONTROL;
+	u32 saveFBC_CONTROL2;
+	u32 saveIER;
+	u32 saveIIR;
+	u32 saveIMR;
+	u32 saveDEIER;
+	u32 saveDEIMR;
+	u32 saveGTIER;
+	u32 saveGTIMR;
+	u32 saveFDI_RXA_IMR;
+	u32 saveFDI_RXB_IMR;
+	u32 saveCACHE_MODE_0;
+	u32 saveMI_ARB_STATE;
+	u32 saveSWF0[16];
+	u32 saveSWF1[16];
+	u32 saveSWF2[3];
+	u8 saveMSR;
+	u8 saveSR[8];
+	u8 saveGR[25];
+	u8 saveAR_INDEX;
+	u8 saveAR[21];
+	u8 saveDACMASK;
+	u8 saveCR[37];
+	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
+	u32 saveCURACNTR;
+	u32 saveCURAPOS;
+	u32 saveCURABASE;
+	u32 saveCURBCNTR;
+	u32 saveCURBPOS;
+	u32 saveCURBBASE;
+	u32 saveCURSIZE;
+	u32 saveDP_B;
+	u32 saveDP_C;
+	u32 saveDP_D;
+	u32 savePIPEA_GMCH_DATA_M;
+	u32 savePIPEB_GMCH_DATA_M;
+	u32 savePIPEA_GMCH_DATA_N;
+	u32 savePIPEB_GMCH_DATA_N;
+	u32 savePIPEA_DP_LINK_M;
+	u32 savePIPEB_DP_LINK_M;
+	u32 savePIPEA_DP_LINK_N;
+	u32 savePIPEB_DP_LINK_N;
+	u32 saveFDI_RXA_CTL;
+	u32 saveFDI_TXA_CTL;
+	u32 saveFDI_RXB_CTL;
+	u32 saveFDI_TXB_CTL;
+	u32 savePFA_CTL_1;
+	u32 savePFB_CTL_1;
+	u32 savePFA_WIN_SZ;
+	u32 savePFB_WIN_SZ;
+	u32 savePFA_WIN_POS;
+	u32 savePFB_WIN_POS;
+	u32 savePCH_DREF_CONTROL;
+	u32 saveDISP_ARB_CTL;
+	u32 savePIPEA_DATA_M1;
+	u32 savePIPEA_DATA_N1;
+	u32 savePIPEA_LINK_M1;
+	u32 savePIPEA_LINK_N1;
+	u32 savePIPEB_DATA_M1;
+	u32 savePIPEB_DATA_N1;
+	u32 savePIPEB_LINK_M1;
+	u32 savePIPEB_LINK_N1;
+	u32 saveMCHBAR_RENDER_STANDBY;
+	u32 savePCH_PORT_HOTPLUG;
+};
+
+struct intel_gen6_power_mgmt {
+	struct work_struct work;
+	u32 pm_iir;
+	/* lock - irqsave spinlock that protects the work_struct and
+	 * pm_iir. */
+	spinlock_t lock;
+
+	/* The below variables and all the rps hw state are protected by
+	 * dev->struct_mutex. */
+	u8 cur_delay;
+	u8 min_delay;
+	u8 max_delay;
+	u8 hw_max;
+
+	struct delayed_work delayed_resume_work;
+
+	/*
+	 * Protects RPS/RC6 register access and PCU communication.
+	 * Must be taken after struct_mutex if nested.
+	 */
+	struct mutex hw_lock;
+};
+
+/* defined in intel_pm.c */
+extern spinlock_t mchdev_lock;
+
+struct intel_ilk_power_mgmt {
+	u8 cur_delay;
+	u8 min_delay;
+	u8 max_delay;
+	u8 fmax;
+	u8 fstart;
+
+	u64 last_count1;
+	unsigned long last_time1;
+	unsigned long chipset_power;
+	u64 last_count2;
+	struct timespec last_time2;
+	unsigned long gfx_power;
+	u8 corr;
+
+	int c_m;
+	int r_t;
+
+	struct drm_i915_gem_object *pwrctx;
+	struct drm_i915_gem_object *renderctx;
+};
+
+struct i915_dri1_state {
+	unsigned allow_batchbuffer : 1;
+	u32 __iomem *gfx_hws_cpu_addr;
+
+	unsigned int cpp;
+	int back_offset;
+	int front_offset;
+	int current_page;
+	int page_flipping;
+
+	uint32_t counter;
+};
+
+struct intel_l3_parity {
+	u32 *remap_info;
+	struct work_struct error_work;
+};
+
+struct i915_gem_mm {
+	/** Memory allocator for GTT stolen memory */
+	struct drm_mm stolen;
+	/** Memory allocator for GTT */
+	struct drm_mm gtt_space;
+	/** List of all objects in gtt_space. Used to restore gtt
+	 * mappings on resume */
+	struct list_head bound_list;
+	/**
+	 * List of objects which are not bound to the GTT (thus
+	 * are idle and not used by the GPU) but still have
+	 * (presumably uncached) pages still attached.
+	 */
+	struct list_head unbound_list;
+
+	/** Usable portion of the GTT for GEM */
+	unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+	int gtt_mtrr;
+
+	/** PPGTT used for aliasing the PPGTT with the GTT */
+	struct i915_hw_ppgtt *aliasing_ppgtt;
+
+	struct shrinker inactive_shrinker;
+	bool shrinker_no_lock_stealing;
+
+	/**
+	 * List of objects currently involved in rendering.
+	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives.  last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
+	 * A reference is held on the buffer while on this list.
+	 */
+	struct list_head active_list;
+
+	/**
+	 * LRU list of objects which are not in the ringbuffer and
+	 * are ready to unbind, but are still in the GTT.
+	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
+	 * A reference is not held on the buffer while on this list,
+	 * as merely being GTT-bound shouldn't prevent its being
+	 * freed, and we'll pull it off the list in the free path.
+	 */
+	struct list_head inactive_list;
+
+	/** LRU list of objects with fence regs on them. */
+	struct list_head fence_list;
+
+	/**
+	 * We leave the user IRQ off as much as possible,
+	 * but this means that requests will finish and never
+	 * be retired once the system goes idle. Set a timer to
+	 * fire periodically while the ring is running. When it
+	 * fires, go retire requests.
+	 */
+	struct delayed_work retire_work;
+
+	/**
+	 * Are we in a non-interruptible section of code like
+	 * modesetting?
+	 */
+	bool interruptible;
+
+	/**
+	 * Flag if the X Server, and thus DRM, is not currently in
+	 * control of the device.
+	 *
+	 * This is set between LeaveVT and EnterVT.  It needs to be
+	 * replaced with a semaphore.  It also needs to be
+	 * transitioned away from for kernel modesetting.
+	 */
+	int suspended;
+
+	/** Bit 6 swizzling required for X tiling */
+	uint32_t bit_6_swizzle_x;
+	/** Bit 6 swizzling required for Y tiling */
+	uint32_t bit_6_swizzle_y;
+
+	/* storage for physical objects */
+	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+	/* accounting, useful for userland debugging */
+	size_t object_memory;
+	u32 object_count;
+};
+
+struct i915_gpu_error {
+	/* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+	struct timer_list hangcheck_timer;
+	int hangcheck_count;
+	uint32_t last_acthd[I915_NUM_RINGS];
+	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+	/* For reset and error_state handling. */
+	spinlock_t lock;
+	/* Protected by the above dev->gpu_error.lock. */
+	struct drm_i915_error_state *first_error;
+	struct work_struct work;
+
+	unsigned long last_reset;
+
+	/**
+	 * State variable and reset counter controlling the reset flow
+	 *
+	 * Upper bits are for the reset counter.  This counter is used by the
+	 * wait_seqno code to notice, race-free, that a reset event happened
+	 * and that it needs to restart the entire ioctl (since most likely
+	 * the seqno it waited for won't ever signal anytime soon).
+	 *
+	 * This is important for lock-free wait paths, where no contended lock
+	 * naturally enforces the correct ordering between the bail-out of the
+	 * waiter and the gpu reset work code.
+	 *
+	 * Lowest bit controls the reset state machine: Set means a reset is in
+	 * progress. This state will (presuming we don't have any bugs) decay
+	 * into either unset (successful reset) or the special WEDGED value (hw
+	 * terminally sour). All waiters on the reset_queue will be woken when
+	 * that happens.
+	 */
+	atomic_t reset_counter;
+
+	/**
+	 * Special values/flags for reset_counter
+	 *
+	 * Note that the code relies on
+	 * 	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
+	 * being true.
+	 */
+#define I915_RESET_IN_PROGRESS_FLAG	1
+#define I915_WEDGED			0xffffffff
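+	/*
+	 * Typical lockless use, consistent with the description above: a
+	 * waiter samples reset_counter before sleeping and re-reads it on
+	 * wakeup; any change (a bumped count, or the in-progress/wedged
+	 * flags being set) means the wait must be abandoned and the ioctl
+	 * restarted.
+	 */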
+
+	/**
+	 * Waitqueue to signal when the reset has completed. Used by clients
+	 * that wait for dev_priv->mm.wedged to settle.
+	 */
+	wait_queue_head_t reset_queue;
+
+	/* For gpu hang simulation. */
+	unsigned int stop_rings;
+};
+
+enum modeset_restore {
+	MODESET_ON_LID_OPEN,
+	MODESET_DONE,
+	MODESET_SUSPENDED,
+};
+
+typedef struct drm_i915_private {
+	struct drm_device *dev;
+	struct kmem_cache *slab;
+
+	const struct intel_device_info *info;
+
+	int relative_constants_mode;
+
+	void __iomem *regs;
+
+	struct drm_i915_gt_funcs gt;
+	/** gt_fifo_count and the subsequent register write are synchronized
+	 * with dev->struct_mutex. */
+	unsigned gt_fifo_count;
+	/** forcewake_count is protected by gt_lock */
+	unsigned forcewake_count;
+	/** gt_lock is also taken in irq contexts. */
+	spinlock_t gt_lock;
+
+	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
+
+	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
+	 * controller on different i2c buses. */
+	struct mutex gmbus_mutex;
+
+	/**
+	 * Base address of the gmbus and gpio block.
+	 */
+	uint32_t gpio_mmio_base;
+
+	wait_queue_head_t gmbus_wait_queue;
+
+	struct pci_dev *bridge_dev;
+	struct intel_ring_buffer ring[I915_NUM_RINGS];
+	uint32_t last_seqno, next_seqno;
+
+	drm_dma_handle_t *status_page_dmah;
+	struct resource mch_res;
+
+	atomic_t irq_received;
+
+	/* protects the irq masks */
+	spinlock_t irq_lock;
+
+	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+	struct pm_qos_request pm_qos;
+
+	/* DPIO indirect register protection */
+	struct mutex dpio_lock;
+
+	/** Cached value of IMR to avoid reads in updating the bitfield */
+	u32 irq_mask;
+	u32 gt_irq_mask;
+
+	struct work_struct hotplug_work;
+	bool enable_hotplug_processing;
+	struct {
+		unsigned long hpd_last_jiffies;
+		int hpd_cnt;
+		enum {
+			HPD_ENABLED = 0,
+			HPD_DISABLED = 1,
+			HPD_MARK_DISABLED = 2
+		} hpd_mark;
+	} hpd_stats[HPD_NUM_PINS];
+	struct timer_list hotplug_reenable_timer;
+
+	int num_pch_pll;
+	int num_plane;
+
+	unsigned long cfb_size;
+	unsigned int cfb_fb;
+	enum plane cfb_plane;
+	int cfb_y;
+	struct intel_fbc_work *fbc_work;
+
+	struct intel_opregion opregion;
+
+	/* overlay */
+	struct intel_overlay *overlay;
+	unsigned int sprite_scaling_enabled;
+
+	/* backlight */
+	struct {
+		int level;
+		bool enabled;
+		struct backlight_device *device;
+	} backlight;
+
+	/* LVDS info */
+	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+	/* Feature bits from the VBIOS */
+	unsigned int int_tv_support:1;
+	unsigned int lvds_dither:1;
+	unsigned int lvds_vbt:1;
+	unsigned int int_crt_support:1;
+	unsigned int lvds_use_ssc:1;
+	unsigned int display_clock_mode:1;
+	unsigned int fdi_rx_polarity_inverted:1;
+	int lvds_ssc_freq;
+	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+	struct {
+		int rate;
+		int lanes;
+		int preemphasis;
+		int vswing;
+
+		bool initialized;
+		bool support;
+		int bpp;
+		struct edp_power_seq pps;
+	} edp;
+	bool no_aux_handshake;
+
+	int crt_ddc_pin;
+	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+	unsigned int fsb_freq, mem_freq, is_ddr3;
+
+	struct workqueue_struct *wq;
+
+	/* Display functions */
+	struct drm_i915_display_funcs display;
+
+	/* PCH chipset type */
+	enum intel_pch pch_type;
+	unsigned short pch_id;
+
+	unsigned long quirks;
+
+	enum modeset_restore modeset_restore;
+	struct mutex modeset_restore_lock;
+
+	struct i915_gtt gtt;
+
+	struct i915_gem_mm mm;
+
+	/* Kernel Modesetting */
+
+	struct sdvo_device_mapping sdvo_mappings[2];
+	/* indicate whether the LVDS_BORDER should be enabled or not */
+	unsigned int lvds_border_bits;
+	/* Panel fitter placement and size for Ironlake+ */
+	u32 pch_pf_pos, pch_pf_size;
+
+	struct drm_crtc *plane_to_crtc_mapping[3];
+	struct drm_crtc *pipe_to_crtc_mapping[3];
+	wait_queue_head_t pending_flip_queue;
+
+	struct intel_pch_pll pch_plls[I915_NUM_PLLS];
+	struct intel_ddi_plls ddi_plls;
+
+	/* Reclocking support */
+	bool render_reclock_avail;
+	bool lvds_downclock_avail;
+	/* indicates the reduced downclock for LVDS */
+	int lvds_downclock;
+	u16 orig_clock;
+	int child_dev_num;
+	struct child_device_config *child_dev;
+
+	bool mchbar_need_disable;
+
+	struct intel_l3_parity l3_parity;
+
+	/* gen6+ rps state */
+	struct intel_gen6_power_mgmt rps;
+
+	/* ilk-only ips/rps state. Everything in here is protected by the global
+	 * mchdev_lock in intel_pm.c */
+	struct intel_ilk_power_mgmt ips;
+
+	enum no_fbc_reason no_fbc_reason;
+
+	struct drm_mm_node *compressed_fb;
+	struct drm_mm_node *compressed_llb;
+
+	struct i915_gpu_error gpu_error;
+
+	/* fbdev registered on this device */
+	struct intel_fbdev *fbdev;
+
+	/*
+	 * The console lock may be contended at resume, but we don't
+	 * want to block on it.
+	 */
+	struct work_struct console_resume_work;
+
+	struct drm_property *broadcast_rgb_property;
+	struct drm_property *force_audio_property;
+
+	bool hw_contexts_disabled;
+	uint32_t hw_context_size;
+
+	u32 fdi_rx_config;
+
+	struct i915_suspend_saved_registers regfile;
+
+	/* Old dri1 support infrastructure, beware the dragons ya fools entering
+	 * here! */
+	struct i915_dri1_state dri1;
+} drm_i915_private_t;
+
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
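+
+/*
+ * Usage sketch, mirroring the reset path in i915_drv.c:
+ *
+ *	struct intel_ring_buffer *ring;
+ *	int i;
+ *
+ *	for_each_ring(ring, dev_priv, i)
+ *		ring->init(ring);
+ *
+ * Rings that were never initialised are skipped by the
+ * intel_ring_initialized() test baked into the macro.
+ */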
+
+enum hdmi_force_audio {
+	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
+	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
+	HDMI_AUDIO_AUTO,		/* trust EDID */
+	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
+};
+
+#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+
+struct drm_i915_gem_object_ops {
+	/* Interface between the GEM object and its backing storage.
+	 * get_pages() is called once prior to the use of the associated set
+	 * of pages, before binding them into the GTT, and put_pages() is
+	 * called after we no longer need them. As we expect there to be
+	 * associated cost with migrating pages between the backing storage
+	 * and making them available for the GPU (e.g. clflush), we may hold
+	 * onto the pages after they are no longer referenced by the GPU
+	 * in case they may be used again shortly (for example migrating the
+	 * pages to a different memory domain within the GTT). put_pages()
+	 * will therefore most likely be called when the object itself is
+	 * being released or under memory pressure (where we attempt to
+	 * reap pages for the shrinker).
+	 */
+	int (*get_pages)(struct drm_i915_gem_object *);
+	void (*put_pages)(struct drm_i915_gem_object *);
+};
+
+struct drm_i915_gem_object {
+	struct drm_gem_object base;
+
+	const struct drm_i915_gem_object_ops *ops;
+
+	/** Current space allocated to this object in the GTT, if any. */
+	struct drm_mm_node *gtt_space;
+	/** Stolen memory for this object, instead of being backed by shmem. */
+	struct drm_mm_node *stolen;
+	struct list_head gtt_list;
+
+	/** This object's place on the active/inactive lists */
+	struct list_head ring_list;
+	struct list_head mm_list;
+	/** This object's place in the batchbuffer or on the eviction list */
+	struct list_head exec_list;
+
+	/**
+	 * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on
+	 * inactive (ready to be unbound) list.
+	 */
+	unsigned int active:1;
+
+	/**
+	 * This is set if the object has been written to since last bound
+	 * to the GTT
+	 */
+	unsigned int dirty:1;
+
+	/**
+	 * Fence register bits (if any) for this object.  Will be set
+	 * as needed when mapped into the GTT.
+	 * Protected by dev->struct_mutex.
+	 */
+	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
+
+	/**
+	 * Advice: are the backing pages purgeable?
+	 */
+	unsigned int madv:2;
+
+	/**
+	 * Current tiling mode for the object.
+	 */
+	unsigned int tiling_mode:2;
+	/**
+	 * Whether the tiling parameters for the currently associated fence
+	 * register have changed. Note that for the purposes of tracking
+	 * tiling changes we also treat the unfenced register, the register
+	 * slot that the object occupies whilst it executes a fenced
+	 * command (such as BLT on gen2/3), as a "fence".
+	 */
+	unsigned int fence_dirty:1;
+
+	/** How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
+	 * (via user_pin_count), execbuffer (objects are not allowed multiple
+	 * times for the same batchbuffer), and the framebuffer code. When
+	 * switching/pageflipping, the framebuffer code has at most two buffers
+	 * pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits. */
+	unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+	/**
+	 * Is the object at the current location in the gtt mappable and
+	 * fenceable? Used to avoid costly recalculations.
+	 */
+	unsigned int map_and_fenceable:1;
+
+	/**
+	 * Whether the current gtt mapping needs to be mappable (and isn't just
+	 * mappable by accident). Track pin and fault separately for a more
+	 * accurate mappable working set.
+	 */
+	unsigned int fault_mappable:1;
+	unsigned int pin_mappable:1;
+
+	/*
+	 * Is the GPU currently using a fence to access this buffer?
+	 */
+	unsigned int pending_fenced_gpu_access:1;
+	unsigned int fenced_gpu_access:1;
+
+	unsigned int cache_level:2;
+
+	unsigned int has_aliasing_ppgtt_mapping:1;
+	unsigned int has_global_gtt_mapping:1;
+	unsigned int has_dma_mapping:1;
+
+	struct sg_table *pages;
+	int pages_pin_count;
+
+	/* prime dma-buf support */
+	void *dma_buf_vmapping;
+	int vmapping_count;
+
+	/**
+	 * Used for performing relocations during execbuffer insertion.
+	 */
+	struct hlist_node exec_node;
+	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
+
+	/**
+	 * Current offset of the object in GTT space.
+	 *
+	 * This is the same as gtt_space->start
+	 */
+	uint32_t gtt_offset;
+
+	struct intel_ring_buffer *ring;
+
+	/** Breadcrumb of last rendering to the buffer. */
+	uint32_t last_read_seqno;
+	uint32_t last_write_seqno;
+	/** Breadcrumb of last fenced GPU access to the buffer. */
+	uint32_t last_fenced_seqno;
+
+	/** Current tiling stride for the object, if it's tiled. */
+	uint32_t stride;
+
+	/** Record of address bit 17 of each page at last unbind. */
+	unsigned long *bit_17;
+
+	/** User space pin count and filp owning the pin */
+	uint32_t user_pin_count;
+	struct drm_file *pin_filp;
+
+	/** for phy allocated objects */
+	struct drm_i915_gem_phys_object *phys_obj;
+};
+#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
+
+#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
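+
+/*
+ * Usage sketch: code handed a base GEM object by the DRM core converts it
+ * back to the i915 object with to_intel_bo(), e.g. (gem_obj being a
+ * hypothetical struct drm_gem_object pointer):
+ *
+ *	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ */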
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable
+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ */
+struct drm_i915_gem_request {
+	/** On which ring this request was generated */
+	struct intel_ring_buffer *ring;
+
+	/** GEM sequence number associated with this request. */
+	uint32_t seqno;
+
+	/** Position in the ringbuffer of the end of the request */
+	u32 tail;
+
+	/** Time at which this request was emitted, in jiffies. */
+	unsigned long emitted_jiffies;
+
+	/** global list entry for this request */
+	struct list_head list;
+
+	struct drm_i915_file_private *file_priv;
+	/** file_priv list entry for this request */
+	struct list_head client_list;
+};
+
+struct drm_i915_file_private {
+	struct {
+		spinlock_t lock;
+		struct list_head request_list;
+	} mm;
+	struct idr context_idr;
+};
+
+#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev)		((dev)->pci_device == 0x3577)
+#define IS_845G(dev)		((dev)->pci_device == 0x2562)
+#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
+#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
+#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
+#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
+				 (dev)->pci_device == 0x0152 ||	\
+				 (dev)->pci_device == 0x015a)
+#define IS_SNB_GT1(dev)		((dev)->pci_device == 0x0102 || \
+				 (dev)->pci_device == 0x0106 ||	\
+				 (dev)->pci_device == 0x010A)
+#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
+#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
+#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
+#define IS_ULT(dev)		(IS_HASWELL(dev) && \
+				 ((dev)->pci_device & 0xFF00) == 0x0A00)
+
+/*
+ * The genX designation typically refers to the render engine, so render
+ * capability related checks should use IS_GEN, while display and other checks
+ * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
+ * chips, etc.).
+ */
+#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
+#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
+
+#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
+#define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
+#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
+#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
+
+#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
+
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+						      IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+
+#define HAS_DDI(dev)		(IS_HASWELL(dev))
+#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))
+
+#define INTEL_PCH_DEVICE_ID_MASK		0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+
+#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+
+#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+
+#define GT_FREQUENCY_MULTIPLIER 50
+
+#include "i915_trace.h"
+
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, drawing as little as 0V while in this stage.
+ * This stage is entered automatically when the GPU is idle and RC6 support
+ * is enabled, and the GPU wakes up automatically as soon as a new workload
+ * arises.
+ *
+ * There are different RC6 modes available on Intel GPUs, which differ in
+ * the latency required to enter and leave RC6, and in the voltage consumed
+ * by the GPU in the different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is the deepest RC6. Their support by hardware varies according to
+ * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
+ * the one which brings the most power savings; deeper states save more
+ * power, but require higher latency to switch to and wake up.
+ */
+#define INTEL_RC6_ENABLE			(1<<0)
+#define INTEL_RC6p_ENABLE			(1<<1)
+#define INTEL_RC6pp_ENABLE			(1<<2)
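+
+/*
+ * Illustrative (assumed) usage: a policy that allows RC6 and the deep RC6p
+ * state but not the deepest RC6pp state would be expressed as the mask
+ * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE), i.e. 0x3.
+ */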
+
+extern struct drm_ioctl_desc i915_ioctls[];
+extern int i915_max_ioctl;
+extern unsigned int i915_fbpercrtc __always_unused;
+extern int i915_panel_ignore_lid __read_mostly;
+extern unsigned int i915_powersave __read_mostly;
+extern int i915_semaphores __read_mostly;
+extern unsigned int i915_lvds_downclock __read_mostly;
+extern int i915_lvds_channel_mode __read_mostly;
+extern int i915_panel_use_ssc __read_mostly;
+extern int i915_vbt_sdvo_panel_type __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
+extern int i915_enable_fbc __read_mostly;
+extern bool i915_enable_hangcheck __read_mostly;
+extern int i915_enable_ppgtt __read_mostly;
+extern unsigned int i915_preliminary_hw_support __read_mostly;
+extern int i915_disable_power_well __read_mostly;
+
+extern int i915_suspend(struct drm_device *dev, pm_message_t state);
+extern int i915_resume(struct drm_device *dev);
+extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
+extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
+
+/* i915_dma.c */
+void i915_update_dri1_breadcrumb(struct drm_device *dev);
+extern void i915_kernel_lost_context(struct drm_device * dev);
+extern int i915_driver_load(struct drm_device *, unsigned long flags);
+extern int i915_driver_unload(struct drm_device *);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
+extern void i915_driver_lastclose(struct drm_device * dev);
+extern void i915_driver_preclose(struct drm_device *dev,
+				 struct drm_file *file_priv);
+extern void i915_driver_postclose(struct drm_device *dev,
+				  struct drm_file *file_priv);
+extern int i915_driver_device_is_agp(struct drm_device * dev);
+#ifdef CONFIG_COMPAT
+extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg);
+#endif
+extern int i915_emit_box(struct drm_device *dev,
+			 struct drm_clip_rect *box,
+			 int DR1, int DR4);
+extern int intel_gpu_reset(struct drm_device *dev);
+extern int i915_reset(struct drm_device *dev);
+extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+
+extern void intel_console_resume(struct work_struct *work);
+
+/* i915_irq.c */
+void i915_hangcheck_elapsed(unsigned long data);
+void i915_handle_error(struct drm_device *dev, bool wedged);
+
+extern void intel_irq_init(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
+extern void intel_hpd_init(struct drm_device *dev);
+extern void intel_gt_init(struct drm_device *dev);
+extern void intel_gt_sanitize(struct drm_device *dev);
+
+void i915_error_state_free(struct kref *error_ref);
+
+void
+i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+
+void
+i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+
+void intel_enable_asle(struct drm_device *dev);
+
+#ifdef CONFIG_DEBUG_FS
+extern void i915_destroy_error_state(struct drm_device *dev);
+#else
+#define i915_destroy_error_state(x)
+#endif
+
+
+/* i915_gem.c */
+int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+int i915_gem_execbuffer(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_execbuffer2(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
+int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file);
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int i915_gem_set_tiling(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_get_tiling(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+void i915_gem_load(struct drm_device *dev);
+void *i915_gem_object_alloc(struct drm_device *dev);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
+int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+			 const struct drm_i915_gem_object_ops *ops);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+						  size_t size);
+void i915_gem_free_object(struct drm_gem_object *obj);
+
+int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+				     uint32_t alignment,
+				     bool map_and_fenceable,
+				     bool nonblocking);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_lastclose(struct drm_device *dev);
+
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
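+/* Look up the nth page backing the object; NULL if n is out of range. */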
+static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+	struct sg_page_iter sg_iter;
+
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
+		return sg_page_iter_page(&sg_iter);
+
+	return NULL;
+}
+static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+	BUG_ON(obj->pages == NULL);
+	obj->pages_pin_count++;
+}
+static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+	BUG_ON(obj->pages_pin_count == 0);
+	obj->pages_pin_count--;
+}
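+
+/*
+ * Sketch of the usual pairing, as used by the pread/pwrite paths (error
+ * handling elided):
+ *
+ *	ret = i915_gem_object_get_pages(obj);
+ *	if (ret == 0) {
+ *		i915_gem_object_pin_pages(obj);
+ *		... access obj->pages ...
+ *		i915_gem_object_unpin_pages(obj);
+ *	}
+ */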
+
+int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+int i915_gem_object_sync(struct drm_i915_gem_object *obj,
+			 struct intel_ring_buffer *to);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+				    struct intel_ring_buffer *ring);
+
+int i915_gem_dumb_create(struct drm_file *file_priv,
+			 struct drm_device *dev,
+			 struct drm_mode_create_dumb *args);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+		      uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+			  uint32_t handle);
+/**
+ * Returns true if seq1 is at or after seq2; the signed comparison is safe
+ * across u32 wraparound.
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+	return (int32_t)(seq1 - seq2) >= 0;
+}
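+
+/*
+ * Example (illustrative values): with seq1 = 0x00000002 and
+ * seq2 = 0xfffffffe, seq1 - seq2 == 4, so the signed comparison correctly
+ * reports that seq1 passed seq2 despite the u32 wraparound.
+ */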
+
+int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
+static inline bool
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		dev_priv->fence_regs[obj->fence_reg].pin_count++;
+		return true;
+	} else
+		return false;
+}
+
+static inline void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		dev_priv->fence_regs[obj->fence_reg].pin_count--;
+	}
+}
+
+void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
+				      bool interruptible);
+static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
+{
+	return unlikely(atomic_read(&error->reset_counter)
+			& I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+{
+	return atomic_read(&error->reset_counter) == I915_WEDGED;
+}
+
+void i915_gem_reset(struct drm_device *dev);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
+					    uint32_t read_domains,
+					    uint32_t write_domain);
+int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_init(struct drm_device *dev);
+int __must_check i915_gem_init_hw(struct drm_device *dev);
+void i915_gem_l3_remap(struct drm_device *dev);
+void i915_gem_init_swizzling(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_idle(struct drm_device *dev);
+int i915_add_request(struct intel_ring_buffer *ring,
+		     struct drm_file *file,
+		     u32 *seqno);
+int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
+				 uint32_t seqno);
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+				  bool write);
+int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+				     u32 alignment,
+				     struct intel_ring_buffer *pipelined);
+int i915_gem_attach_phys_object(struct drm_device *dev,
+				struct drm_i915_gem_object *obj,
+				int id,
+				int align);
+void i915_gem_detach_phys_object(struct drm_device *dev,
+				 struct drm_i915_gem_object *obj);
+void i915_gem_free_all_phys_object(struct drm_device *dev);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+
+uint32_t
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+			    int tiling_mode, bool fenced);
+
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+				    enum i915_cache_level cache_level);
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+				struct dma_buf *dma_buf);
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+				struct drm_gem_object *gem_obj, int flags);
+
+void i915_gem_restore_fences(struct drm_device *dev);
+
+/* i915_gem_context.c */
+void i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_fini(struct drm_device *dev);
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
+int i915_switch_context(struct intel_ring_buffer *ring,
+			struct drm_file *file, int to_id);
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file);
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file);
+
+/* i915_gem_gtt.c */
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+			    struct drm_i915_gem_object *obj,
+			    enum i915_cache_level cache_level);
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+			      struct drm_i915_gem_object *obj);
+
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+				enum i915_cache_level cache_level);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+			       unsigned long mappable_end, unsigned long end);
+int i915_gem_gtt_init(struct drm_device *dev);
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gtt_chipset_flush();
+}
+
+
+/* i915_gem_evict.c */
+int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+					  unsigned alignment,
+					  unsigned cache_level,
+					  bool mappable,
+					  bool nonblock);
+int i915_gem_evict_everything(struct drm_device *dev);
+
+/* i915_gem_stolen.c */
+int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
+void i915_gem_cleanup_stolen(struct drm_device *dev);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+					       u32 stolen_offset,
+					       u32 gtt_offset,
+					       u32 size);
+void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
+
+/* i915_gem_tiling.c */
+static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+
+	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+		obj->tiling_mode != I915_TILING_NONE;
+}
+
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+
+/* i915_gem_debug.c */
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
+			  const char *where, uint32_t mark);
+#if WATCH_LISTS
+int i915_verify_lists(struct drm_device *dev);
+#else
+#define i915_verify_lists(dev) 0
+#endif
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+				     int handle);
+
+/* i915_debugfs.c */
+int i915_debugfs_init(struct drm_minor *minor);
+void i915_debugfs_cleanup(struct drm_minor *minor);
+
+/* i915_suspend.c */
+extern int i915_save_state(struct drm_device *dev);
+extern int i915_restore_state(struct drm_device *dev);
+
+/* i915_ums.c */
+void i915_save_display_reg(struct drm_device *dev);
+void i915_restore_display_reg(struct drm_device *dev);
+
+/* i915_sysfs.c */
+void i915_setup_sysfs(struct drm_device *dev_priv);
+void i915_teardown_sysfs(struct drm_device *dev_priv);
+
+/* intel_i2c.c */
+extern int intel_setup_gmbus(struct drm_device *dev);
+extern void intel_teardown_gmbus(struct drm_device *dev);
+extern inline bool intel_gmbus_is_port_valid(unsigned port)
+{
+	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
+}
+
+extern struct i2c_adapter *intel_gmbus_get_adapter(
+		struct drm_i915_private *dev_priv, unsigned port);
+extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+{
+	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+}
+extern void intel_i2c_reset(struct drm_device *dev);
+
+/* intel_opregion.c */
+extern int intel_opregion_setup(struct drm_device *dev);
+#ifdef CONFIG_ACPI
+extern void intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_fini(struct drm_device *dev);
+extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern void intel_opregion_gse_intr(struct drm_device *dev);
+extern void intel_opregion_enable_asle(struct drm_device *dev);
+#else
+static inline void intel_opregion_init(struct drm_device *dev) { return; }
+static inline void intel_opregion_fini(struct drm_device *dev) { return; }
+static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
+#endif
+
+/* intel_acpi.c */
+#ifdef CONFIG_ACPI
+extern void intel_register_dsm_handler(void);
+extern void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
+
+/* modesetting */
+extern void intel_modeset_init_hw(struct drm_device *dev);
+extern void intel_modeset_init(struct drm_device *dev);
+extern void intel_modeset_gem_init(struct drm_device *dev);
+extern void intel_modeset_cleanup(struct drm_device *dev);
+extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+					 bool force_restore);
+extern void i915_redisable_vga(struct drm_device *dev);
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void intel_init_pch_refclk(struct drm_device *dev);
+extern void gen6_set_rps(struct drm_device *dev, u8 val);
+extern void intel_detect_pch(struct drm_device *dev);
+extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern int intel_enable_rc6(const struct drm_device *dev);
+
+extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+int i915_reg_read_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file);
+
+/* overlay */
+#ifdef CONFIG_DEBUG_FS
+extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+
+extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern void intel_display_print_error_state(struct seq_file *m,
+					    struct drm_device *dev,
+					    struct intel_display_error_state *error);
+#endif
+
+/* On the SNB platform, the forcewake bit must be set before reading ring
+ * registers, to prevent the GT core from powering down and returning stale
+ * values.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
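+
+/*
+ * Typical usage sketch around a raw register read (assumed pattern;
+ * dev_priv and reg are placeholders):
+ *
+ *	gen6_gt_force_wake_get(dev_priv);
+ *	val = I915_READ(reg);
+ *	gen6_gt_force_wake_put(dev_priv);
+ */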
+
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val);
+int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
+
+#define __i915_read(x, y) \
+	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
+
+__i915_read(8, b)
+__i915_read(16, w)
+__i915_read(32, l)
+__i915_read(64, q)
+#undef __i915_read
+
+#define __i915_write(x, y) \
+	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
+
+__i915_write(8, b)
+__i915_write(16, w)
+__i915_write(32, l)
+__i915_write(64, q)
+#undef __i915_write
+
+#define I915_READ8(reg)		i915_read8(dev_priv, (reg))
+#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val))
+
+#define I915_READ16(reg)	i915_read16(dev_priv, (reg))
+#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val))
+#define I915_READ16_NOTRACE(reg)	readw(dev_priv->regs + (reg))
+#define I915_WRITE16_NOTRACE(reg, val)	writew(val, dev_priv->regs + (reg))
+
+#define I915_READ(reg)		i915_read32(dev_priv, (reg))
+#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val))
+#define I915_READ_NOTRACE(reg)		readl(dev_priv->regs + (reg))
+#define I915_WRITE_NOTRACE(reg, val)	writel(val, dev_priv->regs + (reg))
+
+#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val))
+#define I915_READ64(reg)	i915_read64(dev_priv, (reg))
+
+#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
+#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
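+
+/*
+ * A posting read flushes a preceding register write out to the hardware
+ * before its side effects are relied upon, e.g. (a sketch):
+ *
+ *	I915_WRITE(reg, val);
+ *	POSTING_READ(reg);
+ */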
+
+/* "Broadcast RGB" property */
+#define INTEL_BROADCAST_RGB_AUTO 0
+#define INTEL_BROADCAST_RGB_FULL 1
+#define INTEL_BROADCAST_RGB_LIMITED 2
+
+static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
+{
+	if (HAS_PCH_SPLIT(dev))
+		return CPU_VGACNTRL;
+	else if (IS_VALLEYVIEW(dev))
+		return VLV_VGACNTRL;
+	else
+		return VGACNTRL;
+}
+
+static inline void __user *to_user_ptr(u64 address)
+{
+	return (void __user *)(uintptr_t)address;
+}
+
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+	unsigned long j = msecs_to_jiffies(m);
+
+	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+static inline unsigned long
+timespec_to_jiffies_timeout(const struct timespec *value)
+{
+	unsigned long j = timespec_to_jiffies(value);
+
+	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
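+
+/*
+ * Both helpers above add one jiffy because a timeout of j jiffies may
+ * expire after as little as (j - 1) full tick periods; the extra jiffy
+ * guarantees the wait lasts at least as long as requested (illustrative:
+ * at HZ=100 a 5 ms request becomes 2 jiffies, so at least one full 10 ms
+ * tick elapses).
+ */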
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_gem.c b/linux-imx/drivers/gpu/drm/i915/i915_gem.c
new file mode 100644
index 0000000..0a30088
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_gem.c
@@ -0,0 +1,4503 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+
+static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+						    unsigned alignment,
+						    bool map_and_fenceable,
+						    bool nonblocking);
+static int i915_gem_phys_pwrite(struct drm_device *dev,
+				struct drm_i915_gem_object *obj,
+				struct drm_i915_gem_pwrite *args,
+				struct drm_file *file);
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj);
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+					 struct drm_i915_fence_reg *fence,
+					 bool enable);
+
+static int i915_gem_inactive_shrink(struct shrinker *shrinker,
+				    struct shrink_control *sc);
+static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+
+static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
+{
+	if (obj->tiling_mode)
+		i915_gem_release_mmap(obj);
+
+	/* As we do not have an associated fence register, we will force
+	 * a tiling change if we ever need to acquire one.
+	 */
+	obj->fence_dirty = false;
+	obj->fence_reg = I915_FENCE_REG_NONE;
+}
+
+/* some bookkeeping */
+static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
+				  size_t size)
+{
+	dev_priv->mm.object_count++;
+	dev_priv->mm.object_memory += size;
+}
+
+static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
+				     size_t size)
+{
+	dev_priv->mm.object_count--;
+	dev_priv->mm.object_memory -= size;
+}
+
+static int
+i915_gem_wait_for_error(struct i915_gpu_error *error)
+{
+	int ret;
+
+#define EXIT_COND (!i915_reset_in_progress(error) || \
+		   i915_terminally_wedged(error))
+	if (EXIT_COND)
+		return 0;
+
+	/*
+	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
+	 * userspace. If it takes that long, something really bad is going on and
+	 * we should simply try to bail out and fail as gracefully as possible.
+	 */
+	ret = wait_event_interruptible_timeout(error->reset_queue,
+					       EXIT_COND,
+					       10*HZ);
+	if (ret == 0) {
+		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
+		return -EIO;
+	} else if (ret < 0) {
+		return ret;
+	}
+#undef EXIT_COND
+
+	return 0;
+}
+
+int i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
+	if (ret)
+		return ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	WARN_ON(i915_verify_lists(dev));
+	return 0;
+}
+
+static inline bool
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
+{
+	return obj->gtt_space && !obj->active;
+}
+
+int
+i915_gem_init_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_init *args = data;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	if (args->gtt_start >= args->gtt_end ||
+	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
+		return -EINVAL;
+
+	/* GEM with user mode setting was never supported on ilk and later. */
+	if (INTEL_INFO(dev)->gen >= 5)
+		return -ENODEV;
+
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+				  args->gtt_end);
+	dev_priv->gtt.mappable_end = args->gtt_end;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+int
+i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_get_aperture *args = data;
+	struct drm_i915_gem_object *obj;
+	size_t pinned;
+
+	pinned = 0;
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+		if (obj->pin_count)
+			pinned += obj->gtt_space->size;
+	mutex_unlock(&dev->struct_mutex);
+
+	args->aper_size = dev_priv->gtt.total;
+	args->aper_available_size = args->aper_size - pinned;
+
+	return 0;
+}
+
+void *i915_gem_object_alloc(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	kmem_cache_free(dev_priv->slab, obj);
+}
+
+static int
+i915_gem_create(struct drm_file *file,
+		struct drm_device *dev,
+		uint64_t size,
+		uint32_t *handle_p)
+{
+	struct drm_i915_gem_object *obj;
+	int ret;
+	u32 handle;
+
+	size = roundup(size, PAGE_SIZE);
+	if (size == 0)
+		return -EINVAL;
+
+	/* Allocate the new object */
+	obj = i915_gem_alloc_object(dev, size);
+	if (obj == NULL)
+		return -ENOMEM;
+
+	ret = drm_gem_handle_create(file, &obj->base, &handle);
+	if (ret) {
+		drm_gem_object_release(&obj->base);
+		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
+		i915_gem_object_free(obj);
+		return ret;
+	}
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference(&obj->base);
+	trace_i915_gem_object_create(obj);
+
+	*handle_p = handle;
+	return 0;
+}
+
+int
+i915_gem_dumb_create(struct drm_file *file,
+		     struct drm_device *dev,
+		     struct drm_mode_create_dumb *args)
+{
+	/* have to work out size/pitch and return them */
+	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+	args->size = args->pitch * args->height;
+	return i915_gem_create(file, dev,
+			       args->size, &args->handle);
+}
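+
+/*
+ * Illustrative numbers: a 1920x1080 dumb buffer at 32 bpp gets
+ * pitch = ALIGN(1920 * 4, 64) = 7680 bytes and
+ * size = 7680 * 1080 = 8294400 bytes.
+ */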
+
+int i915_gem_dumb_destroy(struct drm_file *file,
+			  struct drm_device *dev,
+			  uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file)
+{
+	struct drm_i915_gem_create *args = data;
+
+	return i915_gem_create(file, dev,
+			       args->size, &args->handle);
+}
+
+static inline int
+__copy_to_user_swizzled(char __user *cpu_vaddr,
+			const char *gpu_vaddr, int gpu_offset,
+			int length)
+{
+	int ret, cpu_offset = 0;
+
+	while (length > 0) {
+		int cacheline_end = ALIGN(gpu_offset + 1, 64);
+		int this_length = min(cacheline_end - gpu_offset, length);
+		int swizzled_gpu_offset = gpu_offset ^ 64;
+
+		ret = __copy_to_user(cpu_vaddr + cpu_offset,
+				     gpu_vaddr + swizzled_gpu_offset,
+				     this_length);
+		if (ret)
+			return ret + length;
+
+		cpu_offset += this_length;
+		gpu_offset += this_length;
+		length -= this_length;
+	}
+
+	return 0;
+}
+
+static inline int
+__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
+			  const char __user *cpu_vaddr,
+			  int length)
+{
+	int ret, cpu_offset = 0;
+
+	while (length > 0) {
+		int cacheline_end = ALIGN(gpu_offset + 1, 64);
+		int this_length = min(cacheline_end - gpu_offset, length);
+		int swizzled_gpu_offset = gpu_offset ^ 64;
+
+		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
+				       cpu_vaddr + cpu_offset,
+				       this_length);
+		if (ret)
+			return ret + length;
+
+		cpu_offset += this_length;
+		gpu_offset += this_length;
+		length -= this_length;
+	}
+
+	return 0;
+}
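+
+/*
+ * In both helpers above, "gpu_offset ^ 64" swaps the two 64-byte halves of
+ * each 128-byte span, which is how bit-17 swizzling permutes CPU-visible
+ * addresses: e.g. the byte the GPU sees at offset 0x40 lives at CPU offset
+ * 0x00, and vice versa. Bounding each copy at cacheline_end keeps it
+ * within a single 64-byte half, so the XOR applies uniformly.
+ */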
+
+/* Per-page copy function for the shmem pread fastpath.
+ * Flushes invalid cachelines before reading the target if
+ * needs_clflush is set. */
+static int
+shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
+		 char __user *user_data,
+		 bool page_do_bit17_swizzling, bool needs_clflush)
+{
+	char *vaddr;
+	int ret;
+
+	if (unlikely(page_do_bit17_swizzling))
+		return -EINVAL;
+
+	vaddr = kmap_atomic(page);
+	if (needs_clflush)
+		drm_clflush_virt_range(vaddr + shmem_page_offset,
+				       page_length);
+	ret = __copy_to_user_inatomic(user_data,
+				      vaddr + shmem_page_offset,
+				      page_length);
+	kunmap_atomic(vaddr);
+
+	return ret ? -EFAULT : 0;
+}
+
+static void
+shmem_clflush_swizzled_range(char *addr, unsigned long length,
+			     bool swizzled)
+{
+	if (unlikely(swizzled)) {
+		unsigned long start = (unsigned long) addr;
+		unsigned long end = (unsigned long) addr + length;
+
+		/* For swizzling simply ensure that we always flush both
+		 * channels. Lame, but simple and it works. Swizzled
+		 * pwrite/pread is far from a hotpath - current userspace
+		 * doesn't use it at all. */
+		start = round_down(start, 128);
+		end = round_up(end, 128);
+
+		drm_clflush_virt_range((void *)start, end - start);
+	} else {
+		drm_clflush_virt_range(addr, length);
+	}
+
+}
+
+/* Only difference to the fast-path function is that this can handle bit17
+ * and uses non-atomic copy and kmap functions. */
+static int
+shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
+		 char __user *user_data,
+		 bool page_do_bit17_swizzling, bool needs_clflush)
+{
+	char *vaddr;
+	int ret;
+
+	vaddr = kmap(page);
+	if (needs_clflush)
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);
+
+	if (page_do_bit17_swizzling)
+		ret = __copy_to_user_swizzled(user_data,
+					      vaddr, shmem_page_offset,
+					      page_length);
+	else
+		ret = __copy_to_user(user_data,
+				     vaddr + shmem_page_offset,
+				     page_length);
+	kunmap(page);
+
+	return ret ? -EFAULT : 0;
+}
+
+static int
+i915_gem_shmem_pread(struct drm_device *dev,
+		     struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pread *args,
+		     struct drm_file *file)
+{
+	char __user *user_data;
+	ssize_t remain;
+	loff_t offset;
+	int shmem_page_offset, page_length, ret = 0;
+	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+	int prefaulted = 0;
+	int needs_clflush = 0;
+	struct sg_page_iter sg_iter;
+
+	user_data = to_user_ptr(args->data_ptr);
+	remain = args->size;
+
+	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+		/* If we're not in the cpu read domain, set ourselves into the gtt
+		 * read domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will dirty the data
+		 * anyway again before the next pread happens. */
+		if (obj->cache_level == I915_CACHE_NONE)
+			needs_clflush = 1;
+		if (obj->gtt_space) {
+			ret = i915_gem_object_set_to_gtt_domain(obj, false);
+			if (ret)
+				return ret;
+		}
+	}
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	offset = args->offset;
+
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+
+		if (remain <= 0)
+			break;
+
+		/* Operation in this page
+		 *
+		 * shmem_page_offset = offset within page in shmem file
+		 * page_length = bytes to copy for this page
+		 */
+		shmem_page_offset = offset_in_page(offset);
+		page_length = remain;
+		if ((shmem_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - shmem_page_offset;
+
+		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+			(page_to_phys(page) & (1 << 17)) != 0;
+
+		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
+				       user_data, page_do_bit17_swizzling,
+				       needs_clflush);
+		if (ret == 0)
+			goto next_page;
+
+		mutex_unlock(&dev->struct_mutex);
+
+		if (!prefaulted) {
+			ret = fault_in_multipages_writeable(user_data, remain);
+			/* Userspace is tricking us, but we've already clobbered
+			 * its pages with the prefault and promised to write the
+			 * data up to the first fault. Hence ignore any errors
+			 * and just continue. */
+			(void)ret;
+			prefaulted = 1;
+		}
+
+		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
+				       user_data, page_do_bit17_swizzling,
+				       needs_clflush);
+
+		mutex_lock(&dev->struct_mutex);
+
+next_page:
+		mark_page_accessed(page);
+
+		if (ret)
+			goto out;
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+out:
+	i915_gem_object_unpin_pages(obj);
+
+	return ret;
+}
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file)
+{
+	struct drm_i915_gem_pread *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret = 0;
+
+	if (args->size == 0)
+		return 0;
+
+	if (!access_ok(VERIFY_WRITE,
+		       to_user_ptr(args->data_ptr),
+		       args->size))
+		return -EFAULT;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	/* Bounds check source.  */
+	if (args->offset > obj->base.size ||
+	    args->size > obj->base.size - args->offset) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* prime objects have no backing filp to GEM pread/pwrite
+	 * pages from.
+	 */
+	if (!obj->base.filp) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	trace_i915_gem_object_pread(obj, args->offset, args->size);
+
+	ret = i915_gem_shmem_pread(dev, obj, args, file);
+
+out:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/* This is the fast write path which cannot handle
+ * page faults in the source data
+ */
+
+static inline int
+fast_user_write(struct io_mapping *mapping,
+		loff_t page_base, int page_offset,
+		char __user *user_data,
+		int length)
+{
+	void __iomem *vaddr_atomic;
+	void *vaddr;
+	unsigned long unwritten;
+
+	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
+	/* We can use the cpu mem copy function because this is x86. */
+	vaddr = (void __force*)vaddr_atomic + page_offset;
+	unwritten = __copy_from_user_inatomic_nocache(vaddr,
+						      user_data, length);
+	io_mapping_unmap_atomic(vaddr_atomic);
+	return unwritten;
+}
+
+/**
+ * This is the fast pwrite path, where we copy the data directly from the
+ * user into the GTT, uncached.
+ */
+static int
+i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+			 struct drm_i915_gem_object *obj,
+			 struct drm_i915_gem_pwrite *args,
+			 struct drm_file *file)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	ssize_t remain;
+	loff_t offset, page_base;
+	char __user *user_data;
+	int page_offset, page_length, ret;
+
+	ret = i915_gem_object_pin(obj, 0, true, true);
+	if (ret)
+		goto out;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret)
+		goto out_unpin;
+
+	ret = i915_gem_object_put_fence(obj);
+	if (ret)
+		goto out_unpin;
+
+	user_data = to_user_ptr(args->data_ptr);
+	remain = args->size;
+
+	offset = obj->gtt_offset + args->offset;
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * page_base = page offset within aperture
+		 * page_offset = offset within page
+		 * page_length = bytes to copy for this page
+		 */
+		page_base = offset & PAGE_MASK;
+		page_offset = offset_in_page(offset);
+		page_length = remain;
+		if ((page_offset + remain) > PAGE_SIZE)
+			page_length = PAGE_SIZE - page_offset;
+
+		/* If we get a fault while copying data, then (presumably) our
+		 * source page isn't available.  Return the error and we'll
+		 * retry in the slow path.
+		 */
+		if (fast_user_write(dev_priv->gtt.mappable, page_base,
+				    page_offset, user_data, page_length)) {
+			ret = -EFAULT;
+			goto out_unpin;
+		}
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+out_unpin:
+	i915_gem_object_unpin(obj);
+out:
+	return ret;
+}
+
+/* Per-page copy function for the shmem pwrite fastpath.
+ * Flushes invalid cachelines before writing to the target if
+ * needs_clflush_before is set and flushes out any written cachelines after
+ * writing if needs_clflush is set. */
+static int
+shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
+		  char __user *user_data,
+		  bool page_do_bit17_swizzling,
+		  bool needs_clflush_before,
+		  bool needs_clflush_after)
+{
+	char *vaddr;
+	int ret;
+
+	if (unlikely(page_do_bit17_swizzling))
+		return -EINVAL;
+
+	vaddr = kmap_atomic(page);
+	if (needs_clflush_before)
+		drm_clflush_virt_range(vaddr + shmem_page_offset,
+				       page_length);
+	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
+						user_data,
+						page_length);
+	if (needs_clflush_after)
+		drm_clflush_virt_range(vaddr + shmem_page_offset,
+				       page_length);
+	kunmap_atomic(vaddr);
+
+	return ret ? -EFAULT : 0;
+}
+
+/* Only difference to the fast-path function is that this can handle bit17
+ * and uses non-atomic copy and kmap functions. */
+static int
+shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
+		  char __user *user_data,
+		  bool page_do_bit17_swizzling,
+		  bool needs_clflush_before,
+		  bool needs_clflush_after)
+{
+	char *vaddr;
+	int ret;
+
+	vaddr = kmap(page);
+	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);
+	if (page_do_bit17_swizzling)
+		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
+						user_data,
+						page_length);
+	else
+		ret = __copy_from_user(vaddr + shmem_page_offset,
+				       user_data,
+				       page_length);
+	if (needs_clflush_after)
+		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
+					     page_length,
+					     page_do_bit17_swizzling);
+	kunmap(page);
+
+	return ret ? -EFAULT : 0;
+}
+
+static int
+i915_gem_shmem_pwrite(struct drm_device *dev,
+		      struct drm_i915_gem_object *obj,
+		      struct drm_i915_gem_pwrite *args,
+		      struct drm_file *file)
+{
+	ssize_t remain;
+	loff_t offset;
+	char __user *user_data;
+	int shmem_page_offset, page_length, ret = 0;
+	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+	int hit_slowpath = 0;
+	int needs_clflush_after = 0;
+	int needs_clflush_before = 0;
+	struct sg_page_iter sg_iter;
+
+	user_data = to_user_ptr(args->data_ptr);
+	remain = args->size;
+
+	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
+	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		/* If we're not in the cpu write domain, set ourselves into the gtt
+		 * write domain and manually flush cachelines (if required). This
+		 * optimizes for the case when the gpu will use the data
+		 * right away and we therefore have to clflush anyway. */
+		if (obj->cache_level == I915_CACHE_NONE)
+			needs_clflush_after = 1;
+		if (obj->gtt_space) {
+			ret = i915_gem_object_set_to_gtt_domain(obj, true);
+			if (ret)
+				return ret;
+		}
+	}
+	/* The same trick applies to invalidating partially written cachelines
+	 * before the write. */
+	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
+	    && obj->cache_level == I915_CACHE_NONE)
+		needs_clflush_before = 1;
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	offset = args->offset;
+	obj->dirty = 1;
+
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+		int partial_cacheline_write;
+
+		if (remain <= 0)
+			break;
+
+		/* Operation in this page
+		 *
+		 * shmem_page_offset = offset within page in shmem file
+		 * page_length = bytes to copy for this page
+		 */
+		shmem_page_offset = offset_in_page(offset);
+
+		page_length = remain;
+		if ((shmem_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - shmem_page_offset;
+
+		/* If we don't overwrite a cacheline completely we need to be
+		 * careful to have up-to-date data by first clflushing. Don't
+		 * overcomplicate things and flush the entire page. */
+		partial_cacheline_write = needs_clflush_before &&
+			((shmem_page_offset | page_length)
+				& (boot_cpu_data.x86_clflush_size - 1));
+
+		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+			(page_to_phys(page) & (1 << 17)) != 0;
+
+		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
+					user_data, page_do_bit17_swizzling,
+					partial_cacheline_write,
+					needs_clflush_after);
+		if (ret == 0)
+			goto next_page;
+
+		hit_slowpath = 1;
+		mutex_unlock(&dev->struct_mutex);
+		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
+					user_data, page_do_bit17_swizzling,
+					partial_cacheline_write,
+					needs_clflush_after);
+
+		mutex_lock(&dev->struct_mutex);
+
+next_page:
+		set_page_dirty(page);
+		mark_page_accessed(page);
+
+		if (ret)
+			goto out;
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+out:
+	i915_gem_object_unpin_pages(obj);
+
+	if (hit_slowpath) {
+		/*
+		 * Fixup: Flush cpu caches in case we didn't flush the dirty
+		 * cachelines in-line while writing and the object moved
+		 * out of the cpu write domain while we've dropped the lock.
+		 */
+		if (!needs_clflush_after &&
+		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+			i915_gem_clflush_object(obj);
+			i915_gem_chipset_flush(dev);
+		}
+	}
+
+	if (needs_clflush_after)
+		i915_gem_chipset_flush(dev);
+
+	return ret;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file)
+{
+	struct drm_i915_gem_pwrite *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	if (args->size == 0)
+		return 0;
+
+	if (!access_ok(VERIFY_READ,
+		       to_user_ptr(args->data_ptr),
+		       args->size))
+		return -EFAULT;
+
+	ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+					   args->size);
+	if (ret)
+		return -EFAULT;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	/* Bounds check destination. */
+	if (args->offset > obj->base.size ||
+	    args->size > obj->base.size - args->offset) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* prime objects have no backing filp to GEM pread/pwrite
+	 * pages from.
+	 */
+	if (!obj->base.filp) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+
+	ret = -EFAULT;
+	/* We can only do the GTT pwrite on untiled buffers, as otherwise
+	 * it would end up going through the fenced access, and we'll get
+	 * different detiling behavior between reading and writing.
+	 * pread/pwrite currently are reading and writing from the CPU
+	 * perspective, requiring manual detiling by the client.
+	 */
+	if (obj->phys_obj) {
+		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+		goto out;
+	}
+
+	if (obj->cache_level == I915_CACHE_NONE &&
+	    obj->tiling_mode == I915_TILING_NONE &&
+	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+		/* Note that the gtt paths might fail with non-page-backed user
+		 * pointers (e.g. gtt mappings when moving data between
+		 * textures). Fallback to the shmem path in that case. */
+	}
+
+	if (ret == -EFAULT || ret == -ENOSPC)
+		ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+
+out:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int
+i915_gem_check_wedge(struct i915_gpu_error *error,
+		     bool interruptible)
+{
+	if (i915_reset_in_progress(error)) {
+		/* Non-interruptible callers can't handle -EAGAIN, hence return
+		 * -EIO unconditionally for these. */
+		if (!interruptible)
+			return -EIO;
+
+		/* Recovery complete, but the reset failed ... */
+		if (i915_terminally_wedged(error))
+			return -EIO;
+
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/*
+ * Compare seqno against outstanding lazy request. Emit a request if they are
+ * equal.
+ */
+static int
+i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+{
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+	ret = 0;
+	if (seqno == ring->outstanding_lazy_request)
+		ret = i915_add_request(ring, NULL, NULL);
+
+	return ret;
+}
+
+/**
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: the sequence number to wait for
+ * @reset_counter: reset sequence associated with the given seqno
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an SMP-safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before, and an appropriate smp_rmb must be
+ * inserted.
+ *
+ * Returns 0 if the seqno was found within the allotted time. Else returns the
+ * errno with the remaining time filled in the timeout argument.
+ */
+static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+			unsigned reset_counter,
+			bool interruptible, struct timespec *timeout)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct timespec before, now, wait_time = {1, 0};
+	unsigned long timeout_jiffies;
+	long end;
+	bool wait_forever = true;
+	int ret;
+
+	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+		return 0;
+
+	trace_i915_gem_request_wait_begin(ring, seqno);
+
+	if (timeout != NULL) {
+		wait_time = *timeout;
+		wait_forever = false;
+	}
+
+	timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
+
+	if (WARN_ON(!ring->irq_get(ring)))
+		return -ENODEV;
+
+	/* Record current time in case we are interrupted by a signal, or wedged */
+	getrawmonotonic(&before);
+
+#define EXIT_COND \
+	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
+	 i915_reset_in_progress(&dev_priv->gpu_error) || \
+	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+	do {
+		if (interruptible)
+			end = wait_event_interruptible_timeout(ring->irq_queue,
+							       EXIT_COND,
+							       timeout_jiffies);
+		else
+			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
+						 timeout_jiffies);
+
+		/* We need to check whether any gpu reset happened in between
+		 * the caller grabbing the seqno and now ... */
+		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+			end = -EAGAIN;
+
+		/* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
+		 * gone. */
+		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+		if (ret)
+			end = ret;
+	} while (end == 0 && wait_forever);
+
+	getrawmonotonic(&now);
+
+	ring->irq_put(ring);
+	trace_i915_gem_request_wait_end(ring, seqno);
+#undef EXIT_COND
+
+	if (timeout) {
+		struct timespec sleep_time = timespec_sub(now, before);
+		*timeout = timespec_sub(*timeout, sleep_time);
+		if (!timespec_valid(timeout)) /* i.e. negative time remains */
+			set_normalized_timespec(timeout, 0, 0);
+	}
+
+	switch (end) {
+	case -EIO:
+	case -EAGAIN: /* Wedged */
+	case -ERESTARTSYS: /* Signal */
+		return (int)end;
+	case 0: /* Timeout */
+		return -ETIME;
+	default: /* Completed */
+		WARN_ON(end < 0); /* We're not aware of other errors */
+		return 0;
+	}
+}
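+
+/*
+ * Example: a caller passing *timeout = 500ms that is woken by the seqno
+ * after 200ms of sleeping has *timeout rewritten to roughly 300ms; if the
+ * seqno never arrives, -ETIME is returned and *timeout is clamped to zero.
+ */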
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool interruptible = dev_priv->mm.interruptible;
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(seqno == 0);
+
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_check_olr(ring, seqno);
+	if (ret)
+		return ret;
+
+	return __wait_seqno(ring, seqno,
+			    atomic_read(&dev_priv->gpu_error.reset_counter),
+			    interruptible, NULL);
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly)
+{
+	struct intel_ring_buffer *ring = obj->ring;
+	u32 seqno;
+	int ret;
+
+	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+	if (seqno == 0)
+		return 0;
+
+	ret = i915_wait_seqno(ring, seqno);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests_ring(ring);
+
+	/* Manually manage the write flush as we may have not yet
+	 * retired the buffer.
+	 */
+	if (obj->last_write_seqno &&
+	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
+		obj->last_write_seqno = 0;
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	return 0;
+}
+
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static __must_check int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    bool readonly)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = obj->ring;
+	unsigned reset_counter;
+	u32 seqno;
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!dev_priv->mm.interruptible);
+
+	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
+	if (seqno == 0)
+		return 0;
+
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_check_olr(ring, seqno);
+	if (ret)
+		return ret;
+
+	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	mutex_unlock(&dev->struct_mutex);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	mutex_lock(&dev->struct_mutex);
+
+	i915_gem_retire_requests_ring(ring);
+
+	/* Manually manage the write flush as we may have not yet
+	 * retired the buffer.
+	 */
+	if (ret == 0 &&
+	    obj->last_write_seqno &&
+	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
+		obj->last_write_seqno = 0;
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	return ret;
+}
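+
+/*
+ * Because struct_mutex is dropped around __wait_seqno() above, another
+ * thread may re-busy, unbind or even purge the object before the lock is
+ * re-acquired; callers must not rely on any object state cached across
+ * this call.
+ */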
+
+/**
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file)
+{
+	struct drm_i915_gem_set_domain *args = data;
+	struct drm_i915_gem_object *obj;
+	uint32_t read_domains = args->read_domains;
+	uint32_t write_domain = args->write_domain;
+	int ret;
+
+	/* Only handle setting domains to types used by the CPU. */
+	if (write_domain & I915_GEM_GPU_DOMAINS)
+		return -EINVAL;
+
+	if (read_domains & I915_GEM_GPU_DOMAINS)
+		return -EINVAL;
+
+	/* Having something in the write domain implies it's in the read
+	 * domain, and only that read domain.  Enforce that in the request.
+	 */
+	if (write_domain != 0 && read_domains != write_domain)
+		return -EINVAL;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	/* Try to flush the object off the GPU without holding the lock.
+	 * We will repeat the flush holding the lock in the normal manner
+	 * to catch cases where we are gazumped.
+	 */
+	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+	if (ret)
+		goto unref;
+
+	if (read_domains & I915_GEM_DOMAIN_GTT) {
+		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+
+		/* Silently promote "you're not bound, there was nothing to do"
+		 * to success, since the client was just asking us to
+		 * make sure everything was done.
+		 */
+		if (ret == -EINVAL)
+			ret = 0;
+	} else {
+		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+	}
+
+unref:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
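+
+/*
+ * Userspace sketch (illustrative, using libdrm): preparing a buffer for
+ * writes through a GTT mapping looks like
+ *
+ *	struct drm_i915_gem_set_domain arg = {
+ *		.handle = handle,
+ *		.read_domains = I915_GEM_DOMAIN_GTT,
+ *		.write_domain = I915_GEM_DOMAIN_GTT,
+ *	};
+ *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
+ */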
+
+/**
+ * Called when user space has done writes to this buffer
+ */
+int
+i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file)
+{
+	struct drm_i915_gem_sw_finish *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret = 0;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	/* Pinned buffers may be scanout, so flush the cache */
+	if (obj->pin_count)
+		i915_gem_object_flush_cpu_write_domain(obj);
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file)
+{
+	struct drm_i915_gem_mmap *args = data;
+	struct drm_gem_object *obj;
+	unsigned long addr;
+
+	obj = drm_gem_object_lookup(dev, file, args->handle);
+	if (obj == NULL)
+		return -ENOENT;
+
+	/* prime objects have no backing filp to GEM mmap
+	 * pages from.
+	 */
+	if (!obj->filp) {
+		drm_gem_object_unreference_unlocked(obj);
+		return -EINVAL;
+	}
+
+	addr = vm_mmap(obj->filp, 0, args->size,
+		       PROT_READ | PROT_WRITE, MAP_SHARED,
+		       args->offset);
+	drm_gem_object_unreference_unlocked(obj);
+	if (IS_ERR((void *)addr))
+		return addr;
+
+	args->addr_ptr = (uint64_t) addr;
+
+	return 0;
+}
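+
+/*
+ * Note that DRM_IOCTL_I915_GEM_MMAP returns the CPU address directly in
+ * args->addr_ptr; unlike the GTT path below, no subsequent mmap(2) on the
+ * drm fd is required.
+ */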
+
+/**
+ * i915_gem_fault - fault a page into the GTT
+ * @vma: VMA in question
+ * @vmf: fault info
+ *
+ * The fault handler is set up by drm_gem_mmap() when an object is GTT-mapped
+ * from userspace.  The fault handler takes care of binding the object to
+ * the GTT (if needed), allocating and programming a fence register (again,
+ * only if needed based on whether the old reg is still valid or the object
+ * is tiled) and inserting a new PTE into the faulting process.
+ *
+ * Note that the faulting process may involve evicting existing objects
+ * from the GTT and/or fence registers to make room.  So performance may
+ * suffer if the GTT working set is large or there are few fence registers
+ * left.
+ */
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+	struct drm_device *dev = obj->base.dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	pgoff_t page_offset;
+	unsigned long pfn;
+	int ret = 0;
+	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
+
+	/* We don't use vmf->pgoff since that has the fake offset */
+	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+		PAGE_SHIFT;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto out;
+
+	trace_i915_gem_object_fault(obj, page_offset, true, write);
+
+	/* Access to snoopable pages through the GTT is incoherent. */
+	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	/* Now bind it into the GTT if needed */
+	ret = i915_gem_object_pin(obj, 0, true, false);
+	if (ret)
+		goto unlock;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (ret)
+		goto unpin;
+
+	ret = i915_gem_object_get_fence(obj);
+	if (ret)
+		goto unpin;
+
+	obj->fault_mappable = true;
+
+	pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
+		page_offset;
+
+	/* Finally, remap it using the new GTT offset */
+	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+unpin:
+	i915_gem_object_unpin(obj);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+out:
+	switch (ret) {
+	case -EIO:
+		/* If this -EIO is due to a gpu hang, give the reset code a
+		 * chance to clean up the mess. Otherwise return the proper
+		 * SIGBUS. */
+		if (i915_terminally_wedged(&dev_priv->gpu_error))
+			return VM_FAULT_SIGBUS;
+	case -EAGAIN:
+		/* Give the error handler a chance to run and move the
+		 * objects off the GPU active list. Next time we service the
+		 * fault, we should be able to transition the page into the
+		 * GTT without touching the GPU (and so avoid further
+		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
+		 * with coherency, just lost writes.
+		 */
+		set_need_resched();
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+	case -EBUSY:
+		/*
+		 * EBUSY is ok: this just means that another thread
+		 * already did the job.
+		 */
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	case -ENOSPC:
+		return VM_FAULT_SIGBUS;
+	default:
+		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
+		return VM_FAULT_SIGBUS;
+	}
+}
+
+/**
+ * i915_gem_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ *
+ * It is vital that we remove the page mapping if we have mapped a tiled
+ * object through the GTT and then lose the fence register due to
+ * resource pressure. Similarly if the object has been moved out of the
+ * aperture, then pages mapped into userspace must be revoked. Removing the
+ * mapping will then trigger a page fault on the next user access, allowing
+ * fixup by i915_gem_fault().
+ */
+void
+i915_gem_release_mmap(struct drm_i915_gem_object *obj)
+{
+	if (!obj->fault_mappable)
+		return;
+
+	if (obj->base.dev->dev_mapping)
+		unmap_mapping_range(obj->base.dev->dev_mapping,
+				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
+				    obj->base.size, 1);
+
+	obj->fault_mappable = false;
+}
+
+uint32_t
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
+{
+	uint32_t gtt_size;
+
+	if (INTEL_INFO(dev)->gen >= 4 ||
+	    tiling_mode == I915_TILING_NONE)
+		return size;
+
+	/* Previous chips need a power-of-two fence region when tiling */
+	if (INTEL_INFO(dev)->gen == 3)
+		gtt_size = 1024*1024;
+	else
+		gtt_size = 512*1024;
+
+	while (gtt_size < size)
+		gtt_size <<= 1;
+
+	return gtt_size;
+}
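+
+/*
+ * Example: a 600KiB tiled object needs a 1MiB fence region on both gen2
+ * (512KiB doubled once) and gen3 (already at the 1MiB minimum), while on
+ * gen4+ (or untiled) the object size is returned unchanged.
+ */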
+
+/**
+ * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, taking into account
+ * potential fence register mapping.
+ */
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+			   int tiling_mode, bool fenced)
+{
+	/*
+	 * Minimum alignment is 4k (GTT page size), but might be greater
+	 * if a fence register is needed for the object.
+	 */
+	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
+	    tiling_mode == I915_TILING_NONE)
+		return 4096;
+
+	/*
+	 * Previous chips need to be aligned to the size of the smallest
+	 * fence register that can contain the object.
+	 */
+	return i915_gem_get_gtt_size(dev, size, tiling_mode);
+}
+
+static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int ret;
+
+	if (obj->base.map_list.map)
+		return 0;
+
+	dev_priv->mm.shrinker_no_lock_stealing = true;
+
+	ret = drm_gem_create_mmap_offset(&obj->base);
+	if (ret != -ENOSPC)
+		goto out;
+
+	/* Badly fragmented mmap space? The only way we can recover
+	 * space is by destroying unwanted objects. We can't randomly release
+	 * mmap_offsets as userspace expects them to be persistent for the
+	 * lifetime of the objects. The closest we can get is to release the
+	 * offsets on purgeable objects by truncating them and marking them purged,
+	 * which prevents userspace from ever using that object again.
+	 */
+	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+	ret = drm_gem_create_mmap_offset(&obj->base);
+	if (ret != -ENOSPC)
+		goto out;
+
+	i915_gem_shrink_all(dev_priv);
+	ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+	dev_priv->mm.shrinker_no_lock_stealing = false;
+
+	return ret;
+}
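+
+/*
+ * The allocation above escalates in three stages: try the offset allocator
+ * as-is, purge enough purgeable objects to cover this object's page count
+ * and retry, then shrink everything for a final attempt. Setting
+ * shrinker_no_lock_stealing keeps the shrinker from stealing our lock
+ * while we do so.
+ */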
+
+static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
+{
+	if (!obj->base.map_list.map)
+		return;
+
+	drm_gem_free_mmap_offset(&obj->base);
+}
+
+int
+i915_gem_mmap_gtt(struct drm_file *file,
+		  struct drm_device *dev,
+		  uint32_t handle,
+		  uint64_t *offset)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	if (obj->base.size > dev_priv->gtt.mappable_end) {
+		ret = -E2BIG;
+		goto out;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED) {
+		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = i915_gem_object_create_mmap_offset(obj);
+	if (ret)
+		goto out;
+
+	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
+
+out:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file)
+{
+	struct drm_i915_gem_mmap_gtt *args = data;
+
+	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+}
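+
+/*
+ * Userspace sketch (illustrative): the fake offset returned above is fed to
+ * a regular mmap(2) on the drm fd,
+ *
+ *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		   fd, args.offset);
+ *
+ * after which the first access faults into i915_gem_fault().
+ */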
+
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+{
+	struct inode *inode;
+
+	i915_gem_object_free_mmap_offset(obj);
+
+	if (obj->base.filp == NULL)
+		return;
+
+	/* Our goal here is to return as much of the memory as possible
+	 * back to the system, as we are called from OOM.
+	 * To do this we must instruct the shmfs to drop all of its
+	 * backing pages, *now*.
+	 */
+	inode = file_inode(obj->base.filp);
+	shmem_truncate_range(inode, 0, (loff_t)-1);
+
+	obj->madv = __I915_MADV_PURGED;
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+	return obj->madv == I915_MADV_DONTNEED;
+}
+
+static void
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	int ret;
+
+	BUG_ON(obj->madv == __I915_MADV_PURGED);
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
+		WARN_ON(ret != -EIO);
+		i915_gem_clflush_object(obj);
+		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		i915_gem_object_save_bit_17_swizzle(obj);
+
+	if (obj->madv == I915_MADV_DONTNEED)
+		obj->dirty = 0;
+
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+
+		if (obj->dirty)
+			set_page_dirty(page);
+
+		if (obj->madv == I915_MADV_WILLNEED)
+			mark_page_accessed(page);
+
+		page_cache_release(page);
+	}
+	obj->dirty = 0;
+
+	sg_free_table(obj->pages);
+	kfree(obj->pages);
+}
+
+int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+	const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+	if (obj->pages == NULL)
+		return 0;
+
+	BUG_ON(obj->gtt_space);
+
+	if (obj->pages_pin_count)
+		return -EBUSY;
+
+	/* ->put_pages might need to allocate memory for the bit17 swizzle
+	 * array, hence protect them from being reaped by removing them from gtt
+	 * lists early. */
+	list_del(&obj->gtt_list);
+
+	ops->put_pages(obj);
+	obj->pages = NULL;
+
+	if (i915_gem_object_is_purgeable(obj))
+		i915_gem_object_truncate(obj);
+
+	return 0;
+}
+
+static long
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+		  bool purgeable_only)
+{
+	struct drm_i915_gem_object *obj, *next;
+	long count = 0;
+
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.unbound_list,
+				 gtt_list) {
+		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
+		    i915_gem_object_put_pages(obj) == 0) {
+			count += obj->base.size >> PAGE_SHIFT;
+			if (count >= target)
+				return count;
+		}
+	}
+
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.inactive_list,
+				 mm_list) {
+		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
+		    i915_gem_object_unbind(obj) == 0 &&
+		    i915_gem_object_put_pages(obj) == 0) {
+			count += obj->base.size >> PAGE_SHIFT;
+			if (count >= target)
+				return count;
+		}
+	}
+
+	return count;
+}
+
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+	return __i915_gem_shrink(dev_priv, target, true);
+}
+
+static void
+i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *obj, *next;
+
+	i915_gem_evict_everything(dev_priv->dev);
+
+	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+		i915_gem_object_put_pages(obj);
+}
+
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int page_count, i;
+	struct address_space *mapping;
+	struct sg_table *st;
+	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
+	struct page *page;
+	unsigned long last_pfn = 0;	/* suppress gcc warning */
+	gfp_t gfp;
+
+	/* Assert that the object is not currently in any GPU domain. As it
+	 * wasn't in the GTT, there shouldn't be any way it could have been in
+	 * a GPU cache.
+	 */
+	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return -ENOMEM;
+
+	page_count = obj->base.size / PAGE_SIZE;
+	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+		sg_free_table(st);
+		kfree(st);
+		return -ENOMEM;
+	}
+
+	/* Get the list of pages out of our struct file.  They'll be pinned
+	 * at this point until we release them.
+	 *
+	 * Fail silently without starting the shrinker
+	 */
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	gfp = mapping_gfp_mask(mapping);
+	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+	gfp &= ~(__GFP_IO | __GFP_WAIT);
+	sg = st->sgl;
+	st->nents = 0;
+	for (i = 0; i < page_count; i++) {
+		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+		if (IS_ERR(page)) {
+			i915_gem_purge(dev_priv, page_count);
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+		}
+		if (IS_ERR(page)) {
+			/* We've tried hard to allocate the memory by reaping
+			 * our own buffer, now let the real VM do its job and
+			 * go down in flames if truly OOM.
+			 */
+			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
+			gfp |= __GFP_IO | __GFP_WAIT;
+
+			i915_gem_shrink_all(dev_priv);
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+			if (IS_ERR(page))
+				goto err_pages;
+
+			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+			gfp &= ~(__GFP_IO | __GFP_WAIT);
+		}
+#ifdef CONFIG_SWIOTLB
+		if (swiotlb_nr_tbl()) {
+			st->nents++;
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+			sg = sg_next(sg);
+			continue;
+		}
+#endif
+		if (!i || page_to_pfn(page) != last_pfn + 1) {
+			if (i)
+				sg = sg_next(sg);
+			st->nents++;
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+		} else {
+			sg->length += PAGE_SIZE;
+		}
+		last_pfn = page_to_pfn(page);
+	}
+#ifdef CONFIG_SWIOTLB
+	if (!swiotlb_nr_tbl())
+#endif
+		sg_mark_end(sg);
+	obj->pages = st;
+
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		i915_gem_object_do_bit_17_swizzle(obj);
+
+	return 0;
+
+err_pages:
+	sg_mark_end(sg);
+	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+		page_cache_release(sg_page_iter_page(&sg_iter));
+	sg_free_table(st);
+	kfree(st);
+	return PTR_ERR(page);
+}
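+
+/*
+ * Note on the page loop above: physically contiguous pages are coalesced
+ * into a single scatterlist entry by growing sg->length, except under
+ * SWIOTLB where one page per entry keeps every segment bounceable.
+ */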
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	const struct drm_i915_gem_object_ops *ops = obj->ops;
+	int ret;
+
+	if (obj->pages)
+		return 0;
+
+	if (obj->madv != I915_MADV_WILLNEED) {
+		DRM_ERROR("Attempting to obtain a purgeable object\n");
+		return -EINVAL;
+	}
+
+	BUG_ON(obj->pages_pin_count);
+
+	ret = ops->get_pages(obj);
+	if (ret)
+		return ret;
+
+	list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+	return 0;
+}
+
+void
+i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+			       struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 seqno = intel_ring_get_seqno(ring);
+
+	BUG_ON(ring == NULL);
+	if (obj->ring != ring && obj->last_write_seqno) {
+		/* Keep the seqno relative to the current ring */
+		obj->last_write_seqno = seqno;
+	}
+	obj->ring = ring;
+
+	/* Add a reference if we're newly entering the active list. */
+	if (!obj->active) {
+		drm_gem_object_reference(&obj->base);
+		obj->active = 1;
+	}
+
+	/* Move from whatever list we were on to the tail of execution. */
+	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+	list_move_tail(&obj->ring_list, &ring->active_list);
+
+	obj->last_read_seqno = seqno;
+
+	if (obj->fenced_gpu_access) {
+		obj->last_fenced_seqno = seqno;
+
+		/* Bump MRU to take account of the delayed flush */
+		if (obj->fence_reg != I915_FENCE_REG_NONE) {
+			struct drm_i915_fence_reg *reg;
+
+			reg = &dev_priv->fence_regs[obj->fence_reg];
+			list_move_tail(&reg->lru_list,
+				       &dev_priv->mm.fence_list);
+		}
+	}
+}
+
+static void
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
+	BUG_ON(!obj->active);
+
+	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+	list_del_init(&obj->ring_list);
+	obj->ring = NULL;
+
+	obj->last_read_seqno = 0;
+	obj->last_write_seqno = 0;
+	obj->base.write_domain = 0;
+
+	obj->last_fenced_seqno = 0;
+	obj->fenced_gpu_access = false;
+
+	obj->active = 0;
+	drm_gem_object_unreference(&obj->base);
+
+	WARN_ON(i915_verify_lists(dev));
+}
+
+static int
+i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int ret, i, j;
+
+	/* Carefully retire all requests without writing to the rings */
+	for_each_ring(ring, dev_priv, i) {
+		ret = intel_ring_idle(ring);
+		if (ret)
+			return ret;
+	}
+	i915_gem_retire_requests(dev);
+
+	/* Finally reset hw state */
+	for_each_ring(ring, dev_priv, i) {
+		intel_ring_init_seqno(ring, seqno);
+
+		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+			ring->sync_seqno[j] = 0;
+	}
+
+	return 0;
+}
+
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (seqno == 0)
+		return -EINVAL;
+
+	/* The HWS page seqno needs to be set to one less than the value
+	 * we will inject into the ring
+	 */
+	ret = i915_gem_init_seqno(dev, seqno - 1);
+	if (ret)
+		return ret;
+
+	/* Carefully set the last_seqno value so that wrap
+	 * detection still works
+	 */
+	dev_priv->next_seqno = seqno;
+	dev_priv->last_seqno = seqno - 1;
+	if (dev_priv->last_seqno == 0)
+		dev_priv->last_seqno--;
+
+	return 0;
+}
+
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* reserve 0 for non-seqno */
+	if (dev_priv->next_seqno == 0) {
+		int ret = i915_gem_init_seqno(dev, 0);
+		if (ret)
+			return ret;
+
+		dev_priv->next_seqno = 1;
+	}
+
+	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
+	return 0;
+}
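+
+/*
+ * Example: when next_seqno wraps to 0, i915_gem_init_seqno() idles every
+ * ring and resets the hardware seqno bookkeeping before 1 is handed out,
+ * so i915_seqno_passed() never sees a discontinuity.
+ */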
+
+int
+i915_add_request(struct intel_ring_buffer *ring,
+		 struct drm_file *file,
+		 u32 *out_seqno)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_request *request;
+	u32 request_ring_position;
+	int was_empty;
+	int ret;
+
+	/*
+	 * Emit any outstanding flushes - execbuf can fail to emit the flush
+	 * after having emitted the batchbuffer command. Hence we need to fix
+	 * things up similar to emitting the lazy request. The difference here
+	 * is that the flush _must_ happen before the next request, no matter
+	 * what.
+	 */
+	ret = intel_ring_flush_all_caches(ring);
+	if (ret)
+		return ret;
+
+	request = kmalloc(sizeof(*request), GFP_KERNEL);
+	if (request == NULL)
+		return -ENOMEM;
+
+	/* Record the position of the start of the request so that
+	 * should we detect the updated seqno part-way through the
+	 * GPU processing the request, we never over-estimate the
+	 * position of the head.
+	 */
+	request_ring_position = intel_ring_get_tail(ring);
+
+	ret = ring->add_request(ring);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+
+	request->seqno = intel_ring_get_seqno(ring);
+	request->ring = ring;
+	request->tail = request_ring_position;
+	request->emitted_jiffies = jiffies;
+	was_empty = list_empty(&ring->request_list);
+	list_add_tail(&request->list, &ring->request_list);
+	request->file_priv = NULL;
+
+	if (file) {
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+
+		spin_lock(&file_priv->mm.lock);
+		request->file_priv = file_priv;
+		list_add_tail(&request->client_list,
+			      &file_priv->mm.request_list);
+		spin_unlock(&file_priv->mm.lock);
+	}
+
+	trace_i915_gem_request_add(ring, request->seqno);
+	ring->outstanding_lazy_request = 0;
+
+	if (!dev_priv->mm.suspended) {
+		if (i915_enable_hangcheck) {
+			mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+				  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+		}
+		if (was_empty) {
+			queue_delayed_work(dev_priv->wq,
+					   &dev_priv->mm.retire_work,
+					   round_jiffies_up_relative(HZ));
+			intel_mark_busy(dev_priv->dev);
+		}
+	}
+
+	if (out_seqno)
+		*out_seqno = request->seqno;
+	return 0;
+}
+
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+	struct drm_i915_file_private *file_priv = request->file_priv;
+
+	if (!file_priv)
+		return;
+
+	spin_lock(&file_priv->mm.lock);
+	if (request->file_priv) {
+		list_del(&request->client_list);
+		request->file_priv = NULL;
+	}
+	spin_unlock(&file_priv->mm.lock);
+}
+
+static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+				      struct intel_ring_buffer *ring)
+{
+	while (!list_empty(&ring->request_list)) {
+		struct drm_i915_gem_request *request;
+
+		request = list_first_entry(&ring->request_list,
+					   struct drm_i915_gem_request,
+					   list);
+
+		list_del(&request->list);
+		i915_gem_request_remove_from_client(request);
+		kfree(request);
+	}
+
+	while (!list_empty(&ring->active_list)) {
+		struct drm_i915_gem_object *obj;
+
+		obj = list_first_entry(&ring->active_list,
+				       struct drm_i915_gem_object,
+				       ring_list);
+
+		i915_gem_object_move_to_inactive(obj);
+	}
+}
+
+void i915_gem_restore_fences(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
+		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+		/*
+		 * Commit delayed tiling changes if we have an object still
+		 * attached to the fence, otherwise just clear the fence.
+		 */
+		if (reg->obj) {
+			i915_gem_object_update_fence(reg->obj, reg,
+						     reg->obj->tiling_mode);
+		} else {
+			i915_gem_write_fence(dev, i, NULL);
+		}
+	}
+}
+
+void i915_gem_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct intel_ring_buffer *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_reset_ring_lists(dev_priv, ring);
+
+	/* Move everything out of the GPU domains to ensure we do any
+	 * necessary invalidation upon reuse.
+	 */
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
+		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+
+	i915_gem_restore_fences(dev);
+}
+
+/**
+ * This function clears the request list as sequence numbers are passed.
+ */
+void
+i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+{
+	uint32_t seqno;
+
+	if (list_empty(&ring->request_list))
+		return;
+
+	WARN_ON(i915_verify_lists(ring->dev));
+
+	seqno = ring->get_seqno(ring, true);
+
+	while (!list_empty(&ring->request_list)) {
+		struct drm_i915_gem_request *request;
+
+		request = list_first_entry(&ring->request_list,
+					   struct drm_i915_gem_request,
+					   list);
+
+		if (!i915_seqno_passed(seqno, request->seqno))
+			break;
+
+		trace_i915_gem_request_retire(ring, request->seqno);
+		/* We know the GPU must have read the request to have
+		 * sent us the seqno + interrupt, so use the position
+		 * of tail of the request to update the last known position
+		 * of the GPU head.
+		 */
+		ring->last_retired_head = request->tail;
+
+		list_del(&request->list);
+		i915_gem_request_remove_from_client(request);
+		kfree(request);
+	}
+
+	/* Move any buffers on the active list that are no longer referenced
+	 * by the ringbuffer to the flushing/inactive lists as appropriate.
+	 */
+	while (!list_empty(&ring->active_list)) {
+		struct drm_i915_gem_object *obj;
+
+		obj = list_first_entry(&ring->active_list,
+				      struct drm_i915_gem_object,
+				      ring_list);
+
+		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+			break;
+
+		i915_gem_object_move_to_inactive(obj);
+	}
+
+	if (unlikely(ring->trace_irq_seqno &&
+		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
+		ring->irq_put(ring);
+		ring->trace_irq_seqno = 0;
+	}
+
+	WARN_ON(i915_verify_lists(ring->dev));
+}
+
+void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_retire_requests_ring(ring);
+}
+
+static void
+i915_gem_retire_work_handler(struct work_struct *work)
+{
+	drm_i915_private_t *dev_priv;
+	struct drm_device *dev;
+	struct intel_ring_buffer *ring;
+	bool idle;
+	int i;
+
+	dev_priv = container_of(work, drm_i915_private_t,
+				mm.retire_work.work);
+	dev = dev_priv->dev;
+
+	/* Come back later if the device is busy... */
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+				   round_jiffies_up_relative(HZ));
+		return;
+	}
+
+	i915_gem_retire_requests(dev);
+
+	/* Send a periodic flush down the ring so we don't hold onto GEM
+	 * objects indefinitely.
+	 */
+	idle = true;
+	for_each_ring(ring, dev_priv, i) {
+		if (ring->gpu_caches_dirty)
+			i915_add_request(ring, NULL, NULL);
+
+		idle &= list_empty(&ring->request_list);
+	}
+
+	if (!dev_priv->mm.suspended && !idle)
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+				   round_jiffies_up_relative(HZ));
+	if (idle)
+		intel_mark_idle(dev);
+
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Ensures that an object will eventually get non-busy by flushing any required
+ * write domains, emitting any outstanding lazy request and retiring any
+ * completed requests.
+ */
+static int
+i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+	if (obj->active) {
+		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
+		if (ret)
+			return ret;
+
+		i915_gem_retire_requests_ring(obj->ring);
+	}
+
+	return 0;
+}
+
+/**
+ * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Returns 0 if successful, else an error is returned with the remaining time in
+ * the timeout parameter.
+ *  -ETIME: object is still busy after timeout
+ *  -ERESTARTSYS: signal interrupted the wait
+ *  -ENOENT: object doesn't exist
+ * Also possible, but rare:
+ *  -EAGAIN: GPU wedged
+ *  -ENOMEM: damn
+ *  -ENODEV: Internal IRQ fail
+ *  -E?: The add request failed
+ *
+ * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
+ * non-zero timeout parameter the wait ioctl will wait for the given number of
+ * nanoseconds on an object becoming unbusy. Since the wait itself does so
+ * without holding struct_mutex, the object may become re-busied before this
+ * function completes. A similar but shorter race condition exists in the
+ * busy ioctl.
+ */
+int
+i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_wait *args = data;
+	struct drm_i915_gem_object *obj;
+	struct intel_ring_buffer *ring = NULL;
+	struct timespec timeout_stack, *timeout = NULL;
+	unsigned reset_counter;
+	u32 seqno = 0;
+	int ret = 0;
+
+	if (args->timeout_ns >= 0) {
+		timeout_stack = ns_to_timespec(args->timeout_ns);
+		timeout = &timeout_stack;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
+	if (&obj->base == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		return -ENOENT;
+	}
+
+	/* Need to make sure the object gets inactive eventually. */
+	ret = i915_gem_object_flush_active(obj);
+	if (ret)
+		goto out;
+
+	if (obj->active) {
+		seqno = obj->last_read_seqno;
+		ring = obj->ring;
+	}
+
+	if (seqno == 0)
+		 goto out;
+
+	/* Do this after OLR check to make sure we make forward progress polling
+	 * on this IOCTL with a 0 timeout (like busy ioctl)
+	 */
+	if (!args->timeout_ns) {
+		ret = -ETIME;
+		goto out;
+	}
+
+	drm_gem_object_unreference(&obj->base);
+	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	mutex_unlock(&dev->struct_mutex);
+
+	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
+	if (timeout)
+		args->timeout_ns = timespec_to_ns(timeout);
+	return ret;
+
+out:
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * i915_gem_object_sync - sync an object to a ring.
+ *
+ * @obj: object which may be in use on another ring.
+ * @to: ring we wish to use the object on. May be NULL.
+ *
+ * This code is meant to abstract object synchronization with the GPU.
+ * Calling with NULL implies synchronizing the object with the CPU
+ * rather than a particular GPU ring.
+ *
+ * Returns 0 if successful, else propagates up the lower layer error.
+ */
+int
+i915_gem_object_sync(struct drm_i915_gem_object *obj,
+		     struct intel_ring_buffer *to)
+{
+	struct intel_ring_buffer *from = obj->ring;
+	u32 seqno;
+	int ret, idx;
+
+	if (from == NULL || to == from)
+		return 0;
+
+	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
+		return i915_gem_object_wait_rendering(obj, false);
+
+	idx = intel_ring_sync_index(from, to);
+
+	seqno = obj->last_read_seqno;
+	if (seqno <= from->sync_seqno[idx])
+		return 0;
+
+	ret = i915_gem_check_olr(obj->ring, seqno);
+	if (ret)
+		return ret;
+
+	ret = to->sync_to(to, from, seqno);
+	if (!ret)
+		/* We use last_read_seqno because sync_to()
+		 * might have just caused seqno wrap under
+		 * the radar.
+		 */
+		from->sync_seqno[idx] = obj->last_read_seqno;
+
+	return ret;
+}
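+
+/*
+ * Example: an object last rendered on one ring and then sourced by another
+ * gets a single semaphore wait emitted via to->sync_to(); repeat calls for
+ * seqnos at or below the cached sync_seqno[idx] are filtered out above.
+ */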
+
+static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
+	u32 old_write_domain, old_read_domains;
+
+	/* Force a pagefault for domain tracking on next user access */
+	i915_gem_release_mmap(obj);
+
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+		return;
+
+	/* Wait for any direct GTT access to complete */
+	mb();
+
+	old_read_domains = obj->base.read_domains;
+	old_write_domain = obj->base.write_domain;
+
+	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
+	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+}
+
+/**
+ * Unbinds an object from the GTT aperture.
+ */
+int
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+{
+	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+	int ret;
+
+	if (obj->gtt_space == NULL)
+		return 0;
+
+	if (obj->pin_count)
+		return -EBUSY;
+
+	BUG_ON(obj->pages == NULL);
+
+	ret = i915_gem_object_finish_gpu(obj);
+	if (ret)
+		return ret;
+	/* Continue on if we fail due to EIO, the GPU is hung so we
+	 * should be safe and we need to cleanup or else we might
+	 * cause memory corruption through use-after-free.
+	 */
+
+	i915_gem_object_finish_gtt(obj);
+
+	/* release the fence reg _after_ flushing */
+	ret = i915_gem_object_put_fence(obj);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_object_unbind(obj);
+
+	if (obj->has_global_gtt_mapping)
+		i915_gem_gtt_unbind_object(obj);
+	if (obj->has_aliasing_ppgtt_mapping) {
+		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
+		obj->has_aliasing_ppgtt_mapping = 0;
+	}
+	i915_gem_gtt_finish_object(obj);
+
+	list_del(&obj->mm_list);
+	list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+	/* Avoid an unnecessary call to unbind on rebind. */
+	obj->map_and_fenceable = true;
+
+	drm_mm_put_block(obj->gtt_space);
+	obj->gtt_space = NULL;
+	obj->gtt_offset = 0;
+
+	return 0;
+}
+
+int i915_gpu_idle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int ret, i;
+
+	/* Flush everything onto the inactive list. */
+	for_each_ring(ring, dev_priv, i) {
+		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+		if (ret)
+			return ret;
+
+		ret = intel_ring_idle(ring);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int fence_reg;
+	int fence_pitch_shift;
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		fence_reg = FENCE_REG_SANDYBRIDGE_0;
+		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+	} else {
+		fence_reg = FENCE_REG_965_0;
+		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+	}
+
+	fence_reg += reg * 8;
+
+	/* To w/a incoherency with non-atomic 64-bit register updates,
+	 * we split the 64-bit update into two 32-bit writes. In order
+	 * for a partial fence not to be evaluated between writes, we
+	 * precede the update with write to turn off the fence register,
+	 * and only enable the fence as the last step.
+	 *
+	 * For extra levels of paranoia, we make sure each step lands
+	 * before applying the next step.
+	 */
+	I915_WRITE(fence_reg, 0);
+	POSTING_READ(fence_reg);
+
+	if (obj) {
+		u32 size = obj->gtt_space->size;
+		uint64_t val;
+
+		val = (uint64_t)((obj->gtt_offset + size - 4096) &
+				 0xfffff000) << 32;
+		val |= obj->gtt_offset & 0xfffff000;
+		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+		val |= I965_FENCE_REG_VALID;
+
+		I915_WRITE(fence_reg + 4, val >> 32);
+		POSTING_READ(fence_reg + 4);
+
+		I915_WRITE(fence_reg + 0, val);
+		POSTING_READ(fence_reg);
+	} else {
+		I915_WRITE(fence_reg + 4, 0);
+		POSTING_READ(fence_reg + 4);
+	}
+}
+
+static void i915_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 val;
+
+	if (obj) {
+		u32 size = obj->gtt_space->size;
+		int pitch_val;
+		int tile_width;
+
+		WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (obj->gtt_offset & (size - 1)),
+		     "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+		     obj->gtt_offset, obj->map_and_fenceable, size);
+
+		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+			tile_width = 128;
+		else
+			tile_width = 512;
+
+		/* Note: pitch better be a power of two tile widths */
+		pitch_val = obj->stride / tile_width;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = obj->gtt_offset;
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I915_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
+
+	if (reg < 8)
+		reg = FENCE_REG_830_0 + reg * 4;
+	else
+		reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+}
+
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+				struct drm_i915_gem_object *obj)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t val;
+
+	if (obj) {
+		u32 size = obj->gtt_space->size;
+		uint32_t pitch_val;
+
+		WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (obj->gtt_offset & (size - 1)),
+		     "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+		     obj->gtt_offset, size);
+
+		pitch_val = obj->stride / 128;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = obj->gtt_offset;
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I830_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
+
+	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+	POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
+
+static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+		mb();
+
+	WARN(obj && (!obj->stride || !obj->tiling_mode),
+	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+	     obj->stride, obj->tiling_mode);
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 7:
+	case 6:
+	case 5:
+	case 4: i965_write_fence_reg(dev, reg, obj); break;
+	case 3: i915_write_fence_reg(dev, reg, obj); break;
+	case 2: i830_write_fence_reg(dev, reg, obj); break;
+	default: BUG();
+	}
+
+	/* And similarly be paranoid that no direct access to this region
+	 * is reordered to before the fence is installed.
+	 */
+	if (i915_gem_object_needs_mb(obj))
+		mb();
+}
+
+static inline int fence_number(struct drm_i915_private *dev_priv,
+			       struct drm_i915_fence_reg *fence)
+{
+	return fence - dev_priv->fence_regs;
+}
+
+static void i915_gem_write_fence__ipi(void *data)
+{
+	wbinvd();
+}
+
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+					 struct drm_i915_fence_reg *fence,
+					 bool enable)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int fence_reg = fence_number(dev_priv, fence);
+
+	/* In order to fully serialize access to the fenced region and
+	 * the update to the fence register we need to take extreme
+	 * measures on SNB+. In theory, the write to the fence register
+	 * flushes all memory transactions before, and coupled with the
+	 * mb() placed around the register write we serialise all memory
+	 * operations with respect to the changes in the tiler. Yet, on
+	 * SNB+ we need to take a step further and emit an explicit wbinvd()
+	 * on each processor in order to manually flush all memory
+	 * transactions before updating the fence register.
+	 */
+	if (HAS_LLC(obj->base.dev))
+		on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
+	i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
+
+	if (enable) {
+		obj->fence_reg = fence_reg;
+		fence->obj = obj;
+		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+	} else {
+		obj->fence_reg = I915_FENCE_REG_NONE;
+		fence->obj = NULL;
+		list_del_init(&fence->lru_list);
+	}
+	obj->fence_dirty = false;
+}
+
+static int
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
+{
+	if (obj->last_fenced_seqno) {
+		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
+		if (ret)
+			return ret;
+
+		obj->last_fenced_seqno = 0;
+	}
+
+	obj->fenced_gpu_access = false;
+	return 0;
+}
+
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_fence_reg *fence;
+	int ret;
+
+	ret = i915_gem_object_wait_fence(obj);
+	if (ret)
+		return ret;
+
+	if (obj->fence_reg == I915_FENCE_REG_NONE)
+		return 0;
+
+	fence = &dev_priv->fence_regs[obj->fence_reg];
+
+	i915_gem_object_fence_lost(obj);
+	i915_gem_object_update_fence(obj, fence, false);
+
+	return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_fence_reg *reg, *avail;
+	int i;
+
+	/* First try to find a free reg */
+	avail = NULL;
+	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+		reg = &dev_priv->fence_regs[i];
+		if (!reg->obj)
+			return reg;
+
+		if (!reg->pin_count)
+			avail = reg;
+	}
+
+	if (avail == NULL)
+		return NULL;
+
+	/* None available, try to steal one or wait for a user to finish */
+	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+		if (reg->pin_count)
+			continue;
+
+		return reg;
+	}
+
+	return NULL;
+}
+
+/**
+ * i915_gem_object_get_fence - set up fencing for an object
+ * @obj: object to map through a fence reg
+ *
+ * When mapping objects through the GTT, userspace wants to be able to write
+ * to them without having to worry about swizzling if the object is tiled.
+ * This function walks the fence regs looking for a free one for @obj,
+ * stealing one if it can't find any.
+ *
+ * It then sets up the reg based on the object's properties: address, pitch
+ * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
+ */
+int
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool enable = obj->tiling_mode != I915_TILING_NONE;
+	struct drm_i915_fence_reg *reg;
+	int ret;
+
+	/* Have we updated the tiling parameters upon the object and so
+	 * will need to serialise the write to the associated fence register?
+	 */
+	if (obj->fence_dirty) {
+		ret = i915_gem_object_wait_fence(obj);
+		if (ret)
+			return ret;
+	}
+
+	/* Just update our place in the LRU if our fence is getting reused. */
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		reg = &dev_priv->fence_regs[obj->fence_reg];
+		if (!obj->fence_dirty) {
+			list_move_tail(&reg->lru_list,
+				       &dev_priv->mm.fence_list);
+			return 0;
+		}
+	} else if (enable) {
+		reg = i915_find_fence_reg(dev);
+		if (reg == NULL)
+			return -EDEADLK;
+
+		if (reg->obj) {
+			struct drm_i915_gem_object *old = reg->obj;
+
+			ret = i915_gem_object_wait_fence(old);
+			if (ret)
+				return ret;
+
+			i915_gem_object_fence_lost(old);
+		}
+	} else
+		return 0;
+
+	i915_gem_object_update_fence(obj, reg, enable);
+
+	return 0;
+}
+
+static bool i915_gem_valid_gtt_space(struct drm_device *dev,
+				     struct drm_mm_node *gtt_space,
+				     unsigned long cache_level)
+{
+	struct drm_mm_node *other;
+
+	/* On non-LLC machines we have to be careful when putting differing
+	 * types of snoopable memory together to avoid the prefetcher
+	 * crossing memory domains and dying.
+	 */
+	if (HAS_LLC(dev))
+		return true;
+
+	if (gtt_space == NULL)
+		return true;
+
+	if (list_empty(&gtt_space->node_list))
+		return true;
+
+	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+	if (other->allocated && !other->hole_follows && other->color != cache_level)
+		return false;
+
+	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+		return false;
+
+	return true;
+}
+
+static void i915_gem_verify_gtt(struct drm_device *dev)
+{
+#if WATCH_GTT
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	int err = 0;
+
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		if (obj->gtt_space == NULL) {
+			printk(KERN_ERR "object found on GTT list with no space reserved\n");
+			err++;
+			continue;
+		}
+
+		if (obj->cache_level != obj->gtt_space->color) {
+			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
+			       obj->gtt_space->start,
+			       obj->gtt_space->start + obj->gtt_space->size,
+			       obj->cache_level,
+			       obj->gtt_space->color);
+			err++;
+			continue;
+		}
+
+		if (!i915_gem_valid_gtt_space(dev,
+					      obj->gtt_space,
+					      obj->cache_level)) {
+			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
+			       obj->gtt_space->start,
+			       obj->gtt_space->start + obj->gtt_space->size,
+			       obj->cache_level);
+			err++;
+			continue;
+		}
+	}
+
+	WARN_ON(err);
+#endif
+}
+
+/**
+ * Finds free space in the GTT aperture and binds the object there.
+ */
+static int
+i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+			    unsigned alignment,
+			    bool map_and_fenceable,
+			    bool nonblocking)
+{
+	struct drm_device *dev = obj->base.dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_mm_node *node;
+	u32 size, fence_size, fence_alignment, unfenced_alignment;
+	bool mappable, fenceable;
+	int ret;
+
+	fence_size = i915_gem_get_gtt_size(dev,
+					   obj->base.size,
+					   obj->tiling_mode);
+	fence_alignment = i915_gem_get_gtt_alignment(dev,
+						     obj->base.size,
+						     obj->tiling_mode, true);
+	unfenced_alignment =
+		i915_gem_get_gtt_alignment(dev,
+					   obj->base.size,
+					   obj->tiling_mode, false);
+
+	if (alignment == 0)
+		alignment = map_and_fenceable ? fence_alignment :
+						unfenced_alignment;
+	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
+		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+		return -EINVAL;
+	}
+
+	size = map_and_fenceable ? fence_size : obj->base.size;
+
+	/* If the object is bigger than the entire aperture, reject it early
+	 * before evicting everything in a vain attempt to find space.
+	 */
+	if (obj->base.size >
+	    (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
+		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+		return -E2BIG;
+	}
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL) {
+		i915_gem_object_unpin_pages(obj);
+		return -ENOMEM;
+	}
+
+ search_free:
+	if (map_and_fenceable)
+		ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+							  size, alignment, obj->cache_level,
+							  0, dev_priv->gtt.mappable_end);
+	else
+		ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+						 size, alignment, obj->cache_level);
+	if (ret) {
+		ret = i915_gem_evict_something(dev, size, alignment,
+					       obj->cache_level,
+					       map_and_fenceable,
+					       nonblocking);
+		if (ret == 0)
+			goto search_free;
+
+		i915_gem_object_unpin_pages(obj);
+		kfree(node);
+		return ret;
+	}
+	if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
+		i915_gem_object_unpin_pages(obj);
+		drm_mm_put_block(node);
+		return -EINVAL;
+	}
+
+	ret = i915_gem_gtt_prepare_object(obj);
+	if (ret) {
+		i915_gem_object_unpin_pages(obj);
+		drm_mm_put_block(node);
+		return ret;
+	}
+
+	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
+	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+	obj->gtt_space = node;
+	obj->gtt_offset = node->start;
+
+	fenceable =
+		node->size == fence_size &&
+		(node->start & (fence_alignment - 1)) == 0;
+
+	mappable =
+		obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
+
+	obj->map_and_fenceable = mappable && fenceable;
+
+	i915_gem_object_unpin_pages(obj);
+	trace_i915_gem_object_bind(obj, map_and_fenceable);
+	i915_gem_verify_gtt(dev);
+	return 0;
+}
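+
+/*
+ * Example: with fence_size and fence_alignment both 1MiB, a node of exactly
+ * 1MiB placed at a 1MiB-aligned offset is "fenceable"; it is additionally
+ * "mappable" only if offset + size lies below gtt.mappable_end, and only
+ * objects satisfying both are marked map_and_fenceable.
+ */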
+
+void
+i915_gem_clflush_object(struct drm_i915_gem_object *obj)
+{
+	/* If we don't have a page list set up, then we're not pinned
+	 * to GPU, and we can ignore the cache flush because it'll happen
+	 * again at bind time.
+	 */
+	if (obj->pages == NULL)
+		return;
+
+	/*
+	 * Stolen memory is always coherent with the GPU as it is explicitly
+	 * marked as wc by the system, or the system is cache-coherent.
+	 */
+	if (obj->stolen)
+		return;
+
+	/* If the GPU is snooping the contents of the CPU cache,
+	 * we do not need to manually clear the CPU cache lines.  However,
+	 * the caches are only snooped when the render cache is
+	 * flushed/invalidated.  As we always have to emit invalidations
+	 * and flushes when moving into and out of the RENDER domain, correct
+	 * snooping behaviour occurs naturally as the result of our domain
+	 * tracking.
+	 */
+	if (obj->cache_level != I915_CACHE_NONE)
+		return;
+
+	trace_i915_gem_object_clflush(obj);
+
+	drm_clflush_sg(obj->pages);
+}
+
+/** Flushes the GTT write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
+{
+	uint32_t old_write_domain;
+
+	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
+		return;
+
+	/* No actual flushing is required for the GTT write domain.  Writes
+	 * to it immediately go to main memory as far as we know, so there's
+	 * no chipset flush.  It also doesn't land in render cache.
+	 *
+	 * However, we do have to enforce the order so that all writes through
+	 * the GTT land before any writes to the device, such as updates to
+	 * the GATT itself.
+	 */
+	wmb();
+
+	old_write_domain = obj->base.write_domain;
+	obj->base.write_domain = 0;
+
+	trace_i915_gem_object_change_domain(obj,
+					    obj->base.read_domains,
+					    old_write_domain);
+}
+
+/** Flushes the CPU write domain for the object if it's dirty. */
+static void
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
+{
+	uint32_t old_write_domain;
+
+	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
+		return;
+
+	i915_gem_clflush_object(obj);
+	i915_gem_chipset_flush(obj->base.dev);
+	old_write_domain = obj->base.write_domain;
+	obj->base.write_domain = 0;
+
+	trace_i915_gem_object_change_domain(obj,
+					    obj->base.read_domains,
+					    old_write_domain);
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
+{
+	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+	uint32_t old_write_domain, old_read_domains;
+	int ret;
+
+	/* Not valid to be called on unbound objects. */
+	if (obj->gtt_space == NULL)
+		return -EINVAL;
+
+	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+		return 0;
+
+	ret = i915_gem_object_wait_rendering(obj, !write);
+	if (ret)
+		return ret;
+
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	/* Serialise direct access to this object with the barriers for
+	 * coherent writes from the GPU, by effectively invalidating the
+	 * GTT domain upon first access.
+	 */
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+		mb();
+
+	old_write_domain = obj->base.write_domain;
+	old_read_domains = obj->base.read_domains;
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+	if (write) {
+		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+		obj->dirty = 1;
+	}
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
+	/* And bump the LRU for this access */
+	if (i915_gem_object_is_inactive(obj))
+		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+	return 0;
+}
+
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+				    enum i915_cache_level cache_level)
+{
+	struct drm_device *dev = obj->base.dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	if (obj->cache_level == cache_level)
+		return 0;
+
+	if (obj->pin_count) {
+		DRM_DEBUG("can not change the cache level of pinned objects\n");
+		return -EBUSY;
+	}
+
+	if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+		ret = i915_gem_object_unbind(obj);
+		if (ret)
+			return ret;
+	}
+
+	if (obj->gtt_space) {
+		ret = i915_gem_object_finish_gpu(obj);
+		if (ret)
+			return ret;
+
+		i915_gem_object_finish_gtt(obj);
+
+		/* Before SandyBridge, you could not use tiling or fence
+		 * registers with snooped memory, so relinquish any fences
+		 * currently pointing to our region in the aperture.
+		 */
+		if (INTEL_INFO(dev)->gen < 6) {
+			ret = i915_gem_object_put_fence(obj);
+			if (ret)
+				return ret;
+		}
+
+		if (obj->has_global_gtt_mapping)
+			i915_gem_gtt_bind_object(obj, cache_level);
+		if (obj->has_aliasing_ppgtt_mapping)
+			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+					       obj, cache_level);
+
+		obj->gtt_space->color = cache_level;
+	}
+
+	if (cache_level == I915_CACHE_NONE) {
+		u32 old_read_domains, old_write_domain;
+
+		/* If we're coming from LLC cached, then we haven't
+		 * actually been tracking whether the data is in the
+		 * CPU cache or not, since we only allow one bit set
+		 * in obj->write_domain and have been skipping the clflushes.
+		 * Just set it to the CPU cache for now.
+		 */
+		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+		WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
+
+		old_read_domains = obj->base.read_domains;
+		old_write_domain = obj->base.write_domain;
+
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
+		trace_i915_gem_object_change_domain(obj,
+						    old_read_domains,
+						    old_write_domain);
+	}
+
+	obj->cache_level = cache_level;
+	i915_gem_verify_gtt(dev);
+	return 0;
+}
+
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file)
+{
+	struct drm_i915_gem_caching *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	args->caching = obj->cache_level != I915_CACHE_NONE;
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file)
+{
+	struct drm_i915_gem_caching *args = data;
+	struct drm_i915_gem_object *obj;
+	enum i915_cache_level level;
+	int ret;
+
+	switch (args->caching) {
+	case I915_CACHING_NONE:
+		level = I915_CACHE_NONE;
+		break;
+	case I915_CACHING_CACHED:
+		level = I915_CACHE_LLC;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	ret = i915_gem_object_set_cache_level(obj, level);
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
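+
+/*
+ * Illustrative user-space sketch (not part of this driver): a client would
+ * drive the two caching ioctls above roughly like this, assuming libdrm's
+ * drmIoctl() wrapper and an open DRM fd:
+ *
+ *	struct drm_i915_gem_caching arg = { .handle = handle };
+ *
+ *	arg.caching = I915_CACHING_CACHED;
+ *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
+ *
+ *	arg.caching = 0;
+ *	drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg);
+ *	(arg.caching is now non-zero iff the object is cached)
+ */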
+
+/*
+ * Prepare buffer for display plane (scanout, cursors, etc).
+ * Can be called from an uninterruptible phase (modesetting) and allows
+ * any flushes to be pipelined (for pageflips).
+ */
+int
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+				     u32 alignment,
+				     struct intel_ring_buffer *pipelined)
+{
+	u32 old_read_domains, old_write_domain;
+	int ret;
+
+	if (pipelined != obj->ring) {
+		ret = i915_gem_object_sync(obj, pipelined);
+		if (ret)
+			return ret;
+	}
+
+	/* The display engine is not coherent with the LLC cache on gen6.  As
+	 * a result, we make sure that the pinning that is about to occur is
+	 * done with uncached PTEs. This is the lowest common denominator for all
+	 * chipsets.
+	 *
+	 * However for gen6+, we could do better by using the GFDT bit instead
+	 * of uncaching, which would allow us to flush all the LLC-cached data
+	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+	 */
+	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+	if (ret)
+		return ret;
+
+	/* As the user may map the buffer once pinned in the display plane
+	 * (e.g. libkms for the bootup splash), we have to ensure that we
+	 * always use map_and_fenceable for all scanout buffers.
+	 */
+	ret = i915_gem_object_pin(obj, alignment, true, false);
+	if (ret)
+		return ret;
+
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	old_write_domain = obj->base.write_domain;
+	old_read_domains = obj->base.read_domains;
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	obj->base.write_domain = 0;
+	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
+	return 0;
+}
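+
+/*
+ * Sketch of the intended call pattern (mirroring the modesetting paths that
+ * use this helper): pin the framebuffer object for scanout, program the
+ * plane, and unpin once the display no longer references it:
+ *
+ *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
+ *	if (ret == 0) {
+ *		... point the plane/cursor registers at obj->gtt_offset ...
+ *		i915_gem_object_unpin(obj);	(once the plane is switched away)
+ *	}
+ */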
+
+int
+i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
+		return 0;
+
+	ret = i915_gem_object_wait_rendering(obj, false);
+	if (ret)
+		return ret;
+
+	/* Ensure that we invalidate the GPU's caches and TLBs. */
+	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+	return 0;
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
+{
+	uint32_t old_write_domain, old_read_domains;
+	int ret;
+
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		return 0;
+
+	ret = i915_gem_object_wait_rendering(obj, !write);
+	if (ret)
+		return ret;
+
+	i915_gem_object_flush_gtt_write_domain(obj);
+
+	old_write_domain = obj->base.write_domain;
+	old_read_domains = obj->base.read_domains;
+
+	/* Flush the CPU cache if it's still invalid. */
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+		i915_gem_clflush_object(obj);
+
+		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
+	}
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+
+	/* If we're writing through the CPU, then the GPU read domains will
+	 * need to be invalidated at next use.
+	 */
+	if (write) {
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+
+	return 0;
+}
+
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * Note that if we were to use the current jiffies each time around the loop,
+ * we wouldn't escape the function with any frames outstanding if the time to
+ * render a frame was over 20ms.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+static int
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+	struct drm_i915_gem_request *request;
+	struct intel_ring_buffer *ring = NULL;
+	unsigned reset_counter;
+	u32 seqno = 0;
+	int ret;
+
+	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
+	if (ret)
+		return ret;
+
+	spin_lock(&file_priv->mm.lock);
+	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
+		if (time_after_eq(request->emitted_jiffies, recent_enough))
+			break;
+
+		ring = request->ring;
+		seqno = request->seqno;
+	}
+	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	spin_unlock(&file_priv->mm.lock);
+
+	if (seqno == 0)
+		return 0;
+
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+	if (ret == 0)
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
+
+	return ret;
+}
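+
+/*
+ * Worked example for the throttle above (illustrative numbers): with the
+ * 20ms window and a client emitting a request every 5ms, the loop skips the
+ * ~4 most recent requests and waits on the newest request emitted more than
+ * 20ms ago, so the client keeps roughly 20ms of work queued ahead of the GPU.
+ */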
+
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    uint32_t alignment,
+		    bool map_and_fenceable,
+		    bool nonblocking)
+{
+	int ret;
+
+	if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+		return -EBUSY;
+
+	if (obj->gtt_space != NULL) {
+		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+		    (map_and_fenceable && !obj->map_and_fenceable)) {
+			WARN(obj->pin_count,
+			     "bo is already pinned with incorrect alignment:"
+			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+			     " obj->map_and_fenceable=%d\n",
+			     obj->gtt_offset, alignment,
+			     map_and_fenceable,
+			     obj->map_and_fenceable);
+			ret = i915_gem_object_unbind(obj);
+			if (ret)
+				return ret;
+		}
+	}
+
+	if (obj->gtt_space == NULL) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+		ret = i915_gem_object_bind_to_gtt(obj, alignment,
+						  map_and_fenceable,
+						  nonblocking);
+		if (ret)
+			return ret;
+
+		if (!dev_priv->mm.aliasing_ppgtt)
+			i915_gem_gtt_bind_object(obj, obj->cache_level);
+	}
+
+	if (!obj->has_global_gtt_mapping && map_and_fenceable)
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+	obj->pin_count++;
+	obj->pin_mappable |= map_and_fenceable;
+
+	return 0;
+}
+
+void
+i915_gem_object_unpin(struct drm_i915_gem_object *obj)
+{
+	BUG_ON(obj->pin_count == 0);
+	BUG_ON(obj->gtt_space == NULL);
+
+	if (--obj->pin_count == 0)
+		obj->pin_mappable = false;
+}
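+
+/*
+ * Typical in-kernel pairing of the helpers above (sketch; struct_mutex must
+ * be held):
+ *
+ *	ret = i915_gem_object_pin(obj, 4096, true, false);
+ *	if (ret)
+ *		return ret;
+ *	... access the object through the GTT at obj->gtt_offset ...
+ *	i915_gem_object_unpin(obj);
+ */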
+
+int
+i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+		   struct drm_file *file)
+{
+	struct drm_i915_gem_pin *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED) {
+		DRM_ERROR("Attempting to pin a purgeable buffer\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (obj->pin_filp != NULL && obj->pin_filp != file) {
+		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+			  args->handle);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (obj->user_pin_count == 0) {
+		ret = i915_gem_object_pin(obj, args->alignment, true, false);
+		if (ret)
+			goto out;
+	}
+
+	obj->user_pin_count++;
+	obj->pin_filp = file;
+
+	/* XXX - flush the CPU caches for pinned objects
+	 * as the X server doesn't manage domains yet
+	 */
+	i915_gem_object_flush_cpu_write_domain(obj);
+	args->offset = obj->gtt_offset;
+out:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int
+i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file)
+{
+	struct drm_i915_gem_pin *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	if (obj->pin_filp != file) {
+		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
+			  args->handle);
+		ret = -EINVAL;
+		goto out;
+	}
+	obj->user_pin_count--;
+	if (obj->user_pin_count == 0) {
+		obj->pin_filp = NULL;
+		i915_gem_object_unpin(obj);
+	}
+
+out:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int
+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file)
+{
+	struct drm_i915_gem_busy *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	/* Count all active objects as busy, even if they are not currently in
+	 * use by the gpu. Users of this interface expect objects to eventually
+	 * become non-busy without any further actions, therefore emit any
+	 * necessary flushes here.
+	 */
+	ret = i915_gem_object_flush_active(obj);
+
+	args->busy = obj->active;
+	if (obj->ring) {
+		BUILD_BUG_ON(I915_NUM_RINGS > 16);
+		args->busy |= intel_ring_flag(obj->ring) << 16;
+	}
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int
+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	return i915_gem_ring_throttle(dev, file_priv);
+}
+
+int
+i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_i915_gem_madvise *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	switch (args->madv) {
+	case I915_MADV_DONTNEED:
+	case I915_MADV_WILLNEED:
+	    break;
+	default:
+	    return -EINVAL;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	if (obj->pin_count) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (obj->madv != __I915_MADV_PURGED)
+		obj->madv = args->madv;
+
+	/* if the object is no longer attached, discard its backing storage */
+	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
+		i915_gem_object_truncate(obj);
+
+	args->retained = obj->madv != __I915_MADV_PURGED;
+
+out:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+			  const struct drm_i915_gem_object_ops *ops)
+{
+	INIT_LIST_HEAD(&obj->mm_list);
+	INIT_LIST_HEAD(&obj->gtt_list);
+	INIT_LIST_HEAD(&obj->ring_list);
+	INIT_LIST_HEAD(&obj->exec_list);
+
+	obj->ops = ops;
+
+	obj->fence_reg = I915_FENCE_REG_NONE;
+	obj->madv = I915_MADV_WILLNEED;
+	/* Avoid an unnecessary call to unbind on the first bind. */
+	obj->map_and_fenceable = true;
+
+	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+	.get_pages = i915_gem_object_get_pages_gtt,
+	.put_pages = i915_gem_object_put_pages_gtt,
+};
+
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+						  size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	struct address_space *mapping;
+	gfp_t mask;
+
+	obj = i915_gem_object_alloc(dev);
+	if (obj == NULL)
+		return NULL;
+
+	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+		i915_gem_object_free(obj);
+		return NULL;
+	}
+
+	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+		/* 965gm cannot relocate objects above 4GiB. */
+		mask &= ~__GFP_HIGHMEM;
+		mask |= __GFP_DMA32;
+	}
+
+	mapping = file_inode(obj->base.filp)->i_mapping;
+	mapping_set_gfp_mask(mapping, mask);
+
+	i915_gem_object_init(obj, &i915_gem_object_ops);
+
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+
+	if (HAS_LLC(dev)) {
+		/* On some devices, we can have the GPU use the LLC (the CPU
+		 * cache) for about a 10% performance improvement
+		 * compared to uncached.  Graphics requests other than
+		 * display scanout are coherent with the CPU in
+		 * accessing this cache.  This means in this mode we
+		 * don't need to clflush on the CPU side, and on the
+		 * GPU side we only need to flush internal caches to
+		 * get data visible to the CPU.
+		 *
+		 * However, we maintain the display planes as UC, and so
+		 * need to rebind when first used as such.
+		 */
+		obj->cache_level = I915_CACHE_LLC;
+	} else
+		obj->cache_level = I915_CACHE_NONE;
+
+	return obj;
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+	BUG();
+
+	return 0;
+}
+
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+	struct drm_device *dev = obj->base.dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	trace_i915_gem_object_destroy(obj);
+
+	if (obj->phys_obj)
+		i915_gem_detach_phys_object(dev, obj);
+
+	obj->pin_count = 0;
+	if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
+		bool was_interruptible;
+
+		was_interruptible = dev_priv->mm.interruptible;
+		dev_priv->mm.interruptible = false;
+
+		WARN_ON(i915_gem_object_unbind(obj));
+
+		dev_priv->mm.interruptible = was_interruptible;
+	}
+
+	obj->pages_pin_count = 0;
+	i915_gem_object_put_pages(obj);
+	i915_gem_object_free_mmap_offset(obj);
+	i915_gem_object_release_stolen(obj);
+
+	BUG_ON(obj->pages);
+
+	if (obj->base.import_attach)
+		drm_prime_gem_destroy(&obj->base, NULL);
+
+	drm_gem_object_release(&obj->base);
+	i915_gem_info_remove_obj(dev_priv, obj->base.size);
+
+	kfree(obj->bit_17);
+	i915_gem_object_free(obj);
+}
+
+int
+i915_gem_idle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (dev_priv->mm.suspended) {
+		mutex_unlock(&dev->struct_mutex);
+		return 0;
+	}
+
+	ret = i915_gpu_idle(dev);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+	i915_gem_retire_requests(dev);
+
+	/* Under UMS, be paranoid and evict. */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		i915_gem_evict_everything(dev);
+
+	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
+	 * We need to replace this with a semaphore, or something.
+	 * And not confound mm.suspended!
+	 */
+	dev_priv->mm.suspended = 1;
+	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+
+	i915_kernel_lost_context(dev);
+	i915_gem_cleanup_ringbuffer(dev);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	/* Cancel the retire work handler, which should be idle now. */
+	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+
+	return 0;
+}
+
+void i915_gem_l3_remap(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 misccpctl;
+	int i;
+
+	if (!HAS_L3_GPU_CACHE(dev))
+		return;
+
+	if (!dev_priv->l3_parity.remap_info)
+		return;
+
+	misccpctl = I915_READ(GEN7_MISCCPCTL);
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+	POSTING_READ(GEN7_MISCCPCTL);
+
+	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
+		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
+		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
+			DRM_DEBUG("0x%x was already programmed to %x\n",
+				  GEN7_L3LOG_BASE + i, remap);
+		if (remap && !dev_priv->l3_parity.remap_info[i/4])
+			DRM_DEBUG_DRIVER("Clearing remapped register\n");
+		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
+	}
+
+	/* Make sure all the writes land before disabling dop clock gating */
+	POSTING_READ(GEN7_L3LOG_BASE);
+
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+}
+
+void i915_gem_init_swizzling(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen < 5 ||
+	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+		return;
+
+	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+				 DISP_TILE_SURFACE_SWIZZLING);
+
+	if (IS_GEN5(dev))
+		return;
+
+	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
+	if (IS_GEN6(dev))
+		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+	else if (IS_GEN7(dev))
+		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+	else
+		BUG();
+}
+
+static bool
+intel_enable_blt(struct drm_device *dev)
+{
+	if (!HAS_BLT(dev))
+		return false;
+
+	/* The blitter was dysfunctional on early prototypes */
+	if (IS_GEN6(dev) && dev->pdev->revision < 8) {
+		DRM_INFO("BLT not supported on this pre-production hardware;"
+			 " graphics performance will be degraded.\n");
+		return false;
+	}
+
+	return true;
+}
+
+static int i915_gem_init_rings(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = intel_init_render_ring_buffer(dev);
+	if (ret)
+		return ret;
+
+	if (HAS_BSD(dev)) {
+		ret = intel_init_bsd_ring_buffer(dev);
+		if (ret)
+			goto cleanup_render_ring;
+	}
+
+	if (intel_enable_blt(dev)) {
+		ret = intel_init_blt_ring_buffer(dev);
+		if (ret)
+			goto cleanup_bsd_ring;
+	}
+
+	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
+	if (ret)
+		goto cleanup_blt_ring;
+
+	return 0;
+
+cleanup_blt_ring:
+	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
+cleanup_bsd_ring:
+	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
+cleanup_render_ring:
+	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+
+	return ret;
+}
+
+int
+i915_gem_init_hw(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+		return -EIO;
+
+	if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
+		I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+
+	if (HAS_PCH_NOP(dev)) {
+		u32 temp = I915_READ(GEN7_MSG_CTL);
+		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
+		I915_WRITE(GEN7_MSG_CTL, temp);
+	}
+
+	i915_gem_l3_remap(dev);
+
+	i915_gem_init_swizzling(dev);
+
+	ret = i915_gem_init_rings(dev);
+	if (ret)
+		return ret;
+
+	/*
+	 * XXX: There was some w/a described somewhere suggesting loading
+	 * contexts before PPGTT.
+	 */
+	i915_gem_context_init(dev);
+	if (dev_priv->mm.aliasing_ppgtt) {
+		ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
+		if (ret) {
+			i915_gem_cleanup_aliasing_ppgtt(dev);
+			DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
+		}
+	}
+
+	return 0;
+}
+
+int i915_gem_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (IS_VALLEYVIEW(dev)) {
+		/* VLVA0 (potential hack), BIOS isn't actually waking us */
+		I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
+		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
+			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
+	}
+
+	i915_gem_init_global_gtt(dev);
+
+	ret = i915_gem_init_hw(dev);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret) {
+		i915_gem_cleanup_aliasing_ppgtt(dev);
+		return ret;
+	}
+
+	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		dev_priv->dri1.allow_batchbuffer = 1;
+	return 0;
+}
+
+void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i)
+		intel_cleanup_ring_buffer(ring);
+}
+
+int
+i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return 0;
+
+	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
+		DRM_ERROR("Reenabling wedged hardware, good luck\n");
+		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	dev_priv->mm.suspended = 0;
+
+	ret = i915_gem_init_hw(dev);
+	if (ret != 0) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	mutex_unlock(&dev->struct_mutex);
+
+	ret = drm_irq_install(dev);
+	if (ret)
+		goto cleanup_ringbuffer;
+
+	return 0;
+
+cleanup_ringbuffer:
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_cleanup_ringbuffer(dev);
+	dev_priv->mm.suspended = 1;
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int
+i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return 0;
+
+	drm_irq_uninstall(dev);
+	return i915_gem_idle(dev);
+}
+
+void
+i915_gem_lastclose(struct drm_device *dev)
+{
+	int ret;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	ret = i915_gem_idle(dev);
+	if (ret)
+		DRM_ERROR("failed to idle hardware: %d\n", ret);
+}
+
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+}
+
+void
+i915_gem_load(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	dev_priv->slab =
+		kmem_cache_create("i915_gem_object",
+				  sizeof(struct drm_i915_gem_object), 0,
+				  SLAB_HWCACHE_ALIGN,
+				  NULL);
+
+	INIT_LIST_HEAD(&dev_priv->mm.active_list);
+	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
+	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
+	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		init_ring_lists(&dev_priv->ring[i]);
+	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
+		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
+	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+			  i915_gem_retire_work_handler);
+	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
+
+	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+	if (IS_GEN3(dev)) {
+		I915_WRITE(MI_ARB_STATE,
+			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+	}
+
+	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+
+	/* Old X drivers will take 0-2 for front, back, depth buffers */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		dev_priv->fence_reg_start = 3;
+
+	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
+		dev_priv->num_fence_regs = 32;
+	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+		dev_priv->num_fence_regs = 16;
+	else
+		dev_priv->num_fence_regs = 8;
+
+	/* Initialize fence registers to zero */
+	i915_gem_restore_fences(dev);
+
+	i915_gem_detect_bit_6_swizzle(dev);
+	init_waitqueue_head(&dev_priv->pending_flip_queue);
+
+	dev_priv->mm.interruptible = true;
+
+	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
+	register_shrinker(&dev_priv->mm.inactive_shrinker);
+}
+
+/*
+ * Create a physically contiguous memory object for this object
+ * e.g. for cursor + overlay regs
+ */
+static int i915_gem_init_phys_object(struct drm_device *dev,
+				     int id, int size, int align)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_phys_object *phys_obj;
+	int ret;
+
+	if (dev_priv->mm.phys_objs[id - 1] || !size)
+		return 0;
+
+	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
+	if (!phys_obj)
+		return -ENOMEM;
+
+	phys_obj->id = id;
+
+	phys_obj->handle = drm_pci_alloc(dev, size, align);
+	if (!phys_obj->handle) {
+		ret = -ENOMEM;
+		goto kfree_obj;
+	}
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
+#endif
+
+	dev_priv->mm.phys_objs[id - 1] = phys_obj;
+
+	return 0;
+kfree_obj:
+	kfree(phys_obj);
+	return ret;
+}
+
+static void i915_gem_free_phys_object(struct drm_device *dev, int id)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_phys_object *phys_obj;
+
+	if (!dev_priv->mm.phys_objs[id - 1])
+		return;
+
+	phys_obj = dev_priv->mm.phys_objs[id - 1];
+	if (phys_obj->cur_obj) {
+		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
+#endif
+	drm_pci_free(dev, phys_obj->handle);
+	kfree(phys_obj);
+	dev_priv->mm.phys_objs[id - 1] = NULL;
+}
+
+void i915_gem_free_all_phys_object(struct drm_device *dev)
+{
+	int i;
+
+	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
+		i915_gem_free_phys_object(dev, i);
+}
+
+void i915_gem_detach_phys_object(struct drm_device *dev,
+				 struct drm_i915_gem_object *obj)
+{
+	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+	char *vaddr;
+	int i;
+	int page_count;
+
+	if (!obj->phys_obj)
+		return;
+	vaddr = obj->phys_obj->handle->vaddr;
+
+	page_count = obj->base.size / PAGE_SIZE;
+	for (i = 0; i < page_count; i++) {
+		struct page *page = shmem_read_mapping_page(mapping, i);
+		if (!IS_ERR(page)) {
+			char *dst = kmap_atomic(page);
+			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
+			kunmap_atomic(dst);
+
+			drm_clflush_pages(&page, 1);
+
+			set_page_dirty(page);
+			mark_page_accessed(page);
+			page_cache_release(page);
+		}
+	}
+	i915_gem_chipset_flush(dev);
+
+	obj->phys_obj->cur_obj = NULL;
+	obj->phys_obj = NULL;
+}
+
+int
+i915_gem_attach_phys_object(struct drm_device *dev,
+			    struct drm_i915_gem_object *obj,
+			    int id,
+			    int align)
+{
+	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret = 0;
+	int page_count;
+	int i;
+
+	if (id > I915_MAX_PHYS_OBJECT)
+		return -EINVAL;
+
+	if (obj->phys_obj) {
+		if (obj->phys_obj->id == id)
+			return 0;
+		i915_gem_detach_phys_object(dev, obj);
+	}
+
+	/* create a new object */
+	if (!dev_priv->mm.phys_objs[id - 1]) {
+		ret = i915_gem_init_phys_object(dev, id,
+						obj->base.size, align);
+		if (ret) {
+			DRM_ERROR("failed to init phys object %d size: %zu\n",
+				  id, obj->base.size);
+			return ret;
+		}
+	}
+
+	/* bind to the object */
+	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
+	obj->phys_obj->cur_obj = obj;
+
+	page_count = obj->base.size / PAGE_SIZE;
+
+	for (i = 0; i < page_count; i++) {
+		struct page *page;
+		char *dst, *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
+		src = kmap_atomic(page);
+		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+		memcpy(dst, src, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_device *dev,
+		     struct drm_i915_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
+	char __user *user_data = to_user_ptr(args->data_ptr);
+
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
+
+	i915_gem_chipset_flush(dev);
+	return 0;
+}
+
+void i915_gem_release(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	/* Clean up our request list when the client is going away, so that
+	 * later retire_requests won't dereference our soon-to-be-gone
+	 * file_priv.
+	 */
+	spin_lock(&file_priv->mm.lock);
+	while (!list_empty(&file_priv->mm.request_list)) {
+		struct drm_i915_gem_request *request;
+
+		request = list_first_entry(&file_priv->mm.request_list,
+					   struct drm_i915_gem_request,
+					   client_list);
+		list_del(&request->client_list);
+		request->file_priv = NULL;
+	}
+	spin_unlock(&file_priv->mm.lock);
+}
+
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+	if (!mutex_is_locked(mutex))
+		return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+	return mutex->owner == task;
+#else
+	/* Since UP may be pre-empted, we cannot assume that we own the lock */
+	return false;
+#endif
+}
+
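+/*
+ * The helper above exists for the shrinker below: direct reclaim can recurse
+ * into the shrinker while the current task already holds struct_mutex (for
+ * example from an allocation inside a GEM ioctl), so we detect that case and
+ * proceed without retaking the lock instead of deadlocking.
+ */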
+static int
+i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(shrinker,
+			     struct drm_i915_private,
+			     mm.inactive_shrinker);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_i915_gem_object *obj;
+	int nr_to_scan = sc->nr_to_scan;
+	bool unlock = true;
+	int cnt;
+
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		if (!mutex_is_locked_by(&dev->struct_mutex, current))
+			return 0;
+
+		if (dev_priv->mm.shrinker_no_lock_stealing)
+			return 0;
+
+		unlock = false;
+	}
+
+	if (nr_to_scan) {
+		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+		if (nr_to_scan > 0)
+			nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
+							false);
+		if (nr_to_scan > 0)
+			i915_gem_shrink_all(dev_priv);
+	}
+
+	cnt = 0;
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+		if (obj->pages_pin_count == 0)
+			cnt += obj->base.size >> PAGE_SHIFT;
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
+		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+			cnt += obj->base.size >> PAGE_SHIFT;
+
+	if (unlock)
+		mutex_unlock(&dev->struct_mutex);
+	return cnt;
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_gem_context.c b/linux-imx/drivers/gpu/drm/i915/i915_gem_context.c
new file mode 100644
index 0000000..b10b1b1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_gem_context.c
@@ -0,0 +1,519 @@
+/*
+ * Copyright © 2011-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+/*
+ * This file implements HW context support. On gen5+ a HW context consists of an
+ * opaque GPU object which is referenced at times of context saves and restores.
+ * With RC6 enabled, the context is also referenced as the GPU enters and exits
+ * RC6 (the GPU has its own internal power context, except on gen5). Though
+ * something like a context does exist for the media ring, the code only
+ * supports contexts for the render ring.
+ *
+ * In software, there is a distinction between contexts created by the user,
+ * and the default HW context. The default HW context is used by GPU clients
+ * that do not request setup of their own hardware context. The default
+ * context's state is never restored to help prevent programming errors. This
+ * would happen if a client ran and piggy-backed off another client's GPU state.
+ * The default context only exists to give the GPU some offset to load as the
+ * current context in order to invoke a save of the context we actually care
+ * about. In fact, the
+ * code could likely be constructed, albeit in a more complicated fashion, to
+ * never use the default context, though that limits the driver's ability to
+ * swap out, and/or destroy other contexts.
+ *
+ * All other contexts are created as a request by the GPU client. These contexts
+ * store GPU state, and thus allow GPU clients to not re-emit state (and
+ * potentially query certain state) at any time. The kernel driver makes
+ * certain that the appropriate commands are inserted.
+ *
+ * The context life cycle is semi-complicated in that context BOs may live
+ * longer than the context itself because of the way the hardware, and object
+ * tracking works. Below is a very crude representation of the state machine
+ * describing the context life.
+ *                                         refcount     pincount     active
+ * S0: initial state                          0            0           0
+ * S1: context created                        1            0           0
+ * S2: context is currently running           2            1           X
+ * S3: GPU referenced, but not current        2            0           1
+ * S4: context is current, but destroyed      1            1           0
+ * S5: like S3, but destroyed                 1            0           1
+ *
+ * The most common (but not all) transitions:
+ * S0->S1: client creates a context
+ * S1->S2: client submits execbuf with context
+ * S2->S3: another client submits execbuf with context
+ * S3->S1: context object was retired
+ * S3->S2: client submits another execbuf
+ * S2->S4: context destroy called with current context
+ * S3->S5->S0: destroy path
+ * S4->S5->S0: destroy path on current context
+ *
+ * There are two confusing terms used above:
+ *  The "current context" means the context which is currently running on the
+ *  GPU. The GPU has loaded its state already and has stored away the gtt
+ *  offset of the BO. The GPU is not actively referencing the data at this
+ *  offset, but it will on the next context switch. The only way to avoid this
+ *  is to do a GPU reset.
+ *
+ *  An "active context' is one which was previously the "current context" and is
+ *  on the active list waiting for the next context switch to occur. Until this
+ *  happens, the object must remain at the same gtt offset. It is therefore
+ *  possible for a context to be destroyed while it is still active.
+ *
+ */
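+
+/*
+ * Concrete walk-through of one common path (illustrative): a client creates
+ * a context (S0->S1, refcount 1) and submits an execbuf with it (S1->S2,
+ * refcount 2, pincount 1). Another client's execbuf then switches contexts
+ * (S2->S3), and once the context object retires we are back at S1. Calling
+ * destroy while the object is still on the active list takes the
+ * S3->S5->S0 path instead.
+ */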
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/* This is a HW constraint. The value below is the largest known requirement
+ * I've seen in a spec to date, and that was a workaround for a non-shipping
+ * part. It should be safe to decrease this, but it's more future-proof as is.
+ */
+#define CONTEXT_ALIGN (64<<10)
+
+static struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
+static int do_switch(struct i915_hw_context *to);
+
+static int get_context_size(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+	u32 reg;
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		reg = I915_READ(CXT_SIZE);
+		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
+		break;
+	case 7:
+		reg = I915_READ(GEN7_CXT_SIZE);
+		if (IS_HASWELL(dev))
+			ret = HSW_CXT_TOTAL_SIZE;
+		else
+			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
+		break;
+	default:
+		BUG();
+	}
+
+	return ret;
+}
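+
+/*
+ * The "* 64" above converts the hardware's count of 64-byte units into
+ * bytes. As an illustrative example, a raw count of 280 would yield
+ * 280 * 64 = 17920 bytes, which i915_gem_context_init() below then rounds
+ * up to a page multiple (20480).
+ */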
+
+static void do_destroy(struct i915_hw_context *ctx)
+{
+	if (ctx->file_priv)
+		idr_remove(&ctx->file_priv->context_idr, ctx->id);
+
+	drm_gem_object_unreference(&ctx->obj->base);
+	kfree(ctx);
+}
+
+static struct i915_hw_context *
+create_hw_context(struct drm_device *dev,
+		  struct drm_i915_file_private *file_priv)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_hw_context *ctx;
+	int ret;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (ctx == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+	if (ctx->obj == NULL) {
+		kfree(ctx);
+		DRM_DEBUG_DRIVER("Context object allocated failed\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (INTEL_INFO(dev)->gen >= 7) {
+		ret = i915_gem_object_set_cache_level(ctx->obj,
+						      I915_CACHE_LLC_MLC);
+		if (ret)
+			goto err_out;
+	}
+
+	/* The ring associated with the context object is handled by the normal
+	 * object tracking code. We give an initial ring value simply to pass an
+	 * assertion in the context switch code.
+	 */
+	ctx->ring = &dev_priv->ring[RCS];
+
+	/* Default context will never have a file_priv */
+	if (file_priv == NULL)
+		return ctx;
+
+	ctx->file_priv = file_priv;
+
+	ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
+			GFP_KERNEL);
+	if (ret < 0)
+		goto err_out;
+	ctx->id = ret;
+
+	return ctx;
+
+err_out:
+	do_destroy(ctx);
+	return ERR_PTR(ret);
+}
+
+static inline bool is_default_context(struct i915_hw_context *ctx)
+{
+	return (ctx == ctx->ring->default_context);
+}
+
+/**
+ * The default context needs to exist per ring that uses contexts. It stores the
+ * context state of the GPU for applications that don't utilize HW contexts, as
+ * well as an idle case.
+ */
+static int create_default_context(struct drm_i915_private *dev_priv)
+{
+	struct i915_hw_context *ctx;
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
+	ctx = create_hw_context(dev_priv->dev, NULL);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	/* We may need to do things with the shrinker which require us to
+	 * immediately switch back to the default context. This can cause a
+	 * problem as pinning the default context also requires GTT space which
+	 * may not be available. To avoid this we always pin the
+	 * default context.
+	 */
+	dev_priv->ring[RCS].default_context = ctx;
+	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+	if (ret)
+		goto err_destroy;
+
+	ret = do_switch(ctx);
+	if (ret)
+		goto err_unpin;
+
+	DRM_DEBUG_DRIVER("Default HW context loaded\n");
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(ctx->obj);
+err_destroy:
+	do_destroy(ctx);
+	return ret;
+}
+
+void i915_gem_context_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!HAS_HW_CONTEXTS(dev)) {
+		dev_priv->hw_contexts_disabled = true;
+		return;
+	}
+
+	/* If called from reset, or thaw... we've been here already */
+	if (dev_priv->hw_contexts_disabled ||
+	    dev_priv->ring[RCS].default_context)
+		return;
+
+	dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+
+	if (dev_priv->hw_context_size > (1<<20)) {
+		dev_priv->hw_contexts_disabled = true;
+		return;
+	}
+
+	if (create_default_context(dev_priv)) {
+		dev_priv->hw_contexts_disabled = true;
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("HW context support initialized\n");
+}
+
+void i915_gem_context_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->hw_contexts_disabled)
+		return;
+
+	/* The only known way to stop the gpu from accessing the hw context is
+	 * to reset it. Do this as the very last operation to avoid confusing
+	 * other code, leading to spurious errors. */
+	intel_gpu_reset(dev);
+
+	i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
+
+	do_destroy(dev_priv->ring[RCS].default_context);
+}
+
+static int context_idr_cleanup(int id, void *p, void *data)
+{
+	struct i915_hw_context *ctx = p;
+
+	BUG_ON(id == DEFAULT_CONTEXT_ID);
+
+	do_destroy(ctx);
+
+	return 0;
+}
+
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+	idr_destroy(&file_priv->context_idr);
+}
+
+static struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
+{
+	return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+}
+
+static inline int
+mi_set_context(struct intel_ring_buffer *ring,
+	       struct i915_hw_context *new_context,
+	       u32 hw_flags)
+{
+	int ret;
+
+	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
+	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
+	 * explicitly, so we rely on the value at ring init, stored in
+	 * itlb_before_ctx_switch.
+	 */
+	if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
+		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
+		if (ret)
+			return ret;
+	}
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	if (IS_GEN7(ring->dev))
+		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+	else
+		intel_ring_emit(ring, MI_NOOP);
+
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_SET_CONTEXT);
+	intel_ring_emit(ring, new_context->obj->gtt_offset |
+			MI_MM_SPACE_GTT |
+			MI_SAVE_EXT_STATE_EN |
+			MI_RESTORE_EXT_STATE_EN |
+			hw_flags);
+	/* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
+	intel_ring_emit(ring, MI_NOOP);
+
+	if (IS_GEN7(ring->dev))
+		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+	else
+		intel_ring_emit(ring, MI_NOOP);
+
+	intel_ring_advance(ring);
+
+	return ret;
+}
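+
+/*
+ * For reference, the six dwords emitted above form the following stream on
+ * gen7 (on other gens the two MI_ARB_ON_OFF dwords are MI_NOOPs instead):
+ *
+ *	MI_ARB_ON_OFF | MI_ARB_DISABLE
+ *	MI_NOOP
+ *	MI_SET_CONTEXT
+ *	<context gtt offset> | MI_MM_SPACE_GTT | MI_SAVE_EXT_STATE_EN |
+ *			       MI_RESTORE_EXT_STATE_EN | hw_flags
+ *	MI_NOOP		(w/a: MI_SET_CONTEXT must be followed by MI_NOOP)
+ *	MI_ARB_ON_OFF | MI_ARB_ENABLE
+ */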
+
+static int do_switch(struct i915_hw_context *to)
+{
+	struct intel_ring_buffer *ring = to->ring;
+	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
+	u32 hw_flags = 0;
+	int ret;
+
+	BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
+
+	if (from_obj == to->obj)
+		return 0;
+
+	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
+	if (ret)
+		return ret;
+
+	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
+	 * that thanks to write = false in this call and us not setting any gpu
+	 * write domains when putting a context object onto the active list
+	 * (when switching away from it), this won't block.
+	 * XXX: We need a real interface to do this instead of trickery. */
+	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+	if (ret) {
+		i915_gem_object_unpin(to->obj);
+		return ret;
+	}
+
+	if (!to->obj->has_global_gtt_mapping)
+		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
+
+	if (!to->is_initialized || is_default_context(to))
+		hw_flags |= MI_RESTORE_INHIBIT;
+	else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
+		hw_flags |= MI_FORCE_RESTORE;
+
+	ret = mi_set_context(ring, to, hw_flags);
+	if (ret) {
+		i915_gem_object_unpin(to->obj);
+		return ret;
+	}
+
+	/* The backing object for the context is done after switching to the
+	 * *next* context. Therefore we cannot retire the previous context until
+	 * the next context has already started running. In fact, the below code
+	 * is a bit suboptimal because the retiring can occur simply after the
+	 * MI_SET_CONTEXT instead of when the next seqno has completed.
+	 */
+	if (from_obj != NULL) {
+		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_gem_object_move_to_active(from_obj, ring);
+		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
+		 * whole damn pipeline, we don't need to explicitly mark the
+		 * object dirty. The only exception is that the context must be
+		 * correct in case the object gets swapped out. Ideally we'd be
+		 * able to defer doing this until we know the object would be
+		 * swapped, but there is no way to do that yet.
+		 */
+		from_obj->dirty = 1;
+		BUG_ON(from_obj->ring != ring);
+		i915_gem_object_unpin(from_obj);
+
+		drm_gem_object_unreference(&from_obj->base);
+	}
+
+	drm_gem_object_reference(&to->obj->base);
+	ring->last_context_obj = to->obj;
+	to->is_initialized = true;
+
+	return 0;
+}
+
+/**
+ * i915_switch_context() - perform a GPU context switch.
+ * @ring: ring for which we'll execute the context switch
+ * @file: file associated with the context, may be NULL
+ * @to_id: id of the context to switch to
+ *
+ * The context life cycle is simple. The context refcount is incremented and
+ * decremented by 1 on create and destroy. If the context is in use by the GPU,
+ * it will have a refcount > 1. This allows us to destroy the context abstract
+ * object while letting the normal object tracking destroy the backing BO.
+ */
+int i915_switch_context(struct intel_ring_buffer *ring,
+			struct drm_file *file,
+			int to_id)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct i915_hw_context *to;
+
+	if (dev_priv->hw_contexts_disabled)
+		return 0;
+
+	if (ring != &dev_priv->ring[RCS])
+		return 0;
+
+	if (to_id == DEFAULT_CONTEXT_ID) {
+		to = ring->default_context;
+	} else {
+		if (file == NULL)
+			return -EINVAL;
+
+		to = i915_gem_context_get(file->driver_priv, to_id);
+		if (to == NULL)
+			return -ENOENT;
+	}
+
+	return do_switch(to);
+}
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_context_create *args = data;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct i915_hw_context *ctx;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	if (dev_priv->hw_contexts_disabled)
+		return -ENODEV;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ctx = create_hw_context(dev, file_priv);
+	mutex_unlock(&dev->struct_mutex);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	args->ctx_id = ctx->id;
+	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
+
+	return 0;
+}
+
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file)
+{
+	struct drm_i915_gem_context_destroy *args = data;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct i915_hw_context *ctx;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ctx = i915_gem_context_get(file_priv, args->ctx_id);
+	if (!ctx) {
+		mutex_unlock(&dev->struct_mutex);
+		return -ENOENT;
+	}
+
+	do_destroy(ctx);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
+	return 0;
+}
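+
+/*
+ * Illustrative user-space sketch (not part of this driver) of the two ioctls
+ * above, assuming libdrm's drmIoctl() wrapper:
+ *
+ *	struct drm_i915_gem_context_create create = { 0 };
+ *	struct drm_i915_gem_context_destroy destroy = { 0 };
+ *
+ *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) == 0) {
+ *		... pass create.ctx_id to execbuffer2 via
+ *		    i915_execbuffer2_set_context_id() ...
+ *		destroy.ctx_id = create.ctx_id;
+ *		drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
+ *	}
+ */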
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_gem_debug.c b/linux-imx/drivers/gpu/drm/i915/i915_gem_debug.c
new file mode 100644
index 0000000..582e6a5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#if WATCH_LISTS
+int
+i915_verify_lists(struct drm_device *dev)
+{
+	static int warned;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	int err = 0;
+
+	if (warned)
+		return 0;
+
+	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed render active %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
+			DRM_ERROR("invalid render active %p (a %d r %x)\n",
+				  obj,
+				  obj->active,
+				  obj->base.read_domains);
+			err++;
+		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
+			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
+				  obj,
+				  obj->base.write_domain,
+				  !list_empty(&obj->gpu_write_list));
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed flushing %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
+			   list_empty(&obj->gpu_write_list)) {
+			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
+				  obj,
+				  obj->active,
+				  obj->base.write_domain,
+				  !list_empty(&obj->gpu_write_list));
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed gpu write %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
+			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
+				  obj,
+				  obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed inactive %p\n", obj);
+			err++;
+			break;
+		} else if (obj->pin_count || obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
+				  obj,
+				  obj->pin_count, obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
+	}
+
+	return warned = err;
+}
+#endif /* WATCH_LISTS */
+
+#if WATCH_COHERENCY
+void
+i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
+{
+	struct drm_device *dev = obj->base.dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int page;
+	uint32_t __iomem *gtt_mapping;
+	uint32_t *backing_map = NULL;
+	int bad_count = 0;
+
+	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
+		 __func__, obj, obj->gtt_offset, handle,
+		 obj->base.size / 1024);
+
+	gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
+			      obj->base.size);
+	if (gtt_mapping == NULL) {
+		DRM_ERROR("failed to map GTT space\n");
+		return;
+	}
+
+	for (page = 0; page < obj->base.size / PAGE_SIZE; page++) {
+		int i;
+
+		backing_map = kmap_atomic(i915_gem_object_get_page(obj, page));
+
+		if (backing_map == NULL) {
+			DRM_ERROR("failed to map backing page\n");
+			goto out;
+		}
+
+		for (i = 0; i < PAGE_SIZE / 4; i++) {
+			uint32_t cpuval = backing_map[i];
+			uint32_t gttval = readl(gtt_mapping +
+						page * 1024 + i);
+
+			if (cpuval != gttval) {
+				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
+					 "0x%08x vs 0x%08x\n",
+					 (int)(obj->gtt_offset +
+					       page * PAGE_SIZE + i * 4),
+					 cpuval, gttval);
+				if (bad_count++ >= 8) {
+					DRM_INFO("...\n");
+					goto out;
+				}
+			}
+		}
+		kunmap_atomic(backing_map);
+		backing_map = NULL;
+	}
+
+ out:
+	if (backing_map != NULL)
+		kunmap_atomic(backing_map);
+	iounmap(gtt_mapping);
+
+	/* give syslog time to catch up */
+	msleep(1);
+
+	/* Directly flush the object, since we just loaded values with the CPU
+	 * from the backing pages and we don't want to disturb the cache
+	 * management that we're trying to observe.
+	 */
+
+	i915_gem_clflush_object(obj);
+}
+#endif
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/linux-imx/drivers/gpu/drm/i915/i915_gem_dmabuf.c
new file mode 100644
index 0000000..dc53a52
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2012 Red Hat Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include <linux/dma-buf.h>
+
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+					     enum dma_data_direction dir)
+{
+	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+	struct sg_table *st;
+	struct scatterlist *src, *dst;
+	int ret, i;
+
+	ret = i915_mutex_lock_interruptible(obj->base.dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret) {
+		st = ERR_PTR(ret);
+		goto out;
+	}
+
+	/* Copy sg so that we make an independent mapping */
+	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (st == NULL) {
+		st = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+	if (ret) {
+		kfree(st);
+		st = ERR_PTR(ret);
+		goto out;
+	}
+
+	src = obj->pages->sgl;
+	dst = st->sgl;
+	for (i = 0; i < obj->pages->nents; i++) {
+		sg_set_page(dst, sg_page(src), src->length, 0);
+		dst = sg_next(dst);
+		src = sg_next(src);
+	}
+
+	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+		sg_free_table(st);
+		kfree(st);
+		st = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	i915_gem_object_pin_pages(obj);
+
+out:
+	mutex_unlock(&obj->base.dev->struct_mutex);
+	return st;
+}
+
+static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				   struct sg_table *sg,
+				   enum dma_data_direction dir)
+{
+	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+	sg_free_table(sg);
+	kfree(sg);
+}
+
+static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+
+	if (obj->base.export_dma_buf == dma_buf) {
+		/* drop the reference the export fd holds */
+		obj->base.export_dma_buf = NULL;
+		drm_gem_object_unreference_unlocked(&obj->base);
+	}
+}
+
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	struct sg_page_iter sg_iter;
+	struct page **pages;
+	int ret, i;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (obj->dma_buf_vmapping) {
+		obj->vmapping_count++;
+		goto out_unlock;
+	}
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		goto error;
+
+	ret = -ENOMEM;
+
+	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+	if (pages == NULL)
+		goto error;
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+		pages[i++] = sg_page_iter_page(&sg_iter);
+
+	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
+	drm_free_large(pages);
+
+	if (!obj->dma_buf_vmapping)
+		goto error;
+
+	obj->vmapping_count = 1;
+	i915_gem_object_pin_pages(obj);
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return obj->dma_buf_vmapping;
+
+error:
+	mutex_unlock(&dev->struct_mutex);
+	return ERR_PTR(ret);
+}
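+
+/*
+ * Note on the vmap path above: the kernel mapping is shared and
+ * refcounted. The first caller builds the mapping and pins the backing
+ * pages; later callers only bump vmapping_count, and the mapping is
+ * torn down in the vunmap below once the count drops back to zero.
+ */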
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return;
+
+	if (--obj->vmapping_count == 0) {
+		vunmap(obj->dma_buf_vmapping);
+		obj->dma_buf_vmapping = NULL;
+
+		i915_gem_object_unpin_pages(obj);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
+static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+	return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+}
+
+static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+	return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+}
+
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, write);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+static const struct dma_buf_ops i915_dmabuf_ops =  {
+	.map_dma_buf = i915_gem_map_dma_buf,
+	.unmap_dma_buf = i915_gem_unmap_dma_buf,
+	.release = i915_gem_dmabuf_release,
+	.kmap = i915_gem_dmabuf_kmap,
+	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
+	.kunmap = i915_gem_dmabuf_kunmap,
+	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+	.mmap = i915_gem_dmabuf_mmap,
+	.vmap = i915_gem_dmabuf_vmap,
+	.vunmap = i915_gem_dmabuf_vunmap,
+	.begin_cpu_access = i915_gem_begin_cpu_access,
+};
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+				      struct drm_gem_object *gem_obj, int flags)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
+}
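+
+/*
+ * Illustrative flow (a sketch, not a complete description of the PRIME
+ * ioctls): the dma_buf returned here is typically turned into a file
+ * descriptor for userspace, handed to another process or driver, and
+ * resolved back to a GEM object via i915_gem_prime_import() below. The
+ * import path short-circuits dma_bufs that we exported ourselves.
+ */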
+
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+	struct sg_table *sg;
+
+	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg))
+		return PTR_ERR(sg);
+
+	obj->pages = sg;
+	obj->has_dma_mapping = true;
+	return 0;
+}
+
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+	dma_buf_unmap_attachment(obj->base.import_attach,
+				 obj->pages, DMA_BIDIRECTIONAL);
+	obj->has_dma_mapping = false;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+	.get_pages = i915_gem_object_get_pages_dmabuf,
+	.put_pages = i915_gem_object_put_pages_dmabuf,
+};
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+					     struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	/* is this one of our own objects? */
+	if (dma_buf->ops == &i915_dmabuf_ops) {
+		obj = dma_buf->priv;
+		/* is it from our device? */
+		if (obj->base.dev == dev) {
+			/*
+			 * Importing a dmabuf exported from our own gem
+			 * increases the refcount on the gem object itself
+			 * instead of the f_count of the dmabuf.
+			 */
+			drm_gem_object_reference(&obj->base);
+			return &obj->base;
+		}
+	}
+
+	/* need to attach */
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	get_dma_buf(dma_buf);
+
+	obj = i915_gem_object_alloc(dev);
+	if (obj == NULL) {
+		ret = -ENOMEM;
+		goto fail_detach;
+	}
+
+	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
+	if (ret) {
+		i915_gem_object_free(obj);
+		goto fail_detach;
+	}
+
+	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
+	obj->base.import_attach = attach;
+
+	return &obj->base;
+
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	dma_buf_put(dma_buf);
+
+	return ERR_PTR(ret);
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_gem_evict.c b/linux-imx/drivers/gpu/drm/i915/i915_gem_evict.c
new file mode 100644
index 0000000..c86d5d9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_trace.h"
+
+static bool
+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
+{
+	if (obj->pin_count)
+		return false;
+
+	list_add(&obj->exec_list, unwind);
+	return drm_mm_scan_add_block(obj->gtt_space);
+}
+
+int
+i915_gem_evict_something(struct drm_device *dev, int min_size,
+			 unsigned alignment, unsigned cache_level,
+			 bool mappable, bool nonblocking)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct list_head eviction_list, unwind_list;
+	struct drm_i915_gem_object *obj;
+	int ret = 0;
+
+	trace_i915_gem_evict(dev, min_size, alignment, mappable);
+
+	/*
+	 * The goal is to evict objects and amalgamate space in LRU order.
+	 * The oldest idle objects reside on the inactive list, which is in
+	 * retirement order. The next objects to retire are those on the (per
+	 * ring) active list that do not have an outstanding flush. Once the
+	 * hardware reports completion (the seqno is updated after the
+	 * batchbuffer has been finished) the clean buffer objects would
+	 * be retired to the inactive list. Any dirty objects would be added
+	 * to the tail of the flushing list. So after processing the clean
+	 * active objects we need to emit a MI_FLUSH to retire the flushing
+	 * list, hence the retirement order of the flushing list is in
+	 * advance of the dirty objects on the active lists.
+	 *
+	 * The retirement sequence is thus:
+	 *   1. Inactive objects (already retired)
+	 *   2. Clean active objects
+	 *   3. Flushing list
+	 *   4. Dirty active objects.
+	 *
+	 * On each list, the oldest objects lie at the HEAD with the freshest
+	 * object on the TAIL.
+	 */
+
+	INIT_LIST_HEAD(&unwind_list);
+	if (mappable)
+		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
+					    min_size, alignment, cache_level,
+					    0, dev_priv->gtt.mappable_end);
+	else
+		drm_mm_init_scan(&dev_priv->mm.gtt_space,
+				 min_size, alignment, cache_level);
+
+	/* First see if there is a large enough contiguous idle region... */
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+		if (mark_free(obj, &unwind_list))
+			goto found;
+	}
+
+	if (nonblocking)
+		goto none;
+
+	/* Now merge in the soon-to-be-expired objects... */
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (mark_free(obj, &unwind_list))
+			goto found;
+	}
+
+none:
+	/* Nothing found, clean up and bail out! */
+	while (!list_empty(&unwind_list)) {
+		obj = list_first_entry(&unwind_list,
+				       struct drm_i915_gem_object,
+				       exec_list);
+
+		ret = drm_mm_scan_remove_block(obj->gtt_space);
+		BUG_ON(ret);
+
+		list_del_init(&obj->exec_list);
+	}
+
+	/* We expect the caller to unpin, evict all and try again, or give up.
+	 * So calling i915_gem_evict_everything() is unnecessary.
+	 */
+	return -ENOSPC;
+
+found:
+	/* drm_mm doesn't allow any other operations while
+	 * scanning, so store the objects to be evicted on a
+	 * temporary list. */
+	INIT_LIST_HEAD(&eviction_list);
+	while (!list_empty(&unwind_list)) {
+		obj = list_first_entry(&unwind_list,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		if (drm_mm_scan_remove_block(obj->gtt_space)) {
+			list_move(&obj->exec_list, &eviction_list);
+			drm_gem_object_reference(&obj->base);
+			continue;
+		}
+		list_del_init(&obj->exec_list);
+	}
+
+	/* Unbinding will emit any required flushes */
+	while (!list_empty(&eviction_list)) {
+		obj = list_first_entry(&eviction_list,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		if (ret == 0)
+			ret = i915_gem_object_unbind(obj);
+
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
+	}
+
+	return ret;
+}
+
+int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj, *next;
+	bool lists_empty;
+	int ret;
+
+	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+		       list_empty(&dev_priv->mm.active_list));
+	if (lists_empty)
+		return -ENOSPC;
+
+	trace_i915_gem_evict_everything(dev);
+
+	/* The gpu_idle will flush everything in the write domain to the
+	 * active list. Then we must move everything off the active list
+	 * with retire requests.
+	 */
+	ret = i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests(dev);
+
+	/* Having flushed everything, unbind() should never raise an error */
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.inactive_list, mm_list)
+		if (obj->pin_count == 0)
+			WARN_ON(i915_gem_object_unbind(obj));
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/linux-imx/drivers/gpu/drm/i915/i915_gem_execbuffer.c
new file mode 100644
index 0000000..6416d0d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -0,0 +1,1234 @@
+/*
+ * Copyright © 2008,2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+#include <linux/dma_remapping.h>
+
+struct eb_objects {
+	struct list_head objects;
+	int and;
+	union {
+		struct drm_i915_gem_object *lut[0];
+		struct hlist_head buckets[0];
+	};
+};
+
+static struct eb_objects *
+eb_create(struct drm_i915_gem_execbuffer2 *args)
+{
+	struct eb_objects *eb = NULL;
+
+	if (args->flags & I915_EXEC_HANDLE_LUT) {
+		int size = args->buffer_count;
+		size *= sizeof(struct drm_i915_gem_object *);
+		size += sizeof(struct eb_objects);
+		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+	}
+
+	if (eb == NULL) {
+		int size = args->buffer_count;
+		int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
+		while (count > 2*size)
+			count >>= 1;
+		eb = kzalloc(count*sizeof(struct hlist_head) +
+			     sizeof(struct eb_objects),
+			     GFP_TEMPORARY);
+		if (eb == NULL)
+			return eb;
+
+		eb->and = count - 1;
+	} else
+		eb->and = -args->buffer_count;
+
+	INIT_LIST_HEAD(&eb->objects);
+	return eb;
+}
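+
+/*
+ * Worked example for the hash-table fallback above (values assume
+ * PAGE_SIZE == 4096 and an 8-byte struct hlist_head): the bucket count
+ * starts at 4096/8/2 = 256 and is halved while it exceeds twice the
+ * buffer count, so a 20-buffer execbuf ends up with 32 buckets and
+ * eb->and == 31, i.e. handles are hashed with "handle & 31".
+ */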
+
+static void
+eb_reset(struct eb_objects *eb)
+{
+	if (eb->and >= 0)
+		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+}
+
+static int
+eb_lookup_objects(struct eb_objects *eb,
+		  struct drm_i915_gem_exec_object2 *exec,
+		  const struct drm_i915_gem_execbuffer2 *args,
+		  struct drm_file *file)
+{
+	int i;
+
+	spin_lock(&file->table_lock);
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_i915_gem_object *obj;
+
+		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
+		if (obj == NULL) {
+			spin_unlock(&file->table_lock);
+			DRM_DEBUG("Invalid object handle %d at index %d\n",
+				   exec[i].handle, i);
+			return -ENOENT;
+		}
+
+		if (!list_empty(&obj->exec_list)) {
+			spin_unlock(&file->table_lock);
+			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
+				   obj, exec[i].handle, i);
+			return -EINVAL;
+		}
+
+		drm_gem_object_reference(&obj->base);
+		list_add_tail(&obj->exec_list, &eb->objects);
+
+		obj->exec_entry = &exec[i];
+		if (eb->and < 0) {
+			eb->lut[i] = obj;
+		} else {
+			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
+			obj->exec_handle = handle;
+			hlist_add_head(&obj->exec_node,
+				       &eb->buckets[handle & eb->and]);
+		}
+	}
+	spin_unlock(&file->table_lock);
+
+	return 0;
+}
+
+static struct drm_i915_gem_object *
+eb_get_object(struct eb_objects *eb, unsigned long handle)
+{
+	if (eb->and < 0) {
+		if (handle >= -eb->and)
+			return NULL;
+		return eb->lut[handle];
+	} else {
+		struct hlist_head *head;
+		struct hlist_node *node;
+
+		head = &eb->buckets[handle & eb->and];
+		hlist_for_each(node, head) {
+			struct drm_i915_gem_object *obj;
+
+			obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
+			if (obj->exec_handle == handle)
+				return obj;
+		}
+		return NULL;
+	}
+}
+
+static void
+eb_destroy(struct eb_objects *eb)
+{
+	while (!list_empty(&eb->objects)) {
+		struct drm_i915_gem_object *obj;
+
+		obj = list_first_entry(&eb->objects,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
+	}
+	kfree(eb);
+}
+
+static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
+{
+	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+		!obj->map_and_fenceable ||
+		obj->cache_level != I915_CACHE_NONE);
+}
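+
+/*
+ * Rationale for use_cpu_reloc() above: the CPU (kmap) path is taken
+ * when the object is already in the CPU write domain, cannot be
+ * reached through the mappable GTT aperture, or is cacheable and so
+ * cheap to touch with the CPU; otherwise the relocation is poked
+ * directly into the aperture with an uncached GTT write.
+ */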
+
+static int
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+				   struct eb_objects *eb,
+				   struct drm_i915_gem_relocation_entry *reloc)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_gem_object *target_obj;
+	struct drm_i915_gem_object *target_i915_obj;
+	uint32_t target_offset;
+	int ret = -EINVAL;
+
+	/* we already hold a reference to all valid objects */
+	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
+	if (unlikely(target_obj == NULL))
+		return -ENOENT;
+
+	target_i915_obj = to_intel_bo(target_obj);
+	target_offset = target_i915_obj->gtt_offset;
+
+	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+	 * pipe_control writes because the gpu doesn't properly redirect them
+	 * through the ppgtt for non-secure batchbuffers. */
+	if (unlikely(IS_GEN6(dev) &&
+	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+	    !target_i915_obj->has_global_gtt_mapping)) {
+		i915_gem_gtt_bind_object(target_i915_obj,
+					 target_i915_obj->cache_level);
+	}
+
+	/* Validate that the target is in a valid r/w GPU domain */
+	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
+		DRM_DEBUG("reloc with multiple write domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		return ret;
+	}
+	if (unlikely((reloc->write_domain | reloc->read_domains)
+		     & ~I915_GEM_GPU_DOMAINS)) {
+		DRM_DEBUG("reloc with read/write non-GPU domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		return ret;
+	}
+
+	target_obj->pending_read_domains |= reloc->read_domains;
+	target_obj->pending_write_domain |= reloc->write_domain;
+
+	/* If the relocation already has the right value in it, no
+	 * more work needs to be done.
+	 */
+	if (target_offset == reloc->presumed_offset)
+		return 0;
+
+	/* Check that the relocation address is valid... */
+	if (unlikely(reloc->offset > obj->base.size - 4)) {
+		DRM_DEBUG("Relocation beyond object bounds: "
+			  "obj %p target %d offset %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  (int) obj->base.size);
+		return ret;
+	}
+	if (unlikely(reloc->offset & 3)) {
+		DRM_DEBUG("Relocation not 4-byte aligned: "
+			  "obj %p target %d offset %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset);
+		return ret;
+	}
+
+	/* We can't wait for rendering with pagefaults disabled */
+	if (obj->active && in_atomic())
+		return -EFAULT;
+
+	reloc->delta += target_offset;
+	if (use_cpu_reloc(obj)) {
+		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+		char *vaddr;
+
+		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+		if (ret)
+			return ret;
+
+		vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+							     reloc->offset >> PAGE_SHIFT));
+		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+		kunmap_atomic(vaddr);
+	} else {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		uint32_t __iomem *reloc_entry;
+		void __iomem *reloc_page;
+
+		ret = i915_gem_object_set_to_gtt_domain(obj, true);
+		if (ret)
+			return ret;
+
+		ret = i915_gem_object_put_fence(obj);
+		if (ret)
+			return ret;
+
+		/* Map the page containing the relocation we're going to perform.  */
+		reloc->offset += obj->gtt_offset;
+		reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+						      reloc->offset & PAGE_MASK);
+		reloc_entry = (uint32_t __iomem *)
+			(reloc_page + (reloc->offset & ~PAGE_MASK));
+		iowrite32(reloc->delta, reloc_entry);
+		io_mapping_unmap_atomic(reloc_page);
+	}
+
+	/* and update the user's relocation entry */
+	reloc->presumed_offset = target_offset;
+
+	return 0;
+}
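+
+/*
+ * Worked example (hypothetical values): a batch references a target
+ * buffer at batch offset 0x80 with reloc->delta == 0x10. If the target
+ * is now bound at gtt_offset 0x200000 while userspace presumed
+ * 0x100000, the code above writes 0x200010 into the batch at offset
+ * 0x80 and reports presumed_offset = 0x200000 back; had the presumed
+ * offset already matched, the relocation would have been skipped.
+ */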
+
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+				    struct eb_objects *eb)
+{
+#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
+	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
+	struct drm_i915_gem_relocation_entry __user *user_relocs;
+	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	int remain, ret;
+
+	user_relocs = to_user_ptr(entry->relocs_ptr);
+
+	remain = entry->relocation_count;
+	while (remain) {
+		struct drm_i915_gem_relocation_entry *r = stack_reloc;
+		int count = remain;
+		if (count > ARRAY_SIZE(stack_reloc))
+			count = ARRAY_SIZE(stack_reloc);
+		remain -= count;
+
+		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
+			return -EFAULT;
+
+		do {
+			u64 offset = r->presumed_offset;
+
+			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+			if (ret)
+				return ret;
+
+			if (r->presumed_offset != offset &&
+			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
+						    &r->presumed_offset,
+						    sizeof(r->presumed_offset))) {
+				return -EFAULT;
+			}
+
+			user_relocs++;
+			r++;
+		} while (--count);
+	}
+
+	return 0;
+#undef N_RELOC
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+					 struct eb_objects *eb,
+					 struct drm_i915_gem_relocation_entry *relocs)
+{
+	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	int i, ret;
+
+	for (i = 0; i < entry->relocation_count; i++) {
+		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct eb_objects *eb)
+{
+	struct drm_i915_gem_object *obj;
+	int ret = 0;
+
+	/* This is the fast path and we cannot handle a pagefault whilst
+	 * holding the struct mutex lest the user pass in the relocations
+	 * contained within a mmaped bo. In such a case the page fault
+	 * handler would call i915_gem_fault() and we would try to acquire
+	 * the struct mutex again. Obviously this is bad, and so lockdep
+	 * complains vehemently.
+	 */
+	pagefault_disable();
+	list_for_each_entry(obj, &eb->objects, exec_list) {
+		ret = i915_gem_execbuffer_relocate_object(obj, eb);
+		if (ret)
+			break;
+	}
+	pagefault_enable();
+
+	return ret;
+}
+
+#define  __EXEC_OBJECT_HAS_PIN (1<<31)
+#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+
+static int
+need_reloc_mappable(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	return entry->relocation_count && !use_cpu_reloc(obj);
+}
+
+static int
+i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
+				   struct intel_ring_buffer *ring,
+				   bool *need_reloc)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	bool need_fence, need_mappable;
+	int ret;
+
+	need_fence =
+		has_fenced_gpu_access &&
+		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		obj->tiling_mode != I915_TILING_NONE;
+	need_mappable = need_fence || need_reloc_mappable(obj);
+
+	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
+	if (ret)
+		return ret;
+
+	entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
+	if (has_fenced_gpu_access) {
+		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+			ret = i915_gem_object_get_fence(obj);
+			if (ret)
+				return ret;
+
+			if (i915_gem_object_pin_fence(obj))
+				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+
+			obj->pending_fenced_gpu_access = true;
+		}
+	}
+
+	/* Ensure ppgtt mapping exists if needed */
+	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+				       obj, obj->cache_level);
+
+		obj->has_aliasing_ppgtt_mapping = 1;
+	}
+
+	if (entry->offset != obj->gtt_offset) {
+		entry->offset = obj->gtt_offset;
+		*need_reloc = true;
+	}
+
+	if (entry->flags & EXEC_OBJECT_WRITE) {
+		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
+		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
+	}
+
+	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
+	    !obj->has_global_gtt_mapping)
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
+
+	return 0;
+}
+
+static void
+i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+
+	if (!obj->gtt_space)
+		return;
+
+	entry = obj->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static int
+i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+			    struct list_head *objects,
+			    bool *need_relocs)
+{
+	struct drm_i915_gem_object *obj;
+	struct list_head ordered_objects;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	int retry;
+
+	INIT_LIST_HEAD(&ordered_objects);
+	while (!list_empty(objects)) {
+		struct drm_i915_gem_exec_object2 *entry;
+		bool need_fence, need_mappable;
+
+		obj = list_first_entry(objects,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		entry = obj->exec_entry;
+
+		need_fence =
+			has_fenced_gpu_access &&
+			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+			obj->tiling_mode != I915_TILING_NONE;
+		need_mappable = need_fence || need_reloc_mappable(obj);
+
+		if (need_mappable)
+			list_move(&obj->exec_list, &ordered_objects);
+		else
+			list_move_tail(&obj->exec_list, &ordered_objects);
+
+		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
+		obj->base.pending_write_domain = 0;
+		obj->pending_fenced_gpu_access = false;
+	}
+	list_splice(&ordered_objects, objects);
+
+	/* Attempt to pin all of the buffers into the GTT.
+	 * This is done in 3 phases:
+	 *
+	 * 1a. Unbind all objects that do not match the GTT constraints for
+	 *     the execbuffer (fenceable, mappable, alignment etc).
+	 * 1b. Increment pin count for already bound objects.
+	 * 2.  Bind new objects.
+	 * 3.  Decrement pin count.
+	 *
+	 * This avoids unnecessary unbinding of later objects in order to make
+	 * room for the earlier objects *unless* we need to defragment.
+	 */
+	retry = 0;
+	do {
+		int ret = 0;
+
+		/* Unbind any ill-fitting objects or pin. */
+		list_for_each_entry(obj, objects, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+			bool need_fence, need_mappable;
+
+			if (!obj->gtt_space)
+				continue;
+
+			need_fence =
+				has_fenced_gpu_access &&
+				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+				obj->tiling_mode != I915_TILING_NONE;
+			need_mappable = need_fence || need_reloc_mappable(obj);
+
+			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+			    (need_mappable && !obj->map_and_fenceable))
+				ret = i915_gem_object_unbind(obj);
+			else
+				ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+			if (ret)
+				goto err;
+		}
+
+		/* Bind fresh objects */
+		list_for_each_entry(obj, objects, exec_list) {
+			if (obj->gtt_space)
+				continue;
+
+			ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+			if (ret)
+				goto err;
+		}
+
+err:		/* Decrement pin count for bound objects */
+		list_for_each_entry(obj, objects, exec_list)
+			i915_gem_execbuffer_unreserve_object(obj);
+
+		if (ret != -ENOSPC || retry++)
+			return ret;
+
+		ret = i915_gem_evict_everything(ring->dev);
+		if (ret)
+			return ret;
+	} while (1);
+}
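+
+/*
+ * Note on the retry policy above: the first -ENOSPC triggers a single
+ * i915_gem_evict_everything() followed by one more pass ("retry++"
+ * evaluates to 0 the first time); a second -ENOSPC, or any other
+ * error, is returned to the caller unmodified.
+ */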
+
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+				  struct drm_i915_gem_execbuffer2 *args,
+				  struct drm_file *file,
+				  struct intel_ring_buffer *ring,
+				  struct eb_objects *eb,
+				  struct drm_i915_gem_exec_object2 *exec)
+{
+	struct drm_i915_gem_relocation_entry *reloc;
+	struct drm_i915_gem_object *obj;
+	bool need_relocs;
+	int *reloc_offset;
+	int i, total, ret;
+	int count = args->buffer_count;
+
+	/* We may process another execbuffer during the unlock... */
+	while (!list_empty(&eb->objects)) {
+		obj = list_first_entry(&eb->objects,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	total = 0;
+	for (i = 0; i < count; i++)
+		total += exec[i].relocation_count;
+
+	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
+	reloc = drm_malloc_ab(total, sizeof(*reloc));
+	if (reloc == NULL || reloc_offset == NULL) {
+		drm_free_large(reloc);
+		drm_free_large(reloc_offset);
+		mutex_lock(&dev->struct_mutex);
+		return -ENOMEM;
+	}
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_relocation_entry __user *user_relocs;
+		u64 invalid_offset = (u64)-1;
+		int j;
+
+		user_relocs = to_user_ptr(exec[i].relocs_ptr);
+
+		if (copy_from_user(reloc+total, user_relocs,
+				   exec[i].relocation_count * sizeof(*reloc))) {
+			ret = -EFAULT;
+			mutex_lock(&dev->struct_mutex);
+			goto err;
+		}
+
+		/* As we do not update the known relocation offsets after
+		 * relocating (due to the complexities in lock handling),
+		 * we need to mark them as invalid now so that we force the
+		 * relocation processing next time. Just in case the target
+		 * object is evicted and then rebound into its old
+		 * presumed_offset before the next execbuffer - if that
+		 * happened we would make the mistake of assuming that the
+		 * relocations were valid.
+		 */
+		for (j = 0; j < exec[i].relocation_count; j++) {
+			if (__copy_to_user(&user_relocs[j].presumed_offset,
+					   &invalid_offset,
+					   sizeof(invalid_offset))) {
+				ret = -EFAULT;
+				mutex_lock(&dev->struct_mutex);
+				goto err;
+			}
+		}
+
+		reloc_offset[i] = total;
+		total += exec[i].relocation_count;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		mutex_lock(&dev->struct_mutex);
+		goto err;
+	}
+
+	/* reacquire the objects */
+	eb_reset(eb);
+	ret = eb_lookup_objects(eb, exec, args, file);
+	if (ret)
+		goto err;
+
+	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+	if (ret)
+		goto err;
+
+	list_for_each_entry(obj, &eb->objects, exec_list) {
+		int offset = obj->exec_entry - exec;
+		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
+							       reloc + reloc_offset[offset]);
+		if (ret)
+			goto err;
+	}
+
+	/* Leave the user relocations as they are; this is the painfully slow path,
+	 * and we want to avoid the complication of dropping the lock whilst
+	 * having buffers reserved in the aperture and so causing spurious
+	 * ENOSPC for random operations.
+	 */
+
+err:
+	drm_free_large(reloc);
+	drm_free_large(reloc_offset);
+	return ret;
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+				struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+	uint32_t flush_domains = 0;
+	int ret;
+
+	list_for_each_entry(obj, objects, exec_list) {
+		ret = i915_gem_object_sync(obj, ring);
+		if (ret)
+			return ret;
+
+		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
+			i915_gem_clflush_object(obj);
+
+		flush_domains |= obj->base.write_domain;
+	}
+
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		i915_gem_chipset_flush(ring->dev);
+
+	if (flush_domains & I915_GEM_DOMAIN_GTT)
+		wmb();
+
+	/* Unconditionally invalidate gpu caches and ensure that we do flush
+	 * any residual writes from the previous batch.
+	 */
+	return intel_ring_invalidate_all_caches(ring);
+}
+
+static bool
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+{
+	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
+		return false;
+
+	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
+}
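+
+/*
+ * Example: batch_start_offset = 0x40 and batch_len = 0x100 pass the
+ * check above (both 8-byte aligned), whereas batch_start_offset = 0x44
+ * is rejected since (0x44 & 0x7) != 0.
+ */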
+
+static int
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+		   int count)
+{
+	int i;
+	int relocs_total = 0;
+	int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+
+	for (i = 0; i < count; i++) {
+		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
+		int length; /* limited by fault_in_pages_readable() */
+
+		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
+			return -EINVAL;
+
+		/* First check for malicious input causing overflow in
+		 * the worst case where we need to allocate the entire
+		 * relocation tree as a single array.
+		 */
+		if (exec[i].relocation_count > relocs_max - relocs_total)
+			return -EINVAL;
+		relocs_total += exec[i].relocation_count;
+
+		length = exec[i].relocation_count *
+			sizeof(struct drm_i915_gem_relocation_entry);
+		/*
+		 * We must check that the entire relocation array is safe
+		 * to read, but since we may need to update the presumed
+		 * offsets during execution, check for full write access.
+		 */
+		if (!access_ok(VERIFY_WRITE, ptr, length))
+			return -EFAULT;
+
+		if (fault_in_multipages_readable(ptr, length))
+			return -EFAULT;
+	}
+
+	return 0;
+}
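+
+/*
+ * Worked bound (assuming the 32-byte struct drm_i915_gem_relocation_entry
+ * of this uapi): relocs_max = INT_MAX / 32 = 67108863, so the running
+ * total can never make "total * sizeof(*reloc)" overflow a signed int
+ * in the slow-path allocation later on.
+ */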
+
+static void
+i915_gem_execbuffer_move_to_active(struct list_head *objects,
+				   struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_object *obj;
+
+	list_for_each_entry(obj, objects, exec_list) {
+		u32 old_read = obj->base.read_domains;
+		u32 old_write = obj->base.write_domain;
+
+		obj->base.write_domain = obj->base.pending_write_domain;
+		if (obj->base.write_domain == 0)
+			obj->base.pending_read_domains |= obj->base.read_domains;
+		obj->base.read_domains = obj->base.pending_read_domains;
+		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
+
+		i915_gem_object_move_to_active(obj, ring);
+		if (obj->base.write_domain) {
+			obj->dirty = 1;
+			obj->last_write_seqno = intel_ring_get_seqno(ring);
+			if (obj->pin_count) /* check for potential scanout */
+				intel_mark_fb_busy(obj);
+		}
+
+		trace_i915_gem_object_change_domain(obj, old_read, old_write);
+	}
+}
+
+static void
+i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+				    struct drm_file *file,
+				    struct intel_ring_buffer *ring)
+{
+	/* Unconditionally force add_request to emit a full flush. */
+	ring->gpu_caches_dirty = true;
+
+	/* Add a breadcrumb for the completion of the batch buffer */
+	(void)i915_add_request(ring, file, NULL);
+}
+
+static int
+i915_reset_gen7_sol_offsets(struct drm_device *dev,
+			    struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret, i;
+
+	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
+		return 0;
+
+	ret = intel_ring_begin(ring, 4 * 3);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < 4; i++) {
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+		intel_ring_emit(ring, 0);
+	}
+
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+		       struct drm_file *file,
+		       struct drm_i915_gem_execbuffer2 *args,
+		       struct drm_i915_gem_exec_object2 *exec)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct eb_objects *eb;
+	struct drm_i915_gem_object *batch_obj;
+	struct drm_clip_rect *cliprects = NULL;
+	struct intel_ring_buffer *ring;
+	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
+	u32 exec_start, exec_len;
+	u32 mask, flags;
+	int ret, mode, i;
+	bool need_relocs;
+
+	if (!i915_gem_check_execbuffer(args))
+		return -EINVAL;
+
+	ret = validate_exec_list(exec, args->buffer_count);
+	if (ret)
+		return ret;
+
+	flags = 0;
+	if (args->flags & I915_EXEC_SECURE) {
+		if (!file->is_master || !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		flags |= I915_DISPATCH_SECURE;
+	}
+	if (args->flags & I915_EXEC_IS_PINNED)
+		flags |= I915_DISPATCH_PINNED;
+
+	switch (args->flags & I915_EXEC_RING_MASK) {
+	case I915_EXEC_DEFAULT:
+	case I915_EXEC_RENDER:
+		ring = &dev_priv->ring[RCS];
+		break;
+	case I915_EXEC_BSD:
+		ring = &dev_priv->ring[VCS];
+		if (ctx_id != 0) {
+			DRM_DEBUG("Ring %s doesn't support contexts\n",
+				  ring->name);
+			return -EPERM;
+		}
+		break;
+	case I915_EXEC_BLT:
+		ring = &dev_priv->ring[BCS];
+		if (ctx_id != 0) {
+			DRM_DEBUG("Ring %s doesn't support contexts\n",
+				  ring->name);
+			return -EPERM;
+		}
+		break;
+	default:
+		DRM_DEBUG("execbuf with unknown ring: %d\n",
+			  (int)(args->flags & I915_EXEC_RING_MASK));
+		return -EINVAL;
+	}
+	if (!intel_ring_initialized(ring)) {
+		DRM_DEBUG("execbuf with invalid ring: %d\n",
+			  (int)(args->flags & I915_EXEC_RING_MASK));
+		return -EINVAL;
+	}
+
+	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+	mask = I915_EXEC_CONSTANTS_MASK;
+	switch (mode) {
+	case I915_EXEC_CONSTANTS_REL_GENERAL:
+	case I915_EXEC_CONSTANTS_ABSOLUTE:
+	case I915_EXEC_CONSTANTS_REL_SURFACE:
+		if (ring == &dev_priv->ring[RCS] &&
+		    mode != dev_priv->relative_constants_mode) {
+			if (INTEL_INFO(dev)->gen < 4)
+				return -EINVAL;
+
+			if (INTEL_INFO(dev)->gen > 5 &&
+			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
+				return -EINVAL;
+
+			/* The HW changed the meaning of this bit on gen6 */
+			if (INTEL_INFO(dev)->gen >= 6)
+				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+		}
+		break;
+	default:
+		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
+		return -EINVAL;
+	}
+
+	if (args->buffer_count < 1) {
+		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	if (args->num_cliprects != 0) {
+		if (ring != &dev_priv->ring[RCS]) {
+			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+			return -EINVAL;
+		}
+
+		if (INTEL_INFO(dev)->gen >= 5) {
+			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+			return -EINVAL;
+		}
+
+		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+			DRM_DEBUG("execbuf with %u cliprects\n",
+				  args->num_cliprects);
+			return -EINVAL;
+		}
+
+		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+				    GFP_KERNEL);
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
+			goto pre_mutex_err;
+		}
+
+		if (copy_from_user(cliprects,
+				   to_user_ptr(args->cliprects_ptr),
+				   sizeof(*cliprects)*args->num_cliprects)) {
+			ret = -EFAULT;
+			goto pre_mutex_err;
+		}
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto pre_mutex_err;
+
+	if (dev_priv->mm.suspended) {
+		mutex_unlock(&dev->struct_mutex);
+		ret = -EBUSY;
+		goto pre_mutex_err;
+	}
+
+	eb = eb_create(args);
+	if (eb == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		ret = -ENOMEM;
+		goto pre_mutex_err;
+	}
+
+	/* Look up object handles */
+	ret = eb_lookup_objects(eb, exec, args, file);
+	if (ret)
+		goto err;
+
+	/* take note of the batch buffer before we might reorder the lists */
+	batch_obj = list_entry(eb->objects.prev,
+			       struct drm_i915_gem_object,
+			       exec_list);
+
+	/* Move the objects en-masse into the GTT, evicting if necessary. */
+	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+	if (ret)
+		goto err;
+
+	/* The objects are in their final locations, apply the relocations. */
+	if (need_relocs)
+		ret = i915_gem_execbuffer_relocate(eb);
+	if (ret) {
+		if (ret == -EFAULT) {
+			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+								eb, exec);
+			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+		}
+		if (ret)
+			goto err;
+	}
+
+	/* Set the pending read domains for the batch buffer to COMMAND */
+	if (batch_obj->base.pending_write_domain) {
+		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+	 * batch" bit. Hence we need to pin secure batches into the global gtt.
+	 * hsw should have this fixed, but let's be paranoid and do it
+	 * unconditionally for now. */
+	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
+		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+
+	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
+	if (ret)
+		goto err;
+
+	ret = i915_switch_context(ring, file, ctx_id);
+	if (ret)
+		goto err;
+
+	if (ring == &dev_priv->ring[RCS] &&
+	    mode != dev_priv->relative_constants_mode) {
+		ret = intel_ring_begin(ring, 4);
+		if (ret)
+			goto err;
+
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, INSTPM);
+		intel_ring_emit(ring, mask << 16 | mode);
+		intel_ring_advance(ring);
+
+		dev_priv->relative_constants_mode = mode;
+	}
+
+	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+		ret = i915_reset_gen7_sol_offsets(dev, ring);
+		if (ret)
+			goto err;
+	}
+
+	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+	exec_len = args->batch_len;
+	if (cliprects) {
+		for (i = 0; i < args->num_cliprects; i++) {
+			ret = i915_emit_box(dev, &cliprects[i],
+					    args->DR1, args->DR4);
+			if (ret)
+				goto err;
+
+			ret = ring->dispatch_execbuffer(ring,
+							exec_start, exec_len,
+							flags);
+			if (ret)
+				goto err;
+		}
+	} else {
+		ret = ring->dispatch_execbuffer(ring,
+						exec_start, exec_len,
+						flags);
+		if (ret)
+			goto err;
+	}
+
+	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+	i915_gem_execbuffer_move_to_active(&eb->objects, ring);
+	i915_gem_execbuffer_retire_commands(dev, file, ring);
+
+err:
+	eb_destroy(eb);
+
+	mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+	kfree(cliprects);
+	return ret;
+}
+
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+		    struct drm_file *file)
+{
+	struct drm_i915_gem_execbuffer *args = data;
+	struct drm_i915_gem_execbuffer2 exec2;
+	struct drm_i915_gem_exec_object *exec_list = NULL;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret, i;
+
+	if (args->buffer_count < 1) {
+		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	/* Copy in the exec list from userland */
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec_list == NULL || exec2_list == NULL) {
+		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec_list,
+			     to_user_ptr(args->buffers_ptr),
+			     sizeof(*exec_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_DEBUG("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < args->buffer_count; i++) {
+		exec2_list[i].handle = exec_list[i].handle;
+		exec2_list[i].relocation_count = exec_list[i].relocation_count;
+		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+		exec2_list[i].alignment = exec_list[i].alignment;
+		exec2_list[i].offset = exec_list[i].offset;
+		if (INTEL_INFO(dev)->gen < 4)
+			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+		else
+			exec2_list[i].flags = 0;
+	}
+
+	exec2.buffers_ptr = args->buffers_ptr;
+	exec2.buffer_count = args->buffer_count;
+	exec2.batch_start_offset = args->batch_start_offset;
+	exec2.batch_len = args->batch_len;
+	exec2.DR1 = args->DR1;
+	exec2.DR4 = args->DR4;
+	exec2.num_cliprects = args->num_cliprects;
+	exec2.cliprects_ptr = args->cliprects_ptr;
+	exec2.flags = I915_EXEC_RENDER;
+	i915_execbuffer2_set_context_id(exec2, 0);
+
+	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+	if (!ret) {
+		struct drm_i915_gem_exec_object __user *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+
+		/* Copy the new buffer offsets back to the user's exec list. */
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user (%d)\n",
+					  args->buffer_count, ret);
+				break;
+			}
+		}
+	}
+
+	drm_free_large(exec_list);
+	drm_free_large(exec2_list);
+	return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+		     struct drm_file *file)
+{
+	struct drm_i915_gem_execbuffer2 *args = data;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret;
+
+	if (args->buffer_count < 1 ||
+	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
+		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
+			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+	if (exec2_list == NULL)
+		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
+					   args->buffer_count);
+	if (exec2_list == NULL) {
+		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec2_list,
+			     to_user_ptr(args->buffers_ptr),
+			     sizeof(*exec2_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_DEBUG("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		struct drm_i915_gem_exec_object2 *user_exec_list =
+				   to_user_ptr(args->buffers_ptr);
+		int i;
+
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user\n",
+					  args->buffer_count);
+				break;
+			}
+		}
+	}
+
+	drm_free_large(exec2_list);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_gem_gtt.c b/linux-imx/drivers/gpu/drm/i915/i915_gem_gtt.c
new file mode 100644
index 0000000..bdb0d77
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -0,0 +1,830 @@
+/*
+ * Copyright © 2010 Daniel Vetter
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+typedef uint32_t gen6_gtt_pte_t;
+
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID			(1 << 0)
+/* gen6+ has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID			(1 << 0)
+#define GEN6_PTE_UNCACHED		(1 << 1)
+#define HSW_PTE_UNCACHED		(0)
+#define GEN6_PTE_CACHE_LLC		(2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
+#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+
+static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
+					     dma_addr_t addr,
+					     enum i915_cache_level level)
+{
+	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+	pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+	switch (level) {
+	case I915_CACHE_LLC_MLC:
+		/* Haswell doesn't set L3 this way */
+		if (IS_HASWELL(dev))
+			pte |= GEN6_PTE_CACHE_LLC;
+		else
+			pte |= GEN6_PTE_CACHE_LLC_MLC;
+		break;
+	case I915_CACHE_LLC:
+		pte |= GEN6_PTE_CACHE_LLC;
+		break;
+	case I915_CACHE_NONE:
+		if (IS_HASWELL(dev))
+			pte |= HSW_PTE_UNCACHED;
+		else
+			pte |= GEN6_PTE_UNCACHED;
+		break;
+	default:
+		BUG();
+	}
+
+	return pte;
+}
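+
+/*
+ * Layout example (hypothetical address): a gen6 PTE keeps address bits
+ * 31:12 in place and folds bits 39:32 into PTE bits 11:4 via
+ * GEN6_GTT_ADDR_ENCODE(). For addr = 0x100000000 (bit 32 set) and
+ * I915_CACHE_LLC this yields 0x10 | GEN6_PTE_CACHE_LLC (0x4) |
+ * GEN6_PTE_VALID (0x1) == 0x15.
+ */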
+
+static int gen6_ppgtt_enable(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t pd_offset;
+	struct intel_ring_buffer *ring;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	gen6_gtt_pte_t __iomem *pd_addr;
+	uint32_t pd_entry;
+	int i;
+
+	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
+		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		dma_addr_t pt_addr;
+
+		pt_addr = ppgtt->pt_dma_addr[i];
+		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+		pd_entry |= GEN6_PDE_VALID;
+
+		writel(pd_entry, pd_addr + i);
+	}
+	readl(pd_addr);
+
+	pd_offset = ppgtt->pd_offset;
+	pd_offset /= 64; /* in cachelines */
+	pd_offset <<= 16;
+
+	if (INTEL_INFO(dev)->gen == 6) {
+		uint32_t ecochk, gab_ctl, ecobits;
+
+		ecobits = I915_READ(GAC_ECO_BITS);
+		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
+					 ECOBITS_PPGTT_CACHE64B);
+
+		gab_ctl = I915_READ(GAB_CTL);
+		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+		ecochk = I915_READ(GAM_ECOCHK);
+		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+				       ECOCHK_PPGTT_CACHE64B);
+		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+	} else if (INTEL_INFO(dev)->gen >= 7) {
+		uint32_t ecochk, ecobits;
+
+		ecobits = I915_READ(GAC_ECO_BITS);
+		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+		ecochk = I915_READ(GAM_ECOCHK);
+		if (IS_HASWELL(dev)) {
+			ecochk |= ECOCHK_PPGTT_WB_HSW;
+		} else {
+			ecochk |= ECOCHK_PPGTT_LLC_IVB;
+			ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+		}
+		I915_WRITE(GAM_ECOCHK, ecochk);
+		/* GFX_MODE is per-ring on gen7+ */
+	}
+
+	for_each_ring(ring, dev_priv, i) {
+		if (INTEL_INFO(dev)->gen >= 7)
+			I915_WRITE(RING_MODE_GEN7(ring),
+				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
+	}
+	return 0;
+}
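+
+/*
+ * Example for the pd_offset encoding above: a page directory at GTT
+ * offset 0x40000 sits at cacheline 0x40000/64 = 0x1000, so
+ * RING_PP_DIR_BASE is written with 0x1000 << 16 = 0x10000000.
+ */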
+
+/* PPGTT support for Sandybridge/Gen6 and later */
+static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+				   unsigned first_entry,
+				   unsigned num_entries)
+{
+	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
+	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
+	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+	unsigned last_pte, i;
+
+	scratch_pte = gen6_pte_encode(ppgtt->dev,
+				      ppgtt->scratch_page_dma_addr,
+				      I915_CACHE_LLC);
+
+	while (num_entries) {
+		last_pte = first_pte + num_entries;
+		if (last_pte > I915_PPGTT_PT_ENTRIES)
+			last_pte = I915_PPGTT_PT_ENTRIES;
+
+		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+
+		for (i = first_pte; i < last_pte; i++)
+			pt_vaddr[i] = scratch_pte;
+
+		kunmap_atomic(pt_vaddr);
+
+		num_entries -= last_pte - first_pte;
+		first_pte = 0;
+		act_pt++;
+	}
+}
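+
+/* Worked example (editorial illustration): with I915_PPGTT_PT_ENTRIES of
+ * 1024, clearing from first_entry == 1500 starts in page table 1 at PTE
+ * 476 (1500 / 1024 and 1500 % 1024), runs to the end of that table, then
+ * continues from PTE 0 of the next table until num_entries is exhausted.
+ */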
+
+static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
+				      struct sg_table *pages,
+				      unsigned first_entry,
+				      enum i915_cache_level cache_level)
+{
+	gen6_gtt_pte_t *pt_vaddr;
+	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
+	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+	struct sg_page_iter sg_iter;
+
+	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+		dma_addr_t page_addr;
+
+		page_addr = sg_page_iter_dma_address(&sg_iter);
+		pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
+						    cache_level);
+		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
+			kunmap_atomic(pt_vaddr);
+			act_pt++;
+			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+			act_pte = 0;
+		}
+	}
+	kunmap_atomic(pt_vaddr);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+{
+	int i;
+
+	if (ppgtt->pt_dma_addr) {
+		for (i = 0; i < ppgtt->num_pd_entries; i++)
+			pci_unmap_page(ppgtt->dev->pdev,
+				       ppgtt->pt_dma_addr[i],
+				       4096, PCI_DMA_BIDIRECTIONAL);
+	}
+
+	kfree(ppgtt->pt_dma_addr);
+	for (i = 0; i < ppgtt->num_pd_entries; i++)
+		__free_page(ppgtt->pt_pages[i]);
+	kfree(ppgtt->pt_pages);
+	kfree(ppgtt);
+}
+
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+{
+	struct drm_device *dev = ppgtt->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned first_pd_entry_in_global_pt;
+	int i;
+	int ret = -ENOMEM;
+
+	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
+	 * entries. For aliasing ppgtt support we just steal them at the end for
+	 * now. */
+	first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
+
+	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+	ppgtt->enable = gen6_ppgtt_enable;
+	ppgtt->clear_range = gen6_ppgtt_clear_range;
+	ppgtt->insert_entries = gen6_ppgtt_insert_entries;
+	ppgtt->cleanup = gen6_ppgtt_cleanup;
+	ppgtt->pt_pages = kzalloc(sizeof(struct page *) * ppgtt->num_pd_entries,
+				  GFP_KERNEL);
+	if (!ppgtt->pt_pages)
+		return -ENOMEM;
+
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
+		if (!ppgtt->pt_pages[i])
+			goto err_pt_alloc;
+	}
+
+	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) * ppgtt->num_pd_entries,
+				     GFP_KERNEL);
+	if (!ppgtt->pt_dma_addr)
+		goto err_pt_alloc;
+
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		dma_addr_t pt_addr;
+
+		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
+				       PCI_DMA_BIDIRECTIONAL);
+
+		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
+			ret = -EIO;
+			goto err_pd_pin;
+		}
+		ppgtt->pt_dma_addr[i] = pt_addr;
+	}
+
+	ppgtt->clear_range(ppgtt, 0,
+			   ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+
+	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
+
+	return 0;
+
+err_pd_pin:
+	if (ppgtt->pt_dma_addr) {
+		for (i--; i >= 0; i--)
+			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
+				       4096, PCI_DMA_BIDIRECTIONAL);
+	}
+err_pt_alloc:
+	kfree(ppgtt->pt_dma_addr);
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		if (ppgtt->pt_pages[i])
+			__free_page(ppgtt->pt_pages[i]);
+	}
+	kfree(ppgtt->pt_pages);
+
+	return ret;
+}
+
+static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_hw_ppgtt *ppgtt;
+	int ret;
+
+	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+	if (!ppgtt)
+		return -ENOMEM;
+
+	ppgtt->dev = dev;
+	ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
+
+	if (INTEL_INFO(dev)->gen < 8)
+		ret = gen6_ppgtt_init(ppgtt);
+	else
+		BUG();
+
+	if (ret)
+		kfree(ppgtt);
+	else
+		dev_priv->mm.aliasing_ppgtt = ppgtt;
+
+	return ret;
+}
+
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+	if (!ppgtt)
+		return;
+
+	ppgtt->cleanup(ppgtt);
+	dev_priv->mm.aliasing_ppgtt = NULL;
+}
+
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+			    struct drm_i915_gem_object *obj,
+			    enum i915_cache_level cache_level)
+{
+	ppgtt->insert_entries(ppgtt, obj->pages,
+			      obj->gtt_space->start >> PAGE_SHIFT,
+			      cache_level);
+}
+
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+			      struct drm_i915_gem_object *obj)
+{
+	ppgtt->clear_range(ppgtt,
+			   obj->gtt_space->start >> PAGE_SHIFT,
+			   obj->base.size >> PAGE_SHIFT);
+}
+
+extern int intel_iommu_gfx_mapped;
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline bool needs_idle_maps(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+	/* Query intel_iommu to see if we need the workaround. Presumably that
+	 * was loaded first.
+	 */
+	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
+		return true;
+#endif
+	return false;
+}
+
+static bool do_idling(struct drm_i915_private *dev_priv)
+{
+	bool ret = dev_priv->mm.interruptible;
+
+	if (unlikely(dev_priv->gtt.do_idle_maps)) {
+		dev_priv->mm.interruptible = false;
+		if (i915_gpu_idle(dev_priv->dev)) {
+			DRM_ERROR("Couldn't idle GPU\n");
+			/* Wait a bit, in hopes it avoids the hang */
+			udelay(10);
+		}
+	}
+
+	return ret;
+}
+
+static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
+{
+	if (unlikely(dev_priv->gtt.do_idle_maps))
+		dev_priv->mm.interruptible = interruptible;
+}
+
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+
+	/* First fill our portion of the GTT with scratch pages */
+	dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
+				      dev_priv->gtt.total / PAGE_SIZE);
+
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+		i915_gem_clflush_object(obj);
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
+	}
+
+	i915_gem_chipset_flush(dev);
+}
+
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+{
+	if (obj->has_dma_mapping)
+		return 0;
+
+	if (!dma_map_sg(&obj->base.dev->pdev->dev,
+			obj->pages->sgl, obj->pages->nents,
+			PCI_DMA_BIDIRECTIONAL))
+		return -ENOSPC;
+
+	return 0;
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level. The object
+ * will be accessible to the GPU via commands whose operands reference offsets
+ * within the global GTT as well as accessible by the GPU through the GMADR
+ * mapped BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_insert_entries(struct drm_device *dev,
+				     struct sg_table *st,
+				     unsigned int first_entry,
+				     enum i915_cache_level level)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	gen6_gtt_pte_t __iomem *gtt_entries =
+		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+	int i = 0;
+	struct sg_page_iter sg_iter;
+	dma_addr_t addr;
+
+	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
+		addr = sg_page_iter_dma_address(&sg_iter);
+		iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
+		i++;
+	}
+
+	/* XXX: This serves as a posting read to make sure that the PTE has
+	 * actually been updated. There is some concern that even though
+	 * registers and PTEs are within the same BAR, they may be subject to
+	 * different (NUMA-like) access patterns. Therefore, even with the way
+	 * we assume the hardware should work, we keep this posting read for
+	 * paranoia.
+	 */
+	if (i != 0)
+		WARN_ON(readl(&gtt_entries[i-1])
+			!= gen6_pte_encode(dev, addr, level));
+
+	/* This next bit makes the above posting read even more important. We
+	 * want to flush the TLBs only after we're certain all the PTE updates
+	 * have finished.
+	 */
+	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+static void gen6_ggtt_clear_range(struct drm_device *dev,
+				  unsigned int first_entry,
+				  unsigned int num_entries)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
+		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+	int i;
+
+	if (WARN(num_entries > max_entries,
+		 "First entry = %d; Num entries = %d (max=%d)\n",
+		 first_entry, num_entries, max_entries))
+		num_entries = max_entries;
+
+	scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
+				      I915_CACHE_LLC);
+	for (i = 0; i < num_entries; i++)
+		iowrite32(scratch_pte, &gtt_base[i]);
+	readl(gtt_base);
+}
+
+static void i915_ggtt_insert_entries(struct drm_device *dev,
+				     struct sg_table *st,
+				     unsigned int pg_start,
+				     enum i915_cache_level cache_level)
+{
+	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+	intel_gtt_insert_sg_entries(st, pg_start, flags);
+}
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+				  unsigned int first_entry,
+				  unsigned int num_entries)
+{
+	intel_gtt_clear_range(first_entry, num_entries);
+}
+
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+			      enum i915_cache_level cache_level)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
+					 obj->gtt_space->start >> PAGE_SHIFT,
+					 cache_level);
+
+	obj->has_global_gtt_mapping = 1;
+}
+
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->gtt.gtt_clear_range(obj->base.dev,
+				      obj->gtt_space->start >> PAGE_SHIFT,
+				      obj->base.size >> PAGE_SHIFT);
+
+	obj->has_global_gtt_mapping = 0;
+}
+
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool interruptible;
+
+	interruptible = do_idling(dev_priv);
+
+	if (!obj->has_dma_mapping)
+		dma_unmap_sg(&dev->pdev->dev,
+			     obj->pages->sgl, obj->pages->nents,
+			     PCI_DMA_BIDIRECTIONAL);
+
+	undo_idling(dev_priv, interruptible);
+}
+
+static void i915_gtt_color_adjust(struct drm_mm_node *node,
+				  unsigned long color,
+				  unsigned long *start,
+				  unsigned long *end)
+{
+	if (node->color != color)
+		*start += 4096;
+
+	if (!list_empty(&node->node_list)) {
+		node = list_entry(node->node_list.next,
+				  struct drm_mm_node,
+				  node_list);
+		if (node->allocated && node->color != color)
+			*end -= 4096;
+	}
+}
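+
+/* Editorial note: on non-LLC machines the drm_mm color is the object's
+ * cache level, so the callback above trims one 4096-byte guard page off
+ * the usable range on any side where the neighbouring node has a
+ * different color, keeping objects of conflicting cacheability off
+ * shared pages.
+ */
+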
+void i915_gem_setup_global_gtt(struct drm_device *dev,
+			       unsigned long start,
+			       unsigned long mappable_end,
+			       unsigned long end)
+{
+	/* Let GEM manage all of the aperture.
+	 *
+	 * However, leave one page at the end still bound to the scratch page.
+	 * There are a number of places where the hardware apparently prefetches
+	 * past the end of the object, and we've seen multiple hangs with the
+	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
+	 * aperture.  One page should be enough to keep any prefetching inside
+	 * of the aperture.
+	 */
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_mm_node *entry;
+	struct drm_i915_gem_object *obj;
+	unsigned long hole_start, hole_end;
+
+	BUG_ON(mappable_end > end);
+
+	/* Subtract the guard page ... */
+	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+	if (!HAS_LLC(dev))
+		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
+
+	/* Mark any preallocated objects as occupied */
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+		DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
+			      obj->gtt_offset, obj->base.size);
+
+		BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
+		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+						     obj->gtt_offset,
+						     obj->base.size,
+						     false);
+		obj->has_global_gtt_mapping = 1;
+	}
+
+	dev_priv->gtt.start = start;
+	dev_priv->gtt.total = end - start;
+
+	/* Clear any non-preallocated blocks */
+	drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
+			     hole_start, hole_end) {
+		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+			      hole_start, hole_end);
+		dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
+					      (hole_end-hole_start) / PAGE_SIZE);
+	}
+
+	/* And finally clear the reserved guard page */
+	dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+}
+
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+	if (i915_enable_ppgtt >= 0)
+		return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Disable ppgtt on SNB if VT-d is on. */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+#endif
+
+	return true;
+}
+
+void i915_gem_init_global_gtt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long gtt_size, mappable_size;
+
+	gtt_size = dev_priv->gtt.total;
+	mappable_size = dev_priv->gtt.mappable_end;
+
+	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+		int ret;
+
+		if (INTEL_INFO(dev)->gen <= 7) {
+			/* PPGTT pdes are stolen from global gtt ptes, so shrink the
+			 * aperture accordingly when using aliasing ppgtt. */
+			gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+		}
+
+		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+
+		ret = i915_gem_init_aliasing_ppgtt(dev);
+		if (!ret)
+			return;
+
+		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
+		drm_mm_takedown(&dev_priv->mm.gtt_space);
+		gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+	}
+	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+}
+
+static int setup_scratch_page(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct page *page;
+	dma_addr_t dma_addr;
+
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+	if (page == NULL)
+		return -ENOMEM;
+	get_page(page);
+	set_pages_uc(page, 1);
+
+#ifdef CONFIG_INTEL_IOMMU
+	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
+				PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(dev->pdev, dma_addr))
+		return -EINVAL;
+#else
+	dma_addr = page_to_phys(page);
+#endif
+	dev_priv->gtt.scratch_page = page;
+	dev_priv->gtt.scratch_page_dma = dma_addr;
+
+	return 0;
+}
+
+static void teardown_scratch_page(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	set_pages_wb(dev_priv->gtt.scratch_page, 1);
+	pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
+		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	put_page(dev_priv->gtt.scratch_page);
+	__free_page(dev_priv->gtt.scratch_page);
+}
+
+static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+	return snb_gmch_ctl << 20;
+}
+
+static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+	return snb_gmch_ctl << 25; /* 32 MB units */
+}
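+
+/* Worked example (editorial illustration): GGMS selects the GTT size in
+ * 1 MB units and GMS the stolen size in 32 MB units, so a GGMS field of 2
+ * yields a 2 MB GTT (2 << 20) and a GMS field of 3 yields 96 MB of stolen
+ * memory (3 << 25).
+ */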
+
+static int gen6_gmch_probe(struct drm_device *dev,
+			   size_t *gtt_total,
+			   size_t *stolen,
+			   phys_addr_t *mappable_base,
+			   unsigned long *mappable_end)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	phys_addr_t gtt_bus_addr;
+	unsigned int gtt_size;
+	u16 snb_gmch_ctl;
+	int ret;
+
+	*mappable_base = pci_resource_start(dev->pdev, 2);
+	*mappable_end = pci_resource_len(dev->pdev, 2);
+
+	/* 64/512MB is the current min/max we actually know of, but this is just
+	 * a coarse sanity check.
+	 */
+	if (*mappable_end < (64<<20) || *mappable_end > (512<<20)) {
+		DRM_ERROR("Unknown GMADR size (%lx)\n",
+			  dev_priv->gtt.mappable_end);
+		return -ENXIO;
+	}
+
+	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
+
+	*stolen = gen6_get_stolen_size(snb_gmch_ctl);
+	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
+
+	/* For modern GENs, the PTEs and register space are split in the BAR */
+	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
+		(pci_resource_len(dev->pdev, 0) / 2);
+
+	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
+	if (!dev_priv->gtt.gsm) {
+		DRM_ERROR("Failed to map the gtt page table\n");
+		return -ENOMEM;
+	}
+
+	ret = setup_scratch_page(dev);
+	if (ret)
+		DRM_ERROR("Scratch setup failed\n");
+
+	dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
+	dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
+
+	return ret;
+}
+
+static void gen6_gmch_remove(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	iounmap(dev_priv->gtt.gsm);
+	teardown_scratch_page(dev_priv->dev);
+}
+
+static int i915_gmch_probe(struct drm_device *dev,
+			   size_t *gtt_total,
+			   size_t *stolen,
+			   phys_addr_t *mappable_base,
+			   unsigned long *mappable_end)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
+	if (!ret) {
+		DRM_ERROR("failed to set up gmch\n");
+		return -EIO;
+	}
+
+	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
+
+	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
+	dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
+	dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
+
+	return 0;
+}
+
+static void i915_gmch_remove(struct drm_device *dev)
+{
+	intel_gmch_remove();
+}
+
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_gtt *gtt = &dev_priv->gtt;
+	int ret;
+
+	if (INTEL_INFO(dev)->gen <= 5) {
+		dev_priv->gtt.gtt_probe = i915_gmch_probe;
+		dev_priv->gtt.gtt_remove = i915_gmch_remove;
+	} else {
+		dev_priv->gtt.gtt_probe = gen6_gmch_probe;
+		dev_priv->gtt.gtt_remove = gen6_gmch_remove;
+	}
+
+	ret = gtt->gtt_probe(dev, &gtt->total, &gtt->stolen_size,
+			     &gtt->mappable_base, &gtt->mappable_end);
+	if (ret)
+		return ret;
+
+	/* GMADR is the PCI mmio aperture into the global GTT. */
+	DRM_INFO("Memory usable by graphics device = %zdM\n",
+		 dev_priv->gtt.total >> 20);
+	DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
+			 dev_priv->gtt.mappable_end >> 20);
+	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
+			 dev_priv->gtt.stolen_size >> 20);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_gem_stolen.c b/linux-imx/drivers/gpu/drm/i915/i915_gem_stolen.c
new file mode 100644
index 0000000..fa2d15b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright © 2008-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/*
+ * The BIOS typically reserves some of the system's memory for the exclusive
+ * use of the integrated graphics. This memory is no longer available for
+ * use by the OS and so the user finds that his system has less memory
+ * available than he put in. We refer to this memory as stolen.
+ *
+ * The BIOS will allocate its framebuffer from the stolen memory. Our
+ * goal is try to reuse that object for our own fbcon which must always
+ * be available for panics. Anything else we can reuse the stolen memory
+ * for is a boon.
+ */
+
+static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = dev_priv->bridge_dev;
+	u32 base;
+
+	/* On the machines I have tested the Graphics Base of Stolen Memory
+	 * is unreliable, so on those compute the base by subtracting the
+	 * stolen memory from the Top of Low Usable DRAM which is where the
+	 * BIOS places the graphics stolen memory.
+	 *
+	 * On gen2, the layout is slightly different with the Graphics Segment
+	 * immediately following Top of Memory (or Top of Usable DRAM). Note
+	 * it appears that TOUD is only reported by 865g, so we just use the
+	 * top of memory as determined by the e820 probe.
+	 *
+	 * XXX gen2 requires an unavailable symbol and 945gm fails with
+	 * its value of TOLUD.
+	 */
+	base = 0;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		/* Read Base Data of Stolen Memory Register (BDSM) directly.
+		 * Note that there is also an MCHBAR mirror at 0x1080c0, or
+		 * we could use device 2:0x5c instead.
+		 */
+		pci_read_config_dword(pdev, 0xB0, &base);
+		base &= ~4095; /* lower bits used for locking register */
+	} else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+		/* Read Graphics Base of Stolen Memory directly */
+		pci_read_config_dword(pdev, 0xA4, &base);
+#if 0
+	} else if (IS_GEN3(dev)) {
+		u8 val;
+		/* Stolen is immediately below Top of Low Usable DRAM */
+		pci_read_config_byte(pdev, 0x9c, &val);
+		base = val >> 3 << 27;
+		base -= dev_priv->mm.gtt->stolen_size;
+	} else {
+		/* Stolen is immediately above Top of Memory */
+		base = max_low_pfn_mapped << PAGE_SHIFT;
+#endif
+	}
+
+	return base;
+}
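+
+/* Worked example (editorial illustration): on gen6+ the BDSM config dword
+ * carries the stolen base in its upper bits and lock bits at the bottom,
+ * so a raw read of 0x7b000001 masks down (& ~4095) to a stolen base of
+ * 0x7b000000.
+ */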
+
+static int i915_setup_compression(struct drm_device *dev, int size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+
+	/* Try to over-allocate to reduce reallocations and fragmentation */
+	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
+					   size <<= 1, 4096, 0);
+	if (!compressed_fb)
+		compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
+						   size >>= 1, 4096, 0);
+	if (compressed_fb)
+		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+	if (!compressed_fb)
+		goto err;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+	} else if (IS_GM45(dev)) {
+		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+	} else {
+		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+						    4096, 4096, 0);
+		if (compressed_llb)
+			compressed_llb = drm_mm_get_block(compressed_llb,
+							  4096, 4096);
+		if (!compressed_llb)
+			goto err_fb;
+
+		dev_priv->compressed_llb = compressed_llb;
+
+		I915_WRITE(FBC_CFB_BASE,
+			   dev_priv->mm.stolen_base + compressed_fb->start);
+		I915_WRITE(FBC_LL_BASE,
+			   dev_priv->mm.stolen_base + compressed_llb->start);
+	}
+
+	dev_priv->compressed_fb = compressed_fb;
+	dev_priv->cfb_size = size;
+
+	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
+		      size);
+
+	return 0;
+
+err_fb:
+	drm_mm_put_block(compressed_fb);
+err:
+	return -ENOSPC;
+}
+
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->mm.stolen_base == 0)
+		return -ENODEV;
+
+	if (size < dev_priv->cfb_size)
+		return 0;
+
+	/* Release any current block */
+	i915_gem_stolen_cleanup_compression(dev);
+
+	return i915_setup_compression(dev, size);
+}
+
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->cfb_size == 0)
+		return;
+
+	if (dev_priv->compressed_fb)
+		drm_mm_put_block(dev_priv->compressed_fb);
+
+	if (dev_priv->compressed_llb)
+		drm_mm_put_block(dev_priv->compressed_llb);
+
+	dev_priv->cfb_size = 0;
+}
+
+void i915_gem_cleanup_stolen(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	i915_gem_stolen_cleanup_compression(dev);
+	drm_mm_takedown(&dev_priv->mm.stolen);
+}
+
+int i915_gem_init_stolen(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+	if (dev_priv->mm.stolen_base == 0)
+		return 0;
+
+	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
+		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
+
+	/* Basic memrange allocator for stolen space */
+	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);
+
+	return 0;
+}
+
+static struct sg_table *
+i915_pages_create_for_stolen(struct drm_device *dev,
+			     u32 offset, u32 size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct sg_table *st;
+	struct scatterlist *sg;
+
+	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
+	BUG_ON(offset > dev_priv->gtt.stolen_size - size);
+
+	/* We hide that we have no struct page backing our stolen object
+	 * by wrapping the contiguous physical allocation with a fake
+	 * dma mapping in a single scatterlist.
+	 */
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return NULL;
+
+	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+		kfree(st);
+		return NULL;
+	}
+
+	sg = st->sgl;
+	sg->offset = 0;
+	sg->length = size;
+
+	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
+	sg_dma_len(sg) = size;
+
+	return st;
+}
+
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+{
+	BUG();
+	return -EINVAL;
+}
+
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+{
+	/* Should only be called during free */
+	sg_free_table(obj->pages);
+	kfree(obj->pages);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
+	.get_pages = i915_gem_object_get_pages_stolen,
+	.put_pages = i915_gem_object_put_pages_stolen,
+};
+
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct drm_device *dev,
+			       struct drm_mm_node *stolen)
+{
+	struct drm_i915_gem_object *obj;
+
+	obj = i915_gem_object_alloc(dev);
+	if (obj == NULL)
+		return NULL;
+
+	if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
+		goto cleanup;
+
+	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
+
+	obj->pages = i915_pages_create_for_stolen(dev,
+						  stolen->start, stolen->size);
+	if (obj->pages == NULL)
+		goto cleanup;
+
+	obj->has_dma_mapping = true;
+	obj->pages_pin_count = 1;
+	obj->stolen = stolen;
+
+	obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+	obj->cache_level = I915_CACHE_NONE;
+
+	return obj;
+
+cleanup:
+	i915_gem_object_free(obj);
+	return NULL;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct drm_mm_node *stolen;
+
+	if (dev_priv->mm.stolen_base == 0)
+		return NULL;
+
+	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
+	if (size == 0)
+		return NULL;
+
+	stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+	if (stolen)
+		stolen = drm_mm_get_block(stolen, size, 4096);
+	if (stolen == NULL)
+		return NULL;
+
+	obj = _i915_gem_object_create_stolen(dev, stolen);
+	if (obj)
+		return obj;
+
+	drm_mm_put_block(stolen);
+	return NULL;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+					       u32 stolen_offset,
+					       u32 gtt_offset,
+					       u32 size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct drm_mm_node *stolen;
+
+	if (dev_priv->mm.stolen_base == 0)
+		return NULL;
+
+	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
+			stolen_offset, gtt_offset, size);
+
+	/* KISS and expect everything to be page-aligned */
+	BUG_ON(stolen_offset & 4095);
+	BUG_ON(gtt_offset & 4095);
+	BUG_ON(size & 4095);
+
+	if (WARN_ON(size == 0))
+		return NULL;
+
+	stolen = drm_mm_create_block(&dev_priv->mm.stolen,
+				     stolen_offset, size,
+				     false);
+	if (stolen == NULL) {
+		DRM_DEBUG_KMS("failed to allocate stolen space\n");
+		return NULL;
+	}
+
+	obj = _i915_gem_object_create_stolen(dev, stolen);
+	if (obj == NULL) {
+		DRM_DEBUG_KMS("failed to allocate stolen object\n");
+		drm_mm_put_block(stolen);
+		return NULL;
+	}
+
+	/* To simplify the initialisation sequence between KMS and GTT,
+	 * we allow construction of the stolen object prior to
+	 * setting up the GTT space. The actual reservation will occur
+	 * later.
+	 */
+	if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
+		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+						     gtt_offset, size,
+						     false);
+		if (obj->gtt_space == NULL) {
+			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+			drm_gem_object_unreference(&obj->base);
+			return NULL;
+		}
+	} else {
+		obj->gtt_space = I915_GTT_RESERVED;
+	}
+
+	obj->gtt_offset = gtt_offset;
+	obj->has_global_gtt_mapping = 1;
+
+	list_add_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
+	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+	return obj;
+}
+
+void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+	if (obj->stolen) {
+		drm_mm_put_block(obj->stolen);
+		obj->stolen = NULL;
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_gem_tiling.c b/linux-imx/drivers/gpu/drm/i915/i915_gem_tiling.c
new file mode 100644
index 0000000..537545b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -0,0 +1,523 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/** @file i915_gem_tiling.c
+ *
+ * Support for managing tiling state of buffer objects.
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvement from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled.  However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y.  So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip --  Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics.  This
+ * is called "Channel XOR Randomization" in the MCH documentation.  The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what the address
+ * swizzling it needs to do is, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
+
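+/* Editorial sketch of the 9/10 swizzle described above (hypothetical
+ * helper, not part of this file): for I915_BIT_6_SWIZZLE_9_10, bit 6 of a
+ * CPU address is flipped by the XOR of bits 9 and 10, e.g.
+ *
+ *	addr ^ ((((addr >> 9) ^ (addr >> 10)) & 1) << 6)
+ *
+ * The bit17 variants fold in bit 17 as well, which is why objects whose
+ * pages move (and so change bit 17) need the swizzle fixups below.
+ */
+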
+/**
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+	if (IS_VALLEYVIEW(dev)) {
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		uint32_t dimm_c0, dimm_c1;
+		dimm_c0 = I915_READ(MAD_DIMM_C0);
+		dimm_c1 = I915_READ(MAD_DIMM_C1);
+		dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+		dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+		/* Enable swizzling when the channels are populated with
+		 * identically sized dimms. We don't need to check the 3rd
+		 * channel because no cpu with gpu attached ships in that
+		 * configuration. Also, swizzling only makes sense for 2
+		 * channels anyway. */
+		if (dimm_c0 == dimm_c1) {
+			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+			swizzle_y = I915_BIT_6_SWIZZLE_9;
+		} else {
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+		}
+	} else if (IS_GEN5(dev)) {
+		/* On Ironlake, whatever the DRAM config, the GPU always
+		 * does the same swizzling setup.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+		swizzle_y = I915_BIT_6_SWIZZLE_9;
+	} else if (IS_GEN2(dev)) {
+		/* As far as we know, the 865 doesn't have these bit 6
+		 * swizzling issues.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+		uint32_t dcc;
+
+		/* On 9xx chipsets, channel interleave by the CPU is
+		 * determined by DCC.  For single-channel, neither the CPU
+		 * nor the GPU do swizzling.  For dual channel interleaved,
+		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+		 * 9 for Y tiled.  The CPU's interleave is independent, and
+		 * can be based on either bit 11 (haven't seen this yet) or
+		 * bit 17 (common).
+		 */
+		dcc = I915_READ(DCC);
+		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			break;
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
+				/* This is the base swizzling by the GPU for
+				 * tiled buffers.
+				 */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+				/* Bit 11 swizzling by the CPU in addition. */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+			} else {
+				/* Bit 17 swizzling by the CPU in addition. */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
+			}
+			break;
+		}
+		if (dcc == 0xffffffff) {
+			DRM_ERROR("Couldn't read from MCHBAR.  "
+				  "Disabling tiling.\n");
+			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+		}
+	} else {
+		/* The 965, G33, and newer, have a very flexible memory
+		 * configuration.  It will enable dual-channel mode
+		 * (interleaving) on as much memory as it can, and the GPU
+		 * will additionally sometimes enable different bit 6
+		 * swizzling for tiled objects from the CPU.
+		 *
+		 * Here's what I found on the G965:
+		 *    slot fill         memory size  swizzling
+		 * 0A   0B   1A   1B    1-ch   2-ch
+		 * 512  0    0    0     512    0     O
+		 * 512  0    512  0     16     1008  X
+		 * 512  0    0    512   16     1008  X
+		 * 0    512  0    512   16     1008  X
+		 * 1024 1024 1024 0     2048   1024  O
+		 *
+		 * We could probably detect this based on either the DRB
+		 * matching, which was the case for the swizzling required in
+		 * the table above, or from the 1-ch value being less than
+		 * the minimum size of a rank.
+		 */
+		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+		} else {
+			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+			swizzle_y = I915_BIT_6_SWIZZLE_9;
+		}
+	}
+
+	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/* Check pitch constraints for all chips & tiling formats */
+static bool
+i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+{
+	int tile_width;
+
+	/* Linear is always fine */
+	if (tiling_mode == I915_TILING_NONE)
+		return true;
+
+	if (IS_GEN2(dev) ||
+	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+		tile_width = 128;
+	else
+		tile_width = 512;
+
+	/* check maximum stride & object size */
+	/* i965+ stores the end address of the gtt mapping in the fence
+	 * reg, so don't bother to check the size */
+	if (INTEL_INFO(dev)->gen >= 7) {
+		if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
+			return false;
+	} else if (INTEL_INFO(dev)->gen >= 4) {
+		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
+			return false;
+	} else {
+		if (stride > 8192)
+			return false;
+
+		if (IS_GEN3(dev)) {
+			if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+				return false;
+		} else {
+			if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+				return false;
+		}
+	}
+
+	if (stride < tile_width)
+		return false;
+
+	/* 965+ just needs multiples of tile width */
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (stride & (tile_width - 1))
+			return false;
+		return true;
+	}
+
+	/* Pre-965 needs power of two tile widths */
+	if (stride & (stride - 1))
+		return false;
+
+	return true;
+}
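+
+/* Worked example (editorial illustration): an X-tiled surface on gen4+
+ * uses a 512-byte tile width, so a 4000-byte stride fails the
+ * multiple-of-tile-width test while 4096 passes; on pre-965 parts the
+ * 4096-byte stride must additionally be a power of two, which it is.
+ */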
+
+/* Is the current GTT allocation valid for the change in tiling? */
+static bool
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
+{
+	u32 size;
+
+	if (tiling_mode == I915_TILING_NONE)
+		return true;
+
+	if (INTEL_INFO(obj->base.dev)->gen >= 4)
+		return true;
+
+	if (INTEL_INFO(obj->base.dev)->gen == 3) {
+		if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+			return false;
+	} else {
+		if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+			return false;
+	}
+
+	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
+	if (obj->gtt_space->size != size)
+		return false;
+
+	if (obj->gtt_offset & (size - 1))
+		return false;
+
+	return true;
+}
+
+/**
+ * Sets the tiling mode of an object, returning the required swizzling of
+ * bit 6 of addresses in the object.
+ */
+int
+i915_gem_set_tiling(struct drm_device *dev, void *data,
+		   struct drm_file *file)
+{
+	struct drm_i915_gem_set_tiling *args = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	int ret = 0;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL)
+		return -ENOENT;
+
+	if (!i915_tiling_ok(dev,
+			    args->stride, obj->base.size, args->tiling_mode)) {
+		drm_gem_object_unreference_unlocked(&obj->base);
+		return -EINVAL;
+	}
+
+	if (obj->pin_count) {
+		drm_gem_object_unreference_unlocked(&obj->base);
+		return -EBUSY;
+	}
+
+	if (args->tiling_mode == I915_TILING_NONE) {
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+		args->stride = 0;
+	} else {
+		if (args->tiling_mode == I915_TILING_X)
+			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+		else
+			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+
+		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
+		 * from aborting the application on sw fallbacks to bit 17,
+		 * and we use the pread/pwrite bit17 paths to swizzle for it.
+		 * If there was a user that was relying on the swizzle
+		 * information for drm_intel_bo_map()ed reads/writes this would
+		 * break it, but we don't have any of those.
+		 */
+		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
+		/* If we can't handle the swizzling, make it untiled. */
+		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
+			args->tiling_mode = I915_TILING_NONE;
+			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+			args->stride = 0;
+		}
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	if (args->tiling_mode != obj->tiling_mode ||
+	    args->stride != obj->stride) {
+		/* We need to rebind the object if its current allocation
+		 * no longer meets the alignment restrictions for its new
+		 * tiling mode. Otherwise we can just leave it alone, but
+		 * need to ensure that any fence register is updated before
+		 * the next fenced (either through the GTT or by the BLT unit
+		 * on older GPUs) access.
+		 *
+		 * After updating the tiling parameters, we then flag whether
+		 * we need to update an associated fence register. Note this
+		 * has to also include the unfenced register the GPU uses
+		 * whilst executing a fenced command for an untiled object.
+		 */
+
+		obj->map_and_fenceable =
+			obj->gtt_space == NULL ||
+			(obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
+			 i915_gem_object_fence_ok(obj, args->tiling_mode));
+
+		/* Rebind if we need a change of alignment */
+		if (!obj->map_and_fenceable) {
+			u32 unfenced_alignment =
+				i915_gem_get_gtt_alignment(dev, obj->base.size,
+							    args->tiling_mode,
+							    false);
+			if (obj->gtt_offset & (unfenced_alignment - 1))
+				ret = i915_gem_object_unbind(obj);
+		}
+
+		if (ret == 0) {
+			obj->fence_dirty =
+				obj->fenced_gpu_access ||
+				obj->fence_reg != I915_FENCE_REG_NONE;
+
+			obj->tiling_mode = args->tiling_mode;
+			obj->stride = args->stride;
+
+			/* Force the fence to be reacquired for GTT access */
+			i915_gem_release_mmap(obj);
+		}
+	}
+	/* we have to maintain this existing ABI... */
+	args->stride = obj->stride;
+	args->tiling_mode = obj->tiling_mode;
+
+	/* Try to preallocate memory required to save swizzling on put-pages */
+	if (i915_gem_object_needs_bit17_swizzle(obj)) {
+		if (obj->bit_17 == NULL) {
+			obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
+					      sizeof(long), GFP_KERNEL);
+		}
+	} else {
+		kfree(obj->bit_17);
+		obj->bit_17 = NULL;
+	}
+
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+/**
+ * Returns the current tiling mode and required bit 6 swizzling for the object.
+ */
+int
+i915_gem_get_tiling(struct drm_device *dev, void *data,
+		   struct drm_file *file)
+{
+	struct drm_i915_gem_get_tiling *args = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL)
+		return -ENOENT;
+
+	mutex_lock(&dev->struct_mutex);
+
+	args->tiling_mode = obj->tiling_mode;
+	switch (obj->tiling_mode) {
+	case I915_TILING_X:
+		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+		break;
+	case I915_TILING_Y:
+		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+		break;
+	case I915_TILING_NONE:
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+		break;
+	default:
+		DRM_ERROR("unknown tiling mode\n");
+	}
+
+	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/**
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static void
+i915_gem_swizzle_page(struct page *page)
+{
+	char temp[64];
+	char *vaddr;
+	int i;
+
+	vaddr = kmap(page);
+
+	for (i = 0; i < PAGE_SIZE; i += 128) {
+		memcpy(temp, &vaddr[i], 64);
+		memcpy(&vaddr[i], &vaddr[i + 64], 64);
+		memcpy(&vaddr[i + 64], temp, 64);
+	}
+
+	kunmap(page);
+}
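+
+/* Worked example (editorial illustration): within each 128-byte chunk the
+ * two 64-byte halves trade places, so bytes [0, 64) end up at [64, 128)
+ * and vice versa, matching the exchange the memory controller performs
+ * when bit 17 of the page's physical address flips.
+ */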
+
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	int i;
+
+	if (obj->bit_17 == NULL)
+		return;
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_page_iter_page(&sg_iter);
+		char new_bit_17 = page_to_phys(page) >> 17;
+		if ((new_bit_17 & 0x1) !=
+		    (test_bit(i, obj->bit_17) != 0)) {
+			i915_gem_swizzle_page(page);
+			set_page_dirty(page);
+		}
+		i++;
+	}
+}
+
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+{
+	struct sg_page_iter sg_iter;
+	int page_count = obj->base.size >> PAGE_SHIFT;
+	int i;
+
+	if (obj->bit_17 == NULL) {
+		obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+					   sizeof(long), GFP_KERNEL);
+		if (obj->bit_17 == NULL) {
+			DRM_ERROR("Failed to allocate memory for bit 17 "
+				  "record\n");
+			return;
+		}
+	}
+
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+			__set_bit(i, obj->bit_17);
+		else
+			__clear_bit(i, obj->bit_17);
+		i++;
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_ioc32.c b/linux-imx/drivers/gpu/drm/i915/i915_ioc32.c
new file mode 100644
index 0000000..3c59584
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_ioc32.c
@@ -0,0 +1,221 @@
+/**
+ * \file i915_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the i915 DRM.
+ *
+ * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
+ *
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Alan Hourihane 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+typedef struct _drm_i915_batchbuffer32 {
+	int start;		/* agp offset */
+	int used;		/* nr bytes in use */
+	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
+	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
+	int num_cliprects;	/* multipass with multiple cliprects? */
+	u32 cliprects;		/* pointer to userspace cliprects */
+} drm_i915_batchbuffer32_t;
+
+static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
+				   unsigned long arg)
+{
+	drm_i915_batchbuffer32_t batchbuffer32;
+	drm_i915_batchbuffer_t __user *batchbuffer;
+
+	if (copy_from_user
+	    (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
+		return -EFAULT;
+
+	batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
+	if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
+	    || __put_user(batchbuffer32.start, &batchbuffer->start)
+	    || __put_user(batchbuffer32.used, &batchbuffer->used)
+	    || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
+	    || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
+	    || __put_user(batchbuffer32.num_cliprects,
+			  &batchbuffer->num_cliprects)
+	    || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
+			  &batchbuffer->cliprects))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
+			 (unsigned long)batchbuffer);
+}
+
+typedef struct _drm_i915_cmdbuffer32 {
+	u32 buf;		/* pointer to userspace command buffer */
+	int sz;			/* nr bytes in buf */
+	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
+	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
+	int num_cliprects;	/* multipass with multiple cliprects? */
+	u32 cliprects;		/* pointer to userspace cliprects */
+} drm_i915_cmdbuffer32_t;
+
+static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
+				 unsigned long arg)
+{
+	drm_i915_cmdbuffer32_t cmdbuffer32;
+	drm_i915_cmdbuffer_t __user *cmdbuffer;
+
+	if (copy_from_user
+	    (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
+		return -EFAULT;
+
+	cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
+	if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
+	    || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
+			  &cmdbuffer->buf)
+	    || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
+	    || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
+	    || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
+	    || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
+	    || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
+			  &cmdbuffer->cliprects))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
+			 (unsigned long)cmdbuffer);
+}
+
+typedef struct drm_i915_irq_emit32 {
+	u32 irq_seq;
+} drm_i915_irq_emit32_t;
+
+static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_i915_irq_emit32_t req32;
+	drm_i915_irq_emit_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user((int __user *)(unsigned long)req32.irq_seq,
+			  &request->irq_seq))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
+			 (unsigned long)request);
+}
+
+typedef struct drm_i915_getparam32 {
+	int param;
+	u32 value;
+} drm_i915_getparam32_t;
+
+static int compat_i915_getparam(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_i915_getparam32_t req32;
+	drm_i915_getparam_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.param, &request->param)
+	    || __put_user((void __user *)(unsigned long)req32.value,
+			  &request->value))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
+			 (unsigned long)request);
+}
+
+typedef struct drm_i915_mem_alloc32 {
+	int region;
+	int alignment;
+	int size;
+	u32 region_offset;	/* offset from start of fb or agp */
+} drm_i915_mem_alloc32_t;
+
+static int compat_i915_alloc(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	drm_i915_mem_alloc32_t req32;
+	drm_i915_mem_alloc_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.region, &request->region)
+	    || __put_user(req32.alignment, &request->alignment)
+	    || __put_user(req32.size, &request->size)
+	    || __put_user((void __user *)(unsigned long)req32.region_offset,
+			  &request->region_offset))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
+			 (unsigned long)request);
+}
+
+static drm_ioctl_compat_t *i915_compat_ioctls[] = {
+	[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
+	[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
+	[DRM_I915_GETPARAM] = compat_i915_getparam,
+	[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
+	[DRM_I915_ALLOC] = compat_i915_alloc
+};
+
+#ifdef CONFIG_COMPAT
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn = NULL;
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
+		fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+	if (fn != NULL)
+		ret = (*fn) (filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp, cmd, arg);
+
+	return ret;
+}
+#endif
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_irq.c b/linux-imx/drivers/gpu/drm/i915/i915_irq.c
new file mode 100644
index 0000000..c8d16a6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_irq.c
@@ -0,0 +1,3212 @@
+/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/sysrq.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+static const u32 hpd_ibx[] = {
+	[HPD_CRT] = SDE_CRT_HOTPLUG,
+	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
+	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
+	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
+	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
+};
+
+static const u32 hpd_cpt[] = {
+	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
+	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
+	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
+};
+
+static const u32 hpd_mask_i915[] = {
+	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
+	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
+	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
+	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
+	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
+	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
+};
+
+static const u32 hpd_status_gen4[] = {
+	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
+	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
+	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
+	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
+	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
+	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
+	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
+	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
+	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+};
+
+static void ibx_hpd_irq_setup(struct drm_device *dev);
+static void i915_hpd_irq_setup(struct drm_device *dev);
+
+/* For display hotplug interrupt */
+static void
+ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	if ((dev_priv->irq_mask & mask) != 0) {
+		dev_priv->irq_mask &= ~mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
+		POSTING_READ(DEIMR);
+	}
+}
+
+static void
+ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	if ((dev_priv->irq_mask & mask) != mask) {
+		dev_priv->irq_mask |= mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
+		POSTING_READ(DEIMR);
+	}
+}
+
+void
+i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+{
+	u32 reg = PIPESTAT(pipe);
+	u32 pipestat = I915_READ(reg) & 0x7fff0000;
+
+	if ((pipestat & mask) == mask)
+		return;
+
+	/* Enable the interrupt, clear any pending status */
+	pipestat |= mask | (mask >> 16);
+	I915_WRITE(reg, pipestat);
+	POSTING_READ(reg);
+}
+
+void
+i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+{
+	u32 reg = PIPESTAT(pipe);
+	u32 pipestat = I915_READ(reg) & 0x7fff0000;
+
+	if ((pipestat & mask) == 0)
+		return;
+
+	pipestat &= ~mask;
+	I915_WRITE(reg, pipestat);
+	POSTING_READ(reg);
+}
+
+/**
+ * intel_enable_asle - enable ASLE interrupt for OpRegion
+ */
+void intel_enable_asle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	/* FIXME: opregion/asle for VLV */
+	if (IS_VALLEYVIEW(dev))
+		return;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+	if (HAS_PCH_SPLIT(dev))
+		ironlake_enable_display_irq(dev_priv, DE_GSE);
+	else {
+		i915_enable_pipestat(dev_priv, 1,
+				     PIPE_LEGACY_BLC_EVENT_ENABLE);
+		if (INTEL_INFO(dev)->gen >= 4)
+			i915_enable_pipestat(dev_priv, 0,
+					     PIPE_LEGACY_BLC_EVENT_ENABLE);
+	}
+
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+/**
+ * i915_pipe_enabled - check if a pipe is enabled
+ * @dev: DRM device
+ * @pipe: pipe to check
+ *
+ * Reading certain registers when the pipe is disabled can hang the chip.
+ * Use this routine to make sure the PLL is running and the pipe is active
+ * before reading such registers if unsure.
+ */
+static int
+i915_pipe_enabled(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+
+	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
+}
+
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long high_frame;
+	unsigned long low_frame;
+	u32 high1, high2, low;
+
+	if (!i915_pipe_enabled(dev, pipe)) {
+		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+				"pipe %c\n", pipe_name(pipe));
+		return 0;
+	}
+
+	high_frame = PIPEFRAME(pipe);
+	low_frame = PIPEFRAMEPIXEL(pipe);
+
+	/*
+	 * High & low register fields aren't synchronized, so make sure
+	 * we get a low value that's stable across two reads of the high
+	 * register.
+	 */
+	do {
+		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
+		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+	} while (high1 != high2);
+
+	high1 >>= PIPE_FRAME_HIGH_SHIFT;
+	low >>= PIPE_FRAME_LOW_SHIFT;
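+	/* Combine the counter fields: the low register supplies the
+	 * bottom 8 bits of the frame count, the high register the rest.
+	 */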
+	return (high1 << 8) | low;
+}
+
+static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int reg = PIPE_FRMCOUNT_GM45(pipe);
+
+	if (!i915_pipe_enabled(dev, pipe)) {
+		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+				 "pipe %c\n", pipe_name(pipe));
+		return 0;
+	}
+
+	return I915_READ(reg);
+}
+
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+			     int *vpos, int *hpos)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 vbl = 0, position = 0;
+	int vbl_start, vbl_end, htotal, vtotal;
+	bool in_vbl = true;
+	int ret = 0;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+
+	if (!i915_pipe_enabled(dev, pipe)) {
+		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
+				 "pipe %c\n", pipe_name(pipe));
+		return 0;
+	}
+
+	/* Get vtotal. */
+	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		/* No obvious pixelcount register. Only query vertical
+		 * scanout position from Display scan line register.
+		 */
+		position = I915_READ(PIPEDSL(pipe));
+
+		/* Decode into vertical scanout position. Don't have
+		 * horizontal scanout position.
+		 */
+		*vpos = position & 0x1fff;
+		*hpos = 0;
+	} else {
+		/* Have access to pixelcount since start of frame.
+		 * We can split this into vertical and horizontal
+		 * scanout position.
+		 */
+		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+		*vpos = position / htotal;
+		*hpos = position - (*vpos * htotal);
+	}
+
+	/* Query vblank area. */
+	vbl = I915_READ(VBLANK(cpu_transcoder));
+
+	/* Test position against vblank region. */
+	vbl_start = vbl & 0x1fff;
+	vbl_end = (vbl >> 16) & 0x1fff;
+
+	if ((*vpos < vbl_start) || (*vpos > vbl_end))
+		in_vbl = false;
+
+	/* Inside "upper part" of vblank area? Apply corrective offset: */
+	if (in_vbl && (*vpos >= vbl_start))
+		*vpos = *vpos - vtotal;
+
+	/* Readouts valid? */
+	if (vbl > 0)
+		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_INVBL;
+
+	return ret;
+}
+
+static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+			      int *max_error,
+			      struct timeval *vblank_time,
+			      unsigned flags)
+{
+	struct drm_crtc *crtc;
+
+	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
+		return -EINVAL;
+	}
+
+	/* Get drm_crtc to timestamp: */
+	crtc = intel_get_crtc_for_pipe(dev, pipe);
+	if (crtc == NULL) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
+		return -EINVAL;
+	}
+
+	if (!crtc->enabled) {
+		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+		return -EBUSY;
+	}
+
+	/* Helper routine in DRM core does all the work: */
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+						     vblank_time, flags,
+						     crtc);
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
+
+static void i915_hotplug_work_func(struct work_struct *work)
+{
+	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+						    hotplug_work);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_connector *intel_connector;
+	struct intel_encoder *intel_encoder;
+	struct drm_connector *connector;
+	unsigned long irqflags;
+	bool hpd_disabled = false;
+
+	/* HPD irq before everything is fully set up. */
+	if (!dev_priv->enable_hotplug_processing)
+		return;
+
+	mutex_lock(&mode_config->mutex);
+	DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		intel_connector = to_intel_connector(connector);
+		intel_encoder = intel_connector->encoder;
+		if (intel_encoder->hpd_pin > HPD_NONE &&
+		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
+		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
+			DRM_INFO("HPD interrupt storm detected on connector %s: "
+				 "switching from hotplug detection to polling\n",
+				drm_get_connector_name(connector));
+			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT
+				| DRM_CONNECTOR_POLL_DISCONNECT;
+			hpd_disabled = true;
+		}
+	}
+	/* If there were no outputs to poll, polling was disabled, so make
+	 * sure it is re-enabled now that HPD has been disabled on some
+	 * connectors. */
+	if (hpd_disabled) {
+		drm_kms_helper_poll_enable(dev);
+		mod_timer(&dev_priv->hotplug_reenable_timer,
+			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+	}
+
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+		if (intel_encoder->hot_plug)
+			intel_encoder->hot_plug(intel_encoder);
+
+	mutex_unlock(&mode_config->mutex);
+
+	/* Just fire off a uevent and let userspace tell us what to do */
+	drm_helper_hpd_irq_event(dev);
+}
+
+static void ironlake_handle_rps_change(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 busy_up, busy_down, max_avg, min_avg;
+	u8 new_delay;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mchdev_lock, flags);
+
+	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+	new_delay = dev_priv->ips.cur_delay;
+
+	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
+	busy_up = I915_READ(RCPREVBSYTUPAVG);
+	busy_down = I915_READ(RCPREVBSYTDNAVG);
+	max_avg = I915_READ(RCBMAXAVG);
+	min_avg = I915_READ(RCBMINAVG);
+
+	/* Handle RPS change request from hw */
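+	/* Note: delay steps down as the GPU gets busier, so ips.max_delay
+	 * is the numerically smaller bound, as the clamps below show.
+	 */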
+	if (busy_up > max_avg) {
+		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
+			new_delay = dev_priv->ips.cur_delay - 1;
+		if (new_delay < dev_priv->ips.max_delay)
+			new_delay = dev_priv->ips.max_delay;
+	} else if (busy_down < min_avg) {
+		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
+			new_delay = dev_priv->ips.cur_delay + 1;
+		if (new_delay > dev_priv->ips.min_delay)
+			new_delay = dev_priv->ips.min_delay;
+	}
+
+	if (ironlake_set_drps(dev, new_delay))
+		dev_priv->ips.cur_delay = new_delay;
+
+	spin_unlock_irqrestore(&mchdev_lock, flags);
+
+	return;
+}
+
+static void notify_ring(struct drm_device *dev,
+			struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (ring->obj == NULL)
+		return;
+
+	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
+
+	wake_up_all(&ring->irq_queue);
+	if (i915_enable_hangcheck) {
+		dev_priv->gpu_error.hangcheck_count = 0;
+		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+	}
+}
+
+static void gen6_pm_rps_work(struct work_struct *work)
+{
+	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+						    rps.work);
+	u32 pm_iir, pm_imr;
+	u8 new_delay;
+
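+	/* Snapshot and clear the deferred PM events, and unmask them in
+	 * PMIMR, under the lock so the IRQ handler can accumulate a new
+	 * batch while this one is processed.
+	 */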
+	spin_lock_irq(&dev_priv->rps.lock);
+	pm_iir = dev_priv->rps.pm_iir;
+	dev_priv->rps.pm_iir = 0;
+	pm_imr = I915_READ(GEN6_PMIMR);
+	I915_WRITE(GEN6_PMIMR, 0);
+	spin_unlock_irq(&dev_priv->rps.lock);
+
+	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
+		return;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+		new_delay = dev_priv->rps.cur_delay + 1;
+	else
+		new_delay = dev_priv->rps.cur_delay - 1;
+
+	/* sysfs frequency interfaces may have snuck in while servicing the
+	 * interrupt
+	 */
+	if (!(new_delay > dev_priv->rps.max_delay ||
+	      new_delay < dev_priv->rps.min_delay)) {
+		gen6_set_rps(dev_priv->dev, new_delay);
+	}
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+
+/**
+ * ivybridge_parity_work - Workqueue called when a parity error interrupt
+ * occurred.
+ * @work: workqueue struct
+ *
+ * Doesn't actually do anything except notify userspace. As a consequence of
+ * this event, userspace should try to remap the bad rows, since statistically
+ * the same row is likely to go bad again.
+ */
+static void ivybridge_parity_work(struct work_struct *work)
+{
+	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+						    l3_parity.error_work);
+	u32 error_status, row, bank, subbank;
+	char *parity_event[5];
+	uint32_t misccpctl;
+	unsigned long flags;
+
+	/* We must turn off DOP level clock gating to access the L3 registers.
+	 * In order to prevent a get/put style interface, acquire struct mutex
+	 * any time we access those registers.
+	 */
+	mutex_lock(&dev_priv->dev->struct_mutex);
+
+	misccpctl = I915_READ(GEN7_MISCCPCTL);
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+	POSTING_READ(GEN7_MISCCPCTL);
+
+	error_status = I915_READ(GEN7_L3CDERRST1);
+	row = GEN7_PARITY_ERROR_ROW(error_status);
+	bank = GEN7_PARITY_ERROR_BANK(error_status);
+	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
+				    GEN7_L3CDERRST1_ENABLE);
+	POSTING_READ(GEN7_L3CDERRST1);
+
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	mutex_unlock(&dev_priv->dev->struct_mutex);
+
+	parity_event[0] = "L3_PARITY_ERROR=1";
+	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+	parity_event[4] = NULL;
+
+	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
+			   KOBJ_CHANGE, parity_event);
+
+	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
+		  row, bank, subbank);
+
+	kfree(parity_event[3]);
+	kfree(parity_event[2]);
+	kfree(parity_event[1]);
+}
+
+static void ivybridge_handle_parity_error(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long flags;
+
+	if (!HAS_L3_GPU_CACHE(dev))
+		return;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
+}
+
+static void snb_gt_irq_handler(struct drm_device *dev,
+			       struct drm_i915_private *dev_priv,
+			       u32 gt_iir)
+{
+	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
+		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
+		notify_ring(dev, &dev_priv->ring[RCS]);
+	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->ring[VCS]);
+	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->ring[BCS]);
+
+	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+		      GT_RENDER_CS_ERROR_INTERRUPT)) {
+		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
+		i915_handle_error(dev, false);
+	}
+
+	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
+		ivybridge_handle_parity_error(dev);
+}
+
+static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
+				u32 pm_iir)
+{
+	unsigned long flags;
+
+	/*
+	 * IIR bits should never already be set because IMR should
+	 * prevent an interrupt from being shown in IIR. The warning
+	 * catches a case where we've unsafely cleared
+	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
+	 * type is not a problem, it indicates a flaw in the logic.
+	 *
+	 * The mask bit in IMR is cleared by dev_priv->rps.work.
+	 */
+
+	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	dev_priv->rps.pm_iir |= pm_iir;
+	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
+	POSTING_READ(GEN6_PMIMR);
+	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+
+	queue_work(dev_priv->wq, &dev_priv->rps.work);
+}
+
+#define HPD_STORM_DETECT_PERIOD 1000
+#define HPD_STORM_THRESHOLD 5
+
+static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
+					    u32 hotplug_trigger,
+					    const u32 *hpd)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+	int i;
+	bool ret = false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
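+	/* For each pin that fired, count events inside a rolling
+	 * HPD_STORM_DETECT_PERIOD ms window: a stale window resets the
+	 * count, and exceeding HPD_STORM_THRESHOLD within one window
+	 * marks the pin disabled so the callers can fall back to polling.
+	 */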
+	for (i = 1; i < HPD_NUM_PINS; i++) {
+
+		if (!(hpd[i] & hotplug_trigger) ||
+		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
+			continue;
+
+		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
+				   dev_priv->hpd_stats[i].hpd_last_jiffies
+				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
+			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
+			dev_priv->hpd_stats[i].hpd_cnt = 0;
+		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
+			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
+			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
+			ret = true;
+		} else {
+			dev_priv->hpd_stats[i].hpd_cnt++;
+		}
+	}
+
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	return ret;
+}
+
+static void gmbus_irq_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
+static void dp_aux_irq_handler(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
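+	/* DP AUX completions share the GMBUS wait queue. */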
+	wake_up_all(&dev_priv->gmbus_wait_queue);
+}
+
+static irqreturn_t valleyview_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 iir, gt_iir, pm_iir;
+	irqreturn_t ret = IRQ_NONE;
+	unsigned long irqflags;
+	int pipe;
+	u32 pipe_stats[I915_MAX_PIPES];
+
+	atomic_inc(&dev_priv->irq_received);
+
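+	/* Keep draining until all three IIR registers read back zero;
+	 * each pass handles and then acks whatever was latched.
+	 */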
+	while (true) {
+		iir = I915_READ(VLV_IIR);
+		gt_iir = I915_READ(GTIIR);
+		pm_iir = I915_READ(GEN6_PMIIR);
+
+		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
+			goto out;
+
+		ret = IRQ_HANDLED;
+
+		snb_gt_irq_handler(dev, dev_priv, gt_iir);
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		for_each_pipe(pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/*
+			 * Clear the PIPE*STAT regs before the IIR
+			 */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+					DRM_DEBUG_DRIVER("pipe %c underrun\n",
+							 pipe_name(pipe));
+				I915_WRITE(reg, pipe_stats[pipe]);
+			}
+		}
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+		for_each_pipe(pipe) {
+			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+				drm_handle_vblank(dev, pipe);
+
+			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
+				intel_prepare_page_flip(dev, pipe);
+				intel_finish_page_flip(dev, pipe);
+			}
+		}
+
+		/* Consume port.  Then clear IIR or we'll miss events */
+		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+					 hotplug_status);
+			if (hotplug_trigger) {
+				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
+					i915_hpd_irq_setup(dev);
+				queue_work(dev_priv->wq,
+					   &dev_priv->hotplug_work);
+			}
+			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+			I915_READ(PORT_HOTPLUG_STAT);
+		}
+
+		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+			gmbus_irq_handler(dev);
+
+		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+			gen6_queue_rps_work(dev_priv, pm_iir);
+
+		I915_WRITE(GTIIR, gt_iir);
+		I915_WRITE(GEN6_PMIIR, pm_iir);
+		I915_WRITE(VLV_IIR, iir);
+	}
+
+out:
+	return ret;
+}
+
+static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
+
+	if (hotplug_trigger) {
+		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
+			ibx_hpd_irq_setup(dev);
+		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+	}
+	if (pch_iir & SDE_AUDIO_POWER_MASK)
+		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
+				 SDE_AUDIO_POWER_SHIFT);
+
+	if (pch_iir & SDE_AUX_MASK)
+		dp_aux_irq_handler(dev);
+
+	if (pch_iir & SDE_GMBUS)
+		gmbus_irq_handler(dev);
+
+	if (pch_iir & SDE_AUDIO_HDCP_MASK)
+		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_TRANS_MASK)
+		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+
+	if (pch_iir & SDE_POISON)
+		DRM_ERROR("PCH poison interrupt\n");
+
+	if (pch_iir & SDE_FDI_MASK)
+		for_each_pipe(pipe)
+			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
+					 pipe_name(pipe),
+					 I915_READ(FDI_RX_IIR(pipe)));
+
+	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+
+	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+
+	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
+	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
+}
+
+static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
+
+	if (hotplug_trigger) {
+		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
+			ibx_hpd_irq_setup(dev);
+		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+	}
+	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
+		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+				 SDE_AUDIO_POWER_SHIFT_CPT);
+
+	if (pch_iir & SDE_AUX_MASK_CPT)
+		dp_aux_irq_handler(dev);
+
+	if (pch_iir & SDE_GMBUS_CPT)
+		gmbus_irq_handler(dev);
+
+	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
+		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
+		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+
+	if (pch_iir & SDE_FDI_MASK_CPT)
+		for_each_pipe(pipe)
+			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
+					 pipe_name(pipe),
+					 I915_READ(FDI_RX_IIR(pipe)));
+}
+
+static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
+	irqreturn_t ret = IRQ_NONE;
+	int i;
+
+	atomic_inc(&dev_priv->irq_received);
+
+	/* disable master interrupt before clearing iir */
+	de_ier = I915_READ(DEIER);
+	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+
+	/* Disable south interrupts. We'll only write to SDEIIR once, so further
+	 * interrupts will be stored on its back queue, and then we'll be
+	 * able to process them after we restore SDEIER (as soon as we restore
+	 * it, we'll get an interrupt if SDEIIR still has something to process
+	 * due to its back queue). */
+	if (!HAS_PCH_NOP(dev)) {
+		sde_ier = I915_READ(SDEIER);
+		I915_WRITE(SDEIER, 0);
+		POSTING_READ(SDEIER);
+	}
+
+	gt_iir = I915_READ(GTIIR);
+	if (gt_iir) {
+		snb_gt_irq_handler(dev, dev_priv, gt_iir);
+		I915_WRITE(GTIIR, gt_iir);
+		ret = IRQ_HANDLED;
+	}
+
+	de_iir = I915_READ(DEIIR);
+	if (de_iir) {
+		if (de_iir & DE_AUX_CHANNEL_A_IVB)
+			dp_aux_irq_handler(dev);
+
+		if (de_iir & DE_GSE_IVB)
+			intel_opregion_gse_intr(dev);
+
+		for (i = 0; i < 3; i++) {
+			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+				drm_handle_vblank(dev, i);
+			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+				intel_prepare_page_flip(dev, i);
+				intel_finish_page_flip_plane(dev, i);
+			}
+		}
+
+		/* check event from PCH */
+		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+			u32 pch_iir = I915_READ(SDEIIR);
+
+			cpt_irq_handler(dev, pch_iir);
+
+			/* clear PCH hotplug event before clearing the CPU irq */
+			I915_WRITE(SDEIIR, pch_iir);
+		}
+
+		I915_WRITE(DEIIR, de_iir);
+		ret = IRQ_HANDLED;
+	}
+
+	pm_iir = I915_READ(GEN6_PMIIR);
+	if (pm_iir) {
+		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+			gen6_queue_rps_work(dev_priv, pm_iir);
+		I915_WRITE(GEN6_PMIIR, pm_iir);
+		ret = IRQ_HANDLED;
+	}
+
+	I915_WRITE(DEIER, de_ier);
+	POSTING_READ(DEIER);
+	if (!HAS_PCH_NOP(dev)) {
+		I915_WRITE(SDEIER, sde_ier);
+		POSTING_READ(SDEIER);
+	}
+
+	return ret;
+}
+
+static void ilk_gt_irq_handler(struct drm_device *dev,
+			       struct drm_i915_private *dev_priv,
+			       u32 gt_iir)
+{
+	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+		notify_ring(dev, &dev_priv->ring[RCS]);
+	if (gt_iir & GT_BSD_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int ret = IRQ_NONE;
+	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
+
+	atomic_inc(&dev_priv->irq_received);
+
+	/* disable master interrupt before clearing iir */
+	de_ier = I915_READ(DEIER);
+	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+	POSTING_READ(DEIER);
+
+	/* Disable south interrupts. We'll only write to SDEIIR once, so further
+	 * interrupts will be stored on its back queue, and then we'll be
+	 * able to process them after we restore SDEIER (as soon as we restore
+	 * it, we'll get an interrupt if SDEIIR still has something to process
+	 * due to its back queue). */
+	sde_ier = I915_READ(SDEIER);
+	I915_WRITE(SDEIER, 0);
+	POSTING_READ(SDEIER);
+
+	de_iir = I915_READ(DEIIR);
+	gt_iir = I915_READ(GTIIR);
+	pm_iir = I915_READ(GEN6_PMIIR);
+
+	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
+		goto done;
+
+	ret = IRQ_HANDLED;
+
+	if (IS_GEN5(dev))
+		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+	else
+		snb_gt_irq_handler(dev, dev_priv, gt_iir);
+
+	if (de_iir & DE_AUX_CHANNEL_A)
+		dp_aux_irq_handler(dev);
+
+	if (de_iir & DE_GSE)
+		intel_opregion_gse_intr(dev);
+
+	if (de_iir & DE_PIPEA_VBLANK)
+		drm_handle_vblank(dev, 0);
+
+	if (de_iir & DE_PIPEB_VBLANK)
+		drm_handle_vblank(dev, 1);
+
+	if (de_iir & DE_PLANEA_FLIP_DONE) {
+		intel_prepare_page_flip(dev, 0);
+		intel_finish_page_flip_plane(dev, 0);
+	}
+
+	if (de_iir & DE_PLANEB_FLIP_DONE) {
+		intel_prepare_page_flip(dev, 1);
+		intel_finish_page_flip_plane(dev, 1);
+	}
+
+	/* check event from PCH */
+	if (de_iir & DE_PCH_EVENT) {
+		u32 pch_iir = I915_READ(SDEIIR);
+
+		if (HAS_PCH_CPT(dev))
+			cpt_irq_handler(dev, pch_iir);
+		else
+			ibx_irq_handler(dev, pch_iir);
+
+		/* should clear PCH hotplug event before clear CPU irq */
+		I915_WRITE(SDEIIR, pch_iir);
+	}
+
+	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
+		ironlake_handle_rps_change(dev);
+
+	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
+		gen6_queue_rps_work(dev_priv, pm_iir);
+
+	I915_WRITE(GTIIR, gt_iir);
+	I915_WRITE(DEIIR, de_iir);
+	I915_WRITE(GEN6_PMIIR, pm_iir);
+
+done:
+	I915_WRITE(DEIER, de_ier);
+	POSTING_READ(DEIER);
+	I915_WRITE(SDEIER, sde_ier);
+	POSTING_READ(SDEIER);
+
+	return ret;
+}
+
+static void i915_error_wake_up(struct drm_i915_private *dev_priv,
+			       bool reset_completed)
+{
+	struct intel_ring_buffer *ring;
+	int i;
+
+	/*
+	 * Notify all waiters for GPU completion events that reset state has
+	 * been changed, and that they need to restart their wait after
+	 * checking for potential errors (and bail out to drop locks if there is
+	 * a gpu reset pending so that i915_error_work_func can acquire them).
+	 */
+
+	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
+	for_each_ring(ring, dev_priv, i)
+		wake_up_all(&ring->irq_queue);
+
+	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
+	wake_up_all(&dev_priv->pending_flip_queue);
+
+	/*
+	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
+	 * reset state is cleared.
+	 */
+	if (reset_completed)
+		wake_up_all(&dev_priv->gpu_error.reset_queue);
+}
+
+/**
+ * i915_error_work_func - do process context error handling work
+ * @work: work struct
+ *
+ * Fire an error uevent so userspace can see that a hang or error
+ * was detected.
+ */
+static void i915_error_work_func(struct work_struct *work)
+{
+	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
+						    work);
+	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
+						    gpu_error);
+	struct drm_device *dev = dev_priv->dev;
+	char *error_event[] = { "ERROR=1", NULL };
+	char *reset_event[] = { "RESET=1", NULL };
+	char *reset_done_event[] = { "ERROR=0", NULL };
+	int ret;
+
+	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+
+	/*
+	 * Note that there's only one work item which does gpu resets, so we
+	 * need not worry about concurrent gpu resets potentially incrementing
+	 * error->reset_counter twice. We only need to take care of another
+	 * racing irq/hangcheck declaring the gpu dead for a second time. A
+	 * quick check for that is good enough: schedule_work ensures the
+	 * correct ordering between hang detection and this work item, and since
+	 * the reset in-progress bit is only ever set by code outside of this
+	 * work we don't need to worry about any other races.
+	 */
+	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
+		DRM_DEBUG_DRIVER("resetting chip\n");
+		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+				   reset_event);
+
+		/*
+		 * All state reset _must_ be completed before we update the
+		 * reset counter, for otherwise waiters might miss the reset
+		 * pending state and not properly drop locks, resulting in
+		 * deadlocks with the reset work.
+		 */
+		ret = i915_reset(dev);
+
+		intel_display_handle_reset(dev);
+
+		if (ret == 0) {
+			/*
+			 * After all the gem state is reset, increment the reset
+			 * counter and wake up everyone waiting for the reset to
+			 * complete.
+			 *
+			 * Since unlock operations are a one-sided barrier only,
+			 * we need to insert a barrier here to order any seqno
+			 * updates before the counter increment.
+			 */
+			smp_mb__before_atomic_inc();
+			atomic_inc(&dev_priv->gpu_error.reset_counter);
+
+			kobject_uevent_env(&dev->primary->kdev.kobj,
+					   KOBJ_CHANGE, reset_done_event);
+		} else {
+			atomic_set(&error->reset_counter, I915_WEDGED);
+		}
+
+		/*
+		 * Note: The wake_up also serves as a memory barrier so that
+		 * waiters see the updated value of the reset counter atomic_t.
+		 */
+		i915_error_wake_up(dev_priv, true);
+	}
+}
+
+/* NB: please notice the memset */
+static void i915_get_extra_instdone(struct drm_device *dev,
+				    uint32_t *instdone)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+	switch(INTEL_INFO(dev)->gen) {
+	case 2:
+	case 3:
+		instdone[0] = I915_READ(INSTDONE);
+		break;
+	case 4:
+	case 5:
+	case 6:
+		instdone[0] = I915_READ(INSTDONE_I965);
+		instdone[1] = I915_READ(INSTDONE1);
+		break;
+	default:
+		WARN_ONCE(1, "Unsupported platform\n");
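+		/* fall through and report the gen7 layout as a best effort */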
+	case 7:
+		instdone[0] = I915_READ(GEN7_INSTDONE_1);
+		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+		break;
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct drm_i915_error_object *
+i915_error_object_create_sized(struct drm_i915_private *dev_priv,
+			       struct drm_i915_gem_object *src,
+			       const int num_pages)
+{
+	struct drm_i915_error_object *dst;
+	int i;
+	u32 reloc_offset;
+
+	if (src == NULL || src->pages == NULL)
+		return NULL;
+
+	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+	if (dst == NULL)
+		return NULL;
+
+	reloc_offset = src->gtt_offset;
+	for (i = 0; i < num_pages; i++) {
+		unsigned long flags;
+		void *d;
+
+		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+		if (d == NULL)
+			goto unwind;
+
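+		/* Three ways to copy the page: through the GTT aperture
+		 * when the object has a global GTT mapping, straight out
+		 * of stolen memory, or via a CPU kmap as the fallback.
+		 */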
+		local_irq_save(flags);
+		if (reloc_offset < dev_priv->gtt.mappable_end &&
+		    src->has_global_gtt_mapping) {
+			void __iomem *s;
+
+			/* Simply ignore tiling or any overlapping fence.
+			 * It's part of the error state, and this hopefully
+			 * captures what the GPU read.
+			 */
+
+			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+						     reloc_offset);
+			memcpy_fromio(d, s, PAGE_SIZE);
+			io_mapping_unmap_atomic(s);
+		} else if (src->stolen) {
+			unsigned long offset;
+
+			offset = dev_priv->mm.stolen_base;
+			offset += src->stolen->start;
+			offset += i << PAGE_SHIFT;
+
+			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
+		} else {
+			struct page *page;
+			void *s;
+
+			page = i915_gem_object_get_page(src, i);
+
+			drm_clflush_pages(&page, 1);
+
+			s = kmap_atomic(page);
+			memcpy(d, s, PAGE_SIZE);
+			kunmap_atomic(s);
+
+			drm_clflush_pages(&page, 1);
+		}
+		local_irq_restore(flags);
+
+		dst->pages[i] = d;
+
+		reloc_offset += PAGE_SIZE;
+	}
+	dst->page_count = num_pages;
+	dst->gtt_offset = src->gtt_offset;
+
+	return dst;
+
+unwind:
+	while (i--)
+		kfree(dst->pages[i]);
+	kfree(dst);
+	return NULL;
+}
+#define i915_error_object_create(dev_priv, src) \
+	i915_error_object_create_sized((dev_priv), (src), \
+				       (src)->base.size>>PAGE_SHIFT)
+
+static void
+i915_error_object_free(struct drm_i915_error_object *obj)
+{
+	int page;
+
+	if (obj == NULL)
+		return;
+
+	for (page = 0; page < obj->page_count; page++)
+		kfree(obj->pages[page]);
+
+	kfree(obj);
+}
+
+void
+i915_error_state_free(struct kref *error_ref)
+{
+	struct drm_i915_error_state *error = container_of(error_ref,
+							  typeof(*error), ref);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+		i915_error_object_free(error->ring[i].batchbuffer);
+		i915_error_object_free(error->ring[i].ringbuffer);
+		kfree(error->ring[i].requests);
+	}
+
+	kfree(error->active_bo);
+	kfree(error->overlay);
+	kfree(error);
+}
+
+static void capture_bo(struct drm_i915_error_buffer *err,
+		       struct drm_i915_gem_object *obj)
+{
+	err->size = obj->base.size;
+	err->name = obj->base.name;
+	err->rseqno = obj->last_read_seqno;
+	err->wseqno = obj->last_write_seqno;
+	err->gtt_offset = obj->gtt_offset;
+	err->read_domains = obj->base.read_domains;
+	err->write_domain = obj->base.write_domain;
+	err->fence_reg = obj->fence_reg;
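+	/* Pin state encoding: 0 unpinned, 1 kernel pin, -1 user pin. */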
+	err->pinned = 0;
+	if (obj->pin_count > 0)
+		err->pinned = 1;
+	if (obj->user_pin_count > 0)
+		err->pinned = -1;
+	err->tiling = obj->tiling_mode;
+	err->dirty = obj->dirty;
+	err->purgeable = obj->madv != I915_MADV_WILLNEED;
+	err->ring = obj->ring ? obj->ring->id : -1;
+	err->cache_level = obj->cache_level;
+}
+
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+			     int count, struct list_head *head)
+{
+	struct drm_i915_gem_object *obj;
+	int i = 0;
+
+	list_for_each_entry(obj, head, mm_list) {
+		capture_bo(err++, obj);
+		if (++i == count)
+			break;
+	}
+
+	return i;
+}
+
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+			     int count, struct list_head *head)
+{
+	struct drm_i915_gem_object *obj;
+	int i = 0;
+
+	list_for_each_entry(obj, head, gtt_list) {
+		if (obj->pin_count == 0)
+			continue;
+
+		capture_bo(err++, obj);
+		if (++i == count)
+			break;
+	}
+
+	return i;
+}
+
+static void i915_gem_record_fences(struct drm_device *dev,
+				   struct drm_i915_error_state *error)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	/* Fences */
+	switch (INTEL_INFO(dev)->gen) {
+	case 7:
+	case 6:
+		for (i = 0; i < dev_priv->num_fence_regs; i++)
+			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+		break;
+	case 5:
+	case 4:
+		for (i = 0; i < 16; i++)
+			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+		break;
+	case 3:
+		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+			for (i = 0; i < 8; i++)
+				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
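+		/* fall through: fence regs 0-7 use the 830 register layout */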
+	case 2:
+		for (i = 0; i < 8; i++)
+			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+		break;
+
+	default:
+		BUG();
+	}
+}
+
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+			     struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_object *obj;
+	u32 seqno;
+
+	if (!ring->get_seqno)
+		return NULL;
+
+	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+		u32 acthd = I915_READ(ACTHD);
+
+		if (WARN_ON(ring->id != RCS))
+			return NULL;
+
+		obj = ring->private;
+		if (acthd >= obj->gtt_offset &&
+		    acthd < obj->gtt_offset + obj->base.size)
+			return i915_error_object_create(dev_priv, obj);
+	}
+
+	seqno = ring->get_seqno(ring, false);
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (obj->ring != ring)
+			continue;
+
+		if (i915_seqno_passed(seqno, obj->last_read_seqno))
+			continue;
+
+		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+			continue;
+
+		/* We need to copy these to an anonymous buffer as the simplest
+		 * method to avoid being overwritten by userspace.
+		 */
+		return i915_error_object_create(dev_priv, obj);
+	}
+
+	return NULL;
+}
+
+static void i915_record_ring_state(struct drm_device *dev,
+				   struct drm_i915_error_state *error,
+				   struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
+		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+		error->semaphore_mboxes[ring->id][0]
+			= I915_READ(RING_SYNC_0(ring->mmio_base));
+		error->semaphore_mboxes[ring->id][1]
+			= I915_READ(RING_SYNC_1(ring->mmio_base));
+		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
+	}
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+		if (ring->id == RCS)
+			error->bbaddr = I915_READ64(BB_ADDR);
+	} else {
+		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
+		error->ipeir[ring->id] = I915_READ(IPEIR);
+		error->ipehr[ring->id] = I915_READ(IPEHR);
+		error->instdone[ring->id] = I915_READ(INSTDONE);
+	}
+
+	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
+	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+	error->seqno[ring->id] = ring->get_seqno(ring, false);
+	error->acthd[ring->id] = intel_ring_get_active_head(ring);
+	error->head[ring->id] = I915_READ_HEAD(ring);
+	error->tail[ring->id] = I915_READ_TAIL(ring);
+	error->ctl[ring->id] = I915_READ_CTL(ring);
+
+	error->cpu_ring_head[ring->id] = ring->head;
+	error->cpu_ring_tail[ring->id] = ring->tail;
+}
+
+static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+					   struct drm_i915_error_state *error,
+					   struct drm_i915_error_ring *ering)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj;
+
+	/* Currently render ring is the only HW context user */
+	if (ring->id != RCS || !error->ccid)
+		return;
+
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
+			ering->ctx = i915_error_object_create_sized(dev_priv,
+								    obj, 1);
+		}
+	}
+}
+
+static void i915_gem_record_rings(struct drm_device *dev,
+				  struct drm_i915_error_state *error)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	struct drm_i915_gem_request *request;
+	int i, count;
+
+	for_each_ring(ring, dev_priv, i) {
+		i915_record_ring_state(dev, error, ring);
+
+		error->ring[i].batchbuffer =
+			i915_error_first_batchbuffer(dev_priv, ring);
+
+		error->ring[i].ringbuffer =
+			i915_error_object_create(dev_priv, ring->obj);
+
+		i915_gem_record_active_context(ring, error, &error->ring[i]);
+
+		count = 0;
+		list_for_each_entry(request, &ring->request_list, list)
+			count++;
+
+		error->ring[i].num_requests = count;
+		error->ring[i].requests =
+			kmalloc(count*sizeof(struct drm_i915_error_request),
+				GFP_ATOMIC);
+		if (error->ring[i].requests == NULL) {
+			error->ring[i].num_requests = 0;
+			continue;
+		}
+
+		count = 0;
+		list_for_each_entry(request, &ring->request_list, list) {
+			struct drm_i915_error_request *erq;
+
+			erq = &error->ring[i].requests[count++];
+			erq->seqno = request->seqno;
+			erq->jiffies = request->emitted_jiffies;
+			erq->tail = request->tail;
+		}
+	}
+}
+
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @dev: drm device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error.  Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+static void i915_capture_error_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct drm_i915_error_state *error;
+	unsigned long flags;
+	int i, pipe;
+
+	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+	error = dev_priv->gpu_error.first_error;
+	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+	if (error)
+		return;
+
+	/* Account for pipe specific data like PIPE*STAT */
+	error = kzalloc(sizeof(*error), GFP_ATOMIC);
+	if (!error) {
+		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+		return;
+	}
+
+	DRM_INFO("capturing error event; look for more information in "
+		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
+		 dev->primary->index);
+
+	kref_init(&error->ref);
+	error->eir = I915_READ(EIR);
+	error->pgtbl_er = I915_READ(PGTBL_ER);
+	if (HAS_HW_CONTEXTS(dev))
+		error->ccid = I915_READ(CCID);
+
+	if (HAS_PCH_SPLIT(dev))
+		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+	else if (IS_VALLEYVIEW(dev))
+		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+	else if (IS_GEN2(dev))
+		error->ier = I915_READ16(IER);
+	else
+		error->ier = I915_READ(IER);
+
+	if (INTEL_INFO(dev)->gen >= 6)
+		error->derrmr = I915_READ(DERRMR);
+
+	if (IS_VALLEYVIEW(dev))
+		error->forcewake = I915_READ(FORCEWAKE_VLV);
+	else if (INTEL_INFO(dev)->gen >= 7)
+		error->forcewake = I915_READ(FORCEWAKE_MT);
+	else if (INTEL_INFO(dev)->gen == 6)
+		error->forcewake = I915_READ(FORCEWAKE);
+
+	if (!HAS_PCH_SPLIT(dev))
+		for_each_pipe(pipe)
+			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		error->error = I915_READ(ERROR_GEN6);
+		error->done_reg = I915_READ(DONE_REG);
+	}
+
+	if (INTEL_INFO(dev)->gen == 7)
+		error->err_int = I915_READ(GEN7_ERR_INT);
+
+	i915_get_extra_instdone(dev, error->extra_instdone);
+
+	i915_gem_record_fences(dev, error);
+	i915_gem_record_rings(dev, error);
+
+	/* Record buffers on the active and pinned lists. */
+	error->active_bo = NULL;
+	error->pinned_bo = NULL;
+
+	i = 0;
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+		i++;
+	error->active_bo_count = i;
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+		if (obj->pin_count)
+			i++;
+	error->pinned_bo_count = i - error->active_bo_count;
+
+	error->active_bo = NULL;
+	error->pinned_bo = NULL;
+	if (i) {
+		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
+					   GFP_ATOMIC);
+		if (error->active_bo)
+			error->pinned_bo =
+				error->active_bo + error->active_bo_count;
+	}
+
+	if (error->active_bo)
+		error->active_bo_count =
+			capture_active_bo(error->active_bo,
+					  error->active_bo_count,
+					  &dev_priv->mm.active_list);
+
+	if (error->pinned_bo)
+		error->pinned_bo_count =
+			capture_pinned_bo(error->pinned_bo,
+					  error->pinned_bo_count,
+					  &dev_priv->mm.bound_list);
+
+	do_gettimeofday(&error->time);
+
+	error->overlay = intel_overlay_capture_error_state(dev);
+	error->display = intel_display_capture_error_state(dev);
+
+	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+	if (dev_priv->gpu_error.first_error == NULL) {
+		dev_priv->gpu_error.first_error = error;
+		error = NULL;
+	}
+	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+	if (error)
+		i915_error_state_free(&error->ref);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_error_state *error;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+	error = dev_priv->gpu_error.first_error;
+	dev_priv->gpu_error.first_error = NULL;
+	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+	if (error)
+		kref_put(&error->ref, i915_error_state_free);
+}
+#else
+#define i915_capture_error_state(x)
+#endif
+
+static void i915_report_and_clear_eir(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t instdone[I915_NUM_INSTDONE_REG];
+	u32 eir = I915_READ(EIR);
+	int pipe, i;
+
+	if (!eir)
+		return;
+
+	pr_err("render error detected, EIR: 0x%08x\n", eir);
+
+	i915_get_extra_instdone(dev, instdone);
+
+	if (IS_G4X(dev)) {
+		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
+			u32 ipeir = I915_READ(IPEIR_I965);
+
+			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+			for (i = 0; i < ARRAY_SIZE(instdone); i++)
+				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
+			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
+			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
+			I915_WRITE(IPEIR_I965, ipeir);
+			POSTING_READ(IPEIR_I965);
+		}
+		if (eir & GM45_ERROR_PAGE_TABLE) {
+			u32 pgtbl_err = I915_READ(PGTBL_ER);
+			pr_err("page table error\n");
+			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
+			I915_WRITE(PGTBL_ER, pgtbl_err);
+			POSTING_READ(PGTBL_ER);
+		}
+	}
+
+	if (!IS_GEN2(dev)) {
+		if (eir & I915_ERROR_PAGE_TABLE) {
+			u32 pgtbl_err = I915_READ(PGTBL_ER);
+			pr_err("page table error\n");
+			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
+			I915_WRITE(PGTBL_ER, pgtbl_err);
+			POSTING_READ(PGTBL_ER);
+		}
+	}
+
+	if (eir & I915_ERROR_MEMORY_REFRESH) {
+		pr_err("memory refresh error:\n");
+		for_each_pipe(pipe)
+			pr_err("pipe %c stat: 0x%08x\n",
+			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
+		/* pipestat has already been acked */
+	}
+	if (eir & I915_ERROR_INSTRUCTION) {
+		pr_err("instruction error\n");
+		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
+		for (i = 0; i < ARRAY_SIZE(instdone); i++)
+			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
+		if (INTEL_INFO(dev)->gen < 4) {
+			u32 ipeir = I915_READ(IPEIR);
+
+			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
+			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
+			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
+			I915_WRITE(IPEIR, ipeir);
+			POSTING_READ(IPEIR);
+		} else {
+			u32 ipeir = I915_READ(IPEIR_I965);
+
+			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
+			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
+			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
+			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
+			I915_WRITE(IPEIR_I965, ipeir);
+			POSTING_READ(IPEIR_I965);
+		}
+	}
+
+	I915_WRITE(EIR, eir);
+	POSTING_READ(EIR);
+	eir = I915_READ(EIR);
+	if (eir) {
+		/*
+		 * some errors might have become stuck,
+		 * mask them.
+		 */
+		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
+		I915_WRITE(EMR, I915_READ(EMR) | eir);
+		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+	}
+}
+
+/**
+ * i915_handle_error - handle an error interrupt
+ * @dev: drm device
+ *
+ * Do some basic checking of register state at error interrupt time and
+ * dump it to the syslog.  Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs.  Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+void i915_handle_error(struct drm_device *dev, bool wedged)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	i915_capture_error_state(dev);
+	i915_report_and_clear_eir(dev);
+
+	if (wedged) {
+		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
+				&dev_priv->gpu_error.reset_counter);
+
+		/*
+		 * Wakeup waiting processes so that the reset work function
+		 * i915_error_work_func doesn't deadlock trying to grab various
+		 * locks. By bumping the reset counter first, the woken
+		 * processes will see a reset in progress and back off,
+		 * releasing their locks and then wait for the reset completion.
+		 * We must do this for _all_ gpu waiters that might hold locks
+		 * that the reset work needs to acquire.
+		 *
+		 * Note: The wake_up serves as the required memory barrier to
+		 * ensure that the waiters see the updated value of the reset
+		 * counter atomic_t.
+		 */
+		i915_error_wake_up(dev_priv, false);
+	}
+
+	/*
+	 * Our reset work can grab modeset locks (since it needs to reset the
+ * state of outstanding pageflips). Hence it must not be run on our own
+	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
+	 * code will deadlock.
+	 */
+	schedule_work(&dev_priv->gpu_error.work);
+}
+
+static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_i915_gem_object *obj;
+	struct intel_unpin_work *work;
+	unsigned long flags;
+	bool stall_detected;
+
+	/* Ignore early vblank irqs */
+	if (intel_crtc == NULL)
+		return;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	work = intel_crtc->unpin_work;
+
+	if (work == NULL ||
+	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+	    !work->enable_stall_check) {
+		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return;
+	}
+
+	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
+	obj = work->pending_flip_obj;
+	if (INTEL_INFO(dev)->gen >= 4) {
+		int dspsurf = DSPSURF(intel_crtc->plane);
+		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
+					obj->gtt_offset;
+	} else {
+		int dspaddr = DSPADDR(intel_crtc->plane);
+		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+							crtc->y * crtc->fb->pitches[0] +
+							crtc->x * crtc->fb->bits_per_pixel/8);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (stall_detected) {
+		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
+		intel_prepare_page_flip(dev, intel_crtc->plane);
+	}
+}
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+static int i915_enable_vblank(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	if (INTEL_INFO(dev)->gen >= 4)
+		i915_enable_pipestat(dev_priv, pipe,
+				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
+	else
+		i915_enable_pipestat(dev_priv, pipe,
+				     PIPE_VBLANK_INTERRUPT_ENABLE);
+
+	/* maintain vblank delivery even in deep C-states */
+	if (dev_priv->info->gen == 3)
+		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	return 0;
+}
+
+static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	return 0;
+}
+
+static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	ironlake_enable_display_irq(dev_priv,
+				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	return 0;
+}
+
+static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+	u32 imr;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	imr = I915_READ(VLV_IMR);
+	if (pipe == 0)
+		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+	else
+		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+	I915_WRITE(VLV_IMR, imr);
+	i915_enable_pipestat(dev_priv, pipe,
+			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	return 0;
+}
+
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+static void i915_disable_vblank(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	if (dev_priv->info->gen == 3)
+		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
+
+	i915_disable_pipestat(dev_priv, pipe,
+			      PIPE_VBLANK_INTERRUPT_ENABLE |
+			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	ironlake_disable_display_irq(dev_priv,
+				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+	u32 imr;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_disable_pipestat(dev_priv, pipe,
+			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
+	imr = I915_READ(VLV_IMR);
+	if (pipe == 0)
+		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+	else
+		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+	I915_WRITE(VLV_IMR, imr);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+static u32
+ring_last_seqno(struct intel_ring_buffer *ring)
+{
+	return list_entry(ring->request_list.prev,
+			  struct drm_i915_gem_request, list)->seqno;
+}
+
+static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
+{
+	if (list_empty(&ring->request_list) ||
+	    i915_seqno_passed(ring->get_seqno(ring, false),
+			      ring_last_seqno(ring))) {
+		/* Issue a wake-up to catch stuck h/w. */
+		if (waitqueue_active(&ring->irq_queue)) {
+			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+				  ring->name);
+			wake_up_all(&ring->irq_queue);
+			*err = true;
+		}
+		return true;
+	}
+	return false;
+}
+
+static bool semaphore_passed(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
+	struct intel_ring_buffer *signaller;
+	u32 cmd, ipehr, acthd_min;
+
+	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+	if ((ipehr & ~(0x3 << 16)) !=
+	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
+		return false;
+
+	/* ACTHD is likely pointing to the dword after the actual command,
+	 * so scan backwards until we find the MBOX.
+	 */
+	acthd_min = max((int)acthd - 3 * 4, 0);
+	do {
+		cmd = ioread32(ring->virtual_start + acthd);
+		if (cmd == ipehr)
+			break;
+
+		acthd -= 4;
+		if (acthd < acthd_min)
+			return false;
+	} while (1);
+
+	signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
+	return i915_seqno_passed(signaller->get_seqno(signaller, false),
+				 ioread32(ring->virtual_start+acthd+4)+1);
+}
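+/*
+ * Illustrative note (not part of this patch): the scan above assumes the
+ * gen6+ MI_SEMAPHORE_MBOX wait layout, where bit 17 of the command selects
+ * which of the other two rings is the signaller, and the dword immediately
+ * following the command apparently holds the awaited seqno minus one;
+ * hence the "ioread32(... + acthd + 4) + 1" comparison against the
+ * signaller's current seqno.
+ */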
+
+static bool kick_ring(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp = I915_READ_CTL(ring);
+	if (tmp & RING_WAIT) {
+		DRM_ERROR("Kicking stuck wait on %s\n",
+			  ring->name);
+		I915_WRITE_CTL(ring, tmp);
+		return true;
+	}
+
+	if (INTEL_INFO(dev)->gen >= 6 &&
+	    tmp & RING_WAIT_SEMAPHORE &&
+	    semaphore_passed(ring)) {
+		DRM_ERROR("Kicking stuck semaphore on %s\n",
+			  ring->name);
+		I915_WRITE_CTL(ring, tmp);
+		return true;
+	}
+	return false;
+}
+
+static bool i915_hangcheck_hung(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (dev_priv->gpu_error.hangcheck_count++ > 1) {
+		bool hung = true;
+
+		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+		i915_handle_error(dev, true);
+
+		if (!IS_GEN2(dev)) {
+			struct intel_ring_buffer *ring;
+			int i;
+
+			/* Is the chip hanging on a WAIT_FOR_EVENT?
+			 * If so we can simply poke the RB_WAIT bit
+			 * and break the hang. This should work on
+			 * all but the second generation chipsets.
+			 */
+			for_each_ring(ring, dev_priv, i)
+				hung &= !kick_ring(ring);
+		}
+
+		return hung;
+	}
+
+	return false;
+}
+
+/**
+ * This is called when the chip hasn't reported back with completed
+ * batchbuffers in a long time. The first time this is called we simply record
+ * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
+ * again, we assume the chip is wedged and try to fix it.
+ */
+void i915_hangcheck_elapsed(unsigned long data)
+{
+	struct drm_device *dev = (struct drm_device *)data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
+	struct intel_ring_buffer *ring;
+	bool err = false, idle;
+	int i;
+
+	if (!i915_enable_hangcheck)
+		return;
+
+	memset(acthd, 0, sizeof(acthd));
+	idle = true;
+	for_each_ring(ring, dev_priv, i) {
+		idle &= i915_hangcheck_ring_idle(ring, &err);
+		acthd[i] = intel_ring_get_active_head(ring);
+	}
+
+	/* If all work is done then ACTHD clearly hasn't advanced. */
+	if (idle) {
+		if (err) {
+			if (i915_hangcheck_hung(dev))
+				return;
+
+			goto repeat;
+		}
+
+		dev_priv->gpu_error.hangcheck_count = 0;
+		return;
+	}
+
+	i915_get_extra_instdone(dev, instdone);
+	if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
+		   sizeof(acthd)) == 0 &&
+	    memcmp(dev_priv->gpu_error.prev_instdone, instdone,
+		   sizeof(instdone)) == 0) {
+		if (i915_hangcheck_hung(dev))
+			return;
+	} else {
+		dev_priv->gpu_error.hangcheck_count = 0;
+
+		memcpy(dev_priv->gpu_error.last_acthd, acthd,
+		       sizeof(acthd));
+		memcpy(dev_priv->gpu_error.prev_instdone, instdone,
+		       sizeof(instdone));
+	}
+
+repeat:
+	/* Reset timer in case the chip hangs without another request being added */
+	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
+}
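+/*
+ * Illustrative sketch (not part of this patch): the hangcheck timer is only
+ * useful if it is re-armed whenever new work is queued, e.g. from the
+ * request-add path, so an idle GPU never runs the check:
+ *
+ *	if (i915_enable_hangcheck)
+ *		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+ *			  round_jiffies_up(jiffies +
+ *					   DRM_I915_HANGCHECK_JIFFIES));
+ */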
+
+/* drm_dma.h hooks */
+static void ironlake_irq_preinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	I915_WRITE(HWSTAM, 0xeffe);
+
+	/* XXX hotplug from PCH */
+
+	I915_WRITE(DEIMR, 0xffffffff);
+	I915_WRITE(DEIER, 0x0);
+	POSTING_READ(DEIER);
+
+	/* and GT */
+	I915_WRITE(GTIMR, 0xffffffff);
+	I915_WRITE(GTIER, 0x0);
+	POSTING_READ(GTIER);
+
+	if (HAS_PCH_NOP(dev))
+		return;
+
+	/* south display irq */
+	I915_WRITE(SDEIMR, 0xffffffff);
+	/*
+	 * SDEIER is also touched by the interrupt handler to work around missed
+	 * PCH interrupts. Hence we can't update it after the interrupt handler
+	 * is enabled - instead we unconditionally enable all PCH interrupt
+	 * sources here, but then only unmask them as needed with SDEIMR.
+	 */
+	I915_WRITE(SDEIER, 0xffffffff);
+	POSTING_READ(SDEIER);
+}
+
+static void valleyview_irq_preinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	/* VLV magic */
+	I915_WRITE(VLV_IMR, 0);
+	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
+	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
+	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+
+	/* and GT */
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIMR, 0xffffffff);
+	I915_WRITE(GTIER, 0x0);
+	POSTING_READ(GTIER);
+
+	I915_WRITE(DPINVGTT, 0xff);
+
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0xffff);
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(VLV_IMR, 0xffffffff);
+	I915_WRITE(VLV_IER, 0x0);
+	POSTING_READ(VLV_IER);
+}
+
+static void ibx_hpd_irq_setup(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *intel_encoder;
+	u32 mask = ~I915_READ(SDEIMR);
+	u32 hotplug;
+
+	if (HAS_PCH_IBX(dev)) {
+		mask &= ~SDE_HOTPLUG_MASK;
+		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+				mask |= hpd_ibx[intel_encoder->hpd_pin];
+	} else {
+		mask &= ~SDE_HOTPLUG_MASK_CPT;
+		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+				mask |= hpd_cpt[intel_encoder->hpd_pin];
+	}
+
+	I915_WRITE(SDEIMR, ~mask);
+
+	/*
+	 * Enable digital hotplug on the PCH, and configure the DP short pulse
+	 * duration to 2ms (which is the minimum in the Display Port spec)
+	 *
+	 * This register is the same on all known PCH chips.
+	 */
+	hotplug = I915_READ(PCH_PORT_HOTPLUG);
+	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
+	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
+	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+}
+
+static void ibx_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 mask;
+
+	if (HAS_PCH_IBX(dev))
+		mask = SDE_GMBUS | SDE_AUX_MASK;
+	else
+		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
+
+	if (HAS_PCH_NOP(dev))
+		return;
+
+	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+	I915_WRITE(SDEIMR, ~mask);
+}
+
+static int ironlake_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	/* enable the kinds of interrupts that are always enabled */
+	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+			   DE_AUX_CHANNEL_A;
+	u32 render_irqs;
+
+	dev_priv->irq_mask = ~display_mask;
+
+	/* should always be able to generate an irq */
+	I915_WRITE(DEIIR, I915_READ(DEIIR));
+	I915_WRITE(DEIMR, dev_priv->irq_mask);
+	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
+	POSTING_READ(DEIER);
+
+	dev_priv->gt_irq_mask = ~0;
+
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+	if (IS_GEN6(dev))
+		render_irqs =
+			GT_USER_INTERRUPT |
+			GEN6_BSD_USER_INTERRUPT |
+			GEN6_BLITTER_USER_INTERRUPT;
+	else
+		render_irqs =
+			GT_USER_INTERRUPT |
+			GT_PIPE_NOTIFY |
+			GT_BSD_USER_INTERRUPT;
+	I915_WRITE(GTIER, render_irqs);
+	POSTING_READ(GTIER);
+
+	ibx_irq_postinstall(dev);
+
+	if (IS_IRONLAKE_M(dev)) {
+		/* Clear & enable PCU event interrupts */
+		I915_WRITE(DEIIR, DE_PCU_EVENT);
+		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+	}
+
+	return 0;
+}
+
+static int ivybridge_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	/* enable the kinds of interrupts that are always enabled */
+	u32 display_mask =
+		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
+		DE_PLANEC_FLIP_DONE_IVB |
+		DE_PLANEB_FLIP_DONE_IVB |
+		DE_PLANEA_FLIP_DONE_IVB |
+		DE_AUX_CHANNEL_A_IVB;
+	u32 render_irqs;
+
+	dev_priv->irq_mask = ~display_mask;
+
+	/* should always be able to generate an irq */
+	I915_WRITE(DEIIR, I915_READ(DEIIR));
+	I915_WRITE(DEIMR, dev_priv->irq_mask);
+	I915_WRITE(DEIER,
+		   display_mask |
+		   DE_PIPEC_VBLANK_IVB |
+		   DE_PIPEB_VBLANK_IVB |
+		   DE_PIPEA_VBLANK_IVB);
+	POSTING_READ(DEIER);
+
+	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+	I915_WRITE(GTIER, render_irqs);
+	POSTING_READ(GTIER);
+
+	ibx_irq_postinstall(dev);
+
+	return 0;
+}
+
+static int valleyview_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 enable_mask;
+	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+	u32 render_irqs;
+	u16 msid;
+
+	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
+	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+	/*
+	 * Leave vblank interrupts masked initially.  Enable/disable will
+	 * toggle them based on usage.
+	 */
+	dev_priv->irq_mask = (~enable_mask) |
+		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+	/* Hack for broken MSIs on VLV */
+	pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
+	pci_read_config_word(dev->pdev, 0x98, &msid);
+	msid &= 0xff; /* mask out delivery bits */
+	msid |= (1<<14);
+	pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
+
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	POSTING_READ(PORT_HOTPLUG_EN);
+
+	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
+	I915_WRITE(VLV_IER, enable_mask);
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(PIPESTAT(0), 0xffff);
+	I915_WRITE(PIPESTAT(1), 0xffff);
+	POSTING_READ(VLV_IER);
+
+	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
+	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(VLV_IIR, 0xffffffff);
+
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+
+	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+		GEN6_BLITTER_USER_INTERRUPT;
+	I915_WRITE(GTIER, render_irqs);
+	POSTING_READ(GTIER);
+
+	/* ack & enable invalid PTE error interrupts */
+#if 0 /* FIXME: add support to irq handler for checking these bits */
+	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
+#endif
+
+	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+
+	return 0;
+}
+
+static void valleyview_irq_uninstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	if (!dev_priv)
+		return;
+
+	del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0xffff);
+
+	I915_WRITE(HWSTAM, 0xffffffff);
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0xffff);
+	I915_WRITE(VLV_IIR, 0xffffffff);
+	I915_WRITE(VLV_IMR, 0xffffffff);
+	I915_WRITE(VLV_IER, 0x0);
+	POSTING_READ(VLV_IER);
+}
+
+static void ironlake_irq_uninstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	if (!dev_priv)
+		return;
+
+	del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
+	I915_WRITE(HWSTAM, 0xffffffff);
+
+	I915_WRITE(DEIMR, 0xffffffff);
+	I915_WRITE(DEIER, 0x0);
+	I915_WRITE(DEIIR, I915_READ(DEIIR));
+
+	I915_WRITE(GTIMR, 0xffffffff);
+	I915_WRITE(GTIER, 0x0);
+	I915_WRITE(GTIIR, I915_READ(GTIIR));
+
+	if (HAS_PCH_NOP(dev))
+		return;
+
+	I915_WRITE(SDEIMR, 0xffffffff);
+	I915_WRITE(SDEIER, 0x0);
+	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+}
+
+static void i8xx_irq_preinstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE16(IMR, 0xffff);
+	I915_WRITE16(IER, 0x0);
+	POSTING_READ16(IER);
+}
+
+static int i8xx_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	I915_WRITE16(EMR,
+		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+	/* Unmask the interrupts that we always want on. */
+	dev_priv->irq_mask =
+		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+	I915_WRITE16(IMR, dev_priv->irq_mask);
+
+	I915_WRITE16(IER,
+		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+		     I915_USER_INTERRUPT);
+	POSTING_READ16(IER);
+
+	return 0;
+}
+
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i8xx_handle_vblank(struct drm_device *dev,
+			       int pipe, u16 iir)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
+
+	if (!drm_handle_vblank(dev, pipe))
+		return false;
+
+	if ((iir & flip_pending) == 0)
+		return false;
+
+	intel_prepare_page_flip(dev, pipe);
+
+	/* We detect FlipDone by looking for the change in PendingFlip from '1'
+	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
+	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+	 * the flip is completed (no longer pending). Since this doesn't raise
+	 * an interrupt per se, we watch for the change at vblank.
+	 */
+	if (I915_READ16(ISR) & flip_pending)
+		return false;
+
+	intel_finish_page_flip(dev, pipe);
+
+	return true;
+}
+
+static irqreturn_t i8xx_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u16 iir, new_iir;
+	u32 pipe_stats[2];
+	unsigned long irqflags;
+	int irq_received;
+	int pipe;
+	u16 flip_mask =
+		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
+	atomic_inc(&dev_priv->irq_received);
+
+	iir = I915_READ16(IIR);
+	if (iir == 0)
+		return IRQ_NONE;
+
+	while (iir & ~flip_mask) {
+		/* Can't rely on pipestat interrupt bit in iir as it might
+		 * have been cleared after the pipestat interrupt was received.
+		 * It doesn't set the bit in iir again, but it still produces
+		 * interrupts (for non-MSI).
+		 */
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			i915_handle_error(dev, false);
+
+		for_each_pipe(pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/*
+			 * Clear the PIPE*STAT regs before the IIR
+			 */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+					DRM_DEBUG_DRIVER("pipe %c underrun\n",
+							 pipe_name(pipe));
+				I915_WRITE(reg, pipe_stats[pipe]);
+				irq_received = 1;
+			}
+		}
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+		I915_WRITE16(IIR, iir & ~flip_mask);
+		new_iir = I915_READ16(IIR); /* Flush posted writes */
+
+		i915_update_dri1_breadcrumb(dev);
+
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->ring[RCS]);
+
+		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
+		    i8xx_handle_vblank(dev, 0, iir))
+			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
+
+		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
+		    i8xx_handle_vblank(dev, 1, iir))
+			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
+
+		iir = new_iir;
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void i8xx_irq_uninstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	for_each_pipe(pipe) {
+		/* Clear enable bits; then clear status bits */
+		I915_WRITE(PIPESTAT(pipe), 0);
+		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+	}
+	I915_WRITE16(IMR, 0xffff);
+	I915_WRITE16(IER, 0x0);
+	I915_WRITE16(IIR, I915_READ16(IIR));
+}
+
+static void i915_irq_preinstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		I915_WRITE(PORT_HOTPLUG_EN, 0);
+		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	}
+
+	I915_WRITE16(HWSTAM, 0xeffe);
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+	POSTING_READ(IER);
+}
+
+static int i915_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 enable_mask;
+
+	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+
+	/* Unmask the interrupts that we always want on. */
+	dev_priv->irq_mask =
+		~(I915_ASLE_INTERRUPT |
+		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+	enable_mask =
+		I915_ASLE_INTERRUPT |
+		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+		I915_USER_INTERRUPT;
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		I915_WRITE(PORT_HOTPLUG_EN, 0);
+		POSTING_READ(PORT_HOTPLUG_EN);
+
+		/* Enable in IER... */
+		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+		/* and unmask in IMR */
+		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
+	}
+
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	I915_WRITE(IER, enable_mask);
+	POSTING_READ(IER);
+
+	intel_opregion_enable_asle(dev);
+
+	return 0;
+}
+
+/*
+ * Returns true when a page flip has completed.
+ */
+static bool i915_handle_vblank(struct drm_device *dev,
+			       int plane, int pipe, u32 iir)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
+
+	if (!drm_handle_vblank(dev, pipe))
+		return false;
+
+	if ((iir & flip_pending) == 0)
+		return false;
+
+	intel_prepare_page_flip(dev, plane);
+
+	/* We detect FlipDone by looking for the change in PendingFlip from '1'
+	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
+	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
+	 * the flip is completed (no longer pending). Since this doesn't raise
+	 * an interrupt per se, we watch for the change at vblank.
+	 */
+	if (I915_READ(ISR) & flip_pending)
+		return false;
+
+	intel_finish_page_flip(dev, pipe);
+
+	return true;
+}
+
+static irqreturn_t i915_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
+	unsigned long irqflags;
+	u32 flip_mask =
+		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+	int pipe, ret = IRQ_NONE;
+
+	atomic_inc(&dev_priv->irq_received);
+
+	iir = I915_READ(IIR);
+	do {
+		bool irq_received = (iir & ~flip_mask) != 0;
+		bool blc_event = false;
+
+		/* Can't rely on pipestat interrupt bit in iir as it might
+		 * have been cleared after the pipestat interrupt was received.
+		 * It doesn't set the bit in iir again, but it still produces
+		 * interrupts (for non-MSI).
+		 */
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			i915_handle_error(dev, false);
+
+		for_each_pipe(pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/* Clear the PIPE*STAT regs before the IIR */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+					DRM_DEBUG_DRIVER("pipe %c underrun\n",
+							 pipe_name(pipe));
+				I915_WRITE(reg, pipe_stats[pipe]);
+				irq_received = true;
+			}
+		}
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+		if (!irq_received)
+			break;
+
+		/* Consume port.  Then clear IIR or we'll miss events */
+		if ((I915_HAS_HOTPLUG(dev)) &&
+		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+
+			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+				  hotplug_status);
+			if (hotplug_trigger) {
+				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
+					i915_hpd_irq_setup(dev);
+				queue_work(dev_priv->wq,
+					   &dev_priv->hotplug_work);
+			}
+			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+			POSTING_READ(PORT_HOTPLUG_STAT);
+		}
+
+		I915_WRITE(IIR, iir & ~flip_mask);
+		new_iir = I915_READ(IIR); /* Flush posted writes */
+
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->ring[RCS]);
+
+		for_each_pipe(pipe) {
+			int plane = pipe;
+			if (IS_MOBILE(dev))
+				plane = !plane;
+
+			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+			    i915_handle_vblank(dev, plane, pipe, iir))
+				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
+
+			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+				blc_event = true;
+		}
+
+		if (blc_event || (iir & I915_ASLE_INTERRUPT))
+			intel_opregion_asle_intr(dev);
+
+		/* With MSI, interrupts are only generated when iir
+		 * transitions from zero to nonzero.  If another bit got
+		 * set while we were handling the existing iir bits, then
+		 * we would never get another interrupt.
+		 *
+		 * This is fine on non-MSI as well, as if we hit this path
+		 * we avoid exiting the interrupt handler only to generate
+		 * another one.
+		 *
+		 * Note that for MSI this could cause a stray interrupt report
+		 * if an interrupt landed in the time between writing IIR and
+		 * the posting read.  This should be rare enough to never
+		 * trigger the 99% of 100,000 interrupts test for disabling
+		 * stray interrupts.
+		 */
+		ret = IRQ_HANDLED;
+		iir = new_iir;
+	} while (iir & ~flip_mask);
+
+	i915_update_dri1_breadcrumb(dev);
+
+	return ret;
+}
+
+static void i915_irq_uninstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		I915_WRITE(PORT_HOTPLUG_EN, 0);
+		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+	}
+
+	I915_WRITE16(HWSTAM, 0xffff);
+	for_each_pipe(pipe) {
+		/* Clear enable bits; then clear status bits */
+		I915_WRITE(PIPESTAT(pipe), 0);
+		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
+	}
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+
+	I915_WRITE(IIR, I915_READ(IIR));
+}
+
+static void i965_irq_preinstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	atomic_set(&dev_priv->irq_received, 0);
+
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+	I915_WRITE(HWSTAM, 0xeffe);
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+	POSTING_READ(IER);
+}
+
+static int i965_irq_postinstall(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 enable_mask;
+	u32 error_mask;
+
+	/* Unmask the interrupts that we always want on. */
+	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+			       I915_DISPLAY_PORT_INTERRUPT |
+			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
+			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+
+	enable_mask = ~dev_priv->irq_mask;
+	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+	enable_mask |= I915_USER_INTERRUPT;
+
+	if (IS_G4X(dev))
+		enable_mask |= I915_BSD_USER_INTERRUPT;
+
+	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+
+	/*
+	 * Enable some error detection; note that the instruction error mask
+	 * bit is reserved, so we leave it masked.
+	 */
+	if (IS_G4X(dev)) {
+		error_mask = ~(GM45_ERROR_PAGE_TABLE |
+			       GM45_ERROR_MEM_PRIV |
+			       GM45_ERROR_CP_PRIV |
+			       I915_ERROR_MEMORY_REFRESH);
+	} else {
+		error_mask = ~(I915_ERROR_PAGE_TABLE |
+			       I915_ERROR_MEMORY_REFRESH);
+	}
+	I915_WRITE(EMR, error_mask);
+
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	I915_WRITE(IER, enable_mask);
+	POSTING_READ(IER);
+
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	POSTING_READ(PORT_HOTPLUG_EN);
+
+	intel_opregion_enable_asle(dev);
+
+	return 0;
+}
+
+static void i915_hpd_irq_setup(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *intel_encoder;
+	u32 hotplug_en;
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
+		/* Note HDMI and DP share hotplug bits */
+		/* enable bits are the same for all generations */
+		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
+				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
+		/*
+		 * Programming the CRT detection parameters tends to generate
+		 * a spurious hotplug event about three seconds later, so
+		 * just do it once.
+		 */
+		if (IS_G4X(dev))
+			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
+		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+		/* Ignore TV since it's buggy */
+		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+	}
+}
+
+static irqreturn_t i965_irq_handler(int irq, void *arg)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 iir, new_iir;
+	u32 pipe_stats[I915_MAX_PIPES];
+	unsigned long irqflags;
+	int irq_received;
+	int ret = IRQ_NONE, pipe;
+	u32 flip_mask =
+		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
+		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
+
+	atomic_inc(&dev_priv->irq_received);
+
+	iir = I915_READ(IIR);
+
+	for (;;) {
+		bool blc_event = false;
+
+		irq_received = (iir & ~flip_mask) != 0;
+
+		/* Can't rely on pipestat interrupt bit in iir as it might
+		 * have been cleared after the pipestat interrupt was received.
+		 * It doesn't set the bit in iir again, but it still produces
+		 * interrupts (for non-MSI).
+		 */
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+			i915_handle_error(dev, false);
+
+		for_each_pipe(pipe) {
+			int reg = PIPESTAT(pipe);
+			pipe_stats[pipe] = I915_READ(reg);
+
+			/*
+			 * Clear the PIPE*STAT regs before the IIR
+			 */
+			if (pipe_stats[pipe] & 0x8000ffff) {
+				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+					DRM_DEBUG_DRIVER("pipe %c underrun\n",
+							 pipe_name(pipe));
+				I915_WRITE(reg, pipe_stats[pipe]);
+				irq_received = 1;
+			}
+		}
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+		if (!irq_received)
+			break;
+
+		ret = IRQ_HANDLED;
+
+		/* Consume port.  Then clear IIR or we'll miss events */
+		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
+								  HOTPLUG_INT_STATUS_G4X :
+								  HOTPLUG_INT_STATUS_I915);
+
+			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+				  hotplug_status);
+			if (hotplug_trigger) {
+				if (hotplug_irq_storm_detect(dev, hotplug_trigger,
+							    IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
+					i915_hpd_irq_setup(dev);
+				queue_work(dev_priv->wq,
+					   &dev_priv->hotplug_work);
+			}
+			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+			I915_READ(PORT_HOTPLUG_STAT);
+		}
+
+		I915_WRITE(IIR, iir & ~flip_mask);
+		new_iir = I915_READ(IIR); /* Flush posted writes */
+
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->ring[RCS]);
+		if (iir & I915_BSD_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->ring[VCS]);
+
+		for_each_pipe(pipe) {
+			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
+			    i915_handle_vblank(dev, pipe, pipe, iir))
+				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
+
+			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+				blc_event = true;
+		}
+
+		if (blc_event || (iir & I915_ASLE_INTERRUPT))
+			intel_opregion_asle_intr(dev);
+
+		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+			gmbus_irq_handler(dev);
+
+		/* With MSI, interrupts are only generated when iir
+		 * transitions from zero to nonzero.  If another bit got
+		 * set while we were handling the existing iir bits, then
+		 * we would never get another interrupt.
+		 *
+		 * This is fine on non-MSI as well, as if we hit this path
+		 * we avoid exiting the interrupt handler only to generate
+		 * another one.
+		 *
+		 * Note that for MSI this could cause a stray interrupt report
+		 * if an interrupt landed in the time between writing IIR and
+		 * the posting read.  This should be rare enough to never
+		 * trigger the 99% of 100,000 interrupts test for disabling
+		 * stray interrupts.
+		 */
+		iir = new_iir;
+	}
+
+	i915_update_dri1_breadcrumb(dev);
+
+	return ret;
+}
+
+static void i965_irq_uninstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe;
+
+	if (!dev_priv)
+		return;
+
+	del_timer_sync(&dev_priv->hotplug_reenable_timer);
+
+	I915_WRITE(PORT_HOTPLUG_EN, 0);
+	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+	I915_WRITE(HWSTAM, 0xffffffff);
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe), 0);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
+
+	for_each_pipe(pipe)
+		I915_WRITE(PIPESTAT(pipe),
+			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
+	I915_WRITE(IIR, I915_READ(IIR));
+}
+
+static void i915_reenable_hotplug_timer_func(unsigned long data)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	unsigned long irqflags;
+	int i;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
+		struct drm_connector *connector;
+
+		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
+			continue;
+
+		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
+
+		list_for_each_entry(connector, &mode_config->connector_list, head) {
+			struct intel_connector *intel_connector = to_intel_connector(connector);
+
+			if (intel_connector->encoder->hpd_pin == i) {
+				if (connector->polled != intel_connector->polled)
+					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
+							 drm_get_connector_name(connector));
+				connector->polled = intel_connector->polled;
+				if (!connector->polled)
+					connector->polled = DRM_CONNECTOR_POLL_HPD;
+			}
+		}
+	}
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+void intel_irq_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
+	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+
+	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
+		    i915_hangcheck_elapsed,
+		    (unsigned long) dev);
+	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
+		    (unsigned long) dev_priv);
+
+	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
+	dev->driver->get_vblank_counter = i915_get_vblank_counter;
+	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+	else
+		dev->driver->get_vblank_timestamp = NULL;
+	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+	if (IS_VALLEYVIEW(dev)) {
+		dev->driver->irq_handler = valleyview_irq_handler;
+		dev->driver->irq_preinstall = valleyview_irq_preinstall;
+		dev->driver->irq_postinstall = valleyview_irq_postinstall;
+		dev->driver->irq_uninstall = valleyview_irq_uninstall;
+		dev->driver->enable_vblank = valleyview_enable_vblank;
+		dev->driver->disable_vblank = valleyview_disable_vblank;
+		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+		/* Share pre & uninstall handlers with ILK/SNB */
+		dev->driver->irq_handler = ivybridge_irq_handler;
+		dev->driver->irq_preinstall = ironlake_irq_preinstall;
+		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+		dev->driver->irq_uninstall = ironlake_irq_uninstall;
+		dev->driver->enable_vblank = ivybridge_enable_vblank;
+		dev->driver->disable_vblank = ivybridge_disable_vblank;
+		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		dev->driver->irq_handler = ironlake_irq_handler;
+		dev->driver->irq_preinstall = ironlake_irq_preinstall;
+		dev->driver->irq_postinstall = ironlake_irq_postinstall;
+		dev->driver->irq_uninstall = ironlake_irq_uninstall;
+		dev->driver->enable_vblank = ironlake_enable_vblank;
+		dev->driver->disable_vblank = ironlake_disable_vblank;
+		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
+	} else {
+		if (INTEL_INFO(dev)->gen == 2) {
+			dev->driver->irq_preinstall = i8xx_irq_preinstall;
+			dev->driver->irq_postinstall = i8xx_irq_postinstall;
+			dev->driver->irq_handler = i8xx_irq_handler;
+			dev->driver->irq_uninstall = i8xx_irq_uninstall;
+		} else if (INTEL_INFO(dev)->gen == 3) {
+			dev->driver->irq_preinstall = i915_irq_preinstall;
+			dev->driver->irq_postinstall = i915_irq_postinstall;
+			dev->driver->irq_uninstall = i915_irq_uninstall;
+			dev->driver->irq_handler = i915_irq_handler;
+			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+		} else {
+			dev->driver->irq_preinstall = i965_irq_preinstall;
+			dev->driver->irq_postinstall = i965_irq_postinstall;
+			dev->driver->irq_uninstall = i965_irq_uninstall;
+			dev->driver->irq_handler = i965_irq_handler;
+			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+		}
+		dev->driver->enable_vblank = i915_enable_vblank;
+		dev->driver->disable_vblank = i915_disable_vblank;
+	}
+}
+
+void intel_hpd_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+	int i;
+
+	for (i = 1; i < HPD_NUM_PINS; i++) {
+		dev_priv->hpd_stats[i].hpd_cnt = 0;
+		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
+	}
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		struct intel_connector *intel_connector = to_intel_connector(connector);
+		connector->polled = intel_connector->polled;
+		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+			connector->polled = DRM_CONNECTOR_POLL_HPD;
+	}
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_reg.h b/linux-imx/drivers/gpu/drm/i915/i915_reg.h
new file mode 100644
index 0000000..2d90f96
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_reg.h
@@ -0,0 +1,4839 @@
+/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _I915_REG_H_
+#define _I915_REG_H_
+
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
+
+#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
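+/*
+ * Example (illustrative): these interpolate between the A and B instances
+ * of a register, so _PIPE(1, _DPIO_DIV_A, _DPIO_DIV_B) evaluates to
+ * _DPIO_DIV_A + 1 * (_DPIO_DIV_B - _DPIO_DIV_A), i.e. _DPIO_DIV_B.
+ */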
+
+#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
+#define _MASKED_BIT_DISABLE(a) ((a) << 16)
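+/*
+ * Example (illustrative): registers whose top halves are per-bit write
+ * enables take masked writes, e.g. the INSTPM write in the vblank code:
+ *
+ *	_MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)
+ *		== (INSTPM_AGPBUSY_DIS << 16) | INSTPM_AGPBUSY_DIS
+ *
+ * The high half unlocks the bit, the low half sets it;
+ * _MASKED_BIT_DISABLE() unlocks it while leaving it clear.
+ */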
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga arbiter.
+ */
+#define INTEL_GMCH_CTRL		0x52
+#define INTEL_GMCH_VGA_DISABLE  (1 << 1)
+#define SNB_GMCH_CTRL		0x50
+#define    SNB_GMCH_GGMS_SHIFT	8 /* GTT Graphics Memory Size */
+#define    SNB_GMCH_GGMS_MASK	0x3
+#define    SNB_GMCH_GMS_SHIFT   3 /* Graphics Mode Select */
+#define    SNB_GMCH_GMS_MASK    0x1f
+
+
+/* PCI config space */
+
+#define HPLLCC	0xc0 /* 855 only */
+#define   GC_CLOCK_CONTROL_MASK		(0xf << 0)
+#define   GC_CLOCK_133_200		(0 << 0)
+#define   GC_CLOCK_100_200		(1 << 0)
+#define   GC_CLOCK_100_133		(2 << 0)
+#define   GC_CLOCK_166_250		(3 << 0)
+#define GCFGC2	0xda
+#define GCFGC	0xf0 /* 915+ only */
+#define   GC_LOW_FREQUENCY_ENABLE	(1 << 7)
+#define   GC_DISPLAY_CLOCK_190_200_MHZ	(0 << 4)
+#define   GC_DISPLAY_CLOCK_333_MHZ	(4 << 4)
+#define   GC_DISPLAY_CLOCK_MASK		(7 << 4)
+#define   GM45_GC_RENDER_CLOCK_MASK	(0xf << 0)
+#define   GM45_GC_RENDER_CLOCK_266_MHZ	(8 << 0)
+#define   GM45_GC_RENDER_CLOCK_320_MHZ	(9 << 0)
+#define   GM45_GC_RENDER_CLOCK_400_MHZ	(0xb << 0)
+#define   GM45_GC_RENDER_CLOCK_533_MHZ	(0xc << 0)
+#define   I965_GC_RENDER_CLOCK_MASK	(0xf << 0)
+#define   I965_GC_RENDER_CLOCK_267_MHZ	(2 << 0)
+#define   I965_GC_RENDER_CLOCK_333_MHZ	(3 << 0)
+#define   I965_GC_RENDER_CLOCK_444_MHZ	(4 << 0)
+#define   I965_GC_RENDER_CLOCK_533_MHZ	(5 << 0)
+#define   I945_GC_RENDER_CLOCK_MASK	(7 << 0)
+#define   I945_GC_RENDER_CLOCK_166_MHZ	(0 << 0)
+#define   I945_GC_RENDER_CLOCK_200_MHZ	(1 << 0)
+#define   I945_GC_RENDER_CLOCK_250_MHZ	(3 << 0)
+#define   I945_GC_RENDER_CLOCK_400_MHZ	(5 << 0)
+#define   I915_GC_RENDER_CLOCK_MASK	(7 << 0)
+#define   I915_GC_RENDER_CLOCK_166_MHZ	(0 << 0)
+#define   I915_GC_RENDER_CLOCK_200_MHZ	(1 << 0)
+#define   I915_GC_RENDER_CLOCK_333_MHZ	(4 << 0)
+#define LBB	0xf4
+
+/* Graphics reset regs */
+#define I965_GDRST 0xc0 /* PCI config register */
+#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
+#define  GRDOM_FULL	(0<<2)
+#define  GRDOM_RENDER	(1<<2)
+#define  GRDOM_MEDIA	(3<<2)
+#define  GRDOM_MASK	(3<<2)
+#define  GRDOM_RESET_ENABLE (1<<0)
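+/*
+ * Illustrative use (not part of this patch): a gen4 render-domain reset
+ * combines a domain select with the enable bit in PCI config space,
+ * roughly:
+ *
+ *	pci_write_config_byte(dev->pdev, I965_GDRST,
+ *			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ */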
+
+#define GEN6_MBCUNIT_SNPCR	0x900c /* for LLC config */
+#define   GEN6_MBC_SNPCR_SHIFT	21
+#define   GEN6_MBC_SNPCR_MASK	(3<<21)
+#define   GEN6_MBC_SNPCR_MAX	(0<<21)
+#define   GEN6_MBC_SNPCR_MED	(1<<21)
+#define   GEN6_MBC_SNPCR_LOW	(2<<21)
+#define   GEN6_MBC_SNPCR_MIN	(3<<21) /* only 1/16th of the cache is shared */
+
+#define GEN6_MBCTL		0x0907c
+#define   GEN6_MBCTL_ENABLE_BOOT_FETCH	(1 << 4)
+#define   GEN6_MBCTL_CTX_FETCH_NEEDED	(1 << 3)
+#define   GEN6_MBCTL_BME_UPDATE_ENABLE	(1 << 2)
+#define   GEN6_MBCTL_MAE_UPDATE_ENABLE	(1 << 1)
+#define   GEN6_MBCTL_BOOT_FETCH_MECH	(1 << 0)
+
+#define GEN6_GDRST	0x941c
+#define  GEN6_GRDOM_FULL		(1 << 0)
+#define  GEN6_GRDOM_RENDER		(1 << 1)
+#define  GEN6_GRDOM_MEDIA		(1 << 2)
+#define  GEN6_GRDOM_BLT			(1 << 3)
+
+#define RING_PP_DIR_BASE(ring)		((ring)->mmio_base+0x228)
+#define RING_PP_DIR_BASE_READ(ring)	((ring)->mmio_base+0x518)
+#define RING_PP_DIR_DCLV(ring)		((ring)->mmio_base+0x220)
+#define   PP_DIR_DCLV_2G		0xffffffff
+
+#define GAM_ECOCHK			0x4090
+#define   ECOCHK_SNB_BIT		(1<<10)
+#define   HSW_ECOCHK_ARB_PRIO_SOL	(1<<6)
+#define   ECOCHK_PPGTT_CACHE64B		(0x3<<3)
+#define   ECOCHK_PPGTT_CACHE4B		(0x0<<3)
+#define   ECOCHK_PPGTT_GFDT_IVB		(0x1<<4)
+#define   ECOCHK_PPGTT_LLC_IVB		(0x1<<3)
+#define   ECOCHK_PPGTT_UC_HSW		(0x1<<3)
+#define   ECOCHK_PPGTT_WT_HSW		(0x2<<3)
+#define   ECOCHK_PPGTT_WB_HSW		(0x3<<3)
+
+#define GAC_ECO_BITS			0x14090
+#define   ECOBITS_SNB_BIT		(1<<13)
+#define   ECOBITS_PPGTT_CACHE64B	(3<<8)
+#define   ECOBITS_PPGTT_CACHE4B		(0<<8)
+
+#define GAB_CTL				0x24000
+#define   GAB_CTL_CONT_AFTER_PAGEFAULT	(1<<8)
+
+/* VGA stuff */
+
+#define VGA_ST01_MDA 0x3ba
+#define VGA_ST01_CGA 0x3da
+
+#define VGA_MSR_WRITE 0x3c2
+#define VGA_MSR_READ 0x3cc
+#define   VGA_MSR_MEM_EN (1<<1)
+#define   VGA_MSR_CGA_MODE (1<<0)
+
+/*
+ * SR01 is the only VGA register touched on non-UMS setups.
+ * VLV doesn't do UMS, so the sequencer index/data registers
+ * are the only VGA registers which need to include
+ * display_mmio_offset.
+ */
+#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4)
+#define SR01			1
+#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5)
+
+#define VGA_AR_INDEX 0x3c0
+#define   VGA_AR_VID_EN (1<<5)
+#define VGA_AR_DATA_WRITE 0x3c0
+#define VGA_AR_DATA_READ 0x3c1
+
+#define VGA_GR_INDEX 0x3ce
+#define VGA_GR_DATA 0x3cf
+/* GR05 */
+#define   VGA_GR_MEM_READ_MODE_SHIFT 3
+#define     VGA_GR_MEM_READ_MODE_PLANE 1
+/* GR06 */
+#define   VGA_GR_MEM_MODE_MASK 0xc
+#define   VGA_GR_MEM_MODE_SHIFT 2
+#define   VGA_GR_MEM_A0000_AFFFF 0
+#define   VGA_GR_MEM_A0000_BFFFF 1
+#define   VGA_GR_MEM_B0000_B7FFF 2
+#define   VGA_GR_MEM_B0000_BFFFF 3
+
+#define VGA_DACMASK 0x3c6
+#define VGA_DACRX 0x3c7
+#define VGA_DACWX 0x3c8
+#define VGA_DACDATA 0x3c9
+
+#define VGA_CR_INDEX_MDA 0x3b4
+#define VGA_CR_DATA_MDA 0x3b5
+#define VGA_CR_INDEX_CGA 0x3d4
+#define VGA_CR_DATA_CGA 0x3d5
+
+/*
+ * Memory interface instructions used by the kernel
+ */
+#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
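+/*
+ * Example (illustrative): MI_USER_INTERRUPT below expands to
+ * MI_INSTR(0x02, 0) == (0x02 << 23) == 0x01000000.
+ */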
+
+#define MI_NOOP			MI_INSTR(0, 0)
+#define MI_USER_INTERRUPT	MI_INSTR(0x02, 0)
+#define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
+#define   MI_WAIT_FOR_OVERLAY_FLIP	(1<<16)
+#define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
+#define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
+#define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+#define MI_FLUSH		MI_INSTR(0x04, 0)
+#define   MI_READ_FLUSH		(1 << 0)
+#define   MI_EXE_FLUSH		(1 << 1)
+#define   MI_NO_WRITE_FLUSH	(1 << 2)
+#define   MI_SCENE_COUNT	(1 << 3) /* just increment scene count */
+#define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
+#define   MI_INVALIDATE_ISP	(1 << 5) /* invalidate indirect state pointers */
+#define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH	MI_INSTR(0x0b, 0)
+#define   MI_SUSPEND_FLUSH_EN	(1<<0)
+#define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
+#define MI_OVERLAY_FLIP		MI_INSTR(0x11, 0)
+#define   MI_OVERLAY_CONTINUE	(0x0<<21)
+#define   MI_OVERLAY_ON		(0x1<<21)
+#define   MI_OVERLAY_OFF	(0x2<<21)
+#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP		MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_I915	MI_INSTR(0x14, 1)
+#define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define   MI_DISPLAY_FLIP_IVB_PLANE_A  (0 << 19)
+#define   MI_DISPLAY_FLIP_IVB_PLANE_B  (1 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define   MI_DISPLAY_FLIP_IVB_PLANE_C  (4 << 19)
+#define   MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+#define MI_ARB_ON_OFF		MI_INSTR(0x08, 0)
+#define   MI_ARB_ENABLE			(1<<0)
+#define   MI_ARB_DISABLE		(0<<0)
+
+#define MI_SET_CONTEXT		MI_INSTR(0x18, 0)
+#define   MI_MM_SPACE_GTT		(1<<8)
+#define   MI_MM_SPACE_PHYSICAL		(0<<8)
+#define   MI_SAVE_EXT_STATE_EN		(1<<3)
+#define   MI_RESTORE_EXT_STATE_EN	(1<<2)
+#define   MI_FORCE_RESTORE		(1<<1)
+#define   MI_RESTORE_INHIBIT		(1<<0)
+#define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
+#define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
+#define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
+#define   MI_STORE_DWORD_INDEX_SHIFT 2
+/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ *   simply ignores the register load under certain conditions.
+ * - One can actually load arbitrarily many registers: simply issue x
+ *   address/value pairs. Don't overdo it, though: x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
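+/*
+ * Illustrative use (not part of this patch), honouring the MI_NOOP rule
+ * above with the gen6+ ring helpers; "reg" and "val" are placeholders:
+ *
+ *	ret = intel_ring_begin(ring, 4);
+ *	if (ret)
+ *		return ret;
+ *	intel_ring_emit(ring, MI_NOOP);
+ *	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ *	intel_ring_emit(ring, reg);
+ *	intel_ring_emit(ring, val);
+ *	intel_ring_advance(ring);
+ */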
+#define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
+#define   MI_FLUSH_DW_STORE_INDEX	(1<<21)
+#define   MI_INVALIDATE_TLB		(1<<18)
+#define   MI_FLUSH_DW_OP_STOREDW	(1<<14)
+#define   MI_INVALIDATE_BSD		(1<<7)
+#define   MI_FLUSH_DW_USE_GTT		(1<<2)
+#define   MI_FLUSH_DW_USE_PPGTT		(0<<2)
+#define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
+#define   MI_BATCH_NON_SECURE		(1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define   MI_BATCH_NON_SECURE_I965 	(1<<8)
+#define   MI_BATCH_PPGTT_HSW		(1<<8)
+#define   MI_BATCH_NON_SECURE_HSW 	(1<<13)
+#define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
+#define   MI_BATCH_GTT		    (2<<6) /* aliased with (1<<7) on gen4 */
+#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
+#define  MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
+#define  MI_SEMAPHORE_UPDATE	    (1<<21)
+#define  MI_SEMAPHORE_COMPARE	    (1<<20)
+#define  MI_SEMAPHORE_REGISTER	    (1<<18)
+#define  MI_SEMAPHORE_SYNC_RV	    (2<<16)
+#define  MI_SEMAPHORE_SYNC_RB	    (0<<16)
+#define  MI_SEMAPHORE_SYNC_VR	    (0<<16)
+#define  MI_SEMAPHORE_SYNC_VB	    (2<<16)
+#define  MI_SEMAPHORE_SYNC_BR	    (2<<16)
+#define  MI_SEMAPHORE_SYNC_BV	    (0<<16)
+#define  MI_SEMAPHORE_SYNC_INVALID  (1<<0)
+/*
+ * 3D instructions used by the kernel
+ */
+#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+
+#define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
+#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define   SC_UPDATE_SCISSOR       (0x1<<1)
+#define   SC_ENABLE_MASK          (0x1<<0)
+#define   SC_ENABLE               (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define   SCI_YMIN_MASK      (0xffff<<16)
+#define   SCI_XMIN_MASK      (0xffff<<0)
+#define   SCI_YMAX_MASK      (0xffff<<16)
+#define   SCI_XMAX_MASK      (0xffff<<0)
+#define GFX_OP_SCISSOR_ENABLE	 ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT	 ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
+#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
+#define XY_MONO_SRC_COPY_IMM_BLT	((2<<29)|(0x71<<22)|5)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
+#define   BLT_DEPTH_8			(0<<24)
+#define   BLT_DEPTH_16_565		(1<<24)
+#define   BLT_DEPTH_16_1555		(2<<24)
+#define   BLT_DEPTH_32			(3<<24)
+#define   BLT_ROP_GXCOPY		(0xcc<<16)
+#define XY_SRC_COPY_BLT_SRC_TILED	(1<<15) /* 965+ only */
+#define XY_SRC_COPY_BLT_DST_TILED	(1<<11) /* 965+ only */
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define   ASYNC_FLIP                (1<<22)
+#define   DISPLAY_PLANE_A           (0<<20)
+#define   DISPLAY_PLANE_B           (1<<20)
+#define GFX_OP_PIPE_CONTROL(len)	((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define   PIPE_CONTROL_GLOBAL_GTT_IVB			(1<<24) /* gen7+ */
+#define   PIPE_CONTROL_CS_STALL				(1<<20)
+#define   PIPE_CONTROL_TLB_INVALIDATE			(1<<18)
+#define   PIPE_CONTROL_QW_WRITE				(1<<14)
+#define   PIPE_CONTROL_DEPTH_STALL			(1<<13)
+#define   PIPE_CONTROL_WRITE_FLUSH			(1<<12)
+#define   PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH	(1<<12) /* gen6+ */
+#define   PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE	(1<<11) /* MBZ on Ironlake */
+#define   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE		(1<<10) /* GM45+ only */
+#define   PIPE_CONTROL_INDIRECT_STATE_DISABLE		(1<<9)
+#define   PIPE_CONTROL_NOTIFY				(1<<8)
+#define   PIPE_CONTROL_VF_CACHE_INVALIDATE		(1<<4)
+#define   PIPE_CONTROL_CONST_CACHE_INVALIDATE		(1<<3)
+#define   PIPE_CONTROL_STATE_CACHE_INVALIDATE		(1<<2)
+#define   PIPE_CONTROL_STALL_AT_SCOREBOARD		(1<<1)
+#define   PIPE_CONTROL_DEPTH_CACHE_FLUSH		(1<<0)
+#define   PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+
+
+/*
+ * Reset registers
+ */
+#define DEBUG_RESET_I830		0x6070
+#define  DEBUG_RESET_FULL		(1<<7)
+#define  DEBUG_RESET_RENDER		(1<<8)
+#define  DEBUG_RESET_DISPLAY		(1<<9)
+
+/*
+ * DPIO - a special bus for various display related registers to hide behind:
+ *  0x800c: m1, m2, n, p1, p2, k dividers
+ *  0x8014: REF and SFR select
+ *  0x8014: N divider, VCO select
+ *  0x801c/3c: core clock bits
+ *  0x8048/68: low pass filter coefficients
+ *  0x8100: fast clock controls
+ *
+ * DPIO is VLV only.
+ */
+#define DPIO_PKT			(VLV_DISPLAY_BASE + 0x2100)
+#define  DPIO_RID			(0<<24)
+#define  DPIO_OP_WRITE			(1<<16)
+#define  DPIO_OP_READ			(0<<16)
+#define  DPIO_PORTID			(0x12<<8)
+#define  DPIO_BYTE			(0xf<<4)
+#define  DPIO_BUSY			(1<<0) /* status only */
+#define DPIO_DATA			(VLV_DISPLAY_BASE + 0x2104)
+#define DPIO_REG			(VLV_DISPLAY_BASE + 0x2108)
+#define DPIO_CTL			(VLV_DISPLAY_BASE + 0x2110)
+#define  DPIO_MODSEL1			(1<<3) /* if ref clk b == 27 */
+#define  DPIO_MODSEL0			(1<<2) /* if ref clk a == 27 */
+#define  DPIO_SFR_BYPASS		(1<<1)
+#define  DPIO_RESET			(1<<0)
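+/*
+ * Illustrative read sequence (not part of this patch); the actual helper
+ * lives in the driver core, this only sketches the register protocol:
+ *
+ *	I915_WRITE(DPIO_REG, reg);
+ *	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
+ *		   DPIO_BYTE | DPIO_BUSY);
+ *	... poll DPIO_PKT until DPIO_BUSY clears ...
+ *	val = I915_READ(DPIO_DATA);
+ */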
+
+#define _DPIO_DIV_A			0x800c
+#define   DPIO_POST_DIV_SHIFT		(28) /* 3 bits */
+#define   DPIO_K_SHIFT			(24) /* 4 bits */
+#define   DPIO_P1_SHIFT			(21) /* 3 bits */
+#define   DPIO_P2_SHIFT			(16) /* 5 bits */
+#define   DPIO_N_SHIFT			(12) /* 4 bits */
+#define   DPIO_ENABLE_CALIBRATION	(1<<11)
+#define   DPIO_M1DIV_SHIFT		(8) /* 3 bits */
+#define   DPIO_M2DIV_MASK		0xff
+#define _DPIO_DIV_B			0x802c
+#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
+
+#define _DPIO_REFSFR_A			0x8014
+#define   DPIO_REFSEL_OVERRIDE		27
+#define   DPIO_PLL_MODESEL_SHIFT	24 /* 3 bits */
+#define   DPIO_BIAS_CURRENT_CTL_SHIFT	21 /* 3 bits, always 0x7 */
+#define   DPIO_PLL_REFCLK_SEL_SHIFT	16 /* 2 bits */
+#define   DPIO_PLL_REFCLK_SEL_MASK	3
+#define   DPIO_DRIVER_CTL_SHIFT		12 /* always set to 0x8 */
+#define   DPIO_CLK_BIAS_CTL_SHIFT	8 /* always set to 0x5 */
+#define _DPIO_REFSFR_B			0x8034
+#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
+
+#define _DPIO_CORE_CLK_A		0x801c
+#define _DPIO_CORE_CLK_B		0x803c
+#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
+
+#define _DPIO_LFP_COEFF_A		0x8048
+#define _DPIO_LFP_COEFF_B		0x8068
+#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
+
+#define DPIO_FASTCLK_DISABLE		0x8100
+
+#define DPIO_DATA_CHANNEL1		0x8220
+#define DPIO_DATA_CHANNEL2		0x8420
+
+/*
+ * Fence registers
+ */
+#define FENCE_REG_830_0			0x2000
+#define FENCE_REG_945_8			0x3000
+#define   I830_FENCE_START_MASK		0x07f80000
+#define   I830_FENCE_TILING_Y_SHIFT	12
+#define   I830_FENCE_SIZE_BITS(size)	((ffs((size) >> 19) - 1) << 8)
+#define   I830_FENCE_PITCH_SHIFT	4
+#define   I830_FENCE_REG_VALID		(1<<0)
+#define   I915_FENCE_MAX_PITCH_VAL	4
+#define   I830_FENCE_MAX_PITCH_VAL	6
+#define   I830_FENCE_MAX_SIZE_VAL	(1<<8)
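+/*
+ * Worked example (illustrative): a 1MB object on 830 gives
+ * I830_FENCE_SIZE_BITS(1 << 20) == ((ffs((1 << 20) >> 19) - 1) << 8)
+ * == ((ffs(2) - 1) << 8) == (1 << 8).
+ */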
+
+#define   I915_FENCE_START_MASK		0x0ff00000
+#define   I915_FENCE_SIZE_BITS(size)	((ffs((size) >> 20) - 1) << 8)
+
+#define FENCE_REG_965_0			0x03000
+#define   I965_FENCE_PITCH_SHIFT	2
+#define   I965_FENCE_TILING_Y_SHIFT	1
+#define   I965_FENCE_REG_VALID		(1<<0)
+#define   I965_FENCE_MAX_PITCH_VAL	0x0400
+
+#define FENCE_REG_SANDYBRIDGE_0		0x100000
+#define   SANDYBRIDGE_FENCE_PITCH_SHIFT	32
+#define   GEN7_FENCE_MAX_PITCH_VAL	0x0800
+
+/* control register for cpu gtt access */
+#define TILECTL				0x101000
+#define   TILECTL_SWZCTL			(1 << 0)
+#define   TILECTL_TLB_PREFETCH_DIS	(1 << 2)
+#define   TILECTL_BACKSNOOP_DIS		(1 << 3)
+
+/*
+ * Instruction and interrupt control regs
+ */
+#define PGTBL_ER	0x02024
+#define RENDER_RING_BASE	0x02000
+#define BSD_RING_BASE		0x04000
+#define GEN6_BSD_RING_BASE	0x12000
+#define BLT_RING_BASE		0x22000
+#define RING_TAIL(base)		((base)+0x30)
+#define RING_HEAD(base)		((base)+0x34)
+#define RING_START(base)	((base)+0x38)
+#define RING_CTL(base)		((base)+0x3c)
+#define RING_SYNC_0(base)	((base)+0x40)
+#define RING_SYNC_1(base)	((base)+0x44)
+#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
+#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
+#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
+#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
+#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
+#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
+#define RING_MAX_IDLE(base)	((base)+0x54)
+#define RING_HWS_PGA(base)	((base)+0x80)
+#define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
+#define ARB_MODE		0x04030
+#define   ARB_MODE_SWIZZLE_SNB	(1<<4)
+#define   ARB_MODE_SWIZZLE_IVB	(1<<5)
+#define RENDER_HWS_PGA_GEN7	(0x04080)
+#define RING_FAULT_REG(ring)	(0x4094 + 0x100*(ring)->id)
+#define DONE_REG		0x40b0
+#define BSD_HWS_PGA_GEN7	(0x04180)
+#define BLT_HWS_PGA_GEN7	(0x04280)
+#define RING_ACTHD(base)	((base)+0x74)
+#define RING_NOPID(base)	((base)+0x94)
+#define RING_IMR(base)		((base)+0xa8)
+#define RING_TIMESTAMP(base)	((base)+0x358)
+#define   TAIL_ADDR		0x001FFFF8
+#define   HEAD_WRAP_COUNT	0xFFE00000
+#define   HEAD_WRAP_ONE		0x00200000
+#define   HEAD_ADDR		0x001FFFFC
+#define   RING_NR_PAGES		0x001FF000
+#define   RING_REPORT_MASK	0x00000006
+#define   RING_REPORT_64K	0x00000002
+#define   RING_REPORT_128K	0x00000004
+#define   RING_NO_REPORT	0x00000000
+#define   RING_VALID_MASK	0x00000001
+#define   RING_VALID		0x00000001
+#define   RING_INVALID		0x00000000
+#define   RING_WAIT_I8XX	(1<<0) /* gen2, PRBx_HEAD */
+#define   RING_WAIT		(1<<11) /* gen3+, PRBx_CTL */
+#define   RING_WAIT_SEMAPHORE	(1<<10) /* gen6+ */
+#if 0
+#define PRB0_TAIL	0x02030
+#define PRB0_HEAD	0x02034
+#define PRB0_START	0x02038
+#define PRB0_CTL	0x0203c
+#define PRB1_TAIL	0x02040 /* 915+ only */
+#define PRB1_HEAD	0x02044 /* 915+ only */
+#define PRB1_START	0x02048 /* 915+ only */
+#define PRB1_CTL	0x0204c /* 915+ only */
+#endif
+#define IPEIR_I965	0x02064
+#define IPEHR_I965	0x02068
+#define INSTDONE_I965	0x0206c
+#define GEN7_INSTDONE_1		0x0206c
+#define GEN7_SC_INSTDONE	0x07100
+#define GEN7_SAMPLER_INSTDONE	0x0e160
+#define GEN7_ROW_INSTDONE	0x0e164
+#define I915_NUM_INSTDONE_REG	4
+#define RING_IPEIR(base)	((base)+0x64)
+#define RING_IPEHR(base)	((base)+0x68)
+#define RING_INSTDONE(base)	((base)+0x6c)
+#define RING_INSTPS(base)	((base)+0x70)
+#define RING_DMA_FADD(base)	((base)+0x78)
+#define RING_INSTPM(base)	((base)+0xc0)
+#define INSTPS		0x02070 /* 965+ only */
+#define INSTDONE1	0x0207c /* 965+ only */
+#define ACTHD_I965	0x02074
+#define HWS_PGA		0x02080
+#define HWS_ADDRESS_MASK	0xfffff000
+#define HWS_START_ADDRESS_SHIFT	4
+#define PWRCTXA		0x2088 /* 965GM+ only */
+#define   PWRCTX_EN	(1<<0)
+#define IPEIR		0x02088
+#define IPEHR		0x0208c
+#define INSTDONE	0x02090
+#define NOPID		0x02094
+#define HWSTAM		0x02098
+#define DMA_FADD_I8XX	0x020d0
+
+#define ERROR_GEN6	0x040a0
+#define GEN7_ERR_INT	0x44040
+#define   ERR_INT_MMIO_UNCLAIMED (1<<13)
+
+#define FPGA_DBG		0x42300
+#define   FPGA_DBG_RM_NOCLAIM	(1<<31)
+
+#define DERRMR		0x44050
+
+/* GM45+ chicken bits -- debug workaround bits that may be required
+ * for various sorts of correct behavior.  The top 16 bits of each are
+ * the enables for writing to the corresponding low bit.
+ */
+#define _3D_CHICKEN	0x02084
+#define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB	(1 << 10)
+#define _3D_CHICKEN2	0x0208c
+/* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+# define _3D_CHICKEN2_WM_READ_PIPELINED			(1 << 14)
+#define _3D_CHICKEN3	0x02090
+#define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL		(1 << 10)
+#define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL		(1 << 5)
+
+#define MI_MODE		0x0209c
+# define VS_TIMER_DISPATCH				(1 << 6)
+# define MI_FLUSH_ENABLE				(1 << 12)
+# define ASYNC_FLIP_PERF_DISABLE			(1 << 14)
+
+#define GEN6_GT_MODE	0x20d0
+#define   GEN6_GT_MODE_HI				(1 << 9)
+#define   GEN6_TD_FOUR_ROW_DISPATCH_DISABLE		(1 << 5)
+
+#define GFX_MODE	0x02520
+#define GFX_MODE_GEN7	0x0229c
+#define RING_MODE_GEN7(ring)	((ring)->mmio_base+0x29c)
+#define   GFX_RUN_LIST_ENABLE		(1<<15)
+#define   GFX_TLB_INVALIDATE_ALWAYS	(1<<13)
+#define   GFX_SURFACE_FAULT_ENABLE	(1<<12)
+#define   GFX_REPLAY_MODE		(1<<11)
+#define   GFX_PSMI_GRANULARITY		(1<<10)
+#define   GFX_PPGTT_ENABLE		(1<<9)
+
+#define VLV_DISPLAY_BASE 0x180000
+
+#define SCPD0		0x0209c /* 915+ only */
+#define IER		0x020a0
+#define IIR		0x020a4
+#define IMR		0x020a8
+#define ISR		0x020ac
+#define VLV_GUNIT_CLOCK_GATE	(VLV_DISPLAY_BASE + 0x2060)
+#define   GCFG_DIS		(1<<8)
+#define VLV_IIR_RW	(VLV_DISPLAY_BASE + 0x2084)
+#define VLV_IER		(VLV_DISPLAY_BASE + 0x20a0)
+#define VLV_IIR		(VLV_DISPLAY_BASE + 0x20a4)
+#define VLV_IMR		(VLV_DISPLAY_BASE + 0x20a8)
+#define VLV_ISR		(VLV_DISPLAY_BASE + 0x20ac)
+#define   I915_PIPE_CONTROL_NOTIFY_INTERRUPT		(1<<18)
+#define   I915_DISPLAY_PORT_INTERRUPT			(1<<17)
+#define   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT	(1<<15)
+#define   I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT	(1<<14) /* p-state */
+#define   I915_HWB_OOM_INTERRUPT			(1<<13)
+#define   I915_SYNC_STATUS_INTERRUPT			(1<<12)
+#define   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT	(1<<11)
+#define   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT	(1<<10)
+#define   I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT	(1<<9)
+#define   I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT	(1<<8)
+#define   I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT		(1<<7)
+#define   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT		(1<<6)
+#define   I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT		(1<<5)
+#define   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT		(1<<4)
+#define   I915_DEBUG_INTERRUPT				(1<<2)
+#define   I915_USER_INTERRUPT				(1<<1)
+#define   I915_ASLE_INTERRUPT				(1<<0)
+#define   I915_BSD_USER_INTERRUPT                      (1<<25)
+#define   DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
+#define EIR		0x020b0
+#define EMR		0x020b4
+#define ESR		0x020b8
+#define   GM45_ERROR_PAGE_TABLE				(1<<5)
+#define   GM45_ERROR_MEM_PRIV				(1<<4)
+#define   I915_ERROR_PAGE_TABLE				(1<<4)
+#define   GM45_ERROR_CP_PRIV				(1<<3)
+#define   I915_ERROR_MEMORY_REFRESH			(1<<1)
+#define   I915_ERROR_INSTRUCTION			(1<<0)
+#define INSTPM	        0x020c0
+#define   INSTPM_SELF_EN (1<<12) /* 915GM only */
+#define   INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
+					will not assert AGPBUSY# and will only
+					be delivered when out of C3. */
+#define   INSTPM_FORCE_ORDERING				(1<<7) /* GEN6+ */
+#define   INSTPM_TLB_INVALIDATE	(1<<9)
+#define   INSTPM_SYNC_FLUSH	(1<<5)
+#define ACTHD	        0x020c8
+#define FW_BLC		0x020d8
+#define FW_BLC2		0x020dc
+#define FW_BLC_SELF	0x020e0 /* 915+ only */
+#define   FW_BLC_SELF_EN_MASK      (1<<31)
+#define   FW_BLC_SELF_FIFO_MASK    (1<<16) /* 945 only */
+#define   FW_BLC_SELF_EN           (1<<15) /* 945 only */
+#define MM_BURST_LENGTH     0x00700000
+#define MM_FIFO_WATERMARK   0x0001F000
+#define LM_BURST_LENGTH     0x00000700
+#define LM_FIFO_WATERMARK   0x0000001F
+#define MI_ARB_STATE	0x020e4 /* 915+ only */
+
+/* Make render/texture TLB fetches lower priority than associated data
+ *   fetches. This is not turned on by default.
+ */
+#define   MI_ARB_RENDER_TLB_LOW_PRIORITY	(1 << 15)
+
+/* Isoch request wait on GTT enable (Display A/B/C streams).
+ * Make isoch requests stall on the TLB update. May cause
+ * display underruns (test mode only)
+ */
+#define   MI_ARB_ISOCH_WAIT_GTT			(1 << 14)
+
+/* Block grant count for isoch requests when block count is
+ * set to a finite value.
+ */
+#define   MI_ARB_BLOCK_GRANT_MASK		(3 << 12)
+#define   MI_ARB_BLOCK_GRANT_8			(0 << 12)	/* for 3 display planes */
+#define   MI_ARB_BLOCK_GRANT_4			(1 << 12)	/* for 2 display planes */
+#define   MI_ARB_BLOCK_GRANT_2			(2 << 12)	/* for 1 display plane */
+#define   MI_ARB_BLOCK_GRANT_0			(3 << 12)	/* don't use */
+
+/* Enable render writes to complete in C2/C3/C4 power states.
+ * If this isn't enabled, render writes are prevented in low
+ * power states. That seems bad to me.
+ */
+#define   MI_ARB_C3_LP_WRITE_ENABLE		(1 << 11)
+
+/* This acknowledges an async flip immediately instead
+ * of waiting for 2 TLB fetches.
+ */
+#define   MI_ARB_ASYNC_FLIP_ACK_IMMEDIATE	(1 << 10)
+
+/* Enables non-sequential data reads through the arbiter.
+ */
+#define   MI_ARB_DUAL_DATA_PHASE_DISABLE	(1 << 9)
+
+/* Disable FSB snooping of cacheable write cycles from binner/render
+ * command stream
+ */
+#define   MI_ARB_CACHE_SNOOP_DISABLE		(1 << 8)
+
+/* Arbiter time slice for non-isoch streams */
+#define   MI_ARB_TIME_SLICE_MASK		(7 << 5)
+#define   MI_ARB_TIME_SLICE_1			(0 << 5)
+#define   MI_ARB_TIME_SLICE_2			(1 << 5)
+#define   MI_ARB_TIME_SLICE_4			(2 << 5)
+#define   MI_ARB_TIME_SLICE_6			(3 << 5)
+#define   MI_ARB_TIME_SLICE_8			(4 << 5)
+#define   MI_ARB_TIME_SLICE_10			(5 << 5)
+#define   MI_ARB_TIME_SLICE_14			(6 << 5)
+#define   MI_ARB_TIME_SLICE_16			(7 << 5)
+
+/* Low priority grace period page size */
+#define   MI_ARB_LOW_PRIORITY_GRACE_4KB		(0 << 4)	/* default */
+#define   MI_ARB_LOW_PRIORITY_GRACE_8KB		(1 << 4)
+
+/* Disable display A/B trickle feed */
+#define   MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE	(1 << 2)
+
+/* Set display plane priority */
+#define   MI_ARB_DISPLAY_PRIORITY_A_B		(0 << 0)	/* display A > display B */
+#define   MI_ARB_DISPLAY_PRIORITY_B_A		(1 << 0)	/* display B > display A */
+
+#define CACHE_MODE_0	0x02120 /* 915+ only */
+#define   CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
+#define   CM0_IZ_OPT_DISABLE      (1<<6)
+#define   CM0_ZR_OPT_DISABLE      (1<<5)
+#define	  CM0_STC_EVICT_DISABLE_LRA_SNB	(1<<5)
+#define   CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define   CM0_COLOR_EVICT_DISABLE (1<<3)
+#define   CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define BB_ADDR		0x02140 /* 8 bytes */
+#define GFX_FLSH_CNTL	0x02170 /* 915+ only */
+#define GFX_FLSH_CNTL_GEN6	0x101008
+#define   GFX_FLSH_CNTL_EN	(1<<0)
+#define ECOSKPD		0x021d0
+#define   ECO_GATING_CX_ONLY	(1<<3)
+#define   ECO_FLIP_DONE		(1<<0)
+
+#define CACHE_MODE_1		0x7004 /* IVB+ */
+#define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
+
+/* GEN6 interrupt control
+ * Note that the per-ring interrupt bits do alias with the global interrupt bits
+ * in GTIMR. */
+#define GEN6_RENDER_HWSTAM	0x2098
+#define GEN6_RENDER_IMR		0x20a8
+#define   GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT		(1 << 8)
+#define   GEN6_RENDER_PPGTT_PAGE_FAULT			(1 << 7)
+#define   GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED		(1 << 6)
+#define   GEN6_RENDER_L3_PARITY_ERROR			(1 << 5)
+#define   GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT	(1 << 4)
+#define   GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR	(1 << 3)
+#define   GEN6_RENDER_SYNC_STATUS			(1 << 2)
+#define   GEN6_RENDER_DEBUG_INTERRUPT			(1 << 1)
+#define   GEN6_RENDER_USER_INTERRUPT			(1 << 0)
+
+#define GEN6_BLITTER_HWSTAM	0x22098
+#define GEN6_BLITTER_IMR	0x220a8
+#define   GEN6_BLITTER_MI_FLUSH_DW_NOTIFY_INTERRUPT	(1 << 26)
+#define   GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR	(1 << 25)
+#define   GEN6_BLITTER_SYNC_STATUS			(1 << 24)
+#define   GEN6_BLITTER_USER_INTERRUPT			(1 << 22)
+
+#define GEN6_BLITTER_ECOSKPD	0x221d0
+#define   GEN6_BLITTER_LOCK_SHIFT			16
+#define   GEN6_BLITTER_FBC_NOTIFY			(1<<3)
+
+#define GEN6_BSD_SLEEP_PSMI_CONTROL	0x12050
+#define   GEN6_BSD_SLEEP_MSG_DISABLE	(1 << 0)
+#define   GEN6_BSD_SLEEP_FLUSH_DISABLE	(1 << 2)
+#define   GEN6_BSD_SLEEP_INDICATOR	(1 << 3)
+#define   GEN6_BSD_GO_INDICATOR		(1 << 4)
+
+#define GEN6_BSD_HWSTAM			0x12098
+#define GEN6_BSD_IMR			0x120a8
+#define   GEN6_BSD_USER_INTERRUPT	(1 << 12)
+
+#define GEN6_BSD_RNCID			0x12198
+
+#define GEN7_FF_THREAD_MODE		0x20a0
+#define   GEN7_FF_SCHED_MASK		0x0077070
+#define   GEN7_FF_TS_SCHED_HS1		(0x5<<16)
+#define   GEN7_FF_TS_SCHED_HS0		(0x3<<16)
+#define   GEN7_FF_TS_SCHED_LOAD_BALANCE	(0x1<<16)
+#define   GEN7_FF_TS_SCHED_HW		(0x0<<16) /* Default */
+#define   GEN7_FF_VS_REF_CNT_FFME	(1 << 15)
+#define   GEN7_FF_VS_SCHED_HS1		(0x5<<12)
+#define   GEN7_FF_VS_SCHED_HS0		(0x3<<12)
+#define   GEN7_FF_VS_SCHED_LOAD_BALANCE	(0x1<<12) /* Default */
+#define   GEN7_FF_VS_SCHED_HW		(0x0<<12)
+#define   GEN7_FF_DS_SCHED_HS1		(0x5<<4)
+#define   GEN7_FF_DS_SCHED_HS0		(0x3<<4)
+#define   GEN7_FF_DS_SCHED_LOAD_BALANCE	(0x1<<4)  /* Default */
+#define   GEN7_FF_DS_SCHED_HW		(0x0<<4)
+
+/*
+ * Framebuffer compression (915+ only)
+ */
+
+#define FBC_CFB_BASE		0x03200 /* 4k page aligned */
+#define FBC_LL_BASE		0x03204 /* 4k page aligned */
+#define FBC_CONTROL		0x03208
+#define   FBC_CTL_EN		(1<<31)
+#define   FBC_CTL_PERIODIC	(1<<30)
+#define   FBC_CTL_INTERVAL_SHIFT (16)
+#define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_CTL_C3_IDLE	(1<<13)
+#define   FBC_CTL_STRIDE_SHIFT	(5)
+#define   FBC_CTL_FENCENO	(1<<0)
+#define FBC_COMMAND		0x0320c
+#define   FBC_CMD_COMPRESS	(1<<0)
+#define FBC_STATUS		0x03210
+#define   FBC_STAT_COMPRESSING	(1<<31)
+#define   FBC_STAT_COMPRESSED	(1<<30)
+#define   FBC_STAT_MODIFIED	(1<<29)
+#define   FBC_STAT_CURRENT_LINE	(1<<0)
+#define FBC_CONTROL2		0x03214
+#define   FBC_CTL_FENCE_DBL	(0<<4)
+#define   FBC_CTL_IDLE_IMM	(0<<2)
+#define   FBC_CTL_IDLE_FULL	(1<<2)
+#define   FBC_CTL_IDLE_LINE	(2<<2)
+#define   FBC_CTL_IDLE_DEBUG	(3<<2)
+#define   FBC_CTL_CPU_FENCE	(1<<1)
+#define   FBC_CTL_PLANEA	(0<<0)
+#define   FBC_CTL_PLANEB	(1<<0)
+#define FBC_FENCE_OFF		0x0321b
+#define FBC_TAG			0x03300
+
+#define FBC_LL_SIZE		(1536)
+
+/* Framebuffer compression for GM45+ */
+#define DPFC_CB_BASE		0x3200
+#define DPFC_CONTROL		0x3208
+#define   DPFC_CTL_EN		(1<<31)
+#define   DPFC_CTL_PLANEA	(0<<30)
+#define   DPFC_CTL_PLANEB	(1<<30)
+#define   DPFC_CTL_FENCE_EN	(1<<29)
+#define   DPFC_CTL_PERSISTENT_MODE	(1<<25)
+#define   DPFC_SR_EN		(1<<10)
+#define   DPFC_CTL_LIMIT_1X	(0<<6)
+#define   DPFC_CTL_LIMIT_2X	(1<<6)
+#define   DPFC_CTL_LIMIT_4X	(2<<6)
+#define DPFC_RECOMP_CTL		0x320c
+#define   DPFC_RECOMP_STALL_EN	(1<<27)
+#define   DPFC_RECOMP_STALL_WM_SHIFT (16)
+#define   DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
+#define   DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
+#define   DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
+#define DPFC_STATUS		0x3210
+#define   DPFC_INVAL_SEG_SHIFT  (16)
+#define   DPFC_INVAL_SEG_MASK	(0x07ff0000)
+#define   DPFC_COMP_SEG_SHIFT	(0)
+#define   DPFC_COMP_SEG_MASK	(0x000003ff)
+#define DPFC_STATUS2		0x3214
+#define DPFC_FENCE_YOFF		0x3218
+#define DPFC_CHICKEN		0x3224
+#define   DPFC_HT_MODIFY	(1<<31)
+
+/* Framebuffer compression for Ironlake */
+#define ILK_DPFC_CB_BASE	0x43200
+#define ILK_DPFC_CONTROL	0x43208
+/* Bits 28:8 are reserved */
+#define   DPFC_RESERVED		(0x1FFFFF00)
+#define ILK_DPFC_RECOMP_CTL	0x4320c
+#define ILK_DPFC_STATUS		0x43210
+#define ILK_DPFC_FENCE_YOFF	0x43218
+#define ILK_DPFC_CHICKEN	0x43224
+#define ILK_FBC_RT_BASE		0x2128
+#define   ILK_FBC_RT_VALID	(1<<0)
+
+#define ILK_DISPLAY_CHICKEN1	0x42000
+#define   ILK_FBCQ_DIS		(1<<22)
+#define	  ILK_PABSTRETCH_DIS	(1<<21)
+
+
+/*
+ * Framebuffer compression for Sandybridge
+ *
+ * The following two registers are of type GTTMMADR
+ */
+#define SNB_DPFC_CTL_SA		0x100100
+#define   SNB_CPU_FENCE_ENABLE	(1<<29)
+#define DPFC_CPU_FENCE_OFFSET	0x100104
+
+
+/*
+ * GPIO regs
+ */
+#define GPIOA			0x5010
+#define GPIOB			0x5014
+#define GPIOC			0x5018
+#define GPIOD			0x501c
+#define GPIOE			0x5020
+#define GPIOF			0x5024
+#define GPIOG			0x5028
+#define GPIOH			0x502c
+# define GPIO_CLOCK_DIR_MASK		(1 << 0)
+# define GPIO_CLOCK_DIR_IN		(0 << 1)
+# define GPIO_CLOCK_DIR_OUT		(1 << 1)
+# define GPIO_CLOCK_VAL_MASK		(1 << 2)
+# define GPIO_CLOCK_VAL_OUT		(1 << 3)
+# define GPIO_CLOCK_VAL_IN		(1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE	(1 << 5)
+# define GPIO_DATA_DIR_MASK		(1 << 8)
+# define GPIO_DATA_DIR_IN		(0 << 9)
+# define GPIO_DATA_DIR_OUT		(1 << 9)
+# define GPIO_DATA_VAL_MASK		(1 << 10)
+# define GPIO_DATA_VAL_OUT		(1 << 11)
+# define GPIO_DATA_VAL_IN		(1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
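+
+/*
+ * Sketch (editor's addition, hypothetical helper): each GPIO *_MASK bit acts
+ * as a write enable for the value/direction bit beside it, so bit-banged I2C
+ * can update one line without disturbing the other. Driving SDA low, for
+ * instance, combines direction-out with a zero output value:
+ */
+static inline u32 gpio_drive_data_low_example(void)
+{
+	return GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT | GPIO_DATA_VAL_MASK;
+}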
+
+#define GMBUS0			0x5100 /* clock/port select */
+#define   GMBUS_RATE_100KHZ	(0<<8)
+#define   GMBUS_RATE_50KHZ	(1<<8)
+#define   GMBUS_RATE_400KHZ	(2<<8) /* reserved on Pineview */
+#define   GMBUS_RATE_1MHZ	(3<<8) /* reserved on Pineview */
+#define   GMBUS_HOLD_EXT	(1<<7) /* 300ns hold time, rsvd on Pineview */
+#define   GMBUS_PORT_DISABLED	0
+#define   GMBUS_PORT_SSC	1
+#define   GMBUS_PORT_VGADDC	2
+#define   GMBUS_PORT_PANEL	3
+#define   GMBUS_PORT_DPC	4 /* HDMIC */
+#define   GMBUS_PORT_DPB	5 /* SDVO, HDMIB */
+#define   GMBUS_PORT_DPD	6 /* HDMID */
+#define   GMBUS_PORT_RESERVED	7 /* 7 reserved */
+#define   GMBUS_NUM_PORTS	(GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
+#define GMBUS1			0x5104 /* command/status */
+#define   GMBUS_SW_CLR_INT	(1<<31)
+#define   GMBUS_SW_RDY		(1<<30)
+#define   GMBUS_ENT		(1<<29) /* enable timeout */
+#define   GMBUS_CYCLE_NONE	(0<<25)
+#define   GMBUS_CYCLE_WAIT	(1<<25)
+#define   GMBUS_CYCLE_INDEX	(2<<25)
+#define   GMBUS_CYCLE_STOP	(4<<25)
+#define   GMBUS_BYTE_COUNT_SHIFT 16
+#define   GMBUS_SLAVE_INDEX_SHIFT 8
+#define   GMBUS_SLAVE_ADDR_SHIFT 1
+#define   GMBUS_SLAVE_READ	(1<<0)
+#define   GMBUS_SLAVE_WRITE	(0<<0)
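+
+/*
+ * Sketch (editor's addition, hypothetical helper): assembling a GMBUS1
+ * command word for a single read transfer of len bytes from a 7-bit slave
+ * address, using the fields above (GMBUS_CYCLE_STOP would be OR'ed in for
+ * the final transaction).
+ */
+static inline u32 gmbus1_read_command_example(u8 slave_addr, u16 len)
+{
+	return GMBUS_SW_RDY | GMBUS_CYCLE_WAIT |
+	       ((u32)len << GMBUS_BYTE_COUNT_SHIFT) |
+	       ((u32)slave_addr << GMBUS_SLAVE_ADDR_SHIFT) |
+	       GMBUS_SLAVE_READ;
+}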
+#define GMBUS2			0x5108 /* status */
+#define   GMBUS_INUSE		(1<<15)
+#define   GMBUS_HW_WAIT_PHASE	(1<<14)
+#define   GMBUS_STALL_TIMEOUT	(1<<13)
+#define   GMBUS_INT		(1<<12)
+#define   GMBUS_HW_RDY		(1<<11)
+#define   GMBUS_SATOER		(1<<10)
+#define   GMBUS_ACTIVE		(1<<9)
+#define GMBUS3			0x510c /* data buffer bytes 3-0 */
+#define GMBUS4			0x5110 /* interrupt mask (Pineview+) */
+#define   GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define   GMBUS_NAK_EN		(1<<3)
+#define   GMBUS_IDLE_EN		(1<<2)
+#define   GMBUS_HW_WAIT_EN	(1<<1)
+#define   GMBUS_HW_RDY_EN	(1<<0)
+#define GMBUS5			0x5120 /* byte index */
+#define   GMBUS_2BYTE_INDEX_EN	(1<<31)
+
+/*
+ * Clock control & power management
+ */
+
+#define VGA0	0x6000
+#define VGA1	0x6004
+#define VGA_PD	0x6010
+#define   VGA0_PD_P2_DIV_4	(1 << 7)
+#define   VGA0_PD_P1_DIV_2	(1 << 5)
+#define   VGA0_PD_P1_SHIFT	0
+#define   VGA0_PD_P1_MASK	(0x1f << 0)
+#define   VGA1_PD_P2_DIV_4	(1 << 15)
+#define   VGA1_PD_P1_DIV_2	(1 << 13)
+#define   VGA1_PD_P1_SHIFT	8
+#define   VGA1_PD_P1_MASK	(0x1f << 8)
+#define _DPLL_A	(dev_priv->info->display_mmio_offset + 0x6014)
+#define _DPLL_B	(dev_priv->info->display_mmio_offset + 0x6018)
+#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
+#define   DPLL_VCO_ENABLE		(1 << 31)
+#define   DPLL_DVO_HIGH_SPEED		(1 << 30)
+#define   DPLL_EXT_BUFFER_ENABLE_VLV	(1 << 30)
+#define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
+#define   DPLL_REFA_CLK_ENABLE_VLV	(1 << 29)
+#define   DPLL_VGA_MODE_DIS		(1 << 28)
+#define   DPLLB_MODE_DAC_SERIAL		(1 << 26) /* i915 */
+#define   DPLLB_MODE_LVDS		(2 << 26) /* i915 */
+#define   DPLL_MODE_MASK		(3 << 26)
+#define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define   DPLLB_LVDS_P2_CLOCK_DIV_14	(0 << 24) /* i915 */
+#define   DPLLB_LVDS_P2_CLOCK_DIV_7	(1 << 24) /* i915 */
+#define   DPLL_P2_CLOCK_DIV_MASK	0x03000000 /* i915 */
+#define   DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000 /* i915 */
+#define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW	0x00ff8000 /* Pineview */
+#define   DPLL_LOCK_VLV			(1<<15)
+#define   DPLL_INTEGRATED_CLOCK_VLV	(1<<13)
+
+#define   DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define   DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
+#define   DPLL_FPA01_P1_POST_DIV_SHIFT	16
+#define   DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
+/* i830, required in DVO non-gang */
+#define   PLL_P2_DIVIDE_BY_4		(1 << 23)
+#define   PLL_P1_DIVIDE_BY_TWO		(1 << 21) /* i830 */
+#define   PLL_REF_INPUT_DREFCLK		(0 << 13)
+#define   PLL_REF_INPUT_TVCLKINA	(1 << 13) /* i830 */
+#define   PLL_REF_INPUT_TVCLKINBC	(2 << 13) /* SDVO TVCLKIN */
+#define   PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define   PLL_REF_INPUT_MASK		(3 << 13)
+#define   PLL_LOAD_PULSE_PHASE_SHIFT		9
+/* Ironlake */
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT     9
+# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK      (7 << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x)	(((x)-1) << 9)
+# define DPLL_FPA1_P1_POST_DIV_SHIFT            0
+# define DPLL_FPA1_P1_POST_DIV_MASK             0xff
+
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+#define   PLL_LOAD_PULSE_PHASE_MASK		(0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define   DISPLAY_RATE_SELECT_FPA1		(1 << 8)
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ */
+#define   SDVO_MULTIPLIER_MASK			0x000000ff
+#define   SDVO_MULTIPLIER_SHIFT_HIRES		4
+#define   SDVO_MULTIPLIER_SHIFT_VGA		0
+#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
+ */
+#define   DPLL_MD_UDI_DIVIDER_MASK		0x3f000000
+#define   DPLL_MD_UDI_DIVIDER_SHIFT		24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define   DPLL_MD_VGA_UDI_DIVIDER_MASK		0x003f0000
+#define   DPLL_MD_VGA_UDI_DIVIDER_SHIFT		16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are filler.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.3 GHz.  The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define   DPLL_MD_UDI_MULTIPLIER_MASK		0x00003f00
+#define   DPLL_MD_UDI_MULTIPLIER_SHIFT		8
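+
+/*
+ * Worked example (editor's addition, hypothetical helper): a 65 MHz dotclock
+ * doubled to 130 MHz gives a 1.3 GHz bus rate, inside the 1-2 GHz window, so
+ * the multiplier programmed here would be 2. The field stores the multiplier
+ * minus one.
+ */
+static inline u32 dpll_md_udi_multiplier_example(u32 multiplier)
+{
+	return ((multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT) &
+	       DPLL_MD_UDI_MULTIPLIER_MASK;
+}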
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This is best set to the default value (3), or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
+#define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
+#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
+#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+
+#define _FPA0	0x06040
+#define _FPA1	0x06044
+#define _FPB0	0x06048
+#define _FPB1	0x0604c
+#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1)
+#define   FP_N_DIV_MASK		0x003f0000
+#define   FP_N_PINEVIEW_DIV_MASK	0x00ff0000
+#define   FP_N_DIV_SHIFT		16
+#define   FP_M1_DIV_MASK	0x00003f00
+#define   FP_M1_DIV_SHIFT		 8
+#define   FP_M2_DIV_MASK	0x0000003f
+#define   FP_M2_PINEVIEW_DIV_MASK	0x000000ff
+#define   FP_M2_DIV_SHIFT		 0
+#define DPLL_TEST	0x606c
+#define   DPLLB_TEST_SDVO_DIV_1		(0 << 22)
+#define   DPLLB_TEST_SDVO_DIV_2		(1 << 22)
+#define   DPLLB_TEST_SDVO_DIV_4		(2 << 22)
+#define   DPLLB_TEST_SDVO_DIV_MASK	(3 << 22)
+#define   DPLLB_TEST_N_BYPASS		(1 << 19)
+#define   DPLLB_TEST_M_BYPASS		(1 << 18)
+#define   DPLLB_INPUT_BUFFER_ENABLE	(1 << 16)
+#define   DPLLA_TEST_N_BYPASS		(1 << 3)
+#define   DPLLA_TEST_M_BYPASS		(1 << 2)
+#define   DPLLA_INPUT_BUFFER_ENABLE	(1 << 0)
+#define D_STATE		0x6104
+#define  DSTATE_GFX_RESET_I830			(1<<6)
+#define  DSTATE_PLL_D3_OFF			(1<<3)
+#define  DSTATE_GFX_CLOCK_GATING		(1<<1)
+#define  DSTATE_DOT_CLOCK_GATING		(1<<0)
+#define DSPCLK_GATE_D		0x6200
+# define DPUNIT_B_CLOCK_GATE_DISABLE		(1 << 30) /* 965 */
+# define VSUNIT_CLOCK_GATE_DISABLE		(1 << 29) /* 965 */
+# define VRHUNIT_CLOCK_GATE_DISABLE		(1 << 28) /* 965 */
+# define VRDUNIT_CLOCK_GATE_DISABLE		(1 << 27) /* 965 */
+# define AUDUNIT_CLOCK_GATE_DISABLE		(1 << 26) /* 965 */
+# define DPUNIT_A_CLOCK_GATE_DISABLE		(1 << 25) /* 965 */
+# define DPCUNIT_CLOCK_GATE_DISABLE		(1 << 24) /* 965 */
+# define TVRUNIT_CLOCK_GATE_DISABLE		(1 << 23) /* 915-945 */
+# define TVCUNIT_CLOCK_GATE_DISABLE		(1 << 22) /* 915-945 */
+# define TVFUNIT_CLOCK_GATE_DISABLE		(1 << 21) /* 915-945 */
+# define TVEUNIT_CLOCK_GATE_DISABLE		(1 << 20) /* 915-945 */
+# define DVSUNIT_CLOCK_GATE_DISABLE		(1 << 19) /* 915-945 */
+# define DSSUNIT_CLOCK_GATE_DISABLE		(1 << 18) /* 915-945 */
+# define DDBUNIT_CLOCK_GATE_DISABLE		(1 << 17) /* 915-945 */
+# define DPRUNIT_CLOCK_GATE_DISABLE		(1 << 16) /* 915-945 */
+# define DPFUNIT_CLOCK_GATE_DISABLE		(1 << 15) /* 915-945 */
+# define DPBMUNIT_CLOCK_GATE_DISABLE		(1 << 14) /* 915-945 */
+# define DPLSUNIT_CLOCK_GATE_DISABLE		(1 << 13) /* 915-945 */
+# define DPLUNIT_CLOCK_GATE_DISABLE		(1 << 12) /* 915-945 */
+# define DPOUNIT_CLOCK_GATE_DISABLE		(1 << 11)
+# define DPBUNIT_CLOCK_GATE_DISABLE		(1 << 10)
+# define DCUNIT_CLOCK_GATE_DISABLE		(1 << 9)
+# define DPUNIT_CLOCK_GATE_DISABLE		(1 << 8)
+# define VRUNIT_CLOCK_GATE_DISABLE		(1 << 7) /* 915+: reserved */
+# define OVHUNIT_CLOCK_GATE_DISABLE		(1 << 6) /* 830-865 */
+# define DPIOUNIT_CLOCK_GATE_DISABLE		(1 << 6) /* 915-945 */
+# define OVFUNIT_CLOCK_GATE_DISABLE		(1 << 5)
+# define OVBUNIT_CLOCK_GATE_DISABLE		(1 << 4)
+/**
+ * This bit must be set on the 830 to prevent hangs when turning off the
+ * overlay scaler.
+ */
+# define OVRUNIT_CLOCK_GATE_DISABLE		(1 << 3)
+# define OVCUNIT_CLOCK_GATE_DISABLE		(1 << 2)
+# define OVUUNIT_CLOCK_GATE_DISABLE		(1 << 1)
+# define ZVUNIT_CLOCK_GATE_DISABLE		(1 << 0) /* 830 */
+# define OVLUNIT_CLOCK_GATE_DISABLE		(1 << 0) /* 845,865 */
+
+#define RENCLK_GATE_D1		0x6204
+# define BLITTER_CLOCK_GATE_DISABLE		(1 << 13) /* 945GM only */
+# define MPEG_CLOCK_GATE_DISABLE		(1 << 12) /* 945GM only */
+# define PC_FE_CLOCK_GATE_DISABLE		(1 << 11)
+# define PC_BE_CLOCK_GATE_DISABLE		(1 << 10)
+# define WINDOWER_CLOCK_GATE_DISABLE		(1 << 9)
+# define INTERPOLATOR_CLOCK_GATE_DISABLE	(1 << 8)
+# define COLOR_CALCULATOR_CLOCK_GATE_DISABLE	(1 << 7)
+# define MOTION_COMP_CLOCK_GATE_DISABLE		(1 << 6)
+# define MAG_CLOCK_GATE_DISABLE			(1 << 5)
+/** This bit must be unset on 855,865 */
+# define MECI_CLOCK_GATE_DISABLE		(1 << 4)
+# define DCMP_CLOCK_GATE_DISABLE		(1 << 3)
+# define MEC_CLOCK_GATE_DISABLE			(1 << 2)
+# define MECO_CLOCK_GATE_DISABLE		(1 << 1)
+/** This bit must be set on 855,865. */
+# define SV_CLOCK_GATE_DISABLE			(1 << 0)
+# define I915_MPEG_CLOCK_GATE_DISABLE		(1 << 16)
+# define I915_VLD_IP_PR_CLOCK_GATE_DISABLE	(1 << 15)
+# define I915_MOTION_COMP_CLOCK_GATE_DISABLE	(1 << 14)
+# define I915_BD_BF_CLOCK_GATE_DISABLE		(1 << 13)
+# define I915_SF_SE_CLOCK_GATE_DISABLE		(1 << 12)
+# define I915_WM_CLOCK_GATE_DISABLE		(1 << 11)
+# define I915_IZ_CLOCK_GATE_DISABLE		(1 << 10)
+# define I915_PI_CLOCK_GATE_DISABLE		(1 << 9)
+# define I915_DI_CLOCK_GATE_DISABLE		(1 << 8)
+# define I915_SH_SV_CLOCK_GATE_DISABLE		(1 << 7)
+# define I915_PL_DG_QC_FT_CLOCK_GATE_DISABLE	(1 << 6)
+# define I915_SC_CLOCK_GATE_DISABLE		(1 << 5)
+# define I915_FL_CLOCK_GATE_DISABLE		(1 << 4)
+# define I915_DM_CLOCK_GATE_DISABLE		(1 << 3)
+# define I915_PS_CLOCK_GATE_DISABLE		(1 << 2)
+# define I915_CC_CLOCK_GATE_DISABLE		(1 << 1)
+# define I915_BY_CLOCK_GATE_DISABLE		(1 << 0)
+
+# define I965_RCZ_CLOCK_GATE_DISABLE		(1 << 30)
+/** This bit must always be set on 965G/965GM */
+# define I965_RCC_CLOCK_GATE_DISABLE		(1 << 29)
+# define I965_RCPB_CLOCK_GATE_DISABLE		(1 << 28)
+# define I965_DAP_CLOCK_GATE_DISABLE		(1 << 27)
+# define I965_ROC_CLOCK_GATE_DISABLE		(1 << 26)
+# define I965_GW_CLOCK_GATE_DISABLE		(1 << 25)
+# define I965_TD_CLOCK_GATE_DISABLE		(1 << 24)
+/** This bit must always be set on 965G */
+# define I965_ISC_CLOCK_GATE_DISABLE		(1 << 23)
+# define I965_IC_CLOCK_GATE_DISABLE		(1 << 22)
+# define I965_EU_CLOCK_GATE_DISABLE		(1 << 21)
+# define I965_IF_CLOCK_GATE_DISABLE		(1 << 20)
+# define I965_TC_CLOCK_GATE_DISABLE		(1 << 19)
+# define I965_SO_CLOCK_GATE_DISABLE		(1 << 17)
+# define I965_FBC_CLOCK_GATE_DISABLE		(1 << 16)
+# define I965_MARI_CLOCK_GATE_DISABLE		(1 << 15)
+# define I965_MASF_CLOCK_GATE_DISABLE		(1 << 14)
+# define I965_MAWB_CLOCK_GATE_DISABLE		(1 << 13)
+# define I965_EM_CLOCK_GATE_DISABLE		(1 << 12)
+# define I965_UC_CLOCK_GATE_DISABLE		(1 << 11)
+# define I965_SI_CLOCK_GATE_DISABLE		(1 << 6)
+# define I965_MT_CLOCK_GATE_DISABLE		(1 << 5)
+# define I965_PL_CLOCK_GATE_DISABLE		(1 << 4)
+# define I965_DG_CLOCK_GATE_DISABLE		(1 << 3)
+# define I965_QC_CLOCK_GATE_DISABLE		(1 << 2)
+# define I965_FT_CLOCK_GATE_DISABLE		(1 << 1)
+# define I965_DM_CLOCK_GATE_DISABLE		(1 << 0)
+
+#define RENCLK_GATE_D2		0x6208
+#define VF_UNIT_CLOCK_GATE_DISABLE		(1 << 9)
+#define GS_UNIT_CLOCK_GATE_DISABLE		(1 << 7)
+#define CL_UNIT_CLOCK_GATE_DISABLE		(1 << 6)
+#define RAMCLK_GATE_D		0x6210		/* CRL only */
+#define DEUC			0x6214          /* CRL only */
+
+#define FW_BLC_SELF_VLV		(VLV_DISPLAY_BASE + 0x6500)
+#define  FW_CSPWRDWNEN		(1<<15)
+
+/*
+ * Palette regs
+ */
+
+#define _PALETTE_A		(dev_priv->info->display_mmio_offset + 0xa000)
+#define _PALETTE_B		(dev_priv->info->display_mmio_offset + 0xa800)
+#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
+
+/* MCH MMIO space */
+
+/*
+ * MCHBAR mirror.
+ *
+ * This mirrors the MCHBAR MMIO space whose location is determined by
+ * device 0 function 0's PCI config register 0x44 or 0x48 and matches it in
+ * every way.  It is not accessible from the CP register read instructions.
+ */
+#define MCHBAR_MIRROR_BASE	0x10000
+
+#define MCHBAR_MIRROR_BASE_SNB	0x140000
+
+/* Memory controller frequency in MCHBAR for Haswell (possibly SNB+) */
+#define DCLK 0x5e04
+
+/** 915-945 and GM965 MCH register controlling DRAM channel access */
+#define DCC			0x10200
+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL		(0 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC	(1 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED	(2 << 0)
+#define DCC_ADDRESSING_MODE_MASK			(3 << 0)
+#define DCC_CHANNEL_XOR_DISABLE				(1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17				(1 << 9)
+
+/** Pineview MCH register contains DDR3 setting */
+#define CSHRDDR3CTL            0x101a8
+#define CSHRDDR3CTL_DDR3       (1 << 2)
+
+/** 965 MCH register controlling DRAM channel configuration */
+#define C0DRB3			0x10206
+#define C1DRB3			0x10606
+
+/** snb MCH registers for reading the DRAM channel configuration */
+#define MAD_DIMM_C0			(MCHBAR_MIRROR_BASE_SNB + 0x5004)
+#define MAD_DIMM_C1			(MCHBAR_MIRROR_BASE_SNB + 0x5008)
+#define MAD_DIMM_C2			(MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define   MAD_DIMM_ECC_MASK		(0x3 << 24)
+#define   MAD_DIMM_ECC_OFF		(0x0 << 24)
+#define   MAD_DIMM_ECC_IO_ON_LOGIC_OFF	(0x1 << 24)
+#define   MAD_DIMM_ECC_IO_OFF_LOGIC_ON	(0x2 << 24)
+#define   MAD_DIMM_ECC_ON		(0x3 << 24)
+#define   MAD_DIMM_ENH_INTERLEAVE	(0x1 << 22)
+#define   MAD_DIMM_RANK_INTERLEAVE	(0x1 << 21)
+#define   MAD_DIMM_B_WIDTH_X16		(0x1 << 20) /* X8 chips if unset */
+#define   MAD_DIMM_A_WIDTH_X16		(0x1 << 19) /* X8 chips if unset */
+#define   MAD_DIMM_B_DUAL_RANK		(0x1 << 18)
+#define   MAD_DIMM_A_DUAL_RANK		(0x1 << 17)
+#define   MAD_DIMM_A_SELECT		(0x1 << 16)
+/* DIMM sizes are in multiples of 256 MB. */
+#define   MAD_DIMM_B_SIZE_SHIFT		8
+#define   MAD_DIMM_B_SIZE_MASK		(0xff << MAD_DIMM_B_SIZE_SHIFT)
+#define   MAD_DIMM_A_SIZE_SHIFT		0
+#define   MAD_DIMM_A_SIZE_MASK		(0xff << MAD_DIMM_A_SIZE_SHIFT)
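+
+/*
+ * Sketch (editor's addition, hypothetical helper): decoding DIMM A's size in
+ * megabytes from a MAD_DIMM_C* value, per the 256 MB granularity noted above.
+ */
+static inline u32 mad_dimm_a_size_mb_example(u32 mad_dimm)
+{
+	return ((mad_dimm & MAD_DIMM_A_SIZE_MASK) >> MAD_DIMM_A_SIZE_SHIFT) * 256;
+}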
+
+/** snb MCH registers for priority tuning */
+#define MCH_SSKPD			(MCHBAR_MIRROR_BASE_SNB + 0x5d10)
+#define   MCH_SSKPD_WM0_MASK		0x3f
+#define   MCH_SSKPD_WM0_VAL		0xc
+
+/* Clocking configuration register */
+#define CLKCFG			0x10c00
+#define CLKCFG_FSB_400					(5 << 0)	/* hrawclk 100 */
+#define CLKCFG_FSB_533					(1 << 0)	/* hrawclk 133 */
+#define CLKCFG_FSB_667					(3 << 0)	/* hrawclk 166 */
+#define CLKCFG_FSB_800					(2 << 0)	/* hrawclk 200 */
+#define CLKCFG_FSB_1067					(6 << 0)	/* hrawclk 266 */
+#define CLKCFG_FSB_1333					(7 << 0)	/* hrawclk 333 */
+/* Note: the two below are guesses */
+#define CLKCFG_FSB_1600					(4 << 0)	/* hrawclk 400 */
+#define CLKCFG_FSB_1600_ALT				(0 << 0)	/* hrawclk 400 */
+#define CLKCFG_FSB_MASK					(7 << 0)
+#define CLKCFG_MEM_533					(1 << 4)
+#define CLKCFG_MEM_667					(2 << 4)
+#define CLKCFG_MEM_800					(3 << 4)
+#define CLKCFG_MEM_MASK					(7 << 4)
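+
+/*
+ * Sketch (editor's addition, hypothetical helper): mapping CLKCFG's FSB
+ * field to the hrawclk frequency in MHz, following the table above (the two
+ * 1600 encodings are marked as guesses in the original comments).
+ */
+static inline int clkcfg_hrawclk_mhz_example(u32 clkcfg)
+{
+	switch (clkcfg & CLKCFG_FSB_MASK) {
+	case CLKCFG_FSB_400:
+		return 100;
+	case CLKCFG_FSB_533:
+		return 133;
+	case CLKCFG_FSB_667:
+		return 166;
+	case CLKCFG_FSB_800:
+		return 200;
+	case CLKCFG_FSB_1067:
+		return 266;
+	case CLKCFG_FSB_1333:
+		return 333;
+	default: /* CLKCFG_FSB_1600 and CLKCFG_FSB_1600_ALT */
+		return 400;
+	}
+}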
+
+#define TSC1			0x11001
+#define   TSE			(1<<0)
+#define TR1			0x11006
+#define TSFS			0x11020
+#define   TSFS_SLOPE_MASK	0x0000ff00
+#define   TSFS_SLOPE_SHIFT	8
+#define   TSFS_INTR_MASK	0x000000ff
+
+#define CRSTANDVID		0x11100
+#define PXVFREQ_BASE		0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define   PXVFREQ_PX_MASK	0x7f000000
+#define   PXVFREQ_PX_SHIFT	24
+#define VIDFREQ_BASE		0x11110
+#define VIDFREQ1		0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
+#define VIDFREQ2		0x11114
+#define VIDFREQ3		0x11118
+#define VIDFREQ4		0x1111c
+#define   VIDFREQ_P0_MASK	0x1f000000
+#define   VIDFREQ_P0_SHIFT	24
+#define   VIDFREQ_P0_CSCLK_MASK	0x00f00000
+#define   VIDFREQ_P0_CSCLK_SHIFT 20
+#define   VIDFREQ_P0_CRCLK_MASK	0x000f0000
+#define   VIDFREQ_P0_CRCLK_SHIFT 16
+#define   VIDFREQ_P1_MASK	0x00001f00
+#define   VIDFREQ_P1_SHIFT	8
+#define   VIDFREQ_P1_CSCLK_MASK	0x000000f0
+#define   VIDFREQ_P1_CSCLK_SHIFT 4
+#define   VIDFREQ_P1_CRCLK_MASK	0x0000000f
+#define INTTOEXT_BASE_ILK	0x11300
+#define INTTOEXT_BASE		0x11120 /* INTTOEXT1-8 (0x1113c) */
+#define   INTTOEXT_MAP3_SHIFT	24
+#define   INTTOEXT_MAP3_MASK	(0x1f << INTTOEXT_MAP3_SHIFT)
+#define   INTTOEXT_MAP2_SHIFT	16
+#define   INTTOEXT_MAP2_MASK	(0x1f << INTTOEXT_MAP2_SHIFT)
+#define   INTTOEXT_MAP1_SHIFT	8
+#define   INTTOEXT_MAP1_MASK	(0x1f << INTTOEXT_MAP1_SHIFT)
+#define   INTTOEXT_MAP0_SHIFT	0
+#define   INTTOEXT_MAP0_MASK	(0x1f << INTTOEXT_MAP0_SHIFT)
+#define MEMSWCTL		0x11170 /* Ironlake only */
+#define   MEMCTL_CMD_MASK	0xe000
+#define   MEMCTL_CMD_SHIFT	13
+#define   MEMCTL_CMD_RCLK_OFF	0
+#define   MEMCTL_CMD_RCLK_ON	1
+#define   MEMCTL_CMD_CHFREQ	2
+#define   MEMCTL_CMD_CHVID	3
+#define   MEMCTL_CMD_VMMOFF	4
+#define   MEMCTL_CMD_VMMON	5
+#define   MEMCTL_CMD_STS	(1<<12) /* write 1 triggers command, clears
+					   when command complete */
+#define   MEMCTL_FREQ_MASK	0x0f00 /* jitter, from 0-15 */
+#define   MEMCTL_FREQ_SHIFT	8
+#define   MEMCTL_SFCAVM		(1<<7)
+#define   MEMCTL_TGT_VID_MASK	0x007f
+#define MEMIHYST		0x1117c
+#define MEMINTREN		0x11180 /* 16 bits */
+#define   MEMINT_RSEXIT_EN	(1<<8)
+#define   MEMINT_CX_SUPR_EN	(1<<7)
+#define   MEMINT_CONT_BUSY_EN	(1<<6)
+#define   MEMINT_AVG_BUSY_EN	(1<<5)
+#define   MEMINT_EVAL_CHG_EN	(1<<4)
+#define   MEMINT_MON_IDLE_EN	(1<<3)
+#define   MEMINT_UP_EVAL_EN	(1<<2)
+#define   MEMINT_DOWN_EVAL_EN	(1<<1)
+#define   MEMINT_SW_CMD_EN	(1<<0)
+#define MEMINTRSTR		0x11182 /* 16 bits */
+#define   MEM_RSEXIT_MASK	0xc000
+#define   MEM_RSEXIT_SHIFT	14
+#define   MEM_CONT_BUSY_MASK	0x3000
+#define   MEM_CONT_BUSY_SHIFT	12
+#define   MEM_AVG_BUSY_MASK	0x0c00
+#define   MEM_AVG_BUSY_SHIFT	10
+#define   MEM_EVAL_CHG_MASK	0x0300
+#define   MEM_EVAL_BUSY_SHIFT	8
+#define   MEM_MON_IDLE_MASK	0x00c0
+#define   MEM_MON_IDLE_SHIFT	6
+#define   MEM_UP_EVAL_MASK	0x0030
+#define   MEM_UP_EVAL_SHIFT	4
+#define   MEM_DOWN_EVAL_MASK	0x000c
+#define   MEM_DOWN_EVAL_SHIFT	2
+#define   MEM_SW_CMD_MASK	0x0003
+#define   MEM_INT_STEER_GFX	0
+#define   MEM_INT_STEER_CMR	1
+#define   MEM_INT_STEER_SMI	2
+#define   MEM_INT_STEER_SCI	3
+#define MEMINTRSTS		0x11184
+#define   MEMINT_RSEXIT		(1<<7)
+#define   MEMINT_CONT_BUSY	(1<<6)
+#define   MEMINT_AVG_BUSY	(1<<5)
+#define   MEMINT_EVAL_CHG	(1<<4)
+#define   MEMINT_MON_IDLE	(1<<3)
+#define   MEMINT_UP_EVAL	(1<<2)
+#define   MEMINT_DOWN_EVAL	(1<<1)
+#define   MEMINT_SW_CMD		(1<<0)
+#define MEMMODECTL		0x11190
+#define   MEMMODE_BOOST_EN	(1<<31)
+#define   MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
+#define   MEMMODE_BOOST_FREQ_SHIFT 24
+#define   MEMMODE_IDLE_MODE_MASK 0x00030000
+#define   MEMMODE_IDLE_MODE_SHIFT 16
+#define   MEMMODE_IDLE_MODE_EVAL 0
+#define   MEMMODE_IDLE_MODE_CONT 1
+#define   MEMMODE_HWIDLE_EN	(1<<15)
+#define   MEMMODE_SWMODE_EN	(1<<14)
+#define   MEMMODE_RCLK_GATE	(1<<13)
+#define   MEMMODE_HW_UPDATE	(1<<12)
+#define   MEMMODE_FSTART_MASK	0x00000f00 /* starting jitter, 0-15 */
+#define   MEMMODE_FSTART_SHIFT	8
+#define   MEMMODE_FMAX_MASK	0x000000f0 /* max jitter, 0-15 */
+#define   MEMMODE_FMAX_SHIFT	4
+#define   MEMMODE_FMIN_MASK	0x0000000f /* min jitter, 0-15 */
+#define RCBMAXAVG		0x1119c
+#define MEMSWCTL2		0x1119e /* Cantiga only */
+#define   SWMEMCMD_RENDER_OFF	(0 << 13)
+#define   SWMEMCMD_RENDER_ON	(1 << 13)
+#define   SWMEMCMD_SWFREQ	(2 << 13)
+#define   SWMEMCMD_TARVID	(3 << 13)
+#define   SWMEMCMD_VRM_OFF	(4 << 13)
+#define   SWMEMCMD_VRM_ON	(5 << 13)
+#define   CMDSTS		(1<<12)
+#define   SFCAVM		(1<<11)
+#define   SWFREQ_MASK		0x0380 /* P0-7 */
+#define   SWFREQ_SHIFT		7
+#define   TARVID_MASK		0x001f
+#define MEMSTAT_CTG		0x111a0
+#define RCBMINAVG		0x111a0
+#define RCUPEI			0x111b0
+#define RCDNEI			0x111b4
+#define RSTDBYCTL		0x111b8
+#define   RS1EN			(1<<31)
+#define   RS2EN			(1<<30)
+#define   RS3EN			(1<<29)
+#define   D3RS3EN		(1<<28) /* Display D3 implies RS3 */
+#define   SWPROMORSX		(1<<27) /* RSx promotion timers ignored */
+#define   RCWAKERW		(1<<26) /* Resetwarn from PCH causes wakeup */
+#define   DPRSLPVREN		(1<<25) /* Fast voltage ramp enable */
+#define   GFXTGHYST		(1<<24) /* Hysteresis to allow trunk gating */
+#define   RCX_SW_EXIT		(1<<23) /* Leave RSx and prevent re-entry */
+#define   RSX_STATUS_MASK	(7<<20)
+#define   RSX_STATUS_ON		(0<<20)
+#define   RSX_STATUS_RC1	(1<<20)
+#define   RSX_STATUS_RC1E	(2<<20)
+#define   RSX_STATUS_RS1	(3<<20)
+#define   RSX_STATUS_RS2	(4<<20) /* aka rc6 */
+#define   RSX_STATUS_RSVD	(5<<20) /* deep rc6 unsupported on ilk */
+#define   RSX_STATUS_RS3	(6<<20) /* rs3 unsupported on ilk */
+#define   RSX_STATUS_RSVD2	(7<<20)
+#define   UWRCRSXE		(1<<19) /* wake counter limit prevents rsx */
+#define   RSCRP			(1<<18) /* rs requests control on rs1/2 reqs */
+#define   JRSC			(1<<17) /* rsx coupled to cpu c-state */
+#define   RS2INC0		(1<<16) /* allow rs2 in cpu c0 */
+#define   RS1CONTSAV_MASK	(3<<14)
+#define   RS1CONTSAV_NO_RS1	(0<<14) /* rs1 doesn't save/restore context */
+#define   RS1CONTSAV_RSVD	(1<<14)
+#define   RS1CONTSAV_SAVE_RS1	(2<<14) /* rs1 saves context */
+#define   RS1CONTSAV_FULL_RS1	(3<<14) /* rs1 saves and restores context */
+#define   NORMSLEXLAT_MASK	(3<<12)
+#define   SLOW_RS123		(0<<12)
+#define   SLOW_RS23		(1<<12)
+#define   SLOW_RS3		(2<<12)
+#define   NORMAL_RS123		(3<<12)
+#define   RCMODE_TIMEOUT	(1<<11) /* 0 is eval interval method */
+#define   IMPROMOEN		(1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define   RCENTSYNC		(1<<9) /* rs coupled to cpu c-state (3/6/7) */
+#define   STATELOCK		(1<<7) /* locked to rs_cstate if 0 */
+#define   RS_CSTATE_MASK	(3<<4)
+#define   RS_CSTATE_C367_RS1	(0<<4)
+#define   RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
+#define   RS_CSTATE_RSVD	(2<<4)
+#define   RS_CSTATE_C367_RS2	(3<<4)
+#define   REDSAVES		(1<<3) /* no context save if was idle during rs0 */
+#define   REDRESTORES		(1<<2) /* no restore if was idle during rs0 */
+#define VIDCTL			0x111c0
+#define VIDSTS			0x111c8
+#define VIDSTART		0x111cc /* 8 bits */
+#define MEMSTAT_ILK			0x111f8
+#define   MEMSTAT_VID_MASK	0x7f00
+#define   MEMSTAT_VID_SHIFT	8
+#define   MEMSTAT_PSTATE_MASK	0x00f8
+#define   MEMSTAT_PSTATE_SHIFT  3
+#define   MEMSTAT_MON_ACTV	(1<<2)
+#define   MEMSTAT_SRC_CTL_MASK	0x0003
+#define   MEMSTAT_SRC_CTL_CORE	0
+#define   MEMSTAT_SRC_CTL_TRB	1
+#define   MEMSTAT_SRC_CTL_THM	2
+#define   MEMSTAT_SRC_CTL_STDBY 3
+#define RCPREVBSYTUPAVG		0x113b8
+#define RCPREVBSYTDNAVG		0x113bc
+#define PMMISC			0x11214
+#define   MCPPCE_EN		(1<<0) /* enable PM_MSG from PCH->MPC */
+#define SDEW			0x1124c
+#define CSIEW0			0x11250
+#define CSIEW1			0x11254
+#define CSIEW2			0x11258
+#define PEW			0x1125c
+#define DEW			0x11270
+#define MCHAFE			0x112c0
+#define CSIEC			0x112e0
+#define DMIEC			0x112e4
+#define DDREC			0x112e8
+#define PEG0EC			0x112ec
+#define PEG1EC			0x112f0
+#define GFXEC			0x112f4
+#define RPPREVBSYTUPAVG		0x113b8
+#define RPPREVBSYTDNAVG		0x113bc
+#define ECR			0x11600
+#define   ECR_GPFE		(1<<31)
+#define   ECR_IMONE		(1<<30)
+#define   ECR_CAP_MASK		0x0000001f /* Event range, 0-31 */
+#define OGW0			0x11608
+#define OGW1			0x1160c
+#define EG0			0x11610
+#define EG1			0x11614
+#define EG2			0x11618
+#define EG3			0x1161c
+#define EG4			0x11620
+#define EG5			0x11624
+#define EG6			0x11628
+#define EG7			0x1162c
+#define PXW			0x11664
+#define PXWL			0x11680
+#define LCFUSE02		0x116c0
+#define   LCFUSE_HIV_MASK	0x000000ff
+#define CSIPLL0			0x12c10
+#define DDRMPLL1		0x12c20
+#define PEG_BAND_GAP_DATA	0x14d68
+
+#define GEN6_GT_THREAD_STATUS_REG 0x13805c
+#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
+#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
+
+#define GEN6_GT_PERF_STATUS	0x145948
+#define GEN6_RP_STATE_LIMITS	0x145994
+#define GEN6_RP_STATE_CAP	0x145998
+
+/*
+ * Logical Context regs
+ */
+#define CCID			0x2180
+#define   CCID_EN		(1<<0)
+#define CXT_SIZE		0x21a0
+#define GEN6_CXT_POWER_SIZE(cxt_reg)	((cxt_reg >> 24) & 0x3f)
+#define GEN6_CXT_RING_SIZE(cxt_reg)	((cxt_reg >> 18) & 0x3f)
+#define GEN6_CXT_RENDER_SIZE(cxt_reg)	((cxt_reg >> 12) & 0x3f)
+#define GEN6_CXT_EXTENDED_SIZE(cxt_reg)	((cxt_reg >> 6) & 0x3f)
+#define GEN6_CXT_PIPELINE_SIZE(cxt_reg)	((cxt_reg >> 0) & 0x3f)
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg)	(GEN6_CXT_POWER_SIZE(cxt_reg) + \
+					GEN6_CXT_RING_SIZE(cxt_reg) + \
+					GEN6_CXT_RENDER_SIZE(cxt_reg) + \
+					GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
+					GEN6_CXT_PIPELINE_SIZE(cxt_reg))
+#define GEN7_CXT_SIZE		0x21a8
+#define GEN7_CXT_POWER_SIZE(ctx_reg)	((ctx_reg >> 25) & 0x7f)
+#define GEN7_CXT_RING_SIZE(ctx_reg)	((ctx_reg >> 22) & 0x7)
+#define GEN7_CXT_RENDER_SIZE(ctx_reg)	((ctx_reg >> 16) & 0x3f)
+#define GEN7_CXT_EXTENDED_SIZE(ctx_reg)	((ctx_reg >> 9) & 0x7f)
+#define GEN7_CXT_GT1_SIZE(ctx_reg)	((ctx_reg >> 6) & 0x7)
+#define GEN7_CXT_VFSTATE_SIZE(ctx_reg)	((ctx_reg >> 0) & 0x3f)
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg)	(GEN7_CXT_POWER_SIZE(ctx_reg) + \
+					 GEN7_CXT_RING_SIZE(ctx_reg) + \
+					 GEN7_CXT_RENDER_SIZE(ctx_reg) + \
+					 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
+					 GEN7_CXT_GT1_SIZE(ctx_reg) + \
+					 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+/* Haswell does have the CXT_SIZE register, but it does not appear to be
+ * valid. The docs instead explain, in dwords, what is in the context object.
+ * The full size is 70720 bytes; however, the power context and execlist
+ * context will never be saved (the power context is stored elsewhere, and
+ * execlists don't work on HSW) - so the final size is 66944 bytes, which
+ * rounds up to 17 pages.
+ */
+#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
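+
+/*
+ * Arithmetic check (editor's addition): with 4096-byte pages,
+ * DIV_ROUND_UP(66944, 4096) == 17, matching HSW_CXT_TOTAL_SIZE above. For
+ * gen6/7 a hypothetical decode would scale the CXT_SIZE total into bytes;
+ * the 64-byte unit size below is an assumption, not taken from the header.
+ */
+static inline u32 gen6_context_size_bytes_example(u32 cxt_reg)
+{
+	return GEN6_CXT_TOTAL_SIZE(cxt_reg) * 64;	/* assumed 64B units */
+}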
+
+/*
+ * Overlay regs
+ */
+
+#define OVADD			0x30000
+#define DOVSTA			0x30008
+#define OC_BUF			(0x3<<20)
+#define OGAMC5			0x30010
+#define OGAMC4			0x30014
+#define OGAMC3			0x30018
+#define OGAMC2			0x3001c
+#define OGAMC1			0x30020
+#define OGAMC0			0x30024
+
+/*
+ * Display engine regs
+ */
+
+/* Pipe A timing regs */
+#define _HTOTAL_A	(dev_priv->info->display_mmio_offset + 0x60000)
+#define _HBLANK_A	(dev_priv->info->display_mmio_offset + 0x60004)
+#define _HSYNC_A	(dev_priv->info->display_mmio_offset + 0x60008)
+#define _VTOTAL_A	(dev_priv->info->display_mmio_offset + 0x6000c)
+#define _VBLANK_A	(dev_priv->info->display_mmio_offset + 0x60010)
+#define _VSYNC_A	(dev_priv->info->display_mmio_offset + 0x60014)
+#define _PIPEASRC	(dev_priv->info->display_mmio_offset + 0x6001c)
+#define _BCLRPAT_A	(dev_priv->info->display_mmio_offset + 0x60020)
+#define _VSYNCSHIFT_A	(dev_priv->info->display_mmio_offset + 0x60028)
+
+/* Pipe B timing regs */
+#define _HTOTAL_B	(dev_priv->info->display_mmio_offset + 0x61000)
+#define _HBLANK_B	(dev_priv->info->display_mmio_offset + 0x61004)
+#define _HSYNC_B	(dev_priv->info->display_mmio_offset + 0x61008)
+#define _VTOTAL_B	(dev_priv->info->display_mmio_offset + 0x6100c)
+#define _VBLANK_B	(dev_priv->info->display_mmio_offset + 0x61010)
+#define _VSYNC_B	(dev_priv->info->display_mmio_offset + 0x61014)
+#define _PIPEBSRC	(dev_priv->info->display_mmio_offset + 0x6101c)
+#define _BCLRPAT_B	(dev_priv->info->display_mmio_offset + 0x61020)
+#define _VSYNCSHIFT_B	(dev_priv->info->display_mmio_offset + 0x61028)
+
+
+#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
+#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
+#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
+#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
+#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+
+/* VGA port control */
+#define ADPA			0x61100
+#define PCH_ADPA                0xe1100
+#define VLV_ADPA		(VLV_DISPLAY_BASE + ADPA)
+
+#define   ADPA_DAC_ENABLE	(1<<31)
+#define   ADPA_DAC_DISABLE	0
+#define   ADPA_PIPE_SELECT_MASK	(1<<30)
+#define   ADPA_PIPE_A_SELECT	0
+#define   ADPA_PIPE_B_SELECT	(1<<30)
+#define   ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
+/* CPT uses bits 30:29 for PCH transcoder select */
+#define   ADPA_CRT_HOTPLUG_MASK  0x03ff0000 /* bit 25-16 */
+#define   ADPA_CRT_HOTPLUG_MONITOR_NONE  (0<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MASK  (3<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MONO  (2<<24)
+#define   ADPA_CRT_HOTPLUG_ENABLE        (1<<23)
+#define   ADPA_CRT_HOTPLUG_PERIOD_64     (0<<22)
+#define   ADPA_CRT_HOTPLUG_PERIOD_128    (1<<22)
+#define   ADPA_CRT_HOTPLUG_WARMUP_5MS    (0<<21)
+#define   ADPA_CRT_HOTPLUG_WARMUP_10MS   (1<<21)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_2S     (0<<20)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_4S     (1<<20)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_40    (0<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_50    (1<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_60    (2<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_70    (3<<18)
+#define   ADPA_CRT_HOTPLUG_VOLREF_325MV  (0<<17)
+#define   ADPA_CRT_HOTPLUG_VOLREF_475MV  (1<<17)
+#define   ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
+#define   ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define   ADPA_SETS_HVPOLARITY	0
+#define   ADPA_VSYNC_CNTL_DISABLE (1<<10)
+#define   ADPA_VSYNC_CNTL_ENABLE 0
+#define   ADPA_HSYNC_CNTL_DISABLE (1<<11)
+#define   ADPA_HSYNC_CNTL_ENABLE 0
+#define   ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define   ADPA_VSYNC_ACTIVE_LOW	0
+#define   ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define   ADPA_HSYNC_ACTIVE_LOW	0
+#define   ADPA_DPMS_MASK	(~(3<<10))
+#define   ADPA_DPMS_ON		(0<<10)
+#define   ADPA_DPMS_SUSPEND	(1<<10)
+#define   ADPA_DPMS_STANDBY	(2<<10)
+#define   ADPA_DPMS_OFF		(3<<10)
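+
+/*
+ * Sketch (editor's addition, hypothetical helper): note that ADPA_DPMS_MASK
+ * is defined pre-inverted, so ANDing with it clears the old DPMS state
+ * before the new one is OR'ed in.
+ */
+static inline u32 adpa_set_dpms_example(u32 adpa, u32 dpms_mode)
+{
+	return (adpa & ADPA_DPMS_MASK) | dpms_mode;	/* e.g. ADPA_DPMS_ON */
+}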
+
+
+/* Hotplug control (945+ only) */
+#define PORT_HOTPLUG_EN		(dev_priv->info->display_mmio_offset + 0x61110)
+#define   PORTB_HOTPLUG_INT_EN			(1 << 29)
+#define   PORTC_HOTPLUG_INT_EN			(1 << 28)
+#define   PORTD_HOTPLUG_INT_EN			(1 << 27)
+#define   SDVOB_HOTPLUG_INT_EN			(1 << 26)
+#define   SDVOC_HOTPLUG_INT_EN			(1 << 25)
+#define   TV_HOTPLUG_INT_EN			(1 << 18)
+#define   CRT_HOTPLUG_INT_EN			(1 << 9)
+#define HOTPLUG_INT_EN_MASK			(PORTB_HOTPLUG_INT_EN | \
+						 PORTC_HOTPLUG_INT_EN | \
+						 PORTD_HOTPLUG_INT_EN | \
+						 SDVOC_HOTPLUG_INT_EN | \
+						 SDVOB_HOTPLUG_INT_EN | \
+						 CRT_HOTPLUG_INT_EN)
+#define   CRT_HOTPLUG_FORCE_DETECT		(1 << 3)
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_32	(0 << 8)
+/* must use period 64 on GM45 according to docs */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64	(1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M		(0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M		(1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40		(0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50		(1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60		(2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70		(3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK	(3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G		(0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G		(1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV	(0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
+
+#define PORT_HOTPLUG_STAT	(dev_priv->info->display_mmio_offset + 0x61114)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for experimental
+ * evidence.
+ */
+#define   PORTD_HOTPLUG_LIVE_STATUS_G4X		(1 << 29)
+#define   PORTC_HOTPLUG_LIVE_STATUS_G4X		(1 << 28)
+#define   PORTB_HOTPLUG_LIVE_STATUS_G4X		(1 << 27)
+/* VLV DP/HDMI bits again match Bspec */
+#define   PORTD_HOTPLUG_LIVE_STATUS_VLV		(1 << 27)
+#define   PORTC_HOTPLUG_LIVE_STATUS_VLV		(1 << 28)
+#define   PORTB_HOTPLUG_LIVE_STATUS_VLV		(1 << 29)
+#define   PORTD_HOTPLUG_INT_STATUS		(3 << 21)
+#define   PORTC_HOTPLUG_INT_STATUS		(3 << 19)
+#define   PORTB_HOTPLUG_INT_STATUS		(3 << 17)
+/* CRT/TV common between gen3+ */
+#define   CRT_HOTPLUG_INT_STATUS		(1 << 11)
+#define   TV_HOTPLUG_INT_STATUS			(1 << 10)
+#define   CRT_HOTPLUG_MONITOR_MASK		(3 << 8)
+#define   CRT_HOTPLUG_MONITOR_COLOR		(3 << 8)
+#define   CRT_HOTPLUG_MONITOR_MONO		(2 << 8)
+#define   CRT_HOTPLUG_MONITOR_NONE		(0 << 8)
+/* SDVO is different across gen3/4 */
+#define   SDVOC_HOTPLUG_INT_STATUS_G4X		(1 << 3)
+#define   SDVOB_HOTPLUG_INT_STATUS_G4X		(1 << 2)
+/*
+ * Bspec seems to be seriously misled about the SDVO hpd bits on i965g/gm,
+ * since reality corroborates that they're the same as on gen3. But keep these
+ * bits here (and the comment!) to help any other lost wanderers back onto the
+ * right tracks.
+ */
+#define   SDVOC_HOTPLUG_INT_STATUS_I965		(3 << 4)
+#define   SDVOB_HOTPLUG_INT_STATUS_I965		(3 << 2)
+#define   SDVOC_HOTPLUG_INT_STATUS_I915		(1 << 7)
+#define   SDVOB_HOTPLUG_INT_STATUS_I915		(1 << 6)
+#define   HOTPLUG_INT_STATUS_G4X		(CRT_HOTPLUG_INT_STATUS | \
+						 SDVOB_HOTPLUG_INT_STATUS_G4X | \
+						 SDVOC_HOTPLUG_INT_STATUS_G4X | \
+						 PORTB_HOTPLUG_INT_STATUS | \
+						 PORTC_HOTPLUG_INT_STATUS | \
+						 PORTD_HOTPLUG_INT_STATUS)
+
+#define HOTPLUG_INT_STATUS_I915			(CRT_HOTPLUG_INT_STATUS | \
+						 SDVOB_HOTPLUG_INT_STATUS_I915 | \
+						 SDVOC_HOTPLUG_INT_STATUS_I915 | \
+						 PORTB_HOTPLUG_INT_STATUS | \
+						 PORTC_HOTPLUG_INT_STATUS | \
+						 PORTD_HOTPLUG_INT_STATUS)
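+
+/*
+ * Sketch (editor's addition, hypothetical helper): after a forced detection
+ * cycle, CRT presence is read back from the monitor bits above; anything
+ * other than MONITOR_NONE means a load was sensed.
+ */
+static inline bool crt_monitor_present_example(u32 port_hotplug_stat)
+{
+	return (port_hotplug_stat & CRT_HOTPLUG_MONITOR_MASK) !=
+	       CRT_HOTPLUG_MONITOR_NONE;
+}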
+
+/* SDVO and HDMI port control.
+ * The same register may be used for SDVO or HDMI. */
+#define GEN3_SDVOB	0x61140
+#define GEN3_SDVOC	0x61160
+#define GEN4_HDMIB	GEN3_SDVOB
+#define GEN4_HDMIC	GEN3_SDVOC
+#define PCH_SDVOB	0xe1140
+#define PCH_HDMIB	PCH_SDVOB
+#define PCH_HDMIC	0xe1150
+#define PCH_HDMID	0xe1160
+
+/* Gen 3 SDVO bits: */
+#define   SDVO_ENABLE				(1 << 31)
+#define   SDVO_PIPE_SEL(pipe)			((pipe) << 30)
+#define   SDVO_PIPE_SEL_MASK			(1 << 30)
+#define   SDVO_PIPE_B_SELECT			(1 << 30)
+#define   SDVO_STALL_SELECT			(1 << 29)
+#define   SDVO_INTERRUPT_ENABLE			(1 << 26)
+/**
+ * 915G/GM SDVO pixel multiplier.
+ * Programmed value is multiplier - 1, up to 5x.
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define   SDVO_PORT_MULTIPLY_MASK		(7 << 23)
+#define   SDVO_PORT_MULTIPLY_SHIFT		23
+#define   SDVO_PHASE_SELECT_MASK		(15 << 19)
+#define   SDVO_PHASE_SELECT_DEFAULT		(6 << 19)
+#define   SDVO_CLOCK_OUTPUT_INVERT		(1 << 18)
+#define   SDVOC_GANG_MODE			(1 << 16) /* Port C only */
+#define   SDVO_BORDER_ENABLE			(1 << 7) /* SDVO only */
+#define   SDVOB_PCIE_CONCURRENCY		(1 << 3) /* Port B only */
+#define   SDVO_DETECTED				(1 << 2)
+/* Bits to be preserved when writing */
+#define   SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \
+			       SDVO_INTERRUPT_ENABLE)
+#define   SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE)
+
+/* Gen 4 SDVO/HDMI bits: */
+#define   SDVO_COLOR_FORMAT_8bpc		(0 << 26)
+#define   SDVO_ENCODING_SDVO			(0 << 10)
+#define   SDVO_ENCODING_HDMI			(2 << 10)
+#define   HDMI_MODE_SELECT_HDMI			(1 << 9) /* HDMI only */
+#define   HDMI_MODE_SELECT_DVI			(0 << 9) /* HDMI only */
+#define   HDMI_COLOR_RANGE_16_235		(1 << 8) /* HDMI only */
+#define   SDVO_AUDIO_ENABLE			(1 << 6)
+/* VSYNC/HSYNC bits new with 965, default is to be set */
+#define   SDVO_VSYNC_ACTIVE_HIGH		(1 << 4)
+#define   SDVO_HSYNC_ACTIVE_HIGH		(1 << 3)
+
+/* Gen 5 (IBX) SDVO/HDMI bits: */
+#define   HDMI_COLOR_FORMAT_12bpc		(3 << 26) /* HDMI only */
+#define   SDVOB_HOTPLUG_ENABLE			(1 << 23) /* SDVO only */
+
+/* Gen 6 (CPT) SDVO/HDMI bits: */
+#define   SDVO_PIPE_SEL_CPT(pipe)		((pipe) << 29)
+#define   SDVO_PIPE_SEL_MASK_CPT		(3 << 29)
+
+
+/* DVO port control */
+#define DVOA			0x61120
+#define DVOB			0x61140
+#define DVOC			0x61160
+#define   DVO_ENABLE			(1 << 31)
+#define   DVO_PIPE_B_SELECT		(1 << 30)
+#define   DVO_PIPE_STALL_UNUSED		(0 << 28)
+#define   DVO_PIPE_STALL		(1 << 28)
+#define   DVO_PIPE_STALL_TV		(2 << 28)
+#define   DVO_PIPE_STALL_MASK		(3 << 28)
+#define   DVO_USE_VGA_SYNC		(1 << 15)
+#define   DVO_DATA_ORDER_I740		(0 << 14)
+#define   DVO_DATA_ORDER_FP		(1 << 14)
+#define   DVO_VSYNC_DISABLE		(1 << 11)
+#define   DVO_HSYNC_DISABLE		(1 << 10)
+#define   DVO_VSYNC_TRISTATE		(1 << 9)
+#define   DVO_HSYNC_TRISTATE		(1 << 8)
+#define   DVO_BORDER_ENABLE		(1 << 7)
+#define   DVO_DATA_ORDER_GBRG		(1 << 6)
+#define   DVO_DATA_ORDER_RGGB		(0 << 6)
+#define   DVO_DATA_ORDER_GBRG_ERRATA	(0 << 6)
+#define   DVO_DATA_ORDER_RGGB_ERRATA	(1 << 6)
+#define   DVO_VSYNC_ACTIVE_HIGH		(1 << 4)
+#define   DVO_HSYNC_ACTIVE_HIGH		(1 << 3)
+#define   DVO_BLANK_ACTIVE_HIGH		(1 << 2)
+#define   DVO_OUTPUT_CSTATE_PIXELS	(1 << 1)	/* SDG only */
+#define   DVO_OUTPUT_SOURCE_SIZE_PIXELS	(1 << 0)	/* SDG only */
+#define   DVO_PRESERVE_MASK		(0x7<<24)
+#define DVOA_SRCDIM		0x61124
+#define DVOB_SRCDIM		0x61144
+#define DVOC_SRCDIM		0x61164
+#define   DVO_SRCDIM_HORIZONTAL_SHIFT	12
+#define   DVO_SRCDIM_VERTICAL_SHIFT	0
+
+/* LVDS port control */
+#define LVDS			0x61180
+/*
+ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define   LVDS_PORT_EN			(1 << 31)
+/* Selects pipe B for LVDS data.  Must be set on pre-965. */
+#define   LVDS_PIPEB_SELECT		(1 << 30)
+#define   LVDS_PIPE_MASK		(1 << 30)
+#define   LVDS_PIPE(pipe)		((pipe) << 30)
+/* LVDS dithering flag on 965/g4x platform */
+#define   LVDS_ENABLE_DITHER		(1 << 25)
+/* LVDS sync polarity flags. Set to invert (i.e. negative) */
+#define   LVDS_VSYNC_POLARITY		(1 << 21)
+#define   LVDS_HSYNC_POLARITY		(1 << 20)
+
+/* Enable border for unscaled (or aspect-scaled) display */
+#define   LVDS_BORDER_ENABLE		(1 << 15)
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define   LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
+#define   LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
+#define   LVDS_A0A2_CLKA_POWER_UP	(3 << 8)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define   LVDS_A3_POWER_MASK		(3 << 6)
+#define   LVDS_A3_POWER_DOWN		(0 << 6)
+#define   LVDS_A3_POWER_UP		(3 << 6)
+/*
+ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define   LVDS_CLKB_POWER_MASK		(3 << 4)
+#define   LVDS_CLKB_POWER_DOWN		(0 << 4)
+#define   LVDS_CLKB_POWER_UP		(3 << 4)
+/*
+ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode.  The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define   LVDS_B0B3_POWER_MASK		(3 << 2)
+#define   LVDS_B0B3_POWER_DOWN		(0 << 2)
+#define   LVDS_B0B3_POWER_UP		(3 << 2)
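+
+/*
+ * Sketch (editor's addition, hypothetical helper): a two-channel, 24bpp LVDS
+ * configuration powers up both data-pair groups plus the A3 LSB pair and
+ * CLKB, per the pair descriptions above.
+ */
+static inline u32 lvds_dual_channel_24bpp_example(void)
+{
+	return LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_A3_POWER_UP |
+	       LVDS_CLKB_POWER_UP | LVDS_B0B3_POWER_UP;
+}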
+
+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA		0x61178
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861. */
+#define   VIDEO_DIP_DATA_SIZE	32
+#define VIDEO_DIP_CTL		0x61170
+/* Pre HSW: */
+#define   VIDEO_DIP_ENABLE		(1 << 31)
+#define   VIDEO_DIP_PORT_B		(1 << 29)
+#define   VIDEO_DIP_PORT_C		(2 << 29)
+#define   VIDEO_DIP_PORT_D		(3 << 29)
+#define   VIDEO_DIP_PORT_MASK		(3 << 29)
+#define   VIDEO_DIP_ENABLE_GCP		(1 << 25)
+#define   VIDEO_DIP_ENABLE_AVI		(1 << 21)
+#define   VIDEO_DIP_ENABLE_VENDOR	(2 << 21)
+#define   VIDEO_DIP_ENABLE_GAMUT	(4 << 21)
+#define   VIDEO_DIP_ENABLE_SPD		(8 << 21)
+#define   VIDEO_DIP_SELECT_AVI		(0 << 19)
+#define   VIDEO_DIP_SELECT_VENDOR	(1 << 19)
+#define   VIDEO_DIP_SELECT_SPD		(3 << 19)
+#define   VIDEO_DIP_SELECT_MASK		(3 << 19)
+#define   VIDEO_DIP_FREQ_ONCE		(0 << 16)
+#define   VIDEO_DIP_FREQ_VSYNC		(1 << 16)
+#define   VIDEO_DIP_FREQ_2VSYNC		(2 << 16)
+#define   VIDEO_DIP_FREQ_MASK		(3 << 16)
+/* HSW and later: */
+#define   VIDEO_DIP_ENABLE_VSC_HSW	(1 << 20)
+#define   VIDEO_DIP_ENABLE_GCP_HSW	(1 << 16)
+#define   VIDEO_DIP_ENABLE_AVI_HSW	(1 << 12)
+#define   VIDEO_DIP_ENABLE_VS_HSW	(1 << 8)
+#define   VIDEO_DIP_ENABLE_GMP_HSW	(1 << 4)
+#define   VIDEO_DIP_ENABLE_SPD_HSW	(1 << 0)
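+
+/*
+ * Sketch (editor's addition, hypothetical helper): a pre-HSW VIDEO_DIP_CTL
+ * value enabling AVI infoframes on port B, transmitted every vsync, built
+ * from the pre-HSW bits above.
+ */
+static inline u32 video_dip_avi_portb_example(void)
+{
+	return VIDEO_DIP_ENABLE | VIDEO_DIP_PORT_B | VIDEO_DIP_ENABLE_AVI |
+	       VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+}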
+
+/* Panel power sequencing */
+#define PP_STATUS	0x61200
+#define   PP_ON		(1 << 31)
+/*
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define   PP_READY		(1 << 30)
+#define   PP_SEQUENCE_NONE	(0 << 28)
+#define   PP_SEQUENCE_POWER_UP	(1 << 28)
+#define   PP_SEQUENCE_POWER_DOWN (2 << 28)
+#define   PP_SEQUENCE_MASK	(3 << 28)
+#define   PP_SEQUENCE_SHIFT	28
+#define   PP_CYCLE_DELAY_ACTIVE	(1 << 27)
+#define   PP_SEQUENCE_STATE_MASK 0x0000000f
+#define   PP_SEQUENCE_STATE_OFF_IDLE	(0x0 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_1	(0x1 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_2	(0x2 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_3	(0x3 << 0)
+#define   PP_SEQUENCE_STATE_ON_IDLE	(0x8 << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_0	(0x9 << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_2	(0xa << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_3	(0xb << 0)
+#define   PP_SEQUENCE_STATE_RESET	(0xf << 0)
+#define PP_CONTROL	0x61204
+#define   POWER_TARGET_ON	(1 << 0)
+#define PP_ON_DELAYS	0x61208
+#define PP_OFF_DELAYS	0x6120c
+#define PP_DIVISOR	0x61210
+
+/* Panel fitting */
+#define PFIT_CONTROL	(dev_priv->info->display_mmio_offset + 0x61230)
+#define   PFIT_ENABLE		(1 << 31)
+#define   PFIT_PIPE_MASK	(3 << 29)
+#define   PFIT_PIPE_SHIFT	29
+#define   VERT_INTERP_DISABLE	(0 << 10)
+#define   VERT_INTERP_BILINEAR	(1 << 10)
+#define   VERT_INTERP_MASK	(3 << 10)
+#define   VERT_AUTO_SCALE	(1 << 9)
+#define   HORIZ_INTERP_DISABLE	(0 << 6)
+#define   HORIZ_INTERP_BILINEAR	(1 << 6)
+#define   HORIZ_INTERP_MASK	(3 << 6)
+#define   HORIZ_AUTO_SCALE	(1 << 5)
+#define   PANEL_8TO6_DITHER_ENABLE (1 << 3)
+#define   PFIT_FILTER_FUZZY	(0 << 24)
+#define   PFIT_SCALING_AUTO	(0 << 26)
+#define   PFIT_SCALING_PROGRAMMED (1 << 26)
+#define   PFIT_SCALING_PILLAR	(2 << 26)
+#define   PFIT_SCALING_LETTER	(3 << 26)
+#define PFIT_PGM_RATIOS	(dev_priv->info->display_mmio_offset + 0x61234)
+/* Pre-965 */
+#define		PFIT_VERT_SCALE_SHIFT		20
+#define		PFIT_VERT_SCALE_MASK		0xfff00000
+#define		PFIT_HORIZ_SCALE_SHIFT		4
+#define		PFIT_HORIZ_SCALE_MASK		0x0000fff0
+/* 965+ */
+#define		PFIT_VERT_SCALE_SHIFT_965	16
+#define		PFIT_VERT_SCALE_MASK_965	0x1fff0000
+#define		PFIT_HORIZ_SCALE_SHIFT_965	0
+#define		PFIT_HORIZ_SCALE_MASK_965	0x00001fff
+
+#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
+
+/* Backlight control */
+#define BLC_PWM_CTL2	(dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */
+#define   BLM_PWM_ENABLE		(1 << 31)
+#define   BLM_COMBINATION_MODE		(1 << 30) /* gen4 only */
+#define   BLM_PIPE_SELECT		(1 << 29)
+#define   BLM_PIPE_SELECT_IVB		(3 << 29)
+#define   BLM_PIPE_A			(0 << 29)
+#define   BLM_PIPE_B			(1 << 29)
+#define   BLM_PIPE_C			(2 << 29) /* ivb + */
+#define   BLM_PIPE(pipe)		((pipe) << 29)
+#define   BLM_POLARITY_I965		(1 << 28) /* gen4 only */
+#define   BLM_PHASE_IN_INTERUPT_STATUS	(1 << 26)
+#define   BLM_PHASE_IN_ENABLE		(1 << 25)
+#define   BLM_PHASE_IN_INTERUPT_ENABL	(1 << 24)
+#define   BLM_PHASE_IN_TIME_BASE_SHIFT	(16)
+#define   BLM_PHASE_IN_TIME_BASE_MASK	(0xff << 16)
+#define   BLM_PHASE_IN_COUNT_SHIFT	(8)
+#define   BLM_PHASE_IN_COUNT_MASK	(0xff << 8)
+#define   BLM_PHASE_IN_INCR_SHIFT	(0)
+#define   BLM_PHASE_IN_INCR_MASK	(0xff << 0)
+#define BLC_PWM_CTL	(dev_priv->info->display_mmio_offset + 0x61254)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define   BACKLIGHT_MODULATION_FREQ_SHIFT	(17)
+#define   BACKLIGHT_MODULATION_FREQ_MASK	(0x7fff << 17)
+#define   BLM_LEGACY_MODE			(1 << 16) /* gen2 only */
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define   BACKLIGHT_DUTY_CYCLE_SHIFT		(0)
+#define   BACKLIGHT_DUTY_CYCLE_MASK		(0xffff)
+#define   BACKLIGHT_DUTY_CYCLE_MASK_PNV		(0xfffe)
+#define   BLM_POLARITY_PNV			(1 << 0) /* pnv only */
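+/*
+ * Example (illustrative): since the stored frequency field is half the real
+ * cycle count, the maximum programmable duty cycle could be recovered as:
+ *
+ *   max = ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ *          BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+ */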
+
+#define BLC_HIST_CTL	(dev_priv->info->display_mmio_offset + 0x61260)
+
+/* New registers for PCH-split platforms. Except where new bits show up, the
+ * register layout matches the gen4 BLC_PWM_CTL[12] registers. */
+#define BLC_PWM_CPU_CTL2	0x48250
+#define BLC_PWM_CPU_CTL		0x48254
+
+/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
+ * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
+#define BLC_PWM_PCH_CTL1	0xc8250
+#define   BLM_PCH_PWM_ENABLE			(1 << 31)
+#define   BLM_PCH_OVERRIDE_ENABLE		(1 << 30)
+#define   BLM_PCH_POLARITY			(1 << 29)
+#define BLC_PWM_PCH_CTL2	0xc8254
+
+/* TV port control */
+#define TV_CTL			0x68000
+/** Enables the TV encoder */
+# define TV_ENC_ENABLE			(1 << 31)
+/** Sources the TV encoder input from pipe B instead of A. */
+# define TV_ENC_PIPEB_SELECT		(1 << 30)
+/** Outputs composite video (DAC A only) */
+# define TV_ENC_OUTPUT_COMPOSITE	(0 << 28)
+/** Outputs SVideo video (DAC B/C) */
+# define TV_ENC_OUTPUT_SVIDEO		(1 << 28)
+/** Outputs Component video (DAC A/B/C) */
+# define TV_ENC_OUTPUT_COMPONENT	(2 << 28)
+/** Outputs Composite and SVideo (DAC A/B/C) */
+# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE	(3 << 28)
+# define TV_TRILEVEL_SYNC		(1 << 21)
+/** Enables slow sync generation (945GM only) */
+# define TV_SLOW_SYNC			(1 << 20)
+/** Selects 4x oversampling for 480i and 576p */
+# define TV_OVERSAMPLE_4X		(0 << 18)
+/** Selects 2x oversampling for 720p and 1080i */
+# define TV_OVERSAMPLE_2X		(1 << 18)
+/** Selects no oversampling for 1080p */
+# define TV_OVERSAMPLE_NONE		(2 << 18)
+/** Selects 8x oversampling */
+# define TV_OVERSAMPLE_8X		(3 << 18)
+/** Selects progressive mode rather than interlaced */
+# define TV_PROGRESSIVE			(1 << 17)
+/** Sets the colorburst to PAL mode.  Required for non-M PAL modes. */
+# define TV_PAL_BURST			(1 << 16)
+/** Field for setting delay of Y compared to C */
+# define TV_YC_SKEW_MASK		(7 << 12)
+/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
+# define TV_ENC_SDP_FIX			(1 << 11)
+/**
+ * Enables a fix for the 915GM only.
+ *
+ * Not sure what it does.
+ */
+# define TV_ENC_C0_FIX			(1 << 10)
+/** Bits that must be preserved by software */
+# define TV_CTL_SAVE			((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
+# define TV_FUSE_STATE_MASK		(3 << 4)
+/** Read-only state that reports all features enabled */
+# define TV_FUSE_STATE_ENABLED		(0 << 4)
+/** Read-only state that reports that Macrovision is disabled in hardware*/
+# define TV_FUSE_STATE_NO_MACROVISION	(1 << 4)
+/** Read-only state that reports that TV-out is disabled in hardware. */
+# define TV_FUSE_STATE_DISABLED		(2 << 4)
+/** Normal operation */
+# define TV_TEST_MODE_NORMAL		(0 << 0)
+/** Encoder test pattern 1 - combo pattern */
+# define TV_TEST_MODE_PATTERN_1		(1 << 0)
+/** Encoder test pattern 2 - full screen vertical 75% color bars */
+# define TV_TEST_MODE_PATTERN_2		(2 << 0)
+/** Encoder test pattern 3 - full screen horizontal 75% color bars */
+# define TV_TEST_MODE_PATTERN_3		(3 << 0)
+/** Encoder test pattern 4 - random noise */
+# define TV_TEST_MODE_PATTERN_4		(4 << 0)
+/** Encoder test pattern 5 - linear color ramps */
+# define TV_TEST_MODE_PATTERN_5		(5 << 0)
+/**
+ * This test mode forces the DACs to 50% of full output.
+ *
+ * This is used for load detection in combination with TVDAC_SENSE_MASK
+ */
+# define TV_TEST_MODE_MONITOR_DETECT	(7 << 0)
+# define TV_TEST_MODE_MASK		(7 << 0)
+
+#define TV_DAC			0x68004
+# define TV_DAC_SAVE		0x00ffff00
+/**
+ * Reports that the DAC state change logic has reported a change (RO).
+ *
+ * This gets cleared when TVDAC_STATE_CHG_EN is cleared.
+ */
+# define TVDAC_STATE_CHG		(1 << 31)
+# define TVDAC_SENSE_MASK		(7 << 28)
+/** Reports that DAC A voltage is above the detect threshold */
+# define TVDAC_A_SENSE			(1 << 30)
+/** Reports that DAC B voltage is above the detect threshold */
+# define TVDAC_B_SENSE			(1 << 29)
+/** Reports that DAC C voltage is above the detect threshold */
+# define TVDAC_C_SENSE			(1 << 28)
+/**
+ * Enables DAC state detection logic, for load-based TV detection.
+ *
+ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
+ * to off, for load detection to work.
+ */
+# define TVDAC_STATE_CHG_EN		(1 << 27)
+/** Sets the DAC A sense value to high */
+# define TVDAC_A_SENSE_CTL		(1 << 26)
+/** Sets the DAC B sense value to high */
+# define TVDAC_B_SENSE_CTL		(1 << 25)
+/** Sets the DAC C sense value to high */
+# define TVDAC_C_SENSE_CTL		(1 << 24)
+/** Overrides the ENC_ENABLE and DAC voltage levels */
+# define DAC_CTL_OVERRIDE		(1 << 7)
+/** Sets the slew rate.  Must be preserved in software */
+# define ENC_TVDAC_SLEW_FAST		(1 << 6)
+# define DAC_A_1_3_V			(0 << 4)
+# define DAC_A_1_1_V			(1 << 4)
+# define DAC_A_0_7_V			(2 << 4)
+# define DAC_A_MASK			(3 << 4)
+# define DAC_B_1_3_V			(0 << 2)
+# define DAC_B_1_1_V			(1 << 2)
+# define DAC_B_0_7_V			(2 << 2)
+# define DAC_B_MASK			(3 << 2)
+# define DAC_C_1_3_V			(0 << 0)
+# define DAC_C_1_1_V			(1 << 0)
+# define DAC_C_0_7_V			(2 << 0)
+# define DAC_C_MASK			(3 << 0)
+
+/**
+ * CSC coefficients are stored in a floating point format with 9 bits of
+ * mantissa and 2 or 3 bits of exponent.  The exponent is represented as 2**-n,
+ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
+ * -1 (0x3) being the only legal negative value.
+ */
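+/*
+ * Illustrative note (the exact bit layout is assumed from the field widths
+ * below): an 11-bit coefficient field would pack a 2-bit exponent above the
+ * 9-bit mantissa, and a 12-bit field a 3-bit exponent; an exponent of 0x3
+ * in a 3-bit field scales the mantissa by 2, since 2**-(-1) == 2.
+ */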
+#define TV_CSC_Y		0x68010
+# define TV_RY_MASK			0x07ff0000
+# define TV_RY_SHIFT			16
+# define TV_GY_MASK			0x00000fff
+# define TV_GY_SHIFT			0
+
+#define TV_CSC_Y2		0x68014
+# define TV_BY_MASK			0x07ff0000
+# define TV_BY_SHIFT			16
+/**
+ * Y attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AY_MASK			0x000003ff
+# define TV_AY_SHIFT			0
+
+#define TV_CSC_U		0x68018
+# define TV_RU_MASK			0x07ff0000
+# define TV_RU_SHIFT			16
+# define TV_GU_MASK			0x000007ff
+# define TV_GU_SHIFT			0
+
+#define TV_CSC_U2		0x6801c
+# define TV_BU_MASK			0x07ff0000
+# define TV_BU_SHIFT			16
+/**
+ * U attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AU_MASK			0x000003ff
+# define TV_AU_SHIFT			0
+
+#define TV_CSC_V		0x68020
+# define TV_RV_MASK			0x0fff0000
+# define TV_RV_SHIFT			16
+# define TV_GV_MASK			0x000007ff
+# define TV_GV_SHIFT			0
+
+#define TV_CSC_V2		0x68024
+# define TV_BV_MASK			0x07ff0000
+# define TV_BV_SHIFT			16
+/**
+ * V attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AV_MASK			0x000007ff
+# define TV_AV_SHIFT			0
+
+#define TV_CLR_KNOBS		0x68028
+/** 2s-complement brightness adjustment */
+# define TV_BRIGHTNESS_MASK		0xff000000
+# define TV_BRIGHTNESS_SHIFT		24
+/** Contrast adjustment, as a 2.6 unsigned floating point number */
+# define TV_CONTRAST_MASK		0x00ff0000
+# define TV_CONTRAST_SHIFT		16
+/** Saturation adjustment, as a 2.6 unsigned floating point number */
+# define TV_SATURATION_MASK		0x0000ff00
+# define TV_SATURATION_SHIFT		8
+/** Hue adjustment, as an integer phase angle in degrees */
+# define TV_HUE_MASK			0x000000ff
+# define TV_HUE_SHIFT			0
+
+#define TV_CLR_LEVEL		0x6802c
+/** Controls the DAC level for black */
+# define TV_BLACK_LEVEL_MASK		0x01ff0000
+# define TV_BLACK_LEVEL_SHIFT		16
+/** Controls the DAC level for blanking */
+# define TV_BLANK_LEVEL_MASK		0x000001ff
+# define TV_BLANK_LEVEL_SHIFT		0
+
+#define TV_H_CTL_1		0x68030
+/** Number of pixels in the hsync. */
+# define TV_HSYNC_END_MASK		0x1fff0000
+# define TV_HSYNC_END_SHIFT		16
+/** Total number of pixels minus one in the line (display and blanking). */
+# define TV_HTOTAL_MASK			0x00001fff
+# define TV_HTOTAL_SHIFT		0
+
+#define TV_H_CTL_2		0x68034
+/** Enables the colorburst (needed for non-component color) */
+# define TV_BURST_ENA			(1 << 31)
+/** Offset of the colorburst from the start of hsync, in pixels minus one. */
+# define TV_HBURST_START_SHIFT		16
+# define TV_HBURST_START_MASK		0x1fff0000
+/** Length of the colorburst */
+# define TV_HBURST_LEN_SHIFT		0
+# define TV_HBURST_LEN_MASK		0x00001fff
+
+#define TV_H_CTL_3		0x68038
+/** End of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_END_SHIFT		16
+# define TV_HBLANK_END_MASK		0x1fff0000
+/** Start of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_START_SHIFT		0
+# define TV_HBLANK_START_MASK		0x00001fff
+
+#define TV_V_CTL_1		0x6803c
+/** XXX */
+# define TV_NBR_END_SHIFT		16
+# define TV_NBR_END_MASK		0x07ff0000
+/** XXX */
+# define TV_VI_END_F1_SHIFT		8
+# define TV_VI_END_F1_MASK		0x00003f00
+/** XXX */
+# define TV_VI_END_F2_SHIFT		0
+# define TV_VI_END_F2_MASK		0x0000003f
+
+#define TV_V_CTL_2		0x68040
+/** Length of vsync, in half lines */
+# define TV_VSYNC_LEN_MASK		0x07ff0000
+# define TV_VSYNC_LEN_SHIFT		16
+/** Offset of the start of vsync in field 1, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F1_MASK		0x00007f00
+# define TV_VSYNC_START_F1_SHIFT	8
+/**
+ * Offset of the start of vsync in field 2, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F2_MASK		0x0000007f
+# define TV_VSYNC_START_F2_SHIFT	0
+
+#define TV_V_CTL_3		0x68044
+/** Enables generation of the equalization signal */
+# define TV_EQUAL_ENA			(1 << 31)
+/** Length of the equalization pulse train, in half lines */
+# define TV_VEQ_LEN_MASK		0x007f0000
+# define TV_VEQ_LEN_SHIFT		16
+/** Offset of the start of equalization in field 1, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F1_MASK		0x00007f00
+# define TV_VEQ_START_F1_SHIFT		8
+/**
+ * Offset of the start of equalization in field 2, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F2_MASK		0x0000007f
+# define TV_VEQ_START_F2_SHIFT		0
+
+#define TV_V_CTL_4		0x68048
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F1_MASK	0x003f0000
+# define TV_VBURST_START_F1_SHIFT	16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F1_MASK		0x000000ff
+# define TV_VBURST_END_F1_SHIFT		0
+
+#define TV_V_CTL_5		0x6804c
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F2_MASK	0x003f0000
+# define TV_VBURST_START_F2_SHIFT	16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F2_MASK		0x000000ff
+# define TV_VBURST_END_F2_SHIFT		0
+
+#define TV_V_CTL_6		0x68050
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F3_MASK	0x003f0000
+# define TV_VBURST_START_F3_SHIFT	16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F3_MASK		0x000000ff
+# define TV_VBURST_END_F3_SHIFT		0
+
+#define TV_V_CTL_7		0x68054
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F4_MASK	0x003f0000
+# define TV_VBURST_START_F4_SHIFT	16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F4_MASK		0x000000ff
+# define TV_VBURST_END_F4_SHIFT		0
+
+#define TV_SC_CTL_1		0x68060
+/** Turns on the first subcarrier phase generation DDA */
+# define TV_SC_DDA1_EN			(1 << 31)
+/** Turns on the second subcarrier phase generation DDA */
+# define TV_SC_DDA2_EN			(1 << 30)
+/** Turns on the third subcarrier phase generation DDA */
+# define TV_SC_DDA3_EN			(1 << 29)
+/** Sets the subcarrier DDA to reset frequency every other field */
+# define TV_SC_RESET_EVERY_2		(0 << 24)
+/** Sets the subcarrier DDA to reset frequency every fourth field */
+# define TV_SC_RESET_EVERY_4		(1 << 24)
+/** Sets the subcarrier DDA to reset frequency every eighth field */
+# define TV_SC_RESET_EVERY_8		(2 << 24)
+/** Sets the subcarrier DDA to never reset the frequency */
+# define TV_SC_RESET_NEVER		(3 << 24)
+/** Sets the peak amplitude of the colorburst.*/
+# define TV_BURST_LEVEL_MASK		0x00ff0000
+# define TV_BURST_LEVEL_SHIFT		16
+/** Sets the increment of the first subcarrier phase generation DDA */
+# define TV_SCDDA1_INC_MASK		0x00000fff
+# define TV_SCDDA1_INC_SHIFT		0
+
+#define TV_SC_CTL_2		0x68064
+/** Sets the rollover for the second subcarrier phase generation DDA */
+# define TV_SCDDA2_SIZE_MASK		0x7fff0000
+# define TV_SCDDA2_SIZE_SHIFT		16
+/** Sets the increment of the second subcarrier phase generation DDA */
+# define TV_SCDDA2_INC_MASK		0x00007fff
+# define TV_SCDDA2_INC_SHIFT		0
+
+#define TV_SC_CTL_3		0x68068
+/** Sets the rollover for the third subcarrier phase generation DDA */
+# define TV_SCDDA3_SIZE_MASK		0x7fff0000
+# define TV_SCDDA3_SIZE_SHIFT		16
+/** Sets the increment of the third subcarrier phase generation DDA */
+# define TV_SCDDA3_INC_MASK		0x00007fff
+# define TV_SCDDA3_INC_SHIFT		0
+
+#define TV_WIN_POS		0x68070
+/** X coordinate of the display from the start of horizontal active */
+# define TV_XPOS_MASK			0x1fff0000
+# define TV_XPOS_SHIFT			16
+/** Y coordinate of the display from the start of vertical active (NBR) */
+# define TV_YPOS_MASK			0x00000fff
+# define TV_YPOS_SHIFT			0
+
+#define TV_WIN_SIZE		0x68074
+/** Horizontal size of the display window, measured in pixels*/
+# define TV_XSIZE_MASK			0x1fff0000
+# define TV_XSIZE_SHIFT			16
+/**
+ * Vertical size of the display window, measured in pixels.
+ *
+ * Must be even for interlaced modes.
+ */
+# define TV_YSIZE_MASK			0x00000fff
+# define TV_YSIZE_SHIFT			0
+
+#define TV_FILTER_CTL_1		0x68080
+/**
+ * Enables automatic scaling calculation.
+ *
+ * If set, the rest of the registers are ignored, and the calculated values can
+ * be read back from the register.
+ */
+# define TV_AUTO_SCALE			(1 << 31)
+/**
+ * Disables the vertical filter.
+ *
+ * This is required for modes wider than 1024 pixels. */
+# define TV_V_FILTER_BYPASS		(1 << 29)
+/** Enables adaptive vertical filtering */
+# define TV_VADAPT			(1 << 28)
+# define TV_VADAPT_MODE_MASK		(3 << 26)
+/** Selects the least adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_LEAST		(0 << 26)
+/** Selects the moderately adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MODERATE	(1 << 26)
+/** Selects the most adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MOST		(3 << 26)
+/**
+ * Sets the horizontal scaling factor.
+ *
+ * This should be the fractional part of the horizontal scaling factor divided
+ * by the oversampling rate.  TV_HSCALE should be less than 1, and set to:
+ *
+ * (src width - 1) / ((oversample * dest width) - 1)
+ */
+# define TV_HSCALE_FRAC_MASK		0x00003fff
+# define TV_HSCALE_FRAC_SHIFT		0
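+/*
+ * Worked example (illustrative): scaling a 720-pixel-wide source into a
+ * 640-pixel window at 4x oversampling gives
+ * (720 - 1) / ((4 * 640) - 1) = 719 / 2559 ~= 0.281, i.e. roughly
+ * 0.281 * 2^14 ~= 4604 in the 14-bit fraction field.
+ */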
+
+#define TV_FILTER_CTL_2		0x68084
+/**
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
+ */
+# define TV_VSCALE_INT_MASK		0x00038000
+# define TV_VSCALE_INT_SHIFT		15
+/**
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * \sa TV_VSCALE_INT_MASK
+ */
+# define TV_VSCALE_FRAC_MASK		0x00007fff
+# define TV_VSCALE_FRAC_SHIFT		0
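+/*
+ * Worked example (illustrative): a 480-line source into a 400-line window
+ * without interlacing gives (480 - 1) / ((1 * 400) - 1) = 479 / 399 ~= 1.2,
+ * stored as integer part 1 and fraction ~= 0.2 * 2^15 ~= 6572.
+ */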
+
+#define TV_FILTER_CTL_3		0x68088
+/**
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ */
+# define TV_VSCALE_IP_INT_MASK		0x00038000
+# define TV_VSCALE_IP_INT_SHIFT		15
+/**
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ *
+ * \sa TV_VSCALE_IP_INT_MASK
+ */
+# define TV_VSCALE_IP_FRAC_MASK		0x00007fff
+# define TV_VSCALE_IP_FRAC_SHIFT		0
+
+#define TV_CC_CONTROL		0x68090
+# define TV_CC_ENABLE			(1 << 31)
+/**
+ * Specifies which field to send the CC data in.
+ *
+ * CC data is usually sent in field 0.
+ */
+# define TV_CC_FID_MASK			(1 << 27)
+# define TV_CC_FID_SHIFT		27
+/** Sets the horizontal position of the CC data.  Usually 135. */
+# define TV_CC_HOFF_MASK		0x03ff0000
+# define TV_CC_HOFF_SHIFT		16
+/** Sets the vertical position of the CC data.  Usually 21. */
+# define TV_CC_LINE_MASK		0x0000003f
+# define TV_CC_LINE_SHIFT		0
+
+#define TV_CC_DATA		0x68094
+# define TV_CC_RDY			(1 << 31)
+/** Second word of CC data to be transmitted. */
+# define TV_CC_DATA_2_MASK		0x007f0000
+# define TV_CC_DATA_2_SHIFT		16
+/** First word of CC data to be transmitted. */
+# define TV_CC_DATA_1_MASK		0x0000007f
+# define TV_CC_DATA_1_SHIFT		0
+
+#define TV_H_LUMA_0		0x68100
+#define TV_H_LUMA_59		0x681ec
+#define TV_H_CHROMA_0		0x68200
+#define TV_H_CHROMA_59		0x682ec
+#define TV_V_LUMA_0		0x68300
+#define TV_V_LUMA_42		0x683a8
+#define TV_V_CHROMA_0		0x68400
+#define TV_V_CHROMA_42		0x684a8
+
+/* Display Port */
+#define DP_A				0x64000 /* eDP */
+#define DP_B				0x64100
+#define DP_C				0x64200
+#define DP_D				0x64300
+
+#define   DP_PORT_EN			(1 << 31)
+#define   DP_PIPEB_SELECT		(1 << 30)
+#define   DP_PIPE_MASK			(1 << 30)
+
+/* Link training mode - select a suitable mode for each stage */
+#define   DP_LINK_TRAIN_PAT_1		(0 << 28)
+#define   DP_LINK_TRAIN_PAT_2		(1 << 28)
+#define   DP_LINK_TRAIN_PAT_IDLE	(2 << 28)
+#define   DP_LINK_TRAIN_OFF		(3 << 28)
+#define   DP_LINK_TRAIN_MASK		(3 << 28)
+#define   DP_LINK_TRAIN_SHIFT		28
+
+/* CPT Link training mode */
+#define   DP_LINK_TRAIN_PAT_1_CPT	(0 << 8)
+#define   DP_LINK_TRAIN_PAT_2_CPT	(1 << 8)
+#define   DP_LINK_TRAIN_PAT_IDLE_CPT	(2 << 8)
+#define   DP_LINK_TRAIN_OFF_CPT		(3 << 8)
+#define   DP_LINK_TRAIN_MASK_CPT	(7 << 8)
+#define   DP_LINK_TRAIN_SHIFT_CPT	8
+
+/* Signal voltages. These are mostly controlled by the other end */
+#define   DP_VOLTAGE_0_4		(0 << 25)
+#define   DP_VOLTAGE_0_6		(1 << 25)
+#define   DP_VOLTAGE_0_8		(2 << 25)
+#define   DP_VOLTAGE_1_2		(3 << 25)
+#define   DP_VOLTAGE_MASK		(7 << 25)
+#define   DP_VOLTAGE_SHIFT		25
+
+/* Signal pre-emphasis levels, like voltages, the other end tells us what
+ * they want
+ */
+#define   DP_PRE_EMPHASIS_0		(0 << 22)
+#define   DP_PRE_EMPHASIS_3_5		(1 << 22)
+#define   DP_PRE_EMPHASIS_6		(2 << 22)
+#define   DP_PRE_EMPHASIS_9_5		(3 << 22)
+#define   DP_PRE_EMPHASIS_MASK		(7 << 22)
+#define   DP_PRE_EMPHASIS_SHIFT		22
+
+/* How many wires to use. I guess 3 was too hard */
+#define   DP_PORT_WIDTH_1		(0 << 19)
+#define   DP_PORT_WIDTH_2		(1 << 19)
+#define   DP_PORT_WIDTH_4		(3 << 19)
+#define   DP_PORT_WIDTH_MASK		(7 << 19)
+
+/* Mystic DPCD version 1.1 special mode */
+#define   DP_ENHANCED_FRAMING		(1 << 18)
+
+/* eDP */
+#define   DP_PLL_FREQ_270MHZ		(0 << 16)
+#define   DP_PLL_FREQ_160MHZ		(1 << 16)
+#define   DP_PLL_FREQ_MASK		(3 << 16)
+
+/** locked once port is enabled */
+#define   DP_PORT_REVERSAL		(1 << 15)
+
+/* eDP */
+#define   DP_PLL_ENABLE			(1 << 14)
+
+/** sends the clock on lane 15 of the PEG for debug */
+#define   DP_CLOCK_OUTPUT_ENABLE	(1 << 13)
+
+#define   DP_SCRAMBLING_DISABLE		(1 << 12)
+#define   DP_SCRAMBLING_DISABLE_IRONLAKE	(1 << 7)
+
+/** limit RGB values to avoid confusing TVs */
+#define   DP_COLOR_RANGE_16_235		(1 << 8)
+
+/** Turn on the audio link */
+#define   DP_AUDIO_OUTPUT_ENABLE	(1 << 6)
+
+/** vs and hs sync polarity */
+#define   DP_SYNC_VS_HIGH		(1 << 4)
+#define   DP_SYNC_HS_HIGH		(1 << 3)
+
+/** A fantasy */
+#define   DP_DETECTED			(1 << 2)
+
+/** The aux channel provides a way to talk to the
+ * signal sink for DDC etc. Max packet size supported
+ * is 20 bytes in each direction, hence the 5 fixed
+ * data registers
+ */
+#define DPA_AUX_CH_CTL			0x64010
+#define DPA_AUX_CH_DATA1		0x64014
+#define DPA_AUX_CH_DATA2		0x64018
+#define DPA_AUX_CH_DATA3		0x6401c
+#define DPA_AUX_CH_DATA4		0x64020
+#define DPA_AUX_CH_DATA5		0x64024
+
+#define DPB_AUX_CH_CTL			0x64110
+#define DPB_AUX_CH_DATA1		0x64114
+#define DPB_AUX_CH_DATA2		0x64118
+#define DPB_AUX_CH_DATA3		0x6411c
+#define DPB_AUX_CH_DATA4		0x64120
+#define DPB_AUX_CH_DATA5		0x64124
+
+#define DPC_AUX_CH_CTL			0x64210
+#define DPC_AUX_CH_DATA1		0x64214
+#define DPC_AUX_CH_DATA2		0x64218
+#define DPC_AUX_CH_DATA3		0x6421c
+#define DPC_AUX_CH_DATA4		0x64220
+#define DPC_AUX_CH_DATA5		0x64224
+
+#define DPD_AUX_CH_CTL			0x64310
+#define DPD_AUX_CH_DATA1		0x64314
+#define DPD_AUX_CH_DATA2		0x64318
+#define DPD_AUX_CH_DATA3		0x6431c
+#define DPD_AUX_CH_DATA4		0x64320
+#define DPD_AUX_CH_DATA5		0x64324
+
+#define   DP_AUX_CH_CTL_SEND_BUSY	    (1 << 31)
+#define   DP_AUX_CH_CTL_DONE		    (1 << 30)
+#define   DP_AUX_CH_CTL_INTERRUPT	    (1 << 29)
+#define   DP_AUX_CH_CTL_TIME_OUT_ERROR	    (1 << 28)
+#define   DP_AUX_CH_CTL_TIME_OUT_400us	    (0 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_600us	    (1 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_800us	    (2 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_1600us	    (3 << 26)
+#define   DP_AUX_CH_CTL_TIME_OUT_MASK	    (3 << 26)
+#define   DP_AUX_CH_CTL_RECEIVE_ERROR	    (1 << 25)
+#define   DP_AUX_CH_CTL_MESSAGE_SIZE_MASK    (0x1f << 20)
+#define   DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT   20
+#define   DP_AUX_CH_CTL_PRECHARGE_2US_MASK   (0xf << 16)
+#define   DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT  16
+#define   DP_AUX_CH_CTL_AUX_AKSV_SELECT	    (1 << 15)
+#define   DP_AUX_CH_CTL_MANCHESTER_TEST	    (1 << 14)
+#define   DP_AUX_CH_CTL_SYNC_TEST	    (1 << 13)
+#define   DP_AUX_CH_CTL_DEGLITCH_TEST	    (1 << 12)
+#define   DP_AUX_CH_CTL_PRECHARGE_TEST	    (1 << 11)
+#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK    (0x7ff)
+#define   DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT   0
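+/*
+ * Sketch (illustrative, not the driver's actual helper): after loading up
+ * to 20 bytes into the DATA1..5 registers, an AUX write of `len` bytes with
+ * hypothetical `precharge` and `clock_div` values might be kicked off as
+ * below; the done/error bits are written back as ones to clear them:
+ *
+ *   I915_WRITE(DPA_AUX_CH_CTL,
+ *              DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_DONE |
+ *              DP_AUX_CH_CTL_TIME_OUT_400us |
+ *              DP_AUX_CH_CTL_TIME_OUT_ERROR | DP_AUX_CH_CTL_RECEIVE_ERROR |
+ *              (len << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ *              (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ *              (clock_div << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+ */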
+
+/*
+ * Computing GMCH M and N values for the Display Port link
+ *
+ * GMCH M/N = (dot clock * bytes per pixel) / (ls_clk * # of lanes)
+ *
+ * ls_clk (we assume) is the DP link symbol clock (162 or 270 MHz, i.e. a
+ * 1.62 or 2.7 Gbps link rate)
+ *
+ * The GMCH value is used internally
+ *
+ * bytes_per_pixel is the number of bytes coming out of the plane,
+ * which is after the LUTs, so we want the bytes for our color format.
+ * For our current usage, this is always 3, one byte for R, G and B.
+ */
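+/*
+ * Worked example (illustrative): a 148.5 MHz dot clock at 3 bytes per pixel
+ * over 4 lanes of a 2.7 Gbps link gives
+ * M/N = (148500 * 3) / (270000 * 4) = 445500 / 1080000 = 0.4125.
+ */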
+#define _PIPEA_GMCH_DATA_M			0x70050
+#define _PIPEB_GMCH_DATA_M			0x71050
+
+/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
+#define  TU_SIZE(x)             (((x)-1) << 25) /* default size 64 */
+#define  TU_SIZE_MASK           (0x3f << 25)
+
+#define  DATA_LINK_M_N_MASK	(0xffffff)
+#define  DATA_LINK_N_MAX	(0x800000)
+
+#define _PIPEA_GMCH_DATA_N			0x70054
+#define _PIPEB_GMCH_DATA_N			0x71054
+
+/*
+ * Computing Link M and N values for the Display Port link
+ *
+ * Link M / N = pixel_clock / ls_clk
+ *
+ * (the DP spec calls pixel_clock the 'strm_clk')
+ *
+ * The Link value is transmitted in the Main Stream
+ * Attributes and VB-ID.
+ */
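+/*
+ * Worked example (illustrative): with the 148.5 MHz pixel clock above on a
+ * 2.7 Gbps link, Link M/N = 148500 / 270000 = 0.55.
+ */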
+
+#define _PIPEA_DP_LINK_M				0x70060
+#define _PIPEB_DP_LINK_M				0x71060
+
+#define _PIPEA_DP_LINK_N				0x70064
+#define _PIPEB_DP_LINK_N				0x71064
+
+#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
+#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
+#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
+#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
+
+/* Display & cursor control */
+
+/* Pipe A */
+#define _PIPEADSL		(dev_priv->info->display_mmio_offset + 0x70000)
+#define   DSL_LINEMASK_GEN2	0x00000fff
+#define   DSL_LINEMASK_GEN3	0x00001fff
+#define _PIPEACONF		(dev_priv->info->display_mmio_offset + 0x70008)
+#define   PIPECONF_ENABLE	(1<<31)
+#define   PIPECONF_DISABLE	0
+#define   PIPECONF_DOUBLE_WIDE	(1<<30)
+#define   I965_PIPECONF_ACTIVE	(1<<30)
+#define   PIPECONF_FRAME_START_DELAY_MASK (3<<27)
+#define   PIPECONF_SINGLE_WIDE	0
+#define   PIPECONF_PIPE_UNLOCKED 0
+#define   PIPECONF_PIPE_LOCKED	(1<<25)
+#define   PIPECONF_PALETTE	0
+#define   PIPECONF_GAMMA		(1<<24)
+#define   PIPECONF_FORCE_BORDER	(1<<25)
+#define   PIPECONF_INTERLACE_MASK	(7 << 21)
+#define   PIPECONF_INTERLACE_MASK_HSW	(3 << 21)
+/* Note that pre-gen3 does not support interlaced display directly. Panel
+ * fitting must be disabled on pre-ilk for interlaced. */
+#define   PIPECONF_PROGRESSIVE			(0 << 21)
+#define   PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL	(4 << 21) /* gen4 only */
+#define   PIPECONF_INTERLACE_W_SYNC_SHIFT	(5 << 21) /* gen4 only */
+#define   PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
+#define   PIPECONF_INTERLACE_FIELD_0_ONLY	(7 << 21) /* gen3 only */
+/* Ironlake and later have a completely new set of values for interlaced. PFIT
+ * means panel fitter required, PF means progressive fetch, DBL means power
+ * saving pixel doubling. */
+#define   PIPECONF_PFIT_PF_INTERLACED_ILK	(1 << 21)
+#define   PIPECONF_INTERLACED_ILK		(3 << 21)
+#define   PIPECONF_INTERLACED_DBL_ILK		(4 << 21) /* ilk/snb only */
+#define   PIPECONF_PFIT_PF_INTERLACED_DBL_ILK	(5 << 21) /* ilk/snb only */
+#define   PIPECONF_CXSR_DOWNCLOCK	(1<<16)
+#define   PIPECONF_COLOR_RANGE_SELECT	(1 << 13)
+#define   PIPECONF_BPC_MASK	(0x7 << 5)
+#define   PIPECONF_8BPC		(0<<5)
+#define   PIPECONF_10BPC	(1<<5)
+#define   PIPECONF_6BPC		(2<<5)
+#define   PIPECONF_12BPC	(3<<5)
+#define   PIPECONF_DITHER_EN	(1<<4)
+#define   PIPECONF_DITHER_TYPE_MASK (0x0000000c)
+#define   PIPECONF_DITHER_TYPE_SP (0<<2)
+#define   PIPECONF_DITHER_TYPE_ST1 (1<<2)
+#define   PIPECONF_DITHER_TYPE_ST2 (2<<2)
+#define   PIPECONF_DITHER_TYPE_TEMP (3<<2)
+#define _PIPEASTAT		(dev_priv->info->display_mmio_offset + 0x70024)
+#define   PIPE_FIFO_UNDERRUN_STATUS		(1UL<<31)
+#define   SPRITE1_FLIPDONE_INT_EN_VLV		(1UL<<30)
+#define   PIPE_CRC_ERROR_ENABLE			(1UL<<29)
+#define   PIPE_CRC_DONE_ENABLE			(1UL<<28)
+#define   PIPE_GMBUS_EVENT_ENABLE		(1UL<<27)
+#define   PLANE_FLIP_DONE_INT_EN_VLV		(1UL<<26)
+#define   PIPE_HOTPLUG_INTERRUPT_ENABLE		(1UL<<26)
+#define   PIPE_VSYNC_INTERRUPT_ENABLE		(1UL<<25)
+#define   PIPE_DISPLAY_LINE_COMPARE_ENABLE	(1UL<<24)
+#define   PIPE_DPST_EVENT_ENABLE		(1UL<<23)
+#define   SPRITE0_FLIP_DONE_INT_EN_VLV		(1UL<<22)
+#define   PIPE_LEGACY_BLC_EVENT_ENABLE		(1UL<<22)
+#define   PIPE_ODD_FIELD_INTERRUPT_ENABLE	(1UL<<21)
+#define   PIPE_EVEN_FIELD_INTERRUPT_ENABLE	(1UL<<20)
+#define   PIPE_HOTPLUG_TV_INTERRUPT_ENABLE	(1UL<<18) /* pre-965 */
+#define   PIPE_START_VBLANK_INTERRUPT_ENABLE	(1UL<<18) /* 965 or later */
+#define   PIPE_VBLANK_INTERRUPT_ENABLE		(1UL<<17)
+#define   PIPEA_HBLANK_INT_EN_VLV		(1UL<<16)
+#define   PIPE_OVERLAY_UPDATED_ENABLE		(1UL<<16)
+#define   SPRITE1_FLIPDONE_INT_STATUS_VLV	(1UL<<15)
+#define   SPRITE0_FLIPDONE_INT_STATUS_VLV	(1UL<<14)
+#define   PIPE_CRC_ERROR_INTERRUPT_STATUS	(1UL<<13)
+#define   PIPE_CRC_DONE_INTERRUPT_STATUS	(1UL<<12)
+#define   PIPE_GMBUS_INTERRUPT_STATUS		(1UL<<11)
+#define   PLANE_FLIPDONE_INT_STATUS_VLV		(1UL<<10)
+#define   PIPE_HOTPLUG_INTERRUPT_STATUS		(1UL<<10)
+#define   PIPE_VSYNC_INTERRUPT_STATUS		(1UL<<9)
+#define   PIPE_DISPLAY_LINE_COMPARE_STATUS	(1UL<<8)
+#define   PIPE_DPST_EVENT_STATUS		(1UL<<7)
+#define   PIPE_LEGACY_BLC_EVENT_STATUS		(1UL<<6)
+#define   PIPE_ODD_FIELD_INTERRUPT_STATUS	(1UL<<5)
+#define   PIPE_EVEN_FIELD_INTERRUPT_STATUS	(1UL<<4)
+#define   PIPE_HOTPLUG_TV_INTERRUPT_STATUS	(1UL<<2) /* pre-965 */
+#define   PIPE_START_VBLANK_INTERRUPT_STATUS	(1UL<<2) /* 965 or later */
+#define   PIPE_VBLANK_INTERRUPT_STATUS		(1UL<<1)
+#define   PIPE_OVERLAY_UPDATED_STATUS		(1UL<<0)
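+/* Note the layout above: the high half of PIPESTAT holds the interrupt
+ * enable bits, the low half the corresponding sticky status bits. */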
+
+#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
+#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
+#define PIPEDSL(pipe)  _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
+#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe)  _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
+#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
+
+#define VLV_DPFLIPSTAT				(VLV_DISPLAY_BASE + 0x70028)
+#define   PIPEB_LINE_COMPARE_INT_EN		(1<<29)
+#define   PIPEB_HLINE_INT_EN			(1<<28)
+#define   PIPEB_VBLANK_INT_EN			(1<<27)
+#define   SPRITED_FLIPDONE_INT_EN		(1<<26)
+#define   SPRITEC_FLIPDONE_INT_EN		(1<<25)
+#define   PLANEB_FLIPDONE_INT_EN		(1<<24)
+#define   PIPEA_LINE_COMPARE_INT_EN		(1<<21)
+#define   PIPEA_HLINE_INT_EN			(1<<20)
+#define   PIPEA_VBLANK_INT_EN			(1<<19)
+#define   SPRITEB_FLIPDONE_INT_EN		(1<<18)
+#define   SPRITEA_FLIPDONE_INT_EN		(1<<17)
+#define   PLANEA_FLIPDONE_INT_EN		(1<<16)
+
+#define DPINVGTT				(VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
+#define   CURSORB_INVALID_GTT_INT_EN		(1<<23)
+#define   CURSORA_INVALID_GTT_INT_EN		(1<<22)
+#define   SPRITED_INVALID_GTT_INT_EN		(1<<21)
+#define   SPRITEC_INVALID_GTT_INT_EN		(1<<20)
+#define   PLANEB_INVALID_GTT_INT_EN		(1<<19)
+#define   SPRITEB_INVALID_GTT_INT_EN		(1<<18)
+#define   SPRITEA_INVALID_GTT_INT_EN		(1<<17)
+#define   PLANEA_INVALID_GTT_INT_EN		(1<<16)
+#define   DPINVGTT_EN_MASK			0xff0000
+#define   CURSORB_INVALID_GTT_STATUS		(1<<7)
+#define   CURSORA_INVALID_GTT_STATUS		(1<<6)
+#define   SPRITED_INVALID_GTT_STATUS		(1<<5)
+#define   SPRITEC_INVALID_GTT_STATUS		(1<<4)
+#define   PLANEB_INVALID_GTT_STATUS		(1<<3)
+#define   SPRITEB_INVALID_GTT_STATUS		(1<<2)
+#define   SPRITEA_INVALID_GTT_STATUS		(1<<1)
+#define   PLANEA_INVALID_GTT_STATUS		(1<<0)
+#define   DPINVGTT_STATUS_MASK			0xff
+
+#define DSPARB			0x70030
+#define   DSPARB_CSTART_MASK	(0x7f << 7)
+#define   DSPARB_CSTART_SHIFT	7
+#define   DSPARB_BSTART_MASK	(0x7f)
+#define   DSPARB_BSTART_SHIFT	0
+#define   DSPARB_BEND_SHIFT	9 /* on 855 */
+#define   DSPARB_AEND_SHIFT	0
+
+#define DSPFW1			(dev_priv->info->display_mmio_offset + 0x70034)
+#define   DSPFW_SR_SHIFT	23
+#define   DSPFW_SR_MASK		(0x1ff<<23)
+#define   DSPFW_CURSORB_SHIFT	16
+#define   DSPFW_CURSORB_MASK	(0x3f<<16)
+#define   DSPFW_PLANEB_SHIFT	8
+#define   DSPFW_PLANEB_MASK	(0x7f<<8)
+#define   DSPFW_PLANEA_MASK	(0x7f)
+#define DSPFW2			(dev_priv->info->display_mmio_offset + 0x70038)
+#define   DSPFW_CURSORA_MASK	0x00003f00
+#define   DSPFW_CURSORA_SHIFT	8
+#define   DSPFW_PLANEC_MASK	(0x7f)
+#define DSPFW3			(dev_priv->info->display_mmio_offset + 0x7003c)
+#define   DSPFW_HPLL_SR_EN	(1<<31)
+#define   DSPFW_CURSOR_SR_SHIFT	24
+#define   PINEVIEW_SELF_REFRESH_EN	(1<<30)
+#define   DSPFW_CURSOR_SR_MASK		(0x3f<<24)
+#define   DSPFW_HPLL_CURSOR_SHIFT	16
+#define   DSPFW_HPLL_CURSOR_MASK	(0x3f<<16)
+#define   DSPFW_HPLL_SR_MASK		(0x1ff)
+#define DSPFW4			(dev_priv->info->display_mmio_offset + 0x70070)
+#define DSPFW7			(dev_priv->info->display_mmio_offset + 0x7007c)
+
+/* drain latency register values*/
+#define DRAIN_LATENCY_PRECISION_32	32
+#define DRAIN_LATENCY_PRECISION_16	16
+#define VLV_DDL1			(VLV_DISPLAY_BASE + 0x70050)
+#define DDL_CURSORA_PRECISION_32	(1<<31)
+#define DDL_CURSORA_PRECISION_16	(0<<31)
+#define DDL_CURSORA_SHIFT		24
+#define DDL_PLANEA_PRECISION_32		(1<<7)
+#define DDL_PLANEA_PRECISION_16		(0<<7)
+#define VLV_DDL2			(VLV_DISPLAY_BASE + 0x70054)
+#define DDL_CURSORB_PRECISION_32	(1<<31)
+#define DDL_CURSORB_PRECISION_16	(0<<31)
+#define DDL_CURSORB_SHIFT		24
+#define DDL_PLANEB_PRECISION_32		(1<<7)
+#define DDL_PLANEB_PRECISION_16		(0<<7)
+
+/* FIFO watermark sizes etc */
+#define G4X_FIFO_LINE_SIZE	64
+#define I915_FIFO_LINE_SIZE	64
+#define I830_FIFO_LINE_SIZE	32
+
+#define VALLEYVIEW_FIFO_SIZE	255
+#define G4X_FIFO_SIZE		127
+#define I965_FIFO_SIZE		512
+#define I945_FIFO_SIZE		127
+#define I915_FIFO_SIZE		95
+#define I855GM_FIFO_SIZE	127 /* In cachelines */
+#define I830_FIFO_SIZE		95
+
+#define VALLEYVIEW_MAX_WM	0xff
+#define G4X_MAX_WM		0x3f
+#define I915_MAX_WM		0x3f
+
+#define PINEVIEW_DISPLAY_FIFO	512 /* in 64byte unit */
+#define PINEVIEW_FIFO_LINE_SIZE	64
+#define PINEVIEW_MAX_WM		0x1ff
+#define PINEVIEW_DFT_WM		0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM	0
+#define PINEVIEW_GUARD_WM		10
+#define PINEVIEW_CURSOR_FIFO		64
+#define PINEVIEW_CURSOR_MAX_WM	0x3f
+#define PINEVIEW_CURSOR_DFT_WM	0
+#define PINEVIEW_CURSOR_GUARD_WM	5
+
+#define VALLEYVIEW_CURSOR_MAX_WM 64
+#define I965_CURSOR_FIFO	64
+#define I965_CURSOR_MAX_WM	32
+#define I965_CURSOR_DFT_WM	8
+
+/* define the Watermark register on Ironlake */
+#define WM0_PIPEA_ILK		0x45100
+#define  WM0_PIPE_PLANE_MASK	(0x7f<<16)
+#define  WM0_PIPE_PLANE_SHIFT	16
+#define  WM0_PIPE_SPRITE_MASK	(0x3f<<8)
+#define  WM0_PIPE_SPRITE_SHIFT	8
+#define  WM0_PIPE_CURSOR_MASK	(0x1f)
+
+#define WM0_PIPEB_ILK		0x45104
+#define WM0_PIPEC_IVB		0x45200
+#define WM1_LP_ILK		0x45108
+#define  WM1_LP_SR_EN		(1<<31)
+#define  WM1_LP_LATENCY_SHIFT	24
+#define  WM1_LP_LATENCY_MASK	(0x7f<<24)
+#define  WM1_LP_FBC_MASK	(0xf<<20)
+#define  WM1_LP_FBC_SHIFT	20
+#define  WM1_LP_SR_MASK		(0x1ff<<8)
+#define  WM1_LP_SR_SHIFT	8
+#define  WM1_LP_CURSOR_MASK	(0x3f)
+#define WM2_LP_ILK		0x4510c
+#define  WM2_LP_EN		(1<<31)
+#define WM3_LP_ILK		0x45110
+#define  WM3_LP_EN		(1<<31)
+#define WM1S_LP_ILK		0x45120
+#define WM2S_LP_IVB		0x45124
+#define WM3S_LP_IVB		0x45128
+#define  WM1S_LP_EN		(1<<31)
+
+/* Memory latency timer register */
+#define MLTR_ILK		0x11222
+#define  MLTR_WM1_SHIFT		0
+#define  MLTR_WM2_SHIFT		8
+/* the unit of memory self-refresh latency time is 0.5us */
+#define  ILK_SRLT_MASK		0x3f
+#define ILK_LATENCY(shift)	((I915_READ(MLTR_ILK) >> (shift)) & ILK_SRLT_MASK)
+#define ILK_READ_WM1_LATENCY()	ILK_LATENCY(MLTR_WM1_SHIFT)
+#define ILK_READ_WM2_LATENCY()	ILK_LATENCY(MLTR_WM2_SHIFT)
+
+/* define the fifo size on Ironlake */
+#define ILK_DISPLAY_FIFO	128
+#define ILK_DISPLAY_MAXWM	64
+#define ILK_DISPLAY_DFTWM	8
+#define ILK_CURSOR_FIFO		32
+#define ILK_CURSOR_MAXWM	16
+#define ILK_CURSOR_DFTWM	8
+
+#define ILK_DISPLAY_SR_FIFO	512
+#define ILK_DISPLAY_MAX_SRWM	0x1ff
+#define ILK_DISPLAY_DFT_SRWM	0x3f
+#define ILK_CURSOR_SR_FIFO	64
+#define ILK_CURSOR_MAX_SRWM	0x3f
+#define ILK_CURSOR_DFT_SRWM	8
+
+#define ILK_FIFO_LINE_SIZE	64
+
+/* define the WM info on Sandybridge */
+#define SNB_DISPLAY_FIFO	128
+#define SNB_DISPLAY_MAXWM	0x7f	/* bit 16:22 */
+#define SNB_DISPLAY_DFTWM	8
+#define SNB_CURSOR_FIFO		32
+#define SNB_CURSOR_MAXWM	0x1f	/* bit 4:0 */
+#define SNB_CURSOR_DFTWM	8
+
+#define SNB_DISPLAY_SR_FIFO	512
+#define SNB_DISPLAY_MAX_SRWM	0x1ff	/* bit 16:8 */
+#define SNB_DISPLAY_DFT_SRWM	0x3f
+#define SNB_CURSOR_SR_FIFO	64
+#define SNB_CURSOR_MAX_SRWM	0x3f	/* bit 5:0 */
+#define SNB_CURSOR_DFT_SRWM	8
+
+#define SNB_FBC_MAX_SRWM	0xf	/* bit 23:20 */
+
+#define SNB_FIFO_LINE_SIZE	64
+
+
+/* the register where we read all kinds of latency values */
+#define SSKPD			0x5d10
+#define SSKPD_WM_MASK		0x3f
+#define SSKPD_WM0_SHIFT		0
+#define SSKPD_WM1_SHIFT		8
+#define SSKPD_WM2_SHIFT		16
+#define SSKPD_WM3_SHIFT		24
+
+#define SNB_LATENCY(shift)	((I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift)) & SSKPD_WM_MASK)
+#define SNB_READ_WM0_LATENCY()		SNB_LATENCY(SSKPD_WM0_SHIFT)
+#define SNB_READ_WM1_LATENCY()		SNB_LATENCY(SSKPD_WM1_SHIFT)
+#define SNB_READ_WM2_LATENCY()		SNB_LATENCY(SSKPD_WM2_SHIFT)
+#define SNB_READ_WM3_LATENCY()		SNB_LATENCY(SSKPD_WM3_SHIFT)
+
+/*
+ * The two pipe frame counter registers are not synchronized, so
+ * reading a stable value is somewhat tricky. The following code
+ * should work:
+ *
+ *  do {
+ *    high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ *             PIPE_FRAME_HIGH_SHIFT);
+ *    low1 =  ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
+ *             PIPE_FRAME_LOW_SHIFT);
+ *    high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ *             PIPE_FRAME_HIGH_SHIFT);
+ *  } while (high1 != high2);
+ *  frame = (high1 << 8) | low1;
+ */
+#define _PIPEAFRAMEHIGH          (dev_priv->info->display_mmio_offset + 0x70040)
+#define   PIPE_FRAME_HIGH_MASK    0x0000ffff
+#define   PIPE_FRAME_HIGH_SHIFT   0
+#define _PIPEAFRAMEPIXEL         (dev_priv->info->display_mmio_offset + 0x70044)
+#define   PIPE_FRAME_LOW_MASK     0xff000000
+#define   PIPE_FRAME_LOW_SHIFT    24
+#define   PIPE_PIXEL_MASK         0x00ffffff
+#define   PIPE_PIXEL_SHIFT        0
+/* GM45+ just has to be different */
+#define _PIPEA_FRMCOUNT_GM45	0x70040
+#define _PIPEA_FLIPCOUNT_GM45	0x70044
+#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
+
+/* Cursor A & B regs */
+#define _CURACNTR		(dev_priv->info->display_mmio_offset + 0x70080)
+/* Old style CUR*CNTR flags (desktop 8xx) */
+#define   CURSOR_ENABLE		0x80000000
+#define   CURSOR_GAMMA_ENABLE	0x40000000
+#define   CURSOR_STRIDE_MASK	0x30000000
+#define   CURSOR_PIPE_CSC_ENABLE (1<<24)
+#define   CURSOR_FORMAT_SHIFT	24
+#define   CURSOR_FORMAT_MASK	(0x07 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_2C	(0x00 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_3C	(0x01 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_4C	(0x02 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_ARGB	(0x04 << CURSOR_FORMAT_SHIFT)
+#define   CURSOR_FORMAT_XRGB	(0x05 << CURSOR_FORMAT_SHIFT)
+/* New style CUR*CNTR flags */
+#define   CURSOR_MODE		0x27
+#define   CURSOR_MODE_DISABLE   0x00
+#define   CURSOR_MODE_64_32B_AX 0x07
+#define   CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define   MCURSOR_PIPE_SELECT	(1 << 28)
+#define   MCURSOR_PIPE_A	0x00
+#define   MCURSOR_PIPE_B	(1 << 28)
+#define   MCURSOR_GAMMA_ENABLE  (1 << 26)
+#define _CURABASE		(dev_priv->info->display_mmio_offset + 0x70084)
+#define _CURAPOS		(dev_priv->info->display_mmio_offset + 0x70088)
+#define   CURSOR_POS_MASK       0x007FF
+#define   CURSOR_POS_SIGN       0x8000
+#define   CURSOR_X_SHIFT        0
+#define   CURSOR_Y_SHIFT        16
+#define CURSIZE			0x700a0
+#define _CURBCNTR		(dev_priv->info->display_mmio_offset + 0x700c0)
+#define _CURBBASE		(dev_priv->info->display_mmio_offset + 0x700c4)
+#define _CURBPOS		(dev_priv->info->display_mmio_offset + 0x700c8)
+
+#define _CURBCNTR_IVB		0x71080
+#define _CURBBASE_IVB		0x71084
+#define _CURBPOS_IVB		0x71088
+
+#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
+#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
+#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
+
+#define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB)
+#define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB)
+#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
+
+/* Display A control */
+#define _DSPACNTR                (dev_priv->info->display_mmio_offset + 0x70180)
+#define   DISPLAY_PLANE_ENABLE			(1<<31)
+#define   DISPLAY_PLANE_DISABLE			0
+#define   DISPPLANE_GAMMA_ENABLE		(1<<30)
+#define   DISPPLANE_GAMMA_DISABLE		0
+#define   DISPPLANE_PIXFORMAT_MASK		(0xf<<26)
+#define   DISPPLANE_YUV422			(0x0<<26)
+#define   DISPPLANE_8BPP			(0x2<<26)
+#define   DISPPLANE_BGRA555			(0x3<<26)
+#define   DISPPLANE_BGRX555			(0x4<<26)
+#define   DISPPLANE_BGRX565			(0x5<<26)
+#define   DISPPLANE_BGRX888			(0x6<<26)
+#define   DISPPLANE_BGRA888			(0x7<<26)
+#define   DISPPLANE_RGBX101010			(0x8<<26)
+#define   DISPPLANE_RGBA101010			(0x9<<26)
+#define   DISPPLANE_BGRX101010			(0xa<<26)
+#define   DISPPLANE_RGBX161616			(0xc<<26)
+#define   DISPPLANE_RGBX888			(0xe<<26)
+#define   DISPPLANE_RGBA888			(0xf<<26)
+#define   DISPPLANE_STEREO_ENABLE		(1<<25)
+#define   DISPPLANE_STEREO_DISABLE		0
+#define   DISPPLANE_PIPE_CSC_ENABLE		(1<<24)
+#define   DISPPLANE_SEL_PIPE_SHIFT		24
+#define   DISPPLANE_SEL_PIPE_MASK		(3<<DISPPLANE_SEL_PIPE_SHIFT)
+#define   DISPPLANE_SEL_PIPE_A			0
+#define   DISPPLANE_SEL_PIPE_B			(1<<DISPPLANE_SEL_PIPE_SHIFT)
+#define   DISPPLANE_SRC_KEY_ENABLE		(1<<22)
+#define   DISPPLANE_SRC_KEY_DISABLE		0
+#define   DISPPLANE_LINE_DOUBLE			(1<<20)
+#define   DISPPLANE_NO_LINE_DOUBLE		0
+#define   DISPPLANE_STEREO_POLARITY_FIRST	0
+#define   DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
+#define   DISPPLANE_TRICKLE_FEED_DISABLE	(1<<14) /* Ironlake */
+#define   DISPPLANE_TILED			(1<<10)
+#define _DSPAADDR		(dev_priv->info->display_mmio_offset + 0x70184)
+#define _DSPASTRIDE		(dev_priv->info->display_mmio_offset + 0x70188)
+#define _DSPAPOS		(dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */
+#define _DSPASIZE		(dev_priv->info->display_mmio_offset + 0x70190)
+#define _DSPASURF		(dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */
+#define _DSPATILEOFF		(dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */
+#define _DSPAOFFSET		(dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */
+#define _DSPASURFLIVE		(dev_priv->info->display_mmio_offset + 0x701AC)
+
+#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
+#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
+#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE)
+#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS)
+#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
+#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
+#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+#define DSPLINOFF(plane) DSPADDR(plane)
+#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
+#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
+
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK	(0xfffff000)
+#define I915_LO_DISPBASE(val)	(val & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val)	(val & DISP_BASEADDR_MASK)
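+/* Write a new page-aligned surface address while preserving whatever low
+ * offset bits are already in the register. */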
+#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
+		(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
+
+/* VBIOS flags */
+#define SWF00			(dev_priv->info->display_mmio_offset + 0x71410)
+#define SWF01			(dev_priv->info->display_mmio_offset + 0x71414)
+#define SWF02			(dev_priv->info->display_mmio_offset + 0x71418)
+#define SWF03			(dev_priv->info->display_mmio_offset + 0x7141c)
+#define SWF04			(dev_priv->info->display_mmio_offset + 0x71420)
+#define SWF05			(dev_priv->info->display_mmio_offset + 0x71424)
+#define SWF06			(dev_priv->info->display_mmio_offset + 0x71428)
+#define SWF10			(dev_priv->info->display_mmio_offset + 0x70410)
+#define SWF11			(dev_priv->info->display_mmio_offset + 0x70414)
+#define SWF14			(dev_priv->info->display_mmio_offset + 0x71420)
+#define SWF30			(dev_priv->info->display_mmio_offset + 0x72414)
+#define SWF31			(dev_priv->info->display_mmio_offset + 0x72418)
+#define SWF32			(dev_priv->info->display_mmio_offset + 0x7241c)
+
+/* Pipe B */
+#define _PIPEBDSL		(dev_priv->info->display_mmio_offset + 0x71000)
+#define _PIPEBCONF		(dev_priv->info->display_mmio_offset + 0x71008)
+#define _PIPEBSTAT		(dev_priv->info->display_mmio_offset + 0x71024)
+#define _PIPEBFRAMEHIGH		(dev_priv->info->display_mmio_offset + 0x71040)
+#define _PIPEBFRAMEPIXEL	(dev_priv->info->display_mmio_offset + 0x71044)
+#define _PIPEB_FRMCOUNT_GM45	0x71040
+#define _PIPEB_FLIPCOUNT_GM45	0x71044
+
+
+/* Display B control */
+#define _DSPBCNTR		(dev_priv->info->display_mmio_offset + 0x71180)
+#define   DISPPLANE_ALPHA_TRANS_ENABLE		(1<<15)
+#define   DISPPLANE_ALPHA_TRANS_DISABLE		0
+#define   DISPPLANE_SPRITE_ABOVE_DISPLAY	0
+#define   DISPPLANE_SPRITE_ABOVE_OVERLAY	(1)
+#define _DSPBADDR		(dev_priv->info->display_mmio_offset + 0x71184)
+#define _DSPBSTRIDE		(dev_priv->info->display_mmio_offset + 0x71188)
+#define _DSPBPOS		(dev_priv->info->display_mmio_offset + 0x7118C)
+#define _DSPBSIZE		(dev_priv->info->display_mmio_offset + 0x71190)
+#define _DSPBSURF		(dev_priv->info->display_mmio_offset + 0x7119C)
+#define _DSPBTILEOFF		(dev_priv->info->display_mmio_offset + 0x711A4)
+#define _DSPBOFFSET		(dev_priv->info->display_mmio_offset + 0x711A4)
+#define _DSPBSURFLIVE		(dev_priv->info->display_mmio_offset + 0x711AC)
+
+/* Sprite A control */
+#define _DVSACNTR		0x72180
+#define   DVS_ENABLE		(1<<31)
+#define   DVS_GAMMA_ENABLE	(1<<30)
+#define   DVS_PIXFORMAT_MASK	(3<<25)
+#define   DVS_FORMAT_YUV422	(0<<25)
+#define   DVS_FORMAT_RGBX101010	(1<<25)
+#define   DVS_FORMAT_RGBX888	(2<<25)
+#define   DVS_FORMAT_RGBX161616	(3<<25)
+#define   DVS_PIPE_CSC_ENABLE   (1<<24)
+#define   DVS_SOURCE_KEY	(1<<22)
+#define   DVS_RGB_ORDER_XBGR	(1<<20)
+#define   DVS_YUV_BYTE_ORDER_MASK (3<<16)
+#define   DVS_YUV_ORDER_YUYV	(0<<16)
+#define   DVS_YUV_ORDER_UYVY	(1<<16)
+#define   DVS_YUV_ORDER_YVYU	(2<<16)
+#define   DVS_YUV_ORDER_VYUY	(3<<16)
+#define   DVS_DEST_KEY		(1<<2)
+#define   DVS_TRICKLE_FEED_DISABLE (1<<14)
+#define   DVS_TILED		(1<<10)
+#define _DVSALINOFF		0x72184
+#define _DVSASTRIDE		0x72188
+#define _DVSAPOS		0x7218c
+#define _DVSASIZE		0x72190
+#define _DVSAKEYVAL		0x72194
+#define _DVSAKEYMSK		0x72198
+#define _DVSASURF		0x7219c
+#define _DVSAKEYMAXVAL		0x721a0
+#define _DVSATILEOFF		0x721a4
+#define _DVSASURFLIVE		0x721ac
+#define _DVSASCALE		0x72204
+#define   DVS_SCALE_ENABLE	(1<<31)
+#define   DVS_FILTER_MASK	(3<<29)
+#define   DVS_FILTER_MEDIUM	(0<<29)
+#define   DVS_FILTER_ENHANCING	(1<<29)
+#define   DVS_FILTER_SOFTENING	(2<<29)
+#define   DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define   DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _DVSAGAMC		0x72300
+
+#define _DVSBCNTR		0x73180
+#define _DVSBLINOFF		0x73184
+#define _DVSBSTRIDE		0x73188
+#define _DVSBPOS		0x7318c
+#define _DVSBSIZE		0x73190
+#define _DVSBKEYVAL		0x73194
+#define _DVSBKEYMSK		0x73198
+#define _DVSBSURF		0x7319c
+#define _DVSBKEYMAXVAL		0x731a0
+#define _DVSBTILEOFF		0x731a4
+#define _DVSBSURFLIVE		0x731ac
+#define _DVSBSCALE		0x73204
+#define _DVSBGAMC		0x73300
+
+#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
+
+#define _SPRA_CTL		0x70280
+#define   SPRITE_ENABLE			(1<<31)
+#define   SPRITE_GAMMA_ENABLE		(1<<30)
+#define   SPRITE_PIXFORMAT_MASK		(7<<25)
+#define   SPRITE_FORMAT_YUV422		(0<<25)
+#define   SPRITE_FORMAT_RGBX101010	(1<<25)
+#define   SPRITE_FORMAT_RGBX888		(2<<25)
+#define   SPRITE_FORMAT_RGBX161616	(3<<25)
+#define   SPRITE_FORMAT_YUV444		(4<<25)
+#define   SPRITE_FORMAT_XR_BGR101010	(5<<25) /* Extended range */
+#define   SPRITE_PIPE_CSC_ENABLE	(1<<24)
+#define   SPRITE_SOURCE_KEY		(1<<22)
+#define   SPRITE_RGB_ORDER_RGBX		(1<<20) /* only for 888 and 161616 */
+#define   SPRITE_YUV_TO_RGB_CSC_DISABLE	(1<<19)
+#define   SPRITE_YUV_CSC_FORMAT_BT709	(1<<18) /* 0 is BT601 */
+#define   SPRITE_YUV_BYTE_ORDER_MASK	(3<<16)
+#define   SPRITE_YUV_ORDER_YUYV		(0<<16)
+#define   SPRITE_YUV_ORDER_UYVY		(1<<16)
+#define   SPRITE_YUV_ORDER_YVYU		(2<<16)
+#define   SPRITE_YUV_ORDER_VYUY		(3<<16)
+#define   SPRITE_TRICKLE_FEED_DISABLE	(1<<14)
+#define   SPRITE_INT_GAMMA_ENABLE	(1<<13)
+#define   SPRITE_TILED			(1<<10)
+#define   SPRITE_DEST_KEY		(1<<2)
+#define _SPRA_LINOFF		0x70284
+#define _SPRA_STRIDE		0x70288
+#define _SPRA_POS		0x7028c
+#define _SPRA_SIZE		0x70290
+#define _SPRA_KEYVAL		0x70294
+#define _SPRA_KEYMSK		0x70298
+#define _SPRA_SURF		0x7029c
+#define _SPRA_KEYMAX		0x702a0
+#define _SPRA_TILEOFF		0x702a4
+#define _SPRA_OFFSET		0x702a4
+#define _SPRA_SURFLIVE		0x702ac
+#define _SPRA_SCALE		0x70304
+#define   SPRITE_SCALE_ENABLE	(1<<31)
+#define   SPRITE_FILTER_MASK	(3<<29)
+#define   SPRITE_FILTER_MEDIUM	(0<<29)
+#define   SPRITE_FILTER_ENHANCING	(1<<29)
+#define   SPRITE_FILTER_SOFTENING	(2<<29)
+#define   SPRITE_VERTICAL_OFFSET_HALF	(1<<28) /* must be enabled below */
+#define   SPRITE_VERTICAL_OFFSET_ENABLE	(1<<27)
+#define _SPRA_GAMC		0x70400
+
+#define _SPRB_CTL		0x71280
+#define _SPRB_LINOFF		0x71284
+#define _SPRB_STRIDE		0x71288
+#define _SPRB_POS		0x7128c
+#define _SPRB_SIZE		0x71290
+#define _SPRB_KEYVAL		0x71294
+#define _SPRB_KEYMSK		0x71298
+#define _SPRB_SURF		0x7129c
+#define _SPRB_KEYMAX		0x712a0
+#define _SPRB_TILEOFF		0x712a4
+#define _SPRB_OFFSET		0x712a4
+#define _SPRB_SURFLIVE		0x712ac
+#define _SPRB_SCALE		0x71304
+#define _SPRB_GAMC		0x71400
+
+#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
+#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
+
+#define _SPACNTR		0x72180
+#define   SP_ENABLE			(1<<31)
+#define   SP_GEAMMA_ENABLE		(1<<30)
+#define   SP_PIXFORMAT_MASK		(0xf<<26)
+#define   SP_FORMAT_YUV422		(0<<26)
+#define   SP_FORMAT_BGR565		(5<<26)
+#define   SP_FORMAT_BGRX8888		(6<<26)
+#define   SP_FORMAT_BGRA8888		(7<<26)
+#define   SP_FORMAT_RGBX1010102		(8<<26)
+#define   SP_FORMAT_RGBA1010102		(9<<26)
+#define   SP_FORMAT_RGBX8888		(0xe<<26)
+#define   SP_FORMAT_RGBA8888		(0xf<<26)
+#define   SP_SOURCE_KEY			(1<<22)
+#define   SP_YUV_BYTE_ORDER_MASK	(3<<16)
+#define   SP_YUV_ORDER_YUYV		(0<<16)
+#define   SP_YUV_ORDER_UYVY		(1<<16)
+#define   SP_YUV_ORDER_YVYU		(2<<16)
+#define   SP_YUV_ORDER_VYUY		(3<<16)
+#define   SP_TILED			(1<<10)
+#define _SPALINOFF		0x72184
+#define _SPASTRIDE		0x72188
+#define _SPAPOS			0x7218c
+#define _SPASIZE		0x72190
+#define _SPAKEYMINVAL		0x72194
+#define _SPAKEYMSK		0x72198
+#define _SPASURF		0x7219c
+#define _SPAKEYMAXVAL		0x721a0
+#define _SPATILEOFF		0x721a4
+#define _SPACONSTALPHA		0x721a8
+#define _SPAGAMC		0x721f4
+
+#define _SPBCNTR		0x72280
+#define _SPBLINOFF		0x72284
+#define _SPBSTRIDE		0x72288
+#define _SPBPOS			0x7228c
+#define _SPBSIZE		0x72290
+#define _SPBKEYMINVAL		0x72294
+#define _SPBKEYMSK		0x72298
+#define _SPBSURF		0x7229c
+#define _SPBKEYMAXVAL		0x722a0
+#define _SPBTILEOFF		0x722a4
+#define _SPBCONSTALPHA		0x722a8
+#define _SPBGAMC		0x722f4
+
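+/* VLV has two sprite planes per pipe and the register banks are 0x100
+ * apart, so the helpers below index them as pipe * 2 + plane; e.g.
+ * SPCNTR(1, 0) resolves to 0x72380 for pipe B's first sprite. */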
+#define SPCNTR(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)
+#define SPLINOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF)
+#define SPSTRIDE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE)
+#define SPPOS(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS)
+#define SPSIZE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE)
+#define SPKEYMINVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL)
+#define SPKEYMSK(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK)
+#define SPSURF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF)
+#define SPKEYMAXVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+#define SPTILEOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF)
+#define SPCONSTALPHA(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPGAMC(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC)
+
+/* VBIOS regs */
+#define VGACNTRL		0x71400
+# define VGA_DISP_DISABLE			(1 << 31)
+# define VGA_2X_MODE				(1 << 30)
+# define VGA_PIPE_B_SELECT			(1 << 29)
+
+#define VLV_VGACNTRL		(VLV_DISPLAY_BASE + 0x71400)
+
+/* Ironlake */
+
+#define CPU_VGACNTRL	0x41000
+
+#define DIGITAL_PORT_HOTPLUG_CNTRL      0x44030
+#define  DIGITAL_PORTA_HOTPLUG_ENABLE           (1 << 4)
+#define  DIGITAL_PORTA_SHORT_PULSE_2MS          (0 << 2)
+#define  DIGITAL_PORTA_SHORT_PULSE_4_5MS        (1 << 2)
+#define  DIGITAL_PORTA_SHORT_PULSE_6MS          (2 << 2)
+#define  DIGITAL_PORTA_SHORT_PULSE_100MS        (3 << 2)
+#define  DIGITAL_PORTA_NO_DETECT                (0 << 0)
+#define  DIGITAL_PORTA_LONG_PULSE_DETECT_MASK   (1 << 1)
+#define  DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK  (1 << 0)
+
+/* refresh rate hardware control */
+#define RR_HW_CTL       0x45300
+#define  RR_HW_LOW_POWER_FRAMES_MASK    0xff
+#define  RR_HW_HIGH_POWER_FRAMES_MASK   0xff00
+
+#define FDI_PLL_BIOS_0  0x46000
+#define  FDI_PLL_FB_CLOCK_MASK  0xff
+#define FDI_PLL_BIOS_1  0x46004
+#define FDI_PLL_BIOS_2  0x46008
+#define DISPLAY_PORT_PLL_BIOS_0         0x4600c
+#define DISPLAY_PORT_PLL_BIOS_1         0x46010
+#define DISPLAY_PORT_PLL_BIOS_2         0x46014
+
+#define PCH_3DCGDIS0		0x46020
+# define MARIUNIT_CLOCK_GATE_DISABLE		(1 << 18)
+# define SVSMUNIT_CLOCK_GATE_DISABLE		(1 << 1)
+
+#define PCH_3DCGDIS1		0x46024
+# define VFMUNIT_CLOCK_GATE_DISABLE		(1 << 11)
+
+#define FDI_PLL_FREQ_CTL        0x46030
+#define  FDI_PLL_FREQ_CHANGE_REQUEST    (1<<24)
+#define  FDI_PLL_FREQ_LOCK_LIMIT_MASK   0xfff00
+#define  FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK  0xff
+
+
+#define _PIPEA_DATA_M1           (dev_priv->info->display_mmio_offset + 0x60030)
+#define  PIPE_DATA_M1_OFFSET    0
+#define _PIPEA_DATA_N1           (dev_priv->info->display_mmio_offset + 0x60034)
+#define  PIPE_DATA_N1_OFFSET    0
+
+#define _PIPEA_DATA_M2           (dev_priv->info->display_mmio_offset + 0x60038)
+#define  PIPE_DATA_M2_OFFSET    0
+#define _PIPEA_DATA_N2           (dev_priv->info->display_mmio_offset + 0x6003c)
+#define  PIPE_DATA_N2_OFFSET    0
+
+#define _PIPEA_LINK_M1           (dev_priv->info->display_mmio_offset + 0x60040)
+#define  PIPE_LINK_M1_OFFSET    0
+#define _PIPEA_LINK_N1           (dev_priv->info->display_mmio_offset + 0x60044)
+#define  PIPE_LINK_N1_OFFSET    0
+
+#define _PIPEA_LINK_M2           (dev_priv->info->display_mmio_offset + 0x60048)
+#define  PIPE_LINK_M2_OFFSET    0
+#define _PIPEA_LINK_N2           (dev_priv->info->display_mmio_offset + 0x6004c)
+#define  PIPE_LINK_N2_OFFSET    0
+
+/* PIPEB timing regs use the same layout as PIPEA, starting at 0x61000 */
+
+#define _PIPEB_DATA_M1           (dev_priv->info->display_mmio_offset + 0x61030)
+#define _PIPEB_DATA_N1           (dev_priv->info->display_mmio_offset + 0x61034)
+
+#define _PIPEB_DATA_M2           (dev_priv->info->display_mmio_offset + 0x61038)
+#define _PIPEB_DATA_N2           (dev_priv->info->display_mmio_offset + 0x6103c)
+
+#define _PIPEB_LINK_M1           (dev_priv->info->display_mmio_offset + 0x61040)
+#define _PIPEB_LINK_N1           (dev_priv->info->display_mmio_offset + 0x61044)
+
+#define _PIPEB_LINK_M2           (dev_priv->info->display_mmio_offset + 0x61048)
+#define _PIPEB_LINK_N2           (dev_priv->info->display_mmio_offset + 0x6104c)
+
+#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+
+/* CPU panel fitter */
+/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
+#define _PFA_CTL_1               0x68080
+#define _PFB_CTL_1               0x68880
+#define  PF_ENABLE              (1<<31)
+#define  PF_PIPE_SEL_MASK_IVB	(3<<29)
+#define  PF_PIPE_SEL_IVB(pipe)	((pipe)<<29)
+#define  PF_FILTER_MASK		(3<<23)
+#define  PF_FILTER_PROGRAMMED	(0<<23)
+#define  PF_FILTER_MED_3x3	(1<<23)
+#define  PF_FILTER_EDGE_ENHANCE	(2<<23)
+#define  PF_FILTER_EDGE_SOFTEN	(3<<23)
+#define _PFA_WIN_SZ		0x68074
+#define _PFB_WIN_SZ		0x68874
+#define _PFA_WIN_POS		0x68070
+#define _PFB_WIN_POS		0x68870
+#define _PFA_VSCALE		0x68084
+#define _PFB_VSCALE		0x68884
+#define _PFA_HSCALE		0x68090
+#define _PFB_HSCALE		0x68890
+
+#define PF_CTL(pipe)		_PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
+#define PF_WIN_SZ(pipe)		_PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
+#define PF_WIN_POS(pipe)	_PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
+#define PF_VSCALE(pipe)		_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
+#define PF_HSCALE(pipe)		_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
+
+/* legacy palette */
+#define _LGC_PALETTE_A           0x4a000
+#define _LGC_PALETTE_B           0x4a800
+#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
+
+/* interrupts */
+#define DE_MASTER_IRQ_CONTROL   (1 << 31)
+#define DE_SPRITEB_FLIP_DONE    (1 << 29)
+#define DE_SPRITEA_FLIP_DONE    (1 << 28)
+#define DE_PLANEB_FLIP_DONE     (1 << 27)
+#define DE_PLANEA_FLIP_DONE     (1 << 26)
+#define DE_PCU_EVENT            (1 << 25)
+#define DE_GTT_FAULT            (1 << 24)
+#define DE_POISON               (1 << 23)
+#define DE_PERFORM_COUNTER      (1 << 22)
+#define DE_PCH_EVENT            (1 << 21)
+#define DE_AUX_CHANNEL_A        (1 << 20)
+#define DE_DP_A_HOTPLUG         (1 << 19)
+#define DE_GSE                  (1 << 18)
+#define DE_PIPEB_VBLANK         (1 << 15)
+#define DE_PIPEB_EVEN_FIELD     (1 << 14)
+#define DE_PIPEB_ODD_FIELD      (1 << 13)
+#define DE_PIPEB_LINE_COMPARE   (1 << 12)
+#define DE_PIPEB_VSYNC          (1 << 11)
+#define DE_PIPEB_FIFO_UNDERRUN  (1 << 8)
+#define DE_PIPEA_VBLANK         (1 << 7)
+#define DE_PIPEA_EVEN_FIELD     (1 << 6)
+#define DE_PIPEA_ODD_FIELD      (1 << 5)
+#define DE_PIPEA_LINE_COMPARE   (1 << 4)
+#define DE_PIPEA_VSYNC          (1 << 3)
+#define DE_PIPEA_FIFO_UNDERRUN  (1 << 0)
+
+/* More Ivybridge lolz */
+#define DE_ERR_DEBUG_IVB		(1<<30)
+#define DE_GSE_IVB			(1<<29)
+#define DE_PCH_EVENT_IVB		(1<<28)
+#define DE_DP_A_HOTPLUG_IVB		(1<<27)
+#define DE_AUX_CHANNEL_A_IVB		(1<<26)
+#define DE_SPRITEC_FLIP_DONE_IVB	(1<<14)
+#define DE_PLANEC_FLIP_DONE_IVB		(1<<13)
+#define DE_PIPEC_VBLANK_IVB		(1<<10)
+#define DE_SPRITEB_FLIP_DONE_IVB	(1<<9)
+#define DE_PLANEB_FLIP_DONE_IVB		(1<<8)
+#define DE_PIPEB_VBLANK_IVB		(1<<5)
+#define DE_SPRITEA_FLIP_DONE_IVB	(1<<4)
+#define DE_PLANEA_FLIP_DONE_IVB		(1<<3)
+#define DE_PIPEA_VBLANK_IVB		(1<<0)
+
+#define VLV_MASTER_IER			0x4400c /* Gunit master IER */
+#define   MASTER_INTERRUPT_ENABLE	(1<<31)
+
+#define DEISR   0x44000
+#define DEIMR   0x44004
+#define DEIIR   0x44008
+#define DEIER   0x4400c
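+/*
+ * Illustrative sketch (an assumption, using the driver's I915_READ/I915_WRITE
+ * accessors): IIR bits are acknowledged by writing them back, so a display
+ * interrupt handler typically does:
+ *
+ *	u32 de_iir = I915_READ(DEIIR);
+ *	if (de_iir & DE_PIPEA_VBLANK)
+ *		drm_handle_vblank(dev, 0);
+ *	I915_WRITE(DEIIR, de_iir);
+ */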
+
+/* GT interrupt.
+ * Note that for gen6+ the ring-specific interrupt bits do alias with the
+ * corresponding bits in the per-ring interrupt control registers. */
+#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT	(1 << 26)
+#define GT_GEN6_BLT_CS_ERROR_INTERRUPT		(1 << 25)
+#define GT_GEN6_BLT_USER_INTERRUPT		(1 << 22)
+#define GT_GEN6_BSD_CS_ERROR_INTERRUPT		(1 << 15)
+#define GT_GEN6_BSD_USER_INTERRUPT		(1 << 12)
+#define GT_BSD_USER_INTERRUPT			(1 << 5) /* ilk only */
+#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT	(1 << 5)
+#define GT_PIPE_NOTIFY				(1 << 4)
+#define GT_RENDER_CS_ERROR_INTERRUPT		(1 << 3)
+#define GT_SYNC_STATUS				(1 << 2)
+#define GT_USER_INTERRUPT			(1 << 0)
+
+#define GTISR   0x44010
+#define GTIMR   0x44014
+#define GTIIR   0x44018
+#define GTIER   0x4401c
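+/*
+ * Illustrative sketch (an assumption, using the driver's I915_READ/I915_WRITE
+ * accessors): enabling the render user interrupt means clearing its bit in
+ * the mask register and setting it in the enable register:
+ *
+ *	I915_WRITE(GTIMR, I915_READ(GTIMR) & ~GT_USER_INTERRUPT);
+ *	I915_WRITE(GTIER, I915_READ(GTIER) | GT_USER_INTERRUPT);
+ *	POSTING_READ(GTIER);
+ */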
+
+#define ILK_DISPLAY_CHICKEN2	0x42004
+/* Required on all Ironlake and Sandybridge according to the B-Spec. */
+#define  ILK_ELPIN_409_SELECT	(1 << 25)
+#define  ILK_DPARB_GATE	(1<<22)
+#define  ILK_VSDPFD_FULL	(1<<21)
+#define ILK_DISPLAY_CHICKEN_FUSES	0x42014
+#define  ILK_INTERNAL_GRAPHICS_DISABLE	(1<<31)
+#define  ILK_INTERNAL_DISPLAY_DISABLE	(1<<30)
+#define  ILK_DISPLAY_DEBUG_DISABLE	(1<<29)
+#define  ILK_HDCP_DISABLE		(1<<25)
+#define  ILK_eDP_A_DISABLE		(1<<24)
+#define  ILK_DESKTOP			(1<<23)
+
+#define ILK_DSPCLK_GATE_D			0x42020
+#define   ILK_VRHUNIT_CLOCK_GATE_DISABLE	(1 << 28)
+#define   ILK_DPFCUNIT_CLOCK_GATE_DISABLE	(1 << 9)
+#define   ILK_DPFCRUNIT_CLOCK_GATE_DISABLE	(1 << 8)
+#define   ILK_DPFDUNIT_CLOCK_GATE_ENABLE	(1 << 7)
+#define   ILK_DPARBUNIT_CLOCK_GATE_ENABLE	(1 << 5)
+
+#define IVB_CHICKEN3	0x4200c
+# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE	(1 << 5)
+# define CHICKEN3_DGMG_DONE_FIX_DISABLE		(1 << 2)
+
+#define DISP_ARB_CTL	0x45000
+#define  DISP_TILE_SURFACE_SWIZZLING	(1<<13)
+#define  DISP_FBC_WM_DIS		(1<<15)
+#define GEN7_MSG_CTL	0x45010
+#define  WAIT_FOR_PCH_RESET_ACK		(1<<1)
+#define  WAIT_FOR_PCH_FLR_ACK		(1<<0)
+
+/* GEN7 chicken */
+#define GEN7_COMMON_SLICE_CHICKEN1		0x7010
+# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC	((1<<10) | (1<<26))
+
+#define GEN7_L3CNTLREG1				0xB01C
+#define  GEN7_WA_FOR_GEN7_L3_CONTROL			0x3C4FFF8C
+#define  GEN7_L3AGDIS				(1<<19)
+
+#define GEN7_L3_CHICKEN_MODE_REGISTER		0xB030
+#define  GEN7_WA_L3_CHICKEN_MODE				0x20000000
+
+#define GEN7_L3SQCREG4				0xb034
+#define  L3SQ_URB_READ_CAM_MATCH_DISABLE	(1<<27)
+
+/* WaCatErrorRejectionIssue */
+#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG		0x9030
+#define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB	(1<<11)
+
+#define HSW_FUSE_STRAP		0x42014
+#define  HSW_CDCLK_LIMIT	(1 << 24)
+
+/* PCH */
+
+/* south display engine interrupt: IBX */
+#define SDE_AUDIO_POWER_D	(1 << 27)
+#define SDE_AUDIO_POWER_C	(1 << 26)
+#define SDE_AUDIO_POWER_B	(1 << 25)
+#define SDE_AUDIO_POWER_SHIFT	(25)
+#define SDE_AUDIO_POWER_MASK	(7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS		(1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB	(1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA	(1 << 22)
+#define SDE_AUDIO_HDCP_MASK	(3 << 22)
+#define SDE_AUDIO_TRANSB	(1 << 21)
+#define SDE_AUDIO_TRANSA	(1 << 20)
+#define SDE_AUDIO_TRANS_MASK	(3 << 20)
+#define SDE_POISON		(1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB		(1 << 17)
+#define SDE_FDI_RXA		(1 << 16)
+#define SDE_FDI_MASK		(3 << 16)
+#define SDE_AUXD		(1 << 15)
+#define SDE_AUXC		(1 << 14)
+#define SDE_AUXB		(1 << 13)
+#define SDE_AUX_MASK		(7 << 13)
+/* 12 reserved */
+#define SDE_CRT_HOTPLUG         (1 << 11)
+#define SDE_PORTD_HOTPLUG       (1 << 10)
+#define SDE_PORTC_HOTPLUG       (1 << 9)
+#define SDE_PORTB_HOTPLUG       (1 << 8)
+#define SDE_SDVOB_HOTPLUG       (1 << 6)
+#define SDE_HOTPLUG_MASK        (SDE_CRT_HOTPLUG | \
+				 SDE_SDVOB_HOTPLUG |	\
+				 SDE_PORTB_HOTPLUG |	\
+				 SDE_PORTC_HOTPLUG |	\
+				 SDE_PORTD_HOTPLUG)
+#define SDE_TRANSB_CRC_DONE	(1 << 5)
+#define SDE_TRANSB_CRC_ERR	(1 << 4)
+#define SDE_TRANSB_FIFO_UNDER	(1 << 3)
+#define SDE_TRANSA_CRC_DONE	(1 << 2)
+#define SDE_TRANSA_CRC_ERR	(1 << 1)
+#define SDE_TRANSA_FIFO_UNDER	(1 << 0)
+#define SDE_TRANS_MASK		(0x3f)
+
+/* south display engine interrupt: CPT/PPT */
+#define SDE_AUDIO_POWER_D_CPT	(1 << 31)
+#define SDE_AUDIO_POWER_C_CPT	(1 << 30)
+#define SDE_AUDIO_POWER_B_CPT	(1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT   29
+#define SDE_AUDIO_POWER_MASK_CPT    (7 << 29)
+#define SDE_AUXD_CPT		(1 << 27)
+#define SDE_AUXC_CPT		(1 << 26)
+#define SDE_AUXB_CPT		(1 << 25)
+#define SDE_AUX_MASK_CPT	(7 << 25)
+#define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
+#define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
+#define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
+#define SDE_CRT_HOTPLUG_CPT	(1 << 19)
+#define SDE_SDVOB_HOTPLUG_CPT	(1 << 18)
+#define SDE_HOTPLUG_MASK_CPT	(SDE_CRT_HOTPLUG_CPT |		\
+				 SDE_SDVOB_HOTPLUG_CPT |	\
+				 SDE_PORTD_HOTPLUG_CPT |	\
+				 SDE_PORTC_HOTPLUG_CPT |	\
+				 SDE_PORTB_HOTPLUG_CPT)
+#define SDE_GMBUS_CPT		(1 << 17)
+#define SDE_AUDIO_CP_REQ_C_CPT	(1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT	(1 << 9)
+#define SDE_FDI_RXC_CPT		(1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT	(1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT	(1 << 5)
+#define SDE_FDI_RXB_CPT		(1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT	(1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT	(1 << 1)
+#define SDE_FDI_RXA_CPT		(1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT	(SDE_AUDIO_CP_REQ_C_CPT | \
+				 SDE_AUDIO_CP_REQ_B_CPT | \
+				 SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT	(SDE_AUDIO_CP_CHG_C_CPT | \
+				 SDE_AUDIO_CP_CHG_B_CPT | \
+				 SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT	(SDE_FDI_RXC_CPT | \
+				 SDE_FDI_RXB_CPT | \
+				 SDE_FDI_RXA_CPT)
+
+#define SDEISR  0xc4000
+#define SDEIMR  0xc4004
+#define SDEIIR  0xc4008
+#define SDEIER  0xc400c
+
+/* digital port hotplug */
+#define PCH_PORT_HOTPLUG        0xc4030		/* SHOTPLUG_CTL */
+#define PORTD_HOTPLUG_ENABLE            (1 << 20)
+#define PORTD_PULSE_DURATION_2ms        (0)
+#define PORTD_PULSE_DURATION_4_5ms      (1 << 18)
+#define PORTD_PULSE_DURATION_6ms        (2 << 18)
+#define PORTD_PULSE_DURATION_100ms      (3 << 18)
+#define PORTD_PULSE_DURATION_MASK	(3 << 18)
+#define PORTD_HOTPLUG_STATUS_MASK	(0x3 << 16)
+#define  PORTD_HOTPLUG_NO_DETECT	(0 << 16)
+#define  PORTD_HOTPLUG_SHORT_DETECT	(1 << 16)
+#define  PORTD_HOTPLUG_LONG_DETECT	(2 << 16)
+#define PORTC_HOTPLUG_ENABLE            (1 << 12)
+#define PORTC_PULSE_DURATION_2ms        (0)
+#define PORTC_PULSE_DURATION_4_5ms      (1 << 10)
+#define PORTC_PULSE_DURATION_6ms        (2 << 10)
+#define PORTC_PULSE_DURATION_100ms      (3 << 10)
+#define PORTC_PULSE_DURATION_MASK	(3 << 10)
+#define PORTC_HOTPLUG_STATUS_MASK	(0x3 << 8)
+#define  PORTC_HOTPLUG_NO_DETECT	(0 << 8)
+#define  PORTC_HOTPLUG_SHORT_DETECT	(1 << 8)
+#define  PORTC_HOTPLUG_LONG_DETECT	(2 << 8)
+#define PORTB_HOTPLUG_ENABLE            (1 << 4)
+#define PORTB_PULSE_DURATION_2ms        (0)
+#define PORTB_PULSE_DURATION_4_5ms      (1 << 2)
+#define PORTB_PULSE_DURATION_6ms        (2 << 2)
+#define PORTB_PULSE_DURATION_100ms      (3 << 2)
+#define PORTB_PULSE_DURATION_MASK	(3 << 2)
+#define PORTB_HOTPLUG_STATUS_MASK	(0x3 << 0)
+#define  PORTB_HOTPLUG_NO_DETECT	(0 << 0)
+#define  PORTB_HOTPLUG_SHORT_DETECT	(1 << 0)
+#define  PORTB_HOTPLUG_LONG_DETECT	(2 << 0)
+
+#define PCH_GPIOA               0xc5010
+#define PCH_GPIOB               0xc5014
+#define PCH_GPIOC               0xc5018
+#define PCH_GPIOD               0xc501c
+#define PCH_GPIOE               0xc5020
+#define PCH_GPIOF               0xc5024
+
+#define PCH_GMBUS0		0xc5100
+#define PCH_GMBUS1		0xc5104
+#define PCH_GMBUS2		0xc5108
+#define PCH_GMBUS3		0xc510c
+#define PCH_GMBUS4		0xc5110
+#define PCH_GMBUS5		0xc5120
+
+#define _PCH_DPLL_A              0xc6014
+#define _PCH_DPLL_B              0xc6018
+#define _PCH_DPLL(pll) ((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+
+#define _PCH_FPA0                0xc6040
+#define  FP_CB_TUNE		(0x3<<22)
+#define _PCH_FPA1                0xc6044
+#define _PCH_FPB0                0xc6048
+#define _PCH_FPB1                0xc604c
+#define _PCH_FP0(pll) ((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define _PCH_FP1(pll) ((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
+
+#define PCH_DPLL_TEST           0xc606c
+
+#define PCH_DREF_CONTROL        0xC6200
+#define  DREF_CONTROL_MASK      0x7fc3
+#define  DREF_CPU_SOURCE_OUTPUT_DISABLE         (0<<13)
+#define  DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD      (2<<13)
+#define  DREF_CPU_SOURCE_OUTPUT_NONSPREAD       (3<<13)
+#define  DREF_CPU_SOURCE_OUTPUT_MASK		(3<<13)
+#define  DREF_SSC_SOURCE_DISABLE                (0<<11)
+#define  DREF_SSC_SOURCE_ENABLE                 (2<<11)
+#define  DREF_SSC_SOURCE_MASK			(3<<11)
+#define  DREF_NONSPREAD_SOURCE_DISABLE          (0<<9)
+#define  DREF_NONSPREAD_CK505_ENABLE		(1<<9)
+#define  DREF_NONSPREAD_SOURCE_ENABLE           (2<<9)
+#define  DREF_NONSPREAD_SOURCE_MASK		(3<<9)
+#define  DREF_SUPERSPREAD_SOURCE_DISABLE        (0<<7)
+#define  DREF_SUPERSPREAD_SOURCE_ENABLE         (2<<7)
+#define  DREF_SUPERSPREAD_SOURCE_MASK		(3<<7)
+#define  DREF_SSC4_DOWNSPREAD                   (0<<6)
+#define  DREF_SSC4_CENTERSPREAD                 (1<<6)
+#define  DREF_SSC1_DISABLE                      (0<<1)
+#define  DREF_SSC1_ENABLE                       (1<<1)
+#define  DREF_SSC4_DISABLE                      (0)
+#define  DREF_SSC4_ENABLE                       (1)
+
+#define PCH_RAWCLK_FREQ         0xc6204
+#define  FDL_TP1_TIMER_SHIFT    12
+#define  FDL_TP1_TIMER_MASK     (3<<12)
+#define  FDL_TP2_TIMER_SHIFT    10
+#define  FDL_TP2_TIMER_MASK     (3<<10)
+#define  RAWCLK_FREQ_MASK       0x3ff
+
+#define PCH_DPLL_TMR_CFG        0xc6208
+
+#define PCH_SSC4_PARMS          0xc6210
+#define PCH_SSC4_AUX_PARMS      0xc6214
+
+#define PCH_DPLL_SEL		0xc7000
+#define  TRANSA_DPLL_ENABLE	(1<<3)
+#define	 TRANSA_DPLLB_SEL	(1<<0)
+#define	 TRANSA_DPLLA_SEL	0
+#define  TRANSB_DPLL_ENABLE	(1<<7)
+#define	 TRANSB_DPLLB_SEL	(1<<4)
+#define	 TRANSB_DPLLA_SEL	(0)
+#define  TRANSC_DPLL_ENABLE	(1<<11)
+#define	 TRANSC_DPLLB_SEL	(1<<8)
+#define	 TRANSC_DPLLA_SEL	(0)
+
+/* transcoder */
+
+#define _TRANS_HTOTAL_A          0xe0000
+#define  TRANS_HTOTAL_SHIFT     16
+#define  TRANS_HACTIVE_SHIFT    0
+#define _TRANS_HBLANK_A          0xe0004
+#define  TRANS_HBLANK_END_SHIFT 16
+#define  TRANS_HBLANK_START_SHIFT 0
+#define _TRANS_HSYNC_A           0xe0008
+#define  TRANS_HSYNC_END_SHIFT  16
+#define  TRANS_HSYNC_START_SHIFT 0
+#define _TRANS_VTOTAL_A          0xe000c
+#define  TRANS_VTOTAL_SHIFT     16
+#define  TRANS_VACTIVE_SHIFT    0
+#define _TRANS_VBLANK_A          0xe0010
+#define  TRANS_VBLANK_END_SHIFT 16
+#define  TRANS_VBLANK_START_SHIFT 0
+#define _TRANS_VSYNC_A           0xe0014
+#define  TRANS_VSYNC_END_SHIFT  16
+#define  TRANS_VSYNC_START_SHIFT 0
+#define _TRANS_VSYNCSHIFT_A	0xe0028
+
+#define _TRANSA_DATA_M1          0xe0030
+#define _TRANSA_DATA_N1          0xe0034
+#define _TRANSA_DATA_M2          0xe0038
+#define _TRANSA_DATA_N2          0xe003c
+#define _TRANSA_DP_LINK_M1       0xe0040
+#define _TRANSA_DP_LINK_N1       0xe0044
+#define _TRANSA_DP_LINK_M2       0xe0048
+#define _TRANSA_DP_LINK_N2       0xe004c
+
+/* Per-transcoder DIP controls */
+
+#define _VIDEO_DIP_CTL_A         0xe0200
+#define _VIDEO_DIP_DATA_A        0xe0208
+#define _VIDEO_DIP_GCP_A         0xe0210
+
+#define _VIDEO_DIP_CTL_B         0xe1200
+#define _VIDEO_DIP_DATA_B        0xe1208
+#define _VIDEO_DIP_GCP_B         0xe1210
+
+#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+
+#define VLV_VIDEO_DIP_CTL_A		(VLV_DISPLAY_BASE + 0x60200)
+#define VLV_VIDEO_DIP_DATA_A		(VLV_DISPLAY_BASE + 0x60208)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A	(VLV_DISPLAY_BASE + 0x60210)
+
+#define VLV_VIDEO_DIP_CTL_B		(VLV_DISPLAY_BASE + 0x61170)
+#define VLV_VIDEO_DIP_DATA_B		(VLV_DISPLAY_BASE + 0x61174)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B	(VLV_DISPLAY_BASE + 0x61178)
+
+#define VLV_TVIDEO_DIP_CTL(pipe) \
+	 _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+#define VLV_TVIDEO_DIP_DATA(pipe) \
+	 _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+#define VLV_TVIDEO_DIP_GCP(pipe) \
+	_PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+
+/* Haswell DIP controls */
+#define HSW_VIDEO_DIP_CTL_A		0x60200
+#define HSW_VIDEO_DIP_AVI_DATA_A	0x60220
+#define HSW_VIDEO_DIP_VS_DATA_A		0x60260
+#define HSW_VIDEO_DIP_SPD_DATA_A	0x602A0
+#define HSW_VIDEO_DIP_GMP_DATA_A	0x602E0
+#define HSW_VIDEO_DIP_VSC_DATA_A	0x60320
+#define HSW_VIDEO_DIP_AVI_ECC_A		0x60240
+#define HSW_VIDEO_DIP_VS_ECC_A		0x60280
+#define HSW_VIDEO_DIP_SPD_ECC_A		0x602C0
+#define HSW_VIDEO_DIP_GMP_ECC_A		0x60300
+#define HSW_VIDEO_DIP_VSC_ECC_A		0x60344
+#define HSW_VIDEO_DIP_GCP_A		0x60210
+
+#define HSW_VIDEO_DIP_CTL_B		0x61200
+#define HSW_VIDEO_DIP_AVI_DATA_B	0x61220
+#define HSW_VIDEO_DIP_VS_DATA_B		0x61260
+#define HSW_VIDEO_DIP_SPD_DATA_B	0x612A0
+#define HSW_VIDEO_DIP_GMP_DATA_B	0x612E0
+#define HSW_VIDEO_DIP_VSC_DATA_B	0x61320
+#define HSW_VIDEO_DIP_BVI_ECC_B		0x61240
+#define HSW_VIDEO_DIP_VS_ECC_B		0x61280
+#define HSW_VIDEO_DIP_SPD_ECC_B		0x612C0
+#define HSW_VIDEO_DIP_GMP_ECC_B		0x61300
+#define HSW_VIDEO_DIP_VSC_ECC_B		0x61344
+#define HSW_VIDEO_DIP_GCP_B		0x61210
+
+#define HSW_TVIDEO_DIP_CTL(trans) \
+	 _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
+#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
+	 _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
+	 _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
+#define HSW_TVIDEO_DIP_GCP(trans) \
+	_TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
+#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
+	 _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
+
+#define _TRANS_HTOTAL_B          0xe1000
+#define _TRANS_HBLANK_B          0xe1004
+#define _TRANS_HSYNC_B           0xe1008
+#define _TRANS_VTOTAL_B          0xe100c
+#define _TRANS_VBLANK_B          0xe1010
+#define _TRANS_VSYNC_B           0xe1014
+#define _TRANS_VSYNCSHIFT_B	 0xe1028
+
+#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
+#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
+#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B)
+#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
+#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
+#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
+#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \
+				     _TRANS_VSYNCSHIFT_B)
+
+#define _TRANSB_DATA_M1          0xe1030
+#define _TRANSB_DATA_N1          0xe1034
+#define _TRANSB_DATA_M2          0xe1038
+#define _TRANSB_DATA_N2          0xe103c
+#define _TRANSB_DP_LINK_M1       0xe1040
+#define _TRANSB_DP_LINK_N1       0xe1044
+#define _TRANSB_DP_LINK_M2       0xe1048
+#define _TRANSB_DP_LINK_N2       0xe104c
+
+#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1)
+#define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1)
+#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2)
+#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2)
+#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1)
+#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1)
+#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2)
+#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2)
+
+#define _TRANSACONF              0xf0008
+#define _TRANSBCONF              0xf1008
+#define TRANSCONF(pipe) _PIPE(pipe, _TRANSACONF, _TRANSBCONF)
+#define  TRANS_DISABLE          (0<<31)
+#define  TRANS_ENABLE           (1<<31)
+#define  TRANS_STATE_MASK       (1<<30)
+#define  TRANS_STATE_DISABLE    (0<<30)
+#define  TRANS_STATE_ENABLE     (1<<30)
+#define  TRANS_FSYNC_DELAY_HB1  (0<<27)
+#define  TRANS_FSYNC_DELAY_HB2  (1<<27)
+#define  TRANS_FSYNC_DELAY_HB3  (2<<27)
+#define  TRANS_FSYNC_DELAY_HB4  (3<<27)
+#define  TRANS_INTERLACE_MASK   (7<<21)
+#define  TRANS_PROGRESSIVE      (0<<21)
+#define  TRANS_INTERLACED       (3<<21)
+#define  TRANS_LEGACY_INTERLACED_ILK (2<<21)
+#define  TRANS_8BPC             (0<<5)
+#define  TRANS_10BPC            (1<<5)
+#define  TRANS_6BPC             (2<<5)
+#define  TRANS_12BPC            (3<<5)
+
+#define _TRANSA_CHICKEN1	 0xf0060
+#define _TRANSB_CHICKEN1	 0xf1060
+#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE	(1<<4)
+#define _TRANSA_CHICKEN2	 0xf0064
+#define _TRANSB_CHICKEN2	 0xf1064
+#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
+#define  TRANS_CHICKEN2_TIMING_OVERRIDE			(1<<31)
+#define  TRANS_CHICKEN2_FDI_POLARITY_REVERSED		(1<<29)
+#define  TRANS_CHICKEN2_FRAME_START_DELAY_MASK		(3<<27)
+#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER	(1<<26)
+#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH	(1<<25)
+
+#define SOUTH_CHICKEN1		0xc2000
+#define  FDIA_PHASE_SYNC_SHIFT_OVR	19
+#define  FDIA_PHASE_SYNC_SHIFT_EN	18
+#define  FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define  FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define  FDI_BC_BIFURCATION_SELECT	(1 << 12)
+#define SOUTH_CHICKEN2		0xc2004
+#define  FDI_MPHY_IOSFSB_RESET_STATUS	(1<<13)
+#define  FDI_MPHY_IOSFSB_RESET_CTL	(1<<12)
+#define  DPLS_EDP_PPS_FIX_DIS		(1<<0)
+
+#define _FDI_RXA_CHICKEN         0xc200c
+#define _FDI_RXB_CHICKEN         0xc2010
+#define  FDI_RX_PHASE_SYNC_POINTER_OVR	(1<<1)
+#define  FDI_RX_PHASE_SYNC_POINTER_EN	(1<<0)
+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
+
+#define SOUTH_DSPCLK_GATE_D	0xc2020
+#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define  PCH_LP_PARTITION_LEVEL_DISABLE  (1<<12)
+
+/* CPU: FDI_TX */
+#define _FDI_TXA_CTL             0x60100
+#define _FDI_TXB_CTL             0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
+#define  FDI_TX_DISABLE         (0<<31)
+#define  FDI_TX_ENABLE          (1<<31)
+#define  FDI_LINK_TRAIN_PATTERN_1       (0<<28)
+#define  FDI_LINK_TRAIN_PATTERN_2       (1<<28)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE    (2<<28)
+#define  FDI_LINK_TRAIN_NONE            (3<<28)
+#define  FDI_LINK_TRAIN_VOLTAGE_0_4V    (0<<25)
+#define  FDI_LINK_TRAIN_VOLTAGE_0_6V    (1<<25)
+#define  FDI_LINK_TRAIN_VOLTAGE_0_8V    (2<<25)
+#define  FDI_LINK_TRAIN_VOLTAGE_1_2V    (3<<25)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_2X   (2<<22)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_3X   (3<<22)
+/* ILK always uses 400mV 0dB for the voltage swing and pre-emphasis level.
+   SNB has different settings. */
+/* SNB A-stepping */
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_A		(0x38<<22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_A		(0x02<<22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
+/* SNB B-stepping */
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_B		(0x0<<22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_B		(0x3a<<22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_B	(0x39<<22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
+#define  FDI_LINK_TRAIN_VOL_EMP_MASK		(0x3f<<22)
+#define  FDI_DP_PORT_WIDTH_X1           (0<<19)
+#define  FDI_DP_PORT_WIDTH_X2           (1<<19)
+#define  FDI_DP_PORT_WIDTH_X3           (2<<19)
+#define  FDI_DP_PORT_WIDTH_X4           (3<<19)
+#define  FDI_TX_ENHANCE_FRAME_ENABLE    (1<<18)
+/* Ironlake: hardwired to 1 */
+#define  FDI_TX_PLL_ENABLE              (1<<14)
+
+/* Ivybridge has different bits for lolz */
+#define  FDI_LINK_TRAIN_PATTERN_1_IVB       (0<<8)
+#define  FDI_LINK_TRAIN_PATTERN_2_IVB       (1<<8)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE_IVB    (2<<8)
+#define  FDI_LINK_TRAIN_NONE_IVB            (3<<8)
+
+/* both Tx and Rx */
+#define  FDI_COMPOSITE_SYNC		(1<<11)
+#define  FDI_LINK_TRAIN_AUTO		(1<<10)
+#define  FDI_SCRAMBLING_ENABLE          (0<<7)
+#define  FDI_SCRAMBLING_DISABLE         (1<<7)
+
+/* FDI_RX: FDI unit X is hard-wired to transcoder X */
+#define _FDI_RXA_CTL             0xf000c
+#define _FDI_RXB_CTL             0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
+#define  FDI_RX_ENABLE          (1<<31)
+/* train, dp width same as FDI_TX */
+#define  FDI_FS_ERRC_ENABLE		(1<<27)
+#define  FDI_FE_ERRC_ENABLE		(1<<26)
+#define  FDI_DP_PORT_WIDTH_X8           (7<<19)
+#define  FDI_RX_POLARITY_REVERSED_LPT	(1<<16)
+#define  FDI_8BPC                       (0<<16)
+#define  FDI_10BPC                      (1<<16)
+#define  FDI_6BPC                       (2<<16)
+#define  FDI_12BPC                      (3<<16)
+#define  FDI_RX_LINK_REVERSAL_OVERRIDE  (1<<15)
+#define  FDI_DMI_LINK_REVERSE_MASK      (1<<14)
+#define  FDI_RX_PLL_ENABLE              (1<<13)
+#define  FDI_FS_ERR_CORRECT_ENABLE      (1<<11)
+#define  FDI_FE_ERR_CORRECT_ENABLE      (1<<10)
+#define  FDI_FS_ERR_REPORT_ENABLE       (1<<9)
+#define  FDI_FE_ERR_REPORT_ENABLE       (1<<8)
+#define  FDI_RX_ENHANCE_FRAME_ENABLE    (1<<6)
+#define  FDI_PCDCLK	                (1<<4)
+/* CPT */
+#define  FDI_AUTO_TRAINING			(1<<10)
+#define  FDI_LINK_TRAIN_PATTERN_1_CPT		(0<<8)
+#define  FDI_LINK_TRAIN_PATTERN_2_CPT		(1<<8)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE_CPT	(2<<8)
+#define  FDI_LINK_TRAIN_NORMAL_CPT		(3<<8)
+#define  FDI_LINK_TRAIN_PATTERN_MASK_CPT	(3<<8)
+/* LPT */
+#define  FDI_PORT_WIDTH_2X_LPT			(1<<19)
+#define  FDI_PORT_WIDTH_1X_LPT			(0<<19)
+
+#define _FDI_RXA_MISC			0xf0010
+#define _FDI_RXB_MISC			0xf1010
+#define  FDI_RX_PWRDN_LANE1_MASK	(3<<26)
+#define  FDI_RX_PWRDN_LANE1_VAL(x)	((x)<<26)
+#define  FDI_RX_PWRDN_LANE0_MASK	(3<<24)
+#define  FDI_RX_PWRDN_LANE0_VAL(x)	((x)<<24)
+#define  FDI_RX_TP1_TO_TP2_48		(2<<20)
+#define  FDI_RX_TP1_TO_TP2_64		(3<<20)
+#define  FDI_RX_FDI_DELAY_90		(0x90<<0)
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+
+#define _FDI_RXA_TUSIZE1         0xf0030
+#define _FDI_RXA_TUSIZE2         0xf0038
+#define _FDI_RXB_TUSIZE1         0xf1030
+#define _FDI_RXB_TUSIZE2         0xf1038
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
+
+/* FDI_RX interrupt register format */
+#define FDI_RX_INTER_LANE_ALIGN         (1<<10)
+#define FDI_RX_SYMBOL_LOCK              (1<<9) /* train 2 */
+#define FDI_RX_BIT_LOCK                 (1<<8) /* train 1 */
+#define FDI_RX_TRAIN_PATTERN_2_FAIL     (1<<7)
+#define FDI_RX_FS_CODE_ERR              (1<<6)
+#define FDI_RX_FE_CODE_ERR              (1<<5)
+#define FDI_RX_SYMBOL_ERR_RATE_ABOVE    (1<<4)
+#define FDI_RX_HDCP_LINK_FAIL           (1<<3)
+#define FDI_RX_PIXEL_FIFO_OVERFLOW      (1<<2)
+#define FDI_RX_CROSS_CLOCK_OVERFLOW     (1<<1)
+#define FDI_RX_SYMBOL_QUEUE_OVERFLOW    (1<<0)
+
+#define _FDI_RXA_IIR             0xf0014
+#define _FDI_RXA_IMR             0xf0018
+#define _FDI_RXB_IIR             0xf1014
+#define _FDI_RXB_IMR             0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
+
+#define FDI_PLL_CTL_1           0xfe000
+#define FDI_PLL_CTL_2           0xfe004
+
+#define PCH_LVDS	0xe1180
+#define  LVDS_DETECTED	(1 << 1)
+
+/* vlv has 2 sets of panel control regs. */
+#define PIPEA_PP_STATUS         (VLV_DISPLAY_BASE + 0x61200)
+#define PIPEA_PP_CONTROL        (VLV_DISPLAY_BASE + 0x61204)
+#define PIPEA_PP_ON_DELAYS      (VLV_DISPLAY_BASE + 0x61208)
+#define PIPEA_PP_OFF_DELAYS     (VLV_DISPLAY_BASE + 0x6120c)
+#define PIPEA_PP_DIVISOR        (VLV_DISPLAY_BASE + 0x61210)
+
+#define PIPEB_PP_STATUS         (VLV_DISPLAY_BASE + 0x61300)
+#define PIPEB_PP_CONTROL        (VLV_DISPLAY_BASE + 0x61304)
+#define PIPEB_PP_ON_DELAYS      (VLV_DISPLAY_BASE + 0x61308)
+#define PIPEB_PP_OFF_DELAYS     (VLV_DISPLAY_BASE + 0x6130c)
+#define PIPEB_PP_DIVISOR        (VLV_DISPLAY_BASE + 0x61310)
+
+#define VLV_PIPE_PP_STATUS(pipe) _PIPE(pipe, PIPEA_PP_STATUS, PIPEB_PP_STATUS)
+#define VLV_PIPE_PP_CONTROL(pipe) _PIPE(pipe, PIPEA_PP_CONTROL, PIPEB_PP_CONTROL)
+#define VLV_PIPE_PP_ON_DELAYS(pipe) \
+		_PIPE(pipe, PIPEA_PP_ON_DELAYS, PIPEB_PP_ON_DELAYS)
+#define VLV_PIPE_PP_OFF_DELAYS(pipe) \
+		_PIPE(pipe, PIPEA_PP_OFF_DELAYS, PIPEB_PP_OFF_DELAYS)
+#define VLV_PIPE_PP_DIVISOR(pipe) \
+		_PIPE(pipe, PIPEA_PP_DIVISOR, PIPEB_PP_DIVISOR)
+
+#define PCH_PP_STATUS		0xc7200
+#define PCH_PP_CONTROL		0xc7204
+#define  PANEL_UNLOCK_REGS	(0xabcd << 16)
+#define  PANEL_UNLOCK_MASK	(0xffff << 16)
+#define  EDP_FORCE_VDD		(1 << 3)
+#define  EDP_BLC_ENABLE		(1 << 2)
+#define  PANEL_POWER_RESET	(1 << 1)
+#define  PANEL_POWER_OFF	(0 << 0)
+#define  PANEL_POWER_ON		(1 << 0)
+#define PCH_PP_ON_DELAYS	0xc7208
+#define  PANEL_PORT_SELECT_MASK	(3 << 30)
+#define  PANEL_PORT_SELECT_LVDS	(0 << 30)
+#define  PANEL_PORT_SELECT_DPA	(1 << 30)
+#define  EDP_PANEL		(1 << 30)
+#define  PANEL_PORT_SELECT_DPC	(2 << 30)
+#define  PANEL_PORT_SELECT_DPD	(3 << 30)
+#define  PANEL_POWER_UP_DELAY_MASK	(0x1fff0000)
+#define  PANEL_POWER_UP_DELAY_SHIFT	16
+#define  PANEL_LIGHT_ON_DELAY_MASK	(0x1fff)
+#define  PANEL_LIGHT_ON_DELAY_SHIFT	0
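+/*
+ * Illustrative example (assumption: the delay fields are stored in 100us
+ * units): extracting the panel power-up delay:
+ *
+ *	t1_t3 = (I915_READ(PCH_PP_ON_DELAYS) & PANEL_POWER_UP_DELAY_MASK)
+ *			>> PANEL_POWER_UP_DELAY_SHIFT;
+ */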
+
+#define PCH_PP_OFF_DELAYS	0xc720c
+#define  PANEL_POWER_PORT_SELECT_MASK	(0x3 << 30)
+#define  PANEL_POWER_PORT_LVDS		(0 << 30)
+#define  PANEL_POWER_PORT_DP_A		(1 << 30)
+#define  PANEL_POWER_PORT_DP_C		(2 << 30)
+#define  PANEL_POWER_PORT_DP_D		(3 << 30)
+#define  PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000)
+#define  PANEL_POWER_DOWN_DELAY_SHIFT	16
+#define  PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff)
+#define  PANEL_LIGHT_OFF_DELAY_SHIFT	0
+
+#define PCH_PP_DIVISOR		0xc7210
+#define  PP_REFERENCE_DIVIDER_MASK	(0xffffff00)
+#define  PP_REFERENCE_DIVIDER_SHIFT	8
+#define  PANEL_POWER_CYCLE_DELAY_MASK	(0x1f)
+#define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
+
+#define PCH_DP_B		0xe4100
+#define PCH_DPB_AUX_CH_CTL	0xe4110
+#define PCH_DPB_AUX_CH_DATA1	0xe4114
+#define PCH_DPB_AUX_CH_DATA2	0xe4118
+#define PCH_DPB_AUX_CH_DATA3	0xe411c
+#define PCH_DPB_AUX_CH_DATA4	0xe4120
+#define PCH_DPB_AUX_CH_DATA5	0xe4124
+
+#define PCH_DP_C		0xe4200
+#define PCH_DPC_AUX_CH_CTL	0xe4210
+#define PCH_DPC_AUX_CH_DATA1	0xe4214
+#define PCH_DPC_AUX_CH_DATA2	0xe4218
+#define PCH_DPC_AUX_CH_DATA3	0xe421c
+#define PCH_DPC_AUX_CH_DATA4	0xe4220
+#define PCH_DPC_AUX_CH_DATA5	0xe4224
+
+#define PCH_DP_D		0xe4300
+#define PCH_DPD_AUX_CH_CTL	0xe4310
+#define PCH_DPD_AUX_CH_DATA1	0xe4314
+#define PCH_DPD_AUX_CH_DATA2	0xe4318
+#define PCH_DPD_AUX_CH_DATA3	0xe431c
+#define PCH_DPD_AUX_CH_DATA4	0xe4320
+#define PCH_DPD_AUX_CH_DATA5	0xe4324
+
+/* CPT */
+#define  PORT_TRANS_A_SEL_CPT	0
+#define  PORT_TRANS_B_SEL_CPT	(1<<29)
+#define  PORT_TRANS_C_SEL_CPT	(2<<29)
+#define  PORT_TRANS_SEL_MASK	(3<<29)
+#define  PORT_TRANS_SEL_CPT(pipe)	((pipe) << 29)
+#define  PORT_TO_PIPE(val)	(((val) & (1<<30)) >> 30)
+#define  PORT_TO_PIPE_CPT(val)	(((val) & PORT_TRANS_SEL_MASK) >> 29)
+
+#define TRANS_DP_CTL_A		0xe0300
+#define TRANS_DP_CTL_B		0xe1300
+#define TRANS_DP_CTL_C		0xe2300
+#define TRANS_DP_CTL(pipe)	_PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
+#define  TRANS_DP_OUTPUT_ENABLE	(1<<31)
+#define  TRANS_DP_PORT_SEL_B	(0<<29)
+#define  TRANS_DP_PORT_SEL_C	(1<<29)
+#define  TRANS_DP_PORT_SEL_D	(2<<29)
+#define  TRANS_DP_PORT_SEL_NONE	(3<<29)
+#define  TRANS_DP_PORT_SEL_MASK	(3<<29)
+#define  TRANS_DP_AUDIO_ONLY	(1<<26)
+#define  TRANS_DP_ENH_FRAMING	(1<<18)
+#define  TRANS_DP_8BPC		(0<<9)
+#define  TRANS_DP_10BPC		(1<<9)
+#define  TRANS_DP_6BPC		(2<<9)
+#define  TRANS_DP_12BPC		(3<<9)
+#define  TRANS_DP_BPC_MASK	(3<<9)
+#define  TRANS_DP_VSYNC_ACTIVE_HIGH	(1<<4)
+#define  TRANS_DP_VSYNC_ACTIVE_LOW	0
+#define  TRANS_DP_HSYNC_ACTIVE_HIGH	(1<<3)
+#define  TRANS_DP_HSYNC_ACTIVE_LOW	0
+#define  TRANS_DP_SYNC_MASK	(3<<3)
+
+/* SNB eDP training params */
+/* SNB A-stepping */
+#define  EDP_LINK_TRAIN_400MV_0DB_SNB_A		(0x38<<22)
+#define  EDP_LINK_TRAIN_400MV_6DB_SNB_A		(0x02<<22)
+#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
+#define  EDP_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
+/* SNB B-stepping */
+#define  EDP_LINK_TRAIN_400_600MV_0DB_SNB_B	(0x0<<22)
+#define  EDP_LINK_TRAIN_400MV_3_5DB_SNB_B	(0x1<<22)
+#define  EDP_LINK_TRAIN_400_600MV_6DB_SNB_B	(0x3a<<22)
+#define  EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B	(0x39<<22)
+#define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B	(0x38<<22)
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB	(0x3f<<22)
+
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB		(0x24<<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB		(0x2a<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB		(0x2f<<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB		(0x30<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB		(0x36<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB		(0x38<<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x3e<<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB		(0x00<<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB		(0x20<<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB		(0x02<<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB		(0x22<<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB		(0x23<<22)
+
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB	(0x3f<<22)
+
+#define  FORCEWAKE				0xA18C
+#define  FORCEWAKE_VLV				0x1300b0
+#define  FORCEWAKE_ACK_VLV			0x1300b4
+#define  FORCEWAKE_MEDIA_VLV			0x1300b8
+#define  FORCEWAKE_ACK_MEDIA_VLV		0x1300bc
+#define  FORCEWAKE_ACK_HSW			0x130044
+#define  FORCEWAKE_ACK				0x130090
+#define  VLV_GTLC_WAKE_CTRL			0x130090
+#define  VLV_GTLC_PW_STATUS			0x130094
+#define  FORCEWAKE_MT				0xa188 /* multi-threaded */
+#define   FORCEWAKE_KERNEL			0x1
+#define   FORCEWAKE_USER			0x2
+#define  FORCEWAKE_MT_ACK			0x130040
+#define  ECOBUS					0xa180
+#define    FORCEWAKE_MT_ENABLE			(1<<5)
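+/*
+ * Illustrative sketch (an assumption, not the driver's exact routine): on
+ * gen6 the GT must be woken before touching its registers, by setting
+ * FORCEWAKE and waiting for the ack, then releasing it afterwards:
+ *
+ *	I915_WRITE_NOTRACE(FORCEWAKE, 1);
+ *	poll until (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) is set;
+ *	... GT register accesses ...
+ *	I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ */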
+
+#define  GTFIFODBG				0x120000
+#define    GT_FIFO_CPU_ERROR_MASK		7
+#define    GT_FIFO_OVFERR			(1<<2)
+#define    GT_FIFO_IAWRERR			(1<<1)
+#define    GT_FIFO_IARDERR			(1<<0)
+
+#define  GT_FIFO_FREE_ENTRIES			0x120008
+#define    GT_FIFO_NUM_RESERVED_ENTRIES		20
+
+#define GEN6_UCGCTL1				0x9400
+# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE		(1 << 5)
+# define GEN6_CSUNIT_CLOCK_GATE_DISABLE			(1 << 7)
+
+#define GEN6_UCGCTL2				0x9404
+# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE		(1 << 30)
+# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE		(1 << 22)
+# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE		(1 << 13)
+# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE		(1 << 12)
+# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE		(1 << 11)
+
+#define GEN7_UCGCTL4				0x940c
+#define  GEN7_L3BANK2X_CLOCK_GATE_DISABLE	(1<<25)
+
+#define GEN6_RPNSWREQ				0xA008
+#define   GEN6_TURBO_DISABLE			(1<<31)
+#define   GEN6_FREQUENCY(x)			((x)<<25)
+#define   HSW_FREQUENCY(x)			((x)<<24)
+#define   GEN6_OFFSET(x)			((x)<<19)
+#define   GEN6_AGGRESSIVE_TURBO			(0<<15)
+#define GEN6_RC_VIDEO_FREQ			0xA00C
+#define GEN6_RC_CONTROL				0xA090
+#define   GEN6_RC_CTL_RC6pp_ENABLE		(1<<16)
+#define   GEN6_RC_CTL_RC6p_ENABLE		(1<<17)
+#define   GEN6_RC_CTL_RC6_ENABLE		(1<<18)
+#define   GEN6_RC_CTL_RC1e_ENABLE		(1<<20)
+#define   GEN6_RC_CTL_RC7_ENABLE		(1<<22)
+#define   GEN6_RC_CTL_EI_MODE(x)		((x)<<27)
+#define   GEN6_RC_CTL_HW_ENABLE			(1<<31)
+#define GEN6_RP_DOWN_TIMEOUT			0xA010
+#define GEN6_RP_INTERRUPT_LIMITS		0xA014
+#define GEN6_RPSTAT1				0xA01C
+#define   GEN6_CAGF_SHIFT			8
+#define   HSW_CAGF_SHIFT			7
+#define   GEN6_CAGF_MASK			(0x7f << GEN6_CAGF_SHIFT)
+#define   HSW_CAGF_MASK				(0x7f << HSW_CAGF_SHIFT)
+#define GEN6_RP_CONTROL				0xA024
+#define   GEN6_RP_MEDIA_TURBO			(1<<11)
+#define   GEN6_RP_MEDIA_MODE_MASK		(3<<9)
+#define   GEN6_RP_MEDIA_HW_TURBO_MODE		(3<<9)
+#define   GEN6_RP_MEDIA_HW_NORMAL_MODE		(2<<9)
+#define   GEN6_RP_MEDIA_HW_MODE			(1<<9)
+#define   GEN6_RP_MEDIA_SW_MODE			(0<<9)
+#define   GEN6_RP_MEDIA_IS_GFX			(1<<8)
+#define   GEN6_RP_ENABLE			(1<<7)
+#define   GEN6_RP_UP_IDLE_MIN			(0x1<<3)
+#define   GEN6_RP_UP_BUSY_AVG			(0x2<<3)
+#define   GEN6_RP_UP_BUSY_CONT			(0x4<<3)
+#define   GEN7_RP_DOWN_IDLE_AVG			(0x2<<0)
+#define   GEN6_RP_DOWN_IDLE_CONT		(0x1<<0)
+#define GEN6_RP_UP_THRESHOLD			0xA02C
+#define GEN6_RP_DOWN_THRESHOLD			0xA030
+#define GEN6_RP_CUR_UP_EI			0xA050
+#define   GEN6_CURICONT_MASK			0xffffff
+#define GEN6_RP_CUR_UP				0xA054
+#define   GEN6_CURBSYTAVG_MASK			0xffffff
+#define GEN6_RP_PREV_UP				0xA058
+#define GEN6_RP_CUR_DOWN_EI			0xA05C
+#define   GEN6_CURIAVG_MASK			0xffffff
+#define GEN6_RP_CUR_DOWN			0xA060
+#define GEN6_RP_PREV_DOWN			0xA064
+#define GEN6_RP_UP_EI				0xA068
+#define GEN6_RP_DOWN_EI				0xA06C
+#define GEN6_RP_IDLE_HYSTERSIS			0xA070
+#define GEN6_RC_STATE				0xA094
+#define GEN6_RC1_WAKE_RATE_LIMIT		0xA098
+#define GEN6_RC6_WAKE_RATE_LIMIT		0xA09C
+#define GEN6_RC6pp_WAKE_RATE_LIMIT		0xA0A0
+#define GEN6_RC_EVALUATION_INTERVAL		0xA0A8
+#define GEN6_RC_IDLE_HYSTERSIS			0xA0AC
+#define GEN6_RC_SLEEP				0xA0B0
+#define GEN6_RC1e_THRESHOLD			0xA0B4
+#define GEN6_RC6_THRESHOLD			0xA0B8
+#define GEN6_RC6p_THRESHOLD			0xA0BC
+#define GEN6_RC6pp_THRESHOLD			0xA0C0
+#define GEN6_PMINTRMSK				0xA168
+
+#define GEN6_PMISR				0x44020
+#define GEN6_PMIMR				0x44024 /* rps_lock */
+#define GEN6_PMIIR				0x44028
+#define GEN6_PMIER				0x4402C
+#define  GEN6_PM_MBOX_EVENT			(1<<25)
+#define  GEN6_PM_THERMAL_EVENT			(1<<24)
+#define  GEN6_PM_RP_DOWN_TIMEOUT		(1<<6)
+#define  GEN6_PM_RP_UP_THRESHOLD		(1<<5)
+#define  GEN6_PM_RP_DOWN_THRESHOLD		(1<<4)
+#define  GEN6_PM_RP_UP_EI_EXPIRED		(1<<2)
+#define  GEN6_PM_RP_DOWN_EI_EXPIRED		(1<<1)
+#define  GEN6_PM_DEFERRED_EVENTS		(GEN6_PM_RP_UP_THRESHOLD | \
+						 GEN6_PM_RP_DOWN_THRESHOLD | \
+						 GEN6_PM_RP_DOWN_TIMEOUT)
+
+#define GEN6_GT_GFX_RC6_LOCKED			0x138104
+#define GEN6_GT_GFX_RC6				0x138108
+#define GEN6_GT_GFX_RC6p			0x13810C
+#define GEN6_GT_GFX_RC6pp			0x138110
+
+#define GEN6_PCODE_MAILBOX			0x138124
+#define   GEN6_PCODE_READY			(1<<31)
+#define   GEN6_READ_OC_PARAMS			0xc
+#define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE	0x8
+#define   GEN6_PCODE_READ_MIN_FREQ_TABLE	0x9
+#define	  GEN6_PCODE_WRITE_RC6VIDS		0x4
+#define	  GEN6_PCODE_READ_RC6VIDS		0x5
+#define   GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
+#define   GEN6_DECODE_RC6_VID(vids)		(((vids) * 5) + 245)
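+/* worked example: GEN6_ENCODE_RC6_VID(450) == 41 and GEN6_DECODE_RC6_VID(41) == 450 (mV) */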
+#define GEN6_PCODE_DATA				0x138128
+#define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
+#define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT	16
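+/*
+ * Illustrative sketch (an assumption): a pcode mailbox write loads the data
+ * register, issues the command with GEN6_PCODE_READY set, and polls until
+ * the hardware clears the ready bit:
+ *
+ *	I915_WRITE(GEN6_PCODE_DATA, val);
+ *	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+ *	poll until (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0;
+ */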
+
+#define VLV_IOSF_DOORBELL_REQ			0x182100
+#define   IOSF_DEVFN_SHIFT			24
+#define   IOSF_OPCODE_SHIFT			16
+#define   IOSF_PORT_SHIFT			8
+#define   IOSF_BYTE_ENABLES_SHIFT		4
+#define   IOSF_BAR_SHIFT			1
+#define   IOSF_SB_BUSY				(1<<0)
+#define   IOSF_PORT_PUNIT			0x4
+#define VLV_IOSF_DATA				0x182104
+#define VLV_IOSF_ADDR				0x182108
+
+#define PUNIT_OPCODE_REG_READ			6
+#define PUNIT_OPCODE_REG_WRITE			7
+
+#define GEN6_GT_CORE_STATUS		0x138060
+#define   GEN6_CORE_CPD_STATE_MASK	(7<<4)
+#define   GEN6_RCn_MASK			7
+#define   GEN6_RC0			0
+#define   GEN6_RC3			2
+#define   GEN6_RC6			3
+#define   GEN6_RC7			4
+
+#define GEN7_MISCCPCTL			(0x9424)
+#define   GEN7_DOP_CLOCK_GATE_ENABLE	(1<<0)
+
+/* IVYBRIDGE DPF */
+#define GEN7_L3CDERRST1			0xB008 /* L3CD Error Status 1 */
+#define   GEN7_L3CDERRST1_ROW_MASK	(0x7ff<<14)
+#define   GEN7_PARITY_ERROR_VALID	(1<<13)
+#define   GEN7_L3CDERRST1_BANK_MASK	(3<<11)
+#define   GEN7_L3CDERRST1_SUBBANK_MASK	(7<<8)
+#define GEN7_PARITY_ERROR_ROW(reg) \
+		(((reg) & GEN7_L3CDERRST1_ROW_MASK) >> 14)
+#define GEN7_PARITY_ERROR_BANK(reg) \
+		(((reg) & GEN7_L3CDERRST1_BANK_MASK) >> 11)
+#define GEN7_PARITY_ERROR_SUBBANK(reg) \
+		(((reg) & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
+#define   GEN7_L3CDERRST1_ENABLE	(1<<7)
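+/*
+ * Illustrative example (an assumption, not from this file): decoding a raw
+ * L3CDERRST1 value with the helpers above:
+ *
+ *	u32 err = I915_READ(GEN7_L3CDERRST1);
+ *	if (err & GEN7_PARITY_ERROR_VALID)
+ *		DRM_DEBUG("L3 parity error: row %d, bank %d, subbank %d\n",
+ *			  GEN7_PARITY_ERROR_ROW(err),
+ *			  GEN7_PARITY_ERROR_BANK(err),
+ *			  GEN7_PARITY_ERROR_SUBBANK(err));
+ */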
+
+#define GEN7_L3LOG_BASE			0xB070
+#define GEN7_L3LOG_SIZE			0x80
+
+#define GEN7_HALF_SLICE_CHICKEN1	0xe100 /* IVB GT1 + VLV */
+#define GEN7_HALF_SLICE_CHICKEN1_GT2	0xf100
+#define   GEN7_MAX_PS_THREAD_DEP		(8<<12)
+#define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE	(1<<3)
+
+#define GEN7_ROW_CHICKEN2		0xe4f4
+#define GEN7_ROW_CHICKEN2_GT2		0xf4f4
+#define   DOP_CLOCK_GATING_DISABLE	(1<<0)
+
+#define G4X_AUD_VID_DID			(dev_priv->info->display_mmio_offset + 0x62020)
+#define INTEL_AUDIO_DEVCL		0x808629FB
+#define INTEL_AUDIO_DEVBLC		0x80862801
+#define INTEL_AUDIO_DEVCTG		0x80862802
+
+#define G4X_AUD_CNTL_ST			0x620B4
+#define G4X_ELDV_DEVCL_DEVBLC		(1 << 13)
+#define G4X_ELDV_DEVCTG			(1 << 14)
+#define G4X_ELD_ADDR			(0xf << 5)
+#define G4X_ELD_ACK			(1 << 4)
+#define G4X_HDMIW_HDMIEDID		0x6210C
+
+#define IBX_HDMIW_HDMIEDID_A		0xE2050
+#define IBX_HDMIW_HDMIEDID_B		0xE2150
+#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+					IBX_HDMIW_HDMIEDID_A, \
+					IBX_HDMIW_HDMIEDID_B)
+#define IBX_AUD_CNTL_ST_A		0xE20B4
+#define IBX_AUD_CNTL_ST_B		0xE21B4
+#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+					IBX_AUD_CNTL_ST_A, \
+					IBX_AUD_CNTL_ST_B)
+#define IBX_ELD_BUFFER_SIZE		(0x1f << 10)
+#define IBX_ELD_ADDRESS			(0x1f << 5)
+#define IBX_ELD_ACK			(1 << 4)
+#define IBX_AUD_CNTL_ST2		0xE20C0
+#define IBX_ELD_VALIDB			(1 << 0)
+#define IBX_CP_READYB			(1 << 1)
+
+#define CPT_HDMIW_HDMIEDID_A		0xE5050
+#define CPT_HDMIW_HDMIEDID_B		0xE5150
+#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
+					CPT_HDMIW_HDMIEDID_A, \
+					CPT_HDMIW_HDMIEDID_B)
+#define CPT_AUD_CNTL_ST_A		0xE50B4
+#define CPT_AUD_CNTL_ST_B		0xE51B4
+#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
+					CPT_AUD_CNTL_ST_A, \
+					CPT_AUD_CNTL_ST_B)
+#define CPT_AUD_CNTRL_ST2		0xE50C0
+
+/* These are the four 32-bit write offset registers, one for each stream
+ * output buffer.  Each determines the offset from the corresponding
+ * 3DSTATE_SO_BUFFER that the next streamed vertex output goes to.
+ */
+#define GEN7_SO_WRITE_OFFSET(n)		(0x5280 + (n) * 4)
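+/* e.g. GEN7_SO_WRITE_OFFSET(2) == 0x5288, the offset register for stream 2 */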
+
+#define IBX_AUD_CONFIG_A			0xe2000
+#define IBX_AUD_CONFIG_B			0xe2100
+#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
+					IBX_AUD_CONFIG_A, \
+					IBX_AUD_CONFIG_B)
+#define CPT_AUD_CONFIG_A			0xe5000
+#define CPT_AUD_CONFIG_B			0xe5100
+#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
+					CPT_AUD_CONFIG_A, \
+					CPT_AUD_CONFIG_B)
+#define   AUD_CONFIG_N_VALUE_INDEX		(1 << 29)
+#define   AUD_CONFIG_N_PROG_ENABLE		(1 << 28)
+#define   AUD_CONFIG_UPPER_N_SHIFT		20
+#define   AUD_CONFIG_UPPER_N_VALUE		(0xff << 20)
+#define   AUD_CONFIG_LOWER_N_SHIFT		4
+#define   AUD_CONFIG_LOWER_N_VALUE		(0xfff << 4)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT	16
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI		(0xf << 16)
+#define   AUD_CONFIG_DISABLE_NCTS		(1 << 3)
+
+/* HSW Audio */
+#define   HSW_AUD_CONFIG_A		0x65000 /* Audio Configuration Transcoder A */
+#define   HSW_AUD_CONFIG_B		0x65100 /* Audio Configuration Transcoder B */
+#define   HSW_AUD_CFG(pipe) _PIPE(pipe, \
+					HSW_AUD_CONFIG_A, \
+					HSW_AUD_CONFIG_B)
+
+#define   HSW_AUD_MISC_CTRL_A		0x65010 /* Audio Misc Control Convert 1 */
+#define   HSW_AUD_MISC_CTRL_B		0x65110 /* Audio Misc Control Convert 2 */
+#define   HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
+					HSW_AUD_MISC_CTRL_A, \
+					HSW_AUD_MISC_CTRL_B)
+
+#define   HSW_AUD_DIP_ELD_CTRL_ST_A	0x650b4 /* Audio DIP and ELD Control State Transcoder A */
+#define   HSW_AUD_DIP_ELD_CTRL_ST_B	0x651b4 /* Audio DIP and ELD Control State Transcoder B */
+#define   HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
+					HSW_AUD_DIP_ELD_CTRL_ST_A, \
+					HSW_AUD_DIP_ELD_CTRL_ST_B)
+
+/* Audio Digital Converter */
+#define   HSW_AUD_DIG_CNVT_1		0x65080 /* Audio Converter 1 */
+#define   HSW_AUD_DIG_CNVT_2		0x65180 /* Audio Converter 2 */
+#define   AUD_DIG_CNVT(pipe) _PIPE(pipe, \
+					HSW_AUD_DIG_CNVT_1, \
+					HSW_AUD_DIG_CNVT_2)
+#define   DIP_PORT_SEL_MASK		0x3
+
+#define   HSW_AUD_EDID_DATA_A		0x65050
+#define   HSW_AUD_EDID_DATA_B		0x65150
+#define   HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
+					HSW_AUD_EDID_DATA_A, \
+					HSW_AUD_EDID_DATA_B)
+
+#define   HSW_AUD_PIPE_CONV_CFG		0x6507c /* Audio pipe and converter configs */
+#define   HSW_AUD_PIN_ELD_CP_VLD	0x650c0 /* Audio ELD and CP Ready Status */
+#define   AUDIO_INACTIVE_C		(1<<11)
+#define   AUDIO_INACTIVE_B		(1<<7)
+#define   AUDIO_INACTIVE_A		(1<<3)
+#define   AUDIO_OUTPUT_ENABLE_A		(1<<2)
+#define   AUDIO_OUTPUT_ENABLE_B		(1<<6)
+#define   AUDIO_OUTPUT_ENABLE_C		(1<<10)
+#define   AUDIO_ELD_VALID_A		(1<<0)
+#define   AUDIO_ELD_VALID_B		(1<<4)
+#define   AUDIO_ELD_VALID_C		(1<<8)
+#define   AUDIO_CP_READY_A		(1<<1)
+#define   AUDIO_CP_READY_B		(1<<5)
+#define   AUDIO_CP_READY_C		(1<<9)
+
+/* HSW Power Wells */
+#define HSW_PWR_WELL_BIOS			0x45400 /* CTL1 */
+#define HSW_PWR_WELL_DRIVER			0x45404 /* CTL2 */
+#define HSW_PWR_WELL_KVMR			0x45408 /* CTL3 */
+#define HSW_PWR_WELL_DEBUG			0x4540C /* CTL4 */
+#define   HSW_PWR_WELL_ENABLE			(1<<31)
+#define   HSW_PWR_WELL_STATE			(1<<30)
+#define HSW_PWR_WELL_CTL5			0x45410
+#define   HSW_PWR_WELL_ENABLE_SINGLE_STEP	(1<<31)
+#define   HSW_PWR_WELL_PWR_GATE_OVERRIDE	(1<<20)
+#define   HSW_PWR_WELL_FORCE_ON			(1<<19)
+#define HSW_PWR_WELL_CTL6			0x45414
+
+/* Per-pipe DDI Function Control */
+#define TRANS_DDI_FUNC_CTL_A		0x60400
+#define TRANS_DDI_FUNC_CTL_B		0x61400
+#define TRANS_DDI_FUNC_CTL_C		0x62400
+#define TRANS_DDI_FUNC_CTL_EDP		0x6F400
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
+						   TRANS_DDI_FUNC_CTL_B)
+#define  TRANS_DDI_FUNC_ENABLE		(1<<31)
+/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
+#define  TRANS_DDI_PORT_MASK		(7<<28)
+#define  TRANS_DDI_SELECT_PORT(x)	((x)<<28)
+#define  TRANS_DDI_PORT_NONE		(0<<28)
+#define  TRANS_DDI_MODE_SELECT_MASK	(7<<24)
+#define  TRANS_DDI_MODE_SELECT_HDMI	(0<<24)
+#define  TRANS_DDI_MODE_SELECT_DVI	(1<<24)
+#define  TRANS_DDI_MODE_SELECT_DP_SST	(2<<24)
+#define  TRANS_DDI_MODE_SELECT_DP_MST	(3<<24)
+#define  TRANS_DDI_MODE_SELECT_FDI	(4<<24)
+#define  TRANS_DDI_BPC_MASK		(7<<20)
+#define  TRANS_DDI_BPC_8		(0<<20)
+#define  TRANS_DDI_BPC_10		(1<<20)
+#define  TRANS_DDI_BPC_6		(2<<20)
+#define  TRANS_DDI_BPC_12		(3<<20)
+#define  TRANS_DDI_PVSYNC		(1<<17)
+#define  TRANS_DDI_PHSYNC		(1<<16)
+#define  TRANS_DDI_EDP_INPUT_MASK	(7<<12)
+#define  TRANS_DDI_EDP_INPUT_A_ON	(0<<12)
+#define  TRANS_DDI_EDP_INPUT_A_ONOFF	(4<<12)
+#define  TRANS_DDI_EDP_INPUT_B_ONOFF	(5<<12)
+#define  TRANS_DDI_EDP_INPUT_C_ONOFF	(6<<12)
+#define  TRANS_DDI_BFI_ENABLE		(1<<4)
+#define  TRANS_DDI_PORT_WIDTH_X1	(0<<1)
+#define  TRANS_DDI_PORT_WIDTH_X2	(1<<1)
+#define  TRANS_DDI_PORT_WIDTH_X4	(3<<1)
+
+/* DisplayPort Transport Control */
+#define DP_TP_CTL_A			0x64040
+#define DP_TP_CTL_B			0x64140
+#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
+#define  DP_TP_CTL_ENABLE			(1<<31)
+#define  DP_TP_CTL_MODE_SST			(0<<27)
+#define  DP_TP_CTL_MODE_MST			(1<<27)
+#define  DP_TP_CTL_ENHANCED_FRAME_ENABLE	(1<<18)
+#define  DP_TP_CTL_FDI_AUTOTRAIN		(1<<15)
+#define  DP_TP_CTL_LINK_TRAIN_MASK		(7<<8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT1		(0<<8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT2		(1<<8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT3		(4<<8)
+#define  DP_TP_CTL_LINK_TRAIN_IDLE		(2<<8)
+#define  DP_TP_CTL_LINK_TRAIN_NORMAL		(3<<8)
+#define  DP_TP_CTL_SCRAMBLE_DISABLE		(1<<7)
+
+/* DisplayPort Transport Status */
+#define DP_TP_STATUS_A			0x64044
+#define DP_TP_STATUS_B			0x64144
+#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
+#define  DP_TP_STATUS_IDLE_DONE		(1<<25)
+#define  DP_TP_STATUS_AUTOTRAIN_DONE	(1<<12)
+
+/* DDI Buffer Control */
+#define DDI_BUF_CTL_A				0x64000
+#define DDI_BUF_CTL_B				0x64100
+#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
+#define  DDI_BUF_CTL_ENABLE			(1<<31)
+#define  DDI_BUF_EMP_400MV_0DB_HSW		(0<<24)   /* Sel0 */
+#define  DDI_BUF_EMP_400MV_3_5DB_HSW		(1<<24)   /* Sel1 */
+#define  DDI_BUF_EMP_400MV_6DB_HSW		(2<<24)   /* Sel2 */
+#define  DDI_BUF_EMP_400MV_9_5DB_HSW		(3<<24)   /* Sel3 */
+#define  DDI_BUF_EMP_600MV_0DB_HSW		(4<<24)   /* Sel4 */
+#define  DDI_BUF_EMP_600MV_3_5DB_HSW		(5<<24)   /* Sel5 */
+#define  DDI_BUF_EMP_600MV_6DB_HSW		(6<<24)   /* Sel6 */
+#define  DDI_BUF_EMP_800MV_0DB_HSW		(7<<24)   /* Sel7 */
+#define  DDI_BUF_EMP_800MV_3_5DB_HSW		(8<<24)   /* Sel8 */
+#define  DDI_BUF_EMP_MASK			(0xf<<24)
+#define  DDI_BUF_PORT_REVERSAL			(1<<16)
+#define  DDI_BUF_IS_IDLE			(1<<7)
+#define  DDI_A_4_LANES				(1<<4)
+#define  DDI_PORT_WIDTH_X1			(0<<1)
+#define  DDI_PORT_WIDTH_X2			(1<<1)
+#define  DDI_PORT_WIDTH_X4			(3<<1)
+#define  DDI_INIT_DISPLAY_DETECTED		(1<<0)
+
+/* DDI Buffer Translations */
+#define DDI_BUF_TRANS_A				0x64E00
+#define DDI_BUF_TRANS_B				0x64E60
+#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
+
+/* Sideband Interface (SBI) is programmed indirectly, via
+ * SBI_ADDR, which contains the register offset; and SBI_DATA,
+ * which contains the payload */
+#define SBI_ADDR			0xC6000
+#define SBI_DATA			0xC6004
+#define SBI_CTL_STAT			0xC6008
+#define  SBI_CTL_DEST_ICLK		(0x0<<16)
+#define  SBI_CTL_DEST_MPHY		(0x1<<16)
+#define  SBI_CTL_OP_IORD		(0x2<<8)
+#define  SBI_CTL_OP_IOWR		(0x3<<8)
+#define  SBI_CTL_OP_CRRD		(0x6<<8)
+#define  SBI_CTL_OP_CRWR		(0x7<<8)
+#define  SBI_RESPONSE_FAIL		(0x1<<1)
+#define  SBI_RESPONSE_SUCCESS		(0x0<<1)
+#define  SBI_BUSY			(0x1<<0)
+#define  SBI_READY			(0x0<<0)
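+/*
+ * Illustrative sketch (an assumption, mirroring the indirect access scheme
+ * described above): an iCLK register read goes through SBI_ADDR and
+ * SBI_CTL_STAT, polling SBI_BUSY before fetching the payload:
+ *
+ *	I915_WRITE(SBI_ADDR, (reg) << 16);
+ *	I915_WRITE(SBI_CTL_STAT, SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD | SBI_BUSY);
+ *	poll until (I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0;
+ *	value = I915_READ(SBI_DATA);
+ */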
+
+/* SBI offsets */
+#define  SBI_SSCDIVINTPHASE6			0x0600
+#define   SBI_SSCDIVINTPHASE_DIVSEL_MASK	((0x7f)<<1)
+#define   SBI_SSCDIVINTPHASE_DIVSEL(x)		((x)<<1)
+#define   SBI_SSCDIVINTPHASE_INCVAL_MASK	((0x7f)<<8)
+#define   SBI_SSCDIVINTPHASE_INCVAL(x)		((x)<<8)
+#define   SBI_SSCDIVINTPHASE_DIR(x)		((x)<<15)
+#define   SBI_SSCDIVINTPHASE_PROPAGATE		(1<<0)
+#define  SBI_SSCCTL				0x020c
+#define  SBI_SSCCTL6				0x060C
+#define   SBI_SSCCTL_PATHALT			(1<<3)
+#define   SBI_SSCCTL_DISABLE			(1<<0)
+#define  SBI_SSCAUXDIV6				0x0610
+#define   SBI_SSCAUXDIV_FINALDIV2SEL(x)		((x)<<4)
+#define  SBI_DBUFF0				0x2a00
+#define   SBI_DBUFF0_ENABLE			(1<<0)
+
+/* LPT PIXCLK_GATE */
+#define PIXCLK_GATE			0xC6020
+#define  PIXCLK_GATE_UNGATE		(1<<0)
+#define  PIXCLK_GATE_GATE		(0<<0)
+
+/* SPLL */
+#define SPLL_CTL			0x46020
+#define  SPLL_PLL_ENABLE		(1<<31)
+#define  SPLL_PLL_SSC			(1<<28)
+#define  SPLL_PLL_NON_SSC		(2<<28)
+#define  SPLL_PLL_FREQ_810MHz		(0<<26)
+#define  SPLL_PLL_FREQ_1350MHz		(1<<26)
+
+/* WRPLL */
+#define WRPLL_CTL1			0x46040
+#define WRPLL_CTL2			0x46060
+#define  WRPLL_PLL_ENABLE		(1<<31)
+#define  WRPLL_PLL_SELECT_SSC		(0x01<<28)
+#define  WRPLL_PLL_SELECT_NON_SSC	(0x02<<28)
+#define  WRPLL_PLL_SELECT_LCPLL_2700	(0x03<<28)
+/* WRPLL divider programming */
+#define  WRPLL_DIVIDER_REFERENCE(x)	((x)<<0)
+#define  WRPLL_DIVIDER_POST(x)		((x)<<8)
+#define  WRPLL_DIVIDER_FEEDBACK(x)	((x)<<16)
+
+/* Port clock selection */
+#define PORT_CLK_SEL_A			0x46100
+#define PORT_CLK_SEL_B			0x46104
+#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
+#define  PORT_CLK_SEL_LCPLL_2700	(0<<29)
+#define  PORT_CLK_SEL_LCPLL_1350	(1<<29)
+#define  PORT_CLK_SEL_LCPLL_810		(2<<29)
+#define  PORT_CLK_SEL_SPLL		(3<<29)
+#define  PORT_CLK_SEL_WRPLL1		(4<<29)
+#define  PORT_CLK_SEL_WRPLL2		(5<<29)
+#define  PORT_CLK_SEL_NONE		(7<<29)
+
+/* Transcoder clock selection */
+#define TRANS_CLK_SEL_A			0x46140
+#define TRANS_CLK_SEL_B			0x46144
+#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define  TRANS_CLK_SEL_DISABLED		(0x0<<29)
+#define  TRANS_CLK_SEL_PORT(x)		(((x)+1)<<29)
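+/* e.g. TRANS_CLK_SEL_PORT(PORT_B) == (2<<29), assuming the enum port
+ * numbering where PORT_A is 0; the +1 reserves value 0 for "disabled". */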
+
+#define _TRANSA_MSA_MISC		0x60410
+#define _TRANSB_MSA_MISC		0x61410
+#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
+					       _TRANSB_MSA_MISC)
+#define  TRANS_MSA_SYNC_CLK		(1<<0)
+#define  TRANS_MSA_6_BPC		(0<<5)
+#define  TRANS_MSA_8_BPC		(1<<5)
+#define  TRANS_MSA_10_BPC		(2<<5)
+#define  TRANS_MSA_12_BPC		(3<<5)
+#define  TRANS_MSA_16_BPC		(4<<5)
+
+/* LCPLL Control */
+#define LCPLL_CTL			0x130040
+#define  LCPLL_PLL_DISABLE		(1<<31)
+#define  LCPLL_PLL_LOCK			(1<<30)
+#define  LCPLL_CLK_FREQ_MASK		(3<<26)
+#define  LCPLL_CLK_FREQ_450		(0<<26)
+#define  LCPLL_CD_CLOCK_DISABLE		(1<<25)
+#define  LCPLL_CD2X_CLOCK_DISABLE	(1<<23)
+#define  LCPLL_CD_SOURCE_FCLK		(1<<21)
+
+/* Pipe WM_LINETIME - watermark line time */
+#define PIPE_WM_LINETIME_A		0x45270
+#define PIPE_WM_LINETIME_B		0x45274
+#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
+					   PIPE_WM_LINETIME_B)
+#define   PIPE_WM_LINETIME_MASK			(0x1ff)
+#define   PIPE_WM_LINETIME_TIME(x)		((x))
+#define   PIPE_WM_LINETIME_IPS_LINETIME_MASK	(0x1ff<<16)
+#define   PIPE_WM_LINETIME_IPS_LINETIME(x)	((x)<<16)
+
+/* SFUSE_STRAP */
+#define SFUSE_STRAP			0xc2014
+#define  SFUSE_STRAP_DDIB_DETECTED	(1<<2)
+#define  SFUSE_STRAP_DDIC_DETECTED	(1<<1)
+#define  SFUSE_STRAP_DDID_DETECTED	(1<<0)
+
+#define WM_DBG				0x45280
+#define  WM_DBG_DISALLOW_MULTIPLE_LP	(1<<0)
+#define  WM_DBG_DISALLOW_MAXFIFO	(1<<1)
+#define  WM_DBG_DISALLOW_SPRITE		(1<<2)
+
+/* pipe CSC */
+#define _PIPE_A_CSC_COEFF_RY_GY	0x49010
+#define _PIPE_A_CSC_COEFF_BY	0x49014
+#define _PIPE_A_CSC_COEFF_RU_GU	0x49018
+#define _PIPE_A_CSC_COEFF_BU	0x4901c
+#define _PIPE_A_CSC_COEFF_RV_GV	0x49020
+#define _PIPE_A_CSC_COEFF_BV	0x49024
+#define _PIPE_A_CSC_MODE	0x49028
+#define _PIPE_A_CSC_PREOFF_HI	0x49030
+#define _PIPE_A_CSC_PREOFF_ME	0x49034
+#define _PIPE_A_CSC_PREOFF_LO	0x49038
+#define _PIPE_A_CSC_POSTOFF_HI	0x49040
+#define _PIPE_A_CSC_POSTOFF_ME	0x49044
+#define _PIPE_A_CSC_POSTOFF_LO	0x49048
+
+#define _PIPE_B_CSC_COEFF_RY_GY	0x49110
+#define _PIPE_B_CSC_COEFF_BY	0x49114
+#define _PIPE_B_CSC_COEFF_RU_GU	0x49118
+#define _PIPE_B_CSC_COEFF_BU	0x4911c
+#define _PIPE_B_CSC_COEFF_RV_GV	0x49120
+#define _PIPE_B_CSC_COEFF_BV	0x49124
+#define _PIPE_B_CSC_MODE	0x49128
+#define _PIPE_B_CSC_PREOFF_HI	0x49130
+#define _PIPE_B_CSC_PREOFF_ME	0x49134
+#define _PIPE_B_CSC_PREOFF_LO	0x49138
+#define _PIPE_B_CSC_POSTOFF_HI	0x49140
+#define _PIPE_B_CSC_POSTOFF_ME	0x49144
+#define _PIPE_B_CSC_POSTOFF_LO	0x49148
+
+#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
+#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
+#define CSC_MODE_YUV_TO_RGB (1 << 0)
+
+#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
+#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
+#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
+#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
+#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
+#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
+#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
+#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
+#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
+#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
+#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
+#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
+#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+
+#endif /* _I915_REG_H_ */
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_suspend.c b/linux-imx/drivers/gpu/drm/i915/i915_suspend.c
new file mode 100644
index 0000000..369b3d8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_suspend.c
@@ -0,0 +1,424 @@
+/*
+ *
+ * Copyright 2008 (c) Intel Corporation
+ *   Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "intel_drv.h"
+#include "i915_reg.h"
+
+static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE8(index_port, reg);
+	return I915_READ8(data_port);
+}
+
+static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_READ8(st01);
+	I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
+	return I915_READ8(VGA_AR_DATA_READ);
+}
+
+static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_READ8(st01);
+	I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
+	I915_WRITE8(VGA_AR_DATA_WRITE, val);
+}
+
+static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE8(index_port, reg);
+	I915_WRITE8(data_port, val);
+}
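+
+/*
+ * The helpers above wrap the classic VGA indexed-register protocol: write
+ * the register number to the index port, then move the value through the
+ * data port.  The attribute controller is the odd one out -- index and
+ * data writes share the VGA_AR_INDEX port, sequenced by an internal
+ * flip-flop that a read of the ST01 input status register resets, which
+ * is why every AR access starts with an I915_READ8(st01).
+ */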
+
+static void i915_save_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	u16 cr_index, cr_data, st01;
+
+	/* VGA state */
+	dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+	dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+	dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
+	dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
+
+	/* VGA color palette registers */
+	dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
+
+	/* MSR bits */
+	dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
+	if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
+		cr_index = VGA_CR_INDEX_CGA;
+		cr_data = VGA_CR_DATA_CGA;
+		st01 = VGA_ST01_CGA;
+	} else {
+		cr_index = VGA_CR_INDEX_MDA;
+		cr_data = VGA_CR_DATA_MDA;
+		st01 = VGA_ST01_MDA;
+	}
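+
+	/*
+	 * The I/O address select bit just tested decides whether the CRT
+	 * controller and input status registers decode at the colour (0x3Dx)
+	 * or the mono (0x3Bx) addresses, so the matching ports have to be
+	 * used for everything below.
+	 */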
+
+	/* CRT controller regs */
+	i915_write_indexed(dev, cr_index, cr_data, 0x11,
+			   i915_read_indexed(dev, cr_index, cr_data, 0x11) &
+			   (~0x80));
+	for (i = 0; i <= 0x24; i++)
+		dev_priv->regfile.saveCR[i] =
+			i915_read_indexed(dev, cr_index, cr_data, i);
+	/* Make sure we don't turn off CR group 0 writes */
+	dev_priv->regfile.saveCR[0x11] &= ~0x80;
+
+	/* Attribute controller registers */
+	I915_READ8(st01);
+	dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+	for (i = 0; i <= 0x14; i++)
+		dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
+	I915_READ8(st01);
+	I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
+	I915_READ8(st01);
+
+	/* Graphics controller registers */
+	for (i = 0; i < 9; i++)
+		dev_priv->regfile.saveGR[i] =
+			i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
+
+	dev_priv->regfile.saveGR[0x10] =
+		i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
+	dev_priv->regfile.saveGR[0x11] =
+		i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
+	dev_priv->regfile.saveGR[0x18] =
+		i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
+
+	/* Sequencer registers */
+	for (i = 0; i < 8; i++)
+		dev_priv->regfile.saveSR[i] =
+			i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
+}
+
+static void i915_restore_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	u16 cr_index, cr_data, st01;
+
+	/* VGA state */
+	I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
+
+	I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+	I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+	I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
+	POSTING_READ(VGA_PD);
+	udelay(150);
+
+	/* MSR bits */
+	I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
+	if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
+		cr_index = VGA_CR_INDEX_CGA;
+		cr_data = VGA_CR_DATA_CGA;
+		st01 = VGA_ST01_CGA;
+	} else {
+		cr_index = VGA_CR_INDEX_MDA;
+		cr_data = VGA_CR_DATA_MDA;
+		st01 = VGA_ST01_MDA;
+	}
+
+	/* Sequencer registers, don't write SR07 */
+	for (i = 0; i < 7; i++)
+		i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
+				   dev_priv->regfile.saveSR[i]);
+
+	/* CRT controller regs */
+	/* Enable CR group 0 writes */
+	i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
+	for (i = 0; i <= 0x24; i++)
+		i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
+
+	/* Graphics controller regs */
+	for (i = 0; i < 9; i++)
+		i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
+				   dev_priv->regfile.saveGR[i]);
+
+	i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
+			   dev_priv->regfile.saveGR[0x10]);
+	i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
+			   dev_priv->regfile.saveGR[0x11]);
+	i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
+			   dev_priv->regfile.saveGR[0x18]);
+
+	/* Attribute controller registers */
+	I915_READ8(st01); /* switch back to index mode */
+	for (i = 0; i <= 0x14; i++)
+		i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
+	I915_READ8(st01); /* switch back to index mode */
+	I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
+	I915_READ8(st01);
+
+	/* VGA color palette registers */
+	I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
+}
+
+static void i915_save_display(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Display arbitration control */
+	if (INTEL_INFO(dev)->gen <= 4)
+		dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
+
+	/* This is only meaningful in non-KMS mode */
+	/* Don't save them in KMS mode */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		i915_save_display_reg(dev);
+
+	/* LVDS state */
+	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+		dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+		dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+		dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+			dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
+	} else {
+		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+		dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+		if (INTEL_INFO(dev)->gen >= 4)
+			dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+		if (IS_MOBILE(dev) && !IS_I830(dev))
+			dev_priv->regfile.saveLVDS = I915_READ(LVDS);
+	}
+
+	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+		dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+
+	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+		dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+	} else {
+		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+		dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
+	}
+
+	/* Only save FBC state on platforms that support FBC */
+	if (I915_HAS_FBC(dev)) {
+		if (HAS_PCH_SPLIT(dev)) {
+			dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+		} else if (IS_GM45(dev)) {
+			dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+		} else {
+			dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+			dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+			dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+			dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+		}
+	}
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		i915_save_vga(dev);
+}
+
+static void i915_restore_display(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 mask = 0xffffffff;
+
+	/* Display arbitration */
+	if (INTEL_INFO(dev)->gen <= 4)
+		I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		i915_restore_display_reg(dev);
+
+	/* LVDS state */
+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+		I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		mask = ~LVDS_PORT_EN;
+
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
+	else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+		I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
+
+	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
+		I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
+
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+		/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+		 * otherwise we get blank eDP screen after S3 on some machines
+		 */
+		I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+		I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+		I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+		I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+		I915_WRITE(RSTDBYCTL,
+			   dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
+	} else {
+		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+		I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+		I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+		I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+		I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+		I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+		I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+	}
+
+	/* Only restore FBC info on platforms that support FBC */
+	intel_disable_fbc(dev);
+	if (I915_HAS_FBC(dev)) {
+		if (HAS_PCH_SPLIT(dev)) {
+			I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
+		} else if (IS_GM45(dev)) {
+			I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
+		} else {
+			I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
+			I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
+			I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
+			I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
+		}
+	}
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		i915_restore_vga(dev);
+	else
+		i915_redisable_vga(dev);
+}
+
+int i915_save_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
+
+	mutex_lock(&dev->struct_mutex);
+
+	i915_save_display(dev);
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/* Interrupt state */
+		if (HAS_PCH_SPLIT(dev)) {
+			dev_priv->regfile.saveDEIER = I915_READ(DEIER);
+			dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
+			dev_priv->regfile.saveGTIER = I915_READ(GTIER);
+			dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
+			dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+			dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
+			dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
+				I915_READ(RSTDBYCTL);
+			dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
+		} else {
+			dev_priv->regfile.saveIER = I915_READ(IER);
+			dev_priv->regfile.saveIMR = I915_READ(IMR);
+		}
+	}
+
+	intel_disable_gt_powersave(dev);
+
+	/* Cache mode state */
+	dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+
+	/* Memory Arbitration state */
+	dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+
+	/* Scratch space (SWF "software flag" BIOS scratch registers) */
+	for (i = 0; i < 16; i++) {
+		dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+		dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+	}
+	for (i = 0; i < 3; i++)
+		dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+int i915_restore_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
+
+	mutex_lock(&dev->struct_mutex);
+
+	i915_gem_restore_fences(dev);
+	i915_restore_display(dev);
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/* Interrupt state */
+		if (HAS_PCH_SPLIT(dev)) {
+			I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
+			I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
+			I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
+			I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
+			I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
+			I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
+			I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+		} else {
+			I915_WRITE(IER, dev_priv->regfile.saveIER);
+			I915_WRITE(IMR, dev_priv->regfile.saveIMR);
+		}
+	}
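+
+	/*
+	 * CACHE_MODE_0 and MI_ARB_STATE are masked registers: the high 16
+	 * bits of a write select which of the low 16 bits actually change,
+	 * so OR-ing in 0xffff0000 unmasks every bit being restored.
+	 */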
+
+	/* Cache mode state */
+	I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
+
+	/* Memory arbitration state */
+	I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
+
+	for (i = 0; i < 16; i++) {
+		I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
+		I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
+	}
+	for (i = 0; i < 3; i++)
+		I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_i2c_reset(dev);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_sysfs.c b/linux-imx/drivers/gpu/drm/i915/i915_sysfs.c
new file mode 100644
index 0000000..d5e1890
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_sysfs.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+#ifdef CONFIG_PM
+static u32 calc_residency(struct drm_device *dev, const u32 reg)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u64 raw_time; /* 32b value may overflow during fixed point math */
+
+	if (!intel_enable_rc6(dev))
+		return 0;
+
+	raw_time = I915_READ(reg) * 128ULL;
+	return DIV_ROUND_UP_ULL(raw_time, 100000);
+}
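+
+/*
+ * The RC6 residency counters tick in 1.28us units on these parts, so the
+ * conversion to milliseconds performed above is
+ *
+ *	ms = ticks * 1.28us / 1000 = ticks * 128 / 100000
+ *
+ * carried out in 64-bit fixed point and rounded up.
+ */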
+
+static ssize_t
+show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
+}
+
+static ssize_t
+show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
+	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
+}
+
+static ssize_t
+show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
+}
+
+static ssize_t
+show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
+}
+
+static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
+static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
+static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
+static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
+
+static struct attribute *rc6_attrs[] = {
+	&dev_attr_rc6_enable.attr,
+	&dev_attr_rc6_residency_ms.attr,
+	&dev_attr_rc6p_residency_ms.attr,
+	&dev_attr_rc6pp_residency_ms.attr,
+	NULL
+};
+
+static struct attribute_group rc6_attr_group = {
+	.name = power_group_name,
+	.attrs = rc6_attrs
+};
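+
+/*
+ * Since the group is named power_group_name, these attributes are merged
+ * into the device's existing "power" directory instead of creating a new
+ * one, so they typically appear as e.g.
+ * /sys/class/drm/card0/power/rc6_residency_ms.
+ */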
+#endif
+
+static int l3_access_valid(struct drm_device *dev, loff_t offset)
+{
+	if (!HAS_L3_GPU_CACHE(dev))
+		return -EPERM;
+
+	if (offset % 4 != 0)
+		return -EINVAL;
+
+	if (offset >= GEN7_L3LOG_SIZE)
+		return -ENXIO;
+
+	return 0;
+}
+
+static ssize_t
+i915_l3_read(struct file *filp, struct kobject *kobj,
+	     struct bin_attribute *attr, char *buf,
+	     loff_t offset, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_device *drm_dev = dminor->dev;
+	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	uint32_t misccpctl;
+	int i, ret;
+
+	ret = l3_access_valid(drm_dev, offset);
+	if (ret)
+		return ret;
+
+	ret = i915_mutex_lock_interruptible(drm_dev);
+	if (ret)
+		return ret;
+
+	misccpctl = I915_READ(GEN7_MISCCPCTL);
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+
+	for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
+		*((uint32_t *)(&buf[i - offset])) = I915_READ(GEN7_L3LOG_BASE + i);
+
+	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+	mutex_unlock(&drm_dev->struct_mutex);
+
+	return i - offset;
+}
+
+static ssize_t
+i915_l3_write(struct file *filp, struct kobject *kobj,
+	      struct bin_attribute *attr, char *buf,
+	      loff_t offset, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	struct drm_device *drm_dev = dminor->dev;
+	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	u32 *temp = NULL; /* Just here to make handling failures easy */
+	int ret;
+
+	ret = l3_access_valid(drm_dev, offset);
+	if (ret)
+		return ret;
+
+	ret = i915_mutex_lock_interruptible(drm_dev);
+	if (ret)
+		return ret;
+
+	if (!dev_priv->l3_parity.remap_info) {
+		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
+		if (!temp) {
+			mutex_unlock(&drm_dev->struct_mutex);
+			return -ENOMEM;
+		}
+	}
+
+	ret = i915_gpu_idle(drm_dev);
+	if (ret) {
+		kfree(temp);
+		mutex_unlock(&drm_dev->struct_mutex);
+		return ret;
+	}
+
+	/* TODO: Ideally we really want a GPU reset here to make sure errors
+	 * aren't propagated. Since I cannot find a stable way to reset the GPU
+	 * at this point it is left as a TODO.
+	 */
+	if (temp)
+		dev_priv->l3_parity.remap_info = temp;
+
+	memcpy(dev_priv->l3_parity.remap_info + (offset/4),
+	       buf,
+	       count);
+
+	i915_gem_l3_remap(drm_dev);
+
+	mutex_unlock(&drm_dev->struct_mutex);
+
+	return count;
+}
+
+static struct bin_attribute dpf_attrs = {
+	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
+	.size = GEN7_L3LOG_SIZE,
+	.read = i915_l3_read,
+	.write = i915_l3_write,
+	.mmap = NULL
+};
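+
+/*
+ * dpf_attrs exposes the GEN7 L3 parity log as a root-only binary sysfs
+ * file named "l3_parity" on the card's device node: reads dump the
+ * current remap log, writes push replacement rows back and trigger an L3
+ * remap via i915_gem_l3_remap().
+ */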
+
+static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t gt_max_freq_mhz_store(struct device *kdev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
+	ssize_t ret;
+
+	ret = kstrtou32(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	val /= GT_FREQUENCY_MULTIPLIER;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	hw_max = dev_priv->rps.hw_max;
+	non_oc_max = (rp_state_cap & 0xff);
+	hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		return -EINVAL;
+	}
+
+	if (val > non_oc_max)
+		DRM_DEBUG("User requested overclocking to %d\n",
+			  val * GT_FREQUENCY_MULTIPLIER);
+
+	if (dev_priv->rps.cur_delay > val)
+		gen6_set_rps(dev_priv->dev, val);
+
+	dev_priv->rps.max_delay = val;
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return count;
+}
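+
+/*
+ * GEN6_RP_STATE_CAP packs three frequency caps, each in units of
+ * GT_FREQUENCY_MULTIPLIER (50MHz):
+ *
+ *	bits  7:0	RP0 -- the highest non-overclocked frequency
+ *	bits 15:8	RP1 -- the "efficient" frequency
+ *	bits 23:16	RPn -- the lowest frequency
+ *
+ * e.g. a raw value of 0x0b1122 decodes to RP0=1700MHz, RP1=850MHz and
+ * RPn=550MHz.
+ */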
+
+static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t gt_min_freq_mhz_store(struct device *kdev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, rp_state_cap, hw_max, hw_min;
+	ssize_t ret;
+
+	ret = kstrtou32(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	val /= GT_FREQUENCY_MULTIPLIER;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	hw_max = dev_priv->rps.hw_max;
+	hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		return -EINVAL;
+	}
+
+	if (dev_priv->rps.cur_delay < val)
+		gen6_set_rps(dev_priv->dev, val);
+
+	dev_priv->rps.min_delay = val;
+
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	return count;
+}
+
+static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
+static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+
+/* For now we have a static number of RP states */
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, rp_state_cap;
+	ssize_t ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (attr == &dev_attr_gt_RP0_freq_mhz) {
+		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
+		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
+		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+	} else {
+		BUG();
+	}
+	return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static const struct attribute *gen6_attrs[] = {
+	&dev_attr_gt_cur_freq_mhz.attr,
+	&dev_attr_gt_max_freq_mhz.attr,
+	&dev_attr_gt_min_freq_mhz.attr,
+	&dev_attr_gt_RP0_freq_mhz.attr,
+	&dev_attr_gt_RP1_freq_mhz.attr,
+	&dev_attr_gt_RPn_freq_mhz.attr,
+	NULL,
+};
+
+void i915_setup_sysfs(struct drm_device *dev)
+{
+	int ret;
+
+#ifdef CONFIG_PM
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
+					&rc6_attr_group);
+		if (ret)
+			DRM_ERROR("RC6 residency sysfs setup failed\n");
+	}
+#endif
+	if (HAS_L3_GPU_CACHE(dev)) {
+		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
+		if (ret)
+			DRM_ERROR("l3 parity sysfs setup failed\n");
+	}
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+		if (ret)
+			DRM_ERROR("gen6 sysfs setup failed\n");
+	}
+}
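+
+/*
+ * None of the failures above are treated as fatal: a setup error only
+ * costs the corresponding sysfs knob, so it is reported with DRM_ERROR
+ * and the driver load carries on regardless.
+ */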
+
+void i915_teardown_sysfs(struct drm_device *dev)
+{
+	sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
+	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
+#ifdef CONFIG_PM
+	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+#endif
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_trace.h b/linux-imx/drivers/gpu/drm/i915/i915_trace.h
new file mode 100644
index 0000000..3db4a68
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_trace.h
@@ -0,0 +1,455 @@
+#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I915_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include "intel_ringbuffer.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE i915_trace
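+
+/*
+ * Like any tracepoint header, this file is deliberately multi-include:
+ * most users only see the event declarations, while the one translation
+ * unit that defines CREATE_TRACE_POINTS before including it also expands
+ * the event bodies via <trace/define_trace.h> at the bottom of the file.
+ */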
+
+/* object tracking */
+
+TRACE_EVENT(i915_gem_object_create,
+	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, size)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->size = obj->base.size;
+			   ),
+
+	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
+);
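+
+/*
+ * All the events here follow the same shape: TP_PROTO/TP_ARGS give the C
+ * signature of the tracepoint, TP_STRUCT__entry lays out the record that
+ * is stored in the trace ring buffer, TP_fast_assign fills that record on
+ * the hot path, and TP_printk formats it when the log is read back, e.g.
+ * through /sys/kernel/debug/tracing/trace.
+ */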
+
+TRACE_EVENT(i915_gem_object_bind,
+	    TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
+	    TP_ARGS(obj, mappable),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, offset)
+			     __field(u32, size)
+			     __field(bool, mappable)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->offset = obj->gtt_space->start;
+			   __entry->size = obj->gtt_space->size;
+			   __entry->mappable = mappable;
+			   ),
+
+	    TP_printk("obj=%p, offset=%08x size=%x%s",
+		      __entry->obj, __entry->offset, __entry->size,
+		      __entry->mappable ? ", mappable" : "")
+);
+
+TRACE_EVENT(i915_gem_object_unbind,
+	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, offset)
+			     __field(u32, size)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->offset = obj->gtt_space->start;
+			   __entry->size = obj->gtt_space->size;
+			   ),
+
+	    TP_printk("obj=%p, offset=%08x size=%x",
+		      __entry->obj, __entry->offset, __entry->size)
+);
+
+TRACE_EVENT(i915_gem_object_change_domain,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
+	    TP_ARGS(obj, old_read, old_write),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, read_domains)
+			     __field(u32, write_domain)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->read_domains = obj->base.read_domains | (old_read << 16);
+			   __entry->write_domain = obj->base.write_domain | (old_write << 16);
+			   ),
+
+	    TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
+		      __entry->obj,
+		      __entry->read_domains >> 16,
+		      __entry->read_domains & 0xffff,
+		      __entry->write_domain >> 16,
+		      __entry->write_domain & 0xffff)
+);
+
+TRACE_EVENT(i915_gem_object_pwrite,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+	    TP_ARGS(obj, offset, len),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, offset)
+			     __field(u32, len)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->offset = offset;
+			   __entry->len = len;
+			   ),
+
+	    TP_printk("obj=%p, offset=%u, len=%u",
+		      __entry->obj, __entry->offset, __entry->len)
+);
+
+TRACE_EVENT(i915_gem_object_pread,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+	    TP_ARGS(obj, offset, len),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, offset)
+			     __field(u32, len)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->offset = offset;
+			   __entry->len = len;
+			   ),
+
+	    TP_printk("obj=%p, offset=%u, len=%u",
+		      __entry->obj, __entry->offset, __entry->len)
+);
+
+TRACE_EVENT(i915_gem_object_fault,
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
+	    TP_ARGS(obj, index, gtt, write),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     __field(u32, index)
+			     __field(bool, gtt)
+			     __field(bool, write)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   __entry->index = index;
+			   __entry->gtt = gtt;
+			   __entry->write = write;
+			   ),
+
+	    TP_printk("obj=%p, %s index=%u %s",
+		      __entry->obj,
+		      __entry->gtt ? "GTT" : "CPU",
+		      __entry->index,
+		      __entry->write ? ", writable" : "")
+);
+
+DECLARE_EVENT_CLASS(i915_gem_object,
+	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_ARGS(obj),
+
+	    TP_STRUCT__entry(
+			     __field(struct drm_i915_gem_object *, obj)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->obj = obj;
+			   ),
+
+	    TP_printk("obj=%p", __entry->obj)
+);
+
+DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+	     TP_PROTO(struct drm_i915_gem_object *obj),
+	     TP_ARGS(obj)
+);
+
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
+	    TP_PROTO(struct drm_i915_gem_object *obj),
+	    TP_ARGS(obj)
+);
+
+TRACE_EVENT(i915_gem_evict,
+	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
+	    TP_ARGS(dev, size, align, mappable),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, size)
+			     __field(u32, align)
+			     __field(bool, mappable)
+			    ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->size = size;
+			   __entry->align = align;
+			   __entry->mappable = mappable;
+			  ),
+
+	    TP_printk("dev=%d, size=%d, align=%d %s",
+		      __entry->dev, __entry->size, __entry->align,
+		      __entry->mappable ? ", mappable" : "")
+);
+
+TRACE_EVENT(i915_gem_evict_everything,
+	    TP_PROTO(struct drm_device *dev),
+	    TP_ARGS(dev),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			    ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			  ),
+
+	    TP_printk("dev=%d", __entry->dev)
+);
+
+TRACE_EVENT(i915_gem_ring_dispatch,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+	    TP_ARGS(ring, seqno, flags),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     __field(u32, seqno)
+			     __field(u32, flags)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   __entry->seqno = seqno;
+			   __entry->flags = flags;
+			   i915_trace_irq_get(ring, seqno);
+			   ),
+
+	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
+		      __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
+);
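+
+/*
+ * Note that the fast-assign above is not side-effect free:
+ * i915_trace_irq_get() takes an interrupt reference for the dispatched
+ * seqno, so that while this event is enabled a completion interrupt
+ * arrives and the matching request events can be traced promptly.
+ */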
+
+TRACE_EVENT(i915_gem_ring_flush,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
+	    TP_ARGS(ring, invalidate, flush),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     __field(u32, invalidate)
+			     __field(u32, flush)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   __entry->invalidate = invalidate;
+			   __entry->flush = flush;
+			   ),
+
+	    TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
+		      __entry->dev, __entry->ring,
+		      __entry->invalidate, __entry->flush)
+);
+
+DECLARE_EVENT_CLASS(i915_gem_request,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, ring=%u, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
+);
+
+DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
+);
+
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
+);
+
+DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
+);
+
+TRACE_EVENT(i915_gem_request_wait_begin,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     __field(u32, seqno)
+			     __field(bool, blocking)
+			     ),
+
+	    /* NB: the blocking information is racy since mutex_is_locked
+	     * doesn't check that the current thread holds the lock. The only
+	     * other option would be to pass the boolean information of whether
+	     * or not the caller was blocking down through the stack, which is
+	     * less desirable.
+	     */
+	    TP_fast_assign(
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   __entry->seqno = seqno;
+			   __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
+			   ),
+
+	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
+		      __entry->dev, __entry->ring, __entry->seqno,
+		      __entry->blocking ?  "yes (NB)" : "no")
+);
+
+DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	    TP_ARGS(ring, seqno)
+);
+
+DECLARE_EVENT_CLASS(i915_ring,
+	    TP_PROTO(struct intel_ring_buffer *ring),
+	    TP_ARGS(ring),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   ),
+
+	    TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
+);
+
+DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
+	    TP_PROTO(struct intel_ring_buffer *ring),
+	    TP_ARGS(ring)
+);
+
+DEFINE_EVENT(i915_ring, i915_ring_wait_end,
+	    TP_PROTO(struct intel_ring_buffer *ring),
+	    TP_ARGS(ring)
+);
+
+TRACE_EVENT(i915_flip_request,
+	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),
+
+	    TP_ARGS(plane, obj),
+
+	    TP_STRUCT__entry(
+		    __field(int, plane)
+		    __field(struct drm_i915_gem_object *, obj)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->plane = plane;
+		    __entry->obj = obj;
+		    ),
+
+	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
+);
+
+TRACE_EVENT(i915_flip_complete,
+	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),
+
+	    TP_ARGS(plane, obj),
+
+	    TP_STRUCT__entry(
+		    __field(int, plane)
+		    __field(struct drm_i915_gem_object *, obj)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->plane = plane;
+		    __entry->obj = obj;
+		    ),
+
+	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
+);
+
+TRACE_EVENT(i915_reg_rw,
+	TP_PROTO(bool write, u32 reg, u64 val, int len),
+
+	TP_ARGS(write, reg, val, len),
+
+	TP_STRUCT__entry(
+		__field(u64, val)
+		__field(u32, reg)
+		__field(u16, write)
+		__field(u16, len)
+		),
+
+	TP_fast_assign(
+		__entry->val = (u64)val;
+		__entry->reg = reg;
+		__entry->write = write;
+		__entry->len = len;
+		),
+
+	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+		__entry->write ? "write" : "read",
+		__entry->reg, __entry->len,
+		(u32)(__entry->val & 0xffffffff),
+		(u32)(__entry->val >> 32))
+);
+
+TRACE_EVENT(intel_gpu_freq_change,
+	    TP_PROTO(u32 freq),
+	    TP_ARGS(freq),
+
+	    TP_STRUCT__entry(
+			     __field(u32, freq)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->freq = freq;
+			   ),
+
+	    TP_printk("new_freq=%u", __entry->freq)
+);
+
+#endif /* _I915_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_trace_points.c b/linux-imx/drivers/gpu/drm/i915/i915_trace_points.c
new file mode 100644
index 0000000..f1df2bd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_trace_points.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * Authors:
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#include "i915_drv.h"
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "i915_trace.h"
+#endif
diff --git a/linux-imx/drivers/gpu/drm/i915/i915_ums.c b/linux-imx/drivers/gpu/drm/i915/i915_ums.c
new file mode 100644
index 0000000..985a097
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/i915_ums.c
@@ -0,0 +1,503 @@
+/*
+ *
+ * Copyright 2008 (c) Intel Corporation
+ *   Jesse Barnes <jbarnes@virtuousgeek.org>
+ * Copyright 2013 (c) Intel Corporation
+ *   Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "intel_drv.h"
+#include "i915_reg.h"
+
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32	dpll_reg;
+
+	/* On IVB, 3rd pipe shares PLL with another one */
+	if (pipe > 1)
+		return false;
+
+	if (HAS_PCH_SPLIT(dev))
+		dpll_reg = _PCH_DPLL(pipe);
+	else
+		dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
+
+	return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+	u32 *array;
+	int i;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	if (HAS_PCH_SPLIT(dev))
+		reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+	if (pipe == PIPE_A)
+		array = dev_priv->regfile.save_palette_a;
+	else
+		array = dev_priv->regfile.save_palette_b;
+
+	for (i = 0; i < 256; i++)
+		array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
+	u32 *array;
+	int i;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	if (HAS_PCH_SPLIT(dev))
+		reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
+
+	if (pipe == PIPE_A)
+		array = dev_priv->regfile.save_palette_a;
+	else
+		array = dev_priv->regfile.save_palette_b;
+
+	for (i = 0; i < 256; i++)
+		I915_WRITE(reg + (i << 2), array[i]);
+}
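+
+/*
+ * Both palette helpers walk the legacy 256-entry gamma LUT of the pipe,
+ * one 32-bit entry every 4 bytes from the palette base (the LGC_PALETTE
+ * range on PCH platforms), and skip pipes whose DPLL is off since there
+ * is nothing useful to save or restore for them.
+ */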
+
+void i915_save_display_reg(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	/* Cursor state */
+	dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
+	dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
+	dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
+	dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
+	dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
+	dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
+	if (IS_GEN2(dev))
+		dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
+
+	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+		dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+	}
+
+	/* Pipe & plane A info */
+	dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
+	dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
+	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
+		dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
+		dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
+	} else {
+		dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
+		dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
+		dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
+	}
+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+		dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+	dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
+	dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
+	dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
+	dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
+	dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
+	dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
+	if (!HAS_PCH_SPLIT(dev))
+		dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+
+	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+		dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+		dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+		dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+		dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+		dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+		dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+		dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+		dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+		dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
+		dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+		dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+		dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+		dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+		dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+		dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+	}
+
+	dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
+	dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+	dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
+	dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
+	dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
+	if (INTEL_INFO(dev)->gen >= 4) {
+		dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
+		dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+	}
+	i915_save_palette(dev, PIPE_A);
+	dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
+
+	/* Pipe & plane B info */
+	dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
+	dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
+	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
+		dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
+		dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
+	} else {
+		dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
+		dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
+		dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
+	}
+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
+		dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+	dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
+	dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
+	dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
+	dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
+	dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
+	dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
+	if (!HAS_PCH_SPLIT(dev))
+		dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+
+	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+		dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+		dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+		dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+		dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+		dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+		dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+		dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+		dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+		dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
+		dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+		dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+		dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+		dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+		dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+		dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+	}
+
+	dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
+	dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+	dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
+	dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
+	dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
+	if (INTEL_INFO(dev)->gen >= 4) {
+		dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
+		dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+	}
+	i915_save_palette(dev, PIPE_B);
+	dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+
+	/* Fences */
+	switch (INTEL_INFO(dev)->gen) {
+	case 7:
+	case 6:
+		for (i = 0; i < 16; i++)
+			dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+		break;
+	case 5:
+	case 4:
+		for (i = 0; i < 16; i++)
+			dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+		break;
+	case 3:
+		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+			for (i = 0; i < 8; i++)
+				dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
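+		/* fall through */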
+	case 2:
+		for (i = 0; i < 8; i++)
+			dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+		break;
+	}
+
+	/* CRT state */
+	if (HAS_PCH_SPLIT(dev))
+		dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
+	else
+		dev_priv->regfile.saveADPA = I915_READ(ADPA);
+
+	/* Display Port state */
+	if (SUPPORTS_INTEGRATED_DP(dev)) {
+		dev_priv->regfile.saveDP_B = I915_READ(DP_B);
+		dev_priv->regfile.saveDP_C = I915_READ(DP_C);
+		dev_priv->regfile.saveDP_D = I915_READ(DP_D);
+		dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+		dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+		dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+		dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+		dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+		dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+		dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+		dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+	}
+	/* FIXME: save TV & SDVO state */
+}
+
+void i915_restore_display_reg(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int dpll_a_reg, fpa0_reg, fpa1_reg;
+	int dpll_b_reg, fpb0_reg, fpb1_reg;
+	int i;
+
+	/* Display port ratios (must be done before clock is set) */
+	if (SUPPORTS_INTEGRATED_DP(dev)) {
+		I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+		I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+		I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+		I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+		I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
+		I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
+		I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
+		I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
+	}
+
+	/* Fences */
+	switch (INTEL_INFO(dev)->gen) {
+	case 7:
+	case 6:
+		for (i = 0; i < 16; i++)
+			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+		break;
+	case 5:
+	case 4:
+		for (i = 0; i < 16; i++)
+			I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
+		break;
+	case 3:
+	case 2:
+		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+			for (i = 0; i < 8; i++)
+				I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
+		for (i = 0; i < 8; i++)
+			I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
+		break;
+	}
+
+	if (HAS_PCH_SPLIT(dev)) {
+		dpll_a_reg = _PCH_DPLL_A;
+		dpll_b_reg = _PCH_DPLL_B;
+		fpa0_reg = _PCH_FPA0;
+		fpb0_reg = _PCH_FPB0;
+		fpa1_reg = _PCH_FPA1;
+		fpb1_reg = _PCH_FPB1;
+	} else {
+		dpll_a_reg = _DPLL_A;
+		dpll_b_reg = _DPLL_B;
+		fpa0_reg = _FPA0;
+		fpb0_reg = _FPB0;
+		fpa1_reg = _FPA1;
+		fpb1_reg = _FPB1;
+	}
+
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
+		I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
+	}
+
+	/* Pipe & plane A info */
+	/* Prime the clock */
+	if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
+		I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
+			   ~DPLL_VCO_ENABLE);
+		POSTING_READ(dpll_a_reg);
+		udelay(150);
+	}
+	I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
+	I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
+	/* Actually enable it */
+	I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
+	POSTING_READ(dpll_a_reg);
+	udelay(150);
+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
+		POSTING_READ(_DPLL_A_MD);
+	}
+	udelay(150);
+
+	/* Restore mode */
+	I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
+	I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
+	I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
+	I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
+	I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
+	I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
+	if (!HAS_PCH_SPLIT(dev))
+		I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
+
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
+		I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
+		I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
+		I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
+
+		I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
+		I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
+
+		I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
+		I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
+		I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
+
+		I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+		I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+		I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+		I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+		I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+		I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+		I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
+	}
+
+	/* Restore plane info */
+	I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
+	I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
+	I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
+	I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
+	I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
+	if (INTEL_INFO(dev)->gen >= 4) {
+		I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
+		I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
+	}
+
+	I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
+
+	i915_restore_palette(dev, PIPE_A);
+	/* Enable the plane */
+	I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
+	I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
+
+	/* Pipe & plane B info */
+	if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
+		I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
+			   ~DPLL_VCO_ENABLE);
+		POSTING_READ(dpll_b_reg);
+		udelay(150);
+	}
+	I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
+	I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
+	/* Actually enable it */
+	I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
+	POSTING_READ(dpll_b_reg);
+	udelay(150);
+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
+		POSTING_READ(_DPLL_B_MD);
+	}
+	udelay(150);
+
+	/* Restore mode */
+	I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
+	I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
+	I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
+	I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
+	I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
+	I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
+	if (!HAS_PCH_SPLIT(dev))
+		I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
+
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
+		I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
+		I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
+		I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
+
+		I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
+		I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
+
+		I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
+		I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
+		I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
+
+		I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+		I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+		I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+		I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+		I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+		I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+		I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
+	}
+
+	/* Restore plane info */
+	I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
+	I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
+	I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
+	I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
+	I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
+	if (INTEL_INFO(dev)->gen >= 4) {
+		I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
+		I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
+	}
+
+	I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
+
+	i915_restore_palette(dev, PIPE_B);
+	/* Enable the plane */
+	I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
+	I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
+
+	/* Cursor state */
+	I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
+	I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
+	I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
+	I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
+	I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
+	I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
+	if (IS_GEN2(dev))
+		I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
+
+	/* CRT state */
+	if (HAS_PCH_SPLIT(dev))
+		I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
+	else
+		I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
+
+	/* Display Port state */
+	if (SUPPORTS_INTEGRATED_DP(dev)) {
+		I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
+		I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
+		I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
+	}
+	/* FIXME: restore TV & SDVO state */
+
+	return;
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_acpi.c b/linux-imx/drivers/gpu/drm/i915/intel_acpi.c
new file mode 100644
index 0000000..bcbbaea
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_acpi.c
@@ -0,0 +1,251 @@
+/*
+ * Intel ACPI functions
+ *
+ * _DSM related code stolen from nouveau_acpi.c.
+ */
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/vga_switcheroo.h>
+#include <acpi/acpi_drivers.h>
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+
+#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
+
+#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */
+#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
+
+static struct intel_dsm_priv {
+	acpi_handle dhandle;
+} intel_dsm_priv;
+
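+/*
+ * _DSM UUID stored as raw bytes in mixed-endian order: the first three
+ * fields are little-endian, the rest big-endian, i.e. the GUID string
+ * 7ed873d3-c2d0-4e4f-a854-0f1317b01c2c.
+ */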
+static const u8 intel_dsm_guid[] = {
+	0xd3, 0x73, 0xd8, 0x7e,
+	0xd0, 0xc2,
+	0x4f, 0x4e,
+	0xa8, 0x54,
+	0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
+};
+
+static int intel_dsm(acpi_handle handle, int func, int arg)
+{
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_object_list input;
+	union acpi_object params[4];
+	union acpi_object *obj;
+	u32 result;
+	int ret = 0;
+
+	input.count = 4;
+	input.pointer = params;
+	params[0].type = ACPI_TYPE_BUFFER;
+	params[0].buffer.length = sizeof(intel_dsm_guid);
+	params[0].buffer.pointer = (char *)intel_dsm_guid;
+	params[1].type = ACPI_TYPE_INTEGER;
+	params[1].integer.value = INTEL_DSM_REVISION_ID;
+	params[2].type = ACPI_TYPE_INTEGER;
+	params[2].integer.value = func;
+	params[3].type = ACPI_TYPE_INTEGER;
+	params[3].integer.value = arg;
+
+	ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
+	if (ret) {
+		DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
+		return ret;
+	}
+
+	obj = (union acpi_object *)output.pointer;
+
+	result = 0;
+	switch (obj->type) {
+	case ACPI_TYPE_INTEGER:
+		result = obj->integer.value;
+		break;
+
+	case ACPI_TYPE_BUFFER:
+		if (obj->buffer.length == 4) {
+			result = (obj->buffer.pointer[0] |
+				(obj->buffer.pointer[1] <<  8) |
+				(obj->buffer.pointer[2] << 16) |
+				(obj->buffer.pointer[3] << 24));
+			break;
+		}
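+		/* buffers of any other length fall through to the error path */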
+	default:
+		ret = -EINVAL;
+		break;
+	}
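+	/* 0x80000002 is presumably the firmware's "unsupported" error code */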
+	if (result == 0x80000002)
+		ret = -ENODEV;
+
+	kfree(output.pointer);
+	return ret;
+}
+
+static char *intel_dsm_port_name(u8 id)
+{
+	switch (id) {
+	case 0:
+		return "Reserved";
+	case 1:
+		return "Analog VGA";
+	case 2:
+		return "LVDS";
+	case 3:
+		return "Reserved";
+	case 4:
+		return "HDMI/DVI_B";
+	case 5:
+		return "HDMI/DVI_C";
+	case 6:
+		return "HDMI/DVI_D";
+	case 7:
+		return "DisplayPort_A";
+	case 8:
+		return "DisplayPort_B";
+	case 9:
+		return "DisplayPort_C";
+	case 0xa:
+		return "DisplayPort_D";
+	case 0xb:
+	case 0xc:
+	case 0xd:
+		return "Reserved";
+	case 0xe:
+		return "WiDi";
+	default:
+		return "bad type";
+	}
+}
+
+static char *intel_dsm_mux_type(u8 type)
+{
+	switch (type) {
+	case 0:
+		return "unknown";
+	case 1:
+		return "No MUX, iGPU only";
+	case 2:
+		return "No MUX, dGPU only";
+	case 3:
+		return "MUXed between iGPU and dGPU";
+	default:
+		return "bad type";
+	}
+}
+
+static void intel_dsm_platform_mux_info(void)
+{
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_object_list input;
+	union acpi_object params[4];
+	union acpi_object *pkg;
+	int i, ret;
+
+	input.count = 4;
+	input.pointer = params;
+	params[0].type = ACPI_TYPE_BUFFER;
+	params[0].buffer.length = sizeof(intel_dsm_guid);
+	params[0].buffer.pointer = (char *)intel_dsm_guid;
+	params[1].type = ACPI_TYPE_INTEGER;
+	params[1].integer.value = INTEL_DSM_REVISION_ID;
+	params[2].type = ACPI_TYPE_INTEGER;
+	params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
+	params[3].type = ACPI_TYPE_INTEGER;
+	params[3].integer.value = 0;
+
+	ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
+				   &output);
+	if (ret) {
+		DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
+		goto out;
+	}
+
+	pkg = (union acpi_object *)output.pointer;
+
+	if (pkg->type == ACPI_TYPE_PACKAGE) {
+		union acpi_object *connector_count = &pkg->package.elements[0];
+		DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
+			  (unsigned long long)connector_count->integer.value);
+		for (i = 1; i < pkg->package.count; i++) {
+			union acpi_object *obj = &pkg->package.elements[i];
+			union acpi_object *connector_id =
+				&obj->package.elements[0];
+			union acpi_object *info = &obj->package.elements[1];
+			DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
+				  (unsigned long long)connector_id->integer.value);
+			DRM_DEBUG_DRIVER("  port id: %s\n",
+			       intel_dsm_port_name(info->buffer.pointer[0]));
+			DRM_DEBUG_DRIVER("  display mux info: %s\n",
+			       intel_dsm_mux_type(info->buffer.pointer[1]));
+			DRM_DEBUG_DRIVER("  aux/dc mux info: %s\n",
+			       intel_dsm_mux_type(info->buffer.pointer[2]));
+			DRM_DEBUG_DRIVER("  hpd mux info: %s\n",
+			       intel_dsm_mux_type(info->buffer.pointer[3]));
+		}
+	}
+
+out:
+	kfree(output.pointer);
+}
+
+static bool intel_dsm_pci_probe(struct pci_dev *pdev)
+{
+	acpi_handle dhandle, intel_handle;
+	acpi_status status;
+	int ret;
+
+	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+	if (!dhandle)
+		return false;
+
+	status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
+	if (ACPI_FAILURE(status)) {
+		DRM_DEBUG_KMS("no _DSM method for intel device\n");
+		return false;
+	}
+
+	ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
+		return false;
+	}
+
+	intel_dsm_priv.dhandle = dhandle;
+
+	intel_dsm_platform_mux_info();
+	return true;
+}
+
+static bool intel_dsm_detect(void)
+{
+	char acpi_method_name[255] = { 0 };
+	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+	struct pci_dev *pdev = NULL;
+	bool has_dsm = false;
+	int vga_count = 0;
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		vga_count++;
+		has_dsm |= intel_dsm_pci_probe(pdev);
+	}
+
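+	/*
+	 * Two VGA-class devices plus a working Intel _DSM is taken as the
+	 * signature of a hybrid (switchable) graphics machine.
+	 */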
+	if (vga_count == 2 && has_dsm) {
+		acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+		DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
+				 acpi_method_name);
+		return true;
+	}
+
+	return false;
+}
+
+void intel_register_dsm_handler(void)
+{
+	if (!intel_dsm_detect())
+		return;
+}
+
+void intel_unregister_dsm_handler(void)
+{
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_bios.c b/linux-imx/drivers/gpu/drm/i915/intel_bios.c
new file mode 100644
index 0000000..95070b2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_bios.c
@@ -0,0 +1,771 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+#include <linux/dmi.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_bios.h"
+
+#define	SLAVE_ADDR1	0x70
+#define	SLAVE_ADDR2	0x72
+
+static int panel_type;
+
+static void *
+find_section(struct bdb_header *bdb, int section_id)
+{
+	u8 *base = (u8 *)bdb;
+	int index = 0;
+	u16 total, current_size;
+	u8 current_id;
+
+	/* skip to first section */
+	index += bdb->header_size;
+	total = bdb->bdb_size;
+
+	/* walk the sections looking for section_id */
+	while (index < total) {
+		current_id = *(base + index);
+		index++;
+		current_size = *((u16 *)(base + index));
+		index += 2;
+		if (current_id == section_id)
+			return base + index;
+		index += current_size;
+	}
+
+	return NULL;
+}
+
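+/*
+ * Each BDB section is prefixed by a 1-byte ID and a 2-byte size;
+ * find_section() returns a pointer just past that prefix, so the block
+ * size sits in the two bytes immediately before the returned pointer.
+ */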
+static u16
+get_blocksize(void *p)
+{
+	u16 *block_ptr, block_size;
+
+	block_ptr = (u16 *)((char *)p - 2);
+	block_size = *block_ptr;
+	return block_size;
+}
+
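+/*
+ * Decode a VBT DVO timing into a drm_display_mode.  Hypothetical example:
+ * hactive_hi = 0x5 with hactive_lo = 0x00 gives hdisplay = 0x500 = 1280,
+ * and a stored clock of 10800 (10 kHz units) becomes 108000 kHz.
+ */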
+static void
+fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+			const struct lvds_dvo_timing *dvo_timing)
+{
+	panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+		dvo_timing->hactive_lo;
+	panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+		((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+	panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+		dvo_timing->hsync_pulse_width;
+	panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+		((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+
+	panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+		dvo_timing->vactive_lo;
+	panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+		dvo_timing->vsync_off;
+	panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+		dvo_timing->vsync_pulse_width;
+	panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+		((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+	panel_fixed_mode->clock = dvo_timing->clock * 10;
+	panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+
+	if (dvo_timing->hsync_positive)
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (dvo_timing->vsync_positive)
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+	/* Some VBTs have bogus h/vtotal values */
+	if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+		panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+	if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+		panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+
+	drm_mode_set_name(panel_fixed_mode);
+}
+
+static bool
+lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
+			   const struct lvds_dvo_timing *b)
+{
+	if (a->hactive_hi != b->hactive_hi ||
+	    a->hactive_lo != b->hactive_lo)
+		return false;
+
+	if (a->hsync_off_hi != b->hsync_off_hi ||
+	    a->hsync_off_lo != b->hsync_off_lo)
+		return false;
+
+	if (a->hsync_pulse_width != b->hsync_pulse_width)
+		return false;
+
+	if (a->hblank_hi != b->hblank_hi ||
+	    a->hblank_lo != b->hblank_lo)
+		return false;
+
+	if (a->vactive_hi != b->vactive_hi ||
+	    a->vactive_lo != b->vactive_lo)
+		return false;
+
+	if (a->vsync_off != b->vsync_off)
+		return false;
+
+	if (a->vsync_pulse_width != b->vsync_pulse_width)
+		return false;
+
+	if (a->vblank_hi != b->vblank_hi ||
+	    a->vblank_lo != b->vblank_lo)
+		return false;
+
+	return true;
+}
+
+static const struct lvds_dvo_timing *
+get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
+		    const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
+		    int index)
+{
+	/*
+	 * The size of fp_timing varies between platforms, so compute the
+	 * DVO timing's offset relative to the start of an LVDS data entry
+	 * to locate the DVO timing entry.
+	 */
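+	/*
+	 * Hypothetical example: with ptr[0].fp_timing_offset = 0x100,
+	 * ptr[0].dvo_timing_offset = 0x130 and ptr[1].dvo_timing_offset =
+	 * 0x160, each LFP data entry is 0x30 bytes long and the DVO timing
+	 * sits 0x30 bytes into each entry.
+	 */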
+
+	int lfp_data_size =
+		lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+		lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+	int dvo_timing_offset =
+		lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
+		lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
+	char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
+
+	return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
+}
+
+/* Get the lvds_fp_timing entry.
+ * This function may return NULL if the corresponding entry is invalid.
+ */
+static const struct lvds_fp_timing *
+get_lvds_fp_timing(const struct bdb_header *bdb,
+		   const struct bdb_lvds_lfp_data *data,
+		   const struct bdb_lvds_lfp_data_ptrs *ptrs,
+		   int index)
+{
+	size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
+	u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
+	size_t ofs;
+
+	if (index >= ARRAY_SIZE(ptrs->ptr))
+		return NULL;
+	ofs = ptrs->ptr[index].fp_timing_offset;
+	if (ofs < data_ofs ||
+	    ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
+		return NULL;
+	return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
+}
+
+/* Try to find integrated panel data */
+static void
+parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+			    struct bdb_header *bdb)
+{
+	const struct bdb_lvds_options *lvds_options;
+	const struct bdb_lvds_lfp_data *lvds_lfp_data;
+	const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+	const struct lvds_dvo_timing *panel_dvo_timing;
+	const struct lvds_fp_timing *fp_timing;
+	struct drm_display_mode *panel_fixed_mode;
+	int i, downclock;
+
+	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+	if (!lvds_options)
+		return;
+
+	dev_priv->lvds_dither = lvds_options->pixel_dither;
+	if (lvds_options->panel_type == 0xff)
+		return;
+
+	panel_type = lvds_options->panel_type;
+
+	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+	if (!lvds_lfp_data)
+		return;
+
+	lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
+	if (!lvds_lfp_data_ptrs)
+		return;
+
+	dev_priv->lvds_vbt = 1;
+
+	panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+					       lvds_lfp_data_ptrs,
+					       lvds_options->panel_type);
+
+	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+	if (!panel_fixed_mode)
+		return;
+
+	fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
+
+	dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+
+	DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
+	drm_mode_debug_printmodeline(panel_fixed_mode);
+
+	/*
+	 * Iterate over the LVDS panel timing info to find the lowest clock
+	 * for the native resolution.
+	 */
+	downclock = panel_dvo_timing->clock;
+	for (i = 0; i < 16; i++) {
+		const struct lvds_dvo_timing *dvo_timing;
+
+		dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+						 lvds_lfp_data_ptrs,
+						 i);
+		if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
+		    dvo_timing->clock < downclock)
+			downclock = dvo_timing->clock;
+	}
+
+	if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
+		dev_priv->lvds_downclock_avail = 1;
+		dev_priv->lvds_downclock = downclock * 10;
+		DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
+			      "Normal Clock %dKHz, downclock %dKHz\n",
+			      panel_fixed_mode->clock, 10*downclock);
+	}
+
+	fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
+				       lvds_lfp_data_ptrs,
+				       lvds_options->panel_type);
+	if (fp_timing) {
+		/* check the resolution, just to be sure */
+		if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
+		    fp_timing->y_res == panel_fixed_mode->vdisplay) {
+			dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
+			DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
+				      dev_priv->bios_lvds_val);
+		}
+	}
+}
+
+/* Try to find sdvo panel data */
+static void
+parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
+		      struct bdb_header *bdb)
+{
+	struct lvds_dvo_timing *dvo_timing;
+	struct drm_display_mode *panel_fixed_mode;
+	int index;
+
+	index = i915_vbt_sdvo_panel_type;
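+	/* -2 means "ignore the VBT SDVO panel mode"; -1 means "use the
+	 * panel_type from the VBT's SDVO LVDS options block" */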
+	if (index == -2) {
+		DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+		return;
+	}
+
+	if (index == -1) {
+		struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+
+		sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+		if (!sdvo_lvds_options)
+			return;
+
+		index = sdvo_lvds_options->panel_type;
+	}
+
+	dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+	if (!dvo_timing)
+		return;
+
+	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+	if (!panel_fixed_mode)
+		return;
+
+	fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
+
+	dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
+
+	DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+	drm_mode_debug_printmodeline(panel_fixed_mode);
+}
+
+static int intel_bios_ssc_frequency(struct drm_device *dev,
+				    bool alternate)
+{
+	switch (INTEL_INFO(dev)->gen) {
+	case 2:
+		return alternate ? 66 : 48;
+	case 3:
+	case 4:
+		return alternate ? 100 : 96;
+	default:
+		return alternate ? 100 : 120;
+	}
+}
+
+static void
+parse_general_features(struct drm_i915_private *dev_priv,
+		       struct bdb_header *bdb)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct bdb_general_features *general;
+
+	general = find_section(bdb, BDB_GENERAL_FEATURES);
+	if (general) {
+		dev_priv->int_tv_support = general->int_tv_support;
+		dev_priv->int_crt_support = general->int_crt_support;
+		dev_priv->lvds_use_ssc = general->enable_ssc;
+		dev_priv->lvds_ssc_freq =
+			intel_bios_ssc_frequency(dev, general->ssc_freq);
+		dev_priv->display_clock_mode = general->display_clock_mode;
+		dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+		DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
+			      dev_priv->int_tv_support,
+			      dev_priv->int_crt_support,
+			      dev_priv->lvds_use_ssc,
+			      dev_priv->lvds_ssc_freq,
+			      dev_priv->display_clock_mode,
+			      dev_priv->fdi_rx_polarity_inverted);
+	}
+}
+
+static void
+parse_general_definitions(struct drm_i915_private *dev_priv,
+			  struct bdb_header *bdb)
+{
+	struct bdb_general_definitions *general;
+
+	general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+	if (general) {
+		u16 block_size = get_blocksize(general);
+		if (block_size >= sizeof(*general)) {
+			int bus_pin = general->crt_ddc_gmbus_pin;
+			DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+			if (intel_gmbus_is_port_valid(bus_pin))
+				dev_priv->crt_ddc_pin = bus_pin;
+		} else {
+			DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
+				      block_size);
+		}
+	}
+}
+
+static void
+parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+			  struct bdb_header *bdb)
+{
+	struct sdvo_device_mapping *p_mapping;
+	struct bdb_general_definitions *p_defs;
+	struct child_device_config *p_child;
+	int i, child_device_num, count;
+	u16	block_size;
+
+	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+	if (!p_defs) {
+		DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
+		return;
+	}
+	/* Check whether the child device size meets expectations. If the
+	 * child device size obtained from the general definitions block
+	 * differs from sizeof(struct child_device_config), skip parsing
+	 * the SDVO device info.
+	 */
+	if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* Different child device size; ignore it. */
+		DRM_DEBUG_KMS("different child device size found. Invalid.\n");
+		return;
+	}
+	/* get the block size of general definitions */
+	block_size = get_blocksize(p_defs);
+	/* get the number of child devices */
+	child_device_num = (block_size - sizeof(*p_defs)) /
+				sizeof(*p_child);
+	count = 0;
+	for (i = 0; i < child_device_num; i++) {
+		p_child = &(p_defs->devices[i]);
+		if (!p_child->device_type) {
+			/* skip the device block if device type is invalid */
+			continue;
+		}
+		if (p_child->slave_addr != SLAVE_ADDR1 &&
+			p_child->slave_addr != SLAVE_ADDR2) {
+			/*
+			 * If the slave address is neither 0x70 nor 0x72,
+			 * it is not a SDVO device. Skip it.
+			 */
+			continue;
+		}
+		if (p_child->dvo_port != DEVICE_PORT_DVOB &&
+			p_child->dvo_port != DEVICE_PORT_DVOC) {
+			/* skip the incorrect SDVO port */
+			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+			continue;
+		}
+		DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+				" %s port\n",
+				p_child->slave_addr,
+				(p_child->dvo_port == DEVICE_PORT_DVOB) ?
+					"SDVOB" : "SDVOC");
+		p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+		if (!p_mapping->initialized) {
+			p_mapping->dvo_port = p_child->dvo_port;
+			p_mapping->slave_addr = p_child->slave_addr;
+			p_mapping->dvo_wiring = p_child->dvo_wiring;
+			p_mapping->ddc_pin = p_child->ddc_pin;
+			p_mapping->i2c_pin = p_child->i2c_pin;
+			p_mapping->initialized = 1;
+			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+				      p_mapping->dvo_port,
+				      p_mapping->slave_addr,
+				      p_mapping->dvo_wiring,
+				      p_mapping->ddc_pin,
+				      p_mapping->i2c_pin);
+		} else {
+			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
+					 "two SDVO device.\n");
+		}
+		if (p_child->slave2_addr) {
+			/* Maybe this is an SDVO device with multiple inputs,
+			 * whose mapping info has not been added. */
+			DRM_DEBUG_KMS("slave2_addr exists; this may be an SDVO"
+				" device with multiple inputs.\n");
+		}
+		count++;
+	}
+
+	if (!count) {
+		/* No SDVO device info is found */
+		DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
+	}
+	return;
+}
+
+static void
+parse_driver_features(struct drm_i915_private *dev_priv,
+		       struct bdb_header *bdb)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct bdb_driver_features *driver;
+
+	driver = find_section(bdb, BDB_DRIVER_FEATURES);
+	if (!driver)
+		return;
+
+	if (SUPPORTS_EDP(dev) &&
+	    driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+		dev_priv->edp.support = 1;
+
+	if (driver->dual_frequency)
+		dev_priv->render_reclock_avail = true;
+}
+
+static void
+parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+	struct bdb_edp *edp;
+	struct edp_power_seq *edp_pps;
+	struct edp_link_params *edp_link_params;
+
+	edp = find_section(bdb, BDB_EDP);
+	if (!edp) {
+		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
+			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
+		return;
+	}
+
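+	/*
+	 * color_depth packs 2 bits per panel.  Hypothetical example: a value
+	 * of 0x24 (0b100100) gives panel 0 -> 18bpp, panel 1 -> 24bpp and
+	 * panel 2 -> 30bpp.
+	 */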
+	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
+	case EDP_18BPP:
+		dev_priv->edp.bpp = 18;
+		break;
+	case EDP_24BPP:
+		dev_priv->edp.bpp = 24;
+		break;
+	case EDP_30BPP:
+		dev_priv->edp.bpp = 30;
+		break;
+	}
+
+	/* Get the eDP sequencing and link info */
+	edp_pps = &edp->power_seqs[panel_type];
+	edp_link_params = &edp->link_params[panel_type];
+
+	dev_priv->edp.pps = *edp_pps;
+
+	dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+		DP_LINK_BW_1_62;
+	switch (edp_link_params->lanes) {
+	case 0:
+		dev_priv->edp.lanes = 1;
+		break;
+	case 1:
+		dev_priv->edp.lanes = 2;
+		break;
+	case 3:
+	default:
+		dev_priv->edp.lanes = 4;
+		break;
+	}
+	switch (edp_link_params->preemphasis) {
+	case 0:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+		break;
+	case 1:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+		break;
+	case 2:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+		break;
+	case 3:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+		break;
+	}
+	switch (edp_link_params->vswing) {
+	case 0:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+		break;
+	case 1:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+		break;
+	case 2:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+		break;
+	case 3:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+		break;
+	}
+}
+
+static void
+parse_device_mapping(struct drm_i915_private *dev_priv,
+		       struct bdb_header *bdb)
+{
+	struct bdb_general_definitions *p_defs;
+	struct child_device_config *p_child, *child_dev_ptr;
+	int i, child_device_num, count;
+	u16	block_size;
+
+	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+	if (!p_defs) {
+		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+		return;
+	}
+	/* Check whether the child device size meets expectations. If the
+	 * child device size obtained from the general definitions block
+	 * differs from sizeof(struct child_device_config), skip parsing
+	 * the child device info.
+	 */
+	if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* Different child device size; ignore it. */
+		DRM_DEBUG_KMS("different child device size found. Invalid.\n");
+		return;
+	}
+	/* get the block size of general definitions */
+	block_size = get_blocksize(p_defs);
+	/* get the number of child devices */
+	child_device_num = (block_size - sizeof(*p_defs)) /
+				sizeof(*p_child);
+	count = 0;
+	/* count the child devices that are present */
+	for (i = 0; i < child_device_num; i++) {
+		p_child = &(p_defs->devices[i]);
+		if (!p_child->device_type) {
+			/* skip the device block if device type is invalid */
+			continue;
+		}
+		count++;
+	}
+	if (!count) {
+		DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+		return;
+	}
+	dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+	if (!dev_priv->child_dev) {
+		DRM_DEBUG_KMS("No memory space for child device\n");
+		return;
+	}
+
+	dev_priv->child_dev_num = count;
+	count = 0;
+	for (i = 0; i < child_device_num; i++) {
+		p_child = &(p_defs->devices[i]);
+		if (!p_child->device_type) {
+			/* skip the device block if device type is invalid */
+			continue;
+		}
+		child_dev_ptr = dev_priv->child_dev + count;
+		count++;
+		memcpy((void *)child_dev_ptr, (void *)p_child,
+					sizeof(*p_child));
+	}
+	return;
+}
+
+static void
+init_vbt_defaults(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
+
+	/* LFP panel data */
+	dev_priv->lvds_dither = 1;
+	dev_priv->lvds_vbt = 0;
+
+	/* SDVO panel data */
+	dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+	/* general features */
+	dev_priv->int_tv_support = 1;
+	dev_priv->int_crt_support = 1;
+
+	/* Default to using SSC */
+	dev_priv->lvds_use_ssc = 1;
+	dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+	DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
+}
+
+static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+{
+	DRM_DEBUG_KMS("Falling back to manually reading VBT from "
+		      "VBIOS ROM for %s\n",
+		      id->ident);
+	return 1;
+}
+
+static const struct dmi_system_id intel_no_opregion_vbt[] = {
+	{
+		.callback = intel_no_opregion_vbt_callback,
+		.ident = "ThinkCentre A57",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
+		},
+	},
+	{ }
+};
+
+/**
+ * intel_parse_bios - find VBT and initialize settings from the BIOS
+ * @dev: DRM device
+ *
+ * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
+ * to appropriate values.
+ *
+ * Returns 0 on success, nonzero on failure.
+ */
+int
+intel_parse_bios(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = dev->pdev;
+	struct bdb_header *bdb = NULL;
+	u8 __iomem *bios = NULL;
+
+	if (HAS_PCH_NOP(dev))
+		return -ENODEV;
+
+	init_vbt_defaults(dev_priv);
+
+	/* XXX Should this validation be moved to intel_opregion.c? */
+	if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) {
+		struct vbt_header *vbt = dev_priv->opregion.vbt;
+		if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+			DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
+					 vbt->signature);
+			bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
+		} else
+			dev_priv->opregion.vbt = NULL;
+	}
+
+	if (bdb == NULL) {
+		struct vbt_header *vbt = NULL;
+		size_t size;
+		int i;
+
+		bios = pci_map_rom(pdev, &size);
+		if (!bios)
+			return -1;
+
+		/* Scour memory looking for the VBT signature */
+		for (i = 0; i + 4 < size; i++) {
+			if (!memcmp(bios + i, "$VBT", 4)) {
+				vbt = (struct vbt_header *)(bios + i);
+				break;
+			}
+		}
+
+		if (!vbt) {
+			DRM_DEBUG_DRIVER("VBT signature missing\n");
+			pci_unmap_rom(pdev, bios);
+			return -1;
+		}
+
+		bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+	}
+
+	/* Grab useful general definitions */
+	parse_general_features(dev_priv, bdb);
+	parse_general_definitions(dev_priv, bdb);
+	parse_lfp_panel_data(dev_priv, bdb);
+	parse_sdvo_panel_data(dev_priv, bdb);
+	parse_sdvo_device_mapping(dev_priv, bdb);
+	parse_device_mapping(dev_priv, bdb);
+	parse_driver_features(dev_priv, bdb);
+	parse_edp(dev_priv, bdb);
+
+	if (bios)
+		pci_unmap_rom(pdev, bios);
+
+	return 0;
+}
+
+/* Ensure that vital registers have been initialised, even if the BIOS
+ * is absent or just failing to do its job.
+ */
+void intel_setup_bios(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	 /* Set the Panel Power On/Off timings if uninitialized. */
+	if (!HAS_PCH_SPLIT(dev) &&
+	    I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
+		/* Set T2 to 40ms and T5 to 200ms */
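+		/* Assuming the usual PP register layout, delays are in 100us
+		 * units: 0x0190 = 400 -> 40ms, 0x07d0 = 2000 -> 200ms. */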
+		I915_WRITE(PP_ON_DELAYS, 0x019007d0);
+
+		/* Set T3 to 35ms and Tx to 200ms */
+		I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_bios.h b/linux-imx/drivers/gpu/drm/i915/intel_bios.h
new file mode 100644
index 0000000..e088d6f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_bios.h
@@ -0,0 +1,621 @@
+/*
+ * Copyright © 2006 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _I830_BIOS_H_
+#define _I830_BIOS_H_
+
+#include <drm/drmP.h>
+
+struct vbt_header {
+	u8 signature[20];		/**< Always starts with '$VBT' */
+	u16 version;			/**< decimal */
+	u16 header_size;		/**< in bytes */
+	u16 vbt_size;			/**< in bytes */
+	u8 vbt_checksum;
+	u8 reserved0;
+	u32 bdb_offset;			/**< from beginning of VBT */
+	u32 aim_offset[4];		/**< from beginning of VBT */
+} __attribute__((packed));
+
+struct bdb_header {
+	u8 signature[16];		/**< Always 'BIOS_DATA_BLOCK' */
+	u16 version;			/**< decimal */
+	u16 header_size;		/**< in bytes */
+	u16 bdb_size;			/**< in bytes */
+};
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+	u8 type; /* 0 == desktop, 1 == mobile */
+	u8 relstage;
+	u8 chipset;
+	u8 lvds_present:1;
+	u8 tv_present:1;
+	u8 rsvd2:6; /* finish byte */
+	u8 rsvd3[4];
+	u8 signon[155];
+	u8 copyright[61];
+	u16 code_segment;
+	u8 dos_boot_mode;
+	u8 bandwidth_percent;
+	u8 rsvd4; /* popup memory size */
+	u8 resize_pci_bios;
+	u8 rsvd5; /* is crt already on ddc2 */
+} __attribute__((packed));
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES	  1
+#define BDB_GENERAL_DEFINITIONS	  2
+#define BDB_OLD_TOGGLE_LIST	  3
+#define BDB_MODE_SUPPORT_LIST	  4
+#define BDB_GENERIC_MODE_TABLE	  5
+#define BDB_EXT_MMIO_REGS	  6
+#define BDB_SWF_IO		  7
+#define BDB_SWF_MMIO		  8
+#define BDB_DOT_CLOCK_TABLE	  9
+#define BDB_MODE_REMOVAL_TABLE	 10
+#define BDB_CHILD_DEVICE_TABLE	 11
+#define BDB_DRIVER_FEATURES	 12
+#define BDB_DRIVER_PERSISTENCE	 13
+#define BDB_EXT_TABLE_PTRS	 14
+#define BDB_DOT_CLOCK_OVERRIDE	 15
+#define BDB_DISPLAY_SELECT	 16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION	 18
+#define BDB_DISPLAY_REMOVE	 19
+#define BDB_OEM_CUSTOM		 20
+#define BDB_EFP_LIST		 21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS	 22
+#define BDB_SDVO_PANEL_DTDS	 23
+#define BDB_SDVO_LVDS_PNP_IDS	 24
+#define BDB_SDVO_LVDS_POWER_SEQ	 25
+#define BDB_TV_OPTIONS		 26
+#define BDB_EDP			 27
+#define BDB_LVDS_OPTIONS	 40
+#define BDB_LVDS_LFP_DATA_PTRS	 41
+#define BDB_LVDS_LFP_DATA	 42
+#define BDB_LVDS_BACKLIGHT	 43
+#define BDB_LVDS_POWER		 44
+#define BDB_SKIP		254 /* VBIOS private block, ignore */
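+/*
+ * Example: a block starting with the bytes 0x28 0x08 0x00 is
+ * BDB_LVDS_OPTIONS (40) with an 8-byte payload.
+ */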
+
+struct bdb_general_features {
+        /* bits 1 */
+	u8 panel_fitting:2;
+	u8 flexaim:1;
+	u8 msg_enable:1;
+	u8 clear_screen:3;
+	u8 color_flip:1;
+
+        /* bits 2 */
+	u8 download_ext_vbt:1;
+	u8 enable_ssc:1;
+	u8 ssc_freq:1;
+	u8 enable_lfp_on_override:1;
+	u8 disable_ssc_ddt:1;
+	u8 rsvd7:1;
+	u8 display_clock_mode:1;
+	u8 rsvd8:1; /* finish byte */
+
+        /* bits 3 */
+	u8 disable_smooth_vision:1;
+	u8 single_dvi:1;
+	u8 rsvd9:1;
+	u8 fdi_rx_polarity_inverted:1;
+	u8 rsvd10:4; /* finish byte */
+
+        /* bits 4 */
+	u8 legacy_monitor_detect;
+
+        /* bits 5 */
+	u8 int_crt_support:1;
+	u8 int_tv_support:1;
+	u8 int_efp_support:1;
+	u8 dp_ssc_enb:1;	/* PCH attached eDP supports SSC */
+	u8 dp_ssc_freq:1;	/* SSC freq for PCH attached eDP */
+	u8 rsvd11:3; /* finish byte */
+} __attribute__((packed));
+
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS	0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C	0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC	0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C	0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE	0x00
+#define DEVICE_TYPE_CRT		0x01
+#define DEVICE_TYPE_TV		0x09
+#define DEVICE_TYPE_EFP		0x12
+#define DEVICE_TYPE_LFP		0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS		0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG	0x4001
+#define DEVICE_TYPE_TV_COMPOSITE	0x0209
+#define DEVICE_TYPE_TV_MACROVISION	0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE	0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE	0x0609
+#define DEVICE_TYPE_TV_SCART		0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR	0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR	0x6052
+#define DEVICE_TYPE_EFP_DVI_I		0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL	0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP	0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR	0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX	0x6162
+#define DEVICE_TYPE_LFP_PANELLINK	0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR	0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR	0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL	0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP	0x51e2
+
+#define DEVICE_CFG_NONE		0x00
+#define DEVICE_CFG_12BIT_DVOB	0x01
+#define DEVICE_CFG_12BIT_DVOC	0x02
+#define DEVICE_CFG_24BIT_DVOBC	0x09
+#define DEVICE_CFG_24BIT_DVOCB	0x0a
+#define DEVICE_CFG_DUAL_DVOB	0x11
+#define DEVICE_CFG_DUAL_DVOC	0x12
+#define DEVICE_CFG_DUAL_DVOBC	0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC	0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB	0x1a
+
+#define DEVICE_WIRE_NONE	0x00
+#define DEVICE_WIRE_DVOB	0x01
+#define DEVICE_WIRE_DVOC	0x02
+#define DEVICE_WIRE_DVOBC	0x03
+#define DEVICE_WIRE_DVOBB	0x05
+#define DEVICE_WIRE_DVOCC	0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA	0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB	0x01
+#define DEVICE_PORT_DVOC	0x02
+
+struct child_device_config {
+	u16 handle;
+	u16 device_type;
+	u8  device_id[10]; /* ascii string */
+	u16 addin_offset;
+	u8  dvo_port; /* See Device_PORT_* above */
+	u8  i2c_pin;
+	u8  slave_addr;
+	u8  ddc_pin;
+	u16 edid_ptr;
+	u8  dvo_cfg; /* See DEVICE_CFG_* above */
+	u8  dvo2_port;
+	u8  i2c2_pin;
+	u8  slave2_addr;
+	u8  ddc2_pin;
+	u8  capabilities;
+	u8  dvo_wiring;/* See DEVICE_WIRE_* above */
+	u8  dvo2_wiring;
+	u16 extended_type;
+	u8  dvo_function;
+} __attribute__((packed));
+
+struct bdb_general_definitions {
+	/* DDC GPIO */
+	u8 crt_ddc_gmbus_pin;
+
+	/* DPMS bits */
+	u8 dpms_acpi:1;
+	u8 skip_boot_crt_detect:1;
+	u8 dpms_aim:1;
+	u8 rsvd1:5; /* finish byte */
+
+	/* boot device bits */
+	u8 boot_display[2];
+	u8 child_dev_size;
+
+	/*
+	 * Device info:
+	 * If TV is present, it'll be at devices[0].
+	 * LVDS will be next, either devices[0] or [1], if present.
+	 * On some platforms the number of devices is 6, but it could be as
+	 * few as 4 if both TV and LVDS are missing.
+	 * The device count is derived from the size of the general
+	 * definitions block using the following formula:
+	 * number = (block_size - sizeof(bdb_general_definitions))/
+	 *	     sizeof(child_device_config);
+	 */
+	struct child_device_config devices[0];
+} __attribute__((packed));
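+/*
+ * Worked example with the sizes as laid out above (5-byte fixed header,
+ * 33-byte child entries): a 203-byte block yields (203 - 5) / 33 = 6
+ * child devices.
+ */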
+
+struct bdb_lvds_options {
+	u8 panel_type;
+	u8 rsvd1;
+	/* LVDS capabilities, stored in a dword */
+	u8 pfit_mode:2;
+	u8 pfit_text_mode_enhanced:1;
+	u8 pfit_gfx_mode_enhanced:1;
+	u8 pfit_ratio_auto:1;
+	u8 pixel_dither:1;
+	u8 lvds_edid:1;
+	u8 rsvd2:1;
+	u8 rsvd4;
+} __attribute__((packed));
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+	u16 fp_timing_offset; /* offsets are from start of bdb */
+	u8 fp_table_size;
+	u16 dvo_timing_offset;
+	u8 dvo_table_size;
+	u16 panel_pnp_id_offset;
+	u8 pnp_table_size;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_ptrs {
+	u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+	struct bdb_lvds_lfp_data_ptr ptr[16];
+} __attribute__((packed));
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+	u16 x_res;
+	u16 y_res;
+	u32 lvds_reg;
+	u32 lvds_reg_val;
+	u32 pp_on_reg;
+	u32 pp_on_reg_val;
+	u32 pp_off_reg;
+	u32 pp_off_reg_val;
+	u32 pp_cycle_reg;
+	u32 pp_cycle_reg_val;
+	u32 pfit_reg;
+	u32 pfit_reg_val;
+	u16 terminator;
+} __attribute__((packed));
+
+struct lvds_dvo_timing {
+	u16 clock;		/**< In 10khz */
+	u8 hactive_lo;
+	u8 hblank_lo;
+	u8 hblank_hi:4;
+	u8 hactive_hi:4;
+	u8 vactive_lo;
+	u8 vblank_lo;
+	u8 vblank_hi:4;
+	u8 vactive_hi:4;
+	u8 hsync_off_lo;
+	u8 hsync_pulse_width;
+	u8 vsync_pulse_width:4;
+	u8 vsync_off:4;
+	u8 rsvd0:6;
+	u8 hsync_off_hi:2;
+	u8 h_image;
+	u8 v_image;
+	u8 max_hv;
+	u8 h_border;
+	u8 v_border;
+	u8 rsvd1:3;
+	u8 digital:2;
+	u8 vsync_positive:1;
+	u8 hsync_positive:1;
+	u8 rsvd2:1;
+} __attribute__((packed));
+
+struct lvds_pnp_id {
+	u16 mfg_name;
+	u16 product_code;
+	u32 serial;
+	u8 mfg_week;
+	u8 mfg_year;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data_entry {
+	struct lvds_fp_timing fp_timing;
+	struct lvds_dvo_timing dvo_timing;
+	struct lvds_pnp_id pnp_id;
+} __attribute__((packed));
+
+struct bdb_lvds_lfp_data {
+	struct bdb_lvds_lfp_data_entry data[16];
+} __attribute__((packed));
+
+struct aimdb_header {
+	char signature[16];
+	char oem_device[20];
+	u16 aimdb_version;
+	u16 aimdb_header_size;
+	u16 aimdb_size;
+} __attribute__((packed));
+
+struct aimdb_block {
+	u8 aimdb_id;
+	u16 aimdb_size;
+} __attribute__((packed));
+
+struct vch_panel_data {
+	u16 fp_timing_offset;
+	u8 fp_timing_size;
+	u16 dvo_timing_offset;
+	u8 dvo_timing_size;
+	u16 text_fitting_offset;
+	u8 text_fitting_size;
+	u16 graphics_fitting_offset;
+	u8 graphics_fitting_size;
+} __attribute__((packed));
+
+struct vch_bdb_22 {
+	struct aimdb_block aimdb_block;
+	struct vch_panel_data panels[16];
+} __attribute__((packed));
+
+struct bdb_sdvo_lvds_options {
+	u8 panel_backlight;
+	u8 h40_set_panel_type;
+	u8 panel_type;
+	u8 ssc_clk_freq;
+	u16 als_low_trip;
+	u16 als_high_trip;
+	u8 sclalarcoeff_tab_row_num;
+	u8 sclalarcoeff_tab_row_size;
+	u8 coefficient[8];
+	u8 panel_misc_bits_1;
+	u8 panel_misc_bits_2;
+	u8 panel_misc_bits_3;
+	u8 panel_misc_bits_4;
+} __attribute__((packed));
+
+
+#define BDB_DRIVER_FEATURE_NO_LVDS		0
+#define BDB_DRIVER_FEATURE_INT_LVDS		1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS		2
+#define BDB_DRIVER_FEATURE_EDP			3
+
+struct bdb_driver_features {
+	u8 boot_dev_algorithm:1;
+	u8 block_display_switch:1;
+	u8 allow_display_switch:1;
+	u8 hotplug_dvo:1;
+	u8 dual_view_zoom:1;
+	u8 int15h_hook:1;
+	u8 sprite_in_clone:1;
+	u8 primary_lfp_id:1;
+
+	u16 boot_mode_x;
+	u16 boot_mode_y;
+	u8 boot_mode_bpp;
+	u8 boot_mode_refresh;
+
+	u16 enable_lfp_primary:1;
+	u16 selective_mode_pruning:1;
+	u16 dual_frequency:1;
+	u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+	u16 nt_clone_support:1;
+	u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+	u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+	u16 cui_aspect_scaling:1;
+	u16 preserve_aspect_ratio:1;
+	u16 sdvo_device_power_down:1;
+	u16 crt_hotplug:1;
+	u16 lvds_config:2;
+	u16 tv_hotplug:1;
+	u16 hdmi_config:2;
+
+	u8 static_display:1;
+	u8 reserved2:7;
+	u16 legacy_crt_max_x;
+	u16 legacy_crt_max_y;
+	u8 legacy_crt_max_refresh;
+
+	u8 hdmi_termination;
+	u8 custom_vbt_version;
+} __attribute__((packed));
+
+#define EDP_18BPP	0
+#define EDP_24BPP	1
+#define EDP_30BPP	2
+#define EDP_RATE_1_62	0
+#define EDP_RATE_2_7	1
+#define EDP_LANE_1	0
+#define EDP_LANE_2	1
+#define EDP_LANE_4	3
+#define EDP_PREEMPHASIS_NONE	0
+#define EDP_PREEMPHASIS_3_5dB	1
+#define EDP_PREEMPHASIS_6dB	2
+#define EDP_PREEMPHASIS_9_5dB	3
+#define EDP_VSWING_0_4V		0
+#define EDP_VSWING_0_6V		1
+#define EDP_VSWING_0_8V		2
+#define EDP_VSWING_1_2V		3
+
+struct edp_power_seq {
+	u16 t1_t3;
+	u16 t8;
+	u16 t9;
+	u16 t10;
+	u16 t11_t12;
+} __attribute__ ((packed));
+
+struct edp_link_params {
+	u8 rate:4;
+	u8 lanes:4;
+	u8 preemphasis:4;
+	u8 vswing:4;
+} __attribute__ ((packed));
+
+struct bdb_edp {
+	struct edp_power_seq power_seqs[16];
+	u32 color_depth;
+	struct edp_link_params link_params[16];
+	u32 sdrrs_msa_timing_delay;
+
+	/* ith bit indicates enabled/disabled for (i+1)th panel */
+	u16 edp_s3d_feature;
+	u16 edp_t3_optimization;
+} __attribute__ ((packed));
+
+void intel_setup_bios(struct drm_device *dev);
+int intel_parse_bios(struct drm_device *dev);
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN	(1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK	0x78 /* See also SWF4 15:0 */
+#define   GR18_HK_NONE		(0x0<<3)
+#define   GR18_HK_LFP_STRETCH	(0x1<<3)
+#define   GR18_HK_TOGGLE_DISP	(0x2<<3)
+#define   GR18_HK_DISP_SWITCH	(0x4<<3) /* see SWF14 15:0 for what to enable */
+#define   GR18_HK_POPUP_DISABLED (0x6<<3)
+#define   GR18_HK_POPUP_ENABLED	(0x7<<3)
+#define   GR18_HK_PFIT		(0x8<<3)
+#define   GR18_HK_APM_CHANGE	(0xa<<3)
+#define   GR18_HK_MULTIPLE	(0xc<<3)
+#define GR18_USER_INT_EN	(1<<2)
+#define GR18_A0000_FLUSH_EN	(1<<1)
+#define GR18_SMM_EN		(1<<0)
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT	16
+#define SWF00_XRES_SHIFT	0
+#define SWF00_RES_MASK		0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT	8
+#define SWF01_TV1_FORMAT_SHIFT	0
+#define SWF01_TV_FORMAT_MASK	0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN	(1<<29)
+#define SWF10_GTT_OVERRIDE_EN	(1<<28)
+#define SWF10_LFP_DPMS_OVR	(1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define   SWF10_OLD_TOGGLE	0x0
+#define   SWF10_TOGGLE_LIST_1	0x1
+#define   SWF10_TOGGLE_LIST_2	0x2
+#define   SWF10_TOGGLE_LIST_3	0x3
+#define   SWF10_TOGGLE_LIST_4	0x4
+#define SWF10_PANNING_EN	(1<<23)
+#define SWF10_DRIVER_LOADED	(1<<22)
+#define SWF10_EXTENDED_DESKTOP	(1<<21)
+#define SWF10_EXCLUSIVE_MODE	(1<<20)
+#define SWF10_OVERLAY_EN	(1<<19)
+#define SWF10_PLANEB_HOLDOFF	(1<<18)
+#define SWF10_PLANEA_HOLDOFF	(1<<17)
+#define SWF10_VGA_HOLDOFF	(1<<16)
+#define SWF10_ACTIVE_DISP_MASK	0xffff
+#define   SWF10_PIPEB_LFP2	(1<<15)
+#define   SWF10_PIPEB_EFP2	(1<<14)
+#define   SWF10_PIPEB_TV2	(1<<13)
+#define   SWF10_PIPEB_CRT2	(1<<12)
+#define   SWF10_PIPEB_LFP	(1<<11)
+#define   SWF10_PIPEB_EFP	(1<<10)
+#define   SWF10_PIPEB_TV	(1<<9)
+#define   SWF10_PIPEB_CRT	(1<<8)
+#define   SWF10_PIPEA_LFP2	(1<<7)
+#define   SWF10_PIPEA_EFP2	(1<<6)
+#define   SWF10_PIPEA_TV2	(1<<5)
+#define   SWF10_PIPEA_CRT2	(1<<4)
+#define   SWF10_PIPEA_LFP	(1<<3)
+#define   SWF10_PIPEA_EFP	(1<<2)
+#define   SWF10_PIPEA_TV	(1<<1)
+#define   SWF10_PIPEA_CRT	(1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT	16
+#define SWF11_SV_TEST_EN	(1<<15)
+#define SWF11_IS_AGP		(1<<14)
+#define SWF11_DISPLAY_HOLDOFF	(1<<13)
+#define SWF11_DPMS_REDUCED	(1<<12)
+#define SWF11_IS_VBE_MODE	(1<<11)
+#define SWF11_PIPEB_ACCESS	(1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK		0x07
+#define   SWF11_DPMS_OFF	(1<<2)
+#define   SWF11_DPMS_SUSPEND	(1<<1)
+#define   SWF11_DPMS_STANDBY	(1<<0)
+#define   SWF11_DPMS_ON		0
+
+#define SWF14_GFX_PFIT_EN	(1<<31)
+#define SWF14_TEXT_PFIT_EN	(1<<30)
+#define SWF14_LID_STATUS_CLOSED	(1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN		(1<<28)
+#define SWF14_DISPLAY_HOLDOFF	(1<<27)
+#define SWF14_DISP_DETECT_EN	(1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS	(1<<24)
+#define SWF14_OS_TYPE_WIN9X	(1<<23)
+#define SWF14_OS_TYPE_WINNT	(1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK	0x00070000
+#define   SWF14_PM_ACPI_VIDEO	(0x4 << 16)
+#define   SWF14_PM_ACPI		(0x3 << 16)
+#define   SWF14_PM_APM_12	(0x2 << 16)
+#define   SWF14_PM_APM_11	(0x1 << 16)
+#define SWF14_HK_REQUEST_MASK	0x0000ffff /* see GR18 6:3 for event type */
+          /* if GR18 indicates a display switch */
+#define   SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define   SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define   SWF14_DS_PIPEB_TV2_EN  (1<<13)
+#define   SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define   SWF14_DS_PIPEB_LFP_EN  (1<<11)
+#define   SWF14_DS_PIPEB_EFP_EN  (1<<10)
+#define   SWF14_DS_PIPEB_TV_EN   (1<<9)
+#define   SWF14_DS_PIPEB_CRT_EN  (1<<8)
+#define   SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define   SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define   SWF14_DS_PIPEA_TV2_EN  (1<<5)
+#define   SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define   SWF14_DS_PIPEA_LFP_EN  (1<<3)
+#define   SWF14_DS_PIPEA_EFP_EN  (1<<2)
+#define   SWF14_DS_PIPEA_TV_EN   (1<<1)
+#define   SWF14_DS_PIPEA_CRT_EN  (1<<0)
+          /* if GR18 indicates a panel fitting request */
+#define   SWF14_PFIT_EN		(1<<0) /* 0 means disable */
+          /* if GR18 indicates an APM change request */
+#define   SWF14_APM_HIBERNATE	0x4
+#define   SWF14_APM_SUSPEND	0x3
+#define   SWF14_APM_STANDBY	0x1
+#define   SWF14_APM_RESTORE	0x0
+
+/* Add the device class for LFP, TV, HDMI */
+#define	 DEVICE_TYPE_INT_LFP	0x1022
+#define	 DEVICE_TYPE_INT_TV	0x1009
+#define	 DEVICE_TYPE_HDMI	0x60D2
+#define	 DEVICE_TYPE_DP		0x68C6
+#define	 DEVICE_TYPE_eDP	0x78C6
+
+/* define the DVO port for HDMI output type */
+#define		DVO_B		1
+#define		DVO_C		2
+#define		DVO_D		3
+
+/* define the PORT for DP output type */
+#define		PORT_IDPB	7
+#define		PORT_IDPC	8
+#define		PORT_IDPD	9
+
+#endif /* _I830_BIOS_H_ */
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_crt.c b/linux-imx/drivers/gpu/drm/i915/intel_crt.c
new file mode 100644
index 0000000..4a80996
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_crt.c
@@ -0,0 +1,817 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 |		\
+			   ADPA_CRT_HOTPLUG_WARMUP_10MS |		\
+			   ADPA_CRT_HOTPLUG_SAMPLE_4S |			\
+			   ADPA_CRT_HOTPLUG_VOLTAGE_50 |		\
+			   ADPA_CRT_HOTPLUG_VOLREF_325MV |		\
+			   ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+	struct intel_encoder base;
+	/* DPMS state is stored in the connector, which we need in the
+	 * encoder's enable/disable callbacks */
+	struct intel_connector *connector;
+	bool force_hotplug_required;
+	u32 adpa_reg;
+};
+
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_crt, base);
+}
+
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
+{
+	return container_of(encoder, struct intel_crt, base);
+}
+
+static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+	u32 tmp;
+
+	tmp = I915_READ(crt->adpa_reg);
+
+	if (!(tmp & ADPA_DAC_ENABLE))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+/* Note: The caller is required to filter out dpms modes not supported by the
+ * platform. */
+static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+	u32 temp;
+
+	temp = I915_READ(crt->adpa_reg);
+	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+	temp &= ~ADPA_DAC_ENABLE;
+
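+	/*
+	 * Per VESA DPMS: STANDBY drops hsync, SUSPEND drops vsync, OFF drops
+	 * both and the DAC; only ON runs the DAC with both syncs active.
+	 */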
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		temp |= ADPA_DAC_ENABLE;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+		break;
+	}
+
+	I915_WRITE(crt->adpa_reg, temp);
+}
+
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+	intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+	struct intel_crt *crt = intel_encoder_to_crt(encoder);
+
+	intel_crt_set_dpms(encoder, crt->connector->base.dpms);
+}
+
+
+static void intel_crt_dpms(struct drm_connector *connector, int mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
+	struct drm_crtc *crtc;
+	int old_dpms;
+
+	/* PCH platforms and VLV only support on/off. */
+	if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
+
+	if (mode == connector->dpms)
+		return;
+
+	old_dpms = connector->dpms;
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	crtc = encoder->base.crtc;
+	if (!crtc) {
+		encoder->connectors_active = false;
+		return;
+	}
+
+	/* We need the pipe to run for anything but OFF. */
+	if (mode == DRM_MODE_DPMS_OFF)
+		encoder->connectors_active = false;
+	else
+		encoder->connectors_active = true;
+
+	if (mode < old_dpms) {
+		/* From off to on, enable the pipe first. */
+		intel_crtc_update_dpms(crtc);
+
+		intel_crt_set_dpms(encoder, mode);
+	} else {
+		intel_crt_set_dpms(encoder, mode);
+
+		intel_crtc_update_dpms(crtc);
+	}
+
+	intel_modeset_check_state(connector->dev);
+}
+
+static int intel_crt_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	struct drm_device *dev = connector->dev;
+	int max_clock = 0;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	if (mode->clock < 25000)
+		return MODE_CLOCK_LOW;
+
+	if (IS_GEN2(dev))
+		max_clock = 350000;
+	else
+		max_clock = 400000;
+	if (mode->clock > max_clock)
+		return MODE_CLOCK_HIGH;
+
+	/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+	if (HAS_PCH_LPT(dev) &&
+	    (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static bool intel_crt_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = encoder->base.dev;
+
+	if (HAS_PCH_SPLIT(dev))
+		pipe_config->has_pch_encoder = true;
+
+	return true;
+}
+
+static void intel_crt_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crt *crt =
+		intel_encoder_to_crt(to_intel_encoder(encoder));
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 adpa;
+
+	if (HAS_PCH_SPLIT(dev))
+		adpa = ADPA_HOTPLUG_BITS;
+	else
+		adpa = 0;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+	/* For CPT allow 3 pipe config, for others just use A or B */
+	if (HAS_PCH_LPT(dev))
+		; /* Those bits don't exist here */
+	else if (HAS_PCH_CPT(dev))
+		adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
+	else if (intel_crtc->pipe == 0)
+		adpa |= ADPA_PIPE_A_SELECT;
+	else
+		adpa |= ADPA_PIPE_B_SELECT;
+
+	if (!HAS_PCH_SPLIT(dev))
+		I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
+
+	I915_WRITE(crt->adpa_reg, adpa);
+}
+
+static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_crt *crt = intel_attached_crt(connector);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 adpa;
+	bool ret;
+
+	/* The first time through, trigger an explicit detection cycle */
+	if (crt->force_hotplug_required) {
+		bool turn_off_dac = HAS_PCH_SPLIT(dev);
+		u32 save_adpa;
+
+		crt->force_hotplug_required = 0;
+
+		save_adpa = adpa = I915_READ(crt->adpa_reg);
+		DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+		adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+		if (turn_off_dac)
+			adpa &= ~ADPA_DAC_ENABLE;
+
+		I915_WRITE(crt->adpa_reg, adpa);
+
+		if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+			     1000))
+			DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+
+		if (turn_off_dac) {
+			I915_WRITE(crt->adpa_reg, save_adpa);
+			POSTING_READ(crt->adpa_reg);
+		}
+	}
+
+	/* Check the status to see if both blue and green are on now */
+	adpa = I915_READ(crt->adpa_reg);
+	ret = (adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0;
+	DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+	return ret;
+}
+
+static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_crt *crt = intel_attached_crt(connector);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 adpa;
+	bool ret;
+	u32 save_adpa;
+
+	save_adpa = adpa = I915_READ(crt->adpa_reg);
+	DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+	adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+
+	I915_WRITE(crt->adpa_reg, adpa);
+
+	if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+		     1000)) {
+		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+		I915_WRITE(crt->adpa_reg, save_adpa);
+	}
+
+	/* Check the status to see if both blue and green are on now */
+	adpa = I915_READ(crt->adpa_reg);
+	ret = (adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0;
+
+	DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+	/* FIXME: debug force function and remove */
+	ret = true;
+
+	return ret;
+}
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Not for i915G/i915GM
+ *
+ * \return true if CRT is connected.
+ * \return false if CRT is disconnected.
+ */
+static bool intel_crt_detect_hotplug(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 hotplug_en, orig, stat;
+	bool ret = false;
+	int i, tries = 0;
+
+	if (HAS_PCH_SPLIT(dev))
+		return intel_ironlake_crt_detect_hotplug(connector);
+
+	if (IS_VALLEYVIEW(dev))
+		return valleyview_crt_detect_hotplug(connector);
+
+	/*
+	 * On 4 series desktop, the CRT detect sequence needs to be done twice
+	 * to get a reliable result.
+	 */
+
+	if (IS_G4X(dev) && !IS_GM45(dev))
+		tries = 2;
+	else
+		tries = 1;
+	hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
+	hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+	for (i = 0; i < tries ; i++) {
+		/* turn on the FORCE_DETECT */
+		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+		/* wait for FORCE_DETECT to go off */
+		if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
+			      CRT_HOTPLUG_FORCE_DETECT) == 0,
+			     1000))
+			DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
+	}
+
+	stat = I915_READ(PORT_HOTPLUG_STAT);
+	if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
+		ret = true;
+
+	/* clear the interrupt we just generated, if any */
+	I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+
+	/* and put the bits back */
+	I915_WRITE(PORT_HOTPLUG_EN, orig);
+
+	return ret;
+}
+
+static struct edid *intel_crt_get_edid(struct drm_connector *connector,
+				struct i2c_adapter *i2c)
+{
+	struct edid *edid;
+
+	edid = drm_get_edid(connector, i2c);
+
+	if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+		DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+		intel_gmbus_force_bit(i2c, true);
+		edid = drm_get_edid(connector, i2c);
+		intel_gmbus_force_bit(i2c, false);
+	}
+
+	return edid;
+}
+
+/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
+static int intel_crt_ddc_get_modes(struct drm_connector *connector,
+				struct i2c_adapter *adapter)
+{
+	struct edid *edid;
+	int ret;
+
+	edid = intel_crt_get_edid(connector, adapter);
+	if (!edid)
+		return 0;
+
+	ret = intel_connector_update_modes(connector, edid);
+	kfree(edid);
+
+	return ret;
+}
+
+static bool intel_crt_detect_ddc(struct drm_connector *connector)
+{
+	struct intel_crt *crt = intel_attached_crt(connector);
+	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+	struct edid *edid;
+	struct i2c_adapter *i2c;
+
+	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
+
+	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+	edid = intel_crt_get_edid(connector, i2c);
+
+	if (edid) {
+		bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+
+		/*
+		 * This may be a DVI-I connector with a shared DDC
+		 * link between analog and digital outputs, so we
+		 * have to check the EDID input spec of the attached device.
+		 */
+		if (!is_digital) {
+			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+			return true;
+		}
+
+		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+	} else {
+		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
+	}
+
+	kfree(edid);
+
+	return false;
+}
+
+static enum drm_connector_status
+intel_crt_load_detect(struct intel_crt *crt)
+{
+	struct drm_device *dev = crt->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
+	uint32_t save_bclrpat;
+	uint32_t save_vtotal;
+	uint32_t vtotal, vactive;
+	uint32_t vsample;
+	uint32_t vblank, vblank_start, vblank_end;
+	uint32_t dsl;
+	uint32_t bclrpat_reg;
+	uint32_t vtotal_reg;
+	uint32_t vblank_reg;
+	uint32_t vsync_reg;
+	uint32_t pipeconf_reg;
+	uint32_t pipe_dsl_reg;
+	uint8_t	st00;
+	enum drm_connector_status status;
+
+	DRM_DEBUG_KMS("starting load-detect on CRT\n");
+
+	bclrpat_reg = BCLRPAT(pipe);
+	vtotal_reg = VTOTAL(pipe);
+	vblank_reg = VBLANK(pipe);
+	vsync_reg = VSYNC(pipe);
+	pipeconf_reg = PIPECONF(pipe);
+	pipe_dsl_reg = PIPEDSL(pipe);
+
+	save_bclrpat = I915_READ(bclrpat_reg);
+	save_vtotal = I915_READ(vtotal_reg);
+	vblank = I915_READ(vblank_reg);
+
+	vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
+	vactive = (save_vtotal & 0x7ff) + 1;
+
+	vblank_start = (vblank & 0xfff) + 1;
+	vblank_end = ((vblank >> 16) & 0xfff) + 1;
+
+	/* Set the border color to purple. */
+	I915_WRITE(bclrpat_reg, 0x500050);
+
+	if (!IS_GEN2(dev)) {
+		uint32_t pipeconf = I915_READ(pipeconf_reg);
+		I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
+		POSTING_READ(pipeconf_reg);
+		/* Wait for the next vblank to substitute
+		 * the border color for the color info */
+		intel_wait_for_vblank(dev, pipe);
+		st00 = I915_READ8(VGA_MSR_WRITE);
+		status = ((st00 & (1 << 4)) != 0) ?
+			connector_status_connected :
+			connector_status_disconnected;
+
+		I915_WRITE(pipeconf_reg, pipeconf);
+	} else {
+		bool restore_vblank = false;
+		int count, detect;
+
+		/*
+		 * If there isn't any border, add some.
+		 * Yes, this will flicker.
+		 */
+		if (vblank_start <= vactive && vblank_end >= vtotal) {
+			uint32_t vsync = I915_READ(vsync_reg);
+			uint32_t vsync_start = (vsync & 0xffff) + 1;
+
+			vblank_start = vsync_start;
+			I915_WRITE(vblank_reg,
+				   (vblank_start - 1) |
+				   ((vblank_end - 1) << 16));
+			restore_vblank = true;
+		}
+		/* sample in the vertical border, selecting the larger one */
+		if (vblank_start - vactive >= vtotal - vblank_end)
+			vsample = (vblank_start + vactive) >> 1;
+		else
+			vsample = (vtotal + vblank_end) >> 1;
+
+		/*
+		 * Wait for the border to be displayed
+		 */
+		while (I915_READ(pipe_dsl_reg) >= vactive)
+			;
+		while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample)
+			;
+		/*
+		 * Watch ST00 for an entire scanline
+		 */
+		detect = 0;
+		count = 0;
+		do {
+			count++;
+			/* Read the ST00 VGA status register */
+			st00 = I915_READ8(VGA_MSR_WRITE);
+			if (st00 & (1 << 4))
+				detect++;
+		} while ((I915_READ(pipe_dsl_reg) == dsl));
+
+		/* restore vblank if necessary */
+		if (restore_vblank)
+			I915_WRITE(vblank_reg, vblank);
+		/*
+		 * If more than 3/4 of the scanline detected a monitor,
+		 * then it is assumed to be present. This works even on i830,
+		 * where there isn't any way to force the border color across
+		 * the screen
+		 */
+		status = detect * 4 > count * 3 ?
+			 connector_status_connected :
+			 connector_status_disconnected;
+	}
+
+	/* Restore previous settings */
+	I915_WRITE(bclrpat_reg, save_bclrpat);
+
+	return status;
+}
+
+static enum drm_connector_status
+intel_crt_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_crt *crt = intel_attached_crt(connector);
+	enum drm_connector_status status;
+	struct intel_load_detect_pipe tmp;
+
+	if (I915_HAS_HOTPLUG(dev)) {
+		/* We cannot rely on the HPD pin always being correctly wired
+		 * up, for example many KVMs do not pass it through, and so
+		 * we only trust an assertion that the monitor is connected.
+		 */
+		if (intel_crt_detect_hotplug(connector)) {
+			DRM_DEBUG_KMS("CRT detected via hotplug\n");
+			return connector_status_connected;
+		} else
+			DRM_DEBUG_KMS("CRT not detected via hotplug\n");
+	}
+
+	if (intel_crt_detect_ddc(connector))
+		return connector_status_connected;
+
+	/* Load detection is broken on HPD capable machines. Whoever wants a
+	 * broken monitor (without EDID) to work behind a broken KVM (that fails
+	 * to have the right resistors for HP detection) needs to fix this up.
+	 * For now just bail out. */
+	if (I915_HAS_HOTPLUG(dev))
+		return connector_status_disconnected;
+
+	if (!force)
+		return connector->status;
+
+	/* for pre-945g platforms use load detect */
+	if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
+		if (intel_crt_detect_ddc(connector))
+			status = connector_status_connected;
+		else
+			status = intel_crt_load_detect(crt);
+		intel_release_load_detect_pipe(connector, &tmp);
+	} else
+		status = connector_status_unknown;
+
+	return status;
+}
+
+static void intel_crt_destroy(struct drm_connector *connector)
+{
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static int intel_crt_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+	struct i2c_adapter *i2c;
+
+	i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+	ret = intel_crt_ddc_get_modes(connector, i2c);
+	if (ret || !IS_G4X(dev))
+		return ret;
+
+	/* Try to probe digital port for output in DVI-I -> VGA mode. */
+	i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
+	return intel_crt_ddc_get_modes(connector, i2c);
+}
+
+static int intel_crt_set_property(struct drm_connector *connector,
+				  struct drm_property *property,
+				  uint64_t value)
+{
+	return 0;
+}
+
+static void intel_crt_reset(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crt *crt = intel_attached_crt(connector);
+
+	if (HAS_PCH_SPLIT(dev)) {
+		u32 adpa;
+
+		adpa = I915_READ(crt->adpa_reg);
+		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+		adpa |= ADPA_HOTPLUG_BITS;
+		I915_WRITE(crt->adpa_reg, adpa);
+		POSTING_READ(crt->adpa_reg);
+
+		DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+		crt->force_hotplug_required = 1;
+	}
+}
+
+/*
+ * Routines for controlling the analog port
+ */
+
+static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
+	.mode_set = intel_crt_mode_set,
+};
+
+static const struct drm_connector_funcs intel_crt_connector_funcs = {
+	.reset = intel_crt_reset,
+	.dpms = intel_crt_dpms,
+	.detect = intel_crt_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = intel_crt_destroy,
+	.set_property = intel_crt_set_property,
+};
+
+static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
+	.mode_valid = intel_crt_mode_valid,
+	.get_modes = intel_crt_get_modes,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+	.destroy = intel_encoder_destroy,
+};
+
+static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+{
+	DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
+	return 1;
+}
+
+static const struct dmi_system_id intel_no_crt[] = {
+	{
+		.callback = intel_no_crt_dmi_callback,
+		.ident = "ACER ZGB",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+		},
+	},
+	{
+		.callback = intel_no_crt_dmi_callback,
+		.ident = "DELL XPS 8700",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
+		},
+	},
+	{ }
+};
+
+void intel_crt_init(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct intel_crt *crt;
+	struct intel_connector *intel_connector;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Skip machines without VGA that falsely report hotplug events */
+	if (dmi_check_system(intel_no_crt))
+		return;
+
+	crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
+	if (!crt)
+		return;
+
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(crt);
+		return;
+	}
+
+	connector = &intel_connector->base;
+	crt->connector = intel_connector;
+	drm_connector_init(dev, &intel_connector->base,
+			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+	drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
+			 DRM_MODE_ENCODER_DAC);
+
+	intel_connector_attach_encoder(intel_connector, &crt->base);
+
+	crt->base.type = INTEL_OUTPUT_ANALOG;
+	crt->base.cloneable = true;
+	if (IS_I830(dev))
+		crt->base.crtc_mask = (1 << 0);
+	else
+		crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
+	if (IS_GEN2(dev))
+		connector->interlace_allowed = 0;
+	else
+		connector->interlace_allowed = 1;
+	connector->doublescan_allowed = 0;
+
+	if (HAS_PCH_SPLIT(dev))
+		crt->adpa_reg = PCH_ADPA;
+	else if (IS_VALLEYVIEW(dev))
+		crt->adpa_reg = VLV_ADPA;
+	else
+		crt->adpa_reg = ADPA;
+
+	crt->base.compute_config = intel_crt_compute_config;
+	crt->base.disable = intel_disable_crt;
+	crt->base.enable = intel_enable_crt;
+	if (I915_HAS_HOTPLUG(dev))
+		crt->base.hpd_pin = HPD_CRT;
+	if (HAS_DDI(dev))
+		crt->base.get_hw_state = intel_ddi_get_hw_state;
+	else
+		crt->base.get_hw_state = intel_crt_get_hw_state;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+	drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
+	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+	drm_sysfs_connector_add(connector);
+
+	if (!I915_HAS_HOTPLUG(dev))
+		intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+	/*
+	 * Configure the automatic hotplug detection stuff
+	 */
+	crt->force_hotplug_required = 0;
+
+	/*
+	 * TODO: find a proper way to discover whether we need to set the
+	 * polarity and link reversal bits or not, instead of relying on the
+	 * BIOS.
+	 */
+	if (HAS_PCH_LPT(dev)) {
+		u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
+				 FDI_RX_LINK_REVERSAL_OVERRIDE;
+
+		dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_ddi.c b/linux-imx/drivers/gpu/drm/i915/intel_ddi.c
new file mode 100644
index 0000000..7ce3834
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_ddi.c
@@ -0,0 +1,1568 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+/* HDMI/DVI modes ignore everything but the last 2 items. So we share
+ * them for both DP and FDI transports, allowing those ports to
+ * automatically adapt to HDMI connections as well.
+ */
+static const u32 hsw_ddi_translations_dp[] = {
+	0x00FFFFFF, 0x0006000E,		/* DP parameters */
+	0x00D75FFF, 0x0005000A,
+	0x00C30FFF, 0x00040006,
+	0x80AAAFFF, 0x000B0000,
+	0x00FFFFFF, 0x0005000A,
+	0x00D75FFF, 0x000C0004,
+	0x80C30FFF, 0x000B0000,
+	0x00FFFFFF, 0x00040006,
+	0x80D75FFF, 0x000B0000,
+	0x00FFFFFF, 0x00040006		/* HDMI parameters */
+};
+
+static const u32 hsw_ddi_translations_fdi[] = {
+	0x00FFFFFF, 0x0007000E,		/* FDI parameters */
+	0x00D75FFF, 0x000F000A,
+	0x00C30FFF, 0x00060006,
+	0x00AAAFFF, 0x001E0000,
+	0x00FFFFFF, 0x000F000A,
+	0x00D75FFF, 0x00160004,
+	0x00C30FFF, 0x001E0000,
+	0x00FFFFFF, 0x00060006,
+	0x00D75FFF, 0x001E0000,
+	0x00FFFFFF, 0x00040006		/* HDMI parameters */
+};
+
+static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	int type = intel_encoder->type;
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+	    type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
+		struct intel_digital_port *intel_dig_port =
+			enc_to_dig_port(encoder);
+		return intel_dig_port->port;
+
+	} else if (type == INTEL_OUTPUT_ANALOG) {
+		return PORT_E;
+
+	} else {
+		DRM_ERROR("Invalid DDI encoder type %d\n", type);
+		BUG();
+	}
+}
+
+/* On Haswell, DDI port buffers must be programmed with correct values
+ * in advance. The buffer values are different for FDI and DP modes,
+ * but the HDMI/DVI fields are shared among those. So we program the DDI
+ * in either FDI or DP modes only, as HDMI connections will work with both
+ * of those.
+ */
+static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+				      bool use_fdi_mode)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg;
+	int i;
+	const u32 *ddi_translations = ((use_fdi_mode) ?
+		hsw_ddi_translations_fdi :
+		hsw_ddi_translations_dp);
+
+	DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
+			port_name(port),
+			use_fdi_mode ? "FDI" : "DP");
+
+	WARN((use_fdi_mode && (port != PORT_E)),
+		"Programming port %c in FDI mode, this probably will not work.\n",
+		port_name(port));
+
+	for (i = 0, reg = DDI_BUF_TRANS(port);
+	     i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+		I915_WRITE(reg, ddi_translations[i]);
+		reg += 4;
+	}
+}
+
+/* Program DDI buffers translations for DP. By default, program ports A-D in DP
+ * mode and port E for FDI.
+ */
+void intel_prepare_ddi(struct drm_device *dev)
+{
+	int port;
+
+	if (!HAS_DDI(dev))
+		return;
+
+	for (port = PORT_A; port < PORT_E; port++)
+		intel_prepare_ddi_buffers(dev, port, false);
+
+	/* DDI E is the suggested one to work in FDI mode, so program it as such
+	 * by default. It will have to be re-programmed in case a digital DP
+	 * output is detected on it.
+	 */
+	intel_prepare_ddi_buffers(dev, PORT_E, true);
+}
+
+static const long hsw_ddi_buf_ctl_values[] = {
+	DDI_BUF_EMP_400MV_0DB_HSW,
+	DDI_BUF_EMP_400MV_3_5DB_HSW,
+	DDI_BUF_EMP_400MV_6DB_HSW,
+	DDI_BUF_EMP_400MV_9_5DB_HSW,
+	DDI_BUF_EMP_600MV_0DB_HSW,
+	DDI_BUF_EMP_600MV_3_5DB_HSW,
+	DDI_BUF_EMP_600MV_6DB_HSW,
+	DDI_BUF_EMP_800MV_0DB_HSW,
+	DDI_BUF_EMP_800MV_3_5DB_HSW
+};
+
+static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+				    enum port port)
+{
+	uint32_t reg = DDI_BUF_CTL(port);
+	int i;
+
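+	/* The idle bit is expected to assert within a few microseconds;
+	 * give up after roughly 8us of polling.
+	 */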
+	for (i = 0; i < 8; i++) {
+		udelay(1);
+		if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+			return;
+	}
+	DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+}
+
+/* Starting with Haswell, different DDI ports can work in FDI mode for
+ * connection to the PCH-located connectors. For this, it is necessary to train
+ * both the DDI port and PCH receiver for the desired DDI buffer settings.
+ *
+ * The recommended port to work in FDI mode is DDI E, which we use here. Also,
+ * please note that when FDI mode is active on DDI E, it shares 2 lanes with
+ * DDI A (which is used for eDP).
+ */
+
+void hsw_fdi_link_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	u32 temp, i, rx_ctl_val;
+
+	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed in the
+	 * mode set "sequence for CRT port" document:
+	 * - TP1 to TP2 time with the default value
+	 * - FDI delay to 90h
+	 */
+	I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
+				  FDI_RX_PWRDN_LANE0_VAL(2) |
+				  FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+	/* Enable the PCH Receiver FDI PLL */
+	rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+		     FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);
+	I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+	POSTING_READ(_FDI_RXA_CTL);
+	udelay(220);
+
+	/* Switch from Rawclk to PCDclk */
+	rx_ctl_val |= FDI_PCDCLK;
+	I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+
+	/* Configure Port Clock Select */
+	I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+
+	/* Start the training, iterating through the available voltage and
+	 * emphasis levels, and testing each combination twice. */
+	for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
+		/* Configure DP_TP_CTL with auto-training */
+		I915_WRITE(DP_TP_CTL(PORT_E),
+					DP_TP_CTL_FDI_AUTOTRAIN |
+					DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+					DP_TP_CTL_LINK_TRAIN_PAT1 |
+					DP_TP_CTL_ENABLE);
+
+		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
+		 * DDI E does not support port reversal, the functionality is
+		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
+		 * port reversal bit */
+		I915_WRITE(DDI_BUF_CTL(PORT_E),
+			   DDI_BUF_CTL_ENABLE |
+			   ((intel_crtc->fdi_lanes - 1) << 1) |
+			   hsw_ddi_buf_ctl_values[i / 2]);
+		POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+		udelay(600);
+
+		/* Program PCH FDI Receiver TU */
+		I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
+
+		/* Enable PCH FDI Receiver with auto-training */
+		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+		I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+		POSTING_READ(_FDI_RXA_CTL);
+
+		/* Wait for FDI receiver lane calibration */
+		udelay(30);
+
+		/* Unset FDI_RX_MISC pwrdn lanes */
+		temp = I915_READ(_FDI_RXA_MISC);
+		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+		I915_WRITE(_FDI_RXA_MISC, temp);
+		POSTING_READ(_FDI_RXA_MISC);
+
+		/* Wait for FDI auto training time */
+		udelay(5);
+
+		temp = I915_READ(DP_TP_STATUS(PORT_E));
+		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
+			DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
+
+			/* Enable normal pixel sending for FDI */
+			I915_WRITE(DP_TP_CTL(PORT_E),
+				   DP_TP_CTL_FDI_AUTOTRAIN |
+				   DP_TP_CTL_LINK_TRAIN_NORMAL |
+				   DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+				   DP_TP_CTL_ENABLE);
+
+			return;
+		}
+
+		temp = I915_READ(DDI_BUF_CTL(PORT_E));
+		temp &= ~DDI_BUF_CTL_ENABLE;
+		I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
+		POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+		temp = I915_READ(DP_TP_CTL(PORT_E));
+		temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+		temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+		I915_WRITE(DP_TP_CTL(PORT_E), temp);
+		POSTING_READ(DP_TP_CTL(PORT_E));
+
+		intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+		rx_ctl_val &= ~FDI_RX_ENABLE;
+		I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+		POSTING_READ(_FDI_RXA_CTL);
+
+		/* Reset FDI_RX_MISC pwrdn lanes */
+		temp = I915_READ(_FDI_RXA_MISC);
+		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+		temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+		I915_WRITE(_FDI_RXA_MISC, temp);
+		POSTING_READ(_FDI_RXA_MISC);
+	}
+
+	DRM_ERROR("FDI link training failed!\n");
+}
+
+/* WRPLL clock dividers */
+struct wrpll_tmds_clock {
+	u32 clock;
+	u16 p;		/* Post divider */
+	u16 n2;		/* Feedback divider */
+	u16 r2;		/* Reference divider */
+};
+
+/* Table of matching values for WRPLL clocks programming for each frequency.
+ * The code assumes this table is sorted. */
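+/* An observation from the table values rather than from any documentation:
+ * the dividers appear to satisfy clock ~= 2700000 * n2 / (r2 * p) / 5 kHz,
+ * i.e. the WRPLL runs off the 2.7 GHz LCPLL and produces 5x the TMDS pixel
+ * clock (half the 10x bit clock). For example, for the {270000, 2, 14, 14}
+ * entry: 2700000 * 14 / (14 * 2) / 5 = 270000.
+ */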
+static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
+	{19750,	38,	25,	18},
+	{20000,	48,	32,	18},
+	{21000,	36,	21,	15},
+	{21912,	42,	29,	17},
+	{22000,	36,	22,	15},
+	{23000,	36,	23,	15},
+	{23500,	40,	40,	23},
+	{23750,	26,	16,	14},
+	{24000,	36,	24,	15},
+	{25000,	36,	25,	15},
+	{25175,	26,	40,	33},
+	{25200,	30,	21,	15},
+	{26000,	36,	26,	15},
+	{27000,	30,	21,	14},
+	{27027,	18,	100,	111},
+	{27500,	30,	29,	19},
+	{28000,	34,	30,	17},
+	{28320,	26,	30,	22},
+	{28322,	32,	42,	25},
+	{28750,	24,	23,	18},
+	{29000,	30,	29,	18},
+	{29750,	32,	30,	17},
+	{30000,	30,	25,	15},
+	{30750,	30,	41,	24},
+	{31000,	30,	31,	18},
+	{31500,	30,	28,	16},
+	{32000,	30,	32,	18},
+	{32500,	28,	32,	19},
+	{33000,	24,	22,	15},
+	{34000,	28,	30,	17},
+	{35000,	26,	32,	19},
+	{35500,	24,	30,	19},
+	{36000,	26,	26,	15},
+	{36750,	26,	46,	26},
+	{37000,	24,	23,	14},
+	{37762,	22,	40,	26},
+	{37800,	20,	21,	15},
+	{38000,	24,	27,	16},
+	{38250,	24,	34,	20},
+	{39000,	24,	26,	15},
+	{40000,	24,	32,	18},
+	{40500,	20,	21,	14},
+	{40541,	22,	147,	89},
+	{40750,	18,	19,	14},
+	{41000,	16,	17,	14},
+	{41500,	22,	44,	26},
+	{41540,	22,	44,	26},
+	{42000,	18,	21,	15},
+	{42500,	22,	45,	26},
+	{43000,	20,	43,	27},
+	{43163,	20,	24,	15},
+	{44000,	18,	22,	15},
+	{44900,	20,	108,	65},
+	{45000,	20,	25,	15},
+	{45250,	20,	52,	31},
+	{46000,	18,	23,	15},
+	{46750,	20,	45,	26},
+	{47000,	20,	40,	23},
+	{48000,	18,	24,	15},
+	{49000,	18,	49,	30},
+	{49500,	16,	22,	15},
+	{50000,	18,	25,	15},
+	{50500,	18,	32,	19},
+	{51000,	18,	34,	20},
+	{52000,	18,	26,	15},
+	{52406,	14,	34,	25},
+	{53000,	16,	22,	14},
+	{54000,	16,	24,	15},
+	{54054,	16,	173,	108},
+	{54500,	14,	24,	17},
+	{55000,	12,	22,	18},
+	{56000,	14,	45,	31},
+	{56250,	16,	25,	15},
+	{56750,	14,	25,	17},
+	{57000,	16,	27,	16},
+	{58000,	16,	43,	25},
+	{58250,	16,	38,	22},
+	{58750,	16,	40,	23},
+	{59000,	14,	26,	17},
+	{59341,	14,	40,	26},
+	{59400,	16,	44,	25},
+	{60000,	16,	32,	18},
+	{60500,	12,	39,	29},
+	{61000,	14,	49,	31},
+	{62000,	14,	37,	23},
+	{62250,	14,	42,	26},
+	{63000,	12,	21,	15},
+	{63500,	14,	28,	17},
+	{64000,	12,	27,	19},
+	{65000,	14,	32,	19},
+	{65250,	12,	29,	20},
+	{65500,	12,	32,	22},
+	{66000,	12,	22,	15},
+	{66667,	14,	38,	22},
+	{66750,	10,	21,	17},
+	{67000,	14,	33,	19},
+	{67750,	14,	58,	33},
+	{68000,	14,	30,	17},
+	{68179,	14,	46,	26},
+	{68250,	14,	46,	26},
+	{69000,	12,	23,	15},
+	{70000,	12,	28,	18},
+	{71000,	12,	30,	19},
+	{72000,	12,	24,	15},
+	{73000,	10,	23,	17},
+	{74000,	12,	23,	14},
+	{74176,	8,	100,	91},
+	{74250,	10,	22,	16},
+	{74481,	12,	43,	26},
+	{74500,	10,	29,	21},
+	{75000,	12,	25,	15},
+	{75250,	10,	39,	28},
+	{76000,	12,	27,	16},
+	{77000,	12,	53,	31},
+	{78000,	12,	26,	15},
+	{78750,	12,	28,	16},
+	{79000,	10,	38,	26},
+	{79500,	10,	28,	19},
+	{80000,	12,	32,	18},
+	{81000,	10,	21,	14},
+	{81081,	6,	100,	111},
+	{81624,	8,	29,	24},
+	{82000,	8,	17,	14},
+	{83000,	10,	40,	26},
+	{83950,	10,	28,	18},
+	{84000,	10,	28,	18},
+	{84750,	6,	16,	17},
+	{85000,	6,	17,	18},
+	{85250,	10,	30,	19},
+	{85750,	10,	27,	17},
+	{86000,	10,	43,	27},
+	{87000,	10,	29,	18},
+	{88000,	10,	44,	27},
+	{88500,	10,	41,	25},
+	{89000,	10,	28,	17},
+	{89012,	6,	90,	91},
+	{89100,	10,	33,	20},
+	{90000,	10,	25,	15},
+	{91000,	10,	32,	19},
+	{92000,	10,	46,	27},
+	{93000,	10,	31,	18},
+	{94000,	10,	40,	23},
+	{94500,	10,	28,	16},
+	{95000,	10,	44,	25},
+	{95654,	10,	39,	22},
+	{95750,	10,	39,	22},
+	{96000,	10,	32,	18},
+	{97000,	8,	23,	16},
+	{97750,	8,	42,	29},
+	{98000,	8,	45,	31},
+	{99000,	8,	22,	15},
+	{99750,	8,	34,	23},
+	{100000,	6,	20,	18},
+	{100500,	6,	19,	17},
+	{101000,	6,	37,	33},
+	{101250,	8,	21,	14},
+	{102000,	6,	17,	15},
+	{102250,	6,	25,	22},
+	{103000,	8,	29,	19},
+	{104000,	8,	37,	24},
+	{105000,	8,	28,	18},
+	{106000,	8,	22,	14},
+	{107000,	8,	46,	29},
+	{107214,	8,	27,	17},
+	{108000,	8,	24,	15},
+	{108108,	8,	173,	108},
+	{109000,	6,	23,	19},
+	{110000,	6,	22,	18},
+	{110013,	6,	22,	18},
+	{110250,	8,	49,	30},
+	{110500,	8,	36,	22},
+	{111000,	8,	23,	14},
+	{111264,	8,	150,	91},
+	{111375,	8,	33,	20},
+	{112000,	8,	63,	38},
+	{112500,	8,	25,	15},
+	{113100,	8,	57,	34},
+	{113309,	8,	42,	25},
+	{114000,	8,	27,	16},
+	{115000,	6,	23,	18},
+	{116000,	8,	43,	25},
+	{117000,	8,	26,	15},
+	{117500,	8,	40,	23},
+	{118000,	6,	38,	29},
+	{119000,	8,	30,	17},
+	{119500,	8,	46,	26},
+	{119651,	8,	39,	22},
+	{120000,	8,	32,	18},
+	{121000,	6,	39,	29},
+	{121250,	6,	31,	23},
+	{121750,	6,	23,	17},
+	{122000,	6,	42,	31},
+	{122614,	6,	30,	22},
+	{123000,	6,	41,	30},
+	{123379,	6,	37,	27},
+	{124000,	6,	51,	37},
+	{125000,	6,	25,	18},
+	{125250,	4,	13,	14},
+	{125750,	4,	27,	29},
+	{126000,	6,	21,	15},
+	{127000,	6,	24,	17},
+	{127250,	6,	41,	29},
+	{128000,	6,	27,	19},
+	{129000,	6,	43,	30},
+	{129859,	4,	25,	26},
+	{130000,	6,	26,	18},
+	{130250,	6,	42,	29},
+	{131000,	6,	32,	22},
+	{131500,	6,	38,	26},
+	{131850,	6,	41,	28},
+	{132000,	6,	22,	15},
+	{132750,	6,	28,	19},
+	{133000,	6,	34,	23},
+	{133330,	6,	37,	25},
+	{134000,	6,	61,	41},
+	{135000,	6,	21,	14},
+	{135250,	6,	167,	111},
+	{136000,	6,	62,	41},
+	{137000,	6,	35,	23},
+	{138000,	6,	23,	15},
+	{138500,	6,	40,	26},
+	{138750,	6,	37,	24},
+	{139000,	6,	34,	22},
+	{139050,	6,	34,	22},
+	{139054,	6,	34,	22},
+	{140000,	6,	28,	18},
+	{141000,	6,	36,	23},
+	{141500,	6,	22,	14},
+	{142000,	6,	30,	19},
+	{143000,	6,	27,	17},
+	{143472,	4,	17,	16},
+	{144000,	6,	24,	15},
+	{145000,	6,	29,	18},
+	{146000,	6,	47,	29},
+	{146250,	6,	26,	16},
+	{147000,	6,	49,	30},
+	{147891,	6,	23,	14},
+	{148000,	6,	23,	14},
+	{148250,	6,	28,	17},
+	{148352,	4,	100,	91},
+	{148500,	6,	33,	20},
+	{149000,	6,	48,	29},
+	{150000,	6,	25,	15},
+	{151000,	4,	19,	17},
+	{152000,	6,	27,	16},
+	{152280,	6,	44,	26},
+	{153000,	6,	34,	20},
+	{154000,	6,	53,	31},
+	{155000,	6,	31,	18},
+	{155250,	6,	50,	29},
+	{155750,	6,	45,	26},
+	{156000,	6,	26,	15},
+	{157000,	6,	61,	35},
+	{157500,	6,	28,	16},
+	{158000,	6,	65,	37},
+	{158250,	6,	44,	25},
+	{159000,	6,	53,	30},
+	{159500,	6,	39,	22},
+	{160000,	6,	32,	18},
+	{161000,	4,	31,	26},
+	{162000,	4,	18,	15},
+	{162162,	4,	131,	109},
+	{162500,	4,	53,	44},
+	{163000,	4,	29,	24},
+	{164000,	4,	17,	14},
+	{165000,	4,	22,	18},
+	{166000,	4,	32,	26},
+	{167000,	4,	26,	21},
+	{168000,	4,	46,	37},
+	{169000,	4,	104,	83},
+	{169128,	4,	64,	51},
+	{169500,	4,	39,	31},
+	{170000,	4,	34,	27},
+	{171000,	4,	19,	15},
+	{172000,	4,	51,	40},
+	{172750,	4,	32,	25},
+	{172800,	4,	32,	25},
+	{173000,	4,	41,	32},
+	{174000,	4,	49,	38},
+	{174787,	4,	22,	17},
+	{175000,	4,	35,	27},
+	{176000,	4,	30,	23},
+	{177000,	4,	38,	29},
+	{178000,	4,	29,	22},
+	{178500,	4,	37,	28},
+	{179000,	4,	53,	40},
+	{179500,	4,	73,	55},
+	{180000,	4,	20,	15},
+	{181000,	4,	55,	41},
+	{182000,	4,	31,	23},
+	{183000,	4,	42,	31},
+	{184000,	4,	30,	22},
+	{184750,	4,	26,	19},
+	{185000,	4,	37,	27},
+	{186000,	4,	51,	37},
+	{187000,	4,	36,	26},
+	{188000,	4,	32,	23},
+	{189000,	4,	21,	15},
+	{190000,	4,	38,	27},
+	{190960,	4,	41,	29},
+	{191000,	4,	41,	29},
+	{192000,	4,	27,	19},
+	{192250,	4,	37,	26},
+	{193000,	4,	20,	14},
+	{193250,	4,	53,	37},
+	{194000,	4,	23,	16},
+	{194208,	4,	23,	16},
+	{195000,	4,	26,	18},
+	{196000,	4,	45,	31},
+	{197000,	4,	35,	24},
+	{197750,	4,	41,	28},
+	{198000,	4,	22,	15},
+	{198500,	4,	25,	17},
+	{199000,	4,	28,	19},
+	{200000,	4,	37,	25},
+	{201000,	4,	61,	41},
+	{202000,	4,	112,	75},
+	{202500,	4,	21,	14},
+	{203000,	4,	146,	97},
+	{204000,	4,	62,	41},
+	{204750,	4,	44,	29},
+	{205000,	4,	38,	25},
+	{206000,	4,	29,	19},
+	{207000,	4,	23,	15},
+	{207500,	4,	40,	26},
+	{208000,	4,	37,	24},
+	{208900,	4,	48,	31},
+	{209000,	4,	48,	31},
+	{209250,	4,	31,	20},
+	{210000,	4,	28,	18},
+	{211000,	4,	25,	16},
+	{212000,	4,	22,	14},
+	{213000,	4,	30,	19},
+	{213750,	4,	38,	24},
+	{214000,	4,	46,	29},
+	{214750,	4,	35,	22},
+	{215000,	4,	43,	27},
+	{216000,	4,	24,	15},
+	{217000,	4,	37,	23},
+	{218000,	4,	42,	26},
+	{218250,	4,	42,	26},
+	{218750,	4,	34,	21},
+	{219000,	4,	47,	29},
+	{220000,	4,	44,	27},
+	{220640,	4,	49,	30},
+	{220750,	4,	36,	22},
+	{221000,	4,	36,	22},
+	{222000,	4,	23,	14},
+	{222525,	4,	28,	17},
+	{222750,	4,	33,	20},
+	{227000,	4,	37,	22},
+	{230250,	4,	29,	17},
+	{233500,	4,	38,	22},
+	{235000,	4,	40,	23},
+	{238000,	4,	30,	17},
+	{241500,	2,	17,	19},
+	{245250,	2,	20,	22},
+	{247750,	2,	22,	24},
+	{253250,	2,	15,	16},
+	{256250,	2,	18,	19},
+	{262500,	2,	31,	32},
+	{267250,	2,	66,	67},
+	{268500,	2,	94,	95},
+	{270000,	2,	14,	14},
+	{272500,	2,	77,	76},
+	{273750,	2,	57,	56},
+	{280750,	2,	24,	23},
+	{281250,	2,	23,	22},
+	{286000,	2,	17,	16},
+	{291750,	2,	26,	24},
+	{296703,	2,	56,	51},
+	{297000,	2,	22,	20},
+	{298000,	2,	21,	19},
+};
+
+static void intel_ddi_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+	int port = intel_ddi_get_encoder_port(intel_encoder);
+	int pipe = intel_crtc->pipe;
+	int type = intel_encoder->type;
+
+	DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
+		      port_name(port), pipe_name(pipe));
+
+	intel_crtc->eld_vld = false;
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+		struct intel_digital_port *intel_dig_port =
+			enc_to_dig_port(encoder);
+
+		intel_dp->DP = intel_dig_port->saved_port_bits |
+			       DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+		switch (intel_dp->lane_count) {
+		case 1:
+			intel_dp->DP |= DDI_PORT_WIDTH_X1;
+			break;
+		case 2:
+			intel_dp->DP |= DDI_PORT_WIDTH_X2;
+			break;
+		case 4:
+			intel_dp->DP |= DDI_PORT_WIDTH_X4;
+			break;
+		default:
+			intel_dp->DP |= DDI_PORT_WIDTH_X4;
+			WARN(1, "Unexpected DP lane count %d\n",
+			     intel_dp->lane_count);
+			break;
+		}
+
+		if (intel_dp->has_audio) {
+			DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
+					 pipe_name(intel_crtc->pipe));
+
+			/* write eld */
+			DRM_DEBUG_DRIVER("DP audio: write eld information\n");
+			intel_write_eld(encoder, adjusted_mode);
+		}
+
+		intel_dp_init_link_config(intel_dp);
+
+	} else if (type == INTEL_OUTPUT_HDMI) {
+		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+		if (intel_hdmi->has_audio) {
+			/* Proper support for digital audio needs new logic
+			 * and a new set of registers, so we leave it for future
+			 * patch bombing.
+			 */
+			DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
+					 pipe_name(intel_crtc->pipe));
+
+			/* write eld */
+			DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+			intel_write_eld(encoder, adjusted_mode);
+		}
+
+		intel_hdmi->set_infoframes(encoder, adjusted_mode);
+	}
+}
+
+static struct intel_encoder *
+intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *intel_encoder, *ret = NULL;
+	int num_encoders = 0;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		ret = intel_encoder;
+		num_encoders++;
+	}
+
+	if (num_encoders != 1)
+		WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
+		     intel_crtc->pipe);
+
+	BUG_ON(ret == NULL);
+	return ret;
+}
+
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t val;
+
+	switch (intel_crtc->ddi_pll_sel) {
+	case PORT_CLK_SEL_SPLL:
+		plls->spll_refcount--;
+		if (plls->spll_refcount == 0) {
+			DRM_DEBUG_KMS("Disabling SPLL\n");
+			val = I915_READ(SPLL_CTL);
+			WARN_ON(!(val & SPLL_PLL_ENABLE));
+			I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+			POSTING_READ(SPLL_CTL);
+		}
+		break;
+	case PORT_CLK_SEL_WRPLL1:
+		plls->wrpll1_refcount--;
+		if (plls->wrpll1_refcount == 0) {
+			DRM_DEBUG_KMS("Disabling WRPLL 1\n");
+			val = I915_READ(WRPLL_CTL1);
+			WARN_ON(!(val & WRPLL_PLL_ENABLE));
+			I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
+			POSTING_READ(WRPLL_CTL1);
+		}
+		break;
+	case PORT_CLK_SEL_WRPLL2:
+		plls->wrpll2_refcount--;
+		if (plls->wrpll2_refcount == 0) {
+			DRM_DEBUG_KMS("Disabling WRPLL 2\n");
+			val = I915_READ(WRPLL_CTL2);
+			WARN_ON(!(val & WRPLL_PLL_ENABLE));
+			I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
+			POSTING_READ(WRPLL_CTL2);
+		}
+		break;
+	}
+
+	WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
+	WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
+	WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
+
+	intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
+}
+
+static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
+{
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
+		if (clock <= wrpll_tmds_clock_table[i].clock)
+			break;
+
+	if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
+		i--;
+
+	*p = wrpll_tmds_clock_table[i].p;
+	*n2 = wrpll_tmds_clock_table[i].n2;
+	*r2 = wrpll_tmds_clock_table[i].r2;
+
+	if (wrpll_tmds_clock_table[i].clock != clock)
+		DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
+			 wrpll_tmds_clock_table[i].clock, clock);
+
+	DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+		      clock, *p, *n2, *r2);
+}
+
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+	int type = intel_encoder->type;
+	enum pipe pipe = intel_crtc->pipe;
+	uint32_t reg, val;
+
+	/* TODO: reuse PLLs when possible (compare values) */
+
+	intel_ddi_put_crtc_pll(crtc);
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		switch (intel_dp->link_bw) {
+		case DP_LINK_BW_1_62:
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+			break;
+		case DP_LINK_BW_2_7:
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+			break;
+		case DP_LINK_BW_5_4:
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+			break;
+		default:
+			DRM_ERROR("Link bandwidth %d unsupported\n",
+				  intel_dp->link_bw);
+			return false;
+		}
+
+		/* We don't need to turn any PLL on because we'll use LCPLL. */
+		return true;
+
+	} else if (type == INTEL_OUTPUT_HDMI) {
+		int p, n2, r2;
+
+		if (plls->wrpll1_refcount == 0) {
+			DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
+				      pipe_name(pipe));
+			plls->wrpll1_refcount++;
+			reg = WRPLL_CTL1;
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+		} else if (plls->wrpll2_refcount == 0) {
+			DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
+				      pipe_name(pipe));
+			plls->wrpll2_refcount++;
+			reg = WRPLL_CTL2;
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+		} else {
+			DRM_ERROR("No WRPLLs available!\n");
+			return false;
+		}
+
+		WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
+		     "WRPLL already enabled\n");
+
+		intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
+
+		val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+		      WRPLL_DIVIDER_POST(p);
+
+	} else if (type == INTEL_OUTPUT_ANALOG) {
+		if (plls->spll_refcount == 0) {
+			DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
+				      pipe_name(pipe));
+			plls->spll_refcount++;
+			reg = SPLL_CTL;
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+		} else {
+			DRM_ERROR("SPLL already in use\n");
+			return false;
+		}
+
+		WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
+		     "SPLL already enabled\n");
+
+		val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+
+	} else {
+		WARN(1, "Invalid DDI encoder type %d\n", type);
+		return false;
+	}
+
+	I915_WRITE(reg, val);
+	udelay(20);
+
+	return true;
+}
+
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+	int type = intel_encoder->type;
+	uint32_t temp;
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+		temp = TRANS_MSA_SYNC_CLK;
+		switch (intel_crtc->config.pipe_bpp) {
+		case 18:
+			temp |= TRANS_MSA_6_BPC;
+			break;
+		case 24:
+			temp |= TRANS_MSA_8_BPC;
+			break;
+		case 30:
+			temp |= TRANS_MSA_10_BPC;
+			break;
+		case 36:
+			temp |= TRANS_MSA_12_BPC;
+			break;
+		default:
+			BUG();
+		}
+		I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
+	}
+}
+
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	enum pipe pipe = intel_crtc->pipe;
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	int type = intel_encoder->type;
+	uint32_t temp;
+
+	/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in the selected mode */
+	temp = TRANS_DDI_FUNC_ENABLE;
+	temp |= TRANS_DDI_SELECT_PORT(port);
+
+	switch (intel_crtc->config.pipe_bpp) {
+	case 18:
+		temp |= TRANS_DDI_BPC_6;
+		break;
+	case 24:
+		temp |= TRANS_DDI_BPC_8;
+		break;
+	case 30:
+		temp |= TRANS_DDI_BPC_10;
+		break;
+	case 36:
+		temp |= TRANS_DDI_BPC_12;
+		break;
+	default:
+		BUG();
+	}
+
+	if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+		temp |= TRANS_DDI_PVSYNC;
+	if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+		temp |= TRANS_DDI_PHSYNC;
+
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		switch (pipe) {
+		case PIPE_A:
+			/* Can only use the always-on power well for eDP when
+			 * not using the panel fitter, and when not using motion
+			 * blur mitigation (which we don't support). */
+			if (dev_priv->pch_pf_size)
+				temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+			else
+				temp |= TRANS_DDI_EDP_INPUT_A_ON;
+			break;
+		case PIPE_B:
+			temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+			break;
+		case PIPE_C:
+			temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+
+	if (type == INTEL_OUTPUT_HDMI) {
+		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+		if (intel_hdmi->has_hdmi_sink)
+			temp |= TRANS_DDI_MODE_SELECT_HDMI;
+		else
+			temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+	} else if (type == INTEL_OUTPUT_ANALOG) {
+		temp |= TRANS_DDI_MODE_SELECT_FDI;
+		temp |= (intel_crtc->fdi_lanes - 1) << 1;
+
+	} else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+		   type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+		switch (intel_dp->lane_count) {
+		case 1:
+			temp |= TRANS_DDI_PORT_WIDTH_X1;
+			break;
+		case 2:
+			temp |= TRANS_DDI_PORT_WIDTH_X2;
+			break;
+		case 4:
+			temp |= TRANS_DDI_PORT_WIDTH_X4;
+			break;
+		default:
+			temp |= TRANS_DDI_PORT_WIDTH_X4;
+			WARN(1, "Unsupported lane count %d\n",
+			     intel_dp->lane_count);
+		}
+
+	} else {
+		WARN(1, "Invalid encoder type %d for pipe %d\n",
+		     intel_encoder->type, pipe);
+	}
+
+	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+				       enum transcoder cpu_transcoder)
+{
+	uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+	uint32_t val = I915_READ(reg);
+
+	val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+	val |= TRANS_DDI_PORT_NONE;
+	I915_WRITE(reg, val);
+}
+
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
+{
+	struct drm_device *dev = intel_connector->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder = intel_connector->encoder;
+	int type = intel_connector->base.connector_type;
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	enum pipe pipe = 0;
+	enum transcoder cpu_transcoder;
+	uint32_t tmp;
+
+	if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
+		return false;
+
+	if (port == PORT_A)
+		cpu_transcoder = TRANSCODER_EDP;
+	else
+		cpu_transcoder = (enum transcoder) pipe;
+
+	tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+	switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
+	case TRANS_DDI_MODE_SELECT_HDMI:
+	case TRANS_DDI_MODE_SELECT_DVI:
+		return (type == DRM_MODE_CONNECTOR_HDMIA);
+
+	case TRANS_DDI_MODE_SELECT_DP_SST:
+		if (type == DRM_MODE_CONNECTOR_eDP)
+			return true;
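+		/* fall through - DP SST may also drive a regular DP connector */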
+	case TRANS_DDI_MODE_SELECT_DP_MST:
+		return (type == DRM_MODE_CONNECTOR_DisplayPort);
+
+	case TRANS_DDI_MODE_SELECT_FDI:
+		return (type == DRM_MODE_CONNECTOR_VGA);
+
+	default:
+		return false;
+	}
+}
+
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+			    enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_ddi_get_encoder_port(encoder);
+	u32 tmp;
+	int i;
+
+	tmp = I915_READ(DDI_BUF_CTL(port));
+
+	if (!(tmp & DDI_BUF_CTL_ENABLE))
+		return false;
+
+	if (port == PORT_A) {
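+		/* DDI A is tied to the eDP transcoder; the input select
+		 * field tells us which pipe is feeding it.
+		 */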
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+		case TRANS_DDI_EDP_INPUT_A_ON:
+		case TRANS_DDI_EDP_INPUT_A_ONOFF:
+			*pipe = PIPE_A;
+			break;
+		case TRANS_DDI_EDP_INPUT_B_ONOFF:
+			*pipe = PIPE_B;
+			break;
+		case TRANS_DDI_EDP_INPUT_C_ONOFF:
+			*pipe = PIPE_C;
+			break;
+		}
+
+		return true;
+	} else {
+		for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+			tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+			if ((tmp & TRANS_DDI_PORT_MASK)
+			    == TRANS_DDI_SELECT_PORT(port)) {
+				*pipe = i;
+				return true;
+			}
+		}
+	}
+
+	DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
+
+	return false;
+}
+
+static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
+				       enum pipe pipe)
+{
+	uint32_t temp, ret;
+	enum port port = I915_MAX_PORTS;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+	int i;
+
+	if (cpu_transcoder == TRANSCODER_EDP) {
+		port = PORT_A;
+	} else {
+		temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+		temp &= TRANS_DDI_PORT_MASK;
+
+		for (i = PORT_B; i <= PORT_E; i++)
+			if (temp == TRANS_DDI_SELECT_PORT(i))
+				port = i;
+	}
+
+	if (port == I915_MAX_PORTS) {
+		WARN(1, "Pipe %c enabled on an unknown port\n",
+		     pipe_name(pipe));
+		ret = PORT_CLK_SEL_NONE;
+	} else {
+		ret = I915_READ(PORT_CLK_SEL(port));
+		DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
+			      "0x%08x\n", pipe_name(pipe), port_name(port),
+			      ret);
+	}
+
+	return ret;
+}
+
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+	struct intel_crtc *intel_crtc;
+
+	dev_priv->ddi_plls.spll_refcount = 0;
+	dev_priv->ddi_plls.wrpll1_refcount = 0;
+	dev_priv->ddi_plls.wrpll2_refcount = 0;
+
+	for_each_pipe(pipe) {
+		intel_crtc =
+			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+
+		if (!intel_crtc->active) {
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
+			continue;
+		}
+
+		intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
+								 pipe);
+
+		switch (intel_crtc->ddi_pll_sel) {
+		case PORT_CLK_SEL_SPLL:
+			dev_priv->ddi_plls.spll_refcount++;
+			break;
+		case PORT_CLK_SEL_WRPLL1:
+			dev_priv->ddi_plls.wrpll1_refcount++;
+			break;
+		case PORT_CLK_SEL_WRPLL2:
+			dev_priv->ddi_plls.wrpll2_refcount++;
+			break;
+		}
+	}
+}
+
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+
+	if (cpu_transcoder != TRANSCODER_EDP)
+		I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+			   TRANS_CLK_SEL_PORT(port));
+}
+
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+
+	if (cpu_transcoder != TRANSCODER_EDP)
+		I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+			   TRANS_CLK_SEL_DISABLED);
+}
+
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	int type = intel_encoder->type;
+
+	if (type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+		ironlake_edp_panel_vdd_on(intel_dp);
+		ironlake_edp_panel_on(intel_dp);
+		ironlake_edp_panel_vdd_off(intel_dp, true);
+	}
+
+	WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
+	I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+		intel_dp_start_link_train(intel_dp);
+		intel_dp_complete_link_train(intel_dp);
+		if (port != PORT_A)
+			intel_dp_stop_link_train(intel_dp);
+	}
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	int type = intel_encoder->type;
+	uint32_t val;
+	bool wait = false;
+
+	val = I915_READ(DDI_BUF_CTL(port));
+	if (val & DDI_BUF_CTL_ENABLE) {
+		val &= ~DDI_BUF_CTL_ENABLE;
+		I915_WRITE(DDI_BUF_CTL(port), val);
+		wait = true;
+	}
+
+	val = I915_READ(DP_TP_CTL(port));
+	val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+	val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+	I915_WRITE(DP_TP_CTL(port), val);
+
+	if (wait)
+		intel_wait_ddi_buf_idle(dev_priv, port);
+
+	if (type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+		ironlake_edp_panel_vdd_on(intel_dp);
+		ironlake_edp_panel_off(intel_dp);
+	}
+
+	I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_ddi_get_encoder_port(intel_encoder);
+	int type = intel_encoder->type;
+	uint32_t tmp;
+
+	if (type == INTEL_OUTPUT_HDMI) {
+		struct intel_digital_port *intel_dig_port =
+			enc_to_dig_port(encoder);
+
+		/* In HDMI/DVI mode, the port width, and swing/emphasis values
+		 * are ignored so nothing special needs to be done besides
+		 * enabling the port.
+		 */
+		I915_WRITE(DDI_BUF_CTL(port),
+			   intel_dig_port->saved_port_bits |
+			   DDI_BUF_CTL_ENABLE);
+	} else if (type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		if (port == PORT_A)
+			intel_dp_stop_link_train(intel_dp);
+
+		ironlake_edp_backlight_on(intel_dp);
+	}
+
+	if (intel_crtc->eld_vld) {
+		tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+		tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+		I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+	}
+}
+
+static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int type = intel_encoder->type;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+	tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
+	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+	if (type == INTEL_OUTPUT_EDP) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		ironlake_edp_backlight_off(intel_dp);
+	}
+}
+
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+	if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+		return 450;
+	else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
+		 LCPLL_CLK_FREQ_450)
+		return 450;
+	else if (IS_ULT(dev_priv->dev))
+		return 338;
+	else
+		return 540;
+}
+
+void intel_ddi_pll_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t val = I915_READ(LCPLL_CTL);
+
+	/* The LCPLL register should be turned on by the BIOS. For now let's
+	 * just check its state and print errors in case something is wrong.
+	 * Don't even try to turn it on.
+	 */
+
+	DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
+		      intel_ddi_get_cdclk_freq(dev_priv));
+
+	if (val & LCPLL_CD_SOURCE_FCLK)
+		DRM_ERROR("CDCLK source is not LCPLL\n");
+
+	if (val & LCPLL_PLL_DISABLE)
+		DRM_ERROR("LCPLL is disabled\n");
+}
+
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
+{
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	enum port port = intel_dig_port->port;
+	uint32_t val;
+	bool wait = false;
+
+	if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+		val = I915_READ(DDI_BUF_CTL(port));
+		if (val & DDI_BUF_CTL_ENABLE) {
+			val &= ~DDI_BUF_CTL_ENABLE;
+			I915_WRITE(DDI_BUF_CTL(port), val);
+			wait = true;
+		}
+
+		val = I915_READ(DP_TP_CTL(port));
+		val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+		val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+		I915_WRITE(DP_TP_CTL(port), val);
+		POSTING_READ(DP_TP_CTL(port));
+
+		if (wait)
+			intel_wait_ddi_buf_idle(dev_priv, port);
+	}
+
+	val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+	      DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+	if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+		val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+	I915_WRITE(DP_TP_CTL(port), val);
+	POSTING_READ(DP_TP_CTL(port));
+
+	intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+	I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
+	POSTING_READ(DDI_BUF_CTL(port));
+
+	udelay(600);
+}
+
+void intel_ddi_fdi_disable(struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+	uint32_t val;
+
+	intel_ddi_post_disable(intel_encoder);
+
+	val = I915_READ(_FDI_RXA_CTL);
+	val &= ~FDI_RX_ENABLE;
+	I915_WRITE(_FDI_RXA_CTL, val);
+
+	val = I915_READ(_FDI_RXA_MISC);
+	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+	val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+	I915_WRITE(_FDI_RXA_MISC, val);
+
+	val = I915_READ(_FDI_RXA_CTL);
+	val &= ~FDI_PCDCLK;
+	I915_WRITE(_FDI_RXA_CTL, val);
+
+	val = I915_READ(_FDI_RXA_CTL);
+	val &= ~FDI_RX_PLL_ENABLE;
+	I915_WRITE(_FDI_RXA_CTL, val);
+}
+
+static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+	int type = intel_encoder->type;
+
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
+		intel_dp_check_link_status(intel_dp);
+}
+
+static void intel_ddi_destroy(struct drm_encoder *encoder)
+{
+	/* HDMI has nothing special to destroy, so we can go with this. */
+	intel_dp_encoder_destroy(encoder);
+}
+
+static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_config *pipe_config)
+{
+	int type = encoder->type;
+
+	WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
+
+	if (type == INTEL_OUTPUT_HDMI)
+		return intel_hdmi_compute_config(encoder, pipe_config);
+	else
+		return intel_dp_compute_config(encoder, pipe_config);
+}
+
+static const struct drm_encoder_funcs intel_ddi_funcs = {
+	.destroy = intel_ddi_destroy,
+};
+
+static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
+	.mode_set = intel_ddi_mode_set,
+};
+
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *intel_dig_port;
+	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
+	struct intel_connector *hdmi_connector = NULL;
+	struct intel_connector *dp_connector = NULL;
+
+	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	if (!intel_dig_port)
+		return;
+
+	dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!dp_connector) {
+		kfree(intel_dig_port);
+		return;
+	}
+
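+	/* DDI port A is eDP-only, so it gets no HDMI connector */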
+	if (port != PORT_A) {
+		hdmi_connector = kzalloc(sizeof(struct intel_connector),
+					 GFP_KERNEL);
+		if (!hdmi_connector) {
+			kfree(dp_connector);
+			kfree(intel_dig_port);
+			return;
+		}
+	}
+
+	intel_encoder = &intel_dig_port->base;
+	encoder = &intel_encoder->base;
+
+	drm_encoder_init(dev, encoder, &intel_ddi_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+	drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
+
+	intel_encoder->compute_config = intel_ddi_compute_config;
+	intel_encoder->enable = intel_enable_ddi;
+	intel_encoder->pre_enable = intel_ddi_pre_enable;
+	intel_encoder->disable = intel_disable_ddi;
+	intel_encoder->post_disable = intel_ddi_post_disable;
+	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+
+	intel_dig_port->port = port;
+	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+					  (DDI_BUF_PORT_REVERSAL |
+					   DDI_A_4_LANES);
+	if (hdmi_connector)
+		intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+	intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+
+	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	intel_encoder->cloneable = false;
+	intel_encoder->hot_plug = intel_ddi_hot_plug;
+
+	if (hdmi_connector)
+		intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
+	intel_dp_init_connector(intel_dig_port, dp_connector);
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_display.c b/linux-imx/drivers/gpu/drm/i915/intel_display.c
new file mode 100644
index 0000000..8814b0d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_display.c
@@ -0,0 +1,9673 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vgaarb.h>
+#include <drm/drm_edid.h>
+#include <drm/drmP.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/dma_remapping.h>
+
+bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
+static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
+
+typedef struct {
+	/* given values */
+	int n;
+	int m1, m2;
+	int p1, p2;
+	/* derived values */
+	int	dot;
+	int	vco;
+	int	m;
+	int	p;
+} intel_clock_t;
+
+typedef struct {
+	int	min, max;
+} intel_range_t;
+
+typedef struct {
+	int	dot_limit;
+	int	p2_slow, p2_fast;
+} intel_p2_t;
+
+#define INTEL_P2_NUM		      2
+typedef struct intel_limit intel_limit_t;
+struct intel_limit {
+	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
+	intel_p2_t	    p2;
+	/**
+	 * find_pll() - Find the best values for the PLL
+	 * @limit: limits for the PLL
+	 * @crtc: current CRTC
+	 * @target: target frequency in kHz
+	 * @refclk: reference clock frequency in kHz
+	 * @match_clock: if provided, the P divider of @best_clock must
+	 *               match the P divider from @match_clock; used for
+	 *               LVDS downclocking
+	 * @best_clock: best PLL values found
+	 *
+	 * Returns true on success, false on failure.
+	 */
+	bool (*find_pll)(const intel_limit_t *limit,
+			 struct drm_crtc *crtc,
+			 int target, int refclk,
+			 intel_clock_t *match_clock,
+			 intel_clock_t *best_clock);
+};
+
+/* FDI */
+#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */
+
+int
+intel_pch_rawclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!HAS_PCH_SPLIT(dev));
+
+	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+}
+
+static bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+		    int target, int refclk, intel_clock_t *match_clock,
+		    intel_clock_t *best_clock);
+static bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+			int target, int refclk, intel_clock_t *match_clock,
+			intel_clock_t *best_clock);
+
+static bool
+intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
+		      int target, int refclk, intel_clock_t *match_clock,
+		      intel_clock_t *best_clock);
+static bool
+intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
+			   int target, int refclk, intel_clock_t *match_clock,
+			   intel_clock_t *best_clock);
+
+static bool
+intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
+			int target, int refclk, intel_clock_t *match_clock,
+			intel_clock_t *best_clock);
+
+static inline u32 /* units of 100MHz */
+intel_fdi_link_freq(struct drm_device *dev)
+{
+	if (IS_GEN5(dev)) {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
+	} else
+		return 27;
+}
+
+static const intel_limit_t intel_limits_i8xx_dvo = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 930000, .max = 1400000 },
+	.n = { .min = 3, .max = 16 },
+	.m = { .min = 96, .max = 140 },
+	.m1 = { .min = 18, .max = 26 },
+	.m2 = { .min = 6, .max = 16 },
+	.p = { .min = 4, .max = 128 },
+	.p1 = { .min = 2, .max = 33 },
+	.p2 = { .dot_limit = 165000,
+		.p2_slow = 4, .p2_fast = 2 },
+	.find_pll = intel_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_i8xx_lvds = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 930000, .max = 1400000 },
+	.n = { .min = 3, .max = 16 },
+	.m = { .min = 96, .max = 140 },
+	.m1 = { .min = 18, .max = 26 },
+	.m2 = { .min = 6, .max = 16 },
+	.p = { .min = 4, .max = 128 },
+	.p1 = { .min = 1, .max = 6 },
+	.p2 = { .dot_limit = 165000,
+		.p2_slow = 14, .p2_fast = 7 },
+	.find_pll = intel_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_i9xx_sdvo = {
+	.dot = { .min = 20000, .max = 400000 },
+	.vco = { .min = 1400000, .max = 2800000 },
+	.n = { .min = 1, .max = 6 },
+	.m = { .min = 70, .max = 120 },
+	.m1 = { .min = 8, .max = 18 },
+	.m2 = { .min = 3, .max = 7 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 200000,
+		.p2_slow = 10, .p2_fast = 5 },
+	.find_pll = intel_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_i9xx_lvds = {
+	.dot = { .min = 20000, .max = 400000 },
+	.vco = { .min = 1400000, .max = 2800000 },
+	.n = { .min = 1, .max = 6 },
+	.m = { .min = 70, .max = 120 },
+	.m1 = { .min = 8, .max = 18 },
+	.m2 = { .min = 3, .max = 7 },
+	.p = { .min = 7, .max = 98 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 112000,
+		.p2_slow = 14, .p2_fast = 7 },
+	.find_pll = intel_find_best_PLL,
+};
+
+
+static const intel_limit_t intel_limits_g4x_sdvo = {
+	.dot = { .min = 25000, .max = 270000 },
+	.vco = { .min = 1750000, .max = 3500000},
+	.n = { .min = 1, .max = 4 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 17, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 10, .max = 30 },
+	.p1 = { .min = 1, .max = 3},
+	.p2 = { .dot_limit = 270000,
+		.p2_slow = 10,
+		.p2_fast = 10
+	},
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_g4x_hdmi = {
+	.dot = { .min = 22000, .max = 400000 },
+	.vco = { .min = 1750000, .max = 3500000},
+	.n = { .min = 1, .max = 4 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 16, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8},
+	.p2 = { .dot_limit = 165000,
+		.p2_slow = 10, .p2_fast = 5 },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+	.dot = { .min = 20000, .max = 115000 },
+	.vco = { .min = 1750000, .max = 3500000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 17, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 28, .max = 112 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 0,
+		.p2_slow = 14, .p2_fast = 14
+	},
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+	.dot = { .min = 80000, .max = 224000 },
+	.vco = { .min = 1750000, .max = 3500000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 104, .max = 138 },
+	.m1 = { .min = 17, .max = 23 },
+	.m2 = { .min = 5, .max = 11 },
+	.p = { .min = 14, .max = 42 },
+	.p1 = { .min = 2, .max = 6 },
+	.p2 = { .dot_limit = 0,
+		.p2_slow = 7, .p2_fast = 7
+	},
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_g4x_display_port = {
+	.dot = { .min = 161670, .max = 227000 },
+	.vco = { .min = 1750000, .max = 3500000},
+	.n = { .min = 1, .max = 2 },
+	.m = { .min = 97, .max = 108 },
+	.m1 = { .min = 0x10, .max = 0x12 },
+	.m2 = { .min = 0x05, .max = 0x06 },
+	.p = { .min = 10, .max = 20 },
+	.p1 = { .min = 1, .max = 2},
+	.p2 = { .dot_limit = 0,
+		.p2_slow = 10, .p2_fast = 10 },
+	.find_pll = intel_find_pll_g4x_dp,
+};
+
+static const intel_limit_t intel_limits_pineview_sdvo = {
+	.dot = { .min = 20000, .max = 400000},
+	.vco = { .min = 1700000, .max = 3500000 },
+	/* Pineview's Ncounter is a ring counter */
+	.n = { .min = 3, .max = 6 },
+	.m = { .min = 2, .max = 256 },
+	/* Pineview only has one combined m divider, which we treat as m2. */
+	.m1 = { .min = 0, .max = 0 },
+	.m2 = { .min = 0, .max = 254 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 200000,
+		.p2_slow = 10, .p2_fast = 5 },
+	.find_pll = intel_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_pineview_lvds = {
+	.dot = { .min = 20000, .max = 400000 },
+	.vco = { .min = 1700000, .max = 3500000 },
+	.n = { .min = 3, .max = 6 },
+	.m = { .min = 2, .max = 256 },
+	.m1 = { .min = 0, .max = 0 },
+	.m2 = { .min = 0, .max = 254 },
+	.p = { .min = 7, .max = 112 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 112000,
+		.p2_slow = 14, .p2_fast = 14 },
+	.find_pll = intel_find_best_PLL,
+};
+
+/* Ironlake / Sandybridge
+ *
+ * We calculate the clock using (register_value + 2) for N/M1/M2, so the
+ * range values given here are (actual_value - 2).
+ */
+static const intel_limit_t intel_limits_ironlake_dac = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 5 },
+	.m = { .min = 79, .max = 127 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 5, .max = 80 },
+	.p1 = { .min = 1, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 10, .p2_fast = 5 },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 79, .max = 118 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 28, .max = 112 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 14, .p2_fast = 14 },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 79, .max = 127 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 14, .max = 56 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 7, .p2_fast = 7 },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+/* LVDS 100MHz refclk limits. */
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 2 },
+	.m = { .min = 79, .max = 126 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 28, .max = 112 },
+	.p1 = { .min = 2, .max = 8 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 14, .p2_fast = 14 },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000 },
+	.n = { .min = 1, .max = 3 },
+	.m = { .min = 79, .max = 126 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 14, .max = 42 },
+	.p1 = { .min = 2, .max = 6 },
+	.p2 = { .dot_limit = 225000,
+		.p2_slow = 7, .p2_fast = 7 },
+	.find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_display_port = {
+	.dot = { .min = 25000, .max = 350000 },
+	.vco = { .min = 1760000, .max = 3510000},
+	.n = { .min = 1, .max = 2 },
+	.m = { .min = 81, .max = 90 },
+	.m1 = { .min = 12, .max = 22 },
+	.m2 = { .min = 5, .max = 9 },
+	.p = { .min = 10, .max = 20 },
+	.p1 = { .min = 1, .max = 2},
+	.p2 = { .dot_limit = 0,
+		.p2_slow = 10, .p2_fast = 10 },
+	.find_pll = intel_find_pll_ironlake_dp,
+};
+
+static const intel_limit_t intel_limits_vlv_dac = {
+	.dot = { .min = 25000, .max = 270000 },
+	.vco = { .min = 4000000, .max = 6000000 },
+	.n = { .min = 1, .max = 7 },
+	.m = { .min = 22, .max = 450 }, /* guess */
+	.m1 = { .min = 2, .max = 3 },
+	.m2 = { .min = 11, .max = 156 },
+	.p = { .min = 10, .max = 30 },
+	.p1 = { .min = 2, .max = 3 },
+	.p2 = { .dot_limit = 270000,
+		.p2_slow = 2, .p2_fast = 20 },
+	.find_pll = intel_vlv_find_best_pll,
+};
+
+static const intel_limit_t intel_limits_vlv_hdmi = {
+	.dot = { .min = 20000, .max = 165000 },
+	.vco = { .min = 4000000, .max = 5994000},
+	.n = { .min = 1, .max = 7 },
+	.m = { .min = 60, .max = 300 }, /* guess */
+	.m1 = { .min = 2, .max = 3 },
+	.m2 = { .min = 11, .max = 156 },
+	.p = { .min = 10, .max = 30 },
+	.p1 = { .min = 2, .max = 3 },
+	.p2 = { .dot_limit = 270000,
+		.p2_slow = 2, .p2_fast = 20 },
+	.find_pll = intel_vlv_find_best_pll,
+};
+
+static const intel_limit_t intel_limits_vlv_dp = {
+	.dot = { .min = 25000, .max = 270000 },
+	.vco = { .min = 4000000, .max = 6000000 },
+	.n = { .min = 1, .max = 7 },
+	.m = { .min = 22, .max = 450 },
+	.m1 = { .min = 2, .max = 3 },
+	.m2 = { .min = 11, .max = 156 },
+	.p = { .min = 10, .max = 30 },
+	.p1 = { .min = 2, .max = 3 },
+	.p2 = { .dot_limit = 270000,
+		.p2_slow = 2, .p2_fast = 20 },
+	.find_pll = intel_vlv_find_best_pll,
+};
+
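+/*
+ * DPIO sideband access: wait for the port to go idle, latch the target
+ * register in DPIO_REG, kick a packet via DPIO_PKT and poll DPIO_BUSY
+ * until the transaction completes.
+ */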
+u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+		DRM_ERROR("DPIO idle wait timed out\n");
+		return 0;
+	}
+
+	I915_WRITE(DPIO_REG, reg);
+	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
+		   DPIO_BYTE);
+	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+		DRM_ERROR("DPIO read wait timed out\n");
+		return 0;
+	}
+
+	return I915_READ(DPIO_DATA);
+}
+
+static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
+			     u32 val)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+		DRM_ERROR("DPIO idle wait timed out\n");
+		return;
+	}
+
+	I915_WRITE(DPIO_DATA, val);
+	I915_WRITE(DPIO_REG, reg);
+	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
+		   DPIO_BYTE);
+	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
+		DRM_ERROR("DPIO write wait timed out\n");
+}
+
+static void vlv_init_dpio(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Reset the DPIO config */
+	I915_WRITE(DPIO_CTL, 0);
+	POSTING_READ(DPIO_CTL);
+	I915_WRITE(DPIO_CTL, 1);
+	POSTING_READ(DPIO_CTL);
+}
+
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+						int refclk)
+{
+	struct drm_device *dev = crtc->dev;
+	const intel_limit_t *limit;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		if (intel_is_dual_link_lvds(dev)) {
+			if (refclk == 100000)
+				limit = &intel_limits_ironlake_dual_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_dual_lvds;
+		} else {
+			if (refclk == 100000)
+				limit = &intel_limits_ironlake_single_lvds_100m;
+			else
+				limit = &intel_limits_ironlake_single_lvds;
+		}
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+		   intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+		limit = &intel_limits_ironlake_display_port;
+	else
+		limit = &intel_limits_ironlake_dac;
+
+	return limit;
+}
+
+static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	const intel_limit_t *limit;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		if (intel_is_dual_link_lvds(dev))
+			limit = &intel_limits_g4x_dual_channel_lvds;
+		else
+			limit = &intel_limits_g4x_single_channel_lvds;
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
+		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+		limit = &intel_limits_g4x_hdmi;
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
+		limit = &intel_limits_g4x_sdvo;
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+		limit = &intel_limits_g4x_display_port;
+	} else /* Fallback limits for any other outputs */
+		limit = &intel_limits_i9xx_sdvo;
+
+	return limit;
+}
+
+static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
+{
+	struct drm_device *dev = crtc->dev;
+	const intel_limit_t *limit;
+
+	if (HAS_PCH_SPLIT(dev))
+		limit = intel_ironlake_limit(crtc, refclk);
+	else if (IS_G4X(dev)) {
+		limit = intel_g4x_limit(crtc);
+	} else if (IS_PINEVIEW(dev)) {
+		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+			limit = &intel_limits_pineview_lvds;
+		else
+			limit = &intel_limits_pineview_sdvo;
+	} else if (IS_VALLEYVIEW(dev)) {
+		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
+			limit = &intel_limits_vlv_dac;
+		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+			limit = &intel_limits_vlv_hdmi;
+		else
+			limit = &intel_limits_vlv_dp;
+	} else if (!IS_GEN2(dev)) {
+		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+			limit = &intel_limits_i9xx_lvds;
+		else
+			limit = &intel_limits_i9xx_sdvo;
+	} else {
+		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+			limit = &intel_limits_i8xx_lvds;
+		else
+			limit = &intel_limits_i8xx_dvo;
+	}
+	return limit;
+}
+
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+static void pineview_clock(int refclk, intel_clock_t *clock)
+{
+	clock->m = clock->m2 + 2;
+	clock->p = clock->p1 * clock->p2;
+	clock->vco = refclk * clock->m / clock->n;
+	clock->dot = clock->vco / clock->p;
+}
+
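+/* i9xx dot clock: dot = refclk * (5*(m1+2) + (m2+2)) / (n+2) / (p1*p2) */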
+static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
+{
+	if (IS_PINEVIEW(dev)) {
+		pineview_clock(refclk, clock);
+		return;
+	}
+	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+	clock->p = clock->p1 * clock->p2;
+	clock->vco = refclk * clock->m / (clock->n + 2);
+	clock->dot = clock->vco / clock->p;
+}
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_encoder *encoder;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->type == type)
+			return true;
+
+	return false;
+}
+
+#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
+/**
+ * Returns whether the given set of divisors is valid for a given refclk with
+ * the given connectors.
+ */
+
+static bool intel_PLL_is_valid(struct drm_device *dev,
+			       const intel_limit_t *limit,
+			       const intel_clock_t *clock)
+{
+	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
+		INTELPllInvalid("p1 out of range\n");
+	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
+		INTELPllInvalid("p out of range\n");
+	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
+		INTELPllInvalid("m2 out of range\n");
+	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
+		INTELPllInvalid("m1 out of range\n");
+	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
+		INTELPllInvalid("m1 <= m2\n");
+	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
+		INTELPllInvalid("m out of range\n");
+	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
+		INTELPllInvalid("n out of range\n");
+	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+		INTELPllInvalid("vco out of range\n");
+	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
+	 * connector, etc., rather than just a single range.
+	 */
+	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+		INTELPllInvalid("dot out of range\n");
+
+	return true;
+}
+
+static bool
+intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+		    int target, int refclk, intel_clock_t *match_clock,
+		    intel_clock_t *best_clock)
+
+{
+	struct drm_device *dev = crtc->dev;
+	intel_clock_t clock;
+	int err = target;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		/*
+		 * For LVDS just rely on its current settings for dual-channel.
+		 * We haven't figured out how to reliably set up different
+		 * single/dual channel state, if we even can.
+		 */
+		if (intel_is_dual_link_lvds(dev))
+			clock.p2 = limit->p2.p2_fast;
+		else
+			clock.p2 = limit->p2.p2_slow;
+	} else {
+		if (target < limit->p2.dot_limit)
+			clock.p2 = limit->p2.p2_slow;
+		else
+			clock.p2 = limit->p2.p2_fast;
+	}
+
+	memset(best_clock, 0, sizeof(*best_clock));
+
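+	/* Exhaustively walk the divider ranges, keeping the combination whose
+	 * dot clock lands closest to the target. */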
+	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+	     clock.m1++) {
+		for (clock.m2 = limit->m2.min;
+		     clock.m2 <= limit->m2.max; clock.m2++) {
+			/* m1 is always 0 in Pineview */
+			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
+				break;
+			for (clock.n = limit->n.min;
+			     clock.n <= limit->n.max; clock.n++) {
+				for (clock.p1 = limit->p1.min;
+					clock.p1 <= limit->p1.max; clock.p1++) {
+					int this_err;
+
+					intel_clock(dev, refclk, &clock);
+					if (!intel_PLL_is_valid(dev, limit,
+								&clock))
+						continue;
+					if (match_clock &&
+					    clock.p != match_clock->p)
+						continue;
+
+					this_err = abs(clock.dot - target);
+					if (this_err < err) {
+						*best_clock = clock;
+						err = this_err;
+					}
+				}
+			}
+		}
+	}
+
+	return (err != target);
+}
+
+static bool
+intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+			int target, int refclk, intel_clock_t *match_clock,
+			intel_clock_t *best_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	intel_clock_t clock;
+	int max_n;
+	bool found;
+	/* approximately equals target * 0.00585 */
+	int err_most = (target >> 8) + (target >> 9);
+	found = false;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		int lvds_reg;
+
+		if (HAS_PCH_SPLIT(dev))
+			lvds_reg = PCH_LVDS;
+		else
+			lvds_reg = LVDS;
+		if (intel_is_dual_link_lvds(dev))
+			clock.p2 = limit->p2.p2_fast;
+		else
+			clock.p2 = limit->p2.p2_slow;
+	} else {
+		if (target < limit->p2.dot_limit)
+			clock.p2 = limit->p2.p2_slow;
+		else
+			clock.p2 = limit->p2.p2_fast;
+	}
+
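+	/* Scan dividers from the large end, shrinking max_n whenever a better
+	 * candidate is found; accept anything within err_most of the target. */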
+	memset(best_clock, 0, sizeof(*best_clock));
+	max_n = limit->n.max;
+	/* based on hardware requirement, prefer smaller n for precision */
+	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+		/* based on hardware requirement, prefer larger m1,m2 */
+		for (clock.m1 = limit->m1.max;
+		     clock.m1 >= limit->m1.min; clock.m1--) {
+			for (clock.m2 = limit->m2.max;
+			     clock.m2 >= limit->m2.min; clock.m2--) {
+				for (clock.p1 = limit->p1.max;
+				     clock.p1 >= limit->p1.min; clock.p1--) {
+					int this_err;
+
+					intel_clock(dev, refclk, &clock);
+					if (!intel_PLL_is_valid(dev, limit,
+								&clock))
+						continue;
+					if (match_clock &&
+					    clock.p != match_clock->p)
+						continue;
+
+					this_err = abs(clock.dot - target);
+					if (this_err < err_most) {
+						*best_clock = clock;
+						err_most = this_err;
+						max_n = clock.n;
+						found = true;
+					}
+				}
+			}
+		}
+	}
+	return found;
+}
+
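+/*
+ * DP links run at fixed rates, so no search is needed: pick a canned
+ * divider set for the 162MHz (target below 200000 kHz) or 270MHz case.
+ */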
+static bool
+intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+			   int target, int refclk, intel_clock_t *match_clock,
+			   intel_clock_t *best_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	intel_clock_t clock;
+
+	if (target < 200000) {
+		clock.n = 1;
+		clock.p1 = 2;
+		clock.p2 = 10;
+		clock.m1 = 12;
+		clock.m2 = 9;
+	} else {
+		clock.n = 2;
+		clock.p1 = 1;
+		clock.p2 = 10;
+		clock.m1 = 14;
+		clock.m2 = 8;
+	}
+	intel_clock(dev, refclk, &clock);
+	memcpy(best_clock, &clock, sizeof(intel_clock_t));
+	return true;
+}
+
+/* DisplayPort has only two frequencies, 162MHz and 270MHz */
+static bool
+intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+		      int target, int refclk, intel_clock_t *match_clock,
+		      intel_clock_t *best_clock)
+{
+	intel_clock_t clock;
+	if (target < 200000) {
+		clock.p1 = 2;
+		clock.p2 = 10;
+		clock.n = 2;
+		clock.m1 = 23;
+		clock.m2 = 8;
+	} else {
+		clock.p1 = 1;
+		clock.p2 = 10;
+		clock.n = 1;
+		clock.m1 = 14;
+		clock.m2 = 2;
+	}
+	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
+	clock.p = (clock.p1 * clock.p2);
+	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
+	clock.vco = 0;
+	memcpy(best_clock, &clock, sizeof(intel_clock_t));
+	return true;
+}
+static bool
+intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
+			int target, int refclk, intel_clock_t *match_clock,
+			intel_clock_t *best_clock)
+{
+	u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
+	u32 m, n, fastclk;
+	u32 updrate, minupdate, fracbits, p;
+	unsigned long bestppm, ppm, absppm;
+	int dotclk, flag;
+
+	flag = 0;
+	dotclk = target * 1000;
+	bestppm = 1000000;
+	ppm = absppm = 0;
+	fastclk = dotclk / (2*100);
+	updrate = 0;
+	minupdate = 19200;
+	fracbits = 1;
+	n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
+	bestm1 = bestm2 = bestp1 = bestp2 = 0;
+
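+	/* For each n/p1/p2/m1 candidate, derive m2 by rounding, then score the
+	 * result by its ppm deviation from fastclk; prefer lower ppm, and among
+	 * near-exact hits (< 100 ppm) the largest p1*p2 product. */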
+	/* based on hardware requirement, prefer smaller n for precision */
+	for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
+		updrate = refclk / n;
+		for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
+			for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
+				if (p2 > 10)
+					p2 = p2 - 1;
+				p = p1 * p2;
+				/* based on hardware requirement, prefer bigger m1,m2 values */
+				for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
+					m2 = (((2*(fastclk * p * n / m1 )) +
+					       refclk) / (2*refclk));
+					m = m1 * m2;
+					vco = updrate * m;
+					if (vco >= limit->vco.min && vco < limit->vco.max) {
+						ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
+						absppm = (ppm > 0) ? ppm : (-ppm);
+						if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
+							bestppm = 0;
+							flag = 1;
+						}
+						if (absppm < bestppm - 10) {
+							bestppm = absppm;
+							flag = 1;
+						}
+						if (flag) {
+							bestn = n;
+							bestm1 = m1;
+							bestm2 = m2;
+							bestp1 = p1;
+							bestp2 = p2;
+							flag = 0;
+						}
+					}
+				}
+			}
+		}
+	}
+	best_clock->n = bestn;
+	best_clock->m1 = bestm1;
+	best_clock->m2 = bestm2;
+	best_clock->p1 = bestp1;
+	best_clock->p2 = bestp2;
+
+	return true;
+}
+
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+					     enum pipe pipe)
+{
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	return intel_crtc->config.cpu_transcoder;
+}
+
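+/* On gen5+ the frame counter advances at vblank, so poll it for a change */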
+static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 frame, frame_reg = PIPEFRAME(pipe);
+
+	frame = I915_READ(frame_reg);
+
+	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
+		DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
+/**
+ * intel_wait_for_vblank - wait for vblank on a given pipe
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * Wait for vblank to occur on a given pipe.  Needed for various bits of
+ * mode setting code.
+ */
+void intel_wait_for_vblank(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipestat_reg = PIPESTAT(pipe);
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		ironlake_wait_for_vblank(dev, pipe);
+		return;
+	}
+
+	/* Clear existing vblank status. Note this will clear any other
+	 * sticky status fields as well.
+	 *
+	 * This races with i915_driver_irq_handler() with the result
+	 * that either function could miss a vblank event.  Here it is not
+	 * fatal, as we will either wait upon the next vblank interrupt or
+	 * timeout.  Generally speaking intel_wait_for_vblank() is only
+	 * called during modeset at which time the GPU should be idle and
+	 * should *not* be performing page flips and thus not waiting on
+	 * vblanks...
+	 * Currently, the result of us stealing a vblank from the irq
+	 * handler is that a single frame will be skipped during swapbuffers.
+	 */
+	I915_WRITE(pipestat_reg,
+		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
+
+	/* Wait for vblank interrupt bit to set */
+	if (wait_for(I915_READ(pipestat_reg) &
+		     PIPE_VBLANK_INTERRUPT_STATUS,
+		     50))
+		DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
+/*
+ * intel_wait_for_pipe_off - wait for pipe to turn off
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * After disabling a pipe, we can't wait for vblank in the usual way,
+ * spinning on the vblank interrupt status bit, since we won't actually
+ * see an interrupt when the pipe is disabled.
+ *
+ * On Gen4 and above:
+ *   wait for the pipe register state bit to turn off
+ *
+ * Otherwise:
+ *   wait for the display line value to settle (it usually
+ *   ends up stopping at the start of the next frame).
+ *
+ */
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		int reg = PIPECONF(cpu_transcoder);
+
+		/* Wait for the Pipe State to go off */
+		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
+			     100))
+			WARN(1, "pipe_off wait timed out\n");
+	} else {
+		u32 last_line, line_mask;
+		int reg = PIPEDSL(pipe);
+		unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+		if (IS_GEN2(dev))
+			line_mask = DSL_LINEMASK_GEN2;
+		else
+			line_mask = DSL_LINEMASK_GEN3;
+
+		/* Wait for the display line to settle */
+		do {
+			last_line = I915_READ(reg) & line_mask;
+			mdelay(5);
+		} while (((I915_READ(reg) & line_mask) != last_line) &&
+			 time_after(timeout, jiffies));
+		if (time_after(jiffies, timeout))
+			WARN(1, "pipe_off wait timed out\n");
+	}
+}
+
+/*
+ * ibx_digital_port_connected - is the specified port connected?
+ * @dev_priv: i915 private structure
+ * @port: the port to test
+ *
+ * Returns true if @port is connected, false otherwise.
+ */
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+				struct intel_digital_port *port)
+{
+	u32 bit;
+
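+	/* Hotplug live status is reported in SDEISR; IBX and CPT/PPT PCHs use
+	 * different bit layouts for it. */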
+	if (HAS_PCH_IBX(dev_priv->dev)) {
+		switch(port->port) {
+		case PORT_B:
+			bit = SDE_PORTB_HOTPLUG;
+			break;
+		case PORT_C:
+			bit = SDE_PORTC_HOTPLUG;
+			break;
+		case PORT_D:
+			bit = SDE_PORTD_HOTPLUG;
+			break;
+		default:
+			return true;
+		}
+	} else {
+		switch(port->port) {
+		case PORT_B:
+			bit = SDE_PORTB_HOTPLUG_CPT;
+			break;
+		case PORT_C:
+			bit = SDE_PORTC_HOTPLUG_CPT;
+			break;
+		case PORT_D:
+			bit = SDE_PORTD_HOTPLUG_CPT;
+			break;
+		default:
+			return true;
+		}
+	}
+
+	return I915_READ(SDEISR) & bit;
+}
+
+static const char *state_string(bool enabled)
+{
+	return enabled ? "on" : "off";
+}
+
+/* Only for pre-ILK configs */
+static void assert_pll(struct drm_i915_private *dev_priv,
+		       enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+
+	reg = DPLL(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & DPLL_VCO_ENABLE);
+	WARN(cur_state != state,
+	     "PLL state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+
+/* For ILK+ */
+static void assert_pch_pll(struct drm_i915_private *dev_priv,
+			   struct intel_pch_pll *pll,
+			   struct intel_crtc *crtc,
+			   bool state)
+{
+	u32 val;
+	bool cur_state;
+
+	if (HAS_PCH_LPT(dev_priv->dev)) {
+		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
+		return;
+	}
+
+	if (WARN(!pll,
+		  "asserting PCH PLL %s with no PLL\n", state_string(state)))
+		return;
+
+	val = I915_READ(pll->pll_reg);
+	cur_state = !!(val & DPLL_VCO_ENABLE);
+	WARN(cur_state != state,
+	     "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+	     pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+	/* Make sure the selected PLL is correctly attached to the transcoder */
+	if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
+		u32 pch_dpll;
+
+		pch_dpll = I915_READ(PCH_DPLL_SEL);
+		cur_state = pll->pll_reg == _PCH_DPLL_B;
+		if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+			  "PLL[%d] not attached to this transcoder %d: %08x\n",
+			  cur_state, crtc->pipe, pch_dpll)) {
+			cur_state = !!(val >> (4*crtc->pipe + 3));
+			WARN(cur_state != state,
+			     "PLL[%d] not %s on this transcoder %d: %08x\n",
+			     pll->pll_reg == _PCH_DPLL_B,
+			     state_string(state),
+			     crtc->pipe,
+			     val);
+		}
+	}
+}
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
+
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+
+	if (HAS_DDI(dev_priv->dev)) {
+		/* DDI does not have a specific FDI_TX register */
+		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+		val = I915_READ(reg);
+		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
+	} else {
+		reg = FDI_TX_CTL(pipe);
+		val = I915_READ(reg);
+		cur_state = !!(val & FDI_TX_ENABLE);
+	}
+	WARN(cur_state != state,
+	     "FDI TX state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
+#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
+
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+			  enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+
+	reg = FDI_RX_CTL(pipe);
+	val = I915_READ(reg);
+	cur_state = !!(val & FDI_RX_ENABLE);
+	WARN(cur_state != state,
+	     "FDI RX state assertion failure (expected %s, current %s)\n",
+	     state_string(state), state_string(cur_state));
+}
+#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
+#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
+
+static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
+				      enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/* ILK FDI PLL is always enabled */
+	if (dev_priv->info->gen == 5)
+		return;
+
+	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
+	if (HAS_DDI(dev_priv->dev))
+		return;
+
+	reg = FDI_TX_CTL(pipe);
+	val = I915_READ(reg);
+	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
+				      enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	reg = FDI_RX_CTL(pipe);
+	val = I915_READ(reg);
+	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+				  enum pipe pipe)
+{
+	int pp_reg, lvds_reg;
+	u32 val;
+	enum pipe panel_pipe = PIPE_A;
+	bool locked = true;
+
+	if (HAS_PCH_SPLIT(dev_priv->dev)) {
+		pp_reg = PCH_PP_CONTROL;
+		lvds_reg = PCH_LVDS;
+	} else {
+		pp_reg = PP_CONTROL;
+		lvds_reg = LVDS;
+	}
+
+	val = I915_READ(pp_reg);
+	if (!(val & PANEL_POWER_ON) ||
+	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
+		locked = false;
+
+	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
+		panel_pipe = PIPE_B;
+
+	WARN(panel_pipe == pipe && locked,
+	     "panel assertion failure, pipe %c regs locked\n",
+	     pipe_name(pipe));
+}
+
+void assert_pipe(struct drm_i915_private *dev_priv,
+		 enum pipe pipe, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+
+	/* if we need the pipe A quirk, the pipe must always be on */
+	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+		state = true;
+
+	if (!intel_using_power_well(dev_priv->dev) &&
+	    cpu_transcoder != TRANSCODER_EDP) {
+		cur_state = false;
+	} else {
+		reg = PIPECONF(cpu_transcoder);
+		val = I915_READ(reg);
+		cur_state = !!(val & PIPECONF_ENABLE);
+	}
+
+	WARN(cur_state != state,
+	     "pipe %c assertion failure (expected %s, current %s)\n",
+	     pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+
+static void assert_plane(struct drm_i915_private *dev_priv,
+			 enum plane plane, bool state)
+{
+	int reg;
+	u32 val;
+	bool cur_state;
+
+	reg = DSPCNTR(plane);
+	val = I915_READ(reg);
+	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
+	WARN(cur_state != state,
+	     "plane %c assertion failure (expected %s, current %s)\n",
+	     plane_name(plane), state_string(state), state_string(cur_state));
+}
+
+#define assert_plane_enabled(d, p) assert_plane(d, p, true)
+#define assert_plane_disabled(d, p) assert_plane(d, p, false)
+
+static void assert_planes_disabled(struct drm_i915_private *dev_priv,
+				   enum pipe pipe)
+{
+	int reg, i;
+	u32 val;
+	int cur_pipe;
+
+	/* Planes are fixed to pipes on ILK+ */
+	if (HAS_PCH_SPLIT(dev_priv->dev) || IS_VALLEYVIEW(dev_priv->dev)) {
+		reg = DSPCNTR(pipe);
+		val = I915_READ(reg);
+		WARN((val & DISPLAY_PLANE_ENABLE),
+		     "plane %c assertion failure, should be disabled but is not\n",
+		     plane_name(pipe));
+		return;
+	}
+
+	/* Need to check both planes against the pipe */
+	for (i = 0; i < 2; i++) {
+		reg = DSPCNTR(i);
+		val = I915_READ(reg);
+		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+			DISPPLANE_SEL_PIPE_SHIFT;
+		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
+		     plane_name(i), pipe_name(pipe));
+	}
+}
+
+static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
+				    enum pipe pipe)
+{
+	int reg, i;
+	u32 val;
+
+	if (!IS_VALLEYVIEW(dev_priv->dev))
+		return;
+
+	/* Need to check all sprites against the pipe */
+	for (i = 0; i < dev_priv->num_plane; i++) {
+		reg = SPCNTR(pipe, i);
+		val = I915_READ(reg);
+		WARN((val & SP_ENABLE),
+		     "sprite %d assertion failure, should be off on pipe %c but is still active\n",
+		     pipe * 2 + i, pipe_name(pipe));
+	}
+}
+
+static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+	bool enabled;
+
+	if (HAS_PCH_LPT(dev_priv->dev)) {
+		DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
+		return;
+	}
+
+	val = I915_READ(PCH_DREF_CONTROL);
+	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+			    DREF_SUPERSPREAD_SOURCE_MASK));
+	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
+				       enum pipe pipe)
+{
+	int reg;
+	u32 val;
+	bool enabled;
+
+	reg = TRANSCONF(pipe);
+	val = I915_READ(reg);
+	enabled = !!(val & TRANS_ENABLE);
+	WARN(enabled,
+	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
+	     pipe_name(pipe));
+}
+
+static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
+			    enum pipe pipe, u32 port_sel, u32 val)
+{
+	if ((val & DP_PORT_EN) == 0)
+		return false;
+
+	if (HAS_PCH_CPT(dev_priv->dev)) {
+		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
+		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
+		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
+			return false;
+	} else {
+		if ((val & DP_PIPE_MASK) != (pipe << 30))
+			return false;
+	}
+	return true;
+}
+
+static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
+			      enum pipe pipe, u32 val)
+{
+	if ((val & SDVO_ENABLE) == 0)
+		return false;
+
+	if (HAS_PCH_CPT(dev_priv->dev)) {
+		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
+			return false;
+	} else {
+		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
+			return false;
+	}
+	return true;
+}
+
+static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
+			      enum pipe pipe, u32 val)
+{
+	if ((val & LVDS_PORT_EN) == 0)
+		return false;
+
+	if (HAS_PCH_CPT(dev_priv->dev)) {
+		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+			return false;
+	} else {
+		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
+			return false;
+	}
+	return true;
+}
+
+static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
+			      enum pipe pipe, u32 val)
+{
+	if ((val & ADPA_DAC_ENABLE) == 0)
+		return false;
+	if (HAS_PCH_CPT(dev_priv->dev)) {
+		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+			return false;
+	} else {
+		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
+			return false;
+	}
+	return true;
+}
+
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+				   enum pipe pipe, int reg, u32 port_sel)
+{
+	u32 val = I915_READ(reg);
+	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
+	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+	     reg, pipe_name(pipe));
+
+	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+	     && (val & DP_PIPEB_SELECT),
+	     "IBX PCH dp port still using transcoder B\n");
+}
+
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+				     enum pipe pipe, int reg)
+{
+	u32 val = I915_READ(reg);
+	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
+	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
+	     reg, pipe_name(pipe));
+
+	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
+	     && (val & SDVO_PIPE_B_SELECT),
+	     "IBX PCH hdmi port still using transcoder B\n");
+}
+
+static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+				      enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
+	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
+	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+
+	reg = PCH_ADPA;
+	val = I915_READ(reg);
+	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
+	     "PCH VGA enabled on transcoder %c, should be disabled\n",
+	     pipe_name(pipe));
+
+	reg = PCH_LVDS;
+	val = I915_READ(reg);
+	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
+	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
+	     pipe_name(pipe));
+
+	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
+	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
+	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
+}
+
+/**
+ * intel_enable_pll - enable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ *
+ * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
+ * make sure the PLL reg is writable first though, since the panel write
+ * protect mechanism may be enabled.
+ *
+ * Note!  This is for pre-ILK only.
+ *
+ * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
+ */
+static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/* No really, not for ILK+ */
+	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
+
+	/* PLL is protected by panel, make sure we can write it */
+	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
+		assert_panel_unlocked(dev_priv, pipe);
+
+	reg = DPLL(pipe);
+	val = I915_READ(reg);
+	val |= DPLL_VCO_ENABLE;
+
+	/* We do this three times for luck */
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	udelay(150); /* wait for warmup */
+}
+
+/**
+ * intel_disable_pll - disable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe, making sure the pipe is off first.
+ *
+ * Note!  This is for pre-ILK only.
+ */
+static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/* Don't disable pipe A or its PLL if the pipe A quirk needs them on */
+	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+		return;
+
+	/* Make sure the pipe isn't still relying on us */
+	assert_pipe_disabled(dev_priv, pipe);
+
+	reg = DPLL(pipe);
+	val = I915_READ(reg);
+	val &= ~DPLL_VCO_ENABLE;
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+}
+
+/* SBI access */
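+/*
+ * The sideband interface works much like DPIO: wait for SBI_BUSY to clear,
+ * program SBI_ADDR (register in the high word) and SBI_DATA, then kick the
+ * opcode for the ICLK or MPHY destination and poll for completion.
+ */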
+static void
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+		enum intel_sbi_destination destination)
+{
+	u32 tmp;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to become ready\n");
+		return;
+	}
+
+	I915_WRITE(SBI_ADDR, (reg << 16));
+	I915_WRITE(SBI_DATA, value);
+
+	if (destination == SBI_ICLK)
+		tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+	else
+		tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+	I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
+		return;
+	}
+}
+
+static u32
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+	       enum intel_sbi_destination destination)
+{
+	u32 value = 0;
+	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to become ready\n");
+		return 0;
+	}
+
+	I915_WRITE(SBI_ADDR, (reg << 16));
+
+	if (destination == SBI_ICLK)
+		value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+	else
+		value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+	I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
+
+	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
+				100)) {
+		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
+		return 0;
+	}
+
+	return I915_READ(SBI_DATA);
+}
+
+/**
+ * ironlake_enable_pch_pll - enable PCH PLL
+ * @intel_crtc: CRTC whose PCH PLL to enable
+ *
+ * The PCH PLL needs to be enabled before the PCH transcoder, since it
+ * drives the transcoder clock.
+ */
+static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
+{
+	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+	struct intel_pch_pll *pll;
+	int reg;
+	u32 val;
+
+	/* PCH PLLs only available on ILK, SNB and IVB */
+	BUG_ON(dev_priv->info->gen < 5);
+	pll = intel_crtc->pch_pll;
+	if (pll == NULL)
+		return;
+
+	if (WARN_ON(pll->refcount == 0))
+		return;
+
+	DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
+		      pll->pll_reg, pll->active, pll->on,
+		      intel_crtc->base.base.id);
+
+	/* PCH refclock must be enabled first */
+	assert_pch_refclk_enabled(dev_priv);
+
+	if (pll->active++ && pll->on) {
+		assert_pch_pll_enabled(dev_priv, pll, NULL);
+		return;
+	}
+
+	DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
+
+	reg = pll->pll_reg;
+	val = I915_READ(reg);
+	val |= DPLL_VCO_ENABLE;
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	udelay(200);
+
+	pll->on = true;
+}
+
+static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
+{
+	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+	struct intel_pch_pll *pll = intel_crtc->pch_pll;
+	int reg;
+	u32 val;
+
+	/* PCH only available on ILK+ */
+	BUG_ON(dev_priv->info->gen < 5);
+	if (pll == NULL)
+		return;
+
+	if (WARN_ON(pll->refcount == 0))
+		return;
+
+	DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
+		      pll->pll_reg, pll->active, pll->on,
+		      intel_crtc->base.base.id);
+
+	if (WARN_ON(pll->active == 0)) {
+		assert_pch_pll_disabled(dev_priv, pll, NULL);
+		return;
+	}
+
+	if (--pll->active) {
+		assert_pch_pll_enabled(dev_priv, pll, NULL);
+		return;
+	}
+
+	DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
+
+	/* Make sure transcoder isn't still depending on us */
+	assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
+
+	reg = pll->pll_reg;
+	val = I915_READ(reg);
+	val &= ~DPLL_VCO_ENABLE;
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	udelay(200);
+
+	pll->on = false;
+}
+
+static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+					   enum pipe pipe)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	uint32_t reg, val, pipeconf_val;
+
+	/* PCH only available on ILK+ */
+	BUG_ON(dev_priv->info->gen < 5);
+
+	/* Make sure PCH DPLL is enabled */
+	assert_pch_pll_enabled(dev_priv,
+			       to_intel_crtc(crtc)->pch_pll,
+			       to_intel_crtc(crtc));
+
+	/* FDI must be feeding us bits for PCH ports */
+	assert_fdi_tx_enabled(dev_priv, pipe);
+	assert_fdi_rx_enabled(dev_priv, pipe);
+
+	if (HAS_PCH_CPT(dev)) {
+		/* Workaround: Set the timing override bit before enabling the
+		 * pch transcoder. */
+		reg = TRANS_CHICKEN2(pipe);
+		val = I915_READ(reg);
+		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+		I915_WRITE(reg, val);
+	}
+
+	reg = TRANSCONF(pipe);
+	val = I915_READ(reg);
+	pipeconf_val = I915_READ(PIPECONF(pipe));
+
+	if (HAS_PCH_IBX(dev_priv->dev)) {
+		/*
+		 * Make the BPC in the transcoder consistent with
+		 * that in the pipeconf reg.
+		 */
+		val &= ~PIPECONF_BPC_MASK;
+		val |= pipeconf_val & PIPECONF_BPC_MASK;
+	}
+
+	val &= ~TRANS_INTERLACE_MASK;
+	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
+		if (HAS_PCH_IBX(dev_priv->dev) &&
+		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
+			val |= TRANS_LEGACY_INTERLACED_ILK;
+		else
+			val |= TRANS_INTERLACED;
+	} else {
+		val |= TRANS_PROGRESSIVE;
+	}
+
+	I915_WRITE(reg, val | TRANS_ENABLE);
+	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+		DRM_ERROR("failed to enable transcoder %d\n", pipe);
+}
+
+static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+				      enum transcoder cpu_transcoder)
+{
+	u32 val, pipeconf_val;
+
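+	/* LPT has a single PCH transcoder, always fed from FDI RX A */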
+	/* PCH only available on ILK+ */
+	BUG_ON(dev_priv->info->gen < 5);
+
+	/* FDI must be feeding us bits for PCH ports */
+	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
+	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
+
+	/* Workaround: set timing override bit. */
+	val = I915_READ(_TRANSA_CHICKEN2);
+	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+	I915_WRITE(_TRANSA_CHICKEN2, val);
+
+	val = TRANS_ENABLE;
+	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+
+	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
+	    PIPECONF_INTERLACED_ILK)
+		val |= TRANS_INTERLACED;
+	else
+		val |= TRANS_PROGRESSIVE;
+
+	I915_WRITE(TRANSCONF(TRANSCODER_A), val);
+	if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
+		DRM_ERROR("Failed to enable PCH transcoder\n");
+}
+
+static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+					    enum pipe pipe)
+{
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t reg, val;
+
+	/* FDI relies on the transcoder */
+	assert_fdi_tx_disabled(dev_priv, pipe);
+	assert_fdi_rx_disabled(dev_priv, pipe);
+
+	/* Ports must be off as well */
+	assert_pch_ports_disabled(dev_priv, pipe);
+
+	reg = TRANSCONF(pipe);
+	val = I915_READ(reg);
+	val &= ~TRANS_ENABLE;
+	I915_WRITE(reg, val);
+	/* wait for PCH transcoder off, transcoder state */
+	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+		DRM_ERROR("failed to disable transcoder %d\n", pipe);
+
+	if (!HAS_PCH_IBX(dev)) {
+		/* Workaround: Clear the timing override chicken bit again. */
+		reg = TRANS_CHICKEN2(pipe);
+		val = I915_READ(reg);
+		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+		I915_WRITE(reg, val);
+	}
+}
+
+static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+
+	val = I915_READ(_TRANSACONF);
+	val &= ~TRANS_ENABLE;
+	I915_WRITE(_TRANSACONF, val);
+	/* wait for PCH transcoder off, transcoder state */
+	if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
+		DRM_ERROR("Failed to disable PCH transcoder\n");
+
+	/* Workaround: clear timing override bit. */
+	val = I915_READ(_TRANSA_CHICKEN2);
+	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+	I915_WRITE(_TRANSA_CHICKEN2, val);
+}
+
+/**
+ * intel_enable_pipe - enable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to enable
+ * @pch_port: on ILK+, is this pipe driving a PCH port or not
+ *
+ * Enable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe is actually running (i.e. first vblank) before
+ * returning.
+ */
+static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+			      bool pch_port)
+{
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+	enum pipe pch_transcoder;
+	int reg;
+	u32 val;
+
+	if (HAS_PCH_LPT(dev_priv->dev))
+		pch_transcoder = TRANSCODER_A;
+	else
+		pch_transcoder = pipe;
+
+	/*
+	 * A pipe without a PLL won't actually be able to drive bits from
+	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
+	 * need the check.
+	 */
+	if (!HAS_PCH_SPLIT(dev_priv->dev))
+		assert_pll_enabled(dev_priv, pipe);
+	else {
+		if (pch_port) {
+			/* if driving the PCH, we need FDI enabled */
+			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+			assert_fdi_tx_pll_enabled(dev_priv,
+						  (enum pipe) cpu_transcoder);
+		}
+		/* FIXME: assert CPU port conditions for SNB+ */
+	}
+
+	reg = PIPECONF(cpu_transcoder);
+	val = I915_READ(reg);
+	if (val & PIPECONF_ENABLE)
+		return;
+
+	I915_WRITE(reg, val | PIPECONF_ENABLE);
+	intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_disable_pipe - disable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to disable
+ *
+ * Disable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe has shut down before returning.
+ */
+static void intel_disable_pipe(struct drm_i915_private *dev_priv,
+			       enum pipe pipe)
+{
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+	int reg;
+	u32 val;
+
+	/*
+	 * Make sure planes won't keep trying to pump pixels to us,
+	 * or we might hang the display.
+	 */
+	assert_planes_disabled(dev_priv, pipe);
+	assert_sprites_disabled(dev_priv, pipe);
+
+	/* Don't disable pipe A or its PLLs if the pipe A force quirk is set */
+	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+		return;
+
+	reg = PIPECONF(cpu_transcoder);
+	val = I915_READ(reg);
+	if ((val & PIPECONF_ENABLE) == 0)
+		return;
+
+	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
+	intel_wait_for_pipe_off(dev_priv->dev, pipe);
+}
+
+/*
+ * Plane regs are double buffered, going from enabled->disabled needs a
+ * trigger in order to latch.  The display address reg provides this.
+ */
+void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+				      enum plane plane)
+{
+	if (dev_priv->info->gen >= 4)
+		I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+	else
+		I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+}
+
+/**
+ * intel_enable_plane - enable a display plane on a given pipe
+ * @dev_priv: i915 private structure
+ * @plane: plane to enable
+ * @pipe: pipe being fed
+ *
+ * Enable @plane on @pipe, making sure that @pipe is running first.
+ */
+static void intel_enable_plane(struct drm_i915_private *dev_priv,
+			       enum plane plane, enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	/* If the pipe isn't enabled, we can't pump pixels and may hang */
+	assert_pipe_enabled(dev_priv, pipe);
+
+	reg = DSPCNTR(plane);
+	val = I915_READ(reg);
+	if (val & DISPLAY_PLANE_ENABLE)
+		return;
+
+	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
+	intel_flush_display_plane(dev_priv, plane);
+	intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_disable_plane - disable a display plane
+ * @dev_priv: i915 private structure
+ * @plane: plane to disable
+ * @pipe: pipe consuming the data
+ *
+ * Disable @plane; should be an independent operation.
+ */
+static void intel_disable_plane(struct drm_i915_private *dev_priv,
+				enum plane plane, enum pipe pipe)
+{
+	int reg;
+	u32 val;
+
+	reg = DSPCNTR(plane);
+	val = I915_READ(reg);
+	if ((val & DISPLAY_PLANE_ENABLE) == 0)
+		return;
+
+	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
+	intel_flush_display_plane(dev_priv, plane);
+	intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+static bool need_vtd_wa(struct drm_device *dev)
+{
+#ifdef CONFIG_INTEL_IOMMU
+	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
+		return true;
+#endif
+	return false;
+}
+
+int
+intel_pin_and_fence_fb_obj(struct drm_device *dev,
+			   struct drm_i915_gem_object *obj,
+			   struct intel_ring_buffer *pipelined)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 alignment;
+	int ret;
+
+	switch (obj->tiling_mode) {
+	case I915_TILING_NONE:
+		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+			alignment = 128 * 1024;
+		else if (INTEL_INFO(dev)->gen >= 4)
+			alignment = 4 * 1024;
+		else
+			alignment = 64 * 1024;
+		break;
+	case I915_TILING_X:
+		/* pin() will align the object as required by fence */
+		alignment = 0;
+		break;
+	case I915_TILING_Y:
+		/* Despite that we check this in framebuffer_init userspace can
+		 * screw us over and change the tiling after the fact. Only
+		 * pinned buffers can't change their tiling. */
+		DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
+		return -EINVAL;
+	default:
+		BUG();
+	}
+
+	/* Note that the w/a also requires 64 PTE of padding following the
+	 * bo. We currently fill all unused PTE with the shadow page and so
+	 * we should always have valid PTE following the scanout preventing
+	 * the VT-d warning.
+	 */
+	if (need_vtd_wa(dev) && alignment < 256 * 1024)
+		alignment = 256 * 1024;
+
+	dev_priv->mm.interruptible = false;
+	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
+	if (ret)
+		goto err_interruptible;
+
+	/* Install a fence for tiled scan-out. Pre-i965 always needs a
+	 * fence, whereas 965+ only requires a fence if using
+	 * framebuffer compression.  For simplicity, we always install
+	 * a fence as the cost is not that onerous.
+	 */
+	ret = i915_gem_object_get_fence(obj);
+	if (ret)
+		goto err_unpin;
+
+	i915_gem_object_pin_fence(obj);
+
+	dev_priv->mm.interruptible = true;
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+err_interruptible:
+	dev_priv->mm.interruptible = true;
+	return ret;
+}
+
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_unpin_fence(obj);
+	i915_gem_object_unpin(obj);
+}
+
+/* Compute the linear offset to the base tile and adjust x, y; bytes per
+ * pixel (cpp) is assumed to be a power of two. */
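+/* Worked example (illustrative): for an X-tiled surface with cpp = 4 and
+ * pitch = 8192 bytes, x = 100, y = 20 gives tile_rows = 20 / 8 = 2 and
+ * y = 20 % 8 = 4; tiles = 100 / (512 / 4) = 0 and x stays 100, so the
+ * returned base offset is 2 * 8192 * 8 + 0 * 4096 = 131072 bytes. */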
+unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+					     unsigned int tiling_mode,
+					     unsigned int cpp,
+					     unsigned int pitch)
+{
+	if (tiling_mode != I915_TILING_NONE) {
+		unsigned int tile_rows, tiles;
+
+		tile_rows = *y / 8;
+		*y %= 8;
+
+		tiles = *x / (512/cpp);
+		*x %= 512/cpp;
+
+		return tile_rows * pitch * 8 + tiles * 4096;
+	} else {
+		unsigned int offset;
+
+		offset = *y * pitch + *x * cpp;
+		*y = 0;
+		*x = (offset & 4095) / cpp;
+		return offset & -4096;
+	}
+}
+
+static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			     int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_framebuffer *intel_fb;
+	struct drm_i915_gem_object *obj;
+	int plane = intel_crtc->plane;
+	unsigned long linear_offset;
+	u32 dspcntr;
+	u32 reg;
+
+	switch (plane) {
+	case 0:
+	case 1:
+		break;
+	default:
+		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+		return -EINVAL;
+	}
+
+	intel_fb = to_intel_framebuffer(fb);
+	obj = intel_fb->obj;
+
+	reg = DSPCNTR(plane);
+	dspcntr = I915_READ(reg);
+	/* Mask out pixel format bits in case we change it */
+	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_C8:
+		dspcntr |= DISPPLANE_8BPP;
+		break;
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_ARGB1555:
+		dspcntr |= DISPPLANE_BGRX555;
+		break;
+	case DRM_FORMAT_RGB565:
+		dspcntr |= DISPPLANE_BGRX565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		dspcntr |= DISPPLANE_BGRX888;
+		break;
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		dspcntr |= DISPPLANE_RGBX888;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+		dspcntr |= DISPPLANE_BGRX101010;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010:
+		dspcntr |= DISPPLANE_RGBX101010;
+		break;
+	default:
+		BUG();
+	}
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (obj->tiling_mode != I915_TILING_NONE)
+			dspcntr |= DISPPLANE_TILED;
+		else
+			dspcntr &= ~DISPPLANE_TILED;
+	}
+
+	I915_WRITE(reg, dspcntr);
+
+	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		intel_crtc->dspaddr_offset =
+			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+						       fb->bits_per_pixel / 8,
+						       fb->pitches[0]);
+		linear_offset -= intel_crtc->dspaddr_offset;
+	} else {
+		intel_crtc->dspaddr_offset = linear_offset;
+	}
+
+	DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+		      obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+	if (INTEL_INFO(dev)->gen >= 4) {
+		I915_MODIFY_DISPBASE(DSPSURF(plane),
+				     obj->gtt_offset + intel_crtc->dspaddr_offset);
+		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+		I915_WRITE(DSPLINOFF(plane), linear_offset);
+	} else
+		I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
+	POSTING_READ(reg);
+
+	return 0;
+}
+
+static int ironlake_update_plane(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb, int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_framebuffer *intel_fb;
+	struct drm_i915_gem_object *obj;
+	int plane = intel_crtc->plane;
+	unsigned long linear_offset;
+	u32 dspcntr;
+	u32 reg;
+
+	switch (plane) {
+	case 0:
+	case 1:
+	case 2:
+		break;
+	default:
+		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+		return -EINVAL;
+	}
+
+	intel_fb = to_intel_framebuffer(fb);
+	obj = intel_fb->obj;
+
+	reg = DSPCNTR(plane);
+	dspcntr = I915_READ(reg);
+	/* Mask out pixel format bits in case we change it */
+	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_C8:
+		dspcntr |= DISPPLANE_8BPP;
+		break;
+	case DRM_FORMAT_RGB565:
+		dspcntr |= DISPPLANE_BGRX565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		dspcntr |= DISPPLANE_BGRX888;
+		break;
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		dspcntr |= DISPPLANE_RGBX888;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+		dspcntr |= DISPPLANE_BGRX101010;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010:
+		dspcntr |= DISPPLANE_RGBX101010;
+		break;
+	default:
+		BUG();
+	}
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		dspcntr |= DISPPLANE_TILED;
+	else
+		dspcntr &= ~DISPPLANE_TILED;
+
+	/* Trickle feed must always be disabled */
+	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+	I915_WRITE(reg, dspcntr);
+
+	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+	intel_crtc->dspaddr_offset =
+		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+					       fb->bits_per_pixel / 8,
+					       fb->pitches[0]);
+	linear_offset -= intel_crtc->dspaddr_offset;
+
+	DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+		      obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+	I915_MODIFY_DISPBASE(DSPSURF(plane),
+			     obj->gtt_offset + intel_crtc->dspaddr_offset);
+	if (IS_HASWELL(dev)) {
+		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
+	} else {
+		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+		I915_WRITE(DSPLINOFF(plane), linear_offset);
+	}
+	POSTING_READ(reg);
+
+	return 0;
+}
+
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			   int x, int y, enum mode_set_atomic state)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->display.disable_fbc)
+		dev_priv->display.disable_fbc(dev);
+	intel_increase_pllclock(crtc);
+
+	return dev_priv->display.update_plane(crtc, fb, x, y);
+}
+
+void intel_display_handle_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+
+	/*
+	 * Flips in the rings have been nuked by the reset,
+	 * so complete all pending flips so that user space
+	 * will get its events and not get stuck.
+	 *
+	 * Also update the base address of all primary
+	 * planes to the last fb to make sure we're
+	 * showing the correct fb after a reset.
+	 *
+	 * Need to make two loops over the crtcs so that we
+	 * don't try to grab a crtc mutex before the
+	 * pending_flip_queue really got woken up.
+	 */
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		enum plane plane = intel_crtc->plane;
+
+		intel_prepare_page_flip(dev, plane);
+		intel_finish_page_flip_plane(dev, plane);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+		mutex_lock(&crtc->mutex);
+		if (intel_crtc->active)
+			dev_priv->display.update_plane(crtc, crtc->fb,
+						       crtc->x, crtc->y);
+		mutex_unlock(&crtc->mutex);
+	}
+}
+
+static int
+intel_finish_fb(struct drm_framebuffer *old_fb)
+{
+	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	bool was_interruptible = dev_priv->mm.interruptible;
+	int ret;
+
+	/* Big Hammer, we also need to ensure that any pending
+	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+	 * current scanout is retired before unpinning the old
+	 * framebuffer.
+	 *
+	 * This should only fail upon a hung GPU, in which case we
+	 * can safely continue.
+	 */
+	dev_priv->mm.interruptible = false;
+	ret = i915_gem_object_finish_gpu(obj);
+	dev_priv->mm.interruptible = was_interruptible;
+
+	return ret;
+}
+
+static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_master_private *master_priv;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	if (!dev->primary->master)
+		return;
+
+	master_priv = dev->primary->master->driver_priv;
+	if (!master_priv->sarea_priv)
+		return;
+
+	switch (intel_crtc->pipe) {
+	case 0:
+		master_priv->sarea_priv->pipeA_x = x;
+		master_priv->sarea_priv->pipeA_y = y;
+		break;
+	case 1:
+		master_priv->sarea_priv->pipeB_x = x;
+		master_priv->sarea_priv->pipeB_y = y;
+		break;
+	default:
+		break;
+	}
+}
+
+static int
+intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+		    struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_framebuffer *old_fb;
+	int ret;
+
+	/* no fb bound */
+	if (!fb) {
+		DRM_ERROR("No FB bound\n");
+		return 0;
+	}
+
+	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
+		DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
+				intel_crtc->plane,
+				INTEL_INFO(dev)->num_pipes);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	ret = intel_pin_and_fence_fb_obj(dev,
+					 to_intel_framebuffer(fb)->obj,
+					 NULL);
+	if (ret != 0) {
+		mutex_unlock(&dev->struct_mutex);
+		DRM_ERROR("pin & fence failed\n");
+		return ret;
+	}
+
+	ret = dev_priv->display.update_plane(crtc, fb, x, y);
+	if (ret) {
+		intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
+		mutex_unlock(&dev->struct_mutex);
+		DRM_ERROR("failed to update base address\n");
+		return ret;
+	}
+
+	old_fb = crtc->fb;
+	crtc->fb = fb;
+	crtc->x = x;
+	crtc->y = y;
+
+	if (old_fb) {
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
+		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+	}
+
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_crtc_update_sarea_pos(crtc, x, y);
+
+	return 0;
+}
+
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* enable normal train */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (IS_IVYBRIDGE(dev)) {
+		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	}
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+	/* wait one idle pattern time */
+	POSTING_READ(reg);
+	udelay(1000);
+
+	/* IVB wants error correction enabled */
+	if (IS_IVYBRIDGE(dev))
+		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
+			   FDI_FE_ERRC_ENABLE);
+}
+
+static void ivb_modeset_global_resources(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *pipe_B_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+	struct intel_crtc *pipe_C_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
+	uint32_t temp;
+
+	/* When everything is off, disable FDI C so that we can enable FDI B
+	 * with all lanes. XXX: This misses the case where a pipe is not using
+	 * any PCH resources and so doesn't need any FDI lanes. */
+	if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
+		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+		temp = I915_READ(SOUTH_CHICKEN1);
+		temp &= ~FDI_BC_BIFURCATION_SELECT;
+		DRM_DEBUG_KMS("disabling fdi C rx\n");
+		I915_WRITE(SOUTH_CHICKEN1, temp);
+	}
+}
+
+/* The FDI link training functions for ILK/Ibex Peak. */
+static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	u32 reg, temp, tries;
+
+	/* FDI needs bits from pipe & plane first */
+	assert_pipe_enabled(dev_priv, pipe);
+	assert_plane_enabled(dev_priv, plane);
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for the train result */
+	reg = FDI_RX_IMR(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_RX_SYMBOL_LOCK;
+	temp &= ~FDI_RX_BIT_LOCK;
+	I915_WRITE(reg, temp);
+	I915_READ(reg);
+	udelay(150);
+
+	/* enable CPU FDI TX and PCH FDI RX */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~(7 << 19);
+	temp |= (intel_crtc->fdi_lanes - 1) << 19;
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(150);
+
+	/* Ironlake workaround, enable clock pointer after FDI enable */
+	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+		   FDI_RX_PHASE_SYNC_POINTER_EN);
+
+	reg = FDI_RX_IIR(pipe);
+	for (tries = 0; tries < 5; tries++) {
+		temp = I915_READ(reg);
+		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+		if ((temp & FDI_RX_BIT_LOCK)) {
+			DRM_DEBUG_KMS("FDI train 1 done.\n");
+			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+			break;
+		}
+	}
+	if (tries == 5)
+		DRM_ERROR("FDI train 1 fail!\n");
+
+	/* Train 2 */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_2;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_2;
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
+	udelay(150);
+
+	reg = FDI_RX_IIR(pipe);
+	for (tries = 0; tries < 5; tries++) {
+		temp = I915_READ(reg);
+		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+		if (temp & FDI_RX_SYMBOL_LOCK) {
+			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+			DRM_DEBUG_KMS("FDI train 2 done.\n");
+			break;
+		}
+	}
+	if (tries == 5)
+		DRM_ERROR("FDI train 2 fail!\n");
+
+	DRM_DEBUG_KMS("FDI train done\n");
+
+}
+
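+/* Voltage swing / pre-emphasis combinations tried, in order, during SNB FDI
+ * link training; the loops below walk this table until the PCH reports bit
+ * and symbol lock. */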
+static const int snb_b_fdi_train_param[] = {
+	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
+	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
+	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
+	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
+};
+
+/* The FDI link training functions for SNB/Cougarpoint. */
+static void gen6_fdi_link_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp, i, retry;
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for the train result */
+	reg = FDI_RX_IMR(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_RX_SYMBOL_LOCK;
+	temp &= ~FDI_RX_BIT_LOCK;
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
+	udelay(150);
+
+	/* enable CPU FDI TX and PCH FDI RX */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~(7 << 19);
+	temp |= (intel_crtc->fdi_lanes - 1) << 19;
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+	/* SNB-B */
+	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+	I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+	I915_WRITE(FDI_RX_MISC(pipe),
+		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(150);
+
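+	/* Train 1: walk the four voltage/emphasis levels and, at each level,
+	 * poll FDI_RX_IIR up to five times for bit lock before escalating to
+	 * the next level. */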
+	for (i = 0; i < 4; i++) {
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		temp |= snb_b_fdi_train_param[i];
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(500);
+
+		for (retry = 0; retry < 5; retry++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = I915_READ(reg);
+			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+			if (temp & FDI_RX_BIT_LOCK) {
+				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+				DRM_DEBUG_KMS("FDI train 1 done.\n");
+				break;
+			}
+			udelay(50);
+		}
+		if (retry < 5)
+			break;
+	}
+	if (i == 4)
+		DRM_ERROR("FDI train 1 fail!\n");
+
+	/* Train 2 */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_2;
+	if (IS_GEN6(dev)) {
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		/* SNB-B */
+		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+	}
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_2;
+	}
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
+	udelay(150);
+
+	for (i = 0; i < 4; i++) {
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		temp |= snb_b_fdi_train_param[i];
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(500);
+
+		for (retry = 0; retry < 5; retry++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = I915_READ(reg);
+			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+			if (temp & FDI_RX_SYMBOL_LOCK) {
+				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+				DRM_DEBUG_KMS("FDI train 2 done.\n");
+				break;
+			}
+			udelay(50);
+		}
+		if (retry < 5)
+			break;
+	}
+	if (i == 4)
+		DRM_ERROR("FDI train 2 fail!\n");
+
+	DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+/* Manual link training for Ivy Bridge A0 parts */
+static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp, i;
+
+	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+	   for the train result */
+	reg = FDI_RX_IMR(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_RX_SYMBOL_LOCK;
+	temp &= ~FDI_RX_BIT_LOCK;
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
+	udelay(150);
+
+	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
+		      I915_READ(FDI_RX_IIR(pipe)));
+
+	/* enable CPU FDI TX and PCH FDI RX */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~(7 << 19);
+	temp |= (intel_crtc->fdi_lanes - 1) << 19;
+	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
+	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+	temp |= FDI_COMPOSITE_SYNC;
+	I915_WRITE(reg, temp | FDI_TX_ENABLE);
+
+	I915_WRITE(FDI_RX_MISC(pipe),
+		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_AUTO;
+	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+	temp |= FDI_COMPOSITE_SYNC;
+	I915_WRITE(reg, temp | FDI_RX_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(150);
+
+	for (i = 0; i < 4; i++) {
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		temp |= snb_b_fdi_train_param[i];
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(500);
+
+		reg = FDI_RX_IIR(pipe);
+		temp = I915_READ(reg);
+		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+		if (temp & FDI_RX_BIT_LOCK ||
+		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
+			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+			DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
+			break;
+		}
+	}
+	if (i == 4)
+		DRM_ERROR("FDI train 1 fail!\n");
+
+	/* Train 2 */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
+	udelay(150);
+
+	for (i = 0; i < 4; i++) {
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
+		temp |= snb_b_fdi_train_param[i];
+		I915_WRITE(reg, temp);
+
+		POSTING_READ(reg);
+		udelay(500);
+
+		reg = FDI_RX_IIR(pipe);
+		temp = I915_READ(reg);
+		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+
+		if (temp & FDI_RX_SYMBOL_LOCK) {
+			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+			DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
+			break;
+		}
+	}
+	if (i == 4)
+		DRM_ERROR("FDI train 2 fail!\n");
+
+	DRM_DEBUG_KMS("FDI train done.\n");
+}
+
+static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~((0x7 << 19) | (0x7 << 16));
+	temp |= (intel_crtc->fdi_lanes - 1) << 19;
+	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(200);
+
+	/* Switch from Rawclk to PCDclk */
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp | FDI_PCDCLK);
+
+	POSTING_READ(reg);
+	udelay(200);
+
+	/* Enable CPU FDI TX PLL, always on for Ironlake */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+
+		POSTING_READ(reg);
+		udelay(100);
+	}
+}
+
+static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* Switch from PCDclk to Rawclk */
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_PCDCLK);
+
+	/* Disable CPU FDI TX PLL */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(100);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+
+	/* Wait for the clocks to turn off. */
+	POSTING_READ(reg);
+	udelay(100);
+}
+
+static void ironlake_fdi_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* disable CPU FDI tx and PCH FDI rx */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+	POSTING_READ(reg);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~(0x7 << 16);
+	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+	POSTING_READ(reg);
+	udelay(100);
+
+	/* Ironlake workaround, disable clock pointer after downing FDI */
+	if (HAS_PCH_IBX(dev)) {
+		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+	}
+
+	/* still set train pattern 1 */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_PATTERN_1;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_PATTERN_1;
+	}
+	/* BPC in FDI rx is consistent with that in PIPECONF */
+	temp &= ~(0x07 << 16);
+	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+	I915_WRITE(reg, temp);
+
+	POSTING_READ(reg);
+	udelay(100);
+}
+
+static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned long flags;
+	bool pending;
+
+	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+		return false;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	pending = to_intel_crtc(crtc)->unpin_work != NULL;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	return pending;
+}
+
+static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (crtc->fb == NULL)
+		return;
+
+	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
+
+	wait_event(dev_priv->pending_flip_queue,
+		   !intel_crtc_has_pending_flip(crtc));
+
+	mutex_lock(&dev->struct_mutex);
+	intel_finish_fb(crtc->fb);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/* Program iCLKIP clock to the desired frequency */
+static void lpt_program_iclkip(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 divsel, phaseinc, auxdiv, phasedir = 0;
+	u32 temp;
+
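+	/* Programming sequence below: gate the pixel clock, stop the SSC
+	 * modulator over sideband, compute and write the integer and
+	 * fractional (phase increment) divisors, restart the modulator,
+	 * wait for it to settle and ungate the pixel clock again.
+	 */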
+	mutex_lock(&dev_priv->dpio_lock);
+
+	/* Gate the pixel clock while the divisors are being reprogrammed,
+	 * and ungate it again once programming is done.
+	 */
+	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+	/* Disable SSCCTL */
+	intel_sbi_write(dev_priv, SBI_SSCCTL6,
+			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+				SBI_SSCCTL_DISABLE,
+			SBI_ICLK);
+
+	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
+	if (crtc->mode.clock == 20000) {
+		auxdiv = 1;
+		divsel = 0x41;
+		phaseinc = 0x20;
+	} else {
+		/* The iCLK virtual clock root frequency is in MHz,
+		 * but crtc->mode.clock is in kHz. To get the divisors,
+		 * we divide one by the other, so the virtual clock is
+		 * converted to kHz here for higher precision.
+		 */
+		u32 iclk_virtual_root_freq = 172800 * 1000;
+		u32 iclk_pi_range = 64;
+		u32 desired_divisor, msb_divisor_value, pi_value;
+
+		desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
+		msb_divisor_value = desired_divisor / iclk_pi_range;
+		pi_value = desired_divisor % iclk_pi_range;
+
+		auxdiv = 0;
+		divsel = msb_divisor_value - 2;
+		phaseinc = pi_value;
+	}
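+	/* Worked example (illustrative): for a 108000 kHz pixel clock,
+	 * desired_divisor = 172800000 / 108000 = 1600, so
+	 * msb_divisor_value = 1600 / 64 = 25 and pi_value = 1600 % 64 = 0,
+	 * yielding divsel = 23, phaseinc = 0 and auxdiv = 0.
+	 */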
+
+	/* This should not happen with any sane values */
+	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
+		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+			crtc->mode.clock,
+			auxdiv,
+			divsel,
+			phasedir,
+			phaseinc);
+
+	/* Program SSCDIVINTPHASE6 */
+	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
+	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
+	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
+	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
+	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
+	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
+	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
+
+	/* Program SSCAUXDIV */
+	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
+	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
+	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
+
+	/* Enable modulator and associated divider */
+	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+	temp &= ~SBI_SSCCTL_DISABLE;
+	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+
+	/* Wait for initialization time */
+	udelay(24);
+
+	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+}
+
+/*
+ * Enable PCH resources required for PCH ports:
+ *   - PCH PLLs
+ *   - FDI training & RX/TX
+ *   - update transcoder timings
+ *   - DP transcoding bits
+ *   - transcoder
+ */
+static void ironlake_pch_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	assert_transcoder_disabled(dev_priv, pipe);
+
+	/* Write the TU size bits before fdi link training, so that error
+	 * detection works. */
+	I915_WRITE(FDI_RX_TUSIZE1(pipe),
+		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
+	/* For PCH output, training FDI link */
+	dev_priv->display.fdi_link_train(crtc);
+
+	/* XXX: PCH PLLs can be enabled any time before we enable the PCH
+	 * transcoder, and we actually should do this to not upset any PCH
+	 * transcoder that already uses the clock when we share it.
+	 *
+	 * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
+	 * unconditionally resets the pll - we need that to have the right LVDS
+	 * enable sequence. */
+	ironlake_enable_pch_pll(intel_crtc);
+
+	if (HAS_PCH_CPT(dev)) {
+		u32 sel;
+
+		temp = I915_READ(PCH_DPLL_SEL);
+		switch (pipe) {
+		default:
+		case 0:
+			temp |= TRANSA_DPLL_ENABLE;
+			sel = TRANSA_DPLLB_SEL;
+			break;
+		case 1:
+			temp |= TRANSB_DPLL_ENABLE;
+			sel = TRANSB_DPLLB_SEL;
+			break;
+		case 2:
+			temp |= TRANSC_DPLL_ENABLE;
+			sel = TRANSC_DPLLB_SEL;
+			break;
+		}
+		if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
+			temp |= sel;
+		else
+			temp &= ~sel;
+		I915_WRITE(PCH_DPLL_SEL, temp);
+	}
+
+	/* set transcoder timing, panel must allow it */
+	assert_panel_unlocked(dev_priv, pipe);
+	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
+	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
+	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
+
+	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
+	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
+	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
+	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
+
+	intel_fdi_normal_train(crtc);
+
+	/* For PCH DP, enable TRANS_DP_CTL */
+	if (HAS_PCH_CPT(dev) &&
+	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+		reg = TRANS_DP_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~(TRANS_DP_PORT_SEL_MASK |
+			  TRANS_DP_SYNC_MASK |
+			  TRANS_DP_BPC_MASK);
+		temp |= (TRANS_DP_OUTPUT_ENABLE |
+			 TRANS_DP_ENH_FRAMING);
+		temp |= bpc << 9; /* same format but at 11:9 */
+
+		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
+
+		switch (intel_trans_dp_port_sel(crtc)) {
+		case PCH_DP_B:
+			temp |= TRANS_DP_PORT_SEL_B;
+			break;
+		case PCH_DP_C:
+			temp |= TRANS_DP_PORT_SEL_C;
+			break;
+		case PCH_DP_D:
+			temp |= TRANS_DP_PORT_SEL_D;
+			break;
+		default:
+			BUG();
+		}
+
+		I915_WRITE(reg, temp);
+	}
+
+	ironlake_enable_pch_transcoder(dev_priv, pipe);
+}
+
+static void lpt_pch_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+
+	assert_transcoder_disabled(dev_priv, TRANSCODER_A);
+
+	lpt_program_iclkip(crtc);
+
+	/* Set transcoder timing. */
+	I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
+	I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
+	I915_WRITE(_TRANS_HSYNC_A,  I915_READ(HSYNC(cpu_transcoder)));
+
+	I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
+	I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
+	I915_WRITE(_TRANS_VSYNC_A,  I915_READ(VSYNC(cpu_transcoder)));
+	I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
+
+	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
+}
+
+static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
+{
+	struct intel_pch_pll *pll = intel_crtc->pch_pll;
+
+	if (pll == NULL)
+		return;
+
+	if (pll->refcount == 0) {
+		WARN(1, "bad PCH PLL refcount\n");
+		return;
+	}
+
+	--pll->refcount;
+	intel_crtc->pch_pll = NULL;
+}
+
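+/* PCH PLL allocation strategy: reuse the PLL already attached to this CRTC
+ * if there is one; on IBX use the fixed pipe->PLL mapping; otherwise try to
+ * share an in-use PLL with identical DPLL/FP settings, and only then fall
+ * back to grabbing a completely free one. */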
+static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
+{
+	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+	struct intel_pch_pll *pll;
+	int i;
+
+	pll = intel_crtc->pch_pll;
+	if (pll) {
+		DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
+			      intel_crtc->base.base.id, pll->pll_reg);
+		goto prepare;
+	}
+
+	if (HAS_PCH_IBX(dev_priv->dev)) {
+		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+		i = intel_crtc->pipe;
+		pll = &dev_priv->pch_plls[i];
+
+		DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
+			      intel_crtc->base.base.id, pll->pll_reg);
+
+		goto found;
+	}
+
+	for (i = 0; i < dev_priv->num_pch_pll; i++) {
+		pll = &dev_priv->pch_plls[i];
+
+		/* Only check PLLs that are already in use in this first pass */
+		if (pll->refcount == 0)
+			continue;
+
+		if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
+		    fp == I915_READ(pll->fp0_reg)) {
+			DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
+				      intel_crtc->base.base.id,
+				      pll->pll_reg, pll->refcount, pll->active);
+
+			goto found;
+		}
+	}
+
+	/* Ok no matching timings, maybe there's a free one? */
+	for (i = 0; i < dev_priv->num_pch_pll; i++) {
+		pll = &dev_priv->pch_plls[i];
+		if (pll->refcount == 0) {
+			DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
+				      intel_crtc->base.base.id, pll->pll_reg);
+			goto found;
+		}
+	}
+
+	return NULL;
+
+found:
+	intel_crtc->pch_pll = pll;
+	pll->refcount++;
+	DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
+prepare: /* separate function? */
+	DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
+
+	/* Wait for the clocks to stabilize before rewriting the regs */
+	I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+	POSTING_READ(pll->pll_reg);
+	udelay(150);
+
+	I915_WRITE(pll->fp0_reg, fp);
+	I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
+	pll->on = false;
+	return pll;
+}
+
+void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int dslreg = PIPEDSL(pipe);
+	u32 temp;
+
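+	/* Sample the pipe's scanline counter and verify that it advances;
+	 * if it is still stuck after two 5 ms waits, the pipe never started
+	 * scanning out after the mode set.
+	 */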
+	temp = I915_READ(dslreg);
+	udelay(500);
+	if (wait_for(I915_READ(dslreg) != temp, 5)) {
+		if (wait_for(I915_READ(dslreg) != temp, 5))
+			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
+	}
+}
+
+static void ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	u32 temp;
+
+	WARN_ON(!crtc->enabled);
+
+	if (intel_crtc->active)
+		return;
+
+	intel_crtc->active = true;
+	intel_update_watermarks(dev);
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		temp = I915_READ(PCH_LVDS);
+		if ((temp & LVDS_PORT_EN) == 0)
+			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+	}
+
+	if (intel_crtc->config.has_pch_encoder) {
+		/* Note: FDI PLL enabling _must_ be done before we enable the
+		 * cpu pipes, hence this is separate from all the other fdi/pch
+		 * enabling. */
+		ironlake_fdi_pll_enable(intel_crtc);
+	} else {
+		assert_fdi_tx_disabled(dev_priv, pipe);
+		assert_fdi_rx_disabled(dev_priv, pipe);
+	}
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder);
+
+	/* Enable panel fitting for LVDS */
+	if (dev_priv->pch_pf_size &&
+	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+		/* Force use of hard-coded filter coefficients
+		 * as some pre-programmed values are broken,
+		 * e.g. x201.
+		 */
+		if (IS_IVYBRIDGE(dev))
+			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+						 PF_PIPE_SEL_IVB(pipe));
+		else
+			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+	}
+
+	/*
+	 * On ILK+ LUT must be loaded before the pipe is running but with
+	 * clocks enabled
+	 */
+	intel_crtc_load_lut(crtc);
+
+	intel_enable_pipe(dev_priv, pipe,
+			  intel_crtc->config.has_pch_encoder);
+	intel_enable_plane(dev_priv, plane, pipe);
+
+	if (intel_crtc->config.has_pch_encoder)
+		ironlake_pch_enable(crtc);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_crtc_update_cursor(crtc, true);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
+
+	if (HAS_PCH_CPT(dev))
+		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+
+	/*
+	 * There seems to be a race in PCH platform hw (at least on some
+	 * outputs) where an enabled pipe still completes any pageflip right
+	 * away (as if the pipe is off) instead of waiting for vblank. As soon
+	 * as the first vblank happened, everything works as expected. Hence just
+	 * wait for one vblank before returning to avoid strange things
+	 * happening.
+	 */
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
+static void haswell_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+
+	WARN_ON(!crtc->enabled);
+
+	if (intel_crtc->active)
+		return;
+
+	intel_crtc->active = true;
+	intel_update_watermarks(dev);
+
+	if (intel_crtc->config.has_pch_encoder)
+		dev_priv->display.fdi_link_train(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder);
+
+	intel_ddi_enable_pipe_clock(intel_crtc);
+
+	/* Enable panel fitting for eDP */
+	if (dev_priv->pch_pf_size &&
+	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+		/* Force use of hard-coded filter coefficients
+		 * as some pre-programmed values are broken,
+		 * e.g. x201.
+		 */
+		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+					 PF_PIPE_SEL_IVB(pipe));
+		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+	}
+
+	/*
+	 * On ILK+ LUT must be loaded before the pipe is running but with
+	 * clocks enabled
+	 */
+	intel_crtc_load_lut(crtc);
+
+	intel_ddi_set_pipe_settings(crtc);
+	intel_ddi_enable_transcoder_func(crtc);
+
+	intel_enable_pipe(dev_priv, pipe,
+			  intel_crtc->config.has_pch_encoder);
+	intel_enable_plane(dev_priv, plane, pipe);
+
+	if (intel_crtc->config.has_pch_encoder)
+		lpt_pch_enable(crtc);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_crtc_update_cursor(crtc, true);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
+
+	/*
+	 * There seems to be a race in PCH platform hw (at least on some
+	 * outputs) where an enabled pipe still completes any pageflip right
+	 * away (as if the pipe is off) instead of waiting for vblank. As soon
+	 * as the first vblank happened, everything works as expected. Hence just
+	 * wait for one vblank before returning to avoid strange things
+	 * happening.
+	 */
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
+static void ironlake_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	u32 reg, temp;
+
+	if (!intel_crtc->active)
+		return;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);
+
+	intel_crtc_wait_for_pending_flips(crtc);
+	drm_vblank_off(dev, pipe);
+	intel_crtc_update_cursor(crtc, false);
+
+	intel_disable_plane(dev_priv, plane, pipe);
+
+	if (dev_priv->cfb_plane == plane)
+		intel_disable_fbc(dev);
+
+	intel_disable_pipe(dev_priv, pipe);
+
+	/* Disable PF */
+	I915_WRITE(PF_CTL(pipe), 0);
+	I915_WRITE(PF_WIN_SZ(pipe), 0);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_disable)
+			encoder->post_disable(encoder);
+
+	ironlake_fdi_disable(crtc);
+
+	ironlake_disable_pch_transcoder(dev_priv, pipe);
+
+	if (HAS_PCH_CPT(dev)) {
+		/* disable TRANS_DP_CTL */
+		reg = TRANS_DP_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+		temp |= TRANS_DP_PORT_SEL_NONE;
+		I915_WRITE(reg, temp);
+
+		/* disable DPLL_SEL */
+		temp = I915_READ(PCH_DPLL_SEL);
+		switch (pipe) {
+		case 0:
+			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+			break;
+		case 1:
+			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+			break;
+		case 2:
+			/* C shares PLL A or B */
+			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
+			break;
+		default:
+			BUG(); /* wtf */
+		}
+		I915_WRITE(PCH_DPLL_SEL, temp);
+	}
+
+	/* disable PCH DPLL */
+	intel_disable_pch_pll(intel_crtc);
+
+	ironlake_fdi_pll_disable(intel_crtc);
+
+	intel_crtc->active = false;
+	intel_update_watermarks(dev);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+static void haswell_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+
+	if (!intel_crtc->active)
+		return;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);
+
+	intel_crtc_wait_for_pending_flips(crtc);
+	drm_vblank_off(dev, pipe);
+	intel_crtc_update_cursor(crtc, false);
+
+	intel_disable_plane(dev_priv, plane, pipe);
+
+	if (dev_priv->cfb_plane == plane)
+		intel_disable_fbc(dev);
+
+	intel_disable_pipe(dev_priv, pipe);
+
+	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+
+	/* XXX: Once we have proper panel fitter state tracking implemented with
+	 * hardware state read/check support we should switch to only disable
+	 * the panel fitter when we know it's used. */
+	if (intel_using_power_well(dev)) {
+		I915_WRITE(PF_CTL(pipe), 0);
+		I915_WRITE(PF_WIN_SZ(pipe), 0);
+	}
+
+	intel_ddi_disable_pipe_clock(intel_crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_disable)
+			encoder->post_disable(encoder);
+
+	if (intel_crtc->config.has_pch_encoder) {
+		lpt_disable_pch_transcoder(dev_priv);
+		intel_ddi_fdi_disable(crtc);
+	}
+
+	intel_crtc->active = false;
+	intel_update_watermarks(dev);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+static void ironlake_crtc_off(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	intel_put_pch_pll(intel_crtc);
+}
+
+static void haswell_crtc_off(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
+	 * start using it. */
+	intel_crtc->config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+
+	intel_ddi_put_crtc_pll(crtc);
+}
+
+static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+{
+	if (!enable && intel_crtc->overlay) {
+		struct drm_device *dev = intel_crtc->base.dev;
+		struct drm_i915_private *dev_priv = dev->dev_private;
+
+		mutex_lock(&dev->struct_mutex);
+		dev_priv->mm.interruptible = false;
+		(void) intel_overlay_switch_off(intel_crtc->overlay);
+		dev_priv->mm.interruptible = true;
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	/* Let userspace switch the overlay on again. In most cases userspace
+	 * has to recompute where to put it anyway.
+	 */
+}
+
+/**
+ * g4x_fixup_plane - ugly workaround for G45 that briefly fires up the
+ * hardware cursor plane, if it is not already running, after enabling the
+ * display plane.
+ * This workaround avoids occasional blank screens when self refresh is
+ * enabled.
+ */
+static void
+g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	u32 cntl = I915_READ(CURCNTR(pipe));
+
+	if ((cntl & CURSOR_MODE) == 0) {
+		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
+
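+		/* Temporarily disable self refresh, flash a 64x64 ARGB
+		 * cursor for one vblank, then restore both registers. */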
+		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
+		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
+		intel_wait_for_vblank(dev_priv->dev, pipe);
+		I915_WRITE(CURCNTR(pipe), cntl);
+		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
+	}
+}
+
+static void i9xx_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+
+	WARN_ON(!crtc->enabled);
+
+	if (intel_crtc->active)
+		return;
+
+	intel_crtc->active = true;
+	intel_update_watermarks(dev);
+
+	intel_enable_pll(dev_priv, pipe);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder);
+
+	intel_enable_pipe(dev_priv, pipe, false);
+	intel_enable_plane(dev_priv, plane, pipe);
+	if (IS_G4X(dev))
+		g4x_fixup_plane(dev_priv, pipe);
+
+	intel_crtc_load_lut(crtc);
+	intel_update_fbc(dev);
+
+	/* Give the overlay scaler a chance to enable if it's on this pipe */
+	intel_crtc_dpms_overlay(intel_crtc, true);
+	intel_crtc_update_cursor(crtc, true);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
+}
+
+static void i9xx_pfit_disable(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+	uint32_t pctl = I915_READ(PFIT_CONTROL);
+
+	assert_pipe_disabled(dev_priv, crtc->pipe);
+
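+	/* On gen4+ PFIT_CONTROL records which pipe the panel fitter is bound
+	 * to; on older parts it is hardwired to pipe B. Only disable it if it
+	 * is actually feeding this CRTC's pipe. */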
+	if (INTEL_INFO(dev)->gen >= 4)
+		pipe = (pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT;
+	else
+		pipe = PIPE_B;
+
+	if (pipe == crtc->pipe) {
+		DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", pctl);
+		I915_WRITE(PFIT_CONTROL, 0);
+	}
+}
+
+static void i9xx_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+
+	if (!intel_crtc->active)
+		return;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);
+
+	/* Give the overlay scaler a chance to disable if it's on this pipe */
+	intel_crtc_wait_for_pending_flips(crtc);
+	drm_vblank_off(dev, pipe);
+	intel_crtc_dpms_overlay(intel_crtc, false);
+	intel_crtc_update_cursor(crtc, false);
+
+	if (dev_priv->cfb_plane == plane)
+		intel_disable_fbc(dev);
+
+	intel_disable_plane(dev_priv, plane, pipe);
+	intel_disable_pipe(dev_priv, pipe);
+
+	i9xx_pfit_disable(intel_crtc);
+
+	intel_disable_pll(dev_priv, pipe);
+
+	intel_crtc->active = false;
+	intel_update_fbc(dev);
+	intel_update_watermarks(dev);
+}
+
+static void i9xx_crtc_off(struct drm_crtc *crtc)
+{
+}
+
+static void intel_crtc_update_sarea(struct drm_crtc *crtc,
+				    bool enabled)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_master_private *master_priv;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+
+	if (!dev->primary->master)
+		return;
+
+	master_priv = dev->primary->master->driver_priv;
+	if (!master_priv->sarea_priv)
+		return;
+
+	switch (pipe) {
+	case 0:
+		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
+		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
+		break;
+	case 1:
+		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
+		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
+		break;
+	default:
+		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
+		break;
+	}
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ */
+void intel_crtc_update_dpms(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	bool enable = false;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+		enable |= intel_encoder->connectors_active;
+
+	if (enable)
+		dev_priv->display.crtc_enable(crtc);
+	else
+		dev_priv->display.crtc_disable(crtc);
+
+	intel_crtc_update_sarea(crtc, enable);
+}
+
+static void intel_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_connector *connector;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	/* crtc should still be enabled when we disable it. */
+	WARN_ON(!crtc->enabled);
+
+	intel_crtc->eld_vld = false;
+	dev_priv->display.crtc_disable(crtc);
+	intel_crtc_update_sarea(crtc, false);
+	dev_priv->display.off(crtc);
+
+	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
+
+	if (crtc->fb) {
+		mutex_lock(&dev->struct_mutex);
+		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+		mutex_unlock(&dev->struct_mutex);
+		crtc->fb = NULL;
+	}
+
+	/* Update computed state. */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (!connector->encoder || !connector->encoder->crtc)
+			continue;
+
+		if (connector->encoder->crtc != crtc)
+			continue;
+
+		connector->dpms = DRM_MODE_DPMS_OFF;
+		to_intel_encoder(connector->encoder)->connectors_active = false;
+	}
+}
+
+void intel_modeset_disable(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc->enabled)
+			intel_crtc_disable(crtc);
+	}
+}
+
+void intel_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
+}
+
+/* Simple dpms helper for encoders with just one connector, no cloning and only
+ * one kind of off state. It clamps all !ON modes to fully OFF and changes the
+ * state of the entire output pipe. */
+void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
+{
+	if (mode == DRM_MODE_DPMS_ON) {
+		encoder->connectors_active = true;
+
+		intel_crtc_update_dpms(encoder->base.crtc);
+	} else {
+		encoder->connectors_active = false;
+
+		intel_crtc_update_dpms(encoder->base.crtc);
+	}
+}
+
+/* Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency). */
+static void intel_connector_check_state(struct intel_connector *connector)
+{
+	if (connector->get_hw_state(connector)) {
+		struct intel_encoder *encoder = connector->encoder;
+		struct drm_crtc *crtc;
+		bool encoder_enabled;
+		enum pipe pipe;
+
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+			      connector->base.base.id,
+			      drm_get_connector_name(&connector->base));
+
+		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+		     "wrong connector dpms state\n");
+		WARN(connector->base.encoder != &encoder->base,
+		     "active connector not linked to encoder\n");
+		WARN(!encoder->connectors_active,
+		     "encoder->connectors_active not set\n");
+
+		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+		WARN(!encoder_enabled, "encoder not enabled\n");
+		if (WARN_ON(!encoder->base.crtc))
+			return;
+
+		crtc = encoder->base.crtc;
+
+		WARN(!crtc->enabled, "crtc not enabled\n");
+		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+		WARN(pipe != to_intel_crtc(crtc)->pipe,
+		     "encoder active on the wrong pipe\n");
+	}
+}
+
+/* Even simpler default implementation, if there's really no special case to
+ * consider. */
+void intel_connector_dpms(struct drm_connector *connector, int mode)
+{
+	/* All the simple cases only support two dpms states. */
+	if (mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
+
+	if (mode == connector->dpms)
+		return;
+
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	if (connector->encoder)
+		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
+
+	intel_modeset_check_state(connector->dev);
+}
+
+/* Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning and hence the encoder state determines the state
+ * of the connector. */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
+{
+	enum pipe pipe = 0;
+	struct intel_encoder *encoder = connector->encoder;
+
+	return encoder->get_hw_state(encoder, &pipe);
+}
+
+static bool intel_crtc_compute_config(struct drm_crtc *crtc,
+				      struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		/* FDI link clock is fixed at 2.7 GHz */
+		if (pipe_config->requested_mode.clock * 3
+		    > IRONLAKE_FDI_FREQ * 4)
+			return false;
+	}
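+	/* Spelled out, the check above is plain algebra: clock * 3 >
+	 * IRONLAKE_FDI_FREQ * 4 is the same as clock > IRONLAKE_FDI_FREQ * 4 / 3,
+	 * i.e. modes whose dotclock exceeds 4/3 of the fixed FDI link
+	 * frequency are rejected. */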
+
+	/* All interlaced capable intel hw wants timings in frames. Note though
+	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
+	 * timings, so we need to be careful not to clobber these. */
+	if (!pipe_config->timings_set)
+		drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+	/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
+	 * with a hsync front porch of 0.
+	 */
+	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
+		return false;
+
+	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
+		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
+	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
+		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
+		 * for lvds. */
+		pipe_config->pipe_bpp = 8*3;
+	}
+
+	return true;
+}
+
+static int valleyview_get_display_clock_speed(struct drm_device *dev)
+{
+	return 400000; /* FIXME */
+}
+
+static int i945_get_display_clock_speed(struct drm_device *dev)
+{
+	return 400000;
+}
+
+static int i915_get_display_clock_speed(struct drm_device *dev)
+{
+	return 333000;
+}
+
+static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
+{
+	return 200000;
+}
+
+static int i915gm_get_display_clock_speed(struct drm_device *dev)
+{
+	u16 gcfgc = 0;
+
+	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
+		return 133000;
+	else {
+		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+		case GC_DISPLAY_CLOCK_333_MHZ:
+			return 333000;
+		default:
+		case GC_DISPLAY_CLOCK_190_200_MHZ:
+			return 190000;
+		}
+	}
+}
+
+static int i865_get_display_clock_speed(struct drm_device *dev)
+{
+	return 266000;
+}
+
+static int i855_get_display_clock_speed(struct drm_device *dev)
+{
+	u16 hpllcc = 0;
+	/* Assume that the hardware is in the high speed state.  This
+	 * should be the default.
+	 */
+	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
+	case GC_CLOCK_133_200:
+	case GC_CLOCK_100_200:
+		return 200000;
+	case GC_CLOCK_166_250:
+		return 250000;
+	case GC_CLOCK_100_133:
+		return 133000;
+	}
+
+	/* Shouldn't happen */
+	return 0;
+}
+
+static int i830_get_display_clock_speed(struct drm_device *dev)
+{
+	return 133000;
+}
+
+static void
+intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
+{
+	while (*num > DATA_LINK_M_N_MASK ||
+	       *den > DATA_LINK_M_N_MASK) {
+		*num >>= 1;
+		*den >>= 1;
+	}
+}
+
+static void compute_m_n(unsigned int m, unsigned int n,
+			uint32_t *ret_m, uint32_t *ret_n)
+{
+	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
+	*ret_m = div_u64((uint64_t) m * *ret_n, n);
+	intel_reduce_m_n_ratio(ret_m, ret_n);
+}
+
+void
+intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+		       int pixel_clock, int link_clock,
+		       struct intel_link_m_n *m_n)
+{
+	m_n->tu = 64;
+
+	compute_m_n(bits_per_pixel * pixel_clock,
+		    link_clock * nlanes * 8,
+		    &m_n->gmch_m, &m_n->gmch_n);
+
+	compute_m_n(pixel_clock, link_clock,
+		    &m_n->link_m, &m_n->link_n);
+}
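+
+/*
+ * Worked example for the M/N computation above (illustrative numbers, not
+ * tied to any particular panel): 24 bpp at a 148500 kHz pixel clock over
+ * 4 lanes of a 270000 kHz link gives a data M/N ratio of
+ * 24 * 148500 / (270000 * 4 * 8) = 0.4125, i.e. roughly 41% link
+ * utilization. compute_m_n() rounds N up to a power of two (capped at
+ * DATA_LINK_N_MAX) so the ratio survives the fixed-width registers with
+ * minimal rounding error, and intel_reduce_m_n_ratio() then halves both
+ * values until they fit DATA_LINK_M_N_MASK.
+ */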
+
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+	if (i915_panel_use_ssc >= 0)
+		return i915_panel_use_ssc != 0;
+	return dev_priv->lvds_use_ssc
+		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
+
+static int vlv_get_refclk(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int refclk = 27000; /* for DP & HDMI */
+
+	/* 100 MHz is the only refclk validated so far, which makes the
+	 * per-output selection below dead code for the moment. */
+	return 100000;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+		refclk = 96000;
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+		if (intel_panel_use_ssc(dev_priv))
+			refclk = 100000;
+		else
+			refclk = 96000;
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+		refclk = 100000;
+	}
+
+	return refclk;
+}
+
+static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int refclk;
+
+	if (IS_VALLEYVIEW(dev)) {
+		refclk = vlv_get_refclk(crtc);
+	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+		refclk = dev_priv->lvds_ssc_freq * 1000;
+		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+			      refclk / 1000);
+	} else if (!IS_GEN2(dev)) {
+		refclk = 96000;
+	} else {
+		refclk = 48000;
+	}
+
+	return refclk;
+}
+
+static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc *crtc)
+{
+	unsigned dotclock = crtc->config.adjusted_mode.clock;
+	struct dpll *clock = &crtc->config.dpll;
+
+	/* SDVO TV has fixed PLL values that depend on its clock range;
+	   this mirrors the VBIOS setting. */
+	if (dotclock >= 100000 && dotclock < 140500) {
+		clock->p1 = 2;
+		clock->p2 = 10;
+		clock->n = 3;
+		clock->m1 = 16;
+		clock->m2 = 8;
+	} else if (dotclock >= 140500 && dotclock <= 200000) {
+		clock->p1 = 1;
+		clock->p2 = 10;
+		clock->n = 6;
+		clock->m1 = 12;
+		clock->m2 = 8;
+	}
+
+	crtc->config.clock_set = true;
+}
+
+static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
+				     intel_clock_t *reduced_clock)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+	u32 fp, fp2 = 0;
+	struct dpll *clock = &crtc->config.dpll;
+
+	if (IS_PINEVIEW(dev)) {
+		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
+		if (reduced_clock)
+			fp2 = (1 << reduced_clock->n) << 16 |
+				reduced_clock->m1 << 8 | reduced_clock->m2;
+	} else {
+		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
+		if (reduced_clock)
+			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
+				reduced_clock->m2;
+	}
+
+	I915_WRITE(FP0(pipe), fp);
+
+	crtc->lowfreq_avail = false;
+	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+	    reduced_clock && i915_powersave) {
+		I915_WRITE(FP1(pipe), fp2);
+		crtc->lowfreq_avail = true;
+	} else {
+		I915_WRITE(FP1(pipe), fp);
+	}
+}
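+
+/*
+ * Layout sketch for the FP0/FP1 values written above (illustrative
+ * dividers): the fields are packed as n << 16 | m1 << 8 | m2, so the
+ * SDVO TV dividers n = 3, m1 = 16, m2 = 8 from i9xx_adjust_sdvo_tv_clock()
+ * would be written as 0x00031008. Pineview encodes n as a power of two
+ * instead, hence the (1 << n) << 16 variant.
+ */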
+
+static void intel_dp_set_m_n(struct intel_crtc *crtc)
+{
+	if (crtc->config.has_pch_encoder)
+		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+	else
+		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+}
+
+static void vlv_update_pll(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+	u32 dpll, mdiv, pdiv;
+	u32 bestn, bestm1, bestm2, bestp1, bestp2;
+	bool is_sdvo;
+	u32 temp;
+
+	mutex_lock(&dev_priv->dpio_lock);
+
+	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
+		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
+
+	dpll = DPLL_VGA_MODE_DIS;
+	dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
+	dpll |= DPLL_REFA_CLK_ENABLE_VLV;
+	dpll |= DPLL_INTEGRATED_CLOCK_VLV;
+
+	I915_WRITE(DPLL(pipe), dpll);
+	POSTING_READ(DPLL(pipe));
+
+	bestn = crtc->config.dpll.n;
+	bestm1 = crtc->config.dpll.m1;
+	bestm2 = crtc->config.dpll.m2;
+	bestp1 = crtc->config.dpll.p1;
+	bestp2 = crtc->config.dpll.p2;
+
+	/*
+	 * On Valleyview the PLL and lane counter registers are programmed
+	 * through the DPIO interface.
+	 */
+	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
+	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
+	mdiv |= ((bestn << DPIO_N_SHIFT));
+	mdiv |= (1 << DPIO_POST_DIV_SHIFT);
+	mdiv |= (1 << DPIO_K_SHIFT);
+	mdiv |= DPIO_ENABLE_CALIBRATION;
+	intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+
+	intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
+
+	pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
+		(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
+		(7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
+		(5 << DPIO_CLK_BIAS_CTL_SHIFT);
+	intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
+
+	intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
+
+	dpll |= DPLL_VCO_ENABLE;
+	I915_WRITE(DPLL(pipe), dpll);
+	POSTING_READ(DPLL(pipe));
+	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+		DRM_ERROR("DPLL %d failed to lock\n", pipe);
+
+	intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
+
+	if (crtc->config.has_dp_encoder)
+		intel_dp_set_m_n(crtc);
+
+	I915_WRITE(DPLL(pipe), dpll);
+
+	/* Wait for the clocks to stabilize. */
+	POSTING_READ(DPLL(pipe));
+	udelay(150);
+
+	temp = 0;
+	if (is_sdvo && crtc->config.pixel_multiplier > 1)
+		temp = (crtc->config.pixel_multiplier - 1)
+			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
+	I915_WRITE(DPLL_MD(pipe), temp);
+	POSTING_READ(DPLL_MD(pipe));
+
+	/* Now program lane control registers */
+	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
+	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
+		temp = 0x1000C4;
+		if (pipe == 1)
+			temp |= (1 << 21);
+		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
+	}
+
+	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) {
+		temp = 0x1000C4;
+		if (pipe == 1)
+			temp |= (1 << 21);
+		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
+	}
+
+	mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void i9xx_update_pll(struct intel_crtc *crtc,
+			    intel_clock_t *reduced_clock,
+			    int num_connectors,
+			    bool needs_tv_clock)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
+	int pipe = crtc->pipe;
+	u32 dpll;
+	bool is_sdvo;
+	struct dpll *clock = &crtc->config.dpll;
+
+	i9xx_update_pll_dividers(crtc, reduced_clock);
+
+	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
+		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
+
+	dpll = DPLL_VGA_MODE_DIS;
+
+	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
+		dpll |= DPLLB_MODE_LVDS;
+	else
+		dpll |= DPLLB_MODE_DAC_SERIAL;
+
+	if (is_sdvo) {
+		if ((crtc->config.pixel_multiplier > 1) &&
+		    (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))) {
+			dpll |= (crtc->config.pixel_multiplier - 1)
+				<< SDVO_MULTIPLIER_SHIFT_HIRES;
+		}
+		dpll |= DPLL_DVO_HIGH_SPEED;
+	}
+	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
+		dpll |= DPLL_DVO_HIGH_SPEED;
+
+	/* compute bitmask from p1 value */
+	if (IS_PINEVIEW(dev))
+		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+	else {
+		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+		if (IS_G4X(dev) && reduced_clock)
+			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+	}
+	switch (clock->p2) {
+	case 5:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+		break;
+	case 7:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+		break;
+	case 10:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+		break;
+	case 14:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+		break;
+	}
+	if (INTEL_INFO(dev)->gen >= 4)
+		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+
+	if (is_sdvo && needs_tv_clock)
+		dpll |= PLL_REF_INPUT_TVCLKINBC;
+	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
+		/* XXX: just matching BIOS for now */
+		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
+		dpll |= 3;
+	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+	else
+		dpll |= PLL_REF_INPUT_DREFCLK;
+
+	dpll |= DPLL_VCO_ENABLE;
+	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+	POSTING_READ(DPLL(pipe));
+	udelay(150);
+
+	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder);
+
+	if (crtc->config.has_dp_encoder)
+		intel_dp_set_m_n(crtc);
+
+	I915_WRITE(DPLL(pipe), dpll);
+
+	/* Wait for the clocks to stabilize. */
+	POSTING_READ(DPLL(pipe));
+	udelay(150);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		u32 temp = 0;
+		if (is_sdvo && crtc->config.pixel_multiplier > 1)
+			temp = (crtc->config.pixel_multiplier - 1)
+				<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
+		I915_WRITE(DPLL_MD(pipe), temp);
+	} else {
+		/* The pixel multiplier can only be updated once the
+		 * DPLL is enabled and the clocks are stable.
+		 *
+		 * So write it again.
+		 */
+		I915_WRITE(DPLL(pipe), dpll);
+	}
+}
+
+static void i8xx_update_pll(struct intel_crtc *crtc,
+			    struct drm_display_mode *adjusted_mode,
+			    intel_clock_t *reduced_clock,
+			    int num_connectors)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
+	int pipe = crtc->pipe;
+	u32 dpll;
+	struct dpll *clock = &crtc->config.dpll;
+
+	i9xx_update_pll_dividers(crtc, reduced_clock);
+
+	dpll = DPLL_VGA_MODE_DIS;
+
+	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
+		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+	} else {
+		if (clock->p1 == 2)
+			dpll |= PLL_P1_DIVIDE_BY_TWO;
+		else
+			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+		if (clock->p2 == 4)
+			dpll |= PLL_P2_DIVIDE_BY_4;
+	}
+
+	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+	else
+		dpll |= PLL_REF_INPUT_DREFCLK;
+
+	dpll |= DPLL_VCO_ENABLE;
+	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+	POSTING_READ(DPLL(pipe));
+	udelay(150);
+
+	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder);
+
+	I915_WRITE(DPLL(pipe), dpll);
+
+	/* Wait for the clocks to stabilize. */
+	POSTING_READ(DPLL(pipe));
+	udelay(150);
+
+	/* The pixel multiplier can only be updated once the
+	 * DPLL is enabled and the clocks are stable.
+	 *
+	 * So write it again.
+	 */
+	I915_WRITE(DPLL(pipe), dpll);
+}
+
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_crtc->pipe;
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+	uint32_t vsyncshift;
+
+	if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		/* the chip adds 2 halflines automatically */
+		adjusted_mode->crtc_vtotal -= 1;
+		adjusted_mode->crtc_vblank_end -= 1;
+		vsyncshift = adjusted_mode->crtc_hsync_start
+			     - adjusted_mode->crtc_htotal / 2;
+	} else {
+		vsyncshift = 0;
+	}
+
+	if (INTEL_INFO(dev)->gen > 3)
+		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+
+	I915_WRITE(HTOTAL(cpu_transcoder),
+		   (adjusted_mode->crtc_hdisplay - 1) |
+		   ((adjusted_mode->crtc_htotal - 1) << 16));
+	I915_WRITE(HBLANK(cpu_transcoder),
+		   (adjusted_mode->crtc_hblank_start - 1) |
+		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
+	I915_WRITE(HSYNC(cpu_transcoder),
+		   (adjusted_mode->crtc_hsync_start - 1) |
+		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+	I915_WRITE(VTOTAL(cpu_transcoder),
+		   (adjusted_mode->crtc_vdisplay - 1) |
+		   ((adjusted_mode->crtc_vtotal - 1) << 16));
+	I915_WRITE(VBLANK(cpu_transcoder),
+		   (adjusted_mode->crtc_vblank_start - 1) |
+		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
+	I915_WRITE(VSYNC(cpu_transcoder),
+		   (adjusted_mode->crtc_vsync_start - 1) |
+		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
+	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
+	 * bits. */
+	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+	    (pipe == PIPE_B || pipe == PIPE_C))
+		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+
+	/* pipesrc controls the size that is scaled from, which should
+	 * always be the user's requested size.
+	 */
+	I915_WRITE(PIPESRC(pipe),
+		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+}
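+
+/*
+ * Example of the timing register packing above (illustrative numbers from
+ * a standard 1920x1080 timing with htotal = 2200): each register carries
+ * (active - 1) in its low half and (total - 1) in its high half, so
+ * HTOTAL would be programmed to (1920 - 1) | ((2200 - 1) << 16)
+ * = 0x0897077f.
+ */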
+
+static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t pipeconf;
+
+	pipeconf = I915_READ(PIPECONF(intel_crtc->pipe));
+
+	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
+		pipeconf |= PIPECONF_ENABLE;
+
+	if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
+		/* Enable pixel doubling when the dot clock is > 90% of the (display)
+		 * core speed.
+		 *
+		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
+		 * pipe == 0 check?
+		 */
+		if (intel_crtc->config.requested_mode.clock >
+		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
+			pipeconf |= PIPECONF_DOUBLE_WIDE;
+		else
+			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
+	}
+
+	/* default to 8bpc */
+	pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
+	if (intel_crtc->config.has_dp_encoder) {
+		if (intel_crtc->config.dither) {
+			pipeconf |= PIPECONF_6BPC |
+				    PIPECONF_DITHER_EN |
+				    PIPECONF_DITHER_TYPE_SP;
+		}
+	}
+
+	if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(&intel_crtc->base,
+						      INTEL_OUTPUT_EDP)) {
+		if (intel_crtc->config.dither) {
+			pipeconf |= PIPECONF_6BPC |
+					PIPECONF_ENABLE |
+					I965_PIPECONF_ACTIVE;
+		}
+	}
+
+	if (HAS_PIPE_CXSR(dev)) {
+		if (intel_crtc->lowfreq_avail) {
+			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
+			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+		} else {
+			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+		}
+	}
+
+	pipeconf &= ~PIPECONF_INTERLACE_MASK;
+	if (!IS_GEN2(dev) &&
+	    intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+	else
+		pipeconf |= PIPECONF_PROGRESSIVE;
+
+	if (IS_VALLEYVIEW(dev)) {
+		if (intel_crtc->config.limited_color_range)
+			pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+		else
+			pipeconf &= ~PIPECONF_COLOR_RANGE_SELECT;
+	}
+
+	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
+	POSTING_READ(PIPECONF(intel_crtc->pipe));
+}
+
+static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+			      int x, int y,
+			      struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
+	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	int refclk, num_connectors = 0;
+	intel_clock_t clock, reduced_clock;
+	u32 dspcntr;
+	bool ok, has_reduced_clock = false, is_sdvo = false;
+	bool is_lvds = false, is_tv = false;
+	struct intel_encoder *encoder;
+	const intel_limit_t *limit;
+	int ret;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_SDVO:
+		case INTEL_OUTPUT_HDMI:
+			is_sdvo = true;
+			if (encoder->needs_tv_clock)
+				is_tv = true;
+			break;
+		case INTEL_OUTPUT_TVOUT:
+			is_tv = true;
+			break;
+		}
+
+		num_connectors++;
+	}
+
+	refclk = i9xx_get_refclk(crtc, num_connectors);
+
+	/*
+	 * Returns a set of divisors for the desired target clock with the given
+	 * refclk, or FALSE.  The returned values represent the clock equation:
+	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+	 */
+	limit = intel_limit(crtc, refclk);
+	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+			     &clock);
+	if (!ok) {
+		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	/* Ensure that the cursor is valid for the new mode before changing... */
+	intel_crtc_update_cursor(crtc, true);
+
+	if (is_lvds && dev_priv->lvds_downclock_avail) {
+		/*
+		 * Ensure we match the reduced clock's P to the target clock.
+		 * If the clocks don't match, we can't switch the display clock
+		 * by using the FP0/FP1. In that case we will disable the LVDS
+		 * downclock feature.
+		 */
+		has_reduced_clock = limit->find_pll(limit, crtc,
+						    dev_priv->lvds_downclock,
+						    refclk,
+						    &clock,
+						    &reduced_clock);
+	}
+	/* Compat-code for transition, will disappear. */
+	if (!intel_crtc->config.clock_set) {
+		intel_crtc->config.dpll.n = clock.n;
+		intel_crtc->config.dpll.m1 = clock.m1;
+		intel_crtc->config.dpll.m2 = clock.m2;
+		intel_crtc->config.dpll.p1 = clock.p1;
+		intel_crtc->config.dpll.p2 = clock.p2;
+	}
+
+	if (is_sdvo && is_tv)
+		i9xx_adjust_sdvo_tv_clock(intel_crtc);
+
+	if (IS_GEN2(dev))
+		i8xx_update_pll(intel_crtc, adjusted_mode,
+				has_reduced_clock ? &reduced_clock : NULL,
+				num_connectors);
+	else if (IS_VALLEYVIEW(dev))
+		vlv_update_pll(intel_crtc);
+	else
+		i9xx_update_pll(intel_crtc,
+				has_reduced_clock ? &reduced_clock : NULL,
+				num_connectors,
+				is_sdvo && is_tv);
+
+	/* Set up the display plane register */
+	dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+	if (!IS_VALLEYVIEW(dev)) {
+		if (pipe == 0)
+			dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
+		else
+			dspcntr |= DISPPLANE_SEL_PIPE_B;
+	}
+
+	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+	drm_mode_debug_printmodeline(mode);
+
+	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+
+	/* pipesrc and dspsize control the size that is scaled from,
+	 * which should always be the user's requested size.
+	 */
+	I915_WRITE(DSPSIZE(plane),
+		   ((mode->vdisplay - 1) << 16) |
+		   (mode->hdisplay - 1));
+	I915_WRITE(DSPPOS(plane), 0);
+
+	i9xx_set_pipeconf(intel_crtc);
+
+	intel_enable_pipe(dev_priv, pipe, false);
+
+	intel_wait_for_vblank(dev, pipe);
+
+	I915_WRITE(DSPCNTR(plane), dspcntr);
+	POSTING_READ(DSPCNTR(plane));
+
+	ret = intel_pipe_set_base(crtc, x, y, fb);
+
+	intel_update_watermarks(dev);
+
+	return ret;
+}
+
+static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
+				 struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = I915_READ(PIPECONF(crtc->pipe));
+	if (!(tmp & PIPECONF_ENABLE))
+		return false;
+
+	return true;
+}
+
+static void ironlake_init_pch_refclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+	u32 val, final;
+	bool has_lvds = false;
+	bool has_cpu_edp = false;
+	bool has_pch_edp = false;
+	bool has_panel = false;
+	bool has_ck505 = false;
+	bool can_ssc = false;
+
+	/* We need to take the global config into account */
+	list_for_each_entry(encoder, &mode_config->encoder_list,
+			    base.head) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			has_panel = true;
+			has_lvds = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			has_panel = true;
+			if (intel_encoder_is_pch_edp(&encoder->base))
+				has_pch_edp = true;
+			else
+				has_cpu_edp = true;
+			break;
+		}
+	}
+
+	if (HAS_PCH_IBX(dev)) {
+		has_ck505 = dev_priv->display_clock_mode;
+		can_ssc = has_ck505;
+	} else {
+		has_ck505 = false;
+		can_ssc = true;
+	}
+
+	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
+		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
+		      has_ck505);
+
+	/* Ironlake: try to set up the display ref clock before DPLL
+	 * enabling. This is only under the driver's control after
+	 * PCH B stepping; earlier chipset steppings ignore this setting.
+	 */
+	val = I915_READ(PCH_DREF_CONTROL);
+
+	/* As we must carefully and slowly disable/enable each source in turn,
+	 * compute the final state we want first and check if we need to
+	 * make any changes at all.
+	 */
+	final = val;
+	final &= ~DREF_NONSPREAD_SOURCE_MASK;
+	if (has_ck505)
+		final |= DREF_NONSPREAD_CK505_ENABLE;
+	else
+		final |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+	final &= ~DREF_SSC_SOURCE_MASK;
+	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+	final &= ~DREF_SSC1_ENABLE;
+
+	if (has_panel) {
+		final |= DREF_SSC_SOURCE_ENABLE;
+
+		if (intel_panel_use_ssc(dev_priv) && can_ssc)
+			final |= DREF_SSC1_ENABLE;
+
+		if (has_cpu_edp) {
+			if (intel_panel_use_ssc(dev_priv) && can_ssc)
+				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+			else
+				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+		} else
+			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+	} else {
+		final |= DREF_SSC_SOURCE_DISABLE;
+		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+	}
+
+	if (final == val)
+		return;
+
+	/* Always enable nonspread source */
+	val &= ~DREF_NONSPREAD_SOURCE_MASK;
+
+	if (has_ck505)
+		val |= DREF_NONSPREAD_CK505_ENABLE;
+	else
+		val |= DREF_NONSPREAD_SOURCE_ENABLE;
+
+	if (has_panel) {
+		val &= ~DREF_SSC_SOURCE_MASK;
+		val |= DREF_SSC_SOURCE_ENABLE;
+
+		/* SSC must be turned on before enabling the CPU output  */
+		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+			DRM_DEBUG_KMS("Using SSC on panel\n");
+			val |= DREF_SSC1_ENABLE;
+		} else
+			val &= ~DREF_SSC1_ENABLE;
+
+		/* Get SSC going before enabling the outputs */
+		I915_WRITE(PCH_DREF_CONTROL, val);
+		POSTING_READ(PCH_DREF_CONTROL);
+		udelay(200);
+
+		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+		/* Enable CPU source on CPU attached eDP */
+		if (has_cpu_edp) {
+			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+				DRM_DEBUG_KMS("Using SSC on eDP\n");
+				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+			} else
+				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+		} else
+			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+		I915_WRITE(PCH_DREF_CONTROL, val);
+		POSTING_READ(PCH_DREF_CONTROL);
+		udelay(200);
+	} else {
+		DRM_DEBUG_KMS("Disabling SSC entirely\n");
+
+		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+		/* Turn off CPU output */
+		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+
+		I915_WRITE(PCH_DREF_CONTROL, val);
+		POSTING_READ(PCH_DREF_CONTROL);
+		udelay(200);
+
+		/* Turn off the SSC source */
+		val &= ~DREF_SSC_SOURCE_MASK;
+		val |= DREF_SSC_SOURCE_DISABLE;
+
+		/* Turn off SSC1 */
+		val &= ~DREF_SSC1_ENABLE;
+
+		I915_WRITE(PCH_DREF_CONTROL, val);
+		POSTING_READ(PCH_DREF_CONTROL);
+		udelay(200);
+	}
+
+	BUG_ON(val != final);
+}
+
+/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+	bool has_vga = false;
+	bool is_sdv = false;
+	u32 tmp;
+
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_ANALOG:
+			has_vga = true;
+			break;
+		}
+	}
+
+	if (!has_vga)
+		return;
+
+	mutex_lock(&dev_priv->dpio_lock);
+
+	/* XXX: Rip out SDV support once Haswell ships for real. */
+	if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
+		is_sdv = true;
+
+	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+	tmp &= ~SBI_SSCCTL_DISABLE;
+	tmp |= SBI_SSCCTL_PATHALT;
+	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+	udelay(24);
+
+	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+	tmp &= ~SBI_SSCCTL_PATHALT;
+	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+	if (!is_sdv) {
+		tmp = I915_READ(SOUTH_CHICKEN2);
+		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+		I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+			DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+		tmp = I915_READ(SOUTH_CHICKEN2);
+		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+		I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+				        FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
+				       100))
+			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+	tmp &= ~(0xFF << 24);
+	tmp |= (0x12 << 24);
+	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+	if (is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
+		tmp |= 0x7FFF;
+		intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+	tmp |= (1 << 11);
+	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+	tmp |= (1 << 11);
+	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+	if (is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
+		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+		intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
+		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+		intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
+		tmp |= (0x3F << 8);
+		intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
+		tmp |= (0x3F << 8);
+		intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+	if (!is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+		tmp &= ~(7 << 13);
+		tmp |= (5 << 13);
+		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+		tmp &= ~(7 << 13);
+		tmp |= (5 << 13);
+		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+	tmp &= ~0xFF;
+	tmp |= 0x1C;
+	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+	tmp &= ~0xFF;
+	tmp |= 0x1C;
+	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+	tmp &= ~(0xFF << 16);
+	tmp |= (0x1C << 16);
+	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+	tmp &= ~(0xFF << 16);
+	tmp |= (0x1C << 16);
+	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+	if (!is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+		tmp |= (1 << 27);
+		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+		tmp |= (1 << 27);
+		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+		tmp &= ~(0xF << 28);
+		tmp |= (4 << 28);
+		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+		tmp &= ~(0xF << 28);
+		tmp |= (4 << 28);
+		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+	}
+
+	/* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
+	tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
+	tmp |= SBI_DBUFF0_ENABLE;
+	intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_device *dev)
+{
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		ironlake_init_pch_refclk(dev);
+	else if (HAS_PCH_LPT(dev))
+		lpt_init_pch_refclk(dev);
+}
+
+static int ironlake_get_refclk(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
+	struct intel_encoder *edp_encoder = NULL;
+	int num_connectors = 0;
+	bool is_lvds = false;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			edp_encoder = encoder;
+			break;
+		}
+		num_connectors++;
+	}
+
+	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+			      dev_priv->lvds_ssc_freq);
+		return dev_priv->lvds_ssc_freq * 1000;
+	}
+
+	return 120000;
+}
+
+static void ironlake_set_pipeconf(struct drm_crtc *crtc,
+				  struct drm_display_mode *adjusted_mode,
+				  bool dither)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	uint32_t val;
+
+	val = I915_READ(PIPECONF(pipe));
+
+	val &= ~PIPECONF_BPC_MASK;
+	switch (intel_crtc->config.pipe_bpp) {
+	case 18:
+		val |= PIPECONF_6BPC;
+		break;
+	case 24:
+		val |= PIPECONF_8BPC;
+		break;
+	case 30:
+		val |= PIPECONF_10BPC;
+		break;
+	case 36:
+		val |= PIPECONF_12BPC;
+		break;
+	default:
+		/* Case prevented by intel_choose_pipe_bpp_dither. */
+		BUG();
+	}
+
+	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+	if (dither)
+		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+	val &= ~PIPECONF_INTERLACE_MASK;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		val |= PIPECONF_INTERLACED_ILK;
+	else
+		val |= PIPECONF_PROGRESSIVE;
+
+	if (intel_crtc->config.limited_color_range)
+		val |= PIPECONF_COLOR_RANGE_SELECT;
+	else
+		val &= ~PIPECONF_COLOR_RANGE_SELECT;
+
+	I915_WRITE(PIPECONF(pipe), val);
+	POSTING_READ(PIPECONF(pipe));
+}
+
+/*
+ * Set up the pipe CSC unit.
+ *
+ * Currently only full range RGB to limited range RGB conversion
+ * is supported, but eventually this should handle various
+ * RGB<->YCbCr scenarios as well.
+ */
+static void intel_set_pipe_csc(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	uint16_t coeff = 0x7800; /* 1.0 */
+
+	/*
+	 * TODO: Check what kind of values actually come out of the pipe
+	 * with these coeff/postoff values and adjust to get the best
+	 * accuracy. Perhaps we even need to take the bpc value into
+	 * consideration.
+	 */
+
+	if (intel_crtc->config.limited_color_range)
+		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
+
+	/*
+	 * GY/GU and RY/RU should be the other way around according
+	 * to BSpec, but reality doesn't agree. Just set them up in
+	 * a way that results in the correct picture.
+	 */
+	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
+	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
+
+	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
+	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
+
+	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
+	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
+
+	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
+	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
+	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+
+	if (INTEL_INFO(dev)->gen > 6) {
+		uint16_t postoff = 0;
+
+		if (intel_crtc->config.limited_color_range)
+			postoff = (16 * (1 << 12) / 255) & 0x1fff;
+
+		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
+		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
+		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+
+		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+	} else {
+		uint32_t mode = CSC_MODE_YUV_TO_RGB;
+
+		if (intel_crtc->config.limited_color_range)
+			mode |= CSC_BLACK_SCREEN_OFFSET;
+
+		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+	}
+}
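+
+/*
+ * The limited-range arithmetic above, spelled out (the register value is
+ * then interpreted in the hardware's own coefficient format): the scale
+ * factor is (235 - 16) / 255 ~= 0.859, which compresses full range into
+ * broadcast range; (235 - 16) * (1 << 12) / 255 = 3517, masked with 0xff8
+ * to 0xdb8. The gen7+ post-offset is the matching black-level lift,
+ * 16 * (1 << 12) / 255 = 257.
+ */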
+
+static void haswell_set_pipeconf(struct drm_crtc *crtc,
+				 struct drm_display_mode *adjusted_mode,
+				 bool dither)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+	uint32_t val;
+
+	val = I915_READ(PIPECONF(cpu_transcoder));
+
+	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+	if (dither)
+		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+	val &= ~PIPECONF_INTERLACE_MASK_HSW;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		val |= PIPECONF_INTERLACED_ILK;
+	else
+		val |= PIPECONF_PROGRESSIVE;
+
+	I915_WRITE(PIPECONF(cpu_transcoder), val);
+	POSTING_READ(PIPECONF(cpu_transcoder));
+}
+
+static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+				    struct drm_display_mode *adjusted_mode,
+				    intel_clock_t *clock,
+				    bool *has_reduced_clock,
+				    intel_clock_t *reduced_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	int refclk;
+	const intel_limit_t *limit;
+	bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		switch (intel_encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_SDVO:
+		case INTEL_OUTPUT_HDMI:
+			is_sdvo = true;
+			if (intel_encoder->needs_tv_clock)
+				is_tv = true;
+			break;
+		case INTEL_OUTPUT_TVOUT:
+			is_tv = true;
+			break;
+		}
+	}
+
+	refclk = ironlake_get_refclk(crtc);
+
+	/*
+	 * Returns a set of divisors for the desired target clock with the given
+	 * refclk, or FALSE.  The returned values represent the clock equation:
+	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+	 */
+	limit = intel_limit(crtc, refclk);
+	ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+			      clock);
+	if (!ret)
+		return false;
+
+	if (is_lvds && dev_priv->lvds_downclock_avail) {
+		/*
+		 * Ensure we match the reduced clock's P to the target clock.
+		 * If the clocks don't match, we can't switch the display clock
+		 * by using the FP0/FP1. In that case we will disable the LVDS
+		 * downclock feature.
+		 */
+		*has_reduced_clock = limit->find_pll(limit, crtc,
+						     dev_priv->lvds_downclock,
+						     refclk,
+						     clock,
+						     reduced_clock);
+	}
+
+	if (is_sdvo && is_tv)
+		i9xx_adjust_sdvo_tv_clock(to_intel_crtc(crtc));
+
+	return true;
+}
+
+static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t temp;
+
+	temp = I915_READ(SOUTH_CHICKEN1);
+	if (temp & FDI_BC_BIFURCATION_SELECT)
+		return;
+
+	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+	temp |= FDI_BC_BIFURCATION_SELECT;
+	DRM_DEBUG_KMS("enabling fdi C rx\n");
+	I915_WRITE(SOUTH_CHICKEN1, temp);
+	POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *pipe_B_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+
+	DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
+		      intel_crtc->pipe, intel_crtc->fdi_lanes);
+	if (intel_crtc->fdi_lanes > 4) {
+		DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
+			      intel_crtc->pipe, intel_crtc->fdi_lanes);
+		/* Clamp lanes to avoid programming the hw with bogus values. */
+		intel_crtc->fdi_lanes = 4;
+
+		return false;
+	}
+
+	if (INTEL_INFO(dev)->num_pipes == 2)
+		return true;
+
+	switch (intel_crtc->pipe) {
+	case PIPE_A:
+		return true;
+	case PIPE_B:
+		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
+		    intel_crtc->fdi_lanes > 2) {
+			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+				      intel_crtc->pipe, intel_crtc->fdi_lanes);
+			/* Clamp lanes to avoid programming the hw with bogus values. */
+			intel_crtc->fdi_lanes = 2;
+
+			return false;
+		}
+
+		if (intel_crtc->fdi_lanes > 2)
+			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+		else
+			cpt_enable_fdi_bc_bifurcation(dev);
+
+		return true;
+	case PIPE_C:
+		if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
+			if (intel_crtc->fdi_lanes > 2) {
+				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+					      intel_crtc->pipe, intel_crtc->fdi_lanes);
+				/* Clamp lanes to avoid programming the hw with bogus values. */
+				intel_crtc->fdi_lanes = 2;
+
+				return false;
+			}
+		} else {
+			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+			return false;
+		}
+
+		cpt_enable_fdi_bc_bifurcation(dev);
+
+		return true;
+	default:
+		BUG();
+	}
+}
+
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+	/*
+	 * Account for spread spectrum to avoid
+	 * oversubscribing the link. Max center spread
+	 * is 2.5%; use 5% for safety's sake.
+	 */
+	u32 bps = target_clock * bpp * 21 / 20;
+	return bps / (link_bw * 8) + 1;
+}
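+
+/*
+ * Worked example (illustrative numbers): for a 108000 kHz target clock at
+ * 24 bpp on a 270000 kHz FDI link, bps = 108000 * 24 * 21 / 20 = 2721600,
+ * and 2721600 / (270000 * 8) + 1 = 2 lanes; the trailing +1 rounds the
+ * integer division up so the link is never undersized.
+ */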
+
+void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
+				  struct intel_link_m_n *m_n)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+
+	I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+	I915_WRITE(TRANSDATA_N1(pipe), m_n->gmch_n);
+	I915_WRITE(TRANSDPLINK_M1(pipe), m_n->link_m);
+	I915_WRITE(TRANSDPLINK_N1(pipe), m_n->link_n);
+}
+
+void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+				  struct intel_link_m_n *m_n)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = crtc->pipe;
+	enum transcoder transcoder = crtc->config.cpu_transcoder;
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
+		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
+		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
+		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
+	} else {
+		I915_WRITE(PIPE_GMCH_DATA_M(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
+		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n->gmch_n);
+		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n->link_m);
+		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n->link_n);
+	}
+}
+
+static void ironlake_fdi_set_m_n(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
+	struct intel_link_m_n m_n = {0};
+	int target_clock, lane, link_bw;
+
+	/* FDI is a binary signal running at ~2.7GHz, encoding
+	 * each output octet as 10 bits. The actual frequency
+	 * is stored as a divider into a 100MHz clock, and the
+	 * mode pixel clock is stored in units of 1KHz.
+	 * Hence the bw of each lane in terms of the mode signal
+	 * is:
+	 */
+	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
+
+	if (intel_crtc->config.pixel_target_clock)
+		target_clock = intel_crtc->config.pixel_target_clock;
+	else
+		target_clock = adjusted_mode->clock;
+
+	lane = ironlake_get_lanes_required(target_clock, link_bw,
+					   intel_crtc->config.pipe_bpp);
+
+	intel_crtc->fdi_lanes = lane;
+
+	if (intel_crtc->config.pixel_multiplier > 1)
+		link_bw *= intel_crtc->config.pixel_multiplier;
+	intel_link_compute_m_n(intel_crtc->config.pipe_bpp, lane, target_clock,
+			       link_bw, &m_n);
+
+	intel_cpu_transcoder_set_m_n(intel_crtc, &m_n);
+}
+
+static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+				      intel_clock_t *clock, u32 *fp,
+				      intel_clock_t *reduced_clock, u32 *fp2)
+{
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	uint32_t dpll;
+	int factor, num_connectors = 0;
+	bool is_lvds = false, is_sdvo = false, is_tv = false;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		switch (intel_encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_SDVO:
+		case INTEL_OUTPUT_HDMI:
+			is_sdvo = true;
+			if (intel_encoder->needs_tv_clock)
+				is_tv = true;
+			break;
+		case INTEL_OUTPUT_TVOUT:
+			is_tv = true;
+			break;
+		}
+
+		num_connectors++;
+	}
+
+	/* Enable autotuning of the PLL clock (if permissible) */
+	factor = 21;
+	if (is_lvds) {
+		if ((intel_panel_use_ssc(dev_priv) &&
+		     dev_priv->lvds_ssc_freq == 100) ||
+		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
+			factor = 25;
+	} else if (is_sdvo && is_tv)
+		factor = 20;
+
+	if (clock->m < factor * clock->n)
+		*fp |= FP_CB_TUNE;
+
+	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
+		*fp2 |= FP_CB_TUNE;
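+
+	/* Example (illustrative dividers): with the LVDS factor of 21,
+	 * m = 100 and n = 6 satisfy m < 21 * 6 = 126, so FP_CB_TUNE is set
+	 * and PLL autotuning kicks in. */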
+
+	dpll = 0;
+
+	if (is_lvds)
+		dpll |= DPLLB_MODE_LVDS;
+	else
+		dpll |= DPLLB_MODE_DAC_SERIAL;
+	if (is_sdvo) {
+		if (intel_crtc->config.pixel_multiplier > 1) {
+			dpll |= (intel_crtc->config.pixel_multiplier - 1)
+				<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+		}
+		dpll |= DPLL_DVO_HIGH_SPEED;
+	}
+	if (intel_crtc->config.has_dp_encoder &&
+	    intel_crtc->config.has_pch_encoder)
+		dpll |= DPLL_DVO_HIGH_SPEED;
+
+	/* compute bitmask from p1 value */
+	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+	/* also FPA1 */
+	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+
+	switch (clock->p2) {
+	case 5:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+		break;
+	case 7:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+		break;
+	case 10:
+		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+		break;
+	case 14:
+		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+		break;
+	}
+
+	if (is_sdvo && is_tv)
+		dpll |= PLL_REF_INPUT_TVCLKINBC;
+	else if (is_tv)
+		/* XXX: just matching BIOS for now */
+		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
+		dpll |= 3;
+	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+	else
+		dpll |= PLL_REF_INPUT_DREFCLK;
+
+	return dpll;
+}
+
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+				  int x, int y,
+				  struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
+	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	int num_connectors = 0;
+	intel_clock_t clock, reduced_clock;
+	u32 dpll, fp = 0, fp2 = 0;
+	bool ok, has_reduced_clock = false;
+	bool is_lvds = false;
+	struct intel_encoder *encoder;
+	int ret;
+	bool dither, fdi_config_ok;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		}
+
+		num_connectors++;
+	}
+
+	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
+	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+
+	intel_crtc->config.cpu_transcoder = pipe;
+
+	ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+				     &has_reduced_clock, &reduced_clock);
+	if (!ok) {
+		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+	/* Compat-code for transition, will disappear. */
+	if (!intel_crtc->config.clock_set) {
+		intel_crtc->config.dpll.n = clock.n;
+		intel_crtc->config.dpll.m1 = clock.m1;
+		intel_crtc->config.dpll.m2 = clock.m2;
+		intel_crtc->config.dpll.p1 = clock.p1;
+		intel_crtc->config.dpll.p2 = clock.p2;
+	}
+
+	/* Ensure that the cursor is valid for the new mode before changing... */
+	intel_crtc_update_cursor(crtc, true);
+
+	/* determine panel color depth */
+	dither = intel_crtc->config.dither;
+	if (is_lvds && dev_priv->lvds_dither)
+		dither = true;
+
+	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+	if (has_reduced_clock)
+		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+			reduced_clock.m2;
+
+	dpll = ironlake_compute_dpll(intel_crtc, &clock, &fp, &reduced_clock,
+				     has_reduced_clock ? &fp2 : NULL);
+
+	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
+	drm_mode_debug_printmodeline(mode);
+
+	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+	if (intel_crtc->config.has_pch_encoder) {
+		struct intel_pch_pll *pll;
+
+		pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+		if (pll == NULL) {
+			DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+					 pipe);
+			return -EINVAL;
+		}
+	} else
+		intel_put_pch_pll(intel_crtc);
+
+	if (intel_crtc->config.has_dp_encoder)
+		intel_dp_set_m_n(intel_crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_pll_enable)
+			encoder->pre_pll_enable(encoder);
+
+	if (intel_crtc->pch_pll) {
+		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+
+		/* Wait for the clocks to stabilize. */
+		POSTING_READ(intel_crtc->pch_pll->pll_reg);
+		udelay(150);
+
+		/* The pixel multiplier can only be updated once the
+		 * DPLL is enabled and the clocks are stable.
+		 *
+		 * So write it again.
+		 */
+		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+	}
+
+	intel_crtc->lowfreq_avail = false;
+	if (intel_crtc->pch_pll) {
+		if (is_lvds && has_reduced_clock && i915_powersave) {
+			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
+			intel_crtc->lowfreq_avail = true;
+		} else {
+			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
+		}
+	}
+
+	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+
+	/* Note, this also computes intel_crtc->fdi_lanes which is used below in
+	 * ironlake_check_fdi_lanes. */
+	intel_crtc->fdi_lanes = 0;
+	if (intel_crtc->config.has_pch_encoder)
+		ironlake_fdi_set_m_n(crtc);
+
+	fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
+
+	ironlake_set_pipeconf(crtc, adjusted_mode, dither);
+
+	intel_wait_for_vblank(dev, pipe);
+
+	/* Set up the display plane register */
+	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+	POSTING_READ(DSPCNTR(plane));
+
+	ret = intel_pipe_set_base(crtc, x, y, fb);
+
+	intel_update_watermarks(dev);
+
+	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
+	return fdi_config_ok ? ret : -EINVAL;
+}
+
+static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
+				     struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = I915_READ(PIPECONF(crtc->pipe));
+	if (!(tmp & PIPECONF_ENABLE))
+		return false;
+
+	if (I915_READ(TRANSCONF(crtc->pipe)) & TRANS_ENABLE)
+		pipe_config->has_pch_encoder = true;
+
+	return true;
+}
+
+static void haswell_modeset_global_resources(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool enable = false;
+	struct intel_crtc *crtc;
+	struct intel_encoder *encoder;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+		if (crtc->pipe != PIPE_A && crtc->base.enabled)
+			enable = true;
+		/* XXX: Should check for edp transcoder here, but thanks to init
+		 * sequence that's not yet available. Just in case desktop eDP
+		 * on PORT D is possible on haswell, too. */
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (encoder->type != INTEL_OUTPUT_EDP &&
+		    encoder->connectors_active)
+			enable = true;
+	}
+
+	/* Even the eDP panel fitter is outside the always-on well. */
+	if (dev_priv->pch_pf_size)
+		enable = true;
+
+	intel_set_power_well(dev, enable);
+}
+
+static int haswell_crtc_mode_set(struct drm_crtc *crtc,
+				 int x, int y,
+				 struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
+	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	int num_connectors = 0;
+	bool is_cpu_edp = false;
+	struct intel_encoder *encoder;
+	int ret;
+	bool dither;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_EDP:
+			if (!intel_encoder_is_pch_edp(&encoder->base))
+				is_cpu_edp = true;
+			break;
+		}
+
+		num_connectors++;
+	}
+
+	if (is_cpu_edp)
+		intel_crtc->config.cpu_transcoder = TRANSCODER_EDP;
+	else
+		intel_crtc->config.cpu_transcoder = pipe;
+
+	/* We are not sure yet this won't happen. */
+	WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
+	     INTEL_PCH_TYPE(dev));
+
+	WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
+	     num_connectors, pipe_name(pipe));
+
+	WARN_ON(I915_READ(PIPECONF(intel_crtc->config.cpu_transcoder)) &
+		(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
+
+	WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
+
+	if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
+		return -EINVAL;
+
+	/* Ensure that the cursor is valid for the new mode before changing... */
+	intel_crtc_update_cursor(crtc, true);
+
+	/* determine panel color depth */
+	dither = intel_crtc->config.dither;
+
+	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
+	drm_mode_debug_printmodeline(mode);
+
+	if (intel_crtc->config.has_dp_encoder)
+		intel_dp_set_m_n(intel_crtc);
+
+	intel_crtc->lowfreq_avail = false;
+
+	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+
+	if (intel_crtc->config.has_pch_encoder)
+		ironlake_fdi_set_m_n(crtc);
+
+	haswell_set_pipeconf(crtc, adjusted_mode, dither);
+
+	intel_set_pipe_csc(crtc);
+
+	/* Set up the display plane register */
+	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
+	POSTING_READ(DSPCNTR(plane));
+
+	ret = intel_pipe_set_base(crtc, x, y, fb);
+
+	intel_update_watermarks(dev);
+
+	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
+	return ret;
+}
+
+static bool haswell_get_pipe_config(struct intel_crtc *crtc,
+				    struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = I915_READ(PIPECONF(crtc->config.cpu_transcoder));
+	if (!(tmp & PIPECONF_ENABLE))
+		return false;
+
+	/*
+	 * Haswell has only one FDI/PCH transcoder, transcoder A, which is
+	 * connected to DDI E. So just check whether this pipe is wired to
+	 * DDI E and whether the PCH transcoder is on.
+	 */
+	tmp = I915_READ(TRANS_DDI_FUNC_CTL(crtc->pipe));
+	if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
+	    I915_READ(TRANSCONF(PIPE_A)) & TRANS_ENABLE)
+		pipe_config->has_pch_encoder = true;
+
+	return true;
+}
+
+static int intel_crtc_mode_set(struct drm_crtc *crtc,
+			       int x, int y,
+			       struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct intel_encoder *encoder;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
+	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+	int pipe = intel_crtc->pipe;
+	int ret;
+
+	drm_vblank_pre_modeset(dev, pipe);
+
+	ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
+
+	drm_vblank_post_modeset(dev, pipe);
+
+	if (ret != 0)
+		return ret;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+			encoder->base.base.id,
+			drm_get_encoder_name(&encoder->base),
+			mode->base.id, mode->name);
+		if (encoder->mode_set) {
+			encoder->mode_set(encoder);
+		} else {
+			encoder_funcs = encoder->base.helper_private;
+			encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+		}
+	}
+
+	return 0;
+}
+
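+/*
+ * Check whether the ELD already programmed into the hardware matches the
+ * ELD cached on @connector. Returns true when no rewrite is needed: either
+ * both are empty, or the valid bit is set and reading the data register
+ * back (after resetting the ELD address bits, assuming the hardware
+ * auto-increments through the buffer) matches the cached ELD. As consumed
+ * here, eld[2] is the payload length in DWORDs.
+ */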
+static bool intel_eld_uptodate(struct drm_connector *connector,
+			       int reg_eldv, uint32_t bits_eldv,
+			       int reg_elda, uint32_t bits_elda,
+			       int reg_edid)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	uint8_t *eld = connector->eld;
+	uint32_t i;
+
+	i = I915_READ(reg_eldv);
+	i &= bits_eldv;
+
+	if (!eld[0])
+		return !i;
+
+	if (!i)
+		return false;
+
+	i = I915_READ(reg_elda);
+	i &= ~bits_elda;
+	I915_WRITE(reg_elda, i);
+
+	for (i = 0; i < eld[2]; i++)
+		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+			return false;
+
+	return true;
+}
+
+static void g4x_write_eld(struct drm_connector *connector,
+			  struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	uint8_t *eld = connector->eld;
+	uint32_t eldv;
+	uint32_t len;
+	uint32_t i;
+
+	i = I915_READ(G4X_AUD_VID_DID);
+
+	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
+		eldv = G4X_ELDV_DEVCL_DEVBLC;
+	else
+		eldv = G4X_ELDV_DEVCTG;
+
+	if (intel_eld_uptodate(connector,
+			       G4X_AUD_CNTL_ST, eldv,
+			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
+			       G4X_HDMIW_HDMIEDID))
+		return;
+
+	i = I915_READ(G4X_AUD_CNTL_ST);
+	i &= ~(eldv | G4X_ELD_ADDR);
+	len = (i >> 9) & 0x1f;		/* ELD buffer size */
+	I915_WRITE(G4X_AUD_CNTL_ST, i);
+
+	if (!eld[0])
+		return;
+
+	len = min_t(uint8_t, eld[2], len);
+	DRM_DEBUG_DRIVER("ELD size %d\n", len);
+	for (i = 0; i < len; i++)
+		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+
+	i = I915_READ(G4X_AUD_CNTL_ST);
+	i |= eldv;
+	I915_WRITE(G4X_AUD_CNTL_ST, i);
+}
+
+static void haswell_write_eld(struct drm_connector *connector,
+				     struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	uint8_t *eld = connector->eld;
+	struct drm_device *dev = crtc->dev;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t eldv;
+	uint32_t i;
+	int len;
+	int pipe = to_intel_crtc(crtc)->pipe;
+	int tmp;
+
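+	/* The audio registers are instanced per pipe/transcoder on Haswell. */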
+	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
+	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
+	int aud_config = HSW_AUD_CFG(pipe);
+	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
+
+	DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
+
+	/* Audio output enable */
+	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
+	tmp = I915_READ(aud_cntrl_st2);
+	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
+	I915_WRITE(aud_cntrl_st2, tmp);
+
+	/* Wait for 1 vertical blank */
+	intel_wait_for_vblank(dev, pipe);
+
+	/* Set ELD valid state */
+	tmp = I915_READ(aud_cntrl_st2);
+	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
+	I915_WRITE(aud_cntrl_st2, tmp);
+	tmp = I915_READ(aud_cntrl_st2);
+	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+
+	/* Enable HDMI mode */
+	tmp = I915_READ(aud_config);
+	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+	/* clear N_programing_enable and N_value_index */
+	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
+	I915_WRITE(aud_config, tmp);
+
+	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+	eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+	intel_crtc->eld_vld = true;
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
+		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+	} else
+		I915_WRITE(aud_config, 0);
+
+	if (intel_eld_uptodate(connector,
+			       aud_cntrl_st2, eldv,
+			       aud_cntl_st, IBX_ELD_ADDRESS,
+			       hdmiw_hdmiedid))
+		return;
+
+	i = I915_READ(aud_cntrl_st2);
+	i &= ~eldv;
+	I915_WRITE(aud_cntrl_st2, i);
+
+	if (!eld[0])
+		return;
+
+	i = I915_READ(aud_cntl_st);
+	i &= ~IBX_ELD_ADDRESS;
+	I915_WRITE(aud_cntl_st, i);
+	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
+	DRM_DEBUG_DRIVER("port num:%d\n", i);
+
+	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
+	DRM_DEBUG_DRIVER("ELD size %d\n", len);
+	for (i = 0; i < len; i++)
+		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+	i = I915_READ(aud_cntrl_st2);
+	i |= eldv;
+	I915_WRITE(aud_cntrl_st2, i);
+}
+
+static void ironlake_write_eld(struct drm_connector *connector,
+				     struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	uint8_t *eld = connector->eld;
+	uint32_t eldv;
+	uint32_t i;
+	int len;
+	int hdmiw_hdmiedid;
+	int aud_config;
+	int aud_cntl_st;
+	int aud_cntrl_st2;
+	int pipe = to_intel_crtc(crtc)->pipe;
+
+	if (HAS_PCH_IBX(connector->dev)) {
+		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+		aud_config = IBX_AUD_CFG(pipe);
+		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+	} else {
+		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+		aud_config = CPT_AUD_CFG(pipe);
+		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
+		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+	}
+
+	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+	i = I915_READ(aud_cntl_st);
+	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
+	if (!i) {
+		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
+		/* operate blindly on all ports */
+		eldv = IBX_ELD_VALIDB;
+		eldv |= IBX_ELD_VALIDB << 4;
+		eldv |= IBX_ELD_VALIDB << 8;
+	} else {
+		DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
+		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
+	}
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
+		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+	} else
+		I915_WRITE(aud_config, 0);
+
+	if (intel_eld_uptodate(connector,
+			       aud_cntrl_st2, eldv,
+			       aud_cntl_st, IBX_ELD_ADDRESS,
+			       hdmiw_hdmiedid))
+		return;
+
+	i = I915_READ(aud_cntrl_st2);
+	i &= ~eldv;
+	I915_WRITE(aud_cntrl_st2, i);
+
+	if (!eld[0])
+		return;
+
+	i = I915_READ(aud_cntl_st);
+	i &= ~IBX_ELD_ADDRESS;
+	I915_WRITE(aud_cntl_st, i);
+
+	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
+	DRM_DEBUG_DRIVER("ELD size %d\n", len);
+	for (i = 0; i < len; i++)
+		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+	i = I915_READ(aud_cntrl_st2);
+	i |= eldv;
+	I915_WRITE(aud_cntrl_st2, i);
+}
+
+void intel_write_eld(struct drm_encoder *encoder,
+		     struct drm_display_mode *mode)
+{
+	struct drm_crtc *crtc = encoder->crtc;
+	struct drm_connector *connector;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	connector = drm_select_eld(encoder, mode);
+	if (!connector)
+		return;
+
+	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+			 connector->base.id,
+			 drm_get_connector_name(connector),
+			 connector->encoder->base.id,
+			 drm_get_encoder_name(connector->encoder));
+
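+	/* ELD byte 6 carries the AV sync delay in units of 2 ms. */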
+	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
+
+	if (dev_priv->display.write_eld)
+		dev_priv->display.write_eld(connector, crtc);
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int palreg = PALETTE(intel_crtc->pipe);
+	int i;
+
+	/* The clocks have to be on to load the palette. */
+	if (!crtc->enabled || !intel_crtc->active)
+		return;
+
+	/* use legacy palette for Ironlake */
+	if (HAS_PCH_SPLIT(dev))
+		palreg = LGC_PALETTE(intel_crtc->pipe);
+
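+	/* Each palette entry packs 8-bit R/G/B as R[23:16] G[15:8] B[7:0]. */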
+	for (i = 0; i < 256; i++) {
+		I915_WRITE(palreg + 4 * i,
+			   (intel_crtc->lut_r[i] << 16) |
+			   (intel_crtc->lut_g[i] << 8) |
+			   intel_crtc->lut_b[i]);
+	}
+}
+
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	bool visible = base != 0;
+	u32 cntl;
+
+	if (intel_crtc->cursor_visible == visible)
+		return;
+
+	cntl = I915_READ(_CURACNTR);
+	if (visible) {
+		/* On these chipsets we can only modify the base whilst
+		 * the cursor is disabled.
+		 */
+		I915_WRITE(_CURABASE, base);
+
+		cntl &= ~(CURSOR_FORMAT_MASK);
+		/* XXX width must be 64, stride 256 => 0x00 << 28 */
+		cntl |= CURSOR_ENABLE |
+			CURSOR_GAMMA_ENABLE |
+			CURSOR_FORMAT_ARGB;
+	} else
+		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+	I915_WRITE(_CURACNTR, cntl);
+
+	intel_crtc->cursor_visible = visible;
+}
+
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	bool visible = base != 0;
+
+	if (intel_crtc->cursor_visible != visible) {
+		uint32_t cntl = I915_READ(CURCNTR(pipe));
+		if (base) {
+			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+			cntl |= pipe << 28; /* Connect to correct pipe */
+		} else {
+			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+			cntl |= CURSOR_MODE_DISABLE;
+		}
+		I915_WRITE(CURCNTR(pipe), cntl);
+
+		intel_crtc->cursor_visible = visible;
+	}
+	/* and commit changes on next vblank */
+	POSTING_READ(CURCNTR(pipe));
+	I915_WRITE(CURBASE(pipe), base);
+	POSTING_READ(CURBASE(pipe));
+}
+
+static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	bool visible = base != 0;
+
+	if (intel_crtc->cursor_visible != visible) {
+		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
+		if (base) {
+			cntl &= ~CURSOR_MODE;
+			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+		} else {
+			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+			cntl |= CURSOR_MODE_DISABLE;
+		}
+		if (IS_HASWELL(dev))
+			cntl |= CURSOR_PIPE_CSC_ENABLE;
+		I915_WRITE(CURCNTR_IVB(pipe), cntl);
+
+		intel_crtc->cursor_visible = visible;
+	}
+	/* and commit changes on next vblank */
+	POSTING_READ(CURCNTR_IVB(pipe));
+	I915_WRITE(CURBASE_IVB(pipe), base);
+	POSTING_READ(CURBASE_IVB(pipe));
+}
+
+/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
+static void intel_crtc_update_cursor(struct drm_crtc *crtc,
+				     bool on)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int x = intel_crtc->cursor_x;
+	int y = intel_crtc->cursor_y;
+	u32 base, pos;
+	bool visible;
+
+	pos = 0;
+
+	if (on && crtc->enabled && crtc->fb) {
+		base = intel_crtc->cursor_addr;
+		if (x > (int) crtc->fb->width)
+			base = 0;
+
+		if (y > (int) crtc->fb->height)
+			base = 0;
+	} else
+		base = 0;
+
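+	/*
+	 * CURPOS uses sign-magnitude encoding: a sign bit per axis plus the
+	 * absolute coordinate. A cursor that is entirely off-screen keeps
+	 * base == 0 so it is disabled rather than wrapped around.
+	 */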
+	if (x < 0) {
+		if (x + intel_crtc->cursor_width < 0)
+			base = 0;
+
+		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+		x = -x;
+	}
+	pos |= x << CURSOR_X_SHIFT;
+
+	if (y < 0) {
+		if (y + intel_crtc->cursor_height < 0)
+			base = 0;
+
+		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+		y = -y;
+	}
+	pos |= y << CURSOR_Y_SHIFT;
+
+	visible = base != 0;
+	if (!visible && !intel_crtc->cursor_visible)
+		return;
+
+	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+		I915_WRITE(CURPOS_IVB(pipe), pos);
+		ivb_update_cursor(crtc, base);
+	} else {
+		I915_WRITE(CURPOS(pipe), pos);
+		if (IS_845G(dev) || IS_I865G(dev))
+			i845_update_cursor(crtc, base);
+		else
+			i9xx_update_cursor(crtc, base);
+	}
+}
+
+static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+				 struct drm_file *file,
+				 uint32_t handle,
+				 uint32_t width, uint32_t height)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_i915_gem_object *obj;
+	uint32_t addr;
+	int ret;
+
+	/* if we want to turn off the cursor, ignore width and height */
+	if (!handle) {
+		DRM_DEBUG_KMS("cursor off\n");
+		addr = 0;
+		obj = NULL;
+		mutex_lock(&dev->struct_mutex);
+		goto finish;
+	}
+
+	/* Currently we only support 64x64 cursors */
+	if (width != 64 || height != 64) {
+		DRM_ERROR("we currently only support 64x64 cursors\n");
+		return -EINVAL;
+	}
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+	if (&obj->base == NULL)
+		return -ENOENT;
+
+	if (obj->base.size < width * height * 4) {
+		DRM_ERROR("buffer is to small\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* we only need to pin inside GTT if cursor is non-phy */
+	mutex_lock(&dev->struct_mutex);
+	if (!dev_priv->info->cursor_needs_physical) {
+		unsigned alignment;
+
+		if (obj->tiling_mode) {
+			DRM_ERROR("cursor cannot be tiled\n");
+			ret = -EINVAL;
+			goto fail_locked;
+		}
+
+		/* Note that the w/a also requires 2 PTE of padding following
+		 * the bo. We currently fill all unused PTE with the shadow
+		 * page and so we should always have valid PTE following the
+		 * cursor preventing the VT-d warning.
+		 */
+		alignment = 0;
+		if (need_vtd_wa(dev))
+			alignment = 64*1024;
+
+		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
+		if (ret) {
+			DRM_ERROR("failed to move cursor bo into the GTT\n");
+			goto fail_locked;
+		}
+
+		ret = i915_gem_object_put_fence(obj);
+		if (ret) {
+			DRM_ERROR("failed to release fence for cursor");
+			goto fail_unpin;
+		}
+
+		addr = obj->gtt_offset;
+	} else {
+		int align = IS_I830(dev) ? 16 * 1024 : 256;
+		ret = i915_gem_attach_phys_object(dev, obj,
+						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
+						  align);
+		if (ret) {
+			DRM_ERROR("failed to attach phys object\n");
+			goto fail_locked;
+		}
+		addr = obj->phys_obj->handle->busaddr;
+	}
+
+	if (IS_GEN2(dev))
+		I915_WRITE(CURSIZE, (height << 12) | width);
+
+ finish:
+	if (intel_crtc->cursor_bo) {
+		if (dev_priv->info->cursor_needs_physical) {
+			if (intel_crtc->cursor_bo != obj)
+				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
+		} else
+			i915_gem_object_unpin(intel_crtc->cursor_bo);
+		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_crtc->cursor_addr = addr;
+	intel_crtc->cursor_bo = obj;
+	intel_crtc->cursor_width = width;
+	intel_crtc->cursor_height = height;
+
+	intel_crtc_update_cursor(crtc, true);
+
+	return 0;
+fail_unpin:
+	i915_gem_object_unpin(obj);
+fail_locked:
+	mutex_unlock(&dev->struct_mutex);
+fail:
+	drm_gem_object_unreference_unlocked(&obj->base);
+	return ret;
+}
+
+static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	intel_crtc->cursor_x = x;
+	intel_crtc->cursor_y = y;
+
+	intel_crtc_update_cursor(crtc, true);
+
+	return 0;
+}
+
+/** Sets the color ramps on behalf of RandR */
+void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+				 u16 blue, int regno)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	intel_crtc->lut_r[regno] = red >> 8;
+	intel_crtc->lut_g[regno] = green >> 8;
+	intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			     u16 *blue, int regno)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	*red = intel_crtc->lut_r[regno] << 8;
+	*green = intel_crtc->lut_g[regno] << 8;
+	*blue = intel_crtc->lut_b[regno] << 8;
+}
+
+static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				 u16 *blue, uint32_t start, uint32_t size)
+{
+	int end = (start + size > 256) ? 256 : start + size, i;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	for (i = start; i < end; i++) {
+		intel_crtc->lut_r[i] = red[i] >> 8;
+		intel_crtc->lut_g[i] = green[i] >> 8;
+		intel_crtc->lut_b[i] = blue[i] >> 8;
+	}
+
+	intel_crtc_load_lut(crtc);
+}
+
+/* VESA 640x480x72Hz mode to set on the pipe */
+static struct drm_display_mode load_detect_mode = {
+	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
+		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+};
+
+static struct drm_framebuffer *
+intel_framebuffer_create(struct drm_device *dev,
+			 struct drm_mode_fb_cmd2 *mode_cmd,
+			 struct drm_i915_gem_object *obj)
+{
+	struct intel_framebuffer *intel_fb;
+	int ret;
+
+	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+	if (!intel_fb) {
+		drm_gem_object_unreference_unlocked(&obj->base);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(&obj->base);
+		kfree(intel_fb);
+		return ERR_PTR(ret);
+	}
+
+	return &intel_fb->base;
+}
+
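+/*
+ * Bytes per scanline, rounded up to the 64-byte stride alignment the
+ * display engine expects for linear surfaces.
+ */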
+static u32
+intel_framebuffer_pitch_for_width(int width, int bpp)
+{
+	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
+	return ALIGN(pitch, 64);
+}
+
+static u32
+intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
+{
+	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
+	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
+}
+
+static struct drm_framebuffer *
+intel_framebuffer_create_for_mode(struct drm_device *dev,
+				  struct drm_display_mode *mode,
+				  int depth, int bpp)
+{
+	struct drm_i915_gem_object *obj;
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+
+	obj = i915_gem_alloc_object(dev,
+				    intel_framebuffer_size_for_mode(mode, bpp));
+	if (obj == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	mode_cmd.width = mode->hdisplay;
+	mode_cmd.height = mode->vdisplay;
+	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
+								bpp);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+	return intel_framebuffer_create(dev, &mode_cmd, obj);
+}
+
+static struct drm_framebuffer *
+mode_fits_in_fbdev(struct drm_device *dev,
+		   struct drm_display_mode *mode)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	struct drm_framebuffer *fb;
+
+	if (dev_priv->fbdev == NULL)
+		return NULL;
+
+	obj = dev_priv->fbdev->ifb.obj;
+	if (obj == NULL)
+		return NULL;
+
+	fb = &dev_priv->fbdev->ifb.base;
+	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
+							       fb->bits_per_pixel))
+		return NULL;
+
+	if (obj->base.size < mode->vdisplay * fb->pitches[0])
+		return NULL;
+
+	return fb;
+}
+
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
+				struct drm_display_mode *mode,
+				struct intel_load_detect_pipe *old)
+{
+	struct intel_crtc *intel_crtc;
+	struct intel_encoder *intel_encoder =
+		intel_attached_encoder(connector);
+	struct drm_crtc *possible_crtc;
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_crtc *crtc = NULL;
+	struct drm_device *dev = encoder->dev;
+	struct drm_framebuffer *fb;
+	int i = -1;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector),
+		      encoder->base.id, drm_get_encoder_name(encoder));
+
+	/*
+	 * Algorithm gets a little messy:
+	 *
+	 *   - if the connector already has an assigned crtc, use it (but make
+	 *     sure it's on first)
+	 *
+	 *   - try to find the first unused crtc that can drive this connector,
+	 *     and use that if we find one
+	 */
+
+	/* See if we already have a CRTC for this connector */
+	if (encoder->crtc) {
+		crtc = encoder->crtc;
+
+		mutex_lock(&crtc->mutex);
+
+		old->dpms_mode = connector->dpms;
+		old->load_detect_temp = false;
+
+		/* Make sure the crtc and connector are running */
+		if (connector->dpms != DRM_MODE_DPMS_ON)
+			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
+
+		return true;
+	}
+
+	/* Find an unused one (if possible) */
+	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
+		i++;
+		if (!(encoder->possible_crtcs & (1 << i)))
+			continue;
+		if (!possible_crtc->enabled) {
+			crtc = possible_crtc;
+			break;
+		}
+	}
+
+	/*
+	 * If we didn't find an unused CRTC, don't use any.
+	 */
+	if (!crtc) {
+		DRM_DEBUG_KMS("no pipe available for load-detect\n");
+		return false;
+	}
+
+	mutex_lock(&crtc->mutex);
+	intel_encoder->new_crtc = to_intel_crtc(crtc);
+	to_intel_connector(connector)->new_encoder = intel_encoder;
+
+	intel_crtc = to_intel_crtc(crtc);
+	old->dpms_mode = connector->dpms;
+	old->load_detect_temp = true;
+	old->release_fb = NULL;
+
+	if (!mode)
+		mode = &load_detect_mode;
+
+	/* We need a framebuffer large enough to accommodate all accesses
+	 * that the plane may generate whilst we perform load detection.
+	 * We cannot rely on the fbcon either being present (we get called
+	 * during its initialisation to detect all boot displays, or it may
+	 * not even exist) or that it is large enough to satisfy the
+	 * requested mode.
+	 */
+	fb = mode_fits_in_fbdev(dev, mode);
+	if (fb == NULL) {
+		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
+		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
+		old->release_fb = fb;
+	} else
+		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
+	if (IS_ERR(fb)) {
+		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
+		mutex_unlock(&crtc->mutex);
+		return false;
+	}
+
+	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
+		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
+		if (old->release_fb)
+			old->release_fb->funcs->destroy(old->release_fb);
+		mutex_unlock(&crtc->mutex);
+		return false;
+	}
+
+	/* let the connector get through one full cycle before testing */
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
+	return true;
+}
+
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+				    struct intel_load_detect_pipe *old)
+{
+	struct intel_encoder *intel_encoder =
+		intel_attached_encoder(connector);
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_crtc *crtc = encoder->crtc;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+		      connector->base.id, drm_get_connector_name(connector),
+		      encoder->base.id, drm_get_encoder_name(encoder));
+
+	if (old->load_detect_temp) {
+		to_intel_connector(connector)->new_encoder = NULL;
+		intel_encoder->new_crtc = NULL;
+		intel_set_mode(crtc, NULL, 0, 0, NULL);
+
+		if (old->release_fb) {
+			drm_framebuffer_unregister_private(old->release_fb);
+			drm_framebuffer_unreference(old->release_fb);
+		}
+
+		mutex_unlock(&crtc->mutex);
+		return;
+	}
+
+	/* Switch crtc and encoder back off if necessary */
+	if (old->dpms_mode != DRM_MODE_DPMS_ON)
+		connector->funcs->dpms(connector, old->dpms_mode);
+
+	mutex_unlock(&crtc->mutex);
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 dpll = I915_READ(DPLL(pipe));
+	u32 fp;
+	intel_clock_t clock;
+
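+	/*
+	 * Decode the m/n/p dividers from the FP and DPLL registers; given a
+	 * reference clock, intel_clock() computes the resulting dot clock
+	 * into clock.dot.
+	 */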
+	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+		fp = I915_READ(FP0(pipe));
+	else
+		fp = I915_READ(FP1(pipe));
+
+	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+	if (IS_PINEVIEW(dev)) {
+		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
+	} else {
+		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+	}
+
+	if (!IS_GEN2(dev)) {
+		if (IS_PINEVIEW(dev))
+			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+		else
+			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+			       DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+		switch (dpll & DPLL_MODE_MASK) {
+		case DPLLB_MODE_DAC_SERIAL:
+			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+				5 : 10;
+			break;
+		case DPLLB_MODE_LVDS:
+			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+				7 : 14;
+			break;
+		default:
+			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
+				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
+			return 0;
+		}
+
+		/* XXX: Handle the 100MHz refclk */
+		intel_clock(dev, 96000, &clock);
+	} else {
+		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
+
+		if (is_lvds) {
+			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+				       DPLL_FPA01_P1_POST_DIV_SHIFT);
+			clock.p2 = 14;
+
+			if ((dpll & PLL_REF_INPUT_MASK) ==
+			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+				/* XXX: might not be 66MHz */
+				intel_clock(dev, 66000, &clock);
+			} else
+				intel_clock(dev, 48000, &clock);
+		} else {
+			if (dpll & PLL_P1_DIVIDE_BY_TWO)
+				clock.p1 = 2;
+			else {
+				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+			}
+			if (dpll & PLL_P2_DIVIDE_BY_4)
+				clock.p2 = 4;
+			else
+				clock.p2 = 2;
+
+			intel_clock(dev, 48000, &clock);
+		}
+	}
+
+	/* XXX: It would be nice to validate the clocks, but we can't reuse
+	 * i830PllIsValid() because it relies on the xf86_config connector
+	 * configuration being accurate, which it isn't necessarily.
+	 */
+
+	return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+					     struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+	struct drm_display_mode *mode;
+	int htot = I915_READ(HTOTAL(cpu_transcoder));
+	int hsync = I915_READ(HSYNC(cpu_transcoder));
+	int vtot = I915_READ(VTOTAL(cpu_transcoder));
+	int vsync = I915_READ(VSYNC(cpu_transcoder));
+
+	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+	if (!mode)
+		return NULL;
+
+	mode->clock = intel_crtc_clock_get(dev, crtc);
+	mode->hdisplay = (htot & 0xffff) + 1;
+	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+	mode->hsync_start = (hsync & 0xffff) + 1;
+	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+	mode->vdisplay = (vtot & 0xffff) + 1;
+	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+	mode->vsync_start = (vsync & 0xffff) + 1;
+	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+	drm_mode_set_name(mode);
+
+	return mode;
+}
+
+static void intel_increase_pllclock(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int dpll_reg = DPLL(pipe);
+	int dpll;
+
+	if (HAS_PCH_SPLIT(dev))
+		return;
+
+	if (!dev_priv->lvds_downclock_avail)
+		return;
+
+	dpll = I915_READ(dpll_reg);
+	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
+		DRM_DEBUG_DRIVER("upclocking LVDS\n");
+
+		assert_panel_unlocked(dev_priv, pipe);
+
+		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
+		I915_WRITE(dpll_reg, dpll);
+		intel_wait_for_vblank(dev, pipe);
+
+		dpll = I915_READ(dpll_reg);
+		if (dpll & DISPLAY_RATE_SELECT_FPA1)
+			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
+	}
+}
+
+static void intel_decrease_pllclock(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	if (HAS_PCH_SPLIT(dev))
+		return;
+
+	if (!dev_priv->lvds_downclock_avail)
+		return;
+
+	/*
+	 * Since this is called by a timer, we should never get here in
+	 * the manual case.
+	 */
+	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
+		int pipe = intel_crtc->pipe;
+		int dpll_reg = DPLL(pipe);
+		int dpll;
+
+		DRM_DEBUG_DRIVER("downclocking LVDS\n");
+
+		assert_panel_unlocked(dev_priv, pipe);
+
+		dpll = I915_READ(dpll_reg);
+		dpll |= DISPLAY_RATE_SELECT_FPA1;
+		I915_WRITE(dpll_reg, dpll);
+		intel_wait_for_vblank(dev, pipe);
+		dpll = I915_READ(dpll_reg);
+		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
+			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
+	}
+}
+
+void intel_mark_busy(struct drm_device *dev)
+{
+	i915_update_gfx_val(dev->dev_private);
+}
+
+void intel_mark_idle(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
+	if (!i915_powersave)
+		return;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (!crtc->fb)
+			continue;
+
+		intel_decrease_pllclock(crtc);
+	}
+}
+
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_crtc *crtc;
+
+	if (!i915_powersave)
+		return;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (!crtc->fb)
+			continue;
+
+		if (to_intel_framebuffer(crtc->fb)->obj == obj)
+			intel_increase_pllclock(crtc);
+	}
+}
+
+static void intel_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct intel_unpin_work *work;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	work = intel_crtc->unpin_work;
+	intel_crtc->unpin_work = NULL;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (work) {
+		cancel_work_sync(&work->work);
+		kfree(work);
+	}
+
+	drm_crtc_cleanup(crtc);
+
+	kfree(intel_crtc);
+}
+
+static void intel_unpin_work_fn(struct work_struct *__work)
+{
+	struct intel_unpin_work *work =
+		container_of(__work, struct intel_unpin_work, work);
+	struct drm_device *dev = work->crtc->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	intel_unpin_fb_obj(work->old_fb_obj);
+	drm_gem_object_unreference(&work->pending_flip_obj->base);
+	drm_gem_object_unreference(&work->old_fb_obj->base);
+
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+
+	kfree(work);
+}
+
+static void do_intel_finish_page_flip(struct drm_device *dev,
+				      struct drm_crtc *crtc)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_unpin_work *work;
+	unsigned long flags;
+
+	/* Ignore early vblank irqs */
+	if (intel_crtc == NULL)
+		return;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	work = intel_crtc->unpin_work;
+
+	/* Ensure we don't miss a work->pending update ... */
+	smp_rmb();
+
+	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return;
+	}
+
+	/* and that the unpin work is consistent wrt ->pending. */
+	smp_rmb();
+
+	intel_crtc->unpin_work = NULL;
+
+	if (work->event)
+		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
+
+	drm_vblank_put(dev, intel_crtc->pipe);
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	wake_up_all(&dev_priv->pending_flip_queue);
+
+	queue_work(dev_priv->wq, &work->work);
+
+	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
+}
+
+void intel_finish_page_flip(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+	do_intel_finish_page_flip(dev, crtc);
+}
+
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
+
+	do_intel_finish_page_flip(dev, crtc);
+}
+
+void intel_prepare_page_flip(struct drm_device *dev, int plane)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+	unsigned long flags;
+
+	/* NB: An MMIO update of the plane base pointer will also
+	 * generate a page-flip completion irq, i.e. every modeset
+	 * is also accompanied by a spurious intel_prepare_page_flip().
+	 */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (intel_crtc->unpin_work)
+		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
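+/*
+ * The smp_wmb()s here pair with the smp_rmb()s in
+ * do_intel_finish_page_flip(): the work item must be fully written before
+ * ->pending is set, and ->pending must be visible before the flip irq can
+ * observe the work as active.
+ */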
+static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+	/* Ensure that the work item is consistent when activating it ... */
+	smp_wmb();
+	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+	/* and that it is marked active as soon as the irq could fire. */
+	smp_wmb();
+}
+
+static int intel_gen2_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	u32 flip_mask;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+	if (ret)
+		goto err;
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		goto err_unpin;
+
+	/* Can't queue multiple flips, so wait for the previous
+	 * one to finish before executing the next.
+	 */
+	if (intel_crtc->plane)
+		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+	else
+		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+	intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+	intel_mark_page_flip_active(intel_crtc);
+	intel_ring_advance(ring);
+	return 0;
+
+err_unpin:
+	intel_unpin_fb_obj(obj);
+err:
+	return ret;
+}
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	u32 flip_mask;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+	if (ret)
+		goto err;
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		goto err_unpin;
+
+	if (intel_crtc->plane)
+		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+	else
+		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+	intel_ring_emit(ring, MI_NOOP);
+
+	intel_mark_page_flip_active(intel_crtc);
+	intel_ring_advance(ring);
+	return 0;
+
+err_unpin:
+	intel_unpin_fb_obj(obj);
+err:
+	return ret;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	uint32_t pf, pipesrc;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+	if (ret)
+		goto err;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		goto err_unpin;
+
+	/* i965+ uses the linear or tiled offsets from the
+	 * Display Registers (which do not change across a page-flip)
+	 * so we need only reprogram the base address.
+	 */
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring,
+			(obj->gtt_offset + intel_crtc->dspaddr_offset) |
+			obj->tiling_mode);
+
+	/* XXX Enabling the panel-fitter across page-flip is so far
+	 * untested on non-native modes, so ignore it for now.
+	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+	 */
+	pf = 0;
+	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+	intel_ring_emit(ring, pf | pipesrc);
+
+	intel_mark_page_flip_active(intel_crtc);
+	intel_ring_advance(ring);
+	return 0;
+
+err_unpin:
+	intel_unpin_fb_obj(obj);
+err:
+	return ret;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	uint32_t pf, pipesrc;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+	if (ret)
+		goto err;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		goto err_unpin;
+
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
+			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+
+	/* Contrary to the suggestions in the documentation,
+	 * "Enable Panel Fitter" does not seem to be required when page
+	 * flipping with a non-native mode, and worse causes a normal
+	 * modeset to fail.
+	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+	 */
+	pf = 0;
+	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+	intel_ring_emit(ring, pf | pipesrc);
+
+	intel_mark_page_flip_active(intel_crtc);
+	intel_ring_advance(ring);
+	return 0;
+
+err_unpin:
+	intel_unpin_fb_obj(obj);
+err:
+	return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrupts for page flip completion, which
+ * means clients will hang after the first flip is queued.  Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+static int intel_gen7_queue_flip(struct drm_device *dev,
+				 struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+	uint32_t plane_bit = 0;
+	int ret;
+
+	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+	if (ret)
+		goto err;
+
+	switch (intel_crtc->plane) {
+	case PLANE_A:
+		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
+		break;
+	case PLANE_B:
+		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
+		break;
+	case PLANE_C:
+		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
+		break;
+	default:
+		WARN_ONCE(1, "unknown plane in flip command\n");
+		ret = -ENODEV;
+		goto err_unpin;
+	}
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		goto err_unpin;
+
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
+	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+	intel_ring_emit(ring, MI_NOOP);
+
+	intel_mark_page_flip_active(intel_crtc);
+	intel_ring_advance(ring);
+	return 0;
+
+err_unpin:
+	intel_unpin_fb_obj(obj);
+err:
+	return ret;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+				    struct drm_crtc *crtc,
+				    struct drm_framebuffer *fb,
+				    struct drm_i915_gem_object *obj)
+{
+	return -ENODEV;
+}
+
+static int intel_crtc_page_flip(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_framebuffer *old_fb = crtc->fb;
+	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_unpin_work *work;
+	unsigned long flags;
+	int ret;
+
+	/* Can't change pixel format via MI display flips. */
+	if (fb->pixel_format != crtc->fb->pixel_format)
+		return -EINVAL;
+
+	/*
+	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
+	 * Note that pitch changes could also affect these registers.
+	 */
+	if (INTEL_INFO(dev)->gen > 3 &&
+	    (fb->offsets[0] != crtc->fb->offsets[0] ||
+	     fb->pitches[0] != crtc->fb->pitches[0]))
+		return -EINVAL;
+
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (work == NULL)
+		return -ENOMEM;
+
+	work->event = event;
+	work->crtc = crtc;
+	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
+	INIT_WORK(&work->work, intel_unpin_work_fn);
+
+	ret = drm_vblank_get(dev, intel_crtc->pipe);
+	if (ret)
+		goto free_work;
+
+	/* We borrow the event spin lock for protecting unpin_work */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (intel_crtc->unpin_work) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(work);
+		drm_vblank_put(dev, intel_crtc->pipe);
+
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+		return -EBUSY;
+	}
+	intel_crtc->unpin_work = work;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
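+	/*
+	 * Throttle: if two unpin works are already outstanding, let the
+	 * workqueue drain before queueing yet another flip.
+	 */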
+	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+		flush_workqueue(dev_priv->wq);
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto cleanup;
+
+	/* Reference the objects for the scheduled work. */
+	drm_gem_object_reference(&work->old_fb_obj->base);
+	drm_gem_object_reference(&obj->base);
+
+	crtc->fb = fb;
+
+	work->pending_flip_obj = obj;
+
+	work->enable_stall_check = true;
+
+	atomic_inc(&intel_crtc->unpin_work_count);
+	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+
+	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+	if (ret)
+		goto cleanup_pending;
+
+	intel_disable_fbc(dev);
+	intel_mark_fb_busy(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	trace_i915_flip_request(intel_crtc->plane, obj);
+
+	return 0;
+
+cleanup_pending:
+	atomic_dec(&intel_crtc->unpin_work_count);
+	crtc->fb = old_fb;
+	drm_gem_object_unreference(&work->old_fb_obj->base);
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+
+cleanup:
+	spin_lock_irqsave(&dev->event_lock, flags);
+	intel_crtc->unpin_work = NULL;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
+	kfree(work);
+
+	return ret;
+}
+
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+	.mode_set_base_atomic = intel_pipe_set_base_atomic,
+	.load_lut = intel_crtc_load_lut,
+};
+
+bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
+{
+	struct intel_encoder *other_encoder;
+	struct drm_crtc *crtc = &encoder->new_crtc->base;
+
+	if (WARN_ON(!crtc))
+		return false;
+
+	list_for_each_entry(other_encoder,
+			    &crtc->dev->mode_config.encoder_list,
+			    base.head) {
+		if (&other_encoder->new_crtc->base == crtc &&
+		    encoder != other_encoder)
+			return true;
+	}
+
+	return false;
+}
+
+static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
+				  struct drm_crtc *crtc)
+{
+	struct drm_device *dev;
+	struct drm_crtc *tmp;
+	int crtc_mask = 1;
+
+	WARN(!crtc, "checking null crtc?\n");
+
+	dev = crtc->dev;
+
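+	/*
+	 * encoder->possible_crtcs is a bitmask indexed by the crtc's position
+	 * in mode_config.crtc_list, so walk the list to find this crtc's bit.
+	 */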
+	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+		if (tmp == crtc)
+			break;
+		crtc_mask <<= 1;
+	}
+
+	if (encoder->possible_crtcs & crtc_mask)
+		return true;
+	return false;
+}
+
+/**
+ * intel_modeset_update_staged_output_state
+ *
+ * Updates the staged output configuration state, e.g. after we've read out the
+ * current hw state.
+ */
+static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		connector->new_encoder =
+			to_intel_encoder(connector->base.encoder);
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		encoder->new_crtc =
+			to_intel_crtc(encoder->base.crtc);
+	}
+}
+
+/**
+ * intel_modeset_commit_output_state
+ *
+ * This function copies the staged display pipe configuration to the real one.
+ */
+static void intel_modeset_commit_output_state(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		connector->base.encoder = &connector->new_encoder->base;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		encoder->base.crtc = &encoder->new_crtc->base;
+	}
+}
+
+static int
+pipe_config_set_bpp(struct drm_crtc *crtc,
+		    struct drm_framebuffer *fb,
+		    struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_connector *connector;
+	int bpp;
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_C8:
+		bpp = 8*3; /* since we go through a colormap */
+		break;
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_ARGB1555:
+		/* checked in intel_framebuffer_init already */
+		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
+			return -EINVAL;
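+		/* fall through */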
+	case DRM_FORMAT_RGB565:
+		bpp = 6*3; /* min is 18bpp */
+		break;
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+		/* checked in intel_framebuffer_init already */
+		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
+			return -EINVAL;
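+		/* fall through */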
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		bpp = 8*3;
+		break;
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010:
+		/* checked in intel_framebuffer_init already */
+		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
+			return -EINVAL;
+		bpp = 10*3;
+		break;
+	/* TODO: gen4+ supports 16 bpc floating point, too. */
+	default:
+		DRM_DEBUG_KMS("unsupported depth\n");
+		return -EINVAL;
+	}
+
+	pipe_config->pipe_bpp = bpp;
+
+	/* Clamp display bpp to EDID value */
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    head) {
+		if (connector->encoder && connector->encoder->crtc != crtc)
+			continue;
+
+		/* Don't use an invalid EDID bpc value */
+		if (connector->display_info.bpc &&
+		    connector->display_info.bpc * 3 < bpp) {
+			DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
+				      bpp, connector->display_info.bpc*3);
+			pipe_config->pipe_bpp = connector->display_info.bpc*3;
+		}
+	}
+
+	return bpp;
+}
+
+static struct intel_crtc_config *
+intel_modeset_pipe_config(struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb,
+			  struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct intel_encoder *encoder;
+	struct intel_crtc_config *pipe_config;
+	int plane_bpp;
+
+	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+	if (!pipe_config)
+		return ERR_PTR(-ENOMEM);
+
+	drm_mode_copy(&pipe_config->adjusted_mode, mode);
+	drm_mode_copy(&pipe_config->requested_mode, mode);
+
+	plane_bpp = pipe_config_set_bpp(crtc, fb, pipe_config);
+	if (plane_bpp < 0)
+		goto fail;
+
+	/* Pass our mode to the connectors and the CRTC to give them a chance to
+	 * adjust it according to limitations or connector properties, and also
+	 * a chance to reject the mode entirely.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+
+		if (&encoder->new_crtc->base != crtc)
+			continue;
+
+		if (encoder->compute_config) {
+			if (!(encoder->compute_config(encoder, pipe_config))) {
+				DRM_DEBUG_KMS("Encoder config failure\n");
+				goto fail;
+			}
+
+			continue;
+		}
+
+		encoder_funcs = encoder->base.helper_private;
+		if (!(encoder_funcs->mode_fixup(&encoder->base,
+						&pipe_config->requested_mode,
+						&pipe_config->adjusted_mode))) {
+			DRM_DEBUG_KMS("Encoder fixup failed\n");
+			goto fail;
+		}
+	}
+
+	if (!(intel_crtc_compute_config(crtc, pipe_config))) {
+		DRM_DEBUG_KMS("CRTC fixup failed\n");
+		goto fail;
+	}
+	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
+	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
+		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+
+	return pipe_config;
+fail:
+	kfree(pipe_config);
+	return ERR_PTR(-EINVAL);
+}
+
+/* Computes which crtcs are affected and sets the relevant bits in the mask. For
+ * simplicity we use the crtc's pipe number (because it's easier to obtain). */
+static void
+intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
+			     unsigned *prepare_pipes, unsigned *disable_pipes)
+{
+	struct intel_crtc *intel_crtc;
+	struct drm_device *dev = crtc->dev;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	struct drm_crtc *tmp_crtc;
+
+	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
+
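+	/*
+	 * disable_pipes: pipes left without any encoder, to be shut down.
+	 * prepare_pipes: pipes whose output configuration changes in any way.
+	 * modeset_pipes: subset of prepare_pipes that needs a full mode set.
+	 */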
+	/* Check which crtcs have changed outputs connected to them, these need
+	 * to be part of the prepare_pipes mask. We don't (yet) support global
+	 * modeset across multiple crtcs, so modeset_pipes will only have one
+	 * bit set at most. */
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		if (connector->base.encoder == &connector->new_encoder->base)
+			continue;
+
+		if (connector->base.encoder) {
+			tmp_crtc = connector->base.encoder->crtc;
+
+			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+		}
+
+		if (connector->new_encoder)
+			*prepare_pipes |=
+				1 << connector->new_encoder->new_crtc->pipe;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (encoder->base.crtc == &encoder->new_crtc->base)
+			continue;
+
+		if (encoder->base.crtc) {
+			tmp_crtc = encoder->base.crtc;
+
+			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+		}
+
+		if (encoder->new_crtc)
+			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
+	}
+
+	/* Check for any pipes that will be fully disabled ... */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		bool used = false;
+
+		/* Don't try to disable disabled crtcs. */
+		if (!intel_crtc->base.enabled)
+			continue;
+
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+				    base.head) {
+			if (encoder->new_crtc == intel_crtc)
+				used = true;
+		}
+
+		if (!used)
+			*disable_pipes |= 1 << intel_crtc->pipe;
+	}
+
+	/* set_mode is also used to update properties on live display pipes. */
+	intel_crtc = to_intel_crtc(crtc);
+	if (crtc->enabled)
+		*prepare_pipes |= 1 << intel_crtc->pipe;
+
+	/*
+	 * For simplicity do a full modeset on any pipe where the output routing
+	 * changed. We could be more clever, but that would require us to be
+	 * more careful with calling the relevant encoder->mode_set functions.
+	 */
+	if (*prepare_pipes)
+		*modeset_pipes = *prepare_pipes;
+
+	/* ... and mask these out. */
+	*modeset_pipes &= ~(*disable_pipes);
+	*prepare_pipes &= ~(*disable_pipes);
+
+	/*
+	 * HACK: We don't (yet) fully support global modesets. intel_set_config
+	 * obeys this rule, but the modeset restore mode of
+	 * intel_modeset_setup_hw_state does not.
+	 */
+	*modeset_pipes &= 1 << intel_crtc->pipe;
+	*prepare_pipes &= 1 << intel_crtc->pipe;
+}
+
+static bool intel_crtc_in_use(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev = crtc->dev;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->crtc == crtc)
+			return true;
+
+	return false;
+}
+
+static void
+intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
+{
+	struct intel_encoder *intel_encoder;
+	struct intel_crtc *intel_crtc;
+	struct drm_connector *connector;
+
+	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (!intel_encoder->base.crtc)
+			continue;
+
+		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+
+		if (prepare_pipes & (1 << intel_crtc->pipe))
+			intel_encoder->connectors_active = false;
+	}
+
+	intel_modeset_commit_output_state(dev);
+
+	/* Update computed state. */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+	}
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (!connector->encoder || !connector->encoder->crtc)
+			continue;
+
+		intel_crtc = to_intel_crtc(connector->encoder->crtc);
+
+		if (prepare_pipes & (1 << intel_crtc->pipe)) {
+			struct drm_property *dpms_property =
+				dev->mode_config.dpms_property;
+
+			connector->dpms = DRM_MODE_DPMS_ON;
+			drm_object_property_set_value(&connector->base,
+							 dpms_property,
+							 DRM_MODE_DPMS_ON);
+
+			intel_encoder = to_intel_encoder(connector->encoder);
+			intel_encoder->connectors_active = true;
+		}
+	}
+}
+
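+/* Iterate over all intel_crtcs whose pipe bit is set in @mask. */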
+#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
+	list_for_each_entry((intel_crtc), \
+			    &(dev)->mode_config.crtc_list, \
+			    base.head) \
+		if (mask & (1 << (intel_crtc)->pipe))
+
+static bool
+intel_pipe_config_compare(struct intel_crtc_config *current_config,
+			  struct intel_crtc_config *pipe_config)
+{
+	if (current_config->has_pch_encoder != pipe_config->has_pch_encoder) {
+		DRM_ERROR("mismatch in has_pch_encoder "
+			  "(expected %i, found %i)\n",
+			  current_config->has_pch_encoder,
+			  pipe_config->has_pch_encoder);
+		return false;
+	}
+
+	return true;
+}
+
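+/*
+ * Cross-check the driver's software state (staged vs. committed
+ * encoder/crtc links, dpms and active flags) against itself and against the
+ * hardware state read back through the ->get_hw_state and ->get_pipe_config
+ * callbacks.
+ */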
+void
+intel_modeset_check_state(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	struct intel_crtc_config pipe_config;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		/* This also checks the encoder/connector hw state with the
+		 * ->get_hw_state callbacks. */
+		intel_connector_check_state(connector);
+
+		WARN(&connector->new_encoder->base != connector->base.encoder,
+		     "connector's staged encoder doesn't match current encoder\n");
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		bool enabled = false;
+		bool active = false;
+		enum pipe pipe, tracked_pipe;
+
+		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
+			      encoder->base.base.id,
+			      drm_get_encoder_name(&encoder->base));
+
+		WARN(&encoder->new_crtc->base != encoder->base.crtc,
+		     "encoder's stage crtc doesn't match current crtc\n");
+		WARN(encoder->connectors_active && !encoder->base.crtc,
+		     "encoder's active_connectors set, but no crtc\n");
+
+		list_for_each_entry(connector, &dev->mode_config.connector_list,
+				    base.head) {
+			if (connector->base.encoder != &encoder->base)
+				continue;
+			enabled = true;
+			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
+				active = true;
+		}
+		WARN(!!encoder->base.crtc != enabled,
+		     "encoder's enabled state mismatch "
+		     "(expected %i, found %i)\n",
+		     !!encoder->base.crtc, enabled);
+		WARN(active && !encoder->base.crtc,
+		     "active encoder with no crtc\n");
+
+		WARN(encoder->connectors_active != active,
+		     "encoder's computed active state doesn't match tracked active state "
+		     "(expected %i, found %i)\n", active, encoder->connectors_active);
+
+		active = encoder->get_hw_state(encoder, &pipe);
+		WARN(active != encoder->connectors_active,
+		     "encoder's hw state doesn't match sw tracking "
+		     "(expected %i, found %i)\n",
+		     encoder->connectors_active, active);
+
+		if (!encoder->base.crtc)
+			continue;
+
+		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+		WARN(active && pipe != tracked_pipe,
+		     "active encoder's pipe doesn't match"
+		     "(expected %i, found %i)\n",
+		     tracked_pipe, pipe);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		bool enabled = false;
+		bool active = false;
+
+		DRM_DEBUG_KMS("[CRTC:%d]\n",
+			      crtc->base.base.id);
+
+		WARN(crtc->active && !crtc->base.enabled,
+		     "active crtc, but not enabled in sw tracking\n");
+
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+				    base.head) {
+			if (encoder->base.crtc != &crtc->base)
+				continue;
+			enabled = true;
+			if (encoder->connectors_active)
+				active = true;
+		}
+		WARN(active != crtc->active,
+		     "crtc's computed active state doesn't match tracked active state "
+		     "(expected %i, found %i)\n", active, crtc->active);
+		WARN(enabled != crtc->base.enabled,
+		     "crtc's computed enabled state doesn't match tracked enabled state "
+		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);
+
+		memset(&pipe_config, 0, sizeof(pipe_config));
+		active = dev_priv->display.get_pipe_config(crtc,
+							   &pipe_config);
+
+		/* hw state is inconsistent with the pipe A quirk */
+		if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+			active = crtc->active;
+
+		WARN(crtc->active != active,
+		     "crtc active state doesn't match with hw state "
+		     "(expected %i, found %i)\n", crtc->active, active);
+
+		WARN(active &&
+		     !intel_pipe_config_compare(&crtc->config, &pipe_config),
+		     "pipe state doesn't match!\n");
+	}
+}
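+
+/*
+ * Illustrative failure mode caught by the checks above (example only,
+ * not exhaustive): if a DPMS-off path forgets to clear
+ * encoder->connectors_active while the encoder's ->get_hw_state()
+ * already reports the port as disabled, the "encoder's hw state
+ * doesn't match sw tracking" WARN fires with expected 1, found 0.
+ */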
+
+static int __intel_set_mode(struct drm_crtc *crtc,
+			    struct drm_display_mode *mode,
+			    int x, int y, struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = crtc->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_display_mode *saved_mode, *saved_hwmode;
+	struct intel_crtc_config *pipe_config = NULL;
+	struct intel_crtc *intel_crtc;
+	unsigned disable_pipes, prepare_pipes, modeset_pipes;
+	int ret = 0;
+
+	saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
+	if (!saved_mode)
+		return -ENOMEM;
+	saved_hwmode = saved_mode + 1;
+
+	intel_modeset_affected_pipes(crtc, &modeset_pipes,
+				     &prepare_pipes, &disable_pipes);
+
+	*saved_hwmode = crtc->hwmode;
+	*saved_mode = crtc->mode;
+
+	/* Hack: Because we don't (yet) support global modeset on multiple
+	 * crtcs, we don't keep track of the new mode for more than one crtc.
+	 * Hence simply check whether any bit is set in modeset_pipes in all the
+	 * pieces of code that are not yet converted to deal with multiple crtcs
+	 * changing their mode at the same time. */
+	if (modeset_pipes) {
+		pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
+		if (IS_ERR(pipe_config)) {
+			ret = PTR_ERR(pipe_config);
+			pipe_config = NULL;
+
+			goto out;
+		}
+	}
+
+	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
+		      modeset_pipes, prepare_pipes, disable_pipes);
+
+	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
+		intel_crtc_disable(&intel_crtc->base);
+
+	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
+		if (intel_crtc->base.enabled)
+			dev_priv->display.crtc_disable(&intel_crtc->base);
+	}
+
+	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
+	 * to set it here already even though we also pass it down the callchain.
+	 */
+	if (modeset_pipes) {
+		enum transcoder tmp = to_intel_crtc(crtc)->config.cpu_transcoder;
+		crtc->mode = *mode;
+		/* mode_set/enable/disable functions rely on a correct pipe
+		 * config. */
+		to_intel_crtc(crtc)->config = *pipe_config;
+		to_intel_crtc(crtc)->config.cpu_transcoder = tmp;
+	}
+
+	/* Only after disabling all output pipelines that will be changed can we
+	 * update the output configuration. */
+	intel_modeset_update_state(dev, prepare_pipes);
+
+	if (dev_priv->display.modeset_global_resources)
+		dev_priv->display.modeset_global_resources(dev);
+
+	/* Set up the DPLL and any encoders state that needs to adjust or depend
+	 * on the DPLL.
+	 */
+	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
+		ret = intel_crtc_mode_set(&intel_crtc->base,
+					  x, y, fb);
+		if (ret)
+			goto done;
+	}
+
+	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
+	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
+		dev_priv->display.crtc_enable(&intel_crtc->base);
+
+	if (modeset_pipes) {
+		/* Store real post-adjustment hardware mode. */
+		crtc->hwmode = pipe_config->adjusted_mode;
+
+		/* Calculate and store various constants which
+		 * are later needed by vblank and swap-completion
+		 * timestamping. They are derived from true hwmode.
+		 */
+		drm_calc_timestamping_constants(crtc);
+	}
+
+	/* FIXME: add subpixel order */
+done:
+	if (ret && crtc->enabled) {
+		crtc->hwmode = *saved_hwmode;
+		crtc->mode = *saved_mode;
+	}
+
+out:
+	kfree(pipe_config);
+	kfree(saved_mode);
+	return ret;
+}
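+
+/*
+ * Illustrative example (not part of the driver): for a plain mode change
+ * on a single crtc driving pipe A, with no other pipe gaining or losing
+ * connectors, intel_modeset_affected_pipes() would typically yield
+ *
+ *	modeset_pipes = prepare_pipes = (1 << PIPE_A), disable_pipes = 0,
+ *
+ * so __intel_set_mode() shuts the pipe down, computes the new pipe
+ * config, programs the DPLL and encoders via intel_crtc_mode_set() and
+ * re-enables the pipe. The exact masks depend on the staged output state.
+ */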
+
+int intel_set_mode(struct drm_crtc *crtc,
+		     struct drm_display_mode *mode,
+		     int x, int y, struct drm_framebuffer *fb)
+{
+	int ret;
+
+	ret = __intel_set_mode(crtc, mode, x, y, fb);
+
+	if (ret == 0)
+		intel_modeset_check_state(crtc->dev);
+
+	return ret;
+}
+
+void intel_crtc_restore_mode(struct drm_crtc *crtc)
+{
+	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
+}
+
+#undef for_each_intel_crtc_masked
+
+static void intel_set_config_free(struct intel_set_config *config)
+{
+	if (!config)
+		return;
+
+	kfree(config->save_connector_encoders);
+	kfree(config->save_encoder_crtcs);
+	kfree(config);
+}
+
+static int intel_set_config_save_state(struct drm_device *dev,
+				       struct intel_set_config *config)
+{
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int count;
+
+	config->save_encoder_crtcs =
+		kcalloc(dev->mode_config.num_encoder,
+			sizeof(struct drm_crtc *), GFP_KERNEL);
+	if (!config->save_encoder_crtcs)
+		return -ENOMEM;
+
+	config->save_connector_encoders =
+		kcalloc(dev->mode_config.num_connector,
+			sizeof(struct drm_encoder *), GFP_KERNEL);
+	if (!config->save_connector_encoders)
+		return -ENOMEM;
+
+	/* Copy data. Note that driver private data is not affected.
+	 * Should anything bad happen only the expected state is
+	 * restored, not the drivers personal bookkeeping.
+	 */
+	count = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		config->save_encoder_crtcs[count++] = encoder->crtc;
+	}
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		config->save_connector_encoders[count++] = connector->encoder;
+	}
+
+	return 0;
+}
+
+static void intel_set_config_restore_state(struct drm_device *dev,
+					   struct intel_set_config *config)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	int count;
+
+	count = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+		encoder->new_crtc =
+			to_intel_crtc(config->save_encoder_crtcs[count++]);
+	}
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+		connector->new_encoder =
+			to_intel_encoder(config->save_connector_encoders[count++]);
+	}
+}
+
+static bool
+is_crtc_connector_off(struct drm_mode_set *set)
+{
+	int i;
+
+	if (set->num_connectors == 0)
+		return false;
+
+	if (WARN_ON(set->connectors == NULL))
+		return false;
+
+	for (i = 0; i < set->num_connectors; i++)
+		if (set->connectors[i]->encoder &&
+		    set->connectors[i]->encoder->crtc == set->crtc &&
+		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
+			return true;
+
+	return false;
+}
+
+static void
+intel_set_config_compute_mode_changes(struct drm_mode_set *set,
+				      struct intel_set_config *config)
+{
+	/* We should be able to check here if the fb has the same properties
+	 * and then just flip_or_move it */
+	if (is_crtc_connector_off(set)) {
+		config->mode_changed = true;
+	} else if (set->crtc->fb != set->fb) {
+		/* If we have no fb then treat it as a full mode set */
+		if (set->crtc->fb == NULL) {
+			DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+			config->mode_changed = true;
+		} else if (set->fb == NULL) {
+			config->mode_changed = true;
+		} else if (set->fb->pixel_format !=
+			   set->crtc->fb->pixel_format) {
+			config->mode_changed = true;
+		} else {
+			config->fb_changed = true;
+		}
+	}
+
+	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
+		config->fb_changed = true;
+
+	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+		DRM_DEBUG_KMS("modes are different, full mode set\n");
+		drm_mode_debug_printmodeline(&set->crtc->mode);
+		drm_mode_debug_printmodeline(set->mode);
+		config->mode_changed = true;
+	}
+}
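+
+/*
+ * Worked example of the classification above (illustrative): a flip to a
+ * new fb with the same pixel format, or a pan (same fb, new x/y), only
+ * sets fb_changed and is handled by intel_pipe_set_base(); switching
+ * e.g. from XRGB8888 to RGB565, dropping the fb entirely, or changing
+ * the mode sets mode_changed and forces a full modeset.
+ */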
+
+static int
+intel_modeset_stage_output_state(struct drm_device *dev,
+				 struct drm_mode_set *set,
+				 struct intel_set_config *config)
+{
+	struct drm_crtc *new_crtc;
+	struct intel_connector *connector;
+	struct intel_encoder *encoder;
+	int count, ro;
+
+	/* The upper layers ensure that we either disable a crtc or have a list
+	 * of connectors. For paranoia, double-check this. */
+	WARN_ON(!set->fb && (set->num_connectors != 0));
+	WARN_ON(set->fb && (set->num_connectors == 0));
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		/* Traverse the passed-in connector list and get the encoder
+		 * for each connector on it. */
+		for (ro = 0; ro < set->num_connectors; ro++) {
+			if (set->connectors[ro] == &connector->base) {
+				connector->new_encoder = connector->encoder;
+				break;
+			}
+		}
+
+		/* If we disable the crtc, disable all its connectors. Also, if
+		 * the connector is on the changing crtc but not on the new
+		 * connector list, disable it. */
+		if ((!set->fb || ro == set->num_connectors) &&
+		    connector->base.encoder &&
+		    connector->base.encoder->crtc == set->crtc) {
+			connector->new_encoder = NULL;
+
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+				connector->base.base.id,
+				drm_get_connector_name(&connector->base));
+		}
+
+		if (&connector->new_encoder->base != connector->base.encoder) {
+			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+			config->mode_changed = true;
+		}
+	}
+	/* connector->new_encoder is now updated for all connectors. */
+
+	/* Update crtc of enabled connectors. */
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		if (!connector->new_encoder)
+			continue;
+
+		new_crtc = connector->new_encoder->base.crtc;
+
+		for (ro = 0; ro < set->num_connectors; ro++) {
+			if (set->connectors[ro] == &connector->base)
+				new_crtc = set->crtc;
+		}
+
+		/* Make sure the new CRTC will work with the encoder */
+		if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
+					   new_crtc)) {
+			return -EINVAL;
+		}
+		connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+			connector->base.base.id,
+			drm_get_connector_name(&connector->base),
+			new_crtc->base.id);
+	}
+
+	/* Check for any encoders that needs to be disabled. */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		list_for_each_entry(connector,
+				    &dev->mode_config.connector_list,
+				    base.head) {
+			if (connector->new_encoder == encoder) {
+				WARN_ON(!connector->new_encoder->new_crtc);
+
+				goto next_encoder;
+			}
+		}
+		encoder->new_crtc = NULL;
+next_encoder:
+		/* Only now check for crtc changes so we don't miss encoders
+		 * that will be disabled. */
+		if (&encoder->new_crtc->base != encoder->base.crtc) {
+			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
+			config->mode_changed = true;
+		}
+	}
+	/* Now we've also updated encoder->new_crtc for all encoders. */
+
+	return 0;
+}
+
+static int intel_crtc_set_config(struct drm_mode_set *set)
+{
+	struct drm_device *dev;
+	struct drm_mode_set save_set;
+	struct intel_set_config *config;
+	int ret;
+
+	BUG_ON(!set);
+	BUG_ON(!set->crtc);
+	BUG_ON(!set->crtc->helper_private);
+
+	/* Enforce sane interface api - has been abused by the fb helper. */
+	BUG_ON(!set->mode && set->fb);
+	BUG_ON(set->fb && set->num_connectors == 0);
+
+	if (set->fb) {
+		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+				set->crtc->base.id, set->fb->base.id,
+				(int)set->num_connectors, set->x, set->y);
+	} else {
+		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+	}
+
+	dev = set->crtc->dev;
+
+	ret = -ENOMEM;
+	config = kzalloc(sizeof(*config), GFP_KERNEL);
+	if (!config)
+		goto out_config;
+
+	ret = intel_set_config_save_state(dev, config);
+	if (ret)
+		goto out_config;
+
+	save_set.crtc = set->crtc;
+	save_set.mode = &set->crtc->mode;
+	save_set.x = set->crtc->x;
+	save_set.y = set->crtc->y;
+	save_set.fb = set->crtc->fb;
+
+	/* Compute whether we need a full modeset, only an fb base update or no
+	 * change at all. In the future we might also check whether only the
+	 * mode changed, e.g. for LVDS where we only change the panel fitter in
+	 * such cases. */
+	intel_set_config_compute_mode_changes(set, config);
+
+	ret = intel_modeset_stage_output_state(dev, set, config);
+	if (ret)
+		goto fail;
+
+	if (config->mode_changed) {
+		if (set->mode) {
+			DRM_DEBUG_KMS("attempting to set mode from"
+					" userspace\n");
+			drm_mode_debug_printmodeline(set->mode);
+		}
+
+		ret = intel_set_mode(set->crtc, set->mode,
+				     set->x, set->y, set->fb);
+	} else if (config->fb_changed) {
+		intel_crtc_wait_for_pending_flips(set->crtc);
+
+		ret = intel_pipe_set_base(set->crtc,
+					  set->x, set->y, set->fb);
+	}
+
+	if (ret) {
+		DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
+			  set->crtc->base.id, ret);
+fail:
+		intel_set_config_restore_state(dev, config);
+
+		/* Try to restore the config */
+		if (config->mode_changed &&
+		    intel_set_mode(save_set.crtc, save_set.mode,
+				   save_set.x, save_set.y, save_set.fb))
+			DRM_ERROR("failed to restore config after modeset failure\n");
+	}
+
+out_config:
+	intel_set_config_free(config);
+	return ret;
+}
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+	.cursor_set = intel_crtc_cursor_set,
+	.cursor_move = intel_crtc_cursor_move,
+	.gamma_set = intel_crtc_gamma_set,
+	.set_config = intel_crtc_set_config,
+	.destroy = intel_crtc_destroy,
+	.page_flip = intel_crtc_page_flip,
+};
+
+static void intel_cpu_pll_init(struct drm_device *dev)
+{
+	if (HAS_DDI(dev))
+		intel_ddi_pll_init(dev);
+}
+
+static void intel_pch_pll_init(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	if (dev_priv->num_pch_pll == 0) {
+		DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
+		return;
+	}
+
+	for (i = 0; i < dev_priv->num_pch_pll; i++) {
+		dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
+		dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
+		dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
+	}
+}
+
+static void intel_crtc_init(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc;
+	int i;
+
+	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+	if (intel_crtc == NULL)
+		return;
+
+	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
+	for (i = 0; i < 256; i++) {
+		intel_crtc->lut_r[i] = i;
+		intel_crtc->lut_g[i] = i;
+		intel_crtc->lut_b[i] = i;
+	}
+
+	/* Swap pipes & planes for FBC on pre-965 */
+	intel_crtc->pipe = pipe;
+	intel_crtc->plane = pipe;
+	intel_crtc->config.cpu_transcoder = pipe;
+	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
+		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
+		intel_crtc->plane = !pipe;
+	}
+
+	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
+	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
+	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+
+	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+}
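+
+/*
+ * Example of the FBC swap above (illustrative): on a mobile gen3 part
+ * the crtc for pipe 0 is assigned plane 1 (plane B) and the crtc for
+ * pipe 1 gets plane 0, the idea being that FBC on pre-965 hardware can
+ * only compress plane A. Everywhere else the mapping stays 1:1.
+ */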
+
+int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
+	struct drm_mode_object *drmmode_obj;
+	struct intel_crtc *crtc;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
+			DRM_MODE_OBJECT_CRTC);
+
+	if (!drmmode_obj) {
+		DRM_ERROR("no such CRTC id\n");
+		return -EINVAL;
+	}
+
+	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+	pipe_from_crtc_id->pipe = crtc->pipe;
+
+	return 0;
+}
+
+static int intel_encoder_clones(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_encoder *source_encoder;
+	int index_mask = 0;
+	int entry = 0;
+
+	list_for_each_entry(source_encoder,
+			    &dev->mode_config.encoder_list, base.head) {
+
+		if (encoder == source_encoder)
+			index_mask |= (1 << entry);
+
+		/* Intel hw has only one MUX where encoders can be cloned. */
+		if (encoder->cloneable && source_encoder->cloneable)
+			index_mask |= (1 << entry);
+
+		entry++;
+	}
+
+	return index_mask;
+}
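+
+/*
+ * Clone-mask example (illustrative): with three encoders on the list, a
+ * non-cloneable encoder at list position 1 gets index_mask = 0x2 (only
+ * itself), while a cloneable encoder at position 1 whose neighbours at
+ * positions 0 and 2 are also cloneable gets index_mask = 0x7.
+ */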
+
+static bool has_edp_a(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!IS_MOBILE(dev))
+		return false;
+
+	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
+		return false;
+
+	if (IS_GEN5(dev) &&
+	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
+		return false;
+
+	return true;
+}
+
+static void intel_setup_outputs(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *encoder;
+	bool dpd_is_edp = false;
+	bool has_lvds;
+
+	has_lvds = intel_lvds_init(dev);
+	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
+		/* disable the panel fitter on everything but LVDS */
+		I915_WRITE(PFIT_CONTROL, 0);
+	}
+
+	if (!IS_ULT(dev))
+		intel_crt_init(dev);
+
+	if (HAS_DDI(dev)) {
+		int found;
+
+		/* Haswell uses DDI functions to detect digital outputs */
+		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
+		/* DDI A only supports eDP */
+		if (found)
+			intel_ddi_init(dev, PORT_A);
+
+		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
+		 * register */
+		found = I915_READ(SFUSE_STRAP);
+
+		if (found & SFUSE_STRAP_DDIB_DETECTED)
+			intel_ddi_init(dev, PORT_B);
+		if (found & SFUSE_STRAP_DDIC_DETECTED)
+			intel_ddi_init(dev, PORT_C);
+		if (found & SFUSE_STRAP_DDID_DETECTED)
+			intel_ddi_init(dev, PORT_D);
+	} else if (HAS_PCH_SPLIT(dev)) {
+		int found;
+		dpd_is_edp = intel_dpd_is_edp(dev);
+
+		if (has_edp_a(dev))
+			intel_dp_init(dev, DP_A, PORT_A);
+
+		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
+			/* PCH SDVOB multiplex with HDMIB */
+			found = intel_sdvo_init(dev, PCH_SDVOB, true);
+			if (!found)
+				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
+			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
+				intel_dp_init(dev, PCH_DP_B, PORT_B);
+		}
+
+		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
+			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
+
+		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
+			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
+
+		if (I915_READ(PCH_DP_C) & DP_DETECTED)
+			intel_dp_init(dev, PCH_DP_C, PORT_C);
+
+		if (I915_READ(PCH_DP_D) & DP_DETECTED)
+			intel_dp_init(dev, PCH_DP_D, PORT_D);
+	} else if (IS_VALLEYVIEW(dev)) {
+		/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
+			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
+
+		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
+			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
+					PORT_B);
+			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
+				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
+		}
+	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
+		bool found = false;
+
+		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
+			DRM_DEBUG_KMS("probing SDVOB\n");
+			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
+			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
+				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
+			}
+
+			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
+				DRM_DEBUG_KMS("probing DP_B\n");
+				intel_dp_init(dev, DP_B, PORT_B);
+			}
+		}
+
+		/* Before G4X, SDVOC doesn't have its own detect register */
+
+		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
+			DRM_DEBUG_KMS("probing SDVOC\n");
+			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
+		}
+
+		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
+			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
+				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
+			}
+			if (SUPPORTS_INTEGRATED_DP(dev)) {
+				DRM_DEBUG_KMS("probing DP_C\n");
+				intel_dp_init(dev, DP_C, PORT_C);
+			}
+		}
+
+		if (SUPPORTS_INTEGRATED_DP(dev) &&
+		    (I915_READ(DP_D) & DP_DETECTED)) {
+			DRM_DEBUG_KMS("probing DP_D\n");
+			intel_dp_init(dev, DP_D, PORT_D);
+		}
+	} else if (IS_GEN2(dev))
+		intel_dvo_init(dev);
+
+	if (SUPPORTS_TV(dev))
+		intel_tv_init(dev);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+		encoder->base.possible_crtcs = encoder->crtc_mask;
+		encoder->base.possible_clones =
+			intel_encoder_clones(encoder);
+	}
+
+	intel_init_pch_refclk(dev);
+
+	drm_helper_move_panel_connectors_to_head(dev);
+}
+
+static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+	drm_framebuffer_cleanup(fb);
+	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
+
+	kfree(intel_fb);
+}
+
+static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+						struct drm_file *file,
+						unsigned int *handle)
+{
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+
+	return drm_gem_handle_create(file, &obj->base, handle);
+}
+
+static const struct drm_framebuffer_funcs intel_fb_funcs = {
+	.destroy = intel_user_framebuffer_destroy,
+	.create_handle = intel_user_framebuffer_create_handle,
+};
+
+int intel_framebuffer_init(struct drm_device *dev,
+			   struct intel_framebuffer *intel_fb,
+			   struct drm_mode_fb_cmd2 *mode_cmd,
+			   struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+	if (obj->tiling_mode == I915_TILING_Y) {
+		DRM_DEBUG("hardware does not support tiling Y\n");
+		return -EINVAL;
+	}
+
+	if (mode_cmd->pitches[0] & 63) {
+		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
+			  mode_cmd->pitches[0]);
+		return -EINVAL;
+	}
+
+	/* FIXME <= Gen4 stride limits are a bit unclear */
+	if (mode_cmd->pitches[0] > 32768) {
+		DRM_DEBUG("pitch (%d) must be at less than 32768\n",
+			  mode_cmd->pitches[0]);
+		return -EINVAL;
+	}
+
+	if (obj->tiling_mode != I915_TILING_NONE &&
+	    mode_cmd->pitches[0] != obj->stride) {
+		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+			  mode_cmd->pitches[0], obj->stride);
+		return -EINVAL;
+	}
+
+	/* Reject formats not supported by any plane early. */
+	switch (mode_cmd->pixel_format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		break;
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_ARGB1555:
+		if (INTEL_INFO(dev)->gen > 3) {
+			DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+			return -EINVAL;
+		}
+		break;
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010:
+		if (INTEL_INFO(dev)->gen < 4) {
+			DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+			return -EINVAL;
+		}
+		break;
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_VYUY:
+		if (INTEL_INFO(dev)->gen < 5) {
+			DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
+		return -EINVAL;
+	}
+
+	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+	if (mode_cmd->offsets[0] != 0)
+		return -EINVAL;
+
+	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+	intel_fb->obj = obj;
+
+	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
+	if (ret) {
+		DRM_ERROR("framebuffer init failed %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
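+
+/*
+ * Illustrative numbers for the pitch checks above: a 1920-wide
+ * XRGB8888 linear fb has pitches[0] = 1920 * 4 = 7680 bytes, which is a
+ * multiple of 64 (7680 & 63 == 0) and below 32768, so it passes; a
+ * 1366-wide fb at 4 bytes per pixel would need userspace to pad the
+ * pitch from 5464 up to 5504 bytes to satisfy the alignment check.
+ */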
+
+static struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+			      struct drm_file *filp,
+			      struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_i915_gem_object *obj;
+	struct drm_gem_object *gobj;
+
+	/* Check the lookup result before converting to the wrapper type,
+	 * instead of relying on &to_intel_bo(NULL)->base comparing equal
+	 * to NULL. */
+	gobj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+	if (gobj == NULL)
+		return ERR_PTR(-ENOENT);
+	obj = to_intel_bo(gobj);
+
+	return intel_framebuffer_create(dev, mode_cmd, obj);
+}
+
+static const struct drm_mode_config_funcs intel_mode_funcs = {
+	.fb_create = intel_user_framebuffer_create,
+	.output_poll_changed = intel_fb_output_poll_changed,
+};
+
+/* Set up chip specific display functions */
+static void intel_init_display(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (HAS_DDI(dev)) {
+		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
+		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+		dev_priv->display.crtc_enable = haswell_crtc_enable;
+		dev_priv->display.crtc_disable = haswell_crtc_disable;
+		dev_priv->display.off = haswell_crtc_off;
+		dev_priv->display.update_plane = ironlake_update_plane;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+		dev_priv->display.crtc_enable = ironlake_crtc_enable;
+		dev_priv->display.crtc_disable = ironlake_crtc_disable;
+		dev_priv->display.off = ironlake_crtc_off;
+		dev_priv->display.update_plane = ironlake_update_plane;
+	} else {
+		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+		dev_priv->display.crtc_enable = i9xx_crtc_enable;
+		dev_priv->display.crtc_disable = i9xx_crtc_disable;
+		dev_priv->display.off = i9xx_crtc_off;
+		dev_priv->display.update_plane = i9xx_update_plane;
+	}
+
+	/* Returns the core display clock speed */
+	if (IS_VALLEYVIEW(dev))
+		dev_priv->display.get_display_clock_speed =
+			valleyview_get_display_clock_speed;
+	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+		dev_priv->display.get_display_clock_speed =
+			i945_get_display_clock_speed;
+	else if (IS_I915G(dev))
+		dev_priv->display.get_display_clock_speed =
+			i915_get_display_clock_speed;
+	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
+		dev_priv->display.get_display_clock_speed =
+			i9xx_misc_get_display_clock_speed;
+	else if (IS_I915GM(dev))
+		dev_priv->display.get_display_clock_speed =
+			i915gm_get_display_clock_speed;
+	else if (IS_I865G(dev))
+		dev_priv->display.get_display_clock_speed =
+			i865_get_display_clock_speed;
+	else if (IS_I85X(dev))
+		dev_priv->display.get_display_clock_speed =
+			i855_get_display_clock_speed;
+	else /* 852, 830 */
+		dev_priv->display.get_display_clock_speed =
+			i830_get_display_clock_speed;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		if (IS_GEN5(dev)) {
+			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+			dev_priv->display.write_eld = ironlake_write_eld;
+		} else if (IS_GEN6(dev)) {
+			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+			dev_priv->display.write_eld = ironlake_write_eld;
+		} else if (IS_IVYBRIDGE(dev)) {
+			/* FIXME: detect B0+ stepping and use auto training */
+			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+			dev_priv->display.write_eld = ironlake_write_eld;
+			dev_priv->display.modeset_global_resources =
+				ivb_modeset_global_resources;
+		} else if (IS_HASWELL(dev)) {
+			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+			dev_priv->display.write_eld = haswell_write_eld;
+			dev_priv->display.modeset_global_resources =
+				haswell_modeset_global_resources;
+		}
+	} else if (IS_G4X(dev)) {
+		dev_priv->display.write_eld = g4x_write_eld;
+	}
+
+	/* Default just returns -ENODEV to indicate unsupported */
+	dev_priv->display.queue_flip = intel_default_queue_flip;
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 2:
+		dev_priv->display.queue_flip = intel_gen2_queue_flip;
+		break;
+
+	case 3:
+		dev_priv->display.queue_flip = intel_gen3_queue_flip;
+		break;
+
+	case 4:
+	case 5:
+		dev_priv->display.queue_flip = intel_gen4_queue_flip;
+		break;
+
+	case 6:
+		dev_priv->display.queue_flip = intel_gen6_queue_flip;
+		break;
+	case 7:
+		dev_priv->display.queue_flip = intel_gen7_queue_flip;
+		break;
+	}
+}
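+
+/*
+ * Dispatch example (illustrative): on Ivybridge (gen7, PCH split, no
+ * DDI) the table above resolves to the ironlake crtc enable/disable
+ * hooks, ivb_manual_fdi_link_train and intel_gen7_queue_flip, so the
+ * rest of the driver stays generation-agnostic behind dev_priv->display.
+ */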
+
+/*
+ * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
+ * resume, or other times.  This quirk makes sure that's the case for
+ * affected systems.
+ */
+static void quirk_pipea_force(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
+	DRM_INFO("applying pipe a force quirk\n");
+}
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+	DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
+	DRM_INFO("applying inverted panel brightness quirk\n");
+}
+
+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pch_pwm_enable(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+	DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
+struct intel_quirk {
+	int device;
+	int subsystem_vendor;
+	int subsystem_device;
+	void (*hook)(struct drm_device *dev);
+};
+
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+	void (*hook)(struct drm_device *dev);
+	const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+	return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+	{
+		.dmi_id_list = &(const struct dmi_system_id[]) {
+			{
+				.callback = intel_dmi_reverse_brightness,
+				.ident = "NCR Corporation",
+				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
+				},
+			},
+			{ }  /* terminating entry */
+		},
+		.hook = quirk_invert_brightness,
+	},
+};
+
+static struct intel_quirk intel_quirks[] = {
+	/* HP Mini needs pipe A force quirk (LP: #322104) */
+	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
+
+	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
+	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
+
+	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
+	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
+
+	/* 830/845 need to leave pipe A & dpll A up */
+	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+
+	/* Lenovo U160 cannot use SSC on LVDS */
+	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+	/* Sony Vaio Y cannot use SSC on LVDS */
+	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+	/* Acer Aspire 5734Z must invert backlight brightness */
+	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+	/* Acer/eMachines G725 */
+	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+	/* Acer/eMachines e725 */
+	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+	/* Acer/Packard Bell NCL20 */
+	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+	/* Acer Aspire 4736Z */
+	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+	/* Dell XPS13 HD Sandy Bridge */
+	{ 0x0116, 0x1028, 0x052e, quirk_no_pch_pwm_enable },
+	/* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+	{ 0x0166, 0x1028, 0x058b, quirk_no_pch_pwm_enable },
+};
+
+static void intel_init_quirks(struct drm_device *dev)
+{
+	struct pci_dev *d = dev->pdev;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+		struct intel_quirk *q = &intel_quirks[i];
+
+		if (d->device == q->device &&
+		    (d->subsystem_vendor == q->subsystem_vendor ||
+		     q->subsystem_vendor == PCI_ANY_ID) &&
+		    (d->subsystem_device == q->subsystem_device ||
+		     q->subsystem_device == PCI_ANY_ID))
+			q->hook(dev);
+	}
+	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+			intel_dmi_quirks[i].hook(dev);
+	}
+}
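+
+/*
+ * Matching example (illustrative): an 845G (device 0x2562) matches its
+ * PCI_ANY_ID/PCI_ANY_ID table entry above regardless of subsystem IDs,
+ * so quirk_pipea_force() always runs on it, whereas the Lenovo U160
+ * entry requires the exact 0x17aa/0x3920 subsystem pair.
+ */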
+
+/* Disable the VGA plane that we never use */
+static void i915_disable_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u8 sr1;
+	u32 vga_reg = i915_vgacntrl_reg(dev);
+
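+	/* Set bit 5 (screen off) in VGA sequencer register SR01; the legacy
+	 * VGA I/O range must be held while poking the index/data pair. */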
+	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
+	outb(SR01, VGA_SR_INDEX);
+	sr1 = inb(VGA_SR_DATA);
+	outb(sr1 | 1<<5, VGA_SR_DATA);
+	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
+	udelay(300);
+
+	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+	POSTING_READ(vga_reg);
+}
+
+void intel_modeset_init_hw(struct drm_device *dev)
+{
+	intel_init_power_well(dev);
+
+	intel_prepare_ddi(dev);
+
+	intel_init_clock_gating(dev);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_enable_gt_powersave(dev);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+void intel_modeset_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i, j, ret;
+
+	drm_mode_config_init(dev);
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	dev->mode_config.preferred_depth = 24;
+	dev->mode_config.prefer_shadow = 1;
+
+	dev->mode_config.funcs = &intel_mode_funcs;
+
+	intel_init_quirks(dev);
+
+	intel_init_pm(dev);
+
+	if (INTEL_INFO(dev)->num_pipes == 0)
+		return;
+
+	intel_init_display(dev);
+
+	if (IS_GEN2(dev)) {
+		dev->mode_config.max_width = 2048;
+		dev->mode_config.max_height = 2048;
+	} else if (IS_GEN3(dev)) {
+		dev->mode_config.max_width = 4096;
+		dev->mode_config.max_height = 4096;
+	} else {
+		dev->mode_config.max_width = 8192;
+		dev->mode_config.max_height = 8192;
+	}
+	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
+
+	DRM_DEBUG_KMS("%d display pipe%s available.\n",
+		      INTEL_INFO(dev)->num_pipes,
+		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
+
+	for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+		intel_crtc_init(dev, i);
+		for (j = 0; j < dev_priv->num_plane; j++) {
+			ret = intel_plane_init(dev, i, j);
+			if (ret)
+				DRM_DEBUG_KMS("pipe %d plane %d init failed: %d\n",
+					      i, j, ret);
+		}
+	}
+
+	intel_cpu_pll_init(dev);
+	intel_pch_pll_init(dev);
+
+	/* Just disable it once at startup */
+	i915_disable_vga(dev);
+	intel_setup_outputs(dev);
+
+	/* Just in case the BIOS is doing something questionable. */
+	intel_disable_fbc(dev);
+}
+
+static void intel_enable_pipe_a(struct drm_device *dev)
+{
+	struct intel_connector *connector;
+	struct drm_connector *crt = NULL;
+	struct intel_load_detect_pipe load_detect_temp;
+
+	/* We can't just switch on the pipe A, we need to set things up with a
+	 * proper mode and output configuration. As a gross hack, enable pipe A
+	 * by enabling the load detect pipe once. */
+	list_for_each_entry(connector,
+			    &dev->mode_config.connector_list,
+			    base.head) {
+		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
+			crt = &connector->base;
+			break;
+		}
+	}
+
+	if (!crt)
+		return;
+
+	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
+		intel_release_load_detect_pipe(crt, &load_detect_temp);
+}
+
+static bool
+intel_check_plane_mapping(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg, val;
+
+	if (INTEL_INFO(dev)->num_pipes == 1)
+		return true;
+
+	reg = DSPCNTR(!crtc->plane);
+	val = I915_READ(reg);
+
+	if ((val & DISPLAY_PLANE_ENABLE) &&
+	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+		return false;
+
+	return true;
+}
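+
+/*
+ * Example (illustrative): on gen3, if this crtc believes it owns plane A
+ * on pipe 0 but plane B's DSPCNTR shows plane B enabled and sourcing
+ * pipe 0, the BIOS left a crossed mapping behind; the function returns
+ * false so that intel_sanitize_crtc() can untangle it.
+ */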
+
+static void intel_sanitize_crtc(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg;
+
+	/* Clear any frame start delays that the BIOS left behind for debugging */
+	reg = PIPECONF(crtc->config.cpu_transcoder);
+	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+
+	/* We need to sanitize the plane -> pipe mapping first because this will
+	 * disable the crtc (and hence change the state) if it is wrong. Note
+	 * that gen4+ has a fixed plane -> pipe mapping.  */
+	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
+		struct intel_connector *connector;
+		bool plane;
+
+		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
+			      crtc->base.base.id);
+
+		/* Pipe has the wrong plane attached and the plane is active.
+		 * Temporarily change the plane mapping and disable everything
+		 * ...  */
+		plane = crtc->plane;
+		crtc->plane = !plane;
+		dev_priv->display.crtc_disable(&crtc->base);
+		crtc->plane = plane;
+
+		/* ... and break all links. */
+		list_for_each_entry(connector, &dev->mode_config.connector_list,
+				    base.head) {
+			if (connector->encoder->base.crtc != &crtc->base)
+				continue;
+
+			connector->base.dpms = DRM_MODE_DPMS_OFF;
+			connector->base.encoder = NULL;
+		}
+		/* multiple connectors may have the same encoder:
+		 *  handle them and break crtc link separately */
+		list_for_each_entry(connector, &dev->mode_config.connector_list,
+				    base.head)
+			if (connector->encoder->base.crtc == &crtc->base) {
+				connector->encoder->base.crtc = NULL;
+				connector->encoder->connectors_active = false;
+			}
+
+		WARN_ON(crtc->active);
+		crtc->base.enabled = false;
+	}
+
+	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
+	    crtc->pipe == PIPE_A && !crtc->active) {
+		/* BIOS forgot to enable pipe A, this mostly happens after
+		 * resume. Force-enable the pipe to fix this; the update_dpms
+		 * call below will restore the pipe to the right state, but
+		 * leave the required bits on. */
+		intel_enable_pipe_a(dev);
+	}
+
+	/* Adjust the state of the output pipe according to whether we
+	 * have active connectors/encoders. */
+	intel_crtc_update_dpms(&crtc->base);
+
+	if (crtc->active != crtc->base.enabled) {
+		struct intel_encoder *encoder;
+
+		/* This can happen either due to bugs in the get_hw_state
+		 * functions or because the pipe is force-enabled due to the
+		 * pipe A quirk. */
+		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
+			      crtc->base.base.id,
+			      crtc->base.enabled ? "enabled" : "disabled",
+			      crtc->active ? "enabled" : "disabled");
+
+		crtc->base.enabled = crtc->active;
+
+		/* Because we only establish the connector -> encoder ->
+		 * crtc links if something is active, this means the
+		 * crtc is now deactivated. Break the links. connector
+		 * -> encoder links are only established when things are
+		 * actually up, hence no need to break them. */
+		WARN_ON(crtc->active);
+
+		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+			WARN_ON(encoder->connectors_active);
+			encoder->base.crtc = NULL;
+		}
+	}
+}
+
+static void intel_sanitize_encoder(struct intel_encoder *encoder)
+{
+	struct intel_connector *connector;
+	struct drm_device *dev = encoder->base.dev;
+
+	/* We need to check both for a crtc link (meaning that the
+	 * encoder is active and trying to read from a pipe) and the
+	 * pipe itself being active. */
+	bool has_active_crtc = encoder->base.crtc &&
+		to_intel_crtc(encoder->base.crtc)->active;
+
+	if (encoder->connectors_active && !has_active_crtc) {
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+			      encoder->base.base.id,
+			      drm_get_encoder_name(&encoder->base));
+
+		/* Connector is active, but has no active pipe. This is
+		 * fallout from our resume register restoring. Disable
+		 * the encoder manually again. */
+		if (encoder->base.crtc) {
+			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
+				      encoder->base.base.id,
+				      drm_get_encoder_name(&encoder->base));
+			encoder->disable(encoder);
+		}
+		encoder->base.crtc = NULL;
+		encoder->connectors_active = false;
+
+		/* Inconsistent output/port/pipe state happens presumably due to
+		 * a bug in one of the get_hw_state functions. Or someplace else
+		 * in our code, like the register restore mess on resume. Clamp
+		 * things to off as a safer default. */
+		list_for_each_entry(connector,
+				    &dev->mode_config.connector_list,
+				    base.head) {
+			if (connector->encoder != encoder)
+				continue;
+			connector->base.dpms = DRM_MODE_DPMS_OFF;
+			connector->base.encoder = NULL;
+		}
+	}
+	/* Enabled encoders without active connectors will be fixed in
+	 * the crtc fixup. */
+}
+
+void i915_redisable_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 vga_reg = i915_vgacntrl_reg(dev);
+
+	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+		i915_disable_vga(dev);
+	}
+}
+
+/* Scans out the current hw modeset state, then sanitizes it and maps it
+ * into the drm and i915 state tracking structures. */
+void intel_modeset_setup_hw_state(struct drm_device *dev,
+				  bool force_restore)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe;
+	u32 tmp;
+	struct drm_plane *plane;
+	struct intel_crtc *crtc;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	if (HAS_DDI(dev)) {
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+		if (tmp & TRANS_DDI_FUNC_ENABLE) {
+			switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+			case TRANS_DDI_EDP_INPUT_A_ON:
+			case TRANS_DDI_EDP_INPUT_A_ONOFF:
+				pipe = PIPE_A;
+				break;
+			case TRANS_DDI_EDP_INPUT_B_ONOFF:
+				pipe = PIPE_B;
+				break;
+			case TRANS_DDI_EDP_INPUT_C_ONOFF:
+				pipe = PIPE_C;
+				break;
+			default:
+				/* A bogus value has been programmed, disable
+				 * the transcoder */
+				WARN(1, "Bogus eDP source %08x\n", tmp);
+				intel_ddi_disable_transcoder_func(dev_priv,
+						TRANSCODER_EDP);
+				goto setup_pipes;
+			}
+
+			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+			crtc->config.cpu_transcoder = TRANSCODER_EDP;
+
+			DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
+				      pipe_name(pipe));
+		}
+	}
+
+setup_pipes:
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		enum transcoder tmp = crtc->config.cpu_transcoder;
+		memset(&crtc->config, 0, sizeof(crtc->config));
+		crtc->config.cpu_transcoder = tmp;
+
+		crtc->active = dev_priv->display.get_pipe_config(crtc,
+								 &crtc->config);
+
+		crtc->base.enabled = crtc->active;
+
+		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
+			      crtc->base.base.id,
+			      crtc->active ? "enabled" : "disabled");
+	}
+
+	if (HAS_DDI(dev))
+		intel_ddi_setup_hw_pll_state(dev);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		pipe = 0;
+
+		if (encoder->get_hw_state(encoder, &pipe)) {
+			encoder->base.crtc =
+				dev_priv->pipe_to_crtc_mapping[pipe];
+		} else {
+			encoder->base.crtc = NULL;
+		}
+
+		encoder->connectors_active = false;
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+			      encoder->base.base.id,
+			      drm_get_encoder_name(&encoder->base),
+			      encoder->base.crtc ? "enabled" : "disabled",
+			      pipe);
+	}
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		if (connector->get_hw_state(connector)) {
+			connector->base.dpms = DRM_MODE_DPMS_ON;
+			connector->encoder->connectors_active = true;
+			connector->base.encoder = &connector->encoder->base;
+		} else {
+			connector->base.dpms = DRM_MODE_DPMS_OFF;
+			connector->base.encoder = NULL;
+		}
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
+			      connector->base.base.id,
+			      drm_get_connector_name(&connector->base),
+			      connector->base.encoder ? "enabled" : "disabled");
+	}
+
+	/* HW state is read out, now we need to sanitize this mess. */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		intel_sanitize_encoder(encoder);
+	}
+
+	for_each_pipe(pipe) {
+		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+		intel_sanitize_crtc(crtc);
+	}
+
+	if (force_restore) {
+		/*
+		 * We need to use raw interfaces for restoring state to avoid
+		 * checking (bogus) intermediate states.
+		 */
+		for_each_pipe(pipe) {
+			struct drm_crtc *crtc =
+				dev_priv->pipe_to_crtc_mapping[pipe];
+
+			__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+					 crtc->fb);
+		}
+		list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+			intel_plane_restore(plane);
+
+		i915_redisable_vga(dev);
+	} else {
+		intel_modeset_update_staged_output_state(dev);
+	}
+
+	intel_modeset_check_state(dev);
+
+	drm_mode_config_reset(dev);
+}
+
+void intel_modeset_gem_init(struct drm_device *dev)
+{
+	intel_modeset_init_hw(dev);
+
+	intel_setup_overlay(dev);
+
+	mutex_lock(&dev->mode_config.mutex);
+	intel_modeset_setup_hw_state(dev, false);
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+void intel_modeset_cleanup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct intel_crtc *intel_crtc;
+
+	drm_kms_helper_poll_fini(dev);
+	mutex_lock(&dev->struct_mutex);
+
+	intel_unregister_dsm_handler();
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		/* Skip inactive CRTCs */
+		if (!crtc->fb)
+			continue;
+
+		intel_crtc = to_intel_crtc(crtc);
+		intel_increase_pllclock(crtc);
+	}
+
+	intel_disable_fbc(dev);
+
+	intel_disable_gt_powersave(dev);
+
+	ironlake_teardown_rc6(dev);
+
+	if (IS_VALLEYVIEW(dev))
+		vlv_init_dpio(dev);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	/* Disable the irq before mode object teardown, for the irq might
+	 * enqueue unpin/hotplug work. */
+	drm_irq_uninstall(dev);
+	cancel_work_sync(&dev_priv->hotplug_work);
+	cancel_work_sync(&dev_priv->rps.work);
+
+	/* flush any delayed tasks or pending work */
+	flush_scheduled_work();
+
+	/* destroy backlight, if any, before the connectors */
+	intel_panel_destroy_backlight(dev);
+
+	drm_mode_config_cleanup(dev);
+
+	intel_cleanup_overlay(dev);
+}
+
+/*
+ * Return the encoder currently attached to the given connector.
+ */
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
+{
+	return &intel_attached_encoder(connector)->base;
+}
+
+void intel_connector_attach_encoder(struct intel_connector *connector,
+				    struct intel_encoder *encoder)
+{
+	connector->encoder = encoder;
+	drm_mode_connector_attach_encoder(&connector->base,
+					  &encoder->base);
+}
+
+/*
+ * set vga decode state - true == enable VGA decode
+ */
+int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+	u16 gmch_ctrl;
+
+	pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
+	if (state)
+		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+	else
+		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+	pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
+	return 0;
+}
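+
+/*
+ * Usage sketch (illustrative): this is typically reached via the VGA
+ * arbiter's set-decode callback with state == false, to hide the IGD
+ * from legacy VGA cycles by setting the VGA-disable bit in the GMCH
+ * control word; no display state is touched.
+ */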
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+struct intel_display_error_state {
+	struct intel_cursor_error_state {
+		u32 control;
+		u32 position;
+		u32 base;
+		u32 size;
+	} cursor[I915_MAX_PIPES];
+
+	struct intel_pipe_error_state {
+		u32 conf;
+		u32 source;
+
+		u32 htotal;
+		u32 hblank;
+		u32 hsync;
+		u32 vtotal;
+		u32 vblank;
+		u32 vsync;
+	} pipe[I915_MAX_PIPES];
+
+	struct intel_plane_error_state {
+		u32 control;
+		u32 stride;
+		u32 size;
+		u32 pos;
+		u32 addr;
+		u32 surface;
+		u32 tile_offset;
+	} plane[I915_MAX_PIPES];
+};
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_display_error_state *error;
+	enum transcoder cpu_transcoder;
+	int i;
+
+	error = kmalloc(sizeof(*error), GFP_ATOMIC);
+	if (error == NULL)
+		return NULL;
+
+	for_each_pipe(i) {
+		cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
+
+		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
+			error->cursor[i].control = I915_READ(CURCNTR(i));
+			error->cursor[i].position = I915_READ(CURPOS(i));
+			error->cursor[i].base = I915_READ(CURBASE(i));
+		} else {
+			error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
+			error->cursor[i].position = I915_READ(CURPOS_IVB(i));
+			error->cursor[i].base = I915_READ(CURBASE_IVB(i));
+		}
+
+		error->plane[i].control = I915_READ(DSPCNTR(i));
+		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+		if (INTEL_INFO(dev)->gen <= 3) {
+			error->plane[i].size = I915_READ(DSPSIZE(i));
+			error->plane[i].pos = I915_READ(DSPPOS(i));
+		}
+		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+			error->plane[i].addr = I915_READ(DSPADDR(i));
+		if (INTEL_INFO(dev)->gen >= 4) {
+			error->plane[i].surface = I915_READ(DSPSURF(i));
+			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+		}
+
+		error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+		error->pipe[i].source = I915_READ(PIPESRC(i));
+		error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+		error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+		error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+		error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+		error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+		error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+	}
+
+	return error;
+}
+
+void
+intel_display_print_error_state(struct seq_file *m,
+				struct drm_device *dev,
+				struct intel_display_error_state *error)
+{
+	int i;
+
+	seq_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
+	for_each_pipe(i) {
+		seq_printf(m, "Pipe [%d]:\n", i);
+		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
+		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
+		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
+		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
+		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
+		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
+		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
+		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
+
+		seq_printf(m, "Plane [%d]:\n", i);
+		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
+		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
+		if (INTEL_INFO(dev)->gen <= 3) {
+			seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
+			seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
+		}
+		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+			seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
+		if (INTEL_INFO(dev)->gen >= 4) {
+			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
+			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
+		}
+
+		seq_printf(m, "Cursor [%d]:\n", i);
+		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
+		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
+		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
+	}
+}
+#endif
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_dp.c b/linux-imx/drivers/gpu/drm/i915/intel_dp.c
new file mode 100644
index 0000000..cfd327c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_dp.c
@@ -0,0 +1,3026 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
+
+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @intel_dp: DP struct
+ *
+ * If a CPU or PCH DP output is attached to an eDP panel, this function
+ * will return true, and false otherwise.
+ */
+static bool is_edp(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
+}
+
+/**
+ * is_pch_edp - is the port on the PCH and attached to an eDP panel?
+ * @intel_dp: DP struct
+ *
+ * Returns true if the given DP struct corresponds to a PCH DP port attached
+ * to an eDP panel, false otherwise.  Helpful for determining whether we
+ * may need FDI resources for a given DP output or not.
+ */
+static bool is_pch_edp(struct intel_dp *intel_dp)
+{
+	return intel_dp->is_pch_edp;
+}
+
+/**
+ * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
+ * @intel_dp: DP struct
+ *
+ * Returns true if the given DP struct corresponds to a CPU eDP port.
+ */
+static bool is_cpu_edp(struct intel_dp *intel_dp)
+{
+	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
+}
+
+static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+	return intel_dig_port->base.base.dev;
+}
+
+static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
+{
+	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+}
+
+/**
+ * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
+ * @encoder: DRM encoder
+ *
+ * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
+ * by intel_display.c.
+ */
+bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
+{
+	struct intel_dp *intel_dp;
+
+	if (!encoder)
+		return false;
+
+	intel_dp = enc_to_intel_dp(encoder);
+
+	return is_pch_edp(intel_dp);
+}
+
+static void intel_dp_link_down(struct intel_dp *intel_dp);
+
+static int
+intel_dp_max_link_bw(struct intel_dp *intel_dp)
+{
+	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+
+	switch (max_link_bw) {
+	case DP_LINK_BW_1_62:
+	case DP_LINK_BW_2_7:
+		break;
+	default:
+		max_link_bw = DP_LINK_BW_1_62;
+		break;
+	}
+	return max_link_bw;
+}
+
+/*
+ * The units on the numbers in the next two are... bizarre.  Examples will
+ * make it clearer; this one parallels an example in the eDP spec.
+ *
+ * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
+ *
+ *     270000 * 1 * 8 / 10 == 216000
+ *
+ * The actual data capacity of that configuration is 2.16Gbit/s, so the
+ * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
+ * or equivalently, kilopixels per second - so for 1680x1050R it'd be
+ * 119000.  At 18bpp that's 2142000 kilobits per second.
+ *
+ * Thus the strange-looking division by 10 in intel_dp_link_required, to
+ * get the result in decakilobits instead of kilobits.
+ */
+
+static int
+intel_dp_link_required(int pixel_clock, int bpp)
+{
+	return (pixel_clock * bpp + 9) / 10;
+}
+
+static int
+intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+{
+	return (max_link_clock * max_lanes * 8) / 10;
+}
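+
+/*
+ * Worked example in the units described above (illustrative): a
+ * 1920x1080@60 mode has ->clock = 148500, so at 24bpp
+ * intel_dp_link_required(148500, 24) = 356400 decakilobits/s, while two
+ * 2.7GHz lanes give intel_dp_max_data_rate(270000, 2) = 432000, so the
+ * mode fits. Note that intel_dp_mode_valid() below checks with 18bpp.
+ */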
+
+static int
+intel_dp_mode_valid(struct drm_connector *connector,
+		    struct drm_display_mode *mode)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	int target_clock = mode->clock;
+	int max_rate, mode_rate, max_lanes, max_link_clock;
+
+	if (is_edp(intel_dp) && fixed_mode) {
+		if (mode->hdisplay > fixed_mode->hdisplay)
+			return MODE_PANEL;
+
+		if (mode->vdisplay > fixed_mode->vdisplay)
+			return MODE_PANEL;
+
+		target_clock = fixed_mode->clock;
+	}
+
+	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
+	max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+
+	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+	mode_rate = intel_dp_link_required(target_clock, 18);
+
+	if (mode_rate > max_rate)
+		return MODE_CLOCK_HIGH;
+
+	if (mode->clock < 10000)
+		return MODE_CLOCK_LOW;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return MODE_H_ILLEGAL;
+
+	return MODE_OK;
+}
+
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+	int	i;
+	uint32_t v = 0;
+
+	if (src_bytes > 4)
+		src_bytes = 4;
+	for (i = 0; i < src_bytes; i++)
+		v |= ((uint32_t) src[i]) << ((3-i) * 8);
+	return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+	int i;
+	if (dst_bytes > 4)
+		dst_bytes = 4;
+	for (i = 0; i < dst_bytes; i++)
+		dst[i] = src >> ((3-i) * 8);
+}
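+
+/*
+ * Example of the big-endian AUX register layout used by pack_aux() and
+ * unpack_aux(), byte 0 in the top bits. A hypothetical helper kept out of
+ * the build under #if 0.
+ */
+#if 0
+static void intel_dp_aux_pack_example(void)
+{
+	uint8_t bytes[3] = { 0x12, 0x34, 0x56 };
+	uint8_t out[3];
+	uint32_t v = pack_aux(bytes, 3);	/* == 0x12345600 */
+
+	unpack_aux(v, out, 3);			/* out == { 0x12, 0x34, 0x56 } */
+	WARN_ON(v != 0x12345600);
+}
+#endif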
+
+/* hrawclock is 1/4 the FSB frequency */
+static int
+intel_hrawclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t clkcfg;
+
+	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+	if (IS_VALLEYVIEW(dev))
+		return 200;
+
+	clkcfg = I915_READ(CLKCFG);
+	switch (clkcfg & CLKCFG_FSB_MASK) {
+	case CLKCFG_FSB_400:
+		return 100;
+	case CLKCFG_FSB_533:
+		return 133;
+	case CLKCFG_FSB_667:
+		return 166;
+	case CLKCFG_FSB_800:
+		return 200;
+	case CLKCFG_FSB_1067:
+		return 266;
+	case CLKCFG_FSB_1333:
+		return 333;
+	/* these two are just a guess; one of them might be right */
+	case CLKCFG_FSB_1600:
+	case CLKCFG_FSB_1600_ALT:
+		return 400;
+	default:
+		return 133;
+	}
+}
+
+static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp_stat_reg;
+
+	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+	return (I915_READ(pp_stat_reg) & PP_ON) != 0;
+}
+
+static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp_ctrl_reg;
+
+	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
+}
+
+static void
+intel_dp_check_edp(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp_stat_reg, pp_ctrl_reg;
+
+	if (!is_edp(intel_dp))
+		return;
+
+	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
+		WARN(1, "eDP powered off while attempting aux channel communication.\n");
+		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
+				I915_READ(pp_stat_reg),
+				I915_READ(pp_ctrl_reg));
+	}
+}
+
+static uint32_t
+intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+	uint32_t status;
+	bool done;
+
+#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+	if (has_aux_irq)
+		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+					  msecs_to_jiffies_timeout(10));
+	else
+		done = wait_for_atomic(C, 10) == 0;
+	if (!done)
+		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
+			  has_aux_irq);
+#undef C
+
+	return status;
+}
+
+static int
+intel_dp_aux_ch(struct intel_dp *intel_dp,
+		uint8_t *send, int send_bytes,
+		uint8_t *recv, int recv_size)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+	uint32_t ch_data = ch_ctl + 4;
+	int i, ret, recv_bytes;
+	uint32_t status;
+	uint32_t aux_clock_divider;
+	int try, precharge;
+	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+
+	/* dp aux is extremely sensitive to irq latency, hence request the
+	 * lowest possible wakeup latency and so prevent the cpu from going into
+	 * deep sleep states.
+	 */
+	pm_qos_update_request(&dev_priv->pm_qos, 0);
+
+	intel_dp_check_edp(intel_dp);
+	/* The clock divider is based on the hrawclk,
+	 * and should run at 2MHz.  So, take the
+	 * hrawclk value and divide by 2 and use that.
+	 *
+	 * Note that PCH attached eDP panels should use a 125MHz input
+	 * clock divider.
+	 */
+	if (is_cpu_edp(intel_dp)) {
+		if (HAS_DDI(dev))
+			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
+		else if (IS_VALLEYVIEW(dev))
+			aux_clock_divider = 100;
+		else if (IS_GEN6(dev) || IS_GEN7(dev))
+			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400MHz */
+		else
+			aux_clock_divider = 225; /* eDP input clock at 450MHz */
+	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+		/* Workaround for non-ULT HSW */
+		aux_clock_divider = 74;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+	} else {
+		aux_clock_divider = intel_hrawclk(dev) / 2;
+	}
+
+	if (IS_GEN6(dev))
+		precharge = 3;
+	else
+		precharge = 5;
+
+	/* Try to wait for any previous AUX channel activity */
+	for (try = 0; try < 3; try++) {
+		status = I915_READ_NOTRACE(ch_ctl);
+		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+			break;
+		msleep(1);
+	}
+
+	if (try == 3) {
+		WARN(1, "dp_aux_ch not started status 0x%08x\n",
+		     I915_READ(ch_ctl));
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* Must try at least 3 times according to DP spec */
+	for (try = 0; try < 5; try++) {
+		/* Load the send data into the aux channel data registers */
+		for (i = 0; i < send_bytes; i += 4)
+			I915_WRITE(ch_data + i,
+				   pack_aux(send + i, send_bytes - i));
+
+		/* Send the command and wait for it to complete */
+		I915_WRITE(ch_ctl,
+			   DP_AUX_CH_CTL_SEND_BUSY |
+			   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+			   DP_AUX_CH_CTL_TIME_OUT_400us |
+			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+			   DP_AUX_CH_CTL_DONE |
+			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+			   DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
+
+		/* Clear done status and any errors */
+		I915_WRITE(ch_ctl,
+			   status |
+			   DP_AUX_CH_CTL_DONE |
+			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+			   DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
+			      DP_AUX_CH_CTL_RECEIVE_ERROR))
+			continue;
+		if (status & DP_AUX_CH_CTL_DONE)
+			break;
+	}
+
+	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* Check for timeout or receive error.
+	 * Timeouts occur when the sink is not connected
+	 */
+	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+		ret = -EIO;
+		goto out;
+	}
+
+	/* Timeouts occur when the device isn't connected, so they're
+	 * "normal" -- don't fill the kernel log with these */
+	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* Unload any bytes sent back from the other side */
+	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+	if (recv_bytes > recv_size)
+		recv_bytes = recv_size;
+
+	for (i = 0; i < recv_bytes; i += 4)
+		unpack_aux(I915_READ(ch_data + i),
+			   recv + i, recv_bytes - i);
+
+	ret = recv_bytes;
+out:
+	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+	return ret;
+}
+
+/* Write data to the aux channel in native mode */
+static int
+intel_dp_aux_native_write(struct intel_dp *intel_dp,
+			  uint16_t address, uint8_t *send, int send_bytes)
+{
+	int ret;
+	uint8_t	msg[20];
+	int msg_bytes;
+	uint8_t	ack;
+
+	intel_dp_check_edp(intel_dp);
+	if (send_bytes > 16)
+		return -1;
+	msg[0] = AUX_NATIVE_WRITE << 4;
+	msg[1] = address >> 8;
+	msg[2] = address & 0xff;
+	msg[3] = send_bytes - 1;
+	memcpy(&msg[4], send, send_bytes);
+	msg_bytes = send_bytes + 4;
+	for (;;) {
+		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
+		if (ret < 0)
+			return ret;
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+			break;
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(100);
+		else
+			return -EIO;
+	}
+	return send_bytes;
+}
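+
+/* For reference (assuming the pre-drm_dp_aux framing used above): writing
+ * the single byte DP_SET_POWER_D0 to DPCD address DP_SET_POWER (0x600)
+ * puts the 5-byte message 0x80 0x06 0x00 0x00 0x01 on the wire --
+ * command (AUX_NATIVE_WRITE << 4), address high, address low, length - 1,
+ * then the payload.
+ */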
+
+/* Write a single byte to the aux channel in native mode */
+static int
+intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
+			    uint16_t address, uint8_t byte)
+{
+	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+intel_dp_aux_native_read(struct intel_dp *intel_dp,
+			 uint16_t address, uint8_t *recv, int recv_bytes)
+{
+	uint8_t msg[4];
+	int msg_bytes;
+	uint8_t reply[20];
+	int reply_bytes;
+	uint8_t ack;
+	int ret;
+
+	intel_dp_check_edp(intel_dp);
+	msg[0] = AUX_NATIVE_READ << 4;
+	msg[1] = address >> 8;
+	msg[2] = address & 0xff;
+	msg[3] = recv_bytes - 1;
+
+	msg_bytes = 4;
+	reply_bytes = recv_bytes + 1;
+
+	for (;;) {
+		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
+				      reply, reply_bytes);
+		if (ret == 0)
+			return -EPROTO;
+		if (ret < 0)
+			return ret;
+		ack = reply[0];
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+			memcpy(recv, reply + 1, ret - 1);
+			return ret - 1;
+		}
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(100);
+		else
+			return -EIO;
+	}
+}
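+
+/*
+ * Illustrative sketch (a hypothetical helper, kept out of the build):
+ * fetching a 15-byte receiver capability block through the native-read
+ * helper. The request header is 0x90 0x00 0x00 0x0e, and on success the
+ * helper strips the reply byte and returns the capability bytes.
+ */
+#if 0
+static void intel_dp_native_read_example(struct intel_dp *intel_dp)
+{
+	uint8_t dpcd[15];
+	int ret = intel_dp_aux_native_read(intel_dp, 0x000, dpcd,
+					   sizeof(dpcd));
+
+	if (ret == sizeof(dpcd))
+		DRM_DEBUG_KMS("DPCD rev %02x\n", dpcd[DP_DPCD_REV]);
+}
+#endif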
+
+static int
+intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+		    uint8_t write_byte, uint8_t *read_byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	struct intel_dp *intel_dp = container_of(adapter,
+						struct intel_dp,
+						adapter);
+	uint16_t address = algo_data->address;
+	uint8_t msg[5];
+	uint8_t reply[2];
+	unsigned retry;
+	int msg_bytes;
+	int reply_bytes;
+	int ret;
+
+	intel_dp_check_edp(intel_dp);
+	/* Set up the command byte */
+	if (mode & MODE_I2C_READ)
+		msg[0] = AUX_I2C_READ << 4;
+	else
+		msg[0] = AUX_I2C_WRITE << 4;
+
+	if (!(mode & MODE_I2C_STOP))
+		msg[0] |= AUX_I2C_MOT << 4;
+
+	msg[1] = address >> 8;
+	msg[2] = address;
+
+	switch (mode) {
+	case MODE_I2C_WRITE:
+		msg[3] = 0;
+		msg[4] = write_byte;
+		msg_bytes = 5;
+		reply_bytes = 1;
+		break;
+	case MODE_I2C_READ:
+		msg[3] = 0;
+		msg_bytes = 4;
+		reply_bytes = 2;
+		break;
+	default:
+		msg_bytes = 3;
+		reply_bytes = 1;
+		break;
+	}
+
+	for (retry = 0; retry < 5; retry++) {
+		ret = intel_dp_aux_ch(intel_dp,
+				      msg, msg_bytes,
+				      reply, reply_bytes);
+		if (ret < 0) {
+			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+			return ret;
+		}
+
+		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+		case AUX_NATIVE_REPLY_ACK:
+			/* I2C-over-AUX Reply field is only valid
+			 * when paired with AUX ACK.
+			 */
+			break;
+		case AUX_NATIVE_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_ch native nack\n");
+			return -EREMOTEIO;
+		case AUX_NATIVE_REPLY_DEFER:
+			/*
+			 * For now, just give more slack to branch devices. We
+			 * could check the DPCD for I2C bit rate capabilities,
+			 * and if available, adjust the interval. We could also
+			 * be more careful with DP-to-Legacy adapters where a
+			 * long legacy cable may force very low I2C bit rates.
+			 */
+			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+			    DP_DWN_STRM_PORT_PRESENT)
+				usleep_range(500, 600);
+			else
+				usleep_range(300, 400);
+			continue;
+		default:
+			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+				  reply[0]);
+			return -EREMOTEIO;
+		}
+
+		switch (reply[0] & AUX_I2C_REPLY_MASK) {
+		case AUX_I2C_REPLY_ACK:
+			if (mode == MODE_I2C_READ) {
+				*read_byte = reply[1];
+			}
+			return reply_bytes - 1;
+		case AUX_I2C_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_i2c nack\n");
+			return -EREMOTEIO;
+		case AUX_I2C_REPLY_DEFER:
+			DRM_DEBUG_KMS("aux_i2c defer\n");
+			udelay(100);
+			break;
+		default:
+			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
+			return -EREMOTEIO;
+		}
+	}
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EREMOTEIO;
+}
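+
+/* For orientation (a sketch of the framing, assuming the DDC address 0x50
+ * supplied via algo_data): a byte read over I2C-over-AUX with the MOT
+ * (Middle-Of-Transaction) bit kept set sends the header 0x50 0x00 0x50 --
+ * ((AUX_I2C_READ | AUX_I2C_MOT) << 4), address high, address low -- and
+ * the final MODE_I2C_STOP transfer clears the MOT bit.
+ */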
+
+static int
+intel_dp_i2c_init(struct intel_dp *intel_dp,
+		  struct intel_connector *intel_connector, const char *name)
+{
+	int	ret;
+
+	DRM_DEBUG_KMS("i2c_init %s\n", name);
+	intel_dp->algo.running = false;
+	intel_dp->algo.address = 0;
+	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
+
+	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
+	intel_dp->adapter.owner = THIS_MODULE;
+	intel_dp->adapter.class = I2C_CLASS_DDC;
+	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
+	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
+	intel_dp->adapter.algo_data = &intel_dp->algo;
+	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
+
+	ironlake_edp_panel_vdd_on(intel_dp);
+	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
+	ironlake_edp_panel_vdd_off(intel_dp, false);
+	return ret;
+}
+
+bool
+intel_dp_compute_config(struct intel_encoder *encoder,
+			struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+	struct drm_display_mode *mode = &pipe_config->requested_mode;
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
+	int lane_count, clock;
+	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+	int bpp, mode_rate;
+	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+	int target_clock, link_avail, link_clock;
+
+	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && !is_cpu_edp(intel_dp))
+		pipe_config->has_pch_encoder = true;
+
+	pipe_config->has_dp_encoder = true;
+
+	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+				       adjusted_mode);
+		intel_pch_panel_fitting(dev,
+					intel_connector->panel.fitting_mode,
+					mode, adjusted_mode);
+	}
+	/* We need to take the panel's fixed mode into account. */
+	target_clock = adjusted_mode->clock;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+		return false;
+
+	DRM_DEBUG_KMS("DP link computation with max lane count %i "
+		      "max bw %02x pixel clock %iKHz\n",
+		      max_lane_count, bws[max_clock], adjusted_mode->clock);
+
+	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
+	 * bpc in between. */
+	bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
+	if (is_edp(intel_dp) && dev_priv->edp.bpp)
+		bpp = min_t(int, bpp, dev_priv->edp.bpp);
+
+	for (; bpp >= 6*3; bpp -= 2*3) {
+		mode_rate = intel_dp_link_required(target_clock, bpp);
+
+		for (clock = 0; clock <= max_clock; clock++) {
+			for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
+				link_avail = intel_dp_max_data_rate(link_clock,
+								    lane_count);
+
+				if (mode_rate <= link_avail) {
+					goto found;
+				}
+			}
+		}
+	}
+
+	return false;
+
+found:
+	if (intel_dp->color_range_auto) {
+		/*
+		 * See:
+		 * CEA-861-E - 5.1 Default Encoding Parameters
+		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+		 */
+		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
+			intel_dp->color_range = DP_COLOR_RANGE_16_235;
+		else
+			intel_dp->color_range = 0;
+	}
+
+	if (intel_dp->color_range)
+		pipe_config->limited_color_range = true;
+
+	intel_dp->link_bw = bws[clock];
+	intel_dp->lane_count = lane_count;
+	adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+	pipe_config->pipe_bpp = bpp;
+	pipe_config->pixel_target_clock = target_clock;
+
+	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
+		      intel_dp->link_bw, intel_dp->lane_count,
+		      adjusted_mode->clock, bpp);
+	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
+		      mode_rate, link_avail);
+
+	intel_link_compute_m_n(bpp, lane_count,
+			       target_clock, adjusted_mode->clock,
+			       &pipe_config->dp_m_n);
+
+	return true;
+}
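+
+/*
+ * Worked example of the search above (mode values assumed for
+ * illustration): 1920x1080@60 has a 148500 kHz clock, so at 24bpp it
+ * needs (148500 * 24 + 9) / 10 == 356400. The loops try the lower link
+ * rate first, and 1.62GHz x 4 lanes already offers 162000 * 4 * 8 / 10 ==
+ * 518400, so the link is configured as DP_LINK_BW_1_62 with 4 lanes
+ * rather than fewer lanes at 2.7GHz.
+ */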
+
+void intel_dp_init_link_config(struct intel_dp *intel_dp)
+{
+	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+	intel_dp->link_configuration[0] = intel_dp->link_bw;
+	intel_dp->link_configuration[1] = intel_dp->lane_count;
+	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
+	/*
+	 * Check for DPCD version 1.1 or later and enhanced framing support
+	 */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	}
+}
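+
+/*
+ * Example of the resulting buffer (values assumed for illustration): for
+ * a 2.7GHz, 4-lane sink with enhanced framing, link_configuration[0] ==
+ * 0x0a (DP_LINK_BW_2_7), link_configuration[1] == 0x84 (4 lanes |
+ * DP_LANE_COUNT_ENHANCED_FRAME_EN) and link_configuration[8] ==
+ * DP_SET_ANSI_8B10B.
+ */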
+
+static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpa_ctl;
+
+	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
+	dpa_ctl = I915_READ(DP_A);
+	dpa_ctl &= ~DP_PLL_FREQ_MASK;
+
+	if (clock < 200000) {
+		/* For a long time we've carried around an ILK-DevA w/a for the
+		 * 160MHz clock. If we're really unlucky, it's still required.
+		 */
+		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
+		dpa_ctl |= DP_PLL_FREQ_160MHZ;
+	} else {
+		dpa_ctl |= DP_PLL_FREQ_270MHZ;
+	}
+
+	I915_WRITE(DP_A, dpa_ctl);
+
+	POSTING_READ(DP_A);
+	udelay(500);
+}
+
+static void
+intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+		  struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	/*
+	 * There are four kinds of DP registers:
+	 *
+	 * 	IBX PCH
+	 * 	SNB CPU
+	 *	IVB CPU
+	 * 	CPT PCH
+	 *
+	 * IBX PCH and CPU are the same for almost everything,
+	 * except that the CPU DP PLL is configured in this
+	 * register
+	 *
+	 * CPT PCH is quite different, having many bits moved
+	 * to the TRANS_DP_CTL register instead. That
+	 * configuration happens (oddly) in ironlake_pch_enable
+	 */
+
+	/* Preserve the BIOS-computed detected bit. This is
+	 * supposed to be read-only.
+	 */
+	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+
+	/* Handle DP bits in common between all three register formats */
+	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+
+	switch (intel_dp->lane_count) {
+	case 1:
+		intel_dp->DP |= DP_PORT_WIDTH_1;
+		break;
+	case 2:
+		intel_dp->DP |= DP_PORT_WIDTH_2;
+		break;
+	case 4:
+		intel_dp->DP |= DP_PORT_WIDTH_4;
+		break;
+	}
+	if (intel_dp->has_audio) {
+		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
+				 pipe_name(intel_crtc->pipe));
+		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+		intel_write_eld(encoder, adjusted_mode);
+	}
+
+	intel_dp_init_link_config(intel_dp);
+
+	/* Split out the IBX/CPU vs CPT settings */
+
+	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+			intel_dp->DP |= DP_SYNC_HS_HIGH;
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+			intel_dp->DP |= DP_SYNC_VS_HIGH;
+		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+			intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+		intel_dp->DP |= intel_crtc->pipe << 29;
+
+		/* don't miss out required setting for eDP */
+		if (adjusted_mode->clock < 200000)
+			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+		else
+			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
+			intel_dp->DP |= intel_dp->color_range;
+
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+			intel_dp->DP |= DP_SYNC_HS_HIGH;
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+			intel_dp->DP |= DP_SYNC_VS_HIGH;
+		intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+			intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+		if (intel_crtc->pipe == 1)
+			intel_dp->DP |= DP_PIPEB_SELECT;
+
+		if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+			/* don't miss out required setting for eDP */
+			if (adjusted_mode->clock < 200000)
+				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+			else
+				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+		}
+	} else {
+		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+	}
+
+	if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
+		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+}
+
+#define IDLE_ON_MASK		(PP_ON | 0 	  | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE   	(PP_ON | 0 	  | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
+#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
+
+#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
+				       u32 mask,
+				       u32 value)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp_stat_reg, pp_ctrl_reg;
+
+	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
+			mask, value,
+			I915_READ(pp_stat_reg),
+			I915_READ(pp_ctrl_reg));
+
+	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
+		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
+				I915_READ(pp_stat_reg),
+				I915_READ(pp_ctrl_reg));
+	}
+}
+
+static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power on\n");
+	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power off time\n");
+	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power cycle\n");
+	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+
+/* Read the current pp_control value, with the panel-unlock bits set so
+ * that a subsequent write to the register is not locked out
+ */
+
+static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 control;
+	u32 pp_ctrl_reg;
+
+	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+	control = I915_READ(pp_ctrl_reg);
+
+	control &= ~PANEL_UNLOCK_MASK;
+	control |= PANEL_UNLOCK_REGS;
+	return control;
+}
+
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp;
+	u32 pp_stat_reg, pp_ctrl_reg;
+
+	if (!is_edp(intel_dp))
+		return;
+	DRM_DEBUG_KMS("Turn eDP VDD on\n");
+
+	WARN(intel_dp->want_panel_vdd,
+	     "eDP VDD already requested on\n");
+
+	intel_dp->want_panel_vdd = true;
+
+	if (ironlake_edp_have_panel_vdd(intel_dp)) {
+		DRM_DEBUG_KMS("eDP VDD already on\n");
+		return;
+	}
+
+	if (!ironlake_edp_have_panel_power(intel_dp))
+		ironlake_wait_panel_power_cycle(intel_dp);
+
+	pp = ironlake_get_pp_control(intel_dp);
+	pp |= EDP_FORCE_VDD;
+
+	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+	I915_WRITE(pp_ctrl_reg, pp);
+	POSTING_READ(pp_ctrl_reg);
+	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+	/*
+	 * If the panel wasn't on, delay before accessing aux channel
+	 */
+	if (!ironlake_edp_have_panel_power(intel_dp)) {
+		DRM_DEBUG_KMS("eDP was not running\n");
+		msleep(intel_dp->panel_power_up_delay);
+	}
+}
+
+static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp;
+	u32 pp_stat_reg, pp_ctrl_reg;
+
+	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+		pp = ironlake_get_pp_control(intel_dp);
+		pp &= ~EDP_FORCE_VDD;
+
+		pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
+		pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+		I915_WRITE(pp_ctrl_reg, pp);
+		POSTING_READ(pp_ctrl_reg);
+
+		/* Make sure sequencer is idle before allowing subsequent activity */
+		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+		I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+		msleep(intel_dp->panel_power_down_delay);
+	}
+}
+
+static void ironlake_panel_vdd_work(struct work_struct *__work)
+{
+	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
+						 struct intel_dp, panel_vdd_work);
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+	mutex_lock(&dev->mode_config.mutex);
+	ironlake_panel_vdd_off_sync(intel_dp);
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+{
+	if (!is_edp(intel_dp))
+		return;
+
+	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
+	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
+
+	intel_dp->want_panel_vdd = false;
+
+	if (sync) {
+		ironlake_panel_vdd_off_sync(intel_dp);
+	} else {
+		/*
+		 * Queue the timer to fire a long
+		 * time from now (relative to the power down delay)
+		 * to keep the panel power up across a sequence of operations
+		 */
+		schedule_delayed_work(&intel_dp->panel_vdd_work,
+				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+	}
+}
+
+void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp;
+	u32 pp_ctrl_reg;
+
+	if (!is_edp(intel_dp))
+		return;
+
+	DRM_DEBUG_KMS("Turn eDP power on\n");
+
+	if (ironlake_edp_have_panel_power(intel_dp)) {
+		DRM_DEBUG_KMS("eDP power already on\n");
+		return;
+	}
+
+	ironlake_wait_panel_power_cycle(intel_dp);
+
+	pp = ironlake_get_pp_control(intel_dp);
+	if (IS_GEN5(dev)) {
+		/* ILK workaround: disable reset around power sequence */
+		pp &= ~PANEL_POWER_RESET;
+		I915_WRITE(PCH_PP_CONTROL, pp);
+		POSTING_READ(PCH_PP_CONTROL);
+	}
+
+	pp |= POWER_TARGET_ON;
+	if (!IS_GEN5(dev))
+		pp |= PANEL_POWER_RESET;
+
+	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+	I915_WRITE(pp_ctrl_reg, pp);
+	POSTING_READ(pp_ctrl_reg);
+
+	ironlake_wait_panel_on(intel_dp);
+
+	if (IS_GEN5(dev)) {
+		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
+		I915_WRITE(PCH_PP_CONTROL, pp);
+		POSTING_READ(PCH_PP_CONTROL);
+	}
+}
+
+void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp;
+	u32 pp_ctrl_reg;
+
+	if (!is_edp(intel_dp))
+		return;
+
+	DRM_DEBUG_KMS("Turn eDP power off\n");
+
+	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+
+	pp = ironlake_get_pp_control(intel_dp);
+	/* We need to switch off panel power _and_ force vdd, for otherwise some
+	 * panels get very unhappy and cease to work. */
+	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+
+	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+	I915_WRITE(pp_ctrl_reg, pp);
+	POSTING_READ(pp_ctrl_reg);
+
+	intel_dp->want_panel_vdd = false;
+
+	ironlake_wait_panel_off(intel_dp);
+}
+
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
+	u32 pp;
+	u32 pp_ctrl_reg;
+
+	if (!is_edp(intel_dp))
+		return;
+
+	DRM_DEBUG_KMS("\n");
+	/*
+	 * If we enable the backlight right away following a panel power
+	 * on, we may see slight flicker as the panel syncs with the eDP
+	 * link.  So delay a bit to make sure the image is solid before
+	 * allowing it to appear.
+	 */
+	msleep(intel_dp->backlight_on_delay);
+	pp = ironlake_get_pp_control(intel_dp);
+	pp |= EDP_BLC_ENABLE;
+
+	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+	I915_WRITE(pp_ctrl_reg, pp);
+	POSTING_READ(pp_ctrl_reg);
+
+	intel_panel_enable_backlight(dev, pipe);
+}
+
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp;
+	u32 pp_ctrl_reg;
+
+	if (!is_edp(intel_dp))
+		return;
+
+	intel_panel_disable_backlight(dev);
+
+	DRM_DEBUG_KMS("\n");
+	pp = ironlake_get_pp_control(intel_dp);
+	pp &= ~EDP_BLC_ENABLE;
+
+	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+
+	I915_WRITE(pp_ctrl_reg, pp);
+	POSTING_READ(pp_ctrl_reg);
+	msleep(intel_dp->backlight_off_delay);
+}
+
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpa_ctl;
+
+	assert_pipe_disabled(dev_priv,
+			     to_intel_crtc(crtc)->pipe);
+
+	DRM_DEBUG_KMS("\n");
+	dpa_ctl = I915_READ(DP_A);
+	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
+	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+	/* We don't adjust intel_dp->DP while tearing down the link, to
+	 * facilitate link retraining (e.g. after hotplug). Hence clear all
+	 * enable bits here to ensure that we don't enable too much. */
+	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+	intel_dp->DP |= DP_PLL_ENABLE;
+	I915_WRITE(DP_A, intel_dp->DP);
+	POSTING_READ(DP_A);
+	udelay(200);
+}
+
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpa_ctl;
+
+	assert_pipe_disabled(dev_priv,
+			     to_intel_crtc(crtc)->pipe);
+
+	dpa_ctl = I915_READ(DP_A);
+	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
+	     "dp pll off, should be on\n");
+	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+	/* We can't rely on the value tracked for the DP register in
+	 * intel_dp->DP because link_down must not change that (otherwise link
+	 * re-training will fail). */
+	dpa_ctl &= ~DP_PLL_ENABLE;
+	I915_WRITE(DP_A, dpa_ctl);
+	POSTING_READ(DP_A);
+	udelay(200);
+}
+
+/* If the sink supports it, try to set the power state appropriately */
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+{
+	int ret, i;
+
+	/* Should have a valid DPCD by this point */
+	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+		return;
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
+						  DP_SET_POWER_D3);
+		if (ret != 1)
+			DRM_DEBUG_DRIVER("failed to write sink power state\n");
+	} else {
+		/*
+		 * When turning on, we need to retry a few times, sleeping
+		 * 1ms between attempts, to give the sink time to wake up.
+		 */
+		for (i = 0; i < 3; i++) {
+			ret = intel_dp_aux_native_write_1(intel_dp,
+							  DP_SET_POWER,
+							  DP_SET_POWER_D0);
+			if (ret == 1)
+				break;
+			msleep(1);
+		}
+	}
+}
+
+static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
+				  enum pipe *pipe)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp = I915_READ(intel_dp->output_reg);
+
+	if (!(tmp & DP_PORT_EN))
+		return false;
+
+	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+		*pipe = PORT_TO_PIPE(tmp);
+	} else {
+		u32 trans_sel;
+		u32 trans_dp;
+		int i;
+
+		switch (intel_dp->output_reg) {
+		case PCH_DP_B:
+			trans_sel = TRANS_DP_PORT_SEL_B;
+			break;
+		case PCH_DP_C:
+			trans_sel = TRANS_DP_PORT_SEL_C;
+			break;
+		case PCH_DP_D:
+			trans_sel = TRANS_DP_PORT_SEL_D;
+			break;
+		default:
+			return true;
+		}
+
+		for_each_pipe(i) {
+			trans_dp = I915_READ(TRANS_DP_CTL(i));
+			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
+				*pipe = i;
+				return true;
+			}
+		}
+
+		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
+			      intel_dp->output_reg);
+	}
+
+	return true;
+}
+
+static void intel_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	/* Make sure the panel is off before trying to change the mode. But also
+	 * ensure that we have vdd while we switch off the panel. */
+	ironlake_edp_panel_vdd_on(intel_dp);
+	ironlake_edp_backlight_off(intel_dp);
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	ironlake_edp_panel_off(intel_dp);
+
+	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
+	if (!is_cpu_edp(intel_dp))
+		intel_dp_link_down(intel_dp);
+}
+
+static void intel_post_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+
+	if (is_cpu_edp(intel_dp)) {
+		intel_dp_link_down(intel_dp);
+		if (!IS_VALLEYVIEW(dev))
+			ironlake_edp_pll_off(intel_dp);
+	}
+}
+
+static void intel_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+	if (WARN_ON(dp_reg & DP_PORT_EN))
+		return;
+
+	ironlake_edp_panel_vdd_on(intel_dp);
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	intel_dp_start_link_train(intel_dp);
+	ironlake_edp_panel_on(intel_dp);
+	ironlake_edp_panel_vdd_off(intel_dp, true);
+	intel_dp_complete_link_train(intel_dp);
+	intel_dp_stop_link_train(intel_dp);
+	ironlake_edp_backlight_on(intel_dp);
+}
+
+static void intel_pre_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+
+	if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
+		ironlake_edp_pll_on(intel_dp);
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
+			       uint8_t *recv, int recv_bytes)
+{
+	int ret, i;
+
+	/*
+	 * Sinks are *supposed* to come up within 1ms from an off state,
+	 * but we're also supposed to retry 3 times per the spec.
+	 */
+	for (i = 0; i < 3; i++) {
+		ret = intel_dp_aux_native_read(intel_dp, address, recv,
+					       recv_bytes);
+		if (ret == recv_bytes)
+			return true;
+		msleep(1);
+	}
+
+	return false;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+	return intel_dp_aux_native_read_retry(intel_dp,
+					      DP_LANE0_1_STATUS,
+					      link_status,
+					      DP_LINK_STATUS_SIZE);
+}
+
+#if 0
+static char	*voltage_names[] = {
+	"0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char	*pre_emph_names[] = {
+	"0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char	*link_train_names[] = {
+	"pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+/*
+ * These are source-specific values; current Intel hardware supports
+ * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
+ * (CPT PCH ports, per the code below, allow up to 1200mV)
+ */
+
+static uint8_t
+intel_dp_voltage_max(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+		return DP_TRAIN_VOLTAGE_SWING_800;
+	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		return DP_TRAIN_VOLTAGE_SWING_1200;
+	else
+		return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+	if (HAS_DDI(dev)) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			return DP_TRAIN_PRE_EMPHASIS_9_5;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		case DP_TRAIN_VOLTAGE_SWING_1200:
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
+	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
+	} else {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		case DP_TRAIN_VOLTAGE_SWING_1200:
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
+	}
+}
+
+static void
+intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+	uint8_t v = 0;
+	uint8_t p = 0;
+	int lane;
+	uint8_t voltage_max;
+	uint8_t preemph_max;
+
+	for (lane = 0; lane < intel_dp->lane_count; lane++) {
+		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+		if (this_v > v)
+			v = this_v;
+		if (this_p > p)
+			p = this_p;
+	}
+
+	voltage_max = intel_dp_voltage_max(intel_dp);
+	if (v >= voltage_max)
+		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
+
+	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+	if (p >= preemph_max)
+		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+	for (lane = 0; lane < 4; lane++)
+		intel_dp->train_set[lane] = v | p;
+}
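+
+/*
+ * Example of the adjustment handling above (per-lane requests assumed for
+ * illustration): if lane 0 asks for 0.6V / 3.5dB and lane 1 asks for
+ * 0.4V / 6dB, the maxima are taken per field -- 0.6V and 6dB -- capped at
+ * the source limits, and that single swing/pre-emphasis setting is then
+ * written into every train_set entry.
+ */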
+
+static uint32_t
+intel_gen4_signal_levels(uint8_t train_set)
+{
+	uint32_t	signal_levels = 0;
+
+	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+	case DP_TRAIN_VOLTAGE_SWING_400:
+	default:
+		signal_levels |= DP_VOLTAGE_0_4;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_600:
+		signal_levels |= DP_VOLTAGE_0_6;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_800:
+		signal_levels |= DP_VOLTAGE_0_8;
+		break;
+	case DP_TRAIN_VOLTAGE_SWING_1200:
+		signal_levels |= DP_VOLTAGE_1_2;
+		break;
+	}
+	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+	case DP_TRAIN_PRE_EMPHASIS_0:
+	default:
+		signal_levels |= DP_PRE_EMPHASIS_0;
+		break;
+	case DP_TRAIN_PRE_EMPHASIS_3_5:
+		signal_levels |= DP_PRE_EMPHASIS_3_5;
+		break;
+	case DP_TRAIN_PRE_EMPHASIS_6:
+		signal_levels |= DP_PRE_EMPHASIS_6;
+		break;
+	case DP_TRAIN_PRE_EMPHASIS_9_5:
+		signal_levels |= DP_PRE_EMPHASIS_9_5;
+		break;
+	}
+	return signal_levels;
+}
+
+/* Gen6's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen6_edp_signal_levels(uint8_t train_set)
+{
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+	}
+}
+
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_400MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+		return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_600MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_800MV_0DB_IVB;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return EDP_LINK_TRAIN_500MV_0DB_IVB;
+	}
+}
+
+/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_hsw_signal_levels(uint8_t train_set)
+{
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_400MV_0DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_400MV_3_5DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+		return DDI_BUF_EMP_400MV_6DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
+		return DDI_BUF_EMP_400MV_9_5DB_HSW;
+
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_600MV_0DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_600MV_3_5DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+		return DDI_BUF_EMP_600MV_6DB_HSW;
+
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_800MV_0DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_800MV_3_5DB_HSW;
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return DDI_BUF_EMP_400MV_0DB_HSW;
+	}
+}
+
+/* Properly updates "DP" with the correct signal levels. */
+static void
+intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	uint32_t signal_levels, mask;
+	uint8_t train_set = intel_dp->train_set[0];
+
+	if (HAS_DDI(dev)) {
+		signal_levels = intel_hsw_signal_levels(train_set);
+		mask = DDI_BUF_EMP_MASK;
+	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
+		signal_levels = intel_gen7_edp_signal_levels(train_set);
+		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
+	} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+		signal_levels = intel_gen6_edp_signal_levels(train_set);
+		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
+	} else {
+		signal_levels = intel_gen4_signal_levels(train_set);
+		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
+	}
+
+	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
+
+	*DP = (*DP & ~mask) | signal_levels;
+}
+
+static bool
+intel_dp_set_link_train(struct intel_dp *intel_dp,
+			uint32_t dp_reg_value,
+			uint8_t dp_train_pat)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+	int ret;
+
+	if (HAS_DDI(dev)) {
+		uint32_t temp = I915_READ(DP_TP_CTL(port));
+
+		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+		else
+			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+
+			break;
+		case DP_TRAINING_PATTERN_1:
+			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+			break;
+		}
+		I915_WRITE(DP_TP_CTL(port), temp);
+
+	} else if (HAS_PCH_CPT(dev) &&
+		   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+			break;
+		case DP_TRAINING_PATTERN_1:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			DRM_ERROR("DP training pattern 3 not supported\n");
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+			break;
+		}
+
+	} else {
+		dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			dp_reg_value |= DP_LINK_TRAIN_OFF;
+			break;
+		case DP_TRAINING_PATTERN_1:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			DRM_ERROR("DP training pattern 3 not supported\n");
+			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+			break;
+		}
+	}
+
+	I915_WRITE(intel_dp->output_reg, dp_reg_value);
+	POSTING_READ(intel_dp->output_reg);
+
+	intel_dp_aux_native_write_1(intel_dp,
+				    DP_TRAINING_PATTERN_SET,
+				    dp_train_pat);
+
+	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+	    DP_TRAINING_PATTERN_DISABLE) {
+		ret = intel_dp_aux_native_write(intel_dp,
+						DP_TRAINING_LANE0_SET,
+						intel_dp->train_set,
+						intel_dp->lane_count);
+		if (ret != intel_dp->lane_count)
+			return false;
+	}
+
+	return true;
+}
+
+static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+	uint32_t val;
+
+	if (!HAS_DDI(dev))
+		return;
+
+	val = I915_READ(DP_TP_CTL(port));
+	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+	I915_WRITE(DP_TP_CTL(port), val);
+
+	/*
+	 * On PORT_A we can have only eDP in SST mode. There the only reason
+	 * we need to set idle transmission mode is to work around a HW issue
+	 * where we enable the pipe while not in idle link-training mode.
+	 * In this case there is a requirement to wait for a minimum number of
+	 * idle patterns to be sent.
+	 */
+	if (port == PORT_A)
+		return;
+
+	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
+		     1))
+		DRM_ERROR("Timed out waiting for DP idle patterns\n");
+}
+
+/* Enable corresponding port and start training pattern 1 */
+void
+intel_dp_start_link_train(struct intel_dp *intel_dp)
+{
+	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
+	struct drm_device *dev = encoder->dev;
+	int i;
+	uint8_t voltage;
+	bool clock_recovery = false;
+	int voltage_tries, loop_tries;
+	uint32_t DP = intel_dp->DP;
+
+	if (HAS_DDI(dev))
+		intel_ddi_prepare_link_retrain(encoder);
+
+	/* Write the link configuration data */
+	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+				  intel_dp->link_configuration,
+				  DP_LINK_CONFIGURATION_SIZE);
+
+	DP |= DP_PORT_EN;
+
+	memset(intel_dp->train_set, 0, 4);
+	voltage = 0xff;
+	voltage_tries = 0;
+	loop_tries = 0;
+	clock_recovery = false;
+	for (;;) {
+		/* Use intel_dp->train_set[0] to set the voltage and pre-emphasis values */
+		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
+
+		intel_dp_set_signal_levels(intel_dp, &DP);
+
+		/* Set training pattern 1 */
+		if (!intel_dp_set_link_train(intel_dp, DP,
+					     DP_TRAINING_PATTERN_1 |
+					     DP_LINK_SCRAMBLING_DISABLE))
+			break;
+
+		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+		if (!intel_dp_get_link_status(intel_dp, link_status)) {
+			DRM_ERROR("failed to get link status\n");
+			break;
+		}
+
+		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+			DRM_DEBUG_KMS("clock recovery OK\n");
+			clock_recovery = true;
+			break;
+		}
+
+		/* Check to see if we've tried the max voltage */
+		for (i = 0; i < intel_dp->lane_count; i++)
+			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+				break;
+		if (i == intel_dp->lane_count) {
+			++loop_tries;
+			if (loop_tries == 5) {
+				DRM_DEBUG_KMS("too many full retries, give up\n");
+				break;
+			}
+			memset(intel_dp->train_set, 0, 4);
+			voltage_tries = 0;
+			continue;
+		}
+
+		/* Check to see if we've tried the same voltage 5 times */
+		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++voltage_tries;
+			if (voltage_tries == 5) {
+				DRM_DEBUG_KMS("too many voltage retries, give up\n");
+				break;
+			}
+		} else
+			voltage_tries = 0;
+		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* Compute new intel_dp->train_set as requested by target */
+		intel_get_adjust_train(intel_dp, link_status);
+	}
+
+	intel_dp->DP = DP;
+}
+
+void
+intel_dp_complete_link_train(struct intel_dp *intel_dp)
+{
+	bool channel_eq = false;
+	int tries, cr_tries;
+	uint32_t DP = intel_dp->DP;
+
+	/* channel equalization */
+	tries = 0;
+	cr_tries = 0;
+	channel_eq = false;
+	for (;;) {
+		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
+
+		if (cr_tries > 5) {
+			DRM_ERROR("failed to train DP, aborting\n");
+			intel_dp_link_down(intel_dp);
+			break;
+		}
+
+		intel_dp_set_signal_levels(intel_dp, &DP);
+
+		/* channel eq pattern */
+		if (!intel_dp_set_link_train(intel_dp, DP,
+					     DP_TRAINING_PATTERN_2 |
+					     DP_LINK_SCRAMBLING_DISABLE))
+			break;
+
+		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
+		if (!intel_dp_get_link_status(intel_dp, link_status))
+			break;
+
+		/* Make sure clock is still ok */
+		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+			intel_dp_start_link_train(intel_dp);
+			cr_tries++;
+			continue;
+		}
+
+		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+			channel_eq = true;
+			break;
+		}
+
+		/* Try 5 times, then try clock recovery if that fails */
+		if (tries > 5) {
+			intel_dp_link_down(intel_dp);
+			intel_dp_start_link_train(intel_dp);
+			tries = 0;
+			cr_tries++;
+			continue;
+		}
+
+		/* Compute new intel_dp->train_set as requested by target */
+		intel_get_adjust_train(intel_dp, link_status);
+		++tries;
+	}
+
+	intel_dp_set_idle_link_train(intel_dp);
+
+	intel_dp->DP = DP;
+
+	if (channel_eq)
+		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
+}
+
+void intel_dp_stop_link_train(struct intel_dp *intel_dp)
+{
+	intel_dp_set_link_train(intel_dp, intel_dp->DP,
+				DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+intel_dp_link_down(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc =
+		to_intel_crtc(intel_dig_port->base.base.crtc);
+	uint32_t DP = intel_dp->DP;
+
+	/*
+	 * DDI code has a strict mode set sequence and we should try to respect
+	 * it, otherwise we might hang the machine in many different ways. So we
+	 * really should be disabling the port only on a complete crtc_disable
+	 * sequence. This function is just called under two conditions in the
+	 * DDI code:
+	 * - Link train failed while doing crtc_enable, and in this case we
+	 *   really should respect the mode set sequence and wait for a
+	 *   crtc_disable.
+	 * - Someone turned the monitor off and intel_dp_check_link_status
+	 *   called us. We don't need to disable the whole port in this case, so
+	 *   when someone turns the monitor on again,
+	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
+	 *   train.
+	 */
+	if (HAS_DDI(dev))
+		return;
+
+	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
+		return;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+		DP &= ~DP_LINK_TRAIN_MASK_CPT;
+		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+	} else {
+		DP &= ~DP_LINK_TRAIN_MASK;
+		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+	}
+	POSTING_READ(intel_dp->output_reg);
+
+	/* We don't really know why we're doing this */
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+	if (HAS_PCH_IBX(dev) &&
+	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+
+		/* Hardware workaround: leaving our transcoder select
+		 * set to transcoder B while it's off will prevent the
+		 * corresponding HDMI output on transcoder A.
+		 *
+		 * Combine this with another hardware workaround:
+		 * transcoder select bit can only be cleared while the
+		 * port is enabled.
+		 */
+		DP &= ~DP_PIPEB_SELECT;
+		I915_WRITE(intel_dp->output_reg, DP);
+
+		/* Changes to enable or select take place the vblank
+		 * after being written.
+		 */
+		if (WARN_ON(crtc == NULL)) {
+			/* We should never try to disable a port without a crtc
+			 * attached. For paranoia keep the code around for a
+			 * bit. */
+			POSTING_READ(intel_dp->output_reg);
+			msleep(50);
+		} else
+			intel_wait_for_vblank(dev, intel_crtc->pipe);
+	}
+
+	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
+	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+	POSTING_READ(intel_dp->output_reg);
+	msleep(intel_dp->panel_power_down_delay);
+}
+
+static bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp)
+{
+	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
+
+	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
+					   sizeof(intel_dp->dpcd)) == 0)
+		return false; /* aux transfer failed */
+
+	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
+
+	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
+		return false; /* DPCD not present */
+
+	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+	      DP_DWN_STRM_PORT_PRESENT))
+		return true; /* native DP sink */
+
+	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
+		return true; /* no per-port downstream info */
+
+	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
+					   intel_dp->downstream_ports,
+					   DP_MAX_DOWNSTREAM_PORTS) == 0)
+		return false; /* downstream port status fetch failed */
+
+	return true;
+}
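+
+/*
+ * Example of a fetched capability block (bytes assumed for illustration):
+ * dpcd[DP_DPCD_REV] == 0x11 is a DPCD 1.1 sink, dpcd[DP_MAX_LINK_RATE] ==
+ * 0x0a caps the link at 2.7GHz, and dpcd[DP_MAX_LANE_COUNT] == 0x84
+ * advertises 4 lanes plus enhanced framing -- the fields consumed by
+ * intel_dp_max_link_bw(), drm_dp_max_lane_count() and
+ * intel_dp_init_link_config() above.
+ */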
+
+static void
+intel_dp_probe_oui(struct intel_dp *intel_dp)
+{
+	u8 buf[3];
+
+	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+		return;
+
+	ironlake_edp_panel_vdd_on(intel_dp);
+
+	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
+		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+
+	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
+		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+
+	ironlake_edp_panel_vdd_off(intel_dp, false);
+}
+
+static bool
+intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+	int ret;
+
+	ret = intel_dp_aux_native_read_retry(intel_dp,
+					     DP_DEVICE_SERVICE_IRQ_VECTOR,
+					     sink_irq_vector, 1);
+	if (!ret)
+		return false;
+
+	return true;
+}
+
+static void
+intel_dp_handle_test_request(struct intel_dp *intel_dp)
+{
+	/* NAK by default */
+	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
+}
+
+/*
+ * According to DP spec
+ * 5.1.2:
+ *  1. Read DPCD
+ *  2. Configure link according to Receiver Capabilities
+ *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
+ *  4. Check link status on receipt of hot-plug interrupt
+ */
+
+void
+intel_dp_check_link_status(struct intel_dp *intel_dp)
+{
+	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+	u8 sink_irq_vector;
+	u8 link_status[DP_LINK_STATUS_SIZE];
+
+	if (!intel_encoder->connectors_active)
+		return;
+
+	if (WARN_ON(!intel_encoder->base.crtc))
+		return;
+
+	/* Try to read receiver status if the link appears to be up */
+	if (!intel_dp_get_link_status(intel_dp, link_status)) {
+		intel_dp_link_down(intel_dp);
+		return;
+	}
+
+	/* Now read the DPCD to see if it's actually running */
+	if (!intel_dp_get_dpcd(intel_dp)) {
+		intel_dp_link_down(intel_dp);
+		return;
+	}
+
+	/* Try to read the source of the interrupt */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+		/* Clear interrupt source */
+		intel_dp_aux_native_write_1(intel_dp,
+					    DP_DEVICE_SERVICE_IRQ_VECTOR,
+					    sink_irq_vector);
+
+		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
+			intel_dp_handle_test_request(intel_dp);
+		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
+			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
+	}
+
+	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
+			      drm_get_encoder_name(&intel_encoder->base));
+		intel_dp_start_link_train(intel_dp);
+		intel_dp_complete_link_train(intel_dp);
+		intel_dp_stop_link_train(intel_dp);
+	}
+}
+
+/* XXX this is probably wrong for multiple downstream ports */
+static enum drm_connector_status
+intel_dp_detect_dpcd(struct intel_dp *intel_dp)
+{
+	uint8_t *dpcd = intel_dp->dpcd;
+	bool hpd;
+	uint8_t type;
+
+	if (!intel_dp_get_dpcd(intel_dp))
+		return connector_status_disconnected;
+
+	/* if there's no downstream port, we're done */
+	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+		return connector_status_connected;
+
+	/* If we're HPD-aware, SINK_COUNT changes dynamically */
+	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
+	if (hpd) {
+		uint8_t reg;
+		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
+						    &reg, 1))
+			return connector_status_unknown;
+		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
+					      : connector_status_disconnected;
+	}
+
+	/* If no HPD, poke DDC gently */
+	if (drm_probe_ddc(&intel_dp->adapter))
+		return connector_status_connected;
+
+	/* Well we tried, say unknown for unreliable port types */
+	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
+		return connector_status_unknown;
+
+	/* Anything else is out of spec, warn and ignore */
+	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
+	return connector_status_disconnected;
+}
+
+static enum drm_connector_status
+ironlake_dp_detect(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	enum drm_connector_status status;
+
+	/* Can't disconnect eDP, but you can close the lid... */
+	if (is_edp(intel_dp)) {
+		status = intel_panel_detect(dev);
+		if (status == connector_status_unknown)
+			status = connector_status_connected;
+		return status;
+	}
+
+	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+		return connector_status_disconnected;
+
+	return intel_dp_detect_dpcd(intel_dp);
+}
+
+static enum drm_connector_status
+g4x_dp_detect(struct intel_dp *intel_dp)
+{
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	uint32_t bit;
+
+	/* Can't disconnect eDP, but you can close the lid... */
+	if (is_edp(intel_dp)) {
+		enum drm_connector_status status;
+
+		status = intel_panel_detect(dev);
+		if (status == connector_status_unknown)
+			status = connector_status_connected;
+		return status;
+	}
+
+	if (IS_VALLEYVIEW(dev)) {
+		switch (intel_dig_port->port) {
+		case PORT_B:
+			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+			break;
+		case PORT_C:
+			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+			break;
+		case PORT_D:
+			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+			break;
+		default:
+			return connector_status_unknown;
+		}
+	} else {
+		switch (intel_dig_port->port) {
+		case PORT_B:
+			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
+			break;
+		case PORT_C:
+			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
+			break;
+		case PORT_D:
+			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
+			break;
+		default:
+			return connector_status_unknown;
+		}
+	}
+
+	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
+		return connector_status_disconnected;
+
+	return intel_dp_detect_dpcd(intel_dp);
+}
+
+static struct edid *
+intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	/* use cached edid if we have one */
+	if (intel_connector->edid) {
+		struct edid *edid;
+		int size;
+
+		/* invalid edid */
+		if (IS_ERR(intel_connector->edid))
+			return NULL;
+
+		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
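+		/* EDID_LENGTH is 128 bytes per block, so e.g. a base EDID
+		 * block plus one CEA extension copies 2 * 128 = 256 bytes. */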
+		edid = kmalloc(size, GFP_KERNEL);
+		if (!edid)
+			return NULL;
+
+		memcpy(edid, intel_connector->edid, size);
+		return edid;
+	}
+
+	return drm_get_edid(connector, adapter);
+}
+
+static int
+intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	/* use cached edid if we have one */
+	if (intel_connector->edid) {
+		/* invalid edid */
+		if (IS_ERR(intel_connector->edid))
+			return 0;
+
+		return intel_connector_update_modes(connector,
+						    intel_connector->edid);
+	}
+
+	return intel_ddc_get_modes(connector, adapter);
+}
+
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector, bool force)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = connector->dev;
+	enum drm_connector_status status;
+	struct edid *edid = NULL;
+
+	intel_dp->has_audio = false;
+
+	if (HAS_PCH_SPLIT(dev))
+		status = ironlake_dp_detect(intel_dp);
+	else
+		status = g4x_dp_detect(intel_dp);
+
+	if (status != connector_status_connected)
+		return status;
+
+	intel_dp_probe_oui(intel_dp);
+
+	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
+		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
+	} else {
+		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
+		if (edid) {
+			intel_dp->has_audio = drm_detect_monitor_audio(edid);
+			kfree(edid);
+		}
+	}
+
+	if (intel_encoder->type != INTEL_OUTPUT_EDP)
+		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+	return connector_status_connected;
+}
+
+static int intel_dp_get_modes(struct drm_connector *connector)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_device *dev = connector->dev;
+	int ret;
+
+	/* We should parse the EDID data and find out if it has an audio sink */
+
+	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
+	if (ret)
+		return ret;
+
+	/* if eDP has no EDID, fall back to fixed mode */
+	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+		struct drm_display_mode *mode;
+		mode = drm_mode_duplicate(dev,
+					  intel_connector->panel.fixed_mode);
+		if (mode) {
+			drm_mode_probed_add(connector, mode);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct edid *edid;
+	bool has_audio = false;
+
+	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		has_audio = drm_detect_monitor_audio(edid);
+		kfree(edid);
+	}
+
+	return has_audio;
+}
+
+static int
+intel_dp_set_property(struct drm_connector *connector,
+		      struct drm_property *property,
+		      uint64_t val)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+	int ret;
+
+	ret = drm_object_property_set_value(&connector->base, property, val);
+	if (ret)
+		return ret;
+
+	if (property == dev_priv->force_audio_property) {
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_dp->force_audio)
+			return 0;
+
+		intel_dp->force_audio = i;
+
+		if (i == HDMI_AUDIO_AUTO)
+			has_audio = intel_dp_detect_audio(connector);
+		else
+			has_audio = (i == HDMI_AUDIO_ON);
+
+		if (has_audio == intel_dp->has_audio)
+			return 0;
+
+		intel_dp->has_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		bool old_auto = intel_dp->color_range_auto;
+		uint32_t old_range = intel_dp->color_range;
+
+		switch (val) {
+		case INTEL_BROADCAST_RGB_AUTO:
+			intel_dp->color_range_auto = true;
+			break;
+		case INTEL_BROADCAST_RGB_FULL:
+			intel_dp->color_range_auto = false;
+			intel_dp->color_range = 0;
+			break;
+		case INTEL_BROADCAST_RGB_LIMITED:
+			intel_dp->color_range_auto = false;
+			intel_dp->color_range = DP_COLOR_RANGE_16_235;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (old_auto == intel_dp->color_range_auto &&
+		    old_range == intel_dp->color_range)
+			return 0;
+
+		goto done;
+	}
+
+	if (is_edp(intel_dp) &&
+	    property == connector->dev->mode_config.scaling_mode_property) {
+		if (val == DRM_MODE_SCALE_NONE) {
+			DRM_DEBUG_KMS("no scaling not supported\n");
+			return -EINVAL;
+		}
+
+		if (intel_connector->panel.fitting_mode == val) {
+			/* the eDP scaling property is not changed */
+			return 0;
+		}
+		intel_connector->panel.fitting_mode = val;
+
+		goto done;
+	}
+
+	return -EINVAL;
+
+done:
+	if (intel_encoder->base.crtc)
+		intel_crtc_restore_mode(intel_encoder->base.crtc);
+
+	return 0;
+}
+
+static void
+intel_dp_destroy(struct drm_connector *connector)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	if (!IS_ERR_OR_NULL(intel_connector->edid))
+		kfree(intel_connector->edid);
+
+	if (is_edp(intel_dp))
+		intel_panel_fini(&intel_connector->panel);
+
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+	i2c_del_adapter(&intel_dp->adapter);
+	drm_encoder_cleanup(encoder);
+	if (is_edp(intel_dp)) {
+		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+		mutex_lock(&dev->mode_config.mutex);
+		ironlake_panel_vdd_off_sync(intel_dp);
+		mutex_unlock(&dev->mode_config.mutex);
+	}
+	kfree(intel_dig_port);
+}
+
+static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
+	.mode_set = intel_dp_mode_set,
+};
+
+static const struct drm_connector_funcs intel_dp_connector_funcs = {
+	.dpms = intel_connector_dpms,
+	.detect = intel_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = intel_dp_set_property,
+	.destroy = intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
+	.get_modes = intel_dp_get_modes,
+	.mode_valid = intel_dp_mode_valid,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+	.destroy = intel_dp_encoder_destroy,
+};
+
+static void
+intel_dp_hot_plug(struct intel_encoder *intel_encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+	intel_dp_check_link_status(intel_dp);
+}
+
+/* Return which DP Port should be selected for Transcoder DP control */
+int
+intel_trans_dp_port_sel(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct intel_encoder *intel_encoder;
+	struct intel_dp *intel_dp;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		    intel_encoder->type == INTEL_OUTPUT_EDP)
+			return intel_dp->output_reg;
+	}
+
+	return -1;
+}
+
+/* check the VBT to see whether the eDP is on DP-D port */
+bool intel_dpd_is_edp(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i;
+
+	if (!dev_priv->child_dev_num)
+		return false;
+
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+
+		if (p_child->dvo_port == PORT_IDPD &&
+		    p_child->device_type == DEVICE_TYPE_eDP)
+			return true;
+	}
+	return false;
+}
+
+static void
+intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	intel_attach_force_audio_property(connector);
+	intel_attach_broadcast_rgb_property(connector);
+	intel_dp->color_range_auto = true;
+
+	if (is_edp(intel_dp)) {
+		drm_mode_create_scaling_mode_property(connector->dev);
+		drm_object_attach_property(
+			&connector->base,
+			connector->dev->mode_config.scaling_mode_property,
+			DRM_MODE_SCALE_ASPECT);
+		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+	}
+}
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+				    struct intel_dp *intel_dp,
+				    struct edp_power_seq *out)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct edp_power_seq cur, vbt, spec, final;
+	u32 pp_on, pp_off, pp_div, pp;
+	int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		pp_control_reg = PCH_PP_CONTROL;
+		pp_on_reg = PCH_PP_ON_DELAYS;
+		pp_off_reg = PCH_PP_OFF_DELAYS;
+		pp_div_reg = PCH_PP_DIVISOR;
+	} else {
+		pp_control_reg = PIPEA_PP_CONTROL;
+		pp_on_reg = PIPEA_PP_ON_DELAYS;
+		pp_off_reg = PIPEA_PP_OFF_DELAYS;
+		pp_div_reg = PIPEA_PP_DIVISOR;
+	}
+
+	/* Workaround: Need to write PP_CONTROL with the unlock key as
+	 * the very first thing. */
+	pp = ironlake_get_pp_control(intel_dp);
+	I915_WRITE(pp_control_reg, pp);
+
+	pp_on = I915_READ(pp_on_reg);
+	pp_off = I915_READ(pp_off_reg);
+	pp_div = I915_READ(pp_div_reg);
+
+	/* Pull timing values out of registers */
+	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+		PANEL_POWER_UP_DELAY_SHIFT;
+
+	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+		PANEL_LIGHT_ON_DELAY_SHIFT;
+
+	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+		PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+		PANEL_POWER_DOWN_DELAY_SHIFT;
+
+	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+	vbt = dev_priv->edp.pps;
+
+	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+	 * our hw here, which are all in 100usec. */
+	spec.t1_t3 = 210 * 10;
+	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+	spec.t10 = 500 * 10;
+	/* This one is special, being in units of 100 ms and zero-based
+	 * in the hw (so we need to add 100 ms). The sw vbt table
+	 * multiplies it by 1000 to put it in units of 100 usec, too. */
+	spec.t11_t12 = (510 + 100) * 10;
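+	/* Worked example of the units: the 210 ms T1+T3 limit becomes
+	 * 210 * 10 = 2100 in 100 usec units, and T11+T12 becomes
+	 * (510 + 100) * 10 = 6100, i.e. 610 ms. */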
+
+	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+	/* Use the max of the register settings and vbt. If both are
+	 * unset, fall back to the spec limits. */
+#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
+				       spec.field : \
+				       max(cur.field, vbt.field))
+	assign_final(t1_t3);
+	assign_final(t8);
+	assign_final(t9);
+	assign_final(t10);
+	assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
+	intel_dp->panel_power_up_delay = get_delay(t1_t3);
+	intel_dp->backlight_on_delay = get_delay(t8);
+	intel_dp->backlight_off_delay = get_delay(t9);
+	intel_dp->panel_power_down_delay = get_delay(t10);
+	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
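+	/* get_delay() converts back from 100 usec units to ms, e.g. a
+	 * final t1_t3 of 2100 yields DIV_ROUND_UP(2100, 10) = 210 ms
+	 * for panel_power_up_delay. */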
+
+	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+		      intel_dp->panel_power_cycle_delay);
+
+	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+	if (out)
+		*out = final;
+}
+
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+					      struct intel_dp *intel_dp,
+					      struct edp_power_seq *seq)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 pp_on, pp_off, pp_div, port_sel = 0;
+	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
+	int pp_on_reg, pp_off_reg, pp_div_reg;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		pp_on_reg = PCH_PP_ON_DELAYS;
+		pp_off_reg = PCH_PP_OFF_DELAYS;
+		pp_div_reg = PCH_PP_DIVISOR;
+	} else {
+		pp_on_reg = PIPEA_PP_ON_DELAYS;
+		pp_off_reg = PIPEA_PP_OFF_DELAYS;
+		pp_div_reg = PIPEA_PP_DIVISOR;
+	}
+
+	if (IS_VALLEYVIEW(dev))
+		port_sel = I915_READ(pp_on_reg) & 0xc0000000;
+
+	/* And finally store the new values in the power sequencer. */
+	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+	/* Compute the divisor for the pp clock, simply match the Bspec
+	 * formula. */
+	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
+	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
+			<< PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+	/* Haswell doesn't have any port selection bits for the panel
+	 * power sequencer any more. */
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		if (is_cpu_edp(intel_dp))
+			port_sel = PANEL_POWER_PORT_DP_A;
+		else
+			port_sel = PANEL_POWER_PORT_DP_D;
+	}
+
+	pp_on |= port_sel;
+
+	I915_WRITE(pp_on_reg, pp_on);
+	I915_WRITE(pp_off_reg, pp_off);
+	I915_WRITE(pp_div_reg, pp_div);
+
+	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+		      I915_READ(pp_on_reg),
+		      I915_READ(pp_off_reg),
+		      I915_READ(pp_div_reg));
+}
+
+void
+intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+			struct intel_connector *intel_connector)
+{
+	struct drm_connector *connector = &intel_connector->base;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *fixed_mode = NULL;
+	struct edp_power_seq power_seq = { 0 };
+	enum port port = intel_dig_port->port;
+	const char *name = NULL;
+	int type;
+
+	/* Preserve the current hw state. */
+	intel_dp->DP = I915_READ(intel_dp->output_reg);
+	intel_dp->attached_connector = intel_connector;
+
+	if (HAS_PCH_SPLIT(dev) && port == PORT_D)
+		if (intel_dpd_is_edp(dev))
+			intel_dp->is_pch_edp = true;
+
+	/*
+	 * FIXME: We need to initialize built-in panels before external panels.
+	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
+	 */
+	if (IS_VALLEYVIEW(dev) && port == PORT_C) {
+		type = DRM_MODE_CONNECTOR_eDP;
+		intel_encoder->type = INTEL_OUTPUT_EDP;
+	} else if (port == PORT_A || is_pch_edp(intel_dp)) {
+		type = DRM_MODE_CONNECTOR_eDP;
+		intel_encoder->type = INTEL_OUTPUT_EDP;
+	} else {
+		/* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
+		 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
+		 * rewrite it.
+		 */
+		type = DRM_MODE_CONNECTOR_DisplayPort;
+	}
+
+	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
+	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+
+	connector->interlace_allowed = true;
+	connector->doublescan_allowed = false;
+
+	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+			  ironlake_panel_vdd_work);
+
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
+	drm_sysfs_connector_add(connector);
+
+	if (HAS_DDI(dev))
+		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+	else
+		intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+	intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
+	if (HAS_DDI(dev)) {
+		switch (intel_dig_port->port) {
+		case PORT_A:
+			intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
+			break;
+		case PORT_B:
+			intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
+			break;
+		case PORT_C:
+			intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
+			break;
+		case PORT_D:
+			intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
+			break;
+		default:
+			BUG();
+		}
+	}
+
+	/* Set up the DDC bus. */
+	switch (port) {
+	case PORT_A:
+		intel_encoder->hpd_pin = HPD_PORT_A;
+		name = "DPDDC-A";
+		break;
+	case PORT_B:
+		intel_encoder->hpd_pin = HPD_PORT_B;
+		name = "DPDDC-B";
+		break;
+	case PORT_C:
+		intel_encoder->hpd_pin = HPD_PORT_C;
+		name = "DPDDC-C";
+		break;
+	case PORT_D:
+		intel_encoder->hpd_pin = HPD_PORT_D;
+		name = "DPDDC-D";
+		break;
+	default:
+		BUG();
+	}
+
+	if (is_edp(intel_dp))
+		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+
+	intel_dp_i2c_init(intel_dp, intel_connector, name);
+
+	/* Cache DPCD and EDID for edp. */
+	if (is_edp(intel_dp)) {
+		bool ret;
+		struct drm_display_mode *scan;
+		struct edid *edid;
+
+		ironlake_edp_panel_vdd_on(intel_dp);
+		ret = intel_dp_get_dpcd(intel_dp);
+		ironlake_edp_panel_vdd_off(intel_dp, false);
+
+		if (ret) {
+			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+				dev_priv->no_aux_handshake =
+					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
+		} else {
+			/* if this fails, presume the device is a ghost */
+			DRM_INFO("failed to retrieve link info, disabling eDP\n");
+			intel_dp_encoder_destroy(&intel_encoder->base);
+			intel_dp_destroy(connector);
+			return;
+		}
+
+		/* We now know it's not a ghost, init power sequence regs. */
+		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+							      &power_seq);
+
+		ironlake_edp_panel_vdd_on(intel_dp);
+		edid = drm_get_edid(connector, &intel_dp->adapter);
+		if (edid) {
+			if (drm_add_edid_modes(connector, edid)) {
+				drm_mode_connector_update_edid_property(connector, edid);
+				drm_edid_to_eld(connector, edid);
+			} else {
+				kfree(edid);
+				edid = ERR_PTR(-EINVAL);
+			}
+		} else {
+			edid = ERR_PTR(-ENOENT);
+		}
+		intel_connector->edid = edid;
+
+		/* prefer fixed mode from EDID if available */
+		list_for_each_entry(scan, &connector->probed_modes, head) {
+			if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+				fixed_mode = drm_mode_duplicate(dev, scan);
+				break;
+			}
+		}
+
+		/* fall back to VBT if available for eDP */
+		if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+			fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+			if (fixed_mode)
+				fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+		}
+
+		ironlake_edp_panel_vdd_off(intel_dp, false);
+	}
+
+	if (is_edp(intel_dp)) {
+		intel_panel_init(&intel_connector->panel, fixed_mode);
+		intel_panel_setup_backlight(connector);
+	}
+
+	intel_dp_add_properties(intel_dp, connector);
+
+	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+	 * 0xd.  Failure to do so will result in spurious interrupts being
+	 * generated on the port when a cable is not attached.
+	 */
+	if (IS_G4X(dev) && !IS_GM45(dev)) {
+		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+	}
+}
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+{
+	struct intel_digital_port *intel_dig_port;
+	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
+	struct intel_connector *intel_connector;
+
+	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	if (!intel_dig_port)
+		return;
+
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_dig_port);
+		return;
+	}
+
+	intel_encoder = &intel_dig_port->base;
+	encoder = &intel_encoder->base;
+
+	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+
+	intel_encoder->compute_config = intel_dp_compute_config;
+	intel_encoder->enable = intel_enable_dp;
+	intel_encoder->pre_enable = intel_pre_enable_dp;
+	intel_encoder->disable = intel_disable_dp;
+	intel_encoder->post_disable = intel_post_disable_dp;
+	intel_encoder->get_hw_state = intel_dp_get_hw_state;
+
+	intel_dig_port->port = port;
+	intel_dig_port->dp.output_reg = output_reg;
+
+	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	intel_encoder->cloneable = false;
+	intel_encoder->hot_plug = intel_dp_hot_plug;
+
+	intel_dp_init_connector(intel_dig_port, intel_connector);
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_drv.h b/linux-imx/drivers/gpu/drm/i915/intel_drv.h
new file mode 100644
index 0000000..7cd5584
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_drv.h
@@ -0,0 +1,732 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef __INTEL_DRV_H__
+#define __INTEL_DRV_H__
+
+#include <linux/i2c.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_helper.h>
+
+/**
+ * _wait_for - magic (register) wait macro
+ *
+ * Does the right thing for modeset paths when run under kgdb or similar atomic
+ * contexts. Note that it's important that we check the condition again after
+ * having timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
+#define _wait_for(COND, MS, W) ({ \
+	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
+	int ret__ = 0;							\
+	while (!(COND)) {						\
+		if (time_after(jiffies, timeout__)) {			\
+			if (!(COND))					\
+				ret__ = -ETIMEDOUT;			\
+			break;						\
+		}							\
+		if (W && drm_can_sleep())  {				\
+			msleep(W);					\
+		} else {						\
+			cpu_relax();					\
+		}							\
+	}								\
+	ret__;								\
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+#define wait_for_atomic_us(COND, US) _wait_for((COND), \
+					       DIV_ROUND_UP((US), 1000), 0)
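+/*
+ * Typical usage (hypothetical register and bit, shown for illustration):
+ *
+ *   if (wait_for(I915_READ(SOME_STATUS_REG) & SOME_READY_BIT, 50))
+ *       DRM_ERROR("timed out waiting for SOME_READY_BIT\n");
+ *
+ * wait_for() may sleep between polls; wait_for_atomic() busy-waits and is
+ * safe in atomic context.
+ */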
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
+
+/*
+ * Display related stuff
+ */
+
+/* store information about an Ixxx DVO */
+/* The i830->i865 use multiple DVOs with multiple i2cs */
+/* the i915, i945 have a single sDVO i2c bus - which is different */
+#define MAX_OUTPUTS 6
+/* maximum connectors per crtc in the mode set */
+#define INTELFB_CONN_LIMIT 4
+
+#define INTEL_I2C_BUS_DVO 1
+#define INTEL_I2C_BUS_SDVO 2
+
+/* These are outputs from the chip - integrated only;
+ * external chips are attached via DVO or SDVO output. */
+#define INTEL_OUTPUT_UNUSED 0
+#define INTEL_OUTPUT_ANALOG 1
+#define INTEL_OUTPUT_DVO 2
+#define INTEL_OUTPUT_SDVO 3
+#define INTEL_OUTPUT_LVDS 4
+#define INTEL_OUTPUT_TVOUT 5
+#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_DISPLAYPORT 7
+#define INTEL_OUTPUT_EDP 8
+#define INTEL_OUTPUT_UNKNOWN 9
+
+#define INTEL_DVO_CHIP_NONE 0
+#define INTEL_DVO_CHIP_LVDS 1
+#define INTEL_DVO_CHIP_TMDS 2
+#define INTEL_DVO_CHIP_TVOUT 4
+
+struct intel_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_i915_gem_object *obj;
+};
+
+struct intel_fbdev {
+	struct drm_fb_helper helper;
+	struct intel_framebuffer ifb;
+	struct list_head fbdev_list;
+	struct drm_display_mode *our_mode;
+};
+
+struct intel_encoder {
+	struct drm_encoder base;
+	/*
+	 * The new crtc this encoder will be driven from. Only differs from
+	 * base->crtc while a modeset is in progress.
+	 */
+	struct intel_crtc *new_crtc;
+
+	int type;
+	bool needs_tv_clock;
+	/*
+	 * Intel hw has only one MUX where encoders could be cloned, hence a
+	 * simple flag is enough to compute the possible_clones mask.
+	 */
+	bool cloneable;
+	bool connectors_active;
+	void (*hot_plug)(struct intel_encoder *);
+	bool (*compute_config)(struct intel_encoder *,
+			       struct intel_crtc_config *);
+	void (*pre_pll_enable)(struct intel_encoder *);
+	void (*pre_enable)(struct intel_encoder *);
+	void (*enable)(struct intel_encoder *);
+	void (*mode_set)(struct intel_encoder *intel_encoder);
+	void (*disable)(struct intel_encoder *);
+	void (*post_disable)(struct intel_encoder *);
+	/* Read out the current hw state of this encoder, returning true if
+	 * the encoder is active. If the encoder is enabled it also sets the
+	 * pipe it is connected to in the pipe parameter. */
+	bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
+	int crtc_mask;
+	enum hpd_pin hpd_pin;
+};
+
+struct intel_panel {
+	struct drm_display_mode *fixed_mode;
+	int fitting_mode;
+};
+
+struct intel_connector {
+	struct drm_connector base;
+	/*
+	 * The fixed encoder this connector is connected to.
+	 */
+	struct intel_encoder *encoder;
+
+	/*
+	 * The new encoder this connector will be driven by. Only differs from
+	 * encoder while a modeset is in progress.
+	 */
+	struct intel_encoder *new_encoder;
+
+	/* Reads out the current hw, returning true if the connector is enabled
+	 * and active (i.e. dpms ON state). */
+	bool (*get_hw_state)(struct intel_connector *);
+
+	/* Panel info for eDP and LVDS */
+	struct intel_panel panel;
+
+	/* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+	struct edid *edid;
+
+	/* Since POLL and HPD connectors may use the same HPD line, keep the
+	 * native state of connector->polled in case hotplug storm detection
+	 * changes it. */
+	u8 polled;
+};
+
+struct intel_crtc_config {
+	struct drm_display_mode requested_mode;
+	struct drm_display_mode adjusted_mode;
+	/* This flag must be set by the encoder's compute_config callback if it
+	 * changes the crtc timings in the mode to prevent the crtc fixup from
+	 * overwriting them.  Currently only lvds needs that. */
+	bool timings_set;
+	/* Whether to set up the PCH/FDI. Note that we never allow sharing
+	 * between pch encoders and cpu encoders. */
+	bool has_pch_encoder;
+
+	/* CPU Transcoder for the pipe. Currently this can only differ from the
+	 * pipe on Haswell (where we have a special eDP transcoder). */
+	enum transcoder cpu_transcoder;
+
+	/*
+	 * Use reduced/limited/broadcast rgb range, compressing from the full
+	 * range fed into the crtcs.
+	 */
+	bool limited_color_range;
+
+	/* DP has a bunch of special cases, unfortunately, so mark the pipe
+	 * accordingly. */
+	bool has_dp_encoder;
+	bool dither;
+
+	/* Controls for the clock computation, to override various stages. */
+	bool clock_set;
+
+	/* Settings for the intel dpll used on pretty much everything but
+	 * haswell. */
+	struct dpll {
+		unsigned n;
+		unsigned m1, m2;
+		unsigned p1, p2;
+	} dpll;
+
+	int pipe_bpp;
+	struct intel_link_m_n dp_m_n;
+	/**
+	 * This is currently used by DP and HDMI encoders since those can have a
+	 * target pixel clock != the port link clock (which is currently stored
+	 * in adjusted_mode->clock).
+	 */
+	int pixel_target_clock;
+	/* Used by SDVO (and if we ever fix it, HDMI). */
+	unsigned pixel_multiplier;
+};
+
+struct intel_crtc {
+	struct drm_crtc base;
+	enum pipe pipe;
+	enum plane plane;
+	u8 lut_r[256], lut_g[256], lut_b[256];
+	/*
+	 * Whether the crtc and the connected output pipeline are active. Implies
+	 * that crtc->enabled is set, i.e. the current mode configuration has
+	 * some outputs connected to this crtc.
+	 */
+	bool active;
+	bool eld_vld;
+	bool primary_disabled; /* is the crtc obscured by a plane? */
+	bool lowfreq_avail;
+	struct intel_overlay *overlay;
+	struct intel_unpin_work *unpin_work;
+	int fdi_lanes;
+
+	atomic_t unpin_work_count;
+
+	/* Display surface base address adjustment for pageflips. Note that on
+	 * gen4+ this only adjusts up to a tile, offsets within a tile are
+	 * handled in the hw itself (with the TILEOFF register). */
+	unsigned long dspaddr_offset;
+
+	struct drm_i915_gem_object *cursor_bo;
+	uint32_t cursor_addr;
+	int16_t cursor_x, cursor_y;
+	int16_t cursor_width, cursor_height;
+	bool cursor_visible;
+
+	struct intel_crtc_config config;
+
+	/* We can share PLLs across outputs if the timings match */
+	struct intel_pch_pll *pch_pll;
+	uint32_t ddi_pll_sel;
+
+	/* reset counter value when the last flip was submitted */
+	unsigned int reset_counter;
+};
+
+struct intel_plane {
+	struct drm_plane base;
+	int plane;
+	enum pipe pipe;
+	struct drm_i915_gem_object *obj;
+	bool can_scale;
+	int max_downscale;
+	u32 lut_r[1024], lut_g[1024], lut_b[1024];
+	int crtc_x, crtc_y;
+	unsigned int crtc_w, crtc_h;
+	uint32_t src_x, src_y;
+	uint32_t src_w, src_h;
+	void (*update_plane)(struct drm_plane *plane,
+			     struct drm_framebuffer *fb,
+			     struct drm_i915_gem_object *obj,
+			     int crtc_x, int crtc_y,
+			     unsigned int crtc_w, unsigned int crtc_h,
+			     uint32_t x, uint32_t y,
+			     uint32_t src_w, uint32_t src_h);
+	void (*disable_plane)(struct drm_plane *plane);
+	int (*update_colorkey)(struct drm_plane *plane,
+			       struct drm_intel_sprite_colorkey *key);
+	void (*get_colorkey)(struct drm_plane *plane,
+			     struct drm_intel_sprite_colorkey *key);
+};
+
+struct intel_watermark_params {
+	unsigned long fifo_size;
+	unsigned long max_wm;
+	unsigned long default_wm;
+	unsigned long guard_size;
+	unsigned long cacheline_size;
+};
+
+struct cxsr_latency {
+	int is_desktop;
+	int is_ddr3;
+	unsigned long fsb_freq;
+	unsigned long mem_freq;
+	unsigned long display_sr;
+	unsigned long display_hpll_disable;
+	unsigned long cursor_sr;
+	unsigned long cursor_hpll_disable;
+};
+
+#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_connector(x) container_of(x, struct intel_connector, base)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define to_intel_plane(x) container_of(x, struct intel_plane, base)
+
+#define DIP_HEADER_SIZE	5
+
+#define DIP_TYPE_AVI    0x82
+#define DIP_VERSION_AVI 0x2
+#define DIP_LEN_AVI     13
+#define DIP_AVI_PR_1    0
+#define DIP_AVI_PR_2    1
+#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT	(0 << 2)
+#define DIP_AVI_RGB_QUANT_RANGE_LIMITED	(1 << 2)
+#define DIP_AVI_RGB_QUANT_RANGE_FULL	(2 << 2)
+
+#define DIP_TYPE_SPD	0x83
+#define DIP_VERSION_SPD	0x1
+#define DIP_LEN_SPD	25
+#define DIP_SPD_UNKNOWN	0
+#define DIP_SPD_DSTB	0x1
+#define DIP_SPD_DVDP	0x2
+#define DIP_SPD_DVHS	0x3
+#define DIP_SPD_HDDVR	0x4
+#define DIP_SPD_DVC	0x5
+#define DIP_SPD_DSC	0x6
+#define DIP_SPD_VCD	0x7
+#define DIP_SPD_GAME	0x8
+#define DIP_SPD_PC	0x9
+#define DIP_SPD_BD	0xa
+#define DIP_SPD_SCD	0xb
+
+struct dip_infoframe {
+	uint8_t type;		/* HB0 */
+	uint8_t ver;		/* HB1 */
+	uint8_t len;		/* HB2 - body len, not including checksum */
+	uint8_t ecc;		/* Header ECC */
+	uint8_t checksum;	/* PB0 */
+	union {
+		struct {
+			/* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
+			uint8_t Y_A_B_S;
+			/* PB2 - C 7:6, M 5:4, R 3:0 */
+			uint8_t C_M_R;
+			/* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
+			uint8_t ITC_EC_Q_SC;
+			/* PB4 - VIC 6:0 */
+			uint8_t VIC;
+			/* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
+			uint8_t YQ_CN_PR;
+			/* PB6 to PB13 */
+			uint16_t top_bar_end;
+			uint16_t bottom_bar_start;
+			uint16_t left_bar_end;
+			uint16_t right_bar_start;
+		} __attribute__ ((packed)) avi;
+		struct {
+			uint8_t vn[8];
+			uint8_t pd[16];
+			uint8_t sdi;
+		} __attribute__ ((packed)) spd;
+		uint8_t payload[27];
+	} __attribute__ ((packed)) body;
+} __attribute__((packed));
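+/* Sanity check on the packed layout above: the avi body is 5 bytes plus
+ * four u16 bar fields = 13 bytes (DIP_LEN_AVI), and the spd body is
+ * 8 + 16 + 1 = 25 bytes (DIP_LEN_SPD). */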
+
+struct intel_hdmi {
+	u32 hdmi_reg;
+	int ddc_bus;
+	uint32_t color_range;
+	bool color_range_auto;
+	bool has_hdmi_sink;
+	bool has_audio;
+	enum hdmi_force_audio force_audio;
+	bool rgb_quant_range_selectable;
+	void (*write_infoframe)(struct drm_encoder *encoder,
+				struct dip_infoframe *frame);
+	void (*set_infoframes)(struct drm_encoder *encoder,
+			       struct drm_display_mode *adjusted_mode);
+};
+
+#define DP_MAX_DOWNSTREAM_PORTS		0x10
+#define DP_LINK_CONFIGURATION_SIZE	9
+
+struct intel_dp {
+	uint32_t output_reg;
+	uint32_t aux_ch_ctl_reg;
+	uint32_t DP;
+	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
+	bool has_audio;
+	enum hdmi_force_audio force_audio;
+	uint32_t color_range;
+	bool color_range_auto;
+	uint8_t link_bw;
+	uint8_t lane_count;
+	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+	uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+	struct i2c_adapter adapter;
+	struct i2c_algo_dp_aux_data algo;
+	bool is_pch_edp;
+	uint8_t train_set[4];
+	int panel_power_up_delay;
+	int panel_power_down_delay;
+	int panel_power_cycle_delay;
+	int backlight_on_delay;
+	int backlight_off_delay;
+	struct delayed_work panel_vdd_work;
+	bool want_panel_vdd;
+	struct intel_connector *attached_connector;
+};
+
+struct intel_digital_port {
+	struct intel_encoder base;
+	enum port port;
+	u32 saved_port_bits;
+	struct intel_dp dp;
+	struct intel_hdmi hdmi;
+};
+
+static inline struct drm_crtc *
+intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->pipe_to_crtc_mapping[pipe];
+}
+
+static inline struct drm_crtc *
+intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->plane_to_crtc_mapping[plane];
+}
+
+struct intel_unpin_work {
+	struct work_struct work;
+	struct drm_crtc *crtc;
+	struct drm_i915_gem_object *old_fb_obj;
+	struct drm_i915_gem_object *pending_flip_obj;
+	struct drm_pending_vblank_event *event;
+	atomic_t pending;
+#define INTEL_FLIP_INACTIVE	0
+#define INTEL_FLIP_PENDING	1
+#define INTEL_FLIP_COMPLETE	2
+	bool enable_stall_check;
+};
+
+struct intel_fbc_work {
+	struct delayed_work work;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb;
+	int interval;
+};
+
+int intel_pch_rawclk(struct drm_device *dev);
+
+int intel_connector_update_modes(struct drm_connector *connector,
+				struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+
+extern void intel_attach_force_audio_property(struct drm_connector *connector);
+extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+extern void intel_crt_init(struct drm_device *dev);
+extern void intel_hdmi_init(struct drm_device *dev,
+			    int hdmi_reg, enum port port);
+extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+				      struct intel_connector *intel_connector);
+extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+				      struct intel_crtc_config *pipe_config);
+extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
+extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
+			    bool is_sdvob);
+extern void intel_dvo_init(struct drm_device *dev);
+extern void intel_tv_init(struct drm_device *dev);
+extern void intel_mark_busy(struct drm_device *dev);
+extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
+extern void intel_mark_idle(struct drm_device *dev);
+extern bool intel_lvds_init(struct drm_device *dev);
+extern bool intel_is_dual_link_lvds(struct drm_device *dev);
+extern void intel_dp_init(struct drm_device *dev, int output_reg,
+			  enum port port);
+extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+				    struct intel_connector *intel_connector);
+extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
+extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
+extern bool intel_dp_compute_config(struct intel_encoder *encoder,
+				    struct intel_crtc_config *pipe_config);
+extern bool intel_dpd_is_edp(struct drm_device *dev);
+extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
+extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+				      enum plane plane);
+
+/* intel_panel.c */
+extern int intel_panel_init(struct intel_panel *panel,
+			    struct drm_display_mode *fixed_mode);
+extern void intel_panel_fini(struct intel_panel *panel);
+
+extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+				   struct drm_display_mode *adjusted_mode);
+extern void intel_pch_panel_fitting(struct drm_device *dev,
+				    int fitting_mode,
+				    const struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode);
+extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
+extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
+extern int intel_panel_setup_backlight(struct drm_connector *connector);
+extern void intel_panel_enable_backlight(struct drm_device *dev,
+					 enum pipe pipe);
+extern void intel_panel_disable_backlight(struct drm_device *dev);
+extern void intel_panel_destroy_backlight(struct drm_device *dev);
+extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+
+struct intel_set_config {
+	struct drm_encoder **save_connector_encoders;
+	struct drm_crtc **save_encoder_crtcs;
+
+	bool fb_changed;
+	bool mode_changed;
+};
+
+extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			  int x, int y, struct drm_framebuffer *old_fb);
+extern void intel_modeset_disable(struct drm_device *dev);
+extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
+extern void intel_crtc_load_lut(struct drm_crtc *crtc);
+extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
+extern void intel_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
+extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
+extern void intel_connector_dpms(struct drm_connector *, int mode);
+extern bool intel_connector_get_hw_state(struct intel_connector *connector);
+extern void intel_modeset_check_state(struct drm_device *dev);
+extern void intel_plane_restore(struct drm_plane *plane);
+
+static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+{
+	return to_intel_connector(connector)->encoder;
+}
+
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+	struct intel_digital_port *intel_dig_port =
+		container_of(encoder, struct intel_digital_port, base.base);
+	return &intel_dig_port->dp;
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_digital_port, base.base);
+}
+
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+	return container_of(intel_dp, struct intel_digital_port, dp);
+}
+
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+	return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
+
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+				struct intel_digital_port *port);
+
+extern void intel_connector_attach_encoder(struct intel_connector *connector,
+					   struct intel_encoder *encoder);
+extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+
+extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+						    struct drm_crtc *crtc);
+int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+extern enum transcoder
+intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+			     enum pipe pipe);
+extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+
+struct intel_load_detect_pipe {
+	struct drm_framebuffer *release_fb;
+	bool load_detect_temp;
+	int dpms_mode;
+};
+extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
+				       struct drm_display_mode *mode,
+				       struct intel_load_detect_pipe *old);
+extern void intel_release_load_detect_pipe(struct drm_connector *connector,
+					   struct intel_load_detect_pipe *old);
+
+extern void intelfb_restore(void);
+extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+				    u16 blue, int regno);
+extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+				    u16 *blue, int regno);
+extern void intel_enable_clock_gating(struct drm_device *dev);
+
+extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+				      struct drm_i915_gem_object *obj,
+				      struct intel_ring_buffer *pipelined);
+extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
+
+extern int intel_framebuffer_init(struct drm_device *dev,
+				  struct intel_framebuffer *ifb,
+				  struct drm_mode_fb_cmd2 *mode_cmd,
+				  struct drm_i915_gem_object *obj);
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_initial_config(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
+extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
+extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
+extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+
+extern void intel_setup_overlay(struct drm_device *dev);
+extern void intel_cleanup_overlay(struct drm_device *dev);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
+extern int intel_overlay_put_image(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv);
+extern int intel_overlay_attrs(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
+
+extern void intel_fb_output_poll_changed(struct drm_device *dev);
+extern void intel_fb_restore_mode(struct drm_device *dev);
+
+extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+			bool state);
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
+extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_write_eld(struct drm_encoder *encoder,
+			    struct drm_display_mode *mode);
+extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+extern void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
+					 struct intel_link_m_n *m_n);
+extern void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
+					 struct intel_link_m_n *m_n);
+extern void intel_prepare_ddi(struct drm_device *dev);
+extern void hsw_fdi_link_train(struct drm_crtc *crtc);
+extern void intel_ddi_init(struct drm_device *dev, enum port port);
+
+/* For use by IVB LP watermark workaround in intel_sprite.c */
+extern void intel_update_watermarks(struct drm_device *dev);
+extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+					   uint32_t sprite_width,
+					   int pixel_size);
+extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
+			 struct drm_display_mode *mode);
+
+extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+						    unsigned int tiling_mode,
+						    unsigned int bpp,
+						    unsigned int pitch);
+
+extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+
+extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
+
+/* Power-related functions, located in intel_pm.c */
+extern void intel_init_pm(struct drm_device *dev);
+/* FBC */
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern void intel_update_fbc(struct drm_device *dev);
+/* IPS */
+extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+extern void intel_gpu_ips_teardown(void);
+
+extern bool intel_using_power_well(struct drm_device *dev);
+extern void intel_init_power_well(struct drm_device *dev);
+extern void intel_set_power_well(struct drm_device *dev, bool enable);
+extern void intel_enable_gt_powersave(struct drm_device *dev);
+extern void intel_disable_gt_powersave(struct drm_device *dev);
+extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
+extern void ironlake_teardown_rc6(struct drm_device *dev);
+
+extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe);
+extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+extern void intel_ddi_pll_init(struct drm_device *dev);
+extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
+extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+					      enum transcoder cpu_transcoder);
+extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
+extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+extern bool
+intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+
+extern void intel_display_handle_reset(struct drm_device *dev);
+
+#endif /* __INTEL_DRV_H__ */
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_dvo.c b/linux-imx/drivers/gpu/drm/i915/intel_dvo.c
new file mode 100644
index 0000000..cc70b16
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_dvo.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "dvo.h"
+
+#define SIL164_ADDR	0x38
+#define CH7xxx_ADDR	0x76
+#define TFP410_ADDR	0x38
+#define NS2501_ADDR     0x38
+
+static const struct intel_dvo_device intel_dvo_devices[] = {
+	{
+		.type = INTEL_DVO_CHIP_TMDS,
+		.name = "sil164",
+		.dvo_reg = DVOC,
+		.slave_addr = SIL164_ADDR,
+		.dev_ops = &sil164_ops,
+	},
+	{
+		.type = INTEL_DVO_CHIP_TMDS,
+		.name = "ch7xxx",
+		.dvo_reg = DVOC,
+		.slave_addr = CH7xxx_ADDR,
+		.dev_ops = &ch7xxx_ops,
+	},
+	{
+		.type = INTEL_DVO_CHIP_LVDS,
+		.name = "ivch",
+		.dvo_reg = DVOA,
+		.slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
+		.dev_ops = &ivch_ops,
+	},
+	{
+		.type = INTEL_DVO_CHIP_TMDS,
+		.name = "tfp410",
+		.dvo_reg = DVOC,
+		.slave_addr = TFP410_ADDR,
+		.dev_ops = &tfp410_ops,
+	},
+	{
+		.type = INTEL_DVO_CHIP_LVDS,
+		.name = "ch7017",
+		.dvo_reg = DVOC,
+		.slave_addr = 0x75,
+		.gpio = GMBUS_PORT_DPB,
+		.dev_ops = &ch7017_ops,
+	},
+	{
+		.type = INTEL_DVO_CHIP_TMDS,
+		.name = "ns2501",
+		.dvo_reg = DVOC,
+		.slave_addr = NS2501_ADDR,
+		.dev_ops = &ns2501_ops,
+	}
+};
+
+struct intel_dvo {
+	struct intel_encoder base;
+
+	struct intel_dvo_device dev;
+
+	struct drm_display_mode *panel_fixed_mode;
+	bool panel_wants_dither;
+};
+
+static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_dvo, base.base);
+}
+
+static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_dvo, base);
+}
+
+static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
+{
+	struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+
+	return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
+}
+
+static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+	u32 tmp;
+
+	tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+	if (!(tmp & DVO_ENABLE))
+		return false;
+
+	*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_disable_dvo(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+	u32 dvo_reg = intel_dvo->dev.dvo_reg;
+	u32 temp = I915_READ(dvo_reg);
+
+	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+	I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
+	I915_READ(dvo_reg);
+}
+
+static void intel_enable_dvo(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+	u32 dvo_reg = intel_dvo->dev.dvo_reg;
+	u32 temp = I915_READ(dvo_reg);
+
+	I915_WRITE(dvo_reg, temp | DVO_ENABLE);
+	I915_READ(dvo_reg);
+	intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+}
+
+static void intel_dvo_dpms(struct drm_connector *connector, int mode)
+{
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	struct drm_crtc *crtc;
+
+	/* DVO supports only two DPMS states. */
+	if (mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
+
+	if (mode == connector->dpms)
+		return;
+
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	crtc = intel_dvo->base.base.crtc;
+	if (!crtc) {
+		intel_dvo->base.connectors_active = false;
+		return;
+	}
+
+	if (mode == DRM_MODE_DPMS_ON) {
+		intel_dvo->base.connectors_active = true;
+
+		intel_crtc_update_dpms(crtc);
+
+		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
+	} else {
+		intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
+
+		intel_dvo->base.connectors_active = false;
+
+		intel_crtc_update_dpms(crtc);
+	}
+
+	intel_modeset_check_state(connector->dev);
+}
+
+static int intel_dvo_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	/* XXX: Validate clock range */
+
+	if (intel_dvo->panel_fixed_mode) {
+		if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+
+	return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
+}
+
+static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
+				 const struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
+{
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+
+	/* If we have timings from the BIOS for the panel, put them into
+	 * the adjusted mode.  The CRTC will be set up for this mode,
+	 * with the panel scaling set up to source from the H/VDisplay
+	 * of the original mode.
+	 */
+	if (intel_dvo->panel_fixed_mode != NULL) {
+#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
+		C(hdisplay);
+		C(hsync_start);
+		C(hsync_end);
+		C(htotal);
+		C(vdisplay);
+		C(vsync_start);
+		C(vsync_end);
+		C(vtotal);
+		C(clock);
+#undef C
+	}
+
+	if (intel_dvo->dev.dev_ops->mode_fixup)
+		return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
+
+	return true;
+}
+
+static void intel_dvo_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+	int pipe = intel_crtc->pipe;
+	u32 dvo_val;
+	u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
+	int dpll_reg = DPLL(pipe);
+
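+	/*
+	 * Each DVO output has its own source-dimension register; pick the
+	 * SRCDIM register that matches the control register we are using.
+	 */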
+	switch (dvo_reg) {
+	case DVOA:
+	default:
+		dvo_srcdim_reg = DVOA_SRCDIM;
+		break;
+	case DVOB:
+		dvo_srcdim_reg = DVOB_SRCDIM;
+		break;
+	case DVOC:
+		dvo_srcdim_reg = DVOC_SRCDIM;
+		break;
+	}
+
+	intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
+
+	/* Save the data order, since I don't know what it should be set to. */
+	dvo_val = I915_READ(dvo_reg) &
+		  (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
+	dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
+		   DVO_BLANK_ACTIVE_HIGH;
+
+	if (pipe == 1)
+		dvo_val |= DVO_PIPE_B_SELECT;
+	dvo_val |= DVO_PIPE_STALL;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
+
+	I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
+
+	I915_WRITE(dvo_srcdim_reg,
+		   (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
+		   (adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
+	I915_WRITE(dvo_reg, dvo_val);
+}
+
+/**
+ * Detect the output connection on our DVO device.
+ *
+ * Unimplemented at this layer; detection is delegated to the DVO chip's
+ * dev_ops->detect() hook.
+ */
+static enum drm_connector_status
+intel_dvo_detect(struct drm_connector *connector, bool force)
+{
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
+}
+
+static int intel_dvo_get_modes(struct drm_connector *connector)
+{
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+	/* We should probably have an i2c driver get_modes function for those
+	 * devices which will have a fixed set of modes determined by the chip
+	 * (TV-out, for example), but for now with just TMDS and LVDS,
+	 * that's not the case.
+	 */
+	intel_ddc_get_modes(connector,
+			    intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
+	if (!list_empty(&connector->probed_modes))
+		return 1;
+
+	if (intel_dvo->panel_fixed_mode != NULL) {
+		struct drm_display_mode *mode;
+		mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
+		if (mode) {
+			drm_mode_probed_add(connector, mode);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static void intel_dvo_destroy(struct drm_connector *connector)
+{
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
+	.mode_fixup = intel_dvo_mode_fixup,
+	.mode_set = intel_dvo_mode_set,
+};
+
+static const struct drm_connector_funcs intel_dvo_connector_funcs = {
+	.dpms = intel_dvo_dpms,
+	.detect = intel_dvo_detect,
+	.destroy = intel_dvo_destroy,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
+	.mode_valid = intel_dvo_mode_valid,
+	.get_modes = intel_dvo_get_modes,
+	.best_encoder = intel_best_encoder,
+};
+
+static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
+{
+	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+
+	if (intel_dvo->dev.dev_ops->destroy)
+		intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
+
+	kfree(intel_dvo->panel_fixed_mode);
+
+	intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
+	.destroy = intel_dvo_enc_destroy,
+};
+
+/**
+ * Attempts to get a fixed panel timing for LVDS (currently only the i830).
+ *
+ * Other chips with DVO LVDS will need to extend this to deal with the LVDS
+ * chip being on DVOB/C and having multiple pipes.
+ */
+static struct drm_display_mode *
+intel_dvo_get_current_mode(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
+	struct drm_display_mode *mode = NULL;
+
+	/* If the DVO port is active, that'll be the LVDS, so we can pull out
+	 * its timings to get how the BIOS set up the panel.
+	 */
+	if (dvo_val & DVO_ENABLE) {
+		struct drm_crtc *crtc;
+		int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
+
+		crtc = intel_get_crtc_for_pipe(dev, pipe);
+		if (crtc) {
+			mode = intel_crtc_mode_get(dev, crtc);
+			if (mode) {
+				mode->type |= DRM_MODE_TYPE_PREFERRED;
+				if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
+					mode->flags |= DRM_MODE_FLAG_PHSYNC;
+				if (dvo_val & DVO_VSYNC_ACTIVE_HIGH)
+					mode->flags |= DRM_MODE_FLAG_PVSYNC;
+			}
+		}
+	}
+
+	return mode;
+}
+
+void intel_dvo_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	struct intel_dvo *intel_dvo;
+	struct intel_connector *intel_connector;
+	int i;
+	int encoder_type = DRM_MODE_ENCODER_NONE;
+
+	intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
+	if (!intel_dvo)
+		return;
+
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_dvo);
+		return;
+	}
+
+	intel_encoder = &intel_dvo->base;
+	drm_encoder_init(dev, &intel_encoder->base,
+			 &intel_dvo_enc_funcs, encoder_type);
+
+	intel_encoder->disable = intel_disable_dvo;
+	intel_encoder->enable = intel_enable_dvo;
+	intel_encoder->get_hw_state = intel_dvo_get_hw_state;
+	intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+
+	/* Now, try to find a controller */
+	for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
+		struct drm_connector *connector = &intel_connector->base;
+		const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+		struct i2c_adapter *i2c;
+		int gpio;
+		bool dvoinit;
+
+		/* Allow the I2C driver info to specify the GPIO to be used in
+		 * special cases, but otherwise default to what's defined
+		 * in the spec.
+		 */
+		if (intel_gmbus_is_port_valid(dvo->gpio))
+			gpio = dvo->gpio;
+		else if (dvo->type == INTEL_DVO_CHIP_LVDS)
+			gpio = GMBUS_PORT_SSC;
+		else
+			gpio = GMBUS_PORT_DPB;
+
+		/* Set up the I2C bus necessary for the chip we're probing.
+		 * It appears that everything is on GPIOE except for panels
+		 * on i830 laptops, which are on GPIOB (DVOA).
+		 */
+		i2c = intel_gmbus_get_adapter(dev_priv, gpio);
+
+		intel_dvo->dev = *dvo;
+
+		/* GMBUS NAK handling seems to be unstable, hence let the
+		 * transmitter detection run in bit-banging mode for now.
+		 */
+		intel_gmbus_force_bit(i2c, true);
+
+		dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+		intel_gmbus_force_bit(i2c, false);
+
+		if (!dvoinit)
+			continue;
+
+		intel_encoder->type = INTEL_OUTPUT_DVO;
+		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+		switch (dvo->type) {
+		case INTEL_DVO_CHIP_TMDS:
+			intel_encoder->cloneable = true;
+			drm_connector_init(dev, connector,
+					   &intel_dvo_connector_funcs,
+					   DRM_MODE_CONNECTOR_DVII);
+			encoder_type = DRM_MODE_ENCODER_TMDS;
+			break;
+		case INTEL_DVO_CHIP_LVDS:
+			intel_encoder->cloneable = false;
+			drm_connector_init(dev, connector,
+					   &intel_dvo_connector_funcs,
+					   DRM_MODE_CONNECTOR_LVDS);
+			encoder_type = DRM_MODE_ENCODER_LVDS;
+			break;
+		}
+
+		drm_connector_helper_add(connector,
+					 &intel_dvo_connector_helper_funcs);
+		connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+		connector->interlace_allowed = false;
+		connector->doublescan_allowed = false;
+
+		drm_encoder_helper_add(&intel_encoder->base,
+				       &intel_dvo_helper_funcs);
+
+		intel_connector_attach_encoder(intel_connector, intel_encoder);
+		if (dvo->type == INTEL_DVO_CHIP_LVDS) {
+			/* For our LVDS chipsets, we should hopefully be able
+			 * to dig the fixed panel mode out of the BIOS data.
+			 * However, it's in a different format from the BIOS
+			 * data on chipsets with integrated LVDS (stored in AIM
+			 * headers, likely), so for now, just get the current
+			 * mode being output through DVO.
+			 */
+			intel_dvo->panel_fixed_mode =
+				intel_dvo_get_current_mode(connector);
+			intel_dvo->panel_wants_dither = true;
+		}
+
+		drm_sysfs_connector_add(connector);
+		return;
+	}
+
+	drm_encoder_cleanup(&intel_encoder->base);
+	kfree(intel_dvo);
+	kfree(intel_connector);
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_fb.c b/linux-imx/drivers/gpu/drm/i915/intel_fb.c
new file mode 100644
index 0000000..6b7c3ca
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_fb.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/vga_switcheroo.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+static struct fb_ops intelfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = cfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int intelfb_create(struct drm_fb_helper *helper,
+			  struct drm_fb_helper_surface_size *sizes)
+{
+	struct intel_fbdev *ifbdev =
+		container_of(helper, struct intel_fbdev, helper);
+	struct drm_device *dev = ifbdev->helper.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct fb_info *info;
+	struct drm_framebuffer *fb;
+	struct drm_mode_fb_cmd2 mode_cmd = {};
+	struct drm_i915_gem_object *obj;
+	struct device *device = &dev->pdev->dev;
+	int size, ret;
+
+	/* we don't do packed 24bpp */
+	if (sizes->surface_bpp == 24)
+		sizes->surface_bpp = 32;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+
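+	/* Scan-out requires the stride to be 64-byte aligned. */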
+	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
+						      8), 64);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	size = ALIGN(size, PAGE_SIZE);
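+	/*
+	 * Try to allocate the framebuffer from stolen memory first; fall
+	 * back to a regular shmemfs-backed GEM object if that fails.
+	 */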
+	obj = i915_gem_object_create_stolen(dev, size);
+	if (obj == NULL)
+		obj = i915_gem_alloc_object(dev, size);
+	if (!obj) {
+		DRM_ERROR("failed to allocate framebuffer\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	/* Flush everything out; we'll be doing GTT-only access from now on */
+	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+	if (ret) {
+		DRM_ERROR("failed to pin fb: %d\n", ret);
+		goto out_unref;
+	}
+
+	info = framebuffer_alloc(0, device);
+	if (!info) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
+
+	info->par = ifbdev;
+
+	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
+	if (ret)
+		goto out_unpin;
+
+	fb = &ifbdev->ifb.base;
+
+	ifbdev->helper.fb = fb;
+	ifbdev->helper.fbdev = info;
+
+	strcpy(info->fix.id, "inteldrmfb");
+
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &intelfb_ops;
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
+	/* setup aperture base/size for vesafb takeover */
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
+	info->apertures->ranges[0].base = dev->mode_config.fb_base;
+	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
+
+	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+	info->fix.smem_len = size;
+
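+	/*
+	 * Map the framebuffer through the mappable GTT aperture with
+	 * write-combining so fbcon's CPU rendering stays reasonably fast.
+	 */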
+	info->screen_base =
+		ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+			   size);
+	if (!info->screen_base) {
+		ret = -ENOSPC;
+		goto out_unpin;
+	}
+	info->screen_size = size;
+
+	/* This driver doesn't need a VT switch to restore the mode on resume */
+	info->skip_vt_switch = true;
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
+
+	/* If the object is shmemfs backed, it will have given us zeroed pages.
+	 * If the object is stolen however, it will be full of whatever
+	 * garbage was left in there.
+	 */
+	if (ifbdev->ifb.obj->stolen)
+		memset_io(info->screen_base, 0, info->screen_size);
+
+	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+		      fb->width, fb->height,
+		      obj->gtt_offset, obj);
+
+
+	vga_switcheroo_client_fb_set(dev->pdev, info);
+	return 0;
+
+out_unpin:
+	i915_gem_object_unpin(obj);
+out_unref:
+	drm_gem_object_unreference(&obj->base);
+	mutex_unlock(&dev->struct_mutex);
+out:
+	return ret;
+}
+
+static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+	.gamma_set = intel_crtc_fb_gamma_set,
+	.gamma_get = intel_crtc_fb_gamma_get,
+	.fb_probe = intelfb_create,
+};
+
+static void intel_fbdev_destroy(struct drm_device *dev,
+				struct intel_fbdev *ifbdev)
+{
+	struct fb_info *info;
+	struct intel_framebuffer *ifb = &ifbdev->ifb;
+
+	if (ifbdev->helper.fbdev) {
+		info = ifbdev->helper.fbdev;
+		unregister_framebuffer(info);
+		iounmap(info->screen_base);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	drm_fb_helper_fini(&ifbdev->helper);
+
+	drm_framebuffer_unregister_private(&ifb->base);
+	drm_framebuffer_cleanup(&ifb->base);
+	if (ifb->obj) {
+		drm_gem_object_unreference_unlocked(&ifb->obj->base);
+		ifb->obj = NULL;
+	}
+}
+
+int intel_fbdev_init(struct drm_device *dev)
+{
+	struct intel_fbdev *ifbdev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+	if (!ifbdev)
+		return -ENOMEM;
+
+	dev_priv->fbdev = ifbdev;
+	ifbdev->helper.funcs = &intel_fb_helper_funcs;
+
+	ret = drm_fb_helper_init(dev, &ifbdev->helper,
+				 INTEL_INFO(dev)->num_pipes,
+				 INTELFB_CONN_LIMIT);
+	if (ret) {
+		kfree(ifbdev);
+		return ret;
+	}
+
+	drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
+
+	return 0;
+}
+
+void intel_fbdev_initial_config(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	/* Due to the peculiar init order w.r.t. hpd handling, this is separate. */
+	drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
+}
+
+void intel_fbdev_fini(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	if (!dev_priv->fbdev)
+		return;
+
+	intel_fbdev_destroy(dev, dev_priv->fbdev);
+	kfree(dev_priv->fbdev);
+	dev_priv->fbdev = NULL;
+}
+
+void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_fbdev *ifbdev = dev_priv->fbdev;
+	struct fb_info *info;
+
+	if (!ifbdev)
+		return;
+
+	info = ifbdev->helper.fbdev;
+
+	/* On resume from hibernation: If the object is shmemfs backed, it has
+	 * been restored from swap. If the object is stolen however, it will be
+	 * full of whatever garbage was left in there.
+	 */
+	if (!state && ifbdev->ifb.obj->stolen)
+		memset_io(info->screen_base, 0, info->screen_size);
+
+	fb_set_suspend(info, state);
+}
+
+MODULE_LICENSE("GPL and additional rights");
+
+void intel_fb_output_poll_changed(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+}
+
+void intel_fb_restore_mode(struct drm_device *dev)
+{
+	int ret;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_plane *plane;
+
+	if (INTEL_INFO(dev)->num_pipes == 0)
+		return;
+
+	drm_modeset_lock_all(dev);
+
+	ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
+	if (ret)
+		DRM_DEBUG("failed to restore crtc mode\n");
+
+	/* Be sure to shut off any planes that may be active */
+	list_for_each_entry(plane, &config->plane_list, head)
+		if (plane->enabled)
+			plane->funcs->disable_plane(plane);
+
+	drm_modeset_unlock_all(dev);
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_hdmi.c b/linux-imx/drivers/gpu/drm/i915/intel_hdmi.c
new file mode 100644
index 0000000..a905793
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_hdmi.c
@@ -0,0 +1,1107 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *	Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+{
+	return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+}
+
+static void
+assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
+{
+	struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t enabled_bits;
+
+	enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+
+	WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
+	     "HDMI port enabled, expecting disabled\n");
+}
+
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
+{
+	struct intel_digital_port *intel_dig_port =
+		container_of(encoder, struct intel_digital_port, base.base);
+	return &intel_dig_port->hdmi;
+}
+
+static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+{
+	return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
+}
+
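+/*
+ * The checksum is chosen so that all header and payload bytes, including
+ * the checksum itself, sum to zero modulo 256.
+ */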
+void intel_dip_infoframe_csum(struct dip_infoframe *frame)
+{
+	uint8_t *data = (uint8_t *)frame;
+	uint8_t sum = 0;
+	unsigned i;
+
+	frame->checksum = 0;
+	frame->ecc = 0;
+
+	for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
+		sum += data[i];
+
+	frame->checksum = 0x100 - sum;
+}
+
+static u32 g4x_infoframe_index(struct dip_infoframe *frame)
+{
+	switch (frame->type) {
+	case DIP_TYPE_AVI:
+		return VIDEO_DIP_SELECT_AVI;
+	case DIP_TYPE_SPD:
+		return VIDEO_DIP_SELECT_SPD;
+	default:
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		return 0;
+	}
+}
+
+static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
+{
+	switch (frame->type) {
+	case DIP_TYPE_AVI:
+		return VIDEO_DIP_ENABLE_AVI;
+	case DIP_TYPE_SPD:
+		return VIDEO_DIP_ENABLE_SPD;
+	default:
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		return 0;
+	}
+}
+
+static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
+{
+	switch (frame->type) {
+	case DIP_TYPE_AVI:
+		return VIDEO_DIP_ENABLE_AVI_HSW;
+	case DIP_TYPE_SPD:
+		return VIDEO_DIP_ENABLE_SPD_HSW;
+	default:
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		return 0;
+	}
+}
+
+static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
+				  enum transcoder cpu_transcoder)
+{
+	switch (frame->type) {
+	case DIP_TYPE_AVI:
+		return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
+	case DIP_TYPE_SPD:
+		return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
+	default:
+		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+		return 0;
+	}
+}
+
+static void g4x_write_infoframe(struct drm_encoder *encoder,
+				struct dip_infoframe *frame)
+{
+	uint32_t *data = (uint32_t *)frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val = I915_READ(VIDEO_DIP_CTL);
+	unsigned i, len = DIP_HEADER_SIZE + frame->len;
+
+	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(frame);
+
+	val &= ~g4x_infoframe_enable(frame);
+
+	I915_WRITE(VIDEO_DIP_CTL, val);
+
+	mmiowb();
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(VIDEO_DIP_DATA, *data);
+		data++;
+	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(VIDEO_DIP_DATA, 0);
+	mmiowb();
+
+	val |= g4x_infoframe_enable(frame);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
+
+	I915_WRITE(VIDEO_DIP_CTL, val);
+	POSTING_READ(VIDEO_DIP_CTL);
+}
+
+static void ibx_write_infoframe(struct drm_encoder *encoder,
+				struct dip_infoframe *frame)
+{
+	uint32_t *data = (uint32_t *)frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+	unsigned i, len = DIP_HEADER_SIZE + frame->len;
+	u32 val = I915_READ(reg);
+
+	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(frame);
+
+	val &= ~g4x_infoframe_enable(frame);
+
+	I915_WRITE(reg, val);
+
+	mmiowb();
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+		data++;
+	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+	mmiowb();
+
+	val |= g4x_infoframe_enable(frame);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+}
+
+static void cpt_write_infoframe(struct drm_encoder *encoder,
+				struct dip_infoframe *frame)
+{
+	uint32_t *data = (uint32_t *)frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+	unsigned i, len = DIP_HEADER_SIZE + frame->len;
+	u32 val = I915_READ(reg);
+
+	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(frame);
+
+	/* The DIP control register spec says that we need to update the AVI
+	 * infoframe without clearing its enable bit */
+	if (frame->type != DIP_TYPE_AVI)
+		val &= ~g4x_infoframe_enable(frame);
+
+	I915_WRITE(reg, val);
+
+	mmiowb();
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+		data++;
+	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+	mmiowb();
+
+	val |= g4x_infoframe_enable(frame);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+}
+
+static void vlv_write_infoframe(struct drm_encoder *encoder,
+				     struct dip_infoframe *frame)
+{
+	uint32_t *data = (uint32_t *)frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+	unsigned i, len = DIP_HEADER_SIZE + frame->len;
+	u32 val = I915_READ(reg);
+
+	WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+	val |= g4x_infoframe_index(frame);
+
+	val &= ~g4x_infoframe_enable(frame);
+
+	I915_WRITE(reg, val);
+
+	mmiowb();
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+		data++;
+	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+	mmiowb();
+
+	val |= g4x_infoframe_enable(frame);
+	val &= ~VIDEO_DIP_FREQ_MASK;
+	val |= VIDEO_DIP_FREQ_VSYNC;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+}
+
+static void hsw_write_infoframe(struct drm_encoder *encoder,
+				struct dip_infoframe *frame)
+{
+	uint32_t *data = (uint32_t *)frame;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+	u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder);
+	unsigned int i, len = DIP_HEADER_SIZE + frame->len;
+	u32 val = I915_READ(ctl_reg);
+
+	if (data_reg == 0)
+		return;
+
+	val &= ~hsw_infoframe_enable(frame);
+	I915_WRITE(ctl_reg, val);
+
+	mmiowb();
+	for (i = 0; i < len; i += 4) {
+		I915_WRITE(data_reg + i, *data);
+		data++;
+	}
+	/* Write every possible data byte to force correct ECC calculation. */
+	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+		I915_WRITE(data_reg + i, 0);
+	mmiowb();
+
+	val |= hsw_infoframe_enable(frame);
+	I915_WRITE(ctl_reg, val);
+	POSTING_READ(ctl_reg);
+}
+
+static void intel_set_infoframe(struct drm_encoder *encoder,
+				struct dip_infoframe *frame)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+	intel_dip_infoframe_csum(frame);
+	intel_hdmi->write_infoframe(encoder, frame);
+}
+
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+					 struct drm_display_mode *adjusted_mode)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct dip_infoframe avi_if = {
+		.type = DIP_TYPE_AVI,
+		.ver = DIP_VERSION_AVI,
+		.len = DIP_LEN_AVI,
+	};
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+		avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+
+	if (intel_hdmi->rgb_quant_range_selectable) {
+		if (intel_crtc->config.limited_color_range)
+			avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+		else
+			avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+	}
+
+	avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
+
+	intel_set_infoframe(encoder, &avi_if);
+}
+
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+{
+	struct dip_infoframe spd_if;
+
+	memset(&spd_if, 0, sizeof(spd_if));
+	spd_if.type = DIP_TYPE_SPD;
+	spd_if.ver = DIP_VERSION_SPD;
+	spd_if.len = DIP_LEN_SPD;
+	strcpy(spd_if.body.spd.vn, "Intel");
+	strcpy(spd_if.body.spd.pd, "Integrated gfx");
+	spd_if.body.spd.sdi = DIP_SPD_PC;
+
+	intel_set_infoframe(encoder, &spd_if);
+}
+
+static void g4x_set_infoframes(struct drm_encoder *encoder,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+	u32 reg = VIDEO_DIP_CTL;
+	u32 val = I915_READ(reg);
+	u32 port;
+
+	assert_hdmi_port_disabled(intel_hdmi);
+
+	/* If the registers were not initialized yet, they might be zeroes,
+	 * which means we're selecting the AVI DIP and setting its frequency
+	 * to "once". This seems to really confuse the HW and make things
+	 * stop working (the register spec says the AVI always needs to be
+	 * sent every VSync). So here we avoid writing to the register more
+	 * often than we need to, and we explicitly select the AVI DIP and
+	 * explicitly set its frequency to every VSync. Avoiding the double
+	 * write seems to be enough to solve the problem, but being defensive
+	 * shouldn't hurt us either. */
+	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+	if (!intel_hdmi->has_hdmi_sink) {
+		if (!(val & VIDEO_DIP_ENABLE))
+			return;
+		val &= ~VIDEO_DIP_ENABLE;
+		I915_WRITE(reg, val);
+		POSTING_READ(reg);
+		return;
+	}
+
+	switch (intel_dig_port->port) {
+	case PORT_B:
+		port = VIDEO_DIP_PORT_B;
+		break;
+	case PORT_C:
+		port = VIDEO_DIP_PORT_C;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	if (port != (val & VIDEO_DIP_PORT_MASK)) {
+		if (val & VIDEO_DIP_ENABLE) {
+			val &= ~VIDEO_DIP_ENABLE;
+			I915_WRITE(reg, val);
+			POSTING_READ(reg);
+		}
+		val &= ~VIDEO_DIP_PORT_MASK;
+		val |= port;
+	}
+
+	val |= VIDEO_DIP_ENABLE;
+	val &= ~VIDEO_DIP_ENABLE_VENDOR;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+
+	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void ibx_set_infoframes(struct drm_encoder *encoder,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+	u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 val = I915_READ(reg);
+	u32 port;
+
+	assert_hdmi_port_disabled(intel_hdmi);
+
+	/* See the big comment in g4x_set_infoframes() */
+	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+	if (!intel_hdmi->has_hdmi_sink) {
+		if (!(val & VIDEO_DIP_ENABLE))
+			return;
+		val &= ~VIDEO_DIP_ENABLE;
+		I915_WRITE(reg, val);
+		POSTING_READ(reg);
+		return;
+	}
+
+	switch (intel_dig_port->port) {
+	case PORT_B:
+		port = VIDEO_DIP_PORT_B;
+		break;
+	case PORT_C:
+		port = VIDEO_DIP_PORT_C;
+		break;
+	case PORT_D:
+		port = VIDEO_DIP_PORT_D;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	if (port != (val & VIDEO_DIP_PORT_MASK)) {
+		if (val & VIDEO_DIP_ENABLE) {
+			val &= ~VIDEO_DIP_ENABLE;
+			I915_WRITE(reg, val);
+			POSTING_READ(reg);
+		}
+		val &= ~VIDEO_DIP_PORT_MASK;
+		val |= port;
+	}
+
+	val |= VIDEO_DIP_ENABLE;
+	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		 VIDEO_DIP_ENABLE_GCP);
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+
+	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void cpt_set_infoframes(struct drm_encoder *encoder,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 val = I915_READ(reg);
+
+	assert_hdmi_port_disabled(intel_hdmi);
+
+	/* See the big comment in g4x_set_infoframes() */
+	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+	if (!intel_hdmi->has_hdmi_sink) {
+		if (!(val & VIDEO_DIP_ENABLE))
+			return;
+		val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
+		I915_WRITE(reg, val);
+		POSTING_READ(reg);
+		return;
+	}
+
+	/* Set both together, unset both together: see the spec. */
+	val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
+	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		 VIDEO_DIP_ENABLE_GCP);
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+
+	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void vlv_set_infoframes(struct drm_encoder *encoder,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+	u32 val = I915_READ(reg);
+
+	assert_hdmi_port_disabled(intel_hdmi);
+
+	/* See the big comment in g4x_set_infoframes() */
+	val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+	if (!intel_hdmi->has_hdmi_sink) {
+		if (!(val & VIDEO_DIP_ENABLE))
+			return;
+		val &= ~VIDEO_DIP_ENABLE;
+		I915_WRITE(reg, val);
+		POSTING_READ(reg);
+		return;
+	}
+
+	val |= VIDEO_DIP_ENABLE;
+	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+		 VIDEO_DIP_ENABLE_GCP);
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+
+	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void hsw_set_infoframes(struct drm_encoder *encoder,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
+	u32 val = I915_READ(reg);
+
+	assert_hdmi_port_disabled(intel_hdmi);
+
+	if (!intel_hdmi->has_hdmi_sink) {
+		I915_WRITE(reg, 0);
+		POSTING_READ(reg);
+		return;
+	}
+
+	val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
+		 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+
+	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void intel_hdmi_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	u32 hdmi_val;
+
+	hdmi_val = SDVO_ENCODING_HDMI;
+	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
+		hdmi_val |= intel_hdmi->color_range;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
+
+	if (intel_crtc->config.pipe_bpp > 24)
+		hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
+	else
+		hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
+
+	/* Required on CPT */
+	if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
+		hdmi_val |= HDMI_MODE_SELECT_HDMI;
+
+	if (intel_hdmi->has_audio) {
+		DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+				 pipe_name(intel_crtc->pipe));
+		hdmi_val |= SDVO_AUDIO_ENABLE;
+		hdmi_val |= HDMI_MODE_SELECT_HDMI;
+		intel_write_eld(encoder, adjusted_mode);
+	}
+
+	if (HAS_PCH_CPT(dev))
+		hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+	else
+		hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);
+
+	I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+
+	intel_hdmi->set_infoframes(encoder, adjusted_mode);
+}
+
+static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
+				    enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 tmp;
+
+	tmp = I915_READ(intel_hdmi->hdmi_reg);
+
+	if (!(tmp & SDVO_ENABLE))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_enable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 temp;
+	u32 enable_bits = SDVO_ENABLE;
+
+	if (intel_hdmi->has_audio)
+		enable_bits |= SDVO_AUDIO_ENABLE;
+
+	temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	/* HW workaround for IBX: the port must be moved to transcoder A
+	 * before disabling it, so restore the transcoder select bit here. */
+	if (HAS_PCH_IBX(dev))
+		enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe);
+
+	/* HW workaround: the enable bit needs to be toggled off and on for
+	 * 12bpc, but we do it unconditionally, as that proved more stable
+	 * in testing.
+	 */
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+	}
+
+	temp |= enable_bits;
+
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+
+	/* HW workaround, need to write this twice for issue that may result
+	 * in first write getting masked.
+	 */
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+	}
+}
+
+static void intel_disable_hdmi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	u32 temp;
+	u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
+
+	temp = I915_READ(intel_hdmi->hdmi_reg);
+
+	/* HW workaround for IBX: we need to move the port to transcoder A
+	 * before disabling it. */
+	if (HAS_PCH_IBX(dev)) {
+		struct drm_crtc *crtc = encoder->base.crtc;
+		int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+		if (temp & SDVO_PIPE_B_SELECT) {
+			temp &= ~SDVO_PIPE_B_SELECT;
+			I915_WRITE(intel_hdmi->hdmi_reg, temp);
+			POSTING_READ(intel_hdmi->hdmi_reg);
+
+			/* Again we need to write this twice. */
+			I915_WRITE(intel_hdmi->hdmi_reg, temp);
+			POSTING_READ(intel_hdmi->hdmi_reg);
+
+			/* Transcoder selection bits only update
+			 * effectively on vblank. */
+			if (crtc)
+				intel_wait_for_vblank(dev, pipe);
+			else
+				msleep(50);
+		}
+	}
+
+	/* HW workaround: the enable bit needs to be toggled off and on for
+	 * 12bpc, but we do it unconditionally, as that proved more stable
+	 * in testing.
+	 */
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+	}
+
+	temp &= ~enable_bits;
+
+	I915_WRITE(intel_hdmi->hdmi_reg, temp);
+	POSTING_READ(intel_hdmi->hdmi_reg);
+
+	/* HW workaround, need to write this twice for issue that may result
+	 * in first write getting masked.
+	 */
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(intel_hdmi->hdmi_reg, temp);
+		POSTING_READ(intel_hdmi->hdmi_reg);
+	}
+}
+
+static int intel_hdmi_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
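+	/* Single-link TMDS tops out at a 165 MHz pixel clock. */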
+	if (mode->clock > 165000)
+		return MODE_CLOCK_HIGH;
+	if (mode->clock < 20000)
+		return MODE_CLOCK_LOW;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	return MODE_OK;
+}
+
+bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+			       struct intel_crtc_config *pipe_config)
+{
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+
+	if (intel_hdmi->color_range_auto) {
+		/* See CEA-861-E - 5.1 Default Encoding Parameters */
+		if (intel_hdmi->has_hdmi_sink &&
+		    drm_match_cea_mode(adjusted_mode) > 1)
+			intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
+		else
+			intel_hdmi->color_range = 0;
+	}
+
+	if (intel_hdmi->color_range)
+		pipe_config->limited_color_range = true;
+
+	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
+		pipe_config->has_pch_encoder = true;
+
+	/*
+	 * HDMI is either 12 or 8 bpc, so if the display lets 10 bpc sneak
+	 * through, clamp it down. Note that g4x/vlv don't support 12 bpc
+	 * HDMI outputs.
+	 */
+	if (pipe_config->pipe_bpp > 8*3 && HAS_PCH_SPLIT(dev)) {
+		DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
+		pipe_config->pipe_bpp = 12*3;
+	} else {
+		DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
+		pipe_config->pipe_bpp = 8*3;
+	}
+
+	return true;
+}
+
+static enum drm_connector_status
+intel_hdmi_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_digital_port *intel_dig_port =
+		hdmi_to_dig_port(intel_hdmi);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct edid *edid;
+	enum drm_connector_status status = connector_status_disconnected;
+
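+	/*
+	 * Reset the cached sink capabilities and re-derive them from a
+	 * fresh EDID read; only sinks reporting a digital input are
+	 * treated as connected.
+	 */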
+	intel_hdmi->has_hdmi_sink = false;
+	intel_hdmi->has_audio = false;
+	intel_hdmi->rgb_quant_range_selectable = false;
+	edid = drm_get_edid(connector,
+			    intel_gmbus_get_adapter(dev_priv,
+						    intel_hdmi->ddc_bus));
+
+	if (edid) {
+		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+			status = connector_status_connected;
+			if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
+				intel_hdmi->has_hdmi_sink =
+						drm_detect_hdmi_monitor(edid);
+			intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+			intel_hdmi->rgb_quant_range_selectable =
+				drm_rgb_quant_range_selectable(edid);
+		}
+		kfree(edid);
+	}
+
+	if (status == connector_status_connected) {
+		if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
+			intel_hdmi->has_audio =
+				(intel_hdmi->force_audio == HDMI_AUDIO_ON);
+		intel_encoder->type = INTEL_OUTPUT_HDMI;
+	}
+
+	return status;
+}
+
+static int intel_hdmi_get_modes(struct drm_connector *connector)
+{
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+	/* We should parse the EDID data and find out if it's an HDMI sink so
+	 * we can send audio to it.
+	 */
+
+	return intel_ddc_get_modes(connector,
+				   intel_gmbus_get_adapter(dev_priv,
+							   intel_hdmi->ddc_bus));
+}
+
+static bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
+{
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct edid *edid;
+	bool has_audio = false;
+
+	edid = drm_get_edid(connector,
+			    intel_gmbus_get_adapter(dev_priv,
+						    intel_hdmi->ddc_bus));
+	if (edid) {
+		if (edid->input & DRM_EDID_INPUT_DIGITAL)
+			has_audio = drm_detect_monitor_audio(edid);
+		kfree(edid);
+	}
+
+	return has_audio;
+}
+
+static int
+intel_hdmi_set_property(struct drm_connector *connector,
+			struct drm_property *property,
+			uint64_t val)
+{
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct intel_digital_port *intel_dig_port =
+		hdmi_to_dig_port(intel_hdmi);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	int ret;
+
+	ret = drm_object_property_set_value(&connector->base, property, val);
+	if (ret)
+		return ret;
+
+	if (property == dev_priv->force_audio_property) {
+		enum hdmi_force_audio i = val;
+		bool has_audio;
+
+		if (i == intel_hdmi->force_audio)
+			return 0;
+
+		intel_hdmi->force_audio = i;
+
+		if (i == HDMI_AUDIO_AUTO)
+			has_audio = intel_hdmi_detect_audio(connector);
+		else
+			has_audio = (i == HDMI_AUDIO_ON);
+
+		if (i == HDMI_AUDIO_OFF_DVI)
+			intel_hdmi->has_hdmi_sink = false;
+
+		intel_hdmi->has_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		bool old_auto = intel_hdmi->color_range_auto;
+		uint32_t old_range = intel_hdmi->color_range;
+
+		switch (val) {
+		case INTEL_BROADCAST_RGB_AUTO:
+			intel_hdmi->color_range_auto = true;
+			break;
+		case INTEL_BROADCAST_RGB_FULL:
+			intel_hdmi->color_range_auto = false;
+			intel_hdmi->color_range = 0;
+			break;
+		case INTEL_BROADCAST_RGB_LIMITED:
+			intel_hdmi->color_range_auto = false;
+			intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (old_auto == intel_hdmi->color_range_auto &&
+		    old_range == intel_hdmi->color_range)
+			return 0;
+
+		goto done;
+	}
+
+	return -EINVAL;
+
+done:
+	if (intel_dig_port->base.base.crtc)
+		intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
+
+	return 0;
+}
+
+static void intel_hdmi_destroy(struct drm_connector *connector)
+{
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
+	.mode_set = intel_hdmi_mode_set,
+};
+
+static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
+	.dpms = intel_connector_dpms,
+	.detect = intel_hdmi_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = intel_hdmi_set_property,
+	.destroy = intel_hdmi_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
+	.get_modes = intel_hdmi_get_modes,
+	.mode_valid = intel_hdmi_mode_valid,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
+	.destroy = intel_encoder_destroy,
+};
+
+static void
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+{
+	intel_attach_force_audio_property(connector);
+	intel_attach_broadcast_rgb_property(connector);
+	intel_hdmi->color_range_auto = true;
+}
+
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+			       struct intel_connector *intel_connector)
+{
+	struct drm_connector *connector = &intel_connector->base;
+	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+
+	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
+			   DRM_MODE_CONNECTOR_HDMIA);
+	drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
+
+	connector->interlace_allowed = true;
+	connector->doublescan_allowed = false;
+
+	switch (port) {
+	case PORT_B:
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+		intel_encoder->hpd_pin = HPD_PORT_B;
+		break;
+	case PORT_C:
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+		intel_encoder->hpd_pin = HPD_PORT_C;
+		break;
+	case PORT_D:
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+		intel_encoder->hpd_pin = HPD_PORT_D;
+		break;
+	case PORT_A:
+		intel_encoder->hpd_pin = HPD_PORT_A;
+		/* Internal port only for eDP. */
+	default:
+		BUG();
+	}
+
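+	/*
+	 * Pick the infoframe write functions that match this platform's
+	 * DIP register layout.
+	 */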
+	if (IS_VALLEYVIEW(dev)) {
+		intel_hdmi->write_infoframe = vlv_write_infoframe;
+		intel_hdmi->set_infoframes = vlv_set_infoframes;
+	} else if (!HAS_PCH_SPLIT(dev)) {
+		intel_hdmi->write_infoframe = g4x_write_infoframe;
+		intel_hdmi->set_infoframes = g4x_set_infoframes;
+	} else if (HAS_DDI(dev)) {
+		intel_hdmi->write_infoframe = hsw_write_infoframe;
+		intel_hdmi->set_infoframes = hsw_set_infoframes;
+	} else if (HAS_PCH_IBX(dev)) {
+		intel_hdmi->write_infoframe = ibx_write_infoframe;
+		intel_hdmi->set_infoframes = ibx_set_infoframes;
+	} else {
+		intel_hdmi->write_infoframe = cpt_write_infoframe;
+		intel_hdmi->set_infoframes = cpt_set_infoframes;
+	}
+
+	if (HAS_DDI(dev))
+		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+	else
+		intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+	intel_hdmi_add_properties(intel_hdmi, connector);
+
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
+	drm_sysfs_connector_add(connector);
+
+	/* For the G4X desktop chip, bits 3:0 of PEG_BAND_GAP_DATA must first
+	 * be written as 0xd.  Failure to do so will result in spurious
+	 * interrupts being generated on the port when a cable is not
+	 * attached.
+	 */
+	if (IS_G4X(dev) && !IS_GM45(dev)) {
+		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+	}
+}
+
+void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
+{
+	struct intel_digital_port *intel_dig_port;
+	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
+	struct intel_connector *intel_connector;
+
+	intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+	if (!intel_dig_port)
+		return;
+
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_dig_port);
+		return;
+	}
+
+	intel_encoder = &intel_dig_port->base;
+	encoder = &intel_encoder->base;
+
+	drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+	drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+	intel_encoder->compute_config = intel_hdmi_compute_config;
+	intel_encoder->enable = intel_enable_hdmi;
+	intel_encoder->disable = intel_disable_hdmi;
+	intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+
+	intel_encoder->type = INTEL_OUTPUT_HDMI;
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	intel_encoder->cloneable = false;
+
+	intel_dig_port->port = port;
+	intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
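+	/* This port carries HDMI only, so leave the DP side unused. */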
+	intel_dig_port->dp.output_reg = 0;
+
+	intel_hdmi_init_connector(intel_dig_port, intel_connector);
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_i2c.c b/linux-imx/drivers/gpu/drm/i915/intel_i2c.c
new file mode 100644
index 0000000..639fe19
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_i2c.c
@@ -0,0 +1,615 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2008,2010 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *	Chris Wilson <chris@chris-wilson.co.uk>
+ */
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+struct gmbus_port {
+	const char *name;
+	int reg;
+};
+
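+/*
+ * This table is indexed by GMBUS pin pair number minus one (see
+ * intel_gpio_setup()), so the entry order must match the GMBUS_PORT_*
+ * numbering.
+ */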
+static const struct gmbus_port gmbus_ports[] = {
+	{ "ssc", GPIOB },
+	{ "vga", GPIOA },
+	{ "panel", GPIOC },
+	{ "dpc", GPIOD },
+	{ "dpb", GPIOE },
+	{ "dpd", GPIOF },
+};
+
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 10
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+	return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+void
+intel_i2c_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
+	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+{
+	u32 val;
+
+	/* When using bit-banging for I2C, this bit needs to be set to 1 */
+	if (!IS_PINEVIEW(dev_priv->dev))
+		return;
+
+	val = I915_READ(DSPCLK_GATE_D);
+	if (enable)
+		val |= DPCUNIT_CLOCK_GATE_DISABLE;
+	else
+		val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(DSPCLK_GATE_D, val);
+}
+
+static u32 get_reserved(struct intel_gmbus *bus)
+{
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct drm_device *dev = dev_priv->dev;
+	u32 reserved = 0;
+
+	/* On most chips, these bits must be preserved in software. */
+	if (!IS_I830(dev) && !IS_845G(dev))
+		reserved = I915_READ_NOTRACE(bus->gpio_reg) &
+					     (GPIO_DATA_PULLUP_DISABLE |
+					      GPIO_CLOCK_PULLUP_DISABLE);
+
+	return reserved;
+}
+
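+/*
+ * The bit-banging callbacks below emulate an open-drain bus: a line is
+ * released by programming the GPIO as an input and driven low by
+ * programming it as an output with the value forced to zero. The
+ * getters release the line before sampling it.
+ */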
+static int get_clock(void *data)
+{
+	struct intel_gmbus *bus = data;
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	u32 reserved = get_reserved(bus);
+	I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
+	I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+	return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+	struct intel_gmbus *bus = data;
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	u32 reserved = get_reserved(bus);
+	I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
+	I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+	return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+	struct intel_gmbus *bus = data;
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	u32 reserved = get_reserved(bus);
+	u32 clock_bits;
+
+	if (state_high)
+		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+	else
+		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+			GPIO_CLOCK_VAL_MASK;
+
+	I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits);
+	POSTING_READ(bus->gpio_reg);
+}
+
+static void set_data(void *data, int state_high)
+{
+	struct intel_gmbus *bus = data;
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	u32 reserved = get_reserved(bus);
+	u32 data_bits;
+
+	if (state_high)
+		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+	else
+		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+			GPIO_DATA_VAL_MASK;
+
+	I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits);
+	POSTING_READ(bus->gpio_reg);
+}
+
+static int
+intel_gpio_pre_xfer(struct i2c_adapter *adapter)
+{
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+
+	intel_i2c_reset(dev_priv->dev);
+	intel_i2c_quirk_set(dev_priv, true);
+	set_data(bus, 1);
+	set_clock(bus, 1);
+	udelay(I2C_RISEFALL_TIME);
+	return 0;
+}
+
+static void
+intel_gpio_post_xfer(struct i2c_adapter *adapter)
+{
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+
+	set_data(bus, 1);
+	set_clock(bus, 1);
+	intel_i2c_quirk_set(dev_priv, false);
+}
+
+static void
+intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
+{
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	struct i2c_algo_bit_data *algo;
+
+	algo = &bus->bit_algo;
+
+	/* -1 to map pin pair to gmbus index */
+	bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
+
+	bus->adapter.algo_data = algo;
+	algo->setsda = set_data;
+	algo->setscl = set_clock;
+	algo->getsda = get_data;
+	algo->getscl = get_clock;
+	algo->pre_xfer = intel_gpio_pre_xfer;
+	algo->post_xfer = intel_gpio_post_xfer;
+	algo->udelay = I2C_RISEFALL_TIME;
+	algo->timeout = usecs_to_jiffies(2200);
+	algo->data = bus;
+}
+
+/*
+ * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
+ * mode. This results in spurious interrupt warnings if the legacy irq no. is
+ * shared with another device. The kernel then disables that interrupt source
+ * and so prevents the other device from working properly.
+ */
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+static int
+gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
+		     u32 gmbus2_status,
+		     u32 gmbus4_irq_en)
+{
+	int i;
+	int reg_offset = dev_priv->gpio_mmio_base;
+	u32 gmbus2 = 0;
+	DEFINE_WAIT(wait);
+
+	if (!HAS_GMBUS_IRQ(dev_priv->dev))
+		gmbus4_irq_en = 0;
+
+	/* Important: The hw handles only the first bit, so set only one! Since
+	 * we also need to check for NAKs besides the hw ready/idle signal, we
+	 * need to wake up periodically and check that ourselves. */
+	I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
+
+	for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
+		prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+
+		gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset);
+		if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
+			break;
+
+		schedule_timeout(1);
+	}
+	finish_wait(&dev_priv->gmbus_wait_queue, &wait);
+
+	I915_WRITE(GMBUS4 + reg_offset, 0);
+
+	if (gmbus2 & GMBUS_SATOER)
+		return -ENXIO;
+	if (gmbus2 & gmbus2_status)
+		return 0;
+	return -ETIMEDOUT;
+}
+
+static int
+gmbus_wait_idle(struct drm_i915_private *dev_priv)
+{
+	int ret;
+	int reg_offset = dev_priv->gpio_mmio_base;
+
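+/* Completion condition shared by the polled and irq-driven paths: the
+ * GMBUS controller has gone idle. */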
+#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)
+
+	if (!HAS_GMBUS_IRQ(dev_priv->dev))
+		return wait_for(C, 10);
+
+	/* Important: The hw handles only the first bit, so set only one! */
+	I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
+
+	ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+				 msecs_to_jiffies_timeout(10));
+
+	I915_WRITE(GMBUS4 + reg_offset, 0);
+
+	if (ret)
+		return 0;
+	else
+		return -ETIMEDOUT;
+#undef C
+}
+
+static int
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+		u32 gmbus1_index)
+{
+	int reg_offset = dev_priv->gpio_mmio_base;
+	u16 len = msg->len;
+	u8 *buf = msg->buf;
+
+	I915_WRITE(GMBUS1 + reg_offset,
+		   gmbus1_index |
+		   GMBUS_CYCLE_WAIT |
+		   (len << GMBUS_BYTE_COUNT_SHIFT) |
+		   (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+		   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+	while (len) {
+		int ret;
+		u32 val, loop = 0;
+
+		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+					   GMBUS_HW_RDY_EN);
+		if (ret)
+			return ret;
+
+		val = I915_READ(GMBUS3 + reg_offset);
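+		/* GMBUS3 yields up to four bytes per HW_RDY cycle, least
+		 * significant byte first. */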
+		do {
+			*buf++ = val & 0xff;
+			val >>= 8;
+		} while (--len && ++loop < 4);
+	}
+
+	return 0;
+}
+
+static int
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+{
+	int reg_offset = dev_priv->gpio_mmio_base;
+	u16 len = msg->len;
+	u8 *buf = msg->buf;
+	u32 val, loop;
+
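+	/* Preload the first (up to) four bytes into GMBUS3; the controller
+	 * transmits the least significant byte first. */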
+	val = loop = 0;
+	while (len && loop < 4) {
+		val |= *buf++ << (8 * loop++);
+		len -= 1;
+	}
+
+	I915_WRITE(GMBUS3 + reg_offset, val);
+	I915_WRITE(GMBUS1 + reg_offset,
+		   GMBUS_CYCLE_WAIT |
+		   (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
+		   (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+		   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+	while (len) {
+		int ret;
+
+		val = loop = 0;
+		do {
+			val |= *buf++ << (8 * loop);
+		} while (--len && ++loop < 4);
+
+		I915_WRITE(GMBUS3 + reg_offset, val);
+
+		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+					   GMBUS_HW_RDY_EN);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/*
+ * The gmbus controller can combine a 1 or 2 byte write with a read that
+ * immediately follows it by using an "INDEX" cycle.
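+ *
+ * This matches the usual DDC/EDID access pattern. An illustrative message
+ * pair (address and lengths are examples only):
+ *   msgs[0]: addr 0x50, flags 0,        len 1, buf {0x00}  - offset write
+ *   msgs[1]: addr 0x50, flags I2C_M_RD, len 128            - EDID block read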
+ */
+static bool
+gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
+{
+	return (i + 1 < num &&
+		!(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+		(msgs[i + 1].flags & I2C_M_RD));
+}
+
+static int
+gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+{
+	int reg_offset = dev_priv->gpio_mmio_base;
+	u32 gmbus1_index = 0;
+	u32 gmbus5 = 0;
+	int ret;
+
+	if (msgs[0].len == 2)
+		gmbus5 = GMBUS_2BYTE_INDEX_EN |
+			 msgs[0].buf[1] | (msgs[0].buf[0] << 8);
+	if (msgs[0].len == 1)
+		gmbus1_index = GMBUS_CYCLE_INDEX |
+			       (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
+
+	/* GMBUS5 holds 16-bit index */
+	if (gmbus5)
+		I915_WRITE(GMBUS5 + reg_offset, gmbus5);
+
+	ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+
+	/* Clear GMBUS5 after each index transfer */
+	if (gmbus5)
+		I915_WRITE(GMBUS5 + reg_offset, 0);
+
+	return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+	   struct i2c_msg *msgs,
+	   int num)
+{
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = bus->dev_priv;
+	int i, reg_offset;
+	int ret = 0;
+
+	mutex_lock(&dev_priv->gmbus_mutex);
+
+	if (bus->force_bit) {
+		ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+		goto out;
+	}
+
+	reg_offset = dev_priv->gpio_mmio_base;
+
+	I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+	for (i = 0; i < num; i++) {
+		if (gmbus_is_index_read(msgs, i, num)) {
+			ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+			i += 1;  /* set i to the index of the read xfer */
+		} else if (msgs[i].flags & I2C_M_RD) {
+			ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+		} else {
+			ret = gmbus_xfer_write(dev_priv, &msgs[i]);
+		}
+
+		if (ret == -ETIMEDOUT)
+			goto timeout;
+		if (ret == -ENXIO)
+			goto clear_err;
+
+		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
+					   GMBUS_HW_WAIT_EN);
+		if (ret == -ENXIO)
+			goto clear_err;
+		if (ret)
+			goto timeout;
+	}
+
+	/* Generate a STOP condition on the bus. Note that gmbus can't generate
+	 * a STOP on the very first cycle. To simplify the code we
+	 * unconditionally generate the STOP condition with an additional gmbus
+	 * cycle. */
+	I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+
+	/* Mark the GMBUS interface as disabled after waiting for idle.
+	 * We will re-enable it at the start of the next xfer,
+	 * till then let it sleep.
+	 */
+	if (gmbus_wait_idle(dev_priv)) {
+		DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
+			 adapter->name);
+		ret = -ETIMEDOUT;
+	}
+	I915_WRITE(GMBUS0 + reg_offset, 0);
+	ret = ret ?: i;
+	goto out;
+
+clear_err:
+	/*
+	 * Wait for bus to IDLE before clearing NAK.
+	 * If we clear the NAK while bus is still active, then it will stay
+	 * active and the next transaction may fail.
+	 *
+	 * If no ACK is received during the address phase of a transaction, the
+	 * adapter must report -ENXIO. It is not clear what to return if no ACK
+	 * is received at other times. But we have to be careful to not return
+	 * spurious -ENXIO because that will prevent i2c and drm edid functions
+	 * from retrying. So return -ENXIO only when gmbus properly quiesces -
+	 * timing out seems to happen when there _is_ a ddc chip present, but
+	 * it's slow responding and only answers on the 2nd retry.
+	 */
+	ret = -ENXIO;
+	if (gmbus_wait_idle(dev_priv)) {
+		DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
+			      adapter->name);
+		ret = -ETIMEDOUT;
+	}
+
+	/* Toggle the Software Clear Interrupt bit. This has the effect
+	 * of resetting the GMBUS controller and so clearing the
+	 * BUS_ERROR raised by the slave's NAK.
+	 */
+	I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+	I915_WRITE(GMBUS1 + reg_offset, 0);
+	I915_WRITE(GMBUS0 + reg_offset, 0);
+
+	DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+			 adapter->name, msgs[i].addr,
+			 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+
+	goto out;
+
+timeout:
+	DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+		 bus->adapter.name, bus->reg0 & 0xff);
+	I915_WRITE(GMBUS0 + reg_offset, 0);
+
+	/* The hardware may not support GMBUS on these pins; fall back to GPIO bit-banging. */
+	bus->force_bit = 1;
+	ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
+
+out:
+	mutex_unlock(&dev_priv->gmbus_mutex);
+	return ret;
+}
+
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+	return i2c_bit_algo.functionality(adapter) &
+		(I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+		/* I2C_FUNC_10BIT_ADDR | */
+		I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+		I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+	.master_xfer	= gmbus_xfer,
+	.functionality	= gmbus_func
+};
+
+/**
+ * intel_setup_gmbus - instantiate all Intel i2c GMBuses
+ * @dev: DRM device
+ */
+int intel_setup_gmbus(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i;
+
+	if (HAS_PCH_NOP(dev))
+		return 0;
+	else if (HAS_PCH_SPLIT(dev))
+		dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
+	else if (IS_VALLEYVIEW(dev))
+		dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
+	else
+		dev_priv->gpio_mmio_base = 0;
+
+	mutex_init(&dev_priv->gmbus_mutex);
+	init_waitqueue_head(&dev_priv->gmbus_wait_queue);
+
+	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+		struct intel_gmbus *bus = &dev_priv->gmbus[i];
+		u32 port = i + 1; /* +1 to map gmbus index to pin pair */
+
+		bus->adapter.owner = THIS_MODULE;
+		bus->adapter.class = I2C_CLASS_DDC;
+		snprintf(bus->adapter.name,
+			 sizeof(bus->adapter.name),
+			 "i915 gmbus %s",
+			 gmbus_ports[i].name);
+
+		bus->adapter.dev.parent = &dev->pdev->dev;
+		bus->dev_priv = dev_priv;
+
+		bus->adapter.algo = &gmbus_algorithm;
+
+		/* By default use a conservative clock rate */
+		bus->reg0 = port | GMBUS_RATE_100KHZ;
+
+		/* gmbus seems to be broken on i830 */
+		if (IS_I830(dev))
+			bus->force_bit = 1;
+
+		intel_gpio_setup(bus, port);
+
+		ret = i2c_add_adapter(&bus->adapter);
+		if (ret)
+			goto err;
+	}
+
+	intel_i2c_reset(dev_priv->dev);
+
+	return 0;
+
+err:
+	while (i--) {
+		struct intel_gmbus *bus = &dev_priv->gmbus[i];
+		i2c_del_adapter(&bus->adapter);
+	}
+	return ret;
+}
+
+struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+					    unsigned port)
+{
+	WARN_ON(!intel_gmbus_is_port_valid(port));
+	/* -1 to map pin pair to gmbus index */
+	return (intel_gmbus_is_port_valid(port)) ?
+		&dev_priv->gmbus[port - 1].adapter : NULL;
+}
+
+void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+	struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
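+	/* The bus rate lives in bits 9:8 of GMBUS0; replace it with the
+	 * requested rate, e.g. GMBUS_RATE_400KHZ. */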
+	bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed;
+}
+
+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+	struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
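+	/* force_bit is a reference count, so nested enable/disable requests
+	 * pair up correctly. */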
+	bus->force_bit += force_bit ? 1 : -1;
+	DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
+		      force_bit ? "en" : "dis", adapter->name,
+		      bus->force_bit);
+}
+
+void intel_teardown_gmbus(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+		struct intel_gmbus *bus = &dev_priv->gmbus[i];
+		i2c_del_adapter(&bus->adapter);
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_lvds.c b/linux-imx/drivers/gpu/drm/i915/intel_lvds.c
new file mode 100644
index 0000000..f77d42f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_lvds.c
@@ -0,0 +1,1315 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <acpi/button.h>
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include <linux/acpi.h>
+
+/* Private structure for the integrated LVDS support */
+struct intel_lvds_connector {
+	struct intel_connector base;
+
+	struct notifier_block lid_notifier;
+};
+
+struct intel_lvds_encoder {
+	struct intel_encoder base;
+
+	u32 pfit_control;
+	u32 pfit_pgm_ratios;
+	bool is_dual_link;
+	u32 reg;
+
+	struct intel_lvds_connector *attached_connector;
+};
+
+static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_lvds_encoder, base.base);
+}
+
+static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
+{
+	return container_of(connector, struct intel_lvds_connector, base.base);
+}
+
+static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
+				    enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	u32 tmp;
+
+	tmp = I915_READ(lvds_encoder->reg);
+
+	if (!(tmp & LVDS_PORT_EN))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+/* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+{
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct drm_display_mode *fixed_mode =
+		lvds_encoder->attached_connector->base.panel.fixed_mode;
+	int pipe = intel_crtc->pipe;
+	u32 temp;
+
+	temp = I915_READ(lvds_encoder->reg);
+	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~PORT_TRANS_SEL_MASK;
+		temp |= PORT_TRANS_SEL_CPT(pipe);
+	} else {
+		if (pipe == 1) {
+			temp |= LVDS_PIPEB_SELECT;
+		} else {
+			temp &= ~LVDS_PIPEB_SELECT;
+		}
+	}
+
+	/* set the corresponding LVDS_BORDER bit */
+	temp |= dev_priv->lvds_border_bits;
+	/* Set the B0-B3 data pairs corresponding to whether we're going to
+	 * set the DPLLs for dual-channel mode or not.
+	 */
+	if (lvds_encoder->is_dual_link)
+		temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+	else
+		temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+	/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+	 * appropriately here, but we need to look more thoroughly into how
+	 * panels behave in the two modes.
+	 */
+
+	/* Set the dithering flag on LVDS as needed, note that there is no
+	 * special lvds dither control bit on pch-split platforms, dithering is
+	 * only controlled through the PIPECONF reg. */
+	if (INTEL_INFO(dev)->gen == 4) {
+		if (dev_priv->lvds_dither)
+			temp |= LVDS_ENABLE_DITHER;
+		else
+			temp &= ~LVDS_ENABLE_DITHER;
+	}
+	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+	if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+		temp |= LVDS_HSYNC_POLARITY;
+	if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+		temp |= LVDS_VSYNC_POLARITY;
+
+	I915_WRITE(lvds_encoder->reg, temp);
+}
+
+static void intel_pre_enable_lvds(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (HAS_PCH_SPLIT(dev) || !enc->pfit_control)
+		return;
+
+	/*
+	 * Enable automatic panel scaling so that non-native modes
+	 * fill the screen.  The panel fitter should only be
+	 * adjusted whilst the pipe is disabled, according to
+	 * register description and PRM.
+	 */
+	DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+		      enc->pfit_control,
+		      enc->pfit_pgm_ratios);
+
+	I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios);
+	I915_WRITE(PFIT_CONTROL, enc->pfit_control);
+}
+
+/**
+ * Sets the power state for the panel.
+ */
+static void intel_enable_lvds(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 ctl_reg, stat_reg;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		ctl_reg = PCH_PP_CONTROL;
+		stat_reg = PCH_PP_STATUS;
+	} else {
+		ctl_reg = PP_CONTROL;
+		stat_reg = PP_STATUS;
+	}
+
+	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
+
+	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+	POSTING_READ(lvds_encoder->reg);
+	if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
+		DRM_ERROR("timed out waiting for panel to power on\n");
+
+	intel_panel_enable_backlight(dev, intel_crtc->pipe);
+}
+
+static void intel_disable_lvds(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 ctl_reg, stat_reg;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		ctl_reg = PCH_PP_CONTROL;
+		stat_reg = PCH_PP_STATUS;
+	} else {
+		ctl_reg = PP_CONTROL;
+		stat_reg = PP_STATUS;
+	}
+
+	intel_panel_disable_backlight(dev);
+
+	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+	if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
+		DRM_ERROR("timed out waiting for panel to power off\n");
+
+	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
+	POSTING_READ(lvds_encoder->reg);
+}
+
+static int intel_lvds_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+
+	if (mode->hdisplay > fixed_mode->hdisplay)
+		return MODE_PANEL;
+	if (mode->vdisplay > fixed_mode->vdisplay)
+		return MODE_PANEL;
+
+	return MODE_OK;
+}
+
+static void
+centre_horizontally(struct drm_display_mode *mode,
+		    int width)
+{
+	u32 border, sync_pos, blank_width, sync_width;
+
+	/* keep the hsync and hblank widths constant */
+	sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
+	blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
+	sync_pos = (blank_width - sync_width + 1) / 2;
+
+	border = (mode->hdisplay - width + 1) / 2;
+	border += border & 1; /* make the border even */
+
+	mode->crtc_hdisplay = width;
+	mode->crtc_hblank_start = width + border;
+	mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
+
+	mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
+	mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
+}
+
+static void
+centre_vertically(struct drm_display_mode *mode,
+		  int height)
+{
+	u32 border, sync_pos, blank_width, sync_width;
+
+	/* keep the vsync and vblank widths constant */
+	sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
+	sync_pos = (blank_width - sync_width + 1) / 2;
+
+	border = (mode->vdisplay - height + 1) / 2;
+
+	mode->crtc_vdisplay = height;
+	mode->crtc_vblank_start = height + border;
+	mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
+
+	mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
+	mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
+}
+
+static inline u32 panel_fitter_scaling(u32 source, u32 target)
+{
+	/*
+	 * Floating point is not available in the kernel, so FACTOR provides
+	 * fixed-point arithmetic for calculating the panel scaling ratio.
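+	 *
+	 * For example, scaling a 1024-pixel source to a 1280-pixel target
+	 * gives ratio = 1024 * 4096 / 1280 = 3276, i.e. roughly 0.8 in the
+	 * resulting 4.12 fixed-point format.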
+	 */
+#define ACCURACY 12
+#define FACTOR (1 << ACCURACY)
+	u32 ratio = source * FACTOR / target;
+	return (FACTOR * ratio + FACTOR/2) / FACTOR;
+}
+
+static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
+				      struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_lvds_encoder *lvds_encoder =
+		to_lvds_encoder(&intel_encoder->base);
+	struct intel_connector *intel_connector =
+		&lvds_encoder->attached_connector->base;
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+	struct drm_display_mode *mode = &pipe_config->requested_mode;
+	struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
+	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+	unsigned int lvds_bpp;
+	int pipe;
+
+	/* Should never happen!! */
+	if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
+		DRM_ERROR("Can't support LVDS on pipe A\n");
+		return false;
+	}
+
+	if (intel_encoder_check_is_cloned(&lvds_encoder->base))
+		return false;
+
+	if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
+	    LVDS_A3_POWER_UP)
+		lvds_bpp = 8*3;
+	else
+		lvds_bpp = 6*3;
+
+	if (lvds_bpp != pipe_config->pipe_bpp) {
+		DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
+			      pipe_config->pipe_bpp, lvds_bpp);
+		pipe_config->pipe_bpp = lvds_bpp;
+	}
+	/*
+	 * We have timings from the BIOS for the panel, put them into
+	 * the adjusted mode.  The CRTC will be set up for this mode,
+	 * with the panel scaling set up to source from the H/VDisplay
+	 * of the original mode.
+	 */
+	intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+			       adjusted_mode);
+
+	if (HAS_PCH_SPLIT(dev)) {
+		pipe_config->has_pch_encoder = true;
+
+		intel_pch_panel_fitting(dev,
+					intel_connector->panel.fitting_mode,
+					mode, adjusted_mode);
+		return true;
+	}
+
+	/* Native modes don't need fitting */
+	if (adjusted_mode->hdisplay == mode->hdisplay &&
+	    adjusted_mode->vdisplay == mode->vdisplay)
+		goto out;
+
+	/* 965+ wants fuzzy fitting */
+	if (INTEL_INFO(dev)->gen >= 4)
+		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+				 PFIT_FILTER_FUZZY);
+
+	/*
+	 * Enable automatic panel scaling for non-native modes so that they fill
+	 * the screen.  Should be enabled before the pipe is enabled, according
+	 * to register description and PRM.
+	 * Change the value here to see the borders for debugging
+	 */
+	for_each_pipe(pipe)
+		I915_WRITE(BCLRPAT(pipe), 0);
+
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+	pipe_config->timings_set = true;
+
+	switch (intel_connector->panel.fitting_mode) {
+	case DRM_MODE_SCALE_CENTER:
+		/*
+		 * For centered modes, we have to calculate border widths &
+		 * heights and modify the values programmed into the CRTC.
+		 */
+		centre_horizontally(adjusted_mode, mode->hdisplay);
+		centre_vertically(adjusted_mode, mode->vdisplay);
+		border = LVDS_BORDER_ENABLE;
+		break;
+
+	case DRM_MODE_SCALE_ASPECT:
+		/* Scale but preserve the aspect ratio */
+		if (INTEL_INFO(dev)->gen >= 4) {
+			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+
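+			/* Cross-multiplying compares the two aspect ratios
+			 * without a division: scaled_width > scaled_height
+			 * means the panel is wider than the requested mode,
+			 * so bars go on the sides (pillarbox). */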
+			/* 965+ is easy, it does everything in hw */
+			if (scaled_width > scaled_height)
+				pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
+			else if (scaled_width < scaled_height)
+				pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
+			else if (adjusted_mode->hdisplay != mode->hdisplay)
+				pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+		} else {
+			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+			/*
+			 * For earlier chips we have to calculate the scaling
+			 * ratio by hand and program it into the
+			 * PFIT_PGM_RATIO register
+			 */
+			if (scaled_width > scaled_height) { /* pillar */
+				centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay);
+
+				border = LVDS_BORDER_ENABLE;
+				if (mode->vdisplay != adjusted_mode->vdisplay) {
+					u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
+					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+							    bits << PFIT_VERT_SCALE_SHIFT);
+					pfit_control |= (PFIT_ENABLE |
+							 VERT_INTERP_BILINEAR |
+							 HORIZ_INTERP_BILINEAR);
+				}
+			} else if (scaled_width < scaled_height) { /* letter */
+				centre_vertically(adjusted_mode, scaled_width / mode->hdisplay);
+
+				border = LVDS_BORDER_ENABLE;
+				if (mode->hdisplay != adjusted_mode->hdisplay) {
+					u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
+					pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+							    bits << PFIT_VERT_SCALE_SHIFT);
+					pfit_control |= (PFIT_ENABLE |
+							 VERT_INTERP_BILINEAR |
+							 HORIZ_INTERP_BILINEAR);
+				}
+			} else
+				/* Aspects match, let hw scale both directions */
+				pfit_control |= (PFIT_ENABLE |
+						 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+						 VERT_INTERP_BILINEAR |
+						 HORIZ_INTERP_BILINEAR);
+		}
+		break;
+
+	case DRM_MODE_SCALE_FULLSCREEN:
+		/*
+		 * Full scaling, even if it changes the aspect ratio.
+		 * Fortunately this is all done for us in hw.
+		 */
+		if (mode->vdisplay != adjusted_mode->vdisplay ||
+		    mode->hdisplay != adjusted_mode->hdisplay) {
+			pfit_control |= PFIT_ENABLE;
+			if (INTEL_INFO(dev)->gen >= 4)
+				pfit_control |= PFIT_SCALING_AUTO;
+			else
+				pfit_control |= (VERT_AUTO_SCALE |
+						 VERT_INTERP_BILINEAR |
+						 HORIZ_AUTO_SCALE |
+						 HORIZ_INTERP_BILINEAR);
+		}
+		break;
+
+	default:
+		break;
+	}
+
+out:
+	/* If not enabling scaling, be consistent and always use 0. */
+	if ((pfit_control & PFIT_ENABLE) == 0) {
+		pfit_control = 0;
+		pfit_pgm_ratios = 0;
+	}
+
+	/* Make sure pre-965 set dither correctly */
+	if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+	if (pfit_control != lvds_encoder->pfit_control ||
+	    pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
+		lvds_encoder->pfit_control = pfit_control;
+		lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
+	}
+	dev_priv->lvds_border_bits = border;
+
+	/*
+	 * XXX: It would be nice to support lower refresh rates on the
+	 * panels to reduce power consumption, and perhaps match the
+	 * user's requested refresh rate.
+	 */
+
+	return true;
+}
+
+static void intel_lvds_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	/*
+	 * The LVDS pin pair will already have been turned on in the
+	 * intel_crtc_mode_set since it has a large impact on the DPLL
+	 * settings.
+	 */
+}
+
+/**
+ * Detect the LVDS connection.
+ *
+ * Since LVDS doesn't have hotplug, we use the lid as a proxy.  Open means
+ * connected and closed means disconnected.  We also send hotplug events as
+ * needed, using lid status notification from the input layer.
+ */
+static enum drm_connector_status
+intel_lvds_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	enum drm_connector_status status;
+
+	status = intel_panel_detect(dev);
+	if (status != connector_status_unknown)
+		return status;
+
+	return connector_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int intel_lvds_get_modes(struct drm_connector *connector)
+{
+	struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode;
+
+	/* use cached edid if we have one */
+	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+		return drm_add_edid_modes(connector, lvds_connector->base.edid);
+
+	mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
+	if (mode == NULL)
+		return 0;
+
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
+static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+{
+	DRM_INFO("Skipping forced modeset for %s\n", id->ident);
+	return 1;
+}
+
+/* The GPU hangs up on these systems if modeset is performed on LID open */
+static const struct dmi_system_id intel_no_modeset_on_lid[] = {
+	{
+		.callback = intel_no_modeset_on_lid_dmi_callback,
+		.ident = "Toshiba Tecra A11",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
+		},
+	},
+
+	{ }	/* terminating entry */
+};
+
+/*
+ * Lid events. Note the use of 'modeset':
+ *  - we set it to MODESET_ON_LID_OPEN on lid close,
+ *    and set it to MODESET_DONE on open
+ *  - we use it as an "only once" bit (i.e. we ignore
+ *    duplicate events where it was already properly set)
+ *  - the suspend/resume paths will set it to
+ *    MODESET_SUSPENDED and ignore the lid open event,
+ *    because they restore the mode ("lid open").
+ */
+static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
+			    void *unused)
+{
+	struct intel_lvds_connector *lvds_connector =
+		container_of(nb, struct intel_lvds_connector, lid_notifier);
+	struct drm_connector *connector = &lvds_connector->base.base;
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+		return NOTIFY_OK;
+
+	mutex_lock(&dev_priv->modeset_restore_lock);
+	if (dev_priv->modeset_restore == MODESET_SUSPENDED)
+		goto exit;
+	/*
+	 * check and update the status of LVDS connector after receiving
+	 * the LID notification event.
+	 */
+	connector->status = connector->funcs->detect(connector, false);
+
+	/* Don't force modeset on machines where it causes a GPU lockup */
+	if (dmi_check_system(intel_no_modeset_on_lid))
+		goto exit;
+	if (!acpi_lid_open()) {
+		/* do modeset on next lid open event */
+		dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
+		goto exit;
+	}
+
+	if (dev_priv->modeset_restore == MODESET_DONE)
+		goto exit;
+
+	drm_modeset_lock_all(dev);
+	intel_modeset_setup_hw_state(dev, true);
+	drm_modeset_unlock_all(dev);
+
+	dev_priv->modeset_restore = MODESET_DONE;
+
+exit:
+	mutex_unlock(&dev_priv->modeset_restore_lock);
+	return NOTIFY_OK;
+}
+
+/**
+ * intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+static void intel_lvds_destroy(struct drm_connector *connector)
+{
+	struct intel_lvds_connector *lvds_connector =
+		to_lvds_connector(connector);
+
+	if (lvds_connector->lid_notifier.notifier_call)
+		acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
+
+	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+		kfree(lvds_connector->base.edid);
+
+	intel_panel_fini(&lvds_connector->base.panel);
+
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static int intel_lvds_set_property(struct drm_connector *connector,
+				   struct drm_property *property,
+				   uint64_t value)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_device *dev = connector->dev;
+
+	if (property == dev->mode_config.scaling_mode_property) {
+		struct drm_crtc *crtc;
+
+		if (value == DRM_MODE_SCALE_NONE) {
+			DRM_DEBUG_KMS("no scaling not supported\n");
+			return -EINVAL;
+		}
+
+		if (intel_connector->panel.fitting_mode == value) {
+			/* the LVDS scaling property is not changed */
+			return 0;
+		}
+		intel_connector->panel.fitting_mode = value;
+
+		crtc = intel_attached_encoder(connector)->base.crtc;
+		if (crtc && crtc->enabled) {
+			/*
+			 * If the CRTC is enabled, the display will be changed
+			 * according to the new panel fitting mode.
+			 */
+			intel_crtc_restore_mode(crtc);
+		}
+	}
+
+	return 0;
+}
+
+static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
+	.mode_set = intel_lvds_mode_set,
+};
+
+static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
+	.get_modes = intel_lvds_get_modes,
+	.mode_valid = intel_lvds_mode_valid,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+	.dpms = intel_connector_dpms,
+	.detect = intel_lvds_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = intel_lvds_set_property,
+	.destroy = intel_lvds_destroy,
+};
+
+static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
+	.destroy = intel_encoder_destroy,
+};
+
+static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
+{
+	DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
+	return 1;
+}
+
+/* These systems claim to have LVDS, but really don't */
+static const struct dmi_system_id intel_no_lvds[] = {
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Apple Mac Mini (Core series)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Apple Mac Mini (Core 2 series)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "MSI IM-945GSE-A",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "A9830IMS"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Dell Studio Hybrid",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Dell OptiPlex FX170",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "AOpen Mini PC",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "AOpen Mini PC MP915",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+			DMI_MATCH(DMI_BOARD_NAME, "i915GMx-F"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "AOpen i915GMm-HFS",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+			DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "AOpen i45GMx-I",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+			DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Aopen i945GTt-VFA",
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Clientron U800",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Clientron E830",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Asus EeeBox PC EB1007",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Asus AT5NM10T-I",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Hewlett-Packard HP t5740",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Hewlett-Packard t5745",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Hewlett-Packard st5747",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "MSI Wind Box DC500",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+			DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Gigabyte GA-D525TUD",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+			DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Supermicro X7SPA-H",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Fujitsu Esprimo Q900",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Intel D410PT",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+			DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Intel D425KT",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+			DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
+		.ident = "Intel D510MO",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
+			DMI_EXACT_MATCH(DMI_BOARD_NAME, "D510MO"),
+		},
+	},
+
+	{ }	/* terminating entry */
+};
+
+/**
+ * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @connector: LVDS connector
+ *
+ * Find the reduced downclock for LVDS in EDID.
+ */
+static void intel_find_lvds_downclock(struct drm_device *dev,
+				      struct drm_display_mode *fixed_mode,
+				      struct drm_connector *connector)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *scan;
+	int temp_downclock;
+
+	temp_downclock = fixed_mode->clock;
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		/*
+		 * If a probed mode has the same resolution as the fixed panel
+		 * mode but a different refresh rate, a reduced downclock has
+		 * been found for the LVDS. In that case we can program
+		 * different FPx0/1 values to dynamically select between the
+		 * low and high frequencies.
+		 */
+		if (scan->hdisplay == fixed_mode->hdisplay &&
+		    scan->hsync_start == fixed_mode->hsync_start &&
+		    scan->hsync_end == fixed_mode->hsync_end &&
+		    scan->htotal == fixed_mode->htotal &&
+		    scan->vdisplay == fixed_mode->vdisplay &&
+		    scan->vsync_start == fixed_mode->vsync_start &&
+		    scan->vsync_end == fixed_mode->vsync_end &&
+		    scan->vtotal == fixed_mode->vtotal) {
+			if (scan->clock < temp_downclock) {
+				/*
+				 * A downclock was already found, but keep
+				 * looking for the lowest one.
+				 */
+				temp_downclock = scan->clock;
+			}
+		}
+	}
+	if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
+		/* We found the downclock for LVDS. */
+		dev_priv->lvds_downclock_avail = 1;
+		dev_priv->lvds_downclock = temp_downclock;
+		DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
+			      "Normal clock %dkHz, downclock %dkHz\n",
+			      fixed_mode->clock, temp_downclock);
+	}
+}
+
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * If it is present, return true.
+ * If it is not present, return false.
+ * If no child dev was parsed from the VBT, assume that the LVDS is present.
+ */
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+				   u8 *i2c_pin)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	if (!dev_priv->child_dev_num)
+		return true;
+
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		struct child_device_config *child = dev_priv->child_dev + i;
+
+		/* If the device type is not LFP, continue.
+		 * We have to check both the new identifiers as well as the
+		 * old for compatibility with some BIOSes.
+		 */
+		if (child->device_type != DEVICE_TYPE_INT_LFP &&
+		    child->device_type != DEVICE_TYPE_LFP)
+			continue;
+
+		if (intel_gmbus_is_port_valid(child->i2c_pin))
+			*i2c_pin = child->i2c_pin;
+
+		/* However, we cannot trust the BIOS writers to populate
+		 * the VBT correctly.  Since LVDS requires additional
+		 * information from AIM blocks, a non-zero addin offset is
+		 * a good indicator that the LVDS is actually present.
+		 */
+		if (child->addin_offset)
+			return true;
+
+		/* But even then some BIOS writers perform some black magic
+		 * and instantiate the device without reference to any
+		 * additional data.  Trust that if the VBT was written into
+		 * the OpRegion then they have validated the LVDS's existence.
+		 */
+		if (dev_priv->opregion.vbt)
+			return true;
+	}
+
+	return false;
+}
+
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+	DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+	return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+	{
+		.callback = intel_dual_link_lvds_callback,
+		.ident = "Apple MacBook Pro (Core i5/i7 Series)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+		},
+	},
+	{ }	/* terminating entry */
+};
+
+bool intel_is_dual_link_lvds(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_lvds_encoder *lvds_encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (encoder->type == INTEL_OUTPUT_LVDS) {
+			lvds_encoder = to_lvds_encoder(&encoder->base);
+
+			return lvds_encoder->is_dual_link;
+		}
+	}
+
+	return false;
+}
+
+static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
+{
+	struct drm_device *dev = lvds_encoder->base.base.dev;
+	unsigned int val;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* use the module option value if specified */
+	if (i915_lvds_channel_mode > 0)
+		return i915_lvds_channel_mode == 2;
+
+	if (dmi_check_system(intel_dual_link_lvds))
+		return true;
+
+	/* BIOS should set the proper LVDS register value at boot, but
+	 * in reality, it doesn't set the value when the lid is closed;
+	 * we need to check "the value to be set" in VBT when LVDS
+	 * register is uninitialized.
+	 */
+	val = I915_READ(lvds_encoder->reg);
+	if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+		val = dev_priv->bios_lvds_val;
+
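+	/* The second channel's clock (CLKB) is powered up only when the panel
+	 * is driven in dual-link mode. */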
+	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
+
+static bool intel_lvds_supported(struct drm_device *dev)
+{
+	/* With the introduction of the PCH we gained a dedicated
+	 * LVDS presence pin, use it. */
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		return true;
+
+	/* Otherwise LVDS was only attached to mobile products,
+	 * except for the inglorious 830gm */
+	if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
+		return true;
+
+	return false;
+}
+
+/**
+ * intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+bool intel_lvds_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_lvds_encoder *lvds_encoder;
+	struct intel_encoder *intel_encoder;
+	struct intel_lvds_connector *lvds_connector;
+	struct intel_connector *intel_connector;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_display_mode *scan;
+	struct drm_display_mode *fixed_mode = NULL;
+	struct edid *edid;
+	struct drm_crtc *crtc;
+	u32 lvds;
+	int pipe;
+	u8 pin;
+
+	if (!intel_lvds_supported(dev))
+		return false;
+
+	/* Skip init on machines we know falsely report LVDS */
+	if (dmi_check_system(intel_no_lvds))
+		return false;
+
+	pin = GMBUS_PORT_PANEL;
+	if (!lvds_is_present_in_vbt(dev, &pin)) {
+		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+		return false;
+	}
+
+	if (HAS_PCH_SPLIT(dev)) {
+		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
+			return false;
+		if (dev_priv->edp.support) {
+			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
+			return false;
+		}
+	}
+
+	lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+	if (!lvds_encoder)
+		return false;
+
+	lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+	if (!lvds_connector) {
+		kfree(lvds_encoder);
+		return false;
+	}
+
+	lvds_encoder->attached_connector = lvds_connector;
+
+	if (!HAS_PCH_SPLIT(dev)) {
+		lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
+	}
+
+	intel_encoder = &lvds_encoder->base;
+	encoder = &intel_encoder->base;
+	intel_connector = &lvds_connector->base;
+	connector = &intel_connector->base;
+	drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
+			   DRM_MODE_CONNECTOR_LVDS);
+
+	drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
+			 DRM_MODE_ENCODER_LVDS);
+
+	intel_encoder->enable = intel_enable_lvds;
+	intel_encoder->pre_enable = intel_pre_enable_lvds;
+	intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
+	intel_encoder->compute_config = intel_lvds_compute_config;
+	intel_encoder->disable = intel_disable_lvds;
+	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
+	intel_encoder->type = INTEL_OUTPUT_LVDS;
+
+	intel_encoder->cloneable = false;
+	if (HAS_PCH_SPLIT(dev))
+		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	else if (IS_GEN4(dev))
+		intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+	else
+		intel_encoder->crtc_mask = (1 << 1);
+
+	drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
+	drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		lvds_encoder->reg = PCH_LVDS;
+	} else {
+		lvds_encoder->reg = LVDS;
+	}
+
+	/* create the scaling mode property */
+	drm_mode_create_scaling_mode_property(dev);
+	drm_object_attach_property(&connector->base,
+				      dev->mode_config.scaling_mode_property,
+				      DRM_MODE_SCALE_ASPECT);
+	intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+	/*
+	 * LVDS discovery:
+	 * 1) check for EDID on DDC
+	 * 2) check for VBT data
+	 * 3) check to see if LVDS is already on
+	 *    if none of the above, no panel
+	 * 4) make sure lid is open
+	 *    if closed, act like it's not there for now
+	 */
+
+	/*
+	 * Attempt to get the fixed panel mode from DDC.  Assume that the
+	 * preferred mode is the right one.
+	 */
+	edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+	if (edid) {
+		if (drm_add_edid_modes(connector, edid)) {
+			drm_mode_connector_update_edid_property(connector,
+								edid);
+		} else {
+			kfree(edid);
+			edid = ERR_PTR(-EINVAL);
+		}
+	} else {
+		edid = ERR_PTR(-ENOENT);
+	}
+	lvds_connector->base.edid = edid;
+
+	if (IS_ERR_OR_NULL(edid)) {
+		/* Didn't get an EDID, so set wide sync ranges to ensure
+		 * all modes are handed to valid_mode for checking.
+		 */
+		connector->display_info.min_vfreq = 0;
+		connector->display_info.max_vfreq = 200;
+		connector->display_info.min_hfreq = 0;
+		connector->display_info.max_hfreq = 200;
+	}
+
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+			DRM_DEBUG_KMS("using preferred mode from EDID: ");
+			drm_mode_debug_printmodeline(scan);
+
+			fixed_mode = drm_mode_duplicate(dev, scan);
+			if (fixed_mode) {
+				intel_find_lvds_downclock(dev, fixed_mode,
+							  connector);
+				goto out;
+			}
+		}
+	}
+
+	/* Failed to get EDID, what about VBT? */
+	if (dev_priv->lfp_lvds_vbt_mode) {
+		DRM_DEBUG_KMS("using mode from VBT: ");
+		drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
+
+		fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+		if (fixed_mode) {
+			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+			goto out;
+		}
+	}
+
+	/*
+	 * If we didn't get EDID, try checking if the panel is already turned
+	 * on.  If so, assume that whatever is currently programmed is the
+	 * correct mode.
+	 */
+
+	/* Ironlake: FIXME: if this also fails, don't try reading the pipe mode for now */
+	if (HAS_PCH_SPLIT(dev))
+		goto failed;
+
+	lvds = I915_READ(LVDS);
+	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+	crtc = intel_get_crtc_for_pipe(dev, pipe);
+
+	if (crtc && (lvds & LVDS_PORT_EN)) {
+		fixed_mode = intel_crtc_mode_get(dev, crtc);
+		if (fixed_mode) {
+			DRM_DEBUG_KMS("using current (BIOS) mode: ");
+			drm_mode_debug_printmodeline(fixed_mode);
+			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+			goto out;
+		}
+	}
+
+	/* If we still don't have a mode after all that, give up. */
+	if (!fixed_mode)
+		goto failed;
+
+out:
+	lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
+	DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
+		      lvds_encoder->is_dual_link ? "dual" : "single");
+
+	/*
+	 * Unlock the panel power sequencing registers and just leave them
+	 * unlocked.
+	 */
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(PCH_PP_CONTROL,
+			   I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+	} else {
+		I915_WRITE(PP_CONTROL,
+			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+	}
+	lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
+	if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
+		DRM_DEBUG_KMS("lid notifier registration failed\n");
+		lvds_connector->lid_notifier.notifier_call = NULL;
+	}
+	drm_sysfs_connector_add(connector);
+
+	intel_panel_init(&intel_connector->panel, fixed_mode);
+	intel_panel_setup_backlight(connector);
+
+	return true;
+
+failed:
+	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
+	drm_connector_cleanup(connector);
+	drm_encoder_cleanup(encoder);
+	if (fixed_mode)
+		drm_mode_destroy(dev, fixed_mode);
+	kfree(lvds_encoder);
+	kfree(lvds_connector);
+	return false;
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_modes.c b/linux-imx/drivers/gpu/drm/i915/intel_modes.c
new file mode 100644
index 0000000..0e860f3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_modes.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (c) 2007, 2010 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <drm/drm_edid.h>
+#include <drm/drmP.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+/**
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
+ */
+int intel_connector_update_modes(struct drm_connector *connector,
+				struct edid *edid)
+{
+	int ret;
+
+	drm_mode_connector_update_edid_property(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
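+	/* Also refresh the ELD so HDMI/DP audio sees up-to-date EDID data. */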
+	drm_edid_to_eld(connector, edid);
+
+	return ret;
+}
+
+/**
+ * intel_ddc_get_modes - get modelist from monitor
+ * @connector: DRM connector device to use
+ * @adapter: i2c adapter
+ *
+ * Fetch the EDID information from @connector using the DDC bus.
+ */
+int intel_ddc_get_modes(struct drm_connector *connector,
+			struct i2c_adapter *adapter)
+{
+	struct edid *edid;
+	int ret;
+
+	edid = drm_get_edid(connector, adapter);
+	if (!edid)
+		return 0;
+
+	ret = intel_connector_update_modes(connector, edid);
+	kfree(edid);
+
+	return ret;
+}
+
+static const struct drm_prop_enum_list force_audio_names[] = {
+	{ HDMI_AUDIO_OFF_DVI, "force-dvi" },
+	{ HDMI_AUDIO_OFF, "off" },
+	{ HDMI_AUDIO_AUTO, "auto" },
+	{ HDMI_AUDIO_ON, "on" },
+};
+
+void
+intel_attach_force_audio_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+
+	prop = dev_priv->force_audio_property;
+	if (prop == NULL) {
+		prop = drm_property_create_enum(dev, 0,
+					   "audio",
+					   force_audio_names,
+					   ARRAY_SIZE(force_audio_names));
+		if (prop == NULL)
+			return;
+
+		dev_priv->force_audio_property = prop;
+	}
+	drm_object_attach_property(&connector->base, prop, 0);
+}
+
+static const struct drm_prop_enum_list broadcast_rgb_names[] = {
+	{ INTEL_BROADCAST_RGB_AUTO, "Automatic" },
+	{ INTEL_BROADCAST_RGB_FULL, "Full" },
+	{ INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
+};
+
+void
+intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_property *prop;
+
+	prop = dev_priv->broadcast_rgb_property;
+	if (prop == NULL) {
+		prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+					   "Broadcast RGB",
+					   broadcast_rgb_names,
+					   ARRAY_SIZE(broadcast_rgb_names));
+		if (prop == NULL)
+			return;
+
+		dev_priv->broadcast_rgb_property = prop;
+	}
+
+	drm_object_attach_property(&connector->base, prop, 0);
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_opregion.c b/linux-imx/drivers/gpu/drm/i915/intel_opregion.c
new file mode 100644
index 0000000..a8117e6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_opregion.c
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
+ * Copyright 2008 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT.  IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
+#include <acpi/video.h>
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
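+/* PCI config-space registers: the ASLS dword holds the physical address
+ * of the opregion, which intel_opregion_setup() below reads and maps. */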
+#define PCI_ASLE 0xe4
+#define PCI_ASLS 0xfc
+
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET   0x100
+#define   ACPI_CLID 0x01ac /* current lid state indicator */
+#define   ACPI_CDCK 0x01b0 /* current docking state indicator */
+#define OPREGION_SWSCI_OFFSET  0x200
+#define OPREGION_ASLE_OFFSET   0x300
+#define OPREGION_VBT_OFFSET    0x400
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI      (1<<0)
+#define MBOX_SWSCI     (1<<1)
+#define MBOX_ASLE      (1<<2)
+
+struct opregion_header {
+	u8 signature[16];
+	u32 size;
+	u32 opregion_ver;
+	u8 bios_ver[32];
+	u8 vbios_ver[16];
+	u8 driver_ver[16];
+	u32 mboxes;
+	u8 reserved[164];
+} __attribute__((packed));
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+	u32 drdy;       /* driver readiness */
+	u32 csts;       /* notification status */
+	u32 cevt;       /* current event */
+	u8 rsvd1[20];
+	u32 didl[8];    /* supported display devices ID list */
+	u32 cpdl[8];    /* currently presented display list */
+	u32 cadl[8];    /* currently active display list */
+	u32 nadl[8];    /* next active devices list */
+	u32 aslp;       /* ASL sleep time-out */
+	u32 tidx;       /* toggle table index */
+	u32 chpd;       /* current hotplug enable indicator */
+	u32 clid;       /* current lid state*/
+	u32 cdck;       /* current docking state */
+	u32 sxsw;       /* Sx state resume */
+	u32 evts;       /* ASL supported events */
+	u32 cnot;       /* current OS notification */
+	u32 nrdy;       /* driver status */
+	u8 rsvd2[60];
+} __attribute__((packed));
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+	u32 scic;       /* SWSCI command|status|data */
+	u32 parm;       /* command parameters */
+	u32 dslp;       /* driver sleep time-out */
+	u8 rsvd[244];
+} __attribute__((packed));
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+	u32 ardy;       /* driver readiness */
+	u32 aslc;       /* ASLE interrupt command */
+	u32 tche;       /* technology enabled indicator */
+	u32 alsi;       /* current ALS illuminance reading */
+	u32 bclp;       /* backlight brightness to set */
+	u32 pfit;       /* panel fitting state */
+	u32 cblv;       /* current brightness level */
+	u16 bclm[20];   /* backlight level duty cycle mapping table */
+	u32 cpfm;       /* current panel fitting mode */
+	u32 epfm;       /* enabled panel fitting modes */
+	u8 plut[74];    /* panel LUT and identifier */
+	u32 pfmb;       /* PWM freq and min brightness */
+	u8 rsvd[102];
+} __attribute__((packed));
+
+/* ASLE irq request bits */
+#define ASLE_SET_ALS_ILLUM     (1 << 0)
+#define ASLE_SET_BACKLIGHT     (1 << 1)
+#define ASLE_SET_PFIT          (1 << 2)
+#define ASLE_SET_PWM_FREQ      (1 << 3)
+#define ASLE_REQ_MSK           0xf
+
+/* response bits of ASLE irq request */
+#define ASLE_ALS_ILLUM_FAILED	(1<<10)
+#define ASLE_BACKLIGHT_FAILED	(1<<12)
+#define ASLE_PFIT_FAILED	(1<<14)
+#define ASLE_PWM_FREQ_FAILED	(1<<16)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID                (1<<31)
+#define ASLE_BCLP_MSK          (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID         (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID         (1<<31)
+
+#define ACPI_OTHER_OUTPUT (0<<8)
+#define ACPI_VGA_OUTPUT (1<<8)
+#define ACPI_TV_OUTPUT (2<<8)
+#define ACPI_DIGITAL_OUTPUT (3<<8)
+#define ACPI_LVDS_OUTPUT (4<<8)
+
+#ifdef CONFIG_ACPI
+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
+	u32 max;
+
+	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+
+	if (!(bclp & ASLE_BCLP_VALID))
+		return ASLE_BACKLIGHT_FAILED;
+
+	bclp &= ASLE_BCLP_MSK;
+	if (bclp > 255)
+		return ASLE_BACKLIGHT_FAILED;
+
+	max = intel_panel_get_max_backlight(dev);
+	intel_panel_set_backlight(dev, bclp * max / 255);
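+	/* Report the new level back through CBLV as a 0-100 percentage
+	 * (0x64 == 100), with the valid bit set so the BIOS sees it. */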
+	iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
+
+	return 0;
+}
+
+static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
+{
+	/* alsi is the current ALS reading in lux. 0 indicates below sensor
+	   range, 0xffff indicates above sensor range. 1-0xfffe are valid */
+	return 0;
+}
+
+static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (pfmb & ASLE_PFMB_PWM_VALID) {
+		u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
+		u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
+		blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
+		pwm = pwm >> 9;
+		/* FIXME - what do we do with the PWM? */
+	}
+	return 0;
+}
+
+static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
+{
+	/* Panel fitting is currently controlled by the X code, so this is a
+	   noop until modesetting support works fully */
+	if (!(pfit & ASLE_PFIT_VALID))
+		return ASLE_PFIT_FAILED;
+	return 0;
+}
+
+void intel_opregion_asle_intr(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
+	u32 asle_stat = 0;
+	u32 asle_req;
+
+	if (!asle)
+		return;
+
+	asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
+
+	if (!asle_req) {
+		DRM_DEBUG_DRIVER("non asle set request??\n");
+		return;
+	}
+
+	if (asle_req & ASLE_SET_ALS_ILLUM)
+		asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
+
+	if (asle_req & ASLE_SET_BACKLIGHT)
+		asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
+
+	if (asle_req & ASLE_SET_PFIT)
+		asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
+
+	if (asle_req & ASLE_SET_PWM_FREQ)
+		asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
+
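+	/* Write the accumulated per-request status back to the aslc field. */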
+	iowrite32(asle_stat, &asle->aslc);
+}
+
+void intel_opregion_gse_intr(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
+	u32 asle_stat = 0;
+	u32 asle_req;
+
+	if (!asle)
+		return;
+
+	asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
+
+	if (!asle_req) {
+		DRM_DEBUG_DRIVER("non asle set request??\n");
+		return;
+	}
+
+	if (asle_req & ASLE_SET_ALS_ILLUM) {
+		DRM_DEBUG_DRIVER("Illum is not supported\n");
+		asle_stat |= ASLE_ALS_ILLUM_FAILED;
+	}
+
+	if (asle_req & ASLE_SET_BACKLIGHT)
+		asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
+
+	if (asle_req & ASLE_SET_PFIT) {
+		DRM_DEBUG_DRIVER("Pfit is not supported\n");
+		asle_stat |= ASLE_PFIT_FAILED;
+	}
+
+	if (asle_req & ASLE_SET_PWM_FREQ) {
+		DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+		asle_stat |= ASLE_PWM_FREQ_FAILED;
+	}
+
+	iowrite32(asle_stat, &asle->aslc);
+}
+#define ASLE_ALS_EN    (1<<0)
+#define ASLE_BLC_EN    (1<<1)
+#define ASLE_PFIT_EN   (1<<2)
+#define ASLE_PFMB_EN   (1<<3)
+
+void intel_opregion_enable_asle(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
+
+	if (asle) {
+		if (IS_MOBILE(dev))
+			intel_enable_asle(dev);
+
+		iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
+			  ASLE_PFMB_EN,
+			  &asle->tche);
+		iowrite32(1, &asle->ardy);
+	}
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID            (1<<1)
+#define ACPI_EV_DOCK           (1<<2)
+
+static struct intel_opregion *system_opregion;
+
+static int intel_opregion_video_event(struct notifier_block *nb,
+				      unsigned long val, void *data)
+{
+	/* The only video events relevant to opregion are 0x80. These indicate
+	   either a docking event, lid switch or display switch request. In
+	   Linux, these are handled by the dock, button and video drivers.
+	*/
+
+	struct opregion_acpi __iomem *acpi;
+	struct acpi_bus_event *event = data;
+	int ret = NOTIFY_OK;
+
+	if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+		return NOTIFY_DONE;
+
+	if (!system_opregion)
+		return NOTIFY_DONE;
+
+	acpi = system_opregion->acpi;
+
+	if (event->type == 0x80 &&
+	    (ioread32(&acpi->cevt) & 1) == 0)
+		ret = NOTIFY_BAD;
+
+	iowrite32(0, &acpi->csts);
+
+	return ret;
+}
+
+static struct notifier_block intel_opregion_notifier = {
+	.notifier_call = intel_opregion_video_event,
+};
+
+/*
+ * Initialise the DIDL field in opregion. This passes a list of devices to
+ * the firmware. Values are defined by section B.4.2 of the ACPI specification
+ * (version 3)
+ */
+
+static void intel_didl_outputs(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	struct drm_connector *connector;
+	acpi_handle handle;
+	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
+	unsigned long long device_id;
+	acpi_status status;
+	u32 temp;
+	int i = 0;
+
+	handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+	if (!handle || acpi_bus_get_device(handle, &acpi_dev))
+		return;
+
+	if (acpi_is_video_device(handle))
+		acpi_video_bus = acpi_dev;
+	else {
+		list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
+			if (acpi_is_video_device(acpi_cdev->handle)) {
+				acpi_video_bus = acpi_cdev;
+				break;
+			}
+		}
+	}
+
+	if (!acpi_video_bus) {
+		pr_warn("No ACPI video bus found\n");
+		return;
+	}
+
+	list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
+		if (i >= 8) {
+			dev_printk(KERN_ERR, &dev->pdev->dev,
+				    "More than 8 outputs detected\n");
+			return;
+		}
+		status =
+			acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+						NULL, &device_id);
+		if (ACPI_SUCCESS(status)) {
+			if (!device_id)
+				goto blind_set;
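+			/* Keep only the device index and type nibbles of
+			 * _ADR (see ACPI spec B.4.2) when reporting the
+			 * output in DIDL. */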
+			iowrite32((u32)(device_id & 0x0f0f),
+				  &opregion->acpi->didl[i]);
+			i++;
+		}
+	}
+
+end:
+	/* If fewer than 8 outputs, the list must be null terminated */
+	if (i < 8)
+		iowrite32(0, &opregion->acpi->didl[i]);
+	return;
+
+blind_set:
+	i = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		int output_type = ACPI_OTHER_OUTPUT;
+		if (i >= 8) {
+			dev_printk(KERN_ERR, &dev->pdev->dev,
+				    "More than 8 outputs detected\n");
+			return;
+		}
+		switch (connector->connector_type) {
+		case DRM_MODE_CONNECTOR_VGA:
+		case DRM_MODE_CONNECTOR_DVIA:
+			output_type = ACPI_VGA_OUTPUT;
+			break;
+		case DRM_MODE_CONNECTOR_Composite:
+		case DRM_MODE_CONNECTOR_SVIDEO:
+		case DRM_MODE_CONNECTOR_Component:
+		case DRM_MODE_CONNECTOR_9PinDIN:
+			output_type = ACPI_TV_OUTPUT;
+			break;
+		case DRM_MODE_CONNECTOR_DVII:
+		case DRM_MODE_CONNECTOR_DVID:
+		case DRM_MODE_CONNECTOR_DisplayPort:
+		case DRM_MODE_CONNECTOR_HDMIA:
+		case DRM_MODE_CONNECTOR_HDMIB:
+			output_type = ACPI_DIGITAL_OUTPUT;
+			break;
+		case DRM_MODE_CONNECTOR_LVDS:
+			output_type = ACPI_LVDS_OUTPUT;
+			break;
+		}
+		temp = ioread32(&opregion->acpi->didl[i]);
+		iowrite32(temp | (1<<31) | output_type | i,
+			  &opregion->acpi->didl[i]);
+		i++;
+	}
+	goto end;
+}
+
+static void intel_setup_cadls(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	int i = 0;
+	u32 disp_id;
+
+	/* Initialize the CADL field by duplicating the DIDL values.
+	 * Technically this is not always correct, as display outputs may exist
+	 * but not be active. This initialization is necessary for some Clevo
+	 * laptops that check this field before processing the brightness and
+	 * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
+	 * there are fewer than eight devices. */
+	do {
+		disp_id = ioread32(&opregion->acpi->didl[i]);
+		iowrite32(disp_id, &opregion->acpi->cadl[i]);
+	} while (++i < 8 && disp_id != 0);
+}
+
+void intel_opregion_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+
+	if (!opregion->header)
+		return;
+
+	if (opregion->acpi) {
+		if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+			intel_didl_outputs(dev);
+			intel_setup_cadls(dev);
+		}
+
+		/* Notify BIOS we are ready to handle ACPI video ext notifs.
+		 * Right now, all the events are handled by the ACPI video module.
+		 * We don't actually need to do anything with them. */
+		iowrite32(0, &opregion->acpi->csts);
+		iowrite32(1, &opregion->acpi->drdy);
+
+		system_opregion = opregion;
+		register_acpi_notifier(&intel_opregion_notifier);
+	}
+
+	if (opregion->asle)
+		intel_opregion_enable_asle(dev);
+}
+
+void intel_opregion_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+
+	if (!opregion->header)
+		return;
+
+	if (opregion->acpi) {
+		iowrite32(0, &opregion->acpi->drdy);
+
+		system_opregion = NULL;
+		unregister_acpi_notifier(&intel_opregion_notifier);
+	}
+
+	/* just clear all opregion memory pointers now */
+	iounmap(opregion->header);
+	opregion->header = NULL;
+	opregion->acpi = NULL;
+	opregion->swsci = NULL;
+	opregion->asle = NULL;
+	opregion->vbt = NULL;
+}
+#endif
+
+int intel_opregion_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	void __iomem *base;
+	u32 asls, mboxes;
+	char buf[sizeof(OPREGION_SIGNATURE)];
+	int err = 0;
+
+	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
+	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
+	if (asls == 0) {
+		DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
+		return -ENOTSUPP;
+	}
+
+	base = acpi_os_ioremap(asls, OPREGION_SIZE);
+	if (!base)
+		return -ENOMEM;
+
+	memcpy_fromio(buf, base, sizeof(buf));
+
+	if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
+		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+		err = -EINVAL;
+		goto err_out;
+	}
+	opregion->header = base;
+	opregion->vbt = base + OPREGION_VBT_OFFSET;
+
+	opregion->lid_state = base + ACPI_CLID;
+
+	mboxes = ioread32(&opregion->header->mboxes);
+	if (mboxes & MBOX_ACPI) {
+		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+		opregion->acpi = base + OPREGION_ACPI_OFFSET;
+	}
+
+	if (mboxes & MBOX_SWSCI) {
+		DRM_DEBUG_DRIVER("SWSCI supported\n");
+		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+	}
+	if (mboxes & MBOX_ASLE) {
+		DRM_DEBUG_DRIVER("ASLE supported\n");
+		opregion->asle = base + OPREGION_ASLE_OFFSET;
+	}
+
+	return 0;
+
+err_out:
+	iounmap(base);
+	return err;
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_overlay.c b/linux-imx/drivers/gpu/drm/i915/intel_overlay.c
new file mode 100644
index 0000000..67a2501
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_overlay.c
@@ -0,0 +1,1539 @@
+/*
+ * Copyright © 2009
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Daniel Vetter <daniel@ffwll.ch>
+ *
+ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+ */
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_drv.h"
+
+/* Limits for overlay size. According to intel doc, the real limits are:
+ * Y width: 4095, UV width (planar): 2047, Y height: 2047,
+ * UV height (planar): 1023. But xorg assumes 2048 for height and width. Use
+ * the minimum of both.  */
+#define IMAGE_MAX_WIDTH		2048
+#define IMAGE_MAX_HEIGHT	2046 /* 2 * 1023 */
+/* on 830 and 845 these large limits result in the card hanging */
+#define IMAGE_MAX_WIDTH_LEGACY	1024
+#define IMAGE_MAX_HEIGHT_LEGACY	1088
+
+/* overlay register definitions */
+/* OCMD register */
+#define OCMD_TILED_SURFACE	(0x1<<19)
+#define OCMD_MIRROR_MASK	(0x3<<17)
+#define OCMD_MIRROR_MODE	(0x3<<17)
+#define OCMD_MIRROR_HORIZONTAL	(0x1<<17)
+#define OCMD_MIRROR_VERTICAL	(0x2<<17)
+#define OCMD_MIRROR_BOTH	(0x3<<17)
+#define OCMD_BYTEORDER_MASK	(0x3<<14) /* zero for YUYV or FOURCC YUY2 */
+#define OCMD_UV_SWAP		(0x1<<14) /* YVYU */
+#define OCMD_Y_SWAP		(0x2<<14) /* UYVY or FOURCC UYVY */
+#define OCMD_Y_AND_UV_SWAP	(0x3<<14) /* VYUY */
+#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
+#define OCMD_RGB_888		(0x1<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_555		(0x2<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_565		(0x3<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_422_PACKED	(0x8<<10)
+#define OCMD_YUV_411_PACKED	(0x9<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_420_PLANAR	(0xc<<10)
+#define OCMD_YUV_422_PLANAR	(0xd<<10)
+#define OCMD_YUV_410_PLANAR	(0xe<<10) /* also 411 */
+#define OCMD_TVSYNCFLIP_PARITY	(0x1<<9)
+#define OCMD_TVSYNCFLIP_ENABLE	(0x1<<7)
+#define OCMD_BUF_TYPE_MASK	(0x1<<5)
+#define OCMD_BUF_TYPE_FRAME	(0x0<<5)
+#define OCMD_BUF_TYPE_FIELD	(0x1<<5)
+#define OCMD_TEST_MODE		(0x1<<4)
+#define OCMD_BUFFER_SELECT	(0x3<<2)
+#define OCMD_BUFFER0		(0x0<<2)
+#define OCMD_BUFFER1		(0x1<<2)
+#define OCMD_FIELD_SELECT	(0x1<<2)
+#define OCMD_FIELD0		(0x0<<1)
+#define OCMD_FIELD1		(0x1<<1)
+#define OCMD_ENABLE		(0x1<<0)
+
+/* OCONFIG register */
+#define OCONF_PIPE_MASK		(0x1<<18)
+#define OCONF_PIPE_A		(0x0<<18)
+#define OCONF_PIPE_B		(0x1<<18)
+#define OCONF_GAMMA2_ENABLE	(0x1<<16)
+#define OCONF_CSC_MODE_BT601	(0x0<<5)
+#define OCONF_CSC_MODE_BT709	(0x1<<5)
+#define OCONF_CSC_BYPASS	(0x1<<4)
+#define OCONF_CC_OUT_8BIT	(0x1<<3)
+#define OCONF_TEST_MODE		(0x1<<2)
+#define OCONF_THREE_LINE_BUFFER	(0x1<<0)
+#define OCONF_TWO_LINE_BUFFER	(0x0<<0)
+
+/* DCLRKM (dst-key) register */
+#define DST_KEY_ENABLE		(0x1<<31)
+#define CLK_RGB24_MASK		0x0
+#define CLK_RGB16_MASK		0x070307
+#define CLK_RGB15_MASK		0x070707
+#define CLK_RGB8I_MASK		0xffffff
+
+#define RGB16_TO_COLORKEY(c) \
+	(((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
+#define RGB15_TO_COLORKEY(c) \
+	(((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
+
+/* overlay flip addr flag */
+#define OFC_UPDATE		0x1
+
+/* polyphase filter coefficients */
+#define N_HORIZ_Y_TAPS          5
+#define N_VERT_Y_TAPS           3
+#define N_HORIZ_UV_TAPS         3
+#define N_VERT_UV_TAPS          3
+#define N_PHASES                17
+#define MAX_TAPS                5
+
+/* memory buffered overlay registers */
+struct overlay_registers {
+	u32 OBUF_0Y;
+	u32 OBUF_1Y;
+	u32 OBUF_0U;
+	u32 OBUF_0V;
+	u32 OBUF_1U;
+	u32 OBUF_1V;
+	u32 OSTRIDE;
+	u32 YRGB_VPH;
+	u32 UV_VPH;
+	u32 HORZ_PH;
+	u32 INIT_PHS;
+	u32 DWINPOS;
+	u32 DWINSZ;
+	u32 SWIDTH;
+	u32 SWIDTHSW;
+	u32 SHEIGHT;
+	u32 YRGBSCALE;
+	u32 UVSCALE;
+	u32 OCLRC0;
+	u32 OCLRC1;
+	u32 DCLRKV;
+	u32 DCLRKM;
+	u32 SCLRKVH;
+	u32 SCLRKVL;
+	u32 SCLRKEN;
+	u32 OCONFIG;
+	u32 OCMD;
+	u32 RESERVED1; /* 0x6C */
+	u32 OSTART_0Y;
+	u32 OSTART_1Y;
+	u32 OSTART_0U;
+	u32 OSTART_0V;
+	u32 OSTART_1U;
+	u32 OSTART_1V;
+	u32 OTILEOFF_0Y;
+	u32 OTILEOFF_1Y;
+	u32 OTILEOFF_0U;
+	u32 OTILEOFF_0V;
+	u32 OTILEOFF_1U;
+	u32 OTILEOFF_1V;
+	u32 FASTHSCALE; /* 0xA0 */
+	u32 UVSCALEV; /* 0xA4 */
+	u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+	u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+	u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+	u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+	u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+	u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+	u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+	u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+	u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+};
+
+struct intel_overlay {
+	struct drm_device *dev;
+	struct intel_crtc *crtc;
+	struct drm_i915_gem_object *vid_bo;
+	struct drm_i915_gem_object *old_vid_bo;
+	int active;
+	int pfit_active;
+	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+	u32 color_key;
+	u32 brightness, contrast, saturation;
+	u32 old_xscale, old_yscale;
+	/* register access */
+	u32 flip_addr;
+	struct drm_i915_gem_object *reg_bo;
+	/* flip handling */
+	uint32_t last_flip_req;
+	void (*flip_tail)(struct intel_overlay *);
+};
+
+static struct overlay_registers __iomem *
+intel_overlay_map_regs(struct intel_overlay *overlay)
+{
+	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+	struct overlay_registers __iomem *regs;
+
+	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
+	else
+		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
+					 overlay->reg_bo->gtt_offset);
+
+	return regs;
+}
+
+static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
+				     struct overlay_registers __iomem *regs)
+{
+	if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+		io_mapping_unmap(regs);
+}
+
+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+					 void (*tail)(struct intel_overlay *))
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	int ret;
+
+	BUG_ON(overlay->last_flip_req);
+	ret = i915_add_request(ring, NULL, &overlay->last_flip_req);
+	if (ret)
+		return ret;
+
+	overlay->flip_tail = tail;
+	ret = i915_wait_seqno(ring, overlay->last_flip_req);
+	if (ret)
+		return ret;
+	i915_gem_retire_requests(dev);
+
+	overlay->last_flip_req = 0;
+	return 0;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_on(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	int ret;
+
+	BUG_ON(overlay->active);
+	overlay->active = 1;
+
+	WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return intel_overlay_do_wait_request(overlay, NULL);
+}
+
+/* overlay needs to be enabled in OCMD reg */
+static int intel_overlay_continue(struct intel_overlay *overlay,
+				  bool load_polyphase_filter)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	u32 flip_addr = overlay->flip_addr;
+	u32 tmp;
+	int ret;
+
+	BUG_ON(!overlay->active);
+
+	if (load_polyphase_filter)
+		flip_addr |= OFC_UPDATE;
+
+	/* check for underruns */
+	tmp = I915_READ(DOVSTA);
+	if (tmp & (1 << 17))
+		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_advance(ring);
+
+	return i915_add_request(ring, NULL, &overlay->last_flip_req);
+}
+
+static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
+{
+	struct drm_i915_gem_object *obj = overlay->old_vid_bo;
+
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(&obj->base);
+
+	overlay->old_vid_bo = NULL;
+}
+
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+	struct drm_i915_gem_object *obj = overlay->vid_bo;
+
+	/* never have the overlay hw on without showing a frame */
+	BUG_ON(!overlay->vid_bo);
+
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(&obj->base);
+	overlay->vid_bo = NULL;
+
+	overlay->crtc->overlay = NULL;
+	overlay->crtc = NULL;
+	overlay->active = 0;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_off(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	u32 flip_addr = overlay->flip_addr;
+	int ret;
+
+	BUG_ON(!overlay->active);
+
+	/* According to intel docs the overlay hw may hang (when switching
+	 * off) without loading the filter coeffs. It is however unclear whether
+	 * this applies to the disabling of the overlay or to the switching off
+	 * of the hw. Do it in both cases */
+	flip_addr |= OFC_UPDATE;
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	/* wait for overlay to go idle */
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	/* turn overlay off */
+	if (IS_I830(dev)) {
+		/* Workaround: Don't disable the overlay fully, since otherwise
+		 * it dies on the next OVERLAY_ON cmd. */
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
+	} else {
+		intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+		intel_ring_emit(ring, flip_addr);
+		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	}
+	intel_ring_advance(ring);
+
+	return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
+}
+
+/* recover from an interruption due to a signal
+ * We have to be careful not to repeat work forever and to make forward progress. */
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	int ret;
+
+	if (overlay->last_flip_req == 0)
+		return 0;
+
+	ret = i915_wait_seqno(ring, overlay->last_flip_req);
+	if (ret)
+		return ret;
+	i915_gem_retire_requests(dev);
+
+	if (overlay->flip_tail)
+		overlay->flip_tail(overlay);
+
+	overlay->last_flip_req = 0;
+	return 0;
+}
+
+/* Wait for pending overlay flip and release old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs
+ */
+static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	int ret;
+
+	/* Only wait if there is actually an old frame to release to
+	 * guarantee forward progress.
+	 */
+	if (!overlay->old_vid_bo)
+		return 0;
+
+	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+		/* synchronous slowpath */
+		ret = intel_ring_begin(ring, 2);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
+
+		ret = intel_overlay_do_wait_request(overlay,
+						    intel_overlay_release_old_vid_tail);
+		if (ret)
+			return ret;
+	}
+
+	intel_overlay_release_old_vid_tail(overlay);
+	return 0;
+}
+
+struct put_image_params {
+	int format;
+	short dst_x;
+	short dst_y;
+	short dst_w;
+	short dst_h;
+	short src_w;
+	short src_scan_h;
+	short src_scan_w;
+	short src_h;
+	short stride_Y;
+	short stride_UV;
+	int offset_Y;
+	int offset_U;
+	int offset_V;
+};
+
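+/* Bytes per 2-pixel group for packed YUV formats (YUV 4:2:2 packs two
+ * pixels into 4 bytes); used below for pixel alignment checks. */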
+static int packed_depth_bytes(u32 format)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+	case I915_OVERLAY_YUV422:
+		return 4;
+	case I915_OVERLAY_YUV411:
+		/* return 6; not implemented */
+	default:
+		return -EINVAL;
+	}
+}
+
+static int packed_width_bytes(u32 format, short width)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+	case I915_OVERLAY_YUV422:
+		return width << 1;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int uv_hsubsampling(u32 format)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+	case I915_OVERLAY_YUV422:
+	case I915_OVERLAY_YUV420:
+		return 2;
+	case I915_OVERLAY_YUV411:
+	case I915_OVERLAY_YUV410:
+		return 4;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int uv_vsubsampling(u32 format)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+	case I915_OVERLAY_YUV420:
+	case I915_OVERLAY_YUV410:
+		return 2;
+	case I915_OVERLAY_YUV422:
+	case I915_OVERLAY_YUV411:
+		return 1;
+	default:
+		return -EINVAL;
+	}
+}
+
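+/* Compute the SWIDTHSW register value: the number of 32-byte (gen2) or
+ * 64-byte cache lines spanned by [offset, offset + width), folded into
+ * the encoding the overlay hardware expects. */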
+static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+{
+	u32 mask, shift, ret;
+	if (IS_GEN2(dev)) {
+		mask = 0x1f;
+		shift = 5;
+	} else {
+		mask = 0x3f;
+		shift = 6;
+	}
+	ret = ((offset + width + mask) >> shift) - (offset >> shift);
+	if (!IS_GEN2(dev))
+		ret <<= 1;
+	ret -= 1;
+	return ret << 2;
+}
+
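+/* Static polyphase filter coefficients: N_PHASES phases of
+ * N_HORIZ_Y_TAPS (Y) and N_HORIZ_UV_TAPS (UV) taps each, loaded by
+ * update_polyphase_filter() whenever the scaling factors change. */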
+static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
+	0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
+	0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
+	0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
+	0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
+	0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
+	0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
+	0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
+	0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
+	0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
+	0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
+	0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
+	0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
+	0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
+	0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
+	0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
+	0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
+	0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+};
+
+static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+	0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
+	0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
+	0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
+	0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
+	0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
+	0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
+	0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
+	0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
+	0x3000, 0x0800, 0x3000
+};
+
+static void update_polyphase_filter(struct overlay_registers __iomem *regs)
+{
+	memcpy_toio(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+	memcpy_toio(regs->UV_HCOEFS, uv_static_hcoeffs,
+		    sizeof(uv_static_hcoeffs));
+}
+
+static bool update_scaling_factors(struct intel_overlay *overlay,
+				   struct overlay_registers __iomem *regs,
+				   struct put_image_params *params)
+{
+	/* fixed point with a 12 bit shift */
+	u32 xscale, yscale, xscale_UV, yscale_UV;
+#define FP_SHIFT 12
+#define FRACT_MASK 0xfff
+	bool scale_changed = false;
+	int uv_hscale = uv_hsubsampling(params->format);
+	int uv_vscale = uv_vsubsampling(params->format);
+
+	if (params->dst_w > 1)
+		xscale = ((params->src_scan_w - 1) << FP_SHIFT) /
+			params->dst_w;
+	else
+		xscale = 1 << FP_SHIFT;
+
+	if (params->dst_h > 1)
+		yscale = ((params->src_scan_h - 1) << FP_SHIFT) /
+			params->dst_h;
+	else
+		yscale = 1 << FP_SHIFT;
+
+	/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+	xscale_UV = xscale/uv_hscale;
+	yscale_UV = yscale/uv_vscale;
+	/* make the Y scale to UV scale ratio an exact multiple */
+	xscale = xscale_UV * uv_hscale;
+	yscale = yscale_UV * uv_vscale;
+	/*} else {
+	  xscale_UV = 0;
+	  yscale_UV = 0;
+	  }*/
+
+	if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+		scale_changed = true;
+	overlay->old_xscale = xscale;
+	overlay->old_yscale = yscale;
+
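+	/* As packed below: YRGBSCALE carries the vertical fraction in
+	 * bits 31:20, the horizontal integer part in 19:16 and the
+	 * horizontal fraction in 14:3; UVSCALEV holds both integer
+	 * vertical scales. */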
+	iowrite32(((yscale & FRACT_MASK) << 20) |
+		  ((xscale >> FP_SHIFT)  << 16) |
+		  ((xscale & FRACT_MASK) << 3),
+		 &regs->YRGBSCALE);
+
+	iowrite32(((yscale_UV & FRACT_MASK) << 20) |
+		  ((xscale_UV >> FP_SHIFT)  << 16) |
+		  ((xscale_UV & FRACT_MASK) << 3),
+		 &regs->UVSCALE);
+
+	iowrite32((((yscale    >> FP_SHIFT) << 16) |
+		   ((yscale_UV >> FP_SHIFT) << 0)),
+		 &regs->UVSCALEV);
+
+	if (scale_changed)
+		update_polyphase_filter(regs);
+
+	return scale_changed;
+}
+
+static void update_colorkey(struct intel_overlay *overlay,
+			    struct overlay_registers __iomem *regs)
+{
+	u32 key = overlay->color_key;
+
+	switch (overlay->crtc->base.fb->bits_per_pixel) {
+	case 8:
+		iowrite32(0, &regs->DCLRKV);
+		iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
+		break;
+
+	case 16:
+		if (overlay->crtc->base.fb->depth == 15) {
+			iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
+			iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
+				  &regs->DCLRKM);
+		} else {
+			iowrite32(RGB16_TO_COLORKEY(key), &regs->DCLRKV);
+			iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE,
+				  &regs->DCLRKM);
+		}
+		break;
+
+	case 24:
+	case 32:
+		iowrite32(key, &regs->DCLRKV);
+		iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
+		break;
+	}
+}
+
+static u32 overlay_cmd_reg(struct put_image_params *params)
+{
+	u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
+
+	if (params->format & I915_OVERLAY_YUV_PLANAR) {
+		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+		case I915_OVERLAY_YUV422:
+			cmd |= OCMD_YUV_422_PLANAR;
+			break;
+		case I915_OVERLAY_YUV420:
+			cmd |= OCMD_YUV_420_PLANAR;
+			break;
+		case I915_OVERLAY_YUV411:
+		case I915_OVERLAY_YUV410:
+			cmd |= OCMD_YUV_410_PLANAR;
+			break;
+		}
+	} else { /* YUV packed */
+		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+		case I915_OVERLAY_YUV422:
+			cmd |= OCMD_YUV_422_PACKED;
+			break;
+		case I915_OVERLAY_YUV411:
+			cmd |= OCMD_YUV_411_PACKED;
+			break;
+		}
+
+		switch (params->format & I915_OVERLAY_SWAP_MASK) {
+		case I915_OVERLAY_NO_SWAP:
+			break;
+		case I915_OVERLAY_UV_SWAP:
+			cmd |= OCMD_UV_SWAP;
+			break;
+		case I915_OVERLAY_Y_SWAP:
+			cmd |= OCMD_Y_SWAP;
+			break;
+		case I915_OVERLAY_Y_AND_UV_SWAP:
+			cmd |= OCMD_Y_AND_UV_SWAP;
+			break;
+		}
+	}
+
+	return cmd;
+}
+
+static int intel_overlay_do_put_image(struct intel_overlay *overlay,
+				      struct drm_i915_gem_object *new_bo,
+				      struct put_image_params *params)
+{
+	int ret, tmp_width;
+	struct overlay_registers __iomem *regs;
+	bool scale_changed = false;
+	struct drm_device *dev = overlay->dev;
+	u32 swidth, swidthsw, sheight, ostride;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	BUG_ON(!overlay);
+
+	ret = intel_overlay_release_old_vid(overlay);
+	if (ret != 0)
+		return ret;
+
+	ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
+	if (ret != 0)
+		return ret;
+
+	ret = i915_gem_object_put_fence(new_bo);
+	if (ret)
+		goto out_unpin;
+
+	if (!overlay->active) {
+		u32 oconfig;
+		regs = intel_overlay_map_regs(overlay);
+		if (!regs) {
+			ret = -ENOMEM;
+			goto out_unpin;
+		}
+		oconfig = OCONF_CC_OUT_8BIT;
+		if (IS_GEN4(overlay->dev))
+			oconfig |= OCONF_CSC_MODE_BT709;
+		oconfig |= overlay->crtc->pipe == 0 ?
+			OCONF_PIPE_A : OCONF_PIPE_B;
+		iowrite32(oconfig, &regs->OCONFIG);
+		intel_overlay_unmap_regs(overlay, regs);
+
+		ret = intel_overlay_on(overlay);
+		if (ret != 0)
+			goto out_unpin;
+	}
+
+	regs = intel_overlay_map_regs(overlay);
+	if (!regs) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
+
+	iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
+	iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
+
+	if (params->format & I915_OVERLAY_YUV_PACKED)
+		tmp_width = packed_width_bytes(params->format, params->src_w);
+	else
+		tmp_width = params->src_w;
+
+	swidth = params->src_w;
+	swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+	sheight = params->src_h;
+	iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+	ostride = params->stride_Y;
+
+	if (params->format & I915_OVERLAY_YUV_PLANAR) {
+		int uv_hscale = uv_hsubsampling(params->format);
+		int uv_vscale = uv_vsubsampling(params->format);
+		u32 tmp_U, tmp_V;
+		swidth |= (params->src_w/uv_hscale) << 16;
+		tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+				      params->src_w/uv_hscale);
+		tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+				      params->src_w/uv_hscale);
+		swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
+		sheight |= (params->src_h/uv_vscale) << 16;
+		iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
+		iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+		ostride |= params->stride_UV << 16;
+	}
+
+	iowrite32(swidth, &regs->SWIDTH);
+	iowrite32(swidthsw, &regs->SWIDTHSW);
+	iowrite32(sheight, &regs->SHEIGHT);
+	iowrite32(ostride, &regs->OSTRIDE);
+
+	scale_changed = update_scaling_factors(overlay, regs, params);
+
+	update_colorkey(overlay, regs);
+
+	iowrite32(overlay_cmd_reg(params), &regs->OCMD);
+
+	intel_overlay_unmap_regs(overlay, regs);
+
+	ret = intel_overlay_continue(overlay, scale_changed);
+	if (ret)
+		goto out_unpin;
+
+	overlay->old_vid_bo = overlay->vid_bo;
+	overlay->vid_bo = new_bo;
+
+	return 0;
+
+out_unpin:
+	i915_gem_object_unpin(new_bo);
+	return ret;
+}
+
+int intel_overlay_switch_off(struct intel_overlay *overlay)
+{
+	struct overlay_registers __iomem *regs;
+	struct drm_device *dev = overlay->dev;
+	int ret;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+	ret = intel_overlay_recover_from_interrupt(overlay);
+	if (ret != 0)
+		return ret;
+
+	if (!overlay->active)
+		return 0;
+
+	ret = intel_overlay_release_old_vid(overlay);
+	if (ret != 0)
+		return ret;
+
+	regs = intel_overlay_map_regs(overlay);
+	iowrite32(0, &regs->OCMD);
+	intel_overlay_unmap_regs(overlay, regs);
+
+	ret = intel_overlay_off(overlay);
+	if (ret != 0)
+		return ret;
+
+	intel_overlay_off_tail(overlay);
+	return 0;
+}
+
+static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+					  struct intel_crtc *crtc)
+{
+	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+
+	if (!crtc->active)
+		return -EINVAL;
+
+	/* can't use the overlay with double wide pipe */
+	if (INTEL_INFO(overlay->dev)->gen < 4 &&
+	    (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 pfit_control = I915_READ(PFIT_CONTROL);
+	u32 ratio;
+
+	/* XXX: This is not the same logic as in the xorg driver, but more in
+	 * line with the intel documentation for the i965
+	 */
+	if (INTEL_INFO(dev)->gen >= 4) {
+		/* on i965 use the PGM reg to read out the autoscaler values */
+		ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+	} else {
+		if (pfit_control & VERT_AUTO_SCALE)
+			ratio = I915_READ(PFIT_AUTO_RATIOS);
+		else
+			ratio = I915_READ(PFIT_PGM_RATIOS);
+		ratio >>= PFIT_VERT_SCALE_SHIFT;
+	}
+
+	overlay->pfit_vscale_ratio = ratio;
+}
+
+static int check_overlay_dst(struct intel_overlay *overlay,
+			     struct drm_intel_overlay_put_image *rec)
+{
+	struct drm_display_mode *mode = &overlay->crtc->base.mode;
+
+	if (rec->dst_x < mode->hdisplay &&
+	    rec->dst_x + rec->dst_width <= mode->hdisplay &&
+	    rec->dst_y < mode->vdisplay &&
+	    rec->dst_y + rec->dst_height <= mode->vdisplay)
+		return 0;
+	else
+		return -EINVAL;
+}
+
+static int check_overlay_scaling(struct put_image_params *rec)
+{
+	u32 tmp;
+
+	/* downscaling limit is 8.0 */
+	tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
+	if (tmp > 7)
+		return -EINVAL;
+	tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
+	if (tmp > 7)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int check_overlay_src(struct drm_device *dev,
+			     struct drm_intel_overlay_put_image *rec,
+			     struct drm_i915_gem_object *new_bo)
+{
+	int uv_hscale = uv_hsubsampling(rec->flags);
+	int uv_vscale = uv_vsubsampling(rec->flags);
+	u32 stride_mask;
+	int depth;
+	u32 tmp;
+
+	/* check src dimensions */
+	if (IS_845G(dev) || IS_I830(dev)) {
+		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
+		    rec->src_width  > IMAGE_MAX_WIDTH_LEGACY)
+			return -EINVAL;
+	} else {
+		if (rec->src_height > IMAGE_MAX_HEIGHT ||
+		    rec->src_width  > IMAGE_MAX_WIDTH)
+			return -EINVAL;
+	}
+
+	/* better safe than sorry, use 4 as the maximal subsampling ratio */
+	if (rec->src_height < N_VERT_Y_TAPS*4 ||
+	    rec->src_width  < N_HORIZ_Y_TAPS*4)
+		return -EINVAL;
+
+	/* check alignment constraints */
+	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+	case I915_OVERLAY_RGB:
+		/* not implemented */
+		return -EINVAL;
+
+	case I915_OVERLAY_YUV_PACKED:
+		if (uv_vscale != 1)
+			return -EINVAL;
+
+		depth = packed_depth_bytes(rec->flags);
+		if (depth < 0)
+			return depth;
+
+		/* ignore UV planes */
+		rec->stride_UV = 0;
+		rec->offset_U = 0;
+		rec->offset_V = 0;
+		/* check pixel alignment */
+		if (rec->offset_Y % depth)
+			return -EINVAL;
+		break;
+
+	case I915_OVERLAY_YUV_PLANAR:
+		if (uv_vscale < 0 || uv_hscale < 0)
+			return -EINVAL;
+		/* no offset restrictions for planar formats */
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (rec->src_width % uv_hscale)
+		return -EINVAL;
+
+	/* stride checking: strides must be 256-byte (830/845) or 64-byte aligned */
+	if (IS_I830(dev) || IS_845G(dev))
+		stride_mask = 255;
+	else
+		stride_mask = 63;
+
+	if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+		return -EINVAL;
+	if (IS_GEN4(dev) && rec->stride_Y < 512)
+		return -EINVAL;
+
+	tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+		4096 : 8192;
+	if (rec->stride_Y > tmp || rec->stride_UV > 2*1024)
+		return -EINVAL;
+
+	/* check buffer dimensions */
+	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+	case I915_OVERLAY_RGB:
+	case I915_OVERLAY_YUV_PACKED:
+		/* always 4 Y values per depth pixels */
+		if (packed_width_bytes(rec->flags, rec->src_width) > rec->stride_Y)
+			return -EINVAL;
+
+		tmp = rec->stride_Y*rec->src_height;
+		if (rec->offset_Y + tmp > new_bo->base.size)
+			return -EINVAL;
+		break;
+
+	case I915_OVERLAY_YUV_PLANAR:
+		if (rec->src_width > rec->stride_Y)
+			return -EINVAL;
+		if (rec->src_width/uv_hscale > rec->stride_UV)
+			return -EINVAL;
+
+		tmp = rec->stride_Y * rec->src_height;
+		if (rec->offset_Y + tmp > new_bo->base.size)
+			return -EINVAL;
+
+		tmp = rec->stride_UV * (rec->src_height / uv_vscale);
+		if (rec->offset_U + tmp > new_bo->base.size ||
+		    rec->offset_V + tmp > new_bo->base.size)
+			return -EINVAL;
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int intel_panel_fitter_pipe(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32  pfit_control;
+
+	/* i830 doesn't have a panel fitter */
+	if (IS_I830(dev))
+		return -1;
+
+	pfit_control = I915_READ(PFIT_CONTROL);
+
+	/* See if the panel fitter is in use */
+	if ((pfit_control & PFIT_ENABLE) == 0)
+		return -1;
+
+	/* 965 can place panel fitter on either pipe */
+	if (IS_GEN4(dev))
+		return (pfit_control >> 29) & 0x3;
+
+	/* older chips can only use pipe 1 */
+	return 1;
+}
+
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	struct drm_intel_overlay_put_image *put_image_rec = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay;
+	struct drm_mode_object *drmmode_obj;
+	struct intel_crtc *crtc;
+	struct drm_i915_gem_object *new_bo;
+	struct put_image_params *params;
+	int ret;
+
+	/* No need to check for DRIVER_MODESET - we don't set it up then. */
+	overlay = dev_priv->overlay;
+	if (!overlay) {
+		DRM_DEBUG("userspace bug: no overlay\n");
+		return -ENODEV;
+	}
+
+	if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
+		drm_modeset_lock_all(dev);
+		mutex_lock(&dev->struct_mutex);
+
+		ret = intel_overlay_switch_off(overlay);
+
+		mutex_unlock(&dev->struct_mutex);
+		drm_modeset_unlock_all(dev);
+
+		return ret;
+	}
+
+	params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
+	drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
+					   DRM_MODE_OBJECT_CRTC);
+	if (!drmmode_obj) {
+		ret = -ENOENT;
+		goto out_free;
+	}
+	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+
+	new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+						   put_image_rec->bo_handle));
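+	/* base is the first member of drm_i915_gem_object, so this is
+	 * effectively a NULL check on the GEM lookup result. */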
+	if (&new_bo->base == NULL) {
+		ret = -ENOENT;
+		goto out_free;
+	}
+
+	drm_modeset_lock_all(dev);
+	mutex_lock(&dev->struct_mutex);
+
+	if (new_bo->tiling_mode) {
+		DRM_ERROR("buffer used for overlay image can not be tiled\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	ret = intel_overlay_recover_from_interrupt(overlay);
+	if (ret != 0)
+		goto out_unlock;
+
+	if (overlay->crtc != crtc) {
+		struct drm_display_mode *mode = &crtc->base.mode;
+		ret = intel_overlay_switch_off(overlay);
+		if (ret != 0)
+			goto out_unlock;
+
+		ret = check_overlay_possible_on_crtc(overlay, crtc);
+		if (ret != 0)
+			goto out_unlock;
+
+		overlay->crtc = crtc;
+		crtc->overlay = overlay;
+
+		/* line too wide, i.e. one-line-mode */
+		if (mode->hdisplay > 1024 &&
+		    intel_panel_fitter_pipe(dev) == crtc->pipe) {
+			overlay->pfit_active = 1;
+			update_pfit_vscale_ratio(overlay);
+		} else
+			overlay->pfit_active = 0;
+	}
+
+	ret = check_overlay_dst(overlay, put_image_rec);
+	if (ret != 0)
+		goto out_unlock;
+
+	if (overlay->pfit_active) {
+		params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
+				 overlay->pfit_vscale_ratio);
+		/* shifting right rounds downwards, so add 1 */
+		params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
+				 overlay->pfit_vscale_ratio) + 1;
+	} else {
+		params->dst_y = put_image_rec->dst_y;
+		params->dst_h = put_image_rec->dst_height;
+	}
+	params->dst_x = put_image_rec->dst_x;
+	params->dst_w = put_image_rec->dst_width;
+
+	params->src_w = put_image_rec->src_width;
+	params->src_h = put_image_rec->src_height;
+	params->src_scan_w = put_image_rec->src_scan_width;
+	params->src_scan_h = put_image_rec->src_scan_height;
+	if (params->src_scan_h > params->src_h ||
+	    params->src_scan_w > params->src_w) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	ret = check_overlay_src(dev, put_image_rec, new_bo);
+	if (ret != 0)
+		goto out_unlock;
+	params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
+	params->stride_Y = put_image_rec->stride_Y;
+	params->stride_UV = put_image_rec->stride_UV;
+	params->offset_Y = put_image_rec->offset_Y;
+	params->offset_U = put_image_rec->offset_U;
+	params->offset_V = put_image_rec->offset_V;
+
+	/* Check scaling after src size to prevent a divide-by-zero. */
+	ret = check_overlay_scaling(params);
+	if (ret != 0)
+		goto out_unlock;
+
+	ret = intel_overlay_do_put_image(overlay, new_bo, params);
+	if (ret != 0)
+		goto out_unlock;
+
+	mutex_unlock(&dev->struct_mutex);
+	drm_modeset_unlock_all(dev);
+
+	kfree(params);
+
+	return 0;
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	drm_modeset_unlock_all(dev);
+	drm_gem_object_unreference_unlocked(&new_bo->base);
+out_free:
+	kfree(params);
+
+	return ret;
+}
+
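+/* OCLRC0 packs contrast above bit 18 and the signed brightness in the
+ * low byte; OCLRC1 holds the saturation. */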
+static void update_reg_attrs(struct intel_overlay *overlay,
+			     struct overlay_registers __iomem *regs)
+{
+	iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
+		  &regs->OCLRC0);
+	iowrite32(overlay->saturation, &regs->OCLRC1);
+}
+
+static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
+{
+	int i;
+
+	if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
+		return false;
+
+	for (i = 0; i < 3; i++) {
+		if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+			return false;
+	}
+
+	return true;
+}
+
+static bool check_gamma5_errata(u32 gamma5)
+{
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		if (((gamma5 >> i*8) & 0xff) == 0x80)
+			return false;
+	}
+
+	return true;
+}
+
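+/* The six gamma points must be strictly increasing per channel and fit
+ * in 24 bits; check_gamma5_errata() additionally rejects a 0x80 byte in
+ * any channel of gamma5 (hardware errata). */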
+static int check_gamma(struct drm_intel_overlay_attrs *attrs)
+{
+	if (!check_gamma_bounds(0, attrs->gamma0) ||
+	    !check_gamma_bounds(attrs->gamma0, attrs->gamma1) ||
+	    !check_gamma_bounds(attrs->gamma1, attrs->gamma2) ||
+	    !check_gamma_bounds(attrs->gamma2, attrs->gamma3) ||
+	    !check_gamma_bounds(attrs->gamma3, attrs->gamma4) ||
+	    !check_gamma_bounds(attrs->gamma4, attrs->gamma5) ||
+	    !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+		return -EINVAL;
+
+	if (!check_gamma5_errata(attrs->gamma5))
+		return -EINVAL;
+
+	return 0;
+}
+
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_intel_overlay_attrs *attrs = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay;
+	struct overlay_registers __iomem *regs;
+	int ret;
+
+	/* No need to check for DRIVER_MODESET - we don't set it up then. */
+	overlay = dev_priv->overlay;
+	if (!overlay) {
+		DRM_DEBUG("userspace bug: no overlay\n");
+		return -ENODEV;
+	}
+
+	drm_modeset_lock_all(dev);
+	mutex_lock(&dev->struct_mutex);
+
+	ret = -EINVAL;
+	if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+		attrs->color_key  = overlay->color_key;
+		attrs->brightness = overlay->brightness;
+		attrs->contrast   = overlay->contrast;
+		attrs->saturation = overlay->saturation;
+
+		if (!IS_GEN2(dev)) {
+			attrs->gamma0 = I915_READ(OGAMC0);
+			attrs->gamma1 = I915_READ(OGAMC1);
+			attrs->gamma2 = I915_READ(OGAMC2);
+			attrs->gamma3 = I915_READ(OGAMC3);
+			attrs->gamma4 = I915_READ(OGAMC4);
+			attrs->gamma5 = I915_READ(OGAMC5);
+		}
+	} else {
+		if (attrs->brightness < -128 || attrs->brightness > 127)
+			goto out_unlock;
+		if (attrs->contrast > 255)
+			goto out_unlock;
+		if (attrs->saturation > 1023)
+			goto out_unlock;
+
+		overlay->color_key  = attrs->color_key;
+		overlay->brightness = attrs->brightness;
+		overlay->contrast   = attrs->contrast;
+		overlay->saturation = attrs->saturation;
+
+		regs = intel_overlay_map_regs(overlay);
+		if (!regs) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+
+		update_reg_attrs(overlay, regs);
+
+		intel_overlay_unmap_regs(overlay, regs);
+
+		if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+			if (IS_GEN2(dev))
+				goto out_unlock;
+
+			if (overlay->active) {
+				ret = -EBUSY;
+				goto out_unlock;
+			}
+
+			ret = check_gamma(attrs);
+			if (ret)
+				goto out_unlock;
+
+			I915_WRITE(OGAMC0, attrs->gamma0);
+			I915_WRITE(OGAMC1, attrs->gamma1);
+			I915_WRITE(OGAMC2, attrs->gamma2);
+			I915_WRITE(OGAMC3, attrs->gamma3);
+			I915_WRITE(OGAMC4, attrs->gamma4);
+			I915_WRITE(OGAMC5, attrs->gamma5);
+		}
+	}
+
+	ret = 0;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	drm_modeset_unlock_all(dev);
+
+	return ret;
+}
+
+void intel_setup_overlay(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay;
+	struct drm_i915_gem_object *reg_bo;
+	struct overlay_registers __iomem *regs;
+	int ret;
+
+	if (!HAS_OVERLAY(dev))
+		return;
+
+	overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+	if (!overlay)
+		return;
+
+	mutex_lock(&dev->struct_mutex);
+	if (WARN_ON(dev_priv->overlay))
+		goto out_free;
+
+	overlay->dev = dev;
+
+	reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+	if (reg_bo == NULL)
+		reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+	if (reg_bo == NULL)
+		goto out_free;
+	overlay->reg_bo = reg_bo;
+
+	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+		ret = i915_gem_attach_phys_object(dev, reg_bo,
+						  I915_GEM_PHYS_OVERLAY_REGS,
+						  PAGE_SIZE);
+		if (ret) {
+			DRM_ERROR("failed to attach phys overlay regs\n");
+			goto out_free_bo;
+		}
+		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
+	} else {
+		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
+		if (ret) {
+			DRM_ERROR("failed to pin overlay register bo\n");
+			goto out_free_bo;
+		}
+		overlay->flip_addr = reg_bo->gtt_offset;
+
+		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
+		if (ret) {
+			DRM_ERROR("failed to move overlay register bo into the GTT\n");
+			goto out_unpin_bo;
+		}
+	}
+
+	/* init all values */
+	overlay->color_key = 0x0101fe;
+	overlay->brightness = -19;
+	overlay->contrast = 75;
+	overlay->saturation = 146;
+
+	regs = intel_overlay_map_regs(overlay);
+	if (!regs)
+		goto out_unpin_bo;
+
+	memset_io(regs, 0, sizeof(struct overlay_registers));
+	update_polyphase_filter(regs);
+	update_reg_attrs(overlay, regs);
+
+	intel_overlay_unmap_regs(overlay, regs);
+
+	dev_priv->overlay = overlay;
+	mutex_unlock(&dev->struct_mutex);
+	DRM_INFO("initialized overlay support\n");
+	return;
+
+out_unpin_bo:
+	if (!OVERLAY_NEEDS_PHYSICAL(dev))
+		i915_gem_object_unpin(reg_bo);
+out_free_bo:
+	drm_gem_object_unreference(&reg_bo->base);
+out_free:
+	mutex_unlock(&dev->struct_mutex);
+	kfree(overlay);
+	return;
+}
+
+void intel_cleanup_overlay(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev_priv->overlay)
+		return;
+
+	/* The BOs should be freed by the generic code already.
+	 * Furthermore, modesetting teardown happens beforehand, so the
+	 * hardware should be off already */
+	BUG_ON(dev_priv->overlay->active);
+
+	drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
+	kfree(dev_priv->overlay);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+struct intel_overlay_error_state {
+	struct overlay_registers regs;
+	unsigned long base;
+	u32 dovsta;
+	u32 isr;
+};
+
+static struct overlay_registers __iomem *
+intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+{
+	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+	struct overlay_registers __iomem *regs;
+
+	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+		/* Cast to make sparse happy, but it's wc memory anyway, so
+		 * equivalent to the wc io mapping on X86. */
+		regs = (struct overlay_registers __iomem *)
+			overlay->reg_bo->phys_obj->handle->vaddr;
+	else
+		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+						overlay->reg_bo->gtt_offset);
+
+	return regs;
+}
+
+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
+					struct overlay_registers __iomem *regs)
+{
+	if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+		io_mapping_unmap_atomic(regs);
+}
+
+struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay = dev_priv->overlay;
+	struct intel_overlay_error_state *error;
+	struct overlay_registers __iomem *regs;
+
+	if (!overlay || !overlay->active)
+		return NULL;
+
+	error = kmalloc(sizeof(*error), GFP_ATOMIC);
+	if (error == NULL)
+		return NULL;
+
+	error->dovsta = I915_READ(DOVSTA);
+	error->isr = I915_READ(ISR);
+	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+		error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
+	else
+		error->base = overlay->reg_bo->gtt_offset;
+
+	regs = intel_overlay_map_regs_atomic(overlay);
+	if (!regs)
+		goto err;
+
+	memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
+	intel_overlay_unmap_regs_atomic(overlay, regs);
+
+	return error;
+
+err:
+	kfree(error);
+	return NULL;
+}
+
+void
+intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error)
+{
+	seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
+		   error->dovsta, error->isr);
+	seq_printf(m, "  Register file at 0x%08lx:\n",
+		   error->base);
+
+#define P(x) seq_printf(m, "    " #x ":	0x%08x\n", error->regs.x)
+	P(OBUF_0Y);
+	P(OBUF_1Y);
+	P(OBUF_0U);
+	P(OBUF_0V);
+	P(OBUF_1U);
+	P(OBUF_1V);
+	P(OSTRIDE);
+	P(YRGB_VPH);
+	P(UV_VPH);
+	P(HORZ_PH);
+	P(INIT_PHS);
+	P(DWINPOS);
+	P(DWINSZ);
+	P(SWIDTH);
+	P(SWIDTHSW);
+	P(SHEIGHT);
+	P(YRGBSCALE);
+	P(UVSCALE);
+	P(OCLRC0);
+	P(OCLRC1);
+	P(DCLRKV);
+	P(DCLRKM);
+	P(SCLRKVH);
+	P(SCLRKVL);
+	P(SCLRKEN);
+	P(OCONFIG);
+	P(OCMD);
+	P(OSTART_0Y);
+	P(OSTART_1Y);
+	P(OSTART_0U);
+	P(OSTART_0V);
+	P(OSTART_1U);
+	P(OSTART_1V);
+	P(OTILEOFF_0Y);
+	P(OTILEOFF_1Y);
+	P(OTILEOFF_0U);
+	P(OTILEOFF_0V);
+	P(OTILEOFF_1U);
+	P(OTILEOFF_1V);
+	P(FASTHSCALE);
+	P(UVSCALEV);
+#undef P
+}
+#endif
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_panel.c b/linux-imx/drivers/gpu/drm/i915/intel_panel.c
new file mode 100644
index 0000000..33cb87f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_panel.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright © 2006-2010 Intel Corporation
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ *      Dave Airlie <airlied@linux.ie>
+ *      Jesse Barnes <jesse.barnes@intel.com>
+ *      Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/moduleparam.h>
+#include "intel_drv.h"
+
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
+
+void
+intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+		       struct drm_display_mode *adjusted_mode)
+{
+	adjusted_mode->hdisplay = fixed_mode->hdisplay;
+	adjusted_mode->hsync_start = fixed_mode->hsync_start;
+	adjusted_mode->hsync_end = fixed_mode->hsync_end;
+	adjusted_mode->htotal = fixed_mode->htotal;
+
+	adjusted_mode->vdisplay = fixed_mode->vdisplay;
+	adjusted_mode->vsync_start = fixed_mode->vsync_start;
+	adjusted_mode->vsync_end = fixed_mode->vsync_end;
+	adjusted_mode->vtotal = fixed_mode->vtotal;
+
+	adjusted_mode->clock = fixed_mode->clock;
+}
+
+/* adjusted_mode has been preset to be the panel's fixed mode */
+void
+intel_pch_panel_fitting(struct drm_device *dev,
+			int fitting_mode,
+			const struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int x, y, width, height;
+
+	x = y = width = height = 0;
+
+	/* Native modes don't need fitting */
+	if (adjusted_mode->hdisplay == mode->hdisplay &&
+	    adjusted_mode->vdisplay == mode->vdisplay)
+		goto done;
+
+	switch (fitting_mode) {
+	case DRM_MODE_SCALE_CENTER:
+		width = mode->hdisplay;
+		height = mode->vdisplay;
+		x = (adjusted_mode->hdisplay - width + 1) / 2;
+		y = (adjusted_mode->vdisplay - height + 1) / 2;
+		break;
+
+	case DRM_MODE_SCALE_ASPECT:
+		/* Scale but preserve the aspect ratio */
+		{
+			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+			if (scaled_width > scaled_height) { /* pillar */
+				width = scaled_height / mode->vdisplay;
+				if (width & 1)
+					width++;
+				x = (adjusted_mode->hdisplay - width + 1) / 2;
+				y = 0;
+				height = adjusted_mode->vdisplay;
+			} else if (scaled_width < scaled_height) { /* letter */
+				height = scaled_width / mode->hdisplay;
+				if (height & 1)
+					height++;
+				y = (adjusted_mode->vdisplay - height + 1) / 2;
+				x = 0;
+				width = adjusted_mode->hdisplay;
+			} else {
+				x = y = 0;
+				width = adjusted_mode->hdisplay;
+				height = adjusted_mode->vdisplay;
+			}
+		}
+		break;
+
+	default:
+	case DRM_MODE_SCALE_FULLSCREEN:
+		x = y = 0;
+		width = adjusted_mode->hdisplay;
+		height = adjusted_mode->vdisplay;
+		break;
+	}
+
+done:
+	dev_priv->pch_pf_pos = (x << 16) | y;
+	dev_priv->pch_pf_size = (width << 16) | height;
+}
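+
+/*
+ * Worked example for the aspect-preserving case above (illustrative
+ * numbers): with a 1920x1080 panel (adjusted_mode) and a 1024x768
+ * request (mode), scaled_width = 1920 * 768 = 1474560 exceeds
+ * scaled_height = 1024 * 1080 = 1105920, so the image is pillarboxed:
+ * width = 1105920 / 768 = 1440 and x = (1920 - 1440 + 1) / 2 = 240.
+ */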
+
+static int is_backlight_combination_mode(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen >= 4)
+		return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+
+	if (IS_GEN2(dev))
+		return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+
+	return 0;
+}
+
+static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	/* Restore the CTL value if it was lost, e.g. after a GPU reset */
+
+	if (HAS_PCH_SPLIT(dev_priv->dev)) {
+		val = I915_READ(BLC_PWM_PCH_CTL2);
+		if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
+			dev_priv->regfile.saveBLC_PWM_CTL2 = val;
+		} else if (val == 0) {
+			val = dev_priv->regfile.saveBLC_PWM_CTL2;
+			I915_WRITE(BLC_PWM_PCH_CTL2, val);
+		}
+	} else {
+		val = I915_READ(BLC_PWM_CTL);
+		if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
+			dev_priv->regfile.saveBLC_PWM_CTL = val;
+			if (INTEL_INFO(dev)->gen >= 4)
+				dev_priv->regfile.saveBLC_PWM_CTL2 =
+					I915_READ(BLC_PWM_CTL2);
+		} else if (val == 0) {
+			val = dev_priv->regfile.saveBLC_PWM_CTL;
+			I915_WRITE(BLC_PWM_CTL, val);
+			if (INTEL_INFO(dev)->gen >= 4)
+				I915_WRITE(BLC_PWM_CTL2,
+					   dev_priv->regfile.saveBLC_PWM_CTL2);
+		}
+	}
+
+	return val;
+}
+
+static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
+{
+	u32 max;
+
+	max = i915_read_blc_pwm_ctl(dev);
+
+	if (HAS_PCH_SPLIT(dev)) {
+		max >>= 16;
+	} else {
+		if (INTEL_INFO(dev)->gen < 4)
+			max >>= 17;
+		else
+			max >>= 16;
+
+		if (is_backlight_combination_mode(dev))
+			max *= 0xff;
+	}
+
+	return max;
+}
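+
+/*
+ * The maximum duty cycle lives in the high half of BLC_PWM_CTL, hence
+ * the shift by 16; gen2/3 parts also use bit 16 for the legacy-mode
+ * flag, hence the extra shift there. In combination mode the PWM duty
+ * cycle is further scaled by the 8-bit LBPC value, so the achievable
+ * maximum grows by a factor of 0xff.
+ */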
+
+u32 intel_panel_get_max_backlight(struct drm_device *dev)
+{
+	u32 max;
+
+	max = _intel_panel_get_max_backlight(dev);
+	if (max == 0) {
+		/* XXX add code here to query mode clock or hardware clock
+		 * and program max PWM appropriately.
+		 */
+		pr_warn_once("fixme: max PWM is zero\n");
+		return 1;
+	}
+
+	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
+	return max;
+}
+
+static int i915_panel_invert_brightness;
+MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
+	"(-1 force normal, 0 machine defaults, 1 force inversion), please "
+	"report PCI device ID, subsystem vendor and subsystem device ID "
+	"to dri-devel@lists.freedesktop.org if your machine needs it. "
+	"It will then be included in an upcoming module version.");
+module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
+
+static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (i915_panel_invert_brightness < 0)
+		return val;
+
+	if (i915_panel_invert_brightness > 0 ||
+	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
+		return intel_panel_get_max_backlight(dev) - val;
+
+	return val;
+}
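+
+/* For illustration: with a maximum of 255, a requested value of 50 maps
+ * to 255 - 50 = 205 when inversion is in effect. */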
+
+static u32 intel_panel_get_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+	} else {
+		val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+		if (INTEL_INFO(dev)->gen < 4)
+			val >>= 1;
+
+		if (is_backlight_combination_mode(dev)) {
+			u8 lbpc;
+
+			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+			val *= lbpc;
+		}
+	}
+
+	val = intel_panel_compute_brightness(dev, val);
+	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+	return val;
+}
+
+static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(BLC_PWM_CPU_CTL, val | level);
+}
+
+static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+	level = intel_panel_compute_brightness(dev, level);
+
+	if (HAS_PCH_SPLIT(dev))
+		return intel_pch_panel_set_backlight(dev, level);
+
+	if (is_backlight_combination_mode(dev)) {
+		u32 max = intel_panel_get_max_backlight(dev);
+		u8 lbpc;
+
+		lbpc = level * 0xfe / max + 1;
+		level /= lbpc;
+		pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
+	}
+
+	tmp = I915_READ(BLC_PWM_CTL);
+	if (INTEL_INFO(dev)->gen < 4)
+		level <<= 1;
+	tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(BLC_PWM_CTL, tmp | level);
+}
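+
+/*
+ * Illustrative numbers for the combination-mode split above: level =
+ * 1000 with max = 2000 gives lbpc = 1000 * 0xfe / 2000 + 1 = 128 and a
+ * PWM field of 1000 / 128 = 7; the product 7 * 128 = 896 that
+ * intel_panel_get_backlight() reconstructs approximates the requested
+ * level, modulo quantization.
+ */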
+
+void intel_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->backlight.level = level;
+	if (dev_priv->backlight.device)
+		dev_priv->backlight.device->props.brightness = level;
+
+	if (dev_priv->backlight.enabled)
+		intel_panel_actually_set_backlight(dev, level);
+}
+
+void intel_panel_disable_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->backlight.enabled = false;
+	intel_panel_actually_set_backlight(dev, 0);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		uint32_t reg, tmp;
+
+		reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+
+		I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
+
+		if (HAS_PCH_SPLIT(dev)) {
+			tmp = I915_READ(BLC_PWM_PCH_CTL1);
+			tmp &= ~BLM_PCH_PWM_ENABLE;
+			I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+		}
+	}
+}
+
+void intel_panel_enable_backlight(struct drm_device *dev,
+				  enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->backlight.level == 0) {
+		dev_priv->backlight.level = intel_panel_get_max_backlight(dev);
+		if (dev_priv->backlight.device)
+			dev_priv->backlight.device->props.brightness =
+				dev_priv->backlight.level;
+	}
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		uint32_t reg, tmp;
+
+		reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+
+		tmp = I915_READ(reg);
+
+		/* Note that this can also get called through dpms changes. And
+		 * we don't track the backlight dpms state, hence check whether
+		 * we have to do anything first. */
+		if (tmp & BLM_PWM_ENABLE)
+			goto set_level;
+
+		if (INTEL_INFO(dev)->num_pipes == 3)
+			tmp &= ~BLM_PIPE_SELECT_IVB;
+		else
+			tmp &= ~BLM_PIPE_SELECT;
+
+		tmp |= BLM_PIPE(pipe);
+		tmp &= ~BLM_PWM_ENABLE;
+
+		I915_WRITE(reg, tmp);
+		POSTING_READ(reg);
+		I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
+
+		if (HAS_PCH_SPLIT(dev) &&
+		    !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
+			tmp = I915_READ(BLC_PWM_PCH_CTL1);
+			tmp |= BLM_PCH_PWM_ENABLE;
+			tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
+			I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+		}
+	}
+
+set_level:
+	/* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
+	 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
+	 * registers are set.
+	 */
+	dev_priv->backlight.enabled = true;
+	intel_panel_actually_set_backlight(dev, dev_priv->backlight.level);
+}
+
+static void intel_panel_init_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->backlight.level = intel_panel_get_backlight(dev);
+	dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
+}
+
+enum drm_connector_status
+intel_panel_detect(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Assume that the BIOS does not lie through the OpRegion... */
+	if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
+		return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
+			connector_status_connected :
+			connector_status_disconnected;
+	}
+
+	switch (i915_panel_ignore_lid) {
+	case -2:
+		return connector_status_connected;
+	case -1:
+		return connector_status_disconnected;
+	default:
+		return connector_status_unknown;
+	}
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+static int intel_panel_update_status(struct backlight_device *bd)
+{
+	struct drm_device *dev = bl_get_data(bd);
+	intel_panel_set_backlight(dev, bd->props.brightness);
+	return 0;
+}
+
+static int intel_panel_get_brightness(struct backlight_device *bd)
+{
+	struct drm_device *dev = bl_get_data(bd);
+	return intel_panel_get_backlight(dev);
+}
+
+static const struct backlight_ops intel_panel_bl_ops = {
+	.update_status = intel_panel_update_status,
+	.get_brightness = intel_panel_get_brightness,
+};
+
+int intel_panel_setup_backlight(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct backlight_properties props;
+
+	intel_panel_init_backlight(dev);
+
+	if (WARN_ON(dev_priv->backlight.device))
+		return -ENODEV;
+
+	memset(&props, 0, sizeof(props));
+	props.type = BACKLIGHT_RAW;
+	props.brightness = dev_priv->backlight.level;
+	props.max_brightness = _intel_panel_get_max_backlight(dev);
+	if (props.max_brightness == 0) {
+		DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
+		return -ENODEV;
+	}
+	dev_priv->backlight.device =
+		backlight_device_register("intel_backlight",
+					  &connector->kdev, dev,
+					  &intel_panel_bl_ops, &props);
+
+	if (IS_ERR(dev_priv->backlight.device)) {
+		DRM_ERROR("Failed to register backlight: %ld\n",
+			  PTR_ERR(dev_priv->backlight.device));
+		dev_priv->backlight.device = NULL;
+		return -ENODEV;
+	}
+	return 0;
+}
+
+void intel_panel_destroy_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (dev_priv->backlight.device) {
+		backlight_device_unregister(dev_priv->backlight.device);
+		dev_priv->backlight.device = NULL;
+	}
+}
+#else
+int intel_panel_setup_backlight(struct drm_connector *connector)
+{
+	intel_panel_init_backlight(connector->dev);
+	return 0;
+}
+
+void intel_panel_destroy_backlight(struct drm_device *dev)
+{
+	return;
+}
+#endif
+
+int intel_panel_init(struct intel_panel *panel,
+		     struct drm_display_mode *fixed_mode)
+{
+	panel->fixed_mode = fixed_mode;
+
+	return 0;
+}
+
+void intel_panel_fini(struct intel_panel *panel)
+{
+	struct intel_connector *intel_connector =
+		container_of(panel, struct intel_connector, panel);
+
+	if (panel->fixed_mode)
+		drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_pm.c b/linux-imx/drivers/gpu/drm/i915/intel_pm.c
new file mode 100644
index 0000000..94ad6bc
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_pm.c
@@ -0,0 +1,4657 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eugeni Dodonov <eugeni.dodonov@intel.com>
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "../../../platform/x86/intel_ips.h"
+#include <linux/module.h>
+
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
+
+/* FBC, or Frame Buffer Compression, is a technique employed to compress the
+ * framebuffer contents in-memory, aiming at reducing the required bandwidth
+ * during in-memory transfers and, therefore, reduce the power consumption.
+ *
+ * The benefits of FBC are mostly visible with solid backgrounds and
+ * variation-less patterns.
+ *
+ * FBC-related functionality can be enabled by means of the
+ * i915.i915_enable_fbc parameter.
+ */
+
+static bool intel_crtc_active(struct drm_crtc *crtc)
+{
+	/* Be paranoid as we can arrive here with only partial
+	 * state retrieved from the hardware during setup.
+	 */
+	return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
+}
+
+static void i8xx_disable_fbc(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 fbc_ctl;
+
+	/* Disable compression */
+	fbc_ctl = I915_READ(FBC_CONTROL);
+	if ((fbc_ctl & FBC_CTL_EN) == 0)
+		return;
+
+	fbc_ctl &= ~FBC_CTL_EN;
+	I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+	/* Wait for compressing bit to clear */
+	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+		DRM_DEBUG_KMS("FBC idle timed out\n");
+		return;
+	}
+
+	DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_framebuffer *fb = crtc->fb;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int cfb_pitch;
+	int plane, i;
+	u32 fbc_ctl, fbc_ctl2;
+
+	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+	if (fb->pitches[0] < cfb_pitch)
+		cfb_pitch = fb->pitches[0];
+
+	/* FBC_CTL wants 64B units */
+	cfb_pitch = (cfb_pitch / 64) - 1;
+	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+
+	/* Clear old tags */
+	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
+		I915_WRITE(FBC_TAG + (i * 4), 0);
+
+	/* Set it up... */
+	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+	fbc_ctl2 |= plane;
+	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
+	I915_WRITE(FBC_FENCE_OFF, crtc->y);
+
+	/* enable it... */
+	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+	if (IS_I945GM(dev))
+		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
+	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
+	fbc_ctl |= obj->fence_reg;
+	I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d\n",
+		      cfb_pitch, crtc->y, intel_crtc->plane);
+}
+
+static bool i8xx_fbc_enabled(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+}
+
+static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_framebuffer *fb = crtc->fb;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+	unsigned long stall_watermark = 200;
+	u32 dpfc_ctl;
+
+	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
+	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
+
+	/* enable it... */
+	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+
+	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+static void g4x_disable_fbc(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpfc_ctl;
+
+	/* Disable compression */
+	dpfc_ctl = I915_READ(DPFC_CONTROL);
+	if (dpfc_ctl & DPFC_CTL_EN) {
+		dpfc_ctl &= ~DPFC_CTL_EN;
+		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+
+		DRM_DEBUG_KMS("disabled FBC\n");
+	}
+}
+
+static bool g4x_fbc_enabled(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 blt_ecoskpd;
+
+	/* Make sure blitter notifies FBC of writes */
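+	/* ECOSKPD appears to be a masked register: the write at LOCK_SHIFT
+	 * arms the write mask so that the following set and clear of
+	 * GEN6_BLITTER_FBC_NOTIFY actually latch. */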
+	gen6_gt_force_wake_get(dev_priv);
+	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+		GEN6_BLITTER_LOCK_SHIFT;
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+			 GEN6_BLITTER_LOCK_SHIFT);
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	POSTING_READ(GEN6_BLITTER_ECOSKPD);
+	gen6_gt_force_wake_put(dev_priv);
+}
+
+static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_framebuffer *fb = crtc->fb;
+	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
+	unsigned long stall_watermark = 200;
+	u32 dpfc_ctl;
+
+	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+	dpfc_ctl &= DPFC_RESERVED;
+	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+	/* Set persistent mode for front-buffer rendering, ala X. */
+	/* Set persistent mode for front-buffer rendering, a la X. */
+	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+
+	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+	/* enable it... */
+	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
+	if (IS_GEN6(dev)) {
+		I915_WRITE(SNB_DPFC_CTL_SA,
+			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
+		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+		sandybridge_blit_fbc_update(dev);
+	}
+
+	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+static void ironlake_disable_fbc(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dpfc_ctl;
+
+	/* Disable compression */
+	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+	if (dpfc_ctl & DPFC_CTL_EN) {
+		dpfc_ctl &= ~DPFC_CTL_EN;
+		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+
+		DRM_DEBUG_KMS("disabled FBC\n");
+	}
+}
+
+static bool ironlake_fbc_enabled(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->display.fbc_enabled)
+		return false;
+
+	return dev_priv->display.fbc_enabled(dev);
+}
+
+static void intel_fbc_work_fn(struct work_struct *__work)
+{
+	struct intel_fbc_work *work =
+		container_of(to_delayed_work(__work),
+			     struct intel_fbc_work, work);
+	struct drm_device *dev = work->crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev->struct_mutex);
+	if (work == dev_priv->fbc_work) {
+		/* Double check that we haven't switched fb without cancelling
+		 * the prior work.
+		 */
+		if (work->crtc->fb == work->fb) {
+			dev_priv->display.enable_fbc(work->crtc,
+						     work->interval);
+
+			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
+			dev_priv->cfb_fb = work->crtc->fb->base.id;
+			dev_priv->cfb_y = work->crtc->y;
+		}
+
+		dev_priv->fbc_work = NULL;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	kfree(work);
+}
+
+static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->fbc_work == NULL)
+		return;
+
+	DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+	/* Synchronisation is provided by struct_mutex and checking of
+	 * dev_priv->fbc_work, so we can perform the cancellation
+	 * entirely asynchronously.
+	 */
+	if (cancel_delayed_work(&dev_priv->fbc_work->work))
+		/* work was cancelled before it ran, clean up */
+		kfree(dev_priv->fbc_work);
+
+	/* Mark the work as no longer wanted so that if it does
+	 * wake-up (because the work was already running and waiting
+	 * for our mutex), it will discover that it is no longer
+	 * necessary to run.
+	 */
+	dev_priv->fbc_work = NULL;
+}
+
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+	struct intel_fbc_work *work;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->display.enable_fbc)
+		return;
+
+	intel_cancel_fbc_work(dev_priv);
+
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (work == NULL) {
+		dev_priv->display.enable_fbc(crtc, interval);
+		return;
+	}
+
+	work->crtc = crtc;
+	work->fb = crtc->fb;
+	work->interval = interval;
+	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
+
+	dev_priv->fbc_work = work;
+
+	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+
+	/* Delay the actual enabling to let pageflipping cease and the
+	 * display to settle before starting the compression. Note that
+	 * this delay also serves a second purpose: it allows for a
+	 * vblank to pass after disabling the FBC before we attempt
+	 * to modify the control registers.
+	 *
+	 * A more complicated solution would involve tracking vblanks
+	 * following the termination of the page-flipping sequence
+	 * and indeed performing the enable as a co-routine and not
+	 * waiting synchronously upon the vblank.
+	 */
+	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_cancel_fbc_work(dev_priv);
+
+	if (!dev_priv->display.disable_fbc)
+		return;
+
+	dev_priv->display.disable_fbc(dev);
+	dev_priv->cfb_plane = -1;
+}
+
+/**
+ * intel_update_fbc - enable/disable FBC as needed
+ * @dev: the drm_device
+ *
+ * Set up the framebuffer compression hardware at mode set time.  We
+ * enable it if possible:
+ *   - plane A only (on pre-965)
+ *   - no pixel multiply/line duplication
+ *   - no alpha buffer discard
+ *   - no dual wide
+ *   - framebuffer <= 2048 in width, 1536 in height
+ *
+ * We can't assume that any compression will take place (worst case),
+ * so the compressed buffer has to be the same size as the uncompressed
+ * one.  It also must reside (along with the line length buffer) in
+ * stolen memory.
+ *
+ * We need to enable/disable FBC on a global basis.
+ */
+void intel_update_fbc(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = NULL, *tmp_crtc;
+	struct intel_crtc *intel_crtc;
+	struct drm_framebuffer *fb;
+	struct intel_framebuffer *intel_fb;
+	struct drm_i915_gem_object *obj;
+	int enable_fbc;
+
+	if (!i915_powersave)
+		return;
+
+	if (!I915_HAS_FBC(dev))
+		return;
+
+	/*
+	 * If FBC is already on, we just have to verify that we can
+	 * keep it that way...
+	 * Need to disable if:
+	 *   - more than one pipe is active
+	 *   - changing FBC params (stride, fence, mode)
+	 *   - new fb is too large to fit in compressed buffer
+	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
+	 */
+	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
+		if (intel_crtc_active(tmp_crtc) &&
+		    !to_intel_crtc(tmp_crtc)->primary_disabled) {
+			if (crtc) {
+				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
+				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+				goto out_disable;
+			}
+			crtc = tmp_crtc;
+		}
+	}
+
+	if (!crtc || crtc->fb == NULL) {
+		DRM_DEBUG_KMS("no output, disabling\n");
+		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+		goto out_disable;
+	}
+
+	intel_crtc = to_intel_crtc(crtc);
+	fb = crtc->fb;
+	intel_fb = to_intel_framebuffer(fb);
+	obj = intel_fb->obj;
+
+	enable_fbc = i915_enable_fbc;
+	if (enable_fbc < 0) {
+		DRM_DEBUG_KMS("fbc set to per-chip default\n");
+		enable_fbc = 1;
+		if (INTEL_INFO(dev)->gen <= 6)
+			enable_fbc = 0;
+	}
+	if (!enable_fbc) {
+		DRM_DEBUG_KMS("fbc disabled per module param\n");
+		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+		goto out_disable;
+	}
+	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
+	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+		DRM_DEBUG_KMS("mode incompatible with compression, "
+			      "disabling\n");
+		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+		goto out_disable;
+	}
+	if ((crtc->mode.hdisplay > 2048) ||
+	    (crtc->mode.vdisplay > 1536)) {
+		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+		goto out_disable;
+	}
+	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
+		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+		goto out_disable;
+	}
+
+	/* The use of a CPU fence is mandatory in order to detect writes
+	 * by the CPU to the scanout and trigger updates to the FBC.
+	 */
+	if (obj->tiling_mode != I915_TILING_X ||
+	    obj->fence_reg == I915_FENCE_REG_NONE) {
+		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
+		dev_priv->no_fbc_reason = FBC_NOT_TILED;
+		goto out_disable;
+	}
+
+	/* If the kernel debugger is active, always disable compression */
+	if (in_dbg_master())
+		goto out_disable;
+
+	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+		DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
+		DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+		DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+		goto out_disable;
+	}
+
+	/* If the scanout has not changed, don't modify the FBC settings.
+	 * Note that we make the fundamental assumption that the fb->obj
+	 * cannot be unpinned (and have its GTT offset and fence revoked)
+	 * without first being decoupled from the scanout and FBC disabled.
+	 */
+	if (dev_priv->cfb_plane == intel_crtc->plane &&
+	    dev_priv->cfb_fb == fb->base.id &&
+	    dev_priv->cfb_y == crtc->y)
+		return;
+
+	if (intel_fbc_enabled(dev)) {
+		/* We update FBC along two paths, after changing fb/crtc
+		 * configuration (modeswitching) and after page-flipping
+		 * finishes. For the latter, we know that not only did
+		 * we disable the FBC at the start of the page-flip
+		 * sequence, but also more than one vblank has passed.
+		 *
+		 * For the former case of modeswitching, it is possible
+		 * to switch between two FBC valid configurations
+		 * instantaneously so we do need to disable the FBC
+		 * before we can modify its control registers. We also
+		 * have to wait for the next vblank for that to take
+		 * effect. However, since we delay enabling FBC we can
+		 * assume that a vblank has passed since disabling and
+		 * that we can safely alter the registers in the deferred
+		 * callback.
+		 *
+		 * In the scenario that we go from a valid to invalid
+		 * and then back to valid FBC configuration we have
+		 * no strict enforcement that a vblank occurred since
+		 * disabling the FBC. However, along all current pipe
+		 * disabling paths we do need to wait for a vblank at
+		 * some point. And we wait before enabling FBC anyway.
+		 */
+		DRM_DEBUG_KMS("disabling active FBC for update\n");
+		intel_disable_fbc(dev);
+	}
+
+	intel_enable_fbc(crtc, 500);
+	return;
+
+out_disable:
+	/* Multiple disables should be harmless */
+	if (intel_fbc_enabled(dev)) {
+		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+		intel_disable_fbc(dev);
+	}
+	i915_gem_stolen_cleanup_compression(dev);
+}
+
+static void i915_pineview_get_mem_freq(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	tmp = I915_READ(CLKCFG);
+
+	switch (tmp & CLKCFG_FSB_MASK) {
+	case CLKCFG_FSB_533:
+		dev_priv->fsb_freq = 533; /* 133*4 */
+		break;
+	case CLKCFG_FSB_800:
+		dev_priv->fsb_freq = 800; /* 200*4 */
+		break;
+	case CLKCFG_FSB_667:
+		dev_priv->fsb_freq =  667; /* 167*4 */
+		break;
+	case CLKCFG_FSB_400:
+		dev_priv->fsb_freq = 400; /* 100*4 */
+		break;
+	}
+
+	switch (tmp & CLKCFG_MEM_MASK) {
+	case CLKCFG_MEM_533:
+		dev_priv->mem_freq = 533;
+		break;
+	case CLKCFG_MEM_667:
+		dev_priv->mem_freq = 667;
+		break;
+	case CLKCFG_MEM_800:
+		dev_priv->mem_freq = 800;
+		break;
+	}
+
+	/* detect pineview DDR3 setting */
+	tmp = I915_READ(CSHRDDR3CTL);
+	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
+}
+
+static void i915_ironlake_get_mem_freq(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u16 ddrpll, csipll;
+
+	ddrpll = I915_READ16(DDRMPLL1);
+	csipll = I915_READ16(CSIPLL0);
+
+	switch (ddrpll & 0xff) {
+	case 0xc:
+		dev_priv->mem_freq = 800;
+		break;
+	case 0x10:
+		dev_priv->mem_freq = 1066;
+		break;
+	case 0x14:
+		dev_priv->mem_freq = 1333;
+		break;
+	case 0x18:
+		dev_priv->mem_freq = 1600;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
+				 ddrpll & 0xff);
+		dev_priv->mem_freq = 0;
+		break;
+	}
+
+	dev_priv->ips.r_t = dev_priv->mem_freq;
+
+	switch (csipll & 0x3ff) {
+	case 0x00c:
+		dev_priv->fsb_freq = 3200;
+		break;
+	case 0x00e:
+		dev_priv->fsb_freq = 3733;
+		break;
+	case 0x010:
+		dev_priv->fsb_freq = 4266;
+		break;
+	case 0x012:
+		dev_priv->fsb_freq = 4800;
+		break;
+	case 0x014:
+		dev_priv->fsb_freq = 5333;
+		break;
+	case 0x016:
+		dev_priv->fsb_freq = 5866;
+		break;
+	case 0x018:
+		dev_priv->fsb_freq = 6400;
+		break;
+	default:
+		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
+				 csipll & 0x3ff);
+		dev_priv->fsb_freq = 0;
+		break;
+	}
+
+	if (dev_priv->fsb_freq == 3200) {
+		dev_priv->ips.c_m = 0;
+	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
+		dev_priv->ips.c_m = 1;
+	} else {
+		dev_priv->ips.c_m = 2;
+	}
+}
+
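+/* Column order below (matched by intel_get_cxsr_latency() and consumed
+ * by pineview_update_wm()): is_desktop, is_ddr3, fsb_freq, mem_freq,
+ * display_sr, cursor_sr, display_hpll_disable, cursor_hpll_disable. */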
+static const struct cxsr_latency cxsr_latency_table[] = {
+	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
+	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
+	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
+	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
+	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
+
+	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
+	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
+	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
+	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
+	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
+
+	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
+	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
+	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
+	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
+	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
+
+	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
+	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
+	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
+	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
+	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
+
+	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
+	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
+	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
+	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
+	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
+
+	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
+	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
+	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
+	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
+	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
+};
+
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+							 int is_ddr3,
+							 int fsb,
+							 int mem)
+{
+	const struct cxsr_latency *latency;
+	int i;
+
+	if (fsb == 0 || mem == 0)
+		return NULL;
+
+	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
+		latency = &cxsr_latency_table[i];
+		if (is_desktop == latency->is_desktop &&
+		    is_ddr3 == latency->is_ddr3 &&
+		    fsb == latency->fsb_freq && mem == latency->mem_freq)
+			return latency;
+	}
+
+	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+
+	return NULL;
+}
+
+static void pineview_disable_cxsr(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* deactivate cxsr */
+	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+}
+
+/*
+ * Latency for FIFO fetches is dependent on several factors:
+ *   - memory configuration (speed, channels)
+ *   - chipset
+ *   - current MCH state
+ * It can be fairly high in some situations, so here we assume a fairly
+ * pessimal value.  It's a tradeoff between extra memory fetches (if we
+ * set this value too high, the FIFO will fetch frequently to stay full)
+ * and power consumption (set it too low to save power and we might see
+ * FIFO underruns and display "flicker").
+ *
+ * A value of 5us seems to be a good balance; safe for very low end
+ * platforms but not overly aggressive on lower latency configs.
+ */
+static const int latency_ns = 5000;
+
+static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dsparb = I915_READ(DSPARB);
+	int size;
+
+	size = dsparb & 0x7f;
+	if (plane)
+		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
+
+	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+		      plane ? "B" : "A", size);
+
+	return size;
+}
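+
+/* e.g. a hypothetical DSPARB value of 0x1c20 (with a CSTART shift of 7)
+ * splits the FIFO as: plane A ends at entry 0x20 (32 entries), the B
+ * segment ends at 0x38 (56), so plane B gets 56 - 32 = 24 entries. */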
+
+static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dsparb = I915_READ(DSPARB);
+	int size;
+
+	size = dsparb & 0x1ff;
+	if (plane)
+		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
+	size >>= 1; /* Convert to cachelines */
+
+	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+		      plane ? "B" : "A", size);
+
+	return size;
+}
+
+static int i845_get_fifo_size(struct drm_device *dev, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dsparb = I915_READ(DSPARB);
+	int size;
+
+	size = dsparb & 0x7f;
+	size >>= 2; /* Convert to cachelines */
+
+	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+		      plane ? "B" : "A",
+		      size);
+
+	return size;
+}
+
+static int i830_get_fifo_size(struct drm_device *dev, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dsparb = I915_READ(DSPARB);
+	int size;
+
+	size = dsparb & 0x7f;
+	size >>= 1; /* Convert to cachelines */
+
+	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+		      plane ? "B" : "A", size);
+
+	return size;
+}
+
+/* Pineview has different values for various configs */
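+/* Positional fields in the initializers below: fifo_size, max_wm,
+ * default_wm, guard_size, cacheline_size (as consumed by
+ * intel_calculate_wm()). */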
+static const struct intel_watermark_params pineview_display_wm = {
+	PINEVIEW_DISPLAY_FIFO,
+	PINEVIEW_MAX_WM,
+	PINEVIEW_DFT_WM,
+	PINEVIEW_GUARD_WM,
+	PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
+	PINEVIEW_DISPLAY_FIFO,
+	PINEVIEW_MAX_WM,
+	PINEVIEW_DFT_HPLLOFF_WM,
+	PINEVIEW_GUARD_WM,
+	PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params pineview_cursor_wm = {
+	PINEVIEW_CURSOR_FIFO,
+	PINEVIEW_CURSOR_MAX_WM,
+	PINEVIEW_CURSOR_DFT_WM,
+	PINEVIEW_CURSOR_GUARD_WM,
+	PINEVIEW_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
+	PINEVIEW_CURSOR_FIFO,
+	PINEVIEW_CURSOR_MAX_WM,
+	PINEVIEW_CURSOR_DFT_WM,
+	PINEVIEW_CURSOR_GUARD_WM,
+	PINEVIEW_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params g4x_wm_info = {
+	G4X_FIFO_SIZE,
+	G4X_MAX_WM,
+	G4X_MAX_WM,
+	2,
+	G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params g4x_cursor_wm_info = {
+	I965_CURSOR_FIFO,
+	I965_CURSOR_MAX_WM,
+	I965_CURSOR_DFT_WM,
+	2,
+	G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_wm_info = {
+	VALLEYVIEW_FIFO_SIZE,
+	VALLEYVIEW_MAX_WM,
+	VALLEYVIEW_MAX_WM,
+	2,
+	G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params valleyview_cursor_wm_info = {
+	I965_CURSOR_FIFO,
+	VALLEYVIEW_CURSOR_MAX_WM,
+	I965_CURSOR_DFT_WM,
+	2,
+	G4X_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i965_cursor_wm_info = {
+	I965_CURSOR_FIFO,
+	I965_CURSOR_MAX_WM,
+	I965_CURSOR_DFT_WM,
+	2,
+	I915_FIFO_LINE_SIZE,
+};
+static const struct intel_watermark_params i945_wm_info = {
+	I945_FIFO_SIZE,
+	I915_MAX_WM,
+	1,
+	2,
+	I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i915_wm_info = {
+	I915_FIFO_SIZE,
+	I915_MAX_WM,
+	1,
+	2,
+	I915_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i855_wm_info = {
+	I855GM_FIFO_SIZE,
+	I915_MAX_WM,
+	1,
+	2,
+	I830_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params i830_wm_info = {
+	I830_FIFO_SIZE,
+	I915_MAX_WM,
+	1,
+	2,
+	I830_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params ironlake_display_wm_info = {
+	ILK_DISPLAY_FIFO,
+	ILK_DISPLAY_MAXWM,
+	ILK_DISPLAY_DFTWM,
+	2,
+	ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_wm_info = {
+	ILK_CURSOR_FIFO,
+	ILK_CURSOR_MAXWM,
+	ILK_CURSOR_DFTWM,
+	2,
+	ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_display_srwm_info = {
+	ILK_DISPLAY_SR_FIFO,
+	ILK_DISPLAY_MAX_SRWM,
+	ILK_DISPLAY_DFT_SRWM,
+	2,
+	ILK_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params ironlake_cursor_srwm_info = {
+	ILK_CURSOR_SR_FIFO,
+	ILK_CURSOR_MAX_SRWM,
+	ILK_CURSOR_DFT_SRWM,
+	2,
+	ILK_FIFO_LINE_SIZE
+};
+
+static const struct intel_watermark_params sandybridge_display_wm_info = {
+	SNB_DISPLAY_FIFO,
+	SNB_DISPLAY_MAXWM,
+	SNB_DISPLAY_DFTWM,
+	2,
+	SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_wm_info = {
+	SNB_CURSOR_FIFO,
+	SNB_CURSOR_MAXWM,
+	SNB_CURSOR_DFTWM,
+	2,
+	SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_display_srwm_info = {
+	SNB_DISPLAY_SR_FIFO,
+	SNB_DISPLAY_MAX_SRWM,
+	SNB_DISPLAY_DFT_SRWM,
+	2,
+	SNB_FIFO_LINE_SIZE
+};
+static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
+	SNB_CURSOR_SR_FIFO,
+	SNB_CURSOR_MAX_SRWM,
+	SNB_CURSOR_DFT_SRWM,
+	2,
+	SNB_FIFO_LINE_SIZE
+};
+
+
+/**
+ * intel_calculate_wm - calculate watermark level
+ * @clock_in_khz: pixel clock
+ * @wm: chip FIFO params
+ * @pixel_size: display pixel size
+ * @latency_ns: memory latency for the platform
+ *
+ * Calculate the watermark level (the level at which the display plane will
+ * start fetching from memory again).  Each chip has a different display
+ * FIFO size and allocation, so the caller needs to figure that out and pass
+ * in the correct intel_watermark_params structure.
+ *
+ * As the pixel clock runs, the FIFO will be drained at a rate that depends
+ * on the pixel size.  When it reaches the watermark level, it'll start
+ * fetching FIFO line sized based chunks from memory until the FIFO fills
+ * past the watermark point.  If the FIFO drains completely, a FIFO underrun
+ * will occur, and a display engine hang could result.
+ */
+static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
+					const struct intel_watermark_params *wm,
+					int fifo_size,
+					int pixel_size,
+					unsigned long latency_ns)
+{
+	long entries_required, wm_size;
+
+	/*
+	 * Note: we need to make sure we don't overflow for various clock &
+	 * latency values.
+	 * Clocks go from a few thousand to several hundred thousand kHz;
+	 * latency is usually a few thousand ns.
+	 */
+	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+		1000;
+	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
+
+	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
+
+	wm_size = fifo_size - (entries_required + wm->guard_size);
+
+	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
+
+	/* Don't promote wm_size to unsigned... */
+	if (wm_size > (long)wm->max_wm)
+		wm_size = wm->max_wm;
+	if (wm_size <= 0)
+		wm_size = wm->default_wm;
+	return wm_size;
+}
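+
+/*
+ * Worked example (illustrative numbers): a 100000 kHz dot clock at 4
+ * bytes/pixel with 5000 ns of latency drains ((100000 / 1000) * 4 *
+ * 5000) / 1000 = 2000 bytes, i.e. DIV_ROUND_UP(2000, 64) = 32 entries
+ * of a 64-byte cacheline; assuming a 96-entry FIFO and a guard size of
+ * 2, the watermark comes out at 96 - (32 + 2) = 62.
+ */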
+
+static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+{
+	struct drm_crtc *crtc, *enabled = NULL;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (intel_crtc_active(crtc)) {
+			if (enabled)
+				return NULL;
+			enabled = crtc;
+		}
+	}
+
+	return enabled;
+}
+
+static void pineview_update_wm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	const struct cxsr_latency *latency;
+	u32 reg;
+	unsigned long wm;
+
+	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
+					 dev_priv->fsb_freq, dev_priv->mem_freq);
+	if (!latency) {
+		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+		pineview_disable_cxsr(dev);
+		return;
+	}
+
+	crtc = single_enabled_crtc(dev);
+	if (crtc) {
+		int clock = crtc->mode.clock;
+		int pixel_size = crtc->fb->bits_per_pixel / 8;
+
+		/* Display SR */
+		wm = intel_calculate_wm(clock, &pineview_display_wm,
+					pineview_display_wm.fifo_size,
+					pixel_size, latency->display_sr);
+		reg = I915_READ(DSPFW1);
+		reg &= ~DSPFW_SR_MASK;
+		reg |= wm << DSPFW_SR_SHIFT;
+		I915_WRITE(DSPFW1, reg);
+		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+
+		/* cursor SR */
+		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+					pineview_display_wm.fifo_size,
+					pixel_size, latency->cursor_sr);
+		reg = I915_READ(DSPFW3);
+		reg &= ~DSPFW_CURSOR_SR_MASK;
+		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
+		I915_WRITE(DSPFW3, reg);
+
+		/* Display HPLL off SR */
+		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+					pineview_display_hplloff_wm.fifo_size,
+					pixel_size, latency->display_hpll_disable);
+		reg = I915_READ(DSPFW3);
+		reg &= ~DSPFW_HPLL_SR_MASK;
+		reg |= wm & DSPFW_HPLL_SR_MASK;
+		I915_WRITE(DSPFW3, reg);
+
+		/* cursor HPLL off SR */
+		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+					pineview_display_hplloff_wm.fifo_size,
+					pixel_size, latency->cursor_hpll_disable);
+		reg = I915_READ(DSPFW3);
+		reg &= ~DSPFW_HPLL_CURSOR_MASK;
+		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
+		I915_WRITE(DSPFW3, reg);
+		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+
+		/* activate cxsr */
+		I915_WRITE(DSPFW3,
+			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
+		DRM_DEBUG_KMS("Self-refresh is enabled\n");
+	} else {
+		pineview_disable_cxsr(dev);
+		DRM_DEBUG_KMS("Self-refresh is disabled\n");
+	}
+}
+
+static bool g4x_compute_wm0(struct drm_device *dev,
+			    int plane,
+			    const struct intel_watermark_params *display,
+			    int display_latency_ns,
+			    const struct intel_watermark_params *cursor,
+			    int cursor_latency_ns,
+			    int *plane_wm,
+			    int *cursor_wm)
+{
+	struct drm_crtc *crtc;
+	int htotal, hdisplay, clock, pixel_size;
+	int line_time_us, line_count;
+	int entries, tlb_miss;
+
+	crtc = intel_get_crtc_for_plane(dev, plane);
+	if (!intel_crtc_active(crtc)) {
+		*cursor_wm = cursor->guard_size;
+		*plane_wm = display->guard_size;
+		return false;
+	}
+
+	htotal = crtc->mode.htotal;
+	hdisplay = crtc->mode.hdisplay;
+	clock = crtc->mode.clock;
+	pixel_size = crtc->fb->bits_per_pixel / 8;
+
+	/* Use the small buffer method to calculate plane watermark */
+	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+	tlb_miss = display->fifo_size * display->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
+	entries = DIV_ROUND_UP(entries, display->cacheline_size);
+	*plane_wm = entries + display->guard_size;
+	if (*plane_wm > (int)display->max_wm)
+		*plane_wm = display->max_wm;
+
+	/* Use the large buffer method to calculate cursor watermark */
+	line_time_us = ((htotal * 1000) / clock);
+	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+	entries = line_count * 64 * pixel_size;
+	tlb_miss = cursor->fifo_size * cursor->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+	if (*cursor_wm > (int)cursor->max_wm)
+		*cursor_wm = (int)cursor->max_wm;
+
+	return true;
+}
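+
+/*
+ * Illustration of the two methods above: at 200000 kHz and 4 bytes per
+ * pixel the small-buffer method drains ((200000 * 4 / 1000) * 5000) /
+ * 1000 = 4000 bytes during a 5000 ns latency window, before the
+ * TLB-miss adjustment; the cursor's large-buffer method instead counts
+ * whole lines, at a fixed 64 pixels of cursor per line.
+ */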
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark values is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool g4x_check_srwm(struct drm_device *dev,
+			   int display_wm, int cursor_wm,
+			   const struct intel_watermark_params *display,
+			   const struct intel_watermark_params *cursor)
+{
+	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+		      display_wm, cursor_wm);
+
+	if (display_wm > display->max_wm) {
+		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
+			      display_wm, display->max_wm);
+		return false;
+	}
+
+	if (cursor_wm > cursor->max_wm) {
+		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
+			      cursor_wm, cursor->max_wm);
+		return false;
+	}
+
+	if (!(display_wm || cursor_wm)) {
+		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+		return false;
+	}
+
+	return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+			     int plane,
+			     int latency_ns,
+			     const struct intel_watermark_params *display,
+			     const struct intel_watermark_params *cursor,
+			     int *display_wm, int *cursor_wm)
+{
+	struct drm_crtc *crtc;
+	int hdisplay, htotal, pixel_size, clock;
+	unsigned long line_time_us;
+	int line_count, line_size;
+	int small, large;
+	int entries;
+
+	if (!latency_ns) {
+		*display_wm = *cursor_wm = 0;
+		return false;
+	}
+
+	crtc = intel_get_crtc_for_plane(dev, plane);
+	hdisplay = crtc->mode.hdisplay;
+	htotal = crtc->mode.htotal;
+	clock = crtc->mode.clock;
+	pixel_size = crtc->fb->bits_per_pixel / 8;
+
+	line_time_us = (htotal * 1000) / clock;
+	line_count = (latency_ns / line_time_us + 1000) / 1000;
+	line_size = hdisplay * pixel_size;
+
+	/* Use the minimum of the small and large buffer method for primary */
+	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+	large = line_count * line_size;
+
+	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+	*display_wm = entries + display->guard_size;
+
+	/* calculate the self-refresh watermark for display cursor */
+	entries = line_count * pixel_size * 64;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+
+	return g4x_check_srwm(dev,
+			      *display_wm, *cursor_wm,
+			      display, cursor);
+}
+
+static bool vlv_compute_drain_latency(struct drm_device *dev,
+				     int plane,
+				     int *plane_prec_mult,
+				     int *plane_dl,
+				     int *cursor_prec_mult,
+				     int *cursor_dl)
+{
+	struct drm_crtc *crtc;
+	int clock, pixel_size;
+	int entries;
+
+	crtc = intel_get_crtc_for_plane(dev, plane);
+	if (!intel_crtc_active(crtc))
+		return false;
+
+	clock = crtc->mode.clock;	/* VESA DOT Clock */
+	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */
+
+	entries = (clock / 1000) * pixel_size;
+	*plane_prec_mult = (entries > 256) ?
+		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
+						     pixel_size);
+
+	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
+	*cursor_prec_mult = (entries > 256) ?
+		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
+	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
+
+	return true;
+}
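+
+/* Illustrative numbers for the above: a 148500 kHz clock at 4 bytes per
+ * pixel gives entries = 148 * 4 = 592 > 256, selecting the 32x
+ * precision multiplier, and plane_dl = (64 * 32 * 4) / (148 * 4) = 13. */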
+
+/*
+ * Update drain latency registers of memory arbiter
+ *
+ * Valleyview SoC has a new memory arbiter and needs drain latency registers
+ * to be programmed. Each plane has a drain latency multiplier and a drain
+ * latency value.
+ */
+
+static void vlv_update_drain_latency(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int planea_prec, planea_dl, planeb_prec, planeb_dl;
+	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
+	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
+							either 16 or 32 */
+
+	/* For plane A, Cursor A */
+	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
+				      &cursor_prec_mult, &cursora_dl)) {
+		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
+		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
+
+		I915_WRITE(VLV_DDL1, cursora_prec |
+				(cursora_dl << DDL_CURSORA_SHIFT) |
+				planea_prec | planea_dl);
+	}
+
+	/* For plane B, Cursor B */
+	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
+				      &cursor_prec_mult, &cursorb_dl)) {
+		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
+		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
+			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
+
+		I915_WRITE(VLV_DDL2, cursorb_prec |
+				(cursorb_dl << DDL_CURSORB_SHIFT) |
+				planeb_prec | planeb_dl);
+	}
+}
+
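+/* Exactly one bit set in the pipe mask means exactly one pipe is active;
+ * ffs(enabled) - 1 below then recovers that pipe's index for the
+ * self-refresh computations. */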
+#define single_plane_enabled(mask) is_power_of_2(mask)
+
+static void valleyview_update_wm(struct drm_device *dev)
+{
+	static const int sr_latency_ns = 12000;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+	int plane_sr, cursor_sr;
+	int ignore_plane_sr, ignore_cursor_sr;
+	unsigned int enabled = 0;
+
+	vlv_update_drain_latency(dev);
+
+	if (g4x_compute_wm0(dev, PIPE_A,
+			    &valleyview_wm_info, latency_ns,
+			    &valleyview_cursor_wm_info, latency_ns,
+			    &planea_wm, &cursora_wm))
+		enabled |= 1 << PIPE_A;
+
+	if (g4x_compute_wm0(dev, PIPE_B,
+			    &valleyview_wm_info, latency_ns,
+			    &valleyview_cursor_wm_info, latency_ns,
+			    &planeb_wm, &cursorb_wm))
+		enabled |= 1 << PIPE_B;
+
+	if (single_plane_enabled(enabled) &&
+	    g4x_compute_srwm(dev, ffs(enabled) - 1,
+			     sr_latency_ns,
+			     &valleyview_wm_info,
+			     &valleyview_cursor_wm_info,
+			     &plane_sr, &ignore_cursor_sr) &&
+	    g4x_compute_srwm(dev, ffs(enabled) - 1,
+			     2*sr_latency_ns,
+			     &valleyview_wm_info,
+			     &valleyview_cursor_wm_info,
+			     &ignore_plane_sr, &cursor_sr)) {
+		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+	} else {
+		I915_WRITE(FW_BLC_SELF_VLV,
+			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+		plane_sr = cursor_sr = 0;
+	}
+
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+		      planea_wm, cursora_wm,
+		      planeb_wm, cursorb_wm,
+		      plane_sr, cursor_sr);
+
+	I915_WRITE(DSPFW1,
+		   (plane_sr << DSPFW_SR_SHIFT) |
+		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
+		   planea_wm);
+	I915_WRITE(DSPFW2,
+		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
+		   (cursora_wm << DSPFW_CURSORA_SHIFT));
+	I915_WRITE(DSPFW3,
+		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
+		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void g4x_update_wm(struct drm_device *dev)
+{
+	static const int sr_latency_ns = 12000;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+	int plane_sr, cursor_sr;
+	unsigned int enabled = 0;
+
+	if (g4x_compute_wm0(dev, PIPE_A,
+			    &g4x_wm_info, latency_ns,
+			    &g4x_cursor_wm_info, latency_ns,
+			    &planea_wm, &cursora_wm))
+		enabled |= 1 << PIPE_A;
+
+	if (g4x_compute_wm0(dev, PIPE_B,
+			    &g4x_wm_info, latency_ns,
+			    &g4x_cursor_wm_info, latency_ns,
+			    &planeb_wm, &cursorb_wm))
+		enabled |= 1 << PIPE_B;
+
+	if (single_plane_enabled(enabled) &&
+	    g4x_compute_srwm(dev, ffs(enabled) - 1,
+			     sr_latency_ns,
+			     &g4x_wm_info,
+			     &g4x_cursor_wm_info,
+			     &plane_sr, &cursor_sr)) {
+		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+	} else {
+		I915_WRITE(FW_BLC_SELF,
+			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+		plane_sr = cursor_sr = 0;
+	}
+
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+		      planea_wm, cursora_wm,
+		      planeb_wm, cursorb_wm,
+		      plane_sr, cursor_sr);
+
+	I915_WRITE(DSPFW1,
+		   (plane_sr << DSPFW_SR_SHIFT) |
+		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
+		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
+		   planea_wm);
+	I915_WRITE(DSPFW2,
+		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
+		   (cursora_wm << DSPFW_CURSORA_SHIFT));
+	/* HPLL off in SR has some issues on G4x... disable it */
+	I915_WRITE(DSPFW3,
+		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
+		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i965_update_wm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	int srwm = 1;
+	int cursor_sr = 16;
+
+	/* Calculate the SR entries for single-plane configs */
+	crtc = single_enabled_crtc(dev);
+	if (crtc) {
+		/* self-refresh has much higher latency */
+		static const int sr_latency_ns = 12000;
+		int clock = crtc->mode.clock;
+		int htotal = crtc->mode.htotal;
+		int hdisplay = crtc->mode.hdisplay;
+		int pixel_size = crtc->fb->bits_per_pixel / 8;
+		unsigned long line_time_us;
+		int entries;
+
+		line_time_us = ((htotal * 1000) / clock);
+
+		/* Use ns/us then divide to preserve precision */
+		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+			pixel_size * hdisplay;
+		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+		srwm = I965_FIFO_SIZE - entries;
+		if (srwm < 0)
+			srwm = 1;
+		srwm &= 0x1ff; /* the SR watermark field is 9 bits wide */
+		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+			      entries, srwm);
+
+		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+			pixel_size * 64;
+		entries = DIV_ROUND_UP(entries,
+					  i965_cursor_wm_info.cacheline_size);
+		cursor_sr = i965_cursor_wm_info.fifo_size -
+			(entries + i965_cursor_wm_info.guard_size);
+
+		if (cursor_sr > i965_cursor_wm_info.max_wm)
+			cursor_sr = i965_cursor_wm_info.max_wm;
+
+		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+			      "cursor %d\n", srwm, cursor_sr);
+
+		if (IS_CRESTLINE(dev))
+			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+	} else {
+		/* Turn off self refresh if both pipes are enabled */
+		if (IS_CRESTLINE(dev))
+			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+				   & ~FW_BLC_SELF_EN);
+	}
+
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+		      srwm);
+
+	/* 965 has limitations... */
+	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+		   (8 << 16) | (8 << 8) | (8 << 0));
+	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+	/* update cursor SR watermark */
+	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+}
+
+static void i9xx_update_wm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct intel_watermark_params *wm_info;
+	uint32_t fwater_lo;
+	uint32_t fwater_hi;
+	int cwm, srwm = 1;
+	int fifo_size;
+	int planea_wm, planeb_wm;
+	struct drm_crtc *crtc, *enabled = NULL;
+
+	if (IS_I945GM(dev))
+		wm_info = &i945_wm_info;
+	else if (!IS_GEN2(dev))
+		wm_info = &i915_wm_info;
+	else
+		wm_info = &i855_wm_info;
+
+	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+	crtc = intel_get_crtc_for_plane(dev, 0);
+	if (intel_crtc_active(crtc)) {
+		int cpp = crtc->fb->bits_per_pixel / 8;
+		if (IS_GEN2(dev))
+			cpp = 4;
+
+		planea_wm = intel_calculate_wm(crtc->mode.clock,
+					       wm_info, fifo_size, cpp,
+					       latency_ns);
+		enabled = crtc;
+	} else
+		planea_wm = fifo_size - wm_info->guard_size;
+
+	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+	crtc = intel_get_crtc_for_plane(dev, 1);
+	if (intel_crtc_active(crtc)) {
+		int cpp = crtc->fb->bits_per_pixel / 8;
+		if (IS_GEN2(dev))
+			cpp = 4;
+
+		planeb_wm = intel_calculate_wm(crtc->mode.clock,
+					       wm_info, fifo_size, cpp,
+					       latency_ns);
+		if (enabled == NULL)
+			enabled = crtc;
+		else
+			enabled = NULL; /* both planes active: disable self-refresh */
+	} else
+		planeb_wm = fifo_size - wm_info->guard_size;
+
+	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+
+	/*
+	 * Overlay gets an aggressive default since video jitter is bad.
+	 */
+	cwm = 2;
+
+	/* Play safe and disable self-refresh before adjusting watermarks. */
+	if (IS_I945G(dev) || IS_I945GM(dev))
+		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+	else if (IS_I915GM(dev))
+		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+
+	/* Calculate the SR entries for single-plane configs */
+	if (HAS_FW_BLC(dev) && enabled) {
+		/* self-refresh has much higher latency */
+		static const int sr_latency_ns = 6000;
+		int clock = enabled->mode.clock;
+		int htotal = enabled->mode.htotal;
+		int hdisplay = enabled->mode.hdisplay;
+		int pixel_size = enabled->fb->bits_per_pixel / 8;
+		unsigned long line_time_us;
+		int entries;
+
+		line_time_us = (htotal * 1000) / clock;
+
+		/* Use ns/us then divide to preserve precision */
+		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+			pixel_size * hdisplay;
+		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+		srwm = wm_info->fifo_size - entries;
+		if (srwm < 0)
+			srwm = 1;
+
+		if (IS_I945G(dev) || IS_I945GM(dev))
+			I915_WRITE(FW_BLC_SELF,
+				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+		else if (IS_I915GM(dev))
+			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+	}
+
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+		      planea_wm, planeb_wm, cwm, srwm);
+
+	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+	fwater_hi = (cwm & 0x1f);
+
+	/* Set request length to 8 cachelines per fetch */
+	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
+	fwater_hi = fwater_hi | (1 << 8);
+
+	I915_WRITE(FW_BLC, fwater_lo);
+	I915_WRITE(FW_BLC2, fwater_hi);
+
+	if (HAS_FW_BLC(dev)) {
+		if (enabled) {
+			if (IS_I945G(dev) || IS_I945GM(dev))
+				I915_WRITE(FW_BLC_SELF,
+					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+			else if (IS_I915GM(dev))
+				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+			DRM_DEBUG_KMS("memory self refresh enabled\n");
+		} else
+			DRM_DEBUG_KMS("memory self refresh disabled\n");
+	}
+}
+
+static void i830_update_wm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	uint32_t fwater_lo;
+	int planea_wm;
+
+	crtc = single_enabled_crtc(dev);
+	if (crtc == NULL)
+		return;
+
+	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+				       dev_priv->display.get_fifo_size(dev, 0),
+				       4, latency_ns);
+	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+	fwater_lo |= (3<<8) | planea_wm;
+
+	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+
+	I915_WRITE(FW_BLC, fwater_lo);
+}
+
+#define ILK_LP0_PLANE_LATENCY		700
+#define ILK_LP0_CURSOR_LATENCY		1300
+
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark values is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool ironlake_check_srwm(struct drm_device *dev, int level,
+				int fbc_wm, int display_wm, int cursor_wm,
+				const struct intel_watermark_params *display,
+				const struct intel_watermark_params *cursor)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+	if (fbc_wm > SNB_FBC_MAX_SRWM) {
+		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+			      fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+		/* fbc has its own way to disable FBC WM */
+		I915_WRITE(DISP_ARB_CTL,
+			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+		return false;
+	}
+
+	if (display_wm > display->max_wm) {
+		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+			      display_wm, (int)display->max_wm, level);
+		return false;
+	}
+
+	if (cursor_wm > cursor->max_wm) {
+		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+			      cursor_wm, (int)cursor->max_wm, level);
+		return false;
+	}
+
+	if (!(fbc_wm || display_wm || cursor_wm)) {
+		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Compute watermark values of WM[1-3],
+ */
+static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
+				  int latency_ns,
+				  const struct intel_watermark_params *display,
+				  const struct intel_watermark_params *cursor,
+				  int *fbc_wm, int *display_wm, int *cursor_wm)
+{
+	struct drm_crtc *crtc;
+	unsigned long line_time_us;
+	int hdisplay, htotal, pixel_size, clock;
+	int line_count, line_size;
+	int small, large;
+	int entries;
+
+	if (!latency_ns) {
+		*fbc_wm = *display_wm = *cursor_wm = 0;
+		return false;
+	}
+
+	crtc = intel_get_crtc_for_plane(dev, plane);
+	hdisplay = crtc->mode.hdisplay;
+	htotal = crtc->mode.htotal;
+	clock = crtc->mode.clock;
+	pixel_size = crtc->fb->bits_per_pixel / 8;
+
+	line_time_us = (htotal * 1000) / clock;
+	line_count = (latency_ns / line_time_us + 1000) / 1000;
+	line_size = hdisplay * pixel_size;
+
+	/* Use the minimum of the small and large buffer method for primary */
+	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+	large = line_count * line_size;
+
+	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+	*display_wm = entries + display->guard_size;
+
+	/*
+	 * Spec says:
+	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+	 */
+	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+	/* calculate the self-refresh watermark for display cursor */
+	entries = line_count * pixel_size * 64;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+
+	return ironlake_check_srwm(dev, level,
+				   *fbc_wm, *display_wm, *cursor_wm,
+				   display, cursor);
+}
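+
+/*
+ * Worked example for the FBC WM formula above (illustrative numbers only):
+ * with a final display_wm of 120 cachelines on a 1920-pixel wide 32bpp
+ * plane, line_size = 1920 * 4 = 7680 bytes, so
+ *
+ *	fbc_wm = DIV_ROUND_UP(120 * 64, 7680) + 2 = 1 + 2 = 3
+ *
+ * i.e. three lines of FBC headroom are requested.
+ */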
+
+static void ironlake_update_wm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int fbc_wm, plane_wm, cursor_wm;
+	unsigned int enabled;
+
+	enabled = 0;
+	if (g4x_compute_wm0(dev, PIPE_A,
+			    &ironlake_display_wm_info,
+			    ILK_LP0_PLANE_LATENCY,
+			    &ironlake_cursor_wm_info,
+			    ILK_LP0_CURSOR_LATENCY,
+			    &plane_wm, &cursor_wm)) {
+		I915_WRITE(WM0_PIPEA_ILK,
+			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+			      " plane %d, " "cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled |= 1 << PIPE_A;
+	}
+
+	if (g4x_compute_wm0(dev, PIPE_B,
+			    &ironlake_display_wm_info,
+			    ILK_LP0_PLANE_LATENCY,
+			    &ironlake_cursor_wm_info,
+			    ILK_LP0_CURSOR_LATENCY,
+			    &plane_wm, &cursor_wm)) {
+		I915_WRITE(WM0_PIPEB_ILK,
+			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+			      " plane %d, cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled |= 1 << PIPE_B;
+	}
+
+	/*
+	 * Calculate and update the self-refresh watermark only when one
+	 * display plane is used.
+	 */
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	if (!single_plane_enabled(enabled))
+		return;
+	enabled = ffs(enabled) - 1;
+
+	/* WM1 */
+	if (!ironlake_compute_srwm(dev, 1, enabled,
+				   ILK_READ_WM1_LATENCY() * 500,
+				   &ironlake_display_srwm_info,
+				   &ironlake_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM1_LP_ILK,
+		   WM1_LP_SR_EN |
+		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+
+	/* WM2 */
+	if (!ironlake_compute_srwm(dev, 2, enabled,
+				   ILK_READ_WM2_LATENCY() * 500,
+				   &ironlake_display_srwm_info,
+				   &ironlake_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM2_LP_ILK,
+		   WM2_LP_EN |
+		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+
+	/*
+	 * WM3 is unsupported on ILK, probably because we don't have latency
+	 * data for that power state
+	 */
+}
+
+static void sandybridge_update_wm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
+	u32 val;
+	int fbc_wm, plane_wm, cursor_wm;
+	unsigned int enabled;
+
+	enabled = 0;
+	if (g4x_compute_wm0(dev, PIPE_A,
+			    &sandybridge_display_wm_info, latency,
+			    &sandybridge_cursor_wm_info, latency,
+			    &plane_wm, &cursor_wm)) {
+		val = I915_READ(WM0_PIPEA_ILK);
+		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+		I915_WRITE(WM0_PIPEA_ILK, val |
+			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+			      " plane %d, " "cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled |= 1 << PIPE_A;
+	}
+
+	if (g4x_compute_wm0(dev, PIPE_B,
+			    &sandybridge_display_wm_info, latency,
+			    &sandybridge_cursor_wm_info, latency,
+			    &plane_wm, &cursor_wm)) {
+		val = I915_READ(WM0_PIPEB_ILK);
+		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+		I915_WRITE(WM0_PIPEB_ILK, val |
+			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+			      " plane %d, cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled |= 1 << PIPE_B;
+	}
+
+	/*
+	 * Calculate and update the self-refresh watermark only when one
+	 * display plane is used.
+	 *
+	 * SNB supports 3 levels of watermarks.
+	 *
+	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+	 * and disabled in descending order.
+	 */
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	if (!single_plane_enabled(enabled) ||
+	    dev_priv->sprite_scaling_enabled)
+		return;
+	enabled = ffs(enabled) - 1;
+
+	/* WM1 */
+	if (!ironlake_compute_srwm(dev, 1, enabled,
+				   SNB_READ_WM1_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM1_LP_ILK,
+		   WM1_LP_SR_EN |
+		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+
+	/* WM2 */
+	if (!ironlake_compute_srwm(dev, 2, enabled,
+				   SNB_READ_WM2_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM2_LP_ILK,
+		   WM2_LP_EN |
+		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+
+	/* WM3 */
+	if (!ironlake_compute_srwm(dev, 3, enabled,
+				   SNB_READ_WM3_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM3_LP_ILK,
+		   WM3_LP_EN |
+		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+}
+
+static void ivybridge_update_wm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
+	u32 val;
+	int fbc_wm, plane_wm, cursor_wm;
+	int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
+	unsigned int enabled;
+
+	enabled = 0;
+	if (g4x_compute_wm0(dev, PIPE_A,
+			    &sandybridge_display_wm_info, latency,
+			    &sandybridge_cursor_wm_info, latency,
+			    &plane_wm, &cursor_wm)) {
+		val = I915_READ(WM0_PIPEA_ILK);
+		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+		I915_WRITE(WM0_PIPEA_ILK, val |
+			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+			      " plane %d, " "cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled |= 1 << PIPE_A;
+	}
+
+	if (g4x_compute_wm0(dev, PIPE_B,
+			    &sandybridge_display_wm_info, latency,
+			    &sandybridge_cursor_wm_info, latency,
+			    &plane_wm, &cursor_wm)) {
+		val = I915_READ(WM0_PIPEB_ILK);
+		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+		I915_WRITE(WM0_PIPEB_ILK, val |
+			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+			      " plane %d, cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled |= 1 << PIPE_B;
+	}
+
+	if (g4x_compute_wm0(dev, PIPE_C,
+			    &sandybridge_display_wm_info, latency,
+			    &sandybridge_cursor_wm_info, latency,
+			    &plane_wm, &cursor_wm)) {
+		val = I915_READ(WM0_PIPEC_IVB);
+		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+		I915_WRITE(WM0_PIPEC_IVB, val |
+			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
+			      " plane %d, cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled |= 1 << PIPE_C;
+	}
+
+	/*
+	 * Calculate and update the self-refresh watermark only when one
+	 * display plane is used.
+	 *
+	 * SNB supports 3 levels of watermarks.
+	 *
+	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+	 * and disabled in descending order.
+	 */
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	if (!single_plane_enabled(enabled) ||
+	    dev_priv->sprite_scaling_enabled)
+		return;
+	enabled = ffs(enabled) - 1;
+
+	/* WM1 */
+	if (!ironlake_compute_srwm(dev, 1, enabled,
+				   SNB_READ_WM1_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM1_LP_ILK,
+		   WM1_LP_SR_EN |
+		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+
+	/* WM2 */
+	if (!ironlake_compute_srwm(dev, 2, enabled,
+				   SNB_READ_WM2_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM2_LP_ILK,
+		   WM2_LP_EN |
+		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+
+	/* WM3, note we have to correct the cursor latency */
+	if (!ironlake_compute_srwm(dev, 3, enabled,
+				   SNB_READ_WM3_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
+	    !ironlake_compute_srwm(dev, 3, enabled,
+				   2 * SNB_READ_WM3_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM3_LP_ILK,
+		   WM3_LP_EN |
+		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+}
+
+static void
+haswell_update_linetime_wm(struct drm_device *dev, int pipe,
+				 struct drm_display_mode *mode)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 temp;
+
+	temp = I915_READ(PIPE_WM_LINETIME(pipe));
+	temp &= ~PIPE_WM_LINETIME_MASK;
+
+	/* The WM is computed based on how long it takes to fill a single
+	 * row at the given clock rate, multiplied by 8.
+	 */
+	temp |= PIPE_WM_LINETIME_TIME(
+		((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
+
+	/* IPS watermarks are only used by pipe A, and are ignored by
+	 * pipes B and C.  They are calculated similarly to the common
+	 * linetime values, except that we are using CD clock frequency
+	 * in MHz instead of pixel rate for the division.
+	 *
+	 * This is a placeholder for the IPS watermark calculation code.
+	 */
+
+	I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
+}
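+
+/*
+ * Worked example for the linetime value above (illustrative numbers only):
+ * with crtc_hdisplay = 1920 and a 148500 kHz dot clock,
+ *
+ *	(1920 * 1000) / 148500 = 12 us per row	(integer math)
+ *	12 * 8 = 96
+ *
+ * so the register ends up holding the row fill time in 0.125 us units.
+ */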
+
+static bool
+sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+			      uint32_t sprite_width, int pixel_size,
+			      const struct intel_watermark_params *display,
+			      int display_latency_ns, int *sprite_wm)
+{
+	struct drm_crtc *crtc;
+	int clock;
+	int entries, tlb_miss;
+
+	crtc = intel_get_crtc_for_plane(dev, plane);
+	if (!intel_crtc_active(crtc)) {
+		*sprite_wm = display->guard_size;
+		return false;
+	}
+
+	clock = crtc->mode.clock;
+
+	/* Use the small buffer method to calculate the sprite watermark */
+	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+	tlb_miss = display->fifo_size*display->cacheline_size -
+		sprite_width * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
+	entries = DIV_ROUND_UP(entries, display->cacheline_size);
+	*sprite_wm = entries + display->guard_size;
+	if (*sprite_wm > (int)display->max_wm)
+		*sprite_wm = display->max_wm;
+
+	return true;
+}
+
+static bool
+sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+				uint32_t sprite_width, int pixel_size,
+				const struct intel_watermark_params *display,
+				int latency_ns, int *sprite_wm)
+{
+	struct drm_crtc *crtc;
+	unsigned long line_time_us;
+	int clock;
+	int line_count, line_size;
+	int small, large;
+	int entries;
+
+	if (!latency_ns) {
+		*sprite_wm = 0;
+		return false;
+	}
+
+	crtc = intel_get_crtc_for_plane(dev, plane);
+	clock = crtc->mode.clock;
+	if (!clock) {
+		*sprite_wm = 0;
+		return false;
+	}
+
+	line_time_us = (sprite_width * 1000) / clock;
+	if (!line_time_us) {
+		*sprite_wm = 0;
+		return false;
+	}
+
+	line_count = (latency_ns / line_time_us + 1000) / 1000;
+	line_size = sprite_width * pixel_size;
+
+	/* Use the minimum of the small and large buffer method for the sprite */
+	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+	large = line_count * line_size;
+
+	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+	*sprite_wm = entries + display->guard_size;
+
+	/* the sprite WM fields are 10 bits wide */
+	return *sprite_wm <= 0x3ff;
+}
+
+static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+					 uint32_t sprite_width, int pixel_size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
+	u32 val;
+	int sprite_wm, reg;
+	int ret;
+
+	switch (pipe) {
+	case 0:
+		reg = WM0_PIPEA_ILK;
+		break;
+	case 1:
+		reg = WM0_PIPEB_ILK;
+		break;
+	case 2:
+		reg = WM0_PIPEC_IVB;
+		break;
+	default:
+		return; /* bad pipe */
+	}
+
+	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+					    &sandybridge_display_wm_info,
+					    latency, &sprite_wm);
+	if (!ret) {
+		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
+			      pipe);
+		return;
+	}
+
+	val = I915_READ(reg);
+	val &= ~WM0_PIPE_SPRITE_MASK;
+	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
+
+	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+					      pixel_size,
+					      &sandybridge_display_srwm_info,
+					      SNB_READ_WM1_LATENCY() * 500,
+					      &sprite_wm);
+	if (!ret) {
+		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
+			      pipe);
+		return;
+	}
+	I915_WRITE(WM1S_LP_ILK, sprite_wm);
+
+	/* Only IVB has two more LP watermarks for sprite */
+	if (!IS_IVYBRIDGE(dev))
+		return;
+
+	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+					      pixel_size,
+					      &sandybridge_display_srwm_info,
+					      SNB_READ_WM2_LATENCY() * 500,
+					      &sprite_wm);
+	if (!ret) {
+		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
+			      pipe);
+		return;
+	}
+	I915_WRITE(WM2S_LP_IVB, sprite_wm);
+
+	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+					      pixel_size,
+					      &sandybridge_display_srwm_info,
+					      SNB_READ_WM3_LATENCY() * 500,
+					      &sprite_wm);
+	if (!ret) {
+		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
+			      pipe);
+		return;
+	}
+	I915_WRITE(WM3S_LP_IVB, sprite_wm);
+}
+
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ *   - normal (i.e. non-self-refresh)
+ *   - self-refresh (SR) mode
+ *   - lines are large relative to FIFO size (buffer can hold up to 2)
+ *   - lines are small relative to FIFO size (buffer can hold more than 2
+ *     lines), so need to account for TLB latency
+ *
+ *   The normal calculation is:
+ *     watermark = dotclock * bytes per pixel * latency
+ *   where latency is platform & configuration dependent (we assume pessimal
+ *   values here).
+ *
+ *   The SR calculation is:
+ *     watermark = (trunc(latency/line time)+1) * surface width *
+ *       bytes per pixel
+ *   where
+ *     line time = htotal / dotclock
+ *     surface width = hdisplay for normal plane and 64 for cursor
+ *   and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that.  And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+void intel_update_watermarks(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->display.update_wm)
+		dev_priv->display.update_wm(dev);
+}
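+
+/*
+ * Worked example of the SR formula from the comment above (illustrative
+ * numbers only): for a 1920x1080@60 mode, dotclock = 148500 kHz and
+ * htotal = 2200, so
+ *
+ *	line time    = 2200 / 148500 kHz ~= 14.8 us
+ *	line count   = trunc(12 us / 14.8 us) + 1 = 1
+ *	SR watermark = 1 * 1920 * 4 = 7680 bytes = 120 cachelines
+ *
+ * which is the entry count e.g. i965_update_wm arrives at before
+ * subtracting it from the FIFO size.
+ */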
+
+void intel_update_linetime_watermarks(struct drm_device *dev,
+		int pipe, struct drm_display_mode *mode)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->display.update_linetime_wm)
+		dev_priv->display.update_linetime_wm(dev, pipe, mode);
+}
+
+void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+				    uint32_t sprite_width, int pixel_size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->display.update_sprite_wm)
+		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+						   pixel_size);
+}
+
+static struct drm_i915_gem_object *
+intel_alloc_context_page(struct drm_device *dev)
+{
+	struct drm_i915_gem_object *ctx;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	ctx = i915_gem_alloc_object(dev, 4096);
+	if (!ctx) {
+		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+		return NULL;
+	}
+
+	ret = i915_gem_object_pin(ctx, 4096, true, false);
+	if (ret) {
+		DRM_ERROR("failed to pin power context: %d\n", ret);
+		goto err_unref;
+	}
+
+	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
+	if (ret) {
+		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+		goto err_unpin;
+	}
+
+	return ctx;
+
+err_unpin:
+	i915_gem_object_unpin(ctx);
+err_unref:
+	drm_gem_object_unreference(&ctx->base);
+	return NULL;
+}
+
+/**
+ * mchdev_lock - protects the IPS related data structures
+ */
+DEFINE_SPINLOCK(mchdev_lock);
+
+/* Global for IPS driver to get at the current i915 device. Protected by
+ * mchdev_lock. */
+static struct drm_i915_private *i915_mch_dev;
+
+bool ironlake_set_drps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u16 rgvswctl;
+
+	assert_spin_locked(&mchdev_lock);
+
+	rgvswctl = I915_READ16(MEMSWCTL);
+	if (rgvswctl & MEMCTL_CMD_STS) {
+		DRM_DEBUG("gpu busy, RCS change rejected\n");
+		return false; /* still busy with another command */
+	}
+
+	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+	I915_WRITE16(MEMSWCTL, rgvswctl);
+	POSTING_READ16(MEMSWCTL);
+
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE16(MEMSWCTL, rgvswctl);
+
+	return true;
+}
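+
+/*
+ * Note on the handshake above: a new command is only accepted while
+ * MEMCTL_CMD_STS is clear; the bit is then written back set to kick off the
+ * frequency change, and the hardware clears it again once the change has
+ * completed (ironlake_enable_drps polls for exactly that).
+ */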
+
+static void ironlake_enable_drps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rgvmodectl = I915_READ(MEMMODECTL);
+	u8 fmax, fmin, fstart, vstart;
+
+	spin_lock_irq(&mchdev_lock);
+
+	/* Enable temp reporting */
+	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+
+	/* 100ms RC evaluation intervals */
+	I915_WRITE(RCUPEI, 100000);
+	I915_WRITE(RCDNEI, 100000);
+
+	/* Set max/min thresholds to 90ms and 80ms respectively */
+	I915_WRITE(RCBMAXAVG, 90000);
+	I915_WRITE(RCBMINAVG, 80000);
+
+	I915_WRITE(MEMIHYST, 1);
+
+	/* Set up min, max, and cur for interrupt handling */
+	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+		MEMMODE_FSTART_SHIFT;
+
+	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+		PXVFREQ_PX_SHIFT;
+
+	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
+	dev_priv->ips.fstart = fstart;
+
+	dev_priv->ips.max_delay = fstart;
+	dev_priv->ips.min_delay = fmin;
+	dev_priv->ips.cur_delay = fstart;
+
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+			 fmax, fmin, fstart);
+
+	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+	/*
+	 * Interrupts will be enabled in ironlake_irq_postinstall
+	 */
+
+	I915_WRITE(VIDSTART, vstart);
+	POSTING_READ(VIDSTART);
+
+	rgvmodectl |= MEMMODE_SWMODE_EN;
+	I915_WRITE(MEMMODECTL, rgvmodectl);
+
+	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+		DRM_ERROR("stuck trying to change perf mode\n");
+	mdelay(1);
+
+	ironlake_set_drps(dev, fstart);
+
+	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
+		I915_READ(0x112e0);
+	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
+	dev_priv->ips.last_count2 = I915_READ(0x112f4);
+	getrawmonotonic(&dev_priv->ips.last_time2);
+
+	spin_unlock_irq(&mchdev_lock);
+}
+
+static void ironlake_disable_drps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u16 rgvswctl;
+
+	spin_lock_irq(&mchdev_lock);
+
+	rgvswctl = I915_READ16(MEMSWCTL);
+
+	/* Ack interrupts, disable EFC interrupt */
+	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+	I915_WRITE(DEIIR, DE_PCU_EVENT);
+	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+	/* Go back to the starting frequency */
+	ironlake_set_drps(dev, dev_priv->ips.fstart);
+	mdelay(1);
+	rgvswctl |= MEMCTL_CMD_STS;
+	I915_WRITE(MEMSWCTL, rgvswctl);
+	mdelay(1);
+
+	spin_unlock_irq(&mchdev_lock);
+}
+
+/* There's a funny hw issue where the hw returns all 0 when reading from
+ * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
+ * ourselves, instead of doing a read-modify-write cycle (which might result
+ * in us clearing all limits and the gpu getting stuck at whatever frequency
+ * it is currently running at).
+ */
+static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
+{
+	u32 limits;
+
+	limits = 0;
+
+	if (*val >= dev_priv->rps.max_delay)
+		*val = dev_priv->rps.max_delay;
+	limits |= dev_priv->rps.max_delay << 24;
+
+	/* Only set the down limit when we've reached the lowest level to avoid
+	 * getting more interrupts, otherwise leave this clear. This prevents a
+	 * race in the hw when coming out of rc6: There's a tiny window where
+	 * the hw runs at the minimal clock before selecting the desired
+	 * frequency, if the down threshold expires in that window we will not
+	 * receive a down interrupt. */
+	if (*val <= dev_priv->rps.min_delay) {
+		*val = dev_priv->rps.min_delay;
+		limits |= dev_priv->rps.min_delay << 16;
+	}
+
+	return limits;
+}
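+
+/*
+ * Worked example for the limit packing above (illustrative delay values
+ * only): with rps.max_delay = 0x16 and rps.min_delay = 0x0b, a request in
+ * between the two yields limits = 0x16 << 24 = 0x16000000; only once *val
+ * has been clamped down to 0x0b does the down limit get added, giving
+ * 0x16000000 | (0x0b << 16) = 0x160b0000.
+ */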
+
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 limits = gen6_rps_limits(dev_priv, &val);
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(val > dev_priv->rps.max_delay);
+	WARN_ON(val < dev_priv->rps.min_delay);
+
+	if (val == dev_priv->rps.cur_delay)
+		return;
+
+	if (IS_HASWELL(dev))
+		I915_WRITE(GEN6_RPNSWREQ,
+			   HSW_FREQUENCY(val));
+	else
+		I915_WRITE(GEN6_RPNSWREQ,
+			   GEN6_FREQUENCY(val) |
+			   GEN6_OFFSET(0) |
+			   GEN6_AGGRESSIVE_TURBO);
+
+	/* Make sure we continue to get interrupts
+	 * until we hit the minimum or maximum frequencies.
+	 */
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+	POSTING_READ(GEN6_RPNSWREQ);
+
+	dev_priv->rps.cur_delay = val;
+
+	trace_intel_gpu_freq_change(val * 50);
+}
+
+static void gen6_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+	I915_WRITE(GEN6_PMIER, 0);
+	/* Complete PM interrupt masking here doesn't race with the rps work
+	 * item again unmasking PM interrupts because that is using a different
+	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
+	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+
+	spin_lock_irq(&dev_priv->rps.lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->rps.lock);
+
+	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
+int intel_enable_rc6(const struct drm_device *dev)
+{
+	/* Respect the kernel parameter if it is set */
+	if (i915_enable_rc6 >= 0)
+		return i915_enable_rc6;
+
+	/* Disable RC6 on Ironlake */
+	if (INTEL_INFO(dev)->gen == 5)
+		return 0;
+
+	if (IS_HASWELL(dev)) {
+		DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+		return INTEL_RC6_ENABLE;
+	}
+
+	/* snb/ivb have more than one rc6 state. */
+	if (INTEL_INFO(dev)->gen == 6) {
+		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+		return INTEL_RC6_ENABLE;
+	}
+
+	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
+	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+}
+
+static void gen6_enable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	u32 rp_state_cap;
+	u32 gt_perf_status;
+	u32 rc6vids, pcu_mbox, rc6_mask = 0;
+	u32 gtfifodbg;
+	int rc6_mode;
+	int i, ret;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	/* Here begins a magic sequence of register writes to enable
+	 * auto-downclocking.
+	 *
+	 * Perhaps there might be some value in exposing these to
+	 * userspace...
+	 */
+	I915_WRITE(GEN6_RC_STATE, 0);
+
+	/* Clear the DBG now so we don't confuse earlier errors */
+	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+		I915_WRITE(GTFIFODBG, gtfifodbg);
+	}
+
+	gen6_gt_force_wake_get(dev_priv);
+
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+	/* In units of 50MHz */
+	dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
+	dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->rps.cur_delay = 0;
+
+	/* disable the counters and set deterministic thresholds */
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+	for_each_ring(ring, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
+	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+	/* Check if we are enabling RC6 */
+	rc6_mode = intel_enable_rc6(dev_priv->dev);
+	if (rc6_mode & INTEL_RC6_ENABLE)
+		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
+
+	/* We don't use those on Haswell */
+	if (!IS_HASWELL(dev)) {
+		if (rc6_mode & INTEL_RC6p_ENABLE)
+			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+
+		if (rc6_mode & INTEL_RC6pp_ENABLE)
+			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+	}
+
+	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+			(rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+			(rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+			(rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+
+	I915_WRITE(GEN6_RC_CONTROL,
+		   rc6_mask |
+		   GEN6_RC_CTL_EI_MODE(1) |
+		   GEN6_RC_CTL_HW_ENABLE);
+
+	if (IS_HASWELL(dev)) {
+		I915_WRITE(GEN6_RPNSWREQ,
+			   HSW_FREQUENCY(10));
+		I915_WRITE(GEN6_RC_VIDEO_FREQ,
+			   HSW_FREQUENCY(12));
+	} else {
+		I915_WRITE(GEN6_RPNSWREQ,
+			   GEN6_FREQUENCY(10) |
+			   GEN6_OFFSET(0) |
+			   GEN6_AGGRESSIVE_TURBO);
+		I915_WRITE(GEN6_RC_VIDEO_FREQ,
+			   GEN6_FREQUENCY(12));
+	}
+
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   dev_priv->rps.max_delay << 24 |
+		   dev_priv->rps.min_delay << 16);
+
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+	I915_WRITE(GEN6_RP_UP_EI, 66000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_AVG |
+		   (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
+
+	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
+	if (!ret) {
+		pcu_mbox = 0;
+		ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+		if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
+			DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
+					 (dev_priv->rps.max_delay & 0xff) * 50,
+					 (pcu_mbox & 0xff) * 50);
+			dev_priv->rps.hw_max = pcu_mbox & 0xff;
+		}
+	} else {
+		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
+	}
+
+	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
+
+	/* requires MSI enabled */
+	I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
+	spin_lock_irq(&dev_priv->rps.lock);
+	WARN_ON(dev_priv->rps.pm_iir != 0);
+	I915_WRITE(GEN6_PMIMR, 0);
+	spin_unlock_irq(&dev_priv->rps.lock);
+	/* enable all PM interrupts */
+	I915_WRITE(GEN6_PMINTRMSK, 0);
+
+	rc6vids = 0;
+	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+	if (IS_GEN6(dev) && ret) {
+		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+	} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+			  GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+		rc6vids &= 0xffff00;
+		rc6vids |= GEN6_ENCODE_RC6_VID(450);
+		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+		if (ret)
+			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+	}
+
+	gen6_gt_force_wake_put(dev_priv);
+}
+
+static void gen6_update_ring_freq(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int min_freq = 15;
+	unsigned int gpu_freq;
+	unsigned int max_ia_freq, min_ring_freq;
+	int scaling_factor = 180;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	max_ia_freq = cpufreq_quick_get_max(0);
+	/*
+	 * Default to measured freq if none found, PCU will ensure we don't go
+	 * over
+	 */
+	if (!max_ia_freq)
+		max_ia_freq = tsc_khz;
+
+	/* Convert from kHz to MHz */
+	max_ia_freq /= 1000;
+
+	min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
+	/* convert DDR frequency from units of 133.3MHz to bandwidth */
+	min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
+
+	/*
+	 * For each potential GPU frequency, load a ring frequency we'd like
+	 * to use for memory access.  We do this by specifying the IA frequency
+	 * the PCU should use as a reference to determine the ring frequency.
+	 */
+	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
+	     gpu_freq--) {
+		int diff = dev_priv->rps.max_delay - gpu_freq;
+		unsigned int ia_freq = 0, ring_freq = 0;
+
+		if (IS_HASWELL(dev)) {
+			ring_freq = (gpu_freq * 5 + 3) / 4;
+			ring_freq = max(min_ring_freq, ring_freq);
+			/* leave ia_freq as the default, chosen by cpufreq */
+		} else {
+			/* On older processors, there is no separate ring
+			 * clock domain, so in order to boost the bandwidth
+			 * of the ring, we need to upclock the CPU (ia_freq).
+			 *
+			 * For GPU frequencies less than 750MHz,
+			 * just ask for a fixed 800MHz IA frequency.
+			 */
+			if (gpu_freq < min_freq)
+				ia_freq = 800;
+			else
+				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+		}
+
+		sandybridge_pcode_write(dev_priv,
+					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+					gpu_freq);
+	}
+}
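+
+/*
+ * Worked example for the table above (illustrative frequencies only): on a
+ * non-Haswell part with max_ia_freq = 3400 MHz and a gpu_freq four steps
+ * below the maximum,
+ *
+ *	ia_freq = 3400 - (4 * 180) / 2 = 3040 MHz
+ *	DIV_ROUND_CLOSEST(3040, 100) = 30
+ *
+ * so a 100 MHz ratio of 30 is sent to the PCU for that GPU frequency. On
+ * Haswell the ring is instead asked for (gpu_freq * 5 + 3) / 4, i.e.
+ * roughly 1.25x the GPU frequency, floored at min_ring_freq.
+ */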
+
+void ironlake_teardown_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->ips.renderctx) {
+		i915_gem_object_unpin(dev_priv->ips.renderctx);
+		drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
+		dev_priv->ips.renderctx = NULL;
+	}
+
+	if (dev_priv->ips.pwrctx) {
+		i915_gem_object_unpin(dev_priv->ips.pwrctx);
+		drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
+		dev_priv->ips.pwrctx = NULL;
+	}
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (I915_READ(PWRCTXA)) {
+		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+			 50);
+
+		I915_WRITE(PWRCTXA, 0);
+		POSTING_READ(PWRCTXA);
+
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+		POSTING_READ(RSTDBYCTL);
+	}
+}
+
+static int ironlake_setup_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->ips.renderctx == NULL)
+		dev_priv->ips.renderctx = intel_alloc_context_page(dev);
+	if (!dev_priv->ips.renderctx)
+		return -ENOMEM;
+
+	if (dev_priv->ips.pwrctx == NULL)
+		dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
+	if (!dev_priv->ips.pwrctx) {
+		ironlake_teardown_rc6(dev);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void ironlake_enable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	bool was_interruptible;
+	int ret;
+
+	/* rc6 disabled by default due to repeated reports of hanging during
+	 * boot and resume.
+	 */
+	if (!intel_enable_rc6(dev))
+		return;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	ret = ironlake_setup_rc6(dev);
+	if (ret)
+		return;
+
+	was_interruptible = dev_priv->mm.interruptible;
+	dev_priv->mm.interruptible = false;
+
+	/*
+	 * GPU can automatically power down the render unit if given a page
+	 * to save state.
+	 */
+	ret = intel_ring_begin(ring, 6);
+	if (ret) {
+		ironlake_teardown_rc6(dev);
+		dev_priv->mm.interruptible = was_interruptible;
+		return;
+	}
+
+	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+	intel_ring_emit(ring, MI_SET_CONTEXT);
+	intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+			MI_MM_SPACE_GTT |
+			MI_SAVE_EXT_STATE_EN |
+			MI_RESTORE_EXT_STATE_EN |
+			MI_RESTORE_INHIBIT);
+	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_advance(ring);
+
+	/*
+	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
+	 * does an implicit flush; combined with the MI_FLUSH above, it should
+	 * be safe to assume that renderctx is valid.
+	 */
+	ret = intel_ring_idle(ring);
+	dev_priv->mm.interruptible = was_interruptible;
+	if (ret) {
+		DRM_ERROR("failed to enable ironlake power savings\n");
+		ironlake_teardown_rc6(dev);
+		return;
+	}
+
+	I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+	unsigned long freq;
+	int div = (vidfreq & 0x3f0000) >> 16;
+	int post = (vidfreq & 0x3000) >> 12;
+	int pre = (vidfreq & 0x7);
+
+	if (!pre)
+		return 0;
+
+	freq = ((div * 133333) / ((1<<post) * pre));
+
+	return freq;
+}
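+
+/*
+ * Worked example for the decode above (illustrative field values only):
+ * div = 4, post = 0, pre = 1 gives
+ *
+ *	freq = (4 * 133333) / ((1 << 0) * 1) = 533332 kHz ~= 533 MHz
+ *
+ * i.e. the divider scales a 133.333 MHz reference, the post divider halves
+ * the result per step, and pre divides it directly; pre = 0 is rejected
+ * up front to avoid a division by zero.
+ */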
+
+static const struct cparams {
+	u16 i;
+	u16 t;
+	u16 m;
+	u16 c;
+} cparams[] = {
+	{ 1, 1333, 301, 28664 },
+	{ 1, 1066, 294, 24460 },
+	{ 1, 800, 294, 25192 },
+	{ 0, 1333, 276, 27605 },
+	{ 0, 1066, 276, 27605 },
+	{ 0, 800, 231, 23784 },
+};
+
+static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+	u64 total_count, diff, ret;
+	u32 count1, count2, count3, m = 0, c = 0;
+	unsigned long now = jiffies_to_msecs(jiffies), diff1;
+	int i;
+
+	assert_spin_locked(&mchdev_lock);
+
+	diff1 = now - dev_priv->ips.last_time1;
+
+	/* Prevent division-by-zero if we are asking too fast.
+	 * Also, we don't get interesting results if we are polling
+	 * faster than once in 10ms, so just return the saved value
+	 * in such cases.
+	 */
+	if (diff1 <= 10)
+		return dev_priv->ips.chipset_power;
+
+	count1 = I915_READ(DMIEC);
+	count2 = I915_READ(DDREC);
+	count3 = I915_READ(CSIEC);
+
+	total_count = count1 + count2 + count3;
+
+	/* FIXME: handle per-counter overflow */
+	if (total_count < dev_priv->ips.last_count1) {
+		diff = ~0UL - dev_priv->ips.last_count1;
+		diff += total_count;
+	} else {
+		diff = total_count - dev_priv->ips.last_count1;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+		if (cparams[i].i == dev_priv->ips.c_m &&
+		    cparams[i].t == dev_priv->ips.r_t) {
+			m = cparams[i].m;
+			c = cparams[i].c;
+			break;
+		}
+	}
+
+	diff = div_u64(diff, diff1);
+	ret = ((m * diff) + c);
+	ret = div_u64(ret, 10);
+
+	dev_priv->ips.last_count1 = total_count;
+	dev_priv->ips.last_time1 = now;
+
+	dev_priv->ips.chipset_power = ret;
+
+	return ret;
+}
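+
+/*
+ * Worked example for the estimate above (illustrative counter deltas only):
+ * with the { 1, 1066, 294, 24460 } cparams row selected and the three
+ * energy counters advancing by a combined 100 counts per millisecond,
+ *
+ *	ret = ((294 * 100) + 24460) / 10 = 5386
+ *
+ * so the cached chipset_power value is a linear m * rate + c fit over the
+ * measured counter rate.
+ */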
+
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long val;
+
+	if (dev_priv->info->gen != 5)
+		return 0;
+
+	spin_lock_irq(&mchdev_lock);
+
+	val = __i915_chipset_val(dev_priv);
+
+	spin_unlock_irq(&mchdev_lock);
+
+	return val;
+}
+
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long m, x, b;
+	u32 tsfs;
+
+	tsfs = I915_READ(TSFS);
+
+	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
+	x = I915_READ8(TR1);
+
+	b = tsfs & TSFS_INTR_MASK;
+
+	return ((m * x) / 127) - b;
+}
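+
+/*
+ * Worked example for the linear fit above (illustrative register values
+ * only): with a slope field m = 100, a raw TR1 reading x = 80 and an
+ * intercept b = 20,
+ *
+ *	(100 * 80) / 127 - 20 = 62 - 20 = 42
+ *
+ * which is the thermal figure that __i915_gfx_val later buckets
+ * (t > 80, t >= 50, t < 50) to pick a correction factor.
+ */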
+
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+{
+	static const struct v_table {
+		u16 vd; /* in .1 mil */
+		u16 vm; /* in .1 mil */
+	} v_table[] = {
+		{ 0, 0, },
+		{ 375, 0, },
+		{ 500, 0, },
+		{ 625, 0, },
+		{ 750, 0, },
+		{ 875, 0, },
+		{ 1000, 0, },
+		{ 1125, 0, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4250, 3125, },
+		{ 4375, 3250, },
+		{ 4500, 3375, },
+		{ 4625, 3500, },
+		{ 4750, 3625, },
+		{ 4875, 3750, },
+		{ 5000, 3875, },
+		{ 5125, 4000, },
+		{ 5250, 4125, },
+		{ 5375, 4250, },
+		{ 5500, 4375, },
+		{ 5625, 4500, },
+		{ 5750, 4625, },
+		{ 5875, 4750, },
+		{ 6000, 4875, },
+		{ 6125, 5000, },
+		{ 6250, 5125, },
+		{ 6375, 5250, },
+		{ 6500, 5375, },
+		{ 6625, 5500, },
+		{ 6750, 5625, },
+		{ 6875, 5750, },
+		{ 7000, 5875, },
+		{ 7125, 6000, },
+		{ 7250, 6125, },
+		{ 7375, 6250, },
+		{ 7500, 6375, },
+		{ 7625, 6500, },
+		{ 7750, 6625, },
+		{ 7875, 6750, },
+		{ 8000, 6875, },
+		{ 8125, 7000, },
+		{ 8250, 7125, },
+		{ 8375, 7250, },
+		{ 8500, 7375, },
+		{ 8625, 7500, },
+		{ 8750, 7625, },
+		{ 8875, 7750, },
+		{ 9000, 7875, },
+		{ 9125, 8000, },
+		{ 9250, 8125, },
+		{ 9375, 8250, },
+		{ 9500, 8375, },
+		{ 9625, 8500, },
+		{ 9750, 8625, },
+		{ 9875, 8750, },
+		{ 10000, 8875, },
+		{ 10125, 9000, },
+		{ 10250, 9125, },
+		{ 10375, 9250, },
+		{ 10500, 9375, },
+		{ 10625, 9500, },
+		{ 10750, 9625, },
+		{ 10875, 9750, },
+		{ 11000, 9875, },
+		{ 11125, 10000, },
+		{ 11250, 10125, },
+		{ 11375, 10250, },
+		{ 11500, 10375, },
+		{ 11625, 10500, },
+		{ 11750, 10625, },
+		{ 11875, 10750, },
+		{ 12000, 10875, },
+		{ 12125, 11000, },
+		{ 12250, 11125, },
+		{ 12375, 11250, },
+		{ 12500, 11375, },
+		{ 12625, 11500, },
+		{ 12750, 11625, },
+		{ 12875, 11750, },
+		{ 13000, 11875, },
+		{ 13125, 12000, },
+		{ 13250, 12125, },
+		{ 13375, 12250, },
+		{ 13500, 12375, },
+		{ 13625, 12500, },
+		{ 13750, 12625, },
+		{ 13875, 12750, },
+		{ 14000, 12875, },
+		{ 14125, 13000, },
+		{ 14250, 13125, },
+		{ 14375, 13250, },
+		{ 14500, 13375, },
+		{ 14625, 13500, },
+		{ 14750, 13625, },
+		{ 14875, 13750, },
+		{ 15000, 13875, },
+		{ 15125, 14000, },
+		{ 15250, 14125, },
+		{ 15375, 14250, },
+		{ 15500, 14375, },
+		{ 15625, 14500, },
+		{ 15750, 14625, },
+		{ 15875, 14750, },
+		{ 16000, 14875, },
+		{ 16125, 15000, },
+	};
+	if (dev_priv->info->is_mobile)
+		return v_table[pxvid].vm;
+	else
+		return v_table[pxvid].vd;
+}
+
+static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+	struct timespec now, diff1;
+	u64 diff;
+	unsigned long diffms;
+	u32 count;
+
+	assert_spin_locked(&mchdev_lock);
+
+	getrawmonotonic(&now);
+	diff1 = timespec_sub(now, dev_priv->ips.last_time2);
+
+	/* Don't divide by 0 */
+	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
+	if (!diffms)
+		return;
+
+	count = I915_READ(GFXEC);
+
+	if (count < dev_priv->ips.last_count2) {
+		diff = ~0UL - dev_priv->ips.last_count2;
+		diff += count;
+	} else {
+		diff = count - dev_priv->ips.last_count2;
+	}
+
+	dev_priv->ips.last_count2 = count;
+	dev_priv->ips.last_time2 = now;
+
+	/* More magic constants... */
+	diff = diff * 1181;
+	diff = div_u64(diff, diffms * 10);
+	dev_priv->ips.gfx_power = diff;
+}
+
+void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->info->gen != 5)
+		return;
+
+	spin_lock_irq(&mchdev_lock);
+
+	__i915_update_gfx_val(dev_priv);
+
+	spin_unlock_irq(&mchdev_lock);
+}
+
+static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long t, corr, state1, corr2, state2;
+	u32 pxvid, ext_v;
+
+	assert_spin_locked(&mchdev_lock);
+
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
+	pxvid = (pxvid >> 24) & 0x7f;
+	ext_v = pvid_to_extvid(dev_priv, pxvid);
+
+	state1 = ext_v;
+
+	t = i915_mch_val(dev_priv);
+
+	/* Revel in the empirically derived constants */
+
+	/* Correction factor in 1/100000 units */
+	if (t > 80)
+		corr = ((t * 2349) + 135940);
+	else if (t >= 50)
+		corr = ((t * 964) + 29317);
+	else /* < 50 */
+		corr = ((t * 301) + 1004);
+
+	corr = corr * ((150142 * state1) / 10000 - 78642);
+	corr /= 100000;
+	corr2 = (corr * dev_priv->ips.corr);
+
+	state2 = (corr2 * state1) / 10000;
+	state2 /= 100; /* convert to mW */
+
+	__i915_update_gfx_val(dev_priv);
+
+	return dev_priv->ips.gfx_power + state2;
+}
+
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
+{
+	unsigned long val;
+
+	if (dev_priv->info->gen != 5)
+		return 0;
+
+	spin_lock_irq(&mchdev_lock);
+
+	val = __i915_gfx_val(dev_priv);
+
+	spin_unlock_irq(&mchdev_lock);
+
+	return val;
+}
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+	struct drm_i915_private *dev_priv;
+	unsigned long chipset_val, graphics_val, ret = 0;
+
+	spin_lock_irq(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	chipset_val = __i915_chipset_val(dev_priv);
+	graphics_val = __i915_gfx_val(dev_priv);
+
+	ret = chipset_val + graphics_val;
+
+out_unlock:
+	spin_unlock_irq(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock_irq(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
+		dev_priv->ips.max_delay--;
+
+out_unlock:
+	spin_unlock_irq(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock_irq(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
+		dev_priv->ips.max_delay++;
+
+out_unlock:
+	spin_unlock_irq(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU business to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+	struct drm_i915_private *dev_priv;
+	struct intel_ring_buffer *ring;
+	bool ret = false;
+	int i;
+
+	spin_lock_irq(&mchdev_lock);
+	if (!i915_mch_dev)
+		goto out_unlock;
+	dev_priv = i915_mch_dev;
+
+	for_each_ring(ring, dev_priv, i)
+		ret |= !list_empty(&ring->request_list);
+
+out_unlock:
+	spin_unlock_irq(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+	struct drm_i915_private *dev_priv;
+	bool ret = true;
+
+	spin_lock_irq(&mchdev_lock);
+	if (!i915_mch_dev) {
+		ret = false;
+		goto out_unlock;
+	}
+	dev_priv = i915_mch_dev;
+
+	dev_priv->ips.max_delay = dev_priv->ips.fstart;
+
+	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
+		ret = false;
+
+out_unlock:
+	spin_unlock_irq(&mchdev_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+/**
+ * ips_ping_for_i915_load - tell the intel_ips driver that the i915 driver
+ * is now loaded, if IPS got loaded first
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+	void (*link)(void);
+
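+	/* symbol_get() returns NULL if intel_ips isn't loaded yet; in that
+	 * case there is nothing to ping, and intel_ips will query i915
+	 * itself once it does load. */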
+	link = symbol_get(ips_link_to_i915_driver);
+	if (link) {
+		link();
+		symbol_put(ips_link_to_i915_driver);
+	}
+}
+
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
+{
+	/* We only register the i915 ips part with intel-ips once everything is
+	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
+	spin_lock_irq(&mchdev_lock);
+	i915_mch_dev = dev_priv;
+	spin_unlock_irq(&mchdev_lock);
+
+	ips_ping_for_i915_load();
+}
+
+void intel_gpu_ips_teardown(void)
+{
+	spin_lock_irq(&mchdev_lock);
+	i915_mch_dev = NULL;
+	spin_unlock_irq(&mchdev_lock);
+}
+
+static void intel_init_emon(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 lcfuse;
+	u8 pxw[16];
+	int i;
+
+	/* Disable to program */
+	I915_WRITE(ECR, 0);
+	POSTING_READ(ECR);
+
+	/* Program energy weights for various events */
+	I915_WRITE(SDEW, 0x15040d00);
+	I915_WRITE(CSIEW0, 0x007f0000);
+	I915_WRITE(CSIEW1, 0x1e220004);
+	I915_WRITE(CSIEW2, 0x04000004);
+
+	for (i = 0; i < 5; i++)
+		I915_WRITE(PEW + (i * 4), 0);
+	for (i = 0; i < 3; i++)
+		I915_WRITE(DEW + (i * 4), 0);
+
+	/* Program P-state weights to account for frequency power adjustment */
+	for (i = 0; i < 16; i++) {
+		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+		unsigned long freq = intel_pxfreq(pxvidfreq);
+		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
+			PXVFREQ_PX_SHIFT;
+		unsigned long val;
+
+		val = vid * vid;
+		val *= (freq / 1000);
+		val *= 255;
+		val /= (127*127*900);
+		if (val > 0xff)
+			DRM_ERROR("bad pxval: %ld\n", val);
+		pxw[i] = val;
+	}
+	/* Render standby states get 0 weight */
+	pxw[14] = 0;
+	pxw[15] = 0;
+
+	for (i = 0; i < 4; i++) {
+		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
+			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
+		I915_WRITE(PXW + (i * 4), val);
+	}
+
+	/* Adjust magic regs to magic values (more experimental results) */
+	I915_WRITE(OGW0, 0);
+	I915_WRITE(OGW1, 0);
+	I915_WRITE(EG0, 0x00007f00);
+	I915_WRITE(EG1, 0x0000000e);
+	I915_WRITE(EG2, 0x000e0000);
+	I915_WRITE(EG3, 0x68000300);
+	I915_WRITE(EG4, 0x42000000);
+	I915_WRITE(EG5, 0x00140031);
+	I915_WRITE(EG6, 0);
+	I915_WRITE(EG7, 0);
+
+	for (i = 0; i < 8; i++)
+		I915_WRITE(PXWL + (i * 4), 0);
+
+	/* Enable PMON + select events */
+	I915_WRITE(ECR, 0x80000019);
+
+	lcfuse = I915_READ(LCFUSE02);
+
+	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
+}
+
+void intel_disable_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_IRONLAKE_M(dev)) {
+		ironlake_disable_drps(dev);
+		ironlake_disable_rc6(dev);
+	} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+		mutex_lock(&dev_priv->rps.hw_lock);
+		gen6_disable_rps(dev);
+		mutex_unlock(&dev_priv->rps.hw_lock);
+	}
+}
+
+static void intel_gen6_powersave_work(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private,
+			     rps.delayed_resume_work.work);
+	struct drm_device *dev = dev_priv->dev;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	gen6_enable_rps(dev);
+	gen6_update_ring_freq(dev);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void intel_enable_gt_powersave(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_IRONLAKE_M(dev)) {
+		ironlake_enable_drps(dev);
+		ironlake_enable_rc6(dev);
+		intel_init_emon(dev);
+	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+		/*
+		 * PCU communication is slow and this doesn't need to be
+		 * done at any specific time, so do this out of our fast path
+		 * to make resume and init faster.
+		 */
+		schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+				      round_jiffies_up_relative(HZ));
+	}
+}
+
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * On Ibex Peak and Cougar Point, we need to disable clock
+	 * gating for the panel power sequencer or it will fail to
+	 * start up when no ports are active.
+	 */
+	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
+static void ironlake_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+
+	/* Required for FBC */
+	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
+		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
+		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
+
+	I915_WRITE(PCH_3DCGDIS0,
+		   MARIUNIT_CLOCK_GATE_DISABLE |
+		   SVSMUNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(PCH_3DCGDIS1,
+		   VFMUNIT_CLOCK_GATE_DISABLE);
+
+	/*
+	 * According to the spec the following bits should be set in
+	 * order to enable memory self-refresh
+	 * The bit 22/21 of 0x42004
+	 * The bit 5 of 0x42020
+	 * The bit 15 of 0x45000
+	 */
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
+		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
+	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
+	I915_WRITE(DISP_ARB_CTL,
+		   (I915_READ(DISP_ARB_CTL) |
+		    DISP_FBC_WM_DIS));
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	/*
+	 * Based on the document from hardware guys the following bits
+	 * should be set unconditionally in order to enable FBC.
+	 * The bit 22 of 0x42000
+	 * The bit 22 of 0x42004
+	 * The bit 7,8,9 of 0x42020.
+	 */
+	if (IS_IRONLAKE_M(dev)) {
+		I915_WRITE(ILK_DISPLAY_CHICKEN1,
+			   I915_READ(ILK_DISPLAY_CHICKEN1) |
+			   ILK_FBCQ_DIS);
+		I915_WRITE(ILK_DISPLAY_CHICKEN2,
+			   I915_READ(ILK_DISPLAY_CHICKEN2) |
+			   ILK_DPARB_GATE);
+	}
+
+	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+		   ILK_ELPIN_409_SELECT);
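+	/* _3D_CHICKEN2 is a masked register: the high 16 bits select which of
+	 * the low 16 bits the write actually updates, so the write below is
+	 * the open-coded equivalent of
+	 * _MASKED_BIT_ENABLE(_3D_CHICKEN2_WM_READ_PIPELINED). */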
+	I915_WRITE(_3D_CHICKEN2,
+		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+		   _3D_CHICKEN2_WM_READ_PIPELINED);
+
+	/* WaDisableRenderCachePipelinedFlush */
+	I915_WRITE(CACHE_MODE_0,
+		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+	ibx_init_clock_gating(dev);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+	uint32_t val;
+
+	/*
+	 * On Ibex Peak and Cougar Point, we need to disable clock
+	 * gating for the panel power sequencer or it will fail to
+	 * start up when no ports are active.
+	 */
+	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+		   DPLS_EDP_PPS_FIX_DIS);
+	/* The below fixes the weird display corruption, a few pixels shifted
+	 * downward, on (only) LVDS of some HP laptops with IVY.
+	 */
+	for_each_pipe(pipe) {
+		val = I915_READ(TRANS_CHICKEN2(pipe));
+		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+		if (dev_priv->fdi_rx_polarity_inverted)
+			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
+		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
+		I915_WRITE(TRANS_CHICKEN2(pipe), val);
+	}
+	/* WADP0ClockGatingDisable */
+	for_each_pipe(pipe) {
+		I915_WRITE(TRANS_CHICKEN1(pipe),
+			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+	}
+}
+
+static void gen6_check_mch_setup(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = I915_READ(MCH_SSKPD);
+	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
+		DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
+		DRM_INFO("This can cause pipe underruns and display issues.\n");
+		DRM_INFO("Please upgrade your BIOS to fix this.\n");
+	}
+}
+
+static void gen6_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+
+	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+		   ILK_ELPIN_409_SELECT);
+
+	/* WaDisableHiZPlanesWhenMSAAEnabled */
+	I915_WRITE(_3D_CHICKEN,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+
+	/* WaSetupGtModeTdRowDispatch */
+	if (IS_SNB_GT1(dev))
+		I915_WRITE(GEN6_GT_MODE,
+			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	I915_WRITE(CACHE_MODE_0,
+		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+	I915_WRITE(GEN6_UCGCTL1,
+		   I915_READ(GEN6_UCGCTL1) |
+		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+	 * gating disable must be set.  Failure to set it results in
+	 * flickering pixels due to Z write ordering failures after
+	 * some amount of runtime in the Mesa "fire" demo, and Unigine
+	 * Sanctuary and Tropics, and apparently anything else with
+	 * alpha test or pixel discard.
+	 *
+	 * According to the spec, bit 11 (RCCUNIT) must also be set,
+	 * but we didn't debug actual testcases to find it out.
+	 *
+	 * Also apply WaDisableVDSUnitClockGating and
+	 * WaDisableRCPBUnitClockGating.
+	 */
+	I915_WRITE(GEN6_UCGCTL2,
+		   GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+	/* Bspec says we need to always set all mask bits. */
+	I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
+		   _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
+
+	/*
+	 * According to the spec the following bits should be
+	 * set in order to enable memory self-refresh and fbc:
+	 * The bit21 and bit22 of 0x42000
+	 * The bit21 and bit22 of 0x42004
+	 * The bit5 and bit7 of 0x42020
+	 * The bit14 of 0x70180
+	 * The bit14 of 0x71180
+	 */
+	I915_WRITE(ILK_DISPLAY_CHICKEN1,
+		   I915_READ(ILK_DISPLAY_CHICKEN1) |
+		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+	I915_WRITE(ILK_DISPLAY_CHICKEN2,
+		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+	I915_WRITE(ILK_DSPCLK_GATE_D,
+		   I915_READ(ILK_DSPCLK_GATE_D) |
+		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
+		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
+
+	/* WaMbcDriverBootEnable */
+	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+	for_each_pipe(pipe) {
+		I915_WRITE(DSPCNTR(pipe),
+			   I915_READ(DSPCNTR(pipe)) |
+			   DISPPLANE_TRICKLE_FEED_DISABLE);
+		intel_flush_display_plane(dev_priv, pipe);
+	}
+
+	/* The default value should be 0x200 according to docs, but the two
+	 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
+	I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
+	I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
+
+	cpt_init_clock_gating(dev);
+
+	gen6_check_mch_setup(dev);
+}
+
+static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+{
+	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+
+	reg &= ~GEN7_FF_SCHED_MASK;
+	reg |= GEN7_FF_TS_SCHED_HW;
+	reg |= GEN7_FF_VS_SCHED_HW;
+	reg |= GEN7_FF_DS_SCHED_HW;
+
+	/* WaVSRefCountFullforceMissDisable */
+	if (IS_HASWELL(dev_priv->dev))
+		reg &= ~GEN7_FF_VS_REF_CNT_FFME;
+
+	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
+}
+
+static void lpt_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * TODO: this bit should only be enabled when really needed, then
+	 * disabled when not needed anymore in order to save power.
+	 */
+	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+		I915_WRITE(SOUTH_DSPCLK_GATE_D,
+			   I915_READ(SOUTH_DSPCLK_GATE_D) |
+			   PCH_LP_PARTITION_LEVEL_DISABLE);
+}
+
+static void haswell_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+	 * This implements the WaDisableRCZUnitClockGating workaround.
+	 */
+	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
+	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+	I915_WRITE(GEN7_L3CNTLREG1,
+			GEN7_WA_FOR_GEN7_L3_CONTROL);
+	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+			GEN7_WA_L3_CHICKEN_MODE);
+
+	/* This is required by WaCatErrorRejectionIssue */
+	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+	for_each_pipe(pipe) {
+		I915_WRITE(DSPCNTR(pipe),
+			   I915_READ(DSPCNTR(pipe)) |
+			   DISPPLANE_TRICKLE_FEED_DISABLE);
+		intel_flush_display_plane(dev_priv, pipe);
+	}
+
+	gen7_setup_fixed_func_scheduler(dev_priv);
+
+	/* WaDisable4x2SubspanOptimization */
+	I915_WRITE(CACHE_MODE_1,
+		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+	/* WaMbcDriverBootEnable */
+	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+	/* WaSwitchSolVfFArbitrationPriority */
+	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+
+	/* XXX: This is a workaround for early silicon revisions and should be
+	 * removed later.
+	 */
+	I915_WRITE(WM_DBG,
+			I915_READ(WM_DBG) |
+			WM_DBG_DISALLOW_MULTIPLE_LP |
+			WM_DBG_DISALLOW_SPRITE |
+			WM_DBG_DISALLOW_MAXFIFO);
+
+	lpt_init_clock_gating(dev);
+}
+
+static void ivybridge_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+	uint32_t snpcr;
+
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableEarlyCull */
+	I915_WRITE(_3D_CHICKEN3,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+	/* WaDisableBackToBackFlipFix */
+	I915_WRITE(IVB_CHICKEN3,
+		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+	/* WaDisablePSDDualDispatchEnable */
+	if (IS_IVB_GT1(dev))
+		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+	else
+		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
+			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
+	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+	I915_WRITE(GEN7_L3CNTLREG1,
+			GEN7_WA_FOR_GEN7_L3_CONTROL);
+	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+		   GEN7_WA_L3_CHICKEN_MODE);
+	if (IS_IVB_GT1(dev))
+		I915_WRITE(GEN7_ROW_CHICKEN2,
+			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+	else
+		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+	/* WaForceL3Serialization */
+	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+	 * gating disable must be set.  Failure to set it results in
+	 * flickering pixels due to Z write ordering failures after
+	 * some amount of runtime in the Mesa "fire" demo, and Unigine
+	 * Sanctuary and Tropics, and apparently anything else with
+	 * alpha test or pixel discard.
+	 *
+	 * According to the spec, bit 11 (RCCUNIT) must also be set,
+	 * but we didn't debug actual testcases to find it out.
+	 *
+	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+	 * This implements the WaDisableRCZUnitClockGating workaround.
+	 */
+	I915_WRITE(GEN6_UCGCTL2,
+		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+	/* This is required by WaCatErrorRejectionIssue */
+	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+	for_each_pipe(pipe) {
+		I915_WRITE(DSPCNTR(pipe),
+			   I915_READ(DSPCNTR(pipe)) |
+			   DISPPLANE_TRICKLE_FEED_DISABLE);
+		intel_flush_display_plane(dev_priv, pipe);
+	}
+
+	/* WaMbcDriverBootEnable */
+	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+	gen7_setup_fixed_func_scheduler(dev_priv);
+
+	/* WaDisable4x2SubspanOptimization */
+	I915_WRITE(CACHE_MODE_1,
+		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+	snpcr &= ~GEN6_MBC_SNPCR_MASK;
+	snpcr |= GEN6_MBC_SNPCR_MED;
+	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+	if (!HAS_PCH_NOP(dev))
+		cpt_init_clock_gating(dev);
+
+	gen6_check_mch_setup(dev);
+}
+
+static void valleyview_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe;
+
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableEarlyCull */
+	I915_WRITE(_3D_CHICKEN3,
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
+
+	/* WaDisableBackToBackFlipFix */
+	I915_WRITE(IVB_CHICKEN3,
+		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+	/* WaDisablePSDDualDispatchEnable */
+	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
+				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
+	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+	I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
+	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+
+	/* WaForceL3Serialization */
+	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+	/* WaDisableDopClockGating */
+	I915_WRITE(GEN7_ROW_CHICKEN2,
+		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+	/* This is required by WaCatErrorRejectionIssue */
+	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+	/* WaMbcDriverBootEnable */
+	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+		   GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+	 * gating disable must be set.  Failure to set it results in
+	 * flickering pixels due to Z write ordering failures after
+	 * some amount of runtime in the Mesa "fire" demo, and Unigine
+	 * Sanctuary and Tropics, and apparently anything else with
+	 * alpha test or pixel discard.
+	 *
+	 * According to the spec, bit 11 (RCCUNIT) must also be set,
+	 * but we didn't debug actual testcases to find it out.
+	 *
+	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+	 * This implements the WaDisableRCZUnitClockGating workaround.
+	 *
+	 * Also apply WaDisableVDSUnitClockGating and
+	 * WaDisableRCPBUnitClockGating.
+	 */
+	I915_WRITE(GEN6_UCGCTL2,
+		   GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
+		   GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+	I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+
+	for_each_pipe(pipe) {
+		I915_WRITE(DSPCNTR(pipe),
+			   I915_READ(DSPCNTR(pipe)) |
+			   DISPPLANE_TRICKLE_FEED_DISABLE);
+		intel_flush_display_plane(dev_priv, pipe);
+	}
+
+	I915_WRITE(CACHE_MODE_1,
+		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+	/*
+	 * WaDisableVLVClockGating_VBIIssue
+	 * Disable clock gating on the GCFG unit to prevent a delay
+	 * in the reporting of vblank events.
+	 */
+	I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
+
+	/* Conservative clock gating settings for now */
+	I915_WRITE(0x9400, 0xffffffff);
+	I915_WRITE(0x9404, 0xffffffff);
+	I915_WRITE(0x9408, 0xffffffff);
+	I915_WRITE(0x940c, 0xffffffff);
+	I915_WRITE(0x9410, 0xffffffff);
+	I915_WRITE(0x9414, 0xffffffff);
+	I915_WRITE(0x9418, 0xffffffff);
+}
+
+static void g4x_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dspclk_gate;
+
+	I915_WRITE(RENCLK_GATE_D1, 0);
+	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
+		   GS_UNIT_CLOCK_GATE_DISABLE |
+		   CL_UNIT_CLOCK_GATE_DISABLE);
+	I915_WRITE(RAMCLK_GATE_D, 0);
+	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
+		OVRUNIT_CLOCK_GATE_DISABLE |
+		OVCUNIT_CLOCK_GATE_DISABLE;
+	if (IS_GM45(dev))
+		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+
+	/* WaDisableRenderCachePipelinedFlush */
+	I915_WRITE(CACHE_MODE_0,
+		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+}
+
+static void crestline_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+	I915_WRITE(RENCLK_GATE_D2, 0);
+	I915_WRITE(DSPCLK_GATE_D, 0);
+	I915_WRITE(RAMCLK_GATE_D, 0);
+	I915_WRITE16(DEUC, 0);
+}
+
+static void broadwater_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
+		   I965_RCC_CLOCK_GATE_DISABLE |
+		   I965_RCPB_CLOCK_GATE_DISABLE |
+		   I965_ISC_CLOCK_GATE_DISABLE |
+		   I965_FBC_CLOCK_GATE_DISABLE);
+	I915_WRITE(RENCLK_GATE_D2, 0);
+}
+
+static void gen3_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 dstate = I915_READ(D_STATE);
+
+	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+		DSTATE_DOT_CLOCK_GATING;
+	I915_WRITE(D_STATE, dstate);
+
+	if (IS_PINEVIEW(dev))
+		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+
+	/* IIR "flip pending" means done if this bit is set */
+	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+}
+
+static void i85x_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+}
+
+static void i830_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+}
+
+void intel_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->display.init_clock_gating(dev);
+}
+
+/**
+ * intel_using_power_well - check whether the power well is in use
+ *
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+bool intel_using_power_well(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_HASWELL(dev))
+		return I915_READ(HSW_PWR_WELL_DRIVER) ==
+		       (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
+	else
+		return true;
+}
+
+void intel_set_power_well(struct drm_device *dev, bool enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool is_enabled, enable_requested;
+	uint32_t tmp;
+
+	if (!HAS_POWER_WELL(dev))
+		return;
+
+	if (!i915_disable_power_well && !enable)
+		return;
+
+	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
+	is_enabled = tmp & HSW_PWR_WELL_STATE;
+	enable_requested = tmp & HSW_PWR_WELL_ENABLE;
+
+	if (enable) {
+		if (!enable_requested)
+			I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
+
+		if (!is_enabled) {
+			DRM_DEBUG_KMS("Enabling power well\n");
+			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
+				      HSW_PWR_WELL_STATE), 20))
+				DRM_ERROR("Timeout enabling power well\n");
+		}
+	} else {
+		if (enable_requested) {
+			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+			DRM_DEBUG_KMS("Requesting to disable the power well\n");
+		}
+	}
+}
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
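+ *
+ * (A note for context, to the best of our understanding: the four requesters
+ * are BIOS, driver, KVMr and debug, each with its own request register; this
+ * code drives HSW_PWR_WELL_DRIVER and clears the BIOS request below.)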
+ */
+void intel_init_power_well(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!HAS_POWER_WELL(dev))
+		return;
+
+	/* For now, we need the power well to be always enabled. */
+	intel_set_power_well(dev, true);
+
+	/* We're taking over the BIOS, so clear any requests made by it since
+	 * the driver is in charge now. */
+	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
+		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+}
+
+/* Set up chip specific power management-related functions */
+void intel_init_pm(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (I915_HAS_FBC(dev)) {
+		if (HAS_PCH_SPLIT(dev)) {
+			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+			dev_priv->display.enable_fbc = ironlake_enable_fbc;
+			dev_priv->display.disable_fbc = ironlake_disable_fbc;
+		} else if (IS_GM45(dev)) {
+			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
+			dev_priv->display.enable_fbc = g4x_enable_fbc;
+			dev_priv->display.disable_fbc = g4x_disable_fbc;
+		} else if (IS_CRESTLINE(dev)) {
+			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
+			dev_priv->display.enable_fbc = i8xx_enable_fbc;
+			dev_priv->display.disable_fbc = i8xx_disable_fbc;
+		}
+		/* 855GM needs testing */
+	}
+
+	/* For cxsr */
+	if (IS_PINEVIEW(dev))
+		i915_pineview_get_mem_freq(dev);
+	else if (IS_GEN5(dev))
+		i915_ironlake_get_mem_freq(dev);
+
+	/* For FIFO watermark updates */
+	if (HAS_PCH_SPLIT(dev)) {
+		if (IS_GEN5(dev)) {
+			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+				dev_priv->display.update_wm = ironlake_update_wm;
+			else {
+				DRM_DEBUG_KMS("Failed to get proper latency. "
+					      "Disable CxSR\n");
+				dev_priv->display.update_wm = NULL;
+			}
+			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+		} else if (IS_GEN6(dev)) {
+			if (SNB_READ_WM0_LATENCY()) {
+				dev_priv->display.update_wm = sandybridge_update_wm;
+				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+			} else {
+				DRM_DEBUG_KMS("Failed to read display plane latency. "
+					      "Disable CxSR\n");
+				dev_priv->display.update_wm = NULL;
+			}
+			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+		} else if (IS_IVYBRIDGE(dev)) {
+			if (SNB_READ_WM0_LATENCY()) {
+				dev_priv->display.update_wm = ivybridge_update_wm;
+				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+			} else {
+				DRM_DEBUG_KMS("Failed to read display plane latency. "
+					      "Disable CxSR\n");
+				dev_priv->display.update_wm = NULL;
+			}
+			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+		} else if (IS_HASWELL(dev)) {
+			if (SNB_READ_WM0_LATENCY()) {
+				dev_priv->display.update_wm = sandybridge_update_wm;
+				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+				dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
+			} else {
+				DRM_DEBUG_KMS("Failed to read display plane latency. "
+					      "Disable CxSR\n");
+				dev_priv->display.update_wm = NULL;
+			}
+			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+		} else
+			dev_priv->display.update_wm = NULL;
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->display.update_wm = valleyview_update_wm;
+		dev_priv->display.init_clock_gating =
+			valleyview_init_clock_gating;
+	} else if (IS_PINEVIEW(dev)) {
+		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+					    dev_priv->is_ddr3,
+					    dev_priv->fsb_freq,
+					    dev_priv->mem_freq)) {
+			DRM_INFO("failed to find known CxSR latency "
+				 "(found ddr%s fsb freq %d, mem freq %d), "
+				 "disabling CxSR\n",
+				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
+				 dev_priv->fsb_freq, dev_priv->mem_freq);
+			/* Disable CxSR and never update its watermark again */
+			pineview_disable_cxsr(dev);
+			dev_priv->display.update_wm = NULL;
+		} else
+			dev_priv->display.update_wm = pineview_update_wm;
+		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+	} else if (IS_G4X(dev)) {
+		dev_priv->display.update_wm = g4x_update_wm;
+		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+	} else if (IS_GEN4(dev)) {
+		dev_priv->display.update_wm = i965_update_wm;
+		if (IS_CRESTLINE(dev))
+			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+		else if (IS_BROADWATER(dev))
+			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+	} else if (IS_GEN3(dev)) {
+		dev_priv->display.update_wm = i9xx_update_wm;
+		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+	} else if (IS_I865G(dev)) {
+		dev_priv->display.update_wm = i830_update_wm;
+		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+		dev_priv->display.get_fifo_size = i830_get_fifo_size;
+	} else if (IS_I85X(dev)) {
+		dev_priv->display.update_wm = i9xx_update_wm;
+		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
+		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+	} else {
+		dev_priv->display.update_wm = i830_update_wm;
+		dev_priv->display.init_clock_gating = i830_init_clock_gating;
+		if (IS_845G(dev))
+			dev_priv->display.get_fifo_size = i845_get_fifo_size;
+		else
+			dev_priv->display.get_fifo_size = i830_get_fifo_size;
+	}
+}
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+	u32 gt_thread_status_mask;
+
+	if (IS_HASWELL(dev_priv->dev))
+		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
+	else
+		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
+
+	/* w/a for a sporadic read returning 0 by waiting for the GT
+	 * thread to wake up.
+	 */
+	if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+		DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE, 0);
+	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+}
+
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+	I915_WRITE_NOTRACE(FORCEWAKE, 1);
+	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+	__gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+	/* something from same cacheline, but !FORCEWAKE_MT */
+	POSTING_READ(ECOBUS);
+}
+
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+	u32 forcewake_ack;
+
+	if (IS_HASWELL(dev_priv->dev))
+		forcewake_ack = FORCEWAKE_ACK_HSW;
+	else
+		forcewake_ack = FORCEWAKE_MT_ACK;
+
+	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+	/* something from same cacheline, but !FORCEWAKE_MT */
+	POSTING_READ(ECOBUS);
+
+	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+	__gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+	if (dev_priv->forcewake_count++ == 0)
+		dev_priv->gt.force_wake_get(dev_priv);
+	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
+
+void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+	u32 gtfifodbg;
+	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
+	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
+	     "MMIO read or write has been dropped %x\n", gtfifodbg))
+		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+}
+
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE, 0);
+	/* something from same cacheline, but !FORCEWAKE */
+	POSTING_READ(ECOBUS);
+	gen6_gt_check_fifodbg(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+	/* something from same cacheline, but !FORCEWAKE_MT */
+	POSTING_READ(ECOBUS);
+	gen6_gt_check_fifodbg(dev_priv);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+	if (--dev_priv->forcewake_count == 0)
+		dev_priv->gt.force_wake_put(dev_priv);
+	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
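+
+/*
+ * Illustrative usage (a sketch, not part of this patch): code that must
+ * keep the GT awake across several MMIO accesses brackets them with the
+ * reference-counted pair:
+ *
+ *	gen6_gt_force_wake_get(dev_priv);
+ *	... I915_READ()/I915_WRITE() accesses that need the GT awake ...
+ *	gen6_gt_force_wake_put(dev_priv);
+ */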
+
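+/*
+ * Gen6 MMIO writes go through a hardware FIFO that can overflow and drop
+ * writes. Before posting a write, keep at least GT_FIFO_NUM_RESERVED_ENTRIES
+ * slots free, polling for up to ~5ms for the FIFO to drain; the return value
+ * counts timeouts so the caller can warn about a potentially dropped write.
+ */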
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+	int ret = 0;
+
+	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+		int loop = 500;
+		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+			udelay(10);
+			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+		}
+		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+			++ret;
+		dev_priv->gt_fifo_count = fifo;
+	}
+	dev_priv->gt_fifo_count--;
+
+	return ret;
+}
+
+static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
+	/* something from same cacheline, but !FORCEWAKE_VLV */
+	POSTING_READ(FORCEWAKE_ACK_VLV);
+}
+
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+	I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
+			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+
+	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
+			     FORCEWAKE_KERNEL),
+			    FORCEWAKE_ACK_TIMEOUT_MS))
+		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
+
+	__gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+	I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
+			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+	/* The below doubles as a POSTING_READ */
+	gen6_gt_check_fifodbg(dev_priv);
+}
+
+void intel_gt_sanitize(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_VALLEYVIEW(dev)) {
+		vlv_force_wake_reset(dev_priv);
+	} else if (INTEL_INFO(dev)->gen >= 6) {
+		__gen6_gt_force_wake_reset(dev_priv);
+		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+			__gen6_gt_force_wake_mt_reset(dev_priv);
+	}
+
+	/* BIOS often leaves RC6 enabled, but disable it for hw init */
+	if (INTEL_INFO(dev)->gen >= 6)
+		intel_disable_gt_powersave(dev);
+}
+
+void intel_gt_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_VALLEYVIEW(dev)) {
+		dev_priv->gt.force_wake_get = vlv_force_wake_get;
+		dev_priv->gt.force_wake_put = vlv_force_wake_put;
+	} else if (IS_HASWELL(dev)) {
+		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
+		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+	} else if (IS_IVYBRIDGE(dev)) {
+		u32 ecobus;
+
+		/* IVB configs may use multi-threaded forcewake */
+
+		/* A small trick here - if the bios hasn't configured
+		 * MT forcewake, and if the device is in RC6, then
+		 * force_wake_mt_get will not wake the device and the
+		 * ECOBUS read will return zero. Which will be
+		 * (correctly) interpreted by the test below as MT
+		 * forcewake being disabled.
+		 */
+		mutex_lock(&dev->struct_mutex);
+		__gen6_gt_force_wake_mt_get(dev_priv);
+		ecobus = I915_READ_NOTRACE(ECOBUS);
+		__gen6_gt_force_wake_mt_put(dev_priv);
+		mutex_unlock(&dev->struct_mutex);
+
+		if (ecobus & FORCEWAKE_MT_ENABLE) {
+			dev_priv->gt.force_wake_get =
+						__gen6_gt_force_wake_mt_get;
+			dev_priv->gt.force_wake_put =
+						__gen6_gt_force_wake_mt_put;
+		} else {
+			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+			DRM_INFO("when using vblank-synced partial screen updates.\n");
+			dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+			dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+		}
+	} else if (IS_GEN6(dev)) {
+		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+	}
+}
+
+void intel_pm_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+			  intel_gen6_powersave_work);
+}
+
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
+		return -EAGAIN;
+	}
+
+	I915_WRITE(GEN6_PCODE_DATA, *val);
+	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500)) {
+		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
+		return -ETIMEDOUT;
+	}
+
+	*val = I915_READ(GEN6_PCODE_DATA);
+	I915_WRITE(GEN6_PCODE_DATA, 0);
+
+	return 0;
+}
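+
+/*
+ * Illustrative read (a sketch; GEN6_PCODE_READ_MIN_FREQ_TABLE is just one
+ * example mailbox id). Callers must hold rps.hw_lock around the
+ * transaction:
+ *
+ *	u32 val = 0;
+ *	mutex_lock(&dev_priv->rps.hw_lock);
+ *	if (sandybridge_pcode_read(dev_priv,
+ *				   GEN6_PCODE_READ_MIN_FREQ_TABLE, &val))
+ *		DRM_DEBUG_DRIVER("pcode read failed\n");
+ *	mutex_unlock(&dev_priv->rps.hw_lock);
+ */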
+
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
+		return -EAGAIN;
+	}
+
+	I915_WRITE(GEN6_PCODE_DATA, val);
+	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500)) {
+		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
+		return -ETIMEDOUT;
+	}
+
+	I915_WRITE(GEN6_PCODE_DATA, 0);
+
+	return 0;
+}
+
+static int vlv_punit_rw(struct drm_i915_private *dev_priv, u8 opcode,
+			u8 addr, u32 *val)
+{
+	u32 cmd, devfn, port, be, bar;
+
+	bar = 0;
+	be = 0xf;
+	port = IOSF_PORT_PUNIT;
+	devfn = PCI_DEVFN(2, 0);
+
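+	/* Pack the IOSF sideband message: target device/function, opcode,
+	 * destination port, byte enables and BAR are all encoded into a
+	 * single doorbell command dword. */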
+	cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
+		(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
+		(bar << IOSF_BAR_SHIFT);
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+	if (I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) {
+		DRM_DEBUG_DRIVER("warning: pcode (%s) mailbox access failed\n",
+				 opcode == PUNIT_OPCODE_REG_READ ?
+				 "read" : "write");
+		return -EAGAIN;
+	}
+
+	I915_WRITE(VLV_IOSF_ADDR, addr);
+	if (opcode == PUNIT_OPCODE_REG_WRITE)
+		I915_WRITE(VLV_IOSF_DATA, *val);
+	I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
+
+	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0,
+		     500)) {
+		DRM_ERROR("timeout waiting for pcode %s (%d) to finish\n",
+			  opcode == PUNIT_OPCODE_REG_READ ? "read" : "write",
+			  addr);
+		return -ETIMEDOUT;
+	}
+
+	if (opcode == PUNIT_OPCODE_REG_READ)
+		*val = I915_READ(VLV_IOSF_DATA);
+	I915_WRITE(VLV_IOSF_DATA, 0);
+
+	return 0;
+}
+
+int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val)
+{
+	return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_READ, addr, val);
+}
+
+int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
+{
+	return vlv_punit_rw(dev_priv, PUNIT_OPCODE_REG_WRITE, addr, &val);
+}
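+
+/*
+ * Illustrative use (a sketch; PUNIT_REG_EXAMPLE is a hypothetical register
+ * offset, and rps.hw_lock must be held as for the pcode helpers):
+ *
+ *	u32 sts;
+ *	if (valleyview_punit_read(dev_priv, PUNIT_REG_EXAMPLE, &sts) == 0)
+ *		DRM_DEBUG_DRIVER("punit reg: 0x%08x\n", sts);
+ */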
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_ringbuffer.c b/linux-imx/drivers/gpu/drm/i915/intel_ringbuffer.c
new file mode 100644
index 0000000..629527d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -0,0 +1,1935 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Zou Nan hai <nanhai.zou@intel.com>
+ *    Xiang Hai hao <haihao.xiang@intel.com>
+ *
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+	struct drm_i915_gem_object *obj;
+	volatile u32 *cpu_page;
+	u32 gtt_offset;
+};
+
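+/* Bytes free between head and tail; I915_RING_FREE_SPACE is held in reserve
+ * so that a completely full ring is never mistaken for an empty one
+ * (head == tail reads as empty). */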
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
+	if (space < 0)
+		space += ring->size;
+	return space;
+}
+
+static int
+gen2_render_ring_flush(struct intel_ring_buffer *ring,
+		       u32	invalidate_domains,
+		       u32	flush_domains)
+{
+	u32 cmd;
+	int ret;
+
+	cmd = MI_FLUSH;
+	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
+		cmd |= MI_NO_WRITE_FLUSH;
+
+	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+		cmd |= MI_READ_FLUSH;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen4_render_ring_flush(struct intel_ring_buffer *ring,
+		       u32	invalidate_domains,
+		       u32	flush_domains)
+{
+	struct drm_device *dev = ring->dev;
+	u32 cmd;
+	int ret;
+
+	/*
+	 * read/write caches:
+	 *
+	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
+	 * also flushed at 2d versus 3d pipeline switches.
+	 *
+	 * read-only caches:
+	 *
+	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+	 * MI_READ_FLUSH is set, and is always flushed on 965.
+	 *
+	 * I915_GEM_DOMAIN_COMMAND may not exist?
+	 *
+	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+	 * invalidated when MI_EXE_FLUSH is set.
+	 *
+	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+	 * invalidated with every MI_FLUSH.
+	 *
+	 * TLBs:
+	 *
+	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+	 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
+	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+	 * are flushed at any MI_FLUSH.
+	 */
+
+	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
+		cmd &= ~MI_NO_WRITE_FLUSH;
+	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+		cmd |= MI_EXE_FLUSH;
+
+	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+	    (IS_G4X(dev) || IS_GEN5(dev)))
+		cmd |= MI_INVALIDATE_ISP;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+/**
+ * intel_emit_post_sync_nonzero_flush - gen6 post-sync non-zero w/a flush
+ *
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6.  From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ *     "1 of the following must also be set:
+ *      - Render Target Cache Flush Enable ([12] of DW1)
+ *      - Depth Cache Flush Enable ([0] of DW1)
+ *      - Stall at Pixel Scoreboard ([1] of DW1)
+ *      - Depth Stall ([13] of DW1)
+ *      - Post-Sync Operation ([13] of DW1)
+ *      - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it.  Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either.  Notify enable is IRQs, which aren't
+ * really our business.  That leaves only stall at scoreboard.
+ */
+static int
+intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
+{
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+			PIPE_CONTROL_STALL_AT_SCOREBOARD);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+	intel_ring_emit(ring, 0); /* low dword */
+	intel_ring_emit(ring, 0); /* high dword */
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen6_render_ring_flush(struct intel_ring_buffer *ring,
+                         u32 invalidate_domains, u32 flush_domains)
+{
+	u32 flags = 0;
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
+
+	/* Force SNB workarounds for PIPE_CONTROL flushes */
+	ret = intel_emit_post_sync_nonzero_flush(ring);
+	if (ret)
+		return ret;
+
+	/* Just flush everything.  Experiments have shown that reducing the
+	 * number of bits based on the write domains has little performance
+	 * impact.
+	 */
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		/*
+		 * Ensure that any following seqno writes only happen
+		 * when the render cache is indeed flushed.
+		 */
+		flags |= PIPE_CONTROL_CS_STALL;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		/*
+		 * TLB invalidate requires a post-sync write.
+		 */
+		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
+	}
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+			      PIPE_CONTROL_STALL_AT_SCOREBOARD);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen7_render_ring_flush(struct intel_ring_buffer *ring,
+		       u32 invalidate_domains, u32 flush_domains)
+{
+	u32 flags = 0;
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
+
+	/*
+	 * Ensure that any following seqno writes only happen when the render
+	 * cache is indeed flushed.
+	 *
+	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
+	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
+	 * don't try to be clever and just set it unconditionally.
+	 */
+	flags |= PIPE_CONTROL_CS_STALL;
+
+	/* Just flush everything.  Experiments have shown that reducing the
+	 * number of bits based on the write domains has little performance
+	 * impact.
+	 */
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		/*
+		 * TLB invalidate requires a post-sync write.
+		 */
+		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+		/* Workaround: we must issue a pipe_control with CS-stall bit
+		 * set before a pipe_control command that has the state cache
+		 * invalidate bit set. */
+		gen7_render_ring_cs_stall_wa(ring);
+	}
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static void ring_write_tail(struct intel_ring_buffer *ring,
+			    u32 value)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	I915_WRITE_TAIL(ring, value);
+}
+
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
+			RING_ACTHD(ring->mmio_base) : ACTHD;
+
+	return I915_READ(acthd_reg);
+}
+
+static int init_ring_common(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj = ring->obj;
+	int ret = 0;
+	u32 head;
+
+	if (HAS_FORCE_WAKE(dev))
+		gen6_gt_force_wake_get(dev_priv);
+
+	/* Stop the ring if it's running. */
+	I915_WRITE_CTL(ring, 0);
+	I915_WRITE_HEAD(ring, 0);
+	ring->write_tail(ring, 0);
+
+	head = I915_READ_HEAD(ring) & HEAD_ADDR;
+
+	/* G45 ring initialization fails to reset head to zero */
+	if (head != 0) {
+		DRM_DEBUG_KMS("%s head not reset to zero "
+			      "ctl %08x head %08x tail %08x start %08x\n",
+			      ring->name,
+			      I915_READ_CTL(ring),
+			      I915_READ_HEAD(ring),
+			      I915_READ_TAIL(ring),
+			      I915_READ_START(ring));
+
+		I915_WRITE_HEAD(ring, 0);
+
+		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+			DRM_ERROR("failed to set %s head to zero "
+				  "ctl %08x head %08x tail %08x start %08x\n",
+				  ring->name,
+				  I915_READ_CTL(ring),
+				  I915_READ_HEAD(ring),
+				  I915_READ_TAIL(ring),
+				  I915_READ_START(ring));
+		}
+	}
+
+	/* Initialize the ring. This must happen _after_ we've cleared the ring
+	 * registers with the above sequence (the readback of the HEAD registers
+	 * also enforces ordering), otherwise the hw might lose the new ring
+	 * register values. */
+	I915_WRITE_START(ring, obj->gtt_offset);
+	I915_WRITE_CTL(ring,
+			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+			| RING_VALID);
+
+	/* If the head is still not zero, the ring is dead */
+	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+		     I915_READ_START(ring) == obj->gtt_offset &&
+		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
+		DRM_ERROR("%s initialization failed "
+				"ctl %08x head %08x tail %08x start %08x\n",
+				ring->name,
+				I915_READ_CTL(ring),
+				I915_READ_HEAD(ring),
+				I915_READ_TAIL(ring),
+				I915_READ_START(ring));
+		ret = -EIO;
+		goto out;
+	}
+
+	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+		i915_kernel_lost_context(ring->dev);
+	else {
+		ring->head = I915_READ_HEAD(ring);
+		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+		ring->space = ring_space(ring);
+		ring->last_retired_head = -1;
+	}
+
+out:
+	if (HAS_FORCE_WAKE(dev))
+		gen6_gt_force_wake_put(dev_priv);
+
+	return ret;
+}
+
+static int
+init_pipe_control(struct intel_ring_buffer *ring)
+{
+	struct pipe_control *pc;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	if (ring->private)
+		return 0;
+
+	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
+	if (!pc)
+		return -ENOMEM;
+
+	obj = i915_gem_alloc_object(ring->dev, 4096);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate seqno page\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+	ret = i915_gem_object_pin(obj, 4096, true, false);
+	if (ret)
+		goto err_unref;
+
+	pc->gtt_offset = obj->gtt_offset;
+	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
+	if (pc->cpu_page == NULL) {
+		ret = -ENOMEM;
+		goto err_unpin;
+	}
+
+	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
+			 ring->name, pc->gtt_offset);
+
+	pc->obj = obj;
+	ring->private = pc;
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(&obj->base);
+err:
+	kfree(pc);
+	return ret;
+}
+
+static void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+	struct pipe_control *pc = ring->private;
+	struct drm_i915_gem_object *obj;
+
+	obj = pc->obj;
+
+	kunmap(sg_page(obj->pages->sgl));
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(&obj->base);
+
+	kfree(pc);
+}
+
+static int init_render_ring(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = init_ring_common(ring);
+
+	if (INTEL_INFO(dev)->gen > 3)
+		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
+
+	/* We need to disable the AsyncFlip performance optimisations in order
+	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+	 * programmed to '1' on all products.
+	 */
+	if (INTEL_INFO(dev)->gen >= 6)
+		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+	/* Required for the hardware to program scanline values for waiting */
+	if (INTEL_INFO(dev)->gen == 6)
+		I915_WRITE(GFX_MODE,
+			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
+	if (IS_GEN7(dev))
+		I915_WRITE(GFX_MODE_GEN7,
+			   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		ret = init_pipe_control(ring);
+		if (ret)
+			return ret;
+	}
+
+	if (IS_GEN6(dev)) {
+		/* From the Sandybridge PRM, volume 1 part 3, page 24:
+		 * "If this bit is set, STCunit will have LRA as replacement
+		 *  policy. [...] This bit must be reset.  LRA replacement
+		 *  policy is not supported."
+		 */
+		I915_WRITE(CACHE_MODE_0,
+			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+		/* This is not explicitly set for GEN6, so read the register.
+		 * see intel_ring_mi_set_context() for why we care.
+		 * TODO: consider explicitly setting the bit for GEN5
+		 */
+		ring->itlb_before_ctx_switch =
+			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
+	}
+
+	if (INTEL_INFO(dev)->gen >= 6)
+		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+
+	if (HAS_L3_GPU_CACHE(dev))
+		I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+
+	return ret;
+}
+
+static void render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	if (!ring->private)
+		return;
+
+	if (HAS_BROKEN_CS_TLB(dev))
+		drm_gem_object_unreference(to_gem_object(ring->private));
+
+	if (INTEL_INFO(dev)->gen >= 5)
+		cleanup_pipe_control(ring);
+
+	ring->private = NULL;
+}
+
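+/* Emit the three dwords that store the current (lazy) request seqno into one
+ * of the other rings' semaphore mailbox registers: a MI_LOAD_REGISTER_IMM
+ * header, the mailbox MMIO offset, and the seqno value itself.
+ */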
+static void
+update_mboxes(struct intel_ring_buffer *ring,
+	      u32 mmio_offset)
+{
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit(ring, mmio_offset);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
+}
+
+/**
+ * gen6_add_request - Update the semaphore mailbox registers
+ *
+ * @ring: ring that is adding a request
+ *
+ * Update the mailbox registers in the *other* rings with the current seqno.
+ * This acts like a signal in the canonical semaphore.
+ */
+static int
+gen6_add_request(struct intel_ring_buffer *ring)
+{
+	u32 mbox1_reg;
+	u32 mbox2_reg;
+	int ret;
+
+	ret = intel_ring_begin(ring, 10);
+	if (ret)
+		return ret;
+
+	mbox1_reg = ring->signal_mbox[0];
+	mbox2_reg = ring->signal_mbox[1];
+
+	update_mboxes(ring, mbox1_reg);
+	update_mboxes(ring, mbox2_reg);
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
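+/* A seqno ahead of the last one handed out can only occur if the 32-bit
+ * seqno counter has wrapped; callers use this to skip hardware waits that
+ * would otherwise never complete.
+ */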
+static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+					      u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->last_seqno < seqno;
+}
+
+/**
+ * gen6_ring_sync - sync the waiter to the signaller on seqno
+ *
+ * @waiter: ring that is waiting
+ * @signaller: ring which has signalled, or will signal
+ * @seqno: seqno which the waiter will block on
+ */
+static int
+gen6_ring_sync(struct intel_ring_buffer *waiter,
+	       struct intel_ring_buffer *signaller,
+	       u32 seqno)
+{
+	int ret;
+	u32 dw1 = MI_SEMAPHORE_MBOX |
+		  MI_SEMAPHORE_COMPARE |
+		  MI_SEMAPHORE_REGISTER;
+
+	/* Throughout all of the GEM code, seqno passed implies our current
+	 * seqno is >= the last seqno executed. However for hardware the
+	 * comparison is strictly greater than.
+	 */
+	seqno -= 1;
+
+	WARN_ON(signaller->semaphore_register[waiter->id] ==
+		MI_SEMAPHORE_SYNC_INVALID);
+
+	ret = intel_ring_begin(waiter, 4);
+	if (ret)
+		return ret;
+
+	/* If seqno wrap happened, omit the wait with no-ops */
+	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+		intel_ring_emit(waiter,
+				dw1 |
+				signaller->semaphore_register[waiter->id]);
+		intel_ring_emit(waiter, seqno);
+		intel_ring_emit(waiter, 0);
+		intel_ring_emit(waiter, MI_NOOP);
+	} else {
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+	}
+	intel_ring_advance(waiter);
+
+	return 0;
+}
+
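+/* Emit a 4-dword PIPE_CONTROL that stalls at the depth stage and performs a
+ * qword write to the given scratch address; used below to flush the pending
+ * PIPE_NOTIFY writes out across separate cachelines.
+ */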
+#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
+do {									\
+	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |		\
+		 PIPE_CONTROL_DEPTH_STALL);				\
+	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
+	intel_ring_emit(ring__, 0);							\
+	intel_ring_emit(ring__, 0);							\
+} while (0)
+
+static int
+pc_render_add_request(struct intel_ring_buffer *ring)
+{
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
+
+	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+	 * incoherent with writes to memory, i.e. completely fubar,
+	 * so we need to use PIPE_NOTIFY instead.
+	 *
+	 * However, we also need to workaround the qword write
+	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+	 * memory before requesting an interrupt.
+	 */
+	ret = intel_ring_begin(ring, 32);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WRITE_FLUSH |
+			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, 0);
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128; /* write to separate cachelines */
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WRITE_FLUSH |
+			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+			PIPE_CONTROL_NOTIFY);
+	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static u32
+gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+{
+	/* Workaround to force correct ordering between irq and seqno writes on
+	 * ivb (and maybe also on snb) by reading from a CS register (like
+	 * ACTHD) before reading the status page. */
+	if (!lazy_coherency)
+		intel_ring_get_active_head(ring);
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static u32
+ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+{
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
+
+static u32
+pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
+{
+	struct pipe_control *pc = ring->private;
+	return pc->cpu_page[0];
+}
+
+static void
+pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	struct pipe_control *pc = ring->private;
+	pc->cpu_page[0] = seqno;
+}
+
+static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+		POSTING_READ(GTIMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+		POSTING_READ(GTIMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+static bool
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		dev_priv->irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE(IMR, dev_priv->irq_mask);
+		POSTING_READ(IMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		dev_priv->irq_mask |= ring->irq_enable_mask;
+		I915_WRITE(IMR, dev_priv->irq_mask);
+		POSTING_READ(IMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
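+/* Gen2 parts expose only a 16-bit interrupt mask register, hence the 16-bit
+ * MMIO accessors in the i8xx variants below.
+ */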
+static bool
+i8xx_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		dev_priv->irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE16(IMR, dev_priv->irq_mask);
+		POSTING_READ16(IMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+i8xx_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		dev_priv->irq_mask |= ring->irq_enable_mask;
+		I915_WRITE16(IMR, dev_priv->irq_mask);
+		POSTING_READ16(IMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
+
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 mmio = 0;
+
+	/* The ring status page addresses are no longer next to the rest of
+	 * the ring registers as of gen7.
+	 */
+	if (IS_GEN7(dev)) {
+		switch (ring->id) {
+		case RCS:
+			mmio = RENDER_HWS_PGA_GEN7;
+			break;
+		case BCS:
+			mmio = BLT_HWS_PGA_GEN7;
+			break;
+		case VCS:
+			mmio = BSD_HWS_PGA_GEN7;
+			break;
+		}
+	} else if (IS_GEN6(ring->dev)) {
+		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+	} else {
+		mmio = RING_HWS_PGA(ring->mmio_base);
+	}
+
+	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+	POSTING_READ(mmio);
+
+	/* Flush the TLB for this page */
+	if (INTEL_INFO(dev)->gen >= 6) {
+		u32 reg = RING_INSTPM(ring->mmio_base);
+		I915_WRITE(reg,
+			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					      INSTPM_SYNC_FLUSH));
+		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+			     1000))
+			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+				  ring->name);
+	}
+}
+
+static int
+bsd_ring_flush(struct intel_ring_buffer *ring,
+	       u32     invalidate_domains,
+	       u32     flush_domains)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+	return 0;
+}
+
+static int
+i9xx_add_request(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	/* It looks like we need to prevent the gt from suspending while waiting
+	 * for a notify irq, otherwise irqs seem to get lost on at least the
+	 * blt/bsd rings on ivb. */
+	gen6_gt_force_wake_get(dev_priv);
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
+						GEN6_RENDER_L3_PARITY_ERROR));
+		else
+			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+		POSTING_READ(GTIMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+		else
+			I915_WRITE_IMR(ring, ~0);
+		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+		POSTING_READ(GTIMR);
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	gen6_gt_force_wake_put(dev_priv);
+}
+
+static int
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			 u32 offset, u32 length,
+			 unsigned flags)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START |
+			MI_BATCH_GTT |
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
+#define I830_BATCH_LIMIT (256*1024)
+static int
+i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
+				u32 offset, u32 len,
+				unsigned flags)
+{
+	int ret;
+
+	if (flags & I915_DISPATCH_PINNED) {
+		ret = intel_ring_begin(ring, 4);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, MI_BATCH_BUFFER);
+		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+		intel_ring_emit(ring, offset + len - 8);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
+	} else {
+		struct drm_i915_gem_object *obj = ring->private;
+		u32 cs_offset = obj->gtt_offset;
+
+		if (len > I830_BATCH_LIMIT)
+			return -ENOSPC;
+
+		ret = intel_ring_begin(ring, 9+3);
+		if (ret)
+			return ret;
+		/* Blit the batch (which has now all relocs applied) to the stable batch
+		 * scratch bo area (so that the CS never stumbles over its tlb
+		 * invalidation bug) ... */
+		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+				XY_SRC_COPY_BLT_WRITE_ALPHA |
+				XY_SRC_COPY_BLT_WRITE_RGB);
+		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+		intel_ring_emit(ring, cs_offset);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 4096);
+		intel_ring_emit(ring, offset);
+		intel_ring_emit(ring, MI_FLUSH);
+
+		/* ... and execute it. */
+		intel_ring_emit(ring, MI_BATCH_BUFFER);
+		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+		intel_ring_emit(ring, cs_offset + len - 8);
+		intel_ring_advance(ring);
+	}
+
+	return 0;
+}
+
+static int
+i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			 u32 offset, u32 len,
+			 unsigned flags)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static void cleanup_status_page(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_object *obj;
+
+	obj = ring->status_page.obj;
+	if (obj == NULL)
+		return;
+
+	kunmap(sg_page(obj->pages->sgl));
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(&obj->base);
+	ring->status_page.obj = NULL;
+}
+
+static int init_status_page(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	obj = i915_gem_alloc_object(dev, 4096);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate status page\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+	ret = i915_gem_object_pin(obj, 4096, true, false);
+	if (ret != 0)
+		goto err_unref;
+
+	ring->status_page.gfx_addr = obj->gtt_offset;
+	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
+	if (ring->status_page.page_addr == NULL) {
+		ret = -ENOMEM;
+		goto err_unpin;
+	}
+	ring->status_page.obj = obj;
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+	intel_ring_setup_status_page(ring);
+	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+			ring->name, ring->status_page.gfx_addr);
+
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(&obj->base);
+err:
+	return ret;
+}
+
+static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 addr;
+
+	if (!dev_priv->status_page_dmah) {
+		dev_priv->status_page_dmah =
+			drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+		if (!dev_priv->status_page_dmah)
+			return -ENOMEM;
+	}
+
+	addr = dev_priv->status_page_dmah->busaddr;
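+	/* On gen4+ the status page address can be 36 bits wide; bits 35:32
+	 * of the bus address are presumably reported through bits 7:4 of
+	 * HWS_PGA, which is what the shift-and-mask below encodes.
+	 */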
+	if (INTEL_INFO(ring->dev)->gen >= 4)
+		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+	I915_WRITE(HWS_PGA, addr);
+
+	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+	return 0;
+}
+
+static int intel_init_ring_buffer(struct drm_device *dev,
+				  struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_object *obj;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	ring->size = 32 * PAGE_SIZE;
+	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
+
+	init_waitqueue_head(&ring->irq_queue);
+
+	if (I915_NEED_GFX_HWS(dev)) {
+		ret = init_status_page(ring);
+		if (ret)
+			return ret;
+	} else {
+		BUG_ON(ring->id != RCS);
+		ret = init_phys_hws_pga(ring);
+		if (ret)
+			return ret;
+	}
+
+	obj = NULL;
+	if (!HAS_LLC(dev))
+		obj = i915_gem_object_create_stolen(dev, ring->size);
+	if (obj == NULL)
+		obj = i915_gem_alloc_object(dev, ring->size);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate ringbuffer\n");
+		ret = -ENOMEM;
+		goto err_hws;
+	}
+
+	ring->obj = obj;
+
+	ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
+	if (ret)
+		goto err_unref;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret)
+		goto err_unpin;
+
+	ring->virtual_start =
+		ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+			   ring->size);
+	if (ring->virtual_start == NULL) {
+		DRM_ERROR("Failed to map ringbuffer.\n");
+		ret = -EINVAL;
+		goto err_unpin;
+	}
+
+	ret = ring->init(ring);
+	if (ret)
+		goto err_unmap;
+
+	/* Workaround an erratum on the i830 which causes a hang if
+	 * the TAIL pointer points to within the last 2 cachelines
+	 * of the buffer.
+	 */
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev) || IS_845G(ring->dev))
+		ring->effective_size -= 128;
+
+	return 0;
+
+err_unmap:
+	iounmap(ring->virtual_start);
+err_unpin:
+	i915_gem_object_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(&obj->base);
+	ring->obj = NULL;
+err_hws:
+	cleanup_status_page(ring);
+	return ret;
+}
+
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv;
+	int ret;
+
+	if (ring->obj == NULL)
+		return;
+
+	/* Disable the ring buffer. The ring must be idle at this point */
+	dev_priv = ring->dev->dev_private;
+	ret = intel_ring_idle(ring);
+	if (ret)
+		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+			  ring->name, ret);
+
+	I915_WRITE_CTL(ring, 0);
+
+	iounmap(ring->virtual_start);
+
+	i915_gem_object_unpin(ring->obj);
+	drm_gem_object_unreference(&ring->obj->base);
+	ring->obj = NULL;
+
+	if (ring->cleanup)
+		ring->cleanup(ring);
+
+	cleanup_status_page(ring);
+}
+
+static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	int ret;
+
+	ret = i915_wait_seqno(ring, seqno);
+	if (!ret)
+		i915_gem_retire_requests_ring(ring);
+
+	return ret;
+}
+
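+/* Try to free up at least @n bytes of ring space by waiting for the oldest
+ * outstanding request whose retirement advances the cached head pointer far
+ * enough, rather than busy-polling the hardware head register.
+ */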
+static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
+{
+	struct drm_i915_gem_request *request;
+	u32 seqno = 0;
+	int ret;
+
+	i915_gem_retire_requests_ring(ring);
+
+	if (ring->last_retired_head != -1) {
+		ring->head = ring->last_retired_head;
+		ring->last_retired_head = -1;
+		ring->space = ring_space(ring);
+		if (ring->space >= n)
+			return 0;
+	}
+
+	list_for_each_entry(request, &ring->request_list, list) {
+		int space;
+
+		if (request->tail == -1)
+			continue;
+
+		space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
+		if (space < 0)
+			space += ring->size;
+		if (space >= n) {
+			seqno = request->seqno;
+			break;
+		}
+
+		/* Consume this request in case we need more space than
+		 * is available and so need to prevent a race between
+		 * updating last_retired_head and direct reads of
+		 * I915_RING_HEAD. It also provides a nice sanity check.
+		 */
+		request->tail = -1;
+	}
+
+	if (seqno == 0)
+		return -ENOSPC;
+
+	ret = intel_ring_wait_seqno(ring, seqno);
+	if (ret)
+		return ret;
+
+	if (WARN_ON(ring->last_retired_head == -1))
+		return -ENOSPC;
+
+	ring->head = ring->last_retired_head;
+	ring->last_retired_head = -1;
+	ring->space = ring_space(ring);
+	if (WARN_ON(ring->space < n))
+		return -ENOSPC;
+
+	return 0;
+}
+
+static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long end;
+	int ret;
+
+	ret = intel_ring_wait_request(ring, n);
+	if (ret != -ENOSPC)
+		return ret;
+
+	trace_i915_ring_wait_begin(ring);
+	/* With GEM the hangcheck timer should kick us out of the loop,
+	 * leaving it early runs the risk of corrupting GEM state (due
+	 * to running on almost untested codepaths). But on resume
+	 * timers don't work yet, so prevent a complete hang in that
+	 * case by choosing an insanely large timeout. */
+	end = jiffies + 60 * HZ;
+
+	do {
+		ring->head = I915_READ_HEAD(ring);
+		ring->space = ring_space(ring);
+		if (ring->space >= n) {
+			trace_i915_ring_wait_end(ring);
+			return 0;
+		}
+
+		if (dev->primary->master) {
+			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+			if (master_priv->sarea_priv)
+				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+		}
+
+		msleep(1);
+
+		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+					   dev_priv->mm.interruptible);
+		if (ret)
+			return ret;
+	} while (!time_after(jiffies, end));
+	trace_i915_ring_wait_end(ring);
+	return -EBUSY;
+}
+
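+/* Pad the remainder of the ring with MI_NOOPs so that execution wraps
+ * cleanly back to the start of the buffer; a single command must never
+ * straddle the end of the ring.
+ */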
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+	uint32_t __iomem *virt;
+	int rem = ring->size - ring->tail;
+
+	if (ring->space < rem) {
+		int ret = ring_wait_for_space(ring, rem);
+		if (ret)
+			return ret;
+	}
+
+	virt = ring->virtual_start + ring->tail;
+	rem /= 4;
+	while (rem--)
+		iowrite32(MI_NOOP, virt++);
+
+	ring->tail = 0;
+	ring->space = ring_space(ring);
+
+	return 0;
+}
+
+int intel_ring_idle(struct intel_ring_buffer *ring)
+{
+	u32 seqno;
+	int ret;
+
+	/* We need to add any requests required to flush the objects and ring */
+	if (ring->outstanding_lazy_request) {
+		ret = i915_add_request(ring, NULL, NULL);
+		if (ret)
+			return ret;
+	}
+
+	/* Wait upon the last request to be completed */
+	if (list_empty(&ring->request_list))
+		return 0;
+
+	seqno = list_entry(ring->request_list.prev,
+			   struct drm_i915_gem_request,
+			   list)->seqno;
+
+	return i915_wait_seqno(ring, seqno);
+}
+
+static int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+	if (ring->outstanding_lazy_request)
+		return 0;
+
+	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
+
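+/* Make room for @bytes of new commands: wrap first if the emit would run
+ * past the effective end of the ring, then wait for enough space to drain,
+ * in that order, so the eventual emit is always contiguous.
+ */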
+static int __intel_ring_prepare(struct intel_ring_buffer *ring,
+				int bytes)
+{
+	int ret;
+
+	if (unlikely(ring->tail + bytes > ring->effective_size)) {
+		ret = intel_wrap_ring_buffer(ring);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (unlikely(ring->space < bytes)) {
+		ret = ring_wait_for_space(ring, bytes);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	return 0;
+}
+
+int intel_ring_begin(struct intel_ring_buffer *ring,
+		     int num_dwords)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	int ret;
+
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+				   dev_priv->mm.interruptible);
+	if (ret)
+		return ret;
+
+	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+	if (ret)
+		return ret;
+
+	/* Preallocate the olr before touching the ring */
+	ret = intel_ring_alloc_seqno(ring);
+	if (ret)
+		return ret;
+
+	ring->space -= num_dwords * sizeof(uint32_t);
+	return 0;
+}
+
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	BUG_ON(ring->outstanding_lazy_request);
+
+	if (INTEL_INFO(ring->dev)->gen >= 6) {
+		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+	}
+
+	ring->set_seqno(ring, seqno);
+}
+
+void intel_ring_advance(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
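+	/* The ring size is a power of two, so masking with size - 1 wraps
+	 * the software tail. If the ring has been stopped through
+	 * gpu_error.stop_rings (used, e.g., to simulate hangs), skip the
+	 * hardware tail write so the GPU never sees the new commands.
+	 */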
+	ring->tail &= ring->size - 1;
+	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+		return;
+	ring->write_tail(ring, ring->tail);
+}
+
+
+static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
+				     u32 value)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+
+	/* Every tail move must follow the sequence below */
+
+	/* Disable notification that the ring is IDLE. The GT
+	 * will then assume that it is busy and bring it out of rc6.
+	 */
+	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+	/* Clear the context id. Here be magic! */
+	I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+
+	/* Wait for the ring not to be idle, i.e. for it to wake up. */
+	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+		      GEN6_BSD_SLEEP_INDICATOR) == 0,
+		     50))
+		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+
+	/* Now that the ring is fully powered up, update the tail */
+	I915_WRITE_TAIL(ring, value);
+	POSTING_READ(RING_TAIL(ring->mmio_base));
+
+	/* Let the ring send IDLE messages to the GT again,
+	 * and so let it sleep to conserve power when idle.
+	 */
+	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+}
+
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
+			   u32 invalidate, u32 flush)
+{
+	uint32_t cmd;
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	cmd = MI_FLUSH_DW;
+	/*
+	 * Bspec vol 1c.5 - video engine command streamer:
+	 * "If ENABLED, all TLBs will be invalidated once the flush
+	 * operation is complete. This bit is only valid when the
+	 * Post-Sync Operation field is a value of 1h or 3h."
+	 */
+	if (invalidate & I915_GEM_GPU_DOMAINS)
+		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+	return 0;
+}
+
+static int
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len,
+			      unsigned flags)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+	/* bit0-7 is the length on GEN6+ */
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len,
+			      unsigned flags)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START |
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+	/* bit0-7 is the length on GEN6+ */
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+/* Blitter support (SandyBridge+) */
+
+static int blt_ring_flush(struct intel_ring_buffer *ring,
+			  u32 invalidate, u32 flush)
+{
+	uint32_t cmd;
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	cmd = MI_FLUSH_DW;
+	/*
+	 * Bspec vol 1c.3 - blitter engine command streamer:
+	 * "If ENABLED, all TLBs will be invalidated once the flush
+	 * operation is complete. This bit is only valid when the
+	 * Post-Sync Operation field is a value of 1h or 3h."
+	 */
+	if (invalidate & I915_GEM_DOMAIN_RENDER)
+		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+			MI_FLUSH_DW_OP_STOREDW;
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+	return 0;
+}
+
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+	ring->name = "render ring";
+	ring->id = RCS;
+	ring->mmio_base = RENDER_RING_BASE;
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ring->add_request = gen6_add_request;
+		ring->flush = gen7_render_ring_flush;
+		if (INTEL_INFO(dev)->gen == 6)
+			ring->flush = gen6_render_ring_flush;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->irq_enable_mask = GT_USER_INTERRUPT;
+		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
+		ring->sync_to = gen6_ring_sync;
+		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
+		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
+		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
+		ring->signal_mbox[0] = GEN6_VRSYNC;
+		ring->signal_mbox[1] = GEN6_BRSYNC;
+	} else if (IS_GEN5(dev)) {
+		ring->add_request = pc_render_add_request;
+		ring->flush = gen4_render_ring_flush;
+		ring->get_seqno = pc_render_get_seqno;
+		ring->set_seqno = pc_render_set_seqno;
+		ring->irq_get = gen5_ring_get_irq;
+		ring->irq_put = gen5_ring_put_irq;
+		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
+	} else {
+		ring->add_request = i9xx_add_request;
+		if (INTEL_INFO(dev)->gen < 4)
+			ring->flush = gen2_render_ring_flush;
+		else
+			ring->flush = gen4_render_ring_flush;
+		ring->get_seqno = ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
+		if (IS_GEN2(dev)) {
+			ring->irq_get = i8xx_ring_get_irq;
+			ring->irq_put = i8xx_ring_put_irq;
+		} else {
+			ring->irq_get = i9xx_ring_get_irq;
+			ring->irq_put = i9xx_ring_put_irq;
+		}
+		ring->irq_enable_mask = I915_USER_INTERRUPT;
+	}
+	ring->write_tail = ring_write_tail;
+	if (IS_HASWELL(dev))
+		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+	else if (INTEL_INFO(dev)->gen >= 6)
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	else if (INTEL_INFO(dev)->gen >= 4)
+		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+	else if (IS_I830(dev) || IS_845G(dev))
+		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+	else
+		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+	ring->init = init_render_ring;
+	ring->cleanup = render_ring_cleanup;
+
+	/* Workaround batchbuffer to combat CS tlb bug. */
+	if (HAS_BROKEN_CS_TLB(dev)) {
+		struct drm_i915_gem_object *obj;
+		int ret;
+
+		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+		if (obj == NULL) {
+			DRM_ERROR("Failed to allocate batch bo\n");
+			return -ENOMEM;
+		}
+
+		ret = i915_gem_object_pin(obj, 0, true, false);
+		if (ret != 0) {
+			drm_gem_object_unreference(&obj->base);
+			DRM_ERROR("Failed to ping batch bo\n");
+			return ret;
+		}
+
+		ring->private = obj;
+	}
+
+	return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	int ret;
+
+	ring->name = "render ring";
+	ring->id = RCS;
+	ring->mmio_base = RENDER_RING_BASE;
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		/* non-kms not supported on gen6+ */
+		return -ENODEV;
+	}
+
+	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
+	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
+	 * the special gen5 functions. */
+	ring->add_request = i9xx_add_request;
+	if (INTEL_INFO(dev)->gen < 4)
+		ring->flush = gen2_render_ring_flush;
+	else
+		ring->flush = gen4_render_ring_flush;
+	ring->get_seqno = ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
+	if (IS_GEN2(dev)) {
+		ring->irq_get = i8xx_ring_get_irq;
+		ring->irq_put = i8xx_ring_put_irq;
+	} else {
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
+	}
+	ring->irq_enable_mask = I915_USER_INTERRUPT;
+	ring->write_tail = ring_write_tail;
+	if (INTEL_INFO(dev)->gen >= 4)
+		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+	else if (IS_I830(dev) || IS_845G(dev))
+		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+	else
+		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+	ring->init = init_render_ring;
+	ring->cleanup = render_ring_cleanup;
+
+	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+
+	ring->size = size;
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev) || IS_845G(ring->dev))
+		ring->effective_size -= 128;
+
+	ring->virtual_start = ioremap_wc(start, size);
+	if (ring->virtual_start == NULL) {
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	if (!I915_NEED_GFX_HWS(dev)) {
+		ret = init_phys_hws_pga(ring);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
+
+	ring->name = "bsd ring";
+	ring->id = VCS;
+
+	ring->write_tail = ring_write_tail;
+	if (IS_GEN6(dev) || IS_GEN7(dev)) {
+		ring->mmio_base = GEN6_BSD_RING_BASE;
+		/* gen6 bsd needs a special wa for tail updates */
+		if (IS_GEN6(dev))
+			ring->write_tail = gen6_bsd_ring_write_tail;
+		ring->flush = gen6_ring_flush;
+		ring->add_request = gen6_add_request;
+		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
+		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		ring->sync_to = gen6_ring_sync;
+		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
+		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
+		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
+		ring->signal_mbox[0] = GEN6_RVSYNC;
+		ring->signal_mbox[1] = GEN6_BVSYNC;
+	} else {
+		ring->mmio_base = BSD_RING_BASE;
+		ring->flush = bsd_ring_flush;
+		ring->add_request = i9xx_add_request;
+		ring->get_seqno = ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
+		if (IS_GEN5(dev)) {
+			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			ring->irq_get = gen5_ring_get_irq;
+			ring->irq_put = gen5_ring_put_irq;
+		} else {
+			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+			ring->irq_get = i9xx_ring_get_irq;
+			ring->irq_put = i9xx_ring_put_irq;
+		}
+		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+	}
+	ring->init = init_ring_common;
+
+	return intel_init_ring_buffer(dev, ring);
+}
+
+int intel_init_blt_ring_buffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+
+	ring->name = "blitter ring";
+	ring->id = BCS;
+
+	ring->mmio_base = BLT_RING_BASE;
+	ring->write_tail = ring_write_tail;
+	ring->flush = blt_ring_flush;
+	ring->add_request = gen6_add_request;
+	ring->get_seqno = gen6_ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
+	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
+	ring->irq_get = gen6_ring_get_irq;
+	ring->irq_put = gen6_ring_put_irq;
+	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	ring->sync_to = gen6_ring_sync;
+	ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
+	ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
+	ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->signal_mbox[0] = GEN6_RBSYNC;
+	ring->signal_mbox[1] = GEN6_VBSYNC;
+	ring->init = init_ring_common;
+
+	return intel_init_ring_buffer(dev, ring);
+}
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	if (!ring->gpu_caches_dirty)
+		return 0;
+
+	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+	uint32_t flush_domains;
+	int ret;
+
+	flush_domains = 0;
+	if (ring->gpu_caches_dirty)
+		flush_domains = I915_GEM_GPU_DOMAINS;
+
+	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_ringbuffer.h b/linux-imx/drivers/gpu/drm/i915/intel_ringbuffer.h
new file mode 100644
index 0000000..d66208c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -0,0 +1,251 @@
+#ifndef _INTEL_RINGBUFFER_H_
+#define _INTEL_RINGBUFFER_H_
+
+/*
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
+ * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
+ * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
+ *
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
+ * cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+#define I915_RING_FREE_SPACE 64
+
+struct  intel_hw_status_page {
+	u32		*page_addr;
+	unsigned int	gfx_addr;
+	struct		drm_i915_gem_object *obj;
+};
+
+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+
+#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+
+#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+
+#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+
+#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+
+#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
+
+struct  intel_ring_buffer {
+	const char	*name;
+	enum intel_ring_id {
+		RCS = 0x0,
+		VCS,
+		BCS,
+	} id;
+#define I915_NUM_RINGS 3
+	u32		mmio_base;
+	void		__iomem *virtual_start;
+	struct		drm_device *dev;
+	struct		drm_i915_gem_object *obj;
+
+	u32		head;
+	u32		tail;
+	int		space;
+	int		size;
+	int		effective_size;
+	struct intel_hw_status_page status_page;
+
+	/** We track the position of the requests in the ring buffer, and
+	 * when each is retired we increment last_retired_head as the GPU
+	 * must have finished processing the request and so we know we
+	 * can advance the ringbuffer up to that position.
+	 *
+	 * last_retired_head is set to -1 after the value is consumed so
+	 * we can detect new retirements.
+	 */
+	u32		last_retired_head;
+
+	u32		irq_refcount;		/* protected by dev_priv->irq_lock */
+	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
+	u32		trace_irq_seqno;
+	u32		sync_seqno[I915_NUM_RINGS-1];
+	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
+	void		(*irq_put)(struct intel_ring_buffer *ring);
+
+	int		(*init)(struct intel_ring_buffer *ring);
+
+	void		(*write_tail)(struct intel_ring_buffer *ring,
+				      u32 value);
+	int __must_check (*flush)(struct intel_ring_buffer *ring,
+				  u32	invalidate_domains,
+				  u32	flush_domains);
+	int		(*add_request)(struct intel_ring_buffer *ring);
+	/* Some chipsets are not quite as coherent as advertised and need
+	 * an expensive kick to force a true read of the up-to-date seqno.
+	 * However, the up-to-date seqno is not always required and the last
+	 * seen value is good enough. Note that the seqno will always be
+	 * monotonic, even if not coherent.
+	 */
+	u32		(*get_seqno)(struct intel_ring_buffer *ring,
+				     bool lazy_coherency);
+	void		(*set_seqno)(struct intel_ring_buffer *ring,
+				     u32 seqno);
+	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+					       u32 offset, u32 length,
+					       unsigned flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
+	void		(*cleanup)(struct intel_ring_buffer *ring);
+	int		(*sync_to)(struct intel_ring_buffer *ring,
+				   struct intel_ring_buffer *to,
+				   u32 seqno);
+
+	u32		semaphore_register[3]; /* our mbox written by others */
+	u32		signal_mbox[2]; /* mboxes this ring signals to */
+	/**
+	 * List of objects currently involved in rendering from the
+	 * ringbuffer.
+	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives.  last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
+	 * A reference is held on the buffer while on this list.
+	 */
+	struct list_head active_list;
+
+	/**
+	 * List of breadcrumbs associated with GPU requests currently
+	 * outstanding.
+	 */
+	struct list_head request_list;
+
+	/**
+	 * Do we have some not yet emitted requests outstanding?
+	 */
+	u32 outstanding_lazy_request;
+	bool gpu_caches_dirty;
+
+	wait_queue_head_t irq_queue;
+
+	/**
+	 * Do an explicit TLB flush before MI_SET_CONTEXT
+	 */
+	bool itlb_before_ctx_switch;
+	struct i915_hw_context *default_context;
+	struct drm_i915_gem_object *last_context_obj;
+
+	void *private;
+};
+
+static inline bool
+intel_ring_initialized(struct intel_ring_buffer *ring)
+{
+	return ring->obj != NULL;
+}
+
+static inline unsigned
+intel_ring_flag(struct intel_ring_buffer *ring)
+{
+	return 1 << ring->id;
+}
+
+static inline u32
+intel_ring_sync_index(struct intel_ring_buffer *ring,
+		      struct intel_ring_buffer *other)
+{
+	int idx;
+
+	/*
+	 * cs -> 0 = vcs, 1 = bcs
+	 * vcs -> 0 = bcs, 1 = cs,
+	 * bcs -> 0 = cs, 1 = vcs.
+	 */
+
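+	/* This relies on the rings living in a contiguous array in
+	 * dev_priv->ring[], so pointer arithmetic yields the ring id
+	 * difference modulo I915_NUM_RINGS.
+	 */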
+	idx = (other - ring) - 1;
+	if (idx < 0)
+		idx += I915_NUM_RINGS;
+
+	return idx;
+}
+
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+		       int reg)
+{
+	/* Ensure that the compiler doesn't optimize away the load. */
+	barrier();
+	return ring->status_page.page_addr[reg];
+}
+
+static inline void
+intel_write_status_page(struct intel_ring_buffer *ring,
+			int reg, u32 value)
+{
+	ring->status_page.page_addr[reg] = value;
+}
+
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ *
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
+ */
+#define I915_GEM_HWS_INDEX		0x20
+#define I915_GEM_HWS_SCRATCH_INDEX	0x30
+#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+
+int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
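+/* Typical emit sequence (sketch): reserve space, write dwords, then kick
+ * the tail:
+ *
+ *	ret = intel_ring_begin(ring, 2);
+ *	if (ret)
+ *		return ret;
+ *	intel_ring_emit(ring, MI_NOOP);
+ *	intel_ring_emit(ring, MI_NOOP);
+ *	intel_ring_advance(ring);
+ *
+ * intel_ring_begin() must have reserved at least as many dwords as are
+ * emitted before intel_ring_advance().
+ */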
+static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+				   u32 data)
+{
+	iowrite32(data, ring->virtual_start + ring->tail);
+	ring->tail += 4;
+}
+void intel_ring_advance(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
+
+int intel_init_render_ring_buffer(struct drm_device *dev);
+int intel_init_bsd_ring_buffer(struct drm_device *dev);
+int intel_init_blt_ring_buffer(struct drm_device *dev);
+
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+
+static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
+{
+	return ring->tail;
+}
+
+static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+	BUG_ON(ring->outstanding_lazy_request == 0);
+	return ring->outstanding_lazy_request;
+}
+
+static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+{
+	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
+		ring->trace_irq_seqno = seqno;
+}
+
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
+
+#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_sdvo.c b/linux-imx/drivers/gpu/drm/i915/intel_sdvo.c
new file mode 100644
index 0000000..d4ea6c2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_sdvo.c
@@ -0,0 +1,2871 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_sdvo_regs.h"
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK  (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+			SDVO_TV_MASK)
+
+#define IS_TV(c)	(c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c)	(c->output_flag & SDVO_TMDS_MASK)
+#define IS_LVDS(c)	(c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
+
+
+static const char *tv_format_names[] = {
+	"NTSC_M"   , "NTSC_J"  , "NTSC_443",
+	"PAL_B"    , "PAL_D"   , "PAL_G"   ,
+	"PAL_H"    , "PAL_I"   , "PAL_M"   ,
+	"PAL_N"    , "PAL_NC"  , "PAL_60"  ,
+	"SECAM_B"  , "SECAM_D" , "SECAM_G" ,
+	"SECAM_K"  , "SECAM_K1", "SECAM_L" ,
+	"SECAM_60"
+};
+
+#define TV_FORMAT_NUM  ARRAY_SIZE(tv_format_names)
+
+struct intel_sdvo {
+	struct intel_encoder base;
+
+	struct i2c_adapter *i2c;
+	u8 slave_addr;
+
+	struct i2c_adapter ddc;
+
+	/* Register for the SDVO device: SDVOB or SDVOC */
+	uint32_t sdvo_reg;
+
+	/* Active outputs controlled by this SDVO output */
+	uint16_t controlled_output;
+
+	/*
+	 * Capabilities of the SDVO device returned by
+	 * i830_sdvo_get_capabilities()
+	 */
+	struct intel_sdvo_caps caps;
+
+	/* Pixel clock limitations reported by the SDVO device, in kHz */
+	int pixel_clock_min, pixel_clock_max;
+
+	/*
+	 * For a multifunction SDVO device, this records the
+	 * currently attached outputs.
+	 */
+	uint16_t attached_output;
+
+	/*
+	 * Hotplug activation bits for this device
+	 */
+	uint16_t hotplug_active;
+
+	/**
+	 * This is used to select the color range of RGB outputs in HDMI mode.
+	 * It is only valid when using TMDS encoding and 8 bit per color mode.
+	 */
+	uint32_t color_range;
+	bool color_range_auto;
+
+	/**
+	 * This is set if we're going to treat the device as TV-out.
+	 *
+	 * While we have these nice friendly flags for output types that ought
+	 * to decide this for us, the S-Video output on our HDMI+S-Video card
+	 * shows up as RGB1 (VGA).
+	 */
+	bool is_tv;
+
+	/* On different gens SDVOB is at different places. */
+	bool is_sdvob;
+
+	/* Index of the current tv format name */
+	int tv_format_index;
+
+	/**
+	 * This is set if we treat the device as HDMI, instead of DVI.
+	 */
+	bool is_hdmi;
+	bool has_hdmi_monitor;
+	bool has_hdmi_audio;
+	bool rgb_quant_range_selectable;
+
+	/**
+	 * This is set if we detect the output of the sdvo device as LVDS and
+	 * have a valid fixed mode to use with the panel.
+	 */
+	bool is_lvds;
+
+	/**
+	 * This is the sdvo fixed panel mode pointer.
+	 */
+	struct drm_display_mode *sdvo_lvds_fixed_mode;
+
+	/* DDC bus used by this SDVO encoder */
+	uint8_t ddc_bus;
+
+	/*
+	 * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
+	 */
+	uint8_t dtd_sdvo_flags;
+};
+
+struct intel_sdvo_connector {
+	struct intel_connector base;
+
+	/* Mark the type of connector */
+	uint16_t output_flag;
+
+	enum hdmi_force_audio force_audio;
+
+	/* This contains all current supported TV format */
+	u8 tv_format_supported[TV_FORMAT_NUM];
+	int   format_supported_num;
+	struct drm_property *tv_format;
+
+	/* add the property for the SDVO-TV */
+	struct drm_property *left;
+	struct drm_property *right;
+	struct drm_property *top;
+	struct drm_property *bottom;
+	struct drm_property *hpos;
+	struct drm_property *vpos;
+	struct drm_property *contrast;
+	struct drm_property *saturation;
+	struct drm_property *hue;
+	struct drm_property *sharpness;
+	struct drm_property *flicker_filter;
+	struct drm_property *flicker_filter_adaptive;
+	struct drm_property *flicker_filter_2d;
+	struct drm_property *tv_chroma_filter;
+	struct drm_property *tv_luma_filter;
+	struct drm_property *dot_crawl;
+
+	/* add the property for the SDVO-TV/LVDS */
+	struct drm_property *brightness;
+
+	/* Add variable to record current setting for the above property */
+	u32	left_margin, right_margin, top_margin, bottom_margin;
+
+	/* this is to get the range of margin.*/
+	u32	max_hscan,  max_vscan;
+	u32	max_hpos, cur_hpos;
+	u32	max_vpos, cur_vpos;
+	u32	cur_brightness, max_brightness;
+	u32	cur_contrast,	max_contrast;
+	u32	cur_saturation, max_saturation;
+	u32	cur_hue,	max_hue;
+	u32	cur_sharpness,	max_sharpness;
+	u32	cur_flicker_filter,		max_flicker_filter;
+	u32	cur_flicker_filter_adaptive,	max_flicker_filter_adaptive;
+	u32	cur_flicker_filter_2d,		max_flicker_filter_2d;
+	u32	cur_tv_chroma_filter,	max_tv_chroma_filter;
+	u32	cur_tv_luma_filter,	max_tv_luma_filter;
+	u32	cur_dot_crawl,	max_dot_crawl;
+};
+
+static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_sdvo, base.base);
+}
+
+static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_sdvo, base);
+}
+
+static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
+{
+	return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base);
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
+static bool
+intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+			      struct intel_sdvo_connector *intel_sdvo_connector,
+			      int type);
+static bool
+intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+				   struct intel_sdvo_connector *intel_sdvo_connector);
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
+{
+	struct drm_device *dev = intel_sdvo->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 bval = val, cval = val;
+	int i;
+
+	if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
+		I915_WRITE(intel_sdvo->sdvo_reg, val);
+		I915_READ(intel_sdvo->sdvo_reg);
+		return;
+	}
+
+	if (intel_sdvo->sdvo_reg == GEN3_SDVOB)
+		cval = I915_READ(GEN3_SDVOC);
+	else
+		bval = I915_READ(GEN3_SDVOB);
+
+	/*
+	 * Write the registers twice for luck. Sometimes,
+	 * writing them only once doesn't appear to 'stick'.
+	 * The BIOS does this too. Yay, magic
+	 */
+	for (i = 0; i < 2; i++)
+	{
+		I915_WRITE(GEN3_SDVOB, bval);
+		I915_READ(GEN3_SDVOB);
+		I915_WRITE(GEN3_SDVOC, cval);
+		I915_READ(GEN3_SDVOC);
+	}
+}
+
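+/* Read a single byte from the SDVO device: a combined i2c transaction that
+ * first writes the register address and then reads one byte back from the
+ * same slave address.
+ */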
+static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
+{
+	struct i2c_msg msgs[] = {
+		{
+			.addr = intel_sdvo->slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = &addr,
+		},
+		{
+			.addr = intel_sdvo->slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = ch,
+		}
+	};
+	int ret;
+
+	if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
+		return true;
+
+	DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+	return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+	u8 cmd;
+	const char *name;
+} sdvo_cmd_names[] = {
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+	/* Add the op code for SDVO enhancements */
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+	/* HDMI op code */
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+};
+
+#define SDVO_NAME(sdvo) ((sdvo)->is_sdvob ? "SDVOB" : "SDVOC")
+
+static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
+				   const void *args, int args_len)
+{
+	int i;
+
+	DRM_DEBUG_KMS("%s: W: %02X ",
+		      SDVO_NAME(intel_sdvo), cmd);
+	for (i = 0; i < args_len; i++)
+		DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
+	for (; i < 8; i++)
+		DRM_LOG_KMS("   ");
+	for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+		if (cmd == sdvo_cmd_names[i].cmd) {
+			DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(sdvo_cmd_names))
+		DRM_LOG_KMS("(%02X)", cmd);
+	DRM_LOG_KMS("\n");
+}
+
+static const char *cmd_status_names[] = {
+	"Power on",
+	"Success",
+	"Not supported",
+	"Invalid arg",
+	"Pending",
+	"Target not specified",
+	"Scaling not supported"
+};
+
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+				 const void *args, int args_len)
+{
+	u8 *buf, status;
+	struct i2c_msg *msgs;
+	int i, ret;
+
+	/* Would be simpler to allocate both in one go? */
+	buf = kzalloc(args_len * 2 + 2, GFP_KERNEL);
+	if (!buf)
+		return false;
+
+	msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
+	if (!msgs) {
+		kfree(buf);
+		return false;
+	}
+
+	intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
+
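+	/*
+	 * Build one i2c write message per argument byte (register
+	 * SDVO_I2C_ARG_0 - i, then the value), followed by the opcode
+	 * write and a two-message read of the status register.
+	 */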
+	for (i = 0; i < args_len; i++) {
+		msgs[i].addr = intel_sdvo->slave_addr;
+		msgs[i].flags = 0;
+		msgs[i].len = 2;
+		msgs[i].buf = buf + 2*i;
+		buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+		buf[2*i + 1] = ((u8*)args)[i];
+	}
+	msgs[i].addr = intel_sdvo->slave_addr;
+	msgs[i].flags = 0;
+	msgs[i].len = 2;
+	msgs[i].buf = buf + 2*i;
+	buf[2*i + 0] = SDVO_I2C_OPCODE;
+	buf[2*i + 1] = cmd;
+
+	/* the following two are to read the response */
+	status = SDVO_I2C_CMD_STATUS;
+	msgs[i+1].addr = intel_sdvo->slave_addr;
+	msgs[i+1].flags = 0;
+	msgs[i+1].len = 1;
+	msgs[i+1].buf = &status;
+
+	msgs[i+2].addr = intel_sdvo->slave_addr;
+	msgs[i+2].flags = I2C_M_RD;
+	msgs[i+2].len = 1;
+	msgs[i+2].buf = &status;
+
+	ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+		ret = false;
+		goto out;
+	}
+	if (ret != i+3) {
+		/* failure in I2C transfer */
+		DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+		ret = false;
+	}
+
+out:
+	kfree(msgs);
+	kfree(buf);
+	return ret;
+}
+
+static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
+				     void *response, int response_len)
+{
+	u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
+	u8 status;
+	int i;
+
+	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+
+	/*
+	 * The documentation states that all commands will be
+	 * processed within 15µs, and that we need only poll
+	 * the status byte a maximum of 3 times in order for the
+	 * command to be complete.
+	 *
+	 * Check 5 times in case the hardware failed to read the docs.
+	 *
+	 * Also beware that the first response by many devices is to
+	 * reply PENDING and stall for time. TVs are notorious for
+	 * requiring longer than specified to complete their replies.
+	 * Originally (in the DDX long ago), the delay was only ever 15ms
+	 * with an additional delay of 30ms applied for TVs added later after
+	 * many experiments. To accommodate both sets of delays, we do a
+	 * sequence of slow checks if the device is falling behind and fails
+	 * to reply within 5*15µs.
+	 */
+	if (!intel_sdvo_read_byte(intel_sdvo,
+				  SDVO_I2C_CMD_STATUS,
+				  &status))
+		goto log_fail;
+
+	while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+		if (retry < 10)
+			msleep(15);
+		else
+			udelay(15);
+
+		if (!intel_sdvo_read_byte(intel_sdvo,
+					  SDVO_I2C_CMD_STATUS,
+					  &status))
+			goto log_fail;
+	}
+
+	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+		DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+	else
+		DRM_LOG_KMS("(??? %d)", status);
+
+	if (status != SDVO_CMD_STATUS_SUCCESS)
+		goto log_fail;
+
+	/* Read the command response */
+	for (i = 0; i < response_len; i++) {
+		if (!intel_sdvo_read_byte(intel_sdvo,
+					  SDVO_I2C_RETURN_0 + i,
+					  &((u8 *)response)[i]))
+			goto log_fail;
+		DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+	}
+	DRM_LOG_KMS("\n");
+	return true;
+
+log_fail:
+	DRM_LOG_KMS("... failed\n");
+	return false;
+}
+
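+/*
+ * The SDVO link presumably needs its pixel clock in the 100-200 MHz range,
+ * so lower dot clocks are sent with a 2x or 4x multiplier to land there.
+ */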
+static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
+	if (mode->clock >= 100000)
+		return 1;
+	else if (mode->clock >= 50000)
+		return 2;
+	else
+		return 4;
+}
+
+static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+					      u8 ddc_bus)
+{
+	/* This must be the immediately preceding write before the i2c xfer */
+	return intel_sdvo_write_cmd(intel_sdvo,
+				    SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+				    &ddc_bus, 1);
+}
+
+static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
+{
+	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+		return false;
+
+	return intel_sdvo_read_response(intel_sdvo, NULL, 0);
+}
+
+static bool
+intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
+{
+	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
+		return false;
+
+	return intel_sdvo_read_response(intel_sdvo, value, len);
+}
+
+static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
+{
+	struct intel_sdvo_set_target_input_args targets = {0};
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_TARGET_INPUT,
+				    &targets, sizeof(targets));
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function makes an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
+{
+	struct intel_sdvo_get_trained_inputs_response response;
+
+	BUILD_BUG_ON(sizeof(response) != 1);
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+				  &response, sizeof(response)))
+		return false;
+
+	*input_1 = response.input0_trained;
+	*input_2 = response.input1_trained;
+	return true;
+}
+
+static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
+					  u16 outputs)
+{
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_ACTIVE_OUTPUTS,
+				    &outputs, sizeof(outputs));
+}
+
+static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
+					  u16 *outputs)
+{
+	return intel_sdvo_get_value(intel_sdvo,
+				    SDVO_CMD_GET_ACTIVE_OUTPUTS,
+				    outputs, sizeof(*outputs));
+}
+
+static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
+					       int mode)
+{
+	u8 state = SDVO_ENCODER_STATE_ON;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		state = SDVO_ENCODER_STATE_ON;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		state = SDVO_ENCODER_STATE_STANDBY;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		state = SDVO_ENCODER_STATE_SUSPEND;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		state = SDVO_ENCODER_STATE_OFF;
+		break;
+	}
+
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+}
+
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
+						   int *clock_min,
+						   int *clock_max)
+{
+	struct intel_sdvo_pixel_clock_range clocks;
+
+	BUILD_BUG_ON(sizeof(clocks) != 4);
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+				  &clocks, sizeof(clocks)))
+		return false;
+
+	/* Convert the values from units of 10 kHz to kHz. */
+	*clock_min = clocks.min * 10;
+	*clock_max = clocks.max * 10;
+	return true;
+}
+
+static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
+					 u16 outputs)
+{
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_TARGET_OUTPUT,
+				    &outputs, sizeof(outputs));
+}
+
+static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
+				  struct intel_sdvo_dtd *dtd)
+{
+	return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+		intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
+					 struct intel_sdvo_dtd *dtd)
+{
+	return intel_sdvo_set_timing(intel_sdvo,
+				     SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
+					 struct intel_sdvo_dtd *dtd)
+{
+	return intel_sdvo_set_timing(intel_sdvo,
+				     SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool
+intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+					 uint16_t clock,
+					 uint16_t width,
+					 uint16_t height)
+{
+	struct intel_sdvo_preferred_input_timing_args args;
+
+	memset(&args, 0, sizeof(args));
+	args.clock = clock;
+	args.width = width;
+	args.height = height;
+	args.interlace = 0;
+
+	if (intel_sdvo->is_lvds &&
+	   (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+	    intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+		args.scaled = 1;
+
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+				    &args, sizeof(args));
+}
+
+static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
+						  struct intel_sdvo_dtd *dtd)
+{
+	BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+	BUILD_BUG_ON(sizeof(dtd->part2) != 8);
+	return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+				    &dtd->part1, sizeof(dtd->part1)) &&
+		intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+				     &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
+{
+	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+}
+
+static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
+					 const struct drm_display_mode *mode)
+{
+	uint16_t width, height;
+	uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+	uint16_t h_sync_offset, v_sync_offset;
+	int mode_clock;
+
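+	/*
+	 * Pack the mode into the SDVO DTD layout: 8 low bits per field,
+	 * the high nibbles folded into the shared *_high bytes, and the
+	 * clock in 10 kHz units.
+	 */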
+	width = mode->hdisplay;
+	height = mode->vdisplay;
+
+	/* do some mode translations */
+	h_blank_len = mode->htotal - mode->hdisplay;
+	h_sync_len = mode->hsync_end - mode->hsync_start;
+
+	v_blank_len = mode->vtotal - mode->vdisplay;
+	v_sync_len = mode->vsync_end - mode->vsync_start;
+
+	h_sync_offset = mode->hsync_start - mode->hdisplay;
+	v_sync_offset = mode->vsync_start - mode->vdisplay;
+
+	mode_clock = mode->clock;
+	mode_clock /= 10;
+	dtd->part1.clock = mode_clock;
+
+	dtd->part1.h_active = width & 0xff;
+	dtd->part1.h_blank = h_blank_len & 0xff;
+	dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+		((h_blank_len >> 8) & 0xf);
+	dtd->part1.v_active = height & 0xff;
+	dtd->part1.v_blank = v_blank_len & 0xff;
+	dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
+		((v_blank_len >> 8) & 0xf);
+
+	dtd->part2.h_sync_off = h_sync_offset & 0xff;
+	dtd->part2.h_sync_width = h_sync_len & 0xff;
+	dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+		(v_sync_len & 0xf);
+	dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+		((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+		((v_sync_len & 0x30) >> 4);
+
+	dtd->part2.dtd_flags = 0x18;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
+
+	dtd->part2.sdvo_flags = 0;
+	dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+	dtd->part2.reserved = 0;
+}
+
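+/*
+ * Inverse of intel_sdvo_get_dtd_from_mode(): reassemble the full timing
+ * values from the split low-byte/high-nibble DTD fields.
+ */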
+static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode *mode,
+					 const struct intel_sdvo_dtd *dtd)
+{
+	mode->hdisplay = dtd->part1.h_active;
+	mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+	mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
+	mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+	mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
+	mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+	mode->htotal = mode->hdisplay + dtd->part1.h_blank;
+	mode->htotal += (dtd->part1.h_high & 0xf) << 8;
+
+	mode->vdisplay = dtd->part1.v_active;
+	mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+	mode->vsync_start = mode->vdisplay;
+	mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+	mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+	mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+	mode->vsync_end = mode->vsync_start +
+		(dtd->part2.v_sync_off_width & 0xf);
+	mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+	mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
+	mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+
+	mode->clock = dtd->part1.clock * 10;
+
+	mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+	if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+		mode->flags |= DRM_MODE_FLAG_PHSYNC;
+	if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
+		mode->flags |= DRM_MODE_FLAG_PVSYNC;
+}
+
+static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
+{
+	struct intel_sdvo_encode encode;
+
+	BUILD_BUG_ON(sizeof(encode) != 2);
+	return intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_SUPP_ENCODE,
+				  &encode, sizeof(encode));
+}
+
+static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
+				  uint8_t mode)
+{
+	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+}
+
+static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
+				       uint8_t mode)
+{
+	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+}
+
+#if 0
+static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
+{
+	int i, j;
+	uint8_t set_buf_index[2];
+	uint8_t av_split;
+	uint8_t buf_size;
+	uint8_t buf[48];
+	uint8_t *pos;
+
+	intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+
+	for (i = 0; i <= av_split; i++) {
+		set_buf_index[0] = i; set_buf_index[1] = 0;
+		intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+				     set_buf_index, 2);
+		intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+		intel_sdvo_read_response(intel_sdvo, &buf_size, 1);
+
+		pos = buf;
+		for (j = 0; j <= buf_size; j += 8) {
+			intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_DATA,
+					     NULL, 0);
+			intel_sdvo_read_response(intel_sdvo, pos, 8);
+			pos += 8;
+		}
+	}
+}
+#endif
+
+static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
+				       unsigned if_index, uint8_t tx_rate,
+				       uint8_t *data, unsigned length)
+{
+	uint8_t set_buf_index[2] = { if_index, 0 };
+	uint8_t hbuf_size, tmp[8];
+	int i;
+
+	if (!intel_sdvo_set_value(intel_sdvo,
+				  SDVO_CMD_SET_HBUF_INDEX,
+				  set_buf_index, 2))
+		return false;
+
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+				  &hbuf_size, 1))
+		return false;
+
+	/* Buffer size is 0 based, hooray! */
+	hbuf_size++;
+
+	DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+		      if_index, length, hbuf_size);
+
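+	/* The hardware buffer is written in fixed 8-byte chunks; zero-pad
+	 * the tail beyond the infoframe length. */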
+	for (i = 0; i < hbuf_size; i += 8) {
+		memset(tmp, 0, 8);
+		if (i < length)
+			memcpy(tmp, data + i, min_t(unsigned, 8, length - i));
+
+		if (!intel_sdvo_set_value(intel_sdvo,
+					  SDVO_CMD_SET_HBUF_DATA,
+					  tmp, 8))
+			return false;
+	}
+
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_HBUF_TXRATE,
+				    &tx_rate, 1);
+}
+
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+					 const struct drm_display_mode *adjusted_mode)
+{
+	struct dip_infoframe avi_if = {
+		.type = DIP_TYPE_AVI,
+		.ver = DIP_VERSION_AVI,
+		.len = DIP_LEN_AVI,
+	};
+	uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
+	struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc);
+
+	if (intel_sdvo->rgb_quant_range_selectable) {
+		if (intel_crtc->config.limited_color_range)
+			avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+		else
+			avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+	}
+
+	avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
+
+	intel_dip_infoframe_csum(&avi_if);
+
+	/* sdvo spec says that the ecc is handled by the hw, and it looks like
+	 * we must not send the ecc field, either. */
+	memcpy(sdvo_data, &avi_if, 3);
+	sdvo_data[3] = avi_if.checksum;
+	memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
+
+	return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+					  SDVO_HBUF_TX_VSYNC,
+					  sdvo_data, sizeof(sdvo_data));
+}
+
+static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
+{
+	struct intel_sdvo_tv_format format;
+	uint32_t format_map;
+
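+	/* The TV format is selected by a single bit, indexed by
+	 * tv_format_index, within the 6-byte format bitmask. */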
+	format_map = 1 << intel_sdvo->tv_format_index;
+	memset(&format, 0, sizeof(format));
+	memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+	BUILD_BUG_ON(sizeof(format) != 6);
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_TV_FORMAT,
+				    &format, sizeof(format));
+}
+
+static bool
+intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
+					const struct drm_display_mode *mode)
+{
+	struct intel_sdvo_dtd output_dtd;
+
+	if (!intel_sdvo_set_target_output(intel_sdvo,
+					  intel_sdvo->attached_output))
+		return false;
+
+	intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+	if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+		return false;
+
+	return true;
+}
+
+/* Asks the sdvo controller for the preferred input mode given the output mode.
+ * Unfortunately we have to set up the full output mode to do that. */
+static bool
+intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
+				    const struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	struct intel_sdvo_dtd input_dtd;
+
+	/* Reset the input timing to the screen. Assume always input 0. */
+	if (!intel_sdvo_set_target_input(intel_sdvo))
+		return false;
+
+	if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
+						      mode->clock / 10,
+						      mode->hdisplay,
+						      mode->vdisplay))
+		return false;
+
+	if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
+						   &input_dtd))
+		return false;
+
+	intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+	intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags;
+
+	return true;
+}
+
+static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
+				      struct intel_crtc_config *pipe_config)
+{
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+	struct drm_display_mode *mode = &pipe_config->requested_mode;
+
+	DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
+	pipe_config->pipe_bpp = 8*3;
+
+	if (HAS_PCH_SPLIT(encoder->base.dev))
+		pipe_config->has_pch_encoder = true;
+
+	/* We need to construct preferred input timings based on our
+	 * output timings.  To do that, we have to set the output
+	 * timings, even though this isn't really the right place in
+	 * the sequence to do it. Oh well.
+	 */
+	if (intel_sdvo->is_tv) {
+		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
+			return false;
+
+		(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+							   mode,
+							   adjusted_mode);
+	} else if (intel_sdvo->is_lvds) {
+		if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
+							     intel_sdvo->sdvo_lvds_fixed_mode))
+			return false;
+
+		(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+							   mode,
+							   adjusted_mode);
+	}
+
+	/* Make the CRTC code factor in the SDVO pixel multiplier.  The
+	 * SDVO device will factor out the multiplier during mode_set.
+	 */
+	pipe_config->pixel_multiplier =
+		intel_sdvo_get_pixel_multiplier(adjusted_mode);
+	adjusted_mode->clock *= pipe_config->pixel_multiplier;
+
+	if (intel_sdvo->color_range_auto) {
+		/* See CEA-861-E - 5.1 Default Encoding Parameters */
+		/* FIXME: This bit is only valid when using TMDS encoding and 8
+		 * bit per color mode. */
+		if (intel_sdvo->has_hdmi_monitor &&
+		    drm_match_cea_mode(adjusted_mode) > 1)
+			intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
+		else
+			intel_sdvo->color_range = 0;
+	}
+
+	if (intel_sdvo->color_range)
+		pipe_config->limited_color_range = true;
+
+	return true;
+}
+
+static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = intel_encoder->base.crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
+	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base);
+	u32 sdvox;
+	struct intel_sdvo_in_out_map in_out;
+	struct intel_sdvo_dtd input_dtd, output_dtd;
+	int rate;
+
+	if (!mode)
+		return;
+
+	/* First, set the input mapping for the first input to our controlled
+	 * output. This is only correct if we're a single-input device, in
+	 * which case the first input is the output from the appropriate SDVO
+	 * channel on the motherboard.  In a two-input device, the first input
+	 * will be SDVOB and the second SDVOC.
+	 */
+	in_out.in0 = intel_sdvo->attached_output;
+	in_out.in1 = 0;
+
+	intel_sdvo_set_value(intel_sdvo,
+			     SDVO_CMD_SET_IN_OUT_MAP,
+			     &in_out, sizeof(in_out));
+
+	/* Set the output timings to the screen */
+	if (!intel_sdvo_set_target_output(intel_sdvo,
+					  intel_sdvo->attached_output))
+		return;
+
+	/* lvds has a special fixed output timing. */
+	if (intel_sdvo->is_lvds)
+		intel_sdvo_get_dtd_from_mode(&output_dtd,
+					     intel_sdvo->sdvo_lvds_fixed_mode);
+	else
+		intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+	if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+		DRM_INFO("Setting output timings on %s failed\n",
+			 SDVO_NAME(intel_sdvo));
+
+	/* Set the input timing to the screen. Assume always input 0. */
+	if (!intel_sdvo_set_target_input(intel_sdvo))
+		return;
+
+	if (intel_sdvo->has_hdmi_monitor) {
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+		intel_sdvo_set_colorimetry(intel_sdvo,
+					   SDVO_COLORIMETRY_RGB256);
+		intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
+	} else
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
+
+	if (intel_sdvo->is_tv &&
+	    !intel_sdvo_set_tv_format(intel_sdvo))
+		return;
+
+	/* We have already tried to get the input timing in mode_fixup and
+	 * filled it into adjusted_mode.
+	 */
+	intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+	if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
+		input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
+	if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
+		DRM_INFO("Setting input timings on %s failed\n",
+			 SDVO_NAME(intel_sdvo));
+
+	switch (intel_crtc->config.pixel_multiplier) {
+	default:
+	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+	case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+	case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+	}
+	if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
+		return;
+
+	/* Set the SDVO control regs. */
+	if (INTEL_INFO(dev)->gen >= 4) {
+		/* The real mode polarity is set by the SDVO commands, using
+		 * struct intel_sdvo_dtd. */
+		sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
+		if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi)
+			sdvox |= intel_sdvo->color_range;
+		if (INTEL_INFO(dev)->gen < 5)
+			sdvox |= SDVO_BORDER_ENABLE;
+	} else {
+		sdvox = I915_READ(intel_sdvo->sdvo_reg);
+		switch (intel_sdvo->sdvo_reg) {
+		case GEN3_SDVOB:
+			sdvox &= SDVOB_PRESERVE_MASK;
+			break;
+		case GEN3_SDVOC:
+			sdvox &= SDVOC_PRESERVE_MASK;
+			break;
+		}
+		sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+	}
+
+	if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+		sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+	else
+		sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe);
+
+	if (intel_sdvo->has_hdmi_audio)
+		sdvox |= SDVO_AUDIO_ENABLE;
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		/* done in crtc_mode_set as the dpll_md reg must be written early */
+	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
+		/* done in crtc_mode_set as it lives inside the dpll register */
+	} else {
+		sdvox |= (intel_crtc->config.pixel_multiplier - 1)
+			<< SDVO_PORT_MULTIPLY_SHIFT;
+	}
+
+	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+	    INTEL_INFO(dev)->gen < 5)
+		sdvox |= SDVO_STALL_SELECT;
+	intel_sdvo_write_sdvox(intel_sdvo, sdvox);
+}
+
+static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
+{
+	struct intel_sdvo_connector *intel_sdvo_connector =
+		to_intel_sdvo_connector(&connector->base);
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
+	u16 active_outputs;
+
+	intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+	if (active_outputs & intel_sdvo_connector->output_flag)
+		return true;
+	else
+		return false;
+}
+
+static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
+				    enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	u16 active_outputs;
+	u32 tmp;
+
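+	/* Treat the encoder as off only if both the port enable bit is
+	 * clear and the device reports no active outputs. */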
+	tmp = I915_READ(intel_sdvo->sdvo_reg);
+	intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
+
+	if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
+		return false;
+
+	if (HAS_PCH_CPT(dev))
+		*pipe = PORT_TO_PIPE_CPT(tmp);
+	else
+		*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void intel_disable_sdvo(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	u32 temp;
+
+	intel_sdvo_set_active_outputs(intel_sdvo, 0);
+	if (0)
+		intel_sdvo_set_encoder_power_state(intel_sdvo,
+						   DRM_MODE_DPMS_OFF);
+
+	temp = I915_READ(intel_sdvo->sdvo_reg);
+	if ((temp & SDVO_ENABLE) != 0) {
+		/* HW workaround for IBX, we need to move the port to
+		 * transcoder A before disabling it. */
+		if (HAS_PCH_IBX(encoder->base.dev)) {
+			struct drm_crtc *crtc = encoder->base.crtc;
+			int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+			if (temp & SDVO_PIPE_B_SELECT) {
+				temp &= ~SDVO_PIPE_B_SELECT;
+				I915_WRITE(intel_sdvo->sdvo_reg, temp);
+				POSTING_READ(intel_sdvo->sdvo_reg);
+
+				/* Again we need to write this twice. */
+				I915_WRITE(intel_sdvo->sdvo_reg, temp);
+				POSTING_READ(intel_sdvo->sdvo_reg);
+
+				/* Transcoder selection bits only update
+				 * effectively on vblank. */
+				if (crtc)
+					intel_wait_for_vblank(encoder->base.dev, pipe);
+				else
+					msleep(50);
+			}
+		}
+
+		intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+	}
+}
+
+static void intel_enable_sdvo(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	u32 temp;
+	bool input1, input2;
+	int i;
+	u8 status;
+
+	temp = I915_READ(intel_sdvo->sdvo_reg);
+	if ((temp & SDVO_ENABLE) == 0) {
+		/* HW workaround for IBX, we need to move the port
+		 * to transcoder A before disabling it, so restore it here. */
+		if (HAS_PCH_IBX(dev))
+			temp |= SDVO_PIPE_SEL(intel_crtc->pipe);
+
+		intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+	}
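+	/* Give the encoder a couple of vblanks to settle before querying
+	 * the input training status. */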
+	for (i = 0; i < 2; i++)
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+	status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
+	/* Warn if the device reported failure to sync.
+	 * A lot of SDVO devices fail to notify of sync, but if the
+	 * status is a success, we succeeded.
+	 */
+	if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+		DRM_DEBUG_KMS("First %s output reported failure to "
+				"sync\n", SDVO_NAME(intel_sdvo));
+	}
+
+	if (0)
+		intel_sdvo_set_encoder_power_state(intel_sdvo,
+						   DRM_MODE_DPMS_ON);
+	intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+}
+
+static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
+{
+	struct drm_crtc *crtc;
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+	/* SDVO supports only two DPMS states. */
+	if (mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
+
+	if (mode == connector->dpms)
+		return;
+
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	crtc = intel_sdvo->base.base.crtc;
+	if (!crtc) {
+		intel_sdvo->base.connectors_active = false;
+		return;
+	}
+
+	if (mode != DRM_MODE_DPMS_ON) {
+		intel_sdvo_set_active_outputs(intel_sdvo, 0);
+		if (0)
+			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+
+		intel_sdvo->base.connectors_active = false;
+
+		intel_crtc_update_dpms(crtc);
+	} else {
+		intel_sdvo->base.connectors_active = true;
+
+		intel_crtc_update_dpms(crtc);
+
+		if (0)
+			intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
+		intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+	}
+
+	intel_modeset_check_state(connector->dev);
+}
+
+static int intel_sdvo_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
+	if (intel_sdvo->pixel_clock_min > mode->clock)
+		return MODE_CLOCK_LOW;
+
+	if (intel_sdvo->pixel_clock_max < mode->clock)
+		return MODE_CLOCK_HIGH;
+
+	if (intel_sdvo->is_lvds) {
+		if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+			return MODE_PANEL;
+
+		if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+
+	return MODE_OK;
+}
+
+static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
+{
+	BUILD_BUG_ON(sizeof(*caps) != 8);
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_DEVICE_CAPS,
+				  caps, sizeof(*caps)))
+		return false;
+
+	DRM_DEBUG_KMS("SDVO capabilities:\n"
+		      "  vendor_id: %d\n"
+		      "  device_id: %d\n"
+		      "  device_rev_id: %d\n"
+		      "  sdvo_version_major: %d\n"
+		      "  sdvo_version_minor: %d\n"
+		      "  sdvo_inputs_mask: %d\n"
+		      "  smooth_scaling: %d\n"
+		      "  sharp_scaling: %d\n"
+		      "  up_scaling: %d\n"
+		      "  down_scaling: %d\n"
+		      "  stall_support: %d\n"
+		      "  output_flags: %d\n",
+		      caps->vendor_id,
+		      caps->device_id,
+		      caps->device_rev_id,
+		      caps->sdvo_version_major,
+		      caps->sdvo_version_minor,
+		      caps->sdvo_inputs_mask,
+		      caps->smooth_scaling,
+		      caps->sharp_scaling,
+		      caps->up_scaling,
+		      caps->down_scaling,
+		      caps->stall_support,
+		      caps->output_flags);
+
+	return true;
+}
+
+static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
+{
+	struct drm_device *dev = intel_sdvo->base.base.dev;
+	uint16_t hotplug;
+
+	/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
+	 * on the line. */
+	if (IS_I945G(dev) || IS_I945GM(dev))
+		return 0;
+
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+					&hotplug, sizeof(hotplug)))
+		return 0;
+
+	return hotplug;
+}
+
+static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
+{
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+
+	intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+			&intel_sdvo->hotplug_active, 2);
+}
+
+static bool
+intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
+{
+	/* Is there more than one type of output? */
+	return hweight16(intel_sdvo->caps.output_flags) > 1;
+}
+
+static struct edid *
+intel_sdvo_get_edid(struct drm_connector *connector)
+{
+	struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+	return drm_get_edid(connector, &sdvo->ddc);
+}
+
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+
+	return drm_get_edid(connector,
+			    intel_gmbus_get_adapter(dev_priv,
+						    dev_priv->crt_ddc_pin));
+}
+
+static enum drm_connector_status
+intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	enum drm_connector_status status;
+	struct edid *edid;
+
+	edid = intel_sdvo_get_edid(connector);
+
+	if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
+		u8 ddc, saved_ddc = intel_sdvo->ddc_bus;
+
+		/*
+		 * Don't use the 1 as the argument of DDC bus switch to get
+		 * the EDID. It is used for SDVO SPD ROM.
+		 */
+		for (ddc = intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+			intel_sdvo->ddc_bus = ddc;
+			edid = intel_sdvo_get_edid(connector);
+			if (edid)
+				break;
+		}
+		/*
+		 * If we found the EDID on the other bus,
+		 * assume that is the correct DDC bus.
+		 */
+		if (edid == NULL)
+			intel_sdvo->ddc_bus = saved_ddc;
+	}
+
+	/*
+	 * When there is no EDID and no monitor is connected to the VGA
+	 * port, try to use the CRT DDC to read the EDID for the DVI connector.
+	 */
+	if (edid == NULL)
+		edid = intel_sdvo_get_analog_edid(connector);
+
+	status = connector_status_unknown;
+	if (edid != NULL) {
+		/* DDC bus is shared, match EDID to connector type */
+		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+			status = connector_status_connected;
+			if (intel_sdvo->is_hdmi) {
+				intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+				intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+				intel_sdvo->rgb_quant_range_selectable =
+					drm_rgb_quant_range_selectable(edid);
+			}
+		} else
+			status = connector_status_disconnected;
+		kfree(edid);
+	}
+
+	if (status == connector_status_connected) {
+		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+		if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
+			intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
+	}
+
+	return status;
+}
+
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+				  struct edid *edid)
+{
+	bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+	bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+	DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+		      connector_is_digital, monitor_is_digital);
+	return connector_is_digital == monitor_is_digital;
+}
+
+static enum drm_connector_status
+intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+	uint16_t response;
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+	enum drm_connector_status ret;
+
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_ATTACHED_DISPLAYS,
+				  &response, 2))
+		return connector_status_unknown;
+
+	DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+		      response & 0xff, response >> 8,
+		      intel_sdvo_connector->output_flag);
+
+	if (response == 0)
+		return connector_status_disconnected;
+
+	intel_sdvo->attached_output = response;
+
+	intel_sdvo->has_hdmi_monitor = false;
+	intel_sdvo->has_hdmi_audio = false;
+	intel_sdvo->rgb_quant_range_selectable = false;
+
+	if ((intel_sdvo_connector->output_flag & response) == 0)
+		ret = connector_status_disconnected;
+	else if (IS_TMDS(intel_sdvo_connector))
+		ret = intel_sdvo_tmds_sink_detect(connector);
+	else {
+		struct edid *edid;
+
+		/* if we have an edid check it matches the connection */
+		edid = intel_sdvo_get_edid(connector);
+		if (edid == NULL)
+			edid = intel_sdvo_get_analog_edid(connector);
+		if (edid != NULL) {
+			if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+							      edid))
+				ret = connector_status_connected;
+			else
+				ret = connector_status_disconnected;
+
+			kfree(edid);
+		} else
+			ret = connector_status_connected;
+	}
+
+	/* May need to update the encoder flags, e.g. the TV clock
+	 * requirement for SDVO TV. */
+	if (ret == connector_status_connected) {
+		intel_sdvo->is_tv = false;
+		intel_sdvo->is_lvds = false;
+		intel_sdvo->base.needs_tv_clock = false;
+
+		if (response & SDVO_TV_MASK) {
+			intel_sdvo->is_tv = true;
+			intel_sdvo->base.needs_tv_clock = true;
+		}
+		if (response & SDVO_LVDS_MASK)
+			intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+	}
+
+	return ret;
+}
+
+static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+{
+	struct edid *edid;
+
+	/* set the bus switch and get the modes */
+	edid = intel_sdvo_get_edid(connector);
+
+	/*
+	 * Mac mini hack.  On this device, the DVI-I connector shares one DDC
+	 * link between analog and digital outputs. So, if the regular SDVO
+	 * DDC fails, check to see if the analog output is disconnected, in
+	 * which case we'll look there for the digital DDC data.
+	 */
+	if (edid == NULL)
+		edid = intel_sdvo_get_analog_edid(connector);
+
+	if (edid != NULL) {
+		if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+						      edid)) {
+			drm_mode_connector_update_edid_property(connector, edid);
+			drm_add_edid_modes(connector, edid);
+		}
+
+		kfree(edid);
+	}
+}
+
+/*
+ * Set of SDVO TV modes.
+ * Note!  This is in reply order (see loop in get_tv_modes).
+ * XXX: all 60Hz refresh?
+ */
+static const struct drm_display_mode sdvo_tv_modes[] = {
+	{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
+		   416, 0, 200, 201, 232, 233, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
+		   416, 0, 240, 241, 272, 273, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
+		   496, 0, 300, 301, 332, 333, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
+		   736, 0, 350, 351, 382, 383, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
+		   736, 0, 400, 401, 432, 433, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
+		   736, 0, 480, 481, 512, 513, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
+		   800, 0, 480, 481, 512, 513, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
+		   800, 0, 576, 577, 608, 609, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
+		   816, 0, 350, 351, 382, 383, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
+		   816, 0, 400, 401, 432, 433, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
+		   816, 0, 480, 481, 512, 513, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
+		   816, 0, 540, 541, 572, 573, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
+		   816, 0, 576, 577, 608, 609, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
+		   864, 0, 576, 577, 608, 609, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
+		   896, 0, 600, 601, 632, 633, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
+		   928, 0, 624, 625, 656, 657, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
+		   1016, 0, 766, 767, 798, 799, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
+		   1120, 0, 768, 769, 800, 801, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
+		   1376, 0, 1024, 1025, 1056, 1057, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo_sdtv_resolution_request tv_res;
+	uint32_t reply = 0, format_map = 0;
+	int i;
+
+	/* Read the list of supported input resolutions for the selected TV
+	 * format.
+	 */
+	format_map = 1 << intel_sdvo->tv_format_index;
+	memcpy(&tv_res, &format_map,
+	       min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
+
+	if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
+		return;
+
+	BUILD_BUG_ON(sizeof(tv_res) != 3);
+	if (!intel_sdvo_write_cmd(intel_sdvo,
+				  SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+				  &tv_res, sizeof(tv_res)))
+		return;
+	if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
+		return;
+
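+	/* Each set bit in the 3-byte reply selects, by index, an entry in
+	 * sdvo_tv_modes above (the table is in reply order). */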
+	for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+		if (reply & (1 << i)) {
+			struct drm_display_mode *nmode;
+			nmode = drm_mode_duplicate(connector->dev,
+						   &sdvo_tv_modes[i]);
+			if (nmode)
+				drm_mode_probed_add(connector, nmode);
+		}
+}
+
+static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct drm_display_mode *newmode;
+
+	/*
+	 * Attempt to get the mode list from DDC.
+	 * Assume that the preferred modes are
+	 * arranged in priority order.
+	 */
+	intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+
+	/*
+	 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
+	 * SDVO->LVDS transcoders can't cope with the EDID mode. Since
+	 * drm_mode_probed_add adds the mode at the head of the list we add it
+	 * last.
+	 */
+	if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+		newmode = drm_mode_duplicate(connector->dev,
+					     dev_priv->sdvo_lvds_vbt_mode);
+		if (newmode != NULL) {
+			/* Guarantee the mode is preferred */
+			newmode->type = (DRM_MODE_TYPE_PREFERRED |
+					 DRM_MODE_TYPE_DRIVER);
+			drm_mode_probed_add(connector, newmode);
+		}
+	}
+
+	list_for_each_entry(newmode, &connector->probed_modes, head) {
+		if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+			intel_sdvo->sdvo_lvds_fixed_mode =
+				drm_mode_duplicate(connector->dev, newmode);
+
+			intel_sdvo->is_lvds = true;
+			break;
+		}
+	}
+}
+
+static int intel_sdvo_get_modes(struct drm_connector *connector)
+{
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+
+	if (IS_TV(intel_sdvo_connector))
+		intel_sdvo_get_tv_modes(connector);
+	else if (IS_LVDS(intel_sdvo_connector))
+		intel_sdvo_get_lvds_modes(connector);
+	else
+		intel_sdvo_get_ddc_modes(connector);
+
+	return !list_empty(&connector->probed_modes);
+}
+
+static void
+intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+{
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+	struct drm_device *dev = connector->dev;
+
+	if (intel_sdvo_connector->left)
+		drm_property_destroy(dev, intel_sdvo_connector->left);
+	if (intel_sdvo_connector->right)
+		drm_property_destroy(dev, intel_sdvo_connector->right);
+	if (intel_sdvo_connector->top)
+		drm_property_destroy(dev, intel_sdvo_connector->top);
+	if (intel_sdvo_connector->bottom)
+		drm_property_destroy(dev, intel_sdvo_connector->bottom);
+	if (intel_sdvo_connector->hpos)
+		drm_property_destroy(dev, intel_sdvo_connector->hpos);
+	if (intel_sdvo_connector->vpos)
+		drm_property_destroy(dev, intel_sdvo_connector->vpos);
+	if (intel_sdvo_connector->saturation)
+		drm_property_destroy(dev, intel_sdvo_connector->saturation);
+	if (intel_sdvo_connector->contrast)
+		drm_property_destroy(dev, intel_sdvo_connector->contrast);
+	if (intel_sdvo_connector->hue)
+		drm_property_destroy(dev, intel_sdvo_connector->hue);
+	if (intel_sdvo_connector->sharpness)
+		drm_property_destroy(dev, intel_sdvo_connector->sharpness);
+	if (intel_sdvo_connector->flicker_filter)
+		drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
+	if (intel_sdvo_connector->flicker_filter_2d)
+		drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
+	if (intel_sdvo_connector->flicker_filter_adaptive)
+		drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
+	if (intel_sdvo_connector->tv_luma_filter)
+		drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
+	if (intel_sdvo_connector->tv_chroma_filter)
+		drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
+	if (intel_sdvo_connector->dot_crawl)
+		drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
+	if (intel_sdvo_connector->brightness)
+		drm_property_destroy(dev, intel_sdvo_connector->brightness);
+}
+
+static void intel_sdvo_destroy(struct drm_connector *connector)
+{
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+
+	if (intel_sdvo_connector->tv_format)
+		drm_property_destroy(connector->dev,
+				     intel_sdvo_connector->tv_format);
+
+	intel_sdvo_destroy_enhance_property(connector);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(intel_sdvo_connector);
+}
+
+static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct edid *edid;
+	bool has_audio = false;
+
+	if (!intel_sdvo->is_hdmi)
+		return false;
+
+	edid = intel_sdvo_get_edid(connector);
+	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+		has_audio = drm_detect_monitor_audio(edid);
+	kfree(edid);
+
+	return has_audio;
+}
+
+static int
+intel_sdvo_set_property(struct drm_connector *connector,
+			struct drm_property *property,
+			uint64_t val)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	uint16_t temp_value;
+	uint8_t cmd;
+	int ret;
+
+	ret = drm_object_property_set_value(&connector->base, property, val);
+	if (ret)
+		return ret;
+
+	if (property == dev_priv->force_audio_property) {
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_sdvo_connector->force_audio)
+			return 0;
+
+		intel_sdvo_connector->force_audio = i;
+
+		if (i == HDMI_AUDIO_AUTO)
+			has_audio = intel_sdvo_detect_hdmi_audio(connector);
+		else
+			has_audio = (i == HDMI_AUDIO_ON);
+
+		if (has_audio == intel_sdvo->has_hdmi_audio)
+			return 0;
+
+		intel_sdvo->has_hdmi_audio = has_audio;
+		goto done;
+	}
+
+	if (property == dev_priv->broadcast_rgb_property) {
+		bool old_auto = intel_sdvo->color_range_auto;
+		uint32_t old_range = intel_sdvo->color_range;
+
+		switch (val) {
+		case INTEL_BROADCAST_RGB_AUTO:
+			intel_sdvo->color_range_auto = true;
+			break;
+		case INTEL_BROADCAST_RGB_FULL:
+			intel_sdvo->color_range_auto = false;
+			intel_sdvo->color_range = 0;
+			break;
+		case INTEL_BROADCAST_RGB_LIMITED:
+			intel_sdvo->color_range_auto = false;
+			/* FIXME: this bit is only valid when using TMDS
+			 * encoding and 8 bit per color mode. */
+			intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (old_auto == intel_sdvo->color_range_auto &&
+		    old_range == intel_sdvo->color_range)
+			return 0;
+
+		goto done;
+	}
+
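+/* For a simple ranged enhancement property: validate the new value against
+ * the cached maximum and jump to the common set_value path. */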
+#define CHECK_PROPERTY(name, NAME) \
+	if (intel_sdvo_connector->name == property) { \
+		if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
+		if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+		cmd = SDVO_CMD_SET_##NAME; \
+		intel_sdvo_connector->cur_##name = temp_value; \
+		goto set_value; \
+	}
+
+	if (property == intel_sdvo_connector->tv_format) {
+		if (val >= TV_FORMAT_NUM)
+			return -EINVAL;
+
+		if (intel_sdvo->tv_format_index ==
+		    intel_sdvo_connector->tv_format_supported[val])
+			return 0;
+
+		intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
+		goto done;
+	} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
+		temp_value = val;
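+		/* Overscan margins are kept symmetric: setting one side
+		 * mirrors the value onto the opposite property. */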
+		if (intel_sdvo_connector->left == property) {
+			drm_object_property_set_value(&connector->base,
+							 intel_sdvo_connector->right, val);
+			if (intel_sdvo_connector->left_margin == temp_value)
+				return 0;
+
+			intel_sdvo_connector->left_margin = temp_value;
+			intel_sdvo_connector->right_margin = temp_value;
+			temp_value = intel_sdvo_connector->max_hscan -
+				intel_sdvo_connector->left_margin;
+			cmd = SDVO_CMD_SET_OVERSCAN_H;
+			goto set_value;
+		} else if (intel_sdvo_connector->right == property) {
+			drm_object_property_set_value(&connector->base,
+							 intel_sdvo_connector->left, val);
+			if (intel_sdvo_connector->right_margin == temp_value)
+				return 0;
+
+			intel_sdvo_connector->left_margin = temp_value;
+			intel_sdvo_connector->right_margin = temp_value;
+			temp_value = intel_sdvo_connector->max_hscan -
+				intel_sdvo_connector->left_margin;
+			cmd = SDVO_CMD_SET_OVERSCAN_H;
+			goto set_value;
+		} else if (intel_sdvo_connector->top == property) {
+			drm_object_property_set_value(&connector->base,
+							 intel_sdvo_connector->bottom, val);
+			if (intel_sdvo_connector->top_margin == temp_value)
+				return 0;
+
+			intel_sdvo_connector->top_margin = temp_value;
+			intel_sdvo_connector->bottom_margin = temp_value;
+			temp_value = intel_sdvo_connector->max_vscan -
+				intel_sdvo_connector->top_margin;
+			cmd = SDVO_CMD_SET_OVERSCAN_V;
+			goto set_value;
+		} else if (intel_sdvo_connector->bottom == property) {
+			drm_object_property_set_value(&connector->base,
+							 intel_sdvo_connector->top, val);
+			if (intel_sdvo_connector->bottom_margin == temp_value)
+				return 0;
+
+			intel_sdvo_connector->top_margin = temp_value;
+			intel_sdvo_connector->bottom_margin = temp_value;
+			temp_value = intel_sdvo_connector->max_vscan -
+				intel_sdvo_connector->top_margin;
+			cmd = SDVO_CMD_SET_OVERSCAN_V;
+			goto set_value;
+		}
+		CHECK_PROPERTY(hpos, HPOS)
+		CHECK_PROPERTY(vpos, VPOS)
+		CHECK_PROPERTY(saturation, SATURATION)
+		CHECK_PROPERTY(contrast, CONTRAST)
+		CHECK_PROPERTY(hue, HUE)
+		CHECK_PROPERTY(brightness, BRIGHTNESS)
+		CHECK_PROPERTY(sharpness, SHARPNESS)
+		CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+		CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+		CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+		CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+		CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+		CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
+	}
+
+	return -EINVAL; /* unknown property */
+
+set_value:
+	if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2))
+		return -EIO;
+
+done:
+	if (intel_sdvo->base.base.crtc)
+		intel_crtc_restore_mode(intel_sdvo->base.base.crtc);
+
+	return 0;
+#undef CHECK_PROPERTY
+}
+
+static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
+	.dpms = intel_sdvo_dpms,
+	.detect = intel_sdvo_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = intel_sdvo_set_property,
+	.destroy = intel_sdvo_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
+	.get_modes = intel_sdvo_get_modes,
+	.mode_valid = intel_sdvo_mode_valid,
+	.best_encoder = intel_best_encoder,
+};
+
+static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+	struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+
+	if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+		drm_mode_destroy(encoder->dev,
+				 intel_sdvo->sdvo_lvds_fixed_mode);
+
+	i2c_del_adapter(&intel_sdvo->ddc);
+	intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
+	.destroy = intel_sdvo_enc_destroy,
+};
+
+static void
+intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
+{
+	uint16_t mask = 0;
+	unsigned int num_bits;
+
+	/* Make a mask of outputs less than or equal to our own priority in the
+	 * list.
+	 */
+	switch (sdvo->controlled_output) {
+	case SDVO_OUTPUT_LVDS1:
+		mask |= SDVO_OUTPUT_LVDS1;
+	case SDVO_OUTPUT_LVDS0:
+		mask |= SDVO_OUTPUT_LVDS0;
+	case SDVO_OUTPUT_TMDS1:
+		mask |= SDVO_OUTPUT_TMDS1;
+	case SDVO_OUTPUT_TMDS0:
+		mask |= SDVO_OUTPUT_TMDS0;
+	case SDVO_OUTPUT_RGB1:
+		mask |= SDVO_OUTPUT_RGB1;
+	case SDVO_OUTPUT_RGB0:
+		mask |= SDVO_OUTPUT_RGB0;
+		break;
+	}
+
+	/* Count bits to find what number we are in the priority list. */
+	mask &= sdvo->caps.output_flags;
+	num_bits = hweight16(mask);
+	/* If more than 3 outputs, default to DDC bus 3 for now. */
+	if (num_bits > 3)
+		num_bits = 3;
+
+	/* Corresponds to SDVO_CONTROL_BUS_DDCx */
+	sdvo->ddc_bus = 1 << num_bits;
+}
+
+/**
+ * Choose the appropriate DDC bus for control bus switch command for this
+ * SDVO output based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+static void
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+			  struct intel_sdvo *sdvo, u32 reg)
+{
+	struct sdvo_device_mapping *mapping;
+
+	if (sdvo->is_sdvob)
+		mapping = &(dev_priv->sdvo_mappings[0]);
+	else
+		mapping = &(dev_priv->sdvo_mappings[1]);
+
+	if (mapping->initialized)
+		sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+	else
+		intel_sdvo_guess_ddc_bus(sdvo);
+}
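+
+/*
+ * For illustration: a VBT mapping with ddc_pin == 0x25 yields
+ * (0x25 & 0xf0) >> 4 == 2, so ddc_bus == 1 << 2 == SDVO_CONTROL_BUS_DDC2.
+ * Only the high nibble of ddc_pin is used here.
+ */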
+
+static void
+intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
+			  struct intel_sdvo *sdvo, u32 reg)
+{
+	struct sdvo_device_mapping *mapping;
+	u8 pin;
+
+	if (sdvo->is_sdvob)
+		mapping = &dev_priv->sdvo_mappings[0];
+	else
+		mapping = &dev_priv->sdvo_mappings[1];
+
+	if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
+		pin = mapping->i2c_pin;
+	else
+		pin = GMBUS_PORT_DPB;
+
+	sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+
+	/* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+	 * our code totally fails once we start using gmbus. Hence fall back to
+	 * bit banging for now. */
+	intel_gmbus_force_bit(sdvo->i2c, true);
+}
+
+/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
+static void
+intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
+{
+	intel_gmbus_force_bit(sdvo->i2c, false);
+}
+
+static bool
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
+{
+	return intel_sdvo_check_supp_encode(intel_sdvo);
+}
+
+static u8
+intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct sdvo_device_mapping *my_mapping, *other_mapping;
+
+	if (sdvo->is_sdvob) {
+		my_mapping = &dev_priv->sdvo_mappings[0];
+		other_mapping = &dev_priv->sdvo_mappings[1];
+	} else {
+		my_mapping = &dev_priv->sdvo_mappings[1];
+		other_mapping = &dev_priv->sdvo_mappings[0];
+	}
+
+	/* If the BIOS described our SDVO device, take advantage of it. */
+	if (my_mapping->slave_addr)
+		return my_mapping->slave_addr;
+
+	/* If the BIOS only described a different SDVO device, use the
+	 * address that it isn't using.
+	 */
+	if (other_mapping->slave_addr) {
+		if (other_mapping->slave_addr == 0x70)
+			return 0x72;
+		else
+			return 0x70;
+	}
+
+	/* The BIOS described neither SDVO device, so fall back to the
+	 * mapping assumption used before BIOS parsing.
+	 */
+	if (sdvo->is_sdvob)
+		return 0x70;
+	else
+		return 0x72;
+}
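+
+/*
+ * Decision table for the above (0x70 and 0x72 are typically the only SDVO
+ * slave addresses in use):
+ *
+ *   BIOS entry, this port | BIOS entry, other port | result
+ *   ----------------------+------------------------+----------------------
+ *   present               | don't care             | BIOS-provided address
+ *   absent                | 0x70                   | 0x72
+ *   absent                | anything else          | 0x70
+ *   absent                | absent                 | 0x70 (SDVOB), 0x72 (SDVOC)
+ */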
+
+static void
+intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+			  struct intel_sdvo *encoder)
+{
+	drm_connector_init(encoder->base.base.dev,
+			   &connector->base.base,
+			   &intel_sdvo_connector_funcs,
+			   connector->base.base.connector_type);
+
+	drm_connector_helper_add(&connector->base.base,
+				 &intel_sdvo_connector_helper_funcs);
+
+	connector->base.base.interlace_allowed = 1;
+	connector->base.base.doublescan_allowed = 0;
+	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+	connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
+
+	intel_connector_attach_encoder(&connector->base, &encoder->base);
+	drm_sysfs_connector_add(&connector->base.base);
+}
+
+static void
+intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
+			       struct intel_sdvo_connector *connector)
+{
+	struct drm_device *dev = connector->base.base.dev;
+
+	intel_attach_force_audio_property(&connector->base.base);
+	if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
+		intel_attach_broadcast_rgb_property(&connector->base.base);
+		intel_sdvo->color_range_auto = true;
+	}
+}
+
+static bool
+intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
+{
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
+	struct drm_connector *connector;
+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *intel_sdvo_connector;
+
+	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	if (!intel_sdvo_connector)
+		return false;
+
+	if (device == 0) {
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+	} else if (device == 1) {
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+	}
+
+	intel_connector = &intel_sdvo_connector->base;
+	connector = &intel_connector->base;
+	if (intel_sdvo_get_hotplug_support(intel_sdvo) &
+		intel_sdvo_connector->output_flag) {
+		intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
+		/* Some SDVO devices have one-shot hotplug interrupts.
+		 * Ensure that they get re-enabled when an interrupt happens.
+		 */
+		intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
+		intel_sdvo_enable_hotplug(intel_encoder);
+	} else {
+		intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+	}
+	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+		intel_sdvo->is_hdmi = true;
+	}
+
+	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+	if (intel_sdvo->is_hdmi)
+		intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
+
+	return true;
+}
+
+static bool
+intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
+{
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
+	struct drm_connector *connector;
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *intel_sdvo_connector;
+
+	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	if (!intel_sdvo_connector)
+		return false;
+
+	intel_connector = &intel_sdvo_connector->base;
+	connector = &intel_connector->base;
+	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+	intel_sdvo->controlled_output |= type;
+	intel_sdvo_connector->output_flag = type;
+
+	intel_sdvo->is_tv = true;
+	intel_sdvo->base.needs_tv_clock = true;
+
+	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+
+	if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
+		goto err;
+
+	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+		goto err;
+
+	return true;
+
+err:
+	intel_sdvo_destroy(connector);
+	return false;
+}
+
+static bool
+intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
+{
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
+	struct drm_connector *connector;
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *intel_sdvo_connector;
+
+	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	if (!intel_sdvo_connector)
+		return false;
+
+	intel_connector = &intel_sdvo_connector->base;
+	connector = &intel_connector->base;
+	intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+	connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+	if (device == 0) {
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+	} else if (device == 1) {
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+	}
+
+	intel_sdvo_connector_init(intel_sdvo_connector,
+				  intel_sdvo);
+	return true;
+}
+
+static bool
+intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
+{
+	struct drm_encoder *encoder = &intel_sdvo->base.base;
+	struct drm_connector *connector;
+	struct intel_connector *intel_connector;
+	struct intel_sdvo_connector *intel_sdvo_connector;
+
+	intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+	if (!intel_sdvo_connector)
+		return false;
+
+	intel_connector = &intel_sdvo_connector->base;
+	connector = &intel_connector->base;
+	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+	if (device == 0) {
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+	} else if (device == 1) {
+		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+	}
+
+	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+		goto err;
+
+	return true;
+
+err:
+	intel_sdvo_destroy(connector);
+	return false;
+}
+
+static bool
+intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
+{
+	intel_sdvo->is_tv = false;
+	intel_sdvo->base.needs_tv_clock = false;
+	intel_sdvo->is_lvds = false;
+
+	/* SDVO requires that an XXX1 function block may not exist unless the
+	 * corresponding XXX0 function block exists.
+	 */
+
+	if (flags & SDVO_OUTPUT_TMDS0)
+		if (!intel_sdvo_dvi_init(intel_sdvo, 0))
+			return false;
+
+	if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+		if (!intel_sdvo_dvi_init(intel_sdvo, 1))
+			return false;
+
+	/* TV has no XXX1 function block */
+	if (flags & SDVO_OUTPUT_SVID0)
+		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
+			return false;
+
+	if (flags & SDVO_OUTPUT_CVBS0)
+		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
+			return false;
+
+	if (flags & SDVO_OUTPUT_YPRPB0)
+		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
+			return false;
+
+	if (flags & SDVO_OUTPUT_RGB0)
+		if (!intel_sdvo_analog_init(intel_sdvo, 0))
+			return false;
+
+	if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+		if (!intel_sdvo_analog_init(intel_sdvo, 1))
+			return false;
+
+	if (flags & SDVO_OUTPUT_LVDS0)
+		if (!intel_sdvo_lvds_init(intel_sdvo, 0))
+			return false;
+
+	if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+		if (!intel_sdvo_lvds_init(intel_sdvo, 1))
+			return false;
+
+	if ((flags & SDVO_OUTPUT_MASK) == 0) {
+		unsigned char bytes[2];
+
+		intel_sdvo->controlled_output = 0;
+		memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
+		DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+			      SDVO_NAME(intel_sdvo),
+			      bytes[0], bytes[1]);
+		return false;
+	}
+	intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+
+	return true;
+}
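+
+/*
+ * Note on the mask checks above: SDVO_TMDS_MASK, SDVO_RGB_MASK and
+ * SDVO_LVDS_MASK (as defined earlier in this file) each combine the XXX0
+ * and XXX1 flags, so e.g. a second DVI connector is only created when both
+ * TMDS0 and TMDS1 are reported, matching the XXX0/XXX1 rule above.
+ */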
+
+static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
+{
+	struct drm_device *dev = intel_sdvo->base.base.dev;
+	struct drm_connector *connector, *tmp;
+
+	list_for_each_entry_safe(connector, tmp,
+				 &dev->mode_config.connector_list, head) {
+		if (intel_attached_encoder(connector) == &intel_sdvo->base)
+			intel_sdvo_destroy(connector);
+	}
+}
+
+static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+					  struct intel_sdvo_connector *intel_sdvo_connector,
+					  int type)
+{
+	struct drm_device *dev = intel_sdvo->base.base.dev;
+	struct intel_sdvo_tv_format format;
+	uint32_t format_map, i;
+
+	if (!intel_sdvo_set_target_output(intel_sdvo, type))
+		return false;
+
+	BUILD_BUG_ON(sizeof(format) != 6);
+	if (!intel_sdvo_get_value(intel_sdvo,
+				  SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+				  &format, sizeof(format)))
+		return false;
+
+	memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+
+	if (format_map == 0)
+		return false;
+
+	intel_sdvo_connector->format_supported_num = 0;
+	for (i = 0; i < TV_FORMAT_NUM; i++)
+		if (format_map & (1 << i))
+			intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;
+
+	intel_sdvo_connector->tv_format =
+			drm_property_create(dev, DRM_MODE_PROP_ENUM,
+					    "mode", intel_sdvo_connector->format_supported_num);
+	if (!intel_sdvo_connector->tv_format)
+		return false;
+
+	for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
+		drm_property_add_enum(
+				intel_sdvo_connector->tv_format, i,
+				i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
+
+	intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
+	drm_object_attach_property(&intel_sdvo_connector->base.base.base,
+				      intel_sdvo_connector->tv_format, 0);
+	return true;
+}
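+
+/*
+ * Illustration (hypothetical reply): if the device reports only NTSC-M and
+ * PAL-B, format_map == 0x9 (bits 0 and 3 of struct intel_sdvo_tv_format),
+ * tv_format_supported[] becomes {0, 3}, and the "mode" enum property gets
+ * two entries named from tv_format_names[].
+ */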
+
+#define ENHANCEMENT(name, NAME) do { \
+	if (enhancements.name) { \
+		if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+		    !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+			return false; \
+		intel_sdvo_connector->max_##name = data_value[0]; \
+		intel_sdvo_connector->cur_##name = response; \
+		intel_sdvo_connector->name = \
+			drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
+		if (!intel_sdvo_connector->name) return false; \
+		drm_object_attach_property(&connector->base, \
+					      intel_sdvo_connector->name, \
+					      intel_sdvo_connector->cur_##name); \
+		DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+			      data_value[0], data_value[1], response); \
+	} \
+} while (0)
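+
+/*
+ * For example, ENHANCEMENT(brightness, BRIGHTNESS) expands to: query
+ * SDVO_CMD_GET_MAX_BRIGHTNESS (max in data_value[0], default in
+ * data_value[1]) and SDVO_CMD_GET_BRIGHTNESS (current value), then create
+ * and attach a "brightness" range property spanning 0..max.
+ */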
+
+static bool
+intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+				      struct intel_sdvo_connector *intel_sdvo_connector,
+				      struct intel_sdvo_enhancements_reply enhancements)
+{
+	struct drm_device *dev = intel_sdvo->base.base.dev;
+	struct drm_connector *connector = &intel_sdvo_connector->base.base;
+	uint16_t response, data_value[2];
+
+	/* When horizontal overscan is supported, add the left/right margin properties. */
+	if (enhancements.overscan_h) {
+		if (!intel_sdvo_get_value(intel_sdvo,
+					  SDVO_CMD_GET_MAX_OVERSCAN_H,
+					  &data_value, 4))
+			return false;
+
+		if (!intel_sdvo_get_value(intel_sdvo,
+					  SDVO_CMD_GET_OVERSCAN_H,
+					  &response, 2))
+			return false;
+
+		intel_sdvo_connector->max_hscan = data_value[0];
+		intel_sdvo_connector->left_margin = data_value[0] - response;
+		intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
+		intel_sdvo_connector->left =
+			drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
+		if (!intel_sdvo_connector->left)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      intel_sdvo_connector->left,
+					      intel_sdvo_connector->left_margin);
+
+		intel_sdvo_connector->right =
+			drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
+		if (!intel_sdvo_connector->right)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      intel_sdvo_connector->right,
+					      intel_sdvo_connector->right_margin);
+		DRM_DEBUG_KMS("h_overscan: max %d, "
+			      "default %d, current %d\n",
+			      data_value[0], data_value[1], response);
+	}
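+
+	/*
+	 * Illustrative numbers: with max_hscan (data_value[0]) == 100 and a
+	 * current overscan (response) of 40, both margins start out at 60;
+	 * intel_sdvo_set_property() later programs the hardware with
+	 * max_hscan minus the requested margin.
+	 */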
+
+	if (enhancements.overscan_v) {
+		if (!intel_sdvo_get_value(intel_sdvo,
+					  SDVO_CMD_GET_MAX_OVERSCAN_V,
+					  &data_value, 4))
+			return false;
+
+		if (!intel_sdvo_get_value(intel_sdvo,
+					  SDVO_CMD_GET_OVERSCAN_V,
+					  &response, 2))
+			return false;
+
+		intel_sdvo_connector->max_vscan = data_value[0];
+		intel_sdvo_connector->top_margin = data_value[0] - response;
+		intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
+		intel_sdvo_connector->top =
+			drm_property_create_range(dev, 0,
+					    "top_margin", 0, data_value[0]);
+		if (!intel_sdvo_connector->top)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      intel_sdvo_connector->top,
+					      intel_sdvo_connector->top_margin);
+
+		intel_sdvo_connector->bottom =
+			drm_property_create_range(dev, 0,
+					    "bottom_margin", 0, data_value[0]);
+		if (!intel_sdvo_connector->bottom)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      intel_sdvo_connector->bottom,
+					      intel_sdvo_connector->bottom_margin);
+		DRM_DEBUG_KMS("v_overscan: max %d, "
+			      "default %d, current %d\n",
+			      data_value[0], data_value[1], response);
+	}
+
+	ENHANCEMENT(hpos, HPOS);
+	ENHANCEMENT(vpos, VPOS);
+	ENHANCEMENT(saturation, SATURATION);
+	ENHANCEMENT(contrast, CONTRAST);
+	ENHANCEMENT(hue, HUE);
+	ENHANCEMENT(sharpness, SHARPNESS);
+	ENHANCEMENT(brightness, BRIGHTNESS);
+	ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+	ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+	ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+	ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+	ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+	if (enhancements.dot_crawl) {
+		if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+			return false;
+
+		intel_sdvo_connector->max_dot_crawl = 1;
+		intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+		intel_sdvo_connector->dot_crawl =
+			drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
+		if (!intel_sdvo_connector->dot_crawl)
+			return false;
+
+		drm_object_attach_property(&connector->base,
+					      intel_sdvo_connector->dot_crawl,
+					      intel_sdvo_connector->cur_dot_crawl);
+		DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+	}
+
+	return true;
+}
+
+static bool
+intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
+					struct intel_sdvo_connector *intel_sdvo_connector,
+					struct intel_sdvo_enhancements_reply enhancements)
+{
+	struct drm_device *dev = intel_sdvo->base.base.dev;
+	struct drm_connector *connector = &intel_sdvo_connector->base.base;
+	uint16_t response, data_value[2];
+
+	ENHANCEMENT(brightness, BRIGHTNESS);
+
+	return true;
+}
+#undef ENHANCEMENT
+
+static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
+					       struct intel_sdvo_connector *intel_sdvo_connector)
+{
+	union {
+		struct intel_sdvo_enhancements_reply reply;
+		uint16_t response;
+	} enhancements;
+
+	BUILD_BUG_ON(sizeof(enhancements) != 2);
+
+	enhancements.response = 0;
+	intel_sdvo_get_value(intel_sdvo,
+			     SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+			     &enhancements, sizeof(enhancements));
+	if (enhancements.response == 0) {
+		DRM_DEBUG_KMS("No enhancement is supported\n");
+		return true;
+	}
+
+	if (IS_TV(intel_sdvo_connector))
+		return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+	else if (IS_LVDS(intel_sdvo_connector))
+		return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
+	else
+		return true;
+}
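+
+/*
+ * The union above works because struct intel_sdvo_enhancements_reply is
+ * exactly 16 one-bit flags, i.e. the same two bytes as the u16 response
+ * (enforced by the BUILD_BUG_ON); a zero response therefore means no
+ * enhancement is supported at all.
+ */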
+
+static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+				     struct i2c_msg *msgs,
+				     int num)
+{
+	struct intel_sdvo *sdvo = adapter->algo_data;
+
+	if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+		return -EIO;
+
+	return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+	struct intel_sdvo *sdvo = adapter->algo_data;
+	return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
+	.master_xfer	= intel_sdvo_ddc_proxy_xfer,
+	.functionality	= intel_sdvo_ddc_proxy_func
+};
+
+static bool
+intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
+			  struct drm_device *dev)
+{
+	sdvo->ddc.owner = THIS_MODULE;
+	sdvo->ddc.class = I2C_CLASS_DDC;
+	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+	sdvo->ddc.dev.parent = &dev->pdev->dev;
+	sdvo->ddc.algo_data = sdvo;
+	sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
+
+	return i2c_add_adapter(&sdvo->ddc) == 0;
+}
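+
+/*
+ * Usage sketch: EDID fetches performed on &sdvo->ddc (e.g. via
+ * drm_get_edid()) land in intel_sdvo_ddc_proxy_xfer(), which first points
+ * the device's control bus at the DDC pins chosen by
+ * intel_sdvo_select_ddc_bus() and then forwards the i2c messages to the
+ * real GMBUS adapter.
+ */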
+
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	struct intel_sdvo *intel_sdvo;
+	u32 hotplug_mask;
+	int i;
+
+	intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+	if (!intel_sdvo)
+		return false;
+
+	intel_sdvo->sdvo_reg = sdvo_reg;
+	intel_sdvo->is_sdvob = is_sdvob;
+	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
+	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
+	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
+		goto err_i2c_bus;
+
+	/* encoder type will be decided later */
+	intel_encoder = &intel_sdvo->base;
+	intel_encoder->type = INTEL_OUTPUT_SDVO;
+	drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0);
+
+	/* Read the regs to test if we can talk to the device */
+	for (i = 0; i < 0x40; i++) {
+		u8 byte;
+
+		if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
+			DRM_DEBUG_KMS("No SDVO device found on %s\n",
+				      SDVO_NAME(intel_sdvo));
+			goto err;
+		}
+	}
+
+	hotplug_mask = 0;
+	if (IS_G4X(dev)) {
+		hotplug_mask = intel_sdvo->is_sdvob ?
+			SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
+	} else if (IS_GEN4(dev)) {
+		hotplug_mask = intel_sdvo->is_sdvob ?
+			SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
+	} else {
+		hotplug_mask = intel_sdvo->is_sdvob ?
+			SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
+	}
+
+	intel_encoder->compute_config = intel_sdvo_compute_config;
+	intel_encoder->disable = intel_disable_sdvo;
+	intel_encoder->mode_set = intel_sdvo_mode_set;
+	intel_encoder->enable = intel_enable_sdvo;
+	intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
+
+	/* In the default case, SDVO LVDS is false. */
+	if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+		goto err;
+
+	if (!intel_sdvo_output_setup(intel_sdvo,
+				     intel_sdvo->caps.output_flags)) {
+		DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
+			      SDVO_NAME(intel_sdvo));
+		/* Output_setup can leave behind connectors! */
+		goto err_output;
+	}
+
+	/* Only enable the hotplug irq if we need it, to work around noisy
+	 * hotplug lines.
+	 */
+	if (intel_sdvo->hotplug_active) {
+		intel_encoder->hpd_pin =
+			intel_sdvo->is_sdvob ?  HPD_SDVO_B : HPD_SDVO_C;
+	}
+
+	/*
+	 * Cloning SDVO with anything is often impossible, since the SDVO
+	 * encoder can request a special input timing mode. And even if that's
+	 * not the case we have evidence that cloning a plain unscaled mode with
+	 * VGA doesn't really work. Furthermore the cloning flags are way too
+	 * simplistic anyway to express such constraints, so just give up on
+	 * cloning for SDVO encoders.
+	 */
+	intel_sdvo->base.cloneable = false;
+
+	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
+
+	/* Set the input timing to the screen. Always assume input 0. */
+	if (!intel_sdvo_set_target_input(intel_sdvo))
+		goto err_output;
+
+	if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+						    &intel_sdvo->pixel_clock_min,
+						    &intel_sdvo->pixel_clock_max))
+		goto err_output;
+
+	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+			"clock range %dMHz - %dMHz, "
+			"input 1: %c, input 2: %c, "
+			"output 1: %c, output 2: %c\n",
+			SDVO_NAME(intel_sdvo),
+			intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
+			intel_sdvo->caps.device_rev_id,
+			intel_sdvo->pixel_clock_min / 1000,
+			intel_sdvo->pixel_clock_max / 1000,
+			(intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+			(intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+			/* check currently supported outputs */
+			intel_sdvo->caps.output_flags &
+			(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+			intel_sdvo->caps.output_flags &
+			(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+	return true;
+
+err_output:
+	intel_sdvo_output_cleanup(intel_sdvo);
+
+err:
+	drm_encoder_cleanup(&intel_encoder->base);
+	i2c_del_adapter(&intel_sdvo->ddc);
+err_i2c_bus:
+	intel_sdvo_unselect_i2c_bus(intel_sdvo);
+	kfree(intel_sdvo);
+
+	return false;
+}
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_sdvo_regs.h b/linux-imx/drivers/gpu/drm/i915/intel_sdvo_regs.h
new file mode 100644
index 0000000..770bdd6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -0,0 +1,730 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Eric Anholt <eric@anholt.net>
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST   (0)
+#define SDVO_OUTPUT_TMDS0   (1 << 0)
+#define SDVO_OUTPUT_RGB0    (1 << 1)
+#define SDVO_OUTPUT_CVBS0   (1 << 2)
+#define SDVO_OUTPUT_SVID0   (1 << 3)
+#define SDVO_OUTPUT_YPRPB0  (1 << 4)
+#define SDVO_OUTPUT_SCART0  (1 << 5)
+#define SDVO_OUTPUT_LVDS0   (1 << 6)
+#define SDVO_OUTPUT_TMDS1   (1 << 8)
+#define SDVO_OUTPUT_RGB1    (1 << 9)
+#define SDVO_OUTPUT_CVBS1   (1 << 10)
+#define SDVO_OUTPUT_SVID1   (1 << 11)
+#define SDVO_OUTPUT_YPRPB1  (1 << 12)
+#define SDVO_OUTPUT_SCART1  (1 << 13)
+#define SDVO_OUTPUT_LVDS1   (1 << 14)
+#define SDVO_OUTPUT_LAST    (14)
+
+struct intel_sdvo_caps {
+	u8 vendor_id;
+	u8 device_id;
+	u8 device_rev_id;
+	u8 sdvo_version_major;
+	u8 sdvo_version_minor;
+	unsigned int sdvo_inputs_mask:2;
+	unsigned int smooth_scaling:1;
+	unsigned int sharp_scaling:1;
+	unsigned int up_scaling:1;
+	unsigned int down_scaling:1;
+	unsigned int stall_support:1;
+	unsigned int pad:1;
+	u16 output_flags;
+} __attribute__((packed));
+
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE	(1 << 7)
+
+/** This matches the EDID DTD structure, more or less */
+struct intel_sdvo_dtd {
+	struct {
+		u16 clock;	/**< pixel clock, in 10kHz units */
+		u8 h_active;	/**< lower 8 bits (pixels) */
+		u8 h_blank;	/**< lower 8 bits (pixels) */
+		u8 h_high;	/**< upper 4 bits each h_active, h_blank */
+		u8 v_active;	/**< lower 8 bits (lines) */
+		u8 v_blank;	/**< lower 8 bits (lines) */
+		u8 v_high;	/**< upper 4 bits each v_active, v_blank */
+	} part1;
+
+	struct {
+		u8 h_sync_off;	/**< lower 8 bits, from hblank start */
+		u8 h_sync_width;	/**< lower 8 bits (pixels) */
+		/** lower 4 bits each vsync offset, vsync width */
+		u8 v_sync_off_width;
+		/**
+		* 2 high bits of hsync offset, 2 high bits of hsync width,
+		* bits 4-5 of vsync offset, and 2 high bits of vsync width.
+		*/
+		u8 sync_off_width_high;
+		u8 dtd_flags;
+		u8 sdvo_flags;
+		/** bits 6-7 of vsync offset, stored at bits 6-7 of this byte */
+		u8 v_sync_off_high;
+		u8 reserved;
+	} part2;
+} __attribute__((packed));
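+
+/*
+ * Decode sketch (assuming the field packing described above): the full
+ * 12-bit horizontal active width is
+ *	((part1.h_high & 0xf0) << 4) | part1.h_active
+ * and the pixel clock in kHz is part1.clock * 10.
+ */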
+
+struct intel_sdvo_pixel_clock_range {
+	u16 min;	/**< pixel clock, in 10kHz units */
+	u16 max;	/**< pixel clock, in 10kHz units */
+} __attribute__((packed));
+
+struct intel_sdvo_preferred_input_timing_args {
+	u16 clock;
+	u16 width;
+	u16 height;
+	u8	interlace:1;
+	u8	scaled:1;
+	u8	pad:6;
+} __attribute__((packed));
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0				0x07
+#define SDVO_I2C_ARG_1				0x06
+#define SDVO_I2C_ARG_2				0x05
+#define SDVO_I2C_ARG_3				0x04
+#define SDVO_I2C_ARG_4				0x03
+#define SDVO_I2C_ARG_5				0x02
+#define SDVO_I2C_ARG_6				0x01
+#define SDVO_I2C_ARG_7				0x00
+#define SDVO_I2C_OPCODE				0x08
+#define SDVO_I2C_CMD_STATUS			0x09
+#define SDVO_I2C_RETURN_0			0x0a
+#define SDVO_I2C_RETURN_1			0x0b
+#define SDVO_I2C_RETURN_2			0x0c
+#define SDVO_I2C_RETURN_3			0x0d
+#define SDVO_I2C_RETURN_4			0x0e
+#define SDVO_I2C_RETURN_5			0x0f
+#define SDVO_I2C_RETURN_6			0x10
+#define SDVO_I2C_RETURN_7			0x11
+#define SDVO_I2C_VENDOR_BEGIN			0x20
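+
+/*
+ * Rough command protocol: the host writes the argument bytes into
+ * 0x00-0x07 (note that ARG_0 sits at 0x07, directly below the opcode
+ * register), writes the opcode to 0x08, polls 0x09 until the status is no
+ * longer SDVO_CMD_STATUS_PENDING, and then reads any results starting at
+ * 0x0a.
+ */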
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON		0x0
+#define SDVO_CMD_STATUS_SUCCESS			0x1
+#define SDVO_CMD_STATUS_NOTSUPP			0x2
+#define SDVO_CMD_STATUS_INVALID_ARG		0x3
+#define SDVO_CMD_STATUS_PENDING			0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED	0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP	0x6
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET					0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS			0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV			0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR			SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR			SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH			SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS			0x03
+struct intel_sdvo_get_trained_inputs_response {
+	unsigned int input0_trained:1;
+	unsigned int input1_trained:1;
+	unsigned int pad:6;
+} __attribute__((packed));
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS			0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags.  Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS			0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP				0x06
+struct intel_sdvo_in_out_map {
+	u16 in0, in1;
+};
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP				0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS			0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT			0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG			0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG			0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE		0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+	u16 interrupt_status;
+	unsigned int ambient_light_interrupt:1;
+	unsigned int hdmi_audio_encrypt_change:1;
+	unsigned int pad:6;
+} __attribute__((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT			0x10
+struct intel_sdvo_set_target_input_args {
+	unsigned int target_1:1;
+	unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT			0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1		0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2		0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1		0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2		0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1		0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2		0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1		0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2		0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW				SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH				SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE				SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK				SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH				SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE				SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK				SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH				SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF				SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH				SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH			SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH			SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS				SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED				(1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK				(3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK				(3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK				(3 << 1)
+# define SDVO_DTD_SDVO_FLAGS				SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL				(1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED				(0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT				(1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK			(3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE			(0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP			(1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH			(2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH			SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING		0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW		SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH		SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW		SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH		SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW		SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH	SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS		SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED		(1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED		(1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1	0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2	0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE		0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE		0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS		0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT			0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT			0x21
+# define SDVO_CLOCK_RATE_MULT_1X				(1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X				(1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X				(1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS		0x27
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+struct intel_sdvo_tv_format {
+	unsigned int ntsc_m:1;
+	unsigned int ntsc_j:1;
+	unsigned int ntsc_443:1;
+	unsigned int pal_b:1;
+	unsigned int pal_d:1;
+	unsigned int pal_g:1;
+	unsigned int pal_h:1;
+	unsigned int pal_i:1;
+
+	unsigned int pal_m:1;
+	unsigned int pal_n:1;
+	unsigned int pal_nc:1;
+	unsigned int pal_60:1;
+	unsigned int secam_b:1;
+	unsigned int secam_d:1;
+	unsigned int secam_g:1;
+	unsigned int secam_k:1;
+
+	unsigned int secam_k1:1;
+	unsigned int secam_l:1;
+	unsigned int secam_60:1;
+	unsigned int hdtv_std_smpte_240m_1080i_59:1;
+	unsigned int hdtv_std_smpte_240m_1080i_60:1;
+	unsigned int hdtv_std_smpte_260m_1080i_59:1;
+	unsigned int hdtv_std_smpte_260m_1080i_60:1;
+	unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+	unsigned int hdtv_std_smpte_274m_1080i_59:1;
+	unsigned int hdtv_std_smpte_274m_1080i_60:1;
+	unsigned int hdtv_std_smpte_274m_1080p_23:1;
+	unsigned int hdtv_std_smpte_274m_1080p_24:1;
+	unsigned int hdtv_std_smpte_274m_1080p_25:1;
+	unsigned int hdtv_std_smpte_274m_1080p_29:1;
+	unsigned int hdtv_std_smpte_274m_1080p_30:1;
+	unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+	unsigned int hdtv_std_smpte_274m_1080p_59:1;
+	unsigned int hdtv_std_smpte_274m_1080p_60:1;
+	unsigned int hdtv_std_smpte_295m_1080i_50:1;
+	unsigned int hdtv_std_smpte_295m_1080p_50:1;
+	unsigned int hdtv_std_smpte_296m_720p_59:1;
+	unsigned int hdtv_std_smpte_296m_720p_60:1;
+	unsigned int hdtv_std_smpte_296m_720p_50:1;
+	unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+	unsigned int hdtv_std_smpte_170m_480i_59:1;
+	unsigned int hdtv_std_iturbt601_576i_50:1;
+	unsigned int hdtv_std_iturbt601_576p_50:1;
+	unsigned int hdtv_std_eia_7702a_480i_60:1;
+	unsigned int hdtv_std_eia_7702a_480p_60:1;
+	unsigned int pad:3;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_TV_FORMAT				0x28
+
+#define SDVO_CMD_SET_TV_FORMAT				0x29
+
+/** Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT		0x83
+struct intel_sdvo_sdtv_resolution_request {
+	unsigned int ntsc_m:1;
+	unsigned int ntsc_j:1;
+	unsigned int ntsc_443:1;
+	unsigned int pal_b:1;
+	unsigned int pal_d:1;
+	unsigned int pal_g:1;
+	unsigned int pal_h:1;
+	unsigned int pal_i:1;
+
+	unsigned int pal_m:1;
+	unsigned int pal_n:1;
+	unsigned int pal_nc:1;
+	unsigned int pal_60:1;
+	unsigned int secam_b:1;
+	unsigned int secam_d:1;
+	unsigned int secam_g:1;
+	unsigned int secam_k:1;
+
+	unsigned int secam_k1:1;
+	unsigned int secam_l:1;
+	unsigned int secam_60:1;
+	unsigned int pad:5;
+} __attribute__((packed));
+
+struct intel_sdvo_sdtv_resolution_reply {
+	unsigned int res_320x200:1;
+	unsigned int res_320x240:1;
+	unsigned int res_400x300:1;
+	unsigned int res_640x350:1;
+	unsigned int res_640x400:1;
+	unsigned int res_640x480:1;
+	unsigned int res_704x480:1;
+	unsigned int res_704x576:1;
+
+	unsigned int res_720x350:1;
+	unsigned int res_720x400:1;
+	unsigned int res_720x480:1;
+	unsigned int res_720x540:1;
+	unsigned int res_720x576:1;
+	unsigned int res_768x576:1;
+	unsigned int res_800x600:1;
+	unsigned int res_832x624:1;
+
+	unsigned int res_920x766:1;
+	unsigned int res_1024x768:1;
+	unsigned int res_1280x1024:1;
+	unsigned int pad:5;
+} __attribute__((packed));
+
+/* Get supported resolutions with square pixel aspect ratio that can be
+   scaled for the requested HDTV format */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT		0x85
+
+struct intel_sdvo_hdtv_resolution_request {
+	unsigned int hdtv_std_smpte_240m_1080i_59:1;
+	unsigned int hdtv_std_smpte_240m_1080i_60:1;
+	unsigned int hdtv_std_smpte_260m_1080i_59:1;
+	unsigned int hdtv_std_smpte_260m_1080i_60:1;
+	unsigned int hdtv_std_smpte_274m_1080i_50:1;
+	unsigned int hdtv_std_smpte_274m_1080i_59:1;
+	unsigned int hdtv_std_smpte_274m_1080i_60:1;
+	unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+	unsigned int hdtv_std_smpte_274m_1080p_24:1;
+	unsigned int hdtv_std_smpte_274m_1080p_25:1;
+	unsigned int hdtv_std_smpte_274m_1080p_29:1;
+	unsigned int hdtv_std_smpte_274m_1080p_30:1;
+	unsigned int hdtv_std_smpte_274m_1080p_50:1;
+	unsigned int hdtv_std_smpte_274m_1080p_59:1;
+	unsigned int hdtv_std_smpte_274m_1080p_60:1;
+	unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+	unsigned int hdtv_std_smpte_295m_1080p_50:1;
+	unsigned int hdtv_std_smpte_296m_720p_59:1;
+	unsigned int hdtv_std_smpte_296m_720p_60:1;
+	unsigned int hdtv_std_smpte_296m_720p_50:1;
+	unsigned int hdtv_std_smpte_293m_480p_59:1;
+	unsigned int hdtv_std_smpte_170m_480i_59:1;
+	unsigned int hdtv_std_iturbt601_576i_50:1;
+	unsigned int hdtv_std_iturbt601_576p_50:1;
+
+	unsigned int hdtv_std_eia_7702a_480i_60:1;
+	unsigned int hdtv_std_eia_7702a_480p_60:1;
+	unsigned int pad:6;
+} __attribute__((packed));
+
+struct intel_sdvo_hdtv_resolution_reply {
+	unsigned int res_640x480:1;
+	unsigned int res_800x600:1;
+	unsigned int res_1024x768:1;
+	unsigned int res_1280x960:1;
+	unsigned int res_1400x1050:1;
+	unsigned int res_1600x1200:1;
+	unsigned int res_1920x1440:1;
+	unsigned int res_2048x1536:1;
+
+	unsigned int res_2560x1920:1;
+	unsigned int res_3200x2400:1;
+	unsigned int res_3840x2880:1;
+	unsigned int pad1:5;
+
+	unsigned int res_848x480:1;
+	unsigned int res_1064x600:1;
+	unsigned int res_1280x720:1;
+	unsigned int res_1360x768:1;
+	unsigned int res_1704x960:1;
+	unsigned int res_1864x1050:1;
+	unsigned int res_1920x1080:1;
+	unsigned int res_2128x1200:1;
+
+	unsigned int res_2560x1400:1;
+	unsigned int res_2728x1536:1;
+	unsigned int res_3408x1920:1;
+	unsigned int res_4264x2400:1;
+	unsigned int res_5120x2880:1;
+	unsigned int pad2:3;
+
+	unsigned int res_768x480:1;
+	unsigned int res_960x600:1;
+	unsigned int res_1152x720:1;
+	unsigned int res_1124x768:1;
+	unsigned int res_1536x960:1;
+	unsigned int res_1680x1050:1;
+	unsigned int res_1728x1080:1;
+	unsigned int res_1920x1200:1;
+
+	unsigned int res_2304x1440:1;
+	unsigned int res_2456x1536:1;
+	unsigned int res_3072x1920:1;
+	unsigned int res_3840x2400:1;
+	unsigned int res_4608x2880:1;
+	unsigned int pad3:3;
+
+	unsigned int res_1280x1024:1;
+	unsigned int pad4:7;
+
+	unsigned int res_1280x768:1;
+	unsigned int pad5:7;
+} __attribute__((packed));
+
+/* Get supported power state returns info for encoder and monitor; relies on
+   the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES		0x2a
+/* Get power state returns info for encoder and monitor; relies on the last
+   SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE			0x2b
+#define SDVO_CMD_GET_ENCODER_POWER_STATE		0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE		0x2c
+# define SDVO_ENCODER_STATE_ON					(1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY				(1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND				(1 << 2)
+# define SDVO_ENCODER_STATE_OFF					(1 << 3)
+# define SDVO_MONITOR_STATE_ON					(1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY				(1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND				(1 << 6)
+# define SDVO_MONITOR_STATE_OFF					(1 << 7)
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING		0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING		0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING		0x2f
+/**
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 8:9 of the 10-bit values.
+ */
+struct sdvo_panel_power_sequencing {
+	u8 t0;
+	u8 t1;
+	u8 t2;
+	u8 t3;
+	u8 t4;
+
+	unsigned int t0_high:2;
+	unsigned int t1_high:2;
+	unsigned int t2_high:2;
+	unsigned int t3_high:2;
+
+	unsigned int t4_high:2;
+	unsigned int pad:6;
+} __attribute__((packed));
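+
+/*
+ * Example reconstruction: the full 10-bit t0 value is
+ *	((u16)seq.t0_high << 8) | seq.t0
+ * giving a range of 0-1023 ms; t1 through t4 follow the same pattern.
+ */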
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL		0x30
+struct sdvo_max_backlight_reply {
+	u8 max_value;
+	u8 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL			0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL			0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT			0x33
+struct sdvo_get_ambient_light_reply {
+	u16 trip_low;
+	u16 trip_high;
+	u16 value;
+} __attribute__((packed));
+#define SDVO_CMD_SET_AMBIENT_LIGHT			0x34
+struct sdvo_set_ambient_light_reply {
+	u16 trip_low;
+	u16 trip_high;
+	unsigned int enable:1;
+	unsigned int pad:7;
+} __attribute__((packed));
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE		0x7d
+# define SDVO_DISPLAY_STATE_ON				(1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY			(1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND			(1 << 2)
+# define SDVO_DISPLAY_STATE_OFF				(1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS		0x84
+struct intel_sdvo_enhancements_reply {
+	unsigned int flicker_filter:1;
+	unsigned int flicker_filter_adaptive:1;
+	unsigned int flicker_filter_2d:1;
+	unsigned int saturation:1;
+	unsigned int hue:1;
+	unsigned int brightness:1;
+	unsigned int contrast:1;
+	unsigned int overscan_h:1;
+
+	unsigned int overscan_v:1;
+	unsigned int hpos:1;
+	unsigned int vpos:1;
+	unsigned int sharpness:1;
+	unsigned int dot_crawl:1;
+	unsigned int dither:1;
+	unsigned int tv_chroma_filter:1;
+	unsigned int tv_luma_filter:1;
+} __attribute__((packed));
+
+/* Picture enhancement limits below are dependent on the current TV format,
+ * and thus need to be queried and set after it.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER			0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE	0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D		0x52
+#define SDVO_CMD_GET_MAX_SATURATION			0x55
+#define SDVO_CMD_GET_MAX_HUE				0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS			0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST			0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H			0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V			0x64
+#define SDVO_CMD_GET_MAX_HPOS				0x67
+#define SDVO_CMD_GET_MAX_VPOS				0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS			0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER		0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER			0x77
+struct intel_sdvo_enhancement_limits_reply {
+	u16 max_value;
+	u16 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION		0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION		0x80
+# define SDVO_LVDS_COLOR_DEPTH_18			(0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24			(1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG			(0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI			(1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL			(0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL				(1 << 4)
+
+#define SDVO_CMD_GET_FLICKER_FILTER			0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER			0x4f
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE		0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE		0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D			0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D			0x54
+#define SDVO_CMD_GET_SATURATION				0x56
+#define SDVO_CMD_SET_SATURATION				0x57
+#define SDVO_CMD_GET_HUE				0x59
+#define SDVO_CMD_SET_HUE				0x5a
+#define SDVO_CMD_GET_BRIGHTNESS				0x5c
+#define SDVO_CMD_SET_BRIGHTNESS				0x5d
+#define SDVO_CMD_GET_CONTRAST				0x5f
+#define SDVO_CMD_SET_CONTRAST				0x60
+#define SDVO_CMD_GET_OVERSCAN_H				0x62
+#define SDVO_CMD_SET_OVERSCAN_H				0x63
+#define SDVO_CMD_GET_OVERSCAN_V				0x65
+#define SDVO_CMD_SET_OVERSCAN_V				0x66
+#define SDVO_CMD_GET_HPOS				0x68
+#define SDVO_CMD_SET_HPOS				0x69
+#define SDVO_CMD_GET_VPOS				0x6b
+#define SDVO_CMD_SET_VPOS				0x6c
+#define SDVO_CMD_GET_SHARPNESS				0x6e
+#define SDVO_CMD_SET_SHARPNESS				0x6f
+#define SDVO_CMD_GET_TV_CHROMA_FILTER			0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER			0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER			0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER			0x79
+struct intel_sdvo_enhancements_arg {
+	u16 value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_DOT_CRAWL				0x70
+#define SDVO_CMD_SET_DOT_CRAWL				0x71
+# define SDVO_DOT_CRAWL_ON					(1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON				(1 << 1)
+
+#define SDVO_CMD_GET_DITHER				0x72
+#define SDVO_CMD_SET_DITHER				0x73
+# define SDVO_DITHER_ON						(1 << 0)
+# define SDVO_DITHER_DEFAULT_ON					(1 << 1)
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH			0x7a
+# define SDVO_CONTROL_BUS_PROM				(1 << 0)
+# define SDVO_CONTROL_BUS_DDC1				(1 << 1)
+# define SDVO_CONTROL_BUS_DDC2				(1 << 2)
+# define SDVO_CONTROL_BUS_DDC3				(1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE	0x9d
+#define SDVO_CMD_GET_ENCODE		0x9e
+#define SDVO_CMD_SET_ENCODE		0x9f
+  #define SDVO_ENCODE_DVI	0x0
+  #define SDVO_ENCODE_HDMI	0x1
+#define SDVO_CMD_SET_PIXEL_REPLI	0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI	0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP	0x8d
+#define SDVO_CMD_SET_COLORIMETRY	0x8e
+  #define SDVO_COLORIMETRY_RGB256   0x0
+  #define SDVO_COLORIMETRY_RGB220   0x1
+  #define SDVO_COLORIMETRY_YCrCb422 0x3
+  #define SDVO_COLORIMETRY_YCrCb444 0x4
+#define SDVO_CMD_GET_COLORIMETRY	0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT		0x91
+#define SDVO_CMD_GET_AUDIO_STAT		0x92
+#define SDVO_CMD_SET_HBUF_INDEX		0x93
+  #define SDVO_HBUF_INDEX_ELD		0
+  #define SDVO_HBUF_INDEX_AVI_IF	1
+#define SDVO_CMD_GET_HBUF_INDEX		0x94
+#define SDVO_CMD_GET_HBUF_INFO		0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT	0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT	0x97
+#define SDVO_CMD_SET_HBUF_DATA		0x98
+#define SDVO_CMD_GET_HBUF_DATA		0x99
+#define SDVO_CMD_SET_HBUF_TXRATE	0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE	0x9b
+  #define SDVO_HBUF_TX_DISABLED	(0 << 6)
+  #define SDVO_HBUF_TX_ONCE	(2 << 6)
+  #define SDVO_HBUF_TX_VSYNC	(3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO	0x9c
+#define SDVO_NEED_TO_STALL  (1 << 7)
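+
+/*
+ * Expected HBUF usage (a sketch, not a guaranteed sequence): to send an
+ * AVI infoframe, set SDVO_CMD_SET_HBUF_INDEX to SDVO_HBUF_INDEX_AVI_IF,
+ * write the frame in 8-byte chunks with SDVO_CMD_SET_HBUF_DATA, and
+ * finally set SDVO_CMD_SET_HBUF_TXRATE to SDVO_HBUF_TX_VSYNC so the frame
+ * is transmitted every vblank.
+ */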
+
+struct intel_sdvo_encode {
+	u8 dvi_rev;
+	u8 hdmi_rev;
+} __attribute__ ((packed));
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_sprite.c b/linux-imx/drivers/gpu/drm/i915/intel_sprite.c
new file mode 100644
index 0000000..c7d25c5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_sprite.c
@@ -0,0 +1,963 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *   Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * New plane/sprite handling.
+ *
+ * The older chips had a separate interface for programming plane related
+ * registers; newer ones are much simpler and we can use the new DRM plane
+ * support.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+static void
+vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
+		 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+		 unsigned int crtc_w, unsigned int crtc_h,
+		 uint32_t x, uint32_t y,
+		 uint32_t src_w, uint32_t src_h)
+{
+	struct drm_device *dev = dplane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(dplane);
+	int pipe = intel_plane->pipe;
+	int plane = intel_plane->plane;
+	u32 sprctl;
+	unsigned long sprsurf_offset, linear_offset;
+	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+	sprctl = I915_READ(SPCNTR(pipe, plane));
+
+	/* Mask out pixel format bits in case we change it */
+	sprctl &= ~SP_PIXFORMAT_MASK;
+	sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
+	sprctl &= ~SP_TILED;
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_YUYV:
+		sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV;
+		break;
+	case DRM_FORMAT_YVYU:
+		sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YVYU;
+		break;
+	case DRM_FORMAT_UYVY:
+		sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_UYVY;
+		break;
+	case DRM_FORMAT_VYUY:
+		sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
+		break;
+	case DRM_FORMAT_RGB565:
+		sprctl |= SP_FORMAT_BGR565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		sprctl |= SP_FORMAT_BGRX8888;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		sprctl |= SP_FORMAT_BGRA8888;
+		break;
+	case DRM_FORMAT_XBGR2101010:
+		sprctl |= SP_FORMAT_RGBX1010102;
+		break;
+	case DRM_FORMAT_ABGR2101010:
+		sprctl |= SP_FORMAT_RGBA1010102;
+		break;
+	case DRM_FORMAT_XBGR8888:
+		sprctl |= SP_FORMAT_RGBX8888;
+		break;
+	case DRM_FORMAT_ABGR8888:
+		sprctl |= SP_FORMAT_RGBA8888;
+		break;
+	default:
+		/*
+		 * If we get here, one of the upper layers failed to filter
+		 * out the unsupported plane formats.
+		 */
+		BUG();
+		break;
+	}
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		sprctl |= SP_TILED;
+
+	sprctl |= SP_ENABLE;
+
+	/* Sizes are 0 based: the hardware expects width/height minus one. */
+	src_w--;
+	src_h--;
+	crtc_w--;
+	crtc_h--;
+
+	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+	I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
+	I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
+
+	linear_offset = y * fb->pitches[0] + x * pixel_size;
+	sprsurf_offset = intel_gen4_compute_page_offset(&x, &y,
+							obj->tiling_mode,
+							pixel_size,
+							fb->pitches[0]);
+	linear_offset -= sprsurf_offset;
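+	/*
+	 * intel_gen4_compute_page_offset() returns an aligned base offset
+	 * for the surface and adjusts x/y to address the same pixel relative
+	 * to that base; the leftover linear_offset is only used for the
+	 * untiled case below.
+	 */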
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
+	else
+		I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
+
+	I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
+	I915_WRITE(SPCNTR(pipe, plane), sprctl);
+	I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
+			     sprsurf_offset);
+	POSTING_READ(SPSURF(pipe, plane));
+}
+
+static void
+vlv_disable_plane(struct drm_plane *dplane)
+{
+	struct drm_device *dev = dplane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(dplane);
+	int pipe = intel_plane->pipe;
+	int plane = intel_plane->plane;
+
+	I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) &
+		   ~SP_ENABLE);
+	/* Activate double buffered register update */
+	I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
+	POSTING_READ(SPSURF(pipe, plane));
+}
+
+static int
+vlv_update_colorkey(struct drm_plane *dplane,
+		    struct drm_intel_sprite_colorkey *key)
+{
+	struct drm_device *dev = dplane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(dplane);
+	int pipe = intel_plane->pipe;
+	int plane = intel_plane->plane;
+	u32 sprctl;
+
+	if (key->flags & I915_SET_COLORKEY_DESTINATION)
+		return -EINVAL;
+
+	I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
+	I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
+	I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
+
+	sprctl = I915_READ(SPCNTR(pipe, plane));
+	sprctl &= ~SP_SOURCE_KEY;
+	if (key->flags & I915_SET_COLORKEY_SOURCE)
+		sprctl |= SP_SOURCE_KEY;
+	I915_WRITE(SPCNTR(pipe, plane), sprctl);
+
+	POSTING_READ(SPKEYMSK(pipe, plane));
+
+	return 0;
+}
+
+static void
+vlv_get_colorkey(struct drm_plane *dplane,
+		 struct drm_intel_sprite_colorkey *key)
+{
+	struct drm_device *dev = dplane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(dplane);
+	int pipe = intel_plane->pipe;
+	int plane = intel_plane->plane;
+	u32 sprctl;
+
+	key->min_value = I915_READ(SPKEYMINVAL(pipe, plane));
+	key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane));
+	key->channel_mask = I915_READ(SPKEYMSK(pipe, plane));
+
+	sprctl = I915_READ(SPCNTR(pipe, plane));
+	if (sprctl & SP_SOURCE_KEY)
+		key->flags = I915_SET_COLORKEY_SOURCE;
+	else
+		key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static void
+ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+		 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+		 unsigned int crtc_w, unsigned int crtc_h,
+		 uint32_t x, uint32_t y,
+		 uint32_t src_w, uint32_t src_h)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	int pipe = intel_plane->pipe;
+	u32 sprctl, sprscale = 0;
+	unsigned long sprsurf_offset, linear_offset;
+	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+	bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
+
+	sprctl = I915_READ(SPRCTL(pipe));
+
+	/* Mask out pixel format bits in case we change it */
+	sprctl &= ~SPRITE_PIXFORMAT_MASK;
+	sprctl &= ~SPRITE_RGB_ORDER_RGBX;
+	sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+	sprctl &= ~SPRITE_TILED;
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_XBGR8888:
+		sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		sprctl |= SPRITE_FORMAT_RGBX888;
+		break;
+	case DRM_FORMAT_YUYV:
+		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+		break;
+	case DRM_FORMAT_YVYU:
+		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+		break;
+	case DRM_FORMAT_UYVY:
+		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+		break;
+	case DRM_FORMAT_VYUY:
+		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+		break;
+	default:
+		BUG();
+	}
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		sprctl |= SPRITE_TILED;
+
+	/* Trickle feed must be disabled here. */
+	sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+	sprctl |= SPRITE_ENABLE;
+
+	if (IS_HASWELL(dev))
+		sprctl |= SPRITE_PIPE_CSC_ENABLE;
+
+	/* Sizes are 0 based: the hardware expects width/height minus one. */
+	src_w--;
+	src_h--;
+	crtc_w--;
+	crtc_h--;
+
+	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+	/*
+	 * IVB workaround: must disable low power watermarks for at least
+	 * one frame before enabling scaling.  LP watermarks can be re-enabled
+	 * when scaling is disabled.
+	 */
+	if (crtc_w != src_w || crtc_h != src_h) {
+		dev_priv->sprite_scaling_enabled |= 1 << pipe;
+
+		if (!scaling_was_enabled) {
+			intel_update_watermarks(dev);
+			intel_wait_for_vblank(dev, pipe);
+		}
+		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
+	} else {
+		dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
+	}
+
+	I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+	I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+
+	linear_offset = y * fb->pitches[0] + x * pixel_size;
+	sprsurf_offset =
+		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+					       pixel_size, fb->pitches[0]);
+	linear_offset -= sprsurf_offset;
+
+	/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
+	 * register */
+	if (IS_HASWELL(dev))
+		I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
+	else if (obj->tiling_mode != I915_TILING_NONE)
+		I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+	else
+		I915_WRITE(SPRLINOFF(pipe), linear_offset);
+
+	I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+	if (intel_plane->can_scale)
+		I915_WRITE(SPRSCALE(pipe), sprscale);
+	I915_WRITE(SPRCTL(pipe), sprctl);
+	I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
+	POSTING_READ(SPRSURF(pipe));
+
+	/* potentially re-enable LP watermarks */
+	if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
+		intel_update_watermarks(dev);
+}
+
+static void
+ivb_disable_plane(struct drm_plane *plane)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	int pipe = intel_plane->pipe;
+	bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
+
+	I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
+	/* Can't leave the scaler enabled... */
+	if (intel_plane->can_scale)
+		I915_WRITE(SPRSCALE(pipe), 0);
+	/* Activate double buffered register update */
+	I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
+	POSTING_READ(SPRSURF(pipe));
+
+	dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
+
+	/* potentially re-enable LP watermarks */
+	if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
+		intel_update_watermarks(dev);
+}
+
+static int
+ivb_update_colorkey(struct drm_plane *plane,
+		    struct drm_intel_sprite_colorkey *key)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane;
+	u32 sprctl;
+	int ret = 0;
+
+	intel_plane = to_intel_plane(plane);
+
+	I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
+	I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
+	I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
+
+	sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+	sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
+	if (key->flags & I915_SET_COLORKEY_DESTINATION)
+		sprctl |= SPRITE_DEST_KEY;
+	else if (key->flags & I915_SET_COLORKEY_SOURCE)
+		sprctl |= SPRITE_SOURCE_KEY;
+	I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
+
+	POSTING_READ(SPRKEYMSK(intel_plane->pipe));
+
+	return ret;
+}
+
+static void
+ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane;
+	u32 sprctl;
+
+	intel_plane = to_intel_plane(plane);
+
+	key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
+	key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
+	key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
+	key->flags = 0;
+
+	sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+
+	if (sprctl & SPRITE_DEST_KEY)
+		key->flags = I915_SET_COLORKEY_DESTINATION;
+	else if (sprctl & SPRITE_SOURCE_KEY)
+		key->flags = I915_SET_COLORKEY_SOURCE;
+	else
+		key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static void
+ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+		 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+		 unsigned int crtc_w, unsigned int crtc_h,
+		 uint32_t x, uint32_t y,
+		 uint32_t src_w, uint32_t src_h)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	int pipe = intel_plane->pipe;
+	unsigned long dvssurf_offset, linear_offset;
+	u32 dvscntr, dvsscale;
+	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+	dvscntr = I915_READ(DVSCNTR(pipe));
+
+	/* Mask out pixel format bits in case we change it */
+	dvscntr &= ~DVS_PIXFORMAT_MASK;
+	dvscntr &= ~DVS_RGB_ORDER_XBGR;
+	dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+	dvscntr &= ~DVS_TILED;
+
+	switch (fb->pixel_format) {
+	case DRM_FORMAT_XBGR8888:
+		dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		dvscntr |= DVS_FORMAT_RGBX888;
+		break;
+	case DRM_FORMAT_YUYV:
+		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+		break;
+	case DRM_FORMAT_YVYU:
+		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+		break;
+	case DRM_FORMAT_UYVY:
+		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+		break;
+	case DRM_FORMAT_VYUY:
+		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+		break;
+	default:
+		BUG();
+	}
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		dvscntr |= DVS_TILED;
+
+	if (IS_GEN6(dev))
+		dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
+	dvscntr |= DVS_ENABLE;
+
+	/* Sizes are 0 based */
+	src_w--;
+	src_h--;
+	crtc_w--;
+	crtc_h--;
+
+	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+	dvsscale = 0;
+	if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
+		dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
+
+	I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+	I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+
+	linear_offset = y * fb->pitches[0] + x * pixel_size;
+	dvssurf_offset =
+		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
+					       pixel_size, fb->pitches[0]);
+	linear_offset -= dvssurf_offset;
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+	else
+		I915_WRITE(DVSLINOFF(pipe), linear_offset);
+
+	I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+	I915_WRITE(DVSSCALE(pipe), dvsscale);
+	I915_WRITE(DVSCNTR(pipe), dvscntr);
+	I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
+	POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+ilk_disable_plane(struct drm_plane *plane)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	int pipe = intel_plane->pipe;
+
+	I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+	/* Disable the scaler */
+	I915_WRITE(DVSSCALE(pipe), 0);
+	/* Flush double buffered register updates */
+	I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
+	POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+intel_enable_primary(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int reg = DSPCNTR(intel_crtc->plane);
+
+	if (!intel_crtc->primary_disabled)
+		return;
+
+	intel_crtc->primary_disabled = false;
+	intel_update_fbc(dev);
+
+	I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+}
+
+static void
+intel_disable_primary(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int reg = DSPCNTR(intel_crtc->plane);
+
+	if (intel_crtc->primary_disabled)
+		return;
+
+	I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+
+	intel_crtc->primary_disabled = true;
+	intel_update_fbc(dev);
+}
+
+static int
+ilk_update_colorkey(struct drm_plane *plane,
+		    struct drm_intel_sprite_colorkey *key)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane;
+	u32 dvscntr;
+	int ret = 0;
+
+	intel_plane = to_intel_plane(plane);
+
+	I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
+	I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
+	I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
+
+	dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+	dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
+	if (key->flags & I915_SET_COLORKEY_DESTINATION)
+		dvscntr |= DVS_DEST_KEY;
+	else if (key->flags & I915_SET_COLORKEY_SOURCE)
+		dvscntr |= DVS_SOURCE_KEY;
+	I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
+
+	POSTING_READ(DVSKEYMSK(intel_plane->pipe));
+
+	return ret;
+}
+
+static void
+ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_plane *intel_plane;
+	u32 dvscntr;
+
+	intel_plane = to_intel_plane(plane);
+
+	key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
+	key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
+	key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
+	key->flags = 0;
+
+	dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+
+	if (dvscntr & DVS_DEST_KEY)
+		key->flags = I915_SET_COLORKEY_DESTINATION;
+	else if (dvscntr & DVS_SOURCE_KEY)
+		key->flags = I915_SET_COLORKEY_SOURCE;
+	else
+		key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+		   struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+		   unsigned int crtc_w, unsigned int crtc_h,
+		   uint32_t src_x, uint32_t src_y,
+		   uint32_t src_w, uint32_t src_h)
+{
+	struct drm_device *dev = plane->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	struct intel_framebuffer *intel_fb;
+	struct drm_i915_gem_object *obj, *old_obj;
+	int pipe = intel_plane->pipe;
+	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+								      pipe);
+	int ret = 0;
+	int x = src_x >> 16, y = src_y >> 16;
+	int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
+	bool disable_primary = false;
+
+	intel_fb = to_intel_framebuffer(fb);
+	obj = intel_fb->obj;
+
+	old_obj = intel_plane->obj;
+
+	intel_plane->crtc_x = crtc_x;
+	intel_plane->crtc_y = crtc_y;
+	intel_plane->crtc_w = crtc_w;
+	intel_plane->crtc_h = crtc_h;
+	intel_plane->src_x = src_x;
+	intel_plane->src_y = src_y;
+	intel_plane->src_w = src_w;
+	intel_plane->src_h = src_h;
+
+	src_w = src_w >> 16;
+	src_h = src_h >> 16;
+
+	/* Pipe must be running... */
+	if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
+		return -EINVAL;
+
+	if (crtc_x >= primary_w || crtc_y >= primary_h)
+		return -EINVAL;
+
+	/* Don't modify another pipe's plane */
+	if (intel_plane->pipe != intel_crtc->pipe)
+		return -EINVAL;
+
+	/* Sprite planes can be linear or x-tiled surfaces */
+	switch (obj->tiling_mode) {
+	case I915_TILING_NONE:
+	case I915_TILING_X:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * Clamp the width & height into the visible area.  Note we don't
+	 * try to scale the source if part of the visible region is offscreen.
+	 * The caller must handle that by adjusting source offset and size.
+	 */
+	if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
+		crtc_w += crtc_x;
+		crtc_x = 0;
+	}
+	if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
+		goto out;
+	if ((crtc_x + crtc_w) > primary_w)
+		crtc_w = primary_w - crtc_x;
+
+	if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
+		crtc_h += crtc_y;
+		crtc_y = 0;
+	}
+	if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
+		goto out;
+	if (crtc_y + crtc_h > primary_h)
+		crtc_h = primary_h - crtc_y;
+
+	if (!crtc_w || !crtc_h) /* Again, nothing to display */
+		goto out;
+
+	/*
+	 * We may not have a scaler, e.g. HSW does not have one anymore.
+	 */
+	if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
+		return -EINVAL;
+
+	/*
+	 * We can take a larger source and scale it down, but
+	 * only so much...  16x is the max on SNB.
+	 */
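+	/* e.g. a 1920x1080 source in a 480x270 window is a 16x area reduction */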
+	if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
+		return -EINVAL;
+
+	/*
+	 * If the sprite is completely covering the primary plane,
+	 * we can disable the primary and save power.
+	 */
+	if ((crtc_x == 0) && (crtc_y == 0) &&
+	    (crtc_w == primary_w) && (crtc_h == primary_h))
+		disable_primary = true;
+
+	mutex_lock(&dev->struct_mutex);
+
+	/* Note that this will apply the VT-d workaround for scanouts,
+	 * which is more restrictive than required for sprites. (The
+	 * primary plane requires 256KiB alignment with 64 PTE padding,
+	 * the sprite planes only require 128KiB alignment and 32 PTE padding.)
+	 */
+	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+	if (ret)
+		goto out_unlock;
+
+	intel_plane->obj = obj;
+
+	/*
+	 * Be sure to re-enable the primary before the sprite is no longer
+	 * covering it fully.
+	 */
+	if (!disable_primary)
+		intel_enable_primary(crtc);
+
+	intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
+				  crtc_w, crtc_h, x, y, src_w, src_h);
+
+	if (disable_primary)
+		intel_disable_primary(crtc);
+
+	/* Unpin old obj after new one is active to avoid ugliness */
+	if (old_obj) {
+		/*
+		 * It's fairly common to simply update the position of
+		 * an existing object.  In that case, we don't need to
+		 * wait for vblank to avoid ugliness, we only need to
+		 * do the pin & ref bookkeeping.
+		 */
+		if (old_obj != obj) {
+			mutex_unlock(&dev->struct_mutex);
+			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+			mutex_lock(&dev->struct_mutex);
+		}
+		intel_unpin_fb_obj(old_obj);
+	}
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+out:
+	return ret;
+}
+
+static int
+intel_disable_plane(struct drm_plane *plane)
+{
+	struct drm_device *dev = plane->dev;
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	int ret = 0;
+
+	if (plane->crtc)
+		intel_enable_primary(plane->crtc);
+	intel_plane->disable_plane(plane);
+
+	if (!intel_plane->obj)
+		goto out;
+
+	intel_wait_for_vblank(dev, intel_plane->pipe);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_unpin_fb_obj(intel_plane->obj);
+	intel_plane->obj = NULL;
+	mutex_unlock(&dev->struct_mutex);
+out:
+	return ret;
+}
+
+static void intel_destroy_plane(struct drm_plane *plane)
+{
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+	intel_disable_plane(plane);
+	drm_plane_cleanup(plane);
+	kfree(intel_plane);
+}
+
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_intel_sprite_colorkey *set = data;
+	struct drm_mode_object *obj;
+	struct drm_plane *plane;
+	struct intel_plane *intel_plane;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	/* Make sure we don't try to enable both src & dest simultaneously */
+	if ((set->flags & (I915_SET_COLORKEY_DESTINATION |
+			   I915_SET_COLORKEY_SOURCE)) ==
+	    (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+
+	obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
+	if (!obj) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	plane = obj_to_plane(obj);
+	intel_plane = to_intel_plane(plane);
+	ret = intel_plane->update_colorkey(plane, set);
+
+out_unlock:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
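+
+/*
+ * Illustrative userspace sketch (not part of this file): a libdrm client
+ * (assuming xf86drm.h and i915_drm.h) would fill in the colorkey struct
+ * and issue the ioctl roughly as below; the fd, plane_id and key values
+ * here are assumptions chosen for the example.
+ *
+ * static int enable_src_colorkey(int fd, uint32_t plane_id)
+ * {
+ *	struct drm_intel_sprite_colorkey ck = {
+ *		.plane_id     = plane_id,
+ *		.min_value    = 0x000000ff,	// key exactly pure blue
+ *		.max_value    = 0x000000ff,
+ *		.channel_mask = 0x00ffffff,	// compare R, G and B channels
+ *		.flags        = I915_SET_COLORKEY_SOURCE,
+ *	};
+ *
+ *	return drmIoctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &ck);
+ * }
+ */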
+
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_intel_sprite_colorkey *get = data;
+	struct drm_mode_object *obj;
+	struct drm_plane *plane;
+	struct intel_plane *intel_plane;
+	int ret = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	drm_modeset_lock_all(dev);
+
+	obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
+	if (!obj) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	plane = obj_to_plane(obj);
+	intel_plane = to_intel_plane(plane);
+	intel_plane->get_colorkey(plane, get);
+
+out_unlock:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+
+void intel_plane_restore(struct drm_plane *plane)
+{
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+
+	if (!plane->crtc || !plane->fb)
+		return;
+
+	intel_update_plane(plane, plane->crtc, plane->fb,
+			   intel_plane->crtc_x, intel_plane->crtc_y,
+			   intel_plane->crtc_w, intel_plane->crtc_h,
+			   intel_plane->src_x, intel_plane->src_y,
+			   intel_plane->src_w, intel_plane->src_h);
+}
+
+static const struct drm_plane_funcs intel_plane_funcs = {
+	.update_plane = intel_update_plane,
+	.disable_plane = intel_disable_plane,
+	.destroy = intel_destroy_plane,
+};
+
+static uint32_t ilk_plane_formats[] = {
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+};
+
+static uint32_t snb_plane_formats[] = {
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+};
+
+static uint32_t vlv_plane_formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_ABGR8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_XBGR2101010,
+	DRM_FORMAT_ABGR2101010,
+	DRM_FORMAT_YUYV,
+	DRM_FORMAT_YVYU,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_VYUY,
+};
+
+int
+intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
+{
+	struct intel_plane *intel_plane;
+	unsigned long possible_crtcs;
+	const uint32_t *plane_formats;
+	int num_plane_formats;
+	int ret;
+
+	if (INTEL_INFO(dev)->gen < 5)
+		return -ENODEV;
+
+	intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
+	if (!intel_plane)
+		return -ENOMEM;
+
+	switch (INTEL_INFO(dev)->gen) {
+	case 5:
+	case 6:
+		intel_plane->can_scale = true;
+		intel_plane->max_downscale = 16;
+		intel_plane->update_plane = ilk_update_plane;
+		intel_plane->disable_plane = ilk_disable_plane;
+		intel_plane->update_colorkey = ilk_update_colorkey;
+		intel_plane->get_colorkey = ilk_get_colorkey;
+
+		if (IS_GEN6(dev)) {
+			plane_formats = snb_plane_formats;
+			num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+		} else {
+			plane_formats = ilk_plane_formats;
+			num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
+		}
+		break;
+
+	case 7:
+		if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
+			intel_plane->can_scale = false;
+		else
+			intel_plane->can_scale = true;
+
+		if (IS_VALLEYVIEW(dev)) {
+			intel_plane->max_downscale = 1;
+			intel_plane->update_plane = vlv_update_plane;
+			intel_plane->disable_plane = vlv_disable_plane;
+			intel_plane->update_colorkey = vlv_update_colorkey;
+			intel_plane->get_colorkey = vlv_get_colorkey;
+
+			plane_formats = vlv_plane_formats;
+			num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+		} else {
+			intel_plane->max_downscale = 2;
+			intel_plane->update_plane = ivb_update_plane;
+			intel_plane->disable_plane = ivb_disable_plane;
+			intel_plane->update_colorkey = ivb_update_colorkey;
+			intel_plane->get_colorkey = ivb_get_colorkey;
+
+			plane_formats = snb_plane_formats;
+			num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+		}
+		break;
+
+	default:
+		kfree(intel_plane);
+		return -ENODEV;
+	}
+
+	intel_plane->pipe = pipe;
+	intel_plane->plane = plane;
+	possible_crtcs = (1 << pipe);
+	ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
+			     &intel_plane_funcs,
+			     plane_formats, num_plane_formats,
+			     false);
+	if (ret)
+		kfree(intel_plane);
+
+	return ret;
+}
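+
+/*
+ * Illustrative caller sketch (an assumption, not code from this file):
+ * modeset init would instantiate one sprite plane per pipe along these
+ * lines, treating -ENODEV (pre-ILK hardware) as non-fatal:
+ *
+ *	for_each_pipe(pipe) {
+ *		ret = intel_plane_init(dev, pipe, 0);
+ *		if (ret && ret != -ENODEV)
+ *			DRM_DEBUG_KMS("sprite init failed on pipe %d\n", pipe);
+ *	}
+ */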
diff --git a/linux-imx/drivers/gpu/drm/i915/intel_tv.c b/linux-imx/drivers/gpu/drm/i915/intel_tv.c
new file mode 100644
index 0000000..a202d8d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/i915/intel_tv.c
@@ -0,0 +1,1681 @@
+/*
+ * Copyright © 2006-2008 Intel Corporation
+ *   Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file
+ * Integrated TV-out support for the 915GM and 945GM.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+
+enum tv_margin {
+	TV_MARGIN_LEFT, TV_MARGIN_TOP,
+	TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM
+};
+
+/** Private structure for the integrated TV support */
+struct intel_tv {
+	struct intel_encoder base;
+
+	int type;
+	const char *tv_format;
+	int margin[4];
+	u32 save_TV_H_CTL_1;
+	u32 save_TV_H_CTL_2;
+	u32 save_TV_H_CTL_3;
+	u32 save_TV_V_CTL_1;
+	u32 save_TV_V_CTL_2;
+	u32 save_TV_V_CTL_3;
+	u32 save_TV_V_CTL_4;
+	u32 save_TV_V_CTL_5;
+	u32 save_TV_V_CTL_6;
+	u32 save_TV_V_CTL_7;
+	u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3;
+
+	u32 save_TV_CSC_Y;
+	u32 save_TV_CSC_Y2;
+	u32 save_TV_CSC_U;
+	u32 save_TV_CSC_U2;
+	u32 save_TV_CSC_V;
+	u32 save_TV_CSC_V2;
+	u32 save_TV_CLR_KNOBS;
+	u32 save_TV_CLR_LEVEL;
+	u32 save_TV_WIN_POS;
+	u32 save_TV_WIN_SIZE;
+	u32 save_TV_FILTER_CTL_1;
+	u32 save_TV_FILTER_CTL_2;
+	u32 save_TV_FILTER_CTL_3;
+
+	u32 save_TV_H_LUMA[60];
+	u32 save_TV_H_CHROMA[60];
+	u32 save_TV_V_LUMA[43];
+	u32 save_TV_V_CHROMA[43];
+
+	u32 save_TV_DAC;
+	u32 save_TV_CTL;
+};
+
+struct video_levels {
+	int blank, black, burst;
+};
+
+struct color_conversion {
+	u16 ry, gy, by, ay;
+	u16 ru, gu, bu, au;
+	u16 rv, gv, bv, av;
+};
+
+static const u32 filter_table[] = {
+	0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+	0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+	0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+	0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+	0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+	0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+	0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+	0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+	0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+	0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+	0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+	0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+	0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+	0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+	0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+	0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
+	0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
+	0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
+	0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
+	0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
+	0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
+	0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
+	0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
+	0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
+	0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
+	0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
+	0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
+	0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
+	0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
+	0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
+	0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0,
+	0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+	0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+	0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+	0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+	0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+	0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+	0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+	0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+	0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+	0x28003100, 0x28002F00, 0x00003100, 0x36403000,
+	0x2D002CC0, 0x30003640, 0x2D0036C0,
+	0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
+	0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
+	0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
+	0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
+	0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
+	0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
+	0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
+	0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
+	0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
+	0x28003100, 0x28002F00, 0x00003100,
+};
+
+/*
+ * Color conversion values have 3 separate fixed point formats:
+ *
+ * 10 bit fields (ay, au)
+ *   1.9 fixed point (b.bbbbbbbbb)
+ * 11 bit fields (ry, by, ru, gu, gv)
+ *   exp.mantissa (ee.mmmmmmmmm)
+ *   ee = 00 = 10^-1 (0.mmmmmmmmm)
+ *   ee = 01 = 10^-2 (0.0mmmmmmmmm)
+ *   ee = 10 = 10^-3 (0.00mmmmmmmmm)
+ *   ee = 11 = 10^-4 (0.000mmmmmmmmm)
+ * 12 bit fields (gy, rv, bu)
+ *   exp.mantissa (eee.mmmmmmmmm)
+ *   eee = 000 = 10^-1 (0.mmmmmmmmm)
+ *   eee = 001 = 10^-2 (0.0mmmmmmmmm)
+ *   eee = 010 = 10^-3 (0.00mmmmmmmmm)
+ *   eee = 011 = 10^-4 (0.000mmmmmmmmm)
+ *   eee = 100 = reserved
+ *   eee = 101 = reserved
+ *   eee = 110 = reserved
+ *   eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation)
+ *
+ * Saturation and contrast are 8 bits, with their own representation:
+ * 8 bit field (saturation, contrast)
+ *   exp.mantissa (ee.mmmmmm)
+ *   ee = 00 = 10^-1 (0.mmmmmm)
+ *   ee = 01 = 10^0 (m.mmmmm)
+ *   ee = 10 = 10^1 (mm.mmmm)
+ *   ee = 11 = 10^2 (mmm.mmm)
+ *
+ * Simple conversion function:
+ *
+ * static u32
+ * float_to_csc_11(float f)
+ * {
+ *     u32 exp;
+ *     u32 mant;
+ *     u32 ret;
+ *
+ *     if (f < 0)
+ *         f = -f;
+ *
+ *     if (f >= 1) {
+ *         exp = 0x7;
+ *	   mant = 1 << 8;
+ *     } else {
+ *         for (exp = 0; exp < 3 && f < 0.5; exp++)
+ *	   f *= 2.0;
+ *         mant = (f * (1 << 9) + 0.5);
+ *         if (mant >= (1 << 9))
+ *             mant = (1 << 9) - 1;
+ *     }
+ *     ret = (exp << 9) | mant;
+ *     return ret;
+ * }
+ */
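+
+/*
+ * By analogy, a sketch of ours (not from the original sources) for the
+ * 8 bit saturation/contrast format above, where the decoded value is
+ * mant * 2^ee / 64:
+ *
+ * static u32
+ * float_to_csc_8(float f)
+ * {
+ *     u32 exp, mant;
+ *
+ *     for (exp = 0; exp < 3 && f >= (1 << exp); exp++)
+ *         ;
+ *     mant = (f * (1 << (6 - exp)) + 0.5);
+ *     if (mant > 0x3f)
+ *         mant = 0x3f;
+ *     return (exp << 6) | mant;
+ * }
+ */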
+
+/*
+ * Behold, magic numbers!  If we plant them they might grow a big
+ * s-video cable to the sky... or something.
+ *
+ * Pre-converted to appropriate hex value.
+ */
+
+/*
+ * PAL & NTSC values for composite & s-video connections
+ */
+static const struct color_conversion ntsc_m_csc_composite = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_m_levels_composite = {
+	.blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion ntsc_m_csc_svideo = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_m_levels_svideo = {
+	.blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion ntsc_j_csc_composite = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
+	.ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
+	.rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_j_levels_composite = {
+	.blank = 225, .black = 225, .burst = 113,
+};
+
+static const struct color_conversion ntsc_j_csc_svideo = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
+	.ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
+	.rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
+};
+
+static const struct video_levels ntsc_j_levels_svideo = {
+	.blank = 266, .black = 266, .burst = 133,
+};
+
+static const struct color_conversion pal_csc_composite = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
+	.ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
+	.rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
+};
+
+static const struct video_levels pal_levels_composite = {
+	.blank = 237, .black = 237, .burst = 118,
+};
+
+static const struct color_conversion pal_csc_svideo = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+	.ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
+	.rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
+};
+
+static const struct video_levels pal_levels_svideo = {
+	.blank = 280, .black = 280, .burst = 139,
+};
+
+static const struct color_conversion pal_m_csc_composite = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels pal_m_levels_composite = {
+	.blank = 225, .black = 267, .burst = 113,
+};
+
+static const struct color_conversion pal_m_csc_svideo = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels pal_m_levels_svideo = {
+	.blank = 266, .black = 316, .burst = 133,
+};
+
+static const struct color_conversion pal_n_csc_composite = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
+	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
+	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
+};
+
+static const struct video_levels pal_n_levels_composite = {
+	.blank = 225, .black = 267, .burst = 118,
+};
+
+static const struct color_conversion pal_n_csc_svideo = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
+	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
+	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
+};
+
+static const struct video_levels pal_n_levels_svideo = {
+	.blank = 266, .black = 316, .burst = 139,
+};
+
+/*
+ * Component connections
+ */
+static const struct color_conversion sdtv_csc_yprpb = {
+	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
+	.ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
+	.rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
+};
+
+static const struct color_conversion sdtv_csc_rgb = {
+	.ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+	.ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+	.rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct color_conversion hdtv_csc_yprpb = {
+	.ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
+	.ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
+	.rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
+};
+
+static const struct color_conversion hdtv_csc_rgb = {
+	.ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
+	.ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
+	.rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
+};
+
+static const struct video_levels component_levels = {
+	.blank = 279, .black = 279, .burst = 0,
+};
+
+struct tv_mode {
+	const char *name;
+	int clock;
+	int refresh; /* in millihertz (for precision) */
+	u32 oversample;
+	int hsync_end, hblank_start, hblank_end, htotal;
+	bool progressive, trilevel_sync, component_only;
+	int vsync_start_f1, vsync_start_f2, vsync_len;
+	bool veq_ena;
+	int veq_start_f1, veq_start_f2, veq_len;
+	int vi_end_f1, vi_end_f2, nbr_end;
+	bool burst_ena;
+	int hburst_start, hburst_len;
+	int vburst_start_f1, vburst_end_f1;
+	int vburst_start_f2, vburst_end_f2;
+	int vburst_start_f3, vburst_end_f3;
+	int vburst_start_f4, vburst_end_f4;
+	/*
+	 * subcarrier programming
+	 */
+	int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
+	u32 sc_reset;
+	bool pal_burst;
+	/*
+	 * blank/black levels
+	 */
+	const struct video_levels *composite_levels, *svideo_levels;
+	const struct color_conversion *composite_color, *svideo_color;
+	const u32 *filter_table;
+	int max_srcw;
+};
+
+/*
+ * Sub carrier DDA
+ *
+ *  I think this works as follows:
+ *
+ *  subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096
+ *
+ * Presumably, when dda3 is added in, it gets to adjust the dda2_inc value
+ *
+ * So,
+ *  dda1_ideal = subcarrier/pixel * 4096
+ *  dda1_inc = floor (dda1_ideal)
+ *  dda2 = dda1_ideal - dda1_inc
+ *
+ *  then pick a ratio for dda2 that gives the closest approximation. If
+ *  you can't get close enough, you can play with dda3 as well. This
+ *  seems likely to happen when dda2 is small as the jumps would be larger
+ *
+ * To invert this,
+ *
+ *  pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size)
+ *
+ * The constants below were all computed using a 107.520MHz clock
+ */
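+
+/*
+ * Minimal sketch of the recipe above (illustrative only; the table
+ * entries below were hand-tuned and need not match this exactly):
+ *
+ * static void
+ * compute_dda1(float subcarrier, float pixel_clock,
+ *              u32 *dda1_inc, float *dda2)
+ * {
+ *     float dda1_ideal = subcarrier / pixel_clock * 4096.0f;
+ *
+ *     *dda1_inc = dda1_ideal;         // floor(dda1_ideal)
+ *     *dda2 = dda1_ideal - *dda1_inc; // approximate as dda2_inc / dda2_size
+ * }
+ */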
+
+/**
+ * Register programming values for TV modes.
+ *
+ * These values already account for the -1 adjustments the hardware
+ * registers require.
+ */
+
+static const struct tv_mode tv_modes[] = {
+	{
+		.name		= "NTSC-M",
+		.clock		= 108000,
+		.refresh	= 59940,
+		.oversample	= TV_OVERSAMPLE_8X,
+		.component_only = 0,
+		/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+
+		.hsync_end	= 64,		    .hblank_end		= 124,
+		.hblank_start	= 836,		    .htotal		= 857,
+
+		.progressive	= false,	    .trilevel_sync = false,
+
+		.vsync_start_f1	= 6,		    .vsync_start_f2	= 7,
+		.vsync_len	= 6,
+
+		.veq_ena	= true,		    .veq_start_f1	= 0,
+		.veq_start_f2	= 1,		    .veq_len		= 18,
+
+		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
+		.nbr_end	= 240,
+
+		.burst_ena	= true,
+		.hburst_start	= 72,		    .hburst_len		= 34,
+		.vburst_start_f1 = 9,		    .vburst_end_f1	= 240,
+		.vburst_start_f2 = 10,		    .vburst_end_f2	= 240,
+		.vburst_start_f3 = 9,		    .vburst_end_f3	= 240,
+		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,
+
+		/* desired 3.5800000 actual 3.5800000 clock 107.52 */
+		.dda1_inc	=    135,
+		.dda2_inc	=  20800,	    .dda2_size		=  27456,
+		.dda3_inc	=      0,	    .dda3_size		=      0,
+		.sc_reset	= TV_SC_RESET_EVERY_4,
+		.pal_burst	= false,
+
+		.composite_levels = &ntsc_m_levels_composite,
+		.composite_color = &ntsc_m_csc_composite,
+		.svideo_levels  = &ntsc_m_levels_svideo,
+		.svideo_color = &ntsc_m_csc_svideo,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name		= "NTSC-443",
+		.clock		= 108000,
+		.refresh	= 59940,
+		.oversample	= TV_OVERSAMPLE_8X,
+		.component_only = 0,
+		/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
+		.hsync_end	= 64,		    .hblank_end		= 124,
+		.hblank_start	= 836,		    .htotal		= 857,
+
+		.progressive	= false,	    .trilevel_sync = false,
+
+		.vsync_start_f1 = 6,		    .vsync_start_f2	= 7,
+		.vsync_len	= 6,
+
+		.veq_ena	= true,		    .veq_start_f1	= 0,
+		.veq_start_f2	= 1,		    .veq_len		= 18,
+
+		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
+		.nbr_end	= 240,
+
+		.burst_ena	= true,
+		.hburst_start	= 72,		    .hburst_len		= 34,
+		.vburst_start_f1 = 9,		    .vburst_end_f1	= 240,
+		.vburst_start_f2 = 10,		    .vburst_end_f2	= 240,
+		.vburst_start_f3 = 9,		    .vburst_end_f3	= 240,
+		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,
+
+		/* desired 4.4336180 actual 4.4336180 clock 107.52 */
+		.dda1_inc       =    168,
+		.dda2_inc       =   4093,       .dda2_size      =  27456,
+		.dda3_inc       =    310,       .dda3_size      =    525,
+		.sc_reset   = TV_SC_RESET_NEVER,
+		.pal_burst  = false,
+
+		.composite_levels = &ntsc_m_levels_composite,
+		.composite_color = &ntsc_m_csc_composite,
+		.svideo_levels  = &ntsc_m_levels_svideo,
+		.svideo_color = &ntsc_m_csc_svideo,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name		= "NTSC-J",
+		.clock		= 108000,
+		.refresh	= 59940,
+		.oversample	= TV_OVERSAMPLE_8X,
+		.component_only = 0,
+
+		/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+		.hsync_end	= 64,		    .hblank_end		= 124,
+		.hblank_start = 836,	    .htotal		= 857,
+
+		.progressive	= false,    .trilevel_sync = false,
+
+		.vsync_start_f1	= 6,	    .vsync_start_f2	= 7,
+		.vsync_len	= 6,
+
+		.veq_ena      = true,	    .veq_start_f1	= 0,
+		.veq_start_f2 = 1,	    .veq_len		= 18,
+
+		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
+		.nbr_end	= 240,
+
+		.burst_ena	= true,
+		.hburst_start	= 72,		    .hburst_len		= 34,
+		.vburst_start_f1 = 9,		    .vburst_end_f1	= 240,
+		.vburst_start_f2 = 10,		    .vburst_end_f2	= 240,
+		.vburst_start_f3 = 9,		    .vburst_end_f3	= 240,
+		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,
+
+		/* desired 3.5800000 actual 3.5800000 clock 107.52 */
+		.dda1_inc	=    135,
+		.dda2_inc	=  20800,	    .dda2_size		=  27456,
+		.dda3_inc	=      0,	    .dda3_size		=      0,
+		.sc_reset	= TV_SC_RESET_EVERY_4,
+		.pal_burst	= false,
+
+		.composite_levels = &ntsc_j_levels_composite,
+		.composite_color = &ntsc_j_csc_composite,
+		.svideo_levels  = &ntsc_j_levels_svideo,
+		.svideo_color = &ntsc_j_csc_svideo,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name		= "PAL-M",
+		.clock		= 108000,
+		.refresh	= 59940,
+		.oversample	= TV_OVERSAMPLE_8X,
+		.component_only = 0,
+
+		/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+		.hsync_end	= 64,		  .hblank_end		= 124,
+		.hblank_start = 836,	  .htotal		= 857,
+
+		.progressive	= false,	    .trilevel_sync = false,
+
+		.vsync_start_f1	= 6,		    .vsync_start_f2	= 7,
+		.vsync_len	= 6,
+
+		.veq_ena	= true,		    .veq_start_f1	= 0,
+		.veq_start_f2	= 1,		    .veq_len		= 18,
+
+		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
+		.nbr_end	= 240,
+
+		.burst_ena	= true,
+		.hburst_start	= 72,		    .hburst_len		= 34,
+		.vburst_start_f1 = 9,		    .vburst_end_f1	= 240,
+		.vburst_start_f2 = 10,		    .vburst_end_f2	= 240,
+		.vburst_start_f3 = 9,		    .vburst_end_f3	= 240,
+		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,
+
+		/* desired 3.5800000 actual 3.5800000 clock 107.52 */
+		.dda1_inc	=    135,
+		.dda2_inc	=  16704,	    .dda2_size		=  27456,
+		.dda3_inc	=      0,	    .dda3_size		=      0,
+		.sc_reset	= TV_SC_RESET_EVERY_8,
+		.pal_burst  = true,
+
+		.composite_levels = &pal_m_levels_composite,
+		.composite_color = &pal_m_csc_composite,
+		.svideo_levels  = &pal_m_levels_svideo,
+		.svideo_color = &pal_m_csc_svideo,
+
+		.filter_table = filter_table,
+	},
+	{
+		/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+		.name	    = "PAL-N",
+		.clock		= 108000,
+		.refresh	= 50000,
+		.oversample	= TV_OVERSAMPLE_8X,
+		.component_only = 0,
+
+		.hsync_end	= 64,		    .hblank_end		= 128,
+		.hblank_start = 844,	    .htotal		= 863,
+
+		.progressive  = false,    .trilevel_sync = false,
+
+		.vsync_start_f1	= 6,	   .vsync_start_f2	= 7,
+		.vsync_len	= 6,
+
+		.veq_ena	= true,		    .veq_start_f1	= 0,
+		.veq_start_f2	= 1,		    .veq_len		= 18,
+
+		.vi_end_f1	= 24,		    .vi_end_f2		= 25,
+		.nbr_end	= 286,
+
+		.burst_ena	= true,
+		.hburst_start = 73,	    .hburst_len		= 34,
+		.vburst_start_f1 = 8,	    .vburst_end_f1	= 285,
+		.vburst_start_f2 = 8,	    .vburst_end_f2	= 286,
+		.vburst_start_f3 = 9,	    .vburst_end_f3	= 286,
+		.vburst_start_f4 = 9,	    .vburst_end_f4	= 285,
+
+		/* desired 4.4336180 actual 4.4336180 clock 107.52 */
+		.dda1_inc       =    135,
+		.dda2_inc       =  23578,       .dda2_size      =  27648,
+		.dda3_inc       =    134,       .dda3_size      =    625,
+		.sc_reset   = TV_SC_RESET_EVERY_8,
+		.pal_burst  = true,
+
+		.composite_levels = &pal_n_levels_composite,
+		.composite_color = &pal_n_csc_composite,
+		.svideo_levels  = &pal_n_levels_svideo,
+		.svideo_color = &pal_n_csc_svideo,
+
+		.filter_table = filter_table,
+	},
+	{
+		/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+		.name	    = "PAL",
+		.clock		= 108000,
+		.refresh	= 50000,
+		.oversample	= TV_OVERSAMPLE_8X,
+		.component_only = 0,
+
+		.hsync_end	= 64,		    .hblank_end		= 142,
+		.hblank_start	= 844,	    .htotal		= 863,
+
+		.progressive	= false,    .trilevel_sync = false,
+
+		.vsync_start_f1	= 5,	    .vsync_start_f2	= 6,
+		.vsync_len	= 5,
+
+		.veq_ena	= true,	    .veq_start_f1	= 0,
+		.veq_start_f2	= 1,	    .veq_len		= 15,
+
+		.vi_end_f1	= 24,		    .vi_end_f2		= 25,
+		.nbr_end	= 286,
+
+		.burst_ena	= true,
+		.hburst_start	= 73,		    .hburst_len		= 32,
+		.vburst_start_f1 = 8,		    .vburst_end_f1	= 285,
+		.vburst_start_f2 = 8,		    .vburst_end_f2	= 286,
+		.vburst_start_f3 = 9,		    .vburst_end_f3	= 286,
+		.vburst_start_f4 = 9,		    .vburst_end_f4	= 285,
+
+		/* desired 4.4336180 actual 4.4336180 clock 107.52 */
+		.dda1_inc       =    168,
+		.dda2_inc       =   4122,       .dda2_size      =  27648,
+		.dda3_inc       =     67,       .dda3_size      =    625,
+		.sc_reset   = TV_SC_RESET_EVERY_8,
+		.pal_burst  = true,
+
+		.composite_levels = &pal_levels_composite,
+		.composite_color = &pal_csc_composite,
+		.svideo_levels  = &pal_levels_svideo,
+		.svideo_color = &pal_csc_svideo,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name       = "480p",
+		.clock		= 107520,
+		.refresh	= 59940,
+		.oversample     = TV_OVERSAMPLE_4X,
+		.component_only = 1,
+
+		.hsync_end      = 64,               .hblank_end         = 122,
+		.hblank_start   = 842,              .htotal             = 857,
+
+		.progressive    = true,		    .trilevel_sync = false,
+
+		.vsync_start_f1 = 12,               .vsync_start_f2     = 12,
+		.vsync_len      = 12,
+
+		.veq_ena        = false,
+
+		.vi_end_f1      = 44,               .vi_end_f2          = 44,
+		.nbr_end        = 479,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name       = "576p",
+		.clock		= 107520,
+		.refresh	= 50000,
+		.oversample     = TV_OVERSAMPLE_4X,
+		.component_only = 1,
+
+		.hsync_end      = 64,               .hblank_end         = 139,
+		.hblank_start   = 859,              .htotal             = 863,
+
+		.progressive    = true,		    .trilevel_sync = false,
+
+		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+		.vsync_len      = 10,
+
+		.veq_ena        = false,
+
+		.vi_end_f1      = 48,               .vi_end_f2          = 48,
+		.nbr_end        = 575,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name       = "720p@60Hz",
+		.clock		= 148800,
+		.refresh	= 60000,
+		.oversample     = TV_OVERSAMPLE_2X,
+		.component_only = 1,
+
+		.hsync_end      = 80,               .hblank_end         = 300,
+		.hblank_start   = 1580,             .htotal             = 1649,
+
+		.progressive	= true,		    .trilevel_sync = true,
+
+		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+		.vsync_len      = 10,
+
+		.veq_ena        = false,
+
+		.vi_end_f1      = 29,               .vi_end_f2          = 29,
+		.nbr_end        = 719,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name       = "720p@50Hz",
+		.clock		= 148800,
+		.refresh	= 50000,
+		.oversample     = TV_OVERSAMPLE_2X,
+		.component_only = 1,
+
+		.hsync_end      = 80,               .hblank_end         = 300,
+		.hblank_start   = 1580,             .htotal             = 1979,
+
+		.progressive	= true,		    .trilevel_sync = true,
+
+		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+		.vsync_len      = 10,
+
+		.veq_ena        = false,
+
+		.vi_end_f1      = 29,               .vi_end_f2          = 29,
+		.nbr_end        = 719,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+		.max_srcw = 800
+	},
+	{
+		.name       = "1080i@50Hz",
+		.clock		= 148800,
+		.refresh	= 50000,
+		.oversample     = TV_OVERSAMPLE_2X,
+		.component_only = 1,
+
+		.hsync_end      = 88,               .hblank_end         = 235,
+		.hblank_start   = 2155,             .htotal             = 2639,
+
+		.progressive	= false,	  .trilevel_sync = true,
+
+		.vsync_start_f1 = 4,              .vsync_start_f2     = 5,
+		.vsync_len      = 10,
+
+		.veq_ena	= true,	    .veq_start_f1	= 4,
+		.veq_start_f2   = 4,	    .veq_len		= 10,
+
+		.vi_end_f1      = 21,           .vi_end_f2          = 22,
+		.nbr_end        = 539,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+	},
+	{
+		.name       = "1080i@60Hz",
+		.clock		= 148800,
+		.refresh	= 60000,
+		.oversample     = TV_OVERSAMPLE_2X,
+		.component_only = 1,
+
+		.hsync_end      = 88,               .hblank_end         = 235,
+		.hblank_start   = 2155,             .htotal             = 2199,
+
+		.progressive	= false,	    .trilevel_sync = true,
+
+		.vsync_start_f1 = 4,               .vsync_start_f2     = 5,
+		.vsync_len      = 10,
+
+		.veq_ena	= true,		    .veq_start_f1	= 4,
+		.veq_start_f2	= 4,		    .veq_len		= 10,
+
+		.vi_end_f1      = 21,               .vi_end_f2          = 22,
+		.nbr_end        = 539,
+
+		.burst_ena      = false,
+
+		.filter_table = filter_table,
+	},
+};
+
+static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_tv, base.base);
+}
+
+static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_tv,
+			    base);
+}
+
+static bool
+intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp = I915_READ(TV_CTL);
+
+	if (!(tmp & TV_ENC_ENABLE))
+		return false;
+
+	*pipe = PORT_TO_PIPE(tmp);
+
+	return true;
+}
+
+static void
+intel_enable_tv(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+}
+
+static void
+intel_disable_tv(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
+}
+
+static const struct tv_mode *
+intel_tv_mode_lookup(const char *tv_format)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+		const struct tv_mode *tv_mode = &tv_modes[i];
+
+		if (!strcmp(tv_format, tv_mode->name))
+			return tv_mode;
+	}
+	return NULL;
+}
+
+static const struct tv_mode *
+intel_tv_mode_find(struct intel_tv *intel_tv)
+{
+	return intel_tv_mode_lookup(intel_tv->tv_format);
+}
+
+static enum drm_mode_status
+intel_tv_mode_valid(struct drm_connector *connector,
+		    struct drm_display_mode *mode)
+{
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+	/* Ensure TV refresh is close to desired refresh */
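+	/* e.g. NTSC-M (refresh 59940) accepts an integer vrefresh of 59 or 60 */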
+	if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
+				< 1000)
+		return MODE_OK;
+
+	return MODE_CLOCK_RANGE;
+}
+
+
+static bool
+intel_tv_compute_config(struct intel_encoder *encoder,
+			struct intel_crtc_config *pipe_config)
+{
+	struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+	if (!tv_mode)
+		return false;
+
+	if (intel_encoder_check_is_cloned(&intel_tv->base))
+		return false;
+
+	pipe_config->adjusted_mode.clock = tv_mode->clock;
+	DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
+	pipe_config->pipe_bpp = 8*3;
+
+	/* TV has its own notion of sync and other mode flags, so clear them. */
+	pipe_config->adjusted_mode.flags = 0;
+
+	/*
+	 * FIXME: We don't check whether the input mode is actually what we want
+	 * or whether userspace is doing something stupid.
+	 */
+
+	return true;
+}
+
+static void
+intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+		  struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+	u32 tv_ctl;
+	u32 hctl1, hctl2, hctl3;
+	u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
+	u32 scctl1, scctl2, scctl3;
+	int i, j;
+	const struct video_levels *video_levels;
+	const struct color_conversion *color_conversion;
+	bool burst_ena;
+	int pipe = intel_crtc->pipe;
+
+	if (!tv_mode)
+		return;	/* can't happen (mode_prepare prevents this) */
+
+	tv_ctl = I915_READ(TV_CTL);
+	tv_ctl &= TV_CTL_SAVE;
+
+	switch (intel_tv->type) {
+	default:
+	case DRM_MODE_CONNECTOR_Unknown:
+	case DRM_MODE_CONNECTOR_Composite:
+		tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
+		video_levels = tv_mode->composite_levels;
+		color_conversion = tv_mode->composite_color;
+		burst_ena = tv_mode->burst_ena;
+		break;
+	case DRM_MODE_CONNECTOR_Component:
+		tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
+		video_levels = &component_levels;
+		if (tv_mode->burst_ena)
+			color_conversion = &sdtv_csc_yprpb;
+		else
+			color_conversion = &hdtv_csc_yprpb;
+		burst_ena = false;
+		break;
+	case DRM_MODE_CONNECTOR_SVIDEO:
+		tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
+		video_levels = tv_mode->svideo_levels;
+		color_conversion = tv_mode->svideo_color;
+		burst_ena = tv_mode->burst_ena;
+		break;
+	}
+	hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
+		(tv_mode->htotal << TV_HTOTAL_SHIFT);
+
+	hctl2 = (tv_mode->hburst_start << 16) |
+		(tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
+
+	if (burst_ena)
+		hctl2 |= TV_BURST_ENA;
+
+	hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
+		(tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
+
+	vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
+		(tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
+		(tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
+
+	vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
+		(tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
+		(tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
+
+	vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
+		(tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
+		(tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
+
+	if (tv_mode->veq_ena)
+		vctl3 |= TV_EQUAL_ENA;
+
+	vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
+		(tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
+
+	vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
+		(tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
+
+	vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
+		(tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
+
+	vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
+		(tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
+
+	if (intel_crtc->pipe == 1)
+		tv_ctl |= TV_ENC_PIPEB_SELECT;
+	tv_ctl |= tv_mode->oversample;
+
+	if (tv_mode->progressive)
+		tv_ctl |= TV_PROGRESSIVE;
+	if (tv_mode->trilevel_sync)
+		tv_ctl |= TV_TRILEVEL_SYNC;
+	if (tv_mode->pal_burst)
+		tv_ctl |= TV_PAL_BURST;
+
+	scctl1 = 0;
+	if (tv_mode->dda1_inc)
+		scctl1 |= TV_SC_DDA1_EN;
+	if (tv_mode->dda2_inc)
+		scctl1 |= TV_SC_DDA2_EN;
+	if (tv_mode->dda3_inc)
+		scctl1 |= TV_SC_DDA3_EN;
+	scctl1 |= tv_mode->sc_reset;
+	if (video_levels)
+		scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+	scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
+
+	scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
+		tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT;
+
+	scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT |
+		tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
+
+	/* Enable two fixes for the chips that need them. */
+	if (dev->pci_device < 0x2772)
+		tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
+
+	I915_WRITE(TV_H_CTL_1, hctl1);
+	I915_WRITE(TV_H_CTL_2, hctl2);
+	I915_WRITE(TV_H_CTL_3, hctl3);
+	I915_WRITE(TV_V_CTL_1, vctl1);
+	I915_WRITE(TV_V_CTL_2, vctl2);
+	I915_WRITE(TV_V_CTL_3, vctl3);
+	I915_WRITE(TV_V_CTL_4, vctl4);
+	I915_WRITE(TV_V_CTL_5, vctl5);
+	I915_WRITE(TV_V_CTL_6, vctl6);
+	I915_WRITE(TV_V_CTL_7, vctl7);
+	I915_WRITE(TV_SC_CTL_1, scctl1);
+	I915_WRITE(TV_SC_CTL_2, scctl2);
+	I915_WRITE(TV_SC_CTL_3, scctl3);
+
+	if (color_conversion) {
+		I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
+			   color_conversion->gy);
+		I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
+			   color_conversion->ay);
+		I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
+			   color_conversion->gu);
+		I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
+			   color_conversion->au);
+		I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
+			   color_conversion->gv);
+		I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
+			   color_conversion->av);
+	}
+
+	if (INTEL_INFO(dev)->gen >= 4)
+		I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+	else
+		I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+
+	if (video_levels)
+		I915_WRITE(TV_CLR_LEVEL,
+			   ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
+			    (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
+	{
+		int pipeconf_reg = PIPECONF(pipe);
+		int dspcntr_reg = DSPCNTR(intel_crtc->plane);
+		int pipeconf = I915_READ(pipeconf_reg);
+		int dspcntr = I915_READ(dspcntr_reg);
+		int xpos = 0x0, ypos = 0x0;
+		unsigned int xsize, ysize;
+		/* Pipe must be off here */
+		I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
+		intel_flush_display_plane(dev_priv, intel_crtc->plane);
+
+		/* Wait for vblank for the disable to take effect */
+		if (IS_GEN2(dev))
+			intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+		I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
+		/* Wait for vblank for the disable to take effect. */
+		intel_wait_for_pipe_off(dev, intel_crtc->pipe);
+
+		/* Filter ctl must be set before TV_WIN_SIZE */
+		I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
+		xsize = tv_mode->hblank_start - tv_mode->hblank_end;
+		if (tv_mode->progressive)
+			ysize = tv_mode->nbr_end + 1;
+		else
+			ysize = 2*tv_mode->nbr_end + 1;
+
+		xpos += intel_tv->margin[TV_MARGIN_LEFT];
+		ypos += intel_tv->margin[TV_MARGIN_TOP];
+		xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
+			  intel_tv->margin[TV_MARGIN_RIGHT]);
+		ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
+			  intel_tv->margin[TV_MARGIN_BOTTOM]);
+		I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
+		I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
+
+		I915_WRITE(pipeconf_reg, pipeconf);
+		I915_WRITE(dspcntr_reg, dspcntr);
+		intel_flush_display_plane(dev_priv, intel_crtc->plane);
+	}
+
+	j = 0;
+	for (i = 0; i < 60; i++)
+		I915_WRITE(TV_H_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
+	for (i = 0; i < 60; i++)
+		I915_WRITE(TV_H_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
+	for (i = 0; i < 43; i++)
+		I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
+	for (i = 0; i < 43; i++)
+		I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
+	I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
+	I915_WRITE(TV_CTL, tv_ctl);
+}
+
+static const struct drm_display_mode reported_modes[] = {
+	{
+		.name = "NTSC 480i",
+		.clock = 107520,
+		.hdisplay = 1280,
+		.hsync_start = 1368,
+		.hsync_end = 1496,
+		.htotal = 1712,
+
+		.vdisplay = 1024,
+		.vsync_start = 1027,
+		.vsync_end = 1034,
+		.vtotal = 1104,
+		.type = DRM_MODE_TYPE_DRIVER,
+	},
+};
+
+/**
+ * Detects TV presence by checking for load.
+ *
+ * Requires that the current pipe's DPLL is active.
+ *
+ * \return true if TV is connected.
+ * \return false if TV is disconnected.
+ */
+static int
+intel_tv_detect_type(struct intel_tv *intel_tv,
+		      struct drm_connector *connector)
+{
+	struct drm_encoder *encoder = &intel_tv->base.base;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+	u32 tv_ctl, save_tv_ctl;
+	u32 tv_dac, save_tv_dac;
+	int type;
+
+	/* Disable TV interrupts around load detect or we'll recurse */
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		i915_disable_pipestat(dev_priv, 0,
+				      PIPE_HOTPLUG_INTERRUPT_ENABLE |
+				      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
+
+	save_tv_dac = tv_dac = I915_READ(TV_DAC);
+	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+
+	/* Poll for TV detection */
+	tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
+	tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+	if (intel_crtc->pipe == 1)
+		tv_ctl |= TV_ENC_PIPEB_SELECT;
+	else
+		tv_ctl &= ~TV_ENC_PIPEB_SELECT;
+
+	tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
+	tv_dac |= (TVDAC_STATE_CHG_EN |
+		   TVDAC_A_SENSE_CTL |
+		   TVDAC_B_SENSE_CTL |
+		   TVDAC_C_SENSE_CTL |
+		   DAC_CTL_OVERRIDE |
+		   DAC_A_0_7_V |
+		   DAC_B_0_7_V |
+		   DAC_C_0_7_V);
+
+	/*
+	 * The TV sense state should be cleared to zero on the Cantiga (GM45)
+	 * platform; otherwise the TV is misdetected. This is a hardware
+	 * requirement.
+	 */
+	if (IS_GM45(dev))
+		tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
+			    TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
+
+	I915_WRITE(TV_CTL, tv_ctl);
+	I915_WRITE(TV_DAC, tv_dac);
+	POSTING_READ(TV_DAC);
+
+	intel_wait_for_vblank(intel_tv->base.base.dev,
+			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
+	type = -1;
+	tv_dac = I915_READ(TV_DAC);
+	DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+	/*
+	 *  A B C
+	 *  0 1 1 Composite
+	 *  1 0 X S-Video
+	 *  0 0 0 Component
+	 */
+	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+		DRM_DEBUG_KMS("Detected Composite TV connection\n");
+		type = DRM_MODE_CONNECTOR_Composite;
+	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+		type = DRM_MODE_CONNECTOR_SVIDEO;
+	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+		DRM_DEBUG_KMS("Detected Component TV connection\n");
+		type = DRM_MODE_CONNECTOR_Component;
+	} else {
+		DRM_DEBUG_KMS("Unrecognised TV connection\n");
+		type = -1;
+	}
+
+	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+	I915_WRITE(TV_CTL, save_tv_ctl);
+	POSTING_READ(TV_CTL);
+
+	/* For unknown reasons the hw barfs if we don't do this vblank wait. */
+	intel_wait_for_vblank(intel_tv->base.base.dev,
+			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
+	/* Restore interrupt config */
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		i915_enable_pipestat(dev_priv, 0,
+				     PIPE_HOTPLUG_INTERRUPT_ENABLE |
+				     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
+
+	return type;
+}
+
+/*
+ * Set an accurate TV format according to the connector type, i.e. a
+ * Component connection should not be assigned a composite format such
+ * as NTSC or PAL.
+ */
+static void intel_tv_find_better_format(struct drm_connector *connector)
+{
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+	int i;
+
+	if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+		tv_mode->component_only)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+		tv_mode = tv_modes + i;
+
+		if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
+			tv_mode->component_only)
+			break;
+	}
+
+	intel_tv->tv_format = tv_mode->name;
+	drm_object_property_set_value(&connector->base,
+		connector->dev->mode_config.tv_mode_property, i);
+}
+
+/**
+ * Detect the TV connection.
+ *
+ * Load detection requires a pipe programmed for the TV; unless a forced
+ * probe can borrow one, this returns connector_status_unknown or the
+ * cached connector status.
+ */
+static enum drm_connector_status
+intel_tv_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_display_mode mode;
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	int type;
+
+	mode = reported_modes[0];
+
+	if (force) {
+		struct intel_load_detect_pipe tmp;
+
+		if (intel_get_load_detect_pipe(connector, &mode, &tmp)) {
+			type = intel_tv_detect_type(intel_tv, connector);
+			intel_release_load_detect_pipe(connector, &tmp);
+		} else
+			return connector_status_unknown;
+	} else
+		return connector->status;
+
+	if (type < 0)
+		return connector_status_disconnected;
+
+	intel_tv->type = type;
+	intel_tv_find_better_format(connector);
+
+	return connector_status_connected;
+}
+
+static const struct input_res {
+	const char *name;
+	int w, h;
+} input_res_table[] = {
+	{"640x480", 640, 480},
+	{"800x600", 800, 600},
+	{"1024x768", 1024, 768},
+	{"1280x1024", 1280, 1024},
+	{"848x480", 848, 480},
+	{"1280x720", 1280, 720},
+	{"1920x1080", 1920, 1080},
+};
+
+/*
+ * Choose the preferred mode according to the line count of the TV format.
+ */
+static void
+intel_tv_chose_preferred_modes(struct drm_connector *connector,
+			       struct drm_display_mode *mode_ptr)
+{
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+
+	if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
+		mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+	else if (tv_mode->nbr_end > 480) {
+		if (tv_mode->progressive && tv_mode->nbr_end < 720) {
+			if (mode_ptr->vdisplay == 720)
+				mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+		} else if (mode_ptr->vdisplay == 1080)
+			mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
+	}
+}
+
+/**
+ * Build the list of modes for the TV connector.
+ *
+ * TV connections cannot be probed for modes, so report a fixed set of
+ * input resolutions and synthesize timings for each.
+ */
+static int
+intel_tv_get_modes(struct drm_connector *connector)
+{
+	struct drm_display_mode *mode_ptr;
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+	int j, count = 0;
+	u64 tmp;
+
+	for (j = 0; j < ARRAY_SIZE(input_res_table); j++) {
+		const struct input_res *input = &input_res_table[j];
+		unsigned int hactive_s = input->w;
+		unsigned int vactive_s = input->h;
+
+		if (tv_mode->max_srcw && input->w > tv_mode->max_srcw)
+			continue;
+
+		if (input->w > 1024 && (!tv_mode->progressive
+					&& !tv_mode->component_only))
+			continue;
+
+		mode_ptr = drm_mode_create(connector->dev);
+		if (!mode_ptr)
+			continue;
+		strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
+
+		mode_ptr->hdisplay = hactive_s;
+		mode_ptr->hsync_start = hactive_s + 1;
+		mode_ptr->hsync_end = hactive_s + 64;
+		if (mode_ptr->hsync_end <= mode_ptr->hsync_start)
+			mode_ptr->hsync_end = mode_ptr->hsync_start + 1;
+		mode_ptr->htotal = hactive_s + 96;
+
+		mode_ptr->vdisplay = vactive_s;
+		mode_ptr->vsync_start = vactive_s + 1;
+		mode_ptr->vsync_end = vactive_s + 32;
+		if (mode_ptr->vsync_end <= mode_ptr->vsync_start)
+			mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
+		mode_ptr->vtotal = vactive_s + 33;
+
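+		/*
+		 * Derive the pixel clock from the TV refresh rate and the
+		 * synthesized timings; assuming refresh is in millihertz
+		 * and clock in kHz, a 640x480 source at 59.94 Hz works out
+		 * to 59940 * 513 * 736 / 1000000 ~= 22631 kHz.
+		 */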
+		tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
+		tmp *= mode_ptr->htotal;
+		tmp = div_u64(tmp, 1000000);
+		mode_ptr->clock = (int) tmp;
+
+		mode_ptr->type = DRM_MODE_TYPE_DRIVER;
+		intel_tv_chose_preferred_modes(connector, mode_ptr);
+		drm_mode_probed_add(connector, mode_ptr);
+		count++;
+	}
+
+	return count;
+}
+
+static void
+intel_tv_destroy(struct drm_connector *connector)
+{
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+
+static int
+intel_tv_set_property(struct drm_connector *connector, struct drm_property *property,
+		      uint64_t val)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	struct drm_crtc *crtc = intel_tv->base.base.crtc;
+	int ret = 0;
+	bool changed = false;
+
+	ret = drm_object_property_set_value(&connector->base, property, val);
+	if (ret < 0)
+		goto out;
+
+	if (property == dev->mode_config.tv_left_margin_property &&
+		intel_tv->margin[TV_MARGIN_LEFT] != val) {
+		intel_tv->margin[TV_MARGIN_LEFT] = val;
+		changed = true;
+	} else if (property == dev->mode_config.tv_right_margin_property &&
+		intel_tv->margin[TV_MARGIN_RIGHT] != val) {
+		intel_tv->margin[TV_MARGIN_RIGHT] = val;
+		changed = true;
+	} else if (property == dev->mode_config.tv_top_margin_property &&
+		intel_tv->margin[TV_MARGIN_TOP] != val) {
+		intel_tv->margin[TV_MARGIN_TOP] = val;
+		changed = true;
+	} else if (property == dev->mode_config.tv_bottom_margin_property &&
+		intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
+		intel_tv->margin[TV_MARGIN_BOTTOM] = val;
+		changed = true;
+	} else if (property == dev->mode_config.tv_mode_property) {
+		if (val >= ARRAY_SIZE(tv_modes)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
+			goto out;
+
+		intel_tv->tv_format = tv_modes[val].name;
+		changed = true;
+	} else {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (changed && crtc)
+		intel_crtc_restore_mode(crtc);
+out:
+	return ret;
+}
+
+static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
+	.mode_set = intel_tv_mode_set,
+};
+
+static const struct drm_connector_funcs intel_tv_connector_funcs = {
+	.dpms = intel_connector_dpms,
+	.detect = intel_tv_detect,
+	.destroy = intel_tv_destroy,
+	.set_property = intel_tv_set_property,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
+	.mode_valid = intel_tv_mode_valid,
+	.get_modes = intel_tv_get_modes,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_encoder_funcs intel_tv_enc_funcs = {
+	.destroy = intel_encoder_destroy,
+};
+
+/*
+ * Enumerate the child device array parsed from the VBT to check whether
+ * the integrated TV is present.
+ * If it is present, return 1; if not, return 0.
+ * If no child device was parsed from the VBT, assume that the TV is
+ * present.
+ */
+static int tv_is_present_in_vbt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i, ret;
+
+	if (!dev_priv->child_dev_num)
+		return 1;
+
+	ret = 0;
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+		/*
+		 * If the device type is not TV, continue.
+		 */
+		if (p_child->device_type != DEVICE_TYPE_INT_TV &&
+			p_child->device_type != DEVICE_TYPE_TV)
+			continue;
+		/* The device is regarded as present only when the
+		 * addin_offset is non-zero.
+		 */
+		if (p_child->addin_offset) {
+			ret = 1;
+			break;
+		}
+	}
+	return ret;
+}
+
+void
+intel_tv_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_connector *connector;
+	struct intel_tv *intel_tv;
+	struct intel_encoder *intel_encoder;
+	struct intel_connector *intel_connector;
+	u32 tv_dac_on, tv_dac_off, save_tv_dac;
+	char *tv_format_names[ARRAY_SIZE(tv_modes)];
+	int i, initial_mode = 0;
+
+	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+		return;
+
+	if (!tv_is_present_in_vbt(dev)) {
+		DRM_DEBUG_KMS("Integrated TV is not present.\n");
+		return;
+	}
+	/* Even if we have an encoder we may not have a connector */
+	if (!dev_priv->int_tv_support)
+		return;
+
+	/*
+	 * Sanity check the TV output by checking to see if the
+	 * DAC register holds a value
+	 */
+	save_tv_dac = I915_READ(TV_DAC);
+
+	I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+	tv_dac_on = I915_READ(TV_DAC);
+
+	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+	tv_dac_off = I915_READ(TV_DAC);
+
+	I915_WRITE(TV_DAC, save_tv_dac);
+
+	/*
+	 * If the register does not hold the state change enable
+	 * bit, (either as a 0 or a 1), assume it doesn't really
+	 * exist
+	 */
+	if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 ||
+	    (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
+		return;
+
+	intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
+	if (!intel_tv)
+		return;
+
+	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_tv);
+		return;
+	}
+
+	intel_encoder = &intel_tv->base;
+	connector = &intel_connector->base;
+
+	/* The documentation, for the older chipsets at least, recommends
+	 * using a polling method rather than hotplug detection for TVs.
+	 * This is because in order to perform hotplug detection, the PLLs
+	 * for the TV must be kept alive, increasing power drain and starving
+	 * bandwidth from other encoders. Notably, for instance, it causes
+	 * pipe underruns on Crestline when this encoder is supposedly idle.
+	 *
+	 * More recent chipsets favour HDMI rather than integrated S-Video.
+	 */
+	intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
+			   DRM_MODE_CONNECTOR_SVIDEO);
+
+	drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
+			 DRM_MODE_ENCODER_TVDAC);
+
+	intel_encoder->compute_config = intel_tv_compute_config;
+	intel_encoder->enable = intel_enable_tv;
+	intel_encoder->disable = intel_disable_tv;
+	intel_encoder->get_hw_state = intel_tv_get_hw_state;
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
+	intel_encoder->type = INTEL_OUTPUT_TVOUT;
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+	intel_encoder->cloneable = false;
+	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
+	intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
+
+	/* BIOS margin values */
+	intel_tv->margin[TV_MARGIN_LEFT] = 54;
+	intel_tv->margin[TV_MARGIN_TOP] = 36;
+	intel_tv->margin[TV_MARGIN_RIGHT] = 46;
+	intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
+
+	intel_tv->tv_format = tv_modes[initial_mode].name;
+
+	drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
+	drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	/* Create TV properties then attach current values */
+	for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
+		tv_format_names[i] = (char *)tv_modes[i].name;
+	drm_mode_create_tv_properties(dev,
+				      ARRAY_SIZE(tv_modes),
+				      tv_format_names);
+
+	drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
+				   initial_mode);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.tv_left_margin_property,
+				   intel_tv->margin[TV_MARGIN_LEFT]);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.tv_top_margin_property,
+				   intel_tv->margin[TV_MARGIN_TOP]);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.tv_right_margin_property,
+				   intel_tv->margin[TV_MARGIN_RIGHT]);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.tv_bottom_margin_property,
+				   intel_tv->margin[TV_MARGIN_BOTTOM]);
+	drm_sysfs_connector_add(connector);
+}
diff --git a/linux-imx/drivers/gpu/drm/mga/Makefile b/linux-imx/drivers/gpu/drm/mga/Makefile
new file mode 100644
index 0000000..6068478
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mga/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+mga-y := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
+
+mga-$(CONFIG_COMPAT) += mga_ioc32.o
+
+obj-$(CONFIG_DRM_MGA)	+= mga.o
+
diff --git a/linux-imx/drivers/gpu/drm/mga/mga_dma.c b/linux-imx/drivers/gpu/drm/mga/mga_dma.c
new file mode 100644
index 0000000..cc3166d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mga/mga_dma.c
@@ -0,0 +1,1154 @@
+/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file mga_dma.c
+ * DMA support for MGA G200 / G400.
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Jeff Hartmann <jhartmann@valinux.com>
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/mga_drm.h>
+#include "mga_drv.h"
+
+#define MGA_DEFAULT_USEC_TIMEOUT	10000
+#define MGA_FREELIST_DEBUG		0
+
+#define MINIMAL_CLEANUP 0
+#define FULL_CLEANUP 1
+static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
+
+/* ================================================================
+ * Engine control
+ */
+
+int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
+{
+	u32 status = 0;
+	int i;
+	DRM_DEBUG("\n");
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
+		if (status == MGA_ENDPRDMASTS) {
+			MGA_WRITE8(MGA_CRTC_INDEX, 0);
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+
+#if MGA_DMA_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x\n", status);
+#endif
+	return -EBUSY;
+}
+
+static int mga_do_dma_reset(drm_mga_private_t *dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
+
+	DRM_DEBUG("\n");
+
+	/* The primary DMA stream should look like new right about now.
+	 */
+	primary->tail = 0;
+	primary->space = primary->size;
+	primary->last_flush = 0;
+
+	sarea_priv->last_wrap = 0;
+
+	/* FIXME: Reset counters, buffer ages etc...
+	 */
+
+	/* FIXME: What else do we need to reinitialize?  WARP stuff?
+	 */
+
+	return 0;
+}
+
+/* ================================================================
+ * Primary DMA stream
+ */
+
+void mga_do_dma_flush(drm_mga_private_t *dev_priv)
+{
+	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
+	u32 head, tail;
+	u32 status = 0;
+	int i;
+	DMA_LOCALS;
+	DRM_DEBUG("\n");
+
+	/* We need to wait so that we can do a safe flush */
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
+		if (status == MGA_ENDPRDMASTS)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (primary->tail == primary->last_flush) {
+		DRM_DEBUG("   bailing out...\n");
+		return;
+	}
+
+	tail = primary->tail + dev_priv->primary->offset;
+
+	/* We need to pad the stream between flushes, as the card
+	 * actually (partially?) reads the first of these commands.
+	 * See page 4-16 in the G400 manual, middle of the page or so.
+	 */
+	BEGIN_DMA(1);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+	ADVANCE_DMA();
+
+	primary->last_flush = primary->tail;
+
+	head = MGA_READ(MGA_PRIMADDRESS);
+
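+	/*
+	 * Compute the free space in the ring: if the hardware head is at
+	 * or behind the tail we can fill up to the end of the buffer (the
+	 * wrap path reclaims the rest); otherwise only the gap between
+	 * head and tail remains.
+	 */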
+	if (head <= tail)
+		primary->space = primary->size - primary->tail;
+	else
+		primary->space = head - tail;
+
+	DRM_DEBUG("   head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
+	DRM_DEBUG("   tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset));
+	DRM_DEBUG("  space = 0x%06x\n", primary->space);
+
+	mga_flush_write_combine();
+	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
+
+	DRM_DEBUG("done.\n");
+}
+
+void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv)
+{
+	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
+	u32 head, tail;
+	DMA_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_DMA_WRAP();
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+	ADVANCE_DMA();
+
+	tail = primary->tail + dev_priv->primary->offset;
+
+	primary->tail = 0;
+	primary->last_flush = 0;
+	primary->last_wrap++;
+
+	head = MGA_READ(MGA_PRIMADDRESS);
+
+	if (head == dev_priv->primary->offset)
+		primary->space = primary->size;
+	else
+		primary->space = head - dev_priv->primary->offset;
+
+	DRM_DEBUG("   head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
+	DRM_DEBUG("   tail = 0x%06x\n", primary->tail);
+	DRM_DEBUG("   wrap = %d\n", primary->last_wrap);
+	DRM_DEBUG("  space = 0x%06x\n", primary->space);
+
+	mga_flush_write_combine();
+	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
+
+	set_bit(0, &primary->wrapped);
+	DRM_DEBUG("done.\n");
+}
+
+void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv)
+{
+	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	u32 head = dev_priv->primary->offset;
+	DRM_DEBUG("\n");
+
+	sarea_priv->last_wrap++;
+	DRM_DEBUG("   wrap = %d\n", sarea_priv->last_wrap);
+
+	mga_flush_write_combine();
+	MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);
+
+	clear_bit(0, &primary->wrapped);
+	DRM_DEBUG("done.\n");
+}
+
+/* ================================================================
+ * Freelist management
+ */
+
+#define MGA_BUFFER_USED		(~0)
+#define MGA_BUFFER_FREE		0
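+
+/*
+ * Each buffer carries an "age": the primary DMA head position and wrap
+ * count stamped when it was dispatched. A buffer can be reused once the
+ * hardware head has advanced past that stamp.
+ */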
+
+#if MGA_FREELIST_DEBUG
+static void mga_freelist_print(struct drm_device *dev)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_freelist_t *entry;
+
+	DRM_INFO("\n");
+	DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
+		 dev_priv->sarea_priv->last_dispatch,
+		 (unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
+				dev_priv->primary->offset));
+	DRM_INFO("current freelist:\n");
+
+	for (entry = dev_priv->head->next; entry; entry = entry->next) {
+		DRM_INFO("   %p   idx=%2d  age=0x%x 0x%06lx\n",
+			 entry, entry->buf->idx, entry->age.head,
+			 (unsigned long)(entry->age.head - dev_priv->primary->offset));
+	}
+	DRM_INFO("\n");
+}
+#endif
+
+static int mga_freelist_init(struct drm_device *dev, drm_mga_private_t *dev_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_mga_buf_priv_t *buf_priv;
+	drm_mga_freelist_t *entry;
+	int i;
+	DRM_DEBUG("count=%d\n", dma->buf_count);
+
+	dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
+	if (dev_priv->head == NULL)
+		return -ENOMEM;
+
+	SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
+
+	for (i = 0; i < dma->buf_count; i++) {
+		buf = dma->buflist[i];
+		buf_priv = buf->dev_private;
+
+		entry = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
+		if (entry == NULL)
+			return -ENOMEM;
+
+		entry->next = dev_priv->head->next;
+		entry->prev = dev_priv->head;
+		SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
+		entry->buf = buf;
+
+		if (dev_priv->head->next != NULL)
+			dev_priv->head->next->prev = entry;
+		if (entry->next == NULL)
+			dev_priv->tail = entry;
+
+		buf_priv->list_entry = entry;
+		buf_priv->discard = 0;
+		buf_priv->dispatched = 0;
+
+		dev_priv->head->next = entry;
+	}
+
+	return 0;
+}
+
+static void mga_freelist_cleanup(struct drm_device *dev)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_freelist_t *entry;
+	drm_mga_freelist_t *next;
+	DRM_DEBUG("\n");
+
+	entry = dev_priv->head;
+	while (entry) {
+		next = entry->next;
+		kfree(entry);
+		entry = next;
+	}
+
+	dev_priv->head = dev_priv->tail = NULL;
+}
+
+#if 0
+/* FIXME: Still needed?
+ */
+static void mga_freelist_reset(struct drm_device *dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_mga_buf_priv_t *buf_priv;
+	int i;
+
+	for (i = 0; i < dma->buf_count; i++) {
+		buf = dma->buflist[i];
+		buf_priv = buf->dev_private;
+		SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
+	}
+}
+#endif
+
+static struct drm_buf *mga_freelist_get(struct drm_device * dev)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_freelist_t *next;
+	drm_mga_freelist_t *prev;
+	drm_mga_freelist_t *tail = dev_priv->tail;
+	u32 head, wrap;
+	DRM_DEBUG("\n");
+
+	head = MGA_READ(MGA_PRIMADDRESS);
+	wrap = dev_priv->sarea_priv->last_wrap;
+
+	DRM_DEBUG("   tail=0x%06lx %d\n",
+		  tail->age.head ?
+		  (unsigned long)(tail->age.head - dev_priv->primary->offset) : 0,
+		  tail->age.wrap);
+	DRM_DEBUG("   head=0x%06lx %d\n",
+		  (unsigned long)(head - dev_priv->primary->offset), wrap);
+
+	if (TEST_AGE(&tail->age, head, wrap)) {
+		prev = dev_priv->tail->prev;
+		next = dev_priv->tail;
+		prev->next = NULL;
+		next->prev = next->next = NULL;
+		dev_priv->tail = prev;
+		SET_AGE(&next->age, MGA_BUFFER_USED, 0);
+		return next->buf;
+	}
+
+	DRM_DEBUG("returning NULL!\n");
+	return NULL;
+}
+
+int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+	drm_mga_freelist_t *head, *entry, *prev;
+
+	DRM_DEBUG("age=0x%06lx wrap=%d\n",
+		  (unsigned long)(buf_priv->list_entry->age.head -
+				  dev_priv->primary->offset),
+		  buf_priv->list_entry->age.wrap);
+
+	entry = buf_priv->list_entry;
+	head = dev_priv->head;
+
+	if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
+		SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
+		prev = dev_priv->tail;
+		prev->next = entry;
+		entry->prev = prev;
+		entry->next = NULL;
+	} else {
+		prev = head->next;
+		head->next = entry;
+		prev->prev = entry;
+		entry->prev = head;
+		entry->next = prev;
+	}
+
+	return 0;
+}
+
+/* ================================================================
+ * DMA initialization, cleanup
+ */
+
+int mga_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	drm_mga_private_t *dev_priv;
+	int ret;
+
+	dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL);
+	if (!dev_priv)
+		return -ENOMEM;
+
+	dev->dev_private = (void *)dev_priv;
+
+	dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
+	dev_priv->chipset = flags;
+
+	pci_set_master(dev->pdev);
+
+	dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
+	dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
+
+	dev->counters += 3;
+	dev->types[6] = _DRM_STAT_IRQ;
+	dev->types[7] = _DRM_STAT_PRIMARY;
+	dev->types[8] = _DRM_STAT_SECONDARY;
+
+	ret = drm_vblank_init(dev, 1);
+
+	if (ret) {
+		(void) mga_driver_unload(dev);
+		return ret;
+	}
+
+	return 0;
+}
+
+#if __OS_HAS_AGP
+/**
+ * Bootstrap the driver for AGP DMA.
+ *
+ * \todo
+ * Investigate whether there is any benefit to storing the WARP microcode in
+ * AGP memory.  If not, the microcode may as well always be put in PCI
+ * memory.
+ *
+ * \todo
+ * This routine needs to set dma_bs->agp_mode to the mode actually configured
+ * in the hardware.  Looking just at the Linux AGP driver code, I don't see
+ * an easy way to determine this.
+ *
+ * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
+ */
+static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
+				    drm_mga_dma_bootstrap_t *dma_bs)
+{
+	drm_mga_private_t *const dev_priv =
+	    (drm_mga_private_t *) dev->dev_private;
+	unsigned int warp_size = MGA_WARP_UCODE_SIZE;
+	int err;
+	unsigned offset;
+	const unsigned secondary_size = dma_bs->secondary_bin_count
+	    * dma_bs->secondary_bin_size;
+	const unsigned agp_size = (dma_bs->agp_size << 20);
+	struct drm_buf_desc req;
+	struct drm_agp_mode mode;
+	struct drm_agp_info info;
+	struct drm_agp_buffer agp_req;
+	struct drm_agp_binding bind_req;
+
+	/* Acquire AGP. */
+	err = drm_agp_acquire(dev);
+	if (err) {
+		DRM_ERROR("Unable to acquire AGP: %d\n", err);
+		return err;
+	}
+
+	err = drm_agp_info(dev, &info);
+	if (err) {
+		DRM_ERROR("Unable to get AGP info: %d\n", err);
+		return err;
+	}
+
+	mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
+	err = drm_agp_enable(dev, mode);
+	if (err) {
+		DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
+		return err;
+	}
+
+	/* In addition to the usual AGP mode configuration, the G200 AGP cards
+	 * need to have the AGP mode "manually" set.
+	 */
+
+	if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
+		if (mode.mode & 0x02)
+			MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
+		else
+			MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
+	}
+
+	/* Allocate and bind AGP memory. */
+	agp_req.size = agp_size;
+	agp_req.type = 0;
+	err = drm_agp_alloc(dev, &agp_req);
+	if (err) {
+		dev_priv->agp_size = 0;
+		DRM_ERROR("Unable to allocate %uMB AGP memory\n",
+			  dma_bs->agp_size);
+		return err;
+	}
+
+	dev_priv->agp_size = agp_size;
+	dev_priv->agp_handle = agp_req.handle;
+
+	bind_req.handle = agp_req.handle;
+	bind_req.offset = 0;
+	err = drm_agp_bind(dev, &bind_req);
+	if (err) {
+		DRM_ERROR("Unable to bind AGP memory: %d\n", err);
+		return err;
+	}
+
+	/* Make drm_addbufs happy by not trying to create a mapping for less
+	 * than a page.
+	 */
+	if (warp_size < PAGE_SIZE)
+		warp_size = PAGE_SIZE;
+
+	offset = 0;
+	err = drm_addmap(dev, offset, warp_size,
+			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
+	if (err) {
+		DRM_ERROR("Unable to map WARP microcode: %d\n", err);
+		return err;
+	}
+
+	offset += warp_size;
+	err = drm_addmap(dev, offset, dma_bs->primary_size,
+			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
+	if (err) {
+		DRM_ERROR("Unable to map primary DMA region: %d\n", err);
+		return err;
+	}
+
+	offset += dma_bs->primary_size;
+	err = drm_addmap(dev, offset, secondary_size,
+			 _DRM_AGP, 0, &dev->agp_buffer_map);
+	if (err) {
+		DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
+		return err;
+	}
+
+	(void)memset(&req, 0, sizeof(req));
+	req.count = dma_bs->secondary_bin_count;
+	req.size = dma_bs->secondary_bin_size;
+	req.flags = _DRM_AGP_BUFFER;
+	req.agp_start = offset;
+
+	err = drm_addbufs_agp(dev, &req);
+	if (err) {
+		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
+		return err;
+	}
+
+	{
+		struct drm_map_list *_entry;
+		unsigned long agp_token = 0;
+
+		list_for_each_entry(_entry, &dev->maplist, head) {
+			if (_entry->map == dev->agp_buffer_map)
+				agp_token = _entry->user_token;
+		}
+		if (!agp_token)
+			return -EFAULT;
+
+		dev->agp_buffer_token = agp_token;
+	}
+
+	offset += secondary_size;
+	err = drm_addmap(dev, offset, agp_size - offset,
+			 _DRM_AGP, 0, &dev_priv->agp_textures);
+	if (err) {
+		DRM_ERROR("Unable to map AGP texture region %d\n", err);
+		return err;
+	}
+
+	drm_core_ioremap(dev_priv->warp, dev);
+	drm_core_ioremap(dev_priv->primary, dev);
+	drm_core_ioremap(dev->agp_buffer_map, dev);
+
+	if (!dev_priv->warp->handle ||
+	    !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
+		DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
+			  dev_priv->warp->handle, dev_priv->primary->handle,
+			  dev->agp_buffer_map->handle);
+		return -ENOMEM;
+	}
+
+	dev_priv->dma_access = MGA_PAGPXFER;
+	dev_priv->wagp_enable = MGA_WAGP_ENABLE;
+
+	DRM_INFO("Initialized card for AGP DMA.\n");
+	return 0;
+}
+#else
+static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
+				    drm_mga_dma_bootstrap_t *dma_bs)
+{
+	return -EINVAL;
+}
+#endif
+
+/**
+ * Bootstrap the driver for PCI DMA.
+ *
+ * \todo
+ * The algorithm for decreasing the size of the primary DMA buffer could be
+ * better.  The size should be rounded up to the nearest page size, then
+ * decrease the request size by a single page each pass through the loop.
+ *
+ * \todo
+ * Determine whether the maximum address passed to drm_pci_alloc is correct.
+ * The same goes for drm_addbufs_pci.
+ *
+ * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
+ */
+static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
+				    drm_mga_dma_bootstrap_t *dma_bs)
+{
+	drm_mga_private_t *const dev_priv =
+	    (drm_mga_private_t *) dev->dev_private;
+	unsigned int warp_size = MGA_WARP_UCODE_SIZE;
+	unsigned int primary_size;
+	unsigned int bin_count;
+	int err;
+	struct drm_buf_desc req;
+
+	if (dev->dma == NULL) {
+		DRM_ERROR("dev->dma is NULL\n");
+		return -EFAULT;
+	}
+
+	/* Make drm_addbufs happy by not trying to create a mapping for less
+	 * than a page.
+	 */
+	if (warp_size < PAGE_SIZE)
+		warp_size = PAGE_SIZE;
+
+	/* The proper alignment is 0x100 for this mapping */
+	err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
+			 _DRM_READ_ONLY, &dev_priv->warp);
+	if (err != 0) {
+		DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
+			  err);
+		return err;
+	}
+
+	/* Other than the bottom two bits being used to encode other
+	 * information, there don't appear to be any restrictions on the
+	 * alignment of the primary or secondary DMA buffers.
+	 */
+
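+	/*
+	 * Consistent (DMA-coherent) memory can be scarce, so keep halving
+	 * the requested primary buffer size until an allocation succeeds.
+	 */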
+	for (primary_size = dma_bs->primary_size; primary_size != 0;
+	     primary_size >>= 1) {
+		/* The proper alignment for this mapping is 0x04 */
+		err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
+				 _DRM_READ_ONLY, &dev_priv->primary);
+		if (!err)
+			break;
+	}
+
+	if (err != 0) {
+		DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
+		return -ENOMEM;
+	}
+
+	if (dev_priv->primary->size != dma_bs->primary_size) {
+		DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
+			 dma_bs->primary_size,
+			 (unsigned)dev_priv->primary->size);
+		dma_bs->primary_size = dev_priv->primary->size;
+	}
+
+	for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
+	     bin_count--) {
+		(void)memset(&req, 0, sizeof(req));
+		req.count = bin_count;
+		req.size = dma_bs->secondary_bin_size;
+
+		err = drm_addbufs_pci(dev, &req);
+		if (!err)
+			break;
+	}
+
+	if (bin_count == 0) {
+		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
+		return err;
+	}
+
+	if (bin_count != dma_bs->secondary_bin_count) {
+		DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
+			 "to %u.\n", dma_bs->secondary_bin_count, bin_count);
+
+		dma_bs->secondary_bin_count = bin_count;
+	}
+
+	dev_priv->dma_access = 0;
+	dev_priv->wagp_enable = 0;
+
+	dma_bs->agp_mode = 0;
+
+	DRM_INFO("Initialized card for PCI DMA.\n");
+	return 0;
+}
+
+static int mga_do_dma_bootstrap(struct drm_device *dev,
+				drm_mga_dma_bootstrap_t *dma_bs)
+{
+	const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev);
+	int err;
+	drm_mga_private_t *const dev_priv =
+	    (drm_mga_private_t *) dev->dev_private;
+
+	dev_priv->used_new_dma_init = 1;
+
+	/* The first steps are the same for both PCI and AGP based DMA.  Map
+	 * the cards MMIO registers and map a status page.
+	 */
+	err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
+			 _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
+	if (err) {
+		DRM_ERROR("Unable to map MMIO region: %d\n", err);
+		return err;
+	}
+
+	err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
+			 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
+			 &dev_priv->status);
+	if (err) {
+		DRM_ERROR("Unable to map status region: %d\n", err);
+		return err;
+	}
+
+	/* The DMA initialization procedure is slightly different for PCI and
+	 * AGP cards.  AGP cards just allocate a large block of AGP memory and
+	 * carve off portions of it for internal uses.  The remaining memory
+	 * is returned to user-mode to be used for AGP textures.
+	 */
+	if (is_agp)
+		err = mga_do_agp_dma_bootstrap(dev, dma_bs);
+
+	/* If we attempted to initialize the card for AGP DMA but failed,
+	 * clean-up any mess that may have been created.
+	 */
+
+	if (err)
+		mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
+
+	/* Not only do we want to try to initialize PCI cards for PCI DMA,
+	 * but we also try to initialize AGP cards that could not be
+	 * initialized for AGP DMA.  This covers the case where we have an AGP
+	 * card in a system with an unsupported AGP chipset.  In that case the
+	 * card will be detected as AGP, but we won't be able to allocate any
+	 * AGP memory, etc.
+	 */
+
+	if (!is_agp || err)
+		err = mga_do_pci_dma_bootstrap(dev, dma_bs);
+
+	return err;
+}
+
+int mga_dma_bootstrap(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	drm_mga_dma_bootstrap_t *bootstrap = data;
+	int err;
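+	/*
+	 * The low three AGP status bits advertise the supported transfer
+	 * rates (bit 0 = 1x, bit 1 = 2x, bit 2 = 4x); this table maps
+	 * them to the fastest single rate reported back to user space.
+	 */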
+	static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
+	const drm_mga_private_t *const dev_priv =
+		(drm_mga_private_t *) dev->dev_private;
+
+	err = mga_do_dma_bootstrap(dev, bootstrap);
+	if (err) {
+		mga_do_cleanup_dma(dev, FULL_CLEANUP);
+		return err;
+	}
+
+	if (dev_priv->agp_textures != NULL) {
+		bootstrap->texture_handle = dev_priv->agp_textures->offset;
+		bootstrap->texture_size = dev_priv->agp_textures->size;
+	} else {
+		bootstrap->texture_handle = 0;
+		bootstrap->texture_size = 0;
+	}
+
+	bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];
+
+	return err;
+}
+
+static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
+{
+	drm_mga_private_t *dev_priv;
+	int ret;
+	DRM_DEBUG("\n");
+
+	dev_priv = dev->dev_private;
+
+	if (init->sgram)
+		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
+	else
+		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
+	dev_priv->maccess = init->maccess;
+
+	dev_priv->fb_cpp = init->fb_cpp;
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->front_pitch = init->front_pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->back_pitch = init->back_pitch;
+
+	dev_priv->depth_cpp = init->depth_cpp;
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->depth_pitch = init->depth_pitch;
+
+	/* FIXME: Need to support AGP textures...
+	 */
+	dev_priv->texture_offset = init->texture_offset[0];
+	dev_priv->texture_size = init->texture_size[0];
+
+	dev_priv->sarea = drm_getsarea(dev);
+	if (!dev_priv->sarea) {
+		DRM_ERROR("failed to find sarea!\n");
+		return -EINVAL;
+	}
+
+	if (!dev_priv->used_new_dma_init) {
+
+		dev_priv->dma_access = MGA_PAGPXFER;
+		dev_priv->wagp_enable = MGA_WAGP_ENABLE;
+
+		dev_priv->status = drm_core_findmap(dev, init->status_offset);
+		if (!dev_priv->status) {
+			DRM_ERROR("failed to find status page!\n");
+			return -EINVAL;
+		}
+		dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
+		if (!dev_priv->mmio) {
+			DRM_ERROR("failed to find mmio region!\n");
+			return -EINVAL;
+		}
+		dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
+		if (!dev_priv->warp) {
+			DRM_ERROR("failed to find warp microcode region!\n");
+			return -EINVAL;
+		}
+		dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
+		if (!dev_priv->primary) {
+			DRM_ERROR("failed to find primary dma region!\n");
+			return -EINVAL;
+		}
+		dev->agp_buffer_token = init->buffers_offset;
+		dev->agp_buffer_map =
+		    drm_core_findmap(dev, init->buffers_offset);
+		if (!dev->agp_buffer_map) {
+			DRM_ERROR("failed to find dma buffer region!\n");
+			return -EINVAL;
+		}
+
+		drm_core_ioremap(dev_priv->warp, dev);
+		drm_core_ioremap(dev_priv->primary, dev);
+		drm_core_ioremap(dev->agp_buffer_map, dev);
+	}
+
+	dev_priv->sarea_priv =
+	    (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+				 init->sarea_priv_offset);
+
+	if (!dev_priv->warp->handle ||
+	    !dev_priv->primary->handle ||
+	    ((dev_priv->dma_access != 0) &&
+	     ((dev->agp_buffer_map == NULL) ||
+	      (dev->agp_buffer_map->handle == NULL)))) {
+		DRM_ERROR("failed to ioremap agp regions!\n");
+		return -ENOMEM;
+	}
+
+	ret = mga_warp_install_microcode(dev_priv);
+	if (ret < 0) {
+		DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
+		return ret;
+	}
+
+	ret = mga_warp_init(dev_priv);
+	if (ret < 0) {
+		DRM_ERROR("failed to init WARP engine!: %d\n", ret);
+		return ret;
+	}
+
+	dev_priv->prim.status = (u32 *) dev_priv->status->handle;
+
+	mga_do_wait_for_idle(dev_priv);
+
+	/* Init the primary DMA registers.
+	 */
+	MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
+#if 0
+	MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 |	/* Soft trap, SECEND, SETUPEND */
+		  MGA_PRIMPTREN1);	/* DWGSYNC */
+#endif
+
+	dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
+	dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
+			      + dev_priv->primary->size);
+	dev_priv->prim.size = dev_priv->primary->size;
+
+	dev_priv->prim.tail = 0;
+	dev_priv->prim.space = dev_priv->prim.size;
+	dev_priv->prim.wrapped = 0;
+
+	dev_priv->prim.last_flush = 0;
+	dev_priv->prim.last_wrap = 0;
+
+	dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;
+
+	dev_priv->prim.status[0] = dev_priv->primary->offset;
+	dev_priv->prim.status[1] = 0;
+
+	dev_priv->sarea_priv->last_wrap = 0;
+	dev_priv->sarea_priv->last_frame.head = 0;
+	dev_priv->sarea_priv->last_frame.wrap = 0;
+
+	if (mga_freelist_init(dev, dev_priv) < 0) {
+		DRM_ERROR("could not initialize freelist\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
+{
+	int err = 0;
+	DRM_DEBUG("\n");
+
+	/* Make sure interrupts are disabled here because the uninstall ioctl
+	 * may not have been called from userspace and after dev_private
+	 * is freed, it's too late.
+	 */
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+	if (dev->dev_private) {
+		drm_mga_private_t *dev_priv = dev->dev_private;
+
+		if ((dev_priv->warp != NULL)
+		    && (dev_priv->warp->type != _DRM_CONSISTENT))
+			drm_core_ioremapfree(dev_priv->warp, dev);
+
+		if ((dev_priv->primary != NULL)
+		    && (dev_priv->primary->type != _DRM_CONSISTENT))
+			drm_core_ioremapfree(dev_priv->primary, dev);
+
+		if (dev->agp_buffer_map != NULL)
+			drm_core_ioremapfree(dev->agp_buffer_map, dev);
+
+		if (dev_priv->used_new_dma_init) {
+#if __OS_HAS_AGP
+			if (dev_priv->agp_handle != 0) {
+				struct drm_agp_binding unbind_req;
+				struct drm_agp_buffer free_req;
+
+				unbind_req.handle = dev_priv->agp_handle;
+				drm_agp_unbind(dev, &unbind_req);
+
+				free_req.handle = dev_priv->agp_handle;
+				drm_agp_free(dev, &free_req);
+
+				dev_priv->agp_textures = NULL;
+				dev_priv->agp_size = 0;
+				dev_priv->agp_handle = 0;
+			}
+
+			if ((dev->agp != NULL) && dev->agp->acquired)
+				err = drm_agp_release(dev);
+#endif
+		}
+
+		dev_priv->warp = NULL;
+		dev_priv->primary = NULL;
+		dev_priv->sarea = NULL;
+		dev_priv->sarea_priv = NULL;
+		dev->agp_buffer_map = NULL;
+
+		if (full_cleanup) {
+			dev_priv->mmio = NULL;
+			dev_priv->status = NULL;
+			dev_priv->used_new_dma_init = 0;
+		}
+
+		memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
+		dev_priv->warp_pipe = 0;
+		memset(dev_priv->warp_pipe_phys, 0,
+		       sizeof(dev_priv->warp_pipe_phys));
+
+		if (dev_priv->head != NULL)
+			mga_freelist_cleanup(dev);
+	}
+
+	return err;
+}
+
+int mga_dma_init(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
+{
+	drm_mga_init_t *init = data;
+	int err;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	switch (init->func) {
+	case MGA_INIT_DMA:
+		err = mga_do_init_dma(dev, init);
+		if (err)
+			(void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
+		return err;
+	case MGA_CLEANUP_DMA:
+		return mga_do_cleanup_dma(dev, FULL_CLEANUP);
+	}
+
+	return -EINVAL;
+}
+
+/* ================================================================
+ * Primary DMA stream management
+ */
+
+int mga_dma_flush(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	struct drm_lock *lock = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("%s%s%s\n",
+		  (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
+		  (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
+		  (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
+
+	WRAP_WAIT_WITH_RETURN(dev_priv);
+
+	if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL))
+		mga_do_dma_flush(dev_priv);
+
+	if (lock->flags & _DRM_LOCK_QUIESCENT) {
+#if MGA_DMA_DEBUG
+		int ret = mga_do_wait_for_idle(dev_priv);
+		if (ret < 0)
+			DRM_INFO("-EBUSY\n");
+		return ret;
+#else
+		return mga_do_wait_for_idle(dev_priv);
+#endif
+	} else {
+		return 0;
+	}
+}
+
+int mga_dma_reset(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	return mga_do_dma_reset(dev_priv);
+}
+
+/* ================================================================
+ * DMA buffer management
+ */
+
+static int mga_dma_get_buffers(struct drm_device *dev,
+			       struct drm_file *file_priv, struct drm_dma *d)
+{
+	struct drm_buf *buf;
+	int i;
+
+	for (i = d->granted_count; i < d->request_count; i++) {
+		buf = mga_freelist_get(dev);
+		if (!buf)
+			return -EAGAIN;
+
+		buf->file_priv = file_priv;
+
+		if (DRM_COPY_TO_USER(&d->request_indices[i],
+				     &buf->idx, sizeof(buf->idx)))
+			return -EFAULT;
+		if (DRM_COPY_TO_USER(&d->request_sizes[i],
+				     &buf->total, sizeof(buf->total)))
+			return -EFAULT;
+
+		d->granted_count++;
+	}
+	return 0;
+}
+
+int mga_dma_buffers(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	struct drm_dma *d = data;
+	int ret = 0;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* Please don't send us buffers.
+	 */
+	if (d->send_count != 0) {
+		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
+			  DRM_CURRENTPID, d->send_count);
+		return -EINVAL;
+	}
+
+	/* We'll send you buffers.
+	 */
+	if (d->request_count < 0 || d->request_count > dma->buf_count) {
+		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+			  DRM_CURRENTPID, d->request_count, dma->buf_count);
+		return -EINVAL;
+	}
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	d->granted_count = 0;
+
+	if (d->request_count)
+		ret = mga_dma_get_buffers(dev, file_priv, d);
+
+	return ret;
+}
+
+/**
+ * Called just before the module is unloaded.
+ */
+int mga_driver_unload(struct drm_device *dev)
+{
+	kfree(dev->dev_private);
+	dev->dev_private = NULL;
+
+	return 0;
+}
+
+/**
+ * Called when the last opener of the device is closed.
+ */
+void mga_driver_lastclose(struct drm_device *dev)
+{
+	mga_do_cleanup_dma(dev, FULL_CLEANUP);
+}
+
+int mga_driver_dma_quiescent(struct drm_device *dev)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	return mga_do_wait_for_idle(dev_priv);
+}
diff --git a/linux-imx/drivers/gpu/drm/mga/mga_drv.c b/linux-imx/drivers/gpu/drm/mga/mga_drv.c
new file mode 100644
index 0000000..17d0a63
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mga/mga_drv.c
@@ -0,0 +1,143 @@
+/* mga_drv.c -- Matrox G200/G400 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rickard E. (Rik) Faith <faith@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/mga_drm.h>
+#include "mga_drv.h"
+
+#include <drm/drm_pciids.h>
+
+static int mga_driver_device_is_agp(struct drm_device *dev);
+
+static struct pci_device_id pciidlist[] = {
+	mga_PCI_IDS
+};
+
+static const struct file_operations mga_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = mga_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
+	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+	.dev_priv_size = sizeof(drm_mga_buf_priv_t),
+	.load = mga_driver_load,
+	.unload = mga_driver_unload,
+	.lastclose = mga_driver_lastclose,
+	.dma_quiescent = mga_driver_dma_quiescent,
+	.device_is_agp = mga_driver_device_is_agp,
+	.get_vblank_counter = mga_get_vblank_counter,
+	.enable_vblank = mga_enable_vblank,
+	.disable_vblank = mga_disable_vblank,
+	.irq_preinstall = mga_driver_irq_preinstall,
+	.irq_postinstall = mga_driver_irq_postinstall,
+	.irq_uninstall = mga_driver_irq_uninstall,
+	.irq_handler = mga_driver_irq_handler,
+	.ioctls = mga_ioctls,
+	.dma_ioctl = mga_dma_buffers,
+	.fops = &mga_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct pci_driver mga_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+};
+
+static int __init mga_init(void)
+{
+	driver.num_ioctls = mga_max_ioctl;
+	return drm_pci_init(&driver, &mga_pci_driver);
+}
+
+static void __exit mga_exit(void)
+{
+	drm_pci_exit(&driver, &mga_pci_driver);
+}
+
+module_init(mga_init);
+module_exit(mga_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * In addition to the usual tests performed by \c drm_device_is_agp, this
+ * function detects PCI G450 cards that appear to the system exactly like
+ * AGP G450 cards.
+ *
+ * \param dev   The device to be tested.
+ *
+ * \returns
+ * If the device is a PCI G450, zero is returned.  Otherwise 2 is returned.
+ */
+static int mga_driver_device_is_agp(struct drm_device *dev)
+{
+	const struct pci_dev *const pdev = dev->pdev;
+
+	/* There are PCI versions of the G450.  These cards have the
+	 * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
+	 * bridge chip.  We detect these cards, which are not currently
+	 * supported by this driver, by looking at the device ID of the
+	 * bus the "card" is on.  If vendor is 0x3388 (Hint Corp) and the
+	 * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
+	 * device.
+	 */
+
+	if ((pdev->device == 0x0525) && pdev->bus->self
+	    && (pdev->bus->self->vendor == 0x3388)
+	    && (pdev->bus->self->device == 0x0021)) {
+		return 0;
+	}
+
+	return 2;
+}
diff --git a/linux-imx/drivers/gpu/drm/mga/mga_drv.h b/linux-imx/drivers/gpu/drm/mga/mga_drv.h
new file mode 100644
index 0000000..54558a0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mga/mga_drv.h
@@ -0,0 +1,666 @@
+/* mga_drv.h -- Private header for the Matrox G200/G400 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __MGA_DRV_H__
+#define __MGA_DRV_H__
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"Gareth Hughes, VA Linux Systems Inc."
+
+#define DRIVER_NAME		"mga"
+#define DRIVER_DESC		"Matrox G200/G400"
+#define DRIVER_DATE		"20051102"
+
+#define DRIVER_MAJOR		3
+#define DRIVER_MINOR		2
+#define DRIVER_PATCHLEVEL	1
+
+typedef struct drm_mga_primary_buffer {
+	u8 *start;
+	u8 *end;
+	int size;
+
+	u32 tail;
+	int space;
+	volatile long wrapped;
+
+	volatile u32 *status;
+
+	u32 last_flush;
+	u32 last_wrap;
+
+	u32 high_mark;
+} drm_mga_primary_buffer_t;
+
+typedef struct drm_mga_freelist {
+	struct drm_mga_freelist *next;
+	struct drm_mga_freelist *prev;
+	drm_mga_age_t age;
+	struct drm_buf *buf;
+} drm_mga_freelist_t;
+
+typedef struct {
+	drm_mga_freelist_t *list_entry;
+	int discard;
+	int dispatched;
+} drm_mga_buf_priv_t;
+
+typedef struct drm_mga_private {
+	drm_mga_primary_buffer_t prim;
+	drm_mga_sarea_t *sarea_priv;
+
+	drm_mga_freelist_t *head;
+	drm_mga_freelist_t *tail;
+
+	unsigned int warp_pipe;
+	unsigned long warp_pipe_phys[MGA_MAX_WARP_PIPES];
+
+	int chipset;
+	int usec_timeout;
+
+	/**
+	 * If set, the new DMA initialization sequence was used.  This is
+	 * primarilly used to select how the driver should uninitialized its
+	 * internal DMA structures.
+	 */
+	int used_new_dma_init;
+
+	/**
+	 * If AGP memory is used for DMA buffers, this will be the value
+	 * \c MGA_PAGPXFER.  Otherwise, it will be zero (for a PCI transfer).
+	 */
+	u32 dma_access;
+
+	/**
+	 * If AGP memory is used for DMA buffers, this will be the value
+	 * \c MGA_WAGP_ENABLE.  Otherwise, it will be zero (for a PCI
+	 * transfer).
+	 */
+	u32 wagp_enable;
+
+	/**
+	 * \name MMIO region parameters.
+	 *
+	 * \sa drm_mga_private_t::mmio
+	 */
+	/*@{ */
+	resource_size_t mmio_base;	   /**< Bus address of base of MMIO. */
+	resource_size_t mmio_size;	   /**< Size of the MMIO region. */
+	/*@} */
+
+	u32 clear_cmd;
+	u32 maccess;
+
+	atomic_t vbl_received;          /**< Number of vblanks received. */
+	wait_queue_head_t fence_queue;
+	atomic_t last_fence_retired;
+	u32 next_fence_to_post;
+
+	unsigned int fb_cpp;
+	unsigned int front_offset;
+	unsigned int front_pitch;
+	unsigned int back_offset;
+	unsigned int back_pitch;
+
+	unsigned int depth_cpp;
+	unsigned int depth_offset;
+	unsigned int depth_pitch;
+
+	unsigned int texture_offset;
+	unsigned int texture_size;
+
+	drm_local_map_t *sarea;
+	drm_local_map_t *mmio;
+	drm_local_map_t *status;
+	drm_local_map_t *warp;
+	drm_local_map_t *primary;
+	drm_local_map_t *agp_textures;
+
+	unsigned long agp_handle;
+	unsigned int agp_size;
+} drm_mga_private_t;
+
+extern struct drm_ioctl_desc mga_ioctls[];
+extern int mga_max_ioctl;
+
+				/* mga_dma.c */
+extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+extern int mga_dma_init(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int mga_dma_flush(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int mga_dma_reset(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int mga_dma_buffers(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
+extern int mga_driver_unload(struct drm_device *dev);
+extern void mga_driver_lastclose(struct drm_device *dev);
+extern int mga_driver_dma_quiescent(struct drm_device *dev);
+
+extern int mga_do_wait_for_idle(drm_mga_private_t *dev_priv);
+
+extern void mga_do_dma_flush(drm_mga_private_t *dev_priv);
+extern void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv);
+extern void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv);
+
+extern int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf);
+
+				/* mga_warp.c */
+extern int mga_warp_install_microcode(drm_mga_private_t *dev_priv);
+extern int mga_warp_init(drm_mga_private_t *dev_priv);
+
+				/* mga_irq.c */
+extern int mga_enable_vblank(struct drm_device *dev, int crtc);
+extern void mga_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
+extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
+extern void mga_driver_irq_preinstall(struct drm_device *dev);
+extern int mga_driver_irq_postinstall(struct drm_device *dev);
+extern void mga_driver_irq_uninstall(struct drm_device *dev);
+extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
+			     unsigned long arg);
+
+#define mga_flush_write_combine()	DRM_WRITEMEMORYBARRIER()
+
+#define MGA_READ8(reg)		DRM_READ8(dev_priv->mmio, (reg))
+#define MGA_READ(reg)		DRM_READ32(dev_priv->mmio, (reg))
+#define MGA_WRITE8(reg, val)	DRM_WRITE8(dev_priv->mmio, (reg), (val))
+#define MGA_WRITE(reg, val)	DRM_WRITE32(dev_priv->mmio, (reg), (val))
+
+#define DWGREG0		0x1c00
+#define DWGREG0_END	0x1dff
+#define DWGREG1		0x2c00
+#define DWGREG1_END	0x2dff
+
+#define ISREG0(r)	(r >= DWGREG0 && r <= DWGREG0_END)
+#define DMAREG0(r)	(u8)((r - DWGREG0) >> 2)
+#define DMAREG1(r)	(u8)(((r - DWGREG1) >> 2) | 0x80)
+#define DMAREG(r)	(ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
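+
+/* Worked examples of the encoding above: registers in the 0x1c00
+ * window map to DMA indices 0x00-0x7f and registers in the 0x2c00
+ * window to 0x80-0xff, e.g.
+ *
+ *	DMAREG(MGA_DWGCTL) = (0x1c00 - 0x1c00) >> 2          = 0x00
+ *	DMAREG(MGA_DSTORG) = ((0x2cb8 - 0x2c00) >> 2) | 0x80 = 0xae
+ */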
+
+/* ================================================================
+ * Helper macros...
+ */
+
+#define MGA_EMIT_STATE(dev_priv, dirty)					\
+do {									\
+	if ((dirty) & ~MGA_UPLOAD_CLIPRECTS) {				\
+		if (dev_priv->chipset >= MGA_CARD_TYPE_G400)		\
+			mga_g400_emit_state(dev_priv);			\
+		else							\
+			mga_g200_emit_state(dev_priv);			\
+	}								\
+} while (0)
+
+#define WRAP_TEST_WITH_RETURN(dev_priv)					\
+do {									\
+	if (test_bit(0, &dev_priv->prim.wrapped)) {			\
+		if (mga_is_idle(dev_priv)) {				\
+			mga_do_dma_wrap_end(dev_priv);			\
+		} else if (dev_priv->prim.space <			\
+			   dev_priv->prim.high_mark) {			\
+			if (MGA_DMA_DEBUG)				\
+				DRM_INFO("wrap...\n");			\
+			return -EBUSY;					\
+		}							\
+	}								\
+} while (0)
+
+#define WRAP_WAIT_WITH_RETURN(dev_priv)					\
+do {									\
+	if (test_bit(0, &dev_priv->prim.wrapped)) {			\
+		if (mga_do_wait_for_idle(dev_priv) < 0) {		\
+			if (MGA_DMA_DEBUG)				\
+				DRM_INFO("wrap...\n");			\
+			return -EBUSY;					\
+		}							\
+		mga_do_dma_wrap_end(dev_priv);				\
+	}								\
+} while (0)
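+
+/* Of the two wrap helpers above, WRAP_TEST_WITH_RETURN() is the
+ * non-blocking form: once the primary DMA buffer has wrapped it either
+ * completes the wrap immediately (engine idle) or bails out with
+ * -EBUSY when free space drops below the high-water mark.
+ * WRAP_WAIT_WITH_RETURN() instead waits in mga_do_wait_for_idle() and
+ * only fails if the engine never goes idle.
+ */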
+
+/* ================================================================
+ * Primary DMA command stream
+ */
+
+#define MGA_VERBOSE	0
+
+#define DMA_LOCALS	unsigned int write; volatile u8 *prim;
+
+#define DMA_BLOCK_SIZE	(5 * sizeof(u32))
+
+#define BEGIN_DMA(n)							\
+do {									\
+	if (MGA_VERBOSE) {						\
+		DRM_INFO("BEGIN_DMA(%d)\n", (n));			\
+		DRM_INFO("   space=0x%x req=0x%Zx\n",			\
+			 dev_priv->prim.space, (n) * DMA_BLOCK_SIZE);	\
+	}								\
+	prim = dev_priv->prim.start;					\
+	write = dev_priv->prim.tail;					\
+} while (0)
+
+#define BEGIN_DMA_WRAP()						\
+do {									\
+	if (MGA_VERBOSE) {						\
+		DRM_INFO("BEGIN_DMA()\n");				\
+		DRM_INFO("   space=0x%x\n", dev_priv->prim.space);	\
+	}								\
+	prim = dev_priv->prim.start;					\
+	write = dev_priv->prim.tail;					\
+} while (0)
+
+#define ADVANCE_DMA()							\
+do {									\
+	dev_priv->prim.tail = write;					\
+	if (MGA_VERBOSE)						\
+		DRM_INFO("ADVANCE_DMA() tail=0x%05x sp=0x%x\n",		\
+			 write, dev_priv->prim.space);			\
+} while (0)
+
+#define FLUSH_DMA()							\
+do {									\
+	if (0) {							\
+		DRM_INFO("\n");						\
+		DRM_INFO("   tail=0x%06x head=0x%06lx\n",		\
+			 dev_priv->prim.tail,				\
+			 (unsigned long)(MGA_READ(MGA_PRIMADDRESS) -	\
+					 dev_priv->primary->offset));	\
+	}								\
+	if (!test_bit(0, &dev_priv->prim.wrapped)) {			\
+		if (dev_priv->prim.space < dev_priv->prim.high_mark)	\
+			mga_do_dma_wrap_start(dev_priv);		\
+		else							\
+			mga_do_dma_flush(dev_priv);			\
+	}								\
+} while (0)
+
+/* Never use this; always use DMA_BLOCK(...) for primary DMA output.
+ */
+#define DMA_WRITE(offset, val)						\
+do {									\
+	if (MGA_VERBOSE)						\
+		DRM_INFO("   DMA_WRITE( 0x%08x ) at 0x%04Zx\n",		\
+			 (u32)(val), write + (offset) * sizeof(u32));	\
+	*(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val;	\
+} while (0)
+
+#define DMA_BLOCK(reg0, val0, reg1, val1, reg2, val2, reg3, val3)	\
+do {									\
+	DMA_WRITE(0, ((DMAREG(reg0) << 0) |				\
+		      (DMAREG(reg1) << 8) |				\
+		      (DMAREG(reg2) << 16) |				\
+		      (DMAREG(reg3) << 24)));				\
+	DMA_WRITE(1, val0);						\
+	DMA_WRITE(2, val1);						\
+	DMA_WRITE(3, val2);						\
+	DMA_WRITE(4, val3);						\
+	write += DMA_BLOCK_SIZE;					\
+} while (0)
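+
+/* Each DMA_BLOCK() emits one 5-dword entry into the primary stream:
+ * dword 0 packs the four 8-bit register indices (reg0 in bits 7:0,
+ * ..., reg3 in bits 31:24) and dwords 1-4 carry the four values.  For
+ * example, a block writing MGA_DMAPAD, MGA_DMAPAD, MGA_DWGSYNC,
+ * MGA_DWGSYNC starts with the index dword
+ *
+ *	0x15 | (0x15 << 8) | (0x93 << 16) | (0x93 << 24) = 0x93931515
+ *
+ * since DMAREG(MGA_DMAPAD) = 0x15 and DMAREG(MGA_DWGSYNC) = 0x93.
+ */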
+
+/* Buffer aging via primary DMA stream head pointer.
+ */
+
+#define SET_AGE(age, h, w)						\
+do {									\
+	(age)->head = h;						\
+	(age)->wrap = w;						\
+} while (0)
+
+#define TEST_AGE(age, h, w)		((age)->wrap < w ||		\
+					 ((age)->wrap == w &&		\
+					  (age)->head < h))
+
+#define AGE_BUFFER(buf_priv)						\
+do {									\
+	drm_mga_freelist_t *entry = (buf_priv)->list_entry;		\
+	if ((buf_priv)->dispatched) {					\
+		entry->age.head = (dev_priv->prim.tail +		\
+				   dev_priv->primary->offset);		\
+		entry->age.wrap = dev_priv->sarea_priv->last_wrap;	\
+	} else {							\
+		entry->age.head = 0;					\
+		entry->age.wrap = 0;					\
+	}								\
+} while (0)
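+
+/* In the aging scheme above, a buffer's age is the primary-stream
+ * position (head byte address plus wrap count) at which its last use
+ * was queued.  TEST_AGE() compares wrap counts first and head offsets
+ * only on a tie, so e.g. an age of {head = 0x100, wrap = 2} is already
+ * expired against a hardware position of (h = 0x80, w = 3).
+ */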
+
+#define MGA_ENGINE_IDLE_MASK		(MGA_SOFTRAPEN |		\
+					 MGA_DWGENGSTS |		\
+					 MGA_ENDPRDMASTS)
+#define MGA_DMA_IDLE_MASK		(MGA_SOFTRAPEN |		\
+					 MGA_ENDPRDMASTS)
+
+#define MGA_DMA_DEBUG			0
+
+/* A reduced set of the mga registers.
+ */
+#define MGA_CRTC_INDEX			0x1fd4
+#define MGA_CRTC_DATA			0x1fd5
+
+/* CRTC11 */
+#define MGA_VINTCLR			(1 << 4)
+#define MGA_VINTEN			(1 << 5)
+
+#define MGA_ALPHACTRL			0x2c7c
+#define MGA_AR0				0x1c60
+#define MGA_AR1				0x1c64
+#define MGA_AR2				0x1c68
+#define MGA_AR3				0x1c6c
+#define MGA_AR4				0x1c70
+#define MGA_AR5				0x1c74
+#define MGA_AR6				0x1c78
+
+#define MGA_CXBNDRY			0x1c80
+#define MGA_CXLEFT			0x1ca0
+#define MGA_CXRIGHT			0x1ca4
+
+#define MGA_DMAPAD			0x1c54
+#define MGA_DSTORG			0x2cb8
+#define MGA_DWGCTL			0x1c00
+#	define MGA_OPCOD_MASK			(15 << 0)
+#	define MGA_OPCOD_TRAP			(4 << 0)
+#	define MGA_OPCOD_TEXTURE_TRAP		(6 << 0)
+#	define MGA_OPCOD_BITBLT			(8 << 0)
+#	define MGA_OPCOD_ILOAD			(9 << 0)
+#	define MGA_ATYPE_MASK			(7 << 4)
+#	define MGA_ATYPE_RPL			(0 << 4)
+#	define MGA_ATYPE_RSTR			(1 << 4)
+#	define MGA_ATYPE_ZI			(3 << 4)
+#	define MGA_ATYPE_BLK			(4 << 4)
+#	define MGA_ATYPE_I			(7 << 4)
+#	define MGA_LINEAR			(1 << 7)
+#	define MGA_ZMODE_MASK			(7 << 8)
+#	define MGA_ZMODE_NOZCMP			(0 << 8)
+#	define MGA_ZMODE_ZE			(2 << 8)
+#	define MGA_ZMODE_ZNE			(3 << 8)
+#	define MGA_ZMODE_ZLT			(4 << 8)
+#	define MGA_ZMODE_ZLTE			(5 << 8)
+#	define MGA_ZMODE_ZGT			(6 << 8)
+#	define MGA_ZMODE_ZGTE			(7 << 8)
+#	define MGA_SOLID			(1 << 11)
+#	define MGA_ARZERO			(1 << 12)
+#	define MGA_SGNZERO			(1 << 13)
+#	define MGA_SHIFTZERO			(1 << 14)
+#	define MGA_BOP_MASK			(15 << 16)
+#	define MGA_BOP_ZERO			(0 << 16)
+#	define MGA_BOP_DST			(10 << 16)
+#	define MGA_BOP_SRC			(12 << 16)
+#	define MGA_BOP_ONE			(15 << 16)
+#	define MGA_TRANS_SHIFT			20
+#	define MGA_TRANS_MASK			(15 << 20)
+#	define MGA_BLTMOD_MASK			(15 << 25)
+#	define MGA_BLTMOD_BMONOLEF		(0 << 25)
+#	define MGA_BLTMOD_BMONOWF		(4 << 25)
+#	define MGA_BLTMOD_PLAN			(1 << 25)
+#	define MGA_BLTMOD_BFCOL			(2 << 25)
+#	define MGA_BLTMOD_BU32BGR		(3 << 25)
+#	define MGA_BLTMOD_BU32RGB		(7 << 25)
+#	define MGA_BLTMOD_BU24BGR		(11 << 25)
+#	define MGA_BLTMOD_BU24RGB		(15 << 25)
+#	define MGA_PATTERN			(1 << 29)
+#	define MGA_TRANSC			(1 << 30)
+#	define MGA_CLIPDIS			(1 << 31)
+#define MGA_DWGSYNC			0x2c4c
+
+#define MGA_FCOL			0x1c24
+#define MGA_FIFOSTATUS			0x1e10
+#define MGA_FOGCOL			0x1cf4
+#define MGA_FXBNDRY			0x1c84
+#define MGA_FXLEFT			0x1ca8
+#define MGA_FXRIGHT			0x1cac
+
+#define MGA_ICLEAR			0x1e18
+#	define MGA_SOFTRAPICLR			(1 << 0)
+#	define MGA_VLINEICLR			(1 << 5)
+#define MGA_IEN				0x1e1c
+#	define MGA_SOFTRAPIEN			(1 << 0)
+#	define MGA_VLINEIEN			(1 << 5)
+
+#define MGA_LEN				0x1c5c
+
+#define MGA_MACCESS			0x1c04
+
+#define MGA_PITCH			0x1c8c
+#define MGA_PLNWT			0x1c1c
+#define MGA_PRIMADDRESS			0x1e58
+#	define MGA_DMA_GENERAL			(0 << 0)
+#	define MGA_DMA_BLIT			(1 << 0)
+#	define MGA_DMA_VECTOR			(2 << 0)
+#	define MGA_DMA_VERTEX			(3 << 0)
+#define MGA_PRIMEND			0x1e5c
+#	define MGA_PRIMNOSTART			(1 << 0)
+#	define MGA_PAGPXFER			(1 << 1)
+#define MGA_PRIMPTR			0x1e50
+#	define MGA_PRIMPTREN0			(1 << 0)
+#	define MGA_PRIMPTREN1			(1 << 1)
+
+#define MGA_RST				0x1e40
+#	define MGA_SOFTRESET			(1 << 0)
+#	define MGA_SOFTEXTRST			(1 << 1)
+
+#define MGA_SECADDRESS			0x2c40
+#define MGA_SECEND			0x2c44
+#define MGA_SETUPADDRESS		0x2cd0
+#define MGA_SETUPEND			0x2cd4
+#define MGA_SGN				0x1c58
+#define MGA_SOFTRAP			0x2c48
+#define MGA_SRCORG			0x2cb4
+#	define MGA_SRCMAP_MASK			(1 << 0)
+#	define MGA_SRCMAP_FB			(0 << 0)
+#	define MGA_SRCMAP_SYSMEM		(1 << 0)
+#	define MGA_SRCACC_MASK			(1 << 1)
+#	define MGA_SRCACC_PCI			(0 << 1)
+#	define MGA_SRCACC_AGP			(1 << 1)
+#define MGA_STATUS			0x1e14
+#	define MGA_SOFTRAPEN			(1 << 0)
+#	define MGA_VSYNCPEN			(1 << 4)
+#	define MGA_VLINEPEN			(1 << 5)
+#	define MGA_DWGENGSTS			(1 << 16)
+#	define MGA_ENDPRDMASTS			(1 << 17)
+#define MGA_STENCIL			0x2cc8
+#define MGA_STENCILCTL			0x2ccc
+
+#define MGA_TDUALSTAGE0			0x2cf8
+#define MGA_TDUALSTAGE1			0x2cfc
+#define MGA_TEXBORDERCOL		0x2c5c
+#define MGA_TEXCTL			0x2c30
+#define MGA_TEXCTL2			0x2c3c
+#	define MGA_DUALTEX			(1 << 7)
+#	define MGA_G400_TC2_MAGIC		(1 << 15)
+#	define MGA_MAP1_ENABLE			(1 << 31)
+#define MGA_TEXFILTER			0x2c58
+#define MGA_TEXHEIGHT			0x2c2c
+#define MGA_TEXORG			0x2c24
+#	define MGA_TEXORGMAP_MASK		(1 << 0)
+#	define MGA_TEXORGMAP_FB			(0 << 0)
+#	define MGA_TEXORGMAP_SYSMEM		(1 << 0)
+#	define MGA_TEXORGACC_MASK		(1 << 1)
+#	define MGA_TEXORGACC_PCI		(0 << 1)
+#	define MGA_TEXORGACC_AGP		(1 << 1)
+#define MGA_TEXORG1			0x2ca4
+#define MGA_TEXORG2			0x2ca8
+#define MGA_TEXORG3			0x2cac
+#define MGA_TEXORG4			0x2cb0
+#define MGA_TEXTRANS			0x2c34
+#define MGA_TEXTRANSHIGH		0x2c38
+#define MGA_TEXWIDTH			0x2c28
+
+#define MGA_WACCEPTSEQ			0x1dd4
+#define MGA_WCODEADDR			0x1e6c
+#define MGA_WFLAG			0x1dc4
+#define MGA_WFLAG1			0x1de0
+#define MGA_WFLAGNB			0x1e64
+#define MGA_WFLAGNB1			0x1e08
+#define MGA_WGETMSB			0x1dc8
+#define MGA_WIADDR			0x1dc0
+#define MGA_WIADDR2			0x1dd8
+#	define MGA_WMODE_SUSPEND		(0 << 0)
+#	define MGA_WMODE_RESUME			(1 << 0)
+#	define MGA_WMODE_JUMP			(2 << 0)
+#	define MGA_WMODE_START			(3 << 0)
+#	define MGA_WAGP_ENABLE			(1 << 2)
+#define MGA_WMISC			0x1e70
+#	define MGA_WUCODECACHE_ENABLE		(1 << 0)
+#	define MGA_WMASTER_ENABLE		(1 << 1)
+#	define MGA_WCACHEFLUSH_ENABLE		(1 << 3)
+#define MGA_WVRTXSZ			0x1dcc
+
+#define MGA_YBOT			0x1c9c
+#define MGA_YDST			0x1c90
+#define MGA_YDSTLEN			0x1c88
+#define MGA_YDSTORG			0x1c94
+#define MGA_YTOP			0x1c98
+
+#define MGA_ZORG			0x1c0c
+
+/* This finishes the current batch of commands
+ */
+#define MGA_EXEC			0x0100
+
+/* AGP PLL encoding (for G200 only).
+ */
+#define MGA_AGP_PLL			0x1e4c
+#	define MGA_AGP2XPLL_DISABLE		(0 << 0)
+#	define MGA_AGP2XPLL_ENABLE		(1 << 0)
+
+/* Warp registers
+ */
+#define MGA_WR0				0x2d00
+#define MGA_WR1				0x2d04
+#define MGA_WR2				0x2d08
+#define MGA_WR3				0x2d0c
+#define MGA_WR4				0x2d10
+#define MGA_WR5				0x2d14
+#define MGA_WR6				0x2d18
+#define MGA_WR7				0x2d1c
+#define MGA_WR8				0x2d20
+#define MGA_WR9				0x2d24
+#define MGA_WR10			0x2d28
+#define MGA_WR11			0x2d2c
+#define MGA_WR12			0x2d30
+#define MGA_WR13			0x2d34
+#define MGA_WR14			0x2d38
+#define MGA_WR15			0x2d3c
+#define MGA_WR16			0x2d40
+#define MGA_WR17			0x2d44
+#define MGA_WR18			0x2d48
+#define MGA_WR19			0x2d4c
+#define MGA_WR20			0x2d50
+#define MGA_WR21			0x2d54
+#define MGA_WR22			0x2d58
+#define MGA_WR23			0x2d5c
+#define MGA_WR24			0x2d60
+#define MGA_WR25			0x2d64
+#define MGA_WR26			0x2d68
+#define MGA_WR27			0x2d6c
+#define MGA_WR28			0x2d70
+#define MGA_WR29			0x2d74
+#define MGA_WR30			0x2d78
+#define MGA_WR31			0x2d7c
+#define MGA_WR32			0x2d80
+#define MGA_WR33			0x2d84
+#define MGA_WR34			0x2d88
+#define MGA_WR35			0x2d8c
+#define MGA_WR36			0x2d90
+#define MGA_WR37			0x2d94
+#define MGA_WR38			0x2d98
+#define MGA_WR39			0x2d9c
+#define MGA_WR40			0x2da0
+#define MGA_WR41			0x2da4
+#define MGA_WR42			0x2da8
+#define MGA_WR43			0x2dac
+#define MGA_WR44			0x2db0
+#define MGA_WR45			0x2db4
+#define MGA_WR46			0x2db8
+#define MGA_WR47			0x2dbc
+#define MGA_WR48			0x2dc0
+#define MGA_WR49			0x2dc4
+#define MGA_WR50			0x2dc8
+#define MGA_WR51			0x2dcc
+#define MGA_WR52			0x2dd0
+#define MGA_WR53			0x2dd4
+#define MGA_WR54			0x2dd8
+#define MGA_WR55			0x2ddc
+#define MGA_WR56			0x2de0
+#define MGA_WR57			0x2de4
+#define MGA_WR58			0x2de8
+#define MGA_WR59			0x2dec
+#define MGA_WR60			0x2df0
+#define MGA_WR61			0x2df4
+#define MGA_WR62			0x2df8
+#define MGA_WR63			0x2dfc
+#	define MGA_G400_WR_MAGIC		(1 << 6)
+#	define MGA_G400_WR56_MAGIC		0x46480000	/* 12800.0f */
+
+#define MGA_ILOAD_ALIGN		64
+#define MGA_ILOAD_MASK		(MGA_ILOAD_ALIGN - 1)
+
+#define MGA_DWGCTL_FLUSH	(MGA_OPCOD_TEXTURE_TRAP |		\
+				 MGA_ATYPE_I |				\
+				 MGA_ZMODE_NOZCMP |			\
+				 MGA_ARZERO |				\
+				 MGA_SGNZERO |				\
+				 MGA_BOP_SRC |				\
+				 (15 << MGA_TRANS_SHIFT))
+
+#define MGA_DWGCTL_CLEAR	(MGA_OPCOD_TRAP |			\
+				 MGA_ZMODE_NOZCMP |			\
+				 MGA_SOLID |				\
+				 MGA_ARZERO |				\
+				 MGA_SGNZERO |				\
+				 MGA_SHIFTZERO |			\
+				 MGA_BOP_SRC |				\
+				 (0 << MGA_TRANS_SHIFT) |		\
+				 MGA_BLTMOD_BMONOLEF |			\
+				 MGA_TRANSC |				\
+				 MGA_CLIPDIS)
+
+#define MGA_DWGCTL_COPY		(MGA_OPCOD_BITBLT |			\
+				 MGA_ATYPE_RPL |			\
+				 MGA_SGNZERO |				\
+				 MGA_SHIFTZERO |			\
+				 MGA_BOP_SRC |				\
+				 (0 << MGA_TRANS_SHIFT) |		\
+				 MGA_BLTMOD_BFCOL |			\
+				 MGA_CLIPDIS)
+
+/* Simple idle test.
+ */
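+/* The engine counts as idle only when the masked STATUS value is
+ * exactly MGA_ENDPRDMASTS: primary DMA has ended (bit 17 set) while
+ * the drawing engine is stopped (MGA_DWGENGSTS, bit 16, clear) and no
+ * SOFTRAP is pending (bit 0 clear).
+ */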
+static __inline__ int mga_is_idle(drm_mga_private_t *dev_priv)
+{
+	u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
+	return (status == MGA_ENDPRDMASTS);
+}
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/mga/mga_ioc32.c b/linux-imx/drivers/gpu/drm/mga/mga_ioc32.c
new file mode 100644
index 0000000..709e90d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mga/mga_ioc32.c
@@ -0,0 +1,225 @@
+/**
+ * \file mga_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the MGA DRM.
+ *
+ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
+ *
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Egbert Eich 2003,2004
+ * Copyright (C) Dave Airlie 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include <drm/drmP.h>
+#include <drm/mga_drm.h>
+
+typedef struct drm32_mga_init {
+	int func;
+	u32 sarea_priv_offset;
+	int chipset;
+	int sgram;
+	unsigned int maccess;
+	unsigned int fb_cpp;
+	unsigned int front_offset, front_pitch;
+	unsigned int back_offset, back_pitch;
+	unsigned int depth_cpp;
+	unsigned int depth_offset, depth_pitch;
+	unsigned int texture_offset[MGA_NR_TEX_HEAPS];
+	unsigned int texture_size[MGA_NR_TEX_HEAPS];
+	u32 fb_offset;
+	u32 mmio_offset;
+	u32 status_offset;
+	u32 warp_offset;
+	u32 primary_offset;
+	u32 buffers_offset;
+} drm_mga_init32_t;
+
+static int compat_mga_init(struct file *file, unsigned int cmd,
+			   unsigned long arg)
+{
+	drm_mga_init32_t init32;
+	drm_mga_init_t __user *init;
+	int err = 0, i;
+
+	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
+		return -EFAULT;
+
+	init = compat_alloc_user_space(sizeof(*init));
+	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
+	    || __put_user(init32.func, &init->func)
+	    || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
+	    || __put_user(init32.chipset, &init->chipset)
+	    || __put_user(init32.sgram, &init->sgram)
+	    || __put_user(init32.maccess, &init->maccess)
+	    || __put_user(init32.fb_cpp, &init->fb_cpp)
+	    || __put_user(init32.front_offset, &init->front_offset)
+	    || __put_user(init32.front_pitch, &init->front_pitch)
+	    || __put_user(init32.back_offset, &init->back_offset)
+	    || __put_user(init32.back_pitch, &init->back_pitch)
+	    || __put_user(init32.depth_cpp, &init->depth_cpp)
+	    || __put_user(init32.depth_offset, &init->depth_offset)
+	    || __put_user(init32.depth_pitch, &init->depth_pitch)
+	    || __put_user(init32.fb_offset, &init->fb_offset)
+	    || __put_user(init32.mmio_offset, &init->mmio_offset)
+	    || __put_user(init32.status_offset, &init->status_offset)
+	    || __put_user(init32.warp_offset, &init->warp_offset)
+	    || __put_user(init32.primary_offset, &init->primary_offset)
+	    || __put_user(init32.buffers_offset, &init->buffers_offset))
+		return -EFAULT;
+
+	for (i = 0; i < MGA_NR_TEX_HEAPS; i++) {
+		err |= __put_user(init32.texture_offset[i],
+				  &init->texture_offset[i]);
+		err |= __put_user(init32.texture_size[i],
+				  &init->texture_size[i]);
+	}
+	if (err)
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_MGA_INIT, (unsigned long)init);
+}
+
+typedef struct drm_mga_getparam32 {
+	int param;
+	u32 value;
+} drm_mga_getparam32_t;
+
+static int compat_mga_getparam(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_mga_getparam32_t getparam32;
+	drm_mga_getparam_t __user *getparam;
+
+	if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
+		return -EFAULT;
+
+	getparam = compat_alloc_user_space(sizeof(*getparam));
+	if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
+	    || __put_user(getparam32.param, &getparam->param)
+	    || __put_user((void __user *)(unsigned long)getparam32.value,
+			  &getparam->value))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
+}
+
+typedef struct drm_mga_dma_bootstrap32 {
+	u32 texture_handle;
+	u32 texture_size;
+	u32 primary_size;
+	u32 secondary_bin_count;
+	u32 secondary_bin_size;
+	u32 agp_mode;
+	u8 agp_size;
+} drm_mga_dma_bootstrap32_t;
+
+static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
+				    unsigned long arg)
+{
+	drm_mga_dma_bootstrap32_t dma_bootstrap32;
+	drm_mga_dma_bootstrap_t __user *dma_bootstrap;
+	int err;
+
+	if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
+			   sizeof(dma_bootstrap32)))
+		return -EFAULT;
+
+	dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap));
+	if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap))
+	    || __put_user(dma_bootstrap32.texture_handle,
+			  &dma_bootstrap->texture_handle)
+	    || __put_user(dma_bootstrap32.texture_size,
+			  &dma_bootstrap->texture_size)
+	    || __put_user(dma_bootstrap32.primary_size,
+			  &dma_bootstrap->primary_size)
+	    || __put_user(dma_bootstrap32.secondary_bin_count,
+			  &dma_bootstrap->secondary_bin_count)
+	    || __put_user(dma_bootstrap32.secondary_bin_size,
+			  &dma_bootstrap->secondary_bin_size)
+	    || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
+	    || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
+		return -EFAULT;
+
+	err = drm_ioctl(file, DRM_IOCTL_MGA_DMA_BOOTSTRAP,
+			(unsigned long)dma_bootstrap);
+	if (err)
+		return err;
+
+	if (__get_user(dma_bootstrap32.texture_handle,
+		       &dma_bootstrap->texture_handle)
+	    || __get_user(dma_bootstrap32.texture_size,
+			  &dma_bootstrap->texture_size)
+	    || __get_user(dma_bootstrap32.primary_size,
+			  &dma_bootstrap->primary_size)
+	    || __get_user(dma_bootstrap32.secondary_bin_count,
+			  &dma_bootstrap->secondary_bin_count)
+	    || __get_user(dma_bootstrap32.secondary_bin_size,
+			  &dma_bootstrap->secondary_bin_size)
+	    || __get_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
+	    || __get_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &dma_bootstrap32,
+			 sizeof(dma_bootstrap32)))
+		return -EFAULT;
+
+	return 0;
+}
+
+drm_ioctl_compat_t *mga_compat_ioctls[] = {
+	[DRM_MGA_INIT] = compat_mga_init,
+	[DRM_MGA_GETPARAM] = compat_mga_getparam,
+	[DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn = NULL;
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
+		fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+	if (fn != NULL)
+		ret = (*fn) (filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp, cmd, arg);
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/mga/mga_irq.c b/linux-imx/drivers/gpu/drm/mga/mga_irq.c
new file mode 100644
index 0000000..598c281
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mga/mga_irq.c
@@ -0,0 +1,173 @@
+/* mga_irq.c -- IRQ handling for the Matrox MGA -*- linux-c -*-
+ */
+/*
+ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ *    Eric Anholt <anholt@FreeBSD.org>
+ */
+
+#include <drm/drmP.h>
+#include <drm/mga_drm.h>
+#include "mga_drv.h"
+
+u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	const drm_mga_private_t *const dev_priv =
+		(drm_mga_private_t *) dev->dev_private;
+
+	if (crtc != 0)
+		return 0;
+
+	return atomic_read(&dev_priv->vbl_received);
+}
+
+irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	int status;
+	int handled = 0;
+
+	status = MGA_READ(MGA_STATUS);
+
+	/* VBLANK interrupt */
+	if (status & MGA_VLINEPEN) {
+		MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
+		atomic_inc(&dev_priv->vbl_received);
+		drm_handle_vblank(dev, 0);
+		handled = 1;
+	}
+
+	/* SOFTRAP interrupt */
+	if (status & MGA_SOFTRAPEN) {
+		const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
+		const u32 prim_end = MGA_READ(MGA_PRIMEND);
+
+		MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
+
+		/* In addition to clearing the interrupt-pending bit, we
+		 * have to write to MGA_PRIMEND to re-start the DMA operation.
+		 */
+		if ((prim_start & ~0x03) != (prim_end & ~0x03))
+			MGA_WRITE(MGA_PRIMEND, prim_end);
+
+		atomic_inc(&dev_priv->last_fence_retired);
+		DRM_WAKEUP(&dev_priv->fence_queue);
+		handled = 1;
+	}
+
+	if (handled)
+		return IRQ_HANDLED;
+	return IRQ_NONE;
+}
+
+int mga_enable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+
+	if (crtc != 0) {
+		DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+			  crtc);
+		return 0;
+	}
+
+	MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
+	return 0;
+}
+
+void mga_disable_vblank(struct drm_device *dev, int crtc)
+{
+	if (crtc != 0) {
+		DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
+			  crtc);
+	}
+
+	/* Do *NOT* disable the vertical refresh interrupt.  MGA doesn't have
+	 * a nice hardware counter that tracks the number of refreshes when
+	 * the interrupt is disabled, and the kernel doesn't know the refresh
+	 * rate to calculate an estimate.
+	 */
+	/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
+}
+
+int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	unsigned int cur_fence;
+	int ret = 0;
+
+	/* Assume that the user has missed the current sequence number
+	 * by about a day rather than wanting to wait years on a single
+	 * fence.
+	 */
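+	/* The unsigned subtraction below also makes the test wrap-safe:
+	 * (cur_fence - *sequence) <= 2^23 holds exactly when cur_fence
+	 * has caught up with *sequence, even across a 32-bit rollover
+	 * (e.g. cur_fence = 0x00000002 and *sequence = 0xfffffffe give
+	 * a difference of 0x00000004, so the wait completes).
+	 */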
+	DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
+		    (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
+		      - *sequence) <= (1 << 23)));
+
+	*sequence = cur_fence;
+
+	return ret;
+}
+
+void mga_driver_irq_preinstall(struct drm_device *dev)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+
+	/* Disable *all* interrupts */
+	MGA_WRITE(MGA_IEN, 0);
+	/* Clear bits if they're already high */
+	MGA_WRITE(MGA_ICLEAR, ~0);
+}
+
+int mga_driver_irq_postinstall(struct drm_device *dev)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+
+	DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
+
+	/* Turn on soft trap interrupt.  Vertical blank interrupts are enabled
+	 * in mga_enable_vblank.
+	 */
+	MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
+	return 0;
+}
+
+void mga_driver_irq_uninstall(struct drm_device *dev)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	if (!dev_priv)
+		return;
+
+	/* Disable *all* interrupts */
+	MGA_WRITE(MGA_IEN, 0);
+
+	dev->irq_enabled = 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/mga/mga_state.c b/linux-imx/drivers/gpu/drm/mga/mga_state.c
new file mode 100644
index 0000000..9c14514
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mga/mga_state.c
@@ -0,0 +1,1102 @@
+/* mga_state.c -- State support for MGA G200/G400 -*- linux-c -*-
+ * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jeff Hartmann <jhartmann@valinux.com>
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ *
+ * Rewritten by:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/mga_drm.h>
+#include "mga_drv.h"
+
+/* ================================================================
+ * DMA hardware state programming functions
+ */
+
+static void mga_emit_clip_rect(drm_mga_private_t *dev_priv,
+			       struct drm_clip_rect *box)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+	unsigned int pitch = dev_priv->front_pitch;
+	DMA_LOCALS;
+
+	BEGIN_DMA(2);
+
+	/* Force reset of DWGCTL on G400 (eliminates clip disable bit).
+	 */
+	if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
+		DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
+			  MGA_LEN + MGA_EXEC, 0x80000000,
+			  MGA_DWGCTL, ctx->dwgctl,
+			  MGA_LEN + MGA_EXEC, 0x80000000);
+	}
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
+		  MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch);
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g200_emit_context(drm_mga_private_t *dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+	DMA_LOCALS;
+
+	BEGIN_DMA(3);
+
+	DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
+		  MGA_MACCESS, ctx->maccess,
+		  MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
+
+	DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
+		  MGA_FOGCOL, ctx->fogcolor,
+		  MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);
+
+	DMA_BLOCK(MGA_FCOL, ctx->fcol,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g400_emit_context(drm_mga_private_t *dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+	DMA_LOCALS;
+
+	BEGIN_DMA(4);
+
+	DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
+		  MGA_MACCESS, ctx->maccess,
+		  MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
+
+	DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
+		  MGA_FOGCOL, ctx->fogcolor,
+		  MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);
+
+	DMA_BLOCK(MGA_WFLAG1, ctx->wflag,
+		  MGA_TDUALSTAGE0, ctx->tdualstage0,
+		  MGA_TDUALSTAGE1, ctx->tdualstage1, MGA_FCOL, ctx->fcol);
+
+	DMA_BLOCK(MGA_STENCIL, ctx->stencil,
+		  MGA_STENCILCTL, ctx->stencilctl,
+		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g200_emit_tex0(drm_mga_private_t *dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
+	DMA_LOCALS;
+
+	BEGIN_DMA(4);
+
+	DMA_BLOCK(MGA_TEXCTL2, tex->texctl2,
+		  MGA_TEXCTL, tex->texctl,
+		  MGA_TEXFILTER, tex->texfilter,
+		  MGA_TEXBORDERCOL, tex->texbordercol);
+
+	DMA_BLOCK(MGA_TEXORG, tex->texorg,
+		  MGA_TEXORG1, tex->texorg1,
+		  MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);
+
+	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
+		  MGA_TEXWIDTH, tex->texwidth,
+		  MGA_TEXHEIGHT, tex->texheight, MGA_WR24, tex->texwidth);
+
+	DMA_BLOCK(MGA_WR34, tex->texheight,
+		  MGA_TEXTRANS, 0x0000ffff,
+		  MGA_TEXTRANSHIGH, 0x0000ffff, MGA_DMAPAD, 0x00000000);
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g400_emit_tex0(drm_mga_private_t *dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
+	DMA_LOCALS;
+
+/*	printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
+/*	       tex->texctl, tex->texctl2); */
+
+	BEGIN_DMA(6);
+
+	DMA_BLOCK(MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC,
+		  MGA_TEXCTL, tex->texctl,
+		  MGA_TEXFILTER, tex->texfilter,
+		  MGA_TEXBORDERCOL, tex->texbordercol);
+
+	DMA_BLOCK(MGA_TEXORG, tex->texorg,
+		  MGA_TEXORG1, tex->texorg1,
+		  MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);
+
+	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
+		  MGA_TEXWIDTH, tex->texwidth,
+		  MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000);
+
+	DMA_BLOCK(MGA_WR57, 0x00000000,
+		  MGA_WR53, 0x00000000,
+		  MGA_WR61, 0x00000000, MGA_WR52, MGA_G400_WR_MAGIC);
+
+	DMA_BLOCK(MGA_WR60, MGA_G400_WR_MAGIC,
+		  MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC,
+		  MGA_WR62, tex->texheight | MGA_G400_WR_MAGIC,
+		  MGA_DMAPAD, 0x00000000);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_TEXTRANS, 0x0000ffff, MGA_TEXTRANSHIGH, 0x0000ffff);
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g400_emit_tex1(drm_mga_private_t *dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
+	DMA_LOCALS;
+
+/*	printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg,  */
+/*	       tex->texctl, tex->texctl2); */
+
+	BEGIN_DMA(5);
+
+	DMA_BLOCK(MGA_TEXCTL2, (tex->texctl2 |
+				MGA_MAP1_ENABLE |
+				MGA_G400_TC2_MAGIC),
+		  MGA_TEXCTL, tex->texctl,
+		  MGA_TEXFILTER, tex->texfilter,
+		  MGA_TEXBORDERCOL, tex->texbordercol);
+
+	DMA_BLOCK(MGA_TEXORG, tex->texorg,
+		  MGA_TEXORG1, tex->texorg1,
+		  MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);
+
+	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
+		  MGA_TEXWIDTH, tex->texwidth,
+		  MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000);
+
+	DMA_BLOCK(MGA_WR57, 0x00000000,
+		  MGA_WR53, 0x00000000,
+		  MGA_WR61, 0x00000000,
+		  MGA_WR52, tex->texwidth | MGA_G400_WR_MAGIC);
+
+	DMA_BLOCK(MGA_WR60, tex->texheight | MGA_G400_WR_MAGIC,
+		  MGA_TEXTRANS, 0x0000ffff,
+		  MGA_TEXTRANSHIGH, 0x0000ffff,
+		  MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC);
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g200_emit_pipe(drm_mga_private_t *dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int pipe = sarea_priv->warp_pipe;
+	DMA_LOCALS;
+
+	BEGIN_DMA(3);
+
+	DMA_BLOCK(MGA_WIADDR, MGA_WMODE_SUSPEND,
+		  MGA_WVRTXSZ, 0x00000007,
+		  MGA_WFLAG, 0x00000000, MGA_WR24, 0x00000000);
+
+	DMA_BLOCK(MGA_WR25, 0x00000100,
+		  MGA_WR34, 0x00000000,
+		  MGA_WR42, 0x0000ffff, MGA_WR60, 0x0000ffff);
+
+	/* Padding required due to hardware bug.
+	 */
+	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
+		  MGA_DMAPAD, 0xffffffff,
+		  MGA_DMAPAD, 0xffffffff,
+		  MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
+			       MGA_WMODE_START | dev_priv->wagp_enable));
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g400_emit_pipe(drm_mga_private_t *dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int pipe = sarea_priv->warp_pipe;
+	DMA_LOCALS;
+
+/*	printk("mga_g400_emit_pipe %x\n", pipe); */
+
+	BEGIN_DMA(10);
+
+	DMA_BLOCK(MGA_WIADDR2, MGA_WMODE_SUSPEND,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+	if (pipe & MGA_T2) {
+		DMA_BLOCK(MGA_WVRTXSZ, 0x00001e09,
+			  MGA_DMAPAD, 0x00000000,
+			  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+		DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
+			  MGA_WACCEPTSEQ, 0x00000000,
+			  MGA_WACCEPTSEQ, 0x00000000,
+			  MGA_WACCEPTSEQ, 0x1e000000);
+	} else {
+		if (dev_priv->warp_pipe & MGA_T2) {
+			/* Flush the WARP pipe */
+			DMA_BLOCK(MGA_YDST, 0x00000000,
+				  MGA_FXLEFT, 0x00000000,
+				  MGA_FXRIGHT, 0x00000001,
+				  MGA_DWGCTL, MGA_DWGCTL_FLUSH);
+
+			DMA_BLOCK(MGA_LEN + MGA_EXEC, 0x00000001,
+				  MGA_DWGSYNC, 0x00007000,
+				  MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
+				  MGA_LEN + MGA_EXEC, 0x00000000);
+
+			DMA_BLOCK(MGA_TEXCTL2, (MGA_DUALTEX |
+						MGA_G400_TC2_MAGIC),
+				  MGA_LEN + MGA_EXEC, 0x00000000,
+				  MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
+				  MGA_DMAPAD, 0x00000000);
+		}
+
+		DMA_BLOCK(MGA_WVRTXSZ, 0x00001807,
+			  MGA_DMAPAD, 0x00000000,
+			  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+		DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
+			  MGA_WACCEPTSEQ, 0x00000000,
+			  MGA_WACCEPTSEQ, 0x00000000,
+			  MGA_WACCEPTSEQ, 0x18000000);
+	}
+
+	DMA_BLOCK(MGA_WFLAG, 0x00000000,
+		  MGA_WFLAG1, 0x00000000,
+		  MGA_WR56, MGA_G400_WR56_MAGIC, MGA_DMAPAD, 0x00000000);
+
+	DMA_BLOCK(MGA_WR49, 0x00000000,	/* tex0              */
+		  MGA_WR57, 0x00000000,	/* tex0              */
+		  MGA_WR53, 0x00000000,	/* tex1              */
+		  MGA_WR61, 0x00000000);	/* tex1              */
+
+	DMA_BLOCK(MGA_WR54, MGA_G400_WR_MAGIC,	/* tex0 width        */
+		  MGA_WR62, MGA_G400_WR_MAGIC,	/* tex0 height       */
+		  MGA_WR52, MGA_G400_WR_MAGIC,	/* tex1 width        */
+		  MGA_WR60, MGA_G400_WR_MAGIC);	/* tex1 height       */
+
+	/* Padding required due to hardware bug */
+	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
+		  MGA_DMAPAD, 0xffffffff,
+		  MGA_DMAPAD, 0xffffffff,
+		  MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
+				MGA_WMODE_START | dev_priv->wagp_enable));
+
+	ADVANCE_DMA();
+}
+
+static void mga_g200_emit_state(drm_mga_private_t *dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int dirty = sarea_priv->dirty;
+
+	if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
+		mga_g200_emit_pipe(dev_priv);
+		dev_priv->warp_pipe = sarea_priv->warp_pipe;
+	}
+
+	if (dirty & MGA_UPLOAD_CONTEXT) {
+		mga_g200_emit_context(dev_priv);
+		sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
+	}
+
+	if (dirty & MGA_UPLOAD_TEX0) {
+		mga_g200_emit_tex0(dev_priv);
+		sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
+	}
+}
+
+static void mga_g400_emit_state(drm_mga_private_t *dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int dirty = sarea_priv->dirty;
+	int multitex = sarea_priv->warp_pipe & MGA_T2;
+
+	if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
+		mga_g400_emit_pipe(dev_priv);
+		dev_priv->warp_pipe = sarea_priv->warp_pipe;
+	}
+
+	if (dirty & MGA_UPLOAD_CONTEXT) {
+		mga_g400_emit_context(dev_priv);
+		sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
+	}
+
+	if (dirty & MGA_UPLOAD_TEX0) {
+		mga_g400_emit_tex0(dev_priv);
+		sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
+	}
+
+	if ((dirty & MGA_UPLOAD_TEX1) && multitex) {
+		mga_g400_emit_tex1(dev_priv);
+		sarea_priv->dirty &= ~MGA_UPLOAD_TEX1;
+	}
+}
+
+/* ================================================================
+ * SAREA state verification
+ */
+
+/* Disallow all write destinations except the front and back buffers.
+ */
+static int mga_verify_context(drm_mga_private_t *dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+
+	if (ctx->dstorg != dev_priv->front_offset &&
+	    ctx->dstorg != dev_priv->back_offset) {
+		DRM_ERROR("*** bad DSTORG: %x (front %x, back %x)\n\n",
+			  ctx->dstorg, dev_priv->front_offset,
+			  dev_priv->back_offset);
+		ctx->dstorg = 0;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Disallow texture reads from PCI space.
+ */
+static int mga_verify_tex(drm_mga_private_t *dev_priv, int unit)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit];
+	unsigned int org;
+
+	org = tex->texorg & (MGA_TEXORGMAP_MASK | MGA_TEXORGACC_MASK);
+
+	if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) {
+		DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit);
+		tex->texorg = 0;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mga_verify_state(drm_mga_private_t *dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int dirty = sarea_priv->dirty;
+	int ret = 0;
+
+	if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
+
+	if (dirty & MGA_UPLOAD_CONTEXT)
+		ret |= mga_verify_context(dev_priv);
+
+	if (dirty & MGA_UPLOAD_TEX0)
+		ret |= mga_verify_tex(dev_priv, 0);
+
+	if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
+		if (dirty & MGA_UPLOAD_TEX1)
+			ret |= mga_verify_tex(dev_priv, 1);
+
+		if (dirty & MGA_UPLOAD_PIPE)
+			ret |= (sarea_priv->warp_pipe >= MGA_MAX_G400_PIPES);
+	} else {
+		if (dirty & MGA_UPLOAD_PIPE)
+			ret |= (sarea_priv->warp_pipe >= MGA_MAX_G200_PIPES);
+	}
+
+	return (ret == 0);
+}
+
+static int mga_verify_iload(drm_mga_private_t *dev_priv,
+			    unsigned int dstorg, unsigned int length)
+{
+	if (dstorg < dev_priv->texture_offset ||
+	    dstorg + length > (dev_priv->texture_offset +
+			       dev_priv->texture_size)) {
+		DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg);
+		return -EINVAL;
+	}
+
+	if (length & MGA_ILOAD_MASK) {
+		DRM_ERROR("*** bad iload length: 0x%x\n",
+			  length & MGA_ILOAD_MASK);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
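+/* Reject blits that would stream system memory over plain PCI: per the
+ * MGA_SRCORG definitions, bit 0 of SRCORG/DSTORG selects the map
+ * (framebuffer vs. system memory) and bit 1 the access path (PCI vs.
+ * AGP), so the rejected low-bit pattern 0x1 is
+ * MGA_SRCMAP_SYSMEM | MGA_SRCACC_PCI.
+ */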
+static int mga_verify_blit(drm_mga_private_t *dev_priv,
+			   unsigned int srcorg, unsigned int dstorg)
+{
+	if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
+	    (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) {
+		DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* ================================================================
+ *
+ */
+
+static void mga_dma_dispatch_clear(struct drm_device *dev, drm_mga_clear_t *clear)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int nbox = sarea_priv->nbox;
+	int i;
+	DMA_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_DMA(1);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
+
+	ADVANCE_DMA();
+
+	for (i = 0; i < nbox; i++) {
+		struct drm_clip_rect *box = &pbox[i];
+		u32 height = box->y2 - box->y1;
+
+		DRM_DEBUG("   from=%d,%d to=%d,%d\n",
+			  box->x1, box->y1, box->x2, box->y2);
+
+		if (clear->flags & MGA_FRONT) {
+			BEGIN_DMA(2);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_PLNWT, clear->color_mask,
+				  MGA_YDSTLEN, (box->y1 << 16) | height,
+				  MGA_FXBNDRY, (box->x2 << 16) | box->x1);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_FCOL, clear->clear_color,
+				  MGA_DSTORG, dev_priv->front_offset,
+				  MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
+
+			ADVANCE_DMA();
+		}
+
+		if (clear->flags & MGA_BACK) {
+			BEGIN_DMA(2);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_PLNWT, clear->color_mask,
+				  MGA_YDSTLEN, (box->y1 << 16) | height,
+				  MGA_FXBNDRY, (box->x2 << 16) | box->x1);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_FCOL, clear->clear_color,
+				  MGA_DSTORG, dev_priv->back_offset,
+				  MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
+
+			ADVANCE_DMA();
+		}
+
+		if (clear->flags & MGA_DEPTH) {
+			BEGIN_DMA(2);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_PLNWT, clear->depth_mask,
+				  MGA_YDSTLEN, (box->y1 << 16) | height,
+				  MGA_FXBNDRY, (box->x2 << 16) | box->x1);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_FCOL, clear->clear_depth,
+				  MGA_DSTORG, dev_priv->depth_offset,
+				  MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
+
+			ADVANCE_DMA();
+		}
+
+	}
+
+	BEGIN_DMA(1);
+
+	/* Force reset of DWGCTL */
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
+
+	ADVANCE_DMA();
+
+	FLUSH_DMA();
+}
+
+static void mga_dma_dispatch_swap(struct drm_device *dev)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int nbox = sarea_priv->nbox;
+	int i;
+	DMA_LOCALS;
+	DRM_DEBUG("\n");
+
+	sarea_priv->last_frame.head = dev_priv->prim.tail;
+	sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap;
+
+	BEGIN_DMA(4 + nbox);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
+
+	DMA_BLOCK(MGA_DSTORG, dev_priv->front_offset,
+		  MGA_MACCESS, dev_priv->maccess,
+		  MGA_SRCORG, dev_priv->back_offset,
+		  MGA_AR5, dev_priv->front_pitch);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_PLNWT, 0xffffffff, MGA_DWGCTL, MGA_DWGCTL_COPY);
+
+	for (i = 0; i < nbox; i++) {
+		struct drm_clip_rect *box = &pbox[i];
+		u32 height = box->y2 - box->y1;
+		u32 start = box->y1 * dev_priv->front_pitch;
+
+		DRM_DEBUG("   from=%d,%d to=%d,%d\n",
+			  box->x1, box->y1, box->x2, box->y2);
+
+		DMA_BLOCK(MGA_AR0, start + box->x2 - 1,
+			  MGA_AR3, start + box->x1,
+			  MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1,
+			  MGA_YDSTLEN + MGA_EXEC, (box->y1 << 16) | height);
+	}
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_PLNWT, ctx->plnwt,
+		  MGA_SRCORG, dev_priv->front_offset, MGA_DWGCTL, ctx->dwgctl);
+
+	ADVANCE_DMA();
+
+	FLUSH_DMA();
+
+	DRM_DEBUG("... done.\n");
+}
+
+static void mga_dma_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	u32 address = (u32) buf->bus_address;
+	u32 length = (u32) buf->used;
+	int i = 0;
+	DMA_LOCALS;
+	DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
+
+	if (buf->used) {
+		buf_priv->dispatched = 1;
+
+		MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);
+
+		do {
+			if (i < sarea_priv->nbox) {
+				mga_emit_clip_rect(dev_priv,
+						   &sarea_priv->boxes[i]);
+			}
+
+			BEGIN_DMA(1);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_DMAPAD, 0x00000000,
+				  MGA_SECADDRESS, (address |
+						   MGA_DMA_VERTEX),
+				  MGA_SECEND, ((address + length) |
+					       dev_priv->dma_access));
+
+			ADVANCE_DMA();
+		} while (++i < sarea_priv->nbox);
+	}
+
+	if (buf_priv->discard) {
+		AGE_BUFFER(buf_priv);
+		buf->pending = 0;
+		buf->used = 0;
+		buf_priv->dispatched = 0;
+
+		mga_freelist_put(dev, buf);
+	}
+
+	FLUSH_DMA();
+}
+
+static void mga_dma_dispatch_indices(struct drm_device *dev, struct drm_buf *buf,
+				     unsigned int start, unsigned int end)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	u32 address = (u32) buf->bus_address;
+	int i = 0;
+	DMA_LOCALS;
+	DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end);
+
+	if (start != end) {
+		buf_priv->dispatched = 1;
+
+		MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);
+
+		do {
+			if (i < sarea_priv->nbox) {
+				mga_emit_clip_rect(dev_priv,
+						   &sarea_priv->boxes[i]);
+			}
+
+			BEGIN_DMA(1);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_DMAPAD, 0x00000000,
+				  MGA_SETUPADDRESS, address + start,
+				  MGA_SETUPEND, ((address + end) |
+						 dev_priv->dma_access));
+
+			ADVANCE_DMA();
+		} while (++i < sarea_priv->nbox);
+	}
+
+	if (buf_priv->discard) {
+		AGE_BUFFER(buf_priv);
+		buf->pending = 0;
+		buf->used = 0;
+		buf_priv->dispatched = 0;
+
+		mga_freelist_put(dev, buf);
+	}
+
+	FLUSH_DMA();
+}
+
+/* This copies a 64-byte-aligned AGP region to the framebuffer with a
+ * standard blit; the ioctl needs to do the checking.
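+ *
+ * The transfer is programmed as a blit 64 bytes wide: PITCH and AR5
+ * are set to 64, x runs 0..63, and the executed YDSTLEN write supplies
+ * length / 64 rows starting at y = 0, so a 64-byte-aligned length maps
+ * onto whole rows.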
+ */
+static void mga_dma_dispatch_iload(struct drm_device *dev, struct drm_buf *buf,
+				   unsigned int dstorg, unsigned int length)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+	drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
+	u32 srcorg =
+	    buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM;
+	u32 y2;
+	DMA_LOCALS;
+	DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
+
+	y2 = length / 64;
+
+	BEGIN_DMA(5);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
+
+	DMA_BLOCK(MGA_DSTORG, dstorg,
+		  MGA_MACCESS, 0x00000000, MGA_SRCORG, srcorg, MGA_AR5, 64);
+
+	DMA_BLOCK(MGA_PITCH, 64,
+		  MGA_PLNWT, 0xffffffff,
+		  MGA_DMAPAD, 0x00000000, MGA_DWGCTL, MGA_DWGCTL_COPY);
+
+	DMA_BLOCK(MGA_AR0, 63,
+		  MGA_AR3, 0,
+		  MGA_FXBNDRY, (63 << 16) | 0, MGA_YDSTLEN + MGA_EXEC, y2);
+
+	DMA_BLOCK(MGA_PLNWT, ctx->plnwt,
+		  MGA_SRCORG, dev_priv->front_offset,
+		  MGA_PITCH, dev_priv->front_pitch, MGA_DWGSYNC, 0x00007000);
+
+	ADVANCE_DMA();
+
+	AGE_BUFFER(buf_priv);
+
+	buf->pending = 0;
+	buf->used = 0;
+	buf_priv->dispatched = 0;
+
+	mga_freelist_put(dev, buf);
+
+	FLUSH_DMA();
+}
+
+static void mga_dma_dispatch_blit(struct drm_device *dev, drm_mga_blit_t *blit)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int nbox = sarea_priv->nbox;
+	u32 scandir = 0, i;
+	DMA_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_DMA(4 + nbox);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
+
+	DMA_BLOCK(MGA_DWGCTL, MGA_DWGCTL_COPY,
+		  MGA_PLNWT, blit->planemask,
+		  MGA_SRCORG, blit->srcorg, MGA_DSTORG, blit->dstorg);
+
+	DMA_BLOCK(MGA_SGN, scandir,
+		  MGA_MACCESS, dev_priv->maccess,
+		  MGA_AR5, blit->ydir * blit->src_pitch,
+		  MGA_PITCH, blit->dst_pitch);
+
+	for (i = 0; i < nbox; i++) {
+		int srcx = pbox[i].x1 + blit->delta_sx;
+		int srcy = pbox[i].y1 + blit->delta_sy;
+		int dstx = pbox[i].x1 + blit->delta_dx;
+		int dsty = pbox[i].y1 + blit->delta_dy;
+		int h = pbox[i].y2 - pbox[i].y1;
+		int w = pbox[i].x2 - pbox[i].x1 - 1;
+		int start;
+
+		if (blit->ydir == -1)
+			srcy = blit->height - srcy - 1;
+
+		start = srcy * blit->src_pitch + srcx;
+
+		DMA_BLOCK(MGA_AR0, start + w,
+			  MGA_AR3, start,
+			  MGA_FXBNDRY, ((dstx + w) << 16) | (dstx & 0xffff),
+			  MGA_YDSTLEN + MGA_EXEC, (dsty << 16) | h);
+	}
+
+	/* Do something to flush AGP?
+	 */
+
+	/* Force reset of DWGCTL */
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_PLNWT, ctx->plnwt,
+		  MGA_PITCH, dev_priv->front_pitch, MGA_DWGCTL, ctx->dwgctl);
+
+	ADVANCE_DMA();
+}
+
+/* ================================================================
+ *
+ */
+
+static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_clear_t *clear = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	mga_dma_dispatch_clear(dev, clear);
+
+	/* Make sure we restore the 3D state next time.
+	 */
+	dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
+
+	return 0;
+}
+
+static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	mga_dma_dispatch_swap(dev);
+
+	/* Make sure we restore the 3D state next time.
+	 */
+	dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
+
+	return 0;
+}
+
+static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_mga_buf_priv_t *buf_priv;
+	drm_mga_vertex_t *vertex = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
+		return -EINVAL;
+	buf = dma->buflist[vertex->idx];
+	buf_priv = buf->dev_private;
+
+	buf->used = vertex->used;
+	buf_priv->discard = vertex->discard;
+
+	if (!mga_verify_state(dev_priv)) {
+		if (vertex->discard) {
+			if (buf_priv->dispatched == 1)
+				AGE_BUFFER(buf_priv);
+			buf_priv->dispatched = 0;
+			mga_freelist_put(dev, buf);
+		}
+		return -EINVAL;
+	}
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	mga_dma_dispatch_vertex(dev, buf);
+
+	return 0;
+}
+
+static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_mga_buf_priv_t *buf_priv;
+	drm_mga_indices_t *indices = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (indices->idx < 0 || indices->idx >= dma->buf_count)
+		return -EINVAL;
+
+	buf = dma->buflist[indices->idx];
+	buf_priv = buf->dev_private;
+
+	buf_priv->discard = indices->discard;
+
+	if (!mga_verify_state(dev_priv)) {
+		if (indices->discard) {
+			if (buf_priv->dispatched == 1)
+				AGE_BUFFER(buf_priv);
+			buf_priv->dispatched = 0;
+			mga_freelist_put(dev, buf);
+		}
+		return -EINVAL;
+	}
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	mga_dma_dispatch_indices(dev, buf, indices->start, indices->end);
+
+	return 0;
+}
+
+static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	struct drm_buf *buf;
+	drm_mga_buf_priv_t *buf_priv;
+	drm_mga_iload_t *iload = data;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+#if 0
+	if (mga_do_wait_for_idle(dev_priv) < 0) {
+		if (MGA_DMA_DEBUG)
+			DRM_INFO("-EBUSY\n");
+		return -EBUSY;
+	}
+#endif
+	if (iload->idx < 0 || iload->idx >= dma->buf_count)
+		return -EINVAL;
+
+	buf = dma->buflist[iload->idx];
+	buf_priv = buf->dev_private;
+
+	if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) {
+		mga_freelist_put(dev, buf);
+		return -EINVAL;
+	}
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length);
+
+	/* Make sure we restore the 3D state next time.
+	 */
+	dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
+
+	return 0;
+}
+
+static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_blit_t *blit = data;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
+
+	if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg))
+		return -EINVAL;
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	mga_dma_dispatch_blit(dev, blit);
+
+	/* Make sure we restore the 3D state next time.
+	 */
+	dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
+
+	return 0;
+}
+
+static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_getparam_t *param = data;
+	int value;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+	switch (param->param) {
+	case MGA_PARAM_IRQ_NR:
+		value = drm_dev_to_irq(dev);
+		break;
+	case MGA_PARAM_CARD_TYPE:
+		value = dev_priv->chipset;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
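+
+/* Illustrative user-space call for the ioctl above (a sketch, not part
+ * of the driver; it assumes the UAPI header <drm/mga_drm.h> and an MGA
+ * node at /dev/dri/card0, and elides error handling):
+ *
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <drm/mga_drm.h>
+ *
+ *	int mga_card_type(void)
+ *	{
+ *		int fd = open("/dev/dri/card0", O_RDWR);
+ *		int value = -1;
+ *		drm_mga_getparam_t gp = {
+ *			.param = MGA_PARAM_CARD_TYPE,
+ *			.value = &value,
+ *		};
+ *
+ *		if (fd >= 0 && ioctl(fd, DRM_IOCTL_MGA_GETPARAM, &gp) == 0)
+ *			return value;
+ *		return -1;
+ *	}
+ */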
+
+static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	u32 *fence = data;
+	DMA_LOCALS;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+	/* I would normally do this assignment in the declaration of fence,
+	 * but dev_priv may be NULL.
+	 */
+
+	*fence = dev_priv->next_fence_to_post;
+	dev_priv->next_fence_to_post++;
+
+	BEGIN_DMA(1);
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000, MGA_SOFTRAP, 0x00000000);
+	ADVANCE_DMA();
+
+	return 0;
+}
+
+static int mga_wait_fence(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	u32 *fence = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+	mga_driver_fence_wait(dev, fence);
+	return 0;
+}
+
+struct drm_ioctl_desc mga_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+};
+
+int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
diff --git a/linux-imx/drivers/gpu/drm/mga/mga_warp.c b/linux-imx/drivers/gpu/drm/mga/mga_warp.c
new file mode 100644
index 0000000..0b76352
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mga/mga_warp.c
@@ -0,0 +1,169 @@
+/* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*-
+ * Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/ihex.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/mga_drm.h>
+#include "mga_drv.h"
+
+#define FIRMWARE_G200 "matrox/g200_warp.fw"
+#define FIRMWARE_G400 "matrox/g400_warp.fw"
+
+MODULE_FIRMWARE(FIRMWARE_G200);
+MODULE_FIRMWARE(FIRMWARE_G400);
+
+#define MGA_WARP_CODE_ALIGN		256	/* in bytes */
+
+#define WARP_UCODE_SIZE(size)		ALIGN(size, MGA_WARP_CODE_ALIGN)
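+/* e.g. WARP_UCODE_SIZE(300) == 512: each pipe's code is padded up to the
+ * next 256-byte boundary.
+ */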
+
+int mga_warp_install_microcode(drm_mga_private_t *dev_priv)
+{
+	unsigned char *vcbase = dev_priv->warp->handle;
+	unsigned long pcbase = dev_priv->warp->offset;
+	const char *firmware_name;
+	struct platform_device *pdev;
+	const struct firmware *fw = NULL;
+	const struct ihex_binrec *rec;
+	unsigned int size;
+	int n_pipes, where;
+	int rc = 0;
+
+	switch (dev_priv->chipset) {
+	case MGA_CARD_TYPE_G400:
+	case MGA_CARD_TYPE_G550:
+		firmware_name = FIRMWARE_G400;
+		n_pipes = MGA_MAX_G400_PIPES;
+		break;
+	case MGA_CARD_TYPE_G200:
+		firmware_name = FIRMWARE_G200;
+		n_pipes = MGA_MAX_G200_PIPES;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	pdev = platform_device_register_simple("mga_warp", 0, NULL, 0);
+	if (IS_ERR(pdev)) {
+		DRM_ERROR("mga: Failed to register microcode\n");
+		return PTR_ERR(pdev);
+	}
+	rc = request_ihex_firmware(&fw, firmware_name, &pdev->dev);
+	platform_device_unregister(pdev);
+	if (rc) {
+		DRM_ERROR("mga: Failed to load microcode \"%s\"\n",
+			  firmware_name);
+		return rc;
+	}
+
+	size = 0;
+	where = 0;
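+	/* First pass: count the ihex records (one per WARP pipe) and sum
+	 * their aligned sizes so the image can be validated before copying.
+	 */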
+	for (rec = (const struct ihex_binrec *)fw->data;
+	     rec;
+	     rec = ihex_next_binrec(rec)) {
+		size += WARP_UCODE_SIZE(be16_to_cpu(rec->len));
+		where++;
+	}
+
+	if (where != n_pipes) {
+		DRM_ERROR("mga: Invalid microcode \"%s\"\n", firmware_name);
+		rc = -EINVAL;
+		goto out;
+	}
+	size = PAGE_ALIGN(size);
+	DRM_DEBUG("MGA ucode size = %d bytes\n", size);
+	if (size > dev_priv->warp->size) {
+		DRM_ERROR("microcode too large! (%u > %lu)\n",
+			  size, dev_priv->warp->size);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
+
+	where = 0;
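+	/* Second pass: copy each pipe's microcode into the WARP region,
+	 * recording its bus address and advancing by the aligned size so
+	 * every pipe starts on a 256-byte boundary.
+	 */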
+	for (rec = (const struct ihex_binrec *)fw->data;
+	     rec;
+	     rec = ihex_next_binrec(rec)) {
+		unsigned int src_size, dst_size;
+
+		DRM_DEBUG(" pcbase = 0x%08lx  vcbase = %p\n", pcbase, vcbase);
+		dev_priv->warp_pipe_phys[where] = pcbase;
+		src_size = be16_to_cpu(rec->len);
+		dst_size = WARP_UCODE_SIZE(src_size);
+		memcpy(vcbase, rec->data, src_size);
+		pcbase += dst_size;
+		vcbase += dst_size;
+		where++;
+	}
+
+out:
+	release_firmware(fw);
+	return rc;
+}
+
+#define WMISC_EXPECTED		(MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)
+
+int mga_warp_init(drm_mga_private_t *dev_priv)
+{
+	u32 wmisc;
+
+	/* FIXME: Get rid of these damned magic numbers...
+	 */
+	switch (dev_priv->chipset) {
+	case MGA_CARD_TYPE_G400:
+	case MGA_CARD_TYPE_G550:
+		MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
+		MGA_WRITE(MGA_WGETMSB, 0x00000E00);
+		MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
+		MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
+		break;
+	case MGA_CARD_TYPE_G200:
+		MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND);
+		MGA_WRITE(MGA_WGETMSB, 0x1606);
+		MGA_WRITE(MGA_WVRTXSZ, 7);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
+			      MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE));
+	wmisc = MGA_READ(MGA_WMISC);
+	if (wmisc != WMISC_EXPECTED) {
+		DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
+			  wmisc, WMISC_EXPECTED);
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/mgag200/Kconfig b/linux-imx/drivers/gpu/drm/mgag200/Kconfig
new file mode 100644
index 0000000..b487cde
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mgag200/Kconfig
@@ -0,0 +1,15 @@
+config DRM_MGAG200
+	tristate "Kernel modesetting driver for MGA G200 server engines"
+	depends on DRM && PCI
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	help
+	 This is a KMS driver for the MGA G200 server chips; it does
+	 not support the original MGA G200 or any of the desktop
+	 chips. It requires version 0.3.0 of the modesetting userspace
+	 driver, and a version of the mga driver that will fail on
+	 KMS-enabled devices.
+
diff --git a/linux-imx/drivers/gpu/drm/mgag200/Makefile b/linux-imx/drivers/gpu/drm/mgag200/Makefile
new file mode 100644
index 0000000..7db592e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mgag200/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+mgag200-y   := mgag200_main.o mgag200_mode.o \
+	mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o
+
+obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/linux-imx/drivers/gpu/drm/mgag200/mgag200_drv.c b/linux-imx/drivers/gpu/drm/mgag200/mgag200_drv.c
new file mode 100644
index 0000000..122b571
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include <drm/drmP.h>
+
+#include "mgag200_drv.h"
+
+#include <drm/drm_pciids.h>
+
+/*
+ * This is the generic driver code. This binds the driver to the drm core,
+ * which then performs further device association and calls our graphics init
+ * functions
+ */
+int mgag200_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, mgag200_modeset, int, 0400);
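+/* modeset: -1 = auto-detect (default), 0 = disabled, 1 = forced on; see
+ * the checks in mgag200_init() below.
+ */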
+
+static struct drm_driver driver;
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+	{ PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
+	{ PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
+	{ PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
+	{ PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
+	{ PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
+	{ PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return;
+
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
+	kfree(ap);
+}
+
+
+static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	mgag200_kick_out_firmware_fb(pdev);
+
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void mga_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static const struct file_operations mgag200_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = mgag200_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.read = drm_read,
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR,
+	.load = mgag200_driver_load,
+	.unload = mgag200_driver_unload,
+	.fops = &mgag200_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+
+	.gem_init_object = mgag200_gem_init_object,
+	.gem_free_object = mgag200_gem_free_object,
+	.dumb_create = mgag200_dumb_create,
+	.dumb_map_offset = mgag200_dumb_mmap_offset,
+	.dumb_destroy = mgag200_dumb_destroy,
+};
+
+static struct pci_driver mgag200_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = mga_pci_probe,
+	.remove = mga_pci_remove,
+};
+
+static int __init mgag200_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && mgag200_modeset == -1)
+		return -EINVAL;
+#endif
+
+	if (mgag200_modeset == 0)
+		return -EINVAL;
+	return drm_pci_init(&driver, &mgag200_pci_driver);
+}
+
+static void __exit mgag200_exit(void)
+{
+	drm_pci_exit(&driver, &mgag200_pci_driver);
+}
+
+module_init(mgag200_init);
+module_exit(mgag200_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/linux-imx/drivers/gpu/drm/mgag200/mgag200_drv.h b/linux-imx/drivers/gpu/drm/mgag200/mgag200_drv.h
new file mode 100644
index 0000000..988911a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat 
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * 	    Matt Turner
+ *	    Dave Airlie
+ */
+#ifndef __MGAG200_DRV_H__
+#define __MGAG200_DRV_H__
+
+#include <video/vga.h>
+
+#include <drm/drm_fb_helper.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include "mgag200_reg.h"
+
+#define DRIVER_AUTHOR		"Matthew Garrett"
+
+#define DRIVER_NAME		"mgag200"
+#define DRIVER_DESC		"MGA G200 SE"
+#define DRIVER_DATE		"20110418"
+
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		0
+#define DRIVER_PATCHLEVEL	0
+
+#define MGAG200FB_CONN_LIMIT 1
+
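+/* MMIO accessors; they assume a local 'struct mga_device *mdev' is in
+ * scope at the call site.
+ */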
+#define RREG8(reg) ioread8(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)mdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)mdev->rmmio) + (reg))
+
+#define ATTR_INDEX 0x1fc0
+#define ATTR_DATA 0x1fc1
+
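+/* The dummy read of 0x1fda resets the attribute controller's index/data
+ * flip-flop before the index write (0x1fda mirrors VGA Input Status 1).
+ */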
+#define WREG_ATTR(reg, v)					\
+	do {							\
+		RREG8(0x1fda);					\
+		WREG8(ATTR_INDEX, reg);				\
+		WREG8(ATTR_DATA, v);				\
+	} while (0)						\
+
+#define WREG_SEQ(reg, v)					\
+	do {							\
+		WREG8(MGAREG_SEQ_INDEX, reg);			\
+		WREG8(MGAREG_SEQ_DATA, v);			\
+	} while (0)						\
+
+#define WREG_CRT(reg, v)					\
+	do {							\
+		WREG8(MGAREG_CRTC_INDEX, reg);			\
+		WREG8(MGAREG_CRTC_DATA, v);			\
+	} while (0)						\
+
+
+#define WREG_ECRT(reg, v)					\
+	do {							\
+		WREG8(MGAREG_CRTCEXT_INDEX, reg);				\
+		WREG8(MGAREG_CRTCEXT_DATA, v);				\
+	} while (0)						\
+
+#define GFX_INDEX 0x1fce
+#define GFX_DATA 0x1fcf
+
+#define WREG_GFX(reg, v)					\
+	do {							\
+		WREG8(GFX_INDEX, reg);				\
+		WREG8(GFX_DATA, v);				\
+	} while (0)						\
+
+#define DAC_INDEX 0x3c00
+#define DAC_DATA 0x3c0a
+
+#define WREG_DAC(reg, v)					\
+	do {							\
+		WREG8(DAC_INDEX, reg);				\
+		WREG8(DAC_DATA, v);				\
+	} while (0)						\
+
+#define MGA_MISC_OUT 0x1fc2
+#define MGA_MISC_IN 0x1fcc
+
+#define MGAG200_MAX_FB_HEIGHT 4096
+#define MGAG200_MAX_FB_WIDTH 4096
+
+#define MATROX_DPMS_CLEARED (-1)
+
+#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
+#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
+#define to_mga_connector(x) container_of(x, struct mga_connector, base)
+#define to_mga_framebuffer(x) container_of(x, struct mga_framebuffer, base)
+
+struct mga_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+struct mga_fbdev {
+	struct drm_fb_helper helper;
+	struct mga_framebuffer mfb;
+	void *sysram;
+	int size;
+	struct ttm_bo_kmap_obj mapping;
+	int x1, y1, x2, y2; /* dirty rect */
+	spinlock_t dirty_lock;
+};
+
+struct mga_crtc {
+	struct drm_crtc base;
+	u8 lut_r[256], lut_g[256], lut_b[256];
+	int last_dpms;
+	bool enabled;
+};
+
+struct mga_mode_info {
+	bool mode_config_initialized;
+	struct mga_crtc *crtc;
+};
+
+struct mga_encoder {
+	struct drm_encoder base;
+	int last_dpms;
+};
+
+
+struct mga_i2c_chan {
+	struct i2c_adapter adapter;
+	struct drm_device *dev;
+	struct i2c_algo_bit_data bit;
+	int data, clock;
+};
+
+struct mga_connector {
+	struct drm_connector base;
+	struct mga_i2c_chan *i2c;
+};
+
+
+struct mga_mc {
+	resource_size_t			vram_size;
+	resource_size_t			vram_base;
+	resource_size_t			vram_window;
+};
+
+enum mga_type {
+	G200_SE_A,
+	G200_SE_B,
+	G200_WB,
+	G200_EV,
+	G200_EH,
+	G200_ER,
+};
+
+#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
+
+struct mga_device {
+	struct drm_device		*dev;
+	unsigned long			flags;
+
+	resource_size_t			rmmio_base;
+	resource_size_t			rmmio_size;
+	void __iomem			*rmmio;
+
+	drm_local_map_t			*framebuffer;
+
+	struct mga_mc			mc;
+	struct mga_mode_info		mode_info;
+
+	struct mga_fbdev *mfbdev;
+
+	bool				suspended;
+	int				num_crtc;
+	enum mga_type			type;
+	int				has_sdram;
+	struct drm_display_mode		mode;
+
+	int bpp_shifts[4];
+
+	int fb_mtrr;
+
+	struct {
+		struct drm_global_reference mem_global_ref;
+		struct ttm_bo_global_ref bo_global_ref;
+		struct ttm_bo_device bdev;
+	} ttm;
+
+	/* SE model number stored in reg 0x1e24 */
+	u32 unique_rev_id;
+};
+
+
+struct mgag200_bo {
+	struct ttm_buffer_object bo;
+	struct ttm_placement placement;
+	struct ttm_bo_kmap_obj kmap;
+	struct drm_gem_object gem;
+	u32 placements[3];
+	int pin_count;
+};
+#define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem)
+
+static inline struct mgag200_bo *
+mgag200_bo(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct mgag200_bo, bo);
+}
+				/* mgag200_crtc.c */
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			     u16 blue, int regno);
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			     u16 *blue, int regno);
+
+				/* mgag200_mode.c */
+int mgag200_modeset_init(struct mga_device *mdev);
+void mgag200_modeset_fini(struct mga_device *mdev);
+
+				/* mgag200_fb.c */
+int mgag200_fbdev_init(struct mga_device *mdev);
+void mgag200_fbdev_fini(struct mga_device *mdev);
+
+				/* mgag200_main.c */
+int mgag200_framebuffer_init(struct drm_device *dev,
+			     struct mga_framebuffer *mfb,
+			     struct drm_mode_fb_cmd2 *mode_cmd,
+			     struct drm_gem_object *obj);
+
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
+int mgag200_driver_unload(struct drm_device *dev);
+int mgag200_gem_create(struct drm_device *dev,
+		   u32 size, bool iskernel,
+		       struct drm_gem_object **obj);
+int mgag200_gem_init_object(struct drm_gem_object *obj);
+int mgag200_dumb_create(struct drm_file *file,
+			struct drm_device *dev,
+			struct drm_mode_create_dumb *args);
+int mgag200_dumb_destroy(struct drm_file *file,
+			 struct drm_device *dev,
+			 uint32_t handle);
+void mgag200_gem_free_object(struct drm_gem_object *obj);
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+			 struct drm_device *dev,
+			 uint32_t handle,
+			 uint64_t *offset);
+				/* mgag200_i2c.c */
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
+
+int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait);
+void mgag200_bo_unreserve(struct mgag200_bo *bo);
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+		      uint32_t flags, struct mgag200_bo **pastbo);
+int mgag200_mm_init(struct mga_device *mdev);
+void mgag200_mm_fini(struct mga_device *mdev);
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int mgag200_bo_unpin(struct mgag200_bo *bo);
+int mgag200_bo_push_sysram(struct mgag200_bo *bo);
+#endif				/* __MGAG200_DRV_H__ */
diff --git a/linux-imx/drivers/gpu/drm/mgag200/mgag200_fb.c b/linux-imx/drivers/gpu/drm/mgag200/mgag200_fb.c
new file mode 100644
index 0000000..5da824c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Matt Turner
+ *          Dave Airlie
+ */
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include <linux/fb.h>
+
+#include "mgag200_drv.h"
+
+static void mga_dirty_update(struct mga_fbdev *mfbdev,
+			     int x, int y, int width, int height)
+{
+	int i;
+	struct drm_gem_object *obj;
+	struct mgag200_bo *bo;
+	int src_offset, dst_offset;
+	int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
+	int ret;
+	bool unmap = false;
+	bool store_for_later = false;
+	int x2, y2;
+	unsigned long flags;
+
+	obj = mfbdev->mfb.obj;
+	bo = gem_to_mga_bo(obj);
+
+	/*
+	 * try and reserve the BO, if we fail with busy
+	 * then the BO is being moved and we should
+	 * store up the damage until later.
+	 */
+	ret = mgag200_bo_reserve(bo, true);
+	if (ret) {
+		if (ret != -EBUSY)
+			return;
+
+		store_for_later = true;
+	}
+
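+	/* Merge the new rectangle with any damage accumulated from earlier
+	 * deferred updates before deciding whether to flush or defer again.
+	 */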
+	x2 = x + width - 1;
+	y2 = y + height - 1;
+	spin_lock_irqsave(&mfbdev->dirty_lock, flags);
+
+	if (mfbdev->y1 < y)
+		y = mfbdev->y1;
+	if (mfbdev->y2 > y2)
+		y2 = mfbdev->y2;
+	if (mfbdev->x1 < x)
+		x = mfbdev->x1;
+	if (mfbdev->x2 > x2)
+		x2 = mfbdev->x2;
+
+	if (store_for_later) {
+		mfbdev->x1 = x;
+		mfbdev->x2 = x2;
+		mfbdev->y1 = y;
+		mfbdev->y2 = y2;
+		spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+		return;
+	}
+
+	mfbdev->x1 = mfbdev->y1 = INT_MAX;
+	mfbdev->x2 = mfbdev->y2 = 0;
+	spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+
+	if (!bo->kmap.virtual) {
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret) {
+			DRM_ERROR("failed to kmap fb updates\n");
+			mgag200_bo_unreserve(bo);
+			return;
+		}
+		unmap = true;
+	}
+	for (i = y; i <= y2; i++) {
+		/* assume equal stride for now */
+		src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
+		memcpy_toio(bo->kmap.virtual + src_offset,
+			    mfbdev->sysram + src_offset,
+			    (x2 - x + 1) * bpp);
+	}
+	if (unmap)
+		ttm_bo_kunmap(&bo->kmap);
+
+	mgag200_bo_unreserve(bo);
+}
+
+static void mga_fillrect(struct fb_info *info,
+			 const struct fb_fillrect *rect)
+{
+	struct mga_fbdev *mfbdev = info->par;
+	sys_fillrect(info, rect);
+	mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
+			 rect->height);
+}
+
+static void mga_copyarea(struct fb_info *info,
+			 const struct fb_copyarea *area)
+{
+	struct mga_fbdev *mfbdev = info->par;
+	sys_copyarea(info, area);
+	mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
+			 area->height);
+}
+
+static void mga_imageblit(struct fb_info *info,
+			  const struct fb_image *image)
+{
+	struct mga_fbdev *mfbdev = info->par;
+	sys_imageblit(info, image);
+	mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
+			 image->height);
+}
+
+
+static struct fb_ops mgag200fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = mga_fillrect,
+	.fb_copyarea = mga_copyarea,
+	.fb_imageblit = mga_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int mgag200fb_create_object(struct mga_fbdev *afbdev,
+				   struct drm_mode_fb_cmd2 *mode_cmd,
+				   struct drm_gem_object **gobj_p)
+{
+	struct drm_device *dev = afbdev->helper.dev;
+	u32 size;
+	struct drm_gem_object *gobj;
+	int ret = 0;
+
+	size = mode_cmd->pitches[0] * mode_cmd->height;
+	ret = mgag200_gem_create(dev, size, true, &gobj);
+	if (ret)
+		return ret;
+
+	*gobj_p = gobj;
+	return ret;
+}
+
+static int mgag200fb_create(struct drm_fb_helper *helper,
+			   struct drm_fb_helper_surface_size *sizes)
+{
+	struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
+	struct drm_device *dev = mfbdev->helper.dev;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct mga_device *mdev = dev->dev_private;
+	struct fb_info *info;
+	struct drm_framebuffer *fb;
+	struct drm_gem_object *gobj = NULL;
+	struct device *device = &dev->pdev->dev;
+	struct mgag200_bo *bo;
+	int ret;
+	void *sysram;
+	int size;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+
+	ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+		return ret;
+	}
+	bo = gem_to_mga_bo(gobj);
+
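+	/* Allocate a system-memory shadow of the framebuffer; fbcon renders
+	 * into it and mga_dirty_update() copies the dirty regions to VRAM.
+	 */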
+	sysram = vmalloc(size);
+	if (!sysram) {
+		drm_gem_object_unreference_unlocked(gobj);
+		return -ENOMEM;
+	}
+
+	info = framebuffer_alloc(0, device);
+	if (info == NULL) {
+		vfree(sysram);
+		drm_gem_object_unreference_unlocked(gobj);
+		return -ENOMEM;
+	}
+
+	info->par = mfbdev;
+
+	ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
+	if (ret)
+		return ret;
+
+	mfbdev->sysram = sysram;
+	mfbdev->size = size;
+
+	fb = &mfbdev->mfb.base;
+
+	/* setup helper */
+	mfbdev->helper.fb = fb;
+	mfbdev->helper.fbdev = info;
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	strcpy(info->fix.id, "mgadrmfb");
+
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &mgag200fb_ops;
+
+	/* setup aperture base/size for vesafb takeover */
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
+	info->apertures->ranges[0].size = mdev->mc.vram_size;
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
+			       sizes->fb_height);
+
+	info->screen_base = sysram;
+	info->screen_size = size;
+	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+	DRM_DEBUG_KMS("allocated %dx%d\n",
+		      fb->width, fb->height);
+	return 0;
+out:
+	return ret;
+}
+
+static int mga_fbdev_destroy(struct drm_device *dev,
+				struct mga_fbdev *mfbdev)
+{
+	struct fb_info *info;
+	struct mga_framebuffer *mfb = &mfbdev->mfb;
+
+	if (mfbdev->helper.fbdev) {
+		info = mfbdev->helper.fbdev;
+
+		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	if (mfb->obj) {
+		drm_gem_object_unreference_unlocked(mfb->obj);
+		mfb->obj = NULL;
+	}
+	drm_fb_helper_fini(&mfbdev->helper);
+	vfree(mfbdev->sysram);
+	drm_framebuffer_unregister_private(&mfb->base);
+	drm_framebuffer_cleanup(&mfb->base);
+
+	return 0;
+}
+
+static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
+	.gamma_set = mga_crtc_fb_gamma_set,
+	.gamma_get = mga_crtc_fb_gamma_get,
+	.fb_probe = mgag200fb_create,
+};
+
+int mgag200_fbdev_init(struct mga_device *mdev)
+{
+	struct mga_fbdev *mfbdev;
+	int ret;
+
+	mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
+	if (!mfbdev)
+		return -ENOMEM;
+
+	mdev->mfbdev = mfbdev;
+	mfbdev->helper.funcs = &mga_fb_helper_funcs;
+	spin_lock_init(&mfbdev->dirty_lock);
+
+	ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
+				 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
+	if (ret)
+		return ret;
+
+	drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(mdev->dev);
+
+	drm_fb_helper_initial_config(&mfbdev->helper, 32);
+
+	return 0;
+}
+
+void mgag200_fbdev_fini(struct mga_device *mdev)
+{
+	if (!mdev->mfbdev)
+		return;
+
+	mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
+}
diff --git a/linux-imx/drivers/gpu/drm/mgag200/mgag200_i2c.c b/linux-imx/drivers/gpu/drm/mgag200/mgag200_i2c.c
new file mode 100644
index 0000000..d3dcf54
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <drm/drmP.h>
+
+#include "mgag200_drv.h"
+
+static int mga_i2c_read_gpio(struct mga_device *mdev)
+{
+	WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+	return RREG8(DAC_DATA);
+}
+
+static void mga_i2c_set_gpio(struct mga_device *mdev, int mask, int val)
+{
+	int tmp;
+
+	WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+	tmp = (RREG8(DAC_DATA) & mask) | val;
+	WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+	WREG_DAC(MGA1064_GEN_IO_DATA, 0);
+}
+
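+/* Emulate an open-drain line: writing "1" tristates the pin (its bit is
+ * cleared in GEN_IO_CTL so the external pull-up raises the line), while
+ * writing "0" enables the output driver with the data latch held low.
+ */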
+static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
+{
+	if (state)
+		state = 0;
+	else
+		state = mask;
+	mga_i2c_set_gpio(mdev, ~mask, state);
+}
+
+static void mga_gpio_setsda(void *data, int state)
+{
+	struct mga_i2c_chan *i2c = data;
+	struct mga_device *mdev = i2c->dev->dev_private;
+	mga_i2c_set(mdev, i2c->data, state);
+}
+
+static void mga_gpio_setscl(void *data, int state)
+{
+	struct mga_i2c_chan *i2c = data;
+	struct mga_device *mdev = i2c->dev->dev_private;
+	mga_i2c_set(mdev, i2c->clock, state);
+}
+
+static int mga_gpio_getsda(void *data)
+{
+	struct mga_i2c_chan *i2c = data;
+	struct mga_device *mdev = i2c->dev->dev_private;
+	return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
+}
+
+static int mga_gpio_getscl(void *data)
+{
+	struct mga_i2c_chan *i2c = data;
+	struct mga_device *mdev = i2c->dev->dev_private;
+	return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
+}
+
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
+{
+	struct mga_device *mdev = dev->dev_private;
+	struct mga_i2c_chan *i2c;
+	int ret;
+	int data, clock;
+
+	WREG_DAC(MGA1064_GEN_IO_CTL2, 1);
+	WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
+	WREG_DAC(MGA1064_GEN_IO_CTL, 0);
+
+	switch (mdev->type) {
+	case G200_SE_A:
+	case G200_SE_B:
+	case G200_EV:
+	case G200_WB:
+		data = 1;
+		clock = 2;
+		break;
+	case G200_EH:
+	case G200_ER:
+		data = 2;
+		clock = 1;
+		break;
+	default:
+		data = 2;
+		clock = 8;
+		break;
+	}
+
+	i2c = kzalloc(sizeof(struct mga_i2c_chan), GFP_KERNEL);
+	if (!i2c)
+		return NULL;
+
+	i2c->data = data;
+	i2c->clock = clock;
+	i2c->adapter.owner = THIS_MODULE;
+	i2c->adapter.class = I2C_CLASS_DDC;
+	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->dev = dev;
+	i2c_set_adapdata(&i2c->adapter, i2c);
+	snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c");
+
+	i2c->adapter.algo_data = &i2c->bit;
+
+	i2c->bit.udelay = 10;
+	i2c->bit.timeout = 2;
+	i2c->bit.data = i2c;
+	i2c->bit.setsda		= mga_gpio_setsda;
+	i2c->bit.setscl		= mga_gpio_setscl;
+	i2c->bit.getsda		= mga_gpio_getsda;
+	i2c->bit.getscl		= mga_gpio_getscl;
+
+	ret = i2c_bit_add_bus(&i2c->adapter);
+	if (ret) {
+		kfree(i2c);
+		i2c = NULL;
+	}
+	return i2c;
+}
+
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c)
+{
+	if (!i2c)
+		return;
+	i2c_del_adapter(&i2c->adapter);
+	kfree(i2c);
+}
+
diff --git a/linux-imx/drivers/gpu/drm/mgag200/mgag200_main.c b/linux-imx/drivers/gpu/drm/mgag200/mgag200_main.c
new file mode 100644
index 0000000..dafe049
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *          Matt Turner
+ *          Dave Airlie
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "mgag200_drv.h"
+
+static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
+	if (mga_fb->obj)
+		drm_gem_object_unreference_unlocked(mga_fb->obj);
+	drm_framebuffer_cleanup(fb);
+	kfree(fb);
+}
+
+static const struct drm_framebuffer_funcs mga_fb_funcs = {
+	.destroy = mga_user_framebuffer_destroy,
+};
+
+int mgag200_framebuffer_init(struct drm_device *dev,
+			     struct mga_framebuffer *gfb,
+			     struct drm_mode_fb_cmd2 *mode_cmd,
+			     struct drm_gem_object *obj)
+{
+	int ret;
+	
+	drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+	gfb->obj = obj;
+	ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
+	if (ret) {
+		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+static struct drm_framebuffer *
+mgag200_user_framebuffer_create(struct drm_device *dev,
+				struct drm_file *filp,
+				struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct mga_framebuffer *mga_fb;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+	if (obj == NULL)
+		return ERR_PTR(-ENOENT);
+
+	mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
+	if (!mga_fb) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		kfree(mga_fb);
+		return ERR_PTR(ret);
+	}
+	return &mga_fb->base;
+}
+
+static const struct drm_mode_config_funcs mga_mode_funcs = {
+	.fb_create = mgag200_user_framebuffer_create,
+};
+
+static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
+{
+	int offset;
+	int orig;
+	int test1, test2;
+	int orig1, orig2;
+
+	/* Probe */
+	orig = ioread16(mem);
+	iowrite16(0, mem);
+
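+	/* Walk the aperture in 16 KiB steps writing a test pattern; stop at
+	 * the first offset that fails to read back, or when a write aliases
+	 * back to offset 0, which presumably indicates address wrap-around
+	 * on cards with less memory than the window.
+	 */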
+	for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) {
+		orig1 = ioread8(mem + offset);
+		orig2 = ioread8(mem + offset + 0x100);
+
+		iowrite16(0xaa55, mem + offset);
+		iowrite16(0xaa55, mem + offset + 0x100);
+
+		test1 = ioread16(mem + offset);
+		test2 = ioread16(mem);
+
+		iowrite16(orig1, mem + offset);
+		iowrite16(orig2, mem + offset + 0x100);
+
+		if (test1 != 0xaa55) {
+			break;
+		}
+
+		if (test2) {
+			break;
+		}
+	}
+
+	iowrite16(orig, mem);
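+	/* Report the size 64 KiB short of the first failing offset, leaving
+	 * a margin below the probed limit.
+	 */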
+	return offset - 65536;
+}
+
+/* Map the framebuffer from the card and configure the core */
+static int mga_vram_init(struct mga_device *mdev)
+{
+	void __iomem *mem;
+	struct apertures_struct *aper = alloc_apertures(1);
+	if (!aper)
+		return -ENOMEM;
+
+	/* BAR 0 is VRAM */
+	mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
+	mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
+
+	aper->ranges[0].base = mdev->mc.vram_base;
+	aper->ranges[0].size = mdev->mc.vram_window;
+
+	remove_conflicting_framebuffers(aper, "mgafb", true);
+	kfree(aper);
+
+	if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
+				"mgadrmfb_vram")) {
+		DRM_ERROR("can't reserve VRAM\n");
+		return -ENXIO;
+	}
+
+	mem = pci_iomap(mdev->dev->pdev, 0, 0);
+
+	mdev->mc.vram_size = mga_probe_vram(mdev, mem);
+
+	pci_iounmap(mdev->dev->pdev, mem);
+
+	return 0;
+}
+
+static int mgag200_device_init(struct drm_device *dev,
+			       uint32_t flags)
+{
+	struct mga_device *mdev = dev->dev_private;
+	int ret;
+	u32 option;
+
+	mdev->type = flags;
+
+	/* Hardcode the number of CRTCs to 1 */
+	mdev->num_crtc = 1;
+
+	pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
+	mdev->has_sdram = !(option & (1 << 14));
+
+	/* BAR 0 is the framebuffer, BAR 1 contains registers */
+	mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
+	mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
+
+	if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
+				"mgadrmfb_mmio")) {
+		DRM_ERROR("can't reserve mmio registers\n");
+		return -ENOMEM;
+	}
+
+	mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
+	if (mdev->rmmio == NULL)
+		return -ENOMEM;
+
+	/* stash G200 SE model number for later use */
+	if (IS_G200_SE(mdev))
+		mdev->unique_rev_id = RREG32(0x1e24);
+
+	ret = mga_vram_init(mdev);
+	if (ret)
+		return ret;
+
+	mdev->bpp_shifts[0] = 0;
+	mdev->bpp_shifts[1] = 1;
+	mdev->bpp_shifts[2] = 0;
+	mdev->bpp_shifts[3] = 2;
+	return 0;
+}
+
+/*
+ * Functions here will be called by the core once it's bound the driver to
+ * a PCI device
+ */
+
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct mga_device *mdev;
+	int r;
+
+	mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
+	if (mdev == NULL)
+		return -ENOMEM;
+	dev->dev_private = (void *)mdev;
+	mdev->dev = dev;
+
+	r = mgag200_device_init(dev, flags);
+	if (r) {
+		dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+		goto out;
+	}
+	r = mgag200_mm_init(mdev);
+	if (r)
+		goto out;
+
+	drm_mode_config_init(dev);
+	dev->mode_config.funcs = (void *)&mga_mode_funcs;
+	dev->mode_config.preferred_depth = 24;
+	dev->mode_config.prefer_shadow = 1;
+
+	r = mgag200_modeset_init(mdev);
+	if (r)
+		dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+out:
+	if (r)
+		mgag200_driver_unload(dev);
+	return r;
+}
+
+int mgag200_driver_unload(struct drm_device *dev)
+{
+	struct mga_device *mdev = dev->dev_private;
+
+	if (mdev == NULL)
+		return 0;
+	mgag200_modeset_fini(mdev);
+	mgag200_fbdev_fini(mdev);
+	drm_mode_config_cleanup(dev);
+	mgag200_mm_fini(mdev);
+	dev->dev_private = NULL;
+	return 0;
+}
+
+int mgag200_gem_create(struct drm_device *dev,
+		   u32 size, bool iskernel,
+		   struct drm_gem_object **obj)
+{
+	struct mgag200_bo *gbo;
+	int ret;
+
+	*obj = NULL;
+
+	size = roundup(size, PAGE_SIZE);
+	if (size == 0)
+		return -EINVAL;
+
+	ret = mgag200_bo_create(dev, size, 0, 0, &gbo);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("failed to allocate GEM object\n");
+		return ret;
+	}
+	*obj = &gbo->gem;
+	return 0;
+}
+
+int mgag200_dumb_create(struct drm_file *file,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args)
+{
+	int ret;
+	struct drm_gem_object *gobj;
+	u32 handle;
+
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
+
+	ret = mgag200_gem_create(dev, args->size, false,
+			     &gobj);
+	if (ret)
+		return ret;
+
+	ret = drm_gem_handle_create(file, gobj, &handle);
+	drm_gem_object_unreference_unlocked(gobj);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	return 0;
+}
+
+int mgag200_dumb_destroy(struct drm_file *file,
+		     struct drm_device *dev,
+		     uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+
+int mgag200_gem_init_object(struct drm_gem_object *obj)
+{
+	BUG();
+	return 0;
+}
+
+void mgag200_bo_unref(struct mgag200_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+
+	if ((*bo) == NULL)
+		return;
+
+	tbo = &((*bo)->bo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
+
+}
+
+void mgag200_gem_free_object(struct drm_gem_object *obj)
+{
+	struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj);
+
+	if (!mgag200_bo)
+		return;
+	mgag200_bo_unref(&mgag200_bo);
+}
+
+
+static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
+{
+	return bo->bo.addr_space_offset;
+}
+
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+		     struct drm_device *dev,
+		     uint32_t handle,
+		     uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int ret;
+	struct mgag200_bo *bo;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file, handle);
+	if (obj == NULL) {
+		ret = -ENOENT;
+		goto out_unlock;
+	}
+
+	bo = gem_to_mga_bo(obj);
+	*offset = mgag200_bo_mmap_offset(bo);
+
+	drm_gem_object_unreference(obj);
+	ret = 0;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+
+}
diff --git a/linux-imx/drivers/gpu/drm/mgag200/mgag200_mode.c b/linux-imx/drivers/gpu/drm/mgag200/mgag200_mode.c
new file mode 100644
index 0000000..f6341e8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -0,0 +1,1631 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ *	    Matt Turner
+ *	    Dave Airlie
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "mgag200_drv.h"
+
+#define MGAG200_LUT_SIZE 256
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void mga_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	int i;
+
+	if (!crtc->enabled)
+		return;
+
+	WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+
+	for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+		/* VGA registers */
+		WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
+		WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+		WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_b[i]);
+	}
+}
+
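+/* Wait for the vertical retrace in progress (status bit 3) to end, then
+ * for the next one to begin, bounding each phase at 100 ms.
+ */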
+static inline void mga_wait_vsync(struct mga_device *mdev)
+{
+	unsigned long timeout = jiffies + HZ/10;
+	unsigned int status = 0;
+
+	do {
+		status = RREG32(MGAREG_Status);
+	} while ((status & 0x08) && time_before(jiffies, timeout));
+	timeout = jiffies + HZ/10;
+	status = 0;
+	do {
+		status = RREG32(MGAREG_Status);
+	} while (!(status & 0x08) && time_before(jiffies, timeout));
+}
+
+static inline void mga_wait_busy(struct mga_device *mdev)
+{
+	unsigned long timeout = jiffies + HZ;
+	unsigned int status = 0;
+	do {
+		status = RREG8(MGAREG_Status + 2);
+	} while ((status & 0x01) && time_before(jiffies, timeout));
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
+{
+	unsigned int vcomax, vcomin, pllreffreq;
+	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int testp, testm, testn;
+	unsigned int p, m, n;
+	unsigned int computed;
+
+	m = n = p = 0;
+	vcomax = 320000;
+	vcomin = 160000;
+	pllreffreq = 25000;
+
+	delta = 0xffffffff;
+	permitteddelta = clock * 5 / 1000;
+
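+	/* Exhaustive divider search: keep the VCO (clock * p) within
+	 * [vcomin, vcomax] and pick the p/n/m combination whose output,
+	 * pllreffreq * n / (m * p), lands closest to the requested clock;
+	 * permitteddelta caps the final error at 0.5% of the target.
+	 */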
+	for (testp = 8; testp > 0; testp /= 2) {
+		if (clock * testp > vcomax)
+			continue;
+		if (clock * testp < vcomin)
+			continue;
+
+		for (testn = 17; testn < 256; testn++) {
+			for (testm = 1; testm < 32; testm++) {
+				computed = (pllreffreq * testn) /
+					(testm * testp);
+				if (computed > clock)
+					tmpdelta = computed - clock;
+				else
+					tmpdelta = clock - computed;
+				if (tmpdelta < delta) {
+					delta = tmpdelta;
+					m = testm - 1;
+					n = testn - 1;
+					p = testp - 1;
+				}
+			}
+		}
+	}
+
+	if (delta > permitteddelta) {
+		printk(KERN_WARNING "PLL delta too large\n");
+		return 1;
+	}
+
+	WREG_DAC(MGA1064_PIX_PLLC_M, m);
+	WREG_DAC(MGA1064_PIX_PLLC_N, n);
+	WREG_DAC(MGA1064_PIX_PLLC_P, p);
+	return 0;
+}
+
+static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
+{
+	unsigned int vcomax, vcomin, pllreffreq;
+	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int testp, testm, testn;
+	unsigned int p, m, n;
+	unsigned int computed;
+	int i, j, tmpcount, vcount;
+	bool pll_locked = false;
+	u8 tmp;
+
+	m = n = p = 0;
+	vcomax = 550000;
+	vcomin = 150000;
+	pllreffreq = 48000;
+
+	delta = 0xffffffff;
+	permitteddelta = clock * 5 / 1000;
+
+	for (testp = 1; testp < 9; testp++) {
+		if (clock * testp > vcomax)
+			continue;
+		if (clock * testp < vcomin)
+			continue;
+
+		for (testm = 1; testm < 17; testm++) {
+			for (testn = 1; testn < 151; testn++) {
+				computed = (pllreffreq * testn) /
+					(testm * testp);
+				if (computed > clock)
+					tmpdelta = computed - clock;
+				else
+					tmpdelta = clock - computed;
+				if (tmpdelta < delta) {
+					delta = tmpdelta;
+					n = testn - 1;
+					m = (testm - 1) | ((n >> 1) & 0x80);
+					p = testp - 1;
+				}
+			}
+		}
+	}
+
+	for (i = 0; i <= 32 && pll_locked == false; i++) {
+		if (i > 0) {
+			WREG8(MGAREG_CRTC_INDEX, 0x1e);
+			tmp = RREG8(MGAREG_CRTC_DATA);
+			if (tmp < 0xff)
+				WREG8(MGAREG_CRTC_DATA, tmp+1);
+		}
+
+		/* set pixclkdis to 1 */
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+		WREG8(DAC_DATA, tmp);
+
+		WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+		tmp = RREG8(DAC_DATA);
+		tmp |= MGA1064_REMHEADCTL_CLKDIS;
+		WREG8(DAC_DATA, tmp);
+
+		/* select PLL Set C */
+		tmp = RREG8(MGAREG_MEM_MISC_READ);
+		tmp |= 0x3 << 2;
+		WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
+		WREG8(DAC_DATA, tmp);
+
+		udelay(500);
+
+		/* reset the PLL */
+		WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp &= ~0x04;
+		WREG8(DAC_DATA, tmp);
+
+		udelay(50);
+
+		/* program pixel pll register */
+		WREG_DAC(MGA1064_WB_PIX_PLLC_N, n);
+		WREG_DAC(MGA1064_WB_PIX_PLLC_M, m);
+		WREG_DAC(MGA1064_WB_PIX_PLLC_P, p);
+
+		udelay(50);
+
+		/* turn pll on */
+		WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp |= 0x04;
+		WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+		udelay(500);
+
+		/* select the pixel pll */
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+		tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+		WREG8(DAC_DATA, tmp);
+
+		WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+		tmp = RREG8(DAC_DATA);
+		tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
+		tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
+		WREG8(DAC_DATA, tmp);
+
+		/* reset dotclock rate bit */
+		WREG8(MGAREG_SEQ_INDEX, 1);
+		tmp = RREG8(MGAREG_SEQ_DATA);
+		tmp &= ~0x8;
+		WREG8(MGAREG_SEQ_DATA, tmp);
+
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+		WREG8(DAC_DATA, tmp);
+
+		vcount = RREG8(MGAREG_VCOUNT);
+
+		for (j = 0; j < 30 && pll_locked == false; j++) {
+			tmpcount = RREG8(MGAREG_VCOUNT);
+			if (tmpcount < vcount)
+				vcount = 0;
+			if ((tmpcount - vcount) > 2)
+				pll_locked = true;
+			else
+				udelay(5);
+		}
+	}
+	WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~MGA1064_REMHEADCTL_CLKDIS;
+	WREG_DAC(MGA1064_REMHEADCTL, tmp);
+	return 0;
+}
+
+static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
+{
+	unsigned int vcomax, vcomin, pllreffreq;
+	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int testp, testm, testn;
+	unsigned int p, m, n;
+	unsigned int computed;
+	u8 tmp;
+
+	m = n = p = 0;
+	vcomax = 550000;
+	vcomin = 150000;
+	pllreffreq = 50000;
+
+	delta = 0xffffffff;
+	permitteddelta = clock * 5 / 1000;
+
+	for (testp = 16; testp > 0; testp--) {
+		if (clock * testp > vcomax)
+			continue;
+		if (clock * testp < vcomin)
+			continue;
+
+		for (testn = 1; testn < 257; testn++) {
+			for (testm = 1; testm < 17; testm++) {
+				computed = (pllreffreq * testn) /
+					(testm * testp);
+				if (computed > clock)
+					tmpdelta = computed - clock;
+				else
+					tmpdelta = clock - computed;
+				if (tmpdelta < delta) {
+					delta = tmpdelta;
+					n = testn - 1;
+					m = testm - 1;
+					p = testp - 1;
+				}
+			}
+		}
+	}
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+	WREG8(DAC_DATA, tmp);
+
+	tmp = RREG8(MGAREG_MEM_MISC_READ);
+	tmp |= 0x3 << 2;
+	WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+	tmp = RREG8(DAC_DATA);
+	WREG8(DAC_DATA, tmp & ~0x40);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+	WREG8(DAC_DATA, tmp);
+
+	WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
+	WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
+	WREG_DAC(MGA1064_EV_PIX_PLLC_P, p);
+
+	udelay(50);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+	WREG8(DAC_DATA, tmp);
+
+	udelay(500);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+	tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+	WREG8(DAC_DATA, tmp);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+	tmp = RREG8(DAC_DATA);
+	WREG8(DAC_DATA, tmp | 0x40);
+
+	tmp = RREG8(MGAREG_MEM_MISC_READ);
+	tmp |= (0x3 << 2);
+	WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+	WREG8(DAC_DATA, tmp);
+
+	return 0;
+}
+
+static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
+{
+	unsigned int vcomax, vcomin, pllreffreq;
+	unsigned int delta, tmpdelta, permitteddelta;
+	unsigned int testp, testm, testn;
+	unsigned int p, m, n;
+	unsigned int computed;
+	int i, j, tmpcount, vcount;
+	u8 tmp;
+	bool pll_locked = false;
+
+	m = n = p = 0;
+	vcomax = 800000;
+	vcomin = 400000;
+	pllreffreq = 33333;
+
+	delta = 0xffffffff;
+	permitteddelta = clock * 5 / 1000;
+
+	for (testp = 16; testp > 0; testp >>= 1) {
+		if (clock * testp > vcomax)
+			continue;
+		if (clock * testp < vcomin)
+			continue;
+
+		for (testm = 1; testm < 33; testm++) {
+			for (testn = 17; testn < 257; testn++) {
+				computed = (pllreffreq * testn) /
+					(testm * testp);
+				if (computed > clock)
+					tmpdelta = computed - clock;
+				else
+					tmpdelta = clock - computed;
+				if (tmpdelta < delta) {
+					delta = tmpdelta;
+					n = testn - 1;
+					m = (testm - 1);
+					p = testp - 1;
+				}
+				if ((clock * testp) >= 600000)
+					p |= 0x80;
+			}
+		}
+	}
+	for (i = 0; i <= 32 && pll_locked == false; i++) {
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+		WREG8(DAC_DATA, tmp);
+
+		tmp = RREG8(MGAREG_MEM_MISC_READ);
+		tmp |= 0x3 << 2;
+		WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+		WREG8(DAC_DATA, tmp);
+
+		udelay(500);
+
+		WREG_DAC(MGA1064_EH_PIX_PLLC_M, m);
+		WREG_DAC(MGA1064_EH_PIX_PLLC_N, n);
+		WREG_DAC(MGA1064_EH_PIX_PLLC_P, p);
+
+		udelay(500);
+
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+		tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+		WREG8(DAC_DATA, tmp);
+
+		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+		tmp = RREG8(DAC_DATA);
+		tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+		tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+		WREG8(DAC_DATA, tmp);
+
+		vcount = RREG8(MGAREG_VCOUNT);
+
+		for (j = 0; j < 30 && pll_locked == false; j++) {
+			tmpcount = RREG8(MGAREG_VCOUNT);
+			if (tmpcount < vcount)
+				vcount = 0;
+			if ((tmpcount - vcount) > 2)
+				pll_locked = true;
+			else
+				udelay(5);
+		}
+	}
+
+	return 0;
+}
+
+static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
+{
+	unsigned int vcomax, vcomin, pllreffreq;
+	unsigned int delta, tmpdelta;
+	int testr, testn, testm, testo;
+	unsigned int p, m, n;
+	unsigned int computed, vco;
+	int tmp;
+	const unsigned int m_div_val[] = { 1, 2, 4, 8 };
+
+	m = n = p = 0;
+	vcomax = 1488000;
+	vcomin = 1056000;
+	pllreffreq = 48000;
+
+	delta = 0xffffffff;
+
+	for (testr = 0; testr < 4; testr++) {
+		if (delta == 0)
+			break;
+		for (testn = 5; testn < 129; testn++) {
+			if (delta == 0)
+				break;
+			for (testm = 3; testm >= 0; testm--) {
+				if (delta == 0)
+					break;
+				for (testo = 5; testo < 33; testo++) {
+					vco = pllreffreq * (testn + 1) /
+						(testr + 1);
+					if (vco < vcomin)
+						continue;
+					if (vco > vcomax)
+						continue;
+					computed = vco / (m_div_val[testm] * (testo + 1));
+					if (computed > clock)
+						tmpdelta = computed - clock;
+					else
+						tmpdelta = clock - computed;
+					if (tmpdelta < delta) {
+						delta = tmpdelta;
+						m = testm | (testo << 3);
+						n = testn;
+						p = testr | (testr << 3);
+					}
+				}
+			}
+		}
+	}
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+	WREG8(DAC_DATA, tmp);
+
+	WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+	tmp = RREG8(DAC_DATA);
+	tmp |= MGA1064_REMHEADCTL_CLKDIS;
+	WREG8(DAC_DATA, tmp);
+
+	tmp = RREG8(MGAREG_MEM_MISC_READ);
+	tmp |= (0x3<<2) | 0xc0;
+	WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+	WREG8(DAC_DATA, tmp);
+
+	udelay(500);
+
+	WREG_DAC(MGA1064_ER_PIX_PLLC_N, n);
+	WREG_DAC(MGA1064_ER_PIX_PLLC_M, m);
+	WREG_DAC(MGA1064_ER_PIX_PLLC_P, p);
+
+	udelay(50);
+
+	return 0;
+}
+
+static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
+{
+	switch (mdev->type) {
+	case G200_SE_A:
+	case G200_SE_B:
+		return mga_g200se_set_plls(mdev, clock);
+	case G200_WB:
+		return mga_g200wb_set_plls(mdev, clock);
+	case G200_EV:
+		return mga_g200ev_set_plls(mdev, clock);
+	case G200_EH:
+		return mga_g200eh_set_plls(mdev, clock);
+	case G200_ER:
+		return mga_g200er_set_plls(mdev, clock);
+	}
+	return 0;
+}
+
+static void mga_g200wb_prepare(struct drm_crtc *crtc)
+{
+	struct mga_device *mdev = crtc->dev->dev_private;
+	u8 tmp;
+	int iter_max;
+
+	/* 1- The first step is to warn the BMC of an upcoming mode change.
+	 * We do this by setting the misc<0> line to output.
+	 */
+
+	WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+	tmp = RREG8(DAC_DATA);
+	tmp |= 0x10;
+	WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+
+	/* we are putting a 1 on the misc<0> line */
+	WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+	tmp = RREG8(DAC_DATA);
+	tmp |= 0x10;
+	WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+
+	/* 2- The second step is to mask any further scan request.
+	 * This is done by asserting the remfreqmsk bit (XSPAREREG<7>).
+	 */
+	WREG8(DAC_INDEX, MGA1064_SPAREREG);
+	tmp = RREG8(DAC_DATA);
+	tmp |= 0x80;
+	WREG_DAC(MGA1064_SPAREREG, tmp);
+
+	/* 3a- The third step is to verify whether there is an active scan.
+	 * We are looking for a 0 on remhsyncsts (XSPAREREG<0>).
+	 */
+	iter_max = 300;
+	while (!(tmp & 0x1) && iter_max) {
+		WREG8(DAC_INDEX, MGA1064_SPAREREG);
+		tmp = RREG8(DAC_DATA);
+		udelay(1000);
+		iter_max--;
+	}
+
+	/* 3b- This step occurs only if the remote is actually scanning;
+	 * we wait for the end of the frame, which is a 1 on
+	 * remvsyncsts (XSPAREREG<1>).
+	 */
+	if (iter_max) {
+		iter_max = 300;
+		while ((tmp & 0x2) && iter_max) {
+			WREG8(DAC_INDEX, MGA1064_SPAREREG);
+			tmp = RREG8(DAC_DATA);
+			udelay(1000);
+			iter_max--;
+		}
+	}
+}
+
+static void mga_g200wb_commit(struct drm_crtc *crtc)
+{
+	u8 tmp;
+	struct mga_device *mdev = crtc->dev->dev_private;
+
+	/* 1- The first step is to ensure that the vrsten and hrsten are set */
+	WREG8(MGAREG_CRTCEXT_INDEX, 1);
+	tmp = RREG8(MGAREG_CRTCEXT_DATA);
+	WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
+
+	/* 2- second step is to assert the rstlvl2 */
+	WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+	tmp = RREG8(DAC_DATA);
+	tmp |= 0x8;
+	WREG8(DAC_DATA, tmp);
+
+	/* wait 10 us */
+	udelay(10);
+
+	/* 3- deassert rstlvl2 */
+	tmp &= ~0x08;
+	WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+	WREG8(DAC_DATA, tmp);
+
+	/* 4- remove mask of scan request */
+	WREG8(DAC_INDEX, MGA1064_SPAREREG);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~0x80;
+	WREG8(DAC_DATA, tmp);
+
+	/* 5- put back a 0 on the misc<0> line */
+	WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+	tmp = RREG8(DAC_DATA);
+	tmp &= ~0x10;
+	WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+}
+
+/*
+   This is how the framebuffer base address is stored in g200 cards:
+   * Assume @offset is the gpu_addr variable of the framebuffer object
+   * Then addr is the number of _pixels_ (not bytes) from the start of
+     VRAM to the first pixel we want to display. (divided by 2 for 32bit
+     framebuffers)
+   * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
+   addr<20> -> CRTCEXT0<6>
+   addr<19-16> -> CRTCEXT0<3-0>
+   addr<15-8> -> CRTCC<7-0>
+   addr<7-0> -> CRTCD<7-0>
+   CRTCEXT0 has to be programmed last to trigger an update and make the
+   new addr variable take effect.
+ */
+void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+	struct mga_device *mdev = crtc->dev->dev_private;
+	u32 addr;
+	int count;
+	u8 crtcext0;
+
+	while (RREG8(0x1fda) & 0x08);
+	while (!(RREG8(0x1fda) & 0x08));
+
+	count = RREG8(MGAREG_VCOUNT) + 2;
+	while (RREG8(MGAREG_VCOUNT) < count);
+
+	WREG8(MGAREG_CRTCEXT_INDEX, 0);
+	crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
+	crtcext0 &= 0xB0;
+	addr = offset / 8;
+	/* Can't store addresses any higher than that...
+	   but we also don't have more than 16MB of memory, so it should be fine. */
+	WARN_ON(addr > 0x1fffff);
+	crtcext0 |= (!!(addr & (1<<20)))<<6;
+	WREG_CRT(0x0d, (u8)(addr & 0xff));
+	WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
+	WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
+}
+
+
+/* As in the ast driver, we force-move replaced buffers out of VRAM. */
+static int mga_crtc_do_set_base(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				int x, int y, int atomic)
+{
+	struct mga_device *mdev = crtc->dev->dev_private;
+	struct drm_gem_object *obj;
+	struct mga_framebuffer *mga_fb;
+	struct mgag200_bo *bo;
+	int ret;
+	u64 gpu_addr;
+
+	/* push the previous fb to system ram */
+	if (!atomic && fb) {
+		mga_fb = to_mga_framebuffer(fb);
+		obj = mga_fb->obj;
+		bo = gem_to_mga_bo(obj);
+		ret = mgag200_bo_reserve(bo, false);
+		if (ret)
+			return ret;
+		mgag200_bo_push_sysram(bo);
+		mgag200_bo_unreserve(bo);
+	}
+
+	mga_fb = to_mga_framebuffer(crtc->fb);
+	obj = mga_fb->obj;
+	bo = gem_to_mga_bo(obj);
+
+	ret = mgag200_bo_reserve(bo, false);
+	if (ret)
+		return ret;
+
+	ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+	if (ret) {
+		mgag200_bo_unreserve(bo);
+		return ret;
+	}
+
+	if (&mdev->mfbdev->mfb == mga_fb) {
+		/* if pushing the console framebuffer, kmap it */
+		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+		if (ret)
+			DRM_ERROR("failed to kmap fbcon\n");
+
+	}
+	mgag200_bo_unreserve(bo);
+
+	DRM_INFO("mga base %llx\n", gpu_addr);
+
+	mga_set_start_address(crtc, (u32)gpu_addr);
+
+	return 0;
+}
+
+static int mga_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+				  struct drm_framebuffer *old_fb)
+{
+	return mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int mga_crtc_mode_set(struct drm_crtc *crtc,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode,
+				int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	int hdisplay, hsyncstart, hsyncend, htotal;
+	int vdisplay, vsyncstart, vsyncend, vtotal;
+	int pitch;
+	int option = 0, option2 = 0;
+	int i;
+	unsigned char misc = 0;
+	unsigned char ext_vga[6];
+	u8 bppshift;
+
+	static unsigned char dacvalue[] = {
+		/* 0x00: */        0,    0,    0,    0,    0,    0, 0x00,    0,
+		/* 0x08: */        0,    0,    0,    0,    0,    0,    0,    0,
+		/* 0x10: */        0,    0,    0,    0,    0,    0,    0,    0,
+		/* 0x18: */     0x00,    0, 0xC9, 0xFF, 0xBF, 0x20, 0x1F, 0x20,
+		/* 0x20: */     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		/* 0x28: */     0x00, 0x00, 0x00, 0x00,    0,    0,    0, 0x40,
+		/* 0x30: */     0x00, 0xB0, 0x00, 0xC2, 0x34, 0x14, 0x02, 0x83,
+		/* 0x38: */     0x00, 0x93, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3A,
+		/* 0x40: */        0,    0,    0,    0,    0,    0,    0,    0,
+		/* 0x48: */        0,    0,    0,    0,    0,    0,    0,    0
+	};
+
+	bppshift = mdev->bpp_shifts[(crtc->fb->bits_per_pixel >> 3) - 1];
+
+	switch (mdev->type) {
+	case G200_SE_A:
+	case G200_SE_B:
+		dacvalue[MGA1064_VREF_CTL] = 0x03;
+		dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+		dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_DAC_EN |
+					     MGA1064_MISC_CTL_VGA8 |
+					     MGA1064_MISC_CTL_DAC_RAM_CS;
+		if (mdev->has_sdram)
+			option = 0x40049120;
+		else
+			option = 0x4004d120;
+		option2 = 0x00008000;
+		break;
+	case G200_WB:
+		dacvalue[MGA1064_VREF_CTL] = 0x07;
+		option = 0x41049120;
+		option2 = 0x0000b000;
+		break;
+	case G200_EV:
+		dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+		dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+					     MGA1064_MISC_CTL_DAC_RAM_CS;
+		option = 0x00000120;
+		option2 = 0x0000b000;
+		break;
+	case G200_EH:
+		dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+					     MGA1064_MISC_CTL_DAC_RAM_CS;
+		option = 0x00000120;
+		option2 = 0x0000b000;
+		break;
+	case G200_ER:
+		break;
+	}
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_8bits;
+		break;
+	case 16:
+		if (crtc->fb->depth == 15)
+			dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_15bits;
+		else
+			dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_16bits;
+		break;
+	case 24:
+		dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_24bits;
+		break;
+	case 32:
+		dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_32_24bits;
+		break;
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		misc |= 0x40;
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		misc |= 0x80;
+
+
+	for (i = 0; i < sizeof(dacvalue); i++) {
+		if ((i <= 0x17) ||
+		    (i == 0x1b) ||
+		    (i == 0x1c) ||
+		    ((i >= 0x1f) && (i <= 0x29)) ||
+		    ((i >= 0x30) && (i <= 0x37)))
+			continue;
+		if (IS_G200_SE(mdev) &&
+		    ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
+			continue;
+		if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) &&
+		    (i >= 0x44) && (i <= 0x4e))
+			continue;
+
+		WREG_DAC(i, dacvalue[i]);
+	}
+
+	if (mdev->type == G200_ER)
+		WREG_DAC(0x90, 0);
+
+	if (option)
+		pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
+	if (option2)
+		pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
+
+	WREG_SEQ(2, 0xf);
+	WREG_SEQ(3, 0);
+	WREG_SEQ(4, 0xe);
+
+	pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
+	if (crtc->fb->bits_per_pixel == 24)
+		pitch = (pitch * 3) >> (4 - bppshift);
+	else
+		pitch = pitch >> (4 - bppshift);
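+	/*
+	 * Illustrative (assuming bpp_shifts maps 8/16/24/32 bpp to
+	 * 0/1/0/2 at init): a 1024-wide 32bpp fb has pitches[0] = 4096
+	 * bytes, so pitch = 4096 / 4 = 1024 pixels, then
+	 * 1024 >> (4 - 2) = 256, i.e. the pitch is programmed to the
+	 * CRTC in 16-byte units.
+	 */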
+
+	hdisplay = mode->hdisplay / 8 - 1;
+	hsyncstart = mode->hsync_start / 8 - 1;
+	hsyncend = mode->hsync_end / 8 - 1;
+	htotal = mode->htotal / 8 - 1;
+
+	/* Work around hardware quirk */
+	if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04)
+		htotal++;
+
+	vdisplay = mode->vdisplay - 1;
+	vsyncstart = mode->vsync_start - 1;
+	vsyncend = mode->vsync_end - 1;
+	vtotal = mode->vtotal - 2;
+
+	WREG_GFX(0, 0);
+	WREG_GFX(1, 0);
+	WREG_GFX(2, 0);
+	WREG_GFX(3, 0);
+	WREG_GFX(4, 0);
+	WREG_GFX(5, 0x40);
+	WREG_GFX(6, 0x5);
+	WREG_GFX(7, 0xf);
+	WREG_GFX(8, 0xf);
+
+	WREG_CRT(0, htotal - 4);
+	WREG_CRT(1, hdisplay);
+	WREG_CRT(2, hdisplay);
+	WREG_CRT(3, (htotal & 0x1F) | 0x80);
+	WREG_CRT(4, hsyncstart);
+	WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F));
+	WREG_CRT(6, vtotal & 0xFF);
+	WREG_CRT(7, ((vtotal & 0x100) >> 8) |
+		 ((vdisplay & 0x100) >> 7) |
+		 ((vsyncstart & 0x100) >> 6) |
+		 ((vdisplay & 0x100) >> 5) |
+		 ((vdisplay & 0x100) >> 4) | /* linecomp */
+		 ((vtotal & 0x200) >> 4) |
+		 ((vdisplay & 0x200) >> 3) |
+		 ((vsyncstart & 0x200) >> 2));
+	WREG_CRT(9, ((vdisplay & 0x200) >> 4) |
+		 ((vdisplay & 0x200) >> 3));
+	WREG_CRT(10, 0);
+	WREG_CRT(11, 0);
+	WREG_CRT(12, 0);
+	WREG_CRT(13, 0);
+	WREG_CRT(14, 0);
+	WREG_CRT(15, 0);
+	WREG_CRT(16, vsyncstart & 0xFF);
+	WREG_CRT(17, (vsyncend & 0x0F) | 0x20);
+	WREG_CRT(18, vdisplay & 0xFF);
+	WREG_CRT(19, pitch & 0xFF);
+	WREG_CRT(20, 0);
+	WREG_CRT(21, vdisplay & 0xFF);
+	WREG_CRT(22, (vtotal + 1) & 0xFF);
+	WREG_CRT(23, 0xc3);
+	WREG_CRT(24, vdisplay & 0xFF);
+
+	ext_vga[0] = 0;
+	ext_vga[5] = 0;
+
+	/* TODO interlace */
+
+	ext_vga[0] |= (pitch & 0x300) >> 4;
+	ext_vga[1] = (((htotal - 4) & 0x100) >> 8) |
+		((hdisplay & 0x100) >> 7) |
+		((hsyncstart & 0x100) >> 6) |
+		(htotal & 0x40);
+	ext_vga[2] = ((vtotal & 0xc00) >> 10) |
+		((vdisplay & 0x400) >> 8) |
+		((vdisplay & 0xc00) >> 7) |
+		((vsyncstart & 0xc00) >> 5) |
+		((vdisplay & 0x400) >> 3);
+	if (crtc->fb->bits_per_pixel == 24)
+		ext_vga[3] = (((1 << bppshift) * 3) - 1) | 0x80;
+	else
+		ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
+	ext_vga[4] = 0;
+	if (mdev->type == G200_WB)
+		ext_vga[1] |= 0x88;
+
+	/* Set pixel clocks */
+	misc = 0x2d;
+	WREG8(MGA_MISC_OUT, misc);
+
+	mga_crtc_set_plls(mdev, mode->clock);
+
+	for (i = 0; i < 6; i++) {
+		WREG_ECRT(i, ext_vga[i]);
+	}
+
+	if (mdev->type == G200_ER)
+		WREG_ECRT(0x24, 0x5);
+
+	if (mdev->type == G200_EV) {
+		WREG_ECRT(6, 0);
+	}
+
+	WREG_ECRT(0, ext_vga[0]);
+	/* Enable mga pixel clock */
+	misc = 0x2d;
+
+	WREG8(MGA_MISC_OUT, misc);
+
+	if (adjusted_mode)
+		memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
+
+	mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+
+	/* reset tagfifo */
+	if (mdev->type == G200_ER) {
+		u32 mem_ctl = RREG32(MGAREG_MEMCTL);
+		u8 seq1;
+
+		/* screen off */
+		WREG8(MGAREG_SEQ_INDEX, 0x01);
+		seq1 = RREG8(MGAREG_SEQ_DATA) | 0x20;
+		WREG8(MGAREG_SEQ_DATA, seq1);
+
+		WREG32(MGAREG_MEMCTL, mem_ctl | 0x00200000);
+		udelay(1000);
+		WREG32(MGAREG_MEMCTL, mem_ctl & ~0x00200000);
+
+		WREG8(MGAREG_SEQ_DATA, seq1 & ~0x20);
+	}
+
+
+	if (IS_G200_SE(mdev)) {
+		if (mdev->unique_rev_id >= 0x02) {
+			u8 hi_pri_lvl;
+			u32 bpp;
+			u32 mb;
+
+			if (crtc->fb->bits_per_pixel > 16)
+				bpp = 32;
+			else if (crtc->fb->bits_per_pixel > 8)
+				bpp = 16;
+			else
+				bpp = 8;
+
+			mb = (mode->clock * bpp) / 1000;
+			if (mb > 3100)
+				hi_pri_lvl = 0;
+			else if (mb > 2600)
+				hi_pri_lvl = 1;
+			else if (mb > 1900)
+				hi_pri_lvl = 2;
+			else if (mb > 1160)
+				hi_pri_lvl = 3;
+			else if (mb > 440)
+				hi_pri_lvl = 4;
+			else
+				hi_pri_lvl = 5;
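+			/*
+			 * Illustrative arithmetic: mode->clock is in kHz, so
+			 * 1920x1200@60 (clock ~193250) at 32bpp gives
+			 * mb = 193250 * 32 / 1000 = 6184 > 3100 and the
+			 * highest priority level (0), while 1024x768@60
+			 * (clock 65000) at 8bpp gives mb = 520 and level 4.
+			 */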
+
+			WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+			WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
+		} else {
+			WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+			if (mdev->unique_rev_id >= 0x01)
+				WREG8(MGAREG_CRTCEXT_DATA, 0x03);
+			else
+				WREG8(MGAREG_CRTCEXT_DATA, 0x04);
+		}
+	}
+	return 0;
+}
+
+#if 0 /* code from mjg to attempt D3 on crtc dpms off - revisit later */
+static int mga_suspend(struct drm_crtc *crtc)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	struct pci_dev *pdev = dev->pdev;
+	u32 option;
+
+	if (mdev->suspended)
+		return 0;
+
+	WREG_SEQ(1, 0x20);
+	WREG_ECRT(1, 0x30);
+	/* Disable the pixel clock */
+	WREG_DAC(0x1a, 0x05);
+	/* Power down the DAC */
+	WREG_DAC(0x1e, 0x18);
+	/* Power down the pixel PLL */
+	WREG_DAC(0x1a, 0x0d);
+
+	/* Disable PLLs and clocks */
+	pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+	option &= ~(0x1F8024);
+	pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+	pci_set_power_state(pdev, PCI_D3hot);
+	pci_disable_device(pdev);
+
+	mdev->suspended = true;
+
+	return 0;
+}
+
+static int mga_resume(struct drm_crtc *crtc)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	struct pci_dev *pdev = dev->pdev;
+	u32 option;
+
+	if (!mdev->suspended)
+		return 0;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_enable_device(pdev);
+
+	/* Clear the sysclk-disable bit so the system clock runs again */
+	pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+	option &= ~(0x4);
+	pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+
+	mdev->suspended = false;
+
+	return 0;
+}
+
+#endif
+
+static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	u8 seq1 = 0, crtcext1 = 0;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		seq1 = 0;
+		crtcext1 = 0;
+		mga_crtc_load_lut(crtc);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		seq1 = 0x20;
+		crtcext1 = 0x10;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		seq1 = 0x20;
+		crtcext1 = 0x20;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		seq1 = 0x20;
+		crtcext1 = 0x30;
+		break;
+	}
+
+#if 0
+	if (mode == DRM_MODE_DPMS_OFF) {
+		mga_suspend(crtc);
+	}
+#endif
+	WREG8(MGAREG_SEQ_INDEX, 0x01);
+	seq1 |= RREG8(MGAREG_SEQ_DATA) & ~0x20;
+	mga_wait_vsync(mdev);
+	mga_wait_busy(mdev);
+	WREG8(MGAREG_SEQ_DATA, seq1);
+	msleep(20);
+	WREG8(MGAREG_CRTCEXT_INDEX, 0x01);
+	crtcext1 |= RREG8(MGAREG_CRTCEXT_DATA) & ~0x30;
+	WREG8(MGAREG_CRTCEXT_DATA, crtcext1);
+
+#if 0
+	if (mode == DRM_MODE_DPMS_ON && mdev->suspended == true) {
+		mga_resume(crtc);
+		drm_helper_resume_force_mode(dev);
+	}
+#endif
+}
+
+/*
+ * This is called before a mode is programmed. A typical use might be to
+ * enable DPMS during the programming to avoid seeing intermediate stages,
+ * but that's not relevant to us
+ */
+static void mga_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	u8 tmp;
+
+	/*	mga_resume(crtc);*/
+
+	WREG8(MGAREG_CRTC_INDEX, 0x11);
+	tmp = RREG8(MGAREG_CRTC_DATA);
+	WREG_CRT(0x11, tmp | 0x80);
+
+	if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+		WREG_SEQ(0, 1);
+		msleep(50);
+		WREG_SEQ(1, 0x20);
+		msleep(20);
+	} else {
+		WREG8(MGAREG_SEQ_INDEX, 0x1);
+		tmp = RREG8(MGAREG_SEQ_DATA);
+
+		/* start sync reset */
+		WREG_SEQ(0, 1);
+		WREG_SEQ(1, tmp | 0x20);
+	}
+
+	if (mdev->type == G200_WB)
+		mga_g200wb_prepare(crtc);
+
+	WREG_CRT(17, 0);
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void mga_crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct mga_device *mdev = dev->dev_private;
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+	u8 tmp;
+
+	if (mdev->type == G200_WB)
+		mga_g200wb_commit(crtc);
+
+	if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+		msleep(50);
+		WREG_SEQ(1, 0x0);
+		msleep(20);
+		WREG_SEQ(0, 0x3);
+	} else {
+		WREG8(MGAREG_SEQ_INDEX, 0x1);
+		tmp = RREG8(MGAREG_SEQ_DATA);
+
+		tmp &= ~0x20;
+		WREG_SEQ(0x1, tmp);
+		WREG_SEQ(0, 3);
+	}
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode, so we can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				  u16 *blue, uint32_t start, uint32_t size)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+	int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
+	int i;
+
+	for (i = start; i < end; i++) {
+		mga_crtc->lut_r[i] = red[i] >> 8;
+		mga_crtc->lut_g[i] = green[i] >> 8;
+		mga_crtc->lut_b[i] = blue[i] >> 8;
+	}
+	mga_crtc_load_lut(crtc);
+}
+
+/* Simple cleanup function */
+static void mga_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	kfree(mga_crtc);
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs mga_crtc_funcs = {
+	.gamma_set = mga_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = mga_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs mga_helper_funcs = {
+	.dpms = mga_crtc_dpms,
+	.mode_fixup = mga_crtc_mode_fixup,
+	.mode_set = mga_crtc_mode_set,
+	.mode_set_base = mga_crtc_mode_set_base,
+	.prepare = mga_crtc_prepare,
+	.commit = mga_crtc_commit,
+	.load_lut = mga_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void mga_crtc_init(struct mga_device *mdev)
+{
+	struct mga_crtc *mga_crtc;
+	int i;
+
+	mga_crtc = kzalloc(sizeof(struct mga_crtc) +
+			      (MGAG200FB_CONN_LIMIT * sizeof(struct drm_connector *)),
+			      GFP_KERNEL);
+
+	if (mga_crtc == NULL)
+		return;
+
+	drm_crtc_init(mdev->dev, &mga_crtc->base, &mga_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
+	mdev->mode_info.crtc = mga_crtc;
+
+	for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+		mga_crtc->lut_r[i] = i;
+		mga_crtc->lut_g[i] = i;
+		mga_crtc->lut_b[i] = i;
+	}
+
+	drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			      u16 blue, int regno)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+	mga_crtc->lut_r[regno] = red >> 8;
+	mga_crtc->lut_g[regno] = green >> 8;
+	mga_crtc->lut_b[regno] = blue >> 8;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			      u16 *blue, int regno)
+{
+	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+	*red = (u16)mga_crtc->lut_r[regno] << 8;
+	*green = (u16)mga_crtc->lut_g[regno] << 8;
+	*blue = (u16)mga_crtc->lut_b[regno] << 8;
+}
+
+/*
+ * The encoder comes after the CRTC in the output pipeline, but before
+ * the connector. It's responsible for ensuring that the digital
+ * stream is appropriately converted into the output format. Setup is
+ * very simple in this case - the integrated DAC needs no additional
+ * programming, so the hooks below are stubs
+ */
+
+/*
+ * These functions are analogous to those in the CRTC code, but are intended
+ * to handle any encoder-specific limitations
+ */
+static bool mga_encoder_mode_fixup(struct drm_encoder *encoder,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void mga_encoder_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void mga_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+	return;
+}
+
+static void mga_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void mga_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+void mga_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
+	drm_encoder_cleanup(encoder);
+	kfree(mga_encoder);
+}
+
+static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
+	.dpms = mga_encoder_dpms,
+	.mode_fixup = mga_encoder_mode_fixup,
+	.mode_set = mga_encoder_mode_set,
+	.prepare = mga_encoder_prepare,
+	.commit = mga_encoder_commit,
+};
+
+static const struct drm_encoder_funcs mga_encoder_encoder_funcs = {
+	.destroy = mga_encoder_destroy,
+};
+
+static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+	struct mga_encoder *mga_encoder;
+
+	mga_encoder = kzalloc(sizeof(struct mga_encoder), GFP_KERNEL);
+	if (!mga_encoder)
+		return NULL;
+
+	encoder = &mga_encoder->base;
+	encoder->possible_crtcs = 0x1;
+
+	drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
+			 DRM_MODE_ENCODER_DAC);
+	drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
+
+	return encoder;
+}
+
+
+static int mga_vga_get_modes(struct drm_connector *connector)
+{
+	struct mga_connector *mga_connector = to_mga_connector(connector);
+	struct edid *edid;
+	int ret = 0;
+
+	edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
+	if (edid) {
+		drm_mode_connector_update_edid_property(connector, edid);
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+	return ret;
+}
+
+static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode,
+							int bits_per_pixel)
+{
+	uint32_t total_area, divisor;
+	int64_t active_area, pixels_per_second, bandwidth;
+	uint64_t bytes_per_pixel = (bits_per_pixel + 7) / 8;
+
+	divisor = 1024;
+
+	if (!mode->htotal || !mode->vtotal || !mode->clock)
+		return 0;
+
+	active_area = mode->hdisplay * mode->vdisplay;
+	total_area = mode->htotal * mode->vtotal;
+
+	pixels_per_second = active_area * mode->clock * 1000;
+	do_div(pixels_per_second, total_area);
+
+	bandwidth = pixels_per_second * bytes_per_pixel * 100;
+	do_div(bandwidth, divisor);
+
+	return (uint32_t)(bandwidth);
+}
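+
+/*
+ * Worked example (illustrative): a 1920x1080@60 mode has
+ * clock = 148500 (kHz), active_area = 2073600 and
+ * total_area = 2200 * 1125 = 2475000, so pixels_per_second =
+ * 2073600 * 148500000 / 2475000 = 124416000 (the active pixels times
+ * the 60 Hz refresh). At 32bpp (4 bytes) the returned value is
+ * 124416000 * 4 * 100 / 1024 = 48600000, which mga_vga_mode_valid()
+ * below compares against per-chip limits such as 30100 * 1024.
+ */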
+
+#define MODE_BANDWIDTH	MODE_BAD
+
+static int mga_vga_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct mga_device *mdev = dev->dev_private;
+	struct mga_fbdev *mfbdev = mdev->mfbdev;
+	struct drm_fb_helper *fb_helper = &mfbdev->helper;
+	struct drm_fb_helper_connector *fb_helper_conn = NULL;
+	int bpp = 32;
+	int i = 0;
+
+	if (IS_G200_SE(mdev)) {
+		if (mdev->unique_rev_id == 0x01) {
+			if (mode->hdisplay > 1600)
+				return MODE_VIRTUAL_X;
+			if (mode->vdisplay > 1200)
+				return MODE_VIRTUAL_Y;
+			if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+				> (24400 * 1024))
+				return MODE_BANDWIDTH;
+		} else if (mdev->unique_rev_id >= 0x02) {
+			if (mode->hdisplay > 1920)
+				return MODE_VIRTUAL_X;
+			if (mode->vdisplay > 1200)
+				return MODE_VIRTUAL_Y;
+			if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+				> (30100 * 1024))
+				return MODE_BANDWIDTH;
+		}
+	} else if (mdev->type == G200_WB) {
+		if (mode->hdisplay > 1280)
+			return MODE_VIRTUAL_X;
+		if (mode->vdisplay > 1024)
+			return MODE_VIRTUAL_Y;
+		if (mga_vga_calculate_mode_bandwidth(mode, bpp)
+			> (31877 * 1024))
+			return MODE_BANDWIDTH;
+	} else if (mdev->type == G200_EV &&
+		(mga_vga_calculate_mode_bandwidth(mode, bpp)
+			> (32700 * 1024))) {
+		return MODE_BANDWIDTH;
+	} else if (mdev->type == G200_EH &&
+		(mga_vga_calculate_mode_bandwidth(mode, bpp)
+			> (37500 * 1024))) {
+		return MODE_BANDWIDTH;
+	} else if (mdev->type == G200_ER &&
+		(mga_vga_calculate_mode_bandwidth(mode,
+			bpp) > (55000 * 1024))) {
+		return MODE_BANDWIDTH;
+	}
+
+	if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
+	    mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
+	    mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
+	    mode->crtc_vsync_end > 4096 || mode->crtc_vtotal > 4096) {
+		return MODE_BAD;
+	}
+
+	/* Validate the mode input by the user */
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		if (fb_helper->connector_info[i]->connector == connector) {
+			/* Found the helper for this connector */
+			fb_helper_conn = fb_helper->connector_info[i];
+			if (fb_helper_conn->cmdline_mode.specified) {
+				if (fb_helper_conn->cmdline_mode.bpp_specified) {
+					bpp = fb_helper_conn->cmdline_mode.bpp;
+				}
+			}
+		}
+	}
+
+	if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
+		if (fb_helper_conn)
+			fb_helper_conn->cmdline_mode.specified = false;
+		return MODE_BAD;
+	}
+
+	return MODE_OK;
+}
+
+struct drm_encoder *mga_connector_best_encoder(struct drm_connector
+						  *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	/* pick the encoder ids */
+	if (enc_id) {
+		obj = drm_mode_object_find(connector->dev, enc_id,
+					   DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			return NULL;
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	return NULL;
+}
+
+static enum drm_connector_status mga_vga_detect(struct drm_connector
+						   *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static void mga_connector_destroy(struct drm_connector *connector)
+{
+	struct mga_connector *mga_connector = to_mga_connector(connector);
+	mgag200_i2c_destroy(mga_connector->i2c);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
+	.get_modes = mga_vga_get_modes,
+	.mode_valid = mga_vga_mode_valid,
+	.best_encoder = mga_connector_best_encoder,
+};
+
+struct drm_connector_funcs mga_vga_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = mga_vga_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = mga_connector_destroy,
+};
+
+static struct drm_connector *mga_vga_init(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct mga_connector *mga_connector;
+
+	mga_connector = kzalloc(sizeof(struct mga_connector), GFP_KERNEL);
+	if (!mga_connector)
+		return NULL;
+
+	connector = &mga_connector->base;
+
+	drm_connector_init(dev, connector,
+			   &mga_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+	drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
+
+	mga_connector->i2c = mgag200_i2c_create(dev);
+	if (!mga_connector->i2c)
+		DRM_ERROR("failed to add ddc bus\n");
+
+	return connector;
+}
+
+
+int mgag200_modeset_init(struct mga_device *mdev)
+{
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int ret;
+
+	mdev->mode_info.mode_config_initialized = true;
+
+	mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
+	mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
+
+	mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
+
+	mga_crtc_init(mdev);
+
+	encoder = mga_encoder_init(mdev->dev);
+	if (!encoder) {
+		DRM_ERROR("mga_encoder_init failed\n");
+		return -ENOMEM;
+	}
+
+	connector = mga_vga_init(mdev->dev);
+	if (!connector) {
+		DRM_ERROR("mga_vga_init failed\n");
+		return -ENOMEM;
+	}
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	ret = mgag200_fbdev_init(mdev);
+	if (ret) {
+		DRM_ERROR("mga_fbdev_init failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+void mgag200_modeset_fini(struct mga_device *mdev)
+{
+	/* nothing to tear down yet */
+}
diff --git a/linux-imx/drivers/gpu/drm/mgag200/mgag200_reg.h b/linux-imx/drivers/gpu/drm/mgag200/mgag200_reg.h
new file mode 100644
index 0000000..fb24d86
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -0,0 +1,661 @@
+/*
+ * MGA Millennium (MGA2064W) functions
+ * MGA Mystique (MGA1064SG) functions
+ *
+ * Copyright 1996 The XFree86 Project, Inc.
+ *
+ * Authors
+ *		Dirk Hohndel
+ *			hohndel@XFree86.Org
+ *		David Dawes
+ *			dawes@XFree86.Org
+ * Contributors:
+ *		Guy DESBIEF, Aix-en-provence, France
+ *			g.desbief@aix.pacwan.net
+ *		MGA1064SG Mystique register file
+ */
+
+
+#ifndef _MGA_REG_H_
+#define _MGA_REG_H_
+
+#define	MGAREG_DWGCTL		0x1c00
+#define	MGAREG_MACCESS		0x1c04
+/* the following is a mystique only register */
+#define MGAREG_MCTLWTST		0x1c08
+#define	MGAREG_ZORG		0x1c0c
+
+#define	MGAREG_PAT0		0x1c10
+#define	MGAREG_PAT1		0x1c14
+#define	MGAREG_PLNWT		0x1c1c
+
+#define	MGAREG_BCOL		0x1c20
+#define	MGAREG_FCOL		0x1c24
+
+#define	MGAREG_SRC0		0x1c30
+#define	MGAREG_SRC1		0x1c34
+#define	MGAREG_SRC2		0x1c38
+#define	MGAREG_SRC3		0x1c3c
+
+#define	MGAREG_XYSTRT		0x1c40
+#define	MGAREG_XYEND		0x1c44
+
+#define	MGAREG_SHIFT		0x1c50
+/* the following is a mystique only register */
+#define MGAREG_DMAPAD		0x1c54
+#define	MGAREG_SGN		0x1c58
+#define	MGAREG_LEN		0x1c5c
+
+#define	MGAREG_AR0		0x1c60
+#define	MGAREG_AR1		0x1c64
+#define	MGAREG_AR2		0x1c68
+#define	MGAREG_AR3		0x1c6c
+#define	MGAREG_AR4		0x1c70
+#define	MGAREG_AR5		0x1c74
+#define	MGAREG_AR6		0x1c78
+
+#define	MGAREG_CXBNDRY		0x1c80
+#define	MGAREG_FXBNDRY		0x1c84
+#define	MGAREG_YDSTLEN		0x1c88
+#define	MGAREG_PITCH		0x1c8c
+
+#define	MGAREG_YDST		0x1c90
+#define	MGAREG_YDSTORG		0x1c94
+#define	MGAREG_YTOP		0x1c98
+#define	MGAREG_YBOT		0x1c9c
+
+#define	MGAREG_CXLEFT		0x1ca0
+#define	MGAREG_CXRIGHT		0x1ca4
+#define	MGAREG_FXLEFT		0x1ca8
+#define	MGAREG_FXRIGHT		0x1cac
+
+#define	MGAREG_XDST		0x1cb0
+
+#define	MGAREG_DR0		0x1cc0
+#define	MGAREG_DR1		0x1cc4
+#define	MGAREG_DR2		0x1cc8
+#define	MGAREG_DR3		0x1ccc
+
+#define	MGAREG_DR4		0x1cd0
+#define	MGAREG_DR5		0x1cd4
+#define	MGAREG_DR6		0x1cd8
+#define	MGAREG_DR7		0x1cdc
+
+#define	MGAREG_DR8		0x1ce0
+#define	MGAREG_DR9		0x1ce4
+#define	MGAREG_DR10		0x1ce8
+#define	MGAREG_DR11		0x1cec
+
+#define	MGAREG_DR12		0x1cf0
+#define	MGAREG_DR13		0x1cf4
+#define	MGAREG_DR14		0x1cf8
+#define	MGAREG_DR15		0x1cfc
+
+#define MGAREG_SRCORG		0x2cb4
+#define MGAREG_DSTORG		0x2cb8
+
+/* OR this into one of the previous "power registers" to start
+   the drawing engine */
+
+#define MGAREG_EXEC		0x0100
+
+#define	MGAREG_FIFOSTATUS	0x1e10
+#define	MGAREG_Status		0x1e14
+#define MGAREG_CACHEFLUSH       0x1fff
+#define	MGAREG_ICLEAR		0x1e18
+#define	MGAREG_IEN		0x1e1c
+
+#define	MGAREG_VCOUNT		0x1e20
+
+#define	MGAREG_Reset		0x1e40
+
+#define	MGAREG_OPMODE		0x1e54
+
+/* Warp Registers */
+#define MGAREG_WIADDR           0x1dc0
+#define MGAREG_WIADDR2          0x1dd8
+#define MGAREG_WGETMSB          0x1dc8
+#define MGAREG_WVRTXSZ          0x1dcc
+#define MGAREG_WACCEPTSEQ       0x1dd4
+#define MGAREG_WMISC            0x1e70
+
+#define MGAREG_MEMCTL           0x2e08
+
+/* OPMODE register additives */
+
+#define MGAOPM_DMA_GENERAL	(0x00 << 2)
+#define MGAOPM_DMA_BLIT		(0x01 << 2)
+#define MGAOPM_DMA_VECTOR	(0x10 << 2)
+
+/* MACCESS register additives */
+#define MGAMAC_PW8               0x00
+#define MGAMAC_PW16              0x01
+#define MGAMAC_PW24              0x03 /* not a typo */
+#define MGAMAC_PW32              0x02 /* not a typo */
+#define MGAMAC_BYPASS332         0x10000000
+#define MGAMAC_NODITHER          0x40000000
+#define MGAMAC_DIT555            0x80000000
+
+/* DWGCTL register additives */
+
+/* Lines */
+
+#define MGADWG_LINE_OPEN	0x00
+#define MGADWG_AUTOLINE_OPEN	0x01
+#define MGADWG_LINE_CLOSE	0x02
+#define MGADWG_AUTOLINE_CLOSE	0x03
+
+/* Trapezoids */
+#define MGADWG_TRAP		0x04
+#define MGADWG_TEXTURE_TRAP	0x06
+
+/* BitBlts */
+
+#define MGADWG_BITBLT		0x08
+#define MGADWG_FBITBLT		0x0c
+#define MGADWG_ILOAD		0x09
+#define MGADWG_ILOAD_SCALE	0x0d
+#define MGADWG_ILOAD_FILTER	0x0f
+#define MGADWG_ILOAD_HIQH	0x07
+#define MGADWG_ILOAD_HIQHV	0x0e
+#define MGADWG_IDUMP		0x0a
+
+/* atype access to WRAM */
+
+#define MGADWG_RPL		( 0x00 << 4 )
+#define MGADWG_RSTR		( 0x01 << 4 )
+#define MGADWG_ZI		( 0x03 << 4 )
+#define MGADWG_BLK 		( 0x04 << 4 )
+#define MGADWG_I		( 0x07 << 4 )
+
+/* specifies whether bit blits are linear or xy */
+#define MGADWG_LINEAR		( 0x01 << 7 )
+
+/* z drawing mode. use MGADWG_NOZCMP for always */
+
+#define MGADWG_NOZCMP		( 0x00 << 8 )
+#define MGADWG_ZE		( 0x02 << 8 )
+#define MGADWG_ZNE		( 0x03 << 8 )
+#define MGADWG_ZLT		( 0x04 << 8 )
+#define MGADWG_ZLTE		( 0x05 << 8 )
+#define MGADWG_GT		( 0x06 << 8 )
+#define MGADWG_GTE		( 0x07 << 8 )
+
+/* use this to force colour expansion circuitry to do its stuff */
+
+#define MGADWG_SOLID		( 0x01 << 11 )
+
+/* ar register at zero */
+
+#define MGADWG_ARZERO		( 0x01 << 12 )
+
+#define MGADWG_SGNZERO		( 0x01 << 13 )
+
+#define MGADWG_SHIFTZERO	( 0x01 << 14 )
+
+/* See table on 4-43 for bop ALU operations */
+
+/* See table on 4-44 for translucidity masks */
+
+#define MGADWG_BMONOLEF		( 0x00 << 25 )
+#define MGADWG_BMONOWF		( 0x04 << 25 )
+#define MGADWG_BPLAN		( 0x01 << 25 )
+
+/* note that if bfcol is specified and you're doing a bitblt, it causes
+   a fbitblt to be performed, so check that you obey the fbitblt rules */
+
+#define MGADWG_BFCOL   		( 0x02 << 25 )
+#define MGADWG_BUYUV		( 0x0e << 25 )
+#define MGADWG_BU32BGR		( 0x03 << 25 )
+#define MGADWG_BU32RGB		( 0x07 << 25 )
+#define MGADWG_BU24BGR		( 0x0b << 25 )
+#define MGADWG_BU24RGB		( 0x0f << 25 )
+
+#define MGADWG_PATTERN		( 0x01 << 29 )
+#define MGADWG_TRANSC		( 0x01 << 30 )
+#define MGAREG_MISC_WRITE	0x3c2
+#define MGAREG_MISC_READ	0x3cc
+#define MGAREG_MEM_MISC_WRITE       0x1fc2
+#define MGAREG_MEM_MISC_READ        0x1fcc
+
+#define MGAREG_MISC_IOADSEL	(0x1 << 0)
+#define MGAREG_MISC_RAMMAPEN	(0x1 << 1)
+#define MGAREG_MISC_CLK_SEL_VGA25	(0x0 << 2)
+#define MGAREG_MISC_CLK_SEL_VGA28	(0x1 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_PIX	(0x2 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_MSK	(0x3 << 2)
+#define MGAREG_MISC_VIDEO_DIS	(0x1 << 4)
+#define MGAREG_MISC_HIGH_PG_SEL	(0x1 << 5)
+
+/* MMIO VGA registers */
+#define MGAREG_SEQ_INDEX	0x1fc4
+#define MGAREG_SEQ_DATA		0x1fc5
+#define MGAREG_CRTC_INDEX	0x1fd4
+#define MGAREG_CRTC_DATA	0x1fd5
+#define MGAREG_CRTCEXT_INDEX	0x1fde
+#define MGAREG_CRTCEXT_DATA	0x1fdf
+
+
+
+/* MGA bits for registers PCI_OPTION_REG */
+#define MGA1064_OPT_SYS_CLK_PCI   		( 0x00 << 0 )
+#define MGA1064_OPT_SYS_CLK_PLL   		( 0x01 << 0 )
+#define MGA1064_OPT_SYS_CLK_EXT   		( 0x02 << 0 )
+#define MGA1064_OPT_SYS_CLK_MSK   		( 0x03 << 0 )
+
+#define MGA1064_OPT_SYS_CLK_DIS   		( 0x01 << 2 )
+#define MGA1064_OPT_G_CLK_DIV_1   		( 0x01 << 3 )
+#define MGA1064_OPT_M_CLK_DIV_1   		( 0x01 << 4 )
+
+#define MGA1064_OPT_SYS_PLL_PDN   		( 0x01 << 5 )
+#define MGA1064_OPT_VGA_ION   		( 0x01 << 8 )
+
+/* MGA registers in PCI config space */
+#define PCI_MGA_INDEX		0x44
+#define PCI_MGA_DATA		0x48
+#define PCI_MGA_OPTION		0x40
+#define PCI_MGA_OPTION2		0x50
+#define PCI_MGA_OPTION3		0x54
+
+#define RAMDAC_OFFSET		0x3c00
+
+/* TVP3026 direct registers */
+
+#define TVP3026_INDEX		0x00
+#define TVP3026_WADR_PAL	0x00
+#define TVP3026_COL_PAL		0x01
+#define TVP3026_PIX_RD_MSK	0x02
+#define TVP3026_RADR_PAL	0x03
+#define TVP3026_CUR_COL_ADDR	0x04
+#define TVP3026_CUR_COL_DATA	0x05
+#define TVP3026_DATA		0x0a
+#define TVP3026_CUR_RAM		0x0b
+#define TVP3026_CUR_XLOW	0x0c
+#define TVP3026_CUR_XHI		0x0d
+#define TVP3026_CUR_YLOW	0x0e
+#define TVP3026_CUR_YHI		0x0f
+
+/* TVP3026 indirect registers */
+
+#define TVP3026_SILICON_REV	0x01
+#define TVP3026_CURSOR_CTL	0x06
+#define TVP3026_LATCH_CTL	0x0f
+#define TVP3026_TRUE_COLOR_CTL	0x18
+#define TVP3026_MUX_CTL		0x19
+#define TVP3026_CLK_SEL		0x1a
+#define TVP3026_PAL_PAGE	0x1c
+#define TVP3026_GEN_CTL		0x1d
+#define TVP3026_MISC_CTL	0x1e
+#define TVP3026_GEN_IO_CTL	0x2a
+#define TVP3026_GEN_IO_DATA	0x2b
+#define TVP3026_PLL_ADDR	0x2c
+#define TVP3026_PIX_CLK_DATA	0x2d
+#define TVP3026_MEM_CLK_DATA	0x2e
+#define TVP3026_LOAD_CLK_DATA	0x2f
+#define TVP3026_KEY_RED_LOW	0x32
+#define TVP3026_KEY_RED_HI	0x33
+#define TVP3026_KEY_GREEN_LOW	0x34
+#define TVP3026_KEY_GREEN_HI	0x35
+#define TVP3026_KEY_BLUE_LOW	0x36
+#define TVP3026_KEY_BLUE_HI	0x37
+#define TVP3026_KEY_CTL		0x38
+#define TVP3026_MCLK_CTL	0x39
+#define TVP3026_SENSE_TEST	0x3a
+#define TVP3026_TEST_DATA	0x3b
+#define TVP3026_CRC_LSB		0x3c
+#define TVP3026_CRC_MSB		0x3d
+#define TVP3026_CRC_CTL		0x3e
+#define TVP3026_ID		0x3f
+#define TVP3026_RESET		0xff
+
+
+/* MGA1064 DAC Register file */
+/* MGA1064 direct registers */
+
+#define MGA1064_INDEX		0x00
+#define MGA1064_WADR_PAL	0x00
+#define MGA1064_SPAREREG        0x00
+#define MGA1064_COL_PAL		0x01
+#define MGA1064_PIX_RD_MSK	0x02
+#define MGA1064_RADR_PAL	0x03
+#define MGA1064_DATA		0x0a
+
+#define MGA1064_CUR_XLOW	0x0c
+#define MGA1064_CUR_XHI		0x0d
+#define MGA1064_CUR_YLOW	0x0e
+#define MGA1064_CUR_YHI		0x0f
+
+/* MGA1064 indirect registers */
+#define MGA1064_DVI_PIPE_CTL    0x03
+#define MGA1064_CURSOR_BASE_ADR_LOW	0x04
+#define MGA1064_CURSOR_BASE_ADR_HI	0x05
+#define MGA1064_CURSOR_CTL	0x06
+#define MGA1064_CURSOR_COL0_RED	0x08
+#define MGA1064_CURSOR_COL0_GREEN	0x09
+#define MGA1064_CURSOR_COL0_BLUE	0x0a
+
+#define MGA1064_CURSOR_COL1_RED	0x0c
+#define MGA1064_CURSOR_COL1_GREEN	0x0d
+#define MGA1064_CURSOR_COL1_BLUE	0x0e
+
+#define MGA1064_CURSOR_COL2_RED	0x010
+#define MGA1064_CURSOR_COL2_GREEN	0x011
+#define MGA1064_CURSOR_COL2_BLUE	0x012
+
+#define MGA1064_VREF_CTL	0x018
+
+#define MGA1064_MUL_CTL		0x19
+#define MGA1064_MUL_CTL_8bits		0x0
+#define MGA1064_MUL_CTL_15bits		0x01
+#define MGA1064_MUL_CTL_16bits		0x02
+#define MGA1064_MUL_CTL_24bits		0x03
+#define MGA1064_MUL_CTL_32bits		0x04
+#define MGA1064_MUL_CTL_2G8V16bits		0x05
+#define MGA1064_MUL_CTL_G16V16bits		0x06
+#define MGA1064_MUL_CTL_32_24bits		0x07
+
+#define MGA1064_PIX_CLK_CTL		0x1a
+#define MGA1064_PIX_CLK_CTL_CLK_DIS		( 0x01 << 2 )
+#define MGA1064_PIX_CLK_CTL_CLK_POW_DOWN	( 0x01 << 3 )
+#define MGA1064_PIX_CLK_CTL_SEL_PCI		( 0x00 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_PLL		( 0x01 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_EXT		( 0x02 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_MSK		( 0x03 << 0 )
+
+#define MGA1064_GEN_CTL		0x1d
+#define MGA1064_GEN_CTL_SYNC_ON_GREEN_DIS      (0x01 << 5)
+#define MGA1064_MISC_CTL	0x1e
+#define MGA1064_MISC_CTL_DAC_EN                ( 0x01 << 0 )
+#define MGA1064_MISC_CTL_VGA   		( 0x01 << 1 )
+#define MGA1064_MISC_CTL_DIS_CON   		( 0x03 << 1 )
+#define MGA1064_MISC_CTL_MAFC   		( 0x02 << 1 )
+#define MGA1064_MISC_CTL_VGA8   		( 0x01 << 3 )
+#define MGA1064_MISC_CTL_DAC_RAM_CS   		( 0x01 << 4 )
+
+#define MGA1064_GEN_IO_CTL2	0x29
+#define MGA1064_GEN_IO_CTL	0x2a
+#define MGA1064_GEN_IO_DATA	0x2b
+#define MGA1064_SYS_PLL_M	0x2c
+#define MGA1064_SYS_PLL_N	0x2d
+#define MGA1064_SYS_PLL_P	0x2e
+#define MGA1064_SYS_PLL_STAT	0x2f
+
+#define MGA1064_REMHEADCTL     0x30
+#define MGA1064_REMHEADCTL_CLKDIS ( 0x01 << 0 )
+#define MGA1064_REMHEADCTL_CLKSL_OFF ( 0x00 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PLL ( 0x01 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PCI ( 0x02 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_MSK ( 0x03 << 1 )
+
+#define MGA1064_REMHEADCTL2     0x31
+
+#define MGA1064_ZOOM_CTL	0x38
+#define MGA1064_SENSE_TST	0x3a
+
+#define MGA1064_CRC_LSB		0x3c
+#define MGA1064_CRC_MSB		0x3d
+#define MGA1064_CRC_CTL		0x3e
+#define MGA1064_COL_KEY_MSK_LSB		0x40
+#define MGA1064_COL_KEY_MSK_MSB		0x41
+#define MGA1064_COL_KEY_LSB		0x42
+#define MGA1064_COL_KEY_MSB		0x43
+#define MGA1064_PIX_PLLA_M	0x44
+#define MGA1064_PIX_PLLA_N	0x45
+#define MGA1064_PIX_PLLA_P	0x46
+#define MGA1064_PIX_PLLB_M	0x48
+#define MGA1064_PIX_PLLB_N	0x49
+#define MGA1064_PIX_PLLB_P	0x4a
+#define MGA1064_PIX_PLLC_M	0x4c
+#define MGA1064_PIX_PLLC_N	0x4d
+#define MGA1064_PIX_PLLC_P	0x4e
+
+#define MGA1064_PIX_PLL_STAT	0x4f
+
+/*Added for G450 dual head*/
+
+#define MGA1064_VID_PLL_STAT    0x8c
+#define MGA1064_VID_PLL_P       0x8D
+#define MGA1064_VID_PLL_M       0x8E
+#define MGA1064_VID_PLL_N       0x8F
+
+/* Modified PLL for G200 Winbond (G200WB) */
+#define MGA1064_WB_PIX_PLLC_M	0xb7
+#define MGA1064_WB_PIX_PLLC_N	0xb6
+#define MGA1064_WB_PIX_PLLC_P	0xb8
+
+/* Modified PLL for G200 Maxim (G200EV) */
+#define MGA1064_EV_PIX_PLLC_M	0xb6
+#define MGA1064_EV_PIX_PLLC_N	0xb7
+#define MGA1064_EV_PIX_PLLC_P	0xb8
+
+/* Modified PLL for G200 EH */
+#define MGA1064_EH_PIX_PLLC_M   0xb6
+#define MGA1064_EH_PIX_PLLC_N   0xb7
+#define MGA1064_EH_PIX_PLLC_P   0xb8
+
+/* Modified PLL for G200 Maxim (G200ER) */
+#define MGA1064_ER_PIX_PLLC_M	0xb7
+#define MGA1064_ER_PIX_PLLC_N	0xb6
+#define MGA1064_ER_PIX_PLLC_P	0xb8
+
+#define MGA1064_DISP_CTL        0x8a
+#define MGA1064_DISP_CTL_DAC1OUTSEL_MASK       0x01
+#define MGA1064_DISP_CTL_DAC1OUTSEL_DIS        0x00
+#define MGA1064_DISP_CTL_DAC1OUTSEL_EN         0x01
+#define MGA1064_DISP_CTL_DAC2OUTSEL_MASK       (0x03 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_DIS        0x00
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC1      (0x01 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC2      (0x02 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_TVE        (0x03 << 2)
+#define MGA1064_DISP_CTL_PANOUTSEL_MASK        (0x03 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_DIS         0x00
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC1       (0x01 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2RGB    (0x02 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2656    (0x03 << 5)
+
+#define MGA1064_SYNC_CTL        0x8b
+
+#define MGA1064_PWR_CTL         0xa0
+#define MGA1064_PWR_CTL_DAC2_EN                (0x01 << 0)
+#define MGA1064_PWR_CTL_VID_PLL_EN             (0x01 << 1)
+#define MGA1064_PWR_CTL_PANEL_EN               (0x01 << 2)
+#define MGA1064_PWR_CTL_RFIFO_EN               (0x01 << 3)
+#define MGA1064_PWR_CTL_CFIFO_EN               (0x01 << 4)
+
+#define MGA1064_PAN_CTL         0xa2
+
+/* Using crtc2 */
+#define MGAREG2_C2CTL            0x10
+#define MGAREG2_C2HPARAM         0x14
+#define MGAREG2_C2HSYNC          0x18
+#define MGAREG2_C2VPARAM         0x1c
+#define MGAREG2_C2VSYNC          0x20
+#define MGAREG2_C2STARTADD0      0x28
+
+#define MGAREG2_C2OFFSET         0x40
+#define MGAREG2_C2DATACTL        0x4c
+
+#define MGAREG_C2CTL            0x3c10
+#define MGAREG_C2CTL_C2_EN                     0x01
+
+#define MGAREG_C2_HIPRILVL_M                   (0x07 << 4)
+#define MGAREG_C2_MAXHIPRI_M                   (0x07 << 8)
+
+#define MGAREG_C2CTL_PIXCLKSEL_MASK            (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSELH_MASK           (0x01 << 14)
+#define MGAREG_C2CTL_PIXCLKSEL_PCICLK          0x00
+#define MGAREG_C2CTL_PIXCLKSEL_VDOCLK          (0x01 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_PIXELPLL        (0x02 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VIDEOPLL        (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VDCLK           (0x01 << 14)
+
+#define MGAREG_C2CTL_PIXCLKSEL_CRISTAL         ((0x01 << 1) | (0x01 << 14))
+#define MGAREG_C2CTL_PIXCLKSEL_SYSTEMPLL       ((0x02 << 1) | (0x01 << 14))
+
+#define MGAREG_C2CTL_PIXCLKDIS_MASK            (0x01 << 3)
+#define MGAREG_C2CTL_PIXCLKDIS_DISABLE         (0x01 << 3)
+
+#define MGAREG_C2CTL_CRTCDACSEL_MASK           (0x01 << 20)
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC1          0x00
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC2          (0x01 << 20)
+
+#define MGAREG_C2HPARAM         0x3c14
+#define MGAREG_C2HSYNC          0x3c18
+#define MGAREG_C2VPARAM         0x3c1c
+#define MGAREG_C2VSYNC          0x3c20
+#define MGAREG_C2STARTADD0      0x3c28
+
+#define MGAREG_C2OFFSET         0x3c40
+#define MGAREG_C2DATACTL        0x3c4c
+
+/* video register */
+
+#define MGAREG_BESA1C3ORG	0x3d60
+#define MGAREG_BESA1CORG	0x3d10
+#define MGAREG_BESA1ORG		0x3d00
+#define MGAREG_BESCTL		0x3d20
+#define MGAREG_BESGLOBCTL	0x3dc0
+#define MGAREG_BESHCOORD	0x3d28
+#define MGAREG_BESHISCAL	0x3d30
+#define MGAREG_BESHSRCEND	0x3d3c
+#define MGAREG_BESHSRCLST	0x3d50
+#define MGAREG_BESHSRCST	0x3d38
+#define MGAREG_BESLUMACTL	0x3d40
+#define MGAREG_BESPITCH		0x3d24
+#define MGAREG_BESV1SRCLST	0x3d54
+#define MGAREG_BESV1WGHT	0x3d48
+#define MGAREG_BESVCOORD	0x3d2c
+#define MGAREG_BESVISCAL	0x3d34
+
+/* texture engine registers */
+
+#define MGAREG_TMR0		0x2c00
+#define MGAREG_TMR1		0x2c04
+#define MGAREG_TMR2		0x2c08
+#define MGAREG_TMR3		0x2c0c
+#define MGAREG_TMR4		0x2c10
+#define MGAREG_TMR5		0x2c14
+#define MGAREG_TMR6		0x2c18
+#define MGAREG_TMR7		0x2c1c
+#define MGAREG_TMR8		0x2c20
+#define MGAREG_TEXORG		0x2c24
+#define MGAREG_TEXWIDTH		0x2c28
+#define MGAREG_TEXHEIGHT	0x2c2c
+#define MGAREG_TEXCTL		0x2c30
+#    define MGA_TW4                             (0x00000000)
+#    define MGA_TW8                             (0x00000001)
+#    define MGA_TW15                            (0x00000002)
+#    define MGA_TW16                            (0x00000003)
+#    define MGA_TW12                            (0x00000004)
+#    define MGA_TW32                            (0x00000006)
+#    define MGA_TW8A                            (0x00000007)
+#    define MGA_TW8AL                           (0x00000008)
+#    define MGA_TW422                           (0x0000000A)
+#    define MGA_TW422UYVY                       (0x0000000B)
+#    define MGA_PITCHLIN                        (0x00000100)
+#    define MGA_NOPERSPECTIVE                   (0x00200000)
+#    define MGA_TAKEY                           (0x02000000)
+#    define MGA_TAMASK                          (0x04000000)
+#    define MGA_CLAMPUV                         (0x18000000)
+#    define MGA_TEXMODULATE                     (0x20000000)
+#define MGAREG_TEXCTL2		0x2c3c
+#    define MGA_G400_TC2_MAGIC                  (0x00008000)
+#    define MGA_TC2_DECALBLEND                  (0x00000001)
+#    define MGA_TC2_IDECAL                      (0x00000002)
+#    define MGA_TC2_DECALDIS                    (0x00000004)
+#    define MGA_TC2_CKSTRANSDIS                 (0x00000010)
+#    define MGA_TC2_BORDEREN                    (0x00000020)
+#    define MGA_TC2_SPECEN                      (0x00000040)
+#    define MGA_TC2_DUALTEX                     (0x00000080)
+#    define MGA_TC2_TABLEFOG                    (0x00000100)
+#    define MGA_TC2_BUMPMAP                     (0x00000200)
+#    define MGA_TC2_SELECT_TMU1                 (0x80000000)
+#define MGAREG_TEXTRANS		0x2c34
+#define MGAREG_TEXTRANSHIGH	0x2c38
+#define MGAREG_TEXFILTER	0x2c58
+#    define MGA_MIN_NRST                        (0x00000000)
+#    define MGA_MIN_BILIN                       (0x00000002)
+#    define MGA_MIN_ANISO                       (0x0000000D)
+#    define MGA_MAG_NRST                        (0x00000000)
+#    define MGA_MAG_BILIN                       (0x00000020)
+#    define MGA_FILTERALPHA                     (0x00100000)
+#define MGAREG_ALPHASTART	0x2c70
+#define MGAREG_ALPHAXINC	0x2c74
+#define MGAREG_ALPHAYINC	0x2c78
+#define MGAREG_ALPHACTRL	0x2c7c
+#    define MGA_SRC_ZERO                        (0x00000000)
+#    define MGA_SRC_ONE                         (0x00000001)
+#    define MGA_SRC_DST_COLOR                   (0x00000002)
+#    define MGA_SRC_ONE_MINUS_DST_COLOR         (0x00000003)
+#    define MGA_SRC_ALPHA                       (0x00000004)
+#    define MGA_SRC_ONE_MINUS_SRC_ALPHA         (0x00000005)
+#    define MGA_SRC_DST_ALPHA                   (0x00000006)
+#    define MGA_SRC_ONE_MINUS_DST_ALPHA         (0x00000007)
+#    define MGA_SRC_SRC_ALPHA_SATURATE          (0x00000008)
+#    define MGA_SRC_BLEND_MASK                  (0x0000000f)
+#    define MGA_DST_ZERO                        (0x00000000)
+#    define MGA_DST_ONE                         (0x00000010)
+#    define MGA_DST_SRC_COLOR                   (0x00000020)
+#    define MGA_DST_ONE_MINUS_SRC_COLOR         (0x00000030)
+#    define MGA_DST_SRC_ALPHA                   (0x00000040)
+#    define MGA_DST_ONE_MINUS_SRC_ALPHA         (0x00000050)
+#    define MGA_DST_DST_ALPHA                   (0x00000060)
+#    define MGA_DST_ONE_MINUS_DST_ALPHA         (0x00000070)
+#    define MGA_DST_BLEND_MASK                  (0x00000070)
+#    define MGA_ALPHACHANNEL                    (0x00000100)
+#    define MGA_VIDEOALPHA                      (0x00000200)
+#    define MGA_DIFFUSEDALPHA                   (0x01000000)
+#    define MGA_MODULATEDALPHA                  (0x02000000)
+#define MGAREG_TDUALSTAGE0                      (0x2CF8)
+#define MGAREG_TDUALSTAGE1                      (0x2CFC)
+#    define MGA_TDS_COLOR_ARG2_DIFFUSE          (0x00000000)
+#    define MGA_TDS_COLOR_ARG2_SPECULAR         (0x00000001)
+#    define MGA_TDS_COLOR_ARG2_FCOL             (0x00000002)
+#    define MGA_TDS_COLOR_ARG2_PREVSTAGE        (0x00000003)
+#    define MGA_TDS_COLOR_ALPHA_DIFFUSE         (0x00000000)
+#    define MGA_TDS_COLOR_ALPHA_FCOL            (0x00000004)
+#    define MGA_TDS_COLOR_ALPHA_CURRTEX         (0x00000008)
+#    define MGA_TDS_COLOR_ALPHA_PREVTEX         (0x0000000c)
+#    define MGA_TDS_COLOR_ALPHA_PREVSTAGE       (0x00000010)
+#    define MGA_TDS_COLOR_ARG1_REPLICATEALPHA   (0x00000020)
+#    define MGA_TDS_COLOR_ARG1_INV              (0x00000040)
+#    define MGA_TDS_COLOR_ARG2_REPLICATEALPHA   (0x00000080)
+#    define MGA_TDS_COLOR_ARG2_INV              (0x00000100)
+#    define MGA_TDS_COLOR_ALPHA1INV             (0x00000200)
+#    define MGA_TDS_COLOR_ALPHA2INV             (0x00000400)
+#    define MGA_TDS_COLOR_ARG1MUL_ALPHA1        (0x00000800)
+#    define MGA_TDS_COLOR_ARG2MUL_ALPHA2        (0x00001000)
+#    define MGA_TDS_COLOR_ARG1ADD_MULOUT        (0x00002000)
+#    define MGA_TDS_COLOR_ARG2ADD_MULOUT        (0x00004000)
+#    define MGA_TDS_COLOR_MODBRIGHT_2X          (0x00008000)
+#    define MGA_TDS_COLOR_MODBRIGHT_4X          (0x00010000)
+#    define MGA_TDS_COLOR_ADD_SUB               (0x00000000)
+#    define MGA_TDS_COLOR_ADD_ADD               (0x00020000)
+#    define MGA_TDS_COLOR_ADD2X                 (0x00040000)
+#    define MGA_TDS_COLOR_ADDBIAS               (0x00080000)
+#    define MGA_TDS_COLOR_BLEND                 (0x00100000)
+#    define MGA_TDS_COLOR_SEL_ARG1              (0x00000000)
+#    define MGA_TDS_COLOR_SEL_ARG2              (0x00200000)
+#    define MGA_TDS_COLOR_SEL_ADD               (0x00400000)
+#    define MGA_TDS_COLOR_SEL_MUL               (0x00600000)
+#    define MGA_TDS_ALPHA_ARG1_INV              (0x00800000)
+#    define MGA_TDS_ALPHA_ARG2_DIFFUSE          (0x00000000)
+#    define MGA_TDS_ALPHA_ARG2_FCOL             (0x01000000)
+#    define MGA_TDS_ALPHA_ARG2_PREVTEX          (0x02000000)
+#    define MGA_TDS_ALPHA_ARG2_PREVSTAGE        (0x03000000)
+#    define MGA_TDS_ALPHA_ARG2_INV              (0x04000000)
+#    define MGA_TDS_ALPHA_ADD                   (0x08000000)
+#    define MGA_TDS_ALPHA_ADDBIAS               (0x10000000)
+#    define MGA_TDS_ALPHA_ADD2X                 (0x20000000)
+#    define MGA_TDS_ALPHA_SEL_ARG1              (0x00000000)
+#    define MGA_TDS_ALPHA_SEL_ARG2              (0x40000000)
+#    define MGA_TDS_ALPHA_SEL_ADD               (0x80000000)
+#    define MGA_TDS_ALPHA_SEL_MUL               (0xc0000000)
+
+#define MGAREG_DWGSYNC		0x2c4c
+
+#define MGAREG_AGP_PLL		0x1e4c
+#define MGA_AGP2XPLL_ENABLE		0x1
+#define MGA_AGP2XPLL_DISABLE		0x0
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/mgag200/mgag200_ttm.c b/linux-imx/drivers/gpu/drm/mgag200/mgag200_ttm.c
new file mode 100644
index 0000000..d2cb32f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "mgag200_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct mga_device *
+mgag200_bdev(struct ttm_bo_device *bd)
+{
+	return container_of(bd, struct mga_device, ttm.bdev);
+}
+
+static int
+mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void
+mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int mgag200_ttm_global_init(struct mga_device *ast)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	global_ref = &ast->ttm.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &mgag200_ttm_mem_global_init;
+	global_ref->release = &mgag200_ttm_mem_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	ast->ttm.bo_global_ref.mem_glob =
+		ast->ttm.mem_global_ref.object;
+	global_ref = &ast->ttm.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&ast->ttm.mem_global_ref);
+		return r;
+	}
+	return 0;
+}
+
+void
+mgag200_ttm_global_release(struct mga_device *ast)
+{
+	if (ast->ttm.mem_global_ref.release == NULL)
+		return;
+
+	drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
+	drm_global_item_unref(&ast->ttm.mem_global_ref);
+	ast->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+	struct mgag200_bo *bo;
+
+	bo = container_of(tbo, struct mgag200_bo, bo);
+
+	drm_gem_object_release(&bo->gem);
+	kfree(bo);
+}
+
+bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &mgag200_bo_ttm_destroy)
+		return true;
+	return false;
+}
+
+static int
+mgag200_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+		     struct ttm_mem_type_manager *man)
+{
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+		man->func = &ttm_bo_manager_func;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED |
+			TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+	struct mgag200_bo *mgabo = mgag200_bo(bo);
+
+	if (!mgag200_ttm_bo_is_mgag200_bo(bo))
+		return;
+
+	mgag200_ttm_placement(mgabo, TTM_PL_FLAG_SYSTEM);
+	*pl = mgabo->placement;
+}
+
+static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	return 0;
+}
+
+static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				  struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct mga_device *mdev = mgag200_bdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = pci_resource_start(mdev->dev->pdev, 0);
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int mgag200_bo_move(struct ttm_buffer_object *bo,
+		       bool evict, bool interruptible,
+		       bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
+{
+	int r;
+	r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+	return r;
+}
+
+
+static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
+{
+	ttm_tt_fini(tt);
+	kfree(tt);
+}
+
+static struct ttm_backend_func mgag200_tt_backend_func = {
+	.destroy = &mgag200_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
+				 unsigned long size, uint32_t page_flags,
+				 struct page *dummy_read_page)
+{
+	struct ttm_tt *tt;
+
+	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+	if (tt == NULL)
+		return NULL;
+	tt->func = &mgag200_tt_backend_func;
+	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+		kfree(tt);
+		return NULL;
+	}
+	return tt;
+}
+
+static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	return ttm_pool_populate(ttm);
+}
+
+static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver mgag200_bo_driver = {
+	.ttm_tt_create = mgag200_ttm_tt_create,
+	.ttm_tt_populate = mgag200_ttm_tt_populate,
+	.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
+	.init_mem_type = mgag200_bo_init_mem_type,
+	.evict_flags = mgag200_bo_evict_flags,
+	.move = mgag200_bo_move,
+	.verify_access = mgag200_bo_verify_access,
+	.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
+	.io_mem_free = &mgag200_ttm_io_mem_free,
+};
+
+int mgag200_mm_init(struct mga_device *mdev)
+{
+	int ret;
+	struct drm_device *dev = mdev->dev;
+	struct ttm_bo_device *bdev = &mdev->ttm.bdev;
+
+	ret = mgag200_ttm_global_init(mdev);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_device_init(&mdev->ttm.bdev,
+				 mdev->ttm.bo_global_ref.ref.object,
+				 &mgag200_bo_driver, DRM_FILE_PAGE_OFFSET,
+				 true);
+	if (ret) {
+		DRM_ERROR("Error initialising bo driver: %d\n", ret);
+		return ret;
+	}
+
+	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, mdev->mc.vram_size >> PAGE_SHIFT);
+	if (ret) {
+		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+		return ret;
+	}
+
+	mdev->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+				    pci_resource_len(dev->pdev, 0),
+				    DRM_MTRR_WC);
+
+	return 0;
+}
+
+void mgag200_mm_fini(struct mga_device *mdev)
+{
+	struct drm_device *dev = mdev->dev;
+	ttm_bo_device_release(&mdev->ttm.bdev);
+
+	mgag200_ttm_global_release(mdev);
+
+	if (mdev->fb_mtrr >= 0) {
+		drm_mtrr_del(mdev->fb_mtrr,
+			     pci_resource_start(dev->pdev, 0),
+			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+		mdev->fb_mtrr = -1;
+	}
+}
+
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
+{
+	u32 c = 0;
+	bo->placement.fpfn = 0;
+	bo->placement.lpfn = 0;
+	bo->placement.placement = bo->placements;
+	bo->placement.busy_placement = bo->placements;
+	if (domain & TTM_PL_FLAG_VRAM)
+		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+	if (domain & TTM_PL_FLAG_SYSTEM)
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	if (!c)
+		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	bo->placement.num_placement = c;
+	bo->placement.num_busy_placement = c;
+}
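+
+/*
+ * Illustrative: passing TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM (as
+ * mgag200_bo_create() does) lets TTM evict the bo from VRAM to system
+ * memory under pressure, while TTM_PL_FLAG_VRAM alone (as used when
+ * pinning for scanout) keeps the bo resident in VRAM.
+ */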
+
+int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
+{
+	int ret;
+
+	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+	if (ret) {
+		if (ret != -ERESTARTSYS && ret != -EBUSY)
+			DRM_ERROR("reserve failed %p %d\n", bo, ret);
+		return ret;
+	}
+	return 0;
+}
+
+void mgag200_bo_unreserve(struct mgag200_bo *bo)
+{
+	ttm_bo_unreserve(&bo->bo);
+}
+
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+		  uint32_t flags, struct mgag200_bo **pmgabo)
+{
+	struct mga_device *mdev = dev->dev_private;
+	struct mgag200_bo *mgabo;
+	size_t acc_size;
+	int ret;
+
+	mgabo = kzalloc(sizeof(struct mgag200_bo), GFP_KERNEL);
+	if (!mgabo)
+		return -ENOMEM;
+
+	ret = drm_gem_object_init(dev, &mgabo->gem, size);
+	if (ret) {
+		kfree(mgabo);
+		return ret;
+	}
+
+	mgabo->gem.driver_private = NULL;
+	mgabo->bo.bdev = &mdev->ttm.bdev;
+	mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
+
+	mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+	acc_size = ttm_bo_dma_acc_size(&mdev->ttm.bdev, size,
+				       sizeof(struct mgag200_bo));
+
+	ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
+			  ttm_bo_type_device, &mgabo->placement,
+			  align >> PAGE_SHIFT, false, NULL, acc_size,
+			  NULL, mgag200_bo_ttm_destroy);
+	if (ret)
+		return ret;
+
+	*pmgabo = mgabo;
+	return 0;
+}
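+
+/*
+ * A minimal usage sketch (illustrative): allocate a bo, then reserve
+ * and pin it into VRAM before scanout, mirroring the flow in
+ * mga_crtc_do_set_base():
+ *
+ *	struct mgag200_bo *bo;
+ *	u64 gpu_addr;
+ *	int ret;
+ *
+ *	ret = mgag200_bo_create(dev, size, 0, 0, &bo);
+ *	if (ret)
+ *		return ret;
+ *	if (!mgag200_bo_reserve(bo, false)) {
+ *		if (!mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr))
+ *			DRM_INFO("pinned at %llx\n", gpu_addr);
+ *		mgag200_bo_unreserve(bo);
+ *	}
+ */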
+
+static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
+{
+	return bo->bo.offset;
+}
+
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+	int i, ret;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = mgag200_bo_gpu_offset(bo);
+		/* already pinned: just bump the count, don't re-validate */
+		return 0;
+	}
+
+	mgag200_ttm_placement(bo, pl_flag);
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret)
+		return ret;
+
+	bo->pin_count = 1;
+	if (gpu_addr)
+		*gpu_addr = mgag200_bo_gpu_offset(bo);
+	return 0;
+}
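+
+/*
+ * Typical caller pattern (illustrative sketch, error handling elided):
+ * a bo must be reserved before pinning and unreserved afterwards:
+ *
+ *	ret = mgag200_bo_reserve(bo, false);
+ *	if (ret == 0) {
+ *		ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ *		mgag200_bo_unreserve(bo);
+ *	}
+ */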
+
+int mgag200_bo_unpin(struct mgag200_bo *bo)
+{
+	int i, ret;
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	for (i = 0; i < bo->placement.num_placement ; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int mgag200_bo_push_sysram(struct mgag200_bo *bo)
+{
+	int i, ret;
+	if (!bo->pin_count) {
+		DRM_ERROR("unpin bad %p\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+
+	if (bo->kmap.virtual)
+		ttm_bo_kunmap(&bo->kmap);
+
+	mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+	for (i = 0; i < bo->placement.num_placement ; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	if (ret) {
+		DRM_ERROR("pushing to VRAM failed\n");
+		return ret;
+	}
+	return 0;
+}
+
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct mga_device *mdev;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+		return drm_mmap(filp, vma);
+
+	file_priv = filp->private_data;
+	mdev = file_priv->minor->dev->dev_private;
+	return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
+}
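+
+/*
+ * Note: mmap offsets below DRM_FILE_PAGE_OFFSET belong to the legacy DRM
+ * mapping space and are routed to drm_mmap(); offsets at or above it are
+ * treated as TTM buffer-object offsets and handed to ttm_bo_mmap().
+ */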
diff --git a/linux-imx/drivers/gpu/drm/nouveau/Kconfig b/linux-imx/drivers/gpu/drm/nouveau/Kconfig
new file mode 100644
index 0000000..a7ff6d5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/Kconfig
@@ -0,0 +1,55 @@
+config DRM_NOUVEAU
+	tristate "Nouveau (nVidia) cards"
+	depends on DRM && PCI
+	select FW_LOADER
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select FB
+	select FRAMEBUFFER_CONSOLE if !EXPERT
+	select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
+	select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
+	select X86_PLATFORM_DEVICES if ACPI && X86
+	select ACPI_WMI if ACPI && X86
+	select MXM_WMI if ACPI && X86
+	select POWER_SUPPLY
+	help
+	  Choose this option for open-source nVidia support.
+
+config NOUVEAU_DEBUG
+	int "Maximum debug level"
+	depends on DRM_NOUVEAU
+	range 0 7
+	default 5
+	help
+	  Selects the maximum debug level to compile support for.
+
+	  0 - fatal
+	  1 - error
+	  2 - warning
+	  3 - info
+	  4 - debug
+	  5 - trace (recommended)
+	  6 - paranoia
+	  7 - spam
+
+	  The paranoia and spam levels will add a lot of extra checks which
+	  may potentially slow down driver operation.
+
+config NOUVEAU_DEBUG_DEFAULT
+	int "Default debug level"
+	depends on DRM_NOUVEAU
+	range 0 7
+	default 3
+	help
+	  Selects the default debug level.
+
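+# Example (illustrative) .config fragment: compile in messages up to the
+# trace level while defaulting the runtime verbosity to info:
+#
+#   CONFIG_NOUVEAU_DEBUG=5
+#   CONFIG_NOUVEAU_DEBUG_DEFAULT=3
+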
+config DRM_NOUVEAU_BACKLIGHT
+	bool "Support for backlight control"
+	depends on DRM_NOUVEAU
+	default y
+	help
+	  Say Y here if you want to control the backlight of your display
+	  (e.g. a laptop panel).
diff --git a/linux-imx/drivers/gpu/drm/nouveau/Makefile b/linux-imx/drivers/gpu/drm/nouveau/Makefile
new file mode 100644
index 0000000..998e8b4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/Makefile
@@ -0,0 +1,247 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+ccflags-y += -I$(src)/core/include
+ccflags-y += -I$(src)/core
+ccflags-y += -I$(src)
+
+nouveau-y := core/core/client.o
+nouveau-y += core/core/engctx.o
+nouveau-y += core/core/engine.o
+nouveau-y += core/core/enum.o
+nouveau-y += core/core/event.o
+nouveau-y += core/core/falcon.o
+nouveau-y += core/core/gpuobj.o
+nouveau-y += core/core/handle.o
+nouveau-y += core/core/mm.o
+nouveau-y += core/core/namedb.o
+nouveau-y += core/core/object.o
+nouveau-y += core/core/option.o
+nouveau-y += core/core/parent.o
+nouveau-y += core/core/printk.o
+nouveau-y += core/core/ramht.o
+nouveau-y += core/core/subdev.o
+
+nouveau-y += core/subdev/bar/base.o
+nouveau-y += core/subdev/bar/nv50.o
+nouveau-y += core/subdev/bar/nvc0.o
+nouveau-y += core/subdev/bios/base.o
+nouveau-y += core/subdev/bios/bit.o
+nouveau-y += core/subdev/bios/conn.o
+nouveau-y += core/subdev/bios/dcb.o
+nouveau-y += core/subdev/bios/disp.o
+nouveau-y += core/subdev/bios/dp.o
+nouveau-y += core/subdev/bios/extdev.o
+nouveau-y += core/subdev/bios/gpio.o
+nouveau-y += core/subdev/bios/i2c.o
+nouveau-y += core/subdev/bios/init.o
+nouveau-y += core/subdev/bios/mxm.o
+nouveau-y += core/subdev/bios/perf.o
+nouveau-y += core/subdev/bios/pll.o
+nouveau-y += core/subdev/bios/therm.o
+nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bus/nv04.o
+nouveau-y += core/subdev/bus/nv31.o
+nouveau-y += core/subdev/bus/nv50.o
+nouveau-y += core/subdev/bus/nvc0.o
+nouveau-y += core/subdev/clock/nv04.o
+nouveau-y += core/subdev/clock/nv40.o
+nouveau-y += core/subdev/clock/nv50.o
+nouveau-y += core/subdev/clock/nva3.o
+nouveau-y += core/subdev/clock/nvc0.o
+nouveau-y += core/subdev/clock/pllnv04.o
+nouveau-y += core/subdev/clock/pllnva3.o
+nouveau-y += core/subdev/devinit/base.o
+nouveau-y += core/subdev/devinit/nv04.o
+nouveau-y += core/subdev/devinit/nv05.o
+nouveau-y += core/subdev/devinit/nv10.o
+nouveau-y += core/subdev/devinit/nv1a.o
+nouveau-y += core/subdev/devinit/nv20.o
+nouveau-y += core/subdev/devinit/nv50.o
+nouveau-y += core/subdev/fb/base.o
+nouveau-y += core/subdev/fb/nv04.o
+nouveau-y += core/subdev/fb/nv10.o
+nouveau-y += core/subdev/fb/nv1a.o
+nouveau-y += core/subdev/fb/nv20.o
+nouveau-y += core/subdev/fb/nv25.o
+nouveau-y += core/subdev/fb/nv30.o
+nouveau-y += core/subdev/fb/nv35.o
+nouveau-y += core/subdev/fb/nv36.o
+nouveau-y += core/subdev/fb/nv40.o
+nouveau-y += core/subdev/fb/nv41.o
+nouveau-y += core/subdev/fb/nv44.o
+nouveau-y += core/subdev/fb/nv46.o
+nouveau-y += core/subdev/fb/nv47.o
+nouveau-y += core/subdev/fb/nv49.o
+nouveau-y += core/subdev/fb/nv4e.o
+nouveau-y += core/subdev/fb/nv50.o
+nouveau-y += core/subdev/fb/nvc0.o
+nouveau-y += core/subdev/gpio/base.o
+nouveau-y += core/subdev/gpio/nv10.o
+nouveau-y += core/subdev/gpio/nv50.o
+nouveau-y += core/subdev/gpio/nvd0.o
+nouveau-y += core/subdev/gpio/nve0.o
+nouveau-y += core/subdev/i2c/base.o
+nouveau-y += core/subdev/i2c/anx9805.o
+nouveau-y += core/subdev/i2c/aux.o
+nouveau-y += core/subdev/i2c/bit.o
+nouveau-y += core/subdev/i2c/nv04.o
+nouveau-y += core/subdev/i2c/nv4e.o
+nouveau-y += core/subdev/i2c/nv50.o
+nouveau-y += core/subdev/i2c/nv94.o
+nouveau-y += core/subdev/i2c/nvd0.o
+nouveau-y += core/subdev/ibus/nvc0.o
+nouveau-y += core/subdev/ibus/nve0.o
+nouveau-y += core/subdev/instmem/base.o
+nouveau-y += core/subdev/instmem/nv04.o
+nouveau-y += core/subdev/instmem/nv40.o
+nouveau-y += core/subdev/instmem/nv50.o
+nouveau-y += core/subdev/ltcg/nvc0.o
+nouveau-y += core/subdev/mc/base.o
+nouveau-y += core/subdev/mc/nv04.o
+nouveau-y += core/subdev/mc/nv44.o
+nouveau-y += core/subdev/mc/nv50.o
+nouveau-y += core/subdev/mc/nv98.o
+nouveau-y += core/subdev/mc/nvc0.o
+nouveau-y += core/subdev/mxm/base.o
+nouveau-y += core/subdev/mxm/mxms.o
+nouveau-y += core/subdev/mxm/nv50.o
+nouveau-y += core/subdev/therm/base.o
+nouveau-y += core/subdev/therm/fan.o
+nouveau-y += core/subdev/therm/fannil.o
+nouveau-y += core/subdev/therm/fanpwm.o
+nouveau-y += core/subdev/therm/fantog.o
+nouveau-y += core/subdev/therm/ic.o
+nouveau-y += core/subdev/therm/temp.o
+nouveau-y += core/subdev/therm/nv40.o
+nouveau-y += core/subdev/therm/nv50.o
+nouveau-y += core/subdev/therm/nv84.o
+nouveau-y += core/subdev/therm/nva3.o
+nouveau-y += core/subdev/therm/nvd0.o
+nouveau-y += core/subdev/timer/base.o
+nouveau-y += core/subdev/timer/nv04.o
+nouveau-y += core/subdev/vm/base.o
+nouveau-y += core/subdev/vm/nv04.o
+nouveau-y += core/subdev/vm/nv41.o
+nouveau-y += core/subdev/vm/nv44.o
+nouveau-y += core/subdev/vm/nv50.o
+nouveau-y += core/subdev/vm/nvc0.o
+
+nouveau-y += core/engine/dmaobj/base.o
+nouveau-y += core/engine/dmaobj/nv04.o
+nouveau-y += core/engine/dmaobj/nv50.o
+nouveau-y += core/engine/dmaobj/nvc0.o
+nouveau-y += core/engine/dmaobj/nvd0.o
+nouveau-y += core/engine/bsp/nv84.o
+nouveau-y += core/engine/bsp/nvc0.o
+nouveau-y += core/engine/bsp/nve0.o
+nouveau-y += core/engine/copy/nva3.o
+nouveau-y += core/engine/copy/nvc0.o
+nouveau-y += core/engine/copy/nve0.o
+nouveau-y += core/engine/crypt/nv84.o
+nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/device/base.o
+nouveau-y += core/engine/device/nv04.o
+nouveau-y += core/engine/device/nv10.o
+nouveau-y += core/engine/device/nv20.o
+nouveau-y += core/engine/device/nv30.o
+nouveau-y += core/engine/device/nv40.o
+nouveau-y += core/engine/device/nv50.o
+nouveau-y += core/engine/device/nvc0.o
+nouveau-y += core/engine/device/nve0.o
+nouveau-y += core/engine/disp/base.o
+nouveau-y += core/engine/disp/nv04.o
+nouveau-y += core/engine/disp/nv50.o
+nouveau-y += core/engine/disp/nv84.o
+nouveau-y += core/engine/disp/nv94.o
+nouveau-y += core/engine/disp/nva0.o
+nouveau-y += core/engine/disp/nva3.o
+nouveau-y += core/engine/disp/nvd0.o
+nouveau-y += core/engine/disp/nve0.o
+nouveau-y += core/engine/disp/nvf0.o
+nouveau-y += core/engine/disp/dacnv50.o
+nouveau-y += core/engine/disp/dport.o
+nouveau-y += core/engine/disp/hdanva3.o
+nouveau-y += core/engine/disp/hdanvd0.o
+nouveau-y += core/engine/disp/hdminv84.o
+nouveau-y += core/engine/disp/hdminva3.o
+nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/piornv50.o
+nouveau-y += core/engine/disp/sornv50.o
+nouveau-y += core/engine/disp/sornv94.o
+nouveau-y += core/engine/disp/sornvd0.o
+nouveau-y += core/engine/disp/vga.o
+nouveau-y += core/engine/fifo/base.o
+nouveau-y += core/engine/fifo/nv04.o
+nouveau-y += core/engine/fifo/nv10.o
+nouveau-y += core/engine/fifo/nv17.o
+nouveau-y += core/engine/fifo/nv40.o
+nouveau-y += core/engine/fifo/nv50.o
+nouveau-y += core/engine/fifo/nv84.o
+nouveau-y += core/engine/fifo/nvc0.o
+nouveau-y += core/engine/fifo/nve0.o
+nouveau-y += core/engine/graph/ctxnv40.o
+nouveau-y += core/engine/graph/ctxnv50.o
+nouveau-y += core/engine/graph/ctxnvc0.o
+nouveau-y += core/engine/graph/ctxnve0.o
+nouveau-y += core/engine/graph/nv04.o
+nouveau-y += core/engine/graph/nv10.o
+nouveau-y += core/engine/graph/nv20.o
+nouveau-y += core/engine/graph/nv25.o
+nouveau-y += core/engine/graph/nv2a.o
+nouveau-y += core/engine/graph/nv30.o
+nouveau-y += core/engine/graph/nv34.o
+nouveau-y += core/engine/graph/nv35.o
+nouveau-y += core/engine/graph/nv40.o
+nouveau-y += core/engine/graph/nv50.o
+nouveau-y += core/engine/graph/nvc0.o
+nouveau-y += core/engine/graph/nve0.o
+nouveau-y += core/engine/mpeg/nv31.o
+nouveau-y += core/engine/mpeg/nv40.o
+nouveau-y += core/engine/mpeg/nv50.o
+nouveau-y += core/engine/mpeg/nv84.o
+nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/ppp/nvc0.o
+nouveau-y += core/engine/software/nv04.o
+nouveau-y += core/engine/software/nv10.o
+nouveau-y += core/engine/software/nv50.o
+nouveau-y += core/engine/software/nvc0.o
+nouveau-y += core/engine/vp/nv84.o
+nouveau-y += core/engine/vp/nvc0.o
+nouveau-y += core/engine/vp/nve0.o
+
+# drm/core
+nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
+nouveau-y += nouveau_vga.o nouveau_agp.o
+nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
+nouveau-y += nouveau_prime.o nouveau_abi16.o
+nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
+nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o
+
+# drm/kms
+nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
+nouveau-y += nouveau_connector.o nouveau_dp.o
+nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
+
+# drm/kms/nv04:nv50
+include $(src)/dispnv04/Makefile
+
+# drm/kms/nv50-
+nouveau-y += nv50_display.o
+
+# drm/pm
+nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
+nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o
+nouveau-y += nouveau_mem.o
+
+# other random bits
+nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+ifdef CONFIG_X86
+nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
+endif
+nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
+nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
+
+obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/client.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/client.c
new file mode 100644
index 0000000..9079c0a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/client.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/option.h>
+
+#include <engine/device.h>
+
+static void
+nouveau_client_dtor(struct nouveau_object *object)
+{
+	struct nouveau_client *client = (void *)object;
+	nouveau_object_ref(NULL, &client->device);
+	nouveau_handle_destroy(client->root);
+	nouveau_namedb_destroy(&client->base);
+}
+
+static struct nouveau_oclass
+nouveau_client_oclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.dtor = nouveau_client_dtor,
+	},
+};
+
+int
+nouveau_client_create_(const char *name, u64 devname, const char *cfg,
+		       const char *dbg, int length, void **pobject)
+{
+	struct nouveau_object *device;
+	struct nouveau_client *client;
+	int ret;
+
+	device = (void *)nouveau_device_find(devname);
+	if (!device)
+		return -ENODEV;
+
+	ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass,
+				     NV_CLIENT_CLASS, NULL,
+				     (1ULL << NVDEV_ENGINE_DEVICE),
+				     length, pobject);
+	client = *pobject;
+	if (ret)
+		return ret;
+
+	ret = nouveau_handle_create(nv_object(client), ~0, ~0,
+				    nv_object(client), &client->root);
+	if (ret)
+		return ret;
+
+	/* prevent init/fini being called, os is in charge of this */
+	atomic_set(&nv_object(client)->usecount, 2);
+
+	nouveau_object_ref(device, &client->device);
+	snprintf(client->name, sizeof(client->name), "%s", name);
+	client->debug = nouveau_dbgopt(dbg, "CLIENT");
+	return 0;
+}
+
+int
+nouveau_client_init(struct nouveau_client *client)
+{
+	int ret;
+	nv_debug(client, "init running\n");
+	ret = nouveau_handle_init(client->root);
+	nv_debug(client, "init completed with %d\n", ret);
+	return ret;
+}
+
+int
+nouveau_client_fini(struct nouveau_client *client, bool suspend)
+{
+	const char *name[2] = { "fini", "suspend" };
+	int ret;
+
+	nv_debug(client, "%s running\n", name[suspend]);
+	ret = nouveau_handle_fini(client->root, suspend);
+	nv_debug(client, "%s completed with %d\n", name[suspend], ret);
+	return ret;
+}
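+
+/*
+ * Lifecycle sketch (illustrative; buffer sizing follows the create_()
+ * signature above): a client is created against a device name,
+ * initialised before use, and finalised on teardown or suspend:
+ *
+ *	ret = nouveau_client_create_(name, devname, cfg, dbg,
+ *				     sizeof(*client), (void **)&client);
+ *	if (ret == 0)
+ *		ret = nouveau_client_init(client);
+ *	...
+ *	nouveau_client_fini(client, false);
+ */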
+
+const char *
+nouveau_client_name(void *obj)
+{
+	const char *client_name = "unknown";
+	struct nouveau_client *client = nouveau_client(obj);
+	if (client)
+		client_name = client->name;
+	return client_name;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/engctx.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/engctx.c
new file mode 100644
index 0000000..84c71fa
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/client.h>
+#include <core/engctx.h>
+
+#include <subdev/vm.h>
+
+static inline int
+nouveau_engctx_exists(struct nouveau_object *parent,
+		      struct nouveau_engine *engine, void **pobject)
+{
+	struct nouveau_engctx *engctx;
+	struct nouveau_object *parctx;
+
+	list_for_each_entry(engctx, &engine->contexts, head) {
+		parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
+		if (parctx == parent) {
+			atomic_inc(&nv_object(engctx)->refcount);
+			*pobject = engctx;
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+int
+nouveau_engctx_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engobj,
+		       struct nouveau_oclass *oclass,
+		       struct nouveau_object *pargpu,
+		       u32 size, u32 align, u32 flags,
+		       int length, void **pobject)
+{
+	struct nouveau_client *client = nouveau_client(parent);
+	struct nouveau_engine *engine = nv_engine(engobj);
+	struct nouveau_object *engctx;
+	unsigned long save;
+	int ret;
+
+	/* check if this engine already has a context for the parent object,
+	 * and reference it instead of creating a new one
+	 */
+	spin_lock_irqsave(&engine->lock, save);
+	ret = nouveau_engctx_exists(parent, engine, pobject);
+	spin_unlock_irqrestore(&engine->lock, save);
+	if (ret)
+		return ret;
+
+	/* create the new context, supports creating both raw objects and
+	 * objects backed by instance memory
+	 */
+	if (size) {
+		ret = nouveau_gpuobj_create_(parent, engobj, oclass,
+					     NV_ENGCTX_CLASS,
+					     pargpu, size, align, flags,
+					     length, pobject);
+	} else {
+		ret = nouveau_object_create_(parent, engobj, oclass,
+					     NV_ENGCTX_CLASS, length, pobject);
+	}
+
+	engctx = *pobject;
+	if (ret)
+		return ret;
+
+	/* must take the lock again and re-check a context doesn't already
+	 * exist (in case of a race) - the lock had to be dropped before as
+	 * it's not possible to allocate the object with it held.
+	 */
+	spin_lock_irqsave(&engine->lock, save);
+	ret = nouveau_engctx_exists(parent, engine, pobject);
+	if (ret) {
+		spin_unlock_irqrestore(&engine->lock, save);
+		nouveau_object_ref(NULL, &engctx);
+		return ret;
+	}
+
+	if (client->vm)
+		atomic_inc(&client->vm->engref[nv_engidx(engobj)]);
+	list_add(&nv_engctx(engctx)->head, &engine->contexts);
+	nv_engctx(engctx)->addr = ~0ULL;
+	spin_unlock_irqrestore(&engine->lock, save);
+	return 0;
+}
+
+void
+nouveau_engctx_destroy(struct nouveau_engctx *engctx)
+{
+	struct nouveau_object *engobj = nv_object(engctx)->engine;
+	struct nouveau_engine *engine = nv_engine(engobj);
+	struct nouveau_client *client = nouveau_client(engctx);
+	unsigned long save;
+
+	nouveau_gpuobj_unmap(&engctx->vma);
+	spin_lock_irqsave(&engine->lock, save);
+	list_del(&engctx->head);
+	spin_unlock_irqrestore(&engine->lock, save);
+
+	if (client->vm)
+		atomic_dec(&client->vm->engref[nv_engidx(engobj)]);
+
+	if (engctx->base.size)
+		nouveau_gpuobj_destroy(&engctx->base);
+	else
+		nouveau_object_destroy(&engctx->base.base);
+}
+
+int
+nouveau_engctx_init(struct nouveau_engctx *engctx)
+{
+	struct nouveau_object *object = nv_object(engctx);
+	struct nouveau_subdev *subdev = nv_subdev(object->engine);
+	struct nouveau_object *parent;
+	struct nouveau_subdev *pardev;
+	int ret;
+
+	ret = nouveau_gpuobj_init(&engctx->base);
+	if (ret)
+		return ret;
+
+	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
+	pardev = nv_subdev(parent->engine);
+	if (nv_parent(parent)->context_attach) {
+		mutex_lock(&pardev->mutex);
+		ret = nv_parent(parent)->context_attach(parent, object);
+		mutex_unlock(&pardev->mutex);
+	}
+
+	if (ret) {
+		nv_error(parent, "failed to attach %s context, %d\n",
+			 subdev->name, ret);
+		return ret;
+	}
+
+	nv_debug(parent, "attached %s context\n", subdev->name);
+	return 0;
+}
+
+int
+nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
+{
+	struct nouveau_object *object = nv_object(engctx);
+	struct nouveau_subdev *subdev = nv_subdev(object->engine);
+	struct nouveau_object *parent;
+	struct nouveau_subdev *pardev;
+	int ret = 0;
+
+	parent = nv_pclass(object->parent, NV_PARENT_CLASS);
+	pardev = nv_subdev(parent->engine);
+	if (nv_parent(parent)->context_detach) {
+		mutex_lock(&pardev->mutex);
+		ret = nv_parent(parent)->context_detach(parent, suspend, object);
+		mutex_unlock(&pardev->mutex);
+	}
+
+	if (ret) {
+		nv_error(parent, "failed to detach %s context, %d\n",
+			 subdev->name, ret);
+		return ret;
+	}
+
+	nv_debug(parent, "detached %s context\n", subdev->name);
+	return nouveau_gpuobj_fini(&engctx->base, suspend);
+}
+
+int
+_nouveau_engctx_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nouveau_engctx *engctx;
+	int ret;
+
+	ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
+				    NVOBJ_FLAG_ZERO_ALLOC, &engctx);
+	*pobject = nv_object(engctx);
+	return ret;
+}
+
+void
+_nouveau_engctx_dtor(struct nouveau_object *object)
+{
+	nouveau_engctx_destroy(nv_engctx(object));
+}
+
+int
+_nouveau_engctx_init(struct nouveau_object *object)
+{
+	return nouveau_engctx_init(nv_engctx(object));
+}
+
+int
+_nouveau_engctx_fini(struct nouveau_object *object, bool suspend)
+{
+	return nouveau_engctx_fini(nv_engctx(object), suspend);
+}
+
+struct nouveau_object *
+nouveau_engctx_get(struct nouveau_engine *engine, u64 addr)
+{
+	struct nouveau_engctx *engctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&engine->lock, flags);
+	list_for_each_entry(engctx, &engine->contexts, head) {
+		if (engctx->addr == addr) {
+			engctx->save = flags;
+			return nv_object(engctx);
+		}
+	}
+	spin_unlock_irqrestore(&engine->lock, flags);
+	return NULL;
+}
+
+void
+nouveau_engctx_put(struct nouveau_object *object)
+{
+	if (object) {
+		struct nouveau_engine *engine = nv_engine(object->engine);
+		struct nouveau_engctx *engctx = nv_engctx(object);
+		spin_unlock_irqrestore(&engine->lock, engctx->save);
+	}
+}
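+
+/*
+ * Locking note (from the code above): a successful nouveau_engctx_get()
+ * returns with engine->lock still held and the saved irq flags stashed in
+ * the context; nouveau_engctx_put() releases the lock. Lookups are
+ * therefore bracketed tightly, e.g. (illustrative sketch):
+ *
+ *	object = nouveau_engctx_get(engine, inst);
+ *	if (object) {
+ *		... resolve per-channel state ...
+ *	}
+ *	nouveau_engctx_put(object);
+ */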
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/engine.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/engine.c
new file mode 100644
index 0000000..c8bed4a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/engine.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/engine.h>
+#include <core/option.h>
+
+int
+nouveau_engine_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engobj,
+		       struct nouveau_oclass *oclass, bool enable,
+		       const char *iname, const char *fname,
+		       int length, void **pobject)
+{
+	struct nouveau_engine *engine;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS,
+				     iname, fname, length, pobject);
+	engine = *pobject;
+	if (ret)
+		return ret;
+
+	if (parent &&
+	    !nouveau_boolopt(nv_device(parent)->cfgopt, iname, enable)) {
+		if (!enable)
+			nv_warn(engine, "disabled, %s=1 to enable\n", iname);
+		return -ENODEV;
+	}
+
+	INIT_LIST_HEAD(&engine->contexts);
+	spin_lock_init(&engine->lock);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/enum.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/enum.c
new file mode 100644
index 0000000..dd43479
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/enum.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <core/os.h>
+#include <core/enum.h>
+
+const struct nouveau_enum *
+nouveau_enum_find(const struct nouveau_enum *en, u32 value)
+{
+	while (en->name) {
+		if (en->value == value)
+			return en;
+		en++;
+	}
+
+	return NULL;
+}
+
+const struct nouveau_enum *
+nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+{
+	en = nouveau_enum_find(en, value);
+	if (en)
+		pr_cont("%s", en->name);
+	else
+		pr_cont("(unknown enum 0x%08x)", value);
+	return en;
+}
+
+void
+nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
+{
+	while (bf->name) {
+		if (value & bf->mask) {
+			pr_cont(" %s", bf->name);
+			value &= ~bf->mask;
+		}
+
+		bf++;
+	}
+
+	if (value)
+		pr_cont(" (unknown bits 0x%08x)", value);
+}
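+
+/*
+ * Worked example (hypothetical table): with entries mapping bit 0 to "DMA"
+ * and bit 2 to "PARITY", printing the value 0x00000015 emits
+ * " DMA PARITY (unknown bits 0x00000010)".
+ */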
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/event.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/event.c
new file mode 100644
index 0000000..7eb81c1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/event.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/os.h>
+#include <core/event.h>
+
+static void
+nouveau_event_put_locked(struct nouveau_event *event, int index,
+			 struct nouveau_eventh *handler)
+{
+	if (!--event->index[index].refs) {
+		if (event->disable)
+			event->disable(event, index);
+	}
+	list_del(&handler->head);
+}
+
+void
+nouveau_event_put(struct nouveau_event *event, int index,
+		  struct nouveau_eventh *handler)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&event->lock, flags);
+	if (index < event->index_nr)
+		nouveau_event_put_locked(event, index, handler);
+	spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_get(struct nouveau_event *event, int index,
+		  struct nouveau_eventh *handler)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&event->lock, flags);
+	if (index < event->index_nr) {
+		list_add(&handler->head, &event->index[index].list);
+		if (!event->index[index].refs++) {
+			if (event->enable)
+				event->enable(event, index);
+		}
+	}
+	spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_trigger(struct nouveau_event *event, int index)
+{
+	struct nouveau_eventh *handler, *temp;
+	unsigned long flags;
+
+	if (index >= event->index_nr)
+		return;
+
+	spin_lock_irqsave(&event->lock, flags);
+	list_for_each_entry_safe(handler, temp, &event->index[index].list, head) {
+		if (handler->func(handler, index) == NVKM_EVENT_DROP) {
+			nouveau_event_put_locked(event, index, handler);
+		}
+	}
+	spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_destroy(struct nouveau_event **pevent)
+{
+	struct nouveau_event *event = *pevent;
+	if (event) {
+		kfree(event);
+		*pevent = NULL;
+	}
+}
+
+int
+nouveau_event_create(int index_nr, struct nouveau_event **pevent)
+{
+	struct nouveau_event *event;
+	int i;
+
+	event = *pevent = kzalloc(sizeof(*event) + index_nr *
+				  sizeof(event->index[0]), GFP_KERNEL);
+	if (!event)
+		return -ENOMEM;
+
+	spin_lock_init(&event->lock);
+	for (i = 0; i < index_nr; i++)
+		INIT_LIST_HEAD(&event->index[i].list);
+	event->index_nr = index_nr;
+	return 0;
+}
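+
+/*
+ * Usage sketch (illustrative; NVKM_EVENT_KEEP is assumed to be the
+ * counterpart of NVKM_EVENT_DROP): a handler embeds a nouveau_eventh,
+ * get() arms the source via ->enable on the first reference, and the
+ * callback's return value decides whether it stays attached:
+ *
+ *	static int my_handler(struct nouveau_eventh *h, int index)
+ *	{
+ *		return NVKM_EVENT_KEEP;
+ *	}
+ *
+ *	nouveau_event_get(event, index, &eventh);
+ *	...
+ *	nouveau_event_put(event, index, &eventh);
+ */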
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/falcon.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/falcon.c
new file mode 100644
index 0000000..e05c157
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/falcon.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/falcon.h>
+
+#include <subdev/timer.h>
+
+u32
+_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
+{
+	struct nouveau_falcon *falcon = (void *)object;
+	return nv_rd32(falcon, falcon->addr + addr);
+}
+
+void
+_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	struct nouveau_falcon *falcon = (void *)object;
+	nv_wr32(falcon, falcon->addr + addr, data);
+}
+
+int
+_nouveau_falcon_init(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nouveau_falcon *falcon = (void *)object;
+	const struct firmware *fw;
+	char name[32] = "internal";
+	int ret, i;
+	u32 caps;
+
+	/* enable engine, and determine its capabilities */
+	ret = nouveau_engine_init(&falcon->base);
+	if (ret)
+		return ret;
+
+	if (device->chipset <  0xa3 ||
+	    device->chipset == 0xaa || device->chipset == 0xac) {
+		falcon->version = 0;
+		falcon->secret  = (falcon->addr == 0x087000) ? 1 : 0;
+	} else {
+		caps = nv_ro32(falcon, 0x12c);
+		falcon->version = (caps & 0x0000000f);
+		falcon->secret  = (caps & 0x00000030) >> 4;
+	}
+
+	caps = nv_ro32(falcon, 0x108);
+	falcon->code.limit = (caps & 0x000001ff) << 8;
+	falcon->data.limit = (caps & 0x0003fe00) >> 1;
+
+	nv_debug(falcon, "falcon version: %d\n", falcon->version);
+	nv_debug(falcon, "secret level: %d\n", falcon->secret);
+	nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
+	nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
+
+	/* wait for 'uc halted' to be signalled before continuing */
+	if (falcon->secret && falcon->version < 4) {
+		if (!falcon->version)
+			nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+		else
+			nv_wait(falcon, 0x180, 0x80000000, 0);
+		nv_wo32(falcon, 0x004, 0x00000010);
+	}
+
+	/* disable all interrupts */
+	nv_wo32(falcon, 0x014, 0xffffffff);
+
+	/* no default ucode provided by the engine implementation, try and
+	 * locate a "self-bootstrapping" firmware image for the engine
+	 */
+	if (!falcon->code.data) {
+		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
+			 device->chipset, falcon->addr >> 12);
+
+		ret = request_firmware(&fw, name, &device->pdev->dev);
+		if (ret == 0) {
+			falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+			falcon->code.size = fw->size;
+			falcon->data.data = NULL;
+			falcon->data.size = 0;
+			release_firmware(fw);
+		}
+
+		falcon->external = true;
+	}
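+
+	/*
+	 * Example: for chipset 0x98 with a falcon at base 0x087000, the
+	 * image requested above is "nouveau/nv98_fuc087"; the split
+	 * code/data images below carry "c"/"d" suffixes, i.e.
+	 * "nouveau/nv98_fuc087c" and "nouveau/nv98_fuc087d".
+	 */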
+
+	/* next step is to try and load "static code/data segment" firmware
+	 * images for the engine
+	 */
+	if (!falcon->code.data) {
+		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
+			 device->chipset, falcon->addr >> 12);
+
+		ret = request_firmware(&fw, name, &device->pdev->dev);
+		if (ret) {
+			nv_error(falcon, "unable to load firmware data\n");
+			return ret;
+		}
+
+		falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+		falcon->data.size = fw->size;
+		release_firmware(fw);
+		if (!falcon->data.data)
+			return -ENOMEM;
+
+		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
+			 device->chipset, falcon->addr >> 12);
+
+		ret = request_firmware(&fw, name, &device->pdev->dev);
+		if (ret) {
+			nv_error(falcon, "unable to load firmware code\n");
+			return ret;
+		}
+
+		falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+		falcon->code.size = fw->size;
+		release_firmware(fw);
+		if (!falcon->code.data)
+			return -ENOMEM;
+	}
+
+	nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
+		 "static code/data segments" : "self-bootstrapping");
+
+	/* ensure any "self-bootstrapping" firmware image is in vram */
+	if (!falcon->data.data && !falcon->core) {
+		ret = nouveau_gpuobj_new(object->parent, NULL,
+					 falcon->code.size, 256, 0,
+					&falcon->core);
+		if (ret) {
+			nv_error(falcon, "core allocation failed, %d\n", ret);
+			return ret;
+		}
+
+		for (i = 0; i < falcon->code.size; i += 4)
+			nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
+	}
+
+	/* upload firmware bootloader (or the full code segments) */
+	if (falcon->core) {
+		if (device->card_type < NV_C0)
+			nv_wo32(falcon, 0x618, 0x04000000);
+		else
+			nv_wo32(falcon, 0x618, 0x00000114);
+		nv_wo32(falcon, 0x11c, 0);
+		nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
+		nv_wo32(falcon, 0x114, 0);
+		nv_wo32(falcon, 0x118, 0x00006610);
+	} else {
+		if (falcon->code.size > falcon->code.limit ||
+		    falcon->data.size > falcon->data.limit) {
+			nv_error(falcon, "ucode exceeds falcon limit(s)\n");
+			return -EINVAL;
+		}
+
+		if (falcon->version < 3) {
+			nv_wo32(falcon, 0xff8, 0x00100000);
+			for (i = 0; i < falcon->code.size / 4; i++)
+				nv_wo32(falcon, 0xff4, falcon->code.data[i]);
+		} else {
+			nv_wo32(falcon, 0x180, 0x01000000);
+			for (i = 0; i < falcon->code.size / 4; i++) {
+				if ((i & 0x3f) == 0)
+					nv_wo32(falcon, 0x188, i >> 6);
+				nv_wo32(falcon, 0x184, falcon->code.data[i]);
+			}
+		}
+	}
+
+	/* upload data segment (if necessary), zeroing the remainder */
+	if (falcon->version < 3) {
+		nv_wo32(falcon, 0xff8, 0x00000000);
+		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+			nv_wo32(falcon, 0xff4, falcon->data.data[i]);
+		for (; i < falcon->data.limit; i += 4)
+			nv_wo32(falcon, 0xff4, 0x00000000);
+	} else {
+		nv_wo32(falcon, 0x1c0, 0x01000000);
+		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+			nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
+		for (; i < falcon->data.limit / 4; i++)
+			nv_wo32(falcon, 0x1c4, 0x00000000);
+	}
+
+	/* start it running */
+	nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
+	nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
+	nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
+	nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
+	return 0;
+}
+
+int
+_nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_falcon *falcon = (void *)object;
+
+	if (!suspend) {
+		nouveau_gpuobj_ref(NULL, &falcon->core);
+		if (falcon->external) {
+			kfree(falcon->data.data);
+			kfree(falcon->code.data);
+			falcon->code.data = NULL;
+		}
+	}
+
+	nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
+	nv_wo32(falcon, 0x014, 0xffffffff);
+
+	return nouveau_engine_fini(&falcon->base, suspend);
+}
+
+int
+nouveau_falcon_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 addr, bool enable,
+		       const char *iname, const char *fname,
+		       int length, void **pobject)
+{
+	struct nouveau_falcon *falcon;
+	int ret;
+
+	ret = nouveau_engine_create_(parent, engine, oclass, enable, iname,
+				     fname, length, pobject);
+	falcon = *pobject;
+	if (ret)
+		return ret;
+
+	falcon->addr = addr;
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/gpuobj.c
new file mode 100644
index 0000000..560b221
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/gpuobj.h>
+
+#include <subdev/instmem.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
+
+void
+nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj)
+{
+	int i;
+
+	if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
+		for (i = 0; i < gpuobj->size; i += 4)
+			nv_wo32(gpuobj, i, 0x00000000);
+	}
+
+	if (gpuobj->node) {
+		nouveau_mm_free(&nv_gpuobj(gpuobj->parent)->heap,
+				&gpuobj->node);
+	}
+
+	if (gpuobj->heap.block_size)
+		nouveau_mm_fini(&gpuobj->heap);
+
+	nouveau_object_destroy(&gpuobj->base);
+}
+
+int
+nouveau_gpuobj_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 pclass,
+		       struct nouveau_object *pargpu,
+		       u32 size, u32 align, u32 flags,
+		       int length, void **pobject)
+{
+	struct nouveau_instmem *imem = nouveau_instmem(parent);
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nouveau_gpuobj *gpuobj;
+	struct nouveau_mm *heap = NULL;
+	int ret, i;
+	u64 addr;
+
+	*pobject = NULL;
+
+	if (pargpu) {
+		while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
+			if (nv_gpuobj(pargpu)->heap.block_size)
+				break;
+			pargpu = pargpu->parent;
+		}
+
+		if (unlikely(pargpu == NULL)) {
+			nv_error(parent, "no gpuobj heap\n");
+			return -EINVAL;
+		}
+
+		addr =  nv_gpuobj(pargpu)->addr;
+		heap = &nv_gpuobj(pargpu)->heap;
+		atomic_inc(&parent->refcount);
+	} else {
+		ret = imem->alloc(imem, parent, size, align, &parent);
+		pargpu = parent;
+		if (ret)
+			return ret;
+
+		addr = nv_memobj(pargpu)->addr;
+		size = nv_memobj(pargpu)->size;
+
+		if (bar && bar->alloc) {
+			struct nouveau_instobj *iobj = (void *)parent;
+			struct nouveau_mem **mem = (void *)(iobj + 1);
+			struct nouveau_mem *node = *mem;
+			if (!bar->alloc(bar, parent, node, &pargpu)) {
+				nouveau_object_ref(NULL, &parent);
+				parent = pargpu;
+			}
+		}
+	}
+
+	ret = nouveau_object_create_(parent, engine, oclass, pclass |
+				     NV_GPUOBJ_CLASS, length, pobject);
+	nouveau_object_ref(NULL, &parent);
+	gpuobj = *pobject;
+	if (ret)
+		return ret;
+
+	gpuobj->parent = pargpu;
+	gpuobj->flags = flags;
+	gpuobj->addr = addr;
+	gpuobj->size = size;
+
+	if (heap) {
+		ret = nouveau_mm_head(heap, 1, size, size,
+				      max(align, (u32)1), &gpuobj->node);
+		if (ret)
+			return ret;
+
+		gpuobj->addr += gpuobj->node->offset;
+	}
+
+	if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
+		ret = nouveau_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+		if (ret)
+			return ret;
+	}
+
+	if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
+		for (i = 0; i < gpuobj->size; i += 4)
+			nv_wo32(gpuobj, i, 0x00000000);
+	}
+
+	return ret;
+}
+
+struct nouveau_gpuobj_class {
+	struct nouveau_object *pargpu;
+	u64 size;
+	u32 align;
+	u32 flags;
+};
+
+static int
+_nouveau_gpuobj_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj_class *args = data;
+	struct nouveau_gpuobj *object;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
+				    args->size, args->align, args->flags,
+				    &object);
+	*pobject = nv_object(object);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void
+_nouveau_gpuobj_dtor(struct nouveau_object *object)
+{
+	nouveau_gpuobj_destroy(nv_gpuobj(object));
+}
+
+int
+_nouveau_gpuobj_init(struct nouveau_object *object)
+{
+	return nouveau_gpuobj_init(nv_gpuobj(object));
+}
+
+int
+_nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
+{
+	return nouveau_gpuobj_fini(nv_gpuobj(object), suspend);
+}
+
+u32
+_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
+{
+	struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
+	struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
+	if (gpuobj->node)
+		addr += gpuobj->node->offset;
+	return pfuncs->rd32(gpuobj->parent, addr);
+}
+
+void
+_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
+	struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
+	if (gpuobj->node)
+		addr += gpuobj->node->offset;
+	pfuncs->wr32(gpuobj->parent, addr, data);
+}
+
+static struct nouveau_oclass
+_nouveau_gpuobj_oclass = {
+	.handle = 0x00000000,
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_gpuobj_ctor,
+		.dtor = _nouveau_gpuobj_dtor,
+		.init = _nouveau_gpuobj_init,
+		.fini = _nouveau_gpuobj_fini,
+		.rd32 = _nouveau_gpuobj_rd32,
+		.wr32 = _nouveau_gpuobj_wr32,
+	},
+};
+
+int
+nouveau_gpuobj_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
+		   u32 size, u32 align, u32 flags,
+		   struct nouveau_gpuobj **pgpuobj)
+{
+	struct nouveau_object *engine = parent;
+	struct nouveau_gpuobj_class args = {
+		.pargpu = pargpu,
+		.size = size,
+		.align = align,
+		.flags = flags,
+	};
+
+	if (!nv_iclass(engine, NV_SUBDEV_CLASS))
+		engine = engine->engine;
+	BUG_ON(engine == NULL);
+
+	return nouveau_object_ctor(parent, engine, &_nouveau_gpuobj_oclass,
+				   &args, sizeof(args),
+				   (struct nouveau_object **)pgpuobj);
+}
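+
+/*
+ * Usage sketch (illustrative): allocate a zeroed, 256-byte aligned 4KiB
+ * object from a parent gpuobj's heap:
+ *
+ *	ret = nouveau_gpuobj_new(parent, pargpu, 0x1000, 256,
+ *				 NVOBJ_FLAG_ZERO_ALLOC, &gpuobj);
+ */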
+
+int
+nouveau_gpuobj_map(struct nouveau_gpuobj *gpuobj, u32 access,
+		   struct nouveau_vma *vma)
+{
+	struct nouveau_bar *bar = nouveau_bar(gpuobj);
+	int ret = -EINVAL;
+
+	if (bar && bar->umap) {
+		struct nouveau_instobj *iobj = (void *)
+			nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
+		struct nouveau_mem **mem = (void *)(iobj + 1);
+		ret = bar->umap(bar, *mem, access, vma);
+	}
+
+	return ret;
+}
+
+int
+nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
+		      u32 access, struct nouveau_vma *vma)
+{
+	struct nouveau_instobj *iobj = (void *)
+		nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
+	struct nouveau_mem **mem = (void *)(iobj + 1);
+	int ret;
+
+	ret = nouveau_vm_get(vm, gpuobj->size, 12, access, vma);
+	if (ret)
+		return ret;
+
+	nouveau_vm_map(vma, *mem);
+	return 0;
+}
+
+void
+nouveau_gpuobj_unmap(struct nouveau_vma *vma)
+{
+	if (vma->node) {
+		nouveau_vm_unmap(vma);
+		nouveau_vm_put(vma);
+	}
+}
+
+/* the below is basically only here to support sharing the paged dma object
+ * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
+ * anywhere else.
+ */
+
+static void
+nouveau_gpudup_dtor(struct nouveau_object *object)
+{
+	struct nouveau_gpuobj *gpuobj = (void *)object;
+	nouveau_object_ref(NULL, &gpuobj->parent);
+	nouveau_object_destroy(&gpuobj->base);
+}
+
+static struct nouveau_oclass
+nouveau_gpudup_oclass = {
+	.handle = NV_GPUOBJ_CLASS,
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.dtor = nouveau_gpudup_dtor,
+		.init = nouveau_object_init,
+		.fini = nouveau_object_fini,
+	},
+};
+
+int
+nouveau_gpuobj_dup(struct nouveau_object *parent, struct nouveau_gpuobj *base,
+		   struct nouveau_gpuobj **pgpuobj)
+{
+	struct nouveau_gpuobj *gpuobj;
+	int ret;
+
+	ret = nouveau_object_create(parent, parent->engine,
+				   &nouveau_gpudup_oclass, 0, &gpuobj);
+	*pgpuobj = gpuobj;
+	if (ret)
+		return ret;
+
+	nouveau_object_ref(nv_object(base), &gpuobj->parent);
+	gpuobj->addr = base->addr;
+	gpuobj->size = base->size;
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/handle.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/handle.c
new file mode 100644
index 0000000..264c2b3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/handle.h>
+#include <core/client.h>
+
+#define hprintk(h,l,f,a...) do {                                               \
+	struct nouveau_client *c = nouveau_client((h)->object);                \
+	struct nouveau_handle *p = (h)->parent; u32 n = p ? p->name : ~0;      \
+	nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a);               \
+} while (0)
+
+int
+nouveau_handle_init(struct nouveau_handle *handle)
+{
+	struct nouveau_handle *item;
+	int ret;
+
+	hprintk(handle, TRACE, "init running\n");
+	ret = nouveau_object_inc(handle->object);
+	if (ret)
+		return ret;
+
+	hprintk(handle, TRACE, "init children\n");
+	list_for_each_entry(item, &handle->tree, head) {
+		ret = nouveau_handle_init(item);
+		if (ret)
+			goto fail;
+	}
+
+	hprintk(handle, TRACE, "init completed\n");
+	return 0;
+fail:
+	hprintk(handle, ERROR, "init failed with %d\n", ret);
+	list_for_each_entry_continue_reverse(item, &handle->tree, head) {
+		nouveau_handle_fini(item, false);
+	}
+
+	nouveau_object_dec(handle->object, false);
+	return ret;
+}
+
+int
+nouveau_handle_fini(struct nouveau_handle *handle, bool suspend)
+{
+	static const char *name[2] = { "fini", "suspend" };
+	struct nouveau_handle *item;
+	int ret;
+
+	hprintk(handle, TRACE, "%s children\n", name[suspend]);
+	list_for_each_entry(item, &handle->tree, head) {
+		ret = nouveau_handle_fini(item, suspend);
+		if (ret && suspend)
+			goto fail;
+	}
+
+	hprintk(handle, TRACE, "%s running\n", name[suspend]);
+	if (handle->object) {
+		ret = nouveau_object_dec(handle->object, suspend);
+		if (ret && suspend)
+			goto fail;
+	}
+
+	hprintk(handle, TRACE, "%s completed\n", name[suspend]);
+	return 0;
+fail:
+	hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret);
+	list_for_each_entry_continue_reverse(item, &handle->tree, head) {
+		int rret = nouveau_handle_init(item);
+		if (rret)
+			hprintk(handle, FATAL, "failed to restart, %d\n", rret);
+	}
+
+	return ret;
+}
+
+int
+nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
+		      struct nouveau_object *object,
+		      struct nouveau_handle **phandle)
+{
+	struct nouveau_object *namedb;
+	struct nouveau_handle *handle;
+	int ret;
+
+	namedb = parent;
+	while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
+		namedb = namedb->parent;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&handle->head);
+	INIT_LIST_HEAD(&handle->tree);
+	handle->name = _handle;
+	handle->priv = ~0;
+
+	ret = nouveau_namedb_insert(nv_namedb(namedb), _handle, object, handle);
+	if (ret) {
+		kfree(handle);
+		return ret;
+	}
+
+	if (nv_parent(parent)->object_attach) {
+		ret = nv_parent(parent)->object_attach(parent, object, _handle);
+		if (ret < 0) {
+			nouveau_handle_destroy(handle);
+			return ret;
+		}
+
+		handle->priv = ret;
+	}
+
+	if (object != namedb) {
+		while (!nv_iclass(namedb, NV_CLIENT_CLASS))
+			namedb = namedb->parent;
+
+		handle->parent = nouveau_namedb_get(nv_namedb(namedb), _parent);
+		if (handle->parent) {
+			list_add(&handle->head, &handle->parent->tree);
+			nouveau_namedb_put(handle->parent);
+		}
+	}
+
+	hprintk(handle, TRACE, "created\n");
+
+	*phandle = handle;
+
+	return 0;
+}
+
+void
+nouveau_handle_destroy(struct nouveau_handle *handle)
+{
+	struct nouveau_handle *item, *temp;
+
+	hprintk(handle, TRACE, "destroy running\n");
+	list_for_each_entry_safe(item, temp, &handle->tree, head) {
+		nouveau_handle_destroy(item);
+	}
+	list_del(&handle->head);
+
+	if (handle->priv != ~0) {
+		struct nouveau_object *parent = handle->parent->object;
+		nv_parent(parent)->object_detach(parent, handle->priv);
+	}
+
+	hprintk(handle, TRACE, "destroy completed\n");
+	nouveau_namedb_remove(handle);
+	kfree(handle);
+}
+
+struct nouveau_object *
+nouveau_handle_ref(struct nouveau_object *parent, u32 name)
+{
+	struct nouveau_object *object = NULL;
+	struct nouveau_handle *handle;
+
+	while (!nv_iclass(parent, NV_NAMEDB_CLASS))
+		parent = parent->parent;
+
+	handle = nouveau_namedb_get(nv_namedb(parent), name);
+	if (handle) {
+		nouveau_object_ref(handle->object, &object);
+		nouveau_namedb_put(handle);
+	}
+
+	return object;
+}
+
+struct nouveau_handle *
+nouveau_handle_get_class(struct nouveau_object *engctx, u16 oclass)
+{
+	struct nouveau_namedb *namedb;
+	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
+		return nouveau_namedb_get_class(namedb, oclass);
+	return NULL;
+}
+
+struct nouveau_handle *
+nouveau_handle_get_vinst(struct nouveau_object *engctx, u64 vinst)
+{
+	struct nouveau_namedb *namedb;
+	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
+		return nouveau_namedb_get_vinst(namedb, vinst);
+	return NULL;
+}
+
+struct nouveau_handle *
+nouveau_handle_get_cinst(struct nouveau_object *engctx, u32 cinst)
+{
+	struct nouveau_namedb *namedb;
+	if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
+		return nouveau_namedb_get_cinst(namedb, cinst);
+	return NULL;
+}
+
+void
+nouveau_handle_put(struct nouveau_handle *handle)
+{
+	if (handle)
+		nouveau_namedb_put(handle);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/mm.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/mm.c
new file mode 100644
index 0000000..0261a11
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "core/os.h"
+#include "core/mm.h"
+
+#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
+	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
+
+void
+nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis)
+{
+	struct nouveau_mm_node *this = *pthis;
+
+	if (this) {
+		struct nouveau_mm_node *prev = node(this, prev);
+		struct nouveau_mm_node *next = node(this, next);
+
+		if (prev && prev->type == 0) {
+			prev->length += this->length;
+			list_del(&this->nl_entry);
+			kfree(this); this = prev;
+		}
+
+		if (next && next->type == 0) {
+			next->offset  = this->offset;
+			next->length += this->length;
+			if (this->type == 0)
+				list_del(&this->fl_entry);
+			list_del(&this->nl_entry);
+			kfree(this);
+			this = NULL;
+		}
+
+		if (this && this->type != 0) {
+			list_for_each_entry(prev, &mm->free, fl_entry) {
+				if (this->offset < prev->offset)
+					break;
+			}
+
+			list_add_tail(&this->fl_entry, &prev->fl_entry);
+			this->type = 0;
+		}
+	}
+
+	*pthis = NULL;
+}
+
+static struct nouveau_mm_node *
+region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+{
+	struct nouveau_mm_node *b;
+
+	if (a->length == size)
+		return a;
+
+	b = kmalloc(sizeof(*b), GFP_KERNEL);
+	if (unlikely(b == NULL))
+		return NULL;
+
+	b->offset = a->offset;
+	b->length = size;
+	b->type   = a->type;
+	a->offset += size;
+	a->length -= size;
+	list_add_tail(&b->nl_entry, &a->nl_entry);
+	if (b->type == 0)
+		list_add_tail(&b->fl_entry, &a->fl_entry);
+	return b;
+}
+
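+/* Allocate from the low end of the heap: first-fit scan of the free list,
+ * aligning the start to 'align' and shrinking the candidate region to
+ * mm->block_size boundaries wherever it abuts a neighbour of a different
+ * type.  On success, a node of between size_min and size_max units is
+ * unlinked from the free list and returned in *pnode.
+ */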
+int
+nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
+		u32 align, struct nouveau_mm_node **pnode)
+{
+	struct nouveau_mm_node *prev, *this, *next;
+	u32 mask = align - 1;
+	u32 splitoff;
+	u32 s, e;
+
+	list_for_each_entry(this, &mm->free, fl_entry) {
+		e = this->offset + this->length;
+		s = this->offset;
+
+		prev = node(this, prev);
+		if (prev && prev->type != type)
+			s = roundup(s, mm->block_size);
+
+		next = node(this, next);
+		if (next && next->type != type)
+			e = rounddown(e, mm->block_size);
+
+		s  = (s + mask) & ~mask;
+		e &= ~mask;
+		if (s > e || e - s < size_min)
+			continue;
+
+		splitoff = s - this->offset;
+		if (splitoff && !region_head(mm, this, splitoff))
+			return -ENOMEM;
+
+		this = region_head(mm, this, min(size_max, e - s));
+		if (!this)
+			return -ENOMEM;
+
+		this->type = type;
+		list_del(&this->fl_entry);
+		*pnode = this;
+		return 0;
+	}
+
+	return -ENOSPC;
+}
+
+static struct nouveau_mm_node *
+region_tail(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+{
+	struct nouveau_mm_node *b;
+
+	if (a->length == size)
+		return a;
+
+	b = kmalloc(sizeof(*b), GFP_KERNEL);
+	if (unlikely(b == NULL))
+		return NULL;
+
+	a->length -= size;
+	b->offset  = a->offset + a->length;
+	b->length  = size;
+	b->type    = a->type;
+
+	list_add(&b->nl_entry, &a->nl_entry);
+	if (b->type == 0)
+		list_add(&b->fl_entry, &a->fl_entry);
+	return b;
+}
+
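+/* As nouveau_mm_head(), but scans the free list in reverse and carves the
+ * allocation out of the high end of the first suitable region.
+ */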
+int
+nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
+		u32 align, struct nouveau_mm_node **pnode)
+{
+	struct nouveau_mm_node *prev, *this, *next;
+	u32 mask = align - 1;
+
+	list_for_each_entry_reverse(this, &mm->free, fl_entry) {
+		u32 e = this->offset + this->length;
+		u32 s = this->offset;
+		u32 c = 0, a;
+
+		prev = node(this, prev);
+		if (prev && prev->type != type)
+			s = roundup(s, mm->block_size);
+
+		next = node(this, next);
+		if (next && next->type != type) {
+			e = rounddown(e, mm->block_size);
+			c = next->offset - e;
+		}
+
+		s = (s + mask) & ~mask;
+		a = e - s;
+		if (s > e || a < size_min)
+			continue;
+
+		a  = min(a, size_max);
+		s  = (e - a) & ~mask;
+		c += (e - s) - a;
+
+		if (c && !region_tail(mm, this, c))
+			return -ENOMEM;
+
+		this = region_tail(mm, this, a);
+		if (!this)
+			return -ENOMEM;
+
+		this->type = type;
+		list_del(&this->fl_entry);
+		*pnode = this;
+		return 0;
+	}
+
+	return -ENOSPC;
+}
+
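+/* Initialise the allocator (when 'block', the minimum block size, is
+ * non-zero) and add one block-aligned free region covering
+ * offset..offset+length.  Calling again with block == 0 appears to grow an
+ * already-initialised heap with an additional region.
+ */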
+int
+nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
+{
+	struct nouveau_mm_node *node;
+
+	if (block) {
+		mutex_init(&mm->mutex);
+		INIT_LIST_HEAD(&mm->nodes);
+		INIT_LIST_HEAD(&mm->free);
+		mm->block_size = block;
+		mm->heap_nodes = 0;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	if (length) {
+		node->offset  = roundup(offset, mm->block_size);
+		node->length  = rounddown(offset + length, mm->block_size);
+		node->length -= node->offset;
+	}
+
+	list_add_tail(&node->nl_entry, &mm->nodes);
+	list_add_tail(&node->fl_entry, &mm->free);
+	mm->heap_nodes++;
+	return 0;
+}
+
+int
+nouveau_mm_fini(struct nouveau_mm *mm)
+{
+	if (nouveau_mm_initialised(mm)) {
+		struct nouveau_mm_node *node, *heap =
+			list_first_entry(&mm->nodes, typeof(*heap), nl_entry);
+		int nodes = 0;
+
+		list_for_each_entry(node, &mm->nodes, nl_entry) {
+			if (WARN_ON(nodes++ == mm->heap_nodes))
+				return -EBUSY;
+		}
+
+		kfree(heap);
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/namedb.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/namedb.c
new file mode 100644
index 0000000..1ce95a8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/namedb.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/gpuobj.h>
+
+static struct nouveau_handle *
+nouveau_namedb_lookup(struct nouveau_namedb *namedb, u32 name)
+{
+	struct nouveau_handle *handle;
+
+	list_for_each_entry(handle, &namedb->list, node) {
+		if (handle->name == name)
+			return handle;
+	}
+
+	return NULL;
+}
+
+static struct nouveau_handle *
+nouveau_namedb_lookup_class(struct nouveau_namedb *namedb, u16 oclass)
+{
+	struct nouveau_handle *handle;
+
+	list_for_each_entry(handle, &namedb->list, node) {
+		if (nv_mclass(handle->object) == oclass)
+			return handle;
+	}
+
+	return NULL;
+}
+
+static struct nouveau_handle *
+nouveau_namedb_lookup_vinst(struct nouveau_namedb *namedb, u64 vinst)
+{
+	struct nouveau_handle *handle;
+
+	list_for_each_entry(handle, &namedb->list, node) {
+		if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
+			if (nv_gpuobj(handle->object)->addr == vinst)
+				return handle;
+		}
+	}
+
+	return NULL;
+}
+
+static struct nouveau_handle *
+nouveau_namedb_lookup_cinst(struct nouveau_namedb *namedb, u32 cinst)
+{
+	struct nouveau_handle *handle;
+
+	list_for_each_entry(handle, &namedb->list, node) {
+		if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
+			if (nv_gpuobj(handle->object)->node &&
+			    nv_gpuobj(handle->object)->node->offset == cinst)
+				return handle;
+		}
+	}
+
+	return NULL;
+}
+
+int
+nouveau_namedb_insert(struct nouveau_namedb *namedb, u32 name,
+		      struct nouveau_object *object,
+		      struct nouveau_handle *handle)
+{
+	int ret = -EEXIST;
+	write_lock_irq(&namedb->lock);
+	if (!nouveau_namedb_lookup(namedb, name)) {
+		nouveau_object_ref(object, &handle->object);
+		handle->namedb = namedb;
+		list_add(&handle->node, &namedb->list);
+		ret = 0;
+	}
+	write_unlock_irq(&namedb->lock);
+	return ret;
+}
+
+void
+nouveau_namedb_remove(struct nouveau_handle *handle)
+{
+	struct nouveau_namedb *namedb = handle->namedb;
+	struct nouveau_object *object = handle->object;
+	write_lock_irq(&namedb->lock);
+	list_del(&handle->node);
+	write_unlock_irq(&namedb->lock);
+	nouveau_object_ref(NULL, &object);
+}
+
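+/* The get/put pairs below implement a read-side lock handoff: on a
+ * successful lookup the namedb's rwlock is *left held* for reading, and is
+ * only released by nouveau_namedb_put() once the caller is done with the
+ * handle.  On a failed lookup the lock is dropped before returning NULL.
+ */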
+struct nouveau_handle *
+nouveau_namedb_get(struct nouveau_namedb *namedb, u32 name)
+{
+	struct nouveau_handle *handle;
+	read_lock(&namedb->lock);
+	handle = nouveau_namedb_lookup(namedb, name);
+	if (handle == NULL)
+		read_unlock(&namedb->lock);
+	return handle;
+}
+
+struct nouveau_handle *
+nouveau_namedb_get_class(struct nouveau_namedb *namedb, u16 oclass)
+{
+	struct nouveau_handle *handle;
+	read_lock(&namedb->lock);
+	handle = nouveau_namedb_lookup_class(namedb, oclass);
+	if (handle == NULL)
+		read_unlock(&namedb->lock);
+	return handle;
+}
+
+struct nouveau_handle *
+nouveau_namedb_get_vinst(struct nouveau_namedb *namedb, u64 vinst)
+{
+	struct nouveau_handle *handle;
+	read_lock(&namedb->lock);
+	handle = nouveau_namedb_lookup_vinst(namedb, vinst);
+	if (handle == NULL)
+		read_unlock(&namedb->lock);
+	return handle;
+}
+
+struct nouveau_handle *
+nouveau_namedb_get_cinst(struct nouveau_namedb *namedb, u32 cinst)
+{
+	struct nouveau_handle *handle;
+	read_lock(&namedb->lock);
+	handle = nouveau_namedb_lookup_cinst(namedb, cinst);
+	if (handle == NULL)
+		read_unlock(&namedb->lock);
+	return handle;
+}
+
+void
+nouveau_namedb_put(struct nouveau_handle *handle)
+{
+	if (handle)
+		read_unlock(&handle->namedb->lock);
+}
+
+int
+nouveau_namedb_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 pclass,
+		       struct nouveau_oclass *sclass, u32 engcls,
+		       int length, void **pobject)
+{
+	struct nouveau_namedb *namedb;
+	int ret;
+
+	ret = nouveau_parent_create_(parent, engine, oclass, pclass |
+				     NV_NAMEDB_CLASS, sclass, engcls,
+				     length, pobject);
+	namedb = *pobject;
+	if (ret)
+		return ret;
+
+	rwlock_init(&namedb->lock);
+	INIT_LIST_HEAD(&namedb->list);
+	return 0;
+}
+
+int
+_nouveau_namedb_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nouveau_namedb *object;
+	int ret;
+
+	ret = nouveau_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
+	*pobject = nv_object(object);
+	if (ret)
+		return ret;
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/object.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/object.c
new file mode 100644
index 0000000..7f48e28
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/object.c
@@ -0,0 +1,474 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/engine.h>
+
+#ifdef NOUVEAU_OBJECT_MAGIC
+static struct list_head _objlist = LIST_HEAD_INIT(_objlist);
+static DEFINE_SPINLOCK(_objlist_lock);
+#endif
+
+int
+nouveau_object_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 pclass,
+		       int size, void **pobject)
+{
+	struct nouveau_object *object;
+
+	object = *pobject = kzalloc(size, GFP_KERNEL);
+	if (!object)
+		return -ENOMEM;
+
+	nouveau_object_ref(parent, &object->parent);
+	nouveau_object_ref(engine, &object->engine);
+	object->oclass = oclass;
+	object->oclass->handle |= pclass;
+	atomic_set(&object->refcount, 1);
+	atomic_set(&object->usecount, 0);
+
+#ifdef NOUVEAU_OBJECT_MAGIC
+	object->_magic = NOUVEAU_OBJECT_MAGIC;
+	spin_lock(&_objlist_lock);
+	list_add(&object->list, &_objlist);
+	spin_unlock(&_objlist_lock);
+#endif
+	return 0;
+}
+
+static int
+_nouveau_object_ctor(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, void *data, u32 size,
+		     struct nouveau_object **pobject)
+{
+	struct nouveau_object *object;
+	int ret;
+
+	ret = nouveau_object_create(parent, engine, oclass, 0, &object);
+	*pobject = nv_object(object);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void
+nouveau_object_destroy(struct nouveau_object *object)
+{
+#ifdef NOUVEAU_OBJECT_MAGIC
+	spin_lock(&_objlist_lock);
+	list_del(&object->list);
+	spin_unlock(&_objlist_lock);
+#endif
+	nouveau_object_ref(NULL, &object->engine);
+	nouveau_object_ref(NULL, &object->parent);
+	kfree(object);
+}
+
+static void
+_nouveau_object_dtor(struct nouveau_object *object)
+{
+	nouveau_object_destroy(object);
+}
+
+int
+nouveau_object_init(struct nouveau_object *object)
+{
+	return 0;
+}
+
+static int
+_nouveau_object_init(struct nouveau_object *object)
+{
+	return nouveau_object_init(object);
+}
+
+int
+nouveau_object_fini(struct nouveau_object *object, bool suspend)
+{
+	return 0;
+}
+
+static int
+_nouveau_object_fini(struct nouveau_object *object, bool suspend)
+{
+	return nouveau_object_fini(object, suspend);
+}
+
+struct nouveau_ofuncs
+nouveau_object_ofuncs = {
+	.ctor = _nouveau_object_ctor,
+	.dtor = _nouveau_object_dtor,
+	.init = _nouveau_object_init,
+	.fini = _nouveau_object_fini,
+};
+
+int
+nouveau_object_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nouveau_ofuncs *ofuncs = oclass->ofuncs;
+	struct nouveau_object *object = NULL;
+	int ret;
+
+	ret = ofuncs->ctor(parent, engine, oclass, data, size, &object);
+	*pobject = object;
+	if (ret < 0) {
+		if (ret != -ENODEV) {
+			nv_error(parent, "failed to create 0x%08x, %d\n",
+				 oclass->handle, ret);
+		}
+
+		if (object) {
+			ofuncs->dtor(object);
+			*pobject = NULL;
+		}
+
+		return ret;
+	}
+
+	if (ret == 0) {
+		nv_debug(object, "created\n");
+		atomic_set(&object->refcount, 1);
+	}
+
+	return 0;
+}
+
+static void
+nouveau_object_dtor(struct nouveau_object *object)
+{
+	nv_debug(object, "destroying\n");
+	nv_ofuncs(object)->dtor(object);
+}
+
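+/* Reference-swap helper: takes a reference on 'obj' (may be NULL), drops
+ * the reference held in *ref (destroying the object if it was the last),
+ * then stores 'obj' in *ref.  nouveau_object_ref(NULL, &ptr) therefore acts
+ * as a plain "put".
+ */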
+void
+nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref)
+{
+	if (obj) {
+		atomic_inc(&obj->refcount);
+		nv_trace(obj, "inc() == %d\n", atomic_read(&obj->refcount));
+	}
+
+	if (*ref) {
+		int dead = atomic_dec_and_test(&(*ref)->refcount);
+		nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount));
+		if (dead)
+			nouveau_object_dtor(*ref);
+	}
+
+	*ref = obj;
+}
+
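+/* Full object creation path, roughly: resolve the parent handle, look up
+ * the requested class, power up the owning engine, optionally interpose an
+ * engine context object, construct the object, then bind it to a handle.
+ * The fail_* labels unwind in reverse order; note that the success path
+ * also falls through them after nouveau_handle_init(), since the handle by
+ * then holds its own references.
+ */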
+int
+nouveau_object_new(struct nouveau_object *client, u32 _parent, u32 _handle,
+		   u16 _oclass, void *data, u32 size,
+		   struct nouveau_object **pobject)
+{
+	struct nouveau_object *parent = NULL;
+	struct nouveau_object *engctx = NULL;
+	struct nouveau_object *object = NULL;
+	struct nouveau_object *engine;
+	struct nouveau_oclass *oclass;
+	struct nouveau_handle *handle;
+	int ret;
+
+	/* lookup parent object and ensure it *is* a parent */
+	parent = nouveau_handle_ref(client, _parent);
+	if (!parent) {
+		nv_error(client, "parent 0x%08x not found\n", _parent);
+		return -ENOENT;
+	}
+
+	if (!nv_iclass(parent, NV_PARENT_CLASS)) {
+		nv_error(parent, "cannot have children\n");
+		ret = -EINVAL;
+		goto fail_class;
+	}
+
+	/* check that parent supports the requested subclass */
+	ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
+	if (ret) {
+		nv_debug(parent, "illegal class 0x%04x\n", _oclass);
+		goto fail_class;
+	}
+
+	/* make sure engine init has been completed *before* any objects
+	 * it controls are created - the constructors may depend on
+	 * state calculated at init (i.e. default context construction)
+	 */
+	if (engine) {
+		ret = nouveau_object_inc(engine);
+		if (ret)
+			goto fail_class;
+	}
+
+	/* if the engine requires it, create a context object to insert
+	 * between the parent and its children (e.g. a PGRAPH context)
+	 */
+	if (engine && nv_engine(engine)->cclass) {
+		ret = nouveau_object_ctor(parent, engine,
+					  nv_engine(engine)->cclass,
+					  data, size, &engctx);
+		if (ret)
+			goto fail_engctx;
+	} else {
+		nouveau_object_ref(parent, &engctx);
+	}
+
+	/* finally, create new object and bind it to its handle */
+	ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
+	*pobject = object;
+	if (ret)
+		goto fail_ctor;
+
+	ret = nouveau_object_inc(object);
+	if (ret)
+		goto fail_init;
+
+	ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
+	if (ret)
+		goto fail_handle;
+
+	ret = nouveau_handle_init(handle);
+	if (ret)
+		nouveau_handle_destroy(handle);
+
+fail_handle:
+	nouveau_object_dec(object, false);
+fail_init:
+	nouveau_object_ref(NULL, &object);
+fail_ctor:
+	nouveau_object_ref(NULL, &engctx);
+fail_engctx:
+	if (engine)
+		nouveau_object_dec(engine, false);
+fail_class:
+	nouveau_object_ref(NULL, &parent);
+	return ret;
+}
+
+int
+nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
+{
+	struct nouveau_object *parent = NULL;
+	struct nouveau_object *namedb = NULL;
+	struct nouveau_handle *handle = NULL;
+
+	parent = nouveau_handle_ref(client, _parent);
+	if (!parent)
+		return -ENOENT;
+
+	namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
+	if (namedb) {
+		handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
+		if (handle) {
+			nouveau_namedb_put(handle);
+			nouveau_handle_fini(handle, false);
+			nouveau_handle_destroy(handle);
+		}
+	}
+
+	nouveau_object_ref(NULL, &parent);
+	return handle ? 0 : -EINVAL;
+}
+
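+/* Use-count management: the first nouveau_object_inc() on an object brings
+ * its parent and engine up first, then runs the object's own init(); later
+ * calls only bump the counter.  nouveau_object_dec() mirrors this on the
+ * final use, either stopping (decf) or suspending (decs) the object.
+ */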
+int
+nouveau_object_inc(struct nouveau_object *object)
+{
+	int ref = atomic_add_return(1, &object->usecount);
+	int ret;
+
+	nv_trace(object, "use(+1) == %d\n", atomic_read(&object->usecount));
+	if (ref != 1)
+		return 0;
+
+	nv_trace(object, "initialising...\n");
+	if (object->parent) {
+		ret = nouveau_object_inc(object->parent);
+		if (ret) {
+			nv_error(object, "parent failed, %d\n", ret);
+			goto fail_parent;
+		}
+	}
+
+	if (object->engine) {
+		mutex_lock(&nv_subdev(object->engine)->mutex);
+		ret = nouveau_object_inc(object->engine);
+		mutex_unlock(&nv_subdev(object->engine)->mutex);
+		if (ret) {
+			nv_error(object, "engine failed, %d\n", ret);
+			goto fail_engine;
+		}
+	}
+
+	ret = nv_ofuncs(object)->init(object);
+	atomic_set(&object->usecount, 1);
+	if (ret) {
+		nv_error(object, "init failed, %d\n", ret);
+		goto fail_self;
+	}
+
+	nv_debug(object, "initialised\n");
+	return 0;
+
+fail_self:
+	if (object->engine) {
+		mutex_lock(&nv_subdev(object->engine)->mutex);
+		nouveau_object_dec(object->engine, false);
+		mutex_unlock(&nv_subdev(object->engine)->mutex);
+	}
+fail_engine:
+	if (object->parent)
+		 nouveau_object_dec(object->parent, false);
+fail_parent:
+	atomic_dec(&object->usecount);
+	return ret;
+}
+
+static int
+nouveau_object_decf(struct nouveau_object *object)
+{
+	int ret;
+
+	nv_trace(object, "stopping...\n");
+
+	ret = nv_ofuncs(object)->fini(object, false);
+	atomic_set(&object->usecount, 0);
+	if (ret)
+		nv_warn(object, "failed fini, %d\n", ret);
+
+	if (object->engine) {
+		mutex_lock(&nv_subdev(object->engine)->mutex);
+		nouveau_object_dec(object->engine, false);
+		mutex_unlock(&nv_subdev(object->engine)->mutex);
+	}
+
+	if (object->parent)
+		nouveau_object_dec(object->parent, false);
+
+	nv_debug(object, "stopped\n");
+	return 0;
+}
+
+static int
+nouveau_object_decs(struct nouveau_object *object)
+{
+	int ret, rret;
+
+	nv_trace(object, "suspending...\n");
+
+	ret = nv_ofuncs(object)->fini(object, true);
+	atomic_set(&object->usecount, 0);
+	if (ret) {
+		nv_error(object, "failed suspend, %d\n", ret);
+		return ret;
+	}
+
+	if (object->engine) {
+		mutex_lock(&nv_subdev(object->engine)->mutex);
+		ret = nouveau_object_dec(object->engine, true);
+		mutex_unlock(&nv_subdev(object->engine)->mutex);
+		if (ret) {
+			nv_warn(object, "engine failed suspend, %d\n", ret);
+			goto fail_engine;
+		}
+	}
+
+	if (object->parent) {
+		ret = nouveau_object_dec(object->parent, true);
+		if (ret) {
+			nv_warn(object, "parent failed suspend, %d\n", ret);
+			goto fail_parent;
+		}
+	}
+
+	nv_debug(object, "suspended\n");
+	return 0;
+
+fail_parent:
+	if (object->engine) {
+		mutex_lock(&nv_subdev(object->engine)->mutex);
+		rret = nouveau_object_inc(object->engine);
+		mutex_unlock(&nv_subdev(object->engine)->mutex);
+		if (rret)
+			nv_fatal(object, "engine failed to reinit, %d\n", rret);
+	}
+
+fail_engine:
+	rret = nv_ofuncs(object)->init(object);
+	if (rret)
+		nv_fatal(object, "failed to reinit, %d\n", rret);
+
+	return ret;
+}
+
+int
+nouveau_object_dec(struct nouveau_object *object, bool suspend)
+{
+	int ref = atomic_add_return(-1, &object->usecount);
+	int ret;
+
+	nv_trace(object, "use(-1) == %d\n", atomic_read(&object->usecount));
+
+	if (ref == 0) {
+		if (suspend)
+			ret = nouveau_object_decs(object);
+		else
+			ret = nouveau_object_decf(object);
+
+		if (ret) {
+			atomic_inc(&object->usecount);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void
+nouveau_object_debug(void)
+{
+#ifdef NOUVEAU_OBJECT_MAGIC
+	struct nouveau_object *object;
+	if (!list_empty(&_objlist)) {
+		nv_fatal(NULL, "*******************************************\n");
+		nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n");
+		nv_fatal(NULL, "*******************************************\n");
+		list_for_each_entry(object, &_objlist, list) {
+			nv_fatal(object, "%p/%p/%d/%d\n",
+				 object->parent, object->engine,
+				 atomic_read(&object->refcount),
+				 atomic_read(&object->usecount));
+		}
+	}
+#endif
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/option.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/option.c
new file mode 100644
index 0000000..62a432e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/option.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+#include <core/debug.h>
+
+/* compares the first 'len' bytes of unterminated string 'str' with
+ * zero-terminated string 'cmp'; returns zero on a case-insensitive match */
+static inline int
+strncasecmpz(const char *str, const char *cmp, size_t len)
+{
+	if (strlen(cmp) != len)
+		return len;
+	return strncasecmp(str, cmp, len);
+}
+
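+/* Find option 'opt' in a "name1=value1,name2=value2" style string.  Returns
+ * a pointer to the (unterminated) value within 'optstr' and its length in
+ * *arglen, or NULL if the option is absent or its value is empty.
+ */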
+const char *
+nouveau_stropt(const char *optstr, const char *opt, int *arglen)
+{
+	while (optstr && *optstr != '\0') {
+		int len = strcspn(optstr, ",=");
+		switch (optstr[len]) {
+		case '=':
+			if (!strncasecmpz(optstr, opt, len)) {
+				optstr += len + 1;
+				*arglen = strcspn(optstr, ",=");
+				return *arglen ? optstr : NULL;
+			}
+			optstr++;
+			break;
+		case ',':
+			optstr++;
+			break;
+		default:
+			break;
+		}
+		optstr += len;
+	}
+
+	return NULL;
+}
+
+bool
+nouveau_boolopt(const char *optstr, const char *opt, bool value)
+{
+	int arglen;
+
+	optstr = nouveau_stropt(optstr, opt, &arglen);
+	if (optstr) {
+		if (!strncasecmpz(optstr, "0", arglen) ||
+		    !strncasecmpz(optstr, "no", arglen) ||
+		    !strncasecmpz(optstr, "off", arglen) ||
+		    !strncasecmpz(optstr, "false", arglen))
+			value = false;
+		else
+		if (!strncasecmpz(optstr, "1", arglen) ||
+		    !strncasecmpz(optstr, "yes", arglen) ||
+		    !strncasecmpz(optstr, "on", arglen) ||
+		    !strncasecmpz(optstr, "true", arglen))
+			value = true;
+	}
+
+	return value;
+}
+
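+/* Parse a debug-level option string for subdev 'sub'.  Tokens are either a
+ * bare level name, which applies to every subdev, or "subdev=level", which
+ * applies only when 'subdev' matches; a (hypothetical) string like
+ * "warn,PFIFO=trace" would leave most subdevs at warn but trace PFIFO.
+ * Defaults to CONFIG_NOUVEAU_DEBUG_DEFAULT.
+ */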
+int
+nouveau_dbgopt(const char *optstr, const char *sub)
+{
+	int mode = 1, level = CONFIG_NOUVEAU_DEBUG_DEFAULT;
+
+	while (optstr) {
+		int len = strcspn(optstr, ",=");
+		switch (optstr[len]) {
+		case '=':
+			if (strncasecmpz(optstr, sub, len))
+				mode = 0;
+			optstr++;
+			break;
+		default:
+			if (mode) {
+				if (!strncasecmpz(optstr, "fatal", len))
+					level = NV_DBG_FATAL;
+				else if (!strncasecmpz(optstr, "error", len))
+					level = NV_DBG_ERROR;
+				else if (!strncasecmpz(optstr, "warn", len))
+					level = NV_DBG_WARN;
+				else if (!strncasecmpz(optstr, "info", len))
+					level = NV_DBG_INFO;
+				else if (!strncasecmpz(optstr, "debug", len))
+					level = NV_DBG_DEBUG;
+				else if (!strncasecmpz(optstr, "trace", len))
+					level = NV_DBG_TRACE;
+				else if (!strncasecmpz(optstr, "paranoia", len))
+					level = NV_DBG_PARANOIA;
+				else if (!strncasecmpz(optstr, "spam", len))
+					level = NV_DBG_SPAM;
+			}
+
+			if (optstr[len] != '\0') {
+				optstr++;
+				mode = 1;
+				break;
+			}
+
+			return level;
+		}
+		optstr += len;
+	}
+
+	return level;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/parent.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/parent.c
new file mode 100644
index 0000000..313380c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/client.h>
+
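+/* Resolve class 'handle' against a parent: first check the parent's own
+ * sclass list, then walk every engine enabled in the parent's engine mask
+ * and scan that engine's sclass array for a matching class handle.
+ */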
+int
+nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
+		      struct nouveau_object **pengine,
+		      struct nouveau_oclass **poclass)
+{
+	struct nouveau_sclass *sclass;
+	struct nouveau_engine *engine;
+	struct nouveau_oclass *oclass;
+	u64 mask;
+
+	sclass = nv_parent(parent)->sclass;
+	while (sclass) {
+		if ((sclass->oclass->handle & 0xffff) == handle) {
+			*pengine = parent->engine;
+			*poclass = sclass->oclass;
+			return 0;
+		}
+
+		sclass = sclass->sclass;
+	}
+
+	mask = nv_parent(parent)->engine;
+	while (mask) {
+		int i = ffsll(mask) - 1;
+
+		if (nv_iclass(parent, NV_CLIENT_CLASS))
+			engine = nv_engine(nv_client(parent)->device);
+		else
+			engine = nouveau_engine(parent, i);
+
+		if (engine) {
+			oclass = engine->sclass;
+			while (oclass->ofuncs) {
+				if ((oclass->handle & 0xffff) == handle) {
+					*pengine = nv_object(engine);
+					*poclass = oclass;
+					return 0;
+				}
+				oclass++;
+			}
+		}
+
+		mask &= ~(1ULL << i);
+	}
+
+	return -EINVAL;
+}
+
+int
+nouveau_parent_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 pclass,
+		       struct nouveau_oclass *sclass, u64 engcls,
+		       int size, void **pobject)
+{
+	struct nouveau_parent *object;
+	struct nouveau_sclass *nclass;
+	int ret;
+
+	ret = nouveau_object_create_(parent, engine, oclass, pclass |
+				     NV_PARENT_CLASS, size, pobject);
+	object = *pobject;
+	if (ret)
+		return ret;
+
+	while (sclass && sclass->ofuncs) {
+		nclass = kzalloc(sizeof(*nclass), GFP_KERNEL);
+		if (!nclass)
+			return -ENOMEM;
+
+		nclass->sclass = object->sclass;
+		object->sclass = nclass;
+		nclass->engine = engine ? nv_engine(engine) : NULL;
+		nclass->oclass = sclass;
+		sclass++;
+	}
+
+	object->engine = engcls;
+	return 0;
+}
+
+void
+nouveau_parent_destroy(struct nouveau_parent *parent)
+{
+	struct nouveau_sclass *sclass;
+
+	while ((sclass = parent->sclass)) {
+		parent->sclass = sclass->sclass;
+		kfree(sclass);
+	}
+
+	nouveau_object_destroy(&parent->base);
+}
+
+
+void
+_nouveau_parent_dtor(struct nouveau_object *object)
+{
+	nouveau_parent_destroy(nv_parent(object));
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/printk.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/printk.c
new file mode 100644
index 0000000..6161eaf
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/subdev.h>
+#include <core/printk.h>
+
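+/* Core log formatter: derives the owning subdev and device from the object
+ * to build a "nouveau <level>[subdev][device]" prefix, filters the message
+ * against the per-subdev (or per-client) debug level, then hands the
+ * assembled format string to vprintk().
+ */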
+void
+nv_printk_(struct nouveau_object *object, const char *pfx, int level,
+	   const char *fmt, ...)
+{
+	static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
+	char mfmt[256];
+	va_list args;
+
+	if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
+		struct nouveau_object *device = object;
+		struct nouveau_object *subdev = object;
+		char obuf[64], *ofmt = "";
+
+		if (object->engine) {
+			snprintf(obuf, sizeof(obuf), "[0x%08x][%p]",
+				 nv_hclass(object), object);
+			ofmt = obuf;
+			subdev = object->engine;
+			device = object->engine;
+		}
+
+		if (subdev->parent)
+			device = subdev->parent;
+
+		if (level > nv_subdev(subdev)->debug)
+			return;
+
+		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s", pfx,
+			 name[level], nv_subdev(subdev)->name,
+			 nv_device(device)->name, ofmt, fmt);
+	} else
+	if (object && nv_iclass(object, NV_CLIENT_CLASS)) {
+		if (level > nv_client(object)->debug)
+			return;
+
+		snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s", pfx,
+			 name[level], nv_client(object)->name, fmt);
+	} else {
+		snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt);
+	}
+
+	va_start(args, fmt);
+	vprintk(mfmt, args);
+	va_end(args);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/ramht.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/ramht.c
new file mode 100644
index 0000000..86a6404
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/ramht.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/object.h>
+#include <core/ramht.h>
+#include <core/math.h>
+
+#include <subdev/bar.h>
+
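+/* Hash a (channel, handle) pair into a RAMHT slot: XOR-fold the handle in
+ * ramht->bits sized chunks, mix in the channel id, then shift left by 3 to
+ * turn the slot index into the byte offset of an 8-byte entry.
+ */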
+static u32
+nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle)
+{
+	u32 hash = 0;
+
+	while (handle) {
+		hash ^= (handle & ((1 << ramht->bits) - 1));
+		handle >>= ramht->bits;
+	}
+
+	hash ^= chid << (ramht->bits - 4);
+	hash  = hash << 3;
+	return hash;
+}
+
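+/* Insert (handle, context) using open addressing with linear probing: a
+ * slot is free when its context word (offset +4) is zero.  Returns the byte
+ * offset of the entry (the cookie later passed to nouveau_ramht_remove()),
+ * or -ENOMEM once the probe wraps back to its starting slot.
+ */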
+int
+nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
+		     u32 handle, u32 context)
+{
+	struct nouveau_bar *bar = nouveau_bar(ramht);
+	u32 co, ho;
+
+	co = ho = nouveau_ramht_hash(ramht, chid, handle);
+	do {
+		if (!nv_ro32(ramht, co + 4)) {
+			nv_wo32(ramht, co + 0, handle);
+			nv_wo32(ramht, co + 4, context);
+			if (bar)
+				bar->flush(bar);
+			return co;
+		}
+
+		co += 8;
+		if (co >= nv_gpuobj(ramht)->size)
+			co = 0;
+	} while (co != ho);
+
+	return -ENOMEM;
+}
+
+void
+nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie)
+{
+	struct nouveau_bar *bar = nouveau_bar(ramht);
+	nv_wo32(ramht, cookie + 0, 0x00000000);
+	nv_wo32(ramht, cookie + 4, 0x00000000);
+	if (bar)
+		bar->flush(bar);
+}
+
+static struct nouveau_oclass
+nouveau_ramht_oclass = {
+	.handle = 0x0000abcd,
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = NULL,
+		.dtor = _nouveau_gpuobj_dtor,
+		.init = _nouveau_gpuobj_init,
+		.fini = _nouveau_gpuobj_fini,
+		.rd32 = _nouveau_gpuobj_rd32,
+		.wr32 = _nouveau_gpuobj_wr32,
+	},
+};
+
+int
+nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
+		  u32 size, u32 align, struct nouveau_ramht **pramht)
+{
+	struct nouveau_ramht *ramht;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, parent->engine ?
+				    parent->engine : parent, /* <nv50 ramht */
+				    &nouveau_ramht_oclass, 0, pargpu, size,
+				    align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
+	*pramht = ramht;
+	if (ret)
+		return ret;
+
+	ramht->bits = log2i(nv_gpuobj(ramht)->size >> 3);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/core/subdev.c b/linux-imx/drivers/gpu/drm/nouveau/core/core/subdev.c
new file mode 100644
index 0000000..48f0637
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/subdev.h>
+#include <core/device.h>
+#include <core/option.h>
+
+void
+nouveau_subdev_reset(struct nouveau_object *subdev)
+{
+	nv_trace(subdev, "resetting...\n");
+	nv_ofuncs(subdev)->fini(subdev, false);
+	nv_debug(subdev, "reset\n");
+}
+
+int
+nouveau_subdev_init(struct nouveau_subdev *subdev)
+{
+	int ret = nouveau_object_init(&subdev->base);
+	if (ret)
+		return ret;
+
+	nouveau_subdev_reset(&subdev->base);
+	return 0;
+}
+
+int
+_nouveau_subdev_init(struct nouveau_object *object)
+{
+	return nouveau_subdev_init(nv_subdev(object));
+}
+
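+/* Shut a subdev down.  If the subdev owns bits in the PMC engine-enable
+ * register (0x000200), pulse them low then high to reset the unit before
+ * running the generic object fini.
+ */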
+int
+nouveau_subdev_fini(struct nouveau_subdev *subdev, bool suspend)
+{
+	if (subdev->unit) {
+		nv_mask(subdev, 0x000200, subdev->unit, 0x00000000);
+		nv_mask(subdev, 0x000200, subdev->unit, subdev->unit);
+	}
+
+	return nouveau_object_fini(&subdev->base, suspend);
+}
+
+int
+_nouveau_subdev_fini(struct nouveau_object *object, bool suspend)
+{
+	return nouveau_subdev_fini(nv_subdev(object), suspend);
+}
+
+void
+nouveau_subdev_destroy(struct nouveau_subdev *subdev)
+{
+	int subidx = nv_hclass(subdev) & 0xff;
+	nv_device(subdev)->subdev[subidx] = NULL;
+	nouveau_object_destroy(&subdev->base);
+}
+
+void
+_nouveau_subdev_dtor(struct nouveau_object *object)
+{
+	nouveau_subdev_destroy(nv_subdev(object));
+}
+
+int
+nouveau_subdev_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 pclass,
+		       const char *subname, const char *sysname,
+		       int size, void **pobject)
+{
+	struct nouveau_subdev *subdev;
+	int ret;
+
+	ret = nouveau_object_create_(parent, engine, oclass, pclass |
+				     NV_SUBDEV_CLASS, size, pobject);
+	subdev = *pobject;
+	if (ret)
+		return ret;
+
+	__mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
+	subdev->name = subname;
+
+	if (parent) {
+		struct nouveau_device *device = nv_device(parent);
+		int subidx = nv_hclass(subdev) & 0xff;
+
+		subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
+		subdev->mmio  = nv_subdev(device)->mmio;
+		device->subdev[subidx] = *pobject;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
new file mode 100644
index 0000000..1d9f614
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/engctx.h>
+#include <core/class.h>
+
+#include <engine/bsp.h>
+
+struct nv84_bsp_priv {
+	struct nouveau_engine base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_bsp_sclass[] = {
+	{},
+};
+
+/*******************************************************************************
+ * BSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_bsp_cclass = {
+	.handle = NV_ENGCTX(BSP, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_engctx_ctor,
+		.dtor = _nouveau_engctx_dtor,
+		.init = _nouveau_engctx_init,
+		.fini = _nouveau_engctx_fini,
+		.rd32 = _nouveau_engctx_rd32,
+		.wr32 = _nouveau_engctx_wr32,
+	},
+};
+
+/*******************************************************************************
+ * BSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv84_bsp_priv *priv;
+	int ret;
+
+	ret = nouveau_engine_create(parent, engine, oclass, true,
+				    "PBSP", "bsp", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x04008000;
+	nv_engine(priv)->cclass = &nv84_bsp_cclass;
+	nv_engine(priv)->sclass = nv84_bsp_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nv84_bsp_oclass = {
+	.handle = NV_ENGINE(BSP, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_bsp_ctor,
+		.dtor = _nouveau_engine_dtor,
+		.init = _nouveau_engine_init,
+		.fini = _nouveau_engine_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
new file mode 100644
index 0000000..0a5aa6b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nvc0_bsp_priv {
+	struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_sclass[] = {
+	{ 0x90b1, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_cclass = {
+	.handle = NV_ENGCTX(BSP, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_bsp_init(struct nouveau_object *object)
+{
+	struct nvc0_bsp_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_falcon_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x084010, 0x0000fff2);
+	nv_wr32(priv, 0x08401c, 0x0000fff2);
+	return 0;
+}
+
+static int
+nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nvc0_bsp_priv *priv;
+	int ret;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+				    "PBSP", "bsp", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00008000;
+	nv_engine(priv)->cclass = &nvc0_bsp_cclass;
+	nv_engine(priv)->sclass = nvc0_bsp_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_bsp_oclass = {
+	.handle = NV_ENGINE(BSP, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_bsp_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = nvc0_bsp_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
new file mode 100644
index 0000000..d4f23bb
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nve0_bsp_priv {
+	struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_sclass[] = {
+	{ 0x95b1, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_cclass = {
+	.handle = NV_ENGCTX(BSP, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_bsp_init(struct nouveau_object *object)
+{
+	struct nve0_bsp_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_falcon_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x084010, 0x0000fff2);
+	nv_wr32(priv, 0x08401c, 0x0000fff2);
+	return 0;
+}
+
+static int
+nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nve0_bsp_priv *priv;
+	int ret;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+				    "PBSP", "bsp", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00008000;
+	nv_engine(priv)->cclass = &nve0_bsp_cclass;
+	nv_engine(priv)->sclass = nve0_bsp_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_bsp_oclass = {
+	.handle = NV_ENGINE(BSP, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_bsp_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = nve0_bsp_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc b/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc
new file mode 100644
index 0000000..219850d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc
@@ -0,0 +1,872 @@
+/* fuc microcode for copy engine on nva3- chipsets
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build for nva3:nvc0
+ *    m4 -DNVA3 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nva3_copy.fuc.h
+ *
+ * To build for nvc0-
+ *    m4 -DNVC0 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_copy.fuc.h
+ */
+
+ifdef(`NVA3',
+.section #nva3_pcopy_data
+,
+.section #nvc0_pcopy_data
+)
+
+ctx_object:                   .b32 0
+ifdef(`NVA3',
+ctx_dma:
+ctx_dma_query:                .b32 0
+ctx_dma_src:                  .b32 0
+ctx_dma_dst:                  .b32 0
+,)
+.equ #ctx_dma_count 3
+ctx_query_address_high:       .b32 0
+ctx_query_address_low:        .b32 0
+ctx_query_counter:            .b32 0
+ctx_src_address_high:         .b32 0
+ctx_src_address_low:          .b32 0
+ctx_src_pitch:                .b32 0
+ctx_src_tile_mode:            .b32 0
+ctx_src_xsize:                .b32 0
+ctx_src_ysize:                .b32 0
+ctx_src_zsize:                .b32 0
+ctx_src_zoff:                 .b32 0
+ctx_src_xoff:                 .b32 0
+ctx_src_yoff:                 .b32 0
+ctx_src_cpp:                  .b32 0
+ctx_dst_address_high:         .b32 0
+ctx_dst_address_low:          .b32 0
+ctx_dst_pitch:                .b32 0
+ctx_dst_tile_mode:            .b32 0
+ctx_dst_xsize:                .b32 0
+ctx_dst_ysize:                .b32 0
+ctx_dst_zsize:                .b32 0
+ctx_dst_zoff:                 .b32 0
+ctx_dst_xoff:                 .b32 0
+ctx_dst_yoff:                 .b32 0
+ctx_dst_cpp:                  .b32 0
+ctx_format:                   .b32 0
+ctx_swz_const0:               .b32 0
+ctx_swz_const1:               .b32 0
+ctx_xcnt:                     .b32 0
+ctx_ycnt:                     .b32 0
+.align 256
+
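+// Table format: each method range starts with a 16-bit pair of
+// {first method address >> 2, method count}, followed by one 8-byte
+// descriptor per method: either a data-segment pointer (the value is simply
+// stored as state) or 0x00010000 + handler address (a command is executed),
+// then ~(mask of valid bits) - any submitted bit outside the mask raises
+// INVALID_BITFIELD in the dispatcher below.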
+dispatch_table:
+// mthd 0x0000, NAME
+.b16 0x000 1
+.b32 #ctx_object                     ~0xffffffff
+// mthd 0x0100, NOP
+.b16 0x040 1
+.b32 0x00010000 + #cmd_nop           ~0xffffffff
+// mthd 0x0140, PM_TRIGGER
+.b16 0x050 1
+.b32 0x00010000 + #cmd_pm_trigger    ~0xffffffff
+ifdef(`NVA3', `
+// mthd 0x0180-0x018c, DMA_
+.b16 0x060 #ctx_dma_count
+dispatch_dma:
+.b32 0x00010000 + #cmd_dma           ~0xffffffff
+.b32 0x00010000 + #cmd_dma           ~0xffffffff
+.b32 0x00010000 + #cmd_dma           ~0xffffffff
+',)
+// mthd 0x0200-0x0218, SRC_TILE
+.b16 0x80 7
+.b32 #ctx_src_tile_mode              ~0x00000fff
+.b32 #ctx_src_xsize                  ~0x0007ffff
+.b32 #ctx_src_ysize                  ~0x00001fff
+.b32 #ctx_src_zsize                  ~0x000007ff
+.b32 #ctx_src_zoff                   ~0x00000fff
+.b32 #ctx_src_xoff                   ~0x0007ffff
+.b32 #ctx_src_yoff                   ~0x00001fff
+// mthd 0x0220-0x0238, DST_TILE
+.b16 0x88 7
+.b32 #ctx_dst_tile_mode              ~0x00000fff
+.b32 #ctx_dst_xsize                  ~0x0007ffff
+.b32 #ctx_dst_ysize                  ~0x00001fff
+.b32 #ctx_dst_zsize                  ~0x000007ff
+.b32 #ctx_dst_zoff                   ~0x00000fff
+.b32 #ctx_dst_xoff                   ~0x0007ffff
+.b32 #ctx_dst_yoff                   ~0x00001fff
+// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
+.b16 0xc0 2
+.b32 0x00010000 + #cmd_exec          ~0xffffffff
+.b32 0x00010000 + #cmd_wrcache_flush ~0xffffffff
+// mthd 0x030c-0x0340, src/dst addresses and pitches, x/y counts, format, swizzle constants, query state
+.b16 0xc3 14
+.b32 #ctx_src_address_high           ~0x000000ff
+.b32 #ctx_src_address_low            ~0xffffffff
+.b32 #ctx_dst_address_high           ~0x000000ff
+.b32 #ctx_dst_address_low            ~0xffffffff
+.b32 #ctx_src_pitch                  ~0x0007ffff
+.b32 #ctx_dst_pitch                  ~0x0007ffff
+.b32 #ctx_xcnt                       ~0x0000ffff
+.b32 #ctx_ycnt                       ~0x00001fff
+.b32 #ctx_format                     ~0x0333ffff
+.b32 #ctx_swz_const0                 ~0xffffffff
+.b32 #ctx_swz_const1                 ~0xffffffff
+.b32 #ctx_query_address_high         ~0x000000ff
+.b32 #ctx_query_address_low          ~0xffffffff
+.b32 #ctx_query_counter              ~0xffffffff
+.b16 0x800 0
+
+ifdef(`NVA3',
+.section #nva3_pcopy_code
+,
+.section #nvc0_pcopy_code
+)
+
+main:
+   clear b32 $r0
+   mov $sp $r0
+
+   // setup i0 handler and route fifo and ctxswitch to it
+   mov $r1 #ih
+   mov $iv0 $r1
+   mov $r1 0x400
+   movw $r2 0xfff3
+   sethi $r2 0
+   iowr I[$r1 + 0x300] $r2
+
+   // enable interrupts
+   or $r2 0xc
+   iowr I[$r1] $r2
+   bset $flags ie0
+
+   // enable fifo access and context switching
+   mov $r1 0x1200
+   mov $r2 3
+   iowr I[$r1] $r2
+
+   // sleep forever, waking for interrupts
+   bset $flags $p0
+   spin:
+      sleep $p0
+      bra #spin
+
+// i0 handler
+ih:
+   iord $r1 I[$r0 + 0x200]
+
+   and $r2 $r1 0x00000008
+   bra e #ih_no_chsw
+      call #chsw
+   ih_no_chsw:
+   and $r2 $r1 0x00000004
+   bra e #ih_no_cmd
+      call #dispatch
+
+   ih_no_cmd:
+   and $r1 $r1 0x0000000c
+   iowr I[$r0 + 0x100] $r1
+   iret
+
+// $p1 direction (0 = unload, 1 = load)
+// $r3 channel
+swctx:
+   mov $r4 0x7700
+   mov $xtargets $r4
+ifdef(`NVA3', `
+   // target 7 hardcoded to ctx dma object
+   mov $xdbase $r0
+', ` // NVC0
+   // read SCRATCH3 to decide if we are PCOPY0 or PCOPY1
+   mov $r4 0x2100
+   iord $r4 I[$r4 + 0]
+   and $r4 1
+   shl b32 $r4 4
+   add b32 $r4 0x30
+
+   // channel is in vram
+   mov $r15 0x61c
+   shl b32 $r15 6
+   mov $r5 0x114
+   iowrs I[$r15] $r5
+
+   // read 16-byte PCOPYn info, containing context pointer, from channel
+   shl b32 $r5 $r3 4
+   add b32 $r5 2
+   mov $xdbase $r5
+   mov $r5 $sp
+   // get a chunk of stack space, aligned to 256 byte boundary
+   sub b32 $r5 0x100
+   mov $r6 0xff
+   not b32 $r6
+   and $r5 $r6
+   sethi $r5 0x00020000
+   xdld $r4 $r5
+   xdwait
+   sethi $r5 0
+
+   // set context pointer, from within channel VM
+   mov $r14 0
+   iowrs I[$r15] $r14
+   ld b32 $r4 D[$r5 + 0]
+   shr b32 $r4 8
+   ld b32 $r6 D[$r5 + 4]
+   shl b32 $r6 24
+   or $r4 $r6
+   mov $xdbase $r4
+')
+   // 256-byte context, at start of data segment
+   mov b32 $r4 $r0
+   sethi $r4 0x60000
+
+   // swap!
+   bra $p1 #swctx_load
+      xdst $r0 $r4
+      bra #swctx_done
+   swctx_load:
+      xdld $r0 $r4
+   swctx_done:
+   xdwait
+   ret
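+
+// A hedged C sketch (invented names, illustrative only) of the two bits
+// of address arithmetic in the NVC0 path above: carving a 256-byte
+// aligned scratch region out of the stack, and re-packing the context
+// pointer from the channel's PCOPYn info words ($xdbase takes the
+// address shifted right by 8).
+
+#include <stdint.h>
+
+static uintptr_t
+align_down_256(uintptr_t sp)
+{
+	/* the "sub b32 0x100 / and ~0xff" pair above */
+	return (sp - 0x100) & ~(uintptr_t)0xff;
+}
+
+static uint32_t
+ctx_ptr_from_chan_info(uint32_t word0, uint32_t word1)
+{
+	/* ld/shr 8 on the low word, ld/shl 24 on the high word, or'd */
+	return (word0 >> 8) | (word1 << 24);
+}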
+
+chsw:
+   // read current channel
+   mov $r2 0x1400
+   iord $r3 I[$r2]
+
+   // if it's active, unload it and return
+   xbit $r15 $r3 0x1e
+   bra e #chsw_no_unload
+      bclr $flags $p1
+      call #swctx
+      bclr $r3 0x1e
+      iowr I[$r2] $r3
+      mov $r4 1
+      iowr I[$r2 + 0x200] $r4
+      ret
+
+   // read next channel
+   chsw_no_unload:
+   iord $r3 I[$r2 + 0x100]
+
+   // is there a channel waiting to be loaded?
+   xbit $r13 $r3 0x1e
+   bra e #chsw_finish_load
+      bset $flags $p1
+      call #swctx
+ifdef(`NVA3',
+      // load dma objects back into TARGET regs
+      mov $r5 #ctx_dma
+      mov $r6 #ctx_dma_count
+      chsw_load_ctx_dma:
+         ld b32 $r7 D[$r5 + $r6 * 4]
+         add b32 $r8 $r6 0x180
+         shl b32 $r8 8
+         iowr I[$r8] $r7
+         sub b32 $r6 1
+         bra nc #chsw_load_ctx_dma
+,)
+
+   chsw_finish_load:
+   mov $r3 2
+   iowr I[$r2 + 0x200] $r3
+   ret
+
+dispatch:
+   // read incoming fifo command
+   mov $r3 0x1900
+   iord $r2 I[$r3 + 0x100]
+   iord $r3 I[$r3 + 0x000]
+   and $r4 $r2 0x7ff
+   // $r2 will be used to store exception data
+   shl b32 $r2 0x10
+
+   // lookup method in the dispatch table, ILLEGAL_MTHD if not found
+   mov $r5 #dispatch_table
+   clear b32 $r6
+   clear b32 $r7
+   dispatch_loop:
+      ld b16 $r6 D[$r5 + 0]
+      ld b16 $r7 D[$r5 + 2]
+      add b32 $r5 4
+      cmpu b32 $r4 $r6
+      bra c #dispatch_illegal_mthd
+      add b32 $r7 $r6
+      cmpu b32 $r4 $r7
+      bra c #dispatch_valid_mthd
+      sub b32 $r7 $r6
+      shl b32 $r7 3
+      add b32 $r5 $r7
+      bra #dispatch_loop
+
+   // ensure no bits set in reserved fields, INVALID_BITFIELD
+   dispatch_valid_mthd:
+   sub b32 $r4 $r6
+   shl b32 $r4 3
+   add b32 $r4 $r5
+   ld b32 $r5 D[$r4 + 4]
+   and $r5 $r3
+   cmpu b32 $r5 0
+   bra ne #dispatch_invalid_bitfield
+
+   // depending on dispatch flags: execute method, or save data as state
+   ld b16 $r5 D[$r4 + 0]
+   ld b16 $r6 D[$r4 + 2]
+   cmpu b32 $r6 0
+   bra ne #dispatch_cmd
+      st b32 D[$r5] $r3
+      bra #dispatch_done
+   dispatch_cmd:
+      bclr $flags $p1
+      call $r5
+      bra $p1 #dispatch_error
+      bra #dispatch_done
+
+   dispatch_invalid_bitfield:
+   or $r2 2
+   dispatch_illegal_mthd:
+   or $r2 1
+
+   // store exception data in SCRATCH0/SCRATCH1, signal hostirq
+   dispatch_error:
+   mov $r4 0x1000
+   iowr I[$r4 + 0x000] $r2
+   iowr I[$r4 + 0x100] $r3
+   mov $r2 0x40
+   iowr I[$r0] $r2
+   hostirq_wait:
+      iord $r2 I[$r0 + 0x200]
+      and $r2 0x40
+      cmpu b32 $r2 0
+      bra ne #hostirq_wait
+
+   dispatch_done:
+   mov $r2 0x1d00
+   mov $r3 1
+   iowr I[$r2] $r3
+   ret
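+
+// The same table walk in hedged C, for readability (illustrative only;
+// the structs repeat the data-segment sketch so the snippet stands
+// alone). Returns NULL for ILLEGAL_MTHD; the 0x800/0 sentinel entry
+// terminates the search because incoming methods are masked to 11 bits.
+
+#include <stddef.h>
+#include <stdint.h>
+
+struct mthd_run_hdr { uint16_t first, count; };
+struct mthd_desc { uint16_t target, is_cmd; uint32_t rsvd_mask; };
+
+static const struct mthd_desc *
+lookup_mthd(const uint8_t *table, uint16_t mthd)
+{
+	for (;;) {
+		const struct mthd_run_hdr *hdr = (const void *)table;
+		table += sizeof(*hdr);
+		if (mthd < hdr->first)
+			return NULL;	/* ILLEGAL_MTHD */
+		if (mthd < (uint32_t)hdr->first + hdr->count)
+			return (const struct mthd_desc *)table +
+			       (mthd - hdr->first);
+		table += (size_t)hdr->count * sizeof(struct mthd_desc);
+	}
+}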
+
+// No-operation
+//
+// Inputs:
+//    $r1: irqh state
+//    $r2: hostirq state
+//    $r3: data
+//    $r4: dispatch table entry
+// Outputs:
+//    $r1: irqh state
+//    $p1: set on error
+//       $r2: hostirq state
+//       $r3: data
+cmd_nop:
+   ret
+
+// PM_TRIGGER
+//
+// Inputs:
+//    $r1: irqh state
+//    $r2: hostirq state
+//    $r3: data
+//    $r4: dispatch table entry
+// Outputs:
+//    $r1: irqh state
+//    $p1: set on error
+//       $r2: hostirq state
+//       $r3: data
+cmd_pm_trigger:
+   mov $r2 0x2200
+   clear b32 $r3
+   sethi $r3 0x20000
+   iowr I[$r2] $r3
+   ret
+
+ifdef(`NVA3',
+// SET_DMA_* method handler
+//
+// Inputs:
+//    $r1: irqh state
+//    $r2: hostirq state
+//    $r3: data
+//    $r4: dispatch table entry
+// Outputs:
+//    $r1: irqh state
+//    $p1: set on error
+//       $r2: hostirq state
+//       $r3: data
+cmd_dma:
+   sub b32 $r4 #dispatch_dma
+   shr b32 $r4 1
+   bset $r3 0x1e
+   st b32 D[$r4 + #ctx_dma] $r3
+   add b32 $r4 0x600
+   shl b32 $r4 6
+   iowr I[$r4] $r3
+   ret
+,)
+
+// Calculates the hw swizzle mask and adjusts the surface's xcnt to match
+//
+cmd_exec_set_format:
+   // zero out a chunk of the stack to store the swizzle into
+   add $sp -0x10
+   st b32 D[$sp + 0x00] $r0
+   st b32 D[$sp + 0x04] $r0
+   st b32 D[$sp + 0x08] $r0
+   st b32 D[$sp + 0x0c] $r0
+
+   // extract cpp, src_ncomp and dst_ncomp from FORMAT
+   ld b32 $r4 D[$r0 + #ctx_format]
+   extr $r5 $r4 16:17
+   add b32 $r5 1
+   extr $r6 $r4 20:21
+   add b32 $r6 1
+   extr $r7 $r4 24:25
+   add b32 $r7 1
+
+   // convert FORMAT swizzle mask to hw swizzle mask
+   bclr $flags $p2
+   clear b32 $r8
+   clear b32 $r9
+   ncomp_loop:
+      and $r10 $r4 0xf
+      shr b32 $r4 4
+      clear b32 $r11
+      bpc_loop:
+         cmpu b8 $r10 4
+         bra nc #cmp_c0
+            mulu $r12 $r10 $r5
+            add b32 $r12 $r11
+            bset $flags $p2
+            bra #bpc_next
+         cmp_c0:
+         bra ne #cmp_c1
+            mov $r12 0x10
+            add b32 $r12 $r11
+            bra #bpc_next
+         cmp_c1:
+         cmpu b8 $r10 6
+         bra nc #cmp_zero
+            mov $r12 0x14
+            add b32 $r12 $r11
+            bra #bpc_next
+         cmp_zero:
+            mov $r12 0x80
+         bpc_next:
+         st b8 D[$sp + $r8] $r12
+         add b32 $r8 1
+         add b32 $r11 1
+         cmpu b32 $r11 $r5
+         bra c #bpc_loop
+      add b32 $r9 1
+      cmpu b32 $r9 $r7
+      bra c #ncomp_loop
+
+   // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
+   mulu $r6 $r5
+   st b32 D[$r0 + #ctx_src_cpp] $r6
+   ld b32 $r8 D[$r0 + #ctx_xcnt]
+   mulu $r6 $r8
+   bra $p2 #dst_xcnt
+   clear b32 $r6
+
+   dst_xcnt:
+   mulu $r7 $r5
+   st b32 D[$r0 + #ctx_dst_cpp] $r7
+   mulu $r7 $r8
+
+   mov $r5 0x810
+   shl b32 $r5 6
+   iowr I[$r5 + 0x000] $r6
+   iowr I[$r5 + 0x100] $r7
+   add b32 $r5 0x800
+   ld b32 $r6 D[$r0 + #ctx_dst_cpp]
+   sub b32 $r6 1
+   shl b32 $r6 8
+   ld b32 $r7 D[$r0 + #ctx_src_cpp]
+   sub b32 $r7 1
+   or $r6 $r7
+   iowr I[$r5 + 0x000] $r6
+   add b32 $r5 0x100
+   ld b32 $r6 D[$sp + 0x00]
+   iowr I[$r5 + 0x000] $r6
+   ld b32 $r6 D[$sp + 0x04]
+   iowr I[$r5 + 0x100] $r6
+   ld b32 $r6 D[$sp + 0x08]
+   iowr I[$r5 + 0x200] $r6
+   ld b32 $r6 D[$sp + 0x0c]
+   iowr I[$r5 + 0x300] $r6
+   add b32 $r5 0x400
+   ld b32 $r6 D[$r0 + #ctx_swz_const0]
+   iowr I[$r5 + 0x000] $r6
+   ld b32 $r6 D[$r0 + #ctx_swz_const1]
+   iowr I[$r5 + 0x100] $r6
+   add $sp 0x10
+   ret
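+
+// A stand-alone C model of the FORMAT -> hw swizzle translation above
+// (illustrative; names invented). Component selectors 0-3 index a source
+// component, 4 and 5 take bytes from SWZ_CONST0/1, anything else writes
+// zero (0x80). The return value mirrors $p2: whether the source surface
+// is referenced at all.
+
+#include <stdbool.h>
+#include <stdint.h>
+
+static bool
+build_hw_swizzle(uint32_t format, uint8_t swz[16])
+{
+	unsigned bpc       = ((format >> 16) & 3) + 1; /* bytes/component */
+	unsigned dst_ncomp = ((format >> 24) & 3) + 1;
+	bool src_used = false;
+	unsigned i = 0;
+
+	for (unsigned comp = 0; comp < dst_ncomp; comp++) {
+		unsigned sel = (format >> (comp * 4)) & 0xf;
+		for (unsigned byte = 0; byte < bpc; byte++, i++) {
+			if (sel < 4) {
+				swz[i] = sel * bpc + byte;
+				src_used = true;
+			} else if (sel == 4) {
+				swz[i] = 0x10 + byte;	/* SWZ_CONST0 byte */
+			} else if (sel == 5) {
+				swz[i] = 0x14 + byte;	/* SWZ_CONST1 byte */
+			} else {
+				swz[i] = 0x80;		/* write zero */
+			}
+		}
+	}
+	return src_used; /* if false, SRC_XCNT is zeroed (hw would hang) */
+}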
+
+// Setup to handle a tiled surface
+//
+// Calculates a number of parameters the hardware requires in order
+// to correctly handle tiling.
+//
+// Offset calculation is performed as follows (Tp/Th/Td from TILE_MODE):
+//    nTx = round_up(w * cpp, 1 << Tp) >> Tp
+//    nTy = round_up(h, 1 << Th) >> Th
+//    Txo = (x * cpp) & ((1 << Tp) - 1)
+//     Tx = (x * cpp) >> Tp
+//    Tyo = y & ((1 << Th) - 1)
+//     Ty = y >> Th
+//    Tzo = z & ((1 << Td) - 1)
+//     Tz = z >> Td
+//
+//    off  = (Tzo << Tp << Th) + (Tyo << Tp) + Txo
+//    off += ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Td << Th << Tp;
+//
+// Inputs:
+//    $r4: hw command (0x104800)
+//    $r5: ctx offset adjustment for src/dst selection
+//    $p2: set if dst surface
+//
+cmd_exec_set_surface_tiled:
+   // translate TILE_MODE into Tp, Th, Td shift values
+   ld b32 $r7 D[$r5 + #ctx_src_tile_mode]
+   extr $r9 $r7 8:11
+   extr $r8 $r7 4:7
+ifdef(`NVA3',
+   add b32 $r8 2
+,
+   add b32 $r8 3
+)
+   extr $r7 $r7 0:3
+   cmp b32 $r7 0xe
+   bra ne #xtile64
+   mov $r7 4
+   bra #xtileok
+   xtile64:
+   xbit $r7 $flags $p2
+   add b32 $r7 17
+   bset $r4 $r7
+   mov $r7 6
+   xtileok:
+
+   // Op (= Txo) = (x * cpp) & ((1 << Tp) - 1)
+   // Tx         = (x * cpp) >> Tp
+   ld b32 $r10 D[$r5 + #ctx_src_xoff]
+   ld b32 $r11 D[$r5 + #ctx_src_cpp]
+   mulu $r10 $r11
+   mov $r11 1
+   shl b32 $r11 $r7
+   sub b32 $r11 1
+   and $r12 $r10 $r11
+   shr b32 $r10 $r7
+
+   // Tyo = y & ((1 << Th) - 1)
+   // Ty  = y >> Th
+   ld b32 $r13 D[$r5 + #ctx_src_yoff]
+   mov $r14 1
+   shl b32 $r14 $r8
+   sub b32 $r14 1
+   and $r11 $r13 $r14
+   shr b32 $r13 $r8
+
+   // YTILE = ((1 << Th) << 12) | ((1 << Th) - Tyo)
+   add b32 $r14 1
+   shl b32 $r15 $r14 12
+   sub b32 $r14 $r11
+   or $r15 $r14
+   xbit $r6 $flags $p2
+   add b32 $r6 0x208
+   shl b32 $r6 8
+   iowr I[$r6 + 0x000] $r15
+
+   // Op += Tyo << Tp
+   shl b32 $r11 $r7
+   add b32 $r12 $r11
+
+   // nTx = ((w * cpp) + ((1 << Tp) - 1)) >> Tp
+   ld b32 $r15 D[$r5 + #ctx_src_xsize]
+   ld b32 $r11 D[$r5 + #ctx_src_cpp]
+   mulu $r15 $r11
+   mov $r11 1
+   shl b32 $r11 $r7
+   sub b32 $r11 1
+   add b32 $r15 $r11
+   shr b32 $r15 $r7
+   push $r15
+
+   // nTy = (h + ((1 << Th) - 1)) >> Th
+   ld b32 $r15 D[$r5 + #ctx_src_ysize]
+   mov $r11 1
+   shl b32 $r11 $r8
+   sub b32 $r11 1
+   add b32 $r15 $r11
+   shr b32 $r15 $r8
+   push $r15
+
+   // Tys = Tp + Th
+   // CFG_YZ_TILE_SIZE = ((1 << Th) >> 2) << Td
+   add b32 $r7 $r8
+   sub b32 $r8 2
+   mov $r11 1
+   shl b32 $r11 $r8
+   shl b32 $r11 $r9
+
+   // Tzo = z & ((1 << Td) - 1)
+   // Tz  = z >> Td
+   // Op += Tzo << Tys
+   // Ts  = Tys + Td
+   ld b32 $r8 D[$r5 + #ctx_src_zoff]
+   mov $r14 1
+   shl b32 $r14 $r9
+   sub b32 $r14 1
+   and $r15 $r8 $r14
+   shl b32 $r15 $r7
+   add b32 $r12 $r15
+   add b32 $r7 $r9
+   shr b32 $r8 $r9
+
+   // Ot = ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Ts
+   pop $r15
+   pop $r9
+   mulu $r13 $r9
+   add b32 $r10 $r13
+   mulu $r8 $r9
+   mulu $r8 $r15
+   add b32 $r10 $r8
+   shl b32 $r10 $r7
+
+   // PITCH = (nTx - 1) << Ts
+   sub b32 $r9 1
+   shl b32 $r9 $r7
+   iowr I[$r6 + 0x200] $r9
+
+   // SRC_ADDRESS_LOW   = (Ot + Op) & 0xffffffff
+   // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
+   ld b32 $r7 D[$r5 + #ctx_src_address_low]
+   ld b32 $r8 D[$r5 + #ctx_src_address_high]
+   add b32 $r10 $r12
+   add b32 $r7 $r10
+   adc b32 $r8 0
+   shl b32 $r8 16
+   or $r8 $r11
+   sub b32 $r6 0x600
+   iowr I[$r6 + 0x000] $r7
+   add b32 $r6 0x400
+   iowr I[$r6 + 0x000] $r8
+   ret
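+
+// The offset formula documented before cmd_exec_set_surface_tiled,
+// transcribed into stand-alone C so it can be checked against the
+// firmware (illustrative only; Tp/Th/Td are the TILE_MODE shifts,
+// x_bytes/w_bytes are already multiplied by cpp):
+
+#include <stdint.h>
+
+static uint64_t
+tiled_offset(unsigned Tp, unsigned Th, unsigned Td,
+	     uint32_t x_bytes, uint32_t y, uint32_t z,
+	     uint32_t w_bytes, uint32_t h)
+{
+	uint64_t nTx = (w_bytes + (1u << Tp) - 1) >> Tp;
+	uint64_t nTy = (h + (1u << Th) - 1) >> Th;
+	uint64_t Txo = x_bytes & ((1u << Tp) - 1), Tx = x_bytes >> Tp;
+	uint64_t Tyo = y & ((1u << Th) - 1),       Ty = y >> Th;
+	uint64_t Tzo = z & ((1u << Td) - 1),       Tz = z >> Td;
+
+	uint64_t off = (Tzo << (Tp + Th)) + (Tyo << Tp) + Txo;
+
+	/* (Tz*nTy + Ty)*nTx + Tx == Tz*nTy*nTx + Ty*nTx + Tx */
+	return off + (((Tz * nTy + Ty) * nTx + Tx) << (Tp + Th + Td));
+}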
+
+// Setup to handle a linear surface
+//
+// Nothing to see here... sets ADDRESS and PITCH, pretty non-exciting.
+//
+cmd_exec_set_surface_linear:
+   xbit $r6 $flags $p2
+   add b32 $r6 0x202
+   shl b32 $r6 8
+   ld b32 $r7 D[$r5 + #ctx_src_address_low]
+   iowr I[$r6 + 0x000] $r7
+   add b32 $r6 0x400
+   ld b32 $r7 D[$r5 + #ctx_src_address_high]
+   shl b32 $r7 16
+   iowr I[$r6 + 0x000] $r7
+   add b32 $r6 0x400
+   ld b32 $r7 D[$r5 + #ctx_src_pitch]
+   iowr I[$r6 + 0x000] $r7
+   ret
+
+// wait for regs to be available for use
+cmd_exec_wait:
+   push $r0
+   push $r1
+   mov $r0 0x800
+   shl b32 $r0 6
+   loop:
+      iord $r1 I[$r0]
+      and $r1 1
+      bra ne #loop
+   pop $r1
+   pop $r0
+   ret
+
+cmd_exec_query:
+   // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI };
+   // the counter word at offset 0 is filled in below either way
+   xbit $r4 $r3 13
+   bra ne #query_counter
+      call #cmd_exec_wait
+      mov $r4 0x80c
+      shl b32 $r4 6
+      ld b32 $r5 D[$r0 + #ctx_query_address_low]
+      add b32 $r5 4
+      iowr I[$r4 + 0x000] $r5
+      iowr I[$r4 + 0x100] $r0
+      mov $r5 0xc
+      iowr I[$r4 + 0x200] $r5
+      add b32 $r4 0x400
+      ld b32 $r5 D[$r0 + #ctx_query_address_high]
+      shl b32 $r5 16
+      iowr I[$r4 + 0x000] $r5
+      add b32 $r4 0x500
+      mov $r5 0x00000b00
+      sethi $r5 0x00010000
+      iowr I[$r4 + 0x000] $r5
+      mov $r5 0x00004040
+      shl b32 $r5 1
+      sethi $r5 0x80800000
+      iowr I[$r4 + 0x100] $r5
+      mov $r5 0x00001110
+      sethi $r5 0x13120000
+      iowr I[$r4 + 0x200] $r5
+      mov $r5 0x00001514
+      sethi $r5 0x17160000
+      iowr I[$r4 + 0x300] $r5
+      mov $r5 0x00002601
+      sethi $r5 0x00010000
+      mov $r4 0x800
+      shl b32 $r4 6
+      iowr I[$r4 + 0x000] $r5
+
+   // write COUNTER
+   query_counter:
+   call #cmd_exec_wait
+   mov $r4 0x80c
+   shl b32 $r4 6
+   ld b32 $r5 D[$r0 + #ctx_query_address_low]
+   iowr I[$r4 + 0x000] $r5
+   iowr I[$r4 + 0x100] $r0
+   mov $r5 0x4
+   iowr I[$r4 + 0x200] $r5
+   add b32 $r4 0x400
+   ld b32 $r5 D[$r0 + #ctx_query_address_high]
+   shl b32 $r5 16
+   iowr I[$r4 + 0x000] $r5
+   add b32 $r4 0x500
+   mov $r5 0x00000300
+   iowr I[$r4 + 0x000] $r5
+   mov $r5 0x00001110
+   sethi $r5 0x13120000
+   iowr I[$r4 + 0x100] $r5
+   ld b32 $r5 D[$r0 + #ctx_query_counter]
+   add b32 $r4 0x500
+   iowr I[$r4 + 0x000] $r5
+   mov $r5 0x00002601
+   sethi $r5 0x00010000
+   mov $r4 0x800
+   shl b32 $r4 6
+   iowr I[$r4 + 0x000] $r5
+   ret
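+
+// Shape of the 16-byte query record the two paths above produce, as a
+// hypothetical C struct. The long form fills in the pad and timestamp
+// words first; both paths finish by writing the counter at offset 0.
+
+#include <stdint.h>
+
+struct pcopy_query {
+	uint32_t counter;  /* ctx_query_counter */
+	uint32_t pad;      /* always zero */
+	uint32_t time_lo;  /* PTIMER low word at completion */
+	uint32_t time_hi;  /* PTIMER high word */
+};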
+
+// Execute a copy operation
+//
+// Inputs:
+//    $r1: irqh state
+//    $r2: hostirq state
+//    $r3: data
+//       00002000 QUERY_SHORT
+//       00001000 QUERY
+//       00000100 DST_LINEAR
+//       00000010 SRC_LINEAR
+//       00000001 FORMAT
+//    $r4: dispatch table entry
+// Outputs:
+//    $r1: irqh state
+//    $p1: set on error
+//       $r2: hostirq state
+//       $r3: data
+cmd_exec:
+   call #cmd_exec_wait
+
+   // if format requested, call function to calculate it, otherwise
+   // fill in cpp/xcnt for both surfaces as if (cpp == 1)
+   xbit $r15 $r3 0
+   bra e #cmd_exec_no_format
+      call #cmd_exec_set_format
+      mov $r4 0x200
+      bra #cmd_exec_init_src_surface
+   cmd_exec_no_format:
+      mov $r6 0x810
+      shl b32 $r6 6
+      mov $r7 1
+      st b32 D[$r0 + #ctx_src_cpp] $r7
+      st b32 D[$r0 + #ctx_dst_cpp] $r7
+      ld b32 $r7 D[$r0 + #ctx_xcnt]
+      iowr I[$r6 + 0x000] $r7
+      iowr I[$r6 + 0x100] $r7
+      clear b32 $r4
+
+   cmd_exec_init_src_surface:
+   bclr $flags $p2
+   clear b32 $r5
+   xbit $r15 $r3 4
+   bra e #src_tiled
+      call #cmd_exec_set_surface_linear
+      bra #cmd_exec_init_dst_surface
+   src_tiled:
+      call #cmd_exec_set_surface_tiled
+      bset $r4 7
+
+   cmd_exec_init_dst_surface:
+   bset $flags $p2
+   mov $r5 #ctx_dst_address_high - #ctx_src_address_high
+   xbit $r15 $r3 8
+   bra e #dst_tiled
+      call #cmd_exec_set_surface_linear
+      bra #cmd_exec_kick
+   dst_tiled:
+      call #cmd_exec_set_surface_tiled
+      bset $r4 8
+
+   cmd_exec_kick:
+   mov $r5 0x800
+   shl b32 $r5 6
+   ld b32 $r6 D[$r0 + #ctx_ycnt]
+   iowr I[$r5 + 0x100] $r6
+   mov $r6 0x0041
+   // SRC_TARGET = 1, DST_TARGET = 2
+   sethi $r6 0x44000000
+   or $r4 $r6
+   iowr I[$r5] $r4
+
+   // if requested, queue up a QUERY write after the copy has completed
+   xbit $r15 $r3 12
+   bra e #cmd_exec_done
+      call #cmd_exec_query
+
+   cmd_exec_done:
+   ret
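+
+// The EXEC data word bits tested above (xbit on bits 0/4/8/12/13),
+// collected into a C enum with invented names, as a hedged reference:
+
+enum pcopy_exec_flags {
+	EXEC_FORMAT      = 1 <<  0, /* run cmd_exec_set_format first */
+	EXEC_SRC_LINEAR  = 1 <<  4, /* source is linear, else tiled */
+	EXEC_DST_LINEAR  = 1 <<  8, /* destination is linear, else tiled */
+	EXEC_QUERY       = 1 << 12, /* write a query record afterwards */
+	EXEC_QUERY_SHORT = 1 << 13, /* counter only, no timestamp */
+};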
+
+// Flush write cache
+//
+// Inputs:
+//    $r1: irqh state
+//    $r2: hostirq state
+//    $r3: data
+//    $r4: dispatch table entry
+// Outputs:
+//    $r1: irqh state
+//    $p1: set on error
+//       $r2: hostirq state
+//       $r3: data
+cmd_wrcache_flush:
+   mov $r2 0x2200
+   clear b32 $r3
+   sethi $r3 0x10000
+   iowr I[$r2] $r3
+   ret
+
+.align 0x100
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
new file mode 100644
index 0000000..c92520f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
@@ -0,0 +1,620 @@
+static u32 nva3_pcopy_data[] = {
+/* 0x0000: ctx_object */
+	0x00000000,
+/* 0x0004: ctx_dma */
+/* 0x0004: ctx_dma_query */
+	0x00000000,
+/* 0x0008: ctx_dma_src */
+	0x00000000,
+/* 0x000c: ctx_dma_dst */
+	0x00000000,
+/* 0x0010: ctx_query_address_high */
+	0x00000000,
+/* 0x0014: ctx_query_address_low */
+	0x00000000,
+/* 0x0018: ctx_query_counter */
+	0x00000000,
+/* 0x001c: ctx_src_address_high */
+	0x00000000,
+/* 0x0020: ctx_src_address_low */
+	0x00000000,
+/* 0x0024: ctx_src_pitch */
+	0x00000000,
+/* 0x0028: ctx_src_tile_mode */
+	0x00000000,
+/* 0x002c: ctx_src_xsize */
+	0x00000000,
+/* 0x0030: ctx_src_ysize */
+	0x00000000,
+/* 0x0034: ctx_src_zsize */
+	0x00000000,
+/* 0x0038: ctx_src_zoff */
+	0x00000000,
+/* 0x003c: ctx_src_xoff */
+	0x00000000,
+/* 0x0040: ctx_src_yoff */
+	0x00000000,
+/* 0x0044: ctx_src_cpp */
+	0x00000000,
+/* 0x0048: ctx_dst_address_high */
+	0x00000000,
+/* 0x004c: ctx_dst_address_low */
+	0x00000000,
+/* 0x0050: ctx_dst_pitch */
+	0x00000000,
+/* 0x0054: ctx_dst_tile_mode */
+	0x00000000,
+/* 0x0058: ctx_dst_xsize */
+	0x00000000,
+/* 0x005c: ctx_dst_ysize */
+	0x00000000,
+/* 0x0060: ctx_dst_zsize */
+	0x00000000,
+/* 0x0064: ctx_dst_zoff */
+	0x00000000,
+/* 0x0068: ctx_dst_xoff */
+	0x00000000,
+/* 0x006c: ctx_dst_yoff */
+	0x00000000,
+/* 0x0070: ctx_dst_cpp */
+	0x00000000,
+/* 0x0074: ctx_format */
+	0x00000000,
+/* 0x0078: ctx_swz_const0 */
+	0x00000000,
+/* 0x007c: ctx_swz_const1 */
+	0x00000000,
+/* 0x0080: ctx_xcnt */
+	0x00000000,
+/* 0x0084: ctx_ycnt */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0100: dispatch_table */
+	0x00010000,
+	0x00000000,
+	0x00000000,
+	0x00010040,
+	0x00010160,
+	0x00000000,
+	0x00010050,
+	0x00010162,
+	0x00000000,
+	0x00030060,
+/* 0x0128: dispatch_dma */
+	0x00010170,
+	0x00000000,
+	0x00010170,
+	0x00000000,
+	0x00010170,
+	0x00000000,
+	0x00070080,
+	0x00000028,
+	0xfffff000,
+	0x0000002c,
+	0xfff80000,
+	0x00000030,
+	0xffffe000,
+	0x00000034,
+	0xfffff800,
+	0x00000038,
+	0xfffff000,
+	0x0000003c,
+	0xfff80000,
+	0x00000040,
+	0xffffe000,
+	0x00070088,
+	0x00000054,
+	0xfffff000,
+	0x00000058,
+	0xfff80000,
+	0x0000005c,
+	0xffffe000,
+	0x00000060,
+	0xfffff800,
+	0x00000064,
+	0xfffff000,
+	0x00000068,
+	0xfff80000,
+	0x0000006c,
+	0xffffe000,
+	0x000200c0,
+	0x00010492,
+	0x00000000,
+	0x0001051b,
+	0x00000000,
+	0x000e00c3,
+	0x0000001c,
+	0xffffff00,
+	0x00000020,
+	0x00000000,
+	0x00000048,
+	0xffffff00,
+	0x0000004c,
+	0x00000000,
+	0x00000024,
+	0xfff80000,
+	0x00000050,
+	0xfff80000,
+	0x00000080,
+	0xffff0000,
+	0x00000084,
+	0xffffe000,
+	0x00000074,
+	0xfccc0000,
+	0x00000078,
+	0x00000000,
+	0x0000007c,
+	0x00000000,
+	0x00000010,
+	0xffffff00,
+	0x00000014,
+	0x00000000,
+	0x00000018,
+	0x00000000,
+	0x00000800,
+};
+
+static u32 nva3_pcopy_code[] = {
+/* 0x0000: main */
+	0x04fe04bd,
+	0x3517f000,
+	0xf10010fe,
+	0xf1040017,
+	0xf0fff327,
+	0x12d00023,
+	0x0c25f0c0,
+	0xf40012d0,
+	0x17f11031,
+	0x27f01200,
+	0x0012d003,
+/* 0x002f: spin */
+	0xf40031f4,
+	0x0ef40028,
+/* 0x0035: ih */
+	0x8001cffd,
+	0xf40812c4,
+	0x21f4060b,
+/* 0x0041: ih_no_chsw */
+	0x0412c472,
+	0xf4060bf4,
+/* 0x004a: ih_no_cmd */
+	0x11c4c321,
+	0x4001d00c,
+/* 0x0052: swctx */
+	0x47f101f8,
+	0x4bfe7700,
+	0x0007fe00,
+	0xf00204b9,
+	0x01f40643,
+	0x0604fa09,
+/* 0x006b: swctx_load */
+	0xfa060ef4,
+/* 0x006e: swctx_done */
+	0x03f80504,
+/* 0x0072: chsw */
+	0x27f100f8,
+	0x23cf1400,
+	0x1e3fc800,
+	0xf4170bf4,
+	0x21f40132,
+	0x1e3af052,
+	0xf00023d0,
+	0x24d00147,
+/* 0x0093: chsw_no_unload */
+	0xcf00f880,
+	0x3dc84023,
+	0x220bf41e,
+	0xf40131f4,
+	0x57f05221,
+	0x0367f004,
+/* 0x00a8: chsw_load_ctx_dma */
+	0xa07856bc,
+	0xb6018068,
+	0x87d00884,
+	0x0162b600,
+/* 0x00bb: chsw_finish_load */
+	0xf0f018f4,
+	0x23d00237,
+/* 0x00c3: dispatch */
+	0xf100f880,
+	0xcf190037,
+	0x33cf4032,
+	0xff24e400,
+	0x1024b607,
+	0x010057f1,
+	0x74bd64bd,
+/* 0x00dc: dispatch_loop */
+	0x58005658,
+	0x50b60157,
+	0x0446b804,
+	0xbb4d08f4,
+	0x47b80076,
+	0x0f08f404,
+	0xb60276bb,
+	0x57bb0374,
+	0xdf0ef400,
+/* 0x0100: dispatch_valid_mthd */
+	0xb60246bb,
+	0x45bb0344,
+	0x01459800,
+	0xb00453fd,
+	0x1bf40054,
+	0x00455820,
+	0xb0014658,
+	0x1bf40064,
+	0x00538009,
+/* 0x0127: dispatch_cmd */
+	0xf4300ef4,
+	0x55f90132,
+	0xf40c01f4,
+/* 0x0132: dispatch_invalid_bitfield */
+	0x25f0250e,
+/* 0x0135: dispatch_illegal_mthd */
+	0x0125f002,
+/* 0x0138: dispatch_error */
+	0x100047f1,
+	0xd00042d0,
+	0x27f04043,
+	0x0002d040,
+/* 0x0148: hostirq_wait */
+	0xf08002cf,
+	0x24b04024,
+	0xf71bf400,
+/* 0x0154: dispatch_done */
+	0x1d0027f1,
+	0xd00137f0,
+	0x00f80023,
+/* 0x0160: cmd_nop */
+/* 0x0162: cmd_pm_trigger */
+	0x27f100f8,
+	0x34bd2200,
+	0xd00233f0,
+	0x00f80023,
+/* 0x0170: cmd_dma */
+	0x012842b7,
+	0xf00145b6,
+	0x43801e39,
+	0x0040b701,
+	0x0644b606,
+	0xf80043d0,
+/* 0x0189: cmd_exec_set_format */
+	0xf030f400,
+	0xb00001b0,
+	0x01b00101,
+	0x0301b002,
+	0xc71d0498,
+	0x50b63045,
+	0x3446c701,
+	0xc70160b6,
+	0x70b63847,
+	0x0232f401,
+	0x94bd84bd,
+/* 0x01b4: ncomp_loop */
+	0xb60f4ac4,
+	0xb4bd0445,
+/* 0x01bc: bpc_loop */
+	0xf404a430,
+	0xa5ff0f18,
+	0x00cbbbc0,
+	0xf40231f4,
+/* 0x01ce: cmp_c0 */
+	0x1bf4220e,
+	0x10c7f00c,
+	0xf400cbbb,
+/* 0x01da: cmp_c1 */
+	0xa430160e,
+	0x0c18f406,
+	0xbb14c7f0,
+	0x0ef400cb,
+/* 0x01e9: cmp_zero */
+	0x80c7f107,
+/* 0x01ed: bpc_next */
+	0x01c83800,
+	0xb60180b6,
+	0xb5b801b0,
+	0xc308f404,
+	0xb80190b6,
+	0x08f40497,
+	0x0065fdb2,
+	0x98110680,
+	0x68fd2008,
+	0x0502f400,
+/* 0x0216: dst_xcnt */
+	0x75fd64bd,
+	0x1c078000,
+	0xf10078fd,
+	0xb6081057,
+	0x56d00654,
+	0x4057d000,
+	0x080050b7,
+	0xb61c0698,
+	0x64b60162,
+	0x11079808,
+	0xfd0172b6,
+	0x56d00567,
+	0x0050b700,
+	0x0060b401,
+	0xb40056d0,
+	0x56d00160,
+	0x0260b440,
+	0xb48056d0,
+	0x56d00360,
+	0x0050b7c0,
+	0x1e069804,
+	0x980056d0,
+	0x56d01f06,
+	0x1030f440,
+/* 0x0276: cmd_exec_set_surface_tiled */
+	0x579800f8,
+	0x6879c70a,
+	0xb66478c7,
+	0x77c70280,
+	0x0e76b060,
+	0xf0091bf4,
+	0x0ef40477,
+/* 0x0291: xtile64 */
+	0x027cf00f,
+	0xfd1170b6,
+	0x77f00947,
+/* 0x029d: xtileok */
+	0x0f5a9806,
+	0xfd115b98,
+	0xb7f000ab,
+	0x04b7bb01,
+	0xff01b2b6,
+	0xa7bbc4ab,
+	0x105d9805,
+	0xbb01e7f0,
+	0xe2b604e8,
+	0xb4deff01,
+	0xb605d8bb,
+	0xef9401e0,
+	0x02ebbb0c,
+	0xf005fefd,
+	0x60b7026c,
+	0x64b60208,
+	0x006fd008,
+	0xbb04b7bb,
+	0x5f9800cb,
+	0x115b980b,
+	0xf000fbfd,
+	0xb7bb01b7,
+	0x01b2b604,
+	0xbb00fbbb,
+	0xf0f905f7,
+	0xf00c5f98,
+	0xb8bb01b7,
+	0x01b2b604,
+	0xbb00fbbb,
+	0xf0f905f8,
+	0xb60078bb,
+	0xb7f00282,
+	0x04b8bb01,
+	0x9804b9bb,
+	0xe7f00e58,
+	0x04e9bb01,
+	0xff01e2b6,
+	0xf7bbf48e,
+	0x00cfbb04,
+	0xbb0079bb,
+	0xf0fc0589,
+	0xd9fd90fc,
+	0x00adbb00,
+	0xfd0089fd,
+	0xa8bb008f,
+	0x04a7bb00,
+	0xbb0192b6,
+	0x69d00497,
+	0x08579880,
+	0xbb075898,
+	0x7abb00ac,
+	0x0081b600,
+	0xfd1084b6,
+	0x62b7058b,
+	0x67d00600,
+	0x0060b700,
+	0x0068d004,
+/* 0x0382: cmd_exec_set_surface_linear */
+	0x6cf000f8,
+	0x0260b702,
+	0x0864b602,
+	0xd0085798,
+	0x60b70067,
+	0x57980400,
+	0x1074b607,
+	0xb70067d0,
+	0x98040060,
+	0x67d00957,
+/* 0x03ab: cmd_exec_wait */
+	0xf900f800,
+	0xf110f900,
+	0xb6080007,
+/* 0x03b6: loop */
+	0x01cf0604,
+	0x0114f000,
+	0xfcfa1bf4,
+	0xf800fc10,
+/* 0x03c5: cmd_exec_query */
+	0x0d34c800,
+	0xf5701bf4,
+	0xf103ab21,
+	0xb6080c47,
+	0x05980644,
+	0x0450b605,
+	0xd00045d0,
+	0x57f04040,
+	0x8045d00c,
+	0x040040b7,
+	0xb6040598,
+	0x45d01054,
+	0x0040b700,
+	0x0057f105,
+	0x0153f00b,
+	0xf10045d0,
+	0xb6404057,
+	0x53f10154,
+	0x45d08080,
+	0x1057f140,
+	0x1253f111,
+	0x8045d013,
+	0x151457f1,
+	0x171653f1,
+	0xf1c045d0,
+	0xf0260157,
+	0x47f10153,
+	0x44b60800,
+	0x0045d006,
+/* 0x0438: query_counter */
+	0x03ab21f5,
+	0x080c47f1,
+	0x980644b6,
+	0x45d00505,
+	0x4040d000,
+	0xd00457f0,
+	0x40b78045,
+	0x05980400,
+	0x1054b604,
+	0xb70045d0,
+	0xf1050040,
+	0xd0030057,
+	0x57f10045,
+	0x53f11110,
+	0x45d01312,
+	0x06059840,
+	0x050040b7,
+	0xf10045d0,
+	0xf0260157,
+	0x47f10153,
+	0x44b60800,
+	0x0045d006,
+/* 0x0492: cmd_exec */
+	0x21f500f8,
+	0x3fc803ab,
+	0x0e0bf400,
+	0x018921f5,
+	0x020047f1,
+/* 0x04a7: cmd_exec_no_format */
+	0xf11e0ef4,
+	0xb6081067,
+	0x77f00664,
+	0x11078001,
+	0x981c0780,
+	0x67d02007,
+	0x4067d000,
+/* 0x04c2: cmd_exec_init_src_surface */
+	0x32f444bd,
+	0xc854bd02,
+	0x0bf4043f,
+	0x8221f50a,
+	0x0a0ef403,
+/* 0x04d4: src_tiled */
+	0x027621f5,
+/* 0x04db: cmd_exec_init_dst_surface */
+	0xf40749f0,
+	0x57f00231,
+	0x083fc82c,
+	0xf50a0bf4,
+	0xf4038221,
+/* 0x04ee: dst_tiled */
+	0x21f50a0e,
+	0x49f00276,
+/* 0x04f5: cmd_exec_kick */
+	0x0057f108,
+	0x0654b608,
+	0xd0210698,
+	0x67f04056,
+	0x0063f141,
+	0x0546fd44,
+	0xc80054d0,
+	0x0bf40c3f,
+	0xc521f507,
+/* 0x0519: cmd_exec_done */
+/* 0x051b: cmd_wrcache_flush */
+	0xf100f803,
+	0xbd220027,
+	0x0133f034,
+	0xf80023d0,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
new file mode 100644
index 0000000..0d98c6c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
@@ -0,0 +1,606 @@
+static u32 nvc0_pcopy_data[] = {
+/* 0x0000: ctx_object */
+	0x00000000,
+/* 0x0004: ctx_query_address_high */
+	0x00000000,
+/* 0x0008: ctx_query_address_low */
+	0x00000000,
+/* 0x000c: ctx_query_counter */
+	0x00000000,
+/* 0x0010: ctx_src_address_high */
+	0x00000000,
+/* 0x0014: ctx_src_address_low */
+	0x00000000,
+/* 0x0018: ctx_src_pitch */
+	0x00000000,
+/* 0x001c: ctx_src_tile_mode */
+	0x00000000,
+/* 0x0020: ctx_src_xsize */
+	0x00000000,
+/* 0x0024: ctx_src_ysize */
+	0x00000000,
+/* 0x0028: ctx_src_zsize */
+	0x00000000,
+/* 0x002c: ctx_src_zoff */
+	0x00000000,
+/* 0x0030: ctx_src_xoff */
+	0x00000000,
+/* 0x0034: ctx_src_yoff */
+	0x00000000,
+/* 0x0038: ctx_src_cpp */
+	0x00000000,
+/* 0x003c: ctx_dst_address_high */
+	0x00000000,
+/* 0x0040: ctx_dst_address_low */
+	0x00000000,
+/* 0x0044: ctx_dst_pitch */
+	0x00000000,
+/* 0x0048: ctx_dst_tile_mode */
+	0x00000000,
+/* 0x004c: ctx_dst_xsize */
+	0x00000000,
+/* 0x0050: ctx_dst_ysize */
+	0x00000000,
+/* 0x0054: ctx_dst_zsize */
+	0x00000000,
+/* 0x0058: ctx_dst_zoff */
+	0x00000000,
+/* 0x005c: ctx_dst_xoff */
+	0x00000000,
+/* 0x0060: ctx_dst_yoff */
+	0x00000000,
+/* 0x0064: ctx_dst_cpp */
+	0x00000000,
+/* 0x0068: ctx_format */
+	0x00000000,
+/* 0x006c: ctx_swz_const0 */
+	0x00000000,
+/* 0x0070: ctx_swz_const1 */
+	0x00000000,
+/* 0x0074: ctx_xcnt */
+	0x00000000,
+/* 0x0078: ctx_ycnt */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0100: dispatch_table */
+	0x00010000,
+	0x00000000,
+	0x00000000,
+	0x00010040,
+	0x0001019f,
+	0x00000000,
+	0x00010050,
+	0x000101a1,
+	0x00000000,
+	0x00070080,
+	0x0000001c,
+	0xfffff000,
+	0x00000020,
+	0xfff80000,
+	0x00000024,
+	0xffffe000,
+	0x00000028,
+	0xfffff800,
+	0x0000002c,
+	0xfffff000,
+	0x00000030,
+	0xfff80000,
+	0x00000034,
+	0xffffe000,
+	0x00070088,
+	0x00000048,
+	0xfffff000,
+	0x0000004c,
+	0xfff80000,
+	0x00000050,
+	0xffffe000,
+	0x00000054,
+	0xfffff800,
+	0x00000058,
+	0xfffff000,
+	0x0000005c,
+	0xfff80000,
+	0x00000060,
+	0xffffe000,
+	0x000200c0,
+	0x000104b8,
+	0x00000000,
+	0x00010541,
+	0x00000000,
+	0x000e00c3,
+	0x00000010,
+	0xffffff00,
+	0x00000014,
+	0x00000000,
+	0x0000003c,
+	0xffffff00,
+	0x00000040,
+	0x00000000,
+	0x00000018,
+	0xfff80000,
+	0x00000044,
+	0xfff80000,
+	0x00000074,
+	0xffff0000,
+	0x00000078,
+	0xffffe000,
+	0x00000068,
+	0xfccc0000,
+	0x0000006c,
+	0x00000000,
+	0x00000070,
+	0x00000000,
+	0x00000004,
+	0xffffff00,
+	0x00000008,
+	0x00000000,
+	0x0000000c,
+	0x00000000,
+	0x00000800,
+};
+
+static u32 nvc0_pcopy_code[] = {
+/* 0x0000: main */
+	0x04fe04bd,
+	0x3517f000,
+	0xf10010fe,
+	0xf1040017,
+	0xf0fff327,
+	0x12d00023,
+	0x0c25f0c0,
+	0xf40012d0,
+	0x17f11031,
+	0x27f01200,
+	0x0012d003,
+/* 0x002f: spin */
+	0xf40031f4,
+	0x0ef40028,
+/* 0x0035: ih */
+	0x8001cffd,
+	0xf40812c4,
+	0x21f4060b,
+/* 0x0041: ih_no_chsw */
+	0x0412c4ca,
+	0xf5070bf4,
+/* 0x004b: ih_no_cmd */
+	0xc4010221,
+	0x01d00c11,
+/* 0x0053: swctx */
+	0xf101f840,
+	0xfe770047,
+	0x47f1004b,
+	0x44cf2100,
+	0x0144f000,
+	0xb60444b6,
+	0xf7f13040,
+	0xf4b6061c,
+	0x1457f106,
+	0x00f5d101,
+	0xb6043594,
+	0x57fe0250,
+	0x0145fe00,
+	0x010052b7,
+	0x00ff67f1,
+	0x56fd60bd,
+	0x0253f004,
+	0xf80545fa,
+	0x0053f003,
+	0xd100e7f0,
+	0x549800fe,
+	0x0845b600,
+	0xb6015698,
+	0x46fd1864,
+	0x0047fe05,
+	0xf00204b9,
+	0x01f40643,
+	0x0604fa09,
+/* 0x00c3: swctx_load */
+	0xfa060ef4,
+/* 0x00c6: swctx_done */
+	0x03f80504,
+/* 0x00ca: chsw */
+	0x27f100f8,
+	0x23cf1400,
+	0x1e3fc800,
+	0xf4170bf4,
+	0x21f40132,
+	0x1e3af053,
+	0xf00023d0,
+	0x24d00147,
+/* 0x00eb: chsw_no_unload */
+	0xcf00f880,
+	0x3dc84023,
+	0x090bf41e,
+	0xf40131f4,
+/* 0x00fa: chsw_finish_load */
+	0x37f05321,
+	0x8023d002,
+/* 0x0102: dispatch */
+	0x37f100f8,
+	0x32cf1900,
+	0x0033cf40,
+	0x07ff24e4,
+	0xf11024b6,
+	0xbd010057,
+/* 0x011b: dispatch_loop */
+	0x5874bd64,
+	0x57580056,
+	0x0450b601,
+	0xf40446b8,
+	0x76bb4d08,
+	0x0447b800,
+	0xbb0f08f4,
+	0x74b60276,
+	0x0057bb03,
+/* 0x013f: dispatch_valid_mthd */
+	0xbbdf0ef4,
+	0x44b60246,
+	0x0045bb03,
+	0xfd014598,
+	0x54b00453,
+	0x201bf400,
+	0x58004558,
+	0x64b00146,
+	0x091bf400,
+	0xf4005380,
+/* 0x0166: dispatch_cmd */
+	0x32f4300e,
+	0xf455f901,
+	0x0ef40c01,
+/* 0x0171: dispatch_invalid_bitfield */
+	0x0225f025,
+/* 0x0174: dispatch_illegal_mthd */
+/* 0x0177: dispatch_error */
+	0xf10125f0,
+	0xd0100047,
+	0x43d00042,
+	0x4027f040,
+/* 0x0187: hostirq_wait */
+	0xcf0002d0,
+	0x24f08002,
+	0x0024b040,
+/* 0x0193: dispatch_done */
+	0xf1f71bf4,
+	0xf01d0027,
+	0x23d00137,
+/* 0x019f: cmd_nop */
+	0xf800f800,
+/* 0x01a1: cmd_pm_trigger */
+	0x0027f100,
+	0xf034bd22,
+	0x23d00233,
+/* 0x01af: cmd_exec_set_format */
+	0xf400f800,
+	0x01b0f030,
+	0x0101b000,
+	0xb00201b0,
+	0x04980301,
+	0x3045c71a,
+	0xc70150b6,
+	0x60b63446,
+	0x3847c701,
+	0xf40170b6,
+	0x84bd0232,
+/* 0x01da: ncomp_loop */
+	0x4ac494bd,
+	0x0445b60f,
+/* 0x01e2: bpc_loop */
+	0xa430b4bd,
+	0x0f18f404,
+	0xbbc0a5ff,
+	0x31f400cb,
+	0x220ef402,
+/* 0x01f4: cmp_c0 */
+	0xf00c1bf4,
+	0xcbbb10c7,
+	0x160ef400,
+/* 0x0200: cmp_c1 */
+	0xf406a430,
+	0xc7f00c18,
+	0x00cbbb14,
+/* 0x020f: cmp_zero */
+	0xf1070ef4,
+/* 0x0213: bpc_next */
+	0x380080c7,
+	0x80b601c8,
+	0x01b0b601,
+	0xf404b5b8,
+	0x90b6c308,
+	0x0497b801,
+	0xfdb208f4,
+	0x06800065,
+	0x1d08980e,
+	0xf40068fd,
+	0x64bd0502,
+/* 0x023c: dst_xcnt */
+	0x800075fd,
+	0x78fd1907,
+	0x1057f100,
+	0x0654b608,
+	0xd00056d0,
+	0x50b74057,
+	0x06980800,
+	0x0162b619,
+	0x980864b6,
+	0x72b60e07,
+	0x0567fd01,
+	0xb70056d0,
+	0xb4010050,
+	0x56d00060,
+	0x0160b400,
+	0xb44056d0,
+	0x56d00260,
+	0x0360b480,
+	0xb7c056d0,
+	0x98040050,
+	0x56d01b06,
+	0x1c069800,
+	0xf44056d0,
+	0x00f81030,
+/* 0x029c: cmd_exec_set_surface_tiled */
+	0xc7075798,
+	0x78c76879,
+	0x0380b664,
+	0xb06077c7,
+	0x1bf40e76,
+	0x0477f009,
+/* 0x02b7: xtile64 */
+	0xf00f0ef4,
+	0x70b6027c,
+	0x0947fd11,
+/* 0x02c3: xtileok */
+	0x980677f0,
+	0x5b980c5a,
+	0x00abfd0e,
+	0xbb01b7f0,
+	0xb2b604b7,
+	0xc4abff01,
+	0x9805a7bb,
+	0xe7f00d5d,
+	0x04e8bb01,
+	0xff01e2b6,
+	0xd8bbb4de,
+	0x01e0b605,
+	0xbb0cef94,
+	0xfefd02eb,
+	0x026cf005,
+	0x020860b7,
+	0xd00864b6,
+	0xb7bb006f,
+	0x00cbbb04,
+	0x98085f98,
+	0xfbfd0e5b,
+	0x01b7f000,
+	0xb604b7bb,
+	0xfbbb01b2,
+	0x05f7bb00,
+	0x5f98f0f9,
+	0x01b7f009,
+	0xb604b8bb,
+	0xfbbb01b2,
+	0x05f8bb00,
+	0x78bbf0f9,
+	0x0282b600,
+	0xbb01b7f0,
+	0xb9bb04b8,
+	0x0b589804,
+	0xbb01e7f0,
+	0xe2b604e9,
+	0xf48eff01,
+	0xbb04f7bb,
+	0x79bb00cf,
+	0x0589bb00,
+	0x90fcf0fc,
+	0xbb00d9fd,
+	0x89fd00ad,
+	0x008ffd00,
+	0xbb00a8bb,
+	0x92b604a7,
+	0x0497bb01,
+	0x988069d0,
+	0x58980557,
+	0x00acbb04,
+	0xb6007abb,
+	0x84b60081,
+	0x058bfd10,
+	0x060062b7,
+	0xb70067d0,
+	0xd0040060,
+	0x00f80068,
+/* 0x03a8: cmd_exec_set_surface_linear */
+	0xb7026cf0,
+	0xb6020260,
+	0x57980864,
+	0x0067d005,
+	0x040060b7,
+	0xb6045798,
+	0x67d01074,
+	0x0060b700,
+	0x06579804,
+	0xf80067d0,
+/* 0x03d1: cmd_exec_wait */
+	0xf900f900,
+	0x0007f110,
+	0x0604b608,
+/* 0x03dc: loop */
+	0xf00001cf,
+	0x1bf40114,
+	0xfc10fcfa,
+/* 0x03eb: cmd_exec_query */
+	0xc800f800,
+	0x1bf40d34,
+	0xd121f570,
+	0x0c47f103,
+	0x0644b608,
+	0xb6020598,
+	0x45d00450,
+	0x4040d000,
+	0xd00c57f0,
+	0x40b78045,
+	0x05980400,
+	0x1054b601,
+	0xb70045d0,
+	0xf1050040,
+	0xf00b0057,
+	0x45d00153,
+	0x4057f100,
+	0x0154b640,
+	0x808053f1,
+	0xf14045d0,
+	0xf1111057,
+	0xd0131253,
+	0x57f18045,
+	0x53f11514,
+	0x45d01716,
+	0x0157f1c0,
+	0x0153f026,
+	0x080047f1,
+	0xd00644b6,
+/* 0x045e: query_counter */
+	0x21f50045,
+	0x47f103d1,
+	0x44b6080c,
+	0x02059806,
+	0xd00045d0,
+	0x57f04040,
+	0x8045d004,
+	0x040040b7,
+	0xb6010598,
+	0x45d01054,
+	0x0040b700,
+	0x0057f105,
+	0x0045d003,
+	0x111057f1,
+	0x131253f1,
+	0x984045d0,
+	0x40b70305,
+	0x45d00500,
+	0x0157f100,
+	0x0153f026,
+	0x080047f1,
+	0xd00644b6,
+	0x00f80045,
+/* 0x04b8: cmd_exec */
+	0x03d121f5,
+	0xf4003fc8,
+	0x21f50e0b,
+	0x47f101af,
+	0x0ef40200,
+/* 0x04cd: cmd_exec_no_format */
+	0x1067f11e,
+	0x0664b608,
+	0x800177f0,
+	0x07800e07,
+	0x1d079819,
+	0xd00067d0,
+	0x44bd4067,
+/* 0x04e8: cmd_exec_init_src_surface */
+	0xbd0232f4,
+	0x043fc854,
+	0xf50a0bf4,
+	0xf403a821,
+/* 0x04fa: src_tiled */
+	0x21f50a0e,
+	0x49f0029c,
+/* 0x0501: cmd_exec_init_dst_surface */
+	0x0231f407,
+	0xc82c57f0,
+	0x0bf4083f,
+	0xa821f50a,
+	0x0a0ef403,
+/* 0x0514: dst_tiled */
+	0x029c21f5,
+/* 0x051b: cmd_exec_kick */
+	0xf10849f0,
+	0xb6080057,
+	0x06980654,
+	0x4056d01e,
+	0xf14167f0,
+	0xfd440063,
+	0x54d00546,
+	0x0c3fc800,
+	0xf5070bf4,
+/* 0x053f: cmd_exec_done */
+	0xf803eb21,
+/* 0x0541: cmd_wrcache_flush */
+	0x0027f100,
+	0xf034bd22,
+	0x23d00133,
+	0x0000f800,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
new file mode 100644
index 0000000..d6dc2a6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/falcon.h>
+#include <core/class.h>
+#include <core/enum.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+#include <engine/fifo.h>
+#include <engine/copy.h>
+
+#include "fuc/nva3.fuc.h"
+
+struct nva3_copy_priv {
+	struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nva3_copy_sclass[] = {
+	{ 0x85b5, &nouveau_object_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nva3_copy_cclass = {
+	.handle = NV_ENGCTX(COPY0, 0xa3),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
+
+	},
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_enum nva3_copy_isr_error_name[] = {
+	{ 0x0001, "ILLEGAL_MTHD" },
+	{ 0x0002, "INVALID_ENUM" },
+	{ 0x0003, "INVALID_BITFIELD" },
+	{}
+};
+
+void
+nva3_copy_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_falcon *falcon = (void *)subdev;
+	struct nouveau_object *engctx;
+	u32 dispatch = nv_ro32(falcon, 0x01c);
+	u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+	u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
+	u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
+	u32 addr = nv_ro32(falcon, 0x040) >> 16;
+	u32 mthd = (addr & 0x07ff) << 2;
+	u32 subc = (addr & 0x3800) >> 11;
+	u32 data = nv_ro32(falcon, 0x044);
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & 0x00000040) {
+		nv_error(falcon, "DISPATCH_ERROR [");
+		nouveau_enum_print(nva3_copy_isr_error_name, ssta);
+		pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
+		       chid, inst << 12, nouveau_client_name(engctx), subc,
+		       mthd, data);
+		nv_wo32(falcon, 0x004, 0x00000040);
+		stat &= ~0x00000040;
+	}
+
+	if (stat) {
+		nv_error(falcon, "unhandled intr 0x%08x\n", stat);
+		nv_wo32(falcon, 0x004, stat);
+	}
+
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nva3_copy_tlb_flush(struct nouveau_engine *engine)
+{
+	nv50_vm_flush_engine(&engine->base, 0x0d);
+	return 0;
+}
+
+static int
+nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	bool enable = (nv_device(parent)->chipset != 0xaf);
+	struct nva3_copy_priv *priv;
+	int ret;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable,
+				    "PCE0", "copy0", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00802000;
+	nv_subdev(priv)->intr = nva3_copy_intr;
+	nv_engine(priv)->cclass = &nva3_copy_cclass;
+	nv_engine(priv)->sclass = nva3_copy_sclass;
+	nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
+	nv_falcon(priv)->code.data = nva3_pcopy_code;
+	nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
+	nv_falcon(priv)->data.data = nva3_pcopy_data;
+	nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);
+	return 0;
+}
+
+struct nouveau_oclass
+nva3_copy_oclass = {
+	.handle = NV_ENGINE(COPY0, 0xa3),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_copy_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = _nouveau_falcon_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
new file mode 100644
index 0000000..b3ed273
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+#include <core/class.h>
+#include <core/enum.h>
+
+#include <engine/fifo.h>
+#include <engine/copy.h>
+
+#include "fuc/nvc0.fuc.h"
+
+struct nvc0_copy_priv {
+	struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_copy0_sclass[] = {
+	{ 0x90b5, &nouveau_object_ofuncs },
+	{},
+};
+
+static struct nouveau_oclass
+nvc0_copy1_sclass[] = {
+	{ 0x90b8, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
+
+static struct nouveau_ofuncs
+nvc0_copy_context_ofuncs = {
+	.ctor = _nouveau_falcon_context_ctor,
+	.dtor = _nouveau_falcon_context_dtor,
+	.init = _nouveau_falcon_context_init,
+	.fini = _nouveau_falcon_context_fini,
+	.rd32 = _nouveau_falcon_context_rd32,
+	.wr32 = _nouveau_falcon_context_wr32,
+};
+
+static struct nouveau_oclass
+nvc0_copy0_cclass = {
+	.handle = NV_ENGCTX(COPY0, 0xc0),
+	.ofuncs = &nvc0_copy_context_ofuncs,
+};
+
+static struct nouveau_oclass
+nvc0_copy1_cclass = {
+	.handle = NV_ENGCTX(COPY1, 0xc0),
+	.ofuncs = &nvc0_copy_context_ofuncs,
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_copy_init(struct nouveau_object *object)
+{
+	struct nvc0_copy_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_falcon_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
+	return 0;
+}
+
+static int
+nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nvc0_copy_priv *priv;
+	int ret;
+
+	if (nv_rd32(parent, 0x022500) & 0x00000100)
+		return -ENODEV;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
+				    "PCE0", "copy0", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000040;
+	nv_subdev(priv)->intr = nva3_copy_intr;
+	nv_engine(priv)->cclass = &nvc0_copy0_cclass;
+	nv_engine(priv)->sclass = nvc0_copy0_sclass;
+	nv_falcon(priv)->code.data = nvc0_pcopy_code;
+	nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+	nv_falcon(priv)->data.data = nvc0_pcopy_data;
+	nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
+	return 0;
+}
+
+static int
+nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nvc0_copy_priv *priv;
+	int ret;
+
+	if (nv_rd32(parent, 0x022500) & 0x00000200)
+		return -ENODEV;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
+				    "PCE1", "copy1", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000080;
+	nv_subdev(priv)->intr = nva3_copy_intr;
+	nv_engine(priv)->cclass = &nvc0_copy1_cclass;
+	nv_engine(priv)->sclass = nvc0_copy1_sclass;
+	nv_falcon(priv)->code.data = nvc0_pcopy_code;
+	nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+	nv_falcon(priv)->data.data = nvc0_pcopy_data;
+	nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_copy0_oclass = {
+	.handle = NV_ENGINE(COPY0, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_copy0_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = nvc0_copy_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
+
+struct nouveau_oclass
+nvc0_copy1_oclass = {
+	.handle = NV_ENGINE(COPY1, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_copy1_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = nvc0_copy_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
new file mode 100644
index 0000000..dbbe9e8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/copy.h>
+
+struct nve0_copy_priv {
+	struct nouveau_engine base;
+};
+
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_copy_sclass[] = {
+	{ 0xa0b5, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
+
+static struct nouveau_ofuncs
+nve0_copy_context_ofuncs = {
+	.ctor = _nouveau_engctx_ctor,
+	.dtor = _nouveau_engctx_dtor,
+	.init = _nouveau_engctx_init,
+	.fini = _nouveau_engctx_fini,
+	.rd32 = _nouveau_engctx_rd32,
+	.wr32 = _nouveau_engctx_wr32,
+};
+
+static struct nouveau_oclass
+nve0_copy_cclass = {
+	.handle = NV_ENGCTX(COPY0, 0xc0),
+	.ofuncs = &nve0_copy_context_ofuncs,
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nve0_copy_priv *priv;
+	int ret;
+
+	if (nv_rd32(parent, 0x022500) & 0x00000100)
+		return -ENODEV;
+
+	ret = nouveau_engine_create(parent, engine, oclass, true,
+				    "PCE0", "copy0", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000040;
+	nv_engine(priv)->cclass = &nve0_copy_cclass;
+	nv_engine(priv)->sclass = nve0_copy_sclass;
+	return 0;
+}
+
+static int
+nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nve0_copy_priv *priv;
+	int ret;
+
+	if (nv_rd32(parent, 0x022500) & 0x00000200)
+		return -ENODEV;
+
+	ret = nouveau_engine_create(parent, engine, oclass, true,
+				    "PCE1", "copy1", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000080;
+	nv_engine(priv)->cclass = &nve0_copy_cclass;
+	nv_engine(priv)->sclass = nve0_copy_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_copy0_oclass = {
+	.handle = NV_ENGINE(COPY0, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_copy0_ctor,
+		.dtor = _nouveau_engine_dtor,
+		.init = _nouveau_engine_init,
+		.fini = _nouveau_engine_fini,
+	},
+};
+
+struct nouveau_oclass
+nve0_copy1_oclass = {
+	.handle = NV_ENGINE(COPY1, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_copy1_ctor,
+		.dtor = _nouveau_engine_dtor,
+		.init = _nouveau_engine_init,
+		.fini = _nouveau_engine_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc b/linux-imx/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
new file mode 100644
index 0000000..629da02
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
@@ -0,0 +1,698 @@
+/*
+ *  fuc microcode for nv98 pcrypt engine
+ *  Copyright (C) 2010  Marcin Kościelnicki
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+.section #nv98_pcrypt_data
+
+ctx_dma:
+ctx_dma_query:		.b32 0
+ctx_dma_src:		.b32 0
+ctx_dma_dst:		.b32 0
+.equ #dma_count 3
+ctx_query_address_high:	.b32 0
+ctx_query_address_low:	.b32 0
+ctx_query_counter:	.b32 0
+ctx_cond_address_high:	.b32 0
+ctx_cond_address_low:	.b32 0
+ctx_cond_off:		.b32 0
+ctx_src_address_high:	.b32 0
+ctx_src_address_low:	.b32 0
+ctx_dst_address_high:	.b32 0
+ctx_dst_address_low:	.b32 0
+ctx_mode:		.b32 0
+.align 16
+ctx_key:		.skip 16
+ctx_iv:			.skip 16
+
+.align 0x80
+swap:
+.skip 32
+
+.align 8
+common_cmd_dtable:
+.b32 #ctx_query_address_high + 0x20000 ~0xff
+.b32 #ctx_query_address_low + 0x20000 ~0xfffffff0
+.b32 #ctx_query_counter + 0x20000 ~0xffffffff
+.b32 #cmd_query_get + 0x00000 ~1
+.b32 #ctx_cond_address_high + 0x20000 ~0xff
+.b32 #ctx_cond_address_low + 0x20000 ~0xfffffff0
+.b32 #cmd_cond_mode + 0x00000 ~7
+.b32 #cmd_wrcache_flush + 0x00000 ~0
+.equ #common_cmd_max 0x88
+
+
+.align 8
+engine_cmd_dtable:
+.b32 #ctx_key + 0x0 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0x4 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0x8 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0xc + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x0 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x4 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x8 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0xc + 0x20000 ~0xffffffff
+.b32 #ctx_src_address_high + 0x20000 ~0xff
+.b32 #ctx_src_address_low + 0x20000 ~0xfffffff0
+.b32 #ctx_dst_address_high + 0x20000 ~0xff
+.b32 #ctx_dst_address_low + 0x20000 ~0xfffffff0
+.b32 #crypt_cmd_mode + 0x00000 ~0xf
+.b32 #crypt_cmd_length + 0x10000 ~0x0ffffff0
+.equ #engine_cmd_max 0xce
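+
+// A hedged C sketch (names invented) of the 8-byte entries in the two
+// dtables above. The upper half of the first word selects the action:
+// 2 stores the data into the context, 0 calls the handler
+// unconditionally, 1 calls it only while COND mode allows execution.
+
+#include <stdint.h>
+
+struct crypt_dtable_entry {
+	uint16_t target;    /* ctx offset or handler code address */
+	uint16_t flags;     /* 2: setctx, 1: honour ctx_cond_off, 0: call */
+	uint32_t rsvd_mask; /* data & rsvd_mask != 0 -> INVALID_BITFIELD */
+};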
+
+.align 4
+crypt_dtable:
+.b16 #crypt_copy_prep #crypt_do_inout
+.b16 #crypt_store_prep #crypt_do_out
+.b16 #crypt_ecb_e_prep #crypt_do_inout
+.b16 #crypt_ecb_d_prep #crypt_do_inout
+.b16 #crypt_cbc_e_prep #crypt_do_inout
+.b16 #crypt_cbc_d_prep #crypt_do_inout
+.b16 #crypt_pcbc_e_prep #crypt_do_inout
+.b16 #crypt_pcbc_d_prep #crypt_do_inout
+.b16 #crypt_cfb_e_prep #crypt_do_inout
+.b16 #crypt_cfb_d_prep #crypt_do_inout
+.b16 #crypt_ofb_prep #crypt_do_inout
+.b16 #crypt_ctr_prep #crypt_do_inout
+.b16 #crypt_cbc_mac_prep #crypt_do_in
+.b16 #crypt_cmac_finish_complete_prep #crypt_do_in
+.b16 #crypt_cmac_finish_partial_prep #crypt_do_in
+
+.align 0x100
+
+.section #nv98_pcrypt_code
+
+	// $r0 is always set to 0 in our code - this allows some space savings.
+	clear b32 $r0
+
+	// set up the interrupt handler
+	mov $r1 #ih
+	mov $iv0 $r1
+
+	// init stack pointer
+	mov $sp $r0
+
+	// set interrupt dispatch - route timer, fifo, ctxswitch to i0, others to host
+	movw $r1 0xfff0
+	sethi $r1 0
+	mov $r2 0x400
+	iowr I[$r2 + 0x300] $r1
+
+	// enable the interrupts
+	or $r1 0xc
+	iowr I[$r2] $r1
+
+	// enable fifo access and context switching
+	mov $r1 3
+	mov $r2 0x1200
+	iowr I[$r2] $r1
+
+	// enable i0 delivery
+	bset $flags ie0
+
+	// sleep forever, waking only for interrupts.
+	bset $flags $p0
+	spin:
+	sleep $p0
+	bra #spin
+
+// i0 handler
+ih:
+	// see which interrupts we got
+	iord $r1 I[$r0 + 0x200]
+
+	and $r2 $r1 0x8
+	cmpu b32 $r2 0
+	bra e #noctx
+
+		// context switch... prepare the regs for xfer
+		mov $r2 0x7700
+		mov $xtargets $r2
+		mov $xdbase $r0
+		// 128-byte context.
+		mov $r2 0
+		sethi $r2 0x50000
+
+		// read current channel
+		mov $r3 0x1400
+		iord $r4 I[$r3]
+		// if bit 30 set, it's active, so we have to unload it first.
+		shl b32 $r5 $r4 1
+		cmps b32 $r5 0
+		bra nc #ctxload
+
+			// unload the current channel - save the context
+			xdst $r0 $r2
+			xdwait
+			// and clear bit 30, then write back
+			bclr $r4 0x1e
+			iowr I[$r3] $r4
+			// tell PFIFO we unloaded
+			mov $r4 1
+			iowr I[$r3 + 0x200] $r4
+
+		bra #noctx
+
+		ctxload:
+			// no channel loaded - perhaps we're requested to load one
+			iord $r4 I[$r3 + 0x100]
+			shl b32 $r15 $r4 1
+			cmps b32 $r15 0
+			// if bit 30 of next channel not set, probably PFIFO is just
+			// killing a context. do a faux load, without the active bit.
+			bra nc #dummyload
+
+				// ok, do a real context load.
+				xdld $r0 $r2
+				xdwait
+				mov $r5 #ctx_dma
+				mov $r6 #dma_count - 1
+				ctxload_dma_loop:
+					ld b32 $r7 D[$r5 + $r6 * 4]
+					add b32 $r8 $r6 0x180
+					shl b32 $r8 8
+					iowr I[$r8] $r7
+					sub b32 $r6 1
+				bra nc #ctxload_dma_loop
+
+			dummyload:
+			// tell PFIFO we're done
+			mov $r5 2
+			iowr I[$r3 + 0x200] $r5
+
+	noctx:
+	and $r2 $r1 0x4
+	cmpu b32 $r2 0
+	bra e #nocmd
+
+		// incoming fifo command.
+		mov $r3 0x1900
+		iord $r2 I[$r3 + 0x100]
+		iord $r3 I[$r3]
+		// extract the method
+		and $r4 $r2 0x7ff
+		// shift the addr to its proper position in case we need to raise an interrupt later
+		shl b32 $r2 0x10
+
+		// mthd 0 and 0x100 [NAME, NOP]: ignore
+		and $r5 $r4 0x7bf
+		cmpu b32 $r5 0
+		bra e #cmddone
+
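+		// route by method ($r4 holds the method in dword units, so
+		// 0xc0 here means mthd 0x300): >= 0x300 engine dtable,
+		// >= 0x200 common dtable, >= 0x180 DMA_*, == 0x140
+		// PM_TRIGGER, anything else is illegal.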
+		mov $r5 #engine_cmd_dtable - 0xc0 * 8
+		mov $r6 #engine_cmd_max
+		cmpu b32 $r4 0xc0
+		bra nc #dtable_cmd
+		mov $r5 #common_cmd_dtable - 0x80 * 8
+		mov $r6 #common_cmd_max
+		cmpu b32 $r4 0x80
+		bra nc #dtable_cmd
+		cmpu b32 $r4 0x60
+		bra nc #dma_cmd
+		cmpu b32 $r4 0x50
+		bra ne #illegal_mthd
+
+			// mthd 0x140: PM_TRIGGER
+			mov $r2 0x2200
+			clear b32 $r3
+			sethi $r3 0x20000
+			iowr I[$r2] $r3
+			bra #cmddone
+
+		dma_cmd:
+			// mthd 0x180...: DMA_*
+			cmpu b32 $r4 0x60+#dma_count
+			bra nc #illegal_mthd
+			shl b32 $r5 $r4 2
+			add b32 $r5 ((#ctx_dma - 0x60 * 4) & 0xffff)
+			bset $r3 0x1e
+			st b32 D[$r5] $r3
+			add b32 $r4 0x180 - 0x60
+			shl b32 $r4 8
+			iowr I[$r4] $r3
+			bra #cmddone
+
+		dtable_cmd:
+			cmpu b32 $r4 $r6
+			bra nc #illegal_mthd
+			shl b32 $r4 3
+			add b32 $r4 $r5
+			ld b32 $r5 D[$r4 + 4]
+			and $r5 $r3
+			cmpu b32 $r5 0
+			bra ne #invalid_bitfield
+			ld b16 $r5 D[$r4]
+			ld b16 $r6 D[$r4 + 2]
+			cmpu b32 $r6 2
+			bra e #cmd_setctx
+			ld b32 $r7 D[$r0 + #ctx_cond_off]
+			and $r6 $r7
+			cmpu b32 $r6 1
+			bra e #cmddone
+			call $r5
+			bra $p1 #dispatch_error
+			bra #cmddone
+
+		cmd_setctx:
+			st b32 D[$r5] $r3
+			bra #cmddone
+
+
+		invalid_bitfield:
+			or $r2 1
+		dispatch_error:
+		illegal_mthd:
+			mov $r4 0x1000
+			iowr I[$r4] $r2
+			iowr I[$r4 + 0x100] $r3
+			mov $r4 0x40
+			iowr I[$r0] $r4
+
+			im_loop:
+				iord $r4 I[$r0 + 0x200]
+				and $r4 0x40
+				cmpu b32 $r4 0
+			bra ne #im_loop
+
+		cmddone:
+		// remove the command from FIFO
+		mov $r3 0x1d00
+		mov $r4 1
+		iowr I[$r3] $r4
+
+	nocmd:
+	// ack the processed interrupts
+	and $r1 $r1 0xc
+	iowr I[$r0 + 0x100] $r1
+iret
+
+cmd_query_get:
+	// if bit 0 of param set, trigger interrupt afterwards.
+	setp $p1 $r3
+	or $r2 3
+
+	// read PTIMER, beware of races...
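+	// high word, low word, high word again - if the high word changed
+	// in between, the low word wrapped; retry until we get a consistent
+	// 64-bit timestamp.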
+	mov $r4 0xb00
+	ptimer_retry:
+		iord $r6 I[$r4 + 0x100]
+		iord $r5 I[$r4]
+		iord $r7 I[$r4 + 0x100]
+		cmpu b32 $r6 $r7
+	bra ne #ptimer_retry
+
+	// prepare the query structure
+	ld b32 $r4 D[$r0 + #ctx_query_counter]
+	st b32 D[$r0 + #swap + 0x0] $r4
+	st b32 D[$r0 + #swap + 0x4] $r0
+	st b32 D[$r0 + #swap + 0x8] $r5
+	st b32 D[$r0 + #swap + 0xc] $r6
+
+	// will use target 0, DMA_QUERY.
+	mov $xtargets $r0
+
+	ld b32 $r4 D[$r0 + #ctx_query_address_high]
+	shl b32 $r4 0x18
+	mov $xdbase $r4
+
+	ld b32 $r4 D[$r0 + #ctx_query_address_low]
+	mov $r5 #swap
+	sethi $r5 0x20000
+	xdst $r4 $r5
+	xdwait
+
+	ret
+
+cmd_cond_mode:
+	// if >= 5, INVALID_ENUM
+	bset $flags $p1
+	or $r2 2
+	cmpu b32 $r3 5
+	bra nc #return
+
+	// otherwise, no error.
+	bclr $flags $p1
+
+	// if < 2, no QUERY object is involved
+	cmpu b32 $r3 2
+	bra nc #cmd_cond_mode_queryful
+
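+		// modes 0/1 are constant conditions: ctx_cond_off = mode ^ 1,
+		// i.e. mode 0 leaves commands skipped, mode 1 runs them
+		// (cond_off is the skip flag tested in the dispatcher).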
+		xor $r3 1
+		st b32 D[$r0 + #ctx_cond_off] $r3
+	return:
+		ret
+
+	cmd_cond_mode_queryful:
+	// ok, will need to pull a QUERY object, prepare offsets
+	ld b32 $r4 D[$r0 + #ctx_cond_address_high]
+	ld b32 $r5 D[$r0 + #ctx_cond_address_low]
+	and $r6 $r5 0xff
+	shr b32 $r5 8
+	shl b32 $r4 0x18
+	or $r4 $r5
+	mov $xdbase $r4
+	mov $xtargets $r0
+
+	// pull the first one
+	mov $r5 #swap
+	sethi $r5 0x20000
+	xdld $r6 $r5
+
+	// if == 2, only a single QUERY is involved...
+	cmpu b32 $r3 2
+	bra ne #cmd_cond_mode_double
+
+		xdwait
+		ld b32 $r4 D[$r0 + #swap + 4]
+		cmpu b32 $r4 0
+		xbit $r4 $flags z
+		st b32 D[$r0 + #ctx_cond_off] $r4
+		ret
+
+	// ok, we'll need to pull the second one too
+	cmd_cond_mode_double:
+	add b32 $r6 0x10
+	add b32 $r5 0x10
+	xdld $r6 $r5
+	xdwait
+
+	// compare COUNTERs
+	ld b32 $r5 D[$r0 + #swap + 0x00]
+	ld b32 $r6 D[$r0 + #swap + 0x10]
+	cmpu b32 $r5 $r6
+	xbit $r4 $flags z
+
+	// compare RES fields
+	ld b32 $r5 D[$r0 + #swap + 0x04]
+	ld b32 $r6 D[$r0 + #swap + 0x14]
+	cmpu b32 $r5 $r6
+	xbit $r5 $flags z
+	and $r4 $r5
+
+	// and negate or not, depending on mode
+	cmpu b32 $r3 3
+	xbit $r5 $flags z
+	xor $r4 $r5
+	st b32 D[$r0 + #ctx_cond_off] $r4
+	ret
+
+cmd_wrcache_flush:
+	bclr $flags $p1
+	mov $r2 0x2200
+	clear b32 $r3
+	sethi $r3 0x10000
+	iowr I[$r2] $r3
+	ret
+
+crypt_cmd_mode:
+	// if >= 0xf, INVALID_ENUM
+	bset $flags $p1
+	or $r2 2
+	cmpu b32 $r3 0xf
+	bra nc #crypt_cmd_mode_return
+
+		bclr $flags $p1
+		st b32 D[$r0 + #ctx_mode] $r3
+
+	crypt_cmd_mode_return:
+	ret
+
+crypt_cmd_length:
+	// nop if length == 0
+	cmpu b32 $r3 0
+	bra e #crypt_cmd_mode_return
+
+	// init key, IV
+	cxset 3
+	mov $r4 #ctx_key
+	sethi $r4 0x70000
+	xdst $r0 $r4
+	mov $r4 #ctx_iv
+	sethi $r4 0x60000
+	xdst $r0 $r4
+	xdwait
+	ckeyreg $c7
+
+	// prepare the targets
+	mov $r4 0x2100
+	mov $xtargets $r4
+
+	// prepare src address
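+	// split into $xdbase material (address >> 8) and the low byte,
+	// which the crypt_do_* loops use as the starting transfer offset;
+	// the dst address below gets the same treatment, and
+	// #cmd_cond_mode_queryful above does the same split.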
+	ld b32 $r4 D[$r0 + #ctx_src_address_high]
+	ld b32 $r5 D[$r0 + #ctx_src_address_low]
+	shr b32 $r8 $r5 8
+	shl b32 $r4 0x18
+	or $r4 $r8
+	and $r5 $r5 0xff
+
+	// prepare dst address
+	ld b32 $r6 D[$r0 + #ctx_dst_address_high]
+	ld b32 $r7 D[$r0 + #ctx_dst_address_low]
+	shr b32 $r8 $r7 8
+	shl b32 $r6 0x18
+	or $r6 $r8
+	and $r7 $r7 0xff
+
+	// find the proper prep & do functions
+	ld b32 $r8 D[$r0 + #ctx_mode]
+	shl b32 $r8 2
+
+	// run prep
+	ld b16 $r9 D[$r8 + #crypt_dtable]
+	call $r9
+
+	// do it
+	ld b16 $r9 D[$r8 + #crypt_dtable + 2]
+	call $r9
+	cxset 1
+	xdwait
+	cxset 0x61
+	xdwait
+	xdwait
+
+	// update src address
+	shr b32 $r8 $r4 0x18
+	shl b32 $r9 $r4 8
+	add b32 $r9 $r5
+	adc b32 $r8 0
+	st b32 D[$r0 + #ctx_src_address_high] $r8
+	st b32 D[$r0 + #ctx_src_address_low] $r9
+
+	// update dst address
+	shr b32 $r8 $r6 0x18
+	shl b32 $r9 $r6 8
+	add b32 $r9 $r7
+	adc b32 $r8 0
+	st b32 D[$r0 + #ctx_dst_address_high] $r8
+	st b32 D[$r0 + #ctx_dst_address_low] $r9
+
+	// pull updated IV
+	cxset 2
+	mov $r4 #ctx_iv
+	sethi $r4 0x60000
+	xdld $r0 $r4
+	xdwait
+
+	ret
+
+
+crypt_copy_prep:
+	cs0begin 2
+		cxsin $c0
+		cxsout $c0
+	ret
+
+crypt_store_prep:
+	cs0begin 1
+		cxsout $c6
+	ret
+
+crypt_ecb_e_prep:
+	cs0begin 3
+		cxsin $c0
+		cenc $c0 $c0
+		cxsout $c0
+	ret
+
+crypt_ecb_d_prep:
+	ckexp $c7 $c7
+	cs0begin 3
+		cxsin $c0
+		cdec $c0 $c0
+		cxsout $c0
+	ret
+
+crypt_cbc_e_prep:
+	cs0begin 4
+		cxsin $c0
+		cxor $c6 $c0
+		cenc $c6 $c6
+		cxsout $c6
+	ret
+
+crypt_cbc_d_prep:
+	ckexp $c7 $c7
+	cs0begin 5
+		cmov $c2 $c6
+		cxsin $c6
+		cdec $c0 $c6
+		cxor $c0 $c2
+		cxsout $c0
+	ret
+
+crypt_pcbc_e_prep:
+	cs0begin 5
+		cxsin $c0
+		cxor $c6 $c0
+		cenc $c6 $c6
+		cxsout $c6
+		cxor $c6 $c0
+	ret
+
+crypt_pcbc_d_prep:
+	ckexp $c7 $c7
+	cs0begin 5
+		cxsin $c0
+		cdec $c1 $c0
+		cxor $c6 $c1
+		cxsout $c6
+		cxor $c6 $c0
+	ret
+
+crypt_cfb_e_prep:
+	cs0begin 4
+		cenc $c6 $c6
+		cxsin $c0
+		cxor $c6 $c0
+		cxsout $c6
+	ret
+
+crypt_cfb_d_prep:
+	cs0begin 4
+		cenc $c0 $c6
+		cxsin $c6
+		cxor $c0 $c6
+		cxsout $c0
+	ret
+
+crypt_ofb_prep:
+	cs0begin 4
+		cenc $c6 $c6
+		cxsin $c0
+		cxor $c0 $c6
+		cxsout $c0
+	ret
+
+crypt_ctr_prep:
+	cs0begin 5
+		cenc $c1 $c6
+		cadd $c6 1
+		cxsin $c0
+		cxor $c0 $c1
+		cxsout $c0
+	ret
+
+crypt_cbc_mac_prep:
+	cs0begin 3
+		cxsin $c0
+		cxor $c6 $c0
+		cenc $c6 $c6
+	ret
+
+crypt_cmac_finish_complete_prep:
+	cs0begin 7
+		cxsin $c0
+		cxor $c6 $c0
+		cxor $c0 $c0
+		cenc $c0 $c0
+		cprecmac $c0 $c0
+		cxor $c6 $c0
+		cenc $c6 $c6
+	ret
+
+crypt_cmac_finish_partial_prep:
+	cs0begin 8
+		cxsin $c0
+		cxor $c6 $c0
+		cxor $c0 $c0
+		cenc $c0 $c0
+		cprecmac $c0 $c0
+		cprecmac $c0 $c0
+		cxor $c6 $c0
+		cenc $c6 $c6
+	ret
+
+// TODO
+crypt_do_in:
+	add b32 $r3 $r5
+	mov $xdbase $r4
+	mov $r9 #swap
+	sethi $r9 0x20000
+	crypt_do_in_loop:
+		xdld $r5 $r9
+		xdwait
+		cxset 0x22
+		xdst $r0 $r9
+		cs0exec 1
+		xdwait
+		add b32 $r5 0x10
+		cmpu b32 $r5 $r3
+	bra ne #crypt_do_in_loop
+	cxset 1
+	xdwait
+	ret
+
+crypt_do_out:
+	add b32 $r3 $r7
+	mov $xdbase $r6
+	mov $r9 #swap
+	sethi $r9 0x20000
+	crypt_do_out_loop:
+		cs0exec 1
+		cxset 0x61
+		xdld $r7 $r9
+		xdst $r7 $r9
+		cxset 1
+		xdwait
+		add b32 $r7 0x10
+		cmpu b32 $r7 $r3
+	bra ne #crypt_do_out_loop
+	ret
+
+crypt_do_inout:
+	add b32 $r3 $r5
+	mov $r9 #swap
+	sethi $r9 0x20000
+	crypt_do_inout_loop:
+		mov $xdbase $r4
+		xdld $r5 $r9
+		xdwait
+		cxset 0x21
+		xdst $r0 $r9
+		cs0exec 1
+		cxset 0x61
+		mov $xdbase $r6
+		xdld $r7 $r9
+		xdst $r7 $r9
+		cxset 1
+		xdwait
+		add b32 $r5 0x10
+		add b32 $r7 0x10
+		cmpu b32 $r5 $r3
+	bra ne #crypt_do_inout_loop
+	ret
+
+.align 0x100
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
new file mode 100644
index 0000000..09962e4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
@@ -0,0 +1,584 @@
+static uint32_t nv98_pcrypt_data[] = {
+/* 0x0000: ctx_dma */
+/* 0x0000: ctx_dma_query */
+	0x00000000,
+/* 0x0004: ctx_dma_src */
+	0x00000000,
+/* 0x0008: ctx_dma_dst */
+	0x00000000,
+/* 0x000c: ctx_query_address_high */
+	0x00000000,
+/* 0x0010: ctx_query_address_low */
+	0x00000000,
+/* 0x0014: ctx_query_counter */
+	0x00000000,
+/* 0x0018: ctx_cond_address_high */
+	0x00000000,
+/* 0x001c: ctx_cond_address_low */
+	0x00000000,
+/* 0x0020: ctx_cond_off */
+	0x00000000,
+/* 0x0024: ctx_src_address_high */
+	0x00000000,
+/* 0x0028: ctx_src_address_low */
+	0x00000000,
+/* 0x002c: ctx_dst_address_high */
+	0x00000000,
+/* 0x0030: ctx_dst_address_low */
+	0x00000000,
+/* 0x0034: ctx_mode */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0040: ctx_key */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0050: ctx_iv */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0080: swap */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x00a0: common_cmd_dtable */
+	0x0002000c,
+	0xffffff00,
+	0x00020010,
+	0x0000000f,
+	0x00020014,
+	0x00000000,
+	0x00000192,
+	0xfffffffe,
+	0x00020018,
+	0xffffff00,
+	0x0002001c,
+	0x0000000f,
+	0x000001d7,
+	0xfffffff8,
+	0x00000260,
+	0xffffffff,
+/* 0x00e0: engine_cmd_dtable */
+	0x00020040,
+	0x00000000,
+	0x00020044,
+	0x00000000,
+	0x00020048,
+	0x00000000,
+	0x0002004c,
+	0x00000000,
+	0x00020050,
+	0x00000000,
+	0x00020054,
+	0x00000000,
+	0x00020058,
+	0x00000000,
+	0x0002005c,
+	0x00000000,
+	0x00020024,
+	0xffffff00,
+	0x00020028,
+	0x0000000f,
+	0x0002002c,
+	0xffffff00,
+	0x00020030,
+	0x0000000f,
+	0x00000271,
+	0xfffffff0,
+	0x00010285,
+	0xf000000f,
+/* 0x0150: crypt_dtable */
+	0x04db0321,
+	0x04b1032f,
+	0x04db0339,
+	0x04db034b,
+	0x04db0361,
+	0x04db0377,
+	0x04db0395,
+	0x04db03af,
+	0x04db03cd,
+	0x04db03e3,
+	0x04db03f9,
+	0x04db040f,
+	0x04830429,
+	0x0483043b,
+	0x0483045d,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+static uint32_t nv98_pcrypt_code[] = {
+	0x17f004bd,
+	0x0010fe35,
+	0xf10004fe,
+	0xf0fff017,
+	0x27f10013,
+	0x21d00400,
+	0x0c15f0c0,
+	0xf00021d0,
+	0x27f10317,
+	0x21d01200,
+	0x1031f400,
+/* 0x002f: spin */
+	0xf40031f4,
+	0x0ef40028,
+/* 0x0035: ih */
+	0x8001cffd,
+	0xb00812c4,
+	0x0bf40024,
+	0x0027f167,
+	0x002bfe77,
+	0xf00007fe,
+	0x23f00027,
+	0x0037f105,
+	0x0034cf14,
+	0xb0014594,
+	0x18f40055,
+	0x0602fa17,
+	0x4af003f8,
+	0x0034d01e,
+	0xd00147f0,
+	0x0ef48034,
+/* 0x0075: ctxload */
+	0x4034cf33,
+	0xb0014f94,
+	0x18f400f5,
+	0x0502fa21,
+	0x57f003f8,
+	0x0267f000,
+/* 0x008c: ctxload_dma_loop */
+	0xa07856bc,
+	0xb6018068,
+	0x87d00884,
+	0x0162b600,
+/* 0x009f: dummyload */
+	0xf0f018f4,
+	0x35d00257,
+/* 0x00a5: noctx */
+	0x0412c480,
+	0xf50024b0,
+	0xf100df0b,
+	0xcf190037,
+	0x33cf4032,
+	0xff24e400,
+	0x1024b607,
+	0x07bf45e4,
+	0xf50054b0,
+	0xf100b90b,
+	0xf1fae057,
+	0xb000ce67,
+	0x18f4c044,
+	0xa057f14d,
+	0x8867f1fc,
+	0x8044b000,
+	0xb03f18f4,
+	0x18f46044,
+	0x5044b019,
+	0xf1741bf4,
+	0xbd220027,
+	0x0233f034,
+	0xf50023d0,
+/* 0x0103: dma_cmd */
+	0xb000810e,
+	0x18f46344,
+	0x0245945e,
+	0xfe8050b7,
+	0x801e39f0,
+	0x40b70053,
+	0x44b60120,
+	0x0043d008,
+/* 0x0123: dtable_cmd */
+	0xb8600ef4,
+	0x18f40446,
+	0x0344b63e,
+	0x980045bb,
+	0x53fd0145,
+	0x0054b004,
+	0x58291bf4,
+	0x46580045,
+	0x0264b001,
+	0x98170bf4,
+	0x67fd0807,
+	0x0164b004,
+	0xf9300bf4,
+	0x0f01f455,
+/* 0x015b: cmd_setctx */
+	0x80280ef4,
+	0x0ef40053,
+/* 0x0161: invalid_bitfield */
+	0x0125f022,
+/* 0x0164: dispatch_error */
+/* 0x0164: illegal_mthd */
+	0x100047f1,
+	0xd00042d0,
+	0x47f04043,
+	0x0004d040,
+/* 0x0174: im_loop */
+	0xf08004cf,
+	0x44b04044,
+	0xf71bf400,
+/* 0x0180: cmddone */
+	0x1d0037f1,
+	0xd00147f0,
+/* 0x018a: nocmd */
+	0x11c40034,
+	0x4001d00c,
+/* 0x0192: cmd_query_get */
+	0x38f201f8,
+	0x0325f001,
+	0x0b0047f1,
+/* 0x019c: ptimer_retry */
+	0xcf4046cf,
+	0x47cf0045,
+	0x0467b840,
+	0x98f41bf4,
+	0x04800504,
+	0x21008020,
+	0x80220580,
+	0x0bfe2306,
+	0x03049800,
+	0xfe1844b6,
+	0x04980047,
+	0x8057f104,
+	0x0253f000,
+	0xf80645fa,
+/* 0x01d7: cmd_cond_mode */
+	0xf400f803,
+	0x25f00131,
+	0x0534b002,
+	0xf41218f4,
+	0x34b00132,
+	0x0b18f402,
+	0x800136f0,
+/* 0x01f2: return */
+	0x00f80803,
+/* 0x01f4: cmd_cond_mode_queryful */
+	0x98060498,
+	0x56c40705,
+	0x0855b6ff,
+	0xfd1844b6,
+	0x47fe0545,
+	0x000bfe00,
+	0x008057f1,
+	0xfa0253f0,
+	0x34b00565,
+	0x131bf402,
+	0x049803f8,
+	0x0044b021,
+	0x800b4cf0,
+	0x00f80804,
+/* 0x022c: cmd_cond_mode_double */
+	0xb61060b6,
+	0x65fa1050,
+	0x9803f805,
+	0x06982005,
+	0x0456b824,
+	0x980b4cf0,
+	0x06982105,
+	0x0456b825,
+	0xfd0b5cf0,
+	0x34b00445,
+	0x0b5cf003,
+	0x800645fd,
+	0x00f80804,
+/* 0x0260: cmd_wrcache_flush */
+	0xf10132f4,
+	0xbd220027,
+	0x0133f034,
+	0xf80023d0,
+/* 0x0271: crypt_cmd_mode */
+	0x0131f400,
+	0xb00225f0,
+	0x18f40f34,
+	0x0132f409,
+/* 0x0283: crypt_cmd_mode_return */
+	0xf80d0380,
+/* 0x0285: crypt_cmd_length */
+	0x0034b000,
+	0xf4fb0bf4,
+	0x47f0033c,
+	0x0743f040,
+	0xf00604fa,
+	0x43f05047,
+	0x0604fa06,
+	0x3cf503f8,
+	0x47f1c407,
+	0x4bfe2100,
+	0x09049800,
+	0x950a0598,
+	0x44b60858,
+	0x0548fd18,
+	0x98ff55c4,
+	0x07980b06,
+	0x0878950c,
+	0xfd1864b6,
+	0x77c40568,
+	0x0d0898ff,
+	0x580284b6,
+	0x95f9a889,
+	0xf9a98958,
+	0x013cf495,
+	0x3cf403f8,
+	0xf803f861,
+	0x18489503,
+	0xbb084994,
+	0x81b60095,
+	0x09088000,
+	0x950a0980,
+	0x69941868,
+	0x0097bb08,
+	0x800081b6,
+	0x09800b08,
+	0x023cf40c,
+	0xf05047f0,
+	0x04fa0643,
+	0xf803f805,
+/* 0x0321: crypt_copy_prep */
+	0x203cf500,
+	0x003cf594,
+	0x003cf588,
+/* 0x032f: crypt_store_prep */
+	0xf500f88c,
+	0xf594103c,
+	0xf88c063c,
+/* 0x0339: crypt_ecb_e_prep */
+	0x303cf500,
+	0x003cf594,
+	0x003cf588,
+	0x003cf5d0,
+/* 0x034b: crypt_ecb_d_prep */
+	0xf500f88c,
+	0xf5c8773c,
+	0xf594303c,
+	0xf588003c,
+	0xf5d4003c,
+	0xf88c003c,
+/* 0x0361: crypt_cbc_e_prep */
+	0x403cf500,
+	0x003cf594,
+	0x063cf588,
+	0x663cf5ac,
+	0x063cf5d0,
+/* 0x0377: crypt_cbc_d_prep */
+	0xf500f88c,
+	0xf5c8773c,
+	0xf594503c,
+	0xf584623c,
+	0xf588063c,
+	0xf5d4603c,
+	0xf5ac203c,
+	0xf88c003c,
+/* 0x0395: crypt_pcbc_e_prep */
+	0x503cf500,
+	0x003cf594,
+	0x063cf588,
+	0x663cf5ac,
+	0x063cf5d0,
+	0x063cf58c,
+/* 0x03af: crypt_pcbc_d_prep */
+	0xf500f8ac,
+	0xf5c8773c,
+	0xf594503c,
+	0xf588003c,
+	0xf5d4013c,
+	0xf5ac163c,
+	0xf58c063c,
+	0xf8ac063c,
+/* 0x03cd: crypt_cfb_e_prep */
+	0x403cf500,
+	0x663cf594,
+	0x003cf5d0,
+	0x063cf588,
+	0x063cf5ac,
+/* 0x03e3: crypt_cfb_d_prep */
+	0xf500f88c,
+	0xf594403c,
+	0xf5d0603c,
+	0xf588063c,
+	0xf5ac603c,
+	0xf88c003c,
+/* 0x03f9: crypt_ofb_prep */
+	0x403cf500,
+	0x663cf594,
+	0x003cf5d0,
+	0x603cf588,
+	0x003cf5ac,
+/* 0x040f: crypt_ctr_prep */
+	0xf500f88c,
+	0xf594503c,
+	0xf5d0613c,
+	0xf5b0163c,
+	0xf588003c,
+	0xf5ac103c,
+	0xf88c003c,
+/* 0x0429: crypt_cbc_mac_prep */
+	0x303cf500,
+	0x003cf594,
+	0x063cf588,
+	0x663cf5ac,
+/* 0x043b: crypt_cmac_finish_complete_prep */
+	0xf500f8d0,
+	0xf594703c,
+	0xf588003c,
+	0xf5ac063c,
+	0xf5ac003c,
+	0xf5d0003c,
+	0xf5bc003c,
+	0xf5ac063c,
+	0xf8d0663c,
+/* 0x045d: crypt_cmac_finish_partial_prep */
+	0x803cf500,
+	0x003cf594,
+	0x063cf588,
+	0x003cf5ac,
+	0x003cf5ac,
+	0x003cf5d0,
+	0x003cf5bc,
+	0x063cf5bc,
+	0x663cf5ac,
+/* 0x0483: crypt_do_in */
+	0xbb00f8d0,
+	0x47fe0035,
+	0x8097f100,
+	0x0293f000,
+/* 0x0490: crypt_do_in_loop */
+	0xf80559fa,
+	0x223cf403,
+	0xf50609fa,
+	0xf898103c,
+	0x1050b603,
+	0xf40453b8,
+	0x3cf4e91b,
+	0xf803f801,
+/* 0x04b1: crypt_do_out */
+	0x0037bb00,
+	0xf10067fe,
+	0xf0008097,
+/* 0x04be: crypt_do_out_loop */
+	0x3cf50293,
+	0x3cf49810,
+	0x0579fa61,
+	0xf40679fa,
+	0x03f8013c,
+	0xb81070b6,
+	0x1bf40473,
+/* 0x04db: crypt_do_inout */
+	0xbb00f8e8,
+	0x97f10035,
+	0x93f00080,
+/* 0x04e5: crypt_do_inout_loop */
+	0x0047fe02,
+	0xf80559fa,
+	0x213cf403,
+	0xf50609fa,
+	0xf498103c,
+	0x67fe613c,
+	0x0579fa00,
+	0xf40679fa,
+	0x03f8013c,
+	0xb61050b6,
+	0x53b81070,
+	0xd41bf404,
+	0x000000f8,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
new file mode 100644
index 0000000..5bc021f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
+
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+#include <engine/crypt.h>
+
+struct nv84_crypt_priv {
+	struct nouveau_engine base;
+};
+
+/*******************************************************************************
+ * Crypt object classes
+ ******************************************************************************/
+
+static int
+nv84_crypt_object_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj *obj;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    16, 16, 0, &obj);
+	*pobject = nv_object(obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+	nv_wo32(obj, 0x0c, 0x00000000);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv84_crypt_ofuncs = {
+	.ctor = nv84_crypt_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv84_crypt_sclass[] = {
+	{ 0x74c1, &nv84_crypt_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PCRYPT context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_crypt_cclass = {
+	.handle = NV_ENGCTX(CRYPT, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_engctx_ctor,
+		.dtor = _nouveau_engctx_dtor,
+		.init = _nouveau_engctx_init,
+		.fini = _nouveau_engctx_fini,
+		.rd32 = _nouveau_engctx_rd32,
+		.wr32 = _nouveau_engctx_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PCRYPT engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_bitfield nv84_crypt_intr_mask[] = {
+	{ 0x00000001, "INVALID_STATE" },
+	{ 0x00000002, "ILLEGAL_MTHD" },
+	{ 0x00000004, "ILLEGAL_CLASS" },
+	{ 0x00000080, "QUERY" },
+	{ 0x00000100, "FAULT" },
+	{}
+};
+
+static void
+nv84_crypt_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nv84_crypt_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, 0x102130);
+	u32 mthd = nv_rd32(priv, 0x102190);
+	u32 data = nv_rd32(priv, 0x102194);
+	u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat) {
+		nv_error(priv, "%s", "");
+		nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
+		pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n",
+		       chid, (u64)inst << 12, nouveau_client_name(engctx),
+		       mthd, data);
+	}
+
+	nv_wr32(priv, 0x102130, stat);
+	nv_wr32(priv, 0x10200c, 0x10);
+
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nv84_crypt_tlb_flush(struct nouveau_engine *engine)
+{
+	nv50_vm_flush_engine(&engine->base, 0x0a);
+	return 0;
+}
+
+static int
+nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv84_crypt_priv *priv;
+	int ret;
+
+	ret = nouveau_engine_create(parent, engine, oclass, true,
+				    "PCRYPT", "crypt", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00004000;
+	nv_subdev(priv)->intr = nv84_crypt_intr;
+	nv_engine(priv)->cclass = &nv84_crypt_cclass;
+	nv_engine(priv)->sclass = nv84_crypt_sclass;
+	nv_engine(priv)->tlb_flush = nv84_crypt_tlb_flush;
+	return 0;
+}
+
+static int
+nv84_crypt_init(struct nouveau_object *object)
+{
+	struct nv84_crypt_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_engine_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x102130, 0xffffffff);
+	nv_wr32(priv, 0x102140, 0xffffffbf);
+	nv_wr32(priv, 0x10200c, 0x00000010);
+	return 0;
+}
+
+struct nouveau_oclass
+nv84_crypt_oclass = {
+	.handle = NV_ENGINE(CRYPT, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_crypt_ctor,
+		.dtor = _nouveau_engine_dtor,
+		.init = nv84_crypt_init,
+		.fini = _nouveau_engine_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
new file mode 100644
index 0000000..8bf8955
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/falcon.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+#include <engine/crypt.h>
+
+#include "fuc/nv98.fuc.h"
+
+struct nv98_crypt_priv {
+	struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * Crypt object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_crypt_sclass[] = {
+	{ 0x88b4, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PCRYPT context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_crypt_cclass = {
+	.handle = NV_ENGCTX(CRYPT, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PCRYPT engine/subdev functions
+ ******************************************************************************/
+
+static const struct nouveau_enum nv98_crypt_isr_error_name[] = {
+	{ 0x0000, "ILLEGAL_MTHD" },
+	{ 0x0001, "INVALID_BITFIELD" },
+	{ 0x0002, "INVALID_ENUM" },
+	{ 0x0003, "QUERY" },
+	{}
+};
+
+static void
+nv98_crypt_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nv98_crypt_priv *priv = (void *)subdev;
+	u32 disp = nv_rd32(priv, 0x08701c);
+	u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
+	u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
+	u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
+	u32 addr = nv_rd32(priv, 0x087040) >> 16;
+	u32 mthd = (addr & 0x07ff) << 2;
+	u32 subc = (addr & 0x3800) >> 11;
+	u32 data = nv_rd32(priv, 0x087044);
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
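+	/* on a dispatch error the fuc's illegal_mthd path stores
+	 * mthd << 16 | error flags (read back above as addr/ssta) and the
+	 * method data in the 0x087040/0x087044 pair; mthd is in dword
+	 * units, hence the << 2 when decoding.  this is a reading of
+	 * nv98.fuc, not a documented register layout. */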
+	if (stat & 0x00000040) {
+		nv_error(priv, "DISPATCH_ERROR [");
+		nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
+		pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
+		       chid, (u64)inst << 12, nouveau_client_name(engctx),
+		       subc, mthd, data);
+		nv_wr32(priv, 0x087004, 0x00000040);
+		stat &= ~0x00000040;
+	}
+
+	if (stat) {
+		nv_error(priv, "unhandled intr 0x%08x\n", stat);
+		nv_wr32(priv, 0x087004, stat);
+	}
+
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nv98_crypt_tlb_flush(struct nouveau_engine *engine)
+{
+	nv50_vm_flush_engine(&engine->base, 0x0a);
+	return 0;
+}
+
+static int
+nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv98_crypt_priv *priv;
+	int ret;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true,
+				    "PCRYPT", "crypt", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00004000;
+	nv_subdev(priv)->intr = nv98_crypt_intr;
+	nv_engine(priv)->cclass = &nv98_crypt_cclass;
+	nv_engine(priv)->sclass = nv98_crypt_sclass;
+	nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
+	nv_falcon(priv)->code.data = nv98_pcrypt_code;
+	nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code);
+	nv_falcon(priv)->data.data = nv98_pcrypt_data;
+	nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data);
+	return 0;
+}
+
+struct nouveau_oclass
+nv98_crypt_oclass = {
+	.handle = NV_ENGINE(CRYPT, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv98_crypt_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = _nouveau_falcon_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/base.c
new file mode 100644
index 0000000..4c72571
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/device.h>
+#include <core/client.h>
+#include <core/option.h>
+
+#include <core/class.h>
+
+#include <engine/device.h>
+
+static DEFINE_MUTEX(nv_devices_mutex);
+static LIST_HEAD(nv_devices);
+
+struct nouveau_device *
+nouveau_device_find(u64 name)
+{
+	struct nouveau_device *device, *match = NULL;
+	mutex_lock(&nv_devices_mutex);
+	list_for_each_entry(device, &nv_devices, head) {
+		if (device->handle == name) {
+			match = device;
+			break;
+		}
+	}
+	mutex_unlock(&nv_devices_mutex);
+	return match;
+}
+
+/******************************************************************************
+ * nouveau_devobj (0x0080): class implementation
+ *****************************************************************************/
+struct nouveau_devobj {
+	struct nouveau_parent base;
+	struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
+};
+
+static const u64 disable_map[] = {
+	[NVDEV_SUBDEV_VBIOS]	= NV_DEVICE_DISABLE_VBIOS,
+	[NVDEV_SUBDEV_DEVINIT]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_GPIO]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_I2C]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_CLOCK]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_MXM]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_MC]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_BUS]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_TIMER]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_FB]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_LTCG]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_IBUS]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_INSTMEM]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_VM]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_BAR]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_VOLT]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_THERM]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_ENGINE_DMAOBJ]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_ENGINE_FIFO]	= NV_DEVICE_DISABLE_FIFO,
+	[NVDEV_ENGINE_SW]	= NV_DEVICE_DISABLE_FIFO,
+	[NVDEV_ENGINE_GR]	= NV_DEVICE_DISABLE_GRAPH,
+	[NVDEV_ENGINE_MPEG]	= NV_DEVICE_DISABLE_MPEG,
+	[NVDEV_ENGINE_ME]	= NV_DEVICE_DISABLE_ME,
+	[NVDEV_ENGINE_VP]	= NV_DEVICE_DISABLE_VP,
+	[NVDEV_ENGINE_CRYPT]	= NV_DEVICE_DISABLE_CRYPT,
+	[NVDEV_ENGINE_BSP]	= NV_DEVICE_DISABLE_BSP,
+	[NVDEV_ENGINE_PPP]	= NV_DEVICE_DISABLE_PPP,
+	[NVDEV_ENGINE_COPY0]	= NV_DEVICE_DISABLE_COPY0,
+	[NVDEV_ENGINE_COPY1]	= NV_DEVICE_DISABLE_COPY1,
+	[NVDEV_ENGINE_UNK1C1]	= NV_DEVICE_DISABLE_UNK1C1,
+	[NVDEV_ENGINE_VENC]	= NV_DEVICE_DISABLE_VENC,
+	[NVDEV_ENGINE_DISP]	= NV_DEVICE_DISABLE_DISP,
+	[NVDEV_SUBDEV_NR]	= 0,
+};
+
+static int
+nouveau_devobj_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nouveau_client *client = nv_client(parent);
+	struct nouveau_device *device;
+	struct nouveau_devobj *devobj;
+	struct nv_device_class *args = data;
+	u32 boot0, strap;
+	u64 disable, mmio_base, mmio_size;
+	void __iomem *map;
+	int ret, i, c;
+
+	if (size < sizeof(struct nv_device_class))
+		return -EINVAL;
+
+	/* find the device subdev that matches what the client requested */
+	device = nv_device(client->device);
+	if (args->device != ~0) {
+		device = nouveau_device_find(args->device);
+		if (!device)
+			return -ENODEV;
+	}
+
+	ret = nouveau_parent_create(parent, nv_object(device), oclass, 0, NULL,
+				    (1ULL << NVDEV_ENGINE_DMAOBJ) |
+				    (1ULL << NVDEV_ENGINE_FIFO) |
+				    (1ULL << NVDEV_ENGINE_DISP), &devobj);
+	*pobject = nv_object(devobj);
+	if (ret)
+		return ret;
+
+	mmio_base = pci_resource_start(device->pdev, 0);
+	mmio_size = pci_resource_len(device->pdev, 0);
+
+	/* translate api disable mask into internal mapping */
+	disable = args->debug0;
+	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+		if (args->disable & disable_map[i])
+			disable |= (1ULL << i);
+	}
+
+	/* identify the chipset, and determine classes of subdev/engines */
+	if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) &&
+	    !device->card_type) {
+		map = ioremap(mmio_base, 0x102000);
+		if (map == NULL)
+			return -ENOMEM;
+
+		/* switch mmio to cpu's native endianness */
+#ifndef __BIG_ENDIAN
+		if (ioread32_native(map + 0x000004) != 0x00000000)
+#else
+		if (ioread32_native(map + 0x000004) == 0x00000000)
+#endif
+			iowrite32_native(0x01000001, map + 0x000004);
+
+		/* read boot0 and strapping information */
+		boot0 = ioread32_native(map + 0x000000);
+		strap = ioread32_native(map + 0x101000);
+		iounmap(map);
+
+		/* determine chipset and derive architecture from it */
+		if ((boot0 & 0x0f000000) > 0) {
+			device->chipset = (boot0 & 0xff00000) >> 20;
+			switch (device->chipset & 0xf0) {
+			case 0x10: device->card_type = NV_10; break;
+			case 0x20: device->card_type = NV_20; break;
+			case 0x30: device->card_type = NV_30; break;
+			case 0x40:
+			case 0x60: device->card_type = NV_40; break;
+			case 0x50:
+			case 0x80:
+			case 0x90:
+			case 0xa0: device->card_type = NV_50; break;
+			case 0xc0: device->card_type = NV_C0; break;
+			case 0xd0: device->card_type = NV_D0; break;
+			case 0xe0:
+			case 0xf0: device->card_type = NV_E0; break;
+			default:
+				break;
+			}
+		} else
+		if ((boot0 & 0xff00fff0) == 0x20004000) {
+			if (boot0 & 0x00f00000)
+				device->chipset = 0x05;
+			else
+				device->chipset = 0x04;
+			device->card_type = NV_04;
+		}
+
+		switch (device->card_type) {
+		case NV_04: ret = nv04_identify(device); break;
+		case NV_10: ret = nv10_identify(device); break;
+		case NV_20: ret = nv20_identify(device); break;
+		case NV_30: ret = nv30_identify(device); break;
+		case NV_40: ret = nv40_identify(device); break;
+		case NV_50: ret = nv50_identify(device); break;
+		case NV_C0:
+		case NV_D0: ret = nvc0_identify(device); break;
+		case NV_E0: ret = nve0_identify(device); break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+
+		if (ret) {
+			nv_error(device, "unknown chipset, 0x%08x\n", boot0);
+			return ret;
+		}
+
+		nv_info(device, "BOOT0  : 0x%08x\n", boot0);
+		nv_info(device, "Chipset: %s (NV%02X)\n",
+			device->cname, device->chipset);
+		nv_info(device, "Family : NV%02X\n", device->card_type);
+
+		/* determine frequency of timing crystal */
+		if ( device->chipset < 0x17 ||
+		    (device->chipset >= 0x20 && device->chipset < 0x25))
+			strap &= 0x00000040;
+		else
+			strap &= 0x00400040;
+
+		switch (strap) {
+		case 0x00000000: device->crystal = 13500; break;
+		case 0x00000040: device->crystal = 14318; break;
+		case 0x00400000: device->crystal = 27000; break;
+		case 0x00400040: device->crystal = 25000; break;
+		}
+
+		nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
+	}
+
+	if (!(args->disable & NV_DEVICE_DISABLE_MMIO) &&
+	    !nv_subdev(device)->mmio) {
+		nv_subdev(device)->mmio  = ioremap(mmio_base, mmio_size);
+		if (!nv_subdev(device)->mmio) {
+			nv_error(device, "unable to map device registers\n");
+			return -ENOMEM;
+		}
+	}
+
+	/* ensure requested subsystems are available for use */
+	for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
+		if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
+			continue;
+
+		if (device->subdev[i]) {
+			nouveau_object_ref(device->subdev[i],
+					  &devobj->subdev[i]);
+			continue;
+		}
+
+		ret = nouveau_object_ctor(nv_object(device), NULL,
+					  oclass, NULL, i,
+					  &devobj->subdev[i]);
+		if (ret == -ENODEV)
+			continue;
+		if (ret)
+			return ret;
+
+		/* note: can't init *any* subdevs until devinit has been run
+		 * due to not knowing exactly what the vbios init tables will
+		 * mess with.  devinit also can't be run until all of its
+		 * dependencies have been created.
+		 *
+		 * this code delays init of any subdev until all of devinit's
+		 * dependencies have been created, and then initialises each
+		 * subdev in turn as they're created.
+		 */
+		while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
+			struct nouveau_object *subdev = devobj->subdev[c++];
+			if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
+				ret = nouveau_object_inc(subdev);
+				if (ret)
+					return ret;
+				atomic_dec(&nv_object(device)->usecount);
+			} else
+			if (subdev) {
+				nouveau_subdev_reset(subdev);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void
+nouveau_devobj_dtor(struct nouveau_object *object)
+{
+	struct nouveau_devobj *devobj = (void *)object;
+	int i;
+
+	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
+		nouveau_object_ref(NULL, &devobj->subdev[i]);
+
+	nouveau_parent_destroy(&devobj->base);
+}
+
+static u8
+nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
+{
+	return nv_rd08(object->engine, addr);
+}
+
+static u16
+nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
+{
+	return nv_rd16(object->engine, addr);
+}
+
+static u32
+nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
+{
+	return nv_rd32(object->engine, addr);
+}
+
+static void
+nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
+{
+	nv_wr08(object->engine, addr, data);
+}
+
+static void
+nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
+{
+	nv_wr16(object->engine, addr, data);
+}
+
+static void
+nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	nv_wr32(object->engine, addr, data);
+}
+
+static struct nouveau_ofuncs
+nouveau_devobj_ofuncs = {
+	.ctor = nouveau_devobj_ctor,
+	.dtor = nouveau_devobj_dtor,
+	.init = _nouveau_parent_init,
+	.fini = _nouveau_parent_fini,
+	.rd08 = nouveau_devobj_rd08,
+	.rd16 = nouveau_devobj_rd16,
+	.rd32 = nouveau_devobj_rd32,
+	.wr08 = nouveau_devobj_wr08,
+	.wr16 = nouveau_devobj_wr16,
+	.wr32 = nouveau_devobj_wr32,
+};
+
+/******************************************************************************
+ * nouveau_device: engine functions
+ *****************************************************************************/
+static struct nouveau_oclass
+nouveau_device_sclass[] = {
+	{ 0x0080, &nouveau_devobj_ofuncs },
+	{}
+};
+
+static int
+nouveau_device_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_device *device = (void *)object;
+	struct nouveau_object *subdev;
+	int ret, i;
+
+	for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
+		if ((subdev = device->subdev[i])) {
+			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+				ret = nouveau_object_dec(subdev, suspend);
+				if (ret && suspend)
+					goto fail;
+			}
+		}
+	}
+
+	ret = 0;
+fail:
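+	/* suspend failed part-way; walk back up the list and re-init the
+	 * subdevs that had already been stopped */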
+	for (; ret && i < NVDEV_SUBDEV_NR; i++) {
+		if ((subdev = device->subdev[i])) {
+			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+				ret = nouveau_object_inc(subdev);
+				if (ret) {
+					/* XXX */
+				}
+			}
+		}
+	}
+
+	return ret;
+}
+
+static int
+nouveau_device_init(struct nouveau_object *object)
+{
+	struct nouveau_device *device = (void *)object;
+	struct nouveau_object *subdev;
+	int ret, i;
+
+	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+		if ((subdev = device->subdev[i])) {
+			if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
+				ret = nouveau_object_inc(subdev);
+				if (ret)
+					goto fail;
+			} else {
+				nouveau_subdev_reset(subdev);
+			}
+		}
+	}
+
+	ret = 0;
+fail:
+	for (--i; ret && i >= 0; i--) {
+		if ((subdev = device->subdev[i])) {
+			if (!nv_iclass(subdev, NV_ENGINE_CLASS))
+				nouveau_object_dec(subdev, false);
+		}
+	}
+
+	return ret;
+}
+
+static void
+nouveau_device_dtor(struct nouveau_object *object)
+{
+	struct nouveau_device *device = (void *)object;
+
+	mutex_lock(&nv_devices_mutex);
+	list_del(&device->head);
+	mutex_unlock(&nv_devices_mutex);
+
+	if (nv_subdev(device)->mmio)
+		iounmap(nv_subdev(device)->mmio);
+
+	nouveau_engine_destroy(&device->base);
+}
+
+static struct nouveau_oclass
+nouveau_device_oclass = {
+	.handle = NV_ENGINE(DEVICE, 0x00),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.dtor = nouveau_device_dtor,
+		.init = nouveau_device_init,
+		.fini = nouveau_device_fini,
+	},
+};
+
+int
+nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
+		       const char *cfg, const char *dbg,
+		       int length, void **pobject)
+{
+	struct nouveau_device *device;
+	int ret = -EEXIST;
+
+	mutex_lock(&nv_devices_mutex);
+	list_for_each_entry(device, &nv_devices, head) {
+		if (device->handle == name)
+			goto done;
+	}
+
+	ret = nouveau_engine_create_(NULL, NULL, &nouveau_device_oclass, true,
+				     "DEVICE", "device", length, pobject);
+	device = *pobject;
+	if (ret)
+		goto done;
+
+	device->pdev = pdev;
+	device->handle = name;
+	device->cfgopt = cfg;
+	device->dbgopt = dbg;
+	device->name = sname;
+
+	nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
+	nv_engine(device)->sclass = nouveau_device_sclass;
+	list_add(&device->head, &nv_devices);
+done:
+	mutex_unlock(&nv_devices_mutex);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
new file mode 100644
index 0000000..a0284cf
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
+int
+nv04_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0x04:
+		device->cname = "NV04";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv04_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv04_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x05:
+		device->cname = "NV05";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv04_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv04_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv04_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown RIVA chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
new file mode 100644
index 0000000..1b7809a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
+int
+nv10_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0x10:
+		device->cname = "NV10";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x15:
+		device->cname = "NV15";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x16:
+		device->cname = "NV16";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x1a:
+		device->cname = "nForce";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x11:
+		device->cname = "NV11";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv10_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x17:
+		device->cname = "NV17";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x1f:
+		device->cname = "nForce2";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x18:
+		device->cname = "NV18";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv10_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown Celsius chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
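
All of these identify entry points share one shape: switch on device->chipset, set a printable name in device->cname, and fill device->oclass[] with the subdev and engine classes the core should instantiate for that chipset; slots left NULL mean the unit does not exist on that part. A minimal sketch of how such a table could be consumed follows; example_attach_units() and example_attach_one() are hypothetical names for illustration only, assuming the oclass[] array is indexed by the NVDEV_* enum and sized by NVDEV_SUBDEV_NR.

	/*
	 * Hypothetical sketch, not nouveau core code: walk the table an
	 * identify routine filled in and bring up every unit present.
	 * example_attach_one() is an assumed helper.
	 */
	static int
	example_attach_units(struct nouveau_device *device)
	{
		int i, ret;

		for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
			if (!device->oclass[i])
				continue;	/* unit absent on this chipset */
			ret = example_attach_one(device, i, device->oclass[i]);
			if (ret)
				return ret;
		}
		return 0;
	}
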
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
new file mode 100644
index 0000000..12a4005
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
+int
+nv20_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0x20:
+		device->cname = "NV20";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv20_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x25:
+		device->cname = "NV25";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x28:
+		device->cname = "NV28";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv25_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x2a:
+		device->cname = "NV2A";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv2a_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown Kelvin chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
new file mode 100644
index 0000000..cef0f1e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/disp.h>
+
+int
+nv30_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0x30:
+		device->cname = "NV30";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x35:
+		device->cname = "NV35";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv35_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x31:
+		device->cname = "NV31";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv30_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x36:
+		device->cname = "NV36";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv36_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv35_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x34:
+		device->cname = "NV34";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv17_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv34_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv31_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown Rankine chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
new file mode 100644
index 0000000..1719cb0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/disp.h>
+
+int
+nv40_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0x40:
+		device->cname = "NV40";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x41:
+		device->cname = "NV41";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x42:
+		device->cname = "NV42";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x43:
+		device->cname = "NV43";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x45:
+		device->cname = "NV45";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x47:
+		device->cname = "G70";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv47_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x49:
+		device->cname = "G71";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x4b:
+		device->cname = "G73";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x44:
+		device->cname = "NV44";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x46:
+		device->cname = "G72";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x4a:
+		device->cname = "NV44A";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x4c:
+		device->cname = "C61";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x4e:
+		device->cname = "C51";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv4e_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv4e_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x63:
+		device->cname = "C73";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x67:
+		device->cname = "C67";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	case 0x68:
+		device->cname = "C68";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv40_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv10_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv40_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv40_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv04_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown Curie chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
new file mode 100644
index 0000000..5e8c3de
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/vp.h>
+#include <engine/crypt.h>
+#include <engine/bsp.h>
+#include <engine/ppp.h>
+#include <engine/copy.h>
+#include <engine/disp.h>
+
+int
+nv50_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0x50:
+		device->cname = "G80";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv50_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv50_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+		break;
+	case 0x84:
+		device->cname = "G84";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
+		break;
+	case 0x86:
+		device->cname = "G86";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
+		break;
+	case 0x92:
+		device->cname = "G92";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
+		break;
+	case 0x94:
+		device->cname = "G94";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		break;
+	case 0x96:
+		device->cname = "G96";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		break;
+	case 0x98:
+		device->cname = "G98";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		break;
+	case 0xa0:
+		device->cname = "G200";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva0_disp_oclass;
+		break;
+	case 0xaa:
+		device->cname = "MCP77/MCP78";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		break;
+	case 0xac:
+		device->cname = "MCP79/MCP7A";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv84_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
+		break;
+	case 0xa3:
+		device->cname = "GT215";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_MPEG   ] = &nv84_mpeg_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		break;
+	case 0xa5:
+		device->cname = "GT216";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		break;
+	case 0xa8:
+		device->cname = "GT218";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		break;
+	case 0xaf:
+		device->cname = "MCP89";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nv50_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nv50_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nv84_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nv50_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nv50_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown Tesla chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
new file mode 100644
index 0000000..a36e64e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/ltcg.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/vp.h>
+#include <engine/bsp.h>
+#include <engine/ppp.h>
+#include <engine/copy.h>
+#include <engine/disp.h>
+
+int
+nvc0_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0xc0:
+		device->cname = "GF100";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		break;
+	case 0xc4:
+		device->cname = "GF104";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		break;
+	case 0xc3:
+		device->cname = "GF106";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		break;
+	case 0xce:
+		device->cname = "GF114";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		break;
+	case 0xcf:
+		device->cname = "GF116";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		break;
+	case 0xc1:
+		device->cname = "GF108";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		break;
+	case 0xc8:
+		device->cname = "GF110";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
+		break;
+	case 0xd9:
+		device->cname = "GF119";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nvd0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		break;
+	case 0xd7:
+		device->cname = "GF117";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nvd0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nvc0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+		break;
+	default:
+		nv_fatal(device, "unknown Fermi chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
new file mode 100644
index 0000000..a354e40
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+#include <subdev/clock.h>
+#include <subdev/therm.h>
+#include <subdev/mxm.h>
+#include <subdev/devinit.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/ltcg.h>
+#include <subdev/ibus.h>
+#include <subdev/instmem.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+
+#include <engine/device.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+#include <engine/copy.h>
+#include <engine/bsp.h>
+#include <engine/vp.h>
+#include <engine/ppp.h>
+
+int
+nve0_identify(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0xe4:
+		device->cname = "GK104";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		break;
+	case 0xe7:
+		device->cname = "GK107";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		break;
+	case 0xe6:
+		device->cname = "GK106";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+		break;
+	case 0xf0:
+		device->cname = "GK110";
+		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS   ] = &nve0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
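+		/* fifo, gr and the copy/video engines appear to be stubbed
+		 * out for GK110 at this point, presumably pending working
+		 * support; only the display class is enabled below. */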
+#if 0
+		device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
+#endif
+		device->oclass[NVDEV_ENGINE_DISP   ] = &nvf0_disp_oclass;
+#if 0
+		device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_COPY2  ] = &nve0_copy2_oclass;
+		device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
+#endif
+		break;
+	default:
+		nv_fatal(device, "unknown Kepler chipset\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/base.c
new file mode 100644
index 0000000..7a5cae4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/disp.h>
+
+void
+_nouveau_disp_dtor(struct nouveau_object *object)
+{
+	struct nouveau_disp *disp = (void *)object;
+	nouveau_event_destroy(&disp->vblank);
+	nouveau_engine_destroy(&disp->base);
+}
+
+int
+nouveau_disp_create_(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, int heads,
+		     const char *intname, const char *extname,
+		     int length, void **pobject)
+{
+	struct nouveau_disp *disp;
+	int ret;
+
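+	/* note *pobject is read back before the error check below: the
+	 * create helper is expected to have stored the (possibly partially
+	 * constructed) object either way, so failure can still be cleaned up. */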
+	ret = nouveau_engine_create_(parent, engine, oclass, true,
+				     intname, extname, length, pobject);
+	disp = *pobject;
+	if (ret)
+		return ret;
+
+	return nouveau_event_create(heads, &disp->vblank);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
new file mode 100644
index 0000000..a66b27c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+	const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
+		         (data & NV50_DISP_DAC_PWR_VSYNC) |
+		         (data & NV50_DISP_DAC_PWR_DATA) |
+		         (data & NV50_DISP_DAC_PWR_STATE);
+	const u32 doff = (or * 0x800);
+	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+	nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
+	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+	return 0;
+}
+
+int
+nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
+{
+	const u32 doff = (or * 0x800);
+
+	nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
+	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+
+	nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
+	mdelay(9);
+	udelay(500);
+	loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000);
+
+	nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
+	nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+
+	nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval);
+	if (!(loadval & 0x80000000))
+		return -ETIMEDOUT;
+
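+	/* bit 31 of the readback flags a completed sense; bits 29:27 carry
+	 * the per-channel load status, so e.g. a return value of 7 would
+	 * suggest a load on all three RGB channels. */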
+	return (loadval & 0x38000000) >> 27;
+}
+
+int
+nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
+	u32 *data = args;
+	int ret;
+
+	if (size < sizeof(u32))
+		return -EINVAL;
+
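+	/* the low bits of the method id carry the output (OR) index (see
+	 * the NV50_DISP_DAC_MTHD_OR mask); stripping them with ~0x3f
+	 * leaves the operation being requested. */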
+	switch (mthd & ~0x3f) {
+	case NV50_DISP_DAC_PWR:
+		ret = priv->dac.power(priv, or, data[0]);
+		break;
+	case NV50_DISP_DAC_LOAD:
+		ret = priv->dac.sense(priv, or, data[0]);
+		if (ret >= 0) {
+			data[0] = ret;
+			ret = 0;
+		}
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
new file mode 100644
index 0000000..31cc8fe
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+#include <subdev/i2c.h>
+
+#include <engine/disp.h>
+
+#include "dport.h"
+
+#define DBG(fmt, args...) nv_debug(dp->disp, "DP:%04x:%04x: " fmt,             \
+				   dp->outp->hasht, dp->outp->hashm, ##args)
+#define ERR(fmt, args...) nv_error(dp->disp, "DP:%04x:%04x: " fmt,             \
+				   dp->outp->hasht, dp->outp->hashm, ##args)
+
+/******************************************************************************
+ * link training
+ *****************************************************************************/
+struct dp_state {
+	const struct nouveau_dp_func *func;
+	struct nouveau_disp *disp;
+	struct dcb_output *outp;
+	struct nvbios_dpout info;
+	u8 version;
+	struct nouveau_i2c_port *aux;
+	int head;
+	u8  dpcd[4];
+	int link_nr;
+	u32 link_bw;
+	u8  stat[6];
+	u8  conf[4];
+};
+
+static int
+dp_set_link_config(struct dp_state *dp)
+{
+	struct nouveau_disp *disp = dp->disp;
+	struct nouveau_bios *bios = nouveau_bios(disp);
+	struct nvbios_init init = {
+		.subdev = nv_subdev(dp->disp),
+		.bios = bios,
+		.offset = 0x0000,
+		.outp = dp->outp,
+		.crtc = dp->head,
+		.execute = 1,
+	};
+	u32 lnkcmp;
+	u8 sink[2];
+
+	DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+
+	/* set desired link configuration on the sink */
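+	/* (DPCD LINK_BW_SET is in units of 0.27Gbps, hence the divide:
+	 * 162000 -> 0x06, 270000 -> 0x0a) */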
+	sink[0] = dp->link_bw / 27000;
+	sink[1] = dp->link_nr;
+	if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
+
+	nv_wraux(dp->aux, DPCD_LC00, sink, 2);
+
+	/* set desired link configuration on the source */
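+	/* the bios link-compare table is keyed on link rate: pre-3.0
+	 * tables appear to store rate/10 as 16-bit entries, 3.0+ tables
+	 * the 0.27Gbps code as 8-bit entries. */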
+	if ((lnkcmp = dp->info.lnkcmp)) {
+		if (dp->version < 0x30) {
+			while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp))
+				lnkcmp += 4;
+			init.offset = nv_ro16(bios, lnkcmp + 2);
+		} else {
+			while ((dp->link_bw / 27000) < nv_ro08(bios, lnkcmp))
+				lnkcmp += 3;
+			init.offset = nv_ro16(bios, lnkcmp + 1);
+		}
+
+		nvbios_exec(&init);
+	}
+
+	return dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
+				 dp->link_nr, dp->link_bw / 27000,
+				 dp->dpcd[DPCD_RC02] &
+					  DPCD_RC02_ENHANCED_FRAME_CAP);
+}
+
+static void
+dp_set_training_pattern(struct dp_state *dp, u8 pattern)
+{
+	u8 sink_tp;
+
+	DBG("training pattern %d\n", pattern);
+	dp->func->pattern(dp->disp, dp->outp, dp->head, pattern);
+
+	nv_rdaux(dp->aux, DPCD_LC02, &sink_tp, 1);
+	sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
+	sink_tp |= pattern;
+	nv_wraux(dp->aux, DPCD_LC02, &sink_tp, 1);
+}
+
+static int
+dp_link_train_commit(struct dp_state *dp)
+{
+	int i;
+
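+	/* stat[4..5] mirror the DPCD ADJUST_REQUEST registers: two lanes
+	 * per byte, low nibble first, with bits 1:0 requesting voltage
+	 * swing and bits 3:2 pre-emphasis.  e.g. a stat[4] of 0x12 would
+	 * ask for swing 2 on lane 0 and swing 1 on lane 1. */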
+	for (i = 0; i < dp->link_nr; i++) {
+		u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
+		u8 lpre = (lane & 0x0c) >> 2;
+		u8 lvsw = (lane & 0x03) >> 0;
+
+		dp->conf[i] = (lpre << 3) | lvsw;
+		if (lvsw == 3)
+			dp->conf[i] |= DPCD_LC03_MAX_SWING_REACHED;
+		if (lpre == 3)
+			dp->conf[i] |= DPCD_LC03_MAX_PRE_EMPHASIS_REACHED;
+
+		DBG("config lane %d %02x\n", i, dp->conf[i]);
+		dp->func->drv_ctl(dp->disp, dp->outp, dp->head, i, lvsw, lpre);
+	}
+
+	return nv_wraux(dp->aux, DPCD_LC03(0), dp->conf, 4);
+}
+
+static int
+dp_link_train_update(struct dp_state *dp, u32 delay)
+{
+	int ret;
+
+	udelay(delay);
+
+	ret = nv_rdaux(dp->aux, DPCD_LS02, dp->stat, 6);
+	if (ret)
+		return ret;
+
+	DBG("status %*ph\n", 6, dp->stat);
+	return 0;
+}
+
+static int
+dp_link_train_cr(struct dp_state *dp)
+{
+	bool cr_done = false, abort = false;
+	int voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
+	int tries = 0, i;
+
+	dp_set_training_pattern(dp, 1);
+
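+	/* retry clock recovery up to 5 times at a given voltage level,
+	 * restarting the count whenever the sink requests a new level and
+	 * giving up early once maximum swing has been reached. */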
+	do {
+		if (dp_link_train_commit(dp) ||
+		    dp_link_train_update(dp, 100))
+			break;
+
+		cr_done = true;
+		for (i = 0; i < dp->link_nr; i++) {
+			u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+			if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
+				cr_done = false;
+				if (dp->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
+					abort = true;
+				break;
+			}
+		}
+
+		if ((dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
+			voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
+			tries = 0;
+		}
+	} while (!cr_done && !abort && ++tries < 5);
+
+	return cr_done ? 0 : -1;
+}
+
+static int
+dp_link_train_eq(struct dp_state *dp)
+{
+	bool eq_done = false, cr_done = true;
+	int tries = 0, i;
+
+	dp_set_training_pattern(dp, 2);
+
+	do {
+		if (dp_link_train_update(dp, 400))
+			break;
+
+		eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
+		for (i = 0; i < dp->link_nr && eq_done; i++) {
+			u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+			if (!(lane & DPCD_LS02_LANE0_CR_DONE))
+				cr_done = false;
+			if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
+			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
+				eq_done = false;
+		}
+
+		if (dp_link_train_commit(dp))
+			break;
+	} while (!eq_done && cr_done && ++tries <= 5);
+
+	return eq_done ? 0 : -1;
+}
+
+static void
+dp_link_train_init(struct dp_state *dp, bool spread)
+{
+	struct nvbios_init init = {
+		.subdev = nv_subdev(dp->disp),
+		.bios = nouveau_bios(dp->disp),
+		.outp = dp->outp,
+		.crtc = dp->head,
+		.execute = 1,
+	};
+
+	/* set desired spread */
+	if (spread)
+		init.offset = dp->info.script[2];
+	else
+		init.offset = dp->info.script[3];
+	nvbios_exec(&init);
+
+	/* pre-train script */
+	init.offset = dp->info.script[0];
+	nvbios_exec(&init);
+}
+
+static void
+dp_link_train_fini(struct dp_state *dp)
+{
+	struct nvbios_init init = {
+		.subdev = nv_subdev(dp->disp),
+		.bios = nouveau_bios(dp->disp),
+		.outp = dp->outp,
+		.crtc = dp->head,
+		.execute = 1,
+	};
+
+	/* post-train script */
+	init.offset = dp->info.script[1];
+	nvbios_exec(&init);
+}
+
+int
+nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
+		 struct dcb_output *outp, int head, u32 datarate)
+{
+	struct nouveau_bios *bios = nouveau_bios(disp);
+	struct nouveau_i2c *i2c = nouveau_i2c(disp);
+	struct dp_state _dp = {
+		.disp = disp,
+		.func = func,
+		.outp = outp,
+		.head = head,
+	}, *dp = &_dp;
+	const u32 bw_list[] = { 270000, 162000, 0 };
+	const u32 *link_bw = bw_list;
+	u8  hdr, cnt, len;
+	u32 data;
+	int ret;
+
+	/* find the bios displayport data relevant to this output */
+	data = nvbios_dpout_match(bios, outp->hasht, outp->hashm, &dp->version,
+				 &hdr, &cnt, &len, &dp->info);
+	if (!data) {
+		ERR("bios data not found\n");
+		return -EINVAL;
+	}
+
+	/* acquire the aux channel and fetch some info about the display */
+	if (outp->location)
+		dp->aux = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
+	else
+		dp->aux = i2c->find(i2c, NV_I2C_TYPE_DCBI2C(outp->i2c_index));
+	if (!dp->aux) {
+		ERR("no aux channel?!\n");
+		return -ENODEV;
+	}
+
+	ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd));
+	if (ret) {
+		ERR("failed to read DPCD\n");
+		return ret;
+	}
+
+	/* adjust required bandwidth for 8B/10B coding overhead */
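+	/* (each data byte is carried as a 10-bit line symbol; after this
+	 * scaling, datarate and link_bw share the same units, 270000
+	 * corresponding to a 2.7Gbps lane.) */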
+	datarate = (datarate / 8) * 10;
+
+	/* enable down-spreading and execute pre-train script from vbios */
+	dp_link_train_init(dp, dp->dpcd[3] & 0x01);
+
+	/* start off at highest link rate supported by encoder and display */
+	while (*link_bw > (dp->dpcd[1] * 27000))
+		link_bw++;
+
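+	/* e.g. 1080p60 at 24bpp needs ~557000 in these units once scaled:
+	 * a 4-lane sink still fits that at 162000, so the loops below
+	 * would keep all four lanes but settle on the lower link rate
+	 * (illustrative numbers only). */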
+	while (link_bw[0]) {
+		/* find minimum required lane count at this link rate */
+		dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT;
+		while ((dp->link_nr >> 1) * link_bw[0] > datarate)
+			dp->link_nr >>= 1;
+
+		/* drop link rate to minimum with this lane count */
+		while ((link_bw[1] * dp->link_nr) > datarate)
+			link_bw++;
+		dp->link_bw = link_bw[0];
+
+		/* program selected link configuration */
+		ret = dp_set_link_config(dp);
+		if (ret == 0) {
+			/* attempt to train the link at this configuration */
+			memset(dp->stat, 0x00, sizeof(dp->stat));
+			if (!dp_link_train_cr(dp) &&
+			    !dp_link_train_eq(dp))
+				break;
+		} else
+		if (ret >= 1) {
+			/* dp_set_link_config() handled training */
+			break;
+		}
+
+		/* retry at lower rate */
+		link_bw++;
+	}
+
+	/* finish link training */
+	dp_set_training_pattern(dp, 0);
+
+	/* execute post-train script from vbios */
+	dp_link_train_fini(dp);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/dport.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
new file mode 100644
index 0000000..0e1bbd1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
@@ -0,0 +1,78 @@
+#ifndef __NVKM_DISP_DPORT_H__
+#define __NVKM_DISP_DPORT_H__
+
+/* DPCD Receiver Capabilities */
+#define DPCD_RC00                                                       0x00000
+#define DPCD_RC00_DPCD_REV                                                 0xff
+#define DPCD_RC01                                                       0x00001
+#define DPCD_RC01_MAX_LINK_RATE                                            0xff
+#define DPCD_RC02                                                       0x00002
+#define DPCD_RC02_ENHANCED_FRAME_CAP                                       0x80
+#define DPCD_RC02_MAX_LANE_COUNT                                           0x1f
+#define DPCD_RC03                                                       0x00003
+#define DPCD_RC03_MAX_DOWNSPREAD                                           0x01
+
+/* DPCD Link Configuration */
+#define DPCD_LC00                                                       0x00100
+#define DPCD_LC00_LINK_BW_SET                                              0xff
+#define DPCD_LC01                                                       0x00101
+#define DPCD_LC01_ENHANCED_FRAME_EN                                        0x80
+#define DPCD_LC01_LANE_COUNT_SET                                           0x1f
+#define DPCD_LC02                                                       0x00102
+#define DPCD_LC02_TRAINING_PATTERN_SET                                     0x03
+#define DPCD_LC03(l)                                            ((l) +  0x00103)
+#define DPCD_LC03_MAX_PRE_EMPHASIS_REACHED                                 0x20
+#define DPCD_LC03_PRE_EMPHASIS_SET                                         0x18
+#define DPCD_LC03_MAX_SWING_REACHED                                        0x04
+#define DPCD_LC03_VOLTAGE_SWING_SET                                        0x03
+
+/* DPCD Link/Sink Status */
+#define DPCD_LS02                                                       0x00202
+#define DPCD_LS02_LANE1_SYMBOL_LOCKED                                      0x40
+#define DPCD_LS02_LANE1_CHANNEL_EQ_DONE                                    0x20
+#define DPCD_LS02_LANE1_CR_DONE                                            0x10
+#define DPCD_LS02_LANE0_SYMBOL_LOCKED                                      0x04
+#define DPCD_LS02_LANE0_CHANNEL_EQ_DONE                                    0x02
+#define DPCD_LS02_LANE0_CR_DONE                                            0x01
+#define DPCD_LS03                                                       0x00203
+#define DPCD_LS03_LANE3_SYMBOL_LOCKED                                      0x40
+#define DPCD_LS03_LANE3_CHANNEL_EQ_DONE                                    0x20
+#define DPCD_LS03_LANE3_CR_DONE                                            0x10
+#define DPCD_LS03_LANE2_SYMBOL_LOCKED                                      0x04
+#define DPCD_LS03_LANE2_CHANNEL_EQ_DONE                                    0x02
+#define DPCD_LS03_LANE2_CR_DONE                                            0x01
+#define DPCD_LS04                                                       0x00204
+#define DPCD_LS04_LINK_STATUS_UPDATED                                      0x80
+#define DPCD_LS04_DOWNSTREAM_PORT_STATUS_CHANGED                           0x40
+#define DPCD_LS04_INTERLANE_ALIGN_DONE                                     0x01
+#define DPCD_LS06                                                       0x00206
+#define DPCD_LS06_LANE1_PRE_EMPHASIS                                       0xc0
+#define DPCD_LS06_LANE1_VOLTAGE_SWING                                      0x30
+#define DPCD_LS06_LANE0_PRE_EMPHASIS                                       0x0c
+#define DPCD_LS06_LANE0_VOLTAGE_SWING                                      0x03
+#define DPCD_LS07                                                       0x00207
+#define DPCD_LS07_LANE3_PRE_EMPHASIS                                       0xc0
+#define DPCD_LS07_LANE3_VOLTAGE_SWING                                      0x30
+#define DPCD_LS07_LANE2_PRE_EMPHASIS                                       0x0c
+#define DPCD_LS07_LANE2_VOLTAGE_SWING                                      0x03
+
+struct nouveau_disp;
+struct dcb_output;
+
+struct nouveau_dp_func {
+	int (*pattern)(struct nouveau_disp *, struct dcb_output *,
+		       int head, int pattern);
+	int (*lnk_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
+		       int link_nr, int link_bw, bool enh_frame);
+	int (*drv_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
+		       int lane, int swing, int preem);
+};
+
+extern const struct nouveau_dp_func nv94_sor_dp_func;
+extern const struct nouveau_dp_func nvd0_sor_dp_func;
+extern const struct nouveau_dp_func nv50_pior_dp_func;
+
+int nouveau_dp_train(struct nouveau_disp *, const struct nouveau_dp_func *,
+		     struct dcb_output *, int, u32);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
new file mode 100644
index 0000000..a19e7d7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+	const u32 soff = (or * 0x800);
+	int i;
+
+	if (data && data[0]) {
+		for (i = 0; i < size; i++)
+			nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+		for (; i < 0x60; i++)
+			nv_wr32(priv, 0x61c440 + soff, (i << 8));
+		nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
+	} else
+	if (data) {
+		nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
+	} else {
+		nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
new file mode 100644
index 0000000..7176393
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+int
+nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+	const u32 soff = (or * 0x030);
+	int i;
+
+	if (data && data[0]) {
+		for (i = 0; i < size; i++)
+			nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+		for (; i < 0x60; i++)
+			nv_wr32(priv, 0x10ec00 + soff, (i << 8));
+		nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
+	} else
+	if (data) {
+		nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
+	} else {
+		nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
new file mode 100644
index 0000000..7fdade6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+	const u32 hoff = (head * 0x800);
+
+	if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+		nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
+		nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+		nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+		return 0;
+	}
+
+	/* AVI InfoFrame */
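+	/* 0x000d0282 looks like the packed AVI InfoFrame header (type 0x82,
+	 * version 2, length 13), and 0x6f below is the matching checksum
+	 * for an all-zero payload. */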
+	nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
+	nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
+	nv_wr32(priv, 0x616530 + hoff, 0x00000000);
+	nv_wr32(priv, 0x616534 + hoff, 0x00000000);
+	nv_wr32(priv, 0x616538 + hoff, 0x00000000);
+	nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
+
+	/* Audio InfoFrame */
+	nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
+	nv_wr32(priv, 0x61650c + hoff, 0x00000071);
+	nv_wr32(priv, 0x616510 + hoff, 0x00000000);
+	nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+
+	nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+	nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+	nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
+	/* ??? */
+	nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+	nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+	nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+	/* HDMI_CTRL */
+	nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
new file mode 100644
index 0000000..db8c6fd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+	const u32 soff = (or * 0x800);
+
+	if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+		nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
+		nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+		nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+		return 0;
+	}
+
+	/* AVI InfoFrame */
+	nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
+	nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
+	nv_wr32(priv, 0x61c530 + soff, 0x00000000);
+	nv_wr32(priv, 0x61c534 + soff, 0x00000000);
+	nv_wr32(priv, 0x61c538 + soff, 0x00000000);
+	nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
+
+	/* Audio InfoFrame */
+	nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
+	nv_wr32(priv, 0x61c50c + soff, 0x00000071);
+	nv_wr32(priv, 0x61c510 + soff, 0x00000000);
+	nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+
+	nv_mask(priv, 0x61c5d0 + soff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+	nv_mask(priv, 0x61c568 + soff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+	nv_mask(priv, 0x61c578 + soff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
+	/* ??? */
+	nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+	nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+	nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+	/* HDMI_CTRL */
+	nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
new file mode 100644
index 0000000..5151bb2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+	const u32 hoff = (head * 0x800);
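+	/* as on nv84 (and unlike nva3), the HDMI control registers here
+	 * appear to be indexed by head rather than by SOR
+	 */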
+
+	if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+		nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
+		nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+		nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+		return 0;
+	}
+
+	/* AVI InfoFrame */
+	nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
+	nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
+	nv_wr32(priv, 0x616724 + hoff, 0x00000000);
+	nv_wr32(priv, 0x616728 + hoff, 0x00000000);
+	nv_wr32(priv, 0x61672c + hoff, 0x00000000);
+	nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
+
+	/* ??? InfoFrame? */
+	nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
+	nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
+
+	/* HDMI_CTRL */
+	nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
+
+	/* no idea what this does, but audio doesn't work without it.. */
+	nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
new file mode 100644
index 0000000..05e903f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/disp.h>
+
+#include <core/event.h>
+#include <core/class.h>
+
+struct nv04_disp_priv {
+	struct nouveau_disp base;
+};
+
+static struct nouveau_oclass
+nv04_disp_sclass[] = {
+	{ NV04_DISP_CLASS, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv04_disp_vblank_enable(struct nouveau_event *event, int head)
+{
+	nv_wr32(event->priv, 0x600140 + (head * 0x2000), 0x00000001);
+}
+
+static void
+nv04_disp_vblank_disable(struct nouveau_event *event, int head)
+{
+	nv_wr32(event->priv, 0x600140 + (head * 0x2000), 0x00000000);
+}
+
+static void
+nv04_disp_intr(struct nouveau_subdev *subdev)
+{
+	struct nv04_disp_priv *priv = (void *)subdev;
+	u32 crtc0 = nv_rd32(priv, 0x600100);
+	u32 crtc1 = nv_rd32(priv, 0x602100);
+
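+	/* bit 0 of each PCRTC intr register indicates vblank; writing the
+	 * bit back appears to acknowledge it
+	 */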
+	if (crtc0 & 0x00000001) {
+		nouveau_event_trigger(priv->base.vblank, 0);
+		nv_wr32(priv, 0x600100, 0x00000001);
+	}
+
+	if (crtc1 & 0x00000001) {
+		nouveau_event_trigger(priv->base.vblank, 1);
+		nv_wr32(priv, 0x602100, 0x00000001);
+	}
+}
+
+static int
+nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv04_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, 2, "DISPLAY",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nv04_disp_sclass;
+	nv_subdev(priv)->intr = nv04_disp_intr;
+	priv->base.vblank->priv = priv;
+	priv->base.vblank->enable = nv04_disp_vblank_enable;
+	priv->base.vblank->disable = nv04_disp_vblank_disable;
+	return 0;
+}
+
+struct nouveau_oclass
+nv04_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
new file mode 100644
index 0000000..4b7d434
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -0,0 +1,1347 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
+
+#include <engine/disp.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/clock.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO channel base class
+ ******************************************************************************/
+
+int
+nv50_disp_chan_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, int chid,
+		       int length, void **pobject)
+{
+	struct nv50_disp_base *base = (void *)parent;
+	struct nv50_disp_chan *chan;
+	int ret;
+
+	if (base->chan & (1 << chid))
+		return -EBUSY;
+	base->chan |= (1 << chid);
+
+	ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
+				     (1ULL << NVDEV_ENGINE_DMAOBJ),
+				     length, pobject);
+	chan = *pobject;
+	if (ret)
+		return ret;
+
+	chan->chid = chid;
+	return 0;
+}
+
+void
+nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
+{
+	struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
+	base->chan &= ~(1 << chan->chid);
+	nouveau_namedb_destroy(&chan->base);
+}
+
+u32
+nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_chan *chan = (void *)object;
+	return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
+}
+
+void
+nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_chan *chan = (void *)object;
+	nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
+}
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_dmac_object_attach(struct nouveau_object *parent,
+			     struct nouveau_object *object, u32 name)
+{
+	struct nv50_disp_base *base = (void *)parent->parent;
+	struct nv50_disp_chan *chan = (void *)parent;
+	u32 addr = nv_gpuobj(object)->node->offset;
+	u32 chid = chan->chid;
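+	/* the evo hash table entry appears to pack the channel id into both
+	 * the top nibble and the low bits, with the object offset in between
+	 */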
+	u32 data = (chid << 28) | (addr << 10) | chid;
+	return nouveau_ramht_insert(base->ramht, chid, name, data);
+}
+
+static void
+nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+	struct nv50_disp_base *base = (void *)parent->parent;
+	nouveau_ramht_remove(base->ramht, cookie);
+}
+
+int
+nv50_disp_dmac_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, u32 pushbuf, int chid,
+		       int length, void **pobject)
+{
+	struct nv50_disp_dmac *dmac;
+	int ret;
+
+	ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
+				     length, pobject);
+	dmac = *pobject;
+	if (ret)
+		return ret;
+
+	dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+	if (!dmac->pushdma)
+		return -ENOENT;
+
+	switch (nv_mclass(dmac->pushdma)) {
+	case 0x0002:
+	case 0x003d:
+		if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
+			return -EINVAL;
+
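+		/* push buffer address is programmed in 256-byte units, with
+		 * the target memory type apparently encoded in the low bits
+		 */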
+		switch (dmac->pushdma->target) {
+		case NV_MEM_TARGET_VRAM:
+			dmac->push = 0x00000000 | dmac->pushdma->start >> 8;
+			break;
+		case NV_MEM_TARGET_PCI_NOSNOOP:
+			dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void
+nv50_disp_dmac_dtor(struct nouveau_object *object)
+{
+	struct nv50_disp_dmac *dmac = (void *)object;
+	nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma);
+	nv50_disp_chan_destroy(&dmac->base);
+}
+
+static int
+nv50_disp_dmac_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *dmac = (void *)object;
+	int chid = dmac->base.chid;
+	int ret;
+
+	ret = nv50_disp_chan_init(&dmac->base);
+	if (ret)
+		return ret;
+
+	/* enable error reporting */
+	nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid);
+
+	/* initialise channel for dma command submission */
+	nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
+	nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
+	nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
+	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+	nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+	nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
+
+	/* wait for it to go inactive */
+	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
+		nv_error(dmac, "init timeout, 0x%08x\n",
+			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *dmac = (void *)object;
+	int chid = dmac->base.chid;
+
+	/* deactivate channel */
+	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+	nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+		nv_error(dmac, "fini timeout, 0x%08x\n",
+			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	/* disable error reporting */
+	nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+
+	return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nv50_disp_mast_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_mast_class *args = data;
+	struct nv50_disp_dmac *mast;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+				     0, sizeof(*mast), (void **)&mast);
+	*pobject = nv_object(mast);
+	if (ret)
+		return ret;
+
+	nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
+	nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
+	return 0;
+}
+
+static int
+nv50_disp_mast_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *mast = (void *)object;
+	int ret;
+
+	ret = nv50_disp_chan_init(&mast->base);
+	if (ret)
+		return ret;
+
+	/* enable error reporting */
+	nv_mask(priv, 0x610028, 0x00010001, 0x00010001);
+
+	/* attempt to unstick channel from some unknown state */
+	if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
+		nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
+	if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
+		nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
+
+	/* initialise channel for dma command submission */
+	nv_wr32(priv, 0x610204, mast->push);
+	nv_wr32(priv, 0x610208, 0x00010000);
+	nv_wr32(priv, 0x61020c, 0x00000000);
+	nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
+	nv_wr32(priv, 0x640000, 0x00000000);
+	nv_wr32(priv, 0x610200, 0x01000013);
+
+	/* wait for it to go inactive */
+	if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
+		nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *mast = (void *)object;
+
+	/* deactivate channel */
+	nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
+	nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
+	if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
+		nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	/* disable error reporting */
+	nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
+
+	return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_mast_ofuncs = {
+	.ctor = nv50_disp_mast_ctor,
+	.dtor = nv50_disp_dmac_dtor,
+	.init = nv50_disp_mast_init,
+	.fini = nv50_disp_mast_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_sync_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_sync_class *args = data;
+	struct nv50_disp_dmac *dmac;
+	int ret;
+
+	if (size < sizeof(*args) || args->head > 1)
+		return -EINVAL;
+
+	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+				     1 + args->head, sizeof(*dmac),
+				     (void **)&dmac);
+	*pobject = nv_object(dmac);
+	if (ret)
+		return ret;
+
+	nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+	nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+	return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_sync_ofuncs = {
+	.ctor = nv50_disp_sync_ctor,
+	.dtor = nv50_disp_dmac_dtor,
+	.init = nv50_disp_dmac_init,
+	.fini = nv50_disp_dmac_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_ovly_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_ovly_class *args = data;
+	struct nv50_disp_dmac *dmac;
+	int ret;
+
+	if (size < sizeof(*args) || args->head > 1)
+		return -EINVAL;
+
+	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+				     3 + args->head, sizeof(*dmac),
+				     (void **)&dmac);
+	*pobject = nv_object(dmac);
+	if (ret)
+		return ret;
+
+	nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+	nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+	return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_ovly_ofuncs = {
+	.ctor = nv50_disp_ovly_ctor,
+	.dtor = nv50_disp_dmac_dtor,
+	.init = nv50_disp_dmac_init,
+	.fini = nv50_disp_dmac_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_pioc_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, int chid,
+		       int length, void **pobject)
+{
+	return nv50_disp_chan_create_(parent, engine, oclass, chid,
+				      length, pobject);
+}
+
+static void
+nv50_disp_pioc_dtor(struct nouveau_object *object)
+{
+	struct nv50_disp_pioc *pioc = (void *)object;
+	nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nv50_disp_pioc_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_pioc *pioc = (void *)object;
+	int chid = pioc->base.chid;
+	int ret;
+
+	ret = nv50_disp_chan_init(&pioc->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
+	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
+		nv_error(pioc, "timeout0: 0x%08x\n",
+			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
+	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
+		nv_error(pioc, "timeout1: 0x%08x\n",
+			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_pioc *pioc = (void *)object;
+	int chid = pioc->base.chid;
+
+	nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+	if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
+		nv_error(pioc, "timeout: 0x%08x\n",
+			 nv_rd32(priv, 0x610200 + (chid * 0x10)));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_oimm_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_oimm_class *args = data;
+	struct nv50_disp_pioc *pioc;
+	int ret;
+
+	if (size < sizeof(*args) || args->head > 1)
+		return -EINVAL;
+
+	ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head,
+				     sizeof(*pioc), (void **)&pioc);
+	*pobject = nv_object(pioc);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_oimm_ofuncs = {
+	.ctor = nv50_disp_oimm_ctor,
+	.dtor = nv50_disp_pioc_dtor,
+	.init = nv50_disp_pioc_init,
+	.fini = nv50_disp_pioc_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_curs_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_curs_class *args = data;
+	struct nv50_disp_pioc *pioc;
+	int ret;
+
+	if (size < sizeof(*args) || args->head > 1)
+		return -EINVAL;
+
+	ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head,
+				     sizeof(*pioc), (void **)&pioc);
+	*pobject = nv_object(pioc);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_curs_ofuncs = {
+	.ctor = nv50_disp_curs_ctor,
+	.dtor = nv50_disp_pioc_dtor,
+	.init = nv50_disp_pioc_init,
+	.fini = nv50_disp_pioc_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static void
+nv50_disp_base_vblank_enable(struct nouveau_event *event, int head)
+{
+	nv_mask(event->priv, 0x61002c, (4 << head), (4 << head));
+}
+
+static void
+nv50_disp_base_vblank_disable(struct nouveau_event *event, int head)
+{
+	nv_mask(event->priv, 0x61002c, (4 << head), 0);
+}
+
+static int
+nv50_disp_base_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nv50_disp_base *base;
+	int ret;
+
+	ret = nouveau_parent_create(parent, engine, oclass, 0,
+				    priv->sclass, 0, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	priv->base.vblank->priv = priv;
+	priv->base.vblank->enable = nv50_disp_base_vblank_enable;
+	priv->base.vblank->disable = nv50_disp_base_vblank_disable;
+	return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
+				&base->ramht);
+}
+
+static void
+nv50_disp_base_dtor(struct nouveau_object *object)
+{
+	struct nv50_disp_base *base = (void *)object;
+	nouveau_ramht_ref(NULL, &base->ramht);
+	nouveau_parent_destroy(&base->base);
+}
+
+static int
+nv50_disp_base_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_base *base = (void *)object;
+	int ret, i;
+	u32 tmp;
+
+	ret = nouveau_parent_init(&base->base);
+	if (ret)
+		return ret;
+
+	/* The segments of code below, which copy values from one register to
+	 * another, appear to inform EVO of the display capabilities or
+	 * something similar.  No idea what the 0x614004 caps are for..
+	 */
+	tmp = nv_rd32(priv, 0x614004);
+	nv_wr32(priv, 0x610184, tmp);
+
+	/* ... CRTC caps */
+	for (i = 0; i < priv->head.nr; i++) {
+		tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
+		nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
+		tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+		nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
+		tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+		nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
+		tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+		nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
+	}
+
+	/* ... DAC caps */
+	for (i = 0; i < priv->dac.nr; i++) {
+		tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+		nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
+	}
+
+	/* ... SOR caps */
+	for (i = 0; i < priv->sor.nr; i++) {
+		tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+		nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
+	}
+
+	/* ... PIOR caps */
+	for (i = 0; i < 3; i++) {
+		tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
+		nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
+	}
+
+	/* steal display away from vbios, or something like that */
+	if (nv_rd32(priv, 0x610024) & 0x00000100) {
+		nv_wr32(priv, 0x610024, 0x00000100);
+		nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+		if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+			nv_error(priv, "timeout acquiring display\n");
+			return -EBUSY;
+		}
+	}
+
+	/* point at display engine memory area (hash table, objects) */
+	nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
+
+	/* enable supervisor interrupts, disable everything else */
+	nv_wr32(priv, 0x61002c, 0x00000370);
+	nv_wr32(priv, 0x610028, 0x00000000);
+	return 0;
+}
+
+static int
+nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_base *base = (void *)object;
+
+	/* disable all interrupts */
+	nv_wr32(priv, 0x610024, 0x00000000);
+	nv_wr32(priv, 0x610020, 0x00000000);
+
+	return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_base_ofuncs = {
+	.ctor = nv50_disp_base_ctor,
+	.dtor = nv50_disp_base_dtor,
+	.init = nv50_disp_base_init,
+	.fini = nv50_disp_base_fini,
+};
+
+static struct nouveau_omthds
+nv50_disp_base_omthds[] = {
+	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_PWR)       , nv50_pior_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR)  , nv50_pior_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_DP_PWR)    , nv50_pior_mthd },
+	{},
+};
+
+static struct nouveau_oclass
+nv50_disp_base_oclass[] = {
+	{ NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds },
+	{}
+};
+
+static struct nouveau_oclass
+nv50_disp_sclass[] = {
+	{ NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+	{ NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+	{ NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+	{ NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+	{ NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * Display context; tracks instmem allocation and prevents more than one
+ * client from using the display hardware at any time.
+ ******************************************************************************/
+
+static int
+nv50_disp_data_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nouveau_engctx *ectx;
+	int ret = -EBUSY;
+
+	/* no context needed for channel objects... */
+	if (nv_mclass(parent) != NV_DEVICE_CLASS) {
+		atomic_inc(&parent->refcount);
+		*pobject = parent;
+		return 1;
+	}
+
+	/* allocate display hardware to client */
+	mutex_lock(&nv_subdev(priv)->mutex);
+	if (list_empty(&nv_engine(priv)->contexts)) {
+		ret = nouveau_engctx_create(parent, engine, oclass, NULL,
+					    0x10000, 0x10000,
+					    NVOBJ_FLAG_HEAP, &ectx);
+		*pobject = nv_object(ectx);
+	}
+	mutex_unlock(&nv_subdev(priv)->mutex);
+	return ret;
+}
+
+struct nouveau_oclass
+nv50_disp_cclass = {
+	.handle = NV_ENGCTX(DISP, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_disp_data_ctor,
+		.dtor = _nouveau_engctx_dtor,
+		.init = _nouveau_engctx_init,
+		.fini = _nouveau_engctx_fini,
+		.rd32 = _nouveau_engctx_rd32,
+		.wr32 = _nouveau_engctx_wr32,
+	},
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv50_disp_intr_error(struct nv50_disp_priv *priv)
+{
+	u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16;
+	u32 addr, data;
+	int chid;
+
+	for (chid = 0; chid < 5; chid++) {
+		if (!(channels & (1 << chid)))
+			continue;
+
+		nv_wr32(priv, 0x610020, 0x00010000 << chid);
+		addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+		data = nv_rd32(priv, 0x610084 + (chid * 0x08));
+		nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+
+		nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n",
+			 chid, addr & 0xffc, data, addr);
+	}
+}
+
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+	    struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	    struct nvbios_outp *info)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	u16 mask, type, data;
+
+	if (outp < 4) {
+		type = DCB_OUTPUT_ANALOG;
+		mask = 0;
+	} else
+	if (outp < 8) {
+		switch (ctrl & 0x00000f00) {
+		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+		default:
+			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+			return 0x0000;
+		}
+		outp -= 4;
+	} else {
+		outp -= 8;
+		type = 0x0010;
+		mask = 0;
+		switch (ctrl & 0x00000f00) {
+		case 0x00000000: type |= priv->pior.type[outp]; break;
+		default:
+			nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl);
+			return 0x0000;
+		}
+	}
+
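+	/* build the dcb match mask: link mask in bits 7:6, output index as
+	 * a single bit in the low byte, head as a single bit in bits 9:8
+	 */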
+	mask  = 0x00c0 & (mask << 6);
+	mask |= 0x0001 << outp;
+	mask |= 0x0100 << head;
+
+	data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+	if (!data)
+		return 0x0000;
+
+	/* off-chip encoders require matching the exact encoder type */
+	if (dcb->location != 0)
+		type |= dcb->extdev << 8;
+
+	return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int id)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_outp info;
+	struct dcb_output dcb;
+	u8  ver, hdr, cnt, len;
+	u16 data;
+	u32 ctrl = 0x00000000;
+	int i;
+
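+	/* scan the output control registers in dcb order (dac 0-3, sor 4-7,
+	 * pior 8+); the i += 4 / i += 8 adjustments map the loop index into
+	 * that space
+	 */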
+	/* DAC */
+	for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+		ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+
+	/* SOR */
+	if (!(ctrl & (1 << head))) {
+		if (nv_device(priv)->chipset  < 0x90 ||
+		    nv_device(priv)->chipset == 0x92 ||
+		    nv_device(priv)->chipset == 0xa0) {
+			for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+				ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
+			i += 4;
+		} else {
+			for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+				ctrl = nv_rd32(priv, 0x610798 + (i * 8));
+			i += 4;
+		}
+	}
+
+	/* PIOR */
+	if (!(ctrl & (1 << head))) {
+		for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+			ctrl = nv_rd32(priv, 0x610b84 + (i * 8));
+		i += 8;
+	}
+
+	if (!(ctrl & (1 << head)))
+		return false;
+	i--;
+
+	data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+	if (data) {
+		struct nvbios_init init = {
+			.subdev = nv_subdev(priv),
+			.bios = bios,
+			.offset = info.script[id],
+			.outp = &dcb,
+			.crtc = head,
+			.execute = 1,
+		};
+
+		return nvbios_exec(&init) == 0;
+	}
+
+	return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
+	    struct dcb_output *outp)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_outp info1;
+	struct nvbios_ocfg info2;
+	u8  ver, hdr, cnt, len;
+	u32 ctrl = 0x00000000;
+	u32 data, conf = ~0;
+	int i;
+
+	/* DAC */
+	for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+		ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+
+	/* SOR */
+	if (!(ctrl & (1 << head))) {
+		if (nv_device(priv)->chipset  < 0x90 ||
+		    nv_device(priv)->chipset == 0x92 ||
+		    nv_device(priv)->chipset == 0xa0) {
+			for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+				ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
+			i += 4;
+		} else {
+			for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+				ctrl = nv_rd32(priv, 0x610794 + (i * 8));
+			i += 4;
+		}
+	}
+
+	/* PIOR */
+	if (!(ctrl & (1 << head))) {
+		for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+			ctrl = nv_rd32(priv, 0x610b80 + (i * 8));
+		i += 8;
+	}
+
+	if (!(ctrl & (1 << head)))
+		return conf;
+	i--;
+
+	data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
+	if (!data)
+		return conf;
+
+	if (outp->location == 0) {
+		switch (outp->type) {
+		case DCB_OUTPUT_TMDS:
+			conf = (ctrl & 0x00000f00) >> 8;
+			if (pclk >= 165000)
+				conf |= 0x0100;
+			break;
+		case DCB_OUTPUT_LVDS:
+			conf = priv->sor.lvdsconf;
+			break;
+		case DCB_OUTPUT_DP:
+			conf = (ctrl & 0x00000f00) >> 8;
+			break;
+		case DCB_OUTPUT_ANALOG:
+		default:
+			conf = 0x00ff;
+			break;
+		}
+	} else {
+		conf = (ctrl & 0x00000f00) >> 8;
+		pclk = pclk / 2;
+	}
+
+	data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+	if (data && id < 0xff) {
+		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+		if (data) {
+			struct nvbios_init init = {
+				.subdev = nv_subdev(priv),
+				.bios = bios,
+				.offset = data,
+				.outp = outp,
+				.crtc = head,
+				.execute = 1,
+			};
+
+			nvbios_exec(&init);
+		}
+	}
+
+	return conf;
+}
+
+static void
+nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head)
+{
+	exec_script(priv, head, 1);
+}
+
+static void
+nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
+{
+	exec_script(priv, head, 2);
+}
+
+static void
+nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head)
+{
+	struct nouveau_clock *clk = nouveau_clock(priv);
+	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+	if (pclk)
+		clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+}
+
+static void
+nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
+			  struct dcb_output *outp, u32 pclk)
+{
+	const int link = !(outp->sorconf.link & 1);
+	const int   or = ffs(outp->or) - 1;
+	const u32 soff = (  or * 0x800);
+	const u32 loff = (link * 0x080) + soff;
+	const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
+	const u32 symbol = 100000;
+	u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+	u32 clksor = nv_rd32(priv, 0x614300 + soff);
+	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
+	int TU, VTUi, VTUf, VTUa;
+	u64 link_data_rate, link_ratio, unk;
+	u32 best_diff = 64 * symbol;
+	u32 link_nr, link_bw, bits, r;
+
+	/* calculate packed data rate for each lane */
+	if      (dpctrl > 0x00030000) link_nr = 4;
+	else if (dpctrl > 0x00010000) link_nr = 2;
+	else			      link_nr = 1;
+
+	if (clksor & 0x000c0000)
+		link_bw = 270000;
+	else
+		link_bw = 162000;
+
+	if      ((ctrl & 0xf0000) == 0x60000) bits = 30;
+	else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
+	else                                  bits = 18;
+
+	link_data_rate = (pclk * bits / 8) / link_nr;
+
+	/* calculate ratio of packed data rate to link symbol rate */
+	link_ratio = link_data_rate * symbol;
+	r = do_div(link_ratio, link_bw);
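+	/* link_ratio now holds the data:symbol-rate ratio as a fixed-point
+	 * fraction scaled by 'symbol' (do_div() divides in place)
+	 */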
+
+	for (TU = 64; TU >= 32; TU--) {
+		/* calculate average number of valid symbols in each TU */
+		u32 tu_valid = link_ratio * TU;
+		u32 calc, diff;
+
+		/* find a hw representation for the fraction.. */
+		VTUi = tu_valid / symbol;
+		calc = VTUi * symbol;
+		diff = tu_valid - calc;
+		if (diff) {
+			if (diff >= (symbol / 2)) {
+				VTUf = symbol / (symbol - diff);
+				if (symbol - (VTUf * diff))
+					VTUf++;
+
+				if (VTUf <= 15) {
+					VTUa  = 1;
+					calc += symbol - (symbol / VTUf);
+				} else {
+					VTUa  = 0;
+					VTUf  = 1;
+					calc += symbol;
+				}
+			} else {
+				VTUa  = 0;
+				VTUf  = min((int)(symbol / diff), 15);
+				calc += symbol / VTUf;
+			}
+
+			diff = calc - tu_valid;
+		} else {
+			/* no remainder, but the hw doesn't like the fractional
+			 * part to be zero.  decrement the integer part and
+			 * have the fraction add a whole symbol back
+			 */
+			VTUa = 0;
+			VTUf = 1;
+			VTUi--;
+		}
+
+		if (diff < best_diff) {
+			best_diff = diff;
+			bestTU = TU;
+			bestVTUa = VTUa;
+			bestVTUf = VTUf;
+			bestVTUi = VTUi;
+			if (diff == 0)
+				break;
+		}
+	}
+
+	if (!bestTU) {
+		nv_error(priv, "unable to find suitable dp config\n");
+		return;
+	}
+
+	/* XXX close to vbios numbers, but not right */
+	unk  = (symbol - link_ratio) * bestTU;
+	unk *= link_ratio;
+	r = do_div(unk, symbol);
+	r = do_div(unk, symbol);
+	unk += 6;
+
+	nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
+	nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
+						   bestVTUf << 16 |
+						   bestVTUi << 8 | unk);
+}
+
+static void
+nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
+{
+	struct dcb_output outp;
+	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+	u32 hval, hreg = 0x614200 + (head * 0x800);
+	u32 oval, oreg;
+	u32 mask;
+	u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
+	if (conf != ~0) {
+		if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
+			u32 soff = (ffs(outp.or) - 1) * 0x08;
+			u32 ctrl = nv_rd32(priv, 0x610794 + soff);
+			u32 datarate;
+
+			switch ((ctrl & 0x000f0000) >> 16) {
+			case 6: datarate = pclk * 30 / 8; break;
+			case 5: datarate = pclk * 24 / 8; break;
+			case 2:
+			default:
+				datarate = pclk * 18 / 8;
+				break;
+			}
+
+			nouveau_dp_train(&priv->base, priv->sor.dp,
+					 &outp, head, datarate);
+		}
+
+		exec_clkcmp(priv, head, 0, pclk, &outp);
+
+		if (!outp.location && outp.type == DCB_OUTPUT_ANALOG) {
+			oreg = 0x614280 + (ffs(outp.or) - 1) * 0x800;
+			oval = 0x00000000;
+			hval = 0x00000000;
+			mask = 0xffffffff;
+		} else
+		if (!outp.location) {
+			if (outp.type == DCB_OUTPUT_DP)
+				nv50_disp_intr_unk20_2_dp(priv, &outp, pclk);
+			oreg = 0x614300 + (ffs(outp.or) - 1) * 0x800;
+			oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+			hval = 0x00000000;
+			mask = 0x00000707;
+		} else {
+			oreg = 0x614380 + (ffs(outp.or) - 1) * 0x800;
+			oval = 0x00000001;
+			hval = 0x00000001;
+			mask = 0x00000707;
+		}
+
+		nv_mask(priv, hreg, 0x0000000f, hval);
+		nv_mask(priv, oreg, mask, oval);
+	}
+}
+
+/* If programming a TMDS output on a SOR that can also be configured for
+ * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
+ *
+ * It looks like the VBIOS TMDS scripts make an attempt at this; however,
+ * the scripts on at least one board I have only switch it off on
+ * link 0, causing a blank display if the output has previously been
+ * programmed for DisplayPort.
+ */
+static void
+nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	const int link = !(outp->sorconf.link & 1);
+	const int   or = ffs(outp->or) - 1;
+	const u32 loff = (or * 0x800) + (link * 0x80);
+	const u16 mask = (outp->sorconf.link << 6) | outp->or;
+	u8  ver, hdr;
+
+	if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
+		nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
+}
+
+static void
+nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head)
+{
+	struct dcb_output outp;
+	u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+	if (exec_clkcmp(priv, head, 1, pclk, &outp) != ~0) {
+		if (outp.location == 0 && outp.type == DCB_OUTPUT_TMDS)
+			nv50_disp_intr_unk40_0_tmds(priv, &outp);
+		else
+		if (outp.location == 1 && outp.type == DCB_OUTPUT_DP) {
+			u32 soff = (ffs(outp.or) - 1) * 0x08;
+			u32 ctrl = nv_rd32(priv, 0x610b84 + soff);
+			u32 datarate;
+
+			switch ((ctrl & 0x000f0000) >> 16) {
+			case 6: datarate = pclk * 30 / 8; break;
+			case 5: datarate = pclk * 24 / 8; break;
+			case 2:
+			default:
+				datarate = pclk * 18 / 8;
+				break;
+			}
+
+			nouveau_dp_train(&priv->base, priv->pior.dp,
+					 &outp, head, datarate);
+		}
+	}
+}
+
+void
+nv50_disp_intr_supervisor(struct work_struct *work)
+{
+	struct nv50_disp_priv *priv =
+		container_of(work, struct nv50_disp_priv, supervisor);
+	u32 super = nv_rd32(priv, 0x610030);
+	int head;
+
+	nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super);
+
+	if (priv->super & 0x00000010) {
+		for (head = 0; head < priv->head.nr; head++) {
+			if (!(super & (0x00000020 << head)))
+				continue;
+			if (!(super & (0x00000080 << head)))
+				continue;
+			nv50_disp_intr_unk10_0(priv, head);
+		}
+	} else
+	if (priv->super & 0x00000020) {
+		for (head = 0; head < priv->head.nr; head++) {
+			if (!(super & (0x00000080 << head)))
+				continue;
+			nv50_disp_intr_unk20_0(priv, head);
+		}
+		for (head = 0; head < priv->head.nr; head++) {
+			if (!(super & (0x00000200 << head)))
+				continue;
+			nv50_disp_intr_unk20_1(priv, head);
+		}
+		for (head = 0; head < priv->head.nr; head++) {
+			if (!(super & (0x00000080 << head)))
+				continue;
+			nv50_disp_intr_unk20_2(priv, head);
+		}
+	} else
+	if (priv->super & 0x00000040) {
+		for (head = 0; head < priv->head.nr; head++) {
+			if (!(super & (0x00000080 << head)))
+				continue;
+			nv50_disp_intr_unk40_0(priv, head);
+		}
+	}
+
+	nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+void
+nv50_disp_intr(struct nouveau_subdev *subdev)
+{
+	struct nv50_disp_priv *priv = (void *)subdev;
+	u32 intr0 = nv_rd32(priv, 0x610020);
+	u32 intr1 = nv_rd32(priv, 0x610024);
+
+	if (intr0 & 0x001f0000) {
+		nv50_disp_intr_error(priv);
+		intr0 &= ~0x001f0000;
+	}
+
+	if (intr1 & 0x00000004) {
+		nouveau_event_trigger(priv->base.vblank, 0);
+		nv_wr32(priv, 0x610024, 0x00000004);
+		intr1 &= ~0x00000004;
+	}
+
+	if (intr1 & 0x00000008) {
+		nouveau_event_trigger(priv->base.vblank, 1);
+		nv_wr32(priv, 0x610024, 0x00000008);
+		intr1 &= ~0x00000008;
+	}
+
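+	/* supervisor interrupts are handed off to a workqueue, presumably
+	 * because the vbios scripts they trigger may sleep
+	 */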
+	if (intr1 & 0x00000070) {
+		priv->super = (intr1 & 0x00000070);
+		schedule_work(&priv->supervisor);
+		nv_wr32(priv, 0x610024, priv->super);
+		intr1 &= ~0x00000070;
+	}
+}
+
+static int
+nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nv50_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nv50_disp_intr;
+	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
+	priv->sclass = nv50_disp_sclass;
+	priv->head.nr = 2;
+	priv->dac.nr = 3;
+	priv->sor.nr = 2;
+	priv->pior.nr = 3;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->pior.power = nv50_pior_power;
+	priv->pior.dp = &nv50_pior_dp_func;
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
new file mode 100644
index 0000000..1ae6ceb
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -0,0 +1,150 @@
+#ifndef __NV50_DISP_H__
+#define __NV50_DISP_H__
+
+#include <core/parent.h>
+#include <core/namedb.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+#include <core/event.h>
+
+#include <engine/dmaobj.h>
+#include <engine/disp.h>
+
+#include "dport.h"
+
+struct nv50_disp_priv {
+	struct nouveau_disp base;
+	struct nouveau_oclass *sclass;
+
+	struct work_struct supervisor;
+	u32 super;
+
+	struct {
+		int nr;
+	} head;
+	struct {
+		int nr;
+		int (*power)(struct nv50_disp_priv *, int dac, u32 data);
+		int (*sense)(struct nv50_disp_priv *, int dac, u32 load);
+	} dac;
+	struct {
+		int nr;
+		int (*power)(struct nv50_disp_priv *, int sor, u32 data);
+		int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
+		int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
+		u32 lvdsconf;
+		const struct nouveau_dp_func *dp;
+	} sor;
+	struct {
+		int nr;
+		int (*power)(struct nv50_disp_priv *, int ext, u32 data);
+		u8 type[3];
+		const struct nouveau_dp_func *dp;
+	} pior;
+};
+
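+/* the *_MTHD() macros expand to a first,last method pair for use in the
+ * nouveau_omthds method tables
+ */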
+#define DAC_MTHD(n) (n), (n) + 0x03
+
+int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_dac_power(struct nv50_disp_priv *, int, u32);
+int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
+
+#define SOR_MTHD(n) (n), (n) + 0x3f
+
+int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+
+int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+
+int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_sor_power(struct nv50_disp_priv *, int, u32);
+
+int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
+		           u32, struct dcb_output *);
+int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16,
+		           u32, struct dcb_output *);
+int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+		      struct dcb_output *);
+int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+		       struct dcb_output *);
+int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+		       struct dcb_output *);
+
+int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+		      struct dcb_output *);
+int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+		       struct dcb_output *);
+int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+		       struct dcb_output *);
+
+#define PIOR_MTHD(n) (n), (n) + 0x03
+
+int nv50_pior_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_pior_power(struct nv50_disp_priv *, int, u32);
+
+struct nv50_disp_base {
+	struct nouveau_parent base;
+	struct nouveau_ramht *ramht;
+	u32 chan;
+};
+
+struct nv50_disp_chan {
+	struct nouveau_namedb base;
+	int chid;
+};
+
+int  nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, int, int, void **);
+void nv50_disp_chan_destroy(struct nv50_disp_chan *);
+u32  nv50_disp_chan_rd32(struct nouveau_object *, u64);
+void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
+
+#define nv50_disp_chan_init(a)                                                 \
+	nouveau_namedb_init(&(a)->base)
+#define nv50_disp_chan_fini(a,b)                                               \
+	nouveau_namedb_fini(&(a)->base, (b))
+
+int  nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, u32, int, int, void **);
+void nv50_disp_dmac_dtor(struct nouveau_object *);
+
+struct nv50_disp_dmac {
+	struct nv50_disp_chan base;
+	struct nouveau_dmaobj *pushdma;
+	u32 push;
+};
+
+struct nv50_disp_pioc {
+	struct nv50_disp_chan base;
+};
+
+extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
+extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_intr_supervisor(struct work_struct *);
+void nv50_disp_intr(struct nouveau_subdev *);
+
+extern struct nouveau_omthds nv84_disp_base_omthds[];
+
+extern struct nouveau_omthds nva3_disp_base_omthds[];
+
+extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
+extern struct nouveau_oclass nvd0_disp_cclass;
+void nvd0_disp_intr_supervisor(struct work_struct *);
+void nvd0_disp_intr(struct nouveau_subdev *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
new file mode 100644
index 0000000..d8c74c0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv84_disp_sclass[] = {
+	{ NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+	{ NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+	{ NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+	{ NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+	{ NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+	{}
+};
+
+struct nouveau_omthds
+nv84_disp_base_omthds[] = {
+	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+	{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_PWR)       , nv50_pior_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR)  , nv50_pior_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_DP_PWR)    , nv50_pior_mthd },
+	{},
+};
+
+static struct nouveau_oclass
+nv84_disp_base_oclass[] = {
+	{ NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+	{}
+};
+
+static int
+nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nv84_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nv50_disp_intr;
+	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
+	priv->sclass = nv84_disp_sclass;
+	priv->head.nr = 2;
+	priv->dac.nr = 3;
+	priv->sor.nr = 2;
+	priv->pior.nr = 3;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hdmi = nv84_hdmi_ctrl;
+	priv->pior.power = nv50_pior_power;
+	priv->pior.dp = &nv50_pior_dp_func;
+	return 0;
+}
+
+struct nouveau_oclass
+nv84_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x82),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
new file mode 100644
index 0000000..a66f949
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv94_disp_sclass[] = {
+	{ NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+	{ NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+	{ NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+	{ NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+	{ NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+	{}
+};
+
+static struct nouveau_omthds
+nv94_disp_base_omthds[] = {
+	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+	{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_PWR)       , nv50_pior_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR)  , nv50_pior_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_DP_PWR)    , nv50_pior_mthd },
+	{},
+};
+
+static struct nouveau_oclass
+nv94_disp_base_oclass[] = {
+	{ NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds },
+	{}
+};
+
+static int
+nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nv94_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nv50_disp_intr;
+	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
+	priv->sclass = nv94_disp_sclass;
+	priv->head.nr = 2;
+	priv->dac.nr = 3;
+	priv->sor.nr = 4;
+	priv->pior.nr = 3;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hdmi = nv84_hdmi_ctrl;
+	priv->sor.dp = &nv94_sor_dp_func;
+	priv->pior.power = nv50_pior_power;
+	priv->pior.dp = &nv50_pior_dp_func;
+	return 0;
+}
+
+struct nouveau_oclass
+nv94_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x88),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv94_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
new file mode 100644
index 0000000..6cf8eef
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva0_disp_sclass[] = {
+	{ NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+	{ NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+	{ NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+	{ NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+	{ NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nva0_disp_base_oclass[] = {
+	{ NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+	{}
+};
+
+static int
+nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nva0_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nv50_disp_intr;
+	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
+	priv->sclass = nva0_disp_sclass;
+	priv->head.nr = 2;
+	priv->dac.nr = 3;
+	priv->sor.nr = 2;
+	priv->pior.nr = 3;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hdmi = nv84_hdmi_ctrl;
+	priv->pior.power = nv50_pior_power;
+	priv->pior.dp = &nv50_pior_dp_func;
+	return 0;
+}
+
+struct nouveau_oclass
+nva0_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x83),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva0_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
new file mode 100644
index 0000000..b754131
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva3_disp_sclass[] = {
+	{ NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+	{ NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+	{ NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+	{ NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+	{ NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+	{}
+};
+
+struct nouveau_omthds
+nva3_disp_base_omthds[] = {
+	{ SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+	{ SOR_MTHD(NVA3_DISP_SOR_HDA_ELD)     , nv50_sor_mthd },
+	{ SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+	{ SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+	{ DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_PWR)       , nv50_pior_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR)  , nv50_pior_mthd },
+	{ PIOR_MTHD(NV50_DISP_PIOR_DP_PWR)    , nv50_pior_mthd },
+	{},
+};
+
+static struct nouveau_oclass
+nva3_disp_base_oclass[] = {
+	{ NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
+	{}
+};
+
+static int
+nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
+				  "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nva3_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nv50_disp_intr;
+	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
+	priv->sclass = nva3_disp_sclass;
+	priv->head.nr = 2;
+	priv->dac.nr = 3;
+	priv->sor.nr = 4;
+	priv->pior.nr = 3;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hda_eld = nva3_hda_eld;
+	priv->sor.hdmi = nva3_hdmi_ctrl;
+	priv->sor.dp = &nv94_sor_dp_func;
+	priv->pior.power = nv50_pior_power;
+	priv->pior.dp = &nv50_pior_dp_func;
+	return 0;
+}
+
+struct nouveau_oclass
+nva3_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x85),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
new file mode 100644
index 0000000..9ee4004
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -0,0 +1,994 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
+
+#include <engine/disp.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/clock.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
+			     struct nouveau_object *object, u32 name)
+{
+	struct nv50_disp_base *base = (void *)parent->parent;
+	struct nv50_disp_chan *chan = (void *)parent;
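+	/* pack the hash-table context entry: bit 0 marks it valid,
+	 * bits 9+ carry the object's offset within the table's memory,
+	 * and bits 27+ tag the owning channel id */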
+	u32 addr = nv_gpuobj(object)->node->offset;
+	u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
+	return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
+}
+
+static void
+nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+	struct nv50_disp_base *base = (void *)parent->parent;
+	nouveau_ramht_remove(base->ramht, cookie);
+}
+
+static int
+nvd0_disp_dmac_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *dmac = (void *)object;
+	int chid = dmac->base.chid;
+	int ret;
+
+	ret = nv50_disp_chan_init(&dmac->base);
+	if (ret)
+		return ret;
+
+	/* enable error reporting */
+	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+	/* initialise channel for dma command submission */
+	nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
+	nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
+	nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
+	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+	nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+	nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
+
+	/* wait for it to go inactive */
+	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
+		nv_error(dmac, "init: 0x%08x\n",
+			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *dmac = (void *)object;
+	int chid = dmac->base.chid;
+
+	/* deactivate channel */
+	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+		nv_error(dmac, "fini: 0x%08x\n",
+			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	/* disable error reporting */
+	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+	return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nvd0_disp_mast_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_mast_class *args = data;
+	struct nv50_disp_dmac *mast;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+				     0, sizeof(*mast), (void **)&mast);
+	*pobject = nv_object(mast);
+	if (ret)
+		return ret;
+
+	nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
+	nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
+	return 0;
+}
+
+static int
+nvd0_disp_mast_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *mast = (void *)object;
+	int ret;
+
+	ret = nv50_disp_chan_init(&mast->base);
+	if (ret)
+		return ret;
+
+	/* enable error reporting */
+	nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
+	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
+
+	/* initialise channel for dma command submission */
+	nv_wr32(priv, 0x610494, mast->push);
+	nv_wr32(priv, 0x610498, 0x00010000);
+	nv_wr32(priv, 0x61049c, 0x00000001);
+	nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
+	nv_wr32(priv, 0x640000, 0x00000000);
+	nv_wr32(priv, 0x610490, 0x01000013);
+
+	/* wait for it to go inactive */
+	if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
+		nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_dmac *mast = (void *)object;
+
+	/* deactivate channel */
+	nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
+	nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
+	if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
+		nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	/* disable error reporting */
+	nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
+	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
+
+	return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_mast_ofuncs = {
+	.ctor = nvd0_disp_mast_ctor,
+	.dtor = nv50_disp_dmac_dtor,
+	.init = nvd0_disp_mast_init,
+	.fini = nvd0_disp_mast_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_sync_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_sync_class *args = data;
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nv50_disp_dmac *dmac;
+	int ret;
+
+	if (size < sizeof(*args) || args->head >= priv->head.nr)
+		return -EINVAL;
+
+	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+				     1 + args->head, sizeof(*dmac),
+				     (void **)&dmac);
+	*pobject = nv_object(dmac);
+	if (ret)
+		return ret;
+
+	nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+	nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+	return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_sync_ofuncs = {
+	.ctor = nvd0_disp_sync_ctor,
+	.dtor = nv50_disp_dmac_dtor,
+	.init = nvd0_disp_dmac_init,
+	.fini = nvd0_disp_dmac_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_ovly_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_ovly_class *args = data;
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nv50_disp_dmac *dmac;
+	int ret;
+
+	if (size < sizeof(*args) || args->head >= priv->head.nr)
+		return -EINVAL;
+
+	ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+				     5 + args->head, sizeof(*dmac),
+				     (void **)&dmac);
+	*pobject = nv_object(dmac);
+	if (ret)
+		return ret;
+
+	nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+	nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+	return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_ovly_ofuncs = {
+	.ctor = nvd0_disp_ovly_ctor,
+	.dtor = nv50_disp_dmac_dtor,
+	.init = nvd0_disp_dmac_init,
+	.fini = nvd0_disp_dmac_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_pioc_create_(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, int chid,
+		       int length, void **pobject)
+{
+	return nv50_disp_chan_create_(parent, engine, oclass, chid,
+				      length, pobject);
+}
+
+static void
+nvd0_disp_pioc_dtor(struct nouveau_object *object)
+{
+	struct nv50_disp_pioc *pioc = (void *)object;
+	nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nvd0_disp_pioc_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_pioc *pioc = (void *)object;
+	int chid = pioc->base.chid;
+	int ret;
+
+	ret = nv50_disp_chan_init(&pioc->base);
+	if (ret)
+		return ret;
+
+	/* enable error reporting */
+	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+	/* activate channel */
+	nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
+	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
+		nv_error(pioc, "init: 0x%08x\n",
+			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_pioc *pioc = (void *)object;
+	int chid = pioc->base.chid;
+
+	nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
+		nv_error(pioc, "timeout: 0x%08x\n",
+			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	/* disable error reporting */
+	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+	return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_oimm_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_oimm_class *args = data;
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nv50_disp_pioc *pioc;
+	int ret;
+
+	if (size < sizeof(*args) || args->head >= priv->head.nr)
+		return -EINVAL;
+
+	ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
+				     sizeof(*pioc), (void **)&pioc);
+	*pobject = nv_object(pioc);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_oimm_ofuncs = {
+	.ctor = nvd0_disp_oimm_ctor,
+	.dtor = nvd0_disp_pioc_dtor,
+	.init = nvd0_disp_pioc_init,
+	.fini = nvd0_disp_pioc_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_curs_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_display_curs_class *args = data;
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nv50_disp_pioc *pioc;
+	int ret;
+
+	if (size < sizeof(*args) || args->head >= priv->head.nr)
+		return -EINVAL;
+
+	ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
+				     sizeof(*pioc), (void **)&pioc);
+	*pobject = nv_object(pioc);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_curs_ofuncs = {
+	.ctor = nvd0_disp_curs_ctor,
+	.dtor = nvd0_disp_pioc_dtor,
+	.init = nvd0_disp_pioc_init,
+	.fini = nvd0_disp_pioc_fini,
+	.rd32 = nv50_disp_chan_rd32,
+	.wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static void
+nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head)
+{
+	nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
+}
+
+static void
+nvd0_disp_base_vblank_disable(struct nouveau_event *event, int head)
+{
+	nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
+}
+
+static int
+nvd0_disp_base_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv = (void *)engine;
+	struct nv50_disp_base *base;
+	int ret;
+
+	ret = nouveau_parent_create(parent, engine, oclass, 0,
+				    priv->sclass, 0, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	priv->base.vblank->priv = priv;
+	priv->base.vblank->enable = nvd0_disp_base_vblank_enable;
+	priv->base.vblank->disable = nvd0_disp_base_vblank_disable;
+
+	return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
+				&base->ramht);
+}
+
+static void
+nvd0_disp_base_dtor(struct nouveau_object *object)
+{
+	struct nv50_disp_base *base = (void *)object;
+	nouveau_ramht_ref(NULL, &base->ramht);
+	nouveau_parent_destroy(&base->base);
+}
+
+static int
+nvd0_disp_base_init(struct nouveau_object *object)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_base *base = (void *)object;
+	int ret, i;
+	u32 tmp;
+
+	ret = nouveau_parent_init(&base->base);
+	if (ret)
+		return ret;
+
+	/* The below segments of code copying values from one register to
+	 * another appear to inform EVO of the display capabilities or
+	 * something similar.
+	 */
+
+	/* ... CRTC caps */
+	for (i = 0; i < priv->head.nr; i++) {
+		tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+		nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
+		tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+		nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
+		tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+		nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
+	}
+
+	/* ... DAC caps */
+	for (i = 0; i < priv->dac.nr; i++) {
+		tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+		nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
+	}
+
+	/* ... SOR caps */
+	for (i = 0; i < priv->sor.nr; i++) {
+		tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+		nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
+	}
+
+	/* steal display away from vbios, or something like that */
+	if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
+		nv_wr32(priv, 0x6100ac, 0x00000100);
+		nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+		if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+			nv_error(priv, "timeout acquiring display\n");
+			return -EBUSY;
+		}
+	}
+
+	/* point at display engine memory area (hash table, objects) */
+	nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
+
+	/* enable supervisor interrupts, disable everything else */
+	nv_wr32(priv, 0x610090, 0x00000000);
+	nv_wr32(priv, 0x6100a0, 0x00000000);
+	nv_wr32(priv, 0x6100b0, 0x00000307);
+
+	return 0;
+}
+
+static int
+nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	struct nv50_disp_base *base = (void *)object;
+
+	/* disable all interrupts */
+	nv_wr32(priv, 0x6100b0, 0x00000000);
+
+	return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_base_ofuncs = {
+	.ctor = nvd0_disp_base_ctor,
+	.dtor = nvd0_disp_base_dtor,
+	.init = nvd0_disp_base_init,
+	.fini = nvd0_disp_base_fini,
+};
+
+static struct nouveau_oclass
+nvd0_disp_base_oclass[] = {
+	{ NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+	{}
+};
+
+static struct nouveau_oclass
+nvd0_disp_sclass[] = {
+	{ NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+	{ NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+	{ NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+	{ NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+	{ NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+	    struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	    struct nvbios_outp *info)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	u16 mask, type, data;
+
+	if (outp < 4) {
+		type = DCB_OUTPUT_ANALOG;
+		mask = 0;
+	} else {
+		outp -= 4;
+		switch (ctrl & 0x00000f00) {
+		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+		default:
+			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+			return 0x0000;
+		}
+		dcb->sorconf.link = mask;
+	}
+
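+	/* build the DCB match mask: the low bits select the output
+	 * index, bits 6-7 the SOR link, and bits 8+ the head (as
+	 * consumed by dcb_outp_match/nvbios_outp_match below) */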
+	mask  = 0x00c0 & (mask << 6);
+	mask |= 0x0001 << outp;
+	mask |= 0x0100 << head;
+
+	data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+	if (!data)
+		return 0x0000;
+
+	return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int id)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_outp info;
+	struct dcb_output dcb;
+	u8  ver, hdr, cnt, len;
+	u32 ctrl = 0x00000000;
+	u16 data;
+	int outp;
+
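+	/* scan the armed output-control state to find which OR is
+	 * currently bound to this head */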
+	for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
+		ctrl = nv_rd32(priv, 0x640180 + (outp * 0x20));
+		if (ctrl & (1 << head))
+			break;
+	}
+
+	if (outp == 8)
+		return false;
+
+	data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+	if (data) {
+		struct nvbios_init init = {
+			.subdev = nv_subdev(priv),
+			.bios = bios,
+			.offset = info.script[id],
+			.outp = &dcb,
+			.crtc = head,
+			.execute = 1,
+		};
+
+		return nvbios_exec(&init) == 0;
+	}
+
+	return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
+	    u32 pclk, struct dcb_output *dcb)
+{
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_outp info1;
+	struct nvbios_ocfg info2;
+	u8  ver, hdr, cnt, len;
+	u32 ctrl = 0x00000000;
+	u32 data, conf = ~0;
+	int outp;
+
+	for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
+		ctrl = nv_rd32(priv, 0x660180 + (outp * 0x20));
+		if (ctrl & (1 << head))
+			break;
+	}
+
+	if (outp == 8)
+		return conf;
+
+	data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
+	if (data == 0x0000)
+		return conf;
+
+	switch (dcb->type) {
+	case DCB_OUTPUT_TMDS:
+		conf = (ctrl & 0x00000f00) >> 8;
+		if (pclk >= 165000)
+			conf |= 0x0100;
+		break;
+	case DCB_OUTPUT_LVDS:
+		conf = priv->sor.lvdsconf;
+		break;
+	case DCB_OUTPUT_DP:
+		conf = (ctrl & 0x00000f00) >> 8;
+		break;
+	case DCB_OUTPUT_ANALOG:
+	default:
+		conf = 0x00ff;
+		break;
+	}
+
+	data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+	if (data && id < 0xff) {
+		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+		if (data) {
+			struct nvbios_init init = {
+				.subdev = nv_subdev(priv),
+				.bios = bios,
+				.offset = data,
+				.outp = dcb,
+				.crtc = head,
+				.execute = 1,
+			};
+
+			nvbios_exec(&init);
+		}
+	}
+
+	return conf;
+}
+
+static void
+nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
+{
+	exec_script(priv, head, 1);
+}
+
+static void
+nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
+{
+	exec_script(priv, head, 2);
+}
+
+static void
+nvd0_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
+{
+	struct nouveau_clock *clk = nouveau_clock(priv);
+	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+	if (pclk)
+		clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+	nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
+}
+
+static void
+nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
+			 struct dcb_output *outp)
+{
+	const int or = ffs(outp->or) - 1;
+	const u32 ctrl = nv_rd32(priv, 0x660200 + (or   * 0x020));
+	const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
+	const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+	const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
+	const u32 hoff = (head * 0x800);
+	const u32 soff = (  or * 0x800);
+	const u32 loff = (link * 0x080) + soff;
+	const u32 symbol = 100000;
+	const u32 TU = 64;
+	u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+	u32 clksor = nv_rd32(priv, 0x612300 + soff);
+	u32 datarate, link_nr, link_bw, bits;
+	u64 ratio, value;
+
+	if      ((conf & 0x3c0) == 0x180) bits = 30;
+	else if ((conf & 0x3c0) == 0x140) bits = 24;
+	else                              bits = 18;
+	datarate = (pclk * bits) / 8;
+
+	if      (dpctrl > 0x00030000) link_nr = 4;
+	else if (dpctrl > 0x00010000) link_nr = 2;
+	else			      link_nr = 1;
+
+	link_bw  = (clksor & 0x007c0000) >> 18;
+	link_bw *= 27000;
+
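+	/* watermark for the 64-symbol transfer unit: with "fill" being
+	 * the fraction of link bandwidth carrying pixel data, this works
+	 * out to roughly TU * fill * (1 - fill) + 5.  e.g. 1080p60 at
+	 * 24bpp over 4 lanes of 2.7Gbps gives fill ~0.41 and a value of
+	 * ~20.  this interpretation is best-effort; the hardware field
+	 * itself is undocumented.
+	 */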
+	ratio  = datarate;
+	ratio *= symbol;
+	do_div(ratio, link_nr * link_bw);
+
+	value  = (symbol - ratio) * TU;
+	value *= ratio;
+	do_div(value, symbol);
+	do_div(value, symbol);
+
+	value += 5;
+	value |= 0x08000000;
+
+	nv_wr32(priv, 0x616610 + hoff, value);
+}
+
+static void
+nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
+{
+	struct dcb_output outp;
+	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+	u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
+	if (conf != ~0) {
+		u32 addr, data;
+
+		if (outp.type == DCB_OUTPUT_DP) {
+			u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
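+			/* derive the data rate the link must carry from
+			 * the pixel clock and the programmed depth
+			 * (30/24/18 bpp, divided by 8 bits per byte)
+			 * before link training */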
+			switch ((sync & 0x000003c0) >> 6) {
+			case 6: pclk = pclk * 30 / 8; break;
+			case 5: pclk = pclk * 24 / 8; break;
+			case 2:
+			default:
+				pclk = pclk * 18 / 8;
+				break;
+			}
+
+			nouveau_dp_train(&priv->base, priv->sor.dp,
+					 &outp, head, pclk);
+		}
+
+		exec_clkcmp(priv, head, 0, pclk, &outp);
+
+		if (outp.type == DCB_OUTPUT_ANALOG) {
+			addr = 0x612280 + (ffs(outp.or) - 1) * 0x800;
+			data = 0x00000000;
+		} else {
+			if (outp.type == DCB_OUTPUT_DP)
+				nvd0_disp_intr_unk2_2_tu(priv, head, &outp);
+			addr = 0x612300 + (ffs(outp.or) - 1) * 0x800;
+			data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+		}
+
+		nv_mask(priv, addr, 0x00000707, data);
+	}
+}
+
+static void
+nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
+{
+	struct dcb_output outp;
+	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+	exec_clkcmp(priv, head, 1, pclk, &outp);
+}
+
+void
+nvd0_disp_intr_supervisor(struct work_struct *work)
+{
+	struct nv50_disp_priv *priv =
+		container_of(work, struct nv50_disp_priv, supervisor);
+	u32 mask[4];
+	int head;
+
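+	/* the supervisor fires in three stages: 0x1 before heads are
+	 * disabled, 0x2 when clocks and outputs must be reprogrammed,
+	 * and 0x4 once scanout resumes.  the unk* naming of the
+	 * handlers reflects that the exact semantics aren't fully
+	 * understood. */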
+	nv_debug(priv, "supervisor %08x\n", priv->super);
+	for (head = 0; head < priv->head.nr; head++) {
+		mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
+		nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
+	}
+
+	if (priv->super & 0x00000001) {
+		for (head = 0; head < priv->head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvd0_disp_intr_unk1_0(priv, head);
+		}
+	} else
+	if (priv->super & 0x00000002) {
+		for (head = 0; head < priv->head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvd0_disp_intr_unk2_0(priv, head);
+		}
+		for (head = 0; head < priv->head.nr; head++) {
+			if (!(mask[head] & 0x00010000))
+				continue;
+			nvd0_disp_intr_unk2_1(priv, head);
+		}
+		for (head = 0; head < priv->head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvd0_disp_intr_unk2_2(priv, head);
+		}
+	} else
+	if (priv->super & 0x00000004) {
+		for (head = 0; head < priv->head.nr; head++) {
+			if (!(mask[head] & 0x00001000))
+				continue;
+			nvd0_disp_intr_unk4_0(priv, head);
+		}
+	}
+
+	for (head = 0; head < priv->head.nr; head++)
+		nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
+	nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+void
+nvd0_disp_intr(struct nouveau_subdev *subdev)
+{
+	struct nv50_disp_priv *priv = (void *)subdev;
+	u32 intr = nv_rd32(priv, 0x610088);
+	int i;
+
+	if (intr & 0x00000001) {
+		u32 stat = nv_rd32(priv, 0x61008c);
+		nv_wr32(priv, 0x61008c, stat);
+		intr &= ~0x00000001;
+	}
+
+	if (intr & 0x00000002) {
+		u32 stat = nv_rd32(priv, 0x61009c);
+		int chid = ffs(stat) - 1;
+		if (chid >= 0) {
+			u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
+			u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
+			u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
+
+			nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
+				       "0x%08x 0x%08x\n",
+				 chid, (mthd & 0x0000ffc), data, mthd, unkn);
+			nv_wr32(priv, 0x61009c, (1 << chid));
+			nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
+		}
+
+		intr &= ~0x00000002;
+	}
+
+	if (intr & 0x00100000) {
+		u32 stat = nv_rd32(priv, 0x6100ac);
+		if (stat & 0x00000007) {
+			priv->super = (stat & 0x00000007);
+			schedule_work(&priv->supervisor);
+			nv_wr32(priv, 0x6100ac, priv->super);
+			stat &= ~0x00000007;
+		}
+
+		if (stat) {
+			nv_info(priv, "unknown intr24 0x%08x\n", stat);
+			nv_wr32(priv, 0x6100ac, stat);
+		}
+
+		intr &= ~0x00100000;
+	}
+
+	for (i = 0; i < priv->head.nr; i++) {
+		u32 mask = 0x01000000 << i;
+		if (mask & intr) {
+			u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
+			if (stat & 0x00000001)
+				nouveau_event_trigger(priv->base.vblank, i);
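+			/* the zero-mask and trailing read below appear
+			 * to act as an ack/flush of the vblank status */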
+			nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
+			nv_rd32(priv, 0x6100c0 + (i * 0x800));
+		}
+	}
+}
+
+static int
+nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int heads = nv_rd32(parent, 0x022448);
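+	/* unlike earlier chips, the head count is read back from
+	 * hardware rather than hardcoded */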
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, heads,
+				  "PDISP", "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nvd0_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nvd0_disp_intr;
+	INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+	priv->sclass = nvd0_disp_sclass;
+	priv->head.nr = heads;
+	priv->dac.nr = 3;
+	priv->sor.nr = 4;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hda_eld = nvd0_hda_eld;
+	priv->sor.hdmi = nvd0_hdmi_ctrl;
+	priv->sor.dp = &nvd0_sor_dp_func;
+	return 0;
+}
+
+struct nouveau_oclass
+nvd0_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x90),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvd0_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
new file mode 100644
index 0000000..20725b3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nve0_disp_sclass[] = {
+	{ NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+	{ NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+	{ NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+	{ NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+	{ NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nve0_disp_base_oclass[] = {
+	{ NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+	{}
+};
+
+static int
+nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int heads = nv_rd32(parent, 0x022448);
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, heads,
+				  "PDISP", "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nve0_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nvd0_disp_intr;
+	INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+	priv->sclass = nve0_disp_sclass;
+	priv->head.nr = heads;
+	priv->dac.nr = 3;
+	priv->sor.nr = 4;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hda_eld = nvd0_hda_eld;
+	priv->sor.hdmi = nvd0_hdmi_ctrl;
+	priv->sor.dp = &nvd0_sor_dp_func;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x91),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
new file mode 100644
index 0000000..a488c36
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nvf0_disp_sclass[] = {
+	{ NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+	{ NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+	{ NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+	{ NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+	{ NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nvf0_disp_base_oclass[] = {
+	{ NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+	{}
+};
+
+static int
+nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int heads = nv_rd32(parent, 0x022448);
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, heads,
+				  "PDISP", "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nvf0_disp_base_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nvd0_disp_intr;
+	INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+	priv->sclass = nvf0_disp_sclass;
+	priv->head.nr = heads;
+	priv->dac.nr = 3;
+	priv->sor.nr = 4;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hda_eld = nvd0_hda_eld;
+	priv->sor.hdmi = nvd0_hdmi_ctrl;
+	priv->sor.dp = &nvd0_sor_dp_func;
+	return 0;
+}
+
+struct nouveau_oclass
+nvf0_disp_oclass = {
+	.handle = NV_ENGINE(DISP, 0x92),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvf0_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
new file mode 100644
index 0000000..2c8ce35
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+#include <subdev/i2c.h>
+
+#include "nv50.h"
+
+/******************************************************************************
+ * DisplayPort
+ *****************************************************************************/
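+/* PIORs drive DisplayPort through external encoders, so link control
+ * is tunnelled over the encoder's auxiliary i2c channel rather than
+ * programmed into SOR registers directly */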
+static struct nouveau_i2c_port *
+nv50_pior_dp_find(struct nouveau_disp *disp, struct dcb_output *outp)
+{
+	struct nouveau_i2c *i2c = nouveau_i2c(disp);
+	return i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
+}
+
+static int
+nv50_pior_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
+		     int head, int pattern)
+{
+	struct nouveau_i2c_port *port;
+	int ret = -EINVAL;
+
+	port = nv50_pior_dp_find(disp, outp);
+	if (port) {
+		if (port->func->pattern)
+			ret = port->func->pattern(port, pattern);
+		else
+			ret = 0;
+	}
+
+	return ret;
+}
+
+static int
+nv50_pior_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+		     int head, int lane_nr, int link_bw, bool enh)
+{
+	struct nouveau_i2c_port *port;
+	int ret = -EINVAL;
+
+	port = nv50_pior_dp_find(disp, outp);
+	if (port && port->func->lnk_ctl)
+		ret = port->func->lnk_ctl(port, lane_nr, link_bw, enh);
+
+	return ret;
+}
+
+static int
+nv50_pior_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+		     int head, int lane, int vsw, int pre)
+{
+	struct nouveau_i2c_port *port;
+	int ret = -EINVAL;
+
+	port = nv50_pior_dp_find(disp, outp);
+	if (port) {
+		if (port->func->drv_ctl)
+			ret = port->func->drv_ctl(port, lane, vsw, pre);
+		else
+			ret = 0;
+	}
+
+	return ret;
+}
+
+const struct nouveau_dp_func
+nv50_pior_dp_func = {
+	.pattern = nv50_pior_dp_pattern,
+	.lnk_ctl = nv50_pior_dp_lnk_ctl,
+	.drv_ctl = nv50_pior_dp_drv_ctl,
+};
+
+/******************************************************************************
+ * General PIOR handling
+ *****************************************************************************/
+int
+nv50_pior_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+	const u32 stat = data & NV50_DISP_PIOR_PWR_STATE;
+	const u32 soff = (or * 0x800);
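+	/* wait for any previous state change to complete, latch the
+	 * requested power state, then wait for the busy bit to clear */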
+	nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
+	nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | stat);
+	nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
+	return 0;
+}
+
+int
+nv50_pior_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	const u8 type = (mthd & NV50_DISP_PIOR_MTHD_TYPE) >> 12;
+	const u8 or   = (mthd & NV50_DISP_PIOR_MTHD_OR);
+	u32 *data = args;
+	int ret;
+
+	if (size < sizeof(u32))
+		return -EINVAL;
+
+	mthd &= ~NV50_DISP_PIOR_MTHD_TYPE;
+	mthd &= ~NV50_DISP_PIOR_MTHD_OR;
+	switch (mthd) {
+	case NV50_DISP_PIOR_PWR:
+		ret = priv->pior.power(priv, or, data[0]);
+		priv->pior.type[or] = type;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
new file mode 100644
index 0000000..526b752
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+	const u32 stat = data & NV50_DISP_SOR_PWR_STATE;
+	const u32 soff = (or * 0x800);
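+	/* same dance as the PIOR: wait idle, latch the new power state,
+	 * wait for completion, plus an extra wait on the SOR status */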
+	nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+	nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
+	nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+	nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
+	return 0;
+}
+
+int
+nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+	struct nv50_disp_priv *priv = (void *)object->engine;
+	const u8  head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
+	const u8    or = (mthd & NV50_DISP_SOR_MTHD_OR);
+	u32 data;
+	int ret = -EINVAL;
+
+	if (size < sizeof(u32))
+		return -EINVAL;
+	data = *(u32 *)args;
+
+	switch (mthd & ~0x3f) {
+	case NV50_DISP_SOR_PWR:
+		ret = priv->sor.power(priv, or, data);
+		break;
+	case NVA3_DISP_SOR_HDA_ELD:
+		ret = priv->sor.hda_eld(priv, or, args, size);
+		break;
+	case NV84_DISP_SOR_HDMI_PWR:
+		ret = priv->sor.hdmi(priv, head, or, data);
+		break;
+	case NV50_DISP_SOR_LVDS_SCRIPT:
+		priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
+		ret = 0;
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
new file mode 100644
index 0000000..7ec4ee8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nv94_sor_soff(struct dcb_output *outp)
+{
+	return (ffs(outp->or) - 1) * 0x800;
+}
+
+static inline u32
+nv94_sor_loff(struct dcb_output *outp)
+{
+	return nv94_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
+}
+
+static inline u32
+nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+	static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
+	static const u8 nv94[] = { 16, 8, 0, 24 };
+	if (nv_device(priv)->chipset == 0xaf)
+		return nvaf[lane];
+	return nv94[lane];
+}
+
+static int
+nv94_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
+		    int head, int pattern)
+{
+	struct nv50_disp_priv *priv = (void *)disp;
+	const u32 loff = nv94_sor_loff(outp);
+	nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24);
+	return 0;
+}
+
+static int
+nv94_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+		    int head, int link_nr, int link_bw, bool enh_frame)
+{
+	struct nv50_disp_priv *priv = (void *)disp;
+	const u32 soff = nv94_sor_soff(outp);
+	const u32 loff = nv94_sor_loff(outp);
+	u32 dpctrl = 0x00000000;
+	u32 clksor = 0x00000000;
+	u32 lane = 0;
+	int i;
+
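+	/* lane-count mask lives in bits 16+, enhanced framing at bit
+	 * 14; rates above 0x06 (1.62Gbps in DPCD 0.27Gbps units) select
+	 * the faster symbol clock */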
+	dpctrl |= ((1 << link_nr) - 1) << 16;
+	if (enh_frame)
+		dpctrl |= 0x00004000;
+	if (link_bw > 0x06)
+		clksor |= 0x00040000;
+
+	for (i = 0; i < link_nr; i++)
+		lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
+
+	nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
+	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+	nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+	return 0;
+}
+
+static int
+nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+		    int head, int lane, int swing, int preem)
+{
+	struct nouveau_bios *bios = nouveau_bios(disp);
+	struct nv50_disp_priv *priv = (void *)disp;
+	const u32 loff = nv94_sor_loff(outp);
+	u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
+	u8  ver, hdr, cnt, len;
+	struct nvbios_dpout info;
+	struct nvbios_dpcfg ocfg;
+
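+	/* find this output's DP table in the VBIOS, then the config
+	 * entry matching the requested voltage swing / pre-emphasis */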
+	addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
+				 &ver, &hdr, &cnt, &len, &info);
+	if (!addr)
+		return -ENODEV;
+
+	addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
+				 &ver, &hdr, &cnt, &len, &ocfg);
+	if (!addr)
+		return -EINVAL;
+
+	nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+	nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+	nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+	return 0;
+}
+
+const struct nouveau_dp_func
+nv94_sor_dp_func = {
+	.pattern = nv94_sor_dp_pattern,
+	.lnk_ctl = nv94_sor_dp_lnk_ctl,
+	.drv_ctl = nv94_sor_dp_drv_ctl,
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
new file mode 100644
index 0000000..9e1d435
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nvd0_sor_soff(struct dcb_output *outp)
+{
+	return (ffs(outp->or) - 1) * 0x800;
+}
+
+static inline u32
+nvd0_sor_loff(struct dcb_output *outp)
+{
+	return nvd0_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
+}
+
+static inline u32
+nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+	static const u8 nvd0[] = { 16, 8, 0, 24 };
+	return nvd0[lane];
+}
+
+static int
+nvd0_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
+		    int head, int pattern)
+{
+	struct nv50_disp_priv *priv = (void *)disp;
+	const u32 loff = nvd0_sor_loff(outp);
+	nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+	return 0;
+}
+
+static int
+nvd0_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+		    int head, int link_nr, int link_bw, bool enh_frame)
+{
+	struct nv50_disp_priv *priv = (void *)disp;
+	const u32 soff = nvd0_sor_soff(outp);
+	const u32 loff = nvd0_sor_loff(outp);
+	u32 dpctrl = 0x00000000;
+	u32 clksor = 0x00000000;
+	u32 lane = 0;
+	int i;
+
+	clksor |= link_bw << 18;
+	dpctrl |= ((1 << link_nr) - 1) << 16;
+	if (enh_frame)
+		dpctrl |= 0x00004000;
+
+	for (i = 0; i < link_nr; i++)
+		lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3);
+
+	nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
+	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+	nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+	return 0;
+}
+
+static int
+nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
+		    int head, int lane, int swing, int preem)
+{
+	struct nouveau_bios *bios = nouveau_bios(disp);
+	struct nv50_disp_priv *priv = (void *)disp;
+	const u32 loff = nvd0_sor_loff(outp);
+	u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
+	u8  ver, hdr, cnt, len;
+	struct nvbios_dpout info;
+	struct nvbios_dpcfg ocfg;
+
+	addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
+				 &ver, &hdr, &cnt, &len, &info);
+	if (!addr)
+		return -ENODEV;
+
+	addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
+				 &ver, &hdr, &cnt, &len, &ocfg);
+	if (!addr)
+		return -EINVAL;
+
+	nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+	nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+	nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
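+	/* mask and data are both zero, so this is effectively just a read
+	 * of 0x61c13c; presumably it serves to post the writes above */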
+	nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
+	return 0;
+}
+
+const struct nouveau_dp_func
+nvd0_sor_dp_func = {
+	.pattern = nvd0_sor_dp_pattern,
+	.lnk_ctl = nvd0_sor_dp_lnk_ctl,
+	.drv_ctl = nvd0_sor_dp_drv_ctl,
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/vga.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
new file mode 100644
index 0000000..5a1c684
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/subdev.h>
+#include <core/device.h>
+#include <subdev/vga.h>
+
+u8
+nv_rdport(void *obj, int head, u16 port)
+{
+	struct nouveau_device *device = nv_device(obj);
+
+	if (device->card_type >= NV_50)
+		return nv_rd08(obj, 0x601000 + port);
+
+	if (port == 0x03c0 || port == 0x03c1 ||	/* AR */
+	    port == 0x03c2 || port == 0x03da ||	/* INP0 */
+	    port == 0x03d4 || port == 0x03d5)	/* CR */
+		return nv_rd08(obj, 0x601000 + (head * 0x2000) + port);
+
+	if (port == 0x03c2 || port == 0x03cc ||	/* MISC */
+	    port == 0x03c4 || port == 0x03c5 ||	/* SR */
+	    port == 0x03ce || port == 0x03cf) {	/* GR */
+		if (device->card_type < NV_40)
+			head = 0; /* CR44 selects head */
+		return nv_rd08(obj, 0x0c0000 + (head * 0x2000) + port);
+	}
+
+	nv_error(obj, "unknown vga port 0x%04x\n", port);
+	return 0x00;
+}
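+
+/* Note on the decoding above: 0x03c2 appears in both groups because
+ * the legacy port differs by direction (Input Status #0 on reads,
+ * Miscellaneous Output on writes), so on the read side the first test
+ * always claims it.  Pre-NV50, AR/INP0/CR live in a per-head window at
+ * 0x601000 + head * 0x2000 while MISC/SR/GR sit in the PRMVIO range at
+ * 0x0c0000; NV50 and later use a single window at 0x601000.
+ */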
+
+void
+nv_wrport(void *obj, int head, u16 port, u8 data)
+{
+	struct nouveau_device *device = nv_device(obj);
+
+	if (device->card_type >= NV_50)
+		nv_wr08(obj, 0x601000 + port, data);
+	else
+	if (port == 0x03c0 || port == 0x03c1 ||	/* AR */
+	    port == 0x03c2 || port == 0x03da ||	/* INP0 */
+	    port == 0x03d4 || port == 0x03d5)	/* CR */
+		nv_wr08(obj, 0x601000 + (head * 0x2000) + port, data);
+	else
+	if (port == 0x03c2 || port == 0x03cc ||	/* MISC */
+	    port == 0x03c4 || port == 0x03c5 ||	/* SR */
+	    port == 0x03ce || port == 0x03cf) {	/* GR */
+		if (device->card_type < NV_40)
+			head = 0; /* CR44 selects head */
+		nv_wr08(obj, 0x0c0000 + (head * 0x2000) + port, data);
+	} else
+		nv_error(obj, "unknown vga port 0x%04x\n", port);
+}
+
+u8
+nv_rdvgas(void *obj, int head, u8 index)
+{
+	nv_wrport(obj, head, 0x03c4, index);
+	return nv_rdport(obj, head, 0x03c5);
+}
+
+void
+nv_wrvgas(void *obj, int head, u8 index, u8 value)
+{
+	nv_wrport(obj, head, 0x03c4, index);
+	nv_wrport(obj, head, 0x03c5, value);
+}
+
+u8
+nv_rdvgag(void *obj, int head, u8 index)
+{
+	nv_wrport(obj, head, 0x03ce, index);
+	return nv_rdport(obj, head, 0x03cf);
+}
+
+void
+nv_wrvgag(void *obj, int head, u8 index, u8 value)
+{
+	nv_wrport(obj, head, 0x03ce, index);
+	nv_wrport(obj, head, 0x03cf, value);
+}
+
+u8
+nv_rdvgac(void *obj, int head, u8 index)
+{
+	nv_wrport(obj, head, 0x03d4, index);
+	return nv_rdport(obj, head, 0x03d5);
+}
+
+void
+nv_wrvgac(void *obj, int head, u8 index, u8 value)
+{
+	nv_wrport(obj, head, 0x03d4, index);
+	nv_wrport(obj, head, 0x03d5, value);
+}
+
+u8
+nv_rdvgai(void *obj, int head, u16 port, u8 index)
+{
+	if (port == 0x03c4) return nv_rdvgas(obj, head, index);
+	if (port == 0x03ce) return nv_rdvgag(obj, head, index);
+	if (port == 0x03d4) return nv_rdvgac(obj, head, index);
+	nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+	return 0x00;
+}
+
+void
+nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
+{
+	if      (port == 0x03c4) nv_wrvgas(obj, head, index, value);
+	else if (port == 0x03ce) nv_wrvgag(obj, head, index, value);
+	else if (port == 0x03d4) nv_wrvgac(obj, head, index, value);
+	else nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
+}
+
+bool
+nv_lockvgac(void *obj, bool lock)
+{
+	bool locked = !nv_rdvgac(obj, 0, 0x1f);
+	u8 data = lock ? 0x99 : 0x57;
+	nv_wrvgac(obj, 0, 0x1f, data);
+	if (nv_device(obj)->chipset == 0x11) {
+		if (!(nv_rd32(obj, 0x001084) & 0x10000000))
+			nv_wrvgac(obj, 1, 0x1f, data);
+	}
+	return locked;
+}
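+
+/* Illustrative use (not part of the original change): CR0x1f is the
+ * extended-CRTC lock, written with 0x57 to unlock and 0x99 to lock,
+ * and the previous state is returned so callers can restore it:
+ *
+ *	bool was_locked = nv_lockvgac(obj, false);
+ *	...accesses to extended CRTC registers...
+ *	nv_lockvgac(obj, was_locked);
+ */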
+
+/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied).
+ * It affects only the 8-bit vga io regs, which we access using mmio at
+ * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*.
+ * In general, the set value of cr44 does not matter: reg access works as
+ * expected and values can be set for the appropriate head by using a 0x2000
+ * offset as required.
+ * However:
+ * a) pre-nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
+ *    cr44 must be set to 0 or 3 for accessing values on the correct head
+ *    through the common 0xc03c* addresses;
+ * b) in tied mode (4), head B is programmed to the values set on head A, and
+ *    access using the head B addresses can have strange results, so we leave
+ *    tied mode in init once we know what cr44 should be restored to on exit.
+ *
+ * The owner parameter is slightly abused:
+ * 0 and 1 are treated as head values, and so the set value is (owner * 3);
+ * other values are treated as literal values to set.
+ */
+u8
+nv_rdvgaowner(void *obj)
+{
+	if (nv_device(obj)->card_type < NV_50) {
+		if (nv_device(obj)->chipset == 0x11) {
+			u32 tied = nv_rd32(obj, 0x001084) & 0x10000000;
+			if (tied == 0) {
+				u8 slA = nv_rdvgac(obj, 0, 0x28) & 0x80;
+				u8 tvA = nv_rdvgac(obj, 0, 0x33) & 0x01;
+				u8 slB = nv_rdvgac(obj, 1, 0x28) & 0x80;
+				u8 tvB = nv_rdvgac(obj, 1, 0x33) & 0x01;
+				if (slA && !tvA) return 0x00;
+				if (slB && !tvB) return 0x03;
+				if (slA) return 0x00;
+				if (slB) return 0x03;
+				return 0x00;
+			}
+			return 0x04;
+		}
+
+		return nv_rdvgac(obj, 0, 0x44);
+	}
+
+	nv_error(obj, "rdvgaowner after nv4x\n");
+	return 0x00;
+}
+
+void
+nv_wrvgaowner(void *obj, u8 select)
+{
+	if (nv_device(obj)->card_type < NV_50) {
+		u8 owner = (select == 1) ? 3 : select;
+		if (nv_device(obj)->chipset == 0x11) {
+			/* workaround hw lockup bug */
+			nv_rdvgac(obj, 0, 0x1f);
+			nv_rdvgac(obj, 1, 0x1f);
+		}
+
+		nv_wrvgac(obj, 0, 0x44, owner);
+
+		if (nv_device(obj)->chipset == 0x11) {
+			nv_wrvgac(obj, 0, 0x2e, owner);
+			nv_wrvgac(obj, 0, 0x2e, owner);
+		}
+	} else
+		nv_error(obj, "wrvgaowner after nv4x\n");
+}
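+
+/* Illustrative use (not part of the original change), following the
+ * owner-parameter convention described above: save the current owner,
+ * force access to one head, then restore the saved literal value:
+ *
+ *	u8 owner = nv_rdvgaowner(obj);
+ *	nv_wrvgaowner(obj, head);	(head 0 -> cr44 0, head 1 -> cr44 3)
+ *	...per-head vga accesses...
+ *	nv_wrvgaowner(obj, owner);	(0/3/4 are written back literally)
+ */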
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
new file mode 100644
index 0000000..5103e88
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+static int
+nouveau_dmaobj_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nouveau_dmaeng *dmaeng = (void *)engine;
+	struct nouveau_dmaobj *dmaobj;
+	struct nouveau_gpuobj *gpuobj;
+	struct nv_dma_class *args = data;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj);
+	*pobject = nv_object(dmaobj);
+	if (ret)
+		return ret;
+
+	switch (args->flags & NV_DMA_TARGET_MASK) {
+	case NV_DMA_TARGET_VM:
+		dmaobj->target = NV_MEM_TARGET_VM;
+		break;
+	case NV_DMA_TARGET_VRAM:
+		dmaobj->target = NV_MEM_TARGET_VRAM;
+		break;
+	case NV_DMA_TARGET_PCI:
+		dmaobj->target = NV_MEM_TARGET_PCI;
+		break;
+	case NV_DMA_TARGET_PCI_US:
+	case NV_DMA_TARGET_AGP:
+		dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (args->flags & NV_DMA_ACCESS_MASK) {
+	case NV_DMA_ACCESS_VM:
+		dmaobj->access = NV_MEM_ACCESS_VM;
+		break;
+	case NV_DMA_ACCESS_RD:
+		dmaobj->access = NV_MEM_ACCESS_RO;
+		break;
+	case NV_DMA_ACCESS_WR:
+		dmaobj->access = NV_MEM_ACCESS_WO;
+		break;
+	case NV_DMA_ACCESS_RDWR:
+		dmaobj->access = NV_MEM_ACCESS_RW;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	dmaobj->start = args->start;
+	dmaobj->limit = args->limit;
+	dmaobj->conf0 = args->conf0;
+
+	switch (nv_mclass(parent)) {
+	case NV_DEVICE_CLASS:
+		/* delayed, or no, binding */
+		break;
+	default:
+		ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj);
+		if (ret == 0) {
+			nouveau_object_ref(NULL, pobject);
+			*pobject = nv_object(gpuobj);
+		}
+		break;
+	}
+
+	return ret;
+}
+
+static struct nouveau_ofuncs
+nouveau_dmaobj_ofuncs = {
+	.ctor = nouveau_dmaobj_ctor,
+	.dtor = nouveau_object_destroy,
+	.init = nouveau_object_init,
+	.fini = nouveau_object_fini,
+};
+
+struct nouveau_oclass
+nouveau_dmaobj_sclass[] = {
+	{ NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+	{ NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+	{ NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+	{}
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
new file mode 100644
index 0000000..027d821
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm/nv04.h>
+
+#include <engine/dmaobj.h>
+
+struct nv04_dmaeng_priv {
+	struct nouveau_dmaeng base;
+};
+
+static int
+nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+		 struct nouveau_object *parent,
+		 struct nouveau_dmaobj *dmaobj,
+		 struct nouveau_gpuobj **pgpuobj)
+{
+	struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaeng);
+	u32 flags0 = nv_mclass(dmaobj);
+	u32 flags2 = 0x00000000;
+	u64 offset = dmaobj->start & 0xfffff000;
+	u64 adjust = dmaobj->start & 0x00000fff;
+	u32 length = dmaobj->limit - dmaobj->start;
+	int ret;
+
+	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+		switch (nv_mclass(parent->parent)) {
+		case NV03_CHANNEL_DMA_CLASS:
+		case NV10_CHANNEL_DMA_CLASS:
+		case NV17_CHANNEL_DMA_CLASS:
+		case NV40_CHANNEL_DMA_CLASS:
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	if (dmaobj->target == NV_MEM_TARGET_VM) {
+		if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
+			struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
+			if (!dmaobj->start)
+				return nouveau_gpuobj_dup(parent, pgt, pgpuobj);
+			offset  = nv_ro32(pgt, 8 + (offset >> 10));
+			offset &= 0xfffff000;
+		}
+
+		dmaobj->target = NV_MEM_TARGET_PCI;
+		dmaobj->access = NV_MEM_ACCESS_RW;
+	}
+
+	switch (dmaobj->target) {
+	case NV_MEM_TARGET_VRAM:
+		flags0 |= 0x00003000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00023000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00033000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->access) {
+	case NV_MEM_ACCESS_RO:
+		flags0 |= 0x00004000;
+		break;
+	case NV_MEM_ACCESS_WO:
+		flags0 |= 0x00008000;
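+		/* fallthrough - WO shares the flags2 bit with RW */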
+	case NV_MEM_ACCESS_RW:
+		flags2 |= 0x00000002;
+		break;
+	default:
+		return -EINVAL;
+	}
+
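+	/* ctxdma layout, as written below:
+	 *   0x00: class | target/access flags | byte adjust << 20
+	 *   0x04: limit - start
+	 *   0x08: flags2 | page-aligned base address
+	 *   0x0c: mirror of 0x08
+	 */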
+	ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, pgpuobj);
+	if (ret == 0) {
+		nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20));
+		nv_wo32(*pgpuobj, 0x04, length);
+		nv_wo32(*pgpuobj, 0x08, flags2 | offset);
+		nv_wo32(*pgpuobj, 0x0c, flags2 | offset);
+	}
+
+	return ret;
+}
+
+static int
+nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, void *data, u32 size,
+		 struct nouveau_object **pobject)
+{
+	struct nv04_dmaeng_priv *priv;
+	int ret;
+
+	ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+	priv->base.bind = nv04_dmaobj_bind;
+	return 0;
+}
+
+struct nouveau_oclass
+nv04_dmaeng_oclass = {
+	.handle = NV_ENGINE(DMAOBJ, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_dmaeng_ctor,
+		.dtor = _nouveau_dmaeng_dtor,
+		.init = _nouveau_dmaeng_init,
+		.fini = _nouveau_dmaeng_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
new file mode 100644
index 0000000..750183f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nv50_dmaeng_priv {
+	struct nouveau_dmaeng base;
+};
+
+static int
+nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+		 struct nouveau_object *parent,
+		 struct nouveau_dmaobj *dmaobj,
+		 struct nouveau_gpuobj **pgpuobj)
+{
+	u32 flags0 = nv_mclass(dmaobj);
+	u32 flags5 = 0x00000000;
+	int ret;
+
+	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+		switch (nv_mclass(parent->parent)) {
+		case NV50_CHANNEL_DMA_CLASS:
+		case NV84_CHANNEL_DMA_CLASS:
+		case NV50_CHANNEL_IND_CLASS:
+		case NV84_CHANNEL_IND_CLASS:
+		case NV50_DISP_MAST_CLASS:
+		case NV84_DISP_MAST_CLASS:
+		case NV94_DISP_MAST_CLASS:
+		case NVA0_DISP_MAST_CLASS:
+		case NVA3_DISP_MAST_CLASS:
+		case NV50_DISP_SYNC_CLASS:
+		case NV84_DISP_SYNC_CLASS:
+		case NV94_DISP_SYNC_CLASS:
+		case NVA0_DISP_SYNC_CLASS:
+		case NVA3_DISP_SYNC_CLASS:
+		case NV50_DISP_OVLY_CLASS:
+		case NV84_DISP_OVLY_CLASS:
+		case NV94_DISP_OVLY_CLASS:
+		case NVA0_DISP_OVLY_CLASS:
+		case NVA3_DISP_OVLY_CLASS:
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) {
+		if (dmaobj->target == NV_MEM_TARGET_VM) {
+			dmaobj->conf0  = NV50_DMA_CONF0_PRIV_VM;
+			dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM;
+			dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM;
+			dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM;
+		} else {
+			dmaobj->conf0  = NV50_DMA_CONF0_PRIV_US;
+			dmaobj->conf0 |= NV50_DMA_CONF0_PART_256;
+			dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE;
+			dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
+		}
+	}
+
+	flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22;
+	flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22;
+	flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV);
+	flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART);
+
+	switch (dmaobj->target) {
+	case NV_MEM_TARGET_VM:
+		flags0 |= 0x00000000;
+		break;
+	case NV_MEM_TARGET_VRAM:
+		flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->access) {
+	case NV_MEM_ACCESS_VM:
+		break;
+	case NV_MEM_ACCESS_RO:
+		flags0 |= 0x00040000;
+		break;
+	case NV_MEM_ACCESS_WO:
+	case NV_MEM_ACCESS_RW:
+		flags0 |= 0x00080000;
+		break;
+	}
+
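+	/* ctxdma layout, as written below:
+	 *   0x00: class | comp/type/priv (from conf0) | target/access
+	 *   0x04: low 32 bits of limit
+	 *   0x08: low 32 bits of start
+	 *   0x0c: upper bits of limit (<< 24) and start, packed
+	 *   0x10: zero
+	 *   0x14: flags5 (partition setting from conf0)
+	 */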
+	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+	if (ret == 0) {
+		nv_wo32(*pgpuobj, 0x00, flags0);
+		nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
+		nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
+		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
+					upper_32_bits(dmaobj->start));
+		nv_wo32(*pgpuobj, 0x10, 0x00000000);
+		nv_wo32(*pgpuobj, 0x14, flags5);
+	}
+
+	return ret;
+}
+
+static int
+nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, void *data, u32 size,
+		 struct nouveau_object **pobject)
+{
+	struct nv50_dmaeng_priv *priv;
+	int ret;
+
+	ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+	priv->base.bind = nv50_dmaobj_bind;
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_dmaeng_oclass = {
+	.handle = NV_ENGINE(DMAOBJ, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_dmaeng_ctor,
+		.dtor = _nouveau_dmaeng_dtor,
+		.init = _nouveau_dmaeng_init,
+		.fini = _nouveau_dmaeng_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
new file mode 100644
index 0000000..cd3970d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nvc0_dmaeng_priv {
+	struct nouveau_dmaeng base;
+};
+
+static int
+nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+		 struct nouveau_object *parent,
+		 struct nouveau_dmaobj *dmaobj,
+		 struct nouveau_gpuobj **pgpuobj)
+{
+	u32 flags0 = nv_mclass(dmaobj);
+	u32 flags5 = 0x00000000;
+	int ret;
+
+	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+		switch (nv_mclass(parent->parent)) {
+		case NVA3_DISP_MAST_CLASS:
+		case NVA3_DISP_SYNC_CLASS:
+		case NVA3_DISP_OVLY_CLASS:
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else
+		return 0;
+
+	if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) {
+		if (dmaobj->target == NV_MEM_TARGET_VM) {
+			dmaobj->conf0  = NVC0_DMA_CONF0_PRIV_VM;
+			dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
+		} else {
+			dmaobj->conf0  = NVC0_DMA_CONF0_PRIV_US;
+			dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
+			dmaobj->conf0 |= 0x00020000;
+		}
+	}
+
+	flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
+	flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV);
+	flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN);
+
+	switch (dmaobj->target) {
+	case NV_MEM_TARGET_VM:
+		flags0 |= 0x00000000;
+		break;
+	case NV_MEM_TARGET_VRAM:
+		flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dmaobj->access) {
+	case NV_MEM_ACCESS_VM:
+		break;
+	case NV_MEM_ACCESS_RO:
+		flags0 |= 0x00040000;
+		break;
+	case NV_MEM_ACCESS_WO:
+	case NV_MEM_ACCESS_RW:
+		flags0 |= 0x00080000;
+		break;
+	}
+
+	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+	if (ret == 0) {
+		nv_wo32(*pgpuobj, 0x00, flags0);
+		nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
+		nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
+		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
+					upper_32_bits(dmaobj->start));
+		nv_wo32(*pgpuobj, 0x10, 0x00000000);
+		nv_wo32(*pgpuobj, 0x14, flags5);
+	}
+
+	return ret;
+}
+
+static int
+nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, void *data, u32 size,
+		 struct nouveau_object **pobject)
+{
+	struct nvc0_dmaeng_priv *priv;
+	int ret;
+
+	ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+	priv->base.bind = nvc0_dmaobj_bind;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_dmaeng_oclass = {
+	.handle = NV_ENGINE(DMAOBJ, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_dmaeng_ctor,
+		.dtor = _nouveau_dmaeng_dtor,
+		.init = _nouveau_dmaeng_init,
+		.fini = _nouveau_dmaeng_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
new file mode 100644
index 0000000..944e73a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nvd0_dmaeng_priv {
+	struct nouveau_dmaeng base;
+};
+
+static int
+nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+		 struct nouveau_object *parent,
+		 struct nouveau_dmaobj *dmaobj,
+		 struct nouveau_gpuobj **pgpuobj)
+{
+	u32 flags0 = 0x00000000;
+	int ret;
+
+	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+		switch (nv_mclass(parent->parent)) {
+		case NVD0_DISP_MAST_CLASS:
+		case NVD0_DISP_SYNC_CLASS:
+		case NVD0_DISP_OVLY_CLASS:
+		case NVE0_DISP_MAST_CLASS:
+		case NVE0_DISP_SYNC_CLASS:
+		case NVE0_DISP_OVLY_CLASS:
+		case NVF0_DISP_MAST_CLASS:
+		case NVF0_DISP_SYNC_CLASS:
+		case NVF0_DISP_OVLY_CLASS:
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else
+		return 0;
+
+	if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) {
+		if (dmaobj->target == NV_MEM_TARGET_VM) {
+			dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
+			dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
+		} else {
+			dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
+			dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
+		}
+	}
+
+	flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
+	flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
+
+	switch (dmaobj->target) {
+	case NV_MEM_TARGET_VRAM:
+		flags0 |= 0x00000009;
+		break;
+	default:
+		return -EINVAL;
+	}
+
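+	/* display ctxdmas on nvd0 encode start/limit in 256-byte units */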
+	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+	if (ret == 0) {
+		nv_wo32(*pgpuobj, 0x00, flags0);
+		nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8);
+		nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8);
+		nv_wo32(*pgpuobj, 0x0c, 0x00000000);
+		nv_wo32(*pgpuobj, 0x10, 0x00000000);
+		nv_wo32(*pgpuobj, 0x14, 0x00000000);
+	}
+
+	return ret;
+}
+
+static int
+nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		 struct nouveau_oclass *oclass, void *data, u32 size,
+		 struct nouveau_object **pobject)
+{
+	struct nvd0_dmaeng_priv *priv;
+	int ret;
+
+	ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+	priv->base.bind = nvd0_dmaobj_bind;
+	return 0;
+}
+
+struct nouveau_oclass
+nvd0_dmaeng_oclass = {
+	.handle = NV_ENGINE(DMAOBJ, 0xd0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvd0_dmaeng_ctor,
+		.dtor = _nouveau_dmaeng_dtor,
+		.init = _nouveau_dmaeng_init,
+		.fini = _nouveau_dmaeng_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
new file mode 100644
index 0000000..d3ec436
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/object.h>
+#include <core/handle.h>
+#include <core/event.h>
+#include <core/class.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+int
+nouveau_fifo_channel_create_(struct nouveau_object *parent,
+			     struct nouveau_object *engine,
+			     struct nouveau_oclass *oclass,
+			     int bar, u32 addr, u32 size, u32 pushbuf,
+			     u64 engmask, int len, void **ptr)
+{
+	struct nouveau_device *device = nv_device(engine);
+	struct nouveau_fifo *priv = (void *)engine;
+	struct nouveau_fifo_chan *chan;
+	struct nouveau_dmaeng *dmaeng;
+	unsigned long flags;
+	int ret;
+
+	/* create base object class */
+	ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
+				     engmask, len, ptr);
+	chan = *ptr;
+	if (ret)
+		return ret;
+
+	/* validate dma object representing push buffer */
+	chan->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+	if (!chan->pushdma)
+		return -ENOENT;
+
+	dmaeng = (void *)chan->pushdma->base.engine;
+	switch (chan->pushdma->base.oclass->handle) {
+	case NV_DMA_FROM_MEMORY_CLASS:
+	case NV_DMA_IN_MEMORY_CLASS:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+	if (ret)
+		return ret;
+
+	/* find a free fifo channel */
+	spin_lock_irqsave(&priv->lock, flags);
+	for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
+		if (!priv->channel[chan->chid]) {
+			priv->channel[chan->chid] = nv_object(chan);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (chan->chid == priv->max) {
+		nv_error(priv, "no free channels\n");
+		return -ENOSPC;
+	}
+
+	/* map fifo control registers */
+	chan->user = ioremap(pci_resource_start(device->pdev, bar) + addr +
+			     (chan->chid * size), size);
+	if (!chan->user)
+		return -EFAULT;
+
+	nouveau_event_trigger(priv->cevent, 0);
+
+	chan->size = size;
+	return 0;
+}
+
+void
+nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
+{
+	struct nouveau_fifo *priv = (void *)nv_object(chan)->engine;
+	unsigned long flags;
+
+	iounmap(chan->user);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->channel[chan->chid] = NULL;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	nouveau_gpuobj_ref(NULL, &chan->pushgpu);
+	nouveau_object_ref(NULL, (struct nouveau_object **)&chan->pushdma);
+	nouveau_namedb_destroy(&chan->base);
+}
+
+void
+_nouveau_fifo_channel_dtor(struct nouveau_object *object)
+{
+	struct nouveau_fifo_chan *chan = (void *)object;
+	nouveau_fifo_channel_destroy(chan);
+}
+
+u32
+_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
+{
+	struct nouveau_fifo_chan *chan = (void *)object;
+	return ioread32_native(chan->user + addr);
+}
+
+void
+_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	struct nouveau_fifo_chan *chan = (void *)object;
+	iowrite32_native(data, chan->user + addr);
+}
+
+static int
+nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
+{
+	int engidx = nv_hclass(priv) & 0xff;
+
+	while (object && object->parent) {
+		if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
+		    (nv_hclass(object->parent) & 0xff) == engidx)
+			return nouveau_fifo_chan(object)->chid;
+		object = object->parent;
+	}
+
+	return -1;
+}
+
+const char *
+nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid)
+{
+	struct nouveau_fifo_chan *chan = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fifo->lock, flags);
+	if (chid >= fifo->min && chid <= fifo->max)
+		chan = (void *)fifo->channel[chid];
+	spin_unlock_irqrestore(&fifo->lock, flags);
+
+	return nouveau_client_name(chan);
+}
+
+void
+nouveau_fifo_destroy(struct nouveau_fifo *priv)
+{
+	kfree(priv->channel);
+	nouveau_event_destroy(&priv->uevent);
+	nouveau_event_destroy(&priv->cevent);
+	nouveau_engine_destroy(&priv->base);
+}
+
+int
+nouveau_fifo_create_(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass,
+		     int min, int max, int length, void **pobject)
+{
+	struct nouveau_fifo *priv;
+	int ret;
+
+	ret = nouveau_engine_create_(parent, engine, oclass, true, "PFIFO",
+				     "fifo", length, pobject);
+	priv = *pobject;
+	if (ret)
+		return ret;
+
+	priv->min = min;
+	priv->max = max;
+	priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
+	if (!priv->channel)
+		return -ENOMEM;
+
+	ret = nouveau_event_create(1, &priv->cevent);
+	if (ret)
+		return ret;
+
+	ret = nouveau_event_create(1, &priv->uevent);
+	if (ret)
+		return ret;
+
+	priv->chid = nouveau_fifo_chid;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
new file mode 100644
index 0000000..f877bd5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -0,0 +1,644 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/ramht.h>
+#include <core/event.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv04_ramfc[] = {
+	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
+	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
+	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
+	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
+	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
+	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
+	{}
+};
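+
+/* Each entry describes one field saved to RAMFC, in the order consumed
+ * by nv04_fifo_chan_fini() below: { field width in bits, shift within
+ * the RAMFC word, RAMFC byte offset, shift within the register,
+ * register address }.  On channel unload the live register fields are
+ * copied into RAMFC and the registers are then zeroed.
+ */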
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+int
+nv04_fifo_object_attach(struct nouveau_object *parent,
+			struct nouveau_object *object, u32 handle)
+{
+	struct nv04_fifo_priv *priv = (void *)parent->engine;
+	struct nv04_fifo_chan *chan = (void *)parent;
+	u32 context, chid = chan->base.chid;
+	int ret;
+
+	if (nv_iclass(object, NV_GPUOBJ_CLASS))
+		context = nv_gpuobj(object)->addr >> 4;
+	else
+		context = 0x00000004; /* just non-zero */
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_DMAOBJ:
+	case NVDEV_ENGINE_SW:
+		context |= 0x00000000;
+		break;
+	case NVDEV_ENGINE_GR:
+		context |= 0x00010000;
+		break;
+	case NVDEV_ENGINE_MPEG:
+		context |= 0x00020000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	context |= 0x80000000; /* valid */
+	context |= chid << 24;
+
+	mutex_lock(&nv_subdev(priv)->mutex);
+	ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
+	mutex_unlock(&nv_subdev(priv)->mutex);
+	return ret;
+}
+
+void
+nv04_fifo_object_detach(struct nouveau_object *parent, int cookie)
+{
+	struct nv04_fifo_priv *priv = (void *)parent->engine;
+	mutex_lock(&nv_subdev(priv)->mutex);
+	nouveau_ramht_remove(priv->ramht, cookie);
+	mutex_unlock(&nv_subdev(priv)->mutex);
+}
+
+int
+nv04_fifo_context_attach(struct nouveau_object *parent,
+			 struct nouveau_object *object)
+{
+	nv_engctx(object)->addr = nouveau_fifo_chan(parent)->chid;
+	return 0;
+}
+
+static int
+nv04_fifo_chan_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv04_fifo_priv *priv = (void *)engine;
+	struct nv04_fifo_chan *chan;
+	struct nv03_channel_dma_class *args = data;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+					  0x10000, args->pushbuf,
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
+	chan->ramfc = chan->base.chid * 32;
+
+	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x10,
+			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			     NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	return 0;
+}
+
+void
+nv04_fifo_chan_dtor(struct nouveau_object *object)
+{
+	struct nv04_fifo_priv *priv = (void *)object->engine;
+	struct nv04_fifo_chan *chan = (void *)object;
+	struct ramfc_desc *c = priv->ramfc_desc;
+
+	do {
+		nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
+	} while ((++c)->bits);
+
+	nouveau_fifo_channel_destroy(&chan->base);
+}
+
+int
+nv04_fifo_chan_init(struct nouveau_object *object)
+{
+	struct nv04_fifo_priv *priv = (void *)object->engine;
+	struct nv04_fifo_chan *chan = (void *)object;
+	u32 mask = 1 << chan->base.chid;
+	unsigned long flags;
+	int ret;
+
+	ret = nouveau_fifo_channel_init(&chan->base);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+	return 0;
+}
+
+int
+nv04_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv04_fifo_priv *priv = (void *)object->engine;
+	struct nv04_fifo_chan *chan = (void *)object;
+	struct nouveau_gpuobj *fctx = priv->ramfc;
+	struct ramfc_desc *c;
+	unsigned long flags;
+	u32 data = chan->ramfc;
+	u32 chid;
+
+	/* prevent fifo context switches */
+	spin_lock_irqsave(&priv->base.lock, flags);
+	nv_wr32(priv, NV03_PFIFO_CACHES, 0);
+
+	/* if this channel is active, replace it with a null context */
+	chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
+	if (chid == chan->base.chid) {
+		nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
+		nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
+		c = priv->ramfc_desc;
+		do {
+			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+			u32 rv = (nv_rd32(priv, c->regp) &  rm) >> c->regs;
+			u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
+			nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
+		} while ((++c)->bits);
+
+		c = priv->ramfc_desc;
+		do {
+			nv_wr32(priv, c->regp, 0x00000000);
+		} while ((++c)->bits);
+
+		nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
+		nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
+		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+		nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+		nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+	}
+
+	/* restore normal operation, after disabling dma mode */
+	nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
+	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+
+	return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
+
+static struct nouveau_ofuncs
+nv04_fifo_ofuncs = {
+	.ctor = nv04_fifo_chan_ctor,
+	.dtor = nv04_fifo_chan_dtor,
+	.init = nv04_fifo_chan_init,
+	.fini = nv04_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv04_fifo_sclass[] = {
+	{ NV03_CHANNEL_DMA_CLASS, &nv04_fifo_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+int
+nv04_fifo_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nv04_fifo_base *base;
+	int ret;
+
+	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+				          0x1000, NVOBJ_FLAG_HEAP, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct nouveau_oclass
+nv04_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fifo_context_ctor,
+		.dtor = _nouveau_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+void
+nv04_fifo_pause(struct nouveau_fifo *pfifo, unsigned long *pflags)
+__acquires(priv->base.lock)
+{
+	struct nv04_fifo_priv *priv = (void *)pfifo;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	*pflags = flags;
+
+	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
+	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
+
+	/* In some cases the puller may be left in an inconsistent state
+	 * if you try to stop it while it's busy translating handles.
+	 * Sometimes you get a CACHE_ERROR, sometimes it just fails
+	 * silently, sending incorrect instance offsets to PGRAPH after
+	 * it's started up again.
+	 *
+	 * To avoid this, we invalidate the most recently calculated
+	 * instance.
+	 */
+	if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
+			   NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
+		nv_warn(priv, "timeout idling puller\n");
+
+	if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
+			  NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+		nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+
+	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
+}
+
+void
+nv04_fifo_start(struct nouveau_fifo *pfifo, unsigned long *pflags)
+__releases(priv->base.lock)
+{
+	struct nv04_fifo_priv *priv = (void *)pfifo;
+	unsigned long flags = *pflags;
+
+	nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
+	nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);
+
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+}
+
+static const char *
+nv_dma_state_err(u32 state)
+{
+	static const char * const desc[] = {
+		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
+		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
+	};
+	return desc[(state >> 29) & 0x7];
+}
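+
+/* The error code sits in bits 31:29 of DMA_STATE (0x003228); for
+ * example, a state of 0x80000000 decodes as (0x80000000 >> 29) & 7
+ * == 4, i.e. "INVALID_CMD".
+ */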
+
+static bool
+nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
+{
+	struct nv04_fifo_chan *chan = NULL;
+	struct nouveau_handle *bind;
+	const int subc = (addr >> 13) & 0x7;
+	const int mthd = addr & 0x1ffc;
+	bool handled = false;
+	unsigned long flags;
+	u32 engine;
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	if (likely(chid >= priv->base.min && chid <= priv->base.max))
+		chan = (void *)priv->base.channel[chid];
+	if (unlikely(!chan))
+		goto out;
+
+	switch (mthd) {
+	case 0x0000:
+		bind = nouveau_namedb_get(nv_namedb(chan), data);
+		if (unlikely(!bind))
+			break;
+
+		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
+			engine = 0x0000000f << (subc * 4);
+			chan->subc[subc] = data;
+			handled = true;
+
+			nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
+		}
+
+		nouveau_namedb_put(bind);
+		break;
+	default:
+		engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
+		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
+			break;
+
+		bind = nouveau_namedb_get(nv_namedb(chan), chan->subc[subc]);
+		if (likely(bind)) {
+			if (!nv_call(bind->object, mthd, data))
+				handled = true;
+			nouveau_namedb_put(bind);
+		}
+		break;
+	}
+
+out:
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+	return handled;
+}
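+
+/* Software-method handling, in brief: method 0x0000 (bind) looks the
+ * handle up in the channel's namedb; if the object belongs to the SW
+ * engine, its handle is remembered per-subchannel and the subchannel's
+ * engine bits in CACHE1_ENGINE are cleared, so later methods on that
+ * subchannel land here and are dispatched to the software object via
+ * nv_call().
+ */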
+
+static void
+nv04_fifo_cache_error(struct nouveau_device *device,
+		struct nv04_fifo_priv *priv, u32 chid, u32 get)
+{
+	u32 mthd, data;
+	int ptr;
+
+	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
+	 * G80 chips, but CACHE1 isn't big enough for this much data.  Tests
+	 * show that it wraps around to the start at GET=0x800.  No clue as
+	 * to why.
+	 */
+	ptr = (get & 0x7ff) >> 2;
+
+	if (device->card_type < NV_40) {
+		mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr));
+		data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr));
+	} else {
+		mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr));
+		data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr));
+	}
+
+	if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
+		const char *client_name =
+			nouveau_client_name_for_fifo_chid(&priv->base, chid);
+		nv_error(priv,
+			 "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+			 chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
+			 data);
+	}
+
+	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+	nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+	nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+		nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
+	nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
+
+	nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
+		nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+}
+
+static void
+nv04_fifo_dma_pusher(struct nouveau_device *device, struct nv04_fifo_priv *priv,
+		u32 chid)
+{
+	const char *client_name;
+	u32 dma_get = nv_rd32(priv, 0x003244);
+	u32 dma_put = nv_rd32(priv, 0x003240);
+	u32 push = nv_rd32(priv, 0x003220);
+	u32 state = nv_rd32(priv, 0x003228);
+
+	client_name = nouveau_client_name_for_fifo_chid(&priv->base, chid);
+
+	if (device->card_type == NV_50) {
+		u32 ho_get = nv_rd32(priv, 0x003328);
+		u32 ho_put = nv_rd32(priv, 0x003320);
+		u32 ib_get = nv_rd32(priv, 0x003334);
+		u32 ib_put = nv_rd32(priv, 0x003330);
+
+		nv_error(priv,
+			 "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
+			 chid, client_name, ho_get, dma_get, ho_put, dma_put,
+			 ib_get, ib_put, state, nv_dma_state_err(state), push);
+
+		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+		nv_wr32(priv, 0x003364, 0x00000000);
+		if (dma_get != dma_put || ho_get != ho_put) {
+			nv_wr32(priv, 0x003244, dma_put);
+			nv_wr32(priv, 0x003328, ho_put);
+		} else
+		if (ib_get != ib_put)
+			nv_wr32(priv, 0x003334, ib_put);
+	} else {
+		nv_error(priv,
+			 "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
+			 chid, client_name, dma_get, dma_put, state,
+			 nv_dma_state_err(state), push);
+
+		if (dma_get != dma_put)
+			nv_wr32(priv, 0x003244, dma_put);
+	}
+
+	nv_wr32(priv, 0x003228, 0x00000000);
+	nv_wr32(priv, 0x003220, 0x00000001);
+	nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+}
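+
+/* Recovery above: the faulting fetch cannot be replayed, so GET (and,
+ * on nv50, the high/IB copies) is advanced to PUT to drop the rest of
+ * the bad push buffer segment before DMA_STATE is cleared and the
+ * pusher re-enabled.
+ */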
+
+void
+nv04_fifo_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_device *device = nv_device(subdev);
+	struct nv04_fifo_priv *priv = (void *)subdev;
+	uint32_t status, reassign;
+	int cnt = 0;
+
+	reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
+	while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+		uint32_t chid, get;
+
+		nv_wr32(priv, NV03_PFIFO_CACHES, 0);
+
+		chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
+		get  = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
+
+		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+			nv04_fifo_cache_error(device, priv, chid, get);
+			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+		}
+
+		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+			nv04_fifo_dma_pusher(device, priv, chid);
+			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+		}
+
+		if (status & NV_PFIFO_INTR_SEMAPHORE) {
+			uint32_t sem;
+
+			status &= ~NV_PFIFO_INTR_SEMAPHORE;
+			nv_wr32(priv, NV03_PFIFO_INTR_0,
+				NV_PFIFO_INTR_SEMAPHORE);
+
+			sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
+			nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+			nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+		}
+
+		if (device->card_type == NV_50) {
+			if (status & 0x00000010) {
+				status &= ~0x00000010;
+				nv_wr32(priv, 0x002100, 0x00000010);
+			}
+
+			if (status & 0x40000000) {
+				nouveau_event_trigger(priv->base.uevent, 0);
+				nv_wr32(priv, 0x002100, 0x40000000);
+				status &= ~0x40000000;
+			}
+		}
+
+		if (status) {
+			nv_warn(priv, "unknown intr 0x%08x, ch %d\n",
+				status, chid);
+			nv_wr32(priv, NV03_PFIFO_INTR_0, status);
+			status = 0;
+		}
+
+		nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
+	}
+
+	if (status) {
+		nv_error(priv, "still angry after %d spins, halt\n", cnt);
+		nv_wr32(priv, 0x002140, 0);
+		nv_wr32(priv, 0x000140, 0);
+	}
+
+	nv_wr32(priv, 0x000100, 0x00000100);
+}
+
+static int
+nv04_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv04_instmem_priv *imem = nv04_instmem(parent);
+	struct nv04_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 0, 15, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nouveau_ramht_ref(imem->ramht, &priv->ramht);
+	nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+	nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nv04_fifo_intr;
+	nv_engine(priv)->cclass = &nv04_fifo_cclass;
+	nv_engine(priv)->sclass = nv04_fifo_sclass;
+	priv->base.pause = nv04_fifo_pause;
+	priv->base.start = nv04_fifo_start;
+	priv->ramfc_desc = nv04_ramfc;
+	return 0;
+}
+
+void
+nv04_fifo_dtor(struct nouveau_object *object)
+{
+	struct nv04_fifo_priv *priv = (void *)object;
+	nouveau_gpuobj_ref(NULL, &priv->ramfc);
+	nouveau_gpuobj_ref(NULL, &priv->ramro);
+	nouveau_ramht_ref(NULL, &priv->ramht);
+	nouveau_fifo_destroy(&priv->base);
+}
+
+int
+nv04_fifo_init(struct nouveau_object *object)
+{
+	struct nv04_fifo_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
+	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+				       ((priv->ramht->bits - 9) << 16) |
+				        (priv->ramht->base.addr >> 8));
+	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
+
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+
+	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
+	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+	return 0;
+}
+
+struct nouveau_oclass
+nv04_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fifo_ctor,
+		.dtor = nv04_fifo_dtor,
+		.init = nv04_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
new file mode 100644
index 0000000..496a4b4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
@@ -0,0 +1,178 @@
+#ifndef __NV04_FIFO_H__
+#define __NV04_FIFO_H__
+
+#include <engine/fifo.h>
+
+#define NV04_PFIFO_DELAY_0                                 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
+#define NV03_PFIFO_INTR_0                                  0x00002100
+#define NV03_PFIFO_INTR_EN_0                               0x00002140
+#    define NV_PFIFO_INTR_CACHE_ERROR                          (1<<0)
+#    define NV_PFIFO_INTR_RUNOUT                               (1<<4)
+#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                      (1<<8)
+#    define NV_PFIFO_INTR_DMA_PUSHER                          (1<<12)
+#    define NV_PFIFO_INTR_DMA_PT                              (1<<16)
+#    define NV_PFIFO_INTR_SEMAPHORE                           (1<<20)
+#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                     (1<<24)
+#define NV03_PFIFO_RAMHT                                   0x00002210
+#define NV03_PFIFO_RAMFC                                   0x00002214
+#define NV03_PFIFO_RAMRO                                   0x00002218
+#define NV40_PFIFO_RAMFC                                   0x00002220
+#define NV03_PFIFO_CACHES                                  0x00002500
+#define NV04_PFIFO_MODE                                    0x00002504
+#define NV04_PFIFO_DMA                                     0x00002508
+#define NV04_PFIFO_SIZE                                    0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c)                        (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE                                128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED                  (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                        (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
+#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
+#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
+#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
+#define NV03_PFIFO_CACHE1_PUT                              0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
+#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
+#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
+#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
+#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED            0x00000010
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY              0x00001000
+#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
+#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
+#define NV04_PFIFO_CACHE1_HASH                             0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
+#define NV03_PFIFO_CACHE1_GET                              0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
+#define NV40_PFIFO_UNK32E4                                 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i*8))
+
+struct ramfc_desc {
+	unsigned bits:6;  /* width of the field, in bits */
+	unsigned ctxs:5;  /* shift within the RAMFC dword */
+	unsigned ctxp:8;  /* byte offset within the channel's RAMFC entry */
+	unsigned regs:5;  /* shift within the PFIFO register */
+	unsigned regp;    /* address of the PFIFO register shadowed */
+};
+
+struct nv04_fifo_priv {
+	struct nouveau_fifo base;
+	struct ramfc_desc *ramfc_desc;
+	struct nouveau_ramht  *ramht;
+	struct nouveau_gpuobj *ramro;
+	struct nouveau_gpuobj *ramfc;
+};
+
+struct nv04_fifo_base {
+	struct nouveau_fifo_base base;
+};
+
+struct nv04_fifo_chan {
+	struct nouveau_fifo_chan base;
+	u32 subc[8];
+	u32 ramfc;
+};
+
+int  nv04_fifo_object_attach(struct nouveau_object *,
+			     struct nouveau_object *, u32);
+void nv04_fifo_object_detach(struct nouveau_object *, int);
+
+void nv04_fifo_chan_dtor(struct nouveau_object *);
+int  nv04_fifo_chan_init(struct nouveau_object *);
+int  nv04_fifo_chan_fini(struct nouveau_object *, bool suspend);
+
+int  nv04_fifo_context_ctor(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, void *, u32,
+			    struct nouveau_object **);
+
+void nv04_fifo_dtor(struct nouveau_object *);
+int  nv04_fifo_init(struct nouveau_object *);
+void nv04_fifo_pause(struct nouveau_fifo *, unsigned long *);
+void nv04_fifo_start(struct nouveau_fifo *, unsigned long *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
new file mode 100644
index 0000000..2c927c1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
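+/* RAMFC layout for NV10: each entry maps a field of the per-channel
+ * RAMFC instance onto the PFIFO register it shadows
+ */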
+static struct ramfc_desc
+nv10_ramfc[] = {
+	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
+	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
+	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
+	{ 16,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+	{ 16, 16, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_STATE },
+	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
+	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_ENGINE },
+	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_PULL1 },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static int
+nv10_fifo_chan_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv04_fifo_priv *priv = (void *)engine;
+	struct nv04_fifo_chan *chan;
+	struct nv03_channel_dma_class *args = data;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+					  0x10000, args->pushbuf,
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
+	chan->ramfc = chan->base.chid * 32; /* 32-byte RAMFC entry per channel */
+
+	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
+			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			     NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv10_fifo_ofuncs = {
+	.ctor = nv10_fifo_chan_ctor,
+	.dtor = nv04_fifo_chan_dtor,
+	.init = nv04_fifo_chan_init,
+	.fini = nv04_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv10_fifo_sclass[] = {
+	{ NV10_CHANNEL_DMA_CLASS, &nv10_fifo_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv10_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fifo_context_ctor,
+		.dtor = _nouveau_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv04_instmem_priv *imem = nv04_instmem(parent);
+	struct nv04_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nouveau_ramht_ref(imem->ramht, &priv->ramht);
+	nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+	nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nv04_fifo_intr;
+	nv_engine(priv)->cclass = &nv10_fifo_cclass;
+	nv_engine(priv)->sclass = nv10_fifo_sclass;
+	priv->base.pause = nv04_fifo_pause;
+	priv->base.start = nv04_fifo_start;
+	priv->ramfc_desc = nv10_ramfc;
+	return 0;
+}
+
+struct nouveau_oclass
+nv10_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_fifo_ctor,
+		.dtor = nv04_fifo_dtor,
+		.init = nv04_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
new file mode 100644
index 0000000..a9cb51d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
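+/* NV17 RAMFC extends the NV10 layout with acquire/semaphore and DMA
+ * subroutine state, growing each channel's entry to 64 bytes
+ */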
+static struct ramfc_desc
+nv17_ramfc[] = {
+	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
+	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
+	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
+	{ 16,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+	{ 16, 16, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_STATE },
+	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
+	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_ENGINE },
+	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_PULL1 },
+	{ 32,  0, 0x20,  0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+	{ 32,  0, 0x24,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+	{ 32,  0, 0x28,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+	{ 32,  0, 0x2c,  0, NV10_PFIFO_CACHE1_SEMAPHORE },
+	{ 32,  0, 0x30,  0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static int
+nv17_fifo_chan_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv04_fifo_priv *priv = (void *)engine;
+	struct nv04_fifo_chan *chan;
+	struct nv03_channel_dma_class *args = data;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+					  0x10000, args->pushbuf,
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
+					  &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
+	chan->ramfc = chan->base.chid * 64; /* 64-byte RAMFC entry per channel */
+
+	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x14,
+			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			     NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv17_fifo_ofuncs = {
+	.ctor = nv17_fifo_chan_ctor,
+	.dtor = nv04_fifo_chan_dtor,
+	.init = nv04_fifo_chan_init,
+	.fini = nv04_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv17_fifo_sclass[] = {
+	{ NV17_CHANNEL_DMA_CLASS, &nv17_fifo_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv17_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0x17),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fifo_context_ctor,
+		.dtor = _nouveau_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv17_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv04_instmem_priv *imem = nv04_instmem(parent);
+	struct nv04_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nouveau_ramht_ref(imem->ramht, &priv->ramht);
+	nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+	nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nv04_fifo_intr;
+	nv_engine(priv)->cclass = &nv17_fifo_cclass;
+	nv_engine(priv)->sclass = nv17_fifo_sclass;
+	priv->base.pause = nv04_fifo_pause;
+	priv->base.start = nv04_fifo_start;
+	priv->ramfc_desc = nv17_ramfc;
+	return 0;
+}
+
+static int
+nv17_fifo_init(struct nouveau_object *object)
+{
+	struct nv04_fifo_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
+	nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+				       ((priv->ramht->bits - 9) << 16) |
+				        (priv->ramht->base.addr >> 8));
+	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+	nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);
+
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+
+	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
+	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+	return 0;
+}
+
+struct nouveau_oclass
+nv17_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0x17),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv17_fifo_ctor,
+		.dtor = nv04_fifo_dtor,
+		.init = nv17_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
new file mode 100644
index 0000000..2b1f917
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
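+/* NV40 RAMFC uses 128-byte entries and additionally shadows the graphics
+ * context instance, the DMA timeslice and several per-channel registers
+ */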
+static struct ramfc_desc
+nv40_ramfc[] = {
+	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
+	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
+	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
+	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_STATE },
+	{ 28,  0, 0x18,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
+	{  2, 28, 0x18, 28, 0x002058 },
+	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_ENGINE },
+	{ 32,  0, 0x20,  0, NV04_PFIFO_CACHE1_PULL1 },
+	{ 32,  0, 0x24,  0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+	{ 32,  0, 0x28,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+	{ 32,  0, 0x2c,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+	{ 32,  0, 0x30,  0, NV10_PFIFO_CACHE1_SEMAPHORE },
+	{ 32,  0, 0x34,  0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+	{ 32,  0, 0x38,  0, NV40_PFIFO_GRCTX_INSTANCE },
+	{ 17,  0, 0x3c,  0, NV04_PFIFO_DMA_TIMESLICE },
+	{ 32,  0, 0x40,  0, 0x0032e4 },
+	{ 32,  0, 0x44,  0, 0x0032e8 },
+	{ 32,  0, 0x4c,  0, 0x002088 },
+	{ 32,  0, 0x50,  0, 0x003300 },
+	{ 32,  0, 0x54,  0, 0x00330c },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static int
+nv40_fifo_object_attach(struct nouveau_object *parent,
+			struct nouveau_object *object, u32 handle)
+{
+	struct nv04_fifo_priv *priv = (void *)parent->engine;
+	struct nv04_fifo_chan *chan = (void *)parent;
+	u32 context, chid = chan->base.chid;
+	int ret;
+
+	if (nv_iclass(object, NV_GPUOBJ_CLASS))
+		context = nv_gpuobj(object)->addr >> 4;
+	else
+		context = 0x00000004; /* just non-zero */
+
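+	/* RAMHT context word: object instance in the low bits, engine
+	 * selector in bits 20-21, channel id from bit 23 up
+	 */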
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_DMAOBJ:
+	case NVDEV_ENGINE_SW:
+		context |= 0x00000000;
+		break;
+	case NVDEV_ENGINE_GR:
+		context |= 0x00100000;
+		break;
+	case NVDEV_ENGINE_MPEG:
+		context |= 0x00200000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	context |= chid << 23;
+
+	mutex_lock(&nv_subdev(priv)->mutex);
+	ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
+	mutex_unlock(&nv_subdev(priv)->mutex);
+	return ret;
+}
+
+static int
+nv40_fifo_context_attach(struct nouveau_object *parent,
+			 struct nouveau_object *engctx)
+{
+	struct nv04_fifo_priv *priv = (void *)parent->engine;
+	struct nv04_fifo_chan *chan = (void *)parent;
+	unsigned long flags;
+	u32 reg, ctx;
+
+	switch (nv_engidx(engctx->engine)) {
+	case NVDEV_ENGINE_SW:
+		return 0;
+	case NVDEV_ENGINE_GR:
+		reg = 0x32e0;
+		ctx = 0x38;
+		break;
+	case NVDEV_ENGINE_MPEG:
+		reg = 0x330c;
+		ctx = 0x54;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
+	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
+
+	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
+		nv_wr32(priv, reg, nv_engctx(engctx)->addr);
+	nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
+
+	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+	return 0;
+}
+
+static int
+nv40_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+			 struct nouveau_object *engctx)
+{
+	struct nv04_fifo_priv *priv = (void *)parent->engine;
+	struct nv04_fifo_chan *chan = (void *)parent;
+	unsigned long flags;
+	u32 reg, ctx;
+
+	switch (nv_engidx(engctx->engine)) {
+	case NVDEV_ENGINE_SW:
+		return 0;
+	case NVDEV_ENGINE_GR:
+		reg = 0x32e0;
+		ctx = 0x38;
+		break;
+	case NVDEV_ENGINE_MPEG:
+		reg = 0x330c;
+		ctx = 0x54;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
+
+	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
+		nv_wr32(priv, reg, 0x00000000);
+	nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);
+
+	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+	return 0;
+}
+
+static int
+nv40_fifo_chan_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nv04_fifo_priv *priv = (void *)engine;
+	struct nv04_fifo_chan *chan;
+	struct nv03_channel_dma_class *args = data;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+					  0x1000, args->pushbuf,
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nv40_fifo_context_attach;
+	nv_parent(chan)->context_detach = nv40_fifo_context_detach;
+	nv_parent(chan)->object_attach = nv40_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+	chan->ramfc = chan->base.chid * 128; /* 128-byte RAMFC entry per channel */
+
+	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
+			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+			     NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+	nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv40_fifo_ofuncs = {
+	.ctor = nv40_fifo_chan_ctor,
+	.dtor = nv04_fifo_chan_dtor,
+	.init = nv04_fifo_chan_init,
+	.fini = nv04_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv40_fifo_sclass[] = {
+	{ NV40_CHANNEL_DMA_CLASS, &nv40_fifo_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv40_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fifo_context_ctor,
+		.dtor = _nouveau_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv40_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv04_instmem_priv *imem = nv04_instmem(parent);
+	struct nv04_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nouveau_ramht_ref(imem->ramht, &priv->ramht);
+	nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+	nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nv04_fifo_intr;
+	nv_engine(priv)->cclass = &nv40_fifo_cclass;
+	nv_engine(priv)->sclass = nv40_fifo_sclass;
+	priv->base.pause = nv04_fifo_pause;
+	priv->base.start = nv04_fifo_start;
+	priv->ramfc_desc = nv40_ramfc;
+	return 0;
+}
+
+static int
+nv40_fifo_init(struct nouveau_object *object)
+{
+	struct nv04_fifo_priv *priv = (void *)object;
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	int ret;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x002040, 0x000000ff);
+	nv_wr32(priv, 0x002044, 0x2101ffff);
+	nv_wr32(priv, 0x002058, 0x00000001);
+
+	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+				       ((priv->ramht->bits - 9) << 16) |
+				        (priv->ramht->base.addr >> 8));
+	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+
+	switch (nv_device(priv)->chipset) {
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+		nv_wr32(priv, 0x002230, 0x00000001);
+		/* fall through */
+	case 0x40:
+	case 0x41:
+	case 0x42:
+	case 0x43:
+	case 0x45:
+	case 0x48:
+		nv_wr32(priv, 0x002220, 0x00030002);
+		break;
+	default:
+		nv_wr32(priv, 0x002230, 0x00000000);
+		nv_wr32(priv, 0x002220, ((pfb->ram.size - 512 * 1024 +
+					 priv->ramfc->addr) >> 16) |
+					0x00030000);
+		break;
+	}
+
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+
+	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
+	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+	return 0;
+}
+
+struct nouveau_oclass
+nv40_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_fifo_ctor,
+		.dtor = nv04_fifo_dtor,
+		.init = nv40_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
new file mode 100644
index 0000000..e9b8217
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -0,0 +1,515 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+#include <core/class.h>
+#include <core/math.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
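+/* rebuild the playlist of runnable channels in the currently idle buffer
+ * and submit it; double-buffering means PFIFO never reads a half-written
+ * list
+ */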
+static void
+nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
+{
+	struct nouveau_bar *bar = nouveau_bar(priv);
+	struct nouveau_gpuobj *cur;
+	int i, p;
+
+	cur = priv->playlist[priv->cur_playlist];
+	priv->cur_playlist = !priv->cur_playlist;
+
+	for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
+		if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
+			nv_wo32(cur, p++ * 4, i);
+	}
+
+	bar->flush(bar);
+
+	nv_wr32(priv, 0x0032f4, cur->addr >> 12);
+	nv_wr32(priv, 0x0032ec, p);
+	nv_wr32(priv, 0x002500, 0x00000101);
+}
+
+void
+nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
+{
+	mutex_lock(&nv_subdev(priv)->mutex);
+	nv50_fifo_playlist_update_locked(priv);
+	mutex_unlock(&nv_subdev(priv)->mutex);
+}
+
+static int
+nv50_fifo_context_attach(struct nouveau_object *parent,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_base *base = (void *)parent->parent;
+	struct nouveau_gpuobj *ectx = (void *)object;
+	u64 limit = ectx->addr + ectx->size - 1;
+	u64 start = ectx->addr;
+	u32 addr;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
+	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+	default:
+		return -EINVAL;
+	}
+
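+	/* fill this engine's slot in the context table: a flags word, then
+	 * the 40-bit start/limit addresses packed across three words
+	 */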
+	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+	nv_wo32(base->eng, addr + 0x00, 0x00190000);
+	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
+	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
+	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
+					upper_32_bits(start));
+	nv_wo32(base->eng, addr + 0x10, 0x00000000);
+	nv_wo32(base->eng, addr + 0x14, 0x00000000);
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_priv *priv = (void *)parent->engine;
+	struct nv50_fifo_base *base = (void *)parent->parent;
+	struct nv50_fifo_chan *chan = (void *)parent;
+	u32 addr, me;
+	int ret = 0;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   : addr = 0x0000; break;
+	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+	default:
+		return -EINVAL;
+	}
+
+	/* HW bug workaround:
+	 *
+	 * PFIFO will hang forever if the connected engines don't report
+	 * that they've processed the context switch request.
+	 *
+	 * In order for the kickoff to work, we need to ensure all the
+	 * connected engines are in a state where they can answer.
+	 *
+	 * Newer chipsets don't seem to suffer from this issue, and they
+	 * also have an "ignore these engines" bitmask register we can
+	 * use if we hit the issue there.
+	 */
+	me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);
+
+	/* do the kickoff... */
+	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
+	if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
+		nv_error(priv, "channel %d [%s] unload timeout\n",
+			 chan->base.chid, nouveau_client_name(chan));
+		if (suspend)
+			ret = -EBUSY;
+	}
+	nv_wr32(priv, 0x00b860, me);
+
+	if (ret == 0) {
+		nv_wo32(base->eng, addr + 0x00, 0x00000000);
+		nv_wo32(base->eng, addr + 0x04, 0x00000000);
+		nv_wo32(base->eng, addr + 0x08, 0x00000000);
+		nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+		nv_wo32(base->eng, addr + 0x10, 0x00000000);
+		nv_wo32(base->eng, addr + 0x14, 0x00000000);
+		bar->flush(bar);
+	}
+
+	return ret;
+}
+
+static int
+nv50_fifo_object_attach(struct nouveau_object *parent,
+			struct nouveau_object *object, u32 handle)
+{
+	struct nv50_fifo_chan *chan = (void *)parent;
+	u32 context;
+
+	if (nv_iclass(object, NV_GPUOBJ_CLASS))
+		context = nv_gpuobj(object)->node->offset >> 4;
+	else
+		context = 0x00000004; /* just non-zero */
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_DMAOBJ:
+	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
+	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
+	case NVDEV_ENGINE_MPEG  : context |= 0x00200000; break;
+	default:
+		return -EINVAL;
+	}
+
+	return nouveau_ramht_insert(chan->ramht, 0, handle, context);
+}
+
+void
+nv50_fifo_object_detach(struct nouveau_object *parent, int cookie)
+{
+	struct nv50_fifo_chan *chan = (void *)parent;
+	nouveau_ramht_remove(chan->ramht, cookie);
+}
+
+static int
+nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_base *base = (void *)parent;
+	struct nv50_fifo_chan *chan;
+	struct nv03_channel_dma_class *args = data;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+					  0x2000, args->pushbuf,
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
+	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
+	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+	ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+				&chan->ramht);
+	if (ret)
+		return ret;
+
+	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
+	nv_wo32(base->ramfc, 0x44, 0x01003fff);
+	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
+	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+	nv_wo32(base->ramfc, 0x78, 0x00000000);
+	nv_wo32(base->ramfc, 0x7c, 0x30000001);
+	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				   (4 << 24) /* SEARCH_FULL */ |
+				   (chan->ramht->base.node->offset >> 4));
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv50_channel_ind_class *args = data;
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_base *base = (void *)parent;
+	struct nv50_fifo_chan *chan;
+	u64 ioffset, ilength;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+					  0x2000, args->pushbuf,
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nv50_fifo_context_attach;
+	nv_parent(chan)->context_detach = nv50_fifo_context_detach;
+	nv_parent(chan)->object_attach = nv50_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+	ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+			       &chan->ramht);
+	if (ret)
+		return ret;
+
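+	/* the indirect (ring) buffer is described by its base offset and
+	 * log2 of the entry count; each IB entry is 8 bytes
+	 */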
+	ioffset = args->ioffset;
+	ilength = log2i(args->ilength / 8);
+
+	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
+	nv_wo32(base->ramfc, 0x44, 0x01003fff);
+	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
+	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+	nv_wo32(base->ramfc, 0x78, 0x00000000);
+	nv_wo32(base->ramfc, 0x7c, 0x30000001);
+	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				   (4 << 24) /* SEARCH_FULL */ |
+				   (chan->ramht->base.node->offset >> 4));
+	bar->flush(bar);
+	return 0;
+}
+
+void
+nv50_fifo_chan_dtor(struct nouveau_object *object)
+{
+	struct nv50_fifo_chan *chan = (void *)object;
+	nouveau_ramht_ref(NULL, &chan->ramht);
+	nouveau_fifo_channel_destroy(&chan->base);
+}
+
+static int
+nv50_fifo_chan_init(struct nouveau_object *object)
+{
+	struct nv50_fifo_priv *priv = (void *)object->engine;
+	struct nv50_fifo_base *base = (void *)object->parent;
+	struct nv50_fifo_chan *chan = (void *)object;
+	struct nouveau_gpuobj *ramfc = base->ramfc;
+	u32 chid = chan->base.chid;
+	int ret;
+
+	ret = nouveau_fifo_channel_init(&chan->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
+	nv50_fifo_playlist_update(priv);
+	return 0;
+}
+
+int
+nv50_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_fifo_priv *priv = (void *)object->engine;
+	struct nv50_fifo_chan *chan = (void *)object;
+	u32 chid = chan->base.chid;
+
+	/* remove channel from playlist, fifo will unload context */
+	nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
+	nv50_fifo_playlist_update(priv);
+	nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);
+
+	return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
+
+static struct nouveau_ofuncs
+nv50_fifo_ofuncs_dma = {
+	.ctor = nv50_fifo_chan_ctor_dma,
+	.dtor = nv50_fifo_chan_dtor,
+	.init = nv50_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_ofuncs
+nv50_fifo_ofuncs_ind = {
+	.ctor = nv50_fifo_chan_ctor_ind,
+	.dtor = nv50_fifo_chan_dtor,
+	.init = nv50_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv50_fifo_sclass[] = {
+	{ NV50_CHANNEL_DMA_CLASS, &nv50_fifo_ofuncs_dma },
+	{ NV50_CHANNEL_IND_CLASS, &nv50_fifo_ofuncs_ind },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static int
+nv50_fifo_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nv50_fifo_base *base;
+	int ret;
+
+	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
+				          0x1000, NVOBJ_FLAG_HEAP, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
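+	/* per-channel instmem: a 0x200-byte RAMFC, the engine context table,
+	 * the page directory, and a reference to the client's address space
+	 */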
+	ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200,
+				 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1200, 0,
+				 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0, 0,
+				&base->pgd);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void
+nv50_fifo_context_dtor(struct nouveau_object *object)
+{
+	struct nv50_fifo_base *base = (void *)object;
+	nouveau_vm_ref(NULL, &base->vm, base->pgd);
+	nouveau_gpuobj_ref(NULL, &base->pgd);
+	nouveau_gpuobj_ref(NULL, &base->eng);
+	nouveau_gpuobj_ref(NULL, &base->ramfc);
+	nouveau_gpuobj_ref(NULL, &base->cache);
+	nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nv50_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fifo_context_ctor,
+		.dtor = nv50_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
+				&priv->playlist[0]);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
+				&priv->playlist[1]);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nv04_fifo_intr;
+	nv_engine(priv)->cclass = &nv50_fifo_cclass;
+	nv_engine(priv)->sclass = nv50_fifo_sclass;
+	return 0;
+}
+
+void
+nv50_fifo_dtor(struct nouveau_object *object)
+{
+	struct nv50_fifo_priv *priv = (void *)object;
+
+	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+
+	nouveau_fifo_destroy(&priv->base);
+}
+
+int
+nv50_fifo_init(struct nouveau_object *object)
+{
+	struct nv50_fifo_priv *priv = (void *)object;
+	int ret, i;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
+	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
+	nv_wr32(priv, 0x00250c, 0x6f3cfc34);
+	nv_wr32(priv, 0x002044, 0x01003fff);
+
+	nv_wr32(priv, 0x002100, 0xffffffff);
+	nv_wr32(priv, 0x002140, 0xbfffffff);
+
+	for (i = 0; i < 128; i++)
+		nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
+	nv50_fifo_playlist_update_locked(priv);
+
+	nv_wr32(priv, 0x003200, 0x00000001);
+	nv_wr32(priv, 0x003250, 0x00000001);
+	nv_wr32(priv, 0x002500, 0x00000001);
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fifo_ctor,
+		.dtor = nv50_fifo_dtor,
+		.init = nv50_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
new file mode 100644
index 0000000..3a9ceb3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
@@ -0,0 +1,36 @@
+#ifndef __NV50_FIFO_H__
+#define __NV50_FIFO_H__
+
+struct nv50_fifo_priv {
+	struct nouveau_fifo base;
+	struct nouveau_gpuobj *playlist[2];
+	int cur_playlist;
+};
+
+struct nv50_fifo_base {
+	struct nouveau_fifo_base base;
+	struct nouveau_gpuobj *ramfc;
+	struct nouveau_gpuobj *cache;
+	struct nouveau_gpuobj *eng;
+	struct nouveau_gpuobj *pgd;
+	struct nouveau_vm *vm;
+};
+
+struct nv50_fifo_chan {
+	struct nouveau_fifo_chan base;
+	u32 subc[8];
+	struct nouveau_ramht *ramht;
+};
+
+void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
+
+void nv50_fifo_object_detach(struct nouveau_object *, int);
+void nv50_fifo_chan_dtor(struct nouveau_object *);
+int  nv50_fifo_chan_fini(struct nouveau_object *, bool);
+
+void nv50_fifo_context_dtor(struct nouveau_object *);
+
+void nv50_fifo_dtor(struct nouveau_object *);
+int  nv50_fifo_init(struct nouveau_object *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
new file mode 100644
index 0000000..35b94bd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/client.h>
+#include <core/engctx.h>
+#include <core/ramht.h>
+#include <core/event.h>
+#include <core/class.h>
+#include <core/math.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static int
+nv84_fifo_context_attach(struct nouveau_object *parent,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_base *base = (void *)parent->parent;
+	struct nouveau_gpuobj *ectx = (void *)object;
+	u64 limit = ectx->addr + ectx->size - 1;
+	u64 start = ectx->addr;
+	u32 addr;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   : addr = 0x0020; break;
+	case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+	case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
+	case NVDEV_ENGINE_COPY0: addr = 0x00c0; break;
+	default:
+		return -EINVAL;
+	}
+
+	nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+	nv_wo32(base->eng, addr + 0x00, 0x00190000);
+	nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
+	nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
+	nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
+					upper_32_bits(start));
+	nv_wo32(base->eng, addr + 0x10, 0x00000000);
+	nv_wo32(base->eng, addr + 0x14, 0x00000000);
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_priv *priv = (void *)parent->engine;
+	struct nv50_fifo_base *base = (void *)parent->parent;
+	struct nv50_fifo_chan *chan = (void *)parent;
+	u32 addr, save, engn;
+	bool done;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   : engn = 0; addr = 0x0020; break;
+	case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
+	case NVDEV_ENGINE_CRYPT: engn = 4; addr = 0x00a0; break;
+	case NVDEV_ENGINE_COPY0: engn = 2; addr = 0x00c0; break;
+	default:
+		return -EINVAL;
+	}
+
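+	/* restrict the kickoff to the engine being detached, restoring the
+	 * previous engine mask once the context unload has completed
+	 */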
+	save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
+	nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
+	done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
+	nv_wr32(priv, 0x002520, save);
+	if (!done) {
+		nv_error(priv, "channel %d [%s] unload timeout\n",
+			 chan->base.chid, nouveau_client_name(chan));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	nv_wo32(base->eng, addr + 0x00, 0x00000000);
+	nv_wo32(base->eng, addr + 0x04, 0x00000000);
+	nv_wo32(base->eng, addr + 0x08, 0x00000000);
+	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+	nv_wo32(base->eng, addr + 0x10, 0x00000000);
+	nv_wo32(base->eng, addr + 0x14, 0x00000000);
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nv84_fifo_object_attach(struct nouveau_object *parent,
+			struct nouveau_object *object, u32 handle)
+{
+	struct nv50_fifo_chan *chan = (void *)parent;
+	u32 context;
+
+	if (nv_iclass(object, NV_GPUOBJ_CLASS))
+		context = nv_gpuobj(object)->node->offset >> 4;
+	else
+		context = 0x00000004; /* just non-zero */
+
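+	/* RAMHT context word: object instance in the low bits, engine
+	 * selector in bits 20-22
+	 */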
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_DMAOBJ:
+	case NVDEV_ENGINE_SW    : context |= 0x00000000; break;
+	case NVDEV_ENGINE_GR    : context |= 0x00100000; break;
+	case NVDEV_ENGINE_MPEG  :
+	case NVDEV_ENGINE_PPP   : context |= 0x00200000; break;
+	case NVDEV_ENGINE_ME    :
+	case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
+	case NVDEV_ENGINE_VP    : context |= 0x00400000; break;
+	case NVDEV_ENGINE_CRYPT :
+	case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break;
+	case NVDEV_ENGINE_BSP   : context |= 0x00600000; break;
+	default:
+		return -EINVAL;
+	}
+
+	return nouveau_ramht_insert(chan->ramht, 0, handle, context);
+}
+
+static int
+nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_base *base = (void *)parent;
+	struct nv50_fifo_chan *chan;
+	struct nv03_channel_dma_class *args = data;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+					  0x2000, args->pushbuf,
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG) |
+					  (1ULL << NVDEV_ENGINE_ME) |
+					  (1ULL << NVDEV_ENGINE_VP) |
+					  (1ULL << NVDEV_ENGINE_CRYPT) |
+					  (1ULL << NVDEV_ENGINE_BSP) |
+					  (1ULL << NVDEV_ENGINE_PPP) |
+					  (1ULL << NVDEV_ENGINE_COPY0) |
+					  (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+			       &chan->ramht);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nv84_fifo_context_attach;
+	nv_parent(chan)->context_detach = nv84_fifo_context_detach;
+	nv_parent(chan)->object_attach = nv84_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+	nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
+	nv_wo32(base->ramfc, 0x3c, 0x003f6078);
+	nv_wo32(base->ramfc, 0x44, 0x01003fff);
+	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+	nv_wo32(base->ramfc, 0x4c, 0xffffffff);
+	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+	nv_wo32(base->ramfc, 0x78, 0x00000000);
+	nv_wo32(base->ramfc, 0x7c, 0x30000001);
+	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				   (4 << 24) /* SEARCH_FULL */ |
+				   (chan->ramht->base.node->offset >> 4));
+	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
+	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_fifo_base *base = (void *)parent;
+	struct nv50_fifo_chan *chan;
+	struct nv50_channel_ind_class *args = data;
+	u64 ioffset, ilength;
+	int ret;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+					  0x2000, args->pushbuf,
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG) |
+					  (1ULL << NVDEV_ENGINE_ME) |
+					  (1ULL << NVDEV_ENGINE_VP) |
+					  (1ULL << NVDEV_ENGINE_CRYPT) |
+					  (1ULL << NVDEV_ENGINE_BSP) |
+					  (1ULL << NVDEV_ENGINE_PPP) |
+					  (1ULL << NVDEV_ENGINE_COPY0) |
+					  (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
+			       &chan->ramht);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nv84_fifo_context_attach;
+	nv_parent(chan)->context_detach = nv84_fifo_context_detach;
+	nv_parent(chan)->object_attach = nv84_fifo_object_attach;
+	nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+	ioffset = args->ioffset;
+	ilength = log2i(args->ilength / 8);
+
+	nv_wo32(base->ramfc, 0x3c, 0x403f6078);
+	nv_wo32(base->ramfc, 0x44, 0x01003fff);
+	nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+	nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
+	nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+	nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+	nv_wo32(base->ramfc, 0x78, 0x00000000);
+	nv_wo32(base->ramfc, 0x7c, 0x30000001);
+	nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+				   (4 << 24) /* SEARCH_FULL */ |
+				   (chan->ramht->base.node->offset >> 4));
+	nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
+	nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nv84_fifo_chan_init(struct nouveau_object *object)
+{
+	struct nv50_fifo_priv *priv = (void *)object->engine;
+	struct nv50_fifo_base *base = (void *)object->parent;
+	struct nv50_fifo_chan *chan = (void *)object;
+	struct nouveau_gpuobj *ramfc = base->ramfc;
+	u32 chid = chan->base.chid;
+	int ret;
+
+	ret = nouveau_fifo_channel_init(&chan->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
+	nv50_fifo_playlist_update(priv);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv84_fifo_ofuncs_dma = {
+	.ctor = nv84_fifo_chan_ctor_dma,
+	.dtor = nv50_fifo_chan_dtor,
+	.init = nv84_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_ofuncs
+nv84_fifo_ofuncs_ind = {
+	.ctor = nv84_fifo_chan_ctor_ind,
+	.dtor = nv50_fifo_chan_dtor,
+	.init = nv84_fifo_chan_init,
+	.fini = nv50_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv84_fifo_sclass[] = {
+	{ NV84_CHANNEL_DMA_CLASS, &nv84_fifo_ofuncs_dma },
+	{ NV84_CHANNEL_IND_CLASS, &nv84_fifo_ofuncs_ind },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static int
+nv84_fifo_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nv50_fifo_base *base;
+	int ret;
+
+	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
+				          0x1000, NVOBJ_FLAG_HEAP, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0200, 0,
+				 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x4000, 0,
+				 0, &base->pgd);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x1000,
+				 0x400, NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(base), nv_object(base), 0x0100,
+				 0x100, NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct nouveau_oclass
+nv84_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_fifo_context_ctor,
+		.dtor = nv50_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static void
+nv84_fifo_uevent_enable(struct nouveau_event *event, int index)
+{
+	struct nv50_fifo_priv *priv = event->priv;
+	nv_mask(priv, 0x002140, 0x40000000, 0x40000000);
+}
+
+static void
+nv84_fifo_uevent_disable(struct nouveau_event *event, int index)
+{
+	struct nv50_fifo_priv *priv = event->priv;
+	nv_mask(priv, 0x002140, 0x40000000, 0x00000000);
+}
+
+static int
+nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
+				&priv->playlist[0]);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0,
+				&priv->playlist[1]);
+	if (ret)
+		return ret;
+
+	priv->base.uevent->enable = nv84_fifo_uevent_enable;
+	priv->base.uevent->disable = nv84_fifo_uevent_disable;
+	priv->base.uevent->priv = priv;
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nv04_fifo_intr;
+	nv_engine(priv)->cclass = &nv84_fifo_cclass;
+	nv_engine(priv)->sclass = nv84_fifo_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nv84_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_fifo_ctor,
+		.dtor = nv50_fifo_dtor,
+		.init = nv50_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
new file mode 100644
index 0000000..46dfa68
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -0,0 +1,733 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engctx.h>
+#include <core/event.h>
+#include <core/class.h>
+#include <core/math.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+struct nvc0_fifo_priv {
+	struct nouveau_fifo base;
+	struct nouveau_gpuobj *playlist[2];
+	int cur_playlist;
+	struct {
+		struct nouveau_gpuobj *mem;
+		struct nouveau_vma bar;
+	} user;
+	int spoon_nr;
+};
+
+struct nvc0_fifo_base {
+	struct nouveau_fifo_base base;
+	struct nouveau_gpuobj *pgd;
+	struct nouveau_vm *vm;
+};
+
+struct nvc0_fifo_chan {
+	struct nouveau_fifo_chan base;
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
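+/* The playlist is double-buffered: each update rebuilds the list of
+ * runnable channels (bit 0 of 0x003004 set) in the inactive buffer, then
+ * points PFIFO at it via 0x002270/0x002274 and waits on 0x00227c for the
+ * hardware to pick it up.
+ */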
+static void
+nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
+{
+	struct nouveau_bar *bar = nouveau_bar(priv);
+	struct nouveau_gpuobj *cur;
+	int i, p;
+
+	mutex_lock(&nv_subdev(priv)->mutex);
+	cur = priv->playlist[priv->cur_playlist];
+	priv->cur_playlist = !priv->cur_playlist;
+
+	for (i = 0, p = 0; i < 128; i++) {
+		if (!(nv_rd32(priv, 0x003004 + (i * 8)) & 1))
+			continue;
+		nv_wo32(cur, p + 0, i);
+		nv_wo32(cur, p + 4, 0x00000004);
+		p += 8;
+	}
+	bar->flush(bar);
+
+	nv_wr32(priv, 0x002270, cur->addr >> 12);
+	nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
+	if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000))
+		nv_error(priv, "playlist update failed\n");
+	mutex_unlock(&nv_subdev(priv)->mutex);
+}
+
+static int
+nvc0_fifo_context_attach(struct nouveau_object *parent,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nvc0_fifo_base *base = (void *)parent->parent;
+	struct nouveau_engctx *ectx = (void *)object;
+	u32 addr;
+	int ret;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   : addr = 0x0210; break;
+	case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
+	case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!ectx->vma.node) {
+		ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
+					    NV_MEM_ACCESS_RW, &ectx->vma);
+		if (ret)
+			return ret;
+
+		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+	}
+
+	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
+	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nvc0_fifo_priv *priv = (void *)parent->engine;
+	struct nvc0_fifo_base *base = (void *)parent->parent;
+	struct nvc0_fifo_chan *chan = (void *)parent;
+	u32 addr;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   : addr = 0x0210; break;
+	case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
+	case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
+	default:
+		return -EINVAL;
+	}
+
+	nv_wr32(priv, 0x002634, chan->base.chid);
+	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+		nv_error(priv, "channel %d [%s] kick timeout\n",
+			 chan->base.chid, nouveau_client_name(chan));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	nv_wo32(base, addr + 0x00, 0x00000000);
+	nv_wo32(base, addr + 0x04, 0x00000000);
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nvc0_fifo_chan_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nvc0_fifo_priv *priv = (void *)engine;
+	struct nvc0_fifo_base *base = (void *)parent;
+	struct nvc0_fifo_chan *chan;
+	struct nv50_channel_ind_class *args = data;
+	u64 usermem, ioffset, ilength;
+	int ret, i;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
+					  priv->user.bar.offset, 0x1000,
+					  args->pushbuf,
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_COPY0) |
+					  (1ULL << NVDEV_ENGINE_COPY1) |
+					  (1ULL << NVDEV_ENGINE_BSP) |
+					  (1ULL << NVDEV_ENGINE_VP) |
+					  (1ULL << NVDEV_ENGINE_PPP), &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nvc0_fifo_context_attach;
+	nv_parent(chan)->context_detach = nvc0_fifo_context_detach;
+
+	usermem = chan->base.chid * 0x1000;
+	ioffset = args->ioffset;
+	ilength = log2i(args->ilength / 8);
+
+	for (i = 0; i < 0x1000; i += 4)
+		nv_wo32(priv->user.mem, usermem + i, 0x00000000);
+
+	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
+	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
+	nv_wo32(base, 0x10, 0x0000face);
+	nv_wo32(base, 0x30, 0xfffff902);
+	nv_wo32(base, 0x48, lower_32_bits(ioffset));
+	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
+	nv_wo32(base, 0x54, 0x00000002);
+	nv_wo32(base, 0x84, 0x20400000);
+	nv_wo32(base, 0x94, 0x30000001);
+	nv_wo32(base, 0x9c, 0x00000100);
+	nv_wo32(base, 0xa4, 0x1f1f1f1f);
+	nv_wo32(base, 0xa8, 0x1f1f1f1f);
+	nv_wo32(base, 0xac, 0x0000001f);
+	nv_wo32(base, 0xb8, 0xf8000000);
+	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
+	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nvc0_fifo_chan_init(struct nouveau_object *object)
+{
+	struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
+	struct nvc0_fifo_priv *priv = (void *)object->engine;
+	struct nvc0_fifo_chan *chan = (void *)object;
+	u32 chid = chan->base.chid;
+	int ret;
+
+	ret = nouveau_fifo_channel_init(&chan->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
+	nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
+	nvc0_fifo_playlist_update(priv);
+	return 0;
+}
+
+static int
+nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nvc0_fifo_priv *priv = (void *)object->engine;
+	struct nvc0_fifo_chan *chan = (void *)object;
+	u32 chid = chan->base.chid;
+	u32 mask, engine;
+
+	nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
+	nvc0_fifo_playlist_update(priv);
+	mask = nv_rd32(priv, 0x0025a4);
+	for (engine = 0; mask && engine < 16; engine++) {
+		if (!(mask & (1 << engine)))
+			continue;
+		nv_mask(priv, 0x0025a8 + (engine * 4), 0x00000000, 0x00000000);
+		mask &= ~(1 << engine);
+	}
+	nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
+
+	return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
+
+static struct nouveau_ofuncs
+nvc0_fifo_ofuncs = {
+	.ctor = nvc0_fifo_chan_ctor,
+	.dtor = _nouveau_fifo_channel_dtor,
+	.init = nvc0_fifo_chan_init,
+	.fini = nvc0_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nvc0_fifo_sclass[] = {
+	{ NVC0_CHANNEL_IND_CLASS, &nvc0_fifo_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - instmem heap and vm setup
+ ******************************************************************************/
+
+static int
+nvc0_fifo_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nvc0_fifo_base *base;
+	int ret;
+
+	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+				          0x1000, NVOBJ_FLAG_ZERO_ALLOC |
+					  NVOBJ_FLAG_HEAP, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
+				&base->pgd);
+	if (ret)
+		return ret;
+
+	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
+	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
+	nv_wo32(base, 0x0208, 0xffffffff);
+	nv_wo32(base, 0x020c, 0x000000ff);
+
+	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void
+nvc0_fifo_context_dtor(struct nouveau_object *object)
+{
+	struct nvc0_fifo_base *base = (void *)object;
+	nouveau_vm_ref(NULL, &base->vm, base->pgd);
+	nouveau_gpuobj_ref(NULL, &base->pgd);
+	nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nvc0_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_fifo_context_ctor,
+		.dtor = nvc0_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static const struct nouveau_enum nvc0_fifo_fault_unit[] = {
+	{ 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
+	{ 0x03, "PEEPHOLE" },
+	{ 0x04, "BAR1" },
+	{ 0x05, "BAR3" },
+	{ 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
+	{ 0x10, "PBSP", NULL, NVDEV_ENGINE_BSP },
+	{ 0x11, "PPPP", NULL, NVDEV_ENGINE_PPP },
+	{ 0x13, "PCOUNTER" },
+	{ 0x14, "PVP", NULL, NVDEV_ENGINE_VP },
+	{ 0x15, "PCOPY0", NULL, NVDEV_ENGINE_COPY0 },
+	{ 0x16, "PCOPY1", NULL, NVDEV_ENGINE_COPY1 },
+	{ 0x17, "PDAEMON" },
+	{}
+};
+
+static const struct nouveau_enum nvc0_fifo_fault_reason[] = {
+	{ 0x00, "PT_NOT_PRESENT" },
+	{ 0x01, "PT_TOO_SHORT" },
+	{ 0x02, "PAGE_NOT_PRESENT" },
+	{ 0x03, "VM_LIMIT_EXCEEDED" },
+	{ 0x04, "NO_CHANNEL" },
+	{ 0x05, "PAGE_SYSTEM_ONLY" },
+	{ 0x06, "PAGE_READ_ONLY" },
+	{ 0x0a, "COMPRESSED_SYSRAM" },
+	{ 0x0c, "INVALID_STORAGE_TYPE" },
+	{}
+};
+
+static const struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
+	{ 0x01, "PCOPY0" },
+	{ 0x02, "PCOPY1" },
+	{ 0x04, "DISPATCH" },
+	{ 0x05, "CTXCTL" },
+	{ 0x06, "PFIFO" },
+	{ 0x07, "BAR_READ" },
+	{ 0x08, "BAR_WRITE" },
+	{ 0x0b, "PVP" },
+	{ 0x0c, "PPPP" },
+	{ 0x0d, "PBSP" },
+	{ 0x11, "PCOUNTER" },
+	{ 0x12, "PDAEMON" },
+	{ 0x14, "CCACHE" },
+	{ 0x15, "CCACHE_POST" },
+	{}
+};
+
+static const struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
+	{ 0x01, "TEX" },
+	{ 0x0c, "ESETUP" },
+	{ 0x0e, "CTXCTL" },
+	{ 0x0f, "PROP" },
+	{}
+};
+
+static const struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
+/*	{ 0x00008000, "" }	seen with null ib push */
+	{ 0x00200000, "ILLEGAL_MTHD" },
+	{ 0x00800000, "EMPTY_SUBC" },
+	{}
+};
+
+static void
+nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
+{
+	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
+	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
+	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
+	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+	u32 client = (stat & 0x00001f00) >> 8;
+	const struct nouveau_enum *en;
+	struct nouveau_engine *engine;
+	struct nouveau_object *engctx = NULL;
+
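+	/* nv_mask() with mask and data both zero reads the register and
+	 * writes the same value back; for these units that write-back seems
+	 * to be what re-arms fault reporting (inferred behaviour, the
+	 * registers are undocumented).
+	 */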
+	switch (unit) {
+	case 3: /* PEEPHOLE */
+		nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
+		break;
+	case 4: /* BAR1 */
+		nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
+		break;
+	case 5: /* BAR3 */
+		nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
+		break;
+	default:
+		break;
+	}
+
+	nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ?
+		 "write" : "read", (u64)vahi << 32 | valo);
+	nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
+	pr_cont("] from ");
+	en = nouveau_enum_print(nvc0_fifo_fault_unit, unit);
+	if (stat & 0x00000040) {
+		pr_cont("/");
+		nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
+	} else {
+		pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
+		nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
+	}
+
+	if (en && en->data2) {
+		engine = nouveau_engine(priv, en->data2);
+		if (engine)
+			engctx = nouveau_engctx_get(engine, inst);
+	}
+	pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
+			nouveau_client_name(engctx));
+
+	nouveau_engctx_put(engctx);
+}
+
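+/* Try to handle a method in software by forwarding it to the SW class
+ * object (0x906e) bound on the channel, if any.  Returns 0 if the
+ * method was consumed, -EINVAL otherwise.
+ */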
+static int
+nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
+{
+	struct nvc0_fifo_chan *chan = NULL;
+	struct nouveau_handle *bind;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	if (likely(chid >= priv->base.min && chid <= priv->base.max))
+		chan = (void *)priv->base.channel[chid];
+	if (unlikely(!chan))
+		goto out;
+
+	bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+	if (likely(bind)) {
+		if (!mthd || !nv_call(bind->object, mthd, data))
+			ret = 0;
+		nouveau_namedb_put(bind);
+	}
+
+out:
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+	return ret;
+}
+
+static void
+nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
+{
+	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
+	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00003ffc);
+	u32 show = stat;
+
+	if (stat & 0x00200000) {
+		if (mthd == 0x0054) {
+			if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
+				show &= ~0x00200000;
+		}
+	}
+
+	if (stat & 0x00800000) {
+		if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
+			show &= ~0x00800000;
+	}
+
+	if (show) {
+		nv_error(priv, "SUBFIFO%d:", unit);
+		nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
+		pr_cont("\n");
+		nv_error(priv,
+			 "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+			 unit, chid,
+			 nouveau_client_name_for_fifo_chid(&priv->base, chid),
+			 subc, mthd, data);
+	}
+
+	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nvc0_fifo_intr(struct nouveau_subdev *subdev)
+{
+	struct nvc0_fifo_priv *priv = (void *)subdev;
+	u32 mask = nv_rd32(priv, 0x002140);
+	u32 stat = nv_rd32(priv, 0x002100) & mask;
+
+	if (stat & 0x00000001) {
+		u32 intr = nv_rd32(priv, 0x00252c);
+		nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr);
+		nv_wr32(priv, 0x002100, 0x00000001);
+		stat &= ~0x00000001;
+	}
+
+	if (stat & 0x00000100) {
+		u32 intr = nv_rd32(priv, 0x00254c);
+		nv_warn(priv, "INTR 0x00000100: 0x%08x\n", intr);
+		nv_wr32(priv, 0x002100, 0x00000100);
+		stat &= ~0x00000100;
+	}
+
+	if (stat & 0x00010000) {
+		u32 intr = nv_rd32(priv, 0x00256c);
+		nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr);
+		nv_wr32(priv, 0x002100, 0x00010000);
+		stat &= ~0x00010000;
+	}
+
+	if (stat & 0x01000000) {
+		u32 intr = nv_rd32(priv, 0x00258c);
+		nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr);
+		nv_wr32(priv, 0x002100, 0x01000000);
+		stat &= ~0x01000000;
+	}
+
+	if (stat & 0x10000000) {
+		u32 units = nv_rd32(priv, 0x00259c);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nvc0_fifo_isr_vm_fault(priv, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(priv, 0x00259c, units);
+		stat &= ~0x10000000;
+	}
+
+	if (stat & 0x20000000) {
+		u32 units = nv_rd32(priv, 0x0025a0);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nvc0_fifo_isr_subfifo_intr(priv, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(priv, 0x0025a0, units);
+		stat &= ~0x20000000;
+	}
+
+	if (stat & 0x40000000) {
+		u32 intr0 = nv_rd32(priv, 0x0025a4);
+		u32 intr1 = nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+		nv_debug(priv, "INTR 0x40000000: 0x%08x 0x%08x\n",
+			       intr0, intr1);
+		stat &= ~0x40000000;
+	}
+
+	if (stat & 0x80000000) {
+		u32 intr = nv_mask(priv, 0x0025a8, 0x00000000, 0x00000000);
+		nouveau_event_trigger(priv->base.uevent, 0);
+		nv_debug(priv, "INTR 0x80000000: 0x%08x\n", intr);
+		stat &= ~0x80000000;
+	}
+
+	if (stat) {
+		nv_fatal(priv, "unhandled status 0x%08x\n", stat);
+		nv_wr32(priv, 0x002100, stat);
+		nv_wr32(priv, 0x002140, 0);
+	}
+}
+
+static void
+nvc0_fifo_uevent_enable(struct nouveau_event *event, int index)
+{
+	struct nvc0_fifo_priv *priv = event->priv;
+	nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
+}
+
+static void
+nvc0_fifo_uevent_disable(struct nouveau_event *event, int index)
+{
+	struct nvc0_fifo_priv *priv = event->priv;
+	nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
+}
+
+static int
+nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nvc0_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 0, 127, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
+				&priv->playlist[0]);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0,
+				&priv->playlist[1]);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0,
+				&priv->user.mem);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+				&priv->user.bar);
+	if (ret)
+		return ret;
+
+	priv->base.uevent->enable = nvc0_fifo_uevent_enable;
+	priv->base.uevent->disable = nvc0_fifo_uevent_disable;
+	priv->base.uevent->priv = priv;
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nvc0_fifo_intr;
+	nv_engine(priv)->cclass = &nvc0_fifo_cclass;
+	nv_engine(priv)->sclass = nvc0_fifo_sclass;
+	return 0;
+}
+
+static void
+nvc0_fifo_dtor(struct nouveau_object *object)
+{
+	struct nvc0_fifo_priv *priv = (void *)object;
+
+	nouveau_gpuobj_unmap(&priv->user.bar);
+	nouveau_gpuobj_ref(NULL, &priv->user.mem);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+
+	nouveau_fifo_destroy(&priv->base);
+}
+
+static int
+nvc0_fifo_init(struct nouveau_object *object)
+{
+	struct nvc0_fifo_priv *priv = (void *)object;
+	int ret, i;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x000204, 0xffffffff);
+	nv_wr32(priv, 0x002204, 0xffffffff);
+
+	priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
+	nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
+
+	/* assign engines to subfifos */
+	if (priv->spoon_nr >= 3) {
+		nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
+		nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
+		nv_wr32(priv, 0x002210, ~(1 << 1)); /* PPP */
+		nv_wr32(priv, 0x002214, ~(1 << 1)); /* PBSP */
+		nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
+		nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
+	}
+
+	/* PSUBFIFO[n] */
+	for (i = 0; i < priv->spoon_nr; i++) {
+		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+	}
+
+	nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
+	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+	nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
+	nv_wr32(priv, 0x002100, 0xffffffff);
+	nv_wr32(priv, 0x002140, 0x3fffffff);
+	nv_wr32(priv, 0x002628, 0x00000001); /* makes mthd 0x20 work */
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_fifo_ctor,
+		.dtor = nvc0_fifo_dtor,
+		.init = nvc0_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
new file mode 100644
index 0000000..56192a7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engctx.h>
+#include <core/event.h>
+#include <core/class.h>
+#include <core/math.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
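+/* Each Kepler runlist serves a single engine.  The table below maps a
+ * runlist index to its engine, and to the mask of engines a channel
+ * created on that runlist may address (the engine itself, plus SW for
+ * the PGRAPH runlist).
+ */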
+#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
+static const struct {
+	u64 subdev;
+	u64 mask;
+} fifo_engine[] = {
+	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW)),
+	_(NVDEV_ENGINE_VP      , 0),
+	_(NVDEV_ENGINE_PPP     , 0),
+	_(NVDEV_ENGINE_BSP     , 0),
+	_(NVDEV_ENGINE_COPY0   , 0),
+	_(NVDEV_ENGINE_COPY1   , 0),
+	_(NVDEV_ENGINE_VENC    , 0),
+};
+#undef _
+#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
+
+struct nve0_fifo_engn {
+	struct nouveau_gpuobj *playlist[2];
+	int cur_playlist;
+};
+
+struct nve0_fifo_priv {
+	struct nouveau_fifo base;
+	struct nve0_fifo_engn engine[FIFO_ENGINE_NR];
+	struct {
+		struct nouveau_gpuobj *mem;
+		struct nouveau_vma bar;
+	} user;
+	int spoon_nr;
+};
+
+struct nve0_fifo_base {
+	struct nouveau_fifo_base base;
+	struct nouveau_gpuobj *pgd;
+	struct nouveau_vm *vm;
+};
+
+struct nve0_fifo_chan {
+	struct nouveau_fifo_chan base;
+	u32 engine;
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
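+/* As on nvc0 the playlists are double-buffered, but Kepler keeps a
+ * separate pair per engine, allocated lazily on first update.  A channel
+ * belongs to this engine's playlist when 0x800004 reports the matching
+ * engine id with the enable bit set.
+ */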
+static void
+nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
+{
+	struct nouveau_bar *bar = nouveau_bar(priv);
+	struct nve0_fifo_engn *engn = &priv->engine[engine];
+	struct nouveau_gpuobj *cur;
+	u32 match = (engine << 16) | 0x00000001;
+	int i, p;
+
+	mutex_lock(&nv_subdev(priv)->mutex);
+	cur = engn->playlist[engn->cur_playlist];
+	if (unlikely(cur == NULL)) {
+		int ret = nouveau_gpuobj_new(nv_object(priv), NULL,
+					     0x8000, 0x1000, 0, &cur);
+		if (ret) {
+			mutex_unlock(&nv_subdev(priv)->mutex);
+			nv_error(priv, "playlist alloc failed\n");
+			return;
+		}
+
+		engn->playlist[engn->cur_playlist] = cur;
+	}
+
+	engn->cur_playlist = !engn->cur_playlist;
+
+	for (i = 0, p = 0; i < priv->base.max; i++) {
+		u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
+		if (ctrl != match)
+			continue;
+		nv_wo32(cur, p + 0, i);
+		nv_wo32(cur, p + 4, 0x00000000);
+		p += 8;
+	}
+	bar->flush(bar);
+
+	nv_wr32(priv, 0x002270, cur->addr >> 12);
+	nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
+	if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+		nv_error(priv, "playlist %d update timeout\n", engine);
+	mutex_unlock(&nv_subdev(priv)->mutex);
+}
+
+static int
+nve0_fifo_context_attach(struct nouveau_object *parent,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nve0_fifo_base *base = (void *)parent->parent;
+	struct nouveau_engctx *ectx = (void *)object;
+	u32 addr;
+	int ret;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   :
+	case NVDEV_ENGINE_COPY0:
+	case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!ectx->vma.node) {
+		ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
+					    NV_MEM_ACCESS_RW, &ectx->vma);
+		if (ret)
+			return ret;
+
+		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+	}
+
+	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
+	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nve0_fifo_priv *priv = (void *)parent->engine;
+	struct nve0_fifo_base *base = (void *)parent->parent;
+	struct nve0_fifo_chan *chan = (void *)parent;
+	u32 addr;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   :
+	case NVDEV_ENGINE_COPY0:
+	case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+	case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+	case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+	case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
+	default:
+		return -EINVAL;
+	}
+
+	nv_wr32(priv, 0x002634, chan->base.chid);
+	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+		nv_error(priv, "channel %d [%s] kick timeout\n",
+			 chan->base.chid, nouveau_client_name(chan));
+		if (suspend)
+			return -EBUSY;
+	}
+
+	nv_wo32(base, addr + 0x00, 0x00000000);
+	nv_wo32(base, addr + 0x04, 0x00000000);
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nve0_fifo_chan_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nve0_fifo_priv *priv = (void *)engine;
+	struct nve0_fifo_base *base = (void *)parent;
+	struct nve0_fifo_chan *chan;
+	struct nve0_channel_ind_class *args = data;
+	u64 usermem, ioffset, ilength;
+	int ret, i;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
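+	/* Bind the channel to the first requested runlist whose engine is
+	 * actually present on this device; args->engine is collapsed to
+	 * that single bit.
+	 */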
+	for (i = 0; i < FIFO_ENGINE_NR; i++) {
+		if (args->engine & (1 << i)) {
+			if (nouveau_engine(parent, fifo_engine[i].subdev)) {
+				args->engine = (1 << i);
+				break;
+			}
+		}
+	}
+
+	if (i == FIFO_ENGINE_NR)
+		return -ENODEV;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
+					  priv->user.bar.offset, 0x200,
+					  args->pushbuf,
+					  fifo_engine[i].mask, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nve0_fifo_context_attach;
+	nv_parent(chan)->context_detach = nve0_fifo_context_detach;
+	chan->engine = i;
+
+	usermem = chan->base.chid * 0x200;
+	ioffset = args->ioffset;
+	ilength = log2i(args->ilength / 8);
+
+	for (i = 0; i < 0x200; i += 4)
+		nv_wo32(priv->user.mem, usermem + i, 0x00000000);
+
+	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
+	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
+	nv_wo32(base, 0x10, 0x0000face);
+	nv_wo32(base, 0x30, 0xfffff902);
+	nv_wo32(base, 0x48, lower_32_bits(ioffset));
+	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
+	nv_wo32(base, 0x84, 0x20400000);
+	nv_wo32(base, 0x94, 0x30000001);
+	nv_wo32(base, 0x9c, 0x00000100);
+	nv_wo32(base, 0xac, 0x0000001f);
+	nv_wo32(base, 0xe8, chan->base.chid);
+	nv_wo32(base, 0xb8, 0xf8000000);
+	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
+	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nve0_fifo_chan_init(struct nouveau_object *object)
+{
+	struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
+	struct nve0_fifo_priv *priv = (void *)object->engine;
+	struct nve0_fifo_chan *chan = (void *)object;
+	u32 chid = chan->base.chid;
+	int ret;
+
+	ret = nouveau_fifo_channel_init(&chan->base);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
+	nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
+	nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+	nve0_fifo_playlist_update(priv, chan->engine);
+	nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+	return 0;
+}
+
+static int
+nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nve0_fifo_priv *priv = (void *)object->engine;
+	struct nve0_fifo_chan *chan = (void *)object;
+	u32 chid = chan->base.chid;
+
+	nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
+	nve0_fifo_playlist_update(priv, chan->engine);
+	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
+
+	return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
+
+static struct nouveau_ofuncs
+nve0_fifo_ofuncs = {
+	.ctor = nve0_fifo_chan_ctor,
+	.dtor = _nouveau_fifo_channel_dtor,
+	.init = nve0_fifo_chan_init,
+	.fini = nve0_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nve0_fifo_sclass[] = {
+	{ NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - instmem heap and vm setup
+ ******************************************************************************/
+
+static int
+nve0_fifo_context_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nve0_fifo_base *base;
+	int ret;
+
+	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+				          0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
+				&base->pgd);
+	if (ret)
+		return ret;
+
+	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
+	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
+	nv_wo32(base, 0x0208, 0xffffffff);
+	nv_wo32(base, 0x020c, 0x000000ff);
+
+	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void
+nve0_fifo_context_dtor(struct nouveau_object *object)
+{
+	struct nve0_fifo_base *base = (void *)object;
+	nouveau_vm_ref(NULL, &base->vm, base->pgd);
+	nouveau_gpuobj_ref(NULL, &base->pgd);
+	nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nve0_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_fifo_context_ctor,
+		.dtor = nve0_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static const struct nouveau_enum nve0_fifo_fault_unit[] = {
+	{}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_reason[] = {
+	{ 0x00, "PT_NOT_PRESENT" },
+	{ 0x01, "PT_TOO_SHORT" },
+	{ 0x02, "PAGE_NOT_PRESENT" },
+	{ 0x03, "VM_LIMIT_EXCEEDED" },
+	{ 0x04, "NO_CHANNEL" },
+	{ 0x05, "PAGE_SYSTEM_ONLY" },
+	{ 0x06, "PAGE_READ_ONLY" },
+	{ 0x0a, "COMPRESSED_SYSRAM" },
+	{ 0x0c, "INVALID_STORAGE_TYPE" },
+	{}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+	{}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+	{}
+};
+
+static const struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
+	{ 0x00200000, "ILLEGAL_MTHD" },
+	{ 0x00800000, "EMPTY_SUBC" },
+	{}
+};
+
+static void
+nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
+{
+	u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
+	u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
+	u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
+	u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
+	u32 client = (stat & 0x00001f00) >> 8;
+	const struct nouveau_enum *en;
+	struct nouveau_engine *engine;
+	struct nouveau_object *engctx = NULL;
+
+	nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
+		       "write" : "read", (u64)vahi << 32 | valo);
+	nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
+	pr_cont("] from ");
+	en = nouveau_enum_print(nve0_fifo_fault_unit, unit);
+	if (stat & 0x00000040) {
+		pr_cont("/");
+		nouveau_enum_print(nve0_fifo_fault_hubclient, client);
+	} else {
+		pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
+		nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
+	}
+
+	if (en && en->data2) {
+		engine = nouveau_engine(priv, en->data2);
+		if (engine)
+			engctx = nouveau_engctx_get(engine, inst);
+	}
+
+	pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
+			nouveau_client_name(engctx));
+
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
+{
+	struct nve0_fifo_chan *chan = NULL;
+	struct nouveau_handle *bind;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	if (likely(chid >= priv->base.min && chid <= priv->base.max))
+		chan = (void *)priv->base.channel[chid];
+	if (unlikely(!chan))
+		goto out;
+
+	bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+	if (likely(bind)) {
+		if (!mthd || !nv_call(bind->object, mthd, data))
+			ret = 0;
+		nouveau_namedb_put(bind);
+	}
+
+out:
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+	return ret;
+}
+
+static void
+nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
+{
+	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
+	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00003ffc);
+	u32 show = stat;
+
+	if (stat & 0x00200000) {
+		if (mthd == 0x0054) {
+			if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
+				show &= ~0x00200000;
+		}
+	}
+
+	if (stat & 0x00800000) {
+		if (!nve0_fifo_swmthd(priv, chid, mthd, data))
+			show &= ~0x00800000;
+	}
+
+	if (show) {
+		nv_error(priv, "SUBFIFO%d:", unit);
+		nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
+		pr_cont("\n");
+		nv_error(priv,
+			 "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
+			 unit, chid,
+			 nouveau_client_name_for_fifo_chid(&priv->base, chid),
+			 subc, mthd, data);
+	}
+
+	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nve0_fifo_intr(struct nouveau_subdev *subdev)
+{
+	struct nve0_fifo_priv *priv = (void *)subdev;
+	u32 mask = nv_rd32(priv, 0x002140);
+	u32 stat = nv_rd32(priv, 0x002100) & mask;
+
+	if (stat & 0x00000100) {
+		nv_warn(priv, "unknown status 0x00000100\n");
+		nv_wr32(priv, 0x002100, 0x00000100);
+		stat &= ~0x00000100;
+	}
+
+	if (stat & 0x10000000) {
+		u32 units = nv_rd32(priv, 0x00259c);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nve0_fifo_isr_vm_fault(priv, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(priv, 0x00259c, units);
+		stat &= ~0x10000000;
+	}
+
+	if (stat & 0x20000000) {
+		u32 units = nv_rd32(priv, 0x0025a0);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nve0_fifo_isr_subfifo_intr(priv, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(priv, 0x0025a0, units);
+		stat &= ~0x20000000;
+	}
+
+	if (stat & 0x40000000) {
+		nv_warn(priv, "unknown status 0x40000000\n");
+		nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+		stat &= ~0x40000000;
+	}
+
+	if (stat & 0x80000000) {
+		nouveau_event_trigger(priv->base.uevent, 0);
+		nv_wr32(priv, 0x002100, 0x80000000);
+		stat &= ~0x80000000;
+	}
+
+	if (stat) {
+		nv_fatal(priv, "unhandled status 0x%08x\n", stat);
+		nv_wr32(priv, 0x002100, stat);
+		nv_wr32(priv, 0x002140, 0);
+	}
+}
+
+static void
+nve0_fifo_uevent_enable(struct nouveau_event *event, int index)
+{
+	struct nve0_fifo_priv *priv = event->priv;
+	nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
+}
+
+static void
+nve0_fifo_uevent_disable(struct nouveau_event *event, int index)
+{
+	struct nve0_fifo_priv *priv = event->priv;
+	nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
+}
+
+static int
+nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nve0_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 4096 * 0x200, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+				&priv->user.bar);
+	if (ret)
+		return ret;
+
+	priv->base.uevent->enable = nve0_fifo_uevent_enable;
+	priv->base.uevent->disable = nve0_fifo_uevent_disable;
+	priv->base.uevent->priv = priv;
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nve0_fifo_intr;
+	nv_engine(priv)->cclass = &nve0_fifo_cclass;
+	nv_engine(priv)->sclass = nve0_fifo_sclass;
+	return 0;
+}
+
+static void
+nve0_fifo_dtor(struct nouveau_object *object)
+{
+	struct nve0_fifo_priv *priv = (void *)object;
+	int i;
+
+	nouveau_gpuobj_unmap(&priv->user.bar);
+	nouveau_gpuobj_ref(NULL, &priv->user.mem);
+
+	for (i = 0; i < ARRAY_SIZE(priv->engine); i++) {
+		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
+		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
+	}
+
+	nouveau_fifo_destroy(&priv->base);
+}
+
+static int
+nve0_fifo_init(struct nouveau_object *object)
+{
+	struct nve0_fifo_priv *priv = (void *)object;
+	int ret, i;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* enable all available PSUBFIFOs */
+	nv_wr32(priv, 0x000204, 0xffffffff);
+	priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
+	nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
+
+	/* PSUBFIFO[n] */
+	for (i = 0; i < priv->spoon_nr; i++) {
+		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+	}
+
+	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+	nv_wr32(priv, 0x002a00, 0xffffffff);
+	nv_wr32(priv, 0x002100, 0xffffffff);
+	nv_wr32(priv, 0x002140, 0x3fffffff);
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_fifo_ctor,
+		.dtor = nve0_fifo_dtor,
+		.init = nve0_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
new file mode 100644
index 0000000..e194701
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
@@ -0,0 +1,129 @@
+#ifndef __NOUVEAU_GRCTX_H__
+#define __NOUVEAU_GRCTX_H__
+
+struct nouveau_grctx {
+	struct nouveau_device *device;
+
+	enum {
+		NOUVEAU_GRCTX_PROG,
+		NOUVEAU_GRCTX_VALS
+	} mode;
+	void *data;
+
+	u32 ctxprog_max;
+	u32 ctxprog_len;
+	u32 ctxprog_reg;
+	int ctxprog_label[32];
+	u32 ctxvals_pos;
+	u32 ctxvals_base;
+};
+
+static inline void
+cp_out(struct nouveau_grctx *ctx, u32 inst)
+{
+	u32 *ctxprog = ctx->data;
+
+	if (ctx->mode != NOUVEAU_GRCTX_PROG)
+		return;
+
+	BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max);
+	ctxprog[ctx->ctxprog_len++] = inst;
+}
+
+static inline void
+cp_lsr(struct nouveau_grctx *ctx, u32 val)
+{
+	cp_out(ctx, CP_LOAD_SR | val);
+}
+
+static inline void
+cp_ctx(struct nouveau_grctx *ctx, u32 reg, u32 length)
+{
+	ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
+
+	ctx->ctxvals_base = ctx->ctxvals_pos;
+	ctx->ctxvals_pos = ctx->ctxvals_base + length;
+
+	if (length > (CP_CTX_COUNT >> CP_CTX_COUNT_SHIFT)) {
+		cp_lsr(ctx, length);
+		length = 0;
+	}
+
+	cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg);
+}
+
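+/* Branches may target labels that haven't been defined yet: _cp_bra()
+ * emits such forward references with an 0xff marker in the high byte,
+ * and cp_name() patches every matching instruction once the label's
+ * position is known.
+ */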
+static inline void
+cp_name(struct nouveau_grctx *ctx, int name)
+{
+	u32 *ctxprog = ctx->data;
+	int i;
+
+	if (ctx->mode != NOUVEAU_GRCTX_PROG)
+		return;
+
+	ctx->ctxprog_label[name] = ctx->ctxprog_len;
+	for (i = 0; i < ctx->ctxprog_len; i++) {
+		if ((ctxprog[i] & 0xfff00000) != 0xff400000)
+			continue;
+		if ((ctxprog[i] & CP_BRA_IP) != ((name) << CP_BRA_IP_SHIFT))
+			continue;
+		ctxprog[i] = (ctxprog[i] & 0x00ff00ff) |
+			     (ctx->ctxprog_len << CP_BRA_IP_SHIFT);
+	}
+}
+
+static inline void
+_cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
+{
+	int ip = 0;
+
+	if (mod != 2) {
+		ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT;
+		if (ip == 0)
+			ip = 0xff000000 | (name << CP_BRA_IP_SHIFT);
+	}
+
+	cp_out(ctx, CP_BRA | (mod << 18) | ip | flag |
+		    (state ? 0 : CP_BRA_IF_CLEAR));
+}
+#define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
+#define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
+#define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
+
+static inline void
+_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
+{
+	cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
+}
+#define cp_wait(c, f, s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
+
+static inline void
+_cp_set(struct nouveau_grctx *ctx, int flag, int state)
+{
+	cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
+}
+#define cp_set(c, f, s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
+
+static inline void
+cp_pos(struct nouveau_grctx *ctx, int offset)
+{
+	ctx->ctxvals_pos = offset;
+	ctx->ctxvals_base = ctx->ctxvals_pos;
+
+	cp_lsr(ctx, ctx->ctxvals_pos);
+	cp_out(ctx, CP_SET_CONTEXT_POINTER);
+}
+
+static inline void
+gr_def(struct nouveau_grctx *ctx, u32 reg, u32 val)
+{
+	if (ctx->mode != NOUVEAU_GRCTX_VALS)
+		return;
+
+	reg = (reg - 0x00400000) / 4;
+	reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
+
+	nv_wo32(ctx->data, reg * 4, val);
+}
+
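+/* Usage sketch: the same generator code is run twice, once with mode ==
+ * NOUVEAU_GRCTX_PROG to emit the ctxprog and once with mode ==
+ * NOUVEAU_GRCTX_VALS to fill in the default context image, e.g.:
+ *
+ *	cp_ctx(ctx, 0x400724, 1);
+ *	gr_def(ctx, 0x400724, 0x02008821);
+ *
+ * cp_out() only emits instructions in PROG mode and gr_def() only
+ * writes values in VALS mode, so a single code path drives both passes.
+ */
+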
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
new file mode 100644
index 0000000..7bbb1e1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+
+/* NVIDIA context programs handle a number of other conditions which are
+ * not implemented in our versions.  It's not clear why NVIDIA context
+ * programs have this code, nor whether it's strictly necessary for
+ * correct operation.  We'll implement additional handling if/when we
+ * discover it's necessary.
+ *
+ * - On context save, NVIDIA sets 0x400314 bit 0 to 1 if the "3D state"
+ *   flag is set; this gets saved into the context.
+ * - On context save, the context programs for all cards load nsource
+ *   into a flag register and check for ILLEGAL_MTHD.  If it's set,
+ *   opcode 0x60000d is called before resuming normal operation.
+ * - Some context programs check more conditions than the above.  NV44
+ *   checks: ((nsource & 0x0857) || (0x400718 & 0x0100) || (intr & 0x0001))
+ *   and calls 0x60000d before resuming normal operation.
+ * - At the very beginning of NVIDIA's context programs, flag 9 is checked
+ *   and if true 0x800001 is called with count=0, pos=0, the flag is cleared
+ *   and then the ctxprog is aborted.  It looks like a complicated NOP;
+ *   its purpose is unknown.
+ * - In the section of code that loads the per-vs state, NVIDIA checks
+ *   flag 10.  If it's set, they only transfer the small 0x300 byte block
+ *   of state + the state for a single vs as opposed to the state for
+ *   all vs units.  It doesn't seem likely that it'll occur in normal
+ *   operation, especially seeing as it appears NVIDIA may have screwed
+ *   up the ctxprogs for some cards and have an invalid instruction
+ *   rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction.
+ * - There's a number of places where context offset 0 (where we place
+ *   the PRAMIN offset of the context) is loaded into either 0x408000,
+ *   0x408004 or 0x408008.  Not sure what's up there either.
+ * - The ctxprogs for some cards save 0x400a00 again during the cleanup
+ *   path for auto-loadctx.
+ */
+
+#define CP_FLAG_CLEAR                 0
+#define CP_FLAG_SET                   1
+#define CP_FLAG_SWAP_DIRECTION        ((0 * 32) + 0)
+#define CP_FLAG_SWAP_DIRECTION_LOAD   0
+#define CP_FLAG_SWAP_DIRECTION_SAVE   1
+#define CP_FLAG_USER_SAVE             ((0 * 32) + 5)
+#define CP_FLAG_USER_SAVE_NOT_PENDING 0
+#define CP_FLAG_USER_SAVE_PENDING     1
+#define CP_FLAG_USER_LOAD             ((0 * 32) + 6)
+#define CP_FLAG_USER_LOAD_NOT_PENDING 0
+#define CP_FLAG_USER_LOAD_PENDING     1
+#define CP_FLAG_STATUS                ((3 * 32) + 0)
+#define CP_FLAG_STATUS_IDLE           0
+#define CP_FLAG_STATUS_BUSY           1
+#define CP_FLAG_AUTO_SAVE             ((3 * 32) + 4)
+#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
+#define CP_FLAG_AUTO_SAVE_PENDING     1
+#define CP_FLAG_AUTO_LOAD             ((3 * 32) + 5)
+#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
+#define CP_FLAG_AUTO_LOAD_PENDING     1
+#define CP_FLAG_UNK54                 ((3 * 32) + 6)
+#define CP_FLAG_UNK54_CLEAR           0
+#define CP_FLAG_UNK54_SET             1
+#define CP_FLAG_ALWAYS                ((3 * 32) + 8)
+#define CP_FLAG_ALWAYS_FALSE          0
+#define CP_FLAG_ALWAYS_TRUE           1
+#define CP_FLAG_UNK57                 ((3 * 32) + 9)
+#define CP_FLAG_UNK57_CLEAR           0
+#define CP_FLAG_UNK57_SET             1
+
+#define CP_CTX                   0x00100000
+#define CP_CTX_COUNT             0x000fc000
+#define CP_CTX_COUNT_SHIFT               14
+#define CP_CTX_REG               0x00003fff
+#define CP_LOAD_SR               0x00200000
+#define CP_LOAD_SR_VALUE         0x000fffff
+#define CP_BRA                   0x00400000
+#define CP_BRA_IP                0x0000ff00
+#define CP_BRA_IP_SHIFT                   8
+#define CP_BRA_IF_CLEAR          0x00000080
+#define CP_BRA_FLAG              0x0000007f
+#define CP_WAIT                  0x00500000
+#define CP_WAIT_SET              0x00000080
+#define CP_WAIT_FLAG             0x0000007f
+#define CP_SET                   0x00700000
+#define CP_SET_1                 0x00000080
+#define CP_SET_FLAG              0x0000007f
+#define CP_NEXT_TO_SWAP          0x00600007
+#define CP_NEXT_TO_CURRENT       0x00600009
+#define CP_SET_CONTEXT_POINTER   0x0060000a
+#define CP_END                   0x0060000e
+#define CP_LOAD_MAGIC_UNK01      0x00800001 /* unknown */
+#define CP_LOAD_MAGIC_NV44TCL    0x00800029 /* per-vs state (0x4497) */
+#define CP_LOAD_MAGIC_NV40TCL    0x00800041 /* per-vs state (0x4097) */
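+
+/* A worked example of the encoding above -- an illustrative sketch only,
+ * not something the generator executes.  A "branch to ip 0x42 if flag
+ * ALWAYS is set" would be packed from the fields defined here as:
+ *
+ *   u32 insn = CP_BRA
+ *            | ((0x42 << CP_BRA_IP_SHIFT) & CP_BRA_IP)
+ *            | (CP_FLAG_ALWAYS & CP_BRA_FLAG);
+ *
+ * OR-ing in CP_BRA_IF_CLEAR instead branches when the flag is clear.
+ * The cp_bra()/cp_wait()/cp_set() helpers from ctx.h take care of this
+ * packing for the code below.
+ */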
+
+#include "nv40.h"
+#include "ctx.h"
+
+/* TODO:
+ *  - get vs count from 0x1540
+ */
+
+static int
+nv40_graph_vs_count(struct nouveau_device *device)
+{
+	switch (device->chipset) {
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+		return 8;
+	case 0x40:
+		return 6;
+	case 0x41:
+	case 0x42:
+		return 5;
+	case 0x43:
+	case 0x44:
+	case 0x46:
+	case 0x4a:
+		return 3;
+	case 0x4c:
+	case 0x4e:
+	case 0x67:
+	default:
+		return 1;
+	}
+}
+
+enum cp_label {
+	cp_check_load = 1,
+	cp_setup_auto_load,
+	cp_setup_load,
+	cp_setup_save,
+	cp_swap_state,
+	cp_swap_state3d_3_is_save,
+	cp_prepare_exit,
+	cp_exit,
+};
+
+static void
+nv40_graph_construct_general(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int i;
+
+	cp_ctx(ctx, 0x4000a4, 1);
+	gr_def(ctx, 0x4000a4, 0x00000008);
+	cp_ctx(ctx, 0x400144, 58);
+	gr_def(ctx, 0x400144, 0x00000001);
+	cp_ctx(ctx, 0x400314, 1);
+	gr_def(ctx, 0x400314, 0x00000000);
+	cp_ctx(ctx, 0x400400, 10);
+	cp_ctx(ctx, 0x400480, 10);
+	cp_ctx(ctx, 0x400500, 19);
+	gr_def(ctx, 0x400514, 0x00040000);
+	gr_def(ctx, 0x400524, 0x55555555);
+	gr_def(ctx, 0x400528, 0x55555555);
+	gr_def(ctx, 0x40052c, 0x55555555);
+	gr_def(ctx, 0x400530, 0x55555555);
+	cp_ctx(ctx, 0x400560, 6);
+	gr_def(ctx, 0x400568, 0x0000ffff);
+	gr_def(ctx, 0x40056c, 0x0000ffff);
+	cp_ctx(ctx, 0x40057c, 5);
+	cp_ctx(ctx, 0x400710, 3);
+	gr_def(ctx, 0x400710, 0x20010001);
+	gr_def(ctx, 0x400714, 0x0f73ef00);
+	cp_ctx(ctx, 0x400724, 1);
+	gr_def(ctx, 0x400724, 0x02008821);
+	cp_ctx(ctx, 0x400770, 3);
+	if (device->chipset == 0x40) {
+		cp_ctx(ctx, 0x400814, 4);
+		cp_ctx(ctx, 0x400828, 5);
+		cp_ctx(ctx, 0x400840, 5);
+		gr_def(ctx, 0x400850, 0x00000040);
+		cp_ctx(ctx, 0x400858, 4);
+		gr_def(ctx, 0x400858, 0x00000040);
+		gr_def(ctx, 0x40085c, 0x00000040);
+		gr_def(ctx, 0x400864, 0x80000000);
+		cp_ctx(ctx, 0x40086c, 9);
+		gr_def(ctx, 0x40086c, 0x80000000);
+		gr_def(ctx, 0x400870, 0x80000000);
+		gr_def(ctx, 0x400874, 0x80000000);
+		gr_def(ctx, 0x400878, 0x80000000);
+		gr_def(ctx, 0x400888, 0x00000040);
+		gr_def(ctx, 0x40088c, 0x80000000);
+		cp_ctx(ctx, 0x4009c0, 8);
+		gr_def(ctx, 0x4009cc, 0x80000000);
+		gr_def(ctx, 0x4009dc, 0x80000000);
+	} else {
+		cp_ctx(ctx, 0x400840, 20);
+		if (nv44_graph_class(ctx->device)) {
+			for (i = 0; i < 8; i++)
+				gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
+		}
+		gr_def(ctx, 0x400880, 0x00000040);
+		gr_def(ctx, 0x400884, 0x00000040);
+		gr_def(ctx, 0x400888, 0x00000040);
+		cp_ctx(ctx, 0x400894, 11);
+		gr_def(ctx, 0x400894, 0x00000040);
+		if (!nv44_graph_class(ctx->device)) {
+			for (i = 0; i < 8; i++)
+				gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
+		}
+		cp_ctx(ctx, 0x4008e0, 2);
+		cp_ctx(ctx, 0x4008f8, 2);
+		if (device->chipset == 0x4c ||
+		    (device->chipset & 0xf0) == 0x60)
+			cp_ctx(ctx, 0x4009f8, 1);
+	}
+	cp_ctx(ctx, 0x400a00, 73);
+	gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
+	cp_ctx(ctx, 0x401000, 4);
+	cp_ctx(ctx, 0x405004, 1);
+	switch (device->chipset) {
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+		cp_ctx(ctx, 0x403448, 1);
+		gr_def(ctx, 0x403448, 0x00001010);
+		break;
+	default:
+		cp_ctx(ctx, 0x403440, 1);
+		switch (device->chipset) {
+		case 0x40:
+			gr_def(ctx, 0x403440, 0x00000010);
+			break;
+		case 0x44:
+		case 0x46:
+		case 0x4a:
+			gr_def(ctx, 0x403440, 0x00003010);
+			break;
+		case 0x41:
+		case 0x42:
+		case 0x43:
+		case 0x4c:
+		case 0x4e:
+		case 0x67:
+		default:
+			gr_def(ctx, 0x403440, 0x00001010);
+			break;
+		}
+		break;
+	}
+}
+
+static void
+nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int i;
+
+	if (device->chipset == 0x40) {
+		cp_ctx(ctx, 0x401880, 51);
+		gr_def(ctx, 0x401940, 0x00000100);
+	} else
+	if (device->chipset == 0x46 || device->chipset == 0x47 ||
+	    device->chipset == 0x49 || device->chipset == 0x4b) {
+		cp_ctx(ctx, 0x401880, 32);
+		for (i = 0; i < 16; i++)
+			gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
+		if (device->chipset == 0x46)
+			cp_ctx(ctx, 0x401900, 16);
+		cp_ctx(ctx, 0x401940, 3);
+	}
+	cp_ctx(ctx, 0x40194c, 18);
+	gr_def(ctx, 0x401954, 0x00000111);
+	gr_def(ctx, 0x401958, 0x00080060);
+	gr_def(ctx, 0x401974, 0x00000080);
+	gr_def(ctx, 0x401978, 0xffff0000);
+	gr_def(ctx, 0x40197c, 0x00000001);
+	gr_def(ctx, 0x401990, 0x46400000);
+	if (device->chipset == 0x40) {
+		cp_ctx(ctx, 0x4019a0, 2);
+		cp_ctx(ctx, 0x4019ac, 5);
+	} else {
+		cp_ctx(ctx, 0x4019a0, 1);
+		cp_ctx(ctx, 0x4019b4, 3);
+	}
+	gr_def(ctx, 0x4019bc, 0xffff0000);
+	switch (device->chipset) {
+	case 0x46:
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+		cp_ctx(ctx, 0x4019c0, 18);
+		for (i = 0; i < 16; i++)
+			gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888);
+		break;
+	}
+	cp_ctx(ctx, 0x401a08, 8);
+	gr_def(ctx, 0x401a10, 0x0fff0000);
+	gr_def(ctx, 0x401a14, 0x0fff0000);
+	gr_def(ctx, 0x401a1c, 0x00011100);
+	cp_ctx(ctx, 0x401a2c, 4);
+	cp_ctx(ctx, 0x401a44, 26);
+	for (i = 0; i < 16; i++)
+		gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
+	gr_def(ctx, 0x401a8c, 0x4b7fffff);
+	if (device->chipset == 0x40) {
+		cp_ctx(ctx, 0x401ab8, 3);
+	} else {
+		cp_ctx(ctx, 0x401ab8, 1);
+		cp_ctx(ctx, 0x401ac0, 1);
+	}
+	cp_ctx(ctx, 0x401ad0, 8);
+	gr_def(ctx, 0x401ad0, 0x30201000);
+	gr_def(ctx, 0x401ad4, 0x70605040);
+	gr_def(ctx, 0x401ad8, 0xb8a89888);
+	gr_def(ctx, 0x401adc, 0xf8e8d8c8);
+	cp_ctx(ctx, 0x401b10, device->chipset == 0x40 ? 2 : 1);
+	gr_def(ctx, 0x401b10, 0x40100000);
+	cp_ctx(ctx, 0x401b18, device->chipset == 0x40 ? 6 : 5);
+	gr_def(ctx, 0x401b28, device->chipset == 0x40 ?
+			      0x00000004 : 0x00000000);
+	cp_ctx(ctx, 0x401b30, 25);
+	gr_def(ctx, 0x401b34, 0x0000ffff);
+	gr_def(ctx, 0x401b68, 0x435185d6);
+	gr_def(ctx, 0x401b6c, 0x2155b699);
+	gr_def(ctx, 0x401b70, 0xfedcba98);
+	gr_def(ctx, 0x401b74, 0x00000098);
+	gr_def(ctx, 0x401b84, 0xffffffff);
+	gr_def(ctx, 0x401b88, 0x00ff7000);
+	gr_def(ctx, 0x401b8c, 0x0000ffff);
+	if (device->chipset != 0x44 && device->chipset != 0x4a &&
+	    device->chipset != 0x4e)
+		cp_ctx(ctx, 0x401b94, 1);
+	cp_ctx(ctx, 0x401b98, 8);
+	gr_def(ctx, 0x401b9c, 0x00ff0000);
+	cp_ctx(ctx, 0x401bc0, 9);
+	gr_def(ctx, 0x401be0, 0x00ffff00);
+	cp_ctx(ctx, 0x401c00, 192);
+	for (i = 0; i < 16; i++) { /* fragment texture units */
+		gr_def(ctx, 0x401c40 + (i * 4), 0x00018488);
+		gr_def(ctx, 0x401c80 + (i * 4), 0x00028202);
+		gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4);
+		gr_def(ctx, 0x401d40 + (i * 4), 0x01012000);
+		gr_def(ctx, 0x401d80 + (i * 4), 0x00080008);
+		gr_def(ctx, 0x401e00 + (i * 4), 0x00100008);
+	}
+	for (i = 0; i < 4; i++) { /* vertex texture units */
+		gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80);
+		gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202);
+		gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008);
+		gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008);
+	}
+	cp_ctx(ctx, 0x400f5c, 3);
+	gr_def(ctx, 0x400f5c, 0x00000002);
+	cp_ctx(ctx, 0x400f84, 1);
+}
+
+static void
+nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int i;
+
+	cp_ctx(ctx, 0x402000, 1);
+	cp_ctx(ctx, 0x402404, device->chipset == 0x40 ? 1 : 2);
+	switch (device->chipset) {
+	case 0x40:
+		gr_def(ctx, 0x402404, 0x00000001);
+		break;
+	case 0x4c:
+	case 0x4e:
+	case 0x67:
+		gr_def(ctx, 0x402404, 0x00000020);
+		break;
+	case 0x46:
+	case 0x49:
+	case 0x4b:
+		gr_def(ctx, 0x402404, 0x00000421);
+		break;
+	default:
+		gr_def(ctx, 0x402404, 0x00000021);
+	}
+	if (device->chipset != 0x40)
+		gr_def(ctx, 0x402408, 0x030c30c3);
+	switch (device->chipset) {
+	case 0x44:
+	case 0x46:
+	case 0x4a:
+	case 0x4c:
+	case 0x4e:
+	case 0x67:
+		cp_ctx(ctx, 0x402440, 1);
+		gr_def(ctx, 0x402440, 0x00011001);
+		break;
+	default:
+		break;
+	}
+	cp_ctx(ctx, 0x402480, device->chipset == 0x40 ? 8 : 9);
+	gr_def(ctx, 0x402488, 0x3e020200);
+	gr_def(ctx, 0x40248c, 0x00ffffff);
+	switch (device->chipset) {
+	case 0x40:
+		gr_def(ctx, 0x402490, 0x60103f00);
+		break;
+	case 0x47:
+		gr_def(ctx, 0x402490, 0x40103f00);
+		break;
+	case 0x41:
+	case 0x42:
+	case 0x49:
+	case 0x4b:
+		gr_def(ctx, 0x402490, 0x20103f00);
+		break;
+	default:
+		gr_def(ctx, 0x402490, 0x0c103f00);
+		break;
+	}
+	gr_def(ctx, 0x40249c, device->chipset <= 0x43 ?
+			      0x00020000 : 0x00040000);
+	cp_ctx(ctx, 0x402500, 31);
+	gr_def(ctx, 0x402530, 0x00008100);
+	if (device->chipset == 0x40)
+		cp_ctx(ctx, 0x40257c, 6);
+	cp_ctx(ctx, 0x402594, 16);
+	cp_ctx(ctx, 0x402800, 17);
+	gr_def(ctx, 0x402800, 0x00000001);
+	switch (device->chipset) {
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+		cp_ctx(ctx, 0x402864, 1);
+		gr_def(ctx, 0x402864, 0x00001001);
+		cp_ctx(ctx, 0x402870, 3);
+		gr_def(ctx, 0x402878, 0x00000003);
+		if (device->chipset != 0x47) { /* these belong at the end!! */
+			cp_ctx(ctx, 0x402900, 1);
+			cp_ctx(ctx, 0x402940, 1);
+			cp_ctx(ctx, 0x402980, 1);
+			cp_ctx(ctx, 0x4029c0, 1);
+			cp_ctx(ctx, 0x402a00, 1);
+			cp_ctx(ctx, 0x402a40, 1);
+			cp_ctx(ctx, 0x402a80, 1);
+			cp_ctx(ctx, 0x402ac0, 1);
+		}
+		break;
+	case 0x40:
+		cp_ctx(ctx, 0x402844, 1);
+		gr_def(ctx, 0x402844, 0x00000001);
+		cp_ctx(ctx, 0x402850, 1);
+		break;
+	default:
+		cp_ctx(ctx, 0x402844, 1);
+		gr_def(ctx, 0x402844, 0x00001001);
+		cp_ctx(ctx, 0x402850, 2);
+		gr_def(ctx, 0x402854, 0x00000003);
+		break;
+	}
+
+	cp_ctx(ctx, 0x402c00, 4);
+	gr_def(ctx, 0x402c00, device->chipset == 0x40 ?
+			      0x80800001 : 0x00888001);
+	switch (device->chipset) {
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+		cp_ctx(ctx, 0x402c20, 40);
+		for (i = 0; i < 32; i++)
+			gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff);
+		cp_ctx(ctx, 0x4030b8, 13);
+		gr_def(ctx, 0x4030dc, 0x00000005);
+		gr_def(ctx, 0x4030e8, 0x0000ffff);
+		break;
+	default:
+		cp_ctx(ctx, 0x402c10, 4);
+		if (device->chipset == 0x40)
+			cp_ctx(ctx, 0x402c20, 36);
+		else
+		if (device->chipset <= 0x42)
+			cp_ctx(ctx, 0x402c20, 24);
+		else
+		if (device->chipset <= 0x4a)
+			cp_ctx(ctx, 0x402c20, 16);
+		else
+			cp_ctx(ctx, 0x402c20, 8);
+		cp_ctx(ctx, 0x402cb0, device->chipset == 0x40 ? 12 : 13);
+		gr_def(ctx, 0x402cd4, 0x00000005);
+		if (device->chipset != 0x40)
+			gr_def(ctx, 0x402ce0, 0x0000ffff);
+		break;
+	}
+
+	cp_ctx(ctx, 0x403400, device->chipset == 0x40 ? 4 : 3);
+	cp_ctx(ctx, 0x403410, device->chipset == 0x40 ? 4 : 3);
+	cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->device));
+	for (i = 0; i < nv40_graph_vs_count(ctx->device); i++)
+		gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
+
+	if (device->chipset != 0x40) {
+		cp_ctx(ctx, 0x403600, 1);
+		gr_def(ctx, 0x403600, 0x00000001);
+	}
+	cp_ctx(ctx, 0x403800, 1);
+
+	cp_ctx(ctx, 0x403c18, 1);
+	gr_def(ctx, 0x403c18, 0x00000001);
+	switch (device->chipset) {
+	case 0x46:
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+		cp_ctx(ctx, 0x405018, 1);
+		gr_def(ctx, 0x405018, 0x08e00001);
+		cp_ctx(ctx, 0x405c24, 1);
+		gr_def(ctx, 0x405c24, 0x000e3000);
+		break;
+	}
+	if (device->chipset != 0x4e)
+		cp_ctx(ctx, 0x405800, 11);
+	cp_ctx(ctx, 0x407000, 1);
+}
+
+static void
+nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
+{
+	int len = nv44_graph_class(ctx->device) ? 0x0084 : 0x0684;
+
+	cp_out (ctx, 0x300000);
+	cp_lsr (ctx, len - 4);
+	cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save);
+	cp_lsr (ctx, len);
+	cp_name(ctx, cp_swap_state3d_3_is_save);
+	cp_out (ctx, 0x800001); /* CP_LOAD_MAGIC_UNK01 */
+
+	ctx->ctxvals_pos += len;
+}
+
+static void
+nv40_graph_construct_shader(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	struct nouveau_gpuobj *obj = ctx->data;
+	int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
+	int offset, i;
+
+	vs_nr    = nv40_graph_vs_count(ctx->device);
+	vs_nr_b0 = 363;
+	vs_nr_b1 = device->chipset == 0x40 ? 128 : 64;
+	if (device->chipset == 0x40) {
+		b0_offset = 0x2200/4; /* 33a0 */
+		b1_offset = 0x55a0/4; /* 1500 */
+		vs_len = 0x6aa0/4;
+	} else
+	if (device->chipset == 0x41 || device->chipset == 0x42) {
+		b0_offset = 0x2200/4; /* 2200 */
+		b1_offset = 0x4400/4; /* 0b00 */
+		vs_len = 0x4f00/4;
+	} else {
+		b0_offset = 0x1d40/4; /* 2200 */
+		b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
+		vs_len = nv44_graph_class(device) ? 0x4980/4 : 0x4a40/4;
+	}
+
+	cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
+	cp_out(ctx, nv44_graph_class(device) ? 0x800029 : 0x800041);
+	/* CP_LOAD_MAGIC_NV44TCL : CP_LOAD_MAGIC_NV40TCL */
+
+	offset = ctx->ctxvals_pos;
+	ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
+
+	if (ctx->mode != NOUVEAU_GRCTX_VALS)
+		return;
+
+	offset += 0x0280/4;
+	for (i = 0; i < 16; i++, offset += 2)
+		nv_wo32(obj, offset * 4, 0x3f800000);
+
+	for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
+		for (i = 0; i < vs_nr_b0 * 6; i += 6)
+			nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
+		for (i = 0; i < vs_nr_b1 * 4; i += 4)
+			nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
+	}
+}
+
+static void
+nv40_grctx_generate(struct nouveau_grctx *ctx)
+{
+	/* decide whether we're loading/unloading the context */
+	cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
+	cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
+
+	cp_name(ctx, cp_check_load);
+	cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
+	cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
+	cp_bra (ctx, ALWAYS, TRUE, cp_exit);
+
+	/* setup for context load */
+	cp_name(ctx, cp_setup_auto_load);
+	cp_wait(ctx, STATUS, IDLE);
+	cp_out (ctx, CP_NEXT_TO_SWAP);
+	cp_name(ctx, cp_setup_load);
+	cp_wait(ctx, STATUS, IDLE);
+	cp_set (ctx, SWAP_DIRECTION, LOAD);
+	cp_out (ctx, 0x00910880); /* ?? */
+	cp_out (ctx, 0x00901ffe); /* ?? */
+	cp_out (ctx, 0x01940000); /* ?? */
+	cp_lsr (ctx, 0x20);
+	cp_out (ctx, 0x0060000b); /* ?? */
+	cp_wait(ctx, UNK57, CLEAR);
+	cp_out (ctx, 0x0060000c); /* ?? */
+	cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
+
+	/* setup for context save */
+	cp_name(ctx, cp_setup_save);
+	cp_set (ctx, SWAP_DIRECTION, SAVE);
+
+	/* general PGRAPH state */
+	cp_name(ctx, cp_swap_state);
+	cp_pos (ctx, 0x00020/4);
+	nv40_graph_construct_general(ctx);
+	cp_wait(ctx, STATUS, IDLE);
+
+	/* 3D state, block 1 */
+	cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit);
+	nv40_graph_construct_state3d(ctx);
+	cp_wait(ctx, STATUS, IDLE);
+
+	/* 3D state, block 2 */
+	nv40_graph_construct_state3d_2(ctx);
+
+	/* Some other block of "random" state */
+	nv40_graph_construct_state3d_3(ctx);
+
+	/* Per-vertex shader state */
+	cp_pos (ctx, ctx->ctxvals_pos);
+	nv40_graph_construct_shader(ctx);
+
+	/* pre-exit state updates */
+	cp_name(ctx, cp_prepare_exit);
+	cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
+	cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
+	cp_out (ctx, CP_NEXT_TO_CURRENT);
+
+	cp_name(ctx, cp_exit);
+	cp_set (ctx, USER_SAVE, NOT_PENDING);
+	cp_set (ctx, USER_LOAD, NOT_PENDING);
+	cp_out (ctx, CP_END);
+}
+
+void
+nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
+{
+	nv40_grctx_generate(&(struct nouveau_grctx) {
+			     .device = device,
+			     .mode = NOUVEAU_GRCTX_VALS,
+			     .data = mem,
+			   });
+}
+
+int
+nv40_grctx_init(struct nouveau_device *device, u32 *size)
+{
+	u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL), i;
+	struct nouveau_grctx ctx = {
+		.device = device,
+		.mode = NOUVEAU_GRCTX_PROG,
+		.data = ctxprog,
+		.ctxprog_max = 256,
+	};
+
+	if (!ctxprog)
+		return -ENOMEM;
+
+	nv40_grctx_generate(&ctx);
+
+	nv_wr32(device, 0x400324, 0); /* reset ctxprog upload pointer */
+	for (i = 0; i < ctx.ctxprog_len; i++)
+		nv_wr32(device, 0x400328, ctxprog[i]); /* upload data port */
+	*size = ctx.ctxvals_pos * 4;
+
+	kfree(ctxprog);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
new file mode 100644
index 0000000..552fdbd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
@@ -0,0 +1,3341 @@
+/*
+ * Copyright 2009 Marcin Kościelnicki
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/gpuobj.h>
+
+#define CP_FLAG_CLEAR                 0
+#define CP_FLAG_SET                   1
+#define CP_FLAG_SWAP_DIRECTION        ((0 * 32) + 0)
+#define CP_FLAG_SWAP_DIRECTION_LOAD   0
+#define CP_FLAG_SWAP_DIRECTION_SAVE   1
+#define CP_FLAG_UNK01                 ((0 * 32) + 1)
+#define CP_FLAG_UNK01_CLEAR           0
+#define CP_FLAG_UNK01_SET             1
+#define CP_FLAG_UNK03                 ((0 * 32) + 3)
+#define CP_FLAG_UNK03_CLEAR           0
+#define CP_FLAG_UNK03_SET             1
+#define CP_FLAG_USER_SAVE             ((0 * 32) + 5)
+#define CP_FLAG_USER_SAVE_NOT_PENDING 0
+#define CP_FLAG_USER_SAVE_PENDING     1
+#define CP_FLAG_USER_LOAD             ((0 * 32) + 6)
+#define CP_FLAG_USER_LOAD_NOT_PENDING 0
+#define CP_FLAG_USER_LOAD_PENDING     1
+#define CP_FLAG_UNK0B                 ((0 * 32) + 0xb)
+#define CP_FLAG_UNK0B_CLEAR           0
+#define CP_FLAG_UNK0B_SET             1
+#define CP_FLAG_XFER_SWITCH           ((0 * 32) + 0xe)
+#define CP_FLAG_XFER_SWITCH_DISABLE   0
+#define CP_FLAG_XFER_SWITCH_ENABLE    1
+#define CP_FLAG_STATE                 ((0 * 32) + 0x1c)
+#define CP_FLAG_STATE_STOPPED         0
+#define CP_FLAG_STATE_RUNNING         1
+#define CP_FLAG_UNK1D                 ((0 * 32) + 0x1d)
+#define CP_FLAG_UNK1D_CLEAR           0
+#define CP_FLAG_UNK1D_SET             1
+#define CP_FLAG_UNK20                 ((1 * 32) + 0)
+#define CP_FLAG_UNK20_CLEAR           0
+#define CP_FLAG_UNK20_SET             1
+#define CP_FLAG_STATUS                ((2 * 32) + 0)
+#define CP_FLAG_STATUS_BUSY           0
+#define CP_FLAG_STATUS_IDLE           1
+#define CP_FLAG_AUTO_SAVE             ((2 * 32) + 4)
+#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
+#define CP_FLAG_AUTO_SAVE_PENDING     1
+#define CP_FLAG_AUTO_LOAD             ((2 * 32) + 5)
+#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
+#define CP_FLAG_AUTO_LOAD_PENDING     1
+#define CP_FLAG_NEWCTX                ((2 * 32) + 10)
+#define CP_FLAG_NEWCTX_BUSY           0
+#define CP_FLAG_NEWCTX_DONE           1
+#define CP_FLAG_XFER                  ((2 * 32) + 11)
+#define CP_FLAG_XFER_IDLE             0
+#define CP_FLAG_XFER_BUSY             1
+#define CP_FLAG_ALWAYS                ((2 * 32) + 13)
+#define CP_FLAG_ALWAYS_FALSE          0
+#define CP_FLAG_ALWAYS_TRUE           1
+#define CP_FLAG_INTR                  ((2 * 32) + 15)
+#define CP_FLAG_INTR_NOT_PENDING      0
+#define CP_FLAG_INTR_PENDING          1
+
+#define CP_CTX                   0x00100000
+#define CP_CTX_COUNT             0x000f0000
+#define CP_CTX_COUNT_SHIFT               16
+#define CP_CTX_REG               0x00003fff
+#define CP_LOAD_SR               0x00200000
+#define CP_LOAD_SR_VALUE         0x000fffff
+#define CP_BRA                   0x00400000
+#define CP_BRA_IP                0x0001ff00
+#define CP_BRA_IP_SHIFT                   8
+#define CP_BRA_IF_CLEAR          0x00000080
+#define CP_BRA_FLAG              0x0000007f
+#define CP_WAIT                  0x00500000
+#define CP_WAIT_SET              0x00000080
+#define CP_WAIT_FLAG             0x0000007f
+#define CP_SET                   0x00700000
+#define CP_SET_1                 0x00000080
+#define CP_SET_FLAG              0x0000007f
+#define CP_NEWCTX                0x00600004
+#define CP_NEXT_TO_SWAP          0x00600005
+#define CP_SET_CONTEXT_POINTER   0x00600006
+#define CP_SET_XFER_POINTER      0x00600007
+#define CP_ENABLE                0x00600009
+#define CP_END                   0x0060000c
+#define CP_NEXT_TO_CURRENT       0x0060000d
+#define CP_DISABLE1              0x0090ffff
+#define CP_DISABLE2              0x0091ffff
+#define CP_XFER_1                0x008000ff
+#define CP_XFER_2                0x008800ff
+#define CP_SEEK_1                0x00c000ff
+#define CP_SEEK_2                0x00c800ff
+
+#include "nv50.h"
+#include "ctx.h"
+
+#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
+#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
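+/* IS_NVA3F matches chipsets 0xa1-0xa9 plus 0xaf -- in practice the 0xa3,
+ * 0xa5, 0xa8 and 0xaf cases seen in the switch tables below; IS_NVAAF
+ * matches the 0xaa-0xac range. */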
+
+/*
+ * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
+ * the GPU itself that does context-switching, but it needs a special
+ * microcode to do it. And it's the driver's task to supply this microcode,
+ * further known as ctxprog, as well as the initial context values, known
+ * as ctxvals.
+ *
+ * Without ctxprog, you cannot switch contexts. Not even in software, since
+ * the majority of context [xfer strands] isn't accessible directly. You're
+ * stuck with a single channel, and you also suffer all the problems resulting
+ * from missing ctxvals, since you cannot load them.
+ *
+ * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to
+ * run 2d operations, but trying to utilise 3d or CUDA will just lock you up,
+ * since you don't have... some sort of needed setup.
+ *
+ * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since
+ * it's too much hassle to handle no-ctxprog as a special case.
+ */
+
+/*
+ * How ctxprogs work.
+ *
+ * The ctxprog is written in its own kind of microcode, with a very small
+ * and crappy set of available commands. You upload it to a small [512 insns]
+ * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to
+ * switch channels, or when the driver explicitly requests it. Stuff visible
+ * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands,
+ * the per-channel context save area in VRAM [known as ctxvals or grctx],
+ * 4 flags registers, a scratch register, two grctx pointers, plus many
+ * random poorly-understood details.
+ *
+ * When ctxprog runs, it's supposed to check what operations are asked of it,
+ * save the old context if requested, optionally reset PGRAPH and switch to
+ * the new channel, and load the new context. The context consists of three
+ * major parts: a subset of MMIO registers and two "xfer areas".
+ */
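+
+/* A hypothetical decode of a single ctxprog word using the masks above --
+ * an illustrative sketch only, not code the driver runs.  Assuming the
+ * opcode occupies the top nibble shown in the defines:
+ *
+ *   if ((insn & 0x00f00000) == CP_CTX) {
+ *           u32 count = (insn & CP_CTX_COUNT) >> CP_CTX_COUNT_SHIFT;
+ *           u32 reg   =  insn & CP_CTX_REG;
+ *           // save/restore "count" MMIO words starting at register "reg"
+ *   }
+ *
+ * Note the differences from the NV40 encoding earlier in this file:
+ * CP_BRA_IP is 9 bits wide here (matching the 512-insn ctxprog area)
+ * rather than 8, and CP_CTX_COUNT sits in bits 16-19 rather than 14-19.
+ */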
+
+/* TODO:
+ *  - document unimplemented bits compared to nvidia
+ *  - NVAx: make a TP subroutine, use it.
+ *  - use 0x4008fc instead of 0x1540?
+ */
+
+enum cp_label {
+	cp_check_load = 1,
+	cp_setup_auto_load,
+	cp_setup_load,
+	cp_setup_save,
+	cp_swap_state,
+	cp_prepare_exit,
+	cp_exit,
+};
+
+static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
+
+/* Main function: construct the ctxprog skeleton, call the other functions. */
+
+static int
+nv50_grctx_generate(struct nouveau_grctx *ctx)
+{
+	cp_set (ctx, STATE, RUNNING);
+	cp_set (ctx, XFER_SWITCH, ENABLE);
+	/* decide whether we're loading/unloading the context */
+	cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
+	cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
+
+	cp_name(ctx, cp_check_load);
+	cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
+	cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
+	cp_bra (ctx, ALWAYS, TRUE, cp_prepare_exit);
+
+	/* setup for context load */
+	cp_name(ctx, cp_setup_auto_load);
+	cp_out (ctx, CP_DISABLE1);
+	cp_out (ctx, CP_DISABLE2);
+	cp_out (ctx, CP_ENABLE);
+	cp_out (ctx, CP_NEXT_TO_SWAP);
+	cp_set (ctx, UNK01, SET);
+	cp_name(ctx, cp_setup_load);
+	cp_out (ctx, CP_NEWCTX);
+	cp_wait(ctx, NEWCTX, BUSY);
+	cp_set (ctx, UNK1D, CLEAR);
+	cp_set (ctx, SWAP_DIRECTION, LOAD);
+	cp_bra (ctx, UNK0B, SET, cp_prepare_exit);
+	cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
+
+	/* setup for context save */
+	cp_name(ctx, cp_setup_save);
+	cp_set (ctx, UNK1D, SET);
+	cp_wait(ctx, STATUS, BUSY);
+	cp_wait(ctx, INTR, PENDING);
+	cp_bra (ctx, STATUS, BUSY, cp_setup_save);
+	cp_set (ctx, UNK01, SET);
+	cp_set (ctx, SWAP_DIRECTION, SAVE);
+
+	/* general PGRAPH state */
+	cp_name(ctx, cp_swap_state);
+	cp_set (ctx, UNK03, SET);
+	cp_pos (ctx, 0x00004/4);
+	cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */
+	cp_pos (ctx, 0x00100/4);
+	nv50_graph_construct_mmio(ctx);
+	nv50_graph_construct_xfer1(ctx);
+	nv50_graph_construct_xfer2(ctx);
+
+	cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
+
+	cp_set (ctx, UNK20, SET);
+	cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */
+	cp_lsr (ctx, ctx->ctxvals_base);
+	cp_out (ctx, CP_SET_XFER_POINTER);
+	cp_lsr (ctx, 4);
+	cp_out (ctx, CP_SEEK_1);
+	cp_out (ctx, CP_XFER_1);
+	cp_wait(ctx, XFER, BUSY);
+
+	/* pre-exit state updates */
+	cp_name(ctx, cp_prepare_exit);
+	cp_set (ctx, UNK01, CLEAR);
+	cp_set (ctx, UNK03, CLEAR);
+	cp_set (ctx, UNK1D, CLEAR);
+
+	cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
+	cp_out (ctx, CP_NEXT_TO_CURRENT);
+
+	cp_name(ctx, cp_exit);
+	cp_set (ctx, USER_SAVE, NOT_PENDING);
+	cp_set (ctx, USER_LOAD, NOT_PENDING);
+	cp_set (ctx, XFER_SWITCH, DISABLE);
+	cp_set (ctx, STATE, STOPPED);
+	cp_out (ctx, CP_END);
+	ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */
+
+	return 0;
+}
+
+void
+nv50_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
+{
+	nv50_grctx_generate(&(struct nouveau_grctx) {
+			     .device = device,
+			     .mode = NOUVEAU_GRCTX_VALS,
+			     .data = mem,
+			   });
+}
+
+int
+nv50_grctx_init(struct nouveau_device *device, u32 *size)
+{
+	u32 *ctxprog = kmalloc(512 * 4, GFP_KERNEL), i;
+	struct nouveau_grctx ctx = {
+		.device = device,
+		.mode = NOUVEAU_GRCTX_PROG,
+		.data = ctxprog,
+		.ctxprog_max = 512,
+	};
+
+	if (!ctxprog)
+		return -ENOMEM;
+	nv50_grctx_generate(&ctx);
+
+	nv_wr32(device, 0x400324, 0);
+	for (i = 0; i < ctx.ctxprog_len; i++)
+		nv_wr32(device, 0x400328, ctxprog[i]);
+	*size = ctx.ctxvals_pos * 4;
+	kfree(ctxprog);
+	return 0;
+}
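+
+/* A sketch of the expected call sequence -- hypothetical caller shown for
+ * illustration; the real hookup lives in the nv50 graph engine code, not
+ * in this file:
+ *
+ *   u32 size;
+ *   int ret = nv50_grctx_init(device, &size);
+ *   if (ret)
+ *           return ret;
+ *   // ... allocate a "size"-byte gpuobj for the channel's grctx ...
+ *   nv50_grctx_fill(device, grctx_gpuobj);
+ */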
+
+/*
+ * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
+ * registers to save/restore and the default values for them.
+ */
+
+static void
+nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
+
+static void
+nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int i, j;
+	int offset, base;
+	u32 units = nv_rd32 (ctx->device, 0x1540);
+
+	/* 0800: DISPATCH */
+	cp_ctx(ctx, 0x400808, 7);
+	gr_def(ctx, 0x400814, 0x00000030);
+	cp_ctx(ctx, 0x400834, 0x32);
+	if (device->chipset == 0x50) {
+		gr_def(ctx, 0x400834, 0xff400040);
+		gr_def(ctx, 0x400838, 0xfff00080);
+		gr_def(ctx, 0x40083c, 0xfff70090);
+		gr_def(ctx, 0x400840, 0xffe806a8);
+	}
+	gr_def(ctx, 0x400844, 0x00000002);
+	if (IS_NVA3F(device->chipset))
+		gr_def(ctx, 0x400894, 0x00001000);
+	gr_def(ctx, 0x4008e8, 0x00000003);
+	gr_def(ctx, 0x4008ec, 0x00001000);
+	if (device->chipset == 0x50)
+		cp_ctx(ctx, 0x400908, 0xb);
+	else if (device->chipset < 0xa0)
+		cp_ctx(ctx, 0x400908, 0xc);
+	else
+		cp_ctx(ctx, 0x400908, 0xe);
+
+	if (device->chipset >= 0xa0)
+		cp_ctx(ctx, 0x400b00, 0x1);
+	if (IS_NVA3F(device->chipset)) {
+		cp_ctx(ctx, 0x400b10, 0x1);
+		gr_def(ctx, 0x400b10, 0x0001629d);
+		cp_ctx(ctx, 0x400b20, 0x1);
+		gr_def(ctx, 0x400b20, 0x0001629d);
+	}
+
+	nv50_graph_construct_mmio_ddata(ctx);
+
+	/* 0C00: VFETCH */
+	cp_ctx(ctx, 0x400c08, 0x2);
+	gr_def(ctx, 0x400c08, 0x0000fe0c);
+
+	/* 1000 */
+	if (device->chipset < 0xa0) {
+		cp_ctx(ctx, 0x401008, 0x4);
+		gr_def(ctx, 0x401014, 0x00001000);
+	} else if (!IS_NVA3F(device->chipset)) {
+		cp_ctx(ctx, 0x401008, 0x5);
+		gr_def(ctx, 0x401018, 0x00001000);
+	} else {
+		cp_ctx(ctx, 0x401008, 0x5);
+		gr_def(ctx, 0x401018, 0x00004000);
+	}
+
+	/* 1400 */
+	cp_ctx(ctx, 0x401400, 0x8);
+	cp_ctx(ctx, 0x401424, 0x3);
+	if (device->chipset == 0x50)
+		gr_def(ctx, 0x40142c, 0x0001fd87);
+	else
+		gr_def(ctx, 0x40142c, 0x00000187);
+	cp_ctx(ctx, 0x401540, 0x5);
+	gr_def(ctx, 0x401550, 0x00001018);
+
+	/* 1800: STREAMOUT */
+	cp_ctx(ctx, 0x401814, 0x1);
+	gr_def(ctx, 0x401814, 0x000000ff);
+	if (device->chipset == 0x50) {
+		cp_ctx(ctx, 0x40181c, 0xe);
+		gr_def(ctx, 0x401850, 0x00000004);
+	} else if (device->chipset < 0xa0) {
+		cp_ctx(ctx, 0x40181c, 0xf);
+		gr_def(ctx, 0x401854, 0x00000004);
+	} else {
+		cp_ctx(ctx, 0x40181c, 0x13);
+		gr_def(ctx, 0x401864, 0x00000004);
+	}
+
+	/* 1C00 */
+	cp_ctx(ctx, 0x401c00, 0x1);
+	switch (device->chipset) {
+	case 0x50:
+		gr_def(ctx, 0x401c00, 0x0001005f);
+		break;
+	case 0x84:
+	case 0x86:
+	case 0x94:
+		gr_def(ctx, 0x401c00, 0x044d00df);
+		break;
+	case 0x92:
+	case 0x96:
+	case 0x98:
+	case 0xa0:
+	case 0xaa:
+	case 0xac:
+		gr_def(ctx, 0x401c00, 0x042500df);
+		break;
+	case 0xa3:
+	case 0xa5:
+	case 0xa8:
+	case 0xaf:
+		gr_def(ctx, 0x401c00, 0x142500df);
+		break;
+	}
+
+	/* 2000 */
+
+	/* 2400 */
+	cp_ctx(ctx, 0x402400, 0x1);
+	if (device->chipset == 0x50)
+		cp_ctx(ctx, 0x402408, 0x1);
+	else
+		cp_ctx(ctx, 0x402408, 0x2);
+	gr_def(ctx, 0x402408, 0x00000600);
+
+	/* 2800: CSCHED */
+	cp_ctx(ctx, 0x402800, 0x1);
+	if (device->chipset == 0x50)
+		gr_def(ctx, 0x402800, 0x00000006);
+
+	/* 2C00: ZCULL */
+	cp_ctx(ctx, 0x402c08, 0x6);
+	if (device->chipset != 0x50)
+		gr_def(ctx, 0x402c14, 0x01000000);
+	gr_def(ctx, 0x402c18, 0x000000ff);
+	if (device->chipset == 0x50)
+		cp_ctx(ctx, 0x402ca0, 0x1);
+	else
+		cp_ctx(ctx, 0x402ca0, 0x2);
+	if (device->chipset < 0xa0)
+		gr_def(ctx, 0x402ca0, 0x00000400);
+	else if (!IS_NVA3F(device->chipset))
+		gr_def(ctx, 0x402ca0, 0x00000800);
+	else
+		gr_def(ctx, 0x402ca0, 0x00000400);
+	cp_ctx(ctx, 0x402cac, 0x4);
+
+	/* 3000: ENG2D */
+	cp_ctx(ctx, 0x403004, 0x1);
+	gr_def(ctx, 0x403004, 0x00000001);
+
+	/* 3400 */
+	if (device->chipset >= 0xa0) {
+		cp_ctx(ctx, 0x403404, 0x1);
+		gr_def(ctx, 0x403404, 0x00000001);
+	}
+
+	/* 5000: CCACHE */
+	cp_ctx(ctx, 0x405000, 0x1);
+	switch (device->chipset) {
+	case 0x50:
+		gr_def(ctx, 0x405000, 0x00300080);
+		break;
+	case 0x84:
+	case 0xa0:
+	case 0xa3:
+	case 0xa5:
+	case 0xa8:
+	case 0xaa:
+	case 0xac:
+	case 0xaf:
+		gr_def(ctx, 0x405000, 0x000e0080);
+		break;
+	case 0x86:
+	case 0x92:
+	case 0x94:
+	case 0x96:
+	case 0x98:
+		gr_def(ctx, 0x405000, 0x00000080);
+		break;
+	}
+	cp_ctx(ctx, 0x405014, 0x1);
+	gr_def(ctx, 0x405014, 0x00000004);
+	cp_ctx(ctx, 0x40501c, 0x1);
+	cp_ctx(ctx, 0x405024, 0x1);
+	cp_ctx(ctx, 0x40502c, 0x1);
+
+	/* 6000? */
+	if (device->chipset == 0x50)
+		cp_ctx(ctx, 0x4063e0, 0x1);
+
+	/* 6800: M2MF */
+	if (device->chipset < 0x90) {
+		cp_ctx(ctx, 0x406814, 0x2b);
+		gr_def(ctx, 0x406818, 0x00000f80);
+		gr_def(ctx, 0x406860, 0x007f0080);
+		gr_def(ctx, 0x40689c, 0x007f0080);
+	} else {
+		cp_ctx(ctx, 0x406814, 0x4);
+		if (device->chipset == 0x98)
+			gr_def(ctx, 0x406818, 0x00000f80);
+		else
+			gr_def(ctx, 0x406818, 0x00001f80);
+		if (IS_NVA3F(device->chipset))
+			gr_def(ctx, 0x40681c, 0x00000030);
+		cp_ctx(ctx, 0x406830, 0x3);
+	}
+
+	/* 7000: per-ROP group state */
+	for (i = 0; i < 8; i++) {
+		if (units & (1<<(i+16))) {
+			cp_ctx(ctx, 0x407000 + (i<<8), 3);
+			if (device->chipset == 0x50)
+				gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
+			else if (device->chipset != 0xa5)
+				gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
+			else
+				gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
+			gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
+
+			if (device->chipset == 0x50) {
+				cp_ctx(ctx, 0x407010 + (i<<8), 1);
+			} else if (device->chipset < 0xa0) {
+				cp_ctx(ctx, 0x407010 + (i<<8), 2);
+				gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
+				gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
+			} else {
+				cp_ctx(ctx, 0x407010 + (i<<8), 3);
+				gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
+				if (device->chipset != 0xa5)
+					gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
+				else
+					gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
+			}
+
+			cp_ctx(ctx, 0x407080 + (i<<8), 4);
+			if (device->chipset != 0xa5)
+				gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
+			else
+				gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
+			if (device->chipset == 0x50)
+				gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
+			else
+				gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
+			gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
+
+			if (device->chipset < 0xa0)
+				cp_ctx(ctx, 0x407094 + (i<<8), 1);
+			else if (!IS_NVA3F(device->chipset))
+				cp_ctx(ctx, 0x407094 + (i<<8), 3);
+			else {
+				cp_ctx(ctx, 0x407094 + (i<<8), 4);
+				gr_def(ctx, 0x4070a0 + (i<<8), 1);
+			}
+		}
+	}
+
+	cp_ctx(ctx, 0x407c00, 0x3);
+	if (device->chipset < 0x90)
+		gr_def(ctx, 0x407c00, 0x00010040);
+	else if (device->chipset < 0xa0)
+		gr_def(ctx, 0x407c00, 0x00390040);
+	else
+		gr_def(ctx, 0x407c00, 0x003d0040);
+	gr_def(ctx, 0x407c08, 0x00000022);
+	if (device->chipset >= 0xa0) {
+		cp_ctx(ctx, 0x407c10, 0x3);
+		cp_ctx(ctx, 0x407c20, 0x1);
+		cp_ctx(ctx, 0x407c2c, 0x1);
+	}
+
+	if (device->chipset < 0xa0) {
+		cp_ctx(ctx, 0x407d00, 0x9);
+	} else {
+		cp_ctx(ctx, 0x407d00, 0x15);
+	}
+	if (device->chipset == 0x98)
+		gr_def(ctx, 0x407d08, 0x00380040);
+	else {
+		if (device->chipset < 0x90)
+			gr_def(ctx, 0x407d08, 0x00010040);
+		else if (device->chipset < 0xa0)
+			gr_def(ctx, 0x407d08, 0x00390040);
+		else
+			gr_def(ctx, 0x407d08, 0x003d0040);
+		gr_def(ctx, 0x407d0c, 0x00000022);
+	}
+
+	/* 8000+: per-TP state */
+	for (i = 0; i < 10; i++) {
+		if (units & (1<<i)) {
+			if (device->chipset < 0xa0)
+				base = 0x408000 + (i<<12);
+			else
+				base = 0x408000 + (i<<11);
+			if (device->chipset < 0xa0)
+				offset = base + 0xc00;
+			else
+				offset = base + 0x80;
+			cp_ctx(ctx, offset + 0x00, 1);
+			gr_def(ctx, offset + 0x00, 0x0000ff0a);
+			cp_ctx(ctx, offset + 0x08, 1);
+
+			/* per-MP state */
+			for (j = 0; j < (device->chipset < 0xa0 ? 2 : 4); j++) {
+				if (!(units & (1 << (j+24)))) continue;
+				if (device->chipset < 0xa0)
+					offset = base + 0x200 + (j<<7);
+				else
+					offset = base + 0x100 + (j<<7);
+				cp_ctx(ctx, offset, 0x20);
+				gr_def(ctx, offset + 0x00, 0x01800000);
+				gr_def(ctx, offset + 0x04, 0x00160000);
+				gr_def(ctx, offset + 0x08, 0x01800000);
+				gr_def(ctx, offset + 0x18, 0x0003ffff);
+				switch (device->chipset) {
+				case 0x50:
+					gr_def(ctx, offset + 0x1c, 0x00080000);
+					break;
+				case 0x84:
+					gr_def(ctx, offset + 0x1c, 0x00880000);
+					break;
+				case 0x86:
+					gr_def(ctx, offset + 0x1c, 0x018c0000);
+					break;
+				case 0x92:
+				case 0x96:
+				case 0x98:
+					gr_def(ctx, offset + 0x1c, 0x118c0000);
+					break;
+				case 0x94:
+					gr_def(ctx, offset + 0x1c, 0x10880000);
+					break;
+				case 0xa0:
+				case 0xa5:
+					gr_def(ctx, offset + 0x1c, 0x310c0000);
+					break;
+				case 0xa3:
+				case 0xa8:
+				case 0xaa:
+				case 0xac:
+				case 0xaf:
+					gr_def(ctx, offset + 0x1c, 0x300c0000);
+					break;
+				}
+				gr_def(ctx, offset + 0x40, 0x00010401);
+				if (device->chipset == 0x50)
+					gr_def(ctx, offset + 0x48, 0x00000040);
+				else
+					gr_def(ctx, offset + 0x48, 0x00000078);
+				gr_def(ctx, offset + 0x50, 0x000000bf);
+				gr_def(ctx, offset + 0x58, 0x00001210);
+				if (device->chipset == 0x50)
+					gr_def(ctx, offset + 0x5c, 0x00000080);
+				else
+					gr_def(ctx, offset + 0x5c, 0x08000080);
+				if (device->chipset >= 0xa0)
+					gr_def(ctx, offset + 0x68, 0x0000003e);
+			}
+
+			if (device->chipset < 0xa0)
+				cp_ctx(ctx, base + 0x300, 0x4);
+			else
+				cp_ctx(ctx, base + 0x300, 0x5);
+			if (device->chipset == 0x50)
+				gr_def(ctx, base + 0x304, 0x00007070);
+			else if (device->chipset < 0xa0)
+				gr_def(ctx, base + 0x304, 0x00027070);
+			else if (!IS_NVA3F(device->chipset))
+				gr_def(ctx, base + 0x304, 0x01127070);
+			else
+				gr_def(ctx, base + 0x304, 0x05127070);
+
+			if (device->chipset < 0xa0)
+				cp_ctx(ctx, base + 0x318, 1);
+			else
+				cp_ctx(ctx, base + 0x320, 1);
+			if (device->chipset == 0x50)
+				gr_def(ctx, base + 0x318, 0x0003ffff);
+			else if (device->chipset < 0xa0)
+				gr_def(ctx, base + 0x318, 0x03ffffff);
+			else
+				gr_def(ctx, base + 0x320, 0x07ffffff);
+
+			if (device->chipset < 0xa0)
+				cp_ctx(ctx, base + 0x324, 5);
+			else
+				cp_ctx(ctx, base + 0x328, 4);
+
+			if (device->chipset < 0xa0) {
+				cp_ctx(ctx, base + 0x340, 9);
+				offset = base + 0x340;
+			} else if (!IS_NVA3F(device->chipset)) {
+				cp_ctx(ctx, base + 0x33c, 0xb);
+				offset = base + 0x344;
+			} else {
+				cp_ctx(ctx, base + 0x33c, 0xd);
+				offset = base + 0x344;
+			}
+			gr_def(ctx, offset + 0x0, 0x00120407);
+			gr_def(ctx, offset + 0x4, 0x05091507);
+			if (device->chipset == 0x84)
+				gr_def(ctx, offset + 0x8, 0x05100202);
+			else
+				gr_def(ctx, offset + 0x8, 0x05010202);
+			gr_def(ctx, offset + 0xc, 0x00030201);
+			if (device->chipset == 0xa3)
+				cp_ctx(ctx, base + 0x36c, 1);
+
+			cp_ctx(ctx, base + 0x400, 2);
+			gr_def(ctx, base + 0x404, 0x00000040);
+			cp_ctx(ctx, base + 0x40c, 2);
+			gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
+			gr_def(ctx, base + 0x410, 0x00141210);
+
+			if (device->chipset < 0xa0)
+				offset = base + 0x800;
+			else
+				offset = base + 0x500;
+			cp_ctx(ctx, offset, 6);
+			gr_def(ctx, offset + 0x0, 0x000001f0);
+			gr_def(ctx, offset + 0x4, 0x00000001);
+			gr_def(ctx, offset + 0x8, 0x00000003);
+			if (device->chipset == 0x50 || IS_NVAAF(device->chipset))
+				gr_def(ctx, offset + 0xc, 0x00008000);
+			gr_def(ctx, offset + 0x14, 0x00039e00);
+			cp_ctx(ctx, offset + 0x1c, 2);
+			if (device->chipset == 0x50)
+				gr_def(ctx, offset + 0x1c, 0x00000040);
+			else
+				gr_def(ctx, offset + 0x1c, 0x00000100);
+			gr_def(ctx, offset + 0x20, 0x00003800);
+
+			if (device->chipset >= 0xa0) {
+				cp_ctx(ctx, base + 0x54c, 2);
+				if (!IS_NVA3F(device->chipset))
+					gr_def(ctx, base + 0x54c, 0x003fe006);
+				else
+					gr_def(ctx, base + 0x54c, 0x003fe007);
+				gr_def(ctx, base + 0x550, 0x003fe000);
+			}
+
+			if (device->chipset < 0xa0)
+				offset = base + 0xa00;
+			else
+				offset = base + 0x680;
+			cp_ctx(ctx, offset, 1);
+			gr_def(ctx, offset, 0x00404040);
+
+			if (device->chipset < 0xa0)
+				offset = base + 0xe00;
+			else
+				offset = base + 0x700;
+			cp_ctx(ctx, offset, 2);
+			if (device->chipset < 0xa0)
+				gr_def(ctx, offset, 0x0077f005);
+			else if (device->chipset == 0xa5)
+				gr_def(ctx, offset, 0x6cf7f007);
+			else if (device->chipset == 0xa8)
+				gr_def(ctx, offset, 0x6cfff007);
+			else if (device->chipset == 0xac)
+				gr_def(ctx, offset, 0x0cfff007);
+			else
+				gr_def(ctx, offset, 0x0cf7f007);
+			if (device->chipset == 0x50)
+				gr_def(ctx, offset + 0x4, 0x00007fff);
+			else if (device->chipset < 0xa0)
+				gr_def(ctx, offset + 0x4, 0x003f7fff);
+			else
+				gr_def(ctx, offset + 0x4, 0x02bf7fff);
+			cp_ctx(ctx, offset + 0x2c, 1);
+			if (device->chipset == 0x50) {
+				cp_ctx(ctx, offset + 0x50, 9);
+				gr_def(ctx, offset + 0x54, 0x000003ff);
+				gr_def(ctx, offset + 0x58, 0x00000003);
+				gr_def(ctx, offset + 0x5c, 0x00000003);
+				gr_def(ctx, offset + 0x60, 0x000001ff);
+				gr_def(ctx, offset + 0x64, 0x0000001f);
+				gr_def(ctx, offset + 0x68, 0x0000000f);
+				gr_def(ctx, offset + 0x6c, 0x0000000f);
+			} else if (device->chipset < 0xa0) {
+				cp_ctx(ctx, offset + 0x50, 1);
+				cp_ctx(ctx, offset + 0x70, 1);
+			} else {
+				cp_ctx(ctx, offset + 0x50, 1);
+				cp_ctx(ctx, offset + 0x60, 5);
+			}
+		}
+	}
+}
+
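+/* Reserve "num" words in the ctxvals image for the ddata block, writing the
+ * non-zero default "val" only when generating values (NOUVEAU_GRCTX_VALS
+ * mode); zero defaults are left implicit.  The whole block emitted below is
+ * mapped onto a contiguous register window by the cp_ctx() call at the end
+ * of nv50_graph_construct_mmio_ddata(). */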
+static void
+dd_emit(struct nouveau_grctx *ctx, int num, u32 val) {
+	int i;
+	if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
+		for (i = 0; i < num; i++)
+			nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+	ctx->ctxvals_pos += num;
+}
+
+static void
+nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int base, num;
+	base = ctx->ctxvals_pos;
+
+	/* tesla state */
+	dd_emit(ctx, 1, 0);	/* 00000001 UNK0F90 */
+	dd_emit(ctx, 1, 0);	/* 00000001 UNK135C */
+
+	/* SRC_TIC state */
+	dd_emit(ctx, 1, 0);	/* 00000007 SRC_TILE_MODE_Z */
+	dd_emit(ctx, 1, 2);	/* 00000007 SRC_TILE_MODE_Y */
+	dd_emit(ctx, 1, 1);	/* 00000001 SRC_LINEAR #1 */
+	dd_emit(ctx, 1, 0);	/* 000000ff SRC_ADDRESS_HIGH */
+	dd_emit(ctx, 1, 0);	/* 00000001 SRC_SRGB */
+	if (device->chipset >= 0x94)
+		dd_emit(ctx, 1, 0);	/* 00000003 eng2d UNK0258 */
+	dd_emit(ctx, 1, 1);	/* 00000fff SRC_DEPTH */
+	dd_emit(ctx, 1, 0x100);	/* 0000ffff SRC_HEIGHT */
+
+	/* turing state */
+	dd_emit(ctx, 1, 0);		/* 0000000f TEXTURES_LOG2 */
+	dd_emit(ctx, 1, 0);		/* 0000000f SAMPLERS_LOG2 */
+	dd_emit(ctx, 1, 0);		/* 000000ff CB_DEF_ADDRESS_HIGH */
+	dd_emit(ctx, 1, 0);		/* ffffffff CB_DEF_ADDRESS_LOW */
+	dd_emit(ctx, 1, 0);		/* ffffffff SHARED_SIZE */
+	dd_emit(ctx, 1, 2);		/* ffffffff REG_MODE */
+	dd_emit(ctx, 1, 1);		/* 0000ffff BLOCK_ALLOC_THREADS */
+	dd_emit(ctx, 1, 1);		/* 00000001 LANES32 */
+	dd_emit(ctx, 1, 0);		/* 000000ff UNK370 */
+	dd_emit(ctx, 1, 0);		/* 000000ff USER_PARAM_UNK */
+	dd_emit(ctx, 1, 0);		/* 000000ff USER_PARAM_COUNT */
+	dd_emit(ctx, 1, 1);		/* 000000ff UNK384 bits 8-15 */
+	dd_emit(ctx, 1, 0x3fffff);	/* 003fffff TIC_LIMIT */
+	dd_emit(ctx, 1, 0x1fff);	/* 000fffff TSC_LIMIT */
+	dd_emit(ctx, 1, 0);		/* 0000ffff CB_ADDR_INDEX */
+	dd_emit(ctx, 1, 1);		/* 000007ff BLOCKDIM_X */
+	dd_emit(ctx, 1, 1);		/* 000007ff BLOCKDIM_XMY */
+	dd_emit(ctx, 1, 0);		/* 00000001 BLOCKDIM_XMY_OVERFLOW */
+	dd_emit(ctx, 1, 1);		/* 0003ffff BLOCKDIM_XMYMZ */
+	dd_emit(ctx, 1, 1);		/* 000007ff BLOCKDIM_Y */
+	dd_emit(ctx, 1, 1);		/* 0000007f BLOCKDIM_Z */
+	dd_emit(ctx, 1, 4);		/* 000000ff CP_REG_ALLOC_TEMP */
+	dd_emit(ctx, 1, 1);		/* 00000001 BLOCKDIM_DIRTY */
+	if (IS_NVA3F(device->chipset))
+		dd_emit(ctx, 1, 0);	/* 00000003 UNK03E8 */
+	dd_emit(ctx, 1, 1);		/* 0000007f BLOCK_ALLOC_HALFWARPS */
+	dd_emit(ctx, 1, 1);		/* 00000007 LOCAL_WARPS_NO_CLAMP */
+	dd_emit(ctx, 1, 7);		/* 00000007 LOCAL_WARPS_LOG_ALLOC */
+	dd_emit(ctx, 1, 1);		/* 00000007 STACK_WARPS_NO_CLAMP */
+	dd_emit(ctx, 1, 7);		/* 00000007 STACK_WARPS_LOG_ALLOC */
+	dd_emit(ctx, 1, 1);		/* 00001fff BLOCK_ALLOC_REGSLOTS_PACKED */
+	dd_emit(ctx, 1, 1);		/* 00001fff BLOCK_ALLOC_REGSLOTS_STRIDED */
+	dd_emit(ctx, 1, 1);		/* 000007ff BLOCK_ALLOC_THREADS */
+
+	/* compat 2d state */
+	if (device->chipset == 0x50) {
+		dd_emit(ctx, 4, 0);		/* 0000ffff clip X, Y, W, H */
+
+		dd_emit(ctx, 1, 1);		/* ffffffff chroma COLOR_FORMAT */
+
+		dd_emit(ctx, 1, 1);		/* ffffffff pattern COLOR_FORMAT */
+		dd_emit(ctx, 1, 0);		/* ffffffff pattern SHAPE */
+		dd_emit(ctx, 1, 1);		/* ffffffff pattern PATTERN_SELECT */
+
+		dd_emit(ctx, 1, 0xa);		/* ffffffff surf2d SRC_FORMAT */
+		dd_emit(ctx, 1, 0);		/* ffffffff surf2d DMA_SRC */
+		dd_emit(ctx, 1, 0);		/* 000000ff surf2d SRC_ADDRESS_HIGH */
+		dd_emit(ctx, 1, 0);		/* ffffffff surf2d SRC_ADDRESS_LOW */
+		dd_emit(ctx, 1, 0x40);		/* 0000ffff surf2d SRC_PITCH */
+		dd_emit(ctx, 1, 0);		/* 0000000f surf2d SRC_TILE_MODE_Z */
+		dd_emit(ctx, 1, 2);		/* 0000000f surf2d SRC_TILE_MODE_Y */
+		dd_emit(ctx, 1, 0x100);		/* ffffffff surf2d SRC_HEIGHT */
+		dd_emit(ctx, 1, 1);		/* 00000001 surf2d SRC_LINEAR */
+		dd_emit(ctx, 1, 0x100);		/* ffffffff surf2d SRC_WIDTH */
+
+		dd_emit(ctx, 1, 0);		/* 0000ffff gdirect CLIP_B_X */
+		dd_emit(ctx, 1, 0);		/* 0000ffff gdirect CLIP_B_Y */
+		dd_emit(ctx, 1, 0);		/* 0000ffff gdirect CLIP_C_X */
+		dd_emit(ctx, 1, 0);		/* 0000ffff gdirect CLIP_C_Y */
+		dd_emit(ctx, 1, 0);		/* 0000ffff gdirect CLIP_D_X */
+		dd_emit(ctx, 1, 0);		/* 0000ffff gdirect CLIP_D_Y */
+		dd_emit(ctx, 1, 1);		/* ffffffff gdirect COLOR_FORMAT */
+		dd_emit(ctx, 1, 0);		/* ffffffff gdirect OPERATION */
+		dd_emit(ctx, 1, 0);		/* 0000ffff gdirect POINT_X */
+		dd_emit(ctx, 1, 0);		/* 0000ffff gdirect POINT_Y */
+
+		dd_emit(ctx, 1, 0);		/* 0000ffff blit SRC_Y */
+		dd_emit(ctx, 1, 0);		/* ffffffff blit OPERATION */
+
+		dd_emit(ctx, 1, 0);		/* ffffffff ifc OPERATION */
+
+		dd_emit(ctx, 1, 0);		/* ffffffff iifc INDEX_FORMAT */
+		dd_emit(ctx, 1, 0);		/* ffffffff iifc LUT_OFFSET */
+		dd_emit(ctx, 1, 4);		/* ffffffff iifc COLOR_FORMAT */
+		dd_emit(ctx, 1, 0);		/* ffffffff iifc OPERATION */
+	}
+
+	/* m2mf state */
+	dd_emit(ctx, 1, 0);		/* ffffffff m2mf LINE_COUNT */
+	dd_emit(ctx, 1, 0);		/* ffffffff m2mf LINE_LENGTH_IN */
+	dd_emit(ctx, 2, 0);		/* ffffffff m2mf OFFSET_IN, OFFSET_OUT */
+	dd_emit(ctx, 1, 1);		/* ffffffff m2mf TILING_DEPTH_OUT */
+	dd_emit(ctx, 1, 0x100);		/* ffffffff m2mf TILING_HEIGHT_OUT */
+	dd_emit(ctx, 1, 0);		/* ffffffff m2mf TILING_POSITION_OUT_Z */
+	dd_emit(ctx, 1, 1);		/* 00000001 m2mf LINEAR_OUT */
+	dd_emit(ctx, 2, 0);		/* 0000ffff m2mf TILING_POSITION_OUT_X, Y */
+	dd_emit(ctx, 1, 0x100);		/* ffffffff m2mf TILING_PITCH_OUT */
+	dd_emit(ctx, 1, 1);		/* ffffffff m2mf TILING_DEPTH_IN */
+	dd_emit(ctx, 1, 0x100);		/* ffffffff m2mf TILING_HEIGHT_IN */
+	dd_emit(ctx, 1, 0);		/* ffffffff m2mf TILING_POSITION_IN_Z */
+	dd_emit(ctx, 1, 1);		/* 00000001 m2mf LINEAR_IN */
+	dd_emit(ctx, 2, 0);		/* 0000ffff m2mf TILING_POSITION_IN_X, Y */
+	dd_emit(ctx, 1, 0x100);		/* ffffffff m2mf TILING_PITCH_IN */
+
+	/* more compat 2d state */
+	if (device->chipset == 0x50) {
+		dd_emit(ctx, 1, 1);		/* ffffffff line COLOR_FORMAT */
+		dd_emit(ctx, 1, 0);		/* ffffffff line OPERATION */
+
+		dd_emit(ctx, 1, 1);		/* ffffffff triangle COLOR_FORMAT */
+		dd_emit(ctx, 1, 0);		/* ffffffff triangle OPERATION */
+
+		dd_emit(ctx, 1, 0);		/* 0000000f sifm TILE_MODE_Z */
+		dd_emit(ctx, 1, 2);		/* 0000000f sifm TILE_MODE_Y */
+		dd_emit(ctx, 1, 0);		/* 000000ff sifm FORMAT_FILTER */
+		dd_emit(ctx, 1, 1);		/* 000000ff sifm FORMAT_ORIGIN */
+		dd_emit(ctx, 1, 0);		/* 0000ffff sifm SRC_PITCH */
+		dd_emit(ctx, 1, 1);		/* 00000001 sifm SRC_LINEAR */
+		dd_emit(ctx, 1, 0);		/* 000000ff sifm SRC_OFFSET_HIGH */
+		dd_emit(ctx, 1, 0);		/* ffffffff sifm SRC_OFFSET */
+		dd_emit(ctx, 1, 0);		/* 0000ffff sifm SRC_HEIGHT */
+		dd_emit(ctx, 1, 0);		/* 0000ffff sifm SRC_WIDTH */
+		dd_emit(ctx, 1, 3);		/* ffffffff sifm COLOR_FORMAT */
+		dd_emit(ctx, 1, 0);		/* ffffffff sifm OPERATION */
+
+		dd_emit(ctx, 1, 0);		/* ffffffff sifc OPERATION */
+	}
+
+	/* tesla state */
+	dd_emit(ctx, 1, 0);		/* 0000000f GP_TEXTURES_LOG2 */
+	dd_emit(ctx, 1, 0);		/* 0000000f GP_SAMPLERS_LOG2 */
+	dd_emit(ctx, 1, 0);		/* 000000ff */
+	dd_emit(ctx, 1, 0);		/* ffffffff */
+	dd_emit(ctx, 1, 4);		/* 000000ff UNK12B0_0 */
+	dd_emit(ctx, 1, 0x70);		/* 000000ff UNK12B0_1 */
+	dd_emit(ctx, 1, 0x80);		/* 000000ff UNK12B0_3 */
+	dd_emit(ctx, 1, 0);		/* 000000ff UNK12B0_2 */
+	dd_emit(ctx, 1, 0);		/* 0000000f FP_TEXTURES_LOG2 */
+	dd_emit(ctx, 1, 0);		/* 0000000f FP_SAMPLERS_LOG2 */
+	if (IS_NVA3F(device->chipset)) {
+		dd_emit(ctx, 1, 0);	/* ffffffff */
+		dd_emit(ctx, 1, 0);	/* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
+	} else {
+		dd_emit(ctx, 1, 0);	/* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
+	}
+	dd_emit(ctx, 1, 0xc);		/* 000000ff SEMANTIC_COLOR.BFC0_ID */
+	if (device->chipset != 0x50)
+		dd_emit(ctx, 1, 0);	/* 00000001 SEMANTIC_COLOR.CLMP_EN */
+	dd_emit(ctx, 1, 8);		/* 000000ff SEMANTIC_COLOR.COLR_NR */
+	dd_emit(ctx, 1, 0x14);		/* 000000ff SEMANTIC_COLOR.FFC0_ID */
+	if (device->chipset == 0x50) {
+		dd_emit(ctx, 1, 0);	/* 000000ff SEMANTIC_LAYER */
+		dd_emit(ctx, 1, 0);	/* 00000001 */
+	} else {
+		dd_emit(ctx, 1, 0);	/* 00000001 SEMANTIC_PTSZ.ENABLE */
+		dd_emit(ctx, 1, 0x29);	/* 000000ff SEMANTIC_PTSZ.PTSZ_ID */
+		dd_emit(ctx, 1, 0x27);	/* 000000ff SEMANTIC_PRIM */
+		dd_emit(ctx, 1, 0x26);	/* 000000ff SEMANTIC_LAYER */
+		dd_emit(ctx, 1, 8);	/* 0000000f SEMANTIC_CLIP.CLIP_HIGH */
+		dd_emit(ctx, 1, 4);	/* 000000ff SEMANTIC_CLIP.CLIP_LO */
+		dd_emit(ctx, 1, 0x27);	/* 000000ff UNK0FD4 */
+		dd_emit(ctx, 1, 0);	/* 00000001 UNK1900 */
+	}
+	dd_emit(ctx, 1, 0);		/* 00000007 RT_CONTROL_MAP0 */
+	dd_emit(ctx, 1, 1);		/* 00000007 RT_CONTROL_MAP1 */
+	dd_emit(ctx, 1, 2);		/* 00000007 RT_CONTROL_MAP2 */
+	dd_emit(ctx, 1, 3);		/* 00000007 RT_CONTROL_MAP3 */
+	dd_emit(ctx, 1, 4);		/* 00000007 RT_CONTROL_MAP4 */
+	dd_emit(ctx, 1, 5);		/* 00000007 RT_CONTROL_MAP5 */
+	dd_emit(ctx, 1, 6);		/* 00000007 RT_CONTROL_MAP6 */
+	dd_emit(ctx, 1, 7);		/* 00000007 RT_CONTROL_MAP7 */
+	dd_emit(ctx, 1, 1);		/* 0000000f RT_CONTROL_COUNT */
+	dd_emit(ctx, 8, 0);		/* 00000001 RT_HORIZ_UNK */
+	dd_emit(ctx, 8, 0);		/* ffffffff RT_ADDRESS_LOW */
+	dd_emit(ctx, 1, 0xcf);		/* 000000ff RT_FORMAT */
+	dd_emit(ctx, 7, 0);		/* 000000ff RT_FORMAT */
+	if (device->chipset != 0x50)
+		dd_emit(ctx, 3, 0);	/* 1, 1, 1 */
+	else
+		dd_emit(ctx, 2, 0);	/* 1, 1 */
+	dd_emit(ctx, 1, 0);		/* ffffffff GP_ENABLE */
+	dd_emit(ctx, 1, 0x80);		/* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+	dd_emit(ctx, 1, 4);		/* 000000ff GP_REG_ALLOC_RESULT */
+	dd_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
+	if (IS_NVA3F(device->chipset)) {
+		dd_emit(ctx, 1, 3);	/* 00000003 */
+		dd_emit(ctx, 1, 0);	/* 00000001 UNK1418. Alone. */
+	}
+	if (device->chipset != 0x50)
+		dd_emit(ctx, 1, 3);	/* 00000003 UNK15AC */
+	dd_emit(ctx, 1, 1);		/* ffffffff RASTERIZE_ENABLE */
+	dd_emit(ctx, 1, 0);		/* 00000001 FP_CONTROL.EXPORTS_Z */
+	if (device->chipset != 0x50)
+		dd_emit(ctx, 1, 0);	/* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
+	dd_emit(ctx, 1, 0x12);		/* 000000ff FP_INTERPOLANT_CTRL.COUNT */
+	dd_emit(ctx, 1, 0x10);		/* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
+	dd_emit(ctx, 1, 0xc);		/* 000000ff FP_INTERPOLANT_CTRL.OFFSET */
+	dd_emit(ctx, 1, 1);		/* 00000001 FP_INTERPOLANT_CTRL.UMASK.W */
+	dd_emit(ctx, 1, 0);		/* 00000001 FP_INTERPOLANT_CTRL.UMASK.X */
+	dd_emit(ctx, 1, 0);		/* 00000001 FP_INTERPOLANT_CTRL.UMASK.Y */
+	dd_emit(ctx, 1, 0);		/* 00000001 FP_INTERPOLANT_CTRL.UMASK.Z */
+	dd_emit(ctx, 1, 4);		/* 000000ff FP_RESULT_COUNT */
+	dd_emit(ctx, 1, 2);		/* ffffffff REG_MODE */
+	dd_emit(ctx, 1, 4);		/* 000000ff FP_REG_ALLOC_TEMP */
+	if (device->chipset >= 0xa0)
+		dd_emit(ctx, 1, 0);	/* ffffffff */
+	dd_emit(ctx, 1, 0);		/* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
+	dd_emit(ctx, 1, 0);		/* ffffffff STRMOUT_ENABLE */
+	dd_emit(ctx, 1, 0x3fffff);	/* 003fffff TIC_LIMIT */
+	dd_emit(ctx, 1, 0x1fff);	/* 000fffff TSC_LIMIT */
+	dd_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
+	if (device->chipset != 0x50)
+		dd_emit(ctx, 8, 0);	/* 00000001 */
+	if (device->chipset >= 0xa0) {
+		dd_emit(ctx, 1, 1);	/* 00000007 VTX_ATTR_DEFINE.COMP */
+		dd_emit(ctx, 1, 1);	/* 00000007 VTX_ATTR_DEFINE.SIZE */
+		dd_emit(ctx, 1, 2);	/* 00000007 VTX_ATTR_DEFINE.TYPE */
+		dd_emit(ctx, 1, 0);	/* 000000ff VTX_ATTR_DEFINE.ATTR */
+	}
+	dd_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
+	dd_emit(ctx, 1, 0x14);		/* 0000001f ZETA_FORMAT */
+	dd_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
+	dd_emit(ctx, 1, 0);		/* 0000000f VP_TEXTURES_LOG2 */
+	dd_emit(ctx, 1, 0);		/* 0000000f VP_SAMPLERS_LOG2 */
+	if (IS_NVA3F(device->chipset))
+		dd_emit(ctx, 1, 0);	/* 00000001 */
+	dd_emit(ctx, 1, 2);		/* 00000003 POLYGON_MODE_BACK */
+	if (device->chipset >= 0xa0)
+		dd_emit(ctx, 1, 0);	/* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
+	dd_emit(ctx, 1, 0);		/* 0000ffff CB_ADDR_INDEX */
+	if (device->chipset >= 0xa0)
+		dd_emit(ctx, 1, 0);	/* 00000003 */
+	dd_emit(ctx, 1, 0);		/* 00000001 CULL_FACE_ENABLE */
+	dd_emit(ctx, 1, 1);		/* 00000003 CULL_FACE */
+	dd_emit(ctx, 1, 0);		/* 00000001 FRONT_FACE */
+	dd_emit(ctx, 1, 2);		/* 00000003 POLYGON_MODE_FRONT */
+	dd_emit(ctx, 1, 0x1000);	/* 00007fff UNK141C */
+	if (device->chipset != 0x50) {
+		dd_emit(ctx, 1, 0xe00);		/* 7fff */
+		dd_emit(ctx, 1, 0x1000);	/* 7fff */
+		dd_emit(ctx, 1, 0x1e00);	/* 7fff */
+	}
+	dd_emit(ctx, 1, 0);		/* 00000001 BEGIN_END_ACTIVE */
+	dd_emit(ctx, 1, 1);		/* 00000001 POLYGON_MODE_??? */
+	dd_emit(ctx, 1, 1);		/* 000000ff GP_REG_ALLOC_TEMP / 4 rounded up */
+	dd_emit(ctx, 1, 1);		/* 000000ff FP_REG_ALLOC_TEMP... without /4? */
+	dd_emit(ctx, 1, 1);		/* 000000ff VP_REG_ALLOC_TEMP / 4 rounded up */
+	dd_emit(ctx, 1, 1);		/* 00000001 */
+	dd_emit(ctx, 1, 0);		/* 00000001 */
+	dd_emit(ctx, 1, 0);		/* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
+	dd_emit(ctx, 1, 0);		/* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
+	dd_emit(ctx, 1, 0x200);		/* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
+	if (IS_NVA3F(device->chipset))
+		dd_emit(ctx, 1, 0x200);
+	dd_emit(ctx, 1, 0);		/* 00000001 */
+	if (device->chipset < 0xa0) {
+		dd_emit(ctx, 1, 1);	/* 00000001 */
+		dd_emit(ctx, 1, 0x70);	/* 000000ff */
+		dd_emit(ctx, 1, 0x80);	/* 000000ff */
+		dd_emit(ctx, 1, 0);	/* 000000ff */
+		dd_emit(ctx, 1, 0);	/* 00000001 */
+		dd_emit(ctx, 1, 1);	/* 00000001 */
+		dd_emit(ctx, 1, 0x70);	/* 000000ff */
+		dd_emit(ctx, 1, 0x80);	/* 000000ff */
+		dd_emit(ctx, 1, 0);	/* 000000ff */
+	} else {
+		dd_emit(ctx, 1, 1);	/* 00000001 */
+		dd_emit(ctx, 1, 0xf0);	/* 000000ff */
+		dd_emit(ctx, 1, 0xff);	/* 000000ff */
+		dd_emit(ctx, 1, 0);	/* 000000ff */
+		dd_emit(ctx, 1, 0);	/* 00000001 */
+		dd_emit(ctx, 1, 1);	/* 00000001 */
+		dd_emit(ctx, 1, 0xf0);	/* 000000ff */
+		dd_emit(ctx, 1, 0xff);	/* 000000ff */
+		dd_emit(ctx, 1, 0);	/* 000000ff */
+		dd_emit(ctx, 1, 9);	/* 0000003f UNK114C.COMP,SIZE */
+	}
+
+	/* eng2d state */
+	dd_emit(ctx, 1, 0);		/* 00000001 eng2d COLOR_KEY_ENABLE */
+	dd_emit(ctx, 1, 0);		/* 00000007 eng2d COLOR_KEY_FORMAT */
+	dd_emit(ctx, 1, 1);		/* ffffffff eng2d DST_DEPTH */
+	dd_emit(ctx, 1, 0xcf);		/* 000000ff eng2d DST_FORMAT */
+	dd_emit(ctx, 1, 0);		/* ffffffff eng2d DST_LAYER */
+	dd_emit(ctx, 1, 1);		/* 00000001 eng2d DST_LINEAR */
+	dd_emit(ctx, 1, 0);		/* 00000007 eng2d PATTERN_COLOR_FORMAT */
+	dd_emit(ctx, 1, 0);		/* 00000007 eng2d OPERATION */
+	dd_emit(ctx, 1, 0);		/* 00000003 eng2d PATTERN_SELECT */
+	dd_emit(ctx, 1, 0xcf);		/* 000000ff eng2d SIFC_FORMAT */
+	dd_emit(ctx, 1, 0);		/* 00000001 eng2d SIFC_BITMAP_ENABLE */
+	dd_emit(ctx, 1, 2);		/* 00000003 eng2d SIFC_BITMAP_UNK808 */
+	dd_emit(ctx, 1, 0);		/* ffffffff eng2d BLIT_DU_DX_FRACT */
+	dd_emit(ctx, 1, 1);		/* ffffffff eng2d BLIT_DU_DX_INT */
+	dd_emit(ctx, 1, 0);		/* ffffffff eng2d BLIT_DV_DY_FRACT */
+	dd_emit(ctx, 1, 1);		/* ffffffff eng2d BLIT_DV_DY_INT */
+	dd_emit(ctx, 1, 0);		/* 00000001 eng2d BLIT_CONTROL_FILTER */
+	dd_emit(ctx, 1, 0xcf);		/* 000000ff eng2d DRAW_COLOR_FORMAT */
+	dd_emit(ctx, 1, 0xcf);		/* 000000ff eng2d SRC_FORMAT */
+	dd_emit(ctx, 1, 1);		/* 00000001 eng2d SRC_LINEAR #2 */
+
+	num = ctx->ctxvals_pos - base;
+	ctx->ctxvals_pos = base;
+	if (IS_NVA3F(device->chipset))
+		cp_ctx(ctx, 0x404800, num);
+	else
+		cp_ctx(ctx, 0x405400, num);
+}
+
+/*
+ * xfer areas. These are a pain.
+ *
+ * There are 2 xfer areas: the first one is big and contains all sorts of
+ * stuff, the second is small and contains some per-TP context.
+ *
+ * Each area is split into 8 "strands". The areas, when saved to grctx,
+ * are made of 8-word blocks. Each block contains a single word from
+ * each strand. The strands are independent of each other, their
+ * addresses are unrelated to each other, and data in them is closely
+ * packed together. The strand layout varies a bit between cards: here
+ * and there, a single word is thrown out in the middle and the whole
+ * strand is offset by a bit from the corresponding one on another chipset.
+ * For this reason, addresses of stuff in strands are almost useless.
+ * Knowing the sequence of stuff and the size of the gaps between them is
+ * much more useful, and that's how we build the strands in our generator.
+ *
+ * NVA0 takes this mess to a whole new level by cutting the old strands
+ * into a few dozen pieces [known as genes], rearranging them randomly,
+ * and putting them back together to make new strands. Hopefully these
+ * genes correspond more or less directly to the same PGRAPH subunits
+ * as in the 400040 register.
+ *
+ * The most common value in the default context is 0, and when the genes
+ * are separated by 0's, gene boundaries are quite speculative...
+ * some of them can be clearly deduced, others can be guessed, and yet
+ * others won't be resolved without figuring out the real meaning of a
+ * given ctxval. For the same reason, the ending point of each strand is
+ * unknown, except for strand 0, which is the longest strand and whose
+ * end corresponds to the end of the whole xfer.
+ *
+ * An unsolved mystery is the seek instruction: it takes an argument
+ * in bits 8-18, and that argument is clearly the place in strands to
+ * seek to... but the offsets don't seem to correspond to offsets as
+ * seen in grctx. Perhaps there's another, real, not randomly-changing
+ * addressing in strands, and the xfer insn just happens to skip over
+ * the unused bits? NV10-NV30 PIPE comes to mind...
+ *
+ * As far as I know, there's no way to access the xfer areas directly
+ * without the help of ctxprog.
+ */
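+
+/*
+ * To make the interleave concrete: word i of strand s in an area based
+ * at `base` lands at grctx word offset base + s + i * 8. A minimal
+ * sketch of that mapping (illustrative only -- the generator tracks
+ * the same position through ctxvals_pos in xf_emit() below):
+ *
+ *	static inline u32
+ *	strand_word_offset(u32 base, u32 strand, u32 i)
+ *	{
+ *		return base + strand + (i << 3);
+ *	}
+ */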
+
+static void
+xf_emit(struct nouveau_grctx *ctx, int num, u32 val)
+{
+	int i;
+	/* write non-zero values only when filling in the default context;
+	 * in the other modes just advance the position to size the area */
+	if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
+		for (i = 0; i < num; i++)
+			nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+	/* one word along a strand == 8 words in the interleaved grctx */
+	ctx->ctxvals_pos += num << 3;
+}
+
+/* Gene declarations... */
+
+static void nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
+
+static void
+nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int i;
+	int offset;
+	int size = 0;
+	u32 units = nv_rd32(ctx->device, 0x1540);
+
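+	/* xfer areas start on a 0x40-word boundary */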
+	offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
+	ctx->ctxvals_base = offset;
+
+	if (device->chipset < 0xa0) {
+		/* Strand 0 */
+		ctx->ctxvals_pos = offset;
+		nv50_graph_construct_gene_dispatch(ctx);
+		nv50_graph_construct_gene_m2mf(ctx);
+		nv50_graph_construct_gene_unk24xx(ctx);
+		nv50_graph_construct_gene_clipid(ctx);
+		nv50_graph_construct_gene_zcull(ctx);
+		if ((ctx->ctxvals_pos-offset)/8 > size)
+			size = (ctx->ctxvals_pos-offset)/8;
+
+		/* Strand 1 */
+	ctx->ctxvals_pos = offset + 1;
+		nv50_graph_construct_gene_vfetch(ctx);
+		nv50_graph_construct_gene_eng2d(ctx);
+		nv50_graph_construct_gene_csched(ctx);
+		nv50_graph_construct_gene_ropm1(ctx);
+		nv50_graph_construct_gene_ropm2(ctx);
+		if ((ctx->ctxvals_pos-offset)/8 > size)
+			size = (ctx->ctxvals_pos-offset)/8;
+
+		/* Strand 2 */
+	ctx->ctxvals_pos = offset + 2;
+		nv50_graph_construct_gene_ccache(ctx);
+		nv50_graph_construct_gene_unk1cxx(ctx);
+		nv50_graph_construct_gene_strmout(ctx);
+		nv50_graph_construct_gene_unk14xx(ctx);
+		nv50_graph_construct_gene_unk10xx(ctx);
+		nv50_graph_construct_gene_unk34xx(ctx);
+		if ((ctx->ctxvals_pos-offset)/8 > size)
+			size = (ctx->ctxvals_pos-offset)/8;
+
+		/* Strand 3: per-ROP group state */
+		ctx->ctxvals_pos = offset + 3;
+		for (i = 0; i < 6; i++)
+			if (units & (1 << (i + 16)))
+				nv50_graph_construct_gene_ropc(ctx);
+		if ((ctx->ctxvals_pos-offset)/8 > size)
+			size = (ctx->ctxvals_pos-offset)/8;
+
+		/* Strands 4-7: per-TP state */
+		for (i = 0; i < 4; i++) {
+			ctx->ctxvals_pos = offset + 4 + i;
+			if (units & (1 << (2 * i)))
+				nv50_graph_construct_xfer_tp(ctx);
+			if (units & (1 << (2 * i + 1)))
+				nv50_graph_construct_xfer_tp(ctx);
+			if ((ctx->ctxvals_pos-offset)/8 > size)
+				size = (ctx->ctxvals_pos-offset)/8;
+		}
+	} else {
+		/* Strand 0 */
+		ctx->ctxvals_pos = offset;
+		nv50_graph_construct_gene_dispatch(ctx);
+		nv50_graph_construct_gene_m2mf(ctx);
+		nv50_graph_construct_gene_unk34xx(ctx);
+		nv50_graph_construct_gene_csched(ctx);
+		nv50_graph_construct_gene_unk1cxx(ctx);
+		nv50_graph_construct_gene_strmout(ctx);
+		if ((ctx->ctxvals_pos-offset)/8 > size)
+			size = (ctx->ctxvals_pos-offset)/8;
+
+		/* Strand 1 */
+		ctx->ctxvals_pos = offset + 1;
+		nv50_graph_construct_gene_unk10xx(ctx);
+		if ((ctx->ctxvals_pos-offset)/8 > size)
+			size = (ctx->ctxvals_pos-offset)/8;
+
+		/* Strand 2 */
+		ctx->ctxvals_pos = offset + 2;
+		if (device->chipset == 0xa0)
+			nv50_graph_construct_gene_unk14xx(ctx);
+		nv50_graph_construct_gene_unk24xx(ctx);
+		if ((ctx->ctxvals_pos-offset)/8 > size)
+			size = (ctx->ctxvals_pos-offset)/8;
+
+		/* Strand 3 */
+		ctx->ctxvals_pos = offset + 3;
+		nv50_graph_construct_gene_vfetch(ctx);
+		if ((ctx->ctxvals_pos-offset)/8 > size)
+			size = (ctx->ctxvals_pos-offset)/8;
+
+		/* Strand 4 */
+		ctx->ctxvals_pos = offset + 4;
+		nv50_graph_construct_gene_ccache(ctx);
+		if ((ctx->ctxvals_pos-offset)/8 > size)
+			size = (ctx->ctxvals_pos-offset)/8;
+
+		/* Strand 5 */
+		ctx->ctxvals_pos = offset + 5;
+		nv50_graph_construct_gene_ropm2(ctx);
+		nv50_graph_construct_gene_ropm1(ctx);
+		/* per-ROP context */
+		for (i = 0; i < 8; i++)
+			if (units & (1<<(i+16)))
+				nv50_graph_construct_gene_ropc(ctx);
+		if ((ctx->ctxvals_pos-offset)/8 > size)
+			size = (ctx->ctxvals_pos-offset)/8;
+
+		/* Strand 6 */
+		ctx->ctxvals_pos = offset + 6;
+		nv50_graph_construct_gene_zcull(ctx);
+		nv50_graph_construct_gene_clipid(ctx);
+		nv50_graph_construct_gene_eng2d(ctx);
+		if (units & (1 << 0))
+			nv50_graph_construct_xfer_tp(ctx);
+		if (units & (1 << 1))
+			nv50_graph_construct_xfer_tp(ctx);
+		if (units & (1 << 2))
+			nv50_graph_construct_xfer_tp(ctx);
+		if (units & (1 << 3))
+			nv50_graph_construct_xfer_tp(ctx);
+		if ((ctx->ctxvals_pos-offset)/8 > size)
+			size = (ctx->ctxvals_pos-offset)/8;
+
+		/* Strand 7 */
+		ctx->ctxvals_pos = offset + 7;
+		if (device->chipset == 0xa0) {
+			if (units & (1 << 4))
+				nv50_graph_construct_xfer_tp(ctx);
+			if (units & (1 << 5))
+				nv50_graph_construct_xfer_tp(ctx);
+			if (units & (1 << 6))
+				nv50_graph_construct_xfer_tp(ctx);
+			if (units & (1 << 7))
+				nv50_graph_construct_xfer_tp(ctx);
+			if (units & (1 << 8))
+				nv50_graph_construct_xfer_tp(ctx);
+			if (units & (1 << 9))
+				nv50_graph_construct_xfer_tp(ctx);
+		} else {
+			nv50_graph_construct_gene_unk14xx(ctx);
+		}
+		if ((ctx->ctxvals_pos-offset)/8 > size)
+			size = (ctx->ctxvals_pos-offset)/8;
+	}
+
+	ctx->ctxvals_pos = offset + size * 8;
+	ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
+	cp_lsr(ctx, offset);
+	cp_out(ctx, CP_SET_XFER_POINTER);
+	cp_lsr(ctx, size);
+	cp_out(ctx, CP_SEEK_1);
+	cp_out(ctx, CP_XFER_1);
+	cp_wait(ctx, XFER, BUSY);
+}
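+
+/*
+ * Note on the bookkeeping above: after each strand is emitted, `size`
+ * is raised to the length of the longest strand so far, measured in
+ * 8-word blocks:
+ *
+ *	blocks = (ctx->ctxvals_pos - offset) / 8;
+ *	if (blocks > size)
+ *		size = blocks;
+ *
+ * so the final CP_SEEK_1/CP_XFER_1 covers the whole area up to the end
+ * of the longest strand.
+ */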
+
+/*
+ * non-trivial demagiced parts of ctx init go here
+ */
+
+static void
+nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
+{
+	/* start of strand 0 */
+	struct nouveau_device *device = ctx->device;
+	/* SEEK */
+	if (device->chipset == 0x50)
+		xf_emit(ctx, 5, 0);
+	else if (!IS_NVA3F(device->chipset))
+		xf_emit(ctx, 6, 0);
+	else
+		xf_emit(ctx, 4, 0);
+	/* SEEK */
+	/* the PGRAPH's internal FIFO */
+	if (device->chipset == 0x50)
+		xf_emit(ctx, 8*3, 0);
+	else
+		xf_emit(ctx, 0x100*3, 0);
+	/* and another bonus slot?!? */
+	xf_emit(ctx, 3, 0);
+	/* and YET ANOTHER bonus slot? */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 3, 0);
+	/* SEEK */
+	/* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values
+	 * plus the last-used index */
+	xf_emit(ctx, 9, 0);
+	/* SEEK */
+	xf_emit(ctx, 9, 0);
+	/* SEEK */
+	xf_emit(ctx, 9, 0);
+	/* SEEK */
+	xf_emit(ctx, 9, 0);
+	/* SEEK */
+	if (device->chipset < 0x90)
+		xf_emit(ctx, 4, 0);
+	/* SEEK */
+	xf_emit(ctx, 2, 0);
+	/* SEEK */
+	xf_emit(ctx, 6*2, 0);
+	xf_emit(ctx, 2, 0);
+	/* SEEK */
+	xf_emit(ctx, 2, 0);
+	/* SEEK */
+	xf_emit(ctx, 6*2, 0);
+	xf_emit(ctx, 2, 0);
+	/* SEEK */
+	if (device->chipset == 0x50)
+		xf_emit(ctx, 0x1c, 0);
+	else if (device->chipset < 0xa0)
+		xf_emit(ctx, 0x1e, 0);
+	else
+		xf_emit(ctx, 0x22, 0);
+	/* SEEK */
+	xf_emit(ctx, 0x15, 0);
+}
+
+static void
+nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
+{
+	/* Strand 0, right after dispatch */
+	struct nouveau_device *device = ctx->device;
+	int smallm2mf = 0;
+	if (device->chipset < 0x92 || device->chipset == 0x98)
+		smallm2mf = 1;
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* DMA_NOTIFY instance >> 4 */
+	xf_emit(ctx, 1, 0);		/* DMA_BUFFER_IN instance >> 4 */
+	xf_emit(ctx, 1, 0);		/* DMA_BUFFER_OUT instance >> 4 */
+	xf_emit(ctx, 1, 0);		/* OFFSET_IN */
+	xf_emit(ctx, 1, 0);		/* OFFSET_OUT */
+	xf_emit(ctx, 1, 0);		/* PITCH_IN */
+	xf_emit(ctx, 1, 0);		/* PITCH_OUT */
+	xf_emit(ctx, 1, 0);		/* LINE_LENGTH */
+	xf_emit(ctx, 1, 0);		/* LINE_COUNT */
+	xf_emit(ctx, 1, 0x21);		/* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */
+	xf_emit(ctx, 1, 1);		/* LINEAR_IN */
+	xf_emit(ctx, 1, 0x2);		/* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */
+	xf_emit(ctx, 1, 0x100);		/* TILING_PITCH_IN */
+	xf_emit(ctx, 1, 0x100);		/* TILING_HEIGHT_IN */
+	xf_emit(ctx, 1, 1);		/* TILING_DEPTH_IN */
+	xf_emit(ctx, 1, 0);		/* TILING_POSITION_IN_Z */
+	xf_emit(ctx, 1, 0);		/* TILING_POSITION_IN */
+	xf_emit(ctx, 1, 1);		/* LINEAR_OUT */
+	xf_emit(ctx, 1, 0x2);		/* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */
+	xf_emit(ctx, 1, 0x100);		/* TILING_PITCH_OUT */
+	xf_emit(ctx, 1, 0x100);		/* TILING_HEIGHT_OUT */
+	xf_emit(ctx, 1, 1);		/* TILING_DEPTH_OUT */
+	xf_emit(ctx, 1, 0);		/* TILING_POSITION_OUT_Z */
+	xf_emit(ctx, 1, 0);		/* TILING_POSITION_OUT */
+	xf_emit(ctx, 1, 0);		/* OFFSET_IN_HIGH */
+	xf_emit(ctx, 1, 0);		/* OFFSET_OUT_HIGH */
+	/* SEEK */
+	if (smallm2mf)
+		xf_emit(ctx, 0x40, 0);	/* 20 * ffffffff, 3ffff */
+	else
+		xf_emit(ctx, 0x100, 0);	/* 80 * ffffffff, 3ffff */
+	xf_emit(ctx, 4, 0);		/* 1f/7f, 0, 1f/7f, 0 [1f for smallm2mf, 7f otherwise] */
+	/* SEEK */
+	if (smallm2mf)
+		xf_emit(ctx, 0x400, 0);	/* ffffffff */
+	else
+		xf_emit(ctx, 0x800, 0);	/* ffffffff */
+	xf_emit(ctx, 4, 0);		/* ff/1ff, 0, 0, 0 [ff for smallm2mf, 1ff otherwise] */
+	/* SEEK */
+	xf_emit(ctx, 0x40, 0);		/* 20 * bits ffffffff, 3ffff */
+	xf_emit(ctx, 0x6, 0);		/* 1f, 0, 1f, 0, 1f, 0 */
+}
+
+static void
+nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	xf_emit(ctx, 2, 0);		/* RO */
+	xf_emit(ctx, 0x800, 0);		/* ffffffff */
+	switch (device->chipset) {
+	case 0x50:
+	case 0x92:
+	case 0xa0:
+		xf_emit(ctx, 0x2b, 0);
+		break;
+	case 0x84:
+		xf_emit(ctx, 0x29, 0);
+		break;
+	case 0x94:
+	case 0x96:
+	case 0xa3:
+		xf_emit(ctx, 0x27, 0);
+		break;
+	case 0x86:
+	case 0x98:
+	case 0xa5:
+	case 0xa8:
+	case 0xaa:
+	case 0xac:
+	case 0xaf:
+		xf_emit(ctx, 0x25, 0);
+		break;
+	}
+	/* CB bindings, 0x80 of them. first word is address >> 8, second is
+	 * size >> 4 | valid << 24 */
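+	/* e.g. (illustrative numbers) a valid 64KiB CB at address 0x10000
+	 * would be stored as the pair { 0x100, 0x1000 | 1 << 24 } */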
+	xf_emit(ctx, 0x100, 0);		/* ffffffff CB_DEF */
+	xf_emit(ctx, 1, 0);		/* 0000007f CB_ADDR_BUFFER */
+	xf_emit(ctx, 1, 0);		/* 0 */
+	xf_emit(ctx, 0x30, 0);		/* ff SET_PROGRAM_CB */
+	xf_emit(ctx, 1, 0);		/* 3f last SET_PROGRAM_CB */
+	xf_emit(ctx, 4, 0);		/* RO */
+	xf_emit(ctx, 0x100, 0);		/* ffffffff */
+	xf_emit(ctx, 8, 0);		/* 1f, 0, 0, ... */
+	xf_emit(ctx, 8, 0);		/* ffffffff */
+	xf_emit(ctx, 4, 0);		/* ffffffff */
+	xf_emit(ctx, 1, 0);		/* 3 */
+	xf_emit(ctx, 1, 0);		/* ffffffff */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_CODE_CB */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_TIC */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_TSC */
+	xf_emit(ctx, 1, 0);		/* 00000001 LINKED_TSC */
+	xf_emit(ctx, 1, 0);		/* 000000ff TIC_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0);		/* ffffffff TIC_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0x3fffff);	/* 003fffff TIC_LIMIT */
+	xf_emit(ctx, 1, 0);		/* 000000ff TSC_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0);		/* ffffffff TSC_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0x1fff);	/* 000fffff TSC_LIMIT */
+	xf_emit(ctx, 1, 0);		/* 000000ff VP_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0);		/* ffffffff VP_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0);		/* 00ffffff VP_START_ID */
+	xf_emit(ctx, 1, 0);		/* 000000ff CB_DEF_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0);		/* ffffffff CB_DEF_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 000000ff GP_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0);		/* ffffffff GP_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0);		/* 00ffffff GP_START_ID */
+	xf_emit(ctx, 1, 0);		/* 000000ff FP_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0);		/* ffffffff FP_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0);		/* 00ffffff FP_START_ID */
+}
+
+static void
+nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int i;
+	/* end of area 2 on pre-NVA0, area 1 on NVAx */
+	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0x80);		/* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+	xf_emit(ctx, 1, 4);		/* 000000ff GP_REG_ALLOC_RESULT */
+	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
+	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
+	if (device->chipset == 0x50)
+		xf_emit(ctx, 1, 0x3ff);
+	else
+		xf_emit(ctx, 1, 0x7ff);	/* 000007ff */
+	xf_emit(ctx, 1, 0);		/* 111/113 */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+	for (i = 0; i < 8; i++) {
+		switch (device->chipset) {
+		case 0x50:
+		case 0x86:
+		case 0x98:
+		case 0xaa:
+		case 0xac:
+			xf_emit(ctx, 0xa0, 0);	/* ffffffff */
+			break;
+		case 0x84:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+			xf_emit(ctx, 0x120, 0);
+			break;
+		case 0xa5:
+		case 0xa8:
+			xf_emit(ctx, 0x100, 0);	/* ffffffff */
+			break;
+		case 0xa0:
+		case 0xa3:
+		case 0xaf:
+			xf_emit(ctx, 0x400, 0);	/* ffffffff */
+			break;
+		}
+		xf_emit(ctx, 4, 0);	/* 3f, 0, 0, 0 */
+		xf_emit(ctx, 4, 0);	/* ffffffff */
+	}
+	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0x80);		/* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+	xf_emit(ctx, 1, 4);		/* 000000ff GP_REG_ALLOC_TEMP */
+	xf_emit(ctx, 1, 1);		/* 00000001 RASTERIZE_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 0x27);		/* 000000ff UNK0FD4 */
+	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
+	xf_emit(ctx, 1, 0x26);		/* 000000ff SEMANTIC_LAYER */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+}
+
+static void
+nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	/* end of area 2 on pre-NVA0, area 1 on NVAx */
+	xf_emit(ctx, 1, 0);		/* 00000001 VIEWPORT_CLIP_RECTS_EN */
+	xf_emit(ctx, 1, 0);		/* 00000003 VIEWPORT_CLIP_MODE */
+	xf_emit(ctx, 0x10, 0x04000000);	/* 07ffffff VIEWPORT_CLIP_HORIZ*8, VIEWPORT_CLIP_VERT*8 */
+	xf_emit(ctx, 1, 0);		/* 00000001 POLYGON_STIPPLE_ENABLE */
+	xf_emit(ctx, 0x20, 0);		/* ffffffff POLYGON_STIPPLE */
+	xf_emit(ctx, 2, 0);		/* 00007fff WINDOW_OFFSET_XY */
+	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
+	xf_emit(ctx, 1, 0x04e3bfdf);	/* ffffffff UNK0D64 */
+	xf_emit(ctx, 1, 0x04e3bfdf);	/* ffffffff UNK0DF4 */
+	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
+	xf_emit(ctx, 1, 0);		/* 00000007 */
+	xf_emit(ctx, 1, 0x1fe21);	/* 0001ffff tesla UNK0FAC */
+	if (device->chipset >= 0xa0)
+		xf_emit(ctx, 1, 0x0fac6881);
+	if (IS_NVA3F(device->chipset)) {
+		xf_emit(ctx, 1, 1);
+		xf_emit(ctx, 3, 0);
+	}
+}
+
+static void
+nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	/* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
+	if (device->chipset != 0x50) {
+		xf_emit(ctx, 5, 0);		/* ffffffff */
+		xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
+		xf_emit(ctx, 1, 0);		/* 00000001 */
+		xf_emit(ctx, 1, 0);		/* 000003ff */
+		xf_emit(ctx, 1, 0x804);		/* 00000fff SEMANTIC_CLIP */
+		xf_emit(ctx, 1, 0);		/* 00000001 */
+		xf_emit(ctx, 2, 4);		/* 7f, ff */
+		xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
+	}
+	xf_emit(ctx, 1, 0);			/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 4);			/* 0000007f VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 4);			/* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0);			/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0x10);			/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+	xf_emit(ctx, 1, 0);			/* 000000ff VP_CLIP_DISTANCE_ENABLE */
+	if (device->chipset != 0x50)
+		xf_emit(ctx, 1, 0);		/* 3ff */
+	xf_emit(ctx, 1, 0);			/* 000000ff tesla UNK1940 */
+	xf_emit(ctx, 1, 0);			/* 00000001 tesla UNK0D7C */
+	xf_emit(ctx, 1, 0x804);			/* 00000fff SEMANTIC_CLIP */
+	xf_emit(ctx, 1, 1);			/* 00000001 VIEWPORT_TRANSFORM_EN */
+	xf_emit(ctx, 1, 0x1a);			/* 0000001f POLYGON_MODE */
+	if (device->chipset != 0x50)
+		xf_emit(ctx, 1, 0x7f);		/* 000000ff tesla UNK0FFC */
+	xf_emit(ctx, 1, 0);			/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 1);			/* 00000001 SHADE_MODEL */
+	xf_emit(ctx, 1, 0x80c14);		/* 01ffffff SEMANTIC_COLOR */
+	xf_emit(ctx, 1, 0);			/* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 0x8100c12);		/* 1fffffff FP_INTERPOLANT_CTRL */
+	xf_emit(ctx, 1, 4);			/* 0000007f VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 4);			/* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0);			/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0x10);			/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+	xf_emit(ctx, 1, 0);			/* 00000001 tesla UNK0D7C */
+	xf_emit(ctx, 1, 0);			/* 00000001 tesla UNK0F8C */
+	xf_emit(ctx, 1, 0);			/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 1);			/* 00000001 VIEWPORT_TRANSFORM_EN */
+	xf_emit(ctx, 1, 0x8100c12);		/* 1fffffff FP_INTERPOLANT_CTRL */
+	xf_emit(ctx, 4, 0);			/* ffffffff NOPERSPECTIVE_BITMAP */
+	xf_emit(ctx, 1, 0);			/* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 0);			/* 0000000f */
+	if (device->chipset == 0x50)
+		xf_emit(ctx, 1, 0x3ff);		/* 000003ff tesla UNK0D68 */
+	else
+		xf_emit(ctx, 1, 0x7ff);		/* 000007ff tesla UNK0D68 */
+	xf_emit(ctx, 1, 0x80c14);		/* 01ffffff SEMANTIC_COLOR */
+	xf_emit(ctx, 1, 0);			/* 00000001 VERTEX_TWO_SIDE_ENABLE */
+	xf_emit(ctx, 0x30, 0);			/* ffffffff VIEWPORT_SCALE: X0, Y0, Z0, X1, Y1, ... */
+	xf_emit(ctx, 3, 0);			/* f, 0, 0 */
+	xf_emit(ctx, 3, 0);			/* ffffffff last VIEWPORT_SCALE? */
+	xf_emit(ctx, 1, 0);			/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 1);			/* 00000001 VIEWPORT_TRANSFORM_EN */
+	xf_emit(ctx, 1, 0);			/* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 0);			/* 00000001 tesla UNK1924 */
+	xf_emit(ctx, 1, 0x10);			/* 000000ff VIEW_VOLUME_CLIP_CTRL */
+	xf_emit(ctx, 1, 0);			/* 00000001 */
+	xf_emit(ctx, 0x30, 0);			/* ffffffff VIEWPORT_TRANSLATE */
+	xf_emit(ctx, 3, 0);			/* f, 0, 0 */
+	xf_emit(ctx, 3, 0);			/* ffffffff */
+	xf_emit(ctx, 1, 0);			/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 2, 0x88);			/* 000001ff tesla UNK19D8 */
+	xf_emit(ctx, 1, 0);			/* 00000001 tesla UNK1924 */
+	xf_emit(ctx, 1, 0);			/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 4);			/* 0000000f CULL_MODE */
+	xf_emit(ctx, 2, 0);			/* 07ffffff SCREEN_SCISSOR */
+	xf_emit(ctx, 2, 0);			/* 00007fff WINDOW_OFFSET_XY */
+	xf_emit(ctx, 1, 0);			/* 00000003 WINDOW_ORIGIN */
+	xf_emit(ctx, 0x10, 0);			/* 00000001 SCISSOR_ENABLE */
+	xf_emit(ctx, 1, 0);			/* 0001ffff GP_BUILTIN_RESULT_EN */
+	xf_emit(ctx, 1, 0x26);			/* 000000ff SEMANTIC_LAYER */
+	xf_emit(ctx, 1, 0);			/* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 0);			/* 0000000f */
+	xf_emit(ctx, 1, 0x3f800000);		/* ffffffff LINE_WIDTH */
+	xf_emit(ctx, 1, 0);			/* 00000001 LINE_STIPPLE_ENABLE */
+	xf_emit(ctx, 1, 0);			/* 00000001 LINE_SMOOTH_ENABLE */
+	xf_emit(ctx, 1, 0);			/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 0);		/* 00000001 */
+	xf_emit(ctx, 1, 0x1a);			/* 0000001f POLYGON_MODE */
+	xf_emit(ctx, 1, 0x10);			/* 000000ff VIEW_VOLUME_CLIP_CTRL */
+	if (device->chipset != 0x50) {
+		xf_emit(ctx, 1, 0);		/* ffffffff */
+		xf_emit(ctx, 1, 0);		/* 00000001 */
+		xf_emit(ctx, 1, 0);		/* 000003ff */
+	}
+	xf_emit(ctx, 0x20, 0);			/* 10xbits ffffffff, 3fffff. SCISSOR_* */
+	xf_emit(ctx, 1, 0);			/* f */
+	xf_emit(ctx, 1, 0);			/* 0? */
+	xf_emit(ctx, 1, 0);			/* ffffffff */
+	xf_emit(ctx, 1, 0);			/* 003fffff */
+	xf_emit(ctx, 1, 0);			/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 0x52);			/* 000001ff SEMANTIC_PTSZ */
+	xf_emit(ctx, 1, 0);			/* 0001ffff GP_BUILTIN_RESULT_EN */
+	xf_emit(ctx, 1, 0x26);			/* 000000ff SEMANTIC_LAYER */
+	xf_emit(ctx, 1, 0);			/* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 4);			/* 0000007f VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 4);			/* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0);			/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0x1a);			/* 0000001f POLYGON_MODE */
+	xf_emit(ctx, 1, 0);			/* 00000001 LINE_SMOOTH_ENABLE */
+	xf_emit(ctx, 1, 0);			/* 00000001 LINE_STIPPLE_ENABLE */
+	xf_emit(ctx, 1, 0x00ffff00);		/* 00ffffff LINE_STIPPLE_PATTERN */
+	xf_emit(ctx, 1, 0);			/* 0000000f */
+}
+
+static void
+nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	/* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
+	/* SEEK */
+	xf_emit(ctx, 1, 0x3f);		/* 0000003f UNK1590 */
+	xf_emit(ctx, 1, 0);		/* 00000001 ALPHA_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
+	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_BACK_FUNC_FUNC */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_FUNC_MASK */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_FUNC_REF */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_MASK */
+	xf_emit(ctx, 3, 0);		/* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+	xf_emit(ctx, 1, 2);		/* 00000003 tesla UNK143C */
+	xf_emit(ctx, 2, 0x04000000);	/* 07ffffff tesla UNK0D6C */
+	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
+	xf_emit(ctx, 1, 0);		/* 00000001 CLIPID_ENABLE */
+	xf_emit(ctx, 2, 0);		/* ffffffff DEPTH_BOUNDS */
+	xf_emit(ctx, 1, 0);		/* 00000001 */
+	xf_emit(ctx, 1, 0);		/* 00000007 DEPTH_TEST_FUNC */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
+	xf_emit(ctx, 1, 4);		/* 0000000f CULL_MODE */
+	xf_emit(ctx, 1, 0);		/* 0000ffff */
+	xf_emit(ctx, 1, 0);		/* 00000001 UNK0FB0 */
+	xf_emit(ctx, 1, 0);		/* 00000001 POLYGON_STIPPLE_ENABLE */
+	xf_emit(ctx, 1, 4);		/* 00000007 FP_CONTROL */
+	xf_emit(ctx, 1, 0);		/* ffffffff */
+	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
+	xf_emit(ctx, 1, 0);		/* 000000ff CLEAR_STENCIL */
+	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_FRONT_FUNC_FUNC */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_FUNC_MASK */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_FUNC_REF */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_MASK */
+	xf_emit(ctx, 3, 0);		/* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_BACK_ENABLE */
+	xf_emit(ctx, 1, 0);		/* ffffffff CLEAR_DEPTH */
+	xf_emit(ctx, 1, 0);		/* 00000007 */
+	if (device->chipset != 0x50)
+		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1108 */
+	xf_emit(ctx, 1, 0);		/* 00000001 SAMPLECNT_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
+	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
+	xf_emit(ctx, 1, 0x1001);	/* 00001fff ZETA_ARRAY_MODE */
+	/* SEEK */
+	xf_emit(ctx, 4, 0xffff);	/* 0000ffff MSAA_MASK */
+	xf_emit(ctx, 0x10, 0);		/* 00000001 SCISSOR_ENABLE */
+	xf_emit(ctx, 0x10, 0);		/* ffffffff DEPTH_RANGE_NEAR */
+	xf_emit(ctx, 0x10, 0x3f800000);	/* ffffffff DEPTH_RANGE_FAR */
+	xf_emit(ctx, 1, 0x10);		/* 7f/ff/3ff VIEW_VOLUME_CLIP_CTRL */
+	xf_emit(ctx, 1, 0);		/* 00000001 VIEWPORT_CLIP_RECTS_EN */
+	xf_emit(ctx, 1, 3);		/* 00000003 FP_CTRL_UNK196C */
+	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK1968 */
+	if (device->chipset != 0x50)
+		xf_emit(ctx, 1, 0);	/* 0fffffff tesla UNK1104 */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK151C */
+}
+
+static void
+nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
+{
+	/* middle of strand 0 on pre-NVA0 [after 24xx], middle of area 6 on NVAx */
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* 00000007 UNK0FB4 */
+	/* SEEK */
+	xf_emit(ctx, 4, 0);		/* 07ffffff CLIPID_REGION_HORIZ */
+	xf_emit(ctx, 4, 0);		/* 07ffffff CLIPID_REGION_VERT */
+	xf_emit(ctx, 2, 0);		/* 07ffffff SCREEN_SCISSOR */
+	xf_emit(ctx, 2, 0x04000000);	/* 07ffffff UNK1508 */
+	xf_emit(ctx, 1, 0);		/* 00000001 CLIPID_ENABLE */
+	xf_emit(ctx, 1, 0x80);		/* 00003fff CLIPID_WIDTH */
+	xf_emit(ctx, 1, 0);		/* 000000ff CLIPID_ID */
+	xf_emit(ctx, 1, 0);		/* 000000ff CLIPID_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0);		/* ffffffff CLIPID_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0x80);		/* 00003fff CLIPID_HEIGHT */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_CLIPID */
+}
+
+static void
+nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int i;
+	/* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
+	/* SEEK */
+	xf_emit(ctx, 0x33, 0);
+	/* SEEK */
+	xf_emit(ctx, 2, 0);
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
+	/* SEEK */
+	if (IS_NVA3F(device->chipset)) {
+		xf_emit(ctx, 4, 0);	/* RO */
+		xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+		xf_emit(ctx, 1, 0);	/* 1ff */
+		xf_emit(ctx, 8, 0);	/* 0? */
+		xf_emit(ctx, 9, 0);	/* ffffffff, 7ff */
+
+		xf_emit(ctx, 4, 0);	/* RO */
+		xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+		xf_emit(ctx, 1, 0);	/* 1ff */
+		xf_emit(ctx, 8, 0);	/* 0? */
+		xf_emit(ctx, 9, 0);	/* ffffffff, 7ff */
+	} else {
+		xf_emit(ctx, 0xc, 0);	/* RO */
+		/* SEEK */
+		xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+		xf_emit(ctx, 1, 0);	/* 1ff */
+		xf_emit(ctx, 8, 0);	/* 0? */
+
+		/* SEEK */
+		xf_emit(ctx, 0xc, 0);	/* RO */
+		/* SEEK */
+		xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+		xf_emit(ctx, 1, 0);	/* 1ff */
+		xf_emit(ctx, 8, 0);	/* 0? */
+	}
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
+	if (device->chipset != 0x50)
+		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK1100 */
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
+	xf_emit(ctx, 1, 0);		/* 0000000f VP_GP_BUILTIN_ATTR_EN */
+	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
+	xf_emit(ctx, 1, 1);		/* 00000001 */
+	/* SEEK */
+	if (device->chipset >= 0xa0)
+		xf_emit(ctx, 2, 4);	/* 000000ff */
+	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
+	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 POINT_SPRITE_ENABLE */
+	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
+	xf_emit(ctx, 1, 0x27);		/* 000000ff SEMANTIC_PRIM_ID */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 0000000f */
+	xf_emit(ctx, 1, 1);		/* 00000001 */
+	for (i = 0; i < 10; i++) {
+		/* SEEK */
+		xf_emit(ctx, 0x40, 0);		/* ffffffff */
+		xf_emit(ctx, 0x10, 0);		/* 3, 0, 0.... */
+		xf_emit(ctx, 0x10, 0);		/* ffffffff */
+	}
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* 00000001 POINT_SPRITE_CTRL */
+	xf_emit(ctx, 1, 1);		/* 00000001 */
+	xf_emit(ctx, 1, 0);		/* ffffffff */
+	xf_emit(ctx, 4, 0);		/* ffffffff NOPERSPECTIVE_BITMAP */
+	xf_emit(ctx, 0x10, 0);		/* 00ffffff POINT_COORD_REPLACE_MAP */
+	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
+	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
+	if (device->chipset != 0x50)
+		xf_emit(ctx, 1, 0);	/* 000003ff */
+}
+
+static void
+nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int acnt = 0x10;	/* number of vertex attrs */
+	int rep, i;
+	/* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
+	if (IS_NVA3F(device->chipset))
+		acnt = 0x20;
+	/* SEEK */
+	if (device->chipset >= 0xa0) {
+		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK13A4 */
+		xf_emit(ctx, 1, 1);	/* 00000fff tesla UNK1318 */
+	}
+	xf_emit(ctx, 1, 0);		/* ffffffff VERTEX_BUFFER_FIRST */
+	xf_emit(ctx, 1, 0);		/* 00000001 PRIMITIVE_RESTART_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 UNK0DE8 */
+	xf_emit(ctx, 1, 0);		/* ffffffff PRIMITIVE_RESTART_INDEX */
+	xf_emit(ctx, 1, 0xf);		/* ffffffff VP_ATTR_EN */
+	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
+	xf_emit(ctx, acnt/8, 0);	/* ffffffff VTX_ATTR_MASK_UNK0DD0 */
+	xf_emit(ctx, 1, 0);		/* 0000000f VP_GP_BUILTIN_ATTR_EN */
+	xf_emit(ctx, 1, 0x20);		/* 0000ffff tesla UNK129C */
+	xf_emit(ctx, 1, 0);		/* 000000ff turing UNK370??? */
+	xf_emit(ctx, 1, 0);		/* 0000ffff turing USER_PARAM_COUNT */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+	/* SEEK */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 0xb, 0);	/* RO */
+	else if (device->chipset >= 0xa0)
+		xf_emit(ctx, 0x9, 0);	/* RO */
+	else
+		xf_emit(ctx, 0x8, 0);	/* RO */
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* 00000001 EDGE_FLAG */
+	xf_emit(ctx, 1, 0);		/* 00000001 PROVOKING_VERTEX_LAST */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE */
+	/* SEEK */
+	xf_emit(ctx, 0xc, 0);		/* RO */
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* 7f/ff */
+	xf_emit(ctx, 1, 4);		/* 7f/ff VP_REG_ALLOC_RESULT */
+	xf_emit(ctx, 1, 4);		/* 7f/ff VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0);		/* 0000000f VP_GP_BUILTIN_ATTR_EN */
+	xf_emit(ctx, 1, 4);		/* 000001ff UNK1A28 */
+	xf_emit(ctx, 1, 8);		/* 000001ff UNK0DF0 */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	if (device->chipset == 0x50)
+		xf_emit(ctx, 1, 0x3ff);	/* 3ff tesla UNK0D68 */
+	else
+		xf_emit(ctx, 1, 0x7ff);	/* 7ff tesla UNK0D68 */
+	if (device->chipset == 0xa8)
+		xf_emit(ctx, 1, 0x1e00);	/* 7fff */
+	/* SEEK */
+	xf_emit(ctx, 0xc, 0);		/* RO or close */
+	/* SEEK */
+	xf_emit(ctx, 1, 0xf);		/* ffffffff VP_ATTR_EN */
+	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
+	xf_emit(ctx, 1, 0);		/* 0000000f VP_GP_BUILTIN_ATTR_EN */
+	if (device->chipset > 0x50 && device->chipset < 0xa0)
+		xf_emit(ctx, 2, 0);	/* ffffffff */
+	else
+		xf_emit(ctx, 1, 0);	/* ffffffff */
+	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK0FD8 */
+	/* SEEK */
+	if (IS_NVA3F(device->chipset)) {
+		xf_emit(ctx, 0x10, 0);	/* 0? */
+		xf_emit(ctx, 2, 0);	/* weird... */
+		xf_emit(ctx, 2, 0);	/* RO */
+	} else {
+		xf_emit(ctx, 8, 0);	/* 0? */
+		xf_emit(ctx, 1, 0);	/* weird... */
+		xf_emit(ctx, 2, 0);	/* RO */
+	}
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* ffffffff VB_ELEMENT_BASE */
+	xf_emit(ctx, 1, 0);		/* ffffffff UNK1438 */
+	xf_emit(ctx, acnt, 0);		/* 1 tesla UNK1000 */
+	if (device->chipset >= 0xa0)
+		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1118? */
+	/* SEEK */
+	xf_emit(ctx, acnt, 0);		/* ffffffff VERTEX_ARRAY_UNK90C */
+	xf_emit(ctx, 1, 0);		/* f/1f */
+	/* SEEK */
+	xf_emit(ctx, acnt, 0);		/* ffffffff VERTEX_ARRAY_UNK90C */
+	xf_emit(ctx, 1, 0);		/* f/1f */
+	/* SEEK */
+	xf_emit(ctx, acnt, 0);		/* RO */
+	xf_emit(ctx, 2, 0);		/* RO */
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK111C? */
+	xf_emit(ctx, 1, 0);		/* RO */
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* 000000ff UNK15F4_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0);		/* ffffffff UNK15F4_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0);		/* 000000ff UNK0F84_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0);		/* ffffffff UNK0F84_ADDRESS_LOW */
+	/* SEEK */
+	xf_emit(ctx, acnt, 0);		/* 00003fff VERTEX_ARRAY_ATTRIB_OFFSET */
+	xf_emit(ctx, 3, 0);		/* f/1f */
+	/* SEEK */
+	xf_emit(ctx, acnt, 0);		/* 00000fff VERTEX_ARRAY_STRIDE */
+	xf_emit(ctx, 3, 0);		/* f/1f */
+	/* SEEK */
+	xf_emit(ctx, acnt, 0);		/* ffffffff VERTEX_ARRAY_LOW */
+	xf_emit(ctx, 3, 0);		/* f/1f */
+	/* SEEK */
+	xf_emit(ctx, acnt, 0);		/* 000000ff VERTEX_ARRAY_HIGH */
+	xf_emit(ctx, 3, 0);		/* f/1f */
+	/* SEEK */
+	xf_emit(ctx, acnt, 0);		/* ffffffff VERTEX_LIMIT_LOW */
+	xf_emit(ctx, 3, 0);		/* f/1f */
+	/* SEEK */
+	xf_emit(ctx, acnt, 0);		/* 000000ff VERTEX_LIMIT_HIGH */
+	xf_emit(ctx, 3, 0);		/* f/1f */
+	/* SEEK */
+	if (IS_NVA3F(device->chipset)) {
+		xf_emit(ctx, acnt, 0);		/* f */
+		xf_emit(ctx, 3, 0);		/* f/1f */
+	}
+	/* SEEK */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 2, 0);	/* RO */
+	else
+		xf_emit(ctx, 5, 0);	/* RO */
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* ffff DMA_VTXBUF */
+	/* SEEK */
+	if (device->chipset < 0xa0) {
+		xf_emit(ctx, 0x41, 0);	/* RO */
+		/* SEEK */
+		xf_emit(ctx, 0x11, 0);	/* RO */
+	} else if (!IS_NVA3F(device->chipset))
+		xf_emit(ctx, 0x50, 0);	/* RO */
+	else
+		xf_emit(ctx, 0x58, 0);	/* RO */
+	/* SEEK */
+	xf_emit(ctx, 1, 0xf);		/* ffffffff VP_ATTR_EN */
+	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
+	xf_emit(ctx, 1, 1);		/* 1 UNK0DEC */
+	/* SEEK */
+	xf_emit(ctx, acnt*4, 0);	/* ffffffff VTX_ATTR */
+	xf_emit(ctx, 4, 0);		/* f/1f, 0, 0, 0 */
+	/* SEEK */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 0x1d, 0);	/* RO */
+	else
+		xf_emit(ctx, 0x16, 0);	/* RO */
+	/* SEEK */
+	xf_emit(ctx, 1, 0xf);		/* ffffffff VP_ATTR_EN */
+	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
+	/* SEEK */
+	if (device->chipset < 0xa0)
+		xf_emit(ctx, 8, 0);	/* RO */
+	else if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 0xc, 0);	/* RO */
+	else
+		xf_emit(ctx, 7, 0);	/* RO */
+	/* SEEK */
+	xf_emit(ctx, 0xa, 0);		/* RO */
+	if (device->chipset == 0xa0)
+		rep = 0xc;
+	else
+		rep = 4;
+	for (i = 0; i < rep; i++) {
+		/* SEEK */
+		if (IS_NVA3F(device->chipset))
+			xf_emit(ctx, 0x20, 0);	/* ffffffff */
+		xf_emit(ctx, 0x200, 0);	/* ffffffff */
+		xf_emit(ctx, 4, 0);	/* 7f/ff, 0, 0, 0 */
+		xf_emit(ctx, 4, 0);	/* ffffffff */
+	}
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* 113/111 */
+	xf_emit(ctx, 1, 0xf);		/* ffffffff VP_ATTR_EN */
+	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
+	xf_emit(ctx, acnt/8, 0);	/* ffffffff VTX_ATTR_MASK_UNK0DD0 */
+	xf_emit(ctx, 1, 0);		/* 0000000f VP_GP_BUILTIN_ATTR_EN */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+	/* SEEK */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 7, 0);	/* weird... */
+	else
+		xf_emit(ctx, 5, 0);	/* weird... */
+}
+
+static void
+nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	/* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
+	/* SEEK */
+	xf_emit(ctx, 2, 0);		/* 0001ffff CLIP_X, CLIP_Y */
+	xf_emit(ctx, 2, 0);		/* 0000ffff CLIP_W, CLIP_H */
+	xf_emit(ctx, 1, 0);		/* 00000001 CLIP_ENABLE */
+	if (device->chipset < 0xa0) {
+		/* this is useless on everything but the original NV50;
+		 * presumably they forgot to nuke it, or just didn't bother. */
+		xf_emit(ctx, 2, 0);	/* 0000ffff IFC_CLIP_X, Y */
+		xf_emit(ctx, 2, 1);	/* 0000ffff IFC_CLIP_W, H */
+		xf_emit(ctx, 1, 0);	/* 00000001 IFC_CLIP_ENABLE */
+	}
+	xf_emit(ctx, 1, 1);		/* 00000001 DST_LINEAR */
+	xf_emit(ctx, 1, 0x100);		/* 0001ffff DST_WIDTH */
+	xf_emit(ctx, 1, 0x100);		/* 0001ffff DST_HEIGHT */
+	xf_emit(ctx, 1, 0x11);		/* 3f[NV50]/7f[NV84+] DST_FORMAT */
+	xf_emit(ctx, 1, 0);		/* 0001ffff DRAW_POINT_X */
+	xf_emit(ctx, 1, 8);		/* 0000000f DRAW_UNK58C */
+	xf_emit(ctx, 1, 0);		/* 000fffff SIFC_DST_X_FRACT */
+	xf_emit(ctx, 1, 0);		/* 0001ffff SIFC_DST_X_INT */
+	xf_emit(ctx, 1, 0);		/* 000fffff SIFC_DST_Y_FRACT */
+	xf_emit(ctx, 1, 0);		/* 0001ffff SIFC_DST_Y_INT */
+	xf_emit(ctx, 1, 0);		/* 000fffff SIFC_DX_DU_FRACT */
+	xf_emit(ctx, 1, 1);		/* 0001ffff SIFC_DX_DU_INT */
+	xf_emit(ctx, 1, 0);		/* 000fffff SIFC_DY_DV_FRACT */
+	xf_emit(ctx, 1, 1);		/* 0001ffff SIFC_DY_DV_INT */
+	xf_emit(ctx, 1, 1);		/* 0000ffff SIFC_WIDTH */
+	xf_emit(ctx, 1, 1);		/* 0000ffff SIFC_HEIGHT */
+	xf_emit(ctx, 1, 0xcf);		/* 000000ff SIFC_FORMAT */
+	xf_emit(ctx, 1, 2);		/* 00000003 SIFC_BITMAP_UNK808 */
+	xf_emit(ctx, 1, 0);		/* 00000003 SIFC_BITMAP_LINE_PACK_MODE */
+	xf_emit(ctx, 1, 0);		/* 00000001 SIFC_BITMAP_LSB_FIRST */
+	xf_emit(ctx, 1, 0);		/* 00000001 SIFC_BITMAP_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 0000ffff BLIT_DST_X */
+	xf_emit(ctx, 1, 0);		/* 0000ffff BLIT_DST_Y */
+	xf_emit(ctx, 1, 0);		/* 000fffff BLIT_DU_DX_FRACT */
+	xf_emit(ctx, 1, 1);		/* 0001ffff BLIT_DU_DX_INT */
+	xf_emit(ctx, 1, 0);		/* 000fffff BLIT_DV_DY_FRACT */
+	xf_emit(ctx, 1, 1);		/* 0001ffff BLIT_DV_DY_INT */
+	xf_emit(ctx, 1, 1);		/* 0000ffff BLIT_DST_W */
+	xf_emit(ctx, 1, 1);		/* 0000ffff BLIT_DST_H */
+	xf_emit(ctx, 1, 0);		/* 000fffff BLIT_SRC_X_FRACT */
+	xf_emit(ctx, 1, 0);		/* 0001ffff BLIT_SRC_X_INT */
+	xf_emit(ctx, 1, 0);		/* 000fffff BLIT_SRC_Y_FRACT */
+	xf_emit(ctx, 1, 0);		/* 00000001 UNK888 */
+	xf_emit(ctx, 1, 4);		/* 0000003f UNK884 */
+	xf_emit(ctx, 1, 0);		/* 00000007 UNK880 */
+	xf_emit(ctx, 1, 1);		/* 0000001f tesla UNK0FB8 */
+	xf_emit(ctx, 1, 0x15);		/* 000000ff tesla UNK128C */
+	xf_emit(ctx, 2, 0);		/* 00000007, ffff0ff3 */
+	xf_emit(ctx, 1, 0);		/* 00000001 UNK260 */
+	xf_emit(ctx, 1, 0x4444480);	/* 1fffffff UNK870 */
+	/* SEEK */
+	xf_emit(ctx, 0x10, 0);
+	/* SEEK */
+	xf_emit(ctx, 0x27, 0);
+}
+
+static void
+nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	/* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
+	/* SEEK */
+	xf_emit(ctx, 2, 0);		/* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1924 */
+	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
+	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
+	xf_emit(ctx, 1, 0);		/* 000003ff */
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* ffffffff turing UNK364 */
+	xf_emit(ctx, 1, 0);		/* 0000000f turing UNK36C */
+	xf_emit(ctx, 1, 0);		/* 0000ffff USER_PARAM_COUNT */
+	xf_emit(ctx, 1, 0x100);		/* 00ffffff turing UNK384 */
+	xf_emit(ctx, 1, 0);		/* 0000000f turing UNK2A0 */
+	xf_emit(ctx, 1, 0);		/* 0000ffff GRIDID */
+	xf_emit(ctx, 1, 0x10001);	/* ffffffff GRIDDIM_XY */
+	xf_emit(ctx, 1, 0);		/* ffffffff */
+	xf_emit(ctx, 1, 0x10001);	/* ffffffff BLOCKDIM_XY */
+	xf_emit(ctx, 1, 1);		/* 0000ffff BLOCKDIM_Z */
+	xf_emit(ctx, 1, 0x10001);	/* 00ffffff BLOCK_ALLOC */
+	xf_emit(ctx, 1, 1);		/* 00000001 LANES32 */
+	xf_emit(ctx, 1, 4);		/* 000000ff FP_REG_ALLOC_TEMP */
+	xf_emit(ctx, 1, 2);		/* 00000003 REG_MODE */
+	/* SEEK */
+	xf_emit(ctx, 0x40, 0);		/* ffffffff USER_PARAM */
+	switch (device->chipset) {
+	case 0x50:
+	case 0x92:
+		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ... */
+		xf_emit(ctx, 0x80, 0);	/* fff */
+		xf_emit(ctx, 2, 0);	/* ff, fff */
+		xf_emit(ctx, 0x10*2, 0);	/* ffffffff, 1f */
+		break;
+	case 0x84:
+		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ... */
+		xf_emit(ctx, 0x60, 0);	/* fff */
+		xf_emit(ctx, 2, 0);	/* ff, fff */
+		xf_emit(ctx, 0xc*2, 0);	/* ffffffff, 1f */
+		break;
+	case 0x94:
+	case 0x96:
+		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ... */
+		xf_emit(ctx, 0x40, 0);	/* fff */
+		xf_emit(ctx, 2, 0);	/* ff, fff */
+		xf_emit(ctx, 8*2, 0);	/* ffffffff, 1f */
+		break;
+	case 0x86:
+	case 0x98:
+		xf_emit(ctx, 4, 0);	/* f, 0, 0, 0 */
+		xf_emit(ctx, 0x10, 0);	/* fff */
+		xf_emit(ctx, 2, 0);	/* ff, fff */
+		xf_emit(ctx, 2*2, 0);	/* ffffffff, 1f */
+		break;
+	case 0xa0:
+		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ... */
+		xf_emit(ctx, 0xf0, 0);	/* fff */
+		xf_emit(ctx, 2, 0);	/* ff, fff */
+		xf_emit(ctx, 0x1e*2, 0);	/* ffffffff, 1f */
+		break;
+	case 0xa3:
+		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ... */
+		xf_emit(ctx, 0x60, 0);	/* fff */
+		xf_emit(ctx, 2, 0);	/* ff, fff */
+		xf_emit(ctx, 0xc*2, 0);	/* ffffffff, 1f */
+		break;
+	case 0xa5:
+	case 0xaf:
+		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ... */
+		xf_emit(ctx, 0x30, 0);	/* fff */
+		xf_emit(ctx, 2, 0);	/* ff, fff */
+		xf_emit(ctx, 6*2, 0);	/* ffffffff, 1f */
+		break;
+	case 0xaa:
+		xf_emit(ctx, 0x12, 0);
+		break;
+	case 0xa8:
+	case 0xac:
+		xf_emit(ctx, 4, 0);	/* f, 0, 0, 0 */
+		xf_emit(ctx, 0x10, 0);	/* fff */
+		xf_emit(ctx, 2, 0);	/* ff, fff */
+		xf_emit(ctx, 2*2, 0);	/* ffffffff, 1f */
+		break;
+	}
+	xf_emit(ctx, 1, 0);		/* 0000000f */
+	xf_emit(ctx, 1, 0);		/* 00000000 */
+	xf_emit(ctx, 1, 0);		/* ffffffff */
+	xf_emit(ctx, 1, 0);		/* 0000001f */
+	xf_emit(ctx, 4, 0);		/* ffffffff */
+	xf_emit(ctx, 1, 0);		/* 00000003 turing UNK35C */
+	xf_emit(ctx, 1, 0);		/* ffffffff */
+	xf_emit(ctx, 4, 0);		/* ffffffff */
+	xf_emit(ctx, 1, 0);		/* 00000003 turing UNK35C */
+	xf_emit(ctx, 1, 0);		/* ffffffff */
+	xf_emit(ctx, 1, 0);		/* 000000ff */
+}
+
+static void
+nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	xf_emit(ctx, 2, 0);		/* 00007fff WINDOW_OFFSET_XY */
+	xf_emit(ctx, 1, 0x3f800000);	/* ffffffff LINE_WIDTH */
+	xf_emit(ctx, 1, 0);		/* 00000001 LINE_SMOOTH_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1658 */
+	xf_emit(ctx, 1, 0);		/* 00000001 POLYGON_SMOOTH_ENABLE */
+	xf_emit(ctx, 3, 0);		/* 00000001 POLYGON_OFFSET_*_ENABLE */
+	xf_emit(ctx, 1, 4);		/* 0000000f CULL_MODE */
+	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE */
+	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
+	xf_emit(ctx, 1, 0);		/* 00000001 POINT_SPRITE_ENABLE */
+	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK165C */
+	xf_emit(ctx, 0x10, 0);		/* 00000001 SCISSOR_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
+	xf_emit(ctx, 1, 0);		/* 00000001 LINE_STIPPLE_ENABLE */
+	xf_emit(ctx, 1, 0x00ffff00);	/* 00ffffff LINE_STIPPLE_PATTERN */
+	xf_emit(ctx, 1, 0);		/* ffffffff POLYGON_OFFSET_UNITS */
+	xf_emit(ctx, 1, 0);		/* ffffffff POLYGON_OFFSET_FACTOR */
+	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK1668 */
+	xf_emit(ctx, 2, 0);		/* 07ffffff SCREEN_SCISSOR */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 0xf);		/* 0000000f COLOR_MASK */
+	xf_emit(ctx, 7, 0);		/* 0000000f COLOR_MASK */
+	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
+	xf_emit(ctx, 1, 0x11);		/* 0000007f RT_FORMAT */
+	xf_emit(ctx, 7, 0);		/* 0000007f RT_FORMAT */
+	xf_emit(ctx, 8, 0);		/* 00000001 RT_HORIZ_LINEAR */
+	xf_emit(ctx, 1, 4);		/* 00000007 FP_CONTROL */
+	xf_emit(ctx, 1, 0);		/* 00000001 ALPHA_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000007 ALPHA_TEST_FUNC */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 3);	/* 00000003 UNK16B4 */
+	else if (device->chipset >= 0xa0)
+		xf_emit(ctx, 1, 1);	/* 00000001 UNK16B4 */
+	xf_emit(ctx, 1, 0);		/* 00000003 MULTISAMPLE_CTRL */
+	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK0F90 */
+	xf_emit(ctx, 1, 2);		/* 00000003 tesla UNK143C */
+	xf_emit(ctx, 2, 0x04000000);	/* 07ffffff tesla UNK0D6C */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_MASK */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 SAMPLECNT_ENABLE */
+	xf_emit(ctx, 1, 5);		/* 0000000f UNK1408 */
+	xf_emit(ctx, 1, 0x52);		/* 000001ff SEMANTIC_PTSZ */
+	xf_emit(ctx, 1, 0);		/* ffffffff POINT_SIZE */
+	xf_emit(ctx, 1, 0);		/* 00000001 */
+	xf_emit(ctx, 1, 0);		/* 00000007 tesla UNK0FB4 */
+	if (device->chipset != 0x50) {
+		xf_emit(ctx, 1, 0);	/* 3ff */
+		xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK1110 */
+	}
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1928 */
+	xf_emit(ctx, 0x10, 0);		/* ffffffff DEPTH_RANGE_NEAR */
+	xf_emit(ctx, 0x10, 0x3f800000);	/* ffffffff DEPTH_RANGE_FAR */
+	xf_emit(ctx, 1, 0x10);		/* 000000ff VIEW_VOLUME_CLIP_CTRL */
+	xf_emit(ctx, 0x20, 0);		/* 07ffffff VIEWPORT_HORIZ, then VIEWPORT_VERT. (W&0x3fff)<<13 | (X&0x1fff). */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK187C */
+	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_BACK_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_MASK */
+	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
+	xf_emit(ctx, 1, 5);		/* 0000000f tesla UNK1220 */
+	xf_emit(ctx, 1, 0);		/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 1, 0);		/* 000000ff tesla UNK1A20 */
+	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
+	xf_emit(ctx, 4, 0xffff);	/* 0000ffff MSAA_MASK */
+	if (device->chipset != 0x50)
+		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK1100 */
+	if (device->chipset < 0xa0)
+		xf_emit(ctx, 0x1c, 0);	/* RO */
+	else if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 0x9, 0);
+	xf_emit(ctx, 1, 0);		/* 00000001 UNK1534 */
+	xf_emit(ctx, 1, 0);		/* 00000001 LINE_SMOOTH_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 LINE_STIPPLE_ENABLE */
+	xf_emit(ctx, 1, 0x00ffff00);	/* 00ffffff LINE_STIPPLE_PATTERN */
+	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE */
+	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
+	if (device->chipset != 0x50) {
+		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK1100 */
+		xf_emit(ctx, 1, 0);	/* 3ff */
+	}
+	/* XXX: the following block could belong either to unk1cxx, or
+	 * to STRMOUT. Rather hard to tell. */
+	if (device->chipset < 0xa0)
+		xf_emit(ctx, 0x25, 0);
+	else
+		xf_emit(ctx, 0x3b, 0);
+}
+
+static void
+nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	xf_emit(ctx, 1, 0x102);		/* 0000ffff STRMOUT_BUFFER_CTRL */
+	xf_emit(ctx, 1, 0);		/* ffffffff STRMOUT_PRIMITIVE_COUNT */
+	xf_emit(ctx, 4, 4);		/* 000000ff STRMOUT_NUM_ATTRIBS */
+	if (device->chipset >= 0xa0) {
+		xf_emit(ctx, 4, 0);	/* ffffffff UNK1A8C */
+		xf_emit(ctx, 4, 0);	/* ffffffff UNK1780 */
+	}
+	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	if (device->chipset == 0x50)
+		xf_emit(ctx, 1, 0x3ff);	/* 000003ff tesla UNK0D68 */
+	else
+		xf_emit(ctx, 1, 0x7ff);	/* 000007ff tesla UNK0D68 */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+	/* SEEK */
+	xf_emit(ctx, 1, 0x102);		/* 0000ffff STRMOUT_BUFFER_CTRL */
+	xf_emit(ctx, 1, 0);		/* ffffffff STRMOUT_PRIMITIVE_COUNT */
+	xf_emit(ctx, 4, 0);		/* 000000ff STRMOUT_ADDRESS_HIGH */
+	xf_emit(ctx, 4, 0);		/* ffffffff STRMOUT_ADDRESS_LOW */
+	xf_emit(ctx, 4, 4);		/* 000000ff STRMOUT_NUM_ATTRIBS */
+	if (device->chipset >= 0xa0) {
+		xf_emit(ctx, 4, 0);	/* ffffffff UNK1A8C */
+		xf_emit(ctx, 4, 0);	/* ffffffff UNK1780 */
+	}
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_STRMOUT */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_QUERY */
+	xf_emit(ctx, 1, 0);		/* 000000ff QUERY_ADDRESS_HIGH */
+	xf_emit(ctx, 2, 0);		/* ffffffff QUERY_ADDRESS_LOW QUERY_COUNTER */
+	xf_emit(ctx, 2, 0);		/* ffffffff */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+	/* SEEK */
+	xf_emit(ctx, 0x20, 0);		/* ffffffff STRMOUT_MAP */
+	xf_emit(ctx, 1, 0);		/* 0000000f */
+	xf_emit(ctx, 1, 0);		/* 00000000? */
+	xf_emit(ctx, 2, 0);		/* ffffffff */
+}
+
+static void
+nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	xf_emit(ctx, 1, 0x4e3bfdf);	/* ffffffff UNK0D64 */
+	xf_emit(ctx, 1, 0x4e3bfdf);	/* ffffffff UNK0DF4 */
+	xf_emit(ctx, 1, 0);		/* 00000007 */
+	xf_emit(ctx, 1, 0);		/* 000003ff */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 0x11);	/* 000000ff tesla UNK1968 */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
+}
+
+static void
+nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_QUERY */
+	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
+	xf_emit(ctx, 2, 0);		/* ffffffff */
+	xf_emit(ctx, 1, 0);		/* 000000ff QUERY_ADDRESS_HIGH */
+	xf_emit(ctx, 2, 0);		/* ffffffff QUERY_ADDRESS_LOW, COUNTER */
+	xf_emit(ctx, 1, 0);		/* 00000001 SAMPLECNT_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 7 */
+	/* SEEK */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_QUERY */
+	xf_emit(ctx, 1, 0);		/* 000000ff QUERY_ADDRESS_HIGH */
+	xf_emit(ctx, 2, 0);		/* ffffffff QUERY_ADDRESS_LOW, COUNTER */
+	xf_emit(ctx, 1, 0x4e3bfdf);	/* ffffffff UNK0D64 */
+	xf_emit(ctx, 1, 0x4e3bfdf);	/* ffffffff UNK0DF4 */
+	xf_emit(ctx, 1, 0);		/* 00000001 eng2d UNK260 */
+	xf_emit(ctx, 1, 0);		/* ff/3ff */
+	xf_emit(ctx, 1, 0);		/* 00000007 */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 0x11);	/* 000000ff tesla UNK1968 */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
+}
+
+static void
+nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int magic2;
+	if (device->chipset == 0x50)
+		magic2 = 0x00003e60;
+	else if (!IS_NVA3F(device->chipset))
+		magic2 = 0x001ffe67;
+	else
+		magic2 = 0x00087e67;
+	xf_emit(ctx, 1, 0);		/* f/7 MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
+	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_BACK_FUNC_FUNC */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_FUNC_MASK */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_MASK */
+	xf_emit(ctx, 3, 0);		/* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+	xf_emit(ctx, 1, 2);		/* 00000003 tesla UNK143C */
+	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
+	xf_emit(ctx, 1, magic2);	/* 001fffff tesla UNK0F78 */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_BOUNDS_EN */
+	xf_emit(ctx, 1, 0);		/* 00000007 DEPTH_TEST_FUNC */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
+	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_FRONT_FUNC_FUNC */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_FUNC_MASK */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_MASK */
+	xf_emit(ctx, 3, 0);		/* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
+	if (device->chipset >= 0xa0 && !IS_NVAAF(device->chipset))
+		xf_emit(ctx, 1, 0x15);	/* 000000ff */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_BACK_ENABLE */
+	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK15B4 */
+	xf_emit(ctx, 1, 0x10);		/* 3ff/ff VIEW_VOLUME_CLIP_CTRL */
+	xf_emit(ctx, 1, 0);		/* ffffffff CLEAR_DEPTH */
+	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
+	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
+	if (device->chipset == 0x86 || device->chipset == 0x92 ||
+	    device->chipset == 0x98 || device->chipset >= 0xa0) {
+		xf_emit(ctx, 3, 0);	/* ff, ffffffff, ffffffff */
+		xf_emit(ctx, 1, 4);	/* 7 */
+		xf_emit(ctx, 1, 0x400);	/* fffffff */
+		xf_emit(ctx, 1, 0x300);	/* ffff */
+		xf_emit(ctx, 1, 0x1001);	/* 1fff */
+		if (device->chipset != 0xa0) {
+			if (IS_NVA3F(device->chipset))
+				xf_emit(ctx, 1, 0);	/* 0000000f UNK15C8 */
+			else
+				xf_emit(ctx, 1, 0x15);	/* ff */
+		}
+	}
+	xf_emit(ctx, 1, 0);		/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
+	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_BACK_FUNC_FUNC */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_FUNC_MASK */
+	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
+	xf_emit(ctx, 1, 2);		/* 00000003 tesla UNK143C */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_BOUNDS_EN */
+	xf_emit(ctx, 1, 0);		/* 00000007 DEPTH_TEST_FUNC */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_FRONT_FUNC_FUNC */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_FUNC_MASK */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_BACK_ENABLE */
+	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK15B4 */
+	xf_emit(ctx, 1, 0x10);		/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
+	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
+	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_BACK_FUNC_FUNC */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_FUNC_MASK */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_FUNC_REF */
+	xf_emit(ctx, 2, 0);		/* ffffffff DEPTH_BOUNDS */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_BOUNDS_EN */
+	xf_emit(ctx, 1, 0);		/* 00000007 DEPTH_TEST_FUNC */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 0000000f */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK0FB0 */
+	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_FRONT_FUNC_FUNC */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_FUNC_MASK */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_FUNC_REF */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_BACK_ENABLE */
+	xf_emit(ctx, 1, 0x10);		/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+	xf_emit(ctx, 0x10, 0);		/* ffffffff DEPTH_RANGE_NEAR */
+	xf_emit(ctx, 0x10, 0x3f800000);	/* ffffffff DEPTH_RANGE_FAR */
+	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
+	xf_emit(ctx, 1, 0);		/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_BACK_FUNC_FUNC */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_FUNC_MASK */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_FUNC_REF */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_MASK */
+	xf_emit(ctx, 3, 0);		/* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+	xf_emit(ctx, 2, 0);		/* ffffffff DEPTH_BOUNDS */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_BOUNDS_EN */
+	xf_emit(ctx, 1, 0);		/* 00000007 DEPTH_TEST_FUNC */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 000000ff CLEAR_STENCIL */
+	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_FRONT_FUNC_FUNC */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_FUNC_MASK */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_FUNC_REF */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_MASK */
+	xf_emit(ctx, 3, 0);		/* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_BACK_ENABLE */
+	xf_emit(ctx, 1, 0x10);		/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
+	xf_emit(ctx, 1, 0x3f);		/* 0000003f UNK1590 */
+	xf_emit(ctx, 1, 0);		/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
+	xf_emit(ctx, 2, 0);		/* ffff0ff3, ffff */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK0FB0 */
+	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
+	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK15B4 */
+	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
+	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
+	xf_emit(ctx, 1, 0);		/* ffffffff CLEAR_DEPTH */
+	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK19CC */
+	if (device->chipset >= 0xa0) {
+		xf_emit(ctx, 2, 0);
+		xf_emit(ctx, 1, 0x1001);
+		xf_emit(ctx, 0xb, 0);
+	} else {
+		xf_emit(ctx, 1, 0);	/* 00000007 */
+		xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1534 */
+		xf_emit(ctx, 1, 0);	/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+		xf_emit(ctx, 8, 0);	/* 00000001 BLEND_ENABLE */
+		xf_emit(ctx, 1, 0);	/* ffff0ff3 */
+	}
+	xf_emit(ctx, 1, 0x11);		/* 3f/7f RT_FORMAT */
+	xf_emit(ctx, 7, 0);		/* 3f/7f RT_FORMAT */
+	xf_emit(ctx, 1, 0xf);		/* 0000000f COLOR_MASK */
+	xf_emit(ctx, 7, 0);		/* 0000000f COLOR_MASK */
+	xf_emit(ctx, 1, 0x11);		/* 3f/7f */
+	xf_emit(ctx, 1, 0);		/* 00000001 LOGIC_OP_ENABLE */
+	if (device->chipset != 0x50) {
+		xf_emit(ctx, 1, 0);	/* 0000000f LOGIC_OP */
+		xf_emit(ctx, 1, 0);	/* 000000ff */
+	}
+	xf_emit(ctx, 1, 0);		/* 00000007 OPERATION */
+	xf_emit(ctx, 1, 0);		/* ff/3ff */
+	xf_emit(ctx, 1, 0);		/* 00000003 UNK0F90 */
+	xf_emit(ctx, 2, 1);		/* 00000007 BLEND_EQUATION_RGB, ALPHA */
+	xf_emit(ctx, 1, 1);		/* 00000001 UNK133C */
+	xf_emit(ctx, 1, 2);		/* 0000001f BLEND_FUNC_SRC_RGB */
+	xf_emit(ctx, 1, 1);		/* 0000001f BLEND_FUNC_DST_RGB */
+	xf_emit(ctx, 1, 2);		/* 0000001f BLEND_FUNC_SRC_ALPHA */
+	xf_emit(ctx, 1, 1);		/* 0000001f BLEND_FUNC_DST_ALPHA */
+	xf_emit(ctx, 1, 0);		/* 00000001 */
+	xf_emit(ctx, 1, magic2);	/* 001fffff tesla UNK0F78 */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
+	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
+	if (IS_NVA3F(device->chipset)) {
+		xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK12E4 */
+		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_RGB */
+		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_ALPHA */
+		xf_emit(ctx, 8, 1);	/* 00000001 IBLEND_UNK00 */
+		xf_emit(ctx, 8, 2);	/* 0000001f IBLEND_FUNC_SRC_RGB */
+		xf_emit(ctx, 8, 1);	/* 0000001f IBLEND_FUNC_DST_RGB */
+		xf_emit(ctx, 8, 2);	/* 0000001f IBLEND_FUNC_SRC_ALPHA */
+		xf_emit(ctx, 8, 1);	/* 0000001f IBLEND_FUNC_DST_ALPHA */
+		xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1140 */
+		xf_emit(ctx, 2, 0);	/* 00000001 */
+		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
+		xf_emit(ctx, 1, 0);	/* 0000000f */
+		xf_emit(ctx, 1, 0);	/* 00000003 */
+		xf_emit(ctx, 1, 0);	/* ffffffff */
+		xf_emit(ctx, 2, 0);	/* 00000001 */
+		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
+		xf_emit(ctx, 1, 0);	/* 00000001 */
+		xf_emit(ctx, 1, 0);	/* 000003ff */
+	} else if (device->chipset >= 0xa0) {
+		xf_emit(ctx, 2, 0);	/* 00000001 */
+		xf_emit(ctx, 1, 0);	/* 00000007 */
+		xf_emit(ctx, 1, 0);	/* 00000003 */
+		xf_emit(ctx, 1, 0);	/* ffffffff */
+		xf_emit(ctx, 2, 0);	/* 00000001 */
+	} else {
+		xf_emit(ctx, 1, 0);	/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1430 */
+		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
+	}
+	xf_emit(ctx, 4, 0);		/* ffffffff CLEAR_COLOR */
+	xf_emit(ctx, 4, 0);		/* ffffffff BLEND_COLOR A R G B */
+	xf_emit(ctx, 1, 0);		/* 00000fff eng2d UNK2B0 */
+	if (device->chipset >= 0xa0)
+		xf_emit(ctx, 2, 0);	/* 00000001 */
+	xf_emit(ctx, 1, 0);		/* 000003ff */
+	xf_emit(ctx, 8, 0);		/* 00000001 BLEND_ENABLE */
+	xf_emit(ctx, 1, 1);		/* 00000001 UNK133C */
+	xf_emit(ctx, 1, 2);		/* 0000001f BLEND_FUNC_SRC_RGB */
+	xf_emit(ctx, 1, 1);		/* 0000001f BLEND_FUNC_DST_RGB */
+	xf_emit(ctx, 1, 1);		/* 00000007 BLEND_EQUATION_RGB */
+	xf_emit(ctx, 1, 2);		/* 0000001f BLEND_FUNC_SRC_ALPHA */
+	xf_emit(ctx, 1, 1);		/* 0000001f BLEND_FUNC_DST_ALPHA */
+	xf_emit(ctx, 1, 1);		/* 00000007 BLEND_EQUATION_ALPHA */
+	xf_emit(ctx, 1, 0);		/* 00000001 UNK19C0 */
+	xf_emit(ctx, 1, 0);		/* 00000001 LOGIC_OP_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 0000000f LOGIC_OP */
+	if (device->chipset >= 0xa0)
+		xf_emit(ctx, 1, 0);	/* 00000001 UNK12E4? NVA3+ only? */
+	if (IS_NVA3F(device->chipset)) {
+		xf_emit(ctx, 8, 1);	/* 00000001 IBLEND_UNK00 */
+		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_RGB */
+		xf_emit(ctx, 8, 2);	/* 0000001f IBLEND_FUNC_SRC_RGB */
+		xf_emit(ctx, 8, 1);	/* 0000001f IBLEND_FUNC_DST_RGB */
+		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_ALPHA */
+		xf_emit(ctx, 8, 2);	/* 0000001f IBLEND_FUNC_SRC_ALPHA */
+		xf_emit(ctx, 8, 1);	/* 0000001f IBLEND_FUNC_DST_ALPHA */
+		xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK15C4 */
+		xf_emit(ctx, 1, 0);	/* 00000001 */
+		xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1140 */
+	}
+	xf_emit(ctx, 1, 0x11);		/* 3f/7f DST_FORMAT */
+	xf_emit(ctx, 1, 1);		/* 00000001 DST_LINEAR */
+	xf_emit(ctx, 1, 0);		/* 00000007 PATTERN_COLOR_FORMAT */
+	xf_emit(ctx, 2, 0);		/* ffffffff PATTERN_MONO_COLOR */
+	xf_emit(ctx, 1, 0);		/* 00000001 PATTERN_MONO_FORMAT */
+	xf_emit(ctx, 2, 0);		/* ffffffff PATTERN_MONO_BITMAP */
+	xf_emit(ctx, 1, 0);		/* 00000003 PATTERN_SELECT */
+	xf_emit(ctx, 1, 0);		/* 000000ff ROP */
+	xf_emit(ctx, 1, 0);		/* ffffffff BETA1 */
+	xf_emit(ctx, 1, 0);		/* ffffffff BETA4 */
+	xf_emit(ctx, 1, 0);		/* 00000007 OPERATION */
+	xf_emit(ctx, 0x50, 0);		/* 10x ffffff, ffffff, ffffff, ffffff, 3 PATTERN */
+}
+
+static void
+nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int magic3;
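+	/* magic3 appears to be a chipset-dependent value for tesla UNK141C;
+	 * see the conditional emits further down */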
+	switch (device->chipset) {
+	case 0x50:
+		magic3 = 0x1000;
+		break;
+	case 0x86:
+	case 0x98:
+	case 0xa8:
+	case 0xaa:
+	case 0xac:
+	case 0xaf:
+		magic3 = 0x1e00;
+		break;
+	default:
+		magic3 = 0;
+	}
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 4);		/* 7f/ff[NVA0+] VP_REG_ALLOC_RESULT */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 0);		/* 111/113[NVA0+] */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 0x1f, 0);	/* ffffffff */
+	else if (device->chipset >= 0xa0)
+		xf_emit(ctx, 0x0f, 0);	/* ffffffff */
+	else
+		xf_emit(ctx, 0x10, 0);	/* fffffff VP_RESULT_MAP_1 up */
+	xf_emit(ctx, 2, 0);		/* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
+	xf_emit(ctx, 1, 4);		/* 7f/ff VP_REG_ALLOC_RESULT */
+	xf_emit(ctx, 1, 4);		/* 7f/ff VP_RESULT_MAP_SIZE */
+	if (device->chipset >= 0xa0)
+		xf_emit(ctx, 1, 0x03020100);	/* ffffffff */
+	else
+		xf_emit(ctx, 1, 0x00608080);	/* fffffff VP_RESULT_MAP_0 */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 2, 0);		/* 111/113, 7f/ff */
+	xf_emit(ctx, 1, 4);		/* 7f/ff VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 4);		/* 000000ff GP_REG_ALLOC_RESULT */
+	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0x80);		/* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+	if (magic3)
+		xf_emit(ctx, 1, magic3);	/* 00007fff tesla UNK141C */
+	xf_emit(ctx, 1, 4);		/* 7f/ff VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 0);		/* 111/113 */
+	xf_emit(ctx, 0x1f, 0);		/* ffffffff GP_RESULT_MAP_1 up */
+	xf_emit(ctx, 1, 0);		/* 0000001f */
+	xf_emit(ctx, 1, 0);		/* ffffffff */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 4);		/* 000000ff GP_REG_ALLOC_RESULT */
+	xf_emit(ctx, 1, 0x80);		/* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0x03020100);	/* ffffffff GP_RESULT_MAP_0 */
+	xf_emit(ctx, 1, 3);		/* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
+	if (magic3)
+		xf_emit(ctx, 1, magic3);	/* 7fff tesla UNK141C */
+	xf_emit(ctx, 1, 4);		/* 7f/ff VP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 0);		/* 00000001 PROVOKING_VERTEX_LAST */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 0);		/* 111/113 */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 3);		/* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
+	xf_emit(ctx, 1, 0);		/* 00000001 PROVOKING_VERTEX_LAST */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK13A0 */
+	xf_emit(ctx, 1, 4);		/* 7f/ff VP_REG_ALLOC_RESULT */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+	xf_emit(ctx, 1, 0);		/* 111/113 */
+	if (device->chipset == 0x94 || device->chipset == 0x96)
+		xf_emit(ctx, 0x1020, 0);	/* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
+	else if (device->chipset < 0xa0)
+		xf_emit(ctx, 0xa20, 0);	/* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
+	else if (!IS_NVA3F(device->chipset))
+		xf_emit(ctx, 0x210, 0);	/* ffffffff */
+	else
+		xf_emit(ctx, 0x410, 0);	/* ffffffff */
+	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
+	xf_emit(ctx, 1, 3);		/* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
+	xf_emit(ctx, 1, 0);		/* 00000001 PROVOKING_VERTEX_LAST */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+}
+
+static void
+nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int magic1, magic2;
+	if (device->chipset == 0x50) {
+		magic1 = 0x3ff;
+		magic2 = 0x00003e60;
+	} else if (!IS_NVA3F(device->chipset)) {
+		magic1 = 0x7ff;
+		magic2 = 0x001ffe67;
+	} else {
+		magic1 = 0x7ff;
+		magic2 = 0x00087e67;
+	}
+	xf_emit(ctx, 1, 0);		/* 00000007 ALPHA_TEST_FUNC */
+	xf_emit(ctx, 1, 0);		/* ffffffff ALPHA_TEST_REF */
+	xf_emit(ctx, 1, 0);		/* 00000001 ALPHA_TEST_ENABLE */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 1);	/* 0000000f UNK16A0 */
+	xf_emit(ctx, 1, 0);		/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_MASK */
+	xf_emit(ctx, 3, 0);		/* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+	xf_emit(ctx, 4, 0);		/* ffffffff BLEND_COLOR */
+	xf_emit(ctx, 1, 0);		/* 00000001 UNK19C0 */
+	xf_emit(ctx, 1, 0);		/* 00000001 UNK0FDC */
+	xf_emit(ctx, 1, 0xf);		/* 0000000f COLOR_MASK */
+	xf_emit(ctx, 7, 0);		/* 0000000f COLOR_MASK */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 LOGIC_OP_ENABLE */
+	xf_emit(ctx, 1, 0);		/* ff[NV50]/3ff[NV84+] */
+	xf_emit(ctx, 1, 4);		/* 00000007 FP_CONTROL */
+	xf_emit(ctx, 4, 0xffff);	/* 0000ffff MSAA_MASK */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_MASK */
+	xf_emit(ctx, 3, 0);		/* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_BACK_ENABLE */
+	xf_emit(ctx, 2, 0);		/* 00007fff WINDOW_OFFSET_XY */
+	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK19CC */
+	xf_emit(ctx, 1, 0);		/* 7 */
+	xf_emit(ctx, 1, 0);		/* 00000001 SAMPLECNT_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
+	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
+	xf_emit(ctx, 1, 0);		/* ffffffff COLOR_KEY */
+	xf_emit(ctx, 1, 0);		/* 00000001 COLOR_KEY_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000007 COLOR_KEY_FORMAT */
+	xf_emit(ctx, 2, 0);		/* ffffffff SIFC_BITMAP_COLOR */
+	xf_emit(ctx, 1, 1);		/* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000007 ALPHA_TEST_FUNC */
+	xf_emit(ctx, 1, 0);		/* 00000001 ALPHA_TEST_ENABLE */
+	if (IS_NVA3F(device->chipset)) {
+		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK16B4 */
+		xf_emit(ctx, 1, 0);	/* 00000003 */
+		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1298 */
+	} else if (device->chipset >= 0xa0) {
+		xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK16B4 */
+		xf_emit(ctx, 1, 0);	/* 00000003 */
+	} else {
+		xf_emit(ctx, 1, 0);	/* 00000003 MULTISAMPLE_CTRL */
+	}
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
+	xf_emit(ctx, 8, 0);		/* 00000001 BLEND_ENABLE */
+	xf_emit(ctx, 1, 1);		/* 0000001f BLEND_FUNC_DST_ALPHA */
+	xf_emit(ctx, 1, 1);		/* 00000007 BLEND_EQUATION_ALPHA */
+	xf_emit(ctx, 1, 2);		/* 0000001f BLEND_FUNC_SRC_ALPHA */
+	xf_emit(ctx, 1, 1);		/* 0000001f BLEND_FUNC_DST_RGB */
+	xf_emit(ctx, 1, 1);		/* 00000007 BLEND_EQUATION_RGB */
+	xf_emit(ctx, 1, 2);		/* 0000001f BLEND_FUNC_SRC_RGB */
+	if (IS_NVA3F(device->chipset)) {
+		xf_emit(ctx, 1, 0);	/* 00000001 UNK12E4 */
+		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_RGB */
+		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_ALPHA */
+		xf_emit(ctx, 8, 1);	/* 00000001 IBLEND_UNK00 */
+		xf_emit(ctx, 8, 2);	/* 0000001f IBLEND_SRC_RGB */
+		xf_emit(ctx, 8, 1);	/* 0000001f IBLEND_DST_RGB */
+		xf_emit(ctx, 8, 2);	/* 0000001f IBLEND_SRC_ALPHA */
+		xf_emit(ctx, 8, 1);	/* 0000001f IBLEND_DST_ALPHA */
+		xf_emit(ctx, 1, 0);	/* 00000001 UNK1140 */
+	}
+	xf_emit(ctx, 1, 1);		/* 00000001 UNK133C */
+	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
+	xf_emit(ctx, 1, 0x11);		/* 3f/7f RT_FORMAT */
+	xf_emit(ctx, 7, 0);		/* 3f/7f RT_FORMAT */
+	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
+	xf_emit(ctx, 1, 0);		/* 00000001 LOGIC_OP_ENABLE */
+	xf_emit(ctx, 1, 0);		/* ff/3ff */
+	xf_emit(ctx, 1, 4);		/* 00000007 FP_CONTROL */
+	xf_emit(ctx, 1, 0);		/* 00000003 UNK0F90 */
+	xf_emit(ctx, 1, 0);		/* 00000001 FRAMEBUFFER_SRGB */
+	xf_emit(ctx, 1, 0);		/* 7 */
+	xf_emit(ctx, 1, 0x11);		/* 3f/7f DST_FORMAT */
+	xf_emit(ctx, 1, 1);		/* 00000001 DST_LINEAR */
+	xf_emit(ctx, 1, 0);		/* 00000007 OPERATION */
+	xf_emit(ctx, 1, 0xcf);		/* 000000ff SIFC_FORMAT */
+	xf_emit(ctx, 1, 0xcf);		/* 000000ff DRAW_COLOR_FORMAT */
+	xf_emit(ctx, 1, 0xcf);		/* 000000ff SRC_FORMAT */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
+	xf_emit(ctx, 1, 0);		/* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 8, 0);		/* 00000001 BLEND_ENABLE */
+	xf_emit(ctx, 1, 1);		/* 0000001f BLEND_FUNC_DST_ALPHA */
+	xf_emit(ctx, 1, 1);		/* 00000007 BLEND_EQUATION_ALPHA */
+	xf_emit(ctx, 1, 2);		/* 0000001f BLEND_FUNC_SRC_ALPHA */
+	xf_emit(ctx, 1, 1);		/* 0000001f BLEND_FUNC_DST_RGB */
+	xf_emit(ctx, 1, 1);		/* 00000007 BLEND_EQUATION_RGB */
+	xf_emit(ctx, 1, 2);		/* 0000001f BLEND_FUNC_SRC_RGB */
+	xf_emit(ctx, 1, 1);		/* 00000001 UNK133C */
+	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
+	xf_emit(ctx, 8, 1);		/* 00000001 UNK19E0 */
+	xf_emit(ctx, 1, 0x11);		/* 3f/7f RT_FORMAT */
+	xf_emit(ctx, 7, 0);		/* 3f/7f RT_FORMAT */
+	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
+	xf_emit(ctx, 1, 0xf);		/* 0000000f COLOR_MASK */
+	xf_emit(ctx, 7, 0);		/* 0000000f COLOR_MASK */
+	xf_emit(ctx, 1, magic2);	/* 001fffff tesla UNK0F78 */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_BOUNDS_EN */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
+	xf_emit(ctx, 1, 0x11);		/* 3f/7f DST_FORMAT */
+	xf_emit(ctx, 1, 1);		/* 00000001 DST_LINEAR */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
+	if (device->chipset == 0x50)
+		xf_emit(ctx, 1, 0);	/* ff */
+	else
+		xf_emit(ctx, 3, 0);	/* 1, 7, 3ff */
+	xf_emit(ctx, 1, 4);		/* 00000007 FP_CONTROL */
+	xf_emit(ctx, 1, 0);		/* 00000003 UNK0F90 */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000007 */
+	xf_emit(ctx, 1, 0);		/* 00000001 SAMPLECNT_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
+	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
+	xf_emit(ctx, 1, 0);		/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
+	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
+	xf_emit(ctx, 1, 0x11);		/* 3f/7f RT_FORMAT */
+	xf_emit(ctx, 7, 0);		/* 3f/7f RT_FORMAT */
+	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_BOUNDS_EN */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
+	xf_emit(ctx, 1, 0x11);		/* 3f/7f DST_FORMAT */
+	xf_emit(ctx, 1, 1);		/* 00000001 DST_LINEAR */
+	xf_emit(ctx, 1, 0);		/* 000fffff BLIT_DU_DX_FRACT */
+	xf_emit(ctx, 1, 1);		/* 0001ffff BLIT_DU_DX_INT */
+	xf_emit(ctx, 1, 0);		/* 000fffff BLIT_DV_DY_FRACT */
+	xf_emit(ctx, 1, 1);		/* 0001ffff BLIT_DV_DY_INT */
+	xf_emit(ctx, 1, 0);		/* ff/3ff */
+	xf_emit(ctx, 1, magic1);	/* 3ff/7ff tesla UNK0D68 */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
+	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK15B4 */
+	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
+	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000007 */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
+	xf_emit(ctx, 8, 0);		/* 0000ffff DMA_COLOR */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_GLOBAL */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_LOCAL */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_STACK */
+	xf_emit(ctx, 1, 0);		/* ff/3ff */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_DST */
+	xf_emit(ctx, 1, 0);		/* 7 */
+	xf_emit(ctx, 1, 0);		/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
+	xf_emit(ctx, 8, 0);		/* 000000ff RT_ADDRESS_HIGH */
+	xf_emit(ctx, 8, 0);		/* ffffffff RT_LAYER_STRIDE */
+	xf_emit(ctx, 8, 0);		/* ffffffff RT_ADDRESS_LOW */
+	xf_emit(ctx, 8, 8);		/* 0000007f RT_TILE_MODE */
+	xf_emit(ctx, 1, 0x11);		/* 3f/7f RT_FORMAT */
+	xf_emit(ctx, 7, 0);		/* 3f/7f RT_FORMAT */
+	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
+	xf_emit(ctx, 8, 0x400);		/* 0fffffff RT_HORIZ */
+	xf_emit(ctx, 8, 0x300);		/* 0000ffff RT_VERT */
+	xf_emit(ctx, 1, 1);		/* 00001fff RT_ARRAY_MODE */
+	xf_emit(ctx, 1, 0xf);		/* 0000000f COLOR_MASK */
+	xf_emit(ctx, 7, 0);		/* 0000000f COLOR_MASK */
+	xf_emit(ctx, 1, 0x20);		/* 00000fff DST_TILE_MODE */
+	xf_emit(ctx, 1, 0x11);		/* 3f/7f DST_FORMAT */
+	xf_emit(ctx, 1, 0x100);		/* 0001ffff DST_HEIGHT */
+	xf_emit(ctx, 1, 0);		/* 000007ff DST_LAYER */
+	xf_emit(ctx, 1, 1);		/* 00000001 DST_LINEAR */
+	xf_emit(ctx, 1, 0);		/* ffffffff DST_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0);		/* 000000ff DST_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0x40);		/* 0007ffff DST_PITCH */
+	xf_emit(ctx, 1, 0x100);		/* 0001ffff DST_WIDTH */
+	xf_emit(ctx, 1, 0);		/* 0000ffff */
+	xf_emit(ctx, 1, 3);		/* 00000003 tesla UNK15AC */
+	xf_emit(ctx, 1, 0);		/* ff/3ff */
+	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
+	xf_emit(ctx, 1, 0);		/* 00000003 UNK0F90 */
+	xf_emit(ctx, 1, 0);		/* 00000007 */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
+	xf_emit(ctx, 1, magic2);	/* 001fffff tesla UNK0F78 */
+	xf_emit(ctx, 1, 0);		/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
+	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
+	xf_emit(ctx, 1, 2);		/* 00000003 tesla UNK143C */
+	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_ZETA */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_BOUNDS_EN */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
+	xf_emit(ctx, 2, 0);		/* ffff, ff/3ff */
+	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_MASK */
+	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK15B4 */
+	xf_emit(ctx, 1, 0);		/* 00000007 */
+	xf_emit(ctx, 1, 0);		/* ffffffff ZETA_LAYER_STRIDE */
+	xf_emit(ctx, 1, 0);		/* 000000ff ZETA_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0);		/* ffffffff ZETA_ADDRESS_LOW */
+	xf_emit(ctx, 1, 4);		/* 00000007 ZETA_TILE_MODE */
+	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
+	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
+	xf_emit(ctx, 1, 0x400);		/* 0fffffff ZETA_HORIZ */
+	xf_emit(ctx, 1, 0x300);		/* 0000ffff ZETA_VERT */
+	xf_emit(ctx, 1, 0x1001);	/* 00001fff ZETA_ARRAY_MODE */
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
+	xf_emit(ctx, 1, 0);		/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 0);	/* 00000001 */
+	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
+	xf_emit(ctx, 1, 0x11);		/* 3f/7f RT_FORMAT */
+	xf_emit(ctx, 7, 0);		/* 3f/7f RT_FORMAT */
+	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
+	xf_emit(ctx, 1, 0xf);		/* 0000000f COLOR_MASK */
+	xf_emit(ctx, 7, 0);		/* 0000000f COLOR_MASK */
+	xf_emit(ctx, 1, 0);		/* ff/3ff */
+	xf_emit(ctx, 8, 0);		/* 00000001 BLEND_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000003 UNK0F90 */
+	xf_emit(ctx, 1, 0);		/* 00000001 FRAMEBUFFER_SRGB */
+	xf_emit(ctx, 1, 0);		/* 7 */
+	xf_emit(ctx, 1, 0);		/* 00000001 LOGIC_OP_ENABLE */
+	if (IS_NVA3F(device->chipset)) {
+		xf_emit(ctx, 1, 0);	/* 00000001 UNK1140 */
+		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
+	}
+	xf_emit(ctx, 1, 0);		/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 1, 0);		/* 00000001 UNK1534 */
+	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
+	if (device->chipset >= 0xa0)
+		xf_emit(ctx, 1, 0x0fac6881);	/* fffffff */
+	xf_emit(ctx, 1, magic2);	/* 001fffff tesla UNK0F78 */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_BOUNDS_EN */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
+	xf_emit(ctx, 1, 0x11);		/* 3f/7f DST_FORMAT */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK0FB0 */
+	xf_emit(ctx, 1, 0);		/* ff/3ff */
+	xf_emit(ctx, 1, 4);		/* 00000007 FP_CONTROL */
+	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
+	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK15B4 */
+	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK19CC */
+	xf_emit(ctx, 1, 0);		/* 00000007 */
+	xf_emit(ctx, 1, 0);		/* 00000001 SAMPLECNT_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
+	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
+	if (IS_NVA3F(device->chipset)) {
+		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
+		xf_emit(ctx, 1, 0);	/* 0000000f tesla UNK15C8 */
+	}
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A3C */
+	if (device->chipset >= 0xa0) {
+		xf_emit(ctx, 3, 0);		/* 7/f, 1, ffff0ff3 */
+		xf_emit(ctx, 1, 0xfac6881);	/* fffffff */
+		xf_emit(ctx, 4, 0);		/* 1, 1, 1, 3ff */
+		xf_emit(ctx, 1, 4);		/* 7 */
+		xf_emit(ctx, 1, 0);		/* 1 */
+		xf_emit(ctx, 2, 1);		/* 1 */
+		xf_emit(ctx, 2, 0);		/* 7, f */
+		xf_emit(ctx, 1, 1);		/* 1 */
+		xf_emit(ctx, 1, 0);		/* 7/f */
+		if (IS_NVA3F(device->chipset))
+			xf_emit(ctx, 0x9, 0);	/* 1 */
+		else
+			xf_emit(ctx, 0x8, 0);	/* 1 */
+		xf_emit(ctx, 1, 0);		/* ffff0ff3 */
+		xf_emit(ctx, 8, 1);		/* 1 */
+		xf_emit(ctx, 1, 0x11);		/* 7f */
+		xf_emit(ctx, 7, 0);		/* 7f */
+		xf_emit(ctx, 1, 0xfac6881);	/* fffffff */
+		xf_emit(ctx, 1, 0xf);		/* f */
+		xf_emit(ctx, 7, 0);		/* f */
+		xf_emit(ctx, 1, 0x11);		/* 7f */
+		xf_emit(ctx, 1, 1);		/* 1 */
+		xf_emit(ctx, 5, 0);		/* 1, 7, 3ff, 3, 7 */
+		if (IS_NVA3F(device->chipset)) {
+			xf_emit(ctx, 1, 0);	/* 00000001 UNK1140 */
+			xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
+		}
+	}
+}
+
+static void
+nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	xf_emit(ctx, 2, 0);		/* 1 LINKED_TSC. yes, 2. */
+	if (device->chipset != 0x50)
+		xf_emit(ctx, 1, 0);	/* 3 */
+	xf_emit(ctx, 1, 1);		/* 1ffff BLIT_DU_DX_INT */
+	xf_emit(ctx, 1, 0);		/* fffff BLIT_DU_DX_FRACT */
+	xf_emit(ctx, 1, 1);		/* 1ffff BLIT_DV_DY_INT */
+	xf_emit(ctx, 1, 0);		/* fffff BLIT_DV_DY_FRACT */
+	if (device->chipset == 0x50)
+		xf_emit(ctx, 1, 0);	/* 3 BLIT_CONTROL */
+	else
+		xf_emit(ctx, 2, 0);	/* 3ff, 1 */
+	xf_emit(ctx, 1, 0x2a712488);	/* ffffffff SRC_TIC_0 */
+	xf_emit(ctx, 1, 0);		/* ffffffff SRC_TIC_1 */
+	xf_emit(ctx, 1, 0x4085c000);	/* ffffffff SRC_TIC_2 */
+	xf_emit(ctx, 1, 0x40);		/* ffffffff SRC_TIC_3 */
+	xf_emit(ctx, 1, 0x100);		/* ffffffff SRC_TIC_4 */
+	xf_emit(ctx, 1, 0x10100);	/* ffffffff SRC_TIC_5 */
+	xf_emit(ctx, 1, 0x02800000);	/* ffffffff SRC_TIC_6 */
+	xf_emit(ctx, 1, 0);		/* ffffffff SRC_TIC_7 */
+	if (device->chipset == 0x50) {
+		xf_emit(ctx, 1, 0);	/* 00000001 turing UNK358 */
+		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A34? */
+		xf_emit(ctx, 1, 0);	/* 00000003 turing UNK37C tesla UNK1690 */
+		xf_emit(ctx, 1, 0);	/* 00000003 BLIT_CONTROL */
+		xf_emit(ctx, 1, 0);	/* 00000001 turing UNK32C tesla UNK0F94 */
+	} else if (!IS_NVAAF(device->chipset)) {
+		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A34? */
+		xf_emit(ctx, 1, 0);	/* 00000003 */
+		xf_emit(ctx, 1, 0);	/* 000003ff */
+		xf_emit(ctx, 1, 0);	/* 00000003 */
+		xf_emit(ctx, 1, 0);	/* 000003ff */
+		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1664 / turing UNK03E8 */
+		xf_emit(ctx, 1, 0);	/* 00000003 */
+		xf_emit(ctx, 1, 0);	/* 000003ff */
+	} else {
+		xf_emit(ctx, 0x6, 0);
+	}
+	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A34 */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_TEXTURE */
+	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_SRC */
+}
+
+static void
+nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	xf_emit(ctx, 1, 0);		/* 00000001 UNK1534 */
+	xf_emit(ctx, 1, 0);		/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 2, 0);		/* 7, ffff0ff3 */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE */
+	xf_emit(ctx, 1, 0x04e3bfdf);	/* ffffffff UNK0D64 */
+	xf_emit(ctx, 1, 0x04e3bfdf);	/* ffffffff UNK0DF4 */
+	xf_emit(ctx, 1, 1);		/* 00000001 UNK15B4 */
+	xf_emit(ctx, 1, 0);		/* 00000001 LINE_STIPPLE_ENABLE */
+	xf_emit(ctx, 1, 0x00ffff00);	/* 00ffffff LINE_STIPPLE_PATTERN */
+	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK0F98 */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
+	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK1668 */
+	xf_emit(ctx, 1, 0);		/* 00000001 LINE_STIPPLE_ENABLE */
+	xf_emit(ctx, 1, 0x00ffff00);	/* 00ffffff LINE_STIPPLE_PATTERN */
+	xf_emit(ctx, 1, 0);		/* 00000001 POLYGON_SMOOTH_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 UNK1534 */
+	xf_emit(ctx, 1, 0);		/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1658 */
+	xf_emit(ctx, 1, 0);		/* 00000001 LINE_SMOOTH_ENABLE */
+	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE */
+	xf_emit(ctx, 1, 1);		/* 00000001 UNK15B4 */
+	xf_emit(ctx, 1, 0);		/* 00000001 POINT_SPRITE_ENABLE */
+	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK165C */
+	xf_emit(ctx, 1, 0x30201000);	/* ffffffff tesla UNK1670 */
+	xf_emit(ctx, 1, 0x70605040);	/* ffffffff tesla UNK1670 */
+	xf_emit(ctx, 1, 0xb8a89888);	/* ffffffff tesla UNK1670 */
+	xf_emit(ctx, 1, 0xf8e8d8c8);	/* ffffffff tesla UNK1670 */
+	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
+	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE */
+}
+
+static void
+nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	if (device->chipset < 0xa0) {
+		nv50_graph_construct_xfer_unk84xx(ctx);
+		nv50_graph_construct_xfer_tprop(ctx);
+		nv50_graph_construct_xfer_tex(ctx);
+		nv50_graph_construct_xfer_unk8cxx(ctx);
+	} else {
+		nv50_graph_construct_xfer_tex(ctx);
+		nv50_graph_construct_xfer_tprop(ctx);
+		nv50_graph_construct_xfer_unk8cxx(ctx);
+		nv50_graph_construct_xfer_unk84xx(ctx);
+	}
+}
+
+static void
+nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int i, mpcnt = 2;
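+	/* mpcnt: presumably the number of MPs per TP on this chipset */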
+	switch (device->chipset) {
+	case 0x98:
+	case 0xaa:
+		mpcnt = 1;
+		break;
+	case 0x50:
+	case 0x84:
+	case 0x86:
+	case 0x92:
+	case 0x94:
+	case 0x96:
+	case 0xa8:
+	case 0xac:
+		mpcnt = 2;
+		break;
+	case 0xa0:
+	case 0xa3:
+	case 0xa5:
+	case 0xaf:
+		mpcnt = 3;
+		break;
+	}
+	for (i = 0; i < mpcnt; i++) {
+		xf_emit(ctx, 1, 0);		/* ff */
+		xf_emit(ctx, 1, 0x80);		/* ffffffff tesla UNK1404 */
+		xf_emit(ctx, 1, 0x80007004);	/* ffffffff tesla UNK12B0 */
+		xf_emit(ctx, 1, 0x04000400);	/* ffffffff */
+		if (device->chipset >= 0xa0)
+			xf_emit(ctx, 1, 0xc0);	/* 00007fff tesla UNK152C */
+		xf_emit(ctx, 1, 0x1000);	/* 0000ffff tesla UNK0D60 */
+		xf_emit(ctx, 1, 0);		/* ff/3ff */
+		xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
+		if (device->chipset == 0x86 || device->chipset == 0x98 ||
+		    device->chipset == 0xa8 || IS_NVAAF(device->chipset)) {
+			xf_emit(ctx, 1, 0xe00);		/* 7fff */
+			xf_emit(ctx, 1, 0x1e00);	/* 7fff */
+		}
+		xf_emit(ctx, 1, 1);		/* 000000ff VP_REG_ALLOC_TEMP */
+		xf_emit(ctx, 1, 0);		/* 00000001 LINKED_TSC */
+		xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+		if (device->chipset == 0x50)
+			xf_emit(ctx, 2, 0x1000);	/* 7fff tesla UNK141C */
+		xf_emit(ctx, 1, 1);		/* 000000ff GP_REG_ALLOC_TEMP */
+		xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
+		xf_emit(ctx, 1, 4);		/* 000000ff FP_REG_ALLOC_TEMP */
+		xf_emit(ctx, 1, 2);		/* 00000003 REG_MODE */
+		if (IS_NVAAF(device->chipset))
+			xf_emit(ctx, 0xb, 0);	/* RO */
+		else if (device->chipset >= 0xa0)
+			xf_emit(ctx, 0xc, 0);	/* RO */
+		else
+			xf_emit(ctx, 0xa, 0);	/* RO */
+	}
+	xf_emit(ctx, 1, 0x08100c12);		/* 1fffffff FP_INTERPOLANT_CTRL */
+	xf_emit(ctx, 1, 0);			/* ff/3ff */
+	if (device->chipset >= 0xa0) {
+		xf_emit(ctx, 1, 0x1fe21);	/* 0003ffff tesla UNK0FAC */
+	}
+	xf_emit(ctx, 3, 0);			/* 7fff, 0, 0 */
+	xf_emit(ctx, 1, 0);			/* 00000001 tesla UNK1534 */
+	xf_emit(ctx, 1, 0);			/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+	xf_emit(ctx, 4, 0xffff);		/* 0000ffff MSAA_MASK */
+	xf_emit(ctx, 1, 1);			/* 00000001 LANES32 */
+	xf_emit(ctx, 1, 0x10001);		/* 00ffffff BLOCK_ALLOC */
+	xf_emit(ctx, 1, 0x10001);		/* ffffffff BLOCKDIM_XY */
+	xf_emit(ctx, 1, 1);			/* 0000ffff BLOCKDIM_Z */
+	xf_emit(ctx, 1, 0);			/* ffffffff SHARED_SIZE */
+	xf_emit(ctx, 1, 0x1fe21);		/* 1ffff/3ffff[NVA0+] tesla UNK0FAC */
+	xf_emit(ctx, 1, 0);			/* ffffffff tesla UNK1A34 */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 1);		/* 0000001f tesla UNK169C */
+	xf_emit(ctx, 1, 0);			/* ff/3ff */
+	xf_emit(ctx, 1, 0);			/* 1 LINKED_TSC */
+	xf_emit(ctx, 1, 0);			/* ff FP_ADDRESS_HIGH */
+	xf_emit(ctx, 1, 0);			/* ffffffff FP_ADDRESS_LOW */
+	xf_emit(ctx, 1, 0x08100c12);		/* 1fffffff FP_INTERPOLANT_CTRL */
+	xf_emit(ctx, 1, 4);			/* 00000007 FP_CONTROL */
+	xf_emit(ctx, 1, 0);			/* 000000ff FRAG_COLOR_CLAMP_EN */
+	xf_emit(ctx, 1, 2);			/* 00000003 REG_MODE */
+	xf_emit(ctx, 1, 0x11);			/* 0000007f RT_FORMAT */
+	xf_emit(ctx, 7, 0);			/* 0000007f RT_FORMAT */
+	xf_emit(ctx, 1, 0);			/* 00000007 */
+	xf_emit(ctx, 1, 0xfac6881);		/* 0fffffff RT_CONTROL */
+	xf_emit(ctx, 1, 0);			/* 00000003 MULTISAMPLE_CTRL */
+	if (IS_NVA3F(device->chipset))
+		xf_emit(ctx, 1, 3);		/* 00000003 tesla UNK16B4 */
+	xf_emit(ctx, 1, 0);			/* 00000001 ALPHA_TEST_ENABLE */
+	xf_emit(ctx, 1, 0);			/* 00000007 ALPHA_TEST_FUNC */
+	xf_emit(ctx, 1, 0);			/* 00000001 FRAMEBUFFER_SRGB */
+	xf_emit(ctx, 1, 4);			/* ffffffff tesla UNK1400 */
+	xf_emit(ctx, 8, 0);			/* 00000001 BLEND_ENABLE */
+	xf_emit(ctx, 1, 0);			/* 00000001 LOGIC_OP_ENABLE */
+	xf_emit(ctx, 1, 2);			/* 0000001f BLEND_FUNC_SRC_RGB */
+	xf_emit(ctx, 1, 1);			/* 0000001f BLEND_FUNC_DST_RGB */
+	xf_emit(ctx, 1, 1);			/* 00000007 BLEND_EQUATION_RGB */
+	xf_emit(ctx, 1, 2);			/* 0000001f BLEND_FUNC_SRC_ALPHA */
+	xf_emit(ctx, 1, 1);			/* 0000001f BLEND_FUNC_DST_ALPHA */
+	xf_emit(ctx, 1, 1);			/* 00000007 BLEND_EQUATION_ALPHA */
+	xf_emit(ctx, 1, 1);			/* 00000001 UNK133C */
+	if (IS_NVA3F(device->chipset)) {
+		xf_emit(ctx, 1, 0);		/* 00000001 UNK12E4 */
+		xf_emit(ctx, 8, 2);		/* 0000001f IBLEND_FUNC_SRC_RGB */
+		xf_emit(ctx, 8, 1);		/* 0000001f IBLEND_FUNC_DST_RGB */
+		xf_emit(ctx, 8, 1);		/* 00000007 IBLEND_EQUATION_RGB */
+		xf_emit(ctx, 8, 2);		/* 0000001f IBLEND_FUNC_SRC_ALPHA */
+		xf_emit(ctx, 8, 1);		/* 0000001f IBLEND_FUNC_DST_ALPHA */
+		xf_emit(ctx, 8, 1);		/* 00000007 IBLEND_EQUATION_ALPHA */
+		xf_emit(ctx, 8, 1);		/* 00000001 IBLEND_UNK00 */
+		xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK1928 */
+		xf_emit(ctx, 1, 0);		/* 00000001 UNK1140 */
+	}
+	xf_emit(ctx, 1, 0);			/* 00000003 tesla UNK0F90 */
+	xf_emit(ctx, 1, 4);			/* 000000ff FP_RESULT_COUNT */
+	/* XXX: demagic this part some day */
+	if (device->chipset == 0x50)
+		xf_emit(ctx, 0x3a0, 0);
+	else if (device->chipset < 0x94)
+		xf_emit(ctx, 0x3a2, 0);
+	else if (device->chipset == 0x98 || device->chipset == 0xaa)
+		xf_emit(ctx, 0x39f, 0);
+	else
+		xf_emit(ctx, 0x3a3, 0);
+	xf_emit(ctx, 1, 0x11);			/* 3f/7f DST_FORMAT */
+	xf_emit(ctx, 1, 0);			/* 7 OPERATION */
+	xf_emit(ctx, 1, 1);			/* 1 DST_LINEAR */
+	xf_emit(ctx, 0x2d, 0);
+}
+
+static void
+nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
+{
+	struct nouveau_device *device = ctx->device;
+	int i;
+	u32 offset;
+	u32 units = nv_rd32(ctx->device, 0x1540);
+	int size = 0;
+
+	offset = (ctx->ctxvals_pos + 0x3f) & ~0x3f;
+
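+	/* xfer2 values appear to be interleaved across 8 strands: strand i
+	 * starts at offset + i and each value advances it by 8 positions,
+	 * hence the (ctxvals_pos - offset) / 8 size math below.
+	 */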
+	if (device->chipset < 0xa0) {
+		for (i = 0; i < 8; i++) {
+			ctx->ctxvals_pos = offset + i;
+			/* that little bugger belongs to csched. No idea
+			 * what it's doing here. */
+			if (i == 0)
+				xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
+			if (units & (1 << i))
+				nv50_graph_construct_xfer_mpc(ctx);
+			if ((ctx->ctxvals_pos - offset) / 8 > size)
+				size = (ctx->ctxvals_pos - offset) / 8;
+		}
+	} else {
+		/* Strand 0: TPs 0, 1 */
+		ctx->ctxvals_pos = offset;
+		/* that little bugger belongs to csched. No idea
+		 * what it's doing here. */
+		xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
+		if (units & (1 << 0))
+			nv50_graph_construct_xfer_mpc(ctx);
+		if (units & (1 << 1))
+			nv50_graph_construct_xfer_mpc(ctx);
+		if ((ctx->ctxvals_pos - offset) / 8 > size)
+			size = (ctx->ctxvals_pos - offset) / 8;
+
+		/* Strand 1: TPs 2, 3 */
+		ctx->ctxvals_pos = offset + 1;
+		if (units & (1 << 2))
+			nv50_graph_construct_xfer_mpc(ctx);
+		if (units & (1 << 3))
+			nv50_graph_construct_xfer_mpc(ctx);
+		if ((ctx->ctxvals_pos - offset) / 8 > size)
+			size = (ctx->ctxvals_pos - offset) / 8;
+
+		/* Strand 2: TPs 4, 5, 6 */
+		ctx->ctxvals_pos = offset + 2;
+		if (units & (1 << 4))
+			nv50_graph_construct_xfer_mpc(ctx);
+		if (units & (1 << 5))
+			nv50_graph_construct_xfer_mpc(ctx);
+		if (units & (1 << 6))
+			nv50_graph_construct_xfer_mpc(ctx);
+		if ((ctx->ctxvals_pos - offset) / 8 > size)
+			size = (ctx->ctxvals_pos - offset) / 8;
+
+		/* Strand 3: TPs 7, 8, 9 */
+		ctx->ctxvals_pos = offset + 3;
+		if (units & (1 << 7))
+			nv50_graph_construct_xfer_mpc(ctx);
+		if (units & (1 << 8))
+			nv50_graph_construct_xfer_mpc(ctx);
+		if (units & (1 << 9))
+			nv50_graph_construct_xfer_mpc(ctx);
+		if ((ctx->ctxvals_pos - offset) / 8 > size)
+			size = (ctx->ctxvals_pos - offset) / 8;
+	}
+	ctx->ctxvals_pos = offset + size * 8;
+	ctx->ctxvals_pos = (ctx->ctxvals_pos + 0x3f) & ~0x3f;
+	cp_lsr(ctx, offset);
+	cp_out(ctx, CP_SET_XFER_POINTER);
+	cp_lsr(ctx, size);
+	cp_out(ctx, CP_SEEK_2);
+	cp_out(ctx, CP_XFER_2);
+	cp_wait(ctx, XFER, BUSY);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
new file mode 100644
index 0000000..4cc6269
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -0,0 +1,3038 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+void
+nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data)
+{
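+	/* submit a single init command: write the data, then the command
+	 * word, and spin while the (presumed) busy bit in 0x400700 is set */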
+	nv_wr32(priv, 0x400204, data);
+	nv_wr32(priv, 0x400200, icmd);
+	while (nv_rd32(priv, 0x400700) & 2) {}
+}
+
+int
+nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+{
+	struct nouveau_bar *bar = nouveau_bar(priv);
+	struct nouveau_gpuobj *chan;
+	u32 size = (0x80000 + priv->size + 4095) & ~4095;
+	int ret, i;
+
+	/* allocate memory for a "channel", which we'll use to generate
+	 * the default context values
+	 */
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, size, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &info->chan);
+	chan = info->chan;
+	if (ret) {
+		nv_error(priv, "failed to allocate channel memory, %d\n", ret);
+		return ret;
+	}
+
+	/* PGD pointer */
+	nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
+	nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
+	nv_wo32(chan, 0x0208, 0xffffffff);
+	nv_wo32(chan, 0x020c, 0x000000ff);
+
+	/* PGT[0] pointer */
+	nv_wo32(chan, 0x1000, 0x00000000);
+	nv_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
+
+	/* identity-map the whole "channel" into its own vm */
+	for (i = 0; i < size / 4096; i++) {
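+		/* each PTE appears to be (physical address >> 8) with bit 0
+		 * as the present bit */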
+		u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1;
+		nv_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
+		nv_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
+	}
+
+	/* context pointer (virt) */
+	nv_wo32(chan, 0x0210, 0x00080004);
+	nv_wo32(chan, 0x0214, 0x00000000);
+
+	bar->flush(bar);
+
+	nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8);
+	nv_wr32(priv, 0x100cbc, 0x80000001);
+	nv_wait(priv, 0x100c80, 0x00008000, 0x00008000);
+
+	/* setup default state for mmio list construction */
+	info->data = priv->mmio_data;
+	info->mmio = priv->mmio_list;
+	info->addr = 0x2000 + (i * 8);
+	info->priv = priv;
+	info->buffer_nr = 0;
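+	/* info->addr now points just past the page tables; nvc0_grctx_data()
+	 * carves the context buffers out of channel memory from there */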
+
+	if (priv->firmware) {
+		nv_wr32(priv, 0x409840, 0x00000030);
+		nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
+		nv_wr32(priv, 0x409504, 0x00000003);
+		if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010))
+			nv_error(priv, "load_ctx timeout\n");
+
+		nv_wo32(chan, 0x8001c, 1);
+		nv_wo32(chan, 0x80020, 0);
+		nv_wo32(chan, 0x80028, 0);
+		nv_wo32(chan, 0x8002c, 0);
+		bar->flush(bar);
+		return 0;
+	}
+
+	/* HUB_FUC(SET_CHAN) */
+	nv_wr32(priv, 0x409840, 0x80000000);
+	nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
+	nv_wr32(priv, 0x409504, 0x00000001);
+	if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
+		nv_error(priv, "HUB_SET_CHAN timeout\n");
+		nvc0_graph_ctxctl_debug(priv);
+		nouveau_gpuobj_ref(NULL, &info->chan);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+void
+nvc0_grctx_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access)
+{
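+	/* round the current address up to the requested alignment (assumed
+	 * to be a power of two) and record it as the new buffer's base */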
+	info->buffer[info->buffer_nr]  = info->addr;
+	info->buffer[info->buffer_nr] +=  (align - 1);
+	info->buffer[info->buffer_nr] &= ~(align - 1);
+	info->addr = info->buffer[info->buffer_nr++] + size;
+
+	info->data->size = size;
+	info->data->align = align;
+	info->data->access = access;
+	info->data++;
+}
+
+void
+nvc0_grctx_mmio(struct nvc0_grctx *info, u32 addr, u32 data, u32 shift, u32 buf)
+{
+	struct nvc0_graph_priv *priv = info->priv;
+
+	info->mmio->addr = addr;
+	info->mmio->data = data;
+	info->mmio->shift = shift;
+	info->mmio->buffer = buf;
+	info->mmio++;
+
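+	/* a non-zero shift means the value written embeds the buffer's
+	 * address right-shifted by that amount */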
+	if (shift)
+		data |= info->buffer[buf] >> shift;
+	nv_wr32(priv, addr, data);
+}
+
+int
+nvc0_grctx_fini(struct nvc0_grctx *info)
+{
+	struct nvc0_graph_priv *priv = info->priv;
+	int i;
+
+	/* trigger a context unload by unsetting the "next channel valid" bit
+	 * and faking a context switch interrupt
+	 */
+	nv_mask(priv, 0x409b04, 0x80000000, 0x00000000);
+	nv_wr32(priv, 0x409000, 0x00000100);
+	if (!nv_wait(priv, 0x409b00, 0x80000000, 0x00000000)) {
+		nv_error(priv, "grctx template channel unload timeout\n");
+		return -EBUSY;
+	}
+
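+	/* copy the generated context image out of the channel buffer; it
+	 * lives at +0x80000, matching the size calculation in
+	 * nvc0_grctx_init() */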
+	priv->data = kmalloc(priv->size, GFP_KERNEL);
+	if (priv->data) {
+		for (i = 0; i < priv->size; i += 4)
+			priv->data[i / 4] = nv_ro32(info->chan, 0x80000 + i);
+	}
+
+	nouveau_gpuobj_ref(NULL, &info->chan);
+	return priv->data ? 0 : -ENOMEM;
+}
+
+static void
+nvc0_grctx_generate_9097(struct nvc0_graph_priv *priv)
+{
+	u32 fermi = nvc0_graph_class(priv);
+	u32 mthd;
+
+	nv_mthd(priv, 0x9097, 0x0800, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0840, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0880, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x08c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0900, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0940, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0980, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x09c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0804, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0844, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0884, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x08c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0904, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0944, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0984, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x09c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0808, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x0848, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x0888, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x08c8, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x0908, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x0948, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x0988, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x09c8, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x080c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x084c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x088c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x08cc, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x090c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x094c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x098c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x09cc, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x0810, 0x000000cf);
+	nv_mthd(priv, 0x9097, 0x0850, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0890, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x08d0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0910, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0950, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0990, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x09d0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0814, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0854, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0894, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x08d4, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0914, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0954, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0994, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x09d4, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0818, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0858, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0898, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x08d8, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0918, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0958, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0998, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x09d8, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x081c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x085c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x089c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x08dc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x091c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x095c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x099c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x09dc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0820, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0860, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x08a0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x08e0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0920, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0960, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x09a0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x09e0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2700, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2720, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2740, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2760, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2780, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27a0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27e0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2704, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2724, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2744, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2764, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2784, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27a4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27e4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2708, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2728, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2748, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2768, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2788, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27a8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27c8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27e8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x270c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x272c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x274c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x276c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x278c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27ac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27cc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x27ec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2710, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x2730, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x2750, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x2770, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x2790, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x27b0, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x27d0, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x27f0, 0x00014000);
+	nv_mthd(priv, 0x9097, 0x2714, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x2734, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x2754, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x2774, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x2794, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x27b4, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x27d4, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x27f4, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x1c00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ca0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cb0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cc0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cd0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ce0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cf0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c24, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c34, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c64, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c94, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ca4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cb4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cc4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cd4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ce4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cf4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c18, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c28, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c38, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c58, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c68, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c78, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c98, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ca8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cb8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cc8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cd8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ce8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cf8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c0c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c1c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c2c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c3c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c4c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c5c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c6c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c7c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1c9c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cbc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ccc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cdc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1cfc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1da0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1db0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dc0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dd0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1de0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1df0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d24, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d34, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d64, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d94, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1da4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1db4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dc4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dd4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1de4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1df4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d18, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d28, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d38, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d58, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d68, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d78, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d98, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1da8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1db8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dc8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dd8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1de8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1df8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d0c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d1c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d2c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d3c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d4c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d5c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d6c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d7c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1d9c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dbc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dcc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ddc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1dfc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f18, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f28, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f38, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f58, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f68, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f78, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f0c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f1c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f24, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f2c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f34, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f3c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f4c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f5c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f64, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f6c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f7c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f98, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fa0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fa8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fb0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fb8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fc0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fc8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fd0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fd8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fe0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fe8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ff0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ff8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f94, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1f9c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fa4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fb4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fbc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fc4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fcc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fd4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fdc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fe4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1fec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ff4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1ffc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2200, 0x00000022);
+	nv_mthd(priv, 0x9097, 0x2210, 0x00000022);
+	nv_mthd(priv, 0x9097, 0x2220, 0x00000022);
+	nv_mthd(priv, 0x9097, 0x2230, 0x00000022);
+	nv_mthd(priv, 0x9097, 0x2240, 0x00000022);
+	nv_mthd(priv, 0x9097, 0x2000, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2040, 0x00000011);
+	nv_mthd(priv, 0x9097, 0x2080, 0x00000020);
+	nv_mthd(priv, 0x9097, 0x20c0, 0x00000030);
+	nv_mthd(priv, 0x9097, 0x2100, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x2140, 0x00000051);
+	nv_mthd(priv, 0x9097, 0x200c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x204c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x208c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x20cc, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x210c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x214c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x2010, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2050, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2090, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x20d0, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x2110, 0x00000003);
+	nv_mthd(priv, 0x9097, 0x2150, 0x00000004);
+	nv_mthd(priv, 0x9097, 0x0380, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03a0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03e0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0384, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03a4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03e4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0388, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03a8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03c8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03e8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x038c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03ac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03cc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x03ec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0700, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0710, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0720, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0730, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0704, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0714, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0724, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0734, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0708, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0718, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0728, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0738, 0x00000000);
+	for (mthd = 0x2800; mthd <= 0x29fc; mthd += 4)
+		nv_mthd(priv, 0x9097, mthd, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0aa0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ac0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ae0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ba0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bc0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0be0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a24, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a64, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0aa4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ac4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ae4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b24, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b64, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ba4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bc4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0be4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a28, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a68, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0aa8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ac8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ae8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b28, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b68, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ba8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bc8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0be8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a0c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a2c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a4c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a6c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0aac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0acc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0aec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b0c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b2c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b4c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b6c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bcc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ab0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ad0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0af0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bb0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bd0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bf0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a34, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0a94, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ab4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ad4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0af4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b34, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0b94, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bb4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bd4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0bf4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ca0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cb0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cc0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cd0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ce0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cf0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c24, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c34, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c64, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c94, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ca4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cb4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cc4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cd4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ce4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cf4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c18, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c28, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c38, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c58, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c68, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c78, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c98, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ca8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cb8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cc8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cd8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ce8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0cf8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0c0c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c1c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c2c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c3c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c4c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c5c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c6c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c7c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c8c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0c9c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0cac, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0cbc, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0ccc, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0cdc, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0cec, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0cfc, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0d00, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d08, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d10, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d18, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d20, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d28, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d30, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d38, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d04, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d0c, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d14, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d1c, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d24, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d2c, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d34, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d3c, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e00, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e20, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e30, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e60, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e70, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ea0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0eb0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ec0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ed0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ee0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ef0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0e04, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e14, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e24, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e34, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e44, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e54, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e64, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e74, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e84, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e94, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ea4, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0eb4, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ec4, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ed4, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ee4, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ef4, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e08, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e18, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e28, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e38, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e48, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e58, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e68, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e78, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e88, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0e98, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ea8, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0eb8, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ec8, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ed8, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ee8, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0ef8, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d40, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d48, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d50, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d58, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d44, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d4c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d5c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1e00, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e20, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e40, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e60, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e80, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ea0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ec0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ee0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e04, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e24, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e44, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e64, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e84, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ea4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ec4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ee4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e08, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e28, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e48, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e68, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e88, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1ea8, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1ec8, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1ee8, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e0c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e2c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e4c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e6c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e8c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1eac, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ecc, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1eec, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e10, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e30, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e50, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e70, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e90, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1eb0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ed0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ef0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e14, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e34, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e54, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e74, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e94, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1eb4, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1ed4, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1ef4, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1e18, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e38, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e58, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e78, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1e98, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1eb8, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ed8, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1ef8, 0x00000001);
+	if (fermi == 0x9097) {
+		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+			nv_mthd(priv, 0x9097, mthd, 0x00000000);
+	}
+	nv_mthd(priv, 0x9097, 0x030c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1944, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1514, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d68, 0x0000ffff);
+	nv_mthd(priv, 0x9097, 0x121c, 0x0fac6881);
+	nv_mthd(priv, 0x9097, 0x0fac, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1538, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0fe0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0fe4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0fe8, 0x00000014);
+	nv_mthd(priv, 0x9097, 0x0fec, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x0ff0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x179c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1228, 0x00000400);
+	nv_mthd(priv, 0x9097, 0x122c, 0x00000300);
+	nv_mthd(priv, 0x9097, 0x1230, 0x00010001);
+	nv_mthd(priv, 0x9097, 0x07f8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x15b4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x15cc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1534, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0fb0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x15d0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x153c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x16b4, 0x00000003);
+	nv_mthd(priv, 0x9097, 0x0fbc, 0x0000ffff);
+	nv_mthd(priv, 0x9097, 0x0fc0, 0x0000ffff);
+	nv_mthd(priv, 0x9097, 0x0fc4, 0x0000ffff);
+	nv_mthd(priv, 0x9097, 0x0fc8, 0x0000ffff);
+	nv_mthd(priv, 0x9097, 0x0df8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0dfc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1948, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1970, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x161c, 0x000009f0);
+	nv_mthd(priv, 0x9097, 0x0dcc, 0x00000010);
+	nv_mthd(priv, 0x9097, 0x163c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x15e4, 0x00000000);
+	for (mthd = 0x1160; mthd <= 0x11dc; mthd += 4)
+		nv_mthd(priv, 0x9097, mthd, 0x25e00040);
+	for (mthd = 0x1880; mthd <= 0x18fc; mthd += 4)
+		nv_mthd(priv, 0x9097, mthd, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x17c8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x17cc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x17d0, 0x000000ff);
+	nv_mthd(priv, 0x9097, 0x17d4, 0xffffffff);
+	nv_mthd(priv, 0x9097, 0x17d8, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x17dc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x15f4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x15f8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1434, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1438, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d74, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0dec, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x13a4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1318, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1644, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0748, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0de8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1648, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x12a4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1120, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1124, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1128, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x112c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1118, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x164c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1658, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1910, 0x00000290);
+	nv_mthd(priv, 0x9097, 0x1518, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x165c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1520, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1604, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1570, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x13b0, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x13b4, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x020c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1670, 0x30201000);
+	nv_mthd(priv, 0x9097, 0x1674, 0x70605040);
+	nv_mthd(priv, 0x9097, 0x1678, 0xb8a89888);
+	nv_mthd(priv, 0x9097, 0x167c, 0xf8e8d8c8);
+	nv_mthd(priv, 0x9097, 0x166c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1680, 0x00ffff00);
+	nv_mthd(priv, 0x9097, 0x12d0, 0x00000003);
+	nv_mthd(priv, 0x9097, 0x12d4, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1684, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1688, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0dac, 0x00001b02);
+	nv_mthd(priv, 0x9097, 0x0db0, 0x00001b02);
+	nv_mthd(priv, 0x9097, 0x0db4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x168c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x15bc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x156c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x187c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1110, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0dc0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0dc4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0dc8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1234, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1690, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x12ac, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x02c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0790, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0794, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0798, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x079c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x07a0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x077c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1000, 0x00000010);
+	nv_mthd(priv, 0x9097, 0x10fc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1290, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0218, 0x00000010);
+	nv_mthd(priv, 0x9097, 0x12d8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x12dc, 0x00000010);
+	nv_mthd(priv, 0x9097, 0x0d94, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x155c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1560, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1564, 0x00001fff);
+	nv_mthd(priv, 0x9097, 0x1574, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1578, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x157c, 0x003fffff);
+	nv_mthd(priv, 0x9097, 0x1354, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1664, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1610, 0x00000012);
+	nv_mthd(priv, 0x9097, 0x1608, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x160c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x162c, 0x00000003);
+	nv_mthd(priv, 0x9097, 0x0210, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0320, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0324, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0328, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x032c, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0330, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0334, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0338, 0x3f800000);
+	nv_mthd(priv, 0x9097, 0x0750, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0760, 0x39291909);
+	nv_mthd(priv, 0x9097, 0x0764, 0x79695949);
+	nv_mthd(priv, 0x9097, 0x0768, 0xb9a99989);
+	nv_mthd(priv, 0x9097, 0x076c, 0xf9e9d9c9);
+	nv_mthd(priv, 0x9097, 0x0770, 0x30201000);
+	nv_mthd(priv, 0x9097, 0x0774, 0x70605040);
+	nv_mthd(priv, 0x9097, 0x0778, 0x00009080);
+	nv_mthd(priv, 0x9097, 0x0780, 0x39291909);
+	nv_mthd(priv, 0x9097, 0x0784, 0x79695949);
+	nv_mthd(priv, 0x9097, 0x0788, 0xb9a99989);
+	nv_mthd(priv, 0x9097, 0x078c, 0xf9e9d9c9);
+	nv_mthd(priv, 0x9097, 0x07d0, 0x30201000);
+	nv_mthd(priv, 0x9097, 0x07d4, 0x70605040);
+	nv_mthd(priv, 0x9097, 0x07d8, 0x00009080);
+	nv_mthd(priv, 0x9097, 0x037c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0740, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0744, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x2600, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1918, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x191c, 0x00000900);
+	nv_mthd(priv, 0x9097, 0x1920, 0x00000405);
+	nv_mthd(priv, 0x9097, 0x1308, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1924, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x13ac, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x192c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x193c, 0x00002c1c);
+	nv_mthd(priv, 0x9097, 0x0d7c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x02c0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1510, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1940, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ff4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0ff8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x194c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1950, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1968, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1590, 0x0000003f);
+	nv_mthd(priv, 0x9097, 0x07e8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x07ec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x07f0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x07f4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x196c, 0x00000011);
+	nv_mthd(priv, 0x9097, 0x197c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0fcc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0fd0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x02d8, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x1980, 0x00000080);
+	nv_mthd(priv, 0x9097, 0x1504, 0x00000080);
+	nv_mthd(priv, 0x9097, 0x1984, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0300, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x13a8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x12ec, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1310, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1314, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1380, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1384, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1388, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x138c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1390, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1394, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x139c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1398, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1594, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1598, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x159c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x15a0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x15a4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x0f54, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f58, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f5c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x19bc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f9c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0fa0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x12cc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x12e8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x130c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1360, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1364, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1368, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x136c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1370, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1374, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1378, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x137c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x133c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1340, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1344, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1348, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x134c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1350, 0x00000002);
+	nv_mthd(priv, 0x9097, 0x1358, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x12e4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x131c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1320, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1324, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1328, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x19c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1140, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x19c4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x19c8, 0x00001500);
+	nv_mthd(priv, 0x9097, 0x135c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x19e0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19e4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19e8, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19ec, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19f0, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19f4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19f8, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19fc, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x19cc, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x15b8, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a00, 0x00001111);
+	nv_mthd(priv, 0x9097, 0x1a04, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a08, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a0c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a10, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a14, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a18, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1a1c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d6c, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x0d70, 0xffff0000);
+	nv_mthd(priv, 0x9097, 0x10f8, 0x00001010);
+	nv_mthd(priv, 0x9097, 0x0d80, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d84, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d88, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d8c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0d90, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0da0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1508, 0x80000000);
+	nv_mthd(priv, 0x9097, 0x150c, 0x40000000);
+	nv_mthd(priv, 0x9097, 0x1668, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0318, 0x00000008);
+	nv_mthd(priv, 0x9097, 0x031c, 0x00000008);
+	nv_mthd(priv, 0x9097, 0x0d9c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x07dc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x074c, 0x00000055);
+	nv_mthd(priv, 0x9097, 0x1420, 0x00000003);
+	nv_mthd(priv, 0x9097, 0x17bc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x17c0, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x17c4, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1008, 0x00000008);
+	nv_mthd(priv, 0x9097, 0x100c, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x1010, 0x0000012c);
+	nv_mthd(priv, 0x9097, 0x0d60, 0x00000040);
+	nv_mthd(priv, 0x9097, 0x075c, 0x00000003);
+	nv_mthd(priv, 0x9097, 0x1018, 0x00000020);
+	nv_mthd(priv, 0x9097, 0x101c, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1020, 0x00000020);
+	nv_mthd(priv, 0x9097, 0x1024, 0x00000001);
+	nv_mthd(priv, 0x9097, 0x1444, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x1448, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x144c, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0360, 0x20164010);
+	nv_mthd(priv, 0x9097, 0x0364, 0x00000020);
+	nv_mthd(priv, 0x9097, 0x0368, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0de4, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0204, 0x00000006);
+	nv_mthd(priv, 0x9097, 0x0208, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x02cc, 0x003fffff);
+	nv_mthd(priv, 0x9097, 0x02d0, 0x00000c48);
+	nv_mthd(priv, 0x9097, 0x1220, 0x00000005);
+	nv_mthd(priv, 0x9097, 0x0fdc, 0x00000000);
+	nv_mthd(priv, 0x9097, 0x0f98, 0x00300008);
+	nv_mthd(priv, 0x9097, 0x1284, 0x04000080);
+	nv_mthd(priv, 0x9097, 0x1450, 0x00300008);
+	nv_mthd(priv, 0x9097, 0x1454, 0x04000080);
+	nv_mthd(priv, 0x9097, 0x0214, 0x00000000);
+	/* in the mmio trace this method appears right after the 0x90c0
+	 * methods, not at this point */
+	nv_mthd(priv, 0x9097, 0x3410, 0x80002006);
+}
+
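+/* The nvc0_grctx_generate_<class> functions replay per-class default
+ * method state captured from mmio traces of the binary driver; 0x9197
+ * and 0x9297 appear to be the GF108 and GF110 variants of the 0x9097
+ * Fermi 3D class.
+ */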
+static void
+nvc0_grctx_generate_9197(struct nvc0_graph_priv *priv)
+{
+	u32 fermi = nvc0_graph_class(priv);
+	u32 mthd;
+
+	if (fermi == 0x9197) {
+		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+			nv_mthd(priv, 0x9197, mthd, 0x00000000);
+	}
+	nv_mthd(priv, 0x9197, 0x02e4, 0x0000b001);
+}
+
+static void
+nvc0_grctx_generate_9297(struct nvc0_graph_priv *priv)
+{
+	u32 fermi = nvc0_graph_class(priv);
+	u32 mthd;
+
+	if (fermi == 0x9297) {
+		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+			nv_mthd(priv, 0x9297, mthd, 0x00000000);
+	}
+	nv_mthd(priv, 0x9297, 0x036c, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x0370, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x07a4, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x07a8, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x0374, 0x00000000);
+	nv_mthd(priv, 0x9297, 0x0378, 0x00000020);
+}
+
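+/* 0x902d: two-dimensional (2D) engine class defaults */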
+static void
+nvc0_grctx_generate_902d(struct nvc0_graph_priv *priv)
+{
+	nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
+	nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0208, 0x00000020);
+	nv_mthd(priv, 0x902d, 0x020c, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0210, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0214, 0x00000080);
+	nv_mthd(priv, 0x902d, 0x0218, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x021c, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x0220, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0224, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0230, 0x000000cf);
+	nv_mthd(priv, 0x902d, 0x0234, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0238, 0x00000020);
+	nv_mthd(priv, 0x902d, 0x023c, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0244, 0x00000080);
+	nv_mthd(priv, 0x902d, 0x0248, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x024c, 0x00000100);
+}
+
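+/* 0x9039: M2MF (memory-to-memory format) class defaults */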
+static void
+nvc0_grctx_generate_9039(struct nvc0_graph_priv *priv)
+{
+	nv_mthd(priv, 0x9039, 0x030c, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0310, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0314, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0320, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0238, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x023c, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x0318, 0x00000000);
+	nv_mthd(priv, 0x9039, 0x031c, 0x00000000);
+}
+
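+/* 0x90c0: Fermi compute class defaults; the bounded loops only run on
+ * chipset 0xd0 (GF119) and newer, which carry extra per-slot state.
+ */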
+static void
+nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
+{
+	int i;
+
+	for (i = 0; nv_device(priv)->chipset >= 0xd0 && i < 4; i++) {
+		nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
+		nv_mthd(priv, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
+	}
+	nv_mthd(priv, 0x90c0, 0x270c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x272c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x274c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x276c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x278c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000);
+	for (i = 0; nv_device(priv)->chipset >= 0xd0 && i < 4; i++) {
+		nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
+		nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
+		nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
+		nv_mthd(priv, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
+	}
+	nv_mthd(priv, 0x90c0, 0x030c, 0x00000001);
+	nv_mthd(priv, 0x90c0, 0x1944, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0758, 0x00000100);
+	nv_mthd(priv, 0x90c0, 0x02c4, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0790, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0794, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0798, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x079c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x07a0, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x077c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0204, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0208, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x020c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0214, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x024c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x0d94, 0x00000001);
+	nv_mthd(priv, 0x90c0, 0x1608, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x160c, 0x00000000);
+	nv_mthd(priv, 0x90c0, 0x1664, 0x00000000);
+}
+
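+/* The generate_* functions below load trace-derived defaults into the
+ * PGRAPH unit register blocks (dispatch, macro, m2mf, shaders, ccache,
+ * rop, gpc, tp) so they end up in the saved context image.
+ */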
+static void
+nvc0_grctx_generate_dispatch(struct nvc0_graph_priv *priv)
+{
+	int i;
+
+	nv_wr32(priv, 0x404004, 0x00000000);
+	nv_wr32(priv, 0x404008, 0x00000000);
+	nv_wr32(priv, 0x40400c, 0x00000000);
+	nv_wr32(priv, 0x404010, 0x00000000);
+	nv_wr32(priv, 0x404014, 0x00000000);
+	nv_wr32(priv, 0x404018, 0x00000000);
+	nv_wr32(priv, 0x40401c, 0x00000000);
+	nv_wr32(priv, 0x404020, 0x00000000);
+	nv_wr32(priv, 0x404024, 0x00000000);
+	nv_wr32(priv, 0x404028, 0x00000000);
+	nv_wr32(priv, 0x40402c, 0x00000000);
+	nv_wr32(priv, 0x404044, 0x00000000);
+	nv_wr32(priv, 0x404094, 0x00000000);
+	nv_wr32(priv, 0x404098, 0x00000000);
+	nv_wr32(priv, 0x40409c, 0x00000000);
+	nv_wr32(priv, 0x4040a0, 0x00000000);
+	nv_wr32(priv, 0x4040a4, 0x00000000);
+	nv_wr32(priv, 0x4040a8, 0x00000000);
+	nv_wr32(priv, 0x4040ac, 0x00000000);
+	nv_wr32(priv, 0x4040b0, 0x00000000);
+	nv_wr32(priv, 0x4040b4, 0x00000000);
+	nv_wr32(priv, 0x4040b8, 0x00000000);
+	nv_wr32(priv, 0x4040bc, 0x00000000);
+	nv_wr32(priv, 0x4040c0, 0x00000000);
+	nv_wr32(priv, 0x4040c4, 0x00000000);
+	nv_wr32(priv, 0x4040c8, 0xf0000087);
+	nv_wr32(priv, 0x4040d4, 0x00000000);
+	nv_wr32(priv, 0x4040d8, 0x00000000);
+	nv_wr32(priv, 0x4040dc, 0x00000000);
+	nv_wr32(priv, 0x4040e0, 0x00000000);
+	nv_wr32(priv, 0x4040e4, 0x00000000);
+	nv_wr32(priv, 0x4040e8, 0x00001000);
+	nv_wr32(priv, 0x4040f8, 0x00000000);
+	nv_wr32(priv, 0x404130, 0x00000000);
+	nv_wr32(priv, 0x404134, 0x00000000);
+	nv_wr32(priv, 0x404138, 0x20000040);
+	nv_wr32(priv, 0x404150, 0x0000002e);
+	nv_wr32(priv, 0x404154, 0x00000400);
+	nv_wr32(priv, 0x404158, 0x00000200);
+	nv_wr32(priv, 0x404164, 0x00000055);
+	nv_wr32(priv, 0x404168, 0x00000000);
+	nv_wr32(priv, 0x404174, 0x00000000);
+	nv_wr32(priv, 0x404178, 0x00000000);
+	nv_wr32(priv, 0x40417c, 0x00000000);
+	for (i = 0; i < 8; i++)
+		nv_wr32(priv, 0x404200 + (i * 4), 0x00000000); /* subc */
+}
+
+static void
+nvc0_grctx_generate_macro(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404404, 0x00000000);
+	nv_wr32(priv, 0x404408, 0x00000000);
+	nv_wr32(priv, 0x40440c, 0x00000000);
+	nv_wr32(priv, 0x404410, 0x00000000);
+	nv_wr32(priv, 0x404414, 0x00000000);
+	nv_wr32(priv, 0x404418, 0x00000000);
+	nv_wr32(priv, 0x40441c, 0x00000000);
+	nv_wr32(priv, 0x404420, 0x00000000);
+	nv_wr32(priv, 0x404424, 0x00000000);
+	nv_wr32(priv, 0x404428, 0x00000000);
+	nv_wr32(priv, 0x40442c, 0x00000000);
+	nv_wr32(priv, 0x404430, 0x00000000);
+	nv_wr32(priv, 0x404434, 0x00000000);
+	nv_wr32(priv, 0x404438, 0x00000000);
+	nv_wr32(priv, 0x404460, 0x00000000);
+	nv_wr32(priv, 0x404464, 0x00000000);
+	nv_wr32(priv, 0x404468, 0x00ffffff);
+	nv_wr32(priv, 0x40446c, 0x00000000);
+	nv_wr32(priv, 0x404480, 0x00000001);
+	nv_wr32(priv, 0x404498, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_m2mf(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404604, 0x00000015);
+	nv_wr32(priv, 0x404608, 0x00000000);
+	nv_wr32(priv, 0x40460c, 0x00002e00);
+	nv_wr32(priv, 0x404610, 0x00000100);
+	nv_wr32(priv, 0x404618, 0x00000000);
+	nv_wr32(priv, 0x40461c, 0x00000000);
+	nv_wr32(priv, 0x404620, 0x00000000);
+	nv_wr32(priv, 0x404624, 0x00000000);
+	nv_wr32(priv, 0x404628, 0x00000000);
+	nv_wr32(priv, 0x40462c, 0x00000000);
+	nv_wr32(priv, 0x404630, 0x00000000);
+	nv_wr32(priv, 0x404634, 0x00000000);
+	nv_wr32(priv, 0x404638, 0x00000004);
+	nv_wr32(priv, 0x40463c, 0x00000000);
+	nv_wr32(priv, 0x404640, 0x00000000);
+	nv_wr32(priv, 0x404644, 0x00000000);
+	nv_wr32(priv, 0x404648, 0x00000000);
+	nv_wr32(priv, 0x40464c, 0x00000000);
+	nv_wr32(priv, 0x404650, 0x00000000);
+	nv_wr32(priv, 0x404654, 0x00000000);
+	nv_wr32(priv, 0x404658, 0x00000000);
+	nv_wr32(priv, 0x40465c, 0x007f0100);
+	nv_wr32(priv, 0x404660, 0x00000000);
+	nv_wr32(priv, 0x404664, 0x00000000);
+	nv_wr32(priv, 0x404668, 0x00000000);
+	nv_wr32(priv, 0x40466c, 0x00000000);
+	nv_wr32(priv, 0x404670, 0x00000000);
+	nv_wr32(priv, 0x404674, 0x00000000);
+	nv_wr32(priv, 0x404678, 0x00000000);
+	nv_wr32(priv, 0x40467c, 0x00000002);
+	nv_wr32(priv, 0x404680, 0x00000000);
+	nv_wr32(priv, 0x404684, 0x00000000);
+	nv_wr32(priv, 0x404688, 0x00000000);
+	nv_wr32(priv, 0x40468c, 0x00000000);
+	nv_wr32(priv, 0x404690, 0x00000000);
+	nv_wr32(priv, 0x404694, 0x00000000);
+	nv_wr32(priv, 0x404698, 0x00000000);
+	nv_wr32(priv, 0x40469c, 0x00000000);
+	nv_wr32(priv, 0x4046a0, 0x007f0080);
+	nv_wr32(priv, 0x4046a4, 0x00000000);
+	nv_wr32(priv, 0x4046a8, 0x00000000);
+	nv_wr32(priv, 0x4046ac, 0x00000000);
+	nv_wr32(priv, 0x4046b0, 0x00000000);
+	nv_wr32(priv, 0x4046b4, 0x00000000);
+	nv_wr32(priv, 0x4046b8, 0x00000000);
+	nv_wr32(priv, 0x4046bc, 0x00000000);
+	nv_wr32(priv, 0x4046c0, 0x00000000);
+	nv_wr32(priv, 0x4046c4, 0x00000000);
+	nv_wr32(priv, 0x4046c8, 0x00000000);
+	nv_wr32(priv, 0x4046cc, 0x00000000);
+	nv_wr32(priv, 0x4046d0, 0x00000000);
+	nv_wr32(priv, 0x4046d4, 0x00000000);
+	nv_wr32(priv, 0x4046d8, 0x00000000);
+	nv_wr32(priv, 0x4046dc, 0x00000000);
+	nv_wr32(priv, 0x4046e0, 0x00000000);
+	nv_wr32(priv, 0x4046e4, 0x00000000);
+	nv_wr32(priv, 0x4046e8, 0x00000000);
+	nv_wr32(priv, 0x4046f0, 0x00000000);
+	nv_wr32(priv, 0x4046f4, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk47xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404700, 0x00000000);
+	nv_wr32(priv, 0x404704, 0x00000000);
+	nv_wr32(priv, 0x404708, 0x00000000);
+	nv_wr32(priv, 0x40470c, 0x00000000);
+	nv_wr32(priv, 0x404710, 0x00000000);
+	nv_wr32(priv, 0x404714, 0x00000000);
+	nv_wr32(priv, 0x404718, 0x00000000);
+	nv_wr32(priv, 0x40471c, 0x00000000);
+	nv_wr32(priv, 0x404720, 0x00000000);
+	nv_wr32(priv, 0x404724, 0x00000000);
+	nv_wr32(priv, 0x404728, 0x00000000);
+	nv_wr32(priv, 0x40472c, 0x00000000);
+	nv_wr32(priv, 0x404730, 0x00000000);
+	nv_wr32(priv, 0x404734, 0x00000100);
+	nv_wr32(priv, 0x404738, 0x00000000);
+	nv_wr32(priv, 0x40473c, 0x00000000);
+	nv_wr32(priv, 0x404740, 0x00000000);
+	nv_wr32(priv, 0x404744, 0x00000000);
+	nv_wr32(priv, 0x404748, 0x00000000);
+	nv_wr32(priv, 0x40474c, 0x00000000);
+	nv_wr32(priv, 0x404750, 0x00000000);
+	nv_wr32(priv, 0x404754, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv)
+{
+	if (nv_device(priv)->chipset >= 0xd0) {
+		nv_wr32(priv, 0x405800, 0x0f8000bf);
+		nv_wr32(priv, 0x405830, 0x02180218);
+		nv_wr32(priv, 0x405834, 0x08000000);
+	} else
+	if (nv_device(priv)->chipset == 0xc1) {
+		nv_wr32(priv, 0x405800, 0x0f8000bf);
+		nv_wr32(priv, 0x405830, 0x02180218);
+		nv_wr32(priv, 0x405834, 0x00000000);
+	} else {
+		nv_wr32(priv, 0x405800, 0x078000bf);
+		nv_wr32(priv, 0x405830, 0x02180000);
+		nv_wr32(priv, 0x405834, 0x00000000);
+	}
+	nv_wr32(priv, 0x405838, 0x00000000);
+	nv_wr32(priv, 0x405854, 0x00000000);
+	nv_wr32(priv, 0x405870, 0x00000001);
+	nv_wr32(priv, 0x405874, 0x00000001);
+	nv_wr32(priv, 0x405878, 0x00000001);
+	nv_wr32(priv, 0x40587c, 0x00000001);
+	nv_wr32(priv, 0x405a00, 0x00000000);
+	nv_wr32(priv, 0x405a04, 0x00000000);
+	nv_wr32(priv, 0x405a18, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk60xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x406020, 0x000103c1);
+	nv_wr32(priv, 0x406028, 0x00000001);
+	nv_wr32(priv, 0x40602c, 0x00000001);
+	nv_wr32(priv, 0x406030, 0x00000001);
+	nv_wr32(priv, 0x406034, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_unk64xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x4064a8, 0x00000000);
+	nv_wr32(priv, 0x4064ac, 0x00003fff);
+	nv_wr32(priv, 0x4064b4, 0x00000000);
+	nv_wr32(priv, 0x4064b8, 0x00000000);
+	if (nv_device(priv)->chipset >= 0xd0)
+		nv_wr32(priv, 0x4064bc, 0x00000000);
+	if (nv_device(priv)->chipset == 0xc1 ||
+	    nv_device(priv)->chipset >= 0xd0) {
+		nv_wr32(priv, 0x4064c0, 0x80140078);
+		nv_wr32(priv, 0x4064c4, 0x0086ffff);
+	}
+}
+
+static void
+nvc0_grctx_generate_tpbus(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x407804, 0x00000023);
+	nv_wr32(priv, 0x40780c, 0x0a418820);
+	nv_wr32(priv, 0x407810, 0x062080e6);
+	nv_wr32(priv, 0x407814, 0x020398a4);
+	nv_wr32(priv, 0x407818, 0x0e629062);
+	nv_wr32(priv, 0x40781c, 0x0a418820);
+	nv_wr32(priv, 0x407820, 0x000000e6);
+	nv_wr32(priv, 0x4078bc, 0x00000103);
+}
+
+static void
+nvc0_grctx_generate_ccache(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x408000, 0x00000000);
+	nv_wr32(priv, 0x408004, 0x00000000);
+	nv_wr32(priv, 0x408008, 0x00000018);
+	nv_wr32(priv, 0x40800c, 0x00000000);
+	nv_wr32(priv, 0x408010, 0x00000000);
+	nv_wr32(priv, 0x408014, 0x00000069);
+	nv_wr32(priv, 0x408018, 0xe100e100);
+	nv_wr32(priv, 0x408064, 0x00000000);
+}
+
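+/* ROP/GPC/TPC defaults are chipset-dependent: 0xc1 (GF108) and >= 0xd0
+ * (GF119 family) take different values than plain GF100/GF104.
+ */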
+static void
+nvc0_grctx_generate_rop(struct nvc0_graph_priv *priv)
+{
+	int chipset = nv_device(priv)->chipset;
+
+	/* ROPC_BROADCAST */
+	nv_wr32(priv, 0x408800, 0x02802a3c);
+	nv_wr32(priv, 0x408804, 0x00000040);
+	if (chipset >= 0xd0) {
+		nv_wr32(priv, 0x408808, 0x1043e005);
+		nv_wr32(priv, 0x408900, 0x3080b801);
+		nv_wr32(priv, 0x408904, 0x1043e005);
+		nv_wr32(priv, 0x408908, 0x00c8102f);
+	} else
+	if (chipset == 0xc1) {
+		nv_wr32(priv, 0x408808, 0x1003e005);
+		nv_wr32(priv, 0x408900, 0x3080b801);
+		nv_wr32(priv, 0x408904, 0x62000001);
+		nv_wr32(priv, 0x408908, 0x00c80929);
+	} else {
+		nv_wr32(priv, 0x408808, 0x0003e00d);
+		nv_wr32(priv, 0x408900, 0x3080b801);
+		nv_wr32(priv, 0x408904, 0x02000001);
+		nv_wr32(priv, 0x408908, 0x00c80929);
+	}
+	nv_wr32(priv, 0x40890c, 0x00000000);
+	nv_wr32(priv, 0x408980, 0x0000011d);
+}
+
+static void
+nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
+{
+	int chipset = nv_device(priv)->chipset;
+	int i;
+
+	/* GPC_BROADCAST */
+	nv_wr32(priv, 0x418380, 0x00000016);
+	nv_wr32(priv, 0x418400, 0x38004e00);
+	nv_wr32(priv, 0x418404, 0x71e0ffff);
+	nv_wr32(priv, 0x418408, 0x00000000);
+	nv_wr32(priv, 0x41840c, 0x00001008);
+	nv_wr32(priv, 0x418410, 0x0fff0fff);
+	nv_wr32(priv, 0x418414, chipset < 0xd0 ? 0x00200fff : 0x02200fff);
+	nv_wr32(priv, 0x418450, 0x00000000);
+	nv_wr32(priv, 0x418454, 0x00000000);
+	nv_wr32(priv, 0x418458, 0x00000000);
+	nv_wr32(priv, 0x41845c, 0x00000000);
+	nv_wr32(priv, 0x418460, 0x00000000);
+	nv_wr32(priv, 0x418464, 0x00000000);
+	nv_wr32(priv, 0x418468, 0x00000001);
+	nv_wr32(priv, 0x41846c, 0x00000000);
+	nv_wr32(priv, 0x418470, 0x00000000);
+	nv_wr32(priv, 0x418600, 0x0000001f);
+	nv_wr32(priv, 0x418684, 0x0000000f);
+	nv_wr32(priv, 0x418700, 0x00000002);
+	nv_wr32(priv, 0x418704, 0x00000080);
+	nv_wr32(priv, 0x418708, 0x00000000);
+	nv_wr32(priv, 0x41870c, chipset < 0xd0 ? 0x07c80000 : 0x00000000);
+	nv_wr32(priv, 0x418710, 0x00000000);
+	nv_wr32(priv, 0x418800, chipset < 0xd0 ? 0x0006860a : 0x7006860a);
+	nv_wr32(priv, 0x418808, 0x00000000);
+	nv_wr32(priv, 0x41880c, 0x00000000);
+	nv_wr32(priv, 0x418810, 0x00000000);
+	nv_wr32(priv, 0x418828, 0x00008442);
+	if (chipset == 0xc1 || chipset >= 0xd0)
+		nv_wr32(priv, 0x418830, 0x10000001);
+	else
+		nv_wr32(priv, 0x418830, 0x00000001);
+	nv_wr32(priv, 0x4188d8, 0x00000008);
+	nv_wr32(priv, 0x4188e0, 0x01000000);
+	nv_wr32(priv, 0x4188e8, 0x00000000);
+	nv_wr32(priv, 0x4188ec, 0x00000000);
+	nv_wr32(priv, 0x4188f0, 0x00000000);
+	nv_wr32(priv, 0x4188f4, 0x00000000);
+	nv_wr32(priv, 0x4188f8, 0x00000000);
+	if (chipset >= 0xd0)
+		nv_wr32(priv, 0x4188fc, 0x20100008);
+	else if (chipset == 0xc1)
+		nv_wr32(priv, 0x4188fc, 0x00100018);
+	else
+		nv_wr32(priv, 0x4188fc, 0x00100000);
+	nv_wr32(priv, 0x41891c, 0x00ff00ff);
+	nv_wr32(priv, 0x418924, 0x00000000);
+	nv_wr32(priv, 0x418928, 0x00ffff00);
+	nv_wr32(priv, 0x41892c, 0x0000ff00);
+	for (i = 0; i < 8; i++) {
+		nv_wr32(priv, 0x418a00 + (i * 0x20), 0x00000000);
+		nv_wr32(priv, 0x418a04 + (i * 0x20), 0x00000000);
+		nv_wr32(priv, 0x418a08 + (i * 0x20), 0x00000000);
+		nv_wr32(priv, 0x418a0c + (i * 0x20), 0x00010000);
+		nv_wr32(priv, 0x418a10 + (i * 0x20), 0x00000000);
+		nv_wr32(priv, 0x418a14 + (i * 0x20), 0x00000000);
+		nv_wr32(priv, 0x418a18 + (i * 0x20), 0x00000000);
+	}
+	nv_wr32(priv, 0x418b00, chipset < 0xd0 ? 0x00000000 : 0x00000006);
+	nv_wr32(priv, 0x418b08, 0x0a418820);
+	nv_wr32(priv, 0x418b0c, 0x062080e6);
+	nv_wr32(priv, 0x418b10, 0x020398a4);
+	nv_wr32(priv, 0x418b14, 0x0e629062);
+	nv_wr32(priv, 0x418b18, 0x0a418820);
+	nv_wr32(priv, 0x418b1c, 0x000000e6);
+	nv_wr32(priv, 0x418bb8, 0x00000103);
+	nv_wr32(priv, 0x418c08, 0x00000001);
+	nv_wr32(priv, 0x418c10, 0x00000000);
+	nv_wr32(priv, 0x418c14, 0x00000000);
+	nv_wr32(priv, 0x418c18, 0x00000000);
+	nv_wr32(priv, 0x418c1c, 0x00000000);
+	nv_wr32(priv, 0x418c20, 0x00000000);
+	nv_wr32(priv, 0x418c24, 0x00000000);
+	nv_wr32(priv, 0x418c28, 0x00000000);
+	nv_wr32(priv, 0x418c2c, 0x00000000);
+	if (chipset == 0xc1 || chipset >= 0xd0)
+		nv_wr32(priv, 0x418c6c, 0x00000001);
+	nv_wr32(priv, 0x418c80, 0x20200004);
+	nv_wr32(priv, 0x418c8c, 0x00000001);
+	nv_wr32(priv, 0x419000, 0x00000780);
+	nv_wr32(priv, 0x419004, 0x00000000);
+	nv_wr32(priv, 0x419008, 0x00000000);
+	nv_wr32(priv, 0x419014, 0x00000004);
+}
+
+static void
+nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
+{
+	int chipset = nv_device(priv)->chipset;
+
+	/* GPC_BROADCAST.TP_BROADCAST */
+	nv_wr32(priv, 0x419818, 0x00000000);
+	nv_wr32(priv, 0x41983c, 0x00038bc7);
+	nv_wr32(priv, 0x419848, 0x00000000);
+	if (chipset == 0xc1 || chipset >= 0xd0)
+		nv_wr32(priv, 0x419864, 0x00000129);
+	else
+		nv_wr32(priv, 0x419864, 0x0000012a);
+	nv_wr32(priv, 0x419888, 0x00000000);
+	nv_wr32(priv, 0x419a00, 0x000001f0);
+	nv_wr32(priv, 0x419a04, 0x00000001);
+	nv_wr32(priv, 0x419a08, 0x00000023);
+	nv_wr32(priv, 0x419a0c, 0x00020000);
+	nv_wr32(priv, 0x419a10, 0x00000000);
+	nv_wr32(priv, 0x419a14, 0x00000200);
+	nv_wr32(priv, 0x419a1c, 0x00000000);
+	nv_wr32(priv, 0x419a20, 0x00000800);
+	if (chipset >= 0xd0)
+		nv_wr32(priv, 0x00419ac4, 0x0017f440);
+	else if (chipset != 0xc0 && chipset != 0xc8)
+		nv_wr32(priv, 0x00419ac4, 0x0007f440);
+	nv_wr32(priv, 0x419b00, 0x0a418820);
+	nv_wr32(priv, 0x419b04, 0x062080e6);
+	nv_wr32(priv, 0x419b08, 0x020398a4);
+	nv_wr32(priv, 0x419b0c, 0x0e629062);
+	nv_wr32(priv, 0x419b10, 0x0a418820);
+	nv_wr32(priv, 0x419b14, 0x000000e6);
+	nv_wr32(priv, 0x419bd0, 0x00900103);
+	if (chipset == 0xc1 || chipset >= 0xd0)
+		nv_wr32(priv, 0x419be0, 0x00400001);
+	else
+		nv_wr32(priv, 0x419be0, 0x00000001);
+	nv_wr32(priv, 0x419be4, 0x00000000);
+	nv_wr32(priv, 0x419c00, chipset < 0xd0 ? 0x00000002 : 0x0000000a);
+	nv_wr32(priv, 0x419c04, 0x00000006);
+	nv_wr32(priv, 0x419c08, 0x00000002);
+	nv_wr32(priv, 0x419c20, 0x00000000);
+	if (nv_device(priv)->chipset >= 0xd0) {
+		nv_wr32(priv, 0x419c24, 0x00084210);
+		nv_wr32(priv, 0x419c28, 0x3cf3cf3c);
+		nv_wr32(priv, 0x419cb0, 0x00020048);
+	} else
+	if (chipset == 0xce || chipset == 0xcf) {
+		nv_wr32(priv, 0x419cb0, 0x00020048);
+	} else {
+		nv_wr32(priv, 0x419cb0, 0x00060048);
+	}
+	nv_wr32(priv, 0x419ce8, 0x00000000);
+	nv_wr32(priv, 0x419cf4, 0x00000183);
+	if (chipset == 0xc1 || chipset >= 0xd0)
+		nv_wr32(priv, 0x419d20, 0x12180000);
+	else
+		nv_wr32(priv, 0x419d20, 0x02180000);
+	nv_wr32(priv, 0x419d24, 0x00001fff);
+	if (chipset == 0xc1 || chipset >= 0xd0)
+		nv_wr32(priv, 0x419d44, 0x02180218);
+	nv_wr32(priv, 0x419e04, 0x00000000);
+	nv_wr32(priv, 0x419e08, 0x00000000);
+	nv_wr32(priv, 0x419e0c, 0x00000000);
+	nv_wr32(priv, 0x419e10, 0x00000002);
+	nv_wr32(priv, 0x419e44, 0x001beff2);
+	nv_wr32(priv, 0x419e48, 0x00000000);
+	nv_wr32(priv, 0x419e4c, 0x0000000f);
+	nv_wr32(priv, 0x419e50, 0x00000000);
+	nv_wr32(priv, 0x419e54, 0x00000000);
+	nv_wr32(priv, 0x419e58, 0x00000000);
+	nv_wr32(priv, 0x419e5c, 0x00000000);
+	nv_wr32(priv, 0x419e60, 0x00000000);
+	nv_wr32(priv, 0x419e64, 0x00000000);
+	nv_wr32(priv, 0x419e68, 0x00000000);
+	nv_wr32(priv, 0x419e6c, 0x00000000);
+	nv_wr32(priv, 0x419e70, 0x00000000);
+	nv_wr32(priv, 0x419e74, 0x00000000);
+	nv_wr32(priv, 0x419e78, 0x00000000);
+	nv_wr32(priv, 0x419e7c, 0x00000000);
+	nv_wr32(priv, 0x419e80, 0x00000000);
+	nv_wr32(priv, 0x419e84, 0x00000000);
+	nv_wr32(priv, 0x419e88, 0x00000000);
+	nv_wr32(priv, 0x419e8c, 0x00000000);
+	nv_wr32(priv, 0x419e90, 0x00000000);
+	nv_wr32(priv, 0x419e98, 0x00000000);
+	if (chipset != 0xc0 && chipset != 0xc8)
+		nv_wr32(priv, 0x419ee0, 0x00011110);
+	nv_wr32(priv, 0x419f50, 0x00000000);
+	nv_wr32(priv, 0x419f54, 0x00000000);
+	if (chipset != 0xc0 && chipset != 0xc8)
+		nv_wr32(priv, 0x419f58, 0x00000000);
+}
+
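+/* Build the "golden" context image: program unit defaults, record the
+ * per-context mmio list, distribute TPC ids across the GPCs, then seed
+ * default per-class method state via the init-command interface.
+ */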
+int
+nvc0_grctx_generate(struct nvc0_graph_priv *priv)
+{
+	struct nvc0_grctx info;
+	int ret, i, gpc, tpc, id;
+	u32 fermi = nvc0_graph_class(priv);
+	u32 r000260, tmp;
+
+	ret = nvc0_grctx_init(priv, &info);
+	if (ret)
+		return ret;
+
+	r000260 = nv_rd32(priv, 0x000260);
+	nv_wr32(priv, 0x000260, r000260 & ~1);
+	nv_wr32(priv, 0x400208, 0x00000000);
+
+	nvc0_grctx_generate_dispatch(priv);
+	nvc0_grctx_generate_macro(priv);
+	nvc0_grctx_generate_m2mf(priv);
+	nvc0_grctx_generate_unk47xx(priv);
+	nvc0_grctx_generate_shaders(priv);
+	nvc0_grctx_generate_unk60xx(priv);
+	nvc0_grctx_generate_unk64xx(priv);
+	nvc0_grctx_generate_tpbus(priv);
+	nvc0_grctx_generate_ccache(priv);
+	nvc0_grctx_generate_rop(priv);
+	nvc0_grctx_generate_gpc(priv);
+	nvc0_grctx_generate_tp(priv);
+
+	nv_wr32(priv, 0x404154, 0x00000000);
+
+	/* generate per-context mmio list data */
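+	/* mmio_data() reserves a buffer to be mapped into each channel's
+	 * address space; mmio_list() queues a register+value pair, and a
+	 * non-zero shift seemingly ORs (buffer address >> shift) of the
+	 * buffer picked by the last argument into the value.
+	 */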
+	mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+	mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+	mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+	mmio_list(0x408004, 0x00000000,  8, 0);
+	mmio_list(0x408008, 0x80000018,  0, 0);
+	mmio_list(0x40800c, 0x00000000,  8, 1);
+	mmio_list(0x408010, 0x80000000,  0, 0);
+	mmio_list(0x418810, 0x80000000, 12, 2);
+	mmio_list(0x419848, 0x10000000, 12, 2);
+	mmio_list(0x419004, 0x00000000,  8, 1);
+	mmio_list(0x419008, 0x00000000,  0, 0);
+	mmio_list(0x418808, 0x00000000,  8, 0);
+	mmio_list(0x41880c, 0x80000018,  0, 0);
+	if (nv_device(priv)->chipset != 0xc1) {
+		tmp = 0x02180000;
+		mmio_list(0x405830, tmp, 0, 0);
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+			for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+				u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
+				mmio_list(reg, tmp, 0, 0);
+				tmp += 0x0324;
+			}
+		}
+	} else {
+		tmp = 0x02180000;
+		mmio_list(0x405830, 0x00000218 | tmp, 0, 0);
+		mmio_list(0x4064c4, 0x0086ffff, 0, 0);
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+			for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+				u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
+				mmio_list(reg, 0x10000000 | tmp, 0, 0);
+				tmp += 0x0324;
+			}
+			for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+				u32 reg = TPC_UNIT(gpc, tpc, 0x0544);
+				mmio_list(reg, tmp, 0, 0);
+				tmp += 0x0324;
+			}
+		}
+	}
+
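+	/* hand out sequential ids to every TPC, interleaving across the
+	 * GPCs so the numbering stays balanced, and mirror each GPC's TPC
+	 * count into its per-GPC registers
+	 */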
+	for (tpc = 0, id = 0; tpc < 4; tpc++) {
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+			if (tpc < priv->tpc_nr[gpc]) {
+				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
+				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id);
+				nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
+				id++;
+			}
+
+			nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
+			nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+		}
+	}
+
+	tmp = 0;
+	for (i = 0; i < priv->gpc_nr; i++)
+		tmp |= priv->tpc_nr[i] << (i * 4);
+	nv_wr32(priv, 0x406028, tmp);
+	nv_wr32(priv, 0x405870, tmp);
+
+	nv_wr32(priv, 0x40602c, 0x00000000);
+	nv_wr32(priv, 0x405874, 0x00000000);
+	nv_wr32(priv, 0x406030, 0x00000000);
+	nv_wr32(priv, 0x405878, 0x00000000);
+	nv_wr32(priv, 0x406034, 0x00000000);
+	nv_wr32(priv, 0x40587c, 0x00000000);
+
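+	/* build a byte-per-TPC table of owning GPC indices, distributed
+	 * round-robin, and upload it as four words at 0x4060a8
+	 */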
+	if (1) {
+		u8 tpcnr[GPC_MAX], data[TPC_MAX];
+
+		memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+		memset(data, 0x1f, sizeof(data));
+
+		gpc = -1;
+		for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpcnr[gpc]);
+			tpcnr[gpc]--;
+			data[tpc] = gpc;
+		}
+
+		for (i = 0; i < 4; i++)
+			nv_wr32(priv, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+	}
+
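+	/* presumably helpers for dividing by the TPC count: data[] packs
+	 * the owning GPC of each TPC (5 bits each, 7 == unpopulated),
+	 * data2[] holds tpc_total shifted up until bit 4 is set, the
+	 * shift amount, and remainders of powers of two modulo that
+	 * normalised count
+	 */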
+	if (1) {
+		u32 data[6] = {}, data2[2] = {};
+		u8 tpcnr[GPC_MAX];
+		u8 shift, ntpcv;
+
+		/* calculate first set of magics */
+		memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+
+		gpc = -1;
+		for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpcnr[gpc]);
+			tpcnr[gpc]--;
+
+			data[tpc / 6] |= gpc << ((tpc % 6) * 5);
+		}
+
+		for (; tpc < 32; tpc++)
+			data[tpc / 6] |= 7 << ((tpc % 6) * 5);
+
+		/* ...and the second: normalise tpc_total until bit 4 is set,
+		 * remembering the shift */
+		shift = 0;
+		ntpcv = priv->tpc_total;
+		while (!(ntpcv & (1 << 4))) {
+			ntpcv <<= 1;
+			shift++;
+		}
+
+		data2[0]  = (ntpcv << 16);
+		data2[0] |= (shift << 21);
+		data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+		for (i = 1; i < 7; i++)
+			data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+		/* GPC_BROADCAST */
+		nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
+					priv->magic_not_rop_nr);
+		for (i = 0; i < 6; i++)
+			nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+
+		/* GPC_BROADCAST.TP_BROADCAST */
+		nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) |
+				       priv->magic_not_rop_nr |
+				       data2[0]);
+		nv_wr32(priv, 0x419be4, data2[1]);
+		for (i = 0; i < 6; i++)
+			nv_wr32(priv, 0x419b00 + (i * 4), data[i]);
+
+		/* UNK78xx */
+		nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
+					priv->magic_not_rop_nr);
+		for (i = 0; i < 6; i++)
+			nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+	}
+
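+	/* spread the TPCs across 32 table entries, writing both the
+	 * accumulated TPC mask and its complement for each entry
+	 */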
+	if (1) {
+		u32 tpc_mask = 0, tpc_set = 0;
+		u8  tpcnr[GPC_MAX], a, b;
+
+		memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+			tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+
+		for (i = 0, gpc = -1, b = -1; i < 32; i++) {
+			a = (i * (priv->tpc_total - 1)) / 32;
+			if (a != b) {
+				b = a;
+				do {
+					gpc = (gpc + 1) % priv->gpc_nr;
+				} while (!tpcnr[gpc]);
+				tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+				tpc_set |= 1 << ((gpc * 8) + tpc);
+			}
+
+			nv_wr32(priv, 0x406800 + (i * 0x20), tpc_set);
+			nv_wr32(priv, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
+		}
+	}
+
+	nv_wr32(priv, 0x400208, 0x80000000);
+
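+	/* 0x400208 bit 31 was set above to enable the ICMD interface;
+	 * each nv_icmd() below seeds one default state value into the
+	 * context (addresses per Nouveau reverse engineering)
+	 */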
+	nv_icmd(priv, 0x00001000, 0x00000004);
+	nv_icmd(priv, 0x000000a9, 0x0000ffff);
+	nv_icmd(priv, 0x00000038, 0x0fac6881);
+	nv_icmd(priv, 0x0000003d, 0x00000001);
+	nv_icmd(priv, 0x000000e8, 0x00000400);
+	nv_icmd(priv, 0x000000e9, 0x00000400);
+	nv_icmd(priv, 0x000000ea, 0x00000400);
+	nv_icmd(priv, 0x000000eb, 0x00000400);
+	nv_icmd(priv, 0x000000ec, 0x00000400);
+	nv_icmd(priv, 0x000000ed, 0x00000400);
+	nv_icmd(priv, 0x000000ee, 0x00000400);
+	nv_icmd(priv, 0x000000ef, 0x00000400);
+	nv_icmd(priv, 0x00000078, 0x00000300);
+	nv_icmd(priv, 0x00000079, 0x00000300);
+	nv_icmd(priv, 0x0000007a, 0x00000300);
+	nv_icmd(priv, 0x0000007b, 0x00000300);
+	nv_icmd(priv, 0x0000007c, 0x00000300);
+	nv_icmd(priv, 0x0000007d, 0x00000300);
+	nv_icmd(priv, 0x0000007e, 0x00000300);
+	nv_icmd(priv, 0x0000007f, 0x00000300);
+	nv_icmd(priv, 0x00000050, 0x00000011);
+	nv_icmd(priv, 0x00000058, 0x00000008);
+	nv_icmd(priv, 0x00000059, 0x00000008);
+	nv_icmd(priv, 0x0000005a, 0x00000008);
+	nv_icmd(priv, 0x0000005b, 0x00000008);
+	nv_icmd(priv, 0x0000005c, 0x00000008);
+	nv_icmd(priv, 0x0000005d, 0x00000008);
+	nv_icmd(priv, 0x0000005e, 0x00000008);
+	nv_icmd(priv, 0x0000005f, 0x00000008);
+	nv_icmd(priv, 0x00000208, 0x00000001);
+	nv_icmd(priv, 0x00000209, 0x00000001);
+	nv_icmd(priv, 0x0000020a, 0x00000001);
+	nv_icmd(priv, 0x0000020b, 0x00000001);
+	nv_icmd(priv, 0x0000020c, 0x00000001);
+	nv_icmd(priv, 0x0000020d, 0x00000001);
+	nv_icmd(priv, 0x0000020e, 0x00000001);
+	nv_icmd(priv, 0x0000020f, 0x00000001);
+	nv_icmd(priv, 0x00000081, 0x00000001);
+	nv_icmd(priv, 0x00000085, 0x00000004);
+	nv_icmd(priv, 0x00000088, 0x00000400);
+	nv_icmd(priv, 0x00000090, 0x00000300);
+	nv_icmd(priv, 0x00000098, 0x00001001);
+	nv_icmd(priv, 0x000000e3, 0x00000001);
+	nv_icmd(priv, 0x000000da, 0x00000001);
+	nv_icmd(priv, 0x000000f8, 0x00000003);
+	nv_icmd(priv, 0x000000fa, 0x00000001);
+	nv_icmd(priv, 0x0000009f, 0x0000ffff);
+	nv_icmd(priv, 0x000000a0, 0x0000ffff);
+	nv_icmd(priv, 0x000000a1, 0x0000ffff);
+	nv_icmd(priv, 0x000000a2, 0x0000ffff);
+	nv_icmd(priv, 0x000000b1, 0x00000001);
+	nv_icmd(priv, 0x000000b2, 0x00000000);
+	nv_icmd(priv, 0x000000b3, 0x00000000);
+	nv_icmd(priv, 0x000000b4, 0x00000000);
+	nv_icmd(priv, 0x000000b5, 0x00000000);
+	nv_icmd(priv, 0x000000b6, 0x00000000);
+	nv_icmd(priv, 0x000000b7, 0x00000000);
+	nv_icmd(priv, 0x000000b8, 0x00000000);
+	nv_icmd(priv, 0x000000b9, 0x00000000);
+	nv_icmd(priv, 0x000000ba, 0x00000000);
+	nv_icmd(priv, 0x000000bb, 0x00000000);
+	nv_icmd(priv, 0x000000bc, 0x00000000);
+	nv_icmd(priv, 0x000000bd, 0x00000000);
+	nv_icmd(priv, 0x000000be, 0x00000000);
+	nv_icmd(priv, 0x000000bf, 0x00000000);
+	nv_icmd(priv, 0x000000c0, 0x00000000);
+	nv_icmd(priv, 0x000000c1, 0x00000000);
+	nv_icmd(priv, 0x000000c2, 0x00000000);
+	nv_icmd(priv, 0x000000c3, 0x00000000);
+	nv_icmd(priv, 0x000000c4, 0x00000000);
+	nv_icmd(priv, 0x000000c5, 0x00000000);
+	nv_icmd(priv, 0x000000c6, 0x00000000);
+	nv_icmd(priv, 0x000000c7, 0x00000000);
+	nv_icmd(priv, 0x000000c8, 0x00000000);
+	nv_icmd(priv, 0x000000c9, 0x00000000);
+	nv_icmd(priv, 0x000000ca, 0x00000000);
+	nv_icmd(priv, 0x000000cb, 0x00000000);
+	nv_icmd(priv, 0x000000cc, 0x00000000);
+	nv_icmd(priv, 0x000000cd, 0x00000000);
+	nv_icmd(priv, 0x000000ce, 0x00000000);
+	nv_icmd(priv, 0x000000cf, 0x00000000);
+	nv_icmd(priv, 0x000000d0, 0x00000000);
+	nv_icmd(priv, 0x000000d1, 0x00000000);
+	nv_icmd(priv, 0x000000d2, 0x00000000);
+	nv_icmd(priv, 0x000000d3, 0x00000000);
+	nv_icmd(priv, 0x000000d4, 0x00000000);
+	nv_icmd(priv, 0x000000d5, 0x00000000);
+	nv_icmd(priv, 0x000000d6, 0x00000000);
+	nv_icmd(priv, 0x000000d7, 0x00000000);
+	nv_icmd(priv, 0x000000d8, 0x00000000);
+	nv_icmd(priv, 0x000000d9, 0x00000000);
+	nv_icmd(priv, 0x00000210, 0x00000040);
+	nv_icmd(priv, 0x00000211, 0x00000040);
+	nv_icmd(priv, 0x00000212, 0x00000040);
+	nv_icmd(priv, 0x00000213, 0x00000040);
+	nv_icmd(priv, 0x00000214, 0x00000040);
+	nv_icmd(priv, 0x00000215, 0x00000040);
+	nv_icmd(priv, 0x00000216, 0x00000040);
+	nv_icmd(priv, 0x00000217, 0x00000040);
+	if (nv_device(priv)->chipset >= 0xd0) {
+		for (i = 0x0400; i <= 0x0417; i++)
+			nv_icmd(priv, i, 0x00000040);
+	}
+	nv_icmd(priv, 0x00000218, 0x0000c080);
+	nv_icmd(priv, 0x00000219, 0x0000c080);
+	nv_icmd(priv, 0x0000021a, 0x0000c080);
+	nv_icmd(priv, 0x0000021b, 0x0000c080);
+	nv_icmd(priv, 0x0000021c, 0x0000c080);
+	nv_icmd(priv, 0x0000021d, 0x0000c080);
+	nv_icmd(priv, 0x0000021e, 0x0000c080);
+	nv_icmd(priv, 0x0000021f, 0x0000c080);
+	if (nv_device(priv)->chipset >= 0xd0) {
+		for (i = 0x0440; i <= 0x0457; i++)
+			nv_icmd(priv, i, 0x0000c080);
+	}
+	nv_icmd(priv, 0x000000ad, 0x0000013e);
+	nv_icmd(priv, 0x000000e1, 0x00000010);
+	nv_icmd(priv, 0x00000290, 0x00000000);
+	nv_icmd(priv, 0x00000291, 0x00000000);
+	nv_icmd(priv, 0x00000292, 0x00000000);
+	nv_icmd(priv, 0x00000293, 0x00000000);
+	nv_icmd(priv, 0x00000294, 0x00000000);
+	nv_icmd(priv, 0x00000295, 0x00000000);
+	nv_icmd(priv, 0x00000296, 0x00000000);
+	nv_icmd(priv, 0x00000297, 0x00000000);
+	nv_icmd(priv, 0x00000298, 0x00000000);
+	nv_icmd(priv, 0x00000299, 0x00000000);
+	nv_icmd(priv, 0x0000029a, 0x00000000);
+	nv_icmd(priv, 0x0000029b, 0x00000000);
+	nv_icmd(priv, 0x0000029c, 0x00000000);
+	nv_icmd(priv, 0x0000029d, 0x00000000);
+	nv_icmd(priv, 0x0000029e, 0x00000000);
+	nv_icmd(priv, 0x0000029f, 0x00000000);
+	nv_icmd(priv, 0x000003b0, 0x00000000);
+	nv_icmd(priv, 0x000003b1, 0x00000000);
+	nv_icmd(priv, 0x000003b2, 0x00000000);
+	nv_icmd(priv, 0x000003b3, 0x00000000);
+	nv_icmd(priv, 0x000003b4, 0x00000000);
+	nv_icmd(priv, 0x000003b5, 0x00000000);
+	nv_icmd(priv, 0x000003b6, 0x00000000);
+	nv_icmd(priv, 0x000003b7, 0x00000000);
+	nv_icmd(priv, 0x000003b8, 0x00000000);
+	nv_icmd(priv, 0x000003b9, 0x00000000);
+	nv_icmd(priv, 0x000003ba, 0x00000000);
+	nv_icmd(priv, 0x000003bb, 0x00000000);
+	nv_icmd(priv, 0x000003bc, 0x00000000);
+	nv_icmd(priv, 0x000003bd, 0x00000000);
+	nv_icmd(priv, 0x000003be, 0x00000000);
+	nv_icmd(priv, 0x000003bf, 0x00000000);
+	nv_icmd(priv, 0x000002a0, 0x00000000);
+	nv_icmd(priv, 0x000002a1, 0x00000000);
+	nv_icmd(priv, 0x000002a2, 0x00000000);
+	nv_icmd(priv, 0x000002a3, 0x00000000);
+	nv_icmd(priv, 0x000002a4, 0x00000000);
+	nv_icmd(priv, 0x000002a5, 0x00000000);
+	nv_icmd(priv, 0x000002a6, 0x00000000);
+	nv_icmd(priv, 0x000002a7, 0x00000000);
+	nv_icmd(priv, 0x000002a8, 0x00000000);
+	nv_icmd(priv, 0x000002a9, 0x00000000);
+	nv_icmd(priv, 0x000002aa, 0x00000000);
+	nv_icmd(priv, 0x000002ab, 0x00000000);
+	nv_icmd(priv, 0x000002ac, 0x00000000);
+	nv_icmd(priv, 0x000002ad, 0x00000000);
+	nv_icmd(priv, 0x000002ae, 0x00000000);
+	nv_icmd(priv, 0x000002af, 0x00000000);
+	nv_icmd(priv, 0x00000420, 0x00000000);
+	nv_icmd(priv, 0x00000421, 0x00000000);
+	nv_icmd(priv, 0x00000422, 0x00000000);
+	nv_icmd(priv, 0x00000423, 0x00000000);
+	nv_icmd(priv, 0x00000424, 0x00000000);
+	nv_icmd(priv, 0x00000425, 0x00000000);
+	nv_icmd(priv, 0x00000426, 0x00000000);
+	nv_icmd(priv, 0x00000427, 0x00000000);
+	nv_icmd(priv, 0x00000428, 0x00000000);
+	nv_icmd(priv, 0x00000429, 0x00000000);
+	nv_icmd(priv, 0x0000042a, 0x00000000);
+	nv_icmd(priv, 0x0000042b, 0x00000000);
+	nv_icmd(priv, 0x0000042c, 0x00000000);
+	nv_icmd(priv, 0x0000042d, 0x00000000);
+	nv_icmd(priv, 0x0000042e, 0x00000000);
+	nv_icmd(priv, 0x0000042f, 0x00000000);
+	nv_icmd(priv, 0x000002b0, 0x00000000);
+	nv_icmd(priv, 0x000002b1, 0x00000000);
+	nv_icmd(priv, 0x000002b2, 0x00000000);
+	nv_icmd(priv, 0x000002b3, 0x00000000);
+	nv_icmd(priv, 0x000002b4, 0x00000000);
+	nv_icmd(priv, 0x000002b5, 0x00000000);
+	nv_icmd(priv, 0x000002b6, 0x00000000);
+	nv_icmd(priv, 0x000002b7, 0x00000000);
+	nv_icmd(priv, 0x000002b8, 0x00000000);
+	nv_icmd(priv, 0x000002b9, 0x00000000);
+	nv_icmd(priv, 0x000002ba, 0x00000000);
+	nv_icmd(priv, 0x000002bb, 0x00000000);
+	nv_icmd(priv, 0x000002bc, 0x00000000);
+	nv_icmd(priv, 0x000002bd, 0x00000000);
+	nv_icmd(priv, 0x000002be, 0x00000000);
+	nv_icmd(priv, 0x000002bf, 0x00000000);
+	nv_icmd(priv, 0x00000430, 0x00000000);
+	nv_icmd(priv, 0x00000431, 0x00000000);
+	nv_icmd(priv, 0x00000432, 0x00000000);
+	nv_icmd(priv, 0x00000433, 0x00000000);
+	nv_icmd(priv, 0x00000434, 0x00000000);
+	nv_icmd(priv, 0x00000435, 0x00000000);
+	nv_icmd(priv, 0x00000436, 0x00000000);
+	nv_icmd(priv, 0x00000437, 0x00000000);
+	nv_icmd(priv, 0x00000438, 0x00000000);
+	nv_icmd(priv, 0x00000439, 0x00000000);
+	nv_icmd(priv, 0x0000043a, 0x00000000);
+	nv_icmd(priv, 0x0000043b, 0x00000000);
+	nv_icmd(priv, 0x0000043c, 0x00000000);
+	nv_icmd(priv, 0x0000043d, 0x00000000);
+	nv_icmd(priv, 0x0000043e, 0x00000000);
+	nv_icmd(priv, 0x0000043f, 0x00000000);
+	nv_icmd(priv, 0x000002c0, 0x00000000);
+	nv_icmd(priv, 0x000002c1, 0x00000000);
+	nv_icmd(priv, 0x000002c2, 0x00000000);
+	nv_icmd(priv, 0x000002c3, 0x00000000);
+	nv_icmd(priv, 0x000002c4, 0x00000000);
+	nv_icmd(priv, 0x000002c5, 0x00000000);
+	nv_icmd(priv, 0x000002c6, 0x00000000);
+	nv_icmd(priv, 0x000002c7, 0x00000000);
+	nv_icmd(priv, 0x000002c8, 0x00000000);
+	nv_icmd(priv, 0x000002c9, 0x00000000);
+	nv_icmd(priv, 0x000002ca, 0x00000000);
+	nv_icmd(priv, 0x000002cb, 0x00000000);
+	nv_icmd(priv, 0x000002cc, 0x00000000);
+	nv_icmd(priv, 0x000002cd, 0x00000000);
+	nv_icmd(priv, 0x000002ce, 0x00000000);
+	nv_icmd(priv, 0x000002cf, 0x00000000);
+	nv_icmd(priv, 0x000004d0, 0x00000000);
+	nv_icmd(priv, 0x000004d1, 0x00000000);
+	nv_icmd(priv, 0x000004d2, 0x00000000);
+	nv_icmd(priv, 0x000004d3, 0x00000000);
+	nv_icmd(priv, 0x000004d4, 0x00000000);
+	nv_icmd(priv, 0x000004d5, 0x00000000);
+	nv_icmd(priv, 0x000004d6, 0x00000000);
+	nv_icmd(priv, 0x000004d7, 0x00000000);
+	nv_icmd(priv, 0x000004d8, 0x00000000);
+	nv_icmd(priv, 0x000004d9, 0x00000000);
+	nv_icmd(priv, 0x000004da, 0x00000000);
+	nv_icmd(priv, 0x000004db, 0x00000000);
+	nv_icmd(priv, 0x000004dc, 0x00000000);
+	nv_icmd(priv, 0x000004dd, 0x00000000);
+	nv_icmd(priv, 0x000004de, 0x00000000);
+	nv_icmd(priv, 0x000004df, 0x00000000);
+	nv_icmd(priv, 0x00000720, 0x00000000);
+	nv_icmd(priv, 0x00000721, 0x00000000);
+	nv_icmd(priv, 0x00000722, 0x00000000);
+	nv_icmd(priv, 0x00000723, 0x00000000);
+	nv_icmd(priv, 0x00000724, 0x00000000);
+	nv_icmd(priv, 0x00000725, 0x00000000);
+	nv_icmd(priv, 0x00000726, 0x00000000);
+	nv_icmd(priv, 0x00000727, 0x00000000);
+	nv_icmd(priv, 0x00000728, 0x00000000);
+	nv_icmd(priv, 0x00000729, 0x00000000);
+	nv_icmd(priv, 0x0000072a, 0x00000000);
+	nv_icmd(priv, 0x0000072b, 0x00000000);
+	nv_icmd(priv, 0x0000072c, 0x00000000);
+	nv_icmd(priv, 0x0000072d, 0x00000000);
+	nv_icmd(priv, 0x0000072e, 0x00000000);
+	nv_icmd(priv, 0x0000072f, 0x00000000);
+	nv_icmd(priv, 0x000008c0, 0x00000000);
+	nv_icmd(priv, 0x000008c1, 0x00000000);
+	nv_icmd(priv, 0x000008c2, 0x00000000);
+	nv_icmd(priv, 0x000008c3, 0x00000000);
+	nv_icmd(priv, 0x000008c4, 0x00000000);
+	nv_icmd(priv, 0x000008c5, 0x00000000);
+	nv_icmd(priv, 0x000008c6, 0x00000000);
+	nv_icmd(priv, 0x000008c7, 0x00000000);
+	nv_icmd(priv, 0x000008c8, 0x00000000);
+	nv_icmd(priv, 0x000008c9, 0x00000000);
+	nv_icmd(priv, 0x000008ca, 0x00000000);
+	nv_icmd(priv, 0x000008cb, 0x00000000);
+	nv_icmd(priv, 0x000008cc, 0x00000000);
+	nv_icmd(priv, 0x000008cd, 0x00000000);
+	nv_icmd(priv, 0x000008ce, 0x00000000);
+	nv_icmd(priv, 0x000008cf, 0x00000000);
+	nv_icmd(priv, 0x00000890, 0x00000000);
+	nv_icmd(priv, 0x00000891, 0x00000000);
+	nv_icmd(priv, 0x00000892, 0x00000000);
+	nv_icmd(priv, 0x00000893, 0x00000000);
+	nv_icmd(priv, 0x00000894, 0x00000000);
+	nv_icmd(priv, 0x00000895, 0x00000000);
+	nv_icmd(priv, 0x00000896, 0x00000000);
+	nv_icmd(priv, 0x00000897, 0x00000000);
+	nv_icmd(priv, 0x00000898, 0x00000000);
+	nv_icmd(priv, 0x00000899, 0x00000000);
+	nv_icmd(priv, 0x0000089a, 0x00000000);
+	nv_icmd(priv, 0x0000089b, 0x00000000);
+	nv_icmd(priv, 0x0000089c, 0x00000000);
+	nv_icmd(priv, 0x0000089d, 0x00000000);
+	nv_icmd(priv, 0x0000089e, 0x00000000);
+	nv_icmd(priv, 0x0000089f, 0x00000000);
+	nv_icmd(priv, 0x000008e0, 0x00000000);
+	nv_icmd(priv, 0x000008e1, 0x00000000);
+	nv_icmd(priv, 0x000008e2, 0x00000000);
+	nv_icmd(priv, 0x000008e3, 0x00000000);
+	nv_icmd(priv, 0x000008e4, 0x00000000);
+	nv_icmd(priv, 0x000008e5, 0x00000000);
+	nv_icmd(priv, 0x000008e6, 0x00000000);
+	nv_icmd(priv, 0x000008e7, 0x00000000);
+	nv_icmd(priv, 0x000008e8, 0x00000000);
+	nv_icmd(priv, 0x000008e9, 0x00000000);
+	nv_icmd(priv, 0x000008ea, 0x00000000);
+	nv_icmd(priv, 0x000008eb, 0x00000000);
+	nv_icmd(priv, 0x000008ec, 0x00000000);
+	nv_icmd(priv, 0x000008ed, 0x00000000);
+	nv_icmd(priv, 0x000008ee, 0x00000000);
+	nv_icmd(priv, 0x000008ef, 0x00000000);
+	nv_icmd(priv, 0x000008a0, 0x00000000);
+	nv_icmd(priv, 0x000008a1, 0x00000000);
+	nv_icmd(priv, 0x000008a2, 0x00000000);
+	nv_icmd(priv, 0x000008a3, 0x00000000);
+	nv_icmd(priv, 0x000008a4, 0x00000000);
+	nv_icmd(priv, 0x000008a5, 0x00000000);
+	nv_icmd(priv, 0x000008a6, 0x00000000);
+	nv_icmd(priv, 0x000008a7, 0x00000000);
+	nv_icmd(priv, 0x000008a8, 0x00000000);
+	nv_icmd(priv, 0x000008a9, 0x00000000);
+	nv_icmd(priv, 0x000008aa, 0x00000000);
+	nv_icmd(priv, 0x000008ab, 0x00000000);
+	nv_icmd(priv, 0x000008ac, 0x00000000);
+	nv_icmd(priv, 0x000008ad, 0x00000000);
+	nv_icmd(priv, 0x000008ae, 0x00000000);
+	nv_icmd(priv, 0x000008af, 0x00000000);
+	nv_icmd(priv, 0x000008f0, 0x00000000);
+	nv_icmd(priv, 0x000008f1, 0x00000000);
+	nv_icmd(priv, 0x000008f2, 0x00000000);
+	nv_icmd(priv, 0x000008f3, 0x00000000);
+	nv_icmd(priv, 0x000008f4, 0x00000000);
+	nv_icmd(priv, 0x000008f5, 0x00000000);
+	nv_icmd(priv, 0x000008f6, 0x00000000);
+	nv_icmd(priv, 0x000008f7, 0x00000000);
+	nv_icmd(priv, 0x000008f8, 0x00000000);
+	nv_icmd(priv, 0x000008f9, 0x00000000);
+	nv_icmd(priv, 0x000008fa, 0x00000000);
+	nv_icmd(priv, 0x000008fb, 0x00000000);
+	nv_icmd(priv, 0x000008fc, 0x00000000);
+	nv_icmd(priv, 0x000008fd, 0x00000000);
+	nv_icmd(priv, 0x000008fe, 0x00000000);
+	nv_icmd(priv, 0x000008ff, 0x00000000);
+	nv_icmd(priv, 0x0000094c, 0x000000ff);
+	nv_icmd(priv, 0x0000094d, 0xffffffff);
+	nv_icmd(priv, 0x0000094e, 0x00000002);
+	nv_icmd(priv, 0x000002ec, 0x00000001);
+	nv_icmd(priv, 0x00000303, 0x00000001);
+	nv_icmd(priv, 0x000002e6, 0x00000001);
+	nv_icmd(priv, 0x00000466, 0x00000052);
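+	/* 0x3f800000 is IEEE-754 1.0f; several defaults below are floats */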
+	nv_icmd(priv, 0x00000301, 0x3f800000);
+	nv_icmd(priv, 0x00000304, 0x30201000);
+	nv_icmd(priv, 0x00000305, 0x70605040);
+	nv_icmd(priv, 0x00000306, 0xb8a89888);
+	nv_icmd(priv, 0x00000307, 0xf8e8d8c8);
+	nv_icmd(priv, 0x0000030a, 0x00ffff00);
+	nv_icmd(priv, 0x0000030b, 0x0000001a);
+	nv_icmd(priv, 0x0000030c, 0x00000001);
+	nv_icmd(priv, 0x00000318, 0x00000001);
+	nv_icmd(priv, 0x00000340, 0x00000000);
+	nv_icmd(priv, 0x00000375, 0x00000001);
+	nv_icmd(priv, 0x00000351, 0x00000100);
+	nv_icmd(priv, 0x0000037d, 0x00000006);
+	nv_icmd(priv, 0x000003a0, 0x00000002);
+	nv_icmd(priv, 0x000003aa, 0x00000001);
+	nv_icmd(priv, 0x000003a9, 0x00000001);
+	nv_icmd(priv, 0x00000380, 0x00000001);
+	nv_icmd(priv, 0x00000360, 0x00000040);
+	nv_icmd(priv, 0x00000366, 0x00000000);
+	nv_icmd(priv, 0x00000367, 0x00000000);
+	nv_icmd(priv, 0x00000368, 0x00001fff);
+	nv_icmd(priv, 0x00000370, 0x00000000);
+	nv_icmd(priv, 0x00000371, 0x00000000);
+	nv_icmd(priv, 0x00000372, 0x003fffff);
+	nv_icmd(priv, 0x0000037a, 0x00000012);
+	nv_icmd(priv, 0x000005e0, 0x00000022);
+	nv_icmd(priv, 0x000005e1, 0x00000022);
+	nv_icmd(priv, 0x000005e2, 0x00000022);
+	nv_icmd(priv, 0x000005e3, 0x00000022);
+	nv_icmd(priv, 0x000005e4, 0x00000022);
+	nv_icmd(priv, 0x00000619, 0x00000003);
+	nv_icmd(priv, 0x00000811, 0x00000003);
+	nv_icmd(priv, 0x00000812, 0x00000004);
+	nv_icmd(priv, 0x00000813, 0x00000006);
+	nv_icmd(priv, 0x00000814, 0x00000008);
+	nv_icmd(priv, 0x00000815, 0x0000000b);
+	nv_icmd(priv, 0x00000800, 0x00000001);
+	nv_icmd(priv, 0x00000801, 0x00000001);
+	nv_icmd(priv, 0x00000802, 0x00000001);
+	nv_icmd(priv, 0x00000803, 0x00000001);
+	nv_icmd(priv, 0x00000804, 0x00000001);
+	nv_icmd(priv, 0x00000805, 0x00000001);
+	nv_icmd(priv, 0x00000632, 0x00000001);
+	nv_icmd(priv, 0x00000633, 0x00000002);
+	nv_icmd(priv, 0x00000634, 0x00000003);
+	nv_icmd(priv, 0x00000635, 0x00000004);
+	nv_icmd(priv, 0x00000654, 0x3f800000);
+	nv_icmd(priv, 0x00000657, 0x3f800000);
+	nv_icmd(priv, 0x00000655, 0x3f800000);
+	nv_icmd(priv, 0x00000656, 0x3f800000);
+	nv_icmd(priv, 0x000006cd, 0x3f800000);
+	nv_icmd(priv, 0x000007f5, 0x3f800000);
+	nv_icmd(priv, 0x000007dc, 0x39291909);
+	nv_icmd(priv, 0x000007dd, 0x79695949);
+	nv_icmd(priv, 0x000007de, 0xb9a99989);
+	nv_icmd(priv, 0x000007df, 0xf9e9d9c9);
+	nv_icmd(priv, 0x000007e8, 0x00003210);
+	nv_icmd(priv, 0x000007e9, 0x00007654);
+	nv_icmd(priv, 0x000007ea, 0x00000098);
+	nv_icmd(priv, 0x000007ec, 0x39291909);
+	nv_icmd(priv, 0x000007ed, 0x79695949);
+	nv_icmd(priv, 0x000007ee, 0xb9a99989);
+	nv_icmd(priv, 0x000007ef, 0xf9e9d9c9);
+	nv_icmd(priv, 0x000007f0, 0x00003210);
+	nv_icmd(priv, 0x000007f1, 0x00007654);
+	nv_icmd(priv, 0x000007f2, 0x00000098);
+	nv_icmd(priv, 0x000005a5, 0x00000001);
+	nv_icmd(priv, 0x00000980, 0x00000000);
+	nv_icmd(priv, 0x00000981, 0x00000000);
+	nv_icmd(priv, 0x00000982, 0x00000000);
+	nv_icmd(priv, 0x00000983, 0x00000000);
+	nv_icmd(priv, 0x00000984, 0x00000000);
+	nv_icmd(priv, 0x00000985, 0x00000000);
+	nv_icmd(priv, 0x00000986, 0x00000000);
+	nv_icmd(priv, 0x00000987, 0x00000000);
+	nv_icmd(priv, 0x00000988, 0x00000000);
+	nv_icmd(priv, 0x00000989, 0x00000000);
+	nv_icmd(priv, 0x0000098a, 0x00000000);
+	nv_icmd(priv, 0x0000098b, 0x00000000);
+	nv_icmd(priv, 0x0000098c, 0x00000000);
+	nv_icmd(priv, 0x0000098d, 0x00000000);
+	nv_icmd(priv, 0x0000098e, 0x00000000);
+	nv_icmd(priv, 0x0000098f, 0x00000000);
+	nv_icmd(priv, 0x00000990, 0x00000000);
+	nv_icmd(priv, 0x00000991, 0x00000000);
+	nv_icmd(priv, 0x00000992, 0x00000000);
+	nv_icmd(priv, 0x00000993, 0x00000000);
+	nv_icmd(priv, 0x00000994, 0x00000000);
+	nv_icmd(priv, 0x00000995, 0x00000000);
+	nv_icmd(priv, 0x00000996, 0x00000000);
+	nv_icmd(priv, 0x00000997, 0x00000000);
+	nv_icmd(priv, 0x00000998, 0x00000000);
+	nv_icmd(priv, 0x00000999, 0x00000000);
+	nv_icmd(priv, 0x0000099a, 0x00000000);
+	nv_icmd(priv, 0x0000099b, 0x00000000);
+	nv_icmd(priv, 0x0000099c, 0x00000000);
+	nv_icmd(priv, 0x0000099d, 0x00000000);
+	nv_icmd(priv, 0x0000099e, 0x00000000);
+	nv_icmd(priv, 0x0000099f, 0x00000000);
+	nv_icmd(priv, 0x000009a0, 0x00000000);
+	nv_icmd(priv, 0x000009a1, 0x00000000);
+	nv_icmd(priv, 0x000009a2, 0x00000000);
+	nv_icmd(priv, 0x000009a3, 0x00000000);
+	nv_icmd(priv, 0x000009a4, 0x00000000);
+	nv_icmd(priv, 0x000009a5, 0x00000000);
+	nv_icmd(priv, 0x000009a6, 0x00000000);
+	nv_icmd(priv, 0x000009a7, 0x00000000);
+	nv_icmd(priv, 0x000009a8, 0x00000000);
+	nv_icmd(priv, 0x000009a9, 0x00000000);
+	nv_icmd(priv, 0x000009aa, 0x00000000);
+	nv_icmd(priv, 0x000009ab, 0x00000000);
+	nv_icmd(priv, 0x000009ac, 0x00000000);
+	nv_icmd(priv, 0x000009ad, 0x00000000);
+	nv_icmd(priv, 0x000009ae, 0x00000000);
+	nv_icmd(priv, 0x000009af, 0x00000000);
+	nv_icmd(priv, 0x000009b0, 0x00000000);
+	nv_icmd(priv, 0x000009b1, 0x00000000);
+	nv_icmd(priv, 0x000009b2, 0x00000000);
+	nv_icmd(priv, 0x000009b3, 0x00000000);
+	nv_icmd(priv, 0x000009b4, 0x00000000);
+	nv_icmd(priv, 0x000009b5, 0x00000000);
+	nv_icmd(priv, 0x000009b6, 0x00000000);
+	nv_icmd(priv, 0x000009b7, 0x00000000);
+	nv_icmd(priv, 0x000009b8, 0x00000000);
+	nv_icmd(priv, 0x000009b9, 0x00000000);
+	nv_icmd(priv, 0x000009ba, 0x00000000);
+	nv_icmd(priv, 0x000009bb, 0x00000000);
+	nv_icmd(priv, 0x000009bc, 0x00000000);
+	nv_icmd(priv, 0x000009bd, 0x00000000);
+	nv_icmd(priv, 0x000009be, 0x00000000);
+	nv_icmd(priv, 0x000009bf, 0x00000000);
+	nv_icmd(priv, 0x000009c0, 0x00000000);
+	nv_icmd(priv, 0x000009c1, 0x00000000);
+	nv_icmd(priv, 0x000009c2, 0x00000000);
+	nv_icmd(priv, 0x000009c3, 0x00000000);
+	nv_icmd(priv, 0x000009c4, 0x00000000);
+	nv_icmd(priv, 0x000009c5, 0x00000000);
+	nv_icmd(priv, 0x000009c6, 0x00000000);
+	nv_icmd(priv, 0x000009c7, 0x00000000);
+	nv_icmd(priv, 0x000009c8, 0x00000000);
+	nv_icmd(priv, 0x000009c9, 0x00000000);
+	nv_icmd(priv, 0x000009ca, 0x00000000);
+	nv_icmd(priv, 0x000009cb, 0x00000000);
+	nv_icmd(priv, 0x000009cc, 0x00000000);
+	nv_icmd(priv, 0x000009cd, 0x00000000);
+	nv_icmd(priv, 0x000009ce, 0x00000000);
+	nv_icmd(priv, 0x000009cf, 0x00000000);
+	nv_icmd(priv, 0x000009d0, 0x00000000);
+	nv_icmd(priv, 0x000009d1, 0x00000000);
+	nv_icmd(priv, 0x000009d2, 0x00000000);
+	nv_icmd(priv, 0x000009d3, 0x00000000);
+	nv_icmd(priv, 0x000009d4, 0x00000000);
+	nv_icmd(priv, 0x000009d5, 0x00000000);
+	nv_icmd(priv, 0x000009d6, 0x00000000);
+	nv_icmd(priv, 0x000009d7, 0x00000000);
+	nv_icmd(priv, 0x000009d8, 0x00000000);
+	nv_icmd(priv, 0x000009d9, 0x00000000);
+	nv_icmd(priv, 0x000009da, 0x00000000);
+	nv_icmd(priv, 0x000009db, 0x00000000);
+	nv_icmd(priv, 0x000009dc, 0x00000000);
+	nv_icmd(priv, 0x000009dd, 0x00000000);
+	nv_icmd(priv, 0x000009de, 0x00000000);
+	nv_icmd(priv, 0x000009df, 0x00000000);
+	nv_icmd(priv, 0x000009e0, 0x00000000);
+	nv_icmd(priv, 0x000009e1, 0x00000000);
+	nv_icmd(priv, 0x000009e2, 0x00000000);
+	nv_icmd(priv, 0x000009e3, 0x00000000);
+	nv_icmd(priv, 0x000009e4, 0x00000000);
+	nv_icmd(priv, 0x000009e5, 0x00000000);
+	nv_icmd(priv, 0x000009e6, 0x00000000);
+	nv_icmd(priv, 0x000009e7, 0x00000000);
+	nv_icmd(priv, 0x000009e8, 0x00000000);
+	nv_icmd(priv, 0x000009e9, 0x00000000);
+	nv_icmd(priv, 0x000009ea, 0x00000000);
+	nv_icmd(priv, 0x000009eb, 0x00000000);
+	nv_icmd(priv, 0x000009ec, 0x00000000);
+	nv_icmd(priv, 0x000009ed, 0x00000000);
+	nv_icmd(priv, 0x000009ee, 0x00000000);
+	nv_icmd(priv, 0x000009ef, 0x00000000);
+	nv_icmd(priv, 0x000009f0, 0x00000000);
+	nv_icmd(priv, 0x000009f1, 0x00000000);
+	nv_icmd(priv, 0x000009f2, 0x00000000);
+	nv_icmd(priv, 0x000009f3, 0x00000000);
+	nv_icmd(priv, 0x000009f4, 0x00000000);
+	nv_icmd(priv, 0x000009f5, 0x00000000);
+	nv_icmd(priv, 0x000009f6, 0x00000000);
+	nv_icmd(priv, 0x000009f7, 0x00000000);
+	nv_icmd(priv, 0x000009f8, 0x00000000);
+	nv_icmd(priv, 0x000009f9, 0x00000000);
+	nv_icmd(priv, 0x000009fa, 0x00000000);
+	nv_icmd(priv, 0x000009fb, 0x00000000);
+	nv_icmd(priv, 0x000009fc, 0x00000000);
+	nv_icmd(priv, 0x000009fd, 0x00000000);
+	nv_icmd(priv, 0x000009fe, 0x00000000);
+	nv_icmd(priv, 0x000009ff, 0x00000000);
+	nv_icmd(priv, 0x00000468, 0x00000004);
+	nv_icmd(priv, 0x0000046c, 0x00000001);
+	nv_icmd(priv, 0x00000470, 0x00000000);
+	nv_icmd(priv, 0x00000471, 0x00000000);
+	nv_icmd(priv, 0x00000472, 0x00000000);
+	nv_icmd(priv, 0x00000473, 0x00000000);
+	nv_icmd(priv, 0x00000474, 0x00000000);
+	nv_icmd(priv, 0x00000475, 0x00000000);
+	nv_icmd(priv, 0x00000476, 0x00000000);
+	nv_icmd(priv, 0x00000477, 0x00000000);
+	nv_icmd(priv, 0x00000478, 0x00000000);
+	nv_icmd(priv, 0x00000479, 0x00000000);
+	nv_icmd(priv, 0x0000047a, 0x00000000);
+	nv_icmd(priv, 0x0000047b, 0x00000000);
+	nv_icmd(priv, 0x0000047c, 0x00000000);
+	nv_icmd(priv, 0x0000047d, 0x00000000);
+	nv_icmd(priv, 0x0000047e, 0x00000000);
+	nv_icmd(priv, 0x0000047f, 0x00000000);
+	nv_icmd(priv, 0x00000480, 0x00000000);
+	nv_icmd(priv, 0x00000481, 0x00000000);
+	nv_icmd(priv, 0x00000482, 0x00000000);
+	nv_icmd(priv, 0x00000483, 0x00000000);
+	nv_icmd(priv, 0x00000484, 0x00000000);
+	nv_icmd(priv, 0x00000485, 0x00000000);
+	nv_icmd(priv, 0x00000486, 0x00000000);
+	nv_icmd(priv, 0x00000487, 0x00000000);
+	nv_icmd(priv, 0x00000488, 0x00000000);
+	nv_icmd(priv, 0x00000489, 0x00000000);
+	nv_icmd(priv, 0x0000048a, 0x00000000);
+	nv_icmd(priv, 0x0000048b, 0x00000000);
+	nv_icmd(priv, 0x0000048c, 0x00000000);
+	nv_icmd(priv, 0x0000048d, 0x00000000);
+	nv_icmd(priv, 0x0000048e, 0x00000000);
+	nv_icmd(priv, 0x0000048f, 0x00000000);
+	nv_icmd(priv, 0x00000490, 0x00000000);
+	nv_icmd(priv, 0x00000491, 0x00000000);
+	nv_icmd(priv, 0x00000492, 0x00000000);
+	nv_icmd(priv, 0x00000493, 0x00000000);
+	nv_icmd(priv, 0x00000494, 0x00000000);
+	nv_icmd(priv, 0x00000495, 0x00000000);
+	nv_icmd(priv, 0x00000496, 0x00000000);
+	nv_icmd(priv, 0x00000497, 0x00000000);
+	nv_icmd(priv, 0x00000498, 0x00000000);
+	nv_icmd(priv, 0x00000499, 0x00000000);
+	nv_icmd(priv, 0x0000049a, 0x00000000);
+	nv_icmd(priv, 0x0000049b, 0x00000000);
+	nv_icmd(priv, 0x0000049c, 0x00000000);
+	nv_icmd(priv, 0x0000049d, 0x00000000);
+	nv_icmd(priv, 0x0000049e, 0x00000000);
+	nv_icmd(priv, 0x0000049f, 0x00000000);
+	nv_icmd(priv, 0x000004a0, 0x00000000);
+	nv_icmd(priv, 0x000004a1, 0x00000000);
+	nv_icmd(priv, 0x000004a2, 0x00000000);
+	nv_icmd(priv, 0x000004a3, 0x00000000);
+	nv_icmd(priv, 0x000004a4, 0x00000000);
+	nv_icmd(priv, 0x000004a5, 0x00000000);
+	nv_icmd(priv, 0x000004a6, 0x00000000);
+	nv_icmd(priv, 0x000004a7, 0x00000000);
+	nv_icmd(priv, 0x000004a8, 0x00000000);
+	nv_icmd(priv, 0x000004a9, 0x00000000);
+	nv_icmd(priv, 0x000004aa, 0x00000000);
+	nv_icmd(priv, 0x000004ab, 0x00000000);
+	nv_icmd(priv, 0x000004ac, 0x00000000);
+	nv_icmd(priv, 0x000004ad, 0x00000000);
+	nv_icmd(priv, 0x000004ae, 0x00000000);
+	nv_icmd(priv, 0x000004af, 0x00000000);
+	nv_icmd(priv, 0x000004b0, 0x00000000);
+	nv_icmd(priv, 0x000004b1, 0x00000000);
+	nv_icmd(priv, 0x000004b2, 0x00000000);
+	nv_icmd(priv, 0x000004b3, 0x00000000);
+	nv_icmd(priv, 0x000004b4, 0x00000000);
+	nv_icmd(priv, 0x000004b5, 0x00000000);
+	nv_icmd(priv, 0x000004b6, 0x00000000);
+	nv_icmd(priv, 0x000004b7, 0x00000000);
+	nv_icmd(priv, 0x000004b8, 0x00000000);
+	nv_icmd(priv, 0x000004b9, 0x00000000);
+	nv_icmd(priv, 0x000004ba, 0x00000000);
+	nv_icmd(priv, 0x000004bb, 0x00000000);
+	nv_icmd(priv, 0x000004bc, 0x00000000);
+	nv_icmd(priv, 0x000004bd, 0x00000000);
+	nv_icmd(priv, 0x000004be, 0x00000000);
+	nv_icmd(priv, 0x000004bf, 0x00000000);
+	nv_icmd(priv, 0x000004c0, 0x00000000);
+	nv_icmd(priv, 0x000004c1, 0x00000000);
+	nv_icmd(priv, 0x000004c2, 0x00000000);
+	nv_icmd(priv, 0x000004c3, 0x00000000);
+	nv_icmd(priv, 0x000004c4, 0x00000000);
+	nv_icmd(priv, 0x000004c5, 0x00000000);
+	nv_icmd(priv, 0x000004c6, 0x00000000);
+	nv_icmd(priv, 0x000004c7, 0x00000000);
+	nv_icmd(priv, 0x000004c8, 0x00000000);
+	nv_icmd(priv, 0x000004c9, 0x00000000);
+	nv_icmd(priv, 0x000004ca, 0x00000000);
+	nv_icmd(priv, 0x000004cb, 0x00000000);
+	nv_icmd(priv, 0x000004cc, 0x00000000);
+	nv_icmd(priv, 0x000004cd, 0x00000000);
+	nv_icmd(priv, 0x000004ce, 0x00000000);
+	nv_icmd(priv, 0x000004cf, 0x00000000);
+	nv_icmd(priv, 0x00000510, 0x3f800000);
+	nv_icmd(priv, 0x00000511, 0x3f800000);
+	nv_icmd(priv, 0x00000512, 0x3f800000);
+	nv_icmd(priv, 0x00000513, 0x3f800000);
+	nv_icmd(priv, 0x00000514, 0x3f800000);
+	nv_icmd(priv, 0x00000515, 0x3f800000);
+	nv_icmd(priv, 0x00000516, 0x3f800000);
+	nv_icmd(priv, 0x00000517, 0x3f800000);
+	nv_icmd(priv, 0x00000518, 0x3f800000);
+	nv_icmd(priv, 0x00000519, 0x3f800000);
+	nv_icmd(priv, 0x0000051a, 0x3f800000);
+	nv_icmd(priv, 0x0000051b, 0x3f800000);
+	nv_icmd(priv, 0x0000051c, 0x3f800000);
+	nv_icmd(priv, 0x0000051d, 0x3f800000);
+	nv_icmd(priv, 0x0000051e, 0x3f800000);
+	nv_icmd(priv, 0x0000051f, 0x3f800000);
+	nv_icmd(priv, 0x00000520, 0x000002b6);
+	nv_icmd(priv, 0x00000529, 0x00000001);
+	nv_icmd(priv, 0x00000530, 0xffff0000);
+	nv_icmd(priv, 0x00000531, 0xffff0000);
+	nv_icmd(priv, 0x00000532, 0xffff0000);
+	nv_icmd(priv, 0x00000533, 0xffff0000);
+	nv_icmd(priv, 0x00000534, 0xffff0000);
+	nv_icmd(priv, 0x00000535, 0xffff0000);
+	nv_icmd(priv, 0x00000536, 0xffff0000);
+	nv_icmd(priv, 0x00000537, 0xffff0000);
+	nv_icmd(priv, 0x00000538, 0xffff0000);
+	nv_icmd(priv, 0x00000539, 0xffff0000);
+	nv_icmd(priv, 0x0000053a, 0xffff0000);
+	nv_icmd(priv, 0x0000053b, 0xffff0000);
+	nv_icmd(priv, 0x0000053c, 0xffff0000);
+	nv_icmd(priv, 0x0000053d, 0xffff0000);
+	nv_icmd(priv, 0x0000053e, 0xffff0000);
+	nv_icmd(priv, 0x0000053f, 0xffff0000);
+	nv_icmd(priv, 0x00000585, 0x0000003f);
+	nv_icmd(priv, 0x00000576, 0x00000003);
+	if (nv_device(priv)->chipset == 0xc1 ||
+	    nv_device(priv)->chipset >= 0xd0)
+		nv_icmd(priv, 0x0000057b, 0x00000059);
+	nv_icmd(priv, 0x00000586, 0x00000040);
+	nv_icmd(priv, 0x00000582, 0x00000080);
+	nv_icmd(priv, 0x00000583, 0x00000080);
+	nv_icmd(priv, 0x000005c2, 0x00000001);
+	nv_icmd(priv, 0x00000638, 0x00000001);
+	nv_icmd(priv, 0x00000639, 0x00000001);
+	nv_icmd(priv, 0x0000063a, 0x00000002);
+	nv_icmd(priv, 0x0000063b, 0x00000001);
+	nv_icmd(priv, 0x0000063c, 0x00000001);
+	nv_icmd(priv, 0x0000063d, 0x00000002);
+	nv_icmd(priv, 0x0000063e, 0x00000001);
+	nv_icmd(priv, 0x000008b8, 0x00000001);
+	nv_icmd(priv, 0x000008b9, 0x00000001);
+	nv_icmd(priv, 0x000008ba, 0x00000001);
+	nv_icmd(priv, 0x000008bb, 0x00000001);
+	nv_icmd(priv, 0x000008bc, 0x00000001);
+	nv_icmd(priv, 0x000008bd, 0x00000001);
+	nv_icmd(priv, 0x000008be, 0x00000001);
+	nv_icmd(priv, 0x000008bf, 0x00000001);
+	nv_icmd(priv, 0x00000900, 0x00000001);
+	nv_icmd(priv, 0x00000901, 0x00000001);
+	nv_icmd(priv, 0x00000902, 0x00000001);
+	nv_icmd(priv, 0x00000903, 0x00000001);
+	nv_icmd(priv, 0x00000904, 0x00000001);
+	nv_icmd(priv, 0x00000905, 0x00000001);
+	nv_icmd(priv, 0x00000906, 0x00000001);
+	nv_icmd(priv, 0x00000907, 0x00000001);
+	nv_icmd(priv, 0x00000908, 0x00000002);
+	nv_icmd(priv, 0x00000909, 0x00000002);
+	nv_icmd(priv, 0x0000090a, 0x00000002);
+	nv_icmd(priv, 0x0000090b, 0x00000002);
+	nv_icmd(priv, 0x0000090c, 0x00000002);
+	nv_icmd(priv, 0x0000090d, 0x00000002);
+	nv_icmd(priv, 0x0000090e, 0x00000002);
+	nv_icmd(priv, 0x0000090f, 0x00000002);
+	nv_icmd(priv, 0x00000910, 0x00000001);
+	nv_icmd(priv, 0x00000911, 0x00000001);
+	nv_icmd(priv, 0x00000912, 0x00000001);
+	nv_icmd(priv, 0x00000913, 0x00000001);
+	nv_icmd(priv, 0x00000914, 0x00000001);
+	nv_icmd(priv, 0x00000915, 0x00000001);
+	nv_icmd(priv, 0x00000916, 0x00000001);
+	nv_icmd(priv, 0x00000917, 0x00000001);
+	nv_icmd(priv, 0x00000918, 0x00000001);
+	nv_icmd(priv, 0x00000919, 0x00000001);
+	nv_icmd(priv, 0x0000091a, 0x00000001);
+	nv_icmd(priv, 0x0000091b, 0x00000001);
+	nv_icmd(priv, 0x0000091c, 0x00000001);
+	nv_icmd(priv, 0x0000091d, 0x00000001);
+	nv_icmd(priv, 0x0000091e, 0x00000001);
+	nv_icmd(priv, 0x0000091f, 0x00000001);
+	nv_icmd(priv, 0x00000920, 0x00000002);
+	nv_icmd(priv, 0x00000921, 0x00000002);
+	nv_icmd(priv, 0x00000922, 0x00000002);
+	nv_icmd(priv, 0x00000923, 0x00000002);
+	nv_icmd(priv, 0x00000924, 0x00000002);
+	nv_icmd(priv, 0x00000925, 0x00000002);
+	nv_icmd(priv, 0x00000926, 0x00000002);
+	nv_icmd(priv, 0x00000927, 0x00000002);
+	nv_icmd(priv, 0x00000928, 0x00000001);
+	nv_icmd(priv, 0x00000929, 0x00000001);
+	nv_icmd(priv, 0x0000092a, 0x00000001);
+	nv_icmd(priv, 0x0000092b, 0x00000001);
+	nv_icmd(priv, 0x0000092c, 0x00000001);
+	nv_icmd(priv, 0x0000092d, 0x00000001);
+	nv_icmd(priv, 0x0000092e, 0x00000001);
+	nv_icmd(priv, 0x0000092f, 0x00000001);
+	nv_icmd(priv, 0x00000648, 0x00000001);
+	nv_icmd(priv, 0x00000649, 0x00000001);
+	nv_icmd(priv, 0x0000064a, 0x00000001);
+	nv_icmd(priv, 0x0000064b, 0x00000001);
+	nv_icmd(priv, 0x0000064c, 0x00000001);
+	nv_icmd(priv, 0x0000064d, 0x00000001);
+	nv_icmd(priv, 0x0000064e, 0x00000001);
+	nv_icmd(priv, 0x0000064f, 0x00000001);
+	nv_icmd(priv, 0x00000650, 0x00000001);
+	nv_icmd(priv, 0x00000658, 0x0000000f);
+	nv_icmd(priv, 0x000007ff, 0x0000000a);
+	nv_icmd(priv, 0x0000066a, 0x40000000);
+	nv_icmd(priv, 0x0000066b, 0x10000000);
+	nv_icmd(priv, 0x0000066c, 0xffff0000);
+	nv_icmd(priv, 0x0000066d, 0xffff0000);
+	nv_icmd(priv, 0x000007af, 0x00000008);
+	nv_icmd(priv, 0x000007b0, 0x00000008);
+	nv_icmd(priv, 0x000007f6, 0x00000001);
+	nv_icmd(priv, 0x000006b2, 0x00000055);
+	nv_icmd(priv, 0x000007ad, 0x00000003);
+	nv_icmd(priv, 0x00000937, 0x00000001);
+	nv_icmd(priv, 0x00000971, 0x00000008);
+	nv_icmd(priv, 0x00000972, 0x00000040);
+	nv_icmd(priv, 0x00000973, 0x0000012c);
+	nv_icmd(priv, 0x0000097c, 0x00000040);
+	nv_icmd(priv, 0x00000979, 0x00000003);
+	nv_icmd(priv, 0x00000975, 0x00000020);
+	nv_icmd(priv, 0x00000976, 0x00000001);
+	nv_icmd(priv, 0x00000977, 0x00000020);
+	nv_icmd(priv, 0x00000978, 0x00000001);
+	nv_icmd(priv, 0x00000957, 0x00000003);
+	nv_icmd(priv, 0x0000095e, 0x20164010);
+	nv_icmd(priv, 0x0000095f, 0x00000020);
+	if (nv_device(priv)->chipset >= 0xd0)
+		nv_icmd(priv, 0x0000097d, 0x00000020);
+	nv_icmd(priv, 0x00000683, 0x00000006);
+	nv_icmd(priv, 0x00000685, 0x003fffff);
+	nv_icmd(priv, 0x00000687, 0x00000c48);
+	nv_icmd(priv, 0x000006a0, 0x00000005);
+	nv_icmd(priv, 0x00000840, 0x00300008);
+	nv_icmd(priv, 0x00000841, 0x04000080);
+	nv_icmd(priv, 0x00000842, 0x00300008);
+	nv_icmd(priv, 0x00000843, 0x04000080);
+	nv_icmd(priv, 0x00000818, 0x00000000);
+	nv_icmd(priv, 0x00000819, 0x00000000);
+	nv_icmd(priv, 0x0000081a, 0x00000000);
+	nv_icmd(priv, 0x0000081b, 0x00000000);
+	nv_icmd(priv, 0x0000081c, 0x00000000);
+	nv_icmd(priv, 0x0000081d, 0x00000000);
+	nv_icmd(priv, 0x0000081e, 0x00000000);
+	nv_icmd(priv, 0x0000081f, 0x00000000);
+	nv_icmd(priv, 0x00000848, 0x00000000);
+	nv_icmd(priv, 0x00000849, 0x00000000);
+	nv_icmd(priv, 0x0000084a, 0x00000000);
+	nv_icmd(priv, 0x0000084b, 0x00000000);
+	nv_icmd(priv, 0x0000084c, 0x00000000);
+	nv_icmd(priv, 0x0000084d, 0x00000000);
+	nv_icmd(priv, 0x0000084e, 0x00000000);
+	nv_icmd(priv, 0x0000084f, 0x00000000);
+	nv_icmd(priv, 0x00000850, 0x00000000);
+	nv_icmd(priv, 0x00000851, 0x00000000);
+	nv_icmd(priv, 0x00000852, 0x00000000);
+	nv_icmd(priv, 0x00000853, 0x00000000);
+	nv_icmd(priv, 0x00000854, 0x00000000);
+	nv_icmd(priv, 0x00000855, 0x00000000);
+	nv_icmd(priv, 0x00000856, 0x00000000);
+	nv_icmd(priv, 0x00000857, 0x00000000);
+	nv_icmd(priv, 0x00000738, 0x00000000);
+	nv_icmd(priv, 0x000006aa, 0x00000001);
+	nv_icmd(priv, 0x000006ab, 0x00000002);
+	nv_icmd(priv, 0x000006ac, 0x00000080);
+	nv_icmd(priv, 0x000006ad, 0x00000100);
+	nv_icmd(priv, 0x000006ae, 0x00000100);
+	nv_icmd(priv, 0x000006b1, 0x00000011);
+	nv_icmd(priv, 0x000006bb, 0x000000cf);
+	nv_icmd(priv, 0x000006ce, 0x2a712488);
+	nv_icmd(priv, 0x00000739, 0x4085c000);
+	nv_icmd(priv, 0x0000073a, 0x00000080);
+	nv_icmd(priv, 0x00000786, 0x80000100);
+	nv_icmd(priv, 0x0000073c, 0x00010100);
+	nv_icmd(priv, 0x0000073d, 0x02800000);
+	nv_icmd(priv, 0x00000787, 0x000000cf);
+	nv_icmd(priv, 0x0000078c, 0x00000008);
+	nv_icmd(priv, 0x00000792, 0x00000001);
+	nv_icmd(priv, 0x00000794, 0x00000001);
+	nv_icmd(priv, 0x00000795, 0x00000001);
+	nv_icmd(priv, 0x00000796, 0x00000001);
+	nv_icmd(priv, 0x00000797, 0x000000cf);
+	nv_icmd(priv, 0x00000836, 0x00000001);
+	nv_icmd(priv, 0x0000079a, 0x00000002);
+	nv_icmd(priv, 0x00000833, 0x04444480);
+	nv_icmd(priv, 0x000007a1, 0x00000001);
+	nv_icmd(priv, 0x000007a3, 0x00000001);
+	nv_icmd(priv, 0x000007a4, 0x00000001);
+	nv_icmd(priv, 0x000007a5, 0x00000001);
+	nv_icmd(priv, 0x00000831, 0x00000004);
+	nv_icmd(priv, 0x0000080c, 0x00000002);
+	nv_icmd(priv, 0x0000080d, 0x00000100);
+	nv_icmd(priv, 0x0000080e, 0x00000100);
+	nv_icmd(priv, 0x0000080f, 0x00000001);
+	nv_icmd(priv, 0x00000823, 0x00000002);
+	nv_icmd(priv, 0x00000824, 0x00000100);
+	nv_icmd(priv, 0x00000825, 0x00000100);
+	nv_icmd(priv, 0x00000826, 0x00000001);
+	nv_icmd(priv, 0x0000095d, 0x00000001);
+	nv_icmd(priv, 0x0000082b, 0x00000004);
+	nv_icmd(priv, 0x00000942, 0x00010001);
+	nv_icmd(priv, 0x00000943, 0x00000001);
+	nv_icmd(priv, 0x00000944, 0x00000022);
+	nv_icmd(priv, 0x000007c5, 0x00010001);
+	nv_icmd(priv, 0x00000834, 0x00000001);
+	nv_icmd(priv, 0x000007c7, 0x00000001);
+	nv_icmd(priv, 0x0000c1b0, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b1, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b2, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b3, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b4, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b5, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b6, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b7, 0x0000000f);
+	nv_icmd(priv, 0x0000c1b8, 0x0fac6881);
+	nv_icmd(priv, 0x0000c1b9, 0x00fac688);
+	nv_icmd(priv, 0x0001e100, 0x00000001);
+	nv_icmd(priv, 0x00001000, 0x00000002);
+	nv_icmd(priv, 0x000006aa, 0x00000001);
+	nv_icmd(priv, 0x000006ad, 0x00000100);
+	nv_icmd(priv, 0x000006ae, 0x00000100);
+	nv_icmd(priv, 0x000006b1, 0x00000011);
+	nv_icmd(priv, 0x0000078c, 0x00000008);
+	nv_icmd(priv, 0x00000792, 0x00000001);
+	nv_icmd(priv, 0x00000794, 0x00000001);
+	nv_icmd(priv, 0x00000795, 0x00000001);
+	nv_icmd(priv, 0x00000796, 0x00000001);
+	nv_icmd(priv, 0x00000797, 0x000000cf);
+	nv_icmd(priv, 0x0000079a, 0x00000002);
+	nv_icmd(priv, 0x00000833, 0x04444480);
+	nv_icmd(priv, 0x000007a1, 0x00000001);
+	nv_icmd(priv, 0x000007a3, 0x00000001);
+	nv_icmd(priv, 0x000007a4, 0x00000001);
+	nv_icmd(priv, 0x000007a5, 0x00000001);
+	nv_icmd(priv, 0x00000831, 0x00000004);
+	nv_icmd(priv, 0x0001e100, 0x00000001);
+	nv_icmd(priv, 0x00001000, 0x00000014);
+	nv_icmd(priv, 0x00000351, 0x00000100);
+	nv_icmd(priv, 0x00000957, 0x00000003);
+	nv_icmd(priv, 0x0000095d, 0x00000001);
+	nv_icmd(priv, 0x0000082b, 0x00000004);
+	nv_icmd(priv, 0x00000942, 0x00010001);
+	nv_icmd(priv, 0x00000943, 0x00000001);
+	nv_icmd(priv, 0x000007c5, 0x00010001);
+	nv_icmd(priv, 0x00000834, 0x00000001);
+	nv_icmd(priv, 0x000007c7, 0x00000001);
+	nv_icmd(priv, 0x0001e100, 0x00000001);
+	nv_icmd(priv, 0x00001000, 0x00000001);
+	nv_icmd(priv, 0x0000080c, 0x00000002);
+	nv_icmd(priv, 0x0000080d, 0x00000100);
+	nv_icmd(priv, 0x0000080e, 0x00000100);
+	nv_icmd(priv, 0x0000080f, 0x00000001);
+	nv_icmd(priv, 0x00000823, 0x00000002);
+	nv_icmd(priv, 0x00000824, 0x00000100);
+	nv_icmd(priv, 0x00000825, 0x00000100);
+	nv_icmd(priv, 0x00000826, 0x00000001);
+	nv_icmd(priv, 0x0001e100, 0x00000001);
+	nv_wr32(priv, 0x400208, 0x00000000);
+	nv_wr32(priv, 0x404154, 0x00000400);
+
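+	/* replay default method state for each supported class: 9097 is
+	 * the base Fermi 3D class, 9197/9297 later Fermi 3D revisions,
+	 * 902d 2D, 9039 M2MF, 90c0 compute
+	 */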
+	nvc0_grctx_generate_9097(priv);
+	if (fermi >= 0x9197)
+		nvc0_grctx_generate_9197(priv);
+	if (fermi >= 0x9297)
+		nvc0_grctx_generate_9297(priv);
+	nvc0_grctx_generate_902d(priv);
+	nvc0_grctx_generate_9039(priv);
+	nvc0_grctx_generate_90c0(priv);
+
+	nv_wr32(priv, 0x000260, r000260);
+
+	return nvc0_grctx_fini(&info);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
new file mode 100644
index 0000000..ae27dae
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
@@ -0,0 +1,2793 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+
+static void
+nve0_grctx_generate_icmd(struct nvc0_graph_priv *priv)
+{
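+	/* same ICMD upload sequence as on Fermi: enable via 0x400208
+	 * bit 31, then write default state values for the Kepler classes
+	 */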
+	nv_wr32(priv, 0x400208, 0x80000000);
+	nv_icmd(priv, 0x001000, 0x00000004);
+	nv_icmd(priv, 0x000039, 0x00000000);
+	nv_icmd(priv, 0x00003a, 0x00000000);
+	nv_icmd(priv, 0x00003b, 0x00000000);
+	nv_icmd(priv, 0x0000a9, 0x0000ffff);
+	nv_icmd(priv, 0x000038, 0x0fac6881);
+	nv_icmd(priv, 0x00003d, 0x00000001);
+	nv_icmd(priv, 0x0000e8, 0x00000400);
+	nv_icmd(priv, 0x0000e9, 0x00000400);
+	nv_icmd(priv, 0x0000ea, 0x00000400);
+	nv_icmd(priv, 0x0000eb, 0x00000400);
+	nv_icmd(priv, 0x0000ec, 0x00000400);
+	nv_icmd(priv, 0x0000ed, 0x00000400);
+	nv_icmd(priv, 0x0000ee, 0x00000400);
+	nv_icmd(priv, 0x0000ef, 0x00000400);
+	nv_icmd(priv, 0x000078, 0x00000300);
+	nv_icmd(priv, 0x000079, 0x00000300);
+	nv_icmd(priv, 0x00007a, 0x00000300);
+	nv_icmd(priv, 0x00007b, 0x00000300);
+	nv_icmd(priv, 0x00007c, 0x00000300);
+	nv_icmd(priv, 0x00007d, 0x00000300);
+	nv_icmd(priv, 0x00007e, 0x00000300);
+	nv_icmd(priv, 0x00007f, 0x00000300);
+	nv_icmd(priv, 0x000050, 0x00000011);
+	nv_icmd(priv, 0x000058, 0x00000008);
+	nv_icmd(priv, 0x000059, 0x00000008);
+	nv_icmd(priv, 0x00005a, 0x00000008);
+	nv_icmd(priv, 0x00005b, 0x00000008);
+	nv_icmd(priv, 0x00005c, 0x00000008);
+	nv_icmd(priv, 0x00005d, 0x00000008);
+	nv_icmd(priv, 0x00005e, 0x00000008);
+	nv_icmd(priv, 0x00005f, 0x00000008);
+	nv_icmd(priv, 0x000208, 0x00000001);
+	nv_icmd(priv, 0x000209, 0x00000001);
+	nv_icmd(priv, 0x00020a, 0x00000001);
+	nv_icmd(priv, 0x00020b, 0x00000001);
+	nv_icmd(priv, 0x00020c, 0x00000001);
+	nv_icmd(priv, 0x00020d, 0x00000001);
+	nv_icmd(priv, 0x00020e, 0x00000001);
+	nv_icmd(priv, 0x00020f, 0x00000001);
+	nv_icmd(priv, 0x000081, 0x00000001);
+	nv_icmd(priv, 0x000085, 0x00000004);
+	nv_icmd(priv, 0x000088, 0x00000400);
+	nv_icmd(priv, 0x000090, 0x00000300);
+	nv_icmd(priv, 0x000098, 0x00001001);
+	nv_icmd(priv, 0x0000e3, 0x00000001);
+	nv_icmd(priv, 0x0000da, 0x00000001);
+	nv_icmd(priv, 0x0000f8, 0x00000003);
+	nv_icmd(priv, 0x0000fa, 0x00000001);
+	nv_icmd(priv, 0x00009f, 0x0000ffff);
+	nv_icmd(priv, 0x0000a0, 0x0000ffff);
+	nv_icmd(priv, 0x0000a1, 0x0000ffff);
+	nv_icmd(priv, 0x0000a2, 0x0000ffff);
+	nv_icmd(priv, 0x0000b1, 0x00000001);
+	nv_icmd(priv, 0x0000ad, 0x0000013e);
+	nv_icmd(priv, 0x0000e1, 0x00000010);
+	nv_icmd(priv, 0x000290, 0x00000000);
+	nv_icmd(priv, 0x000291, 0x00000000);
+	nv_icmd(priv, 0x000292, 0x00000000);
+	nv_icmd(priv, 0x000293, 0x00000000);
+	nv_icmd(priv, 0x000294, 0x00000000);
+	nv_icmd(priv, 0x000295, 0x00000000);
+	nv_icmd(priv, 0x000296, 0x00000000);
+	nv_icmd(priv, 0x000297, 0x00000000);
+	nv_icmd(priv, 0x000298, 0x00000000);
+	nv_icmd(priv, 0x000299, 0x00000000);
+	nv_icmd(priv, 0x00029a, 0x00000000);
+	nv_icmd(priv, 0x00029b, 0x00000000);
+	nv_icmd(priv, 0x00029c, 0x00000000);
+	nv_icmd(priv, 0x00029d, 0x00000000);
+	nv_icmd(priv, 0x00029e, 0x00000000);
+	nv_icmd(priv, 0x00029f, 0x00000000);
+	nv_icmd(priv, 0x0003b0, 0x00000000);
+	nv_icmd(priv, 0x0003b1, 0x00000000);
+	nv_icmd(priv, 0x0003b2, 0x00000000);
+	nv_icmd(priv, 0x0003b3, 0x00000000);
+	nv_icmd(priv, 0x0003b4, 0x00000000);
+	nv_icmd(priv, 0x0003b5, 0x00000000);
+	nv_icmd(priv, 0x0003b6, 0x00000000);
+	nv_icmd(priv, 0x0003b7, 0x00000000);
+	nv_icmd(priv, 0x0003b8, 0x00000000);
+	nv_icmd(priv, 0x0003b9, 0x00000000);
+	nv_icmd(priv, 0x0003ba, 0x00000000);
+	nv_icmd(priv, 0x0003bb, 0x00000000);
+	nv_icmd(priv, 0x0003bc, 0x00000000);
+	nv_icmd(priv, 0x0003bd, 0x00000000);
+	nv_icmd(priv, 0x0003be, 0x00000000);
+	nv_icmd(priv, 0x0003bf, 0x00000000);
+	nv_icmd(priv, 0x0002a0, 0x00000000);
+	nv_icmd(priv, 0x0002a1, 0x00000000);
+	nv_icmd(priv, 0x0002a2, 0x00000000);
+	nv_icmd(priv, 0x0002a3, 0x00000000);
+	nv_icmd(priv, 0x0002a4, 0x00000000);
+	nv_icmd(priv, 0x0002a5, 0x00000000);
+	nv_icmd(priv, 0x0002a6, 0x00000000);
+	nv_icmd(priv, 0x0002a7, 0x00000000);
+	nv_icmd(priv, 0x0002a8, 0x00000000);
+	nv_icmd(priv, 0x0002a9, 0x00000000);
+	nv_icmd(priv, 0x0002aa, 0x00000000);
+	nv_icmd(priv, 0x0002ab, 0x00000000);
+	nv_icmd(priv, 0x0002ac, 0x00000000);
+	nv_icmd(priv, 0x0002ad, 0x00000000);
+	nv_icmd(priv, 0x0002ae, 0x00000000);
+	nv_icmd(priv, 0x0002af, 0x00000000);
+	nv_icmd(priv, 0x000420, 0x00000000);
+	nv_icmd(priv, 0x000421, 0x00000000);
+	nv_icmd(priv, 0x000422, 0x00000000);
+	nv_icmd(priv, 0x000423, 0x00000000);
+	nv_icmd(priv, 0x000424, 0x00000000);
+	nv_icmd(priv, 0x000425, 0x00000000);
+	nv_icmd(priv, 0x000426, 0x00000000);
+	nv_icmd(priv, 0x000427, 0x00000000);
+	nv_icmd(priv, 0x000428, 0x00000000);
+	nv_icmd(priv, 0x000429, 0x00000000);
+	nv_icmd(priv, 0x00042a, 0x00000000);
+	nv_icmd(priv, 0x00042b, 0x00000000);
+	nv_icmd(priv, 0x00042c, 0x00000000);
+	nv_icmd(priv, 0x00042d, 0x00000000);
+	nv_icmd(priv, 0x00042e, 0x00000000);
+	nv_icmd(priv, 0x00042f, 0x00000000);
+	nv_icmd(priv, 0x0002b0, 0x00000000);
+	nv_icmd(priv, 0x0002b1, 0x00000000);
+	nv_icmd(priv, 0x0002b2, 0x00000000);
+	nv_icmd(priv, 0x0002b3, 0x00000000);
+	nv_icmd(priv, 0x0002b4, 0x00000000);
+	nv_icmd(priv, 0x0002b5, 0x00000000);
+	nv_icmd(priv, 0x0002b6, 0x00000000);
+	nv_icmd(priv, 0x0002b7, 0x00000000);
+	nv_icmd(priv, 0x0002b8, 0x00000000);
+	nv_icmd(priv, 0x0002b9, 0x00000000);
+	nv_icmd(priv, 0x0002ba, 0x00000000);
+	nv_icmd(priv, 0x0002bb, 0x00000000);
+	nv_icmd(priv, 0x0002bc, 0x00000000);
+	nv_icmd(priv, 0x0002bd, 0x00000000);
+	nv_icmd(priv, 0x0002be, 0x00000000);
+	nv_icmd(priv, 0x0002bf, 0x00000000);
+	nv_icmd(priv, 0x000430, 0x00000000);
+	nv_icmd(priv, 0x000431, 0x00000000);
+	nv_icmd(priv, 0x000432, 0x00000000);
+	nv_icmd(priv, 0x000433, 0x00000000);
+	nv_icmd(priv, 0x000434, 0x00000000);
+	nv_icmd(priv, 0x000435, 0x00000000);
+	nv_icmd(priv, 0x000436, 0x00000000);
+	nv_icmd(priv, 0x000437, 0x00000000);
+	nv_icmd(priv, 0x000438, 0x00000000);
+	nv_icmd(priv, 0x000439, 0x00000000);
+	nv_icmd(priv, 0x00043a, 0x00000000);
+	nv_icmd(priv, 0x00043b, 0x00000000);
+	nv_icmd(priv, 0x00043c, 0x00000000);
+	nv_icmd(priv, 0x00043d, 0x00000000);
+	nv_icmd(priv, 0x00043e, 0x00000000);
+	nv_icmd(priv, 0x00043f, 0x00000000);
+	nv_icmd(priv, 0x0002c0, 0x00000000);
+	nv_icmd(priv, 0x0002c1, 0x00000000);
+	nv_icmd(priv, 0x0002c2, 0x00000000);
+	nv_icmd(priv, 0x0002c3, 0x00000000);
+	nv_icmd(priv, 0x0002c4, 0x00000000);
+	nv_icmd(priv, 0x0002c5, 0x00000000);
+	nv_icmd(priv, 0x0002c6, 0x00000000);
+	nv_icmd(priv, 0x0002c7, 0x00000000);
+	nv_icmd(priv, 0x0002c8, 0x00000000);
+	nv_icmd(priv, 0x0002c9, 0x00000000);
+	nv_icmd(priv, 0x0002ca, 0x00000000);
+	nv_icmd(priv, 0x0002cb, 0x00000000);
+	nv_icmd(priv, 0x0002cc, 0x00000000);
+	nv_icmd(priv, 0x0002cd, 0x00000000);
+	nv_icmd(priv, 0x0002ce, 0x00000000);
+	nv_icmd(priv, 0x0002cf, 0x00000000);
+	nv_icmd(priv, 0x0004d0, 0x00000000);
+	nv_icmd(priv, 0x0004d1, 0x00000000);
+	nv_icmd(priv, 0x0004d2, 0x00000000);
+	nv_icmd(priv, 0x0004d3, 0x00000000);
+	nv_icmd(priv, 0x0004d4, 0x00000000);
+	nv_icmd(priv, 0x0004d5, 0x00000000);
+	nv_icmd(priv, 0x0004d6, 0x00000000);
+	nv_icmd(priv, 0x0004d7, 0x00000000);
+	nv_icmd(priv, 0x0004d8, 0x00000000);
+	nv_icmd(priv, 0x0004d9, 0x00000000);
+	nv_icmd(priv, 0x0004da, 0x00000000);
+	nv_icmd(priv, 0x0004db, 0x00000000);
+	nv_icmd(priv, 0x0004dc, 0x00000000);
+	nv_icmd(priv, 0x0004dd, 0x00000000);
+	nv_icmd(priv, 0x0004de, 0x00000000);
+	nv_icmd(priv, 0x0004df, 0x00000000);
+	nv_icmd(priv, 0x000720, 0x00000000);
+	nv_icmd(priv, 0x000721, 0x00000000);
+	nv_icmd(priv, 0x000722, 0x00000000);
+	nv_icmd(priv, 0x000723, 0x00000000);
+	nv_icmd(priv, 0x000724, 0x00000000);
+	nv_icmd(priv, 0x000725, 0x00000000);
+	nv_icmd(priv, 0x000726, 0x00000000);
+	nv_icmd(priv, 0x000727, 0x00000000);
+	nv_icmd(priv, 0x000728, 0x00000000);
+	nv_icmd(priv, 0x000729, 0x00000000);
+	nv_icmd(priv, 0x00072a, 0x00000000);
+	nv_icmd(priv, 0x00072b, 0x00000000);
+	nv_icmd(priv, 0x00072c, 0x00000000);
+	nv_icmd(priv, 0x00072d, 0x00000000);
+	nv_icmd(priv, 0x00072e, 0x00000000);
+	nv_icmd(priv, 0x00072f, 0x00000000);
+	nv_icmd(priv, 0x0008c0, 0x00000000);
+	nv_icmd(priv, 0x0008c1, 0x00000000);
+	nv_icmd(priv, 0x0008c2, 0x00000000);
+	nv_icmd(priv, 0x0008c3, 0x00000000);
+	nv_icmd(priv, 0x0008c4, 0x00000000);
+	nv_icmd(priv, 0x0008c5, 0x00000000);
+	nv_icmd(priv, 0x0008c6, 0x00000000);
+	nv_icmd(priv, 0x0008c7, 0x00000000);
+	nv_icmd(priv, 0x0008c8, 0x00000000);
+	nv_icmd(priv, 0x0008c9, 0x00000000);
+	nv_icmd(priv, 0x0008ca, 0x00000000);
+	nv_icmd(priv, 0x0008cb, 0x00000000);
+	nv_icmd(priv, 0x0008cc, 0x00000000);
+	nv_icmd(priv, 0x0008cd, 0x00000000);
+	nv_icmd(priv, 0x0008ce, 0x00000000);
+	nv_icmd(priv, 0x0008cf, 0x00000000);
+	nv_icmd(priv, 0x000890, 0x00000000);
+	nv_icmd(priv, 0x000891, 0x00000000);
+	nv_icmd(priv, 0x000892, 0x00000000);
+	nv_icmd(priv, 0x000893, 0x00000000);
+	nv_icmd(priv, 0x000894, 0x00000000);
+	nv_icmd(priv, 0x000895, 0x00000000);
+	nv_icmd(priv, 0x000896, 0x00000000);
+	nv_icmd(priv, 0x000897, 0x00000000);
+	nv_icmd(priv, 0x000898, 0x00000000);
+	nv_icmd(priv, 0x000899, 0x00000000);
+	nv_icmd(priv, 0x00089a, 0x00000000);
+	nv_icmd(priv, 0x00089b, 0x00000000);
+	nv_icmd(priv, 0x00089c, 0x00000000);
+	nv_icmd(priv, 0x00089d, 0x00000000);
+	nv_icmd(priv, 0x00089e, 0x00000000);
+	nv_icmd(priv, 0x00089f, 0x00000000);
+	nv_icmd(priv, 0x0008e0, 0x00000000);
+	nv_icmd(priv, 0x0008e1, 0x00000000);
+	nv_icmd(priv, 0x0008e2, 0x00000000);
+	nv_icmd(priv, 0x0008e3, 0x00000000);
+	nv_icmd(priv, 0x0008e4, 0x00000000);
+	nv_icmd(priv, 0x0008e5, 0x00000000);
+	nv_icmd(priv, 0x0008e6, 0x00000000);
+	nv_icmd(priv, 0x0008e7, 0x00000000);
+	nv_icmd(priv, 0x0008e8, 0x00000000);
+	nv_icmd(priv, 0x0008e9, 0x00000000);
+	nv_icmd(priv, 0x0008ea, 0x00000000);
+	nv_icmd(priv, 0x0008eb, 0x00000000);
+	nv_icmd(priv, 0x0008ec, 0x00000000);
+	nv_icmd(priv, 0x0008ed, 0x00000000);
+	nv_icmd(priv, 0x0008ee, 0x00000000);
+	nv_icmd(priv, 0x0008ef, 0x00000000);
+	nv_icmd(priv, 0x0008a0, 0x00000000);
+	nv_icmd(priv, 0x0008a1, 0x00000000);
+	nv_icmd(priv, 0x0008a2, 0x00000000);
+	nv_icmd(priv, 0x0008a3, 0x00000000);
+	nv_icmd(priv, 0x0008a4, 0x00000000);
+	nv_icmd(priv, 0x0008a5, 0x00000000);
+	nv_icmd(priv, 0x0008a6, 0x00000000);
+	nv_icmd(priv, 0x0008a7, 0x00000000);
+	nv_icmd(priv, 0x0008a8, 0x00000000);
+	nv_icmd(priv, 0x0008a9, 0x00000000);
+	nv_icmd(priv, 0x0008aa, 0x00000000);
+	nv_icmd(priv, 0x0008ab, 0x00000000);
+	nv_icmd(priv, 0x0008ac, 0x00000000);
+	nv_icmd(priv, 0x0008ad, 0x00000000);
+	nv_icmd(priv, 0x0008ae, 0x00000000);
+	nv_icmd(priv, 0x0008af, 0x00000000);
+	nv_icmd(priv, 0x0008f0, 0x00000000);
+	nv_icmd(priv, 0x0008f1, 0x00000000);
+	nv_icmd(priv, 0x0008f2, 0x00000000);
+	nv_icmd(priv, 0x0008f3, 0x00000000);
+	nv_icmd(priv, 0x0008f4, 0x00000000);
+	nv_icmd(priv, 0x0008f5, 0x00000000);
+	nv_icmd(priv, 0x0008f6, 0x00000000);
+	nv_icmd(priv, 0x0008f7, 0x00000000);
+	nv_icmd(priv, 0x0008f8, 0x00000000);
+	nv_icmd(priv, 0x0008f9, 0x00000000);
+	nv_icmd(priv, 0x0008fa, 0x00000000);
+	nv_icmd(priv, 0x0008fb, 0x00000000);
+	nv_icmd(priv, 0x0008fc, 0x00000000);
+	nv_icmd(priv, 0x0008fd, 0x00000000);
+	nv_icmd(priv, 0x0008fe, 0x00000000);
+	nv_icmd(priv, 0x0008ff, 0x00000000);
+	nv_icmd(priv, 0x00094c, 0x000000ff);
+	nv_icmd(priv, 0x00094d, 0xffffffff);
+	nv_icmd(priv, 0x00094e, 0x00000002);
+	nv_icmd(priv, 0x0002ec, 0x00000001);
+	nv_icmd(priv, 0x000303, 0x00000001);
+	nv_icmd(priv, 0x0002e6, 0x00000001);
+	nv_icmd(priv, 0x000466, 0x00000052);
+	nv_icmd(priv, 0x000301, 0x3f800000);
+	nv_icmd(priv, 0x000304, 0x30201000);
+	nv_icmd(priv, 0x000305, 0x70605040);
+	nv_icmd(priv, 0x000306, 0xb8a89888);
+	nv_icmd(priv, 0x000307, 0xf8e8d8c8);
+	nv_icmd(priv, 0x00030a, 0x00ffff00);
+	nv_icmd(priv, 0x00030b, 0x0000001a);
+	nv_icmd(priv, 0x00030c, 0x00000001);
+	nv_icmd(priv, 0x000318, 0x00000001);
+	nv_icmd(priv, 0x000340, 0x00000000);
+	nv_icmd(priv, 0x000375, 0x00000001);
+	nv_icmd(priv, 0x00037d, 0x00000006);
+	nv_icmd(priv, 0x0003a0, 0x00000002);
+	nv_icmd(priv, 0x0003aa, 0x00000001);
+	nv_icmd(priv, 0x0003a9, 0x00000001);
+	nv_icmd(priv, 0x000380, 0x00000001);
+	nv_icmd(priv, 0x000383, 0x00000011);
+	nv_icmd(priv, 0x000360, 0x00000040);
+	nv_icmd(priv, 0x000366, 0x00000000);
+	nv_icmd(priv, 0x000367, 0x00000000);
+	nv_icmd(priv, 0x000368, 0x00000fff);
+	nv_icmd(priv, 0x000370, 0x00000000);
+	nv_icmd(priv, 0x000371, 0x00000000);
+	nv_icmd(priv, 0x000372, 0x000fffff);
+	nv_icmd(priv, 0x00037a, 0x00000012);
+	nv_icmd(priv, 0x000619, 0x00000003);
+	nv_icmd(priv, 0x000811, 0x00000003);
+	nv_icmd(priv, 0x000812, 0x00000004);
+	nv_icmd(priv, 0x000813, 0x00000006);
+	nv_icmd(priv, 0x000814, 0x00000008);
+	nv_icmd(priv, 0x000815, 0x0000000b);
+	nv_icmd(priv, 0x000800, 0x00000001);
+	nv_icmd(priv, 0x000801, 0x00000001);
+	nv_icmd(priv, 0x000802, 0x00000001);
+	nv_icmd(priv, 0x000803, 0x00000001);
+	nv_icmd(priv, 0x000804, 0x00000001);
+	nv_icmd(priv, 0x000805, 0x00000001);
+	nv_icmd(priv, 0x000632, 0x00000001);
+	nv_icmd(priv, 0x000633, 0x00000002);
+	nv_icmd(priv, 0x000634, 0x00000003);
+	nv_icmd(priv, 0x000635, 0x00000004);
+	nv_icmd(priv, 0x000654, 0x3f800000);
+	nv_icmd(priv, 0x000657, 0x3f800000);
+	nv_icmd(priv, 0x000655, 0x3f800000);
+	nv_icmd(priv, 0x000656, 0x3f800000);
+	nv_icmd(priv, 0x0006cd, 0x3f800000);
+	nv_icmd(priv, 0x0007f5, 0x3f800000);
+	nv_icmd(priv, 0x0007dc, 0x39291909);
+	nv_icmd(priv, 0x0007dd, 0x79695949);
+	nv_icmd(priv, 0x0007de, 0xb9a99989);
+	nv_icmd(priv, 0x0007df, 0xf9e9d9c9);
+	nv_icmd(priv, 0x0007e8, 0x00003210);
+	nv_icmd(priv, 0x0007e9, 0x00007654);
+	nv_icmd(priv, 0x0007ea, 0x00000098);
+	nv_icmd(priv, 0x0007ec, 0x39291909);
+	nv_icmd(priv, 0x0007ed, 0x79695949);
+	nv_icmd(priv, 0x0007ee, 0xb9a99989);
+	nv_icmd(priv, 0x0007ef, 0xf9e9d9c9);
+	nv_icmd(priv, 0x0007f0, 0x00003210);
+	nv_icmd(priv, 0x0007f1, 0x00007654);
+	nv_icmd(priv, 0x0007f2, 0x00000098);
+	nv_icmd(priv, 0x0005a5, 0x00000001);
+	nv_icmd(priv, 0x000980, 0x00000000);
+	nv_icmd(priv, 0x000981, 0x00000000);
+	nv_icmd(priv, 0x000982, 0x00000000);
+	nv_icmd(priv, 0x000983, 0x00000000);
+	nv_icmd(priv, 0x000984, 0x00000000);
+	nv_icmd(priv, 0x000985, 0x00000000);
+	nv_icmd(priv, 0x000986, 0x00000000);
+	nv_icmd(priv, 0x000987, 0x00000000);
+	nv_icmd(priv, 0x000988, 0x00000000);
+	nv_icmd(priv, 0x000989, 0x00000000);
+	nv_icmd(priv, 0x00098a, 0x00000000);
+	nv_icmd(priv, 0x00098b, 0x00000000);
+	nv_icmd(priv, 0x00098c, 0x00000000);
+	nv_icmd(priv, 0x00098d, 0x00000000);
+	nv_icmd(priv, 0x00098e, 0x00000000);
+	nv_icmd(priv, 0x00098f, 0x00000000);
+	nv_icmd(priv, 0x000990, 0x00000000);
+	nv_icmd(priv, 0x000991, 0x00000000);
+	nv_icmd(priv, 0x000992, 0x00000000);
+	nv_icmd(priv, 0x000993, 0x00000000);
+	nv_icmd(priv, 0x000994, 0x00000000);
+	nv_icmd(priv, 0x000995, 0x00000000);
+	nv_icmd(priv, 0x000996, 0x00000000);
+	nv_icmd(priv, 0x000997, 0x00000000);
+	nv_icmd(priv, 0x000998, 0x00000000);
+	nv_icmd(priv, 0x000999, 0x00000000);
+	nv_icmd(priv, 0x00099a, 0x00000000);
+	nv_icmd(priv, 0x00099b, 0x00000000);
+	nv_icmd(priv, 0x00099c, 0x00000000);
+	nv_icmd(priv, 0x00099d, 0x00000000);
+	nv_icmd(priv, 0x00099e, 0x00000000);
+	nv_icmd(priv, 0x00099f, 0x00000000);
+	nv_icmd(priv, 0x0009a0, 0x00000000);
+	nv_icmd(priv, 0x0009a1, 0x00000000);
+	nv_icmd(priv, 0x0009a2, 0x00000000);
+	nv_icmd(priv, 0x0009a3, 0x00000000);
+	nv_icmd(priv, 0x0009a4, 0x00000000);
+	nv_icmd(priv, 0x0009a5, 0x00000000);
+	nv_icmd(priv, 0x0009a6, 0x00000000);
+	nv_icmd(priv, 0x0009a7, 0x00000000);
+	nv_icmd(priv, 0x0009a8, 0x00000000);
+	nv_icmd(priv, 0x0009a9, 0x00000000);
+	nv_icmd(priv, 0x0009aa, 0x00000000);
+	nv_icmd(priv, 0x0009ab, 0x00000000);
+	nv_icmd(priv, 0x0009ac, 0x00000000);
+	nv_icmd(priv, 0x0009ad, 0x00000000);
+	nv_icmd(priv, 0x0009ae, 0x00000000);
+	nv_icmd(priv, 0x0009af, 0x00000000);
+	nv_icmd(priv, 0x0009b0, 0x00000000);
+	nv_icmd(priv, 0x0009b1, 0x00000000);
+	nv_icmd(priv, 0x0009b2, 0x00000000);
+	nv_icmd(priv, 0x0009b3, 0x00000000);
+	nv_icmd(priv, 0x0009b4, 0x00000000);
+	nv_icmd(priv, 0x0009b5, 0x00000000);
+	nv_icmd(priv, 0x0009b6, 0x00000000);
+	nv_icmd(priv, 0x0009b7, 0x00000000);
+	nv_icmd(priv, 0x0009b8, 0x00000000);
+	nv_icmd(priv, 0x0009b9, 0x00000000);
+	nv_icmd(priv, 0x0009ba, 0x00000000);
+	nv_icmd(priv, 0x0009bb, 0x00000000);
+	nv_icmd(priv, 0x0009bc, 0x00000000);
+	nv_icmd(priv, 0x0009bd, 0x00000000);
+	nv_icmd(priv, 0x0009be, 0x00000000);
+	nv_icmd(priv, 0x0009bf, 0x00000000);
+	nv_icmd(priv, 0x0009c0, 0x00000000);
+	nv_icmd(priv, 0x0009c1, 0x00000000);
+	nv_icmd(priv, 0x0009c2, 0x00000000);
+	nv_icmd(priv, 0x0009c3, 0x00000000);
+	nv_icmd(priv, 0x0009c4, 0x00000000);
+	nv_icmd(priv, 0x0009c5, 0x00000000);
+	nv_icmd(priv, 0x0009c6, 0x00000000);
+	nv_icmd(priv, 0x0009c7, 0x00000000);
+	nv_icmd(priv, 0x0009c8, 0x00000000);
+	nv_icmd(priv, 0x0009c9, 0x00000000);
+	nv_icmd(priv, 0x0009ca, 0x00000000);
+	nv_icmd(priv, 0x0009cb, 0x00000000);
+	nv_icmd(priv, 0x0009cc, 0x00000000);
+	nv_icmd(priv, 0x0009cd, 0x00000000);
+	nv_icmd(priv, 0x0009ce, 0x00000000);
+	nv_icmd(priv, 0x0009cf, 0x00000000);
+	nv_icmd(priv, 0x0009d0, 0x00000000);
+	nv_icmd(priv, 0x0009d1, 0x00000000);
+	nv_icmd(priv, 0x0009d2, 0x00000000);
+	nv_icmd(priv, 0x0009d3, 0x00000000);
+	nv_icmd(priv, 0x0009d4, 0x00000000);
+	nv_icmd(priv, 0x0009d5, 0x00000000);
+	nv_icmd(priv, 0x0009d6, 0x00000000);
+	nv_icmd(priv, 0x0009d7, 0x00000000);
+	nv_icmd(priv, 0x0009d8, 0x00000000);
+	nv_icmd(priv, 0x0009d9, 0x00000000);
+	nv_icmd(priv, 0x0009da, 0x00000000);
+	nv_icmd(priv, 0x0009db, 0x00000000);
+	nv_icmd(priv, 0x0009dc, 0x00000000);
+	nv_icmd(priv, 0x0009dd, 0x00000000);
+	nv_icmd(priv, 0x0009de, 0x00000000);
+	nv_icmd(priv, 0x0009df, 0x00000000);
+	nv_icmd(priv, 0x0009e0, 0x00000000);
+	nv_icmd(priv, 0x0009e1, 0x00000000);
+	nv_icmd(priv, 0x0009e2, 0x00000000);
+	nv_icmd(priv, 0x0009e3, 0x00000000);
+	nv_icmd(priv, 0x0009e4, 0x00000000);
+	nv_icmd(priv, 0x0009e5, 0x00000000);
+	nv_icmd(priv, 0x0009e6, 0x00000000);
+	nv_icmd(priv, 0x0009e7, 0x00000000);
+	nv_icmd(priv, 0x0009e8, 0x00000000);
+	nv_icmd(priv, 0x0009e9, 0x00000000);
+	nv_icmd(priv, 0x0009ea, 0x00000000);
+	nv_icmd(priv, 0x0009eb, 0x00000000);
+	nv_icmd(priv, 0x0009ec, 0x00000000);
+	nv_icmd(priv, 0x0009ed, 0x00000000);
+	nv_icmd(priv, 0x0009ee, 0x00000000);
+	nv_icmd(priv, 0x0009ef, 0x00000000);
+	nv_icmd(priv, 0x0009f0, 0x00000000);
+	nv_icmd(priv, 0x0009f1, 0x00000000);
+	nv_icmd(priv, 0x0009f2, 0x00000000);
+	nv_icmd(priv, 0x0009f3, 0x00000000);
+	nv_icmd(priv, 0x0009f4, 0x00000000);
+	nv_icmd(priv, 0x0009f5, 0x00000000);
+	nv_icmd(priv, 0x0009f6, 0x00000000);
+	nv_icmd(priv, 0x0009f7, 0x00000000);
+	nv_icmd(priv, 0x0009f8, 0x00000000);
+	nv_icmd(priv, 0x0009f9, 0x00000000);
+	nv_icmd(priv, 0x0009fa, 0x00000000);
+	nv_icmd(priv, 0x0009fb, 0x00000000);
+	nv_icmd(priv, 0x0009fc, 0x00000000);
+	nv_icmd(priv, 0x0009fd, 0x00000000);
+	nv_icmd(priv, 0x0009fe, 0x00000000);
+	nv_icmd(priv, 0x0009ff, 0x00000000);
+	nv_icmd(priv, 0x000468, 0x00000004);
+	nv_icmd(priv, 0x00046c, 0x00000001);
+	nv_icmd(priv, 0x000470, 0x00000000);
+	nv_icmd(priv, 0x000471, 0x00000000);
+	nv_icmd(priv, 0x000472, 0x00000000);
+	nv_icmd(priv, 0x000473, 0x00000000);
+	nv_icmd(priv, 0x000474, 0x00000000);
+	nv_icmd(priv, 0x000475, 0x00000000);
+	nv_icmd(priv, 0x000476, 0x00000000);
+	nv_icmd(priv, 0x000477, 0x00000000);
+	nv_icmd(priv, 0x000478, 0x00000000);
+	nv_icmd(priv, 0x000479, 0x00000000);
+	nv_icmd(priv, 0x00047a, 0x00000000);
+	nv_icmd(priv, 0x00047b, 0x00000000);
+	nv_icmd(priv, 0x00047c, 0x00000000);
+	nv_icmd(priv, 0x00047d, 0x00000000);
+	nv_icmd(priv, 0x00047e, 0x00000000);
+	nv_icmd(priv, 0x00047f, 0x00000000);
+	nv_icmd(priv, 0x000480, 0x00000000);
+	nv_icmd(priv, 0x000481, 0x00000000);
+	nv_icmd(priv, 0x000482, 0x00000000);
+	nv_icmd(priv, 0x000483, 0x00000000);
+	nv_icmd(priv, 0x000484, 0x00000000);
+	nv_icmd(priv, 0x000485, 0x00000000);
+	nv_icmd(priv, 0x000486, 0x00000000);
+	nv_icmd(priv, 0x000487, 0x00000000);
+	nv_icmd(priv, 0x000488, 0x00000000);
+	nv_icmd(priv, 0x000489, 0x00000000);
+	nv_icmd(priv, 0x00048a, 0x00000000);
+	nv_icmd(priv, 0x00048b, 0x00000000);
+	nv_icmd(priv, 0x00048c, 0x00000000);
+	nv_icmd(priv, 0x00048d, 0x00000000);
+	nv_icmd(priv, 0x00048e, 0x00000000);
+	nv_icmd(priv, 0x00048f, 0x00000000);
+	nv_icmd(priv, 0x000490, 0x00000000);
+	nv_icmd(priv, 0x000491, 0x00000000);
+	nv_icmd(priv, 0x000492, 0x00000000);
+	nv_icmd(priv, 0x000493, 0x00000000);
+	nv_icmd(priv, 0x000494, 0x00000000);
+	nv_icmd(priv, 0x000495, 0x00000000);
+	nv_icmd(priv, 0x000496, 0x00000000);
+	nv_icmd(priv, 0x000497, 0x00000000);
+	nv_icmd(priv, 0x000498, 0x00000000);
+	nv_icmd(priv, 0x000499, 0x00000000);
+	nv_icmd(priv, 0x00049a, 0x00000000);
+	nv_icmd(priv, 0x00049b, 0x00000000);
+	nv_icmd(priv, 0x00049c, 0x00000000);
+	nv_icmd(priv, 0x00049d, 0x00000000);
+	nv_icmd(priv, 0x00049e, 0x00000000);
+	nv_icmd(priv, 0x00049f, 0x00000000);
+	nv_icmd(priv, 0x0004a0, 0x00000000);
+	nv_icmd(priv, 0x0004a1, 0x00000000);
+	nv_icmd(priv, 0x0004a2, 0x00000000);
+	nv_icmd(priv, 0x0004a3, 0x00000000);
+	nv_icmd(priv, 0x0004a4, 0x00000000);
+	nv_icmd(priv, 0x0004a5, 0x00000000);
+	nv_icmd(priv, 0x0004a6, 0x00000000);
+	nv_icmd(priv, 0x0004a7, 0x00000000);
+	nv_icmd(priv, 0x0004a8, 0x00000000);
+	nv_icmd(priv, 0x0004a9, 0x00000000);
+	nv_icmd(priv, 0x0004aa, 0x00000000);
+	nv_icmd(priv, 0x0004ab, 0x00000000);
+	nv_icmd(priv, 0x0004ac, 0x00000000);
+	nv_icmd(priv, 0x0004ad, 0x00000000);
+	nv_icmd(priv, 0x0004ae, 0x00000000);
+	nv_icmd(priv, 0x0004af, 0x00000000);
+	nv_icmd(priv, 0x0004b0, 0x00000000);
+	nv_icmd(priv, 0x0004b1, 0x00000000);
+	nv_icmd(priv, 0x0004b2, 0x00000000);
+	nv_icmd(priv, 0x0004b3, 0x00000000);
+	nv_icmd(priv, 0x0004b4, 0x00000000);
+	nv_icmd(priv, 0x0004b5, 0x00000000);
+	nv_icmd(priv, 0x0004b6, 0x00000000);
+	nv_icmd(priv, 0x0004b7, 0x00000000);
+	nv_icmd(priv, 0x0004b8, 0x00000000);
+	nv_icmd(priv, 0x0004b9, 0x00000000);
+	nv_icmd(priv, 0x0004ba, 0x00000000);
+	nv_icmd(priv, 0x0004bb, 0x00000000);
+	nv_icmd(priv, 0x0004bc, 0x00000000);
+	nv_icmd(priv, 0x0004bd, 0x00000000);
+	nv_icmd(priv, 0x0004be, 0x00000000);
+	nv_icmd(priv, 0x0004bf, 0x00000000);
+	nv_icmd(priv, 0x0004c0, 0x00000000);
+	nv_icmd(priv, 0x0004c1, 0x00000000);
+	nv_icmd(priv, 0x0004c2, 0x00000000);
+	nv_icmd(priv, 0x0004c3, 0x00000000);
+	nv_icmd(priv, 0x0004c4, 0x00000000);
+	nv_icmd(priv, 0x0004c5, 0x00000000);
+	nv_icmd(priv, 0x0004c6, 0x00000000);
+	nv_icmd(priv, 0x0004c7, 0x00000000);
+	nv_icmd(priv, 0x0004c8, 0x00000000);
+	nv_icmd(priv, 0x0004c9, 0x00000000);
+	nv_icmd(priv, 0x0004ca, 0x00000000);
+	nv_icmd(priv, 0x0004cb, 0x00000000);
+	nv_icmd(priv, 0x0004cc, 0x00000000);
+	nv_icmd(priv, 0x0004cd, 0x00000000);
+	nv_icmd(priv, 0x0004ce, 0x00000000);
+	nv_icmd(priv, 0x0004cf, 0x00000000);
+	nv_icmd(priv, 0x000510, 0x3f800000);
+	nv_icmd(priv, 0x000511, 0x3f800000);
+	nv_icmd(priv, 0x000512, 0x3f800000);
+	nv_icmd(priv, 0x000513, 0x3f800000);
+	nv_icmd(priv, 0x000514, 0x3f800000);
+	nv_icmd(priv, 0x000515, 0x3f800000);
+	nv_icmd(priv, 0x000516, 0x3f800000);
+	nv_icmd(priv, 0x000517, 0x3f800000);
+	nv_icmd(priv, 0x000518, 0x3f800000);
+	nv_icmd(priv, 0x000519, 0x3f800000);
+	nv_icmd(priv, 0x00051a, 0x3f800000);
+	nv_icmd(priv, 0x00051b, 0x3f800000);
+	nv_icmd(priv, 0x00051c, 0x3f800000);
+	nv_icmd(priv, 0x00051d, 0x3f800000);
+	nv_icmd(priv, 0x00051e, 0x3f800000);
+	nv_icmd(priv, 0x00051f, 0x3f800000);
+	nv_icmd(priv, 0x000520, 0x000002b6);
+	nv_icmd(priv, 0x000529, 0x00000001);
+	nv_icmd(priv, 0x000530, 0xffff0000);
+	nv_icmd(priv, 0x000531, 0xffff0000);
+	nv_icmd(priv, 0x000532, 0xffff0000);
+	nv_icmd(priv, 0x000533, 0xffff0000);
+	nv_icmd(priv, 0x000534, 0xffff0000);
+	nv_icmd(priv, 0x000535, 0xffff0000);
+	nv_icmd(priv, 0x000536, 0xffff0000);
+	nv_icmd(priv, 0x000537, 0xffff0000);
+	nv_icmd(priv, 0x000538, 0xffff0000);
+	nv_icmd(priv, 0x000539, 0xffff0000);
+	nv_icmd(priv, 0x00053a, 0xffff0000);
+	nv_icmd(priv, 0x00053b, 0xffff0000);
+	nv_icmd(priv, 0x00053c, 0xffff0000);
+	nv_icmd(priv, 0x00053d, 0xffff0000);
+	nv_icmd(priv, 0x00053e, 0xffff0000);
+	nv_icmd(priv, 0x00053f, 0xffff0000);
+	nv_icmd(priv, 0x000585, 0x0000003f);
+	nv_icmd(priv, 0x000576, 0x00000003);
+	nv_icmd(priv, 0x00057b, 0x00000059);
+	nv_icmd(priv, 0x000586, 0x00000040);
+	nv_icmd(priv, 0x000582, 0x00000080);
+	nv_icmd(priv, 0x000583, 0x00000080);
+	nv_icmd(priv, 0x0005c2, 0x00000001);
+	nv_icmd(priv, 0x000638, 0x00000001);
+	nv_icmd(priv, 0x000639, 0x00000001);
+	nv_icmd(priv, 0x00063a, 0x00000002);
+	nv_icmd(priv, 0x00063b, 0x00000001);
+	nv_icmd(priv, 0x00063c, 0x00000001);
+	nv_icmd(priv, 0x00063d, 0x00000002);
+	nv_icmd(priv, 0x00063e, 0x00000001);
+	nv_icmd(priv, 0x0008b8, 0x00000001);
+	nv_icmd(priv, 0x0008b9, 0x00000001);
+	nv_icmd(priv, 0x0008ba, 0x00000001);
+	nv_icmd(priv, 0x0008bb, 0x00000001);
+	nv_icmd(priv, 0x0008bc, 0x00000001);
+	nv_icmd(priv, 0x0008bd, 0x00000001);
+	nv_icmd(priv, 0x0008be, 0x00000001);
+	nv_icmd(priv, 0x0008bf, 0x00000001);
+	nv_icmd(priv, 0x000900, 0x00000001);
+	nv_icmd(priv, 0x000901, 0x00000001);
+	nv_icmd(priv, 0x000902, 0x00000001);
+	nv_icmd(priv, 0x000903, 0x00000001);
+	nv_icmd(priv, 0x000904, 0x00000001);
+	nv_icmd(priv, 0x000905, 0x00000001);
+	nv_icmd(priv, 0x000906, 0x00000001);
+	nv_icmd(priv, 0x000907, 0x00000001);
+	nv_icmd(priv, 0x000908, 0x00000002);
+	nv_icmd(priv, 0x000909, 0x00000002);
+	nv_icmd(priv, 0x00090a, 0x00000002);
+	nv_icmd(priv, 0x00090b, 0x00000002);
+	nv_icmd(priv, 0x00090c, 0x00000002);
+	nv_icmd(priv, 0x00090d, 0x00000002);
+	nv_icmd(priv, 0x00090e, 0x00000002);
+	nv_icmd(priv, 0x00090f, 0x00000002);
+	nv_icmd(priv, 0x000910, 0x00000001);
+	nv_icmd(priv, 0x000911, 0x00000001);
+	nv_icmd(priv, 0x000912, 0x00000001);
+	nv_icmd(priv, 0x000913, 0x00000001);
+	nv_icmd(priv, 0x000914, 0x00000001);
+	nv_icmd(priv, 0x000915, 0x00000001);
+	nv_icmd(priv, 0x000916, 0x00000001);
+	nv_icmd(priv, 0x000917, 0x00000001);
+	nv_icmd(priv, 0x000918, 0x00000001);
+	nv_icmd(priv, 0x000919, 0x00000001);
+	nv_icmd(priv, 0x00091a, 0x00000001);
+	nv_icmd(priv, 0x00091b, 0x00000001);
+	nv_icmd(priv, 0x00091c, 0x00000001);
+	nv_icmd(priv, 0x00091d, 0x00000001);
+	nv_icmd(priv, 0x00091e, 0x00000001);
+	nv_icmd(priv, 0x00091f, 0x00000001);
+	nv_icmd(priv, 0x000920, 0x00000002);
+	nv_icmd(priv, 0x000921, 0x00000002);
+	nv_icmd(priv, 0x000922, 0x00000002);
+	nv_icmd(priv, 0x000923, 0x00000002);
+	nv_icmd(priv, 0x000924, 0x00000002);
+	nv_icmd(priv, 0x000925, 0x00000002);
+	nv_icmd(priv, 0x000926, 0x00000002);
+	nv_icmd(priv, 0x000927, 0x00000002);
+	nv_icmd(priv, 0x000928, 0x00000001);
+	nv_icmd(priv, 0x000929, 0x00000001);
+	nv_icmd(priv, 0x00092a, 0x00000001);
+	nv_icmd(priv, 0x00092b, 0x00000001);
+	nv_icmd(priv, 0x00092c, 0x00000001);
+	nv_icmd(priv, 0x00092d, 0x00000001);
+	nv_icmd(priv, 0x00092e, 0x00000001);
+	nv_icmd(priv, 0x00092f, 0x00000001);
+	nv_icmd(priv, 0x000648, 0x00000001);
+	nv_icmd(priv, 0x000649, 0x00000001);
+	nv_icmd(priv, 0x00064a, 0x00000001);
+	nv_icmd(priv, 0x00064b, 0x00000001);
+	nv_icmd(priv, 0x00064c, 0x00000001);
+	nv_icmd(priv, 0x00064d, 0x00000001);
+	nv_icmd(priv, 0x00064e, 0x00000001);
+	nv_icmd(priv, 0x00064f, 0x00000001);
+	nv_icmd(priv, 0x000650, 0x00000001);
+	nv_icmd(priv, 0x000658, 0x0000000f);
+	nv_icmd(priv, 0x0007ff, 0x0000000a);
+	nv_icmd(priv, 0x00066a, 0x40000000);
+	nv_icmd(priv, 0x00066b, 0x10000000);
+	nv_icmd(priv, 0x00066c, 0xffff0000);
+	nv_icmd(priv, 0x00066d, 0xffff0000);
+	nv_icmd(priv, 0x0007af, 0x00000008);
+	nv_icmd(priv, 0x0007b0, 0x00000008);
+	nv_icmd(priv, 0x0007f6, 0x00000001);
+	nv_icmd(priv, 0x0006b2, 0x00000055);
+	nv_icmd(priv, 0x0007ad, 0x00000003);
+	nv_icmd(priv, 0x000937, 0x00000001);
+	nv_icmd(priv, 0x000971, 0x00000008);
+	nv_icmd(priv, 0x000972, 0x00000040);
+	nv_icmd(priv, 0x000973, 0x0000012c);
+	nv_icmd(priv, 0x00097c, 0x00000040);
+	nv_icmd(priv, 0x000979, 0x00000003);
+	nv_icmd(priv, 0x000975, 0x00000020);
+	nv_icmd(priv, 0x000976, 0x00000001);
+	nv_icmd(priv, 0x000977, 0x00000020);
+	nv_icmd(priv, 0x000978, 0x00000001);
+	nv_icmd(priv, 0x000957, 0x00000003);
+	nv_icmd(priv, 0x00095e, 0x20164010);
+	nv_icmd(priv, 0x00095f, 0x00000020);
+	nv_icmd(priv, 0x00097d, 0x00000020);
+	nv_icmd(priv, 0x000683, 0x00000006);
+	nv_icmd(priv, 0x000685, 0x003fffff);
+	nv_icmd(priv, 0x000687, 0x003fffff);
+	nv_icmd(priv, 0x0006a0, 0x00000005);
+	nv_icmd(priv, 0x000840, 0x00400008);
+	nv_icmd(priv, 0x000841, 0x08000080);
+	nv_icmd(priv, 0x000842, 0x00400008);
+	nv_icmd(priv, 0x000843, 0x08000080);
+	nv_icmd(priv, 0x000818, 0x00000000);
+	nv_icmd(priv, 0x000819, 0x00000000);
+	nv_icmd(priv, 0x00081a, 0x00000000);
+	nv_icmd(priv, 0x00081b, 0x00000000);
+	nv_icmd(priv, 0x00081c, 0x00000000);
+	nv_icmd(priv, 0x00081d, 0x00000000);
+	nv_icmd(priv, 0x00081e, 0x00000000);
+	nv_icmd(priv, 0x00081f, 0x00000000);
+	nv_icmd(priv, 0x000848, 0x00000000);
+	nv_icmd(priv, 0x000849, 0x00000000);
+	nv_icmd(priv, 0x00084a, 0x00000000);
+	nv_icmd(priv, 0x00084b, 0x00000000);
+	nv_icmd(priv, 0x00084c, 0x00000000);
+	nv_icmd(priv, 0x00084d, 0x00000000);
+	nv_icmd(priv, 0x00084e, 0x00000000);
+	nv_icmd(priv, 0x00084f, 0x00000000);
+	nv_icmd(priv, 0x000850, 0x00000000);
+	nv_icmd(priv, 0x000851, 0x00000000);
+	nv_icmd(priv, 0x000852, 0x00000000);
+	nv_icmd(priv, 0x000853, 0x00000000);
+	nv_icmd(priv, 0x000854, 0x00000000);
+	nv_icmd(priv, 0x000855, 0x00000000);
+	nv_icmd(priv, 0x000856, 0x00000000);
+	nv_icmd(priv, 0x000857, 0x00000000);
+	nv_icmd(priv, 0x000738, 0x00000000);
+	nv_icmd(priv, 0x0006aa, 0x00000001);
+	nv_icmd(priv, 0x0006ab, 0x00000002);
+	nv_icmd(priv, 0x0006ac, 0x00000080);
+	nv_icmd(priv, 0x0006ad, 0x00000100);
+	nv_icmd(priv, 0x0006ae, 0x00000100);
+	nv_icmd(priv, 0x0006b1, 0x00000011);
+	nv_icmd(priv, 0x0006bb, 0x000000cf);
+	nv_icmd(priv, 0x0006ce, 0x2a712488);
+	nv_icmd(priv, 0x000739, 0x4085c000);
+	nv_icmd(priv, 0x00073a, 0x00000080);
+	nv_icmd(priv, 0x000786, 0x80000100);
+	nv_icmd(priv, 0x00073c, 0x00010100);
+	nv_icmd(priv, 0x00073d, 0x02800000);
+	nv_icmd(priv, 0x000787, 0x000000cf);
+	nv_icmd(priv, 0x00078c, 0x00000008);
+	nv_icmd(priv, 0x000792, 0x00000001);
+	nv_icmd(priv, 0x000794, 0x00000001);
+	nv_icmd(priv, 0x000795, 0x00000001);
+	nv_icmd(priv, 0x000796, 0x00000001);
+	nv_icmd(priv, 0x000797, 0x000000cf);
+	nv_icmd(priv, 0x000836, 0x00000001);
+	nv_icmd(priv, 0x00079a, 0x00000002);
+	nv_icmd(priv, 0x000833, 0x04444480);
+	nv_icmd(priv, 0x0007a1, 0x00000001);
+	nv_icmd(priv, 0x0007a3, 0x00000001);
+	nv_icmd(priv, 0x0007a4, 0x00000001);
+	nv_icmd(priv, 0x0007a5, 0x00000001);
+	nv_icmd(priv, 0x000831, 0x00000004);
+	nv_icmd(priv, 0x000b07, 0x00000002);
+	nv_icmd(priv, 0x000b08, 0x00000100);
+	nv_icmd(priv, 0x000b09, 0x00000100);
+	nv_icmd(priv, 0x000b0a, 0x00000001);
+	nv_icmd(priv, 0x000a04, 0x000000ff);
+	nv_icmd(priv, 0x000a0b, 0x00000040);
+	nv_icmd(priv, 0x00097f, 0x00000100);
+	nv_icmd(priv, 0x000a02, 0x00000001);
+	nv_icmd(priv, 0x000809, 0x00000007);
+	nv_icmd(priv, 0x00c221, 0x00000040);
+	nv_icmd(priv, 0x00c1b0, 0x0000000f);
+	nv_icmd(priv, 0x00c1b1, 0x0000000f);
+	nv_icmd(priv, 0x00c1b2, 0x0000000f);
+	nv_icmd(priv, 0x00c1b3, 0x0000000f);
+	nv_icmd(priv, 0x00c1b4, 0x0000000f);
+	nv_icmd(priv, 0x00c1b5, 0x0000000f);
+	nv_icmd(priv, 0x00c1b6, 0x0000000f);
+	nv_icmd(priv, 0x00c1b7, 0x0000000f);
+	nv_icmd(priv, 0x00c1b8, 0x0fac6881);
+	nv_icmd(priv, 0x00c1b9, 0x00fac688);
+	nv_icmd(priv, 0x00c401, 0x00000001);
+	nv_icmd(priv, 0x00c402, 0x00010001);
+	nv_icmd(priv, 0x00c403, 0x00000001);
+	nv_icmd(priv, 0x00c404, 0x00000001);
+	nv_icmd(priv, 0x00c40e, 0x00000020);
+	nv_icmd(priv, 0x00c500, 0x00000003);
+	nv_icmd(priv, 0x01e100, 0x00000001);
+	nv_icmd(priv, 0x001000, 0x00000002);
+	nv_icmd(priv, 0x0006aa, 0x00000001);
+	nv_icmd(priv, 0x0006ad, 0x00000100);
+	nv_icmd(priv, 0x0006ae, 0x00000100);
+	nv_icmd(priv, 0x0006b1, 0x00000011);
+	nv_icmd(priv, 0x00078c, 0x00000008);
+	nv_icmd(priv, 0x000792, 0x00000001);
+	nv_icmd(priv, 0x000794, 0x00000001);
+	nv_icmd(priv, 0x000795, 0x00000001);
+	nv_icmd(priv, 0x000796, 0x00000001);
+	nv_icmd(priv, 0x000797, 0x000000cf);
+	nv_icmd(priv, 0x00079a, 0x00000002);
+	nv_icmd(priv, 0x000833, 0x04444480);
+	nv_icmd(priv, 0x0007a1, 0x00000001);
+	nv_icmd(priv, 0x0007a3, 0x00000001);
+	nv_icmd(priv, 0x0007a4, 0x00000001);
+	nv_icmd(priv, 0x0007a5, 0x00000001);
+	nv_icmd(priv, 0x000831, 0x00000004);
+	nv_icmd(priv, 0x01e100, 0x00000001);
+	nv_icmd(priv, 0x001000, 0x00000008);
+	nv_icmd(priv, 0x000039, 0x00000000);
+	nv_icmd(priv, 0x00003a, 0x00000000);
+	nv_icmd(priv, 0x00003b, 0x00000000);
+	nv_icmd(priv, 0x000380, 0x00000001);
+	nv_icmd(priv, 0x000366, 0x00000000);
+	nv_icmd(priv, 0x000367, 0x00000000);
+	nv_icmd(priv, 0x000368, 0x00000fff);
+	nv_icmd(priv, 0x000370, 0x00000000);
+	nv_icmd(priv, 0x000371, 0x00000000);
+	nv_icmd(priv, 0x000372, 0x000fffff);
+	nv_icmd(priv, 0x000813, 0x00000006);
+	nv_icmd(priv, 0x000814, 0x00000008);
+	nv_icmd(priv, 0x000957, 0x00000003);
+	nv_icmd(priv, 0x000818, 0x00000000);
+	nv_icmd(priv, 0x000819, 0x00000000);
+	nv_icmd(priv, 0x00081a, 0x00000000);
+	nv_icmd(priv, 0x00081b, 0x00000000);
+	nv_icmd(priv, 0x00081c, 0x00000000);
+	nv_icmd(priv, 0x00081d, 0x00000000);
+	nv_icmd(priv, 0x00081e, 0x00000000);
+	nv_icmd(priv, 0x00081f, 0x00000000);
+	nv_icmd(priv, 0x000848, 0x00000000);
+	nv_icmd(priv, 0x000849, 0x00000000);
+	nv_icmd(priv, 0x00084a, 0x00000000);
+	nv_icmd(priv, 0x00084b, 0x00000000);
+	nv_icmd(priv, 0x00084c, 0x00000000);
+	nv_icmd(priv, 0x00084d, 0x00000000);
+	nv_icmd(priv, 0x00084e, 0x00000000);
+	nv_icmd(priv, 0x00084f, 0x00000000);
+	nv_icmd(priv, 0x000850, 0x00000000);
+	nv_icmd(priv, 0x000851, 0x00000000);
+	nv_icmd(priv, 0x000852, 0x00000000);
+	nv_icmd(priv, 0x000853, 0x00000000);
+	nv_icmd(priv, 0x000854, 0x00000000);
+	nv_icmd(priv, 0x000855, 0x00000000);
+	nv_icmd(priv, 0x000856, 0x00000000);
+	nv_icmd(priv, 0x000857, 0x00000000);
+	nv_icmd(priv, 0x000738, 0x00000000);
+	nv_icmd(priv, 0x000b07, 0x00000002);
+	nv_icmd(priv, 0x000b08, 0x00000100);
+	nv_icmd(priv, 0x000b09, 0x00000100);
+	nv_icmd(priv, 0x000b0a, 0x00000001);
+	nv_icmd(priv, 0x000a04, 0x000000ff);
+	nv_icmd(priv, 0x00097f, 0x00000100);
+	nv_icmd(priv, 0x000a02, 0x00000001);
+	nv_icmd(priv, 0x000809, 0x00000007);
+	nv_icmd(priv, 0x00c221, 0x00000040);
+	nv_icmd(priv, 0x00c401, 0x00000001);
+	nv_icmd(priv, 0x00c402, 0x00010001);
+	nv_icmd(priv, 0x00c403, 0x00000001);
+	nv_icmd(priv, 0x00c404, 0x00000001);
+	nv_icmd(priv, 0x00c40e, 0x00000020);
+	nv_icmd(priv, 0x00c500, 0x00000003);
+	nv_icmd(priv, 0x01e100, 0x00000001);
+	nv_icmd(priv, 0x001000, 0x00000001);
+	nv_icmd(priv, 0x000b07, 0x00000002);
+	nv_icmd(priv, 0x000b08, 0x00000100);
+	nv_icmd(priv, 0x000b09, 0x00000100);
+	nv_icmd(priv, 0x000b0a, 0x00000001);
+	nv_icmd(priv, 0x01e100, 0x00000001);
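+	/* Presumably clears the ICMD-stream enable bit in 0x400208 that was
+	 * set before the nv_icmd() sequence above began; the matching opening
+	 * write is not visible in this hunk.
+	 */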
+	nv_wr32(priv, 0x400208, 0x00000000);
+}
+
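+/* Load default values for the methods of the 0xa097 object (the Kepler
+ * 3D class) into the golden context image via the nv_mthd() helper.  The
+ * table below appears to be machine-generated from hardware state dumps,
+ * so the raw method/value pairs are intentionally kept as magic numbers.
+ */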
+static void
+nve0_grctx_generate_a097(struct nvc0_graph_priv *priv)
+{
+	nv_mthd(priv, 0xa097, 0x0800, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0840, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0880, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0900, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0940, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0980, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0804, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0844, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0884, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0904, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0944, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0984, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0808, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0848, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0888, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x08c8, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0908, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0948, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x0988, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x09c8, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x080c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x084c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x088c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x08cc, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x090c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x094c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x098c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x09cc, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x0810, 0x000000cf);
+	nv_mthd(priv, 0xa097, 0x0850, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0890, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0910, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0950, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0990, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0814, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0854, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0894, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x08d4, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0914, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0954, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0994, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x09d4, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0818, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0858, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0898, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x08d8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0918, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0958, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0998, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x09d8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x081c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x085c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x089c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x091c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x095c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x099c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0820, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0860, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x08e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0920, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0960, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x09e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ca0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cb0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cc0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cd0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ce0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cf0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c24, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c34, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c64, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c94, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ca4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cb4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cc4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cd4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ce4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cf4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c18, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c28, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c38, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c58, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c68, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c78, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c98, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ca8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cb8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cc8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cd8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ce8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cf8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c0c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c1c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c2c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c3c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c4c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c5c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c6c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c7c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1c9c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cbc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ccc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cdc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1cfc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1da0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1db0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dc0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dd0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1de0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1df0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d24, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d34, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d64, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d94, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1da4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1db4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dc4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dd4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1de4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1df4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d18, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d28, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d38, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d58, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d68, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d78, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d98, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1da8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1db8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dc8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dd8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1de8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1df8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d0c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d1c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d2c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d3c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d4c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d5c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d6c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d7c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1d9c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dbc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dcc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ddc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1dfc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f18, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f28, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f38, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f58, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f68, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f78, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f0c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f1c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f24, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f2c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f34, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f3c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f4c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f5c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f64, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f6c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f7c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f98, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fa0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fa8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fb0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fb8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fc0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fc8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fd0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fd8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fe0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fe8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ff0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ff8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f94, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1f9c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fa4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fb4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fbc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fc4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fcc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fd4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fdc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fe4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1fec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ff4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1ffc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2000, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2040, 0x00000011);
+	nv_mthd(priv, 0xa097, 0x2080, 0x00000020);
+	nv_mthd(priv, 0xa097, 0x20c0, 0x00000030);
+	nv_mthd(priv, 0xa097, 0x2100, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x2140, 0x00000051);
+	nv_mthd(priv, 0xa097, 0x200c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x204c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x208c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x20cc, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x210c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x214c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x2010, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2050, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2090, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x20d0, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x2110, 0x00000003);
+	nv_mthd(priv, 0xa097, 0x2150, 0x00000004);
+	nv_mthd(priv, 0xa097, 0x0380, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0384, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0388, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x038c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x03ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0700, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0710, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0720, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0730, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0704, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0714, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0724, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0734, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0708, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0718, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0728, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0738, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2800, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2804, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2808, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x280c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2810, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2814, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2818, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x281c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2820, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2824, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2828, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x282c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2830, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2834, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2838, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x283c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2840, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2844, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2848, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x284c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2850, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2854, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2858, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x285c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2860, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2864, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2868, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x286c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2870, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2874, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2878, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x287c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2880, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2884, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2888, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x288c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2890, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2894, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2898, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x289c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28b0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28b4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28b8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28d4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28d8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28f0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x28fc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2900, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2904, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2908, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x290c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2910, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2914, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2918, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x291c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2920, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2924, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2928, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x292c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2930, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2934, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2938, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x293c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2940, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2944, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2948, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x294c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2950, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2954, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2958, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x295c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2960, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2964, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2968, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x296c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2970, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2974, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2978, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x297c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2980, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2984, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2988, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x298c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2990, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2994, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2998, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x299c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29b0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29b4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29b8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29d4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29d8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29f0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x29fc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0aa0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ac0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ae0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ba0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bc0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0be0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a24, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a64, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0aa4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ac4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ae4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b24, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b64, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ba4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bc4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0be4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a28, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a68, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0aa8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ac8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ae8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b28, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b68, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ba8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bc8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0be8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a0c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a2c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a4c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a6c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0aac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0acc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0aec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b0c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b2c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b4c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b6c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bcc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ab0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ad0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0af0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bb0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bd0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bf0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a34, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0a94, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ab4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ad4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0af4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b34, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0b94, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bb4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bd4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0bf4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ca0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cb0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cc0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cd0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ce0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cf0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c24, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c34, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c64, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c94, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ca4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cb4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cc4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cd4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ce4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cf4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c18, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c28, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c38, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c58, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c68, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c78, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c98, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ca8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cb8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cc8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cd8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ce8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0cf8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0c0c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c1c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c2c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c3c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c4c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c5c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c6c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c7c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c8c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0c9c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0cac, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0cbc, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0ccc, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0cdc, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0cec, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0cfc, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0d00, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d08, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d10, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d18, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d20, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d28, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d30, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d38, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d04, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d0c, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d14, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d1c, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d24, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d2c, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d34, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d3c, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e00, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e20, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e30, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e60, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e70, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ea0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0eb0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ec0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ed0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ee0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ef0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0e04, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e14, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e24, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e34, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e44, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e54, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e64, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e74, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e84, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e94, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ea4, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0eb4, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ec4, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ed4, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ee4, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ef4, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e08, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e18, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e28, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e38, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e48, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e58, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e68, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e78, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e88, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0e98, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ea8, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0eb8, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ec8, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ed8, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ee8, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0ef8, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d40, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d48, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d50, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d58, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d44, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d4c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d5c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1e00, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e20, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e40, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e60, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e80, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ea0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ec0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ee0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e04, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e24, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e44, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e64, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e84, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ea4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ec4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ee4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e08, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e28, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e48, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e68, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e88, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1ea8, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1ec8, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1ee8, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e0c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e2c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e4c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e6c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e8c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1eac, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ecc, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1eec, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e10, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e30, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e50, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e70, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e90, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1eb0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ed0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ef0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e14, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e34, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e54, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e74, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e94, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1eb4, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1ed4, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1ef4, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1e18, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e38, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e58, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e78, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1e98, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1eb8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ed8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1ef8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x3400, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3404, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3408, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x340c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3410, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3414, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3418, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x341c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3420, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3424, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3428, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x342c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3430, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3434, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3438, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x343c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3440, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3444, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3448, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x344c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3450, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3454, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3458, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x345c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3460, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3464, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3468, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x346c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3470, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3474, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3478, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x347c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3480, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3484, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3488, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x348c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3490, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3494, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3498, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x349c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34b0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34b4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34b8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34d4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34d8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34f0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x34fc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3500, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3504, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3508, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x350c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3510, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3514, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3518, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x351c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3520, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3524, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3528, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x352c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3530, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3534, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3538, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x353c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3540, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3544, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3548, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x354c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3550, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3554, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3558, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x355c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3560, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3564, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3568, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x356c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3570, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3574, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3578, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x357c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3580, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3584, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3588, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x358c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3590, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3594, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x3598, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x359c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35b0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35b4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35b8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35d4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35d8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35f0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x35fc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x030c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1944, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1514, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d68, 0x0000ffff);
+	nv_mthd(priv, 0xa097, 0x121c, 0x0fac6881);
+	nv_mthd(priv, 0xa097, 0x0fac, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1538, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0fe0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0fe4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0fe8, 0x00000014);
+	nv_mthd(priv, 0xa097, 0x0fec, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x0ff0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x179c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1228, 0x00000400);
+	nv_mthd(priv, 0xa097, 0x122c, 0x00000300);
+	nv_mthd(priv, 0xa097, 0x1230, 0x00010001);
+	nv_mthd(priv, 0xa097, 0x07f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x15b4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x15cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1534, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0fb0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x15d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x153c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x16b4, 0x00000003);
+	nv_mthd(priv, 0xa097, 0x0fbc, 0x0000ffff);
+	nv_mthd(priv, 0xa097, 0x0fc0, 0x0000ffff);
+	nv_mthd(priv, 0xa097, 0x0fc4, 0x0000ffff);
+	nv_mthd(priv, 0xa097, 0x0fc8, 0x0000ffff);
+	nv_mthd(priv, 0xa097, 0x0df8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0dfc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1948, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1970, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x161c, 0x000009f0);
+	nv_mthd(priv, 0xa097, 0x0dcc, 0x00000010);
+	nv_mthd(priv, 0xa097, 0x163c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x15e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1160, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1164, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1168, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x116c, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1170, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1174, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1178, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x117c, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1180, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1184, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1188, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x118c, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1190, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1194, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1198, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x119c, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11a0, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11a4, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11a8, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11ac, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11b0, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11b4, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11b8, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11bc, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11c0, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11c4, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11c8, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11cc, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11d0, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11d4, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11d8, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x11dc, 0x25e00040);
+	nv_mthd(priv, 0xa097, 0x1880, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1884, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1888, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x188c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1890, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1894, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1898, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x189c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18b0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18b4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18b8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18d0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18d4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18d8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18e0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18f0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x18fc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x17c8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x17cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x17d0, 0x000000ff);
+	nv_mthd(priv, 0xa097, 0x17d4, 0xffffffff);
+	nv_mthd(priv, 0xa097, 0x17d8, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x17dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x15f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x15f8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1434, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1438, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d74, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0dec, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x13a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1318, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1644, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0748, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0de8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1648, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x12a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1120, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1124, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1128, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x112c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1118, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x164c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1658, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1910, 0x00000290);
+	nv_mthd(priv, 0xa097, 0x1518, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x165c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1520, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1604, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1570, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x13b0, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x13b4, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x020c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1670, 0x30201000);
+	nv_mthd(priv, 0xa097, 0x1674, 0x70605040);
+	nv_mthd(priv, 0xa097, 0x1678, 0xb8a89888);
+	nv_mthd(priv, 0xa097, 0x167c, 0xf8e8d8c8);
+	nv_mthd(priv, 0xa097, 0x166c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1680, 0x00ffff00);
+	nv_mthd(priv, 0xa097, 0x12d0, 0x00000003);
+	nv_mthd(priv, 0xa097, 0x12d4, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1684, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1688, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0dac, 0x00001b02);
+	nv_mthd(priv, 0xa097, 0x0db0, 0x00001b02);
+	nv_mthd(priv, 0xa097, 0x0db4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x168c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x15bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x156c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x187c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1110, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0dc0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0dc4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0dc8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1234, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1690, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x12ac, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0790, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0794, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0798, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x079c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07a0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x077c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1000, 0x00000010);
+	nv_mthd(priv, 0xa097, 0x10fc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1290, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0218, 0x00000010);
+	nv_mthd(priv, 0xa097, 0x12d8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x12dc, 0x00000010);
+	nv_mthd(priv, 0xa097, 0x0d94, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x155c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1560, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1564, 0x00000fff);
+	nv_mthd(priv, 0xa097, 0x1574, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1578, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x157c, 0x000fffff);
+	nv_mthd(priv, 0xa097, 0x1354, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1610, 0x00000012);
+	nv_mthd(priv, 0xa097, 0x1608, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x160c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x260c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x162c, 0x00000003);
+	nv_mthd(priv, 0xa097, 0x0210, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0320, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0324, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0328, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x032c, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0330, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0334, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0338, 0x3f800000);
+	nv_mthd(priv, 0xa097, 0x0750, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0760, 0x39291909);
+	nv_mthd(priv, 0xa097, 0x0764, 0x79695949);
+	nv_mthd(priv, 0xa097, 0x0768, 0xb9a99989);
+	nv_mthd(priv, 0xa097, 0x076c, 0xf9e9d9c9);
+	nv_mthd(priv, 0xa097, 0x0770, 0x30201000);
+	nv_mthd(priv, 0xa097, 0x0774, 0x70605040);
+	nv_mthd(priv, 0xa097, 0x0778, 0x00009080);
+	nv_mthd(priv, 0xa097, 0x0780, 0x39291909);
+	nv_mthd(priv, 0xa097, 0x0784, 0x79695949);
+	nv_mthd(priv, 0xa097, 0x0788, 0xb9a99989);
+	nv_mthd(priv, 0xa097, 0x078c, 0xf9e9d9c9);
+	nv_mthd(priv, 0xa097, 0x07d0, 0x30201000);
+	nv_mthd(priv, 0xa097, 0x07d4, 0x70605040);
+	nv_mthd(priv, 0xa097, 0x07d8, 0x00009080);
+	nv_mthd(priv, 0xa097, 0x037c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0740, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0744, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x2600, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1918, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x191c, 0x00000900);
+	nv_mthd(priv, 0xa097, 0x1920, 0x00000405);
+	nv_mthd(priv, 0xa097, 0x1308, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1924, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x13ac, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x192c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x193c, 0x00002c1c);
+	nv_mthd(priv, 0xa097, 0x0d7c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x02c0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1510, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1940, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ff4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0ff8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x194c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1950, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1968, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1590, 0x0000003f);
+	nv_mthd(priv, 0xa097, 0x07e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07f0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07f4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x196c, 0x00000011);
+	nv_mthd(priv, 0xa097, 0x02e4, 0x0000b001);
+	nv_mthd(priv, 0xa097, 0x036c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0370, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x197c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0fcc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0fd0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x02d8, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x1980, 0x00000080);
+	nv_mthd(priv, 0xa097, 0x1504, 0x00000080);
+	nv_mthd(priv, 0xa097, 0x1984, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0300, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x13a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x12ec, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1310, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1314, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1380, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1384, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1388, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x138c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1390, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1394, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x139c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1398, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1594, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1598, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x159c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x15a0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x15a4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0f54, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f58, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f5c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x19bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f9c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0fa0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x12cc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x12e8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x130c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1360, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1364, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1368, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x136c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1370, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1374, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1378, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x137c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x133c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1340, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1344, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1348, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x134c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1350, 0x00000002);
+	nv_mthd(priv, 0xa097, 0x1358, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x12e4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x131c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1320, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1324, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1328, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x19c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1140, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x19c4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x19c8, 0x00001500);
+	nv_mthd(priv, 0xa097, 0x135c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x19e0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19e4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19e8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19ec, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19f0, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19f4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19f8, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19fc, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x19cc, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x15b8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a00, 0x00001111);
+	nv_mthd(priv, 0xa097, 0x1a04, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a08, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a0c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a10, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a14, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a18, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1a1c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d6c, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x0d70, 0xffff0000);
+	nv_mthd(priv, 0xa097, 0x10f8, 0x00001010);
+	nv_mthd(priv, 0xa097, 0x0d80, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d84, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d88, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d8c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0d90, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0da0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07a4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x07a8, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1508, 0x80000000);
+	nv_mthd(priv, 0xa097, 0x150c, 0x40000000);
+	nv_mthd(priv, 0xa097, 0x1668, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0318, 0x00000008);
+	nv_mthd(priv, 0xa097, 0x031c, 0x00000008);
+	nv_mthd(priv, 0xa097, 0x0d9c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x0374, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0378, 0x00000020);
+	nv_mthd(priv, 0xa097, 0x07dc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x074c, 0x00000055);
+	nv_mthd(priv, 0xa097, 0x1420, 0x00000003);
+	nv_mthd(priv, 0xa097, 0x17bc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x17c0, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x17c4, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1008, 0x00000008);
+	nv_mthd(priv, 0xa097, 0x100c, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x1010, 0x0000012c);
+	nv_mthd(priv, 0xa097, 0x0d60, 0x00000040);
+	nv_mthd(priv, 0xa097, 0x075c, 0x00000003);
+	nv_mthd(priv, 0xa097, 0x1018, 0x00000020);
+	nv_mthd(priv, 0xa097, 0x101c, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1020, 0x00000020);
+	nv_mthd(priv, 0xa097, 0x1024, 0x00000001);
+	nv_mthd(priv, 0xa097, 0x1444, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x1448, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x144c, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0360, 0x20164010);
+	nv_mthd(priv, 0xa097, 0x0364, 0x00000020);
+	nv_mthd(priv, 0xa097, 0x0368, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0de4, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0204, 0x00000006);
+	nv_mthd(priv, 0xa097, 0x0208, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x02cc, 0x003fffff);
+	nv_mthd(priv, 0xa097, 0x02d0, 0x003fffff);
+	nv_mthd(priv, 0xa097, 0x1220, 0x00000005);
+	nv_mthd(priv, 0xa097, 0x0fdc, 0x00000000);
+	nv_mthd(priv, 0xa097, 0x0f98, 0x00400008);
+	nv_mthd(priv, 0xa097, 0x1284, 0x08000080);
+	nv_mthd(priv, 0xa097, 0x1450, 0x00400008);
+	nv_mthd(priv, 0xa097, 0x1454, 0x08000080);
+	nv_mthd(priv, 0xa097, 0x0214, 0x00000000);
+}
+
+static void
+nve0_grctx_generate_902d(struct nvc0_graph_priv *priv)
+{
+	nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
+	nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0208, 0x00000020);
+	nv_mthd(priv, 0x902d, 0x020c, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0210, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0214, 0x00000080);
+	nv_mthd(priv, 0x902d, 0x0218, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x021c, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x0220, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0224, 0x00000000);
+	nv_mthd(priv, 0x902d, 0x0230, 0x000000cf);
+	nv_mthd(priv, 0x902d, 0x0234, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0238, 0x00000020);
+	nv_mthd(priv, 0x902d, 0x023c, 0x00000001);
+	nv_mthd(priv, 0x902d, 0x0244, 0x00000080);
+	nv_mthd(priv, 0x902d, 0x0248, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x024c, 0x00000100);
+	nv_mthd(priv, 0x902d, 0x3410, 0x00000000);
+}
+
+static void
+nve0_graph_generate_unk40xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404010, 0x0);
+	nv_wr32(priv, 0x404014, 0x0);
+	nv_wr32(priv, 0x404018, 0x0);
+	nv_wr32(priv, 0x40401c, 0x0);
+	nv_wr32(priv, 0x404020, 0x0);
+	nv_wr32(priv, 0x404024, 0xe000);
+	nv_wr32(priv, 0x404028, 0x0);
+	nv_wr32(priv, 0x4040a8, 0x0);
+	nv_wr32(priv, 0x4040ac, 0x0);
+	nv_wr32(priv, 0x4040b0, 0x0);
+	nv_wr32(priv, 0x4040b4, 0x0);
+	nv_wr32(priv, 0x4040b8, 0x0);
+	nv_wr32(priv, 0x4040bc, 0x0);
+	nv_wr32(priv, 0x4040c0, 0x0);
+	nv_wr32(priv, 0x4040c4, 0x0);
+	nv_wr32(priv, 0x4040c8, 0xf800008f);
+	nv_wr32(priv, 0x4040d0, 0x0);
+	nv_wr32(priv, 0x4040d4, 0x0);
+	nv_wr32(priv, 0x4040d8, 0x0);
+	nv_wr32(priv, 0x4040dc, 0x0);
+	nv_wr32(priv, 0x4040e0, 0x0);
+	nv_wr32(priv, 0x4040e4, 0x0);
+	nv_wr32(priv, 0x4040e8, 0x1000);
+	nv_wr32(priv, 0x4040f8, 0x0);
+	nv_wr32(priv, 0x404130, 0x0);
+	nv_wr32(priv, 0x404134, 0x0);
+	nv_wr32(priv, 0x404138, 0x20000040);
+	nv_wr32(priv, 0x404150, 0x2e);
+	nv_wr32(priv, 0x404154, 0x400);
+	nv_wr32(priv, 0x404158, 0x200);
+	nv_wr32(priv, 0x404164, 0x55);
+	nv_wr32(priv, 0x4041a0, 0x0);
+	nv_wr32(priv, 0x4041a4, 0x0);
+	nv_wr32(priv, 0x4041a8, 0x0);
+	nv_wr32(priv, 0x4041ac, 0x0);
+	nv_wr32(priv, 0x404200, 0x0);
+	nv_wr32(priv, 0x404204, 0x0);
+	nv_wr32(priv, 0x404208, 0x0);
+	nv_wr32(priv, 0x40420c, 0x0);
+}
+
+static void
+nve0_graph_generate_unk44xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404404, 0x0);
+	nv_wr32(priv, 0x404408, 0x0);
+	nv_wr32(priv, 0x40440c, 0x0);
+	nv_wr32(priv, 0x404410, 0x0);
+	nv_wr32(priv, 0x404414, 0x0);
+	nv_wr32(priv, 0x404418, 0x0);
+	nv_wr32(priv, 0x40441c, 0x0);
+	nv_wr32(priv, 0x404420, 0x0);
+	nv_wr32(priv, 0x404424, 0x0);
+	nv_wr32(priv, 0x404428, 0x0);
+	nv_wr32(priv, 0x40442c, 0x0);
+	nv_wr32(priv, 0x404430, 0x0);
+	nv_wr32(priv, 0x404434, 0x0);
+	nv_wr32(priv, 0x404438, 0x0);
+	nv_wr32(priv, 0x404460, 0x0);
+	nv_wr32(priv, 0x404464, 0x0);
+	nv_wr32(priv, 0x404468, 0xffffff);
+	nv_wr32(priv, 0x40446c, 0x0);
+	nv_wr32(priv, 0x404480, 0x1);
+	nv_wr32(priv, 0x404498, 0x1);
+}
+
+static void
+nve0_graph_generate_unk46xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404604, 0x14);
+	nv_wr32(priv, 0x404608, 0x0);
+	nv_wr32(priv, 0x40460c, 0x3fff);
+	nv_wr32(priv, 0x404610, 0x100);
+	nv_wr32(priv, 0x404618, 0x0);
+	nv_wr32(priv, 0x40461c, 0x0);
+	nv_wr32(priv, 0x404620, 0x0);
+	nv_wr32(priv, 0x404624, 0x0);
+	nv_wr32(priv, 0x40462c, 0x0);
+	nv_wr32(priv, 0x404630, 0x0);
+	nv_wr32(priv, 0x404640, 0x0);
+	nv_wr32(priv, 0x404654, 0x0);
+	nv_wr32(priv, 0x404660, 0x0);
+	nv_wr32(priv, 0x404678, 0x0);
+	nv_wr32(priv, 0x40467c, 0x2);
+	nv_wr32(priv, 0x404680, 0x0);
+	nv_wr32(priv, 0x404684, 0x0);
+	nv_wr32(priv, 0x404688, 0x0);
+	nv_wr32(priv, 0x40468c, 0x0);
+	nv_wr32(priv, 0x404690, 0x0);
+	nv_wr32(priv, 0x404694, 0x0);
+	nv_wr32(priv, 0x404698, 0x0);
+	nv_wr32(priv, 0x40469c, 0x0);
+	nv_wr32(priv, 0x4046a0, 0x7f0080);
+	nv_wr32(priv, 0x4046a4, 0x0);
+	nv_wr32(priv, 0x4046a8, 0x0);
+	nv_wr32(priv, 0x4046ac, 0x0);
+	nv_wr32(priv, 0x4046b0, 0x0);
+	nv_wr32(priv, 0x4046b4, 0x0);
+	nv_wr32(priv, 0x4046b8, 0x0);
+	nv_wr32(priv, 0x4046bc, 0x0);
+	nv_wr32(priv, 0x4046c0, 0x0);
+	nv_wr32(priv, 0x4046c8, 0x0);
+	nv_wr32(priv, 0x4046cc, 0x0);
+	nv_wr32(priv, 0x4046d0, 0x0);
+}
+
+static void
+nve0_graph_generate_unk47xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x404700, 0x0);
+	nv_wr32(priv, 0x404704, 0x0);
+	nv_wr32(priv, 0x404708, 0x0);
+	nv_wr32(priv, 0x404718, 0x0);
+	nv_wr32(priv, 0x40471c, 0x0);
+	nv_wr32(priv, 0x404720, 0x0);
+	nv_wr32(priv, 0x404724, 0x0);
+	nv_wr32(priv, 0x404728, 0x0);
+	nv_wr32(priv, 0x40472c, 0x0);
+	nv_wr32(priv, 0x404730, 0x0);
+	nv_wr32(priv, 0x404734, 0x100);
+	nv_wr32(priv, 0x404738, 0x0);
+	nv_wr32(priv, 0x40473c, 0x0);
+	nv_wr32(priv, 0x404744, 0x0);
+	nv_wr32(priv, 0x404748, 0x0);
+	nv_wr32(priv, 0x404754, 0x0);
+}
+
+static void
+nve0_graph_generate_unk58xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x405800, 0xf8000bf);
+	nv_wr32(priv, 0x405830, 0x2180648);
+	nv_wr32(priv, 0x405834, 0x8000000);
+	nv_wr32(priv, 0x405838, 0x0);
+	nv_wr32(priv, 0x405854, 0x0);
+	nv_wr32(priv, 0x405870, 0x1);
+	nv_wr32(priv, 0x405874, 0x1);
+	nv_wr32(priv, 0x405878, 0x1);
+	nv_wr32(priv, 0x40587c, 0x1);
+	nv_wr32(priv, 0x405a00, 0x0);
+	nv_wr32(priv, 0x405a04, 0x0);
+	nv_wr32(priv, 0x405a18, 0x0);
+	nv_wr32(priv, 0x405b00, 0x0);
+	nv_wr32(priv, 0x405b10, 0x1000);
+}
+
+static void
+nve0_graph_generate_unk60xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x406020, 0x4103c1);
+	nv_wr32(priv, 0x406028, 0x1);
+	nv_wr32(priv, 0x40602c, 0x1);
+	nv_wr32(priv, 0x406030, 0x1);
+	nv_wr32(priv, 0x406034, 0x1);
+}
+
+static void
+nve0_graph_generate_unk64xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x4064a8, 0x0);
+	nv_wr32(priv, 0x4064ac, 0x3fff);
+	nv_wr32(priv, 0x4064b4, 0x0);
+	nv_wr32(priv, 0x4064b8, 0x0);
+	nv_wr32(priv, 0x4064c0, 0x801a00f0);
+	nv_wr32(priv, 0x4064c4, 0x192ffff);
+	nv_wr32(priv, 0x4064c8, 0x1800600);
+	nv_wr32(priv, 0x4064cc, 0x0);
+	nv_wr32(priv, 0x4064d0, 0x0);
+	nv_wr32(priv, 0x4064d4, 0x0);
+	nv_wr32(priv, 0x4064d8, 0x0);
+	nv_wr32(priv, 0x4064dc, 0x0);
+	nv_wr32(priv, 0x4064e0, 0x0);
+	nv_wr32(priv, 0x4064e4, 0x0);
+	nv_wr32(priv, 0x4064e8, 0x0);
+	nv_wr32(priv, 0x4064ec, 0x0);
+	nv_wr32(priv, 0x4064fc, 0x22a);
+}
+
+static void
+nve0_graph_generate_unk70xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x407040, 0x0);
+}
+
+static void
+nve0_graph_generate_unk78xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x407804, 0x23);
+	nv_wr32(priv, 0x40780c, 0xa418820);
+	nv_wr32(priv, 0x407810, 0x62080e6);
+	nv_wr32(priv, 0x407814, 0x20398a4);
+	nv_wr32(priv, 0x407818, 0xe629062);
+	nv_wr32(priv, 0x40781c, 0xa418820);
+	nv_wr32(priv, 0x407820, 0xe6);
+	nv_wr32(priv, 0x4078bc, 0x103);
+}
+
+static void
+nve0_graph_generate_unk80xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x408000, 0x0);
+	nv_wr32(priv, 0x408004, 0x0);
+	nv_wr32(priv, 0x408008, 0x30);
+	nv_wr32(priv, 0x40800c, 0x0);
+	nv_wr32(priv, 0x408010, 0x0);
+	nv_wr32(priv, 0x408014, 0x69);
+	nv_wr32(priv, 0x408018, 0xe100e100);
+	nv_wr32(priv, 0x408064, 0x0);
+}
+
+static void
+nve0_graph_generate_unk88xx(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x408800, 0x2802a3c);
+	nv_wr32(priv, 0x408804, 0x40);
+	nv_wr32(priv, 0x408808, 0x1043e005);
+	nv_wr32(priv, 0x408840, 0xb);
+	nv_wr32(priv, 0x408900, 0x3080b801);
+	nv_wr32(priv, 0x408904, 0x62000001);
+	nv_wr32(priv, 0x408908, 0xc8102f);
+	nv_wr32(priv, 0x408980, 0x11d);
+}
+
+static void
+nve0_graph_generate_gpc(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x418380, 0x16);
+	nv_wr32(priv, 0x418400, 0x38004e00);
+	nv_wr32(priv, 0x418404, 0x71e0ffff);
+	nv_wr32(priv, 0x41840c, 0x1008);
+	nv_wr32(priv, 0x418410, 0xfff0fff);
+	nv_wr32(priv, 0x418414, 0x2200fff);
+	nv_wr32(priv, 0x418450, 0x0);
+	nv_wr32(priv, 0x418454, 0x0);
+	nv_wr32(priv, 0x418458, 0x0);
+	nv_wr32(priv, 0x41845c, 0x0);
+	nv_wr32(priv, 0x418460, 0x0);
+	nv_wr32(priv, 0x418464, 0x0);
+	nv_wr32(priv, 0x418468, 0x1);
+	nv_wr32(priv, 0x41846c, 0x0);
+	nv_wr32(priv, 0x418470, 0x0);
+	nv_wr32(priv, 0x418600, 0x1f);
+	nv_wr32(priv, 0x418684, 0xf);
+	nv_wr32(priv, 0x418700, 0x2);
+	nv_wr32(priv, 0x418704, 0x80);
+	nv_wr32(priv, 0x418708, 0x0);
+	nv_wr32(priv, 0x41870c, 0x0);
+	nv_wr32(priv, 0x418710, 0x0);
+	nv_wr32(priv, 0x418800, 0x7006860a);
+	nv_wr32(priv, 0x418808, 0x0);
+	nv_wr32(priv, 0x41880c, 0x0);
+	nv_wr32(priv, 0x418810, 0x0);
+	nv_wr32(priv, 0x418828, 0x44);
+	nv_wr32(priv, 0x418830, 0x10000001);
+	nv_wr32(priv, 0x4188d8, 0x8);
+	nv_wr32(priv, 0x4188e0, 0x1000000);
+	nv_wr32(priv, 0x4188e8, 0x0);
+	nv_wr32(priv, 0x4188ec, 0x0);
+	nv_wr32(priv, 0x4188f0, 0x0);
+	nv_wr32(priv, 0x4188f4, 0x0);
+	nv_wr32(priv, 0x4188f8, 0x0);
+	nv_wr32(priv, 0x4188fc, 0x20100018);
+	nv_wr32(priv, 0x41891c, 0xff00ff);
+	nv_wr32(priv, 0x418924, 0x0);
+	nv_wr32(priv, 0x418928, 0xffff00);
+	nv_wr32(priv, 0x41892c, 0xff00);
+	nv_wr32(priv, 0x418a00, 0x0);
+	nv_wr32(priv, 0x418a04, 0x0);
+	nv_wr32(priv, 0x418a08, 0x0);
+	nv_wr32(priv, 0x418a0c, 0x10000);
+	nv_wr32(priv, 0x418a10, 0x0);
+	nv_wr32(priv, 0x418a14, 0x0);
+	nv_wr32(priv, 0x418a18, 0x0);
+	nv_wr32(priv, 0x418a20, 0x0);
+	nv_wr32(priv, 0x418a24, 0x0);
+	nv_wr32(priv, 0x418a28, 0x0);
+	nv_wr32(priv, 0x418a2c, 0x10000);
+	nv_wr32(priv, 0x418a30, 0x0);
+	nv_wr32(priv, 0x418a34, 0x0);
+	nv_wr32(priv, 0x418a38, 0x0);
+	nv_wr32(priv, 0x418a40, 0x0);
+	nv_wr32(priv, 0x418a44, 0x0);
+	nv_wr32(priv, 0x418a48, 0x0);
+	nv_wr32(priv, 0x418a4c, 0x10000);
+	nv_wr32(priv, 0x418a50, 0x0);
+	nv_wr32(priv, 0x418a54, 0x0);
+	nv_wr32(priv, 0x418a58, 0x0);
+	nv_wr32(priv, 0x418a60, 0x0);
+	nv_wr32(priv, 0x418a64, 0x0);
+	nv_wr32(priv, 0x418a68, 0x0);
+	nv_wr32(priv, 0x418a6c, 0x10000);
+	nv_wr32(priv, 0x418a70, 0x0);
+	nv_wr32(priv, 0x418a74, 0x0);
+	nv_wr32(priv, 0x418a78, 0x0);
+	nv_wr32(priv, 0x418a80, 0x0);
+	nv_wr32(priv, 0x418a84, 0x0);
+	nv_wr32(priv, 0x418a88, 0x0);
+	nv_wr32(priv, 0x418a8c, 0x10000);
+	nv_wr32(priv, 0x418a90, 0x0);
+	nv_wr32(priv, 0x418a94, 0x0);
+	nv_wr32(priv, 0x418a98, 0x0);
+	nv_wr32(priv, 0x418aa0, 0x0);
+	nv_wr32(priv, 0x418aa4, 0x0);
+	nv_wr32(priv, 0x418aa8, 0x0);
+	nv_wr32(priv, 0x418aac, 0x10000);
+	nv_wr32(priv, 0x418ab0, 0x0);
+	nv_wr32(priv, 0x418ab4, 0x0);
+	nv_wr32(priv, 0x418ab8, 0x0);
+	nv_wr32(priv, 0x418ac0, 0x0);
+	nv_wr32(priv, 0x418ac4, 0x0);
+	nv_wr32(priv, 0x418ac8, 0x0);
+	nv_wr32(priv, 0x418acc, 0x10000);
+	nv_wr32(priv, 0x418ad0, 0x0);
+	nv_wr32(priv, 0x418ad4, 0x0);
+	nv_wr32(priv, 0x418ad8, 0x0);
+	nv_wr32(priv, 0x418ae0, 0x0);
+	nv_wr32(priv, 0x418ae4, 0x0);
+	nv_wr32(priv, 0x418ae8, 0x0);
+	nv_wr32(priv, 0x418aec, 0x10000);
+	nv_wr32(priv, 0x418af0, 0x0);
+	nv_wr32(priv, 0x418af4, 0x0);
+	nv_wr32(priv, 0x418af8, 0x0);
+	nv_wr32(priv, 0x418b00, 0x6);
+	nv_wr32(priv, 0x418b08, 0xa418820);
+	nv_wr32(priv, 0x418b0c, 0x62080e6);
+	nv_wr32(priv, 0x418b10, 0x20398a4);
+	nv_wr32(priv, 0x418b14, 0xe629062);
+	nv_wr32(priv, 0x418b18, 0xa418820);
+	nv_wr32(priv, 0x418b1c, 0xe6);
+	nv_wr32(priv, 0x418bb8, 0x103);
+	nv_wr32(priv, 0x418c08, 0x1);
+	nv_wr32(priv, 0x418c10, 0x0);
+	nv_wr32(priv, 0x418c14, 0x0);
+	nv_wr32(priv, 0x418c18, 0x0);
+	nv_wr32(priv, 0x418c1c, 0x0);
+	nv_wr32(priv, 0x418c20, 0x0);
+	nv_wr32(priv, 0x418c24, 0x0);
+	nv_wr32(priv, 0x418c28, 0x0);
+	nv_wr32(priv, 0x418c2c, 0x0);
+	nv_wr32(priv, 0x418c40, 0xffffffff);
+	nv_wr32(priv, 0x418c6c, 0x1);
+	nv_wr32(priv, 0x418c80, 0x20200004);
+	nv_wr32(priv, 0x418c8c, 0x1);
+	nv_wr32(priv, 0x419000, 0x780);
+	nv_wr32(priv, 0x419004, 0x0);
+	nv_wr32(priv, 0x419008, 0x0);
+	nv_wr32(priv, 0x419014, 0x4);
+}
+
+static void
+nve0_graph_generate_tpc(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x419848, 0x0);
+	nv_wr32(priv, 0x419864, 0x129);
+	nv_wr32(priv, 0x419888, 0x0);
+	nv_wr32(priv, 0x419a00, 0xf0);
+	nv_wr32(priv, 0x419a04, 0x1);
+	nv_wr32(priv, 0x419a08, 0x21);
+	nv_wr32(priv, 0x419a0c, 0x20000);
+	nv_wr32(priv, 0x419a10, 0x0);
+	nv_wr32(priv, 0x419a14, 0x200);
+	nv_wr32(priv, 0x419a1c, 0xc000);
+	nv_wr32(priv, 0x419a20, 0x800);
+	nv_wr32(priv, 0x419a30, 0x1);
+	nv_wr32(priv, 0x419ac4, 0x37f440);
+	nv_wr32(priv, 0x419c00, 0xa);
+	nv_wr32(priv, 0x419c04, 0x80000006);
+	nv_wr32(priv, 0x419c08, 0x2);
+	nv_wr32(priv, 0x419c20, 0x0);
+	nv_wr32(priv, 0x419c24, 0x84210);
+	nv_wr32(priv, 0x419c28, 0x3efbefbe);
+	nv_wr32(priv, 0x419ce8, 0x0);
+	nv_wr32(priv, 0x419cf4, 0x3203);
+	nv_wr32(priv, 0x419e04, 0x0);
+	nv_wr32(priv, 0x419e08, 0x0);
+	nv_wr32(priv, 0x419e0c, 0x0);
+	nv_wr32(priv, 0x419e10, 0x402);
+	nv_wr32(priv, 0x419e44, 0x13eff2);
+	nv_wr32(priv, 0x419e48, 0x0);
+	nv_wr32(priv, 0x419e4c, 0x7f);
+	nv_wr32(priv, 0x419e50, 0x0);
+	nv_wr32(priv, 0x419e54, 0x0);
+	nv_wr32(priv, 0x419e58, 0x0);
+	nv_wr32(priv, 0x419e5c, 0x0);
+	nv_wr32(priv, 0x419e60, 0x0);
+	nv_wr32(priv, 0x419e64, 0x0);
+	nv_wr32(priv, 0x419e68, 0x0);
+	nv_wr32(priv, 0x419e6c, 0x0);
+	nv_wr32(priv, 0x419e70, 0x0);
+	nv_wr32(priv, 0x419e74, 0x0);
+	nv_wr32(priv, 0x419e78, 0x0);
+	nv_wr32(priv, 0x419e7c, 0x0);
+	nv_wr32(priv, 0x419e80, 0x0);
+	nv_wr32(priv, 0x419e84, 0x0);
+	nv_wr32(priv, 0x419e88, 0x0);
+	nv_wr32(priv, 0x419e8c, 0x0);
+	nv_wr32(priv, 0x419e90, 0x0);
+	nv_wr32(priv, 0x419e94, 0x0);
+	nv_wr32(priv, 0x419e98, 0x0);
+	nv_wr32(priv, 0x419eac, 0x1fcf);
+	nv_wr32(priv, 0x419eb0, 0xd3f);
+	nv_wr32(priv, 0x419ec8, 0x1304f);
+	nv_wr32(priv, 0x419f30, 0x0);
+	nv_wr32(priv, 0x419f34, 0x0);
+	nv_wr32(priv, 0x419f38, 0x0);
+	nv_wr32(priv, 0x419f3c, 0x0);
+	nv_wr32(priv, 0x419f40, 0x0);
+	nv_wr32(priv, 0x419f44, 0x0);
+	nv_wr32(priv, 0x419f48, 0x0);
+	nv_wr32(priv, 0x419f4c, 0x0);
+	nv_wr32(priv, 0x419f58, 0x0);
+	nv_wr32(priv, 0x419f78, 0xb);
+}
+
+static void
+nve0_graph_generate_tpcunk(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x41be24, 0x6);
+	nv_wr32(priv, 0x41bec0, 0x12180000);
+	nv_wr32(priv, 0x41bec4, 0x37f7f);
+	nv_wr32(priv, 0x41bee4, 0x6480430);
+	nv_wr32(priv, 0x41bf00, 0xa418820);
+	nv_wr32(priv, 0x41bf04, 0x62080e6);
+	nv_wr32(priv, 0x41bf08, 0x20398a4);
+	nv_wr32(priv, 0x41bf0c, 0xe629062);
+	nv_wr32(priv, 0x41bf10, 0xa418820);
+	nv_wr32(priv, 0x41bf14, 0xe6);
+	nv_wr32(priv, 0x41bfd0, 0x900103);
+	nv_wr32(priv, 0x41bfe0, 0x400001);
+	nv_wr32(priv, 0x41bfe4, 0x0);
+}
+
+int
+nve0_grctx_generate(struct nvc0_graph_priv *priv)
+{
+	struct nvc0_grctx info;
+	int ret, i, gpc, tpc, id;
+	u32 data[6] = {}, data2[2] = {}, tmp;
+	u32 tpc_set = 0, tpc_mask = 0;
+	u32 magic[GPC_MAX][2], offset;
+	u8 tpcnr[GPC_MAX], a, b;
+	u8 shift, ntpcv;
+
+	ret = nvc0_grctx_init(priv, &info);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x400204, 0x00000000);
+	nv_wr32(priv, 0x400208, 0x00000000);
+
+	nve0_graph_generate_unk40xx(priv);
+	nve0_graph_generate_unk44xx(priv);
+	nve0_graph_generate_unk46xx(priv);
+	nve0_graph_generate_unk47xx(priv);
+	nve0_graph_generate_unk58xx(priv);
+	nve0_graph_generate_unk60xx(priv);
+	nve0_graph_generate_unk64xx(priv);
+	nve0_graph_generate_unk70xx(priv);
+	nve0_graph_generate_unk78xx(priv);
+	nve0_graph_generate_unk80xx(priv);
+	nve0_graph_generate_unk88xx(priv);
+	nve0_graph_generate_gpc(priv);
+	nve0_graph_generate_tpc(priv);
+	nve0_graph_generate_tpcunk(priv);
+
+	nv_wr32(priv, 0x404154, 0x0);
+
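+	/* reserve per-context buffers, then record register writes to be
+	 * replayed for each channel; where a shift is given, the value is
+	 * patched with the selected buffer's address (behaviour inferred
+	 * from the mmio_data()/mmio_list() macros, not verified here)
+	 */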
+	mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+	mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
+	mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+	mmio_list(0x40800c, 0x00000000,  8, 1);
+	mmio_list(0x408010, 0x80000000,  0, 0);
+	mmio_list(0x419004, 0x00000000,  8, 1);
+	mmio_list(0x419008, 0x00000000,  0, 0);
+	mmio_list(0x4064cc, 0x80000000,  0, 0);
+	mmio_list(0x408004, 0x00000000,  8, 0);
+	mmio_list(0x408008, 0x80000030,  0, 0);
+	mmio_list(0x418808, 0x00000000,  8, 0);
+	mmio_list(0x41880c, 0x80000030,  0, 0);
+	mmio_list(0x4064c8, 0x01800600,  0, 0);
+	mmio_list(0x418810, 0x80000000, 12, 2);
+	mmio_list(0x419848, 0x10000000, 12, 2);
+	mmio_list(0x405830, 0x02180648,  0, 0);
+	mmio_list(0x4064c4, 0x0192ffff,  0, 0);
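+	/* per-GPC "magic" values: sizes scaled by the GPC's TPC count in the
+	 * high half, with a running offset accumulated in the low half
+	 */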
+	for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
+		u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
+		u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
+		magic[gpc][0]  = 0x10000000 | (magic0 << 16) | offset;
+		magic[gpc][1]  = 0x00000000 | (magic1 << 16);
+		offset += 0x0324 * priv->tpc_nr[gpc];
+	}
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
+		mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
+		offset += 0x07ff * priv->tpc_nr[gpc];
+	}
+	mmio_list(0x17e91c, 0x06060609, 0, 0);
+	mmio_list(0x17e920, 0x00090a05, 0, 0);
+
+	nv_wr32(priv, 0x418c6c, 0x1);
+	nv_wr32(priv, 0x41980c, 0x10);
+	nv_wr32(priv, 0x41be08, 0x4);
+	nv_wr32(priv, 0x4064c0, 0x801a00f0);
+	nv_wr32(priv, 0x405800, 0xf8000bf);
+	nv_wr32(priv, 0x419c00, 0xa);
+
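+	/* assign a sequential ID to every present TPC, iterating TPC-major
+	 * so the IDs are spread across the GPCs
+	 */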
+	for (tpc = 0, id = 0; tpc < 4; tpc++) {
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+			if (tpc < priv->tpc_nr[gpc]) {
+				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0698), id);
+				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x04e8), id);
+				nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+				nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0088), id++);
+			}
+
+			nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
+			nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+		}
+	}
+
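+	/* pack each GPC's TPC count into one nibble of a single word */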
+	tmp = 0;
+	for (i = 0; i < priv->gpc_nr; i++)
+		tmp |= priv->tpc_nr[i] << (i * 4);
+	nv_wr32(priv, 0x406028, tmp);
+	nv_wr32(priv, 0x405870, tmp);
+
+	nv_wr32(priv, 0x40602c, 0x0);
+	nv_wr32(priv, 0x405874, 0x0);
+	nv_wr32(priv, 0x406030, 0x0);
+	nv_wr32(priv, 0x405878, 0x0);
+	nv_wr32(priv, 0x406034, 0x0);
+	nv_wr32(priv, 0x40587c, 0x0);
+
+	/* calculate first set of magics */
+	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+
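+	/* walk the GPCs round-robin, packing the owning GPC index of each
+	 * TPC into data[]: five bits per TPC, six TPCs per word
+	 */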
+	gpc = -1;
+	for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+		do {
+			gpc = (gpc + 1) % priv->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpcnr[gpc]--;
+
+		data[tpc / 6] |= gpc << ((tpc % 6) * 5);
+	}
+
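+	/* fill the remaining (nonexistent) TPC slots with GPC index 7 */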
+	for (; tpc < 32; tpc++)
+		data[tpc / 6] |= 7 << ((tpc % 6) * 5);
+
+	/* and the second... */
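+	/* normalise tpc_total to a five-bit value (ntpcv) with bit 4 set,
+	 * remembering how many shifts that took
+	 */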
+	shift = 0;
+	ntpcv = priv->tpc_total;
+	while (!(ntpcv & (1 << 4))) {
+		ntpcv <<= 1;
+		shift++;
+	}
+
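+	/* data2[0] packs ntpcv, the shift, (1 << 5) % ntpcv, the total TPC
+	 * count and the ROP magic; data2[1] holds (1 << (i + 5)) % ntpcv for
+	 * i = 1..6, five bits per entry
+	 */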
+	data2[0]  = ntpcv << 16;
+	data2[0] |= shift << 21;
+	data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+	data2[0] |= priv->tpc_total << 8;
+	data2[0] |= priv->magic_not_rop_nr;
+	for (i = 1; i < 7; i++)
+		data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+	/* and write it all to the various parts of PGRAPH */
+	nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+	for (i = 0; i < 6; i++)
+		nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
+
+	nv_wr32(priv, 0x41bfd0, data2[0]);
+	nv_wr32(priv, 0x41bfe4, data2[1]);
+	for (i = 0; i < 6; i++)
+		nv_wr32(priv, 0x41bf00 + (i * 4), data[i]);
+
+	nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+	for (i = 0; i < 6; i++)
+		nv_wr32(priv, 0x40780c + (i * 4), data[i]);
+
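+	/* build a mask with one bit per present TPC, eight bits per GPC */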
+	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+		tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+
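+	/* spread the TPCs across the 32 slots, walking the GPCs round-robin;
+	 * tpc_set accumulates the assigned bits, and each slot also gets the
+	 * complement of that set within tpc_mask
+	 */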
+	for (i = 0, gpc = -1, b = -1; i < 32; i++) {
+		a = (i * (priv->tpc_total - 1)) / 32;
+		if (a != b) {
+			b = a;
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpcnr[gpc]);
+			tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+			tpc_set |= 1 << ((gpc * 8) + tpc);
+		}
+
+		nv_wr32(priv, 0x406800 + (i * 0x20), tpc_set);
+		nv_wr32(priv, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
+	}
+
+	for (i = 0; i < 8; i++)
+		nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
+
+	nv_wr32(priv, 0x405b00, (priv->tpc_total << 8) | priv->gpc_nr);
+	if (priv->gpc_nr == 1) {
+		nv_mask(priv, 0x408850, 0x0000000f, priv->tpc_nr[0]);
+		nv_mask(priv, 0x408958, 0x0000000f, priv->tpc_nr[0]);
+	} else {
+		nv_mask(priv, 0x408850, 0x0000000f, priv->gpc_nr);
+		nv_mask(priv, 0x408958, 0x0000000f, priv->gpc_nr);
+	}
+	nv_mask(priv, 0x419f78, 0x00000001, 0x00000000);
+
+	nve0_grctx_generate_icmd(priv);
+	nve0_grctx_generate_a097(priv);
+	nve0_grctx_generate_902d(priv);
+
+	nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
+	nv_wr32(priv, 0x418800, 0x7026860a); //XXX
+	nv_wr32(priv, 0x41be10, 0x00bb8bc7); //XXX
+	return nvc0_grctx_fini(&info);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
new file mode 100644
index 0000000..f7055af
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
@@ -0,0 +1,544 @@
+/* fuc microcode for nvc0 PGRAPH/GPC
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ *    m4 gpcnvc0.fuc | envyas -a -w -m fuc -V fuc3 -o gpcnvc0.fuc.h
+ */
+
+/* TODO
+ * - bracket certain functions with scratch writes, useful for debugging
+ * - watchdog timer around ctx operations
+ */
+
+.section #nvc0_grgpc_data
+include(`nvc0.fuc')
+gpc_id:			.b32 0
+gpc_mmio_list_head:	.b32 0
+gpc_mmio_list_tail:	.b32 0
+
+tpc_count:		.b32 0
+tpc_mask:		.b32 0
+tpc_mmio_list_head:	.b32 0
+tpc_mmio_list_tail:	.b32 0
+
+cmd_queue:		queue_init
+
+// chipset descriptions
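+// each record is 12 bytes: a 4-byte chipset id followed by four b16
+// pointers (GPC mmio list head/tail, then TPC mmio list head/tail);
+// a zero id terminates the table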
+chipsets:
+.b8  0xc0 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
+.b8  0xc1 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc1_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc1_tpc_mmio_tail
+.b8  0xc3 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
+.b8  0xc4 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
+.b8  0xc8 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
+.b8  0xce 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
+.b8  0xcf 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvcf_tpc_mmio_tail
+.b8  0xd9 0 0 0
+.b16 #nvd9_gpc_mmio_head
+.b16 #nvd9_gpc_mmio_tail
+.b16 #nvd9_tpc_mmio_head
+.b16 #nvd9_tpc_mmio_tail
+.b8  0xd7 0 0 0
+.b16 #nvd9_gpc_mmio_head
+.b16 #nvd9_gpc_mmio_tail
+.b16 #nvd9_tpc_mmio_head
+.b16 #nvd9_tpc_mmio_tail
+.b8  0 0 0 0
+
+// GPC mmio lists
+nvc0_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 6)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+nvc0_gpc_mmio_tail:
+mmctx_data(0x000c6c, 1);
+nvc1_gpc_mmio_tail:
+
+nvd9_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 2)
+mmctx_data(0x00040c, 3)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c6c, 1)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+nvd9_gpc_mmio_tail:
+
+// TPC mmio lists
+nvc0_tpc_mmio_head:
+mmctx_data(0x000018, 1)
+mmctx_data(0x00003c, 1)
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x000300, 6)
+mmctx_data(0x0003d0, 1)
+mmctx_data(0x0003e0, 2)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 1)
+mmctx_data(0x0004b0, 1)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000520, 2)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 20)
+mmctx_data(0x000698, 1)
+mmctx_data(0x000750, 2)
+nvc0_tpc_mmio_tail:
+mmctx_data(0x000758, 1)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x0006e0, 1)
+nvcf_tpc_mmio_tail:
+mmctx_data(0x0004bc, 1)
+nvc3_tpc_mmio_tail:
+mmctx_data(0x000544, 1)
+nvc1_tpc_mmio_tail:
+
+nvd9_tpc_mmio_head:
+mmctx_data(0x000018, 1)
+mmctx_data(0x00003c, 1)
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x000300, 6)
+mmctx_data(0x0003d0, 1)
+mmctx_data(0x0003e0, 2)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 3)
+mmctx_data(0x0004b0, 1)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000520, 2)
+mmctx_data(0x000544, 1)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 20)
+mmctx_data(0x000698, 1)
+mmctx_data(0x0006e0, 1)
+mmctx_data(0x000750, 3)
+nvd9_tpc_mmio_tail:
+
+.section #nvc0_grgpc_code
+bra #init
+define(`include_code')
+include(`nvc0.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nvc0.fuc)
+//
+error:
+	push $r14
+	mov $r14 -0x67ec 	// 0x9814
+	sethi $r14 0x400000
+	call #nv_wr32		// HUB_CTXCTL_CC_SCRATCH[5] = error code
+	add b32 $r14 0x41c
+	mov $r15 1
+	call #nv_wr32		// HUB_CTXCTL_INTR_UP_SET
+	pop $r14
+	ret
+
+// GPC fuc initialisation, executed by triggering ucode start; will
+// fall through to the main loop after completion.
+//
+// Input:
+//   CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+//   CC_SCRATCH[1]: context base
+//
+// Output:
+//   CC_SCRATCH[0]:
+//	     31:31: set to signal completion
+//   CC_SCRATCH[1]:
+//	      31:0: GPC context size
+//
+init:
+	clear b32 $r0
+	mov $sp $r0
+
+	// enable fifo access
+	mov $r1 0x1200
+	mov $r2 2
+	iowr I[$r1 + 0x000] $r2		// FIFO_ENABLE
+
+	// setup i0 handler, and route all interrupts to it
+	mov $r1 #ih
+	mov $iv0 $r1
+	mov $r1 0x400
+	iowr I[$r1 + 0x300] $r0		// INTR_DISPATCH
+
+	// enable fifo interrupt
+	mov $r2 4
+	iowr I[$r1 + 0x000] $r2		// INTR_EN_SET
+
+	// enable interrupts
+	bset $flags ie0
+
+	// figure out which GPC we are, and how many TPCs we have
+	mov $r1 0x608
+	shl b32 $r1 6
+	iord $r2 I[$r1 + 0x000]		// UNITS
+	mov $r3 1
+	and $r2 0x1f
+	shl b32 $r3 $r2
+	sub b32 $r3 1
+	st b32 D[$r0 + #tpc_count] $r2
+	st b32 D[$r0 + #tpc_mask] $r3
+	add b32 $r1 0x400
+	iord $r2 I[$r1 + 0x000]		// MYINDEX
+	st b32 D[$r0 + #gpc_id] $r2
+
+	// find context data for this chipset
+	mov $r2 0x800
+	shl b32 $r2 6
+	iord $r2 I[$r2 + 0x000]		// CC_SCRATCH[0]
+	mov $r1 #chipsets - 12
+	init_find_chipset:
+		add b32 $r1 12
+		ld b32 $r3 D[$r1 + 0x00]
+		cmpu b32 $r3 $r2
+		bra e #init_context
+		cmpu b32 $r3 0
+		bra ne #init_find_chipset
+		// unknown chipset
+		ret
+
+	// initialise context base, and size tracking
+	init_context:
+	mov $r2 0x800
+	shl b32 $r2 6
+	iord $r2 I[$r2 + 0x100]	// CC_SCRATCH[1], initial base
+	clear b32 $r3		// track GPC context size here
+
+	// set mmctx base addresses now so we don't have to do it later;
+	// they don't currently ever change
+	mov $r4 0x700
+	shl b32 $r4 6
+	shr b32 $r5 $r2 8
+	iowr I[$r4 + 0x000] $r5		// MMCTX_SAVE_SWBASE
+	iowr I[$r4 + 0x100] $r5		// MMCTX_LOAD_SWBASE
+
+	// calculate GPC mmio context size, store the chipset-specific
+	// mmio list pointers somewhere we can get at them later without
+	// re-parsing the chipset list
+	clear b32 $r14
+	clear b32 $r15
+	ld b16 $r14 D[$r1 + 4]
+	ld b16 $r15 D[$r1 + 6]
+	st b16 D[$r0 + #gpc_mmio_list_head] $r14
+	st b16 D[$r0 + #gpc_mmio_list_tail] $r15
+	call #mmctx_size
+	add b32 $r2 $r15
+	add b32 $r3 $r15
+
+	// calculate per-TPC mmio context size, store the list pointers
+	ld b16 $r14 D[$r1 + 8]
+	ld b16 $r15 D[$r1 + 10]
+	st b16 D[$r0 + #tpc_mmio_list_head] $r14
+	st b16 D[$r0 + #tpc_mmio_list_tail] $r15
+	call #mmctx_size
+	ld b32 $r14 D[$r0 + #tpc_count]
+	mulu $r14 $r15
+	add b32 $r2 $r14
+	add b32 $r3 $r14
+
+	// round up base/size to 256 byte boundary (for strand SWBASE)
+	add b32 $r4 0x1300
+	shr b32 $r3 2
+	iowr I[$r4 + 0x000] $r3		// MMCTX_LOAD_COUNT, wtf for?!?
+	shr b32 $r2 8
+	shr b32 $r3 6
+	add b32 $r2 1
+	add b32 $r3 1
+	shl b32 $r2 8
+	shl b32 $r3 8
+
+	// calculate size of strand context data
+	mov b32 $r15 $r2
+	call #strand_ctx_init
+	add b32 $r3 $r15
+
+	// save context size, and tell HUB we're done
+	mov $r1 0x800
+	shl b32 $r1 6
+	iowr I[$r1 + 0x100] $r3		// CC_SCRATCH[1]  = context size
+	add b32 $r1 0x800
+	clear b32 $r2
+	bset $r2 31
+	iowr I[$r1 + 0x000] $r2		// CC_SCRATCH[0] |= 0x80000000
+
+// Main program loop. Very simple: sleeps until woken up by the interrupt
+// handler, then pulls a command from the queue and executes its handler.
+//
+main:
+	bset $flags $p0
+	sleep $p0
+	mov $r13 #cmd_queue
+	call #queue_get
+	bra $p1 #main
+
+	// 0x0000-0x0003 are all context transfers
+	cmpu b32 $r14 0x04
+	bra nc #main_not_ctx_xfer
+		// fetch $flags and mask off $p1/$p2
+		mov $r1 $flags
+		mov $r2 0x0006
+		not b32 $r2
+		and $r1 $r2
+		// set $p1/$p2 according to transfer type
+		shl b32 $r14 1
+		or $r1 $r14
+		mov $flags $r1
+		// transfer context data
+		call #ctx_xfer
+		bra #main
+
+	main_not_ctx_xfer:
+	shl b32 $r15 $r14 16
+	or $r15 E_BAD_COMMAND
+	call #error
+	bra #main
+
+// interrupt handler
+ih:
+	push $r8
+	mov $r8 $flags
+	push $r8
+	push $r9
+	push $r10
+	push $r11
+	push $r13
+	push $r14
+	push $r15
+
+	// incoming fifo command?
+	iord $r10 I[$r0 + 0x200]	// INTR
+	and $r11 $r10 0x00000004
+	bra e #ih_no_fifo
+		// queue incoming fifo command for later processing
+		mov $r11 0x1900
+		mov $r13 #cmd_queue
+		iord $r14 I[$r11 + 0x100]	// FIFO_CMD
+		iord $r15 I[$r11 + 0x000]	// FIFO_DATA
+		call #queue_put
+		add b32 $r11 0x400
+		mov $r14 1
+		iowr I[$r11 + 0x000] $r14	// FIFO_ACK
+
+	// ack, and wake up main()
+	ih_no_fifo:
+	iowr I[$r0 + 0x100] $r10	// INTR_ACK
+
+	pop $r15
+	pop $r14
+	pop $r13
+	pop $r11
+	pop $r10
+	pop $r9
+	pop $r8
+	mov $flags $r8
+	pop $r8
+	bclr $flags $p0
+	iret
+
+// Set this GPC's bit in HUB_BAR, used to signal completion of various
+// activities to the HUB fuc
+//
+hub_barrier_done:
+	mov $r15 1
+	ld b32 $r14 D[$r0 + #gpc_id]
+	shl b32 $r15 $r14
+	mov $r14 -0x6be8 	// 0x409418 - HUB_BAR_SET
+	sethi $r14 0x400000
+	call #nv_wr32
+	ret
+
+// Disables various things, waits a bit, and re-enables them.
+//
+// Not sure how exactly this helps; perhaps "ENABLE" is not such a
+// good description for the bits we turn off?  Anyway, without this,
+// funny things happen.
+//
+ctx_redswitch:
+	mov $r14 0x614
+	shl b32 $r14 6
+	mov $r15 0x020
+	iowr I[$r14] $r15	// GPC_RED_SWITCH = POWER
+	mov $r15 8
+	ctx_redswitch_delay:
+		sub b32 $r15 1
+		bra ne #ctx_redswitch_delay
+	mov $r15 0xa20
+	iowr I[$r14] $r15	// GPC_RED_SWITCH = UNK11, ENABLE, POWER
+	ret
+
+// Transfer GPC context data between GPU and storage area
+//
+// In: $r15 context base address
+//     $p1 clear on save, set on load
+//     $p2 set if opposite direction done/will be done, so:
+//		on save it means: "a load will follow this save"
+//		on load it means: "a save preceded this load"
+//
+ctx_xfer:
+	// set context base address
+	mov $r1 0xa04
+	shl b32 $r1 6
+	iowr I[$r1 + 0x000] $r15// MEM_BASE
+	bra not $p1 #ctx_xfer_not_load
+		call #ctx_redswitch
+	ctx_xfer_not_load:
+
+	// strands
+	mov $r1 0x4afc
+	sethi $r1 0x20000
+	mov $r2 0xc
+	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0c
+	call #strand_wait
+	mov $r2 0x47fc
+	sethi $r2 0x20000
+	iowr I[$r2] $r0		// STRAND_FIRST_GENE(0x3f) = 0x00
+	xbit $r2 $flags $p1
+	add b32 $r2 3
+	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+	// mmio context
+	xbit $r10 $flags $p1	// direction
+	or $r10 2		// first
+	mov $r11 0x0000
+	sethi $r11 0x500000
+	ld b32 $r12 D[$r0 + #gpc_id]
+	shl b32 $r12 15
+	add b32 $r11 $r12	// base = NV_PGRAPH_GPCn
+	ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
+	ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
+	mov $r14 0		// not multi
+	call #mmctx_xfer
+
+	// per-TPC mmio context
+	xbit $r10 $flags $p1	// direction
+	or $r10 4		// last
+	mov $r11 0x4000
+	sethi $r11 0x500000	// base = NV_PGRAPH_GPC0_TPC0
+	ld b32 $r12 D[$r0 + #gpc_id]
+	shl b32 $r12 15
+	add b32 $r11 $r12	// base = NV_PGRAPH_GPCn_TPC0
+	ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
+	ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
+	ld b32 $r15 D[$r0 + #tpc_mask]
+	mov $r14 0x800		// stride = 0x800
+	call #mmctx_xfer
+
+	// wait for strands to finish
+	call #strand_wait
+
+	// if load, or a save without a load following, do some
+	// unknown stuff that's done after finishing a block of
+	// strand commands
+	bra $p1 #ctx_xfer_post
+	bra not $p2 #ctx_xfer_done
+	ctx_xfer_post:
+		mov $r1 0x4afc
+		sethi $r1 0x20000
+		mov $r2 0xd
+		iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0d
+		call #strand_wait
+
+	// mark completion in HUB's barrier
+	ctx_xfer_done:
+	call #hub_barrier_done
+	ret
+
+.align 256
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
new file mode 100644
index 0000000..96050dd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
@@ -0,0 +1,604 @@
+uint32_t nvc0_grgpc_data[] = {
+/* 0x0000: gpc_id */
+	0x00000000,
+/* 0x0004: gpc_mmio_list_head */
+	0x00000000,
+/* 0x0008: gpc_mmio_list_tail */
+	0x00000000,
+/* 0x000c: tpc_count */
+	0x00000000,
+/* 0x0010: tpc_mask */
+	0x00000000,
+/* 0x0014: tpc_mmio_list_head */
+	0x00000000,
+/* 0x0018: tpc_mmio_list_tail */
+	0x00000000,
+/* 0x001c: cmd_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0064: chipsets */
+	0x000000c0,
+	0x012800c8,
+	0x01e40194,
+	0x000000c1,
+	0x012c00c8,
+	0x01f80194,
+	0x000000c3,
+	0x012800c8,
+	0x01f40194,
+	0x000000c4,
+	0x012800c8,
+	0x01f40194,
+	0x000000c8,
+	0x012800c8,
+	0x01e40194,
+	0x000000ce,
+	0x012800c8,
+	0x01f40194,
+	0x000000cf,
+	0x012800c8,
+	0x01f00194,
+	0x000000d9,
+	0x0194012c,
+	0x025401f8,
+	0x00000000,
+/* 0x00c8: nvc0_gpc_mmio_head */
+	0x00000380,
+	0x14000400,
+	0x20000450,
+	0x00000600,
+	0x00000684,
+	0x10000700,
+	0x00000800,
+	0x08000808,
+	0x00000828,
+	0x00000830,
+	0x000008d8,
+	0x000008e0,
+	0x140008e8,
+	0x0000091c,
+	0x08000924,
+	0x00000b00,
+	0x14000b08,
+	0x00000bb8,
+	0x00000c08,
+	0x1c000c10,
+	0x00000c80,
+	0x00000c8c,
+	0x08001000,
+	0x00001014,
+/* 0x0128: nvc0_gpc_mmio_tail */
+	0x00000c6c,
+/* 0x012c: nvc1_gpc_mmio_tail */
+/* 0x012c: nvd9_gpc_mmio_head */
+	0x00000380,
+	0x04000400,
+	0x0800040c,
+	0x20000450,
+	0x00000600,
+	0x00000684,
+	0x10000700,
+	0x00000800,
+	0x08000808,
+	0x00000828,
+	0x00000830,
+	0x000008d8,
+	0x000008e0,
+	0x140008e8,
+	0x0000091c,
+	0x08000924,
+	0x00000b00,
+	0x14000b08,
+	0x00000bb8,
+	0x00000c08,
+	0x1c000c10,
+	0x00000c6c,
+	0x00000c80,
+	0x00000c8c,
+	0x08001000,
+	0x00001014,
+/* 0x0194: nvd9_gpc_mmio_tail */
+/* 0x0194: nvc0_tpc_mmio_head */
+	0x00000018,
+	0x0000003c,
+	0x00000048,
+	0x00000064,
+	0x00000088,
+	0x14000200,
+	0x0400021c,
+	0x14000300,
+	0x000003d0,
+	0x040003e0,
+	0x08000400,
+	0x00000420,
+	0x000004b0,
+	0x000004e8,
+	0x000004f4,
+	0x04000520,
+	0x0c000604,
+	0x4c000644,
+	0x00000698,
+	0x04000750,
+/* 0x01e4: nvc0_tpc_mmio_tail */
+	0x00000758,
+	0x000002c4,
+	0x000006e0,
+/* 0x01f0: nvcf_tpc_mmio_tail */
+	0x000004bc,
+/* 0x01f4: nvc3_tpc_mmio_tail */
+	0x00000544,
+/* 0x01f8: nvc1_tpc_mmio_tail */
+/* 0x01f8: nvd9_tpc_mmio_head */
+	0x00000018,
+	0x0000003c,
+	0x00000048,
+	0x00000064,
+	0x00000088,
+	0x14000200,
+	0x0400021c,
+	0x000002c4,
+	0x14000300,
+	0x000003d0,
+	0x040003e0,
+	0x08000400,
+	0x08000420,
+	0x000004b0,
+	0x000004e8,
+	0x000004f4,
+	0x04000520,
+	0x00000544,
+	0x0c000604,
+	0x4c000644,
+	0x00000698,
+	0x000006e0,
+	0x08000750,
+};
+
+uint32_t nvc0_grgpc_code[] = {
+	0x03060ef5,
+/* 0x0004: queue_put */
+	0x9800d898,
+	0x86f001d9,
+	0x0489b808,
+	0xf00c1bf4,
+	0x21f502f7,
+	0x00f802ec,
+/* 0x001c: queue_put_next */
+	0xb60798c4,
+	0x8dbb0384,
+	0x0880b600,
+	0x80008e80,
+	0x90b6018f,
+	0x0f94f001,
+	0xf801d980,
+/* 0x0039: queue_get */
+	0x0131f400,
+	0x9800d898,
+	0x89b801d9,
+	0x210bf404,
+	0xb60789c4,
+	0x9dbb0394,
+	0x0890b600,
+	0x98009e98,
+	0x80b6019f,
+	0x0f84f001,
+	0xf400d880,
+/* 0x0066: queue_get_done */
+	0x00f80132,
+/* 0x0068: nv_rd32 */
+	0x0728b7f1,
+	0xb906b4b6,
+	0xc9f002ec,
+	0x00bcd01f,
+/* 0x0078: nv_rd32_wait */
+	0xc800bccf,
+	0x1bf41fcc,
+	0x06a7f0fa,
+	0x010321f5,
+	0xf840bfcf,
+/* 0x008d: nv_wr32 */
+	0x28b7f100,
+	0x06b4b607,
+	0xb980bfd0,
+	0xc9f002ec,
+	0x1ec9f01f,
+/* 0x00a3: nv_wr32_wait */
+	0xcf00bcd0,
+	0xccc800bc,
+	0xfa1bf41f,
+/* 0x00ae: watchdog_reset */
+	0x87f100f8,
+	0x84b60430,
+	0x1ff9f006,
+	0xf8008fd0,
+/* 0x00bd: watchdog_clear */
+	0x3087f100,
+	0x0684b604,
+	0xf80080d0,
+/* 0x00c9: wait_donez */
+	0x3c87f100,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d000,
+	0x081887f1,
+	0xd00684b6,
+/* 0x00e2: wait_done_wait_donez */
+	0x87f1008a,
+	0x84b60400,
+	0x0088cf06,
+	0xf4888aff,
+	0x87f1f31b,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00099,
+/* 0x0103: wait_doneo */
+	0xf100f800,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00099f0,
+	0x87f10089,
+	0x84b60818,
+	0x008ad006,
+/* 0x011c: wait_done_wait_doneo */
+	0x040087f1,
+	0xcf0684b6,
+	0x8aff0088,
+	0xf30bf488,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0099f094,
+	0xf80089d0,
+/* 0x013d: mmctx_size */
+/* 0x013f: nv_mmctx_size_loop */
+	0x9894bd00,
+	0x85b600e8,
+	0x0180b61a,
+	0xbb0284b6,
+	0xe0b60098,
+	0x04efb804,
+	0xb9eb1bf4,
+	0x00f8029f,
+/* 0x015c: mmctx_xfer */
+	0x083c87f1,
+	0xbd0684b6,
+	0x0199f094,
+	0xf10089d0,
+	0xb6071087,
+	0x94bd0684,
+	0xf405bbfd,
+	0x8bd0090b,
+	0x0099f000,
+/* 0x0180: mmctx_base_disabled */
+	0xf405eefd,
+	0x8ed00c0b,
+	0xc08fd080,
+/* 0x018f: mmctx_multi_disabled */
+	0xb70199f0,
+	0xc8010080,
+	0xb4b600ab,
+	0x0cb9f010,
+	0xb601aec8,
+	0xbefd11e4,
+	0x008bd005,
+/* 0x01a8: mmctx_exec_loop */
+/* 0x01a8: mmctx_wait_free */
+	0xf0008ecf,
+	0x0bf41fe4,
+	0x00ce98fa,
+	0xd005e9fd,
+	0xc0b6c08e,
+	0x04cdb804,
+	0xc8e81bf4,
+	0x1bf402ab,
+/* 0x01c9: mmctx_fini_wait */
+	0x008bcf18,
+	0xb01fb4f0,
+	0x1bf410b4,
+	0x02a7f0f7,
+	0xf4c921f4,
+/* 0x01de: mmctx_stop */
+	0xabc81b0e,
+	0x10b4b600,
+	0xf00cb9f0,
+	0x8bd012b9,
+/* 0x01ed: mmctx_stop_wait */
+	0x008bcf00,
+	0xf412bbc8,
+/* 0x01f6: mmctx_done */
+	0x87f1fa1b,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00199,
+/* 0x0207: strand_wait */
+	0xf900f800,
+	0x02a7f0a0,
+	0xfcc921f4,
+/* 0x0213: strand_pre */
+	0xf100f8a0,
+	0xf04afc87,
+	0x97f00283,
+	0x0089d00c,
+	0x020721f5,
+/* 0x0226: strand_post */
+	0x87f100f8,
+	0x83f04afc,
+	0x0d97f002,
+	0xf50089d0,
+	0xf8020721,
+/* 0x0239: strand_set */
+	0xfca7f100,
+	0x02a3f04f,
+	0x0500aba2,
+	0xd00fc7f0,
+	0xc7f000ac,
+	0x00bcd00b,
+	0x020721f5,
+	0xf000aed0,
+	0xbcd00ac7,
+	0x0721f500,
+/* 0x0263: strand_ctx_init */
+	0xf100f802,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00399f0,
+	0x21f50089,
+	0xe7f00213,
+	0x3921f503,
+	0xfca7f102,
+	0x02a3f046,
+	0x0400aba0,
+	0xf040a0d0,
+	0xbcd001c7,
+	0x0721f500,
+	0x010c9202,
+	0xf000acd0,
+	0xbcd002c7,
+	0x0721f500,
+	0x2621f502,
+	0x8087f102,
+	0x0684b608,
+	0xb70089cf,
+	0x95220080,
+/* 0x02ba: ctx_init_strand_loop */
+	0x8ed008fe,
+	0x408ed000,
+	0xb6808acf,
+	0xa0b606a5,
+	0x00eabb01,
+	0xb60480b6,
+	0x1bf40192,
+	0x08e4b6e8,
+	0xf1f2efbc,
+	0xb6085c87,
+	0x94bd0684,
+	0xd00399f0,
+	0x00f80089,
+/* 0x02ec: error */
+	0xe7f1e0f9,
+	0xe3f09814,
+	0x8d21f440,
+	0x041ce0b7,
+	0xf401f7f0,
+	0xe0fc8d21,
+/* 0x0306: init */
+	0x04bd00f8,
+	0xf10004fe,
+	0xf0120017,
+	0x12d00227,
+	0x3e17f100,
+	0x0010fe04,
+	0x040017f1,
+	0xf0c010d0,
+	0x12d00427,
+	0x1031f400,
+	0x060817f1,
+	0xcf0614b6,
+	0x37f00012,
+	0x1f24f001,
+	0xb60432bb,
+	0x02800132,
+	0x04038003,
+	0x040010b7,
+	0x800012cf,
+	0x27f10002,
+	0x24b60800,
+	0x0022cf06,
+/* 0x035f: init_find_chipset */
+	0xb65817f0,
+	0x13980c10,
+	0x0432b800,
+	0xb00b0bf4,
+	0x1bf40034,
+/* 0x0373: init_context */
+	0xf100f8f1,
+	0xb6080027,
+	0x22cf0624,
+	0xf134bd40,
+	0xb6070047,
+	0x25950644,
+	0x0045d008,
+	0xbd4045d0,
+	0x58f4bde4,
+	0x1f58021e,
+	0x020e4003,
+	0xf5040f40,
+	0xbb013d21,
+	0x3fbb002f,
+	0x041e5800,
+	0x40051f58,
+	0x0f400a0e,
+	0x3d21f50c,
+	0x030e9801,
+	0xbb00effd,
+	0x3ebb002e,
+	0x0040b700,
+	0x0235b613,
+	0xb60043d0,
+	0x35b60825,
+	0x0120b606,
+	0xb60130b6,
+	0x34b60824,
+	0x022fb908,
+	0x026321f5,
+	0xf1003fbb,
+	0xb6080017,
+	0x13d00614,
+	0x0010b740,
+	0xf024bd08,
+	0x12d01f29,
+/* 0x0401: main */
+	0x0031f400,
+	0xf00028f4,
+	0x21f41cd7,
+	0xf401f439,
+	0xf404e4b0,
+	0x81fe1e18,
+	0x0627f001,
+	0x12fd20bd,
+	0x01e4b604,
+	0xfe051efd,
+	0x21f50018,
+	0x0ef404c3,
+/* 0x0431: main_not_ctx_xfer */
+	0x10ef94d3,
+	0xf501f5f0,
+	0xf402ec21,
+/* 0x043e: ih */
+	0x80f9c60e,
+	0xf90188fe,
+	0xf990f980,
+	0xf9b0f9a0,
+	0xf9e0f9d0,
+	0x800acff0,
+	0xf404abc4,
+	0xb7f11d0b,
+	0xd7f01900,
+	0x40becf1c,
+	0xf400bfcf,
+	0xb0b70421,
+	0xe7f00400,
+	0x00bed001,
+/* 0x0474: ih_no_fifo */
+	0xfc400ad0,
+	0xfce0fcf0,
+	0xfcb0fcd0,
+	0xfc90fca0,
+	0x0088fe80,
+	0x32f480fc,
+/* 0x048f: hub_barrier_done */
+	0xf001f800,
+	0x0e9801f7,
+	0x04febb00,
+	0x9418e7f1,
+	0xf440e3f0,
+	0x00f88d21,
+/* 0x04a4: ctx_redswitch */
+	0x0614e7f1,
+	0xf006e4b6,
+	0xefd020f7,
+	0x08f7f000,
+/* 0x04b4: ctx_redswitch_delay */
+	0xf401f2b6,
+	0xf7f1fd1b,
+	0xefd00a20,
+/* 0x04c3: ctx_xfer */
+	0xf100f800,
+	0xb60a0417,
+	0x1fd00614,
+	0x0711f400,
+	0x04a421f5,
+/* 0x04d4: ctx_xfer_not_load */
+	0x4afc17f1,
+	0xf00213f0,
+	0x12d00c27,
+	0x0721f500,
+	0xfc27f102,
+	0x0223f047,
+	0xf00020d0,
+	0x20b6012c,
+	0x0012d003,
+	0xf001acf0,
+	0xb7f002a5,
+	0x50b3f000,
+	0xb6000c98,
+	0xbcbb0fc4,
+	0x010c9800,
+	0xf0020d98,
+	0x21f500e7,
+	0xacf0015c,
+	0x04a5f001,
+	0x4000b7f1,
+	0x9850b3f0,
+	0xc4b6000c,
+	0x00bcbb0f,
+	0x98050c98,
+	0x0f98060d,
+	0x00e7f104,
+	0x5c21f508,
+	0x0721f501,
+	0x0601f402,
+/* 0x054b: ctx_xfer_post */
+	0xf11412f4,
+	0xf04afc17,
+	0x27f00213,
+	0x0012d00d,
+	0x020721f5,
+/* 0x055c: ctx_xfer_done */
+	0x048f21f5,
+	0x000000f8,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
new file mode 100644
index 0000000..62ab231
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
@@ -0,0 +1,456 @@
+/* fuc microcode for nve0 PGRAPH/GPC
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ *    m4 gpcnve0.fuc | envyas -a -w -m fuc -V nva3 -o gpcnve0.fuc.h
+ */
+
+/* TODO
+ * - bracket certain functions with scratch writes, useful for debugging
+ * - watchdog timer around ctx operations
+ */
+
+.section #nve0_grgpc_data
+include(`nve0.fuc')
+gpc_id:			.b32 0
+gpc_mmio_list_head:	.b32 0
+gpc_mmio_list_tail:	.b32 0
+
+tpc_count:		.b32 0
+tpc_mask:		.b32 0
+tpc_mmio_list_head:	.b32 0
+tpc_mmio_list_tail:	.b32 0
+
+cmd_queue:		queue_init
+
+// chipset descriptions
+chipsets:
+.b8  0xe4 0 0 0
+.b16 #nve4_gpc_mmio_head
+.b16 #nve4_gpc_mmio_tail
+.b16 #nve4_tpc_mmio_head
+.b16 #nve4_tpc_mmio_tail
+.b8  0xe7 0 0 0
+.b16 #nve4_gpc_mmio_head
+.b16 #nve4_gpc_mmio_tail
+.b16 #nve4_tpc_mmio_head
+.b16 #nve4_tpc_mmio_tail
+.b8  0xe6 0 0 0
+.b16 #nve4_gpc_mmio_head
+.b16 #nve4_gpc_mmio_tail
+.b16 #nve4_tpc_mmio_head
+.b16 #nve4_tpc_mmio_tail
+.b8  0 0 0 0
+
+// GPC mmio lists
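+// (each mmctx_data(addr, count) entry assembles to a single 32-bit word;
+// going by the generated header it packs ((count - 1) << 26) | addr)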
+nve4_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 2)
+mmctx_data(0x00040c, 3)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c40, 1)
+mmctx_data(0x000c6c, 1)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+mmctx_data(0x003024, 1)
+mmctx_data(0x0030c0, 2)
+mmctx_data(0x0030e4, 1)
+mmctx_data(0x003100, 6)
+mmctx_data(0x0031d0, 1)
+mmctx_data(0x0031e0, 2)
+nve4_gpc_mmio_tail:
+
+// TPC mmio lists
+nve4_tpc_mmio_head:
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x000230, 1)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 3)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 22)
+mmctx_data(0x0006ac, 2)
+mmctx_data(0x0006c8, 1)
+mmctx_data(0x000730, 8)
+mmctx_data(0x000758, 1)
+mmctx_data(0x000778, 1)
+nve4_tpc_mmio_tail:
+
+.section #nve0_grgpc_code
+bra #init
+define(`include_code')
+include(`nve0.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nve0.fuc)
+//
+error:
+	push $r14
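+	// mov immediates are sign-extended 16-bit values, so -0x67ec leaves
+	// 0x9814 in the low half; sethi then supplies the high bits of 0x409814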
+	mov $r14 -0x67ec 	// 0x9814
+	sethi $r14 0x400000
+	call #nv_wr32		// HUB_CTXCTL_CC_SCRATCH[5] = error code
+	add b32 $r14 0x41c
+	mov $r15 1
+	call #nv_wr32		// HUB_CTXCTL_INTR_UP_SET
+	pop $r14
+	ret
+
+// GPC fuc initialisation, executed by triggering ucode start, will
+// fall through to main loop after completion.
+//
+// Input:
+//   CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+//   CC_SCRATCH[1]: context base
+//
+// Output:
+//   CC_SCRATCH[0]:
+//	     31:31: set to signal completion
+//   CC_SCRATCH[1]:
+//	      31:0: GPC context size
+//
+init:
+	clear b32 $r0
+	mov $sp $r0
+
+	// enable fifo access
+	mov $r1 0x1200
+	mov $r2 2
+	iowr I[$r1 + 0x000] $r2		// FIFO_ENABLE
+
+	// setup i0 handler, and route all interrupts to it
+	mov $r1 #ih
+	mov $iv0 $r1
+	mov $r1 0x400
+	iowr I[$r1 + 0x300] $r0		// INTR_DISPATCH
+
+	// enable fifo interrupt
+	mov $r2 4
+	iowr I[$r1 + 0x000] $r2		// INTR_EN_SET
+
+	// enable interrupts
+	bset $flags ie0
+
+	// figure out which GPC we are, and how many TPCs we have
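+	// (tpc_mask below works out to (1 << tpc_count) - 1)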
+	mov $r1 0x608
+	shl b32 $r1 6
+	iord $r2 I[$r1 + 0x000]		// UNITS
+	mov $r3 1
+	and $r2 0x1f
+	shl b32 $r3 $r2
+	sub b32 $r3 1
+	st b32 D[$r0 + #tpc_count] $r2
+	st b32 D[$r0 + #tpc_mask] $r3
+	add b32 $r1 0x400
+	iord $r2 I[$r1 + 0x000]		// MYINDEX
+	st b32 D[$r0 + #gpc_id] $r2
+
+	// find context data for this chipset
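+	// (each #chipsets record is 12 bytes: a chipset id padded to four
+	// bytes, then four 16-bit gpc/tpc mmio list head/tail pointers)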
+	mov $r2 0x800
+	shl b32 $r2 6
+	iord $r2 I[$r2 + 0x000]		// CC_SCRATCH[0]
+	mov $r1 #chipsets - 12
+	init_find_chipset:
+		add b32 $r1 12
+		ld b32 $r3 D[$r1 + 0x00]
+		cmpu b32 $r3 $r2
+		bra e #init_context
+		cmpu b32 $r3 0
+		bra ne #init_find_chipset
+		// unknown chipset
+		ret
+
+	// initialise context base, and size tracking
+	init_context:
+	mov $r2 0x800
+	shl b32 $r2 6
+	iord $r2 I[$r2 + 0x100]	// CC_SCRATCH[1], initial base
+	clear b32 $r3		// track GPC context size here
+
+	// set mmctx base addresses now so we don't have to do it later,
+	// they don't currently ever change
+	mov $r4 0x700
+	shl b32 $r4 6
+	shr b32 $r5 $r2 8
+	iowr I[$r4 + 0x000] $r5		// MMCTX_SAVE_SWBASE
+	iowr I[$r4 + 0x100] $r5		// MMCTX_LOAD_SWBASE
+
+	// calculate GPC mmio context size, store the chipset-specific
+	// mmio list pointers somewhere we can get at them later without
+	// re-parsing the chipset list
+	clear b32 $r14
+	clear b32 $r15
+	ld b16 $r14 D[$r1 + 4]
+	ld b16 $r15 D[$r1 + 6]
+	st b16 D[$r0 + #gpc_mmio_list_head] $r14
+	st b16 D[$r0 + #gpc_mmio_list_tail] $r15
+	call #mmctx_size
+	add b32 $r2 $r15
+	add b32 $r3 $r15
+
+	// calculate per-TPC mmio context size, store the list pointers
+	ld b16 $r14 D[$r1 + 8]
+	ld b16 $r15 D[$r1 + 10]
+	st b16 D[$r0 + #tpc_mmio_list_head] $r14
+	st b16 D[$r0 + #tpc_mmio_list_tail] $r15
+	call #mmctx_size
+	ld b32 $r14 D[$r0 + #tpc_count]
+	mulu $r14 $r15
+	add b32 $r2 $r14
+	add b32 $r3 $r14
+
+	// round up base/size to 256 byte boundary (for strand SWBASE)
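+	// (e.g. 0x1234 -> ((0x1234 >> 8) + 1) << 8 = 0x1300; this bumps to
+	// the next boundary even when already aligned)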
+	add b32 $r4 0x1300
+	shr b32 $r3 2
+	iowr I[$r4 + 0x000] $r3		// MMCTX_LOAD_COUNT, wtf for?!?
+	shr b32 $r2 8
+	shr b32 $r3 6
+	add b32 $r2 1
+	add b32 $r3 1
+	shl b32 $r2 8
+	shl b32 $r3 8
+
+	// calculate size of strand context data
+	mov b32 $r15 $r2
+	call #strand_ctx_init
+	add b32 $r3 $r15
+
+	// save context size, and tell HUB we're done
+	mov $r1 0x800
+	shl b32 $r1 6
+	iowr I[$r1 + 0x100] $r3		// CC_SCRATCH[1]  = context size
+	add b32 $r1 0x800
+	clear b32 $r2
+	bset $r2 31
+	iowr I[$r1 + 0x000] $r2		// CC_SCRATCH[0] |= 0x80000000
+
+// Main program loop, very simple, sleeps until woken up by the interrupt
+// handler, pulls a command from the queue and executes its handler
+//
+main:
+	bset $flags $p0
+	sleep $p0
+	mov $r13 #cmd_queue
+	call #queue_get
+	bra $p1 #main
+
+	// 0x0000-0x0003 are all context transfers
+	cmpu b32 $r14 0x04
+	bra nc #main_not_ctx_xfer
+		// fetch $flags and mask off $p1/$p2
+		mov $r1 $flags
+		mov $r2 0x0006
+		not b32 $r2
+		and $r1 $r2
+		// set $p1/$p2 according to transfer type
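+		// ($p1/$p2 live in $flags bits 1:2, so command << 1 lands the
+		// save/load bit in $p1 and the "opposite op too" bit in $p2)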
+		shl b32 $r14 1
+		or $r1 $r14
+		mov $flags $r1
+		// transfer context data
+		call #ctx_xfer
+		bra #main
+
+	main_not_ctx_xfer:
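+	// pack the unknown command into the upper half of the error code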
+	shl b32 $r15 $r14 16
+	or $r15 E_BAD_COMMAND
+	call #error
+	bra #main
+
+// interrupt handler
+ih:
+	push $r8
+	mov $r8 $flags
+	push $r8
+	push $r9
+	push $r10
+	push $r11
+	push $r13
+	push $r14
+	push $r15
+
+	// incoming fifo command?
+	iord $r10 I[$r0 + 0x200]	// INTR
+	and $r11 $r10 0x00000004
+	bra e #ih_no_fifo
+		// queue incoming fifo command for later processing
+		mov $r11 0x1900
+		mov $r13 #cmd_queue
+		iord $r14 I[$r11 + 0x100]	// FIFO_CMD
+		iord $r15 I[$r11 + 0x000]	// FIFO_DATA
+		call #queue_put
+		add b32 $r11 0x400
+		mov $r14 1
+		iowr I[$r11 + 0x000] $r14	// FIFO_ACK
+
+	// ack, and wake up main()
+	ih_no_fifo:
+	iowr I[$r0 + 0x100] $r10	// INTR_ACK
+
+	pop $r15
+	pop $r14
+	pop $r13
+	pop $r11
+	pop $r10
+	pop $r9
+	pop $r8
+	mov $flags $r8
+	pop $r8
+	bclr $flags $p0
+	iret
+
+// Set this GPC's bit in HUB_BAR, used to signal completion of various
+// activities to the HUB fuc
+//
+hub_barrier_done:
+	mov $r15 1
+	ld b32 $r14 D[$r0 + #gpc_id]
+	shl b32 $r15 $r14
+	mov $r14 -0x6be8 	// 0x409418 - HUB_BAR_SET
+	sethi $r14 0x400000
+	call #nv_wr32
+	ret
+
+// Disables various things, waits a bit, and re-enables them..
+//
+// Not sure how exactly this helps, perhaps "ENABLE" is not such a
+// good description for the bits we turn off?  Anyways, without this,
+// funny things happen.
+//
+ctx_redswitch:
+	mov $r14 0x614
+	shl b32 $r14 6
+	mov $r15 0x020
+	iowr I[$r14] $r15	// GPC_RED_SWITCH = POWER
+	mov $r15 8
+	ctx_redswitch_delay:
+		sub b32 $r15 1
+		bra ne #ctx_redswitch_delay
+	mov $r15 0xa20
+	iowr I[$r14] $r15	// GPC_RED_SWITCH = UNK11, ENABLE, POWER
+	ret
+
+// Transfer GPC context data between GPU and storage area
+//
+// In: $r15 context base address
+//     $p1 clear on save, set on load
+//     $p2 set if opposite direction done/will be done, so:
+//		on save it means: "a load will follow this save"
+ *		on load it means: "a save preceded this load"
+//
+ctx_xfer:
+	// set context base address
+	mov $r1 0xa04
+	shl b32 $r1 6
+	iowr I[$r1 + 0x000] $r15// MEM_BASE
+	bra not $p1 #ctx_xfer_not_load
+		call #ctx_redswitch
+	ctx_xfer_not_load:
+
+	// strands
+	mov $r1 0x4afc
+	sethi $r1 0x20000
+	mov $r2 0xc
+	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0c
+	call #strand_wait
+	mov $r2 0x47fc
+	sethi $r2 0x20000
+	iowr I[$r2] $r0		// STRAND_FIRST_GENE(0x3f) = 0x00
+	xbit $r2 $flags $p1
+	add b32 $r2 3
+	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+	// mmio context
+	xbit $r10 $flags $p1	// direction
+	or $r10 2		// first
+	mov $r11 0x0000
+	sethi $r11 0x500000
+	ld b32 $r12 D[$r0 + #gpc_id]
+	shl b32 $r12 15
+	add b32 $r11 $r12	// base = NV_PGRAPH_GPCn
+	ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
+	ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
+	mov $r14 0		// not multi
+	call #mmctx_xfer
+
+	// per-TPC mmio context
+	xbit $r10 $flags $p1	// direction
+	or $r10 4		// last
+	mov $r11 0x4000
+	sethi $r11 0x500000	// base = NV_PGRAPH_GPC0_TPC0
+	ld b32 $r12 D[$r0 + #gpc_id]
+	shl b32 $r12 15
+	add b32 $r11 $r12	// base = NV_PGRAPH_GPCn_TPC0
+	ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
+	ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
+	ld b32 $r15 D[$r0 + #tpc_mask]
+	mov $r14 0x800		// stride = 0x800
+	call #mmctx_xfer
+
+	// wait for strands to finish
+	call #strand_wait
+
+	// if load, or a save without a load following, do some
+	// unknown stuff that's done after finishing a block of
+	// strand commands
+	bra $p1 #ctx_xfer_post
+	bra not $p2 #ctx_xfer_done
+	ctx_xfer_post:
+		mov $r1 0x4afc
+		sethi $r1 0x20000
+		mov $r2 0xd
+		iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0d
+		call #strand_wait
+
+	// mark completion in HUB's barrier
+	ctx_xfer_done:
+	call #hub_barrier_done
+	ret
+
+.align 256
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
new file mode 100644
index 0000000..09ee470
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -0,0 +1,533 @@
+uint32_t nve0_grgpc_data[] = {
+/* 0x0000: gpc_id */
+	0x00000000,
+/* 0x0004: gpc_mmio_list_head */
+	0x00000000,
+/* 0x0008: gpc_mmio_list_tail */
+	0x00000000,
+/* 0x000c: tpc_count */
+	0x00000000,
+/* 0x0010: tpc_mask */
+	0x00000000,
+/* 0x0014: tpc_mmio_list_head */
+	0x00000000,
+/* 0x0018: tpc_mmio_list_tail */
+	0x00000000,
+/* 0x001c: cmd_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0064: chipsets */
+	0x000000e4,
+	0x0110008c,
+	0x01580110,
+	0x000000e7,
+	0x0110008c,
+	0x01580110,
+	0x000000e6,
+	0x0110008c,
+	0x01580110,
+	0x00000000,
+/* 0x008c: nve4_gpc_mmio_head */
+	0x00000380,
+	0x04000400,
+	0x0800040c,
+	0x20000450,
+	0x00000600,
+	0x00000684,
+	0x10000700,
+	0x00000800,
+	0x08000808,
+	0x00000828,
+	0x00000830,
+	0x000008d8,
+	0x000008e0,
+	0x140008e8,
+	0x0000091c,
+	0x08000924,
+	0x00000b00,
+	0x14000b08,
+	0x00000bb8,
+	0x00000c08,
+	0x1c000c10,
+	0x00000c40,
+	0x00000c6c,
+	0x00000c80,
+	0x00000c8c,
+	0x08001000,
+	0x00001014,
+	0x00003024,
+	0x040030c0,
+	0x000030e4,
+	0x14003100,
+	0x000031d0,
+	0x040031e0,
+/* 0x0110: nve4_gpc_mmio_tail */
+/* 0x0110: nve4_tpc_mmio_head */
+	0x00000048,
+	0x00000064,
+	0x00000088,
+	0x14000200,
+	0x0400021c,
+	0x00000230,
+	0x000002c4,
+	0x08000400,
+	0x08000420,
+	0x000004e8,
+	0x000004f4,
+	0x0c000604,
+	0x54000644,
+	0x040006ac,
+	0x000006c8,
+	0x1c000730,
+	0x00000758,
+	0x00000778,
+};
+
+uint32_t nve0_grgpc_code[] = {
+	0x03060ef5,
+/* 0x0004: queue_put */
+	0x9800d898,
+	0x86f001d9,
+	0x0489b808,
+	0xf00c1bf4,
+	0x21f502f7,
+	0x00f802ec,
+/* 0x001c: queue_put_next */
+	0xb60798c4,
+	0x8dbb0384,
+	0x0880b600,
+	0x80008e80,
+	0x90b6018f,
+	0x0f94f001,
+	0xf801d980,
+/* 0x0039: queue_get */
+	0x0131f400,
+	0x9800d898,
+	0x89b801d9,
+	0x210bf404,
+	0xb60789c4,
+	0x9dbb0394,
+	0x0890b600,
+	0x98009e98,
+	0x80b6019f,
+	0x0f84f001,
+	0xf400d880,
+/* 0x0066: queue_get_done */
+	0x00f80132,
+/* 0x0068: nv_rd32 */
+	0x0728b7f1,
+	0xb906b4b6,
+	0xc9f002ec,
+	0x00bcd01f,
+/* 0x0078: nv_rd32_wait */
+	0xc800bccf,
+	0x1bf41fcc,
+	0x06a7f0fa,
+	0x010321f5,
+	0xf840bfcf,
+/* 0x008d: nv_wr32 */
+	0x28b7f100,
+	0x06b4b607,
+	0xb980bfd0,
+	0xc9f002ec,
+	0x1ec9f01f,
+/* 0x00a3: nv_wr32_wait */
+	0xcf00bcd0,
+	0xccc800bc,
+	0xfa1bf41f,
+/* 0x00ae: watchdog_reset */
+	0x87f100f8,
+	0x84b60430,
+	0x1ff9f006,
+	0xf8008fd0,
+/* 0x00bd: watchdog_clear */
+	0x3087f100,
+	0x0684b604,
+	0xf80080d0,
+/* 0x00c9: wait_donez */
+	0x3c87f100,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d000,
+	0x081887f1,
+	0xd00684b6,
+/* 0x00e2: wait_done_wait_donez */
+	0x87f1008a,
+	0x84b60400,
+	0x0088cf06,
+	0xf4888aff,
+	0x87f1f31b,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00099,
+/* 0x0103: wait_doneo */
+	0xf100f800,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00099f0,
+	0x87f10089,
+	0x84b60818,
+	0x008ad006,
+/* 0x011c: wait_done_wait_doneo */
+	0x040087f1,
+	0xcf0684b6,
+	0x8aff0088,
+	0xf30bf488,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0099f094,
+	0xf80089d0,
+/* 0x013d: mmctx_size */
+/* 0x013f: nv_mmctx_size_loop */
+	0x9894bd00,
+	0x85b600e8,
+	0x0180b61a,
+	0xbb0284b6,
+	0xe0b60098,
+	0x04efb804,
+	0xb9eb1bf4,
+	0x00f8029f,
+/* 0x015c: mmctx_xfer */
+	0x083c87f1,
+	0xbd0684b6,
+	0x0199f094,
+	0xf10089d0,
+	0xb6071087,
+	0x94bd0684,
+	0xf405bbfd,
+	0x8bd0090b,
+	0x0099f000,
+/* 0x0180: mmctx_base_disabled */
+	0xf405eefd,
+	0x8ed00c0b,
+	0xc08fd080,
+/* 0x018f: mmctx_multi_disabled */
+	0xb70199f0,
+	0xc8010080,
+	0xb4b600ab,
+	0x0cb9f010,
+	0xb601aec8,
+	0xbefd11e4,
+	0x008bd005,
+/* 0x01a8: mmctx_exec_loop */
+/* 0x01a8: mmctx_wait_free */
+	0xf0008ecf,
+	0x0bf41fe4,
+	0x00ce98fa,
+	0xd005e9fd,
+	0xc0b6c08e,
+	0x04cdb804,
+	0xc8e81bf4,
+	0x1bf402ab,
+/* 0x01c9: mmctx_fini_wait */
+	0x008bcf18,
+	0xb01fb4f0,
+	0x1bf410b4,
+	0x02a7f0f7,
+	0xf4c921f4,
+/* 0x01de: mmctx_stop */
+	0xabc81b0e,
+	0x10b4b600,
+	0xf00cb9f0,
+	0x8bd012b9,
+/* 0x01ed: mmctx_stop_wait */
+	0x008bcf00,
+	0xf412bbc8,
+/* 0x01f6: mmctx_done */
+	0x87f1fa1b,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00199,
+/* 0x0207: strand_wait */
+	0xf900f800,
+	0x02a7f0a0,
+	0xfcc921f4,
+/* 0x0213: strand_pre */
+	0xf100f8a0,
+	0xf04afc87,
+	0x97f00283,
+	0x0089d00c,
+	0x020721f5,
+/* 0x0226: strand_post */
+	0x87f100f8,
+	0x83f04afc,
+	0x0d97f002,
+	0xf50089d0,
+	0xf8020721,
+/* 0x0239: strand_set */
+	0xfca7f100,
+	0x02a3f04f,
+	0x0500aba2,
+	0xd00fc7f0,
+	0xc7f000ac,
+	0x00bcd00b,
+	0x020721f5,
+	0xf000aed0,
+	0xbcd00ac7,
+	0x0721f500,
+/* 0x0263: strand_ctx_init */
+	0xf100f802,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00399f0,
+	0x21f50089,
+	0xe7f00213,
+	0x3921f503,
+	0xfca7f102,
+	0x02a3f046,
+	0x0400aba0,
+	0xf040a0d0,
+	0xbcd001c7,
+	0x0721f500,
+	0x010c9202,
+	0xf000acd0,
+	0xbcd002c7,
+	0x0721f500,
+	0x2621f502,
+	0x8087f102,
+	0x0684b608,
+	0xb70089cf,
+	0x95220080,
+/* 0x02ba: ctx_init_strand_loop */
+	0x8ed008fe,
+	0x408ed000,
+	0xb6808acf,
+	0xa0b606a5,
+	0x00eabb01,
+	0xb60480b6,
+	0x1bf40192,
+	0x08e4b6e8,
+	0xf1f2efbc,
+	0xb6085c87,
+	0x94bd0684,
+	0xd00399f0,
+	0x00f80089,
+/* 0x02ec: error */
+	0xe7f1e0f9,
+	0xe3f09814,
+	0x8d21f440,
+	0x041ce0b7,
+	0xf401f7f0,
+	0xe0fc8d21,
+/* 0x0306: init */
+	0x04bd00f8,
+	0xf10004fe,
+	0xf0120017,
+	0x12d00227,
+	0x3e17f100,
+	0x0010fe04,
+	0x040017f1,
+	0xf0c010d0,
+	0x12d00427,
+	0x1031f400,
+	0x060817f1,
+	0xcf0614b6,
+	0x37f00012,
+	0x1f24f001,
+	0xb60432bb,
+	0x02800132,
+	0x04038003,
+	0x040010b7,
+	0x800012cf,
+	0x27f10002,
+	0x24b60800,
+	0x0022cf06,
+/* 0x035f: init_find_chipset */
+	0xb65817f0,
+	0x13980c10,
+	0x0432b800,
+	0xb00b0bf4,
+	0x1bf40034,
+/* 0x0373: init_context */
+	0xf100f8f1,
+	0xb6080027,
+	0x22cf0624,
+	0xf134bd40,
+	0xb6070047,
+	0x25950644,
+	0x0045d008,
+	0xbd4045d0,
+	0x58f4bde4,
+	0x1f58021e,
+	0x020e4003,
+	0xf5040f40,
+	0xbb013d21,
+	0x3fbb002f,
+	0x041e5800,
+	0x40051f58,
+	0x0f400a0e,
+	0x3d21f50c,
+	0x030e9801,
+	0xbb00effd,
+	0x3ebb002e,
+	0x0040b700,
+	0x0235b613,
+	0xb60043d0,
+	0x35b60825,
+	0x0120b606,
+	0xb60130b6,
+	0x34b60824,
+	0x022fb908,
+	0x026321f5,
+	0xf1003fbb,
+	0xb6080017,
+	0x13d00614,
+	0x0010b740,
+	0xf024bd08,
+	0x12d01f29,
+/* 0x0401: main */
+	0x0031f400,
+	0xf00028f4,
+	0x21f41cd7,
+	0xf401f439,
+	0xf404e4b0,
+	0x81fe1e18,
+	0x0627f001,
+	0x12fd20bd,
+	0x01e4b604,
+	0xfe051efd,
+	0x21f50018,
+	0x0ef404c3,
+/* 0x0431: main_not_ctx_xfer */
+	0x10ef94d3,
+	0xf501f5f0,
+	0xf402ec21,
+/* 0x043e: ih */
+	0x80f9c60e,
+	0xf90188fe,
+	0xf990f980,
+	0xf9b0f9a0,
+	0xf9e0f9d0,
+	0x800acff0,
+	0xf404abc4,
+	0xb7f11d0b,
+	0xd7f01900,
+	0x40becf1c,
+	0xf400bfcf,
+	0xb0b70421,
+	0xe7f00400,
+	0x00bed001,
+/* 0x0474: ih_no_fifo */
+	0xfc400ad0,
+	0xfce0fcf0,
+	0xfcb0fcd0,
+	0xfc90fca0,
+	0x0088fe80,
+	0x32f480fc,
+/* 0x048f: hub_barrier_done */
+	0xf001f800,
+	0x0e9801f7,
+	0x04febb00,
+	0x9418e7f1,
+	0xf440e3f0,
+	0x00f88d21,
+/* 0x04a4: ctx_redswitch */
+	0x0614e7f1,
+	0xf006e4b6,
+	0xefd020f7,
+	0x08f7f000,
+/* 0x04b4: ctx_redswitch_delay */
+	0xf401f2b6,
+	0xf7f1fd1b,
+	0xefd00a20,
+/* 0x04c3: ctx_xfer */
+	0xf100f800,
+	0xb60a0417,
+	0x1fd00614,
+	0x0711f400,
+	0x04a421f5,
+/* 0x04d4: ctx_xfer_not_load */
+	0x4afc17f1,
+	0xf00213f0,
+	0x12d00c27,
+	0x0721f500,
+	0xfc27f102,
+	0x0223f047,
+	0xf00020d0,
+	0x20b6012c,
+	0x0012d003,
+	0xf001acf0,
+	0xb7f002a5,
+	0x50b3f000,
+	0xb6000c98,
+	0xbcbb0fc4,
+	0x010c9800,
+	0xf0020d98,
+	0x21f500e7,
+	0xacf0015c,
+	0x04a5f001,
+	0x4000b7f1,
+	0x9850b3f0,
+	0xc4b6000c,
+	0x00bcbb0f,
+	0x98050c98,
+	0x0f98060d,
+	0x00e7f104,
+	0x5c21f508,
+	0x0721f501,
+	0x0601f402,
+/* 0x054b: ctx_xfer_post */
+	0xf11412f4,
+	0xf04afc17,
+	0x27f00213,
+	0x0012d00d,
+	0x020721f5,
+/* 0x055c: ctx_xfer_done */
+	0x048f21f5,
+	0x000000f8,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
new file mode 100644
index 0000000..7fbdebb
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -0,0 +1,869 @@
+/* fuc microcode for nvc0 PGRAPH/HUB
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ *    m4 hubnvc0.fuc | envyas -a -w -m fuc -V fuc3 -o hubnvc0.fuc.h
+ */
+
+.section #nvc0_grhub_data
+include(`nvc0.fuc')
+gpc_count:		.b32 0
+rop_count:		.b32 0
+cmd_queue:		queue_init
+hub_mmio_list_head:	.b32 0
+hub_mmio_list_tail:	.b32 0
+
+ctx_current:		.b32 0
+
+chipsets:
+.b8  0xc0 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8  0xc1 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc1_hub_mmio_tail
+.b8  0xc3 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8  0xc4 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8  0xc8 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8  0xce 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8  0xcf 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8  0xd9 0 0 0
+.b16 #nvd9_hub_mmio_head
+.b16 #nvd9_hub_mmio_tail
+.b8  0xd7 0 0 0
+.b16 #nvd9_hub_mmio_head
+.b16 #nvd9_hub_mmio_tail
+.b8  0 0 0 0
+
+nvc0_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404004, 11)
+mmctx_data(0x404044, 1)
+mmctx_data(0x404094, 14)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 2)
+mmctx_data(0x404174, 3)
+mmctx_data(0x404200, 8)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 32)
+mmctx_data(0x404698, 21)
+mmctx_data(0x4046f0, 2)
+mmctx_data(0x404700, 22)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 2)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408900, 4)
+mmctx_data(0x408980, 1)
+nvc0_hub_mmio_tail:
+mmctx_data(0x4064c0, 2)
+nvc1_hub_mmio_tail:
+
+nvd9_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404004, 10)
+mmctx_data(0x404044, 1)
+mmctx_data(0x404094, 14)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 2)
+mmctx_data(0x404178, 2)
+mmctx_data(0x404200, 8)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 32)
+mmctx_data(0x404698, 21)
+mmctx_data(0x4046f0, 2)
+mmctx_data(0x404700, 22)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 5)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408900, 4)
+mmctx_data(0x408980, 1)
+nvd9_hub_mmio_tail:
+
+.align 256
+chan_data:
+chan_mmio_count:	.b32 0
+chan_mmio_address:	.b32 0
+
+.align 256
+xfer_data: 		.b32 0
+
+.section #nvc0_grhub_code
+bra #init
+define(`include_code')
+include(`nvc0.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nvc0.fuc)
+//
+error:
+	push $r14
+	mov $r14 0x814
+	shl b32 $r14 6
+	iowr I[$r14 + 0x000] $r15	// CC_SCRATCH[5] = error code
+	mov $r14 0xc1c
+	shl b32 $r14 6
+	mov $r15 1
+	iowr I[$r14 + 0x000] $r15	// INTR_UP_SET
+	pop $r14
+	ret
+
+// HUB fuc initialisation, executed by triggering ucode start, will
+// fall through to main loop after completion.
+//
+// Input:
+//   CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+//
+// Output:
+//   CC_SCRATCH[0]:
+//	     31:31: set to signal completion
+//   CC_SCRATCH[1]:
+//	      31:0: total PGRAPH context size
+//
+init:
+	clear b32 $r0
+	mov $sp $r0
+	mov $xdbase $r0
+
+	// enable fifo access
+	mov $r1 0x1200
+	mov $r2 2
+	iowr I[$r1 + 0x000] $r2	// FIFO_ENABLE
+
+	// setup i0 handler, and route all interrupts to it
+	mov $r1 #ih
+	mov $iv0 $r1
+	mov $r1 0x400
+	iowr I[$r1 + 0x300] $r0	// INTR_DISPATCH
+
+	// route HUB_CHANNEL_SWITCH to fuc interrupt 8
+	mov $r3 0x404
+	shl b32 $r3 6
+	mov $r2 0x2003		// { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
+	iowr I[$r3 + 0x000] $r2
+
+	// not sure what these are, route them because NVIDIA does, and
+	// the IRQ handler will signal the host if we ever get one.. we
+	// may find out if/why we need to handle these if so..
+	//
+	mov $r2 0x2004
+	iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
+	mov $r2 0x200b
+	iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
+	mov $r2 0x200c
+	iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
+
+	// enable all INTR_UP interrupts
+	mov $r2 0xc24
+	shl b32 $r2 6
+	not b32 $r3 $r0
+	iowr I[$r2] $r3
+
+	// enable fifo, ctxsw, 9, 10, 15 interrupts
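+	// (mov immediates are sign-extended 16 bits: -0x78fc is 0x8704 once
+	// sethi zeroes the upper half)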
+	mov $r2 -0x78fc		// 0x8704
+	sethi $r2 0
+	iowr I[$r1 + 0x000] $r2	// INTR_EN_SET
+
+	// fifo is level-triggered, the rest edge-triggered
+	sub b32 $r1 0x100
+	mov $r2 4
+	iowr I[$r1] $r2
+
+	// enable interrupts
+	bset $flags ie0
+
+	// fetch enabled GPC/ROP counts
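+	// (UNITS, 0x409604: bits 4:0 give the GPC count, 20:16 the ROP count)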
+	mov $r14 -0x69fc	// 0x409604
+	sethi $r14 0x400000
+	call #nv_rd32
+	extr $r1 $r15 16:20
+	st b32 D[$r0 + #rop_count] $r1
+	and $r15 0x1f
+	st b32 D[$r0 + #gpc_count] $r15
+
+	// set BAR_REQMASK to GPC mask
+	mov $r1 1
+	shl b32 $r1 $r15
+	sub b32 $r1 1
+	mov $r2 0x40c
+	shl b32 $r2 6
+	iowr I[$r2 + 0x000] $r1
+	iowr I[$r2 + 0x100] $r1
+
+	// find context data for this chipset
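+	// (each #chipsets record here is 8 bytes: a padded chipset id plus
+	// 16-bit hub mmio list head/tail pointers)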
+	mov $r2 0x800
+	shl b32 $r2 6
+	iord $r2 I[$r2 + 0x000]		// CC_SCRATCH[0]
+	mov $r15 #chipsets - 8
+	init_find_chipset:
+		add b32 $r15 8
+		ld b32 $r3 D[$r15 + 0x00]
+		cmpu b32 $r3 $r2
+		bra e #init_context
+		cmpu b32 $r3 0
+		bra ne #init_find_chipset
+		// unknown chipset
+		ret
+
+	// context size calculation, reserve first 256 bytes for use by fuc
+	init_context:
+	mov $r1 256
+
+	// calculate size of mmio context data
+	ld b16 $r14 D[$r15 + 4]
+	ld b16 $r15 D[$r15 + 6]
+	sethi $r14 0
+	st b32 D[$r0 + #hub_mmio_list_head] $r14
+	st b32 D[$r0 + #hub_mmio_list_tail] $r15
+	call #mmctx_size
+
+	// set mmctx base addresses now so we don't have to do it later,
+	// they don't (currently) ever change
+	mov $r3 0x700
+	shl b32 $r3 6
+	shr b32 $r4 $r1 8
+	iowr I[$r3 + 0x000] $r4		// MMCTX_SAVE_SWBASE
+	iowr I[$r3 + 0x100] $r4		// MMCTX_LOAD_SWBASE
+	add b32 $r3 0x1300
+	add b32 $r1 $r15
+	shr b32 $r15 2
+	iowr I[$r3 + 0x000] $r15	// MMCTX_LOAD_COUNT, wtf for?!?
+
+	// strands, base offset needs to be aligned to 256 bytes
+	shr b32 $r1 8
+	add b32 $r1 1
+	shl b32 $r1 8
+	mov b32 $r15 $r1
+	call #strand_ctx_init
+	add b32 $r1 $r15
+
+	// initialise each GPC in sequence by passing in the offset of its
+	// context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
+	// has previously been uploaded by the host) running.
+	//
+	// the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
+	// when it has completed, and return the size of its context data
+	// in GPCn_CC_SCRATCH[1]
+	//
+	ld b32 $r3 D[$r0 + #gpc_count]
+	mov $r4 0x2000
+	sethi $r4 0x500000
+	init_gpc:
+		// setup, and start GPC ucode running
+		add b32 $r14 $r4 0x804
+		mov b32 $r15 $r1
+		call #nv_wr32			// CC_SCRATCH[1] = ctx offset
+		add b32 $r14 $r4 0x800
+		mov b32 $r15 $r2
+		call #nv_wr32			// CC_SCRATCH[0] = chipset
+		add b32 $r14 $r4 0x10c
+		clear b32 $r15
+		call #nv_wr32
+		add b32 $r14 $r4 0x104
+		call #nv_wr32			// ENTRY
+		add b32 $r14 $r4 0x100
+		mov $r15 2			// CTRL_START_TRIGGER
+		call #nv_wr32			// CTRL
+
+		// wait for it to complete, and adjust context size
+		add b32 $r14 $r4 0x800
+		init_gpc_wait:
+			call #nv_rd32
+			xbit $r15 $r15 31
+			bra e #init_gpc_wait
+		add b32 $r14 $r4 0x804
+		call #nv_rd32
+		add b32 $r1 $r15
+
+		// next!
+		add b32 $r4 0x8000
+		sub b32 $r3 1
+		bra ne #init_gpc
+
+	// save context size, and tell host we're ready
+	mov $r2 0x800
+	shl b32 $r2 6
+	iowr I[$r2 + 0x100] $r1		// CC_SCRATCH[1]  = context size
+	add b32 $r2 0x800
+	clear b32 $r1
+	bset $r1 31
+	iowr I[$r2 + 0x000] $r1		// CC_SCRATCH[0] |= 0x80000000
+
+// Main program loop, very simple, sleeps until woken up by the interrupt
+// handler, pulls a command from the queue and executes its handler
+//
+main:
+	// sleep until we have something to do
+	bset $flags $p0
+	sleep $p0
+	mov $r13 #cmd_queue
+	call #queue_get
+	bra $p1 #main
+
+	// context switch, requested by GPU?
+	cmpu b32 $r14 0x4001
+	bra ne #main_not_ctx_switch
+		trace_set(T_AUTO)
+		mov $r1 0xb00
+		shl b32 $r1 6
+		iord $r2 I[$r1 + 0x100]		// CHAN_NEXT
+		iord $r1 I[$r1 + 0x000]		// CHAN_CUR
+
+		xbit $r3 $r1 31
+		bra e #chsw_no_prev
+			xbit $r3 $r2 31
+			bra e #chsw_prev_no_next
+				push $r2
+				mov b32 $r2 $r1
+				trace_set(T_SAVE)
+				bclr $flags $p1
+				bset $flags $p2
+				call #ctx_xfer
+				trace_clr(T_SAVE);
+				pop $r2
+				trace_set(T_LOAD);
+				bset $flags $p1
+				call #ctx_xfer
+				trace_clr(T_LOAD);
+				bra #chsw_done
+			chsw_prev_no_next:
+				push $r2
+				mov b32 $r2 $r1
+				bclr $flags $p1
+				bclr $flags $p2
+				call #ctx_xfer
+				pop $r2
+				mov $r1 0xb00
+				shl b32 $r1 6
+				iowr I[$r1] $r2
+				bra #chsw_done
+		chsw_no_prev:
+			xbit $r3 $r2 31
+			bra e #chsw_done
+				bset $flags $p1
+				bclr $flags $p2
+				call #ctx_xfer
+
+		// ack the context switch request
+		chsw_done:
+		mov $r1 0xb0c
+		shl b32 $r1 6
+		mov $r2 1
+		iowr I[$r1 + 0x000] $r2		// 0x409b0c
+		trace_clr(T_AUTO)
+		bra #main
+
+	// request to set current channel? (*not* a context switch)
+	main_not_ctx_switch:
+	cmpu b32 $r14 0x0001
+	bra ne #main_not_ctx_chan
+		mov b32 $r2 $r15
+		call #ctx_chan
+		bra #main_done
+
+	// request to store current channel context?
+	main_not_ctx_chan:
+	cmpu b32 $r14 0x0002
+	bra ne #main_not_ctx_save
+		trace_set(T_SAVE)
+		bclr $flags $p1
+		bclr $flags $p2
+		call #ctx_xfer
+		trace_clr(T_SAVE)
+		bra #main_done
+
+	main_not_ctx_save:
+		shl b32 $r15 $r14 16
+		or $r15 E_BAD_COMMAND
+		call #error
+		bra #main
+
+	main_done:
+	mov $r1 0x820
+	shl b32 $r1 6
+	clear b32 $r2
+	bset $r2 31
+	iowr I[$r1 + 0x000] $r2		// CC_SCRATCH[0] |= 0x80000000
+	bra #main
+
+// interrupt handler
+ih:
+	push $r8
+	mov $r8 $flags
+	push $r8
+	push $r9
+	push $r10
+	push $r11
+	push $r13
+	push $r14
+	push $r15
+
+	// incoming fifo command?
+	iord $r10 I[$r0 + 0x200]	// INTR
+	and $r11 $r10 0x00000004
+	bra e #ih_no_fifo
+		// queue incoming fifo command for later processing
+		mov $r11 0x1900
+		mov $r13 #cmd_queue
+		iord $r14 I[$r11 + 0x100]	// FIFO_CMD
+		iord $r15 I[$r11 + 0x000]	// FIFO_DATA
+		call #queue_put
+		add b32 $r11 0x400
+		mov $r14 1
+		iowr I[$r11 + 0x000] $r14	// FIFO_ACK
+
+	// context switch request?
+	ih_no_fifo:
+	and $r11 $r10 0x00000100
+	bra e #ih_no_ctxsw
+		// enqueue a context switch for later processing
+		mov $r13 #cmd_queue
+		mov $r14 0x4001
+		call #queue_put
+
+	// anything we didn't handle, bring it to the host's attention
+	ih_no_ctxsw:
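+	// strip the fifo (0x004) and ctxsw (0x100) bits handled above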
+	mov $r11 0x104
+	not b32 $r11
+	and $r11 $r10 $r11
+	bra e #ih_no_other
+		mov $r10 0xc1c
+		shl b32 $r10 6
+		iowr I[$r10] $r11	// INTR_UP_SET
+
+	// ack, and wake up main()
+	ih_no_other:
+	iowr I[$r0 + 0x100] $r10	// INTR_ACK
+
+	pop $r15
+	pop $r14
+	pop $r13
+	pop $r11
+	pop $r10
+	pop $r9
+	pop $r8
+	mov $flags $r8
+	pop $r8
+	bclr $flags $p0
+	iret
+
+// Not really sure why, but MEM_CMD 7 will hang forever if this isn't done
+ctx_4160s:
+	mov $r14 0x4160
+	sethi $r14 0x400000
+	mov $r15 1
+	call #nv_wr32
+	ctx_4160s_wait:
+		call #nv_rd32
+		xbit $r15 $r15 4
+		bra e #ctx_4160s_wait
+	ret
+
+// Without clearing again at end of xfer, some things cause PGRAPH
+// to hang with STATUS=0x00000007 until it's cleared.. fbcon can
+// still function with it set however...
+ctx_4160c:
+	mov $r14 0x4160
+	sethi $r14 0x400000
+	clear b32 $r15
+	call #nv_wr32
+	ret
+
+// Again, not really sure
+//
+// In: $r15 value to set 0x404170 to
+//
+ctx_4170s:
+	mov $r14 0x4170
+	sethi $r14 0x400000
+	or $r15 0x10
+	call #nv_wr32
+	ret
+
+// Waits for a ctx_4170s() call to complete
+//
+ctx_4170w:
+	mov $r14 0x4170
+	sethi $r14 0x400000
+	call #nv_rd32
+	and $r15 0x10
+	bra ne #ctx_4170w
+	ret
+
+// Disables various things, waits a bit, and re-enables them..
+//
+// Not sure how exactly this helps, perhaps "ENABLE" is not such a
+// good description for the bits we turn off?  Anyways, without this,
+// funny things happen.
+//
+ctx_redswitch:
+	mov $r14 0x614
+	shl b32 $r14 6
+	mov $r15 0x270
+	iowr I[$r14] $r15	// HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
+	mov $r15 8
+	ctx_redswitch_delay:
+		sub b32 $r15 1
+		bra ne #ctx_redswitch_delay
+	mov $r15 0x770
+	iowr I[$r14] $r15	// HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
+	ret
+
+// Not a clue what this is for, except that unless the value is 0x10, the
+// strand context is saved (and presumably restored) incorrectly..
+//
+// In: $r15 value to set to (0x00/0x10 are used)
+//
+ctx_86c:
+	mov $r14 0x86c
+	shl b32 $r14 6
+	iowr I[$r14] $r15	// HUB(0x86c) = val
+	mov $r14 -0x75ec
+	sethi $r14 0x400000
+	call #nv_wr32		// ROP(0xa14) = val
+	mov $r14 -0x5794
+	sethi $r14 0x410000
+	call #nv_wr32		// GPC(0x86c) = val
+	ret
+
+// ctx_load - loads a channel's ctxctl data, and selects its vm
+//
+// In: $r2 channel address
+//
+ctx_load:
+	trace_set(T_CHAN)
+
+	// switch to channel, somewhat magic in parts..
+	mov $r10 12		// DONE_UNK12
+	call #wait_donez
+	mov $r1 0xa24
+	shl b32 $r1 6
+	iowr I[$r1 + 0x000] $r0	// 0x409a24
+	mov $r3 0xb00
+	shl b32 $r3 6
+	iowr I[$r3 + 0x100] $r2	// CHAN_NEXT
+	mov $r1 0xa0c
+	shl b32 $r1 6
+	mov $r4 7
+	iowr I[$r1 + 0x000] $r2 // MEM_CHAN
+	iowr I[$r1 + 0x100] $r4	// MEM_CMD
+	ctx_chan_wait_0:
+		iord $r4 I[$r1 + 0x100]
+		and $r4 0x1f
+		bra ne #ctx_chan_wait_0
+	iowr I[$r3 + 0x000] $r2	// CHAN_CUR
+
+	// load channel header, fetch PGRAPH context pointer
+	mov $xtargets $r0
+	bclr $r2 31
+	shl b32 $r2 4
+	add b32 $r2 2
+
+	trace_set(T_LCHAN)
+	mov $r1 0xa04
+	shl b32 $r1 6
+	iowr I[$r1 + 0x000] $r2		// MEM_BASE
+	mov $r1 0xa20
+	shl b32 $r1 6
+	mov $r2 0x0002
+	sethi $r2 0x80000000
+	iowr I[$r1 + 0x000] $r2		// MEM_TARGET = vram
+	mov $r1 0x10			// chan + 0x0210
+	mov $r2 #xfer_data
+	sethi $r2 0x00020000		// 16 bytes
+	xdld $r1 $r2
+	xdwait
+	trace_clr(T_LCHAN)
+
+	// update current context
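+	// (fold the 64-bit address from the channel header into the 256-byte
+	// units MEM_BASE expects: high word << 24 | low word >> 8)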
+	ld b32 $r1 D[$r0 + #xfer_data + 4]
+	shl b32 $r1 24
+	ld b32 $r2 D[$r0 + #xfer_data + 0]
+	shr b32 $r2 8
+	or $r1 $r2
+	st b32 D[$r0 + #ctx_current] $r1
+
+	// set transfer base to start of context, and fetch context header
+	trace_set(T_LCTXH)
+	mov $r2 0xa04
+	shl b32 $r2 6
+	iowr I[$r2 + 0x000] $r1		// MEM_BASE
+	mov $r2 1
+	mov $r1 0xa20
+	shl b32 $r1 6
+	iowr I[$r1 + 0x000] $r2		// MEM_TARGET = vm
+	mov $r1 #chan_data
+	sethi $r1 0x00060000		// 256 bytes
+	xdld $r0 $r1
+	xdwait
+	trace_clr(T_LCTXH)
+
+	trace_clr(T_CHAN)
+	ret
+
+// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
+//            the active channel for ctxctl, but not actually transfer
+//            any context data.  intended for use only during initial
+//            context construction.
+//
+// In: $r2 channel address
+//
+ctx_chan:
+	call #ctx_4160s
+	call #ctx_load
+	mov $r10 12			// DONE_UNK12
+	call #wait_donez
+	mov $r1 0xa10
+	shl b32 $r1 6
+	mov $r2 5
+	iowr I[$r1 + 0x000] $r2		// MEM_CMD = 5 (???)
+	ctx_chan_wait:
+		iord $r2 I[$r1 + 0x000]
+		or $r2 $r2
+		bra ne #ctx_chan_wait
+	call #ctx_4160c
+	ret
+
+// Execute per-context state overrides list
+//
+// Only executed on the first load of a channel.  Might want to look into
+// removing this and having the host directly modify the channel's context
+// to change this state...  The nouveau DRM already builds this list as
+// it's definitely needed for NVIDIA's driver, so we may as well use it for now
+//
+// Input: $r1 mmio list length
+//
+ctx_mmio_exec:
+	// set transfer base to be the mmio list
+	ld b32 $r3 D[$r0 + #chan_mmio_address]
+	mov $r2 0xa04
+	shl b32 $r2 6
+	iowr I[$r2 + 0x000] $r3		// MEM_BASE
+
+	clear b32 $r3
+	ctx_mmio_loop:
+		// fetch next 256 bytes of mmio list if necessary
+		and $r4 $r3 0xff
+		bra ne #ctx_mmio_pull
+			mov $r5 #xfer_data
+			sethi $r5 0x00060000	// 256 bytes
+			xdld $r3 $r5
+			xdwait
+
+		// execute a single list entry
+		ctx_mmio_pull:
+		ld b32 $r14 D[$r4 + #xfer_data + 0x00]
+		ld b32 $r15 D[$r4 + #xfer_data + 0x04]
+		call #nv_wr32
+
+		// next!
+		add b32 $r3 8
+		sub b32 $r1 1
+		bra ne #ctx_mmio_loop
+
+	// set transfer base back to the current context
+	ctx_mmio_done:
+	ld b32 $r3 D[$r0 + #ctx_current]
+	iowr I[$r2 + 0x000] $r3		// MEM_BASE
+
+	// disable the mmio list now, we don't need/want to execute it again
+	st b32 D[$r0 + #chan_mmio_count] $r0
+	mov $r1 #chan_data
+	sethi $r1 0x00060000		// 256 bytes
+	xdst $r0 $r1
+	xdwait
+	ret
+
+// Transfer HUB context data between GPU and storage area
+//
+// In: $r2 channel address
+//     $p1 clear on save, set on load
+//     $p2 set if opposite direction done/will be done, so:
+//		on save it means: "a load will follow this save"
+//		on load it means: "a save preceded this load"
+//
+ctx_xfer:
+	// according to mwk, some kind of wait for idle
+	mov $r15 0xc00
+	shl b32 $r15 6
+	mov $r14 4
+	iowr I[$r15 + 0x200] $r14
+	ctx_xfer_idle:
+		iord $r14 I[$r15 + 0x000]
+		and $r14 0x2000
+		bra ne #ctx_xfer_idle
+
+	bra not $p1 #ctx_xfer_pre
+	bra $p2 #ctx_xfer_pre_load
+	ctx_xfer_pre:
+		mov $r15 0x10
+		call #ctx_86c
+		call #ctx_4160s
+		bra not $p1 #ctx_xfer_exec
+
+	ctx_xfer_pre_load:
+		mov $r15 2
+		call #ctx_4170s
+		call #ctx_4170w
+		call #ctx_redswitch
+		clear b32 $r15
+		call #ctx_4170s
+		call #ctx_load
+
+	// fetch context pointer, and initiate xfer on all GPCs
+	ctx_xfer_exec:
+	ld b32 $r1 D[$r0 + #ctx_current]
+	mov $r2 0x414
+	shl b32 $r2 6
+	iowr I[$r2 + 0x000] $r0	// BAR_STATUS = reset
+	mov $r14 -0x5b00
+	sethi $r14 0x410000
+	mov b32 $r15 $r1
+	call #nv_wr32		// GPC_BCAST_WRCMD_DATA = ctx pointer
+	add b32 $r14 4
+	xbit $r15 $flags $p1
+	xbit $r2 $flags $p2
+	shl b32 $r2 1
+	or $r15 $r2
+	call #nv_wr32		// GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+
+	// strands
+	mov $r1 0x4afc
+	sethi $r1 0x20000
+	mov $r2 0xc
+	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0c
+	call #strand_wait
+	mov $r2 0x47fc
+	sethi $r2 0x20000
+	iowr I[$r2] $r0		// STRAND_FIRST_GENE(0x3f) = 0x00
+	xbit $r2 $flags $p1
+	add b32 $r2 3
+	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+	// mmio context
+	xbit $r10 $flags $p1	// direction
+	or $r10 6		// first, last
+	mov $r11 0		// base = 0
+	ld b32 $r12 D[$r0 + #hub_mmio_list_head]
+	ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
+	mov $r14 0		// not multi
+	call #mmctx_xfer
+
+	// wait for GPCs to all complete
+	mov $r10 8		// DONE_BAR
+	call #wait_doneo
+
+	// wait for strand xfer to complete
+	call #strand_wait
+
+	// post-op
+	bra $p1 #ctx_xfer_post
+		mov $r10 12		// DONE_UNK12
+		call #wait_donez
+		mov $r1 0xa10
+		shl b32 $r1 6
+		mov $r2 5
+		iowr I[$r1] $r2		// MEM_CMD
+		ctx_xfer_post_save_wait:
+			iord $r2 I[$r1]
+			or $r2 $r2
+			bra ne #ctx_xfer_post_save_wait
+
+	bra $p2 #ctx_xfer_done
+	ctx_xfer_post:
+		mov $r15 2
+		call #ctx_4170s
+		clear b32 $r15
+		call #ctx_86c
+		call #strand_post
+		call #ctx_4170w
+		clear b32 $r15
+		call #ctx_4170s
+
+		bra not $p1 #ctx_xfer_no_post_mmio
+		ld b32 $r1 D[$r0 + #chan_mmio_count]
+		or $r1 $r1
+		bra e #ctx_xfer_no_post_mmio
+			call #ctx_mmio_exec
+
+		ctx_xfer_no_post_mmio:
+		call #ctx_4160c
+
+	ctx_xfer_done:
+	ret
+
+.align 256
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
new file mode 100644
index 0000000..bb03d2a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -0,0 +1,928 @@
+uint32_t nvc0_grhub_data[] = {
+/* 0x0000: gpc_count */
+	0x00000000,
+/* 0x0004: rop_count */
+	0x00000000,
+/* 0x0008: cmd_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0050: hub_mmio_list_head */
+	0x00000000,
+/* 0x0054: hub_mmio_list_tail */
+	0x00000000,
+/* 0x0058: ctx_current */
+	0x00000000,
+/* 0x005c: chipsets */
+	0x000000c0,
+	0x013c00a0,
+	0x000000c1,
+	0x014000a0,
+	0x000000c3,
+	0x013c00a0,
+	0x000000c4,
+	0x013c00a0,
+	0x000000c8,
+	0x013c00a0,
+	0x000000ce,
+	0x013c00a0,
+	0x000000cf,
+	0x013c00a0,
+	0x000000d9,
+	0x01dc0140,
+	0x00000000,
+/* 0x00a0: nvc0_hub_mmio_head */
+	0x0417e91c,
+	0x04400204,
+	0x28404004,
+	0x00404044,
+	0x34404094,
+	0x184040d0,
+	0x004040f8,
+	0x08404130,
+	0x08404150,
+	0x04404164,
+	0x08404174,
+	0x1c404200,
+	0x34404404,
+	0x0c404460,
+	0x00404480,
+	0x00404498,
+	0x0c404604,
+	0x7c404618,
+	0x50404698,
+	0x044046f0,
+	0x54404700,
+	0x00405800,
+	0x08405830,
+	0x00405854,
+	0x0c405870,
+	0x04405a00,
+	0x00405a18,
+	0x00406020,
+	0x0c406028,
+	0x044064a8,
+	0x044064b4,
+	0x00407804,
+	0x1440780c,
+	0x004078bc,
+	0x18408000,
+	0x00408064,
+	0x08408800,
+	0x0c408900,
+	0x00408980,
+/* 0x013c: nvc0_hub_mmio_tail */
+	0x044064c0,
+/* 0x0140: nvc1_hub_mmio_tail */
+/* 0x0140: nvd9_hub_mmio_head */
+	0x0417e91c,
+	0x04400204,
+	0x24404004,
+	0x00404044,
+	0x34404094,
+	0x184040d0,
+	0x004040f8,
+	0x08404130,
+	0x08404150,
+	0x04404164,
+	0x04404178,
+	0x1c404200,
+	0x34404404,
+	0x0c404460,
+	0x00404480,
+	0x00404498,
+	0x0c404604,
+	0x7c404618,
+	0x50404698,
+	0x044046f0,
+	0x54404700,
+	0x00405800,
+	0x08405830,
+	0x00405854,
+	0x0c405870,
+	0x04405a00,
+	0x00405a18,
+	0x00406020,
+	0x0c406028,
+	0x044064a8,
+	0x104064b4,
+	0x00407804,
+	0x1440780c,
+	0x004078bc,
+	0x18408000,
+	0x00408064,
+	0x08408800,
+	0x0c408900,
+	0x00408980,
+/* 0x01dc: nvd9_hub_mmio_tail */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0200: chan_data */
+/* 0x0200: chan_mmio_count */
+	0x00000000,
+/* 0x0204: chan_mmio_address */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0300: xfer_data */
+	0x00000000,
+};
+
+uint32_t nvc0_grhub_code[] = {
+	0x03090ef5,
+/* 0x0004: queue_put */
+	0x9800d898,
+	0x86f001d9,
+	0x0489b808,
+	0xf00c1bf4,
+	0x21f502f7,
+	0x00f802ec,
+/* 0x001c: queue_put_next */
+	0xb60798c4,
+	0x8dbb0384,
+	0x0880b600,
+	0x80008e80,
+	0x90b6018f,
+	0x0f94f001,
+	0xf801d980,
+/* 0x0039: queue_get */
+	0x0131f400,
+	0x9800d898,
+	0x89b801d9,
+	0x210bf404,
+	0xb60789c4,
+	0x9dbb0394,
+	0x0890b600,
+	0x98009e98,
+	0x80b6019f,
+	0x0f84f001,
+	0xf400d880,
+/* 0x0066: queue_get_done */
+	0x00f80132,
+/* 0x0068: nv_rd32 */
+	0x0728b7f1,
+	0xb906b4b6,
+	0xc9f002ec,
+	0x00bcd01f,
+/* 0x0078: nv_rd32_wait */
+	0xc800bccf,
+	0x1bf41fcc,
+	0x06a7f0fa,
+	0x010321f5,
+	0xf840bfcf,
+/* 0x008d: nv_wr32 */
+	0x28b7f100,
+	0x06b4b607,
+	0xb980bfd0,
+	0xc9f002ec,
+	0x1ec9f01f,
+/* 0x00a3: nv_wr32_wait */
+	0xcf00bcd0,
+	0xccc800bc,
+	0xfa1bf41f,
+/* 0x00ae: watchdog_reset */
+	0x87f100f8,
+	0x84b60430,
+	0x1ff9f006,
+	0xf8008fd0,
+/* 0x00bd: watchdog_clear */
+	0x3087f100,
+	0x0684b604,
+	0xf80080d0,
+/* 0x00c9: wait_donez */
+	0x3c87f100,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d000,
+	0x081887f1,
+	0xd00684b6,
+/* 0x00e2: wait_done_wait_donez */
+	0x87f1008a,
+	0x84b60400,
+	0x0088cf06,
+	0xf4888aff,
+	0x87f1f31b,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00099,
+/* 0x0103: wait_doneo */
+	0xf100f800,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00099f0,
+	0x87f10089,
+	0x84b60818,
+	0x008ad006,
+/* 0x011c: wait_done_wait_doneo */
+	0x040087f1,
+	0xcf0684b6,
+	0x8aff0088,
+	0xf30bf488,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0099f094,
+	0xf80089d0,
+/* 0x013d: mmctx_size */
+/* 0x013f: nv_mmctx_size_loop */
+	0x9894bd00,
+	0x85b600e8,
+	0x0180b61a,
+	0xbb0284b6,
+	0xe0b60098,
+	0x04efb804,
+	0xb9eb1bf4,
+	0x00f8029f,
+/* 0x015c: mmctx_xfer */
+	0x083c87f1,
+	0xbd0684b6,
+	0x0199f094,
+	0xf10089d0,
+	0xb6071087,
+	0x94bd0684,
+	0xf405bbfd,
+	0x8bd0090b,
+	0x0099f000,
+/* 0x0180: mmctx_base_disabled */
+	0xf405eefd,
+	0x8ed00c0b,
+	0xc08fd080,
+/* 0x018f: mmctx_multi_disabled */
+	0xb70199f0,
+	0xc8010080,
+	0xb4b600ab,
+	0x0cb9f010,
+	0xb601aec8,
+	0xbefd11e4,
+	0x008bd005,
+/* 0x01a8: mmctx_exec_loop */
+/* 0x01a8: mmctx_wait_free */
+	0xf0008ecf,
+	0x0bf41fe4,
+	0x00ce98fa,
+	0xd005e9fd,
+	0xc0b6c08e,
+	0x04cdb804,
+	0xc8e81bf4,
+	0x1bf402ab,
+/* 0x01c9: mmctx_fini_wait */
+	0x008bcf18,
+	0xb01fb4f0,
+	0x1bf410b4,
+	0x02a7f0f7,
+	0xf4c921f4,
+/* 0x01de: mmctx_stop */
+	0xabc81b0e,
+	0x10b4b600,
+	0xf00cb9f0,
+	0x8bd012b9,
+/* 0x01ed: mmctx_stop_wait */
+	0x008bcf00,
+	0xf412bbc8,
+/* 0x01f6: mmctx_done */
+	0x87f1fa1b,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00199,
+/* 0x0207: strand_wait */
+	0xf900f800,
+	0x02a7f0a0,
+	0xfcc921f4,
+/* 0x0213: strand_pre */
+	0xf100f8a0,
+	0xf04afc87,
+	0x97f00283,
+	0x0089d00c,
+	0x020721f5,
+/* 0x0226: strand_post */
+	0x87f100f8,
+	0x83f04afc,
+	0x0d97f002,
+	0xf50089d0,
+	0xf8020721,
+/* 0x0239: strand_set */
+	0xfca7f100,
+	0x02a3f04f,
+	0x0500aba2,
+	0xd00fc7f0,
+	0xc7f000ac,
+	0x00bcd00b,
+	0x020721f5,
+	0xf000aed0,
+	0xbcd00ac7,
+	0x0721f500,
+/* 0x0263: strand_ctx_init */
+	0xf100f802,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00399f0,
+	0x21f50089,
+	0xe7f00213,
+	0x3921f503,
+	0xfca7f102,
+	0x02a3f046,
+	0x0400aba0,
+	0xf040a0d0,
+	0xbcd001c7,
+	0x0721f500,
+	0x010c9202,
+	0xf000acd0,
+	0xbcd002c7,
+	0x0721f500,
+	0x2621f502,
+	0x8087f102,
+	0x0684b608,
+	0xb70089cf,
+	0x95220080,
+/* 0x02ba: ctx_init_strand_loop */
+	0x8ed008fe,
+	0x408ed000,
+	0xb6808acf,
+	0xa0b606a5,
+	0x00eabb01,
+	0xb60480b6,
+	0x1bf40192,
+	0x08e4b6e8,
+	0xf1f2efbc,
+	0xb6085c87,
+	0x94bd0684,
+	0xd00399f0,
+	0x00f80089,
+/* 0x02ec: error */
+	0xe7f1e0f9,
+	0xe4b60814,
+	0x00efd006,
+	0x0c1ce7f1,
+	0xf006e4b6,
+	0xefd001f7,
+	0xf8e0fc00,
+/* 0x0309: init */
+	0xfe04bd00,
+	0x07fe0004,
+	0x0017f100,
+	0x0227f012,
+	0xf10012d0,
+	0xfe05b917,
+	0x17f10010,
+	0x10d00400,
+	0x0437f1c0,
+	0x0634b604,
+	0x200327f1,
+	0xf10032d0,
+	0xd0200427,
+	0x27f10132,
+	0x32d0200b,
+	0x0c27f102,
+	0x0732d020,
+	0x0c2427f1,
+	0xb90624b6,
+	0x23d00003,
+	0x0427f100,
+	0x0023f087,
+	0xb70012d0,
+	0xf0010012,
+	0x12d00427,
+	0x1031f400,
+	0x9604e7f1,
+	0xf440e3f0,
+	0xf1c76821,
+	0x01018090,
+	0x801ff4f0,
+	0x17f0000f,
+	0x041fbb01,
+	0xf10112b6,
+	0xb6040c27,
+	0x21d00624,
+	0x4021d000,
+	0x080027f1,
+	0xcf0624b6,
+	0xf7f00022,
+/* 0x03a9: init_find_chipset */
+	0x08f0b654,
+	0xb800f398,
+	0x0bf40432,
+	0x0034b00b,
+	0xf8f11bf4,
+/* 0x03bd: init_context */
+	0x0017f100,
+	0x02fe5801,
+	0xf003ff58,
+	0x0e8000e3,
+	0x150f8014,
+	0x013d21f5,
+	0x070037f1,
+	0x950634b6,
+	0x34d00814,
+	0x4034d000,
+	0x130030b7,
+	0xb6001fbb,
+	0x3fd002f5,
+	0x0815b600,
+	0xb60110b6,
+	0x1fb90814,
+	0x6321f502,
+	0x001fbb02,
+	0xf1000398,
+	0xf0200047,
+/* 0x040e: init_gpc */
+	0x4ea05043,
+	0x1fb90804,
+	0x8d21f402,
+	0x08004ea0,
+	0xf4022fb9,
+	0x4ea08d21,
+	0xf4bd010c,
+	0xa08d21f4,
+	0xf401044e,
+	0x4ea08d21,
+	0xf7f00100,
+	0x8d21f402,
+	0x08004ea0,
+/* 0x0440: init_gpc_wait */
+	0xc86821f4,
+	0x0bf41fff,
+	0x044ea0fa,
+	0x6821f408,
+	0xb7001fbb,
+	0xb6800040,
+	0x1bf40132,
+	0x0027f1b4,
+	0x0624b608,
+	0xb74021d0,
+	0xbd080020,
+	0x1f19f014,
+/* 0x0473: main */
+	0xf40021d0,
+	0x28f40031,
+	0x08d7f000,
+	0xf43921f4,
+	0xe4b1f401,
+	0x1bf54001,
+	0x87f100d1,
+	0x84b6083c,
+	0xf094bd06,
+	0x89d00499,
+	0x0017f100,
+	0x0614b60b,
+	0xcf4012cf,
+	0x13c80011,
+	0x7e0bf41f,
+	0xf41f23c8,
+	0x20f95a0b,
+	0xf10212b9,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00799f0,
+	0x32f40089,
+	0x0231f401,
+	0x082921f5,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0799f094,
+	0xfc0089d0,
+	0x3c87f120,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d006,
+	0xf50131f4,
+	0xf1082921,
+	0xb6085c87,
+	0x94bd0684,
+	0xd00699f0,
+	0x0ef40089,
+/* 0x0509: chsw_prev_no_next */
+	0xb920f931,
+	0x32f40212,
+	0x0232f401,
+	0x082921f5,
+	0x17f120fc,
+	0x14b60b00,
+	0x0012d006,
+/* 0x0527: chsw_no_prev */
+	0xc8130ef4,
+	0x0bf41f23,
+	0x0131f40d,
+	0xf50232f4,
+/* 0x0537: chsw_done */
+	0xf1082921,
+	0xb60b0c17,
+	0x27f00614,
+	0x0012d001,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0499f094,
+	0xf50089d0,
+/* 0x0557: main_not_ctx_switch */
+	0xb0ff200e,
+	0x1bf401e4,
+	0x02f2b90d,
+	0x07b521f5,
+/* 0x0567: main_not_ctx_chan */
+	0xb0420ef4,
+	0x1bf402e4,
+	0x3c87f12e,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d007,
+	0xf40132f4,
+	0x21f50232,
+	0x87f10829,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00799,
+	0x110ef400,
+/* 0x0598: main_not_ctx_save */
+	0xf010ef94,
+	0x21f501f5,
+	0x0ef502ec,
+/* 0x05a6: main_done */
+	0x17f1fed1,
+	0x14b60820,
+	0xf024bd06,
+	0x12d01f29,
+	0xbe0ef500,
+/* 0x05b9: ih */
+	0xfe80f9fe,
+	0x80f90188,
+	0xa0f990f9,
+	0xd0f9b0f9,
+	0xf0f9e0f9,
+	0xc4800acf,
+	0x0bf404ab,
+	0x00b7f11d,
+	0x08d7f019,
+	0xcf40becf,
+	0x21f400bf,
+	0x00b0b704,
+	0x01e7f004,
+/* 0x05ef: ih_no_fifo */
+	0xe400bed0,
+	0xf40100ab,
+	0xd7f00d0b,
+	0x01e7f108,
+	0x0421f440,
+/* 0x0600: ih_no_ctxsw */
+	0x0104b7f1,
+	0xabffb0bd,
+	0x0d0bf4b4,
+	0x0c1ca7f1,
+	0xd006a4b6,
+/* 0x0616: ih_no_other */
+	0x0ad000ab,
+	0xfcf0fc40,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0xf80032f4,
+/* 0x0631: ctx_4160s */
+	0x60e7f101,
+	0x40e3f041,
+	0xf401f7f0,
+/* 0x063e: ctx_4160s_wait */
+	0x21f48d21,
+	0x04ffc868,
+	0xf8fa0bf4,
+/* 0x0649: ctx_4160c */
+	0x60e7f100,
+	0x40e3f041,
+	0x21f4f4bd,
+/* 0x0657: ctx_4170s */
+	0xf100f88d,
+	0xf04170e7,
+	0xf5f040e3,
+	0x8d21f410,
+/* 0x0666: ctx_4170w */
+	0xe7f100f8,
+	0xe3f04170,
+	0x6821f440,
+	0xf410f4f0,
+	0x00f8f31b,
+/* 0x0678: ctx_redswitch */
+	0x0614e7f1,
+	0xf106e4b6,
+	0xd00270f7,
+	0xf7f000ef,
+/* 0x0689: ctx_redswitch_delay */
+	0x01f2b608,
+	0xf1fd1bf4,
+	0xd00770f7,
+	0x00f800ef,
+/* 0x0698: ctx_86c */
+	0x086ce7f1,
+	0xd006e4b6,
+	0xe7f100ef,
+	0xe3f08a14,
+	0x8d21f440,
+	0xa86ce7f1,
+	0xf441e3f0,
+	0x00f88d21,
+/* 0x06b8: ctx_load */
+	0x083c87f1,
+	0xbd0684b6,
+	0x0599f094,
+	0xf00089d0,
+	0x21f40ca7,
+	0x2417f1c9,
+	0x0614b60a,
+	0xf10010d0,
+	0xb60b0037,
+	0x32d00634,
+	0x0c17f140,
+	0x0614b60a,
+	0xd00747f0,
+	0x14d00012,
+/* 0x06f1: ctx_chan_wait_0 */
+	0x4014cf40,
+	0xf41f44f0,
+	0x32d0fa1b,
+	0x000bfe00,
+	0xb61f2af0,
+	0x20b60424,
+	0x3c87f102,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d008,
+	0x0a0417f1,
+	0xd00614b6,
+	0x17f10012,
+	0x14b60a20,
+	0x0227f006,
+	0x800023f1,
+	0xf00012d0,
+	0x27f11017,
+	0x23f00300,
+	0x0512fa02,
+	0x87f103f8,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00899,
+	0xc1019800,
+	0x981814b6,
+	0x25b6c002,
+	0x0512fd08,
+	0xf1160180,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00999f0,
+	0x27f10089,
+	0x24b60a04,
+	0x0021d006,
+	0xf10127f0,
+	0xb60a2017,
+	0x12d00614,
+	0x0017f100,
+	0x0613f002,
+	0xf80501fa,
+	0x5c87f103,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d009,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0599f094,
+	0xf80089d0,
+/* 0x07b5: ctx_chan */
+	0x3121f500,
+	0xb821f506,
+	0x0ca7f006,
+	0xf1c921f4,
+	0xb60a1017,
+	0x27f00614,
+	0x0012d005,
+/* 0x07d0: ctx_chan_wait */
+	0xfd0012cf,
+	0x1bf40522,
+	0x4921f5fa,
+/* 0x07df: ctx_mmio_exec */
+	0x9800f806,
+	0x27f18103,
+	0x24b60a04,
+	0x0023d006,
+/* 0x07ee: ctx_mmio_loop */
+	0x34c434bd,
+	0x0f1bf4ff,
+	0x030057f1,
+	0xfa0653f0,
+	0x03f80535,
+/* 0x0800: ctx_mmio_pull */
+	0x98c04e98,
+	0x21f4c14f,
+	0x0830b68d,
+	0xf40112b6,
+/* 0x0812: ctx_mmio_done */
+	0x0398df1b,
+	0x0023d016,
+	0xf1800080,
+	0xf0020017,
+	0x01fa0613,
+	0xf803f806,
+/* 0x0829: ctx_xfer */
+	0x00f7f100,
+	0x06f4b60c,
+	0xd004e7f0,
+/* 0x0836: ctx_xfer_idle */
+	0xfecf80fe,
+	0x00e4f100,
+	0xf91bf420,
+	0xf40611f4,
+/* 0x0846: ctx_xfer_pre */
+	0xf7f01102,
+	0x9821f510,
+	0x3121f506,
+	0x1c11f406,
+/* 0x0854: ctx_xfer_pre_load */
+	0xf502f7f0,
+	0xf5065721,
+	0xf5066621,
+	0xbd067821,
+	0x5721f5f4,
+	0xb821f506,
+/* 0x086d: ctx_xfer_exec */
+	0x16019806,
+	0x041427f1,
+	0xd00624b6,
+	0xe7f10020,
+	0xe3f0a500,
+	0x021fb941,
+	0xb68d21f4,
+	0xfcf004e0,
+	0x022cf001,
+	0xfd0124b6,
+	0x21f405f2,
+	0xfc17f18d,
+	0x0213f04a,
+	0xd00c27f0,
+	0x21f50012,
+	0x27f10207,
+	0x23f047fc,
+	0x0020d002,
+	0xb6012cf0,
+	0x12d00320,
+	0x01acf000,
+	0xf006a5f0,
+	0x0c9800b7,
+	0x150d9814,
+	0xf500e7f0,
+	0xf0015c21,
+	0x21f508a7,
+	0x21f50103,
+	0x01f40207,
+	0x0ca7f022,
+	0xf1c921f4,
+	0xb60a1017,
+	0x27f00614,
+	0x0012d005,
+/* 0x08f4: ctx_xfer_post_save_wait */
+	0xfd0012cf,
+	0x1bf40522,
+	0x3202f4fa,
+/* 0x0900: ctx_xfer_post */
+	0xf502f7f0,
+	0xbd065721,
+	0x9821f5f4,
+	0x2621f506,
+	0x6621f502,
+	0xf5f4bd06,
+	0xf4065721,
+	0x01981011,
+	0x0511fd80,
+	0xf5070bf4,
+/* 0x092b: ctx_xfer_no_post_mmio */
+	0xf507df21,
+/* 0x092f: ctx_xfer_done */
+	0xf8064921,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
new file mode 100644
index 0000000..7fe9d7c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
@@ -0,0 +1,793 @@
+/* fuc microcode for nve0 PGRAPH/HUB
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ *    m4 nve0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nve0_grhub.fuc.h
+ */
+
+.section #nve0_grhub_data
+include(`nve0.fuc')
+gpc_count:		.b32 0
+rop_count:		.b32 0
+cmd_queue:		queue_init
+hub_mmio_list_head:	.b32 0
+hub_mmio_list_tail:	.b32 0
+
+ctx_current:		.b32 0
+
+chipsets:
+.b8  0xe4 0 0 0
+.b16 #nve4_hub_mmio_head
+.b16 #nve4_hub_mmio_tail
+.b8  0xe7 0 0 0
+.b16 #nve4_hub_mmio_head
+.b16 #nve4_hub_mmio_tail
+.b8  0xe6 0 0 0
+.b16 #nve4_hub_mmio_head
+.b16 #nve4_hub_mmio_tail
+.b8  0 0 0 0
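+// Each record above is 8 bytes: a chipset id padded to four bytes, then
+// the 16-bit head/tail pointers of that chipset mmio list.  The packing
+// can be cross-checked against the generated hubnve0.fuc.h, where the
+// 0xe4 record appears as 0x000000e4 0x01440078, i.e. head = 0x0078 and
+// tail = 0x0144 stored little-endian.  The all-zero id terminates the
+// table, which #init_find_chipset walks in 8-byte steps.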
+
+nve4_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404010, 7)
+mmctx_data(0x4040a8, 9)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 1)
+mmctx_data(0x4041a0, 4)
+mmctx_data(0x404200, 4)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 4)
+mmctx_data(0x40462c, 2)
+mmctx_data(0x404640, 1)
+mmctx_data(0x404654, 1)
+mmctx_data(0x404660, 1)
+mmctx_data(0x404678, 19)
+mmctx_data(0x4046c8, 3)
+mmctx_data(0x404700, 3)
+mmctx_data(0x404718, 10)
+mmctx_data(0x404744, 2)
+mmctx_data(0x404754, 1)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x405b00, 1)
+mmctx_data(0x405b10, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 2)
+mmctx_data(0x4064c0, 12)
+mmctx_data(0x4064fc, 1)
+mmctx_data(0x407040, 1)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408840, 1)
+mmctx_data(0x408900, 3)
+mmctx_data(0x408980, 1)
+nve4_hub_mmio_tail:
+
+.align 256
+chan_data:
+chan_mmio_count:	.b32 0
+chan_mmio_address:	.b32 0
+
+.align 256
+xfer_data: 		.b32 0
+
+.section #nve0_grhub_code
+bra #init
+define(`include_code')
+include(`nve0.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nve0.fuc)
+//
+error:
+	push $r14
+	mov $r14 0x814
+	shl b32 $r14 6
+	iowr I[$r14 + 0x000] $r15	// CC_SCRATCH[5] = error code
+	mov $r14 0xc1c
+	shl b32 $r14 6
+	mov $r15 1
+	iowr I[$r14 + 0x000] $r15	// INTR_UP_SET
+	pop $r14
+	ret
+
+// HUB fuc initialisation, executed by triggering ucode start, will
+// fall through to main loop after completion.
+//
+// Input:
+//   CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+//
+// Output:
+//   CC_SCRATCH[0]:
+//	     31:31: set to signal completion
+//   CC_SCRATCH[1]:
+//	      31:0: total PGRAPH context size
+//
+init:
+	clear b32 $r0
+	mov $sp $r0
+	mov $xdbase $r0
+
+	// enable fifo access
+	mov $r1 0x1200
+	mov $r2 2
+	iowr I[$r1 + 0x000] $r2	// FIFO_ENABLE
+
+	// setup i0 handler, and route all interrupts to it
+	mov $r1 #ih
+	mov $iv0 $r1
+	mov $r1 0x400
+	iowr I[$r1 + 0x300] $r0	// INTR_DISPATCH
+
+	// route HUB_CHANNEL_SWITCH to fuc interrupt 8
+	mov $r3 0x404
+	shl b32 $r3 6
+	mov $r2 0x2003		// { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
+	iowr I[$r3 + 0x000] $r2
+
+	// not sure what these are, route them because NVIDIA does, and
+	// the IRQ handler will signal the host if we ever get one.. we
+	// may find out if/why we need to handle these if so..
+	//
+	mov $r2 0x2004
+	iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
+	mov $r2 0x200b
+	iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
+	mov $r2 0x200c
+	iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
+
+	// enable all INTR_UP interrupts
+	mov $r2 0xc24
+	shl b32 $r2 6
+	not b32 $r3 $r0
+	iowr I[$r2] $r3
+
+	// enable fifo, ctxsw, 9, 10, 15 interrupts
+	mov $r2 -0x78fc		// 0x8704
+	sethi $r2 0
+	iowr I[$r1 + 0x000] $r2	// INTR_EN_SET
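+	// (presumably because immediates are sign-extended 16-bit values:
+	// 0x8704 is written as -0x78fc, i.e. 0xffff8704, and sethi then
+	// clears the unwanted high bits)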
+
+	// fifo interrupt is level-triggered, the rest edge-triggered
+	sub b32 $r1 0x100
+	mov $r2 4
+	iowr I[$r1] $r2
+
+	// enable interrupts
+	bset $flags ie0
+
+	// fetch enabled GPC/ROP counts
+	mov $r14 -0x69fc	// 0x409604
+	sethi $r14 0x400000
+	call #nv_rd32
+	extr $r1 $r15 16:20
+	st b32 D[$r0 + #rop_count] $r1
+	and $r15 0x1f
+	st b32 D[$r0 + #gpc_count] $r15
+
+	// set BAR_REQMASK to GPC mask
+	mov $r1 1
+	shl b32 $r1 $r15
+	sub b32 $r1 1
+	mov $r2 0x40c
+	shl b32 $r2 6
+	iowr I[$r2 + 0x000] $r1
+	iowr I[$r2 + 0x100] $r1
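+	// ($r1 = (1 << gpc_count) - 1, e.g. 0x0f when four GPCs are enabled)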
+
+	// find context data for this chipset
+	mov $r2 0x800
+	shl b32 $r2 6
+	iord $r2 I[$r2 + 0x000]		// CC_SCRATCH[0]
+	mov $r15 #chipsets - 8
+	init_find_chipset:
+		add b32 $r15 8
+		ld b32 $r3 D[$r15 + 0x00]
+		cmpu b32 $r3 $r2
+		bra e #init_context
+		cmpu b32 $r3 0
+		bra ne #init_find_chipset
+		// unknown chipset
+		ret
+
+	// context size calculation, reserve first 256 bytes for use by fuc
+	init_context:
+	mov $r1 256
+
+	// calculate size of mmio context data
+	ld b16 $r14 D[$r15 + 4]
+	ld b16 $r15 D[$r15 + 6]
+	sethi $r14 0
+	st b32 D[$r0 + #hub_mmio_list_head] $r14
+	st b32 D[$r0 + #hub_mmio_list_tail] $r15
+	call #mmctx_size
+
+	// set mmctx base addresses now so we don't have to do it later,
+	// they don't (currently) ever change
+	mov $r3 0x700
+	shl b32 $r3 6
+	shr b32 $r4 $r1 8
+	iowr I[$r3 + 0x000] $r4		// MMCTX_SAVE_SWBASE
+	iowr I[$r3 + 0x100] $r4		// MMCTX_LOAD_SWBASE
+	add b32 $r3 0x1300
+	add b32 $r1 $r15
+	shr b32 $r15 2
+	iowr I[$r3 + 0x000] $r15	// MMCTX_LOAD_COUNT, wtf for?!?
+
+	// strands, base offset needs to be aligned to 256 bytes
+	shr b32 $r1 8
+	add b32 $r1 1
+	shl b32 $r1 8
+	mov b32 $r15 $r1
+	call #strand_ctx_init
+	add b32 $r1 $r15
+
+	// initialise each GPC in sequence by passing in the offset of its
+	// context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
+	// has previously been uploaded by the host) running.
+	//
+	// the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
+	// when it has completed, and return the size of its context data
+	// in GPCn_CC_SCRATCH[1]
+	//
+	ld b32 $r3 D[$r0 + #gpc_count]
+	mov $r4 0x2000
+	sethi $r4 0x500000
+	init_gpc:
+		// setup, and start GPC ucode running
+		add b32 $r14 $r4 0x804
+		mov b32 $r15 $r1
+		call #nv_wr32			// CC_SCRATCH[1] = ctx offset
+		add b32 $r14 $r4 0x800
+		mov b32 $r15 $r2
+		call #nv_wr32			// CC_SCRATCH[0] = chipset
+		add b32 $r14 $r4 0x10c
+		clear b32 $r15
+		call #nv_wr32
+		add b32 $r14 $r4 0x104
+		call #nv_wr32			// ENTRY
+		add b32 $r14 $r4 0x100
+		mov $r15 2			// CTRL_START_TRIGGER
+		call #nv_wr32			// CTRL
+
+		// wait for it to complete, and adjust context size
+		add b32 $r14 $r4 0x800
+		init_gpc_wait:
+			call #nv_rd32
+			xbit $r15 $r15 31
+			bra e #init_gpc_wait
+		add b32 $r14 $r4 0x804
+		call #nv_rd32
+		add b32 $r1 $r15
+
+		// next!
+		add b32 $r4 0x8000
+		sub b32 $r3 1
+		bra ne #init_gpc
+
+	// save context size, and tell host we're ready
+	mov $r2 0x800
+	shl b32 $r2 6
+	iowr I[$r2 + 0x100] $r1		// CC_SCRATCH[1]  = context size
+	add b32 $r2 0x800
+	clear b32 $r1
+	bset $r1 31
+	iowr I[$r2 + 0x000] $r1		// CC_SCRATCH[0] |= 0x80000000
+
+// Main program loop, very simple, sleeps until woken up by the interrupt
+// handler, pulls a command from the queue and executes its handler
+//
+main:
+	// sleep until we have something to do
+	bset $flags $p0
+	sleep $p0
+	mov $r13 #cmd_queue
+	call #queue_get
+	bra $p1 #main
+
+	// context switch, requested by GPU?
+	cmpu b32 $r14 0x4001
+	bra ne #main_not_ctx_switch
+		trace_set(T_AUTO)
+		mov $r1 0xb00
+		shl b32 $r1 6
+		iord $r2 I[$r1 + 0x100]		// CHAN_NEXT
+		iord $r1 I[$r1 + 0x000]		// CHAN_CUR
+
+		xbit $r3 $r1 31
+		bra e #chsw_no_prev
+			xbit $r3 $r2 31
+			bra e #chsw_prev_no_next
+				push $r2
+				mov b32 $r2 $r1
+				trace_set(T_SAVE)
+				bclr $flags $p1
+				bset $flags $p2
+				call #ctx_xfer
+				trace_clr(T_SAVE);
+				pop $r2
+				trace_set(T_LOAD);
+				bset $flags $p1
+				call #ctx_xfer
+				trace_clr(T_LOAD);
+				bra #chsw_done
+			chsw_prev_no_next:
+				push $r2
+				mov b32 $r2 $r1
+				bclr $flags $p1
+				bclr $flags $p2
+				call #ctx_xfer
+				pop $r2
+				mov $r1 0xb00
+				shl b32 $r1 6
+				iowr I[$r1] $r2
+				bra #chsw_done
+		chsw_no_prev:
+			xbit $r3 $r2 31
+			bra e #chsw_done
+				bset $flags $p1
+				bclr $flags $p2
+				call #ctx_xfer
+
+		// ack the context switch request
+		chsw_done:
+		mov $r1 0xb0c
+		shl b32 $r1 6
+		mov $r2 1
+		iowr I[$r1 + 0x000] $r2		// 0x409b0c
+		trace_clr(T_AUTO)
+		bra #main
+
+	// request to set current channel? (*not* a context switch)
+	main_not_ctx_switch:
+	cmpu b32 $r14 0x0001
+	bra ne #main_not_ctx_chan
+		mov b32 $r2 $r15
+		call #ctx_chan
+		bra #main_done
+
+	// request to store current channel context?
+	main_not_ctx_chan:
+	cmpu b32 $r14 0x0002
+	bra ne #main_not_ctx_save
+		trace_set(T_SAVE)
+		bclr $flags $p1
+		bclr $flags $p2
+		call #ctx_xfer
+		trace_clr(T_SAVE)
+		bra #main_done
+
+	main_not_ctx_save:
+		shl b32 $r15 $r14 16
+		or $r15 E_BAD_COMMAND
+		call #error
+		bra #main
+
+	main_done:
+	mov $r1 0x820
+	shl b32 $r1 6
+	clear b32 $r2
+	bset $r2 31
+	iowr I[$r1 + 0x000] $r2		// CC_SCRATCH[0] |= 0x80000000
+	bra #main
+
+// interrupt handler
+ih:
+	push $r8
+	mov $r8 $flags
+	push $r8
+	push $r9
+	push $r10
+	push $r11
+	push $r13
+	push $r14
+	push $r15
+
+	// incoming fifo command?
+	iord $r10 I[$r0 + 0x200]	// INTR
+	and $r11 $r10 0x00000004
+	bra e #ih_no_fifo
+		// queue incoming fifo command for later processing
+		mov $r11 0x1900
+		mov $r13 #cmd_queue
+		iord $r14 I[$r11 + 0x100]	// FIFO_CMD
+		iord $r15 I[$r11 + 0x000]	// FIFO_DATA
+		call #queue_put
+		add b32 $r11 0x400
+		mov $r14 1
+		iowr I[$r11 + 0x000] $r14	// FIFO_ACK
+
+	// context switch request?
+	ih_no_fifo:
+	and $r11 $r10 0x00000100
+	bra e #ih_no_ctxsw
+		// enqueue a context switch for later processing
+		mov $r13 #cmd_queue
+		mov $r14 0x4001
+		call #queue_put
+
+	// anything we didn't handle, bring it to the host's attention
+	ih_no_ctxsw:
+	mov $r11 0x104
+	not b32 $r11
+	and $r11 $r10 $r11
+	bra e #ih_no_other
+		mov $r10 0xc1c
+		shl b32 $r10 6
+		iowr I[$r10] $r11	// INTR_UP_SET
+
+	// ack, and wake up main()
+	ih_no_other:
+	iowr I[$r0 + 0x100] $r10	// INTR_ACK
+
+	pop $r15
+	pop $r14
+	pop $r13
+	pop $r11
+	pop $r10
+	pop $r9
+	pop $r8
+	mov $flags $r8
+	pop $r8
+	bclr $flags $p0
+	iret
+
+// Again, not really sure
+//
+// In: $r15 value to set 0x404170 to
+//
+ctx_4170s:
+	mov $r14 0x4170
+	sethi $r14 0x400000
+	or $r15 0x10
+	call #nv_wr32
+	ret
+
+// Waits for a ctx_4170s() call to complete
+//
+ctx_4170w:
+	mov $r14 0x4170
+	sethi $r14 0x400000
+	call #nv_rd32
+	and $r15 0x10
+	bra ne #ctx_4170w
+	ret
+
+// Disables various things, waits a bit, and re-enables them..
+//
+// Not sure how exactly this helps, perhaps "ENABLE" is not such a
+// good description for the bits we turn off?  Anyway, without this,
+// funny things happen.
+//
+ctx_redswitch:
+	mov $r14 0x614
+	shl b32 $r14 6
+	mov $r15 0x270
+	iowr I[$r14] $r15	// HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
+	mov $r15 8
+	ctx_redswitch_delay:
+		sub b32 $r15 1
+		bra ne #ctx_redswitch_delay
+	mov $r15 0x770
+	iowr I[$r14] $r15	// HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
+	ret
+
+// Not a clue what this is for, except that unless the value is 0x10, the
+// strand context is saved (and presumably restored) incorrectly..
+//
+// In: $r15 value to set to (0x00/0x10 are used)
+//
+ctx_86c:
+	mov $r14 0x86c
+	shl b32 $r14 6
+	iowr I[$r14] $r15	// HUB(0x86c) = val
+	mov $r14 -0x75ec
+	sethi $r14 0x400000
+	call #nv_wr32		// ROP(0xa14) = val
+	mov $r14 -0x5794
+	sethi $r14 0x410000
+	call #nv_wr32		// GPC(0x86c) = val
+	ret
+
+// ctx_load - loads a channel's ctxctl data, and selects its vm
+//
+// In: $r2 channel address
+//
+ctx_load:
+	trace_set(T_CHAN)
+
+	// switch to channel, somewhat magic in parts..
+	mov $r10 12		// DONE_UNK12
+	call #wait_donez
+	mov $r1 0xa24
+	shl b32 $r1 6
+	iowr I[$r1 + 0x000] $r0	// 0x409a24
+	mov $r3 0xb00
+	shl b32 $r3 6
+	iowr I[$r3 + 0x100] $r2	// CHAN_NEXT
+	mov $r1 0xa0c
+	shl b32 $r1 6
+	mov $r4 7
+	iowr I[$r1 + 0x000] $r2 // MEM_CHAN
+	iowr I[$r1 + 0x100] $r4	// MEM_CMD
+	ctx_chan_wait_0:
+		iord $r4 I[$r1 + 0x100]
+		and $r4 0x1f
+		bra ne #ctx_chan_wait_0
+	iowr I[$r3 + 0x000] $r2	// CHAN_CUR
+
+	// load channel header, fetch PGRAPH context pointer
+	mov $xtargets $r0
+	bclr $r2 31
+	shl b32 $r2 4
+	add b32 $r2 2
+
+	trace_set(T_LCHAN)
+	mov $r1 0xa04
+	shl b32 $r1 6
+	iowr I[$r1 + 0x000] $r2		// MEM_BASE
+	mov $r1 0xa20
+	shl b32 $r1 6
+	mov $r2 0x0002
+	sethi $r2 0x80000000
+	iowr I[$r1 + 0x000] $r2		// MEM_TARGET = vram
+	mov $r1 0x10			// chan + 0x0210
+	mov $r2 #xfer_data
+	sethi $r2 0x00020000		// 16 bytes
+	xdld $r1 $r2
+	xdwait
+	trace_clr(T_LCHAN)
+
+	// update current context
+	ld b32 $r1 D[$r0 + #xfer_data + 4]
+	shl b32 $r1 24
+	ld b32 $r2 D[$r0 + #xfer_data + 0]
+	shr b32 $r2 8
+	or $r1 $r2
+	st b32 D[$r0 + #ctx_current] $r1
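+	// (high word << 24 | low word >> 8 is the 64-bit pointer shifted
+	// right by 8 bits, i.e. presumably a base address in 256-byte units)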
+
+	// set transfer base to start of context, and fetch context header
+	trace_set(T_LCTXH)
+	mov $r2 0xa04
+	shl b32 $r2 6
+	iowr I[$r2 + 0x000] $r1		// MEM_BASE
+	mov $r2 1
+	mov $r1 0xa20
+	shl b32 $r1 6
+	iowr I[$r1 + 0x000] $r2		// MEM_TARGET = vm
+	mov $r1 #chan_data
+	sethi $r1 0x00060000		// 256 bytes
+	xdld $r0 $r1
+	xdwait
+	trace_clr(T_LCTXH)
+
+	trace_clr(T_CHAN)
+	ret
+
+// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
+//            the active channel for ctxctl, but not actually transfer
+//            any context data.  intended for use only during initial
+//            context construction.
+//
+// In: $r2 channel address
+//
+ctx_chan:
+	call #ctx_load
+	mov $r10 12			// DONE_UNK12
+	call #wait_donez
+	mov $r1 0xa10
+	shl b32 $r1 6
+	mov $r2 5
+	iowr I[$r1 + 0x000] $r2		// MEM_CMD = 5 (???)
+	ctx_chan_wait:
+		iord $r2 I[$r1 + 0x000]
+		or $r2 $r2
+		bra ne #ctx_chan_wait
+	ret
+
+// Execute per-context state overrides list
+//
+// Only executed on the first load of a channel.  Might want to look into
+// removing this and having the host directly modify the channel's context
+// to change this state...  The nouveau DRM already builds this list as
+// it's definitely needed for NVIDIA's ucode, so we may as well use it for now
+//
+// Input: $r1 mmio list length
+//
+ctx_mmio_exec:
+	// set transfer base to be the mmio list
+	ld b32 $r3 D[$r0 + #chan_mmio_address]
+	mov $r2 0xa04
+	shl b32 $r2 6
+	iowr I[$r2 + 0x000] $r3		// MEM_BASE
+
+	clear b32 $r3
+	ctx_mmio_loop:
+		// fetch next 256 bytes of mmio list if necessary
+		and $r4 $r3 0xff
+		bra ne #ctx_mmio_pull
+			mov $r5 #xfer_data
+			sethi $r5 0x00060000	// 256 bytes
+			xdld $r3 $r5
+			xdwait
+
+		// execute a single list entry
+		ctx_mmio_pull:
+		ld b32 $r14 D[$r4 + #xfer_data + 0x00]
+		ld b32 $r15 D[$r4 + #xfer_data + 0x04]
+		call #nv_wr32
+
+		// next!
+		add b32 $r3 8
+		sub b32 $r1 1
+		bra ne #ctx_mmio_loop
+
+	// set transfer base back to the current context
+	ctx_mmio_done:
+	ld b32 $r3 D[$r0 + #ctx_current]
+	iowr I[$r2 + 0x000] $r3		// MEM_BASE
+
+	// disable the mmio list now, we don't need/want to execute it again
+	st b32 D[$r0 + #chan_mmio_count] $r0
+	mov $r1 #chan_data
+	sethi $r1 0x00060000		// 256 bytes
+	xdst $r0 $r1
+	xdwait
+	ret
+
+// Transfer HUB context data between GPU and storage area
+//
+// In: $r2 channel address
+//     $p1 clear on save, set on load
+//     $p2 set if opposite direction done/will be done, so:
+//		on save it means: "a load will follow this save"
+//		on load it means: "a save preceded this load"
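+//
+//	As driven from #main, the four cases are:
+//		save with a load to follow:    $p1 clear, $p2 set
+//		load following a save:         $p1 set,   $p2 set
+//		save only (no next channel):   $p1 clear, $p2 clear
+//		load only (no prev channel):   $p1 set,   $p2 clear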
+//
+ctx_xfer:
+	// according to mwk, some kind of wait for idle
+	mov $r15 0xc00
+	shl b32 $r15 6
+	mov $r14 4
+	iowr I[$r15 + 0x200] $r14
+	ctx_xfer_idle:
+		iord $r14 I[$r15 + 0x000]
+		and $r14 0x2000
+		bra ne #ctx_xfer_idle
+
+	bra not $p1 #ctx_xfer_pre
+	bra $p2 #ctx_xfer_pre_load
+	ctx_xfer_pre:
+		mov $r15 0x10
+		call #ctx_86c
+		bra not $p1 #ctx_xfer_exec
+
+	ctx_xfer_pre_load:
+		mov $r15 2
+		call #ctx_4170s
+		call #ctx_4170w
+		call #ctx_redswitch
+		clear b32 $r15
+		call #ctx_4170s
+		call #ctx_load
+
+	// fetch context pointer, and initiate xfer on all GPCs
+	ctx_xfer_exec:
+	ld b32 $r1 D[$r0 + #ctx_current]
+	mov $r2 0x414
+	shl b32 $r2 6
+	iowr I[$r2 + 0x000] $r0	// BAR_STATUS = reset
+	mov $r14 -0x5b00
+	sethi $r14 0x410000
+	mov b32 $r15 $r1
+	call #nv_wr32		// GPC_BCAST_WRCMD_DATA = ctx pointer
+	add b32 $r14 4
+	xbit $r15 $flags $p1
+	xbit $r2 $flags $p2
+	shl b32 $r2 1
+	or $r15 $r2
+	call #nv_wr32		// GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+
+	// strands
+	mov $r1 0x4afc
+	sethi $r1 0x20000
+	mov $r2 0xc
+	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0c
+	call #strand_wait
+	mov $r2 0x47fc
+	sethi $r2 0x20000
+	iowr I[$r2] $r0		// STRAND_FIRST_GENE(0x3f) = 0x00
+	xbit $r2 $flags $p1
+	add b32 $r2 3
+	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+	// mmio context
+	xbit $r10 $flags $p1	// direction
+	or $r10 6		// first, last
+	mov $r11 0		// base = 0
+	ld b32 $r12 D[$r0 + #hub_mmio_list_head]
+	ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
+	mov $r14 0		// not multi
+	call #mmctx_xfer
+
+	// wait for GPCs to all complete
+	mov $r10 8		// DONE_BAR
+	call #wait_doneo
+
+	// wait for strand xfer to complete
+	call #strand_wait
+
+	// post-op
+	bra $p1 #ctx_xfer_post
+		mov $r10 12		// DONE_UNK12
+		call #wait_donez
+		mov $r1 0xa10
+		shl b32 $r1 6
+		mov $r2 5
+		iowr I[$r1] $r2		// MEM_CMD
+		ctx_xfer_post_save_wait:
+			iord $r2 I[$r1]
+			or $r2 $r2
+			bra ne #ctx_xfer_post_save_wait
+
+	bra $p2 #ctx_xfer_done
+	ctx_xfer_post:
+		mov $r15 2
+		call #ctx_4170s
+		clear b32 $r15
+		call #ctx_86c
+		call #strand_post
+		call #ctx_4170w
+		clear b32 $r15
+		call #ctx_4170s
+
+		bra not $p1 #ctx_xfer_no_post_mmio
+		ld b32 $r1 D[$r0 + #chan_mmio_count]
+		or $r1 $r1
+		bra e #ctx_xfer_no_post_mmio
+			call #ctx_mmio_exec
+
+		ctx_xfer_no_post_mmio:
+
+	ctx_xfer_done:
+	ret
+
+.align 256
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
new file mode 100644
index 0000000..e3421af
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -0,0 +1,858 @@
+uint32_t nve0_grhub_data[] = {
+/* 0x0000: gpc_count */
+	0x00000000,
+/* 0x0004: rop_count */
+	0x00000000,
+/* 0x0008: cmd_queue */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0050: hub_mmio_list_head */
+	0x00000000,
+/* 0x0054: hub_mmio_list_tail */
+	0x00000000,
+/* 0x0058: ctx_current */
+	0x00000000,
+/* 0x005c: chipsets */
+	0x000000e4,
+	0x01440078,
+	0x000000e7,
+	0x01440078,
+	0x000000e6,
+	0x01440078,
+	0x00000000,
+/* 0x0078: nve4_hub_mmio_head */
+	0x0417e91c,
+	0x04400204,
+	0x18404010,
+	0x204040a8,
+	0x184040d0,
+	0x004040f8,
+	0x08404130,
+	0x08404150,
+	0x00404164,
+	0x0c4041a0,
+	0x0c404200,
+	0x34404404,
+	0x0c404460,
+	0x00404480,
+	0x00404498,
+	0x0c404604,
+	0x0c404618,
+	0x0440462c,
+	0x00404640,
+	0x00404654,
+	0x00404660,
+	0x48404678,
+	0x084046c8,
+	0x08404700,
+	0x24404718,
+	0x04404744,
+	0x00404754,
+	0x00405800,
+	0x08405830,
+	0x00405854,
+	0x0c405870,
+	0x04405a00,
+	0x00405a18,
+	0x00405b00,
+	0x00405b10,
+	0x00406020,
+	0x0c406028,
+	0x044064a8,
+	0x044064b4,
+	0x2c4064c0,
+	0x004064fc,
+	0x00407040,
+	0x00407804,
+	0x1440780c,
+	0x004078bc,
+	0x18408000,
+	0x00408064,
+	0x08408800,
+	0x00408840,
+	0x08408900,
+	0x00408980,
+/* 0x0144: nve4_hub_mmio_tail */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0200: chan_data */
+/* 0x0200: chan_mmio_count */
+	0x00000000,
+/* 0x0204: chan_mmio_address */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+/* 0x0300: xfer_data */
+	0x00000000,
+};
+
+uint32_t nve0_grhub_code[] = {
+	0x03090ef5,
+/* 0x0004: queue_put */
+	0x9800d898,
+	0x86f001d9,
+	0x0489b808,
+	0xf00c1bf4,
+	0x21f502f7,
+	0x00f802ec,
+/* 0x001c: queue_put_next */
+	0xb60798c4,
+	0x8dbb0384,
+	0x0880b600,
+	0x80008e80,
+	0x90b6018f,
+	0x0f94f001,
+	0xf801d980,
+/* 0x0039: queue_get */
+	0x0131f400,
+	0x9800d898,
+	0x89b801d9,
+	0x210bf404,
+	0xb60789c4,
+	0x9dbb0394,
+	0x0890b600,
+	0x98009e98,
+	0x80b6019f,
+	0x0f84f001,
+	0xf400d880,
+/* 0x0066: queue_get_done */
+	0x00f80132,
+/* 0x0068: nv_rd32 */
+	0x0728b7f1,
+	0xb906b4b6,
+	0xc9f002ec,
+	0x00bcd01f,
+/* 0x0078: nv_rd32_wait */
+	0xc800bccf,
+	0x1bf41fcc,
+	0x06a7f0fa,
+	0x010321f5,
+	0xf840bfcf,
+/* 0x008d: nv_wr32 */
+	0x28b7f100,
+	0x06b4b607,
+	0xb980bfd0,
+	0xc9f002ec,
+	0x1ec9f01f,
+/* 0x00a3: nv_wr32_wait */
+	0xcf00bcd0,
+	0xccc800bc,
+	0xfa1bf41f,
+/* 0x00ae: watchdog_reset */
+	0x87f100f8,
+	0x84b60430,
+	0x1ff9f006,
+	0xf8008fd0,
+/* 0x00bd: watchdog_clear */
+	0x3087f100,
+	0x0684b604,
+	0xf80080d0,
+/* 0x00c9: wait_donez */
+	0x3c87f100,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d000,
+	0x081887f1,
+	0xd00684b6,
+/* 0x00e2: wait_done_wait_donez */
+	0x87f1008a,
+	0x84b60400,
+	0x0088cf06,
+	0xf4888aff,
+	0x87f1f31b,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00099,
+/* 0x0103: wait_doneo */
+	0xf100f800,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00099f0,
+	0x87f10089,
+	0x84b60818,
+	0x008ad006,
+/* 0x011c: wait_done_wait_doneo */
+	0x040087f1,
+	0xcf0684b6,
+	0x8aff0088,
+	0xf30bf488,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0099f094,
+	0xf80089d0,
+/* 0x013d: mmctx_size */
+/* 0x013f: nv_mmctx_size_loop */
+	0x9894bd00,
+	0x85b600e8,
+	0x0180b61a,
+	0xbb0284b6,
+	0xe0b60098,
+	0x04efb804,
+	0xb9eb1bf4,
+	0x00f8029f,
+/* 0x015c: mmctx_xfer */
+	0x083c87f1,
+	0xbd0684b6,
+	0x0199f094,
+	0xf10089d0,
+	0xb6071087,
+	0x94bd0684,
+	0xf405bbfd,
+	0x8bd0090b,
+	0x0099f000,
+/* 0x0180: mmctx_base_disabled */
+	0xf405eefd,
+	0x8ed00c0b,
+	0xc08fd080,
+/* 0x018f: mmctx_multi_disabled */
+	0xb70199f0,
+	0xc8010080,
+	0xb4b600ab,
+	0x0cb9f010,
+	0xb601aec8,
+	0xbefd11e4,
+	0x008bd005,
+/* 0x01a8: mmctx_exec_loop */
+/* 0x01a8: mmctx_wait_free */
+	0xf0008ecf,
+	0x0bf41fe4,
+	0x00ce98fa,
+	0xd005e9fd,
+	0xc0b6c08e,
+	0x04cdb804,
+	0xc8e81bf4,
+	0x1bf402ab,
+/* 0x01c9: mmctx_fini_wait */
+	0x008bcf18,
+	0xb01fb4f0,
+	0x1bf410b4,
+	0x02a7f0f7,
+	0xf4c921f4,
+/* 0x01de: mmctx_stop */
+	0xabc81b0e,
+	0x10b4b600,
+	0xf00cb9f0,
+	0x8bd012b9,
+/* 0x01ed: mmctx_stop_wait */
+	0x008bcf00,
+	0xf412bbc8,
+/* 0x01f6: mmctx_done */
+	0x87f1fa1b,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00199,
+/* 0x0207: strand_wait */
+	0xf900f800,
+	0x02a7f0a0,
+	0xfcc921f4,
+/* 0x0213: strand_pre */
+	0xf100f8a0,
+	0xf04afc87,
+	0x97f00283,
+	0x0089d00c,
+	0x020721f5,
+/* 0x0226: strand_post */
+	0x87f100f8,
+	0x83f04afc,
+	0x0d97f002,
+	0xf50089d0,
+	0xf8020721,
+/* 0x0239: strand_set */
+	0xfca7f100,
+	0x02a3f04f,
+	0x0500aba2,
+	0xd00fc7f0,
+	0xc7f000ac,
+	0x00bcd00b,
+	0x020721f5,
+	0xf000aed0,
+	0xbcd00ac7,
+	0x0721f500,
+/* 0x0263: strand_ctx_init */
+	0xf100f802,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00399f0,
+	0x21f50089,
+	0xe7f00213,
+	0x3921f503,
+	0xfca7f102,
+	0x02a3f046,
+	0x0400aba0,
+	0xf040a0d0,
+	0xbcd001c7,
+	0x0721f500,
+	0x010c9202,
+	0xf000acd0,
+	0xbcd002c7,
+	0x0721f500,
+	0x2621f502,
+	0x8087f102,
+	0x0684b608,
+	0xb70089cf,
+	0x95220080,
+/* 0x02ba: ctx_init_strand_loop */
+	0x8ed008fe,
+	0x408ed000,
+	0xb6808acf,
+	0xa0b606a5,
+	0x00eabb01,
+	0xb60480b6,
+	0x1bf40192,
+	0x08e4b6e8,
+	0xf1f2efbc,
+	0xb6085c87,
+	0x94bd0684,
+	0xd00399f0,
+	0x00f80089,
+/* 0x02ec: error */
+	0xe7f1e0f9,
+	0xe4b60814,
+	0x00efd006,
+	0x0c1ce7f1,
+	0xf006e4b6,
+	0xefd001f7,
+	0xf8e0fc00,
+/* 0x0309: init */
+	0xfe04bd00,
+	0x07fe0004,
+	0x0017f100,
+	0x0227f012,
+	0xf10012d0,
+	0xfe05b917,
+	0x17f10010,
+	0x10d00400,
+	0x0437f1c0,
+	0x0634b604,
+	0x200327f1,
+	0xf10032d0,
+	0xd0200427,
+	0x27f10132,
+	0x32d0200b,
+	0x0c27f102,
+	0x0732d020,
+	0x0c2427f1,
+	0xb90624b6,
+	0x23d00003,
+	0x0427f100,
+	0x0023f087,
+	0xb70012d0,
+	0xf0010012,
+	0x12d00427,
+	0x1031f400,
+	0x9604e7f1,
+	0xf440e3f0,
+	0xf1c76821,
+	0x01018090,
+	0x801ff4f0,
+	0x17f0000f,
+	0x041fbb01,
+	0xf10112b6,
+	0xb6040c27,
+	0x21d00624,
+	0x4021d000,
+	0x080027f1,
+	0xcf0624b6,
+	0xf7f00022,
+/* 0x03a9: init_find_chipset */
+	0x08f0b654,
+	0xb800f398,
+	0x0bf40432,
+	0x0034b00b,
+	0xf8f11bf4,
+/* 0x03bd: init_context */
+	0x0017f100,
+	0x02fe5801,
+	0xf003ff58,
+	0x0e8000e3,
+	0x150f8014,
+	0x013d21f5,
+	0x070037f1,
+	0x950634b6,
+	0x34d00814,
+	0x4034d000,
+	0x130030b7,
+	0xb6001fbb,
+	0x3fd002f5,
+	0x0815b600,
+	0xb60110b6,
+	0x1fb90814,
+	0x6321f502,
+	0x001fbb02,
+	0xf1000398,
+	0xf0200047,
+/* 0x040e: init_gpc */
+	0x4ea05043,
+	0x1fb90804,
+	0x8d21f402,
+	0x08004ea0,
+	0xf4022fb9,
+	0x4ea08d21,
+	0xf4bd010c,
+	0xa08d21f4,
+	0xf401044e,
+	0x4ea08d21,
+	0xf7f00100,
+	0x8d21f402,
+	0x08004ea0,
+/* 0x0440: init_gpc_wait */
+	0xc86821f4,
+	0x0bf41fff,
+	0x044ea0fa,
+	0x6821f408,
+	0xb7001fbb,
+	0xb6800040,
+	0x1bf40132,
+	0x0027f1b4,
+	0x0624b608,
+	0xb74021d0,
+	0xbd080020,
+	0x1f19f014,
+/* 0x0473: main */
+	0xf40021d0,
+	0x28f40031,
+	0x08d7f000,
+	0xf43921f4,
+	0xe4b1f401,
+	0x1bf54001,
+	0x87f100d1,
+	0x84b6083c,
+	0xf094bd06,
+	0x89d00499,
+	0x0017f100,
+	0x0614b60b,
+	0xcf4012cf,
+	0x13c80011,
+	0x7e0bf41f,
+	0xf41f23c8,
+	0x20f95a0b,
+	0xf10212b9,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00799f0,
+	0x32f40089,
+	0x0231f401,
+	0x07fb21f5,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0799f094,
+	0xfc0089d0,
+	0x3c87f120,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d006,
+	0xf50131f4,
+	0xf107fb21,
+	0xb6085c87,
+	0x94bd0684,
+	0xd00699f0,
+	0x0ef40089,
+/* 0x0509: chsw_prev_no_next */
+	0xb920f931,
+	0x32f40212,
+	0x0232f401,
+	0x07fb21f5,
+	0x17f120fc,
+	0x14b60b00,
+	0x0012d006,
+/* 0x0527: chsw_no_prev */
+	0xc8130ef4,
+	0x0bf41f23,
+	0x0131f40d,
+	0xf50232f4,
+/* 0x0537: chsw_done */
+	0xf107fb21,
+	0xb60b0c17,
+	0x27f00614,
+	0x0012d001,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0499f094,
+	0xf50089d0,
+/* 0x0557: main_not_ctx_switch */
+	0xb0ff200e,
+	0x1bf401e4,
+	0x02f2b90d,
+	0x078f21f5,
+/* 0x0567: main_not_ctx_chan */
+	0xb0420ef4,
+	0x1bf402e4,
+	0x3c87f12e,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d007,
+	0xf40132f4,
+	0x21f50232,
+	0x87f107fb,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00799,
+	0x110ef400,
+/* 0x0598: main_not_ctx_save */
+	0xf010ef94,
+	0x21f501f5,
+	0x0ef502ec,
+/* 0x05a6: main_done */
+	0x17f1fed1,
+	0x14b60820,
+	0xf024bd06,
+	0x12d01f29,
+	0xbe0ef500,
+/* 0x05b9: ih */
+	0xfe80f9fe,
+	0x80f90188,
+	0xa0f990f9,
+	0xd0f9b0f9,
+	0xf0f9e0f9,
+	0xc4800acf,
+	0x0bf404ab,
+	0x00b7f11d,
+	0x08d7f019,
+	0xcf40becf,
+	0x21f400bf,
+	0x00b0b704,
+	0x01e7f004,
+/* 0x05ef: ih_no_fifo */
+	0xe400bed0,
+	0xf40100ab,
+	0xd7f00d0b,
+	0x01e7f108,
+	0x0421f440,
+/* 0x0600: ih_no_ctxsw */
+	0x0104b7f1,
+	0xabffb0bd,
+	0x0d0bf4b4,
+	0x0c1ca7f1,
+	0xd006a4b6,
+/* 0x0616: ih_no_other */
+	0x0ad000ab,
+	0xfcf0fc40,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0xf80032f4,
+/* 0x0631: ctx_4170s */
+	0x70e7f101,
+	0x40e3f041,
+	0xf410f5f0,
+	0x00f88d21,
+/* 0x0640: ctx_4170w */
+	0x4170e7f1,
+	0xf440e3f0,
+	0xf4f06821,
+	0xf31bf410,
+/* 0x0652: ctx_redswitch */
+	0xe7f100f8,
+	0xe4b60614,
+	0x70f7f106,
+	0x00efd002,
+/* 0x0663: ctx_redswitch_delay */
+	0xb608f7f0,
+	0x1bf401f2,
+	0x70f7f1fd,
+	0x00efd007,
+/* 0x0672: ctx_86c */
+	0xe7f100f8,
+	0xe4b6086c,
+	0x00efd006,
+	0x8a14e7f1,
+	0xf440e3f0,
+	0xe7f18d21,
+	0xe3f0a86c,
+	0x8d21f441,
+/* 0x0692: ctx_load */
+	0x87f100f8,
+	0x84b6083c,
+	0xf094bd06,
+	0x89d00599,
+	0x0ca7f000,
+	0xf1c921f4,
+	0xb60a2417,
+	0x10d00614,
+	0x0037f100,
+	0x0634b60b,
+	0xf14032d0,
+	0xb60a0c17,
+	0x47f00614,
+	0x0012d007,
+/* 0x06cb: ctx_chan_wait_0 */
+	0xcf4014d0,
+	0x44f04014,
+	0xfa1bf41f,
+	0xfe0032d0,
+	0x2af0000b,
+	0x0424b61f,
+	0xf10220b6,
+	0xb6083c87,
+	0x94bd0684,
+	0xd00899f0,
+	0x17f10089,
+	0x14b60a04,
+	0x0012d006,
+	0x0a2017f1,
+	0xf00614b6,
+	0x23f10227,
+	0x12d08000,
+	0x1017f000,
+	0x030027f1,
+	0xfa0223f0,
+	0x03f80512,
+	0x085c87f1,
+	0xbd0684b6,
+	0x0899f094,
+	0x980089d0,
+	0x14b6c101,
+	0xc0029818,
+	0xfd0825b6,
+	0x01800512,
+	0x3c87f116,
+	0x0684b608,
+	0x99f094bd,
+	0x0089d009,
+	0x0a0427f1,
+	0xd00624b6,
+	0x27f00021,
+	0x2017f101,
+	0x0614b60a,
+	0xf10012d0,
+	0xf0020017,
+	0x01fa0613,
+	0xf103f805,
+	0xb6085c87,
+	0x94bd0684,
+	0xd00999f0,
+	0x87f10089,
+	0x84b6085c,
+	0xf094bd06,
+	0x89d00599,
+/* 0x078f: ctx_chan */
+	0xf500f800,
+	0xf0069221,
+	0x21f40ca7,
+	0x1017f1c9,
+	0x0614b60a,
+	0xd00527f0,
+/* 0x07a6: ctx_chan_wait */
+	0x12cf0012,
+	0x0522fd00,
+	0xf8fa1bf4,
+/* 0x07b1: ctx_mmio_exec */
+	0x81039800,
+	0x0a0427f1,
+	0xd00624b6,
+	0x34bd0023,
+/* 0x07c0: ctx_mmio_loop */
+	0xf4ff34c4,
+	0x57f10f1b,
+	0x53f00300,
+	0x0535fa06,
+/* 0x07d2: ctx_mmio_pull */
+	0x4e9803f8,
+	0xc14f98c0,
+	0xb68d21f4,
+	0x12b60830,
+	0xdf1bf401,
+/* 0x07e4: ctx_mmio_done */
+	0xd0160398,
+	0x00800023,
+	0x0017f180,
+	0x0613f002,
+	0xf80601fa,
+/* 0x07fb: ctx_xfer */
+	0xf100f803,
+	0xb60c00f7,
+	0xe7f006f4,
+	0x80fed004,
+/* 0x0808: ctx_xfer_idle */
+	0xf100fecf,
+	0xf42000e4,
+	0x11f4f91b,
+	0x0d02f406,
+/* 0x0818: ctx_xfer_pre */
+	0xf510f7f0,
+	0xf4067221,
+/* 0x0822: ctx_xfer_pre_load */
+	0xf7f01c11,
+	0x3121f502,
+	0x4021f506,
+	0x5221f506,
+	0xf5f4bd06,
+	0xf5063121,
+/* 0x083b: ctx_xfer_exec */
+	0x98069221,
+	0x27f11601,
+	0x24b60414,
+	0x0020d006,
+	0xa500e7f1,
+	0xb941e3f0,
+	0x21f4021f,
+	0x04e0b68d,
+	0xf001fcf0,
+	0x24b6022c,
+	0x05f2fd01,
+	0xf18d21f4,
+	0xf04afc17,
+	0x27f00213,
+	0x0012d00c,
+	0x020721f5,
+	0x47fc27f1,
+	0xd00223f0,
+	0x2cf00020,
+	0x0320b601,
+	0xf00012d0,
+	0xa5f001ac,
+	0x00b7f006,
+	0x98140c98,
+	0xe7f0150d,
+	0x5c21f500,
+	0x08a7f001,
+	0x010321f5,
+	0x020721f5,
+	0xf02201f4,
+	0x21f40ca7,
+	0x1017f1c9,
+	0x0614b60a,
+	0xd00527f0,
+/* 0x08c2: ctx_xfer_post_save_wait */
+	0x12cf0012,
+	0x0522fd00,
+	0xf4fa1bf4,
+/* 0x08ce: ctx_xfer_post */
+	0xf7f02e02,
+	0x3121f502,
+	0xf5f4bd06,
+	0xf5067221,
+	0xf5022621,
+	0xbd064021,
+	0x3121f5f4,
+	0x1011f406,
+	0xfd800198,
+	0x0bf40511,
+	0xb121f507,
+/* 0x08f9: ctx_xfer_no_post_mmio */
+/* 0x08f9: ctx_xfer_done */
+	0x0000f807,
+	0x00000000,
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc
new file mode 100644
index 0000000..e6b2288
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc
@@ -0,0 +1,400 @@
+/* fuc microcode util functions for nvc0 PGRAPH
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
+define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
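+
+// Worked example of the mmctx_data encoding, cross-checkable against the
+// generated *.fuc.h tables: mmctx_data(0x404010, 7) expands to
+// ((7 - 1) << 26) | 0x404010 = 0x18404010 -- bits 31:26 hold (count - 1)
+// and bits 25:0 the first register address, so a single word describes a
+// run of up to 64 consecutive registers.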
+
+ifdef(`include_code', `
+// Error codes
+define(`E_BAD_COMMAND', 0x01)
+define(`E_CMD_OVERFLOW', 0x02)
+
+// Util macros to help with debugging ucode hangs etc
+define(`T_WAIT', 0)
+define(`T_MMCTX', 1)
+define(`T_STRWAIT', 2)
+define(`T_STRINIT', 3)
+define(`T_AUTO', 4)
+define(`T_CHAN', 5)
+define(`T_LOAD', 6)
+define(`T_SAVE', 7)
+define(`T_LCHAN', 8)
+define(`T_LCTXH', 9)
+
+define(`trace_set', `
+	mov $r8 0x83c
+	shl b32 $r8 6
+	clear b32 $r9
+	bset $r9 $1
+	iowr I[$r8 + 0x000] $r9		// CC_SCRATCH[7]
+')
+
+define(`trace_clr', `
+	mov $r8 0x85c
+	shl b32 $r8 6
+	clear b32 $r9
+	bset $r9 $1
+	iowr I[$r8 + 0x000] $r9		// CC_SCRATCH[7]
+')
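+
+// Note: trace_set and trace_clr presumably address the SET/CLEAR aliases
+// of the same CC_SCRATCH[7] register (0x83c vs 0x85c), so the scratch
+// word acts as a live bitmask of the T_* stages in progress -- handy for
+// seeing where the ucode was when it hung.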
+
+// queue_put - add request to queue
+//
+// In : $r13 queue pointer
+//	$r14 command
+//	$r15 data
+//
+queue_put:
+	// make sure we have space..
+	ld b32 $r8 D[$r13 + 0x0]	// GET
+	ld b32 $r9 D[$r13 + 0x4]	// PUT
+	xor $r8 8
+	cmpu b32 $r8 $r9
+	bra ne #queue_put_next
+		mov $r15 E_CMD_OVERFLOW
+		call #error
+		ret
+
+	// store cmd/data on queue
+	queue_put_next:
+	and $r8 $r9 7
+	shl b32 $r8 3
+	add b32 $r8 $r13
+	add b32 $r8 8
+	st b32 D[$r8 + 0x0] $r14
+	st b32 D[$r8 + 0x4] $r15
+
+	// update PUT
+	add b32 $r9 1
+	and $r9 0xf
+	st b32 D[$r13 + 0x4] $r9
+	ret
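+
+// A rough C model of the ring logic above (illustration only, not part
+// of the ucode): GET and PUT are 4-bit counters over an 8-entry ring, so
+// equal counters mean empty and counters differing by exactly 8 mean
+// full -- hence the "xor $r8 8" full test.
+//
+//	struct queue { u32 get, put, data[8][2]; };
+//
+//	int queue_put(struct queue *q, u32 cmd, u32 data)
+//	{
+//		if ((q->get ^ 8) == q->put)
+//			return -ENOSPC;		/* ring full */
+//		q->data[q->put & 7][0] = cmd;
+//		q->data[q->put & 7][1] = data;
+//		q->put = (q->put + 1) & 0xf;
+//		return 0;
+//	}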
+
+// queue_get - fetch request from queue
+//
+// In : $r13 queue pointer
+//
+// Out:	$p1  clear on success (data available)
+//	$r14 command
+// 	$r15 data
+//
+queue_get:
+	bset $flags $p1
+	ld b32 $r8 D[$r13 + 0x0]	// GET
+	ld b32 $r9 D[$r13 + 0x4]	// PUT
+	cmpu b32 $r8 $r9
+	bra e #queue_get_done
+		// fetch first cmd/data pair
+		and $r9 $r8 7
+		shl b32 $r9 3
+		add b32 $r9 $r13
+		add b32 $r9 8
+		ld b32 $r14 D[$r9 + 0x0]
+		ld b32 $r15 D[$r9 + 0x4]
+
+		// update GET
+		add b32 $r8 1
+		and $r8 0xf
+		st b32 D[$r13 + 0x0] $r8
+		bclr $flags $p1
+queue_get_done:
+	ret
+
+// nv_rd32 - read 32-bit value from nv register
+//
+// In : $r14 register
+// Out: $r15 value
+//
+nv_rd32:
+	mov $r11 0x728
+	shl b32 $r11 6
+	mov b32 $r12 $r14
+	bset $r12 31			// MMIO_CTRL_PENDING
+	iowr I[$r11 + 0x000] $r12	// MMIO_CTRL
+	nv_rd32_wait:
+		iord $r12 I[$r11 + 0x000]
+		xbit $r12 $r12 31
+		bra ne #nv_rd32_wait
+	mov $r10 6			// DONE_MMIO_RD
+	call #wait_doneo
+	iord $r15 I[$r11 + 0x100]	// MMIO_RDVAL
+	ret
+
+// nv_wr32 - write 32-bit value to nv register
+//
+// In : $r14 register
+//      $r15 value
+//
+nv_wr32:
+	mov $r11 0x728
+	shl b32 $r11 6
+	iowr I[$r11 + 0x200] $r15	// MMIO_WRVAL
+	mov b32 $r12 $r14
+	bset $r12 31			// MMIO_CTRL_PENDING
+	bset $r12 30			// MMIO_CTRL_WRITE
+	iowr I[$r11 + 0x000] $r12	// MMIO_CTRL
+	nv_wr32_wait:
+		iord $r12 I[$r11 + 0x000]
+		xbit $r12 $r12 31
+		bra ne #nv_wr32_wait
+	ret
+
+// (re)set watchdog timer
+//
+// In : $r15 timeout
+//
+watchdog_reset:
+	mov $r8 0x430
+	shl b32 $r8 6
+	bset $r15 31
+	iowr I[$r8 + 0x000] $r15
+	ret
+
+// clear watchdog timer
+watchdog_clear:
+	mov $r8 0x430
+	shl b32 $r8 6
+	iowr I[$r8 + 0x000] $r0
+	ret
+
+// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
+//
+// In : $r10 bit to wait on
+//
+define(`wait_done', `
+$1:
+	trace_set(T_WAIT);
+	mov $r8 0x818
+	shl b32 $r8 6
+	iowr I[$r8 + 0x000] $r10	// CC_SCRATCH[6] = wait bit
+	wait_done_$1:
+		mov $r8 0x400
+		shl b32 $r8 6
+		iord $r8 I[$r8 + 0x000]	// DONE
+		xbit $r8 $r8 $r10
+		bra $2 #wait_done_$1
+	trace_clr(T_WAIT)
+	ret
+')
+wait_done(wait_donez, ne)
+wait_done(wait_doneo, e)
+
+// mmctx_size - determine size of a mmio list transfer
+//
+// In : $r14 mmio list head
+//      $r15 mmio list tail
+// Out: $r15 transfer size (in bytes)
+//
+mmctx_size:
+	clear b32 $r9
+	nv_mmctx_size_loop:
+		ld b32 $r8 D[$r14]
+		shr b32 $r8 26
+		add b32 $r8 1
+		shl b32 $r8 2
+		add b32 $r9 $r8
+		add b32 $r14 4
+		cmpu b32 $r14 $r15
+		bra ne #nv_mmctx_size_loop
+	mov b32 $r15 $r9
+	ret
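+
+// Worked example: for the list entry 0x18404010 (mmctx_data(0x404010, 7))
+// the loop adds ((0x18404010 >> 26) + 1) << 2 = (6 + 1) * 4 = 28 bytes,
+// one 32-bit word per register in the run.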
+
+// mmctx_xfer - execute a list of mmio transfers
+//
+// In : $r10 flags
+//		bit 0: direction (0 = save, 1 = load)
+//		bit 1: set if first transfer
+//		bit 2: set if last transfer
+//	$r11 base
+//	$r12 mmio list head
+//	$r13 mmio list tail
+//	$r14 multi_stride
+//	$r15 multi_mask
+//
+mmctx_xfer:
+	trace_set(T_MMCTX)
+	mov $r8 0x710
+	shl b32 $r8 6
+	clear b32 $r9
+	or $r11 $r11
+	bra e #mmctx_base_disabled
+		iowr I[$r8 + 0x000] $r11	// MMCTX_BASE
+		bset $r9 0			// BASE_EN
+	mmctx_base_disabled:
+	or $r14 $r14
+	bra e #mmctx_multi_disabled
+		iowr I[$r8 + 0x200] $r14 	// MMCTX_MULTI_STRIDE
+		iowr I[$r8 + 0x300] $r15 	// MMCTX_MULTI_MASK
+		bset $r9 1			// MULTI_EN
+	mmctx_multi_disabled:
+	add b32 $r8 0x100
+
+	xbit $r11 $r10 0
+	shl b32 $r11 16			// DIR
+	bset $r11 12			// QLIMIT = 0x10
+	xbit $r14 $r10 1
+	shl b32 $r14 17
+	or $r11 $r14			// START_TRIGGER
+	iowr I[$r8 + 0x000] $r11	// MMCTX_CTRL
+
+	// loop over the mmio list, and send requests to the hw
+	mmctx_exec_loop:
+		// wait for space in mmctx queue
+		mmctx_wait_free:
+			iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
+			and $r14 0x1f
+			bra e #mmctx_wait_free
+
+		// queue up an entry
+		ld b32 $r14 D[$r12]
+		or $r14 $r9
+		iowr I[$r8 + 0x300] $r14
+		add b32 $r12 4
+		cmpu b32 $r12 $r13
+		bra ne #mmctx_exec_loop
+
+	xbit $r11 $r10 2
+	bra ne #mmctx_stop
+		// wait for queue to empty
+		mmctx_fini_wait:
+			iord $r11 I[$r8 + 0x000]	// MMCTX_CTRL
+			and $r11 0x1f
+			cmpu b32 $r11 0x10
+			bra ne #mmctx_fini_wait
+		mov $r10 2				// DONE_MMCTX
+		call #wait_donez
+		bra #mmctx_done
+	mmctx_stop:
+		xbit $r11 $r10 0
+		shl b32 $r11 16			// DIR
+		bset $r11 12			// QLIMIT = 0x10
+		bset $r11 18			// STOP_TRIGGER
+		iowr I[$r8 + 0x000] $r11	// MMCTX_CTRL
+		mmctx_stop_wait:
+			// wait for STOP_TRIGGER to clear
+			iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+			xbit $r11 $r11 18
+			bra ne #mmctx_stop_wait
+	mmctx_done:
+	trace_clr(T_MMCTX)
+	ret
+
+// Wait for DONE_STRAND
+//
+strand_wait:
+	push $r10
+	mov $r10 2
+	call #wait_donez
+	pop $r10
+	ret
+
+// unknown - call before issuing strand commands
+//
+strand_pre:
+	mov $r8 0x4afc
+	sethi $r8 0x20000
+	mov $r9 0xc
+	iowr I[$r8] $r9
+	call #strand_wait
+	ret
+
+// unknown - call after issuing strand commands
+//
+strand_post:
+	mov $r8 0x4afc
+	sethi $r8 0x20000
+	mov $r9 0xd
+	iowr I[$r8] $r9
+	call #strand_wait
+	ret
+
+// Selects strand set?!
+//
+// In: $r14 id
+//
+strand_set:
+	mov $r10 0x4ffc
+	sethi $r10 0x20000
+	sub b32 $r11 $r10 0x500
+	mov $r12 0xf
+	iowr I[$r10 + 0x000] $r12		// 0x93c = 0xf
+	mov $r12 0xb
+	iowr I[$r11 + 0x000] $r12		// 0x928 = 0xb
+	call #strand_wait
+	iowr I[$r10 + 0x000] $r14		// 0x93c = <id>
+	mov $r12 0xa
+	iowr I[$r11 + 0x000] $r12		// 0x928 = 0xa
+	call #strand_wait
+	ret
+
+// Initialise strand context data
+//
+// In : $r15 context base
+// Out: $r15 context size (in bytes)
+//
+// Strandset(?) 3 hardcoded currently
+//
+strand_ctx_init:
+	trace_set(T_STRINIT)
+	call #strand_pre
+	mov $r14 3
+	call #strand_set
+	mov $r10 0x46fc
+	sethi $r10 0x20000
+	add b32 $r11 $r10 0x400
+	iowr I[$r10 + 0x100] $r0	// STRAND_FIRST_GENE = 0
+	mov $r12 1
+	iowr I[$r11 + 0x000] $r12	// STRAND_CMD = LATCH_FIRST_GENE
+	call #strand_wait
+	sub b32 $r12 $r0 1
+	iowr I[$r10 + 0x000] $r12	// STRAND_GENE_CNT = 0xffffffff
+	mov $r12 2
+	iowr I[$r11 + 0x000] $r12	// STRAND_CMD = LATCH_GENE_CNT
+	call #strand_wait
+	call #strand_post
+
+	// read the size of each strand, poke the context offset of
+	// each into STRAND_{SAVE,LOAD}_SWBASE now, so it does not need
+	// to be done later.
+	mov $r8 0x880
+	shl b32 $r8 6
+	iord $r9 I[$r8 + 0x000]		// STRANDS
+	add b32 $r8 0x2200
+	shr b32 $r14 $r15 8
+	ctx_init_strand_loop:
+		iowr I[$r8 + 0x000] $r14	// STRAND_SAVE_SWBASE
+		iowr I[$r8 + 0x100] $r14	// STRAND_LOAD_SWBASE
+		iord $r10 I[$r8 + 0x200]	// STRAND_SIZE
+		shr b32 $r10 6
+		add b32 $r10 1
+		add b32 $r14 $r10
+		add b32 $r8 4
+		sub b32 $r9 1
+		bra ne #ctx_init_strand_loop
+
+	shl b32 $r14 8
+	sub b32 $r15 $r14 $r15
+	trace_clr(T_STRINIT)
+	ret
+')
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc
new file mode 100644
index 0000000..f16a5d5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc
@@ -0,0 +1,400 @@
+/* fuc microcode util functions for nve0 PGRAPH
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
+define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
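+
+// queue_init reserves (2 * 4) + ((8 * 4) * 2) = 72 bytes: GET and PUT
+// control words followed by eight two-word cmd/data entries.  This
+// matches the generated nve0_grhub_data table earlier in this patch,
+// where cmd_queue spans 18 words from 0x0008 up to hub_mmio_list_head
+// at 0x0050.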
+
+ifdef(`include_code', `
+// Error codes
+define(`E_BAD_COMMAND', 0x01)
+define(`E_CMD_OVERFLOW', 0x02)
+
+// Util macros to help with debugging ucode hangs etc
+define(`T_WAIT', 0)
+define(`T_MMCTX', 1)
+define(`T_STRWAIT', 2)
+define(`T_STRINIT', 3)
+define(`T_AUTO', 4)
+define(`T_CHAN', 5)
+define(`T_LOAD', 6)
+define(`T_SAVE', 7)
+define(`T_LCHAN', 8)
+define(`T_LCTXH', 9)
+
+define(`trace_set', `
+	mov $r8 0x83c
+	shl b32 $r8 6
+	clear b32 $r9
+	bset $r9 $1
+	iowr I[$r8 + 0x000] $r9		// CC_SCRATCH[7]
+')
+
+define(`trace_clr', `
+	mov $r8 0x85c
+	shl b32 $r8 6
+	clear b32 $r9
+	bset $r9 $1
+	iowr I[$r8 + 0x000] $r9		// CC_SCRATCH[7]
+')
+
+// queue_put - add request to queue
+//
+// In : $r13 queue pointer
+//	$r14 command
+//	$r15 data
+//
+queue_put:
+	// make sure we have space..
+	ld b32 $r8 D[$r13 + 0x0]	// GET
+	ld b32 $r9 D[$r13 + 0x4]	// PUT
+	xor $r8 8
+	cmpu b32 $r8 $r9
+	bra ne #queue_put_next
+		mov $r15 E_CMD_OVERFLOW
+		call #error
+		ret
+
+	// store cmd/data on queue
+	queue_put_next:
+	and $r8 $r9 7
+	shl b32 $r8 3
+	add b32 $r8 $r13
+	add b32 $r8 8
+	st b32 D[$r8 + 0x0] $r14
+	st b32 D[$r8 + 0x4] $r15
+
+	// update PUT
+	add b32 $r9 1
+	and $r9 0xf
+	st b32 D[$r13 + 0x4] $r9
+	ret
+
+// queue_get - fetch request from queue
+//
+// In : $r13 queue pointer
+//
+// Out:	$p1  clear on success (data available)
+//	$r14 command
+// 	$r15 data
+//
+queue_get:
+	bset $flags $p1
+	ld b32 $r8 D[$r13 + 0x0]	// GET
+	ld b32 $r9 D[$r13 + 0x4]	// PUT
+	cmpu b32 $r8 $r9
+	bra e #queue_get_done
+		// fetch first cmd/data pair
+		and $r9 $r8 7
+		shl b32 $r9 3
+		add b32 $r9 $r13
+		add b32 $r9 8
+		ld b32 $r14 D[$r9 + 0x0]
+		ld b32 $r15 D[$r9 + 0x4]
+
+		// update GET
+		add b32 $r8 1
+		and $r8 0xf
+		st b32 D[$r13 + 0x0] $r8
+		bclr $flags $p1
+queue_get_done:
+	ret
+
+// nv_rd32 - read 32-bit value from nv register
+//
+// In : $r14 register
+// Out: $r15 value
+//
+nv_rd32:
+	mov $r11 0x728
+	shl b32 $r11 6
+	mov b32 $r12 $r14
+	bset $r12 31			// MMIO_CTRL_PENDING
+	iowr I[$r11 + 0x000] $r12	// MMIO_CTRL
+	nv_rd32_wait:
+		iord $r12 I[$r11 + 0x000]
+		xbit $r12 $r12 31
+		bra ne #nv_rd32_wait
+	mov $r10 6			// DONE_MMIO_RD
+	call #wait_doneo
+	iord $r15 I[$r11 + 0x100]	// MMIO_RDVAL
+	ret
+
+// nv_wr32 - write 32-bit value to nv register
+//
+// In : $r14 register
+//      $r15 value
+//
+nv_wr32:
+	mov $r11 0x728
+	shl b32 $r11 6
+	iowr I[$r11 + 0x200] $r15	// MMIO_WRVAL
+	mov b32 $r12 $r14
+	bset $r12 31			// MMIO_CTRL_PENDING
+	bset $r12 30			// MMIO_CTRL_WRITE
+	iowr I[$r11 + 0x000] $r12	// MMIO_CTRL
+	nv_wr32_wait:
+		iord $r12 I[$r11 + 0x000]
+		xbit $r12 $r12 31
+		bra ne #nv_wr32_wait
+	ret
+
+// (re)set watchdog timer
+//
+// In : $r15 timeout
+//
+watchdog_reset:
+	mov $r8 0x430
+	shl b32 $r8 6
+	bset $r15 31
+	iowr I[$r8 + 0x000] $r15
+	ret
+
+// clear watchdog timer
+watchdog_clear:
+	mov $r8 0x430
+	shl b32 $r8 6
+	iowr I[$r8 + 0x000] $r0
+	ret
+
+// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
+//
+// In : $r10 bit to wait on
+//
+define(`wait_done', `
+$1:
+	trace_set(T_WAIT);
+	mov $r8 0x818
+	shl b32 $r8 6
+	iowr I[$r8 + 0x000] $r10	// CC_SCRATCH[6] = wait bit
+	wait_done_$1:
+		mov $r8 0x400
+		shl b32 $r8 6
+		iord $r8 I[$r8 + 0x000]	// DONE
+		xbit $r8 $r8 $r10
+		bra $2 #wait_done_$1
+	trace_clr(T_WAIT)
+	ret
+')
+wait_done(wait_donez, ne)
+wait_done(wait_doneo, e)
+
+// mmctx_size - determine size of a mmio list transfer
+//
+// In : $r14 mmio list head
+//      $r15 mmio list tail
+// Out: $r15 transfer size (in bytes)
+//
+mmctx_size:
+	clear b32 $r9
+	nv_mmctx_size_loop:
+		ld b32 $r8 D[$r14]
+		shr b32 $r8 26
+		add b32 $r8 1
+		shl b32 $r8 2
+		add b32 $r9 $r8
+		add b32 $r14 4
+		cmpu b32 $r14 $r15
+		bra ne #nv_mmctx_size_loop
+	mov b32 $r15 $r9
+	ret
+
+// mmctx_xfer - execute a list of mmio transfers
+//
+// In : $r10 flags
+//		bit 0: direction (0 = save, 1 = load)
+//		bit 1: set if first transfer
+//		bit 2: set if last transfer
+//	$r11 base
+//	$r12 mmio list head
+//	$r13 mmio list tail
+//	$r14 multi_stride
+//	$r15 multi_mask
+//
+mmctx_xfer:
+	trace_set(T_MMCTX)
+	mov $r8 0x710
+	shl b32 $r8 6
+	clear b32 $r9
+	or $r11 $r11
+	bra e #mmctx_base_disabled
+		iowr I[$r8 + 0x000] $r11	// MMCTX_BASE
+		bset $r9 0			// BASE_EN
+	mmctx_base_disabled:
+	or $r14 $r14
+	bra e #mmctx_multi_disabled
+		iowr I[$r8 + 0x200] $r14 	// MMCTX_MULTI_STRIDE
+		iowr I[$r8 + 0x300] $r15 	// MMCTX_MULTI_MASK
+		bset $r9 1			// MULTI_EN
+	mmctx_multi_disabled:
+	add b32 $r8 0x100
+
+	xbit $r11 $r10 0
+	shl b32 $r11 16			// DIR
+	bset $r11 12			// QLIMIT = 0x10
+	xbit $r14 $r10 1
+	shl b32 $r14 17
+	or $r11 $r14			// START_TRIGGER
+	iowr I[$r8 + 0x000] $r11	// MMCTX_CTRL
+
+	// loop over the mmio list, and send requests to the hw
+	mmctx_exec_loop:
+		// wait for space in mmctx queue
+		mmctx_wait_free:
+			iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
+			and $r14 0x1f
+			bra e #mmctx_wait_free
+
+		// queue up an entry
+		ld b32 $r14 D[$r12]
+		or $r14 $r9
+		iowr I[$r8 + 0x300] $r14
+		add b32 $r12 4
+		cmpu b32 $r12 $r13
+		bra ne #mmctx_exec_loop
+
+	xbit $r11 $r10 2
+	bra ne #mmctx_stop
+		// wait for queue to empty
+		mmctx_fini_wait:
+			iord $r11 I[$r8 + 0x000]	// MMCTX_CTRL
+			and $r11 0x1f
+			cmpu b32 $r11 0x10
+			bra ne #mmctx_fini_wait
+		mov $r10 2				// DONE_MMCTX
+		call #wait_donez
+		bra #mmctx_done
+	mmctx_stop:
+		xbit $r11 $r10 0
+		shl b32 $r11 16			// DIR
+		bset $r11 12			// QLIMIT = 0x10
+		bset $r11 18			// STOP_TRIGGER
+		iowr I[$r8 + 0x000] $r11	// MMCTX_CTRL
+		mmctx_stop_wait:
+			// wait for STOP_TRIGGER to clear
+			iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+			xbit $r11 $r11 18
+			bra ne #mmctx_stop_wait
+	mmctx_done:
+	trace_clr(T_MMCTX)
+	ret
+
+// Wait for DONE_STRAND
+//
+strand_wait:
+	push $r10
+	mov $r10 2
+	call #wait_donez
+	pop $r10
+	ret
+
+// unknown - call before issuing strand commands
+//
+strand_pre:
+	mov $r8 0x4afc
+	sethi $r8 0x20000
+	mov $r9 0xc
+	iowr I[$r8] $r9
+	call #strand_wait
+	ret
+
+// unknown - call after issuing strand commands
+//
+strand_post:
+	mov $r8 0x4afc
+	sethi $r8 0x20000
+	mov $r9 0xd
+	iowr I[$r8] $r9
+	call #strand_wait
+	ret
+
+// Selects strand set?!
+//
+// In: $r14 id
+//
+strand_set:
+	mov $r10 0x4ffc
+	sethi $r10 0x20000
+	sub b32 $r11 $r10 0x500
+	mov $r12 0xf
+	iowr I[$r10 + 0x000] $r12		// 0x93c = 0xf
+	mov $r12 0xb
+	iowr I[$r11 + 0x000] $r12		// 0x928 = 0xb
+	call #strand_wait
+	iowr I[$r10 + 0x000] $r14		// 0x93c = <id>
+	mov $r12 0xa
+	iowr I[$r11 + 0x000] $r12		// 0x928 = 0xa
+	call #strand_wait
+	ret
+
+// Initialise strand context data
+//
+// In : $r15 context base
+// Out: $r15 context size (in bytes)
+//
+// Strandset(?) 3 hardcoded currently
+//
+strand_ctx_init:
+	trace_set(T_STRINIT)
+	call #strand_pre
+	mov $r14 3
+	call #strand_set
+	mov $r10 0x46fc
+	sethi $r10 0x20000
+	add b32 $r11 $r10 0x400
+	iowr I[$r10 + 0x100] $r0	// STRAND_FIRST_GENE = 0
+	mov $r12 1
+	iowr I[$r11 + 0x000] $r12	// STRAND_CMD = LATCH_FIRST_GENE
+	call #strand_wait
+	sub b32 $r12 $r0 1
+	iowr I[$r10 + 0x000] $r12	// STRAND_GENE_CNT = 0xffffffff
+	mov $r12 2
+	iowr I[$r11 + 0x000] $r12	// STRAND_CMD = LATCH_GENE_CNT
+	call #strand_wait
+	call #strand_post
+
+	// read the size of each strand, poke the context offset of
+	// each into STRAND_{SAVE,LOAD}_SWBASE now, so it does not need
+	// to be done later.
+	mov $r8 0x880
+	shl b32 $r8 6
+	iord $r9 I[$r8 + 0x000]		// STRANDS
+	add b32 $r8 0x2200
+	shr b32 $r14 $r15 8
+	ctx_init_strand_loop:
+		iowr I[$r8 + 0x000] $r14	// STRAND_SAVE_SWBASE
+		iowr I[$r8 + 0x100] $r14	// STRAND_LOAD_SWBASE
+		iord $r10 I[$r8 + 0x200]	// STRAND_SIZE
+		shr b32 $r10 6
+		add b32 $r10 1
+		add b32 $r14 $r10
+		add b32 $r8 4
+		sub b32 $r9 1
+		bra ne #ctx_init_strand_loop
+
+	shl b32 $r14 8
+	sub b32 $r15 $r14 $r15
+	trace_clr(T_STRINIT)
+	ret
+')
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
new file mode 100644
index 0000000..ad13dcd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -0,0 +1,1389 @@
+/*
+ * Copyright 2007 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/client.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+#include <engine/graph.h>
+
+#include "regs.h"
+
+static u32
+nv04_graph_ctx_regs[] = {
+	0x0040053c,
+	0x00400544,
+	0x00400540,
+	0x00400548,
+	NV04_PGRAPH_CTX_SWITCH1,
+	NV04_PGRAPH_CTX_SWITCH2,
+	NV04_PGRAPH_CTX_SWITCH3,
+	NV04_PGRAPH_CTX_SWITCH4,
+	NV04_PGRAPH_CTX_CACHE1,
+	NV04_PGRAPH_CTX_CACHE2,
+	NV04_PGRAPH_CTX_CACHE3,
+	NV04_PGRAPH_CTX_CACHE4,
+	0x00400184,
+	0x004001a4,
+	0x004001c4,
+	0x004001e4,
+	0x00400188,
+	0x004001a8,
+	0x004001c8,
+	0x004001e8,
+	0x0040018c,
+	0x004001ac,
+	0x004001cc,
+	0x004001ec,
+	0x00400190,
+	0x004001b0,
+	0x004001d0,
+	0x004001f0,
+	0x00400194,
+	0x004001b4,
+	0x004001d4,
+	0x004001f4,
+	0x00400198,
+	0x004001b8,
+	0x004001d8,
+	0x004001f8,
+	0x0040019c,
+	0x004001bc,
+	0x004001dc,
+	0x004001fc,
+	0x00400174,
+	NV04_PGRAPH_DMA_START_0,
+	NV04_PGRAPH_DMA_START_1,
+	NV04_PGRAPH_DMA_LENGTH,
+	NV04_PGRAPH_DMA_MISC,
+	NV04_PGRAPH_DMA_PITCH,
+	NV04_PGRAPH_BOFFSET0,
+	NV04_PGRAPH_BBASE0,
+	NV04_PGRAPH_BLIMIT0,
+	NV04_PGRAPH_BOFFSET1,
+	NV04_PGRAPH_BBASE1,
+	NV04_PGRAPH_BLIMIT1,
+	NV04_PGRAPH_BOFFSET2,
+	NV04_PGRAPH_BBASE2,
+	NV04_PGRAPH_BLIMIT2,
+	NV04_PGRAPH_BOFFSET3,
+	NV04_PGRAPH_BBASE3,
+	NV04_PGRAPH_BLIMIT3,
+	NV04_PGRAPH_BOFFSET4,
+	NV04_PGRAPH_BBASE4,
+	NV04_PGRAPH_BLIMIT4,
+	NV04_PGRAPH_BOFFSET5,
+	NV04_PGRAPH_BBASE5,
+	NV04_PGRAPH_BLIMIT5,
+	NV04_PGRAPH_BPITCH0,
+	NV04_PGRAPH_BPITCH1,
+	NV04_PGRAPH_BPITCH2,
+	NV04_PGRAPH_BPITCH3,
+	NV04_PGRAPH_BPITCH4,
+	NV04_PGRAPH_SURFACE,
+	NV04_PGRAPH_STATE,
+	NV04_PGRAPH_BSWIZZLE2,
+	NV04_PGRAPH_BSWIZZLE5,
+	NV04_PGRAPH_BPIXEL,
+	NV04_PGRAPH_NOTIFY,
+	NV04_PGRAPH_PATT_COLOR0,
+	NV04_PGRAPH_PATT_COLOR1,
+	NV04_PGRAPH_PATT_COLORRAM+0x00,
+	NV04_PGRAPH_PATT_COLORRAM+0x04,
+	NV04_PGRAPH_PATT_COLORRAM+0x08,
+	NV04_PGRAPH_PATT_COLORRAM+0x0c,
+	NV04_PGRAPH_PATT_COLORRAM+0x10,
+	NV04_PGRAPH_PATT_COLORRAM+0x14,
+	NV04_PGRAPH_PATT_COLORRAM+0x18,
+	NV04_PGRAPH_PATT_COLORRAM+0x1c,
+	NV04_PGRAPH_PATT_COLORRAM+0x20,
+	NV04_PGRAPH_PATT_COLORRAM+0x24,
+	NV04_PGRAPH_PATT_COLORRAM+0x28,
+	NV04_PGRAPH_PATT_COLORRAM+0x2c,
+	NV04_PGRAPH_PATT_COLORRAM+0x30,
+	NV04_PGRAPH_PATT_COLORRAM+0x34,
+	NV04_PGRAPH_PATT_COLORRAM+0x38,
+	NV04_PGRAPH_PATT_COLORRAM+0x3c,
+	NV04_PGRAPH_PATT_COLORRAM+0x40,
+	NV04_PGRAPH_PATT_COLORRAM+0x44,
+	NV04_PGRAPH_PATT_COLORRAM+0x48,
+	NV04_PGRAPH_PATT_COLORRAM+0x4c,
+	NV04_PGRAPH_PATT_COLORRAM+0x50,
+	NV04_PGRAPH_PATT_COLORRAM+0x54,
+	NV04_PGRAPH_PATT_COLORRAM+0x58,
+	NV04_PGRAPH_PATT_COLORRAM+0x5c,
+	NV04_PGRAPH_PATT_COLORRAM+0x60,
+	NV04_PGRAPH_PATT_COLORRAM+0x64,
+	NV04_PGRAPH_PATT_COLORRAM+0x68,
+	NV04_PGRAPH_PATT_COLORRAM+0x6c,
+	NV04_PGRAPH_PATT_COLORRAM+0x70,
+	NV04_PGRAPH_PATT_COLORRAM+0x74,
+	NV04_PGRAPH_PATT_COLORRAM+0x78,
+	NV04_PGRAPH_PATT_COLORRAM+0x7c,
+	NV04_PGRAPH_PATT_COLORRAM+0x80,
+	NV04_PGRAPH_PATT_COLORRAM+0x84,
+	NV04_PGRAPH_PATT_COLORRAM+0x88,
+	NV04_PGRAPH_PATT_COLORRAM+0x8c,
+	NV04_PGRAPH_PATT_COLORRAM+0x90,
+	NV04_PGRAPH_PATT_COLORRAM+0x94,
+	NV04_PGRAPH_PATT_COLORRAM+0x98,
+	NV04_PGRAPH_PATT_COLORRAM+0x9c,
+	NV04_PGRAPH_PATT_COLORRAM+0xa0,
+	NV04_PGRAPH_PATT_COLORRAM+0xa4,
+	NV04_PGRAPH_PATT_COLORRAM+0xa8,
+	NV04_PGRAPH_PATT_COLORRAM+0xac,
+	NV04_PGRAPH_PATT_COLORRAM+0xb0,
+	NV04_PGRAPH_PATT_COLORRAM+0xb4,
+	NV04_PGRAPH_PATT_COLORRAM+0xb8,
+	NV04_PGRAPH_PATT_COLORRAM+0xbc,
+	NV04_PGRAPH_PATT_COLORRAM+0xc0,
+	NV04_PGRAPH_PATT_COLORRAM+0xc4,
+	NV04_PGRAPH_PATT_COLORRAM+0xc8,
+	NV04_PGRAPH_PATT_COLORRAM+0xcc,
+	NV04_PGRAPH_PATT_COLORRAM+0xd0,
+	NV04_PGRAPH_PATT_COLORRAM+0xd4,
+	NV04_PGRAPH_PATT_COLORRAM+0xd8,
+	NV04_PGRAPH_PATT_COLORRAM+0xdc,
+	NV04_PGRAPH_PATT_COLORRAM+0xe0,
+	NV04_PGRAPH_PATT_COLORRAM+0xe4,
+	NV04_PGRAPH_PATT_COLORRAM+0xe8,
+	NV04_PGRAPH_PATT_COLORRAM+0xec,
+	NV04_PGRAPH_PATT_COLORRAM+0xf0,
+	NV04_PGRAPH_PATT_COLORRAM+0xf4,
+	NV04_PGRAPH_PATT_COLORRAM+0xf8,
+	NV04_PGRAPH_PATT_COLORRAM+0xfc,
+	NV04_PGRAPH_PATTERN,
+	0x0040080c,
+	NV04_PGRAPH_PATTERN_SHAPE,
+	0x00400600,
+	NV04_PGRAPH_ROP3,
+	NV04_PGRAPH_CHROMA,
+	NV04_PGRAPH_BETA_AND,
+	NV04_PGRAPH_BETA_PREMULT,
+	NV04_PGRAPH_CONTROL0,
+	NV04_PGRAPH_CONTROL1,
+	NV04_PGRAPH_CONTROL2,
+	NV04_PGRAPH_BLEND,
+	NV04_PGRAPH_STORED_FMT,
+	NV04_PGRAPH_SOURCE_COLOR,
+	0x00400560,
+	0x00400568,
+	0x00400564,
+	0x0040056c,
+	0x00400400,
+	0x00400480,
+	0x00400404,
+	0x00400484,
+	0x00400408,
+	0x00400488,
+	0x0040040c,
+	0x0040048c,
+	0x00400410,
+	0x00400490,
+	0x00400414,
+	0x00400494,
+	0x00400418,
+	0x00400498,
+	0x0040041c,
+	0x0040049c,
+	0x00400420,
+	0x004004a0,
+	0x00400424,
+	0x004004a4,
+	0x00400428,
+	0x004004a8,
+	0x0040042c,
+	0x004004ac,
+	0x00400430,
+	0x004004b0,
+	0x00400434,
+	0x004004b4,
+	0x00400438,
+	0x004004b8,
+	0x0040043c,
+	0x004004bc,
+	0x00400440,
+	0x004004c0,
+	0x00400444,
+	0x004004c4,
+	0x00400448,
+	0x004004c8,
+	0x0040044c,
+	0x004004cc,
+	0x00400450,
+	0x004004d0,
+	0x00400454,
+	0x004004d4,
+	0x00400458,
+	0x004004d8,
+	0x0040045c,
+	0x004004dc,
+	0x00400460,
+	0x004004e0,
+	0x00400464,
+	0x004004e4,
+	0x00400468,
+	0x004004e8,
+	0x0040046c,
+	0x004004ec,
+	0x00400470,
+	0x004004f0,
+	0x00400474,
+	0x004004f4,
+	0x00400478,
+	0x004004f8,
+	0x0040047c,
+	0x004004fc,
+	0x00400534,
+	0x00400538,
+	0x00400514,
+	0x00400518,
+	0x0040051c,
+	0x00400520,
+	0x00400524,
+	0x00400528,
+	0x0040052c,
+	0x00400530,
+	0x00400d00,
+	0x00400d40,
+	0x00400d80,
+	0x00400d04,
+	0x00400d44,
+	0x00400d84,
+	0x00400d08,
+	0x00400d48,
+	0x00400d88,
+	0x00400d0c,
+	0x00400d4c,
+	0x00400d8c,
+	0x00400d10,
+	0x00400d50,
+	0x00400d90,
+	0x00400d14,
+	0x00400d54,
+	0x00400d94,
+	0x00400d18,
+	0x00400d58,
+	0x00400d98,
+	0x00400d1c,
+	0x00400d5c,
+	0x00400d9c,
+	0x00400d20,
+	0x00400d60,
+	0x00400da0,
+	0x00400d24,
+	0x00400d64,
+	0x00400da4,
+	0x00400d28,
+	0x00400d68,
+	0x00400da8,
+	0x00400d2c,
+	0x00400d6c,
+	0x00400dac,
+	0x00400d30,
+	0x00400d70,
+	0x00400db0,
+	0x00400d34,
+	0x00400d74,
+	0x00400db4,
+	0x00400d38,
+	0x00400d78,
+	0x00400db8,
+	0x00400d3c,
+	0x00400d7c,
+	0x00400dbc,
+	0x00400590,
+	0x00400594,
+	0x00400598,
+	0x0040059c,
+	0x004005a8,
+	0x004005ac,
+	0x004005b0,
+	0x004005b4,
+	0x004005c0,
+	0x004005c4,
+	0x004005c8,
+	0x004005cc,
+	0x004005d0,
+	0x004005d4,
+	0x004005d8,
+	0x004005dc,
+	0x004005e0,
+	NV04_PGRAPH_PASSTHRU_0,
+	NV04_PGRAPH_PASSTHRU_1,
+	NV04_PGRAPH_PASSTHRU_2,
+	NV04_PGRAPH_DVD_COLORFMT,
+	NV04_PGRAPH_SCALED_FORMAT,
+	NV04_PGRAPH_MISC24_0,
+	NV04_PGRAPH_MISC24_1,
+	NV04_PGRAPH_MISC24_2,
+	0x00400500,
+	0x00400504,
+	NV04_PGRAPH_VALID1,
+	NV04_PGRAPH_VALID2,
+	NV04_PGRAPH_DEBUG_3
+};
+
+struct nv04_graph_priv {
+	struct nouveau_graph base;
+	struct nv04_graph_chan *chan[16];
+	spinlock_t lock;
+};
+
+struct nv04_graph_chan {
+	struct nouveau_object base;
+	int chid;
+	u32 nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
+};
+
+
+static inline struct nv04_graph_priv *
+nv04_graph_priv(struct nv04_graph_chan *chan)
+{
+	return (void *)nv_object(chan)->engine;
+}
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+/*
+ * Software methods, why they are needed, and how they all work:
+ *
+ * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
+ * 2d engine settings are kept inside the grobjs themselves. The grobjs are
+ * 3 words long on both. grobj format on NV04 is:
+ *
+ * word 0:
+ *  - bits 0-7: class
+ *  - bit 12: color key active
+ *  - bit 13: clip rect active
+ *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ *            NV03_CONTEXT_SURFACE_DST].
+ *  - bits 15-17: 2d operation [aka patch config]
+ *  - bit 24: patch valid [enables rendering using this object]
+ *  - bit 25: surf3d valid [for tex_tri and multitex_tri only]
+ * word 1:
+ *  - bits 0-1: mono format
+ *  - bits 8-13: color format
+ *  - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ *  - bits 0-15: DMA_A instance
+ *  - bits 16-31: DMA_B instance
+ *
+ * On NV05 it's:
+ *
+ * word 0:
+ *  - bits 0-7: class
+ *  - bit 12: color key active
+ *  - bit 13: clip rect active
+ *  - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ *            [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ *            from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ *            NV03_CONTEXT_SURFACE_DST].
+ *  - bits 15-17: 2d operation [aka patch config]
+ *  - bits 20-22: dither mode
+ *  - bit 24: patch valid [enables rendering using this object]
+ *  - bit 25: surface_dst/surface_color/surf2d/surf3d valid
+ *  - bit 26: surface_src/surface_zeta valid
+ *  - bit 27: pattern valid
+ *  - bit 28: rop valid
+ *  - bit 29: beta1 valid
+ *  - bit 30: beta4 valid
+ * word 1:
+ *  - bits 0-1: mono format
+ *  - bits 8-13: color format
+ *  - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ *  - bits 0-15: DMA_A instance
+ *  - bits 16-31: DMA_B instance
+ *
+ * NV05 will set/unset the relevant valid bits when you poke the relevant
+ * object-binding methods with an object of the proper type, or with the
+ * NULL type. It'll only allow rendering using the grobj if all needed
+ * objects are bound. The needed set of objects depends on the selected
+ * operation: for example, the rop object is needed by ROP_AND, but not
+ * by SRCCOPY_AND.
+ *
+ * NV04 doesn't have these methods implemented at all, and doesn't have the
+ * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
+ * is set. So we have to emulate them in software, internally keeping the
+ * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04
+ * and the last word isn't actually used for anything, we abuse it for
+ * this purpose.
+ *
+ * Actually, NV05 can optionally check bit 24 too, but we disable this since
+ * there's no use for it.
+ *
+ * For unknown reasons, NV04 implements surf3d binding in hardware as an
+ * exception. Also for unknown reasons, NV04 doesn't implement the clipping
+ * methods on the surf3d object, so we have to emulate them too.
+ */
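+
+/*
+ * For illustration only (nothing below executes this), decoding word 0
+ * of a grobj per the layout above; the variable names are made up:
+ *
+ *	u32 w0    = nv_ro32(object, 0x00);
+ *	u32 class = w0 & 0xff;		   (bits 0-7)
+ *	u32 op    = (w0 >> 15) & 0x7;	   (bits 15-17, the 2d operation)
+ *	bool swz  = w0 & 0x00004000;	   (bit 14, swizzled destination)
+ *	bool ok   = w0 & 0x01000000;	   (bit 24, patch valid)
+ *
+ * nv04_graph_set_ctx1() and nv04_graph_set_ctx_val() below maintain
+ * exactly these bits when emulating the NV05 behaviour.
+ */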
+
+static void
+nv04_graph_set_ctx1(struct nouveau_object *object, u32 mask, u32 value)
+{
+	struct nv04_graph_priv *priv = (void *)object->engine;
+	int subc = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
+	u32 tmp;
+
+	tmp  = nv_ro32(object, 0x00);
+	tmp &= ~mask;
+	tmp |= value;
+	nv_wo32(object, 0x00, tmp);
+
+	nv_wr32(priv, NV04_PGRAPH_CTX_SWITCH1, tmp);
+	nv_wr32(priv, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+}
+
+static void
+nv04_graph_set_ctx_val(struct nouveau_object *object, u32 mask, u32 value)
+{
+	int class, op, valid = 1;
+	u32 tmp, ctx1;
+
+	ctx1 = nv_ro32(object, 0x00);
+	class = ctx1 & 0xff;
+	op = (ctx1 >> 15) & 7;
+
+	tmp = nv_ro32(object, 0x0c);
+	tmp &= ~mask;
+	tmp |= value;
+	nv_wo32(object, 0x0c, tmp);
+
+	/* check for valid surf2d/surf_dst/surf_color */
+	if (!(tmp & 0x02000000))
+		valid = 0;
+	/* check for valid surf_src/surf_zeta */
+	if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
+		valid = 0;
+
+	switch (op) {
+	/* SRCCOPY_AND, SRCCOPY: no extra objects required */
+	case 0:
+	case 3:
+		break;
+	/* ROP_AND: requires pattern and rop */
+	case 1:
+		if (!(tmp & 0x18000000))
+			valid = 0;
+		break;
+	/* BLEND_AND: requires beta1 */
+	case 2:
+		if (!(tmp & 0x20000000))
+			valid = 0;
+		break;
+	/* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
+	case 4:
+	case 5:
+		if (!(tmp & 0x40000000))
+			valid = 0;
+		break;
+	}
+
+	nv04_graph_set_ctx1(object, 0x01000000, valid << 24);
+}
+
+static int
+nv04_graph_mthd_set_operation(struct nouveau_object *object, u32 mthd,
+			      void *args, u32 size)
+{
+	u32 class = nv_ro32(object, 0) & 0xff;
+	u32 data = *(u32 *)args;
+	if (data > 5)
+		return 1;
+	/* Old versions of the objects only accept the first three operations. */
+	if (data > 2 && class < 0x40)
+		return 1;
+	nv04_graph_set_ctx1(object, 0x00038000, data << 15);
+	/* changing the operation changes the set of objects needed for validation */
+	nv04_graph_set_ctx_val(object, 0, 0);
+	return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_object *object, u32 mthd,
+			      void *args, u32 size)
+{
+	struct nv04_graph_priv *priv = (void *)object->engine;
+	u32 data = *(u32 *)args;
+	u32 min = data & 0xffff, max;
+	u32 w = data >> 16;
+	if (min & 0x8000)
+		/* too large */
+		return 1;
+	if (w & 0x8000)
+		/* yes, it accepts negative values for some reason. */
+		w |= 0xffff0000;
+	max = min + w;
+	max &= 0x3ffff;
+	nv_wr32(priv, 0x40053c, min);
+	nv_wr32(priv, 0x400544, max);
+	return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_object *object, u32 mthd,
+			      void *args, u32 size)
+{
+	struct nv04_graph_priv *priv = (void *)object->engine;
+	u32 data = *(u32 *)args;
+	u32 min = data & 0xffff, max;
+	u32 w = data >> 16;
+	if (min & 0x8000)
+		/* too large */
+		return 1;
+	if (w & 0x8000)
+		/* yes, it accepts negative values for some reason. */
+		w |= 0xffff0000;
+	max = min + w;
+	max &= 0x3ffff;
+	nv_wr32(priv, 0x400540, min);
+	nv_wr32(priv, 0x400548, max);
+	return 0;
+}
+
+static u16
+nv04_graph_mthd_bind_class(struct nouveau_object *object, u32 *args, u32 size)
+{
+	struct nouveau_instmem *imem = nouveau_instmem(object);
+	u32 inst = *(u32 *)args << 4;
+	return nv_ro32(imem, inst);
+}
+
+static int
+nv04_graph_mthd_bind_surf2d(struct nouveau_object *object, u32 mthd,
+			    void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx1(object, 0x00004000, 0);
+		nv04_graph_set_ctx_val(object, 0x02000000, 0);
+		return 0;
+	case 0x42:
+		nv04_graph_set_ctx1(object, 0x00004000, 0);
+		nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_object *object, u32 mthd,
+				    void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx1(object, 0x00004000, 0);
+		nv04_graph_set_ctx_val(object, 0x02000000, 0);
+		return 0;
+	case 0x42:
+		nv04_graph_set_ctx1(object, 0x00004000, 0);
+		nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+		return 0;
+	case 0x52:
+		nv04_graph_set_ctx1(object, 0x00004000, 0x00004000);
+		nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv01_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
+			  void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x08000000, 0);
+		return 0;
+	case 0x18:
+		nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
+			  void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x08000000, 0);
+		return 0;
+	case 0x44:
+		nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_rop(struct nouveau_object *object, u32 mthd,
+			 void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x10000000, 0);
+		return 0;
+	case 0x43:
+		nv04_graph_set_ctx_val(object, 0x10000000, 0x10000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta1(struct nouveau_object *object, u32 mthd,
+			   void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x20000000, 0);
+		return 0;
+	case 0x12:
+		nv04_graph_set_ctx_val(object, 0x20000000, 0x20000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta4(struct nouveau_object *object, u32 mthd,
+			   void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x40000000, 0);
+		return 0;
+	case 0x72:
+		nv04_graph_set_ctx_val(object, 0x40000000, 0x40000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_dst(struct nouveau_object *object, u32 mthd,
+			      void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x02000000, 0);
+		return 0;
+	case 0x58:
+		nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_src(struct nouveau_object *object, u32 mthd,
+			      void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x04000000, 0);
+		return 0;
+	case 0x59:
+		nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_color(struct nouveau_object *object, u32 mthd,
+				void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x02000000, 0);
+		return 0;
+	case 0x5a:
+		nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_object *object, u32 mthd,
+			       void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx_val(object, 0x04000000, 0);
+		return 0;
+	case 0x5b:
+		nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv01_graph_mthd_bind_clip(struct nouveau_object *object, u32 mthd,
+			  void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx1(object, 0x2000, 0);
+		return 0;
+	case 0x19:
+		nv04_graph_set_ctx1(object, 0x2000, 0x2000);
+		return 0;
+	}
+	return 1;
+}
+
+static int
+nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
+			    void *args, u32 size)
+{
+	switch (nv04_graph_mthd_bind_class(object, args, size)) {
+	case 0x30:
+		nv04_graph_set_ctx1(object, 0x1000, 0);
+		return 0;
+	/* Yes, for some reason even the old versions of objects
+	 * accept 0x57 and not 0x17. Consistency be damned.
+	 */
+	case 0x57:
+		nv04_graph_set_ctx1(object, 0x1000, 0x1000);
+		return 0;
+	}
+	return 1;
+}
+
+static struct nouveau_omthds
+nv03_graph_gdi_omthds[] = {
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_patt },
+	{ 0x0188, 0x0188, nv04_graph_mthd_bind_rop },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_gdi_omthds[] = {
+	{ 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv01_graph_blit_omthds[] = {
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+	{ 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_blit_omthds[] = {
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+	{ 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_iifc_omthds[] = {
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_chroma },
+	{ 0x018c, 0x018c, nv01_graph_mthd_bind_clip },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_patt },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_rop },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 },
+	{ 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 },
+	{ 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+	{ 0x03e4, 0x03e4, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv01_graph_ifc_omthds[] = {
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_ifc_omthds[] = {
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+	{ 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv03_graph_sifc_omthds[] = {
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_sifc_omthds[] = {
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+	{ 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv03_graph_sifm_omthds[] = {
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+	{ 0x0304, 0x0304, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_sifm_omthds[] = {
+	{ 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x0304, 0x0304, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_surf3d_omthds[] = {
+	{ 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+	{}
+};
+
+static struct nouveau_omthds
+nv03_graph_ttri_omthds[] = {
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta },
+	{}
+};
+
+static struct nouveau_omthds
+nv01_graph_prim_omthds[] = {
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+	{ 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static struct nouveau_omthds
+nv04_graph_prim_omthds[] = {
+	{ 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+	{ 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+	{ 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+	{ 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+	{ 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+	{ 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+	{ 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
+	{}
+};
+
+static int
+nv04_graph_object_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj *obj;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    16, 16, 0, &obj);
+	*pobject = nv_object(obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+#ifdef __BIG_ENDIAN
+	nv_mo32(obj, 0x00, 0x00080000, 0x00080000);
+#endif
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+	nv_wo32(obj, 0x0c, 0x00000000);
+	return 0;
+}
+
+struct nouveau_ofuncs
+nv04_graph_ofuncs = {
+	.ctor = nv04_graph_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv04_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+	{ 0x0017, &nv04_graph_ofuncs }, /* chroma */
+	{ 0x0018, &nv04_graph_ofuncs }, /* pattern (nv01) */
+	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
+	{ 0x001c, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* line */
+	{ 0x001d, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* tri */
+	{ 0x001e, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* rect */
+	{ 0x001f, &nv04_graph_ofuncs, nv01_graph_blit_omthds },
+	{ 0x0021, &nv04_graph_ofuncs, nv01_graph_ifc_omthds },
+	{ 0x0030, &nv04_graph_ofuncs }, /* null */
+	{ 0x0036, &nv04_graph_ofuncs, nv03_graph_sifc_omthds },
+	{ 0x0037, &nv04_graph_ofuncs, nv03_graph_sifm_omthds },
+	{ 0x0038, &nv04_graph_ofuncs }, /* dvd subpicture */
+	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+	{ 0x0042, &nv04_graph_ofuncs }, /* surf2d */
+	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
+	{ 0x0048, &nv04_graph_ofuncs, nv03_graph_ttri_omthds },
+	{ 0x004a, &nv04_graph_ofuncs, nv04_graph_gdi_omthds },
+	{ 0x004b, &nv04_graph_ofuncs, nv03_graph_gdi_omthds },
+	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+	{ 0x0053, &nv04_graph_ofuncs, nv04_graph_surf3d_omthds },
+	{ 0x0054, &nv04_graph_ofuncs }, /* ttri */
+	{ 0x0055, &nv04_graph_ofuncs }, /* mtri */
+	{ 0x0057, &nv04_graph_ofuncs }, /* chroma */
+	{ 0x0058, &nv04_graph_ofuncs }, /* surf_dst */
+	{ 0x0059, &nv04_graph_ofuncs }, /* surf_src */
+	{ 0x005a, &nv04_graph_ofuncs }, /* surf_color */
+	{ 0x005b, &nv04_graph_ofuncs }, /* surf_zeta */
+	{ 0x005c, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* line */
+	{ 0x005d, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* tri */
+	{ 0x005e, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* rect */
+	{ 0x005f, &nv04_graph_ofuncs, nv04_graph_blit_omthds },
+	{ 0x0060, &nv04_graph_ofuncs, nv04_graph_iifc_omthds },
+	{ 0x0061, &nv04_graph_ofuncs, nv04_graph_ifc_omthds },
+	{ 0x0064, &nv04_graph_ofuncs }, /* iifc (nv05) */
+	{ 0x0065, &nv04_graph_ofuncs }, /* ifc (nv05) */
+	{ 0x0066, &nv04_graph_ofuncs }, /* sifc (nv05) */
+	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+	{ 0x0076, &nv04_graph_ofuncs, nv04_graph_sifc_omthds },
+	{ 0x0077, &nv04_graph_ofuncs, nv04_graph_sifm_omthds },
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static struct nv04_graph_chan *
+nv04_graph_channel(struct nv04_graph_priv *priv)
+{
+	struct nv04_graph_chan *chan = NULL;
+	if (nv_rd32(priv, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
+		int chid = nv_rd32(priv, NV04_PGRAPH_CTX_USER) >> 24;
+		if (chid < ARRAY_SIZE(priv->chan))
+			chan = priv->chan[chid];
+	}
+	return chan;
+}
+
+static int
+nv04_graph_load_context(struct nv04_graph_chan *chan, int chid)
+{
+	struct nv04_graph_priv *priv = nv04_graph_priv(chan);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+		nv_wr32(priv, nv04_graph_ctx_regs[i], chan->nv04[i]);
+
+	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
+	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+	nv_mask(priv, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
+	return 0;
+}
+
+static int
+nv04_graph_unload_context(struct nv04_graph_chan *chan)
+{
+	struct nv04_graph_priv *priv = nv04_graph_priv(chan);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+		chan->nv04[i] = nv_rd32(priv, nv04_graph_ctx_regs[i]);
+
+	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
+	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+	return 0;
+}
+
+static void
+nv04_graph_context_switch(struct nv04_graph_priv *priv)
+{
+	struct nv04_graph_chan *prev = NULL;
+	struct nv04_graph_chan *next = NULL;
+	unsigned long flags;
+	int chid;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	nv04_graph_idle(priv);
+
+	/* If previous context is valid, we need to save it */
+	prev = nv04_graph_channel(priv);
+	if (prev)
+		nv04_graph_unload_context(prev);
+
+	/* load context for next channel */
+	chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
+	next = priv->chan[chid];
+	if (next)
+		nv04_graph_load_context(next, chid);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static u32 *ctx_reg(struct nv04_graph_chan *chan, u32 reg)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
+		if (nv04_graph_ctx_regs[i] == reg)
+			return &chan->nv04[i];
+	}
+
+	return NULL;
+}
+
+static int
+nv04_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nouveau_fifo_chan *fifo = (void *)parent;
+	struct nv04_graph_priv *priv = (void *)engine;
+	struct nv04_graph_chan *chan;
+	unsigned long flags;
+	int ret;
+
+	ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (priv->chan[fifo->chid]) {
+		*pobject = nv_object(priv->chan[fifo->chid]);
+		atomic_inc(&(*pobject)->refcount);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		nouveau_object_destroy(&chan->base);
+		return 1;
+	}
+
+	*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
+
+	priv->chan[fifo->chid] = chan;
+	chan->chid = fifo->chid;
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return 0;
+}
+
+static void
+nv04_graph_context_dtor(struct nouveau_object *object)
+{
+	struct nv04_graph_priv *priv = (void *)object->engine;
+	struct nv04_graph_chan *chan = (void *)object;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->chan[chan->chid] = NULL;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	nouveau_object_destroy(&chan->base);
+}
+
+static int
+nv04_graph_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv04_graph_priv *priv = (void *)object->engine;
+	struct nv04_graph_chan *chan = (void *)object;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	if (nv04_graph_channel(priv) == chan)
+		nv04_graph_unload_context(chan);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return nouveau_object_fini(&chan->base, suspend);
+}
+
+static struct nouveau_oclass
+nv04_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_graph_context_ctor,
+		.dtor = nv04_graph_context_dtor,
+		.init = nouveau_object_init,
+		.fini = nv04_graph_context_fini,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+bool
+nv04_graph_idle(void *obj)
+{
+	struct nouveau_graph *graph = nouveau_graph(obj);
+	u32 mask = 0xffffffff;
+
+	if (nv_device(obj)->card_type == NV_40)
+		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
+
+	if (!nv_wait(graph, NV04_PGRAPH_STATUS, mask, 0)) {
+		nv_error(graph, "idle timed out with status 0x%08x\n",
+			 nv_rd32(graph, NV04_PGRAPH_STATUS));
+		return false;
+	}
+
+	return true;
+}
+
+static const struct nouveau_bitfield
+nv04_graph_intr_name[] = {
+	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+	{}
+};
+
+static const struct nouveau_bitfield
+nv04_graph_nstatus[] = {
+	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
+	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
+	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
+	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
+	{}
+};
+
+const struct nouveau_bitfield
+nv04_graph_nsource[] = {
+	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
+	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
+	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
+	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
+	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
+	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
+	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
+	{}
+};
+
+static void
+nv04_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nv04_graph_priv *priv = (void *)subdev;
+	struct nv04_graph_chan *chan = NULL;
+	struct nouveau_namedb *namedb = NULL;
+	struct nouveau_handle *handle = NULL;
+	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
+	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
+	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	u32 chid = (addr & 0x0f000000) >> 24;
+	u32 subc = (addr & 0x0000e000) >> 13;
+	u32 mthd = (addr & 0x00001ffc);
+	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nv_rd32(priv, 0x400180 + subc * 4) & 0xff;
+	u32 inst = (nv_rd32(priv, 0x40016c) & 0xffff) << 4;
+	u32 show = stat;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	chan = priv->chan[chid];
+	if (chan)
+		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (stat & NV_PGRAPH_INTR_NOTIFY) {
+		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
+			handle = nouveau_namedb_get_vinst(namedb, inst);
+			if (handle && !nv_call(handle->object, mthd, data))
+				show &= ~NV_PGRAPH_INTR_NOTIFY;
+		}
+	}
+
+	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+		nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+		nv04_graph_context_switch(priv);
+	}
+
+	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
+	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+
+	if (show) {
+		nv_error(priv, "%s", "");
+		nouveau_bitfield_print(nv04_graph_intr_name, show);
+		pr_cont(" nsource:");
+		nouveau_bitfield_print(nv04_graph_nsource, nsource);
+		pr_cont(" nstatus:");
+		nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+		pr_cont("\n");
+		nv_error(priv,
+			 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+			 chid, nouveau_client_name(chan), subc, class, mthd,
+			 data);
+	}
+
+	nouveau_namedb_put(handle);
+}
+
+static int
+nv04_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv04_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv04_graph_intr;
+	nv_engine(priv)->cclass = &nv04_graph_cclass;
+	nv_engine(priv)->sclass = nv04_graph_sclass;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+static int
+nv04_graph_init(struct nouveau_object *object)
+{
+	struct nouveau_engine *engine = nv_engine(object);
+	struct nv04_graph_priv *priv = (void *)engine;
+	int ret;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* Enable PGRAPH interrupts */
+	nv_wr32(priv, NV03_PGRAPH_INTR, 0xFFFFFFFF);
+	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(priv, NV04_PGRAPH_VALID1, 0);
+	nv_wr32(priv, NV04_PGRAPH_VALID2, 0);
+	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x000001FF);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x1231c000);
+	/*1231C000 blob, 001 haiku*/
+	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x72111100);
+	/*0x72111100 blob , 01 haiku*/
+	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+	/*haiku same*/
+
+	/*nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
+	/*haiku and blob 10d4*/
+
+	nv_wr32(priv, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
+	nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
+	nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+
+	/* These don't belong here, they're part of a per-channel context */
+	nv_wr32(priv, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
+	nv_wr32(priv, NV04_PGRAPH_BETA_AND     , 0xFFFFFFFF);
+	return 0;
+}
+
+struct nouveau_oclass
+nv04_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_graph_ctor,
+		.dtor = _nouveau_graph_dtor,
+		.init = nv04_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
new file mode 100644
index 0000000..23c143a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -0,0 +1,1316 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/client.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/handle.h>
+
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+#include <engine/graph.h>
+
+#include "regs.h"
+
+struct pipe_state {
+	u32 pipe_0x0000[0x040/4];
+	u32 pipe_0x0040[0x010/4];
+	u32 pipe_0x0200[0x0c0/4];
+	u32 pipe_0x4400[0x080/4];
+	u32 pipe_0x6400[0x3b0/4];
+	u32 pipe_0x6800[0x2f0/4];
+	u32 pipe_0x6c00[0x030/4];
+	u32 pipe_0x7000[0x130/4];
+	u32 pipe_0x7400[0x0c0/4];
+	u32 pipe_0x7800[0x0c0/4];
+};
+
+static int nv10_graph_ctx_regs[] = {
+	NV10_PGRAPH_CTX_SWITCH(0),
+	NV10_PGRAPH_CTX_SWITCH(1),
+	NV10_PGRAPH_CTX_SWITCH(2),
+	NV10_PGRAPH_CTX_SWITCH(3),
+	NV10_PGRAPH_CTX_SWITCH(4),
+	NV10_PGRAPH_CTX_CACHE(0, 0),
+	NV10_PGRAPH_CTX_CACHE(0, 1),
+	NV10_PGRAPH_CTX_CACHE(0, 2),
+	NV10_PGRAPH_CTX_CACHE(0, 3),
+	NV10_PGRAPH_CTX_CACHE(0, 4),
+	NV10_PGRAPH_CTX_CACHE(1, 0),
+	NV10_PGRAPH_CTX_CACHE(1, 1),
+	NV10_PGRAPH_CTX_CACHE(1, 2),
+	NV10_PGRAPH_CTX_CACHE(1, 3),
+	NV10_PGRAPH_CTX_CACHE(1, 4),
+	NV10_PGRAPH_CTX_CACHE(2, 0),
+	NV10_PGRAPH_CTX_CACHE(2, 1),
+	NV10_PGRAPH_CTX_CACHE(2, 2),
+	NV10_PGRAPH_CTX_CACHE(2, 3),
+	NV10_PGRAPH_CTX_CACHE(2, 4),
+	NV10_PGRAPH_CTX_CACHE(3, 0),
+	NV10_PGRAPH_CTX_CACHE(3, 1),
+	NV10_PGRAPH_CTX_CACHE(3, 2),
+	NV10_PGRAPH_CTX_CACHE(3, 3),
+	NV10_PGRAPH_CTX_CACHE(3, 4),
+	NV10_PGRAPH_CTX_CACHE(4, 0),
+	NV10_PGRAPH_CTX_CACHE(4, 1),
+	NV10_PGRAPH_CTX_CACHE(4, 2),
+	NV10_PGRAPH_CTX_CACHE(4, 3),
+	NV10_PGRAPH_CTX_CACHE(4, 4),
+	NV10_PGRAPH_CTX_CACHE(5, 0),
+	NV10_PGRAPH_CTX_CACHE(5, 1),
+	NV10_PGRAPH_CTX_CACHE(5, 2),
+	NV10_PGRAPH_CTX_CACHE(5, 3),
+	NV10_PGRAPH_CTX_CACHE(5, 4),
+	NV10_PGRAPH_CTX_CACHE(6, 0),
+	NV10_PGRAPH_CTX_CACHE(6, 1),
+	NV10_PGRAPH_CTX_CACHE(6, 2),
+	NV10_PGRAPH_CTX_CACHE(6, 3),
+	NV10_PGRAPH_CTX_CACHE(6, 4),
+	NV10_PGRAPH_CTX_CACHE(7, 0),
+	NV10_PGRAPH_CTX_CACHE(7, 1),
+	NV10_PGRAPH_CTX_CACHE(7, 2),
+	NV10_PGRAPH_CTX_CACHE(7, 3),
+	NV10_PGRAPH_CTX_CACHE(7, 4),
+	NV10_PGRAPH_CTX_USER,
+	NV04_PGRAPH_DMA_START_0,
+	NV04_PGRAPH_DMA_START_1,
+	NV04_PGRAPH_DMA_LENGTH,
+	NV04_PGRAPH_DMA_MISC,
+	NV10_PGRAPH_DMA_PITCH,
+	NV04_PGRAPH_BOFFSET0,
+	NV04_PGRAPH_BBASE0,
+	NV04_PGRAPH_BLIMIT0,
+	NV04_PGRAPH_BOFFSET1,
+	NV04_PGRAPH_BBASE1,
+	NV04_PGRAPH_BLIMIT1,
+	NV04_PGRAPH_BOFFSET2,
+	NV04_PGRAPH_BBASE2,
+	NV04_PGRAPH_BLIMIT2,
+	NV04_PGRAPH_BOFFSET3,
+	NV04_PGRAPH_BBASE3,
+	NV04_PGRAPH_BLIMIT3,
+	NV04_PGRAPH_BOFFSET4,
+	NV04_PGRAPH_BBASE4,
+	NV04_PGRAPH_BLIMIT4,
+	NV04_PGRAPH_BOFFSET5,
+	NV04_PGRAPH_BBASE5,
+	NV04_PGRAPH_BLIMIT5,
+	NV04_PGRAPH_BPITCH0,
+	NV04_PGRAPH_BPITCH1,
+	NV04_PGRAPH_BPITCH2,
+	NV04_PGRAPH_BPITCH3,
+	NV04_PGRAPH_BPITCH4,
+	NV10_PGRAPH_SURFACE,
+	NV10_PGRAPH_STATE,
+	NV04_PGRAPH_BSWIZZLE2,
+	NV04_PGRAPH_BSWIZZLE5,
+	NV04_PGRAPH_BPIXEL,
+	NV10_PGRAPH_NOTIFY,
+	NV04_PGRAPH_PATT_COLOR0,
+	NV04_PGRAPH_PATT_COLOR1,
+	NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
+	0x00400904,
+	0x00400908,
+	0x0040090c,
+	0x00400910,
+	0x00400914,
+	0x00400918,
+	0x0040091c,
+	0x00400920,
+	0x00400924,
+	0x00400928,
+	0x0040092c,
+	0x00400930,
+	0x00400934,
+	0x00400938,
+	0x0040093c,
+	0x00400940,
+	0x00400944,
+	0x00400948,
+	0x0040094c,
+	0x00400950,
+	0x00400954,
+	0x00400958,
+	0x0040095c,
+	0x00400960,
+	0x00400964,
+	0x00400968,
+	0x0040096c,
+	0x00400970,
+	0x00400974,
+	0x00400978,
+	0x0040097c,
+	0x00400980,
+	0x00400984,
+	0x00400988,
+	0x0040098c,
+	0x00400990,
+	0x00400994,
+	0x00400998,
+	0x0040099c,
+	0x004009a0,
+	0x004009a4,
+	0x004009a8,
+	0x004009ac,
+	0x004009b0,
+	0x004009b4,
+	0x004009b8,
+	0x004009bc,
+	0x004009c0,
+	0x004009c4,
+	0x004009c8,
+	0x004009cc,
+	0x004009d0,
+	0x004009d4,
+	0x004009d8,
+	0x004009dc,
+	0x004009e0,
+	0x004009e4,
+	0x004009e8,
+	0x004009ec,
+	0x004009f0,
+	0x004009f4,
+	0x004009f8,
+	0x004009fc,
+	NV04_PGRAPH_PATTERN,	/* 2 values from 0x400808 to 0x40080c */
+	0x0040080c,
+	NV04_PGRAPH_PATTERN_SHAPE,
+	NV03_PGRAPH_MONO_COLOR0,
+	NV04_PGRAPH_ROP3,
+	NV04_PGRAPH_CHROMA,
+	NV04_PGRAPH_BETA_AND,
+	NV04_PGRAPH_BETA_PREMULT,
+	0x00400e70,
+	0x00400e74,
+	0x00400e78,
+	0x00400e7c,
+	0x00400e80,
+	0x00400e84,
+	0x00400e88,
+	0x00400e8c,
+	0x00400ea0,
+	0x00400ea4,
+	0x00400ea8,
+	0x00400e90,
+	0x00400e94,
+	0x00400e98,
+	0x00400e9c,
+	NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
+	NV10_PGRAPH_WINDOWCLIP_VERTICAL,   /* 8 values from 0x400f20-0x400f3c */
+	0x00400f04,
+	0x00400f24,
+	0x00400f08,
+	0x00400f28,
+	0x00400f0c,
+	0x00400f2c,
+	0x00400f10,
+	0x00400f30,
+	0x00400f14,
+	0x00400f34,
+	0x00400f18,
+	0x00400f38,
+	0x00400f1c,
+	0x00400f3c,
+	NV10_PGRAPH_XFMODE0,
+	NV10_PGRAPH_XFMODE1,
+	NV10_PGRAPH_GLOBALSTATE0,
+	NV10_PGRAPH_GLOBALSTATE1,
+	NV04_PGRAPH_STORED_FMT,
+	NV04_PGRAPH_SOURCE_COLOR,
+	NV03_PGRAPH_ABS_X_RAM,	/* 32 values from 0x400400 to 0x40047c */
+	NV03_PGRAPH_ABS_Y_RAM,	/* 32 values from 0x400480 to 0x4004fc */
+	0x00400404,
+	0x00400484,
+	0x00400408,
+	0x00400488,
+	0x0040040c,
+	0x0040048c,
+	0x00400410,
+	0x00400490,
+	0x00400414,
+	0x00400494,
+	0x00400418,
+	0x00400498,
+	0x0040041c,
+	0x0040049c,
+	0x00400420,
+	0x004004a0,
+	0x00400424,
+	0x004004a4,
+	0x00400428,
+	0x004004a8,
+	0x0040042c,
+	0x004004ac,
+	0x00400430,
+	0x004004b0,
+	0x00400434,
+	0x004004b4,
+	0x00400438,
+	0x004004b8,
+	0x0040043c,
+	0x004004bc,
+	0x00400440,
+	0x004004c0,
+	0x00400444,
+	0x004004c4,
+	0x00400448,
+	0x004004c8,
+	0x0040044c,
+	0x004004cc,
+	0x00400450,
+	0x004004d0,
+	0x00400454,
+	0x004004d4,
+	0x00400458,
+	0x004004d8,
+	0x0040045c,
+	0x004004dc,
+	0x00400460,
+	0x004004e0,
+	0x00400464,
+	0x004004e4,
+	0x00400468,
+	0x004004e8,
+	0x0040046c,
+	0x004004ec,
+	0x00400470,
+	0x004004f0,
+	0x00400474,
+	0x004004f4,
+	0x00400478,
+	0x004004f8,
+	0x0040047c,
+	0x004004fc,
+	NV03_PGRAPH_ABS_UCLIP_XMIN,
+	NV03_PGRAPH_ABS_UCLIP_XMAX,
+	NV03_PGRAPH_ABS_UCLIP_YMIN,
+	NV03_PGRAPH_ABS_UCLIP_YMAX,
+	0x00400550,
+	0x00400558,
+	0x00400554,
+	0x0040055c,
+	NV03_PGRAPH_ABS_UCLIPA_XMIN,
+	NV03_PGRAPH_ABS_UCLIPA_XMAX,
+	NV03_PGRAPH_ABS_UCLIPA_YMIN,
+	NV03_PGRAPH_ABS_UCLIPA_YMAX,
+	NV03_PGRAPH_ABS_ICLIP_XMAX,
+	NV03_PGRAPH_ABS_ICLIP_YMAX,
+	NV03_PGRAPH_XY_LOGIC_MISC0,
+	NV03_PGRAPH_XY_LOGIC_MISC1,
+	NV03_PGRAPH_XY_LOGIC_MISC2,
+	NV03_PGRAPH_XY_LOGIC_MISC3,
+	NV03_PGRAPH_CLIPX_0,
+	NV03_PGRAPH_CLIPX_1,
+	NV03_PGRAPH_CLIPY_0,
+	NV03_PGRAPH_CLIPY_1,
+	NV10_PGRAPH_COMBINER0_IN_ALPHA,
+	NV10_PGRAPH_COMBINER1_IN_ALPHA,
+	NV10_PGRAPH_COMBINER0_IN_RGB,
+	NV10_PGRAPH_COMBINER1_IN_RGB,
+	NV10_PGRAPH_COMBINER_COLOR0,
+	NV10_PGRAPH_COMBINER_COLOR1,
+	NV10_PGRAPH_COMBINER0_OUT_ALPHA,
+	NV10_PGRAPH_COMBINER1_OUT_ALPHA,
+	NV10_PGRAPH_COMBINER0_OUT_RGB,
+	NV10_PGRAPH_COMBINER1_OUT_RGB,
+	NV10_PGRAPH_COMBINER_FINAL0,
+	NV10_PGRAPH_COMBINER_FINAL1,
+	0x00400e00,
+	0x00400e04,
+	0x00400e08,
+	0x00400e0c,
+	0x00400e10,
+	0x00400e14,
+	0x00400e18,
+	0x00400e1c,
+	0x00400e20,
+	0x00400e24,
+	0x00400e28,
+	0x00400e2c,
+	0x00400e30,
+	0x00400e34,
+	0x00400e38,
+	0x00400e3c,
+	NV04_PGRAPH_PASSTHRU_0,
+	NV04_PGRAPH_PASSTHRU_1,
+	NV04_PGRAPH_PASSTHRU_2,
+	NV10_PGRAPH_DIMX_TEXTURE,
+	NV10_PGRAPH_WDIMX_TEXTURE,
+	NV10_PGRAPH_DVD_COLORFMT,
+	NV10_PGRAPH_SCALED_FORMAT,
+	NV04_PGRAPH_MISC24_0,
+	NV04_PGRAPH_MISC24_1,
+	NV04_PGRAPH_MISC24_2,
+	NV03_PGRAPH_X_MISC,
+	NV03_PGRAPH_Y_MISC,
+	NV04_PGRAPH_VALID1,
+	NV04_PGRAPH_VALID2,
+};
+
+static int nv17_graph_ctx_regs[] = {
+	NV10_PGRAPH_DEBUG_4,
+	0x004006b0,
+	0x00400eac,
+	0x00400eb0,
+	0x00400eb4,
+	0x00400eb8,
+	0x00400ebc,
+	0x00400ec0,
+	0x00400ec4,
+	0x00400ec8,
+	0x00400ecc,
+	0x00400ed0,
+	0x00400ed4,
+	0x00400ed8,
+	0x00400edc,
+	0x00400ee0,
+	0x00400a00,
+	0x00400a04,
+};
+
+struct nv10_graph_priv {
+	struct nouveau_graph base;
+	struct nv10_graph_chan *chan[32];
+	spinlock_t lock;
+};
+
+struct nv10_graph_chan {
+	struct nouveau_object base;
+	int chid;
+	int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
+	int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
+	struct pipe_state pipe_state;
+	u32 lma_window[4];
+};
+
+
+static inline struct nv10_graph_priv *
+nv10_graph_priv(struct nv10_graph_chan *chan)
+{
+	return (void *)nv_object(chan)->engine;
+}
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+#define PIPE_SAVE(priv, state, addr)					\
+	do {								\
+		int __i;						\
+		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
+		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
+			state[__i] = nv_rd32(priv, NV10_PGRAPH_PIPE_DATA); \
+	} while (0)
+
+#define PIPE_RESTORE(priv, state, addr)					\
+	do {								\
+		int __i;						\
+		nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
+		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
+			nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, state[__i]); \
+	} while (0)
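+
+/*
+ * Usage sketch: the pipe is addressed by latching an offset into
+ * NV10_PGRAPH_PIPE_ADDRESS, after which successive accesses to
+ * NV10_PGRAPH_PIPE_DATA step through consecutive words, e.g.:
+ *
+ *	u32 snap[0x0c0/4];
+ *	PIPE_SAVE(priv, snap, 0x0200);		(read a pipe block back)
+ *	PIPE_RESTORE(priv, snap, 0x0200);	(and write it again)
+ */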
+
+static struct nouveau_oclass
+nv10_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
+	{ 0x004a, &nv04_graph_ofuncs }, /* gdi */
+	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+	{ 0x005f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0062, &nv04_graph_ofuncs }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0093, &nv04_graph_ofuncs }, /* surf3d */
+	{ 0x0094, &nv04_graph_ofuncs }, /* ttri */
+	{ 0x0095, &nv04_graph_ofuncs }, /* mtri */
+	{ 0x0056, &nv04_graph_ofuncs }, /* celsius */
+	{},
+};
+
+static struct nouveau_oclass
+nv15_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
+	{ 0x004a, &nv04_graph_ofuncs }, /* gdi */
+	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+	{ 0x005f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0062, &nv04_graph_ofuncs }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0093, &nv04_graph_ofuncs }, /* surf3d */
+	{ 0x0094, &nv04_graph_ofuncs }, /* ttri */
+	{ 0x0095, &nv04_graph_ofuncs }, /* mtri */
+	{ 0x0096, &nv04_graph_ofuncs }, /* celsius */
+	{},
+};
+
+static int
+nv17_graph_mthd_lma_window(struct nouveau_object *object, u32 mthd,
+			   void *args, u32 size)
+{
+	struct nv10_graph_chan *chan = (void *)object->parent;
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	struct pipe_state *pipe = &chan->pipe_state;
+	u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
+	u32 xfmode0, xfmode1;
+	u32 data = *(u32 *)args;
+	int i;
+
+	chan->lma_window[(mthd - 0x1638) / 4] = data;
+
+	if (mthd != 0x1644)
+		return 0;
+
+	nv04_graph_idle(priv);
+
+	PIPE_SAVE(priv, pipe_0x0040, 0x0040);
+	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
+
+	PIPE_RESTORE(priv, chan->lma_window, 0x6790);
+
+	nv04_graph_idle(priv);
+
+	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
+	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
+
+	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
+	PIPE_SAVE(priv, pipe_0x64c0, 0x64c0);
+	PIPE_SAVE(priv, pipe_0x6ab0, 0x6ab0);
+	PIPE_SAVE(priv, pipe_0x6a80, 0x6a80);
+
+	nv04_graph_idle(priv);
+
+	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	for (i = 0; i < 3; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	for (i = 0; i < 3; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
+
+	nv04_graph_idle(priv);
+
+	PIPE_RESTORE(priv, pipe_0x0040, 0x0040);
+
+	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
+
+	PIPE_RESTORE(priv, pipe_0x64c0, 0x64c0);
+	PIPE_RESTORE(priv, pipe_0x6ab0, 0x6ab0);
+	PIPE_RESTORE(priv, pipe_0x6a80, 0x6a80);
+	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv04_graph_idle(priv);
+
+	return 0;
+}
+
+static int
+nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
+			   void *args, u32 size)
+{
+	struct nv10_graph_chan *chan = (void *)object->parent;
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+
+	nv04_graph_idle(priv);
+
+	nv_mask(priv, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
+	nv_mask(priv, 0x4006b0, 0x08000000, 0x08000000);
+	return 0;
+}
+
+static struct nouveau_omthds
+nv17_celcius_omthds[] = {
+	{ 0x1638, 0x1638, nv17_graph_mthd_lma_window },
+	{ 0x163c, 0x163c, nv17_graph_mthd_lma_window },
+	{ 0x1640, 0x1640, nv17_graph_mthd_lma_window },
+	{ 0x1644, 0x1644, nv17_graph_mthd_lma_window },
+	{ 0x1658, 0x1658, nv17_graph_mthd_lma_enable },
+	{}
+};
+
+static struct nouveau_oclass
+nv17_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs }, /* pattern */
+	{ 0x004a, &nv04_graph_ofuncs }, /* gdi */
+	{ 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+	{ 0x005f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0062, &nv04_graph_ofuncs }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs }, /* blit */
+	{ 0x0093, &nv04_graph_ofuncs }, /* surf3d */
+	{ 0x0094, &nv04_graph_ofuncs }, /* ttri */
+	{ 0x0095, &nv04_graph_ofuncs }, /* mtri */
+	{ 0x0099, &nv04_graph_ofuncs, nv17_celcius_omthds },
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static struct nv10_graph_chan *
+nv10_graph_channel(struct nv10_graph_priv *priv)
+{
+	struct nv10_graph_chan *chan = NULL;
+	if (nv_rd32(priv, 0x400144) & 0x00010000) {
+		int chid = nv_rd32(priv, 0x400148) >> 24;
+		if (chid < ARRAY_SIZE(priv->chan))
+			chan = priv->chan[chid];
+	}
+	return chan;
+}
+
+static void
+nv10_graph_save_pipe(struct nv10_graph_chan *chan)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	struct pipe_state *pipe = &chan->pipe_state;
+
+	PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
+	PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
+	PIPE_SAVE(priv, pipe->pipe_0x6400, 0x6400);
+	PIPE_SAVE(priv, pipe->pipe_0x6800, 0x6800);
+	PIPE_SAVE(priv, pipe->pipe_0x6c00, 0x6c00);
+	PIPE_SAVE(priv, pipe->pipe_0x7000, 0x7000);
+	PIPE_SAVE(priv, pipe->pipe_0x7400, 0x7400);
+	PIPE_SAVE(priv, pipe->pipe_0x7800, 0x7800);
+	PIPE_SAVE(priv, pipe->pipe_0x0040, 0x0040);
+	PIPE_SAVE(priv, pipe->pipe_0x0000, 0x0000);
+}
+
+static void
+nv10_graph_load_pipe(struct nv10_graph_chan *chan)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	struct pipe_state *pipe = &chan->pipe_state;
+	u32 xfmode0, xfmode1;
+	int i;
+
+	nv04_graph_idle(priv);
+	/* XXX check haiku comments */
+	xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
+	xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+	for (i = 0; i < 3; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+	for (i = 0; i < 3; i++)
+		nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+	nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+	nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+
+	PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
+	nv04_graph_idle(priv);
+
+	/* restore XFMODE */
+	nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
+	nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
+	PIPE_RESTORE(priv, pipe->pipe_0x6400, 0x6400);
+	PIPE_RESTORE(priv, pipe->pipe_0x6800, 0x6800);
+	PIPE_RESTORE(priv, pipe->pipe_0x6c00, 0x6c00);
+	PIPE_RESTORE(priv, pipe->pipe_0x7000, 0x7000);
+	PIPE_RESTORE(priv, pipe->pipe_0x7400, 0x7400);
+	PIPE_RESTORE(priv, pipe->pipe_0x7800, 0x7800);
+	PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
+	PIPE_RESTORE(priv, pipe->pipe_0x0000, 0x0000);
+	PIPE_RESTORE(priv, pipe->pipe_0x0040, 0x0040);
+	nv04_graph_idle(priv);
+}
+
+static void
+nv10_graph_create_pipe(struct nv10_graph_chan *chan)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	struct pipe_state *pipe_state = &chan->pipe_state;
+	u32 *pipe_state_addr;
+	int i;
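+
+	/* helpers for filling the per-channel pipe_state arrays: PIPE_INIT
+	 * aims pipe_state_addr at the start of one array, NV_WRITE_PIPE_INIT
+	 * appends a single word, and PIPE_INIT_END verifies that exactly the
+	 * expected number of words was written */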
+#define PIPE_INIT(addr) \
+	do { \
+		pipe_state_addr = pipe_state->pipe_##addr; \
+	} while (0)
+#define PIPE_INIT_END(addr) \
+	do { \
+		u32 *__end_addr = pipe_state->pipe_##addr + \
+				ARRAY_SIZE(pipe_state->pipe_##addr); \
+		if (pipe_state_addr != __end_addr) \
+			nv_error(priv, "incomplete pipe init for 0x%x :  %p/%p\n", \
+				addr, pipe_state_addr, __end_addr); \
+	} while (0)
+#define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value
+
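+	/* most non-zero words below are IEEE-754 singles: 0x3f800000 is 1.0f,
+	 * 0x40000000 is 2.0f, 0x3f000000 is 0.5f, 0xbf800000 is -1.0f and
+	 * 0x7149f2ca is roughly 1e30 */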
+	PIPE_INIT(0x0200);
+	for (i = 0; i < 48; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x0200);
+
+	PIPE_INIT(0x6400);
+	for (i = 0; i < 211; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x40000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f000000);
+	NV_WRITE_PIPE_INIT(0x3f000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	PIPE_INIT_END(0x6400);
+
+	PIPE_INIT(0x6800);
+	for (i = 0; i < 162; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x3f800000);
+	for (i = 0; i < 25; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x6800);
+
+	PIPE_INIT(0x6c00);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0xbf800000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x6c00);
+
+	PIPE_INIT(0x7000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x00000000);
+	NV_WRITE_PIPE_INIT(0x7149f2ca);
+	for (i = 0; i < 35; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x7000);
+
+	PIPE_INIT(0x7400);
+	for (i = 0; i < 48; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x7400);
+
+	PIPE_INIT(0x7800);
+	for (i = 0; i < 48; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x7800);
+
+	PIPE_INIT(0x4400);
+	for (i = 0; i < 32; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x4400);
+
+	PIPE_INIT(0x0000);
+	for (i = 0; i < 16; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x0000);
+
+	PIPE_INIT(0x0040);
+	for (i = 0; i < 4; i++)
+		NV_WRITE_PIPE_INIT(0x00000000);
+	PIPE_INIT_END(0x0040);
+
+#undef PIPE_INIT
+#undef PIPE_INIT_END
+#undef NV_WRITE_PIPE_INIT
+}
+
+static int
+nv10_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
+		if (nv10_graph_ctx_regs[i] == reg)
+			return i;
+	}
+	nv_error(priv, "unknow offset nv10_ctx_regs %d\n", reg);
+	return -1;
+}
+
+static int
+nv17_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
+		if (nv17_graph_ctx_regs[i] == reg)
+			return i;
+	}
+	nv_error(priv, "unknow offset nv17_ctx_regs %d\n", reg);
+	return -1;
+}
+
+static void
+nv10_graph_load_dma_vtxbuf(struct nv10_graph_chan *chan, int chid, u32 inst)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
+	u32 ctx_user, ctx_switch[5];
+	int i, subchan = -1;
+
+	/* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
+	 * that cannot be restored via MMIO. Do it through the FIFO
+	 * instead.
+	 */
+
+	/* Look for a celsius object */
+	for (i = 0; i < 8; i++) {
+		int class = nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
+
+		if (class == 0x56 || class == 0x96 || class == 0x99) {
+			subchan = i;
+			break;
+		}
+	}
+
+	if (subchan < 0 || !inst)
+		return;
+
+	/* Save the current ctx object */
+	ctx_user = nv_rd32(priv, NV10_PGRAPH_CTX_USER);
+	for (i = 0; i < 5; i++)
+		ctx_switch[i] = nv_rd32(priv, NV10_PGRAPH_CTX_SWITCH(i));
+
+	/* Save the FIFO state */
+	st2 = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2);
+	st2_dl = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DL);
+	st2_dh = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DH);
+	fifo_ptr = nv_rd32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR);
+
+	for (i = 0; i < ARRAY_SIZE(fifo); i++)
+		fifo[i] = nv_rd32(priv, 0x4007a0 + 4 * i);
+
+	/* Switch to the celsius subchannel */
+	for (i = 0; i < 5; i++)
+		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i),
+			nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(subchan, i)));
+	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
+
+	/* Inject NV10TCL_DMA_VTXBUF */
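+	/* the ST2 word seemingly packs the channel id (bits 24:20), the
+	 * subchannel (bits 18:16) and the method offset, with 0x2c000000
+	 * marking the entry valid */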
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2,
+		0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
+	nv_mask(priv, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+
+	/* Restore the FIFO state */
+	for (i = 0; i < ARRAY_SIZE(fifo); i++)
+		nv_wr32(priv, 0x4007a0 + 4 * i, fifo[i]);
+
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, st2);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
+
+	/* Restore the current ctx object */
+	for (i = 0; i < 5; i++)
+		nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
+	nv_wr32(priv, NV10_PGRAPH_CTX_USER, ctx_user);
+}
+
+static int
+nv10_graph_load_context(struct nv10_graph_chan *chan, int chid)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	u32 inst;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
+		nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]);
+
+	if (nv_device(priv)->chipset >= 0x17) {
+		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
+			nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]);
+	}
+
+	nv10_graph_load_pipe(chan);
+
+	inst = nv_rd32(priv, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
+	nv10_graph_load_dma_vtxbuf(chan, chid, inst);
+
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+	nv_mask(priv, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
+	return 0;
+}
+
+static int
+nv10_graph_unload_context(struct nv10_graph_chan *chan)
+{
+	struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
+		chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]);
+
+	if (nv_device(priv)->chipset >= 0x17) {
+		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
+			chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]);
+	}
+
+	nv10_graph_save_pipe(chan);
+
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
+	return 0;
+}
+
+static void
+nv10_graph_context_switch(struct nv10_graph_priv *priv)
+{
+	struct nv10_graph_chan *prev = NULL;
+	struct nv10_graph_chan *next = NULL;
+	unsigned long flags;
+	int chid;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	nv04_graph_idle(priv);
+
+	/* If previous context is valid, we need to save it */
+	prev = nv10_graph_channel(priv);
+	if (prev)
+		nv10_graph_unload_context(prev);
+
+	/* load context for next channel */
+	chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+	next = priv->chan[chid];
+	if (next)
+		nv10_graph_load_context(next, chid);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
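+/* both macros update the channel's software context image only; the values
+ * reach hardware on the next nv10_graph_load_context() */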
+#define NV_WRITE_CTX(reg, val) do { \
+	int offset = nv10_graph_ctx_regs_find_offset(priv, reg); \
+	if (offset >= 0) \
+		chan->nv10[offset] = val; \
+	} while (0)
+
+#define NV17_WRITE_CTX(reg, val) do { \
+	int offset = nv17_graph_ctx_regs_find_offset(priv, reg); \
+	if (offset >= 0) \
+		chan->nv17[offset] = val; \
+	} while (0)
+
+static int
+nv10_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nouveau_fifo_chan *fifo = (void *)parent;
+	struct nv10_graph_priv *priv = (void *)engine;
+	struct nv10_graph_chan *chan;
+	unsigned long flags;
+	int ret;
+
+	ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (priv->chan[fifo->chid]) {
+		*pobject = nv_object(priv->chan[fifo->chid]);
+		atomic_inc(&(*pobject)->refcount);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		nouveau_object_destroy(&chan->base);
+		return 1;
+	}
+
+	NV_WRITE_CTX(0x00400e88, 0x08000000);
+	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
+	NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
+	NV_WRITE_CTX(0x00400e10, 0x00001000);
+	NV_WRITE_CTX(0x00400e14, 0x00001000);
+	NV_WRITE_CTX(0x00400e30, 0x00080008);
+	NV_WRITE_CTX(0x00400e34, 0x00080008);
+	if (nv_device(priv)->chipset >= 0x17) {
+		/* is this really needed? */
+		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
+					nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
+		NV17_WRITE_CTX(0x004006b0, nv_rd32(priv, 0x004006b0));
+		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
+		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
+		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
+		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
+	}
+	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, fifo->chid << 24);
+
+	nv10_graph_create_pipe(chan);
+
+	priv->chan[fifo->chid] = chan;
+	chan->chid = fifo->chid;
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return 0;
+}
+
+static void
+nv10_graph_context_dtor(struct nouveau_object *object)
+{
+	struct nv10_graph_priv *priv = (void *)object->engine;
+	struct nv10_graph_chan *chan = (void *)object;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->chan[chan->chid] = NULL;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	nouveau_object_destroy(&chan->base);
+}
+
+static int
+nv10_graph_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv10_graph_priv *priv = (void *)object->engine;
+	struct nv10_graph_chan *chan = (void *)object;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+	if (nv10_graph_channel(priv) == chan)
+		nv10_graph_unload_context(chan);
+	nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return nouveau_object_fini(&chan->base, suspend);
+}
+
+static struct nouveau_oclass
+nv10_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_graph_context_ctor,
+		.dtor = nv10_graph_context_dtor,
+		.init = nouveau_object_init,
+		.fini = nv10_graph_context_fini,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv10_graph_tile_prog(struct nouveau_engine *engine, int i)
+{
+	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+	struct nouveau_fifo *pfifo = nouveau_fifo(engine);
+	struct nv10_graph_priv *priv = (void *)engine;
+	unsigned long flags;
+
+	pfifo->pause(pfifo, &flags);
+	nv04_graph_idle(priv);
+
+	nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(priv, NV10_PGRAPH_TILE(i), tile->addr);
+
+	pfifo->start(pfifo, &flags);
+}
+
+const struct nouveau_bitfield nv10_graph_intr_name[] = {
+	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
+	{}
+};
+
+const struct nouveau_bitfield nv10_graph_nstatus[] = {
+	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
+	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
+	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
+	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
+	{}
+};
+
+static void
+nv10_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nv10_graph_priv *priv = (void *)subdev;
+	struct nv10_graph_chan *chan = NULL;
+	struct nouveau_namedb *namedb = NULL;
+	struct nouveau_handle *handle = NULL;
+	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
+	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
+	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	u32 chid = (addr & 0x01f00000) >> 20;
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00001ffc);
+	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
+	u32 show = stat;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	chan = priv->chan[chid];
+	if (chan)
+		namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (stat & NV_PGRAPH_INTR_ERROR) {
+		if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
+			handle = nouveau_namedb_get_class(namedb, class);
+			if (handle && !nv_call(handle->object, mthd, data))
+				show &= ~NV_PGRAPH_INTR_ERROR;
+		}
+	}
+
+	if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+		nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+		stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+		show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+		nv10_graph_context_switch(priv);
+	}
+
+	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
+	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+
+	if (show) {
+		nv_error(priv, "%s", "");
+		nouveau_bitfield_print(nv10_graph_intr_name, show);
+		pr_cont(" nsource:");
+		nouveau_bitfield_print(nv04_graph_nsource, nsource);
+		pr_cont(" nstatus:");
+		nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+		pr_cont("\n");
+		nv_error(priv,
+			 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+			 chid, nouveau_client_name(chan), subc, class, mthd,
+			 data);
+	}
+
+	nouveau_namedb_put(handle);
+}
+
+static int
+nv10_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv10_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv10_graph_intr;
+	nv_engine(priv)->cclass = &nv10_graph_cclass;
+
+	if (nv_device(priv)->chipset <= 0x10)
+		nv_engine(priv)->sclass = nv10_graph_sclass;
+	else
+	if (nv_device(priv)->chipset <  0x17 ||
+	    nv_device(priv)->chipset == 0x1a)
+		nv_engine(priv)->sclass = nv15_graph_sclass;
+	else
+		nv_engine(priv)->sclass = nv17_graph_sclass;
+
+	nv_engine(priv)->tile_prog = nv10_graph_tile_prog;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+static void
+nv10_graph_dtor(struct nouveau_object *object)
+{
+	struct nv10_graph_priv *priv = (void *)object;
+	nouveau_graph_destroy(&priv->base);
+}
+
+static int
+nv10_graph_init(struct nouveau_object *object)
+{
+	struct nouveau_engine *engine = nv_engine(object);
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	struct nv10_graph_priv *priv = (void *)engine;
+	int ret, i;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
+	/* nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
+
+	if (nv_device(priv)->chipset >= 0x17) {
+		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
+		nv_wr32(priv, 0x400a10, 0x03ff3fb6);
+		nv_wr32(priv, 0x400838, 0x002f8684);
+		nv_wr32(priv, 0x40083c, 0x00115f3f);
+		nv_wr32(priv, 0x4006b0, 0x40000020);
+	} else {
+		nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
+	}
+
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < pfb->tile.regions; i++)
+		engine->tile_prog(engine, i);
+
+	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
+	nv_wr32(priv, NV10_PGRAPH_STATE, 0xFFFFFFFF);
+
+	nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
+	return 0;
+}
+
+static int
+nv10_graph_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv10_graph_priv *priv = (void *)object;
+	return nouveau_graph_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv10_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_graph_ctor,
+		.dtor = nv10_graph_dtor,
+		.init = nv10_graph_init,
+		.fini = nv10_graph_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
new file mode 100644
index 0000000..b245593
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -0,0 +1,384 @@
+#include <core/client.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/handle.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+#include <engine/fifo.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv20_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+	{ 0x0096, &nv04_graph_ofuncs, NULL }, /* celsius */
+	{ 0x0097, &nv04_graph_ofuncs, NULL }, /* kelvin */
+	{ 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
+	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv20_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv20_graph_chan *chan;
+	int ret, i;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
+					   0x37f0, 16, NVOBJ_FLAG_ZERO_ALLOC,
+					   &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->chid = nouveau_fifo_chan(parent)->chid;
+
+	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
+	nv_wo32(chan, 0x033c, 0xffff0000);
+	nv_wo32(chan, 0x03a0, 0x0fff0000);
+	nv_wo32(chan, 0x03a4, 0x0fff0000);
+	nv_wo32(chan, 0x047c, 0x00000101);
+	nv_wo32(chan, 0x0490, 0x00000111);
+	nv_wo32(chan, 0x04a8, 0x44400000);
+	for (i = 0x04d4; i <= 0x04e0; i += 4)
+		nv_wo32(chan, i, 0x00030303);
+	for (i = 0x04f4; i <= 0x0500; i += 4)
+		nv_wo32(chan, i, 0x00080000);
+	for (i = 0x050c; i <= 0x0518; i += 4)
+		nv_wo32(chan, i, 0x01012000);
+	for (i = 0x051c; i <= 0x0528; i += 4)
+		nv_wo32(chan, i, 0x000105b8);
+	for (i = 0x052c; i <= 0x0538; i += 4)
+		nv_wo32(chan, i, 0x00080008);
+	for (i = 0x055c; i <= 0x0598; i += 4)
+		nv_wo32(chan, i, 0x07ff0000);
+	nv_wo32(chan, 0x05a4, 0x4b7fffff);
+	nv_wo32(chan, 0x05fc, 0x00000001);
+	nv_wo32(chan, 0x0604, 0x00004000);
+	nv_wo32(chan, 0x0610, 0x00000001);
+	nv_wo32(chan, 0x0618, 0x00040000);
+	nv_wo32(chan, 0x061c, 0x00010000);
+	for (i = 0x1c1c; i <= 0x248c; i += 16) {
+		nv_wo32(chan, (i + 0), 0x10700ff9);
+		nv_wo32(chan, (i + 4), 0x0436086c);
+		nv_wo32(chan, (i + 8), 0x000c001b);
+	}
+	nv_wo32(chan, 0x281c, 0x3f800000);
+	nv_wo32(chan, 0x2830, 0x3f800000);
+	nv_wo32(chan, 0x285c, 0x40000000);
+	nv_wo32(chan, 0x2860, 0x3f800000);
+	nv_wo32(chan, 0x2864, 0x3f000000);
+	nv_wo32(chan, 0x286c, 0x40000000);
+	nv_wo32(chan, 0x2870, 0x3f800000);
+	nv_wo32(chan, 0x2878, 0xbf800000);
+	nv_wo32(chan, 0x2880, 0xbf800000);
+	nv_wo32(chan, 0x34a4, 0x000fe000);
+	nv_wo32(chan, 0x3530, 0x000003f8);
+	nv_wo32(chan, 0x3540, 0x002fe000);
+	for (i = 0x355c; i <= 0x3578; i += 4)
+		nv_wo32(chan, i, 0x001c527c);
+	return 0;
+}
+
+int
+nv20_graph_context_init(struct nouveau_object *object)
+{
+	struct nv20_graph_priv *priv = (void *)object->engine;
+	struct nv20_graph_chan *chan = (void *)object;
+	int ret;
+
+	ret = nouveau_graph_context_init(&chan->base);
+	if (ret)
+		return ret;
+
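+	/* point this channel's slot in the PGRAPH context table at the
+	 * context object; entries hold the address in 16-byte units */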
+	nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
+	return 0;
+}
+
+int
+nv20_graph_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv20_graph_priv *priv = (void *)object->engine;
+	struct nv20_graph_chan *chan = (void *)object;
+	int chid = -1;
+
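+	/* freeze the fifo interface; if this channel's context is resident on
+	 * PGRAPH, the writes below appear to trigger a context save and then
+	 * mark the engine as owned by no channel (chid 0x1f) */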
+	nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
+	if (nv_rd32(priv, 0x400144) & 0x00010000)
+		chid = (nv_rd32(priv, 0x400148) & 0x1f000000) >> 24;
+	if (chan->chid == chid) {
+		nv_wr32(priv, 0x400784, nv_gpuobj(chan)->addr >> 4);
+		nv_wr32(priv, 0x400788, 0x00000002);
+		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+		nv_wr32(priv, 0x400144, 0x10000000);
+		nv_mask(priv, 0x400148, 0xff000000, 0x1f000000);
+	}
+	nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
+
+	nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000);
+	return nouveau_graph_context_fini(&chan->base, suspend);
+}
+
+static struct nouveau_oclass
+nv20_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x20),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv20_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = nv20_graph_context_init,
+		.fini = nv20_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+void
+nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
+{
+	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+	struct nouveau_fifo *pfifo = nouveau_fifo(engine);
+	struct nv20_graph_priv *priv = (void *)engine;
+	unsigned long flags;
+
+	pfifo->pause(pfifo, &flags);
+	nv04_graph_idle(priv);
+
+	nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
+
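+	/* mirror the same tile setup through the RDI index/data port, which
+	 * appears to feed an internal copy of the tiling state */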
+	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit);
+	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->pitch);
+	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
+
+	if (nv_device(engine)->chipset != 0x34) {
+		nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+	}
+
+	pfifo->start(pfifo, &flags);
+}
+
+void
+nv20_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle;
+	struct nv20_graph_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
+	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
+	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	u32 chid = (addr & 0x01f00000) >> 20;
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00001ffc);
+	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
+	u32 show = stat;
+
+	engctx = nouveau_engctx_get(engine, chid);
+	if (stat & NV_PGRAPH_INTR_ERROR) {
+		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+			handle = nouveau_handle_get_class(engctx, class);
+			if (handle && !nv_call(handle->object, mthd, data))
+				show &= ~NV_PGRAPH_INTR_ERROR;
+			nouveau_handle_put(handle);
+		}
+	}
+
+	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
+	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+
+	if (show) {
+		nv_error(priv, "%s", "");
+		nouveau_bitfield_print(nv10_graph_intr_name, show);
+		pr_cont(" nsource:");
+		nouveau_bitfield_print(nv04_graph_nsource, nsource);
+		pr_cont(" nstatus:");
+		nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+		pr_cont("\n");
+		nv_error(priv,
+			 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+			 chid, nouveau_client_name(engctx), subc, class, mthd,
+			 data);
+	}
+
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nv20_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv20_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv20_graph_intr;
+	nv_engine(priv)->cclass = &nv20_graph_cclass;
+	nv_engine(priv)->sclass = nv20_graph_sclass;
+	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+	return 0;
+}
+
+void
+nv20_graph_dtor(struct nouveau_object *object)
+{
+	struct nv20_graph_priv *priv = (void *)object;
+	nouveau_gpuobj_ref(NULL, &priv->ctxtab);
+	nouveau_graph_destroy(&priv->base);
+}
+
+int
+nv20_graph_init(struct nouveau_object *object)
+{
+	struct nouveau_engine *engine = nv_engine(object);
+	struct nv20_graph_priv *priv = (void *)engine;
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	u32 tmp, vramsz;
+	int ret, i;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
+
+	if (nv_device(priv)->chipset == 0x20) {
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
+		for (i = 0; i < 15; i++)
+			nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
+		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+	} else {
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
+		for (i = 0; i < 32; i++)
+			nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
+		nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+	}
+
+	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
+	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
+	nv_wr32(priv, 0x40009C           , 0x00000040);
+
+	if (nv_device(priv)->chipset >= 0x25) {
+		nv_wr32(priv, 0x400890, 0x00a8cfff);
+		nv_wr32(priv, 0x400610, 0x304B1FB6);
+		nv_wr32(priv, 0x400B80, 0x1cbd3883);
+		nv_wr32(priv, 0x400B84, 0x44000000);
+		nv_wr32(priv, 0x400098, 0x40000080);
+		nv_wr32(priv, 0x400B88, 0x000000ff);
+
+	} else {
+		nv_wr32(priv, 0x400880, 0x0008c7df);
+		nv_wr32(priv, 0x400094, 0x00000005);
+		nv_wr32(priv, 0x400B80, 0x45eae20e);
+		nv_wr32(priv, 0x400B84, 0x24000000);
+		nv_wr32(priv, 0x400098, 0x00000040);
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
+	}
+
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < pfb->tile.regions; i++)
+		engine->tile_prog(engine, i);
+
+	nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324));
+	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
+	nv_wr32(priv, NV10_PGRAPH_RDI_DATA, nv_rd32(priv, 0x100324));
+
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+
+	tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) & 0x0007ff00;
+	nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
+	tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) | 0x00020100;
+	nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
+
+	/* begin RAM config */
+	vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
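+	/* mirror the framebuffer configuration (0x100200/0x100204) into
+	 * PGRAPH, and program the aperture limit registers from the BAR
+	 * length probed above */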
+	nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
+	nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
+	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+	nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100200));
+	nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+	nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100204));
+	nv_wr32(priv, 0x400820, 0);
+	nv_wr32(priv, 0x400824, 0);
+	nv_wr32(priv, 0x400864, vramsz - 1);
+	nv_wr32(priv, 0x400868, vramsz - 1);
+
+	/* note: the writes below overwrite some of the tile setup done above */
+	nv_wr32(priv, 0x400B20, 0x00000000);
+	nv_wr32(priv, 0x400B04, 0xFFFFFFFF);
+
+	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
+	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
+	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
+	nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+	return 0;
+}
+
+struct nouveau_oclass
+nv20_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x20),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv20_graph_ctor,
+		.dtor = nv20_graph_dtor,
+		.init = nv20_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
new file mode 100644
index 0000000..2bea731
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
@@ -0,0 +1,31 @@
+#ifndef __NV20_GRAPH_H__
+#define __NV20_GRAPH_H__
+
+#include <core/enum.h>
+
+#include <engine/graph.h>
+#include <engine/fifo.h>
+
+struct nv20_graph_priv {
+	struct nouveau_graph base;
+	struct nouveau_gpuobj *ctxtab;
+};
+
+struct nv20_graph_chan {
+	struct nouveau_graph_chan base;
+	int chid;
+};
+
+extern struct nouveau_oclass nv25_graph_sclass[];
+int  nv20_graph_context_init(struct nouveau_object *);
+int  nv20_graph_context_fini(struct nouveau_object *, bool);
+
+void nv20_graph_tile_prog(struct nouveau_engine *, int);
+void nv20_graph_intr(struct nouveau_subdev *);
+
+void nv20_graph_dtor(struct nouveau_object *);
+int  nv20_graph_init(struct nouveau_object *);
+
+int  nv30_graph_init(struct nouveau_object *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
new file mode 100644
index 0000000..7a80d00
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
@@ -0,0 +1,167 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+struct nouveau_oclass
+nv25_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+	{ 0x0096, &nv04_graph_ofuncs, NULL }, /* celsius */
+	{ 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
+	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+	{ 0x0597, &nv04_graph_ofuncs, NULL }, /* kelvin */
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv25_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv20_graph_chan *chan;
+	int ret, i;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x3724,
+					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->chid = nouveau_fifo_chan(parent)->chid;
+
+	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
+	nv_wo32(chan, 0x035c, 0xffff0000);
+	nv_wo32(chan, 0x03c0, 0x0fff0000);
+	nv_wo32(chan, 0x03c4, 0x0fff0000);
+	nv_wo32(chan, 0x049c, 0x00000101);
+	nv_wo32(chan, 0x04b0, 0x00000111);
+	nv_wo32(chan, 0x04c8, 0x00000080);
+	nv_wo32(chan, 0x04cc, 0xffff0000);
+	nv_wo32(chan, 0x04d0, 0x00000001);
+	nv_wo32(chan, 0x04e4, 0x44400000);
+	nv_wo32(chan, 0x04fc, 0x4b800000);
+	for (i = 0x0510; i <= 0x051c; i += 4)
+		nv_wo32(chan, i, 0x00030303);
+	for (i = 0x0530; i <= 0x053c; i += 4)
+		nv_wo32(chan, i, 0x00080000);
+	for (i = 0x0548; i <= 0x0554; i += 4)
+		nv_wo32(chan, i, 0x01012000);
+	for (i = 0x0558; i <= 0x0564; i += 4)
+		nv_wo32(chan, i, 0x000105b8);
+	for (i = 0x0568; i <= 0x0574; i += 4)
+		nv_wo32(chan, i, 0x00080008);
+	for (i = 0x0598; i <= 0x05d4; i += 4)
+		nv_wo32(chan, i, 0x07ff0000);
+	nv_wo32(chan, 0x05e0, 0x4b7fffff);
+	nv_wo32(chan, 0x0620, 0x00000080);
+	nv_wo32(chan, 0x0624, 0x30201000);
+	nv_wo32(chan, 0x0628, 0x70605040);
+	nv_wo32(chan, 0x062c, 0xb0a09080);
+	nv_wo32(chan, 0x0630, 0xf0e0d0c0);
+	nv_wo32(chan, 0x0664, 0x00000001);
+	nv_wo32(chan, 0x066c, 0x00004000);
+	nv_wo32(chan, 0x0678, 0x00000001);
+	nv_wo32(chan, 0x0680, 0x00040000);
+	nv_wo32(chan, 0x0684, 0x00010000);
+	for (i = 0x1b04; i <= 0x2374; i += 16) {
+		nv_wo32(chan, (i + 0), 0x10700ff9);
+		nv_wo32(chan, (i + 4), 0x0436086c);
+		nv_wo32(chan, (i + 8), 0x000c001b);
+	}
+	nv_wo32(chan, 0x2704, 0x3f800000);
+	nv_wo32(chan, 0x2718, 0x3f800000);
+	nv_wo32(chan, 0x2744, 0x40000000);
+	nv_wo32(chan, 0x2748, 0x3f800000);
+	nv_wo32(chan, 0x274c, 0x3f000000);
+	nv_wo32(chan, 0x2754, 0x40000000);
+	nv_wo32(chan, 0x2758, 0x3f800000);
+	nv_wo32(chan, 0x2760, 0xbf800000);
+	nv_wo32(chan, 0x2768, 0xbf800000);
+	nv_wo32(chan, 0x308c, 0x000fe000);
+	nv_wo32(chan, 0x3108, 0x000003f8);
+	nv_wo32(chan, 0x3468, 0x002fe000);
+	for (i = 0x3484; i <= 0x34a0; i += 4)
+		nv_wo32(chan, i, 0x001c527c);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv25_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x25),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv25_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = nv20_graph_context_init,
+		.fini = nv20_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv20_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv20_graph_intr;
+	nv_engine(priv)->cclass = &nv25_graph_cclass;
+	nv_engine(priv)->sclass = nv25_graph_sclass;
+	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv25_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x25),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv25_graph_ctor,
+		.dtor = nv20_graph_dtor,
+		.init = nv20_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
new file mode 100644
index 0000000..3e1f32e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
@@ -0,0 +1,134 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv2a_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv20_graph_chan *chan;
+	int ret, i;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x36b0,
+					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->chid = nouveau_fifo_chan(parent)->chid;
+
+	nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
+	nv_wo32(chan, 0x033c, 0xffff0000);
+	nv_wo32(chan, 0x03a0, 0x0fff0000);
+	nv_wo32(chan, 0x03a4, 0x0fff0000);
+	nv_wo32(chan, 0x047c, 0x00000101);
+	nv_wo32(chan, 0x0490, 0x00000111);
+	nv_wo32(chan, 0x04a8, 0x44400000);
+	for (i = 0x04d4; i <= 0x04e0; i += 4)
+		nv_wo32(chan, i, 0x00030303);
+	for (i = 0x04f4; i <= 0x0500; i += 4)
+		nv_wo32(chan, i, 0x00080000);
+	for (i = 0x050c; i <= 0x0518; i += 4)
+		nv_wo32(chan, i, 0x01012000);
+	for (i = 0x051c; i <= 0x0528; i += 4)
+		nv_wo32(chan, i, 0x000105b8);
+	for (i = 0x052c; i <= 0x0538; i += 4)
+		nv_wo32(chan, i, 0x00080008);
+	for (i = 0x055c; i <= 0x0598; i += 4)
+		nv_wo32(chan, i, 0x07ff0000);
+	nv_wo32(chan, 0x05a4, 0x4b7fffff);
+	nv_wo32(chan, 0x05fc, 0x00000001);
+	nv_wo32(chan, 0x0604, 0x00004000);
+	nv_wo32(chan, 0x0610, 0x00000001);
+	nv_wo32(chan, 0x0618, 0x00040000);
+	nv_wo32(chan, 0x061c, 0x00010000);
+	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /* XXX: check this range */
+		nv_wo32(chan, (i + 0), 0x10700ff9);
+		nv_wo32(chan, (i + 4), 0x0436086c);
+		nv_wo32(chan, (i + 8), 0x000c001b);
+	}
+	nv_wo32(chan, 0x269c, 0x3f800000);
+	nv_wo32(chan, 0x26b0, 0x3f800000);
+	nv_wo32(chan, 0x26dc, 0x40000000);
+	nv_wo32(chan, 0x26e0, 0x3f800000);
+	nv_wo32(chan, 0x26e4, 0x3f000000);
+	nv_wo32(chan, 0x26ec, 0x40000000);
+	nv_wo32(chan, 0x26f0, 0x3f800000);
+	nv_wo32(chan, 0x26f8, 0xbf800000);
+	nv_wo32(chan, 0x2700, 0xbf800000);
+	nv_wo32(chan, 0x3024, 0x000fe000);
+	nv_wo32(chan, 0x30a0, 0x000003f8);
+	nv_wo32(chan, 0x33fc, 0x002fe000);
+	for (i = 0x341c; i <= 0x3438; i += 4)
+		nv_wo32(chan, i, 0x001c527c);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv2a_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x2a),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv2a_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = nv20_graph_context_init,
+		.fini = nv20_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv20_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv20_graph_intr;
+	nv_engine(priv)->cclass = &nv2a_graph_cclass;
+	nv_engine(priv)->sclass = nv25_graph_sclass;
+	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv2a_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x2a),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv2a_graph_ctor,
+		.dtor = nv20_graph_dtor,
+		.init = nv20_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
new file mode 100644
index 0000000..e451db3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
@@ -0,0 +1,238 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv30_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+	{ 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
+	{ 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
+	{ 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
+	{ 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
+	{ 0x0397, &nv04_graph_ofuncs, NULL }, /* rankine */
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv30_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv20_graph_chan *chan;
+	int ret, i;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x5f48,
+					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->chid = nouveau_fifo_chan(parent)->chid;
+
+	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
+	nv_wo32(chan, 0x0410, 0x00000101);
+	nv_wo32(chan, 0x0424, 0x00000111);
+	nv_wo32(chan, 0x0428, 0x00000060);
+	nv_wo32(chan, 0x0444, 0x00000080);
+	nv_wo32(chan, 0x0448, 0xffff0000);
+	nv_wo32(chan, 0x044c, 0x00000001);
+	nv_wo32(chan, 0x0460, 0x44400000);
+	nv_wo32(chan, 0x048c, 0xffff0000);
+	for (i = 0x04e0; i < 0x04e8; i += 4)
+		nv_wo32(chan, i, 0x0fff0000);
+	nv_wo32(chan, 0x04ec, 0x00011100);
+	for (i = 0x0508; i < 0x0548; i += 4)
+		nv_wo32(chan, i, 0x07ff0000);
+	nv_wo32(chan, 0x0550, 0x4b7fffff);
+	nv_wo32(chan, 0x058c, 0x00000080);
+	nv_wo32(chan, 0x0590, 0x30201000);
+	nv_wo32(chan, 0x0594, 0x70605040);
+	nv_wo32(chan, 0x0598, 0xb8a89888);
+	nv_wo32(chan, 0x059c, 0xf8e8d8c8);
+	nv_wo32(chan, 0x05b0, 0xb0000000);
+	for (i = 0x0600; i < 0x0640; i += 4)
+		nv_wo32(chan, i, 0x00010588);
+	for (i = 0x0640; i < 0x0680; i += 4)
+		nv_wo32(chan, i, 0x00030303);
+	for (i = 0x06c0; i < 0x0700; i += 4)
+		nv_wo32(chan, i, 0x0008aae4);
+	for (i = 0x0700; i < 0x0740; i += 4)
+		nv_wo32(chan, i, 0x01012000);
+	for (i = 0x0740; i < 0x0780; i += 4)
+		nv_wo32(chan, i, 0x00080008);
+	nv_wo32(chan, 0x085c, 0x00040000);
+	nv_wo32(chan, 0x0860, 0x00010000);
+	for (i = 0x0864; i < 0x0874; i += 4)
+		nv_wo32(chan, i, 0x00040004);
+	/* three words per 16-byte stride; nv_wo32 takes byte offsets */
+	for (i = 0x1f18; i <= 0x3088; i += 16) {
+		nv_wo32(chan, i + 0, 0x10700ff9);
+		nv_wo32(chan, i + 4, 0x0436086c);
+		nv_wo32(chan, i + 8, 0x000c001b);
+	}
+	for (i = 0x30b8; i < 0x30c8; i += 4)
+		nv_wo32(chan, i, 0x0000ffff);
+	nv_wo32(chan, 0x344c, 0x3f800000);
+	nv_wo32(chan, 0x3808, 0x3f800000);
+	nv_wo32(chan, 0x381c, 0x3f800000);
+	nv_wo32(chan, 0x3848, 0x40000000);
+	nv_wo32(chan, 0x384c, 0x3f800000);
+	nv_wo32(chan, 0x3850, 0x3f000000);
+	nv_wo32(chan, 0x3858, 0x40000000);
+	nv_wo32(chan, 0x385c, 0x3f800000);
+	nv_wo32(chan, 0x3864, 0xbf800000);
+	nv_wo32(chan, 0x386c, 0xbf800000);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv30_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x30),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv30_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = nv20_graph_context_init,
+		.fini = nv20_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv30_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv20_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv20_graph_intr;
+	nv_engine(priv)->cclass = &nv30_graph_cclass;
+	nv_engine(priv)->sclass = nv30_graph_sclass;
+	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+	return 0;
+}
+
+int
+nv30_graph_init(struct nouveau_object *object)
+{
+	struct nouveau_engine *engine = nv_engine(object);
+	struct nv20_graph_priv *priv = (void *)engine;
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	int ret, i;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
+
+	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+	nv_wr32(priv, 0x400890, 0x01b463ff);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
+	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
+	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
+	nv_wr32(priv, 0x400B80, 0x1003d888);
+	nv_wr32(priv, 0x400B84, 0x0c000000);
+	nv_wr32(priv, 0x400098, 0x00000000);
+	nv_wr32(priv, 0x40009C, 0x0005ad00);
+	nv_wr32(priv, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
+	nv_wr32(priv, 0x4000a0, 0x00000000);
+	nv_wr32(priv, 0x4000a4, 0x00000008);
+	nv_wr32(priv, 0x4008a8, 0xb784a400);
+	nv_wr32(priv, 0x400ba0, 0x002f8685);
+	nv_wr32(priv, 0x400ba4, 0x00231f3f);
+	nv_wr32(priv, 0x4008a4, 0x40000020);
+
+	if (nv_device(priv)->chipset == 0x34) {
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00200201);
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000008);
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000032);
+		nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
+		nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000002);
+	}
+
+	nv_wr32(priv, 0x4000c0, 0x00000016);
+
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < pfb->tile.regions; i++)
+		engine->tile_prog(engine, i);
+
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+	nv_wr32(priv, 0x0040075c             , 0x00000001);
+
+	/* begin RAM config */
+	/* vramsz = pci_resource_len(priv->dev->pdev, 0) - 1; */
+	nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
+	nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
+	if (nv_device(priv)->chipset != 0x34) {
+		nv_wr32(priv, 0x400750, 0x00EA0000);
+		nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100200));
+		nv_wr32(priv, 0x400750, 0x00EA0004);
+		nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100204));
+	}
+	return 0;
+}
+
+struct nouveau_oclass
+nv30_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x30),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv30_graph_ctor,
+		.dtor = nv20_graph_dtor,
+		.init = nv30_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
new file mode 100644
index 0000000..9385ac7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
@@ -0,0 +1,168 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv34_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+	{ 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
+	{ 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
+	{ 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
+	{ 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
+	{ 0x0697, &nv04_graph_ofuncs, NULL }, /* rankine */
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv34_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv20_graph_chan *chan;
+	int ret, i;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x46dc,
+					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->chid = nouveau_fifo_chan(parent)->chid;
+
+	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
+	nv_wo32(chan, 0x040c, 0x01000101);
+	nv_wo32(chan, 0x0420, 0x00000111);
+	nv_wo32(chan, 0x0424, 0x00000060);
+	nv_wo32(chan, 0x0440, 0x00000080);
+	nv_wo32(chan, 0x0444, 0xffff0000);
+	nv_wo32(chan, 0x0448, 0x00000001);
+	nv_wo32(chan, 0x045c, 0x44400000);
+	nv_wo32(chan, 0x0480, 0xffff0000);
+	for (i = 0x04d4; i < 0x04dc; i += 4)
+		nv_wo32(chan, i, 0x0fff0000);
+	nv_wo32(chan, 0x04e0, 0x00011100);
+	for (i = 0x04fc; i < 0x053c; i += 4)
+		nv_wo32(chan, i, 0x07ff0000);
+	nv_wo32(chan, 0x0544, 0x4b7fffff);
+	nv_wo32(chan, 0x057c, 0x00000080);
+	nv_wo32(chan, 0x0580, 0x30201000);
+	nv_wo32(chan, 0x0584, 0x70605040);
+	nv_wo32(chan, 0x0588, 0xb8a89888);
+	nv_wo32(chan, 0x058c, 0xf8e8d8c8);
+	nv_wo32(chan, 0x05a0, 0xb0000000);
+	for (i = 0x05f0; i < 0x0630; i += 4)
+		nv_wo32(chan, i, 0x00010588);
+	for (i = 0x0630; i < 0x0670; i += 4)
+		nv_wo32(chan, i, 0x00030303);
+	for (i = 0x06b0; i < 0x06f0; i += 4)
+		nv_wo32(chan, i, 0x0008aae4);
+	for (i = 0x06f0; i < 0x0730; i += 4)
+		nv_wo32(chan, i, 0x01012000);
+	for (i = 0x0730; i < 0x0770; i += 4)
+		nv_wo32(chan, i, 0x00080008);
+	nv_wo32(chan, 0x0850, 0x00040000);
+	nv_wo32(chan, 0x0854, 0x00010000);
+	for (i = 0x0858; i < 0x0868; i += 4)
+		nv_wo32(chan, i, 0x00040004);
+	/* three words per 16-byte stride; nv_wo32 takes byte offsets */
+	for (i = 0x15ac; i <= 0x271c; i += 16) {
+		nv_wo32(chan, i + 0, 0x10700ff9);
+		nv_wo32(chan, i + 4, 0x0436086c);
+		nv_wo32(chan, i + 8, 0x000c001b);
+	}
+	for (i = 0x274c; i < 0x275c; i += 4)
+		nv_wo32(chan, i, 0x0000ffff);
+	nv_wo32(chan, 0x2ae0, 0x3f800000);
+	nv_wo32(chan, 0x2e9c, 0x3f800000);
+	nv_wo32(chan, 0x2eb0, 0x3f800000);
+	nv_wo32(chan, 0x2edc, 0x40000000);
+	nv_wo32(chan, 0x2ee0, 0x3f800000);
+	nv_wo32(chan, 0x2ee4, 0x3f000000);
+	nv_wo32(chan, 0x2eec, 0x40000000);
+	nv_wo32(chan, 0x2ef0, 0x3f800000);
+	nv_wo32(chan, 0x2ef8, 0xbf800000);
+	nv_wo32(chan, 0x2f00, 0xbf800000);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv34_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x34),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv34_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = nv20_graph_context_init,
+		.fini = nv20_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv34_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv20_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv20_graph_intr;
+	nv_engine(priv)->cclass = &nv34_graph_cclass;
+	nv_engine(priv)->sclass = nv34_graph_sclass;
+	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv34_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x34),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv34_graph_ctor,
+		.dtor = nv20_graph_dtor,
+		.init = nv30_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
new file mode 100644
index 0000000..9ce84b7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
@@ -0,0 +1,166 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv35_graph_sclass[] = {
+	{ 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+	{ 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+	{ 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
+	{ 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
+	{ 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
+	{ 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
+	{ 0x0497, &nv04_graph_ofuncs, NULL }, /* rankine */
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv35_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv20_graph_chan *chan;
+	int ret, i;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x577c,
+					   16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->chid = nouveau_fifo_chan(parent)->chid;
+
+	nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
+	nv_wo32(chan, 0x040c, 0x00000101);
+	nv_wo32(chan, 0x0420, 0x00000111);
+	nv_wo32(chan, 0x0424, 0x00000060);
+	nv_wo32(chan, 0x0440, 0x00000080);
+	nv_wo32(chan, 0x0444, 0xffff0000);
+	nv_wo32(chan, 0x0448, 0x00000001);
+	nv_wo32(chan, 0x045c, 0x44400000);
+	nv_wo32(chan, 0x0488, 0xffff0000);
+	for (i = 0x04dc; i < 0x04e4; i += 4)
+		nv_wo32(chan, i, 0x0fff0000);
+	nv_wo32(chan, 0x04e8, 0x00011100);
+	for (i = 0x0504; i < 0x0544; i += 4)
+		nv_wo32(chan, i, 0x07ff0000);
+	nv_wo32(chan, 0x054c, 0x4b7fffff);
+	nv_wo32(chan, 0x0588, 0x00000080);
+	nv_wo32(chan, 0x058c, 0x30201000);
+	nv_wo32(chan, 0x0590, 0x70605040);
+	nv_wo32(chan, 0x0594, 0xb8a89888);
+	nv_wo32(chan, 0x0598, 0xf8e8d8c8);
+	nv_wo32(chan, 0x05ac, 0xb0000000);
+	for (i = 0x0604; i < 0x0644; i += 4)
+		nv_wo32(chan, i, 0x00010588);
+	for (i = 0x0644; i < 0x0684; i += 4)
+		nv_wo32(chan, i, 0x00030303);
+	for (i = 0x06c4; i < 0x0704; i += 4)
+		nv_wo32(chan, i, 0x0008aae4);
+	for (i = 0x0704; i < 0x0744; i += 4)
+		nv_wo32(chan, i, 0x01012000);
+	for (i = 0x0744; i < 0x0784; i += 4)
+		nv_wo32(chan, i, 0x00080008);
+	nv_wo32(chan, 0x0860, 0x00040000);
+	nv_wo32(chan, 0x0864, 0x00010000);
+	for (i = 0x0868; i < 0x0878; i += 4)
+		nv_wo32(chan, i, 0x00040004);
+	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
+		nv_wo32(chan, i + 0, 0x10700ff9);
+		nv_wo32(chan, i + 4, 0x0436086c);
+		nv_wo32(chan, i + 8, 0x000c001b);
+	}
+	for (i = 0x30bc; i < 0x30cc; i += 4)
+		nv_wo32(chan, i, 0x0000ffff);
+	nv_wo32(chan, 0x3450, 0x3f800000);
+	nv_wo32(chan, 0x380c, 0x3f800000);
+	nv_wo32(chan, 0x3820, 0x3f800000);
+	nv_wo32(chan, 0x384c, 0x40000000);
+	nv_wo32(chan, 0x3850, 0x3f800000);
+	nv_wo32(chan, 0x3854, 0x3f000000);
+	nv_wo32(chan, 0x385c, 0x40000000);
+	nv_wo32(chan, 0x3860, 0x3f800000);
+	nv_wo32(chan, 0x3868, 0xbf800000);
+	nv_wo32(chan, 0x3870, 0xbf800000);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv35_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x35),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv35_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = nv20_graph_context_init,
+		.fini = nv20_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv20_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv20_graph_intr;
+	nv_engine(priv)->cclass = &nv35_graph_cclass;
+	nv_engine(priv)->sclass = nv35_graph_sclass;
+	nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv35_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x35),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv35_graph_ctor,
+		.dtor = nv20_graph_dtor,
+		.init = nv30_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
new file mode 100644
index 0000000..193a5de
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -0,0 +1,537 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/handle.h>
+#include <core/engctx.h>
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+
+#include <engine/graph.h>
+#include <engine/fifo.h>
+
+#include "nv40.h"
+#include "regs.h"
+
+struct nv40_graph_priv {
+	struct nouveau_graph base;
+	u32 size;
+};
+
+struct nv40_graph_chan {
+	struct nouveau_graph_chan base;
+};
+
+static u64
+nv40_graph_units(struct nouveau_graph *graph)
+{
+	struct nv40_graph_priv *priv = (void *)graph;
+
+	return nv_rd32(priv, 0x1540);
+}
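+
+/* 0x1540 is evidently a per-unit presence/enable mask: nv40_graph_init()
+ * below consults the same register when programming per-unit state.
+ */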
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static int
+nv40_graph_object_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj *obj;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    20, 16, 0, &obj);
+	*pobject = nv_object(obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+#ifdef __BIG_ENDIAN
+	nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
+#endif
+	nv_wo32(obj, 0x0c, 0x00000000);
+	nv_wo32(obj, 0x10, 0x00000000);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv40_graph_ofuncs = {
+	.ctor = nv40_graph_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv40_graph_sclass[] = {
+	{ 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
+	{ 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
+	{ 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
+	{ 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
+	{ 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
+	{ 0x4097, &nv40_graph_ofuncs, NULL }, /* curie */
+	{},
+};
+
+static struct nouveau_oclass
+nv44_graph_sclass[] = {
+	{ 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
+	{ 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
+	{ 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
+	{ 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
+	{ 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
+	{ 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
+	{ 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
+	{ 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
+	{ 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
+	{ 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
+	{ 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
+	{ 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
+	{ 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
+	{ 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
+	{ 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
+	{ 0x4497, &nv40_graph_ofuncs, NULL }, /* curie */
+	{},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv40_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv40_graph_priv *priv = (void *)engine;
+	struct nv40_graph_chan *chan;
+	int ret;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
+					   priv->size, 16,
+					   NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan));
+	nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
+	return 0;
+}
+
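+/* Channel context unload: if this channel is resident in PGRAPH
+ * (0x40032c), a ctxprog save is forced and waited upon on suspend before
+ * the valid bits of what appear to be the current (0x40032c) and next
+ * (0x400330) context pointers are cleared.  Register roles inferred from
+ * the code below rather than documentation.
+ */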
+static int
+nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv40_graph_priv *priv = (void *)object->engine;
+	struct nv40_graph_chan *chan = (void *)object;
+	u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
+	int ret = 0;
+
+	nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
+
+	if (nv_rd32(priv, 0x40032c) == inst) {
+		if (suspend) {
+			nv_wr32(priv, 0x400720, 0x00000000);
+			nv_wr32(priv, 0x400784, inst);
+			nv_mask(priv, 0x400310, 0x00000020, 0x00000020);
+			nv_mask(priv, 0x400304, 0x00000001, 0x00000001);
+			if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) {
+				u32 insn = nv_rd32(priv, 0x400308);
+				nv_warn(priv, "ctxprog timeout 0x%08x\n", insn);
+				ret = -EBUSY;
+			}
+		}
+
+		nv_mask(priv, 0x40032c, 0x01000000, 0x00000000);
+	}
+
+	if (nv_rd32(priv, 0x400330) == inst)
+		nv_mask(priv, 0x400330, 0x01000000, 0x00000000);
+
+	nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
+	return ret;
+}
+
+static struct nouveau_oclass
+nv40_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = _nouveau_graph_context_init,
+		.fini = nv40_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
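+/* Mirror the PFB tile region setup into PGRAPH.  PFIFO is paused and
+ * PGRAPH drained first; chipsets with a second bank of tile registers
+ * (and, where present, ZCOMP registers) get the same values written to
+ * both banks.
+ */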
+static void
+nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
+{
+	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+	struct nouveau_fifo *pfifo = nouveau_fifo(engine);
+	struct nv40_graph_priv *priv = (void *)engine;
+	unsigned long flags;
+
+	pfifo->pause(pfifo, &flags);
+	nv04_graph_idle(priv);
+
+	switch (nv_device(priv)->chipset) {
+	case 0x40:
+	case 0x41:
+	case 0x42:
+	case 0x43:
+	case 0x45:
+	case 0x4e:
+		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+		switch (nv_device(priv)->chipset) {
+		case 0x40:
+		case 0x45:
+			nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+			nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
+			break;
+		case 0x41:
+		case 0x42:
+		case 0x43:
+			nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
+			nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
+			break;
+		default:
+			break;
+		}
+		break;
+	case 0x44:
+	case 0x4a:
+		nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
+		break;
+	case 0x46:
+	case 0x4c:
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+	case 0x63:
+	case 0x67:
+	case 0x68:
+		nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+		switch (nv_device(priv)->chipset) {
+		case 0x47:
+		case 0x49:
+		case 0x4b:
+			nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
+			nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	pfifo->start(pfifo, &flags);
+}
+
+static void
+nv40_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle = NULL;
+	struct nv40_graph_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
+	u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
+	u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
+	u32 inst = nv_rd32(priv, 0x40032c) & 0x000fffff;
+	u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00001ffc);
+	u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
+	u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff;
+	u32 show = stat;
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & NV_PGRAPH_INTR_ERROR) {
+		if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+			handle = nouveau_handle_get_class(engctx, class);
+			if (handle && !nv_call(handle->object, mthd, data))
+				show &= ~NV_PGRAPH_INTR_ERROR;
+			nouveau_handle_put(handle);
+		}
+
+		if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION)
+			nv_mask(priv, 0x402000, 0, 0);
+	}
+
+	nv_wr32(priv, NV03_PGRAPH_INTR, stat);
+	nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+
+	if (show) {
+		nv_error(priv, "%s", "");
+		nouveau_bitfield_print(nv10_graph_intr_name, show);
+		pr_cont(" nsource:");
+		nouveau_bitfield_print(nv04_graph_nsource, nsource);
+		pr_cont(" nstatus:");
+		nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+		pr_cont("\n");
+		nv_error(priv,
+			 "ch %d [0x%08x %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+			 chid, inst << 4, nouveau_client_name(engctx), subc,
+			 class, mthd, data);
+	}
+
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv40_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00001000;
+	nv_subdev(priv)->intr = nv40_graph_intr;
+	nv_engine(priv)->cclass = &nv40_graph_cclass;
+	if (nv44_graph_class(priv))
+		nv_engine(priv)->sclass = nv44_graph_sclass;
+	else
+		nv_engine(priv)->sclass = nv40_graph_sclass;
+	nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
+
+	priv->base.units = nv40_graph_units;
+	return 0;
+}
+
+static int
+nv40_graph_init(struct nouveau_object *object)
+{
+	struct nouveau_engine *engine = nv_engine(object);
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	struct nv40_graph_priv *priv = (void *)engine;
+	int ret, i, j;
+	u32 vramsz;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* generate and upload context program */
+	ret = nv40_grctx_init(nv_device(priv), &priv->size);
+	if (ret)
+		return ret;
+
+	/* No context present currently */
+	nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+
+	nv_wr32(priv, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
+	nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+	nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
+	nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
+	nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
+
+	nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+	nv_wr32(priv, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
+
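+	/* write the index of the first enabled unit in the 0x1540 mask
+	 * (ffs(j) - 1, in effect) to 0x405000; the register's purpose is
+	 * not documented here */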
+	j = nv_rd32(priv, 0x1540) & 0xff;
+	if (j) {
+		for (i = 0; !(j & 1); j >>= 1, i++)
+			;
+		nv_wr32(priv, 0x405000, i);
+	}
+
+	if (nv_device(priv)->chipset == 0x40) {
+		nv_wr32(priv, 0x4009b0, 0x83280fff);
+		nv_wr32(priv, 0x4009b4, 0x000000a0);
+	} else {
+		nv_wr32(priv, 0x400820, 0x83280eff);
+		nv_wr32(priv, 0x400824, 0x000000a0);
+	}
+
+	switch (nv_device(priv)->chipset) {
+	case 0x40:
+	case 0x45:
+		nv_wr32(priv, 0x4009b8, 0x0078e366);
+		nv_wr32(priv, 0x4009bc, 0x0000014c);
+		break;
+	case 0x41:
+	case 0x42: /* pciid also 0x00Cx */
+	/* case 0x0120: XXX (pciid) */
+		nv_wr32(priv, 0x400828, 0x007596ff);
+		nv_wr32(priv, 0x40082c, 0x00000108);
+		break;
+	case 0x43:
+		nv_wr32(priv, 0x400828, 0x0072cb77);
+		nv_wr32(priv, 0x40082c, 0x00000108);
+		break;
+	case 0x44:
+	case 0x46: /* G72 */
+	case 0x4a:
+	case 0x4c: /* G7x-based C51 */
+	case 0x4e:
+		nv_wr32(priv, 0x400860, 0);
+		nv_wr32(priv, 0x400864, 0);
+		break;
+	case 0x47: /* G70 */
+	case 0x49: /* G71 */
+	case 0x4b: /* G73 */
+		nv_wr32(priv, 0x400828, 0x07830610);
+		nv_wr32(priv, 0x40082c, 0x0000016A);
+		break;
+	default:
+		break;
+	}
+
+	nv_wr32(priv, 0x400b38, 0x2ffff800);
+	nv_wr32(priv, 0x400b3c, 0x00006000);
+
+	/* Tiling-related setup. */
+	switch (nv_device(priv)->chipset) {
+	case 0x44:
+	case 0x4a:
+		nv_wr32(priv, 0x400bc4, 0x1003d888);
+		nv_wr32(priv, 0x400bbc, 0xb7a7b500);
+		break;
+	case 0x46:
+		nv_wr32(priv, 0x400bc4, 0x0000e024);
+		nv_wr32(priv, 0x400bbc, 0xb7a7b520);
+		break;
+	case 0x4c:
+	case 0x4e:
+	case 0x67:
+		nv_wr32(priv, 0x400bc4, 0x1003d888);
+		nv_wr32(priv, 0x400bbc, 0xb7a7b540);
+		break;
+	default:
+		break;
+	}
+
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < pfb->tile.regions; i++)
+		engine->tile_prog(engine, i);
+
+	/* begin RAM config */
+	vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
+	switch (nv_device(priv)->chipset) {
+	case 0x40:
+		nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
+		nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
+		nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200));
+		nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204));
+		nv_wr32(priv, 0x400820, 0);
+		nv_wr32(priv, 0x400824, 0);
+		nv_wr32(priv, 0x400864, vramsz);
+		nv_wr32(priv, 0x400868, vramsz);
+		break;
+	default:
+		switch (nv_device(priv)->chipset) {
+		case 0x41:
+		case 0x42:
+		case 0x43:
+		case 0x45:
+		case 0x4e:
+		case 0x44:
+		case 0x4a:
+			nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200));
+			nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204));
+			break;
+		default:
+			nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200));
+			nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204));
+			break;
+		}
+		nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200));
+		nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204));
+		nv_wr32(priv, 0x400840, 0);
+		nv_wr32(priv, 0x400844, 0);
+		nv_wr32(priv, 0x4008A0, vramsz);
+		nv_wr32(priv, 0x4008A4, vramsz);
+		break;
+	}
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv40_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_graph_ctor,
+		.dtor = _nouveau_graph_dtor,
+		.init = nv40_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
new file mode 100644
index 0000000..7da35a4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
@@ -0,0 +1,21 @@
+#ifndef __NV40_GRAPH_H__
+#define __NV40_GRAPH_H__
+
+/* returns 1 if the device is one of the nv4x chipsets that use the 0x4497
+ * object class, which is useful for determining a number of other hardware
+ * features
+ */
+static inline int
+nv44_graph_class(void *priv)
+{
+	struct nouveau_device *device = nv_device(priv);
+
+	if ((device->chipset & 0xf0) == 0x60)
+		return 1;
+
+	return !(0x0baf & (1 << (device->chipset & 0x0f)));
+}
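+
+/* Worked example of the mask above: 0x0baf has bits 0-3, 5, 7-9 and 0xb
+ * set, so chipsets 0x40-0x43, 0x45, 0x47-0x49 and 0x4b return 0 (plain
+ * curie, object class 0x4097), while 0x44, 0x46, 0x4a and 0x4c-0x4f
+ * return 1 (nv44-class curie, 0x4497), as do the 0x6x chipsets caught by
+ * the earlier test.
+ */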
+
+int  nv40_grctx_init(struct nouveau_device *, u32 *size);
+void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
new file mode 100644
index 0000000..1ac3611
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -0,0 +1,966 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+#include <engine/graph.h>
+
+#include "nv50.h"
+
+struct nv50_graph_priv {
+	struct nouveau_graph base;
+	spinlock_t lock;
+	u32 size;
+};
+
+struct nv50_graph_chan {
+	struct nouveau_graph_chan base;
+};
+
+static u64
+nv50_graph_units(struct nouveau_graph *graph)
+{
+	struct nv50_graph_priv *priv = (void *)graph;
+
+	return nv_rd32(priv, 0x1540);
+}
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static int
+nv50_graph_object_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj *obj;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    16, 16, 0, &obj);
+	*pobject = nv_object(obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+	nv_wo32(obj, 0x0c, 0x00000000);
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv50_graph_ofuncs = {
+	.ctor = nv50_graph_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv50_graph_sclass[] = {
+	{ 0x0030, &nv50_graph_ofuncs },
+	{ 0x502d, &nv50_graph_ofuncs },
+	{ 0x5039, &nv50_graph_ofuncs },
+	{ 0x5097, &nv50_graph_ofuncs },
+	{ 0x50c0, &nv50_graph_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nv84_graph_sclass[] = {
+	{ 0x0030, &nv50_graph_ofuncs },
+	{ 0x502d, &nv50_graph_ofuncs },
+	{ 0x5039, &nv50_graph_ofuncs },
+	{ 0x50c0, &nv50_graph_ofuncs },
+	{ 0x8297, &nv50_graph_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nva0_graph_sclass[] = {
+	{ 0x0030, &nv50_graph_ofuncs },
+	{ 0x502d, &nv50_graph_ofuncs },
+	{ 0x5039, &nv50_graph_ofuncs },
+	{ 0x50c0, &nv50_graph_ofuncs },
+	{ 0x8397, &nv50_graph_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nva3_graph_sclass[] = {
+	{ 0x0030, &nv50_graph_ofuncs },
+	{ 0x502d, &nv50_graph_ofuncs },
+	{ 0x5039, &nv50_graph_ofuncs },
+	{ 0x50c0, &nv50_graph_ofuncs },
+	{ 0x8597, &nv50_graph_ofuncs },
+	{ 0x85c0, &nv50_graph_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nvaf_graph_sclass[] = {
+	{ 0x0030, &nv50_graph_ofuncs },
+	{ 0x502d, &nv50_graph_ofuncs },
+	{ 0x5039, &nv50_graph_ofuncs },
+	{ 0x50c0, &nv50_graph_ofuncs },
+	{ 0x85c0, &nv50_graph_ofuncs },
+	{ 0x8697, &nv50_graph_ofuncs },
+	{}
+};
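+
+/* In the tables above, 0x502d is the tesla-era 2D class, 0x5039 m2mf,
+ * 0x50c0/0x85c0 the compute classes, and the 0xXX97 entries the 3D class
+ * for each tesla revision; names follow common nouveau usage rather than
+ * anything stated in this file.
+ */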
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv50_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *data, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nv50_graph_priv *priv = (void *)engine;
+	struct nv50_graph_chan *chan;
+	int ret;
+
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
+					   priv->size, 0,
+					   NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan));
+	return 0;
+}
+
+static struct nouveau_oclass
+nv50_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_graph_context_ctor,
+		.dtor = _nouveau_graph_context_dtor,
+		.init = _nouveau_graph_context_init,
+		.fini = _nouveau_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv50_graph_tlb_flush(struct nouveau_engine *engine)
+{
+	nv50_vm_flush_engine(&engine->base, 0x00);
+	return 0;
+}
+
+static const struct nouveau_bitfield nv50_pgraph_status[] = {
+	{ 0x00000001, "BUSY" }, /* set when any bit is set */
+	{ 0x00000002, "DISPATCH" },
+	{ 0x00000004, "UNK2" },
+	{ 0x00000008, "UNK3" },
+	{ 0x00000010, "UNK4" },
+	{ 0x00000020, "UNK5" },
+	{ 0x00000040, "M2MF" },
+	{ 0x00000080, "UNK7" },
+	{ 0x00000100, "CTXPROG" },
+	{ 0x00000200, "VFETCH" },
+	{ 0x00000400, "CCACHE_UNK4" },
+	{ 0x00000800, "STRMOUT_GSCHED_UNK5" },
+	{ 0x00001000, "UNK14XX" },
+	{ 0x00002000, "UNK24XX_CSCHED" },
+	{ 0x00004000, "UNK1CXX" },
+	{ 0x00008000, "CLIPID" },
+	{ 0x00010000, "ZCULL" },
+	{ 0x00020000, "ENG2D" },
+	{ 0x00040000, "UNK34XX" },
+	{ 0x00080000, "TPRAST" },
+	{ 0x00100000, "TPROP" },
+	{ 0x00200000, "TEX" },
+	{ 0x00400000, "TPVP" },
+	{ 0x00800000, "MP" },
+	{ 0x01000000, "ROP" },
+	{}
+};
+
+static const char *const nv50_pgraph_vstatus_0[] = {
+	"VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_1[] = {
+	"TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_2[] = {
+	"UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX",
+	"ROP", NULL
+};
+
+static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r,
+		const char *const units[], u32 status)
+{
+	int i;
+
+	nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
+
+	for (i = 0; units[i] && status; i++) {
+		if ((status & 7) == 1)
+			pr_cont(" %s", units[i]);
+		status >>= 3;
+	}
+	if (status)
+		pr_cont(" (invalid: 0x%x)", status);
+	pr_cont("\n");
+}
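+
+/* Each unit occupies a 3-bit field in the VSTATUS word, with a value of 1
+ * apparently meaning busy (matching the idle test in nv84_graph_tlb_flush()
+ * below); a status of 0x00000009, for example, would report the first two
+ * units in the table.
+ */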
+
+static int
+nv84_graph_tlb_flush(struct nouveau_engine *engine)
+{
+	struct nouveau_timer *ptimer = nouveau_timer(engine);
+	struct nv50_graph_priv *priv = (void *)engine;
+	bool idle, timeout = false;
+	unsigned long flags;
+	u64 start;
+	u32 tmp;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_mask(priv, 0x400500, 0x00000001, 0x00000000);
+
+	start = ptimer->read(ptimer);
+	do {
+		idle = true;
+
+		for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+
+		for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+
+		for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
+			if ((tmp & 7) == 1)
+				idle = false;
+		}
+	} while (!idle &&
+		 !(timeout = ptimer->read(ptimer) - start > 2000000000));
+
+	if (timeout) {
+		nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
+
+		tmp = nv_rd32(priv, 0x400700);
+		nv_error(priv, "PGRAPH_STATUS  : 0x%08x", tmp);
+		nouveau_bitfield_print(nv50_pgraph_status, tmp);
+		pr_cont("\n");
+
+		nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0,
+				nv_rd32(priv, 0x400380));
+		nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1,
+				nv_rd32(priv, 0x400384));
+		nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2,
+				nv_rd32(priv, 0x400388));
+	}
+
+	nv50_vm_flush_engine(&engine->base, 0x00);
+
+	nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return timeout ? -EBUSY : 0;
+}
+
+static const struct nouveau_enum nv50_mp_exec_error_names[] = {
+	{ 3, "STACK_UNDERFLOW", NULL },
+	{ 4, "QUADON_ACTIVE", NULL },
+	{ 8, "TIMEOUT", NULL },
+	{ 0x10, "INVALID_OPCODE", NULL },
+	{ 0x40, "BREAKPOINT", NULL },
+	{}
+};
+
+static const struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
+	{ 0x00000001, "NOTIFY" },
+	{ 0x00000002, "IN" },
+	{ 0x00000004, "OUT" },
+	{}
+};
+
+static const struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+static const struct nouveau_bitfield nv50_graph_trap_strmout[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+static const struct nouveau_bitfield nv50_graph_trap_ccache[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+/* There must be a *lot* of these. Will take some time to gather them up. */
+const struct nouveau_enum nv50_data_error_names[] = {
+	{ 0x00000003, "INVALID_OPERATION", NULL },
+	{ 0x00000004, "INVALID_VALUE", NULL },
+	{ 0x00000005, "INVALID_ENUM", NULL },
+	{ 0x00000008, "INVALID_OBJECT", NULL },
+	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
+	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
+	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
+	{ 0x0000000c, "INVALID_BITFIELD", NULL },
+	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
+	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
+	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
+	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
+	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
+	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
+	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
+	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
+	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
+	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
+	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
+	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
+	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
+	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
+	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
+	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
+	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
+	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
+	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
+	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
+	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
+	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
+	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
+	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
+	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
+	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
+	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
+	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
+	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
+	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
+	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
+	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
+	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
+	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
+	{}
+};
+
+static const struct nouveau_bitfield nv50_graph_intr_name[] = {
+	{ 0x00000001, "NOTIFY" },
+	{ 0x00000002, "COMPUTE_QUERY" },
+	{ 0x00000010, "ILLEGAL_MTHD" },
+	{ 0x00000020, "ILLEGAL_CLASS" },
+	{ 0x00000040, "DOUBLE_NOTIFY" },
+	{ 0x00001000, "CONTEXT_SWITCH" },
+	{ 0x00010000, "BUFFER_NOTIFY" },
+	{ 0x00100000, "DATA_ERROR" },
+	{ 0x00200000, "TRAP" },
+	{ 0x01000000, "SINGLE_STEP" },
+	{}
+};
+
+static void
+nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
+{
+	u32 units = nv_rd32(priv, 0x1540);
+	u32 addr, mp10, status, pc, oplow, ophigh;
+	int i;
+	int mps = 0;
+	for (i = 0; i < 4; i++) {
+		if (!(units & 1 << (i+24)))
+			continue;
+		if (nv_device(priv)->chipset < 0xa0)
+			addr = 0x408200 + (tpid << 12) + (i << 7);
+		else
+			addr = 0x408100 + (tpid << 11) + (i << 7);
+		mp10 = nv_rd32(priv, addr + 0x10);
+		status = nv_rd32(priv, addr + 0x14);
+		if (!status)
+			continue;
+		if (display) {
+			nv_rd32(priv, addr + 0x20);
+			pc = nv_rd32(priv, addr + 0x24);
+			oplow = nv_rd32(priv, addr + 0x70);
+			ophigh = nv_rd32(priv, addr + 0x74);
+			nv_error(priv, "TRAP_MP_EXEC - "
+					"TP %d MP %d: ", tpid, i);
+			nouveau_enum_print(nv50_mp_exec_error_names, status);
+			pr_cont(" at %06x warp %d, opcode %08x %08x\n",
+					pc&0xffffff, pc >> 24,
+					oplow, ophigh);
+		}
+		nv_wr32(priv, addr + 0x10, mp10);
+		nv_wr32(priv, addr + 0x14, 0);
+		mps++;
+	}
+	if (!mps && display)
+		nv_error(priv, "TRAP_MP_EXEC - TP %d: "
+				"No MPs claiming errors?\n", tpid);
+}
+
+static void
+nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
+		u32 ustatus_new, int display, const char *name)
+{
+	int tps = 0;
+	u32 units = nv_rd32(priv, 0x1540);
+	int i, r;
+	u32 ustatus_addr, ustatus;
+	for (i = 0; i < 16; i++) {
+		if (!(units & (1 << i)))
+			continue;
+		if (nv_device(priv)->chipset < 0xa0)
+			ustatus_addr = ustatus_old + (i << 12);
+		else
+			ustatus_addr = ustatus_new + (i << 11);
+		ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff;
+		if (!ustatus)
+			continue;
+		tps++;
+		switch (type) {
+		case 6: /* texture error... unknown for now */
+			if (display) {
+				nv_error(priv, "magic set %d:\n", i);
+				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
+					nv_error(priv, "\t0x%08x: 0x%08x\n", r,
+						nv_rd32(priv, r));
+			}
+			break;
+		case 7: /* MP error */
+			if (ustatus & 0x04030000) {
+				nv50_priv_mp_trap(priv, i, display);
+				ustatus &= ~0x04030000;
+			}
+			break;
+		case 8: /* TPDMA error */
+			{
+			u32 e0c = nv_rd32(priv, ustatus_addr + 4);
+			u32 e10 = nv_rd32(priv, ustatus_addr + 8);
+			u32 e14 = nv_rd32(priv, ustatus_addr + 0xc);
+			u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
+			u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
+			u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
+			u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
+			/* 2d engine destination */
+			if (ustatus & 0x00000010) {
+				if (display) {
+					nv_error(priv, "TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					nv_error(priv, "TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000010;
+			}
+			/* Render target */
+			if (ustatus & 0x00000040) {
+				if (display) {
+					nv_error(priv, "TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					nv_error(priv, "TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000040;
+			}
+			/* CUDA memory: l[], g[] or stack. */
+			if (ustatus & 0x00000080) {
+				if (display) {
+					if (e18 & 0x80000000) {
+						/* g[] read fault? */
+						nv_error(priv, "TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 24) & 0x1f));
+						e18 &= ~0x1f000000;
+					} else if (e18 & 0xc) {
+						/* g[] write fault? */
+						nv_error(priv, "TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 7) & 0x1f));
+						e18 &= ~0x00000f80;
+					} else {
+						nv_error(priv, "TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+								i, e14, e10);
+					}
+					nv_error(priv, "TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000080;
+			}
+			}
+			break;
+		}
+		if (ustatus) {
+			if (display)
+				nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+		}
+		nv_wr32(priv, ustatus_addr, 0xc0000000);
+	}
+
+	if (!tps && display)
+		nv_warn(priv, "%s - No TPs claiming errors?\n", name);
+}
+
+static int
+nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
+			int chid, u64 inst, struct nouveau_object *engctx)
+{
+	u32 status = nv_rd32(priv, 0x400108);
+	u32 ustatus;
+
+	if (!status && display) {
+		nv_error(priv, "TRAP: no units reporting traps?\n");
+		return 1;
+	}
+
+	/* DISPATCH: Relays commands to other units and handles NOTIFY,
+	 * COND, QUERY. If you get a trap from it, the command is still stuck
+	 * in DISPATCH and you need to do something about it. */
+	if (status & 0x001) {
+		ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff;
+		if (!ustatus && display)
+			nv_error(priv, "TRAP_DISPATCH - no ustatus?\n");
+
+		nv_wr32(priv, 0x400500, 0x00000000);
+
+		/* Known to be triggered by screwed up NOTIFY and COND... */
+		if (ustatus & 0x00000001) {
+			u32 addr = nv_rd32(priv, 0x400808);
+			u32 subc = (addr & 0x00070000) >> 16;
+			u32 mthd = (addr & 0x00001ffc);
+			u32 datal = nv_rd32(priv, 0x40080c);
+			u32 datah = nv_rd32(priv, 0x400810);
+			u32 class = nv_rd32(priv, 0x400814);
+			u32 r848 = nv_rd32(priv, 0x400848);
+
+			nv_error(priv, "TRAP DISPATCH_FAULT\n");
+			if (display && (addr & 0x80000000)) {
+				nv_error(priv,
+					 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n",
+					 chid, inst,
+					 nouveau_client_name(engctx), subc,
+					 class, mthd, datah, datal, addr, r848);
+			} else if (display) {
+				nv_error(priv, "no stuck command?\n");
+			}
+
+			nv_wr32(priv, 0x400808, 0);
+			nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3);
+			nv_wr32(priv, 0x400848, 0);
+			ustatus &= ~0x00000001;
+		}
+
+		if (ustatus & 0x00000002) {
+			u32 addr = nv_rd32(priv, 0x40084c);
+			u32 subc = (addr & 0x00070000) >> 16;
+			u32 mthd = (addr & 0x00001ffc);
+			u32 data = nv_rd32(priv, 0x40085c);
+			u32 class = nv_rd32(priv, 0x400814);
+
+			nv_error(priv, "TRAP DISPATCH_QUERY\n");
+			if (display && (addr & 0x80000000)) {
+				nv_error(priv,
+					 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n",
+					 chid, inst,
+					 nouveau_client_name(engctx), subc,
+					 class, mthd, data, addr);
+			} else if (display) {
+				nv_error(priv, "no stuck command?\n");
+			}
+
+			nv_wr32(priv, 0x40084c, 0);
+			ustatus &= ~0x00000002;
+		}
+
+		if (ustatus && display) {
+			nv_error(priv, "TRAP_DISPATCH (unknown "
+				      "0x%08x)\n", ustatus);
+		}
+
+		nv_wr32(priv, 0x400804, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x001);
+		status &= ~0x001;
+		if (!status)
+			return 0;
+	}
+
+	/* M2MF: Memory to memory copy engine. */
+	if (status & 0x002) {
+		u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff;
+		if (display) {
+			nv_error(priv, "TRAP_M2MF");
+			nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
+			pr_cont("\n");
+			nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
+				nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
+				nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
+		}
+
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(priv, 0x400040, 2);
+		nv_wr32(priv, 0x400040, 0);
+		nv_wr32(priv, 0x406800, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x002);
+		status &= ~0x002;
+	}
+
+	/* VFETCH: Fetches data from vertex buffers. */
+	if (status & 0x004) {
+		u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff;
+		if (display) {
+			nv_error(priv, "TRAP_VFETCH");
+			nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
+			pr_cont("\n");
+			nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
+				nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
+				nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
+		}
+
+		nv_wr32(priv, 0x400c04, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x004);
+		status &= ~0x004;
+	}
+
+	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
+	if (status & 0x008) {
+		ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff;
+		if (display) {
+			nv_error(priv, "TRAP_STRMOUT");
+			nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
+			pr_cont("\n");
+			nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
+				nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
+				nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
+		}
+
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(priv, 0x400040, 0x80);
+		nv_wr32(priv, 0x400040, 0);
+		nv_wr32(priv, 0x401800, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x008);
+		status &= ~0x008;
+	}
+
+	/* CCACHE: Handles code and c[] caches and fills them. */
+	if (status & 0x010) {
+		ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff;
+		if (display) {
+			nv_error(priv, "TRAP_CCACHE");
+			nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
+			pr_cont("\n");
+			nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
+				     " %08x %08x %08x\n",
+				nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
+				nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c),
+				nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014),
+				nv_rd32(priv, 0x40501c));
+		}
+
+		nv_wr32(priv, 0x405018, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x010);
+		status &= ~0x010;
+	}
+
+	/* Unknown, not seen yet... 0x402000 is the only trap status reg
+	 * remaining, so try to handle it anyway. Perhaps related to that
+	 * unknown DMA slot on tesla? */
+	if (status & 0x20) {
+		ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff;
+		if (display)
+			nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus);
+		nv_wr32(priv, 0x402000, 0xc0000000);
+		/* no status modification on purpose */
+	}
+
+	/* TEXTURE: CUDA texturing units */
+	if (status & 0x040) {
+		nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display,
+				    "TRAP_TEXTURE");
+		nv_wr32(priv, 0x400108, 0x040);
+		status &= ~0x040;
+	}
+
+	/* MP: CUDA execution engines. */
+	if (status & 0x080) {
+		nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display,
+				    "TRAP_MP");
+		nv_wr32(priv, 0x400108, 0x080);
+		status &= ~0x080;
+	}
+
+	/* TPDMA:  Handles TP-initiated uncached memory accesses:
+	 * l[], g[], stack, 2d surfaces, render targets. */
+	if (status & 0x100) {
+		nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
+				    "TRAP_TPDMA");
+		nv_wr32(priv, 0x400108, 0x100);
+		status &= ~0x100;
+	}
+
+	if (status) {
+		if (display)
+			nv_error(priv, "TRAP: unknown 0x%08x\n", status);
+		nv_wr32(priv, 0x400108, status);
+	}
+
+	return 1;
+}
+
+static void
+nv50_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle = NULL;
+	struct nv50_graph_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, 0x400100);
+	u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff;
+	u32 addr = nv_rd32(priv, 0x400704);
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00001ffc);
+	u32 data = nv_rd32(priv, 0x400708);
+	u32 class = nv_rd32(priv, 0x400814);
+	u32 show = stat;
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & 0x00000010) {
+		handle = nouveau_handle_get_class(engctx, class);
+		if (handle && !nv_call(handle->object, mthd, data))
+			show &= ~0x00000010;
+		nouveau_handle_put(handle);
+	}
+
+	if (show & 0x00100000) {
+		u32 ecode = nv_rd32(priv, 0x400110);
+		nv_error(priv, "DATA_ERROR ");
+		nouveau_enum_print(nv50_data_error_names, ecode);
+		pr_cont("\n");
+	}
+
+	if (stat & 0x00200000) {
+		if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12,
+				engctx))
+			show &= ~0x00200000;
+	}
+
+	nv_wr32(priv, 0x400100, stat);
+	nv_wr32(priv, 0x400500, 0x00010001);
+
+	if (show) {
+		nv_error(priv, "%s", "");
+		nouveau_bitfield_print(nv50_graph_intr_name, show);
+		pr_cont("\n");
+		nv_error(priv,
+			 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+			 chid, (u64)inst << 12, nouveau_client_name(engctx),
+			 subc, class, mthd, data);
+	}
+
+	if (nv_rd32(priv, 0x400824) & (1 << 31))
+		nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31));
+
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_graph_priv *priv;
+	int ret;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00201000;
+	nv_subdev(priv)->intr = nv50_graph_intr;
+	nv_engine(priv)->cclass = &nv50_graph_cclass;
+
+	priv->base.units = nv50_graph_units;
+
+	switch (nv_device(priv)->chipset) {
+	case 0x50:
+		nv_engine(priv)->sclass = nv50_graph_sclass;
+		break;
+	case 0x84:
+	case 0x86:
+	case 0x92:
+	case 0x94:
+	case 0x96:
+	case 0x98:
+		nv_engine(priv)->sclass = nv84_graph_sclass;
+		break;
+	case 0xa0:
+	case 0xaa:
+	case 0xac:
+		nv_engine(priv)->sclass = nva0_graph_sclass;
+		break;
+	case 0xa3:
+	case 0xa5:
+	case 0xa8:
+		nv_engine(priv)->sclass = nva3_graph_sclass;
+		break;
+	case 0xaf:
+		nv_engine(priv)->sclass = nvaf_graph_sclass;
+		break;
+	}
+
+	if (nv_device(priv)->chipset == 0x50 ||
+	    nv_device(priv)->chipset == 0xac)
+		nv_engine(priv)->tlb_flush = nv50_graph_tlb_flush;
+	else
+		nv_engine(priv)->tlb_flush = nv84_graph_tlb_flush;
+
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+static int
+nv50_graph_init(struct nouveau_object *object)
+{
+	struct nv50_graph_priv *priv = (void *)object;
+	int ret, units, i;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
+	nv_wr32(priv, 0x40008c, 0x00000004);
+
+	/* reset/enable traps and interrupts */
+	nv_wr32(priv, 0x400804, 0xc0000000);
+	nv_wr32(priv, 0x406800, 0xc0000000);
+	nv_wr32(priv, 0x400c04, 0xc0000000);
+	nv_wr32(priv, 0x401800, 0xc0000000);
+	nv_wr32(priv, 0x405018, 0xc0000000);
+	nv_wr32(priv, 0x402000, 0xc0000000);
+
+	units = nv_rd32(priv, 0x001540);
+	for (i = 0; i < 16; i++) {
+		if (!(units & (1 << i)))
+			continue;
+
+		if (nv_device(priv)->chipset < 0xa0) {
+			nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000);
+			nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000);
+			nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000);
+		} else {
+			nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000);
+			nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000);
+			nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000);
+		}
+	}
+
+	nv_wr32(priv, 0x400108, 0xffffffff);
+	nv_wr32(priv, 0x400138, 0xffffffff);
+	nv_wr32(priv, 0x400100, 0xffffffff);
+	nv_wr32(priv, 0x40013c, 0xffffffff);
+	nv_wr32(priv, 0x400500, 0x00010001);
+
+	/* upload context program, initialise ctxctl defaults */
+	ret = nv50_grctx_init(nv_device(priv), &priv->size);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x400824, 0x00000000);
+	nv_wr32(priv, 0x400828, 0x00000000);
+	nv_wr32(priv, 0x40082c, 0x00000000);
+	nv_wr32(priv, 0x400830, 0x00000000);
+	nv_wr32(priv, 0x40032c, 0x00000000);
+	nv_wr32(priv, 0x400330, 0x00000000);
+
+	/* some unknown zcull magic */
+	switch (nv_device(priv)->chipset & 0xf0) {
+	case 0x50:
+	case 0x80:
+	case 0x90:
+		nv_wr32(priv, 0x402ca8, 0x00000800);
+		break;
+	case 0xa0:
+	default:
+		nv_wr32(priv, 0x402cc0, 0x00000000);
+		if (nv_device(priv)->chipset == 0xa0 ||
+		    nv_device(priv)->chipset == 0xaa ||
+		    nv_device(priv)->chipset == 0xac) {
+			nv_wr32(priv, 0x402ca8, 0x00000802);
+		} else {
+			nv_wr32(priv, 0x402cc0, 0x00000000);
+			nv_wr32(priv, 0x402ca8, 0x00000002);
+		}
+
+		break;
+	}
+
+	/* zero out zcull regions */
+	for (i = 0; i < 8; i++) {
+		nv_wr32(priv, 0x402c20 + (i * 8), 0x00000000);
+		nv_wr32(priv, 0x402c24 + (i * 8), 0x00000000);
+		nv_wr32(priv, 0x402c28 + (i * 8), 0x00000000);
+		nv_wr32(priv, 0x402c2c + (i * 8), 0x00000000);
+	}
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_graph_ctor,
+		.dtor = _nouveau_graph_dtor,
+		.init = nv50_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
new file mode 100644
index 0000000..0505fb4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
@@ -0,0 +1,7 @@
+#ifndef __NV50_GRAPH_H__
+#define __NV50_GRAPH_H__
+
+int  nv50_grctx_init(struct nouveau_device *, u32 *size);
+void nv50_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
new file mode 100644
index 0000000..f9b9d82
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -0,0 +1,964 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+#include "fuc/hubnvc0.fuc.h"
+#include "fuc/gpcnvc0.fuc.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_graph_sclass[] = {
+	{ 0x902d, &nouveau_object_ofuncs },
+	{ 0x9039, &nouveau_object_ofuncs },
+	{ 0x9097, &nouveau_object_ofuncs },
+	{ 0x90c0, &nouveau_object_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nvc1_graph_sclass[] = {
+	{ 0x902d, &nouveau_object_ofuncs },
+	{ 0x9039, &nouveau_object_ofuncs },
+	{ 0x9097, &nouveau_object_ofuncs },
+	{ 0x90c0, &nouveau_object_ofuncs },
+	{ 0x9197, &nouveau_object_ofuncs },
+	{}
+};
+
+static struct nouveau_oclass
+nvc8_graph_sclass[] = {
+	{ 0x902d, &nouveau_object_ofuncs },
+	{ 0x9039, &nouveau_object_ofuncs },
+	{ 0x9097, &nouveau_object_ofuncs },
+	{ 0x90c0, &nouveau_object_ofuncs },
+	{ 0x9197, &nouveau_object_ofuncs },
+	{ 0x9297, &nouveau_object_ofuncs },
+	{}
+};
+
+u64
+nvc0_graph_units(struct nouveau_graph *graph)
+{
+	struct nvc0_graph_priv *priv = (void *)graph;
+	u64 cfg;
+
+	cfg  = (u32)priv->gpc_nr;
+	cfg |= (u32)priv->tpc_total << 8;
+	cfg |= (u64)priv->rop_nr << 32;
+
+	return cfg;
+}
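+
+/* layout implied by the shifts above: GPC count in the low byte, total TPC
+ * count starting at bit 8, ROP count starting at bit 32 */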
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+int
+nvc0_graph_context_ctor(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass, void *args, u32 size,
+			struct nouveau_object **pobject)
+{
+	struct nouveau_vm *vm = nouveau_client(parent)->vm;
+	struct nvc0_graph_priv *priv = (void *)engine;
+	struct nvc0_graph_data *data = priv->mmio_data;
+	struct nvc0_graph_mmio *mmio = priv->mmio_list;
+	struct nvc0_graph_chan *chan;
+	int ret, i;
+
+	/* allocate memory for context, and fill with default values */
+	ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
+					   priv->size, 0x100,
+					   NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	/* allocate memory for a "mmio list" buffer that's used by the HUB
+	 * fuc to modify some per-context register settings on first load
+	 * of the context.
+	 */
+	ret = nouveau_gpuobj_new(nv_object(chan), NULL, 0x1000, 0x100, 0,
+				&chan->mmio);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
+				    NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+				    &chan->mmio_vma);
+	if (ret)
+		return ret;
+
+	/* allocate buffers referenced by mmio list */
+	for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
+		ret = nouveau_gpuobj_new(nv_object(chan), NULL, data->size,
+					 data->align, 0, &chan->data[i].mem);
+		if (ret)
+			return ret;
+
+		ret = nouveau_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
+					   &chan->data[i].vma);
+		if (ret)
+			return ret;
+
+		data++;
+	}
+
+	/* finally, fill in the mmio list and point the context at it */
+	for (i = 0; mmio->addr && i < ARRAY_SIZE(priv->mmio_list); i++) {
+		u32 addr = mmio->addr;
+		u32 data = mmio->data;
+
+		if (mmio->shift) {
+			u64 info = chan->data[mmio->buffer].vma.offset;
+			data |= info >> mmio->shift;
+		}
+
+		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
+		nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
+		mmio++;
+	}
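+	/* each mmio list entry is thus a (register, value) pair of 32-bit
+	 * words; entries with a non-zero shift get the matching buffer's VM
+	 * address folded into the value before being written out */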
+
+	for (i = 0; i < priv->size; i += 4)
+		nv_wo32(chan, i, priv->data[i / 4]);
+
+	if (!priv->firmware) {
+		nv_wo32(chan, 0x00, chan->mmio_nr / 2);
+		nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
+	} else {
+		nv_wo32(chan, 0xf4, 0);
+		nv_wo32(chan, 0xf8, 0);
+		nv_wo32(chan, 0x10, chan->mmio_nr / 2);
+		nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
+		nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
+		nv_wo32(chan, 0x1c, 1);
+		nv_wo32(chan, 0x20, 0);
+		nv_wo32(chan, 0x28, 0);
+		nv_wo32(chan, 0x2c, 0);
+	}
+
+	return 0;
+}
+
+void
+nvc0_graph_context_dtor(struct nouveau_object *object)
+{
+	struct nvc0_graph_chan *chan = (void *)object;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
+		nouveau_gpuobj_unmap(&chan->data[i].vma);
+		nouveau_gpuobj_ref(NULL, &chan->data[i].mem);
+	}
+
+	nouveau_gpuobj_unmap(&chan->mmio_vma);
+	nouveau_gpuobj_ref(NULL, &chan->mmio);
+
+	nouveau_graph_context_destroy(&chan->base);
+}
+
+static struct nouveau_oclass
+nvc0_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_graph_context_ctor,
+		.dtor = nvc0_graph_context_dtor,
+		.init = _nouveau_graph_context_init,
+		.fini = _nouveau_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+nvc0_graph_ctxctl_debug_unit(struct nvc0_graph_priv *priv, u32 base)
+{
+	nv_error(priv, "%06x - done 0x%08x\n", base,
+		 nv_rd32(priv, base + 0x400));
+	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+		 nv_rd32(priv, base + 0x800), nv_rd32(priv, base + 0x804),
+		 nv_rd32(priv, base + 0x808), nv_rd32(priv, base + 0x80c));
+	nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+		 nv_rd32(priv, base + 0x810), nv_rd32(priv, base + 0x814),
+		 nv_rd32(priv, base + 0x818), nv_rd32(priv, base + 0x81c));
+}
+
+void
+nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *priv)
+{
+	u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff;
+	u32 gpc;
+
+	nvc0_graph_ctxctl_debug_unit(priv, 0x409000);
+	for (gpc = 0; gpc < gpcnr; gpc++)
+		nvc0_graph_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000));
+}
+
+static void
+nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
+{
+	u32 ustat = nv_rd32(priv, 0x409c18);
+
+	if (ustat & 0x00000001)
+		nv_error(priv, "CTXCTRL ucode error\n");
+	if (ustat & 0x00080000)
+		nv_error(priv, "CTXCTRL watchdog timeout\n");
+	if (ustat & ~0x00080001)
+		nv_error(priv, "CTXCTRL 0x%08x\n", ustat);
+
+	nvc0_graph_ctxctl_debug(priv);
+	nv_wr32(priv, 0x409c20, ustat);
+}
+
+static void
+nvc0_graph_trap_tpc(struct nvc0_graph_priv *priv, int gpc, int tpc)
+{
+	u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0508));
+
+	if (stat & 0x00000001) {
+		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0224));
+		nv_error(priv, "GPC%d/TPC%d/TEX: 0x%08x\n", gpc, tpc, trap);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000001);
+		stat &= ~0x00000001;
+	}
+
+	if (stat & 0x00000002) {
+		u32 trap0 = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0644));
+		u32 trap1 = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x064c));
+		nv_error(priv, "GPC%d/TPC%d/MP: 0x%08x 0x%08x\n",
+			       gpc, tpc, trap0, trap1);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0644), 0x001ffffe);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x064c), 0x0000000f);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000002);
+		stat &= ~0x00000002;
+	}
+
+	if (stat & 0x00000004) {
+		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0084));
+		nv_error(priv, "GPC%d/TPC%d/POLY: 0x%08x\n", gpc, tpc, trap);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000004);
+		stat &= ~0x00000004;
+	}
+
+	if (stat & 0x00000008) {
+		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x048c));
+		nv_error(priv, "GPC%d/TPC%d/L1C: 0x%08x\n", gpc, tpc, trap);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000008);
+		stat &= ~0x00000008;
+	}
+
+	if (stat) {
+		nv_error(priv, "GPC%d/TPC%d/0x%08x: unknown\n", gpc, tpc, stat);
+		nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), stat);
+	}
+}
+
+static void
+nvc0_graph_trap_gpc(struct nvc0_graph_priv *priv, int gpc)
+{
+	u32 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
+	int tpc;
+
+	if (stat & 0x00000001) {
+		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
+		nv_error(priv, "GPC%d/PROP: 0x%08x\n", gpc, trap);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000001);
+		stat &= ~0x00000001;
+	}
+
+	if (stat & 0x00000002) {
+		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
+		nv_error(priv, "GPC%d/ZCULL: 0x%08x\n", gpc, trap);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000002);
+		stat &= ~0x00000002;
+	}
+
+	if (stat & 0x00000004) {
+		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
+		nv_error(priv, "GPC%d/CCACHE: 0x%08x\n", gpc, trap);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000004);
+		stat &= ~0x00000004;
+	}
+
+	if (stat & 0x00000008) {
+		u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
+		nv_error(priv, "GPC%d/ESETUP: 0x%08x\n", gpc, trap);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000008);
+		stat &= ~0x00000008;
+	}
+
+	for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+		u32 mask = 0x00010000 << tpc;
+		if (stat & mask) {
+			nvc0_graph_trap_tpc(priv, gpc, tpc);
+			nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), mask);
+			stat &= ~mask;
+		}
+	}
+
+	if (stat) {
+		nv_error(priv, "GPC%d/0x%08x: unknown\n", gpc, stat);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), stat);
+	}
+}
+
+static void
+nvc0_graph_trap_intr(struct nvc0_graph_priv *priv)
+{
+	u32 trap = nv_rd32(priv, 0x400108);
+	int rop, gpc;
+
+	if (trap & 0x00000001) {
+		u32 stat = nv_rd32(priv, 0x404000);
+		nv_error(priv, "DISPATCH 0x%08x\n", stat);
+		nv_wr32(priv, 0x404000, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000001);
+		trap &= ~0x00000001;
+	}
+
+	if (trap & 0x00000002) {
+		u32 stat = nv_rd32(priv, 0x404600);
+		nv_error(priv, "M2MF 0x%08x\n", stat);
+		nv_wr32(priv, 0x404600, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000002);
+		trap &= ~0x00000002;
+	}
+
+	if (trap & 0x00000008) {
+		u32 stat = nv_rd32(priv, 0x408030);
+		nv_error(priv, "CCACHE 0x%08x\n", stat);
+		nv_wr32(priv, 0x408030, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000008);
+		trap &= ~0x00000008;
+	}
+
+	if (trap & 0x00000010) {
+		u32 stat = nv_rd32(priv, 0x405840);
+		nv_error(priv, "SHADER 0x%08x\n", stat);
+		nv_wr32(priv, 0x405840, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000010);
+		trap &= ~0x00000010;
+	}
+
+	if (trap & 0x00000040) {
+		u32 stat = nv_rd32(priv, 0x40601c);
+		nv_error(priv, "UNK6 0x%08x\n", stat);
+		nv_wr32(priv, 0x40601c, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000040);
+		trap &= ~0x00000040;
+	}
+
+	if (trap & 0x00000080) {
+		u32 stat = nv_rd32(priv, 0x404490);
+		nv_error(priv, "MACRO 0x%08x\n", stat);
+		nv_wr32(priv, 0x404490, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000080);
+		trap &= ~0x00000080;
+	}
+
+	if (trap & 0x01000000) {
+		u32 stat = nv_rd32(priv, 0x400118);
+		for (gpc = 0; stat && gpc < priv->gpc_nr; gpc++) {
+			u32 mask = 0x00000001 << gpc;
+			if (stat & mask) {
+				nvc0_graph_trap_gpc(priv, gpc);
+				nv_wr32(priv, 0x400118, mask);
+				stat &= ~mask;
+			}
+		}
+		nv_wr32(priv, 0x400108, 0x01000000);
+		trap &= ~0x01000000;
+	}
+
+	if (trap & 0x02000000) {
+		for (rop = 0; rop < priv->rop_nr; rop++) {
+			u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
+			u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
+			nv_error(priv, "ROP%d 0x%08x 0x%08x\n",
+				 rop, statz, statc);
+			nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
+			nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+		}
+		nv_wr32(priv, 0x400108, 0x02000000);
+		trap &= ~0x02000000;
+	}
+
+	if (trap) {
+		nv_error(priv, "TRAP UNHANDLED 0x%08x\n", trap);
+		nv_wr32(priv, 0x400108, trap);
+	}
+}
+
+static void
+nvc0_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle;
+	struct nvc0_graph_priv *priv = (void *)subdev;
+	u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
+	u32 stat = nv_rd32(priv, 0x400100);
+	u32 addr = nv_rd32(priv, 0x400704);
+	u32 mthd = (addr & 0x00003ffc);
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 data = nv_rd32(priv, 0x400708);
+	u32 code = nv_rd32(priv, 0x400110);
+	u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
+	int chid;
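+	/* 0x400704 latches the faulting method: bits 13:2 hold the method
+	 * address, bits 18:16 the subchannel */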
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & 0x00000010) {
+		handle = nouveau_handle_get_class(engctx, class);
+		if (!handle || nv_call(handle->object, mthd, data)) {
+			nv_error(priv,
+				 "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+				 chid, inst << 12, nouveau_client_name(engctx),
+				 subc, class, mthd, data);
+		}
+		nouveau_handle_put(handle);
+		nv_wr32(priv, 0x400100, 0x00000010);
+		stat &= ~0x00000010;
+	}
+
+	if (stat & 0x00000020) {
+		nv_error(priv,
+			 "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+			 chid, inst << 12, nouveau_client_name(engctx), subc,
+			 class, mthd, data);
+		nv_wr32(priv, 0x400100, 0x00000020);
+		stat &= ~0x00000020;
+	}
+
+	if (stat & 0x00100000) {
+		nv_error(priv, "DATA_ERROR [");
+		nouveau_enum_print(nv50_data_error_names, code);
+		pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+			chid, inst << 12, nouveau_client_name(engctx), subc,
+			class, mthd, data);
+		nv_wr32(priv, 0x400100, 0x00100000);
+		stat &= ~0x00100000;
+	}
+
+	if (stat & 0x00200000) {
+		nv_error(priv, "TRAP ch %d [0x%010llx %s]\n", chid, inst << 12,
+			 nouveau_client_name(engctx));
+		nvc0_graph_trap_intr(priv);
+		nv_wr32(priv, 0x400100, 0x00200000);
+		stat &= ~0x00200000;
+	}
+
+	if (stat & 0x00080000) {
+		nvc0_graph_ctxctl_isr(priv);
+		nv_wr32(priv, 0x400100, 0x00080000);
+		stat &= ~0x00080000;
+	}
+
+	if (stat) {
+		nv_error(priv, "unknown stat 0x%08x\n", stat);
+		nv_wr32(priv, 0x400100, stat);
+	}
+
+	nv_wr32(priv, 0x400500, 0x00010001);
+	nouveau_engctx_put(engctx);
+}
+
+int
+nvc0_graph_ctor_fw(struct nvc0_graph_priv *priv, const char *fwname,
+		   struct nvc0_graph_fuc *fuc)
+{
+	struct nouveau_device *device = nv_device(priv);
+	const struct firmware *fw;
+	char f[32];
+	int ret;
+
+	snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
+	ret = request_firmware(&fw, f, &device->pdev->dev);
+	if (ret) {
+		snprintf(f, sizeof(f), "nouveau/%s", fwname);
+		ret = request_firmware(&fw, f, &device->pdev->dev);
+		if (ret) {
+			nv_error(priv, "failed to load %s\n", fwname);
+			return ret;
+		}
+	}
+
+	fuc->size = fw->size;
+	fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
+	release_firmware(fw);
+	return (fuc->data != NULL) ? 0 : -ENOMEM;
+}
+
+static int
+nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nvc0_graph_priv *priv;
+	bool enable = device->chipset != 0xd7;
+	int ret, i;
+
+	ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x18001000;
+	nv_subdev(priv)->intr = nvc0_graph_intr;
+	nv_engine(priv)->cclass = &nvc0_graph_cclass;
+
+	priv->base.units = nvc0_graph_units;
+
+	if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
+		nv_info(priv, "using external firmware\n");
+		if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
+		    nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
+		    nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
+		    nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
+			return -EINVAL;
+		priv->firmware = true;
+	}
+
+	switch (nvc0_graph_class(priv)) {
+	case 0x9097:
+		nv_engine(priv)->sclass = nvc0_graph_sclass;
+		break;
+	case 0x9197:
+		nv_engine(priv)->sclass = nvc1_graph_sclass;
+		break;
+	case 0x9297:
+		nv_engine(priv)->sclass = nvc8_graph_sclass;
+		break;
+	}
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+				&priv->unk4188b4);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+				&priv->unk4188b8);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < 0x1000; i += 4) {
+		nv_wo32(priv->unk4188b4, i, 0x00000010);
+		nv_wo32(priv->unk4188b8, i, 0x00000010);
+	}
+
+	priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
+	priv->gpc_nr =  nv_rd32(priv, 0x409604) & 0x0000001f;
+	for (i = 0; i < priv->gpc_nr; i++) {
+		priv->tpc_nr[i]  = nv_rd32(priv, GPC_UNIT(i, 0x2608));
+		priv->tpc_total += priv->tpc_nr[i];
+	}
+
+	/*XXX: these need figuring out... though it might not even matter */
+	switch (nv_device(priv)->chipset) {
+	case 0xc0:
+		if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
+			priv->magic_not_rop_nr = 0x07;
+		} else
+		if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
+			priv->magic_not_rop_nr = 0x05;
+		} else
+		if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
+			priv->magic_not_rop_nr = 0x06;
+		}
+		break;
+	case 0xc3: /* 450, 4/0/0/0, 2 */
+		priv->magic_not_rop_nr = 0x03;
+		break;
+	case 0xc4: /* 460, 3/4/0/0, 4 */
+		priv->magic_not_rop_nr = 0x01;
+		break;
+	case 0xc1: /* 2/0/0/0, 1 */
+		priv->magic_not_rop_nr = 0x01;
+		break;
+	case 0xc8: /* 4/4/3/4, 5 */
+		priv->magic_not_rop_nr = 0x06;
+		break;
+	case 0xce: /* 4/4/0/0, 4 */
+		priv->magic_not_rop_nr = 0x03;
+		break;
+	case 0xcf: /* 4/0/0/0, 3 */
+		priv->magic_not_rop_nr = 0x03;
+		break;
+	case 0xd9: /* 1/0/0/0, 1 */
+		priv->magic_not_rop_nr = 0x01;
+		break;
+	}
+
+	return 0;
+}
+
+static void
+nvc0_graph_dtor_fw(struct nvc0_graph_fuc *fuc)
+{
+	kfree(fuc->data);
+	fuc->data = NULL;
+}
+
+void
+nvc0_graph_dtor(struct nouveau_object *object)
+{
+	struct nvc0_graph_priv *priv = (void *)object;
+
+	kfree(priv->data);
+
+	nvc0_graph_dtor_fw(&priv->fuc409c);
+	nvc0_graph_dtor_fw(&priv->fuc409d);
+	nvc0_graph_dtor_fw(&priv->fuc41ac);
+	nvc0_graph_dtor_fw(&priv->fuc41ad);
+
+	nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
+	nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+
+	nouveau_graph_destroy(&priv->base);
+}
+
+static void
+nvc0_graph_init_obj418880(struct nvc0_graph_priv *priv)
+{
+	int i;
+
+	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
+	nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
+	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+}
+
+static void
+nvc0_graph_init_regs(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x400080, 0x003083c2);
+	nv_wr32(priv, 0x400088, 0x00006fe7);
+	nv_wr32(priv, 0x40008c, 0x00000000);
+	nv_wr32(priv, 0x400090, 0x00000030);
+	nv_wr32(priv, 0x40013c, 0x013901f7);
+	nv_wr32(priv, 0x400140, 0x00000100);
+	nv_wr32(priv, 0x400144, 0x00000000);
+	nv_wr32(priv, 0x400148, 0x00000110);
+	nv_wr32(priv, 0x400138, 0x00000000);
+	nv_wr32(priv, 0x400130, 0x00000000);
+	nv_wr32(priv, 0x400134, 0x00000000);
+	nv_wr32(priv, 0x400124, 0x00000002);
+}
+
+static void
+nvc0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
+{
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+	u32 data[TPC_MAX / 8];
+	u8  tpcnr[GPC_MAX];
+	int i, gpc, tpc;
+
+	nv_wr32(priv, TPC_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+
+	/*
+	 *      TP      ROP UNKVAL(magic_not_rop_nr)
+	 * 450: 4/0/0/0 2        3
+	 * 460: 3/4/0/0 4        1
+	 * 465: 3/4/4/0 4        7
+	 * 470: 3/3/4/4 5        5
+	 * 480: 3/4/4/4 6        6
+	 */
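+	/* the loop below assigns TPC indices round-robin across GPCs,
+	 * packing one 4-bit entry per TPC, eight per broadcast register;
+	 * magicgpc918 is a fixed-point reciprocal of the TPC count
+	 * (e.g. 14 TPCs -> ceil(0x800000 / 14) = 0x92493).
+	 */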
+
+	memset(data, 0x00, sizeof(data));
+	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % priv->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+		data[i / 8] |= tpc << ((i % 8) * 4);
+	}
+
+	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
+	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
+	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
+	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
+
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+						  priv->tpc_nr[gpc]);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	}
+
+	nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
+	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
+}
+
+static void
+nvc0_graph_init_units(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x409c24, 0x000f0000);
+	nv_wr32(priv, 0x404000, 0xc0000000); /* DISPATCH */
+	nv_wr32(priv, 0x404600, 0xc0000000); /* M2MF */
+	nv_wr32(priv, 0x408030, 0xc0000000);
+	nv_wr32(priv, 0x40601c, 0xc0000000);
+	nv_wr32(priv, 0x404490, 0xc0000000); /* MACRO */
+	nv_wr32(priv, 0x406018, 0xc0000000);
+	nv_wr32(priv, 0x405840, 0xc0000000);
+	nv_wr32(priv, 0x405844, 0x00ffffff);
+	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
+	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nvc0_graph_init_gpc_1(struct nvc0_graph_priv *priv)
+{
+	int gpc, tpc;
+
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+		}
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+	}
+}
+
+static void
+nvc0_graph_init_rop(struct nvc0_graph_priv *priv)
+{
+	int rop;
+
+	for (rop = 0; rop < priv->rop_nr; rop++) {
+		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	}
+}
+
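+/* upload falcon data/code segments through the indexed upload ports;
+ * bit 24 of the control writes appears to select auto-incrementing
+ * transfers, and the write to +0x0188 picks the 256-byte code page.
+ */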
+void
+nvc0_graph_init_fw(struct nvc0_graph_priv *priv, u32 fuc_base,
+		   struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
+{
+	int i;
+
+	nv_wr32(priv, fuc_base + 0x01c0, 0x01000000);
+	for (i = 0; i < data->size / 4; i++)
+		nv_wr32(priv, fuc_base + 0x01c4, data->data[i]);
+
+	nv_wr32(priv, fuc_base + 0x0180, 0x01000000);
+	for (i = 0; i < code->size / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(priv, fuc_base + 0x0188, i >> 6);
+		nv_wr32(priv, fuc_base + 0x0184, code->data[i]);
+	}
+}
+
+static int
+nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
+{
+	u32 r000260;
+	int i;
+
+	if (priv->firmware) {
+		/* load fuc microcode */
+		r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+		nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c,
+						   &priv->fuc409d);
+		nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac,
+						   &priv->fuc41ad);
+		nv_wr32(priv, 0x000260, r000260);
+
+		/* start both of them running */
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x41a10c, 0x00000000);
+		nv_wr32(priv, 0x40910c, 0x00000000);
+		nv_wr32(priv, 0x41a100, 0x00000002);
+		nv_wr32(priv, 0x409100, 0x00000002);
+		if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
+			nv_warn(priv, "0x409800 wait failed\n");
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x7fffffff);
+		nv_wr32(priv, 0x409504, 0x00000021);
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x00000000);
+		nv_wr32(priv, 0x409504, 0x00000010);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x10 timeout\n");
+			return -EBUSY;
+		}
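+		/* method 0x10 apparently asks the fuc for the size of the
+		 * context image */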
+		priv->size = nv_rd32(priv, 0x409800);
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x00000000);
+		nv_wr32(priv, 0x409504, 0x00000016);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x16 timeout\n");
+			return -EBUSY;
+		}
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x00000000);
+		nv_wr32(priv, 0x409504, 0x00000025);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x25 timeout\n");
+			return -EBUSY;
+		}
+
+		if (priv->data == NULL) {
+			int ret = nvc0_grctx_generate(priv);
+			if (ret) {
+				nv_error(priv, "failed to construct context\n");
+				return ret;
+			}
+		}
+
+		return 0;
+	}
+
+	/* load HUB microcode */
+	r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x4091c0, 0x01000000);
+	for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
+		nv_wr32(priv, 0x4091c4, nvc0_grhub_data[i]);
+
+	nv_wr32(priv, 0x409180, 0x01000000);
+	for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(priv, 0x409188, i >> 6);
+		nv_wr32(priv, 0x409184, nvc0_grhub_code[i]);
+	}
+
+	/* load GPC microcode */
+	nv_wr32(priv, 0x41a1c0, 0x01000000);
+	for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
+		nv_wr32(priv, 0x41a1c4, nvc0_grgpc_data[i]);
+
+	nv_wr32(priv, 0x41a180, 0x01000000);
+	for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(priv, 0x41a188, i >> 6);
+		nv_wr32(priv, 0x41a184, nvc0_grgpc_code[i]);
+	}
+	nv_wr32(priv, 0x000260, r000260);
+
+	/* start HUB ucode running, it'll init the GPCs */
+	nv_wr32(priv, 0x409800, nv_device(priv)->chipset);
+	nv_wr32(priv, 0x40910c, 0x00000000);
+	nv_wr32(priv, 0x409100, 0x00000002);
+	if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
+		nv_error(priv, "HUB_INIT timed out\n");
+		nvc0_graph_ctxctl_debug(priv);
+		return -EBUSY;
+	}
+
+	priv->size = nv_rd32(priv, 0x409804);
+	if (priv->data == NULL) {
+		int ret = nvc0_grctx_generate(priv);
+		if (ret) {
+			nv_error(priv, "failed to construct context\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+nvc0_graph_init(struct nouveau_object *object)
+{
+	struct nvc0_graph_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nvc0_graph_init_obj418880(priv);
+	nvc0_graph_init_regs(priv);
+	/*nvc0_graph_init_unimplemented_magics(priv);*/
+	nvc0_graph_init_gpc_0(priv);
+	/*nvc0_graph_init_unimplemented_c242(priv);*/
+
+	nv_wr32(priv, 0x400500, 0x00010001);
+	nv_wr32(priv, 0x400100, 0xffffffff);
+	nv_wr32(priv, 0x40013c, 0xffffffff);
+
+	nvc0_graph_init_units(priv);
+	nvc0_graph_init_gpc_1(priv);
+	nvc0_graph_init_rop(priv);
+
+	nv_wr32(priv, 0x400108, 0xffffffff);
+	nv_wr32(priv, 0x400138, 0xffffffff);
+	nv_wr32(priv, 0x400118, 0xffffffff);
+	nv_wr32(priv, 0x400130, 0xffffffff);
+	nv_wr32(priv, 0x40011c, 0xffffffff);
+	nv_wr32(priv, 0x400134, 0xffffffff);
+	nv_wr32(priv, 0x400054, 0x34ce3464);
+
+	ret = nvc0_graph_init_ctxctl(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_graph_ctor,
+		.dtor = nvc0_graph_dtor,
+		.init = nvc0_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
new file mode 100644
index 0000000..c870dad
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NVC0_GRAPH_H__
+#define __NVC0_GRAPH_H__
+
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/gpuobj.h>
+#include <core/option.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+#include <engine/graph.h>
+
+#define GPC_MAX 4
+#define TPC_MAX 32
+
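+/* per-unit register windows: each GPC gets a 0x8000 aperture from
+ * 0x500000 (0x418000 broadcasts to all GPCs), TPCs sit at 0x800
+ * strides inside their GPC, ROPs at 0x400 strides from 0x410000.
+ */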
+#define ROP_BCAST(r)      (0x408800 + (r))
+#define ROP_UNIT(u, r)    (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r)      (0x418000 + (r))
+#define GPC_UNIT(t, r)    (0x500000 + (t) * 0x8000 + (r))
+#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
+
+struct nvc0_graph_data {
+	u32 size;
+	u32 align;
+	u32 access;
+};
+
+struct nvc0_graph_mmio {
+	u32 addr;
+	u32 data;
+	u32 shift;
+	u32 buffer;
+};
+
+struct nvc0_graph_fuc {
+	u32 *data;
+	u32  size;
+};
+
+struct nvc0_graph_priv {
+	struct nouveau_graph base;
+
+	struct nvc0_graph_fuc fuc409c;
+	struct nvc0_graph_fuc fuc409d;
+	struct nvc0_graph_fuc fuc41ac;
+	struct nvc0_graph_fuc fuc41ad;
+	bool firmware;
+
+	u8 rop_nr;
+	u8 gpc_nr;
+	u8 tpc_nr[GPC_MAX];
+	u8 tpc_total;
+
+	struct nouveau_gpuobj *unk4188b4;
+	struct nouveau_gpuobj *unk4188b8;
+
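+	/* register lists built by the grctx generator; mmio_list mirrors
+	 * the 4KiB per-channel "mmio list" buffer, 8 bytes per entry */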
+	struct nvc0_graph_data mmio_data[4];
+	struct nvc0_graph_mmio mmio_list[4096/8];
+	u32  size;
+	u32 *data;
+
+	u8 magic_not_rop_nr;
+};
+
+struct nvc0_graph_chan {
+	struct nouveau_graph_chan base;
+
+	struct nouveau_gpuobj *mmio;
+	struct nouveau_vma mmio_vma;
+	int mmio_nr;
+	struct {
+		struct nouveau_gpuobj *mem;
+		struct nouveau_vma vma;
+	} data[4];
+};
+
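+/* highest 3D object class implemented by a given chipset (0 if unknown) */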
+static inline u32
+nvc0_graph_class(void *obj)
+{
+	struct nouveau_device *device = nv_device(obj);
+
+	switch (device->chipset) {
+	case 0xc0:
+	case 0xc3:
+	case 0xc4:
+	case 0xce: /* guess, mmio trace shows only 0x9097 state */
+	case 0xcf: /* guess, mmio trace shows only 0x9097 state */
+		return 0x9097;
+	case 0xc1:
+		return 0x9197;
+	case 0xc8:
+	case 0xd9:
+	case 0xd7:
+		return 0x9297;
+	case 0xe4:
+	case 0xe7:
+	case 0xe6:
+		return 0xa097;
+	default:
+		return 0;
+	}
+}
+
+void nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data);
+
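+/* hand-feed a single method to PGRAPH: the data word goes in first,
+ * then a trigger write carrying the class (low bits), the method
+ * (<< 14) and a go bit (bit 31) */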
+static inline void
+nv_mthd(struct nvc0_graph_priv *priv, u32 class, u32 mthd, u32 data)
+{
+	nv_wr32(priv, 0x40448c, data);
+	nv_wr32(priv, 0x404488, 0x80000000 | (mthd << 14) | class);
+}
+
+struct nvc0_grctx {
+	struct nvc0_graph_priv *priv;
+	struct nvc0_graph_data *data;
+	struct nvc0_graph_mmio *mmio;
+	struct nouveau_gpuobj *chan;
+	int buffer_nr;
+	u64 buffer[4];
+	u64 addr;
+};
+
+int  nvc0_grctx_generate(struct nvc0_graph_priv *);
+int  nvc0_grctx_init(struct nvc0_graph_priv *, struct nvc0_grctx *);
+void nvc0_grctx_data(struct nvc0_grctx *, u32, u32, u32);
+void nvc0_grctx_mmio(struct nvc0_grctx *, u32, u32, u32, u32);
+int  nvc0_grctx_fini(struct nvc0_grctx *);
+
+int  nve0_grctx_generate(struct nvc0_graph_priv *);
+
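+/* grctx helpers; both assume a local "struct nvc0_grctx info" in scope */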
+#define mmio_data(s,a,p) nvc0_grctx_data(&info, (s), (a), (p))
+#define mmio_list(r,d,s,b) nvc0_grctx_mmio(&info, (r), (d), (s), (b))
+
+void nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *);
+int  nvc0_graph_ctor_fw(struct nvc0_graph_priv *, const char *,
+			struct nvc0_graph_fuc *);
+void nvc0_graph_dtor(struct nouveau_object *);
+void nvc0_graph_init_fw(struct nvc0_graph_priv *, u32 base,
+			struct nvc0_graph_fuc *, struct nvc0_graph_fuc *);
+int  nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
+			     struct nouveau_oclass *, void *, u32,
+			     struct nouveau_object **);
+void nvc0_graph_context_dtor(struct nouveau_object *);
+
+u64 nvc0_graph_units(struct nouveau_graph *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
new file mode 100644
index 0000000..678c16f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -0,0 +1,807 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nvc0.h"
+#include "fuc/hubnve0.fuc.h"
+#include "fuc/gpcnve0.fuc.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_graph_sclass[] = {
+	{ 0x902d, &nouveau_object_ofuncs },
+	{ 0xa040, &nouveau_object_ofuncs },
+	{ 0xa097, &nouveau_object_ofuncs },
+	{ 0xa0c0, &nouveau_object_ofuncs },
+	{ 0xa0b5, &nouveau_object_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_graph_cclass = {
+	.handle = NV_ENGCTX(GR, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_graph_context_ctor,
+		.dtor = nvc0_graph_context_dtor,
+		.init = _nouveau_graph_context_init,
+		.fini = _nouveau_graph_context_fini,
+		.rd32 = _nouveau_graph_context_rd32,
+		.wr32 = _nouveau_graph_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
+{
+	u32 ustat = nv_rd32(priv, 0x409c18);
+
+	if (ustat & 0x00000001)
+		nv_error(priv, "CTXCTRL ucode error\n");
+	if (ustat & 0x00080000)
+		nv_error(priv, "CTXCTRL watchdog timeout\n");
+	if (ustat & ~0x00080001)
+		nv_error(priv, "CTXCTRL 0x%08x\n", ustat);
+
+	nvc0_graph_ctxctl_debug(priv);
+	nv_wr32(priv, 0x409c20, ustat);
+}
+
+static const struct nouveau_enum nve0_mp_warp_error[] = {
+	{ 0x00, "NO_ERROR" },
+	{ 0x01, "STACK_MISMATCH" },
+	{ 0x05, "MISALIGNED_PC" },
+	{ 0x08, "MISALIGNED_GPR" },
+	{ 0x09, "INVALID_OPCODE" },
+	{ 0x0d, "GPR_OUT_OF_BOUNDS" },
+	{ 0x0e, "MEM_OUT_OF_BOUNDS" },
+	{ 0x0f, "UNALIGNED_MEM_ACCESS" },
+	{ 0x11, "INVALID_PARAM" },
+	{}
+};
+
+static const struct nouveau_enum nve0_mp_global_error[] = {
+	{ 2, "MULTIPLE_WARP_ERRORS" },
+	{ 3, "OUT_OF_STACK_SPACE" },
+	{}
+};
+
+static const struct nouveau_enum nve0_gpc_rop_error[] = {
+	{ 1, "RT_PITCH_OVERRUN" },
+	{ 4, "RT_WIDTH_OVERRUN" },
+	{ 5, "RT_HEIGHT_OVERRUN" },
+	{ 7, "ZETA_STORAGE_TYPE_MISMATCH" },
+	{ 8, "RT_STORAGE_TYPE_MISMATCH" },
+	{ 10, "RT_LINEAR_MISMATCH" },
+	{}
+};
+
+static const struct nouveau_enum nve0_sked_error[] = {
+	{ 7, "CONSTANT_BUFFER_SIZE" },
+	{ 9, "LOCAL_MEMORY_SIZE_POS" },
+	{ 10, "LOCAL_MEMORY_SIZE_NEG" },
+	{ 11, "WARP_CSTACK_SIZE" },
+	{ 12, "TOTAL_TEMP_SIZE" },
+	{ 13, "REGISTER_COUNT" },
+	{ 18, "TOTAL_THREADS" },
+	{ 20, "PROGRAM_OFFSET" },
+	{ 21, "SHARED_MEMORY_SIZE" },
+	{ 25, "SHARED_CONFIG_TOO_SMALL" },
+	{ 26, "TOTAL_REGISTER_COUNT" },
+	{}
+};
+
+static void
+nve0_graph_mp_trap(struct nvc0_graph_priv *priv, int gpc, int tp)
+{
+	int i;
+	u32 werr = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x648));
+	u32 gerr = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x650));
+
+	nv_error(priv, "GPC%i/TP%i/MP trap:", gpc, tp);
+
+	for (i = 0; i <= 31; ++i) {
+		if (!(gerr & (1 << i)))
+			continue;
+		pr_cont(" ");
+		nouveau_enum_print(nve0_mp_global_error, i);
+	}
+	if (werr) {
+		pr_cont(" ");
+		nouveau_enum_print(nve0_mp_warp_error, werr & 0xffff);
+	}
+	pr_cont("\n");
+
+	/* disable MP trap to avoid spam */
+	nv_mask(priv, TPC_UNIT(gpc, tp, 0x50c), 0x2, 0x0);
+
+	/* TODO: figure out how to resume after an MP trap */
+}
+
+static void
+nve0_graph_tp_trap(struct nvc0_graph_priv *priv, int gpc, int tp)
+{
+	u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x508));
+
+	if (stat & 0x1) {
+		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x224));
+		nv_error(priv, "GPC%i/TP%i/TEX trap: %08x\n",
+			 gpc, tp, trap);
+
+		nv_wr32(priv, TPC_UNIT(gpc, tp, 0x224), 0xc0000000);
+		stat &= ~0x1;
+	}
+
+	if (stat & 0x2) {
+		nve0_graph_mp_trap(priv, gpc, tp);
+		stat &= ~0x2;
+	}
+
+	if (stat & 0x4) {
+		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x084));
+		nv_error(priv, "GPC%i/TP%i/POLY trap: %08x\n",
+			 gpc, tp, trap);
+
+		nv_wr32(priv, TPC_UNIT(gpc, tp, 0x084), 0xc0000000);
+		stat &= ~0x4;
+	}
+
+	if (stat & 0x8) {
+		u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tp, 0x48c));
+		nv_error(priv, "GPC%i/TP%i/L1C trap: %08x\n",
+			 gpc, tp, trap);
+
+		nv_wr32(priv, TPC_UNIT(gpc, tp, 0x48c), 0xc0000000);
+		stat &= ~0x8;
+	}
+
+	if (stat) {
+		nv_error(priv, "GPC%i/TP%i: unknown stat %08x\n",
+			 gpc, tp, stat);
+	}
+}
+
+static void
+nve0_graph_gpc_trap(struct nvc0_graph_priv *priv)
+{
+	const u32 mask = nv_rd32(priv, 0x400118);
+	int gpc;
+
+	for (gpc = 0; gpc < 4; ++gpc) {
+		u32 stat;
+		int tp;
+
+		if (!(mask & (1 << gpc)))
+			continue;
+		stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
+
+		if (stat & 0x0001) {
+			u32 trap[4];
+			int i;
+
+			trap[0] = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
+			trap[1] = nv_rd32(priv, GPC_UNIT(gpc, 0x0434));
+			trap[2] = nv_rd32(priv, GPC_UNIT(gpc, 0x0438));
+			trap[3] = nv_rd32(priv, GPC_UNIT(gpc, 0x043c));
+
+			nv_error(priv, "GPC%i/PROP trap:", gpc);
+			for (i = 0; i <= 29; ++i) {
+				if (!(trap[0] & (1 << i)))
+					continue;
+				pr_cont(" ");
+				nouveau_enum_print(nve0_gpc_rop_error, i);
+			}
+			pr_cont("\n");
+
+			nv_error(priv, "x = %u, y = %u, "
+				 "format = %x, storage type = %x\n",
+				 trap[1] & 0xffff,
+				 trap[1] >> 16,
+				 (trap[2] >> 8) & 0x3f,
+				 trap[3] & 0xff);
+
+			nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+			stat &= ~0x0001;
+		}
+
+		if (stat & 0x0002) {
+			u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
+			nv_error(priv, "GPC%i/ZCULL trap: %08x\n", gpc,
+				 trap);
+			nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+			stat &= ~0x0002;
+		}
+
+		if (stat & 0x0004) {
+			u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
+			nv_error(priv, "GPC%i/CCACHE trap: %08x\n", gpc,
+				 trap);
+			nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+			stat &= ~0x0004;
+		}
+
+		if (stat & 0x0008) {
+			u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
+			nv_error(priv, "GPC%i/ESETUP trap %08x\n", gpc,
+				 trap);
+			nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+			stat &= ~0x0008;
+		}
+
+		for (tp = 0; tp < 8; ++tp) {
+			if (stat & (1 << (16 + tp)))
+				nve0_graph_tp_trap(priv, gpc, tp);
+		}
+		stat &= ~0xff0000;
+
+		if (stat) {
+			nv_error(priv, "GPC%i: unknown stat %08x\n",
+				 gpc, stat);
+		}
+	}
+}
+
+static void
+nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst,
+		struct nouveau_object *engctx)
+{
+	u32 trap = nv_rd32(priv, 0x400108);
+	int i;
+	int rop;
+
+	if (trap & 0x00000001) {
+		u32 stat = nv_rd32(priv, 0x404000);
+		nv_error(priv, "DISPATCH ch %d [0x%010llx %s] 0x%08x\n",
+			 chid, inst, nouveau_client_name(engctx), stat);
+		nv_wr32(priv, 0x404000, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000001);
+		trap &= ~0x00000001;
+	}
+
+	if (trap & 0x00000010) {
+		u32 stat = nv_rd32(priv, 0x405840);
+		nv_error(priv, "SHADER ch %d [0x%010llx %s] 0x%08x\n",
+			 chid, inst, nouveau_client_name(engctx), stat);
+		nv_wr32(priv, 0x405840, 0xc0000000);
+		nv_wr32(priv, 0x400108, 0x00000010);
+		trap &= ~0x00000010;
+	}
+
+	if (trap & 0x00000100) {
+		u32 stat = nv_rd32(priv, 0x407020);
+		nv_error(priv, "SKED ch %d [0x%010llx %s]:",
+			 chid, inst, nouveau_client_name(engctx));
+
+		for (i = 0; i <= 29; ++i) {
+			if (!(stat & (1 << i)))
+				continue;
+			pr_cont(" ");
+			nouveau_enum_print(nve0_sked_error, i);
+		}
+		pr_cont("\n");
+
+		if (stat & 0x3fffffff)
+			nv_wr32(priv, 0x407020, 0x40000000);
+		nv_wr32(priv, 0x400108, 0x00000100);
+		trap &= ~0x00000100;
+	}
+
+	if (trap & 0x01000000) {
+		nv_error(priv, "GPC ch %d [0x%010llx %s]:\n",
+			 chid, inst, nouveau_client_name(engctx));
+		nve0_graph_gpc_trap(priv);
+		trap &= ~0x01000000;
+	}
+
+	if (trap & 0x02000000) {
+		for (rop = 0; rop < priv->rop_nr; rop++) {
+			u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
+			u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
+			nv_error(priv,
+				 "ROP%d ch %d [0x%010llx %s] 0x%08x 0x%08x\n",
+				 rop, chid, inst, nouveau_client_name(engctx),
+				 statz, statc);
+			nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
+			nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+		}
+		nv_wr32(priv, 0x400108, 0x02000000);
+		trap &= ~0x02000000;
+	}
+
+	if (trap) {
+		nv_error(priv, "TRAP ch %d [0x%010llx %s] 0x%08x\n",
+			 chid, inst, nouveau_client_name(engctx), trap);
+		nv_wr32(priv, 0x400108, trap);
+	}
+}
+
+static void
+nve0_graph_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle;
+	struct nvc0_graph_priv *priv = (void *)subdev;
+	u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
+	u32 stat = nv_rd32(priv, 0x400100);
+	u32 addr = nv_rd32(priv, 0x400704);
+	u32 mthd = (addr & 0x00003ffc);
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 data = nv_rd32(priv, 0x400708);
+	u32 code = nv_rd32(priv, 0x400110);
+	u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
+	int chid;
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & 0x00000010) {
+		handle = nouveau_handle_get_class(engctx, class);
+		if (!handle || nv_call(handle->object, mthd, data)) {
+			nv_error(priv,
+				 "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+				 chid, inst, nouveau_client_name(engctx), subc,
+				 class, mthd, data);
+		}
+		nouveau_handle_put(handle);
+		nv_wr32(priv, 0x400100, 0x00000010);
+		stat &= ~0x00000010;
+	}
+
+	if (stat & 0x00000020) {
+		nv_error(priv,
+			 "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+			 chid, inst, nouveau_client_name(engctx), subc, class,
+			 mthd, data);
+		nv_wr32(priv, 0x400100, 0x00000020);
+		stat &= ~0x00000020;
+	}
+
+	if (stat & 0x00100000) {
+		nv_error(priv, "DATA_ERROR [");
+		nouveau_enum_print(nv50_data_error_names, code);
+		pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
+			chid, inst, nouveau_client_name(engctx), subc, class,
+			mthd, data);
+		nv_wr32(priv, 0x400100, 0x00100000);
+		stat &= ~0x00100000;
+	}
+
+	if (stat & 0x00200000) {
+		nve0_graph_trap_isr(priv, chid, inst, engctx);
+		nv_wr32(priv, 0x400100, 0x00200000);
+		stat &= ~0x00200000;
+	}
+
+	if (stat & 0x00080000) {
+		nve0_graph_ctxctl_isr(priv);
+		nv_wr32(priv, 0x400100, 0x00080000);
+		stat &= ~0x00080000;
+	}
+
+	if (stat) {
+		nv_error(priv, "unknown stat 0x%08x\n", stat);
+		nv_wr32(priv, 0x400100, stat);
+	}
+
+	nv_wr32(priv, 0x400500, 0x00010001);
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nvc0_graph_priv *priv;
+	int ret, i;
+
+	ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x18001000;
+	nv_subdev(priv)->intr = nve0_graph_intr;
+	nv_engine(priv)->cclass = &nve0_graph_cclass;
+	nv_engine(priv)->sclass = nve0_graph_sclass;
+
+	priv->base.units = nvc0_graph_units;
+
+	if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
+		nv_info(priv, "using external firmware\n");
+		if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
+		    nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
+		    nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
+		    nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
+			return -EINVAL;
+		priv->firmware = true;
+	}
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+				&priv->unk4188b4);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 256, 0,
+				&priv->unk4188b8);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < 0x1000; i += 4) {
+		nv_wo32(priv->unk4188b4, i, 0x00000010);
+		nv_wo32(priv->unk4188b8, i, 0x00000010);
+	}
+
+	priv->gpc_nr =  nv_rd32(priv, 0x409604) & 0x0000001f;
+	priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
+	for (i = 0; i < priv->gpc_nr; i++) {
+		priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
+		priv->tpc_total += priv->tpc_nr[i];
+	}
+
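+	/*XXX: same unexplained per-configuration magic as the nvc0 code */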
+	switch (nv_device(priv)->chipset) {
+	case 0xe4:
+		if (priv->tpc_total == 8)
+			priv->magic_not_rop_nr = 3;
+		else
+		if (priv->tpc_total == 7)
+			priv->magic_not_rop_nr = 1;
+		break;
+	case 0xe7:
+	case 0xe6:
+		priv->magic_not_rop_nr = 1;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static void
+nve0_graph_init_obj418880(struct nvc0_graph_priv *priv)
+{
+	int i;
+
+	nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
+	nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
+	for (i = 0; i < 4; i++)
+		nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+	nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
+	nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+}
+
+static void
+nve0_graph_init_regs(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x400080, 0x003083c2);
+	nv_wr32(priv, 0x400088, 0x0001ffe7);
+	nv_wr32(priv, 0x40008c, 0x00000000);
+	nv_wr32(priv, 0x400090, 0x00000030);
+	nv_wr32(priv, 0x40013c, 0x003901f7);
+	nv_wr32(priv, 0x400140, 0x00000100);
+	nv_wr32(priv, 0x400144, 0x00000000);
+	nv_wr32(priv, 0x400148, 0x00000110);
+	nv_wr32(priv, 0x400138, 0x00000000);
+	nv_wr32(priv, 0x400130, 0x00000000);
+	nv_wr32(priv, 0x400134, 0x00000000);
+	nv_wr32(priv, 0x400124, 0x00000002);
+}
+
+static void
+nve0_graph_init_units(struct nvc0_graph_priv *priv)
+{
+	nv_wr32(priv, 0x409ffc, 0x00000000);
+	nv_wr32(priv, 0x409c14, 0x00003e3e);
+	nv_wr32(priv, 0x409c24, 0x000f0000);
+
+	nv_wr32(priv, 0x404000, 0xc0000000);
+	nv_wr32(priv, 0x404600, 0xc0000000);
+	nv_wr32(priv, 0x408030, 0xc0000000);
+	nv_wr32(priv, 0x404490, 0xc0000000);
+	nv_wr32(priv, 0x406018, 0xc0000000);
+	nv_wr32(priv, 0x407020, 0xc0000000);
+	nv_wr32(priv, 0x405840, 0xc0000000);
+	nv_wr32(priv, 0x405844, 0x00ffffff);
+
+	nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
+	nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
+{
+	const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+	u32 data[TPC_MAX / 8];
+	u8  tpcnr[GPC_MAX];
+	int i, gpc, tpc;
+
+	nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
+
+	memset(data, 0x00, sizeof(data));
+	memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+	for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+		do {
+			gpc = (gpc + 1) % priv->gpc_nr;
+		} while (!tpcnr[gpc]);
+		tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+		data[i / 8] |= tpc << ((i % 8) * 4);
+	}
+
+	nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
+	nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
+	nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
+	nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
+
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+						  priv->tpc_nr[gpc]);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
+	}
+
+	nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
+	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
+}
+
+static void
+nve0_graph_init_gpc_1(struct nvc0_graph_priv *priv)
+{
+	int gpc, tpc;
+
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		nv_wr32(priv, GPC_UNIT(gpc, 0x3038), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+			nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+		}
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+	}
+}
+
+static void
+nve0_graph_init_rop(struct nvc0_graph_priv *priv)
+{
+	int rop;
+
+	for (rop = 0; rop < priv->rop_nr; rop++) {
+		nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+		nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
+	}
+}
+
+static int
+nve0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
+{
+	u32 r000260;
+	int i;
+
+	if (priv->firmware) {
+		/* load fuc microcode */
+		r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+		nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c, &priv->fuc409d);
+		nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
+		nv_wr32(priv, 0x000260, r000260);
+
+		/* start both of them running */
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x41a10c, 0x00000000);
+		nv_wr32(priv, 0x40910c, 0x00000000);
+		nv_wr32(priv, 0x41a100, 0x00000002);
+		nv_wr32(priv, 0x409100, 0x00000002);
+		if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
+			nv_error(priv, "0x409800 wait failed\n");
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x7fffffff);
+		nv_wr32(priv, 0x409504, 0x00000021);
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x00000000);
+		nv_wr32(priv, 0x409504, 0x00000010);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x10 timeout\n");
+			return -EBUSY;
+		}
+		priv->size = nv_rd32(priv, 0x409800);
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x00000000);
+		nv_wr32(priv, 0x409504, 0x00000016);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x16 timeout\n");
+			return -EBUSY;
+		}
+
+		nv_wr32(priv, 0x409840, 0xffffffff);
+		nv_wr32(priv, 0x409500, 0x00000000);
+		nv_wr32(priv, 0x409504, 0x00000025);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x25 timeout\n");
+			return -EBUSY;
+		}
+
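+		/* extra setup requests (0x30/0x31/0x32) only needed by the
+		 * external firmware; the 0x409810 values written below are
+		 * magic, presumably taken from traces of the binary driver.
+		 */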
+		nv_wr32(priv, 0x409800, 0x00000000);
+		nv_wr32(priv, 0x409500, 0x00000001);
+		nv_wr32(priv, 0x409504, 0x00000030);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x30 timeout\n");
+			return -EBUSY;
+		}
+
+		nv_wr32(priv, 0x409810, 0xb00095c8);
+		nv_wr32(priv, 0x409800, 0x00000000);
+		nv_wr32(priv, 0x409500, 0x00000001);
+		nv_wr32(priv, 0x409504, 0x00000031);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x31 timeout\n");
+			return -EBUSY;
+		}
+
+		nv_wr32(priv, 0x409810, 0x00080420);
+		nv_wr32(priv, 0x409800, 0x00000000);
+		nv_wr32(priv, 0x409500, 0x00000001);
+		nv_wr32(priv, 0x409504, 0x00000032);
+		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+			nv_error(priv, "fuc09 req 0x32 timeout\n");
+			return -EBUSY;
+		}
+
+		nv_wr32(priv, 0x409614, 0x00000070);
+		nv_wr32(priv, 0x409614, 0x00000770);
+		nv_wr32(priv, 0x40802c, 0x00000001);
+
+		if (priv->data == NULL) {
+			int ret = nve0_grctx_generate(priv);
+			if (ret) {
+				nv_error(priv, "failed to construct context\n");
+				return ret;
+			}
+		}
+
+		return 0;
+	}
+
+	/* load HUB microcode */
+	r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+	nv_wr32(priv, 0x4091c0, 0x01000000);
+	for (i = 0; i < sizeof(nve0_grhub_data) / 4; i++)
+		nv_wr32(priv, 0x4091c4, nve0_grhub_data[i]);
+
+	nv_wr32(priv, 0x409180, 0x01000000);
+	for (i = 0; i < sizeof(nve0_grhub_code) / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(priv, 0x409188, i >> 6);
+		nv_wr32(priv, 0x409184, nve0_grhub_code[i]);
+	}
+
+	/* load GPC microcode */
+	nv_wr32(priv, 0x41a1c0, 0x01000000);
+	for (i = 0; i < sizeof(nve0_grgpc_data) / 4; i++)
+		nv_wr32(priv, 0x41a1c4, nve0_grgpc_data[i]);
+
+	nv_wr32(priv, 0x41a180, 0x01000000);
+	for (i = 0; i < sizeof(nve0_grgpc_code) / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(priv, 0x41a188, i >> 6);
+		nv_wr32(priv, 0x41a184, nve0_grgpc_code[i]);
+	}
+	nv_wr32(priv, 0x000260, r000260);
+
+	/* start HUB ucode running, it'll init the GPCs */
+	nv_wr32(priv, 0x409800, nv_device(priv)->chipset);
+	nv_wr32(priv, 0x40910c, 0x00000000);
+	nv_wr32(priv, 0x409100, 0x00000002);
+	if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
+		nv_error(priv, "HUB_INIT timed out\n");
+		nvc0_graph_ctxctl_debug(priv);
+		return -EBUSY;
+	}
+
+	priv->size = nv_rd32(priv, 0x409804);
+	if (priv->data == NULL) {
+		int ret = nve0_grctx_generate(priv);
+		if (ret) {
+			nv_error(priv, "failed to construct context\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+nve0_graph_init(struct nouveau_object *object)
+{
+	struct nvc0_graph_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_graph_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nve0_graph_init_obj418880(priv);
+	nve0_graph_init_regs(priv);
+	nve0_graph_init_gpc_0(priv);
+
+	nv_wr32(priv, 0x400500, 0x00010001);
+	nv_wr32(priv, 0x400100, 0xffffffff);
+	nv_wr32(priv, 0x40013c, 0xffffffff);
+
+	nve0_graph_init_units(priv);
+	nve0_graph_init_gpc_1(priv);
+	nve0_graph_init_rop(priv);
+
+	nv_wr32(priv, 0x400108, 0xffffffff);
+	nv_wr32(priv, 0x400138, 0xffffffff);
+	nv_wr32(priv, 0x400118, 0xffffffff);
+	nv_wr32(priv, 0x400130, 0xffffffff);
+	nv_wr32(priv, 0x40011c, 0xffffffff);
+	nv_wr32(priv, 0x400134, 0xffffffff);
+	nv_wr32(priv, 0x400054, 0x34ce3464);
+
+	ret = nve0_graph_init_ctxctl(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_graph_ctor,
+		.dtor = nvc0_graph_dtor,
+		.init = nve0_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
new file mode 100644
index 0000000..fde8e24
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
@@ -0,0 +1,274 @@
+#ifndef __NOUVEAU_GRAPH_REGS_H__
+#define __NOUVEAU_GRAPH_REGS_H__
+
+#define NV04_PGRAPH_DEBUG_0                                0x00400080
+#define NV04_PGRAPH_DEBUG_1                                0x00400084
+#define NV04_PGRAPH_DEBUG_2                                0x00400088
+#define NV04_PGRAPH_DEBUG_3                                0x0040008c
+#define NV10_PGRAPH_DEBUG_4                                0x00400090
+#define NV03_PGRAPH_INTR                                   0x00400100
+#define NV03_PGRAPH_NSTATUS                                0x00400104
+#    define NV04_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<11)
+#    define NV04_PGRAPH_NSTATUS_INVALID_STATE                 (1<<12)
+#    define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<13)
+#    define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<14)
+#    define NV10_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<23)
+#    define NV10_PGRAPH_NSTATUS_INVALID_STATE                 (1<<24)
+#    define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<25)
+#    define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<26)
+#define NV03_PGRAPH_NSOURCE                                0x00400108
+#    define NV03_PGRAPH_NSOURCE_NOTIFICATION                   (1<<0)
+#    define NV03_PGRAPH_NSOURCE_DATA_ERROR                     (1<<1)
+#    define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR               (1<<2)
+#    define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION                (1<<3)
+#    define NV03_PGRAPH_NSOURCE_LIMIT_COLOR                    (1<<4)
+#    define NV03_PGRAPH_NSOURCE_LIMIT_ZETA                     (1<<5)
+#    define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD                   (1<<6)
+#    define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION               (1<<7)
+#    define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION               (1<<8)
+#    define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION               (1<<9)
+#    define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION               (1<<10)
+#    define NV03_PGRAPH_NSOURCE_STATE_INVALID                 (1<<11)
+#    define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY                 (1<<12)
+#    define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE                 (1<<13)
+#    define NV03_PGRAPH_NSOURCE_METHOD_CNT                    (1<<14)
+#    define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION              (1<<15)
+#    define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION            (1<<16)
+#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A                   (1<<17)
+#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B                   (1<<18)
+#define NV03_PGRAPH_INTR_EN                                0x00400140
+#define NV40_PGRAPH_INTR_EN                                0x0040013C
+#    define NV_PGRAPH_INTR_NOTIFY                              (1<<0)
+#    define NV_PGRAPH_INTR_MISSING_HW                          (1<<4)
+#    define NV_PGRAPH_INTR_CONTEXT_SWITCH                     (1<<12)
+#    define NV_PGRAPH_INTR_BUFFER_NOTIFY                      (1<<16)
+#    define NV_PGRAPH_INTR_ERROR                              (1<<20)
+#define NV10_PGRAPH_CTX_CONTROL                            0x00400144
+#define NV10_PGRAPH_CTX_USER                               0x00400148
+#define NV10_PGRAPH_CTX_SWITCH(i)                         (0x0040014C + 0x4*(i))
+#define NV04_PGRAPH_CTX_SWITCH1                            0x00400160
+#define NV10_PGRAPH_CTX_CACHE(i, j)                       (0x00400160	\
+							   + 0x4*(i) + 0x20*(j))
+#define NV04_PGRAPH_CTX_SWITCH2                            0x00400164
+#define NV04_PGRAPH_CTX_SWITCH3                            0x00400168
+#define NV04_PGRAPH_CTX_SWITCH4                            0x0040016C
+#define NV04_PGRAPH_CTX_CONTROL                            0x00400170
+#define NV04_PGRAPH_CTX_USER                               0x00400174
+#define NV04_PGRAPH_CTX_CACHE1                             0x00400180
+#define NV03_PGRAPH_CTX_CONTROL                            0x00400190
+#define NV03_PGRAPH_CTX_USER                               0x00400194
+#define NV04_PGRAPH_CTX_CACHE2                             0x004001A0
+#define NV04_PGRAPH_CTX_CACHE3                             0x004001C0
+#define NV04_PGRAPH_CTX_CACHE4                             0x004001E0
+#define NV40_PGRAPH_CTXCTL_0304                            0x00400304
+#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX                   0x00000001
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT                      0x00400308
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK              0xff000000
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT                     24
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK              0x00ffffff
+#define NV40_PGRAPH_CTXCTL_0310                            0x00400310
+#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE                  0x00000020
+#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD                  0x00000040
+#define NV40_PGRAPH_CTXCTL_030C                            0x0040030c
+#define NV40_PGRAPH_CTXCTL_UCODE_INDEX                     0x00400324
+#define NV40_PGRAPH_CTXCTL_UCODE_DATA                      0x00400328
+#define NV40_PGRAPH_CTXCTL_CUR                             0x0040032c
+#define NV40_PGRAPH_CTXCTL_CUR_LOADED                      0x01000000
+#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE                    0x000FFFFF
+#define NV40_PGRAPH_CTXCTL_NEXT                            0x00400330
+#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE                   0x000fffff
+#define NV50_PGRAPH_CTXCTL_CUR                             0x0040032c
+#define NV50_PGRAPH_CTXCTL_CUR_LOADED                      0x80000000
+#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE                    0x00ffffff
+#define NV50_PGRAPH_CTXCTL_NEXT                            0x00400330
+#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE                   0x00ffffff
+#define NV03_PGRAPH_ABS_X_RAM                              0x00400400
+#define NV03_PGRAPH_ABS_Y_RAM                              0x00400480
+#define NV03_PGRAPH_X_MISC                                 0x00400500
+#define NV03_PGRAPH_Y_MISC                                 0x00400504
+#define NV04_PGRAPH_VALID1                                 0x00400508
+#define NV04_PGRAPH_SOURCE_COLOR                           0x0040050C
+#define NV04_PGRAPH_MISC24_0                               0x00400510
+#define NV03_PGRAPH_XY_LOGIC_MISC0                         0x00400514
+#define NV03_PGRAPH_XY_LOGIC_MISC1                         0x00400518
+#define NV03_PGRAPH_XY_LOGIC_MISC2                         0x0040051C
+#define NV03_PGRAPH_XY_LOGIC_MISC3                         0x00400520
+#define NV03_PGRAPH_CLIPX_0                                0x00400524
+#define NV03_PGRAPH_CLIPX_1                                0x00400528
+#define NV03_PGRAPH_CLIPY_0                                0x0040052C
+#define NV03_PGRAPH_CLIPY_1                                0x00400530
+#define NV03_PGRAPH_ABS_ICLIP_XMAX                         0x00400534
+#define NV03_PGRAPH_ABS_ICLIP_YMAX                         0x00400538
+#define NV03_PGRAPH_ABS_UCLIP_XMIN                         0x0040053C
+#define NV03_PGRAPH_ABS_UCLIP_YMIN                         0x00400540
+#define NV03_PGRAPH_ABS_UCLIP_XMAX                         0x00400544
+#define NV03_PGRAPH_ABS_UCLIP_YMAX                         0x00400548
+#define NV03_PGRAPH_ABS_UCLIPA_XMIN                        0x00400560
+#define NV03_PGRAPH_ABS_UCLIPA_YMIN                        0x00400564
+#define NV03_PGRAPH_ABS_UCLIPA_XMAX                        0x00400568
+#define NV03_PGRAPH_ABS_UCLIPA_YMAX                        0x0040056C
+#define NV04_PGRAPH_MISC24_1                               0x00400570
+#define NV04_PGRAPH_MISC24_2                               0x00400574
+#define NV04_PGRAPH_VALID2                                 0x00400578
+#define NV04_PGRAPH_PASSTHRU_0                             0x0040057C
+#define NV04_PGRAPH_PASSTHRU_1                             0x00400580
+#define NV04_PGRAPH_PASSTHRU_2                             0x00400584
+#define NV10_PGRAPH_DIMX_TEXTURE                           0x00400588
+#define NV10_PGRAPH_WDIMX_TEXTURE                          0x0040058C
+#define NV04_PGRAPH_COMBINE_0_ALPHA                        0x00400590
+#define NV04_PGRAPH_COMBINE_0_COLOR                        0x00400594
+#define NV04_PGRAPH_COMBINE_1_ALPHA                        0x00400598
+#define NV04_PGRAPH_COMBINE_1_COLOR                        0x0040059C
+#define NV04_PGRAPH_FORMAT_0                               0x004005A8
+#define NV04_PGRAPH_FORMAT_1                               0x004005AC
+#define NV04_PGRAPH_FILTER_0                               0x004005B0
+#define NV04_PGRAPH_FILTER_1                               0x004005B4
+#define NV03_PGRAPH_MONO_COLOR0                            0x00400600
+#define NV04_PGRAPH_ROP3                                   0x00400604
+#define NV04_PGRAPH_BETA_AND                               0x00400608
+#define NV04_PGRAPH_BETA_PREMULT                           0x0040060C
+#define NV04_PGRAPH_LIMIT_VIOL_PIX                         0x00400610
+#define NV04_PGRAPH_FORMATS                                0x00400618
+#define NV10_PGRAPH_DEBUG_2                                0x00400620
+#define NV04_PGRAPH_BOFFSET0                               0x00400640
+#define NV04_PGRAPH_BOFFSET1                               0x00400644
+#define NV04_PGRAPH_BOFFSET2                               0x00400648
+#define NV04_PGRAPH_BOFFSET3                               0x0040064C
+#define NV04_PGRAPH_BOFFSET4                               0x00400650
+#define NV04_PGRAPH_BOFFSET5                               0x00400654
+#define NV04_PGRAPH_BBASE0                                 0x00400658
+#define NV04_PGRAPH_BBASE1                                 0x0040065C
+#define NV04_PGRAPH_BBASE2                                 0x00400660
+#define NV04_PGRAPH_BBASE3                                 0x00400664
+#define NV04_PGRAPH_BBASE4                                 0x00400668
+#define NV04_PGRAPH_BBASE5                                 0x0040066C
+#define NV04_PGRAPH_BPITCH0                                0x00400670
+#define NV04_PGRAPH_BPITCH1                                0x00400674
+#define NV04_PGRAPH_BPITCH2                                0x00400678
+#define NV04_PGRAPH_BPITCH3                                0x0040067C
+#define NV04_PGRAPH_BPITCH4                                0x00400680
+#define NV04_PGRAPH_BLIMIT0                                0x00400684
+#define NV04_PGRAPH_BLIMIT1                                0x00400688
+#define NV04_PGRAPH_BLIMIT2                                0x0040068C
+#define NV04_PGRAPH_BLIMIT3                                0x00400690
+#define NV04_PGRAPH_BLIMIT4                                0x00400694
+#define NV04_PGRAPH_BLIMIT5                                0x00400698
+#define NV04_PGRAPH_BSWIZZLE2                              0x0040069C
+#define NV04_PGRAPH_BSWIZZLE5                              0x004006A0
+#define NV03_PGRAPH_STATUS                                 0x004006B0
+#define NV04_PGRAPH_STATUS                                 0x00400700
+#    define NV40_PGRAPH_STATUS_SYNC_STALL                  0x00004000
+#define NV04_PGRAPH_TRAPPED_ADDR                           0x00400704
+#define NV04_PGRAPH_TRAPPED_DATA                           0x00400708
+#define NV04_PGRAPH_SURFACE                                0x0040070C
+#define NV10_PGRAPH_TRAPPED_DATA_HIGH                      0x0040070C
+#define NV04_PGRAPH_STATE                                  0x00400710
+#define NV10_PGRAPH_SURFACE                                0x00400710
+#define NV04_PGRAPH_NOTIFY                                 0x00400714
+#define NV10_PGRAPH_STATE                                  0x00400714
+#define NV10_PGRAPH_NOTIFY                                 0x00400718
+
+#define NV04_PGRAPH_FIFO                                   0x00400720
+
+#define NV04_PGRAPH_BPIXEL                                 0x00400724
+#define NV10_PGRAPH_RDI_INDEX                              0x00400750
+#define NV04_PGRAPH_FFINTFC_ST2                            0x00400754
+#define NV10_PGRAPH_RDI_DATA                               0x00400754
+#define NV04_PGRAPH_DMA_PITCH                              0x00400760
+#define NV10_PGRAPH_FFINTFC_FIFO_PTR                       0x00400760
+#define NV04_PGRAPH_DVD_COLORFMT                           0x00400764
+#define NV10_PGRAPH_FFINTFC_ST2                            0x00400764
+#define NV04_PGRAPH_SCALED_FORMAT                          0x00400768
+#define NV10_PGRAPH_FFINTFC_ST2_DL                         0x00400768
+#define NV10_PGRAPH_FFINTFC_ST2_DH                         0x0040076c
+#define NV10_PGRAPH_DMA_PITCH                              0x00400770
+#define NV10_PGRAPH_DVD_COLORFMT                           0x00400774
+#define NV10_PGRAPH_SCALED_FORMAT                          0x00400778
+#define NV20_PGRAPH_CHANNEL_CTX_TABLE                      0x00400780
+#define NV20_PGRAPH_CHANNEL_CTX_POINTER                    0x00400784
+#define NV20_PGRAPH_CHANNEL_CTX_XFER                       0x00400788
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD                  0x00000001
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE                  0x00000002
+#define NV04_PGRAPH_PATT_COLOR0                            0x00400800
+#define NV04_PGRAPH_PATT_COLOR1                            0x00400804
+#define NV04_PGRAPH_PATTERN                                0x00400808
+#define NV04_PGRAPH_PATTERN_SHAPE                          0x00400810
+#define NV04_PGRAPH_CHROMA                                 0x00400814
+#define NV04_PGRAPH_CONTROL0                               0x00400818
+#define NV04_PGRAPH_CONTROL1                               0x0040081C
+#define NV04_PGRAPH_CONTROL2                               0x00400820
+#define NV04_PGRAPH_BLEND                                  0x00400824
+#define NV04_PGRAPH_STORED_FMT                             0x00400830
+#define NV04_PGRAPH_PATT_COLORRAM                          0x00400900
+#define NV20_PGRAPH_TILE(i)                                (0x00400900 + (i*16))
+#define NV20_PGRAPH_TLIMIT(i)                              (0x00400904 + (i*16))
+#define NV20_PGRAPH_TSIZE(i)                               (0x00400908 + (i*16))
+#define NV20_PGRAPH_TSTATUS(i)                             (0x0040090C + (i*16))
+#define NV20_PGRAPH_ZCOMP(i)                               (0x00400980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP0(i)                              (0x004009c0 + 4*(i))
+#define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
+#define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
+#define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
+#define NV10_PGRAPH_TSTATUS(i)                             (0x00400B0C + (i*16))
+#define NV04_PGRAPH_U_RAM                                  0x00400D00
+#define NV47_PGRAPH_TILE(i)                                (0x00400D00 + (i*16))
+#define NV47_PGRAPH_TLIMIT(i)                              (0x00400D04 + (i*16))
+#define NV47_PGRAPH_TSIZE(i)                               (0x00400D08 + (i*16))
+#define NV47_PGRAPH_TSTATUS(i)                             (0x00400D0C + (i*16))
+#define NV04_PGRAPH_V_RAM                                  0x00400D40
+#define NV04_PGRAPH_W_RAM                                  0x00400D80
+#define NV47_PGRAPH_ZCOMP0(i)                              (0x00400e00 + 4*(i))
+#define NV10_PGRAPH_COMBINER0_IN_ALPHA                     0x00400E40
+#define NV10_PGRAPH_COMBINER1_IN_ALPHA                     0x00400E44
+#define NV10_PGRAPH_COMBINER0_IN_RGB                       0x00400E48
+#define NV10_PGRAPH_COMBINER1_IN_RGB                       0x00400E4C
+#define NV10_PGRAPH_COMBINER_COLOR0                        0x00400E50
+#define NV10_PGRAPH_COMBINER_COLOR1                        0x00400E54
+#define NV10_PGRAPH_COMBINER0_OUT_ALPHA                    0x00400E58
+#define NV10_PGRAPH_COMBINER1_OUT_ALPHA                    0x00400E5C
+#define NV10_PGRAPH_COMBINER0_OUT_RGB                      0x00400E60
+#define NV10_PGRAPH_COMBINER1_OUT_RGB                      0x00400E64
+#define NV10_PGRAPH_COMBINER_FINAL0                        0x00400E68
+#define NV10_PGRAPH_COMBINER_FINAL1                        0x00400E6C
+#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL                  0x00400F00
+#define NV10_PGRAPH_WINDOWCLIP_VERTICAL                    0x00400F20
+#define NV10_PGRAPH_XFMODE0                                0x00400F40
+#define NV10_PGRAPH_XFMODE1                                0x00400F44
+#define NV10_PGRAPH_GLOBALSTATE0                           0x00400F48
+#define NV10_PGRAPH_GLOBALSTATE1                           0x00400F4C
+#define NV10_PGRAPH_PIPE_ADDRESS                           0x00400F50
+#define NV10_PGRAPH_PIPE_DATA                              0x00400F54
+#define NV04_PGRAPH_DMA_START_0                            0x00401000
+#define NV04_PGRAPH_DMA_START_1                            0x00401004
+#define NV04_PGRAPH_DMA_LENGTH                             0x00401008
+#define NV04_PGRAPH_DMA_MISC                               0x0040100C
+#define NV04_PGRAPH_DMA_DATA_0                             0x00401020
+#define NV04_PGRAPH_DMA_DATA_1                             0x00401024
+#define NV04_PGRAPH_DMA_RM                                 0x00401030
+#define NV04_PGRAPH_DMA_A_XLATE_INST                       0x00401040
+#define NV04_PGRAPH_DMA_A_CONTROL                          0x00401044
+#define NV04_PGRAPH_DMA_A_LIMIT                            0x00401048
+#define NV04_PGRAPH_DMA_A_TLB_PTE                          0x0040104C
+#define NV04_PGRAPH_DMA_A_TLB_TAG                          0x00401050
+#define NV04_PGRAPH_DMA_A_ADJ_OFFSET                       0x00401054
+#define NV04_PGRAPH_DMA_A_OFFSET                           0x00401058
+#define NV04_PGRAPH_DMA_A_SIZE                             0x0040105C
+#define NV04_PGRAPH_DMA_A_Y_SIZE                           0x00401060
+#define NV04_PGRAPH_DMA_B_XLATE_INST                       0x00401080
+#define NV04_PGRAPH_DMA_B_CONTROL                          0x00401084
+#define NV04_PGRAPH_DMA_B_LIMIT                            0x00401088
+#define NV04_PGRAPH_DMA_B_TLB_PTE                          0x0040108C
+#define NV04_PGRAPH_DMA_B_TLB_TAG                          0x00401090
+#define NV04_PGRAPH_DMA_B_ADJ_OFFSET                       0x00401094
+#define NV04_PGRAPH_DMA_B_OFFSET                           0x00401098
+#define NV04_PGRAPH_DMA_B_SIZE                             0x0040109C
+#define NV04_PGRAPH_DMA_B_Y_SIZE                           0x004010A0
+#define NV47_PGRAPH_ZCOMP1(i)                              (0x004068c0 + 4*(i))
+#define NV40_PGRAPH_TILE1(i)                               (0x00406900 + (i*16))
+#define NV40_PGRAPH_TLIMIT1(i)                             (0x00406904 + (i*16))
+#define NV40_PGRAPH_TSIZE1(i)                              (0x00406908 + (i*16))
+#define NV40_PGRAPH_TSTATUS1(i)                            (0x0040690C + (i*16))
+#define NV40_PGRAPH_ZCOMP1(i)                              (0x00406980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP1(i)                              (0x004069c0 + 4*(i))
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
new file mode 100644
index 0000000..49ecbb8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/handle.h>
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <subdev/instmem.h>
+
+#include <engine/fifo.h>
+#include <engine/mpeg.h>
+#include <engine/graph/nv40.h>
+
+struct nv31_mpeg_priv {
+	struct nouveau_mpeg base;
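+	/* non-zero while a channel owns the engine; PMPEG here has no
+	 * per-channel context switching, so only one user at a time */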
+	atomic_t refcount;
+};
+
+struct nv31_mpeg_chan {
+	struct nouveau_object base;
+};
+
+/*******************************************************************************
+ * MPEG object classes
+ ******************************************************************************/
+
+static int
+nv31_mpeg_object_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 size,
+		      struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj *obj;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    20, 16, 0, &obj);
+	*pobject = nv_object(obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+	nv_wo32(obj, 0x0c, 0x00000000);
+	return 0;
+}
+
+static int
+nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
+{
+	struct nouveau_instmem *imem = nouveau_instmem(object);
+	struct nv31_mpeg_priv *priv = (void *)object->engine;
+	u32 inst = *(u32 *)arg << 4;
+	u32 dma0 = nv_ro32(imem, inst + 0);
+	u32 dma1 = nv_ro32(imem, inst + 4);
+	u32 dma2 = nv_ro32(imem, inst + 8);
+	u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
+	u32 size = dma1 + 1;
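+	/* the three words of the DMA object in instance memory: dma0 holds
+	 * the flags (bit 13 selects a linear object) plus the low 12 bits of
+	 * the base in its top bits, dma1 is the limit (hence size = dma1 + 1)
+	 * and dma2 the page-aligned part of the base address */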
+
+	/* only allow linear DMA objects */
+	if (!(dma0 & 0x00002000))
+		return -EINVAL;
+
+	if (mthd == 0x0190) {
+		/* DMA_CMD */
+		nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+		nv_wr32(priv, 0x00b334, base);
+		nv_wr32(priv, 0x00b324, size);
+	} else
+	if (mthd == 0x01a0) {
+		/* DMA_DATA */
+		nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+		nv_wr32(priv, 0x00b360, base);
+		nv_wr32(priv, 0x00b364, size);
+	} else {
+		/* DMA_IMAGE, VRAM only */
+		if (dma0 & 0x000c0000)
+			return -EINVAL;
+
+		nv_wr32(priv, 0x00b370, base);
+		nv_wr32(priv, 0x00b374, size);
+	}
+
+	return 0;
+}
+
+static struct nouveau_ofuncs
+nv31_mpeg_ofuncs = {
+	.ctor = nv31_mpeg_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_omthds
+nv31_mpeg_omthds[] = {
+	{ 0x0190, 0x0190, nv31_mpeg_mthd_dma },
+	{ 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
+	{ 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
+	{}
+};
+
+struct nouveau_oclass
+nv31_mpeg_sclass[] = {
+	{ 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
+	{}
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+static int
+nv31_mpeg_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nv31_mpeg_priv *priv = (void *)engine;
+	struct nv31_mpeg_chan *chan;
+	int ret;
+
+	if (!atomic_add_unless(&priv->refcount, 1, 1))
+		return -EBUSY;
+
+	ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void
+nv31_mpeg_context_dtor(struct nouveau_object *object)
+{
+	struct nv31_mpeg_priv *priv = (void *)object->engine;
+	struct nv31_mpeg_chan *chan = (void *)object;
+	atomic_dec(&priv->refcount);
+	nouveau_object_destroy(&chan->base);
+}
+
+static struct nouveau_oclass
+nv31_mpeg_cclass = {
+	.handle = NV_ENGCTX(MPEG, 0x31),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv31_mpeg_context_ctor,
+		.dtor = nv31_mpeg_context_dtor,
+		.init = nouveau_object_init,
+		.fini = nouveau_object_fini,
+	},
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+void
+nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i)
+{
+	struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+	struct nv31_mpeg_priv *priv = (void *)engine;
+
+	nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch);
+	nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit);
+	nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr);
+}
+
+void
+nv31_mpeg_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
+	struct nouveau_engine *engine = nv_engine(subdev);
+	struct nouveau_object *engctx;
+	struct nouveau_handle *handle;
+	struct nv31_mpeg_priv *priv = (void *)subdev;
+	u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
+	u32 stat = nv_rd32(priv, 0x00b100);
+	u32 type = nv_rd32(priv, 0x00b230);
+	u32 mthd = nv_rd32(priv, 0x00b234);
+	u32 data = nv_rd32(priv, 0x00b238);
+	u32 show = stat;
+	int chid;
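+	/* 0x00b230/234/238 appear to latch the type, method and data of the
+	 * trapped request; anything handled below is masked out of 'show' */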
+
+	engctx = nouveau_engctx_get(engine, inst);
+	chid   = pfifo->chid(pfifo, engctx);
+
+	if (stat & 0x01000000) {
+		/* happens on initial binding of the object */
+		if (type == 0x00000020 && mthd == 0x0000) {
+			nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
+			show &= ~0x01000000;
+		}
+
+		if (type == 0x00000010) {
+			handle = nouveau_handle_get_class(engctx, 0x3174);
+			if (handle && !nv_call(handle->object, mthd, data))
+				show &= ~0x01000000;
+			nouveau_handle_put(handle);
+		}
+	}
+
+	nv_wr32(priv, 0x00b100, stat);
+	nv_wr32(priv, 0x00b230, 0x00000001);
+
+	if (show) {
+		nv_error(priv,
+			 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 chid, inst << 4, nouveau_client_name(engctx), stat,
+			 type, mthd, data);
+	}
+
+	nouveau_engctx_put(engctx);
+}
+
+static int
+nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv31_mpeg_priv *priv;
+	int ret;
+
+	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000002;
+	nv_subdev(priv)->intr = nv31_mpeg_intr;
+	nv_engine(priv)->cclass = &nv31_mpeg_cclass;
+	nv_engine(priv)->sclass = nv31_mpeg_sclass;
+	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
+	return 0;
+}
+
+int
+nv31_mpeg_init(struct nouveau_object *object)
+{
+	struct nouveau_engine *engine = nv_engine(object->engine);
+	struct nv31_mpeg_priv *priv = (void *)engine;
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	int ret, i;
+
+	ret = nouveau_mpeg_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* VPE init */
+	nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
+	nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
+
+	for (i = 0; i < pfb->tile.regions; i++)
+		engine->tile_prog(engine, i);
+
+	/* PMPEG init */
+	nv_wr32(priv, 0x00b32c, 0x00000000);
+	nv_wr32(priv, 0x00b314, 0x00000100);
+	nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031);
+	nv_wr32(priv, 0x00b300, 0x02001ec1);
+	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+
+	nv_wr32(priv, 0x00b100, 0xffffffff);
+	nv_wr32(priv, 0x00b140, 0xffffffff);
+
+	if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
+		nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv31_mpeg_oclass = {
+	.handle = NV_ENGINE(MPEG, 0x31),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv31_mpeg_ctor,
+		.dtor = _nouveau_mpeg_dtor,
+		.init = nv31_mpeg_init,
+		.fini = _nouveau_mpeg_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
new file mode 100644
index 0000000..f7c581a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <subdev/instmem.h>
+
+#include <engine/mpeg.h>
+#include <engine/graph/nv40.h>
+
+struct nv40_mpeg_priv {
+	struct nouveau_mpeg base;
+};
+
+struct nv40_mpeg_chan {
+	struct nouveau_mpeg_chan base;
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+static int
+nv40_mpeg_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nv40_mpeg_chan *chan;
+	int ret;
+
+	ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
+					  264 * 4, 16,
+					  NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+nv40_mpeg_context_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv40_mpeg_priv *priv = (void *)object->engine;
+	struct nv40_mpeg_chan *chan = (void *)object;
+	u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
+
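+	/* pause the engine, drop the binding if this context is the one
+	 * currently active in 0x00b318, then allow it to run again */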
+	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
+	if (nv_rd32(priv, 0x00b318) == inst)
+		nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
+	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv40_mpeg_cclass = {
+	.handle = NV_ENGCTX(MPEG, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_mpeg_context_ctor,
+		.dtor = _nouveau_mpeg_context_dtor,
+		.init = _nouveau_mpeg_context_init,
+		.fini = nv40_mpeg_context_fini,
+		.rd32 = _nouveau_mpeg_context_rd32,
+		.wr32 = _nouveau_mpeg_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv40_mpeg_intr(struct nouveau_subdev *subdev)
+{
+	struct nv40_mpeg_priv *priv = (void *)subdev;
+	u32 stat;
+
+	if ((stat = nv_rd32(priv, 0x00b100)))
+		nv31_mpeg_intr(subdev);
+
+	if ((stat = nv_rd32(priv, 0x00b800))) {
+		nv_error(priv, "PMSRCH 0x%08x\n", stat);
+		nv_wr32(priv, 0x00b800, stat);
+	}
+}
+
+static int
+nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv40_mpeg_priv *priv;
+	int ret;
+
+	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000002;
+	nv_subdev(priv)->intr = nv40_mpeg_intr;
+	nv_engine(priv)->cclass = &nv40_mpeg_cclass;
+	nv_engine(priv)->sclass = nv31_mpeg_sclass;
+	nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv40_mpeg_oclass = {
+	.handle = NV_ENGINE(MPEG, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_mpeg_ctor,
+		.dtor = _nouveau_mpeg_dtor,
+		.init = nv31_mpeg_init,
+		.fini = _nouveau_mpeg_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
new file mode 100644
index 0000000..bc7d12b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/timer.h>
+
+#include <engine/mpeg.h>
+
+struct nv50_mpeg_priv {
+	struct nouveau_mpeg base;
+};
+
+struct nv50_mpeg_chan {
+	struct nouveau_mpeg_chan base;
+};
+
+/*******************************************************************************
+ * MPEG object classes
+ ******************************************************************************/
+
+static int
+nv50_mpeg_object_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 size,
+		      struct nouveau_object **pobject)
+{
+	struct nouveau_gpuobj *obj;
+	int ret;
+
+	ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+				    16, 16, 0, &obj);
+	*pobject = nv_object(obj);
+	if (ret)
+		return ret;
+
+	nv_wo32(obj, 0x00, nv_mclass(obj));
+	nv_wo32(obj, 0x04, 0x00000000);
+	nv_wo32(obj, 0x08, 0x00000000);
+	nv_wo32(obj, 0x0c, 0x00000000);
+	return 0;
+}
+
+struct nouveau_ofuncs
+nv50_mpeg_ofuncs = {
+	.ctor = nv50_mpeg_object_ctor,
+	.dtor = _nouveau_gpuobj_dtor,
+	.init = _nouveau_gpuobj_init,
+	.fini = _nouveau_gpuobj_fini,
+	.rd32 = _nouveau_gpuobj_rd32,
+	.wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv50_mpeg_sclass[] = {
+	{ 0x3174, &nv50_mpeg_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+int
+nv50_mpeg_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nv50_mpeg_chan *chan;
+	int ret;
+
+	ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
+					  0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
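+	/* seed the context image; the 0x0070 word mirrors the 0x00b300 value
+	 * that nv50_mpeg_init programs into the engine */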
+	nv_wo32(chan, 0x0070, 0x00801ec1);
+	nv_wo32(chan, 0x007c, 0x0000037c);
+	bar->flush(bar);
+	return 0;
+}
+
+static struct nouveau_oclass
+nv50_mpeg_cclass = {
+	.handle = NV_ENGCTX(MPEG, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_mpeg_context_ctor,
+		.dtor = _nouveau_mpeg_context_dtor,
+		.init = _nouveau_mpeg_context_init,
+		.fini = _nouveau_mpeg_context_fini,
+		.rd32 = _nouveau_mpeg_context_rd32,
+		.wr32 = _nouveau_mpeg_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+int
+nv50_mpeg_tlb_flush(struct nouveau_engine *engine)
+{
+	nv50_vm_flush_engine(&engine->base, 0x08);
+	return 0;
+}
+
+void
+nv50_mpeg_intr(struct nouveau_subdev *subdev)
+{
+	struct nv50_mpeg_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, 0x00b100);
+	u32 type = nv_rd32(priv, 0x00b230);
+	u32 mthd = nv_rd32(priv, 0x00b234);
+	u32 data = nv_rd32(priv, 0x00b238);
+	u32 show = stat;
+
+	if (stat & 0x01000000) {
+		/* happens on initial binding of the object */
+		if (type == 0x00000020 && mthd == 0x0000) {
+			nv_wr32(priv, 0x00b308, 0x00000100);
+			show &= ~0x01000000;
+		}
+	}
+
+	if (show) {
+		nv_info(priv, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+			stat, type, mthd, data);
+	}
+
+	nv_wr32(priv, 0x00b100, stat);
+	nv_wr32(priv, 0x00b230, 0x00000001);
+}
+
+static void
+nv50_vpe_intr(struct nouveau_subdev *subdev)
+{
+	struct nv50_mpeg_priv *priv = (void *)subdev;
+	u32 stat;
+
+	if (nv_rd32(priv, 0x00b100))
+		nv50_mpeg_intr(subdev);
+
+	if ((stat = nv_rd32(priv, 0x00b800))) {
+		nv_info(priv, "PMSRCH: 0x%08x\n", stat);
+		nv_wr32(priv, 0x00b800, stat);
+	}
+}
+
+static int
+nv50_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_mpeg_priv *priv;
+	int ret;
+
+	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00400002;
+	nv_subdev(priv)->intr = nv50_vpe_intr;
+	nv_engine(priv)->cclass = &nv50_mpeg_cclass;
+	nv_engine(priv)->sclass = nv50_mpeg_sclass;
+	nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush;
+	return 0;
+}
+
+int
+nv50_mpeg_init(struct nouveau_object *object)
+{
+	struct nv50_mpeg_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_mpeg_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x00b32c, 0x00000000);
+	nv_wr32(priv, 0x00b314, 0x00000100);
+	nv_wr32(priv, 0x00b0e0, 0x0000001a);
+
+	nv_wr32(priv, 0x00b220, 0x00000044);
+	nv_wr32(priv, 0x00b300, 0x00801ec1);
+	nv_wr32(priv, 0x00b390, 0x00000000);
+	nv_wr32(priv, 0x00b394, 0x00000000);
+	nv_wr32(priv, 0x00b398, 0x00000000);
+	nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+
+	nv_wr32(priv, 0x00b100, 0xffffffff);
+	nv_wr32(priv, 0x00b140, 0xffffffff);
+
+	if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
+		nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_mpeg_oclass = {
+	.handle = NV_ENGINE(MPEG, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_mpeg_ctor,
+		.dtor = _nouveau_mpeg_dtor,
+		.init = nv50_mpeg_init,
+		.fini = _nouveau_mpeg_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
new file mode 100644
index 0000000..8f805b4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/timer.h>
+
+#include <engine/mpeg.h>
+
+struct nv84_mpeg_priv {
+	struct nouveau_mpeg base;
+};
+
+struct nv84_mpeg_chan {
+	struct nouveau_mpeg_chan base;
+};
+
+/*******************************************************************************
+ * MPEG object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_mpeg_sclass[] = {
+	{ 0x8274, &nv50_mpeg_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_mpeg_cclass = {
+	.handle = NV_ENGCTX(MPEG, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_mpeg_context_ctor,
+		.dtor = _nouveau_mpeg_context_dtor,
+		.init = _nouveau_mpeg_context_init,
+		.fini = _nouveau_mpeg_context_fini,
+		.rd32 = _nouveau_mpeg_context_rd32,
+		.wr32 = _nouveau_mpeg_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv84_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv84_mpeg_priv *priv;
+	int ret;
+
+	ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000002;
+	nv_subdev(priv)->intr = nv50_mpeg_intr;
+	nv_engine(priv)->cclass = &nv84_mpeg_cclass;
+	nv_engine(priv)->sclass = nv84_mpeg_sclass;
+	nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush;
+	return 0;
+}
+
+struct nouveau_oclass
+nv84_mpeg_oclass = {
+	.handle = NV_ENGINE(MPEG, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_mpeg_ctor,
+		.dtor = _nouveau_mpeg_dtor,
+		.init = nv50_mpeg_init,
+		.fini = _nouveau_mpeg_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
new file mode 100644
index 0000000..5a5b2a7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/class.h>
+
+#include <engine/ppp.h>
+
+struct nv98_ppp_priv {
+	struct nouveau_engine base;
+};
+
+struct nv98_ppp_chan {
+	struct nouveau_engctx base;
+};
+
+/*******************************************************************************
+ * PPP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_ppp_sclass[] = {
+	{},
+};
+
+/*******************************************************************************
+ * PPPP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_ppp_cclass = {
+	.handle = NV_ENGCTX(PPP, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_engctx_ctor,
+		.dtor = _nouveau_engctx_dtor,
+		.init = _nouveau_engctx_init,
+		.fini = _nouveau_engctx_fini,
+		.rd32 = _nouveau_engctx_rd32,
+		.wr32 = _nouveau_engctx_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PPPP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv98_ppp_priv *priv;
+	int ret;
+
+	ret = nouveau_engine_create(parent, engine, oclass, true,
+				    "PPPP", "ppp", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00400002;
+	nv_engine(priv)->cclass = &nv98_ppp_cclass;
+	nv_engine(priv)->sclass = nv98_ppp_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nv98_ppp_oclass = {
+	.handle = NV_ENGINE(PPP, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv98_ppp_ctor,
+		.dtor = _nouveau_engine_dtor,
+		.init = _nouveau_engine_init,
+		.fini = _nouveau_engine_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
new file mode 100644
index 0000000..ebf0d86
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/ppp.h>
+
+struct nvc0_ppp_priv {
+	struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * PPP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_sclass[] = {
+	{ 0x90b3, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PPPP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_cclass = {
+	.handle = NV_ENGCTX(PPP, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PPPP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_ppp_init(struct nouveau_object *object)
+{
+	struct nvc0_ppp_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_falcon_init(&priv->base);
+	if (ret)
+		return ret;
+
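+	/* post-boot falcon pokes; 0x086010/0x08601c are assumed to be the
+	 * unit's interrupt enable/routing registers */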
+	nv_wr32(priv, 0x086010, 0x0000fff2);
+	nv_wr32(priv, 0x08601c, 0x0000fff2);
+	return 0;
+}
+
+static int
+nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nvc0_ppp_priv *priv;
+	int ret;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
+				    "PPPP", "ppp", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000002;
+	nv_engine(priv)->cclass = &nvc0_ppp_cclass;
+	nv_engine(priv)->sclass = nvc0_ppp_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_ppp_oclass = {
+	.handle = NV_ENGINE(PPP, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_ppp_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = nvc0_ppp_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
new file mode 100644
index 0000000..2a859a3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/software.h>
+#include <engine/fifo.h>
+
+struct nv04_software_priv {
+	struct nouveau_software base;
+};
+
+struct nv04_software_chan {
+	struct nouveau_software_chan base;
+};
+
+/*******************************************************************************
+ * software object classes
+ ******************************************************************************/
+
+static int
+nv04_software_set_ref(struct nouveau_object *object, u32 mthd,
+		      void *data, u32 size)
+{
+	struct nouveau_object *channel = (void *)nv_engctx(object->parent);
+	struct nouveau_fifo_chan *fifo = (void *)channel->parent;
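+
+	/* 0x0150 (set_ref): publish the argument as the channel's current
+	 * reference value, presumably polled for fence completion */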
+	atomic_set(&fifo->refcnt, *(u32*)data);
+	return 0;
+}
+
+static int
+nv04_software_flip(struct nouveau_object *object, u32 mthd,
+		   void *args, u32 size)
+{
+	struct nv04_software_chan *chan = (void *)nv_engctx(object->parent);
+	if (chan->base.flip)
+		return chan->base.flip(chan->base.flip_data);
+	return -EINVAL;
+}
+
+static struct nouveau_omthds
+nv04_software_omthds[] = {
+	{ 0x0150, 0x0150, nv04_software_set_ref },
+	{ 0x0500, 0x0500, nv04_software_flip },
+	{}
+};
+
+static struct nouveau_oclass
+nv04_software_sclass[] = {
+	{ 0x006e, &nouveau_object_ofuncs, nv04_software_omthds },
+	{}
+};
+
+/*******************************************************************************
+ * software context
+ ******************************************************************************/
+
+static int
+nv04_software_context_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 size,
+		      struct nouveau_object **pobject)
+{
+	struct nv04_software_chan *chan;
+	int ret;
+
+	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct nouveau_oclass
+nv04_software_cclass = {
+	.handle = NV_ENGCTX(SW, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_software_context_ctor,
+		.dtor = _nouveau_software_context_dtor,
+		.init = _nouveau_software_context_init,
+		.fini = _nouveau_software_context_fini,
+	},
+};
+
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+void
+nv04_software_intr(struct nouveau_subdev *subdev)
+{
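+	/* clear the software-pending bit (bit 31) of PMC intr status 0x000100 */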
+	nv_mask(subdev, 0x000100, 0x80000000, 0x00000000);
+}
+
+static int
+nv04_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv04_software_priv *priv;
+	int ret;
+
+	ret = nouveau_software_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nv04_software_cclass;
+	nv_engine(priv)->sclass = nv04_software_sclass;
+	nv_subdev(priv)->intr = nv04_software_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nv04_software_oclass = {
+	.handle = NV_ENGINE(SW, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_software_ctor,
+		.dtor = _nouveau_software_dtor,
+		.init = _nouveau_software_init,
+		.fini = _nouveau_software_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
new file mode 100644
index 0000000..a019364
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/software.h>
+
+struct nv10_software_priv {
+	struct nouveau_software base;
+};
+
+struct nv10_software_chan {
+	struct nouveau_software_chan base;
+};
+
+/*******************************************************************************
+ * software object classes
+ ******************************************************************************/
+
+static int
+nv10_software_flip(struct nouveau_object *object, u32 mthd,
+		   void *args, u32 size)
+{
+	struct nv10_software_chan *chan = (void *)nv_engctx(object->parent);
+	if (chan->base.flip)
+		return chan->base.flip(chan->base.flip_data);
+	return -EINVAL;
+}
+
+static struct nouveau_omthds
+nv10_software_omthds[] = {
+	{ 0x0500, 0x0500, nv10_software_flip },
+	{}
+};
+
+static struct nouveau_oclass
+nv10_software_sclass[] = {
+	{ 0x016e, &nouveau_object_ofuncs, nv10_software_omthds },
+	{}
+};
+
+/*******************************************************************************
+ * software context
+ ******************************************************************************/
+
+static int
+nv10_software_context_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 size,
+		      struct nouveau_object **pobject)
+{
+	struct nv10_software_chan *chan;
+	int ret;
+
+	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct nouveau_oclass
+nv10_software_cclass = {
+	.handle = NV_ENGCTX(SW, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_software_context_ctor,
+		.dtor = _nouveau_software_context_dtor,
+		.init = _nouveau_software_context_init,
+		.fini = _nouveau_software_context_fini,
+	},
+};
+
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv10_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv10_software_priv *priv;
+	int ret;
+
+	ret = nouveau_software_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nv10_software_cclass;
+	nv_engine(priv)->sclass = nv10_software_sclass;
+	nv_subdev(priv)->intr = nv04_software_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nv10_software_oclass = {
+	.handle = NV_ENGINE(SW, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_software_ctor,
+		.dtor = _nouveau_software_dtor,
+		.init = _nouveau_software_init,
+		.fini = _nouveau_software_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
new file mode 100644
index 0000000..c48e749
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/gpuobj.h>
+#include <core/event.h>
+
+#include <subdev/bar.h>
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+struct nv50_software_priv {
+	struct nouveau_software base;
+};
+
+struct nv50_software_chan {
+	struct nouveau_software_chan base;
+};
+
+/*******************************************************************************
+ * software object classes
+ ******************************************************************************/
+
+static int
+nv50_software_mthd_dma_vblsem(struct nouveau_object *object, u32 mthd,
+			      void *args, u32 size)
+{
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+	struct nouveau_fifo_chan *fifo = (void *)nv_object(chan)->parent;
+	struct nouveau_handle *handle;
+	int ret = -EINVAL;
+
+	handle = nouveau_namedb_get(nv_namedb(fifo), *(u32 *)args);
+	if (!handle)
+		return -ENOENT;
+
+	if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
+		struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object);
+		chan->base.vblank.ctxdma = gpuobj->node->offset >> 4;
+		ret = 0;
+	}
+	nouveau_namedb_put(handle);
+	return ret;
+}
+
+static int
+nv50_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
+				 void *args, u32 size)
+{
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+	chan->base.vblank.offset = *(u32 *)args;
+	return 0;
+}
+
+static int
+nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
+				void *args, u32 size)
+{
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+	chan->base.vblank.value = *(u32 *)args;
+	return 0;
+}
+
+static int
+nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
+				  void *args, u32 size)
+{
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+	struct nouveau_disp *disp = nouveau_disp(object);
+	u32 crtc = *(u32 *)args;
+	if (crtc > 1)
+		return -EINVAL;
+
+	nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
+	return 0;
+}
+
+static int
+nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
+			void *args, u32 size)
+{
+	struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+	if (chan->base.flip)
+		return chan->base.flip(chan->base.flip_data);
+	return -EINVAL;
+}
+
+static struct nouveau_omthds
+nv50_software_omthds[] = {
+	{ 0x018c, 0x018c, nv50_software_mthd_dma_vblsem },
+	{ 0x0400, 0x0400, nv50_software_mthd_vblsem_offset },
+	{ 0x0404, 0x0404, nv50_software_mthd_vblsem_value },
+	{ 0x0408, 0x0408, nv50_software_mthd_vblsem_release },
+	{ 0x0500, 0x0500, nv50_software_mthd_flip },
+	{}
+};
+
+static struct nouveau_oclass
+nv50_software_sclass[] = {
+	{ 0x506e, &nouveau_object_ofuncs, nv50_software_omthds },
+	{}
+};
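+
+/* Usage sketch (illustrative, not part of this patch): to arm a vblank
+ * semaphore on a 506e object, a client first names the target ctxdma via
+ * method 0x018c, then programs the semaphore offset (0x0400) and value
+ * (0x0404), and finally fires 0x0408 with the crtc index; the handler
+ * registered below performs the write when that head's vblank occurs.
+ */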
+
+/*******************************************************************************
+ * software context
+ ******************************************************************************/
+
+static int
+nv50_software_vblsem_release(struct nouveau_eventh *event, int head)
+{
+	struct nouveau_software_chan *chan =
+		container_of(event, struct nouveau_software_chan, vblank.event);
+	struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
+	struct nouveau_bar *bar = nouveau_bar(priv);
+
+	/* point the semaphore write at this channel and its ctxdma */
+	nv_wr32(priv, 0x001704, chan->vblank.channel);
+	nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+	bar->flush(bar);
+
+	/* the offset/value register pair moved after the original NV50 */
+	if (nv_device(priv)->chipset == 0x50) {
+		nv_wr32(priv, 0x001570, chan->vblank.offset);
+		nv_wr32(priv, 0x001574, chan->vblank.value);
+	} else {
+		nv_wr32(priv, 0x060010, chan->vblank.offset);
+		nv_wr32(priv, 0x060014, chan->vblank.value);
+	}
+
+	return NVKM_EVENT_DROP;
+}
+
+static int
+nv50_software_context_ctor(struct nouveau_object *parent,
+			   struct nouveau_object *engine,
+			   struct nouveau_oclass *oclass, void *data, u32 size,
+			   struct nouveau_object **pobject)
+{
+	struct nv50_software_chan *chan;
+	int ret;
+
+	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	/* instance address of the parent fifo channel, in 4KiB units */
+	chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
+	chan->base.vblank.event.func = nv50_software_vblsem_release;
+	return 0;
+}
+
+static struct nouveau_oclass
+nv50_software_cclass = {
+	.handle = NV_ENGCTX(SW, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_software_context_ctor,
+		.dtor = _nouveau_software_context_dtor,
+		.init = _nouveau_software_context_init,
+		.fini = _nouveau_software_context_fini,
+	},
+};
+
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass, void *data, u32 size,
+		   struct nouveau_object **pobject)
+{
+	struct nv50_software_priv *priv;
+	int ret;
+
+	ret = nouveau_software_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nv50_software_cclass;
+	nv_engine(priv)->sclass = nv50_software_sclass;
+	nv_subdev(priv)->intr = nv04_software_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_software_oclass = {
+	.handle = NV_ENGINE(SW, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_software_ctor,
+		.dtor = _nouveau_software_dtor,
+		.init = _nouveau_software_init,
+		.fini = _nouveau_software_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
new file mode 100644
index 0000000..d698e71
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/event.h>
+
+#include <subdev/bar.h>
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+struct nvc0_software_priv {
+	struct nouveau_software base;
+};
+
+struct nvc0_software_chan {
+	struct nouveau_software_chan base;
+};
+
+/*******************************************************************************
+ * software object classes
+ ******************************************************************************/
+
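+/* The semaphore offset is 40 bits wide on nvc0: method 0x0400 supplies
+ * bits 39:32 and method 0x0404 the low 32 bits, accumulated below into
+ * the single vblank.offset field.
+ */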
+static int
+nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
+				 void *args, u32 size)
+{
+	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+	u64 data = *(u32 *)args;
+	if (mthd == 0x0400) {
+		chan->base.vblank.offset &= 0x00ffffffffULL;
+		chan->base.vblank.offset |= data << 32;
+	} else {
+		chan->base.vblank.offset &= 0xff00000000ULL;
+		chan->base.vblank.offset |= data;
+	}
+	return 0;
+}
+
+static int
+nvc0_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
+				void *args, u32 size)
+{
+	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+	chan->base.vblank.value = *(u32 *)args;
+	return 0;
+}
+
+static int
+nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
+				  void *args, u32 size)
+{
+	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+	struct nouveau_disp *disp = nouveau_disp(object);
+	u32 crtc = *(u32 *)args;
+
+	if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
+		return -EINVAL;
+
+	nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
+	return 0;
+}
+
+static int
+nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
+			void *args, u32 size)
+{
+	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+	if (chan->base.flip)
+		return chan->base.flip(chan->base.flip_data);
+	return -EINVAL;
+}
+
+static int
+nvc0_software_mthd_mp_control(struct nouveau_object *object, u32 mthd,
+                              void *args, u32 size)
+{
+	struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+	struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
+	u32 data = *(u32 *)args;
+
+	switch (mthd) {
+	case 0x600:
+		nv_wr32(priv, 0x419e00, data); /* MP.PM_UNK000 */
+		break;
+	case 0x644:
+		if (data & ~0x1ffffe)
+			return -EINVAL;
+		nv_wr32(priv, 0x419e44, data); /* MP.TRAP_WARP_ERROR_EN */
+		break;
+	case 0x6ac:
+		nv_wr32(priv, 0x419eac, data); /* MP.PM_UNK0AC */
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct nouveau_omthds
+nvc0_software_omthds[] = {
+	{ 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
+	{ 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
+	{ 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
+	{ 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
+	{ 0x0500, 0x0500, nvc0_software_mthd_flip },
+	{ 0x0600, 0x0600, nvc0_software_mthd_mp_control },
+	{ 0x0644, 0x0644, nvc0_software_mthd_mp_control },
+	{ 0x06ac, 0x06ac, nvc0_software_mthd_mp_control },
+	{}
+};
+
+static struct nouveau_oclass
+nvc0_software_sclass[] = {
+	{ 0x906e, &nouveau_object_ofuncs, nvc0_software_omthds },
+	{}
+};
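+
+/* 906e mirrors 506e: the vblank-semaphore methods are renumbered (the
+ * wider offset now takes two methods, 0x0400/0x0404, pushing value to
+ * 0x0408 and release to 0x040c) and the MP control methods are new.
+ */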
+
+/*******************************************************************************
+ * software context
+ ******************************************************************************/
+
+static int
+nvc0_software_vblsem_release(struct nouveau_eventh *event, int head)
+{
+	struct nouveau_software_chan *chan =
+		container_of(event, struct nouveau_software_chan, vblank.event);
+	struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
+	struct nouveau_bar *bar = nouveau_bar(priv);
+
+	nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
+	bar->flush(bar);
+	nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
+	nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
+	nv_wr32(priv, 0x060014, chan->vblank.value);
+
+	return NVKM_EVENT_DROP;
+}
+
+static int
+nvc0_software_context_ctor(struct nouveau_object *parent,
+			   struct nouveau_object *engine,
+			   struct nouveau_oclass *oclass, void *data, u32 size,
+			   struct nouveau_object **pobject)
+{
+	struct nvc0_software_chan *chan;
+	int ret;
+
+	ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
+	chan->base.vblank.event.func = nvc0_software_vblsem_release;
+	return 0;
+}
+
+static struct nouveau_oclass
+nvc0_software_cclass = {
+	.handle = NV_ENGCTX(SW, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_software_context_ctor,
+		.dtor = _nouveau_software_context_dtor,
+		.init = _nouveau_software_context_init,
+		.fini = _nouveau_software_context_fini,
+	},
+};
+
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass, void *data, u32 size,
+		   struct nouveau_object **pobject)
+{
+	struct nvc0_software_priv *priv;
+	int ret;
+
+	ret = nouveau_software_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->cclass = &nvc0_software_cclass;
+	nv_engine(priv)->sclass = nvc0_software_sclass;
+	nv_subdev(priv)->intr = nv04_software_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_software_oclass = {
+	.handle = NV_ENGINE(SW, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_software_ctor,
+		.dtor = _nouveau_software_dtor,
+		.init = _nouveau_software_init,
+		.fini = _nouveau_software_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
new file mode 100644
index 0000000..261cd96
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/engctx.h>
+#include <core/class.h>
+
+#include <engine/vp.h>
+
+struct nv84_vp_priv {
+	struct nouveau_engine base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_vp_sclass[] = {
+	{},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_vp_cclass = {
+	.handle = NV_ENGCTX(VP, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_engctx_ctor,
+		.dtor = _nouveau_engctx_dtor,
+		.init = _nouveau_engctx_init,
+		.fini = _nouveau_engctx_fini,
+		.rd32 = _nouveau_engctx_rd32,
+		.wr32 = _nouveau_engctx_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv84_vp_priv *priv;
+	int ret;
+
+	ret = nouveau_engine_create(parent, engine, oclass, true,
+				    "PVP", "vp", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x01020000;
+	nv_engine(priv)->cclass = &nv84_vp_cclass;
+	nv_engine(priv)->sclass = nv84_vp_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nv84_vp_oclass = {
+	.handle = NV_ENGINE(VP, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_vp_ctor,
+		.dtor = _nouveau_engine_dtor,
+		.init = _nouveau_engine_init,
+		.fini = _nouveau_engine_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
new file mode 100644
index 0000000..f761949
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nvc0_vp_priv {
+	struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_sclass[] = {
+	{ 0x90b2, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_cclass = {
+	.handle = NV_ENGCTX(VP, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_vp_init(struct nouveau_object *object)
+{
+	struct nvc0_vp_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_falcon_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x085010, 0x0000fff2);
+	nv_wr32(priv, 0x08501c, 0x0000fff2);
+	return 0;
+}
+
+static int
+nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nvc0_vp_priv *priv;
+	int ret;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+				    "PVP", "vp", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00020000;
+	nv_engine(priv)->cclass = &nvc0_vp_cclass;
+	nv_engine(priv)->sclass = nvc0_vp_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_vp_oclass = {
+	.handle = NV_ENGINE(VP, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_vp_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = nvc0_vp_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/linux-imx/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
new file mode 100644
index 0000000..2384ce5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nve0_vp_priv {
+	struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_sclass[] = {
+	{ 0x95b2, &nouveau_object_ofuncs },
+	{},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_cclass = {
+	.handle = NV_ENGCTX(VP, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = _nouveau_falcon_context_ctor,
+		.dtor = _nouveau_falcon_context_dtor,
+		.init = _nouveau_falcon_context_init,
+		.fini = _nouveau_falcon_context_fini,
+		.rd32 = _nouveau_falcon_context_rd32,
+		.wr32 = _nouveau_falcon_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_vp_init(struct nouveau_object *object)
+{
+	struct nve0_vp_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_falcon_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x085010, 0x0000fff2);
+	nv_wr32(priv, 0x08501c, 0x0000fff2);
+	return 0;
+}
+
+static int
+nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nve0_vp_priv *priv;
+	int ret;
+
+	ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+				    "PVP", "vp", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00020000;
+	nv_engine(priv)->cclass = &nve0_vp_cclass;
+	nv_engine(priv)->sclass = nve0_vp_sclass;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_vp_oclass = {
+	.handle = NV_ENGINE(VP, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_vp_ctor,
+		.dtor = _nouveau_falcon_dtor,
+		.init = nve0_vp_init,
+		.fini = _nouveau_falcon_fini,
+		.rd32 = _nouveau_falcon_rd32,
+		.wr32 = _nouveau_falcon_wr32,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/class.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/class.h
new file mode 100644
index 0000000..5a5961b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -0,0 +1,361 @@
+#ifndef __NOUVEAU_CLASS_H__
+#define __NOUVEAU_CLASS_H__
+
+/* Device class
+ *
+ * 0080: NV_DEVICE
+ */
+#define NV_DEVICE_CLASS                                              0x00000080
+
+#define NV_DEVICE_DISABLE_IDENTIFY                        0x0000000000000001ULL
+#define NV_DEVICE_DISABLE_MMIO                            0x0000000000000002ULL
+#define NV_DEVICE_DISABLE_VBIOS                           0x0000000000000004ULL
+#define NV_DEVICE_DISABLE_CORE                            0x0000000000000008ULL
+#define NV_DEVICE_DISABLE_DISP                            0x0000000000010000ULL
+#define NV_DEVICE_DISABLE_FIFO                            0x0000000000020000ULL
+#define NV_DEVICE_DISABLE_GRAPH                           0x0000000100000000ULL
+#define NV_DEVICE_DISABLE_MPEG                            0x0000000200000000ULL
+#define NV_DEVICE_DISABLE_ME                              0x0000000400000000ULL
+#define NV_DEVICE_DISABLE_VP                              0x0000000800000000ULL
+#define NV_DEVICE_DISABLE_CRYPT                           0x0000001000000000ULL
+#define NV_DEVICE_DISABLE_BSP                             0x0000002000000000ULL
+#define NV_DEVICE_DISABLE_PPP                             0x0000004000000000ULL
+#define NV_DEVICE_DISABLE_COPY0                           0x0000008000000000ULL
+#define NV_DEVICE_DISABLE_COPY1                           0x0000010000000000ULL
+#define NV_DEVICE_DISABLE_UNK1C1                          0x0000020000000000ULL
+#define NV_DEVICE_DISABLE_VENC                            0x0000040000000000ULL
+
+struct nv_device_class {
+	u64 device;	/* device identifier, ~0 for client default */
+	u64 disable;	/* disable particular subsystems */
+	u64 debug0;	/* as above, but *internal* ids, and *NOT* ABI */
+};
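+
+/* Usage sketch (illustrative): a client asking for its default device
+ * with the MPEG engine masked off would pass
+ *
+ *	struct nv_device_class args = {
+ *		.device  = ~0ULL,
+ *		.disable = NV_DEVICE_DISABLE_MPEG,
+ *	};
+ */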
+
+/* DMA object classes
+ *
+ * 0002: NV_DMA_FROM_MEMORY
+ * 0003: NV_DMA_TO_MEMORY
+ * 003d: NV_DMA_IN_MEMORY
+ */
+#define NV_DMA_FROM_MEMORY_CLASS                                     0x00000002
+#define NV_DMA_TO_MEMORY_CLASS                                       0x00000003
+#define NV_DMA_IN_MEMORY_CLASS                                       0x0000003d
+
+#define NV_DMA_TARGET_MASK                                           0x000000ff
+#define NV_DMA_TARGET_VM                                             0x00000000
+#define NV_DMA_TARGET_VRAM                                           0x00000001
+#define NV_DMA_TARGET_PCI                                            0x00000002
+#define NV_DMA_TARGET_PCI_US                                         0x00000003
+#define NV_DMA_TARGET_AGP                                            0x00000004
+#define NV_DMA_ACCESS_MASK                                           0x00000f00
+#define NV_DMA_ACCESS_VM                                             0x00000000
+#define NV_DMA_ACCESS_RD                                             0x00000100
+#define NV_DMA_ACCESS_WR                                             0x00000200
+#define NV_DMA_ACCESS_RDWR                                           0x00000300
+
+/* NV50:NVC0 */
+#define NV50_DMA_CONF0_ENABLE                                        0x80000000
+#define NV50_DMA_CONF0_PRIV                                          0x00300000
+#define NV50_DMA_CONF0_PRIV_VM                                       0x00000000
+#define NV50_DMA_CONF0_PRIV_US                                       0x00100000
+#define NV50_DMA_CONF0_PRIV__S                                       0x00200000
+#define NV50_DMA_CONF0_PART                                          0x00030000
+#define NV50_DMA_CONF0_PART_VM                                       0x00000000
+#define NV50_DMA_CONF0_PART_256                                      0x00010000
+#define NV50_DMA_CONF0_PART_1KB                                      0x00020000
+#define NV50_DMA_CONF0_COMP                                          0x00000180
+#define NV50_DMA_CONF0_COMP_NONE                                     0x00000000
+#define NV50_DMA_CONF0_COMP_VM                                       0x00000180
+#define NV50_DMA_CONF0_TYPE                                          0x0000007f
+#define NV50_DMA_CONF0_TYPE_LINEAR                                   0x00000000
+#define NV50_DMA_CONF0_TYPE_VM                                       0x0000007f
+
+/* NVC0:NVD9 */
+#define NVC0_DMA_CONF0_ENABLE                                        0x80000000
+#define NVC0_DMA_CONF0_PRIV                                          0x00300000
+#define NVC0_DMA_CONF0_PRIV_VM                                       0x00000000
+#define NVC0_DMA_CONF0_PRIV_US                                       0x00100000
+#define NVC0_DMA_CONF0_PRIV__S                                       0x00200000
+#define NVC0_DMA_CONF0_UNKN /* PART? */                              0x00030000
+#define NVC0_DMA_CONF0_TYPE                                          0x000000ff
+#define NVC0_DMA_CONF0_TYPE_LINEAR                                   0x00000000
+#define NVC0_DMA_CONF0_TYPE_VM                                       0x000000ff
+
+/* NVD9- */
+#define NVD0_DMA_CONF0_ENABLE                                        0x80000000
+#define NVD0_DMA_CONF0_PAGE                                          0x00000400
+#define NVD0_DMA_CONF0_PAGE_LP                                       0x00000000
+#define NVD0_DMA_CONF0_PAGE_SP                                       0x00000400
+#define NVD0_DMA_CONF0_TYPE                                          0x000000ff
+#define NVD0_DMA_CONF0_TYPE_LINEAR                                   0x00000000
+#define NVD0_DMA_CONF0_TYPE_VM                                       0x000000ff
+
+struct nv_dma_class {
+	u32 flags;
+	u32 pad0;
+	u64 start;
+	u64 limit;
+	u32 conf0;
+};
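+
+/* Usage sketch (illustrative): a linear, read/write ctxdma spanning the
+ * first 16MiB of VRAM might be described as
+ *
+ *	struct nv_dma_class args = {
+ *		.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR,
+ *		.start = 0x0000000000000000ULL,
+ *		.limit = 0x0000000000ffffffULL,
+ *		.conf0 = NV50_DMA_CONF0_ENABLE | NV50_DMA_CONF0_TYPE_LINEAR,
+ *	};
+ *
+ * (conf0 shown for the NV50:NVC0 layout; later families use the NVC0/NVD0
+ * variants above.)
+ */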
+
+/* DMA FIFO channel classes
+ *
+ * 006b: NV03_CHANNEL_DMA
+ * 006e: NV10_CHANNEL_DMA
+ * 176e: NV17_CHANNEL_DMA
+ * 406e: NV40_CHANNEL_DMA
+ * 506e: NV50_CHANNEL_DMA
+ * 826e: NV84_CHANNEL_DMA
+ */
+#define NV03_CHANNEL_DMA_CLASS                                       0x0000006b
+#define NV10_CHANNEL_DMA_CLASS                                       0x0000006e
+#define NV17_CHANNEL_DMA_CLASS                                       0x0000176e
+#define NV40_CHANNEL_DMA_CLASS                                       0x0000406e
+#define NV50_CHANNEL_DMA_CLASS                                       0x0000506e
+#define NV84_CHANNEL_DMA_CLASS                                       0x0000826e
+
+struct nv03_channel_dma_class {
+	u32 pushbuf;
+	u32 pad0;
+	u64 offset;
+};
+
+/* Indirect FIFO channel classes
+ *
+ * 506f: NV50_CHANNEL_IND
+ * 826f: NV84_CHANNEL_IND
+ * 906f: NVC0_CHANNEL_IND
+ * a06f: NVE0_CHANNEL_IND
+ */
+
+#define NV50_CHANNEL_IND_CLASS                                       0x0000506f
+#define NV84_CHANNEL_IND_CLASS                                       0x0000826f
+#define NVC0_CHANNEL_IND_CLASS                                       0x0000906f
+#define NVE0_CHANNEL_IND_CLASS                                       0x0000a06f
+
+struct nv50_channel_ind_class {
+	u32 pushbuf;
+	u32 ilength;
+	u64 ioffset;
+};
+
+#define NVE0_CHANNEL_IND_ENGINE_GR                                   0x00000001
+#define NVE0_CHANNEL_IND_ENGINE_VP                                   0x00000002
+#define NVE0_CHANNEL_IND_ENGINE_PPP                                  0x00000004
+#define NVE0_CHANNEL_IND_ENGINE_BSP                                  0x00000008
+#define NVE0_CHANNEL_IND_ENGINE_CE0                                  0x00000010
+#define NVE0_CHANNEL_IND_ENGINE_CE1                                  0x00000020
+#define NVE0_CHANNEL_IND_ENGINE_ENC                                  0x00000040
+
+struct nve0_channel_ind_class {
+	u32 pushbuf;
+	u32 ilength;
+	u64 ioffset;
+	u32 engine;
+};
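+
+/* Usage sketch (illustrative): an NVE0 indirect channel bound only to
+ * PGRAPH would set
+ *
+ *	struct nve0_channel_ind_class args = {
+ *		.pushbuf = pushbuf_handle,
+ *		.ioffset = ib_offset,
+ *		.ilength = ib_length,
+ *		.engine  = NVE0_CHANNEL_IND_ENGINE_GR,
+ *	};
+ *
+ * where pushbuf_handle, ib_offset and ib_length are placeholders.
+ */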
+
+/* 0046: NV04_DISP
+ */
+
+#define NV04_DISP_CLASS                                              0x00000046
+
+struct nv04_display_class {
+};
+
+/* 5070: NV50_DISP
+ * 8270: NV84_DISP
+ * 8370: NVA0_DISP
+ * 8870: NV94_DISP
+ * 8570: NVA3_DISP
+ * 9070: NVD0_DISP
+ * 9170: NVE0_DISP
+ * 9270: NVF0_DISP
+ */
+
+#define NV50_DISP_CLASS                                              0x00005070
+#define NV84_DISP_CLASS                                              0x00008270
+#define NVA0_DISP_CLASS                                              0x00008370
+#define NV94_DISP_CLASS                                              0x00008870
+#define NVA3_DISP_CLASS                                              0x00008570
+#define NVD0_DISP_CLASS                                              0x00009070
+#define NVE0_DISP_CLASS                                              0x00009170
+#define NVF0_DISP_CLASS                                              0x00009270
+
+#define NV50_DISP_SOR_MTHD                                           0x00010000
+#define NV50_DISP_SOR_MTHD_TYPE                                      0x0000f000
+#define NV50_DISP_SOR_MTHD_HEAD                                      0x00000018
+#define NV50_DISP_SOR_MTHD_LINK                                      0x00000004
+#define NV50_DISP_SOR_MTHD_OR                                        0x00000003
+
+#define NV50_DISP_SOR_PWR                                            0x00010000
+#define NV50_DISP_SOR_PWR_STATE                                      0x00000001
+#define NV50_DISP_SOR_PWR_STATE_ON                                   0x00000001
+#define NV50_DISP_SOR_PWR_STATE_OFF                                  0x00000000
+#define NVA3_DISP_SOR_HDA_ELD                                        0x00010100
+#define NV84_DISP_SOR_HDMI_PWR                                       0x00012000
+#define NV84_DISP_SOR_HDMI_PWR_STATE                                 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF                             0x00000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_ON                              0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET                         0x001f0000
+#define NV84_DISP_SOR_HDMI_PWR_REKEY                                 0x0000007f
+#define NV50_DISP_SOR_LVDS_SCRIPT                                    0x00013000
+#define NV50_DISP_SOR_LVDS_SCRIPT_ID                                 0x0000ffff
+
+#define NV50_DISP_DAC_MTHD                                           0x00020000
+#define NV50_DISP_DAC_MTHD_TYPE                                      0x0000f000
+#define NV50_DISP_DAC_MTHD_OR                                        0x00000003
+
+#define NV50_DISP_DAC_PWR                                            0x00020000
+#define NV50_DISP_DAC_PWR_HSYNC                                      0x00000001
+#define NV50_DISP_DAC_PWR_HSYNC_ON                                   0x00000000
+#define NV50_DISP_DAC_PWR_HSYNC_LO                                   0x00000001
+#define NV50_DISP_DAC_PWR_VSYNC                                      0x00000004
+#define NV50_DISP_DAC_PWR_VSYNC_ON                                   0x00000000
+#define NV50_DISP_DAC_PWR_VSYNC_LO                                   0x00000004
+#define NV50_DISP_DAC_PWR_DATA                                       0x00000010
+#define NV50_DISP_DAC_PWR_DATA_ON                                    0x00000000
+#define NV50_DISP_DAC_PWR_DATA_LO                                    0x00000010
+#define NV50_DISP_DAC_PWR_STATE                                      0x00000040
+#define NV50_DISP_DAC_PWR_STATE_ON                                   0x00000000
+#define NV50_DISP_DAC_PWR_STATE_OFF                                  0x00000040
+#define NV50_DISP_DAC_LOAD                                           0x00020100
+#define NV50_DISP_DAC_LOAD_VALUE                                     0x00000007
+
+#define NV50_DISP_PIOR_MTHD                                          0x00030000
+#define NV50_DISP_PIOR_MTHD_TYPE                                     0x0000f000
+#define NV50_DISP_PIOR_MTHD_OR                                       0x00000003
+
+#define NV50_DISP_PIOR_PWR                                           0x00030000
+#define NV50_DISP_PIOR_PWR_STATE                                     0x00000001
+#define NV50_DISP_PIOR_PWR_STATE_ON                                  0x00000001
+#define NV50_DISP_PIOR_PWR_STATE_OFF                                 0x00000000
+#define NV50_DISP_PIOR_TMDS_PWR                                      0x00032000
+#define NV50_DISP_PIOR_TMDS_PWR_STATE                                0x00000001
+#define NV50_DISP_PIOR_TMDS_PWR_STATE_ON                             0x00000001
+#define NV50_DISP_PIOR_TMDS_PWR_STATE_OFF                            0x00000000
+#define NV50_DISP_PIOR_DP_PWR                                        0x00036000
+#define NV50_DISP_PIOR_DP_PWR_STATE                                  0x00000001
+#define NV50_DISP_PIOR_DP_PWR_STATE_ON                               0x00000001
+#define NV50_DISP_PIOR_DP_PWR_STATE_OFF                              0x00000000
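+
+/* A method address encodes its target in the low bits, per the masks
+ * above: illustratively, a power method for SOR 1 would be
+ * NV50_DISP_SOR_PWR | 1, with head and link packed the same way through
+ * the NV50_DISP_SOR_MTHD_HEAD and NV50_DISP_SOR_MTHD_LINK fields.
+ */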
+
+struct nv50_display_class {
+};
+
+/* 507a: NV50_DISP_CURS
+ * 827a: NV84_DISP_CURS
+ * 837a: NVA0_DISP_CURS
+ * 887a: NV94_DISP_CURS
+ * 857a: NVA3_DISP_CURS
+ * 907a: NVD0_DISP_CURS
+ * 917a: NVE0_DISP_CURS
+ * 927a: NVF0_DISP_CURS
+ */
+
+#define NV50_DISP_CURS_CLASS                                         0x0000507a
+#define NV84_DISP_CURS_CLASS                                         0x0000827a
+#define NVA0_DISP_CURS_CLASS                                         0x0000837a
+#define NV94_DISP_CURS_CLASS                                         0x0000887a
+#define NVA3_DISP_CURS_CLASS                                         0x0000857a
+#define NVD0_DISP_CURS_CLASS                                         0x0000907a
+#define NVE0_DISP_CURS_CLASS                                         0x0000917a
+#define NVF0_DISP_CURS_CLASS                                         0x0000927a
+
+struct nv50_display_curs_class {
+	u32 head;
+};
+
+/* 507b: NV50_DISP_OIMM
+ * 827b: NV84_DISP_OIMM
+ * 837b: NVA0_DISP_OIMM
+ * 887b: NV94_DISP_OIMM
+ * 857b: NVA3_DISP_OIMM
+ * 907b: NVD0_DISP_OIMM
+ * 917b: NVE0_DISP_OIMM
+ * 927b: NVF0_DISP_OIMM
+ */
+
+#define NV50_DISP_OIMM_CLASS                                         0x0000507b
+#define NV84_DISP_OIMM_CLASS                                         0x0000827b
+#define NVA0_DISP_OIMM_CLASS                                         0x0000837b
+#define NV94_DISP_OIMM_CLASS                                         0x0000887b
+#define NVA3_DISP_OIMM_CLASS                                         0x0000857b
+#define NVD0_DISP_OIMM_CLASS                                         0x0000907b
+#define NVE0_DISP_OIMM_CLASS                                         0x0000917b
+#define NVF0_DISP_OIMM_CLASS                                         0x0000927b
+
+struct nv50_display_oimm_class {
+	u32 head;
+};
+
+/* 507c: NV50_DISP_SYNC
+ * 827c: NV84_DISP_SYNC
+ * 837c: NVA0_DISP_SYNC
+ * 887c: NV94_DISP_SYNC
+ * 857c: NVA3_DISP_SYNC
+ * 907c: NVD0_DISP_SYNC
+ * 917c: NVE0_DISP_SYNC
+ * 927c: NVF0_DISP_SYNC
+ */
+
+#define NV50_DISP_SYNC_CLASS                                         0x0000507c
+#define NV84_DISP_SYNC_CLASS                                         0x0000827c
+#define NVA0_DISP_SYNC_CLASS                                         0x0000837c
+#define NV94_DISP_SYNC_CLASS                                         0x0000887c
+#define NVA3_DISP_SYNC_CLASS                                         0x0000857c
+#define NVD0_DISP_SYNC_CLASS                                         0x0000907c
+#define NVE0_DISP_SYNC_CLASS                                         0x0000917c
+#define NVF0_DISP_SYNC_CLASS                                         0x0000927c
+
+struct nv50_display_sync_class {
+	u32 pushbuf;
+	u32 head;
+};
+
+/* 507d: NV50_DISP_MAST
+ * 827d: NV84_DISP_MAST
+ * 837d: NVA0_DISP_MAST
+ * 887d: NV94_DISP_MAST
+ * 857d: NVA3_DISP_MAST
+ * 907d: NVD0_DISP_MAST
+ * 917d: NVE0_DISP_MAST
+ * 927d: NVF0_DISP_MAST
+ */
+
+#define NV50_DISP_MAST_CLASS                                         0x0000507d
+#define NV84_DISP_MAST_CLASS                                         0x0000827d
+#define NVA0_DISP_MAST_CLASS                                         0x0000837d
+#define NV94_DISP_MAST_CLASS                                         0x0000887d
+#define NVA3_DISP_MAST_CLASS                                         0x0000857d
+#define NVD0_DISP_MAST_CLASS                                         0x0000907d
+#define NVE0_DISP_MAST_CLASS                                         0x0000917d
+#define NVF0_DISP_MAST_CLASS                                         0x0000927d
+
+struct nv50_display_mast_class {
+	u32 pushbuf;
+};
+
+/* 507e: NV50_DISP_OVLY
+ * 827e: NV84_DISP_OVLY
+ * 837e: NVA0_DISP_OVLY
+ * 887e: NV94_DISP_OVLY
+ * 857e: NVA3_DISP_OVLY
+ * 907e: NVD0_DISP_OVLY
+ * 917e: NVE0_DISP_OVLY
+ * 927e: NVF0_DISP_OVLY
+ */
+
+#define NV50_DISP_OVLY_CLASS                                         0x0000507e
+#define NV84_DISP_OVLY_CLASS                                         0x0000827e
+#define NVA0_DISP_OVLY_CLASS                                         0x0000837e
+#define NV94_DISP_OVLY_CLASS                                         0x0000887e
+#define NVA3_DISP_OVLY_CLASS                                         0x0000857e
+#define NVD0_DISP_OVLY_CLASS                                         0x0000907e
+#define NVE0_DISP_OVLY_CLASS                                         0x0000917e
+#define NVF0_DISP_OVLY_CLASS                                         0x0000927e
+
+struct nv50_display_ovly_class {
+	u32 pushbuf;
+	u32 head;
+};
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/client.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/client.h
new file mode 100644
index 0000000..c66eac5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -0,0 +1,46 @@
+#ifndef __NOUVEAU_CLIENT_H__
+#define __NOUVEAU_CLIENT_H__
+
+#include <core/namedb.h>
+
+struct nouveau_client {
+	struct nouveau_namedb base;
+	struct nouveau_handle *root;
+	struct nouveau_object *device;
+	char name[32];
+	u32 debug;
+	struct nouveau_vm *vm;
+};
+
+static inline struct nouveau_client *
+nv_client(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_CLIENT_CLASS)))
+		nv_assert("BAD CAST -> NvClient, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+static inline struct nouveau_client *
+nouveau_client(void *obj)
+{
+	struct nouveau_object *client = nv_object(obj);
+	while (client && !(nv_iclass(client, NV_CLIENT_CLASS)))
+		client = client->parent;
+	return (void *)client;
+}
+
+#define nouveau_client_create(n,c,oc,od,d)                                     \
+	nouveau_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d)
+
+int  nouveau_client_create_(const char *name, u64 device, const char *cfg,
+			    const char *dbg, int, void **);
+#define nouveau_client_destroy(p)                                              \
+	nouveau_namedb_destroy(&(p)->base)
+
+int  nouveau_client_init(struct nouveau_client *);
+int  nouveau_client_fini(struct nouveau_client *, bool suspend);
+const char *nouveau_client_name(void *obj);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/debug.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/debug.h
new file mode 100644
index 0000000..9ea18df
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/debug.h
@@ -0,0 +1,13 @@
+#ifndef __NOUVEAU_DEBUG_H__
+#define __NOUVEAU_DEBUG_H__
+
+#define NV_DBG_FATAL    0
+#define NV_DBG_ERROR    1
+#define NV_DBG_WARN     2
+#define NV_DBG_INFO     3
+#define NV_DBG_DEBUG    4
+#define NV_DBG_TRACE    5
+#define NV_DBG_PARANOIA 6
+#define NV_DBG_SPAM     7
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/device.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/device.h
new file mode 100644
index 0000000..05840f3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -0,0 +1,137 @@
+#ifndef __NOUVEAU_DEVICE_H__
+#define __NOUVEAU_DEVICE_H__
+
+#include <core/object.h>
+#include <core/subdev.h>
+#include <core/engine.h>
+
+enum nv_subdev_type {
+	NVDEV_ENGINE_DEVICE,
+	NVDEV_SUBDEV_VBIOS,
+
+	/* All subdevs from DEVINIT to DEVINIT_LAST will be created before
+	 * *any* of them are initialised.  This subdev category is used
+	 * for any subdevs that the VBIOS init table parsing may call out
+	 * to during POST.
+	 */
+	NVDEV_SUBDEV_DEVINIT,
+	NVDEV_SUBDEV_GPIO,
+	NVDEV_SUBDEV_I2C,
+	NVDEV_SUBDEV_CLOCK,
+	NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_CLOCK,
+
+	/* This grouping of subdevs are initialised right after they've
+	 * been created, and are allowed to assume any subdevs in the
+	 * list above them exist and have been initialised.
+	 */
+	NVDEV_SUBDEV_MXM,
+	NVDEV_SUBDEV_MC,
+	NVDEV_SUBDEV_BUS,
+	NVDEV_SUBDEV_TIMER,
+	NVDEV_SUBDEV_FB,
+	NVDEV_SUBDEV_LTCG,
+	NVDEV_SUBDEV_IBUS,
+	NVDEV_SUBDEV_INSTMEM,
+	NVDEV_SUBDEV_VM,
+	NVDEV_SUBDEV_BAR,
+	NVDEV_SUBDEV_VOLT,
+	NVDEV_SUBDEV_THERM,
+
+	NVDEV_ENGINE_DMAOBJ,
+	NVDEV_ENGINE_FIFO,
+	NVDEV_ENGINE_SW,
+	NVDEV_ENGINE_GR,
+	NVDEV_ENGINE_MPEG,
+	NVDEV_ENGINE_ME,
+	NVDEV_ENGINE_VP,
+	NVDEV_ENGINE_CRYPT,
+	NVDEV_ENGINE_BSP,
+	NVDEV_ENGINE_PPP,
+	NVDEV_ENGINE_COPY0,
+	NVDEV_ENGINE_COPY1,
+	NVDEV_ENGINE_UNK1C1,
+	NVDEV_ENGINE_VENC,
+	NVDEV_ENGINE_DISP,
+
+	NVDEV_SUBDEV_NR,
+};
+
+struct nouveau_device {
+	struct nouveau_engine base;
+	struct list_head head;
+
+	struct pci_dev *pdev;
+	u64 handle;
+
+	const char *cfgopt;
+	const char *dbgopt;
+	const char *name;
+	const char *cname;
+
+	enum {
+		NV_04    = 0x04,
+		NV_10    = 0x10,
+		NV_20    = 0x20,
+		NV_30    = 0x30,
+		NV_40    = 0x40,
+		NV_50    = 0x50,
+		NV_C0    = 0xc0,
+		NV_D0    = 0xd0,
+		NV_E0    = 0xe0,
+	} card_type;
+	u32 chipset;
+	u32 crystal;
+
+	struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR];
+	struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
+};
+
+static inline struct nouveau_device *
+nv_device(void *obj)
+{
+	struct nouveau_object *object = nv_object(obj);
+	struct nouveau_object *device = object;
+
+	if (device->engine)
+		device = device->engine;
+	if (device->parent)
+		device = device->parent;
+
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(device, NV_SUBDEV_CLASS) ||
+		     (nv_hclass(device) & 0xff) != NVDEV_ENGINE_DEVICE)) {
+		nv_assert("BAD CAST -> NvDevice, 0x%08x 0x%08x",
+			  nv_hclass(object), nv_hclass(device));
+	}
+#endif
+
+	return (void *)device;
+}
+
+static inline struct nouveau_subdev *
+nouveau_subdev(void *obj, int sub)
+{
+	if (nv_device(obj)->subdev[sub])
+		return nv_subdev(nv_device(obj)->subdev[sub]);
+	return NULL;
+}
+
+static inline struct nouveau_engine *
+nouveau_engine(void *obj, int sub)
+{
+	struct nouveau_subdev *subdev = nouveau_subdev(obj, sub);
+	if (subdev && nv_iclass(subdev, NV_ENGINE_CLASS))
+		return nv_engine(subdev);
+	return NULL;
+}
+
+static inline bool
+nv_device_match(struct nouveau_object *object, u16 dev, u16 ven, u16 sub)
+{
+	struct nouveau_device *device = nv_device(object);
+	return device->pdev->device == dev &&
+	       device->pdev->subsystem_vendor == ven &&
+	       device->pdev->subsystem_device == sub;
+}
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/engctx.h
new file mode 100644
index 0000000..2fd48b5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/engctx.h
@@ -0,0 +1,54 @@
+#ifndef __NOUVEAU_ENGCTX_H__
+#define __NOUVEAU_ENGCTX_H__
+
+#include <core/object.h>
+#include <core/gpuobj.h>
+
+#include <subdev/vm.h>
+
+#define NV_ENGCTX_(eng,var) (NV_ENGCTX_CLASS | ((var) << 8) | (eng))
+#define NV_ENGCTX(name,var)  NV_ENGCTX_(NVDEV_ENGINE_##name, (var))
+
+struct nouveau_engctx {
+	struct nouveau_gpuobj base;
+	struct nouveau_vma vma;
+	struct list_head head;
+	unsigned long save;
+	u64 addr;
+};
+
+static inline struct nouveau_engctx *
+nv_engctx(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_ENGCTX_CLASS)))
+		nv_assert("BAD CAST -> NvEngCtx, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+#define nouveau_engctx_create(p,e,c,g,s,a,f,d)                                 \
+	nouveau_engctx_create_((p), (e), (c), (g), (s), (a), (f),              \
+			       sizeof(**d), (void **)d)
+
+int  nouveau_engctx_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, struct nouveau_object *,
+			    u32 size, u32 align, u32 flags,
+			    int length, void **data);
+void nouveau_engctx_destroy(struct nouveau_engctx *);
+int  nouveau_engctx_init(struct nouveau_engctx *);
+int  nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
+
+int  _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *,
+			  struct nouveau_oclass *, void *, u32,
+			  struct nouveau_object **);
+void _nouveau_engctx_dtor(struct nouveau_object *);
+int  _nouveau_engctx_init(struct nouveau_object *);
+int  _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
+#define _nouveau_engctx_rd32 _nouveau_gpuobj_rd32
+#define _nouveau_engctx_wr32 _nouveau_gpuobj_wr32
+
+struct nouveau_object *nouveau_engctx_get(struct nouveau_engine *, u64 addr);
+void nouveau_engctx_put(struct nouveau_object *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/engine.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/engine.h
new file mode 100644
index 0000000..666d06d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/engine.h
@@ -0,0 +1,57 @@
+#ifndef __NOUVEAU_ENGINE_H__
+#define __NOUVEAU_ENGINE_H__
+
+#include <core/object.h>
+#include <core/subdev.h>
+
+#define NV_ENGINE_(eng,var) (NV_ENGINE_CLASS | ((var) << 8) | (eng))
+#define NV_ENGINE(name,var)  NV_ENGINE_(NVDEV_ENGINE_##name, (var))
+
+struct nouveau_engine {
+	struct nouveau_subdev base;
+	struct nouveau_oclass *cclass;
+	struct nouveau_oclass *sclass;
+
+	struct list_head contexts;
+	spinlock_t lock;
+
+	void (*tile_prog)(struct nouveau_engine *, int region);
+	int  (*tlb_flush)(struct nouveau_engine *);
+};
+
+static inline struct nouveau_engine *
+nv_engine(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_ENGINE_CLASS)))
+		nv_assert("BAD CAST -> NvEngine, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+static inline int
+nv_engidx(struct nouveau_object *object)
+{
+	return nv_subidx(object);
+}
+
+#define nouveau_engine_create(p,e,c,d,i,f,r)                                   \
+	nouveau_engine_create_((p), (e), (c), (d), (i), (f),                   \
+			       sizeof(**r),(void **)r)
+
+#define nouveau_engine_destroy(p)                                              \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_engine_init(p)                                                 \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_engine_fini(p,s)                                               \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+int nouveau_engine_create_(struct nouveau_object *, struct nouveau_object *,
+			   struct nouveau_oclass *, bool, const char *,
+			   const char *, int, void **);
+
+#define _nouveau_engine_dtor _nouveau_subdev_dtor
+#define _nouveau_engine_init _nouveau_subdev_init
+#define _nouveau_engine_fini _nouveau_subdev_fini
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/enum.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/enum.h
new file mode 100644
index 0000000..4fc62bb
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/enum.h
@@ -0,0 +1,24 @@
+#ifndef __NOUVEAU_ENUM_H__
+#define __NOUVEAU_ENUM_H__
+
+struct nouveau_enum {
+	u32 value;
+	const char *name;
+	const void *data;
+	u32 data2;
+};
+
+const struct nouveau_enum *
+nouveau_enum_find(const struct nouveau_enum *, u32 value);
+
+const struct nouveau_enum *
+nouveau_enum_print(const struct nouveau_enum *en, u32 value);
+
+struct nouveau_bitfield {
+	u32 mask;
+	const char *name;
+};
+
+void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/event.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/event.h
new file mode 100644
index 0000000..9e09440
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -0,0 +1,36 @@
+#ifndef __NVKM_EVENT_H__
+#define __NVKM_EVENT_H__
+
+/* return codes from event handlers */
+#define NVKM_EVENT_DROP 0
+#define NVKM_EVENT_KEEP 1
+
+struct nouveau_eventh {
+	struct list_head head;
+	int (*func)(struct nouveau_eventh *, int index);
+};
+
+struct nouveau_event {
+	spinlock_t lock;
+
+	void *priv;
+	void (*enable)(struct nouveau_event *, int index);
+	void (*disable)(struct nouveau_event *, int index);
+
+	int index_nr;
+	struct {
+		struct list_head list;
+		int refs;
+	} index[];
+};
+
+int  nouveau_event_create(int index_nr, struct nouveau_event **);
+void nouveau_event_destroy(struct nouveau_event **);
+void nouveau_event_trigger(struct nouveau_event *, int index);
+
+void nouveau_event_get(struct nouveau_event *, int index,
+		       struct nouveau_eventh *);
+void nouveau_event_put(struct nouveau_event *, int index,
+		       struct nouveau_eventh *);
+
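+/* Usage sketch (illustrative): a subsystem embeds a nouveau_eventh, points
+ * .func at its handler and arms it with nouveau_event_get(); the handler's
+ * return value decides whether it stays armed:
+ *
+ *	static int my_handler(struct nouveau_eventh *event, int index)
+ *	{
+ *		return NVKM_EVENT_DROP;   (one-shot: disarm after firing)
+ *	}
+ *
+ *	struct nouveau_eventh handler = { .func = my_handler };
+ *	nouveau_event_get(event, index, &handler);
+ *
+ * The software engines' vblank handlers in this patch follow this pattern.
+ */
+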
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/falcon.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/falcon.h
new file mode 100644
index 0000000..1edec38
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/falcon.h
@@ -0,0 +1,81 @@
+#ifndef __NOUVEAU_FALCON_H__
+#define __NOUVEAU_FALCON_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
+
+struct nouveau_falcon_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d)                         \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_falcon_context_destroy(d)                                      \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_falcon_context_init(d)                                         \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_falcon_context_fini(d,s)                                       \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor
+#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor
+#define _nouveau_falcon_context_init _nouveau_engctx_init
+#define _nouveau_falcon_context_fini _nouveau_engctx_fini
+#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_falcon_data {
+	bool external;
+};
+
+struct nouveau_falcon {
+	struct nouveau_engine base;
+
+	u32 addr;
+	u8  version;
+	u8  secret;
+
+	struct nouveau_gpuobj *core;
+	bool external;
+
+	struct {
+		u32 limit;
+		u32 *data;
+		u32  size;
+	} code;
+
+	struct {
+		u32 limit;
+		u32 *data;
+		u32  size;
+	} data;
+};
+
+#define nv_falcon(priv) (&(priv)->base)
+
+#define nouveau_falcon_create(p,e,c,b,d,i,f,r)                                 \
+	nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f),              \
+			       sizeof(**r),(void **)r)
+#define nouveau_falcon_destroy(p)                                              \
+	nouveau_engine_destroy(&(p)->base)
+#define nouveau_falcon_init(p) ({                                              \
+	struct nouveau_falcon *falcon = (p);                                   \
+	_nouveau_falcon_init(nv_object(falcon));                               \
+})
+#define nouveau_falcon_fini(p,s) ({                                            \
+	struct nouveau_falcon *falcon = (p);                                   \
+	_nouveau_falcon_fini(nv_object(falcon), (s));                          \
+})
+
+int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
+			   struct nouveau_oclass *, u32, bool, const char *,
+			   const char *, int, void **);
+
+#define _nouveau_falcon_dtor _nouveau_engine_dtor
+int  _nouveau_falcon_init(struct nouveau_object *);
+int  _nouveau_falcon_fini(struct nouveau_object *, bool);
+u32  _nouveau_falcon_rd32(struct nouveau_object *, u64);
+void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
new file mode 100644
index 0000000..b3b9ce4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
@@ -0,0 +1,71 @@
+#ifndef __NOUVEAU_GPUOBJ_H__
+#define __NOUVEAU_GPUOBJ_H__
+
+#include <core/object.h>
+#include <core/device.h>
+#include <core/parent.h>
+#include <core/mm.h>
+
+struct nouveau_vma;
+struct nouveau_vm;
+
+#define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
+#define NVOBJ_FLAG_ZERO_FREE  0x00000002
+#define NVOBJ_FLAG_HEAP       0x00000004
+
+struct nouveau_gpuobj {
+	struct nouveau_object base;
+	struct nouveau_object *parent;
+	struct nouveau_mm_node *node;
+	struct nouveau_mm heap;
+
+	u32 flags;
+	u64 addr;
+	u32 size;
+};
+
+static inline struct nouveau_gpuobj *
+nv_gpuobj(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_GPUOBJ_CLASS)))
+		nv_assert("BAD CAST -> NvGpuObj, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+#define nouveau_gpuobj_create(p,e,c,v,g,s,a,f,d)                               \
+	nouveau_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f),         \
+			       sizeof(**d), (void **)d)
+#define nouveau_gpuobj_init(p) nouveau_object_init(&(p)->base)
+#define nouveau_gpuobj_fini(p,s) nouveau_object_fini(&(p)->base, (s))
+int  nouveau_gpuobj_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, u32 pclass,
+			    struct nouveau_object *, u32 size, u32 align,
+			    u32 flags, int length, void **);
+void nouveau_gpuobj_destroy(struct nouveau_gpuobj *);
+
+int nouveau_gpuobj_new(struct nouveau_object *, struct nouveau_object *,
+		       u32 size, u32 align, u32 flags,
+		       struct nouveau_gpuobj **);
+int nouveau_gpuobj_dup(struct nouveau_object *, struct nouveau_gpuobj *,
+		       struct nouveau_gpuobj **);
+
+int nouveau_gpuobj_map(struct nouveau_gpuobj *, u32 acc, struct nouveau_vma *);
+int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *, struct nouveau_vm *,
+			  u32 access, struct nouveau_vma *);
+void nouveau_gpuobj_unmap(struct nouveau_vma *);
+
+static inline void
+nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
+{
+	nouveau_object_ref(&obj->base, (struct nouveau_object **)ref);
+}
+
+void _nouveau_gpuobj_dtor(struct nouveau_object *);
+int  _nouveau_gpuobj_init(struct nouveau_object *);
+int  _nouveau_gpuobj_fini(struct nouveau_object *, bool);
+u32  _nouveau_gpuobj_rd32(struct nouveau_object *, u64);
+void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32);
+
+#endif
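
A hedged sketch of the allocation path declared above: carve a zeroed object out of a parent with nouveau_gpuobj_new(), write a word through the accessor layer (nv_wo32() is declared in core/object.h), and drop the reference. Size, alignment and the written value are illustrative:

static int
example_gpuobj_use(struct nouveau_object *parent)
{
	struct nouveau_gpuobj *gpuobj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x100,
				 NVOBJ_FLAG_ZERO_ALLOC, &gpuobj);
	if (ret)
		return ret;

	nv_wo32(gpuobj, 0x00, 0x00000001);	/* word at offset 0x00 */
	nouveau_gpuobj_ref(NULL, &gpuobj);	/* drop the reference */
	return 0;
}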
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/handle.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/handle.h
new file mode 100644
index 0000000..363674c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/handle.h
@@ -0,0 +1,31 @@
+#ifndef __NOUVEAU_HANDLE_H__
+#define __NOUVEAU_HANDLE_H__
+
+struct nouveau_handle {
+	struct nouveau_namedb *namedb;
+	struct list_head node;
+
+	struct list_head head;
+	struct list_head tree;
+	u32 name;
+	u32 priv;
+
+	struct nouveau_handle *parent;
+	struct nouveau_object *object;
+};
+
+int  nouveau_handle_create(struct nouveau_object *, u32 parent, u32 handle,
+			   struct nouveau_object *, struct nouveau_handle **);
+void nouveau_handle_destroy(struct nouveau_handle *);
+int  nouveau_handle_init(struct nouveau_handle *);
+int  nouveau_handle_fini(struct nouveau_handle *, bool suspend);
+
+struct nouveau_object *
+nouveau_handle_ref(struct nouveau_object *, u32 name);
+
+struct nouveau_handle *nouveau_handle_get_class(struct nouveau_object *, u16);
+struct nouveau_handle *nouveau_handle_get_vinst(struct nouveau_object *, u64);
+struct nouveau_handle *nouveau_handle_get_cinst(struct nouveau_object *, u32);
+void nouveau_handle_put(struct nouveau_handle *);
+
+#endif
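
The get/put pairs behave as lookup-plus-reference: a successful get must be balanced by nouveau_handle_put(). A sketch, with the 0x506e class id chosen purely for illustration:

static void
example_lookup(struct nouveau_object *object)
{
	struct nouveau_handle *handle;

	handle = nouveau_handle_get_class(object, 0x506e);
	if (handle) {
		/* ... use handle->object ... */
		nouveau_handle_put(handle);
	}
}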
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/math.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/math.h
new file mode 100644
index 0000000..f808131
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/math.h
@@ -0,0 +1,16 @@
+#ifndef __NOUVEAU_MATH_H__
+#define __NOUVEAU_MATH_H__
+
+static inline int
+log2i(u64 base)
+{
+	u64 temp = base >> 1;
+	int log2;
+
+	for (log2 = 0; temp; log2++, temp >>= 1) {
+	}
+
+	return (base & (base - 1)) ? log2 + 1 : log2;
+}
+
+#endif
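
log2i() is a ceiling log2: exact for powers of two, rounded up otherwise. A few worked values:

/*
 *   log2i(4096) == 12	exact power of two
 *   log2i(4100) == 13	rounded up
 *   log2i(1)    ==  0
 */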
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/mm.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/mm.h
new file mode 100644
index 0000000..2514e81
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -0,0 +1,38 @@
+#ifndef __NOUVEAU_MM_H__
+#define __NOUVEAU_MM_H__
+
+struct nouveau_mm_node {
+	struct list_head nl_entry;
+	struct list_head fl_entry;
+	struct list_head rl_entry;
+
+	u8  type;
+	u32 offset;
+	u32 length;
+};
+
+struct nouveau_mm {
+	struct list_head nodes;
+	struct list_head free;
+
+	struct mutex mutex;
+
+	u32 block_size;
+	int heap_nodes;
+};
+
+static inline bool
+nouveau_mm_initialised(struct nouveau_mm *mm)
+{
+	return mm->block_size != 0;
+}
+
+int  nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
+int  nouveau_mm_fini(struct nouveau_mm *);
+int  nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
+		     u32 align, struct nouveau_mm_node **);
+int  nouveau_mm_tail(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
+		     u32 align, struct nouveau_mm_node **);
+void nouveau_mm_free(struct nouveau_mm *, struct nouveau_mm_node **);
+
+#endif
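
A minimal usage sketch, assuming a standalone allocator over 1024 one-unit blocks: nouveau_mm_head() takes from the bottom of the range (nouveau_mm_tail() from the top), and nodes go back via nouveau_mm_free():

static int
example_mm_use(void)
{
	struct nouveau_mm mm;
	struct nouveau_mm_node *node = NULL;
	int ret;

	ret = nouveau_mm_init(&mm, 0, 1024, 1);	/* manage blocks [0, 1024) */
	if (ret)
		return ret;

	/* type 1, size_max == size_min == 16 blocks, alignment 1 */
	ret = nouveau_mm_head(&mm, 1, 16, 16, 1, &node);
	if (ret == 0)
		nouveau_mm_free(&mm, &node);

	return nouveau_mm_fini(&mm);
}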
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/namedb.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/namedb.h
new file mode 100644
index 0000000..8897e08
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/namedb.h
@@ -0,0 +1,56 @@
+#ifndef __NOUVEAU_NAMEDB_H__
+#define __NOUVEAU_NAMEDB_H__
+
+#include <core/parent.h>
+
+struct nouveau_handle;
+
+struct nouveau_namedb {
+	struct nouveau_parent base;
+	rwlock_t lock;
+	struct list_head list;
+};
+
+static inline struct nouveau_namedb *
+nv_namedb(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_NAMEDB_CLASS)))
+		nv_assert("BAD CAST -> NvNameDB, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+#define nouveau_namedb_create(p,e,c,v,s,m,d)                                   \
+	nouveau_namedb_create_((p), (e), (c), (v), (s), (m),                   \
+			       sizeof(**d), (void **)d)
+#define nouveau_namedb_init(p)                                                 \
+	nouveau_parent_init(&(p)->base)
+#define nouveau_namedb_fini(p,s)                                               \
+	nouveau_parent_fini(&(p)->base, (s))
+#define nouveau_namedb_destroy(p)                                              \
+	nouveau_parent_destroy(&(p)->base)
+
+int  nouveau_namedb_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, u32 pclass,
+			    struct nouveau_oclass *, u32 engcls,
+			    int size, void **);
+
+int  _nouveau_namedb_ctor(struct nouveau_object *, struct nouveau_object *,
+			  struct nouveau_oclass *, void *, u32,
+			  struct nouveau_object **);
+#define _nouveau_namedb_dtor _nouveau_parent_dtor
+#define _nouveau_namedb_init _nouveau_parent_init
+#define _nouveau_namedb_fini _nouveau_parent_fini
+
+int  nouveau_namedb_insert(struct nouveau_namedb *, u32 name,
+			   struct nouveau_object *, struct nouveau_handle *);
+void nouveau_namedb_remove(struct nouveau_handle *);
+
+struct nouveau_handle *nouveau_namedb_get(struct nouveau_namedb *, u32);
+struct nouveau_handle *nouveau_namedb_get_class(struct nouveau_namedb *, u16);
+struct nouveau_handle *nouveau_namedb_get_vinst(struct nouveau_namedb *, u64);
+struct nouveau_handle *nouveau_namedb_get_cinst(struct nouveau_namedb *, u32);
+void nouveau_namedb_put(struct nouveau_handle *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/object.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/object.h
new file mode 100644
index 0000000..62e68ba
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -0,0 +1,202 @@
+#ifndef __NOUVEAU_OBJECT_H__
+#define __NOUVEAU_OBJECT_H__
+
+#include <core/os.h>
+#include <core/printk.h>
+
+#define NV_PARENT_CLASS 0x80000000
+#define NV_NAMEDB_CLASS 0x40000000
+#define NV_CLIENT_CLASS 0x20000000
+#define NV_SUBDEV_CLASS 0x10000000
+#define NV_ENGINE_CLASS 0x08000000
+#define NV_MEMOBJ_CLASS 0x04000000
+#define NV_GPUOBJ_CLASS 0x02000000
+#define NV_ENGCTX_CLASS 0x01000000
+#define NV_OBJECT_CLASS 0x0000ffff
+
+struct nouveau_object {
+	struct nouveau_oclass *oclass;
+	struct nouveau_object *parent;
+	struct nouveau_object *engine;
+	atomic_t refcount;
+	atomic_t usecount;
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+#define NOUVEAU_OBJECT_MAGIC 0x75ef0bad
+	struct list_head list;
+	u32 _magic;
+#endif
+};
+
+static inline struct nouveau_object *
+nv_object(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (likely(obj)) {
+		struct nouveau_object *object = obj;
+		if (unlikely(object->_magic != NOUVEAU_OBJECT_MAGIC))
+			nv_assert("BAD CAST -> NvObject, invalid magic");
+	}
+#endif
+	return obj;
+}
+
+#define nouveau_object_create(p,e,c,s,d)                                       \
+	nouveau_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d)
+int  nouveau_object_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, u32, int size, void **);
+void nouveau_object_destroy(struct nouveau_object *);
+int  nouveau_object_init(struct nouveau_object *);
+int  nouveau_object_fini(struct nouveau_object *, bool suspend);
+
+extern struct nouveau_ofuncs nouveau_object_ofuncs;
+
+/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
+ * ".data". */
+struct nouveau_oclass {
+	u32 handle;
+	struct nouveau_ofuncs * const ofuncs;
+	struct nouveau_omthds * const omthds;
+	struct lock_class_key lock_class_key;
+};
+
+#define nv_oclass(o)    nv_object(o)->oclass
+#define nv_hclass(o)    nv_oclass(o)->handle
+#define nv_iclass(o,i) (nv_hclass(o) & (i))
+#define nv_mclass(o)    nv_iclass(o, NV_OBJECT_CLASS)
+
+static inline struct nouveau_object *
+nv_pclass(struct nouveau_object *parent, u32 oclass)
+{
+	while (parent && !nv_iclass(parent, oclass))
+		parent = parent->parent;
+	return parent;
+}
+
+struct nouveau_omthds {
+	u32 start;
+	u32 limit;
+	int (*call)(struct nouveau_object *, u32, void *, u32);
+};
+
+struct nouveau_ofuncs {
+	int  (*ctor)(struct nouveau_object *, struct nouveau_object *,
+		     struct nouveau_oclass *, void *data, u32 size,
+		     struct nouveau_object **);
+	void (*dtor)(struct nouveau_object *);
+	int  (*init)(struct nouveau_object *);
+	int  (*fini)(struct nouveau_object *, bool suspend);
+	u8   (*rd08)(struct nouveau_object *, u64 offset);
+	u16  (*rd16)(struct nouveau_object *, u64 offset);
+	u32  (*rd32)(struct nouveau_object *, u64 offset);
+	void (*wr08)(struct nouveau_object *, u64 offset, u8 data);
+	void (*wr16)(struct nouveau_object *, u64 offset, u16 data);
+	void (*wr32)(struct nouveau_object *, u64 offset, u32 data);
+};
+
+static inline struct nouveau_ofuncs *
+nv_ofuncs(void *obj)
+{
+	return nv_oclass(obj)->ofuncs;
+}
+
+int  nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *,
+			 struct nouveau_oclass *, void *, u32,
+			 struct nouveau_object **);
+void nouveau_object_ref(struct nouveau_object *, struct nouveau_object **);
+int nouveau_object_inc(struct nouveau_object *);
+int nouveau_object_dec(struct nouveau_object *, bool suspend);
+
+int nouveau_object_new(struct nouveau_object *, u32 parent, u32 handle,
+		       u16 oclass, void *data, u32 size,
+		       struct nouveau_object **);
+int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
+void nouveau_object_debug(void);
+
+static inline int
+nv_exec(void *obj, u32 mthd, void *data, u32 size)
+{
+	struct nouveau_omthds *method = nv_oclass(obj)->omthds;
+
+	while (method && method->call) {
+		if (mthd >= method->start && mthd <= method->limit)
+			return method->call(obj, mthd, data, size);
+		method++;
+	}
+
+	return -EINVAL;
+}
+
+static inline int
+nv_call(void *obj, u32 mthd, u32 data)
+{
+	return nv_exec(obj, mthd, &data, sizeof(data));
+}
+
+static inline u8
+nv_ro08(void *obj, u64 addr)
+{
+	u8 data = nv_ofuncs(obj)->rd08(obj, addr);
+	nv_spam(obj, "nv_ro08 0x%08llx 0x%02x\n", addr, data);
+	return data;
+}
+
+static inline u16
+nv_ro16(void *obj, u64 addr)
+{
+	u16 data = nv_ofuncs(obj)->rd16(obj, addr);
+	nv_spam(obj, "nv_ro16 0x%08llx 0x%04x\n", addr, data);
+	return data;
+}
+
+static inline u32
+nv_ro32(void *obj, u64 addr)
+{
+	u32 data = nv_ofuncs(obj)->rd32(obj, addr);
+	nv_spam(obj, "nv_ro32 0x%08llx 0x%08x\n", addr, data);
+	return data;
+}
+
+static inline void
+nv_wo08(void *obj, u64 addr, u8 data)
+{
+	nv_spam(obj, "nv_wo08 0x%08llx 0x%02x\n", addr, data);
+	nv_ofuncs(obj)->wr08(obj, addr, data);
+}
+
+static inline void
+nv_wo16(void *obj, u64 addr, u16 data)
+{
+	nv_spam(obj, "nv_wo16 0x%08llx 0x%04x\n", addr, data);
+	nv_ofuncs(obj)->wr16(obj, addr, data);
+}
+
+static inline void
+nv_wo32(void *obj, u64 addr, u32 data)
+{
+	nv_spam(obj, "nv_wo32 0x%08llx 0x%08x\n", addr, data);
+	nv_ofuncs(obj)->wr32(obj, addr, data);
+}
+
+static inline u32
+nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
+{
+	u32 temp = nv_ro32(obj, addr);
+	nv_wo32(obj, addr, (temp & ~mask) | data);
+	return temp;
+}
+
+static inline int
+nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
+{
+	unsigned char c1, c2;
+
+	while (len--) {
+		c1 = nv_ro08(obj, addr++);
+		c2 = *(str++);
+		if (c1 != c2)
+			return c1 - c2;
+	}
+	return 0;
+}
+
+#endif
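
nv_exec() scans the class's omthds[] until an entry's [start, limit] range covers the requested method, so tables need a zeroed terminator (.call == NULL). A hedged sketch; the method number and handler are invented:

static int
example_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
{
	return 0;
}

static struct nouveau_omthds
example_omthds[] = {
	{ 0x0100, 0x0100, example_mthd },	/* handles method 0x0100 only */
	{}					/* terminator: .call == NULL */
};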
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/option.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/option.h
new file mode 100644
index 0000000..2707495
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/option.h
@@ -0,0 +1,11 @@
+#ifndef __NOUVEAU_OPTION_H__
+#define __NOUVEAU_OPTION_H__
+
+#include <core/os.h>
+
+const char *nouveau_stropt(const char *optstr, const char *opt, int *len);
+bool nouveau_boolopt(const char *optstr, const char *opt, bool value);
+
+int nouveau_dbgopt(const char *optstr, const char *sub);
+
+#endif
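
These helpers parse comma-separated "key=value" option strings; nouveau_boolopt() falls back to the supplied default when the key is absent. An illustrative sketch (the option string and key names are invented):

static void
example_options(void)
{
	const char *cfg = "NvBios=platform,NvMSI=0";
	bool msi = nouveau_boolopt(cfg, "NvMSI", true);	/* false: set in cfg */
	bool agp = nouveau_boolopt(cfg, "NvAGP", true);	/* true: default used */

	(void)msi; (void)agp;
}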
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/parent.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/parent.h
new file mode 100644
index 0000000..9f5ea90
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -0,0 +1,61 @@
+#ifndef __NOUVEAU_PARENT_H__
+#define __NOUVEAU_PARENT_H__
+
+#include <core/device.h>
+#include <core/object.h>
+
+struct nouveau_sclass {
+	struct nouveau_sclass *sclass;
+	struct nouveau_engine *engine;
+	struct nouveau_oclass *oclass;
+};
+
+struct nouveau_parent {
+	struct nouveau_object base;
+
+	struct nouveau_sclass *sclass;
+	u64 engine;
+
+	int  (*context_attach)(struct nouveau_object *,
+			       struct nouveau_object *);
+	int  (*context_detach)(struct nouveau_object *, bool suspend,
+			       struct nouveau_object *);
+
+	int  (*object_attach)(struct nouveau_object *parent,
+			      struct nouveau_object *object, u32 name);
+	void (*object_detach)(struct nouveau_object *parent, int cookie);
+};
+
+static inline struct nouveau_parent *
+nv_parent(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!(nv_iclass(obj, NV_PARENT_CLASS))))
+		nv_assert("BAD CAST -> NvParent, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+#define nouveau_parent_create(p,e,c,v,s,m,d)                                   \
+	nouveau_parent_create_((p), (e), (c), (v), (s), (m),                   \
+			       sizeof(**d), (void **)d)
+#define nouveau_parent_init(p)                                                 \
+	nouveau_object_init(&(p)->base)
+#define nouveau_parent_fini(p,s)                                               \
+	nouveau_object_fini(&(p)->base, (s))
+
+int  nouveau_parent_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, u32 pclass,
+			    struct nouveau_oclass *, u64 engcls,
+			    int size, void **);
+void nouveau_parent_destroy(struct nouveau_parent *);
+
+void _nouveau_parent_dtor(struct nouveau_object *);
+#define _nouveau_parent_init nouveau_object_init
+#define _nouveau_parent_fini nouveau_object_fini
+
+int nouveau_parent_sclass(struct nouveau_object *, u16 handle,
+			  struct nouveau_object **pengine,
+			  struct nouveau_oclass **poclass);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/printk.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/printk.h
new file mode 100644
index 0000000..febed2e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -0,0 +1,40 @@
+#ifndef __NOUVEAU_PRINTK_H__
+#define __NOUVEAU_PRINTK_H__
+
+#include <core/os.h>
+#include <core/debug.h>
+
+struct nouveau_object;
+
+#define NV_PRINTK_FATAL    KERN_CRIT
+#define NV_PRINTK_ERROR    KERN_ERR
+#define NV_PRINTK_WARN     KERN_WARNING
+#define NV_PRINTK_INFO     KERN_INFO
+#define NV_PRINTK_DEBUG    KERN_DEBUG
+#define NV_PRINTK_PARANOIA KERN_DEBUG
+#define NV_PRINTK_TRACE    KERN_DEBUG
+#define NV_PRINTK_SPAM     KERN_DEBUG
+
+void __printf(4, 5)
+nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
+
+#define nv_printk(o,l,f,a...) do {                                             \
+	if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG)                                \
+		nv_printk_(nv_object(o), NV_PRINTK_##l, NV_DBG_##l, f, ##a);   \
+} while (0)
+
+#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
+#define nv_error(o,f,a...) nv_printk((o), ERROR, f, ##a)
+#define nv_warn(o,f,a...) nv_printk((o), WARN, f, ##a)
+#define nv_info(o,f,a...) nv_printk((o), INFO, f, ##a)
+#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a)
+#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
+#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
+
+#define nv_assert(f,a...) do {                                                 \
+	if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG)                              \
+		nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a);  \
+	BUG_ON(1);                                                             \
+} while (0)
+
+#endif
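
Because the level test compares against the compile-time CONFIG_NOUVEAU_DEBUG, messages above that level vanish entirely. Typical call sites might look like this (format strings invented for illustration):

static void
example_log(struct nouveau_subdev *subdev, u32 stat)
{
	nv_error(subdev, "unknown intr 0x%08x\n", stat);
	nv_debug(subdev, "suspend triggered\n");
}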
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/ramht.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/ramht.h
new file mode 100644
index 0000000..47e4cac
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/ramht.h
@@ -0,0 +1,23 @@
+#ifndef __NOUVEAU_RAMHT_H__
+#define __NOUVEAU_RAMHT_H__
+
+#include <core/gpuobj.h>
+
+struct nouveau_ramht {
+	struct nouveau_gpuobj base;
+	int bits;
+};
+
+int  nouveau_ramht_insert(struct nouveau_ramht *, int chid,
+			  u32 handle, u32 context);
+void nouveau_ramht_remove(struct nouveau_ramht *, int cookie);
+int  nouveau_ramht_new(struct nouveau_object *, struct nouveau_object *,
+		       u32 size, u32 align, struct nouveau_ramht **);
+
+static inline void
+nouveau_ramht_ref(struct nouveau_ramht *obj, struct nouveau_ramht **ref)
+{
+	nouveau_gpuobj_ref(&obj->base, (struct nouveau_gpuobj **)ref);
+}
+
+#endif
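
nouveau_ramht_insert() hashes a (channel, handle) pair into the table and returns a slot cookie or a negative error; the cookie later identifies the entry to nouveau_ramht_remove(). A sketch with invented handle/context values:

static int
example_ramht_use(struct nouveau_ramht *ramht, int chid)
{
	int cookie;

	cookie = nouveau_ramht_insert(ramht, chid, 0xbeef0001, 0x00100000);
	if (cookie < 0)
		return cookie;

	nouveau_ramht_remove(ramht, cookie);
	return 0;
}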
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/core/subdev.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/subdev.h
new file mode 100644
index 0000000..e9632e9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/core/subdev.h
@@ -0,0 +1,118 @@
+#ifndef __NOUVEAU_SUBDEV_H__
+#define __NOUVEAU_SUBDEV_H__
+
+#include <core/object.h>
+
+#define NV_SUBDEV_(sub,var) (NV_SUBDEV_CLASS | ((var) << 8) | (sub))
+#define NV_SUBDEV(name,var)  NV_SUBDEV_(NVDEV_SUBDEV_##name, (var))
+
+struct nouveau_subdev {
+	struct nouveau_object base;
+	struct mutex mutex;
+	const char *name;
+	void __iomem *mmio;
+	u32 debug;
+	u32 unit;
+
+	void (*intr)(struct nouveau_subdev *);
+};
+
+static inline struct nouveau_subdev *
+nv_subdev(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_SUBDEV_CLASS)))
+		nv_assert("BAD CAST -> NvSubDev, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+static inline int
+nv_subidx(struct nouveau_object *object)
+{
+	return nv_hclass(nv_subdev(object)) & 0xff;
+}
+
+#define nouveau_subdev_create(p,e,o,v,s,f,d)                                   \
+	nouveau_subdev_create_((p), (e), (o), (v), (s), (f),                   \
+			       sizeof(**d), (void **)d)
+
+int  nouveau_subdev_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, u32 pclass,
+			    const char *sname, const char *fname,
+			    int size, void **);
+void nouveau_subdev_destroy(struct nouveau_subdev *);
+int  nouveau_subdev_init(struct nouveau_subdev *);
+int  nouveau_subdev_fini(struct nouveau_subdev *, bool suspend);
+void nouveau_subdev_reset(struct nouveau_object *);
+
+void _nouveau_subdev_dtor(struct nouveau_object *);
+int  _nouveau_subdev_init(struct nouveau_object *);
+int  _nouveau_subdev_fini(struct nouveau_object *, bool suspend);
+
+#define s_printk(s,l,f,a...) do {                                              \
+	if ((s)->debug >= NV_DBG_##l) {                                        \
+		nv_printk((s), l, f, ##a);                                     \
+	}                                                                      \
+} while (0)
+
+static inline u8
+nv_rd08(void *obj, u32 addr)
+{
+	struct nouveau_subdev *subdev = nv_subdev(obj);
+	u8 data = ioread8(subdev->mmio + addr);
+	nv_spam(subdev, "nv_rd08 0x%06x 0x%02x\n", addr, data);
+	return data;
+}
+
+static inline u16
+nv_rd16(void *obj, u32 addr)
+{
+	struct nouveau_subdev *subdev = nv_subdev(obj);
+	u16 data = ioread16_native(subdev->mmio + addr);
+	nv_spam(subdev, "nv_rd16 0x%06x 0x%04x\n", addr, data);
+	return data;
+}
+
+static inline u32
+nv_rd32(void *obj, u32 addr)
+{
+	struct nouveau_subdev *subdev = nv_subdev(obj);
+	u32 data = ioread32_native(subdev->mmio + addr);
+	nv_spam(subdev, "nv_rd32 0x%06x 0x%08x\n", addr, data);
+	return data;
+}
+
+static inline void
+nv_wr08(void *obj, u32 addr, u8 data)
+{
+	struct nouveau_subdev *subdev = nv_subdev(obj);
+	nv_spam(subdev, "nv_wr08 0x%06x 0x%02x\n", addr, data);
+	iowrite8(data, subdev->mmio + addr);
+}
+
+static inline void
+nv_wr16(void *obj, u32 addr, u16 data)
+{
+	struct nouveau_subdev *subdev = nv_subdev(obj);
+	nv_spam(subdev, "nv_wr16 0x%06x 0x%04x\n", addr, data);
+	iowrite16_native(data, subdev->mmio + addr);
+}
+
+static inline void
+nv_wr32(void *obj, u32 addr, u32 data)
+{
+	struct nouveau_subdev *subdev = nv_subdev(obj);
+	nv_spam(subdev, "nv_wr32 0x%06x 0x%08x\n", addr, data);
+	iowrite32_native(data, subdev->mmio + addr);
+}
+
+static inline u32
+nv_mask(void *obj, u32 addr, u32 mask, u32 data)
+{
+	u32 temp = nv_rd32(obj, addr);
+	nv_wr32(obj, addr, (temp & ~mask) | data);
+	return temp;
+}
+
+#endif
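
nv_mask() is the read-modify-write primitive built on the accessors above; it returns the pre-modification value, which allows a save/restore pattern. The register and bit below are chosen purely for illustration:

static void
example_rmw(struct nouveau_subdev *subdev)
{
	u32 old;

	old = nv_mask(subdev, 0x000200, 0x00001000, 0x00000000); /* clear bit */
	nv_mask(subdev, 0x000200, 0x00001000, old & 0x00001000); /* restore */
}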
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
new file mode 100644
index 0000000..13ccdf5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
@@ -0,0 +1,8 @@
+#ifndef __NOUVEAU_BSP_H__
+#define __NOUVEAU_BSP_H__
+
+extern struct nouveau_oclass nv84_bsp_oclass;
+extern struct nouveau_oclass nvc0_bsp_oclass;
+extern struct nouveau_oclass nve0_bsp_oclass;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/copy.h
new file mode 100644
index 0000000..8cad2cf
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/copy.h
@@ -0,0 +1,12 @@
+#ifndef __NOUVEAU_COPY_H__
+#define __NOUVEAU_COPY_H__
+
+void nva3_copy_intr(struct nouveau_subdev *);
+
+extern struct nouveau_oclass nva3_copy_oclass;
+extern struct nouveau_oclass nvc0_copy0_oclass;
+extern struct nouveau_oclass nvc0_copy1_oclass;
+extern struct nouveau_oclass nve0_copy0_oclass;
+extern struct nouveau_oclass nve0_copy1_oclass;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
new file mode 100644
index 0000000..db97561
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
@@ -0,0 +1,7 @@
+#ifndef __NOUVEAU_CRYPT_H__
+#define __NOUVEAU_CRYPT_H__
+
+extern struct nouveau_oclass nv84_crypt_oclass;
+extern struct nouveau_oclass nv98_crypt_oclass;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/device.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/device.h
new file mode 100644
index 0000000..b3dd2c4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/device.h
@@ -0,0 +1,23 @@
+#ifndef __NOUVEAU_SUBDEV_DEVICE_H__
+#define __NOUVEAU_SUBDEV_DEVICE_H__
+
+#include <core/device.h>
+
+#define nouveau_device_create(p,n,s,c,d,u)                                     \
+	nouveau_device_create_((p), (n), (s), (c), (d), sizeof(**u), (void **)u)
+
+int  nouveau_device_create_(struct pci_dev *, u64 name, const char *sname,
+			    const char *cfg, const char *dbg, int, void **);
+
+int nv04_identify(struct nouveau_device *);
+int nv10_identify(struct nouveau_device *);
+int nv20_identify(struct nouveau_device *);
+int nv30_identify(struct nouveau_device *);
+int nv40_identify(struct nouveau_device *);
+int nv50_identify(struct nouveau_device *);
+int nvc0_identify(struct nouveau_device *);
+int nve0_identify(struct nouveau_device *);
+
+struct nouveau_device *nouveau_device_find(u64 name);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/disp.h
new file mode 100644
index 0000000..4b21fab
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -0,0 +1,49 @@
+#ifndef __NOUVEAU_DISP_H__
+#define __NOUVEAU_DISP_H__
+
+#include <core/object.h>
+#include <core/engine.h>
+#include <core/device.h>
+#include <core/event.h>
+
+struct nouveau_disp {
+	struct nouveau_engine base;
+	struct nouveau_event *vblank;
+};
+
+static inline struct nouveau_disp *
+nouveau_disp(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
+}
+
+#define nouveau_disp_create(p,e,c,h,i,x,d)                                     \
+	nouveau_disp_create_((p), (e), (c), (h), (i), (x),                     \
+			     sizeof(**d), (void **)d)
+#define nouveau_disp_destroy(d) ({                                             \
+	struct nouveau_disp *disp = (d);                                       \
+	_nouveau_disp_dtor(nv_object(disp));                                   \
+})
+#define nouveau_disp_init(d)                                                   \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_disp_fini(d,s)                                                 \
+	nouveau_engine_fini(&(d)->base, (s))
+
+int  nouveau_disp_create_(struct nouveau_object *, struct nouveau_object *,
+			  struct nouveau_oclass *, int heads,
+			  const char *, const char *, int, void **);
+void _nouveau_disp_dtor(struct nouveau_object *);
+#define _nouveau_disp_init _nouveau_engine_init
+#define _nouveau_disp_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_disp_oclass;
+extern struct nouveau_oclass nv50_disp_oclass;
+extern struct nouveau_oclass nv84_disp_oclass;
+extern struct nouveau_oclass nva0_disp_oclass;
+extern struct nouveau_oclass nv94_disp_oclass;
+extern struct nouveau_oclass nva3_disp_oclass;
+extern struct nouveau_oclass nvd0_disp_oclass;
+extern struct nouveau_oclass nve0_disp_oclass;
+extern struct nouveau_oclass nvf0_disp_oclass;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
new file mode 100644
index 0000000..b28914e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -0,0 +1,48 @@
+#ifndef __NOUVEAU_DMAOBJ_H__
+#define __NOUVEAU_DMAOBJ_H__
+
+#include <core/object.h>
+#include <core/engine.h>
+
+struct nouveau_gpuobj;
+
+struct nouveau_dmaobj {
+	struct nouveau_object base;
+	u32 target;
+	u32 access;
+	u64 start;
+	u64 limit;
+	u32 conf0;
+};
+
+struct nouveau_dmaeng {
+	struct nouveau_engine base;
+
+	/* creates a "physical" dma object from a struct nouveau_dmaobj */
+	int (*bind)(struct nouveau_dmaeng *dmaeng,
+		    struct nouveau_object *parent,
+		    struct nouveau_dmaobj *dmaobj,
+		    struct nouveau_gpuobj **);
+};
+
+#define nouveau_dmaeng_create(p,e,c,d)                                         \
+	nouveau_engine_create((p), (e), (c), true, "DMAOBJ", "dmaobj", (d))
+#define nouveau_dmaeng_destroy(p)                                              \
+	nouveau_engine_destroy(&(p)->base)
+#define nouveau_dmaeng_init(p)                                                 \
+	nouveau_engine_init(&(p)->base)
+#define nouveau_dmaeng_fini(p,s)                                               \
+	nouveau_engine_fini(&(p)->base, (s))
+
+#define _nouveau_dmaeng_dtor _nouveau_engine_dtor
+#define _nouveau_dmaeng_init _nouveau_engine_init
+#define _nouveau_dmaeng_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_dmaeng_oclass;
+extern struct nouveau_oclass nv50_dmaeng_oclass;
+extern struct nouveau_oclass nvc0_dmaeng_oclass;
+extern struct nouveau_oclass nvd0_dmaeng_oclass;
+
+extern struct nouveau_oclass nouveau_dmaobj_sclass[];
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
new file mode 100644
index 0000000..633c2f8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -0,0 +1,116 @@
+#ifndef __NOUVEAU_FIFO_H__
+#define __NOUVEAU_FIFO_H__
+
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engine.h>
+
+struct nouveau_fifo_chan {
+	struct nouveau_namedb base;
+	struct nouveau_dmaobj *pushdma;
+	struct nouveau_gpuobj *pushgpu;
+	void __iomem *user;
+	u32 size;
+	u16 chid;
+	atomic_t refcnt; /* NV04_NVSW_SET_REF */
+};
+
+static inline struct nouveau_fifo_chan *
+nouveau_fifo_chan(void *obj)
+{
+	return (void *)nv_namedb(obj);
+}
+
+#define nouveau_fifo_channel_create(p,e,c,b,a,s,n,m,d)                         \
+	nouveau_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n),        \
+				     (m), sizeof(**d), (void **)d)
+#define nouveau_fifo_channel_init(p)                                           \
+	nouveau_namedb_init(&(p)->base)
+#define nouveau_fifo_channel_fini(p,s)                                         \
+	nouveau_namedb_fini(&(p)->base, (s))
+
+int  nouveau_fifo_channel_create_(struct nouveau_object *,
+				  struct nouveau_object *,
+				  struct nouveau_oclass *,
+				  int bar, u32 addr, u32 size, u32 push,
+				  u64 engmask, int len, void **);
+void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
+
+#define _nouveau_fifo_channel_init _nouveau_namedb_init
+#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
+
+void _nouveau_fifo_channel_dtor(struct nouveau_object *);
+u32  _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
+void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
+
+struct nouveau_fifo_base {
+	struct nouveau_gpuobj base;
+};
+
+#define nouveau_fifo_context_create(p,e,c,g,s,a,f,d)                           \
+	nouveau_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
+#define nouveau_fifo_context_destroy(p)                                        \
+	nouveau_gpuobj_destroy(&(p)->base)
+#define nouveau_fifo_context_init(p)                                           \
+	nouveau_gpuobj_init(&(p)->base)
+#define nouveau_fifo_context_fini(p,s)                                         \
+	nouveau_gpuobj_fini(&(p)->base, (s))
+
+#define _nouveau_fifo_context_dtor _nouveau_gpuobj_dtor
+#define _nouveau_fifo_context_init _nouveau_gpuobj_init
+#define _nouveau_fifo_context_fini _nouveau_gpuobj_fini
+#define _nouveau_fifo_context_rd32 _nouveau_gpuobj_rd32
+#define _nouveau_fifo_context_wr32 _nouveau_gpuobj_wr32
+
+struct nouveau_fifo {
+	struct nouveau_engine base;
+
+	struct nouveau_event *cevent; /* channel creation event */
+	struct nouveau_event *uevent; /* async user trigger */
+
+	struct nouveau_object **channel;
+	spinlock_t lock;
+	u16 min;
+	u16 max;
+
+	int  (*chid)(struct nouveau_fifo *, struct nouveau_object *);
+	void (*pause)(struct nouveau_fifo *, unsigned long *);
+	void (*start)(struct nouveau_fifo *, unsigned long *);
+};
+
+static inline struct nouveau_fifo *
+nouveau_fifo(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_FIFO];
+}
+
+#define nouveau_fifo_create(o,e,c,fc,lc,d)                                     \
+	nouveau_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
+#define nouveau_fifo_init(p)                                                   \
+	nouveau_engine_init(&(p)->base)
+#define nouveau_fifo_fini(p,s)                                                 \
+	nouveau_engine_fini(&(p)->base, (s))
+
+int nouveau_fifo_create_(struct nouveau_object *, struct nouveau_object *,
+			 struct nouveau_oclass *, int min, int max,
+			 int size, void **);
+void nouveau_fifo_destroy(struct nouveau_fifo *);
+const char *
+nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid);
+
+#define _nouveau_fifo_init _nouveau_engine_init
+#define _nouveau_fifo_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_fifo_oclass;
+extern struct nouveau_oclass nv10_fifo_oclass;
+extern struct nouveau_oclass nv17_fifo_oclass;
+extern struct nouveau_oclass nv40_fifo_oclass;
+extern struct nouveau_oclass nv50_fifo_oclass;
+extern struct nouveau_oclass nv84_fifo_oclass;
+extern struct nouveau_oclass nvc0_fifo_oclass;
+extern struct nouveau_oclass nve0_fifo_oclass;
+
+void nv04_fifo_intr(struct nouveau_subdev *);
+int  nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
+
+#endif
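
The pause()/start() hooks bracket critical sections during which channel state may be rewritten behind the hardware's back; the unsigned long pointer carries the implementation's saved IRQ flags. A hedged sketch:

static void
example_fifo_pause(struct nouveau_fifo *pfifo)
{
	unsigned long flags;

	pfifo->pause(pfifo, &flags);
	/* ... modify channel state while the fifo is held idle ... */
	pfifo->start(pfifo, &flags);
}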
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/graph.h
new file mode 100644
index 0000000..5d39243
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -0,0 +1,76 @@
+#ifndef __NOUVEAU_GRAPH_H__
+#define __NOUVEAU_GRAPH_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+struct nouveau_graph_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_graph_context_create(p,e,c,g,s,a,f,d)                          \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_graph_context_destroy(d)                                       \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_graph_context_init(d)                                          \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_graph_context_fini(d,s)                                        \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_graph_context_dtor _nouveau_engctx_dtor
+#define _nouveau_graph_context_init _nouveau_engctx_init
+#define _nouveau_graph_context_fini _nouveau_engctx_fini
+#define _nouveau_graph_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_graph_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_graph {
+	struct nouveau_engine base;
+
+	/* Returns chipset-specific unit counts packed into a u64.
+	 */
+	u64 (*units)(struct nouveau_graph *);
+};
+
+static inline struct nouveau_graph *
+nouveau_graph(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_GR];
+}
+
+#define nouveau_graph_create(p,e,c,y,d)                                        \
+	nouveau_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
+#define nouveau_graph_destroy(d)                                               \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_graph_init(d)                                                  \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_graph_fini(d,s)                                                \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_graph_dtor _nouveau_engine_dtor
+#define _nouveau_graph_init _nouveau_engine_init
+#define _nouveau_graph_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_graph_oclass;
+extern struct nouveau_oclass nv10_graph_oclass;
+extern struct nouveau_oclass nv20_graph_oclass;
+extern struct nouveau_oclass nv25_graph_oclass;
+extern struct nouveau_oclass nv2a_graph_oclass;
+extern struct nouveau_oclass nv30_graph_oclass;
+extern struct nouveau_oclass nv34_graph_oclass;
+extern struct nouveau_oclass nv35_graph_oclass;
+extern struct nouveau_oclass nv40_graph_oclass;
+extern struct nouveau_oclass nv50_graph_oclass;
+extern struct nouveau_oclass nvc0_graph_oclass;
+extern struct nouveau_oclass nve0_graph_oclass;
+
+extern const struct nouveau_bitfield nv04_graph_nsource[];
+extern struct nouveau_ofuncs nv04_graph_ofuncs;
+bool nv04_graph_idle(void *obj);
+
+extern const struct nouveau_bitfield nv10_graph_intr_name[];
+extern const struct nouveau_bitfield nv10_graph_nstatus[];
+
+extern const struct nouveau_enum nv50_data_error_names[];
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
new file mode 100644
index 0000000..bbf0d4a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
@@ -0,0 +1,61 @@
+#ifndef __NOUVEAU_MPEG_H__
+#define __NOUVEAU_MPEG_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_mpeg_chan {
+	struct nouveau_engctx base;
+};
+
+#define nouveau_mpeg_context_create(p,e,c,g,s,a,f,d)                           \
+	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_mpeg_context_destroy(d)                                        \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_mpeg_context_init(d)                                           \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_mpeg_context_fini(d,s)                                         \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_mpeg_context_dtor _nouveau_engctx_dtor
+#define _nouveau_mpeg_context_init _nouveau_engctx_init
+#define _nouveau_mpeg_context_fini _nouveau_engctx_fini
+#define _nouveau_mpeg_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_mpeg_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_mpeg {
+	struct nouveau_engine base;
+};
+
+#define nouveau_mpeg_create(p,e,c,d)                                           \
+	nouveau_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
+#define nouveau_mpeg_destroy(d)                                                \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_mpeg_init(d)                                                   \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_mpeg_fini(d,s)                                                 \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_mpeg_dtor _nouveau_engine_dtor
+#define _nouveau_mpeg_init _nouveau_engine_init
+#define _nouveau_mpeg_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv31_mpeg_oclass;
+extern struct nouveau_oclass nv40_mpeg_oclass;
+extern struct nouveau_oclass nv50_mpeg_oclass;
+extern struct nouveau_oclass nv84_mpeg_oclass;
+
+extern struct nouveau_oclass nv31_mpeg_sclass[];
+void nv31_mpeg_intr(struct nouveau_subdev *);
+void nv31_mpeg_tile_prog(struct nouveau_engine *, int);
+int  nv31_mpeg_init(struct nouveau_object *);
+
+extern struct nouveau_ofuncs nv50_mpeg_ofuncs;
+int  nv50_mpeg_context_ctor(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, void *, u32,
+			    struct nouveau_object **);
+int  nv50_mpeg_tlb_flush(struct nouveau_engine *);
+void nv50_mpeg_intr(struct nouveau_subdev *);
+int  nv50_mpeg_init(struct nouveau_object *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
new file mode 100644
index 0000000..0a66781
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
@@ -0,0 +1,7 @@
+#ifndef __NOUVEAU_PPP_H__
+#define __NOUVEAU_PPP_H__
+
+extern struct nouveau_oclass nv98_ppp_oclass;
+extern struct nouveau_oclass nvc0_ppp_oclass;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/software.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/software.h
new file mode 100644
index 0000000..4579948
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -0,0 +1,60 @@
+#ifndef __NOUVEAU_SOFTWARE_H__
+#define __NOUVEAU_SOFTWARE_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/event.h>
+
+struct nouveau_software_chan {
+	struct nouveau_engctx base;
+
+	struct {
+		struct nouveau_eventh event;
+		u32 channel;
+		u32 ctxdma;
+		u64 offset;
+		u32 value;
+	} vblank;
+
+	int (*flip)(void *);
+	void *flip_data;
+};
+
+#define nouveau_software_context_create(p,e,c,d)                               \
+	nouveau_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
+#define nouveau_software_context_destroy(d)                                    \
+	nouveau_engctx_destroy(&(d)->base)
+#define nouveau_software_context_init(d)                                       \
+	nouveau_engctx_init(&(d)->base)
+#define nouveau_software_context_fini(d,s)                                     \
+	nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_software_context_dtor _nouveau_engctx_dtor
+#define _nouveau_software_context_init _nouveau_engctx_init
+#define _nouveau_software_context_fini _nouveau_engctx_fini
+
+struct nouveau_software {
+	struct nouveau_engine base;
+};
+
+#define nouveau_software_create(p,e,c,d)                                       \
+	nouveau_engine_create((p), (e), (c), true, "SW", "software", (d))
+#define nouveau_software_destroy(d)                                            \
+	nouveau_engine_destroy(&(d)->base)
+#define nouveau_software_init(d)                                               \
+	nouveau_engine_init(&(d)->base)
+#define nouveau_software_fini(d,s)                                             \
+	nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_software_dtor _nouveau_engine_dtor
+#define _nouveau_software_init _nouveau_engine_init
+#define _nouveau_software_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_software_oclass;
+extern struct nouveau_oclass nv10_software_oclass;
+extern struct nouveau_oclass nv50_software_oclass;
+extern struct nouveau_oclass nvc0_software_oclass;
+
+void nv04_software_intr(struct nouveau_subdev *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/vp.h
new file mode 100644
index 0000000..d7b287b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/engine/vp.h
@@ -0,0 +1,8 @@
+#ifndef __NOUVEAU_VP_H__
+#define __NOUVEAU_VP_H__
+
+extern struct nouveau_oclass nv84_vp_oclass;
+extern struct nouveau_oclass nvc0_vp_oclass;
+extern struct nouveau_oclass nve0_vp_oclass;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
new file mode 100644
index 0000000..4f4ff45
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
@@ -0,0 +1,55 @@
+#ifndef __NOUVEAU_BAR_H__
+#define __NOUVEAU_BAR_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+#include <subdev/fb.h>
+
+struct nouveau_vma;
+
+struct nouveau_bar {
+	struct nouveau_subdev base;
+
+	int (*alloc)(struct nouveau_bar *, struct nouveau_object *,
+		     struct nouveau_mem *, struct nouveau_object **);
+	void __iomem *iomem;
+
+	int (*kmap)(struct nouveau_bar *, struct nouveau_mem *,
+		    u32 flags, struct nouveau_vma *);
+	int (*umap)(struct nouveau_bar *, struct nouveau_mem *,
+		    u32 flags, struct nouveau_vma *);
+	void (*unmap)(struct nouveau_bar *, struct nouveau_vma *);
+	void (*flush)(struct nouveau_bar *);
+};
+
+static inline struct nouveau_bar *
+nouveau_bar(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR];
+}
+
+#define nouveau_bar_create(p,e,o,d)                                            \
+	nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_bar_init(p)                                                    \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_bar_fini(p,s)                                                  \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
+			struct nouveau_oclass *, int, void **);
+void nouveau_bar_destroy(struct nouveau_bar *);
+
+void _nouveau_bar_dtor(struct nouveau_object *);
+#define _nouveau_bar_init _nouveau_subdev_init
+#define _nouveau_bar_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv50_bar_oclass;
+extern struct nouveau_oclass nvc0_bar_oclass;
+
+int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
+		      struct nouveau_mem *, struct nouveau_object **);
+
+void nv84_bar_flush(struct nouveau_bar *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
new file mode 100644
index 0000000..5bd1ca8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
@@ -0,0 +1,35 @@
+#ifndef __NOUVEAU_BIOS_H__
+#define __NOUVEAU_BIOS_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_bios {
+	struct nouveau_subdev base;
+	u32 size;
+	u8 *data;
+
+	u32 bmp_offset;
+	u32 bit_offset;
+
+	struct {
+		u8 major;
+		u8 chip;
+		u8 minor;
+		u8 micro;
+		u8 patch;
+	} version;
+};
+
+static inline struct nouveau_bios *
+nouveau_bios(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VBIOS];
+}
+
+u8  nvbios_checksum(const u8 *data, int size);
+u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
+
+extern struct nouveau_oclass nouveau_bios_oclass;
+
+#endif
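
nvbios_findstr() is a plain substring scan over the shadowed image. A sketch locating the BIT structure; the "\xff\xb8BIT" signature is the one nouveau conventionally scans for, stated here as an assumption:

static u16
example_find_bit(struct nouveau_bios *bios)
{
	/* returns the offset of the signature, or 0 if not present */
	return nvbios_findstr(bios->data, bios->size, "\xff\xb8""BIT", 5);
}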
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h
new file mode 100644
index 0000000..73f060b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h
@@ -0,0 +1,13 @@
+#ifndef __NVBIOS_BIT_H__
+#define __NVBIOS_BIT_H__
+
+struct bit_entry {
+	u8  id;
+	u8  version;
+	u16 length;
+	u16 offset;
+};
+
+int bit_entry(struct nouveau_bios *, u8 id, struct bit_entry *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h
new file mode 100644
index 0000000..10e4dbc
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h
@@ -0,0 +1,39 @@
+#ifndef __NVBIOS_BMP_H__
+#define __NVBIOS_BMP_H__
+
+static inline u16
+bmp_version(struct nouveau_bios *bios)
+{
+	if (bios->bmp_offset) {
+		return nv_ro08(bios, bios->bmp_offset + 5) << 8 |
+		       nv_ro08(bios, bios->bmp_offset + 6);
+	}
+
+	return 0x0000;
+}
+
+static inline u16
+bmp_mem_init_table(struct nouveau_bios *bios)
+{
+	if (bmp_version(bios) >= 0x0300)
+		return nv_ro16(bios, bios->bmp_offset + 24);
+	return 0x0000;
+}
+
+static inline u16
+bmp_sdr_seq_table(struct nouveau_bios *bios)
+{
+	if (bmp_version(bios) >= 0x0300)
+		return nv_ro16(bios, bios->bmp_offset + 26);
+	return 0x0000;
+}
+
+static inline u16
+bmp_ddr_seq_table(struct nouveau_bios *bios)
+{
+	if (bmp_version(bios) >= 0x0300)
+		return nv_ro16(bios, bios->bmp_offset + 28);
+	return 0x0000;
+}
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
new file mode 100644
index 0000000..c127054
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
@@ -0,0 +1,27 @@
+#ifndef __NVBIOS_CONN_H__
+#define __NVBIOS_CONN_H__
+
+enum dcb_connector_type {
+	DCB_CONNECTOR_VGA = 0x00,
+	DCB_CONNECTOR_TV_0 = 0x10,
+	DCB_CONNECTOR_TV_1 = 0x11,
+	DCB_CONNECTOR_TV_3 = 0x13,
+	DCB_CONNECTOR_DVI_I = 0x30,
+	DCB_CONNECTOR_DVI_D = 0x31,
+	DCB_CONNECTOR_DMS59_0 = 0x38,
+	DCB_CONNECTOR_DMS59_1 = 0x39,
+	DCB_CONNECTOR_LVDS = 0x40,
+	DCB_CONNECTOR_LVDS_SPWG = 0x41,
+	DCB_CONNECTOR_DP = 0x46,
+	DCB_CONNECTOR_eDP = 0x47,
+	DCB_CONNECTOR_HDMI_0 = 0x60,
+	DCB_CONNECTOR_HDMI_1 = 0x61,
+	DCB_CONNECTOR_DMS59_DP0 = 0x64,
+	DCB_CONNECTOR_DMS59_DP1 = 0x65,
+	DCB_CONNECTOR_NONE = 0xff
+};
+
+u16 dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
new file mode 100644
index 0000000..123270e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -0,0 +1,69 @@
+#ifndef __NVBIOS_DCB_H__
+#define __NVBIOS_DCB_H__
+
+struct nouveau_bios;
+
+enum dcb_output_type {
+	DCB_OUTPUT_ANALOG	= 0x0,
+	DCB_OUTPUT_TV		= 0x1,
+	DCB_OUTPUT_TMDS		= 0x2,
+	DCB_OUTPUT_LVDS		= 0x3,
+	DCB_OUTPUT_DP		= 0x6,
+	DCB_OUTPUT_EOL		= 0xe,
+	DCB_OUTPUT_UNUSED	= 0xf,
+	DCB_OUTPUT_ANY = -1,
+};
+
+struct dcb_output {
+	int index;	/* may not be the raw dcb index if merging has happened */
+	u16 hasht;
+	u16 hashm;
+	enum dcb_output_type type;
+	uint8_t i2c_index;
+	uint8_t heads;
+	uint8_t connector;
+	uint8_t bus;
+	uint8_t location;
+	uint8_t or;
+	uint8_t link;
+	bool duallink_possible;
+	uint8_t extdev;
+	union {
+		struct sor_conf {
+			int link;
+		} sorconf;
+		struct {
+			int maxfreq;
+		} crtconf;
+		struct {
+			struct sor_conf sor;
+			bool use_straps_for_mode;
+			bool use_acpi_for_edid;
+			bool use_power_scripts;
+		} lvdsconf;
+		struct {
+			bool has_component_output;
+		} tvconf;
+		struct {
+			struct sor_conf sor;
+			int link_nr;
+			int link_bw;
+		} dpconf;
+		struct {
+			struct sor_conf sor;
+			int slave_addr;
+		} tmdsconf;
+	};
+	bool i2c_upper_default;
+};
+
+u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
+u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
+u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *,
+		   struct dcb_output *);
+u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *,
+		   struct dcb_output *);
+int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
+		     (struct nouveau_bios *, void *, int index, u16 entry));
+
+#endif
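
dcb_outp_foreach() walks every output entry, handing the callback the entry index and its offset into the table; a non-zero return from the callback presumably aborts the walk with that value. A hedged sketch:

static int
example_outp(struct nouveau_bios *bios, void *data, int index, u16 entry)
{
	/* the raw entry bytes live at "entry", readable via nv_ro08() etc. */
	return 0;	/* non-zero would stop the walk */
}

static int
example_walk(struct nouveau_bios *bios)
{
	return dcb_outp_foreach(bios, NULL, example_outp);
}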
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
new file mode 100644
index 0000000..c35937e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
@@ -0,0 +1,48 @@
+#ifndef __NVBIOS_DISP_H__
+#define __NVBIOS_DISP_H__
+
+u16 nvbios_disp_table(struct nouveau_bios *,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub);
+
+struct nvbios_disp {
+	u16 data;
+};
+
+u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx,
+		      u8 *ver, u8 *hdr__, u8 *sub);
+u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx,
+		      u8 *ver, u8 *hdr__, u8 *sub,
+		      struct nvbios_disp *);
+
+struct nvbios_outp {
+	u16 type;
+	u16 mask;
+	u16 script[3];
+};
+
+u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		      struct nvbios_outp *);
+u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		      struct nvbios_outp *);
+
+
+struct nvbios_ocfg {
+	u16 match;
+	u16 clkcmp[2];
+};
+
+u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		      struct nvbios_ocfg *);
+u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type,
+		      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		      struct nvbios_ocfg *);
+u16 nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
new file mode 100644
index 0000000..6e54218
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
@@ -0,0 +1,34 @@
+#ifndef __NVBIOS_DP_H__
+#define __NVBIOS_DP_H__
+
+struct nvbios_dpout {
+	u16 type;
+	u16 mask;
+	u8  flags;
+	u32 script[5];
+	u32 lnkcmp;
+};
+
+u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx,
+		       u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		       struct nvbios_dpout *);
+u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask,
+		       u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		       struct nvbios_dpout *);
+
+struct nvbios_dpcfg {
+	u8 drv;
+	u8 pre;
+	u8 unk;
+};
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpcfg *);
+u16
+nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpcfg *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h
new file mode 100644
index 0000000..949fee3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h
@@ -0,0 +1,30 @@
+#ifndef __NVBIOS_EXTDEV_H__
+#define __NVBIOS_EXTDEV_H__
+
+struct nouveau_bios;
+
+enum nvbios_extdev_type {
+	NVBIOS_EXTDEV_LM89		= 0x02,
+	NVBIOS_EXTDEV_VT1103M		= 0x40,
+	NVBIOS_EXTDEV_PX3540		= 0x41,
+	NVBIOS_EXTDEV_VT1105M		= 0x42, /* or close enough... */
+	NVBIOS_EXTDEV_ADT7473		= 0x70, /* can also be an LM64 */
+	NVBIOS_EXTDEV_HDCP_EEPROM	= 0x90,
+	NVBIOS_EXTDEV_NONE		= 0xff,
+};
+
+struct nvbios_extdev_func {
+	u8 type;
+	u8 addr;
+	u8 bus;
+};
+
+int
+nvbios_extdev_parse(struct nouveau_bios *, int, struct nvbios_extdev_func *);
+
+int
+nvbios_extdev_find(struct nouveau_bios *, enum nvbios_extdev_type,
+		   struct nvbios_extdev_func *);
+
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
new file mode 100644
index 0000000..96d3364
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -0,0 +1,40 @@
+#ifndef __NVBIOS_GPIO_H__
+#define __NVBIOS_GPIO_H__
+
+enum dcb_gpio_func_name {
+	DCB_GPIO_PANEL_POWER = 0x01,
+	DCB_GPIO_TVDAC0 = 0x0c,
+	DCB_GPIO_TVDAC1 = 0x2d,
+	DCB_GPIO_FAN = 0x09,
+	DCB_GPIO_FAN_SENSE = 0x3d,
+	DCB_GPIO_UNUSED = 0xff
+};
+
+#define DCB_GPIO_LOG_DIR     0x02
+#define DCB_GPIO_LOG_DIR_OUT 0x00
+#define DCB_GPIO_LOG_DIR_IN  0x02
+#define DCB_GPIO_LOG_VAL     0x01
+#define DCB_GPIO_LOG_VAL_LO  0x00
+#define DCB_GPIO_LOG_VAL_HI  0x01
+
+struct dcb_gpio_func {
+	u8 func;
+	u8 line;
+	u8 log[2];
+
+	/* so far, "param" only seems to influence PWM-related GPIOs such
+	 * as FAN_CONTROL and PANEL_BACKLIGHT_LEVEL:
+	 * if param equals 1, hardware PWM is available;
+	 * if param equals 0, the host should toggle the GPIO itself.
+	 */
+	u8 param;
+};
+
+u16 dcb_gpio_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len);
+u16 dcb_gpio_parse(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len,
+		   struct dcb_gpio_func *);
+u16 dcb_gpio_match(struct nouveau_bios *, int idx, u8 func, u8 line,
+		   u8 *ver, u8 *len, struct dcb_gpio_func *);
+
+#endif
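
log[0] and log[1] encode, for the logical 0 and 1 states respectively, a direction flag plus an output value using the DCB_GPIO_LOG_* bits above (the indexing convention is an assumption here). A decoding sketch:

static void
example_gpio_decode(struct dcb_gpio_func *func)
{
	u8 log1 = func->log[1];	/* encoding for the logical "1" state */

	if (!(log1 & DCB_GPIO_LOG_DIR_IN)) {
		/* output: drive the line to (log1 & DCB_GPIO_LOG_VAL) */
	}
}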
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
new file mode 100644
index 0000000..10b57a1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
@@ -0,0 +1,25 @@
+#ifndef __NVBIOS_I2C_H__
+#define __NVBIOS_I2C_H__
+
+struct nouveau_bios;
+
+enum dcb_i2c_type {
+	DCB_I2C_NV04_BIT = 0,
+	DCB_I2C_NV4E_BIT = 4,
+	DCB_I2C_NVIO_BIT = 5,
+	DCB_I2C_NVIO_AUX = 6,
+	DCB_I2C_UNUSED = 0xff
+};
+
+struct dcb_i2c_entry {
+	enum dcb_i2c_type type;
+	u8 drive;
+	u8 sense;
+	u8 share;
+};
+
+u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_i2c_entry(struct nouveau_bios *, u8 index, u8 *ver, u8 *len);
+int dcb_i2c_parse(struct nouveau_bios *, u8 index, struct dcb_i2c_entry *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
new file mode 100644
index 0000000..ca2f6bf
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
@@ -0,0 +1,22 @@
+#ifndef __NVBIOS_INIT_H__
+#define __NVBIOS_INIT_H__
+
+struct nvbios_init {
+	struct nouveau_subdev *subdev;
+	struct nouveau_bios *bios;
+	u16 offset;
+	struct dcb_output *outp;
+	int crtc;
+
+	/* internal state used during parsing */
+	u8 execute;
+	u32 nested;
+	u16 repeat;
+	u16 repend;
+	u32 ramcfg;
+};
+
+int nvbios_exec(struct nvbios_init *);
+int nvbios_init(struct nouveau_subdev *, bool execute);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h
new file mode 100644
index 0000000..5572e60
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h
@@ -0,0 +1,9 @@
+#ifndef __NVBIOS_MXM_H__
+#define __NVBIOS_MXM_H__
+
+u16 mxm_table(struct nouveau_bios *, u8 *ver, u8 *hdr);
+
+u8  mxm_sor_map(struct nouveau_bios *, u8 conn);
+u8  mxm_ddc_map(struct nouveau_bios *, u8 port);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
new file mode 100644
index 0000000..0b285e9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
@@ -0,0 +1,14 @@
+#ifndef __NVBIOS_PERF_H__
+#define __NVBIOS_PERF_H__
+
+struct nouveau_bios;
+
+struct nvbios_perf_fan {
+	u32 pwm_divisor;
+};
+
+int
+nvbios_perf_fan_parse(struct nouveau_bios *, struct nvbios_perf_fan *);
+
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
new file mode 100644
index 0000000..b2f3d4d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
@@ -0,0 +1,79 @@
+#ifndef __NVBIOS_PLL_H__
+#define __NVBIOS_PLL_H__
+
+/*XXX: kill me */
+struct nouveau_pll_vals {
+	union {
+		struct {
+#ifdef __BIG_ENDIAN
+			uint8_t N1, M1, N2, M2;
+#else
+			uint8_t M1, N1, M2, N2;
+#endif
+		};
+		struct {
+			uint16_t NM1, NM2;
+		} __attribute__((packed));
+	};
+	int log2P;
+
+	int refclk;
+};
+
+struct nouveau_bios;
+
+/* these match types in pll limits table version 0x40,
+ * nouveau uses them on all chipsets internally where a
+ * specific pll needs to be referenced, but the exact
+ * register isn't known.
+ */
+enum nvbios_pll_type {
+	PLL_CORE   = 0x01,
+	PLL_SHADER = 0x02,
+	PLL_UNK03  = 0x03,
+	PLL_MEMORY = 0x04,
+	PLL_VDEC   = 0x05,
+	PLL_UNK40  = 0x40,
+	PLL_UNK41  = 0x41,
+	PLL_UNK42  = 0x42,
+	PLL_VPLL0  = 0x80,
+	PLL_VPLL1  = 0x81,
+	PLL_VPLL2  = 0x82,
+	PLL_VPLL3  = 0x83,
+	PLL_MAX    = 0xff
+};
+
+struct nvbios_pll {
+	enum nvbios_pll_type type;
+	u32 reg;
+	u32 refclk;
+
+	u8 min_p;
+	u8 max_p;
+	u8 bias_p;
+
+	/*
+	 * for most pre-nv50 cards, setting a log2P of 7 (the common max_log2p
+	 * value) is no different to 6 (at least for vplls), so allowing the
+	 * MNP calc to use 7 causes the generated clock to be out by a factor
+	 * of 2.  however, max_log2p cannot be fixed up during parsing, as the
+	 * unmodified value is still needed for setting mplls, hence the
+	 * additional max_p_usable member below
+	 */
+	u8 max_p_usable;
+
+	struct {
+		u32 min_freq;
+		u32 max_freq;
+		u32 min_inputfreq;
+		u32 max_inputfreq;
+		u8  min_m;
+		u8  max_m;
+		u8  min_n;
+		u8  max_n;
+	} vco1, vco2;
+};
+
+int nvbios_pll_parse(struct nouveau_bios *, u32 type, struct nvbios_pll *);
+
+#endif
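
A parsed nouveau_pll_vals implies the usual two-stage PLL equation, clk = refclk * N1/M1 * N2/M2 >> log2P, with the second stage optional. A sketch of the arithmetic, assuming M2 == 0 marks the second stage as unused:

/* Sketch only: output frequency implied by parsed PLL coefficients. */
static u32
example_pll_freq(const struct nouveau_pll_vals *pv)
{
	u64 freq = pv->refclk;

	freq = freq * pv->N1 / pv->M1;		/* first VCO stage */
	if (pv->M2)
		freq = freq * pv->N2 / pv->M2;	/* optional second stage */
	return (u32)(freq >> pv->log2P);	/* post-divider is 2^log2P */
}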
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
new file mode 100644
index 0000000..083541d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
@@ -0,0 +1,62 @@
+#ifndef __NVBIOS_THERM_H__
+#define __NVBIOS_THERM_H__
+
+struct nouveau_bios;
+
+struct nvbios_therm_threshold {
+	u8 temp;
+	u8 hysteresis;
+};
+
+struct nvbios_therm_sensor {
+	/* diode */
+	s16 slope_mult;
+	s16 slope_div;
+	s16 offset_num;
+	s16 offset_den;
+	s8 offset_constant;
+
+	/* thresholds */
+	struct nvbios_therm_threshold thrs_fan_boost;
+	struct nvbios_therm_threshold thrs_down_clock;
+	struct nvbios_therm_threshold thrs_critical;
+	struct nvbios_therm_threshold thrs_shutdown;
+};
+
+/* no vbios seen so far uses more than 6 trip points */
+#define NOUVEAU_TEMP_FAN_TRIP_MAX 10
+struct nouveau_therm_trip_point {
+	int fan_duty;
+	int temp;
+	int hysteresis;
+};
+
+struct nvbios_therm_fan {
+	u16 pwm_freq;
+
+	u8 min_duty;
+	u8 max_duty;
+
+	u16 bump_period;
+	u16 slow_down_period;
+
+	struct nouveau_therm_trip_point trip[NOUVEAU_TEMP_FAN_TRIP_MAX];
+	u8 nr_fan_trip;
+	u8 linear_min_temp;
+	u8 linear_max_temp;
+};
+
+enum nvbios_therm_domain {
+	NVBIOS_THERM_DOMAIN_CORE,
+	NVBIOS_THERM_DOMAIN_AMBIENT,
+};
+
+int
+nvbios_therm_sensor_parse(struct nouveau_bios *, enum nvbios_therm_domain,
+			  struct nvbios_therm_sensor *);
+
+int
+nvbios_therm_fan_parse(struct nouveau_bios *, struct nvbios_therm_fan *);
+
+
+#endif
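
The diode fields describe a linear calibration applied to the raw sensor value. A sketch of the implied conversion; the exact ordering of the scale and offset terms is an assumption from the field names:

/* Sketch only: raw diode reading -> temperature, per the fields above. */
static int
example_temp(const struct nvbios_therm_sensor *s, int raw)
{
	int temp = raw * s->slope_mult / s->slope_div;

	return temp + s->offset_num / s->offset_den + s->offset_constant;
}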
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h
new file mode 100644
index 0000000..360baab
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h
@@ -0,0 +1,19 @@
+#ifndef __NVBIOS_XPIO_H__
+#define __NVBIOS_XPIO_H__
+
+#define NVBIOS_XPIO_FLAG_AUX  0x10
+#define NVBIOS_XPIO_FLAG_AUX0 0x00
+#define NVBIOS_XPIO_FLAG_AUX1 0x10
+
+struct nvbios_xpio {
+	u8 type;
+	u8 addr;
+	u8 flags;
+};
+
+u16 dcb_xpio_table(struct nouveau_bios *, u8 idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 dcb_xpio_parse(struct nouveau_bios *, u8 idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_xpio *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bus.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
new file mode 100644
index 0000000..7d88ec4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
@@ -0,0 +1,41 @@
+#ifndef __NOUVEAU_BUS_H__
+#define __NOUVEAU_BUS_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_bus_intr {
+	u32 stat;
+	u32 unit;
+};
+
+struct nouveau_bus {
+	struct nouveau_subdev base;
+};
+
+static inline struct nouveau_bus *
+nouveau_bus(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BUS];
+}
+
+#define nouveau_bus_create(p, e, o, d)                                         \
+	nouveau_subdev_create_((p), (e), (o), 0, "PBUS", "master",             \
+			       sizeof(**d), (void **)d)
+#define nouveau_bus_destroy(p)                                                 \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_bus_init(p)                                                    \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_bus_fini(p, s)                                                 \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+#define _nouveau_bus_dtor _nouveau_subdev_dtor
+#define _nouveau_bus_init _nouveau_subdev_init
+#define _nouveau_bus_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv04_bus_oclass;
+extern struct nouveau_oclass nv31_bus_oclass;
+extern struct nouveau_oclass nv50_bus_oclass;
+extern struct nouveau_oclass nvc0_bus_oclass;
+
+#endif
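
The create/destroy/init/fini defines above are the standard subdev boilerplate that each chipset implementation expands in its own constructor. A hedged sketch of that shape; the priv struct and function names are illustrative:

/* Sketch only: the usual shape of a chipset bus constructor. */
struct example_bus_priv {
	struct nouveau_bus base;
};

static int
example_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
{
	struct example_bus_priv *priv;
	int ret;

	ret = nouveau_bus_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* chipset-specific setup (interrupt handler, etc.) goes here */
	return 0;
}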
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
new file mode 100644
index 0000000..41b7a6a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -0,0 +1,60 @@
+#ifndef __NOUVEAU_CLOCK_H__
+#define __NOUVEAU_CLOCK_H__
+
+#include <core/device.h>
+#include <core/subdev.h>
+
+struct nouveau_pll_vals;
+struct nvbios_pll;
+
+struct nouveau_clock {
+	struct nouveau_subdev base;
+
+	int (*pll_set)(struct nouveau_clock *, u32 type, u32 freq);
+
+	/*XXX: die, these are here *only* to support the completely
+	 *     bat-shit insane what-was-nouveau_hw.c code
+	 */
+	int (*pll_calc)(struct nouveau_clock *, struct nvbios_pll *,
+			int clk, struct nouveau_pll_vals *pv);
+	int (*pll_prog)(struct nouveau_clock *, u32 reg1,
+			struct nouveau_pll_vals *pv);
+};
+
+static inline struct nouveau_clock *
+nouveau_clock(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_CLOCK];
+}
+
+#define nouveau_clock_create(p,e,o,d)                                          \
+	nouveau_subdev_create((p), (e), (o), 0, "CLOCK", "clock", d)
+#define nouveau_clock_destroy(p)                                               \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_clock_init(p)                                                  \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_clock_fini(p,s)                                                \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+int  nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
+			   struct nouveau_oclass *, void *, u32, int, void **);
+
+#define _nouveau_clock_dtor _nouveau_subdev_dtor
+#define _nouveau_clock_init _nouveau_subdev_init
+#define _nouveau_clock_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv04_clock_oclass;
+extern struct nouveau_oclass nv40_clock_oclass;
+extern struct nouveau_oclass nv50_clock_oclass;
+extern struct nouveau_oclass nva3_clock_oclass;
+extern struct nouveau_oclass nvc0_clock_oclass;
+
+int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq);
+int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
+			int clk, struct nouveau_pll_vals *);
+int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
+			struct nouveau_pll_vals *);
+int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
+			int clk, struct nouveau_pll_vals *);
+
+#endif
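
pll_set() takes one of the nvbios_pll_type identifiers from subdev/bios/pll.h and a target frequency. A sketch of retuning a video PLL; the kHz unit is an assumption based on how the nv04 implementation is used:

/* Sketch only: retune VPLL0 via the clock subdev. */
static int
example_set_vpll(void *obj)
{
	struct nouveau_clock *clk = nouveau_clock(obj);

	if (!clk || !clk->pll_set)
		return -ENODEV;
	return clk->pll_set(clk, PLL_VPLL0, 135000);	/* assumed kHz */
}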
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
new file mode 100644
index 0000000..29e4cc1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
@@ -0,0 +1,40 @@
+#ifndef __NOUVEAU_DEVINIT_H__
+#define __NOUVEAU_DEVINIT_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_devinit {
+	struct nouveau_subdev base;
+	bool post;
+	void (*meminit)(struct nouveau_devinit *);
+};
+
+static inline struct nouveau_devinit *
+nouveau_devinit(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_DEVINIT];
+}
+
+#define nouveau_devinit_create(p,e,o,d)                                        \
+	nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_devinit_destroy(p)                                             \
+	nouveau_subdev_destroy(&(p)->base)
+
+int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, int, void **);
+int nouveau_devinit_init(struct nouveau_devinit *);
+int nouveau_devinit_fini(struct nouveau_devinit *, bool suspend);
+
+extern struct nouveau_oclass nv04_devinit_oclass;
+extern struct nouveau_oclass nv05_devinit_oclass;
+extern struct nouveau_oclass nv10_devinit_oclass;
+extern struct nouveau_oclass nv1a_devinit_oclass;
+extern struct nouveau_oclass nv20_devinit_oclass;
+extern struct nouveau_oclass nv50_devinit_oclass;
+
+void nv04_devinit_dtor(struct nouveau_object *);
+int  nv04_devinit_init(struct nouveau_object *);
+int  nv04_devinit_fini(struct nouveau_object *, bool);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
new file mode 100644
index 0000000..da470e6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -0,0 +1,171 @@
+#ifndef __NOUVEAU_FB_H__
+#define __NOUVEAU_FB_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+#include <core/mm.h>
+
+#include <subdev/vm.h>
+
+/* memory type/access flags, do not match hardware values */
+#define NV_MEM_ACCESS_RO  1
+#define NV_MEM_ACCESS_WO  2
+#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
+#define NV_MEM_ACCESS_SYS 4
+#define NV_MEM_ACCESS_VM  8
+#define NV_MEM_ACCESS_NOSNOOP 16
+
+#define NV_MEM_TARGET_VRAM        0
+#define NV_MEM_TARGET_PCI         1
+#define NV_MEM_TARGET_PCI_NOSNOOP 2
+#define NV_MEM_TARGET_VM          3
+#define NV_MEM_TARGET_GART        4
+
+#define NV_MEM_TYPE_VM 0x7f
+#define NV_MEM_COMP_VM 0x03
+
+struct nouveau_mem {
+	struct drm_device *dev;
+
+	struct nouveau_vma bar_vma;
+	struct nouveau_vma vma[2];
+	u8  page_shift;
+
+	struct nouveau_mm_node *tag;
+	struct list_head regions;
+	dma_addr_t *pages;
+	u32 memtype;
+	u64 offset;
+	u64 size;
+	struct sg_table *sg;
+};
+
+struct nouveau_fb_tile {
+	struct nouveau_mm_node *tag;
+	u32 addr;
+	u32 limit;
+	u32 pitch;
+	u32 zcomp;
+};
+
+struct nouveau_fb {
+	struct nouveau_subdev base;
+
+	bool (*memtype_valid)(struct nouveau_fb *, u32 memtype);
+
+	struct {
+		enum {
+			NV_MEM_TYPE_UNKNOWN = 0,
+			NV_MEM_TYPE_STOLEN,
+			NV_MEM_TYPE_SGRAM,
+			NV_MEM_TYPE_SDRAM,
+			NV_MEM_TYPE_DDR1,
+			NV_MEM_TYPE_DDR2,
+			NV_MEM_TYPE_DDR3,
+			NV_MEM_TYPE_GDDR2,
+			NV_MEM_TYPE_GDDR3,
+			NV_MEM_TYPE_GDDR4,
+			NV_MEM_TYPE_GDDR5
+		} type;
+		u64 stolen;
+		u64 size;
+
+		int ranks;
+		int parts;
+
+		int  (*init)(struct nouveau_fb *);
+		int  (*get)(struct nouveau_fb *, u64 size, u32 align,
+			    u32 size_nc, u32 type, struct nouveau_mem **);
+		void (*put)(struct nouveau_fb *, struct nouveau_mem **);
+	} ram;
+
+	struct nouveau_mm vram;
+	struct nouveau_mm tags;
+
+	struct {
+		struct nouveau_fb_tile region[16];
+		int regions;
+		void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
+			     u32 pitch, u32 flags, struct nouveau_fb_tile *);
+		void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
+			     struct nouveau_fb_tile *);
+		void (*fini)(struct nouveau_fb *, int i,
+			     struct nouveau_fb_tile *);
+		void (*prog)(struct nouveau_fb *, int i,
+			     struct nouveau_fb_tile *);
+	} tile;
+};
+
+static inline struct nouveau_fb *
+nouveau_fb(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
+}
+
+#define nouveau_fb_create(p,e,c,d)                                             \
+	nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d))
+int  nouveau_fb_preinit(struct nouveau_fb *);
+void nouveau_fb_destroy(struct nouveau_fb *);
+int  nouveau_fb_init(struct nouveau_fb *);
+#define nouveau_fb_fini(p,s)                                                   \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+void _nouveau_fb_dtor(struct nouveau_object *);
+int  _nouveau_fb_init(struct nouveau_object *);
+#define _nouveau_fb_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv04_fb_oclass;
+extern struct nouveau_oclass nv10_fb_oclass;
+extern struct nouveau_oclass nv1a_fb_oclass;
+extern struct nouveau_oclass nv20_fb_oclass;
+extern struct nouveau_oclass nv25_fb_oclass;
+extern struct nouveau_oclass nv30_fb_oclass;
+extern struct nouveau_oclass nv35_fb_oclass;
+extern struct nouveau_oclass nv36_fb_oclass;
+extern struct nouveau_oclass nv40_fb_oclass;
+extern struct nouveau_oclass nv41_fb_oclass;
+extern struct nouveau_oclass nv44_fb_oclass;
+extern struct nouveau_oclass nv46_fb_oclass;
+extern struct nouveau_oclass nv47_fb_oclass;
+extern struct nouveau_oclass nv49_fb_oclass;
+extern struct nouveau_oclass nv4e_fb_oclass;
+extern struct nouveau_oclass nv50_fb_oclass;
+extern struct nouveau_oclass nvc0_fb_oclass;
+
+struct nouveau_bios;
+int  nouveau_fb_bios_memtype(struct nouveau_bios *);
+
+bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
+
+void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv20_fb_vram_init(struct nouveau_fb *);
+void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv30_fb_init(struct nouveau_object *);
+void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+
+void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
+		       struct nouveau_fb_tile *);
+
+int  nv41_fb_vram_init(struct nouveau_fb *);
+int  nv41_fb_init(struct nouveau_object *);
+void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv44_fb_vram_init(struct nouveau_fb *);
+int  nv44_fb_init(struct nouveau_object *);
+void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+
+void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **);
+
+#endif
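
The ram.get()/ram.put() hooks are the VRAM allocator behind all of this: get() takes a size, alignment, non-contiguous granularity (0 forces a contiguous block) and memtype, and put() frees the allocation and clears the pointer. A sketch with illustrative values:

/* Sketch only: allocate and release 1MiB of VRAM through PFB. */
static int
example_vram(void *obj)
{
	struct nouveau_fb *pfb = nouveau_fb(obj);
	struct nouveau_mem *mem = NULL;
	int ret;

	ret = pfb->ram.get(pfb, 0x100000, 0x1000, 0, 0x000, &mem);
	if (ret)
		return ret;

	pfb->ram.put(pfb, &mem);	/* frees and NULLs mem */
	return 0;
}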
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
new file mode 100644
index 0000000..c85b9f1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -0,0 +1,53 @@
+#ifndef __NOUVEAU_GPIO_H__
+#define __NOUVEAU_GPIO_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+#include <core/event.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/gpio.h>
+
+struct nouveau_gpio {
+	struct nouveau_subdev base;
+
+	struct nouveau_event *events;
+
+	/* hardware interfaces */
+	void (*reset)(struct nouveau_gpio *, u8 func);
+	int  (*drive)(struct nouveau_gpio *, int line, int dir, int out);
+	int  (*sense)(struct nouveau_gpio *, int line);
+
+	/* software interfaces */
+	int  (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
+		     struct dcb_gpio_func *);
+	int  (*set)(struct nouveau_gpio *, int idx, u8 tag, u8 line, int state);
+	int  (*get)(struct nouveau_gpio *, int idx, u8 tag, u8 line);
+};
+
+static inline struct nouveau_gpio *
+nouveau_gpio(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_GPIO];
+}
+
+#define nouveau_gpio_create(p,e,o,l,d)                                         \
+	nouveau_gpio_create_((p), (e), (o), (l), sizeof(**d), (void **)d)
+#define nouveau_gpio_destroy(p) ({                                             \
+	struct nouveau_gpio *gpio = (p);                                       \
+	_nouveau_gpio_dtor(nv_object(gpio));                                   \
+})
+#define nouveau_gpio_fini(p,s)                                                 \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+int  nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
+			  struct nouveau_oclass *, int, int, void **);
+void _nouveau_gpio_dtor(struct nouveau_object *);
+int  nouveau_gpio_init(struct nouveau_gpio *);
+
+extern struct nouveau_oclass nv10_gpio_oclass;
+extern struct nouveau_oclass nv50_gpio_oclass;
+extern struct nouveau_oclass nvd0_gpio_oclass;
+extern struct nouveau_oclass nve0_gpio_oclass;
+
+#endif
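
The software interface resolves a DCB function tag to a physical line before driving it; set() and get() do the lookup internally. A sketch of switching panel power, assuming 0xff acts as the any-line wildcard for the line argument:

/* Sketch only: drive the panel-power GPIO through the subdev. */
static int
example_panel_power(void *obj, bool on)
{
	struct nouveau_gpio *gpio = nouveau_gpio(obj);

	if (!gpio)
		return -ENODEV;
	return gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, on);
}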
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
new file mode 100644
index 0000000..888384c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -0,0 +1,151 @@
+#ifndef __NOUVEAU_I2C_H__
+#define __NOUVEAU_I2C_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/i2c.h>
+
+#define NV_I2C_PORT(n)    (0x00 + (n))
+#define NV_I2C_DEFAULT(n) (0x80 + (n))
+
+#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
+#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
+#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
+
+struct nouveau_i2c_port {
+	struct nouveau_object base;
+	struct i2c_adapter adapter;
+
+	struct list_head head;
+	u8  index;
+
+	const struct nouveau_i2c_func *func;
+};
+
+struct nouveau_i2c_func {
+	void (*acquire)(struct nouveau_i2c_port *);
+	void (*release)(struct nouveau_i2c_port *);
+
+	void (*drive_scl)(struct nouveau_i2c_port *, int);
+	void (*drive_sda)(struct nouveau_i2c_port *, int);
+	int  (*sense_scl)(struct nouveau_i2c_port *);
+	int  (*sense_sda)(struct nouveau_i2c_port *);
+
+	int  (*aux)(struct nouveau_i2c_port *, u8, u32, u8 *, u8);
+	int  (*pattern)(struct nouveau_i2c_port *, int pattern);
+	int  (*lnk_ctl)(struct nouveau_i2c_port *, int nr, int bw, bool enh);
+	int  (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe);
+};
+
+#define nouveau_i2c_port_create(p,e,o,i,a,d)                                   \
+	nouveau_i2c_port_create_((p), (e), (o), (i), (a),                      \
+				 sizeof(**d), (void **)d)
+#define nouveau_i2c_port_destroy(p) ({                                         \
+	struct nouveau_i2c_port *port = (p);                                   \
+	_nouveau_i2c_port_dtor(nv_object(port));                               \
+})
+#define nouveau_i2c_port_init(p)                                               \
+	nouveau_object_init(&(p)->base)
+#define nouveau_i2c_port_fini(p,s)                                             \
+	nouveau_object_fini(&(p)->base, (s))
+
+int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *,
+			     struct nouveau_oclass *, u8,
+			     const struct i2c_algorithm *, int, void **);
+void _nouveau_i2c_port_dtor(struct nouveau_object *);
+#define _nouveau_i2c_port_init nouveau_object_init
+#define _nouveau_i2c_port_fini nouveau_object_fini
+
+struct nouveau_i2c {
+	struct nouveau_subdev base;
+
+	struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
+	struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
+	int (*identify)(struct nouveau_i2c *, int index,
+			const char *what, struct i2c_board_info *,
+			bool (*match)(struct nouveau_i2c_port *,
+				      struct i2c_board_info *));
+	struct list_head ports;
+};
+
+static inline struct nouveau_i2c *
+nouveau_i2c(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_I2C];
+}
+
+#define nouveau_i2c_create(p,e,o,s,d)                                          \
+	nouveau_i2c_create_((p), (e), (o), (s), sizeof(**d), (void **)d)
+#define nouveau_i2c_destroy(p) ({                                              \
+	struct nouveau_i2c *i2c = (p);                                         \
+	_nouveau_i2c_dtor(nv_object(i2c));                                     \
+})
+#define nouveau_i2c_init(p) ({                                                 \
+	struct nouveau_i2c *i2c = (p);                                         \
+	_nouveau_i2c_init(nv_object(i2c));                                     \
+})
+#define nouveau_i2c_fini(p,s) ({                                               \
+	struct nouveau_i2c *i2c = (p);                                         \
+	_nouveau_i2c_fini(nv_object(i2c), (s));                                \
+})
+
+int nouveau_i2c_create_(struct nouveau_object *, struct nouveau_object *,
+			struct nouveau_oclass *, struct nouveau_oclass *,
+			int, void **);
+void _nouveau_i2c_dtor(struct nouveau_object *);
+int  _nouveau_i2c_init(struct nouveau_object *);
+int  _nouveau_i2c_fini(struct nouveau_object *, bool);
+
+extern struct nouveau_oclass nv04_i2c_oclass;
+extern struct nouveau_oclass nv4e_i2c_oclass;
+extern struct nouveau_oclass nv50_i2c_oclass;
+extern struct nouveau_oclass nv94_i2c_oclass;
+extern struct nouveau_oclass nvd0_i2c_oclass;
+extern struct nouveau_oclass nouveau_anx9805_sclass[];
+
+extern const struct i2c_algorithm nouveau_i2c_bit_algo;
+extern const struct i2c_algorithm nouveau_i2c_aux_algo;
+
+static inline int
+nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
+{
+	u8 val;
+	struct i2c_msg msgs[] = {
+		{ .addr = addr, .flags = 0, .len = 1, .buf = &reg },
+		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
+	};
+
+	int ret = i2c_transfer(&port->adapter, msgs, 2);
+	if (ret != 2)
+		return -EIO;
+
+	return val;
+}
+
+static inline int
+nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val)
+{
+	u8 buf[2] = { reg, val };
+	struct i2c_msg msgs[] = {
+		{ .addr = addr, .flags = 0, .len = 2, .buf = buf },
+	};
+
+	int ret = i2c_transfer(&port->adapter, msgs, 1);
+	if (ret != 1)
+		return -EIO;
+
+	return 0;
+}
+
+static inline bool
+nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr)
+{
+	return nv_rdi2cr(port, addr, 0) >= 0;
+}
+
+int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
+int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
+
+#endif
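
Putting the pieces above together: find() resolves a DCB port index to an adapter, nv_probe_i2c() checks for an ACK, and nv_rdi2cr() performs a register read. The port index and device address here are illustrative:

/* Sketch only: probe and read a device behind DCB i2c port 0. */
static int
example_i2c(void *obj)
{
	struct nouveau_i2c *i2c = nouveau_i2c(obj);
	struct nouveau_i2c_port *port;

	port = i2c->find(i2c, NV_I2C_PORT(0));
	if (!port || !nv_probe_i2c(port, 0x50))
		return -ENODEV;

	return nv_rdi2cr(port, 0x50, 0x00);	/* byte value, or -EIO */
}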
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
new file mode 100644
index 0000000..88814f1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
@@ -0,0 +1,34 @@
+#ifndef __NOUVEAU_IBUS_H__
+#define __NOUVEAU_IBUS_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_ibus {
+	struct nouveau_subdev base;
+};
+
+static inline struct nouveau_ibus *
+nouveau_ibus(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_IBUS];
+}
+
+#define nouveau_ibus_create(p,e,o,d)                                           \
+	nouveau_subdev_create_((p), (e), (o), 0, "PIBUS", "ibus",              \
+			       sizeof(**d), (void **)d)
+#define nouveau_ibus_destroy(p)                                                \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_ibus_init(p)                                                   \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_ibus_fini(p,s)                                                 \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+#define _nouveau_ibus_dtor _nouveau_subdev_dtor
+#define _nouveau_ibus_init _nouveau_subdev_init
+#define _nouveau_ibus_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nvc0_ibus_oclass;
+extern struct nouveau_oclass nve0_ibus_oclass;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
new file mode 100644
index 0000000..ec7a54e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -0,0 +1,73 @@
+#ifndef __NOUVEAU_INSTMEM_H__
+#define __NOUVEAU_INSTMEM_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+#include <core/mm.h>
+
+struct nouveau_instobj {
+	struct nouveau_object base;
+	struct list_head head;
+	u32 *suspend;
+	u64 addr;
+	u32 size;
+};
+
+static inline struct nouveau_instobj *
+nv_memobj(void *obj)
+{
+#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
+	if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
+		nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
+#endif
+	return obj;
+}
+
+#define nouveau_instobj_create(p,e,o,d)                                        \
+	nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_instobj_init(p)                                                \
+	nouveau_object_init(&(p)->base)
+#define nouveau_instobj_fini(p,s)                                              \
+	nouveau_object_fini(&(p)->base, (s))
+
+int  nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
+			     struct nouveau_oclass *, int, void **);
+void nouveau_instobj_destroy(struct nouveau_instobj *);
+
+void _nouveau_instobj_dtor(struct nouveau_object *);
+#define _nouveau_instobj_init nouveau_object_init
+#define _nouveau_instobj_fini nouveau_object_fini
+
+struct nouveau_instmem {
+	struct nouveau_subdev base;
+	struct list_head list;
+
+	u32 reserved;
+	int (*alloc)(struct nouveau_instmem *, struct nouveau_object *,
+		     u32 size, u32 align, struct nouveau_object **);
+};
+
+static inline struct nouveau_instmem *
+nouveau_instmem(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
+}
+
+#define nouveau_instmem_create(p,e,o,d)                                        \
+	nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_instmem_destroy(p)                                             \
+	nouveau_subdev_destroy(&(p)->base)
+int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
+			    struct nouveau_oclass *, int, void **);
+int nouveau_instmem_init(struct nouveau_instmem *);
+int nouveau_instmem_fini(struct nouveau_instmem *, bool);
+
+#define _nouveau_instmem_dtor _nouveau_subdev_dtor
+int _nouveau_instmem_init(struct nouveau_object *);
+int _nouveau_instmem_fini(struct nouveau_object *, bool);
+
+extern struct nouveau_oclass nv04_instmem_oclass;
+extern struct nouveau_oclass nv40_instmem_oclass;
+extern struct nouveau_oclass nv50_instmem_oclass;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
new file mode 100644
index 0000000..a1985ed
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
@@ -0,0 +1,40 @@
+#ifndef __NOUVEAU_LTCG_H__
+#define __NOUVEAU_LTCG_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_mm_node;
+
+struct nouveau_ltcg {
+	struct nouveau_subdev base;
+
+	int  (*tags_alloc)(struct nouveau_ltcg *, u32 count,
+	                   struct nouveau_mm_node **);
+	void (*tags_free)(struct nouveau_ltcg *, struct nouveau_mm_node **);
+	void (*tags_clear)(struct nouveau_ltcg *, u32 first, u32 count);
+};
+
+static inline struct nouveau_ltcg *
+nouveau_ltcg(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTCG];
+}
+
+#define nouveau_ltcg_create(p,e,o,d)                                           \
+	nouveau_subdev_create_((p), (e), (o), 0, "PLTCG", "level2",            \
+			       sizeof(**d), (void **)d)
+#define nouveau_ltcg_destroy(p)                                                \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_ltcg_init(p)                                                   \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_ltcg_fini(p,s)                                                 \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+#define _nouveau_ltcg_dtor _nouveau_subdev_dtor
+#define _nouveau_ltcg_init _nouveau_subdev_init
+#define _nouveau_ltcg_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nvc0_ltcg_oclass;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
new file mode 100644
index 0000000..9d2cd20
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -0,0 +1,52 @@
+#ifndef __NOUVEAU_MC_H__
+#define __NOUVEAU_MC_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_mc_intr {
+	u32 stat;
+	u32 unit;
+};
+
+struct nouveau_mc {
+	struct nouveau_subdev base;
+	const struct nouveau_mc_intr *intr_map;
+};
+
+static inline struct nouveau_mc *
+nouveau_mc(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
+}
+
+#define nouveau_mc_create(p,e,o,m,d)                                           \
+	nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
+#define nouveau_mc_destroy(p) ({                                               \
+	struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc));        \
+})
+#define nouveau_mc_init(p) ({                                                  \
+	struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc));        \
+})
+#define nouveau_mc_fini(p,s) ({                                                \
+	struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s));   \
+})
+
+int  nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
+			struct nouveau_oclass *, const struct nouveau_mc_intr *,
+			int, void **);
+void _nouveau_mc_dtor(struct nouveau_object *);
+int  _nouveau_mc_init(struct nouveau_object *);
+int  _nouveau_mc_fini(struct nouveau_object *, bool);
+
+extern struct nouveau_oclass nv04_mc_oclass;
+extern struct nouveau_oclass nv44_mc_oclass;
+extern struct nouveau_oclass nv50_mc_oclass;
+extern struct nouveau_oclass nv98_mc_oclass;
+extern struct nouveau_oclass nvc0_mc_oclass;
+
+extern const struct nouveau_mc_intr nv04_mc_intr[];
+int nv04_mc_init(struct nouveau_object *);
+int nv50_mc_init(struct nouveau_object *);
+
+#endif
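
intr_map is a table translating PMC interrupt status bits to the engine or subdev responsible for them, terminated by an empty entry; nv04_mc_intr[] above is the canonical instance. The shape, with invented values:

/* Sketch only: an interrupt map; the bit assignments are made up. */
static const struct nouveau_mc_intr
example_mc_intr[] = {
	{ 0x00000001, NVDEV_ENGINE_MPEG },
	{ 0x00001000, NVDEV_ENGINE_GR },
	{ 0x00100000, NVDEV_SUBDEV_TIMER },
	{}
};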
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h
new file mode 100644
index 0000000..b93b152
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h
@@ -0,0 +1,37 @@
+#ifndef __NOUVEAU_MXM_H__
+#define __NOUVEAU_MXM_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+#define MXM_SANITISE_DCB 0x00000001
+
+struct nouveau_mxm {
+	struct nouveau_subdev base;
+	u32 action;
+	u8 *mxms;
+};
+
+static inline struct nouveau_mxm *
+nouveau_mxm(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MXM];
+}
+
+#define nouveau_mxm_create(p,e,o,d)                                            \
+	nouveau_mxm_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mxm_init(p)                                                    \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_mxm_fini(p,s)                                                  \
+	nouveau_subdev_fini(&(p)->base, (s))
+int  nouveau_mxm_create_(struct nouveau_object *, struct nouveau_object *,
+			 struct nouveau_oclass *, int, void **);
+void nouveau_mxm_destroy(struct nouveau_mxm *);
+
+#define _nouveau_mxm_dtor _nouveau_subdev_dtor
+#define _nouveau_mxm_init _nouveau_subdev_init
+#define _nouveau_mxm_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv50_mxm_oclass;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
new file mode 100644
index 0000000..c075998
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -0,0 +1,80 @@
+#ifndef __NOUVEAU_THERM_H__
+#define __NOUVEAU_THERM_H__
+
+#include <core/device.h>
+#include <core/subdev.h>
+
+enum nouveau_therm_fan_mode {
+	NOUVEAU_THERM_CTRL_NONE = 0,
+	NOUVEAU_THERM_CTRL_MANUAL = 1,
+	NOUVEAU_THERM_CTRL_AUTO = 2,
+};
+
+enum nouveau_therm_attr_type {
+	NOUVEAU_THERM_ATTR_FAN_MIN_DUTY = 0,
+	NOUVEAU_THERM_ATTR_FAN_MAX_DUTY = 1,
+	NOUVEAU_THERM_ATTR_FAN_MODE = 2,
+
+	NOUVEAU_THERM_ATTR_THRS_FAN_BOOST = 10,
+	NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST = 11,
+	NOUVEAU_THERM_ATTR_THRS_DOWN_CLK = 12,
+	NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST = 13,
+	NOUVEAU_THERM_ATTR_THRS_CRITICAL = 14,
+	NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST = 15,
+	NOUVEAU_THERM_ATTR_THRS_SHUTDOWN = 16,
+	NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST = 17,
+};
+
+struct nouveau_therm {
+	struct nouveau_subdev base;
+
+	int (*pwm_ctrl)(struct nouveau_therm *, int line, bool);
+	int (*pwm_get)(struct nouveau_therm *, int line, u32 *, u32 *);
+	int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
+	int (*pwm_clock)(struct nouveau_therm *);
+
+	int (*fan_get)(struct nouveau_therm *);
+	int (*fan_set)(struct nouveau_therm *, int);
+	int (*fan_sense)(struct nouveau_therm *);
+
+	int (*temp_get)(struct nouveau_therm *);
+
+	int (*attr_get)(struct nouveau_therm *, enum nouveau_therm_attr_type);
+	int (*attr_set)(struct nouveau_therm *,
+			enum nouveau_therm_attr_type, int);
+};
+
+static inline struct nouveau_therm *
+nouveau_therm(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_THERM];
+}
+
+#define nouveau_therm_create(p,e,o,d)                                          \
+	nouveau_therm_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_therm_destroy(p) ({                                            \
+	struct nouveau_therm *therm = (p);                                     \
+	_nouveau_therm_dtor(nv_object(therm));                                 \
+})
+#define nouveau_therm_init(p) ({                                               \
+	struct nouveau_therm *therm = (p);                                     \
+	_nouveau_therm_init(nv_object(therm));                                 \
+})
+#define nouveau_therm_fini(p,s) ({                                             \
+	struct nouveau_therm *therm = (p);                                     \
+	_nouveau_therm_fini(nv_object(therm), (s));                            \
+})
+
+int  nouveau_therm_create_(struct nouveau_object *, struct nouveau_object *,
+			   struct nouveau_oclass *, int, void **);
+void _nouveau_therm_dtor(struct nouveau_object *);
+int  _nouveau_therm_init(struct nouveau_object *);
+int  _nouveau_therm_fini(struct nouveau_object *, bool);
+
+extern struct nouveau_oclass nv40_therm_oclass;
+extern struct nouveau_oclass nv50_therm_oclass;
+extern struct nouveau_oclass nv84_therm_oclass;
+extern struct nouveau_oclass nva3_therm_oclass;
+extern struct nouveau_oclass nvd0_therm_oclass;
+
+#endif
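
attr_set() with NOUVEAU_THERM_ATTR_FAN_MODE selects between the control modes above, and fan_set() then drives the duty directly when in manual mode. A sketch; duty as a percentage is an assumption based on the min_duty/max_duty fields in the vbios tables:

/* Sketch only: take manual control of the fan at a fixed duty. */
static int
example_fan(void *obj, int duty)
{
	struct nouveau_therm *therm = nouveau_therm(obj);
	int ret;

	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE,
			      NOUVEAU_THERM_CTRL_MANUAL);
	if (ret)
		return ret;
	return therm->fan_set(therm, duty);
}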
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
new file mode 100644
index 0000000..e465d15
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -0,0 +1,61 @@
+#ifndef __NOUVEAU_TIMER_H__
+#define __NOUVEAU_TIMER_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+struct nouveau_alarm {
+	struct list_head head;
+	u64 timestamp;
+	void (*func)(struct nouveau_alarm *);
+};
+
+static inline void
+nouveau_alarm_init(struct nouveau_alarm *alarm,
+		   void (*func)(struct nouveau_alarm *))
+{
+	INIT_LIST_HEAD(&alarm->head);
+	alarm->func = func;
+}
+
+bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
+bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
+bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
+void nouveau_timer_alarm(void *, u32 nsec, struct nouveau_alarm *);
+
+#define NV_WAIT_DEFAULT 2000000000ULL
+#define nv_wait(o,a,m,v)                                                       \
+	nouveau_timer_wait_eq((o), NV_WAIT_DEFAULT, (a), (m), (v))
+#define nv_wait_ne(o,a,m,v)                                                    \
+	nouveau_timer_wait_ne((o), NV_WAIT_DEFAULT, (a), (m), (v))
+#define nv_wait_cb(o,c,d)                                                      \
+	nouveau_timer_wait_cb((o), NV_WAIT_DEFAULT, (c), (d))
+
+struct nouveau_timer {
+	struct nouveau_subdev base;
+	u64  (*read)(struct nouveau_timer *);
+	void (*alarm)(struct nouveau_timer *, u64 time, struct nouveau_alarm *);
+};
+
+static inline struct nouveau_timer *
+nouveau_timer(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_TIMER];
+}
+
+#define nouveau_timer_create(p,e,o,d)                                          \
+	nouveau_subdev_create_((p), (e), (o), 0, "PTIMER", "timer",            \
+			       sizeof(**d), (void **)d)
+#define nouveau_timer_destroy(p)                                               \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_timer_init(p)                                                  \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_timer_fini(p,s)                                                \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+int nouveau_timer_create_(struct nouveau_object *, struct nouveau_engine *,
+			  struct nouveau_oclass *, int size, void **);
+
+extern struct nouveau_oclass nv04_timer_oclass;
+
+#endif
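
nv_wait() polls until (register & mask) == value or the 2-second default timeout expires; nouveau_timer_alarm() schedules a callback nsec in the future against PTIMER. A sketch of both; the register address is illustrative:

/* Sketch only: poll a bit, then arm a one-second alarm. */
static void
example_alarm_handler(struct nouveau_alarm *alarm)
{
	/* called from the timer interrupt when the alarm fires */
}

static void
example_timer(void *obj)
{
	static struct nouveau_alarm alarm;

	if (!nv_wait(obj, 0x002140, 0x00000001, 0x00000000))
		return;	/* timed out waiting for the bit to clear */

	nouveau_alarm_init(&alarm, example_alarm_handler);
	nouveau_timer_alarm(obj, 1000000000, &alarm);	/* ~1s from now */
}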
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/vga.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/vga.h
new file mode 100644
index 0000000..fee09ad
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/vga.h
@@ -0,0 +1,30 @@
+#ifndef __NOUVEAU_VGA_H__
+#define __NOUVEAU_VGA_H__
+
+#include <core/os.h>
+
+/* access to various legacy io ports */
+u8   nv_rdport(void *obj, int head, u16 port);
+void nv_wrport(void *obj, int head, u16 port, u8 value);
+
+/* VGA Sequencer */
+u8   nv_rdvgas(void *obj, int head, u8 index);
+void nv_wrvgas(void *obj, int head, u8 index, u8 value);
+
+/* VGA Graphics */
+u8   nv_rdvgag(void *obj, int head, u8 index);
+void nv_wrvgag(void *obj, int head, u8 index, u8 value);
+
+/* VGA CRTC */
+u8   nv_rdvgac(void *obj, int head, u8 index);
+void nv_wrvgac(void *obj, int head, u8 index, u8 value);
+
+/* VGA indexed port access dispatcher */
+u8   nv_rdvgai(void *obj, int head, u16 port, u8 index);
+void nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value);
+
+bool nv_lockvgac(void *obj, bool lock);
+u8   nv_rdvgaowner(void *obj);
+void nv_wrvgaowner(void *obj, u8);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
new file mode 100644
index 0000000..9d595ef
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_VM_H__
+#define __NOUVEAU_VM_H__
+
+#include <core/object.h>
+#include <core/subdev.h>
+#include <core/device.h>
+#include <core/mm.h>
+
+struct nouveau_vm_pgt {
+	struct nouveau_gpuobj *obj[2];
+	u32 refcount[2];
+};
+
+struct nouveau_vm_pgd {
+	struct list_head head;
+	struct nouveau_gpuobj *obj;
+};
+
+struct nouveau_gpuobj;
+struct nouveau_mem;
+
+struct nouveau_vma {
+	struct list_head head;
+	int refcount;
+	struct nouveau_vm *vm;
+	struct nouveau_mm_node *node;
+	u64 offset;
+	u32 access;
+};
+
+struct nouveau_vm {
+	struct nouveau_vmmgr *vmm;
+	struct nouveau_mm mm;
+	int refcount;
+
+	struct list_head pgd_list;
+	atomic_t engref[64]; /* XXX: should be NVDEV_SUBDEV_NR */
+
+	struct nouveau_vm_pgt *pgt;
+	u32 fpde;
+	u32 lpde;
+};
+
+struct nouveau_vmmgr {
+	struct nouveau_subdev base;
+
+	u64 limit;
+	u8  dma_bits;
+	u32 pgt_bits;
+	u8  spg_shift;
+	u8  lpg_shift;
+
+	int  (*create)(struct nouveau_vmmgr *, u64 offset, u64 length,
+		       u64 mm_offset, struct nouveau_vm **);
+
+	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
+			struct nouveau_gpuobj *pgt[2]);
+	void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
+		    struct nouveau_mem *, u32 pte, u32 cnt,
+		    u64 phys, u64 delta);
+	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
+		       struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
+	void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
+	void (*flush)(struct nouveau_vm *);
+};
+
+static inline struct nouveau_vmmgr *
+nouveau_vmmgr(void *obj)
+{
+	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VM];
+}
+
+#define nouveau_vmmgr_create(p,e,o,i,f,d)                                      \
+	nouveau_subdev_create((p), (e), (o), 0, (i), (f), (d))
+#define nouveau_vmmgr_destroy(p)                                               \
+	nouveau_subdev_destroy(&(p)->base)
+#define nouveau_vmmgr_init(p)                                                  \
+	nouveau_subdev_init(&(p)->base)
+#define nouveau_vmmgr_fini(p,s)                                                \
+	nouveau_subdev_fini(&(p)->base, (s))
+
+#define _nouveau_vmmgr_dtor _nouveau_subdev_dtor
+#define _nouveau_vmmgr_init _nouveau_subdev_init
+#define _nouveau_vmmgr_fini _nouveau_subdev_fini
+
+extern struct nouveau_oclass nv04_vmmgr_oclass;
+extern struct nouveau_oclass nv41_vmmgr_oclass;
+extern struct nouveau_oclass nv44_vmmgr_oclass;
+extern struct nouveau_oclass nv50_vmmgr_oclass;
+extern struct nouveau_oclass nvc0_vmmgr_oclass;
+
+int  nv04_vm_create(struct nouveau_vmmgr *, u64, u64, u64,
+		    struct nouveau_vm **);
+void nv04_vmmgr_dtor(struct nouveau_object *);
+
+void nv50_vm_flush_engine(struct nouveau_subdev *, int engine);
+void nvc0_vm_flush_engine(struct nouveau_subdev *, u64 addr, int type);
+
+/* nouveau_vm.c */
+int  nouveau_vm_create(struct nouveau_vmmgr *, u64 offset, u64 length,
+		       u64 mm_offset, u32 block, struct nouveau_vm **);
+int  nouveau_vm_new(struct nouveau_device *, u64 offset, u64 length,
+		    u64 mm_offset, struct nouveau_vm **);
+int  nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
+		    struct nouveau_gpuobj *pgd);
+int  nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
+		    u32 access, struct nouveau_vma *);
+void nouveau_vm_put(struct nouveau_vma *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
+void nouveau_vm_unmap(struct nouveau_vma *);
+void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
+void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
+		       struct nouveau_mem *);
+void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
+		     struct nouveau_mem *mem);
+
+#endif
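
The usual lifecycle is get -> map -> unmap -> put: get() carves address space out of the mm, map() writes the PTEs for a nouveau_mem, and put() returns the range. A sketch for small (4KiB) pages; NV_MEM_ACCESS_RW comes from subdev/fb.h, and mem->size counting pages is an assumption the BAR code below also makes:

/* Sketch only: map a nouveau_mem into a VM and tear the mapping down. */
static int
example_vm(struct nouveau_vm *vm, struct nouveau_mem *mem)
{
	struct nouveau_vma vma = {};
	int ret;

	ret = nouveau_vm_get(vm, mem->size << 12, 12, NV_MEM_ACCESS_RW, &vma);
	if (ret)
		return ret;

	nouveau_vm_map(&vma, mem);	/* write PTEs */
	/* ... use the mapping ... */
	nouveau_vm_unmap(&vma);		/* clear PTEs */
	nouveau_vm_put(&vma);		/* release the address range */
	return 0;
}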
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/os.h b/linux-imx/drivers/gpu/drm/nouveau/core/os.h
new file mode 100644
index 0000000..3bd9be2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/os.h
@@ -0,0 +1,49 @@
+#ifndef __NOUVEAU_OS_H__
+#define __NOUVEAU_OS_H__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/printk.h>
+#include <linux/bitops.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/delay.h>
+#include <linux/io-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+
+#include <asm/unaligned.h>
+
+static inline int
+ffsll(u64 mask)
+{
+	int i;
+	for (i = 0; i < 64; i++) {
+		if (mask & (1ULL << i))
+			return i + 1;
+	}
+	return 0;
+}
+
+#ifndef ioread32_native
+#ifdef __BIG_ENDIAN
+#define ioread16_native ioread16be
+#define iowrite16_native iowrite16be
+#define ioread32_native  ioread32be
+#define iowrite32_native iowrite32be
+#else /* def __BIG_ENDIAN */
+#define ioread16_native ioread16
+#define iowrite16_native iowrite16
+#define ioread32_native  ioread32
+#define iowrite32_native iowrite32
+#endif /* def __BIG_ENDIAN else */
+#endif /* !ioread32_native */
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
new file mode 100644
index 0000000..d70ba34
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <subdev/bar.h>
+
+struct nouveau_barobj {
+	struct nouveau_object base;
+	struct nouveau_vma vma;
+	void __iomem *iomem;
+};
+
+static int
+nouveau_barobj_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *mem, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = (void *)engine;
+	struct nouveau_barobj *barobj;
+	int ret;
+
+	ret = nouveau_object_create(parent, engine, oclass, 0, &barobj);
+	*pobject = nv_object(barobj);
+	if (ret)
+		return ret;
+
+	ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
+	if (ret)
+		return ret;
+
+	barobj->iomem = bar->iomem + (u32)barobj->vma.offset;
+	return 0;
+}
+
+static void
+nouveau_barobj_dtor(struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = (void *)object->engine;
+	struct nouveau_barobj *barobj = (void *)object;
+	if (barobj->vma.node)
+		bar->unmap(bar, &barobj->vma);
+	nouveau_object_destroy(&barobj->base);
+}
+
+static u32
+nouveau_barobj_rd32(struct nouveau_object *object, u64 addr)
+{
+	struct nouveau_barobj *barobj = (void *)object;
+	return ioread32_native(barobj->iomem + addr);
+}
+
+static void
+nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	struct nouveau_barobj *barobj = (void *)object;
+	iowrite32_native(data, barobj->iomem + addr);
+}
+
+static struct nouveau_oclass
+nouveau_barobj_oclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nouveau_barobj_ctor,
+		.dtor = nouveau_barobj_dtor,
+		.init = nouveau_object_init,
+		.fini = nouveau_object_fini,
+		.rd32 = nouveau_barobj_rd32,
+		.wr32 = nouveau_barobj_wr32,
+	},
+};
+
+int
+nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent,
+		  struct nouveau_mem *mem, struct nouveau_object **pobject)
+{
+	struct nouveau_object *engine = nv_object(bar);
+	return nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass,
+				   mem, 0, pobject);
+}
+
+int
+nouveau_bar_create_(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, int length, void **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_bar *bar;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "BARCTL",
+				     "bar", length, pobject);
+	bar = *pobject;
+	if (ret)
+		return ret;
+
+	bar->iomem = ioremap(pci_resource_start(device->pdev, 3),
+			     pci_resource_len(device->pdev, 3));
+	return 0;
+}
+
+void
+nouveau_bar_destroy(struct nouveau_bar *bar)
+{
+	if (bar->iomem)
+		iounmap(bar->iomem);
+	nouveau_subdev_destroy(&bar->base);
+}
+
+void
+_nouveau_bar_dtor(struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = (void *)object;
+	nouveau_bar_destroy(bar);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
new file mode 100644
index 0000000..649f1ce
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+struct nv50_bar_priv {
+	struct nouveau_bar base;
+	spinlock_t lock;
+	struct nouveau_gpuobj *mem;
+	struct nouveau_gpuobj *pad;
+	struct nouveau_gpuobj *pgd;
+	struct nouveau_vm *bar1_vm;
+	struct nouveau_gpuobj *bar1;
+	struct nouveau_vm *bar3_vm;
+	struct nouveau_gpuobj *bar3;
+};
+
+static int
+nv50_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
+	      u32 flags, struct nouveau_vma *vma)
+{
+	struct nv50_bar_priv *priv = (void *)bar;
+	int ret;
+
+	ret = nouveau_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
+	if (ret)
+		return ret;
+
+	nouveau_vm_map(vma, mem);
+	nv50_vm_flush_engine(nv_subdev(bar), 6);
+	return 0;
+}
+
+static int
+nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
+	      u32 flags, struct nouveau_vma *vma)
+{
+	struct nv50_bar_priv *priv = (void *)bar;
+	int ret;
+
+	ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
+	if (ret)
+		return ret;
+
+	nouveau_vm_map(vma, mem);
+	nv50_vm_flush_engine(nv_subdev(bar), 6);
+	return 0;
+}
+
+static void
+nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
+{
+	nouveau_vm_unmap(vma);
+	nv50_vm_flush_engine(nv_subdev(bar), 6);
+	nouveau_vm_put(vma);
+}
+
+static void
+nv50_bar_flush(struct nouveau_bar *bar)
+{
+	struct nv50_bar_priv *priv = (void *)bar;
+	unsigned long flags;
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_wr32(priv, 0x00330c, 0x00000001);
+	if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
+		nv_warn(priv, "flush timeout\n");
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void
+nv84_bar_flush(struct nouveau_bar *bar)
+{
+	struct nv50_bar_priv *priv = (void *)bar;
+	unsigned long flags;
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_wr32(bar, 0x070000, 0x00000001);
+	if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
+		nv_warn(priv, "flush timeout\n");
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int
+nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_object *heap;
+	struct nouveau_vm *vm;
+	struct nv50_bar_priv *priv;
+	u64 start, limit;
+	int ret;
+
+	ret = nouveau_bar_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
+				 NVOBJ_FLAG_HEAP, &priv->mem);
+	heap = nv_object(priv->mem);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), heap,
+				(device->chipset == 0x50) ? 0x1400 : 0x0200,
+				 0, 0, &priv->pad);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), heap, 0x4000, 0,
+				 0, &priv->pgd);
+	if (ret)
+		return ret;
+
+	/* BAR3 */
+	start = 0x0100000000ULL;
+	limit = start + pci_resource_len(device->pdev, 3);
+
+	ret = nouveau_vm_new(device, start, limit, start, &vm);
+	if (ret)
+		return ret;
+
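+	/* one 8-byte PTE per 4KiB page covering BAR3; the post-decrement
+	 * leaves "limit" inclusive for the DMA object programmed below
+	 */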
+	ret = nouveau_gpuobj_new(nv_object(priv), heap,
+				 ((limit-- - start) >> 12) * 8, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &vm->pgt[0].obj[0]);
+	vm->pgt[0].refcount[0] = 1;
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
+	nouveau_vm_ref(NULL, &vm, NULL);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar3);
+	if (ret)
+		return ret;
+
+	nv_wo32(priv->bar3, 0x00, 0x7fc00000);
+	nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
+	nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
+	nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
+				  upper_32_bits(start));
+	nv_wo32(priv->bar3, 0x10, 0x00000000);
+	nv_wo32(priv->bar3, 0x14, 0x00000000);
+
+	/* BAR1 */
+	start = 0x0000000000ULL;
+	limit = start + pci_resource_len(device->pdev, 1);
+
+	ret = nouveau_vm_new(device, start, limit--, start, &vm);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
+	nouveau_vm_ref(NULL, &vm, NULL);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), heap, 24, 16, 0, &priv->bar1);
+	if (ret)
+		return ret;
+
+	nv_wo32(priv->bar1, 0x00, 0x7fc00000);
+	nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
+	nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
+	nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
+				  upper_32_bits(start));
+	nv_wo32(priv->bar1, 0x10, 0x00000000);
+	nv_wo32(priv->bar1, 0x14, 0x00000000);
+
+	priv->base.alloc = nouveau_bar_alloc;
+	priv->base.kmap = nv50_bar_kmap;
+	priv->base.umap = nv50_bar_umap;
+	priv->base.unmap = nv50_bar_unmap;
+	if (device->chipset == 0x50)
+		priv->base.flush = nv50_bar_flush;
+	else
+		priv->base.flush = nv84_bar_flush;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+static void
+nv50_bar_dtor(struct nouveau_object *object)
+{
+	struct nv50_bar_priv *priv = (void *)object;
+	nouveau_gpuobj_ref(NULL, &priv->bar1);
+	nouveau_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
+	nouveau_gpuobj_ref(NULL, &priv->bar3);
+	if (priv->bar3_vm) {
+		nouveau_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
+		nouveau_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
+	}
+	nouveau_gpuobj_ref(NULL, &priv->pgd);
+	nouveau_gpuobj_ref(NULL, &priv->pad);
+	nouveau_gpuobj_ref(NULL, &priv->mem);
+	nouveau_bar_destroy(&priv->base);
+}
+
+static int
+nv50_bar_init(struct nouveau_object *object)
+{
+	struct nv50_bar_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_bar_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
+	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
+	nv50_vm_flush_engine(nv_subdev(priv), 6);
+
+	nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
+	nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
+	nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
+	nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
+	return 0;
+}
+
+static int
+nv50_bar_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_bar_priv *priv = (void *)object;
+	return nouveau_bar_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv50_bar_oclass = {
+	.handle = NV_SUBDEV(BAR, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_bar_ctor,
+		.dtor = nv50_bar_dtor,
+		.init = nv50_bar_init,
+		.fini = nv50_bar_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
new file mode 100644
index 0000000..f8a4495
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+struct nvc0_bar_priv {
+	struct nouveau_bar base;
+	spinlock_t lock;
+	struct {
+		struct nouveau_gpuobj *mem;
+		struct nouveau_gpuobj *pgd;
+		struct nouveau_vm *vm;
+	} bar[2];
+};
+
+static int
+nvc0_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
+	      u32 flags, struct nouveau_vma *vma)
+{
+	struct nvc0_bar_priv *priv = (void *)bar;
+	int ret;
+
+	ret = nouveau_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
+	if (ret)
+		return ret;
+
+	nouveau_vm_map(vma, mem);
+	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[0].pgd->addr, 5);
+	return 0;
+}
+
+static int
+nvc0_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
+	      u32 flags, struct nouveau_vma *vma)
+{
+	struct nvc0_bar_priv *priv = (void *)bar;
+	int ret;
+
+	ret = nouveau_vm_get(priv->bar[1].vm, mem->size << 12,
+			     mem->page_shift, flags, vma);
+	if (ret)
+		return ret;
+
+	nouveau_vm_map(vma, mem);
+	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[1].pgd->addr, 5);
+	return 0;
+}
+
+static void
+nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
+{
+	struct nvc0_bar_priv *priv = (void *)bar;
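+	/* bar[0] is the BAR3 (kernel) VM, bar[1] the BAR1 (user) VM */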
+	int i = !(vma->vm == priv->bar[0].vm);
+
+	nouveau_vm_unmap(vma);
+	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[i].pgd->addr, 5);
+	nouveau_vm_put(vma);
+}
+
+static int
+nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct pci_dev *pdev = device->pdev;
+	struct nvc0_bar_priv *priv;
+	struct nouveau_gpuobj *mem;
+	struct nouveau_vm *vm;
+	int ret;
+
+	ret = nouveau_bar_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	/* BAR3 */
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
+				&priv->bar[0].mem);
+	mem = priv->bar[0].mem;
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
+				&priv->bar[0].pgd);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 3), 0, &vm);
+	if (ret)
+		return ret;
+
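+	/* enough PTEs (8 bytes each) to cover the BAR3 aperture in 4KiB
+	 * pages, pre-zeroed so every page starts out unmapped
+	 */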
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL,
+				 (pci_resource_len(pdev, 3) >> 12) * 8,
+				 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
+				 &vm->pgt[0].obj[0]);
+	vm->pgt[0].refcount[0] = 1;
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_ref(vm, &priv->bar[0].vm, priv->bar[0].pgd);
+	nouveau_vm_ref(NULL, &vm, NULL);
+	if (ret)
+		return ret;
+
+	nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr));
+	nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr));
+	nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 3) - 1));
+	nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1));
+
+	/* BAR1 */
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
+				&priv->bar[1].mem);
+	mem = priv->bar[1].mem;
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
+				&priv->bar[1].pgd);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 1), 0, &vm);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vm_ref(vm, &priv->bar[1].vm, priv->bar[1].pgd);
+	nouveau_vm_ref(NULL, &vm, NULL);
+	if (ret)
+		return ret;
+
+	nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr));
+	nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr));
+	nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 1) - 1));
+	nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 1) - 1));
+
+	priv->base.alloc = nouveau_bar_alloc;
+	priv->base.kmap = nvc0_bar_kmap;
+	priv->base.umap = nvc0_bar_umap;
+	priv->base.unmap = nvc0_bar_unmap;
+	priv->base.flush = nv84_bar_flush;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+static void
+nvc0_bar_dtor(struct nouveau_object *object)
+{
+	struct nvc0_bar_priv *priv = (void *)object;
+
+	nouveau_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
+	nouveau_gpuobj_ref(NULL, &priv->bar[1].pgd);
+	nouveau_gpuobj_ref(NULL, &priv->bar[1].mem);
+
+	if (priv->bar[0].vm) {
+		nouveau_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
+		nouveau_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
+	}
+	nouveau_gpuobj_ref(NULL, &priv->bar[0].pgd);
+	nouveau_gpuobj_ref(NULL, &priv->bar[0].mem);
+
+	nouveau_bar_destroy(&priv->base);
+}
+
+static int
+nvc0_bar_init(struct nouveau_object *object)
+{
+	struct nvc0_bar_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_bar_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
+	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
+	nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);
+
+	nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
+	nv_wr32(priv, 0x001714, 0xc0000000 | priv->bar[0].mem->addr >> 12);
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_bar_oclass = {
+	.handle = NV_SUBDEV(BAR, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_bar_ctor,
+		.dtor = nvc0_bar_dtor,
+		.init = nvc0_bar_init,
+		.fini = _nouveau_bar_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
new file mode 100644
index 0000000..0e2c1a4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -0,0 +1,520 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/device.h>
+#include <core/subdev.h>
+#include <core/option.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/bmp.h>
+#include <subdev/bios/bit.h>
+
+u8
+nvbios_checksum(const u8 *data, int size)
+{
+	u8 sum = 0;
+	while (size--)
+		sum += *data++;
+	return sum;
+}
+
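+/* scan for a byte string; returns its offset, or 0 when not found (a
+ * genuine match at offset 0 can't occur for the signatures searched
+ * for here, as the image starts with 0x55 0xaa)
+ */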
+u16
+nvbios_findstr(const u8 *data, int size, const char *str, int len)
+{
+	int i, j;
+
+	for (i = 0; i <= (size - len); i++) {
+		for (j = 0; j < len; j++)
+			if ((char)data[i + j] != str[j])
+				break;
+		if (j == len)
+			return i;
+	}
+
+	return 0;
+}
+
+#if defined(__powerpc__)
+static void
+nouveau_bios_shadow_of(struct nouveau_bios *bios)
+{
+	struct pci_dev *pdev = nv_device(bios)->pdev;
+	struct device_node *dn;
+	const u32 *data;
+	int size;
+
+	dn = pci_device_to_OF_node(pdev);
+	if (!dn) {
+		nv_info(bios, "Unable to get the OF node\n");
+		return;
+	}
+
+	data = of_get_property(dn, "NVDA,BMP", &size);
+	if (data && size) {
+		bios->size = size;
+		bios->data = kmalloc(bios->size, GFP_KERNEL);
+		if (bios->data)
+			memcpy(bios->data, data, size);
+	}
+}
+#endif
+
+static void
+nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
+{
+	struct nouveau_device *device = nv_device(bios);
+	u32 bar0 = 0;
+	int i;
+
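+	/* on NV50+ the VBIOS image lives in VRAM; aim the PRAMIN window
+	 * (0x001700) at it, saving the previous window to restore later
+	 */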
+	if (device->card_type >= NV_50) {
+		u64 addr = (u64)(nv_rd32(bios, 0x619f04) & 0xffffff00) << 8;
+		if (!addr) {
+			addr  = (u64)nv_rd32(bios, 0x001700) << 16;
+			addr += 0xf0000;
+		}
+
+		bar0 = nv_mask(bios, 0x001700, 0xffffffff, addr >> 16);
+	}
+
+	/* bail if no rom signature */
+	if (nv_rd08(bios, 0x700000) != 0x55 ||
+	    nv_rd08(bios, 0x700001) != 0xaa)
+		goto out;
+
+	bios->size = nv_rd08(bios, 0x700002) * 512;
+	if (!bios->size)
+		goto out;
+
+	bios->data = kmalloc(bios->size, GFP_KERNEL);
+	if (bios->data) {
+		for (i = 0; i < bios->size; i++)
+			nv_wo08(bios, i, nv_rd08(bios, 0x700000 + i));
+	}
+
+out:
+	if (device->card_type >= NV_50)
+		nv_wr32(bios, 0x001700, bar0);
+}
+
+static void
+nouveau_bios_shadow_prom(struct nouveau_bios *bios)
+{
+	struct nouveau_device *device = nv_device(bios);
+	u32 pcireg, access;
+	u16 pcir;
+	int i;
+
+	/* enable access to rom */
+	if (device->card_type >= NV_50)
+		pcireg = 0x088050;
+	else
+		pcireg = 0x001850;
+	access = nv_mask(bios, pcireg, 0x00000001, 0x00000000);
+
+	/* bail if no rom signature, with a workaround for a PROM reading
+	 * issue on some chipsets.  the first read after a period of
+	 * inactivity returns the wrong result, so retry the first header
+	 * byte a few times before giving up
+	 */
+	i = 16;
+	do {
+		if (nv_rd08(bios, 0x300000) == 0x55)
+			break;
+	} while (i--);
+
+	if (!i || nv_rd08(bios, 0x300001) != 0xaa)
+		goto out;
+
+	/* additional sanity check - read and validate the PCI record header */
+	pcir = nv_rd08(bios, 0x300018) |
+	       nv_rd08(bios, 0x300019) << 8;
+	if (nv_rd08(bios, 0x300000 + pcir) != 'P' ||
+	    nv_rd08(bios, 0x300001 + pcir) != 'C' ||
+	    nv_rd08(bios, 0x300002 + pcir) != 'I' ||
+	    nv_rd08(bios, 0x300003 + pcir) != 'R')
+		goto out;
+
+	/* read entire bios image to system memory */
+	bios->size = nv_rd08(bios, 0x300002) * 512;
+	if (!bios->size)
+		goto out;
+
+	bios->data = kmalloc(bios->size, GFP_KERNEL);
+	if (bios->data) {
+		for (i = 0; i < bios->size; i++)
+			nv_wo08(bios, i, nv_rd08(bios, 0x300000 + i));
+	}
+
+out:
+	/* disable access to rom */
+	nv_wr32(bios, pcireg, access);
+}
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+#else
+static inline bool
+nouveau_acpi_rom_supported(struct pci_dev *pdev) {
+	return false;
+}
+
+static inline int
+nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) {
+	return -EINVAL;
+}
+#endif
+
+static void
+nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
+{
+	struct pci_dev *pdev = nv_device(bios)->pdev;
+	int ret, cnt, i;
+
+	if (!nouveau_acpi_rom_supported(pdev)) {
+		bios->data = NULL;
+		return;
+	}
+
+	bios->size = 0;
+	bios->data = kmalloc(4096, GFP_KERNEL);
+	if (bios->data) {
+		if (nouveau_acpi_get_bios_chunk(bios->data, 0, 4096) == 4096)
+			bios->size = bios->data[2] * 512;
+		kfree(bios->data);
+		bios->data = NULL;
+	}
+
+	if (!bios->size)
+		return;
+
+	bios->data = kmalloc(bios->size, GFP_KERNEL);
+	if (bios->data) {
+		/* disobey the acpi spec - much faster on at least w530 ... */
+		ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
+		if (ret != bios->size ||
+		    nvbios_checksum(bios->data, bios->size)) {
+			/* ... that didn't work, ok, i'll be good now */
+			for (i = 0; i < bios->size; i += cnt) {
+				cnt = min((bios->size - i), (u32)4096);
+				ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
+				if (ret != cnt)
+					break;
+			}
+		}
+	}
+}
+
+static void
+nouveau_bios_shadow_pci(struct nouveau_bios *bios)
+{
+	struct pci_dev *pdev = nv_device(bios)->pdev;
+	size_t size;
+
+	if (!pci_enable_rom(pdev)) {
+		void __iomem *rom = pci_map_rom(pdev, &size);
+		if (rom && size) {
+			bios->data = kmalloc(size, GFP_KERNEL);
+			if (bios->data) {
+				memcpy_fromio(bios->data, rom, size);
+				bios->size = size;
+			}
+		}
+		if (rom)
+			pci_unmap_rom(pdev, rom);
+
+		pci_disable_rom(pdev);
+	}
+}
+
+static void
+nouveau_bios_shadow_platform(struct nouveau_bios *bios)
+{
+	struct pci_dev *pdev = nv_device(bios)->pdev;
+	size_t size;
+
+	void __iomem *rom = pci_platform_rom(pdev, &size);
+	if (rom && size) {
+		bios->data = kmalloc(size, GFP_KERNEL);
+		if (bios->data) {
+			memcpy_fromio(bios->data, rom, size);
+			bios->size = size;
+		}
+	}
+}
+
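+/* score an image: 0 unusable, 1 bad checksum from a read-only source,
+ * 2 bad checksum from a writeable source, 3 valid
+ */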
+static int
+nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
+{
+	if (bios->size < 3 || !bios->data || bios->data[0] != 0x55 ||
+			bios->data[1] != 0xAA) {
+		nv_info(bios, "... signature not found\n");
+		return 0;
+	}
+
+	if (nvbios_checksum(bios->data,
+			min_t(u32, bios->data[2] * 512, bios->size))) {
+		nv_info(bios, "... checksum invalid\n");
+		/* if a ro image is somewhat bad, it's probably all rubbish */
+		return writeable ? 2 : 1;
+	}
+
+	nv_info(bios, "... appears to be valid\n");
+	return 3;
+}
+
+struct methods {
+	const char desc[16];
+	void (*shadow)(struct nouveau_bios *);
+	const bool rw;
+	int score;
+	u32 size;
+	u8 *data;
+};
+
+static int
+nouveau_bios_shadow(struct nouveau_bios *bios)
+{
+	struct methods shadow_methods[] = {
+#if defined(__powerpc__)
+		{ "OpenFirmware", nouveau_bios_shadow_of, true, 0, 0, NULL },
+#endif
+		{ "PRAMIN", nouveau_bios_shadow_pramin, true, 0, 0, NULL },
+		{ "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL },
+		{ "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL },
+		{ "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL },
+		{ "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL },
+		{}
+	};
+	struct methods *mthd, *best;
+	const struct firmware *fw;
+	const char *optarg;
+	int optlen, ret;
+	char *source;
+
+	optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
+	source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
+	if (source) {
+		/* try to match one of the built-in methods */
+		mthd = shadow_methods;
+		do {
+			if (strcasecmp(source, mthd->desc))
+				continue;
+			nv_info(bios, "source: %s\n", mthd->desc);
+
+			mthd->shadow(bios);
+			mthd->score = nouveau_bios_score(bios, mthd->rw);
+			if (mthd->score) {
+				kfree(source);
+				return 0;
+			}
+		} while ((++mthd)->shadow);
+
+		/* attempt to load firmware image */
+		ret = request_firmware(&fw, source, &nv_device(bios)->pdev->dev);
+		if (ret == 0) {
+			bios->size = fw->size;
+			bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+			release_firmware(fw);
+
+			nv_info(bios, "image: %s\n", source);
+			if (nouveau_bios_score(bios, 1)) {
+				kfree(source);
+				return 0;
+			}
+
+			kfree(bios->data);
+			bios->data = NULL;
+		}
+
+		nv_error(bios, "source \'%s\' invalid\n", source);
+		kfree(source);
+	}
+
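+	/* no override (or it failed): shadow from each source in turn,
+	 * stopping early on a perfect score, then keep the best image
+	 */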
+	mthd = shadow_methods;
+	do {
+		nv_info(bios, "checking %s for image...\n", mthd->desc);
+		mthd->shadow(bios);
+		mthd->score = nouveau_bios_score(bios, mthd->rw);
+		mthd->size = bios->size;
+		mthd->data = bios->data;
+		bios->data = NULL;
+	} while (mthd->score != 3 && (++mthd)->shadow);
+
+	mthd = shadow_methods;
+	best = mthd;
+	do {
+		if (mthd->score > best->score) {
+			kfree(best->data);
+			best = mthd;
+		}
+	} while ((++mthd)->shadow);
+
+	if (best->score) {
+		nv_info(bios, "using image from %s\n", best->desc);
+		bios->size = best->size;
+		bios->data = best->data;
+		return 0;
+	}
+
+	nv_error(bios, "unable to locate usable image\n");
+	return -EINVAL;
+}
+
+static u8
+nouveau_bios_rd08(struct nouveau_object *object, u64 addr)
+{
+	struct nouveau_bios *bios = (void *)object;
+	return bios->data[addr];
+}
+
+static u16
+nouveau_bios_rd16(struct nouveau_object *object, u64 addr)
+{
+	struct nouveau_bios *bios = (void *)object;
+	return get_unaligned_le16(&bios->data[addr]);
+}
+
+static u32
+nouveau_bios_rd32(struct nouveau_object *object, u64 addr)
+{
+	struct nouveau_bios *bios = (void *)object;
+	return get_unaligned_le32(&bios->data[addr]);
+}
+
+static void
+nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data)
+{
+	struct nouveau_bios *bios = (void *)object;
+	bios->data[addr] = data;
+}
+
+static void
+nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data)
+{
+	struct nouveau_bios *bios = (void *)object;
+	put_unaligned_le16(data, &bios->data[addr]);
+}
+
+static void
+nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	struct nouveau_bios *bios = (void *)object;
+	put_unaligned_le32(data, &bios->data[addr]);
+}
+
+static int
+nouveau_bios_ctor(struct nouveau_object *parent,
+		  struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nouveau_bios *bios;
+	struct bit_entry bit_i;
+	int ret;
+
+	ret = nouveau_subdev_create(parent, engine, oclass, 0,
+				    "VBIOS", "bios", &bios);
+	*pobject = nv_object(bios);
+	if (ret)
+		return ret;
+
+	ret = nouveau_bios_shadow(bios);
+	if (ret)
+		return ret;
+
+	/* detect type of vbios we're dealing with */
+	bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
+					  "\xff\x7f""NV\0", 5);
+	if (bios->bmp_offset) {
+		nv_info(bios, "BMP version %x.%x\n",
+			bmp_version(bios) >> 8,
+			bmp_version(bios) & 0xff);
+	}
+
+	bios->bit_offset = nvbios_findstr(bios->data, bios->size,
+					  "\xff\xb8""BIT", 5);
+	if (bios->bit_offset)
+		nv_info(bios, "BIT signature found\n");
+
+	/* determine the vbios version number */
+	if (!bit_entry(bios, 'i', &bit_i) && bit_i.length >= 4) {
+		bios->version.major = nv_ro08(bios, bit_i.offset + 3);
+		bios->version.chip  = nv_ro08(bios, bit_i.offset + 2);
+		bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
+		bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
+		bios->version.patch = nv_ro08(bios, bit_i.offset + 4);
+	} else
+	if (bmp_version(bios)) {
+		bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
+		bios->version.chip  = nv_ro08(bios, bios->bmp_offset + 12);
+		bios->version.minor = nv_ro08(bios, bios->bmp_offset + 11);
+		bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
+	}
+
+	nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n",
+		bios->version.major, bios->version.chip,
+		bios->version.minor, bios->version.micro, bios->version.patch);
+
+	return 0;
+}
+
+static void
+nouveau_bios_dtor(struct nouveau_object *object)
+{
+	struct nouveau_bios *bios = (void *)object;
+	kfree(bios->data);
+	nouveau_subdev_destroy(&bios->base);
+}
+
+static int
+nouveau_bios_init(struct nouveau_object *object)
+{
+	struct nouveau_bios *bios = (void *)object;
+	return nouveau_subdev_init(&bios->base);
+}
+
+static int
+nouveau_bios_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_bios *bios = (void *)object;
+	return nouveau_subdev_fini(&bios->base, suspend);
+}
+
+struct nouveau_oclass
+nouveau_bios_oclass = {
+	.handle = NV_SUBDEV(VBIOS, 0x00),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nouveau_bios_ctor,
+		.dtor = nouveau_bios_dtor,
+		.init = nouveau_bios_init,
+		.fini = nouveau_bios_fini,
+		.rd08 = nouveau_bios_rd08,
+		.rd16 = nouveau_bios_rd16,
+		.rd32 = nouveau_bios_rd32,
+		.wr08 = nouveau_bios_wr08,
+		.wr16 = nouveau_bios_wr16,
+		.wr32 = nouveau_bios_wr32,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c
new file mode 100644
index 0000000..1d03a3f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "core/object.h"
+
+#include "subdev/bios.h"
+#include "subdev/bios/bit.h"
+
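+/* walk the BIT table: entry count at header+10, entry stride at
+ * header+9, entries starting at header+12
+ */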
+int
+bit_entry(struct nouveau_bios *bios, u8 id, struct bit_entry *bit)
+{
+	if (likely(bios->bit_offset)) {
+		u8  entries = nv_ro08(bios, bios->bit_offset + 10);
+		u32 entry   = bios->bit_offset + 12;
+		while (entries--) {
+			if (nv_ro08(bios, entry + 0) == id) {
+				bit->id      = nv_ro08(bios, entry + 0);
+				bit->version = nv_ro08(bios, entry + 1);
+				bit->length  = nv_ro16(bios, entry + 2);
+				bit->offset  = nv_ro16(bios, entry + 4);
+				return 0;
+			}
+
+			entry += nv_ro08(bios, bios->bit_offset + 9);
+		}
+
+		return -ENOENT;
+	}
+
+	return -EINVAL;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
new file mode 100644
index 0000000..5ac010e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/conn.h>
+
+u16
+dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
+	if (dcb && *ver >= 0x30 && *hdr >= 0x16) {
+		u16 data = nv_ro16(bios, dcb + 0x14);
+		if (data) {
+			*ver = nv_ro08(bios, data + 0);
+			*hdr = nv_ro08(bios, data + 1);
+			*cnt = nv_ro08(bios, data + 2);
+			*len = nv_ro08(bios, data + 3);
+			return data;
+		}
+	}
+	return 0x0000;
+}
+
+u16
+dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt;
+	u16 data = dcb_conntab(bios, ver, &hdr, &cnt, len);
+	if (data && idx < cnt)
+		return data + hdr + (idx * *len);
+	return 0x0000;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
new file mode 100644
index 0000000..2d9b9d7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "core/device.h"
+
+#include "subdev/bios.h"
+#include "subdev/bios/dcb.h"
+
+u16
+dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct nouveau_device *device = nv_device(bios);
+	u16 dcb = 0x0000;
+
+	if (device->card_type > NV_04)
+		dcb = nv_ro16(bios, 0x36);
+	if (!dcb) {
+		nv_warn(bios, "DCB table not found\n");
+		return dcb;
+	}
+
+	*ver = nv_ro08(bios, dcb);
+
+	if (*ver >= 0x41) {
+		nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
+		return 0x0000;
+	} else
+	if (*ver >= 0x30) {
+		if (nv_ro32(bios, dcb + 6) == 0x4edcbdcb) {
+			*hdr = nv_ro08(bios, dcb + 1);
+			*cnt = nv_ro08(bios, dcb + 2);
+			*len = nv_ro08(bios, dcb + 3);
+			return dcb;
+		}
+	} else
+	if (*ver >= 0x20) {
+		if (nv_ro32(bios, dcb + 4) == 0x4edcbdcb) {
+			u16 i2c = nv_ro16(bios, dcb + 2);
+			*hdr = 8;
+			*cnt = (i2c - dcb) / 8;
+			*len = 8;
+			return dcb;
+		}
+	} else
+	if (*ver >= 0x15) {
+		if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
+			u16 i2c = nv_ro16(bios, dcb + 2);
+			*hdr = 4;
+			*cnt = (i2c - dcb) / 10;
+			*len = 10;
+			return dcb;
+		}
+	} else {
+		/*
+		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
+		 * always has the same single (crt) entry, even when tv-out
+		 * present, so the conclusion is this version cannot really
+		 * be used.
+		 *
+		 * v1.2 tables (some NV6/10, and NV15+) normally have the
+		 * same 5 entries, which are not specific to the card and so
+		 * no use.
+		 *
+		 * v1.2 does have an I2C table that read_dcb_i2c_table can
+		 * handle, but cards exist (nv11 in #14821) with a bad i2c
+		 * table pointer, so use the indices parsed in
+		 * parse_bmp_structure.
+		 *
+		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+		 */
+		nv_warn(bios, "DCB contains no useful data\n");
+		return 0x0000;
+	}
+
+	nv_warn(bios, "DCB header validation failed\n");
+	return 0x0000;
+}
+
+u16
+dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt;
+	u16 dcb = dcb_table(bios, ver, &hdr, &cnt, len);
+	if (dcb && idx < cnt)
+		return dcb + hdr + (idx * *len);
+	return 0x0000;
+}
+
+static inline u16
+dcb_outp_hasht(struct dcb_output *outp)
+{
+	return (outp->extdev << 8) | (outp->location << 4) | outp->type;
+}
+
+static inline u16
+dcb_outp_hashm(struct dcb_output *outp)
+{
+	return (outp->heads << 8) | (outp->link << 6) | outp->or;
+}
+
+u16
+dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+	       struct dcb_output *outp)
+{
+	u16 dcb = dcb_outp(bios, idx, ver, len);
+	if (dcb) {
+		if (*ver >= 0x20) {
+			u32 conn = nv_ro32(bios, dcb + 0x00);
+			outp->or        = (conn & 0x0f000000) >> 24;
+			outp->location  = (conn & 0x00300000) >> 20;
+			outp->bus       = (conn & 0x000f0000) >> 16;
+			outp->connector = (conn & 0x0000f000) >> 12;
+			outp->heads     = (conn & 0x00000f00) >> 8;
+			outp->i2c_index = (conn & 0x000000f0) >> 4;
+			outp->type      = (conn & 0x0000000f);
+			outp->link      = 0;
+		} else {
+			dcb = 0x0000;
+		}
+
+		if (*ver >= 0x40) {
+			u32 conf = nv_ro32(bios, dcb + 0x04);
+			switch (outp->type) {
+			case DCB_OUTPUT_TMDS:
+			case DCB_OUTPUT_LVDS:
+			case DCB_OUTPUT_DP:
+				outp->link = (conf & 0x00000030) >> 4;
+				outp->sorconf.link = outp->link; /*XXX*/
+				outp->extdev = 0x00;
+				if (outp->location != 0)
+					outp->extdev = (conf & 0x0000ff00) >> 8;
+				break;
+			default:
+				break;
+			}
+		}
+
+		outp->hasht = dcb_outp_hasht(outp);
+		outp->hashm = dcb_outp_hashm(outp);
+	}
+	return dcb;
+}
+
+u16
+dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+	       u8 *ver, u8 *len, struct dcb_output *outp)
+{
+	u16 dcb, idx = 0;
+	while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
+		if ((dcb_outp_hasht(outp) & 0x00ff) == (type & 0x00ff)) {
+			if ((dcb_outp_hashm(outp) & mask) == mask)
+				break;
+		}
+	}
+	return dcb;
+}
+
+int
+dcb_outp_foreach(struct nouveau_bios *bios, void *data,
+		 int (*exec)(struct nouveau_bios *, void *, int, u16))
+{
+	int ret, idx = -1;
+	u8  ver, len;
+	u16 outp;
+
+	while ((outp = dcb_outp(bios, ++idx, &ver, &len))) {
+		if (nv_ro32(bios, outp) == 0x00000000)
+			break; /* seen on an NV11 with DCB v1.5 */
+		if (nv_ro32(bios, outp) == 0xffffffff)
+			break; /* seen on an NV17 with DCB v2.0 */
+
+		if (nv_ro08(bios, outp) == DCB_OUTPUT_UNUSED)
+			continue;
+		if (nv_ro08(bios, outp) == DCB_OUTPUT_EOL)
+			break;
+
+		ret = exec(bios, data, idx, outp);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
new file mode 100644
index 0000000..7f16e52
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/disp.h>
+
+u16
+nvbios_disp_table(struct nouveau_bios *bios,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub)
+{
+	struct bit_entry U;
+
+	if (!bit_entry(bios, 'U', &U)) {
+		if (U.version == 1) {
+			u16 data = nv_ro16(bios, U.offset);
+			if (data) {
+				*ver = nv_ro08(bios, data + 0x00);
+				switch (*ver) {
+				case 0x20:
+				case 0x21:
+					*hdr = nv_ro08(bios, data + 0x01);
+					*len = nv_ro08(bios, data + 0x02);
+					*cnt = nv_ro08(bios, data + 0x03);
+					*sub = nv_ro08(bios, data + 0x04);
+					return data;
+				default:
+					break;
+				}
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+u16
+nvbios_disp_entry(struct nouveau_bios *bios, u8 idx,
+		  u8 *ver, u8 *len, u8 *sub)
+{
+	u8  hdr, cnt;
+	u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub);
+	if (data && idx < cnt)
+		return data + hdr + (idx * *len);
+	*ver = 0x00;
+	return 0x0000;
+}
+
+u16
+nvbios_disp_parse(struct nouveau_bios *bios, u8 idx,
+		  u8 *ver, u8 *len, u8 *sub,
+		  struct nvbios_disp *info)
+{
+	u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
+	if (data && *len >= 2) {
+		info->data = nv_ro16(bios, data + 0);
+		return data;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_outp_entry(struct nouveau_bios *bios, u8 idx,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct nvbios_disp info;
+	u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
+	if (data) {
+		*cnt = nv_ro08(bios, info.data + 0x05);
+		*len = 0x06;
+		data = info.data;
+	}
+	return data;
+}
+
+u16
+nvbios_outp_parse(struct nouveau_bios *bios, u8 idx,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		  struct nvbios_outp *info)
+{
+	u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
+	if (data && *hdr >= 0x0a) {
+		info->type      = nv_ro16(bios, data + 0x00);
+		info->mask      = nv_ro32(bios, data + 0x02);
+		if (*ver <= 0x20) /* match any link */
+			info->mask |= 0x00c0;
+		info->script[0] = nv_ro16(bios, data + 0x06);
+		info->script[1] = nv_ro16(bios, data + 0x08);
+		info->script[2] = 0x0000;
+		if (*hdr >= 0x0c)
+			info->script[2] = nv_ro16(bios, data + 0x0a);
+		return data;
+	}
+	return 0x0000;
+}
+
+u16
+nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		  struct nvbios_outp *info)
+{
+	u16 data, idx = 0;
+	while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+		if (data && info->type == type) {
+			if ((info->mask & mask) == mask)
+				break;
+		}
+	}
+	return data;
+}
+
+u16
+nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	if (idx < *cnt)
+		return outp + *hdr + (idx * *len);
+	return 0x0000;
+}
+
+u16
+nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		  struct nvbios_ocfg *info)
+{
+	u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+	if (data) {
+		info->match     = nv_ro16(bios, data + 0x00);
+		info->clkcmp[0] = nv_ro16(bios, data + 0x02);
+		info->clkcmp[1] = nv_ro16(bios, data + 0x04);
+	}
+	return data;
+}
+
+u16
+nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type,
+		  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		  struct nvbios_ocfg *info)
+{
+	u16 data, idx = 0;
+	while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
+		if (info->match == type)
+			break;
+	}
+	return data;
+}
+
+u16
+nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz)
+{
+	while (cmp) {
+		if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
+			return  nv_ro16(bios, cmp + 0x02);
+		cmp += 0x04;
+	}
+	return 0x0000;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
new file mode 100644
index 0000000..663853b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+
+#include "subdev/bios.h"
+#include "subdev/bios/bit.h"
+#include "subdev/bios/dp.h"
+
+static u16
+nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct bit_entry d;
+
+	if (!bit_entry(bios, 'd', &d)) {
+		if (d.version == 1 && d.length >= 2) {
+			u16 data = nv_ro16(bios, d.offset);
+			if (data) {
+				*ver = nv_ro08(bios, data + 0x00);
+				switch (*ver) {
+				case 0x21:
+				case 0x30:
+				case 0x40:
+					*hdr = nv_ro08(bios, data + 0x01);
+					*len = nv_ro08(bios, data + 0x02);
+					*cnt = nv_ro08(bios, data + 0x03);
+					return data;
+				default:
+					break;
+				}
+			}
+		}
+	}
+
+	return 0x0000;
+}
+
+static u16
+nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
+	if (data && idx < *cnt) {
+		u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
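+		/* multiplying by !!outp folds a missing entry into the
+		 * default case below
+		 */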
+		switch (*ver * !!outp) {
+		case 0x21:
+		case 0x30:
+			*hdr = nv_ro08(bios, data + 0x04);
+			*len = nv_ro08(bios, data + 0x05);
+			*cnt = nv_ro08(bios, outp + 0x04);
+			break;
+		case 0x40:
+			*hdr = nv_ro08(bios, data + 0x04);
+			*cnt = 0;
+			*len = 0;
+			break;
+		default:
+			break;
+		}
+		return outp;
+	}
+	*ver = 0x00;
+	return 0x0000;
+}
+
+u16
+nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpout *info)
+{
+	u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
+	if (data && *ver) {
+		info->type = nv_ro16(bios, data + 0x00);
+		info->mask = nv_ro16(bios, data + 0x02);
+		switch (*ver) {
+		case 0x21:
+		case 0x30:
+			info->flags     = nv_ro08(bios, data + 0x05);
+			info->script[0] = nv_ro16(bios, data + 0x06);
+			info->script[1] = nv_ro16(bios, data + 0x08);
+			info->lnkcmp    = nv_ro16(bios, data + 0x0a);
+			info->script[2] = nv_ro16(bios, data + 0x0c);
+			info->script[3] = nv_ro16(bios, data + 0x0e);
+			info->script[4] = nv_ro16(bios, data + 0x10);
+			break;
+		case 0x40:
+			info->flags     = nv_ro08(bios, data + 0x04);
+			info->script[0] = nv_ro16(bios, data + 0x05);
+			info->script[1] = nv_ro16(bios, data + 0x07);
+			info->lnkcmp    = nv_ro16(bios, data + 0x09);
+			info->script[2] = nv_ro16(bios, data + 0x0b);
+			info->script[3] = nv_ro16(bios, data + 0x0d);
+			info->script[4] = nv_ro16(bios, data + 0x0f);
+			break;
+		default:
+			data = 0x0000;
+			break;
+		}
+	}
+	return data;
+}
+
+u16
+nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpout *info)
+{
+	u16 data, idx = 0;
+	while ((data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+		if (data && info->type == type) {
+			if ((info->mask & mask) == mask)
+				break;
+		}
+	}
+	return data;
+}
+
+static u16
+nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	if (*ver >= 0x40) {
+		outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
+		*hdr = *hdr + (*len * *cnt);
+		*len = nv_ro08(bios, outp + 0x06);
+		*cnt = nv_ro08(bios, outp + 0x07);
+	}
+
+	if (idx < *cnt)
+		return outp + *hdr + (idx * *len);
+
+	return 0x0000;
+}
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpcfg *info)
+{
+	u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+	if (data) {
+		switch (*ver) {
+		case 0x21:
+			info->drv = nv_ro08(bios, data + 0x02);
+			info->pre = nv_ro08(bios, data + 0x03);
+			info->unk = nv_ro08(bios, data + 0x04);
+			break;
+		case 0x30:
+		case 0x40:
+			info->drv = nv_ro08(bios, data + 0x01);
+			info->pre = nv_ro08(bios, data + 0x02);
+			info->unk = nv_ro08(bios, data + 0x03);
+			break;
+		default:
+			data = 0x0000;
+			break;
+		}
+	}
+	return data;
+}
+
+u16
+nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpcfg *info)
+{
+	u8 idx = 0xff;
+	u16 data;
+
+	if (*ver >= 0x30) {
+		const u8 vsoff[] = { 0, 4, 7, 9 };
+		idx = (un * 10) + vsoff[vs] + pe;
+	} else {
+		while ((data = nvbios_dpcfg_entry(bios, outp, ++idx,
+						  ver, hdr, cnt, len))) {
+			if (nv_ro08(bios, data + 0x00) == vs &&
+			    nv_ro08(bios, data + 0x01) == pe)
+				break;
+		}
+	}
+
+	return nvbios_dpcfg_parse(bios, outp, idx, ver, hdr, cnt, len, info);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
new file mode 100644
index 0000000..b2a676e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/extdev.h>
+
+static u16
+extdev_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
+{
+	u8  dcb_ver, dcb_hdr, dcb_cnt, dcb_len;
+	u16 dcb, extdev = 0;
+
+	dcb = dcb_table(bios, &dcb_ver, &dcb_hdr, &dcb_cnt, &dcb_len);
+	if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40))
+		return 0x0000;
+
+	extdev = nv_ro16(bios, dcb + 18);
+	if (!extdev)
+		return 0x0000;
+
+	*ver = nv_ro08(bios, extdev + 0);
+	*hdr = nv_ro08(bios, extdev + 1);
+	*cnt = nv_ro08(bios, extdev + 2);
+	*len = nv_ro08(bios, extdev + 3);
+
+	return extdev + *hdr;
+}
+
+static u16
+nvbios_extdev_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+	u8 hdr, cnt;
+	u16 extdev = extdev_table(bios, ver, &hdr, len, &cnt);
+	if (extdev && idx < cnt)
+		return extdev + idx * *len;
+	return 0x0000;
+}
+
+static void
+extdev_parse_entry(struct nouveau_bios *bios, u16 offset,
+			  struct nvbios_extdev_func *entry)
+{
+	entry->type = nv_ro08(bios, offset + 0);
+	entry->addr = nv_ro08(bios, offset + 1);
+	entry->bus = (nv_ro08(bios, offset + 2) >> 4) & 1;
+}
+
+int
+nvbios_extdev_parse(struct nouveau_bios *bios, int idx,
+		    struct nvbios_extdev_func *func)
+{
+	u8 ver, len;
+	u16 entry;
+
+	if (!(entry = nvbios_extdev_entry(bios, idx, &ver, &len)))
+		return -EINVAL;
+
+	extdev_parse_entry(bios, entry, func);
+
+	return 0;
+}
+
+int
+nvbios_extdev_find(struct nouveau_bios *bios, enum nvbios_extdev_type type,
+		   struct nvbios_extdev_func *func)
+{
+	u8 ver, len, i;
+	u16 entry;
+
+	i = 0;
+	while ((entry = nvbios_extdev_entry(bios, i++, &ver, &len))) {
+		extdev_parse_entry(bios, entry, func);
+		if (func->type == type)
+			return 0;
+	}
+
+	return -EINVAL;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
new file mode 100644
index 0000000..172a4f9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/gpio.h>
+#include <subdev/bios/xpio.h>
+
+u16
+dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u16 data = 0x0000;
+	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
+	if (dcb) {
+		if (*ver >= 0x30 && *hdr >= 0x0c)
+			data = nv_ro16(bios, dcb + 0x0a);
+		else
+		if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
+			data = nv_ro16(bios, dcb - 0x0f);
+
+		if (data) {
+			*ver = nv_ro08(bios, data + 0x00);
+			if (*ver < 0x30) {
+				*hdr = 3;
+				*cnt = nv_ro08(bios, data + 0x02);
+				*len = nv_ro08(bios, data + 0x01);
+			} else
+			if (*ver <= 0x41) {
+				*hdr = nv_ro08(bios, data + 0x01);
+				*cnt = nv_ro08(bios, data + 0x02);
+				*len = nv_ro08(bios, data + 0x03);
+			} else {
+				data = 0x0000;
+			}
+		}
+	}
+	return data;
+}
+
+u16
+dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt, xver; /* use gpio version for xpio entry parsing */
+	u16 gpio;
+
+	if (!idx--)
+		gpio = dcb_gpio_table(bios, ver, &hdr, &cnt, len);
+	else
+		gpio = dcb_xpio_table(bios, idx, &xver, &hdr, &cnt, len);
+
+	if (gpio && ent < cnt)
+		return gpio + hdr + (ent * *len);
+	return 0x0000;
+}
+
+u16
+dcb_gpio_parse(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len,
+	       struct dcb_gpio_func *gpio)
+{
+	u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
+	if (data) {
+		if (*ver < 0x40) {
+			u16 info = nv_ro16(bios, data);
+			*gpio = (struct dcb_gpio_func) {
+				.line = (info & 0x001f) >> 0,
+				.func = (info & 0x07e0) >> 5,
+				.log[0] = (info & 0x1800) >> 11,
+				.log[1] = (info & 0x6000) >> 13,
+				.param = !!(info & 0x8000),
+			};
+		} else
+		if (*ver < 0x41) {
+			u32 info = nv_ro32(bios, data);
+			*gpio = (struct dcb_gpio_func) {
+				.line = (info & 0x0000001f) >> 0,
+				.func = (info & 0x0000ff00) >> 8,
+				.log[0] = (info & 0x18000000) >> 27,
+				.log[1] = (info & 0x60000000) >> 29,
+				.param = !!(info & 0x80000000),
+			};
+		} else {
+			u32 info = nv_ro32(bios, data + 0);
+			u8 info1 = nv_ro08(bios, data + 4);
+			*gpio = (struct dcb_gpio_func) {
+				.line = (info & 0x0000003f) >> 0,
+				.func = (info & 0x0000ff00) >> 8,
+				.log[0] = (info1 & 0x30) >> 4,
+				.log[1] = (info1 & 0xc0) >> 6,
+				.param = !!(info & 0x80000000),
+			};
+		}
+	}
+
+	return data;
+}
+
+u16
+dcb_gpio_match(struct nouveau_bios *bios, int idx, u8 func, u8 line,
+	       u8 *ver, u8 *len, struct dcb_gpio_func *gpio)
+{
+	u8  hdr, cnt, i = 0;
+	u16 data;
+
+	while ((data = dcb_gpio_parse(bios, idx, i++, ver, len, gpio))) {
+		if ((line == 0xff || line == gpio->line) &&
+		    (func == 0xff || func == gpio->func))
+			return data;
+	}
+
+	/* DCB 2.2, fixed TVDAC GPIO data */
+	if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
+		if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
+			u8 conf = nv_ro08(bios, data - 5);
+			u8 addr = nv_ro08(bios, data - 4);
+			if (conf & 0x01) {
+				*gpio = (struct dcb_gpio_func) {
+					.func = DCB_GPIO_TVDAC0,
+					.line = addr >> 4,
+					.log[0] = !!(conf & 0x02),
+					.log[1] =  !(conf & 0x02),
+				};
+				*ver = 0x00;
+				return data;
+			}
+		}
+	}
+
+	return 0x0000;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
new file mode 100644
index 0000000..cfb9288
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+
+#include "subdev/bios.h"
+#include "subdev/bios/dcb.h"
+#include "subdev/bios/i2c.h"
+
+u16
+dcb_i2c_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u16 i2c = 0x0000;
+	u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
+	if (dcb) {
+		if (*ver >= 0x15)
+			i2c = nv_ro16(bios, dcb + 2);
+		if (*ver >= 0x30)
+			i2c = nv_ro16(bios, dcb + 4);
+	}
+
+	if (i2c && *ver >= 0x30) {
+		*ver = nv_ro08(bios, i2c + 0);
+		*hdr = nv_ro08(bios, i2c + 1);
+		*cnt = nv_ro08(bios, i2c + 2);
+		*len = nv_ro08(bios, i2c + 3);
+	} else {
+		*ver = *ver; /* use DCB version */
+		*hdr = 0;
+		*cnt = 16;
+		*len = 4;
+	}
+
+	return i2c;
+}
+
+u16
+dcb_i2c_entry(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+{
+	u8  hdr, cnt;
+	u16 i2c = dcb_i2c_table(bios, ver, &hdr, &cnt, len);
+	if (i2c && idx < cnt)
+		return i2c + hdr + (idx * *len);
+	return 0x0000;
+}
+
+int
+dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
+{
+	u8  ver, len;
+	u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
+	if (ent) {
+		info->type  = nv_ro08(bios, ent + 3);
+		info->share = DCB_I2C_UNUSED;
+		if (ver < 0x30) {
+			info->type &= 0x07;
+			if (info->type == 0x07)
+				info->type = DCB_I2C_UNUSED;
+		}
+
+		switch (info->type) {
+		case DCB_I2C_NV04_BIT:
+			info->drive = nv_ro08(bios, ent + 0);
+			info->sense = nv_ro08(bios, ent + 1);
+			return 0;
+		case DCB_I2C_NV4E_BIT:
+			info->drive = nv_ro08(bios, ent + 1);
+			return 0;
+		case DCB_I2C_NVIO_BIT:
+		case DCB_I2C_NVIO_AUX:
+			info->drive = nv_ro08(bios, ent + 0) & 0x0f;
+			if (nv_ro08(bios, ent + 1) & 0x01) {
+				info->share  = nv_ro08(bios, ent + 1) >> 1;
+				info->share &= 0x0f;
+			}
+			return 0;
+		case DCB_I2C_UNUSED:
+			return 0;
+		default:
+			nv_warn(bios, "unknown i2c type %d\n", info->type);
+			info->type = DCB_I2C_UNUSED;
+			return 0;
+		}
+	}
+
+	if (bios->bmp_offset && idx < 2) {
+		/* BMP from v4.0 has i2c info in the structure; it's at a
+		 * fixed location on earlier VBIOS
+		 */
+		if (nv_ro08(bios, bios->bmp_offset + 5) < 4)
+			ent = 0x0048;
+		else
+			ent = 0x0036 + bios->bmp_offset;
+
+		if (idx == 0) {
+			info->drive = nv_ro08(bios, ent + 4);
+			if (!info->drive) info->drive = 0x3f;
+			info->sense = nv_ro08(bios, ent + 5);
+			if (!info->sense) info->sense = 0x3e;
+		} else
+		if (idx == 1) {
+			info->drive = nv_ro08(bios, ent + 6);
+			if (!info->drive) info->drive = 0x37;
+			info->sense = nv_ro08(bios, ent + 7);
+			if (!info->sense) info->sense = 0x36;
+		}
+
+		info->type  = DCB_I2C_NV04_BIT;
+		info->share = DCB_I2C_UNUSED;
+		return 0;
+	}
+
+	return -ENOENT;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
new file mode 100644
index 0000000..c7bf974
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -0,0 +1,2219 @@
+#include <core/engine.h>
+#include <core/device.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/bmp.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/conn.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/gpio.h>
+#include <subdev/bios/init.h>
+#include <subdev/devinit.h>
+#include <subdev/clock.h>
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
+#include <subdev/gpio.h>
+
+#define bioslog(lvl, fmt, args...) do {                                        \
+	nv_printk(init->bios, lvl, "0x%04x[%c]: "fmt, init->offset,            \
+		  init_exec(init) ? '0' + (init->nested - 1) : ' ', ##args);   \
+} while (0)
+#define cont(fmt, args...) do {                                                \
+	if (nv_subdev(init->bios)->debug >= NV_DBG_TRACE)                      \
+		printk(fmt, ##args);                                           \
+} while (0)
+#define trace(fmt, args...) bioslog(TRACE, fmt, ##args)
+#define warn(fmt, args...) bioslog(WARN, fmt, ##args)
+#define error(fmt, args...) bioslog(ERROR, fmt, ##args)
+
+/******************************************************************************
+ * init parser control flow helpers
+ *****************************************************************************/
+
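+/* execute flags: bit 0 set while a script is running, bit 1 suppresses
+ * execution (failed condition), bit 2 forces it back on regardless
+ */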
+static inline bool
+init_exec(struct nvbios_init *init)
+{
+	return (init->execute == 1) || ((init->execute & 5) == 5);
+}
+
+static inline void
+init_exec_set(struct nvbios_init *init, bool exec)
+{
+	if (exec) init->execute &= 0xfd;
+	else      init->execute |= 0x02;
+}
+
+static inline void
+init_exec_inv(struct nvbios_init *init)
+{
+	init->execute ^= 0x02;
+}
+
+static inline void
+init_exec_force(struct nvbios_init *init, bool exec)
+{
+	if (exec) init->execute |= 0x04;
+	else      init->execute &= 0xfb;
+}
+
+/******************************************************************************
+ * init parser wrappers for normal register/i2c/whatever accessors
+ *****************************************************************************/
+
+static inline int
+init_or(struct nvbios_init *init)
+{
+	if (init_exec(init)) {
+		if (init->outp)
+			return ffs(init->outp->or) - 1;
+		error("script needs OR!!\n");
+	}
+	return 0;
+}
+
+static inline int
+init_link(struct nvbios_init *init)
+{
+	if (init_exec(init)) {
+		if (init->outp)
+			return !(init->outp->sorconf.link & 1);
+		error("script needs OR link\n");
+	}
+	return 0;
+}
+
+static inline int
+init_crtc(struct nvbios_init *init)
+{
+	if (init_exec(init)) {
+		if (init->crtc >= 0)
+			return init->crtc;
+		error("script needs crtc\n");
+	}
+	return 0;
+}
+
+static u8
+init_conn(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8  ver, len;
+	u16 conn;
+
+	if (init_exec(init)) {
+		if (init->outp) {
+			conn = init->outp->connector;
+			conn = dcb_conn(bios, conn, &ver, &len);
+			if (conn)
+				return nv_ro08(bios, conn);
+		}
+
+		error("script needs connector type\n");
+	}
+
+	return 0xff;
+}
+
+static inline u32
+init_nvreg(struct nvbios_init *init, u32 reg)
+{
+	/* C51 (at least) sometimes has the lower bits set which the VBIOS
+	 * interprets to mean that access needs to go through certain IO
+	 * ports instead.  The NVIDIA binary driver has been seen to access
+	 * these through the NV register address, so lets assume we can
+	 * do the same
+	 */
+	reg &= ~0x00000003;
+
+	/* GF8+ display scripts need register addresses mangled a bit to
+	 * select a specific CRTC/OR
+	 */
+	if (nv_device(init->bios)->card_type >= NV_50) {
+		if (reg & 0x80000000) {
+			reg += init_crtc(init) * 0x800;
+			reg &= ~0x80000000;
+		}
+
+		if (reg & 0x40000000) {
+			reg += init_or(init) * 0x800;
+			reg &= ~0x40000000;
+			if (reg & 0x20000000) {
+				reg += init_link(init) * 0x80;
+				reg &= ~0x20000000;
+			}
+		}
+	}
+
+	if (reg & ~0x00fffffc)
+		warn("unknown bits in register 0x%08x\n", reg);
+	return reg;
+}
+
+static u32
+init_rd32(struct nvbios_init *init, u32 reg)
+{
+	reg = init_nvreg(init, reg);
+	if (init_exec(init))
+		return nv_rd32(init->subdev, reg);
+	return 0x00000000;
+}
+
+static void
+init_wr32(struct nvbios_init *init, u32 reg, u32 val)
+{
+	reg = init_nvreg(init, reg);
+	if (init_exec(init))
+		nv_wr32(init->subdev, reg, val);
+}
+
+static u32
+init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val)
+{
+	reg = init_nvreg(init, reg);
+	if (init_exec(init)) {
+		u32 tmp = nv_rd32(init->subdev, reg);
+		nv_wr32(init->subdev, reg, (tmp & ~mask) | val);
+		return tmp;
+	}
+	return 0x00000000;
+}
+
+static u8
+init_rdport(struct nvbios_init *init, u16 port)
+{
+	if (init_exec(init))
+		return nv_rdport(init->subdev, init->crtc, port);
+	return 0x00;
+}
+
+static void
+init_wrport(struct nvbios_init *init, u16 port, u8 value)
+{
+	if (init_exec(init))
+		nv_wrport(init->subdev, init->crtc, port, value);
+}
+
+static u8
+init_rdvgai(struct nvbios_init *init, u16 port, u8 index)
+{
+	struct nouveau_subdev *subdev = init->subdev;
+	if (init_exec(init)) {
+		int head = init->crtc < 0 ? 0 : init->crtc;
+		return nv_rdvgai(subdev, head, port, index);
+	}
+	return 0x00;
+}
+
+static void
+init_wrvgai(struct nvbios_init *init, u16 port, u8 index, u8 value)
+{
+	/* force head 0 for updates to cr44, it only exists on first head */
+	if (nv_device(init->subdev)->card_type < NV_50) {
+		if (port == 0x03d4 && index == 0x44)
+			init->crtc = 0;
+	}
+
+	if (init_exec(init)) {
+		int head = init->crtc < 0 ? 0 : init->crtc;
+		nv_wrvgai(init->subdev, head, port, index, value);
+	}
+
+	/* select head 1 if cr44 write selected it */
+	if (nv_device(init->subdev)->card_type < NV_50) {
+		if (port == 0x03d4 && index == 0x44 && value == 3)
+			init->crtc = 1;
+	}
+}
+
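+/* resolve a script's i2c bus index to a port: 0xff selects the default
+ * DDC bus (or the "upper" default when the output requests it), negative
+ * indices come from the current output's DCB entry, and -2 on an
+ * off-board output selects its external AUX channel
+ */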
+static struct nouveau_i2c_port *
+init_i2c(struct nvbios_init *init, int index)
+{
+	struct nouveau_i2c *i2c = nouveau_i2c(init->bios);
+
+	if (index == 0xff) {
+		index = NV_I2C_DEFAULT(0);
+		if (init->outp && init->outp->i2c_upper_default)
+			index = NV_I2C_DEFAULT(1);
+	} else
+	if (index < 0) {
+		if (!init->outp) {
+			if (init_exec(init))
+				error("script needs output for i2c\n");
+			return NULL;
+		}
+
+		if (index == -2 && init->outp->location) {
+			index = NV_I2C_TYPE_EXTAUX(init->outp->extdev);
+			return i2c->find_type(i2c, index);
+		}
+
+		index = init->outp->i2c_index;
+	}
+
+	return i2c->find(i2c, index);
+}
+
+static int
+init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg)
+{
+	struct nouveau_i2c_port *port = init_i2c(init, index);
+	if (port && init_exec(init))
+		return nv_rdi2cr(port, addr, reg);
+	return -ENODEV;
+}
+
+static int
+init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
+{
+	struct nouveau_i2c_port *port = init_i2c(init, index);
+	if (port && init_exec(init))
+		return nv_wri2cr(port, addr, reg, val);
+	return -ENODEV;
+}
+
+static int
+init_rdauxr(struct nvbios_init *init, u32 addr)
+{
+	struct nouveau_i2c_port *port = init_i2c(init, -2);
+	u8 data;
+
+	if (port && init_exec(init)) {
+		int ret = nv_rdaux(port, addr, &data, 1);
+		if (ret)
+			return ret;
+		return data;
+	}
+
+	return -ENODEV;
+}
+
+static int
+init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
+{
+	struct nouveau_i2c_port *port = init_i2c(init, -2);
+	if (port && init_exec(init))
+		return nv_wraux(port, addr, &data, 1);
+	return -ENODEV;
+}
+
+static void
+init_prog_pll(struct nvbios_init *init, u32 id, u32 freq)
+{
+	struct nouveau_clock *clk = nouveau_clock(init->bios);
+	if (clk && clk->pll_set && init_exec(init)) {
+		int ret = clk->pll_set(clk, id, freq);
+		if (ret)
+			warn("failed to prog pll 0x%08x to %dkHz\n", id, freq);
+	}
+}
+
+/******************************************************************************
+ * parsing of bios structures that are required to execute init tables
+ *****************************************************************************/
+
+static u16
+init_table(struct nouveau_bios *bios, u16 *len)
+{
+	struct bit_entry bit_I;
+
+	if (!bit_entry(bios, 'I', &bit_I)) {
+		*len = bit_I.length;
+		return bit_I.offset;
+	}
+
+	if (bmp_version(bios) >= 0x0510) {
+		*len = 14;
+		return bios->bmp_offset + 75;
+	}
+
+	return 0x0000;
+}
+
+static u16
+init_table_(struct nvbios_init *init, u16 offset, const char *name)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 len, data = init_table(bios, &len);
+	if (data) {
+		if (len >= offset + 2) {
+			data = nv_ro16(bios, data + offset);
+			if (data)
+				return data;
+
+			warn("%s pointer invalid\n", name);
+			return 0x0000;
+		}
+
+		warn("init data too short for %s pointer", name);
+		return 0x0000;
+	}
+
+	warn("init data not found\n");
+	return 0x0000;
+}
+
+#define init_script_table(b) init_table_((b), 0x00, "script table")
+#define init_macro_index_table(b) init_table_((b), 0x02, "macro index table")
+#define init_macro_table(b) init_table_((b), 0x04, "macro table")
+#define init_condition_table(b) init_table_((b), 0x06, "condition table")
+#define init_io_condition_table(b) init_table_((b), 0x08, "io condition table")
+#define init_io_flag_condition_table(b) init_table_((b), 0x0a, "io flag condition table")
+#define init_function_table(b) init_table_((b), 0x0c, "function table")
+#define init_xlat_table(b) init_table_((b), 0x10, "xlat table")
+
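+/* on BMP images prior to v5.10 only two script pointers exist, at a fixed
+ * offset that moved between BMP v1 and v2; everything newer goes through
+ * the init table's script pointer
+ */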
+static u16
+init_script(struct nouveau_bios *bios, int index)
+{
+	struct nvbios_init init = { .bios = bios };
+	u16 bmp_ver = bmp_version(bios), data;
+
+	if (bmp_ver && bmp_ver < 0x0510) {
+		if (index > 1 || bmp_ver < 0x0100)
+			return 0x0000;
+
+		data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
+		return nv_ro16(bios, data + (index * 2));
+	}
+
+	data = init_script_table(&init);
+	if (data)
+		return nv_ro16(bios, data + (index * 2));
+
+	return 0x0000;
+}
+
+static u16
+init_unknown_script(struct nouveau_bios *bios)
+{
+	u16 len, data = init_table(bios, &len);
+	if (data && len >= 16)
+		return nv_ro16(bios, data + 14);
+	return 0x0000;
+}
+
+static u16
+init_ram_restrict_table(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	struct bit_entry bit_M;
+	u16 data = 0x0000;
+
+	if (!bit_entry(bios, 'M', &bit_M)) {
+		if (bit_M.version == 1 && bit_M.length >= 5)
+			data = nv_ro16(bios, bit_M.offset + 3);
+		if (bit_M.version == 2 && bit_M.length >= 3)
+			data = nv_ro16(bios, bit_M.offset + 1);
+	}
+
+	if (data == 0x0000)
+		warn("ram restrict table not found\n");
+	return data;
+}
+
+static u8
+init_ram_restrict_group_count(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	struct bit_entry bit_M;
+
+	if (!bit_entry(bios, 'M', &bit_M)) {
+		if (bit_M.version == 1 && bit_M.length >= 5)
+			return nv_ro08(bios, bit_M.offset + 2);
+		if (bit_M.version == 2 && bit_M.length >= 3)
+			return nv_ro08(bios, bit_M.offset + 0);
+	}
+
+	return 0x00;
+}
+
+static u8
+init_ram_restrict_strap(struct nvbios_init *init)
+{
+	/* This appears to be the behaviour of the VBIOS parser, and it *is*
+	 * important to cache the NV_PEXTDEV_BOOT0 value on later chipsets to
+	 * avoid upsetting the memory controller (somehow) by reading it
+	 * on every INIT_RAM_RESTRICT_ZM_GROUP opcode.
+	 *
+	 * The non-caching behaviour is preserved on earlier chipsets, just
+	 * in case *not* re-reading the strap causes similar breakage.
+	 */
+	if (!init->ramcfg || init->bios->version.major < 0x70)
+		init->ramcfg = init_rd32(init, 0x101000);
+	return (init->ramcfg & 0x0000003c) >> 2;
+}
+
+static u8
+init_ram_restrict(struct nvbios_init *init)
+{
+	u8  strap = init_ram_restrict_strap(init);
+	u16 table = init_ram_restrict_table(init);
+	if (table)
+		return nv_ro08(init->bios, table + strap);
+	return 0x00;
+}
+
+static u8
+init_xlat_(struct nvbios_init *init, u8 index, u8 offset)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 table = init_xlat_table(init);
+	if (table) {
+		u16 data = nv_ro16(bios, table + (index * 2));
+		if (data)
+			return nv_ro08(bios, data + offset);
+		warn("xlat table pointer %d invalid\n", index);
+	}
+	return 0x00;
+}
+
+/******************************************************************************
+ * utility functions used by various init opcode handlers
+ *****************************************************************************/
+
+static bool
+init_condition_met(struct nvbios_init *init, u8 cond)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 table = init_condition_table(init);
+	if (table) {
+		u32 reg = nv_ro32(bios, table + (cond * 12) + 0);
+		u32 msk = nv_ro32(bios, table + (cond * 12) + 4);
+		u32 val = nv_ro32(bios, table + (cond * 12) + 8);
+		trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n",
+		      cond, reg, msk, val);
+		return (init_rd32(init, reg) & msk) == val;
+	}
+	return false;
+}
+
+static bool
+init_io_condition_met(struct nvbios_init *init, u8 cond)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 table = init_io_condition_table(init);
+	if (table) {
+		u16 port = nv_ro16(bios, table + (cond * 5) + 0);
+		u8 index = nv_ro08(bios, table + (cond * 5) + 2);
+		u8  mask = nv_ro08(bios, table + (cond * 5) + 3);
+		u8 value = nv_ro08(bios, table + (cond * 5) + 4);
+		trace("\t[0x%02x] (0x%04x[0x%02x] & 0x%02x) == 0x%02x\n",
+		      cond, port, index, mask, value);
+		return (init_rdvgai(init, port, index) & mask) == value;
+	}
+	return false;
+}
+
+static bool
+init_io_flag_condition_met(struct nvbios_init *init, u8 cond)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 table = init_io_flag_condition_table(init);
+	if (table) {
+		u16 port = nv_ro16(bios, table + (cond * 9) + 0);
+		u8 index = nv_ro08(bios, table + (cond * 9) + 2);
+		u8  mask = nv_ro08(bios, table + (cond * 9) + 3);
+		u8 shift = nv_ro08(bios, table + (cond * 9) + 4);
+		u16 data = nv_ro16(bios, table + (cond * 9) + 5);
+		u8 dmask = nv_ro08(bios, table + (cond * 9) + 7);
+		u8 value = nv_ro08(bios, table + (cond * 9) + 8);
+		u8 ioval = (init_rdvgai(init, port, index) & mask) >> shift;
+		return (nv_ro08(bios, data + ioval) & dmask) == value;
+	}
+	return false;
+}
+
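+/* shift amounts are encoded in a single byte: values below 0x80 shift
+ * right by that amount, values of 0x80 and above shift left by
+ * (0x100 - shift)
+ */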
+static inline u32
+init_shift(u32 data, u8 shift)
+{
+	if (shift < 0x80)
+		return data >> shift;
+	return data << (0x100 - shift);
+}
+
+static u32
+init_tmds_reg(struct nvbios_init *init, u8 tmds)
+{
+	/* For mlv < 0x80, it is an index into a table of TMDS base addresses.
+	 * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
+	 * CR58 for CR57 = 0 to index a table of offsets to the basic
+	 * 0x6808b0 address.
+	 * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
+	 * CR58 for CR57 = 0 to index a table of offsets to the basic
+	 * 0x6808b0 address, and then flip the offset by 8.
+	 */
+
+	const int pramdac_offset[13] = {
+		0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
+	const u32 pramdac_table[4] = {
+		0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };
+
+	if (tmds >= 0x80) {
+		if (init->outp) {
+			u32 dacoffset = pramdac_offset[init->outp->or];
+			if (tmds == 0x81)
+				dacoffset ^= 8;
+			return 0x6808b0 + dacoffset;
+		}
+
+		if (init_exec(init))
+			error("tmds opcodes need dcb\n");
+	} else {
+		if (tmds < ARRAY_SIZE(pramdac_table))
+			return pramdac_table[tmds];
+
+		error("tmds selector 0x%02x unknown\n", tmds);
+	}
+
+	return 0;
+}
+
+/******************************************************************************
+ * init opcode handlers
+ *****************************************************************************/
+
+/**
+ * init_reserved - stub for various unknown/unused opcodes (mostly single-byte)
+ *
+ */
+static void
+init_reserved(struct nvbios_init *init)
+{
+	u8 opcode = nv_ro08(init->bios, init->offset);
+	u8 length, i;
+
+	switch (opcode) {
+	case 0xaa:
+		length = 4;
+		break;
+	default:
+		length = 1;
+		break;
+	}
+
+	trace("RESERVED 0x%02x\t", opcode);
+	for (i = 1; i < length; i++)
+		cont(" 0x%02x", nv_ro08(init->bios, init->offset + i));
+	cont("\n");
+	init->offset += length;
+}
+
+/**
+ * INIT_DONE - opcode 0x71
+ *
+ */
+static void
+init_done(struct nvbios_init *init)
+{
+	trace("DONE\n");
+	init->offset = 0x0000;
+}
+
+/**
+ * INIT_IO_RESTRICT_PROG - opcode 0x32
+ *
+ */
+static void
+init_io_restrict_prog(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 port = nv_ro16(bios, init->offset + 1);
+	u8 index = nv_ro08(bios, init->offset + 3);
+	u8  mask = nv_ro08(bios, init->offset + 4);
+	u8 shift = nv_ro08(bios, init->offset + 5);
+	u8 count = nv_ro08(bios, init->offset + 6);
+	u32  reg = nv_ro32(bios, init->offset + 7);
+	u8 conf, i;
+
+	trace("IO_RESTRICT_PROG\tR[0x%06x] = "
+	      "((0x%04x[0x%02x] & 0x%02x) >> %d) [{\n",
+	      reg, port, index, mask, shift);
+	init->offset += 11;
+
+	conf = (init_rdvgai(init, port, index) & mask) >> shift;
+	for (i = 0; i < count; i++) {
+		u32 data = nv_ro32(bios, init->offset);
+
+		if (i == conf) {
+			trace("\t0x%08x *\n", data);
+			init_wr32(init, reg, data);
+		} else {
+			trace("\t0x%08x\n", data);
+		}
+
+		init->offset += 4;
+	}
+	trace("}]\n");
+}
+
+/**
+ * INIT_REPEAT - opcode 0x33
+ *
+ */
+static void
+init_repeat(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 count = nv_ro08(bios, init->offset + 1);
+	u16 repeat = init->repeat;
+
+	trace("REPEAT\t0x%02x\n", count);
+	init->offset += 2;
+
+	init->repeat = init->offset;
+	init->repend = init->offset;
+	while (count--) {
+		init->offset = init->repeat;
+		nvbios_exec(init);
+		if (count)
+			trace("REPEAT\t0x%02x\n", count);
+	}
+	init->offset = init->repend;
+	init->repeat = repeat;
+}
+
+/**
+ * INIT_IO_RESTRICT_PLL - opcode 0x34
+ *
+ */
+static void
+init_io_restrict_pll(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 port = nv_ro16(bios, init->offset + 1);
+	u8 index = nv_ro08(bios, init->offset + 3);
+	u8  mask = nv_ro08(bios, init->offset + 4);
+	u8 shift = nv_ro08(bios, init->offset + 5);
+	s8  iofc = nv_ro08(bios, init->offset + 6);
+	u8 count = nv_ro08(bios, init->offset + 7);
+	u32  reg = nv_ro32(bios, init->offset + 8);
+	u8 conf, i;
+
+	trace("IO_RESTRICT_PLL\tR[0x%06x] =PLL= "
+	      "((0x%04x[0x%02x] & 0x%02x) >> 0x%02x) IOFCOND 0x%02x [{\n",
+	      reg, port, index, mask, shift, iofc);
+	init->offset += 12;
+
+	conf = (init_rdvgai(init, port, index) & mask) >> shift;
+	for (i = 0; i < count; i++) {
+		u32 freq = nv_ro16(bios, init->offset) * 10;
+
+		if (i == conf) {
+			trace("\t%dkHz *\n", freq);
+			if (iofc > 0 && init_io_flag_condition_met(init, iofc))
+				freq *= 2;
+			init_prog_pll(init, reg, freq);
+		} else {
+			trace("\t%dkHz\n", freq);
+		}
+
+		init->offset += 2;
+	}
+	trace("}]\n");
+}
+
+/**
+ * INIT_END_REPEAT - opcode 0x36
+ *
+ */
+static void
+init_end_repeat(struct nvbios_init *init)
+{
+	trace("END_REPEAT\n");
+	init->offset += 1;
+
+	if (init->repeat) {
+		init->repend = init->offset;
+		init->offset = 0;
+	}
+}
+
+/**
+ * INIT_COPY - opcode 0x37
+ *
+ */
+static void
+init_copy(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32  reg = nv_ro32(bios, init->offset + 1);
+	u8 shift = nv_ro08(bios, init->offset + 5);
+	u8 smask = nv_ro08(bios, init->offset + 6);
+	u16 port = nv_ro16(bios, init->offset + 7);
+	u8 index = nv_ro08(bios, init->offset + 9);
+	u8  mask = nv_ro08(bios, init->offset + 10);
+	u8  data;
+
+	trace("COPY\t0x%04x[0x%02x] &= 0x%02x |= "
+	      "((R[0x%06x] %s 0x%02x) & 0x%02x)\n",
+	      port, index, mask, reg, (shift & 0x80) ? "<<" : ">>",
+	      (shift & 0x80) ? (0x100 - shift) : shift, smask);
+	init->offset += 11;
+
+	data  = init_rdvgai(init, port, index) & mask;
+	data |= init_shift(init_rd32(init, reg), shift) & smask;
+	init_wrvgai(init, port, index, data);
+}
+
+/**
+ * INIT_NOT - opcode 0x38
+ *
+ */
+static void
+init_not(struct nvbios_init *init)
+{
+	trace("NOT\n");
+	init->offset += 1;
+	init_exec_inv(init);
+}
+
+/**
+ * INIT_IO_FLAG_CONDITION - opcode 0x39
+ *
+ */
+static void
+init_io_flag_condition(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 cond = nv_ro08(bios, init->offset + 1);
+
+	trace("IO_FLAG_CONDITION\t0x%02x\n", cond);
+	init->offset += 2;
+
+	if (!init_io_flag_condition_met(init, cond))
+		init_exec_set(init, false);
+}
+
+/**
+ * INIT_DP_CONDITION - opcode 0x3a
+ *
+ */
+static void
+init_dp_condition(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	struct nvbios_dpout info;
+	u8  cond = nv_ro08(bios, init->offset + 1);
+	u8  unkn = nv_ro08(bios, init->offset + 2);
+	u8  ver, hdr, cnt, len;
+	u16 data;
+
+	trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
+	init->offset += 3;
+
+	switch (cond) {
+	case 0:
+		if (init_conn(init) != DCB_CONNECTOR_eDP)
+			init_exec_set(init, false);
+		break;
+	case 1:
+	case 2:
+		if ( init->outp &&
+		    (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
+					       (init->outp->or << 0) |
+					       (init->outp->sorconf.link << 6),
+					       &ver, &hdr, &cnt, &len, &info)))
+		{
+			if (!(info.flags & cond))
+				init_exec_set(init, false);
+			break;
+		}
+
+		if (init_exec(init))
+			warn("script needs dp output table data\n");
+		break;
+	case 5:
+		if (!(init_rdauxr(init, 0x0d) & 1))
+			init_exec_set(init, false);
+		break;
+	default:
+		warn("unknown dp condition 0x%02x\n", cond);
+		break;
+	}
+}
+
+/**
+ * INIT_IO_MASK_OR - opcode 0x3b
+ *
+ */
+static void
+init_io_mask_or(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8    or = init_or(init);
+	u8  data;
+
+	trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)\n", index, or);
+	init->offset += 2;
+
+	data = init_rdvgai(init, 0x03d4, index);
+	init_wrvgai(init, 0x03d4, index, data &= ~(1 << or));
+}
+
+/**
+ * INIT_IO_OR - opcode 0x3c
+ *
+ */
+static void
+init_io_or(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8    or = init_or(init);
+	u8  data;
+
+	trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)\n", index, or);
+	init->offset += 2;
+
+	data = init_rdvgai(init, 0x03d4, index);
+	init_wrvgai(init, 0x03d4, index, data | (1 << or));
+}
+
+/**
+ * INIT_INDEX_ADDRESS_LATCHED - opcode 0x49
+ *
+ */
+static void
+init_idx_addr_latched(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 creg = nv_ro32(bios, init->offset + 1);
+	u32 dreg = nv_ro32(bios, init->offset + 5);
+	u32 mask = nv_ro32(bios, init->offset + 9);
+	u32 data = nv_ro32(bios, init->offset + 13);
+	u8 count = nv_ro08(bios, init->offset + 17);
+
+	trace("INDEX_ADDRESS_LATCHED\t"
+	      "R[0x%06x] : R[0x%06x]\n\tCTRL &= 0x%08x |= 0x%08x\n",
+	      creg, dreg, mask, data);
+	init->offset += 18;
+
+	while (count--) {
+		u8 iaddr = nv_ro08(bios, init->offset + 0);
+		u8 idata = nv_ro08(bios, init->offset + 1);
+
+		trace("\t[0x%02x] = 0x%02x\n", iaddr, idata);
+		init->offset += 2;
+
+		init_wr32(init, dreg, idata);
+		init_mask(init, creg, ~mask, data | iaddr);
+	}
+}
+
+/**
+ * INIT_IO_RESTRICT_PLL2 - opcode 0x4a
+ *
+ */
+static void
+init_io_restrict_pll2(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 port = nv_ro16(bios, init->offset + 1);
+	u8 index = nv_ro08(bios, init->offset + 3);
+	u8  mask = nv_ro08(bios, init->offset + 4);
+	u8 shift = nv_ro08(bios, init->offset + 5);
+	u8 count = nv_ro08(bios, init->offset + 6);
+	u32  reg = nv_ro32(bios, init->offset + 7);
+	u8  conf, i;
+
+	trace("IO_RESTRICT_PLL2\t"
+	      "R[0x%06x] =PLL= ((0x%04x[0x%02x] & 0x%02x) >> 0x%02x) [{\n",
+	      reg, port, index, mask, shift);
+	init->offset += 11;
+
+	conf = (init_rdvgai(init, port, index) & mask) >> shift;
+	for (i = 0; i < count; i++) {
+		u32 freq = nv_ro32(bios, init->offset);
+		if (i == conf) {
+			trace("\t%dkHz *\n", freq);
+			init_prog_pll(init, reg, freq);
+		} else {
+			trace("\t%dkHz\n", freq);
+		}
+		init->offset += 4;
+	}
+	trace("}]\n");
+}
+
+/**
+ * INIT_PLL2 - opcode 0x4b
+ *
+ */
+static void
+init_pll2(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32  reg = nv_ro32(bios, init->offset + 1);
+	u32 freq = nv_ro32(bios, init->offset + 5);
+
+	trace("PLL2\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
+	init->offset += 9;
+
+	init_prog_pll(init, reg, freq);
+}
+
+/**
+ * INIT_I2C_BYTE - opcode 0x4c
+ *
+ */
+static void
+init_i2c_byte(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
+	u8 count = nv_ro08(bios, init->offset + 3);
+
+	trace("I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
+	init->offset += 4;
+
+	while (count--) {
+		u8  reg = nv_ro08(bios, init->offset + 0);
+		u8 mask = nv_ro08(bios, init->offset + 1);
+		u8 data = nv_ro08(bios, init->offset + 2);
+		int val;
+
+		trace("\t[0x%02x] &= 0x%02x |= 0x%02x\n", reg, mask, data);
+		init->offset += 3;
+
+		val = init_rdi2cr(init, index, addr, reg);
+		if (val < 0)
+			continue;
+		init_wri2cr(init, index, addr, reg, (val & mask) | data);
+	}
+}
+
+/**
+ * INIT_ZM_I2C_BYTE - opcode 0x4d
+ *
+ */
+static void
+init_zm_i2c_byte(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
+	u8 count = nv_ro08(bios, init->offset + 3);
+
+	trace("ZM_I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
+	init->offset += 4;
+
+	while (count--) {
+		u8  reg = nv_ro08(bios, init->offset + 0);
+		u8 data = nv_ro08(bios, init->offset + 1);
+
+		trace("\t[0x%02x] = 0x%02x\n", reg, data);
+		init->offset += 2;
+
+		init_wri2cr(init, index, addr, reg, data);
+	}
+}
+
+/**
+ * INIT_ZM_I2C - opcode 0x4e
+ *
+ */
+static void
+init_zm_i2c(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
+	u8 count = nv_ro08(bios, init->offset + 3);
+	u8 data[256], i;
+
+	trace("ZM_I2C\tI2C[0x%02x][0x%02x]\n", index, addr);
+	init->offset += 4;
+
+	for (i = 0; i < count; i++) {
+		data[i] = nv_ro08(bios, init->offset);
+		trace("\t0x%02x\n", data[i]);
+		init->offset++;
+	}
+
+	if (init_exec(init)) {
+		struct nouveau_i2c_port *port = init_i2c(init, index);
+		struct i2c_msg msg = {
+			.addr = addr, .flags = 0, .len = count, .buf = data,
+		};
+		int ret;
+
+		if (port && (ret = i2c_transfer(&port->adapter, &msg, 1)) != 1)
+			warn("i2c wr failed, %d\n", ret);
+	}
+}
+
+/**
+ * INIT_TMDS - opcode 0x4f
+ *
+ */
+static void
+init_tmds(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 tmds = nv_ro08(bios, init->offset + 1);
+	u8 addr = nv_ro08(bios, init->offset + 2);
+	u8 mask = nv_ro08(bios, init->offset + 3);
+	u8 data = nv_ro08(bios, init->offset + 4);
+	u32 reg = init_tmds_reg(init, tmds);
+
+	trace("TMDS\tT[0x%02x][0x%02x] &= 0x%02x |= 0x%02x\n",
+	      tmds, addr, mask, data);
+	init->offset += 5;
+
+	if (reg == 0)
+		return;
+
+	init_wr32(init, reg + 0, addr | 0x00010000);
+	init_wr32(init, reg + 4, data | (init_rd32(init, reg + 4) & mask));
+	init_wr32(init, reg + 0, addr);
+}
+
+/**
+ * INIT_ZM_TMDS_GROUP - opcode 0x50
+ *
+ */
+static void
+init_zm_tmds_group(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8  tmds = nv_ro08(bios, init->offset + 1);
+	u8 count = nv_ro08(bios, init->offset + 2);
+	u32  reg = init_tmds_reg(init, tmds);
+
+	trace("TMDS_ZM_GROUP\tT[0x%02x]\n", tmds);
+	init->offset += 3;
+
+	while (count--) {
+		u8 addr = nv_ro08(bios, init->offset + 0);
+		u8 data = nv_ro08(bios, init->offset + 1);
+
+		trace("\t[0x%02x] = 0x%02x\n", addr, data);
+		init->offset += 2;
+
+		init_wr32(init, reg + 4, data);
+		init_wr32(init, reg + 0, addr);
+	}
+}
+
+/**
+ * INIT_CR_INDEX_ADDRESS_LATCHED - opcode 0x51
+ *
+ */
+static void
+init_cr_idx_adr_latch(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 addr0 = nv_ro08(bios, init->offset + 1);
+	u8 addr1 = nv_ro08(bios, init->offset + 2);
+	u8  base = nv_ro08(bios, init->offset + 3);
+	u8 count = nv_ro08(bios, init->offset + 4);
+	u8 save0;
+
+	trace("CR_INDEX_ADDR C[%02x] C[%02x]\n", addr0, addr1);
+	init->offset += 5;
+
+	save0 = init_rdvgai(init, 0x03d4, addr0);
+	while (count--) {
+		u8 data = nv_ro08(bios, init->offset);
+
+		trace("\t\t[0x%02x] = 0x%02x\n", base, data);
+		init->offset += 1;
+
+		init_wrvgai(init, 0x03d4, addr0, base++);
+		init_wrvgai(init, 0x03d4, addr1, data);
+	}
+	init_wrvgai(init, 0x03d4, addr0, save0);
+}
+
+/**
+ * INIT_CR - opcode 0x52
+ *
+ */
+static void
+init_cr(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 addr = nv_ro08(bios, init->offset + 1);
+	u8 mask = nv_ro08(bios, init->offset + 2);
+	u8 data = nv_ro08(bios, init->offset + 3);
+	u8 val;
+
+	trace("CR\t\tC[0x%02x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
+	init->offset += 4;
+
+	val = init_rdvgai(init, 0x03d4, addr) & mask;
+	init_wrvgai(init, 0x03d4, addr, val | data);
+}
+
+/**
+ * INIT_ZM_CR - opcode 0x53
+ *
+ */
+static void
+init_zm_cr(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 addr = nv_ro08(bios, init->offset + 1);
+	u8 data = nv_ro08(bios, init->offset + 2);
+
+	trace("ZM_CR\tC[0x%02x] = 0x%02x\n", addr,  data);
+	init->offset += 3;
+
+	init_wrvgai(init, 0x03d4, addr, data);
+}
+
+/**
+ * INIT_ZM_CR_GROUP - opcode 0x54
+ *
+ */
+static void
+init_zm_cr_group(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 count = nv_ro08(bios, init->offset + 1);
+
+	trace("ZM_CR_GROUP\n");
+	init->offset += 2;
+
+	while (count--) {
+		u8 addr = nv_ro08(bios, init->offset + 0);
+		u8 data = nv_ro08(bios, init->offset + 1);
+
+		trace("\t\tC[0x%02x] = 0x%02x\n", addr, data);
+		init->offset += 2;
+
+		init_wrvgai(init, 0x03d4, addr, data);
+	}
+}
+
+/**
+ * INIT_CONDITION_TIME - opcode 0x56
+ *
+ */
+static void
+init_condition_time(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8  cond = nv_ro08(bios, init->offset + 1);
+	u8 retry = nv_ro08(bios, init->offset + 2);
+	u8  wait = min((u16)retry * 50, 100);
+
+	trace("CONDITION_TIME\t0x%02x 0x%02x\n", cond, retry);
+	init->offset += 3;
+
+	if (!init_exec(init))
+		return;
+
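+	/* poll the condition every 20ms, up to 'wait' times; if it never
+	 * becomes true, pause execution of the remaining script
+	 */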
+	while (wait--) {
+		if (init_condition_met(init, cond))
+			return;
+		mdelay(20);
+	}
+
+	init_exec_set(init, false);
+}
+
+/**
+ * INIT_LTIME - opcode 0x57
+ *
+ */
+static void
+init_ltime(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 msec = nv_ro16(bios, init->offset + 1);
+
+	trace("LTIME\t0x%04x\n", msec);
+	init->offset += 3;
+
+	if (init_exec(init))
+		mdelay(msec);
+}
+
+/**
+ * INIT_ZM_REG_SEQUENCE - opcode 0x58
+ *
+ */
+static void
+init_zm_reg_sequence(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 base = nv_ro32(bios, init->offset + 1);
+	u8 count = nv_ro08(bios, init->offset + 5);
+
+	trace("ZM_REG_SEQUENCE\t0x%02x\n", count);
+	init->offset += 6;
+
+	while (count--) {
+		u32 data = nv_ro32(bios, init->offset);
+
+		trace("\t\tR[0x%06x] = 0x%08x\n", base, data);
+		init->offset += 4;
+
+		init_wr32(init, base, data);
+		base += 4;
+	}
+}
+
+/**
+ * INIT_SUB_DIRECT - opcode 0x5b
+ *
+ */
+static void
+init_sub_direct(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 addr = nv_ro16(bios, init->offset + 1);
+	u16 save;
+
+	trace("SUB_DIRECT\t0x%04x\n", addr);
+
+	if (init_exec(init)) {
+		save = init->offset;
+		init->offset = addr;
+		if (nvbios_exec(init)) {
+			error("error parsing sub-table\n");
+			return;
+		}
+		init->offset = save;
+	}
+
+	init->offset += 3;
+}
+
+/**
+ * INIT_JUMP - opcode 0x5c
+ *
+ */
+static void
+init_jump(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 offset = nv_ro16(bios, init->offset + 1);
+
+	trace("JUMP\t0x%04x\n", offset);
+
+	if (init_exec(init))
+		init->offset = offset;
+	else
+		init->offset += 3;
+}
+
+/**
+ * INIT_I2C_IF - opcode 0x5e
+ *
+ */
+static void
+init_i2c_if(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8  addr = nv_ro08(bios, init->offset + 2);
+	u8   reg = nv_ro08(bios, init->offset + 3);
+	u8  mask = nv_ro08(bios, init->offset + 4);
+	u8  data = nv_ro08(bios, init->offset + 5);
+	u8 value;
+
+	trace("I2C_IF\tI2C[0x%02x][0x%02x][0x%02x] & 0x%02x == 0x%02x\n",
+	      index, addr, reg, mask, data);
+	init->offset += 6;
+	init_exec_force(init, true);
+
+	value = init_rdi2cr(init, index, addr, reg);
+	if ((value & mask) != data)
+		init_exec_set(init, false);
+
+	init_exec_force(init, false);
+}
+
+/**
+ * INIT_COPY_NV_REG - opcode 0x5f
+ *
+ */
+static void
+init_copy_nv_reg(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32  sreg = nv_ro32(bios, init->offset + 1);
+	u8  shift = nv_ro08(bios, init->offset + 5);
+	u32 smask = nv_ro32(bios, init->offset + 6);
+	u32  sxor = nv_ro32(bios, init->offset + 10);
+	u32  dreg = nv_ro32(bios, init->offset + 14);
+	u32 dmask = nv_ro32(bios, init->offset + 18);
+	u32 data;
+
+	trace("COPY_NV_REG\tR[0x%06x] &= 0x%08x |= "
+	      "((R[0x%06x] %s 0x%02x) & 0x%08x ^ 0x%08x)\n",
+	      dreg, dmask, sreg, (shift & 0x80) ? "<<" : ">>",
+	      (shift & 0x80) ? (0x100 - shift) : shift, smask, sxor);
+	init->offset += 22;
+
+	data = init_shift(init_rd32(init, sreg), shift);
+	init_mask(init, dreg, ~dmask, (data & smask) ^ sxor);
+}
+
+/**
+ * INIT_ZM_INDEX_IO - opcode 0x62
+ *
+ */
+static void
+init_zm_index_io(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 port = nv_ro16(bios, init->offset + 1);
+	u8 index = nv_ro08(bios, init->offset + 3);
+	u8  data = nv_ro08(bios, init->offset + 4);
+
+	trace("ZM_INDEX_IO\tI[0x%04x][0x%02x] = 0x%02x\n", port, index, data);
+	init->offset += 5;
+
+	init_wrvgai(init, port, index, data);
+}
+
+/**
+ * INIT_COMPUTE_MEM - opcode 0x63
+ *
+ */
+static void
+init_compute_mem(struct nvbios_init *init)
+{
+	struct nouveau_devinit *devinit = nouveau_devinit(init->bios);
+
+	trace("COMPUTE_MEM\n");
+	init->offset += 1;
+
+	init_exec_force(init, true);
+	if (init_exec(init) && devinit->meminit)
+		devinit->meminit(devinit);
+	init_exec_force(init, false);
+}
+
+/**
+ * INIT_RESET - opcode 0x65
+ *
+ */
+static void
+init_reset(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32   reg = nv_ro32(bios, init->offset + 1);
+	u32 data1 = nv_ro32(bios, init->offset + 5);
+	u32 data2 = nv_ro32(bios, init->offset + 9);
+	u32 savepci19;
+
+	trace("RESET\tR[0x%08x] = 0x%08x, 0x%08x", reg, data1, data2);
+	init->offset += 13;
+	init_exec_force(init, true);
+
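+	/* 0x00184c/0x001850 mirror PCI config space (NV_PBUS_PCI_NV_19/_20
+	 * in nouveau's register naming, hence "savepci19"); bits there are
+	 * masked off around the reset pulse and restored afterwards
+	 */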
+	savepci19 = init_mask(init, 0x00184c, 0x00000f00, 0x00000000);
+	init_wr32(init, reg, data1);
+	udelay(10);
+	init_wr32(init, reg, data2);
+	init_wr32(init, 0x00184c, savepci19);
+	init_mask(init, 0x001850, 0x00000001, 0x00000000);
+
+	init_exec_force(init, false);
+}
+
+/**
+ * INIT_CONFIGURE_MEM - opcode 0x66
+ *
+ */
+static u16
+init_configure_mem_clk(struct nvbios_init *init)
+{
+	u16 mdata = bmp_mem_init_table(init->bios);
+	if (mdata)
+		mdata += (init_rdvgai(init, 0x03d4, 0x3c) >> 4) * 66;
+	return mdata;
+}
+
+static void
+init_configure_mem(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 mdata, sdata;
+	u32 addr, data;
+
+	trace("CONFIGURE_MEM\n");
+	init->offset += 1;
+
+	if (bios->version.major > 2) {
+		init_done(init);
+		return;
+	}
+	init_exec_force(init, true);
+
+	mdata = init_configure_mem_clk(init);
+	sdata = bmp_sdr_seq_table(bios);
+	if (nv_ro08(bios, mdata) & 0x01)
+		sdata = bmp_ddr_seq_table(bios);
+	mdata += 6; /* skip to data */
+
+	data = init_rdvgai(init, 0x03c4, 0x01);
+	init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
+
+	while ((addr = nv_ro32(bios, sdata)) != 0xffffffff) {
+		switch (addr) {
+		case 0x10021c: /* CKE_NORMAL */
+		case 0x1002d0: /* CMD_REFRESH */
+		case 0x1002d4: /* CMD_PRECHARGE */
+			data = 0x00000001;
+			break;
+		default:
+			data = nv_ro32(bios, mdata);
+			mdata += 4;
+			if (data == 0xffffffff)
+				continue;
+			break;
+		}
+
+		init_wr32(init, addr, data);
+	}
+
+	init_exec_force(init, false);
+}
+
+/**
+ * INIT_CONFIGURE_CLK - opcode 0x67
+ *
+ */
+static void
+init_configure_clk(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 mdata, clock;
+
+	trace("CONFIGURE_CLK\n");
+	init->offset += 1;
+
+	if (bios->version.major > 2) {
+		init_done(init);
+		return;
+	}
+	init_exec_force(init, true);
+
+	mdata = init_configure_mem_clk(init);
+
+	/* NVPLL */
+	clock = nv_ro16(bios, mdata + 4) * 10;
+	init_prog_pll(init, 0x680500, clock);
+
+	/* MPLL */
+	clock = nv_ro16(bios, mdata + 2) * 10;
+	if (nv_ro08(bios, mdata) & 0x01)
+		clock *= 2;
+	init_prog_pll(init, 0x680504, clock);
+
+	init_exec_force(init, false);
+}
+
+/**
+ * INIT_CONFIGURE_PREINIT - opcode 0x68
+ *
+ */
+static void
+init_configure_preinit(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 strap;
+
+	trace("CONFIGURE_PREINIT\n");
+	init->offset += 1;
+
+	if (bios->version.major > 2) {
+		init_done(init);
+		return;
+	}
+	init_exec_force(init, true);
+
+	strap = init_rd32(init, 0x101000);
+	strap = ((strap << 2) & 0xf0) | ((strap & 0x40) >> 6);
+	init_wrvgai(init, 0x03d4, 0x3c, strap);
+
+	init_exec_force(init, false);
+}
+
+/**
+ * INIT_IO - opcode 0x69
+ *
+ */
+static void
+init_io(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 port = nv_ro16(bios, init->offset + 1);
+	u8  mask = nv_ro08(bios, init->offset + 3);
+	u8  data = nv_ro08(bios, init->offset + 4);
+	u8 value;
+
+	trace("IO\t\tI[0x%04x] &= 0x%02x |= 0x%02x\n", port, mask, data);
+	init->offset += 5;
+
+	/* it's not yet clear what this sequence is for, or why it's needed;
+	 * it's almost certainly wrong in some way, but it also somehow
+	 * makes things work...
+	 */
+	if (nv_device(init->bios)->card_type >= NV_50 &&
+	    port == 0x03c3 && data == 0x01) {
+		init_mask(init, 0x614100, 0xf0800000, 0x00800000);
+		init_mask(init, 0x00e18c, 0x00020000, 0x00020000);
+		init_mask(init, 0x614900, 0xf0800000, 0x00800000);
+		init_mask(init, 0x000200, 0x40000000, 0x00000000);
+		mdelay(10);
+		init_mask(init, 0x00e18c, 0x00020000, 0x00000000);
+		init_mask(init, 0x000200, 0x40000000, 0x40000000);
+		init_wr32(init, 0x614100, 0x00800018);
+		init_wr32(init, 0x614900, 0x00800018);
+		mdelay(10);
+		init_wr32(init, 0x614100, 0x10000018);
+		init_wr32(init, 0x614900, 0x10000018);
+	}
+
+	value = init_rdport(init, port) & mask;
+	init_wrport(init, port, data | value);
+}
+
+/**
+ * INIT_SUB - opcode 0x6b
+ *
+ */
+static void
+init_sub(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u16 addr, save;
+
+	trace("SUB\t0x%02x\n", index);
+
+	addr = init_script(bios, index);
+	if (addr && init_exec(init)) {
+		save = init->offset;
+		init->offset = addr;
+		if (nvbios_exec(init)) {
+			error("error parsing sub-table\n");
+			return;
+		}
+		init->offset = save;
+	}
+
+	init->offset += 2;
+}
+
+/**
+ * INIT_RAM_CONDITION - opcode 0x6d
+ *
+ */
+static void
+init_ram_condition(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8  mask = nv_ro08(bios, init->offset + 1);
+	u8 value = nv_ro08(bios, init->offset + 2);
+
+	trace("RAM_CONDITION\t"
+	      "(R[0x100000] & 0x%02x) == 0x%02x\n", mask, value);
+	init->offset += 3;
+
+	if ((init_rd32(init, 0x100000) & mask) != value)
+		init_exec_set(init, false);
+}
+
+/**
+ * INIT_NV_REG - opcode 0x6e
+ *
+ */
+static void
+init_nv_reg(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32  reg = nv_ro32(bios, init->offset + 1);
+	u32 mask = nv_ro32(bios, init->offset + 5);
+	u32 data = nv_ro32(bios, init->offset + 9);
+
+	trace("NV_REG\tR[0x%06x] &= 0x%08x |= 0x%08x\n", reg, mask, data);
+	init->offset += 13;
+
+	init_mask(init, reg, ~mask, data);
+}
+
+/**
+ * INIT_MACRO - opcode 0x6f
+ *
+ */
+static void
+init_macro(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8  macro = nv_ro08(bios, init->offset + 1);
+	u16 table;
+
+	trace("MACRO\t0x%02x\n", macro);
+
+	table = init_macro_table(init);
+	if (table) {
+		u32 addr = nv_ro32(bios, table + (macro * 8) + 0);
+		u32 data = nv_ro32(bios, table + (macro * 8) + 4);
+		trace("\t\tR[0x%06x] = 0x%08x\n", addr, data);
+		init_wr32(init, addr, data);
+	}
+
+	init->offset += 2;
+}
+
+/**
+ * INIT_RESUME - opcode 0x72
+ *
+ */
+static void
+init_resume(struct nvbios_init *init)
+{
+	trace("RESUME\n");
+	init->offset += 1;
+	init_exec_set(init, true);
+}
+
+/**
+ * INIT_TIME - opcode 0x74
+ *
+ */
+static void
+init_time(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 usec = nv_ro16(bios, init->offset + 1);
+
+	trace("TIME\t0x%04x\n", usec);
+	init->offset += 3;
+
+	if (init_exec(init)) {
+		if (usec < 1000)
+			udelay(usec);
+		else
+			mdelay((usec + 900) / 1000);
+	}
+}
+
+/**
+ * INIT_CONDITION - opcode 0x75
+ *
+ */
+static void
+init_condition(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 cond = nv_ro08(bios, init->offset + 1);
+
+	trace("CONDITION\t0x%02x\n", cond);
+	init->offset += 2;
+
+	if (!init_condition_met(init, cond))
+		init_exec_set(init, false);
+}
+
+/**
+ * INIT_IO_CONDITION - opcode 0x76
+ *
+ */
+static void
+init_io_condition(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 cond = nv_ro08(bios, init->offset + 1);
+
+	trace("IO_CONDITION\t0x%02x\n", cond);
+	init->offset += 2;
+
+	if (!init_io_condition_met(init, cond))
+		init_exec_set(init, false);
+}
+
+/**
+ * INIT_INDEX_IO - opcode 0x78
+ *
+ */
+static void
+init_index_io(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u16 port = nv_ro16(bios, init->offset + 1);
+	u8 index = nv_ro08(bios, init->offset + 3);
+	u8  mask = nv_ro08(bios, init->offset + 4);
+	u8  data = nv_ro08(bios, init->offset + 5);
+	u8 value;
+
+	trace("INDEX_IO\tI[0x%04x][0x%02x] &= 0x%02x |= 0x%02x\n",
+	      port, index, mask, data);
+	init->offset += 6;
+
+	value = init_rdvgai(init, port, index) & mask;
+	init_wrvgai(init, port, index, data | value);
+}
+
+/**
+ * INIT_PLL - opcode 0x79
+ *
+ */
+static void
+init_pll(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32  reg = nv_ro32(bios, init->offset + 1);
+	u32 freq = nv_ro16(bios, init->offset + 5) * 10;
+
+	trace("PLL\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
+	init->offset += 7;
+
+	init_prog_pll(init, reg, freq);
+}
+
+/**
+ * INIT_ZM_REG - opcode 0x7a
+ *
+ */
+static void
+init_zm_reg(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 addr = nv_ro32(bios, init->offset + 1);
+	u32 data = nv_ro32(bios, init->offset + 5);
+
+	trace("ZM_REG\tR[0x%06x] = 0x%08x\n", addr, data);
+	init->offset += 9;
+
+	if (addr == 0x000200)
+		data |= 0x00000001;
+
+	init_wr32(init, addr, data);
+}
+
+/**
+ * INIT_RAM_RESTRICT_PLL - opcode 0x87
+ *
+ */
+static void
+init_ram_restrict_pll(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8  type = nv_ro08(bios, init->offset + 1);
+	u8 count = init_ram_restrict_group_count(init);
+	u8 strap = init_ram_restrict(init);
+	u8 cconf;
+
+	trace("RAM_RESTRICT_PLL\t0x%02x\n", type);
+	init->offset += 2;
+
+	for (cconf = 0; cconf < count; cconf++) {
+		u32 freq = nv_ro32(bios, init->offset);
+
+		if (cconf == strap) {
+			trace("%dkHz *\n", freq);
+			init_prog_pll(init, type, freq);
+		} else {
+			trace("%dkHz\n", freq);
+		}
+
+		init->offset += 4;
+	}
+}
+
+/**
+ * INIT_GPIO - opcode 0x8e
+ *
+ */
+static void
+init_gpio(struct nvbios_init *init)
+{
+	struct nouveau_gpio *gpio = nouveau_gpio(init->bios);
+
+	trace("GPIO\n");
+	init->offset += 1;
+
+	if (init_exec(init) && gpio && gpio->reset)
+		gpio->reset(gpio, DCB_GPIO_UNUSED);
+}
+
+/**
+ * INIT_RAM_RESTRICT_ZM_REG_GROUP - opcode 0x8f
+ *
+ */
+static void
+init_ram_restrict_zm_reg_group(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 addr = nv_ro32(bios, init->offset + 1);
+	u8  incr = nv_ro08(bios, init->offset + 5);
+	u8   num = nv_ro08(bios, init->offset + 6);
+	u8 count = init_ram_restrict_group_count(init);
+	u8 index = init_ram_restrict(init);
+	u8 i, j;
+
+	trace("RAM_RESTRICT_ZM_REG_GROUP\t"
+	      "R[0x%08x] 0x%02x 0x%02x\n", addr, incr, num);
+	init->offset += 7;
+
+	for (i = 0; i < num; i++) {
+		trace("\tR[0x%06x] = {\n", addr);
+		for (j = 0; j < count; j++) {
+			u32 data = nv_ro32(bios, init->offset);
+
+			if (j == index) {
+				trace("\t\t0x%08x *\n", data);
+				init_wr32(init, addr, data);
+			} else {
+				trace("\t\t0x%08x\n", data);
+			}
+
+			init->offset += 4;
+		}
+		trace("\t}\n");
+		addr += incr;
+	}
+}
+
+/**
+ * INIT_COPY_ZM_REG - opcode 0x90
+ *
+ */
+static void
+init_copy_zm_reg(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 sreg = nv_ro32(bios, init->offset + 1);
+	u32 dreg = nv_ro32(bios, init->offset + 5);
+
+	trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg);
+	init->offset += 9;
+
+	init_wr32(init, dreg, init_rd32(init, sreg));
+}
+
+/**
+ * INIT_ZM_REG_GROUP - opcode 0x91
+ *
+ */
+static void
+init_zm_reg_group(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 addr = nv_ro32(bios, init->offset + 1);
+	u8 count = nv_ro08(bios, init->offset + 5);
+
+	trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr);
+	init->offset += 6;
+
+	while (count--) {
+		u32 data = nv_ro32(bios, init->offset);
+		trace("\t0x%08x\n", data);
+		init_wr32(init, addr, data);
+		init->offset += 4;
+	}
+}
+
+/**
+ * INIT_XLAT - opcode 0x96
+ *
+ */
+static void
+init_xlat(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 saddr = nv_ro32(bios, init->offset + 1);
+	u8 sshift = nv_ro08(bios, init->offset + 5);
+	u8  smask = nv_ro08(bios, init->offset + 6);
+	u8  index = nv_ro08(bios, init->offset + 7);
+	u32 daddr = nv_ro32(bios, init->offset + 8);
+	u32 dmask = nv_ro32(bios, init->offset + 12);
+	u8  shift = nv_ro08(bios, init->offset + 16);
+	u32 data;
+
+	trace("INIT_XLAT\tR[0x%06x] &= 0x%08x |= "
+	      "(X%02x((R[0x%06x] %s 0x%02x) & 0x%02x) << 0x%02x)\n",
+	      daddr, dmask, index, saddr, (sshift & 0x80) ? "<<" : ">>",
+	      (sshift & 0x80) ? (0x100 - sshift) : sshift, smask, shift);
+	init->offset += 17;
+
+	data = init_shift(init_rd32(init, saddr), sshift) & smask;
+	data = init_xlat_(init, index, data) << shift;
+	init_mask(init, daddr, ~dmask, data);
+}
+
+/**
+ * INIT_ZM_MASK_ADD - opcode 0x97
+ *
+ */
+static void
+init_zm_mask_add(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 addr = nv_ro32(bios, init->offset + 1);
+	u32 mask = nv_ro32(bios, init->offset + 5);
+	u32  add = nv_ro32(bios, init->offset + 9);
+	u32 data;
+
+	trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
+	init->offset += 13;
+
+	data =  init_rd32(init, addr);
+	data = (data & mask) | ((data + add) & ~mask);
+	init_wr32(init, addr, data);
+}
+
+/**
+ * INIT_AUXCH - opcode 0x98
+ *
+ */
+static void
+init_auxch(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 addr = nv_ro32(bios, init->offset + 1);
+	u8 count = nv_ro08(bios, init->offset + 5);
+
+	trace("AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
+	init->offset += 6;
+
+	while (count--) {
+		u8 mask = nv_ro08(bios, init->offset + 0);
+		u8 data = nv_ro08(bios, init->offset + 1);
+		trace("\tAUX[0x%08x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
+		mask = init_rdauxr(init, addr) & mask;
+		init_wrauxr(init, addr, mask | data);
+		init->offset += 2;
+	}
+}
+
+/**
+ * INIT_ZM_AUXCH - opcode 0x99
+ *
+ */
+static void
+init_zm_auxch(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u32 addr = nv_ro32(bios, init->offset + 1);
+	u8 count = nv_ro08(bios, init->offset + 5);
+
+	trace("ZM_AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
+	init->offset += 6;
+
+	while (count--) {
+		u8 data = nv_ro08(bios, init->offset + 0);
+		trace("\tAUX[0x%08x] = 0x%02x\n", addr, data);
+		init_wrauxr(init, addr, data);
+		init->offset += 1;
+	}
+}
+
+/**
+ * INIT_I2C_LONG_IF - opcode 0x9a
+ *
+ */
+static void
+init_i2c_long_if(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	u8 index = nv_ro08(bios, init->offset + 1);
+	u8  addr = nv_ro08(bios, init->offset + 2) >> 1;
+	u8 reglo = nv_ro08(bios, init->offset + 3);
+	u8 reghi = nv_ro08(bios, init->offset + 4);
+	u8  mask = nv_ro08(bios, init->offset + 5);
+	u8  data = nv_ro08(bios, init->offset + 6);
+	struct nouveau_i2c_port *port;
+
+	trace("I2C_LONG_IF\t"
+	      "I2C[0x%02x][0x%02x][0x%02x%02x] & 0x%02x == 0x%02x\n",
+	      index, addr, reglo, reghi, mask, data);
+	init->offset += 7;
+
+	port = init_i2c(init, index);
+	if (port) {
+		u8 i[2] = { reghi, reglo };
+		u8 o[1] = {};
+		struct i2c_msg msg[] = {
+			{ .addr = addr, .flags = 0, .len = 2, .buf = i },
+			{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = o }
+		};
+		int ret;
+
+		ret = i2c_transfer(&port->adapter, msg, 2);
+		if (ret == 2 && ((o[0] & mask) == data))
+			return;
+	}
+
+	init_exec_set(init, false);
+}
+
+/**
+ * INIT_GPIO_NE - opcode 0xa9
+ *
+ */
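+/* reset every GPIO function *except* those listed in the opcode's
+ * payload -- the "NE" appears to stand for "not equal", i.e. an
+ * exclusion list
+ */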
+static void
+init_gpio_ne(struct nvbios_init *init)
+{
+	struct nouveau_bios *bios = init->bios;
+	struct nouveau_gpio *gpio = nouveau_gpio(bios);
+	struct dcb_gpio_func func;
+	u8 count = nv_ro08(bios, init->offset + 1);
+	u8 idx = 0, ver, len;
+	u16 data, i;
+
+	trace("GPIO_NE\t");
+	init->offset += 2;
+
+	for (i = init->offset; i < init->offset + count; i++)
+		cont("0x%02x ", nv_ro08(bios, i));
+	cont("\n");
+
+	while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
+		if (func.func != DCB_GPIO_UNUSED) {
+			for (i = init->offset; i < init->offset + count; i++) {
+				if (func.func == nv_ro08(bios, i))
+					break;
+			}
+
+			trace("\tFUNC[0x%02x]", func.func);
+			if (i == (init->offset + count)) {
+				cont(" *");
+				if (init_exec(init) && gpio && gpio->reset)
+					gpio->reset(gpio, func.func);
+			}
+			cont("\n");
+		}
+	}
+
+	init->offset += count;
+}
+
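+/* opcode dispatch table, indexed directly by opcode byte; NULL entries
+ * are opcodes this parser doesn't implement
+ */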
+static struct nvbios_init_opcode {
+	void (*exec)(struct nvbios_init *);
+} init_opcode[] = {
+	[0x32] = { init_io_restrict_prog },
+	[0x33] = { init_repeat },
+	[0x34] = { init_io_restrict_pll },
+	[0x36] = { init_end_repeat },
+	[0x37] = { init_copy },
+	[0x38] = { init_not },
+	[0x39] = { init_io_flag_condition },
+	[0x3a] = { init_dp_condition },
+	[0x3b] = { init_io_mask_or },
+	[0x3c] = { init_io_or },
+	[0x49] = { init_idx_addr_latched },
+	[0x4a] = { init_io_restrict_pll2 },
+	[0x4b] = { init_pll2 },
+	[0x4c] = { init_i2c_byte },
+	[0x4d] = { init_zm_i2c_byte },
+	[0x4e] = { init_zm_i2c },
+	[0x4f] = { init_tmds },
+	[0x50] = { init_zm_tmds_group },
+	[0x51] = { init_cr_idx_adr_latch },
+	[0x52] = { init_cr },
+	[0x53] = { init_zm_cr },
+	[0x54] = { init_zm_cr_group },
+	[0x56] = { init_condition_time },
+	[0x57] = { init_ltime },
+	[0x58] = { init_zm_reg_sequence },
+	[0x5b] = { init_sub_direct },
+	[0x5c] = { init_jump },
+	[0x5e] = { init_i2c_if },
+	[0x5f] = { init_copy_nv_reg },
+	[0x62] = { init_zm_index_io },
+	[0x63] = { init_compute_mem },
+	[0x65] = { init_reset },
+	[0x66] = { init_configure_mem },
+	[0x67] = { init_configure_clk },
+	[0x68] = { init_configure_preinit },
+	[0x69] = { init_io },
+	[0x6b] = { init_sub },
+	[0x6d] = { init_ram_condition },
+	[0x6e] = { init_nv_reg },
+	[0x6f] = { init_macro },
+	[0x71] = { init_done },
+	[0x72] = { init_resume },
+	[0x74] = { init_time },
+	[0x75] = { init_condition },
+	[0x76] = { init_io_condition },
+	[0x78] = { init_index_io },
+	[0x79] = { init_pll },
+	[0x7a] = { init_zm_reg },
+	[0x87] = { init_ram_restrict_pll },
+	[0x8c] = { init_reserved },
+	[0x8d] = { init_reserved },
+	[0x8e] = { init_gpio },
+	[0x8f] = { init_ram_restrict_zm_reg_group },
+	[0x90] = { init_copy_zm_reg },
+	[0x91] = { init_zm_reg_group },
+	[0x92] = { init_reserved },
+	[0x96] = { init_xlat },
+	[0x97] = { init_zm_mask_add },
+	[0x98] = { init_auxch },
+	[0x99] = { init_zm_auxch },
+	[0x9a] = { init_i2c_long_if },
+	[0xa9] = { init_gpio_ne },
+	[0xaa] = { init_reserved },
+};
+
+#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
+
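+/* execute the script at init->offset: each handler advances the offset
+ * past its opcode, and INIT_DONE (or INIT_END_REPEAT during a repeat
+ * pass) terminates the loop by zeroing it
+ */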
+int
+nvbios_exec(struct nvbios_init *init)
+{
+	init->nested++;
+	while (init->offset) {
+		u8 opcode = nv_ro08(init->bios, init->offset);
+		if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) {
+			error("unknown opcode 0x%02x\n", opcode);
+			return -EINVAL;
+		}
+
+		init_opcode[opcode].exec(init);
+	}
+	init->nested--;
+	return 0;
+}
+
+int
+nvbios_init(struct nouveau_subdev *subdev, bool execute)
+{
+	struct nouveau_bios *bios = nouveau_bios(subdev);
+	int ret = 0;
+	int i = -1;
+	u16 data;
+
+	if (execute)
+		nv_info(bios, "running init tables\n");
+	while (!ret && (data = (init_script(bios, ++i)))) {
+		struct nvbios_init init = {
+			.subdev = subdev,
+			.bios = bios,
+			.offset = data,
+			.outp = NULL,
+			.crtc = -1,
+			.execute = execute ? 1 : 0,
+		};
+
+		ret = nvbios_exec(&init);
+	}
+
+	/* the vbios parser will run this right after the normal init
+	 * tables, whereas the binary driver appears to run it later.
+	 */
+	if (!ret && (data = init_unknown_script(bios))) {
+		struct nvbios_init init = {
+			.subdev = subdev,
+			.bios = bios,
+			.offset = data,
+			.outp = NULL,
+			.crtc = -1,
+			.execute = execute ? 1 : 0,
+		};
+
+		ret = nvbios_exec(&init);
+	}
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c
new file mode 100644
index 0000000..2610b11
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/mxm.h>
+
+u16
+mxm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr)
+{
+	struct bit_entry x;
+
+	if (bit_entry(bios, 'x', &x)) {
+		nv_debug(bios, "BIT 'x' table not present\n");
+		return 0x0000;
+	}
+
+	*ver = x.version;
+	*hdr = x.length;
+	if (*ver != 1 || *hdr < 3) {
+		nv_warn(bios, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
+		return 0x0000;
+	}
+
+	return x.offset;
+}
+
+/* These map MXM v2.x digital connection values to the appropriate SOR/link,
+ * hopefully they're correct for all boards within the same chipset...
+ *
+ * MXM v3.x VBIOS are nicer and provide pointers to these tables.
+ */
+static u8 nv84_sor_map[16] = {
+	0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv92_sor_map[16] = {
+	0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+	0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv94_sor_map[16] = {
+	0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
+	0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv98_sor_map[16] = {
+	0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
+	0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+u8
+mxm_sor_map(struct nouveau_bios *bios, u8 conn)
+{
+	u8  ver, hdr;
+	u16 mxm = mxm_table(bios, &ver, &hdr);
+	if (mxm && hdr >= 6) {
+		u16 map = nv_ro16(bios, mxm + 4);
+		if (map) {
+			ver = nv_ro08(bios, map);
+			if (ver == 0x10) {
+				if (conn < nv_ro08(bios, map + 3)) {
+					map += nv_ro08(bios, map + 1);
+					map += conn;
+					return nv_ro08(bios, map);
+				}
+
+				return 0x00;
+			}
+
+			nv_warn(bios, "unknown sor map v%02x\n", ver);
+		}
+	}
+
+	if (bios->version.chip == 0x84 || bios->version.chip == 0x86)
+		return nv84_sor_map[conn];
+	if (bios->version.chip == 0x92)
+		return nv92_sor_map[conn];
+	if (bios->version.chip == 0x94 || bios->version.chip == 0x96)
+		return nv94_sor_map[conn];
+	if (bios->version.chip == 0x98)
+		return nv98_sor_map[conn];
+
+	nv_warn(bios, "missing sor map\n");
+	return 0x00;
+}
+
+u8
+mxm_ddc_map(struct nouveau_bios *bios, u8 port)
+{
+	u8  ver, hdr;
+	u16 mxm = mxm_table(bios, &ver, &hdr);
+	if (mxm && hdr >= 8) {
+		u16 map = nv_ro16(bios, mxm + 6);
+		if (map) {
+			ver = nv_ro08(bios, map);
+			if (ver == 0x10) {
+				if (port < nv_ro08(bios, map + 3)) {
+					map += nv_ro08(bios, map + 1);
+					map += port;
+					return nv_ro08(bios, map);
+				}
+
+				return 0x00;
+			}
+
+			nv_warn(bios, "unknown ddc map v%02x\n", ver);
+		}
+	}
+
+	/* v2.x: directly write port as dcb i2cidx */
+	return (port << 4) | port;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
new file mode 100644
index 0000000..bcbb056
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/perf.h>
+
+static u16
+perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct bit_entry bit_P;
+	u16 perf = 0x0000;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version <= 2) {
+			perf = nv_ro16(bios, bit_P.offset + 0);
+			if (perf) {
+				*ver = nv_ro08(bios, perf + 0);
+				*hdr = nv_ro08(bios, perf + 1);
+			}
+		} else
+			nv_error(bios, "unknown offset for perf in BIT P %d\n",
+				bit_P.version);
+	}
+
+	/* a BMP-style pointer, when present, takes precedence over BIT 'P';
+	 * note the version and header-length bytes trade places here
+	 */
+	if (bios->bmp_offset) {
+		if (nv_ro08(bios, bios->bmp_offset + 6) >= 0x25) {
+			perf = nv_ro16(bios, bios->bmp_offset + 0x94);
+			if (perf) {
+				*hdr = nv_ro08(bios, perf + 0);
+				*ver = nv_ro08(bios, perf + 1);
+			}
+		}
+	}
+
+	return perf;
+}
+
+int
+nvbios_perf_fan_parse(struct nouveau_bios *bios,
+		      struct nvbios_perf_fan *fan)
+{
+	u8 ver = 0, hdr = 0, cnt = 0, len = 0;
+	u16 perf = perf_table(bios, &ver, &hdr, &cnt, &len);
+	if (!perf)
+		return -ENODEV;
+
+	if (ver >= 0x20 && ver < 0x40 && hdr > 6)
+		fan->pwm_divisor = nv_ro16(bios, perf + 6);
+	else
+		fan->pwm_divisor = 0;
+
+	return 0;
+}
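+
+/* Usage sketch (hypothetical caller, not part of this file): a fan driver
+ * can treat a missing performance table as "no divisor override":
+ *
+ *	struct nvbios_perf_fan fan = {};
+ *	if (nvbios_perf_fan_parse(bios, &fan))
+ *		fan.pwm_divisor = 0;	// -ENODEV: no perf table found
+ */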
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
new file mode 100644
index 0000000..f835501
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright 2005-2006 Erik Waling
+ * Copyright 2006 Stephane Marchesin
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <subdev/vga.h>
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/bmp.h>
+#include <subdev/bios/pll.h>
+
+struct pll_mapping {
+	u8  type;
+	u32 reg;
+};
+
+static struct pll_mapping
+nv04_pll_mapping[] = {
+	{ PLL_CORE  , 0x680500 },
+	{ PLL_MEMORY, 0x680504 },
+	{ PLL_VPLL0 , 0x680508 },
+	{ PLL_VPLL1 , 0x680520 },
+	{}
+};
+
+static struct pll_mapping
+nv40_pll_mapping[] = {
+	{ PLL_CORE  , 0x004000 },
+	{ PLL_MEMORY, 0x004020 },
+	{ PLL_VPLL0 , 0x680508 },
+	{ PLL_VPLL1 , 0x680520 },
+	{}
+};
+
+static struct pll_mapping
+nv50_pll_mapping[] = {
+	{ PLL_CORE  , 0x004028 },
+	{ PLL_SHADER, 0x004020 },
+	{ PLL_UNK03 , 0x004000 },
+	{ PLL_MEMORY, 0x004008 },
+	{ PLL_UNK40 , 0x00e810 },
+	{ PLL_UNK41 , 0x00e818 },
+	{ PLL_UNK42 , 0x00e824 },
+	{ PLL_VPLL0 , 0x614100 },
+	{ PLL_VPLL1 , 0x614900 },
+	{}
+};
+
+static struct pll_mapping
+nv84_pll_mapping[] = {
+	{ PLL_CORE  , 0x004028 },
+	{ PLL_SHADER, 0x004020 },
+	{ PLL_MEMORY, 0x004008 },
+	{ PLL_VDEC  , 0x004030 },
+	{ PLL_UNK41 , 0x00e818 },
+	{ PLL_VPLL0 , 0x614100 },
+	{ PLL_VPLL1 , 0x614900 },
+	{}
+};
+
+static u16
+pll_limits_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct bit_entry bit_C;
+
+	if (!bit_entry(bios, 'C', &bit_C) && bit_C.length >= 10) {
+		u16 data = nv_ro16(bios, bit_C.offset + 8);
+		if (data) {
+			*ver = nv_ro08(bios, data + 0);
+			*hdr = nv_ro08(bios, data + 1);
+			*len = nv_ro08(bios, data + 2);
+			*cnt = nv_ro08(bios, data + 3);
+			return data;
+		}
+	}
+
+	if (bmp_version(bios) >= 0x0524) {
+		u16 data = nv_ro16(bios, bios->bmp_offset + 142);
+		if (data) {
+			*ver = nv_ro08(bios, data + 0);
+			*hdr = 1;
+			*cnt = 1;
+			*len = 0x18;
+			return data;
+		}
+	}
+
+	*ver = 0x00;
+	return 0x0000;
+}
+
+static struct pll_mapping *
+pll_map(struct nouveau_bios *bios)
+{
+	switch (nv_device(bios)->card_type) {
+	case NV_04:
+	case NV_10:
+	case NV_20:
+	case NV_30:
+		return nv04_pll_mapping;
+	case NV_40:
+		return nv40_pll_mapping;
+	case NV_50:
+		if (nv_device(bios)->chipset == 0x50)
+			return nv50_pll_mapping;
+		else
+		if (nv_device(bios)->chipset <  0xa3 ||
+		    nv_device(bios)->chipset == 0xaa ||
+		    nv_device(bios)->chipset == 0xac)
+			return nv84_pll_mapping;
+	default:
+		return NULL;
+	}
+}
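+
+/* Note: NV_50-family chipsets >= 0xa3 (other than 0xaa/0xac) fall through
+ * to NULL above; their VBIOS should always carry a v3.0+ limits table with
+ * explicit register addresses, so the static fallback map is never
+ * consulted for them -- an assumption the lookup helpers below rely on,
+ * since they dereference the map without a NULL check.
+ */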
+
+static u16
+pll_map_reg(struct nouveau_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
+{
+	struct pll_mapping *map;
+	u8  hdr, cnt;
+	u16 data;
+
+	data = pll_limits_table(bios, ver, &hdr, &cnt, len);
+	if (data && *ver >= 0x30) {
+		data += hdr;
+		while (cnt--) {
+			if (nv_ro32(bios, data + 3) == reg) {
+				*type = nv_ro08(bios, data + 0);
+				return data;
+			}
+			data += *len;
+		}
+		return 0x0000;
+	}
+
+	map = pll_map(bios);
+	while (map->reg) {
+		if (map->reg == reg && *ver >= 0x20) {
+			u16 addr = (data += hdr);
+			*type = map->type;
+			while (cnt--) {
+				if (nv_ro32(bios, data) == map->reg)
+					return data;
+				data += *len;
+			}
+			return addr;
+		} else
+		if (map->reg == reg) {
+			*type = map->type;
+			return data + 1;
+		}
+		map++;
+	}
+
+	return 0x0000;
+}
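+
+/* The three layouts handled above (and mirrored in pll_map_type() below):
+ * v3.0+ entries embed the register address, so they are matched directly;
+ * v2.x entries also begin with a 32-bit register, but the static map
+ * supplies the type and the first entry is returned as a default when no
+ * register matches; pre-v2.0 tables hold a single entry right after the
+ * version byte, hence "data + 1".
+ */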
+
+static u16
+pll_map_type(struct nouveau_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
+{
+	struct pll_mapping *map;
+	u8  hdr, cnt;
+	u16 data;
+
+	data = pll_limits_table(bios, ver, &hdr, &cnt, len);
+	if (data && *ver >= 0x30) {
+		data += hdr;
+		while (cnt--) {
+			if (nv_ro08(bios, data + 0) == type) {
+				*reg = nv_ro32(bios, data + 3);
+				return data;
+			}
+			data += *len;
+		}
+		return 0x0000;
+	}
+
+	map = pll_map(bios);
+	while (map->reg) {
+		if (map->type == type && *ver >= 0x20) {
+			u16 addr = (data += hdr);
+			*reg = map->reg;
+			while (cnt--) {
+				if (nv_ro32(bios, data) == map->reg)
+					return data;
+				data += *len;
+			}
+			return addr;
+		} else
+		if (map->type == type) {
+			*reg = map->reg;
+			return data + 1;
+		}
+		map++;
+	}
+
+	return 0x0000;
+}
+
+int
+nvbios_pll_parse(struct nouveau_bios *bios, u32 type, struct nvbios_pll *info)
+{
+	u8  ver, len;
+	u32 reg = type;
+	u16 data;
+
+	if (type > PLL_MAX) {
+		reg  = type;
+		data = pll_map_reg(bios, reg, &type, &ver, &len);
+	} else {
+		data = pll_map_type(bios, type, &reg, &ver, &len);
+	}
+
+	if (ver && !data)
+		return -ENOENT;
+
+	memset(info, 0, sizeof(*info));
+	info->type = type;
+	info->reg = reg;
+
+	switch (ver) {
+	case 0x00:
+		break;
+	case 0x10:
+	case 0x11:
+		info->vco1.min_freq = nv_ro32(bios, data + 0);
+		info->vco1.max_freq = nv_ro32(bios, data + 4);
+		info->vco2.min_freq = nv_ro32(bios, data + 8);
+		info->vco2.max_freq = nv_ro32(bios, data + 12);
+		info->vco1.min_inputfreq = nv_ro32(bios, data + 16);
+		info->vco2.min_inputfreq = nv_ro32(bios, data + 20);
+		info->vco1.max_inputfreq = INT_MAX;
+		info->vco2.max_inputfreq = INT_MAX;
+
+		info->max_p = 0x7;
+		info->max_p_usable = 0x6;
+
+		/* these values taken from nv30/31/36 */
+		switch (bios->version.chip) {
+		case 0x36:
+			info->vco1.min_n = 0x5;
+			break;
+		default:
+			info->vco1.min_n = 0x1;
+			break;
+		}
+		info->vco1.max_n = 0xff;
+		info->vco1.min_m = 0x1;
+		info->vco1.max_m = 0xd;
+
+		/*
+		 * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this
+		 * table version (apart from nv35)), N2 is compared to
+		 * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and
+		 * save a comparison
+		 */
+		info->vco2.min_n = 0x4;
+		switch (bios->version.chip) {
+		case 0x30:
+		case 0x35:
+			info->vco2.max_n = 0x1f;
+			break;
+		default:
+			info->vco2.max_n = 0x28;
+			break;
+		}
+		info->vco2.min_m = 0x1;
+		info->vco2.max_m = 0x4;
+		break;
+	case 0x20:
+	case 0x21:
+		info->vco1.min_freq = nv_ro16(bios, data + 4) * 1000;
+		info->vco1.max_freq = nv_ro16(bios, data + 6) * 1000;
+		info->vco2.min_freq = nv_ro16(bios, data + 8) * 1000;
+		info->vco2.max_freq = nv_ro16(bios, data + 10) * 1000;
+		info->vco1.min_inputfreq = nv_ro16(bios, data + 12) * 1000;
+		info->vco2.min_inputfreq = nv_ro16(bios, data + 14) * 1000;
+		info->vco1.max_inputfreq = nv_ro16(bios, data + 16) * 1000;
+		info->vco2.max_inputfreq = nv_ro16(bios, data + 18) * 1000;
+		info->vco1.min_n = nv_ro08(bios, data + 20);
+		info->vco1.max_n = nv_ro08(bios, data + 21);
+		info->vco1.min_m = nv_ro08(bios, data + 22);
+		info->vco1.max_m = nv_ro08(bios, data + 23);
+		info->vco2.min_n = nv_ro08(bios, data + 24);
+		info->vco2.max_n = nv_ro08(bios, data + 25);
+		info->vco2.min_m = nv_ro08(bios, data + 26);
+		info->vco2.max_m = nv_ro08(bios, data + 27);
+
+		info->max_p = nv_ro08(bios, data + 29);
+		info->max_p_usable = info->max_p;
+		if (bios->version.chip < 0x60)
+			info->max_p_usable = 0x6;
+		info->bias_p = nv_ro08(bios, data + 30);
+
+		if (len > 0x22)
+			info->refclk = nv_ro32(bios, data + 31);
+		break;
+	case 0x30:
+		data = nv_ro16(bios, data + 1);
+
+		info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
+		info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
+		info->vco2.min_freq = nv_ro16(bios, data + 4) * 1000;
+		info->vco2.max_freq = nv_ro16(bios, data + 6) * 1000;
+		info->vco1.min_inputfreq = nv_ro16(bios, data + 8) * 1000;
+		info->vco2.min_inputfreq = nv_ro16(bios, data + 10) * 1000;
+		info->vco1.max_inputfreq = nv_ro16(bios, data + 12) * 1000;
+		info->vco2.max_inputfreq = nv_ro16(bios, data + 14) * 1000;
+		info->vco1.min_n = nv_ro08(bios, data + 16);
+		info->vco1.max_n = nv_ro08(bios, data + 17);
+		info->vco1.min_m = nv_ro08(bios, data + 18);
+		info->vco1.max_m = nv_ro08(bios, data + 19);
+		info->vco2.min_n = nv_ro08(bios, data + 20);
+		info->vco2.max_n = nv_ro08(bios, data + 21);
+		info->vco2.min_m = nv_ro08(bios, data + 22);
+		info->vco2.max_m = nv_ro08(bios, data + 23);
+		info->max_p_usable = info->max_p = nv_ro08(bios, data + 25);
+		info->bias_p = nv_ro08(bios, data + 27);
+		info->refclk = nv_ro32(bios, data + 28);
+		break;
+	case 0x40:
+		info->refclk = nv_ro16(bios, data + 9) * 1000;
+		data = nv_ro16(bios, data + 1);
+
+		info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
+		info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
+		info->vco1.min_inputfreq = nv_ro16(bios, data + 4) * 1000;
+		info->vco1.max_inputfreq = nv_ro16(bios, data + 6) * 1000;
+		info->vco1.min_m = nv_ro08(bios, data + 8);
+		info->vco1.max_m = nv_ro08(bios, data + 9);
+		info->vco1.min_n = nv_ro08(bios, data + 10);
+		info->vco1.max_n = nv_ro08(bios, data + 11);
+		info->min_p = nv_ro08(bios, data + 12);
+		info->max_p = nv_ro08(bios, data + 13);
+		break;
+	default:
+		nv_error(bios, "unknown pll limits version 0x%02x\n", ver);
+		return -EINVAL;
+	}
+
+	if (!info->refclk) {
+		info->refclk = nv_device(bios)->crystal;
+		if (bios->version.chip == 0x51) {
+			u32 sel_clk = nv_rd32(bios, 0x680524);
+			if ((info->reg == 0x680508 && sel_clk & 0x20) ||
+			    (info->reg == 0x680520 && sel_clk & 0x80)) {
+				if (nv_rdvgac(bios, 0, 0x27) < 0xa3)
+					info->refclk = 200000;
+				else
+					info->refclk = 25000;
+			}
+		}
+	}
+
+	/*
+	 * By now any valid limit table ought to have set a max frequency for
+	 * vco1, so if it's zero we're either dealing with a pre-limit-table
+	 * BIOS, or one with an empty limit table (seen on nv18)
+	 */
+	if (!info->vco1.max_freq) {
+		info->vco1.max_freq = nv_ro32(bios, bios->bmp_offset + 67);
+		info->vco1.min_freq = nv_ro32(bios, bios->bmp_offset + 71);
+		if (bmp_version(bios) < 0x0506) {
+			info->vco1.max_freq = 256000;
+			info->vco1.min_freq = 128000;
+		}
+
+		info->vco1.min_inputfreq = 0;
+		info->vco1.max_inputfreq = INT_MAX;
+		info->vco1.min_n = 0x1;
+		info->vco1.max_n = 0xff;
+		info->vco1.min_m = 0x1;
+
+		if (nv_device(bios)->crystal == 13500) {
+			/* nv05 does this, nv11 doesn't, nv10 unknown */
+			if (bios->version.chip < 0x11)
+				info->vco1.min_m = 0x7;
+			info->vco1.max_m = 0xd;
+		} else {
+			if (bios->version.chip < 0x11)
+				info->vco1.min_m = 0x8;
+			info->vco1.max_m = 0xe;
+		}
+
+		if (bios->version.chip <  0x17 ||
+		    bios->version.chip == 0x1a ||
+		    bios->version.chip == 0x20)
+			info->max_p = 4;
+		else
+			info->max_p = 5;
+		info->max_p_usable = info->max_p;
+	}
+
+	return 0;
+}
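+
+/* Usage sketch (illustrative): callers ask either by PLL type or by
+ * register address, e.g.:
+ *
+ *	struct nvbios_pll info;
+ *	if (!nvbios_pll_parse(bios, PLL_CORE, &info))
+ *		nv_debug(bios, "core pll: vco1 %d-%d kHz, ref %d kHz\n",
+ *			 info.vco1.min_freq, info.vco1.max_freq, info.refclk);
+ */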
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
new file mode 100644
index 0000000..22a2057
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/therm.h>
+
+static u16
+therm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
+{
+	struct bit_entry bit_P;
+	u16 therm = 0;
+
+	if (!bit_entry(bios, 'P', &bit_P)) {
+		if (bit_P.version == 1)
+			therm = nv_ro16(bios, bit_P.offset + 12);
+		else if (bit_P.version == 2)
+			therm = nv_ro16(bios, bit_P.offset + 16);
+		else
+			nv_error(bios,
+				"unknown offset for thermal in BIT P %d\n",
+				bit_P.version);
+	}
+
+	/* exit now if we haven't found the thermal table */
+	if (!therm)
+		return 0x0000;
+
+	*ver = nv_ro08(bios, therm + 0);
+	*hdr = nv_ro08(bios, therm + 1);
+	*len = nv_ro08(bios, therm + 2);
+	*cnt = nv_ro08(bios, therm + 3);
+
+	return therm + nv_ro08(bios, therm + 1);
+}
+
+static u16
+nvbios_therm_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
+{
+	u8 hdr, cnt;
+	u16 therm = therm_table(bios, ver, &hdr, len, &cnt);
+	if (therm && idx < cnt)
+		return therm + idx * *len;
+	return 0x0000;
+}
+
+int
+nvbios_therm_sensor_parse(struct nouveau_bios *bios,
+			  enum nvbios_therm_domain domain,
+			  struct nvbios_therm_sensor *sensor)
+{
+	s8 thrs_section, sensor_section, offset;
+	u8 ver, len, i;
+	u16 entry;
+
+	/* we only support the core domain for now */
+	if (domain != NVBIOS_THERM_DOMAIN_CORE)
+		return -EINVAL;
+
+	/* Read the entries from the table */
+	thrs_section = 0;
+	sensor_section = -1;
+	i = 0;
+	while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
+		s16 value = nv_ro16(bios, entry + 1);
+
+		switch (nv_ro08(bios, entry + 0)) {
+		case 0x0:
+			thrs_section = value;
+			if (value > 0)
+				return 0; /* we do not try to support ambient */
+			break;
+		case 0x01:
+			sensor_section++;
+			if (sensor_section == 0) {
+				offset = ((s8) nv_ro08(bios, entry + 2)) / 2;
+				sensor->offset_constant = offset;
+			}
+			break;
+
+		case 0x04:
+			if (thrs_section == 0) {
+				sensor->thrs_critical.temp = (value & 0xff0) >> 4;
+				sensor->thrs_critical.hysteresis = value & 0xf;
+			}
+			break;
+
+		case 0x07:
+			if (thrs_section == 0) {
+				sensor->thrs_down_clock.temp = (value & 0xff0) >> 4;
+				sensor->thrs_down_clock.hysteresis = value & 0xf;
+			}
+			break;
+
+		case 0x08:
+			if (thrs_section == 0) {
+				sensor->thrs_fan_boost.temp = (value & 0xff0) >> 4;
+				sensor->thrs_fan_boost.hysteresis = value & 0xf;
+			}
+			break;
+
+		case 0x10:
+			if (sensor_section == 0)
+				sensor->offset_num = value;
+			break;
+
+		case 0x11:
+			if (sensor_section == 0)
+				sensor->offset_den = value;
+			break;
+
+		case 0x12:
+			if (sensor_section == 0)
+				sensor->slope_mult = value;
+			break;
+
+		case 0x13:
+			if (sensor_section == 0)
+				sensor->slope_div = value;
+			break;
+		case 0x32:
+			if (thrs_section == 0) {
+				sensor->thrs_shutdown.temp = (value & 0xff0) >> 4;
+				sensor->thrs_shutdown.hysteresis = value & 0xf;
+			}
+			break;
+		}
+	}
+
+	return 0;
+}
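+
+/* Worked example: threshold entries pack temperature and hysteresis into
+ * one 16-bit value; a critical entry (id 0x04) of 0x0503 decodes as
+ * temp = (0x0503 & 0xff0) >> 4 = 80 degrees, hysteresis = 0x0503 & 0xf = 3.
+ */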
+
+int
+nvbios_therm_fan_parse(struct nouveau_bios *bios,
+			  struct nvbios_therm_fan *fan)
+{
+	struct nouveau_therm_trip_point *cur_trip = NULL;
+	u8 ver, len, i;
+	u16 entry;
+
+	uint8_t duty_lut[] = { 0, 0, 25, 0, 40, 0, 50, 0,
+				75, 0, 85, 0, 100, 0, 100, 0 };
+
+	i = 0;
+	fan->nr_fan_trip = 0;
+	while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
+		s16 value = nv_ro16(bios, entry + 1);
+
+		switch (nv_ro08(bios, entry + 0)) {
+		case 0x22:
+			fan->min_duty = value & 0xff;
+			fan->max_duty = (value & 0xff00) >> 8;
+			break;
+		case 0x24:
+			fan->nr_fan_trip++;
+			cur_trip = &fan->trip[fan->nr_fan_trip - 1];
+			cur_trip->hysteresis = value & 0xf;
+			cur_trip->temp = (value & 0xff0) >> 4;
+			cur_trip->fan_duty = duty_lut[(value & 0xf000) >> 12];
+			break;
+		case 0x25:
+			/* exact duty for the trip added by a preceding 0x24 */
+			cur_trip = &fan->trip[fan->nr_fan_trip - 1];
+			cur_trip->fan_duty = value;
+			break;
+		case 0x26:
+			fan->pwm_freq = value;
+			break;
+		case 0x3b:
+			fan->bump_period = value;
+			break;
+		case 0x3c:
+			fan->slow_down_period = value;
+			break;
+		case 0x46:
+			fan->linear_min_temp = nv_ro08(bios, entry + 1);
+			fan->linear_max_temp = nv_ro08(bios, entry + 2);
+			break;
+		}
+	}
+
+	return 0;
+}
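+
+/* Worked example: a trip entry (id 0x24) of 0x45a0 decodes as temp
+ * (0x5a0 >> 4) = 90 degrees, hysteresis 0, and duty duty_lut[4] = 40%,
+ * which a following 0x25 entry may then override with an exact duty.
+ */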
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c
new file mode 100644
index 0000000..e9b8e5d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/gpio.h>
+#include <subdev/bios/xpio.h>
+
+static u16
+dcb_xpiod_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len);
+	if (data && *ver >= 0x40 && *hdr >= 0x06) {
+		u16 xpio = nv_ro16(bios, data + 0x04);
+		if (xpio) {
+			*ver = nv_ro08(bios, xpio + 0x00);
+			*hdr = nv_ro08(bios, xpio + 0x01);
+			*cnt = nv_ro08(bios, xpio + 0x02);
+			*len = nv_ro08(bios, xpio + 0x03);
+			return xpio;
+		}
+	}
+	return 0x0000;
+}
+
+u16
+dcb_xpio_table(struct nouveau_bios *bios, u8 idx,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len);
+	if (data && idx < *cnt) {
+		u16 xpio = nv_ro16(bios, data + *hdr + (idx * *len));
+		if (xpio) {
+			*ver = nv_ro08(bios, xpio + 0x00);
+			*hdr = nv_ro08(bios, xpio + 0x01);
+			*cnt = nv_ro08(bios, xpio + 0x02);
+			*len = nv_ro08(bios, xpio + 0x03);
+			return xpio;
+		}
+	}
+	return 0x0000;
+}
+
+u16
+dcb_xpio_parse(struct nouveau_bios *bios, u8 idx,
+	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+	       struct nvbios_xpio *info)
+{
+	u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len);
+	if (data && *len >= 6) {
+		info->type = nv_ro08(bios, data + 0x04);
+		info->addr = nv_ro08(bios, data + 0x05);
+		info->flags = nv_ro08(bios, data + 0x06);
+	}
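+	/* note: parse results land in *info; the return value is always
+	 * 0x0000 here
+	 */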
+	return 0x0000;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
new file mode 100644
index 0000000..8c7f805
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ *          Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nv04_bus_priv {
+	struct nouveau_bus base;
+};
+
+static void
+nv04_bus_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_bus *pbus = nouveau_bus(subdev);
+	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
+	if (stat & 0x00000001) {
+		nv_error(pbus, "BUS ERROR\n");
+		stat &= ~0x00000001;
+		nv_wr32(pbus, 0x001100, 0x00000001);
+	}
+
+	if (stat & 0x00000110) {
+		subdev = nouveau_subdev(subdev, NVDEV_SUBDEV_GPIO);
+		if (subdev && subdev->intr)
+			subdev->intr(subdev);
+		stat &= ~0x00000110;
+		nv_wr32(pbus, 0x001100, 0x00000110);
+	}
+
+	if (stat) {
+		nv_error(pbus, "unknown intr 0x%08x\n", stat);
+		nv_mask(pbus, 0x001140, stat, 0x00000000);
+	}
+}
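+
+/* The shape above repeats in every bus implementation below: read the
+ * pending bits masked by the enable register, acknowledge each handled
+ * source by writing its bit back to 0x001100, and mask off (in 0x001140)
+ * any leftover unknown source so it cannot storm the CPU.
+ */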
+
+static int
+nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv04_bus_priv *priv;
+	int ret;
+
+	ret = nouveau_bus_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nv04_bus_intr;
+	return 0;
+}
+
+static int
+nv04_bus_init(struct nouveau_object *object)
+{
+	struct nv04_bus_priv *priv = (void *)object;
+
+	nv_wr32(priv, 0x001100, 0xffffffff);
+	nv_wr32(priv, 0x001140, 0x00000111);
+
+	return nouveau_bus_init(&priv->base);
+}
+
+struct nouveau_oclass
+nv04_bus_oclass = {
+	.handle = NV_SUBDEV(BUS, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_bus_ctor,
+		.dtor = _nouveau_bus_dtor,
+		.init = nv04_bus_init,
+		.fini = _nouveau_bus_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
new file mode 100644
index 0000000..34132ae
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ *          Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nv31_bus_priv {
+	struct nouveau_bus base;
+};
+
+static void
+nv31_bus_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_bus *pbus = nouveau_bus(subdev);
+	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+	u32 gpio = nv_rd32(pbus, 0x001104) & nv_rd32(pbus, 0x001144);
+
+	if (gpio) {
+		subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_GPIO);
+		if (subdev && subdev->intr)
+			subdev->intr(subdev);
+	}
+
+	if (stat & 0x00000008) {  /* NV41- */
+		u32 addr = nv_rd32(pbus, 0x009084);
+		u32 data = nv_rd32(pbus, 0x009088);
+
+		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
+			 (addr & 0x00000002) ? "write" : "read", data,
+			 (addr & 0x00fffffc));
+
+		stat &= ~0x00000008;
+		nv_wr32(pbus, 0x001100, 0x00000008);
+	}
+
+	if (stat & 0x00070000) {
+		subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM);
+		if (subdev && subdev->intr)
+			subdev->intr(subdev);
+		stat &= ~0x00070000;
+		nv_wr32(pbus, 0x001100, 0x00070000);
+	}
+
+	if (stat) {
+		nv_error(pbus, "unknown intr 0x%08x\n", stat);
+		nv_mask(pbus, 0x001140, stat, 0x00000000);
+	}
+}
+
+static int
+nv31_bus_init(struct nouveau_object *object)
+{
+	struct nv31_bus_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_bus_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x001100, 0xffffffff);
+	nv_wr32(priv, 0x001140, 0x00070008);
+	return 0;
+}
+
+static int
+nv31_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv31_bus_priv *priv;
+	int ret;
+
+	ret = nouveau_bus_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nv31_bus_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nv31_bus_oclass = {
+	.handle = NV_SUBDEV(BUS, 0x31),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv31_bus_ctor,
+		.dtor = _nouveau_bus_dtor,
+		.init = nv31_bus_init,
+		.fini = _nouveau_bus_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
new file mode 100644
index 0000000..f5b2117
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ *          Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nv50_bus_priv {
+	struct nouveau_bus base;
+};
+
+static void
+nv50_bus_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_bus *pbus = nouveau_bus(subdev);
+	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
+	if (stat & 0x00000008) {
+		u32 addr = nv_rd32(pbus, 0x009084);
+		u32 data = nv_rd32(pbus, 0x009088);
+
+		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
+			 (addr & 0x00000002) ? "write" : "read", data,
+			 (addr & 0x00fffffc));
+
+		stat &= ~0x00000008;
+		nv_wr32(pbus, 0x001100, 0x00000008);
+	}
+
+	if (stat & 0x00010000) {
+		subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM);
+		if (subdev && subdev->intr)
+			subdev->intr(subdev);
+		stat &= ~0x00010000;
+		nv_wr32(pbus, 0x001100, 0x00010000);
+	}
+
+	if (stat) {
+		nv_error(pbus, "unknown intr 0x%08x\n", stat);
+		nv_mask(pbus, 0x001140, stat, 0);
+	}
+}
+
+static int
+nv50_bus_init(struct nouveau_object *object)
+{
+	struct nv50_bus_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_bus_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x001100, 0xffffffff);
+	nv_wr32(priv, 0x001140, 0x00010008);
+	return 0;
+}
+
+static int
+nv50_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv50_bus_priv *priv;
+	int ret;
+
+	ret = nouveau_bus_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nv50_bus_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_bus_oclass = {
+	.handle = NV_SUBDEV(BUS, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_bus_ctor,
+		.dtor = _nouveau_bus_dtor,
+		.init = nv50_bus_init,
+		.fini = _nouveau_bus_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
new file mode 100644
index 0000000..b192d62
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ *          Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nvc0_bus_priv {
+	struct nouveau_bus base;
+};
+
+static void
+nvc0_bus_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_bus *pbus = nouveau_bus(subdev);
+	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
+	if (stat & 0x0000000e) {
+		u32 addr = nv_rd32(pbus, 0x009084);
+		u32 data = nv_rd32(pbus, 0x009088);
+
+		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x [ %s%s%s]\n",
+			 (addr & 0x00000002) ? "write" : "read", data,
+			 (addr & 0x00fffffc),
+			 (stat & 0x00000002) ? "!ENGINE " : "",
+			 (stat & 0x00000004) ? "IBUS " : "",
+			 (stat & 0x00000008) ? "TIMEOUT " : "");
+
+		nv_wr32(pbus, 0x009084, 0x00000000);
+		nv_wr32(pbus, 0x001100, (stat & 0x0000000e));
+		stat &= ~0x0000000e;
+	}
+
+	if (stat) {
+		nv_error(pbus, "unknown intr 0x%08x\n", stat);
+		nv_mask(pbus, 0x001140, stat, 0x00000000);
+	}
+}
+
+static int
+nvc0_bus_init(struct nouveau_object *object)
+{
+	struct nvc0_bus_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_bus_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x001100, 0xffffffff);
+	nv_wr32(priv, 0x001140, 0x0000000e);
+	return 0;
+}
+
+static int
+nvc0_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nvc0_bus_priv *priv;
+	int ret;
+
+	ret = nouveau_bus_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nvc0_bus_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_bus_oclass = {
+	.handle = NV_SUBDEV(BUS, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_bus_ctor,
+		.dtor = _nouveau_bus_dtor,
+		.init = nvc0_bus_init,
+		.fini = _nouveau_bus_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
new file mode 100644
index 0000000..b7fd115
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+struct nv04_clock_priv {
+	struct nouveau_clock base;
+};
+
+static int
+powerctrl_1_shift(int chip_version, int reg)
+{
+	int shift = -4;
+
+	if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
+		return shift;
+
+	switch (reg) {
+	case 0x680520:
+		shift += 4;	/* fallthrough */
+	case 0x680508:
+		shift += 4;	/* fallthrough */
+	case 0x680504:
+		shift += 4;	/* fallthrough */
+	case 0x680500:
+		shift += 4;
+	}
+
+	/*
+	 * the shift for vpll regs is only used for nv3x chips with a single
+	 * stage pll
+	 */
+	if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
+			  chip_version == 0x36 || chip_version >= 0x40))
+		shift = -4;
+
+	return shift;
+}
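+
+/* Worked example: the fallthrough above adds 4 per case, giving shifts of
+ * 0 (0x680500), 4 (0x680504), 8 (0x680508) and 12 (0x680520); the vpll
+ * shifts (8 and 12) are then discarded again unless this is an nv3x part
+ * with a single-stage pll.
+ */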
+
+static void
+setPLL_single(struct nv04_clock_priv *priv, u32 reg,
+	      struct nouveau_pll_vals *pv)
+{
+	int chip_version = nouveau_bios(priv)->version.chip;
+	uint32_t oldpll = nv_rd32(priv, reg);
+	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
+	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
+	uint32_t saved_powerctrl_1 = 0;
+	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);
+
+	if (oldpll == pll)
+		return;	/* already set */
+
+	if (shift_powerctrl_1 >= 0) {
+		saved_powerctrl_1 = nv_rd32(priv, 0x001584);
+		nv_wr32(priv, 0x001584,
+			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
+			1 << shift_powerctrl_1);
+	}
+
+	if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
+		/* upclock -- write new post divider first */
+		nv_wr32(priv, reg, pv->log2P << 16 | (oldpll & 0xffff));
+	else
+		/* downclock -- write new NM first */
+		nv_wr32(priv, reg, (oldpll & 0xffff0000) | pv->NM1);
+
+	if (chip_version < 0x17 && chip_version != 0x11)
+		/* wait a bit on older chips */
+		msleep(64);
+	nv_rd32(priv, reg);
+
+	/* then write the other half as well */
+	nv_wr32(priv, reg, pll);
+
+	if (shift_powerctrl_1 >= 0)
+		nv_wr32(priv, 0x001584, saved_powerctrl_1);
+}
+
+static uint32_t
+new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
+{
+	bool head_a = (reg1 == 0x680508);
+
+	if (ss)	/* single stage pll mode */
+		ramdac580 |= head_a ? 0x00000100 : 0x10000000;
+	else
+		ramdac580 &= head_a ? 0xfffffeff : 0xefffffff;
+
+	return ramdac580;
+}
+
+static void
+setPLL_double_highregs(struct nv04_clock_priv *priv, u32 reg1,
+		       struct nouveau_pll_vals *pv)
+{
+	int chip_version = nouveau_bios(priv)->version.chip;
+	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
+	uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70);
+	uint32_t oldpll1 = nv_rd32(priv, reg1);
+	uint32_t oldpll2 = !nv3035 ? nv_rd32(priv, reg2) : 0;
+	uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
+	uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
+	uint32_t oldramdac580 = 0, ramdac580 = 0;
+	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;	/* nv41+ only */
+	uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
+	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);
+
+	/* model specific additions to generic pll1 and pll2 set up above */
+	if (nv3035) {
+		pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
+		       (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
+		pll2 = 0;
+	}
+	if (chip_version > 0x40 && reg1 >= 0x680508) { /* !nv40 */
+		oldramdac580 = nv_rd32(priv, 0x680580);
+		ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
+		if (oldramdac580 != ramdac580)
+			oldpll1 = ~0;	/* force mismatch */
+		if (single_stage)
+			/* magic value used by nvidia in single stage mode */
+			pll2 |= 0x011f;
+	}
+	if (chip_version > 0x70)
+		/* magic bits set by the blob (but not the bios) on g71-73 */
+		pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;
+
+	if (oldpll1 == pll1 && oldpll2 == pll2)
+		return;	/* already set */
+
+	if (shift_powerctrl_1 >= 0) {
+		saved_powerctrl_1 = nv_rd32(priv, 0x001584);
+		nv_wr32(priv, 0x001584,
+			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
+			1 << shift_powerctrl_1);
+	}
+
+	if (chip_version >= 0x40) {
+		int shift_c040 = 14;
+
+		switch (reg1) {
+		case 0x680504:
+			shift_c040 += 2;	/* fallthrough */
+		case 0x680500:
+			shift_c040 += 2;	/* fallthrough */
+		case 0x680520:
+			shift_c040 += 2;	/* fallthrough */
+		case 0x680508:
+			shift_c040 += 2;
+		}
+
+		savedc040 = nv_rd32(priv, 0xc040);
+		if (shift_c040 != 14)
+			nv_wr32(priv, 0xc040, savedc040 & ~(3 << shift_c040));
+	}
+
+	if (oldramdac580 != ramdac580)
+		nv_wr32(priv, 0x680580, ramdac580);
+
+	if (!nv3035)
+		nv_wr32(priv, reg2, pll2);
+	nv_wr32(priv, reg1, pll1);
+
+	if (shift_powerctrl_1 >= 0)
+		nv_wr32(priv, 0x001584, saved_powerctrl_1);
+	if (chip_version >= 0x40)
+		nv_wr32(priv, 0xc040, savedc040);
+}
+
+static void
+setPLL_double_lowregs(struct nv04_clock_priv *priv, u32 NMNMreg,
+		      struct nouveau_pll_vals *pv)
+{
+	/* When setting PLLs, there is a merry game of disabling and enabling
+	 * various bits of hardware during the process. This function is a
+	 * synthesis of six nv4x traces, nearly each card doing a subtly
+	 * different thing. With luck all the necessary bits for each card are
+	 * combined herein. Without luck it deviates from each card's formula
+	 * so as to not work on any :)
+	 */
+
+	uint32_t Preg = NMNMreg - 4;
+	bool mpll = Preg == 0x4020;
+	uint32_t oldPval = nv_rd32(priv, Preg);
+	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
+	uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
+			0xc << 28 | pv->log2P << 16;
+	uint32_t saved4600 = 0;
+	/* some cards have different maskc040s */
+	uint32_t maskc040 = ~(3 << 14), savedc040;
+	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
+
+	if (nv_rd32(priv, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
+		return;
+
+	if (Preg == 0x4000)
+		maskc040 = ~0x333;
+	if (Preg == 0x4058)
+		maskc040 = ~(0xc << 24);
+
+	if (mpll) {
+		struct nvbios_pll info;
+		uint8_t Pval2;
+
+		if (nvbios_pll_parse(nouveau_bios(priv), Preg, &info))
+			return;
+
+		Pval2 = pv->log2P + info.bias_p;
+		if (Pval2 > info.max_p)
+			Pval2 = info.max_p;
+		Pval |= 1 << 28 | Pval2 << 20;
+
+		saved4600 = nv_rd32(priv, 0x4600);
+		nv_wr32(priv, 0x4600, saved4600 | 8 << 28);
+	}
+	if (single_stage)
+		Pval |= mpll ? 1 << 12 : 1 << 8;
+
+	nv_wr32(priv, Preg, oldPval | 1 << 28);
+	nv_wr32(priv, Preg, Pval & ~(4 << 28));
+	if (mpll) {
+		Pval |= 8 << 20;
+		nv_wr32(priv, 0x4020, Pval & ~(0xc << 28));
+		nv_wr32(priv, 0x4038, Pval & ~(0xc << 28));
+	}
+
+	savedc040 = nv_rd32(priv, 0xc040);
+	nv_wr32(priv, 0xc040, savedc040 & maskc040);
+
+	nv_wr32(priv, NMNMreg, NMNM);
+	if (NMNMreg == 0x4024)
+		nv_wr32(priv, 0x403c, NMNM);
+
+	nv_wr32(priv, Preg, Pval);
+	if (mpll) {
+		Pval &= ~(8 << 20);
+		nv_wr32(priv, 0x4020, Pval);
+		nv_wr32(priv, 0x4038, Pval);
+		nv_wr32(priv, 0x4600, saved4600);
+	}
+
+	nv_wr32(priv, 0xc040, savedc040);
+
+	if (mpll) {
+		nv_wr32(priv, 0x4020, Pval & ~(1 << 28));
+		nv_wr32(priv, 0x4038, Pval & ~(1 << 28));
+	}
+}
+
+int
+nv04_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
+{
+	struct nv04_clock_priv *priv = (void *)clk;
+	struct nouveau_pll_vals pv;
+	struct nvbios_pll info;
+	int ret;
+
+	ret = nvbios_pll_parse(nouveau_bios(priv), type > 0x405c ?
+			       type : type - 4, &info);
+	if (ret)
+		return ret;
+
+	ret = clk->pll_calc(clk, &info, freq, &pv);
+	if (!ret)
+		return ret;
+
+	return clk->pll_prog(clk, type, &pv);
+}
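+
+/* Note: pll_calc above follows the nv04_pll_calc convention of returning
+ * the frequency actually achieved (zero meaning failure), not an errno,
+ * which is why the test on its result is inverted.
+ */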
+
+int
+nv04_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
+		    int clk, struct nouveau_pll_vals *pv)
+{
+	int N1, M1, N2, M2, P;
+	int ret = nv04_pll_calc(clock, info, clk, &N1, &M1, &N2, &M2, &P);
+	if (ret) {
+		pv->refclk = info->refclk;
+		pv->N1 = N1;
+		pv->M1 = M1;
+		pv->N2 = N2;
+		pv->M2 = M2;
+		pv->log2P = P;
+	}
+	return ret;
+}
+
+int
+nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
+		    struct nouveau_pll_vals *pv)
+{
+	struct nv04_clock_priv *priv = (void *)clk;
+	int cv = nouveau_bios(clk)->version.chip;
+
+	if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
+	    cv >= 0x40) {
+		if (reg1 > 0x405c)
+			setPLL_double_highregs(priv, reg1, pv);
+		else
+			setPLL_double_lowregs(priv, reg1, pv);
+	} else
+		setPLL_single(priv, reg1, pv);
+
+	return 0;
+}
+
+static int
+nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv04_clock_priv *priv;
+	int ret;
+
+	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.pll_set = nv04_clock_pll_set;
+	priv->base.pll_calc = nv04_clock_pll_calc;
+	priv->base.pll_prog = nv04_clock_pll_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv04_clock_oclass = {
+	.handle = NV_SUBDEV(CLOCK, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
new file mode 100644
index 0000000..a4b2b7e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/clock.h>
+
+struct nv40_clock_priv {
+	struct nouveau_clock base;
+};
+
+static int
+nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv40_clock_priv *priv;
+	int ret;
+
+	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.pll_set = nv04_clock_pll_set;
+	priv->base.pll_calc = nv04_clock_pll_calc;
+	priv->base.pll_prog = nv04_clock_pll_prog;
+	return 0;
+}
+
+struct nouveau_oclass
+nv40_clock_oclass = {
+	.handle = NV_SUBDEV(CLOCK, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
new file mode 100644
index 0000000..f4147f6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+struct nv50_clock_priv {
+	struct nouveau_clock base;
+};
+
+static int
+nv50_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
+{
+	struct nv50_clock_priv *priv = (void *)clk;
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll info;
+	int N1, M1, N2, M2, P;
+	int ret;
+
+	ret = nvbios_pll_parse(bios, type, &info);
+	if (ret) {
+		nv_error(clk, "failed to retrieve pll data, %d\n", ret);
+		return ret;
+	}
+
+	ret = nv04_pll_calc(clk, &info, freq, &N1, &M1, &N2, &M2, &P);
+	if (!ret) {
+		nv_error(clk, "failed pll calculation\n");
+		return ret;
+	}
+
+	switch (info.type) {
+	case PLL_VPLL0:
+	case PLL_VPLL1:
+		nv_wr32(priv, info.reg + 0, 0x10000611);
+		nv_mask(priv, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
+		nv_mask(priv, info.reg + 8, 0x7fff00ff, (P  << 28) |
+							(M2 << 16) | N2);
+		break;
+	case PLL_MEMORY:
+		nv_mask(priv, info.reg + 0, 0x01ff0000, (P << 22) |
+						        (info.bias_p << 19) |
+							(P << 16));
+		nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
+		break;
+	default:
+		nv_mask(priv, info.reg + 0, 0x00070000, (P << 16));
+		nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv50_clock_priv *priv;
+	int ret;
+
+	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.pll_set = nv50_clock_pll_set;
+	priv->base.pll_calc = nv04_clock_pll_calc;
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_clock_oclass = {
+	.handle = NV_SUBDEV(CLOCK, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
new file mode 100644
index 0000000..9068c98
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+struct nva3_clock_priv {
+	struct nouveau_clock base;
+};
+
+static int
+nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
+{
+	struct nva3_clock_priv *priv = (void *)clk;
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll info;
+	int N, fN, M, P;
+	int ret;
+
+	ret = nvbios_pll_parse(bios, type, &info);
+	if (ret)
+		return ret;
+
+	ret = nva3_pll_calc(clk, &info, freq, &N, &fN, &M, &P);
+	if (ret < 0)
+		return ret;
+
+	switch (info.type) {
+	case PLL_VPLL0:
+	case PLL_VPLL1:
+		nv_wr32(priv, info.reg + 0, 0x50000610);
+		nv_mask(priv, info.reg + 4, 0x003fffff,
+					    (P << 16) | (M << 8) | N);
+		nv_wr32(priv, info.reg + 8, fN);
+		break;
+	default:
+		nv_warn(priv, "0x%08x/%dkHz unimplemented\n", type, freq);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+int
+nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
+		    int clk, struct nouveau_pll_vals *pv)
+{
+	int ret, N, M, P;
+
+	ret = nva3_pll_calc(clock, info, clk, &N, NULL, &M, &P);
+
+	if (ret > 0) {
+		pv->refclk = info->refclk;
+		pv->N1 = N;
+		pv->M1 = M;
+		pv->log2P = P;
+	}
+	return ret;
+}
+
+
+static int
+nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nva3_clock_priv *priv;
+	int ret;
+
+	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.pll_set = nva3_clock_pll_set;
+	priv->base.pll_calc = nva3_clock_pll_calc;
+	return 0;
+}
+
+struct nouveau_oclass
+nva3_clock_oclass = {
+	.handle = NV_SUBDEV(CLOCK, 0xa3),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
new file mode 100644
index 0000000..7c96262
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+struct nvc0_clock_priv {
+	struct nouveau_clock base;
+};
+
+static int
+nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
+{
+	struct nvc0_clock_priv *priv = (void *)clk;
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct nvbios_pll info;
+	int N, fN, M, P;
+	int ret;
+
+	ret = nvbios_pll_parse(bios, type, &info);
+	if (ret)
+		return ret;
+
+	ret = nva3_pll_calc(clk, &info, freq, &N, &fN, &M, &P);
+	if (ret < 0)
+		return ret;
+
+	switch (info.type) {
+	case PLL_VPLL0:
+	case PLL_VPLL1:
+	case PLL_VPLL2:
+	case PLL_VPLL3:
+		nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
+		nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
+		nv_wr32(priv, info.reg + 0x10, fN << 16);
+		break;
+	default:
+		nv_warn(priv, "0x%08x/%dkHz unimplemented\n", type, freq);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
+nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nvc0_clock_priv *priv;
+	int ret;
+
+	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.pll_set = nvc0_clock_pll_set;
+	priv->base.pll_calc = nva3_clock_pll_calc;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_clock_oclass = {
+	.handle = NV_SUBDEV(CLOCK, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+};
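
A note on the register writes in nvc0_clock_pll_set above: the coefficient word at reg+0x04 packs M, N and P into byte-aligned fields, and reg+0x10 carries the fractional-N value in its high half. A standalone sketch of that packing (field widths are inferred from the shifts in the code, not from any documented register layout):

#include <stdint.h>

/* Sketch of the VPLL coefficient packing used above; the exact field
 * widths beyond the shifts are assumptions. */
static uint32_t vpll_coef(uint32_t N, uint32_t M, uint32_t P)
{
	return (P << 16) | (N << 8) | M;	/* written to reg + 0x04 */
}

static uint32_t vpll_frac(uint32_t fN)
{
	return fN << 16;			/* written to reg + 0x10 */
}
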
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h
new file mode 100644
index 0000000..ef2c007
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h
@@ -0,0 +1,9 @@
+#ifndef __NOUVEAU_PLL_H__
+#define __NOUVEAU_PLL_H__
+
+int nv04_pll_calc(struct nouveau_clock *, struct nvbios_pll *, u32 freq,
+		  int *N1, int *M1, int *N2, int *M2, int *P);
+int nva3_pll_calc(struct nouveau_clock *, struct nvbios_pll *, u32 freq,
+		  int *N, int *fN, int *M, int *P);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
new file mode 100644
index 0000000..a2ab6d0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright 1993-2003 NVIDIA, Corporation
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+static int
+getMNP_single(struct nouveau_clock *clock, struct nvbios_pll *info, int clk,
+	      int *pN, int *pM, int *pP)
+{
+	/* Find M, N and P for a single stage PLL
+	 *
+	 * Note that some BIOSes (NV3x) have lookup tables of precomputed MNP
+	 * values, but we don't make use of those at the moment
+	 *
+	 * "clk" parameter in kHz
+	 * returns calculated clock
+	 */
+	int cv = nouveau_bios(clock)->version.chip;
+	int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
+	int minM = info->vco1.min_m, maxM = info->vco1.max_m;
+	int minN = info->vco1.min_n, maxN = info->vco1.max_n;
+	int minU = info->vco1.min_inputfreq;
+	int maxU = info->vco1.max_inputfreq;
+	int minP = info->min_p;
+	int maxP = info->max_p_usable;
+	int crystal = info->refclk;
+	int M, N, thisP, P;
+	int clkP, calcclk;
+	int delta, bestdelta = INT_MAX;
+	int bestclk = 0;
+
+	/* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
+	/* possibly correlated with introduction of 27MHz crystal */
+	if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
+		if (clk > 250000)
+			maxM = 6;
+		if (clk > 340000)
+			maxM = 2;
+	} else if (cv < 0x40) {
+		if (clk > 150000)
+			maxM = 6;
+		if (clk > 200000)
+			maxM = 4;
+		if (clk > 340000)
+			maxM = 2;
+	}
+
+	P = 1 << maxP;
+	if ((clk * P) < minvco) {
+		minvco = clk * maxP;
+		maxvco = minvco * 2;
+	}
+
+	if (clk + clk/200 > maxvco)	/* +0.5% */
+		maxvco = clk + clk/200;
+
+	/* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
+	for (thisP = minP; thisP <= maxP; thisP++) {
+		P = 1 << thisP;
+		clkP = clk * P;
+
+		if (clkP < minvco)
+			continue;
+		if (clkP > maxvco)
+			return bestclk;
+
+		for (M = minM; M <= maxM; M++) {
+			if (crystal/M < minU)
+				return bestclk;
+			if (crystal/M > maxU)
+				continue;
+
+			/* add crystal/2 to round better */
+			N = (clkP * M + crystal/2) / crystal;
+
+			if (N < minN)
+				continue;
+			if (N > maxN)
+				break;
+
+			/* more rounding additions */
+			calcclk = ((N * crystal + P/2) / P + M/2) / M;
+			delta = abs(calcclk - clk);
+			/* we do an exhaustive search rather than terminating
+			 * on an optimality condition...
+			 */
+			if (delta < bestdelta) {
+				bestdelta = delta;
+				bestclk = calcclk;
+				*pN = N;
+				*pM = M;
+				*pP = thisP;
+				if (delta == 0)	/* except this one */
+					return bestclk;
+			}
+		}
+	}
+
+	return bestclk;
+}
+
+static int
+getMNP_double(struct nouveau_clock *clock, struct nvbios_pll *info, int clk,
+	      int *pN1, int *pM1, int *pN2, int *pM2, int *pP)
+{
+	/* Find M, N and P for a two stage PLL
+	 *
+	 * Note that some BIOSes (NV30+) have lookup tables of precomputed MNP
+	 * values, but we don't make use of those at the moment
+	 *
+	 * "clk" parameter in kHz
+	 * returns calculated clock
+	 */
+	int chip_version = nouveau_bios(clock)->version.chip;
+	int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq;
+	int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq;
+	int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq;
+	int maxU1 = info->vco1.max_inputfreq, maxU2 = info->vco2.max_inputfreq;
+	int minM1 = info->vco1.min_m, maxM1 = info->vco1.max_m;
+	int minN1 = info->vco1.min_n, maxN1 = info->vco1.max_n;
+	int minM2 = info->vco2.min_m, maxM2 = info->vco2.max_m;
+	int minN2 = info->vco2.min_n, maxN2 = info->vco2.max_n;
+	int maxlog2P = info->max_p_usable;
+	int crystal = info->refclk;
+	bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
+	int M1, N1, M2, N2, log2P;
+	int clkP, calcclk1, calcclk2, calcclkout;
+	int delta, bestdelta = INT_MAX;
+	int bestclk = 0;
+
+	int vco2 = (maxvco2 - maxvco2/200) / 2;
+	for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
+		;
+	clkP = clk << log2P;
+
+	if (maxvco2 < clk + clk/200)	/* +0.5% */
+		maxvco2 = clk + clk/200;
+
+	for (M1 = minM1; M1 <= maxM1; M1++) {
+		if (crystal/M1 < minU1)
+			return bestclk;
+		if (crystal/M1 > maxU1)
+			continue;
+
+		for (N1 = minN1; N1 <= maxN1; N1++) {
+			calcclk1 = crystal * N1 / M1;
+			if (calcclk1 < minvco1)
+				continue;
+			if (calcclk1 > maxvco1)
+				break;
+
+			for (M2 = minM2; M2 <= maxM2; M2++) {
+				if (calcclk1/M2 < minU2)
+					break;
+				if (calcclk1/M2 > maxU2)
+					continue;
+
+				/* add calcclk1/2 to round better */
+				N2 = (clkP * M2 + calcclk1/2) / calcclk1;
+				if (N2 < minN2)
+					continue;
+				if (N2 > maxN2)
+					break;
+
+				if (!fixedgain2) {
+					if (chip_version < 0x60)
+						if (N2/M2 < 4 || N2/M2 > 10)
+							continue;
+
+					calcclk2 = calcclk1 * N2 / M2;
+					if (calcclk2 < minvco2)
+						break;
+					if (calcclk2 > maxvco2)
+						continue;
+				} else
+					calcclk2 = calcclk1;
+
+				calcclkout = calcclk2 >> log2P;
+				delta = abs(calcclkout - clk);
+				/* we do an exhaustive search rather than terminating
+				 * on an optimality condition...
+				 */
+				if (delta < bestdelta) {
+					bestdelta = delta;
+					bestclk = calcclkout;
+					*pN1 = N1;
+					*pM1 = M1;
+					*pN2 = N2;
+					*pM2 = M2;
+					*pP = log2P;
+					if (delta == 0)	/* except this one */
+						return bestclk;
+				}
+			}
+		}
+	}
+
+	return bestclk;
+}
+
+int
+nv04_pll_calc(struct nouveau_clock *clk, struct nvbios_pll *info, u32 freq,
+	      int *N1, int *M1, int *N2, int *M2, int *P)
+{
+	int ret;
+
+	if (!info->vco2.max_freq) {
+		ret = getMNP_single(clk, info, freq, N1, M1, P);
+		*N2 = 1;
+		*M2 = 1;
+	} else {
+		ret = getMNP_double(clk, info, freq, N1, M1, N2, M2, P);
+	}
+
+	if (!ret)
+		nv_error(clk, "unable to compute acceptable pll values\n");
+	return ret;
+}
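
The relation getMNP_single and getMNP_double search over is f_out = refclk * N / (M << P), with all clocks in kHz and half-divisor additions for rounding. A minimal userspace sketch of the single-stage evaluation, mirroring the arithmetic in the loop above (the example values are hypothetical):

#include <stdio.h>

/* Recompute the output clock the way getMNP_single does (kHz).
 * The + P/2 and + M/2 terms reproduce the driver's rounding. */
static int pll_single_out(int crystal, int N, int M, int log2P)
{
	int P = 1 << log2P;
	return ((N * crystal + P / 2) / P + M / 2) / M;
}

int main(void)
{
	/* e.g. a 13500 kHz crystal with N=187, M=7, log2P=0 */
	printf("%d kHz\n", pll_single_out(13500, 187, 7, 0));
	return 0;
}
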
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
new file mode 100644
index 0000000..eed5c16
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+
+#include "pll.h"
+
+int
+nva3_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
+	      u32 freq, int *pN, int *pfN, int *pM, int *P)
+{
+	u32 best_err = ~0, err;
+	int M, lM, hM, N, fN;
+
+	*P = info->vco1.max_freq / freq;
+	if (*P > info->max_p)
+		*P = info->max_p;
+	if (*P < info->min_p)
+		*P = info->min_p;
+
+	lM = (info->refclk + info->vco1.max_inputfreq) / info->vco1.max_inputfreq;
+	lM = max(lM, (int)info->vco1.min_m);
+	hM = (info->refclk + info->vco1.min_inputfreq) / info->vco1.min_inputfreq;
+	hM = min(hM, (int)info->vco1.max_m);
+
+	for (M = lM; M <= hM; M++) {
+		u32 tmp = freq * *P * M;
+		N  = tmp / info->refclk;
+		fN = tmp % info->refclk;
+		if (!pfN && fN >= info->refclk / 2)
+			N++;
+
+		if (N < info->vco1.min_n)
+			continue;
+		if (N > info->vco1.max_n)
+			break;
+
+		err = abs(freq - (info->refclk * N / M / *P));
+		if (err < best_err) {
+			best_err = err;
+			*pN = N;
+			*pM = M;
+		}
+
+		if (pfN) {
+			*pfN = (((fN << 13) / info->refclk) - 4096) & 0xffff;
+			return freq;
+		}
+	}
+
+	if (unlikely(best_err == ~0)) {
+		nv_error(clock, "unable to find matching pll values\n");
+		return -EINVAL;
+	}
+
+	return info->refclk * *pN / *pM / *P;
+}
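
For the fractional-N path above: when pfN is supplied, the remainder that integer division drops from N is rescaled to 1/8192ths of the reference clock and biased by 4096 before being masked to 16 bits. A sketch of just that encoding step (whether the hardware treats the biased value as a signed offset is an assumption):

#include <stdint.h>

/* Mirror of the fN computation in nva3_pll_calc (all clocks in kHz).
 * tmp % refclk is the part of N lost to integer division; it is
 * rescaled to 1/8192ths and biased by 4096 for the hardware field. */
static uint16_t nva3_frac_field(uint32_t freq, uint32_t refclk,
				uint32_t P, uint32_t M)
{
	uint32_t tmp = freq * P * M;
	uint32_t fN  = tmp % refclk;
	return (((fN << 13) / refclk) - 4096) & 0xffff;
}
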
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
new file mode 100644
index 0000000..5a07a39
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+
+#include <subdev/devinit.h>
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+
+int
+nouveau_devinit_init(struct nouveau_devinit *devinit)
+{
+	int ret = nouveau_subdev_init(&devinit->base);
+	if (ret)
+		return ret;
+
+	return nvbios_init(&devinit->base, devinit->post);
+}
+
+int
+nouveau_devinit_fini(struct nouveau_devinit *devinit, bool suspend)
+{
+	/* force full reinit on resume */
+	if (suspend)
+		devinit->post = true;
+
+	return nouveau_subdev_fini(&devinit->base, suspend);
+}
+
+int
+nouveau_devinit_create_(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass,
+			int size, void **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_devinit *devinit;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "DEVINIT",
+				     "init", size, pobject);
+	devinit = *pobject;
+	if (ret)
+		return ret;
+
+	devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
new file mode 100644
index 0000000..6b56a0f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define NV04_PFB_BOOT_0						0x00100000
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT			0x00000003
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB			0x00000000
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB			0x00000001
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB			0x00000002
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB			0x00000003
+#	define NV04_PFB_BOOT_0_RAM_WIDTH_128			0x00000004
+#	define NV04_PFB_BOOT_0_RAM_TYPE				0x00000028
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT		0x00000000
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT		0x00000008
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK	0x00000010
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT		0x00000018
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT		0x00000020
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16		0x00000028
+#	define NV04_PFB_BOOT_0_UMA_ENABLE			0x00000100
+#	define NV04_PFB_BOOT_0_UMA_SIZE				0x0000f000
+#define NV04_PFB_DEBUG_0					0x00100080
+#	define NV04_PFB_DEBUG_0_PAGE_MODE			0x00000001
+#	define NV04_PFB_DEBUG_0_REFRESH_OFF			0x00000010
+#	define NV04_PFB_DEBUG_0_REFRESH_COUNTX64		0x00003f00
+#	define NV04_PFB_DEBUG_0_REFRESH_SLOW_CLK		0x00004000
+#	define NV04_PFB_DEBUG_0_SAFE_MODE			0x00008000
+#	define NV04_PFB_DEBUG_0_ALOM_ENABLE			0x00010000
+#	define NV04_PFB_DEBUG_0_CASOE				0x00100000
+#	define NV04_PFB_DEBUG_0_CKE_INVERT			0x10000000
+#	define NV04_PFB_DEBUG_0_REFINC				0x20000000
+#	define NV04_PFB_DEBUG_0_SAVE_POWER_OFF			0x40000000
+#define NV04_PFB_CFG0						0x00100200
+#	define NV04_PFB_CFG0_SCRAMBLE				0x20000000
+#define NV04_PFB_CFG1						0x00100204
+#define NV04_PFB_SCRAMBLE(i)                         (0x00100400 + 4 * (i))
+
+#define NV10_PFB_REFCTRL					0x00100210
+#	define NV10_PFB_REFCTRL_VALID_1				(1 << 31)
+
+static inline struct io_mapping *
+fbmem_init(struct pci_dev *pdev)
+{
+	return io_mapping_create_wc(pci_resource_start(pdev, 1),
+				    pci_resource_len(pdev, 1));
+}
+
+static inline void
+fbmem_fini(struct io_mapping *fb)
+{
+	io_mapping_free(fb);
+}
+
+static inline u32
+fbmem_peek(struct io_mapping *fb, u32 off)
+{
+	u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
+	u32 val = ioread32(p + (off & ~PAGE_MASK));
+	io_mapping_unmap_atomic(p);
+	return val;
+}
+
+static inline void
+fbmem_poke(struct io_mapping *fb, u32 off, u32 val)
+{
+	u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
+	iowrite32(val, p + (off & ~PAGE_MASK));
+	wmb();
+	io_mapping_unmap_atomic(p);
+}
+
+static inline bool
+fbmem_readback(struct io_mapping *fb, u32 off, u32 val)
+{
+	fbmem_poke(fb, off, val);
+	return val == fbmem_peek(fb, off);
+}
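
For orientation, a hypothetical caller of these helpers, following the same map/probe/unmap pattern the meminit implementations below use:

/* Hypothetical probe built from the helpers above: map BAR1, test
 * whether a write at `off` survives readback, unmap.  A sketch of
 * the usage pattern only, not an additional driver entry point. */
static int fbmem_probe_offset(struct pci_dev *pdev, u32 off)
{
	struct io_mapping *fb = fbmem_init(pdev);

	if (!fb)
		return -ENOMEM;
	if (!fbmem_readback(fb, off, 0xdeadbeef)) {
		fbmem_fini(fb);
		return -ENODEV;
	}
	fbmem_fini(fb);
	return 0;
}
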
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
new file mode 100644
index 0000000..7a72d93
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/devinit.h>
+#include <subdev/vga.h>
+
+#include "fbmem.h"
+
+struct nv04_devinit_priv {
+	struct nouveau_devinit base;
+	int owner;
+};
+
+static void
+nv04_devinit_meminit(struct nouveau_devinit *devinit)
+{
+	struct nv04_devinit_priv *priv = (void *)devinit;
+	u32 patt = 0xdeadbeef;
+	struct io_mapping *fb;
+	int i;
+
+	/* Map the framebuffer aperture */
+	fb = fbmem_init(nv_device(priv)->pdev);
+	if (!fb) {
+		nv_error(priv, "failed to map fb\n");
+		return;
+	}
+
+	/* Sequencer and refresh off */
+	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
+	nv_mask(priv, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
+
+	nv_mask(priv, NV04_PFB_BOOT_0, ~0,
+		      NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
+		      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
+		      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);
+
+	for (i = 0; i < 4; i++)
+		fbmem_poke(fb, 4 * i, patt);
+
+	fbmem_poke(fb, 0x400000, patt + 1);
+
+	if (fbmem_peek(fb, 0) == patt + 1) {
+		nv_mask(priv, NV04_PFB_BOOT_0,
+			      NV04_PFB_BOOT_0_RAM_TYPE,
+			      NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
+		nv_mask(priv, NV04_PFB_DEBUG_0,
+			      NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+
+		for (i = 0; i < 4; i++)
+			fbmem_poke(fb, 4 * i, patt);
+
+		if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff))
+			nv_mask(priv, NV04_PFB_BOOT_0,
+				      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
+				      NV04_PFB_BOOT_0_RAM_AMOUNT,
+				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+	} else
+	if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) {
+		nv_mask(priv, NV04_PFB_BOOT_0,
+			      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
+			      NV04_PFB_BOOT_0_RAM_AMOUNT,
+			      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
+	} else
+	if (fbmem_peek(fb, 0) != patt) {
+		if (fbmem_readback(fb, 0x800000, patt))
+			nv_mask(priv, NV04_PFB_BOOT_0,
+				      NV04_PFB_BOOT_0_RAM_AMOUNT,
+				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+		else
+			nv_mask(priv, NV04_PFB_BOOT_0,
+				      NV04_PFB_BOOT_0_RAM_AMOUNT,
+				      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
+
+		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
+			      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
+	} else
+	if (!fbmem_readback(fb, 0x800000, patt)) {
+		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+			      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+	}
+
+	/* Refresh on, sequencer on */
+	nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
+	fbmem_fini(fb);
+}
+
+static int
+nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv04_devinit_priv *priv;
+	int ret;
+
+	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.meminit = nv04_devinit_meminit;
+	priv->owner = -1;
+	return 0;
+}
+
+void
+nv04_devinit_dtor(struct nouveau_object *object)
+{
+	struct nv04_devinit_priv *priv = (void *)object;
+
+	/* restore vga owner saved at first init, and lock crtc regs */
+	nv_wrvgaowner(priv, priv->owner);
+	nv_lockvgac(priv, true);
+
+	nouveau_devinit_destroy(&priv->base);
+}
+
+int
+nv04_devinit_init(struct nouveau_object *object)
+{
+	struct nv04_devinit_priv *priv = (void *)object;
+
+	if (!priv->base.post) {
+		u32 htotal = nv_rdvgac(priv, 0, 0x06);
+		htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x01) << 8;
+		htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x20) << 4;
+		htotal |= (nv_rdvgac(priv, 0, 0x25) & 0x01) << 10;
+		htotal |= (nv_rdvgac(priv, 0, 0x41) & 0x01) << 11;
+		if (!htotal) {
+			nv_info(priv, "adaptor not initialised\n");
+			priv->base.post = true;
+		}
+	}
+
+	return nouveau_devinit_init(&priv->base);
+}
+
+int
+nv04_devinit_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv04_devinit_priv *priv = (void *)object;
+
+	/* make i2c busses accessible */
+	nv_mask(priv, 0x000200, 0x00000001, 0x00000001);
+
+	/* unlock extended vga crtc regs, and unslave crtcs */
+	nv_lockvgac(priv, false);
+	if (priv->owner < 0)
+		priv->owner = nv_rdvgaowner(priv);
+	nv_wrvgaowner(priv, 0);
+
+	return nouveau_devinit_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv04_devinit_oclass = {
+	.handle = NV_SUBDEV(DEVINIT, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_devinit_ctor,
+		.dtor = nv04_devinit_dtor,
+		.init = nv04_devinit_init,
+		.fini = nv04_devinit_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
new file mode 100644
index 0000000..191447d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/devinit.h>
+#include <subdev/bios.h>
+#include <subdev/bios/bmp.h>
+#include <subdev/vga.h>
+
+#include "fbmem.h"
+
+struct nv05_devinit_priv {
+	struct nouveau_devinit base;
+	u8 owner;
+};
+
+static void
+nv05_devinit_meminit(struct nouveau_devinit *devinit)
+{
+	static const u8 default_config_tab[][2] = {
+		{ 0x24, 0x00 },
+		{ 0x28, 0x00 },
+		{ 0x24, 0x01 },
+		{ 0x1f, 0x00 },
+		{ 0x0f, 0x00 },
+		{ 0x17, 0x00 },
+		{ 0x06, 0x00 },
+		{ 0x00, 0x00 }
+	};
+	struct nv05_devinit_priv *priv = (void *)devinit;
+	struct nouveau_bios *bios = nouveau_bios(priv);
+	struct io_mapping *fb;
+	u32 patt = 0xdeadbeef;
+	u16 data;
+	u8 strap, ramcfg[2];
+	int i, v;
+
+	/* Map the framebuffer aperture */
+	fb = fbmem_init(nv_device(priv)->pdev);
+	if (!fb) {
+		nv_error(priv, "failed to map fb\n");
+		return;
+	}
+
+	strap = (nv_rd32(priv, 0x101000) & 0x0000003c) >> 2;
+	if ((data = bmp_mem_init_table(bios))) {
+		ramcfg[0] = nv_ro08(bios, data + 2 * strap + 0);
+		ramcfg[1] = nv_ro08(bios, data + 2 * strap + 1);
+	} else {
+		ramcfg[0] = default_config_tab[strap][0];
+		ramcfg[1] = default_config_tab[strap][1];
+	}
+
+	/* Sequencer off */
+	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
+
+	if (nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
+		goto out;
+
+	nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+
+	/* If present, load the hardcoded scrambling table */
+	if (data) {
+		for (i = 0, data += 0x10; i < 8; i++, data += 4) {
+			u32 scramble = nv_ro32(bios, data);
+			nv_wr32(priv, NV04_PFB_SCRAMBLE(i), scramble);
+		}
+	}
+
+	/* Set memory type/width/length defaults depending on the straps */
+	nv_mask(priv, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
+
+	if (ramcfg[1] & 0x80)
+		nv_mask(priv, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
+
+	nv_mask(priv, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
+	nv_mask(priv, NV04_PFB_CFG1, 0, 1);
+
+	/* Probe memory bus width */
+	for (i = 0; i < 4; i++)
+		fbmem_poke(fb, 4 * i, patt);
+
+	if (fbmem_peek(fb, 0xc) != patt)
+		nv_mask(priv, NV04_PFB_BOOT_0,
+			  NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);
+
+	/* Probe memory length */
+	v = nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
+
+	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
+	    (!fbmem_readback(fb, 0x1000000, ++patt) ||
+	     !fbmem_readback(fb, 0, ++patt)))
+		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+			  NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);
+
+	if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
+	    !fbmem_readback(fb, 0x800000, ++patt))
+		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+			  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+
+	if (!fbmem_readback(fb, 0x400000, ++patt))
+		nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+			  NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
+
+out:
+	/* Sequencer on */
+	nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
+	fbmem_fini(fb);
+}
+
+static int
+nv05_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv05_devinit_priv *priv;
+	int ret;
+
+	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.meminit = nv05_devinit_meminit;
+	return 0;
+}
+
+struct nouveau_oclass
+nv05_devinit_oclass = {
+	.handle = NV_SUBDEV(DEVINIT, 0x05),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv05_devinit_ctor,
+		.dtor = nv04_devinit_dtor,
+		.init = nv04_devinit_init,
+		.fini = nv04_devinit_fini,
+	},
+};
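
The strap handling above distils to indexing a two-byte-per-entry table with bits 2..5 of register 0x101000; a small sketch (function name hypothetical):

#include <stdint.h>

/* Sketch of the strap-based ram config selection in
 * nv05_devinit_meminit: `table` is either the BMP mem-init table or
 * the built-in fallback, two bytes per strap value. */
static void ram_config_for_strap(uint32_t reg_101000,
				 const uint8_t table[][2],
				 uint8_t ramcfg[2])
{
	uint8_t strap = (reg_101000 & 0x0000003c) >> 2;

	ramcfg[0] = table[strap][0];
	ramcfg[1] = table[strap][1];
}
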
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
new file mode 100644
index 0000000..eb76ffa
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/devinit.h>
+#include <subdev/vga.h>
+
+#include "fbmem.h"
+
+struct nv10_devinit_priv {
+	struct nouveau_devinit base;
+	u8 owner;
+};
+
+static void
+nv10_devinit_meminit(struct nouveau_devinit *devinit)
+{
+	struct nv10_devinit_priv *priv = (void *)devinit;
+	const int mem_width[] = { 0x10, 0x00, 0x20 };
+	const int mem_width_count = nv_device(priv)->chipset >= 0x17 ? 3 : 2;
+	uint32_t patt = 0xdeadbeef;
+	struct io_mapping *fb;
+	int i, j, k;
+
+	/* Map the framebuffer aperture */
+	fb = fbmem_init(nv_device(priv)->pdev);
+	if (!fb) {
+		nv_error(priv, "failed to map fb\n");
+		return;
+	}
+
+	nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+
+	/* Probe memory bus width */
+	for (i = 0; i < mem_width_count; i++) {
+		nv_mask(priv, NV04_PFB_CFG0, 0x30, mem_width[i]);
+
+		for (j = 0; j < 4; j++) {
+			for (k = 0; k < 4; k++)
+				fbmem_poke(fb, 0x1c, 0);
+
+			fbmem_poke(fb, 0x1c, patt);
+			fbmem_poke(fb, 0x3c, 0);
+
+			if (fbmem_peek(fb, 0x1c) == patt)
+				goto mem_width_found;
+		}
+	}
+
+mem_width_found:
+	patt <<= 1;
+
+	/* Probe amount of installed memory */
+	for (i = 0; i < 4; i++) {
+		int off = nv_rd32(priv, 0x10020c) - 0x100000;
+
+		fbmem_poke(fb, off, patt);
+		fbmem_poke(fb, 0, 0);
+
+		fbmem_peek(fb, 0);
+		fbmem_peek(fb, 0);
+		fbmem_peek(fb, 0);
+		fbmem_peek(fb, 0);
+
+		if (fbmem_peek(fb, off) == patt)
+			goto amount_found;
+	}
+
+	/* IC missing - disable the upper half memory space. */
+	nv_mask(priv, NV04_PFB_CFG0, 0x1000, 0);
+
+amount_found:
+	fbmem_fini(fb);
+}
+
+static int
+nv10_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv10_devinit_priv *priv;
+	int ret;
+
+	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.meminit = nv10_devinit_meminit;
+	return 0;
+}
+
+struct nouveau_oclass
+nv10_devinit_oclass = {
+	.handle = NV_SUBDEV(DEVINIT, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_devinit_ctor,
+		.dtor = nv04_devinit_dtor,
+		.init = nv04_devinit_init,
+		.fini = nv04_devinit_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
new file mode 100644
index 0000000..5b2ba63
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/devinit.h>
+#include <subdev/vga.h>
+
+struct nv1a_devinit_priv {
+	struct nouveau_devinit base;
+	u8 owner;
+};
+
+static int
+nv1a_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv1a_devinit_priv *priv;
+	int ret;
+
+	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv1a_devinit_oclass = {
+	.handle = NV_SUBDEV(DEVINIT, 0x1a),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv1a_devinit_ctor,
+		.dtor = nv04_devinit_dtor,
+		.init = nv04_devinit_init,
+		.fini = nv04_devinit_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
new file mode 100644
index 0000000..eb32e99
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/devinit.h>
+#include <subdev/vga.h>
+
+#include "fbmem.h"
+
+struct nv20_devinit_priv {
+	struct nouveau_devinit base;
+	u8 owner;
+};
+
+static void
+nv20_devinit_meminit(struct nouveau_devinit *devinit)
+{
+	struct nv20_devinit_priv *priv = (void *)devinit;
+	struct nouveau_device *device = nv_device(priv);
+	uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
+	uint32_t amount, off;
+	struct io_mapping *fb;
+
+	/* Map the framebuffer aperture */
+	fb = fbmem_init(nv_device(priv)->pdev);
+	if (!fb) {
+		nv_error(priv, "failed to map fb\n");
+		return;
+	}
+
+	nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+
+	/* Allow full addressing */
+	nv_mask(priv, NV04_PFB_CFG0, 0, mask);
+
+	amount = nv_rd32(priv, 0x10020c);
+	for (off = amount; off > 0x2000000; off -= 0x2000000)
+		fbmem_poke(fb, off - 4, off);
+
+	amount = nv_rd32(priv, 0x10020c);
+	if (amount != fbmem_peek(fb, amount - 4))
+		/* IC missing - disable the upper half memory space. */
+		nv_mask(priv, NV04_PFB_CFG0, mask, 0);
+
+	fbmem_fini(fb);
+}
+
+static int
+nv20_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv20_devinit_priv *priv;
+	int ret;
+
+	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.meminit = nv20_devinit_meminit;
+	return 0;
+}
+
+struct nouveau_oclass
+nv20_devinit_oclass = {
+	.handle = NV_SUBDEV(DEVINIT, 0x20),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv20_devinit_ctor,
+		.dtor = nv04_devinit_dtor,
+		.init = nv04_devinit_init,
+		.fini = nv04_devinit_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
new file mode 100644
index 0000000..4a85778
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/devinit.h>
+#include <subdev/vga.h>
+
+struct nv50_devinit_priv {
+	struct nouveau_devinit base;
+};
+
+static int
+nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv50_devinit_priv *priv;
+	int ret;
+
+	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void
+nv50_devinit_dtor(struct nouveau_object *object)
+{
+	struct nv50_devinit_priv *priv = (void *)object;
+	nouveau_devinit_destroy(&priv->base);
+}
+
+static int
+nv50_devinit_init(struct nouveau_object *object)
+{
+	struct nouveau_bios *bios = nouveau_bios(object);
+	struct nv50_devinit_priv *priv = (void *)object;
+	struct nvbios_outp info;
+	struct dcb_output outp;
+	u8  ver = 0xff, hdr, cnt, len;
+	int ret, i = 0;
+
+	if (!priv->base.post) {
+		if (!nv_rdvgac(priv, 0, 0x00) &&
+		    !nv_rdvgac(priv, 0, 0x1a)) {
+			nv_info(priv, "adaptor not initialised\n");
+			priv->base.post = true;
+		}
+	}
+
+	ret = nouveau_devinit_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* if we ran the init tables, we have to execute the first script
+	 * pointer of each dcb entry's display encoder table in order
+	 * to properly initialise each encoder.
+	 */
+	while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
+		if (nvbios_outp_match(bios, outp.hasht, outp.hashm,
+				     &ver, &hdr, &cnt, &len, &info)) {
+			struct nvbios_init init = {
+				.subdev = nv_subdev(priv),
+				.bios = bios,
+				.offset = info.script[0],
+				.outp = &outp,
+				.crtc = -1,
+				.execute = 1,
+			};
+
+			nvbios_exec(&init);
+		}
+		i++;
+	}
+
+	return 0;
+}
+
+static int
+nv50_devinit_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_devinit_priv *priv = (void *)object;
+	return nouveau_devinit_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv50_devinit_oclass = {
+	.handle = NV_SUBDEV(DEVINIT, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_devinit_ctor,
+		.dtor = nv50_devinit_dtor,
+		.init = nv50_devinit_init,
+		.fini = nv50_devinit_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
new file mode 100644
index 0000000..d62045f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "subdev/fb.h"
+#include "subdev/bios.h"
+#include "subdev/bios/bit.h"
+
+int
+nouveau_fb_bios_memtype(struct nouveau_bios *bios)
+{
+	struct bit_entry M;
+	u8 ramcfg;
+
+	ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+	if (!bit_entry(bios, 'M', &M) && M.version == 2 && M.length >= 5) {
+		u16 table   = nv_ro16(bios, M.offset + 3);
+		u8  version = nv_ro08(bios, table + 0);
+		u8  header  = nv_ro08(bios, table + 1);
+		u8  record  = nv_ro08(bios, table + 2);
+		u8  entries = nv_ro08(bios, table + 3);
+		if (table && version == 0x10 && ramcfg < entries) {
+			u16 entry = table + header + (ramcfg * record);
+			switch (nv_ro08(bios, entry) & 0x0f) {
+			case 0: return NV_MEM_TYPE_DDR2;
+			case 1: return NV_MEM_TYPE_DDR3;
+			case 2: return NV_MEM_TYPE_GDDR3;
+			case 3: return NV_MEM_TYPE_GDDR5;
+			default:
+				break;
+			}
+		}
+	}
+
+	return NV_MEM_TYPE_UNKNOWN;
+}
+
+int
+nouveau_fb_preinit(struct nouveau_fb *pfb)
+{
+	static const char *name[] = {
+		[NV_MEM_TYPE_UNKNOWN] = "unknown",
+		[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
+		[NV_MEM_TYPE_SGRAM  ] = "SGRAM",
+		[NV_MEM_TYPE_SDRAM  ] = "SDRAM",
+		[NV_MEM_TYPE_DDR1   ] = "DDR1",
+		[NV_MEM_TYPE_DDR2   ] = "DDR2",
+		[NV_MEM_TYPE_DDR3   ] = "DDR3",
+		[NV_MEM_TYPE_GDDR2  ] = "GDDR2",
+		[NV_MEM_TYPE_GDDR3  ] = "GDDR3",
+		[NV_MEM_TYPE_GDDR4  ] = "GDDR4",
+		[NV_MEM_TYPE_GDDR5  ] = "GDDR5",
+	};
+	int ret, tags;
+
+	tags = pfb->ram.init(pfb);
+	if (tags < 0 || !pfb->ram.size) {
+		nv_fatal(pfb, "error detecting memory configuration!!\n");
+		return (tags < 0) ? tags : -ERANGE;
+	}
+
+	if (!nouveau_mm_initialised(&pfb->vram)) {
+		ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram.size >> 12, 1);
+		if (ret)
+			return ret;
+	}
+
+	if (!nouveau_mm_initialised(&pfb->tags)) {
+		ret = nouveau_mm_init(&pfb->tags, 0, tags ? ++tags : 0, 1);
+		if (ret)
+			return ret;
+	}
+
+	nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
+	nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
+	nv_info(pfb, "   ZCOMP: %d tags\n", tags);
+	return 0;
+}
+
+void
+nouveau_fb_destroy(struct nouveau_fb *pfb)
+{
+	int i;
+
+	for (i = 0; i < pfb->tile.regions; i++)
+		pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
+	nouveau_mm_fini(&pfb->tags);
+	nouveau_mm_fini(&pfb->vram);
+
+	nouveau_subdev_destroy(&pfb->base);
+}
+
+void
+_nouveau_fb_dtor(struct nouveau_object *object)
+{
+	struct nouveau_fb *pfb = (void *)object;
+	nouveau_fb_destroy(pfb);
+}
+
+int
+nouveau_fb_init(struct nouveau_fb *pfb)
+{
+	int ret, i;
+
+	ret = nouveau_subdev_init(&pfb->base);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < pfb->tile.regions; i++)
+		pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
+
+	return 0;
+}
+
+int
+_nouveau_fb_init(struct nouveau_object *object)
+{
+	struct nouveau_fb *pfb = (void *)object;
+	return nouveau_fb_init(pfb);
+}
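
The BIOS table walk in nouveau_fb_bios_memtype is plain offset arithmetic over the 'M' (memory) bit-entry; a sketch of the indexing (helper name hypothetical):

#include <stdint.h>

/* entry = table + header + strap * record; the low nibble of the
 * entry's first byte selects the memory type (0=DDR2, 1=DDR3,
 * 2=GDDR3, 3=GDDR5, per the switch in the function above). */
static uint16_t memtype_entry(uint16_t table, uint8_t header,
			      uint8_t record, uint8_t ramcfg)
{
	return table + header + (uint16_t)ramcfg * record;
}
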
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
new file mode 100644
index 0000000..6e369f8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/fb.h>
+
+#define NV04_PFB_BOOT_0						0x00100000
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT			0x00000003
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB			0x00000000
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB			0x00000001
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB			0x00000002
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB			0x00000003
+#	define NV04_PFB_BOOT_0_RAM_WIDTH_128			0x00000004
+#	define NV04_PFB_BOOT_0_RAM_TYPE				0x00000028
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT		0x00000000
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT		0x00000008
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK	0x00000010
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT		0x00000018
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT		0x00000020
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16		0x00000028
+#	define NV04_PFB_BOOT_0_UMA_ENABLE			0x00000100
+#	define NV04_PFB_BOOT_0_UMA_SIZE				0x0000f000
+#define NV04_PFB_CFG0						0x00100200
+
+struct nv04_fb_priv {
+	struct nouveau_fb base;
+};
+
+bool
+nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
+{
+	if (!(tile_flags & 0xff00))
+		return true;
+
+	return false;
+}
+
+static int
+nv04_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
+	if (boot0 & 0x00000100) {
+		pfb->ram.size  = ((boot0 >> 12) & 0xf) * 2 + 2;
+		pfb->ram.size *= 1024 * 1024;
+	} else {
+		switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+			pfb->ram.size = 32 * 1024 * 1024;
+			break;
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+			pfb->ram.size = 16 * 1024 * 1024;
+			break;
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+			pfb->ram.size = 8 * 1024 * 1024;
+			break;
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+			pfb->ram.size = 4 * 1024 * 1024;
+			break;
+		}
+	}
+
+	if ((boot0 & 0x00000038) <= 0x10)
+		pfb->ram.type = NV_MEM_TYPE_SGRAM;
+	else
+		pfb->ram.type = NV_MEM_TYPE_SDRAM;
+	return 0;
+}
+
+static int
+nv04_fb_init(struct nouveau_object *object)
+{
+	struct nv04_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* This is what the DDX did for NV_ARCH_04, but an mmio trace shows
+	 * nvidia reading PFB_CFG_0, then writing back its original value
+	 * (which was 0x701114 in this case).
+	 */
+	nv_wr32(priv, NV04_PFB_CFG0, 0x1114);
+	return 0;
+}
+
+static int
+nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv04_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv04_fb_vram_init;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv04_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv04_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
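
The size decode in nv04_fb_vram_init reduces to: with UMA enabled, bits 12..15 encode (n * 2 + 2) MiB; otherwise the two RAM_AMOUNT bits pick one of four fixed sizes. A sketch using the masks defined above (helper name hypothetical):

/* Sketch of the BOOT_0 size decode implemented above, in MiB. */
static u32 nv04_vram_mib(u32 boot0)
{
	static const u32 amount[] = { 32, 4, 8, 16 };	/* MiB */

	if (boot0 & NV04_PFB_BOOT_0_UMA_ENABLE)
		return ((boot0 >> 12) & 0xf) * 2 + 2;
	return amount[boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT];
}
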
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
new file mode 100644
index 0000000..edbbe26
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv10_fb_priv {
+	struct nouveau_fb base;
+};
+
+static int
+nv10_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 cfg0 = nv_rd32(pfb, 0x100200);
+	if (cfg0 & 0x00000001)
+		pfb->ram.type = NV_MEM_TYPE_DDR1;
+	else
+		pfb->ram.type = NV_MEM_TYPE_SDRAM;
+
+	pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	return 0;
+}
+
+void
+nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+		  u32 flags, struct nouveau_fb_tile *tile)
+{
+	tile->addr  = 0x80000000 | addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
+void
+nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	tile->addr  = 0;
+	tile->limit = 0;
+	tile->pitch = 0;
+	tile->zcomp = 0;
+}
+
+void
+nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
+	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
+	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+	nv_rd32(pfb, 0x100240 + (i * 0x10));
+}
+
+static int
+nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv10_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv10_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv10_fb_tile_init;
+	priv->base.tile.fini = nv10_fb_tile_fini;
+	priv->base.tile.prog = nv10_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv10_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = _nouveau_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
new file mode 100644
index 0000000..4836684
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv1a_fb_priv {
+	struct nouveau_fb base;
+};
+
+static int
+nv1a_fb_vram_init(struct nouveau_fb *pfb)
+{
+	struct pci_dev *bridge;
+	u32 mem, mib;
+
+	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+	if (!bridge) {
+		nv_fatal(pfb, "no bridge device\n");
+		return -ENODEV;
+	}
+
+	if (nv_device(pfb)->chipset == 0x1a) {
+		pci_read_config_dword(bridge, 0x7c, &mem);
+		mib = ((mem >> 6) & 31) + 1;
+	} else {
+		pci_read_config_dword(bridge, 0x84, &mem);
+		mib = ((mem >> 4) & 127) + 1;
+	}
+
+	pfb->ram.type = NV_MEM_TYPE_STOLEN;
+	pfb->ram.size = mib * 1024 * 1024;
+	return 0;
+}
+
+static int
+nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv1a_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv1a_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv10_fb_tile_init;
+	priv->base.tile.fini = nv10_fb_tile_fini;
+	priv->base.tile.prog = nv10_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv1a_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x1a),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv1a_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = _nouveau_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
new file mode 100644
index 0000000..5d14612
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv20_fb_priv {
+	struct nouveau_fb base;
+};
+
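+/* The RAM type is taken from PBUS strap register 0x001218; only the
+ * top byte of 0x10020c is kept, giving the size 16 MiB granularity.
+ * The value read from 0x100320 is returned as the number of available
+ * compression tags, which the common preinit code uses to size the
+ * tag heap.
+ */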
+int
+nv20_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 pbus1218 = nv_rd32(pfb, 0x001218);
+
+	switch (pbus1218 & 0x00000300) {
+	case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+	case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000300: pfb->ram.type = NV_MEM_TYPE_GDDR2; break;
+	}
+	pfb->ram.size  = (nv_rd32(pfb, 0x10020c) & 0xff000000);
+	pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+
+	return nv_rd32(pfb, 0x100320);
+}
+
+void
+nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+		  u32 flags, struct nouveau_fb_tile *tile)
+{
+	tile->addr  = 0x00000001 | addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+	if (flags & 4) {
+		pfb->tile.comp(pfb, i, size, flags, tile);
+		tile->addr |= 2;
+	}
+}
+
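+/* One compression tag covers a 0x40-byte tile, split evenly across the
+ * memory partitions.  E.g. a 1 MiB region on a 2-partition board needs
+ * DIV_ROUND_UP(0x100000, 0x40) = 0x4000 tiles, i.e. 0x2000 tags per
+ * partition (already a multiple of the 0x40 alignment).
+ */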
+static void
+nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
+		else              tile->zcomp = 0x04000000; /* Z24S8 */
+		tile->zcomp |= tile->tag->offset;
+		tile->zcomp |= 0x80000000; /* enable */
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x08000000;
+#endif
+	}
+}
+
+void
+nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	tile->addr  = 0;
+	tile->limit = 0;
+	tile->pitch = 0;
+	tile->zcomp = 0;
+	nouveau_mm_free(&pfb->tags, &tile->tag);
+}
+
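+/* Limit and pitch are programmed before the address/enable word, and
+ * the address register is then read back, presumably to flush the
+ * writes before the zcomp register is touched.
+ */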
+void
+nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
+	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
+	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+	nv_rd32(pfb, 0x100240 + (i * 0x10));
+	nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
+}
+
+static int
+nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv20_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv20_fb_tile_init;
+	priv->base.tile.comp = nv20_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv20_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x20),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv20_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = _nouveau_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
new file mode 100644
index 0000000..0042ace
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv25_fb_priv {
+	struct nouveau_fb base;
+};
+
+static void
+nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
+		else              tile->zcomp = 0x00200000; /* Z24S8 */
+		tile->zcomp |= tile->tag->offset;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x01000000;
+#endif
+	}
+}
+
+static int
+nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv25_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv20_fb_tile_init;
+	priv->base.tile.comp = nv25_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv25_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x25),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv25_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = _nouveau_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
new file mode 100644
index 0000000..a7ba0d0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv30_fb_priv {
+	struct nouveau_fb base;
+};
+
+void
+nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+		  u32 flags, struct nouveau_fb_tile *tile)
+{
+	/* for performance, select alternate bank offset for zeta */
+	if (!(flags & 4)) {
+		tile->addr = (0 << 4);
+	} else {
+		if (pfb->tile.comp) /* z compression */
+			pfb->tile.comp(pfb, i, size, flags, tile);
+		tile->addr = (1 << 4);
+	}
+
+	tile->addr |= 0x00000001; /* enable */
+	tile->addr |= addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
+static void
+nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
+		else           tile->zcomp |= 0x02000000; /* Z24S8 */
+		tile->zcomp |= ((tile->tag->offset           ) >> 6);
+		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x10000000;
+#endif
+	}
+}
+
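+/* The bias is a signed 4-bit two's-complement nibble, sign-extended
+ * and doubled.  E.g. b = 0xc yields 2 * (0xc - 0x10) = -8, while
+ * b = 0x3 yields +6.
+ */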
+static int
+calc_bias(struct nv30_fb_priv *priv, int k, int i, int j)
+{
+	struct nouveau_device *device = nv_device(priv);
+	int b = (device->chipset > 0x30 ?
+		 nv_rd32(priv, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
+		 0) & 0xf;
+
+	return 2 * (b & 0x8 ? b - 0x10 : b);
+}
+
+static int
+calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
+{
+	int j, x = 0;
+
+	for (j = 0; j < 4; j++) {
+		int m = (l >> (8 * i) & 0xff) + calc_bias(priv, k, i, j);
+
+		x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
+	}
+
+	return x;
+}
+
+int
+nv30_fb_init(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nv30_fb_priv *priv = (void *)object;
+	int ret, i, j;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* Init the memory timing regs at 0x10037c/0x1003ac */
+	if (device->chipset == 0x30 ||
+	    device->chipset == 0x31 ||
+	    device->chipset == 0x35) {
+		/* Related to ROP count */
+		int n = (device->chipset == 0x31 ? 2 : 4);
+		int l = nv_rd32(priv, 0x1003d0);
+
+		for (i = 0; i < n; i++) {
+			for (j = 0; j < 3; j++)
+				nv_wr32(priv, 0x10037c + 0xc * i + 0x4 * j,
+					calc_ref(priv, l, 0, j));
+
+			for (j = 0; j < 2; j++)
+				nv_wr32(priv, 0x1003ac + 0x8 * i + 0x4 * j,
+					calc_ref(priv, l, 1, j));
+		}
+	}
+
+	return 0;
+}
+
+static int
+nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv30_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv30_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv30_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x30),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv30_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv30_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
new file mode 100644
index 0000000..092f6f4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv35_fb_priv {
+	struct nouveau_fb base;
+};
+
+static void
+nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
+		else           tile->zcomp |= 0x08000000; /* Z24S8 */
+		tile->zcomp |= ((tile->tag->offset           ) >> 6);
+		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x40000000;
+#endif
+	}
+}
+
+static int
+nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv35_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv35_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv35_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x35),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv35_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv30_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
new file mode 100644
index 0000000..797ab3b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv36_fb_priv {
+	struct nouveau_fb base;
+};
+
+static void
+nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
+		else           tile->zcomp |= 0x20000000; /* Z24S8 */
+		tile->zcomp |= ((tile->tag->offset           ) >> 6);
+		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x80000000;
+#endif
+	}
+}
+
+static int
+nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv36_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv36_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv36_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x36),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv36_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv30_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
new file mode 100644
index 0000000..65e131b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv40_fb_priv {
+	struct nouveau_fb base;
+};
+
+static int
+nv40_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 pbus1218 = nv_rd32(pfb, 0x001218);
+	switch (pbus1218 & 0x00000300) {
+	case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+	case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000300: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+	}
+
+	pfb->ram.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
+	pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	return nv_rd32(pfb, 0x100320);
+}
+
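+/* On nv40 only Z24S8 surfaces (flags & 2) are compressed, the tile
+ * granularity doubles to 0x80 bytes, and tag allocations are aligned
+ * to 0x100 rather than 0x40.
+ */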
+void
+nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x80);
+	u32 tags  = round_up(tiles / pfb->ram.parts, 0x100);
+	if ( (flags & 2) &&
+	    !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		tile->zcomp  = 0x28000000; /* Z24S8_SPLIT_GRAD */
+		tile->zcomp |= ((tile->tag->offset           ) >> 8);
+		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x40000000;
+#endif
+	}
+}
+
+static int
+nv40_fb_init(struct nouveau_object *object)
+{
+	struct nv40_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
+	return 0;
+}
+
+static int
+nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv40_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv40_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv40_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv40_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv40_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
new file mode 100644
index 0000000..e9e5a08
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv41_fb_priv {
+	struct nouveau_fb base;
+};
+
+int
+nv41_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 pfb474 = nv_rd32(pfb, 0x100474);
+	if (pfb474 & 0x00000004)
+		pfb->ram.type = NV_MEM_TYPE_GDDR3;
+	if (pfb474 & 0x00000002)
+		pfb->ram.type = NV_MEM_TYPE_DDR2;
+	if (pfb474 & 0x00000001)
+		pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+	pfb->ram.size =   nv_rd32(pfb, 0x10020c) & 0xff000000;
+	pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	return nv_rd32(pfb, 0x100320);
+}
+
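+/* nv41 moves the tile region registers from the 0x100240 block to
+ * 0x100600 (0x10 bytes apart) and the zcomp registers to 0x100700.
+ */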
+void
+nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+	nv_rd32(pfb, 0x100600 + (i * 0x10));
+	nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
+}
+
+int
+nv41_fb_init(struct nouveau_object *object)
+{
+	struct nv41_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x100800, 0x00000001);
+	return 0;
+}
+
+static int
+nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv41_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv41_fb_vram_init;
+	priv->base.tile.regions = 12;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv40_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv41_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv41_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x41),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv41_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv41_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
new file mode 100644
index 0000000..ae89b50
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv44_fb_priv {
+	struct nouveau_fb base;
+};
+
+int
+nv44_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 pfb474 = nv_rd32(pfb, 0x100474);
+	if (pfb474 & 0x00000004)
+		pfb->ram.type = NV_MEM_TYPE_GDDR3;
+	if (pfb474 & 0x00000002)
+		pfb->ram.type = NV_MEM_TYPE_DDR2;
+	if (pfb474 & 0x00000001)
+		pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+	pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	return 0;
+}
+
+static void
+nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+		  u32 flags, struct nouveau_fb_tile *tile)
+{
+	tile->addr  = 0x00000001; /* mode = vram */
+	tile->addr |= addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
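+/* nv44 has no zeta compression: the constructor sets no tile.comp hook
+ * and the prog path writes no zcomp register.
+ */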
+void
+nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+	nv_rd32(pfb, 0x100600 + (i * 0x10));
+}
+
+int
+nv44_fb_init(struct nouveau_object *object)
+{
+	struct nv44_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x100850, 0x80000000);
+	nv_wr32(priv, 0x100800, 0x00000001);
+	return 0;
+}
+
+static int
+nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv44_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv44_fb_vram_init;
+	priv->base.tile.regions = 12;
+	priv->base.tile.init = nv44_fb_tile_init;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv44_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv44_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x44),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv44_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv44_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
new file mode 100644
index 0000000..589b93e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv46_fb_priv {
+	struct nouveau_fb base;
+};
+
+void
+nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+		  u32 flags, struct nouveau_fb_tile *tile)
+{
+	/* for performance, select alternate bank offset for zeta */
+	if (!(flags & 4)) tile->addr = (0 << 3);
+	else              tile->addr = (1 << 3);
+
+	tile->addr |= 0x00000001; /* mode = vram */
+	tile->addr |= addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
+static int
+nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv46_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv44_fb_vram_init;
+	priv->base.tile.regions = 15;
+	priv->base.tile.init = nv46_fb_tile_init;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv44_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv46_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x46),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv46_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv44_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
new file mode 100644
index 0000000..818bba3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv47_fb_priv {
+	struct nouveau_fb base;
+};
+
+static int
+nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv47_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv41_fb_vram_init;
+	priv->base.tile.regions = 15;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv40_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv41_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv47_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x47),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv47_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv41_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
new file mode 100644
index 0000000..84a31af
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv49_fb_priv {
+	struct nouveau_fb base;
+};
+
+static int
+nv49_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 pfb914 = nv_rd32(pfb, 0x100914);
+
+	switch (pfb914 & 0x00000003) {
+	case 0x00000000: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000001: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000002: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000003: break;
+	}
+
+	pfb->ram.size =   nv_rd32(pfb, 0x10020c) & 0xff000000;
+	pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	return nv_rd32(pfb, 0x100320);
+}
+
+static int
+nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv49_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv49_fb_vram_init;
+	priv->base.tile.regions = 15;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv40_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv41_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv49_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x49),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv49_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv41_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
new file mode 100644
index 0000000..797fd55
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv4e_fb_priv {
+	struct nouveau_fb base;
+};
+
+static int
+nv4e_fb_vram_init(struct nouveau_fb *pfb)
+{
+	pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	pfb->ram.type = NV_MEM_TYPE_STOLEN;
+	return 0;
+}
+
+static int
+nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv4e_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv4e_fb_vram_init;
+	priv->base.tile.regions = 12;
+	priv->base.tile.init = nv46_fb_tile_init;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv44_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv4e_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x4e),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv4e_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv44_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
new file mode 100644
index 0000000..0772ec9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/enum.h>
+#include <core/engctx.h>
+#include <core/object.h>
+
+#include <subdev/fb.h>
+#include <subdev/bios.h>
+
+struct nv50_fb_priv {
+	struct nouveau_fb base;
+	struct page *r100c08_page;
+	dma_addr_t r100c08;
+};
+
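+/* Lookup table over the 0x80 possible storage types: 0 marks the type
+ * invalid, while the non-zero values (1 or 2) appear to select the mm
+ * allocation type in nv50_fb_vram_new(), keeping incompatible storage
+ * types from sharing a row block.
+ */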
+static int types[0x80] = {
+	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
+	0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
+};
+
+static bool
+nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
+{
+	return types[(memtype & 0xff00) >> 8] != 0;
+}
+
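+/* Derive the VRAM reorder-block size from the memory configuration:
+ * rowsize = partitions * banks * (1 << colbits) * 8 bytes.  E.g. four
+ * partitions, four banks and colbits = 10 give a 128 KiB row.  The
+ * predicted total is rowsize << rowbitsa, plus rowsize << rowbitsb
+ * when bit 2 of 0x100200 flags a second bank configuration; the block
+ * is tripled when 0x100250 bit 0 is set.
+ */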
+static u32
+nv50_fb_vram_rblock(struct nouveau_fb *pfb)
+{
+	int i, parts, colbits, rowbitsa, rowbitsb, banks;
+	u64 rowsize, predicted;
+	u32 r0, r4, rt, ru, rblock_size;
+
+	r0 = nv_rd32(pfb, 0x100200);
+	r4 = nv_rd32(pfb, 0x100204);
+	rt = nv_rd32(pfb, 0x100250);
+	ru = nv_rd32(pfb, 0x001540);
+	nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+	for (i = 0, parts = 0; i < 8; i++) {
+		if (ru & (0x00010000 << i))
+			parts++;
+	}
+
+	colbits  =  (r4 & 0x0000f000) >> 12;
+	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+	banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);
+
+	rowsize = parts * banks * (1 << colbits) * 8;
+	predicted = rowsize << rowbitsa;
+	if (r0 & 0x00000004)
+		predicted += rowsize << rowbitsb;
+
+	if (predicted != pfb->ram.size) {
+		nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
+			(u32)(pfb->ram.size >> 20));
+	}
+
+	rblock_size = rowsize;
+	if (rt & 1)
+		rblock_size *= 3;
+
+	nv_debug(pfb, "rblock %d bytes\n", rblock_size);
+	return rblock_size;
+}
+
+static int
+nv50_fb_vram_init(struct nouveau_fb *pfb)
+{
+	struct nouveau_device *device = nv_device(pfb);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+	u32 size, tags = 0;
+	int ret;
+
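+	/* 0x10020c packs the VRAM size: bits 31:8 are size bits 31:8
+	 * and bits 7:0 are size bits 39:32, hence the swap below.  E.g.
+	 * a raw value of 0x40000001 decodes to 0x140000000 bytes
+	 * (5 GiB).
+	 */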
+	pfb->ram.size = nv_rd32(pfb, 0x10020c);
+	pfb->ram.size = (pfb->ram.size & 0xffffff00) |
+		       ((pfb->ram.size & 0x000000ff) << 32);
+
+	size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+	switch (device->chipset) {
+	case 0xaa:
+	case 0xac:
+	case 0xaf: /* IGPs, no reordering, no real VRAM */
+		ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
+		if (ret)
+			return ret;
+
+		pfb->ram.type   = NV_MEM_TYPE_STOLEN;
+		pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+		break;
+	default:
+		switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
+		case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+		case 1:
+			if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+				pfb->ram.type = NV_MEM_TYPE_DDR3;
+			else
+				pfb->ram.type = NV_MEM_TYPE_DDR2;
+			break;
+		case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+		case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break;
+		case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break;
+		default:
+			break;
+		}
+
+		ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
+				      nv50_fb_vram_rblock(pfb) >> 12);
+		if (ret)
+			return ret;
+
+		pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+		tags = nv_rd32(pfb, 0x100320);
+		break;
+	}
+
+	return tags;
+}
+
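+/* memtype layout, as decoded below: bits 9:8 select the compression
+ * mode, bits 6:0 the storage type, and bit 11 requests allocation from
+ * the tail of the heap.  Compression tags are only attempted for
+ * allocations aligned to 64 KiB large pages (align == 16 small pages).
+ */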
+static int
+nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
+		 u32 memtype, struct nouveau_mem **pmem)
+{
+	struct nv50_fb_priv *priv = (void *)pfb;
+	struct nouveau_mm *heap = &priv->base.vram;
+	struct nouveau_mm *tags = &priv->base.tags;
+	struct nouveau_mm_node *r;
+	struct nouveau_mem *mem;
+	int comp = (memtype & 0x300) >> 8;
+	int type = (memtype & 0x07f);
+	int back = (memtype & 0x800);
+	int min, max, ret;
+
+	max = (size >> 12);
+	min = ncmin ? (ncmin >> 12) : max;
+	align >>= 12;
+
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+
+	mutex_lock(&pfb->base.mutex);
+	if (comp) {
+		if (align == 16) {
+			int n = (max >> 4) * comp;
+
+			ret = nouveau_mm_head(tags, 1, n, n, 1, &mem->tag);
+			if (ret)
+				mem->tag = NULL;
+		}
+
+		if (unlikely(!mem->tag))
+			comp = 0;
+	}
+
+	INIT_LIST_HEAD(&mem->regions);
+	mem->memtype = (comp << 7) | type;
+	mem->size = max;
+
+	type = types[type];
+	do {
+		if (back)
+			ret = nouveau_mm_tail(heap, type, max, min, align, &r);
+		else
+			ret = nouveau_mm_head(heap, type, max, min, align, &r);
+		if (ret) {
+			mutex_unlock(&pfb->base.mutex);
+			pfb->ram.put(pfb, &mem);
+			return ret;
+		}
+
+		list_add_tail(&r->rl_entry, &mem->regions);
+		max -= r->length;
+	} while (max);
+	mutex_unlock(&pfb->base.mutex);
+
+	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+	mem->offset = (u64)r->offset << 12;
+	*pmem = mem;
+	return 0;
+}
+
+void
+nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+{
+	struct nv50_fb_priv *priv = (void *)pfb;
+	struct nouveau_mm_node *this;
+	struct nouveau_mem *mem;
+
+	mem = *pmem;
+	*pmem = NULL;
+	if (unlikely(mem == NULL))
+		return;
+
+	mutex_lock(&pfb->base.mutex);
+	while (!list_empty(&mem->regions)) {
+		this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
+
+		list_del(&this->rl_entry);
+		nouveau_mm_free(&priv->base.vram, &this);
+	}
+
+	nouveau_mm_free(&priv->base.tags, &mem->tag);
+	mutex_unlock(&pfb->base.mutex);
+
+	kfree(mem);
+}
+
+static const struct nouveau_enum vm_dispatch_subclients[] = {
+	{ 0x00000000, "GRCTX", NULL },
+	{ 0x00000001, "NOTIFY", NULL },
+	{ 0x00000002, "QUERY", NULL },
+	{ 0x00000003, "COND", NULL },
+	{ 0x00000004, "M2M_IN", NULL },
+	{ 0x00000005, "M2M_OUT", NULL },
+	{ 0x00000006, "M2M_NOTIFY", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_ccache_subclients[] = {
+	{ 0x00000000, "CB", NULL },
+	{ 0x00000001, "TIC", NULL },
+	{ 0x00000002, "TSC", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_prop_subclients[] = {
+	{ 0x00000000, "RT0", NULL },
+	{ 0x00000001, "RT1", NULL },
+	{ 0x00000002, "RT2", NULL },
+	{ 0x00000003, "RT3", NULL },
+	{ 0x00000004, "RT4", NULL },
+	{ 0x00000005, "RT5", NULL },
+	{ 0x00000006, "RT6", NULL },
+	{ 0x00000007, "RT7", NULL },
+	{ 0x00000008, "ZETA", NULL },
+	{ 0x00000009, "LOCAL", NULL },
+	{ 0x0000000a, "GLOBAL", NULL },
+	{ 0x0000000b, "STACK", NULL },
+	{ 0x0000000c, "DST2D", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_pfifo_subclients[] = {
+	{ 0x00000000, "PUSHBUF", NULL },
+	{ 0x00000001, "SEMAPHORE", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_bar_subclients[] = {
+	{ 0x00000000, "FB", NULL },
+	{ 0x00000001, "IN", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_client[] = {
+	{ 0x00000000, "STRMOUT", NULL },
+	{ 0x00000003, "DISPATCH", vm_dispatch_subclients },
+	{ 0x00000004, "PFIFO_WRITE", NULL },
+	{ 0x00000005, "CCACHE", vm_ccache_subclients },
+	{ 0x00000006, "PPPP", NULL },
+	{ 0x00000007, "CLIPID", NULL },
+	{ 0x00000008, "PFIFO_READ", NULL },
+	{ 0x00000009, "VFETCH", NULL },
+	{ 0x0000000a, "TEXTURE", NULL },
+	{ 0x0000000b, "PROP", vm_prop_subclients },
+	{ 0x0000000c, "PVP", NULL },
+	{ 0x0000000d, "PBSP", NULL },
+	{ 0x0000000e, "PCRYPT", NULL },
+	{ 0x0000000f, "PCOUNTER", NULL },
+	{ 0x00000011, "PDAEMON", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_engine[] = {
+	{ 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR },
+	{ 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP },
+	{ 0x00000004, "PEEPHOLE", NULL },
+	{ 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO },
+	{ 0x00000006, "BAR", vm_bar_subclients },
+	{ 0x00000008, "PPPP", NULL, NVDEV_ENGINE_PPP },
+	{ 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG },
+	{ 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP },
+	{ 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CRYPT },
+	{ 0x0000000b, "PCOUNTER", NULL },
+	{ 0x0000000c, "SEMAPHORE_BG", NULL },
+	{ 0x0000000d, "PCOPY", NULL, NVDEV_ENGINE_COPY0 },
+	{ 0x0000000e, "PDAEMON", NULL },
+	{}
+};
+
+static const struct nouveau_enum vm_fault[] = {
+	{ 0x00000000, "PT_NOT_PRESENT", NULL },
+	{ 0x00000001, "PT_TOO_SHORT", NULL },
+	{ 0x00000002, "PAGE_NOT_PRESENT", NULL },
+	{ 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
+	{ 0x00000004, "PAGE_READ_ONLY", NULL },
+	{ 0x00000006, "NULL_DMAOBJ", NULL },
+	{ 0x00000007, "WRONG_MEMTYPE", NULL },
+	{ 0x0000000b, "VRAM_LIMIT", NULL },
+	{ 0x0000000f, "DMAOBJ_LIMIT", NULL },
+	{}
+};
+
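+/* VM fault decoding: 0x100c90 acts as an index/valid word for a
+ * six-entry window read through 0x100c94.  trap[0] holds the engine,
+ * reason, client and subclient fields (4 bits each on pre-nva3 and IGP
+ * parts, 8 bits otherwise), trap[1]/trap[2] the channel address, and
+ * trap[3..5] the 40-bit faulting offset plus the read/write flag.
+ */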
+static void
+nv50_fb_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_device *device = nv_device(subdev);
+	struct nouveau_engine *engine;
+	struct nv50_fb_priv *priv = (void *)subdev;
+	const struct nouveau_enum *en, *cl;
+	struct nouveau_object *engctx = NULL;
+	u32 trap[6], idx, chan;
+	u8 st0, st1, st2, st3;
+	int i;
+
+	idx = nv_rd32(priv, 0x100c90);
+	if (!(idx & 0x80000000))
+		return;
+	idx &= 0x00ffffff;
+
+	for (i = 0; i < 6; i++) {
+		nv_wr32(priv, 0x100c90, idx | i << 24);
+		trap[i] = nv_rd32(priv, 0x100c94);
+	}
+	nv_wr32(priv, 0x100c90, idx | 0x80000000);
+
+	/* decode status bits into something more useful */
+	if (device->chipset  < 0xa3 ||
+	    device->chipset == 0xaa || device->chipset == 0xac) {
+		st0 = (trap[0] & 0x0000000f) >> 0;
+		st1 = (trap[0] & 0x000000f0) >> 4;
+		st2 = (trap[0] & 0x00000f00) >> 8;
+		st3 = (trap[0] & 0x0000f000) >> 12;
+	} else {
+		st0 = (trap[0] & 0x000000ff) >> 0;
+		st1 = (trap[0] & 0x0000ff00) >> 8;
+		st2 = (trap[0] & 0x00ff0000) >> 16;
+		st3 = (trap[0] & 0xff000000) >> 24;
+	}
+	chan = (trap[2] << 16) | trap[1];
+
+	en = nouveau_enum_find(vm_engine, st0);
+
+	if (en && en->data2) {
+		const struct nouveau_enum *orig_en = en;
+		while (en->name && en->value == st0 && en->data2) {
+			engine = nouveau_engine(subdev, en->data2);
+			if (engine) {
+				engctx = nouveau_engctx_get(engine, chan);
+				if (engctx)
+					break;
+			}
+			en++;
+		}
+		if (!engctx)
+			en = orig_en;
+	}
+
+	nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
+		 (trap[5] & 0x00000100) ? "read" : "write",
+		 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan,
+		 nouveau_client_name(engctx));
+
+	nouveau_engctx_put(engctx);
+
+	if (en)
+		pr_cont("%s/", en->name);
+	else
+		pr_cont("%02x/", st0);
+
+	cl = nouveau_enum_find(vm_client, st2);
+	if (cl)
+		pr_cont("%s/", cl->name);
+	else
+		pr_cont("%02x/", st2);
+
+	if      (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
+	else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
+	else                     cl = NULL;
+	if (cl)
+		pr_cont("%s", cl->name);
+	else
+		pr_cont("%02x", st3);
+
+	pr_cont(" reason: ");
+	en = nouveau_enum_find(vm_fault, st1);
+	if (en)
+		pr_cont("%s\n", en->name);
+	else
+		pr_cont("0x%08x\n", st1);
+}
+
+static int
+nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nv50_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (priv->r100c08_page) {
+		priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
+					     0, PAGE_SIZE,
+					     PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(device->pdev, priv->r100c08))
+			nv_warn(priv, "failed 0x100c08 page map\n");
+	} else {
+		nv_warn(priv, "failed 0x100c08 page alloc\n");
+	}
+
+	priv->base.memtype_valid = nv50_fb_memtype_valid;
+	priv->base.ram.init = nv50_fb_vram_init;
+	priv->base.ram.get = nv50_fb_vram_new;
+	priv->base.ram.put = nv50_fb_vram_del;
+	nv_subdev(priv)->intr = nv50_fb_intr;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+static void
+nv50_fb_dtor(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nv50_fb_priv *priv = (void *)object;
+
+	if (priv->r100c08_page) {
+		pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
+			       PCI_DMA_BIDIRECTIONAL);
+		__free_page(priv->r100c08_page);
+	}
+
+	nouveau_fb_destroy(&priv->base);
+}
+
+static int
+nv50_fb_init(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nv50_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* The exact purpose of this register is unknown.  Without
+	 * pointing it at a scratch page, VRAM->GART blits with M2MF
+	 * (as in DDX DFS) cause IOMMU "read from address 0" errors
+	 * (rh#561267).
+	 */
+	nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
+
+	/* These per-chipset values are required for the 0x100c90 window
+	 * to report meaningful trap information; their exact meaning is
+	 * unknown.
+	 */
+	switch (device->chipset) {
+	case 0x50:
+		nv_wr32(priv, 0x100c90, 0x000707ff);
+		break;
+	case 0xa3:
+	case 0xa5:
+	case 0xa8:
+		nv_wr32(priv, 0x100c90, 0x000d0fff);
+		break;
+	case 0xaf:
+		nv_wr32(priv, 0x100c90, 0x089d1fff);
+		break;
+	default:
+		nv_wr32(priv, 0x100c90, 0x001d07ff);
+		break;
+	}
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_fb_ctor,
+		.dtor = nv50_fb_dtor,
+		.init = nv50_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
new file mode 100644
index 0000000..86ad592
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/fb.h>
+#include <subdev/ltcg.h>
+#include <subdev/bios.h>
+
+struct nvc0_fb_priv {
+	struct nouveau_fb base;
+	struct page *r100c10_page;
+	dma_addr_t r100c10;
+};
+
+extern const u8 nvc0_pte_storage_type_map[256];
+
+static bool
+nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
+{
+	u8 memtype = (tile_flags & 0x0000ff00) >> 8;
+	return likely(nvc0_pte_storage_type_map[memtype] != 0xff);
+}
+
+static int
+nvc0_fb_vram_init(struct nouveau_fb *pfb)
+{
+	struct nouveau_bios *bios = nouveau_bios(pfb);
+	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+	u32 parts = nv_rd32(pfb, 0x022438);
+	u32 pmask = nv_rd32(pfb, 0x022554);
+	u32 bsize = nv_rd32(pfb, 0x10f20c);
+	u32 offset, length;
+	bool uniform = true;
+	int ret, part;
+
+	nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
+	nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
+
+	pfb->ram.type = nouveau_fb_bios_memtype(bios);
+	pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
+
+	/* read amount of vram attached to each memory controller */
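+	/* (a set bit in pmask appears to mark a disabled partition) */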
+	for (part = 0; part < parts; part++) {
+		if (!(pmask & (1 << part))) {
+			u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
+			if (psize != bsize) {
+				if (psize < bsize)
+					bsize = psize;
+				uniform = false;
+			}
+
+			nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
+			pfb->ram.size += (u64)psize << 20;
+		}
+	}
+
+	/* if all controllers have the same amount attached, there are no holes */
+	if (uniform) {
+		offset = rsvd_head;
+		length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+		return nouveau_mm_init(&pfb->vram, offset, length, 1);
+	}
+
+	/* otherwise, address the lowest common amount from 0GiB */
+	ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
+	if (ret)
+		return ret;
+
+	/* and the rest starting from (8GiB + common_size) */
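+	/* offsets are in 4KiB pages: 0x0200000000 >> 12 is the 8GiB mark,
+	 * and bsize << 8 converts MiB to pages
+	 */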
+	offset = (0x0200000000ULL >> 12) + (bsize << 8);
+	length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail;
+
+	ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
+	if (ret) {
+		nouveau_mm_fini(&pfb->vram);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
+		 u32 memtype, struct nouveau_mem **pmem)
+{
+	struct nouveau_mm *mm = &pfb->vram;
+	struct nouveau_mm_node *r;
+	struct nouveau_mem *mem;
+	int type = (memtype & 0x0ff);
+	int back = (memtype & 0x800);
+	int ret;
+	const bool comp = nvc0_pte_storage_type_map[type] != type;
+
+	size  >>= 12;
+	align >>= 12;
+	ncmin >>= 12;
+	if (!ncmin)
+		ncmin = size;
+
+	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&mem->regions);
+	mem->size = size;
+
+	mutex_lock(&pfb->base.mutex);
+	if (comp) {
+		struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb->base.base.parent);
+
+		/* compression tags can only be used with large (128KiB) pages */
+		if (align == (1 << (17 - 12))) {
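+			/* one tag per 128KiB large page; size is in 4KiB
+			 * units, hence the shift by 5
+			 */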
+			int n = size >> 5;
+			ltcg->tags_alloc(ltcg, n, &mem->tag);
+		}
+		if (unlikely(!mem->tag))
+			type = nvc0_pte_storage_type_map[type];
+	}
+	mem->memtype = type;
+
+	do {
+		if (back)
+			ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
+		else
+			ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
+		if (ret) {
+			mutex_unlock(&pfb->base.mutex);
+			pfb->ram.put(pfb, &mem);
+			return ret;
+		}
+
+		list_add_tail(&r->rl_entry, &mem->regions);
+		size -= r->length;
+	} while (size);
+	mutex_unlock(&pfb->base.mutex);
+
+	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+	mem->offset = (u64)r->offset << 12;
+	*pmem = mem;
+	return 0;
+}
+
+static void
+nvc0_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+{
+	struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb->base.base.parent);
+
+	if ((*pmem)->tag)
+		ltcg->tags_free(ltcg, &(*pmem)->tag);
+
+	nv50_fb_vram_del(pfb, pmem);
+}
+
+static int
+nvc0_fb_init(struct nouveau_object *object)
+{
+	struct nvc0_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	if (priv->r100c10_page)
+		nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
+	return 0;
+}
+
+static void
+nvc0_fb_dtor(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nvc0_fb_priv *priv = (void *)object;
+
+	if (priv->r100c10_page) {
+		pci_unmap_page(device->pdev, priv->r100c10, PAGE_SIZE,
+			       PCI_DMA_BIDIRECTIONAL);
+		__free_page(priv->r100c10_page);
+	}
+
+	nouveau_fb_destroy(&priv->base);
+}
+
+static int
+nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nvc0_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nvc0_fb_memtype_valid;
+	priv->base.ram.init = nvc0_fb_vram_init;
+	priv->base.ram.get = nvc0_fb_vram_new;
+	priv->base.ram.put = nvc0_fb_vram_del;
+
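+	/* as on nv50 (0x100c08), 0x100c10 is pointed at a DMA-mapped
+	 * scratch page; the precise purpose isn't known
+	 */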
+	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (priv->r100c10_page) {
+		priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page,
+					     0, PAGE_SIZE,
+					     PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(device->pdev, priv->r100c10)) {
+			/* don't leave a dangling page for the dtor to unmap */
+			__free_page(priv->r100c10_page);
+			priv->r100c10_page = NULL;
+			return -EFAULT;
+		}
+	}
+
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nvc0_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_fb_ctor,
+		.dtor = nvc0_fb_dtor,
+		.init = nvc0_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
new file mode 100644
index 0000000..d422acc
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/gpio.h>
+#include <subdev/bios.h>
+#include <subdev/bios/gpio.h>
+
+static int
+nouveau_gpio_drive(struct nouveau_gpio *gpio,
+		   int idx, int line, int dir, int out)
+{
+	return gpio->drive ? gpio->drive(gpio, line, dir, out) : -ENODEV;
+}
+
+static int
+nouveau_gpio_sense(struct nouveau_gpio *gpio, int idx, int line)
+{
+	return gpio->sense ? gpio->sense(gpio, line) : -ENODEV;
+}
+
+static int
+nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
+		  struct dcb_gpio_func *func)
+{
+	struct nouveau_bios *bios = nouveau_bios(gpio);
+	u8  ver, len;
+	u16 data;
+
+	if (line == 0xff && tag == 0xff)
+		return -EINVAL;
+
+	data = dcb_gpio_match(bios, idx, tag, line, &ver, &len, func);
+	if (data)
+		return 0;
+
+	/* Apple iMac G4 NV18 */
+	if (nv_device_match(nv_object(gpio), 0x0189, 0x10de, 0x0010)) {
+		if (tag == DCB_GPIO_TVDAC0) {
+			*func = (struct dcb_gpio_func) {
+				.func = DCB_GPIO_TVDAC0,
+				.line = 4,
+				.log[0] = 0,
+				.log[1] = 1,
+			};
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int
+nouveau_gpio_set(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, int state)
+{
+	struct dcb_gpio_func func;
+	int ret;
+
+	ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
+	if (ret == 0) {
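+		/* log[state] packs the physical programming for the requested
+		 * logical state: bit 1 is the direction, bit 0 the output level
+		 */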
+		int dir = !!(func.log[state] & 0x02);
+		int out = !!(func.log[state] & 0x01);
+		ret = nouveau_gpio_drive(gpio, idx, func.line, dir, out);
+	}
+
+	return ret;
+}
+
+static int
+nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
+{
+	struct dcb_gpio_func func;
+	int ret;
+
+	ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
+	if (ret == 0) {
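+		/* compare the sensed line state against the DCB entry's
+		 * logic-1 definition to recover a logical 0/1
+		 */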
+		ret = nouveau_gpio_sense(gpio, idx, func.line);
+		if (ret >= 0)
+			ret = (ret == (func.log[1] & 1));
+	}
+
+	return ret;
+}
+
+void
+_nouveau_gpio_dtor(struct nouveau_object *object)
+{
+	struct nouveau_gpio *gpio = (void *)object;
+	nouveau_event_destroy(&gpio->events);
+	nouveau_subdev_destroy(&gpio->base);
+}
+
+int
+nouveau_gpio_create_(struct nouveau_object *parent,
+		     struct nouveau_object *engine,
+		     struct nouveau_oclass *oclass, int lines,
+		     int length, void **pobject)
+{
+	struct nouveau_gpio *gpio;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "GPIO", "gpio",
+				     length, pobject);
+	gpio = *pobject;
+	if (ret)
+		return ret;
+
+	ret = nouveau_event_create(lines, &gpio->events);
+	if (ret)
+		return ret;
+
+	gpio->find = nouveau_gpio_find;
+	gpio->set  = nouveau_gpio_set;
+	gpio->get  = nouveau_gpio_get;
+	return 0;
+}
+
+static const struct dmi_system_id gpio_reset_ids[] = {
+	{
+		.ident = "Apple Macbook 10,1",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
+		}
+	},
+	{ }
+};
+
+int
+nouveau_gpio_init(struct nouveau_gpio *gpio)
+{
+	int ret = nouveau_subdev_init(&gpio->base);
+	if (ret == 0 && gpio->reset) {
+		if (dmi_check_system(gpio_reset_ids))
+			gpio->reset(gpio, DCB_GPIO_UNUSED);
+	}
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
new file mode 100644
index 0000000..76d5d54
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+struct nv10_gpio_priv {
+	struct nouveau_gpio base;
+};
+
+static int
+nv10_gpio_sense(struct nouveau_gpio *gpio, int line)
+{
+	if (line < 2) {
+		line = line * 16;
+		line = nv_rd32(gpio, 0x600818) >> line;
+		return !!(line & 0x0100);
+	} else
+	if (line < 10) {
+		line = (line - 2) * 4;
+		line = nv_rd32(gpio, 0x60081c) >> line;
+		return !!(line & 0x04);
+	} else
+	if (line < 14) {
+		line = (line - 10) * 4;
+		line = nv_rd32(gpio, 0x600850) >> line;
+		return !!(line & 0x04);
+	}
+
+	return -EINVAL;
+}
+
+static int
+nv10_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
+{
+	u32 reg, mask, data;
+
+	if (line < 2) {
+		line = line * 16;
+		reg  = 0x600818;
+		mask = 0x00000011;
+		data = (dir << 4) | out;
+	} else
+	if (line < 10) {
+		line = (line - 2) * 4;
+		reg  = 0x60081c;
+		mask = 0x00000003;
+		data = (dir << 1) | out;
+	} else
+	if (line < 14) {
+		line = (line - 10) * 4;
+		reg  = 0x600850;
+		mask = 0x00000003;
+		data = (dir << 1) | out;
+	} else {
+		return -EINVAL;
+	}
+
+	nv_mask(gpio, reg, mask << line, data << line);
+	return 0;
+}
+
+static void
+nv10_gpio_intr(struct nouveau_subdev *subdev)
+{
+	struct nv10_gpio_priv *priv = (void *)subdev;
+	u32 intr = nv_rd32(priv, 0x001104);
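+	/* the low and high halves appear to latch opposite signal edges for
+	 * each line, hence the 0x00010001 per-line masks used elsewhere
+	 */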
+	u32 hi = (intr & 0x0000ffff) >> 0;
+	u32 lo = (intr & 0xffff0000) >> 16;
+	int i;
+
+	for (i = 0; (hi | lo) && i < 32; i++) {
+		if ((hi | lo) & (1 << i))
+			nouveau_event_trigger(priv->base.events, i);
+	}
+
+	nv_wr32(priv, 0x001104, intr);
+}
+
+static void
+nv10_gpio_intr_enable(struct nouveau_event *event, int line)
+{
+	nv_wr32(event->priv, 0x001104, 0x00010001 << line);
+	nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00010001 << line);
+}
+
+static void
+nv10_gpio_intr_disable(struct nouveau_event *event, int line)
+{
+	nv_wr32(event->priv, 0x001104, 0x00010001 << line);
+	nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00000000);
+}
+
+static int
+nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv10_gpio_priv *priv;
+	int ret;
+
+	ret = nouveau_gpio_create(parent, engine, oclass, 16, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.drive = nv10_gpio_drive;
+	priv->base.sense = nv10_gpio_sense;
+	priv->base.events->priv = priv;
+	priv->base.events->enable = nv10_gpio_intr_enable;
+	priv->base.events->disable = nv10_gpio_intr_disable;
+	nv_subdev(priv)->intr = nv10_gpio_intr;
+	return 0;
+}
+
+static void
+nv10_gpio_dtor(struct nouveau_object *object)
+{
+	struct nv10_gpio_priv *priv = (void *)object;
+	nouveau_gpio_destroy(&priv->base);
+}
+
+static int
+nv10_gpio_init(struct nouveau_object *object)
+{
+	struct nv10_gpio_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_gpio_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x001144, 0x00000000);
+	nv_wr32(priv, 0x001104, 0xffffffff);
+	return 0;
+}
+
+static int
+nv10_gpio_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv10_gpio_priv *priv = (void *)object;
+	nv_wr32(priv, 0x001144, 0x00000000);
+	return nouveau_gpio_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv10_gpio_oclass = {
+	.handle = NV_SUBDEV(GPIO, 0x10),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv10_gpio_ctor,
+		.dtor = nv10_gpio_dtor,
+		.init = nv10_gpio_init,
+		.fini = nv10_gpio_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
new file mode 100644
index 0000000..bf489dc
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+struct nv50_gpio_priv {
+	struct nouveau_gpio base;
+};
+
+static void
+nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
+{
+	struct nouveau_bios *bios = nouveau_bios(gpio);
+	struct nv50_gpio_priv *priv = (void *)gpio;
+	u8 ver, len;
+	u16 entry;
+	int ent = -1;
+
+	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
+		static const u32 regs[] = { 0xe100, 0xe28c };
+		u32 data = nv_ro32(bios, entry);
+		u8  line =   (data & 0x0000001f);
+		u8  func =   (data & 0x0000ff00) >> 8;
+		u8  defs = !!(data & 0x01000000);
+		u8  unk0 = !!(data & 0x02000000);
+		u8  unk1 = !!(data & 0x04000000);
+		u32 val = (unk1 << 16) | unk0;
+		u32 reg = regs[line >> 4];
+
+		line &= 0x0f;
+
+		if ( func  == DCB_GPIO_UNUSED ||
+		    (match != DCB_GPIO_UNUSED && match != func))
+			continue;
+
+		gpio->set(gpio, 0, func, line, defs);
+
+		nv_mask(priv, reg, 0x00010001 << line, val << line);
+	}
+}
+
+static int
+nv50_gpio_location(int line, u32 *reg, u32 *shift)
+{
+	static const u32 nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
+
+	if (line >= 32)
+		return -EINVAL;
+
+	*reg = nv50_gpio_reg[line >> 3];
+	*shift = (line & 7) << 2;
+	return 0;
+}
+
+static int
+nv50_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
+{
+	u32 reg, shift;
+
+	if (nv50_gpio_location(line, &reg, &shift))
+		return -EINVAL;
+
+	nv_mask(gpio, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
+	return 0;
+}
+
+static int
+nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
+{
+	u32 reg, shift;
+
+	if (nv50_gpio_location(line, &reg, &shift))
+		return -EINVAL;
+
+	return !!(nv_rd32(gpio, reg) & (4 << shift));
+}
+
+void
+nv50_gpio_intr(struct nouveau_subdev *subdev)
+{
+	struct nv50_gpio_priv *priv = (void *)subdev;
+	u32 intr0, intr1 = 0;
+	u32 hi, lo;
+	int i;
+
+	intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
+	if (nv_device(priv)->chipset >= 0x90)
+		intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070);
+
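+	/* fold both registers into per-line hi/lo bitmasks; each register
+	 * pairs the two edges of a line as 0x00010001 << line
+	 */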
+	hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+	lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+
+	for (i = 0; (hi | lo) && i < 32; i++) {
+		if ((hi | lo) & (1 << i))
+			nouveau_event_trigger(priv->base.events, i);
+	}
+
+	nv_wr32(priv, 0xe054, intr0);
+	if (nv_device(priv)->chipset >= 0x90)
+		nv_wr32(priv, 0xe074, intr1);
+}
+
+void
+nv50_gpio_intr_enable(struct nouveau_event *event, int line)
+{
+	const u32 addr = line < 16 ? 0xe050 : 0xe070;
+	const u32 mask = 0x00010001 << (line & 0xf);
+	nv_wr32(event->priv, addr + 0x04, mask);
+	nv_mask(event->priv, addr + 0x00, mask, mask);
+}
+
+void
+nv50_gpio_intr_disable(struct nouveau_event *event, int line)
+{
+	const u32 addr = line < 16 ? 0xe050 : 0xe070;
+	const u32 mask = 0x00010001 << (line & 0xf);
+	nv_wr32(event->priv, addr + 0x04, mask);
+	nv_mask(event->priv, addr + 0x00, mask, 0x00000000);
+}
+
+static int
+nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nv50_gpio_priv *priv;
+	int ret;
+
+	ret = nouveau_gpio_create(parent, engine, oclass,
+				  nv_device(parent)->chipset >= 0x90 ? 32 : 16,
+				  &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.reset = nv50_gpio_reset;
+	priv->base.drive = nv50_gpio_drive;
+	priv->base.sense = nv50_gpio_sense;
+	priv->base.events->priv = priv;
+	priv->base.events->enable = nv50_gpio_intr_enable;
+	priv->base.events->disable = nv50_gpio_intr_disable;
+	nv_subdev(priv)->intr = nv50_gpio_intr;
+	return 0;
+}
+
+void
+nv50_gpio_dtor(struct nouveau_object *object)
+{
+	struct nv50_gpio_priv *priv = (void *)object;
+	nouveau_gpio_destroy(&priv->base);
+}
+
+int
+nv50_gpio_init(struct nouveau_object *object)
+{
+	struct nv50_gpio_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_gpio_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* disable and ack any pending gpio interrupts */
+	nv_wr32(priv, 0xe050, 0x00000000);
+	nv_wr32(priv, 0xe054, 0xffffffff);
+	if (nv_device(priv)->chipset >= 0x90) {
+		nv_wr32(priv, 0xe070, 0x00000000);
+		nv_wr32(priv, 0xe074, 0xffffffff);
+	}
+
+	return 0;
+}
+
+int
+nv50_gpio_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_gpio_priv *priv = (void *)object;
+	nv_wr32(priv, 0xe050, 0x00000000);
+	if (nv_device(priv)->chipset >= 0x90)
+		nv_wr32(priv, 0xe070, 0x00000000);
+	return nouveau_gpio_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv50_gpio_oclass = {
+	.handle = NV_SUBDEV(GPIO, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_gpio_ctor,
+		.dtor = nv50_gpio_dtor,
+		.init = nv50_gpio_init,
+		.fini = nv50_gpio_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
new file mode 100644
index 0000000..010431e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+struct nvd0_gpio_priv {
+	struct nouveau_gpio base;
+};
+
+void
+nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
+{
+	struct nouveau_bios *bios = nouveau_bios(gpio);
+	struct nvd0_gpio_priv *priv = (void *)gpio;
+	u8 ver, len;
+	u16 entry;
+	int ent = -1;
+
+	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
+		u32 data = nv_ro32(bios, entry);
+		u8  line =   (data & 0x0000003f);
+		u8  defs = !!(data & 0x00000080);
+		u8  func =   (data & 0x0000ff00) >> 8;
+		u8  unk0 =   (data & 0x00ff0000) >> 16;
+		u8  unk1 =   (data & 0x1f000000) >> 24;
+
+		if ( func  == DCB_GPIO_UNUSED ||
+		    (match != DCB_GPIO_UNUSED && match != func))
+			continue;
+
+		gpio->set(gpio, 0, func, line, defs);
+
+		nv_mask(priv, 0x00d610 + (line * 4), 0xff, unk0);
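+		/* unk1 appears to be a 1-based index into a second routing
+		 * table at 0xd740
+		 */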
+		if (unk1--)
+			nv_mask(priv, 0x00d740 + (unk1 * 4), 0xff, line);
+	}
+}
+
+int
+nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
+{
+	u32 data = ((dir ^ 1) << 13) | (out << 12);
+	nv_mask(gpio, 0x00d610 + (line * 4), 0x00003000, data);
+	nv_mask(gpio, 0x00d604, 0x00000001, 0x00000001); /* update? */
+	return 0;
+}
+
+int
+nvd0_gpio_sense(struct nouveau_gpio *gpio, int line)
+{
+	return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
+}
+
+static int
+nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nvd0_gpio_priv *priv;
+	int ret;
+
+	ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.reset = nvd0_gpio_reset;
+	priv->base.drive = nvd0_gpio_drive;
+	priv->base.sense = nvd0_gpio_sense;
+	priv->base.events->priv = priv;
+	priv->base.events->enable = nv50_gpio_intr_enable;
+	priv->base.events->disable = nv50_gpio_intr_disable;
+	nv_subdev(priv)->intr = nv50_gpio_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nvd0_gpio_oclass = {
+	.handle = NV_SUBDEV(GPIO, 0xd0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvd0_gpio_ctor,
+		.dtor = nv50_gpio_dtor,
+		.init = nv50_gpio_init,
+		.fini = nv50_gpio_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
new file mode 100644
index 0000000..16b8c5b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+struct nve0_gpio_priv {
+	struct nouveau_gpio base;
+};
+
+void
+nve0_gpio_intr(struct nouveau_subdev *subdev)
+{
+	struct nve0_gpio_priv *priv = (void *)subdev;
+	u32 intr0 = nv_rd32(priv, 0xdc00) & nv_rd32(priv, 0xdc08);
+	u32 intr1 = nv_rd32(priv, 0xdc80) & nv_rd32(priv, 0xdc88);
+	u32 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+	u32 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+	int i;
+
+	for (i = 0; (hi | lo) && i < 32; i++) {
+		if ((hi | lo) & (1 << i))
+			nouveau_event_trigger(priv->base.events, i);
+	}
+
+	nv_wr32(priv, 0xdc00, intr0);
+	nv_wr32(priv, 0xdc80, intr1); /* status, not the 0xdc88 enable reg */
+}
+
+void
+nve0_gpio_intr_enable(struct nouveau_event *event, int line)
+{
+	const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
+	const u32 mask = 0x00010001 << (line & 0xf);
+	/* ack anything pending (+0x00 is write-one-to-clear status, as used
+	 * by init/fini below), then unmask the line (+0x08)
+	 */
+	nv_wr32(event->priv, addr + 0x00, mask);
+	nv_mask(event->priv, addr + 0x08, mask, mask);
+}
+
+void
+nve0_gpio_intr_disable(struct nouveau_event *event, int line)
+{
+	const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
+	const u32 mask = 0x00010001 << (line & 0xf);
+	/* mask the line off (+0x08), then ack anything still pending (+0x00) */
+	nv_mask(event->priv, addr + 0x08, mask, 0x00000000);
+	nv_wr32(event->priv, addr + 0x00, mask);
+}
+
+int
+nve0_gpio_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nve0_gpio_priv *priv = (void *)object;
+	nv_wr32(priv, 0xdc08, 0x00000000);
+	nv_wr32(priv, 0xdc88, 0x00000000);
+	return nouveau_gpio_fini(&priv->base, suspend);
+}
+
+int
+nve0_gpio_init(struct nouveau_object *object)
+{
+	struct nve0_gpio_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_gpio_init(&priv->base);
+	if (ret)
+		return ret;
+
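+	/* ack any pending interrupts; 0xdc00/0xdc80 appear to be
+	 * write-one-to-clear status registers
+	 */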
+	nv_wr32(priv, 0xdc00, 0xffffffff);
+	nv_wr32(priv, 0xdc80, 0xffffffff);
+	return 0;
+}
+
+void
+nve0_gpio_dtor(struct nouveau_object *object)
+{
+	struct nve0_gpio_priv *priv = (void *)object;
+	nouveau_gpio_destroy(&priv->base);
+}
+
+static int
+nve0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nve0_gpio_priv *priv;
+	int ret;
+
+	ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.reset = nvd0_gpio_reset;
+	priv->base.drive = nvd0_gpio_drive;
+	priv->base.sense = nvd0_gpio_sense;
+	priv->base.events->priv = priv;
+	priv->base.events->enable = nve0_gpio_intr_enable;
+	priv->base.events->disable = nve0_gpio_intr_disable;
+	nv_subdev(priv)->intr = nve0_gpio_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_gpio_oclass = {
+	.handle = NV_SUBDEV(GPIO, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_gpio_ctor,
+		.dtor = nv50_gpio_dtor,
+		.init = nve0_gpio_init,
+		.fini = nve0_gpio_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
new file mode 100644
index 0000000..2ee1c89
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_GPIO_H__
+#define __NVKM_GPIO_H__
+
+#include <subdev/gpio.h>
+
+void nv50_gpio_dtor(struct nouveau_object *);
+int  nv50_gpio_init(struct nouveau_object *);
+int  nv50_gpio_fini(struct nouveau_object *, bool);
+void nv50_gpio_intr(struct nouveau_subdev *);
+void nv50_gpio_intr_enable(struct nouveau_event *, int line);
+void nv50_gpio_intr_disable(struct nouveau_event *, int line);
+
+void nvd0_gpio_reset(struct nouveau_gpio *, u8);
+int  nvd0_gpio_drive(struct nouveau_gpio *, int, int, int);
+int  nvd0_gpio_sense(struct nouveau_gpio *, int);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
new file mode 100644
index 0000000..dec94e9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <subdev/i2c.h>
+
+struct anx9805_i2c_port {
+	struct nouveau_i2c_port base;
+	u32 addr;
+	u32 ctrl;
+};
+
+static int
+anx9805_train(struct nouveau_i2c_port *port, int link_nr, int link_bw, bool enh)
+{
+	struct anx9805_i2c_port *chan = (void *)port;
+	struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
+	u8 tmp, i;
+
+	nv_wri2cr(mast, chan->addr, 0xa0, link_bw);
+	nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
+	nv_wri2cr(mast, chan->addr, 0xa2, 0x01);
+	nv_wri2cr(mast, chan->addr, 0xa8, 0x01);
+
+	i = 0;
+	while ((tmp = nv_rdi2cr(mast, chan->addr, 0xa8)) & 0x01) {
+		mdelay(5);
+		if (i++ == 100) {
+			nv_error(port, "link training timed out\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	if (tmp & 0x70) {
+		nv_error(port, "link training failed: 0x%02x\n", tmp);
+		return -EIO;
+	}
+
+	return 1;
+}
+
+static int
+anx9805_aux(struct nouveau_i2c_port *port, u8 type, u32 addr, u8 *data, u8 size)
+{
+	struct anx9805_i2c_port *chan = (void *)port;
+	struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
+	int i, ret = -ETIMEDOUT;
+	u8 tmp;
+
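+	/* register roles below are inferred from this driver rather than a
+	 * public ANX9805 datasheet: 0xe4..0xe9 stage the AUX request and
+	 * 0xf0+ is the data buffer
+	 */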
+	tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04;
+	nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04);
+	nv_wri2cr(mast, chan->ctrl, 0x07, tmp);
+	nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
+
+	nv_wri2cr(mast, chan->addr, 0xe4, 0x80);
+	for (i = 0; !(type & 1) && i < size; i++)
+		nv_wri2cr(mast, chan->addr, 0xf0 + i, data[i]);
+	nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type);
+	nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >>  0);
+	nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >>  8);
+	nv_wri2cr(mast, chan->addr, 0xe8, (addr & 0xf0000) >> 16);
+	nv_wri2cr(mast, chan->addr, 0xe9, 0x01);
+
+	i = 0;
+	while ((tmp = nv_rdi2cr(mast, chan->addr, 0xe9)) & 0x01) {
+		mdelay(5);
+		if (i++ == 32)
+			goto done;
+	}
+
+	if ((tmp = nv_rdi2cr(mast, chan->ctrl, 0xf7)) & 0x01) {
+		ret = -EIO;
+		goto done;
+	}
+
+	for (i = 0; (type & 1) && i < size; i++)
+		data[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i);
+	ret = 0;
+done:
+	nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
+	return ret;
+}
+
+static const struct nouveau_i2c_func
+anx9805_aux_func = {
+	.aux = anx9805_aux,
+	.lnk_ctl = anx9805_train,
+};
+
+static int
+anx9805_aux_chan_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 index,
+		      struct nouveau_object **pobject)
+{
+	struct nouveau_i2c_port *mast = (void *)parent;
+	struct anx9805_i2c_port *chan;
+	int ret;
+
+	ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+				     &nouveau_i2c_aux_algo, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	switch ((oclass->handle & 0xff00) >> 8) {
+	case 0x0d:
+		chan->addr = 0x38;
+		chan->ctrl = 0x39;
+		break;
+	case 0x0e:
+		chan->addr = 0x3c;
+		chan->ctrl = 0x3b;
+		break;
+	default:
+		BUG();
+	}
+
+	if (mast->adapter.algo == &i2c_bit_algo) {
+		struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
+		algo->udelay = max(algo->udelay, 40);
+	}
+
+	chan->base.func = &anx9805_aux_func;
+	return 0;
+}
+
+static struct nouveau_ofuncs
+anx9805_aux_ofuncs = {
+	.ctor =  anx9805_aux_chan_ctor,
+	.dtor = _nouveau_i2c_port_dtor,
+	.init = _nouveau_i2c_port_init,
+	.fini = _nouveau_i2c_port_fini,
+};
+
+static int
+anx9805_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+	struct anx9805_i2c_port *port = adap->algo_data;
+	struct nouveau_i2c_port *mast = (void *)nv_object(port)->parent;
+	struct i2c_msg *msg = msgs;
+	int ret = -ETIMEDOUT;
+	int i, j, cnt = num;
+	u8 seg = 0x00, off = 0x00, tmp;
+
+	tmp = nv_rdi2cr(mast, port->ctrl, 0x07) & ~0x10;
+	nv_wri2cr(mast, port->ctrl, 0x07, tmp | 0x10);
+	nv_wri2cr(mast, port->ctrl, 0x07, tmp);
+	nv_wri2cr(mast, port->addr, 0x43, 0x05);
+	mdelay(5);
+
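+	/* only DDC traffic is handled: EDID reads at address 0x50 and
+	 * segment pointer writes at 0x30; anything else aborts
+	 */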
+	while (cnt--) {
+		if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) {
+			nv_wri2cr(mast, port->addr, 0x40, msg->addr << 1);
+			nv_wri2cr(mast, port->addr, 0x41, seg);
+			nv_wri2cr(mast, port->addr, 0x42, off);
+			nv_wri2cr(mast, port->addr, 0x44, msg->len);
+			nv_wri2cr(mast, port->addr, 0x45, 0x00);
+			nv_wri2cr(mast, port->addr, 0x43, 0x01);
+			for (i = 0; i < msg->len; i++) {
+				j = 0;
+				while (nv_rdi2cr(mast, port->addr, 0x46) & 0x10) {
+					mdelay(5);
+					if (j++ == 32)
+						goto done;
+				}
+				msg->buf[i] = nv_rdi2cr(mast, port->addr, 0x47);
+			}
+		} else
+		if (!(msg->flags & I2C_M_RD)) {
+			if (msg->addr == 0x50 && msg->len == 0x01) {
+				off = msg->buf[0];
+			} else
+			if (msg->addr == 0x30 && msg->len == 0x01) {
+				seg = msg->buf[0];
+			} else
+				goto done;
+		} else {
+			goto done;
+		}
+		msg++;
+	}
+
+	ret = num;
+done:
+	nv_wri2cr(mast, port->addr, 0x43, 0x00);
+	return ret;
+}
+
+static u32
+anx9805_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm
+anx9805_i2c_algo = {
+	.master_xfer = anx9805_xfer,
+	.functionality = anx9805_func
+};
+
+static const struct nouveau_i2c_func
+anx9805_i2c_func = {
+};
+
+static int
+anx9805_ddc_port_ctor(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass, void *data, u32 index,
+		      struct nouveau_object **pobject)
+{
+	struct nouveau_i2c_port *mast = (void *)parent;
+	struct anx9805_i2c_port *port;
+	int ret;
+
+	ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+				     &anx9805_i2c_algo, &port);
+	*pobject = nv_object(port);
+	if (ret)
+		return ret;
+
+	switch ((oclass->handle & 0xff00) >> 8) {
+	case 0x0d:
+		port->addr = 0x3d;
+		port->ctrl = 0x39;
+		break;
+	case 0x0e:
+		port->addr = 0x3f;
+		port->ctrl = 0x3b;
+		break;
+	default:
+		BUG();
+	}
+
+	if (mast->adapter.algo == &i2c_bit_algo) {
+		struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
+		algo->udelay = max(algo->udelay, 40);
+	}
+
+	port->base.func = &anx9805_i2c_func;
+	return 0;
+}
+
+static struct nouveau_ofuncs
+anx9805_ddc_ofuncs = {
+	.ctor =  anx9805_ddc_port_ctor,
+	.dtor = _nouveau_i2c_port_dtor,
+	.init = _nouveau_i2c_port_init,
+	.fini = _nouveau_i2c_port_fini,
+};
+
+struct nouveau_oclass
+nouveau_anx9805_sclass[] = {
+	{ .handle = NV_I2C_TYPE_EXTDDC(0x0d), .ofuncs = &anx9805_ddc_ofuncs },
+	{ .handle = NV_I2C_TYPE_EXTAUX(0x0d), .ofuncs = &anx9805_aux_ofuncs },
+	{ .handle = NV_I2C_TYPE_EXTDDC(0x0e), .ofuncs = &anx9805_ddc_ofuncs },
+	{ .handle = NV_I2C_TYPE_EXTAUX(0x0e), .ofuncs = &anx9805_aux_ofuncs },
+	{}
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
new file mode 100644
index 0000000..5de074a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+
+int
+nv_rdaux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
+{
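+	/* 9 is the native AUX read command; nv_wraux() below uses 8, the
+	 * native AUX write (standard DP AUX request encoding)
+	 */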
+	if (port->func->aux) {
+		if (port->func->acquire)
+			port->func->acquire(port);
+		return port->func->aux(port, 9, addr, data, size);
+	}
+	return -ENODEV;
+}
+
+int
+nv_wraux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
+{
+	if (port->func->aux) {
+		if (port->func->acquire)
+			port->func->acquire(port);
+		return port->func->aux(port, 8, addr, data, size);
+	}
+	return -ENODEV;
+}
+
+static int
+aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+	struct nouveau_i2c_port *port = adap->algo_data;
+	struct i2c_msg *msg = msgs;
+	int ret, mcnt = num;
+
+	if (!port->func->aux)
+		return -ENODEV;
+	if ( port->func->acquire)
+		port->func->acquire(port);
+
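+	/* AUX transfers carry at most 16 bytes, so split each message into
+	 * chunks and set MOT (middle-of-transaction) on all but the last
+	 * chunk of the last message
+	 */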
+	while (mcnt--) {
+		u8 remaining = msg->len;
+		u8 *ptr = msg->buf;
+
+		while (remaining) {
+			u8 cnt = (remaining > 16) ? 16 : remaining;
+			u8 cmd;
+
+			if (msg->flags & I2C_M_RD)
+				cmd = 1;
+			else
+				cmd = 0;
+
+			if (mcnt || remaining > 16)
+				cmd |= 4; /* MOT */
+
+			ret = port->func->aux(port, cmd, msg->addr, ptr, cnt);
+			if (ret < 0)
+				return ret;
+
+			ptr += cnt;
+			remaining -= cnt;
+		}
+
+		msg++;
+	}
+
+	return num;
+}
+
+static u32
+aux_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+const struct i2c_algorithm nouveau_i2c_aux_algo = {
+	.master_xfer = aux_xfer,
+	.functionality = aux_func
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
new file mode 100644
index 0000000..8ae2625
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/i2c.h>
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
+
+/******************************************************************************
+ * interface to linux i2c bit-banging algorithm
+ *****************************************************************************/
+
+#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
+#define CSTMSEL true
+#else
+#define CSTMSEL false
+#endif
+
+static int
+nouveau_i2c_pre_xfer(struct i2c_adapter *adap)
+{
+	struct i2c_algo_bit_data *bit = adap->algo_data;
+	struct nouveau_i2c_port *port = bit->data;
+	if (port->func->acquire)
+		port->func->acquire(port);
+	return 0;
+}
+
+static void
+nouveau_i2c_setscl(void *data, int state)
+{
+	struct nouveau_i2c_port *port = data;
+	port->func->drive_scl(port, state);
+}
+
+static void
+nouveau_i2c_setsda(void *data, int state)
+{
+	struct nouveau_i2c_port *port = data;
+	port->func->drive_sda(port, state);
+}
+
+static int
+nouveau_i2c_getscl(void *data)
+{
+	struct nouveau_i2c_port *port = data;
+	return port->func->sense_scl(port);
+}
+
+static int
+nouveau_i2c_getsda(void *data)
+{
+	struct nouveau_i2c_port *port = data;
+	return port->func->sense_sda(port);
+}
+
+/******************************************************************************
+ * base i2c "port" class implementation
+ *****************************************************************************/
+
+void
+_nouveau_i2c_port_dtor(struct nouveau_object *object)
+{
+	struct nouveau_i2c_port *port = (void *)object;
+	i2c_del_adapter(&port->adapter);
+	nouveau_object_destroy(&port->base);
+}
+
+int
+nouveau_i2c_port_create_(struct nouveau_object *parent,
+			 struct nouveau_object *engine,
+			 struct nouveau_oclass *oclass, u8 index,
+			 const struct i2c_algorithm *algo,
+			 int size, void **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_i2c *i2c = (void *)engine;
+	struct nouveau_i2c_port *port;
+	int ret;
+
+	ret = nouveau_object_create_(parent, engine, oclass, 0, size, pobject);
+	port = *pobject;
+	if (ret)
+		return ret;
+
+	snprintf(port->adapter.name, sizeof(port->adapter.name),
+		 "nouveau-%s-%d", device->name, index);
+	port->adapter.owner = THIS_MODULE;
+	port->adapter.dev.parent = &device->pdev->dev;
+	port->index = index;
+	i2c_set_adapdata(&port->adapter, i2c);
+
+	if ( algo == &nouveau_i2c_bit_algo &&
+	    !nouveau_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
+		struct i2c_algo_bit_data *bit;
+
+		bit = kzalloc(sizeof(*bit), GFP_KERNEL);
+		if (!bit)
+			return -ENOMEM;
+
+		bit->udelay = 10;
+		bit->timeout = usecs_to_jiffies(2200);
+		bit->data = port;
+		bit->pre_xfer = nouveau_i2c_pre_xfer;
+		bit->setsda = nouveau_i2c_setsda;
+		bit->setscl = nouveau_i2c_setscl;
+		bit->getsda = nouveau_i2c_getsda;
+		bit->getscl = nouveau_i2c_getscl;
+
+		port->adapter.algo_data = bit;
+		ret = i2c_bit_add_bus(&port->adapter);
+	} else {
+		port->adapter.algo_data = port;
+		port->adapter.algo = algo;
+		ret = i2c_add_adapter(&port->adapter);
+	}
+
+	/* on success, put the port on the subdev's list; the i2c core
+	 * manages the adapter's lifetime from here on
+	 */
+	if (ret == 0)
+		list_add_tail(&port->head, &i2c->ports);
+	return ret;
+}
+
+/******************************************************************************
+ * base i2c subdev class implementation
+ *****************************************************************************/
+
+static struct nouveau_i2c_port *
+nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
+{
+	struct nouveau_bios *bios = nouveau_bios(i2c);
+	struct nouveau_i2c_port *port;
+
+	if (index == NV_I2C_DEFAULT(0) ||
+	    index == NV_I2C_DEFAULT(1)) {
+		u8  ver, hdr, cnt, len;
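+		/* on DCB 3.0+, byte 4 of the i2c table appears to hold the
+		 * default port indices: DDC in the low nibble, AUX in the high
+		 */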
+		u16 data = dcb_i2c_table(bios, &ver, &hdr, &cnt, &len);
+		if (data && ver >= 0x30) {
+			u8 auxidx = nv_ro08(bios, data + 4);
+			if (index == NV_I2C_DEFAULT(0))
+				index = (auxidx & 0x0f) >> 0;
+			else
+				index = (auxidx & 0xf0) >> 4;
+		} else {
+			index = 2;
+		}
+	}
+
+	list_for_each_entry(port, &i2c->ports, head) {
+		if (port->index == index)
+			return port;
+	}
+
+	return NULL;
+}
+
+static struct nouveau_i2c_port *
+nouveau_i2c_find_type(struct nouveau_i2c *i2c, u16 type)
+{
+	struct nouveau_i2c_port *port;
+
+	list_for_each_entry(port, &i2c->ports, head) {
+		if (nv_hclass(port) == type)
+			return port;
+	}
+
+	return NULL;
+}
+
+static int
+nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
+		     struct i2c_board_info *info,
+		     bool (*match)(struct nouveau_i2c_port *,
+				   struct i2c_board_info *))
+{
+	struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
+	int i;
+
+	if (!port) {
+		nv_debug(i2c, "no bus when probing %s on %d\n", what, index);
+		return -ENODEV;
+	}
+
+	nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
+	for (i = 0; info[i].addr; i++) {
+		if (nv_probe_i2c(port, info[i].addr) &&
+		    (!match || match(port, &info[i]))) {
+			nv_info(i2c, "detected %s: %s\n", what, info[i].type);
+			return i;
+		}
+	}
+
+	nv_debug(i2c, "no devices found.\n");
+	return -ENODEV;
+}
+
+int
+_nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_i2c *i2c = (void *)object;
+	struct nouveau_i2c_port *port;
+	int ret;
+
+	list_for_each_entry(port, &i2c->ports, head) {
+		ret = nv_ofuncs(port)->fini(nv_object(port), suspend);
+		if (ret && suspend)
+			goto fail;
+	}
+
+	return nouveau_subdev_fini(&i2c->base, suspend);
+fail:
+	list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
+		nv_ofuncs(port)->init(nv_object(port));
+	}
+
+	return ret;
+}
+
+int
+_nouveau_i2c_init(struct nouveau_object *object)
+{
+	struct nouveau_i2c *i2c = (void *)object;
+	struct nouveau_i2c_port *port;
+	int ret;
+
+	ret = nouveau_subdev_init(&i2c->base);
+	if (ret == 0) {
+		list_for_each_entry(port, &i2c->ports, head) {
+			ret = nv_ofuncs(port)->init(nv_object(port));
+			if (ret)
+				goto fail;
+		}
+	}
+
+	return ret;
+fail:
+	list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
+		nv_ofuncs(port)->fini(nv_object(port), false);
+	}
+
+	return ret;
+}
+
+void
+_nouveau_i2c_dtor(struct nouveau_object *object)
+{
+	struct nouveau_i2c *i2c = (void *)object;
+	struct nouveau_i2c_port *port, *temp;
+
+	list_for_each_entry_safe(port, temp, &i2c->ports, head) {
+		nouveau_object_ref(NULL, (struct nouveau_object **)&port);
+	}
+
+	nouveau_subdev_destroy(&i2c->base);
+}
+
+static struct nouveau_oclass *
+nouveau_i2c_extdev_sclass[] = {
+	nouveau_anx9805_sclass,
+};
+
+int
+nouveau_i2c_create_(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass,
+		    struct nouveau_oclass *sclass,
+		    int length, void **pobject)
+{
+	struct nouveau_bios *bios = nouveau_bios(parent);
+	struct nouveau_i2c *i2c;
+	struct nouveau_object *object;
+	struct dcb_i2c_entry info;
+	int ret, i, j, index = -1;
+	struct dcb_output outp;
+	u8  ver, hdr;
+	u32 data;
+
+	ret = nouveau_subdev_create(parent, engine, oclass, 0,
+				    "I2C", "i2c", &i2c);
+	*pobject = nv_object(i2c);
+	if (ret)
+		return ret;
+
+	i2c->find = nouveau_i2c_find;
+	i2c->find_type = nouveau_i2c_find_type;
+	i2c->identify = nouveau_i2c_identify;
+	INIT_LIST_HEAD(&i2c->ports);
+
+	while (!dcb_i2c_parse(bios, ++index, &info)) {
+		if (info.type == DCB_I2C_UNUSED)
+			continue;
+
+		oclass = sclass;
+		do {
+			ret = -EINVAL;
+			if (oclass->handle == info.type) {
+				ret = nouveau_object_ctor(*pobject, *pobject,
+							  oclass, &info,
+							  index, &object);
+			}
+		} while (ret && (++oclass)->handle);
+	}
+
+	/* in addition to the busses specified in the i2c table, there
+	 * may be ddc/aux channels hiding behind external tmds/dp/etc
+	 * transmitters.
+	 */
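+	/* start their indices at the next multiple of 16 so they can't
+	 * collide with the DCB bus indices above
+	 */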
+	index = ((index + 0x0f) / 0x10) * 0x10;
+	i = -1;
+	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
+		if (!outp.location || !outp.extdev)
+			continue;
+
+		switch (outp.type) {
+		case DCB_OUTPUT_TMDS:
+			info.type = NV_I2C_TYPE_EXTDDC(outp.extdev);
+			break;
+		case DCB_OUTPUT_DP:
+			info.type = NV_I2C_TYPE_EXTAUX(outp.extdev);
+			break;
+		default:
+			continue;
+		}
+
+		ret = -ENODEV;
+		j = -1;
+		while (ret && ++j < ARRAY_SIZE(nouveau_i2c_extdev_sclass)) {
+			parent = nv_object(i2c->find(i2c, outp.i2c_index));
+			oclass = nouveau_i2c_extdev_sclass[j];
+			do {
+				if (oclass->handle != info.type)
+					continue;
+				ret = nouveau_object_ctor(parent, *pobject,
+							  oclass, NULL,
+							  index++, &object);
+			} while (ret && (++oclass)->handle);
+		}
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
new file mode 100644
index 0000000..a6e72d3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "subdev/i2c.h"
+
+#ifdef CONFIG_NOUVEAU_I2C_INTERNAL
+#define T_TIMEOUT  2200000
+#define T_RISEFALL 1000
+#define T_HOLD     5000
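+/* timings are in nanoseconds; i2c_delay() rounds to the nearest microsecond */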
+
+static inline void
+i2c_drive_scl(struct nouveau_i2c_port *port, int state)
+{
+	port->func->drive_scl(port, state);
+}
+
+static inline void
+i2c_drive_sda(struct nouveau_i2c_port *port, int state)
+{
+	port->func->drive_sda(port, state);
+}
+
+static inline int
+i2c_sense_scl(struct nouveau_i2c_port *port)
+{
+	return port->func->sense_scl(port);
+}
+
+static inline int
+i2c_sense_sda(struct nouveau_i2c_port *port)
+{
+	return port->func->sense_sda(port);
+}
+
+static void
+i2c_delay(struct nouveau_i2c_port *port, u32 nsec)
+{
+	udelay((nsec + 500) / 1000);
+}
+
+static bool
+i2c_raise_scl(struct nouveau_i2c_port *port)
+{
+	u32 timeout = T_TIMEOUT / T_RISEFALL;
+
+	i2c_drive_scl(port, 1);
+	do {
+		i2c_delay(port, T_RISEFALL);
+	} while (!i2c_sense_scl(port) && --timeout);
+
+	return timeout != 0;
+}
+
+static int
+i2c_start(struct nouveau_i2c_port *port)
+{
+	int ret = 0;
+
+	if (!i2c_sense_scl(port) ||
+	    !i2c_sense_sda(port)) {
+		i2c_drive_scl(port, 0);
+		i2c_drive_sda(port, 1);
+		if (!i2c_raise_scl(port))
+			ret = -EBUSY;
+	}
+
+	i2c_drive_sda(port, 0);
+	i2c_delay(port, T_HOLD);
+	i2c_drive_scl(port, 0);
+	i2c_delay(port, T_HOLD);
+	return ret;
+}
+
+static void
+i2c_stop(struct nouveau_i2c_port *port)
+{
+	i2c_drive_scl(port, 0);
+	i2c_drive_sda(port, 0);
+	i2c_delay(port, T_RISEFALL);
+
+	i2c_drive_scl(port, 1);
+	i2c_delay(port, T_HOLD);
+	i2c_drive_sda(port, 1);
+	i2c_delay(port, T_HOLD);
+}
+
+static int
+i2c_bitw(struct nouveau_i2c_port *port, int sda)
+{
+	i2c_drive_sda(port, sda);
+	i2c_delay(port, T_RISEFALL);
+
+	if (!i2c_raise_scl(port))
+		return -ETIMEDOUT;
+	i2c_delay(port, T_HOLD);
+
+	i2c_drive_scl(port, 0);
+	i2c_delay(port, T_HOLD);
+	return 0;
+}
+
+static int
+i2c_bitr(struct nouveau_i2c_port *port)
+{
+	int sda;
+
+	i2c_drive_sda(port, 1);
+	i2c_delay(port, T_RISEFALL);
+
+	if (!i2c_raise_scl(port))
+		return -ETIMEDOUT;
+	i2c_delay(port, T_HOLD);
+
+	sda = i2c_sense_sda(port);
+
+	i2c_drive_scl(port, 0);
+	i2c_delay(port, T_HOLD);
+	return sda;
+}
+
+static int
+i2c_get_byte(struct nouveau_i2c_port *port, u8 *byte, bool last)
+{
+	int i, bit;
+
+	*byte = 0;
+	for (i = 7; i >= 0; i--) {
+		bit = i2c_bitr(port);
+		if (bit < 0)
+			return bit;
+		*byte |= bit << i;
+	}
+
+	return i2c_bitw(port, last ? 1 : 0);
+}
+
+static int
+i2c_put_byte(struct nouveau_i2c_port *port, u8 byte)
+{
+	int i, ret;
+	for (i = 7; i >= 0; i--) {
+		ret = i2c_bitw(port, !!(byte & (1 << i)));
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = i2c_bitr(port);
+	if (ret == 1) /* nack */
+		ret = -EIO;
+	return ret;
+}
+
+static int
+i2c_addr(struct nouveau_i2c_port *port, struct i2c_msg *msg)
+{
+	u32 addr = msg->addr << 1;
+	if (msg->flags & I2C_M_RD)
+		addr |= 1;
+	return i2c_put_byte(port, addr);
+}
+
+static int
+i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+	struct nouveau_i2c_port *port = adap->algo_data;
+	struct i2c_msg *msg = msgs;
+	int ret = 0, mcnt = num;
+
+	if (port->func->acquire)
+		port->func->acquire(port);
+
+	while (!ret && mcnt--) {
+		u8 remaining = msg->len;
+		u8 *ptr = msg->buf;
+
+		ret = i2c_start(port);
+		if (ret == 0)
+			ret = i2c_addr(port, msg);
+
+		if (msg->flags & I2C_M_RD) {
+			while (!ret && remaining--)
+				ret = i2c_get_byte(port, ptr++, !remaining);
+		} else {
+			while (!ret && remaining--)
+				ret = i2c_put_byte(port, *ptr++);
+		}
+
+		msg++;
+	}
+
+	i2c_stop(port);
+	return (ret < 0) ? ret : num;
+}
+#else
+static int
+i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+	return -ENODEV;
+}
+#endif
+
+static u32
+i2c_bit_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+const struct i2c_algorithm nouveau_i2c_bit_algo = {
+	.master_xfer = i2c_bit_xfer,
+	.functionality = i2c_bit_func
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
new file mode 100644
index 0000000..2ad1884
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
+
+struct nv04_i2c_priv {
+	struct nouveau_i2c base;
+};
+
+struct nv04_i2c_port {
+	struct nouveau_i2c_port base;
+	u8 drive;
+	u8 sense;
+};
+
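+/* NV04-family boards bit-bang i2c through VGA CRTC registers; the
+ * DCB entry supplies the drive/sense register indices.  In the drive
+ * register bit 5 is SCL and bit 4 is SDA, with bit 0 seemingly kept
+ * set on every write; the sense register reports SCL on bit 2 and
+ * SDA on bit 3.
+ */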
+static void
+nv04_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+{
+	struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv04_i2c_port *port = (void *)base;
+	u8 val = nv_rdvgac(priv, 0, port->drive);
+	if (state) val |= 0x20;
+	else	   val &= 0xdf;
+	nv_wrvgac(priv, 0, port->drive, val | 0x01);
+}
+
+static void
+nv04_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+{
+	struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv04_i2c_port *port = (void *)base;
+	u8 val = nv_rdvgac(priv, 0, port->drive);
+	if (state) val |= 0x10;
+	else	   val &= 0xef;
+	nv_wrvgac(priv, 0, port->drive, val | 0x01);
+}
+
+static int
+nv04_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+	struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv04_i2c_port *port = (void *)base;
+	return !!(nv_rdvgac(priv, 0, port->sense) & 0x04);
+}
+
+static int
+nv04_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+	struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv04_i2c_port *port = (void *)base;
+	return !!(nv_rdvgac(priv, 0, port->sense) & 0x08);
+}
+
+static const struct nouveau_i2c_func
+nv04_i2c_func = {
+	.drive_scl = nv04_i2c_drive_scl,
+	.drive_sda = nv04_i2c_drive_sda,
+	.sense_scl = nv04_i2c_sense_scl,
+	.sense_sda = nv04_i2c_sense_sda,
+};
+
+static int
+nv04_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass, void *data, u32 index,
+		   struct nouveau_object **pobject)
+{
+	struct dcb_i2c_entry *info = data;
+	struct nv04_i2c_port *port;
+	int ret;
+
+	ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+				     &nouveau_i2c_bit_algo, &port);
+	*pobject = nv_object(port);
+	if (ret)
+		return ret;
+
+	port->base.func = &nv04_i2c_func;
+	port->drive = info->drive;
+	port->sense = info->sense;
+	return 0;
+}
+
+static struct nouveau_oclass
+nv04_i2c_sclass[] = {
+	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV04_BIT),
+	  .ofuncs = &(struct nouveau_ofuncs) {
+		  .ctor = nv04_i2c_port_ctor,
+		  .dtor = _nouveau_i2c_port_dtor,
+		  .init = _nouveau_i2c_port_init,
+		  .fini = _nouveau_i2c_port_fini,
+	  },
+	},
+	{}
+};
+
+static int
+nv04_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv04_i2c_priv *priv;
+	int ret;
+
+	ret = nouveau_i2c_create(parent, engine, oclass, nv04_i2c_sclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv04_i2c_oclass = {
+	.handle = NV_SUBDEV(I2C, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_i2c_ctor,
+		.dtor = _nouveau_i2c_dtor,
+		.init = _nouveau_i2c_init,
+		.fini = _nouveau_i2c_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
new file mode 100644
index 0000000..f501ae2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/i2c.h>
+#include <subdev/vga.h>
+
+struct nv4e_i2c_priv {
+	struct nouveau_i2c base;
+};
+
+struct nv4e_i2c_port {
+	struct nouveau_i2c_port base;
+	u32 addr;
+};
+
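+/* NV4E uses an MMIO register at 0x600800 plus the DCB drive offset:
+ * the drive bits are 0x20 (SCL) and 0x10 (SDA), masked together with
+ * an always-set 0x01, while the line state reads back on bits 18
+ * (SCL) and 19 (SDA) of the same register.
+ */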
+static void
+nv4e_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+{
+	struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv4e_i2c_port *port = (void *)base;
+	nv_mask(priv, port->addr, 0x2f, state ? 0x21 : 0x01);
+}
+
+static void
+nv4e_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+{
+	struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv4e_i2c_port *port = (void *)base;
+	nv_mask(priv, port->addr, 0x1f, state ? 0x11 : 0x01);
+}
+
+static int
+nv4e_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+	struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv4e_i2c_port *port = (void *)base;
+	return !!(nv_rd32(priv, port->addr) & 0x00040000);
+}
+
+static int
+nv4e_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+	struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv4e_i2c_port *port = (void *)base;
+	return !!(nv_rd32(priv, port->addr) & 0x00080000);
+}
+
+static const struct nouveau_i2c_func
+nv4e_i2c_func = {
+	.drive_scl = nv4e_i2c_drive_scl,
+	.drive_sda = nv4e_i2c_drive_sda,
+	.sense_scl = nv4e_i2c_sense_scl,
+	.sense_sda = nv4e_i2c_sense_sda,
+};
+
+static int
+nv4e_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass, void *data, u32 index,
+		   struct nouveau_object **pobject)
+{
+	struct dcb_i2c_entry *info = data;
+	struct nv4e_i2c_port *port;
+	int ret;
+
+	ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+				     &nouveau_i2c_bit_algo, &port);
+	*pobject = nv_object(port);
+	if (ret)
+		return ret;
+
+	port->base.func = &nv4e_i2c_func;
+	port->addr = 0x600800 + info->drive;
+	return 0;
+}
+
+static struct nouveau_oclass
+nv4e_i2c_sclass[] = {
+	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV4E_BIT),
+	  .ofuncs = &(struct nouveau_ofuncs) {
+		  .ctor = nv4e_i2c_port_ctor,
+		  .dtor = _nouveau_i2c_port_dtor,
+		  .init = _nouveau_i2c_port_init,
+		  .fini = _nouveau_i2c_port_fini,
+	  },
+	},
+	{}
+};
+
+static int
+nv4e_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv4e_i2c_priv *priv;
+	int ret;
+
+	ret = nouveau_i2c_create(parent, engine, oclass, nv4e_i2c_sclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv4e_i2c_oclass = {
+	.handle = NV_SUBDEV(I2C, 0x4e),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv4e_i2c_ctor,
+		.dtor = _nouveau_i2c_dtor,
+		.init = _nouveau_i2c_init,
+		.fini = _nouveau_i2c_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
new file mode 100644
index 0000000..378dfa3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
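+/* The NV50 ports keep a software shadow (port->state) of the drive
+ * register, with bit 0 driving SCL and bit 1 driving SDA, so each
+ * update is a single register write rather than a read-modify-write;
+ * the same bits read back the sensed line state.
+ */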
+void
+nv50_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
+{
+	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv50_i2c_port *port = (void *)base;
+	if (state) port->state |= 0x01;
+	else	   port->state &= 0xfe;
+	nv_wr32(priv, port->addr, port->state);
+}
+
+void
+nv50_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
+{
+	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv50_i2c_port *port = (void *)base;
+	if (state) port->state |= 0x02;
+	else	   port->state &= 0xfd;
+	nv_wr32(priv, port->addr, port->state);
+}
+
+int
+nv50_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv50_i2c_port *port = (void *)base;
+	return !!(nv_rd32(priv, port->addr) & 0x00000001);
+}
+
+int
+nv50_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv50_i2c_port *port = (void *)base;
+	return !!(nv_rd32(priv, port->addr) & 0x00000002);
+}
+
+static const struct nouveau_i2c_func
+nv50_i2c_func = {
+	.drive_scl = nv50_i2c_drive_scl,
+	.drive_sda = nv50_i2c_drive_sda,
+	.sense_scl = nv50_i2c_sense_scl,
+	.sense_sda = nv50_i2c_sense_sda,
+};
+
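+/* Per-port drive/sense register for each DCB i2c index; the port
+ * constructors reject DCB entries whose drive index falls outside
+ * this table.
+ */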
+const u32 nv50_i2c_addr[] = {
+	0x00e138, 0x00e150, 0x00e168, 0x00e180,
+	0x00e254, 0x00e274, 0x00e764, 0x00e780,
+	0x00e79c, 0x00e7b8
+};
+const int nv50_i2c_addr_nr = ARRAY_SIZE(nv50_i2c_addr);
+
+static int
+nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass, void *data, u32 index,
+		   struct nouveau_object **pobject)
+{
+	struct dcb_i2c_entry *info = data;
+	struct nv50_i2c_port *port;
+	int ret;
+
+	ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+				     &nouveau_i2c_bit_algo, &port);
+	*pobject = nv_object(port);
+	if (ret)
+		return ret;
+
+	if (info->drive >= nv50_i2c_addr_nr)
+		return -EINVAL;
+
+	port->base.func = &nv50_i2c_func;
+	port->state = 0x00000007;
+	port->addr = nv50_i2c_addr[info->drive];
+	return 0;
+}
+
+int
+nv50_i2c_port_init(struct nouveau_object *object)
+{
+	struct nv50_i2c_priv *priv = (void *)object->engine;
+	struct nv50_i2c_port *port = (void *)object;
+	nv_wr32(priv, port->addr, port->state);
+	return nouveau_i2c_port_init(&port->base);
+}
+
+static struct nouveau_oclass
+nv50_i2c_sclass[] = {
+	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+	  .ofuncs = &(struct nouveau_ofuncs) {
+		  .ctor = nv50_i2c_port_ctor,
+		  .dtor = _nouveau_i2c_port_dtor,
+		  .init = nv50_i2c_port_init,
+		  .fini = _nouveau_i2c_port_fini,
+	  },
+	},
+	{}
+};
+
+static int
+nv50_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv50_i2c_priv *priv;
+	int ret;
+
+	ret = nouveau_i2c_create(parent, engine, oclass, nv50_i2c_sclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_i2c_oclass = {
+	.handle = NV_SUBDEV(I2C, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_i2c_ctor,
+		.dtor = _nouveau_i2c_dtor,
+		.init = _nouveau_i2c_init,
+		.fini = _nouveau_i2c_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
new file mode 100644
index 0000000..4e5ba48
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
@@ -0,0 +1,32 @@
+#ifndef __NV50_I2C_H__
+#define __NV50_I2C_H__
+
+#include <subdev/i2c.h>
+
+struct nv50_i2c_priv {
+	struct nouveau_i2c base;
+};
+
+struct nv50_i2c_port {
+	struct nouveau_i2c_port base;
+	u32 addr;
+	u32 ctrl;
+	u32 data;
+	u32 state;
+};
+
+extern const u32 nv50_i2c_addr[];
+extern const int nv50_i2c_addr_nr;
+int  nv50_i2c_port_init(struct nouveau_object *);
+int  nv50_i2c_sense_scl(struct nouveau_i2c_port *);
+int  nv50_i2c_sense_sda(struct nouveau_i2c_port *);
+void nv50_i2c_drive_scl(struct nouveau_i2c_port *, int state);
+void nv50_i2c_drive_sda(struct nouveau_i2c_port *, int state);
+
+int  nv94_aux_port_ctor(struct nouveau_object *, struct nouveau_object *,
+			struct nouveau_oclass *, void *, u32,
+			struct nouveau_object **);
+void nv94_i2c_acquire(struct nouveau_i2c_port *);
+void nv94_i2c_release(struct nouveau_i2c_port *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
new file mode 100644
index 0000000..61b7716
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
+#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
+
+static void
+auxch_fini(struct nouveau_i2c *aux, int ch)
+{
+	nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
+}
+
+static int
+auxch_init(struct nouveau_i2c *aux, int ch)
+{
+	const u32 unksel = 1; /* unclear which to use, or whether it matters */
+	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
+	const u32 urep = unksel ? 0x01000000 : 0x02000000;
+	u32 ctrl, timeout;
+
+	/* wait up to 1ms for any previous transaction to be done... */
+	timeout = 1000;
+	do {
+		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
+			return -EBUSY;
+		}
+	} while (ctrl & 0x03010000);
+
+	/* request the channel, and wait up to 1ms for the reply bit to appear */
+	nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
+	timeout = 1000;
+	do {
+		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+		udelay(1);
+		if (!timeout--) {
+			AUX_ERR("magic wait 0x%08x\n", ctrl);
+			auxch_fini(aux, ch);
+			return -EBUSY;
+		}
+	} while ((ctrl & 0x03000000) != urep);
+
+	return 0;
+}
+
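+/* Execute a single AUX channel transaction.  Each channel has a
+ * 16-byte data FIFO exposed as four dwords (0xe4c0 for writes,
+ * 0xe4d0 for reads), an address register at 0xe4e0 and
+ * control/status registers at 0xe4e4/0xe4e8; bit 0 of the request
+ * type distinguishes reads from writes.
+ */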
+int
+nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size)
+{
+	struct nouveau_i2c *aux = nouveau_i2c(base);
+	struct nv50_i2c_port *port = (void *)base;
+	u32 ctrl, stat, timeout, retries;
+	u32 xbuf[4] = {};
+	int ch = port->addr;
+	int ret, i;
+
+	AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
+
+	ret = auxch_init(aux, ch);
+	if (ret)
+		goto out;
+
+	stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
+	if (!(stat & 0x10000000)) {
+		AUX_DBG("sink not detected\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	if (!(type & 1)) {
+		memcpy(xbuf, data, size);
+		for (i = 0; i < 16; i += 4) {
+			AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
+			nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
+		}
+	}
+
+	ctrl  = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+	ctrl &= ~0x0001f0ff;
+	ctrl |= type << 12;
+	ctrl |= size - 1;
+	nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
+
+	/* retry transaction a number of times on failure... */
+	ret = -EREMOTEIO;
+	for (retries = 0; retries < 32; retries++) {
+		/* reset, and delay a while if this is a retry */
+		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
+		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
+		if (retries)
+			udelay(400);
+
+		/* transaction request, wait up to 1ms for it to complete */
+		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
+
+		timeout = 1000;
+		do {
+			ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
+			udelay(1);
+			if (!timeout--) {
+				AUX_ERR("tx req timeout 0x%08x\n", ctrl);
+				goto out;
+			}
+		} while (ctrl & 0x00010000);
+
+		/* read status, and check if transaction completed ok */
+		stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
+		if (!(stat & 0x000f0f00)) {
+			ret = 0;
+			break;
+		}
+
+		AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
+	}
+
+	if (type & 1) {
+		for (i = 0; i < 16; i += 4) {
+			xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
+			AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
+		}
+		memcpy(data, xbuf, size);
+	}
+
+out:
+	auxch_fini(aux, ch);
+	return ret;
+}
+
+void
+nv94_i2c_acquire(struct nouveau_i2c_port *base)
+{
+	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv50_i2c_port *port = (void *)base;
+	if (port->ctrl) {
+		nv_mask(priv, port->ctrl + 0x0c, 0x00000001, 0x00000000);
+		nv_mask(priv, port->ctrl + 0x00, 0x0000f003, port->data);
+	}
+}
+
+void
+nv94_i2c_release(struct nouveau_i2c_port *base)
+{
+}
+
+static const struct nouveau_i2c_func
+nv94_i2c_func = {
+	.acquire   = nv94_i2c_acquire,
+	.release   = nv94_i2c_release,
+	.drive_scl = nv50_i2c_drive_scl,
+	.drive_sda = nv50_i2c_drive_sda,
+	.sense_scl = nv50_i2c_sense_scl,
+	.sense_sda = nv50_i2c_sense_sda,
+};
+
+static int
+nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass, void *data, u32 index,
+		   struct nouveau_object **pobject)
+{
+	struct dcb_i2c_entry *info = data;
+	struct nv50_i2c_port *port;
+	int ret;
+
+	ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+				     &nouveau_i2c_bit_algo, &port);
+	*pobject = nv_object(port);
+	if (ret)
+		return ret;
+
+	if (info->drive >= nv50_i2c_addr_nr)
+		return -EINVAL;
+
+	port->base.func = &nv94_i2c_func;
+	port->state = 7;
+	port->addr = nv50_i2c_addr[info->drive];
+	if (info->share != DCB_I2C_UNUSED) {
+		port->ctrl = 0x00e500 + (info->share * 0x50);
+		port->data = 0x0000e001;
+	}
+	return 0;
+}
+
+static const struct nouveau_i2c_func
+nv94_aux_func = {
+	.acquire   = nv94_i2c_acquire,
+	.release   = nv94_i2c_release,
+	.aux       = nv94_aux,
+};
+
+int
+nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass, void *data, u32 index,
+		   struct nouveau_object **pobject)
+{
+	struct dcb_i2c_entry *info = data;
+	struct nv50_i2c_port *port;
+	int ret;
+
+	ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+				     &nouveau_i2c_aux_algo, &port);
+	*pobject = nv_object(port);
+	if (ret)
+		return ret;
+
+	port->base.func = &nv94_aux_func;
+	port->addr = info->drive;
+	if (info->share != DCB_I2C_UNUSED) {
+		port->ctrl = 0x00e500 + (info->drive * 0x50);
+		port->data = 0x00002002;
+	}
+
+	return 0;
+}
+
+static struct nouveau_oclass
+nv94_i2c_sclass[] = {
+	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+	  .ofuncs = &(struct nouveau_ofuncs) {
+		  .ctor = nv94_i2c_port_ctor,
+		  .dtor = _nouveau_i2c_port_dtor,
+		  .init = nv50_i2c_port_init,
+		  .fini = _nouveau_i2c_port_fini,
+	  },
+	},
+	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
+	  .ofuncs = &(struct nouveau_ofuncs) {
+		  .ctor = nv94_aux_port_ctor,
+		  .dtor = _nouveau_i2c_port_dtor,
+		  .init = _nouveau_i2c_port_init,
+		  .fini = _nouveau_i2c_port_fini,
+	  },
+	},
+	{}
+};
+
+static int
+nv94_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv50_i2c_priv *priv;
+	int ret;
+
+	ret = nouveau_i2c_create(parent, engine, oclass, nv94_i2c_sclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv94_i2c_oclass = {
+	.handle = NV_SUBDEV(I2C, 0x94),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv94_i2c_ctor,
+		.dtor = _nouveau_i2c_dtor,
+		.init = _nouveau_i2c_init,
+		.fini = _nouveau_i2c_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
new file mode 100644
index 0000000..f761b8a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
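+/* On NVD0 the bit-banged ports move to 0x00d014 + (drive * 0x20) and
+ * the sensed line state moves to bits 4 (SCL) and 5 (SDA); the drive
+ * path still uses the NV50 shadow-state helpers.
+ */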
+static int
+nvd0_i2c_sense_scl(struct nouveau_i2c_port *base)
+{
+	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv50_i2c_port *port = (void *)base;
+	return !!(nv_rd32(priv, port->addr) & 0x00000010);
+}
+
+static int
+nvd0_i2c_sense_sda(struct nouveau_i2c_port *base)
+{
+	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
+	struct nv50_i2c_port *port = (void *)base;
+	return !!(nv_rd32(priv, port->addr) & 0x00000020);
+}
+
+static const struct nouveau_i2c_func
+nvd0_i2c_func = {
+	.acquire   = nv94_i2c_acquire,
+	.release   = nv94_i2c_release,
+	.drive_scl = nv50_i2c_drive_scl,
+	.drive_sda = nv50_i2c_drive_sda,
+	.sense_scl = nvd0_i2c_sense_scl,
+	.sense_sda = nvd0_i2c_sense_sda,
+};
+
+static int
+nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass, void *data, u32 index,
+		   struct nouveau_object **pobject)
+{
+	struct dcb_i2c_entry *info = data;
+	struct nv50_i2c_port *port;
+	int ret;
+
+	ret = nouveau_i2c_port_create(parent, engine, oclass, index,
+				     &nouveau_i2c_bit_algo, &port);
+	*pobject = nv_object(port);
+	if (ret)
+		return ret;
+
+	port->base.func = &nvd0_i2c_func;
+	port->state = 0x00000007;
+	port->addr = 0x00d014 + (info->drive * 0x20);
+	if (info->share != DCB_I2C_UNUSED) {
+		port->ctrl = 0x00e500 + (info->share * 0x50);
+		port->data = 0x0000e001;
+	}
+	return 0;
+}
+
+static struct nouveau_oclass
+nvd0_i2c_sclass[] = {
+	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
+	  .ofuncs = &(struct nouveau_ofuncs) {
+		  .ctor = nvd0_i2c_port_ctor,
+		  .dtor = _nouveau_i2c_port_dtor,
+		  .init = nv50_i2c_port_init,
+		  .fini = _nouveau_i2c_port_fini,
+	  },
+	},
+	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
+	  .ofuncs = &(struct nouveau_ofuncs) {
+		  .ctor = nv94_aux_port_ctor,
+		  .dtor = _nouveau_i2c_port_dtor,
+		  .init = _nouveau_i2c_port_init,
+		  .fini = _nouveau_i2c_port_fini,
+	  },
+	},
+	{}
+};
+
+static int
+nvd0_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv50_i2c_priv *priv;
+	int ret;
+
+	ret = nouveau_i2c_create(parent, engine, oclass, nvd0_i2c_sclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nvd0_i2c_oclass = {
+	.handle = NV_SUBDEV(I2C, 0xd0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvd0_i2c_ctor,
+		.dtor = _nouveau_i2c_dtor,
+		.init = _nouveau_i2c_init,
+		.fini = _nouveau_i2c_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c
new file mode 100644
index 0000000..4e977ff
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/ibus.h>
+
+struct nvc0_ibus_priv {
+	struct nouveau_ibus base;
+};
+
+static void
+nvc0_ibus_intr_hub(struct nvc0_ibus_priv *priv, int i)
+{
+	u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0400));
+	u32 data = nv_rd32(priv, 0x122124 + (i * 0x0400));
+	u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0400));
+	nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
+	nv_mask(priv, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
+}
+
+static void
+nvc0_ibus_intr_rop(struct nvc0_ibus_priv *priv, int i)
+{
+	u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0400));
+	u32 data = nv_rd32(priv, 0x124124 + (i * 0x0400));
+	u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0400));
+	nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
+	nv_mask(priv, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
+}
+
+static void
+nvc0_ibus_intr_gpc(struct nvc0_ibus_priv *priv, int i)
+{
+	u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0400));
+	u32 data = nv_rd32(priv, 0x128124 + (i * 0x0400));
+	u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0400));
+	nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
+	nv_mask(priv, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
+}
+
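+/* Top-level dispatch: intr0 carries one status bit per hub (bits
+ * 8-15) and per ROP (bits 16-31), intr1 one bit per GPC, with the
+ * unit counts read back from 0x121c70/0x121c74/0x121c78.
+ */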
+static void
+nvc0_ibus_intr(struct nouveau_subdev *subdev)
+{
+	struct nvc0_ibus_priv *priv = (void *)subdev;
+	u32 intr0 = nv_rd32(priv, 0x121c58);
+	u32 intr1 = nv_rd32(priv, 0x121c5c);
+	u32 hubnr = nv_rd32(priv, 0x121c70);
+	u32 ropnr = nv_rd32(priv, 0x121c74);
+	u32 gpcnr = nv_rd32(priv, 0x121c78);
+	u32 i;
+
+	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
+		u32 stat = 0x00000100 << i;
+		if (intr0 & stat) {
+			nvc0_ibus_intr_hub(priv, i);
+			intr0 &= ~stat;
+		}
+	}
+
+	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
+		u32 stat = 0x00010000 << i;
+		if (intr0 & stat) {
+			nvc0_ibus_intr_rop(priv, i);
+			intr0 &= ~stat;
+		}
+	}
+
+	for (i = 0; intr1 && i < gpcnr; i++) {
+		u32 stat = 0x00000001 << i;
+		if (intr1 & stat) {
+			nvc0_ibus_intr_gpc(priv, i);
+			intr1 &= ~stat;
+		}
+	}
+}
+
+static int
+nvc0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nvc0_ibus_priv *priv;
+	int ret;
+
+	ret = nouveau_ibus_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nvc0_ibus_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_ibus_oclass = {
+	.handle = NV_SUBDEV(IBUS, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_ibus_ctor,
+		.dtor = _nouveau_ibus_dtor,
+		.init = _nouveau_ibus_init,
+		.fini = _nouveau_ibus_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
new file mode 100644
index 0000000..7120124
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/ibus.h>
+
+struct nve0_ibus_priv {
+	struct nouveau_ibus base;
+};
+
+static void
+nve0_ibus_intr_hub(struct nve0_ibus_priv *priv, int i)
+{
+	u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0800));
+	u32 data = nv_rd32(priv, 0x122124 + (i * 0x0800));
+	u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0800));
+	nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
+	nv_mask(priv, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
+}
+
+static void
+nve0_ibus_intr_rop(struct nve0_ibus_priv *priv, int i)
+{
+	u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0800));
+	u32 data = nv_rd32(priv, 0x124124 + (i * 0x0800));
+	u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0800));
+	nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
+	nv_mask(priv, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
+}
+
+static void
+nve0_ibus_intr_gpc(struct nve0_ibus_priv *priv, int i)
+{
+	u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0800));
+	u32 data = nv_rd32(priv, 0x128124 + (i * 0x0800));
+	u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0800));
+	nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
+	nv_mask(priv, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
+}
+
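+/* Same dispatch as NVC0, but the per-unit register stride doubles to
+ * 0x0800 and the status/count registers move to 0x120058-0x120078.
+ */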
+static void
+nve0_ibus_intr(struct nouveau_subdev *subdev)
+{
+	struct nve0_ibus_priv *priv = (void *)subdev;
+	u32 intr0 = nv_rd32(priv, 0x120058);
+	u32 intr1 = nv_rd32(priv, 0x12005c);
+	u32 hubnr = nv_rd32(priv, 0x120070);
+	u32 ropnr = nv_rd32(priv, 0x120074);
+	u32 gpcnr = nv_rd32(priv, 0x120078);
+	u32 i;
+
+	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
+		u32 stat = 0x00000100 << i;
+		if (intr0 & stat) {
+			nve0_ibus_intr_hub(priv, i);
+			intr0 &= ~stat;
+		}
+	}
+
+	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
+		u32 stat = 0x00010000 << i;
+		if (intr0 & stat) {
+			nve0_ibus_intr_rop(priv, i);
+			intr0 &= ~stat;
+		}
+	}
+
+	for (i = 0; intr1 && i < gpcnr; i++) {
+		u32 stat = 0x00000001 << i;
+		if (intr1 & stat) {
+			nve0_ibus_intr_gpc(priv, i);
+			intr1 &= ~stat;
+		}
+	}
+}
+
+static int
+nve0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nve0_ibus_priv *priv;
+	int ret;
+
+	ret = nouveau_ibus_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nve0_ibus_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_ibus_oclass = {
+	.handle = NV_SUBDEV(IBUS, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_ibus_ctor,
+		.dtor = _nouveau_ibus_dtor,
+		.init = _nouveau_ibus_init,
+		.fini = _nouveau_ibus_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
new file mode 100644
index 0000000..6565f3d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/instmem.h>
+
+int
+nouveau_instobj_create_(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass,
+			int length, void **pobject)
+{
+	struct nouveau_instmem *imem = (void *)engine;
+	struct nouveau_instobj *iobj;
+	int ret;
+
+	ret = nouveau_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
+				     length, pobject);
+	iobj = *pobject;
+	if (ret)
+		return ret;
+
+	mutex_lock(&imem->base.mutex);
+	list_add(&iobj->head, &imem->list);
+	mutex_unlock(&imem->base.mutex);
+	return 0;
+}
+
+void
+nouveau_instobj_destroy(struct nouveau_instobj *iobj)
+{
+	struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
+
+	mutex_lock(&subdev->mutex);
+	list_del(&iobj->head);
+	mutex_unlock(&subdev->mutex);
+
+	return nouveau_object_destroy(&iobj->base);
+}
+
+void
+_nouveau_instobj_dtor(struct nouveau_object *object)
+{
+	struct nouveau_instobj *iobj = (void *)object;
+	return nouveau_instobj_destroy(iobj);
+}
+
+int
+nouveau_instmem_create_(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass,
+			int length, void **pobject)
+{
+	struct nouveau_instmem *imem;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0,
+				     "INSTMEM", "instmem", length, pobject);
+	imem = *pobject;
+	if (ret)
+		return ret;
+
+	INIT_LIST_HEAD(&imem->list);
+	return 0;
+}
+
+int
+nouveau_instmem_init(struct nouveau_instmem *imem)
+{
+	struct nouveau_instobj *iobj;
+	int ret, i;
+
+	ret = nouveau_subdev_init(&imem->base);
+	if (ret)
+		return ret;
+
+	mutex_lock(&imem->base.mutex);
+
+	list_for_each_entry(iobj, &imem->list, head) {
+		if (iobj->suspend) {
+			for (i = 0; i < iobj->size; i += 4)
+				nv_wo32(iobj, i, iobj->suspend[i / 4]);
+			vfree(iobj->suspend);
+			iobj->suspend = NULL;
+		}
+	}
+
+	mutex_unlock(&imem->base.mutex);
+
+	return 0;
+}
+
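+/* On suspend, snapshot the contents of every instance object into a
+ * vmalloc'd buffer; nouveau_instmem_init() writes the snapshots back
+ * on resume and frees them.
+ */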
+int
+nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
+{
+	struct nouveau_instobj *iobj;
+	int i, ret = 0;
+
+	if (suspend) {
+		mutex_lock(&imem->base.mutex);
+
+		list_for_each_entry(iobj, &imem->list, head) {
+			iobj->suspend = vmalloc(iobj->size);
+			if (!iobj->suspend) {
+				ret = -ENOMEM;
+				break;
+			}
+
+			for (i = 0; i < iobj->size; i += 4)
+				iobj->suspend[i / 4] = nv_ro32(iobj, i);
+		}
+
+		mutex_unlock(&imem->base.mutex);
+
+		if (ret)
+			return ret;
+	}
+
+	return nouveau_subdev_fini(&imem->base, suspend);
+}
+
+int
+_nouveau_instmem_init(struct nouveau_object *object)
+{
+	struct nouveau_instmem *imem = (void *)object;
+	return nouveau_instmem_init(imem);
+}
+
+int
+_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_instmem *imem = (void *)object;
+	return nouveau_instmem_fini(imem, suspend);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
new file mode 100644
index 0000000..795393d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/fb.h>
+
+#include "nv04.h"
+
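+/* The requested alignment is smuggled in through the ctor's data
+ * pointer by nv04_instmem_alloc() below; zero means byte alignment.
+ */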
+static int
+nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv04_instmem_priv *priv = (void *)engine;
+	struct nv04_instobj_priv *node;
+	int ret, align;
+
+	align = (unsigned long)data;
+	if (!align)
+		align = 1;
+
+	ret = nouveau_instobj_create(parent, engine, oclass, &node);
+	*pobject = nv_object(node);
+	if (ret)
+		return ret;
+
+	ret = nouveau_mm_head(&priv->heap, 1, size, size, align, &node->mem);
+	if (ret)
+		return ret;
+
+	node->base.addr = node->mem->offset;
+	node->base.size = node->mem->length;
+	return 0;
+}
+
+static void
+nv04_instobj_dtor(struct nouveau_object *object)
+{
+	struct nv04_instmem_priv *priv = (void *)object->engine;
+	struct nv04_instobj_priv *node = (void *)object;
+	nouveau_mm_free(&priv->heap, &node->mem);
+	nouveau_instobj_destroy(&node->base);
+}
+
+static u32
+nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
+{
+	struct nv04_instobj_priv *node = (void *)object;
+	return nv_ro32(object->engine, node->mem->offset + addr);
+}
+
+static void
+nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	struct nv04_instobj_priv *node = (void *)object;
+	nv_wo32(object->engine, node->mem->offset + addr, data);
+}
+
+static struct nouveau_oclass
+nv04_instobj_oclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_instobj_ctor,
+		.dtor = nv04_instobj_dtor,
+		.init = _nouveau_instobj_init,
+		.fini = _nouveau_instobj_fini,
+		.rd32 = nv04_instobj_rd32,
+		.wr32 = nv04_instobj_wr32,
+	},
+};
+
+int
+nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
+		   u32 size, u32 align, struct nouveau_object **pobject)
+{
+	struct nouveau_object *engine = nv_object(imem);
+	int ret;
+
+	ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
+				  (void *)(unsigned long)align, size, pobject);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv04_instmem_priv *priv;
+	int ret;
+
+	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	/* PRAMIN aperture maps over the end of VRAM, reserve it */
+	priv->base.reserved = 512 * 1024;
+	priv->base.alloc    = nv04_instmem_alloc;
+
+	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+	if (ret)
+		return ret;
+
+	/* 0x00000-0x10000: reserve for probable vbios image */
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
+				&priv->vbios);
+	if (ret)
+		return ret;
+
+	/* 0x10000-0x18000: reserve for RAMHT */
+	ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
+	if (ret)
+		return ret;
+
+	/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00800, 0,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+	if (ret)
+		return ret;
+
+	/* 0x18800-0x18a00: reserve for RAMRO */
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x00200, 0, 0,
+				&priv->ramro);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void
+nv04_instmem_dtor(struct nouveau_object *object)
+{
+	struct nv04_instmem_priv *priv = (void *)object;
+	nouveau_gpuobj_ref(NULL, &priv->ramfc);
+	nouveau_gpuobj_ref(NULL, &priv->ramro);
+	nouveau_ramht_ref(NULL, &priv->ramht);
+	nouveau_gpuobj_ref(NULL, &priv->vbios);
+	nouveau_mm_fini(&priv->heap);
+	if (priv->iomem)
+		iounmap(priv->iomem);
+	nouveau_instmem_destroy(&priv->base);
+}
+
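+/* On these chips the PRAMIN aperture is aliased at 0x700000 in the
+ * register BAR, so instance memory can be accessed directly without
+ * any window paging.
+ */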
+static u32
+nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
+{
+	return nv_rd32(object, 0x700000 + addr);
+}
+
+static void
+nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	return nv_wr32(object, 0x700000 + addr, data);
+}
+
+struct nouveau_oclass
+nv04_instmem_oclass = {
+	.handle = NV_SUBDEV(INSTMEM, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_instmem_ctor,
+		.dtor = nv04_instmem_dtor,
+		.init = _nouveau_instmem_init,
+		.fini = _nouveau_instmem_fini,
+		.rd32 = nv04_instmem_rd32,
+		.wr32 = nv04_instmem_wr32,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
new file mode 100644
index 0000000..b15b613
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
@@ -0,0 +1,38 @@
+#ifndef __NV04_INSTMEM_H__
+#define __NV04_INSTMEM_H__
+
+#include <core/gpuobj.h>
+#include <core/ramht.h>
+#include <core/mm.h>
+
+#include <subdev/instmem.h>
+
+struct nv04_instmem_priv {
+	struct nouveau_instmem base;
+
+	void __iomem *iomem;
+	struct nouveau_mm heap;
+
+	struct nouveau_gpuobj *vbios;
+	struct nouveau_ramht  *ramht;
+	struct nouveau_gpuobj *ramro;
+	struct nouveau_gpuobj *ramfc;
+};
+
+static inline struct nv04_instmem_priv *
+nv04_instmem(void *obj)
+{
+	return (void *)nouveau_instmem(obj);
+}
+
+struct nv04_instobj_priv {
+	struct nouveau_instobj base;
+	struct nouveau_mm_node *mem;
+};
+
+void nv04_instmem_dtor(struct nouveau_object *);
+
+int nv04_instmem_alloc(struct nouveau_instmem *, struct nouveau_object *,
+		       u32 size, u32 align, struct nouveau_object **pobject);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
new file mode 100644
index 0000000..716bf41
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
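+/* nv44-class context sizing applies to all 0x6x chipsets and, via
+ * the 0x0baf mask, to chipsets 0x44, 0x46, 0x4a and 0x4c-0x4f.
+ */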
+static inline int
+nv44_graph_class(struct nv04_instmem_priv *priv)
+{
+	if ((nv_device(priv)->chipset & 0xf0) == 0x60)
+		return 1;
+	return !(0x0baf & (1 << (nv_device(priv)->chipset & 0x0f)));
+}
+
+static int
+nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct pci_dev *pdev = device->pdev;
+	struct nv04_instmem_priv *priv;
+	int ret, bar, vs;
+
+	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	/* map bar */
+	if (pci_resource_len(pdev, 2))
+		bar = 2;
+	else
+		bar = 3;
+
+	priv->iomem = ioremap(pci_resource_start(pdev, bar),
+			      pci_resource_len(pdev, bar));
+	if (!priv->iomem) {
+		nv_error(priv, "unable to map PRAMIN BAR\n");
+		return -EFAULT;
+	}
+
+	/* The PRAMIN aperture maps over the end of VRAM; reserve enough
+	 * space to fit graphics contexts for every channel.  The
+	 * per-channel context sizes below come from engine/graph/nv40.c.
+	 */
+	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
+	if      (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
+	else if (device->chipset  < 0x43) priv->base.reserved = 0x4f00 * vs;
+	else if (nv44_graph_class(priv))  priv->base.reserved = 0x4980 * vs;
+	else				  priv->base.reserved = 0x4a40 * vs;
+	priv->base.reserved += 16 * 1024;
+	priv->base.reserved *= 32;		/* per-channel */
+	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
+	priv->base.reserved += 512 * 1024;	/* object storage */
+
+	priv->base.reserved = round_up(priv->base.reserved, 4096);
+	priv->base.alloc    = nv04_instmem_alloc;
+
+	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
+	if (ret)
+		return ret;
+
+	/* 0x00000-0x10000: reserve for probable vbios image */
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
+				&priv->vbios);
+	if (ret)
+		return ret;
+
+	/* 0x10000-0x18000: reserve for RAMHT */
+	ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0,
+			       &priv->ramht);
+	if (ret)
+		return ret;
+
+	/* 0x18000-0x18200: reserve for RAMRO
+	 * 0x18200-0x20000: padding
+	 */
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
+				&priv->ramro);
+	if (ret)
+		return ret;
+
+	/* 0x20000-0x21000: reserve for RAMFC
+	 * 0x21000-0x40000: padding and some unknown data
+	 */
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static u32
+nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
+{
+	struct nv04_instmem_priv *priv = (void *)object;
+	return ioread32_native(priv->iomem + addr);
+}
+
+static void
+nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+	struct nv04_instmem_priv *priv = (void *)object;
+	iowrite32_native(data, priv->iomem + addr);
+}
+
+struct nouveau_oclass
+nv40_instmem_oclass = {
+	.handle = NV_SUBDEV(INSTMEM, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_instmem_ctor,
+		.dtor = nv04_instmem_dtor,
+		.init = _nouveau_instmem_init,
+		.fini = _nouveau_instmem_fini,
+		.rd32 = nv40_instmem_rd32,
+		.wr32 = nv40_instmem_wr32,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
new file mode 100644
index 0000000..cfc7e31
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/instmem.h>
+#include <subdev/fb.h>
+
+#include <core/mm.h>
+
+struct nv50_instmem_priv {
+	struct nouveau_instmem base;
+	spinlock_t lock;
+	u64 addr;
+};
+
+struct nv50_instobj_priv {
+	struct nouveau_instobj base;
+	struct nouveau_mem *mem;
+};
+
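+/* Instance objects on NV50 come straight from VRAM: size and
+ * alignment are rounded up to the 4KiB small-page size, and the
+ * allocation is tagged with page_shift = 12 to match.
+ */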
+static int
+nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nouveau_fb *pfb = nouveau_fb(parent);
+	struct nv50_instobj_priv *node;
+	u32 align = (unsigned long)data;
+	int ret;
+
+	size  = max((size  + 4095) & ~4095, (u32)4096);
+	align = max((align + 4095) & ~4095, (u32)4096);
+
+	ret = nouveau_instobj_create(parent, engine, oclass, &node);
+	*pobject = nv_object(node);
+	if (ret)
+		return ret;
+
+	ret = pfb->ram.get(pfb, size, align, 0, 0x800, &node->mem);
+	if (ret)
+		return ret;
+
+	node->base.addr = node->mem->offset;
+	node->base.size = node->mem->size << 12;
+	node->mem->page_shift = 12;
+	return 0;
+}
+
+static void
+nv50_instobj_dtor(struct nouveau_object *object)
+{
+	struct nv50_instobj_priv *node = (void *)object;
+	struct nouveau_fb *pfb = nouveau_fb(object);
+	pfb->ram.put(pfb, &node->mem);
+	nouveau_instobj_destroy(&node->base);
+}
+
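+/* PRAMIN is accessed through a sliding 1MiB window: 0x001700 selects
+ * which megabyte of VRAM appears at 0x700000, and priv->addr caches
+ * the current selection so back-to-back accesses within one window
+ * skip the re-program.  The spinlock serialises window moves against
+ * concurrent accessors.
+ */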
+static u32
+nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
+{
+	struct nv50_instmem_priv *priv = (void *)object->engine;
+	struct nv50_instobj_priv *node = (void *)object;
+	unsigned long flags;
+	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+	u32 data;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (unlikely(priv->addr != base)) {
+		nv_wr32(priv, 0x001700, base >> 16);
+		priv->addr = base;
+	}
+	data = nv_rd32(priv, 0x700000 + addr);
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return data;
+}
+
+static void
+nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
+{
+	struct nv50_instmem_priv *priv = (void *)object->engine;
+	struct nv50_instobj_priv *node = (void *)object;
+	unsigned long flags;
+	u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
+	u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (unlikely(priv->addr != base)) {
+		nv_wr32(priv, 0x001700, base >> 16);
+		priv->addr = base;
+	}
+	nv_wr32(priv, 0x700000 + addr, data);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static struct nouveau_oclass
+nv50_instobj_oclass = {
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_instobj_ctor,
+		.dtor = nv50_instobj_dtor,
+		.init = _nouveau_instobj_init,
+		.fini = _nouveau_instobj_fini,
+		.rd32 = nv50_instobj_rd32,
+		.wr32 = nv50_instobj_wr32,
+	},
+};
+
+static int
+nv50_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
+		   u32 size, u32 align, struct nouveau_object **pobject)
+{
+	struct nouveau_object *engine = nv_object(imem);
+	return nouveau_object_ctor(parent, engine, &nv50_instobj_oclass,
+				   (void *)(unsigned long)align, size, pobject);
+}
+
+static int
+nv50_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv50_instmem_priv *priv;
+	int ret;
+
+	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	spin_lock_init(&priv->lock);
+	priv->base.alloc = nv50_instmem_alloc;
+	return 0;
+}
+
+static int
+nv50_instmem_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv50_instmem_priv *priv = (void *)object;
+	priv->addr = ~0ULL;
+	return nouveau_instmem_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv50_instmem_oclass = {
+	.handle = NV_SUBDEV(INSTMEM, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_instmem_ctor,
+		.dtor = _nouveau_instmem_dtor,
+		.init = _nouveau_instmem_init,
+		.fini = nv50_instmem_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
new file mode 100644
index 0000000..fb794e9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/ltcg.h>
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+
+struct nvc0_ltcg_priv {
+	struct nouveau_ltcg base;
+	u32 part_nr;
+	u32 subp_nr;
+	struct nouveau_mm tags;
+	u32 num_tags;
+	struct nouveau_mm_node *tag_ram;
+};
+
+static void
+nvc0_ltcg_subp_isr(struct nvc0_ltcg_priv *priv, int unit, int subp)
+{
+	u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
+	u32 stat = nv_rd32(priv, subp_base + 0x020);
+
+	if (stat) {
+		nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", unit, subp, stat);
+		nv_wr32(priv, subp_base + 0x020, stat);
+	}
+}
+
+static void
+nvc0_ltcg_intr(struct nouveau_subdev *subdev)
+{
+	struct nvc0_ltcg_priv *priv = (void *)subdev;
+	u32 units;
+
+	units = nv_rd32(priv, 0x00017c);
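+	/* each bit set in 0x00017c appears to correspond to an LTC unit with
+	 * a pending interrupt
+	 */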
+	while (units) {
+		u32 subp, unit = ffs(units) - 1;
+		for (subp = 0; subp < priv->subp_nr; subp++)
+			nvc0_ltcg_subp_isr(priv, unit, subp);
+		units &= ~(1 << unit);
+	}
+
+	/* we do something horribly wrong and upset PMFB a lot, so mask off
+	 * interrupts from it after the first one until it's fixed
+	 */
+	nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
+}
+
+static int
+nvc0_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n,
+		     struct nouveau_mm_node **pnode)
+{
+	struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+	int ret;
+
+	ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode);
+	if (ret)
+		*pnode = NULL;
+
+	return ret;
+}
+
+static void
+nvc0_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode)
+{
+	struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+
+	nouveau_mm_free(&priv->tags, pnode);
+}
+
+static void
+nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
+{
+	struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+	u32 last = first + count - 1;
+	int p, i;
+
+	BUG_ON((first > last) || (last >= priv->num_tags));
+
+	nv_wr32(priv, 0x17e8cc, first);
+	nv_wr32(priv, 0x17e8d0, last);
+	nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */
+
+	/* wait until it's finished with clearing */
+	for (p = 0; p < priv->part_nr; ++p) {
+		for (i = 0; i < priv->subp_nr; ++i)
+			nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
+	}
+}
+
+/* TODO: Figure out tag memory details and drop the over-cautious allocation.
+ */
+static int
+nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
+{
+	u32 tag_size, tag_margin, tag_align;
+	int ret;
+
+	nv_wr32(priv, 0x17e8d8, priv->part_nr);
+	if (nv_device(pfb)->card_type >= NV_E0)
+		nv_wr32(priv, 0x17e000, priv->part_nr);
+
+	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
+	priv->num_tags = (pfb->ram.size >> 17) / 4;
+	if (priv->num_tags > (1 << 17))
+		priv->num_tags = 1 << 17; /* we have 17 bits in PTE */
+	priv->num_tags = (priv->num_tags + 63) & ~63; /* round up to 64 */
+
+	tag_align = priv->part_nr * 0x800;
+	tag_margin = (tag_align < 0x6000) ? 0x6000 : tag_align;
+
+	/* 4 part 4 sub: 0x2000 bytes for 56 tags */
+	/* 3 part 4 sub: 0x6000 bytes for 168 tags */
+	/*
+	 * About 147 bytes per tag. Let's be safe and allocate x2, which makes
+	 * 0x4980 bytes for 64 tags, and round up to 0x6000 bytes for 64 tags.
+	 *
+	 * For 4 GiB of memory we'll have 8192 tags which makes 3 MiB, < 0.1 %.
+	 */
+	tag_size  = (priv->num_tags / 64) * 0x6000 + tag_margin;
+	tag_size += tag_align;
+	tag_size  = (tag_size + 0xfff) >> 12; /* round up to 4 KiB pages */
+
+	ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
+	                      &priv->tag_ram);
+	if (ret) {
+		priv->num_tags = 0;
+	} else {
+		u64 tag_base = (priv->tag_ram->offset << 12) + tag_margin;
+
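+		/* align tag_base upwards; do_div() divides in place, so the
+		 * register below ends up programmed in units of tag_align
+		 */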
+		tag_base += tag_align - 1;
+		do_div(tag_base, tag_align);
+
+		nv_wr32(priv, 0x17e8d4, tag_base);
+	}
+	ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
+
+	return ret;
+}
+
+static int
+nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nvc0_ltcg_priv *priv;
+	struct nouveau_fb *pfb = nouveau_fb(parent);
+	u32 parts, mask;
+	int ret, i;
+
+	ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
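+	/* count the LTC partitions actually present: 0x022438 reports the
+	 * total, and 0x022554 appears to mask off disabled ones
+	 */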
+	parts = nv_rd32(priv, 0x022438);
+	mask = nv_rd32(priv, 0x022554);
+	for (i = 0; i < parts; i++) {
+		if (!(mask & (1 << i)))
+			priv->part_nr++;
+	}
+	priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
+
+	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+
+	ret = nvc0_ltcg_init_tag_ram(pfb, priv);
+	if (ret)
+		return ret;
+
+	priv->base.tags_alloc = nvc0_ltcg_tags_alloc;
+	priv->base.tags_free  = nvc0_ltcg_tags_free;
+	priv->base.tags_clear = nvc0_ltcg_tags_clear;
+
+	nv_subdev(priv)->intr = nvc0_ltcg_intr;
+	return 0;
+}
+
+static void
+nvc0_ltcg_dtor(struct nouveau_object *object)
+{
+	struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+	struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+	struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent);
+
+	nouveau_mm_fini(&priv->tags);
+	nouveau_mm_free(&pfb->vram, &priv->tag_ram);
+
+	nouveau_ltcg_destroy(ltcg);
+}
+
+struct nouveau_oclass
+nvc0_ltcg_oclass = {
+	.handle = NV_SUBDEV(LTCG, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_ltcg_ctor,
+		.dtor = nvc0_ltcg_dtor,
+		.init = _nouveau_ltcg_init,
+		.fini = _nouveau_ltcg_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
new file mode 100644
index 0000000..ec9cd6f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mc.h>
+
+static irqreturn_t
+nouveau_mc_intr(int irq, void *arg)
+{
+	struct nouveau_mc *pmc = arg;
+	const struct nouveau_mc_intr *map = pmc->intr_map;
+	struct nouveau_subdev *unit;
+	u32 stat, intr;
+
+	intr = stat = nv_rd32(pmc, 0x000100);
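+	/* stat keeps the raw status for the IRQ_HANDLED decision below, while
+	 * matched bits are cleared from intr to spot unknown sources
+	 */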
+	while (stat && map->stat) {
+		if (stat & map->stat) {
+			unit = nouveau_subdev(pmc, map->unit);
+			if (unit && unit->intr)
+				unit->intr(unit);
+			intr &= ~map->stat;
+		}
+		map++;
+	}
+
+	if (intr)
+		nv_error(pmc, "unknown intr 0x%08x\n", intr);
+
+	return stat ? IRQ_HANDLED : IRQ_NONE;
+}
+
+int
+_nouveau_mc_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_mc *pmc = (void *)object;
+	nv_wr32(pmc, 0x000140, 0x00000000);
+	return nouveau_subdev_fini(&pmc->base, suspend);
+}
+
+int
+_nouveau_mc_init(struct nouveau_object *object)
+{
+	struct nouveau_mc *pmc = (void *)object;
+	int ret = nouveau_subdev_init(&pmc->base);
+	if (ret)
+		return ret;
+	nv_wr32(pmc, 0x000140, 0x00000001);
+	return 0;
+}
+
+void
+_nouveau_mc_dtor(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nouveau_mc *pmc = (void *)object;
+	free_irq(device->pdev->irq, pmc);
+	nouveau_subdev_destroy(&pmc->base);
+}
+
+int
+nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
+		   struct nouveau_oclass *oclass,
+		   const struct nouveau_mc_intr *intr_map,
+		   int length, void **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_mc *pmc;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PMC",
+				     "master", length, pobject);
+	pmc = *pobject;
+	if (ret)
+		return ret;
+
+	pmc->intr_map = intr_map;
+
+	ret = request_irq(device->pdev->irq, nouveau_mc_intr,
+			  IRQF_SHARED, "nouveau", pmc);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
new file mode 100644
index 0000000..64aa4ed
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mc.h>
+
+struct nv04_mc_priv {
+	struct nouveau_mc base;
+};
+
+const struct nouveau_mc_intr
+nv04_mc_intr[] = {
+	{ 0x00000001, NVDEV_ENGINE_MPEG },	/* NV17- MPEG/ME */
+	{ 0x00000100, NVDEV_ENGINE_FIFO },
+	{ 0x00001000, NVDEV_ENGINE_GR },
+	{ 0x00020000, NVDEV_ENGINE_VP },	/* NV40- */
+	{ 0x00100000, NVDEV_SUBDEV_TIMER },
+	{ 0x01000000, NVDEV_ENGINE_DISP },	/* NV04- PCRTC0 */
+	{ 0x02000000, NVDEV_ENGINE_DISP },	/* NV11- PCRTC1 */
+	{ 0x10000000, NVDEV_SUBDEV_BUS },
+	{ 0x80000000, NVDEV_ENGINE_SW },
+	{}
+};
+
+static int
+nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv04_mc_priv *priv;
+	int ret;
+
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int
+nv04_mc_init(struct nouveau_object *object)
+{
+	struct nv04_mc_priv *priv = (void *)object;
+
+	nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
+	nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
+
+	return nouveau_mc_init(&priv->base);
+}
+
+struct nouveau_oclass
+nv04_mc_oclass = {
+	.handle = NV_SUBDEV(MC, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv04_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
new file mode 100644
index 0000000..d989178
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mc.h>
+
+struct nv44_mc_priv {
+	struct nouveau_mc base;
+};
+
+static int
+nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv44_mc_priv *priv;
+	int ret;
+
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int
+nv44_mc_init(struct nouveau_object *object)
+{
+	struct nv44_mc_priv *priv = (void *)object;
+	u32 tmp = nv_rd32(priv, 0x10020c);
+
+	nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
+
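+	/* 0x10020c holds the VRAM size on these chips; it appears to be used
+	 * here to place the 0x0017xx window registers
+	 */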
+	nv_wr32(priv, 0x001700, tmp);
+	nv_wr32(priv, 0x001704, 0);
+	nv_wr32(priv, 0x001708, 0);
+	nv_wr32(priv, 0x00170c, tmp);
+
+	return nouveau_mc_init(&priv->base);
+}
+
+struct nouveau_oclass
+nv44_mc_oclass = {
+	.handle = NV_SUBDEV(MC, 0x44),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv44_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv44_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
new file mode 100644
index 0000000..732d810
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mc.h>
+
+struct nv50_mc_priv {
+	struct nouveau_mc base;
+};
+
+static const struct nouveau_mc_intr
+nv50_mc_intr[] = {
+	{ 0x00000001, NVDEV_ENGINE_MPEG },
+	{ 0x00000100, NVDEV_ENGINE_FIFO },
+	{ 0x00001000, NVDEV_ENGINE_GR },
+	{ 0x00004000, NVDEV_ENGINE_CRYPT },	/* NV84- */
+	{ 0x00008000, NVDEV_ENGINE_BSP },	/* NV84- */
+	{ 0x00100000, NVDEV_SUBDEV_TIMER },
+	{ 0x00200000, NVDEV_SUBDEV_GPIO },
+	{ 0x04000000, NVDEV_ENGINE_DISP },
+	{ 0x10000000, NVDEV_SUBDEV_BUS },
+	{ 0x80000000, NVDEV_ENGINE_SW },
+	{ 0x0000d101, NVDEV_SUBDEV_FB },
+	{},
+};
+
+static int
+nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv50_mc_priv *priv;
+	int ret;
+
+	ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int
+nv50_mc_init(struct nouveau_object *object)
+{
+	struct nv50_mc_priv *priv = (void *)object;
+	nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
+	return nouveau_mc_init(&priv->base);
+}
+
+struct nouveau_oclass
+nv50_mc_oclass = {
+	.handle = NV_SUBDEV(MC, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv50_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
new file mode 100644
index 0000000..0d57b4d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mc.h>
+
+struct nv98_mc_priv {
+	struct nouveau_mc base;
+};
+
+static const struct nouveau_mc_intr
+nv98_mc_intr[] = {
+	{ 0x00000001, NVDEV_ENGINE_PPP },
+	{ 0x00000100, NVDEV_ENGINE_FIFO },
+	{ 0x00001000, NVDEV_ENGINE_GR },
+	{ 0x00004000, NVDEV_ENGINE_CRYPT },	/* NV84:NVA3 */
+	{ 0x00008000, NVDEV_ENGINE_BSP },
+	{ 0x00080000, NVDEV_SUBDEV_THERM },	/* NVA3:NVC0 */
+	{ 0x00100000, NVDEV_SUBDEV_TIMER },
+	{ 0x00200000, NVDEV_SUBDEV_GPIO },
+	{ 0x00400000, NVDEV_ENGINE_COPY0 },	/* NVA3-     */
+	{ 0x04000000, NVDEV_ENGINE_DISP },
+	{ 0x10000000, NVDEV_SUBDEV_BUS },
+	{ 0x80000000, NVDEV_ENGINE_SW },
+	{ 0x0040d101, NVDEV_SUBDEV_FB },
+	{},
+};
+
+static int
+nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv98_mc_priv *priv;
+	int ret;
+
+	ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nv98_mc_oclass = {
+	.handle = NV_SUBDEV(MC, 0x98),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv98_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv50_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
new file mode 100644
index 0000000..4c97cd2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mc.h>
+
+struct nvc0_mc_priv {
+	struct nouveau_mc base;
+};
+
+static const struct nouveau_mc_intr
+nvc0_mc_intr[] = {
+	{ 0x00000001, NVDEV_ENGINE_PPP },
+	{ 0x00000020, NVDEV_ENGINE_COPY0 },
+	{ 0x00000040, NVDEV_ENGINE_COPY1 },
+	{ 0x00000100, NVDEV_ENGINE_FIFO },
+	{ 0x00001000, NVDEV_ENGINE_GR },
+	{ 0x00008000, NVDEV_ENGINE_BSP },
+	{ 0x00020000, NVDEV_ENGINE_VP },
+	{ 0x00040000, NVDEV_SUBDEV_THERM },
+	{ 0x00100000, NVDEV_SUBDEV_TIMER },
+	{ 0x00200000, NVDEV_SUBDEV_GPIO },
+	{ 0x02000000, NVDEV_SUBDEV_LTCG },
+	{ 0x04000000, NVDEV_ENGINE_DISP },
+	{ 0x10000000, NVDEV_SUBDEV_BUS },
+	{ 0x40000000, NVDEV_SUBDEV_IBUS },
+	{ 0x80000000, NVDEV_ENGINE_SW },
+	{},
+};
+
+static int
+nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nvc0_mc_priv *priv;
+	int ret;
+
+	ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_mc_oclass = {
+	.handle = NV_SUBDEV(MC, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_mc_ctor,
+		.dtor = _nouveau_mc_dtor,
+		.init = nv50_mc_init,
+		.fini = _nouveau_mc_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
new file mode 100644
index 0000000..e286e13
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/option.h>
+
+#include <subdev/i2c.h>
+#include <subdev/mxm.h>
+#include <subdev/bios.h>
+#include <subdev/bios/mxm.h>
+
+#include "mxms.h"
+
+static bool
+mxm_shadow_rom_fetch(struct nouveau_i2c_port *i2c, u8 addr,
+		     u8 offset, u8 size, u8 *data)
+{
+	struct i2c_msg msgs[] = {
+		{ .addr = addr, .flags = 0, .len = 1, .buf = &offset },
+		{ .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
+	};
+
+	return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+}
+
+static bool
+mxm_shadow_rom(struct nouveau_mxm *mxm, u8 version)
+{
+	struct nouveau_bios *bios = nouveau_bios(mxm);
+	struct nouveau_i2c *i2c = nouveau_i2c(mxm);
+	struct nouveau_i2c_port *port = NULL;
+	u8 i2cidx, mxms[6], addr, size;
+
+	i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f;
+	if (i2cidx < 0x0f)
+		port = i2c->find(i2c, i2cidx);
+	if (!port)
+		return false;
+
+	addr = 0x54;
+	if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms)) {
+		addr = 0x56;
+		if (!mxm_shadow_rom_fetch(port, addr, 0, 6, mxms))
+			return false;
+	}
+
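+	/* point mxms at the on-stack header copy just long enough for the
+	 * size helpers to parse it, then allocate the full structure
+	 */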
+	mxm->mxms = mxms;
+	size = mxms_headerlen(mxm) + mxms_structlen(mxm);
+	mxm->mxms = kmalloc(size, GFP_KERNEL);
+
+	if (mxm->mxms &&
+	    mxm_shadow_rom_fetch(port, addr, 0, size, mxm->mxms))
+		return true;
+
+	kfree(mxm->mxms);
+	mxm->mxms = NULL;
+	return false;
+}
+
+#if defined(CONFIG_ACPI)
+static bool
+mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
+{
+	struct nouveau_device *device = nv_device(mxm);
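+	/* MXM _DSM method UUID, stored in the byte order the ACPI
+	 * interpreter expects
+	 */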
+	static char muid[] = {
+		0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
+		0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
+	};
+	u32 mxms_args[] = { 0x00000000 };
+	union acpi_object args[4] = {
+		/* _DSM MUID */
+		{ .buffer.type = ACPI_TYPE_BUFFER,
+		  .buffer.length = sizeof(muid),
+		  .buffer.pointer = muid,
+		},
+		/* spec says this can be zero to mean "highest revision", but
+		 * of course there's at least one bios out there which fails
+		 * unless you pass in exactly the version it supports...
+		 */
+		{ .integer.type = ACPI_TYPE_INTEGER,
+		  .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
+		},
+		/* MXMS function */
+		{ .integer.type = ACPI_TYPE_INTEGER,
+		  .integer.value = 0x00000010,
+		},
+		/* Pointer to MXMS arguments */
+		{ .buffer.type = ACPI_TYPE_BUFFER,
+		  .buffer.length = sizeof(mxms_args),
+		  .buffer.pointer = (char *)mxms_args,
+		},
+	};
+	struct acpi_object_list list = { ARRAY_SIZE(args), args };
+	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj;
+	acpi_handle handle;
+	int ret;
+
+	handle = DEVICE_ACPI_HANDLE(&device->pdev->dev);
+	if (!handle)
+		return false;
+
+	ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
+	if (ret) {
+		nv_debug(mxm, "DSM MXMS failed: %d\n", ret);
+		return false;
+	}
+
+	obj = retn.pointer;
+	if (obj->type == ACPI_TYPE_BUFFER) {
+		mxm->mxms = kmemdup(obj->buffer.pointer,
+				    obj->buffer.length, GFP_KERNEL);
+	} else if (obj->type == ACPI_TYPE_INTEGER) {
+		nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value);
+	}
+
+	kfree(obj);
+	return mxm->mxms != NULL;
+}
+#endif
+
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+
+#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
+
+static u8
+wmi_wmmx_mxmi(struct nouveau_mxm *mxm, u8 version)
+{
+	u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
+	struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
+	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj;
+	acpi_status status;
+
+	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
+	if (ACPI_FAILURE(status)) {
+		nv_debug(mxm, "WMMX MXMI returned %d\n", status);
+		return 0x00;
+	}
+
+	obj = retn.pointer;
+	if (obj->type == ACPI_TYPE_INTEGER) {
+		version = obj->integer.value;
+		nv_debug(mxm, "WMMX MXMI version %d.%d\n",
+			     (version >> 4), version & 0x0f);
+	} else {
+		version = 0;
+		nv_debug(mxm, "WMMX MXMI returned non-integer\n");
+	}
+
+	kfree(obj);
+	return version;
+}
+
+static bool
+mxm_shadow_wmi(struct nouveau_mxm *mxm, u8 version)
+{
+	u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
+	struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
+	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj;
+	acpi_status status;
+
+	if (!wmi_has_guid(WMI_WMMX_GUID)) {
+		nv_debug(mxm, "WMMX GUID not found\n");
+		return false;
+	}
+
+	mxms_args[1] = wmi_wmmx_mxmi(mxm, 0x00);
+	if (!mxms_args[1])
+		mxms_args[1] = wmi_wmmx_mxmi(mxm, version);
+	if (!mxms_args[1])
+		return false;
+
+	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
+	if (ACPI_FAILURE(status)) {
+		nv_debug(mxm, "WMMX MXMS returned %d\n", status);
+		return false;
+	}
+
+	obj = retn.pointer;
+	if (obj->type == ACPI_TYPE_BUFFER) {
+		mxm->mxms = kmemdup(obj->buffer.pointer,
+				    obj->buffer.length, GFP_KERNEL);
+	}
+
+	kfree(obj);
+	return mxm->mxms != NULL;
+}
+#endif
+
+static struct mxm_shadow_h {
+	const char *name;
+	bool (*exec)(struct nouveau_mxm *, u8 version);
+} _mxm_shadow[] = {
+	{ "ROM", mxm_shadow_rom },
+#if defined(CONFIG_ACPI)
+	{ "DSM", mxm_shadow_dsm },
+#endif
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+	{ "WMI", mxm_shadow_wmi },
+#endif
+	{}
+};
+
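+/* try each shadow method in turn until one produces an MXMS that passes
+ * signature, version and checksum validation
+ */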
+static int
+mxm_shadow(struct nouveau_mxm *mxm, u8 version)
+{
+	struct mxm_shadow_h *shadow = _mxm_shadow;
+	do {
+		nv_debug(mxm, "checking %s\n", shadow->name);
+		if (shadow->exec(mxm, version)) {
+			if (mxms_valid(mxm))
+				return 0;
+			kfree(mxm->mxms);
+			mxm->mxms = NULL;
+		}
+	} while ((++shadow)->name);
+	return -ENOENT;
+}
+
+int
+nouveau_mxm_create_(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, int length, void **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nouveau_mxm *mxm;
+	u8  ver, len;
+	u16 data;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm",
+				     length, pobject);
+	mxm = *pobject;
+	if (ret)
+		return ret;
+
+	data = mxm_table(bios, &ver, &len);
+	if (!data || !(ver = nv_ro08(bios, data))) {
+		nv_debug(mxm, "no VBIOS data, nothing to do\n");
+		return 0;
+	}
+
+	nv_info(mxm, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
+
+	if (mxm_shadow(mxm, ver)) {
+		nv_info(mxm, "failed to locate valid SIS\n");
+#if 0
+		/* we should, perhaps, fall back to some kind of limited
+		 * mode here if the x86 vbios hasn't already done the
+		 * work for us (so we prevent loading with completely
+		 * whacked vbios tables).
+		 */
+		return -EINVAL;
+#else
+		return 0;
+#endif
+	}
+
+	nv_info(mxm, "MXMS Version %d.%d\n",
+		mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
+	mxms_foreach(mxm, 0, NULL, NULL);
+
+	if (nouveau_boolopt(device->cfgopt, "NvMXMDCB", true))
+		mxm->action |= MXM_SANITISE_DCB;
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
new file mode 100644
index 0000000..4bde7f7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mxm.h>
+#include "mxms.h"
+
+#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
+#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+
+static u8 *
+mxms_data(struct nouveau_mxm *mxm)
+{
+	return mxm->mxms;
+}
+
+u16
+mxms_version(struct nouveau_mxm *mxm)
+{
+	u8 *mxms = mxms_data(mxm);
+	u16 version = (mxms[4] << 8) | mxms[5];
+	switch (version) {
+	case 0x0200:
+	case 0x0201:
+	case 0x0300:
+		return version;
+	default:
+		break;
+	}
+
+	nv_debug(mxm, "unknown version %d.%d\n", mxms[4], mxms[5]);
+	return 0x0000;
+}
+
+u16
+mxms_headerlen(struct nouveau_mxm *mxm)
+{
+	return 8;
+}
+
+u16
+mxms_structlen(struct nouveau_mxm *mxm)
+{
+	return *(u16 *)&mxms_data(mxm)[6];
+}
+
+bool
+mxms_checksum(struct nouveau_mxm *mxm)
+{
+	u16 size = mxms_headerlen(mxm) + mxms_structlen(mxm);
+	u8 *mxms = mxms_data(mxm), sum = 0;
+	while (size--)
+		sum += *mxms++;
+	if (sum) {
+		nv_debug(mxm, "checksum invalid\n");
+		return false;
+	}
+	return true;
+}
+
+bool
+mxms_valid(struct nouveau_mxm *mxm)
+{
+	u8 *mxms = mxms_data(mxm);
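+	/* the structure must begin with the signature "MXM_" (0x5f4d584d when
+	 * read as a little-endian u32)
+	 */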
+	if (*(u32 *)mxms != 0x5f4d584d) {
+		nv_debug(mxm, "signature invalid\n");
+		return false;
+	}
+
+	if (!mxms_version(mxm) || !mxms_checksum(mxm))
+		return false;
+
+	return true;
+}
+
+bool
+mxms_foreach(struct nouveau_mxm *mxm, u8 types,
+	     bool (*exec)(struct nouveau_mxm *, u8 *, void *), void *info)
+{
+	u8 *mxms = mxms_data(mxm);
+	u8 *desc = mxms + mxms_headerlen(mxm);
+	u8 *fini = desc + mxms_structlen(mxm) - 1;
+	while (desc < fini) {
+		u8 type = desc[0] & 0x0f;
+		u8 headerlen = 0;
+		u8 recordlen = 0;
+		u8 entries = 0;
+
+		switch (type) {
+		case 0: /* Output Device Structure */
+			if (mxms_version(mxm) >= 0x0300)
+				headerlen = 8;
+			else
+				headerlen = 6;
+			break;
+		case 1: /* System Cooling Capability Structure */
+		case 2: /* Thermal Structure */
+		case 3: /* Input Power Structure */
+			headerlen = 4;
+			break;
+		case 4: /* GPIO Device Structure */
+			headerlen = 4;
+			recordlen = 2;
+			entries   = (ROM32(desc[0]) & 0x01f00000) >> 20;
+			break;
+		case 5: /* Vendor Specific Structure */
+			headerlen = 8;
+			break;
+		case 6: /* Backlight Control Structure */
+			if (mxms_version(mxm) >= 0x0300) {
+				headerlen = 4;
+				recordlen = 8;
+				entries   = (desc[1] & 0xf0) >> 4;
+			} else {
+				headerlen = 8;
+			}
+			break;
+		case 7: /* Fan Control Structure */
+			headerlen = 8;
+			recordlen = 4;
+			entries   = desc[1] & 0x07;
+			break;
+		default:
+			nv_debug(mxm, "unknown descriptor type %d\n", type);
+			return false;
+		}
+
+		if (nv_subdev(mxm)->debug >= NV_DBG_DEBUG && (exec == NULL)) {
+			static const char *mxms_desc_name[] = {
+				"ODS", "SCCS", "TS", "IPS",
+				"GSD", "VSS", "BCS", "FCS",
+			};
+			u8 *dump = desc;
+			int i, j;
+
+			nv_debug(mxm, "%4s: ", mxms_desc_name[type]);
+			for (j = headerlen - 1; j >= 0; j--)
+				pr_cont("%02x", dump[j]);
+			pr_cont("\n");
+			dump += headerlen;
+
+			for (i = 0; i < entries; i++, dump += recordlen) {
+				nv_debug(mxm, "      ");
+				for (j = recordlen - 1; j >= 0; j--)
+					pr_cont("%02x", dump[j]);
+				pr_cont("\n");
+			}
+		}
+
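+		/* 'types' is a bitmask selecting which descriptor types are
+		 * passed to the callback
+		 */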
+		if (types & (1 << type)) {
+			if (!exec(mxm, desc, info))
+				return false;
+		}
+
+		desc += headerlen + (entries * recordlen);
+	}
+
+	return true;
+}
+
+void
+mxms_output_device(struct nouveau_mxm *mxm, u8 *pdata, struct mxms_odev *desc)
+{
+	u64 data = ROM32(pdata[0]);
+	if (mxms_version(mxm) >= 0x0300)
+		data |= (u64)ROM16(pdata[4]) << 32;
+
+	desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
+	desc->ddc_port  = (data & 0x0000000000000f00ULL) >> 8;
+	desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
+	desc->dig_conn  = (data & 0x0000000000780000ULL) >> 19;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h
new file mode 100644
index 0000000..5e0be0c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h
@@ -0,0 +1,22 @@
+#ifndef __NVMXM_MXMS_H__
+#define __NVMXM_MXMS_H__
+
+struct mxms_odev {
+	u8 outp_type;
+	u8 conn_type;
+	u8 ddc_port;
+	u8 dig_conn;
+};
+
+void mxms_output_device(struct nouveau_mxm *, u8 *, struct mxms_odev *);
+
+u16  mxms_version(struct nouveau_mxm *);
+u16  mxms_headerlen(struct nouveau_mxm *);
+u16  mxms_structlen(struct nouveau_mxm *);
+bool mxms_checksum(struct nouveau_mxm *);
+bool mxms_valid(struct nouveau_mxm *);
+
+bool mxms_foreach(struct nouveau_mxm *, u8,
+		  bool (*)(struct nouveau_mxm *, u8 *, void *), void *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
new file mode 100644
index 0000000..af129c2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/mxm.h>
+#include <subdev/bios.h>
+#include <subdev/bios/conn.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/mxm.h>
+
+#include "mxms.h"
+
+struct nv50_mxm_priv {
+	struct nouveau_mxm base;
+};
+
+struct context {
+	u32 *outp;
+	struct mxms_odev desc;
+};
+
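+/* mxms_foreach() callback: returning false aborts the walk, signalling that
+ * a TMDS output sharing our digital connection was found
+ */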
+static bool
+mxm_match_tmds_partner(struct nouveau_mxm *mxm, u8 *data, void *info)
+{
+	struct context *ctx = info;
+	struct mxms_odev desc;
+
+	mxms_output_device(mxm, data, &desc);
+	if (desc.outp_type == 2 &&
+	    desc.dig_conn == ctx->desc.dig_conn)
+		return false;
+	return true;
+}
+
+static bool
+mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info)
+{
+	struct nouveau_bios *bios = nouveau_bios(mxm);
+	struct context *ctx = info;
+	u64 desc = *(u64 *)data;
+
+	mxms_output_device(mxm, data, &ctx->desc);
+
+	/* match dcb encoder type to mxm-ods device type */
+	if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
+		return true;
+
+	/* digital output, have some extra stuff to match here, there's a
+	 * table in the vbios that provides a mapping from the mxm digital
+	 * connection enum values to SOR/link
+	 */
+	if ((desc & 0x00000000000000f0) >= 0x20) {
+		/* check against sor index */
+		u8 link = mxm_sor_map(bios, ctx->desc.dig_conn);
+		if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
+			return true;
+
+		/* check dcb entry has a compatible link field */
+		link = (link & 0x30) >> 4;
+		if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
+			return true;
+	}
+
+	/* mark this descriptor accounted for by setting invalid device type,
+	 * except of course some manufacturers don't follow specs properly and
+	 * we need to avoid killing off the TMDS function on DP connectors
+	 * if MXM-SIS is missing an entry for it.
+	 */
+	data[0] &= ~0xf0;
+	if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
+	    mxms_foreach(mxm, 0x01, mxm_match_tmds_partner, ctx)) {
+		data[0] |= 0x20; /* modify descriptor to match TMDS now */
+	} else {
+		data[0] |= 0xf0;
+	}
+
+	return false;
+}
+
+static int
+mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
+{
+	struct nouveau_mxm *mxm = nouveau_mxm(bios);
+	struct context ctx = { .outp = (u32 *)(bios->data + pdcb) };
+	u8 type, i2cidx, link, ver, len;
+	u8 *conn;
+
+	/* look for an output device structure that matches this dcb entry.
+	 * if one isn't found, disable it.
+	 */
+	if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) {
+		nv_debug(mxm, "disable %d: 0x%08x 0x%08x\n",
+			idx, ctx.outp[0], ctx.outp[1]);
+		ctx.outp[0] |= 0x0000000f;
+		return 0;
+	}
+
+	/* modify the output's ddc/aux port, there's a pointer to a table
+	 * with the mapping from mxm ddc/aux port to dcb i2c_index in the
+	 * vbios mxm table
+	 */
+	i2cidx = mxm_ddc_map(bios, ctx.desc.ddc_port);
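+	/* the map appears to pack the ddc index in the low nibble and the
+	 * aux index in the high nibble; pick the one this output type uses
+	 */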
+	if ((ctx.outp[0] & 0x0000000f) != DCB_OUTPUT_DP)
+		i2cidx = (i2cidx & 0x0f) << 4;
+	else
+		i2cidx = (i2cidx & 0xf0);
+
+	if (i2cidx != 0xf0) {
+		ctx.outp[0] &= ~0x000000f0;
+		ctx.outp[0] |= i2cidx;
+	}
+
+	/* override dcb sorconf.link, based on what mxm data says */
+	switch (ctx.desc.outp_type) {
+	case 0x00: /* Analog CRT */
+	case 0x01: /* Analog TV/HDTV */
+		break;
+	default:
+		link = mxm_sor_map(bios, ctx.desc.dig_conn) & 0x30;
+		ctx.outp[1] &= ~0x00000030;
+		ctx.outp[1] |= link;
+		break;
+	}
+
+	/* we may need to fixup various other vbios tables based on what
+	 * the descriptor says the connector type should be.
+	 *
+	 * in a lot of cases, the vbios tables will claim DVI-I is possible,
+	 * and the mxm data says the connector is really HDMI.  another
+	 * common example is DP->eDP.
+	 */
+	conn  = bios->data;
+	conn += dcb_conn(bios, (ctx.outp[0] & 0x0000f000) >> 12, &ver, &len);
+	type  = conn[0];
+	switch (ctx.desc.conn_type) {
+	case 0x01: /* LVDS */
+		ctx.outp[1] |= 0x00000004; /* use_power_scripts */
+		/* XXX: modify default link width in LVDS table */
+		break;
+	case 0x02: /* HDMI */
+		type = DCB_CONNECTOR_HDMI_1;
+		break;
+	case 0x03: /* DVI-D */
+		type = DCB_CONNECTOR_DVI_D;
+		break;
+	case 0x0e: /* eDP, falls through to DPint */
+		ctx.outp[1] |= 0x00010000;
+	case 0x07: /* DP internal, wtf is this?? HP8670w */
+		ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
+		type = DCB_CONNECTOR_eDP;
+		break;
+	default:
+		break;
+	}
+
+	if (mxms_version(mxm) >= 0x0300)
+		conn[0] = type;
+
+	return 0;
+}
+
+static bool
+mxm_show_unmatched(struct nouveau_mxm *mxm, u8 *data, void *info)
+{
+	u64 desc = *(u64 *)data;
+	if ((desc & 0xf0) != 0xf0)
+		nv_info(mxm, "unmatched output device 0x%016llx\n", desc);
+	return true;
+}
+
+static void
+mxm_dcb_sanitise(struct nouveau_mxm *mxm)
+{
+	struct nouveau_bios *bios = nouveau_bios(mxm);
+	u8  ver, hdr, cnt, len;
+	u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
+	if (dcb == 0x0000 || ver != 0x40) {
+		nv_debug(mxm, "unsupported DCB version\n");
+		return;
+	}
+
+	dcb_outp_foreach(bios, NULL, mxm_dcb_sanitise_entry);
+	mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
+}
+
+static int
+nv50_mxm_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv50_mxm_priv *priv;
+	int ret;
+
+	ret = nouveau_mxm_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	if (priv->base.action & MXM_SANITISE_DCB)
+		mxm_dcb_sanitise(&priv->base);
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_mxm_oclass = {
+	.handle = NV_SUBDEV(MXM, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_mxm_ctor,
+		.dtor = _nouveau_mxm_dtor,
+		.init = _nouveau_mxm_init,
+		.fini = _nouveau_mxm_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
new file mode 100644
index 0000000..a00a5a7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright 2012 The Nouveau community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <core/object.h>
+#include <core/device.h>
+
+#include <subdev/bios.h>
+
+#include "priv.h"
+
+static int
+nouveau_therm_update_trip(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_therm_trip_point *trip = priv->fan->bios.trip,
+					*cur_trip = NULL,
+					*last_trip = priv->last_trip;
+	u8  temp = therm->temp_get(therm);
+	u16 duty, i;
+
+	/* look for the trip point corresponding to the current temperature */
+	cur_trip = NULL;
+	for (i = 0; i < priv->fan->bios.nr_fan_trip; i++) {
+		if (temp >= trip[i].temp)
+			cur_trip = &trip[i];
+	}
+
+	/* account for the hysteresis cycle */
+	if (last_trip && temp <= (last_trip->temp) &&
+	    temp > (last_trip->temp - last_trip->hysteresis))
+		cur_trip = last_trip;
+
+	if (cur_trip) {
+		duty = cur_trip->fan_duty;
+		priv->last_trip = cur_trip;
+	} else {
+		duty = 0;
+		priv->last_trip = NULL;
+	}
+
+	return duty;
+}
+
+static int
+nouveau_therm_update_linear(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	u8  linear_min_temp = priv->fan->bios.linear_min_temp;
+	u8  linear_max_temp = priv->fan->bios.linear_max_temp;
+	u8  temp = therm->temp_get(therm);
+	u16 duty;
+
+	/* handle the non-linear part first */
+	if (temp < linear_min_temp)
+		return priv->fan->bios.min_duty;
+	else if (temp > linear_max_temp)
+		return priv->fan->bios.max_duty;
+
+	/* we are in the linear zone */
+	duty  = (temp - linear_min_temp);
+	duty *= (priv->fan->bios.max_duty - priv->fan->bios.min_duty);
+	duty /= (linear_max_temp - linear_min_temp);
+	duty += priv->fan->bios.min_duty;
+
+	return duty;
+}
+
+static void
+nouveau_therm_update(struct nouveau_therm *therm, int mode)
+{
+	struct nouveau_timer *ptimer = nouveau_timer(therm);
+	struct nouveau_therm_priv *priv = (void *)therm;
+	unsigned long flags;
+	int duty;
+
+	spin_lock_irqsave(&priv->lock, flags);
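+	/* a negative mode means "keep the current mode"; the alarm callback
+	 * uses this to re-evaluate the fan speed
+	 */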
+	if (mode < 0)
+		mode = priv->mode;
+	priv->mode = mode;
+
+	switch (mode) {
+	case NOUVEAU_THERM_CTRL_MANUAL:
+		duty = nouveau_therm_fan_get(therm);
+		if (duty < 0)
+			duty = 100;
+		break;
+	case NOUVEAU_THERM_CTRL_AUTO:
+		if (priv->fan->bios.nr_fan_trip)
+			duty = nouveau_therm_update_trip(therm);
+		else
+			duty = nouveau_therm_update_linear(therm);
+		break;
+	case NOUVEAU_THERM_CTRL_NONE:
+	default:
+		goto done;
+	}
+
+	nv_debug(therm, "FAN target request: %d%%\n", duty);
+	nouveau_therm_fan_set(therm, (mode != NOUVEAU_THERM_CTRL_AUTO), duty);
+
+done:
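+	/* in automatic mode, re-arm the alarm so the sensor is polled again
+	 * in one second
+	 */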
+	if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO))
+		ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nouveau_therm_alarm(struct nouveau_alarm *alarm)
+{
+	struct nouveau_therm_priv *priv =
+	       container_of(alarm, struct nouveau_therm_priv, alarm);
+	nouveau_therm_update(&priv->base, -1);
+}
+
+int
+nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_device *device = nv_device(therm);
+	static const char *name[] = {
+		"disabled",
+		"manual",
+		"automatic"
+	};
+
+	/* The default PDAEMON ucode interferes with fan management */
+	if ((mode >= ARRAY_SIZE(name)) ||
+	    (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0))
+		return -EINVAL;
+
+	/* do not allow automatic fan management if the thermal sensor is
+	 * not available
+	 */
+	if (mode == NOUVEAU_THERM_CTRL_AUTO && therm->temp_get(therm) < 0)
+		return -EINVAL;
+
+	if (priv->mode == mode)
+		return 0;
+
+	nv_info(therm, "fan management: %s\n", name[mode]);
+	nouveau_therm_update(therm, mode);
+	return 0;
+}
+
+int
+nouveau_therm_attr_get(struct nouveau_therm *therm,
+		       enum nouveau_therm_attr_type type)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	switch (type) {
+	case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
+		return priv->fan->bios.min_duty;
+	case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
+		return priv->fan->bios.max_duty;
+	case NOUVEAU_THERM_ATTR_FAN_MODE:
+		return priv->mode;
+	case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
+		return priv->bios_sensor.thrs_fan_boost.temp;
+	case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
+		return priv->bios_sensor.thrs_fan_boost.hysteresis;
+	case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
+		return priv->bios_sensor.thrs_down_clock.temp;
+	case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
+		return priv->bios_sensor.thrs_down_clock.hysteresis;
+	case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
+		return priv->bios_sensor.thrs_critical.temp;
+	case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
+		return priv->bios_sensor.thrs_critical.hysteresis;
+	case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
+		return priv->bios_sensor.thrs_shutdown.temp;
+	case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
+		return priv->bios_sensor.thrs_shutdown.hysteresis;
+	}
+
+	return -EINVAL;
+}
+
+int
+nouveau_therm_attr_set(struct nouveau_therm *therm,
+		       enum nouveau_therm_attr_type type, int value)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	switch (type) {
+	case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
+		if (value < 0)
+			value = 0;
+		if (value > priv->fan->bios.max_duty)
+			value = priv->fan->bios.max_duty;
+		priv->fan->bios.min_duty = value;
+		return 0;
+	case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
+		if (value < 0)
+			value = 0;
+		if (value < priv->fan->bios.min_duty)
+			value = priv->fan->bios.min_duty;
+		priv->fan->bios.max_duty = value;
+		return 0;
+	case NOUVEAU_THERM_ATTR_FAN_MODE:
+		return nouveau_therm_fan_mode(therm, value);
+	case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
+		priv->bios_sensor.thrs_fan_boost.temp = value;
+		priv->sensor.program_alarms(therm);
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
+		priv->bios_sensor.thrs_fan_boost.hysteresis = value;
+		priv->sensor.program_alarms(therm);
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
+		priv->bios_sensor.thrs_down_clock.temp = value;
+		priv->sensor.program_alarms(therm);
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
+		priv->bios_sensor.thrs_down_clock.hysteresis = value;
+		priv->sensor.program_alarms(therm);
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
+		priv->bios_sensor.thrs_critical.temp = value;
+		priv->sensor.program_alarms(therm);
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
+		priv->bios_sensor.thrs_critical.hysteresis = value;
+		priv->sensor.program_alarms(therm);
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
+		priv->bios_sensor.thrs_shutdown.temp = value;
+		priv->sensor.program_alarms(therm);
+		return 0;
+	case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
+		priv->bios_sensor.thrs_shutdown.hysteresis = value;
+		priv->sensor.program_alarms(therm);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int
+_nouveau_therm_init(struct nouveau_object *object)
+{
+	struct nouveau_therm *therm = (void *)object;
+	struct nouveau_therm_priv *priv = (void *)therm;
+	int ret;
+
+	ret = nouveau_subdev_init(&therm->base);
+	if (ret)
+		return ret;
+
+	if (priv->suspend >= 0) {
+		/* restore the fan mode saved at suspend time; priv->mode was
+		 * forced to NONE by fini()
+		 */
+		nouveau_therm_fan_mode(therm, priv->suspend);
+		priv->suspend = -1;
+	}
+	priv->sensor.program_alarms(therm);
+	return 0;
+}
+
+int
+_nouveau_therm_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nouveau_therm *therm = (void *)object;
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	if (suspend) {
+		priv->suspend = priv->mode;
+		priv->mode = NOUVEAU_THERM_CTRL_NONE;
+	}
+
+	return nouveau_subdev_fini(&therm->base, suspend);
+}
+
+int
+nouveau_therm_create_(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass,
+		      int length, void **pobject)
+{
+	struct nouveau_therm_priv *priv;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PTHERM",
+				     "therm", length, pobject);
+	priv = *pobject;
+	if (ret)
+		return ret;
+
+	nouveau_alarm_init(&priv->alarm, nouveau_therm_alarm);
+	spin_lock_init(&priv->lock);
+	spin_lock_init(&priv->sensor.alarm_program_lock);
+
+	priv->base.fan_get = nouveau_therm_fan_user_get;
+	priv->base.fan_set = nouveau_therm_fan_user_set;
+	priv->base.fan_sense = nouveau_therm_fan_sense;
+	priv->base.attr_get = nouveau_therm_attr_get;
+	priv->base.attr_set = nouveau_therm_attr_set;
+	priv->mode = priv->suspend = -1; /* undefined */
+	return 0;
+}
+
+int
+nouveau_therm_preinit(struct nouveau_therm *therm)
+{
+	nouveau_therm_sensor_ctor(therm);
+	nouveau_therm_ic_ctor(therm);
+	nouveau_therm_fan_ctor(therm);
+
+	nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE);
+	nouveau_therm_sensor_preinit(therm);
+	return 0;
+}
+
+void
+_nouveau_therm_dtor(struct nouveau_object *object)
+{
+	struct nouveau_therm_priv *priv = (void *)object;
+	kfree(priv->fan);
+	nouveau_subdev_destroy(&priv->base.base);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
new file mode 100644
index 0000000..ea19acd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ *          Martin Peres
+ */
+
+#include "priv.h"
+
+#include <core/object.h>
+#include <core/device.h>
+
+#include <subdev/gpio.h>
+#include <subdev/timer.h>
+
+static int
+nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
+{
+	struct nouveau_therm *therm = fan->parent;
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_timer *ptimer = nouveau_timer(priv);
+	unsigned long flags;
+	int ret = 0;
+	int duty;
+
+	/* update target fan speed, restricting to allowed range */
+	spin_lock_irqsave(&fan->lock, flags);
+	if (target < 0)
+		target = fan->percent;
+	target = max_t(u8, target, fan->bios.min_duty);
+	target = min_t(u8, target, fan->bios.max_duty);
+	if (fan->percent != target) {
+		nv_debug(therm, "FAN target: %d\n", target);
+		fan->percent = target;
+	}
+
+	/* check that we're not already at the target duty cycle */
+	duty = fan->get(therm);
+	if (duty == target) {
+		spin_unlock_irqrestore(&fan->lock, flags);
+		return 0;
+	}
+
+	/* smooth out the fanspeed increase/decrease */
+	if (!immediate && duty >= 0) {
+		/* the constant "3" is a rough approximation taken from
+		 * nvidia's behaviour.
+		 * it is meant to bump the fan speed more incrementally
+		 */
+		if (duty < target)
+			duty = min(duty + 3, target);
+		else if (duty > target)
+			duty = max(duty - 3, target);
+	} else {
+		duty = target;
+	}
+
+	nv_debug(therm, "FAN update: %d\n", duty);
+	ret = fan->set(therm, duty);
+	if (ret) {
+		spin_unlock_irqrestore(&fan->lock, flags);
+		return ret;
+	}
+
+	/* fan speed updated, drop the fan lock before grabbing the
+	 * alarm-scheduling lock and risking a deadlock
+	 */
+	spin_unlock_irqrestore(&fan->lock, flags);
+
+	/* schedule next fan update, if not at target speed already */
+	if (list_empty(&fan->alarm.head) && target != duty) {
+		u16 bump_period = fan->bios.bump_period;
+		u16 slow_down_period = fan->bios.slow_down_period;
+		u64 delay;
+
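+		/* pick the bios-provided period (in ms, given the ns
+		 * conversion below) matching the direction the duty cycle is
+		 * moving in
+		 */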
+		if (duty > target)
+			delay = slow_down_period;
+		else if (duty == target)
+			delay = min(bump_period, slow_down_period);
+		else
+			delay = bump_period;
+
+		ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
+	}
+
+	return ret;
+}
+
+static void
+nouveau_fan_alarm(struct nouveau_alarm *alarm)
+{
+	struct nouveau_fan *fan = container_of(alarm, struct nouveau_fan, alarm);
+	nouveau_fan_update(fan, false, -1);
+}
+
+int
+nouveau_therm_fan_get(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	return priv->fan->get(therm);
+}
+
+int
+nouveau_therm_fan_set(struct nouveau_therm *therm, bool immediate, int percent)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	return nouveau_fan_update(priv->fan, immediate, percent);
+}
+
+int
+nouveau_therm_fan_sense(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_timer *ptimer = nouveau_timer(therm);
+	struct nouveau_gpio *gpio = nouveau_gpio(therm);
+	u32 cycles, cur, prev;
+	u64 start, end, tach;
+
+	if (priv->fan->tach.func == DCB_GPIO_UNUSED)
+		return -ENODEV;
+
+	/* Time a complete rotation and extrapolate to RPM:
+	 * When the fan spins, it changes the value of GPIO FAN_SENSE.
+	 * We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
+	 */
+	start = ptimer->read(ptimer);
+	prev = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
+	cycles = 0;
+	do {
+		usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
+
+		cur = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
+		if (prev != cur) {
+			if (!start)
+				start = ptimer->read(ptimer);
+			cycles++;
+			prev = cur;
+		}
+	} while (cycles < 5 && ptimer->read(ptimer) - start < 250000000);
+	end = ptimer->read(ptimer);
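+	/* 60e9 ns per minute divided by the time measured for ~one rotation
+	 * (5 transitions bound 4 quarter-turns) gives RPM: e.g. an end - start
+	 * of 10ms reports 60000000000 / 10000000 = 6000 RPM
+	 */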
+
+	if (cycles == 5) {
+		tach = (u64)60000000000ULL;
+		do_div(tach, (end - start));
+		return tach;
+	} else
+		return 0;
+}
+
+int
+nouveau_therm_fan_user_get(struct nouveau_therm *therm)
+{
+	return nouveau_therm_fan_get(therm);
+}
+
+int
+nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	if (priv->mode != NOUVEAU_THERM_CTRL_MANUAL)
+		return -EINVAL;
+
+	return nouveau_therm_fan_set(therm, true, percent);
+}
+
+static void
+nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	priv->fan->bios.pwm_freq = 0;
+	priv->fan->bios.min_duty = 0;
+	priv->fan->bios.max_duty = 100;
+	priv->fan->bios.bump_period = 500;
+	priv->fan->bios.slow_down_period = 2000;
+	priv->fan->bios.linear_min_temp = 40;
+	priv->fan->bios.linear_max_temp = 85;
+}
+
+static void
+nouveau_therm_fan_safety_checks(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	if (priv->fan->bios.min_duty > 100)
+		priv->fan->bios.min_duty = 100;
+	if (priv->fan->bios.max_duty > 100)
+		priv->fan->bios.max_duty = 100;
+
+	if (priv->fan->bios.min_duty > priv->fan->bios.max_duty)
+		priv->fan->bios.min_duty = priv->fan->bios.max_duty;
+}
+
+int
+nouveau_therm_fan_ctor(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_gpio *gpio = nouveau_gpio(therm);
+	struct nouveau_bios *bios = nouveau_bios(therm);
+	struct dcb_gpio_func func;
+	int ret;
+
+	/* attempt to locate a drivable fan, and determine control method */
+	ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
+	if (ret == 0) {
+		if (func.log[0] & DCB_GPIO_LOG_DIR_IN) {
+			nv_debug(therm, "GPIO_FAN is in input mode\n");
+			ret = -EINVAL;
+		} else {
+			ret = nouveau_fanpwm_create(therm, &func);
+			if (ret != 0)
+				ret = nouveau_fantog_create(therm, &func);
+		}
+	}
+
+	/* no controllable fan found, create a dummy fan module */
+	if (ret != 0) {
+		ret = nouveau_fannil_create(therm);
+		if (ret)
+			return ret;
+	}
+
+	nv_info(therm, "FAN control: %s\n", priv->fan->type);
+
+	/* attempt to detect a tachometer connection */
+	ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach);
+	if (ret)
+		priv->fan->tach.func = DCB_GPIO_UNUSED;
+
+	/* initialise fan bump/slow update handling */
+	priv->fan->parent = therm;
+	nouveau_alarm_init(&priv->fan->alarm, nouveau_fan_alarm);
+	spin_lock_init(&priv->fan->lock);
+
+	/* other random init... */
+	nouveau_therm_fan_set_defaults(therm);
+	nvbios_perf_fan_parse(bios, &priv->fan->perf);
+	if (nvbios_therm_fan_parse(bios, &priv->fan->bios))
+		nv_error(therm, "parsing the thermal table failed\n");
+	nouveau_therm_fan_safety_checks(therm);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c
new file mode 100644
index 0000000..b78c182
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+static int
+nouveau_fannil_get(struct nouveau_therm *therm)
+{
+	return -ENODEV;
+}
+
+static int
+nouveau_fannil_set(struct nouveau_therm *therm, int percent)
+{
+	return -ENODEV;
+}
+
+int
+nouveau_fannil_create(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *tpriv = (void *)therm;
+	struct nouveau_fan *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	tpriv->fan = priv;
+
+	priv->type = "none / external";
+	priv->get = nouveau_fannil_get;
+	priv->set = nouveau_fannil_set;
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
new file mode 100644
index 0000000..5f71db8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ * 	    Martin Peres
+ */
+
+#include <core/option.h>
+#include <subdev/gpio.h>
+
+#include "priv.h"
+
+struct nouveau_fanpwm_priv {
+	struct nouveau_fan base;
+	struct dcb_gpio_func func;
+};
+
+static int
+nouveau_fanpwm_get(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *tpriv = (void *)therm;
+	struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan;
+	struct nouveau_gpio *gpio = nouveau_gpio(therm);
+	int card_type = nv_device(therm)->card_type;
+	u32 divs, duty;
+	int ret;
+
+	ret = therm->pwm_get(therm, priv->func.line, &divs, &duty);
+	if (ret == 0 && divs) {
+		divs = max(divs, duty);
+		if (card_type <= NV_40 || (priv->func.log[0] & 1))
+			duty = divs - duty;
+		return (duty * 100) / divs;
+	}
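+	/* no PWM readout available: fall back to the raw GPIO level below,
+	 * which can only report 0% or 100% */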
+
+	return gpio->get(gpio, 0, priv->func.func, priv->func.line) * 100;
+}
+
+static int
+nouveau_fanpwm_set(struct nouveau_therm *therm, int percent)
+{
+	struct nouveau_therm_priv *tpriv = (void *)therm;
+	struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan;
+	int card_type = nv_device(therm)->card_type;
+	u32 divs, duty;
+	int ret;
+
+	divs = priv->base.perf.pwm_divisor;
+	if (priv->base.bios.pwm_freq) {
+		divs = 1;
+		if (therm->pwm_clock)
+			divs = therm->pwm_clock(therm);
+		divs /= priv->base.bios.pwm_freq;
+	}
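+	/* e.g. a 1.35MHz pwm_clock with a bios pwm_freq of 135Hz yields
+	 * divs = 10000 (illustrative values) */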
+
+	duty = ((divs * percent) + 99) / 100;
+	if (card_type <= NV_40 || (priv->func.log[0] & 1))
+		duty = divs - duty;
+
+	ret = therm->pwm_set(therm, priv->func.line, divs, duty);
+	if (ret == 0)
+		ret = therm->pwm_ctrl(therm, priv->func.line, true);
+	return ret;
+}
+
+int
+nouveau_fanpwm_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
+{
+	struct nouveau_device *device = nv_device(therm);
+	struct nouveau_therm_priv *tpriv = (void *)therm;
+	struct nouveau_fanpwm_priv *priv;
+	u32 divs, duty;
+
+	if (!nouveau_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
+	    !therm->pwm_ctrl ||
+	     therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
+		return -ENODEV;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	tpriv->fan = &priv->base;
+
+	priv->base.type = "PWM";
+	priv->base.get = nouveau_fanpwm_get;
+	priv->base.set = nouveau_fanpwm_set;
+	priv->func = *func;
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
new file mode 100644
index 0000000..e601773
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2012 The Nouveau community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include "priv.h"
+
+#include <core/object.h>
+#include <core/device.h>
+
+#include <subdev/gpio.h>
+#include <subdev/timer.h>
+
+struct nouveau_fantog_priv {
+	struct nouveau_fan base;
+	struct nouveau_alarm alarm;
+	spinlock_t lock;
+	u32 period_us;
+	u32 percent;
+	struct dcb_gpio_func func;
+};
+
+static void
+nouveau_fantog_update(struct nouveau_fantog_priv *priv, int percent)
+{
+	struct nouveau_therm_priv *tpriv = (void *)priv->base.parent;
+	struct nouveau_timer *ptimer = nouveau_timer(tpriv);
+	struct nouveau_gpio *gpio = nouveau_gpio(tpriv);
+	unsigned long flags;
+	int duty;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (percent < 0)
+		percent = priv->percent;
+	priv->percent = percent;
+
+	duty = !gpio->get(gpio, 0, DCB_GPIO_FAN, 0xff);
+	gpio->set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
+
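+	/* schedule the next toggle so the on/off ratio approximates the
+	 * requested percentage: e.g. at 30% of the 100000us period the line
+	 * stays high for 30ms, then low for 70ms (a 30% duty cycle at 10Hz)
+	 */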
+	if (list_empty(&priv->alarm.head) && percent != (duty * 100)) {
+		u64 next_change = (percent * priv->period_us) / 100;
+		if (!duty)
+			next_change = priv->period_us - next_change;
+		ptimer->alarm(ptimer, next_change * 1000, &priv->alarm);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nouveau_fantog_alarm(struct nouveau_alarm *alarm)
+{
+	struct nouveau_fantog_priv *priv =
+	       container_of(alarm, struct nouveau_fantog_priv, alarm);
+	nouveau_fantog_update(priv, -1);
+}
+
+static int
+nouveau_fantog_get(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *tpriv = (void *)therm;
+	struct nouveau_fantog_priv *priv = (void *)tpriv->fan;
+	return priv->percent;
+}
+
+static int
+nouveau_fantog_set(struct nouveau_therm *therm, int percent)
+{
+	struct nouveau_therm_priv *tpriv = (void *)therm;
+	struct nouveau_fantog_priv *priv = (void *)tpriv->fan;
+	if (therm->pwm_ctrl)
+		therm->pwm_ctrl(therm, priv->func.line, false);
+	nouveau_fantog_update(priv, percent);
+	return 0;
+}
+
+int
+nouveau_fantog_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
+{
+	struct nouveau_therm_priv *tpriv = (void *)therm;
+	struct nouveau_fantog_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	tpriv->fan = &priv->base;
+
+	priv->base.type = "toggle";
+	priv->base.get = nouveau_fantog_get;
+	priv->base.set = nouveau_fantog_set;
+	nouveau_alarm_init(&priv->alarm, nouveau_fantog_alarm);
+	priv->period_us = 100000; /* 10Hz */
+	priv->percent = 100;
+	priv->func = *func;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
new file mode 100644
index 0000000..8b3adec
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2012 Nouveau community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include "priv.h"
+
+#include <subdev/i2c.h>
+#include <subdev/bios/extdev.h>
+
+static bool
+probe_monitoring_device(struct nouveau_i2c_port *i2c,
+			struct i2c_board_info *info)
+{
+	struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c);
+	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct i2c_client *client;
+
+	request_module("%s%s", I2C_MODULE_PREFIX, info->type);
+
+	client = i2c_new_device(&i2c->adapter, info);
+	if (!client)
+		return false;
+
+	if (!client->driver || client->driver->detect(client, info)) {
+		i2c_unregister_device(client);
+		return false;
+	}
+
+	nv_info(priv,
+		"Found an %s at address 0x%x (controlled by lm_sensors, "
+		"temp offset %+i C)\n",
+		info->type, info->addr, sensor->offset_constant);
+	priv->ic = client;
+
+	return true;
+}
+
+static struct i2c_board_info
+nv_board_infos[] = {
+	{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
+	{ I2C_BOARD_INFO("w83781d", 0x2d) },
+	{ I2C_BOARD_INFO("adt7473", 0x2e) },
+	{ I2C_BOARD_INFO("adt7473", 0x2d) },
+	{ I2C_BOARD_INFO("adt7473", 0x2c) },
+	{ I2C_BOARD_INFO("f75375", 0x2e) },
+	{ I2C_BOARD_INFO("lm99", 0x4c) },
+	{ I2C_BOARD_INFO("lm90", 0x4c) },
+	{ I2C_BOARD_INFO("lm90", 0x4d) },
+	{ I2C_BOARD_INFO("adm1021", 0x18) },
+	{ I2C_BOARD_INFO("adm1021", 0x19) },
+	{ I2C_BOARD_INFO("adm1021", 0x1a) },
+	{ I2C_BOARD_INFO("adm1021", 0x29) },
+	{ I2C_BOARD_INFO("adm1021", 0x2a) },
+	{ I2C_BOARD_INFO("adm1021", 0x2b) },
+	{ I2C_BOARD_INFO("adm1021", 0x4c) },
+	{ I2C_BOARD_INFO("adm1021", 0x4d) },
+	{ I2C_BOARD_INFO("adm1021", 0x4e) },
+	{ I2C_BOARD_INFO("lm63", 0x18) },
+	{ I2C_BOARD_INFO("lm63", 0x4e) },
+	{ }
+};
+
+void
+nouveau_therm_ic_ctor(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_bios *bios = nouveau_bios(therm);
+	struct nouveau_i2c *i2c = nouveau_i2c(therm);
+	struct nvbios_extdev_func extdev_entry;
+
+	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
+		struct i2c_board_info board[] = {
+			{ I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) },
+			{ }
+		};
+
+		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
+				  board, probe_monitoring_device);
+		if (priv->ic)
+			return;
+	}
+
+	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
+		struct i2c_board_info board[] = {
+			{ I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) },
+			{ }
+		};
+
+		i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
+				  board, probe_monitoring_device);
+		if (priv->ic)
+			return;
+	}
+
+	/* The vbios doesn't provide the address of an existing monitoring
+	 * device, so try our static list.
+	 */
+	i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
+		      nv_board_infos, probe_monitoring_device);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
new file mode 100644
index 0000000..002e51b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ * 	    Martin Peres
+ */
+
+#include "priv.h"
+
+struct nv40_therm_priv {
+	struct nouveau_therm_priv base;
+};
+
+enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 };
+
+static enum nv40_sensor_style
+nv40_sensor_style(struct nouveau_therm *therm)
+{
+	struct nouveau_device *device = nv_device(therm);
+
+	switch (device->chipset) {
+	case 0x43:
+	case 0x44:
+	case 0x4a:
+	case 0x47:
+		return OLD_STYLE;
+
+	case 0x46:
+	case 0x49:
+	case 0x4b:
+	case 0x4e:
+	case 0x4c:
+	case 0x67:
+	case 0x68:
+	case 0x63:
+		return NEW_STYLE;
+	default:
+		return INVALID_STYLE;
+	}
+}
+
+static int
+nv40_sensor_setup(struct nouveau_therm *therm)
+{
+	enum nv40_sensor_style style = nv40_sensor_style(therm);
+
+	/* enable ADC readout and disable the ALARM threshold */
+	if (style == NEW_STYLE) {
+		nv_mask(therm, 0x15b8, 0x80000000, 0);
+		nv_wr32(therm, 0x15b0, 0x80003fff);
+		mdelay(20); /* wait for the temperature to stabilize */
+		return nv_rd32(therm, 0x15b4) & 0x3fff;
+	} else if (style == OLD_STYLE) {
+		nv_wr32(therm, 0x15b0, 0xff);
+		mdelay(20); /* wait for the temperature to stabilize */
+		return nv_rd32(therm, 0x15b4) & 0xff;
+	} else
+		return -ENODEV;
+}
+
+static int
+nv40_temp_get(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	enum nv40_sensor_style style = nv40_sensor_style(therm);
+	int core_temp;
+
+	if (style == NEW_STYLE) {
+		nv_wr32(therm, 0x15b0, 0x80003fff);
+		core_temp = nv_rd32(therm, 0x15b4) & 0x3fff;
+	} else if (style == OLD_STYLE) {
+		nv_wr32(therm, 0x15b0, 0xff);
+		core_temp = nv_rd32(therm, 0x15b4) & 0xff;
+	} else
+		return -ENODEV;
+
+	/* if the slope or the offset is unset, do not use the sensor */
+	if (!sensor->slope_div || !sensor->slope_mult ||
+	    !sensor->offset_num || !sensor->offset_den)
+		return -ENODEV;
+
+	core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
+	core_temp = core_temp + sensor->offset_num / sensor->offset_den;
+	core_temp = core_temp + sensor->offset_constant - 8;
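+	/* worked example: a raw reading of 120 with slope 1/2, offset 10/2
+	 * and offset_constant 3 gives 120 / 2 + 5 + 3 - 8 = 60 C
+	 * (illustrative vbios values) */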
+
+	/* reserve negative temperatures for errors */
+	if (core_temp < 0)
+		core_temp = 0;
+
+	return core_temp;
+}
+
+static int
+nv40_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+{
+	u32 mask = enable ? 0x80000000 : 0x00000000;
+	if      (line == 2) nv_mask(therm, 0x0010f0, 0x80000000, mask);
+	else if (line == 9) nv_mask(therm, 0x0015f4, 0x80000000, mask);
+	else {
+		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int
+nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
+{
+	if (line == 2) {
+		u32 reg = nv_rd32(therm, 0x0010f0);
+		if (reg & 0x80000000) {
+			*duty = (reg & 0x7fff0000) >> 16;
+			*divs = (reg & 0x00007fff);
+			return 0;
+		}
+	} else
+	if (line == 9) {
+		u32 reg = nv_rd32(therm, 0x0015f4);
+		if (reg & 0x80000000) {
+			*divs = nv_rd32(therm, 0x0015f8);
+			*duty = (reg & 0x7fffffff);
+			return 0;
+		}
+	} else {
+		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		return -ENODEV;
+	}
+
+	return -EINVAL;
+}
+
+static int
+nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
+{
+	if (line == 2) {
+		nv_mask(therm, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
+	} else
+	if (line == 9) {
+		nv_wr32(therm, 0x0015f8, divs);
+		nv_mask(therm, 0x0015f4, 0x7fffffff, duty);
+	} else {
+		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+void
+nv40_therm_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_therm *therm = nouveau_therm(subdev);
+	uint32_t stat = nv_rd32(therm, 0x1100);
+
+	/* processing */
+
+	/* ack all IRQs */
+	nv_wr32(therm, 0x1100, 0x70000);
+
+	nv_error(therm, "THERM received an IRQ: stat = %x\n", stat);
+}
+
+static int
+nv40_therm_ctor(struct nouveau_object *parent,
+		struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv40_therm_priv *priv;
+	int ret;
+
+	ret = nouveau_therm_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.base.pwm_ctrl = nv40_fan_pwm_ctrl;
+	priv->base.base.pwm_get = nv40_fan_pwm_get;
+	priv->base.base.pwm_set = nv40_fan_pwm_set;
+	priv->base.base.temp_get = nv40_temp_get;
+	priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+	nv_subdev(priv)->intr = nv40_therm_intr;
+	return nouveau_therm_preinit(&priv->base.base);
+}
+
+static int
+nv40_therm_init(struct nouveau_object *object)
+{
+	struct nouveau_therm *therm = (void *)object;
+
+	nv40_sensor_setup(therm);
+
+	return _nouveau_therm_init(object);
+}
+
+struct nouveau_oclass
+nv40_therm_oclass = {
+	.handle = NV_SUBDEV(THERM, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_therm_ctor,
+		.dtor = _nouveau_therm_dtor,
+		.init = nv40_therm_init,
+		.fini = _nouveau_therm_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
new file mode 100644
index 0000000..8cf7597
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ * 	    Martin Peres
+ */
+
+#include "priv.h"
+
+struct nv50_therm_priv {
+	struct nouveau_therm_priv base;
+};
+
+static int
+pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
+{
+	if (*line == 0x04) {
+		*ctrl = 0x00e100;
+		*line = 4;
+		*indx = 0;
+	} else
+	if (*line == 0x09) {
+		*ctrl = 0x00e100;
+		*line = 9;
+		*indx = 1;
+	} else
+	if (*line == 0x10) {
+		*ctrl = 0x00e28c;
+		*line = 0;
+		*indx = 0;
+	} else {
+		nv_error(therm, "unknown pwm ctrl for gpio %d\n", *line);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+int
+nv50_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+{
+	u32 data = enable ? 0x00000001 : 0x00000000;
+	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
+	if (ret == 0)
+		nv_mask(therm, ctrl, 0x00010001 << line, data << line);
+	return ret;
+}
+
+int
+nv50_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
+{
+	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
+	if (ret)
+		return ret;
+
+	if (nv_rd32(therm, ctrl) & (1 << line)) {
+		*divs = nv_rd32(therm, 0x00e114 + (id * 8));
+		*duty = nv_rd32(therm, 0x00e118 + (id * 8));
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int
+nv50_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
+{
+	int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
+	if (ret)
+		return ret;
+
+	nv_wr32(therm, 0x00e114 + (id * 8), divs);
+	nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000);
+	return 0;
+}
+
+int
+nv50_fan_pwm_clock(struct nouveau_therm *therm)
+{
+	int chipset = nv_device(therm)->chipset;
+	int crystal = nv_device(therm)->crystal;
+	int pwm_clock;
+
+	/* determine the PWM source clock */
+	if (chipset > 0x50 && chipset < 0x94) {
+		u8 pwm_div = nv_rd32(therm, 0x410c);
+		if (nv_rd32(therm, 0xc040) & 0x800000) {
+			/* Use the HOST clock (100 MHz)
+			 * Where does this constant (2.4) come from? */
+			pwm_clock = (100000000 >> pwm_div) * 10 / 24;
+		} else {
+			/* Where does this constant (20) come from? */
+			pwm_clock = (crystal * 1000) >> pwm_div;
+			pwm_clock /= 20;
+		}
+	} else {
+		pwm_clock = (crystal * 1000) / 20;
+	}
+
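+	/* e.g. a typical 27000kHz crystal on the fallback path gives
+	 * 27000000 / 20 = 1.35MHz (illustrative) */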
+	return pwm_clock;
+}
+
+static void
+nv50_sensor_setup(struct nouveau_therm *therm)
+{
+	nv_mask(therm, 0x20010, 0x40000000, 0x0);
+	mdelay(20); /* wait for the temperature to stabilize */
+}
+
+static int
+nv50_temp_get(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	int core_temp;
+
+	core_temp = nv_rd32(therm, 0x20014) & 0x3fff;
+
+	/* if the slope or the offset is unset, do not use the sensor */
+	if (!sensor->slope_div || !sensor->slope_mult ||
+	    !sensor->offset_num || !sensor->offset_den)
+		return -ENODEV;
+
+	core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
+	core_temp = core_temp + sensor->offset_num / sensor->offset_den;
+	core_temp = core_temp + sensor->offset_constant - 8;
+
+	/* reserve negative temperatures for errors */
+	if (core_temp < 0)
+		core_temp = 0;
+
+	return core_temp;
+}
+
+static int
+nv50_therm_ctor(struct nouveau_object *parent,
+		struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv50_therm_priv *priv;
+	int ret;
+
+	ret = nouveau_therm_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
+	priv->base.base.pwm_get = nv50_fan_pwm_get;
+	priv->base.base.pwm_set = nv50_fan_pwm_set;
+	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
+	priv->base.base.temp_get = nv50_temp_get;
+	priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+	nv_subdev(priv)->intr = nv40_therm_intr;
+
+	return nouveau_therm_preinit(&priv->base.base);
+}
+
+static int
+nv50_therm_init(struct nouveau_object *object)
+{
+	struct nouveau_therm *therm = (void *)object;
+
+	nv50_sensor_setup(therm);
+
+	return _nouveau_therm_init(object);
+}
+
+struct nouveau_oclass
+nv50_therm_oclass = {
+	.handle = NV_SUBDEV(THERM, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_therm_ctor,
+		.dtor = _nouveau_therm_dtor,
+		.init = nv50_therm_init,
+		.fini = _nouveau_therm_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
new file mode 100644
index 0000000..42ba633
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nv84.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ * 	    Martin Peres
+ */
+
+#include "priv.h"
+
+struct nv84_therm_priv {
+	struct nouveau_therm_priv base;
+};
+
+int
+nv84_temp_get(struct nouveau_therm *therm)
+{
+	return nv_rd32(therm, 0x20400);
+}
+
+static void
+nv84_therm_program_alarms(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+	/* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
+	nv_wr32(therm, 0x20000, 0x000003ff);
+
+	/* shutdown: The computer should be shutdown when reached */
+	nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
+	nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
+
+	/* THRS_1 : fan boost*/
+	nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
+
+	/* THRS_2 : critical */
+	nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
+
+	/* THRS_4 : down clock */
+	nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
+	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
+	nv_debug(therm,
+		 "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+		 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
+		 sensor->thrs_down_clock.temp,
+		 sensor->thrs_down_clock.hysteresis,
+		 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
+		 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+
+
+/* must be called with alarm_program_lock taken! */
+static void
+nv84_therm_threshold_hyst_emulation(struct nouveau_therm *therm,
+				   uint32_t thrs_reg, u8 status_bit,
+				   const struct nvbios_therm_threshold *thrs,
+				   enum nouveau_therm_thrs thrs_name)
+{
+	enum nouveau_therm_thrs_direction direction;
+	enum nouveau_therm_thrs_state prev_state, new_state;
+	int temp, cur;
+
+	prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
+	temp = nv_rd32(therm, thrs_reg);
+
+	/* program the next threshold */
+	if (temp == thrs->temp) {
+		nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
+		new_state = NOUVEAU_THERM_THRS_HIGHER;
+	} else {
+		nv_wr32(therm, thrs_reg, thrs->temp);
+		new_state = NOUVEAU_THERM_THRS_LOWER;
+	}
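+	/* e.g. with temp=90 and hysteresis=3 the register alternates between
+	 * 90 (armed to catch the rising edge) and 87 (armed to catch the
+	 * falling edge), emulating hysteresis on a single hw threshold */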
+
+	/* fix the state (in case someone reprogrammed the alarms) */
+	cur = therm->temp_get(therm);
+	if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
+		new_state = NOUVEAU_THERM_THRS_HIGHER;
+	else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
+		cur < thrs->temp - thrs->hysteresis)
+		new_state = NOUVEAU_THERM_THRS_LOWER;
+	nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
+
+	/* find the direction */
+	if (prev_state < new_state)
+		direction = NOUVEAU_THERM_THRS_RISING;
+	else if (prev_state > new_state)
+		direction = NOUVEAU_THERM_THRS_FALLING;
+	else
+		return;
+
+	/* advertise a change in direction */
+	nouveau_therm_sensor_event(therm, thrs_name, direction);
+}
+
+static void
+nv84_therm_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_therm *therm = nouveau_therm(subdev);
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	unsigned long flags;
+	uint32_t intr;
+
+	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+	intr = nv_rd32(therm, 0x20100);
+
+	/* THRS_4: downclock */
+	if (intr & 0x002) {
+		nv84_therm_threshold_hyst_emulation(therm, 0x20414, 24,
+						  &sensor->thrs_down_clock,
+						  NOUVEAU_THERM_THRS_DOWNCLOCK);
+		intr &= ~0x002;
+	}
+
+	/* shutdown */
+	if (intr & 0x004) {
+		nv84_therm_threshold_hyst_emulation(therm, 0x20480, 20,
+						   &sensor->thrs_shutdown,
+						   NOUVEAU_THERM_THRS_SHUTDOWN);
+		intr &= ~0x004;
+	}
+
+	/* THRS_1 : fan boost */
+	if (intr & 0x008) {
+		nv84_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
+						   &sensor->thrs_fan_boost,
+						   NOUVEAU_THERM_THRS_FANBOOST);
+		intr &= ~0x008;
+	}
+
+	/* THRS_2 : critical */
+	if (intr & 0x010) {
+		nv84_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
+						   &sensor->thrs_critical,
+						   NOUVEAU_THERM_THRS_CRITICAL);
+		intr &= ~0x010;
+	}
+
+	if (intr)
+		nv_error(therm, "unhandled intr 0x%08x\n", intr);
+
+	/* ACK everything */
+	nv_wr32(therm, 0x20100, 0xffffffff);
+	nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
+
+	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+}
+
+static int
+nv84_therm_ctor(struct nouveau_object *parent,
+		struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv84_therm_priv *priv;
+	int ret;
+
+	ret = nouveau_therm_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
+	priv->base.base.pwm_get = nv50_fan_pwm_get;
+	priv->base.base.pwm_set = nv50_fan_pwm_set;
+	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
+	priv->base.base.temp_get = nv84_temp_get;
+	priv->base.sensor.program_alarms = nv84_therm_program_alarms;
+	nv_subdev(priv)->intr = nv84_therm_intr;
+
+	/* init the thresholds */
+	nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+						 NOUVEAU_THERM_THRS_SHUTDOWN,
+						 NOUVEAU_THERM_THRS_LOWER);
+	nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+						 NOUVEAU_THERM_THRS_FANBOOST,
+						 NOUVEAU_THERM_THRS_LOWER);
+	nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+						 NOUVEAU_THERM_THRS_CRITICAL,
+						 NOUVEAU_THERM_THRS_LOWER);
+	nouveau_therm_sensor_set_threshold_state(&priv->base.base,
+						 NOUVEAU_THERM_THRS_DOWNCLOCK,
+						 NOUVEAU_THERM_THRS_LOWER);
+
+	return nouveau_therm_preinit(&priv->base.base);
+}
+
+struct nouveau_oclass
+nv84_therm_oclass = {
+	.handle = NV_SUBDEV(THERM, 0x84),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv84_therm_ctor,
+		.dtor = _nouveau_therm_dtor,
+		.init = _nouveau_therm_init,
+		.fini = _nouveau_therm_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
new file mode 100644
index 0000000..d11a7c4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/gpio.h>
+
+#include "priv.h"
+
+struct nva3_therm_priv {
+	struct nouveau_therm_priv base;
+};
+
+int
+nva3_therm_fan_sense(struct nouveau_therm *therm)
+{
+	u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff;
+	u32 ctrl = nv_rd32(therm, 0x00e720);
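+	/* the counter latches revolutions per second (see the tach setup in
+	 * nva3_therm_init), hence RPM = tach * 60 */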
+	if (ctrl & 0x00000001)
+		return tach * 60;
+	return -ENODEV;
+}
+
+static int
+nva3_therm_init(struct nouveau_object *object)
+{
+	struct nva3_therm_priv *priv = (void *)object;
+	struct dcb_gpio_func *tach = &priv->base.fan->tach;
+	int ret;
+
+	ret = nouveau_therm_init(&priv->base.base);
+	if (ret)
+		return ret;
+
+	/* enable fan tach, count revolutions per-second */
+	nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
+	if (tach->func != DCB_GPIO_UNUSED) {
+		nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
+		nv_mask(priv, 0x00e720, 0x001f0000, tach->line << 16);
+		nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
+	}
+	nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
+
+	return 0;
+}
+
+static int
+nva3_therm_ctor(struct nouveau_object *parent,
+		struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nva3_therm_priv *priv;
+	int ret;
+
+	ret = nouveau_therm_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
+	priv->base.base.pwm_get = nv50_fan_pwm_get;
+	priv->base.base.pwm_set = nv50_fan_pwm_set;
+	priv->base.base.pwm_clock = nv50_fan_pwm_clock;
+	priv->base.base.temp_get = nv84_temp_get;
+	priv->base.base.fan_sense = nva3_therm_fan_sense;
+	priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+	return nouveau_therm_preinit(&priv->base.base);
+}
+
+struct nouveau_oclass
+nva3_therm_oclass = {
+	.handle = NV_SUBDEV(THERM, 0xa3),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nva3_therm_ctor,
+		.dtor = _nouveau_therm_dtor,
+		.init = nva3_therm_init,
+		.fini = _nouveau_therm_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
new file mode 100644
index 0000000..54c28bd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+struct nvd0_therm_priv {
+	struct nouveau_therm_priv base;
+};
+
+static int
+pwm_info(struct nouveau_therm *therm, int line)
+{
+	u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04));
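+	/* the low bits select the assigned gpio function: 0x19 and 0x1c
+	 * appear to map to PWM controllers 1 and 0 respectively, while bits
+	 * 0xc0 give the line's mode */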
+	switch (gpio & 0x000000c0) {
+	case 0x00000000: /* normal mode, possibly pwm forced off by us */
+	case 0x00000040: /* nvio special */
+		switch (gpio & 0x0000001f) {
+		case 0x19: return 1;
+		case 0x1c: return 0;
+		default:
+			break;
+		}
+	default:
+		break;
+	}
+
+	nv_error(therm, "GPIO %d unknown PWM: 0x%08x\n", line, gpio);
+	return -ENODEV;
+}
+
+static int
+nvd0_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+{
+	u32 data = enable ? 0x00000040 : 0x00000000;
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return indx;
+
+	nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data);
+	return 0;
+}
+
+static int
+nvd0_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
+{
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return indx;
+
+	if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) {
+		*divs = nv_rd32(therm, 0x00e114 + (indx * 8));
+		*duty = nv_rd32(therm, 0x00e118 + (indx * 8));
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int
+nvd0_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
+{
+	int indx = pwm_info(therm, line);
+	if (indx < 0)
+		return indx;
+
+	nv_wr32(therm, 0x00e114 + (indx * 8), divs);
+	nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000);
+	return 0;
+}
+
+static int
+nvd0_fan_pwm_clock(struct nouveau_therm *therm)
+{
+	return (nv_device(therm)->crystal * 1000) / 20;
+}
+
+static int
+nvd0_therm_init(struct nouveau_object *object)
+{
+	struct nvd0_therm_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_therm_init(&priv->base.base);
+	if (ret)
+		return ret;
+
+	/* enable fan tach, count revolutions per-second */
+	nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
+	if (priv->base.fan->tach.func != DCB_GPIO_UNUSED) {
+		nv_mask(priv, 0x00d79c, 0x000000ff, priv->base.fan->tach.line);
+		nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
+		nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
+	}
+	nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
+
+	return 0;
+}
+
+static int
+nvd0_therm_ctor(struct nouveau_object *parent,
+		struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nvd0_therm_priv *priv;
+	int ret;
+
+	ret = nouveau_therm_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.base.pwm_ctrl = nvd0_fan_pwm_ctrl;
+	priv->base.base.pwm_get = nvd0_fan_pwm_get;
+	priv->base.base.pwm_set = nvd0_fan_pwm_set;
+	priv->base.base.pwm_clock = nvd0_fan_pwm_clock;
+	priv->base.base.temp_get = nv84_temp_get;
+	priv->base.base.fan_sense = nva3_therm_fan_sense;
+	priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
+	return nouveau_therm_preinit(&priv->base.base);
+}
+
+struct nouveau_oclass
+nvd0_therm_oclass = {
+	.handle = NV_SUBDEV(THERM, 0xd0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvd0_therm_ctor,
+		.dtor = _nouveau_therm_dtor,
+		.init = nvd0_therm_init,
+		.fini = _nouveau_therm_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
new file mode 100644
index 0000000..15ca64e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -0,0 +1,150 @@
+#ifndef __NVTHERM_PRIV_H__
+#define __NVTHERM_PRIV_H__
+
+/*
+ * Copyright 2012 The Nouveau community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include <subdev/therm.h>
+
+#include <subdev/bios/extdev.h>
+#include <subdev/bios/gpio.h>
+#include <subdev/bios/perf.h>
+#include <subdev/bios/therm.h>
+#include <subdev/timer.h>
+
+struct nouveau_fan {
+	struct nouveau_therm *parent;
+	const char *type;
+
+	struct nvbios_therm_fan bios;
+	struct nvbios_perf_fan perf;
+
+	struct nouveau_alarm alarm;
+	spinlock_t lock;
+	int percent;
+
+	int (*get)(struct nouveau_therm *therm);
+	int (*set)(struct nouveau_therm *therm, int percent);
+
+	struct dcb_gpio_func tach;
+};
+
+enum nouveau_therm_thrs_direction {
+	NOUVEAU_THERM_THRS_FALLING = 0,
+	NOUVEAU_THERM_THRS_RISING = 1
+};
+
+enum nouveau_therm_thrs_state {
+	NOUVEAU_THERM_THRS_LOWER = 0,
+	NOUVEAU_THERM_THRS_HIGHER = 1
+};
+
+enum nouveau_therm_thrs {
+	NOUVEAU_THERM_THRS_FANBOOST = 0,
+	NOUVEAU_THERM_THRS_DOWNCLOCK = 1,
+	NOUVEAU_THERM_THRS_CRITICAL = 2,
+	NOUVEAU_THERM_THRS_SHUTDOWN = 3,
+	NOUVEAU_THERM_THRS_NR
+};
+
+struct nouveau_therm_priv {
+	struct nouveau_therm base;
+
+	/* automatic thermal management */
+	struct nouveau_alarm alarm;
+	spinlock_t lock;
+	struct nouveau_therm_trip_point *last_trip;
+	int mode;
+	int suspend;
+
+	/* bios */
+	struct nvbios_therm_sensor bios_sensor;
+
+	/* fan priv */
+	struct nouveau_fan *fan;
+
+	/* alarms priv */
+	struct {
+		spinlock_t alarm_program_lock;
+		struct nouveau_alarm therm_poll_alarm;
+		enum nouveau_therm_thrs_state alarm_state[NOUVEAU_THERM_THRS_NR];
+		void (*program_alarms)(struct nouveau_therm *);
+	} sensor;
+
+	/* what should be done if the card overheats */
+	struct {
+		void (*downclock)(struct nouveau_therm *, bool active);
+		void (*pause)(struct nouveau_therm *, bool active);
+	} emergency;
+
+	/* ic */
+	struct i2c_client *ic;
+};
+
+int nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode);
+int nouveau_therm_attr_get(struct nouveau_therm *therm,
+		       enum nouveau_therm_attr_type type);
+int nouveau_therm_attr_set(struct nouveau_therm *therm,
+		       enum nouveau_therm_attr_type type, int value);
+
+void nouveau_therm_ic_ctor(struct nouveau_therm *therm);
+
+int nouveau_therm_sensor_ctor(struct nouveau_therm *therm);
+
+int nouveau_therm_fan_ctor(struct nouveau_therm *therm);
+int nouveau_therm_fan_get(struct nouveau_therm *therm);
+int nouveau_therm_fan_set(struct nouveau_therm *therm, bool now, int percent);
+int nouveau_therm_fan_user_get(struct nouveau_therm *therm);
+int nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent);
+
+int nouveau_therm_fan_sense(struct nouveau_therm *therm);
+
+int nouveau_therm_preinit(struct nouveau_therm *);
+
+void nouveau_therm_sensor_preinit(struct nouveau_therm *);
+void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
+					     enum nouveau_therm_thrs thrs,
+					     enum nouveau_therm_thrs_state st);
+enum nouveau_therm_thrs_state
+nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm,
+					 enum nouveau_therm_thrs thrs);
+void nouveau_therm_sensor_event(struct nouveau_therm *therm,
+			        enum nouveau_therm_thrs thrs,
+			        enum nouveau_therm_thrs_direction dir);
+void nouveau_therm_program_alarms_polling(struct nouveau_therm *therm);
+
+void nv40_therm_intr(struct nouveau_subdev *);
+int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool);
+int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
+int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
+int nv50_fan_pwm_clock(struct nouveau_therm *);
+int nv84_temp_get(struct nouveau_therm *therm);
+
+int nva3_therm_fan_sense(struct nouveau_therm *);
+
+int nouveau_fanpwm_create(struct nouveau_therm *, struct dcb_gpio_func *);
+int nouveau_fantog_create(struct nouveau_therm *, struct dcb_gpio_func *);
+int nouveau_fannil_create(struct nouveau_therm *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
new file mode 100644
index 0000000..dde746c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2012 The Nouveau community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres
+ */
+
+#include "priv.h"
+
+#include <core/object.h>
+#include <core/device.h>
+
+#include <subdev/bios.h>
+
+static void
+nouveau_therm_temp_set_defaults(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+
+	priv->bios_sensor.offset_constant = 0;
+
+	priv->bios_sensor.thrs_fan_boost.temp = 90;
+	priv->bios_sensor.thrs_fan_boost.hysteresis = 3;
+
+	priv->bios_sensor.thrs_down_clock.temp = 95;
+	priv->bios_sensor.thrs_down_clock.hysteresis = 3;
+
+	priv->bios_sensor.thrs_critical.temp = 105;
+	priv->bios_sensor.thrs_critical.hysteresis = 5;
+
+	priv->bios_sensor.thrs_shutdown.temp = 135;
+	priv->bios_sensor.thrs_shutdown.hysteresis = 5; /* not that it matters */
+}
+
+
+static void
+nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nvbios_therm_sensor *s = &priv->bios_sensor;
+
+	/* enforce a minimum hysteresis on thresholds */
+	s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
+	s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2);
+	s->thrs_critical.hysteresis = max_t(u8, s->thrs_critical.hysteresis, 2);
+	s->thrs_shutdown.hysteresis = max_t(u8, s->thrs_shutdown.hysteresis, 2);
+}
+
+/* must be called with alarm_program_lock taken! */
+void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
+					     enum nouveau_therm_thrs thrs,
+					     enum nouveau_therm_thrs_state st)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	priv->sensor.alarm_state[thrs] = st;
+}
+
+/* must be called with alarm_program_lock taken! */
+enum nouveau_therm_thrs_state
+nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm,
+					 enum nouveau_therm_thrs thrs)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	return priv->sensor.alarm_state[thrs];
+}
+
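+/* the shutdown threshold can trip from atomic context (hence the GFP_ATOMIC
+ * allocation below), so the actual poweroff is deferred to a self-freeing
+ * work item running in process context */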
+static void
+nv_poweroff_work(struct work_struct *work)
+{
+	orderly_poweroff(true);
+	kfree(work);
+}
+
+void nouveau_therm_sensor_event(struct nouveau_therm *therm,
+			        enum nouveau_therm_thrs thrs,
+			        enum nouveau_therm_thrs_direction dir)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	bool active;
+	const char *thresholds[] = {
+		"fanboost", "downclock", "critical", "shutdown"
+	};
+	int temperature = therm->temp_get(therm);
+
+	if (thrs < 0 || thrs > 3)
+		return;
+
+	if (dir == NOUVEAU_THERM_THRS_FALLING)
+		nv_info(therm, "temperature (%i C) went below the '%s' threshold\n",
+			temperature, thresholds[thrs]);
+	else
+		nv_info(therm, "temperature (%i C) hit the '%s' threshold\n",
+			temperature, thresholds[thrs]);
+
+	active = (dir == NOUVEAU_THERM_THRS_RISING);
+	switch (thrs) {
+	case NOUVEAU_THERM_THRS_FANBOOST:
+		if (active) {
+			nouveau_therm_fan_set(therm, true, 100);
+			nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
+		}
+		break;
+	case NOUVEAU_THERM_THRS_DOWNCLOCK:
+		if (priv->emergency.downclock)
+			priv->emergency.downclock(therm, active);
+		break;
+	case NOUVEAU_THERM_THRS_CRITICAL:
+		if (priv->emergency.pause)
+			priv->emergency.pause(therm, active);
+		break;
+	case NOUVEAU_THERM_THRS_SHUTDOWN:
+		if (active) {
+			struct work_struct *work;
+
+			work = kmalloc(sizeof(*work), GFP_ATOMIC);
+			if (work) {
+				INIT_WORK(work, nv_poweroff_work);
+				schedule_work(work);
+			}
+		}
+		break;
+	case NOUVEAU_THERM_THRS_NR:
+		break;
+	}
+}
+
+/* must be called with alarm_program_lock taken! */
+static void
+nouveau_therm_threshold_hyst_polling(struct nouveau_therm *therm,
+				   const struct nvbios_therm_threshold *thrs,
+				   enum nouveau_therm_thrs thrs_name)
+{
+	enum nouveau_therm_thrs_direction direction;
+	enum nouveau_therm_thrs_state prev_state, new_state;
+	int temp = therm->temp_get(therm);
+
+	prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
+
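+	/* e.g. thrs->temp=90, hysteresis=3: the rising event fires once the
+	 * temperature reaches 90 C, and the matching falling event only once
+	 * it has dropped back to 87 C or below */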
+	if (temp >= thrs->temp && prev_state == NOUVEAU_THERM_THRS_LOWER) {
+		direction = NOUVEAU_THERM_THRS_RISING;
+		new_state = NOUVEAU_THERM_THRS_HIGHER;
+	} else if (temp <= thrs->temp - thrs->hysteresis &&
+			prev_state == NOUVEAU_THERM_THRS_HIGHER) {
+		direction = NOUVEAU_THERM_THRS_FALLING;
+		new_state = NOUVEAU_THERM_THRS_LOWER;
+	} else
+		return; /* nothing to do */
+
+	nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
+	nouveau_therm_sensor_event(therm, thrs_name, direction);
+}
+
+static void
+alarm_timer_callback(struct nouveau_alarm *alarm)
+{
+	struct nouveau_therm_priv *priv =
+		container_of(alarm, struct nouveau_therm_priv,
+			     sensor.therm_poll_alarm);
+	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+	struct nouveau_timer *ptimer = nouveau_timer(priv);
+	struct nouveau_therm *therm = &priv->base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+
+	nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
+					     NOUVEAU_THERM_THRS_FANBOOST);
+
+	nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_down_clock,
+					     NOUVEAU_THERM_THRS_DOWNCLOCK);
+
+	nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_critical,
+					     NOUVEAU_THERM_THRS_CRITICAL);
+
+	nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
+					     NOUVEAU_THERM_THRS_SHUTDOWN);
+
+	/* schedule the next poll in one second */
+	if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
+		ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
+
+	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+}
+
+void
+nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+
+	nv_debug(therm,
+		 "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
+		 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
+		 sensor->thrs_down_clock.temp,
+		 sensor->thrs_down_clock.hysteresis,
+		 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
+		 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
+
+	alarm_timer_callback(&priv->sensor.therm_poll_alarm);
+}
+
+void
+nouveau_therm_sensor_preinit(struct nouveau_therm *therm)
+{
+	const char *sensor_avail = "yes";
+
+	if (therm->temp_get(therm) < 0)
+		sensor_avail = "no";
+
+	nv_info(therm, "internal sensor: %s\n", sensor_avail);
+}
+
+int
+nouveau_therm_sensor_ctor(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_bios *bios = nouveau_bios(therm);
+
+	nouveau_alarm_init(&priv->sensor.therm_poll_alarm, alarm_timer_callback);
+
+	nouveau_therm_temp_set_defaults(therm);
+	if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
+				      &priv->bios_sensor))
+		nv_error(therm, "nvbios_therm_sensor_parse failed\n");
+	nouveau_therm_temp_safety_checks(therm);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/timer/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
new file mode 100644
index 0000000..5d417cc
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "subdev/timer.h"
+
+bool
+nouveau_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
+{
+	struct nouveau_timer *ptimer = nouveau_timer(obj);
+	u64 time0;
+
+	time0 = ptimer->read(ptimer);
+	do {
+		if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
+			if ((nv_rd32(obj, addr) & mask) == data)
+				return true;
+		} else {
+			if ((nv_ro32(obj, addr) & mask) == data)
+				return true;
+		}
+	} while (ptimer->read(ptimer) - time0 < nsec);
+
+	return false;
+}
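+
+/* Minimal usage sketch (the register offset and bits here are made up):
+ * poll for up to 2ms for bit 0 of register 0x001234 to read back as set:
+ *
+ *	if (!nouveau_timer_wait_eq(priv, 2000000, 0x001234, 0x1, 0x1))
+ *		nv_error(priv, "timeout waiting on 0x001234\n");
+ */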
+
+bool
+nouveau_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
+{
+	struct nouveau_timer *ptimer = nouveau_timer(obj);
+	u64 time0;
+
+	time0 = ptimer->read(ptimer);
+	do {
+		if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
+			if ((nv_rd32(obj, addr) & mask) != data)
+				return true;
+		} else {
+			if ((nv_ro32(obj, addr) & mask) != data)
+				return true;
+		}
+	} while (ptimer->read(ptimer) - time0 < nsec);
+
+	return false;
+}
+
+bool
+nouveau_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data)
+{
+	struct nouveau_timer *ptimer = nouveau_timer(obj);
+	u64 time0;
+
+	time0 = ptimer->read(ptimer);
+	do {
+		if (func(data))
+			return true;
+	} while (ptimer->read(ptimer) - time0 < nsec);
+
+	return false;
+}
+
+void
+nouveau_timer_alarm(void *obj, u32 nsec, struct nouveau_alarm *alarm)
+{
+	struct nouveau_timer *ptimer = nouveau_timer(obj);
+	ptimer->alarm(ptimer, nsec, alarm);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
new file mode 100644
index 0000000..9469b82
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/timer.h>
+
+#define NV04_PTIMER_INTR_0      0x009100
+#define NV04_PTIMER_INTR_EN_0   0x009140
+#define NV04_PTIMER_NUMERATOR   0x009200
+#define NV04_PTIMER_DENOMINATOR 0x009210
+#define NV04_PTIMER_TIME_0      0x009400
+#define NV04_PTIMER_TIME_1      0x009410
+#define NV04_PTIMER_ALARM_0     0x009420
+
+struct nv04_timer_priv {
+	struct nouveau_timer base;
+	struct list_head alarms;
+	spinlock_t lock;
+};
+
+static u64
+nv04_timer_read(struct nouveau_timer *ptimer)
+{
+	struct nv04_timer_priv *priv = (void *)ptimer;
+	u32 hi, lo;
+
+	do {
+		hi = nv_rd32(priv, NV04_PTIMER_TIME_1);
+		lo = nv_rd32(priv, NV04_PTIMER_TIME_0);
+	} while (hi != nv_rd32(priv, NV04_PTIMER_TIME_1));
+
+	return ((u64)hi << 32 | lo);
+}
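+
+/* The 64-bit counter is read as two 32-bit halves, so TIME_1 is sampled
+ * again after TIME_0: if the high word changed in between, the low word
+ * wrapped mid-read and the pair is retried.  This is the usual lock-free
+ * split-counter read pattern.
+ */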
+
+static void
+nv04_timer_alarm_trigger(struct nouveau_timer *ptimer)
+{
+	struct nv04_timer_priv *priv = (void *)ptimer;
+	struct nouveau_alarm *alarm, *atemp;
+	unsigned long flags;
+	LIST_HEAD(exec);
+
+	/* move any due alarms off the pending list */
+	spin_lock_irqsave(&priv->lock, flags);
+	list_for_each_entry_safe(alarm, atemp, &priv->alarms, head) {
+		if (alarm->timestamp <= ptimer->read(ptimer))
+			list_move_tail(&alarm->head, &exec);
+	}
+
+	/* reschedule interrupt for next alarm time */
+	if (!list_empty(&priv->alarms)) {
+		alarm = list_first_entry(&priv->alarms, typeof(*alarm), head);
+		nv_wr32(priv, NV04_PTIMER_ALARM_0, alarm->timestamp);
+		nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000001);
+	} else {
+		nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* execute any pending alarm handlers */
+	list_for_each_entry_safe(alarm, atemp, &exec, head) {
+		list_del_init(&alarm->head);
+		alarm->func(alarm);
+	}
+}
+
+static void
+nv04_timer_alarm(struct nouveau_timer *ptimer, u64 time,
+		 struct nouveau_alarm *alarm)
+{
+	struct nv04_timer_priv *priv = (void *)ptimer;
+	struct nouveau_alarm *list;
+	unsigned long flags;
+
+	alarm->timestamp = ptimer->read(ptimer) + time;
+
+	/* append new alarm to list, in soonest-alarm-first order */
+	spin_lock_irqsave(&priv->lock, flags);
+	if (!time) {
+		if (!list_empty(&alarm->head))
+			list_del(&alarm->head);
+	} else {
+		list_for_each_entry(list, &priv->alarms, head) {
+			if (list->timestamp > alarm->timestamp)
+				break;
+		}
+		list_add_tail(&alarm->head, &list->head);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* process pending alarms */
+	nv04_timer_alarm_trigger(ptimer);
+}
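+
+/* Note that the time == 0 case above cancels a pending alarm rather than
+ * scheduling one.  Otherwise the alarm is inserted before the first entry
+ * with a later timestamp, keeping the list sorted so that its head is
+ * always the next deadline to program into ALARM_0.
+ */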
+
+static void
+nv04_timer_intr(struct nouveau_subdev *subdev)
+{
+	struct nv04_timer_priv *priv = (void *)subdev;
+	u32 stat = nv_rd32(priv, NV04_PTIMER_INTR_0);
+
+	if (stat & 0x00000001) {
+		nv04_timer_alarm_trigger(&priv->base);
+		nv_wr32(priv, NV04_PTIMER_INTR_0, 0x00000001);
+		stat &= ~0x00000001;
+	}
+
+	if (stat) {
+		nv_error(priv, "unknown stat 0x%08x\n", stat);
+		nv_wr32(priv, NV04_PTIMER_INTR_0, stat);
+	}
+}
+
+static int
+nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv04_timer_priv *priv;
+	int ret;
+
+	ret = nouveau_timer_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.base.intr = nv04_timer_intr;
+	priv->base.read = nv04_timer_read;
+	priv->base.alarm = nv04_timer_alarm;
+
+	INIT_LIST_HEAD(&priv->alarms);
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+static void
+nv04_timer_dtor(struct nouveau_object *object)
+{
+	struct nv04_timer_priv *priv = (void *)object;
+	return nouveau_timer_destroy(&priv->base);
+}
+
+static int
+nv04_timer_init(struct nouveau_object *object)
+{
+	struct nouveau_device *device = nv_device(object);
+	struct nv04_timer_priv *priv = (void *)object;
+	u32 m = 1, f, n, d;
+	int ret;
+
+	ret = nouveau_timer_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* aim for 31.25MHz, which gives us nanosecond timestamps */
+	d = 1000000 / 32;
+
+	/* determine base clock for timer source */
+#if 0 /*XXX*/
+	if (device->chipset < 0x40) {
+		n = nouveau_hw_get_clock(device, PLL_CORE);
+	} else
+#endif
+	if (device->chipset <= 0x40) {
+		/*XXX: figure this out */
+		f = -1;
+		n = 0;
+	} else {
+		f = device->crystal;
+		n = f;
+		while (n < (d * 2)) {
+			n += (n / m);
+			m++;
+		}
+
+		nv_wr32(priv, 0x009220, m - 1);
+	}
+
+	if (!n) {
+		nv_warn(priv, "unknown input clock freq\n");
+		if (!nv_rd32(priv, NV04_PTIMER_NUMERATOR) ||
+		    !nv_rd32(priv, NV04_PTIMER_DENOMINATOR)) {
+			nv_wr32(priv, NV04_PTIMER_NUMERATOR, 1);
+			nv_wr32(priv, NV04_PTIMER_DENOMINATOR, 1);
+		}
+		return 0;
+	}
+
+	/* reduce ratio to acceptable values */
+	while (((n % 5) == 0) && ((d % 5) == 0)) {
+		n /= 5;
+		d /= 5;
+	}
+
+	while (((n % 2) == 0) && ((d % 2) == 0)) {
+		n /= 2;
+		d /= 2;
+	}
+
+	while (n > 0xffff || d > 0xffff) {
+		n >>= 1;
+		d >>= 1;
+	}
+
+	nv_debug(priv, "input frequency : %dHz\n", f);
+	nv_debug(priv, "input multiplier: %d\n", m);
+	nv_debug(priv, "numerator       : 0x%08x\n", n);
+	nv_debug(priv, "denominator     : 0x%08x\n", d);
+	nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n);
+
+	nv_wr32(priv, NV04_PTIMER_NUMERATOR, n);
+	nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d);
+	nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff);
+	nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
+	return 0;
+}
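+
+/* Worked example, assuming device->crystal is expressed in KHz as in the
+ * rest of the driver: for a 27 MHz crystal, f = 27000 and d = 31250.  The
+ * multiplier loop runs until n >= 2 * d, leaving m = 3 and n = 81000
+ * (0x009220 is written with m - 1 = 2).  Reducing 81000/31250 by factors
+ * of 5 and 2 gives n = 324, d = 125, so the programmed timer frequency is
+ * (27000 * 3) * 125 / 324 = 31250 KHz, i.e. the targeted 31.25 MHz.
+ */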
+
+static int
+nv04_timer_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nv04_timer_priv *priv = (void *)object;
+	nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
+	return nouveau_timer_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv04_timer_oclass = {
+	.handle = NV_SUBDEV(TIMER, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_timer_ctor,
+		.dtor = nv04_timer_dtor,
+		.init = nv04_timer_init,
+		.fini = nv04_timer_fini,
+	}
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
new file mode 100644
index 0000000..e66fb77
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+#include <core/mm.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+void
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
+{
+	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_vmmgr *vmm = vm->vmm;
+	struct nouveau_mm_node *r;
+	int big = vma->node->type != vmm->spg_shift;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vmm->pgt_bits - bits);
+	u32 end, len;
+
+	delta = 0;
+	list_for_each_entry(r, &node->regions, rl_entry) {
+		u64 phys = (u64)r->offset << 12;
+		u32 num  = r->length >> bits;
+
+		while (num) {
+			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+			end = (pte + num);
+			if (unlikely(end >= max))
+				end = max;
+			len = end - pte;
+
+			vmm->map(vma, pgt, node, pte, len, phys, delta);
+
+			num -= len;
+			pte += len;
+			if (unlikely(end >= max)) {
+				phys += len << (bits + 12);
+				pde++;
+				pte = 0;
+			}
+
+			delta += (u64)len << vma->node->type;
+		}
+	}
+
+	vmm->flush(vm);
+}
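+
+/* The index arithmetic above is shared by all map/unmap paths in this
+ * file: an offset in 4 KiB units is split into a page-directory index
+ * (the upper vmm->pgt_bits bits, biased by vm->fpde) and a page-table
+ * index.  "bits" converts between 4 KiB units and the vma's real page
+ * size (vma->node->type), and "max" is the PTE count of one page table
+ * at that size.
+ */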
+
+void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
+{
+	nouveau_vm_map_at(vma, 0, node);
+}
+
+void
+nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
+			struct nouveau_mem *mem)
+{
+	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_vmmgr *vmm = vm->vmm;
+	int big = vma->node->type != vmm->spg_shift;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 num  = length >> vma->node->type;
+	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vmm->pgt_bits - bits);
+	unsigned m, sglen;
+	u32 end, len;
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+		sglen = sg_dma_len(sg) >> PAGE_SHIFT;
+
+		end = pte + sglen;
+		if (unlikely(end >= max))
+			end = max;
+		len = end - pte;
+
+		for (m = 0; m < len; m++) {
+			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+			vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
+			num--;
+			pte++;
+
+			if (num == 0)
+				goto finish;
+		}
+		if (unlikely(end >= max)) {
+			pde++;
+			pte = 0;
+			/* crossed into the next page table; re-fetch it so
+			 * the remainder of this sg entry lands in the right
+			 * table
+			 */
+			pgt = vm->pgt[pde].obj[big];
+		}
+		if (m < sglen) {
+			for (; m < sglen; m++) {
+				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+				vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
+				num--;
+				pte++;
+				if (num == 0)
+					goto finish;
+			}
+		}
+	}
+finish:
+	vmm->flush(vm);
+}
+
+void
+nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
+		  struct nouveau_mem *mem)
+{
+	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_vmmgr *vmm = vm->vmm;
+	dma_addr_t *list = mem->pages;
+	int big = vma->node->type != vmm->spg_shift;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 num  = length >> vma->node->type;
+	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vmm->pgt_bits - bits);
+	u32 end, len;
+
+	while (num) {
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+		end = (pte + num);
+		if (unlikely(end >= max))
+			end = max;
+		len = end - pte;
+
+		vmm->map_sg(vma, pgt, mem, pte, len, list);
+
+		num  -= len;
+		pte  += len;
+		list += len;
+		if (unlikely(end >= max)) {
+			pde++;
+			pte = 0;
+		}
+	}
+
+	vmm->flush(vm);
+}
+
+void
+nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
+{
+	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_vmmgr *vmm = vm->vmm;
+	int big = vma->node->type != vmm->spg_shift;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 num  = length >> vma->node->type;
+	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vmm->pgt_bits - bits);
+	u32 end, len;
+
+	while (num) {
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+		end = (pte + num);
+		if (unlikely(end >= max))
+			end = max;
+		len = end - pte;
+
+		vmm->unmap(pgt, pte, len);
+
+		num -= len;
+		pte += len;
+		if (unlikely(end >= max)) {
+			pde++;
+			pte = 0;
+		}
+	}
+
+	vmm->flush(vm);
+}
+
+void
+nouveau_vm_unmap(struct nouveau_vma *vma)
+{
+	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
+}
+
+static void
+nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
+{
+	struct nouveau_vmmgr *vmm = vm->vmm;
+	struct nouveau_vm_pgd *vpgd;
+	struct nouveau_vm_pgt *vpgt;
+	struct nouveau_gpuobj *pgt;
+	u32 pde;
+
+	for (pde = fpde; pde <= lpde; pde++) {
+		vpgt = &vm->pgt[pde - vm->fpde];
+		if (--vpgt->refcount[big])
+			continue;
+
+		pgt = vpgt->obj[big];
+		vpgt->obj[big] = NULL;
+
+		list_for_each_entry(vpgd, &vm->pgd_list, head) {
+			vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
+		}
+
+		mutex_unlock(&vm->mm.mutex);
+		nouveau_gpuobj_ref(NULL, &pgt);
+		mutex_lock(&vm->mm.mutex);
+	}
+}
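+
+/* vm->mm.mutex is deliberately dropped around nouveau_gpuobj_ref(NULL, ...)
+ * here and in nouveau_vm_map_pgt() below: releasing the final reference
+ * tears the object down, and that path may need to take locks of its own,
+ * so it must not run under the mutex.
+ */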
+
+static int
+nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
+{
+	struct nouveau_vmmgr *vmm = vm->vmm;
+	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+	struct nouveau_vm_pgd *vpgd;
+	struct nouveau_gpuobj *pgt;
+	int big = (type != vmm->spg_shift);
+	u32 pgt_size;
+	int ret;
+
+	pgt_size  = (1 << (vmm->pgt_bits + 12)) >> type;
+	pgt_size *= 8;
+
+	mutex_unlock(&vm->mm.mutex);
+	ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
+	mutex_lock(&vm->mm.mutex);
+	if (unlikely(ret))
+		return ret;
+
+	/* someone beat us to filling the PDE while we didn't have the lock */
+	if (unlikely(vpgt->refcount[big]++)) {
+		mutex_unlock(&vm->mm.mutex);
+		nouveau_gpuobj_ref(NULL, &pgt);
+		mutex_lock(&vm->mm.mutex);
+		return 0;
+	}
+
+	vpgt->obj[big] = pgt;
+	list_for_each_entry(vpgd, &vm->pgd_list, head) {
+		vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
+	}
+
+	return 0;
+}
+
+int
+nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
+	       u32 access, struct nouveau_vma *vma)
+{
+	struct nouveau_vmmgr *vmm = vm->vmm;
+	u32 align = (1 << page_shift) >> 12;
+	u32 msize = size >> 12;
+	u32 fpde, lpde, pde;
+	int ret;
+
+	mutex_lock(&vm->mm.mutex);
+	ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align,
+			     &vma->node);
+	if (unlikely(ret != 0)) {
+		mutex_unlock(&vm->mm.mutex);
+		return ret;
+	}
+
+	fpde = (vma->node->offset >> vmm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
+
+	for (pde = fpde; pde <= lpde; pde++) {
+		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+		int big = (vma->node->type != vmm->spg_shift);
+
+		if (likely(vpgt->refcount[big])) {
+			vpgt->refcount[big]++;
+			continue;
+		}
+
+		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
+		if (ret) {
+			if (pde != fpde)
+				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
+			nouveau_mm_free(&vm->mm, &vma->node);
+			mutex_unlock(&vm->mm.mutex);
+			return ret;
+		}
+	}
+	mutex_unlock(&vm->mm.mutex);
+
+	vma->vm     = vm;
+	vma->offset = (u64)vma->node->offset << 12;
+	vma->access = access;
+	return 0;
+}
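+
+/* Typical calling sequence, as a sketch with error handling elided:
+ *
+ *	struct nouveau_vma vma = {};
+ *
+ *	nouveau_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &vma);
+ *	nouveau_vm_map(&vma, node);
+ *	...
+ *	nouveau_vm_unmap(&vma);
+ *	nouveau_vm_put(&vma);
+ *
+ * page_shift selects small (spg_shift) or large (lpg_shift) pages and
+ * must match a shift the vmmgr supports.
+ */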
+
+void
+nouveau_vm_put(struct nouveau_vma *vma)
+{
+	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_vmmgr *vmm = vm->vmm;
+	u32 fpde, lpde;
+
+	if (unlikely(vma->node == NULL))
+		return;
+	fpde = (vma->node->offset >> vmm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
+
+	mutex_lock(&vm->mm.mutex);
+	nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
+	nouveau_mm_free(&vm->mm, &vma->node);
+	mutex_unlock(&vm->mm.mutex);
+}
+
+int
+nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
+		  u64 mm_offset, u32 block, struct nouveau_vm **pvm)
+{
+	struct nouveau_vm *vm;
+	u64 mm_length = (offset + length) - mm_offset;
+	int ret;
+
+	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+	if (!vm)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&vm->pgd_list);
+	vm->vmm = vmm;
+	vm->refcount = 1;
+	vm->fpde = offset >> (vmm->pgt_bits + 12);
+	vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
+
+	vm->pgt  = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
+	if (!vm->pgt) {
+		kfree(vm);
+		return -ENOMEM;
+	}
+
+	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+			      block >> 12);
+	if (ret) {
+		vfree(vm->pgt);
+		kfree(vm);
+		return ret;
+	}
+
+	*pvm = vm;
+
+	return 0;
+}
+
+int
+nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length,
+	       u64 mm_offset, struct nouveau_vm **pvm)
+{
+	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
+	return vmm->create(vmm, offset, length, mm_offset, pvm);
+}
+
+static int
+nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vmmgr *vmm = vm->vmm;
+	struct nouveau_vm_pgd *vpgd;
+	int i;
+
+	if (!pgd)
+		return 0;
+
+	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
+	if (!vpgd)
+		return -ENOMEM;
+
+	nouveau_gpuobj_ref(pgd, &vpgd->obj);
+
+	mutex_lock(&vm->mm.mutex);
+	for (i = vm->fpde; i <= vm->lpde; i++)
+		vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+	list_add(&vpgd->head, &vm->pgd_list);
+	mutex_unlock(&vm->mm.mutex);
+	return 0;
+}
+
+static void
+nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
+{
+	struct nouveau_vm_pgd *vpgd, *tmp;
+	struct nouveau_gpuobj *pgd = NULL;
+
+	if (!mpgd)
+		return;
+
+	mutex_lock(&vm->mm.mutex);
+	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+		if (vpgd->obj == mpgd) {
+			pgd = vpgd->obj;
+			list_del(&vpgd->head);
+			kfree(vpgd);
+			break;
+		}
+	}
+	mutex_unlock(&vm->mm.mutex);
+
+	nouveau_gpuobj_ref(NULL, &pgd);
+}
+
+static void
+nouveau_vm_del(struct nouveau_vm *vm)
+{
+	struct nouveau_vm_pgd *vpgd, *tmp;
+
+	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+		nouveau_vm_unlink(vm, vpgd->obj);
+	}
+
+	nouveau_mm_fini(&vm->mm);
+	vfree(vm->pgt);
+	kfree(vm);
+}
+
+int
+nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
+	       struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vm *vm;
+	int ret;
+
+	vm = ref;
+	if (vm) {
+		ret = nouveau_vm_link(vm, pgd);
+		if (ret)
+			return ret;
+
+		vm->refcount++;
+	}
+
+	vm = *ptr;
+	*ptr = ref;
+
+	if (vm) {
+		nouveau_vm_unlink(vm, pgd);
+
+		if (--vm->refcount == 0)
+			nouveau_vm_del(vm);
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
new file mode 100644
index 0000000..ed45437
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+
+#include "nv04.h"
+
+#define NV04_PDMA_SIZE (128 * 1024 * 1024)
+#define NV04_PDMA_PAGE (  4 * 1024)
+
+/*******************************************************************************
+ * VM map/unmap callbacks
+ ******************************************************************************/
+
+static void
+nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+{
+	pte = 0x00008 + (pte * 4);
+	while (cnt) {
+		u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
+		u32 phys = (u32)*list++;
+		while (cnt && page--) {
+			nv_wo32(pgt, pte, phys | 3);
+			phys += NV04_PDMA_PAGE;
+			pte += 4;
+			cnt -= 1;
+		}
+	}
+}
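+
+/* PTEs start at byte offset 8 because the first two words of the object
+ * are the DMA object header written in nv04_vmmgr_ctor() below (class and
+ * flags at 0x00, limit at 0x04).  Each PTE is a single 32-bit word: the
+ * page address with the low two bits set to mark the entry valid.
+ */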
+
+static void
+nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	pte = 0x00008 + (pte * 4);
+	while (cnt--) {
+		nv_wo32(pgt, pte, 0x00000000);
+		pte += 4;
+	}
+}
+
+static void
+nv04_vm_flush(struct nouveau_vm *vm)
+{
+}
+
+/*******************************************************************************
+ * VM object
+ ******************************************************************************/
+
+int
+nv04_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, u64 mmstart,
+	       struct nouveau_vm **pvm)
+{
+	return -EINVAL;
+}
+
+/*******************************************************************************
+ * VMMGR subdev
+ ******************************************************************************/
+
+static int
+nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv04_vmmgr_priv *priv;
+	struct nouveau_gpuobj *dma;
+	int ret;
+
+	ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIGART",
+				   "pcigart", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.create = nv04_vm_create;
+	priv->base.limit = NV04_PDMA_SIZE;
+	priv->base.dma_bits = 32;
+	priv->base.pgt_bits = 32 - 12;
+	priv->base.spg_shift = 12;
+	priv->base.lpg_shift = 12;
+	priv->base.map_sg = nv04_vm_map_sg;
+	priv->base.unmap = nv04_vm_unmap;
+	priv->base.flush = nv04_vm_flush;
+
+	ret = nouveau_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
+				&priv->vm);
+	if (ret)
+		return ret;
+
+	/* the page table itself, preceded by an 8-byte DMA object header */
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL,
+				 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
+				 16, NVOBJ_FLAG_ZERO_ALLOC,
+				 &priv->vm->pgt[0].obj[0]);
+	if (ret)
+		return ret;
+
+	dma = priv->vm->pgt[0].obj[0];
+	priv->vm->pgt[0].refcount[0] = 1;
+
+	nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
+	nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
+	return 0;
+}
+
+void
+nv04_vmmgr_dtor(struct nouveau_object *object)
+{
+	struct nv04_vmmgr_priv *priv = (void *)object;
+	if (priv->vm) {
+		nouveau_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
+		nouveau_vm_ref(NULL, &priv->vm, NULL);
+	}
+	if (priv->nullp) {
+		pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
+				    priv->nullp, priv->null);
+	}
+	nouveau_vmmgr_destroy(&priv->base);
+}
+
+struct nouveau_oclass
+nv04_vmmgr_oclass = {
+	.handle = NV_SUBDEV(VM, 0x04),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv04_vmmgr_ctor,
+		.dtor = nv04_vmmgr_dtor,
+		.init = _nouveau_vmmgr_init,
+		.fini = _nouveau_vmmgr_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
new file mode 100644
index 0000000..ec42d4b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
@@ -0,0 +1,19 @@
+#ifndef __NV04_VMMGR_PRIV__
+#define __NV04_VMMGR_PRIV__
+
+#include <subdev/vm.h>
+
+struct nv04_vmmgr_priv {
+	struct nouveau_vmmgr base;
+	struct nouveau_vm *vm;
+	dma_addr_t null;
+	void *nullp;
+};
+
+static inline struct nv04_vmmgr_priv *
+nv04_vmmgr(void *obj)
+{
+	return (void *)nouveau_vmmgr(obj);
+}
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
new file mode 100644
index 0000000..064c762
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+#include <core/option.h>
+
+#include <subdev/timer.h>
+#include <subdev/vm.h>
+
+#include "nv04.h"
+
+#define NV41_GART_SIZE (512 * 1024 * 1024)
+#define NV41_GART_PAGE (  4 * 1024)
+
+/*******************************************************************************
+ * VM map/unmap callbacks
+ ******************************************************************************/
+
+static void
+nv41_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+{
+	pte = pte * 4;
+	while (cnt) {
+		u32 page = PAGE_SIZE / NV41_GART_PAGE;
+		u64 phys = (u64)*list++;
+		while (cnt && page--) {
+			nv_wo32(pgt, pte, (phys >> 7) | 1);
+			phys += NV41_GART_PAGE;
+			pte += 4;
+			cnt -= 1;
+		}
+	}
+}
+
+static void
+nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	pte = pte * 4;
+	while (cnt--) {
+		nv_wo32(pgt, pte, 0x00000000);
+		pte += 4;
+	}
+}
+
+static void
+nv41_vm_flush(struct nouveau_vm *vm)
+{
+	struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
+
+	mutex_lock(&nv_subdev(priv)->mutex);
+	nv_wr32(priv, 0x100810, 0x00000022);
+	if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) {
+		nv_warn(priv, "flush timeout, 0x%08x\n",
+			nv_rd32(priv, 0x100810));
+	}
+	nv_wr32(priv, 0x100810, 0x00000000);
+	mutex_unlock(&nv_subdev(priv)->mutex);
+}
+
+/*******************************************************************************
+ * VMMGR subdev
+ ******************************************************************************/
+
+static int
+nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nv04_vmmgr_priv *priv;
+	int ret;
+
+	if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
+	    !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
+		return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
+					   data, size, pobject);
+	}
+
+	ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
+				   "pciegart", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.create = nv04_vm_create;
+	priv->base.limit = NV41_GART_SIZE;
+	priv->base.dma_bits = 39;
+	priv->base.pgt_bits = 32 - 12;
+	priv->base.spg_shift = 12;
+	priv->base.lpg_shift = 12;
+	priv->base.map_sg = nv41_vm_map_sg;
+	priv->base.unmap = nv41_vm_unmap;
+	priv->base.flush = nv41_vm_flush;
+
+	ret = nouveau_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
+				&priv->vm);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL,
+				 (NV41_GART_SIZE / NV41_GART_PAGE) * 4,
+				 16, NVOBJ_FLAG_ZERO_ALLOC,
+				 &priv->vm->pgt[0].obj[0]);
+	if (ret)
+		return ret;
+
+	priv->vm->pgt[0].refcount[0] = 1;
+
+	return 0;
+}
+
+static int
+nv41_vmmgr_init(struct nouveau_object *object)
+{
+	struct nv04_vmmgr_priv *priv = (void *)object;
+	struct nouveau_gpuobj *dma = priv->vm->pgt[0].obj[0];
+	int ret;
+
+	ret = nouveau_vmmgr_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
+	nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
+	nv_wr32(priv, 0x100820, 0x00000000);
+	return 0;
+}
+
+struct nouveau_oclass
+nv41_vmmgr_oclass = {
+	.handle = NV_SUBDEV(VM, 0x41),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv41_vmmgr_ctor,
+		.dtor = nv04_vmmgr_dtor,
+		.init = nv41_vmmgr_init,
+		.fini = _nouveau_vmmgr_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
new file mode 100644
index 0000000..fae1f67
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+#include <core/option.h>
+
+#include <subdev/timer.h>
+#include <subdev/vm.h>
+
+#include "nv04.h"
+
+#define NV44_GART_SIZE (512 * 1024 * 1024)
+#define NV44_GART_PAGE (  4 * 1024)
+
+/*******************************************************************************
+ * VM map/unmap callbacks
+ ******************************************************************************/
+
+static void
+nv44_vm_fill(struct nouveau_gpuobj *pgt, dma_addr_t null,
+	     dma_addr_t *list, u32 pte, u32 cnt)
+{
+	u32 base = (pte << 2) & ~0x0000000f;
+	u32 tmp[4];
+
+	tmp[0] = nv_ro32(pgt, base + 0x0);
+	tmp[1] = nv_ro32(pgt, base + 0x4);
+	tmp[2] = nv_ro32(pgt, base + 0x8);
+	tmp[3] = nv_ro32(pgt, base + 0xc);
+
+	while (cnt--) {
+		u32 addr = list ? (*list++ >> 12) : (null >> 12);
+		switch (pte++ & 0x3) {
+		case 0:
+			tmp[0] &= ~0x07ffffff;
+			tmp[0] |= addr;
+			break;
+		case 1:
+			tmp[0] &= ~0xf8000000;
+			tmp[0] |= addr << 27;
+			tmp[1] &= ~0x003fffff;
+			tmp[1] |= addr >> 5;
+			break;
+		case 2:
+			tmp[1] &= ~0xffc00000;
+			tmp[1] |= addr << 22;
+			tmp[2] &= ~0x0001ffff;
+			tmp[2] |= addr >> 10;
+			break;
+		case 3:
+			tmp[2] &= ~0xfffe0000;
+			tmp[2] |= addr << 17;
+			tmp[3] &= ~0x00000fff;
+			tmp[3] |= addr >> 15;
+			break;
+		}
+	}
+
+	nv_wo32(pgt, base + 0x0, tmp[0]);
+	nv_wo32(pgt, base + 0x4, tmp[1]);
+	nv_wo32(pgt, base + 0x8, tmp[2]);
+	nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
+}
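+
+/* The NV44 GART packs four 27-bit page-frame numbers into each 128-bit
+ * group of four words, so updating a single PTE means read-modify-writing
+ * the whole group.  With A0..A3 the four addresses:
+ *
+ *	word 0: A0[26:0]  | A1[4:0]  << 27
+ *	word 1: A1[26:5]  | A2[9:0]  << 22
+ *	word 2: A2[26:10] | A3[14:0] << 17
+ *	word 3: A3[26:15] | 0x40000000 (set on every writeback)
+ *
+ * When called with list == NULL (the unmap path), entries are pointed at
+ * the priv->null dummy page rather than cleared.
+ */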
+
+static void
+nv44_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+{
+	struct nv04_vmmgr_priv *priv = (void *)vma->vm->vmm;
+	u32 tmp[4];
+	int i;
+
+	if (pte & 3) {
+		u32  max = 4 - (pte & 3);
+		u32 part = (cnt > max) ? max : cnt;
+		nv44_vm_fill(pgt, priv->null, list, pte, part);
+		pte  += part;
+		list += part;
+		cnt  -= part;
+	}
+
+	while (cnt >= 4) {
+		for (i = 0; i < 4; i++)
+			tmp[i] = *list++ >> 12;
+		nv_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
+		nv_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
+		nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+		nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
+		cnt -= 4;
+	}
+
+	if (cnt)
+		nv44_vm_fill(pgt, priv->null, list, pte, cnt);
+}
+
+static void
+nv44_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	struct nv04_vmmgr_priv *priv = (void *)nouveau_vmmgr(pgt);
+
+	if (pte & 3) {
+		u32  max = 4 - (pte & 3);
+		u32 part = (cnt > max) ? max : cnt;
+		nv44_vm_fill(pgt, priv->null, NULL, pte, part);
+		pte  += part;
+		cnt  -= part;
+	}
+
+	while (cnt >= 4) {
+		nv_wo32(pgt, pte++ * 4, 0x00000000);
+		nv_wo32(pgt, pte++ * 4, 0x00000000);
+		nv_wo32(pgt, pte++ * 4, 0x00000000);
+		nv_wo32(pgt, pte++ * 4, 0x00000000);
+		cnt -= 4;
+	}
+
+	if (cnt)
+		nv44_vm_fill(pgt, priv->null, NULL, pte, cnt);
+}
+
+static void
+nv44_vm_flush(struct nouveau_vm *vm)
+{
+	struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
+	nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE);
+	nv_wr32(priv, 0x100808, 0x00000020);
+	if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001))
+		nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808));
+	nv_wr32(priv, 0x100808, 0x00000000);
+}
+
+/*******************************************************************************
+ * VMMGR subdev
+ ******************************************************************************/
+
+static int
+nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nouveau_device *device = nv_device(parent);
+	struct nv04_vmmgr_priv *priv;
+	int ret;
+
+	if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
+	    !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
+		return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
+					   data, size, pobject);
+	}
+
+	ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
+				   "pciegart", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.create = nv04_vm_create;
+	priv->base.limit = NV44_GART_SIZE;
+	priv->base.dma_bits = 39;
+	priv->base.pgt_bits = 32 - 12;
+	priv->base.spg_shift = 12;
+	priv->base.lpg_shift = 12;
+	priv->base.map_sg = nv44_vm_map_sg;
+	priv->base.unmap = nv44_vm_unmap;
+	priv->base.flush = nv44_vm_flush;
+
+	priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null);
+	if (!priv->nullp) {
+		nv_error(priv, "unable to allocate dummy pages\n");
+		return -ENOMEM;
+	}
+
+	ret = nouveau_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
+				&priv->vm);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(nv_object(priv), NULL,
+				 (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
+				 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
+				 &priv->vm->pgt[0].obj[0]);
+	if (ret)
+		return ret;
+
+	priv->vm->pgt[0].refcount[0] = 1;
+
+	return 0;
+}
+
+static int
+nv44_vmmgr_init(struct nouveau_object *object)
+{
+	struct nv04_vmmgr_priv *priv = (void *)object;
+	struct nouveau_gpuobj *gart = priv->vm->pgt[0].obj[0];
+	u32 addr;
+	int ret;
+
+	ret = nouveau_vmmgr_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* calculate vram address of this PRAMIN block, object must be
+	 * allocated on 512KiB alignment, and not exceed a total size
+	 * of 512KiB for this to work correctly
+	 */
+	addr  = nv_rd32(priv, 0x10020c);
+	addr -= ((gart->addr >> 19) + 1) << 19;
+
+	nv_wr32(priv, 0x100850, 0x80000000);
+	nv_wr32(priv, 0x100818, priv->null);
+	nv_wr32(priv, 0x100804, NV44_GART_SIZE);
+	nv_wr32(priv, 0x100850, 0x00008000);
+	nv_mask(priv, 0x10008c, 0x00000200, 0x00000200);
+	nv_wr32(priv, 0x100820, 0x00000000);
+	nv_wr32(priv, 0x10082c, 0x00000001);
+	nv_wr32(priv, 0x100800, addr | 0x00000010);
+	return 0;
+}
+
+struct nouveau_oclass
+nv44_vmmgr_oclass = {
+	.handle = NV_SUBDEV(VM, 0x44),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv44_vmmgr_ctor,
+		.dtor = nv04_vmmgr_dtor,
+		.init = nv44_vmmgr_init,
+		.fini = _nouveau_vmmgr_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
new file mode 100644
index 0000000..e067f81
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/gpuobj.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+
+struct nv50_vmmgr_priv {
+	struct nouveau_vmmgr base;
+	spinlock_t lock;
+};
+
+static void
+nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+		struct nouveau_gpuobj *pgt[2])
+{
+	u64 phys = 0xdeadcafe00000000ULL;
+	u32 coverage = 0;
+
+	if (pgt[0]) {
+		phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
+		coverage = (pgt[0]->size >> 3) << 12;
+	} else
+	if (pgt[1]) {
+		phys = 0x00000001 | pgt[1]->addr; /* present */
+		coverage = (pgt[1]->size >> 3) << 16;
+	}
+
+	if (phys & 1) {
+		if (coverage <= 32 * 1024 * 1024)
+			phys |= 0x60;
+		else if (coverage <= 64 * 1024 * 1024)
+			phys |= 0x40;
+		else if (coverage <= 128 * 1024 * 1024)
+			phys |= 0x20;
+	}
+
+	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+}
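+
+/* PDE encoding sketch: bit 0 is the present bit, bit 1 selects the 4 KiB
+ * page table (pgt[0]) over the 64 KiB one (pgt[1]), and bits 6:5 encode
+ * the coverage classes tested above.  The 0xdeadcafe pattern marks PDEs
+ * with no table attached, which makes stray walks easy to spot in dumps.
+ */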
+
+static inline u64
+vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+{
+	phys |= 1; /* present */
+	phys |= (u64)memtype << 40;
+	phys |= target << 4;
+	if (vma->access & NV_MEM_ACCESS_SYS)
+		phys |= (1 << 6);
+	if (!(vma->access & NV_MEM_ACCESS_WO))
+		phys |= (1 << 3);
+	return phys;
+}
+
+static void
+nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
+{
+	u32 comp = (mem->memtype & 0x180) >> 7;
+	u32 block, target;
+	int i;
+
+	/* IGPs don't have real VRAM, re-target to stolen system memory */
+	target = 0;
+	if (nouveau_fb(vma->vm->vmm)->ram.stolen) {
+		phys += nouveau_fb(vma->vm->vmm)->ram.stolen;
+		target = 3;
+	}
+
+	phys  = vm_addr(vma, phys, mem->memtype, target);
+	pte <<= 3;
+	cnt <<= 3;
+
+	while (cnt) {
+		u32 offset_h = upper_32_bits(phys);
+		u32 offset_l = lower_32_bits(phys);
+
+		for (i = 7; i >= 0; i--) {
+			block = 1 << (i + 3);
+			if (cnt >= block && !(pte & (block - 1)))
+				break;
+		}
+		offset_l |= (i << 7);
+
+		phys += block << (vma->node->type - 3);
+		cnt  -= block;
+		if (comp) {
+			u32 tag = mem->tag->offset + ((delta >> 16) * comp);
+			offset_h |= (tag << 17);
+			delta    += block << (vma->node->type - 3);
+		}
+
+		while (block) {
+			nv_wo32(pgt, pte + 0, offset_l);
+			nv_wo32(pgt, pte + 4, offset_h);
+			pte += 8;
+			block -= 8;
+		}
+	}
+}
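+
+/* nv50 PTEs can describe a naturally aligned run of contiguous pages: the
+ * loop above picks the largest power-of-two run that fits (pte and cnt
+ * are in bytes here, so 8 to 1024 bytes is 1 to 128 PTEs), encodes its
+ * size in bits 9:7 of the low word, and writes the same base address into
+ * every PTE of the run so the MMU can treat it as one large mapping.
+ */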
+
+static void
+nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+{
+	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
+	pte <<= 3;
+	while (cnt--) {
+		u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
+		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		pte += 8;
+	}
+}
+
+static void
+nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		nv_wo32(pgt, pte + 0, 0x00000000);
+		nv_wo32(pgt, pte + 4, 0x00000000);
+		pte += 8;
+	}
+}
+
+static void
+nv50_vm_flush(struct nouveau_vm *vm)
+{
+	struct nouveau_engine *engine;
+	int i;
+
+	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+		if (atomic_read(&vm->engref[i])) {
+			engine = nouveau_engine(vm->vmm, i);
+			if (engine && engine->tlb_flush)
+				engine->tlb_flush(engine);
+		}
+	}
+}
+
+void
+nv50_vm_flush_engine(struct nouveau_subdev *subdev, int engine)
+{
+	struct nv50_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_wr32(subdev, 0x100c80, (engine << 16) | 1);
+	if (!nv_wait(subdev, 0x100c80, 0x00000001, 0x00000000))
+		nv_error(subdev, "vm flush timeout: engine %d\n", engine);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int
+nv50_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
+	       u64 mm_offset, struct nouveau_vm **pvm)
+{
+	u32 block = (1 << (vmm->pgt_bits + 12));
+	if (block > length)
+		block = length;
+
+	return nouveau_vm_create(vmm, offset, length, mm_offset, block, pvm);
+}
+
+static int
+nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv50_vmmgr_priv *priv;
+	int ret;
+
+	ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.limit = 1ULL << 40;
+	priv->base.dma_bits = 40;
+	priv->base.pgt_bits  = 29 - 12;
+	priv->base.spg_shift = 12;
+	priv->base.lpg_shift = 16;
+	priv->base.create = nv50_vm_create;
+	priv->base.map_pgt = nv50_vm_map_pgt;
+	priv->base.map = nv50_vm_map;
+	priv->base.map_sg = nv50_vm_map_sg;
+	priv->base.unmap = nv50_vm_unmap;
+	priv->base.flush = nv50_vm_flush;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_vmmgr_oclass = {
+	.handle = NV_SUBDEV(VM, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_vmmgr_ctor,
+		.dtor = _nouveau_vmmgr_dtor,
+		.init = _nouveau_vmmgr_init,
+		.fini = _nouveau_vmmgr_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
new file mode 100644
index 0000000..4c3b0a2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/gpuobj.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/ltcg.h>
+
+struct nvc0_vmmgr_priv {
+	struct nouveau_vmmgr base;
+	spinlock_t lock;
+};
+
+/* Map from compressed to corresponding uncompressed storage type.
+ * The value 0xff represents an invalid storage type.
+ */
+const u8 nvc0_pte_storage_type_map[256] =
+{
+	0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+	0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+	0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+	0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+	0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+	0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+	0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+	0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+	0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+	0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+	0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+	0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+	0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+	0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+	0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+	0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+	0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+};
+
+static void
+nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
+		struct nouveau_gpuobj *pgt[2])
+{
+	u32 pde[2] = { 0, 0 };
+
+	if (pgt[0])
+		pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
+	if (pgt[1])
+		pde[0] = 0x00000001 | (pgt[1]->addr >> 8);
+
+	nv_wo32(pgd, (index * 8) + 0, pde[0]);
+	nv_wo32(pgd, (index * 8) + 4, pde[1]);
+}
+
+static inline u64
+nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+{
+	phys >>= 8;
+
+	phys |= 0x00000001; /* present */
+	if (vma->access & NV_MEM_ACCESS_SYS)
+		phys |= 0x00000002;
+
+	phys |= ((u64)target  << 32);
+	phys |= ((u64)memtype << 36);
+
+	return phys;
+}
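+
+/* PTE layout as assembled above: the physical address is stored shifted
+ * right by 8, bit 0 is the present bit, bit 1 permits system-memory
+ * access, the target aperture lands at bit 32 and the storage type at
+ * bit 36.
+ */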
+
+static void
+nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
+{
+	u64 next = 1 << (vma->node->type - 8);
+
+	phys  = nvc0_vm_addr(vma, phys, mem->memtype, 0);
+	pte <<= 3;
+
+	if (mem->tag) {
+		struct nouveau_ltcg *ltcg =
+			nouveau_ltcg(vma->vm->vmm->base.base.parent);
+		u32 tag = mem->tag->offset + (delta >> 17);
+		phys |= (u64)tag << (32 + 12);
+		next |= (u64)1   << (32 + 12);
+		ltcg->tags_clear(ltcg, tag, cnt);
+	}
+
+	while (cnt--) {
+		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		phys += next;
+		pte  += 8;
+	}
+}
+
+static void
+nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+{
+	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
+	/* compressed storage types are invalid for system memory */
+	u32 memtype = nvc0_pte_storage_type_map[mem->memtype & 0xff];
+
+	pte <<= 3;
+	while (cnt--) {
+		u64 phys = nvc0_vm_addr(vma, *list++, memtype, target);
+		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		pte += 8;
+	}
+}
+
+static void
+nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		nv_wo32(pgt, pte + 0, 0x00000000);
+		nv_wo32(pgt, pte + 4, 0x00000000);
+		pte += 8;
+	}
+}
+
+void
+nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
+{
+	struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
+	unsigned long flags;
+
+	/* looks like maybe a "free flush slots" counter: the faster
+	 * you write to 0x100cbc, the more it decreases
+	 */
+	spin_lock_irqsave(&priv->lock, flags);
+	if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
+		nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
+			 nv_rd32(subdev, 0x100c80), type);
+	}
+
+	nv_wr32(subdev, 0x100cb8, addr >> 8);
+	nv_wr32(subdev, 0x100cbc, 0x80000000 | type);
+
+	/* wait for flush to be queued? */
+	if (!nv_wait(subdev, 0x100c80, 0x00008000, 0x00008000)) {
+		nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
+			 nv_rd32(subdev, 0x100c80), type);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nvc0_vm_flush(struct nouveau_vm *vm)
+{
+	struct nouveau_vm_pgd *vpgd;
+
+	list_for_each_entry(vpgd, &vm->pgd_list, head) {
+		nvc0_vm_flush_engine(nv_subdev(vm->vmm), vpgd->obj->addr, 1);
+	}
+}
+
+static int
+nvc0_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
+	       u64 mm_offset, struct nouveau_vm **pvm)
+{
+	return nouveau_vm_create(vmm, offset, length, mm_offset, 4096, pvm);
+}
+
+static int
+nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nvc0_vmmgr_priv *priv;
+	int ret;
+
+	ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.limit = 1ULL << 40;
+	priv->base.dma_bits = 40;
+	priv->base.pgt_bits  = 27 - 12;
+	priv->base.spg_shift = 12;
+	priv->base.lpg_shift = 17;
+	priv->base.create = nvc0_vm_create;
+	priv->base.map_pgt = nvc0_vm_map_pgt;
+	priv->base.map = nvc0_vm_map;
+	priv->base.map_sg = nvc0_vm_map_sg;
+	priv->base.unmap = nvc0_vm_unmap;
+	priv->base.flush = nvc0_vm_flush;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_vmmgr_oclass = {
+	.handle = NV_SUBDEV(VM, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_vmmgr_ctor,
+		.dtor = _nouveau_vmmgr_dtor,
+		.init = _nouveau_vmmgr_init,
+		.fini = _nouveau_vmmgr_fini,
+	},
+};
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/Makefile b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/Makefile
new file mode 100644
index 0000000..ea3f5b8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/Makefile
@@ -0,0 +1,10 @@
+nouveau-y += dispnv04/arb.o
+nouveau-y += dispnv04/crtc.o
+nouveau-y += dispnv04/cursor.o
+nouveau-y += dispnv04/dac.o
+nouveau-y += dispnv04/dfp.o
+nouveau-y += dispnv04/disp.o
+nouveau-y += dispnv04/hw.o
+nouveau-y += dispnv04/tvmodesnv17.o
+nouveau-y += dispnv04/tvnv04.o
+nouveau-y += dispnv04/tvnv17.o
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/arb.c b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/arb.c
new file mode 100644
index 0000000..2e70462
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright 1993-2003 NVIDIA, Corporation
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "hw.h"
+
+/****************************************************************************\
+*                                                                            *
+* The video arbitration routines calculate some "magic" numbers.  They fix   *
+* the snow seen when accessing the framebuffer without them.                 *
+* It just works (I hope).                                                    *
+*                                                                            *
+\****************************************************************************/
+
+struct nv_fifo_info {
+	int lwm;
+	int burst;
+};
+
+struct nv_sim_state {
+	int pclk_khz;
+	int mclk_khz;
+	int nvclk_khz;
+	int bpp;
+	int mem_page_miss;
+	int mem_latency;
+	int memory_type;
+	int memory_width;
+	int two_heads;
+};
+
+static void
+nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
+{
+	int pagemiss, cas, width, bpp;
+	int nvclks, mclks, pclks, crtpagemiss;
+	int found, mclk_extra, mclk_loop, cbs, m1, p1;
+	int mclk_freq, pclk_freq, nvclk_freq;
+	int us_m, us_n, us_p, crtc_drain_rate;
+	int cpm_us, us_crt, clwm;
+
+	pclk_freq = arb->pclk_khz;
+	mclk_freq = arb->mclk_khz;
+	nvclk_freq = arb->nvclk_khz;
+	pagemiss = arb->mem_page_miss;
+	cas = arb->mem_latency;
+	width = arb->memory_width >> 6;
+	bpp = arb->bpp;
+	cbs = 128;
+
+	pclks = 2;
+	nvclks = 10;
+	mclks = 13 + cas;
+	mclk_extra = 3;
+	found = 0;
+
+	while (!found) {
+		found = 1;
+
+		mclk_loop = mclks + mclk_extra;
+		us_m = mclk_loop * 1000 * 1000 / mclk_freq;
+		us_n = nvclks * 1000 * 1000 / nvclk_freq;
+		us_p = nvclks * 1000 * 1000 / pclk_freq;
+
+		crtc_drain_rate = pclk_freq * bpp / 8;
+		crtpagemiss = 2;
+		crtpagemiss += 1;
+		cpm_us = crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
+		us_crt = cpm_us + us_m + us_n + us_p;
+		clwm = us_crt * crtc_drain_rate / (1000 * 1000);
+		clwm++;
+
+		m1 = clwm + cbs - 512;
+		p1 = m1 * pclk_freq / mclk_freq;
+		p1 = p1 * bpp / 8;
+		if ((p1 < m1 && m1 > 0) || clwm > 519) {
+			found = !mclk_extra;
+			mclk_extra--;
+		}
+		if (clwm < 384)
+			clwm = 384;
+
+		fifo->lwm = clwm;
+		fifo->burst = cbs;
+	}
+}
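+
+/* Worked example with illustrative clocks (not from real hardware): pclk
+ * 135000kHz at 32bpp, mclk 166000kHz, nvclk 200000kHz, CAS 3, pagemiss 10.
+ * mclk_loop = (13 + 3) + 3 = 19, so us_m = 114, us_n = 50, us_p = 74 and
+ * cpm_us = 180 (all ns, despite the _us names), giving us_crt = 418.  With
+ * crtc_drain_rate = 540000 kB/s this makes clwm = 226: below 519, so no
+ * retry, then clamped up to the 384 floor.  Result: lwm = 384, burst = 128.
+ */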
+
+static void
+nv10_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
+{
+	int fill_rate, drain_rate;
+	int pclks, nvclks, mclks, xclks;
+	int pclk_freq, nvclk_freq, mclk_freq;
+	int fill_lat, extra_lat;
+	int max_burst_o, max_burst_l;
+	int fifo_len, min_lwm, max_lwm;
+	const int burst_lat = 80; /* Maximum allowable latency due
+				   * to the CRTC FIFO burst. (ns) */
+
+	pclk_freq = arb->pclk_khz;
+	nvclk_freq = arb->nvclk_khz;
+	mclk_freq = arb->mclk_khz;
+
+	fill_rate = mclk_freq * arb->memory_width / 8; /* kB/s */
+	drain_rate = pclk_freq * arb->bpp / 8; /* kB/s */
+
+	fifo_len = arb->two_heads ? 1536 : 1024; /* B */
+
+	/* Fixed FIFO refill latency. */
+
+	pclks = 4;	/* lwm detect. */
+
+	nvclks = 3	/* lwm -> sync. */
+		+ 2	/* fbi bus cycles (1 req + 1 busy) */
+		+ 1	/* 2 edge sync.  may be very close to edge so
+			 * just put one. */
+		+ 1	/* fbi_d_rdv_n */
+		+ 1	/* Fbi_d_rdata */
+		+ 1;	/* crtfifo load */
+
+	mclks = 1	/* 2 edge sync.  may be very close to edge so
+			 * just put one. */
+		+ 1	/* arb_hp_req */
+		+ 5	/* tiling pipeline */
+		+ 2	/* latency fifo */
+		+ 2	/* memory request to fbio block */
+		+ 7;	/* data returned from fbio block */
+
+	/* Need to accumulate 256 bits for read */
+	mclks += (arb->memory_type == 0 ? 2 : 1)
+		* arb->memory_width / 32;
+
+	fill_lat = mclks * 1000 * 1000 / mclk_freq   /* minimum mclk latency */
+		+ nvclks * 1000 * 1000 / nvclk_freq  /* nvclk latency */
+		+ pclks * 1000 * 1000 / pclk_freq;   /* pclk latency */
+
+	/* Conditional FIFO refill latency. */
+
+	xclks = 2 * arb->mem_page_miss + mclks /* Extra latency due to
+						* the overlay. */
+		+ 2 * arb->mem_page_miss       /* Extra pagemiss latency. */
+		+ (arb->bpp == 32 ? 8 : 4);    /* Margin of error. */
+
+	extra_lat = xclks * 1000 * 1000 / mclk_freq;
+
+	if (arb->two_heads)
+		/* Account for another CRTC. */
+		extra_lat += fill_lat + extra_lat + burst_lat;
+
+	/* FIFO burst */
+
+	/* Max burst not leading to overflows. */
+	max_burst_o = (1 + fifo_len - extra_lat * drain_rate / (1000 * 1000))
+		* (fill_rate / 1000) / ((fill_rate - drain_rate) / 1000);
+	fifo->burst = min(max_burst_o, 1024);
+
+	/* Max burst value with an acceptable latency. */
+	max_burst_l = burst_lat * fill_rate / (1000 * 1000);
+	fifo->burst = min(max_burst_l, fifo->burst);
+
+	fifo->burst = rounddown_pow_of_two(fifo->burst);
+
+	/* FIFO low watermark */
+
+	min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1;
+	max_lwm = fifo_len - fifo->burst
+		+ fill_lat * drain_rate / (1000 * 1000)
+		+ fifo->burst * drain_rate / fill_rate;
+
+	fifo->lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100; /* Empirical. */
+}
+
+static void
+nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
+		int *burst, int *lwm)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nv_fifo_info fifo_data;
+	struct nv_sim_state sim_data;
+	int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
+	int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
+	uint32_t cfg1 = nv_rd32(device, NV04_PFB_CFG1);
+
+	sim_data.pclk_khz = VClk;
+	sim_data.mclk_khz = MClk;
+	sim_data.nvclk_khz = NVClk;
+	sim_data.bpp = bpp;
+	sim_data.two_heads = nv_two_heads(dev);
+	if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
+	    (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
+		uint32_t type;
+
+		pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
+
+		sim_data.memory_type = (type >> 12) & 1;
+		sim_data.memory_width = 64;
+		sim_data.mem_latency = 3;
+		sim_data.mem_page_miss = 10;
+	} else {
+		sim_data.memory_type = nv_rd32(device, NV04_PFB_CFG0) & 0x1;
+		sim_data.memory_width = (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
+		sim_data.mem_latency = cfg1 & 0xf;
+		sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
+	}
+
+	if (nv_device(drm->device)->card_type == NV_04)
+		nv04_calc_arb(&fifo_data, &sim_data);
+	else
+		nv10_calc_arb(&fifo_data, &sim_data);
+
+	*burst = ilog2(fifo_data.burst >> 4);
+	*lwm = fifo_data.lwm >> 3;
+}
+
+static void
+nv20_update_arb(int *burst, int *lwm)
+{
+	unsigned int fifo_size, burst_size, graphics_lwm;
+
+	fifo_size = 2048;
+	burst_size = 512;
+	graphics_lwm = fifo_size - burst_size;
+
+	*burst = ilog2(burst_size >> 5);
+	*lwm = graphics_lwm >> 3;
+}
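+
+/* The encoding used here: the burst field is stored as a power-of-two
+ * exponent, the watermark in 8-byte units; burst_size = 512 gives
+ * ilog2(512 >> 5) = 4, and graphics_lwm = 2048 - 512 = 1536 gives
+ * lwm = 1536 >> 3 = 192 (0xc0).
+ */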
+
+void
+nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (nv_device(drm->device)->card_type < NV_20)
+		nv04_update_arb(dev, vclk, bpp, burst, lwm);
+	else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+		 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+		*burst = 128;
+		*lwm = 0x0480;
+	} else
+		nv20_update_arb(burst, lwm);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/crtc.c
new file mode 100644
index 0000000..0782bd2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -0,0 +1,1072 @@
+/*
+ * Copyright 1993-2003 NVIDIA, Corporation
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "nouveau_bo.h"
+#include "nouveau_gem.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "hw.h"
+#include "nvreg.h"
+#include "nouveau_fbcon.h"
+#include "disp.h"
+
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+
+static int
+nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+			struct drm_framebuffer *old_fb);
+
+static void
+crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
+{
+	NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
+		       crtcstate->CRTC[index]);
+}
+
+static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+
+	regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level;
+	if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) {
+		regp->CRTC[NV_CIO_CRE_CSB] = 0x80;
+		regp->CRTC[NV_CIO_CRE_5B] = nv_crtc->saturation << 2;
+		crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_5B);
+	}
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_CSB);
+}
+
+static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+
+	nv_crtc->sharpness = level;
+	if (level < 0)	/* blur is in hw range 0x3f -> 0x20 */
+		level += 0x40;
+	regp->ramdac_634 = level;
+	NVWriteRAMDAC(crtc->dev, nv_crtc->index, NV_PRAMDAC_634, regp->ramdac_634);
+}
+
+#define PLLSEL_VPLL1_MASK				\
+	(NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL	\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2)
+#define PLLSEL_VPLL2_MASK				\
+	(NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2		\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2)
+#define PLLSEL_TV_MASK					\
+	(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1		\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1		\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2	\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
+
+/* NV4x 0x40.. pll notes:
+ * gpu pll: 0x4000 + 0x4004
+ * ?gpu? pll: 0x4008 + 0x400c
+ * vpll1: 0x4010 + 0x4014
+ * vpll2: 0x4018 + 0x401c
+ * mpll: 0x4020 + 0x4024
+ * mpll: 0x4038 + 0x403c
+ *
+ * the first register of each pair has some unknown details:
+ * bits 0-7: redirected values from elsewhere? (similar to PLL_SETUP_CONTROL?)
+ * bits 20-23: (mpll) something to do with post divider?
+ * bits 28-31: related to single stage mode? (bit 8/12)
+ */
+
+static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_bios *bios = nouveau_bios(drm->device);
+	struct nouveau_clock *clk = nouveau_clock(drm->device);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
+	struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
+	struct nouveau_pll_vals *pv = &regp->pllvals;
+	struct nvbios_pll pll_lim;
+
+	if (nvbios_pll_parse(bios, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0,
+			    &pll_lim))
+		return;
+
+	/* NM2 == 0 is used to determine single stage mode on two stage plls */
+	pv->NM2 = 0;
+
+	/* for newer nv4x the blob uses only the first stage of the vpll below a
+	 * certain clock.  for a certain nv4b this is 150MHz.  since the max
+	 * output frequency of the first stage for this card is 300MHz, it is
+	 * assumed the threshold is given by vco1 maxfreq/2
+	 */
+	/* for early nv4x, specifically nv40 and *some* nv43 (devids 0 and 6,
+	 * not 8, others unknown), the blob always uses both plls.  no problem
+	 * has yet been observed in allowing the use of a single stage pll on all
+	 * nv43 however.  the behaviour of single stage use is untested on nv40
+	 */
+	if (nv_device(drm->device)->chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
+		memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
+
+	if (!clk->pll_calc(clk, &pll_lim, dot_clock, pv))
+		return;
+
+	state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
+
+	/* The blob uses this always, so let's do the same */
+	if (nv_device(drm->device)->card_type == NV_40)
+		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
+	/* again nv40 and some nv43 act more like nv3x as described above */
+	if (nv_device(drm->device)->chipset < 0x41)
+		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
+				 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
+	state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
+
+	if (pv->NM2)
+		NV_DEBUG(drm, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
+			 pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
+	else
+		NV_DEBUG(drm, "vpll: n %d m %d log2p %d\n",
+			 pv->N1, pv->M1, pv->log2P);
+
+	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
+}
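+
+/* Example of the single-stage decision above, with an assumed VCO limit: on
+ * a post-nv40 chipset whose vco1.max_freq is 300MHz, a 108MHz dot clock
+ * satisfies 108000 <= 300000 / 2, so vco2 is zeroed and pll_calc programs
+ * only the first stage (NM2 stays 0, the single-stage marker noted above).
+ */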
+
+static void
+nv_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	unsigned char seq1 = 0, crtc17 = 0;
+	unsigned char crtc1A;
+
+	NV_DEBUG(drm, "Setting dpms mode %d on CRTC %d\n", mode,
+							nv_crtc->index);
+
+	if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
+		return;
+
+	nv_crtc->last_dpms = mode;
+
+	if (nv_two_heads(dev))
+		NVSetOwner(dev, nv_crtc->index);
+
+	/* nv4ref indicates these two RPC1 bits inhibit h/v sync */
+	crtc1A = NVReadVgaCrtc(dev, nv_crtc->index,
+					NV_CIO_CRE_RPC1_INDEX) & ~0xC0;
+	switch (mode) {
+	case DRM_MODE_DPMS_STANDBY:
+		/* Screen: Off; HSync: Off, VSync: On -- Not Supported */
+		seq1 = 0x20;
+		crtc17 = 0x80;
+		crtc1A |= 0x80;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		/* Screen: Off; HSync: On, VSync: Off -- Not Supported */
+		seq1 = 0x20;
+		crtc17 = 0x80;
+		crtc1A |= 0x40;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		/* Screen: Off; HSync: Off, VSync: Off */
+		seq1 = 0x20;
+		crtc17 = 0x00;
+		crtc1A |= 0xC0;
+		break;
+	case DRM_MODE_DPMS_ON:
+	default:
+		/* Screen: On; HSync: On, VSync: On */
+		seq1 = 0x00;
+		crtc17 = 0x80;
+		break;
+	}
+
+	NVVgaSeqReset(dev, nv_crtc->index, true);
+	/* Each head has its own sequencer, so we can turn it off when we want */
+	seq1 |= (NVReadVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX) & ~0x20);
+	NVWriteVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX, seq1);
+	crtc17 |= (NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX) & ~0x80);
+	mdelay(10);
+	NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX, crtc17);
+	NVVgaSeqReset(dev, nv_crtc->index, false);
+
+	NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
+}
+
+static bool
+nv_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
+		   struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void
+nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+	struct drm_framebuffer *fb = crtc->fb;
+
+	/* Calculate our timings */
+	int horizDisplay	= (mode->crtc_hdisplay >> 3)		- 1;
+	int horizStart		= (mode->crtc_hsync_start >> 3) 	+ 1;
+	int horizEnd		= (mode->crtc_hsync_end >> 3)		+ 1;
+	int horizTotal		= (mode->crtc_htotal >> 3)		- 5;
+	int horizBlankStart	= (mode->crtc_hdisplay >> 3)		- 1;
+	int horizBlankEnd	= (mode->crtc_htotal >> 3)		- 1;
+	int vertDisplay		= mode->crtc_vdisplay			- 1;
+	int vertStart		= mode->crtc_vsync_start 		- 1;
+	int vertEnd		= mode->crtc_vsync_end			- 1;
+	int vertTotal		= mode->crtc_vtotal 			- 2;
+	int vertBlankStart	= mode->crtc_vdisplay 			- 1;
+	int vertBlankEnd	= mode->crtc_vtotal			- 1;
+
+	struct drm_encoder *encoder;
+	bool fp_output = false;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+		if (encoder->crtc == crtc &&
+		    (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
+		     nv_encoder->dcb->type == DCB_OUTPUT_TMDS))
+			fp_output = true;
+	}
+
+	if (fp_output) {
+		vertStart = vertTotal - 3;
+		vertEnd = vertTotal - 2;
+		vertBlankStart = vertStart;
+		horizStart = horizTotal - 5;
+		horizEnd = horizTotal - 2;
+		horizBlankEnd = horizTotal + 4;
+#if 0
+		if (dev->overlayAdaptor && nv_device(drm->device)->card_type >= NV_10)
+			/* This reportedly works around some video overlay bandwidth problems */
+			horizTotal += 2;
+#endif
+	}
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vertTotal |= 1;
+
+#if 0
+	ErrorF("horizDisplay: 0x%X \n", horizDisplay);
+	ErrorF("horizStart: 0x%X \n", horizStart);
+	ErrorF("horizEnd: 0x%X \n", horizEnd);
+	ErrorF("horizTotal: 0x%X \n", horizTotal);
+	ErrorF("horizBlankStart: 0x%X \n", horizBlankStart);
+	ErrorF("horizBlankEnd: 0x%X \n", horizBlankEnd);
+	ErrorF("vertDisplay: 0x%X \n", vertDisplay);
+	ErrorF("vertStart: 0x%X \n", vertStart);
+	ErrorF("vertEnd: 0x%X \n", vertEnd);
+	ErrorF("vertTotal: 0x%X \n", vertTotal);
+	ErrorF("vertBlankStart: 0x%X \n", vertBlankStart);
+	ErrorF("vertBlankEnd: 0x%X \n", vertBlankEnd);
+#endif
+
+	/*
+	 * compute correct Hsync & Vsync polarity
+	 */
+	if ((mode->flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))
+		&& (mode->flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) {
+
+		regp->MiscOutReg = 0x23;
+		if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+			regp->MiscOutReg |= 0x40;
+		if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+			regp->MiscOutReg |= 0x80;
+	} else {
+		int vdisplay = mode->vdisplay;
+		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+			vdisplay *= 2;
+		if (mode->vscan > 1)
+			vdisplay *= mode->vscan;
+		if (vdisplay < 400)
+			regp->MiscOutReg = 0xA3;	/* +hsync -vsync */
+		else if (vdisplay < 480)
+			regp->MiscOutReg = 0x63;	/* -hsync +vsync */
+		else if (vdisplay < 768)
+			regp->MiscOutReg = 0xE3;	/* -hsync -vsync */
+		else
+			regp->MiscOutReg = 0x23;	/* +hsync +vsync */
+	}
+
+	regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
+
+	/*
+	 * Time Sequencer
+	 */
+	regp->Sequencer[NV_VIO_SR_RESET_INDEX] = 0x00;
+	/* 0x20 disables the sequencer */
+	if (mode->flags & DRM_MODE_FLAG_CLKDIV2)
+		regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x29;
+	else
+		regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x21;
+	regp->Sequencer[NV_VIO_SR_PLANE_MASK_INDEX] = 0x0F;
+	regp->Sequencer[NV_VIO_SR_CHAR_MAP_INDEX] = 0x00;
+	regp->Sequencer[NV_VIO_SR_MEM_MODE_INDEX] = 0x0E;
+
+	/*
+	 * CRTC
+	 */
+	regp->CRTC[NV_CIO_CR_HDT_INDEX] = horizTotal;
+	regp->CRTC[NV_CIO_CR_HDE_INDEX] = horizDisplay;
+	regp->CRTC[NV_CIO_CR_HBS_INDEX] = horizBlankStart;
+	regp->CRTC[NV_CIO_CR_HBE_INDEX] = (1 << 7) |
+					  XLATE(horizBlankEnd, 0, NV_CIO_CR_HBE_4_0);
+	regp->CRTC[NV_CIO_CR_HRS_INDEX] = horizStart;
+	regp->CRTC[NV_CIO_CR_HRE_INDEX] = XLATE(horizBlankEnd, 5, NV_CIO_CR_HRE_HBE_5) |
+					  XLATE(horizEnd, 0, NV_CIO_CR_HRE_4_0);
+	regp->CRTC[NV_CIO_CR_VDT_INDEX] = vertTotal;
+	regp->CRTC[NV_CIO_CR_OVL_INDEX] = XLATE(vertStart, 9, NV_CIO_CR_OVL_VRS_9) |
+					  XLATE(vertDisplay, 9, NV_CIO_CR_OVL_VDE_9) |
+					  XLATE(vertTotal, 9, NV_CIO_CR_OVL_VDT_9) |
+					  (1 << 4) |
+					  XLATE(vertBlankStart, 8, NV_CIO_CR_OVL_VBS_8) |
+					  XLATE(vertStart, 8, NV_CIO_CR_OVL_VRS_8) |
+					  XLATE(vertDisplay, 8, NV_CIO_CR_OVL_VDE_8) |
+					  XLATE(vertTotal, 8, NV_CIO_CR_OVL_VDT_8);
+	regp->CRTC[NV_CIO_CR_RSAL_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_CELL_HT_INDEX] = ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ? MASK(NV_CIO_CR_CELL_HT_SCANDBL) : 0) |
+					      1 << 6 |
+					      XLATE(vertBlankStart, 9, NV_CIO_CR_CELL_HT_VBS_9);
+	regp->CRTC[NV_CIO_CR_CURS_ST_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_CURS_END_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_SA_HI_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_SA_LO_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_TCOFF_HI_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_TCOFF_LO_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_VRS_INDEX] = vertStart;
+	regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
+	regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
+	/* framebuffer can be larger than crtc scanout area. */
+	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitches[0] / 8;
+	regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
+	regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
+	regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
+	regp->CRTC[NV_CIO_CR_MODE_INDEX] = 0x43;
+	regp->CRTC[NV_CIO_CR_LCOMP_INDEX] = 0xff;
+
+	/*
+	 * Some extended CRTC registers (they are not saved with the rest of the vga regs).
+	 */
+
+	/* framebuffer can be larger than crtc scanout area. */
+	regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
+		XLATE(fb->pitches[0] / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+	regp->CRTC[NV_CIO_CRE_42] =
+		XLATE(fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+	regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
+					    MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
+	regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
+					   XLATE(vertBlankStart, 10, NV_CIO_CRE_LSR_VBS_10) |
+					   XLATE(vertStart, 10, NV_CIO_CRE_LSR_VRS_10) |
+					   XLATE(vertDisplay, 10, NV_CIO_CRE_LSR_VDE_10) |
+					   XLATE(vertTotal, 10, NV_CIO_CRE_LSR_VDT_10);
+	regp->CRTC[NV_CIO_CRE_HEB__INDEX] = XLATE(horizStart, 8, NV_CIO_CRE_HEB_HRS_8) |
+					    XLATE(horizBlankStart, 8, NV_CIO_CRE_HEB_HBS_8) |
+					    XLATE(horizDisplay, 8, NV_CIO_CRE_HEB_HDE_8) |
+					    XLATE(horizTotal, 8, NV_CIO_CRE_HEB_HDT_8);
+	regp->CRTC[NV_CIO_CRE_EBR_INDEX] = XLATE(vertBlankStart, 11, NV_CIO_CRE_EBR_VBS_11) |
+					   XLATE(vertStart, 11, NV_CIO_CRE_EBR_VRS_11) |
+					   XLATE(vertDisplay, 11, NV_CIO_CRE_EBR_VDE_11) |
+					   XLATE(vertTotal, 11, NV_CIO_CRE_EBR_VDT_11);
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		horizTotal = (horizTotal >> 1) & ~1;
+		regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = horizTotal;
+		regp->CRTC[NV_CIO_CRE_HEB__INDEX] |= XLATE(horizTotal, 8, NV_CIO_CRE_HEB_ILC_8);
+	} else
+		regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = 0xff;  /* interlace off */
+
+	/*
+	 * Graphics Display Controller
+	 */
+	regp->Graphics[NV_VIO_GX_SR_INDEX] = 0x00;
+	regp->Graphics[NV_VIO_GX_SREN_INDEX] = 0x00;
+	regp->Graphics[NV_VIO_GX_CCOMP_INDEX] = 0x00;
+	regp->Graphics[NV_VIO_GX_ROP_INDEX] = 0x00;
+	regp->Graphics[NV_VIO_GX_READ_MAP_INDEX] = 0x00;
+	regp->Graphics[NV_VIO_GX_MODE_INDEX] = 0x40; /* 256 color mode */
+	regp->Graphics[NV_VIO_GX_MISC_INDEX] = 0x05; /* map 64k mem + graphic mode */
+	regp->Graphics[NV_VIO_GX_DONT_CARE_INDEX] = 0x0F;
+	regp->Graphics[NV_VIO_GX_BIT_MASK_INDEX] = 0xFF;
+
+	regp->Attribute[0]  = 0x00; /* standard colormap translation */
+	regp->Attribute[1]  = 0x01;
+	regp->Attribute[2]  = 0x02;
+	regp->Attribute[3]  = 0x03;
+	regp->Attribute[4]  = 0x04;
+	regp->Attribute[5]  = 0x05;
+	regp->Attribute[6]  = 0x06;
+	regp->Attribute[7]  = 0x07;
+	regp->Attribute[8]  = 0x08;
+	regp->Attribute[9]  = 0x09;
+	regp->Attribute[10] = 0x0A;
+	regp->Attribute[11] = 0x0B;
+	regp->Attribute[12] = 0x0C;
+	regp->Attribute[13] = 0x0D;
+	regp->Attribute[14] = 0x0E;
+	regp->Attribute[15] = 0x0F;
+	regp->Attribute[NV_CIO_AR_MODE_INDEX] = 0x01; /* Enable graphic mode */
+	/* Non-vga */
+	regp->Attribute[NV_CIO_AR_OSCAN_INDEX] = 0x00;
+	regp->Attribute[NV_CIO_AR_PLANE_INDEX] = 0x0F; /* enable all color planes */
+	regp->Attribute[NV_CIO_AR_HPP_INDEX] = 0x00;
+	regp->Attribute[NV_CIO_AR_CSEL_INDEX] = 0x00;
+}
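+
+/* A worked example of the timing arithmetic at the top of this function,
+ * for a standard 1024x768@60 mode (htotal 1344, hsync 1048-1184, vtotal
+ * 806, vsync 771-777): horizDisplay = 127, horizStart = 132, horizEnd = 149,
+ * horizTotal = 163; vertDisplay = 767, vertStart = 770, vertEnd = 776,
+ * vertTotal = 804, i.e. character clocks (8 pixels) minus the classic VGA
+ * fencepost offsets.
+ */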
+
+/**
+ * Sets up registers for the given mode/adjusted_mode pair.
+ *
+ * The clocks, CRTCs and outputs attached to this CRTC must be off.
+ *
+ * This shouldn't enable any clocks, CRTCs, or outputs, but they should
+ * be easily turned on/off after this.
+ */
+static void
+nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+	struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index];
+	struct drm_encoder *encoder;
+	bool lvds_output = false, tmds_output = false, tv_output = false,
+		off_chip_digital = false;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+		bool digital = false;
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS)
+			digital = lvds_output = true;
+		if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
+			tv_output = true;
+		if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS)
+			digital = tmds_output = true;
+		if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital)
+			off_chip_digital = true;
+	}
+
+	/* Registers not directly related to the (s)vga mode */
+
+	/* What is the meaning of this register? */
+	/* A few popular values are 0x18, 0x1c, 0x38, 0x3c */
+	regp->CRTC[NV_CIO_CRE_ENH_INDEX] = savep->CRTC[NV_CIO_CRE_ENH_INDEX] & ~(1<<5);
+
+	regp->crtc_eng_ctrl = 0;
+	/* Except for rare conditions I2C is enabled on the primary crtc */
+	if (nv_crtc->index == 0)
+		regp->crtc_eng_ctrl |= NV_CRTC_FSEL_I2C;
+#if 0
+	/* Set overlay to desired crtc. */
+	if (dev->overlayAdaptor) {
+		NVPortPrivPtr pPriv = GET_OVERLAY_PRIVATE(dev);
+		if (pPriv->overlayCRTC == nv_crtc->index)
+			regp->crtc_eng_ctrl |= NV_CRTC_FSEL_OVERLAY;
+	}
+#endif
+
+	/* ADDRESS_SPACE_PNVM is the same as setting HCUR_ASI */
+	regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
+			     NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
+			     NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
+	if (nv_device(drm->device)->chipset >= 0x11)
+		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
+
+	/* Unblock some timings */
+	regp->CRTC[NV_CIO_CRE_53] = 0;
+	regp->CRTC[NV_CIO_CRE_54] = 0;
+
+	/* 0x00 is disabled, 0x11 is lvds, 0x22 crt and 0x88 tmds */
+	if (lvds_output)
+		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x11;
+	else if (tmds_output)
+		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x88;
+	else
+		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x22;
+
+	/* These values seem to vary */
+	/* This register seems to be used by the bios to make certain decisions on some G70 cards? */
+	regp->CRTC[NV_CIO_CRE_SCRATCH4__INDEX] = savep->CRTC[NV_CIO_CRE_SCRATCH4__INDEX];
+
+	nv_crtc_set_digital_vibrance(crtc, nv_crtc->saturation);
+
+	/* probably a scratch reg, but kept for cargo-cult purposes:
+	 * bit0: crtc0?, head A
+	 * bit6: lvds, head A
+	 * bit7: (only in X), head A
+	 */
+	if (nv_crtc->index == 0)
+		regp->CRTC[NV_CIO_CRE_4B] = savep->CRTC[NV_CIO_CRE_4B] | 0x80;
+
+	/* The blob seems to take the current value from crtc 0, add 4 to that
+	 * and reuse the old value for crtc 1 */
+	regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = nv04_display(dev)->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
+	if (!nv_crtc->index)
+		regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4;
+
+	/* the blob sometimes sets |= 0x10 (which is the same as setting |=
+	 * 1 << 30 on 0x60.830), for no apparent reason */
+	regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
+
+	if (nv_device(drm->device)->card_type >= NV_30)
+		regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
+
+	regp->crtc_830 = mode->crtc_vdisplay - 3;
+	regp->crtc_834 = mode->crtc_vdisplay - 1;
+
+	if (nv_device(drm->device)->card_type == NV_40)
+		/* This is what the blob does */
+		regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
+
+	if (nv_device(drm->device)->card_type >= NV_30)
+		regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
+
+	if (nv_device(drm->device)->card_type >= NV_10)
+		regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+	else
+		regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+
+	/* Some misc regs */
+	if (nv_device(drm->device)->card_type == NV_40) {
+		regp->CRTC[NV_CIO_CRE_85] = 0xFF;
+		regp->CRTC[NV_CIO_CRE_86] = 0x1;
+	}
+
+	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->fb->depth + 1) / 8;
+	/* Enable slaved mode (called MODE_TV in nv4ref.h) */
+	if (lvds_output || tmds_output || tv_output)
+		regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7);
+
+	/* Generic PRAMDAC regs */
+
+	if (nv_device(drm->device)->card_type >= NV_10)
+		/* Only bit that bios and blob set. */
+		regp->nv10_cursync = (1 << 25);
+
+	regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
+				NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL |
+				NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
+	if (crtc->fb->depth == 16)
+		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+	if (nv_device(drm->device)->chipset >= 0x11)
+		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
+
+	regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
+	regp->tv_setup = 0;
+
+	nv_crtc_set_image_sharpening(crtc, nv_crtc->sharpness);
+
+	/* Some values the blob sets */
+	regp->ramdac_8c0 = 0x100;
+	regp->ramdac_a20 = 0x0;
+	regp->ramdac_a24 = 0xfffff;
+	regp->ramdac_a34 = 0x1;
+}
+
+/**
+ * Sets up registers for the given mode/adjusted_mode pair.
+ *
+ * The clocks, CRTCs and outputs attached to this CRTC must be off.
+ *
+ * This shouldn't enable any clocks, CRTCs, or outputs, but they should
+ * be easily turned on/off after this.
+ */
+static int
+nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+		 struct drm_display_mode *adjusted_mode,
+		 int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
+	drm_mode_debug_printmodeline(adjusted_mode);
+
+	/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
+	nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
+
+	nv_crtc_mode_set_vga(crtc, adjusted_mode);
+	/* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
+	if (nv_device(drm->device)->card_type == NV_40)
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
+	nv_crtc_mode_set_regs(crtc, adjusted_mode);
+	nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
+	return 0;
+}
+
+static void nv_crtc_save(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
+	struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index];
+	struct nv04_mode_state *saved = &nv04_display(dev)->saved_reg;
+	struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index];
+
+	if (nv_two_heads(crtc->dev))
+		NVSetOwner(crtc->dev, nv_crtc->index);
+
+	nouveau_hw_save_state(crtc->dev, nv_crtc->index, saved);
+
+	/* init some state to saved value */
+	state->sel_clk = saved->sel_clk & ~(0x5 << 16);
+	crtc_state->CRTC[NV_CIO_CRE_LCD__INDEX] = crtc_saved->CRTC[NV_CIO_CRE_LCD__INDEX];
+	state->pllsel = saved->pllsel & ~(PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK);
+	crtc_state->gpio_ext = crtc_saved->gpio_ext;
+}
+
+static void nv_crtc_restore(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	int head = nv_crtc->index;
+	uint8_t saved_cr21 = nv04_display(dev)->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];
+
+	if (nv_two_heads(crtc->dev))
+		NVSetOwner(crtc->dev, head);
+
+	nouveau_hw_load_state(crtc->dev, head, &nv04_display(dev)->saved_reg);
+	nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21);
+
+	nv_crtc->last_dpms = NV_DPMS_CLEARED;
+}
+
+static void nv_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
+
+	if (nv_two_heads(dev))
+		NVSetOwner(dev, nv_crtc->index);
+
+	drm_vblank_pre_modeset(dev, nv_crtc->index);
+	funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+	NVBlankScreen(dev, nv_crtc->index, true);
+
+	/* Some more preparation. */
+	NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
+	if (nv_device(drm->device)->card_type == NV_40) {
+		uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
+		NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
+	}
+}
+
+static void nv_crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+	nouveau_hw_load_state(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
+	nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL);
+
+#ifdef __BIG_ENDIAN
+	/* turn on LFB swapping */
+	{
+		uint8_t tmp = NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR);
+		tmp |= MASK(NV_CIO_CRE_RCR_ENDIAN_BIG);
+		NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR, tmp);
+	}
+#endif
+
+	funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+	drm_vblank_post_modeset(dev, nv_crtc->index);
+}
+
+static void nv_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+	if (!nv_crtc)
+		return;
+
+	drm_crtc_cleanup(crtc);
+
+	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+	nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+	kfree(nv_crtc);
+}
+
+static void
+nv_crtc_gamma_load(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = nv_crtc->base.dev;
+	struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs;
+	int i;
+
+	rgbs = (struct rgb *)nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].DAC;
+	for (i = 0; i < 256; i++) {
+		rgbs[i].r = nv_crtc->lut.r[i] >> 8;
+		rgbs[i].g = nv_crtc->lut.g[i] >> 8;
+		rgbs[i].b = nv_crtc->lut.b[i] >> 8;
+	}
+
+	nouveau_hw_load_state_palette(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
+}
+
+static void
+nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
+		  uint32_t size)
+{
+	int end = (start + size > 256) ? 256 : start + size, i;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+	for (i = start; i < end; i++) {
+		nv_crtc->lut.r[i] = r[i];
+		nv_crtc->lut.g[i] = g[i];
+		nv_crtc->lut.b[i] = b[i];
+	}
+
+	/* We need to know the depth before we upload, but it's possible to
+	 * get called before a framebuffer is bound.  If this is the case,
+	 * mark the lut values as dirty by setting depth==0, and it'll be
+	 * uploaded on the first mode_set_base()
+	 */
+	if (!nv_crtc->base.fb) {
+		nv_crtc->lut.depth = 0;
+		return;
+	}
+
+	nv_crtc_gamma_load(crtc);
+}
+
+static int
+nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
+			   struct drm_framebuffer *passed_fb,
+			   int x, int y, bool atomic)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+	struct drm_framebuffer *drm_fb;
+	struct nouveau_framebuffer *fb;
+	int arb_burst, arb_lwm;
+	int ret;
+
+	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
+
+	/* no fb bound */
+	if (!atomic && !crtc->fb) {
+		NV_DEBUG(drm, "No FB bound\n");
+		return 0;
+	}
+
+	/* If atomic, we want to switch to the fb we were passed, so
+	 * now we update pointers to do that.  (We don't pin; just
+	 * assume we're already pinned and update the base address.)
+	 */
+	if (atomic) {
+		drm_fb = passed_fb;
+		fb = nouveau_framebuffer(passed_fb);
+	} else {
+		drm_fb = crtc->fb;
+		fb = nouveau_framebuffer(crtc->fb);
+		/* If not atomic, we can go ahead and pin, and unpin the
+		 * old fb we were passed.
+		 */
+		ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
+		if (ret)
+			return ret;
+
+		if (passed_fb) {
+			struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
+			nouveau_bo_unpin(ofb->nvbo);
+		}
+	}
+
+	nv_crtc->fb.offset = fb->nvbo->bo.offset;
+
+	if (nv_crtc->lut.depth != drm_fb->depth) {
+		nv_crtc->lut.depth = drm_fb->depth;
+		nv_crtc_gamma_load(crtc);
+	}
+
+	/* Update the framebuffer format. */
+	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3;
+	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->fb->depth + 1) / 8;
+	regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+	if (crtc->fb->depth == 16)
+		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX);
+	NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
+		      regp->ramdac_gen_ctrl);
+
+	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitches[0] >> 3;
+	regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
+		XLATE(drm_fb->pitches[0] >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+	regp->CRTC[NV_CIO_CRE_42] =
+		XLATE(drm_fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
+
+	/* Update the framebuffer location. */
+	regp->fb_start = nv_crtc->fb.offset & ~3;
+	regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->bits_per_pixel / 8);
+	nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
+
+	/* Update the arbitration parameters. */
+	nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
+			 &arb_burst, &arb_lwm);
+
+	regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst;
+	regp->CRTC[NV_CIO_CRE_FFLWM__INDEX] = arb_lwm & 0xff;
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
+
+	if (nv_device(drm->device)->card_type >= NV_20) {
+		regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
+		crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
+	}
+
+	return 0;
+}
+
+static int
+nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+			struct drm_framebuffer *old_fb)
+{
+	return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
+}
+
+static int
+nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+			       struct drm_framebuffer *fb,
+			       int x, int y, enum mode_set_atomic state)
+{
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+	struct drm_device *dev = drm->dev;
+
+	if (state == ENTER_ATOMIC_MODE_SET)
+		nouveau_fbcon_save_disable_accel(dev);
+	else
+		nouveau_fbcon_restore_accel(dev);
+
+	return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true);
+}
+
+static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
+			       struct nouveau_bo *dst)
+{
+	int width = nv_cursor_width(dev);
+	uint32_t pixel;
+	int i, j;
+
+	for (i = 0; i < width; i++) {
+		for (j = 0; j < width; j++) {
+			pixel = nouveau_bo_rd32(src, i*64 + j);
+
+			nouveau_bo_wr16(dst, i*width + j, (pixel & 0x80000000) >> 16
+				     | (pixel & 0xf80000) >> 9
+				     | (pixel & 0xf800) >> 6
+				     | (pixel & 0xf8) >> 3);
+		}
+	}
+}
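+
+/* The shifts above pack ARGB8888 into the 1555 cursor format, e.g. source
+ * pixel 0xff3366cc becomes 0x8000 (alpha bit 15) | 0x1800 (red 0x33 >> 3 in
+ * bits 14:10) | 0x0180 (green 0x66 >> 3 in bits 9:5) | 0x0019 (blue
+ * 0xcc >> 3) = 0x9999.
+ */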
+
+static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
+			       struct nouveau_bo *dst)
+{
+	uint32_t pixel;
+	int alpha, i;
+
+	/* nv11+ supports premultiplied (PM), or non-premultiplied (NPM) alpha
+	 * cursors (though NPM in combination with fp dithering may not work on
+	 * nv11, from "nv" driver history)
+	 * NPM mode needs NV_PCRTC_CURSOR_CONFIG_ALPHA_BLEND set and is what the
+	 * blob uses, however we get given PM cursors so we use PM mode
+	 */
+	for (i = 0; i < 64 * 64; i++) {
+		pixel = nouveau_bo_rd32(src, i);
+
+		/* hw gets unhappy if alpha <= rgb values.  for a PM image "less
+		 * than" shouldn't happen; fix "equal to" case by adding one to
+		 * alpha channel (slightly inaccurate, but so is attempting to
+		 * get back to NPM images, due to limits of integer precision)
+		 */
+		alpha = pixel >> 24;
+		if (alpha > 0 && alpha < 255)
+			pixel = (pixel & 0x00ffffff) | ((alpha + 1) << 24);
+
+#ifdef __BIG_ENDIAN
+		{
+			struct nouveau_drm *drm = nouveau_drm(dev);
+
+			if (nv_device(drm->device)->chipset == 0x11) {
+				pixel = ((pixel & 0x000000ff) << 24) |
+					((pixel & 0x0000ff00) << 8) |
+					((pixel & 0x00ff0000) >> 8) |
+					((pixel & 0xff000000) >> 24);
+			}
+		}
+#endif
+
+		nouveau_bo_wr32(dst, i, pixel);
+	}
+}
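+
+/* e.g. half-transparent grey 0x80808080 is stored as 0x81808080, keeping
+ * alpha strictly greater than each colour channel, per the comment above.
+ */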
+
+static int
+nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+		     uint32_t buffer_handle, uint32_t width, uint32_t height)
+{
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+	struct drm_device *dev = drm->dev;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nouveau_bo *cursor = NULL;
+	struct drm_gem_object *gem;
+	int ret = 0;
+
+	if (!buffer_handle) {
+		nv_crtc->cursor.hide(nv_crtc, true);
+		return 0;
+	}
+
+	if (width != 64 || height != 64)
+		return -EINVAL;
+
+	gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
+	if (!gem)
+		return -ENOENT;
+	cursor = nouveau_gem_object(gem);
+
+	ret = nouveau_bo_map(cursor);
+	if (ret)
+		goto out;
+
+	if (nv_device(drm->device)->chipset >= 0x11)
+		nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
+	else
+		nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
+
+	nouveau_bo_unmap(cursor);
+	nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->bo.offset;
+	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
+	nv_crtc->cursor.show(nv_crtc, true);
+out:
+	drm_gem_object_unreference_unlocked(gem);
+	return ret;
+}
+
+static int
+nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+	nv_crtc->cursor.set_pos(nv_crtc, x, y);
+	return 0;
+}
+
+static const struct drm_crtc_funcs nv04_crtc_funcs = {
+	.save = nv_crtc_save,
+	.restore = nv_crtc_restore,
+	.cursor_set = nv04_crtc_cursor_set,
+	.cursor_move = nv04_crtc_cursor_move,
+	.gamma_set = nv_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.page_flip = nouveau_crtc_page_flip,
+	.destroy = nv_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
+	.dpms = nv_crtc_dpms,
+	.prepare = nv_crtc_prepare,
+	.commit = nv_crtc_commit,
+	.mode_fixup = nv_crtc_mode_fixup,
+	.mode_set = nv_crtc_mode_set,
+	.mode_set_base = nv04_crtc_mode_set_base,
+	.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
+	.load_lut = nv_crtc_gamma_load,
+};
+
+int
+nv04_crtc_create(struct drm_device *dev, int crtc_num)
+{
+	struct nouveau_crtc *nv_crtc;
+	int ret, i;
+
+	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
+	if (!nv_crtc)
+		return -ENOMEM;
+
+	for (i = 0; i < 256; i++) {
+		nv_crtc->lut.r[i] = i << 8;
+		nv_crtc->lut.g[i] = i << 8;
+		nv_crtc->lut.b[i] = i << 8;
+	}
+	nv_crtc->lut.depth = 0;
+
+	nv_crtc->index = crtc_num;
+	nv_crtc->last_dpms = NV_DPMS_CLEARED;
+
+	drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs);
+	drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
+	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
+
+	ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
+	if (!ret) {
+		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+		if (!ret) {
+			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+			if (ret)
+				nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+	}
+
+	nv04_cursor_init(nv_crtc);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/cursor.c
new file mode 100644
index 0000000..a810303
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -0,0 +1,70 @@
+#include <drm/drmP.h>
+#include <drm/drm_mode.h>
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "nouveau_crtc.h"
+#include "hw.h"
+
+static void
+nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
+{
+	nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, true);
+}
+
+static void
+nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
+{
+	nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, false);
+}
+
+static void
+nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+	nv_crtc->cursor_saved_x = x;
+	nv_crtc->cursor_saved_y = y;
+	NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
+		      NV_PRAMDAC_CU_START_POS,
+		      XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
+		      XLATE(x, 0, NV_PRAMDAC_CU_START_POS_X));
+}
+
+static void
+crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
+{
+	NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
+		       crtcstate->CRTC[index]);
+}
+
+static void
+nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+	struct drm_device *dev = nv_crtc->base.dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+	struct drm_crtc *crtc = &nv_crtc->base;
+
+	regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] =
+		MASK(NV_CIO_CRE_HCUR_ASI) |
+		XLATE(offset, 17, NV_CIO_CRE_HCUR_ADDR0_ADR);
+	regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] =
+		XLATE(offset, 11, NV_CIO_CRE_HCUR_ADDR1_ADR);
+	if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
+		regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] |=
+			MASK(NV_CIO_CRE_HCUR_ADDR1_CUR_DBL);
+	regp->CRTC[NV_CIO_CRE_HCUR_ADDR2_INDEX] = offset >> 24;
+
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
+	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
+	if (nv_device(drm->device)->card_type == NV_40)
+		nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
+}
+
+int
+nv04_cursor_init(struct nouveau_crtc *crtc)
+{
+	crtc->cursor.set_offset = nv04_cursor_set_offset;
+	crtc->cursor.set_pos = nv04_cursor_set_pos;
+	crtc->cursor.hide = nv04_cursor_hide;
+	crtc->cursor.show = nv04_cursor_show;
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/dac.c b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/dac.c
new file mode 100644
index 0000000..434b920
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2003 NVIDIA, Corporation
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "hw.h"
+#include "nvreg.h"
+
+#include <subdev/bios/gpio.h>
+#include <subdev/gpio.h>
+#include <subdev/timer.h>
+
+int nv04_dac_output_offset(struct drm_encoder *encoder)
+{
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
+	int offset = 0;
+
+	if (dcb->or & (8 | DCB_OUTPUT_C))
+		offset += 0x68;
+	if (dcb->or & (8 | DCB_OUTPUT_B))
+		offset += 0x2000;
+
+	return offset;
+}
+
+/*
+ * arbitrary limit to number of sense oscillations tolerated in one sample
+ * period (observed to be at least 13 in "nvidia")
+ */
+#define MAX_HBLANK_OSC 20
+
+/*
+ * arbitrary limit to number of conflicting sample pairs to tolerate at a
+ * voltage step (observed to be at least 5 in "nvidia")
+ */
+#define MAX_SAMPLE_PAIRS 10
+
+static int sample_load_twice(struct drm_device *dev, bool sense[2])
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_timer *ptimer = nouveau_timer(device);
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		bool sense_a, sense_b, sense_b_prime;
+		int j = 0;
+
+		/*
+		 * wait for bit 0 clear -- out of hblank -- (say reg value 0x4),
+		 * then wait for transition 0x4->0x5->0x4: enter hblank, leave
+		 * hblank again
+		 * use a 10ms timeout (guards against crtc being inactive, in
+		 * which case blank state would never change)
+		 */
+		if (!nouveau_timer_wait_eq(ptimer, 10000000,
+					   NV_PRMCIO_INP0__COLOR,
+					   0x00000001, 0x00000000))
+			return -EBUSY;
+		if (!nouveau_timer_wait_eq(ptimer, 10000000,
+					   NV_PRMCIO_INP0__COLOR,
+					   0x00000001, 0x00000001))
+			return -EBUSY;
+		if (!nouveau_timer_wait_eq(ptimer, 10000000,
+					   NV_PRMCIO_INP0__COLOR,
+					   0x00000001, 0x00000000))
+			return -EBUSY;
+
+		udelay(100);
+		/* when level triggers, sense is _LO_ */
+		sense_a = nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
+
+		/* take another reading until it agrees with sense_a... */
+		do {
+			udelay(100);
+			sense_b = nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
+			if (sense_a != sense_b) {
+				sense_b_prime =
+					nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
+				if (sense_b == sense_b_prime) {
+					/* ... unless two consecutive subsequent
+					 * samples agree; sense_a is replaced */
+					sense_a = sense_b;
+					/* force mis-match so we loop */
+					sense_b = !sense_a;
+				}
+			}
+		} while ((sense_a != sense_b) && ++j < MAX_HBLANK_OSC);
+
+		if (j == MAX_HBLANK_OSC)
+			/* with so much oscillation, default to sense:LO */
+			sense[i] = false;
+		else
+			sense[i] = sense_a;
+	}
+
+	return 0;
+}
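+
+/* Usage note: on success sense[0] and sense[1] hold two independent hblank
+ * samples; the caller below keeps drawing pairs until both agree, guarding
+ * against the residual oscillation these limits tolerate.
+ */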
+
+static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
+						 struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
+	uint8_t saved_palette0[3], saved_palette_mask;
+	uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
+	int i;
+	uint8_t blue;
+	bool sense = true;
+
+	/*
+	 * for this detection to work, there needs to be a mode set up on the
+	 * CRTC.  this is presumed to be the case
+	 */
+
+	if (nv_two_heads(dev))
+		/* only implemented for head A for now */
+		NVSetOwner(dev, 0);
+
+	saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
+
+	saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
+	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
+
+	saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL,
+		      saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
+
+	msleep(10);
+
+	saved_pi = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX,
+		       saved_pi & ~(0x80 | MASK(NV_CIO_CRE_PIXEL_FORMAT)));
+	saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);
+
+	nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
+	for (i = 0; i < 3; i++)
+		saved_palette0[i] = nv_rd08(device, NV_PRMDIO_PALETTE_DATA);
+	saved_palette_mask = nv_rd08(device, NV_PRMDIO_PIXEL_MASK);
+	nv_wr08(device, NV_PRMDIO_PIXEL_MASK, 0);
+
+	saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
+		      (saved_rgen_ctrl & ~(NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
+					   NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM)) |
+		      NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON);
+
+	blue = 8;	/* start of test range */
+
+	do {
+		bool sense_pair[2];
+
+		nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+		nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
+		nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
+		/* testing blue won't find monochrome monitors.  I don't care */
+		nv_wr08(device, NV_PRMDIO_PALETTE_DATA, blue);
+
+		i = 0;
+		/* take sample pairs until both samples in the pair agree */
+		do {
+			if (sample_load_twice(dev, sense_pair))
+				goto out;
+		} while ((sense_pair[0] != sense_pair[1]) &&
+							++i < MAX_SAMPLE_PAIRS);
+
+		if (i == MAX_SAMPLE_PAIRS)
+			/* too much oscillation defaults to LO */
+			sense = false;
+		else
+			sense = sense_pair[0];
+
+	/*
+	 * if sense goes LO before blue ramps to 0x18, monitor is not connected.
+	 * ergo, if blue gets to 0x18, monitor must be connected
+	 */
+	} while (++blue < 0x18 && sense);
+
+out:
+	nv_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
+	nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+	for (i = 0; i < 3; i++)
+		nv_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
+	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
+
+	if (blue == 0x18) {
+		NV_DEBUG(drm, "Load detected on head A\n");
+		return connector_status_connected;
+	}
+
+	return connector_status_disconnected;
+}
+
+uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(device);
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
+	uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
+	uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
+		saved_rtest_ctrl, saved_gpio0 = 0, saved_gpio1 = 0, temp, routput;
+	int head;
+
+#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
+	if (dcb->type == DCB_OUTPUT_TV) {
+		testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
+
+		if (drm->vbios.tvdactestval)
+			testval = drm->vbios.tvdactestval;
+	} else {
+		testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x14050140 (0x94050140 with NOTBLANK) */
+
+		if (drm->vbios.dactestval)
+			testval = drm->vbios.dactestval;
+	}
+
+	saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
+		      saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
+
+	saved_powerctrl_2 = nv_rd32(device, NV_PBUS_POWERCTRL_2);
+
+	nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
+	if (regoffset == 0x68) {
+		saved_powerctrl_4 = nv_rd32(device, NV_PBUS_POWERCTRL_4);
+		nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
+	}
+
+	if (gpio) {
+		saved_gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
+		saved_gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
+		gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
+		gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
+	}
+
+	msleep(4);
+
+	saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+	head = (saved_routput & 0x100) >> 8;
+
+	/* if there's a spare crtc, using it will minimise flicker */
+	if (!(NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX) & 0xC0))
+		head ^= 1;
+
+	/* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
+	routput = (saved_routput & 0xfffffece) | head << 8;
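+	/*
+	 * Mask arithmetic, for reference: ~0xfffffece = 0x131, so the line
+	 * above clears bit 0 (presumably SEL_DACCLK, re-set by the temp | 1
+	 * write below), bits 4-5 and the head-select bit 8, then re-inserts
+	 * the chosen head in bit 8; the 0xfffffeee variant differs only in
+	 * leaving bit 5 untouched.
+	 */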
+
+	if (nv_device(drm->device)->card_type >= NV_40) {
+		if (dcb->type == DCB_OUTPUT_TV)
+			routput |= 0x1a << 16;
+		else
+			routput &= ~(0x1a << 16);
+	}
+
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, routput);
+	msleep(1);
+
+	temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, temp | 1);
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA,
+		      NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK | testval);
+	temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
+		      temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
+	msleep(5);
+
+	sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+	/* do it again just in case it's a residual current */
+	sample &= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+
+	temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
+		      temp & ~NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, 0);
+
+	/* bios does something more complex for restoring, but I think this is good enough */
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
+	if (regoffset == 0x68)
+		nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
+	nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
+
+	if (gpio) {
+		gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
+		gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
+	}
+
+	return sample;
+}
+
+static enum drm_connector_status
+nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
+
+	if (nv04_dac_in_use(encoder))
+		return connector_status_disconnected;
+
+	if (nv17_dac_sample_load(encoder) &
+	    NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
+		NV_DEBUG(drm, "Load detected on output %c\n",
+			 '@' + ffs(dcb->or));
+		return connector_status_connected;
+	} else {
+		return connector_status_disconnected;
+	}
+}
+
+static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	if (nv04_dac_in_use(encoder))
+		return false;
+
+	return true;
+}
+
+static void nv04_dac_prepare(struct drm_encoder *encoder)
+{
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+	struct drm_device *dev = encoder->dev;
+	int head = nouveau_crtc(encoder->crtc)->index;
+
+	helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	nv04_dfp_disable(dev, head);
+}
+
+static void nv04_dac_mode_set(struct drm_encoder *encoder,
+			      struct drm_display_mode *mode,
+			      struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	int head = nouveau_crtc(encoder->crtc)->index;
+
+	if (nv_gf4_disp_arch(dev)) {
+		struct drm_encoder *rebind;
+		uint32_t dac_offset = nv04_dac_output_offset(encoder);
+		uint32_t otherdac;
+
+		/* bits 16-19 are set on some G70 cards,
+		 * but don't seem to have much effect */
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
+			      head << 8 | NV_PRAMDAC_DACCLK_SEL_DACCLK);
+		/* force any other vga encoders to bind to the other crtc */
+		list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) {
+			if (rebind == encoder
+			    || nouveau_encoder(rebind)->dcb->type != DCB_OUTPUT_ANALOG)
+				continue;
+
+			dac_offset = nv04_dac_output_offset(rebind);
+			otherdac = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset);
+			NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
+				      (otherdac & ~0x0100) | (head ^ 1) << 8);
+		}
+	}
+
+	/* This could use refinement for flatpanels, but it should work this way */
+	if (nv_device(drm->device)->chipset < 0x44)
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
+	else
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
+}
+
+static void nv04_dac_commit(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+	helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+	NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
+		 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+		 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+}
+
+void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
+
+	if (nv_gf4_disp_arch(dev)) {
+		uint32_t *dac_users = &nv04_display(dev)->dac_users[ffs(dcb->or) - 1];
+		int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder);
+		uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off);
+
+		if (enable) {
+			*dac_users |= 1 << dcb->index;
+			NVWriteRAMDAC(dev, 0, dacclk_off, dacclk | NV_PRAMDAC_DACCLK_SEL_DACCLK);
+
+		} else {
+			*dac_users &= ~(1 << dcb->index);
+			if (!*dac_users)
+				NVWriteRAMDAC(dev, 0, dacclk_off,
+					dacclk & ~NV_PRAMDAC_DACCLK_SEL_DACCLK);
+		}
+	}
+}
+
+/* Check if the DAC corresponding to 'encoder' is being used by
+ * someone else. */
+bool nv04_dac_in_use(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
+
+	return nv_gf4_disp_arch(encoder->dev) &&
+		(nv04_display(dev)->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index));
+}
+
+static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+
+	if (nv_encoder->last_dpms == mode)
+		return;
+	nv_encoder->last_dpms = mode;
+
+	NV_DEBUG(drm, "Setting dpms mode %d on vga encoder (output %d)\n",
+		 mode, nv_encoder->dcb->index);
+
+	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+static void nv04_dac_save(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+
+	if (nv_gf4_disp_arch(dev))
+		nv_encoder->restore.output = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
+							  nv04_dac_output_offset(encoder));
+}
+
+static void nv04_dac_restore(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+
+	if (nv_gf4_disp_arch(dev))
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder),
+			      nv_encoder->restore.output);
+
+	nv_encoder->last_dpms = NV_DPMS_CLEARED;
+}
+
+static void nv04_dac_destroy(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+	drm_encoder_cleanup(encoder);
+	kfree(nv_encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
+	.dpms = nv04_dac_dpms,
+	.save = nv04_dac_save,
+	.restore = nv04_dac_restore,
+	.mode_fixup = nv04_dac_mode_fixup,
+	.prepare = nv04_dac_prepare,
+	.commit = nv04_dac_commit,
+	.mode_set = nv04_dac_mode_set,
+	.detect = nv04_dac_detect
+};
+
+static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = {
+	.dpms = nv04_dac_dpms,
+	.save = nv04_dac_save,
+	.restore = nv04_dac_restore,
+	.mode_fixup = nv04_dac_mode_fixup,
+	.prepare = nv04_dac_prepare,
+	.commit = nv04_dac_commit,
+	.mode_set = nv04_dac_mode_set,
+	.detect = nv17_dac_detect
+};
+
+static const struct drm_encoder_funcs nv04_dac_funcs = {
+	.destroy = nv04_dac_destroy,
+};
+
+int
+nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry)
+{
+	const struct drm_encoder_helper_funcs *helper;
+	struct nouveau_encoder *nv_encoder = NULL;
+	struct drm_device *dev = connector->dev;
+	struct drm_encoder *encoder;
+
+	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+	if (!nv_encoder)
+		return -ENOMEM;
+
+	encoder = to_drm_encoder(nv_encoder);
+
+	nv_encoder->dcb = entry;
+	nv_encoder->or = ffs(entry->or) - 1;
+
+	if (nv_gf4_disp_arch(dev))
+		helper = &nv17_dac_helper_funcs;
+	else
+		helper = &nv04_dac_helper_funcs;
+
+	drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC);
+	drm_encoder_helper_add(encoder, helper);
+
+	encoder->possible_crtcs = entry->heads;
+	encoder->possible_clones = 0;
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/dfp.c
new file mode 100644
index 0000000..93dd23f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -0,0 +1,720 @@
+/*
+ * Copyright 2003 NVIDIA, Corporation
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "hw.h"
+#include "nvreg.h"
+
+#include <drm/i2c/sil164.h>
+
+#include <subdev/i2c.h>
+
+#define FP_TG_CONTROL_ON  (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |	\
+			   NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |		\
+			   NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
+#define FP_TG_CONTROL_OFF (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE |	\
+			   NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE |	\
+			   NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE)
+
+static inline bool is_fpc_off(uint32_t fpc)
+{
+	return ((fpc & (FP_TG_CONTROL_ON | FP_TG_CONTROL_OFF)) ==
+			FP_TG_CONTROL_OFF);
+}
+
+int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_output *dcbent)
+{
+	/* special case of nv_read_tmds to find crtc associated with an output.
+	 * this does not give a correct answer for off-chip dvi, but there's no
+	 * use for such an answer anyway
+	 */
+	int ramdac = (dcbent->or & DCB_OUTPUT_C) >> 2;
+
+	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL,
+		      NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4);
+	return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac;
+}
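+
+/* Cross-reference for the XOR above: nv04_dfp_bind_head() below writes 0x88
+ * (bit 3 set) to TMDS register 0x04 when the bound head differs from the
+ * ramdac, and 0x80 otherwise, so reading bit 3 back and XORing it with the
+ * ramdac index recovers the bound head.
+ */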
+
+void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_output *dcbent,
+			int head, bool dl)
+{
+	/* The BIOS scripts don't do this for us, sadly.
+	 * Luckily we do know the values ;-)
+	 *
+	 * (the old "head < 0 forces a setting with the overrideval" convention
+	 * appears to predate this signature; the function now always receives
+	 * a valid head)
+	 */
+
+	int ramdac = (dcbent->or & DCB_OUTPUT_C) >> 2;
+	uint8_t tmds04 = 0x80;
+
+	if (head != ramdac)
+		tmds04 = 0x88;
+
+	if (dcbent->type == DCB_OUTPUT_LVDS)
+		tmds04 |= 0x01;
+
+	nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04);
+
+	if (dl)	/* dual link */
+		nv_write_tmds(dev, dcbent->or, 1, 0x04, tmds04 ^ 0x08);
+}
+
+void nv04_dfp_disable(struct drm_device *dev, int head)
+{
+	struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg;
+
+	if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) &
+	    FP_TG_CONTROL_ON) {
+		/* digital remnants must be cleaned before new crtc
+		 * values are programmed.  the delay gives the vga
+		 * circuitry time to realise it's in control again
+		 */
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
+			      FP_TG_CONTROL_OFF);
+		msleep(50);
+	}
+	/* don't inadvertently turn it on when state written later */
+	crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
+	crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] &=
+		~NV_CIO_CRE_LCD_ROUTE_MASK;
+}
+
+void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_crtc *crtc;
+	struct nouveau_crtc *nv_crtc;
+	uint32_t *fpc;
+
+	if (mode == DRM_MODE_DPMS_ON) {
+		nv_crtc = nouveau_crtc(encoder->crtc);
+		fpc = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].fp_control;
+
+		if (is_fpc_off(*fpc)) {
+			/* using the saved value is ok, as (is_digital && dpms_on &&
+			 * fp_control==OFF) is (at present) *only* true when
+			 * fpc's most recent change was made by the "off" code below
+			 */
+			*fpc = nv_crtc->dpms_saved_fp_control;
+		}
+
+		nv_crtc->fp_users |= 1 << nouveau_encoder(encoder)->dcb->index;
+		NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_FP_TG_CONTROL, *fpc);
+	} else {
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			nv_crtc = nouveau_crtc(crtc);
+			fpc = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].fp_control;
+
+			nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index);
+			if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) {
+				nv_crtc->dpms_saved_fp_control = *fpc;
+				/* cut the FP output */
+				*fpc &= ~FP_TG_CONTROL_ON;
+				*fpc |= FP_TG_CONTROL_OFF;
+				NVWriteRAMDAC(dev, nv_crtc->index,
+					      NV_PRAMDAC_FP_TG_CONTROL, *fpc);
+			}
+		}
+	}
+}
+
+static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
+	struct drm_encoder *slave;
+
+	if (dcb->type != DCB_OUTPUT_TMDS || dcb->location == DCB_LOC_ON_CHIP)
+		return NULL;
+
+	/* Some BIOSes (e.g. the one in a Quadro FX1000) report several
+	 * TMDS transmitters at the same I2C address, in the same I2C
+	 * bus. This can still work because in that case one of them is
+	 * always hard-wired to a reasonable configuration using straps,
+	 * and the other one needs to be programmed.
+	 *
+	 * I don't think there's a way to know which is which, even the
+	 * blob programs the one exposed via I2C for *both* heads, so
+	 * let's do the same.
+	 */
+	list_for_each_entry(slave, &dev->mode_config.encoder_list, head) {
+		struct dcb_output *slave_dcb = nouveau_encoder(slave)->dcb;
+
+		if (slave_dcb->type == DCB_OUTPUT_TMDS && get_slave_funcs(slave) &&
+		    slave_dcb->tmdsconf.slave_addr == dcb->tmdsconf.slave_addr)
+			return slave;
+	}
+
+	return NULL;
+}
+
+static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
+
+	if (!nv_connector->native_mode ||
+	    nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
+	    mode->hdisplay > nv_connector->native_mode->hdisplay ||
+	    mode->vdisplay > nv_connector->native_mode->vdisplay) {
+		nv_encoder->mode = *adjusted_mode;
+
+	} else {
+		nv_encoder->mode = *nv_connector->native_mode;
+		adjusted_mode->clock = nv_connector->native_mode->clock;
+	}
+
+	return true;
+}
+
+static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
+				     struct nouveau_encoder *nv_encoder, int head)
+{
+	struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
+	uint32_t bits1618 = nv_encoder->dcb->or & DCB_OUTPUT_A ? 0x10000 : 0x40000;
+
+	if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP)
+		return;
+
+	/* SEL_CLK is only used on the primary ramdac.
+	 * It toggles spread spectrum PLL output and sets the bindings of PLLs
+	 * to heads on digital outputs
+	 */
+	if (head)
+		state->sel_clk |= bits1618;
+	else
+		state->sel_clk &= ~bits1618;
+
+	/* nv30:
+	 *	bit 0		NVClk spread spectrum on/off
+	 *	bit 2		MemClk spread spectrum on/off
+	 * 	bit 4		PixClk1 spread spectrum on/off toggle
+	 * 	bit 6		PixClk2 spread spectrum on/off toggle
+	 *
+	 * nv40 (observations from bios behaviour and mmio traces):
+	 * 	bits 4&6	as for nv30
+	 * 	bits 5&7	head dependent as for bits 4&6, but do not appear with 4&6;
+	 * 			maybe a different spread mode
+	 * 	bits 8&10	seen on dual-link dvi outputs, purpose unknown (set by POST scripts)
+	 * 	The logic behind turning spread spectrum on/off in the first place,
+	 * 	and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table
+	 * 	entry has the necessary info)
+	 */
+	if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS && nv04_display(dev)->saved_reg.sel_clk & 0xf0) {
+		int shift = (nv04_display(dev)->saved_reg.sel_clk & 0x50) ? 0 : 1;
+
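+		/* e.g. if the BIOS left a bit of the 5/7 pair set (sel_clk &
+		 * 0xa0, so the & 0x50 test fails), shift is 1 and the head
+		 * bit below lands in bits 5/7 rather than 4/6, matching
+		 * whichever spread-mode pair was in use */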
+		state->sel_clk &= ~0xf0;
+		state->sel_clk |= (head ? 0x40 : 0x10) << shift;
+	}
+}
+
+static void nv04_dfp_prepare(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+	struct drm_device *dev = encoder->dev;
+	int head = nouveau_crtc(encoder->crtc)->index;
+	struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg;
+	uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX];
+	uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX];
+
+	helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);
+
+	*cr_lcd = (*cr_lcd & ~NV_CIO_CRE_LCD_ROUTE_MASK) | 0x3;
+
+	if (nv_two_heads(dev)) {
+		if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
+			*cr_lcd |= head ? 0x0 : 0x8;
+		else {
+			*cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
+			if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS)
+				*cr_lcd |= 0x30;
+			if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
+				/* avoid being connected to both crtcs */
+				*cr_lcd_oth &= ~0x30;
+				NVWriteVgaCrtc(dev, head ^ 1,
+					       NV_CIO_CRE_LCD__INDEX,
+					       *cr_lcd_oth);
+			}
+		}
+	}
+}
+
+
+static void nv04_dfp_mode_set(struct drm_encoder *encoder,
+			      struct drm_display_mode *mode,
+			      struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+	struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index];
+	struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_display_mode *output_mode = &nv_encoder->mode;
+	struct drm_connector *connector = &nv_connector->base;
+	uint32_t mode_ratio, panel_ratio;
+
+	NV_DEBUG(drm, "Output mode on CRTC %d:\n", nv_crtc->index);
+	drm_mode_debug_printmodeline(output_mode);
+
+	/* Initialize the FP registers in this CRTC. */
+	regp->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
+	regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
+	if (!nv_gf4_disp_arch(dev) ||
+	    (output_mode->hsync_start - output_mode->hdisplay) >=
+					drm->vbios.digital_min_front_porch)
+		regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
+	else
+		regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - drm->vbios.digital_min_front_porch - 1;
+	regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
+	regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
+	regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
+	regp->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - 1;
+
+	regp->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
+	regp->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
+	regp->fp_vert_regs[FP_CRTC] = output_mode->vtotal - 5 - 1;
+	regp->fp_vert_regs[FP_SYNC_START] = output_mode->vsync_start - 1;
+	regp->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
+	regp->fp_vert_regs[FP_VALID_START] = 0;
+	regp->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - 1;
+
+	/* bit 26: a bit seen on some g7x, with no discernible purpose as yet */
+	regp->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+			   (savep->fp_control & (1 << 26 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG));
+	/* Deal with vsync/hsync polarity */
+	/* LVDS screens do set this, but modes with +ve syncs are very rare */
+	if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
+		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
+	if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
+		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
+	/* panel scaling first, as native would get set otherwise */
+	if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
+	    nv_connector->scaling_mode == DRM_MODE_SCALE_CENTER)	/* panel handles it */
+		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER;
+	else if (adjusted_mode->hdisplay == output_mode->hdisplay &&
+		 adjusted_mode->vdisplay == output_mode->vdisplay) /* native mode */
+		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
+	else /* gpu needs to scale */
+		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
+	if (nv_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
+		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
+	if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
+	    output_mode->clock > 165000)
+		regp->fp_control |= (2 << 24);
+	if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) {
+		bool duallink = false, dummy;
+		if (nv_connector->edid &&
+		    nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
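+			/* byte 121 of a SPWG-format EDID presumably encodes
+			 * the LVDS channel count, hence the == 2 test below
+			 * for dual link */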
+			duallink = (((u8 *)nv_connector->edid)[121] == 2);
+		} else {
+			nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+						      &duallink, &dummy);
+		}
+
+		if (duallink)
+			regp->fp_control |= (8 << 28);
+	} else
+	if (output_mode->clock > 165000)
+		regp->fp_control |= (8 << 28);
+
+	regp->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
+			   NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
+			   NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
+			   NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
+			   NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
+			   NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
+			   NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
+
+	/* We want automatic scaling */
+	regp->fp_debug_1 = 0;
+	/* This can override HTOTAL and VTOTAL */
+	regp->fp_debug_2 = 0;
+
+	/* Use 20.12 fixed point format to avoid floats */
+	mode_ratio = (1 << 12) * adjusted_mode->hdisplay / adjusted_mode->vdisplay;
+	panel_ratio = (1 << 12) * output_mode->hdisplay / output_mode->vdisplay;
+	/* if ratios are equal, SCALE_ASPECT will automatically (and correctly)
+	 * get treated the same as SCALE_FULLSCREEN */
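+	/*
+	 * Worked example (illustrative figures): a 1024x768 mode on a
+	 * 1280x768 panel gives mode_ratio = 4096 * 1024 / 768 = 5461
+	 * (~4:3 in 20.12) and panel_ratio = 4096 * 1280 / 768 = 6826
+	 * (~5:3), so mode_ratio < panel_ratio and the first branch below
+	 * scales the X axis at the Y scale factor.
+	 */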
+	if (nv_connector->scaling_mode == DRM_MODE_SCALE_ASPECT &&
+	    mode_ratio != panel_ratio) {
+		uint32_t diff, scale;
+		bool divide_by_2 = nv_gf4_disp_arch(dev);
+
+		if (mode_ratio < panel_ratio) {
+			/* vertical needs to expand to glass size (automatic);
+			 * horizontal needs to be scaled at the vertical scale
+			 * factor to maintain aspect */
+
+			scale = (1 << 12) * adjusted_mode->vdisplay / output_mode->vdisplay;
+			regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
+					   XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
+
+			/* restrict area of screen used, horizontally */
+			diff = output_mode->hdisplay -
+			       output_mode->vdisplay * mode_ratio / (1 << 12);
+			regp->fp_horiz_regs[FP_VALID_START] += diff / 2;
+			regp->fp_horiz_regs[FP_VALID_END] -= diff / 2;
+		}
+
+		if (mode_ratio > panel_ratio) {
+			/* horizontal needs to expand to glass size (automatic);
+			 * vertical needs to be scaled at the horizontal scale
+			 * factor to maintain aspect */
+
+			scale = (1 << 12) * adjusted_mode->hdisplay / output_mode->hdisplay;
+			regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
+					   XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE);
+
+			/* restrict area of screen used, vertically */
+			diff = output_mode->vdisplay -
+			       (1 << 12) * output_mode->hdisplay / mode_ratio;
+			regp->fp_vert_regs[FP_VALID_START] += diff / 2;
+			regp->fp_vert_regs[FP_VALID_END] -= diff / 2;
+		}
+	}
+
+	/* Output property. */
+	if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
+	    (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
+	     encoder->crtc->fb->depth > connector->display_info.bpc * 3)) {
+		if (nv_device(drm->device)->chipset == 0x11)
+			regp->dither = savep->dither | 0x00010000;
+		else {
+			int i;
+			regp->dither = savep->dither | 0x00000001;
+			for (i = 0; i < 3; i++) {
+				regp->dither_regs[i] = 0xe4e4e4e4;
+				regp->dither_regs[i + 3] = 0x44444444;
+			}
+		}
+	} else {
+		if (nv_device(drm->device)->chipset != 0x11) {
+			/* reset them */
+			int i;
+			for (i = 0; i < 3; i++) {
+				regp->dither_regs[i] = savep->dither_regs[i];
+				regp->dither_regs[i + 3] = savep->dither_regs[i + 3];
+			}
+		}
+		regp->dither = savep->dither;
+	}
+
+	regp->fp_margin_color = 0;
+}
+
+static void nv04_dfp_commit(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct dcb_output *dcbe = nv_encoder->dcb;
+	int head = nouveau_crtc(encoder->crtc)->index;
+	struct drm_encoder *slave_encoder;
+
+	if (dcbe->type == DCB_OUTPUT_TMDS)
+		run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
+	else if (dcbe->type == DCB_OUTPUT_LVDS)
+		call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock);
+
+	/* update fp_control state for any changes made by scripts,
+	 * so correct value is written at DPMS on */
+	nv04_display(dev)->mode_reg.crtc_reg[head].fp_control =
+		NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+
+	/* This could use refinement for flatpanels, but it should work this way */
+	if (nv_device(drm->device)->chipset < 0x44)
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
+	else
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
+
+	/* Init external transmitters */
+	slave_encoder = get_tmds_slave(encoder);
+	if (slave_encoder)
+		get_slave_funcs(slave_encoder)->mode_set(
+			slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
+
+	helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+	NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
+		 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+		 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+}
+
+static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
+{
+#ifdef __powerpc__
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
+
+	/* BIOS scripts usually take care of the backlight, thanks
+	 * Apple for your consistency.
+	 */
+	if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 ||
+	    dev->pci_device == 0x0189 || dev->pci_device == 0x0329) {
+		if (mode == DRM_MODE_DPMS_ON) {
+			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
+			nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
+		} else {
+			nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
+			nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
+		}
+	}
+#endif
+}
+
+static inline bool is_powersaving_dpms(int mode)
+{
+	return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
+}
+
+static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms);
+
+	if (nv_encoder->last_dpms == mode)
+		return;
+	nv_encoder->last_dpms = mode;
+
+	NV_DEBUG(drm, "Setting dpms mode %d on lvds encoder (output %d)\n",
+		 mode, nv_encoder->dcb->index);
+
+	if (was_powersaving && is_powersaving_dpms(mode))
+		return;
+
+	if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
+		/* when removing an output, crtc may not be set, but PANEL_OFF
+		 * must still be run
+		 */
+		int head = crtc ? nouveau_crtc(crtc)->index :
+			   nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
+
+		if (mode == DRM_MODE_DPMS_ON) {
+			call_lvds_script(dev, nv_encoder->dcb, head,
+					 LVDS_PANEL_ON, nv_encoder->mode.clock);
+		} else
+			/* pxclk of 0 is fine for PANEL_OFF, and for a
+			 * disconnected LVDS encoder there is no native_mode
+			 */
+			call_lvds_script(dev, nv_encoder->dcb, head,
+					 LVDS_PANEL_OFF, 0);
+	}
+
+	nv04_dfp_update_backlight(encoder, mode);
+	nv04_dfp_update_fp_control(encoder, mode);
+
+	if (mode == DRM_MODE_DPMS_ON)
+		nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index);
+	else {
+		nv04_display(dev)->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
+		nv04_display(dev)->mode_reg.sel_clk &= ~0xf0;
+	}
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
+}
+
+static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+	if (nv_encoder->last_dpms == mode)
+		return;
+	nv_encoder->last_dpms = mode;
+
+	NV_DEBUG(drm, "Setting dpms mode %d on tmds encoder (output %d)\n",
+		 mode, nv_encoder->dcb->index);
+
+	nv04_dfp_update_backlight(encoder, mode);
+	nv04_dfp_update_fp_control(encoder, mode);
+}
+
+static void nv04_dfp_save(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+
+	if (nv_two_heads(dev))
+		nv_encoder->restore.head =
+			nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
+}
+
+static void nv04_dfp_restore(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	int head = nv_encoder->restore.head;
+
+	if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) {
+		struct nouveau_connector *connector =
+			nouveau_encoder_connector_get(nv_encoder);
+
+		if (connector && connector->native_mode)
+			call_lvds_script(dev, nv_encoder->dcb, head,
+					 LVDS_PANEL_ON,
+					 connector->native_mode->clock);
+
+	} else if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS) {
+		int clock = nouveau_hw_pllvals_to_clk
+					(&nv04_display(dev)->saved_reg.crtc_reg[head].pllvals);
+
+		run_tmds_table(dev, nv_encoder->dcb, head, clock);
+	}
+
+	nv_encoder->last_dpms = NV_DPMS_CLEARED;
+}
+
+static void nv04_dfp_destroy(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+	if (get_slave_funcs(encoder))
+		get_slave_funcs(encoder)->destroy(encoder);
+
+	drm_encoder_cleanup(encoder);
+	kfree(nv_encoder);
+}
+
+static void nv04_tmds_slave_init(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_i2c_port *port = i2c->find(i2c, 2);
+	struct i2c_board_info info[] = {
+		{
+			.type = "sil164",
+			.addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38),
+			.platform_data = &(struct sil164_encoder_params) {
+				SIL164_INPUT_EDGE_RISING
+			}
+		},
+		{ }
+	};
+	int type;
+
+	if (!nv_gf4_disp_arch(dev) || !port ||
+	    get_tmds_slave(encoder))
+		return;
+
+	type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL);
+	if (type < 0)
+		return;
+
+	drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
+			     &port->adapter, &info[type]);
+}
+
+static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
+	.dpms = nv04_lvds_dpms,
+	.save = nv04_dfp_save,
+	.restore = nv04_dfp_restore,
+	.mode_fixup = nv04_dfp_mode_fixup,
+	.prepare = nv04_dfp_prepare,
+	.commit = nv04_dfp_commit,
+	.mode_set = nv04_dfp_mode_set,
+	.detect = NULL,
+};
+
+static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = {
+	.dpms = nv04_tmds_dpms,
+	.save = nv04_dfp_save,
+	.restore = nv04_dfp_restore,
+	.mode_fixup = nv04_dfp_mode_fixup,
+	.prepare = nv04_dfp_prepare,
+	.commit = nv04_dfp_commit,
+	.mode_set = nv04_dfp_mode_set,
+	.detect = NULL,
+};
+
+static const struct drm_encoder_funcs nv04_dfp_funcs = {
+	.destroy = nv04_dfp_destroy,
+};
+
+int
+nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry)
+{
+	const struct drm_encoder_helper_funcs *helper;
+	struct nouveau_encoder *nv_encoder = NULL;
+	struct drm_encoder *encoder;
+	int type;
+
+	switch (entry->type) {
+	case DCB_OUTPUT_TMDS:
+		type = DRM_MODE_ENCODER_TMDS;
+		helper = &nv04_tmds_helper_funcs;
+		break;
+	case DCB_OUTPUT_LVDS:
+		type = DRM_MODE_ENCODER_LVDS;
+		helper = &nv04_lvds_helper_funcs;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+	if (!nv_encoder)
+		return -ENOMEM;
+
+	encoder = to_drm_encoder(nv_encoder);
+
+	nv_encoder->dcb = entry;
+	nv_encoder->or = ffs(entry->or) - 1;
+
+	drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type);
+	drm_encoder_helper_add(encoder, helper);
+
+	encoder->possible_crtcs = entry->heads;
+	encoder->possible_clones = 0;
+
+	if (entry->type == DCB_OUTPUT_TMDS &&
+	    entry->location != DCB_LOC_ON_CHIP)
+		nv04_tmds_slave_init(encoder);
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/disp.c b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/disp.c
new file mode 100644
index 0000000..4908d3f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "hw.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+
+#include <subdev/i2c.h>
+
+int
+nv04_display_early_init(struct drm_device *dev)
+{
+	/* ensure vblank interrupts are off; they can't be enabled until
+	 * drm_vblank has been initialised
+	 */
+	NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
+	if (nv_two_heads(dev))
+		NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
+
+	return 0;
+}
+
+void
+nv04_display_late_takedown(struct drm_device *dev)
+{
+}
+
+int
+nv04_display_create(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct dcb_table *dcb = &drm->vbios.dcb;
+	struct drm_connector *connector, *ct;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+	struct nv04_display *disp;
+	int i, ret;
+
+	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+	if (!disp)
+		return -ENOMEM;
+
+	nouveau_display(dev)->priv = disp;
+	nouveau_display(dev)->dtor = nv04_display_destroy;
+	nouveau_display(dev)->init = nv04_display_init;
+	nouveau_display(dev)->fini = nv04_display_fini;
+
+	nouveau_hw_save_vga_fonts(dev, 1);
+
+	ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, 0xd1500000,
+				 NV04_DISP_CLASS, NULL, 0, &disp->core);
+	if (ret)
+		return ret;
+
+	nv04_crtc_create(dev, 0);
+	if (nv_two_heads(dev))
+		nv04_crtc_create(dev, 1);
+
+	for (i = 0; i < dcb->entries; i++) {
+		struct dcb_output *dcbent = &dcb->entry[i];
+
+		connector = nouveau_connector_create(dev, dcbent->connector);
+		if (IS_ERR(connector))
+			continue;
+
+		switch (dcbent->type) {
+		case DCB_OUTPUT_ANALOG:
+			ret = nv04_dac_create(connector, dcbent);
+			break;
+		case DCB_OUTPUT_LVDS:
+		case DCB_OUTPUT_TMDS:
+			ret = nv04_dfp_create(connector, dcbent);
+			break;
+		case DCB_OUTPUT_TV:
+			if (dcbent->location == DCB_LOC_ON_CHIP)
+				ret = nv17_tv_create(connector, dcbent);
+			else
+				ret = nv04_tv_create(connector, dcbent);
+			break;
+		default:
+			NV_WARN(drm, "DCB type %d not known\n", dcbent->type);
+			continue;
+		}
+
+		if (ret)
+			continue;
+	}
+
+	list_for_each_entry_safe(connector, ct,
+				 &dev->mode_config.connector_list, head) {
+		if (!connector->encoder_ids[0]) {
+			NV_WARN(drm, "%s has no encoders, removing\n",
+				drm_get_connector_name(connector));
+			connector->funcs->destroy(connector);
+		}
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+		nv_encoder->i2c = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+	}
+
+	/* Save previous state */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		crtc->funcs->save(crtc);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct drm_encoder_helper_funcs *func = encoder->helper_private;
+
+		func->save(encoder);
+	}
+
+	return 0;
+}
+
+void
+nv04_display_destroy(struct drm_device *dev)
+{
+	struct nv04_display *disp = nv04_display(dev);
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+
+	/* Turn every CRTC off. */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct drm_mode_set modeset = {
+			.crtc = crtc,
+		};
+
+		drm_mode_set_config_internal(&modeset);
+	}
+
+	/* Restore state */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct drm_encoder_helper_funcs *func = encoder->helper_private;
+
+		func->restore(encoder);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		crtc->funcs->restore(crtc);
+
+	nouveau_hw_save_vga_fonts(dev, 0);
+
+	nouveau_display(dev)->priv = NULL;
+	kfree(disp);
+}
+
+int
+nv04_display_init(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+
+	/* meh.. modeset apparently doesn't set up all the regs and depends
+	 * on pre-existing state, so for now load the state of the card as it
+	 * was *before* nouveau loaded, and then do a modeset.
+	 *
+	 * the best thing to do is probably to make the save/restore routines
+	 * not save/restore "pre-load" state, but be more general so we can
+	 * save on suspend too.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct drm_encoder_helper_funcs *func = encoder->helper_private;
+
+		func->restore(encoder);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		crtc->funcs->restore(crtc);
+
+	return 0;
+}
+
+void
+nv04_display_fini(struct drm_device *dev)
+{
+	/* disable vblank interrupts */
+	NVWriteCRTC(dev, 0, NV_PCRTC_INTR_EN_0, 0);
+	if (nv_two_heads(dev))
+		NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/disp.h b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/disp.h
new file mode 100644
index 0000000..a0a031d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -0,0 +1,185 @@
+#ifndef __NV04_DISPLAY_H__
+#define __NV04_DISPLAY_H__
+
+#include <subdev/bios/pll.h>
+
+#include "nouveau_display.h"
+
+enum nv04_fp_display_regs {
+	FP_DISPLAY_END,
+	FP_TOTAL,
+	FP_CRTC,
+	FP_SYNC_START,
+	FP_SYNC_END,
+	FP_VALID_START,
+	FP_VALID_END
+};
+
+struct nv04_crtc_reg {
+	unsigned char MiscOutReg;
+	uint8_t CRTC[0xa0];
+	uint8_t CR58[0x10];
+	uint8_t Sequencer[5];
+	uint8_t Graphics[9];
+	uint8_t Attribute[21];
+	unsigned char DAC[768];
+
+	/* PCRTC regs */
+	uint32_t fb_start;
+	uint32_t crtc_cfg;
+	uint32_t cursor_cfg;
+	uint32_t gpio_ext;
+	uint32_t crtc_830;
+	uint32_t crtc_834;
+	uint32_t crtc_850;
+	uint32_t crtc_eng_ctrl;
+
+	/* PRAMDAC regs */
+	uint32_t nv10_cursync;
+	struct nouveau_pll_vals pllvals;
+	uint32_t ramdac_gen_ctrl;
+	uint32_t ramdac_630;
+	uint32_t ramdac_634;
+	uint32_t tv_setup;
+	uint32_t tv_vtotal;
+	uint32_t tv_vskew;
+	uint32_t tv_vsync_delay;
+	uint32_t tv_htotal;
+	uint32_t tv_hskew;
+	uint32_t tv_hsync_delay;
+	uint32_t tv_hsync_delay2;
+	uint32_t fp_horiz_regs[7];
+	uint32_t fp_vert_regs[7];
+	uint32_t dither;
+	uint32_t fp_control;
+	uint32_t dither_regs[6];
+	uint32_t fp_debug_0;
+	uint32_t fp_debug_1;
+	uint32_t fp_debug_2;
+	uint32_t fp_margin_color;
+	uint32_t ramdac_8c0;
+	uint32_t ramdac_a20;
+	uint32_t ramdac_a24;
+	uint32_t ramdac_a34;
+	uint32_t ctv_regs[38];
+};
+
+struct nv04_output_reg {
+	uint32_t output;
+	int head;
+};
+
+struct nv04_mode_state {
+	struct nv04_crtc_reg crtc_reg[2];
+	uint32_t pllsel;
+	uint32_t sel_clk;
+};
+
+struct nv04_display {
+	struct nv04_mode_state mode_reg;
+	struct nv04_mode_state saved_reg;
+	uint32_t saved_vga_font[4][16384];
+	uint32_t dac_users[4];
+	struct nouveau_object *core;
+};
+
+static inline struct nv04_display *
+nv04_display(struct drm_device *dev)
+{
+	return nouveau_display(dev)->priv;
+}
+
+/* nv04_display.c */
+int nv04_display_early_init(struct drm_device *);
+void nv04_display_late_takedown(struct drm_device *);
+int nv04_display_create(struct drm_device *);
+void nv04_display_destroy(struct drm_device *);
+int nv04_display_init(struct drm_device *);
+void nv04_display_fini(struct drm_device *);
+
+/* nv04_crtc.c */
+int nv04_crtc_create(struct drm_device *, int index);
+
+/* nv04_dac.c */
+int nv04_dac_create(struct drm_connector *, struct dcb_output *);
+uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
+int nv04_dac_output_offset(struct drm_encoder *encoder);
+void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
+bool nv04_dac_in_use(struct drm_encoder *encoder);
+
+/* nv04_dfp.c */
+int nv04_dfp_create(struct drm_connector *, struct dcb_output *);
+int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_output *dcbent);
+void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_output *dcbent,
+			       int head, bool dl);
+void nv04_dfp_disable(struct drm_device *dev, int head);
+void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
+
+/* nv04_tv.c */
+int nv04_tv_identify(struct drm_device *dev, int i2c_index);
+int nv04_tv_create(struct drm_connector *, struct dcb_output *);
+
+/* nv17_tv.c */
+int nv17_tv_create(struct drm_connector *, struct dcb_output *);
+
+static inline bool
+nv_two_heads(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	const int impl = dev->pci_device & 0x0ff0;
+
+	if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
+	    impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
+		return true;
+
+	return false;
+}
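+
+/* Of the implementations excluded above, 0x01a0 matches CHIPSET_NFORCE as
+ * defined in hw.c; 0x0100, 0x0150 and 0x0200 are presumably the single-head
+ * NV10, NV15 and NV20 parts.
+ */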
+
+static inline bool
+nv_gf4_disp_arch(struct drm_device *dev)
+{
+	return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
+}
+
+static inline bool
+nv_two_reg_pll(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	const int impl = dev->pci_device & 0x0ff0;
+
+	if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
+		return true;
+	return false;
+}
+
+static inline bool
+nv_match_device(struct drm_device *dev, unsigned device,
+		unsigned sub_vendor, unsigned sub_device)
+{
+	return dev->pdev->device == device &&
+		dev->pdev->subsystem_vendor == sub_vendor &&
+		dev->pdev->subsystem_device == sub_device;
+}
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+
+static inline void
+nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
+			    struct dcb_output *outp, int crtc)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nvbios_init init = {
+		.subdev = nv_subdev(bios),
+		.bios = bios,
+		.offset = table,
+		.outp = outp,
+		.crtc = crtc,
+		.execute = 1,
+	};
+
+	nvbios_exec(&init);
+}
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/hw.c b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/hw.c
new file mode 100644
index 0000000..973056b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include "nouveau_drm.h"
+#include "hw.h"
+
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+
+#define CHIPSET_NFORCE 0x01a0
+#define CHIPSET_NFORCE2 0x01f0
+
+/*
+ * misc hw access wrappers/control functions
+ */
+
+void
+NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value)
+{
+	NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
+	NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value);
+}
+
+uint8_t
+NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index)
+{
+	NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
+	return NVReadPRMVIO(dev, head, NV_PRMVIO_SR);
+}
+
+void
+NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value)
+{
+	NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
+	NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value);
+}
+
+uint8_t
+NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
+{
+	NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
+	return NVReadPRMVIO(dev, head, NV_PRMVIO_GX);
+}
+
+/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
+ * it affects only the 8 bit vga io regs, which we access using mmio at
+ * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
+ * in general, the set value of cr44 does not matter: reg access works as
+ * expected and values can be set for the appropriate head by using a 0x2000
+ * offset as required
+ * however:
+ * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
+ *    cr44 must be set to 0 or 3 for accessing values on the correct head
+ *    through the common 0xc03c* addresses
+ * b) in tied mode (4) head B is programmed to the values set on head A, and
+ *    access using the head B addresses can have strange results, ergo we leave
+ *    tied mode in init once we know to what cr44 should be restored on exit
+ *
+ * the owner parameter is slightly abused:
+ * 0 and 1 are treated as head values and so the set value is (owner * 3)
+ * other values are treated as literal values to set
+ */
+void
+NVSetOwner(struct drm_device *dev, int owner)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (owner == 1)
+		owner *= 3;
+
+	if (nv_device(drm->device)->chipset == 0x11) {
+		/* This might seem stupid, but the blob does it and
+		 * omitting it often locks the system up.
+		 */
+		NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
+		NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX);
+	}
+
+	/* CR44 is always changed on CRTC0 */
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
+
+	if (nv_device(drm->device)->chipset == 0x11) {	/* set me harder */
+		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
+		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
+	}
+}
+
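+/* VGA sequencer register 1 (the clocking mode register, NV_VIO_SR_CLOCK_INDEX)
+ * uses bit 5 (0x20) as the standard screen-disable bit; NVBlankScreen toggles
+ * it under a sequencer reset.
+ */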
+void
+NVBlankScreen(struct drm_device *dev, int head, bool blank)
+{
+	unsigned char seq1;
+
+	if (nv_two_heads(dev))
+		NVSetOwner(dev, head);
+
+	seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
+
+	NVVgaSeqReset(dev, head, true);
+	if (blank)
+		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
+	else
+		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
+	NVVgaSeqReset(dev, head, false);
+}
+
+/*
+ * PLL getting
+ */
+
+static void
+nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
+		      uint32_t pll2, struct nouveau_pll_vals *pllvals)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	/* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */
+
+	/* log2P is masked with 0x7, as it is never more than 7; nv30/35 only use 3 bits */
+	pllvals->log2P = (pll1 >> 16) & 0x7;
+	pllvals->N2 = pllvals->M2 = 1;
+
+	if (reg1 <= 0x405c) {
+		pllvals->NM1 = pll2 & 0xffff;
+		/* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */
+		if (!(pll1 & 0x1100))
+			pllvals->NM2 = pll2 >> 16;
+	} else {
+		pllvals->NM1 = pll1 & 0xffff;
+		if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
+			pllvals->NM2 = pll2 & 0xffff;
+		else if (nv_device(drm->device)->chipset == 0x30 || nv_device(drm->device)->chipset == 0x35) {
+			pllvals->M1 &= 0xf; /* only 4 bits */
+			if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
+				pllvals->M2 = (pll1 >> 4) & 0x7;
+				pllvals->N2 = ((pll1 >> 21) & 0x18) |
+					      ((pll1 >> 19) & 0x7);
+			}
+		}
+	}
+}
+
+int
+nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
+		       struct nouveau_pll_vals *pllvals)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	uint32_t reg1, pll1, pll2 = 0;
+	struct nvbios_pll pll_lim;
+	int ret;
+
+	ret = nvbios_pll_parse(bios, plltype, &pll_lim);
+	if (ret || !(reg1 = pll_lim.reg))
+		return -ENOENT;
+
+	pll1 = nv_rd32(device, reg1);
+	if (reg1 <= 0x405c)
+		pll2 = nv_rd32(device, reg1 + 4);
+	else if (nv_two_reg_pll(dev)) {
+		uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
+
+		pll2 = nv_rd32(device, reg2);
+	}
+
+	if (nv_device(drm->device)->card_type == NV_40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
+		uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
+
+		/* check whether vpll has been forced into single stage mode */
+		if (reg1 == NV_PRAMDAC_VPLL_COEFF) {
+			if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE)
+				pll2 = 0;
+		} else
+			if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE)
+				pll2 = 0;
+	}
+
+	nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);
+	pllvals->refclk = pll_lim.refclk;
+	return 0;
+}
+
+int
+nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
+{
+	/* Avoid divide by zero if called at an inappropriate time */
+	if (!pv->M1 || !pv->M2)
+		return 0;
+
+	return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P;
+}
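+
+/* Worked example of the formula above, with illustrative figures: assuming a
+ * 13500 kHz reference, N1 = 168, M1 = 7, N2 = M2 = 1 and log2P = 1 gives
+ * 168 * 13500 / 7 >> 1 = 162000 kHz, i.e. a 162 MHz clock.
+ */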
+
+int
+nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
+{
+	struct nouveau_pll_vals pllvals;
+	int ret;
+
+	if (plltype == PLL_MEMORY &&
+	    (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+		uint32_t mpllP;
+
+		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+		if (!mpllP)
+			mpllP = 4;
+
+		return 400000 / mpllP;
+	} else
+	if (plltype == PLL_MEMORY &&
+	    (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+		uint32_t clock;
+
+		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
+		return clock;
+	}
+
+	ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+	if (ret)
+		return ret;
+
+	return nouveau_hw_pllvals_to_clk(&pllvals);
+}
+
+static void
+nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
+{
+	/* the vpll on an unused head can come up with a random value, way
+	 * beyond the pll limits.  for some reason this causes the chip to
+	 * lock up when reading the dac palette regs, so set a valid pll here
+	 * when such a condition is detected.  only seen on nv11 to date
+	 */
+
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_clock *clk = nouveau_clock(device);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nvbios_pll pll_lim;
+	struct nouveau_pll_vals pv;
+	enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;
+
+	if (nvbios_pll_parse(bios, pll, &pll_lim))
+		return;
+	nouveau_hw_get_pllvals(dev, pll, &pv);
+
+	if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
+	    pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
+	    pv.log2P <= pll_lim.max_p)
+		return;
+
+	NV_WARN(drm, "VPLL %d outwith limits, attempting to fix\n", head + 1);
+
+	/* set lowest clock within static limits */
+	pv.M1 = pll_lim.vco1.max_m;
+	pv.N1 = pll_lim.vco1.min_n;
+	pv.log2P = pll_lim.max_p_usable;
+	clk->pll_prog(clk, pll_lim.reg, &pv);
+}
+
+/*
+ * vga font save/restore
+ */
+
+static void nouveau_vga_font_io(struct drm_device *dev,
+				void __iomem *iovram,
+				bool save, unsigned plane)
+{
+	unsigned i;
+
+	NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
+	NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
+	for (i = 0; i < 16384; i++) {
+		if (save) {
+			nv04_display(dev)->saved_vga_font[plane][i] =
+					ioread32_native(iovram + i * 4);
+		} else {
+			iowrite32_native(nv04_display(dev)->saved_vga_font[plane][i],
+							iovram + i * 4);
+		}
+	}
+}
+
+void
+nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	uint8_t misc, gr4, gr5, gr6, seq2, seq4;
+	bool graphicsmode;
+	unsigned plane;
+	void __iomem *iovram;
+
+	if (nv_two_heads(dev))
+		NVSetOwner(dev, 0);
+
+	NVSetEnablePalette(dev, 0, true);
+	graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1;
+	NVSetEnablePalette(dev, 0, false);
+
+	if (graphicsmode) /* graphics mode => framebuffer => no need to save */
+		return;
+
+	NV_INFO(drm, "%sing VGA fonts\n", save ? "Sav" : "Restor");
+
+	/* map the first 64KiB of VRAM, which holds the VGA fonts etc */
+	iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
+	if (!iovram) {
+		NV_ERROR(drm, "Failed to map VRAM, "
+					"cannot save/restore VGA fonts.\n");
+		return;
+	}
+
+	if (nv_two_heads(dev))
+		NVBlankScreen(dev, 1, true);
+	NVBlankScreen(dev, 0, true);
+
+	/* save control regs */
+	misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ);
+	seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX);
+	seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX);
+	gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX);
+	gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX);
+	gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX);
+
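+	/* program a standard VGA font-access configuration: colour I/O
+	 * addressing (misc 0x67), planar sequential memory with chain-4
+	 * off (seq4 = 0x6), read/write mode 0 (gr5 = 0x0) and the 64KiB
+	 * graphics map at 0xa0000 (gr6 = 0x5) */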
+	NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67);
+	NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6);
+	NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0);
+	NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5);
+
+	/* store font in planes 0..3 */
+	for (plane = 0; plane < 4; plane++)
+		nouveau_vga_font_io(dev, iovram, save, plane);
+
+	/* restore control regs */
+	NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc);
+	NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4);
+	NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5);
+	NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6);
+	NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2);
+	NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4);
+
+	if (nv_two_heads(dev))
+		NVBlankScreen(dev, 1, false);
+	NVBlankScreen(dev, 0, false);
+
+	iounmap(iovram);
+}
+
+/*
+ * mode state save/load
+ */
+
+static void
+rd_cio_state(struct drm_device *dev, int head,
+	     struct nv04_crtc_reg *crtcstate, int index)
+{
+	crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index);
+}
+
+static void
+wr_cio_state(struct drm_device *dev, int head,
+	     struct nv04_crtc_reg *crtcstate, int index)
+{
+	NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]);
+}
+
+static void
+nv_save_state_ramdac(struct drm_device *dev, int head,
+		     struct nv04_mode_state *state)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+	int i;
+
+	if (nv_device(drm->device)->card_type >= NV_10)
+		regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
+
+	nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
+	state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
+	if (nv_two_heads(dev))
+		state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
+	if (nv_device(drm->device)->chipset == 0x11)
+		regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
+
+	regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
+
+	if (nv_gf4_disp_arch(dev))
+		regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
+	if (nv_device(drm->device)->chipset >= 0x30)
+		regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
+
+	regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
+	regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL);
+	regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW);
+	regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY);
+	regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL);
+	regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW);
+	regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY);
+	regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2);
+
+	for (i = 0; i < 7; i++) {
+		uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
+		regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg);
+		regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20);
+	}
+
+	if (nv_gf4_disp_arch(dev)) {
+		regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER);
+		for (i = 0; i < 3; i++) {
+			regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4);
+			regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4);
+		}
+	}
+
+	regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+	regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0);
+	if (!nv_gf4_disp_arch(dev) && head == 0) {
+		/* early chips don't allow access to PRAMDAC_TMDS_* without
+		 * the head A FPCLK on (nv11 even locks up) */
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 &
+			      ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK);
+	}
+	regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1);
+	regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2);
+
+	regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR);
+
+	if (nv_gf4_disp_arch(dev))
+		regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
+
+	if (nv_device(drm->device)->card_type == NV_40) {
+		regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
+		regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
+		regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
+
+		for (i = 0; i < 38; i++)
+			regp->ctv_regs[i] = NVReadRAMDAC(dev, head,
+							 NV_PRAMDAC_CTV + 4*i);
+	}
+}
+
+static void
+nv_load_state_ramdac(struct drm_device *dev, int head,
+		     struct nv04_mode_state *state)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_clock *clk = nouveau_clock(drm->device);
+	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
+	int i;
+
+	if (nv_device(drm->device)->card_type >= NV_10)
+		NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
+
+	clk->pll_prog(clk, pllreg, &regp->pllvals);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
+	if (nv_two_heads(dev))
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
+	if (nv_device(drm->device)->chipset == 0x11)
+		NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
+
+	if (nv_gf4_disp_arch(dev))
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
+	if (nv_device(drm->device)->chipset >= 0x30)
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2);
+
+	for (i = 0; i < 7; i++) {
+		uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
+
+		NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]);
+		NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]);
+	}
+
+	if (nv_gf4_disp_arch(dev)) {
+		NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither);
+		for (i = 0; i < 3; i++) {
+			NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]);
+			NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]);
+		}
+	}
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2);
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color);
+
+	if (nv_gf4_disp_arch(dev))
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
+
+	if (nv_device(drm->device)->card_type == NV_40) {
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
+		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
+
+		for (i = 0; i < 38; i++)
+			NVWriteRAMDAC(dev, head,
+				      NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]);
+	}
+}
+
+static void
+nv_save_state_vga(struct drm_device *dev, int head,
+		  struct nv04_mode_state *state)
+{
+	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+	int i;
+
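+	/* standard VGA state: the misc output register plus 25 CRTC, 21
+	 * attribute controller, 9 graphics controller and 5 sequencer regs */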
+	regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ);
+
+	for (i = 0; i < 25; i++)
+		rd_cio_state(dev, head, regp, i);
+
+	NVSetEnablePalette(dev, head, true);
+	for (i = 0; i < 21; i++)
+		regp->Attribute[i] = NVReadVgaAttr(dev, head, i);
+	NVSetEnablePalette(dev, head, false);
+
+	for (i = 0; i < 9; i++)
+		regp->Graphics[i] = NVReadVgaGr(dev, head, i);
+
+	for (i = 0; i < 5; i++)
+		regp->Sequencer[i] = NVReadVgaSeq(dev, head, i);
+}
+
+static void
+nv_load_state_vga(struct drm_device *dev, int head,
+		  struct nv04_mode_state *state)
+{
+	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+	int i;
+
+	NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg);
+
+	for (i = 0; i < 5; i++)
+		NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]);
+
+	nv_lock_vga_crtc_base(dev, head, false);
+	for (i = 0; i < 25; i++)
+		wr_cio_state(dev, head, regp, i);
+	nv_lock_vga_crtc_base(dev, head, true);
+
+	for (i = 0; i < 9; i++)
+		NVWriteVgaGr(dev, head, i, regp->Graphics[i]);
+
+	NVSetEnablePalette(dev, head, true);
+	for (i = 0; i < 21; i++)
+		NVWriteVgaAttr(dev, head, i, regp->Attribute[i]);
+	NVSetEnablePalette(dev, head, false);
+}
+
+static void
+nv_save_state_ext(struct drm_device *dev, int head,
+		  struct nv04_mode_state *state)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+	int i;
+
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
+
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
+
+	if (nv_device(drm->device)->card_type >= NV_20)
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
+
+	if (nv_device(drm->device)->card_type >= NV_30)
+		rd_cio_state(dev, head, regp, 0x9f);
+
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
+
+	if (nv_device(drm->device)->card_type >= NV_10) {
+		regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
+		regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
+
+		if (nv_device(drm->device)->card_type >= NV_30)
+			regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
+
+		if (nv_device(drm->device)->card_type == NV_40)
+			regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
+
+		if (nv_two_heads(dev))
+			regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL);
+		regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG);
+	}
+
+	regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG);
+
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
+	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
+	if (nv_device(drm->device)->card_type >= NV_10) {
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
+	}
+	/* NV11 and NV20 don't have this, they stop at 0x52. */
+	if (nv_gf4_disp_arch(dev)) {
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_42);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
+
+		for (i = 0; i < 0x10; i++)
+			regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_59);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_5B);
+
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_85);
+		rd_cio_state(dev, head, regp, NV_CIO_CRE_86);
+	}
+
+	regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START);
+}
+
+static void
+nv_load_state_ext(struct drm_device *dev, int head,
+		  struct nv04_mode_state *state)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_timer *ptimer = nouveau_timer(device);
+	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+	uint32_t reg900;
+	int i;
+
+	if (nv_device(drm->device)->card_type >= NV_10) {
+		if (nv_two_heads(dev))
+			/* setting ENGINE_CTRL (EC) *must* come before
+			 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
+			 * EC that should not be overwritten by writing stale EC
+			 */
+			NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
+
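+		/* quiesce the PVIDEO overlay unit: stop it, mask its
+		 * interrupts and zero its buffer offsets/limits before the
+		 * CRTC state is reloaded */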
+		nv_wr32(device, NV_PVIDEO_STOP, 1);
+		nv_wr32(device, NV_PVIDEO_INTR_EN, 0);
+		nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
+		nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
+		nv_wr32(device, NV_PVIDEO_LIMIT(0), 0); //drm->fb_available_size - 1);
+		nv_wr32(device, NV_PVIDEO_LIMIT(1), 0); //drm->fb_available_size - 1);
+		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), 0); //drm->fb_available_size - 1);
+		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), 0); //drm->fb_available_size - 1);
+		nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
+
+		NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
+		NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
+		NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
+
+		if (nv_device(drm->device)->card_type >= NV_30)
+			NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
+
+		if (nv_device(drm->device)->card_type == NV_40) {
+			NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
+
+			reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
+			if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
+				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
+			else
+				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
+		}
+	}
+
+	NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg);
+
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
+
+	if (nv_device(drm->device)->card_type >= NV_20)
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
+
+	if (nv_device(drm->device)->card_type >= NV_30)
+		wr_cio_state(dev, head, regp, 0x9f);
+
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
+	if (nv_device(drm->device)->card_type == NV_40)
+		nv_fix_nv40_hw_cursor(dev, head);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
+
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
+	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
+	if (nv_device(drm->device)->card_type >= NV_10) {
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
+	}
+	/* NV11 and NV20 stop at 0x52. */
+	if (nv_gf4_disp_arch(dev)) {
+		if (nv_device(drm->device)->card_type == NV_10) {
+			/* Not waiting for vertical retrace before modifying
+			   CRE_53/CRE_54 causes lockups. */
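+			/* INP0 bit 3 is the vertical retrace flag: wait
+			 * (with a ~650ms timeout) for it to assert, then
+			 * to clear again */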
+			nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+			nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+		}
+
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_54);
+
+		for (i = 0; i < 0x10; i++)
+			NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_59);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_5B);
+
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_85);
+		wr_cio_state(dev, head, regp, NV_CIO_CRE_86);
+	}
+
+	NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
+}
+
+static void
+nv_save_state_palette(struct drm_device *dev, int head,
+		      struct nv04_mode_state *state)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	int head_offset = head * NV_PRMDIO_SIZE, i;
+
+	nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
+				NV_PRMDIO_PIXEL_MASK_MASK);
+	nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
+
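+	/* 256 palette entries x 3 bytes (R, G, B); the DAC auto-increments
+	 * its address after every third data access */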
+	for (i = 0; i < 768; i++) {
+		state->crtc_reg[head].DAC[i] = nv_rd08(device,
+				NV_PRMDIO_PALETTE_DATA + head_offset);
+	}
+
+	NVSetEnablePalette(dev, head, false);
+}
+
+void
+nouveau_hw_load_state_palette(struct drm_device *dev, int head,
+			      struct nv04_mode_state *state)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	int head_offset = head * NV_PRMDIO_SIZE, i;
+
+	nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
+				NV_PRMDIO_PIXEL_MASK_MASK);
+	nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
+
+	for (i = 0; i < 768; i++) {
+		nv_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset,
+				state->crtc_reg[head].DAC[i]);
+	}
+
+	NVSetEnablePalette(dev, head, false);
+}
+
+void nouveau_hw_save_state(struct drm_device *dev, int head,
+			   struct nv04_mode_state *state)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (nv_device(drm->device)->chipset == 0x11)
+		/* NB: no attempt is made to restore the bad pll later on */
+		nouveau_hw_fix_bad_vpll(dev, head);
+	nv_save_state_ramdac(dev, head, state);
+	nv_save_state_vga(dev, head, state);
+	nv_save_state_palette(dev, head, state);
+	nv_save_state_ext(dev, head, state);
+}
+
+void nouveau_hw_load_state(struct drm_device *dev, int head,
+			   struct nv04_mode_state *state)
+{
+	NVVgaProtect(dev, head, true);
+	nv_load_state_ramdac(dev, head, state);
+	nv_load_state_ext(dev, head, state);
+	nouveau_hw_load_state_palette(dev, head, state);
+	nv_load_state_vga(dev, head, state);
+	NVVgaProtect(dev, head, false);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/hw.h b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/hw.h
new file mode 100644
index 0000000..eeb70d9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -0,0 +1,409 @@
+/*
+ * Copyright 2008 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_HW_H__
+#define __NOUVEAU_HW_H__
+
+#include <drm/drmP.h>
+#include "disp.h"
+#include "nvreg.h"
+
+#include <subdev/bios/pll.h>
+
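+/* Bitfields in nvreg.h are described as "high:low" pairs, e.g.
+ * NV_CIO_CRE_LSR_HBE_6 is 4:4.  Since ?: parses as "cond ? high : low",
+ * (1 ? field) yields the high bit and (0 ? field) the low bit, which lets
+ * MASK() and XLATE() derive shifts and masks directly from those
+ * definitions (MASK(NV_CIO_CRE_LSR_HBE_6) == 0x10).
+ */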
+#define MASK(field) ( \
+	(0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field))
+
+#define XLATE(src, srclowbit, outfield) ( \
+	(((src) >> (srclowbit)) << (0 ? outfield)) & MASK(outfield))
+
+void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value);
+uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index);
+void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value);
+uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index);
+void NVSetOwner(struct drm_device *, int owner);
+void NVBlankScreen(struct drm_device *, int head, bool blank);
+int nouveau_hw_get_pllvals(struct drm_device *, enum nvbios_pll_type plltype,
+			   struct nouveau_pll_vals *pllvals);
+int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals);
+int nouveau_hw_get_clock(struct drm_device *, enum nvbios_pll_type plltype);
+void nouveau_hw_save_vga_fonts(struct drm_device *, bool save);
+void nouveau_hw_save_state(struct drm_device *, int head,
+			   struct nv04_mode_state *state);
+void nouveau_hw_load_state(struct drm_device *, int head,
+			   struct nv04_mode_state *state);
+void nouveau_hw_load_state_palette(struct drm_device *, int head,
+				   struct nv04_mode_state *state);
+
+/* nouveau_calc.c */
+extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
+			     int *burst, int *lwm);
+
+static inline uint32_t NVReadCRTC(struct drm_device *dev,
+					int head, uint32_t reg)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	uint32_t val;
+	if (head)
+		reg += NV_PCRTC0_SIZE;
+	val = nv_rd32(device, reg);
+	return val;
+}
+
+static inline void NVWriteCRTC(struct drm_device *dev,
+					int head, uint32_t reg, uint32_t val)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	if (head)
+		reg += NV_PCRTC0_SIZE;
+	nv_wr32(device, reg, val);
+}
+
+static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
+					int head, uint32_t reg)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	uint32_t val;
+	if (head)
+		reg += NV_PRAMDAC0_SIZE;
+	val = nv_rd32(device, reg);
+	return val;
+}
+
+static inline void NVWriteRAMDAC(struct drm_device *dev,
+					int head, uint32_t reg, uint32_t val)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	if (head)
+		reg += NV_PRAMDAC0_SIZE;
+	nv_wr32(device, reg, val);
+}
+
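+/* The TMDS registers are indirect: an address goes to FP_TMDS_CONTROL and
+ * data moves through FP_TMDS_DATA.  The >> 2 below assumes DCB_OUTPUT_C is
+ * bit 2 of the "or" mask, routing outputs on OR C to the second ramdac.
+ */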
+static inline uint8_t nv_read_tmds(struct drm_device *dev,
+					int or, int dl, uint8_t address)
+{
+	int ramdac = (or & DCB_OUTPUT_C) >> 2;
+
+	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8,
+	NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address);
+	return NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8);
+}
+
+static inline void nv_write_tmds(struct drm_device *dev,
+					int or, int dl, uint8_t address,
+					uint8_t data)
+{
+	int ramdac = (or & DCB_OUTPUT_C) >> 2;
+
+	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data);
+	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address);
+}
+
+static inline void NVWriteVgaCrtc(struct drm_device *dev,
+					int head, uint8_t index, uint8_t value)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+	nv_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
+}
+
+static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
+					int head, uint8_t index)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	uint8_t val;
+	nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+	val = nv_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
+	return val;
+}
+
+/* CR57 and CR58 are a fun pair of regs. CR57 provides an index (0-0xf) for CR58.
+ * I suspect they in fact do nothing, but are merely a way to carry useful
+ * per-head variables around.
+ *
+ * Known uses:
+ * CR57		CR58
+ * 0x00		index to the appropriate dcb entry (or 7f for inactive)
+ * 0x02		dcb entry's "or" value (or 00 for inactive)
+ * 0x03		bit0 set for dual link (LVDS, possibly elsewhere too)
+ * 0x08 or 0x09	pxclk in MHz
+ * 0x0f		laptop panel info -	low nibble for PEXTDEV_BOOT_0 strap
+ * 					high nibble for xlat strap value
+ */
+
+static inline void
+NVWriteVgaCrtc5758(struct drm_device *dev, int head, uint8_t index, uint8_t value)
+{
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_58, value);
+}
+
+static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_t index)
+{
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
+	return NVReadVgaCrtc(dev, head, NV_CIO_CRE_58);
+}
+
+static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
+					int head, uint32_t reg)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	uint8_t val;
+
+	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
+	 * NVSetOwner for the relevant head to be programmed */
+	if (head && nv_device(drm->device)->card_type == NV_40)
+		reg += NV_PRMVIO_SIZE;
+
+	val = nv_rd08(device, reg);
+	return val;
+}
+
+static inline void NVWritePRMVIO(struct drm_device *dev,
+					int head, uint32_t reg, uint8_t value)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
+	 * NVSetOwner for the relevant head to be programmed */
+	if (head && nv_device(drm->device)->card_type == NV_40)
+		reg += NV_PRMVIO_SIZE;
+
+	nv_wr08(device, reg, value);
+}
+
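+/* The attribute controller multiplexes index and data on one port; reading
+ * INP0 (0x3da) resets its index/data flip-flop.  Bit 5 of the index is the
+ * palette address source bit: clearing it gives the host access to the
+ * palette (and blanks the display).
+ */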
+static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+	nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
+}
+
+static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+	return !(nv_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
+}
+
+static inline void NVWriteVgaAttr(struct drm_device *dev,
+					int head, uint8_t index, uint8_t value)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	if (NVGetEnablePalette(dev, head))
+		index &= ~0x20;
+	else
+		index |= 0x20;
+
+	nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+	nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+	nv_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
+}
+
+static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
+					int head, uint8_t index)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	uint8_t val;
+	if (NVGetEnablePalette(dev, head))
+		index &= ~0x20;
+	else
+		index |= 0x20;
+
+	nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+	nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+	val = nv_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
+	return val;
+}
+
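+/* SR00 holds the sequencer reset bits (0x1 asserts synchronous reset, 0x3
+ * releases it) and SR01 bit 5 turns the screen off; NVVgaProtect() combines
+ * the two to blank the display and freeze the sequencer around mode
+ * programming.
+ */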
+static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start)
+{
+	NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 0x1 : 0x3);
+}
+
+static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
+{
+	uint8_t seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
+
+	if (protect) {
+		NVVgaSeqReset(dev, head, true);
+		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
+	} else {
+		/* Reenable sequencer, then turn on screen */
+		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);   /* reenable display */
+		NVVgaSeqReset(dev, head, false);
+	}
+	NVSetEnablePalette(dev, head, protect);
+}
+
+static inline bool
+nv_heads_tied(struct drm_device *dev)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (nv_device(drm->device)->chipset == 0x11)
+		return !!(nv_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28));
+
+	return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
+}
+
+/* makes cr0-7 on the specified head read-only */
+static inline bool
+nv_lock_vga_crtc_base(struct drm_device *dev, int head, bool lock)
+{
+	uint8_t cr11 = NVReadVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX);
+	bool waslocked = cr11 & 0x80;
+
+	if (lock)
+		cr11 |= 0x80;
+	else
+		cr11 &= ~0x80;
+	NVWriteVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX, cr11);
+
+	return waslocked;
+}
+
+static inline void
+nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock)
+{
+	/* shadow lock: connects 0x60?3d? regs to "real" 0x3d? regs
+	 * bit7: unlocks HDT, HBS, HBE, HRS, HRE, HEB
+	 * bit6: seems to have some effect on CR09 (double scan, VBS_9)
+	 * bit5: unlocks HDE
+	 * bit4: unlocks VDE
+	 * bit3: unlocks VDT, OVL, VRS, ?VRE?, VBS, VBE, LSR, EBR
+	 * bit2: same as bit 1 of 0x60?804
+	 * bit0: same as bit 0 of 0x60?804
+	 */
+
+	uint8_t cr21 = lock;
+
+	if (lock < 0)
+		/* 0xfa is generic "unlock all" mask */
+		cr21 = NVReadVgaCrtc(dev, head, NV_CIO_CRE_21) | 0xfa;
+
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_21, cr21);
+}
+
+/* renders the extended crtc regs (cr19+) on all crtcs impervious:
+ * immutable and unreadable
+ */
+static inline bool
+NVLockVgaCrtcs(struct drm_device *dev, bool lock)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
+
+	NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
+		       lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
+	/* NV11 has independently lockable extended crtcs, except when tied */
+	if (nv_device(drm->device)->chipset == 0x11 && !nv_heads_tied(dev))
+		NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
+			       lock ? NV_CIO_SR_LOCK_VALUE :
+				      NV_CIO_SR_UNLOCK_RW_VALUE);
+
+	return waslocked;
+}
+
+/* nv04 cursor max dimensions of 32x32 (A1R5G5B5) */
+#define NV04_CURSOR_SIZE 32
+/* limit nv10 cursors to 64x64 (ARGB8) (we could go to 64x255) */
+#define NV10_CURSOR_SIZE 64
+
+static inline int nv_cursor_width(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	return nv_device(drm->device)->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
+}
+
+static inline void
+nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
+{
+	/* on some nv40 (such as the "true" (in the NV_PFB_BOOT_0 sense) nv40,
+	 * the gf6800gt) a hardware bug requires a write to PRAMDAC_CURSOR_POS
+	 * for changes to the CRTC CURCTL regs to take effect, whether changing
+	 * the pixmap location, or just showing/hiding the cursor
+	 */
+	uint32_t curpos = NVReadRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS, curpos);
+}
+
+static inline void
+nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
+
+	if (nv_device(drm->device)->card_type == NV_04) {
+		/*
+		 * Hilarious, the 24th bit doesn't want to stick to
+		 * PCRTC_START...
+		 */
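+		/* bit 24 of the start address maps to bit 6 of CRE_HEB */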
+		int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX);
+
+		NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX,
+			       (cre_heb & ~0x40) | ((offset >> 18) & 0x40));
+	}
+}
+
+static inline void
+nv_show_cursor(struct drm_device *dev, int head, bool show)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	uint8_t *curctl1 =
+		&nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX];
+
+	if (show)
+		*curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
+	else
+		*curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
+
+	if (nv_device(drm->device)->card_type == NV_40)
+		nv_fix_nv40_hw_cursor(dev, head);
+}
+
+static inline uint32_t
+nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	int mask;
+
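+	/* 15 bpp pixels occupy 16 bits; 24 bpp is treated as 8 bpp,
+	 * presumably so the byte pitch stays suitably aligned while
+	 * remaining a whole number of 3-byte pixels */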
+	if (bpp == 15)
+		bpp = 16;
+	if (bpp == 24)
+		bpp = 8;
+
+	/* Alignment requirements taken from the Haiku driver */
+	if (nv_device(drm->device)->card_type == NV_04)
+		mask = 128 / bpp - 1;
+	else
+		mask = 512 / bpp - 1;
+
+	return (width + mask) & ~mask;
+}
+
+#endif	/* __NOUVEAU_HW_H__ */
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/nvreg.h b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/nvreg.h
new file mode 100644
index 0000000..bbfb1a6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/nvreg.h
@@ -0,0 +1,517 @@
+/* $XConsortium: nvreg.h /main/2 1996/10/28 05:13:41 kaleb $ */
+/*
+ * Copyright 1996-1997  David J. McKay
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * DAVID J. MCKAY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nvreg.h,v 1.6 2002/01/25 21:56:06 tsi Exp $ */
+
+#ifndef __NVREG_H_
+#define __NVREG_H_
+
+#define NV_PMC_OFFSET               0x00000000
+#define NV_PMC_SIZE                 0x00001000
+
+#define NV_PBUS_OFFSET              0x00001000
+#define NV_PBUS_SIZE                0x00001000
+
+#define NV_PFIFO_OFFSET             0x00002000
+#define NV_PFIFO_SIZE               0x00002000
+
+#define NV_HDIAG_OFFSET             0x00005000
+#define NV_HDIAG_SIZE               0x00001000
+
+#define NV_PRAM_OFFSET              0x00006000
+#define NV_PRAM_SIZE                0x00001000
+
+#define NV_PVIDEO_OFFSET            0x00008000
+#define NV_PVIDEO_SIZE              0x00001000
+
+#define NV_PTIMER_OFFSET            0x00009000
+#define NV_PTIMER_SIZE              0x00001000
+
+#define NV_PPM_OFFSET               0x0000A000
+#define NV_PPM_SIZE                 0x00001000
+
+#define NV_PTV_OFFSET               0x0000D000
+#define NV_PTV_SIZE                 0x00001000
+
+#define NV_PRMVGA_OFFSET            0x000A0000
+#define NV_PRMVGA_SIZE              0x00020000
+
+#define NV_PRMVIO0_OFFSET           0x000C0000
+#define NV_PRMVIO_SIZE              0x00002000
+#define NV_PRMVIO1_OFFSET           0x000C2000
+
+#define NV_PFB_OFFSET               0x00100000
+#define NV_PFB_SIZE                 0x00001000
+
+#define NV_PEXTDEV_OFFSET           0x00101000
+#define NV_PEXTDEV_SIZE             0x00001000
+
+#define NV_PME_OFFSET               0x00200000
+#define NV_PME_SIZE                 0x00001000
+
+#define NV_PROM_OFFSET              0x00300000
+#define NV_PROM_SIZE                0x00010000
+
+#define NV_PGRAPH_OFFSET            0x00400000
+#define NV_PGRAPH_SIZE              0x00010000
+
+#define NV_PCRTC0_OFFSET            0x00600000
+#define NV_PCRTC0_SIZE              0x00002000 /* empirical */
+
+#define NV_PRMCIO0_OFFSET           0x00601000
+#define NV_PRMCIO_SIZE              0x00002000
+#define NV_PRMCIO1_OFFSET           0x00603000
+
+#define NV50_DISPLAY_OFFSET           0x00610000
+#define NV50_DISPLAY_SIZE             0x0000FFFF
+
+#define NV_PRAMDAC0_OFFSET          0x00680000
+#define NV_PRAMDAC0_SIZE            0x00002000
+
+#define NV_PRMDIO0_OFFSET           0x00681000
+#define NV_PRMDIO_SIZE              0x00002000
+#define NV_PRMDIO1_OFFSET           0x00683000
+
+#define NV_PRAMIN_OFFSET            0x00700000
+#define NV_PRAMIN_SIZE              0x00100000
+
+#define NV_FIFO_OFFSET              0x00800000
+#define NV_FIFO_SIZE                0x00800000
+
+#define NV_PMC_BOOT_0			0x00000000
+#define NV_PMC_ENABLE			0x00000200
+
+#define NV_VIO_VSE2			0x000003c3
+#define NV_VIO_SRX			0x000003c4
+
+#define NV_CIO_CRX__COLOR		0x000003d4
+#define NV_CIO_CR__COLOR		0x000003d5
+
+#define NV_PBUS_DEBUG_1			0x00001084
+#define NV_PBUS_DEBUG_4			0x00001098
+#define NV_PBUS_DEBUG_DUALHEAD_CTL	0x000010f0
+#define NV_PBUS_POWERCTRL_1		0x00001584
+#define NV_PBUS_POWERCTRL_2		0x00001588
+#define NV_PBUS_POWERCTRL_4		0x00001590
+#define NV_PBUS_PCI_NV_19		0x0000184C
+#define NV_PBUS_PCI_NV_20		0x00001850
+#	define NV_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED	(0 << 0)
+#	define NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED	(1 << 0)
+
+#define NV_PFIFO_RAMHT			0x00002210
+
+#define NV_PTV_TV_INDEX			0x0000d220
+#define NV_PTV_TV_DATA			0x0000d224
+#define NV_PTV_HFILTER			0x0000d310
+#define NV_PTV_HFILTER2			0x0000d390
+#define NV_PTV_VFILTER			0x0000d510
+
+#define NV_PRMVIO_MISC__WRITE		0x000c03c2
+#define NV_PRMVIO_SRX			0x000c03c4
+#define NV_PRMVIO_SR			0x000c03c5
+#	define NV_VIO_SR_RESET_INDEX		0x00
+#	define NV_VIO_SR_CLOCK_INDEX		0x01
+#	define NV_VIO_SR_PLANE_MASK_INDEX	0x02
+#	define NV_VIO_SR_CHAR_MAP_INDEX		0x03
+#	define NV_VIO_SR_MEM_MODE_INDEX		0x04
+#define NV_PRMVIO_MISC__READ		0x000c03cc
+#define NV_PRMVIO_GRX			0x000c03ce
+#define NV_PRMVIO_GX			0x000c03cf
+#	define NV_VIO_GX_SR_INDEX		0x00
+#	define NV_VIO_GX_SREN_INDEX		0x01
+#	define NV_VIO_GX_CCOMP_INDEX		0x02
+#	define NV_VIO_GX_ROP_INDEX		0x03
+#	define NV_VIO_GX_READ_MAP_INDEX		0x04
+#	define NV_VIO_GX_MODE_INDEX		0x05
+#	define NV_VIO_GX_MISC_INDEX		0x06
+#	define NV_VIO_GX_DONT_CARE_INDEX	0x07
+#	define NV_VIO_GX_BIT_MASK_INDEX		0x08
+
+#define NV_PCRTC_INTR_0					0x00600100
+#	define NV_PCRTC_INTR_0_VBLANK				(1 << 0)
+#define NV_PCRTC_INTR_EN_0				0x00600140
+#define NV_PCRTC_START					0x00600800
+#define NV_PCRTC_CONFIG					0x00600804
+#	define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA		(1 << 0)
+#	define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC		(4 << 0)
+#	define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC		(2 << 0)
+#define NV_PCRTC_CURSOR_CONFIG				0x00600810
+#	define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE		(1 << 0)
+#	define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE	(1 << 4)
+#	define NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM	(1 << 8)
+#	define NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32		(1 << 12)
+#	define NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64		(1 << 16)
+#	define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_32		(2 << 24)
+#	define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64		(4 << 24)
+#	define NV_PCRTC_CURSOR_CONFIG_CUR_BLEND_ALPHA		(1 << 28)
+
+/* note: PCRTC_GPIO is not available on nv10, and in fact aliases 0x600810 */
+#define NV_PCRTC_GPIO					0x00600818
+#define NV_PCRTC_GPIO_EXT				0x0060081c
+#define NV_PCRTC_830					0x00600830
+#define NV_PCRTC_834					0x00600834
+#define NV_PCRTC_850					0x00600850
+#define NV_PCRTC_ENGINE_CTRL				0x00600860
+#	define NV_CRTC_FSEL_I2C					(1 << 4)
+#	define NV_CRTC_FSEL_OVERLAY				(1 << 12)
+
+#define NV_PRMCIO_ARX			0x006013c0
+#define NV_PRMCIO_AR__WRITE		0x006013c0
+#define NV_PRMCIO_AR__READ		0x006013c1
+#	define NV_CIO_AR_MODE_INDEX		0x10
+#	define NV_CIO_AR_OSCAN_INDEX		0x11
+#	define NV_CIO_AR_PLANE_INDEX		0x12
+#	define NV_CIO_AR_HPP_INDEX		0x13
+#	define NV_CIO_AR_CSEL_INDEX		0x14
+#define NV_PRMCIO_INP0			0x006013c2
+#define NV_PRMCIO_CRX__COLOR		0x006013d4
+#define NV_PRMCIO_CR__COLOR		0x006013d5
+	/* Standard VGA CRTC registers */
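+	/* the "high:low" values below are bitfield descriptors consumed by
+	 * the MASK()/XLATE() macros in hw.h, not ordinary constants */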
+#	define NV_CIO_CR_HDT_INDEX		0x00	/* horizontal display total */
+#	define NV_CIO_CR_HDE_INDEX		0x01	/* horizontal display end */
+#	define NV_CIO_CR_HBS_INDEX		0x02	/* horizontal blanking start */
+#	define NV_CIO_CR_HBE_INDEX		0x03	/* horizontal blanking end */
+#		define NV_CIO_CR_HBE_4_0		4:0
+#	define NV_CIO_CR_HRS_INDEX		0x04	/* horizontal retrace start */
+#	define NV_CIO_CR_HRE_INDEX		0x05	/* horizontal retrace end */
+#		define NV_CIO_CR_HRE_4_0		4:0
+#		define NV_CIO_CR_HRE_HBE_5		7:7
+#	define NV_CIO_CR_VDT_INDEX		0x06	/* vertical display total */
+#	define NV_CIO_CR_OVL_INDEX		0x07	/* overflow bits */
+#		define NV_CIO_CR_OVL_VDT_8		0:0
+#		define NV_CIO_CR_OVL_VDE_8		1:1
+#		define NV_CIO_CR_OVL_VRS_8		2:2
+#		define NV_CIO_CR_OVL_VBS_8		3:3
+#		define NV_CIO_CR_OVL_VDT_9		5:5
+#		define NV_CIO_CR_OVL_VDE_9		6:6
+#		define NV_CIO_CR_OVL_VRS_9		7:7
+#	define NV_CIO_CR_RSAL_INDEX		0x08	/* normally "preset row scan" */
+#	define NV_CIO_CR_CELL_HT_INDEX		0x09	/* cell height?! normally "max scan line" */
+#		define NV_CIO_CR_CELL_HT_VBS_9		5:5
+#		define NV_CIO_CR_CELL_HT_SCANDBL	7:7
+#	define NV_CIO_CR_CURS_ST_INDEX		0x0a	/* cursor start */
+#	define NV_CIO_CR_CURS_END_INDEX		0x0b	/* cursor end */
+#	define NV_CIO_CR_SA_HI_INDEX		0x0c	/* screen start address high */
+#	define NV_CIO_CR_SA_LO_INDEX		0x0d	/* screen start address low */
+#	define NV_CIO_CR_TCOFF_HI_INDEX		0x0e	/* cursor offset high */
+#	define NV_CIO_CR_TCOFF_LO_INDEX		0x0f	/* cursor offset low */
+#	define NV_CIO_CR_VRS_INDEX		0x10	/* vertical retrace start */
+#	define NV_CIO_CR_VRE_INDEX		0x11	/* vertical retrace end */
+#		define NV_CIO_CR_VRE_3_0		3:0
+#	define NV_CIO_CR_VDE_INDEX		0x12	/* vertical display end */
+#	define NV_CIO_CR_OFFSET_INDEX		0x13	/* sets screen pitch */
+#	define NV_CIO_CR_ULINE_INDEX		0x14	/* underline location */
+#	define NV_CIO_CR_VBS_INDEX		0x15	/* vertical blank start */
+#	define NV_CIO_CR_VBE_INDEX		0x16	/* vertical blank end */
+#	define NV_CIO_CR_MODE_INDEX		0x17	/* crtc mode control */
+#	define NV_CIO_CR_LCOMP_INDEX		0x18	/* line compare */
+	/* Extended VGA CRTC registers */
+#	define NV_CIO_CRE_RPC0_INDEX		0x19	/* repaint control 0 */
+#		define NV_CIO_CRE_RPC0_OFFSET_10_8	7:5
+#	define NV_CIO_CRE_RPC1_INDEX		0x1a	/* repaint control 1 */
+#		define NV_CIO_CRE_RPC1_LARGE		2:2
+#	define NV_CIO_CRE_FF_INDEX		0x1b	/* fifo control */
+#	define NV_CIO_CRE_ENH_INDEX		0x1c	/* enhanced? */
+#	define NV_CIO_SR_LOCK_INDEX		0x1f	/* crtc lock */
+#		define NV_CIO_SR_UNLOCK_RW_VALUE	0x57
+#		define NV_CIO_SR_LOCK_VALUE		0x99
+#	define NV_CIO_CRE_FFLWM__INDEX		0x20	/* fifo low water mark */
+#	define NV_CIO_CRE_21			0x21	/* vga shadow crtc lock */
+#	define NV_CIO_CRE_LSR_INDEX		0x25	/* ? */
+#		define NV_CIO_CRE_LSR_VDT_10		0:0
+#		define NV_CIO_CRE_LSR_VDE_10		1:1
+#		define NV_CIO_CRE_LSR_VRS_10		2:2
+#		define NV_CIO_CRE_LSR_VBS_10		3:3
+#		define NV_CIO_CRE_LSR_HBE_6		4:4
+#	define NV_CIO_CR_ARX_INDEX		0x26	/* attribute index -- ro copy of 0x60.3c0 */
+#	define NV_CIO_CRE_CHIP_ID_INDEX		0x27	/* chip revision */
+#	define NV_CIO_CRE_PIXEL_INDEX		0x28
+#		define NV_CIO_CRE_PIXEL_FORMAT		1:0
+#	define NV_CIO_CRE_HEB__INDEX		0x2d	/* horizontal extra bits? */
+#		define NV_CIO_CRE_HEB_HDT_8		0:0
+#		define NV_CIO_CRE_HEB_HDE_8		1:1
+#		define NV_CIO_CRE_HEB_HBS_8		2:2
+#		define NV_CIO_CRE_HEB_HRS_8		3:3
+#		define NV_CIO_CRE_HEB_ILC_8		4:4
+#	define NV_CIO_CRE_2E			0x2e	/* some scratch or dummy reg to force writes to sink in */
+#	define NV_CIO_CRE_HCUR_ADDR2_INDEX	0x2f	/* cursor */
+#	define NV_CIO_CRE_HCUR_ADDR0_INDEX	0x30		/* pixmap */
+#		define NV_CIO_CRE_HCUR_ADDR0_ADR	6:0
+#		define NV_CIO_CRE_HCUR_ASI		7:7
+#	define NV_CIO_CRE_HCUR_ADDR1_INDEX	0x31			/* address */
+#		define NV_CIO_CRE_HCUR_ADDR1_ENABLE	0:0
+#		define NV_CIO_CRE_HCUR_ADDR1_CUR_DBL	1:1
+#		define NV_CIO_CRE_HCUR_ADDR1_ADR	7:2
+#	define NV_CIO_CRE_LCD__INDEX		0x33
+#		define NV_CIO_CRE_LCD_LCD_SELECT	0:0
+#		define NV_CIO_CRE_LCD_ROUTE_MASK	0x3b
+#	define NV_CIO_CRE_DDC0_STATUS__INDEX	0x36
+#	define NV_CIO_CRE_DDC0_WR__INDEX	0x37
+#	define NV_CIO_CRE_ILACE__INDEX		0x39	/* interlace */
+#	define NV_CIO_CRE_SCRATCH3__INDEX	0x3b
+#	define NV_CIO_CRE_SCRATCH4__INDEX	0x3c
+#	define NV_CIO_CRE_DDC_STATUS__INDEX	0x3e
+#	define NV_CIO_CRE_DDC_WR__INDEX		0x3f
+#	define NV_CIO_CRE_EBR_INDEX		0x41	/* extra bits ? (vertical) */
+#		define NV_CIO_CRE_EBR_VDT_11		0:0
+#		define NV_CIO_CRE_EBR_VDE_11		2:2
+#		define NV_CIO_CRE_EBR_VRS_11		4:4
+#		define NV_CIO_CRE_EBR_VBS_11		6:6
+#	define NV_CIO_CRE_42			0x42
+#		define NV_CIO_CRE_42_OFFSET_11		6:6
+#	define NV_CIO_CRE_43			0x43
+#	define NV_CIO_CRE_44			0x44	/* head control */
+#	define NV_CIO_CRE_CSB			0x45	/* colour saturation boost */
+#	define NV_CIO_CRE_RCR			0x46
+#		define NV_CIO_CRE_RCR_ENDIAN_BIG	7:7
+#	define NV_CIO_CRE_47			0x47	/* extended fifo lwm, used on nv30+ */
+#	define NV_CIO_CRE_49			0x49
+#	define NV_CIO_CRE_4B			0x4b	/* given patterns in 0x[2-3][a-c] regs, probably scratch 6 */
+#	define NV_CIO_CRE_TVOUT_LATENCY		0x52
+#	define NV_CIO_CRE_53			0x53	/* `fp_htiming' according to Haiku */
+#	define NV_CIO_CRE_54			0x54	/* `fp_vtiming' according to Haiku */
+#	define NV_CIO_CRE_57			0x57	/* index reg for cr58 */
+#	define NV_CIO_CRE_58			0x58	/* data reg for cr57 */
+#	define NV_CIO_CRE_59			0x59	/* related to on/off-chip-ness of digital outputs */
+#	define NV_CIO_CRE_5B			0x5B	/* newer colour saturation reg */
+#	define NV_CIO_CRE_85			0x85
+#	define NV_CIO_CRE_86			0x86
+#define NV_PRMCIO_INP0__COLOR		0x006013da
+
+#define NV_PRAMDAC_CU_START_POS				0x00680300
+#	define NV_PRAMDAC_CU_START_POS_X			15:0
+#	define NV_PRAMDAC_CU_START_POS_Y			31:16
+#define NV_RAMDAC_NV10_CURSYNC				0x00680404
+
+#define NV_PRAMDAC_NVPLL_COEFF				0x00680500
+#define NV_PRAMDAC_MPLL_COEFF				0x00680504
+#define NV_PRAMDAC_VPLL_COEFF				0x00680508
+#	define NV30_RAMDAC_ENABLE_VCO2				(8 << 4)
+
+#define NV_PRAMDAC_PLL_COEFF_SELECT			0x0068050c
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE	(4 << 0)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL	(1 << 8)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL	(2 << 8)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL	(4 << 8)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2	(8 << 8)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1		(1 << 16)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1		(2 << 16)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2		(4 << 16)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2		(8 << 16)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_CLK_SOURCE_VIP	(1 << 20)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2	(1 << 28)
+#	define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2	(2 << 28)
+
+#define NV_PRAMDAC_PLL_SETUP_CONTROL			0x00680510
+#define NV_RAMDAC_VPLL2					0x00680520
+#define NV_PRAMDAC_SEL_CLK				0x00680524
+#define NV_RAMDAC_DITHER_NV11				0x00680528
+#define NV_PRAMDAC_DACCLK				0x0068052c
+#	define NV_PRAMDAC_DACCLK_SEL_DACCLK			(1 << 0)
+
+#define NV_RAMDAC_NVPLL_B				0x00680570
+#define NV_RAMDAC_MPLL_B				0x00680574
+#define NV_RAMDAC_VPLL_B				0x00680578
+#define NV_RAMDAC_VPLL2_B				0x0068057c
+#	define NV31_RAMDAC_ENABLE_VCO2				(8 << 28)
+#define NV_PRAMDAC_580					0x00680580
+#	define NV_RAMDAC_580_VPLL1_ACTIVE			(1 << 8)
+#	define NV_RAMDAC_580_VPLL2_ACTIVE			(1 << 28)
+
+#define NV_PRAMDAC_GENERAL_CONTROL			0x00680600
+#	define NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON		(3 << 4)
+#	define NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL		(1 << 8)
+#	define NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL		(1 << 12)
+#	define NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM	(2 << 16)
+#	define NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS		(1 << 20)
+#	define NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG		(2 << 28)
+#define NV_PRAMDAC_TEST_CONTROL				0x00680608
+#	define NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED	(1 << 12)
+#	define NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF		(1 << 16)
+#	define NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI		(1 << 28)
+#define NV_PRAMDAC_TESTPOINT_DATA			0x00680610
+#	define NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK		(8 << 28)
+#define NV_PRAMDAC_630					0x00680630
+#define NV_PRAMDAC_634					0x00680634
+
+#define NV_PRAMDAC_TV_SETUP				0x00680700
+#define NV_PRAMDAC_TV_VTOTAL				0x00680720
+#define NV_PRAMDAC_TV_VSKEW				0x00680724
+#define NV_PRAMDAC_TV_VSYNC_DELAY			0x00680728
+#define NV_PRAMDAC_TV_HTOTAL				0x0068072c
+#define NV_PRAMDAC_TV_HSKEW				0x00680730
+#define NV_PRAMDAC_TV_HSYNC_DELAY			0x00680734
+#define NV_PRAMDAC_TV_HSYNC_DELAY2			0x00680738
+
+#define NV_PRAMDAC_FP_VDISPLAY_END			0x00680800
+#define NV_PRAMDAC_FP_VTOTAL				0x00680804
+#define NV_PRAMDAC_FP_VCRTC				0x00680808
+#define NV_PRAMDAC_FP_VSYNC_START			0x0068080c
+#define NV_PRAMDAC_FP_VSYNC_END				0x00680810
+#define NV_PRAMDAC_FP_VVALID_START			0x00680814
+#define NV_PRAMDAC_FP_VVALID_END			0x00680818
+#define NV_PRAMDAC_FP_HDISPLAY_END			0x00680820
+#define NV_PRAMDAC_FP_HTOTAL				0x00680824
+#define NV_PRAMDAC_FP_HCRTC				0x00680828
+#define NV_PRAMDAC_FP_HSYNC_START			0x0068082c
+#define NV_PRAMDAC_FP_HSYNC_END				0x00680830
+#define NV_PRAMDAC_FP_HVALID_START			0x00680834
+#define NV_PRAMDAC_FP_HVALID_END			0x00680838
+
+#define NV_RAMDAC_FP_DITHER				0x0068083c
+#define NV_PRAMDAC_FP_TG_CONTROL			0x00680848
+#	define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS		(1 << 0)
+#	define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE		(2 << 0)
+#	define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS		(1 << 4)
+#	define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE		(2 << 4)
+#	define NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE		(0 << 8)
+#	define NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER		(1 << 8)
+#	define NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE		(2 << 8)
+#	define NV_PRAMDAC_FP_TG_CONTROL_READ_PROG		(1 << 20)
+#	define NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12		(1 << 24)
+#	define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS		(1 << 28)
+#	define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE		(2 << 28)
+#define NV_PRAMDAC_FP_MARGIN_COLOR			0x0068084c
+#define NV_PRAMDAC_850					0x00680850
+#define NV_PRAMDAC_85C					0x0068085c
+#define NV_PRAMDAC_FP_DEBUG_0				0x00680880
+#	define NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE		(1 << 0)
+#	define NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE		(1 << 4)
+/* This doesn't seem to be essential for tmds, but still often set */
+#	define NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED		(8 << 4)
+#	define NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR		(1 << 8)
+#	define NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR		(1 << 12)
+#	define NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND		(1 << 20)
+#	define NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND		(1 << 24)
+#       define NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK              (1 << 28)
+#define NV_PRAMDAC_FP_DEBUG_1				0x00680884
+#	define NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE		11:0
+#	define NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE	(1 << 12)
+#	define NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE		27:16
+#	define NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE	(1 << 28)
+#define NV_PRAMDAC_FP_DEBUG_2				0x00680888
+#define NV_PRAMDAC_FP_DEBUG_3				0x0068088C
+
+/* see NV_PRAMDAC_INDIR_TMDS in rules.xml */
+#define NV_PRAMDAC_FP_TMDS_CONTROL			0x006808b0
+#	define NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE		(1 << 16)
+#define NV_PRAMDAC_FP_TMDS_DATA				0x006808b4
+
+#define NV_PRAMDAC_8C0                                  0x006808c0
+
+/* Some kind of switch */
+#define NV_PRAMDAC_900					0x00680900
+#define NV_PRAMDAC_A20					0x00680A20
+#define NV_PRAMDAC_A24					0x00680A24
+#define NV_PRAMDAC_A34					0x00680A34
+
+#define NV_PRAMDAC_CTV					0x00680c00
+
+/* names fabricated from NV_USER_DAC info */
+#define NV_PRMDIO_PIXEL_MASK		0x006813c6
+#	define NV_PRMDIO_PIXEL_MASK_MASK	0xff
+#define NV_PRMDIO_READ_MODE_ADDRESS	0x006813c7
+#define NV_PRMDIO_WRITE_MODE_ADDRESS	0x006813c8
+#define NV_PRMDIO_PALETTE_DATA		0x006813c9
+
+#define NV_PGRAPH_DEBUG_0		0x00400080
+#define NV_PGRAPH_DEBUG_1		0x00400084
+#define NV_PGRAPH_DEBUG_2_NV04		0x00400088
+#define NV_PGRAPH_DEBUG_2		0x00400620
+#define NV_PGRAPH_DEBUG_3		0x0040008c
+#define NV_PGRAPH_DEBUG_4		0x00400090
+#define NV_PGRAPH_INTR			0x00400100
+#define NV_PGRAPH_INTR_EN		0x00400140
+#define NV_PGRAPH_CTX_CONTROL		0x00400144
+#define NV_PGRAPH_CTX_CONTROL_NV04	0x00400170
+#define NV_PGRAPH_ABS_UCLIP_XMIN	0x0040053C
+#define NV_PGRAPH_ABS_UCLIP_YMIN	0x00400540
+#define NV_PGRAPH_ABS_UCLIP_XMAX	0x00400544
+#define NV_PGRAPH_ABS_UCLIP_YMAX	0x00400548
+#define NV_PGRAPH_BETA_AND		0x00400608
+#define NV_PGRAPH_LIMIT_VIOL_PIX	0x00400610
+#define NV_PGRAPH_BOFFSET0		0x00400640
+#define NV_PGRAPH_BOFFSET1		0x00400644
+#define NV_PGRAPH_BOFFSET2		0x00400648
+#define NV_PGRAPH_BLIMIT0		0x00400684
+#define NV_PGRAPH_BLIMIT1		0x00400688
+#define NV_PGRAPH_BLIMIT2		0x0040068c
+#define NV_PGRAPH_STATUS		0x00400700
+#define NV_PGRAPH_SURFACE		0x00400710
+#define NV_PGRAPH_STATE			0x00400714
+#define NV_PGRAPH_FIFO			0x00400720
+#define NV_PGRAPH_PATTERN_SHAPE		0x00400810
+#define NV_PGRAPH_TILE			0x00400b00
+
+#define NV_PVIDEO_INTR_EN		0x00008140
+#define NV_PVIDEO_BUFFER		0x00008700
+#define NV_PVIDEO_STOP			0x00008704
+#define NV_PVIDEO_UVPLANE_BASE(buff)	(0x00008800+(buff)*4)
+#define NV_PVIDEO_UVPLANE_LIMIT(buff)	(0x00008808+(buff)*4)
+#define NV_PVIDEO_UVPLANE_OFFSET_BUFF(buff)	(0x00008820+(buff)*4)
+#define NV_PVIDEO_BASE(buff)		(0x00008900+(buff)*4)
+#define NV_PVIDEO_LIMIT(buff)		(0x00008908+(buff)*4)
+#define NV_PVIDEO_LUMINANCE(buff)	(0x00008910+(buff)*4)
+#define NV_PVIDEO_CHROMINANCE(buff)	(0x00008918+(buff)*4)
+#define NV_PVIDEO_OFFSET_BUFF(buff)	(0x00008920+(buff)*4)
+#define NV_PVIDEO_SIZE_IN(buff)		(0x00008928+(buff)*4)
+#define NV_PVIDEO_POINT_IN(buff)	(0x00008930+(buff)*4)
+#define NV_PVIDEO_DS_DX(buff)		(0x00008938+(buff)*4)
+#define NV_PVIDEO_DT_DY(buff)		(0x00008940+(buff)*4)
+#define NV_PVIDEO_POINT_OUT(buff)	(0x00008948+(buff)*4)
+#define NV_PVIDEO_SIZE_OUT(buff)	(0x00008950+(buff)*4)
+#define NV_PVIDEO_FORMAT(buff)		(0x00008958+(buff)*4)
+#	define NV_PVIDEO_FORMAT_PLANAR			(1 << 0)
+#	define NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8	(1 << 16)
+#	define NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY	(1 << 20)
+#	define NV_PVIDEO_FORMAT_MATRIX_ITURBT709	(1 << 24)
+#define NV_PVIDEO_COLOR_KEY		0x00008B00
+
+/* NV04 overlay defines from VIDIX & Haiku */
+#define NV_PVIDEO_INTR_EN_0		0x00680140
+#define NV_PVIDEO_STEP_SIZE		0x00680200
+#define NV_PVIDEO_CONTROL_Y		0x00680204
+#define NV_PVIDEO_CONTROL_X		0x00680208
+#define NV_PVIDEO_BUFF0_START_ADDRESS	0x0068020c
+#define NV_PVIDEO_BUFF0_PITCH_LENGTH	0x00680214
+#define NV_PVIDEO_BUFF0_OFFSET		0x0068021c
+#define NV_PVIDEO_BUFF1_START_ADDRESS	0x00680210
+#define NV_PVIDEO_BUFF1_PITCH_LENGTH	0x00680218
+#define NV_PVIDEO_BUFF1_OFFSET		0x00680220
+#define NV_PVIDEO_OE_STATE		0x00680224
+#define NV_PVIDEO_SU_STATE		0x00680228
+#define NV_PVIDEO_RM_STATE		0x0068022c
+#define NV_PVIDEO_WINDOW_START		0x00680230
+#define NV_PVIDEO_WINDOW_SIZE		0x00680234
+#define NV_PVIDEO_FIFO_THRES_SIZE	0x00680238
+#define NV_PVIDEO_FIFO_BURST_LENGTH	0x0068023c
+#define NV_PVIDEO_KEY			0x00680240
+#define NV_PVIDEO_OVERLAY		0x00680244
+#define NV_PVIDEO_RED_CSC_OFFSET	0x00680280
+#define NV_PVIDEO_GREEN_CSC_OFFSET	0x00680284
+#define NV_PVIDEO_BLUE_CSC_OFFSET	0x00680288
+#define NV_PVIDEO_CSC_ADJUST		0x0068028c
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
new file mode 100644
index 0000000..08c6f5e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
@@ -0,0 +1,592 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "nouveau_drm.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+#include "hw.h"
+#include "tvnv17.h"
+
+char *nv17_tv_norm_names[NUM_TV_NORMS] = {
+	[TV_NORM_PAL] = "PAL",
+	[TV_NORM_PAL_M] = "PAL-M",
+	[TV_NORM_PAL_N] = "PAL-N",
+	[TV_NORM_PAL_NC] = "PAL-Nc",
+	[TV_NORM_NTSC_M] = "NTSC-M",
+	[TV_NORM_NTSC_J] = "NTSC-J",
+	[TV_NORM_HD480I] = "hd480i",
+	[TV_NORM_HD480P] = "hd480p",
+	[TV_NORM_HD576I] = "hd576i",
+	[TV_NORM_HD576P] = "hd576p",
+	[TV_NORM_HD720P] = "hd720p",
+	[TV_NORM_HD1080I] = "hd1080i"
+};
+
+/* TV standard specific parameters */
+
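+/* Each TV_ENC_MODE entry appears to be { hdisplay, vdisplay, vrefresh,
+ * 64 raw TV encoder register bytes }, with vrefresh in millihertz
+ * (59940 = 59.94 Hz, 50000 = 50 Hz).
+ */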
+struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = {
+	[TV_NORM_PAL] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 576, 50000, {
+					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
+					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
+					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_PAL_M] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 480, 59940, {
+					0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0,
+					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c,
+					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_PAL_N] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 576, 50000, {
+					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0,
+					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+					0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_PAL_NC] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 576, 50000, {
+					0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
+					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
+					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_NTSC_M] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 480, 59940, {
+					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0,
+					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c,
+					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_NTSC_J] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 480, 59940, {
+					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
+					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+					0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
+					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_HD480I] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 480, 59940, {
+					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
+					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+					0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
+					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_HD576I] = { TV_ENC_MODE, {
+			.tv_enc_mode = { 720, 576, 50000, {
+					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
+					0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
+					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
+					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
+					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_HD480P] = { CTV_ENC_MODE, {
+			.ctv_enc_mode = {
+				.mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000,
+						   720, 735, 743, 858, 0, 480, 490, 494, 525, 0,
+						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+				.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
+					      0x354003a, 0x40000, 0x6f0344, 0x18100000,
+					      0x10160004, 0x10060005, 0x1006000c, 0x10060020,
+					      0x10060021, 0x140e0022, 0x10060202, 0x1802020a,
+					      0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff,
+					      0x10000fff, 0x10000fff, 0x10000fff, 0x70,
+					      0x3ff0000, 0x57, 0x2e001e, 0x258012c,
+					      0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
+					      0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
+				} } } },
+
+	[TV_NORM_HD576P] = { CTV_ENC_MODE, {
+			.ctv_enc_mode = {
+				.mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000,
+						   720, 730, 738, 864, 0, 576, 581, 585, 625, 0,
+						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+				.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
+					      0x354003a, 0x40000, 0x6f0344, 0x18100000,
+					      0x10060001, 0x10060009, 0x10060026, 0x10060027,
+					      0x140e0028, 0x10060268, 0x1810026d, 0x10000fff,
+					      0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff,
+					      0x10000fff, 0x10000fff, 0x10000fff, 0x69,
+					      0x3ff0000, 0x57, 0x2e001e, 0x258012c,
+					      0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
+					      0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
+				} } } },
+
+	[TV_NORM_HD720P] = { CTV_ENC_MODE, {
+			.ctv_enc_mode = {
+				.mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250,
+						   1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0,
+						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+				.ctv_regs = { 0x1260394, 0x0, 0x0, 0x622,
+					      0x66b0021, 0x6004a, 0x1210626, 0x8170000,
+					      0x70004, 0x70016, 0x70017, 0x40f0018,
+					      0x702e8, 0x81702ed, 0xfff, 0xfff,
+					      0xfff, 0xfff, 0xfff, 0xfff,
+					      0xfff, 0xfff, 0xfff, 0x0,
+					      0x2e40001, 0x58, 0x2e001e, 0x258012c,
+					      0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300,
+					      0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0
+				} } } },
+
+	[TV_NORM_HD1080I] = { CTV_ENC_MODE, {
+			.ctv_enc_mode = {
+				.mode = { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250,
+						   1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0,
+						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
+						   | DRM_MODE_FLAG_INTERLACE) },
+				.ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868,
+					      0x8940028, 0x60054, 0xe80870, 0xbf70000,
+					      0xbc70004, 0x70005, 0x70012, 0x70013,
+					      0x40f0014, 0x70230, 0xbf70232, 0xbf70233,
+					      0x1c70237, 0x70238, 0x70244, 0x70245,
+					      0x40f0246, 0x70462, 0x1f70464, 0x0,
+					      0x2e40001, 0x58, 0x2e001e, 0x258012c,
+					      0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300,
+					      0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0
+				} } } }
+};
+
+/*
+ * The following is some guesswork on how the TV encoder flicker
+ * filter/rescaler works:
+ *
+ * It seems to use some sort of resampling filter. It is controlled
+ * through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, which
+ * control the horizontal and vertical stages respectively. There is
+ * also NV_PTV_HFILTER2, which the blob fills identically to
+ * NV_PTV_HFILTER, but it seems to do nothing. A rough guess is that it
+ * could be used to control the filtering of each interlaced field
+ * independently, but I don't know how it is enabled. The whole
+ * filtering process seems to be disabled with bits 26:27 of PTV_200,
+ * but we don't do that.
+ *
+ * The layout of both register sets is the same:
+ *
+ * A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40]
+ * B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c]
+ *
+ * Each coefficient is stored in bits [31],[15:9] in two's complement
+ * format. They seem to be weights for a low-pass filter. Both the A and
+ * B coefficients are applied to the 14 nearest samples on each side,
+ * listed from nearest to farthest (they roughly cover 2 framebuffer
+ * pixels on each side). They are probably multiplied by some additional
+ * hardwired weights before being used: B coefficients are applied the
+ * same on both sides, while A coefficients are inverted before being
+ * applied to the opposite side.
+ *
+ * After all the hassle, I got the following formula by empirical
+ * means...
+ */
+
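+/*
+ * An illustrative sketch (not used by the driver) of how an 8-bit two's
+ * complement coefficient would map onto the [31],[15:9] layout described
+ * above; it matches the mask applied in tv_setup_filter() below.
+ */
+#if 0
+static inline uint32_t pack_filter_coeff(int8_t c)
+{
+	/* Sign bit in [31], low 7 magnitude bits in [15:9]. */
+	return ((uint32_t)(c & 0x7f) << 9) | (c < 0 ? 1u << 31 : 0);
+}
+#endif
+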
+#define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o)
+
+#define id1 (1LL << 8)
+#define id2 (1LL << 16)
+#define id3 (1LL << 24)
+#define id4 (1LL << 32)
+#define id5 (1LL << 48)
+
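+/*
+ * The id* constants are binary fixed-point scales (2^8, 2^16, 2^24,
+ * 2^32 and 2^48): multiplying the floating-point literals in fparams[]
+ * below by them turns the filter parameters into 64-bit fixed-point
+ * constants at compile time.
+ */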
+static struct filter_params {
+	int64_t k1;
+	int64_t ki;
+	int64_t ki2;
+	int64_t ki3;
+	int64_t kr;
+	int64_t kir;
+	int64_t ki2r;
+	int64_t ki3r;
+	int64_t kf;
+	int64_t kif;
+	int64_t ki2f;
+	int64_t ki3f;
+	int64_t krf;
+	int64_t kirf;
+	int64_t ki2rf;
+	int64_t ki3rf;
+} fparams[2][4] = {
+	/* Horizontal filter parameters */
+	{
+		{64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5,
+		 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4,
+		 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3,
+		 -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1},
+		{-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5,
+		 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4,
+		 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3,
+		 -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1},
+		{-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5,
+		 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4,
+		 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3,
+		 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1},
+		{51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5,
+		 -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4,
+		 -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3,
+		 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1},
+	},
+
+	/* Vertical filter parameters */
+	{
+		{67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5,
+		 -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4,
+		 -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3,
+		 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1},
+		{6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5,
+		 8.293120 * id4, -1.192888 * id4, -0.947652 * id4, 0.094507 * id4,
+		 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3,
+		 -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1},
+		{-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5,
+		 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4,
+		 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3,
+		 -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1},
+		{-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5,
+		 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4,
+		 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3,
+		 -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1},
+	}
+};
+
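+/*
+ * tv_setup_filter() below evaluates each filter tap c(i), i = 0..6, as
+ * the sum of four cubic polynomials in the tap index, weighted by 1,
+ * the rescaling ratio rs, the flicker setting f and f * rs:
+ *
+ *	c(i) = P1(i) + P2(i) * rs + P3(i) * f + P4(i) * f * rs
+ */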
+static void tv_setup_filter(struct drm_encoder *encoder)
+{
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+	struct drm_display_mode *mode = &encoder->crtc->mode;
+	uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter,
+				       &tv_enc->state.vfilter};
+	int i, j, k;
+	int32_t overscan = calc_overscan(tv_enc->overscan);
+	int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100);
+	uint64_t rs[] = {mode->hdisplay * id3,
+			 mode->vdisplay * id3};
+
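+	/* do_div() divides the 64-bit dividend in place (a plain '/' isn't
+	 * available for 64-bit operands on 32-bit kernels) and returns the
+	 * remainder, which is discarded here. */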
+	do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay);
+	do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay);
+
+	for (k = 0; k < 2; k++) {
+		rs[k] = max((int64_t)rs[k], id2);
+
+		for (j = 0; j < 4; j++) {
+			struct filter_params *p = &fparams[k][j];
+
+			for (i = 0; i < 7; i++) {
+				int64_t c = (p->k1 + p->ki*i + p->ki2*i*i +
+					     p->ki3*i*i*i)
+					+ (p->kr + p->kir*i + p->ki2r*i*i +
+					   p->ki3r*i*i*i) * rs[k]
+					+ (p->kf + p->kif*i + p->ki2f*i*i +
+					   p->ki3f*i*i*i) * flicker
+					+ (p->krf + p->kirf*i + p->ki2rf*i*i +
+					   p->ki3rf*i*i*i) * flicker * rs[k];
+
+				(*filters[k])[j][i] = (c + id5/2) >> 39
+					& (0x1 << 31 | 0x7f << 9);
+			}
+		}
+	}
+}
+
+/* Hardware state saving/restoring */
+
+static void tv_save_filter(struct drm_device *dev, uint32_t base,
+			   uint32_t regs[4][7])
+{
+	int i, j;
+	uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
+
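+	/* The four 7-register ranges follow the A/B layout described above:
+	 * A occupies [BASE, BASE+0x18] and [BASE+0x40, BASE+0x58], B
+	 * occupies [BASE+0x1c, BASE+0x34] and [BASE+0x5c, BASE+0x74]. */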
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 7; j++)
+			regs[i][j] = nv_read_ptv(dev, offsets[i]+4*j);
+	}
+}
+
+static void tv_load_filter(struct drm_device *dev, uint32_t base,
+			   uint32_t regs[4][7])
+{
+	int i, j;
+	uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
+
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 7; j++)
+			nv_write_ptv(dev, offsets[i]+4*j, regs[i][j]);
+	}
+}
+
+void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state)
+{
+	int i;
+
+	for (i = 0; i < 0x40; i++)
+		state->tv_enc[i] = nv_read_tv_enc(dev, i);
+
+	tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter);
+	tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
+	tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter);
+
+	nv_save_ptv(dev, state, 200);
+	nv_save_ptv(dev, state, 204);
+	nv_save_ptv(dev, state, 208);
+	nv_save_ptv(dev, state, 20c);
+	nv_save_ptv(dev, state, 304);
+	nv_save_ptv(dev, state, 500);
+	nv_save_ptv(dev, state, 504);
+	nv_save_ptv(dev, state, 508);
+	nv_save_ptv(dev, state, 600);
+	nv_save_ptv(dev, state, 604);
+	nv_save_ptv(dev, state, 608);
+	nv_save_ptv(dev, state, 60c);
+	nv_save_ptv(dev, state, 610);
+	nv_save_ptv(dev, state, 614);
+}
+
+void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
+{
+	int i;
+
+	for (i = 0; i < 0x40; i++)
+		nv_write_tv_enc(dev, i, state->tv_enc[i]);
+
+	tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter);
+	tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
+	tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter);
+
+	nv_load_ptv(dev, state, 200);
+	nv_load_ptv(dev, state, 204);
+	nv_load_ptv(dev, state, 208);
+	nv_load_ptv(dev, state, 20c);
+	nv_load_ptv(dev, state, 304);
+	nv_load_ptv(dev, state, 500);
+	nv_load_ptv(dev, state, 504);
+	nv_load_ptv(dev, state, 508);
+	nv_load_ptv(dev, state, 600);
+	nv_load_ptv(dev, state, 604);
+	nv_load_ptv(dev, state, 608);
+	nv_load_ptv(dev, state, 60c);
+	nv_load_ptv(dev, state, 610);
+	nv_load_ptv(dev, state, 614);
+
+	/* This is required for some settings to kick in. */
+	nv_write_tv_enc(dev, 0x3e, 1);
+	nv_write_tv_enc(dev, 0x3e, 0);
+}
+
+/* Timings similar to the ones the blob sets */
+
+const struct drm_display_mode nv17_tv_modes[] = {
+	{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
+		   320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
+		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
+	{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0,
+		   320, 344, 392, 560, 0, 240, 240, 246, 263, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
+		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
+	{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0,
+		   400, 432, 496, 640, 0, 300, 300, 303, 314, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
+		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0,
+		   640, 672, 768, 880, 0, 480, 480, 492, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0,
+		   720, 752, 872, 960, 0, 480, 480, 493, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0,
+		   720, 776, 856, 960, 0, 576, 576, 588, 597, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0,
+		   800, 840, 920, 1040, 0, 600, 600, 604, 618, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0,
+		   1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	{}
+};
+
+void nv17_tv_update_properties(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	struct nv17_tv_state *regs = &tv_enc->state;
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+	int subconnector = tv_enc->select_subconnector ?
+						tv_enc->select_subconnector :
+						tv_enc->subconnector;
+
+	switch (subconnector) {
+	case DRM_MODE_SUBCONNECTOR_Composite:
+	{
+		regs->ptv_204 = 0x2;
+
+		/* The composite connector may be found on either pin. */
+		if (tv_enc->pin_mask & 0x4)
+			regs->ptv_204 |= 0x010000;
+		else if (tv_enc->pin_mask & 0x2)
+			regs->ptv_204 |= 0x100000;
+		else
+			regs->ptv_204 |= 0x110000;
+
+		regs->tv_enc[0x7] = 0x10;
+		break;
+	}
+	case DRM_MODE_SUBCONNECTOR_SVIDEO:
+		regs->ptv_204 = 0x11012;
+		regs->tv_enc[0x7] = 0x18;
+		break;
+
+	case DRM_MODE_SUBCONNECTOR_Component:
+		regs->ptv_204 = 0x111333;
+		regs->tv_enc[0x7] = 0x14;
+		break;
+
+	case DRM_MODE_SUBCONNECTOR_SCART:
+		regs->ptv_204 = 0x111012;
+		regs->tv_enc[0x7] = 0x18;
+		break;
+	}
+
+	regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20],
+					 255, tv_enc->saturation);
+	regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22],
+					 255, tv_enc->saturation);
+	regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;
+
+	nv_load_ptv(dev, regs, 204);
+	nv_load_tv_enc(dev, regs, 7);
+	nv_load_tv_enc(dev, regs, 20);
+	nv_load_tv_enc(dev, regs, 22);
+	nv_load_tv_enc(dev, regs, 25);
+}
+
+void nv17_tv_update_rescaler(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	struct nv17_tv_state *regs = &tv_enc->state;
+
+	regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8);
+
+	tv_setup_filter(encoder);
+
+	nv_load_ptv(dev, regs, 208);
+	tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter);
+	tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2);
+	tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter);
+}
+
+void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	int head = nouveau_crtc(encoder->crtc)->index;
+	struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head];
+	struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
+	struct drm_display_mode *output_mode =
+		&get_tv_norm(encoder)->ctv_enc_mode.mode;
+	int overscan, hmargin, vmargin, hratio, vratio;
+
+	/* The rescaler doesn't do the right thing for interlaced modes. */
+	if (output_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		overscan = 100;
+	else
+		overscan = tv_enc->overscan;
+
+	hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
+	vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;
+
+	hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20),
+			      hmargin, overscan);
+	vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20),
+			      vmargin, overscan);
+
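+	/* The scaling ratios are in units of 1/2048, i.e. 0x800 corresponds
+	 * to a 1:1 mapping between the CRTC mode and the TV output area. */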
+	hratio = crtc_mode->hdisplay * 0x800 /
+		(output_mode->hdisplay - 2*hmargin);
+	vratio = crtc_mode->vdisplay * 0x800 /
+		(output_mode->vdisplay - 2*vmargin) & ~3;
+
+	regs->fp_horiz_regs[FP_VALID_START] = hmargin;
+	regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
+	regs->fp_vert_regs[FP_VALID_START] = vmargin;
+	regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1;
+
+	regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
+		XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) |
+		NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
+		XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START,
+		      regs->fp_horiz_regs[FP_VALID_START]);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END,
+		      regs->fp_horiz_regs[FP_VALID_END]);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START,
+		      regs->fp_vert_regs[FP_VALID_START]);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END,
+		      regs->fp_vert_regs[FP_VALID_END]);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
new file mode 100644
index 0000000..bf13db4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "hw.h"
+#include <drm/drm_crtc_helper.h>
+
+#include <drm/i2c/ch7006.h>
+
+#include <subdev/i2c.h>
+
+static struct i2c_board_info nv04_tv_encoder_info[] = {
+	{
+		I2C_BOARD_INFO("ch7006", 0x75),
+		.platform_data = &(struct ch7006_encoder_params) {
+			CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
+			0, 0, 0,
+			CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
+			CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
+		}
+	},
+	{ }
+};
+
+int nv04_tv_identify(struct drm_device *dev, int i2c_index)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+
+	return i2c->identify(i2c, i2c_index, "TV encoder",
+			     nv04_tv_encoder_info, NULL);
+}
+
+
+#define PLLSEL_TV_CRTC1_MASK				\
+	(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1		\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1)
+#define PLLSEL_TV_CRTC2_MASK				\
+	(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2		\
+	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
+
+static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
+	uint8_t crtc1A;
+
+	NV_DEBUG(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
+		 mode, nv_encoder->dcb->index);
+
+	state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
+
+	if (mode == DRM_MODE_DPMS_ON) {
+		int head = nouveau_crtc(encoder->crtc)->index;
+		crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX);
+
+		state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK :
+					PLLSEL_TV_CRTC1_MASK;
+
+		/* Inhibit hsync */
+		crtc1A |= 0x80;
+
+		NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A);
+	}
+
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
+
+	get_slave_funcs(encoder)->dpms(encoder, mode);
+}
+
+static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
+{
+	struct nv04_crtc_reg *state = &nv04_display(dev)->mode_reg.crtc_reg[head];
+
+	state->tv_setup = 0;
+
+	if (bind)
+		state->CRTC[NV_CIO_CRE_49] |= 0x10;
+	else
+		state->CRTC[NV_CIO_CRE_49] &= ~0x10;
+
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX,
+		       state->CRTC[NV_CIO_CRE_LCD__INDEX]);
+	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49,
+		       state->CRTC[NV_CIO_CRE_49]);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP,
+		      state->tv_setup);
+}
+
+static void nv04_tv_prepare(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	int head = nouveau_crtc(encoder->crtc)->index;
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+	helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	nv04_dfp_disable(dev, head);
+
+	if (nv_two_heads(dev))
+		nv04_tv_bind(dev, head ^ 1, false);
+
+	nv04_tv_bind(dev, head, true);
+}
+
+static void nv04_tv_mode_set(struct drm_encoder *encoder,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+
+	regp->tv_htotal = adjusted_mode->htotal;
+	regp->tv_vtotal = adjusted_mode->vtotal;
+
+	/* These delay the TV signals with respect to the VGA port;
+	 * they might be useful if we ever allow a CRTC to drive
+	 * multiple outputs.
+	 */
+	regp->tv_hskew = 1;
+	regp->tv_hsync_delay = 1;
+	regp->tv_hsync_delay2 = 64;
+	regp->tv_vskew = 1;
+	regp->tv_vsync_delay = 1;
+
+	get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
+}
+
+static void nv04_tv_commit(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+	helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+	NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
+		 drm_get_connector_name(
+			&nouveau_encoder_connector_get(nv_encoder)->base),
+		 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+}
+
+static void nv04_tv_destroy(struct drm_encoder *encoder)
+{
+	get_slave_funcs(encoder)->destroy(encoder);
+	drm_encoder_cleanup(encoder);
+
+	kfree(encoder->helper_private);
+	kfree(nouveau_encoder(encoder));
+}
+
+static const struct drm_encoder_funcs nv04_tv_funcs = {
+	.destroy = nv04_tv_destroy,
+};
+
+static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = {
+	.dpms = nv04_tv_dpms,
+	.save = drm_i2c_encoder_save,
+	.restore = drm_i2c_encoder_restore,
+	.mode_fixup = drm_i2c_encoder_mode_fixup,
+	.prepare = nv04_tv_prepare,
+	.commit = nv04_tv_commit,
+	.mode_set = nv04_tv_mode_set,
+	.detect = drm_i2c_encoder_detect,
+};
+
+int
+nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
+{
+	struct nouveau_encoder *nv_encoder;
+	struct drm_encoder *encoder;
+	struct drm_device *dev = connector->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index);
+	int type, ret;
+
+	/* Ensure that we can talk to this encoder */
+	type = nv04_tv_identify(dev, entry->i2c_index);
+	if (type < 0)
+		return type;
+
+	/* Allocate the necessary memory */
+	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+	if (!nv_encoder)
+		return -ENOMEM;
+
+	/* Initialize the common members */
+	encoder = to_drm_encoder(nv_encoder);
+
+	drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC);
+	drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs);
+
+	encoder->possible_crtcs = entry->heads;
+	encoder->possible_clones = 0;
+	nv_encoder->dcb = entry;
+	nv_encoder->or = ffs(entry->or) - 1;
+
+	/* Run the slave-specific initialization */
+	ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
+				   &port->adapter, &nv04_tv_encoder_info[type]);
+	if (ret < 0)
+		goto fail_cleanup;
+
+	/* Attach it to the specified connector. */
+	get_slave_funcs(encoder)->create_resources(encoder, connector);
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	return 0;
+
+fail_cleanup:
+	drm_encoder_cleanup(encoder);
+	kfree(nv_encoder);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
new file mode 100644
index 0000000..acef48f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -0,0 +1,843 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "hw.h"
+#include "tvnv17.h"
+
+#include <core/device.h>
+
+#include <subdev/bios/gpio.h>
+#include <subdev/gpio.h>
+
+MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
+		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
+		 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
+		 "\t\tDefault: PAL\n"
+		 "\t\t*NOTE* Ignored for cards with external TV encoders.");
+static char *nouveau_tv_norm;
+module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
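+
+/*
+ * For example, assuming nouveau is built as a module:
+ *
+ *	modprobe nouveau tv_norm=PAL-M
+ *
+ * or, equivalently, nouveau.tv_norm=PAL-M on the kernel command line.
+ */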
+
+static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+	uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+	uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
+		fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
+	uint32_t sample = 0;
+	int head;
+
+#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
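+	/* Three 10-bit channels: RGB_TEST_DATA(0x82, 0xeb, 0x82) packs to
+	 * 0x82 | 0xeb << 10 | 0x82 << 20 == 0x823ac82. */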
+	testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
+	if (drm->vbios.tvdactestval)
+		testval = drm->vbios.tvdactestval;
+
+	dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+	head = (dacclk & 0x100) >> 8;
+
+	/* Save the previous state. */
+	gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
+	gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
+	fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
+	fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
+	fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
+	fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+	test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+	ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c);
+	ctv_14 = NVReadRAMDAC(dev, head, 0x680c14);
+	ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
+
+	/* Prepare the DAC for load detection.  */
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
+		      NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+		      NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 |
+		      NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
+		      NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |
+		      NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS);
+
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0);
+
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
+		      (dacclk & ~0xff) | 0x22);
+	msleep(1);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
+		      (dacclk & ~0xff) | 0x21);
+
+	NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20);
+	NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16);
+
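+	/* The load state of each DAC pin is reported in bits [31:28] of
+	 * TEST_CONTROL; the reads below keep pin 0x4 first, then pins
+	 * 0x2/0x8, matching the "sample >> 28 & 0xe" users in
+	 * nv17_tv_detect(). */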
+	/* Sample pin 0x4 (usually S-video luma). */
+	NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff);
+	msleep(20);
+	sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
+		& 0x4 << 28;
+
+	/* Sample the remaining pins. */
+	NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff);
+	msleep(20);
+	sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
+		& 0xa << 28;
+
+	/* Restore the previous state. */
+	NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c);
+	NVWriteRAMDAC(dev, head, 0x680c14, ctv_14);
+	NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
+
+	return sample;
+}
+
+static bool
+get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_object *device = drm->device;
+
+	/* Zotac FX5200 */
+	if (nv_device_match(device, 0x0322, 0x19da, 0x1035) ||
+	    nv_device_match(device, 0x0322, 0x19da, 0x2035)) {
+		*pin_mask = 0xc;
+		return false;
+	}
+
+	/* MSI nForce2 IGP */
+	if (nv_device_match(device, 0x01f0, 0x1462, 0x5710)) {
+		*pin_mask = 0xc;
+		return false;
+	}
+
+	return true;
+}
+
+static enum drm_connector_status
+nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_mode_config *conf = &dev->mode_config;
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	struct dcb_output *dcb = tv_enc->base.dcb;
+	bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask);
+
+	if (nv04_dac_in_use(encoder))
+		return connector_status_disconnected;
+
+	if (reliable) {
+		if (nv_device(drm->device)->chipset == 0x42 ||
+		    nv_device(drm->device)->chipset == 0x43)
+			tv_enc->pin_mask =
+				nv42_tv_sample_load(encoder) >> 28 & 0xe;
+		else
+			tv_enc->pin_mask =
+				nv17_dac_sample_load(encoder) >> 28 & 0xe;
+	}
+
+	switch (tv_enc->pin_mask) {
+	case 0x2:
+	case 0x4:
+		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
+		break;
+	case 0xc:
+		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
+		break;
+	case 0xe:
+		if (dcb->tvconf.has_component_output)
+			tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
+		else
+			tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
+		break;
+	default:
+		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+		break;
+	}
+
+	drm_object_property_set_value(&connector->base,
+					 conf->tv_subconnector_property,
+					 tv_enc->subconnector);
+
+	if (!reliable) {
+		return connector_status_unknown;
+	} else if (tv_enc->subconnector) {
+		NV_INFO(drm, "Load detected on output %c\n",
+			'@' + ffs(dcb->or));
+		return connector_status_connected;
+	} else {
+		return connector_status_disconnected;
+	}
+}
+
+static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
+				struct drm_connector *connector)
+{
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+	const struct drm_display_mode *tv_mode;
+	int n = 0;
+
+	for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
+		struct drm_display_mode *mode;
+
+		mode = drm_mode_duplicate(encoder->dev, tv_mode);
+
+		mode->clock = tv_norm->tv_enc_mode.vrefresh *
+			mode->htotal / 1000 *
+			mode->vtotal / 1000;
+
+		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+			mode->clock *= 2;
+
+		if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
+		    mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
+			mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+		drm_mode_probed_add(connector, mode);
+		n++;
+	}
+
+	return n;
+}
+
+static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
+				struct drm_connector *connector)
+{
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+	struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode;
+	struct drm_display_mode *mode;
+	const struct {
+		int hdisplay;
+		int vdisplay;
+	} modes[] = {
+		{ 640, 400 },
+		{ 640, 480 },
+		{ 720, 480 },
+		{ 720, 576 },
+		{ 800, 600 },
+		{ 1024, 768 },
+		{ 1280, 720 },
+		{ 1280, 1024 },
+		{ 1920, 1080 }
+	};
+	int i, n = 0;
+
+	for (i = 0; i < ARRAY_SIZE(modes); i++) {
+		if (modes[i].hdisplay > output_mode->hdisplay ||
+		    modes[i].vdisplay > output_mode->vdisplay)
+			continue;
+
+		if (modes[i].hdisplay == output_mode->hdisplay &&
+		    modes[i].vdisplay == output_mode->vdisplay) {
+			mode = drm_mode_duplicate(encoder->dev, output_mode);
+			mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+		} else {
+			mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
+					    modes[i].vdisplay, 60, false,
+					    (output_mode->flags &
+					     DRM_MODE_FLAG_INTERLACE), false);
+		}
+
+		/* CVT modes are sometimes unsuitable... */
+		if (output_mode->hdisplay <= 720
+		    || output_mode->hdisplay >= 1920) {
+			mode->htotal = output_mode->htotal;
+			mode->hsync_start = (mode->hdisplay + (mode->htotal
+					     - mode->hdisplay) * 9 / 10) & ~7;
+			mode->hsync_end = mode->hsync_start + 8;
+		}
+
+		if (output_mode->vdisplay >= 1024) {
+			mode->vtotal = output_mode->vtotal;
+			mode->vsync_start = output_mode->vsync_start;
+			mode->vsync_end = output_mode->vsync_end;
+		}
+
+		mode->type |= DRM_MODE_TYPE_DRIVER;
+		drm_mode_probed_add(connector, mode);
+		n++;
+	}
+
+	return n;
+}
+
+static int nv17_tv_get_modes(struct drm_encoder *encoder,
+			     struct drm_connector *connector)
+{
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+	if (tv_norm->kind == CTV_ENC_MODE)
+		return nv17_tv_get_hd_modes(encoder, connector);
+	else
+		return nv17_tv_get_ld_modes(encoder, connector);
+}
+
+static int nv17_tv_mode_valid(struct drm_encoder *encoder,
+			      struct drm_display_mode *mode)
+{
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+	if (tv_norm->kind == CTV_ENC_MODE) {
+		struct drm_display_mode *output_mode =
+						&tv_norm->ctv_enc_mode.mode;
+
+		if (mode->clock > 400000)
+			return MODE_CLOCK_HIGH;
+
+		if (mode->hdisplay > output_mode->hdisplay ||
+		    mode->vdisplay > output_mode->vdisplay)
+			return MODE_BAD;
+
+		if ((mode->flags & DRM_MODE_FLAG_INTERLACE) !=
+		    (output_mode->flags & DRM_MODE_FLAG_INTERLACE))
+			return MODE_NO_INTERLACE;
+
+		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+			return MODE_NO_DBLESCAN;
+
+	} else {
+		const int vsync_tolerance = 600;
+
+		if (mode->clock > 70000)
+			return MODE_CLOCK_HIGH;
+
+		if (abs(drm_mode_vrefresh(mode) * 1000 -
+			tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance)
+			return MODE_VSYNC;
+
+		/* The encoder takes care of the actual interlacing */
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			return MODE_NO_INTERLACE;
+	}
+
+	return MODE_OK;
+}
+
+static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
+			       const struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+	if (nv04_dac_in_use(encoder))
+		return false;
+
+	if (tv_norm->kind == CTV_ENC_MODE)
+		adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock;
+	else
+		adjusted_mode->clock = 90000;
+
+	return true;
+}
+
+static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+	struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+	if (nouveau_encoder(encoder)->last_dpms == mode)
+		return;
+	nouveau_encoder(encoder)->last_dpms = mode;
+
+	NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
+		 mode, nouveau_encoder(encoder)->dcb->index);
+
+	regs->ptv_200 &= ~1;
+
+	if (tv_norm->kind == CTV_ENC_MODE) {
+		nv04_dfp_update_fp_control(encoder, mode);
+
+	} else {
+		nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF);
+
+		if (mode == DRM_MODE_DPMS_ON)
+			regs->ptv_200 |= 1;
+	}
+
+	nv_load_ptv(dev, regs, 200);
+
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
+
+	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+static void nv17_tv_prepare(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+	int head = nouveau_crtc(encoder->crtc)->index;
+	uint8_t *cr_lcd = &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[
+							NV_CIO_CRE_LCD__INDEX];
+	uint32_t dacclk_off = NV_PRAMDAC_DACCLK +
+					nv04_dac_output_offset(encoder);
+	uint32_t dacclk;
+
+	helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	nv04_dfp_disable(dev, head);
+
+	/* Unbind any FP encoders from this head if we need the FP
+	 * stuff enabled. */
+	if (tv_norm->kind == CTV_ENC_MODE) {
+		struct drm_encoder *enc;
+
+		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+			struct dcb_output *dcb = nouveau_encoder(enc)->dcb;
+
+			if ((dcb->type == DCB_OUTPUT_TMDS ||
+			     dcb->type == DCB_OUTPUT_LVDS) &&
+			     !enc->crtc &&
+			     nv04_dfp_get_bound_head(dev, dcb) == head) {
+				nv04_dfp_bind_head(dev, dcb, head ^ 1,
+						drm->vbios.fp.dual_link);
+			}
+		}
+
+	}
+
+	if (tv_norm->kind == CTV_ENC_MODE)
+		*cr_lcd |= 0x1 | (head ? 0x0 : 0x8);
+
+	/* Set the DACCLK register */
+	dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
+
+	if (nv_device(drm->device)->card_type == NV_40)
+		dacclk |= 0x1a << 16;
+
+	if (tv_norm->kind == CTV_ENC_MODE) {
+		dacclk |= 0x20;
+
+		if (head)
+			dacclk |= 0x100;
+		else
+			dacclk &= ~0x100;
+
+	} else {
+		dacclk |= 0x10;
+	}
+
+	NVWriteRAMDAC(dev, 0, dacclk_off, dacclk);
+}
+
+static void nv17_tv_mode_set(struct drm_encoder *encoder,
+			     struct drm_display_mode *drm_mode,
+			     struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	int head = nouveau_crtc(encoder->crtc)->index;
+	struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head];
+	struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state;
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+	int i;
+
+	regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */
+	regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */
+	regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) */
+	regs->tv_setup = 1;
+	regs->ramdac_8c0 = 0x0;
+
+	if (tv_norm->kind == TV_ENC_MODE) {
+		tv_regs->ptv_200 = 0x13111100;
+		if (head)
+			tv_regs->ptv_200 |= 0x10;
+
+		tv_regs->ptv_20c = 0x808010;
+		tv_regs->ptv_304 = 0x2d00000;
+		tv_regs->ptv_600 = 0x0;
+		tv_regs->ptv_60c = 0x0;
+		tv_regs->ptv_610 = 0x1e00000;
+
+		if (tv_norm->tv_enc_mode.vdisplay == 576) {
+			tv_regs->ptv_508 = 0x1200000;
+			tv_regs->ptv_614 = 0x33;
+
+		} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
+			tv_regs->ptv_508 = 0xf00000;
+			tv_regs->ptv_614 = 0x13;
+		}
+
+		if (nv_device(drm->device)->card_type >= NV_30) {
+			tv_regs->ptv_500 = 0xe8e0;
+			tv_regs->ptv_504 = 0x1710;
+			tv_regs->ptv_604 = 0x0;
+			tv_regs->ptv_608 = 0x0;
+		} else {
+			if (tv_norm->tv_enc_mode.vdisplay == 576) {
+				tv_regs->ptv_604 = 0x20;
+				tv_regs->ptv_608 = 0x10;
+				tv_regs->ptv_500 = 0x19710;
+				tv_regs->ptv_504 = 0x68f0;
+
+			} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
+				tv_regs->ptv_604 = 0x10;
+				tv_regs->ptv_608 = 0x20;
+				tv_regs->ptv_500 = 0x4b90;
+				tv_regs->ptv_504 = 0x1b480;
+			}
+		}
+
+		for (i = 0; i < 0x40; i++)
+			tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i];
+
+	} else {
+		struct drm_display_mode *output_mode =
+						&tv_norm->ctv_enc_mode.mode;
+
+		/* The registers in PRAMDAC+0xc00 control some timings and CSC
+		 * parameters for the CTV encoder (it's only used for "HD" TV
+		 * modes; I don't have enough of it working to say exactly
+		 * what they mean). It's probably connected at the output of
+		 * the FP encoder, but it also needs the analog encoder in
+		 * its OR enabled and routed to the head it's using. It's
+		 * enabled with bits [5:4] of the DACCLK register.
+		 */
+		for (i = 0; i < 38; i++)
+			regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i];
+
+		regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
+		regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
+		regs->fp_horiz_regs[FP_SYNC_START] =
+						output_mode->hsync_start - 1;
+		regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
+		regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay +
+			max((output_mode->hdisplay-600)/40 - 1, 1);
+
+		regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
+		regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
+		regs->fp_vert_regs[FP_SYNC_START] =
+						output_mode->vsync_start - 1;
+		regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
+		regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1;
+
+		regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+			NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
+			NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
+
+		if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
+			regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
+		if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
+			regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
+
+		regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
+			NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
+			NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
+			NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
+			NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
+			NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
+			NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
+
+		regs->fp_debug_2 = 0;
+
+		regs->fp_margin_color = 0x801080;
+
+	}
+}
+
+static void nv17_tv_commit(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+	if (get_tv_norm(encoder)->kind == TV_ENC_MODE) {
+		nv17_tv_update_rescaler(encoder);
+		nv17_tv_update_properties(encoder);
+	} else {
+		nv17_ctv_update_rescaler(encoder);
+	}
+
+	nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
+
+	/* This could use refinement for flatpanels, but it should work */
+	if (nv_device(drm->device)->chipset < 0x44)
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
+					nv04_dac_output_offset(encoder),
+					0xf0000000);
+	else
+		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
+					nv04_dac_output_offset(encoder),
+					0x00100000);
+
+	helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+	NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
+		drm_get_connector_name(
+			&nouveau_encoder_connector_get(nv_encoder)->base),
+		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+}
+
+static void nv17_tv_save(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+
+	nouveau_encoder(encoder)->restore.output =
+					NVReadRAMDAC(dev, 0,
+					NV_PRAMDAC_DACCLK +
+					nv04_dac_output_offset(encoder));
+
+	nv17_tv_state_save(dev, &tv_enc->saved_state);
+
+	tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200;
+}
+
+static void nv17_tv_restore(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
+				nv04_dac_output_offset(encoder),
+				nouveau_encoder(encoder)->restore.output);
+
+	nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+	nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
+}
+
+static int nv17_tv_create_resources(struct drm_encoder *encoder,
+				    struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_mode_config *conf = &dev->mode_config;
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
+	int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS :
+							NUM_LD_TV_NORMS;
+	int i;
+
+	if (nouveau_tv_norm) {
+		for (i = 0; i < num_tv_norms; i++) {
+			if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) {
+				tv_enc->tv_norm = i;
+				break;
+			}
+		}
+
+		if (i == num_tv_norms)
+			NV_WARN(drm, "Invalid TV norm setting \"%s\"\n",
+				nouveau_tv_norm);
+	}
+
+	drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
+
+	drm_object_attach_property(&connector->base,
+					conf->tv_select_subconnector_property,
+					tv_enc->select_subconnector);
+	drm_object_attach_property(&connector->base,
+					conf->tv_subconnector_property,
+					tv_enc->subconnector);
+	drm_object_attach_property(&connector->base,
+					conf->tv_mode_property,
+					tv_enc->tv_norm);
+	drm_object_attach_property(&connector->base,
+					conf->tv_flicker_reduction_property,
+					tv_enc->flicker);
+	drm_object_attach_property(&connector->base,
+					conf->tv_saturation_property,
+					tv_enc->saturation);
+	drm_object_attach_property(&connector->base,
+					conf->tv_hue_property,
+					tv_enc->hue);
+	drm_object_attach_property(&connector->base,
+					conf->tv_overscan_property,
+					tv_enc->overscan);
+
+	return 0;
+}
+
+static int nv17_tv_set_property(struct drm_encoder *encoder,
+				struct drm_connector *connector,
+				struct drm_property *property,
+				uint64_t val)
+{
+	struct drm_mode_config *conf = &encoder->dev->mode_config;
+	struct drm_crtc *crtc = encoder->crtc;
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+	bool modes_changed = false;
+
+	if (property == conf->tv_overscan_property) {
+		tv_enc->overscan = val;
+		if (encoder->crtc) {
+			if (tv_norm->kind == CTV_ENC_MODE)
+				nv17_ctv_update_rescaler(encoder);
+			else
+				nv17_tv_update_rescaler(encoder);
+		}
+
+	} else if (property == conf->tv_saturation_property) {
+		if (tv_norm->kind != TV_ENC_MODE)
+			return -EINVAL;
+
+		tv_enc->saturation = val;
+		nv17_tv_update_properties(encoder);
+
+	} else if (property == conf->tv_hue_property) {
+		if (tv_norm->kind != TV_ENC_MODE)
+			return -EINVAL;
+
+		tv_enc->hue = val;
+		nv17_tv_update_properties(encoder);
+
+	} else if (property == conf->tv_flicker_reduction_property) {
+		if (tv_norm->kind != TV_ENC_MODE)
+			return -EINVAL;
+
+		tv_enc->flicker = val;
+		if (encoder->crtc)
+			nv17_tv_update_rescaler(encoder);
+
+	} else if (property == conf->tv_mode_property) {
+		if (connector->dpms != DRM_MODE_DPMS_OFF)
+			return -EINVAL;
+
+		tv_enc->tv_norm = val;
+
+		modes_changed = true;
+
+	} else if (property == conf->tv_select_subconnector_property) {
+		if (tv_norm->kind != TV_ENC_MODE)
+			return -EINVAL;
+
+		tv_enc->select_subconnector = val;
+		nv17_tv_update_properties(encoder);
+
+	} else {
+		return -EINVAL;
+	}
+
+	if (modes_changed) {
+		drm_helper_probe_single_connector_modes(connector, 0, 0);
+
+		/* Disable the crtc to ensure a full modeset is
+		 * performed whenever it's turned on again. */
+		if (crtc) {
+			struct drm_mode_set modeset = {
+				.crtc = crtc,
+			};
+
+			drm_mode_set_config_internal(&modeset);
+		}
+	}
+
+	return 0;
+}
+
+static void nv17_tv_destroy(struct drm_encoder *encoder)
+{
+	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+
+	drm_encoder_cleanup(encoder);
+	kfree(tv_enc);
+}
+
+static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
+	.dpms = nv17_tv_dpms,
+	.save = nv17_tv_save,
+	.restore = nv17_tv_restore,
+	.mode_fixup = nv17_tv_mode_fixup,
+	.prepare = nv17_tv_prepare,
+	.commit = nv17_tv_commit,
+	.mode_set = nv17_tv_mode_set,
+	.detect = nv17_tv_detect,
+};
+
+static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
+	.get_modes = nv17_tv_get_modes,
+	.mode_valid = nv17_tv_mode_valid,
+	.create_resources = nv17_tv_create_resources,
+	.set_property = nv17_tv_set_property,
+};
+
+static struct drm_encoder_funcs nv17_tv_funcs = {
+	.destroy = nv17_tv_destroy,
+};
+
+int
+nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_encoder *encoder;
+	struct nv17_tv_encoder *tv_enc = NULL;
+
+	tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL);
+	if (!tv_enc)
+		return -ENOMEM;
+
+	tv_enc->overscan = 50;
+	tv_enc->flicker = 50;
+	tv_enc->saturation = 50;
+	tv_enc->hue = 0;
+	tv_enc->tv_norm = TV_NORM_PAL;
+	tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+	tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
+	tv_enc->pin_mask = 0;
+
+	encoder = to_drm_encoder(&tv_enc->base);
+
+	tv_enc->base.dcb = entry;
+	tv_enc->base.or = ffs(entry->or) - 1;
+
+	drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC);
+	drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
+	to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
+
+	encoder->possible_crtcs = entry->heads;
+	encoder->possible_clones = 0;
+
+	nv17_tv_create_resources(encoder, connector);
+	drm_mode_connector_attach_encoder(connector, encoder);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
new file mode 100644
index 0000000..7b33154
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NV17_TV_H__
+#define __NV17_TV_H__
+
+struct nv17_tv_state {
+	uint8_t tv_enc[0x40];
+
+	uint32_t hfilter[4][7];
+	uint32_t hfilter2[4][7];
+	uint32_t vfilter[4][7];
+
+	uint32_t ptv_200;
+	uint32_t ptv_204;
+	uint32_t ptv_208;
+	uint32_t ptv_20c;
+	uint32_t ptv_304;
+	uint32_t ptv_500;
+	uint32_t ptv_504;
+	uint32_t ptv_508;
+	uint32_t ptv_600;
+	uint32_t ptv_604;
+	uint32_t ptv_608;
+	uint32_t ptv_60c;
+	uint32_t ptv_610;
+	uint32_t ptv_614;
+};
+
+enum nv17_tv_norm {
+	TV_NORM_PAL,
+	TV_NORM_PAL_M,
+	TV_NORM_PAL_N,
+	TV_NORM_PAL_NC,
+	TV_NORM_NTSC_M,
+	TV_NORM_NTSC_J,
+	NUM_LD_TV_NORMS,
+	TV_NORM_HD480I = NUM_LD_TV_NORMS,
+	TV_NORM_HD480P,
+	TV_NORM_HD576I,
+	TV_NORM_HD576P,
+	TV_NORM_HD720P,
+	TV_NORM_HD1080I,
+	NUM_TV_NORMS
+};
+
+struct nv17_tv_encoder {
+	struct nouveau_encoder base;
+
+	struct nv17_tv_state state;
+	struct nv17_tv_state saved_state;
+
+	int overscan;
+	int flicker;
+	int saturation;
+	int hue;
+	enum nv17_tv_norm tv_norm;
+	int subconnector;
+	int select_subconnector;
+	uint32_t pin_mask;
+};
+#define to_tv_enc(x) container_of(nouveau_encoder(x),		\
+				  struct nv17_tv_encoder, base)
+
+extern char *nv17_tv_norm_names[NUM_TV_NORMS];
+
+extern struct nv17_tv_norm_params {
+	enum {
+		TV_ENC_MODE,
+		CTV_ENC_MODE,
+	} kind;
+
+	union {
+		struct {
+			int hdisplay;
+			int vdisplay;
+			int vrefresh; /* mHz */
+
+			uint8_t tv_enc[0x40];
+		} tv_enc_mode;
+
+		struct {
+			struct drm_display_mode mode;
+
+			uint32_t ctv_regs[38];
+		} ctv_enc_mode;
+	};
+
+} nv17_tv_norms[NUM_TV_NORMS];
+#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm])
+
+extern const struct drm_display_mode nv17_tv_modes[];
+
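+/* Piecewise-linear interpolation through (0, y0), (50, y1) and (100, y2);
+ * used to map 0-100 property values onto hardware ranges.  For example,
+ * x == 0 yields y0, x == 50 yields y1 and x == 100 yields y2. */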
+static inline int interpolate(int y0, int y1, int y2, int x)
+{
+	return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
+}
+
+void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state);
+void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state);
+void nv17_tv_update_properties(struct drm_encoder *encoder);
+void nv17_tv_update_rescaler(struct drm_encoder *encoder);
+void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
+
+/* TV hardware access functions */
+
+static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
+				uint32_t val)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	nv_wr32(device, reg, val);
+}
+
+static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	return nv_rd32(device, reg);
+}
+
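+/* The TV encoder registers are reached indirectly through an index/data
+ * pair: write the register number to NV_PTV_TV_INDEX, then access the
+ * value through NV_PTV_TV_DATA. */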
+static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
+				   uint8_t val)
+{
+	nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
+	nv_write_ptv(dev, NV_PTV_TV_DATA, val);
+}
+
+static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg)
+{
+	nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
+	return nv_read_ptv(dev, NV_PTV_TV_DATA);
+}
+
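+/* The register offset is passed as a bare hex digit sequence and pasted
+ * into both the register address and the field name, eg.
+ * nv_load_ptv(dev, state, 200) writes state->ptv_200 to
+ * NV_PTV_OFFSET + 0x200. */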
+#define nv_load_ptv(dev, state, reg) \
+	nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
+#define nv_save_ptv(dev, state, reg) \
+	state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
+#define nv_load_tv_enc(dev, state, reg) \
+	nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_abi16.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_abi16.c
new file mode 100644
index 0000000..1c4c6c9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/device.h>
+#include <core/class.h>
+#include <core/mm.h>
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <subdev/instmem.h>
+#include <engine/graph.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_gem.h"
+#include "nouveau_chan.h"
+#include "nouveau_abi16.h"
+
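+/* Look up (or create on first use) the client's abi16 state.  On success
+ * the client mutex is left held and must be dropped with
+ * nouveau_abi16_put(); on allocation failure NULL is returned with the
+ * mutex released. */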
+struct nouveau_abi16 *
+nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
+{
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	mutex_lock(&cli->mutex);
+	if (!cli->abi16) {
+		struct nouveau_abi16 *abi16;
+		cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
+		if (cli->abi16) {
+			INIT_LIST_HEAD(&abi16->channels);
+			abi16->client = nv_object(cli);
+
+			/* allocate device object targeting client's default
+			 * device (ie. the one that belongs to the fd it
+			 * opened)
+			 */
+			if (nouveau_object_new(abi16->client, NVDRM_CLIENT,
+					       NVDRM_DEVICE, 0x0080,
+					       &(struct nv_device_class) {
+						.device = ~0ULL,
+					       },
+					       sizeof(struct nv_device_class),
+					       &abi16->device) == 0)
+				return cli->abi16;
+
+			kfree(cli->abi16);
+			cli->abi16 = NULL;
+		}
+
+		mutex_unlock(&cli->mutex);
+	}
+	return cli->abi16;
+}
+
+int
+nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
+{
+	struct nouveau_cli *cli = (void *)abi16->client;
+	mutex_unlock(&cli->mutex);
+	return ret;
+}
+
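+/* Return the per-generation software object class, used to translate the
+ * 0x506e class that older userspace requests regardless of chipset (see
+ * nouveau_abi16_ioctl_grobj_alloc()). */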
+u16
+nouveau_abi16_swclass(struct nouveau_drm *drm)
+{
+	switch (nv_device(drm->device)->card_type) {
+	case NV_04:
+		return 0x006e;
+	case NV_10:
+	case NV_20:
+	case NV_30:
+	case NV_40:
+		return 0x016e;
+	case NV_50:
+		return 0x506e;
+	case NV_C0:
+	case NV_D0:
+	case NV_E0:
+		return 0x906e;
+	}
+
+	return 0x0000;
+}
+
+static void
+nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
+			struct nouveau_abi16_ntfy *ntfy)
+{
+	nouveau_mm_free(&chan->heap, &ntfy->node);
+	list_del(&ntfy->head);
+	kfree(ntfy);
+}
+
+static void
+nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
+			struct nouveau_abi16_chan *chan)
+{
+	struct nouveau_abi16_ntfy *ntfy, *temp;
+
+	/* wait for all activity to stop before releasing notify object, which
+	 * may still be in use */
+	if (chan->chan && chan->ntfy)
+		nouveau_channel_idle(chan->chan);
+
+	/* cleanup notifier state */
+	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
+		nouveau_abi16_ntfy_fini(chan, ntfy);
+	}
+
+	if (chan->ntfy) {
+		nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
+		drm_gem_object_unreference_unlocked(chan->ntfy->gem);
+	}
+
+	if (chan->heap.block_size)
+		nouveau_mm_fini(&chan->heap);
+
+	/* destroy channel object, all children will be killed too */
+	if (chan->chan) {
+		abi16->handles &= ~(1 << (chan->chan->handle & 0xffff));
+		nouveau_channel_del(&chan->chan);
+	}
+
+	list_del(&chan->head);
+	kfree(chan);
+}
+
+void
+nouveau_abi16_fini(struct nouveau_abi16 *abi16)
+{
+	struct nouveau_cli *cli = (void *)abi16->client;
+	struct nouveau_abi16_chan *chan, *temp;
+
+	/* cleanup channels */
+	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
+		nouveau_abi16_chan_fini(abi16, chan);
+	}
+
+	/* destroy the device object */
+	nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE);
+
+	kfree(cli->abi16);
+	cli->abi16 = NULL;
+}
+
+int
+nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_timer *ptimer = nouveau_timer(device);
+	struct nouveau_graph *graph = (void *)nouveau_engine(device, NVDEV_ENGINE_GR);
+	struct drm_nouveau_getparam *getparam = data;
+
+	switch (getparam->param) {
+	case NOUVEAU_GETPARAM_CHIPSET_ID:
+		getparam->value = device->chipset;
+		break;
+	case NOUVEAU_GETPARAM_PCI_VENDOR:
+		getparam->value = dev->pci_vendor;
+		break;
+	case NOUVEAU_GETPARAM_PCI_DEVICE:
+		getparam->value = dev->pci_device;
+		break;
+	case NOUVEAU_GETPARAM_BUS_TYPE:
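+		/* 0 = AGP, 1 = PCI, 2 = PCI Express */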
+		if (drm_pci_device_is_agp(dev))
+			getparam->value = 0;
+		else
+		if (!pci_is_pcie(dev->pdev))
+			getparam->value = 1;
+		else
+			getparam->value = 2;
+		break;
+	case NOUVEAU_GETPARAM_FB_SIZE:
+		getparam->value = drm->gem.vram_available;
+		break;
+	case NOUVEAU_GETPARAM_AGP_SIZE:
+		getparam->value = drm->gem.gart_available;
+		break;
+	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
+		getparam->value = 0; /* deprecated */
+		break;
+	case NOUVEAU_GETPARAM_PTIMER_TIME:
+		getparam->value = ptimer->read(ptimer);
+		break;
+	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
+		getparam->value = 1;
+		break;
+	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
+		getparam->value = 1;
+		break;
+	case NOUVEAU_GETPARAM_GRAPH_UNITS:
+		getparam->value = graph->units ? graph->units(graph) : 0;
+		break;
+	default:
+		nv_debug(device, "unknown parameter %lld\n", getparam->param);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int
+nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
+{
+	return -EINVAL;
+}
+
+int
+nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
+{
+	struct drm_nouveau_channel_alloc *init = data;
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_abi16_chan *chan;
+	struct nouveau_client *client;
+	struct nouveau_device *device;
+	struct nouveau_instmem *imem;
+	struct nouveau_fb *pfb;
+	int ret;
+
+	if (unlikely(!abi16))
+		return -ENOMEM;
+
+	if (!drm->channel)
+		return nouveau_abi16_put(abi16, -ENODEV);
+
+	client = nv_client(abi16->client);
+	device = nv_device(abi16->device);
+	imem   = nouveau_instmem(device);
+	pfb    = nouveau_fb(device);
+
+	/* hack to allow channel engine type specification on Kepler */
+	if (device->card_type >= NV_E0) {
+		if (init->fb_ctxdma_handle != ~0)
+			init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
+		else
+			init->fb_ctxdma_handle = init->tt_ctxdma_handle;
+
+		/* allow flips to be executed if this is a graphics channel */
+		init->tt_ctxdma_handle = 0;
+		if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
+			init->tt_ctxdma_handle = 1;
+	}
+
+	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+		return nouveau_abi16_put(abi16, -EINVAL);
+
+	/* allocate "abi16 channel" data and make up a handle for it */
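+	/* ffsll() gives the 1-based index of the lowest clear bit in the
+	 * handle bitmask (ie. the lowest free channel), or 0 when all 64
+	 * are taken; the post-decrement converts it to a 0-based channel
+	 * number and lets the all-taken case return -ENOSPC */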
+	init->channel = ffsll(~abi16->handles);
+	if (!init->channel--)
+		return nouveau_abi16_put(abi16, -ENOSPC);
+
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return nouveau_abi16_put(abi16, -ENOMEM);
+
+	INIT_LIST_HEAD(&chan->notifiers);
+	list_add(&chan->head, &abi16->channels);
+	abi16->handles |= (1 << init->channel);
+
+	/* create channel object and initialise dma and fence management */
+	ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
+				  init->channel, init->fb_ctxdma_handle,
+				  init->tt_ctxdma_handle, &chan->chan);
+	if (ret)
+		goto done;
+
+	if (device->card_type >= NV_50)
+		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+					NOUVEAU_GEM_DOMAIN_GART;
+	else
+	if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
+		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
+	else
+		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+
+	if (device->card_type < NV_C0) {
+		init->subchan[0].handle = 0x00000000;
+		init->subchan[0].grclass = 0x0000;
+		init->subchan[1].handle = NvSw;
+		init->subchan[1].grclass = 0x506e;
+		init->nr_subchan = 2;
+	}
+
+	/* Named memory object area */
+	ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
+			      0, 0, &chan->ntfy);
+	if (ret == 0)
+		ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT);
+	if (ret)
+		goto done;
+
+	if (device->card_type >= NV_50) {
+		ret = nouveau_bo_vma_add(chan->ntfy, client->vm,
+					&chan->ntfy_vma);
+		if (ret)
+			goto done;
+	}
+
+	ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
+				    &init->notifier_handle);
+	if (ret)
+		goto done;
+
+	ret = nouveau_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
+done:
+	if (ret)
+		nouveau_abi16_chan_fini(abi16, chan);
+	return nouveau_abi16_put(abi16, ret);
+}
+
+int
+nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
+{
+	struct drm_nouveau_channel_free *req = data;
+	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_abi16_chan *chan;
+	int ret = -ENOENT;
+
+	if (unlikely(!abi16))
+		return -ENOMEM;
+
+	list_for_each_entry(chan, &abi16->channels, head) {
+		if (chan->chan->handle == (NVDRM_CHAN | req->channel)) {
+			nouveau_abi16_chan_fini(abi16, chan);
+			return nouveau_abi16_put(abi16, 0);
+		}
+	}
+
+	return nouveau_abi16_put(abi16, ret);
+}
+
+int
+nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
+{
+	struct drm_nouveau_grobj_alloc *init = data;
+	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_object *object;
+	int ret;
+
+	if (unlikely(!abi16))
+		return -ENOMEM;
+
+	if (init->handle == ~0)
+		return nouveau_abi16_put(abi16, -EINVAL);
+
+	/* compatibility with userspace that assumes 506e for all chipsets */
+	if (init->class == 0x506e) {
+		init->class = nouveau_abi16_swclass(drm);
+		if (init->class == 0x906e)
+			return nouveau_abi16_put(abi16, 0);
+	}
+
+	ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel,
+				  init->handle, init->class, NULL, 0, &object);
+	return nouveau_abi16_put(abi16, ret);
+}
+
+int
+nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
+{
+	struct drm_nouveau_notifierobj_alloc *info = data;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_abi16_chan *chan = NULL, *temp;
+	struct nouveau_abi16_ntfy *ntfy;
+	struct nouveau_object *object;
+	struct nv_dma_class args = {};
+	int ret;
+
+	if (unlikely(!abi16))
+		return -ENOMEM;
+
+	/* completely unnecessary for these chipsets... */
+	if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
+		return nouveau_abi16_put(abi16, -EINVAL);
+
+	list_for_each_entry(temp, &abi16->channels, head) {
+		if (temp->chan->handle == (NVDRM_CHAN | info->channel)) {
+			chan = temp;
+			break;
+		}
+	}
+
+	if (!chan)
+		return nouveau_abi16_put(abi16, -ENOENT);
+
+	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
+	if (!ntfy)
+		return nouveau_abi16_put(abi16, -ENOMEM);
+
+	list_add(&ntfy->head, &chan->notifiers);
+	ntfy->handle = info->handle;
+
+	ret = nouveau_mm_head(&chan->heap, 1, info->size, info->size, 1,
+			      &ntfy->node);
+	if (ret)
+		goto done;
+
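+	/* describe the notifier block as a DMA object; addresses are
+	 * relative to the selected target (the channel VM on nv50+, the
+	 * AGP aperture, or plain linear memory otherwise) */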
+	args.start = ntfy->node->offset;
+	args.limit = ntfy->node->offset + ntfy->node->length - 1;
+	if (device->card_type >= NV_50) {
+		args.flags  = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+		args.start += chan->ntfy_vma.offset;
+		args.limit += chan->ntfy_vma.offset;
+	} else
+	if (drm->agp.stat == ENABLED) {
+		args.flags  = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+		args.start += drm->agp.base + chan->ntfy->bo.offset;
+		args.limit += drm->agp.base + chan->ntfy->bo.offset;
+	} else {
+		args.flags  = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+		args.start += chan->ntfy->bo.offset;
+		args.limit += chan->ntfy->bo.offset;
+	}
+
+	ret = nouveau_object_new(abi16->client, chan->chan->handle,
+				 ntfy->handle, 0x003d, &args,
+				 sizeof(args), &object);
+	if (ret)
+		goto done;
+
+done:
+	if (ret)
+		nouveau_abi16_ntfy_fini(chan, ntfy);
+	return nouveau_abi16_put(abi16, ret);
+}
+
+int
+nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
+{
+	struct drm_nouveau_gpuobj_free *fini = data;
+	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_abi16_chan *chan = NULL, *temp;
+	struct nouveau_abi16_ntfy *ntfy;
+	int ret;
+
+	if (unlikely(!abi16))
+		return -ENOMEM;
+
+	list_for_each_entry(temp, &abi16->channels, head) {
+		if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) {
+			chan = temp;
+			break;
+		}
+	}
+
+	if (!chan)
+		return nouveau_abi16_put(abi16, -ENOENT);
+
+	/* synchronize with the user channel and destroy the gpu object */
+	nouveau_channel_idle(chan->chan);
+
+	ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle);
+	if (ret)
+		return nouveau_abi16_put(abi16, ret);
+
+	/* cleanup extra state if this object was a notifier */
+	list_for_each_entry(ntfy, &chan->notifiers, head) {
+		if (ntfy->handle == fini->handle) {
+			nouveau_mm_free(&chan->heap, &ntfy->node);
+			list_del(&ntfy->head);
+			break;
+		}
+	}
+
+	return nouveau_abi16_put(abi16, 0);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_abi16.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_abi16.h
new file mode 100644
index 0000000..9000408
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -0,0 +1,115 @@
+#ifndef __NOUVEAU_ABI16_H__
+#define __NOUVEAU_ABI16_H__
+
+#define ABI16_IOCTL_ARGS                                                       \
+	struct drm_device *dev, void *data, struct drm_file *file_priv
+
+int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
+
+struct nouveau_abi16_ntfy {
+	struct list_head head;
+	struct nouveau_mm_node *node;
+	u32 handle;
+};
+
+struct nouveau_abi16_chan {
+	struct list_head head;
+	struct nouveau_channel *chan;
+	struct list_head notifiers;
+	struct nouveau_bo *ntfy;
+	struct nouveau_vma ntfy_vma;
+	struct nouveau_mm  heap;
+};
+
+struct nouveau_abi16 {
+	struct nouveau_object *client;
+	struct nouveau_object *device;
+	struct list_head channels;
+	u64 handles;
+};
+
+struct nouveau_drm;
+struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *);
+int  nouveau_abi16_put(struct nouveau_abi16 *, int);
+void nouveau_abi16_fini(struct nouveau_abi16 *);
+u16  nouveau_abi16_swclass(struct nouveau_drm *);
+
+#define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
+#define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
+
+struct drm_nouveau_channel_alloc {
+	uint32_t     fb_ctxdma_handle;
+	uint32_t     tt_ctxdma_handle;
+
+	int          channel;
+	uint32_t     pushbuf_domains;
+
+	/* Notifier memory */
+	uint32_t     notifier_handle;
+
+	/* DRM-enforced subchannel assignments */
+	struct {
+		uint32_t handle;
+		uint32_t grclass;
+	} subchan[8];
+	uint32_t nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+	int channel;
+};
+
+struct drm_nouveau_grobj_alloc {
+	int      channel;
+	uint32_t handle;
+	int      class;
+};
+
+struct drm_nouveau_notifierobj_alloc {
+	uint32_t channel;
+	uint32_t handle;
+	uint32_t size;
+	uint32_t offset;
+};
+
+struct drm_nouveau_gpuobj_free {
+	int      channel;
+	uint32_t handle;
+};
+
+#define NOUVEAU_GETPARAM_PCI_VENDOR      3
+#define NOUVEAU_GETPARAM_PCI_DEVICE      4
+#define NOUVEAU_GETPARAM_BUS_TYPE        5
+#define NOUVEAU_GETPARAM_FB_SIZE         8
+#define NOUVEAU_GETPARAM_AGP_SIZE        9
+#define NOUVEAU_GETPARAM_CHIPSET_ID      11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS     13
+#define NOUVEAU_GETPARAM_PTIMER_TIME     14
+#define NOUVEAU_GETPARAM_HAS_BO_USAGE    15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16
+struct drm_nouveau_getparam {
+	uint64_t param;
+	uint64_t value;
+};
+
+struct drm_nouveau_setparam {
+	uint64_t param;
+	uint64_t value;
+};
+
+#define DRM_IOCTL_NOUVEAU_GETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_SETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
+#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC  DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
+#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_acpi.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_acpi.c
new file mode 100644
index 0000000..5cec3a0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -0,0 +1,422 @@
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/video.h>
+#include <acpi/acpi.h>
+#include <linux/mxm-wmi.h>
+
+#include <linux/vga_switcheroo.h>
+
+#include <drm/drm_edid.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_acpi.h"
+
+#define NOUVEAU_DSM_LED 0x02
+#define NOUVEAU_DSM_LED_STATE 0x00
+#define NOUVEAU_DSM_LED_OFF 0x10
+#define NOUVEAU_DSM_LED_STAMINA 0x11
+#define NOUVEAU_DSM_LED_SPEED 0x12
+
+#define NOUVEAU_DSM_POWER 0x03
+#define NOUVEAU_DSM_POWER_STATE 0x00
+#define NOUVEAU_DSM_POWER_SPEED 0x01
+#define NOUVEAU_DSM_POWER_STAMINA 0x02
+
+#define NOUVEAU_DSM_OPTIMUS_FN 0x1A
+#define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001
+
+static struct nouveau_dsm_priv {
+	bool dsm_detected;
+	bool optimus_detected;
+	acpi_handle dhandle;
+	acpi_handle rom_handle;
+} nouveau_dsm_priv;
+
+bool nouveau_is_optimus(void)
+{
+	return nouveau_dsm_priv.optimus_detected;
+}
+
+bool nouveau_is_v1_dsm(void)
+{
+	return nouveau_dsm_priv.dsm_detected;
+}
+
+#define NOUVEAU_DSM_HAS_MUX 0x1
+#define NOUVEAU_DSM_HAS_OPT 0x2
+
+static const char nouveau_dsm_muid[] = {
+	0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
+	0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
+};
+
+static const char nouveau_op_dsm_muid[] = {
+	0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47,
+	0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0,
+};
+
+static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
+{
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_object_list input;
+	union acpi_object params[4];
+	union acpi_object *obj;
+	int i, err;
+	char args_buff[4];
+
+	input.count = 4;
+	input.pointer = params;
+	params[0].type = ACPI_TYPE_BUFFER;
+	params[0].buffer.length = sizeof(nouveau_op_dsm_muid);
+	params[0].buffer.pointer = (char *)nouveau_op_dsm_muid;
+	params[1].type = ACPI_TYPE_INTEGER;
+	params[1].integer.value = 0x00000100;
+	params[2].type = ACPI_TYPE_INTEGER;
+	params[2].integer.value = func;
+	params[3].type = ACPI_TYPE_BUFFER;
+	params[3].buffer.length = 4;
+	/* ACPI is little endian, AABBCCDD becomes {DD,CC,BB,AA} */
+	for (i = 0; i < 4; i++)
+		args_buff[i] = (arg >> i * 8) & 0xFF;
+	params[3].buffer.pointer = args_buff;
+
+	err = acpi_evaluate_object(handle, "_DSM", &input, &output);
+	if (err) {
+		printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
+		return err;
+	}
+
+	obj = (union acpi_object *)output.pointer;
+
+	if (obj->type == ACPI_TYPE_INTEGER)
+		if (obj->integer.value == 0x80000002) {
+			return -ENODEV;
+		}
+
+	if (obj->type == ACPI_TYPE_BUFFER) {
+		if (obj->buffer.length == 4 && result) {
+			*result = 0;
+			*result |= obj->buffer.pointer[0];
+			*result |= (obj->buffer.pointer[1] << 8);
+			*result |= (obj->buffer.pointer[2] << 16);
+			*result |= (obj->buffer.pointer[3] << 24);
+		}
+	}
+
+	kfree(output.pointer);
+	return 0;
+}
+
+static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
+{
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_object_list input;
+	union acpi_object params[4];
+	union acpi_object *obj;
+	int err;
+
+	input.count = 4;
+	input.pointer = params;
+	params[0].type = ACPI_TYPE_BUFFER;
+	params[0].buffer.length = sizeof(nouveau_dsm_muid);
+	params[0].buffer.pointer = (char *)nouveau_dsm_muid;
+	params[1].type = ACPI_TYPE_INTEGER;
+	params[1].integer.value = 0x00000102;
+	params[2].type = ACPI_TYPE_INTEGER;
+	params[2].integer.value = func;
+	params[3].type = ACPI_TYPE_INTEGER;
+	params[3].integer.value = arg;
+
+	err = acpi_evaluate_object(handle, "_DSM", &input, &output);
+	if (err) {
+		printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
+		return err;
+	}
+
+	obj = (union acpi_object *)output.pointer;
+
+	if (obj->type == ACPI_TYPE_INTEGER)
+		if (obj->integer.value == 0x80000002)
+			return -ENODEV;
+
+	if (obj->type == ACPI_TYPE_BUFFER) {
+		if (obj->buffer.length == 4 && result) {
+			*result = 0;
+			*result |= obj->buffer.pointer[0];
+			*result |= (obj->buffer.pointer[1] << 8);
+			*result |= (obj->buffer.pointer[2] << 16);
+			*result |= (obj->buffer.pointer[3] << 24);
+		}
+	}
+
+	kfree(output.pointer);
+	return 0;
+}
+
+/* Returns 1 if a DSM function is usable and 0 otherwise */
+static int nouveau_test_dsm(acpi_handle test_handle,
+	int (*dsm_func)(acpi_handle, int, int, uint32_t *),
+	int sfnc)
+{
+	u32 result = 0;
+
+	/* Function 0 returns a Buffer containing available functions. The args
+	 * parameter is ignored for function 0, so just put 0 in it */
+	if (dsm_func(test_handle, 0, 0, &result))
+		return 0;
+
+	/* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If
+	 * the n-th bit is enabled, function n is supported */
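+	/* eg. result == 0x09 advertises the function list itself (bit 0)
+	 * and function 3 */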
+	return result & 1 && result & (1 << sfnc);
+}
+
+static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
+{
+	mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
+	mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
+	return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
+}
+
+static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state)
+{
+	int arg;
+	if (state == VGA_SWITCHEROO_ON)
+		arg = NOUVEAU_DSM_POWER_SPEED;
+	else
+		arg = NOUVEAU_DSM_POWER_STAMINA;
+	nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL);
+	return 0;
+}
+
+static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
+{
+	if (!nouveau_dsm_priv.dsm_detected)
+		return 0;
+	if (id == VGA_SWITCHEROO_IGD)
+		return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
+	else
+		return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED);
+}
+
+static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
+				   enum vga_switcheroo_state state)
+{
+	if (id == VGA_SWITCHEROO_IGD)
+		return 0;
+
+	/* Optimus laptops have the card already disabled in
+	 * nouveau_switcheroo_set_state */
+	if (!nouveau_dsm_priv.dsm_detected)
+		return 0;
+
+	return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
+}
+
+static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
+{
+	/* easy option one - intel vendor ID means Integrated */
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+		return VGA_SWITCHEROO_IGD;
+
+	/* is this device on Bus 0? - this may need improving */
+	if (pdev->bus->number == 0)
+		return VGA_SWITCHEROO_IGD;
+
+	return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler nouveau_dsm_handler = {
+	.switchto = nouveau_dsm_switchto,
+	.power_state = nouveau_dsm_power_state,
+	.get_client_id = nouveau_dsm_get_client_id,
+};
+
+static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
+{
+	acpi_handle dhandle, nvidia_handle;
+	acpi_status status;
+	int retval = 0;
+
+	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+	if (!dhandle)
+		return false;
+
+	status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
+	if (ACPI_FAILURE(status)) {
+		return false;
+	}
+
+	if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
+		retval |= NOUVEAU_DSM_HAS_MUX;
+
+	if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
+		NOUVEAU_DSM_OPTIMUS_FN))
+		retval |= NOUVEAU_DSM_HAS_OPT;
+
+	if (retval)
+		nouveau_dsm_priv.dhandle = dhandle;
+
+	return retval;
+}
+
+static bool nouveau_dsm_detect(void)
+{
+	char acpi_method_name[255] = { 0 };
+	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+	struct pci_dev *pdev = NULL;
+	int has_dsm = 0;
+	int has_optimus = 0;
+	int vga_count = 0;
+	bool guid_valid;
+	int retval;
+	bool ret = false;
+
+	/* lookup the MXM GUID */
+	guid_valid = mxm_wmi_supported();
+
+	if (guid_valid)
+		printk(KERN_INFO "MXM: GUID detected in BIOS\n");
+
+	/* now do DSM detection */
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		vga_count++;
+
+		retval = nouveau_dsm_pci_probe(pdev);
+		if (retval & NOUVEAU_DSM_HAS_MUX)
+			has_dsm |= 1;
+		if (retval & NOUVEAU_DSM_HAS_OPT)
+			has_optimus = 1;
+	}
+
+	/* find the optimus DSM or the old v1 DSM */
+	if (has_optimus == 1) {
+		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
+			&buffer);
+		printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
+			acpi_method_name);
+		nouveau_dsm_priv.optimus_detected = true;
+		ret = true;
+	} else if (vga_count == 2 && has_dsm && guid_valid) {
+		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
+			&buffer);
+		printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
+			acpi_method_name);
+		nouveau_dsm_priv.dsm_detected = true;
+		ret = true;
+	}
+
+	return ret;
+}
+
+void nouveau_register_dsm_handler(void)
+{
+	bool r;
+
+	r = nouveau_dsm_detect();
+	if (!r)
+		return;
+
+	vga_switcheroo_register_handler(&nouveau_dsm_handler);
+}
+
+/* Must be called for Optimus models before the card can be turned off */
+void nouveau_switcheroo_optimus_dsm(void)
+{
+	u32 result = 0;
+	if (!nouveau_dsm_priv.optimus_detected)
+		return;
+
+	nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN,
+		NOUVEAU_DSM_OPTIMUS_ARGS, &result);
+}
+
+void nouveau_unregister_dsm_handler(void)
+{
+	if (nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.dsm_detected)
+		vga_switcheroo_unregister_handler();
+}
+
+/* retrieve the ROM in 4k blocks */
+static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
+			    int offset, int len)
+{
+	acpi_status status;
+	union acpi_object rom_arg_elements[2], *obj;
+	struct acpi_object_list rom_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+	rom_arg.count = 2;
+	rom_arg.pointer = &rom_arg_elements[0];
+
+	rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	rom_arg_elements[0].integer.value = offset;
+
+	rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
+	rom_arg_elements[1].integer.value = len;
+
+	status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
+	if (ACPI_FAILURE(status)) {
+		printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status));
+		return -ENODEV;
+	}
+	obj = (union acpi_object *)buffer.pointer;
+	memcpy(bios+offset, obj->buffer.pointer, len);
+	kfree(buffer.pointer);
+	return len;
+}
+
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+{
+	acpi_status status;
+	acpi_handle dhandle, rom_handle;
+
+	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+	if (!dhandle)
+		return false;
+
+	status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
+	if (ACPI_FAILURE(status))
+		return false;
+
+	nouveau_dsm_priv.rom_handle = rom_handle;
+	return true;
+}
+
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+	return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
+}
+
+void *
+nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
+{
+	struct acpi_device *acpidev;
+	acpi_handle handle;
+	int type, ret;
+	void *edid;
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_LVDS:
+	case DRM_MODE_CONNECTOR_eDP:
+		type = ACPI_VIDEO_DISPLAY_LCD;
+		break;
+	default:
+		return NULL;
+	}
+
+	handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+	if (!handle)
+		return NULL;
+
+	ret = acpi_bus_get_device(handle, &acpidev);
+	if (ret)
+		return NULL;
+
+	ret = acpi_video_get_edid(acpidev, type, -1, &edid);
+	if (ret < 0)
+		return NULL;
+
+	return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_acpi.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_acpi.h
new file mode 100644
index 0000000..74acf0f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -0,0 +1,26 @@
+#ifndef __NOUVEAU_ACPI_H__
+#define __NOUVEAU_ACPI_H__
+
+#define ROM_BIOS_PAGE 4096
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+bool nouveau_is_optimus(void);
+bool nouveau_is_v1_dsm(void);
+void nouveau_register_dsm_handler(void);
+void nouveau_unregister_dsm_handler(void);
+void nouveau_switcheroo_optimus_dsm(void);
+int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
+bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
+#else
+static inline bool nouveau_is_optimus(void) { return false; }
+static inline bool nouveau_is_v1_dsm(void) { return false; }
+static inline void nouveau_register_dsm_handler(void) {}
+static inline void nouveau_unregister_dsm_handler(void) {}
+static inline void nouveau_switcheroo_optimus_dsm(void) {}
+static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
+static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
+#endif
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_agp.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_agp.c
new file mode 100644
index 0000000..6e7a55f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -0,0 +1,164 @@
+#include <linux/module.h>
+
+#include <core/device.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_agp.h"
+#include "nouveau_reg.h"
+
+#if __OS_HAS_AGP
+MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
+static int nouveau_agpmode = -1;
+module_param_named(agpmode, nouveau_agpmode, int, 0400);
+
+static unsigned long
+get_agp_mode(struct nouveau_drm *drm, unsigned long mode)
+{
+	struct nouveau_device *device = nv_device(drm->device);
+
+	/*
+	 * Fast writes (FW) seem to be broken on nv18; they make the card
+	 * lock up randomly.
+	 */
+	if (device->chipset == 0x18)
+		mode &= ~PCI_AGP_COMMAND_FW;
+
+	/*
+	 * AGP mode set in the command line.
+	 */
+	if (nouveau_agpmode > 0) {
+		bool agpv3 = mode & 0x8;
+		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
+
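+		/* AGPv3 (mode bit 3) encodes 4x/8x as rate values 1/2,
+		 * hence the division by four above */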
+		mode = (mode & ~0x7) | (rate & 0x7);
+	}
+
+	return mode;
+}
+
+static bool
+nouveau_agp_enabled(struct nouveau_drm *drm)
+{
+	struct drm_device *dev = drm->dev;
+
+	if (!drm_pci_device_is_agp(dev) || !dev->agp)
+		return false;
+
+	if (drm->agp.stat == UNKNOWN) {
+		if (!nouveau_agpmode)
+			return false;
+#ifdef __powerpc__
+		/* Disable AGP by default on all PowerPC machines for
+		 * now -- At least some UniNorth-2 AGP bridges are
+		 * known to be broken: DMA from the host to the card
+		 * works just fine, but writeback from the card to the
+		 * host goes straight to memory untranslated bypassing
+		 * the GATT somehow, making them quite painful to deal
+		 * with...
+		 */
+		if (nouveau_agpmode == -1)
+			return false;
+#endif
+		return true;
+	}
+
+	return (drm->agp.stat == ENABLED);
+}
+#endif
+
+void
+nouveau_agp_reset(struct nouveau_drm *drm)
+{
+#if __OS_HAS_AGP
+	struct nouveau_device *device = nv_device(drm->device);
+	struct drm_device *dev = drm->dev;
+	u32 save[2];
+	int ret;
+
+	if (!nouveau_agp_enabled(drm))
+		return;
+
+	/* First of all, disable fast writes, otherwise if it's
+	 * already enabled in the AGP bridge and we disable the card's
+	 * AGP controller we might be locking ourselves out of it. */
+	if ((nv_rd32(device, NV04_PBUS_PCI_NV_19) |
+	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
+		struct drm_agp_info info;
+		struct drm_agp_mode mode;
+
+		ret = drm_agp_info(dev, &info);
+		if (ret)
+			return;
+
+		mode.mode  = get_agp_mode(drm, info.mode);
+		mode.mode &= ~PCI_AGP_COMMAND_FW;
+
+		ret = drm_agp_enable(dev, mode);
+		if (ret)
+			return;
+	}
+
+	/* clear busmaster bit, and disable AGP */
+	save[0] = nv_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
+	nv_wr32(device, NV04_PBUS_PCI_NV_19, 0);
+
+	/* reset PGRAPH, PFIFO and PTIMER */
+	save[1] = nv_mask(device, 0x000200, 0x00011100, 0x00000000);
+	nv_mask(device, 0x000200, 0x00011100, save[1]);
+
+	/* and restore busmaster bit (gives effect of resetting AGP) */
+	nv_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
+#endif
+}
+
+void
+nouveau_agp_init(struct nouveau_drm *drm)
+{
+#if __OS_HAS_AGP
+	struct nouveau_device *device = nv_device(drm->device);
+	struct drm_device *dev = drm->dev;
+	struct drm_agp_info info;
+	struct drm_agp_mode mode;
+	int ret;
+
+	if (!nouveau_agp_enabled(drm))
+		return;
+	drm->agp.stat = DISABLE;
+
+	ret = drm_agp_acquire(dev);
+	if (ret) {
+		nv_error(device, "unable to acquire AGP: %d\n", ret);
+		return;
+	}
+
+	ret = drm_agp_info(dev, &info);
+	if (ret) {
+		nv_error(device, "unable to get AGP info: %d\n", ret);
+		return;
+	}
+
+	/* see agp.h for the AGPSTAT_* modes available */
+	mode.mode = get_agp_mode(drm, info.mode);
+
+	ret = drm_agp_enable(dev, mode);
+	if (ret) {
+		nv_error(device, "unable to enable AGP: %d\n", ret);
+		return;
+	}
+
+	drm->agp.stat = ENABLED;
+	drm->agp.base = info.aperture_base;
+	drm->agp.size = info.aperture_size;
+#endif
+}
+
+void
+nouveau_agp_fini(struct nouveau_drm *drm)
+{
+#if __OS_HAS_AGP
+	struct drm_device *dev = drm->dev;
+	if (dev->agp && dev->agp->acquired)
+		drm_agp_release(dev);
+#endif
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_agp.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_agp.h
new file mode 100644
index 0000000..b55c086
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_agp.h
@@ -0,0 +1,10 @@
+#ifndef __NOUVEAU_AGP_H__
+#define __NOUVEAU_AGP_H__
+
+struct nouveau_drm;
+
+void nouveau_agp_reset(struct nouveau_drm *);
+void nouveau_agp_init(struct nouveau_drm *);
+void nouveau_agp_fini(struct nouveau_drm *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_backlight.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_backlight.c
new file mode 100644
index 0000000..2ffad21
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2009 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ *  Matthew Garrett <mjg@redhat.com>
+ *
+ * Register locations derived from NVClock by Roderick Colenbrander
+ */
+
+#include <linux/backlight.h>
+#include <linux/acpi.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "nouveau_encoder.h"
+
+static int
+nv40_get_intensity(struct backlight_device *bd)
+{
+	struct nouveau_drm *drm = bl_get_data(bd);
+	struct nouveau_device *device = nv_device(drm->device);
+	int val = (nv_rd32(device, NV40_PMC_BACKLIGHT) &
+				   NV40_PMC_BACKLIGHT_MASK) >> 16;
+
+	return val;
+}
+
+static int
+nv40_set_intensity(struct backlight_device *bd)
+{
+	struct nouveau_drm *drm = bl_get_data(bd);
+	struct nouveau_device *device = nv_device(drm->device);
+	int val = bd->props.brightness;
+	int reg = nv_rd32(device, NV40_PMC_BACKLIGHT);
+
+	nv_wr32(device, NV40_PMC_BACKLIGHT,
+		 (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
+
+	return 0;
+}
+
+static const struct backlight_ops nv40_bl_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
+	.get_brightness = nv40_get_intensity,
+	.update_status = nv40_set_intensity,
+};
+
+static int
+nv40_backlight_init(struct drm_connector *connector)
+{
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct backlight_properties props;
+	struct backlight_device *bd;
+
+	if (!(nv_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
+		return 0;
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
+	props.max_brightness = 31;
+	bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
+				       &nv40_bl_ops, &props);
+	if (IS_ERR(bd))
+		return PTR_ERR(bd);
+	drm->backlight = bd;
+	bd->props.brightness = nv40_get_intensity(bd);
+	backlight_update_status(bd);
+
+	return 0;
+}
+
+static int
+nv50_get_intensity(struct backlight_device *bd)
+{
+	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	int or = nv_encoder->or;
+	u32 div = 1025;
+	u32 val;
+
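+	/* scale the 0..1025 PWM duty cycle to a 0..100 brightness value,
+	 * rounding to nearest */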
+	val  = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
+	val &= NV50_PDISP_SOR_PWM_CTL_VAL;
+	return ((val * 100) + (div / 2)) / div;
+}
+
+static int
+nv50_set_intensity(struct backlight_device *bd)
+{
+	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	int or = nv_encoder->or;
+	u32 div = 1025;
+	u32 val = (bd->props.brightness * div) / 100;
+
+	nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
+			NV50_PDISP_SOR_PWM_CTL_NEW | val);
+	return 0;
+}
+
+static const struct backlight_ops nv50_bl_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
+	.get_brightness = nv50_get_intensity,
+	.update_status = nv50_set_intensity,
+};
+
+static int
+nva3_get_intensity(struct backlight_device *bd)
+{
+	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	int or = nv_encoder->or;
+	u32 div, val;
+
+	div  = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
+	val  = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
+	val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
+	if (div && div >= val)
+		return ((val * 100) + (div / 2)) / div;
+
+	return 100;
+}
+
+static int
+nva3_set_intensity(struct backlight_device *bd)
+{
+	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	int or = nv_encoder->or;
+	u32 div, val;
+
+	div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
+	val = (bd->props.brightness * div) / 100;
+	if (div) {
+		nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val |
+				NV50_PDISP_SOR_PWM_CTL_NEW |
+				NVA3_PDISP_SOR_PWM_CTL_UNK);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static const struct backlight_ops nva3_bl_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
+	.get_brightness = nva3_get_intensity,
+	.update_status = nva3_set_intensity,
+};
+
+static int
+nv50_backlight_init(struct drm_connector *connector)
+{
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_encoder *nv_encoder;
+	struct backlight_properties props;
+	struct backlight_device *bd;
+	const struct backlight_ops *ops;
+
+	nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
+	if (!nv_encoder) {
+		nv_encoder = find_encoder(connector, DCB_OUTPUT_DP);
+		if (!nv_encoder)
+			return -ENODEV;
+	}
+
+	if (!nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
+		return 0;
+
+	if (device->chipset <= 0xa0 ||
+	    device->chipset == 0xaa ||
+	    device->chipset == 0xac)
+		ops = &nv50_bl_ops;
+	else
+		ops = &nva3_bl_ops;
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
+	props.max_brightness = 100;
+	bd = backlight_device_register("nv_backlight", &connector->kdev,
+				       nv_encoder, ops, &props);
+	if (IS_ERR(bd))
+		return PTR_ERR(bd);
+
+	drm->backlight = bd;
+	bd->props.brightness = bd->ops->get_brightness(bd);
+	backlight_update_status(bd);
+	return 0;
+}
+
+int
+nouveau_backlight_init(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct drm_connector *connector;
+
+#ifdef CONFIG_ACPI
+	if (acpi_video_backlight_support()) {
+		NV_INFO(drm, "ACPI backlight interface available, "
+			     "not registering our own\n");
+		return 0;
+	}
+#endif
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
+		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+			continue;
+
+		switch (device->card_type) {
+		case NV_40:
+			return nv40_backlight_init(connector);
+		case NV_50:
+		case NV_C0:
+		case NV_D0:
+		case NV_E0:
+			return nv50_backlight_init(connector);
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+void
+nouveau_backlight_exit(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (drm->backlight) {
+		backlight_device_unregister(drm->backlight);
+		drm->backlight = NULL;
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_bios.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_bios.c
new file mode 100644
index 0000000..6aa2137
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -0,0 +1,2121 @@
+/*
+ * Copyright 2005-2006 Erik Waling
+ * Copyright 2006 Stephane Marchesin
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <subdev/bios.h>
+
+#include <drm/drmP.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "dispnv04/hw.h"
+#include "nouveau_encoder.h"
+
+#include <linux/io-mapping.h>
+#include <linux/firmware.h>
+
+/* these defines are made up */
+#define NV_CIO_CRE_44_HEADA 0x0
+#define NV_CIO_CRE_44_HEADB 0x3
+#define FEATURE_MOBILE 0x10	/* also FEATURE_QUADRO for BMP */
+
+#define EDID1_LEN 128
+
+#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
+#define LOG_OLD_VALUE(x)
+
+struct init_exec {
+	bool execute;
+	bool repeat;
+};
+
+static bool nv_cksum(const uint8_t *data, unsigned int length)
+{
+	/*
+	 * There are a few checksums in the BIOS, so here's a generic checking
+	 * function.
+	 */
+	int i;
+	uint8_t sum = 0;
+
+	for (i = 0; i < length; i++)
+		sum += data[i];
+
+	if (sum)
+		return true;
+
+	return false;
+}
+
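+/* Walk a table of (pixel clock, script pointer) records and return the
+ * script for the first entry whose clock, stored in 10 kHz units, does
+ * not exceed pxclk.  Pre-BIT BIOSes store an index into the init script
+ * table rather than a direct script pointer. */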
+static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
+{
+	int compare_record_len, i = 0;
+	uint16_t compareclk, scriptptr = 0;
+
+	if (bios->major_version < 5) /* pre BIT */
+		compare_record_len = 3;
+	else
+		compare_record_len = 4;
+
+	do {
+		compareclk = ROM16(bios->data[clktable + compare_record_len * i]);
+		if (pxclk >= compareclk * 10) {
+			if (bios->major_version < 5) {
+				uint8_t tmdssub = bios->data[clktable + 2 + compare_record_len * i];
+				scriptptr = ROM16(bios->data[bios->init_script_tbls_ptr + tmdssub * 2]);
+			} else
+				scriptptr = ROM16(bios->data[clktable + 2 + compare_record_len * i]);
+			break;
+		}
+		i++;
+	} while (compareclk);
+
+	return scriptptr;
+}
+
+static void
+run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
+		      struct dcb_output *dcbent, int head, bool dl)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	NV_INFO(drm, "0x%04X: Parsing digital output script table\n",
+		 scriptptr);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, head ? NV_CIO_CRE_44_HEADB :
+					         NV_CIO_CRE_44_HEADA);
+	nouveau_bios_run_init_table(dev, scriptptr, dcbent, head);
+
+	nv04_dfp_bind_head(dev, dcbent, head, dl);
+}
+
+static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & DCB_OUTPUT_C ? 1 : 0);
+	uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
+
+	if (!bios->fp.xlated_entry || !sub || !scriptofs)
+		return -EINVAL;
+
+	run_digital_op_script(dev, scriptofs, dcbent, head, bios->fp.dual_link);
+
+	if (script == LVDS_PANEL_OFF) {
+		/* off-on delay in ms */
+		mdelay(ROM16(bios->data[bios->fp.xlated_entry + 7]));
+	}
+#ifdef __powerpc__
+	/* Powerbook specific quirks */
+	if (script == LVDS_RESET &&
+	    (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
+	     dev->pci_device == 0x0329))
+		nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
+#endif
+
+	return 0;
+}
+
+static int run_lvds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk)
+{
+	/*
+	 * The BIT LVDS table's header has the information to set up the
+	 * necessary registers. Following the standard 4 byte header are:
+	 * A bitmask byte and a dual-link transition pxclk value for use in
+	 * selecting the init script when not using straps; 4 script pointers
+	 * for panel power, selected by output and on/off; and 8 table pointers
+	 * for panel init, the needed one determined by output, and bits in the
+	 * conf byte. These tables are similar to the TMDS tables, consisting
+	 * of a list of pxclks and script pointers.
+	 */
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
+	uint16_t scriptptr = 0, clktable;
+
+	/*
+	 * For now we assume version 3.0 table - g80 support will need some
+	 * changes
+	 */
+
+	switch (script) {
+	case LVDS_INIT:
+		return -ENOSYS;
+	case LVDS_BACKLIGHT_ON:
+	case LVDS_PANEL_ON:
+		scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 7 + outputset * 2]);
+		break;
+	case LVDS_BACKLIGHT_OFF:
+	case LVDS_PANEL_OFF:
+		scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]);
+		break;
+	case LVDS_RESET:
+		clktable = bios->fp.lvdsmanufacturerpointer + 15;
+		if (dcbent->or == 4)
+			clktable += 8;
+
+		if (dcbent->lvdsconf.use_straps_for_mode) {
+			if (bios->fp.dual_link)
+				clktable += 4;
+			if (bios->fp.if_is_24bit)
+				clktable += 2;
+		} else {
+			/* using EDID */
+			int cmpval_24bit = (dcbent->or == 4) ? 4 : 1;
+
+			if (bios->fp.dual_link) {
+				clktable += 4;
+				cmpval_24bit <<= 1;
+			}
+
+			if (bios->fp.strapless_is_24bit & cmpval_24bit)
+				clktable += 2;
+		}
+
+		clktable = ROM16(bios->data[clktable]);
+		if (!clktable) {
+			NV_ERROR(drm, "Pixel clock comparison table not found\n");
+			return -ENOENT;
+		}
+		scriptptr = clkcmptable(bios, clktable, pxclk);
+	}
+
+	if (!scriptptr) {
+		NV_ERROR(drm, "LVDS output init script not found\n");
+		return -ENOENT;
+	}
+	run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link);
+
+	return 0;
+}
+
+int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk)
+{
+	/*
+	 * LVDS operations are multiplexed in an effort to present a single API
+	 * which works with two vastly differing underlying structures.
+	 * This acts as the demux
+	 */
+
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nvbios *bios = &drm->vbios;
+	uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
+	uint32_t sel_clk_binding, sel_clk;
+	int ret;
+
+	if (bios->fp.last_script_invoc == (script << 1 | head) || !lvds_ver ||
+	    (lvds_ver >= 0x30 && script == LVDS_INIT))
+		return 0;
+
+	if (!bios->fp.lvds_init_run) {
+		bios->fp.lvds_init_run = true;
+		call_lvds_script(dev, dcbent, head, LVDS_INIT, pxclk);
+	}
+
+	if (script == LVDS_PANEL_ON && bios->fp.reset_after_pclk_change)
+		call_lvds_script(dev, dcbent, head, LVDS_RESET, pxclk);
+	if (script == LVDS_RESET && bios->fp.power_off_for_reset)
+		call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk);
+
+	NV_INFO(drm, "Calling LVDS script %d:\n", script);
+
+	/* don't let script change pll->head binding */
+	sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
+
+	if (lvds_ver < 0x30)
+		ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
+	else
+		ret = run_lvds_table(dev, dcbent, head, script, pxclk);
+
+	bios->fp.last_script_invoc = (script << 1 | head);
+
+	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
+	/* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
+	nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
+
+	return ret;
+}
+
+struct lvdstableheader {
+	uint8_t lvds_ver, headerlen, recordlen;
+};
+
+static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct nvbios *bios, struct lvdstableheader *lth)
+{
+	/*
+	 * BMP version (0xa) LVDS table has a simple header of version and
+	 * record length. The BIT LVDS table has the typical BIT table header:
+	 * version byte, header length byte, record length byte, and a byte for
+	 * the maximum number of records that can be held in the table.
+	 */
+
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	uint8_t lvds_ver, headerlen, recordlen;
+
+	memset(lth, 0, sizeof(struct lvdstableheader));
+
+	if (bios->fp.lvdsmanufacturerpointer == 0x0) {
+		NV_ERROR(drm, "Pointer to LVDS manufacturer table invalid\n");
+		return -EINVAL;
+	}
+
+	lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
+
+	switch (lvds_ver) {
+	case 0x0a:	/* pre NV40 */
+		headerlen = 2;
+		recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
+		break;
+	case 0x30:	/* NV4x */
+		headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
+		if (headerlen < 0x1f) {
+			NV_ERROR(drm, "LVDS table header not understood\n");
+			return -EINVAL;
+		}
+		recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
+		break;
+	case 0x40:	/* G80/G90 */
+		headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
+		if (headerlen < 0x7) {
+			NV_ERROR(drm, "LVDS table header not understood\n");
+			return -EINVAL;
+		}
+		recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
+		break;
+	default:
+		NV_ERROR(drm,
+			 "LVDS table revision %d.%d not currently supported\n",
+			 lvds_ver >> 4, lvds_ver & 0xf);
+		return -ENOSYS;
+	}
+
+	lth->lvds_ver = lvds_ver;
+	lth->headerlen = headerlen;
+	lth->recordlen = recordlen;
+
+	return 0;
+}
+
+static int
+get_fp_strap(struct drm_device *dev, struct nvbios *bios)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+
+	/*
+	 * The fp strap is normally dictated by the "User Strap" in
+	 * PEXTDEV_BOOT_0[20:16], but on BMP cards when bit 2 of the
+	 * Internal_Flags struct at 0x48 is set, the user strap gets overridden
+	 * by the PCI subsystem ID during POST, but not before the previous user
+	 * strap has been committed to CR58 for CR57=0xf on head A, which may be
+	 * read and used instead
+	 */
+
+	if (bios->major_version < 5 && bios->data[0x48] & 0x4)
+		return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
+
+	if (device->card_type >= NV_50)
+		return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
+	else
+		return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
+}
+
+static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	uint8_t *fptable;
+	uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex;
+	int ret, ofs, fpstrapping;
+	struct lvdstableheader lth;
+
+	if (bios->fp.fptablepointer == 0x0) {
+		/* Apple cards don't have the fp table; the laptops use DDC */
+		/* The table is also missing on some x86 IGPs */
+#ifndef __powerpc__
+		NV_ERROR(drm, "Pointer to flat panel table invalid\n");
+#endif
+		bios->digital_min_front_porch = 0x4b;
+		return 0;
+	}
+
+	fptable = &bios->data[bios->fp.fptablepointer];
+	fptable_ver = fptable[0];
+
+	switch (fptable_ver) {
+	/*
+	 * BMP version 0x5.0x11 BIOSen have version 1-like tables, but no
+	 * version field, and are missing one of the spread spectrum/PWM
+	 * bytes. This could affect early GF2Go parts (though no appropriate
+	 * ROMs have been seen). Here we assume that a version of 0x05
+	 * matches this case (combining with a BMP version check would be
+	 * better), as the common case for the panel type field is 0x0005,
+	 * and that is in fact what we are reading the first byte of.
+	 */
+	case 0x05:	/* some NV10, 11, 15, 16 */
+		recordlen = 42;
+		ofs = -1;
+		break;
+	case 0x10:	/* some NV15/16, and NV11+ */
+		recordlen = 44;
+		ofs = 0;
+		break;
+	case 0x20:	/* NV40+ */
+		headerlen = fptable[1];
+		recordlen = fptable[2];
+		fpentries = fptable[3];
+		/*
+		 * fptable[4] is the minimum
+		 * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
+		 */
+		bios->digital_min_front_porch = fptable[4];
+		ofs = -7;
+		break;
+	default:
+		NV_ERROR(drm,
+			 "FP table revision %d.%d not currently supported\n",
+			 fptable_ver >> 4, fptable_ver & 0xf);
+		return -ENOSYS;
+	}
+
+	if (!bios->is_mobile) /* !mobile only needs digital_min_front_porch */
+		return 0;
+
+	ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
+	if (ret)
+		return ret;
+
+	if (lth.lvds_ver == 0x30 || lth.lvds_ver == 0x40) {
+		bios->fp.fpxlatetableptr = bios->fp.lvdsmanufacturerpointer +
+							lth.headerlen + 1;
+		bios->fp.xlatwidth = lth.recordlen;
+	}
+	if (bios->fp.fpxlatetableptr == 0x0) {
+		NV_ERROR(drm, "Pointer to flat panel xlat table invalid\n");
+		return -EINVAL;
+	}
+
+	fpstrapping = get_fp_strap(dev, bios);
+
+	fpindex = bios->data[bios->fp.fpxlatetableptr +
+					fpstrapping * bios->fp.xlatwidth];
+
+	if (fpindex > fpentries) {
+		NV_ERROR(drm, "Bad flat panel table index\n");
+		return -ENOENT;
+	}
+
+	/* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
+	if (lth.lvds_ver > 0x10)
+		bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
+
+	/*
+	 * If either the strap or xlated fpindex value is 0xf, there is no
+	 * panel using a strap-derived BIOS mode present. This condition
+	 * includes, but is different from, the DDC panel indicator above.
+	 */
+	if (fpstrapping == 0xf || fpindex == 0xf)
+		return 0;
+
+	bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen +
+			    recordlen * fpindex + ofs;
+
+	NV_INFO(drm, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
+		 ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1,
+		 ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1,
+		 ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10);
+
+	return 0;
+}
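+
+/*
+ * Worked example (illustrative, not part of the original table parsing):
+ * for a hypothetical v2.0 table with fptablepointer = 0x1000,
+ * headerlen = 5, recordlen = 44, ofs = -7 and a strap-xlated fpindex of
+ * 2, the mode pointer above resolves to 0x1000 + 5 + 44 * 2 - 7 = 0x1056.
+ */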
+
+bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
+
+	if (!mode)	/* just checking whether we can produce a mode */
+		return bios->fp.mode_ptr;
+
+	memset(mode, 0, sizeof(struct drm_display_mode));
+	/*
+	 * For version 1.0 (version in byte 0):
+	 * bytes 1-2 are "panel type", including bits indicating colour/mono,
+	 * single/dual link, and type (TFT etc.)
+	 * bytes 3-6 are bits per colour in RGBX
+	 */
+	mode->clock = ROM16(mode_entry[7]) * 10;
+	/* bytes 9-10 are HActive */
+	mode->hdisplay = ROM16(mode_entry[11]) + 1;
+	/*
+	 * bytes 13-14 are HValid Start
+	 * bytes 15-16 are HValid End
+	 */
+	mode->hsync_start = ROM16(mode_entry[17]) + 1;
+	mode->hsync_end = ROM16(mode_entry[19]) + 1;
+	mode->htotal = ROM16(mode_entry[21]) + 1;
+	/* bytes 23-24, 27-30 similarly, but vertical */
+	mode->vdisplay = ROM16(mode_entry[25]) + 1;
+	mode->vsync_start = ROM16(mode_entry[31]) + 1;
+	mode->vsync_end = ROM16(mode_entry[33]) + 1;
+	mode->vtotal = ROM16(mode_entry[35]) + 1;
+	mode->flags |= (mode_entry[37] & 0x10) ?
+			DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+	mode->flags |= (mode_entry[37] & 0x1) ?
+			DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+	/*
+	 * bytes 38-39 relate to spread spectrum settings
+	 * bytes 40-43 are something to do with PWM
+	 */
+
+	mode->status = MODE_OK;
+	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_set_name(mode);
+	return bios->fp.mode_ptr;
+}
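+
+/*
+ * Usage sketch (illustrative only): a NULL mode argument merely reports
+ * whether a strap-derived BIOS mode exists; a real structure is filled
+ * in when it does.
+ */
+#if 0
+static void example_bios_fp_mode(struct drm_device *dev)
+{
+	struct drm_display_mode mode;
+
+	if (!nouveau_bios_fp_mode(dev, NULL))
+		return;		/* no BIOS flat panel mode present */
+	nouveau_bios_fp_mode(dev, &mode);	/* decode the mode entry */
+}
+#endif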
+
+int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, bool *if_is_24bit)
+{
+	/*
+	 * The LVDS table header is (mostly) described in
+	 * parse_lvds_manufacturer_table_header(): the BIT header additionally
+	 * contains the dual-link transition pxclk (in units of 10 kHz) at byte 5; if
+	 * straps are not being used for the panel, this specifies the frequency
+	 * at which modes should be set up in the dual link style.
+	 *
+	 * Following the header, the BMP (ver 0xa) table has several records,
+	 * indexed by a separate xlat table, indexed in turn by the fp strap in
+	 * EXTDEV_BOOT. Each record has a config byte, followed by 6 script
+	 * numbers for use by INIT_SUB which controlled panel init and power,
+	 * and finally a dword of ms to sleep between power off and on
+	 * operations.
+	 *
+	 * In the BIT versions, the table following the header serves as an
+	 * integrated config and xlat table: the records in the table are
+	 * indexed by the FP strap nibble in EXTDEV_BOOT, and each record has
+	 * two bytes - the first as a config byte, the second for indexing the
+	 * fp mode table pointed to by the BIT 'D' table
+	 *
+	 * DDC is not used until after card init, so selecting the correct
+	 * table entry and setting the dual link flag for EDID equipped
+	 * panels (which requires tests against the native-mode pixel clock)
+	 * cannot be done until later, when this function should be called
+	 * with non-zero pxclk.
+	 */
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
+	struct lvdstableheader lth;
+	uint16_t lvdsofs;
+	int ret, chip_version = bios->chip_version;
+
+	ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
+	if (ret)
+		return ret;
+
+	switch (lth.lvds_ver) {
+	case 0x0a:	/* pre NV40 */
+		lvdsmanufacturerindex = bios->data[
+					bios->fp.fpxlatemanufacturertableptr +
+					fpstrapping];
+
+		/* we're done if this isn't the EDID panel case */
+		if (!pxclk)
+			break;
+
+		if (chip_version < 0x25) {
+			/* nv17 behaviour
+			 *
+			 * It seems the old style lvds script pointer is reused
+			 * to select 18/24 bit colour depth for EDID panels.
+			 */
+			lvdsmanufacturerindex =
+				(bios->legacy.lvds_single_a_script_ptr & 1) ?
+									2 : 0;
+			if (pxclk >= bios->fp.duallink_transition_clk)
+				lvdsmanufacturerindex++;
+		} else if (chip_version < 0x30) {
+			/* nv28 behaviour (off-chip encoder)
+			 *
+			 * nv28 does a complex dance of first using byte 121 of
+			 * the EDID to choose the lvdsmanufacturerindex, then
+			 * later attempting to match the EDID manufacturer and
+			 * product IDs in a table (signature 'pidt' (panel id
+			 * table?)), setting an lvdsmanufacturerindex of 0 and
+			 * an fp strap of the match index (or 0xf if none)
+			 */
+			lvdsmanufacturerindex = 0;
+		} else {
+			/* nv31, nv34 behaviour */
+			lvdsmanufacturerindex = 0;
+			if (pxclk >= bios->fp.duallink_transition_clk)
+				lvdsmanufacturerindex = 2;
+			if (pxclk >= 140000)
+				lvdsmanufacturerindex = 3;
+		}
+
+		/*
+		 * nvidia sets the high nibble of (cr57=f, cr58) to
+		 * lvdsmanufacturerindex in this case; we don't
+		 */
+		break;
+	case 0x30:	/* NV4x */
+	case 0x40:	/* G80/G90 */
+		lvdsmanufacturerindex = fpstrapping;
+		break;
+	default:
+		NV_ERROR(drm, "LVDS table revision not currently supported\n");
+		return -ENOSYS;
+	}
+
+	lvdsofs = bios->fp.xlated_entry = bios->fp.lvdsmanufacturerpointer +
+			lth.headerlen + lth.recordlen * lvdsmanufacturerindex;
+	switch (lth.lvds_ver) {
+	case 0x0a:
+		bios->fp.power_off_for_reset = bios->data[lvdsofs] & 1;
+		bios->fp.reset_after_pclk_change = bios->data[lvdsofs] & 2;
+		bios->fp.dual_link = bios->data[lvdsofs] & 4;
+		bios->fp.link_c_increment = bios->data[lvdsofs] & 8;
+		*if_is_24bit = bios->data[lvdsofs] & 16;
+		break;
+	case 0x30:
+	case 0x40:
+		/*
+		 * No sign of the "power off for reset" or "reset for panel
+		 * on" bits, but it's safer to assume we should
+		 */
+		bios->fp.power_off_for_reset = true;
+		bios->fp.reset_after_pclk_change = true;
+
+		/*
+		 * It's OK that lvdsofs is wrong in the nv4x EDID case;
+		 * dual_link is overwritten below, and if_is_24bit isn't used
+		 */
+		bios->fp.dual_link = bios->data[lvdsofs] & 1;
+		bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
+		bios->fp.strapless_is_24bit = bios->data[
+				bios->fp.lvdsmanufacturerpointer + 4];
+		bios->fp.duallink_transition_clk = ROM16(bios->data[
+				bios->fp.lvdsmanufacturerpointer + 5]) * 10;
+		break;
+	}
+
+	/* set dual_link flag for EDID case */
+	if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
+		bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
+
+	*dl = bios->fp.dual_link;
+
+	return 0;
+}
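+
+/*
+ * Usage sketch (illustrative only): per the comment above, the table is
+ * parsed once with pxclk == 0 while straps are authoritative, and again
+ * with the real pixel clock once an EDID mode is known, so dual-link
+ * can be decided against the transition clock.
+ */
+#if 0
+static int example_lvds_query(struct drm_device *dev, int pxclk)
+{
+	bool dual_link, is_24bit;
+
+	return nouveau_bios_parse_lvds_table(dev, pxclk, &dual_link,
+					     &is_24bit);
+}
+#endif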
+
+int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
+{
+	/*
+	 * the pxclk parameter is in kHz
+	 *
+	 * This runs the TMDS regs setting code found on BIT bios cards
+	 *
+	 * For ffs(or) == 1 use the first table; for ffs(or) == 2 or
+	 * ffs(or) == 3, use the second.
+	 */
+
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nvbios *bios = &drm->vbios;
+	int cv = bios->chip_version;
+	uint16_t clktable = 0, scriptptr;
+	uint32_t sel_clk_binding, sel_clk;
+
+	/* pre-nv17 off-chip tmds uses scripts, post nv17 doesn't */
+	if (cv >= 0x17 && cv != 0x1a && cv != 0x20 &&
+	    dcbent->location != DCB_LOC_ON_CHIP)
+		return 0;
+
+	switch (ffs(dcbent->or)) {
+	case 1:
+		clktable = bios->tmds.output0_script_ptr;
+		break;
+	case 2:
+	case 3:
+		clktable = bios->tmds.output1_script_ptr;
+		break;
+	}
+
+	if (!clktable) {
+		NV_ERROR(drm, "Pixel clock comparison table not found\n");
+		return -EINVAL;
+	}
+
+	scriptptr = clkcmptable(bios, clktable, pxclk);
+
+	if (!scriptptr) {
+		NV_ERROR(drm, "TMDS output init script not found\n");
+		return -ENOENT;
+	}
+
+	/* don't let script change pll->head binding */
+	sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
+	run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
+	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
+
+	return 0;
+}
+
+static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
+{
+	/*
+	 * Parses the init table segment for pointers used in script execution.
+	 *
+	 * offset + 0  (16 bits): init script tables pointer
+	 * offset + 2  (16 bits): macro index table pointer
+	 * offset + 4  (16 bits): macro table pointer
+	 * offset + 6  (16 bits): condition table pointer
+	 * offset + 8  (16 bits): io condition table pointer
+	 * offset + 10 (16 bits): io flag condition table pointer
+	 * offset + 12 (16 bits): init function table pointer
+	 *
+	 * Only the init script tables pointer at offset + 0 is consumed
+	 * here.
+	 */
+
+	bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
+}
+
+static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+	/*
+	 * Parses the load detect values for g80 cards.
+	 *
+	 * offset + 0 (16 bits): loadval table pointer
+	 */
+
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	uint16_t load_table_ptr;
+	uint8_t version, headerlen, entrylen, num_entries;
+
+	if (bitentry->length != 3) {
+		NV_ERROR(drm, "Do not understand BIT A table\n");
+		return -EINVAL;
+	}
+
+	load_table_ptr = ROM16(bios->data[bitentry->offset]);
+
+	if (load_table_ptr == 0x0) {
+		NV_DEBUG(drm, "Pointer to BIT loadval table invalid\n");
+		return -EINVAL;
+	}
+
+	version = bios->data[load_table_ptr];
+
+	if (version != 0x10) {
+		NV_ERROR(drm, "BIT loadval table version %d.%d not supported\n",
+			 version >> 4, version & 0xF);
+		return -ENOSYS;
+	}
+
+	headerlen = bios->data[load_table_ptr + 1];
+	entrylen = bios->data[load_table_ptr + 2];
+	num_entries = bios->data[load_table_ptr + 3];
+
+	if (headerlen != 4 || entrylen != 4 || num_entries != 2) {
+		NV_ERROR(drm, "Do not understand BIT loadval table\n");
+		return -EINVAL;
+	}
+
+	/* First entry is normal dac, 2nd tv-out perhaps? */
+	bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
+
+	return 0;
+}
+
+static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+	/*
+	 * Parses the flat panel table segment that the bit entry points to.
+	 * Starting at bitentry->offset:
+	 *
+	 * offset + 0  (16 bits): ??? table pointer - seems to have 18 byte
+	 * records beginning with a freq.
+	 * offset + 2  (16 bits): mode table pointer
+	 */
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (bitentry->length != 4) {
+		NV_ERROR(drm, "Do not understand BIT display table\n");
+		return -EINVAL;
+	}
+
+	bios->fp.fptablepointer = ROM16(bios->data[bitentry->offset + 2]);
+
+	return 0;
+}
+
+static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+	/*
+	 * Parses the init table segment that the bit entry points to.
+	 *
+	 * See parse_script_table_pointers for layout
+	 */
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (bitentry->length < 14) {
+		NV_ERROR(drm, "Do not understand init table\n");
+		return -EINVAL;
+	}
+
+	parse_script_table_pointers(bios, bitentry->offset);
+	return 0;
+}
+
+static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+	/*
+	 * BIT 'i' (info?) table
+	 *
+	 * offset + 0  (32 bits): BIOS version dword (as in B table)
+	 * offset + 5  (8  bits): BIOS feature byte (same as for BMP?)
+	 * offset + 13 (16 bits): pointer to table containing DAC load
+	 * detection comparison values
+	 *
+	 * There are other things in the table, their purpose unknown
+	 */
+
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	uint16_t daccmpoffset;
+	uint8_t dacver, dacheaderlen;
+
+	if (bitentry->length < 6) {
+		NV_ERROR(drm, "BIT i table too short for needed information\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Bit 4 seems to indicate a mobile BIOS (it doesn't suffer from
+	 * BMP's Quadro identity crisis); other bits possibly as for the
+	 * BMP feature byte
+	 */
+	bios->feature_byte = bios->data[bitentry->offset + 5];
+	bios->is_mobile = bios->feature_byte & FEATURE_MOBILE;
+
+	if (bitentry->length < 15) {
+		NV_WARN(drm, "BIT i table not long enough for DAC load "
+			       "detection comparison table\n");
+		return -EINVAL;
+	}
+
+	daccmpoffset = ROM16(bios->data[bitentry->offset + 13]);
+
+	/* doesn't exist on g80 */
+	if (!daccmpoffset)
+		return 0;
+
+	/*
+	 * The first value in the table, following the header, is the
+	 * comparison value; the second entry is a comparison value for
+	 * TV load detection.
+	 */
+
+	dacver = bios->data[daccmpoffset];
+	dacheaderlen = bios->data[daccmpoffset + 1];
+
+	if (dacver != 0x00 && dacver != 0x10) {
+		NV_WARN(drm, "DAC load detection comparison table version "
+			       "%d.%d not known\n", dacver >> 4, dacver & 0xf);
+		return -ENOSYS;
+	}
+
+	bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
+	bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
+
+	return 0;
+}
+
+static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+	/*
+	 * Parses the LVDS table segment that the bit entry points to.
+	 * Starting at bitentry->offset:
+	 *
+	 * offset + 0  (16 bits): LVDS strap xlate table pointer
+	 */
+
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (bitentry->length != 2) {
+		NV_ERROR(drm, "Do not understand BIT LVDS table\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * No idea if it's still called the LVDS manufacturer table, but
+	 * the concept's close enough.
+	 */
+	bios->fp.lvdsmanufacturerpointer = ROM16(bios->data[bitentry->offset]);
+
+	return 0;
+}
+
+static int
+parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+		      struct bit_entry *bitentry)
+{
+	/*
+	 * offset + 2  (8  bits): number of options in an
+	 * 	INIT_RAM_RESTRICT_ZM_REG_GROUP opcode option set
+	 * offset + 3  (16 bits): pointer to strap xlate table for RAM
+	 * 	restrict option selection
+	 *
+	 * There's a bunch of bits in this table other than the RAM restrict
+	 * stuff that we don't use; their use is currently unknown
+	 */
+
+	/*
+	 * Older bios versions don't have a sufficiently long table for
+	 * what we want
+	 */
+	if (bitentry->length < 0x5)
+		return 0;
+
+	if (bitentry->version < 2) {
+		bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
+		bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
+	} else {
+		bios->ram_restrict_group_count = bios->data[bitentry->offset + 0];
+		bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 1]);
+	}
+
+	return 0;
+}
+
+static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+	/*
+	 * Parses the pointer to the TMDS table
+	 *
+	 * Starting at bitentry->offset:
+	 *
+	 * offset + 0  (16 bits): TMDS table pointer
+	 *
+	 * The TMDS table is typically found just before the DCB table, with a
+	 * characteristic signature of 0x11,0x13 (1.1 being version, 0x13 being
+	 * length?)
+	 *
+	 * At offset +7 is a pointer to a script, which I don't know how to
+	 * run yet.
+	 * At offset +9 is a pointer to another script, likewise
+	 * Offset +11 has a pointer to a table where the first word is a pxclk
+	 * frequency and the second word a pointer to a script, which should be
+	 * run if the comparison pxclk frequency is less than the pxclk desired.
+	 * This repeats for decreasing comparison frequencies
+	 * Offset +13 has a pointer to a similar table
+	 * The selection of table (and possibly +7/+9 script) is dictated by
+	 * "or" from the DCB.
+	 */
+
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	uint16_t tmdstableptr, script1, script2;
+
+	if (bitentry->length != 2) {
+		NV_ERROR(drm, "Do not understand BIT TMDS table\n");
+		return -EINVAL;
+	}
+
+	tmdstableptr = ROM16(bios->data[bitentry->offset]);
+	if (!tmdstableptr) {
+		NV_ERROR(drm, "Pointer to TMDS table invalid\n");
+		return -EINVAL;
+	}
+
+	NV_INFO(drm, "TMDS table version %d.%d\n",
+		bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+
+	/* nv50+ has v2.0, but we don't parse it atm */
+	if (bios->data[tmdstableptr] != 0x11)
+		return -ENOSYS;
+
+	/*
+	 * These two scripts are odd: they don't seem to get run even when
+	 * they are not stubbed.
+	 */
+	script1 = ROM16(bios->data[tmdstableptr + 7]);
+	script2 = ROM16(bios->data[tmdstableptr + 9]);
+	if (bios->data[script1] != 'q' || bios->data[script2] != 'q')
+		NV_WARN(drm, "TMDS table script pointers not stubbed\n");
+
+	bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]);
+	bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]);
+
+	return 0;
+}
+
+struct bit_table {
+	const char id;
+	int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
+};
+
+#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
+
+int
+bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	u8 entries, *entry;
+
+	if (bios->type != NVBIOS_BIT)
+		return -ENODEV;
+
+	entries = bios->data[bios->offset + 10];
+	entry   = &bios->data[bios->offset + 12];
+	while (entries--) {
+		if (entry[0] == id) {
+			bit->id = entry[0];
+			bit->version = entry[1];
+			bit->length = ROM16(entry[2]);
+			bit->offset = ROM16(entry[4]);
+			bit->data = ROMPTR(dev, entry[4]);
+			return 0;
+		}
+
+		entry += bios->data[bios->offset + 9];
+	}
+
+	return -ENOENT;
+}
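+
+/*
+ * Usage sketch (illustrative only): bit_table() lets callers look up a
+ * BIT entry directly by id; the ids consumed by this file are visible
+ * in parse_bit_structure() below.
+ */
+#if 0
+static u16 example_bit_offset(struct drm_device *dev, u8 id)
+{
+	struct bit_entry ent;
+
+	if (bit_table(dev, id, &ent))
+		return 0;	/* not a BIT BIOS, or table not present */
+	return ent.offset;
+}
+#endif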
+
+static int
+parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
+		struct bit_table *table)
+{
+	struct drm_device *dev = bios->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct bit_entry bitentry;
+
+	if (bit_table(dev, table->id, &bitentry) == 0)
+		return table->parse_fn(dev, bios, &bitentry);
+
+	NV_INFO(drm, "BIT table '%c' not found\n", table->id);
+	return -ENOSYS;
+}
+
+static int
+parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
+{
+	int ret;
+
+	/*
+	 * The only restriction on parsing order currently is having 'i'
+	 * first, for use of bios->*_version or bios->feature_byte while
+	 * parsing; functions shouldn't actually be *doing* anything apart
+	 * from pulling data from the image into the bios struct, thus no
+	 * interdependencies
+	 */
+	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('i', i));
+	if (ret) /* info? */
+		return ret;
+	if (bios->major_version >= 0x60) /* g80+ */
+		parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
+	parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
+	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
+	if (ret)
+		return ret;
+	parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
+	parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
+	parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
+
+	return 0;
+}
+
+static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsigned int offset)
+{
+	/*
+	 * Parses the BMP structure for useful things, but does not act on them
+	 *
+	 * offset +   5: BMP major version
+	 * offset +   6: BMP minor version
+	 * offset +   9: BMP feature byte
+	 * offset +  10: BCD encoded BIOS version
+	 *
+	 * offset +  18: init script table pointer (for bios versions < 5.10h)
+	 * offset +  20: extra init script table pointer (for bios
+	 * versions < 5.10h)
+	 *
+	 * offset +  24: memory init table pointer (used on early bios versions)
+	 * offset +  26: SDR memory sequencing setup data table
+	 * offset +  28: DDR memory sequencing setup data table
+	 *
+	 * offset +  54: index of I2C CRTC pair to use for CRT output
+	 * offset +  55: index of I2C CRTC pair to use for TV output
+	 * offset +  56: index of I2C CRTC pair to use for flat panel output
+	 * offset +  58: write CRTC index for I2C pair 0
+	 * offset +  59: read CRTC index for I2C pair 0
+	 * offset +  60: write CRTC index for I2C pair 1
+	 * offset +  61: read CRTC index for I2C pair 1
+	 *
+	 * offset +  67: maximum internal PLL frequency (single stage PLL)
+	 * offset +  71: minimum internal PLL frequency (single stage PLL)
+	 *
+	 * offset +  75: script table pointers, as described in
+	 * parse_script_table_pointers
+	 *
+	 * offset +  89: TMDS single link output A table pointer
+	 * offset +  91: TMDS single link output B table pointer
+	 * offset +  95: LVDS single link output A table pointer
+	 * offset + 105: flat panel timings table pointer
+	 * offset + 107: flat panel strapping translation table pointer
+	 * offset + 117: LVDS manufacturer panel config table pointer
+	 * offset + 119: LVDS manufacturer strapping translation table pointer
+	 *
+	 * offset + 142: PLL limits table pointer
+	 *
+	 * offset + 156: minimum pixel clock for LVDS dual link
+	 */
+
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor;
+	uint16_t bmplength;
+	uint16_t legacy_scripts_offset, legacy_i2c_offset;
+
+	/* load needed defaults in case we can't parse this info */
+	bios->digital_min_front_porch = 0x4b;
+	bios->fmaxvco = 256000;
+	bios->fminvco = 128000;
+	bios->fp.duallink_transition_clk = 90000;
+
+	bmp_version_major = bmp[5];
+	bmp_version_minor = bmp[6];
+
+	NV_INFO(drm, "BMP version %d.%d\n",
+		 bmp_version_major, bmp_version_minor);
+
+	/*
+	 * Make sure that 0x36 is blank and can't be mistaken for a DCB
+	 * pointer on early versions
+	 */
+	if (bmp_version_major < 5)
+		*(uint16_t *)&bios->data[0x36] = 0;
+
+	/*
+	 * Seems that the minor version was 1 for all major versions prior
+	 * to 5. Version 6 could theoretically exist, but I suspect BIT
+	 * happened instead.
+	 */
+	if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) {
+		NV_ERROR(drm, "You have an unsupported BMP version. "
+				"Please send in your bios\n");
+		return -ENOSYS;
+	}
+
+	if (bmp_version_major == 0)
+		/* nothing that's currently useful in this version */
+		return 0;
+	else if (bmp_version_major == 1)
+		bmplength = 44; /* exact for 1.01 */
+	else if (bmp_version_major == 2)
+		bmplength = 48; /* exact for 2.01 */
+	else if (bmp_version_major == 3)
+		bmplength = 54;
+		/* guessed - mem init tables added in this version */
+	else if (bmp_version_major == 4 || bmp_version_minor < 0x1)
+		/* don't know if 5.0 exists... */
+		bmplength = 62;
+		/* guessed - BMP I2C indices added in version 4 */
+	else if (bmp_version_minor < 0x6)
+		bmplength = 67; /* exact for 5.01 */
+	else if (bmp_version_minor < 0x10)
+		bmplength = 75; /* exact for 5.06 */
+	else if (bmp_version_minor == 0x10)
+		bmplength = 89; /* exact for 5.10h */
+	else if (bmp_version_minor < 0x14)
+		bmplength = 118; /* exact for 5.11h */
+	else if (bmp_version_minor < 0x24)
+		/*
+		 * Not sure of version where pll limits came in;
+		 * certainly exist by 0x24 though.
+		 */
+		/* length not exact: this is long enough to get lvds members */
+		bmplength = 123;
+	else if (bmp_version_minor < 0x27)
+		/*
+		 * Length not exact: this is long enough to get pll limit
+		 * member
+		 */
+		bmplength = 144;
+	else
+		/*
+		 * Length not exact: this is long enough to get dual link
+		 * transition clock.
+		 */
+		bmplength = 158;
+
+	/* checksum */
+	if (nv_cksum(bmp, 8)) {
+		NV_ERROR(drm, "Bad BMP checksum\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Bit 4 seems to indicate either a mobile BIOS or a Quadro card;
+	 * mobile behaviour is consistent (nv11+), Quadro only seen on
+	 * nv18gl-nv36gl (not nv10gl). Bit 5 indicates that the flat panel
+	 * tables are present, and bit 6 a TV BIOS.
+	 */
+	bios->feature_byte = bmp[9];
+
+	if (bmp_version_major < 5 || bmp_version_minor < 0x10)
+		bios->old_style_init = true;
+	legacy_scripts_offset = 18;
+	if (bmp_version_major < 2)
+		legacy_scripts_offset -= 4;
+	bios->init_script_tbls_ptr = ROM16(bmp[legacy_scripts_offset]);
+	bios->extra_init_script_tbl_ptr = ROM16(bmp[legacy_scripts_offset + 2]);
+
+	if (bmp_version_major > 2) {	/* appears in BMP 3 */
+		bios->legacy.mem_init_tbl_ptr = ROM16(bmp[24]);
+		bios->legacy.sdr_seq_tbl_ptr = ROM16(bmp[26]);
+		bios->legacy.ddr_seq_tbl_ptr = ROM16(bmp[28]);
+	}
+
+	legacy_i2c_offset = 0x48;	/* BMP version 2 & 3 */
+	if (bmplength > 61)
+		legacy_i2c_offset = offset + 54;
+	bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
+	bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
+	bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
+
+	if (bmplength > 74) {
+		bios->fmaxvco = ROM32(bmp[67]);
+		bios->fminvco = ROM32(bmp[71]);
+	}
+	if (bmplength > 88)
+		parse_script_table_pointers(bios, offset + 75);
+	if (bmplength > 94) {
+		bios->tmds.output0_script_ptr = ROM16(bmp[89]);
+		bios->tmds.output1_script_ptr = ROM16(bmp[91]);
+		/*
+		 * Never observed in use with lvds scripts, but is reused for
+		 * 18/24 bit panel interface default for EDID equipped panels
+		 * (if_is_24bit not set directly to avoid any oscillation).
+		 */
+		bios->legacy.lvds_single_a_script_ptr = ROM16(bmp[95]);
+	}
+	if (bmplength > 108) {
+		bios->fp.fptablepointer = ROM16(bmp[105]);
+		bios->fp.fpxlatetableptr = ROM16(bmp[107]);
+		bios->fp.xlatwidth = 1;
+	}
+	if (bmplength > 120) {
+		bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
+		bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
+	}
+#if 0
+	if (bmplength > 143)
+		bios->pll_limit_tbl_ptr = ROM16(bmp[142]);
+#endif
+
+	if (bmplength > 157)
+		bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;
+
+	return 0;
+}
+
+static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
+{
+	int i, j;
+
+	for (i = 0; i <= (n - len); i++) {
+		for (j = 0; j < len; j++)
+			if (data[i + j] != str[j])
+				break;
+		if (j == len)
+			return i;
+	}
+
+	return 0;
+}
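+
+/*
+ * Usage sketch (illustrative only): findstr() locates the "HWSQ" and
+ * EDID signatures below. Note it returns 0 for "not found", so a match
+ * at offset 0 would be indistinguishable from a miss.
+ */
+#if 0
+static uint16_t example_find_hwsq(struct nvbios *bios)
+{
+	const uint8_t sig[] = { 'H', 'W', 'S', 'Q' };
+
+	return findstr(bios->data, bios->length, sig, sizeof(sig));
+}
+#endif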
+
+void *
+olddcb_table(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u8 *dcb = NULL;
+
+	if (nv_device(drm->device)->card_type > NV_04)
+		dcb = ROMPTR(dev, drm->vbios.data[0x36]);
+	if (!dcb) {
+		NV_WARN(drm, "No DCB data found in VBIOS\n");
+		return NULL;
+	}
+
+	if (dcb[0] >= 0x41) {
+		NV_WARN(drm, "DCB version 0x%02x unknown\n", dcb[0]);
+		return NULL;
+	} else
+	if (dcb[0] >= 0x30) {
+		if (ROM32(dcb[6]) == 0x4edcbdcb)
+			return dcb;
+	} else
+	if (dcb[0] >= 0x20) {
+		if (ROM32(dcb[4]) == 0x4edcbdcb)
+			return dcb;
+	} else
+	if (dcb[0] >= 0x15) {
+		if (!memcmp(&dcb[-7], "DEV_REC", 7))
+			return dcb;
+	} else {
+		/*
+		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
+		 * always has the same single (crt) entry, even when tv-out is
+		 * present, so the conclusion is this version cannot really
+		 * be used.
+		 *
+		 * v1.2 tables (some NV6/10, and NV15+) normally have the
+		 * same 5 entries, which are not specific to the card and so
+		 * are of no use.
+		 *
+		 * v1.2 does have an I2C table that read_dcb_i2c_table can
+		 * handle, but cards exist (nv11 in #14821) with a bad i2c
+		 * table pointer, so use the indices parsed in
+		 * parse_bmp_structure.
+		 *
+		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+		 */
+		NV_WARN(drm, "No useful DCB data in VBIOS\n");
+		return NULL;
+	}
+
+	NV_WARN(drm, "DCB header validation failed\n");
+	return NULL;
+}
+
+void *
+olddcb_outp(struct drm_device *dev, u8 idx)
+{
+	u8 *dcb = olddcb_table(dev);
+	if (dcb && dcb[0] >= 0x30) {
+		if (idx < dcb[2])
+			return dcb + dcb[1] + (idx * dcb[3]);
+	} else
+	if (dcb && dcb[0] >= 0x20) {
+		u8 *i2c = ROMPTR(dev, dcb[2]);
+		u8 *ent = dcb + 8 + (idx * 8);
+		if (i2c && ent < i2c)
+			return ent;
+	} else
+	if (dcb && dcb[0] >= 0x15) {
+		u8 *i2c = ROMPTR(dev, dcb[2]);
+		u8 *ent = dcb + 4 + (idx * 10);
+		if (i2c && ent < i2c)
+			return ent;
+	}
+
+	return NULL;
+}
+
+int
+olddcb_outp_foreach(struct drm_device *dev, void *data,
+		 int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
+{
+	int ret, idx = -1;
+	u8 *outp = NULL;
+	while ((outp = olddcb_outp(dev, ++idx))) {
+		if (ROM32(outp[0]) == 0x00000000)
+			break; /* seen on an NV11 with DCB v1.5 */
+		if (ROM32(outp[0]) == 0xffffffff)
+			break; /* seen on an NV17 with DCB v2.0 */
+
+		if ((outp[0] & 0x0f) == DCB_OUTPUT_UNUSED)
+			continue;
+		if ((outp[0] & 0x0f) == DCB_OUTPUT_EOL)
+			break;
+
+		ret = exec(dev, data, idx, outp);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+u8 *
+olddcb_conntab(struct drm_device *dev)
+{
+	u8 *dcb = olddcb_table(dev);
+	if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
+		u8 *conntab = ROMPTR(dev, dcb[0x14]);
+		if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
+			return conntab;
+	}
+	return NULL;
+}
+
+u8 *
+olddcb_conn(struct drm_device *dev, u8 idx)
+{
+	u8 *conntab = olddcb_conntab(dev);
+	if (conntab && idx < conntab[2])
+		return conntab + conntab[1] + (idx * conntab[3]);
+	return NULL;
+}
+
+static struct dcb_output *new_dcb_entry(struct dcb_table *dcb)
+{
+	struct dcb_output *entry = &dcb->entry[dcb->entries];
+
+	memset(entry, 0, sizeof(struct dcb_output));
+	entry->index = dcb->entries++;
+
+	return entry;
+}
+
+static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
+				 int heads, int or)
+{
+	struct dcb_output *entry = new_dcb_entry(dcb);
+
+	entry->type = type;
+	entry->i2c_index = i2c;
+	entry->heads = heads;
+	if (type != DCB_OUTPUT_ANALOG)
+		entry->location = !DCB_LOC_ON_CHIP; /* i.e. off-chip */
+	entry->or = or;
+}
+
+static bool
+parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
+		  uint32_t conn, uint32_t conf, struct dcb_output *entry)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	entry->type = conn & 0xf;
+	entry->i2c_index = (conn >> 4) & 0xf;
+	entry->heads = (conn >> 8) & 0xf;
+	entry->connector = (conn >> 12) & 0xf;
+	entry->bus = (conn >> 16) & 0xf;
+	entry->location = (conn >> 20) & 0x3;
+	entry->or = (conn >> 24) & 0xf;
+
+	switch (entry->type) {
+	case DCB_OUTPUT_ANALOG:
+		/*
+		 * Although the rest of a CRT conf dword is usually
+		 * zeros, Mac BIOSen have stuff there, so we must mask
+		 */
+		entry->crtconf.maxfreq = (dcb->version < 0x30) ?
+					 (conf & 0xffff) * 10 :
+					 (conf & 0xff) * 10000;
+		break;
+	case DCB_OUTPUT_LVDS:
+		{
+		uint32_t mask;
+		if (conf & 0x1)
+			entry->lvdsconf.use_straps_for_mode = true;
+		if (dcb->version < 0x22) {
+			mask = ~0xd;
+			/*
+			 * The laptop in bug 14567 lies and claims to not use
+			 * straps when it does, so assume all DCB 2.0 laptops
+			 * use straps, until a broken EDID using one is produced
+			 */
+			entry->lvdsconf.use_straps_for_mode = true;
+			/*
+			 * Both 0x4 and 0x8 show up in v2.0 tables; assume they
+			 * mean the same thing (probably wrong, but might work)
+			 */
+			if (conf & 0x4 || conf & 0x8)
+				entry->lvdsconf.use_power_scripts = true;
+		} else {
+			mask = ~0x7;
+			if (conf & 0x2)
+				entry->lvdsconf.use_acpi_for_edid = true;
+			if (conf & 0x4)
+				entry->lvdsconf.use_power_scripts = true;
+			entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4;
+		}
+		if (conf & mask) {
+			/*
+			 * Until we even try to use these on G8x, it's
+			 * useless reporting unknown bits.  They all are.
+			 */
+			if (dcb->version >= 0x40)
+				break;
+
+			NV_ERROR(drm, "Unknown LVDS configuration bits, "
+				      "please report\n");
+		}
+		break;
+		}
+	case DCB_OUTPUT_TV:
+	{
+		if (dcb->version >= 0x30)
+			entry->tvconf.has_component_output = conf & (0x8 << 4);
+		else
+			entry->tvconf.has_component_output = false;
+
+		break;
+	}
+	case DCB_OUTPUT_DP:
+		entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
+		entry->extdev = (conf & 0x0000ff00) >> 8;
+		switch ((conf & 0x00e00000) >> 21) {
+		case 0:
+			entry->dpconf.link_bw = 162000;
+			break;
+		default:
+			entry->dpconf.link_bw = 270000;
+			break;
+		}
+		switch ((conf & 0x0f000000) >> 24) {
+		case 0xf:
+			entry->dpconf.link_nr = 4;
+			break;
+		case 0x3:
+			entry->dpconf.link_nr = 2;
+			break;
+		default:
+			entry->dpconf.link_nr = 1;
+			break;
+		}
+		break;
+	case DCB_OUTPUT_TMDS:
+		if (dcb->version >= 0x40) {
+			entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
+			entry->extdev = (conf & 0x0000ff00) >> 8;
+		}
+		else if (dcb->version >= 0x30)
+			entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8;
+		else if (dcb->version >= 0x22)
+			entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
+
+		break;
+	case DCB_OUTPUT_EOL:
+		/* weird g80 mobile type that "nv" treats as a terminator */
+		dcb->entries--;
+		return false;
+	default:
+		break;
+	}
+
+	if (dcb->version < 0x40) {
+		/* Normal entries consist of a single bit, but dual link has
+		 * the next most significant bit set too
+		 */
+		entry->duallink_possible =
+			((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
+	} else {
+		entry->duallink_possible = (entry->sorconf.link == 3);
+	}
+
+	/* unsure what DCB version introduces this, 3.0? */
+	if (conf & 0x100000)
+		entry->i2c_upper_default = true;
+
+	return true;
+}
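+
+/*
+ * Worked example (illustrative): the Dell quirk entry 02025312 handled
+ * in apply_dcb_encoder_quirks() below decodes under the shifts above as
+ * type 2 (TMDS), i2c_index 1, heads 3, connector 5, bus 2, location 0
+ * (on-chip) and or 2.
+ */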
+
+static bool
+parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
+		  uint32_t conn, uint32_t conf, struct dcb_output *entry)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	switch (conn & 0x0000000f) {
+	case 0:
+		entry->type = DCB_OUTPUT_ANALOG;
+		break;
+	case 1:
+		entry->type = DCB_OUTPUT_TV;
+		break;
+	case 2:
+	case 4:
+		if (conn & 0x10)
+			entry->type = DCB_OUTPUT_LVDS;
+		else
+			entry->type = DCB_OUTPUT_TMDS;
+		break;
+	case 3:
+		entry->type = DCB_OUTPUT_LVDS;
+		break;
+	default:
+		NV_ERROR(drm, "Unknown DCB type %d\n", conn & 0x0000000f);
+		return false;
+	}
+
+	entry->i2c_index = (conn & 0x0003c000) >> 14;
+	entry->heads = ((conn & 0x001c0000) >> 18) + 1;
+	entry->or = entry->heads; /* same as heads, hopefully safe enough */
+	entry->location = (conn & 0x01e00000) >> 21;
+	entry->bus = (conn & 0x0e000000) >> 25;
+	entry->duallink_possible = false;
+
+	switch (entry->type) {
+	case DCB_OUTPUT_ANALOG:
+		entry->crtconf.maxfreq = (conf & 0xffff) * 10;
+		break;
+	case DCB_OUTPUT_TV:
+		entry->tvconf.has_component_output = false;
+		break;
+	case DCB_OUTPUT_LVDS:
+		if ((conn & 0x00003f00) >> 8 != 0x10)
+			entry->lvdsconf.use_straps_for_mode = true;
+		entry->lvdsconf.use_power_scripts = true;
+		break;
+	default:
+		break;
+	}
+
+	return true;
+}
+
+static
+void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
+{
+	/*
+	 * DCB v2.0 lists each output combination separately.
+	 * Here we merge compatible entries to have fewer outputs, each
+	 * with more options
+	 */
+
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	int i, newentries = 0;
+
+	for (i = 0; i < dcb->entries; i++) {
+		struct dcb_output *ient = &dcb->entry[i];
+		int j;
+
+		for (j = i + 1; j < dcb->entries; j++) {
+			struct dcb_output *jent = &dcb->entry[j];
+
+			if (jent->type == 100) /* already merged entry */
+				continue;
+
+			/* merge heads field when all other fields the same */
+			if (jent->i2c_index == ient->i2c_index &&
+			    jent->type == ient->type &&
+			    jent->location == ient->location &&
+			    jent->or == ient->or) {
+				NV_INFO(drm, "Merging DCB entries %d and %d\n",
+					 i, j);
+				ient->heads |= jent->heads;
+				jent->type = 100; /* dummy value */
+			}
+		}
+	}
+
+	/* Compact entries merged into others out of dcb */
+	for (i = 0; i < dcb->entries; i++) {
+		if (dcb->entry[i].type == 100)
+			continue;
+
+		if (newentries != i) {
+			dcb->entry[newentries] = dcb->entry[i];
+			dcb->entry[newentries].index = newentries;
+		}
+		newentries++;
+	}
+
+	dcb->entries = newentries;
+}
+
+static bool
+apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_table *dcb = &drm->vbios.dcb;
+
+	/* Dell Precision M6300
+	 *   DCB entry 2: 02025312 00000010
+	 *   DCB entry 3: 02026312 00000020
+	 *
+	 * Identical, except apparently a different connector on a
+	 * different SOR link.  Not a clue how we're supposed to know
+	 * which one is in use if it even shares an i2c line...
+	 *
+	 * Ignore the connector on the second SOR link to prevent
+	 * nasty problems until this is sorted (assuming it's not a
+	 * VBIOS bug).
+	 */
+	if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) {
+		if (*conn == 0x02026312 && *conf == 0x00000020)
+			return false;
+	}
+
+	/* GeForce3 Ti 200
+	 *
+	 * DCB reports an LVDS output that should be TMDS:
+	 *   DCB entry 1: f2005014 ffffffff
+	 */
+	if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
+		if (*conn == 0xf2005014 && *conf == 0xffffffff) {
+			fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+			return false;
+		}
+	}
+
+	/* XFX GT-240X-YA
+	 *
+	 * So many things wrong here, replace the entire encoder table...
+	 */
+	if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) {
+		if (idx == 0) {
+			*conn = 0x02001300; /* VGA, connector 1 */
+			*conf = 0x00000028;
+		} else
+		if (idx == 1) {
+			*conn = 0x01010312; /* DVI, connector 0 */
+			*conf = 0x00020030;
+		} else
+		if (idx == 2) {
+			*conn = 0x01010310; /* VGA, connector 0 */
+			*conf = 0x00000028;
+		} else
+		if (idx == 3) {
+			*conn = 0x02022362; /* HDMI, connector 2 */
+			*conf = 0x00020010;
+		} else {
+			*conn = 0x0000000e; /* EOL */
+			*conf = 0x00000000;
+		}
+	}
+
+	/* Some other twisted XFX board (rhbz#694914)
+	 *
+	 * The DVI/VGA encoder combo that's supposed to represent the
+	 * DVI-I connector actually points at two different ones, and
+	 * the HDMI connector ends up paired with the VGA instead.
+	 *
+	 * The connector table is missing anything for VGA at all, pointing
+	 * it at an invalid conntab entry 2, so we figure it out ourselves.
+	 */
+	if (nv_match_device(dev, 0x0615, 0x1682, 0x2605)) {
+		if (idx == 0) {
+			*conn = 0x02002300; /* VGA, connector 2 */
+			*conf = 0x00000028;
+		} else
+		if (idx == 1) {
+			*conn = 0x01010312; /* DVI, connector 0 */
+			*conf = 0x00020030;
+		} else
+		if (idx == 2) {
+			*conn = 0x04020310; /* VGA, connector 0 */
+			*conf = 0x00000028;
+		} else
+		if (idx == 3) {
+			*conn = 0x02021322; /* HDMI, connector 1 */
+			*conf = 0x00020010;
+		} else {
+			*conn = 0x0000000e; /* EOL */
+			*conf = 0x00000000;
+		}
+	}
+
+	/* fdo#50830: connector indices for VGA and DVI-I are backwards */
+	if (nv_match_device(dev, 0x0421, 0x3842, 0xc793)) {
+		if (idx == 0 && *conn == 0x02000300)
+			*conn = 0x02011300;
+		else
+		if (idx == 1 && *conn == 0x04011310)
+			*conn = 0x04000310;
+		else
+		if (idx == 2 && *conn == 0x02011312)
+			*conn = 0x02000312;
+	}
+
+	return true;
+}
+
+static void
+fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+{
+	struct dcb_table *dcb = &bios->dcb;
+	int all_heads = (nv_two_heads(dev) ? 3 : 1);
+
+#ifdef __powerpc__
+	/* Apple iMac G4 NV17 */
+	if (of_machine_is_compatible("PowerMac4,5")) {
+		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
+		fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+		return;
+	}
+#endif
+
+	/* Make up some sane defaults */
+	fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
+			     bios->legacy.i2c_indices.crt, 1, 1);
+
+	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+		fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
+				     bios->legacy.i2c_indices.tv,
+				     all_heads, 0);
+
+	else if (bios->tmds.output0_script_ptr ||
+		 bios->tmds.output1_script_ptr)
+		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
+				     bios->legacy.i2c_indices.panel,
+				     all_heads, 1);
+}
+
+static int
+parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_table *dcb = &drm->vbios.dcb;
+	u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]);
+	u32 conn = ROM32(outp[0]);
+	bool ret;
+
+	if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) {
+		struct dcb_output *entry = new_dcb_entry(dcb);
+
+		NV_INFO(drm, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
+
+		if (dcb->version >= 0x20)
+			ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
+		else
+			ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
+		if (!ret)
+			return 1; /* stop parsing */
+
+		/* Ignore the I2C index for on-chip TV-out, as there
+		 * are cards with bogus values (nv31m in bug 23212),
+		 * and it's otherwise useless.
+		 */
+		if (entry->type == DCB_OUTPUT_TV &&
+		    entry->location == DCB_LOC_ON_CHIP)
+			entry->i2c_index = 0x0f;
+	}
+
+	return 0;
+}
+
+static void
+dcb_fake_connectors(struct nvbios *bios)
+{
+	struct dcb_table *dcbt = &bios->dcb;
+	u8 map[16] = { };
+	int i, idx = 0;
+
+	/* heuristic: if we ever get a non-zero connector field, assume
+	 * that all the indices are valid and we don't need to fake them.
+	 *
+	 * and, as usual, a blacklist of boards with bad bios data...
+	 */
+	if (!nv_match_device(bios->dev, 0x0392, 0x107d, 0x20a2)) {
+		for (i = 0; i < dcbt->entries; i++) {
+			if (dcbt->entry[i].connector)
+				return;
+		}
+	}
+
+	/* no useful connector info available, we need to make it up
+	 * ourselves.  the rule here is: anything on the same i2c bus
+	 * is considered to be on the same connector.  any output
+	 * without an associated i2c bus is assigned its own unique
+	 * connector index.
+	 */
+	for (i = 0; i < dcbt->entries; i++) {
+		u8 i2c = dcbt->entry[i].i2c_index;
+		if (i2c == 0x0f) {
+			dcbt->entry[i].connector = idx++;
+		} else {
+			if (!map[i2c])
+				map[i2c] = ++idx;
+			dcbt->entry[i].connector = map[i2c] - 1;
+		}
+	}
+
+	/* if we created more than one connector, destroy the connector
+	 * table - just in case it has random, rather than stub, entries.
+	 */
+	if (i > 1) {
+		u8 *conntab = olddcb_conntab(bios->dev);
+		if (conntab)
+			conntab[0] = 0x00;
+	}
+}
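+
+/*
+ * Worked example (illustrative): for entries with i2c indices
+ * { 0x00, 0x00, 0x01, 0x0f } the loop above assigns connector indices
+ * { 0, 0, 1, 2 }: the two outputs sharing bus 0 land on one connector,
+ * bus 1 gets the next index, and the bus-less output gets its own.
+ */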
+
+static int
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_table *dcb = &bios->dcb;
+	u8 *dcbt, *conn;
+	int idx;
+
+	dcbt = olddcb_table(dev);
+	if (!dcbt) {
+		/* handle pre-DCB boards */
+		if (bios->type == NVBIOS_BMP) {
+			fabricate_dcb_encoder_table(dev, bios);
+			return 0;
+		}
+
+		return -EINVAL;
+	}
+
+	NV_INFO(drm, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
+
+	dcb->version = dcbt[0];
+	olddcb_outp_foreach(dev, NULL, parse_dcb_entry);
+
+	/*
+	 * apart from v2.1+ not being known to require merging, this
+	 * guarantees dcbent->index is the index of the entry in the rom image
+	 */
+	if (dcb->version < 0x21)
+		merge_like_dcb_entries(dev, dcb);
+
+	if (!dcb->entries)
+		return -ENXIO;
+
+	/* dump connector table entries to log, if any exist */
+	idx = -1;
+	while ((conn = olddcb_conn(dev, ++idx))) {
+		if (conn[0] != 0xff) {
+			NV_INFO(drm, "DCB conn %02d: ", idx);
+			if (olddcb_conntab(dev)[3] < 4)
+				pr_cont("%04x\n", ROM16(conn[0]));
+			else
+				pr_cont("%08x\n", ROM32(conn[0]));
+		}
+	}
+	dcb_fake_connectors(bios);
+	return 0;
+}
+
+static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
+{
+	/*
+	 * The header following the "HWSQ" signature has the number of entries,
+	 * and the entry size
+	 *
+	 * An entry consists of a dword to write to the sequencer control reg
+	 * (0x00001304), followed by the ucode bytes, written sequentially,
+	 * starting at reg 0x00001400
+	 */
+
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	uint8_t bytes_to_write;
+	uint16_t hwsq_entry_offset;
+	int i;
+
+	if (bios->data[hwsq_offset] <= entry) {
+		NV_ERROR(drm, "Too few entries in HW sequencer table for "
+				"requested entry\n");
+		return -ENOENT;
+	}
+
+	bytes_to_write = bios->data[hwsq_offset + 1];
+
+	if (bytes_to_write != 36) {
+		NV_ERROR(drm, "Unknown HW sequencer entry size\n");
+		return -EINVAL;
+	}
+
+	NV_INFO(drm, "Loading NV17 power sequencing microcode\n");
+
+	hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;
+
+	/* set sequencer control */
+	nv_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
+	bytes_to_write -= 4;
+
+	/* write ucode */
+	for (i = 0; i < bytes_to_write; i += 4)
+		nv_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
+
+	/* twiddle NV_PBUS_DEBUG_4 */
+	nv_wr32(device, NV_PBUS_DEBUG_4, nv_rd32(device, NV_PBUS_DEBUG_4) | 0x18);
+
+	return 0;
+}
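+
+/*
+ * Worked example (illustrative): with the fixed 36-byte entry size
+ * checked above, entry 1 starts at hwsq_offset + 2 + 36; its first
+ * dword is written to 0x00001304 and the remaining 32 bytes fill
+ * 0x00001400..0x0000141c.
+ */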
+
+static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
+					struct nvbios *bios)
+{
+	/*
+	 * BMP based cards, from NV17, need microcode loading to correctly
+	 * control the GPIO etc. for LVDS panels
+	 *
+	 * BIT based cards seem to do this directly in the init scripts
+	 *
+	 * The microcode entries are found by the "HWSQ" signature.
+	 */
+
+	const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' };
+	const int sz = sizeof(hwsq_signature);
+	int hwsq_offset;
+
+	hwsq_offset = findstr(bios->data, bios->length, hwsq_signature, sz);
+	if (!hwsq_offset)
+		return 0;
+
+	/* always use entry 0? */
+	return load_nv17_hwsq_ucode_entry(dev, bios, hwsq_offset + sz, 0);
+}
+
+uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	const uint8_t edid_sig[] = {
+			0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+	uint16_t offset = 0;
+	uint16_t newoffset;
+	int searchlen = NV_PROM_SIZE;
+
+	if (bios->fp.edid)
+		return bios->fp.edid;
+
+	while (searchlen) {
+		newoffset = findstr(&bios->data[offset], searchlen,
+								edid_sig, 8);
+		if (!newoffset)
+			return NULL;
+		offset += newoffset;
+		if (!nv_cksum(&bios->data[offset], EDID1_LEN))
+			break;
+
+		searchlen -= offset;
+		offset++;
+	}
+
+	NV_INFO(drm, "Found EDID in BIOS\n");
+
+	return bios->fp.edid = &bios->data[offset];
+}
+
+static bool NVInitVBIOS(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_bios *bios = nouveau_bios(drm->device);
+	struct nvbios *legacy = &drm->vbios;
+
+	memset(legacy, 0, sizeof(struct nvbios));
+	spin_lock_init(&legacy->lock);
+	legacy->dev = dev;
+
+	legacy->data = bios->data;
+	legacy->length = bios->size;
+	legacy->major_version = bios->version.major;
+	legacy->chip_version = bios->version.chip;
+	if (bios->bit_offset) {
+		legacy->type = NVBIOS_BIT;
+		legacy->offset = bios->bit_offset;
+		return !parse_bit_structure(legacy, legacy->offset + 6);
+	} else
+	if (bios->bmp_offset) {
+		legacy->type = NVBIOS_BMP;
+		legacy->offset = bios->bmp_offset;
+		return !parse_bmp_structure(dev, legacy, legacy->offset);
+	}
+
+	return false;
+}
+
+int
+nouveau_run_vbios_init(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	int ret = 0;
+
+	/* Reset the BIOS head to 0. */
+	bios->state.crtchead = 0;
+
+	if (bios->major_version < 5)	/* BMP only */
+		load_nv17_hw_sequencer_ucode(dev, bios);
+
+	if (bios->execute) {
+		bios->fp.last_script_invoc = 0;
+		bios->fp.lvds_init_run = false;
+	}
+
+	return ret;
+}
+
+static bool
+nouveau_bios_posted(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	unsigned htotal;
+
+	if (nv_device(drm->device)->card_type >= NV_50) {
+		if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
+		    NVReadVgaCrtc(dev, 0, 0x1a) == 0)
+			return false;
+		return true;
+	}
+
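+	/* Assemble a 12-bit htotal from the scattered CRTC bits: CR06
+	 * holds bits 7:0, CR07[0] bit 8, CR07[5] bit 9, CR25[0] bit 10
+	 * and CR41[0] bit 11. */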
+	htotal  = NVReadVgaCrtc(dev, 0, 0x06);
+	htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8;
+	htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4;
+	htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10;
+	htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11;
+
+	return (htotal != 0);
+}
+
+int
+nouveau_bios_init(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	int ret;
+
+	if (!NVInitVBIOS(dev))
+		return -ENODEV;
+
+	ret = parse_dcb_table(dev, bios);
+	if (ret)
+		return ret;
+
+	if (!bios->major_version)	/* we don't run version 0 bios */
+		return 0;
+
+	/* init script execution disabled */
+	bios->execute = false;
+
+	/* ... unless card isn't POSTed already */
+	if (!nouveau_bios_posted(dev)) {
+		NV_INFO(drm, "Adaptor not initialised, "
+			"running VBIOS init tables.\n");
+		bios->execute = true;
+	}
+
+	ret = nouveau_run_vbios_init(dev);
+	if (ret)
+		return ret;
+
+	/* feature_byte on BMP is poor, but init always sets CR4B */
+	if (bios->major_version < 5)
+		bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40;
+
+	/* all BIT systems need p_f_m_t for digital_min_front_porch */
+	if (bios->is_mobile || bios->major_version >= 5)
+		ret = parse_fp_mode_table(dev, bios);
+
+	/* allow subsequent scripts to execute */
+	bios->execute = true;
+
+	return 0;
+}
+
+void
+nouveau_bios_takedown(struct drm_device *dev)
+{
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_bios.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_bios.h
new file mode 100644
index 0000000..0067586
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2007-2008 Nouveau Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_DISPBIOS_H__
+#define __NOUVEAU_DISPBIOS_H__
+
+#define DCB_MAX_NUM_ENTRIES 16
+#define DCB_MAX_NUM_I2C_ENTRIES 16
+#define DCB_MAX_NUM_GPIO_ENTRIES 32
+#define DCB_MAX_NUM_CONNECTOR_ENTRIES 16
+
+#define DCB_LOC_ON_CHIP 0
+
+#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
+#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
+#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
+#define ROMPTR(d,x) ({            \
+	struct nouveau_drm *drm = nouveau_drm((d)); \
+	ROM16(x) ? &drm->vbios.data[ROM16(x)] : NULL; \
+})
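+
+/*
+ * Usage sketch (illustrative): the ROM* helpers read little-endian
+ * values at arbitrary byte offsets of the VBIOS image, and ROMPTR turns
+ * a 16-bit image offset into a kernel pointer (NULL when the stored
+ * offset is zero), e.g.
+ *
+ *	u16 tbl = ROM16(bios->data[bitentry->offset]);
+ *	u8 *dcb = ROMPTR(dev, drm->vbios.data[0x36]);
+ */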
+
+struct bit_entry {
+	uint8_t  id;
+	uint8_t  version;
+	uint16_t length;
+	uint16_t offset;
+	uint8_t *data;
+};
+
+int bit_table(struct drm_device *, u8 id, struct bit_entry *);
+
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/conn.h>
+
+struct dcb_table {
+	uint8_t version;
+	int entries;
+	struct dcb_output entry[DCB_MAX_NUM_ENTRIES];
+};
+
+enum nouveau_or {
+	DCB_OUTPUT_A = (1 << 0),
+	DCB_OUTPUT_B = (1 << 1),
+	DCB_OUTPUT_C = (1 << 2)
+};
+
+enum LVDS_script {
+	/* Order *does* matter here */
+	LVDS_INIT = 1,
+	LVDS_RESET,
+	LVDS_BACKLIGHT_ON,
+	LVDS_BACKLIGHT_OFF,
+	LVDS_PANEL_ON,
+	LVDS_PANEL_OFF
+};
+
+struct nvbios {
+	struct drm_device *dev;
+	enum {
+		NVBIOS_BMP,
+		NVBIOS_BIT
+	} type;
+	uint16_t offset;
+	uint32_t length;
+	uint8_t *data;
+
+	uint8_t chip_version;
+
+	uint32_t dactestval;
+	uint32_t tvdactestval;
+	uint8_t digital_min_front_porch;
+	bool fp_no_ddc;
+
+	spinlock_t lock;
+
+	bool execute;
+
+	uint8_t major_version;
+	uint8_t feature_byte;
+	bool is_mobile;
+
+	uint32_t fmaxvco, fminvco;
+
+	bool old_style_init;
+	uint16_t init_script_tbls_ptr;
+	uint16_t extra_init_script_tbl_ptr;
+
+	uint16_t ram_restrict_tbl_ptr;
+	uint8_t ram_restrict_group_count;
+
+	struct dcb_table dcb;
+
+	struct {
+		int crtchead;
+	} state;
+
+	struct {
+		uint16_t fptablepointer;	/* also used by tmds */
+		uint16_t fpxlatetableptr;
+		int xlatwidth;
+		uint16_t lvdsmanufacturerpointer;
+		uint16_t fpxlatemanufacturertableptr;
+		uint16_t mode_ptr;
+		uint16_t xlated_entry;
+		bool power_off_for_reset;
+		bool reset_after_pclk_change;
+		bool dual_link;
+		bool link_c_increment;
+		bool if_is_24bit;
+		int duallink_transition_clk;
+		uint8_t strapless_is_24bit;
+		uint8_t *edid;
+
+		/* will need resetting after suspend */
+		int last_script_invoc;
+		bool lvds_init_run;
+	} fp;
+
+	struct {
+		uint16_t output0_script_ptr;
+		uint16_t output1_script_ptr;
+	} tmds;
+
+	struct {
+		uint16_t mem_init_tbl_ptr;
+		uint16_t sdr_seq_tbl_ptr;
+		uint16_t ddr_seq_tbl_ptr;
+
+		struct {
+			uint8_t crt, tv, panel;
+		} i2c_indices;
+
+		uint16_t lvds_single_a_script_ptr;
+	} legacy;
+};
+
+void *olddcb_table(struct drm_device *);
+void *olddcb_outp(struct drm_device *, u8 idx);
+int olddcb_outp_foreach(struct drm_device *, void *data,
+		     int (*)(struct drm_device *, void *, int idx, u8 *outp));
+u8 *olddcb_conntab(struct drm_device *);
+u8 *olddcb_conn(struct drm_device *, u8 idx);
+
+int nouveau_bios_init(struct drm_device *);
+void nouveau_bios_takedown(struct drm_device *dev);
+int nouveau_run_vbios_init(struct drm_device *);
+struct dcb_connector_table_entry *
+nouveau_bios_connector_entry(struct drm_device *, int index);
+bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
+uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
+int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
+					 bool *dl, bool *if_is_24bit);
+int run_tmds_table(struct drm_device *, struct dcb_output *,
+			  int head, int pxclk);
+int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
+			    enum LVDS_script, int pxclk);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_bo.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_bo.c
new file mode 100644
index 0000000..5a5f021
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -0,0 +1,1546 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ *	    Ben Skeggs   <darktama@iinet.net.au>
+ *	    Jeremy Kolb  <jkolb@brandeis.edu>
+ */
+
+#include <core/engine.h>
+#include <linux/swiotlb.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_fence.h"
+
+#include "nouveau_bo.h"
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
+
+/*
+ * NV10-NV40 tiling helpers
+ */
+
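+/* The hardware exposes a small fixed pool of tile regions in PFB; each
+ * region describes an address range, pitch and flags, and any change has
+ * to be reprogrammed into the engines (GR, MPEG) that cache tiling
+ * state.  The helpers below hand out, reprogram and fence those regions.
+ */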
+static void
+nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
+			   u32 addr, u32 size, u32 pitch, u32 flags)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	int i = reg - drm->tile.reg;
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
+	struct nouveau_engine *engine;
+
+	nouveau_fence_unref(&reg->fence);
+
+	if (tile->pitch)
+		pfb->tile.fini(pfb, i, tile);
+
+	if (pitch)
+		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
+
+	pfb->tile.prog(pfb, i, tile);
+
+	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
+		engine->tile_prog(engine, i);
+	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
+		engine->tile_prog(engine, i);
+}
+
+static struct nouveau_drm_tile *
+nv10_bo_get_tile_region(struct drm_device *dev, int i)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_drm_tile *tile = &drm->tile.reg[i];
+
+	spin_lock(&drm->tile.lock);
+
+	if (!tile->used &&
+	    (!tile->fence || nouveau_fence_done(tile->fence)))
+		tile->used = true;
+	else
+		tile = NULL;
+
+	spin_unlock(&drm->tile.lock);
+	return tile;
+}
+
+static void
+nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
+			struct nouveau_fence *fence)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (tile) {
+		spin_lock(&drm->tile.lock);
+		if (fence) {
+			/* Mark it as pending. */
+			tile->fence = fence;
+			nouveau_fence_ref(fence);
+		}
+
+		tile->used = false;
+		spin_unlock(&drm->tile.lock);
+	}
+}
+
+static struct nouveau_drm_tile *
+nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
+		   u32 size, u32 pitch, u32 flags)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_drm_tile *tile, *found = NULL;
+	int i;
+
+	for (i = 0; i < pfb->tile.regions; i++) {
+		tile = nv10_bo_get_tile_region(dev, i);
+
+		if (pitch && !found) {
+			found = tile;
+			continue;
+
+		} else if (tile && pfb->tile.region[i].pitch) {
+			/* Kill an unused tile region. */
+			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
+		}
+
+		nv10_bo_put_tile_region(dev, tile, NULL);
+	}
+
+	if (found)
+		nv10_bo_update_tile_region(dev, found, addr, size,
+					    pitch, flags);
+	return found;
+}
+
+static void
+nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct drm_device *dev = drm->dev;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	if (unlikely(nvbo->gem))
+		DRM_ERROR("bo %p still attached to GEM object\n", bo);
+	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
+	kfree(nvbo);
+}
+
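+/* Alignment rules as encoded below: pre-NV50 tiled bos need a
+ * chipset-dependent base alignment (16KiB on nv1x/nv2x, 32KiB on nv3x,
+ * 64KiB on nv4x) and a size rounded to a whole tile row (tile_mode * 64
+ * bytes, * 32 on nv1x); on NV50+ both size and alignment are simply
+ * rounded up to the bo's vm page size.  Everything ends up a multiple
+ * of the CPU page size.
+ */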
+static void
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
+		       int *align, int *size)
+{
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+	struct nouveau_device *device = nv_device(drm->device);
+
+	if (device->card_type < NV_50) {
+		if (nvbo->tile_mode) {
+			if (device->chipset >= 0x40) {
+				*align = 65536;
+				*size = roundup(*size, 64 * nvbo->tile_mode);
+
+			} else if (device->chipset >= 0x30) {
+				*align = 32768;
+				*size = roundup(*size, 64 * nvbo->tile_mode);
+
+			} else if (device->chipset >= 0x20) {
+				*align = 16384;
+				*size = roundup(*size, 64 * nvbo->tile_mode);
+
+			} else if (device->chipset >= 0x10) {
+				*align = 16384;
+				*size = roundup(*size, 32 * nvbo->tile_mode);
+			}
+		}
+	} else {
+		*size = roundup(*size, (1 << nvbo->page_shift));
+	*align = max((1 << nvbo->page_shift), *align);
+	}
+
+	*size = roundup(*size, PAGE_SIZE);
+}
+
+int
+nouveau_bo_new(struct drm_device *dev, int size, int align,
+	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+	       struct sg_table *sg,
+	       struct nouveau_bo **pnvbo)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_bo *nvbo;
+	size_t acc_size;
+	int ret;
+	int type = ttm_bo_type_device;
+
+	if (sg)
+		type = ttm_bo_type_sg;
+
+	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
+	if (!nvbo)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&nvbo->head);
+	INIT_LIST_HEAD(&nvbo->entry);
+	INIT_LIST_HEAD(&nvbo->vma_list);
+	nvbo->tile_mode = tile_mode;
+	nvbo->tile_flags = tile_flags;
+	nvbo->bo.bdev = &drm->ttm.bdev;
+
+	nvbo->page_shift = 12;
+	if (drm->client.base.vm) {
+		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
+			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
+	}
+
+	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
+	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
+	nouveau_bo_placement_set(nvbo, flags, 0);
+
+	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
+				       sizeof(struct nouveau_bo));
+
+	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
+			  type, &nvbo->placement,
+			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
+			  nouveau_bo_del_ttm);
+	if (ret) {
+		/* ttm will call nouveau_bo_del_ttm if it fails. */
+		return ret;
+	}
+
+	*pnvbo = nvbo;
+	return 0;
+}
+
+static void
+set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
+{
+	*n = 0;
+
+	if (type & TTM_PL_FLAG_VRAM)
+		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
+	if (type & TTM_PL_FLAG_TT)
+		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
+	if (type & TTM_PL_FLAG_SYSTEM)
+		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
+}
+
+static void
+set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+{
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;
+
+	if (nv_device(drm->device)->card_type == NV_10 &&
+	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->bo.mem.num_pages < vram_pages / 4) {
+		/*
+		 * Make sure that the color and depth buffers are handled
+		 * by independent memory controller units. Up to a 9x
+		 * speed up when alpha-blending and depth-test are enabled
+		 * at the same time.
+		 */
+		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
+			nvbo->placement.fpfn = vram_pages / 2;
+			nvbo->placement.lpfn = ~0;
+		} else {
+			nvbo->placement.fpfn = 0;
+			nvbo->placement.lpfn = vram_pages / 2;
+		}
+	}
+}
+
+void
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
+{
+	struct ttm_placement *pl = &nvbo->placement;
+	uint32_t flags = TTM_PL_MASK_CACHING |
+		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
+
+	pl->placement = nvbo->placements;
+	set_placement_list(nvbo->placements, &pl->num_placement,
+			   type, flags);
+
+	pl->busy_placement = nvbo->busy_placements;
+	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
+			   type | busy, flags);
+
+	set_placement_range(nvbo, type);
+}
+
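+/* Pinning is refcounted: only the first pin actually validates the bo
+ * into the requested memory type and adjusts the vram/gart accounting;
+ * later pins just take a reference, and fail if they ask for a memtype
+ * other than the one the bo is already pinned to.
+ */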
+int
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
+{
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+	struct ttm_buffer_object *bo = &nvbo->bo;
+	int ret;
+
+	ret = ttm_bo_reserve(bo, false, false, false, 0);
+	if (ret)
+		goto out;
+
+	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
+		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
+			 1 << bo->mem.mem_type, memtype);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (nvbo->pin_refcnt++)
+		goto out;
+
+	nouveau_bo_placement_set(nvbo, memtype, 0);
+
+	ret = nouveau_bo_validate(nvbo, false, false);
+	if (ret == 0) {
+		switch (bo->mem.mem_type) {
+		case TTM_PL_VRAM:
+			drm->gem.vram_available -= bo->mem.size;
+			break;
+		case TTM_PL_TT:
+			drm->gem.gart_available -= bo->mem.size;
+			break;
+		default:
+			break;
+		}
+	}
+out:
+	ttm_bo_unreserve(bo);
+	return ret;
+}
+
+int
+nouveau_bo_unpin(struct nouveau_bo *nvbo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+	struct ttm_buffer_object *bo = &nvbo->bo;
+	int ret;
+
+	ret = ttm_bo_reserve(bo, false, false, false, 0);
+	if (ret)
+		return ret;
+
+	if (--nvbo->pin_refcnt)
+		goto out;
+
+	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
+
+	ret = nouveau_bo_validate(nvbo, false, false);
+	if (ret == 0) {
+		switch (bo->mem.mem_type) {
+		case TTM_PL_VRAM:
+			drm->gem.vram_available += bo->mem.size;
+			break;
+		case TTM_PL_TT:
+			drm->gem.gart_available += bo->mem.size;
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	ttm_bo_unreserve(bo);
+	return ret;
+}
+
+int
+nouveau_bo_map(struct nouveau_bo *nvbo)
+{
+	int ret;
+
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+	ttm_bo_unreserve(&nvbo->bo);
+	return ret;
+}
+
+void
+nouveau_bo_unmap(struct nouveau_bo *nvbo)
+{
+	if (nvbo)
+		ttm_bo_kunmap(&nvbo->kmap);
+}
+
+int
+nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
+		    bool no_wait_gpu)
+{
+	int ret;
+
+	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
+			      interruptible, no_wait_gpu);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
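+/* Accessors for a kmap'd bo: ttm tells us whether the mapping is iomem
+ * (e.g. vram behind the BAR) or ordinary kernel memory, and the access
+ * primitive is chosen accordingly.  'index' is in units of the access
+ * size, not bytes.
+ */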
+u16
+nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
+{
+	bool is_iomem;
+	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+	mem = &mem[index];
+	if (is_iomem)
+		return ioread16_native((void __force __iomem *)mem);
+	else
+		return *mem;
+}
+
+void
+nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
+{
+	bool is_iomem;
+	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+	mem = &mem[index];
+	if (is_iomem)
+		iowrite16_native(val, (void __force __iomem *)mem);
+	else
+		*mem = val;
+}
+
+u32
+nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
+{
+	bool is_iomem;
+	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+	mem = &mem[index];
+	if (is_iomem)
+		return ioread32_native((void __force __iomem *)mem);
+	else
+		return *mem;
+}
+
+void
+nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
+{
+	bool is_iomem;
+	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+	mem = &mem[index];
+	if (is_iomem)
+		iowrite32_native(val, (void __force __iomem *)mem);
+	else
+		*mem = val;
+}
+
+static struct ttm_tt *
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
+		      uint32_t page_flags, struct page *dummy_read)
+{
+#if __OS_HAS_AGP
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct drm_device *dev = drm->dev;
+
+	if (drm->agp.stat == ENABLED) {
+		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
+					 page_flags, dummy_read);
+	}
+#endif
+
+	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
+}
+
+static int
+nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+	/* We'll do this from user space. */
+	return 0;
+}
+
+static int
+nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+			 struct ttm_mem_type_manager *man)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+		if (nv_device(drm->device)->card_type >= NV_50) {
+			man->func = &nouveau_vram_manager;
+			man->io_reserve_fastpath = false;
+			man->use_io_reserve_lru = true;
+		} else {
+			man->func = &ttm_bo_manager_func;
+		}
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			     TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED |
+					 TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	case TTM_PL_TT:
+		if (nv_device(drm->device)->card_type >= NV_50)
+			man->func = &nouveau_gart_manager;
+		else
+		if (drm->agp.stat != ENABLED)
+			man->func = &nv04_gart_manager;
+		else
+			man->func = &ttm_bo_manager_func;
+
+		if (drm->agp.stat == ENABLED) {
+			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+			man->available_caching = TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_WC;
+			man->default_caching = TTM_PL_FLAG_WC;
+		} else {
+			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+				     TTM_MEMTYPE_FLAG_CMA;
+			man->available_caching = TTM_PL_MASK_CACHING;
+			man->default_caching = TTM_PL_FLAG_CACHED;
+		}
+
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
+					 TTM_PL_FLAG_SYSTEM);
+		break;
+	default:
+		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
+		break;
+	}
+
+	*pl = nvbo->placement;
+}
+
+
+/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
+ * TTM_PL_{VRAM,TT} directly.
+ */
+
+static int
+nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
+			      struct nouveau_bo *nvbo, bool evict,
+			      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_fence *fence = NULL;
+	int ret;
+
+	ret = nouveau_fence_new(chan, false, &fence);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
+					no_wait_gpu, new_mem);
+	nouveau_fence_unref(&fence);
+	return ret;
+}
+
+static int
+nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = RING_SPACE(chan, 2);
+	if (ret == 0) {
+		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+		OUT_RING  (chan, handle);
+		FIRE_RING (chan);
+	}
+	return ret;
+}
+
+static int
+nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_mem *node = old_mem->mm_node;
+	int ret = RING_SPACE(chan, 10);
+	if (ret == 0) {
+		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
+		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
+		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
+		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, new_mem->num_pages);
+		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
+	}
+	return ret;
+}
+
+static int
+nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = RING_SPACE(chan, 2);
+	if (ret == 0) {
+		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+		OUT_RING  (chan, handle);
+	}
+	return ret;
+}
+
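+/* The copy is programmed as a 2D transfer with pitch and line length
+ * both PAGE_SIZE, so each "line" is one page.  Submissions are capped
+ * at 8191 lines here (2047 in the nvc0/nv04 M2MF paths below), hence
+ * the chunking loop: e.g. a 64MiB bo is 16384 pages and goes out as
+ * 8191 + 8191 + 2 lines.
+ */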
+static int
+nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_mem *node = old_mem->mm_node;
+	u64 src_offset = node->vma[0].offset;
+	u64 dst_offset = node->vma[1].offset;
+	u32 page_count = new_mem->num_pages;
+	int ret;
+
+	while (page_count) {
+		int line_count = (page_count > 8191) ? 8191 : page_count;
+
+		ret = RING_SPACE(chan, 11);
+		if (ret)
+			return ret;
+
+		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
+		OUT_RING  (chan, upper_32_bits(src_offset));
+		OUT_RING  (chan, lower_32_bits(src_offset));
+		OUT_RING  (chan, upper_32_bits(dst_offset));
+		OUT_RING  (chan, lower_32_bits(dst_offset));
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, line_count);
+		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
+		OUT_RING  (chan, 0x00000110);
+
+		page_count -= line_count;
+		src_offset += (PAGE_SIZE * line_count);
+		dst_offset += (PAGE_SIZE * line_count);
+	}
+
+	return 0;
+}
+
+static int
+nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_mem *node = old_mem->mm_node;
+	u64 src_offset = node->vma[0].offset;
+	u64 dst_offset = node->vma[1].offset;
+	u32 page_count = new_mem->num_pages;
+	int ret;
+
+	while (page_count) {
+		int line_count = (page_count > 2047) ? 2047 : page_count;
+
+		ret = RING_SPACE(chan, 12);
+		if (ret)
+			return ret;
+
+		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
+		OUT_RING  (chan, upper_32_bits(dst_offset));
+		OUT_RING  (chan, lower_32_bits(dst_offset));
+		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
+		OUT_RING  (chan, upper_32_bits(src_offset));
+		OUT_RING  (chan, lower_32_bits(src_offset));
+		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
+		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
+		OUT_RING  (chan, PAGE_SIZE); /* line_length */
+		OUT_RING  (chan, line_count);
+		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
+		OUT_RING  (chan, 0x00100110);
+
+		page_count -= line_count;
+		src_offset += (PAGE_SIZE * line_count);
+		dst_offset += (PAGE_SIZE * line_count);
+	}
+
+	return 0;
+}
+
+static int
+nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_mem *node = old_mem->mm_node;
+	u64 src_offset = node->vma[0].offset;
+	u64 dst_offset = node->vma[1].offset;
+	u32 page_count = new_mem->num_pages;
+	int ret;
+
+	while (page_count) {
+		int line_count = (page_count > 8191) ? 8191 : page_count;
+
+		ret = RING_SPACE(chan, 11);
+		if (ret)
+			return ret;
+
+		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
+		OUT_RING  (chan, upper_32_bits(src_offset));
+		OUT_RING  (chan, lower_32_bits(src_offset));
+		OUT_RING  (chan, upper_32_bits(dst_offset));
+		OUT_RING  (chan, lower_32_bits(dst_offset));
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, PAGE_SIZE);
+		OUT_RING  (chan, line_count);
+		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
+		OUT_RING  (chan, 0x00000110);
+
+		page_count -= line_count;
+		src_offset += (PAGE_SIZE * line_count);
+		dst_offset += (PAGE_SIZE * line_count);
+	}
+
+	return 0;
+}
+
+static int
+nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_mem *node = old_mem->mm_node;
+	int ret = RING_SPACE(chan, 7);
+	if (ret == 0) {
+		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
+		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
+		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
+		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, 0x00000000 /* COPY */);
+		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
+	}
+	return ret;
+}
+
+static int
+nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_mem *node = old_mem->mm_node;
+	int ret = RING_SPACE(chan, 7);
+	if (ret == 0) {
+		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
+		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
+		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
+		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
+		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
+		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
+	}
+	return ret;
+}
+
+static int
+nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = RING_SPACE(chan, 6);
+	if (ret == 0) {
+		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+		OUT_RING  (chan, handle);
+		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
+		OUT_RING  (chan, NvNotify0);
+		OUT_RING  (chan, NvDmaFB);
+		OUT_RING  (chan, NvDmaFB);
+	}
+
+	return ret;
+}
+
+static int
+nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_mem *node = old_mem->mm_node;
+	u64 length = (new_mem->num_pages << PAGE_SHIFT);
+	u64 src_offset = node->vma[0].offset;
+	u64 dst_offset = node->vma[1].offset;
+	int src_tiled = !!node->memtype;
+	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
+	int ret;
+
+	while (length) {
+		u32 amount, stride, height;
+
+		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
+		if (ret)
+			return ret;
+
+		amount  = min(length, (u64)(4 * 1024 * 1024));
+		stride  = 16 * 4;
+		height  = amount / stride;
+
+		if (src_tiled) {
+			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, stride);
+			OUT_RING  (chan, height);
+			OUT_RING  (chan, 1);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, 0);
+		} else {
+			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
+			OUT_RING  (chan, 1);
+		}
+		if (dst_tiled) {
+			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, stride);
+			OUT_RING  (chan, height);
+			OUT_RING  (chan, 1);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, 0);
+		} else {
+			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
+			OUT_RING  (chan, 1);
+		}
+
+		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
+		OUT_RING  (chan, upper_32_bits(src_offset));
+		OUT_RING  (chan, upper_32_bits(dst_offset));
+		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
+		OUT_RING  (chan, lower_32_bits(src_offset));
+		OUT_RING  (chan, lower_32_bits(dst_offset));
+		OUT_RING  (chan, stride);
+		OUT_RING  (chan, stride);
+		OUT_RING  (chan, stride);
+		OUT_RING  (chan, height);
+		OUT_RING  (chan, 0x00000101);
+		OUT_RING  (chan, 0x00000000);
+		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+		OUT_RING  (chan, 0);
+
+		length -= amount;
+		src_offset += amount;
+		dst_offset += amount;
+	}
+
+	return 0;
+}
+
+static int
+nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+	int ret = RING_SPACE(chan, 4);
+	if (ret == 0) {
+		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+		OUT_RING  (chan, handle);
+		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
+		OUT_RING  (chan, NvNotify0);
+	}
+
+	return ret;
+}
+
+static inline uint32_t
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
+{
+	if (mem->mem_type == TTM_PL_TT)
+		return NvDmaTT;
+	return NvDmaFB;
+}
+
+static int
+nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	u32 src_offset = old_mem->start << PAGE_SHIFT;
+	u32 dst_offset = new_mem->start << PAGE_SHIFT;
+	u32 page_count = new_mem->num_pages;
+	int ret;
+
+	ret = RING_SPACE(chan, 3);
+	if (ret)
+		return ret;
+
+	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
+	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+
+	while (page_count) {
+		int line_count = (page_count > 2047) ? 2047 : page_count;
+
+		ret = RING_SPACE(chan, 11);
+		if (ret)
+			return ret;
+
+		BEGIN_NV04(chan, NvSubCopy,
+				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
+		OUT_RING  (chan, src_offset);
+		OUT_RING  (chan, dst_offset);
+		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
+		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
+		OUT_RING  (chan, PAGE_SIZE); /* line_length */
+		OUT_RING  (chan, line_count);
+		OUT_RING  (chan, 0x00000101);
+		OUT_RING  (chan, 0x00000000);
+		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+		OUT_RING  (chan, 0);
+
+		page_count -= line_count;
+		src_offset += (PAGE_SIZE * line_count);
+		dst_offset += (PAGE_SIZE * line_count);
+	}
+
+	return 0;
+}
+
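+/* Set up a temporary GPU virtual mapping of a memory region so the copy
+ * engines can address it: vram is mapped directly, anything else goes
+ * through its scatterlist of pages.
+ */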
+static int
+nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
+		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
+{
+	struct nouveau_mem *node = mem->mm_node;
+	int ret;
+
+	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
+			     PAGE_SHIFT, node->page_shift,
+			     NV_MEM_ACCESS_RW, vma);
+	if (ret)
+		return ret;
+
+	if (mem->mem_type == TTM_PL_VRAM)
+		nouveau_vm_map(vma, node);
+	else
+		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
+
+	return 0;
+}
+
+static int
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_channel *chan = drm->channel;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	int ret;
+
+	mutex_lock(&chan->cli->mutex);
+
+	/* create temporary vmas for the transfer and attach them to the
+	 * old nouveau_mem node; these will get cleaned up after ttm has
+	 * destroyed the ttm_mem_reg
+	 */
+	if (nv_device(drm->device)->card_type >= NV_50) {
+		struct nouveau_mem *node = old_mem->mm_node;
+
+		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
+		if (ret)
+			goto out;
+
+		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
+		if (ret)
+			goto out;
+	}
+
+	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
+	if (ret == 0) {
+		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
+						    no_wait_gpu, new_mem);
+	}
+
+out:
+	mutex_unlock(&chan->cli->mutex);
+	return ret;
+}
+
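+/* Probe for an accelerated copy method, newest class first: the first
+ * object class that can be instantiated on the channel (and whose init
+ * succeeds) wins; otherwise buffer moves fall back to CPU copies.  Note
+ * the empty terminator entry: the walk stops there, so the trailing
+ * 0x88b4 CRYPT entry is unreachable (presumably disabled on purpose).
+ */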
+void
+nouveau_bo_move_init(struct nouveau_drm *drm)
+{
+	static const struct {
+		const char *name;
+		int engine;
+		u32 oclass;
+		int (*exec)(struct nouveau_channel *,
+			    struct ttm_buffer_object *,
+			    struct ttm_mem_reg *, struct ttm_mem_reg *);
+		int (*init)(struct nouveau_channel *, u32 handle);
+	} _methods[] = {
+		{  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
+		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
+		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
+		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
+		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
+		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
+		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
+		{},
+		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
+	}, *mthd = _methods;
+	const char *name = "CPU";
+	int ret;
+
+	do {
+		struct nouveau_object *object;
+		struct nouveau_channel *chan;
+		u32 handle = (mthd->engine << 16) | mthd->oclass;
+
+		if (mthd->init == nve0_bo_move_init)
+			chan = drm->cechan;
+		else
+			chan = drm->channel;
+		if (chan == NULL)
+			continue;
+
+		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
+					 mthd->oclass, NULL, 0, &object);
+		if (ret == 0) {
+			ret = mthd->init(chan, handle);
+			if (ret) {
+				nouveau_object_del(nv_object(drm),
+						   chan->handle, handle);
+				continue;
+			}
+
+			drm->ttm.move = mthd->exec;
+			name = mthd->name;
+			break;
+		}
+	} while ((++mthd)->exec);
+
+	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
+}
+
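+/* The copy engines only address VRAM and TT (see above), so moves to or
+ * from SYSTEM memory bounce through a temporary TT placement: "flipd"
+ * copies VRAM->TT in hardware and lets ttm finish TT->SYSTEM, while
+ * "flips" below does the reverse for uploads.
+ */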
+static int
+nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
+		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+	struct ttm_placement placement;
+	struct ttm_mem_reg tmp_mem;
+	int ret;
+
+	placement.fpfn = placement.lpfn = 0;
+	placement.num_placement = placement.num_busy_placement = 1;
+	placement.placement = placement.busy_placement = &placement_memtype;
+
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
+	if (ret)
+		return ret;
+
+	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
+	if (ret)
+		goto out;
+
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
+	if (ret)
+		goto out;
+
+	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+out:
+	ttm_bo_mem_put(bo, &tmp_mem);
+	return ret;
+}
+
+static int
+nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
+		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+	struct ttm_placement placement;
+	struct ttm_mem_reg tmp_mem;
+	int ret;
+
+	placement.fpfn = placement.lpfn = 0;
+	placement.num_placement = placement.num_busy_placement = 1;
+	placement.placement = placement.busy_placement = &placement_memtype;
+
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+	if (ret)
+		goto out;
+
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
+
+out:
+	ttm_bo_mem_put(bo, &tmp_mem);
+	return ret;
+}
+
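+/* Keep every per-vm mapping of the bo in sync with its new backing
+ * store: remap on moves into VRAM (or small-page TT), unmap otherwise.
+ */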
+static void
+nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_vma *vma;
+
+	/* ttm can now (stupidly) pass the driver bos it didn't create... */
+	if (bo->destroy != nouveau_bo_del_ttm)
+		return;
+
+	list_for_each_entry(vma, &nvbo->vma_list, head) {
+		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
+			nouveau_vm_map(vma, new_mem->mm_node);
+		} else
+		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
+		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
+			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
+				nouveau_vm_map_sg_table(vma, 0, new_mem->
+						  num_pages << PAGE_SHIFT,
+						  new_mem->mm_node);
+			else
+				nouveau_vm_map_sg(vma, 0, new_mem->
+						  num_pages << PAGE_SHIFT,
+						  new_mem->mm_node);
+		} else {
+			nouveau_vm_unmap(vma);
+		}
+	}
+}
+
+static int
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
+		   struct nouveau_drm_tile **new_tile)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct drm_device *dev = drm->dev;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	u64 offset = new_mem->start << PAGE_SHIFT;
+
+	*new_tile = NULL;
+	if (new_mem->mem_type != TTM_PL_VRAM)
+		return 0;
+
+	if (nv_device(drm->device)->card_type >= NV_10) {
+		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
+						nvbo->tile_mode,
+						nvbo->tile_flags);
+	}
+
+	return 0;
+}
+
+static void
+nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
+		      struct nouveau_drm_tile *new_tile,
+		      struct nouveau_drm_tile **old_tile)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct drm_device *dev = drm->dev;
+
+	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
+	*old_tile = new_tile;
+}
+
+static int
+nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
+		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct nouveau_drm_tile *new_tile = NULL;
+	int ret = 0;
+
+	if (nv_device(drm->device)->card_type < NV_50) {
+		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+		if (ret)
+			return ret;
+	}
+
+	/* Fake bo copy. */
+	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
+		BUG_ON(bo->mem.mm_node != NULL);
+		bo->mem = *new_mem;
+		new_mem->mm_node = NULL;
+		goto out;
+	}
+
+	/* CPU copy if we have no accelerated method available */
+	if (!drm->ttm.move) {
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+		goto out;
+	}
+
+	/* Hardware assisted copy. */
+	if (new_mem->mem_type == TTM_PL_SYSTEM)
+		ret = nouveau_bo_move_flipd(bo, evict, intr,
+					    no_wait_gpu, new_mem);
+	else if (old_mem->mem_type == TTM_PL_SYSTEM)
+		ret = nouveau_bo_move_flips(bo, evict, intr,
+					    no_wait_gpu, new_mem);
+	else
+		ret = nouveau_bo_move_m2mf(bo, evict, intr,
+					   no_wait_gpu, new_mem);
+
+	if (!ret)
+		goto out;
+
+	/* Fallback to software copy. */
+	ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+
+out:
+	if (nv_device(drm->device)->card_type < NV_50) {
+		if (ret)
+			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
+		else
+			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+	}
+
+	return ret;
+}
+
+static int
+nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	return 0;
+}
+
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct drm_device *dev = drm->dev;
+	int ret;
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		return 0;
+	case TTM_PL_TT:
+#if __OS_HAS_AGP
+		if (drm->agp.stat == ENABLED) {
+			mem->bus.offset = mem->start << PAGE_SHIFT;
+			mem->bus.base = drm->agp.base;
+			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = pci_resource_start(dev->pdev, 1);
+		mem->bus.is_iomem = true;
+		if (nv_device(drm->device)->card_type >= NV_50) {
+			struct nouveau_bar *bar = nouveau_bar(drm->device);
+			struct nouveau_mem *node = mem->mm_node;
+
+			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
+					&node->bar_vma);
+			if (ret)
+				return ret;
+
+			mem->bus.offset = node->bar_vma.offset;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nouveau_bar *bar = nouveau_bar(drm->device);
+	struct nouveau_mem *node = mem->mm_node;
+
+	if (!node->bar_vma.node)
+		return;
+
+	bar->unmap(bar, &node->bar_vma);
+}
+
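+/* Called when a CPU fault is about to be served: if the bo lives in
+ * VRAM beyond the BAR1-mappable window (or is a tiled bo on NV50+),
+ * re-validate it into the mappable range first.
+ */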
+static int
+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_device *device = nv_device(drm->device);
+	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
+
+	/* as long as the bo isn't in vram, and isn't tiled, we've got
+	 * nothing to do here.
+	 */
+	if (bo->mem.mem_type != TTM_PL_VRAM) {
+		if (nv_device(drm->device)->card_type < NV_50 ||
+		    !nouveau_bo_tile_layout(nvbo))
+			return 0;
+	}
+
+	/* make sure bo is in mappable vram */
+	if (bo->mem.start + bo->mem.num_pages < mappable)
+		return 0;
+
+	nvbo->placement.fpfn = 0;
+	nvbo->placement.lpfn = mappable;
+	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
+	return nouveau_bo_validate(nvbo, false, false);
+}
+
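+/* Page population, in priority order: dma-buf imports already carry
+ * their pages (only the address arrays are built), then AGP, then the
+ * swiotlb-backed DMA pool, and finally the generic pool with a manual
+ * pci_map_page() of each page.
+ */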
+static int
+nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	struct ttm_dma_tt *ttm_dma = (void *)ttm;
+	struct nouveau_drm *drm;
+	struct drm_device *dev;
+	unsigned i;
+	int r;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	if (slave && ttm->sg) {
+		/* make userspace faulting work */
+		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+						 ttm_dma->dma_address, ttm->num_pages);
+		ttm->state = tt_unbound;
+		return 0;
+	}
+
+	drm = nouveau_bdev(ttm->bdev);
+	dev = drm->dev;
+
+#if __OS_HAS_AGP
+	if (drm->agp.stat == ENABLED) {
+		return ttm_agp_tt_populate(ttm);
+	}
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		return ttm_dma_populate((void *)ttm, dev->dev);
+	}
+#endif
+
+	r = ttm_pool_populate(ttm);
+	if (r) {
+		return r;
+	}
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+						   0, PAGE_SIZE,
+						   PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
+			/* unmap everything mapped so far, including page 0 */
+			while (i--) {
+				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+				ttm_dma->dma_address[i] = 0;
+			}
+			ttm_pool_unpopulate(ttm);
+			return -EFAULT;
+		}
+	}
+	return 0;
+}
+
+static void
+nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	struct ttm_dma_tt *ttm_dma = (void *)ttm;
+	struct nouveau_drm *drm;
+	struct drm_device *dev;
+	unsigned i;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (slave)
+		return;
+
+	drm = nouveau_bdev(ttm->bdev);
+	dev = drm->dev;
+
+#if __OS_HAS_AGP
+	if (drm->agp.stat == ENABLED) {
+		ttm_agp_tt_unpopulate(ttm);
+		return;
+	}
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		ttm_dma_unpopulate((void *)ttm, dev->dev);
+		return;
+	}
+#endif
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		if (ttm_dma->dma_address[i]) {
+			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		}
+	}
+
+	ttm_pool_unpopulate(ttm);
+}
+
+void
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
+{
+	struct nouveau_fence *old_fence = NULL;
+
+	if (likely(fence))
+		nouveau_fence_ref(fence);
+
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	old_fence = nvbo->bo.sync_obj;
+	nvbo->bo.sync_obj = fence;
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+	nouveau_fence_unref(&old_fence);
+}
+
+static void
+nouveau_bo_fence_unref(void **sync_obj)
+{
+	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
+}
+
+static void *
+nouveau_bo_fence_ref(void *sync_obj)
+{
+	return nouveau_fence_ref(sync_obj);
+}
+
+static bool
+nouveau_bo_fence_signalled(void *sync_obj)
+{
+	return nouveau_fence_done(sync_obj);
+}
+
+static int
+nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
+{
+	return nouveau_fence_wait(sync_obj, lazy, intr);
+}
+
+static int
+nouveau_bo_fence_flush(void *sync_obj)
+{
+	return 0;
+}
+
+struct ttm_bo_driver nouveau_bo_driver = {
+	.ttm_tt_create = &nouveau_ttm_tt_create,
+	.ttm_tt_populate = &nouveau_ttm_tt_populate,
+	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
+	.invalidate_caches = nouveau_bo_invalidate_caches,
+	.init_mem_type = nouveau_bo_init_mem_type,
+	.evict_flags = nouveau_bo_evict_flags,
+	.move_notify = nouveau_bo_move_ntfy,
+	.move = nouveau_bo_move,
+	.verify_access = nouveau_bo_verify_access,
+	.sync_obj_signaled = nouveau_bo_fence_signalled,
+	.sync_obj_wait = nouveau_bo_fence_wait,
+	.sync_obj_flush = nouveau_bo_fence_flush,
+	.sync_obj_unref = nouveau_bo_fence_unref,
+	.sync_obj_ref = nouveau_bo_fence_ref,
+	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+	.io_mem_free = &nouveau_ttm_io_mem_free,
+};
+
+struct nouveau_vma *
+nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
+{
+	struct nouveau_vma *vma;
+	list_for_each_entry(vma, &nvbo->vma_list, head) {
+		if (vma->vm == vm)
+			return vma;
+	}
+
+	return NULL;
+}
+
+int
+nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
+		   struct nouveau_vma *vma)
+{
+	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
+	int ret;
+
+	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
+			     NV_MEM_ACCESS_RW, vma);
+	if (ret)
+		return ret;
+
+	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
+	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
+		if (node->sg)
+			nouveau_vm_map_sg_table(vma, 0, size, node);
+		else
+			nouveau_vm_map_sg(vma, 0, size, node);
+	}
+
+	list_add_tail(&vma->head, &nvbo->vma_list);
+	vma->refcount = 1;
+	return 0;
+}
+
+void
+nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+{
+	if (vma->node) {
+		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
+			spin_lock(&nvbo->bo.bdev->fence_lock);
+			ttm_bo_wait(&nvbo->bo, false, false, false);
+			spin_unlock(&nvbo->bo.bdev->fence_lock);
+			nouveau_vm_unmap(vma);
+		}
+
+		nouveau_vm_put(vma);
+		list_del(&vma->head);
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_bo.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_bo.h
new file mode 100644
index 0000000..653dbbb
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -0,0 +1,100 @@
+#ifndef __NOUVEAU_BO_H__
+#define __NOUVEAU_BO_H__
+
+struct nouveau_channel;
+struct nouveau_fence;
+struct nouveau_vma;
+
+struct nouveau_bo {
+	struct ttm_buffer_object bo;
+	struct ttm_placement placement;
+	u32 valid_domains;
+	u32 placements[3];
+	u32 busy_placements[3];
+	struct ttm_bo_kmap_obj kmap;
+	struct list_head head;
+
+	/* protected by ttm_bo_reserve() */
+	struct drm_file *reserved_by;
+	struct list_head entry;
+	int pbbo_index;
+	bool validate_mapped;
+
+	struct list_head vma_list;
+	unsigned page_shift;
+
+	u32 tile_mode;
+	u32 tile_flags;
+	struct nouveau_drm_tile *tile;
+
+	struct drm_gem_object *gem;
+
+	/* protect by the ttm reservation lock */
+	int pin_refcnt;
+
+	struct ttm_bo_kmap_obj dma_buf_vmap;
+};
+
+static inline struct nouveau_bo *
+nouveau_bo(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct nouveau_bo, bo);
+}
+
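+/* Reference-swapping helper: replaces *pnvbo with a new reference to
+ * 'ref' (NULL to just drop the old one), unreferencing whatever *pnvbo
+ * pointed at before.
+ */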
+static inline int
+nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
+{
+	struct nouveau_bo *prev;
+
+	if (!pnvbo)
+		return -EINVAL;
+	prev = *pnvbo;
+
+	*pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
+	if (prev) {
+		struct ttm_buffer_object *bo = &prev->bo;
+
+		ttm_bo_unref(&bo);
+	}
+
+	return 0;
+}
+
+extern struct ttm_bo_driver nouveau_bo_driver;
+
+void nouveau_bo_move_init(struct nouveau_drm *);
+int  nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
+		    u32 tile_mode, u32 tile_flags, struct sg_table *sg,
+		    struct nouveau_bo **);
+int  nouveau_bo_pin(struct nouveau_bo *, u32 flags);
+int  nouveau_bo_unpin(struct nouveau_bo *);
+int  nouveau_bo_map(struct nouveau_bo *);
+void nouveau_bo_unmap(struct nouveau_bo *);
+void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
+u16  nouveau_bo_rd16(struct nouveau_bo *, unsigned index);
+void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
+u32  nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
+void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
+void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
+int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
+			 bool no_wait_gpu);
+
+struct nouveau_vma *
+nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
+
+int  nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
+			struct nouveau_vma *);
+void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
+
+/* TODO: submit equivalent to TTM generic API upstream? */
+static inline void __iomem *
+nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
+{
+	bool is_iomem;
+	void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
+						&nvbo->kmap, &is_iomem);
+	WARN_ON_ONCE(ioptr && !is_iomem);
+	return ioptr;
+}
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_chan.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_chan.c
new file mode 100644
index 0000000..eaa80a2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/device.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/instmem.h>
+
+#include <engine/software.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_bo.h"
+#include "nouveau_chan.h"
+#include "nouveau_fence.h"
+#include "nouveau_abi16.h"
+
+MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
+static int nouveau_vram_pushbuf;
+module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
+
+int
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+	struct nouveau_cli *cli = chan->cli;
+	struct nouveau_fence *fence = NULL;
+	int ret;
+
+	ret = nouveau_fence_new(chan, false, &fence);
+	if (!ret) {
+		ret = nouveau_fence_wait(fence, false, false);
+		nouveau_fence_unref(&fence);
+	}
+
+	if (ret)
+		NV_ERROR(cli, "failed to idle channel 0x%08x [%s]\n",
+			 chan->handle, cli->base.name);
+	return ret;
+}
+
+void
+nouveau_channel_del(struct nouveau_channel **pchan)
+{
+	struct nouveau_channel *chan = *pchan;
+	if (chan) {
+		struct nouveau_object *client = nv_object(chan->cli);
+		if (chan->fence) {
+			nouveau_channel_idle(chan);
+			nouveau_fence(chan->drm)->context_del(chan);
+		}
+		nouveau_object_del(client, NVDRM_DEVICE, chan->handle);
+		nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
+		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
+		nouveau_bo_unmap(chan->push.buffer);
+		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
+			nouveau_bo_unpin(chan->push.buffer);
+		nouveau_bo_ref(NULL, &chan->push.buffer);
+		kfree(chan);
+	}
+	*pchan = NULL;
+}
+
+static int
+nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
+		     u32 parent, u32 handle, u32 size,
+		     struct nouveau_channel **pchan)
+{
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_instmem *imem = nouveau_instmem(device);
+	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	struct nouveau_client *client = &cli->base;
+	struct nv_dma_class args = {};
+	struct nouveau_channel *chan;
+	struct nouveau_object *push;
+	u32 target;
+	int ret;
+
+	chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return -ENOMEM;
+
+	chan->cli = cli;
+	chan->drm = drm;
+	chan->handle = handle;
+
+	/* allocate memory for dma push buffer */
+	target = TTM_PL_FLAG_TT;
+	if (nouveau_vram_pushbuf)
+		target = TTM_PL_FLAG_VRAM;
+
+	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
+			    &chan->push.buffer);
+	if (ret == 0) {
+		ret = nouveau_bo_pin(chan->push.buffer, target);
+		if (ret == 0)
+			ret = nouveau_bo_map(chan->push.buffer);
+	}
+
+	if (ret) {
+		nouveau_channel_del(pchan);
+		return ret;
+	}
+
+	/* create dma object covering the *entire* memory space that the
+	 * pushbuf lives in; the GEM code requires that we be able to
+	 * call out to other (indirect) push buffers
+	 */
+	chan->push.vma.offset = chan->push.buffer->bo.offset;
+	chan->push.handle = NVDRM_PUSH | (handle & 0xffff);
+
+	if (device->card_type >= NV_50) {
+		ret = nouveau_bo_vma_add(chan->push.buffer, client->vm,
+					&chan->push.vma);
+		if (ret) {
+			nouveau_channel_del(pchan);
+			return ret;
+		}
+
+		args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+		args.start = 0;
+		args.limit = client->vm->vmm->limit - 1;
+	} else
+	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
+		u64 limit = pfb->ram.size - imem->reserved - 1;
+		if (device->card_type == NV_04) {
+			/* nv04 vram pushbuf hack, retarget to its location in
+			 * the framebuffer bar rather than direct vram access..
+			 * nfi why this exists, it came from the -nv ddx.
+			 */
+			args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
+			args.start = pci_resource_start(device->pdev, 1);
+			args.limit = args.start + limit;
+		} else {
+			args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
+			args.start = 0;
+			args.limit = limit;
+		}
+	} else {
+		if (chan->drm->agp.stat == ENABLED) {
+			args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+			args.start = chan->drm->agp.base;
+			args.limit = chan->drm->agp.base +
+				     chan->drm->agp.size - 1;
+		} else {
+			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+			args.start = 0;
+			args.limit = vmm->limit - 1;
+		}
+	}
+
+	ret = nouveau_object_new(nv_object(chan->cli), parent,
+				 chan->push.handle, 0x0002,
+				 &args, sizeof(args), &push);
+	if (ret) {
+		nouveau_channel_del(pchan);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
+		    u32 parent, u32 handle, u32 engine,
+		    struct nouveau_channel **pchan)
+{
+	static const u16 oclasses[] = { NVE0_CHANNEL_IND_CLASS,
+					NVC0_CHANNEL_IND_CLASS,
+					NV84_CHANNEL_IND_CLASS,
+					NV50_CHANNEL_IND_CLASS,
+					0 };
+	const u16 *oclass = oclasses;
+	struct nve0_channel_ind_class args;
+	struct nouveau_channel *chan;
+	int ret;
+
+	/* allocate dma push buffer */
+	ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan);
+	*pchan = chan;
+	if (ret)
+		return ret;
+
+	/* create channel object */
+	args.pushbuf = chan->push.handle;
+	args.ioffset = 0x10000 + chan->push.vma.offset;
+	args.ilength = 0x02000;
+	args.engine  = engine;
+
+	do {
+		ret = nouveau_object_new(nv_object(cli), parent, handle,
+					 *oclass++, &args, sizeof(args),
+					 &chan->object);
+		if (ret == 0)
+			return ret;
+	} while (*oclass);
+
+	nouveau_channel_del(pchan);
+	return ret;
+}
+
+static int
+nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
+		    u32 parent, u32 handle, struct nouveau_channel **pchan)
+{
+	static const u16 oclasses[] = { NV40_CHANNEL_DMA_CLASS,
+					NV17_CHANNEL_DMA_CLASS,
+					NV10_CHANNEL_DMA_CLASS,
+					NV03_CHANNEL_DMA_CLASS,
+					0 };
+	const u16 *oclass = oclasses;
+	struct nv03_channel_dma_class args;
+	struct nouveau_channel *chan;
+	int ret;
+
+	/* allocate dma push buffer */
+	ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan);
+	*pchan = chan;
+	if (ret)
+		return ret;
+
+	/* create channel object */
+	args.pushbuf = chan->push.handle;
+	args.offset = chan->push.vma.offset;
+
+	do {
+		ret = nouveau_object_new(nv_object(cli), parent, handle,
+					 *oclass++, &args, sizeof(args),
+					 &chan->object);
+		if (ret == 0)
+			return ret;
+	} while (*oclass);
+
+	nouveau_channel_del(pchan);
+	return ret;
+}
+
+static int
+nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
+{
+	struct nouveau_client *client = nv_client(chan->cli);
+	struct nouveau_device *device = nv_device(chan->drm->device);
+	struct nouveau_instmem *imem = nouveau_instmem(device);
+	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	struct nouveau_software_chan *swch;
+	struct nouveau_object *object;
+	struct nv_dma_class args = {};
+	int ret, i;
+
+	/* allocate dma objects to cover all allowed vram, and gart */
+	if (device->card_type < NV_C0) {
+		if (device->card_type >= NV_50) {
+			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+			args.start = 0;
+			args.limit = client->vm->vmm->limit - 1;
+		} else {
+			args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
+			args.start = 0;
+			args.limit = pfb->ram.size - imem->reserved - 1;
+		}
+
+		ret = nouveau_object_new(nv_object(client), chan->handle, vram,
+					 0x003d, &args, sizeof(args), &object);
+		if (ret)
+			return ret;
+
+		if (device->card_type >= NV_50) {
+			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+			args.start = 0;
+			args.limit = client->vm->vmm->limit - 1;
+		} else
+		if (chan->drm->agp.stat == ENABLED) {
+			args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+			args.start = chan->drm->agp.base;
+			args.limit = chan->drm->agp.base +
+				     chan->drm->agp.size - 1;
+		} else {
+			args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+			args.start = 0;
+			args.limit = vmm->limit - 1;
+		}
+
+		ret = nouveau_object_new(nv_object(client), chan->handle, gart,
+					 0x003d, &args, sizeof(args), &object);
+		if (ret)
+			return ret;
+
+		chan->vram = vram;
+		chan->gart = gart;
+	}
+
+	/* initialise dma tracking parameters */
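+	/* DMA-mode classes (0x6b/0x6e) treat the whole 0x10000-byte push
+	 * buffer as the ring: 0x10000/4 dwords, minus 2.  IB-mode channels
+	 * instead place a 0x2000-byte indirect buffer (0x2000/8 = 1024
+	 * entries, so ib_max = 1023) after the first 0x10000 bytes,
+	 * matching the ioffset/ilength programmed at creation time.
+	 */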
+	switch (nv_hclass(chan->object) & 0x00ff) {
+	case 0x006b:
+	case 0x006e:
+		chan->user_put = 0x40;
+		chan->user_get = 0x44;
+		chan->dma.max = (0x10000 / 4) - 2;
+		break;
+	default:
+		chan->user_put = 0x40;
+		chan->user_get = 0x44;
+		chan->user_get_hi = 0x60;
+		chan->dma.ib_base =  0x10000 / 4;
+		chan->dma.ib_max  = (0x02000 / 8) - 1;
+		chan->dma.ib_put  = 0;
+		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
+		chan->dma.max = chan->dma.ib_base;
+		break;
+	}
+
+	chan->dma.put = 0;
+	chan->dma.cur = chan->dma.put;
+	chan->dma.free = chan->dma.max - chan->dma.cur;
+
+	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+		OUT_RING(chan, 0x00000000);
+
+	/* allocate software object class (used for fences on <= nv05, and
+	 * to signal flip completion), bind it to a subchannel.
+	 */
+	if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
+		ret = nouveau_object_new(nv_object(client), chan->handle,
+					 NvSw, nouveau_abi16_swclass(chan->drm),
+					 NULL, 0, &object);
+		if (ret)
+			return ret;
+
+		swch = (void *)object->parent;
+		swch->flip = nouveau_flip_complete;
+		swch->flip_data = chan;
+	}
+
+	if (device->card_type < NV_C0) {
+		ret = RING_SPACE(chan, 2);
+		if (ret)
+			return ret;
+
+		BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
+		OUT_RING  (chan, NvSw);
+		FIRE_RING (chan);
+	}
+
+	/* initialise synchronisation */
+	return nouveau_fence(chan->drm)->context_new(chan);
+}
+
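+/* Channel creation tries the newer indirect-buffer (IB) classes first
+ * and quietly falls back to the old DMA classes on hardware without
+ * them, then runs the common initialisation above.
+ */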
+int
+nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli,
+		    u32 parent, u32 handle, u32 arg0, u32 arg1,
+		    struct nouveau_channel **pchan)
+{
+	int ret;
+
+	ret = nouveau_channel_ind(drm, cli, parent, handle, arg0, pchan);
+	if (ret) {
+		NV_DEBUG(cli, "ib channel create, %d\n", ret);
+		ret = nouveau_channel_dma(drm, cli, parent, handle, pchan);
+		if (ret) {
+			NV_DEBUG(cli, "dma channel create, %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = nouveau_channel_init(*pchan, arg0, arg1);
+	if (ret) {
+		NV_ERROR(cli, "channel failed to initialise, %d\n", ret);
+		nouveau_channel_del(pchan);
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_chan.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_chan.h
new file mode 100644
index 0000000..40f97e2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -0,0 +1,47 @@
+#ifndef __NOUVEAU_CHAN_H__
+#define __NOUVEAU_CHAN_H__
+
+struct nouveau_cli;
+
+struct nouveau_channel {
+	struct nouveau_cli *cli;
+	struct nouveau_drm *drm;
+
+	u32 handle;
+	u32 vram;
+	u32 gart;
+
+	struct {
+		struct nouveau_bo *buffer;
+		struct nouveau_vma vma;
+		u32 handle;
+	} push;
+
+	/* TODO: this will be reworked in the near future */
+	bool accel_done;
+	void *fence;
+	struct {
+		int max;
+		int free;
+		int cur;
+		int put;
+		int ib_base;
+		int ib_max;
+		int ib_free;
+		int ib_put;
+	} dma;
+	u32 user_get_hi;
+	u32 user_get;
+	u32 user_put;
+
+	struct nouveau_object *object;
+};
+
+
+int  nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *,
+			 u32 parent, u32 handle, u32 arg0, u32 arg1,
+			 struct nouveau_channel **);
+void nouveau_channel_del(struct nouveau_channel **);
+int  nouveau_channel_idle(struct nouveau_channel *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_connector.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_connector.c
new file mode 100644
index 0000000..4da776f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -0,0 +1,1150 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <acpi/button.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "nouveau_reg.h"
+#include "nouveau_drm.h"
+#include "dispnv04/hw.h"
+#include "nouveau_acpi.h"
+
+#include "nouveau_display.h"
+#include "nouveau_connector.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+
+#include <subdev/i2c.h>
+#include <subdev/gpio.h>
+
+MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
+static int nouveau_tv_disable = 0;
+module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
+
+MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
+static int nouveau_ignorelid = 0;
+module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
+
+MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (default: enabled)");
+static int nouveau_duallink = 1;
+module_param_named(duallink, nouveau_duallink, int, 0400);
+
+struct nouveau_encoder *
+find_encoder(struct drm_connector *connector, int type)
+{
+	struct drm_device *dev = connector->dev;
+	struct nouveau_encoder *nv_encoder;
+	struct drm_mode_object *obj;
+	int i, id;
+
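+	/* return the first of this connector's encoders that matches the
+	 * requested DCB output type (or any type, for DCB_OUTPUT_ANY)
+	 */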
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		id = connector->encoder_ids[i];
+		if (!id)
+			break;
+
+		obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+
+		if (type == DCB_OUTPUT_ANY || nv_encoder->dcb->type == type)
+			return nv_encoder;
+	}
+
+	return NULL;
+}
+
+struct nouveau_connector *
+nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
+{
+	struct drm_device *dev = to_drm_encoder(encoder)->dev;
+	struct drm_connector *drm_connector;
+
+	list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) {
+		if (drm_connector->encoder == to_drm_encoder(encoder))
+			return nouveau_connector(drm_connector);
+	}
+
+	return NULL;
+}
+
+static void
+nouveau_connector_destroy(struct drm_connector *connector)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	kfree(nv_connector->edid);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static struct nouveau_i2c_port *
+nouveau_connector_ddc_detect(struct drm_connector *connector,
+			     struct nouveau_encoder **pnv_encoder)
+{
+	struct drm_device *dev = connector->dev;
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+	struct nouveau_i2c_port *port = NULL;
+	int i, panel = -ENODEV;
+
+	/* eDP panels need powering on by us (if the VBIOS doesn't default it
+	 * to on) before doing any AUX channel transactions.  LVDS panel power
+	 * is handled by the SOR itself, and not required for LVDS DDC.
+	 */
+	if (nv_connector->type == DCB_CONNECTOR_eDP) {
+		panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+		if (panel == 0) {
+			gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+			msleep(300);
+		}
+	}
+
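+	/* probe each possible encoder's i2c bus for an EDID at 0x50 */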
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		struct nouveau_encoder *nv_encoder;
+		struct drm_mode_object *obj;
+		int id;
+
+		id = connector->encoder_ids[i];
+		if (!id)
+			break;
+
+		obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+
+		port = nv_encoder->i2c;
+		if (port && nv_probe_i2c(port, 0x50)) {
+			*pnv_encoder = nv_encoder;
+			break;
+		}
+
+		port = NULL;
+	}
+
+	/* eDP panel not detected; restore the panel power GPIO to its
+	 * previous state to avoid confusing the SOR for other output
+	 * types.
+	 */
+	if (!port && panel == 0)
+		gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
+
+	return port;
+}
+
+static struct nouveau_encoder *
+nouveau_connector_of_detect(struct drm_connector *connector)
+{
+#ifdef __powerpc__
+	struct drm_device *dev = connector->dev;
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder;
+	struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
+
+	if (!dn ||
+	    !((nv_encoder = find_encoder(connector, DCB_OUTPUT_TMDS)) ||
+	      (nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG))))
+		return NULL;
+
+	for_each_child_of_node(dn, cn) {
+		const char *name = of_get_property(cn, "name", NULL);
+		const void *edid = of_get_property(cn, "EDID", NULL);
+		int idx = name ? name[strlen(name) - 1] - 'A' : 0;
+
+		if (nv_encoder->dcb->i2c_index == idx && edid) {
+			nv_connector->edid =
+				kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+			of_node_put(cn);
+			return nv_encoder;
+		}
+	}
+#endif
+	return NULL;
+}
+
+static void
+nouveau_connector_set_encoder(struct drm_connector *connector,
+			      struct nouveau_encoder *nv_encoder)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct drm_device *dev = connector->dev;
+
+	if (nv_connector->detected_encoder == nv_encoder)
+		return;
+	nv_connector->detected_encoder = nv_encoder;
+
+	if (nv_device(drm->device)->card_type >= NV_50) {
+		connector->interlace_allowed = true;
+		connector->doublescan_allowed = true;
+	} else
+	if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
+	    nv_encoder->dcb->type == DCB_OUTPUT_TMDS) {
+		connector->doublescan_allowed = false;
+		connector->interlace_allowed = false;
+	} else {
+		connector->doublescan_allowed = true;
+		if (nv_device(drm->device)->card_type == NV_20 ||
+		   (nv_device(drm->device)->card_type == NV_10 &&
+		    (dev->pci_device & 0x0ff0) != 0x0100 &&
+		    (dev->pci_device & 0x0ff0) != 0x0150))
+			/* HW is broken */
+			connector->interlace_allowed = false;
+		else
+			connector->interlace_allowed = true;
+	}
+
+	if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
+		drm_object_property_set_value(&connector->base,
+			dev->mode_config.dvi_i_subconnector_property,
+			nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
+			DRM_MODE_SUBCONNECTOR_DVID :
+			DRM_MODE_SUBCONNECTOR_DVIA);
+	}
+}
+
+static enum drm_connector_status
+nouveau_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder = NULL;
+	struct nouveau_encoder *nv_partner;
+	struct nouveau_i2c_port *i2c;
+	int type;
+
+	/* Cleanup the previous EDID block. */
+	if (nv_connector->edid) {
+		drm_mode_connector_update_edid_property(connector, NULL);
+		kfree(nv_connector->edid);
+		nv_connector->edid = NULL;
+	}
+
+	i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
+	if (i2c) {
+		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
+		drm_mode_connector_update_edid_property(connector,
+							nv_connector->edid);
+		if (!nv_connector->edid) {
+			NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
+				 drm_get_connector_name(connector));
+			goto detect_analog;
+		}
+
+		if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
+		    !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
+			NV_ERROR(drm, "Detected %s, but failed init\n",
+				 drm_get_connector_name(connector));
+			return connector_status_disconnected;
+		}
+
+		/* Override encoder type for DVI-I based on whether EDID
+		 * says the display is digital or analog; both use the
+		 * same i2c channel, so the value returned from ddc_detect
+		 * isn't necessarily correct.
+		 */
+		nv_partner = NULL;
+		if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS)
+			nv_partner = find_encoder(connector, DCB_OUTPUT_ANALOG);
+		if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
+			nv_partner = find_encoder(connector, DCB_OUTPUT_TMDS);
+
+		if (nv_partner && ((nv_encoder->dcb->type == DCB_OUTPUT_ANALOG &&
+				    nv_partner->dcb->type == DCB_OUTPUT_TMDS) ||
+				   (nv_encoder->dcb->type == DCB_OUTPUT_TMDS &&
+				    nv_partner->dcb->type == DCB_OUTPUT_ANALOG))) {
+			if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
+				type = DCB_OUTPUT_TMDS;
+			else
+				type = DCB_OUTPUT_ANALOG;
+
+			nv_encoder = find_encoder(connector, type);
+		}
+
+		nouveau_connector_set_encoder(connector, nv_encoder);
+		return connector_status_connected;
+	}
+
+	nv_encoder = nouveau_connector_of_detect(connector);
+	if (nv_encoder) {
+		nouveau_connector_set_encoder(connector, nv_encoder);
+		return connector_status_connected;
+	}
+
+detect_analog:
+	nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG);
+	if (!nv_encoder && !nouveau_tv_disable)
+		nv_encoder = find_encoder(connector, DCB_OUTPUT_TV);
+	if (nv_encoder && force) {
+		struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
+		struct drm_encoder_helper_funcs *helper =
+						encoder->helper_private;
+
+		if (helper->detect(encoder, connector) ==
+						connector_status_connected) {
+			nouveau_connector_set_encoder(connector, nv_encoder);
+			return connector_status_connected;
+		}
+
+	}
+
+	return connector_status_disconnected;
+}
+
+static enum drm_connector_status
+nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder = NULL;
+	enum drm_connector_status status = connector_status_disconnected;
+
+	/* Cleanup the previous EDID block. */
+	if (nv_connector->edid) {
+		drm_mode_connector_update_edid_property(connector, NULL);
+		kfree(nv_connector->edid);
+		nv_connector->edid = NULL;
+	}
+
+	nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
+	if (!nv_encoder)
+		return connector_status_disconnected;
+
+	/* Try retrieving EDID via DDC */
+	if (!drm->vbios.fp_no_ddc) {
+		status = nouveau_connector_detect(connector, force);
+		if (status == connector_status_connected)
+			goto out;
+	}
+
+	/* On some laptops (Sony, I'm looking at you) there appears to
+	 * be no direct way of accessing the panel's EDID.  The only
+	 * option available to us appears to be to ask ACPI for help..
+	 *
+	 * It's important this check comes before trying straps; one of
+	 * the said manufacturer's laptops is configured in such a way
+	 * that nouveau decides an entry in the VBIOS FP mode table is
+	 * valid - it's not (rh#613284)
+	 */
+	if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
+		if ((nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
+			status = connector_status_connected;
+			goto out;
+		}
+	}
+
+	/* If no EDID was found above, and the VBIOS indicates a hardcoded
+	 * modeline is available for the panel, set it as the panel's
+	 * native mode and exit.
+	 */
+	if (nouveau_bios_fp_mode(dev, NULL) && (drm->vbios.fp_no_ddc ||
+	    nv_encoder->dcb->lvdsconf.use_straps_for_mode)) {
+		status = connector_status_connected;
+		goto out;
+	}
+
+	/* Still nothing; some VBIOS images have a hardcoded EDID block
+	 * for the panel stored in them.
+	 */
+	if (!drm->vbios.fp_no_ddc) {
+		struct edid *edid =
+			(struct edid *)nouveau_bios_embedded_edid(dev);
+		if (edid) {
+			nv_connector->edid =
+					kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+			if (nv_connector->edid)
+				status = connector_status_connected;
+		}
+	}
+
+out:
+#if defined(CONFIG_ACPI_BUTTON) || \
+	(defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
+	if (status == connector_status_connected &&
+	    !nouveau_ignorelid && !acpi_lid_open())
+		status = connector_status_unknown;
+#endif
+
+	drm_mode_connector_update_edid_property(connector, nv_connector->edid);
+	nouveau_connector_set_encoder(connector, nv_encoder);
+	return status;
+}
+
+static void
+nouveau_connector_force(struct drm_connector *connector)
+{
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder;
+	int type;
+
+	if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
+		if (connector->force == DRM_FORCE_ON_DIGITAL)
+			type = DCB_OUTPUT_TMDS;
+		else
+			type = DCB_OUTPUT_ANALOG;
+	} else
+		type = DCB_OUTPUT_ANY;
+
+	nv_encoder = find_encoder(connector, type);
+	if (!nv_encoder) {
+		NV_ERROR(drm, "can't find encoder to force %s on!\n",
+			 drm_get_connector_name(connector));
+		connector->status = connector_status_disconnected;
+		return;
+	}
+
+	nouveau_connector_set_encoder(connector, nv_encoder);
+}
+
+static int
+nouveau_connector_set_property(struct drm_connector *connector,
+			       struct drm_property *property, uint64_t value)
+{
+	struct nouveau_display *disp = nouveau_display(connector->dev);
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+	struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
+	struct drm_device *dev = connector->dev;
+	struct nouveau_crtc *nv_crtc;
+	int ret;
+
+	nv_crtc = NULL;
+	if (connector->encoder && connector->encoder->crtc)
+		nv_crtc = nouveau_crtc(connector->encoder->crtc);
+
+	/* Scaling mode */
+	if (property == dev->mode_config.scaling_mode_property) {
+		bool modeset = false;
+
+		switch (value) {
+		case DRM_MODE_SCALE_NONE:
+		case DRM_MODE_SCALE_FULLSCREEN:
+		case DRM_MODE_SCALE_CENTER:
+		case DRM_MODE_SCALE_ASPECT:
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		/* LVDS always needs gpu scaling */
+		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
+		    value == DRM_MODE_SCALE_NONE)
+			return -EINVAL;
+
+		/* Changing between GPU and panel scaling requires a full
+		 * modeset
+		 */
+		if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
+		    (value == DRM_MODE_SCALE_NONE))
+			modeset = true;
+		nv_connector->scaling_mode = value;
+
+		if (!nv_crtc)
+			return 0;
+
+		if (modeset || !nv_crtc->set_scale) {
+			ret = drm_crtc_helper_set_mode(&nv_crtc->base,
+							&nv_crtc->base.mode,
+							nv_crtc->base.x,
+							nv_crtc->base.y, NULL);
+			if (!ret)
+				return -EINVAL;
+		} else {
+			ret = nv_crtc->set_scale(nv_crtc, true);
+			if (ret)
+				return ret;
+		}
+
+		return 0;
+	}
+
+	/* Underscan */
+	if (property == disp->underscan_property) {
+		if (nv_connector->underscan != value) {
+			nv_connector->underscan = value;
+			if (!nv_crtc || !nv_crtc->set_scale)
+				return 0;
+
+			return nv_crtc->set_scale(nv_crtc, true);
+		}
+
+		return 0;
+	}
+
+	if (property == disp->underscan_hborder_property) {
+		if (nv_connector->underscan_hborder != value) {
+			nv_connector->underscan_hborder = value;
+			if (!nv_crtc || !nv_crtc->set_scale)
+				return 0;
+
+			return nv_crtc->set_scale(nv_crtc, true);
+		}
+
+		return 0;
+	}
+
+	if (property == disp->underscan_vborder_property) {
+		if (nv_connector->underscan_vborder != value) {
+			nv_connector->underscan_vborder = value;
+			if (!nv_crtc || !nv_crtc->set_scale)
+				return 0;
+
+			return nv_crtc->set_scale(nv_crtc, true);
+		}
+
+		return 0;
+	}
+
+	/* Dithering */
+	if (property == disp->dithering_mode) {
+		nv_connector->dithering_mode = value;
+		if (!nv_crtc || !nv_crtc->set_dither)
+			return 0;
+
+		return nv_crtc->set_dither(nv_crtc, true);
+	}
+
+	if (property == disp->dithering_depth) {
+		nv_connector->dithering_depth = value;
+		if (!nv_crtc || !nv_crtc->set_dither)
+			return 0;
+
+		return nv_crtc->set_dither(nv_crtc, true);
+	}
+
+	if (nv_crtc && nv_crtc->set_color_vibrance) {
+		/* Hue */
+		if (property == disp->vibrant_hue_property) {
+			nv_crtc->vibrant_hue = value - 90;
+			return nv_crtc->set_color_vibrance(nv_crtc, true);
+		}
+		/* Saturation */
+		if (property == disp->color_vibrance_property) {
+			nv_crtc->color_vibrance = value - 100;
+			return nv_crtc->set_color_vibrance(nv_crtc, true);
+		}
+	}
+
+	if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
+		return get_slave_funcs(encoder)->set_property(
+			encoder, connector, property, value);
+
+	return -EINVAL;
+}
+
+static struct drm_display_mode *
+nouveau_connector_native_mode(struct drm_connector *connector)
+{
+	struct drm_connector_helper_funcs *helper = connector->helper_private;
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode, *largest = NULL;
+	int high_w = 0, high_h = 0, high_v = 0;
+
+	list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+		mode->vrefresh = drm_mode_vrefresh(mode);
+		if (helper->mode_valid(connector, mode) != MODE_OK ||
+		    (mode->flags & DRM_MODE_FLAG_INTERLACE))
+			continue;
+
+		/* Use preferred mode if there is one.. */
+		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+			NV_DEBUG(drm, "native mode from preferred\n");
+			return drm_mode_duplicate(dev, mode);
+		}
+
+		/* Otherwise, take the resolution with the largest width, then
+		 * height, then vertical refresh
+		 */
+		if (mode->hdisplay < high_w)
+			continue;
+
+		if (mode->hdisplay == high_w && mode->vdisplay < high_h)
+			continue;
+
+		if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
+		    mode->vrefresh < high_v)
+			continue;
+
+		high_w = mode->hdisplay;
+		high_h = mode->vdisplay;
+		high_v = mode->vrefresh;
+		largest = mode;
+	}
+
+	NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
+		      high_w, high_h, high_v);
+	return largest ? drm_mode_duplicate(dev, largest) : NULL;
+}
+
+struct moderec {
+	int hdisplay;
+	int vdisplay;
+};
+
+static struct moderec scaler_modes[] = {
+	{ 1920, 1200 },
+	{ 1920, 1080 },
+	{ 1680, 1050 },
+	{ 1600, 1200 },
+	{ 1400, 1050 },
+	{ 1280, 1024 },
+	{ 1280, 960 },
+	{ 1152, 864 },
+	{ 1024, 768 },
+	{ 800, 600 },
+	{ 720, 400 },
+	{ 640, 480 },
+	{ 640, 400 },
+	{ 640, 350 },
+	{}
+};
+
+static int
+nouveau_connector_scaler_modes_add(struct drm_connector *connector)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct drm_display_mode *native = nv_connector->native_mode, *m;
+	struct drm_device *dev = connector->dev;
+	struct moderec *mode = &scaler_modes[0];
+	int modes = 0;
+
+	if (!native)
+		return 0;
+
+	while (mode->hdisplay) {
+		if (mode->hdisplay <= native->hdisplay &&
+		    mode->vdisplay <= native->vdisplay) {
+			m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay,
+					 drm_mode_vrefresh(native), false,
+					 false, false);
+			if (!m)
+				break; /* allocation failed; don't retry forever */
+
+			m->type |= DRM_MODE_TYPE_DRIVER;
+
+			drm_mode_probed_add(connector, m);
+			modes++;
+		}
+
+		mode++;
+	}
+
+	return modes;
+}
+
+static void
+nouveau_connector_detect_depth(struct drm_connector *connector)
+{
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+	struct nvbios *bios = &drm->vbios;
+	struct drm_display_mode *mode = nv_connector->native_mode;
+	bool duallink;
+
+	/* if the edid is feeling nice enough to provide this info, use it */
+	if (nv_connector->edid && connector->display_info.bpc)
+		return;
+
+	/* EDID 1.4 is *supposed* to be supported on eDP, but, Apple... */
+	if (nv_connector->type == DCB_CONNECTOR_eDP) {
+		connector->display_info.bpc = 6;
+		return;
+	}
+
+	/* we're out of options unless we're LVDS; default to 8bpc */
+	if (nv_encoder->dcb->type != DCB_OUTPUT_LVDS) {
+		connector->display_info.bpc = 8;
+		return;
+	}
+
+	connector->display_info.bpc = 6;
+
+	/* LVDS: panel straps */
+	if (bios->fp_no_ddc) {
+		if (bios->fp.if_is_24bit)
+			connector->display_info.bpc = 8;
+		return;
+	}
+
+	/* LVDS: DDC panel; we first need to determine the number of links
+	 * to know which if_is_24bit flag to check...
+	 */
+	if (nv_connector->edid &&
+	    nv_connector->type == DCB_CONNECTOR_LVDS_SPWG)
+		duallink = ((u8 *)nv_connector->edid)[121] == 2;
+	else
+		duallink = mode->clock >= bios->fp.duallink_transition_clk;
+
+	if ((!duallink && (bios->fp.strapless_is_24bit & 1)) ||
+	    ( duallink && (bios->fp.strapless_is_24bit & 2)))
+		connector->display_info.bpc = 8;
+}
+
+static int
+nouveau_connector_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+	struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
+	int ret = 0;
+
+	/* destroy the native mode; the attached monitor could have changed.
+	 */
+	if (nv_connector->native_mode) {
+		drm_mode_destroy(dev, nv_connector->native_mode);
+		nv_connector->native_mode = NULL;
+	}
+
+	if (nv_connector->edid)
+		ret = drm_add_edid_modes(connector, nv_connector->edid);
+	else
+	if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
+	    (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
+	     drm->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
+		struct drm_display_mode mode;
+
+		nouveau_bios_fp_mode(dev, &mode);
+		nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
+	}
+
+	/* Determine display colour depth for everything except LVDS now;
+	 * DP requires this before mode_valid() is called.
+	 */
+	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+		nouveau_connector_detect_depth(connector);
+
+	/* Find the native mode if this is a digital panel; if we didn't
+	 * find any modes through DDC previously, add the native mode to
+	 * the list of modes.
+	 */
+	if (!nv_connector->native_mode)
+		nv_connector->native_mode =
+			nouveau_connector_native_mode(connector);
+	if (ret == 0 && nv_connector->native_mode) {
+		struct drm_display_mode *mode;
+
+		mode = drm_mode_duplicate(dev, nv_connector->native_mode);
+		drm_mode_probed_add(connector, mode);
+		ret = 1;
+	}
+
+	/* Determine LVDS colour depth; this must happen after determining
+	 * the "native" mode, as some VBIOS tables require us to use the
+	 * pixel clock as part of the lookup...
+	 */
+	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+		nouveau_connector_detect_depth(connector);
+
+	if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
+		ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
+
+	if (nv_connector->type == DCB_CONNECTOR_LVDS ||
+	    nv_connector->type == DCB_CONNECTOR_LVDS_SPWG ||
+	    nv_connector->type == DCB_CONNECTOR_eDP)
+		ret += nouveau_connector_scaler_modes_add(connector);
+
+	return ret;
+}
+
+static unsigned
+get_tmds_link_bandwidth(struct drm_connector *connector)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct dcb_output *dcb = nv_connector->detected_encoder->dcb;
+
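+	/* maximum single-link TMDS pixel clock (kHz) for this chipset */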
+	if (dcb->location != DCB_LOC_ON_CHIP ||
+	    nv_device(drm->device)->chipset >= 0x46)
+		return 165000;
+	else if (nv_device(drm->device)->chipset >= 0x40)
+		return 155000;
+	else if (nv_device(drm->device)->chipset >= 0x18)
+		return 135000;
+	else
+		return 112000;
+}
+
+static int
+nouveau_connector_mode_valid(struct drm_connector *connector,
+			     struct drm_display_mode *mode)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+	struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
+	unsigned min_clock = 25000, max_clock = min_clock;
+	unsigned clock = mode->clock;
+
+	switch (nv_encoder->dcb->type) {
+	case DCB_OUTPUT_LVDS:
+		if (nv_connector->native_mode &&
+		    (mode->hdisplay > nv_connector->native_mode->hdisplay ||
+		     mode->vdisplay > nv_connector->native_mode->vdisplay))
+			return MODE_PANEL;
+
+		min_clock = 0;
+		max_clock = 400000;
+		break;
+	case DCB_OUTPUT_TMDS:
+		max_clock = get_tmds_link_bandwidth(connector);
+		if (nouveau_duallink && nv_encoder->dcb->duallink_possible)
+			max_clock *= 2;
+		break;
+	case DCB_OUTPUT_ANALOG:
+		max_clock = nv_encoder->dcb->crtconf.maxfreq;
+		if (!max_clock)
+			max_clock = 350000;
+		break;
+	case DCB_OUTPUT_TV:
+		return get_slave_funcs(encoder)->mode_valid(encoder, mode);
+	case DCB_OUTPUT_DP:
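+		/* compare the mode's bandwidth needs (pixel clock scaled
+		 * by bits per pixel) against the total bandwidth of all
+		 * DP lanes
+		 */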
+		max_clock  = nv_encoder->dp.link_nr;
+		max_clock *= nv_encoder->dp.link_bw;
+		clock = clock * (connector->display_info.bpc * 3) / 10;
+		break;
+	default:
+		BUG_ON(1);
+		return MODE_BAD;
+	}
+
+	if (clock < min_clock)
+		return MODE_CLOCK_LOW;
+
+	if (clock > max_clock)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+nouveau_connector_best_encoder(struct drm_connector *connector)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+
+	if (nv_connector->detected_encoder)
+		return to_drm_encoder(nv_connector->detected_encoder);
+
+	return NULL;
+}
+
+static const struct drm_connector_helper_funcs
+nouveau_connector_helper_funcs = {
+	.get_modes = nouveau_connector_get_modes,
+	.mode_valid = nouveau_connector_mode_valid,
+	.best_encoder = nouveau_connector_best_encoder,
+};
+
+static const struct drm_connector_funcs
+nouveau_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.save = NULL,
+	.restore = NULL,
+	.detect = nouveau_connector_detect,
+	.destroy = nouveau_connector_destroy,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = nouveau_connector_set_property,
+	.force = nouveau_connector_force
+};
+
+static const struct drm_connector_funcs
+nouveau_connector_funcs_lvds = {
+	.dpms = drm_helper_connector_dpms,
+	.save = NULL,
+	.restore = NULL,
+	.detect = nouveau_connector_detect_lvds,
+	.destroy = nouveau_connector_destroy,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = nouveau_connector_set_property,
+	.force = nouveau_connector_force
+};
+
+static void
+nouveau_connector_hotplug_work(struct work_struct *work)
+{
+	struct nouveau_connector *nv_connector =
+		container_of(work, struct nouveau_connector, hpd_work);
+	struct drm_connector *connector = &nv_connector->base;
+	struct drm_device *dev = connector->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+	bool plugged = gpio->get(gpio, 0, nv_connector->hpd.func, 0xff);
+
+	NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
+		 drm_get_connector_name(connector));
+
+	if (plugged)
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+	else
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+
+	drm_helper_hpd_irq_event(dev);
+}
+
+static int
+nouveau_connector_hotplug(struct nouveau_eventh *event, int index)
+{
+	struct nouveau_connector *nv_connector =
+		container_of(event, struct nouveau_connector, hpd_func);
+	schedule_work(&nv_connector->hpd_work);
+	return NVKM_EVENT_KEEP;
+}
+
+static int
+drm_conntype_from_dcb(enum dcb_connector_type dcb)
+{
+	switch (dcb) {
+	case DCB_CONNECTOR_VGA      : return DRM_MODE_CONNECTOR_VGA;
+	case DCB_CONNECTOR_TV_0     :
+	case DCB_CONNECTOR_TV_1     :
+	case DCB_CONNECTOR_TV_3     : return DRM_MODE_CONNECTOR_TV;
+	case DCB_CONNECTOR_DMS59_0  :
+	case DCB_CONNECTOR_DMS59_1  :
+	case DCB_CONNECTOR_DVI_I    : return DRM_MODE_CONNECTOR_DVII;
+	case DCB_CONNECTOR_DVI_D    : return DRM_MODE_CONNECTOR_DVID;
+	case DCB_CONNECTOR_LVDS     :
+	case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
+	case DCB_CONNECTOR_DMS59_DP0:
+	case DCB_CONNECTOR_DMS59_DP1:
+	case DCB_CONNECTOR_DP       : return DRM_MODE_CONNECTOR_DisplayPort;
+	case DCB_CONNECTOR_eDP      : return DRM_MODE_CONNECTOR_eDP;
+	case DCB_CONNECTOR_HDMI_0   :
+	case DCB_CONNECTOR_HDMI_1   : return DRM_MODE_CONNECTOR_HDMIA;
+	default:
+		break;
+	}
+
+	return DRM_MODE_CONNECTOR_Unknown;
+}
+
+struct drm_connector *
+nouveau_connector_create(struct drm_device *dev, int index)
+{
+	const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_connector *nv_connector = NULL;
+	struct drm_connector *connector;
+	int type, ret = 0;
+	bool dummy;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		nv_connector = nouveau_connector(connector);
+		if (nv_connector->index == index)
+			return connector;
+	}
+
+	nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
+	if (!nv_connector)
+		return ERR_PTR(-ENOMEM);
+
+	connector = &nv_connector->base;
+	INIT_WORK(&nv_connector->hpd_work, nouveau_connector_hotplug_work);
+	nv_connector->index = index;
+
+	/* attempt to parse vbios connector type and hotplug gpio */
+	nv_connector->dcb = olddcb_conn(dev, index);
+	if (nv_connector->dcb) {
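+		/* hpd[] maps the hotplug field of a DCB connector table
+		 * entry to a DCB GPIO function number (0xff = none)
+		 */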
+		static const u8 hpd[16] = {
+			0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+			0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+		};
+
+		u32 entry = ROM16(nv_connector->dcb[0]);
+		if (olddcb_conntab(dev)[3] >= 4)
+			entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
+
+		ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)],
+				 DCB_GPIO_UNUSED, &nv_connector->hpd);
+		nv_connector->hpd_func.func = nouveau_connector_hotplug;
+		if (ret)
+			nv_connector->hpd.func = DCB_GPIO_UNUSED;
+
+		nv_connector->type = nv_connector->dcb[0];
+		if (drm_conntype_from_dcb(nv_connector->type) ==
+					  DRM_MODE_CONNECTOR_Unknown) {
+			NV_WARN(drm, "unknown connector type %02x\n",
+				nv_connector->type);
+			nv_connector->type = DCB_CONNECTOR_NONE;
+		}
+
+		/* Gigabyte NX85T */
+		if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
+			if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
+				nv_connector->type = DCB_CONNECTOR_DVI_I;
+		}
+
+		/* Gigabyte GV-NX86T512H */
+		if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
+			if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
+				nv_connector->type = DCB_CONNECTOR_DVI_I;
+		}
+	} else {
+		nv_connector->type = DCB_CONNECTOR_NONE;
+		nv_connector->hpd.func = DCB_GPIO_UNUSED;
+	}
+
+	/* no vbios data, or an unknown dcb connector type - attempt to
+	 * figure out something suitable ourselves
+	 */
+	if (nv_connector->type == DCB_CONNECTOR_NONE) {
+		struct nouveau_drm *drm = nouveau_drm(dev);
+		struct dcb_table *dcbt = &drm->vbios.dcb;
+		u32 encoders = 0;
+		int i;
+
+		for (i = 0; i < dcbt->entries; i++) {
+			if (dcbt->entry[i].connector == nv_connector->index)
+				encoders |= (1 << dcbt->entry[i].type);
+		}
+
+		if (encoders & (1 << DCB_OUTPUT_DP)) {
+			if (encoders & (1 << DCB_OUTPUT_TMDS))
+				nv_connector->type = DCB_CONNECTOR_DP;
+			else
+				nv_connector->type = DCB_CONNECTOR_eDP;
+		} else
+		if (encoders & (1 << DCB_OUTPUT_TMDS)) {
+			if (encoders & (1 << DCB_OUTPUT_ANALOG))
+				nv_connector->type = DCB_CONNECTOR_DVI_I;
+			else
+				nv_connector->type = DCB_CONNECTOR_DVI_D;
+		} else
+		if (encoders & (1 << DCB_OUTPUT_ANALOG)) {
+			nv_connector->type = DCB_CONNECTOR_VGA;
+		} else
+		if (encoders & (1 << DCB_OUTPUT_LVDS)) {
+			nv_connector->type = DCB_CONNECTOR_LVDS;
+		} else
+		if (encoders & (1 << DCB_OUTPUT_TV)) {
+			nv_connector->type = DCB_CONNECTOR_TV_0;
+		}
+	}
+
+	type = drm_conntype_from_dcb(nv_connector->type);
+	if (type == DRM_MODE_CONNECTOR_LVDS) {
+		ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
+		if (ret) {
+			NV_ERROR(drm, "Error parsing LVDS table, disabling\n");
+			kfree(nv_connector);
+			return ERR_PTR(ret);
+		}
+
+		funcs = &nouveau_connector_funcs_lvds;
+	} else {
+		funcs = &nouveau_connector_funcs;
+	}
+
+	/* defaults, will get overridden in detect() */
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	drm_connector_init(dev, connector, funcs, type);
+	drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+
+	/* Init DVI-I specific properties */
+	if (nv_connector->type == DCB_CONNECTOR_DVI_I)
+		drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
+
+	/* Add overscan compensation options to digital outputs */
+	if (disp->underscan_property &&
+	    (type == DRM_MODE_CONNECTOR_DVID ||
+	     type == DRM_MODE_CONNECTOR_DVII ||
+	     type == DRM_MODE_CONNECTOR_HDMIA ||
+	     type == DRM_MODE_CONNECTOR_DisplayPort)) {
+		drm_object_attach_property(&connector->base,
+					      disp->underscan_property,
+					      UNDERSCAN_OFF);
+		drm_object_attach_property(&connector->base,
+					      disp->underscan_hborder_property,
+					      0);
+		drm_object_attach_property(&connector->base,
+					      disp->underscan_vborder_property,
+					      0);
+	}
+
+	/* Add hue and saturation options */
+	if (disp->vibrant_hue_property)
+		drm_object_attach_property(&connector->base,
+					      disp->vibrant_hue_property,
+					      90);
+	if (disp->color_vibrance_property)
+		drm_object_attach_property(&connector->base,
+					      disp->color_vibrance_property,
+					      150);
+
+	switch (nv_connector->type) {
+	case DCB_CONNECTOR_VGA:
+		if (nv_device(drm->device)->card_type >= NV_50) {
+			drm_object_attach_property(&connector->base,
+					dev->mode_config.scaling_mode_property,
+					nv_connector->scaling_mode);
+		}
+		/* fall-through */
+	case DCB_CONNECTOR_TV_0:
+	case DCB_CONNECTOR_TV_1:
+	case DCB_CONNECTOR_TV_3:
+		nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
+		break;
+	default:
+		nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
+
+		drm_object_attach_property(&connector->base,
+				dev->mode_config.scaling_mode_property,
+				nv_connector->scaling_mode);
+		if (disp->dithering_mode) {
+			nv_connector->dithering_mode = DITHERING_MODE_AUTO;
+			drm_object_attach_property(&connector->base,
+						disp->dithering_mode,
+						nv_connector->dithering_mode);
+		}
+		if (disp->dithering_depth) {
+			nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
+			drm_object_attach_property(&connector->base,
+						disp->dithering_depth,
+						nv_connector->dithering_depth);
+		}
+		break;
+	}
+
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	if (nv_connector->hpd.func != DCB_GPIO_UNUSED)
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	drm_sysfs_connector_add(connector);
+	return connector;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_connector.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_connector.h
new file mode 100644
index 0000000..6e399aa
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_CONNECTOR_H__
+#define __NOUVEAU_CONNECTOR_H__
+
+#include <drm/drm_edid.h>
+#include "nouveau_crtc.h"
+
+#include <core/event.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/gpio.h>
+
+struct nouveau_i2c_port;
+
+enum nouveau_underscan_type {
+	UNDERSCAN_OFF,
+	UNDERSCAN_ON,
+	UNDERSCAN_AUTO,
+};
+
+/* the enum values specifically defined here match nv50/nvd0 hw values, and
+ * the code relies on this
+ */
+enum nouveau_dithering_mode {
+	DITHERING_MODE_OFF = 0x00,
+	DITHERING_MODE_ON = 0x01,
+	DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+	DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+	DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+	DITHERING_MODE_AUTO
+};
+
+enum nouveau_dithering_depth {
+	DITHERING_DEPTH_6BPC = 0x00,
+	DITHERING_DEPTH_8BPC = 0x02,
+	DITHERING_DEPTH_AUTO
+};
+
+struct nouveau_connector {
+	struct drm_connector base;
+	enum dcb_connector_type type;
+	u8 index;
+	u8 *dcb;
+
+	struct dcb_gpio_func hpd;
+	struct work_struct hpd_work;
+	struct nouveau_eventh hpd_func;
+
+	int dithering_mode;
+	int dithering_depth;
+	int scaling_mode;
+	enum nouveau_underscan_type underscan;
+	u32 underscan_hborder;
+	u32 underscan_vborder;
+
+	struct nouveau_encoder *detected_encoder;
+	struct edid *edid;
+	struct drm_display_mode *native_mode;
+};
+
+static inline struct nouveau_connector *nouveau_connector(
+						struct drm_connector *con)
+{
+	return container_of(con, struct nouveau_connector, base);
+}
+
+static inline struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
+{
+	struct drm_device *dev = nv_crtc->base.dev;
+	struct drm_connector *connector;
+	struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder && connector->encoder->crtc == crtc)
+			return nouveau_connector(connector);
+	}
+
+	return NULL;
+}
+
+struct drm_connector *
+nouveau_connector_create(struct drm_device *, int index);
+
+int
+nouveau_connector_bpp(struct drm_connector *);
+
+#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_crtc.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_crtc.h
new file mode 100644
index 0000000..d1e5890
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_CRTC_H__
+#define __NOUVEAU_CRTC_H__
+
+struct nouveau_crtc {
+	struct drm_crtc base;
+
+	int index;
+
+	uint32_t dpms_saved_fp_control;
+	uint32_t fp_users;
+	int saturation;
+	int color_vibrance;
+	int vibrant_hue;
+	int sharpness;
+	int last_dpms;
+
+	int cursor_saved_x, cursor_saved_y;
+
+	struct {
+		int cpp;
+		bool blanked;
+		uint32_t offset;
+		uint32_t tile_flags;
+	} fb;
+
+	struct {
+		struct nouveau_bo *nvbo;
+		bool visible;
+		uint32_t offset;
+		void (*set_offset)(struct nouveau_crtc *, uint32_t offset);
+		void (*set_pos)(struct nouveau_crtc *, int x, int y);
+		void (*hide)(struct nouveau_crtc *, bool update);
+		void (*show)(struct nouveau_crtc *, bool update);
+	} cursor;
+
+	struct {
+		struct nouveau_bo *nvbo;
+		uint16_t r[256];
+		uint16_t g[256];
+		uint16_t b[256];
+		int depth;
+	} lut;
+
+	int (*set_dither)(struct nouveau_crtc *crtc, bool update);
+	int (*set_scale)(struct nouveau_crtc *crtc, bool update);
+	int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update);
+};
+
+static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
+{
+	return container_of(crtc, struct nouveau_crtc, base);
+}
+
+static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
+{
+	return &crtc->base;
+}
+
+int nv04_cursor_init(struct nouveau_crtc *);
+
+#endif /* __NOUVEAU_CRTC_H__ */
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_debugfs.c
new file mode 100644
index 0000000..5392e07
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ *  Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nouveau_debugfs.h"
+#include "nouveau_drm.h"
+
+static int
+nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
+	int i;
+
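+	/* dump the raw VBIOS image, one byte at a time */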
+	for (i = 0; i < drm->vbios.length; i++)
+		seq_printf(m, "%c", drm->vbios.data[i]);
+	return 0;
+}
+
+static struct drm_info_list nouveau_debugfs_list[] = {
+	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+};
+#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
+
+int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+	drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+				 minor->debugfs_root, minor);
+	return 0;
+}
+
+void
+nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+				 minor);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_debugfs.h
new file mode 100644
index 0000000..a62af6f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -0,0 +1,22 @@
+#ifndef __NOUVEAU_DEBUGFS_H__
+#define __NOUVEAU_DEBUGFS_H__
+
+#include <drm/drmP.h>
+
+#if defined(CONFIG_DEBUG_FS)
+extern int  nouveau_debugfs_init(struct drm_minor *);
+extern void nouveau_debugfs_takedown(struct drm_minor *);
+#else
+static inline int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+	return 0;
+}
+
+static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+}
+
+#endif
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_display.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_display.c
new file mode 100644
index 0000000..f17dc2a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -0,0 +1,714 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "nouveau_fbcon.h"
+#include "dispnv04/hw.h"
+#include "nouveau_crtc.h"
+#include "nouveau_dma.h"
+#include "nouveau_gem.h"
+#include "nouveau_connector.h"
+#include "nv50_display.h"
+
+#include "nouveau_fence.h"
+
+#include <subdev/bios/gpio.h>
+#include <subdev/gpio.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+static void
+nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
+{
+	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+
+	if (fb->nvbo)
+		drm_gem_object_unreference_unlocked(fb->nvbo->gem);
+
+	drm_framebuffer_cleanup(drm_fb);
+	kfree(fb);
+}
+
+static int
+nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
+				       struct drm_file *file_priv,
+				       unsigned int *handle)
+{
+	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+
+	return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
+}
+
+static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
+	.destroy = nouveau_user_framebuffer_destroy,
+	.create_handle = nouveau_user_framebuffer_create_handle,
+};
+
+int
+nouveau_framebuffer_init(struct drm_device *dev,
+			 struct nouveau_framebuffer *nv_fb,
+			 struct drm_mode_fb_cmd2 *mode_cmd,
+			 struct nouveau_bo *nvbo)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_framebuffer *fb = &nv_fb->base;
+	int ret;
+
+	drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+	nv_fb->nvbo = nvbo;
+
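+	/* for NV50+ displays, pre-compute the DMA object, format and
+	 * pitch values the EVO display engine expects for this buffer
+	 */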
+	if (nv_device(drm->device)->card_type >= NV_50) {
+		u32 tile_flags = nouveau_bo_tile_layout(nvbo);
+		if (tile_flags == 0x7a00 ||
+		    tile_flags == 0xfe00)
+			nv_fb->r_dma = NvEvoFB32;
+		else
+		if (tile_flags == 0x7000)
+			nv_fb->r_dma = NvEvoFB16;
+		else
+			nv_fb->r_dma = NvEvoVRAM_LP;
+
+		switch (fb->depth) {
+		case  8: nv_fb->r_format = 0x1e00; break;
+		case 15: nv_fb->r_format = 0xe900; break;
+		case 16: nv_fb->r_format = 0xe800; break;
+		case 24:
+		case 32: nv_fb->r_format = 0xcf00; break;
+		case 30: nv_fb->r_format = 0xd100; break;
+		default:
+			 NV_ERROR(drm, "unknown depth %d\n", fb->depth);
+			 return -EINVAL;
+		}
+
+		if (nv_device(drm->device)->chipset == 0x50)
+			nv_fb->r_format |= (tile_flags << 8);
+
+		if (!tile_flags) {
+			if (nv_device(drm->device)->card_type < NV_D0)
+				nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
+			else
+				nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
+		} else {
+			u32 mode = nvbo->tile_mode;
+			if (nv_device(drm->device)->card_type >= NV_C0)
+				mode >>= 4;
+			nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
+		}
+	}
+
+	ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct drm_framebuffer *
+nouveau_user_framebuffer_create(struct drm_device *dev,
+				struct drm_file *file_priv,
+				struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct nouveau_framebuffer *nouveau_fb;
+	struct drm_gem_object *gem;
+	int ret;
+
+	gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	if (!gem)
+		return ERR_PTR(-ENOENT);
+
+	nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
+	if (!nouveau_fb)
+		return ERR_PTR(-ENOMEM);
+
+	ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
+	if (ret) {
+		drm_gem_object_unreference(gem);
+		return ERR_PTR(ret);
+	}
+
+	return &nouveau_fb->base;
+}
+
+static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
+	.fb_create = nouveau_user_framebuffer_create,
+	.output_poll_changed = nouveau_fbcon_output_poll_changed,
+};
+
+
+struct nouveau_drm_prop_enum_list {
+	u8 gen_mask;
+	int type;
+	char *name;
+};
+
+static struct nouveau_drm_prop_enum_list underscan[] = {
+	{ 6, UNDERSCAN_AUTO, "auto" },
+	{ 6, UNDERSCAN_OFF, "off" },
+	{ 6, UNDERSCAN_ON, "on" },
+	{}
+};
+
+static struct nouveau_drm_prop_enum_list dither_mode[] = {
+	{ 7, DITHERING_MODE_AUTO, "auto" },
+	{ 7, DITHERING_MODE_OFF, "off" },
+	{ 1, DITHERING_MODE_ON, "on" },
+	{ 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
+	{ 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
+	{ 4, DITHERING_MODE_TEMPORAL, "temporal" },
+	{}
+};
+
+static struct nouveau_drm_prop_enum_list dither_depth[] = {
+	{ 6, DITHERING_DEPTH_AUTO, "auto" },
+	{ 6, DITHERING_DEPTH_6BPC, "6 bpc" },
+	{ 6, DITHERING_DEPTH_8BPC, "8 bpc" },
+	{}
+};
+
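+/* Create an enum property from the entries in 'list' whose gen_mask
+ * includes the given display generation.
+ */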
+#define PROP_ENUM(p,gen,n,list) do {                                           \
+	struct nouveau_drm_prop_enum_list *l = (list);                         \
+	int c = 0;                                                             \
+	while (l->gen_mask) {                                                  \
+		if (l->gen_mask & (1 << (gen)))                                \
+			c++;                                                   \
+		l++;                                                           \
+	}                                                                      \
+	if (c) {                                                               \
+		p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c);        \
+		l = (list);                                                    \
+		c = 0;                                                         \
+		while (p && l->gen_mask) {                                     \
+			if (l->gen_mask & (1 << (gen))) {                      \
+				drm_property_add_enum(p, c, l->type, l->name); \
+				c++;                                           \
+			}                                                      \
+			l++;                                                   \
+		}                                                              \
+	}                                                                      \
+} while(0)
+
+int
+nouveau_display_init(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+	struct drm_connector *connector;
+	int ret;
+
+	ret = disp->init(dev);
+	if (ret)
+		return ret;
+
+	/* enable polling for external displays */
+	drm_kms_helper_poll_enable(dev);
+
+	/* enable hotplug interrupts */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct nouveau_connector *conn = nouveau_connector(connector);
+		if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
+			nouveau_event_get(gpio->events, conn->hpd.line,
+					 &conn->hpd_func);
+		}
+	}
+
+	return ret;
+}
+
+void
+nouveau_display_fini(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+	struct drm_connector *connector;
+
+	/* disable hotplug interrupts */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct nouveau_connector *conn = nouveau_connector(connector);
+		if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
+			nouveau_event_put(gpio->events, conn->hpd.line,
+					 &conn->hpd_func);
+		}
+	}
+
+	drm_kms_helper_poll_disable(dev);
+	disp->fini(dev);
+}
+
+int
+nouveau_display_create(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_display *disp;
+	u32 pclass = dev->pdev->class >> 8;
+	int ret, gen;
+
+	disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
+	if (!disp)
+		return -ENOMEM;
+
+	drm_mode_config_init(dev);
+	drm_mode_create_scaling_mode_property(dev);
+	drm_mode_create_dvi_i_properties(dev);
+
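+	/* property generation: 0 = pre-NV50, 1 = NV50 up to NVD0,
+	 * 2 = NVD0 and newer
+	 */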
+	if (nv_device(drm->device)->card_type < NV_50)
+		gen = 0;
+	else
+	if (nv_device(drm->device)->card_type < NV_D0)
+		gen = 1;
+	else
+		gen = 2;
+
+	PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
+	PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
+	PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
+
+	disp->underscan_hborder_property =
+		drm_property_create_range(dev, 0, "underscan hborder", 0, 128);
+
+	disp->underscan_vborder_property =
+		drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
+
+	if (gen >= 1) {
+		/* -90..+90 */
+		disp->vibrant_hue_property =
+			drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
+
+		/* -100..+100 */
+		disp->color_vibrance_property =
+			drm_property_create_range(dev, 0, "color vibrance", 0, 200);
+	}
+
+	dev->mode_config.funcs = &nouveau_mode_config_funcs;
+	dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	if (nv_device(drm->device)->card_type < NV_10) {
+		dev->mode_config.max_width = 2048;
+		dev->mode_config.max_height = 2048;
+	} else
+	if (nv_device(drm->device)->card_type < NV_50) {
+		dev->mode_config.max_width = 4096;
+		dev->mode_config.max_height = 4096;
+	} else {
+		dev->mode_config.max_width = 8192;
+		dev->mode_config.max_height = 8192;
+	}
+
+	dev->mode_config.preferred_depth = 24;
+	dev->mode_config.prefer_shadow = 1;
+
+	drm_kms_helper_poll_init(dev);
+	drm_kms_helper_poll_disable(dev);
+
+	if (nouveau_modeset == 1 ||
+	    (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) {
+		if (nv_device(drm->device)->card_type < NV_50)
+			ret = nv04_display_create(dev);
+		else
+			ret = nv50_display_create(dev);
+		if (ret)
+			goto disp_create_err;
+
+		if (dev->mode_config.num_crtc) {
+			ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+			if (ret)
+				goto vblank_err;
+		}
+
+		nouveau_backlight_init(dev);
+	}
+
+	return 0;
+
+vblank_err:
+	disp->dtor(dev);
+disp_create_err:
+	drm_kms_helper_poll_fini(dev);
+	drm_mode_config_cleanup(dev);
+	return ret;
+}
+
+void
+nouveau_display_destroy(struct drm_device *dev)
+{
+	struct nouveau_display *disp = nouveau_display(dev);
+
+	nouveau_backlight_exit(dev);
+	drm_vblank_cleanup(dev);
+
+	drm_kms_helper_poll_fini(dev);
+	drm_mode_config_cleanup(dev);
+
+	if (disp->dtor)
+		disp->dtor(dev);
+
+	nouveau_drm(dev)->display = NULL;
+	kfree(disp);
+}
+
+int
+nouveau_display_suspend(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_crtc *crtc;
+
+	nouveau_display_fini(dev);
+
+	NV_INFO(drm, "unpinning framebuffer(s)...\n");
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_framebuffer *nouveau_fb;
+
+		nouveau_fb = nouveau_framebuffer(crtc->fb);
+		if (!nouveau_fb || !nouveau_fb->nvbo)
+			continue;
+
+		nouveau_bo_unpin(nouveau_fb->nvbo);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+		nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+		nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+	}
+
+	return 0;
+}
+
+void
+nouveau_display_resume(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_crtc *crtc;
+	int ret;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_framebuffer *nouveau_fb;
+
+		nouveau_fb = nouveau_framebuffer(crtc->fb);
+		if (!nouveau_fb || !nouveau_fb->nvbo)
+			continue;
+
+		nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+	}
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+		if (!ret)
+			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+		if (ret)
+			NV_ERROR(drm, "Could not pin/map cursor.\n");
+	}
+
+	nouveau_fbcon_set_suspend(dev, 0);
+	nouveau_fbcon_zfill_all(dev);
+
+	nouveau_display_init(dev);
+
+	/* Force CLUT to get re-loaded during modeset */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+		nv_crtc->lut.depth = 0;
+	}
+
+	drm_helper_resume_force_mode(dev);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+		u32 offset = nv_crtc->cursor.nvbo->bo.offset;
+
+		nv_crtc->cursor.set_offset(nv_crtc, offset);
+		nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+						 nv_crtc->cursor_saved_y);
+	}
+}
+
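+/* Pin the new framebuffer into VRAM and reserve both buffer objects
+ * so neither can move while the flip is emitted; on failure, unwind
+ * in reverse order.
+ */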
+static int
+nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
+			  struct nouveau_bo *new_bo)
+{
+	int ret;
+
+	ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
+	if (ret)
+		goto fail;
+
+	if (likely(old_bo != new_bo)) {
+		ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
+		if (ret)
+			goto fail_unreserve;
+	}
+
+	return 0;
+
+fail_unreserve:
+	ttm_bo_unreserve(&new_bo->bo);
+fail:
+	nouveau_bo_unpin(new_bo);
+	return ret;
+}
+
+static void
+nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
+			    struct nouveau_bo *new_bo,
+			    struct nouveau_fence *fence)
+{
+	nouveau_bo_fence(new_bo, fence);
+	ttm_bo_unreserve(&new_bo->bo);
+
+	if (likely(old_bo != new_bo)) {
+		nouveau_bo_fence(old_bo, fence);
+		ttm_bo_unreserve(&old_bo->bo);
+	}
+
+	nouveau_bo_unpin(old_bo);
+}
+
+static int
+nouveau_page_flip_emit(struct nouveau_channel *chan,
+		       struct nouveau_bo *old_bo,
+		       struct nouveau_bo *new_bo,
+		       struct nouveau_page_flip_state *s,
+		       struct nouveau_fence **pfence)
+{
+	struct nouveau_fence_chan *fctx = chan->fence;
+	struct nouveau_drm *drm = chan->drm;
+	struct drm_device *dev = drm->dev;
+	unsigned long flags;
+	int ret;
+
+	/* Queue it to the pending list */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_add_tail(&s->head, &fctx->flip);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	/* Synchronize with the old framebuffer */
+	ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
+	if (ret)
+		goto fail;
+
+	/* Emit the pageflip */
+	ret = RING_SPACE(chan, 3);
+	if (ret)
+		goto fail;
+
+	if (nv_device(drm->device)->card_type < NV_C0) {
+		BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+		OUT_RING  (chan, 0x00000000);
+		OUT_RING  (chan, 0x00000000);
+	} else {
+		BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
+		OUT_RING  (chan, 0);
+		BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
+	}
+	FIRE_RING (chan);
+
+	ret = nouveau_fence_new(chan, false, pfence);
+	if (ret)
+		goto fail;
+
+	return 0;
+fail:
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_del(&s->head);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return ret;
+}
+
+int
+nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		       struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
+	struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+	struct nouveau_page_flip_state *s;
+	struct nouveau_channel *chan = NULL;
+	struct nouveau_fence *fence;
+	int ret;
+
+	if (!drm->channel)
+		return -ENODEV;
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	/* Don't let the buffers go away while we flip */
+	ret = nouveau_page_flip_reserve(old_bo, new_bo);
+	if (ret)
+		goto fail_free;
+
+	/* Initialize a page flip struct */
+	*s = (struct nouveau_page_flip_state)
+		{ { }, event, nouveau_crtc(crtc)->index,
+		  fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
+		  new_bo->bo.offset };
+
+	/* Choose the channel the flip will be handled in */
+	fence = new_bo->bo.sync_obj;
+	if (fence)
+		chan = fence->channel;
+	if (!chan)
+		chan = drm->channel;
+	mutex_lock(&chan->cli->mutex);
+
+	/* Emit a page flip */
+	if (nv_device(drm->device)->card_type >= NV_50) {
+		ret = nv50_display_flip_next(crtc, fb, chan, 0);
+		if (ret) {
+			mutex_unlock(&chan->cli->mutex);
+			goto fail_unreserve;
+		}
+	}
+
+	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
+	mutex_unlock(&chan->cli->mutex);
+	if (ret)
+		goto fail_unreserve;
+
+	/* Update the crtc struct and clean up */
+	crtc->fb = fb;
+
+	nouveau_page_flip_unreserve(old_bo, new_bo, fence);
+	nouveau_fence_unref(&fence);
+	return 0;
+
+fail_unreserve:
+	nouveau_page_flip_unreserve(old_bo, new_bo, NULL);
+fail_free:
+	kfree(s);
+	return ret;
+}
+
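+/* Called when the software page-flip method completes on a channel:
+ * dequeues the oldest pending flip, sends its vblank event (if any)
+ * and optionally hands the flip state back to the caller.
+ */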
+int
+nouveau_finish_page_flip(struct nouveau_channel *chan,
+			 struct nouveau_page_flip_state *ps)
+{
+	struct nouveau_fence_chan *fctx = chan->fence;
+	struct nouveau_drm *drm = chan->drm;
+	struct drm_device *dev = drm->dev;
+	struct nouveau_page_flip_state *s;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	if (list_empty(&fctx->flip)) {
+		NV_ERROR(drm, "unexpected pageflip\n");
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return -EINVAL;
+	}
+
+	s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
+	if (s->event)
+		drm_send_vblank_event(dev, -1, s->event);
+
+	list_del(&s->head);
+	if (ps)
+		*ps = *s;
+	kfree(s);
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return 0;
+}
+
+int
+nouveau_flip_complete(void *data)
+{
+	struct nouveau_channel *chan = data;
+	struct nouveau_drm *drm = chan->drm;
+	struct nouveau_page_flip_state state;
+
+	if (!nouveau_finish_page_flip(chan, &state)) {
+		if (nv_device(drm->device)->card_type < NV_50) {
+			nv_set_crtc_base(drm->dev, state.crtc, state.offset +
+					 state.y * state.pitch +
+					 state.x * state.bpp / 8);
+		}
+	}
+
+	return 0;
+}
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+			    struct drm_mode_create_dumb *args)
+{
+	struct nouveau_bo *bo;
+	int ret;
+
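+	/* align the pitch to 256 bytes and the size to page granularity;
+	 * 256-byte pitch alignment appears sufficient for scanout on the
+	 * hardware this driver supports
+	 */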
+	args->pitch = roundup(args->width * (args->bpp / 8), 256);
+	args->size = args->pitch * args->height;
+	args->size = roundup(args->size, PAGE_SIZE);
+
+	ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo);
+	if (ret)
+		return ret;
+
+	ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
+	drm_gem_object_unreference_unlocked(bo->gem);
+	return ret;
+}
+
+int
+nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+			     uint32_t handle)
+{
+	return drm_gem_handle_delete(file_priv, handle);
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+				struct drm_device *dev,
+				uint32_t handle, uint64_t *poffset)
+{
+	struct drm_gem_object *gem;
+
+	gem = drm_gem_object_lookup(dev, file_priv, handle);
+	if (gem) {
+		struct nouveau_bo *bo = gem->driver_private;
+		*poffset = bo->bo.addr_space_offset;
+		drm_gem_object_unreference_unlocked(gem);
+		return 0;
+	}
+
+	return -ENOENT;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_display.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_display.h
new file mode 100644
index 0000000..1ea3e47
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -0,0 +1,91 @@
+#ifndef __NOUVEAU_DISPLAY_H__
+#define __NOUVEAU_DISPLAY_H__
+
+#include <subdev/vm.h>
+
+#include "nouveau_drm.h"
+
+struct nouveau_framebuffer {
+	struct drm_framebuffer base;
+	struct nouveau_bo *nvbo;
+	struct nouveau_vma vma;
+	u32 r_dma;
+	u32 r_format;
+	u32 r_pitch;
+};
+
+static inline struct nouveau_framebuffer *
+nouveau_framebuffer(struct drm_framebuffer *fb)
+{
+	return container_of(fb, struct nouveau_framebuffer, base);
+}
+
+int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
+			     struct drm_mode_fb_cmd2 *, struct nouveau_bo *);
+
+struct nouveau_page_flip_state {
+	struct list_head head;
+	struct drm_pending_vblank_event *event;
+	int crtc, bpp, pitch, x, y;
+	u64 offset;
+};
+
+struct nouveau_display {
+	void *priv;
+	void (*dtor)(struct drm_device *);
+	int  (*init)(struct drm_device *);
+	void (*fini)(struct drm_device *);
+
+	struct drm_property *dithering_mode;
+	struct drm_property *dithering_depth;
+	struct drm_property *underscan_property;
+	struct drm_property *underscan_hborder_property;
+	struct drm_property *underscan_vborder_property;
+	/* not really hue and saturation: */
+	struct drm_property *vibrant_hue_property;
+	struct drm_property *color_vibrance_property;
+};
+
+static inline struct nouveau_display *
+nouveau_display(struct drm_device *dev)
+{
+	return nouveau_drm(dev)->display;
+}
+
+int  nouveau_display_create(struct drm_device *dev);
+void nouveau_display_destroy(struct drm_device *dev);
+int  nouveau_display_init(struct drm_device *dev);
+void nouveau_display_fini(struct drm_device *dev);
+int  nouveau_display_suspend(struct drm_device *dev);
+void nouveau_display_resume(struct drm_device *dev);
+
+int  nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			    struct drm_pending_vblank_event *event);
+int  nouveau_finish_page_flip(struct nouveau_channel *,
+			      struct nouveau_page_flip_state *);
+
+int  nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+				 struct drm_mode_create_dumb *args);
+int  nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+				     u32 handle, u64 *offset);
+int  nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+				  u32 handle);
+
+void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+
+#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+extern int nouveau_backlight_init(struct drm_device *);
+extern void nouveau_backlight_exit(struct drm_device *);
+#else
+static inline int
+nouveau_backlight_init(struct drm_device *dev)
+{
+	return 0;
+}
+
+static inline void
+nouveau_backlight_exit(struct drm_device *dev)
+{
+}
+#endif
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_dma.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_dma.c
new file mode 100644
index 0000000..40f91e1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <core/client.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+
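+/* Copy a block of dwords into the push buffer at the current cursor,
+ * using the appropriate accessor for iomem vs. lmem mappings.
+ */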
+void
+OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
+{
+	bool is_iomem;
+	u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);
+	mem = &mem[chan->dma.cur];
+	if (is_iomem)
+		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
+	else
+		memcpy(mem, data, nr_dwords * 4);
+	chan->dma.cur += nr_dwords;
+}
+
+/* Fetch and adjust GPU GET pointer
+ *
+ * Returns:
+ *  value >= 0, the adjusted GET pointer
+ *  -EINVAL if GET pointer currently outside main push buffer
+ *  -EBUSY if timeout exceeded
+ */
+static inline int
+READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
+{
+	uint64_t val;
+
+	val = nv_ro32(chan->object, chan->user_get);
+	if (chan->user_get_hi)
+		val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32;
+
+	/* reset the counter as long as GET is still advancing; this
+	 * avoids misdetecting a GPU lockup when the GPU is merely
+	 * processing an operation that takes a long time
+	 */
+	if (val != *prev_get) {
+		*prev_get = val;
+		*timeout = 0;
+	}
+
+	if ((++*timeout & 0xff) == 0) {
+		udelay(1);
+		if (*timeout > 100000)
+			return -EBUSY;
+	}
+
+	if (val < chan->push.vma.offset ||
+	    val > chan->push.vma.offset + (chan->dma.max << 2))
+		return -EINVAL;
+
+	return (val - chan->push.vma.offset) >> 2;
+}
+
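+/* Submit a buffer via the indirect buffer (IB) on NV50+ channels: an
+ * IB entry packs the buffer's 64-bit GPU virtual address and length,
+ * then ib_put is advanced and written to the channel object so PFIFO
+ * can begin fetching the new commands.
+ */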
+void
+nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
+	      int delta, int length)
+{
+	struct nouveau_bo *pb = chan->push.buffer;
+	struct nouveau_vma *vma;
+	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
+	u64 offset;
+
+	vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
+	BUG_ON(!vma);
+	offset = vma->offset + delta;
+
+	BUG_ON(chan->dma.ib_free < 1);
+
+	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
+	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
+
+	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
+
+	DRM_MEMORYBARRIER();
+	/* Flush writes. */
+	nouveau_bo_rd32(pb, 0);
+
+	nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
+	chan->dma.ib_free--;
+}
+
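+/* Wait until at least "count" IB entries are free, polling the GPU's
+ * IB GET pointer and giving up if it stops advancing for too long.
+ */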
+static int
+nv50_dma_push_wait(struct nouveau_channel *chan, int count)
+{
+	uint32_t cnt = 0, prev_get = 0;
+
+	while (chan->dma.ib_free < count) {
+		uint32_t get = nv_ro32(chan->object, 0x88);
+		if (get != prev_get) {
+			prev_get = get;
+			cnt = 0;
+		}
+
+		if ((++cnt & 0xff) == 0) {
+			DRM_UDELAY(1);
+			if (cnt > 100000)
+				return -EBUSY;
+		}
+
+		chan->dma.ib_free = get - chan->dma.ib_put;
+		if (chan->dma.ib_free <= 0)
+			chan->dma.ib_free += chan->dma.ib_max;
+	}
+
+	return 0;
+}
+
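+/* IB-mode wait: ensure a free IB slot for the eventual FIRE_RING as
+ * well as "count" dwords of push buffer space, wrapping the push
+ * buffer back to the start when too little space remains at the end.
+ */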
+static int
+nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
+{
+	uint64_t prev_get = 0;
+	int ret, cnt = 0;
+
+	ret = nv50_dma_push_wait(chan, slots + 1);
+	if (unlikely(ret))
+		return ret;
+
+	while (chan->dma.free < count) {
+		int get = READ_GET(chan, &prev_get, &cnt);
+		if (unlikely(get < 0)) {
+			if (get == -EINVAL)
+				continue;
+
+			return get;
+		}
+
+		if (get <= chan->dma.cur) {
+			chan->dma.free = chan->dma.max - chan->dma.cur;
+			if (chan->dma.free >= count)
+				break;
+
+			FIRE_RING(chan);
+			do {
+				get = READ_GET(chan, &prev_get, &cnt);
+				if (unlikely(get < 0)) {
+					if (get == -EINVAL)
+						continue;
+					return get;
+				}
+			} while (get == 0);
+			chan->dma.cur = 0;
+			chan->dma.put = 0;
+		}
+
+		chan->dma.free = get - chan->dma.cur - 1;
+	}
+
+	return 0;
+}
+
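+/* Ensure "size" dwords of push buffer space are available before the
+ * caller starts emitting methods; NV50+ IB-mode channels take the
+ * nv50_dma_wait() path above, everything else polls GET directly and
+ * emits a jump back to the start of the push buffer when it wraps.
+ */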
+int
+nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
+{
+	uint64_t prev_get = 0;
+	int cnt = 0, get;
+
+	if (chan->dma.ib_max)
+		return nv50_dma_wait(chan, slots, size);
+
+	while (chan->dma.free < size) {
+		get = READ_GET(chan, &prev_get, &cnt);
+		if (unlikely(get == -EBUSY))
+			return -EBUSY;
+
+		/* loop until we have a usable GET pointer.  the value
+		 * we read from the GPU may lie outside the main ring if
+		 * PFIFO is processing a buffer called from the main ring;
+		 * discard such values until something sensible is seen.
+		 *
+		 * GET is also discarded while the GPU is fetching from
+		 * the SKIPS area, so the code below doesn't have to deal
+		 * with some fun corner cases.
+		 */
+		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
+			continue;
+
+		if (get <= chan->dma.cur) {
+			/* engine is fetching behind us, or is completely
+			 * idle (GET == PUT) so we have free space up until
+			 * the end of the push buffer
+			 *
+			 * we can only hit that path once per call due to
+			 * looping back to the beginning of the push buffer,
+			 * we'll hit the fetching-ahead-of-us path from that
+			 * point on.
+			 *
+			 * the *one* exception to that rule is if we read
+			 * GET==PUT, in which case the below conditional will
+			 * always succeed and break us out of the wait loop.
+			 */
+			chan->dma.free = chan->dma.max - chan->dma.cur;
+			if (chan->dma.free >= size)
+				break;
+
+			/* not enough space left at the end of the push buffer,
+			 * instruct the GPU to jump back to the start right
+			 * after processing the currently pending commands.
+			 */
+			OUT_RING(chan, chan->push.vma.offset | 0x20000000);
+
+			/* wait for GET to depart from the SKIPS area;
+			 * this prevents writing GET == PUT and triggering
+			 * a race that makes us think the GPU is idle
+			 * when it's not.
+			 */
+			do {
+				get = READ_GET(chan, &prev_get, &cnt);
+				if (unlikely(get == -EBUSY))
+					return -EBUSY;
+				if (unlikely(get == -EINVAL))
+					continue;
+			} while (get <= NOUVEAU_DMA_SKIPS);
+			WRITE_PUT(NOUVEAU_DMA_SKIPS);
+
+			/* we're now submitting commands at the start of
+			 * the push buffer.
+			 */
+			chan->dma.cur  =
+			chan->dma.put  = NOUVEAU_DMA_SKIPS;
+		}
+
+		/* engine fetching ahead of us, we have space up until the
+		 * current GET pointer.  the "- 1" is to ensure there's
+		 * space left to emit a jump back to the beginning of the
+		 * push buffer if we require it.  we can never get GET == PUT
+		 * here, so this is safe.
+		 */
+		chan->dma.free = get - chan->dma.cur - 1;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_dma.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_dma.h
new file mode 100644
index 0000000..690d593
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_DMA_H__
+#define __NOUVEAU_DMA_H__
+
+#include "nouveau_bo.h"
+#include "nouveau_chan.h"
+
+int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
+void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
+		   int delta, int length);
+
+/*
+ * There's a hw race condition where you can't jump to your PUT offset;
+ * to avoid this we jump to offset + SKIPS and fill the difference with
+ * NOPs.
+ *
+ * xf86-video-nv configures the DMA fetch size to 32 bytes and uses a
+ * SKIPS value of 8.  Assuming the race condition has to do with writing
+ * into the fetch area: since we configure a fetch size of 128 bytes, we
+ * need a correspondingly larger SKIPS value.
+ */
+#define NOUVEAU_DMA_SKIPS (128 / 4)
+
+/* Hardcoded object assignments to subchannels (subchannel id). */
+enum {
+	NvSubCtxSurf2D  = 0,
+	NvSubSw		= 1,
+	NvSubImageBlit  = 2,
+	NvSub2D		= 3,
+	NvSubGdiRect    = 3,
+	NvSubCopy	= 4,
+};
+
+/* Object handles. */
+enum {
+	NvM2MF		= 0x80000001,
+	NvDmaFB		= 0x80000002,
+	NvDmaTT		= 0x80000003,
+	NvNotify0       = 0x80000006,
+	Nv2D		= 0x80000007,
+	NvCtxSurf2D	= 0x80000008,
+	NvRop		= 0x80000009,
+	NvImagePatt	= 0x8000000a,
+	NvClipRect	= 0x8000000b,
+	NvGdiRect	= 0x8000000c,
+	NvImageBlit	= 0x8000000d,
+	NvSw		= 0x8000000e,
+	NvSema		= 0x8000000f,
+	NvEvoSema0	= 0x80000010,
+	NvEvoSema1	= 0x80000011,
+	NvNotify1       = 0x80000012,
+
+	/* G80+ display objects */
+	NvEvoVRAM	= 0x01000000,
+	NvEvoFB16	= 0x01000001,
+	NvEvoFB32	= 0x01000002,
+	NvEvoVRAM_LP	= 0x01000003,
+	NvEvoSync	= 0xcafe0000
+};
+
+#define NV_MEMORY_TO_MEMORY_FORMAT                                    0x00000039
+#define NV_MEMORY_TO_MEMORY_FORMAT_NAME                               0x00000000
+#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF                            0x00000050
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOP                                0x00000100
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY                             0x00000104
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE                 0x00000000
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN       0x00000001
+#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY                         0x00000180
+#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE                         0x00000184
+#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN                          0x0000030c
+
+#define NV50_MEMORY_TO_MEMORY_FORMAT                                  0x00005039
+#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200                           0x00000200
+#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C                           0x0000021c
+#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH                   0x00000238
+#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH                  0x0000023c
+
+static __must_check inline int
+RING_SPACE(struct nouveau_channel *chan, int size)
+{
+	int ret;
+
+	ret = nouveau_dma_wait(chan, 1, size);
+	if (ret)
+		return ret;
+
+	chan->dma.free -= size;
+	return 0;
+}
+
+static inline void
+OUT_RING(struct nouveau_channel *chan, int data)
+{
+	nouveau_bo_wr32(chan->push.buffer, chan->dma.cur++, data);
+}
+
+extern void
+OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
+
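+/* Method header helpers.  As encoded below, NV04-style headers carry
+ * the method address in bits 12:0, the subchannel in bits 15:13 and
+ * the method count in bits 28:18, while Fermi-style (NVC0+) headers
+ * carry mthd >> 2 in the low bits, the subchannel in bits 15:13 and
+ * the method count (or immediate data) from bit 16 up, with the
+ * header type selected by the top bits.
+ */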
+static inline void
+BEGIN_NV04(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+	OUT_RING(chan, 0x00000000 | (subc << 13) | (size << 18) | mthd);
+}
+
+static inline void
+BEGIN_NI04(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+	OUT_RING(chan, 0x40000000 | (subc << 13) | (size << 18) | mthd);
+}
+
+static inline void
+BEGIN_NVC0(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+	OUT_RING(chan, 0x20000000 | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
+static inline void
+BEGIN_NIC0(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+	OUT_RING(chan, 0x60000000 | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
+static inline void
+BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
+{
+	OUT_RING(chan, 0x80000000 | (data << 16) | (subc << 13) | (mthd >> 2));
+}
+
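+/* Publish a new PUT offset to the channel; the barrier and dummy
+ * read of the push buffer flush pending writes before the GPU is
+ * notified.
+ */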
+#define WRITE_PUT(val) do {                                                    \
+	DRM_MEMORYBARRIER();                                                   \
+	nouveau_bo_rd32(chan->push.buffer, 0);                                 \
+	nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset);  \
+} while (0)
+
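+/* Kick the GPU: on IB-mode channels, emit an IB entry covering the
+ * methods written since the last kick; otherwise, write the new PUT
+ * offset directly via WRITE_PUT().
+ */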
+static inline void
+FIRE_RING(struct nouveau_channel *chan)
+{
+	if (chan->dma.cur == chan->dma.put)
+		return;
+	chan->accel_done = true;
+
+	if (chan->dma.ib_max) {
+		nv50_dma_push(chan, chan->push.buffer, chan->dma.put << 2,
+			      (chan->dma.cur - chan->dma.put) << 2);
+	} else {
+		WRITE_PUT(chan->dma.cur);
+	}
+
+	chan->dma.put = chan->dma.cur;
+}
+
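+/* Discard any methods emitted since the last FIRE_RING by rewinding
+ * the CPU-side cursor to the last submitted PUT position.
+ */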
+static inline void
+WIND_RING(struct nouveau_channel *chan)
+{
+	chan->dma.cur = chan->dma.put;
+}
+
+/* FIFO methods */
+#define NV01_SUBCHAN_OBJECT                                          0x00000000
+#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH                          0x00000010
+#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW                           0x00000014
+#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE                              0x00000018
+#define NV84_SUBCHAN_SEMAPHORE_TRIGGER                               0x0000001c
+#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL                 0x00000001
+#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG                    0x00000002
+#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL                0x00000004
+#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD                         0x00001000
+#define NV84_SUBCHAN_UEVENT                                          0x00000020
+#define NV84_SUBCHAN_WRCACHE_FLUSH                                   0x00000024
+#define NV10_SUBCHAN_REF_CNT                                         0x00000050
+#define NVSW_SUBCHAN_PAGE_FLIP                                       0x00000054
+#define NV11_SUBCHAN_DMA_SEMAPHORE                                   0x00000060
+#define NV11_SUBCHAN_SEMAPHORE_OFFSET                                0x00000064
+#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE                               0x00000068
+#define NV11_SUBCHAN_SEMAPHORE_RELEASE                               0x0000006c
+#define NV40_SUBCHAN_YIELD                                           0x00000080
+
+/* NV_SW object class */
+#define NV_SW_DMA_VBLSEM                                             0x0000018c
+#define NV_SW_VBLSEM_OFFSET                                          0x00000400
+#define NV_SW_VBLSEM_RELEASE_VALUE                                   0x00000404
+#define NV_SW_VBLSEM_RELEASE                                         0x00000408
+#define NV_SW_PAGE_FLIP                                              0x00000500
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_dp.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_dp.c
new file mode 100644
index 0000000..36fd225
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_dp_helper.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_connector.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+
+#include <core/class.h>
+
+#include <subdev/gpio.h>
+#include <subdev/i2c.h>
+
+static void
+nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
+		     u8 *dpcd)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u8 buf[3];
+
+	if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+		return;
+
+	if (!nv_rdaux(auxch, DP_SINK_OUI, buf, 3))
+		NV_DEBUG(drm, "Sink OUI: %02hx%02hx%02hx\n",
+			     buf[0], buf[1], buf[2]);
+
+	if (!nv_rdaux(auxch, DP_BRANCH_OUI, buf, 3))
+		NV_DEBUG(drm, "Branch OUI: %02hx%02hx%02hx\n",
+			     buf[0], buf[1], buf[2]);
+}
+
+bool
+nouveau_dp_detect(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c_port *auxch;
+	u8 *dpcd = nv_encoder->dp.dpcd;
+	int ret;
+
+	auxch = nv_encoder->i2c;
+	if (!auxch)
+		return false;
+
+	ret = nv_rdaux(auxch, DP_DPCD_REV, dpcd, 8);
+	if (ret)
+		return false;
+
+	nv_encoder->dp.link_bw = 27000 * dpcd[1];
+	nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
+
+	NV_DEBUG(drm, "display: %dx%d dpcd 0x%02x\n",
+		     nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, dpcd[0]);
+	NV_DEBUG(drm, "encoder: %dx%d\n",
+		     nv_encoder->dcb->dpconf.link_nr,
+		     nv_encoder->dcb->dpconf.link_bw);
+
+	if (nv_encoder->dcb->dpconf.link_nr < nv_encoder->dp.link_nr)
+		nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
+	if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw)
+		nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw;
+
+	NV_DEBUG(drm, "maximum: %dx%d\n",
+		     nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
+
+	nouveau_dp_probe_oui(dev, auxch, dpcd);
+
+	return true;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_drm.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_drm.c
new file mode 100644
index 0000000..4598a6a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -0,0 +1,802 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <core/device.h>
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <engine/device.h>
+#include <engine/disp.h>
+#include <engine/fifo.h>
+
+#include <subdev/vm.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
+#include "nouveau_agp.h"
+#include "nouveau_vga.h"
+#include "nouveau_pm.h"
+#include "nouveau_acpi.h"
+#include "nouveau_bios.h"
+#include "nouveau_ioctl.h"
+#include "nouveau_abi16.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_fence.h"
+#include "nouveau_debugfs.h"
+
+MODULE_PARM_DESC(config, "option string to pass to driver core");
+static char *nouveau_config;
+module_param_named(config, nouveau_config, charp, 0400);
+
+MODULE_PARM_DESC(debug, "debug string to pass to driver core");
+static char *nouveau_debug;
+module_param_named(debug, nouveau_debug, charp, 0400);
+
+MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
+static int nouveau_noaccel = 0;
+module_param_named(noaccel, nouveau_noaccel, int, 0400);
+
+MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
+		          "0 = disabled, 1 = enabled, 2 = headless)");
+int nouveau_modeset = -1;
+module_param_named(modeset, nouveau_modeset, int, 0400);
+
+static struct drm_driver driver;
+
+static int
+nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
+{
+	struct nouveau_drm *drm =
+		container_of(event, struct nouveau_drm, vblank[head]);
+	drm_handle_vblank(drm->dev, head);
+	return NVKM_EVENT_KEEP;
+}
+
+static int
+nouveau_drm_vblank_enable(struct drm_device *dev, int head)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+
+	if (WARN_ON_ONCE(head >= ARRAY_SIZE(drm->vblank)))
+		return -EIO;
+	WARN_ON_ONCE(drm->vblank[head].func);
+	drm->vblank[head].func = nouveau_drm_vblank_handler;
+	nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]);
+	return 0;
+}
+
+static void
+nouveau_drm_vblank_disable(struct drm_device *dev, int head)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+	if (drm->vblank[head].func)
+		nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]);
+	else
+		WARN_ON_ONCE(1);
+	drm->vblank[head].func = NULL;
+}
+
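+/* Pack the PCI domain/bus/slot/function of the device into a unique
+ * 64-bit name for the client created on top of it.
+ */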
+static u64
+nouveau_name(struct pci_dev *pdev)
+{
+	u64 name = (u64)pci_domain_nr(pdev->bus) << 32;
+	name |= pdev->bus->number << 16;
+	name |= PCI_SLOT(pdev->devfn) << 8;
+	return name | PCI_FUNC(pdev->devfn);
+}
+
+static int
+nouveau_cli_create(struct pci_dev *pdev, const char *name,
+		   int size, void **pcli)
+{
+	struct nouveau_cli *cli;
+	int ret;
+
+	*pcli = NULL;
+	ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
+				     nouveau_debug, size, pcli);
+	cli = *pcli;
+	if (ret) {
+		if (cli)
+			nouveau_client_destroy(&cli->base);
+		*pcli = NULL;
+		return ret;
+	}
+
+	mutex_init(&cli->mutex);
+	return 0;
+}
+
+static void
+nouveau_cli_destroy(struct nouveau_cli *cli)
+{
+	struct nouveau_object *client = nv_object(cli);
+	nouveau_vm_ref(NULL, &cli->base.vm, NULL);
+	nouveau_client_fini(&cli->base, false);
+	atomic_set(&client->refcount, 1);
+	nouveau_object_ref(NULL, &client);
+}
+
+static void
+nouveau_accel_fini(struct nouveau_drm *drm)
+{
+	nouveau_gpuobj_ref(NULL, &drm->notify);
+	nouveau_channel_del(&drm->channel);
+	nouveau_channel_del(&drm->cechan);
+	if (drm->fence)
+		nouveau_fence(drm)->dtor(drm);
+}
+
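+/* Set up the kernel's own acceleration state: a per-generation fence
+ * implementation, a copy-engine channel on NVE0+ boards, the general
+ * kernel channel used for fbcon acceleration and buffer moves, and
+ * (pre-NVC0) a DMA notifier object.
+ */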
+static void
+nouveau_accel_init(struct nouveau_drm *drm)
+{
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_object *object;
+	u32 arg0, arg1;
+	int ret;
+
+	if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/)
+		return;
+
+	/* initialise synchronisation routines */
+	if      (device->card_type < NV_10) ret = nv04_fence_create(drm);
+	else if (device->chipset   <  0x17) ret = nv10_fence_create(drm);
+	else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
+	else if (device->chipset   <  0x84) ret = nv50_fence_create(drm);
+	else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
+	else                                ret = nvc0_fence_create(drm);
+	if (ret) {
+		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
+		nouveau_accel_fini(drm);
+		return;
+	}
+
+	if (device->card_type >= NV_E0) {
+		ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
+					  NVDRM_CHAN + 1,
+					  NVE0_CHANNEL_IND_ENGINE_CE0 |
+					  NVE0_CHANNEL_IND_ENGINE_CE1, 0,
+					  &drm->cechan);
+		if (ret)
+			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
+
+		arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
+		arg1 = 1;
+	} else {
+		arg0 = NvDmaFB;
+		arg1 = NvDmaTT;
+	}
+
+	ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
+				  arg0, arg1, &drm->channel);
+	if (ret) {
+		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
+		nouveau_accel_fini(drm);
+		return;
+	}
+
+	if (device->card_type < NV_C0) {
+		ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
+					&drm->notify);
+		if (ret) {
+			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
+			nouveau_accel_fini(drm);
+			return;
+		}
+
+		ret = nouveau_object_new(nv_object(drm),
+					 drm->channel->handle, NvNotify0,
+					 0x003d, &(struct nv_dma_class) {
+						.flags = NV_DMA_TARGET_VRAM |
+							 NV_DMA_ACCESS_RDWR,
+						.start = drm->notify->addr,
+						.limit = drm->notify->addr + 31
+						}, sizeof(struct nv_dma_class),
+					 &object);
+		if (ret) {
+			nouveau_accel_fini(drm);
+			return;
+		}
+	}
+
+	nouveau_bo_move_init(drm);
+}
+
+static int nouveau_drm_probe(struct pci_dev *pdev,
+			     const struct pci_device_id *pent)
+{
+	struct nouveau_device *device;
+	struct apertures_struct *aper;
+	bool boot = false;
+	int ret;
+
+	/* remove conflicting drivers (vesafb, efifb etc) */
+	aper = alloc_apertures(3);
+	if (!aper)
+		return -ENOMEM;
+
+	aper->ranges[0].base = pci_resource_start(pdev, 1);
+	aper->ranges[0].size = pci_resource_len(pdev, 1);
+	aper->count = 1;
+
+	if (pci_resource_len(pdev, 2)) {
+		aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
+		aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
+		aper->count++;
+	}
+
+	if (pci_resource_len(pdev, 3)) {
+		aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
+		aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
+		aper->count++;
+	}
+
+#ifdef CONFIG_X86
+	boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+	kfree(aper);
+
+	ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
+				    nouveau_config, nouveau_debug, &device);
+	if (ret)
+		return ret;
+
+	pci_set_master(pdev);
+
+	ret = drm_get_pci_dev(pdev, pent, &driver);
+	if (ret) {
+		nouveau_object_ref(NULL, (struct nouveau_object **)&device);
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct lock_class_key drm_client_lock_class_key;
+
+static int
+nouveau_drm_load(struct drm_device *dev, unsigned long flags)
+{
+	struct pci_dev *pdev = dev->pdev;
+	struct nouveau_device *device;
+	struct nouveau_drm *drm;
+	int ret;
+
+	ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
+	if (ret)
+		return ret;
+	lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
+
+	dev->dev_private = drm;
+	drm->dev = dev;
+
+	INIT_LIST_HEAD(&drm->clients);
+	spin_lock_init(&drm->tile.lock);
+
+	/* make sure AGP controller is in a consistent state before we
+	 * (possibly) execute vbios init tables (see nouveau_agp.h)
+	 */
+	if (drm_pci_device_is_agp(dev) && dev->agp) {
+		/* dummy device object, doesn't init anything, but allows
+		 * agp code access to registers
+		 */
+		ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT,
+					 NVDRM_DEVICE, 0x0080,
+					 &(struct nv_device_class) {
+						.device = ~0,
+						.disable =
+						 ~(NV_DEVICE_DISABLE_MMIO |
+						   NV_DEVICE_DISABLE_IDENTIFY),
+						.debug0 = ~0,
+					 }, sizeof(struct nv_device_class),
+					 &drm->device);
+		if (ret)
+			goto fail_device;
+
+		nouveau_agp_reset(drm);
+		nouveau_object_del(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE);
+	}
+
+	ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE,
+				 0x0080, &(struct nv_device_class) {
+					.device = ~0,
+					.disable = 0,
+					.debug0 = 0,
+				 }, sizeof(struct nv_device_class),
+				 &drm->device);
+	if (ret)
+		goto fail_device;
+
+	dev->irq_enabled = true;
+
+	/* work around an odd issue on nvc1 by disabling the device's
+	 * nosnoop capability.  hopefully this won't cause issues until a
+	 * better fix is found - assuming there is one...
+	 */
+	device = nv_device(drm->device);
+	if (device->chipset == 0xc1)
+		nv_mask(device, 0x00088080, 0x00000800, 0x00000000);
+
+	nouveau_vga_init(drm);
+	nouveau_agp_init(drm);
+
+	if (device->card_type >= NV_50) {
+		ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
+				     0x1000, &drm->client.base.vm);
+		if (ret)
+			goto fail_device;
+	}
+
+	ret = nouveau_ttm_init(drm);
+	if (ret)
+		goto fail_ttm;
+
+	ret = nouveau_bios_init(dev);
+	if (ret)
+		goto fail_bios;
+
+	ret = nouveau_display_create(dev);
+	if (ret)
+		goto fail_dispctor;
+
+	if (dev->mode_config.num_crtc) {
+		ret = nouveau_display_init(dev);
+		if (ret)
+			goto fail_dispinit;
+	}
+
+	nouveau_pm_init(dev);
+
+	nouveau_accel_init(drm);
+	nouveau_fbcon_init(dev);
+	return 0;
+
+fail_dispinit:
+	nouveau_display_destroy(dev);
+fail_dispctor:
+	nouveau_bios_takedown(dev);
+fail_bios:
+	nouveau_ttm_fini(drm);
+fail_ttm:
+	nouveau_agp_fini(drm);
+	nouveau_vga_fini(drm);
+fail_device:
+	nouveau_cli_destroy(&drm->client);
+	return ret;
+}
+
+static int
+nouveau_drm_unload(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	nouveau_fbcon_fini(dev);
+	nouveau_accel_fini(drm);
+
+	nouveau_pm_fini(dev);
+
+	if (dev->mode_config.num_crtc)
+		nouveau_display_fini(dev);
+	nouveau_display_destroy(dev);
+
+	nouveau_bios_takedown(dev);
+
+	nouveau_ttm_fini(drm);
+	nouveau_agp_fini(drm);
+	nouveau_vga_fini(drm);
+
+	nouveau_cli_destroy(&drm->client);
+	return 0;
+}
+
+static void
+nouveau_drm_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_object *device;
+
+	dev->irq_enabled = false;
+	device = drm->client.base.device;
+	drm_put_dev(dev);
+
+	nouveau_object_ref(NULL, &device);
+	nouveau_object_debug();
+}
+
+static int
+nouveau_do_suspend(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_cli *cli;
+	int ret;
+
+	if (dev->mode_config.num_crtc) {
+		NV_INFO(drm, "suspending fbcon...\n");
+		nouveau_fbcon_set_suspend(dev, 1);
+
+		NV_INFO(drm, "suspending display...\n");
+		ret = nouveau_display_suspend(dev);
+		if (ret)
+			return ret;
+	}
+
+	NV_INFO(drm, "evicting buffers...\n");
+	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
+
+	NV_INFO(drm, "waiting for kernel channels to go idle...\n");
+	if (drm->cechan) {
+		ret = nouveau_channel_idle(drm->cechan);
+		if (ret)
+			return ret;
+	}
+
+	if (drm->channel) {
+		ret = nouveau_channel_idle(drm->channel);
+		if (ret)
+			return ret;
+	}
+
+	NV_INFO(drm, "suspending client object trees...\n");
+	if (drm->fence && nouveau_fence(drm)->suspend) {
+		if (!nouveau_fence(drm)->suspend(drm))
+			return -ENOMEM;
+	}
+
+	list_for_each_entry(cli, &drm->clients, head) {
+		ret = nouveau_client_fini(&cli->base, true);
+		if (ret)
+			goto fail_client;
+	}
+
+	NV_INFO(drm, "suspending kernel object tree...\n");
+	ret = nouveau_client_fini(&drm->client.base, true);
+	if (ret)
+		goto fail_client;
+
+	nouveau_agp_fini(drm);
+	return 0;
+
+fail_client:
+	list_for_each_entry_continue_reverse(cli, &drm->clients, head) {
+		nouveau_client_init(&cli->base);
+	}
+
+	if (dev->mode_config.num_crtc) {
+		NV_INFO(drm, "resuming display...\n");
+		nouveau_display_resume(dev);
+	}
+	return ret;
+}
+
+int nouveau_pmops_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	ret = nouveau_do_suspend(drm_dev);
+	if (ret)
+		return ret;
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+static int
+nouveau_do_resume(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_cli *cli;
+
+	NV_INFO(drm, "re-enabling device...\n");
+
+	nouveau_agp_reset(drm);
+
+	NV_INFO(drm, "resuming kernel object tree...\n");
+	nouveau_client_init(&drm->client.base);
+	nouveau_agp_init(drm);
+
+	NV_INFO(drm, "resuming client object trees...\n");
+	if (drm->fence && nouveau_fence(drm)->resume)
+		nouveau_fence(drm)->resume(drm);
+
+	list_for_each_entry(cli, &drm->clients, head) {
+		nouveau_client_init(&cli->base);
+	}
+
+	nouveau_run_vbios_init(dev);
+	nouveau_pm_resume(dev);
+
+	if (dev->mode_config.num_crtc) {
+		NV_INFO(drm, "resuming display...\n");
+		nouveau_display_resume(dev);
+	}
+	return 0;
+}
+
+int nouveau_pmops_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int ret;
+
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	ret = pci_enable_device(pdev);
+	if (ret)
+		return ret;
+	pci_set_master(pdev);
+
+	return nouveau_do_resume(drm_dev);
+}
+
+static int nouveau_pmops_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return nouveau_do_suspend(drm_dev);
+}
+
+static int nouveau_pmops_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return nouveau_do_resume(drm_dev);
+}
+
+static int
+nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
+{
+	struct pci_dev *pdev = dev->pdev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_cli *cli;
+	char name[32], tmpname[TASK_COMM_LEN];
+	int ret;
+
+	get_task_comm(tmpname, current);
+	snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
+
+	ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
+	if (ret)
+		return ret;
+
+	if (nv_device(drm->device)->card_type >= NV_50) {
+		ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
+				     0x1000, &cli->base.vm);
+		if (ret) {
+			nouveau_cli_destroy(cli);
+			return ret;
+		}
+	}
+
+	fpriv->driver_priv = cli;
+
+	mutex_lock(&drm->client.mutex);
+	list_add(&cli->head, &drm->clients);
+	mutex_unlock(&drm->client.mutex);
+	return 0;
+}
+
+static void
+nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
+{
+	struct nouveau_cli *cli = nouveau_cli(fpriv);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (cli->abi16)
+		nouveau_abi16_fini(cli->abi16);
+
+	mutex_lock(&drm->client.mutex);
+	list_del(&cli->head);
+	mutex_unlock(&drm->client.mutex);
+}
+
+static void
+nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
+{
+	struct nouveau_cli *cli = nouveau_cli(fpriv);
+	nouveau_cli_destroy(cli);
+}
+
+static struct drm_ioctl_desc
+nouveau_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
+};
+
+static const struct file_operations
+nouveau_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = nouveau_ttm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+#if defined(CONFIG_COMPAT)
+	.compat_ioctl = nouveau_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver
+driver = {
+	.driver_features =
+		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
+		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
+
+	.load = nouveau_drm_load,
+	.unload = nouveau_drm_unload,
+	.open = nouveau_drm_open,
+	.preclose = nouveau_drm_preclose,
+	.postclose = nouveau_drm_postclose,
+	.lastclose = nouveau_vga_lastclose,
+
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = nouveau_debugfs_init,
+	.debugfs_cleanup = nouveau_debugfs_takedown,
+#endif
+
+	.get_vblank_counter = drm_vblank_count,
+	.enable_vblank = nouveau_drm_vblank_enable,
+	.disable_vblank = nouveau_drm_vblank_disable,
+
+	.ioctls = nouveau_ioctls,
+	.fops = &nouveau_driver_fops,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_pin = nouveau_gem_prime_pin,
+	.gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
+	.gem_prime_vmap = nouveau_gem_prime_vmap,
+	.gem_prime_vunmap = nouveau_gem_prime_vunmap,
+
+	.gem_init_object = nouveau_gem_object_new,
+	.gem_free_object = nouveau_gem_object_del,
+	.gem_open_object = nouveau_gem_object_open,
+	.gem_close_object = nouveau_gem_object_close,
+
+	.dumb_create = nouveau_display_dumb_create,
+	.dumb_map_offset = nouveau_display_dumb_map_offset,
+	.dumb_destroy = nouveau_display_dumb_destroy,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+#ifdef GIT_REVISION
+	.date = GIT_REVISION,
+#else
+	.date = DRIVER_DATE,
+#endif
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct pci_device_id
+nouveau_drm_pci_table[] = {
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
+		.class = PCI_BASE_CLASS_DISPLAY << 16,
+		.class_mask  = 0xff << 16,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
+		.class = PCI_BASE_CLASS_DISPLAY << 16,
+		.class_mask  = 0xff << 16,
+	},
+	{}
+};
+
+static const struct dev_pm_ops nouveau_pm_ops = {
+	.suspend = nouveau_pmops_suspend,
+	.resume = nouveau_pmops_resume,
+	.freeze = nouveau_pmops_freeze,
+	.thaw = nouveau_pmops_thaw,
+	.poweroff = nouveau_pmops_freeze,
+	.restore = nouveau_pmops_resume,
+};
+
+static struct pci_driver
+nouveau_drm_pci_driver = {
+	.name = "nouveau",
+	.id_table = nouveau_drm_pci_table,
+	.probe = nouveau_drm_probe,
+	.remove = nouveau_drm_remove,
+	.driver.pm = &nouveau_pm_ops,
+};
+
+static int __init
+nouveau_drm_init(void)
+{
+	driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
+
+	if (nouveau_modeset == -1) {
+#ifdef CONFIG_VGA_CONSOLE
+		if (vgacon_text_force())
+			nouveau_modeset = 0;
+#endif
+	}
+
+	if (!nouveau_modeset)
+		return 0;
+
+	nouveau_register_dsm_handler();
+	return drm_pci_init(&driver, &nouveau_drm_pci_driver);
+}
+
+static void __exit
+nouveau_drm_exit(void)
+{
+	if (!nouveau_modeset)
+		return;
+
+	drm_pci_exit(&driver, &nouveau_drm_pci_driver);
+	nouveau_unregister_dsm_handler();
+}
+
+module_init(nouveau_drm_init);
+module_exit(nouveau_drm_exit);
+
+MODULE_DEVICE_TABLE(pci, nouveau_drm_pci_table);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_drm.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_drm.h
new file mode 100644
index 0000000..f2b30f8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -0,0 +1,159 @@
+#ifndef __NOUVEAU_DRMCLI_H__
+#define __NOUVEAU_DRMCLI_H__
+
+#define DRIVER_AUTHOR		"Nouveau Project"
+#define DRIVER_EMAIL		"nouveau@lists.freedesktop.org"
+
+#define DRIVER_NAME		"nouveau"
+#define DRIVER_DESC		"nVidia Riva/TNT/GeForce/Quadro/Tesla"
+#define DRIVER_DATE		"20120801"
+
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		1
+#define DRIVER_PATCHLEVEL	1
+
+/*
+ * 1.1.1:
+ *      - added support for tiled system memory buffer objects
+ *      - added support for NOUVEAU_GETPARAM_GRAPH_UNITS on [nvc0,nve0].
+ *      - added support for compressed memory storage types on [nvc0,nve0].
+ *      - added support for software methods 0x600,0x644,0x6ac on nvc0
+ *        to control registers on the MPs to enable performance counters,
+ *        and to control the warp error enable mask (OpenGL requires out of
+ *        bounds access to local memory to be silently ignored / return 0).
+ */
+
+#include <core/client.h>
+#include <core/event.h>
+
+#include <subdev/vm.h>
+
+#include <drmP.h>
+#include <drm/nouveau_drm.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_page_alloc.h>
+
+struct nouveau_channel;
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+#include "nouveau_fence.h"
+#include "nouveau_bios.h"
+
+struct nouveau_drm_tile {
+	struct nouveau_fence *fence;
+	bool used;
+};
+
+enum nouveau_drm_handle {
+	NVDRM_CLIENT = 0xffffffff,
+	NVDRM_DEVICE = 0xdddddddd,
+	NVDRM_PUSH   = 0xbbbb0000, /* |= client chid */
+	NVDRM_CHAN   = 0xcccc0000, /* |= client chid */
+};
+
+struct nouveau_cli {
+	struct nouveau_client base;
+	struct list_head head;
+	struct mutex mutex;
+	void *abi16;
+};
+
+static inline struct nouveau_cli *
+nouveau_cli(struct drm_file *fpriv)
+{
+	return fpriv ? fpriv->driver_priv : NULL;
+}
+
+struct nouveau_drm {
+	struct nouveau_cli client;
+	struct drm_device *dev;
+
+	struct nouveau_object *device;
+	struct list_head clients;
+
+	struct {
+		enum {
+			UNKNOWN = 0,
+			DISABLE = 1,
+			ENABLED = 2
+		} stat;
+		u32 base;
+		u32 size;
+	} agp;
+
+	/* TTM interface support */
+	struct {
+		struct drm_global_reference mem_global_ref;
+		struct ttm_bo_global_ref bo_global_ref;
+		struct ttm_bo_device bdev;
+		atomic_t validate_sequence;
+		int (*move)(struct nouveau_channel *,
+			    struct ttm_buffer_object *,
+			    struct ttm_mem_reg *, struct ttm_mem_reg *);
+		int mtrr;
+	} ttm;
+
+	/* GEM interface support */
+	struct {
+		u64 vram_available;
+		u64 gart_available;
+	} gem;
+
+	/* synchronisation */
+	void *fence;
+
+	/* context for accelerated drm-internal operations */
+	struct nouveau_channel *cechan;
+	struct nouveau_channel *channel;
+	struct nouveau_gpuobj *notify;
+	struct nouveau_fbdev *fbcon;
+
+	/* nv10-nv40 tiling regions */
+	struct {
+		struct nouveau_drm_tile reg[15];
+		spinlock_t lock;
+	} tile;
+
+	/* modesetting */
+	struct nvbios vbios;
+	struct nouveau_display *display;
+	struct backlight_device *backlight;
+	struct nouveau_eventh vblank[4];
+
+	/* power management */
+	struct nouveau_pm *pm;
+};
+
+static inline struct nouveau_drm *
+nouveau_drm(struct drm_device *dev)
+{
+	return dev->dev_private;
+}
+
+static inline struct nouveau_device *
+nouveau_dev(struct drm_device *dev)
+{
+	return nv_device(nouveau_drm(dev)->device);
+}
+
+int nouveau_pmops_suspend(struct device *);
+int nouveau_pmops_resume(struct device *);
+
+#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
+#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
+#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
+#define NV_INFO(cli, fmt, args...) nv_info((cli), fmt, ##args)
+#define NV_DEBUG(cli, fmt, args...) do {                                       \
+	if (drm_debug & DRM_UT_DRIVER)                                         \
+		nv_info((cli), fmt, ##args);                                   \
+} while (0)
+
+extern int nouveau_modeset;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_encoder.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_encoder.h
new file mode 100644
index 0000000..24660c0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_ENCODER_H__
+#define __NOUVEAU_ENCODER_H__
+
+#include <subdev/bios/dcb.h>
+
+#include <drm/drm_encoder_slave.h>
+#include "dispnv04/disp.h"
+
+#define NV_DPMS_CLEARED 0x80
+
+struct nouveau_i2c_port;
+
+struct nouveau_encoder {
+	struct drm_encoder_slave base;
+
+	struct dcb_output *dcb;
+	int or;
+	struct nouveau_i2c_port *i2c;
+
+	/* different to drm_encoder.crtc, this reflects what's
+	 * actually programmed on the hw, not the proposed crtc */
+	struct drm_crtc *crtc;
+
+	struct drm_display_mode mode;
+	int last_dpms;
+
+	struct nv04_output_reg restore;
+
+	union {
+		struct {
+			u8  dpcd[8];
+			int link_nr;
+			int link_bw;
+			u32 datarate;
+		} dp;
+	};
+};
+
+struct nouveau_encoder *
+find_encoder(struct drm_connector *connector, int type);
+
+static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
+{
+	struct drm_encoder_slave *slave = to_encoder_slave(enc);
+
+	return container_of(slave, struct nouveau_encoder, base);
+}
+
+static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
+{
+	return &enc->base.base;
+}
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct drm_encoder *enc)
+{
+	return to_encoder_slave(enc)->slave_funcs;
+}
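+
+/* nouveau_encoder embeds a drm_encoder_slave, which itself embeds the
+ * drm_encoder, so the helpers above are plain container_of() casts in
+ * both directions.  Illustrative round trip:
+ *
+ *	struct drm_encoder *enc = to_drm_encoder(nv_encoder);
+ *	BUG_ON(nouveau_encoder(enc) != nv_encoder);
+ */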
+
+/* nouveau_dp.c */
+bool nouveau_dp_detect(struct drm_encoder *);
+void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
+		     struct nouveau_object *);
+
+struct nouveau_connector *
+nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
+
+#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_fbcon.c
new file mode 100644
index 0000000..b035317
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -0,0 +1,528 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/screen_info.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/console.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_gem.h"
+#include "nouveau_bo.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_chan.h"
+
+#include "nouveau_crtc.h"
+
+#include <core/client.h>
+#include <core/device.h>
+
+#include <subdev/fb.h>
+
+static int nouveau_nofbaccel;
+module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
+
+static void
+nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&drm->client.mutex)) {
+		if (device->card_type < NV_50)
+			ret = nv04_fbcon_fillrect(info, rect);
+		else
+		if (device->card_type < NV_C0)
+			ret = nv50_fbcon_fillrect(info, rect);
+		else
+			ret = nvc0_fbcon_fillrect(info, rect);
+		mutex_unlock(&drm->client.mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_fillrect(info, rect);
+}
+
+static void
+nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
+{
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&drm->client.mutex)) {
+		if (device->card_type < NV_50)
+			ret = nv04_fbcon_copyarea(info, image);
+		else
+		if (device->card_type < NV_C0)
+			ret = nv50_fbcon_copyarea(info, image);
+		else
+			ret = nvc0_fbcon_copyarea(info, image);
+		mutex_unlock(&drm->client.mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_copyarea(info, image);
+}
+
+static void
+nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&drm->client.mutex)) {
+		if (device->card_type < NV_50)
+			ret = nv04_fbcon_imageblit(info, image);
+		else
+		if (device->card_type < NV_C0)
+			ret = nv50_fbcon_imageblit(info, image);
+		else
+			ret = nvc0_fbcon_imageblit(info, image);
+		mutex_unlock(&drm->client.mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_imageblit(info, image);
+}
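+
+/* The three hooks above share one pattern: if the console is live, we are
+ * not in interrupt context and acceleration is enabled, try the accelerated
+ * path for the detected generation (nv04/nv50/nvc0, selected by card_type).
+ * Any failure other than -ENODEV is treated as a GPU lockup, which disables
+ * acceleration for good, and in every failure case the operation is
+ * replayed through the software cfb_* fallback so no drawing is lost.
+ */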
+
+static int
+nouveau_fbcon_sync(struct fb_info *info)
+{
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	struct nouveau_channel *chan = drm->channel;
+	int ret;
+
+	if (!chan || !chan->accel_done || in_interrupt() ||
+	    info->state != FBINFO_STATE_RUNNING ||
+	    info->flags & FBINFO_HWACCEL_DISABLED)
+		return 0;
+
+	if (!mutex_trylock(&drm->client.mutex))
+		return 0;
+
+	ret = nouveau_channel_idle(chan);
+	mutex_unlock(&drm->client.mutex);
+	if (ret) {
+		nouveau_fbcon_gpu_lockup(info);
+		return 0;
+	}
+
+	chan->accel_done = false;
+	return 0;
+}
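+
+/* fb_sync must guarantee that all queued accelerated rendering has landed
+ * before fbcon touches the framebuffer directly; this is done by idling
+ * the DRM channel.  A failed idle is flagged as a lockup, but 0 is still
+ * returned since fbcon has no useful way to handle a sync error.
+ */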
+
+static struct fb_ops nouveau_fbcon_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = nouveau_fbcon_fillrect,
+	.fb_copyarea = nouveau_fbcon_copyarea,
+	.fb_imageblit = nouveau_fbcon_imageblit,
+	.fb_sync = nouveau_fbcon_sync,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static struct fb_ops nouveau_fbcon_sw_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = cfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
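+
+/* Two fb_ops tables are kept: every framebuffer starts out on the software
+ * (cfb_*) table above, and is switched to the accelerated table only once
+ * the per-generation accel init succeeds in nouveau_fbcon_create(), or
+ * never if nofbaccel=1 was passed.
+ */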
+
+static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+				    u16 blue, int regno)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+	nv_crtc->lut.r[regno] = red;
+	nv_crtc->lut.g[regno] = green;
+	nv_crtc->lut.b[regno] = blue;
+}
+
+static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+				    u16 *blue, int regno)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+	*red = nv_crtc->lut.r[regno];
+	*green = nv_crtc->lut.g[regno];
+	*blue = nv_crtc->lut.b[regno];
+}
+
+static void
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
+{
+	struct fb_info *info = fbcon->helper.fbdev;
+	struct fb_fillrect rect;
+
+	/* Clear the entire fbcon.  The drm will program every connector
+	 * with its preferred mode.  If the sizes differ, one display will
+	 * quite likely have garbage around the console.
+	 */
+	rect.dx = rect.dy = 0;
+	rect.width = info->var.xres_virtual;
+	rect.height = info->var.yres_virtual;
+	rect.color = 0;
+	rect.rop = ROP_COPY;
+	info->fbops->fb_fillrect(info, &rect);
+}
+
+static int
+nouveau_fbcon_create(struct drm_fb_helper *helper,
+		     struct drm_fb_helper_surface_size *sizes)
+{
+	struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
+	struct drm_device *dev = fbcon->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct fb_info *info;
+	struct drm_framebuffer *fb;
+	struct nouveau_framebuffer *nouveau_fb;
+	struct nouveau_channel *chan;
+	struct nouveau_bo *nvbo;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct pci_dev *pdev = dev->pdev;
+	int size, ret;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+
+	mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
+	mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	size = roundup(size, PAGE_SIZE);
+
+	ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
+			      0, 0x0000, &nvbo);
+	if (ret) {
+		NV_ERROR(drm, "failed to allocate framebuffer\n");
+		goto out;
+	}
+
+	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
+	if (ret) {
+		NV_ERROR(drm, "failed to pin fb: %d\n", ret);
+		nouveau_bo_ref(NULL, &nvbo);
+		goto out;
+	}
+
+	ret = nouveau_bo_map(nvbo);
+	if (ret) {
+		NV_ERROR(drm, "failed to map fb: %d\n", ret);
+		nouveau_bo_unpin(nvbo);
+		nouveau_bo_ref(NULL, &nvbo);
+		goto out;
+	}
+
+	chan = nouveau_nofbaccel ? NULL : drm->channel;
+	if (chan && device->card_type >= NV_50) {
+		ret = nouveau_bo_vma_add(nvbo, nv_client(chan->cli)->vm,
+					&fbcon->nouveau_fb.vma);
+		if (ret) {
+			NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
+			chan = NULL;
+		}
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	info = framebuffer_alloc(0, &pdev->dev);
+	if (!info) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
+	info->par = fbcon;
+
+	nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
+
+	nouveau_fb = &fbcon->nouveau_fb;
+	fb = &nouveau_fb->base;
+
+	/* setup helper */
+	fbcon->helper.fb = fb;
+	fbcon->helper.fbdev = info;
+
+	strcpy(info->fix.id, "nouveaufb");
+	if (nouveau_nofbaccel)
+		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
+	else
+		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+			      FBINFO_HWACCEL_FILLRECT |
+			      FBINFO_HWACCEL_IMAGEBLIT;
+	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &nouveau_fbcon_sw_ops;
+	info->fix.smem_start = nvbo->bo.mem.bus.base +
+			       nvbo->bo.mem.bus.offset;
+	info->fix.smem_len = size;
+
+	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
+	info->screen_size = size;
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
+
+	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+	mutex_unlock(&dev->struct_mutex);
+
+	if (chan) {
+		ret = -ENODEV;
+		if (device->card_type < NV_50)
+			ret = nv04_fbcon_accel_init(info);
+		else
+		if (device->card_type < NV_C0)
+			ret = nv50_fbcon_accel_init(info);
+		else
+			ret = nvc0_fbcon_accel_init(info);
+
+		if (ret == 0)
+			info->fbops = &nouveau_fbcon_ops;
+	}
+
+	nouveau_fbcon_zfill(dev, fbcon);
+
+	/* To allow resizing without swapping buffers */
+	NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n",
+		nouveau_fb->base.width, nouveau_fb->base.height,
+		nvbo->bo.offset, nvbo);
+
+	vga_switcheroo_client_fb_set(dev->pdev, info);
+	return 0;
+
+out_unref:
+	mutex_unlock(&dev->struct_mutex);
+out:
+	return ret;
+}
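+
+/* Summary of the creation path above: allocate a VRAM bo sized for the
+ * surface (pitch rounded up to 256 bytes, total size to a page), pin and
+ * map it, optionally add it to the DRM channel's VM on nv50+ so the accel
+ * paths can address it, then wrap it in an fb_info that begins life on
+ * the software ops and is upgraded only if accel init succeeds.
+ */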
+
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	drm_fb_helper_hotplug_event(&drm->fbcon->helper);
+}
+
+static int
+nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
+{
+	struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
+	struct fb_info *info;
+
+	if (fbcon->helper.fbdev) {
+		info = fbcon->helper.fbdev;
+		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	if (nouveau_fb->nvbo) {
+		nouveau_bo_unmap(nouveau_fb->nvbo);
+		nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
+		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
+		nouveau_fb->nvbo = NULL;
+	}
+	drm_fb_helper_fini(&fbcon->helper);
+	drm_framebuffer_unregister_private(&nouveau_fb->base);
+	drm_framebuffer_cleanup(&nouveau_fb->base);
+	return 0;
+}
+
+void nouveau_fbcon_gpu_lockup(struct fb_info *info)
+{
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+
+	NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
+	info->flags |= FBINFO_HWACCEL_DISABLED;
+}
+
+static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+	.gamma_set = nouveau_fbcon_gamma_set,
+	.gamma_get = nouveau_fbcon_gamma_get,
+	.fb_probe = nouveau_fbcon_create,
+};
+
+
+int
+nouveau_fbcon_init(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_fbdev *fbcon;
+	int preferred_bpp;
+	int ret;
+
+	if (!dev->mode_config.num_crtc)
+		return 0;
+
+	fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+	if (!fbcon)
+		return -ENOMEM;
+
+	fbcon->dev = dev;
+	drm->fbcon = fbcon;
+	fbcon->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+	ret = drm_fb_helper_init(dev, &fbcon->helper,
+				 dev->mode_config.num_crtc, 4);
+	if (ret) {
+		kfree(fbcon);
+		return ret;
+	}
+
+	drm_fb_helper_single_add_all_connectors(&fbcon->helper);
+
+	if (pfb->ram.size <= 32 * 1024 * 1024)
+		preferred_bpp = 8;
+	else
+	if (pfb->ram.size <= 64 * 1024 * 1024)
+		preferred_bpp = 16;
+	else
+		preferred_bpp = 32;
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
+	drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
+	return 0;
+}
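+
+/* The preferred_bpp ladder above trades console depth for VRAM: boards
+ * with at most 32MiB get an 8bpp console, up to 64MiB get 16bpp, and
+ * everything larger gets 32bpp, keeping the scanout buffer small on
+ * memory-starved cards.
+ */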
+
+void
+nouveau_fbcon_fini(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (!drm->fbcon)
+		return;
+
+	nouveau_fbcon_destroy(dev, drm->fbcon);
+	kfree(drm->fbcon);
+	drm->fbcon = NULL;
+}
+
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
+	drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+}
+
+void nouveau_fbcon_restore_accel(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
+}
+
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	console_lock();
+	if (state == 0)
+		nouveau_fbcon_save_disable_accel(dev);
+	fb_set_suspend(drm->fbcon->helper.fbdev, state);
+	if (state == 1)
+		nouveau_fbcon_restore_accel(dev);
+	console_unlock();
+}
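+
+/* fb_set_suspend() treats state 1 as freeze and state 0 as thaw.  On thaw
+ * the accelerated flags are masked off first, so the redraw it triggers
+ * runs through the software paths, and on freeze the saved flags are put
+ * back once the console is quiesced.
+ */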
+
+void nouveau_fbcon_zfill_all(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	nouveau_fbcon_zfill(dev, drm->fbcon);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_fbcon.h
new file mode 100644
index 0000000..fdfc0c9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_FBCON_H__
+#define __NOUVEAU_FBCON_H__
+
+#include <drm/drm_fb_helper.h>
+
+#include "nouveau_display.h"
+
+struct nouveau_fbdev {
+	struct drm_fb_helper helper;
+	struct nouveau_framebuffer nouveau_fb;
+	struct list_head fbdev_list;
+	struct drm_device *dev;
+	unsigned int saved_flags;
+};
+
+void nouveau_fbcon_restore(void);
+
+int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nv04_fbcon_accel_init(struct fb_info *info);
+
+int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nv50_fbcon_accel_init(struct fb_info *info);
+
+int nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nvc0_fbcon_accel_init(struct fb_info *info);
+
+void nouveau_fbcon_gpu_lockup(struct fb_info *info);
+
+int nouveau_fbcon_init(struct drm_device *dev);
+void nouveau_fbcon_fini(struct drm_device *dev);
+void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
+void nouveau_fbcon_zfill_all(struct drm_device *dev);
+void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
+void nouveau_fbcon_restore_accel(struct drm_device *dev);
+
+void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
+#endif /* __NOUVEAU_FBCON_H__ */
+
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_fence.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_fence.c
new file mode 100644
index 0000000..6c94683
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_fence.h"
+
+#include <engine/fifo.h>
+
+void
+nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+{
+	struct nouveau_fence *fence, *fnext;
+	spin_lock(&fctx->lock);
+	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+		fence->channel = NULL;
+		list_del(&fence->head);
+		nouveau_fence_unref(&fence);
+	}
+	spin_unlock(&fctx->lock);
+}
+
+void
+nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
+{
+	INIT_LIST_HEAD(&fctx->flip);
+	INIT_LIST_HEAD(&fctx->pending);
+	spin_lock_init(&fctx->lock);
+}
+
+static void
+nouveau_fence_update(struct nouveau_channel *chan)
+{
+	struct nouveau_fence_chan *fctx = chan->fence;
+	struct nouveau_fence *fence, *fnext;
+
+	spin_lock(&fctx->lock);
+	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+		if (fctx->read(chan) < fence->sequence)
+			break;
+
+		fence->channel = NULL;
+		list_del(&fence->head);
+		nouveau_fence_unref(&fence);
+	}
+	spin_unlock(&fctx->lock);
+}
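+
+/* Fences retire strictly in order: each channel carries a monotonically
+ * increasing 32-bit sequence, and fctx->read() reports the last sequence
+ * the GPU wrote back, so every fence on the pending list up to that value
+ * is complete and can be dropped in one pass.
+ */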
+
+int
+nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
+{
+	struct nouveau_fence_chan *fctx = chan->fence;
+	int ret;
+
+	fence->channel  = chan;
+	fence->timeout  = jiffies + (3 * DRM_HZ);
+	fence->sequence = ++fctx->sequence;
+
+	ret = fctx->emit(fence);
+	if (!ret) {
+		kref_get(&fence->kref);
+		spin_lock(&fctx->lock);
+		list_add_tail(&fence->head, &fctx->pending);
+		spin_unlock(&fctx->lock);
+	}
+
+	return ret;
+}
+
+bool
+nouveau_fence_done(struct nouveau_fence *fence)
+{
+	if (fence->channel)
+		nouveau_fence_update(fence->channel);
+	return !fence->channel;
+}
+
+struct nouveau_fence_uevent {
+	struct nouveau_eventh handler;
+	struct nouveau_fence_priv *priv;
+};
+
+static int
+nouveau_fence_wait_uevent_handler(struct nouveau_eventh *event, int index)
+{
+	struct nouveau_fence_uevent *uevent =
+		container_of(event, struct nouveau_fence_uevent, handler);
+	wake_up_all(&uevent->priv->waiting);
+	return NVKM_EVENT_KEEP;
+}
+
+static int
+nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
+{
+	struct nouveau_channel *chan = fence->channel;
+	struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
+	struct nouveau_fence_priv *priv = chan->drm->fence;
+	struct nouveau_fence_uevent uevent = {
+		.handler.func = nouveau_fence_wait_uevent_handler,
+		.priv = priv,
+	};
+	int ret = 0;
+
+	nouveau_event_get(pfifo->uevent, 0, &uevent.handler);
+
+	if (fence->timeout) {
+		unsigned long timeout = fence->timeout - jiffies;
+
+		if (time_before(jiffies, fence->timeout)) {
+			if (intr) {
+				ret = wait_event_interruptible_timeout(
+						priv->waiting,
+						nouveau_fence_done(fence),
+						timeout);
+			} else {
+				ret = wait_event_timeout(priv->waiting,
+						nouveau_fence_done(fence),
+						timeout);
+			}
+		}
+
+		if (ret >= 0) {
+			fence->timeout = jiffies + ret;
+			if (time_after_eq(jiffies, fence->timeout))
+				ret = -EBUSY;
+		}
+	} else {
+		if (intr) {
+			ret = wait_event_interruptible(priv->waiting,
+					nouveau_fence_done(fence));
+		} else {
+			wait_event(priv->waiting, nouveau_fence_done(fence));
+		}
+	}
+
+	nouveau_event_put(pfifo->uevent, 0, &uevent.handler);
+	if (unlikely(ret < 0))
+		return ret;
+
+	return 0;
+}
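+
+/* The uevent path avoids polling: a temporary handler is hooked onto the
+ * fifo's non-stall interrupt event and every interrupt wakes all sleepers,
+ * each of which re-checks nouveau_fence_done().  The wait_event_* return
+ * value (time remaining) is folded back into fence->timeout so repeated
+ * calls never extend the original budget.
+ */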
+
+int
+nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
+{
+	struct nouveau_channel *chan = fence->channel;
+	struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL;
+	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
+	ktime_t t;
+	int ret = 0;
+
+	while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) {
+		ret = nouveau_fence_wait_uevent(fence, intr);
+		if (ret < 0)
+			return ret;
+	}
+
+	while (!nouveau_fence_done(fence)) {
+		if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
+			ret = -EBUSY;
+			break;
+		}
+
+		__set_current_state(intr ? TASK_INTERRUPTIBLE :
+					   TASK_UNINTERRUPTIBLE);
+		if (lazy) {
+			t = ktime_set(0, sleep_time);
+			schedule_hrtimeout(&t, HRTIMER_MODE_REL);
+			sleep_time *= 2;
+			if (sleep_time > NSEC_PER_MSEC)
+				sleep_time = NSEC_PER_MSEC;
+		}
+
+		if (intr && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+
+	__set_current_state(TASK_RUNNING);
+	return ret;
+}
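+
+/* Without a usable uevent the wait above polls with exponential backoff:
+ * the hrtimer sleep starts at 1us and doubles up to a 1ms cap, keeping
+ * latency low for fences that retire quickly without burning CPU on the
+ * long waits.
+ */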
+
+int
+nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
+{
+	struct nouveau_fence_chan *fctx = chan->fence;
+	struct nouveau_channel *prev;
+	int ret = 0;
+
+	prev = fence ? fence->channel : NULL;
+	if (prev) {
+		if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
+			ret = fctx->sync(fence, prev, chan);
+			if (unlikely(ret))
+				ret = nouveau_fence_wait(fence, true, false);
+		}
+	}
+
+	return ret;
+}
+
+static void
+nouveau_fence_del(struct kref *kref)
+{
+	struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
+	kfree(fence);
+}
+
+void
+nouveau_fence_unref(struct nouveau_fence **pfence)
+{
+	if (*pfence)
+		kref_put(&(*pfence)->kref, nouveau_fence_del);
+	*pfence = NULL;
+}
+
+struct nouveau_fence *
+nouveau_fence_ref(struct nouveau_fence *fence)
+{
+	kref_get(&fence->kref);
+	return fence;
+}
+
+int
+nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
+		  struct nouveau_fence **pfence)
+{
+	struct nouveau_fence *fence;
+	int ret = 0;
+
+	if (unlikely(!chan->fence))
+		return -ENODEV;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return -ENOMEM;
+
+	fence->sysmem = sysmem;
+	kref_init(&fence->kref);
+
+	ret = nouveau_fence_emit(fence, chan);
+	if (ret)
+		nouveau_fence_unref(&fence);
+
+	*pfence = fence;
+	return ret;
+}
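+
+/* Note that nouveau_fence_new() allocates and emits in one step: if the
+ * emit fails, nouveau_fence_unref() both drops the allocation and NULLs
+ * the local pointer, so *pfence is never left dangling and callers only
+ * ever see fences that made it onto a channel.
+ */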
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_fence.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_fence.h
new file mode 100644
index 0000000..c899434
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -0,0 +1,96 @@
+#ifndef __NOUVEAU_FENCE_H__
+#define __NOUVEAU_FENCE_H__
+
+struct nouveau_drm;
+
+struct nouveau_fence {
+	struct list_head head;
+	struct kref kref;
+
+	bool sysmem;
+
+	struct nouveau_channel *channel;
+	unsigned long timeout;
+	u32 sequence;
+};
+
+int  nouveau_fence_new(struct nouveau_channel *, bool sysmem,
+		       struct nouveau_fence **);
+struct nouveau_fence *
+nouveau_fence_ref(struct nouveau_fence *);
+void nouveau_fence_unref(struct nouveau_fence **);
+
+int  nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
+bool nouveau_fence_done(struct nouveau_fence *);
+int  nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
+int  nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
+
+struct nouveau_fence_chan {
+	struct list_head pending;
+	struct list_head flip;
+
+	int  (*emit)(struct nouveau_fence *);
+	int  (*sync)(struct nouveau_fence *, struct nouveau_channel *,
+		     struct nouveau_channel *);
+	u32  (*read)(struct nouveau_channel *);
+	int  (*emit32)(struct nouveau_channel *, u64, u32);
+	int  (*sync32)(struct nouveau_channel *, u64, u32);
+
+	spinlock_t lock;
+	u32 sequence;
+};
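+
+/* Per-channel fence state: pending holds emitted-but-unsignalled fences in
+ * submission order, while the emit/sync/read hooks are supplied by the
+ * per-generation backends (nv04/nv10/nv17/nv50/nv84/nvc0, declared below).
+ */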
+
+struct nouveau_fence_priv {
+	void (*dtor)(struct nouveau_drm *);
+	bool (*suspend)(struct nouveau_drm *);
+	void (*resume)(struct nouveau_drm *);
+	int  (*context_new)(struct nouveau_channel *);
+	void (*context_del)(struct nouveau_channel *);
+
+	wait_queue_head_t waiting;
+	bool uevent;
+};
+
+#define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
+
+void nouveau_fence_context_new(struct nouveau_fence_chan *);
+void nouveau_fence_context_del(struct nouveau_fence_chan *);
+
+int nv04_fence_create(struct nouveau_drm *);
+int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
+
+int  nv10_fence_emit(struct nouveau_fence *);
+int  nv17_fence_sync(struct nouveau_fence *, struct nouveau_channel *,
+		     struct nouveau_channel *);
+u32  nv10_fence_read(struct nouveau_channel *);
+void nv10_fence_context_del(struct nouveau_channel *);
+void nv10_fence_destroy(struct nouveau_drm *);
+int  nv10_fence_create(struct nouveau_drm *);
+
+int  nv17_fence_create(struct nouveau_drm *);
+void nv17_fence_resume(struct nouveau_drm *drm);
+
+int nv50_fence_create(struct nouveau_drm *);
+int nv84_fence_create(struct nouveau_drm *);
+int nvc0_fence_create(struct nouveau_drm *);
+
+int nouveau_flip_complete(void *chan);
+
+struct nv84_fence_chan {
+	struct nouveau_fence_chan base;
+	struct nouveau_vma vma;
+	struct nouveau_vma vma_gart;
+	struct nouveau_vma dispc_vma[4];
+};
+
+struct nv84_fence_priv {
+	struct nouveau_fence_priv base;
+	struct nouveau_bo *bo;
+	struct nouveau_bo *bo_gart;
+	u32 *suspend;
+};
+
+u64  nv84_fence_crtc(struct nouveau_channel *, int);
+int  nv84_fence_context_new(struct nouveau_channel *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_gem.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_gem.c
new file mode 100644
index 0000000..5bccf31
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -0,0 +1,891 @@
+/*
+ * Copyright (C) 2008 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_fence.h"
+#include "nouveau_abi16.h"
+
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
+
+int
+nouveau_gem_object_new(struct drm_gem_object *gem)
+{
+	return 0;
+}
+
+void
+nouveau_gem_object_del(struct drm_gem_object *gem)
+{
+	struct nouveau_bo *nvbo = gem->driver_private;
+	struct ttm_buffer_object *bo = &nvbo->bo;
+
+	if (!nvbo)
+		return;
+	nvbo->gem = NULL;
+
+	if (unlikely(nvbo->pin_refcnt)) {
+		nvbo->pin_refcnt = 1;
+		nouveau_bo_unpin(nvbo);
+	}
+
+	if (gem->import_attach)
+		drm_prime_gem_destroy(gem, nvbo->bo.sg);
+
+	ttm_bo_unref(&bo);
+
+	drm_gem_object_release(gem);
+	kfree(gem);
+}
+
+int
+nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
+{
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+	struct nouveau_vma *vma;
+	int ret;
+
+	if (!cli->base.vm)
+		return 0;
+
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+	if (ret)
+		return ret;
+
+	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+	if (!vma) {
+		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+		if (!vma) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
+		if (ret) {
+			kfree(vma);
+			goto out;
+		}
+	} else {
+		vma->refcount++;
+	}
+
+out:
+	ttm_bo_unreserve(&nvbo->bo);
+	return ret;
+}
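+
+/* Every client (open file) with its own VM gets a private mapping of the
+ * bo.  The mapping is refcounted, so repeated opens of one handle share a
+ * single nouveau_vma, and the close path below frees it only when the
+ * count drops to zero.
+ */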
+
+void
+nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
+{
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+	struct nouveau_vma *vma;
+	int ret;
+
+	if (!cli->base.vm)
+		return;
+
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+	if (ret)
+		return;
+
+	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+	if (vma) {
+		if (--vma->refcount == 0) {
+			nouveau_bo_vma_del(nvbo, vma);
+			kfree(vma);
+		}
+	}
+	ttm_bo_unreserve(&nvbo->bo);
+}
+
+int
+nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
+		uint32_t tile_mode, uint32_t tile_flags,
+		struct nouveau_bo **pnvbo)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_bo *nvbo;
+	u32 flags = 0;
+	int ret;
+
+	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
+		flags |= TTM_PL_FLAG_VRAM;
+	if (domain & NOUVEAU_GEM_DOMAIN_GART)
+		flags |= TTM_PL_FLAG_TT;
+	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
+		flags |= TTM_PL_FLAG_SYSTEM;
+
+	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
+			     tile_flags, NULL, pnvbo);
+	if (ret)
+		return ret;
+	nvbo = *pnvbo;
+
+	/* we restrict allowed domains on nv50+ to only the types
+	 * that were requested at creation time.  this is not possible on
+	 * earlier chips without breaking the ABI.
+	 */
+	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+			      NOUVEAU_GEM_DOMAIN_GART;
+	if (nv_device(drm->device)->card_type >= NV_50)
+		nvbo->valid_domains &= domain;
+
+	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
+	if (!nvbo->gem) {
+		nouveau_bo_ref(NULL, pnvbo);
+		return -ENOMEM;
+	}
+
+	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
+	nvbo->gem->driver_private = nvbo;
+	return 0;
+}
+
+static int
+nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
+		 struct drm_nouveau_gem_info *rep)
+{
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+	struct nouveau_vma *vma;
+
+	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
+	else
+		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
+
+	rep->offset = nvbo->bo.offset;
+	if (cli->base.vm) {
+		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+		if (!vma)
+			return -EINVAL;
+
+		rep->offset = vma->offset;
+	}
+
+	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+	rep->map_handle = nvbo->bo.addr_space_offset;
+	rep->tile_mode = nvbo->tile_mode;
+	rep->tile_flags = nvbo->tile_flags;
+	return 0;
+}
+
+int
+nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct drm_nouveau_gem_new *req = data;
+	struct nouveau_bo *nvbo = NULL;
+	int ret = 0;
+
+	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
+
+	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
+		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
+		return -EINVAL;
+	}
+
+	ret = nouveau_gem_new(dev, req->info.size, req->align,
+			      req->info.domain, req->info.tile_mode,
+			      req->info.tile_flags, &nvbo);
+	if (ret)
+		return ret;
+
+	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+	if (ret == 0) {
+		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
+		if (ret)
+			drm_gem_handle_delete(file_priv, req->info.handle);
+	}
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(nvbo->gem);
+	return ret;
+}
+
+static int
+nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
+		       uint32_t write_domains, uint32_t valid_domains)
+{
+	struct nouveau_bo *nvbo = gem->driver_private;
+	struct ttm_buffer_object *bo = &nvbo->bo;
+	uint32_t domains = valid_domains & nvbo->valid_domains &
+		(write_domains ? write_domains : read_domains);
+	uint32_t pref_flags = 0, valid_flags = 0;
+
+	if (!domains)
+		return -EINVAL;
+
+	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
+		valid_flags |= TTM_PL_FLAG_VRAM;
+
+	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
+		valid_flags |= TTM_PL_FLAG_TT;
+
+	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+	    bo->mem.mem_type == TTM_PL_VRAM)
+		pref_flags |= TTM_PL_FLAG_VRAM;
+
+	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
+		 bo->mem.mem_type == TTM_PL_TT)
+		pref_flags |= TTM_PL_FLAG_TT;
+
+	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
+		pref_flags |= TTM_PL_FLAG_VRAM;
+
+	else
+		pref_flags |= TTM_PL_FLAG_TT;
+
+	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
+
+	return 0;
+}
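+
+/* Placement selection above prefers wherever the bo currently resides,
+ * provided that is still a permitted domain, to avoid a needless
+ * migration; only when the current placement is invalid does it fall back
+ * to VRAM first and then GART.
+ */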
+
+struct validate_op {
+	struct list_head vram_list;
+	struct list_head gart_list;
+	struct list_head both_list;
+};
+
+static void
+validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
+{
+	struct list_head *entry, *tmp;
+	struct nouveau_bo *nvbo;
+
+	list_for_each_safe(entry, tmp, list) {
+		nvbo = list_entry(entry, struct nouveau_bo, entry);
+
+		if (likely(fence))
+			nouveau_bo_fence(nvbo, fence);
+
+		if (unlikely(nvbo->validate_mapped)) {
+			ttm_bo_kunmap(&nvbo->kmap);
+			nvbo->validate_mapped = false;
+		}
+
+		list_del(&nvbo->entry);
+		nvbo->reserved_by = NULL;
+		ttm_bo_unreserve(&nvbo->bo);
+		drm_gem_object_unreference_unlocked(nvbo->gem);
+	}
+}
+
+static void
+validate_fini(struct validate_op *op, struct nouveau_fence* fence)
+{
+	validate_fini_list(&op->vram_list, fence);
+	validate_fini_list(&op->gart_list, fence);
+	validate_fini_list(&op->both_list, fence);
+}
+
+static int
+validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
+	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
+	      int nr_buffers, struct validate_op *op)
+{
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct drm_device *dev = chan->drm->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	uint32_t sequence;
+	int trycnt = 0;
+	int ret, i;
+	struct nouveau_bo *res_bo = NULL;
+
+	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
+retry:
+	if (++trycnt > 100000) {
+		NV_ERROR(cli, "%s failed and gave up.\n", __func__);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < nr_buffers; i++) {
+		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
+		struct drm_gem_object *gem;
+		struct nouveau_bo *nvbo;
+
+		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
+		if (!gem) {
+			NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
+			validate_fini(op, NULL);
+			return -ENOENT;
+		}
+		nvbo = gem->driver_private;
+		if (nvbo == res_bo) {
+			res_bo = NULL;
+			drm_gem_object_unreference_unlocked(gem);
+			continue;
+		}
+
+		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
+			NV_ERROR(cli, "multiple instances of buffer %d on "
+				      "validation list\n", b->handle);
+			drm_gem_object_unreference_unlocked(gem);
+			validate_fini(op, NULL);
+			return -EINVAL;
+		}
+
+		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
+		if (ret) {
+			validate_fini(op, NULL);
+			if (unlikely(ret == -EAGAIN)) {
+				sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
+				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
+							      sequence);
+				if (!ret)
+					res_bo = nvbo;
+			}
+			if (unlikely(ret)) {
+				drm_gem_object_unreference_unlocked(gem);
+				if (ret != -ERESTARTSYS)
+					NV_ERROR(cli, "fail reserve\n");
+				return ret;
+			}
+		}
+
+		b->user_priv = (uint64_t)(unsigned long)nvbo;
+		nvbo->reserved_by = file_priv;
+		nvbo->pbbo_index = i;
+		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
+			list_add_tail(&nvbo->entry, &op->both_list);
+		else
+		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
+			list_add_tail(&nvbo->entry, &op->vram_list);
+		else
+		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
+			list_add_tail(&nvbo->entry, &op->gart_list);
+		else {
+			NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
+				 b->valid_domains);
+			list_add_tail(&nvbo->entry, &op->both_list);
+			validate_fini(op, NULL);
+			return -EINVAL;
+		}
+		if (nvbo == res_bo)
+			goto retry;
+	}
+
+	return 0;
+}
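+
+/* Reservations are taken under a global validate_sequence ticket so two
+ * clients reserving overlapping buffer lists cannot deadlock: on -EAGAIN
+ * everything is backed out, the contended bo is acquired through the
+ * slowpath, and the loop restarts with res_bo marking the bo already
+ * held so it is not reserved a second time.
+ */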
+
+static int
+validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
+{
+	struct nouveau_fence *fence = NULL;
+	int ret = 0;
+
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	if (nvbo->bo.sync_obj)
+		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+	if (fence) {
+		ret = nouveau_fence_sync(fence, chan);
+		nouveau_fence_unref(&fence);
+	}
+
+	return ret;
+}
+
+static int
+validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
+	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
+	      uint64_t user_pbbo_ptr)
+{
+	struct nouveau_drm *drm = chan->drm;
+	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
+				(void __force __user *)(uintptr_t)user_pbbo_ptr;
+	struct nouveau_bo *nvbo;
+	int ret, relocs = 0;
+
+	list_for_each_entry(nvbo, list, entry) {
+		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
+
+		ret = validate_sync(chan, nvbo);
+		if (unlikely(ret)) {
+			NV_ERROR(cli, "fail pre-validate sync\n");
+			return ret;
+		}
+
+		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
+					     b->write_domains,
+					     b->valid_domains);
+		if (unlikely(ret)) {
+			NV_ERROR(cli, "fail set_domain\n");
+			return ret;
+		}
+
+		ret = nouveau_bo_validate(nvbo, true, false);
+		if (unlikely(ret)) {
+			if (ret != -ERESTARTSYS)
+				NV_ERROR(cli, "fail ttm_validate\n");
+			return ret;
+		}
+
+		ret = validate_sync(chan, nvbo);
+		if (unlikely(ret)) {
+			NV_ERROR(cli, "fail post-validate sync\n");
+			return ret;
+		}
+
+		if (nv_device(drm->device)->card_type < NV_50) {
+			if (nvbo->bo.offset == b->presumed.offset &&
+			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
+				continue;
+
+			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
+			else
+				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+			b->presumed.offset = nvbo->bo.offset;
+			b->presumed.valid = 0;
+			relocs++;
+
+			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+					     &b->presumed, sizeof(b->presumed)))
+				return -EFAULT;
+		}
+	}
+
+	return relocs;
+}
+
+static int
+nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
+			     struct drm_file *file_priv,
+			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
+			     uint64_t user_buffers, int nr_buffers,
+			     struct validate_op *op, int *apply_relocs)
+{
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	int ret, relocs = 0;
+
+	INIT_LIST_HEAD(&op->vram_list);
+	INIT_LIST_HEAD(&op->gart_list);
+	INIT_LIST_HEAD(&op->both_list);
+
+	if (nr_buffers == 0)
+		return 0;
+
+	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
+	if (unlikely(ret)) {
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(cli, "validate_init\n");
+		return ret;
+	}
+
+	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
+	if (unlikely(ret < 0)) {
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(cli, "validate vram_list\n");
+		validate_fini(op, NULL);
+		return ret;
+	}
+	relocs += ret;
+
+	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
+	if (unlikely(ret < 0)) {
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(cli, "validate gart_list\n");
+		validate_fini(op, NULL);
+		return ret;
+	}
+	relocs += ret;
+
+	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
+	if (unlikely(ret < 0)) {
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(cli, "validate both_list\n");
+		validate_fini(op, NULL);
+		return ret;
+	}
+	relocs += ret;
+
+	*apply_relocs = relocs;
+	return 0;
+}
+
+static inline void *
+u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
+{
+	void *mem;
+	void __user *userptr = (void __force __user *)(uintptr_t)user;
+
+	mem = kmalloc(nmemb * size, GFP_KERNEL);
+	if (!mem)
+		return ERR_PTR(-ENOMEM);
+
+	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
+		kfree(mem);
+		return ERR_PTR(-EFAULT);
+	}
+
+	return mem;
+}
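+
+/* u_memcpya bounces a user-space array into a kernel copy in one shot.
+ * The nmemb * size product is not overflow-checked here; callers are
+ * expected to have bounded the counts first, as the NOUVEAU_GEM_MAX_*
+ * checks in nouveau_gem_ioctl_pushbuf() below do.
+ */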
+
+static int
+nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
+				struct drm_nouveau_gem_pushbuf *req,
+				struct drm_nouveau_gem_pushbuf_bo *bo)
+{
+	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
+	int ret = 0;
+	unsigned i;
+
+	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
+	if (IS_ERR(reloc))
+		return PTR_ERR(reloc);
+
+	for (i = 0; i < req->nr_relocs; i++) {
+		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
+		struct drm_nouveau_gem_pushbuf_bo *b;
+		struct nouveau_bo *nvbo;
+		uint32_t data;
+
+		if (unlikely(r->bo_index >= req->nr_buffers)) {
+			NV_ERROR(cli, "reloc bo index invalid\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		b = &bo[r->bo_index];
+		if (b->presumed.valid)
+			continue;
+
+		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
+			NV_ERROR(cli, "reloc container bo index invalid\n");
+			ret = -EINVAL;
+			break;
+		}
+		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
+
+		if (unlikely(r->reloc_bo_offset + 4 >
+			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
+			NV_ERROR(cli, "reloc outside of bo\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		if (!nvbo->kmap.virtual) {
+			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+					  &nvbo->kmap);
+			if (ret) {
+				NV_ERROR(cli, "failed kmap for reloc\n");
+				break;
+			}
+			nvbo->validate_mapped = true;
+		}
+
+		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
+			data = b->presumed.offset + r->data;
+		else
+		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
+			data = (b->presumed.offset + r->data) >> 32;
+		else
+			data = r->data;
+
+		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
+			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
+				data |= r->tor;
+			else
+				data |= r->vor;
+		}
+
+		spin_lock(&nvbo->bo.bdev->fence_lock);
+		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
+		if (ret) {
+			NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
+			break;
+		}
+
+		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
+	}
+
+	kfree(reloc);
+	return ret;
+}
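+
+/* Relocation patching above: each reloc names a target buffer (bo_index)
+ * whose presumed address went stale and a container buffer
+ * (reloc_bo_index) holding the word to patch.  LOW/HIGH pick which half
+ * of the 64-bit address is written, and OR folds in a domain-dependent
+ * constant (tor for GART, vor for VRAM) before nouveau_bo_wr32() lands
+ * the value.
+ */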
+
+int
+nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct nouveau_abi16_chan *temp;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct drm_nouveau_gem_pushbuf *req = data;
+	struct drm_nouveau_gem_pushbuf_push *push;
+	struct drm_nouveau_gem_pushbuf_bo *bo;
+	struct nouveau_channel *chan = NULL;
+	struct validate_op op;
+	struct nouveau_fence *fence = NULL;
+	int i, j, ret = 0, do_reloc = 0;
+
+	if (unlikely(!abi16))
+		return -ENOMEM;
+
+	list_for_each_entry(temp, &abi16->channels, head) {
+		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
+			chan = temp->chan;
+			break;
+		}
+	}
+
+	if (!chan)
+		return nouveau_abi16_put(abi16, -ENOENT);
+
+	req->vram_available = drm->gem.vram_available;
+	req->gart_available = drm->gem.gart_available;
+	if (unlikely(req->nr_push == 0))
+		goto out_next;
+
+	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
+		NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
+			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+		return nouveau_abi16_put(abi16, -EINVAL);
+	}
+
+	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
+		NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
+			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		return nouveau_abi16_put(abi16, -EINVAL);
+	}
+
+	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
+		NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
+			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
+		return nouveau_abi16_put(abi16, -EINVAL);
+	}
+
+	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
+	if (IS_ERR(push))
+		return nouveau_abi16_put(abi16, PTR_ERR(push));
+
+	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
+	if (IS_ERR(bo)) {
+		kfree(push);
+		return nouveau_abi16_put(abi16, PTR_ERR(bo));
+	}
+
+	/* Ensure all push buffers are on validate list */
+	for (i = 0; i < req->nr_push; i++) {
+		if (push[i].bo_index >= req->nr_buffers) {
+			NV_ERROR(cli, "push %d buffer not in list\n", i);
+			ret = -EINVAL;
+			goto out_prevalid;
+		}
+	}
+
+	/* Validate buffer list */
+	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
+					   req->nr_buffers, &op, &do_reloc);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(cli, "validate: %d\n", ret);
+		goto out_prevalid;
+	}
+
+	/* Apply any relocations that are required */
+	if (do_reloc) {
+		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
+		if (ret) {
+			NV_ERROR(cli, "reloc apply: %d\n", ret);
+			goto out;
+		}
+	}
+
+	if (chan->dma.ib_max) {
+		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
+		if (ret) {
+			NV_ERROR(cli, "nv50cal_space: %d\n", ret);
+			goto out;
+		}
+
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+
+			nv50_dma_push(chan, nvbo, push[i].offset,
+				      push[i].length);
+		}
+	} else
+	if (nv_device(drm->device)->chipset >= 0x25) {
+		ret = RING_SPACE(chan, req->nr_push * 2);
+		if (ret) {
+			NV_ERROR(cli, "cal_space: %d\n", ret);
+			goto out;
+		}
+
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+
+			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
+			OUT_RING(chan, 0);
+		}
+	} else {
+		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
+		if (ret) {
+			NV_ERROR(cli, "jmp_space: %d\n", ret);
+			goto out;
+		}
+
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+			uint32_t cmd;
+
+			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
+			cmd |= 0x20000000;
+			if (unlikely(cmd != req->suffix0)) {
+				if (!nvbo->kmap.virtual) {
+					ret = ttm_bo_kmap(&nvbo->bo, 0,
+							  nvbo->bo.mem.
+							  num_pages,
+							  &nvbo->kmap);
+					if (ret) {
+						WIND_RING(chan);
+						goto out;
+					}
+					nvbo->validate_mapped = true;
+				}
+
+				nouveau_bo_wr32(nvbo, (push[i].offset +
+						push[i].length - 8) / 4, cmd);
+			}
+
+			OUT_RING(chan, 0x20000000 |
+				      (nvbo->bo.offset + push[i].offset));
+			OUT_RING(chan, 0);
+			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
+				OUT_RING(chan, 0);
+		}
+	}
+
+	ret = nouveau_fence_new(chan, false, &fence);
+	if (ret) {
+		NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
+		WIND_RING(chan);
+		goto out;
+	}
+
+out:
+	validate_fini(&op, fence);
+	nouveau_fence_unref(&fence);
+
+out_prevalid:
+	kfree(bo);
+	kfree(push);
+
+out_next:
+	if (chan->dma.ib_max) {
+		req->suffix0 = 0x00000000;
+		req->suffix1 = 0x00000000;
+	} else
+	if (nv_device(drm->device)->chipset >= 0x25) {
+		req->suffix0 = 0x00020000;
+		req->suffix1 = 0x00000000;
+	} else {
+		req->suffix0 = 0x20000000 |
+			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
+		req->suffix1 = 0x00000000;
+	}
+
+	return nouveau_abi16_put(abi16, ret);
+}
+
+static inline uint32_t
+domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
+{
+	uint32_t flags = 0;
+
+	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
+		flags |= TTM_PL_FLAG_VRAM;
+	if (domain & NOUVEAU_GEM_DOMAIN_GART)
+		flags |= TTM_PL_FLAG_TT;
+
+	return flags;
+}
+
+int
+nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_nouveau_gem_cpu_prep *req = data;
+	struct drm_gem_object *gem;
+	struct nouveau_bo *nvbo;
+	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
+	int ret = -EINVAL;
+
+	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+	if (!gem)
+		return -ENOENT;
+	nvbo = nouveau_gem_object(gem);
+
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
+	drm_gem_object_unreference_unlocked(gem);
+	return ret;
+}
+
+int
+nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	return 0;
+}
+
+int
+nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_nouveau_gem_info *req = data;
+	struct drm_gem_object *gem;
+	int ret;
+
+	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+	if (!gem)
+		return -ENOENT;
+
+	ret = nouveau_gem_info(file_priv, gem, req);
+	drm_gem_object_unreference_unlocked(gem);
+	return ret;
+}
+
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_gem.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_gem.h
new file mode 100644
index 0000000..8d7a3f0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -0,0 +1,45 @@
+#ifndef __NOUVEAU_GEM_H__
+#define __NOUVEAU_GEM_H__
+
+#include <drm/drmP.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_bo.h"
+
+#define nouveau_bo_tile_layout(nvbo)				\
+	((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
+
+static inline struct nouveau_bo *
+nouveau_gem_object(struct drm_gem_object *gem)
+{
+	return gem ? gem->driver_private : NULL;
+}
+
+/* nouveau_gem.c */
+extern int nouveau_gem_new(struct drm_device *, int size, int align,
+			   uint32_t domain, uint32_t tile_mode,
+			   uint32_t tile_flags, struct nouveau_bo **);
+extern int nouveau_gem_object_new(struct drm_gem_object *);
+extern void nouveau_gem_object_del(struct drm_gem_object *);
+extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
+extern void nouveau_gem_object_close(struct drm_gem_object *,
+				     struct drm_file *);
+extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
+				 struct drm_file *);
+extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
+				     struct drm_file *);
+extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
+				      struct drm_file *);
+extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
+				      struct drm_file *);
+extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
+				  struct drm_file *);
+
+extern int nouveau_gem_prime_pin(struct drm_gem_object *);
+extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
+extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
+	struct drm_device *, size_t size, struct sg_table *);
+extern void *nouveau_gem_prime_vmap(struct drm_gem_object *);
+extern void nouveau_gem_prime_vunmap(struct drm_gem_object *, void *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_hwsq.h
new file mode 100644
index 0000000..6976875
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_hwsq.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_HWSQ_H__
+#define __NOUVEAU_HWSQ_H__
+
+struct hwsq_ucode {
+	u8 data[0x200];
+	union {
+		u8  *u08;
+		u16 *u16;
+		u32 *u32;
+	} ptr;
+	u16 len;
+
+	u32 reg;
+	u32 val;
+};
+
+static inline void
+hwsq_init(struct hwsq_ucode *hwsq)
+{
+	hwsq->ptr.u08 = hwsq->data;
+	hwsq->reg = 0xffffffff;
+	hwsq->val = 0xffffffff;
+}
+
+static inline void
+hwsq_fini(struct hwsq_ucode *hwsq)
+{
+	do {
+		*hwsq->ptr.u08++ = 0x7f;
+		hwsq->len = hwsq->ptr.u08 - hwsq->data;
+	} while (hwsq->len & 3);
+	hwsq->ptr.u08 = hwsq->data;
+}
+
+static inline void
+hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
+{
+	u32 shift = 0;
+	while (usec & ~3) {
+		usec >>= 2;
+		shift++;
+	}
+
+	*hwsq->ptr.u08++ = (shift << 2) | usec;
+}
+
+static inline void
+hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
+{
+	flag += 0x80;
+	if (val >= 0)
+		flag += 0x20;
+	if (val >= 1)
+		flag += 0x20;
+	*hwsq->ptr.u08++ = flag;
+}
+
+static inline void
+hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
+{
+	*hwsq->ptr.u08++ = 0x5f;
+	*hwsq->ptr.u08++ = v0;
+	*hwsq->ptr.u08++ = v1;
+}
+
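+/* queue a register write, compressing against previously emitted
+ * state: the 16-bit forms (0x42 data, 0x40 addr) are used whenever
+ * the upper halves match the cached hwsq->val/hwsq->reg, otherwise
+ * the full 32-bit forms (0xe2, 0xe0) are emitted
+ */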
+static inline void
+hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
+{
+	if (val != hwsq->val) {
+		if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
+			*hwsq->ptr.u08++ = 0x42;
+			*hwsq->ptr.u16++ = (val & 0x0000ffff);
+		} else {
+			*hwsq->ptr.u08++ = 0xe2;
+			*hwsq->ptr.u32++ = val;
+		}
+
+		hwsq->val = val;
+	}
+
+	if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
+		*hwsq->ptr.u08++ = 0x40;
+		*hwsq->ptr.u16++ = (reg & 0x0000ffff);
+	} else {
+		*hwsq->ptr.u08++ = 0xe0;
+		*hwsq->ptr.u32++ = reg;
+	}
+	hwsq->reg = reg;
+}
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_ioc32.c
new file mode 100644
index 0000000..08214bc
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -0,0 +1,69 @@
+/**
+ * \file nouveau_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the Nouveau DRM.
+ *
+ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
+ *
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Egbert Eich 2003,2004
+ * Copyright (C) Dave Airlie 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/compat.h>
+
+#include <drm/drmP.h>
+
+#include "nouveau_ioctl.h"
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
+			 unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn = NULL;
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+#if 0
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(nouveau_compat_ioctls))
+		fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
+#endif
+	if (fn != NULL)
+		ret = (*fn)(filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp, cmd, arg);
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_ioctl.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_ioctl.h
new file mode 100644
index 0000000..ef2b290
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_ioctl.h
@@ -0,0 +1,6 @@
+#ifndef __NOUVEAU_IOCTL_H__
+#define __NOUVEAU_IOCTL_H__
+
+long nouveau_compat_ioctl(struct file *, unsigned int cmd, unsigned long arg);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_mem.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_mem.c
new file mode 100644
index 0000000..7e0ff10
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -0,0 +1,659 @@
+/*
+ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
+ * Copyright 2005 Stephane Marchesin
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Ben Skeggs <bskeggs@redhat.com>
+ *    Roy Spliet <r.spliet@student.tudelft.nl>
+ */
+
+#include "nouveau_drm.h"
+#include "nouveau_pm.h"
+
+#include <subdev/fb.h>
+
+static int
+nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
+		     struct nouveau_pm_tbl_entry *e, u8 len,
+		     struct nouveau_pm_memtiming *boot,
+		     struct nouveau_pm_memtiming *t)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
+
+	/* XXX: I don't trust the -1's and +1's... they must come
+	 *      from somewhere! */
+	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
+		    1 << 16 |
+		    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
+		    (e->tCL + 2 - (t->tCWL - 1));
+
+	t->reg[2] = 0x20200000 |
+		    ((t->tCWL - 1) << 24 |
+		     e->tRRD << 16 |
+		     e->tRCDWR << 8 |
+		     e->tRCDRD);
+
+	NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x\n", t->id,
+		 t->reg[0], t->reg[1], t->reg[2]);
+	return 0;
+}
+
+static int
+nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
+		     struct nouveau_pm_tbl_entry *e, u8 len,
+		     struct nouveau_pm_memtiming *boot,
+		     struct nouveau_pm_memtiming *t)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct bit_entry P;
+	uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;
+
+	if (bit_table(dev, 'P', &P))
+		return -EINVAL;
+
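+	/* deliberate fall-through: longer table entries carry extra
+	 * fields, so each case also picks up everything below it
+	 */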
+	switch (min(len, (u8) 22)) {
+	case 22:
+		unk21 = e->tUNK_21;
+	case 21:
+		unk20 = e->tUNK_20;
+	case 20:
+		if (e->tCWL > 0)
+			t->tCWL = e->tCWL;
+	case 19:
+		unk18 = e->tUNK_18;
+		break;
+	}
+
+	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
+
+	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
+				max(unk18, (u8) 1) << 16 |
+				(e->tWTR + 2 + (t->tCWL - 1)) << 8;
+
+	t->reg[2] = ((t->tCWL - 1) << 24 |
+		    e->tRRD << 16 |
+		    e->tRCDWR << 8 |
+		    e->tRCDRD);
+
+	t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;
+
+	t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);
+
+	t->reg[8] = boot->reg[8] & 0xffffff00;
+
+	if (P.version == 1) {
+		t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));
+
+		t->reg[3] = (0x14 + e->tCL) << 24 |
+			    0x16 << 16 |
+			    (e->tCL - 1) << 8 |
+			    (e->tCL - 1);
+
+		t->reg[4] |= boot->reg[4] & 0xffff0000;
+
+		t->reg[6] = (0x33 - t->tCWL) << 16 |
+			    t->tCWL << 8 |
+			    (0x2e + e->tCL - t->tCWL);
+
+		t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;
+
+		/* XXX: P.version == 1 only has DDR2 and GDDR3? */
+		if (pfb->ram.type == NV_MEM_TYPE_DDR2) {
+			t->reg[5] |= (e->tCL + 3) << 8;
+			t->reg[6] |= (t->tCWL - 2) << 8;
+			t->reg[8] |= (e->tCL - 4);
+		} else {
+			t->reg[5] |= (e->tCL + 2) << 8;
+			t->reg[6] |= t->tCWL << 8;
+			t->reg[8] |= (e->tCL - 2);
+		}
+	} else {
+		t->reg[1] |= (5 + e->tCL - (t->tCWL));
+
+		/* XXX: 0xb? 0x30? */
+		t->reg[3] = (0x30 + e->tCL) << 24 |
+			    (boot->reg[3] & 0x00ff0000) |
+			    (0xb + e->tCL) << 8 |
+			    (e->tCL - 1);
+
+		t->reg[4] |= (unk20 << 24 | unk21 << 16);
+
+		/* XXX: +6? */
+		t->reg[5] |= (t->tCWL + 6) << 8;
+
+		t->reg[6] = (0x5a + e->tCL) << 16 |
+			    (6 - e->tCL + t->tCWL) << 8 |
+			    (0x50 + e->tCL - t->tCWL);
+
+		tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
+		t->reg[7] = (tmp7_3 << 24) |
+			    ((tmp7_3 - 6 + e->tCL) << 16) |
+			    0x202;
+	}
+
+	NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
+		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
+	NV_DEBUG(drm, "         230: %08x %08x %08x %08x\n",
+		 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
+	NV_DEBUG(drm, "         240: %08x\n", t->reg[8]);
+	return 0;
+}
+
+static int
+nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
+		     struct nouveau_pm_tbl_entry *e, u8 len,
+		     struct nouveau_pm_memtiming *boot,
+		     struct nouveau_pm_memtiming *t)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (e->tCWL > 0)
+		t->tCWL = e->tCWL;
+
+	t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
+		     e->tRFC << 8 | e->tRC);
+
+	t->reg[1] = (boot->reg[1] & 0xff000000) |
+		    (e->tRCDWR & 0x0f) << 20 |
+		    (e->tRCDRD & 0x0f) << 14 |
+		    (t->tCWL << 7) |
+		    (e->tCL & 0x0f);
+
+	t->reg[2] = (boot->reg[2] & 0xff0000ff) |
+		    e->tWR << 16 | e->tWTR << 8;
+
+	t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
+		    (e->tUNK_21 & 0xf) << 5 |
+		    (e->tUNK_13 & 0x1f);
+
+	t->reg[4] = (boot->reg[4] & 0xfff00fff) |
+		    (e->tRRD & 0x1f) << 15;
+
+	NV_DEBUG(drm, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
+		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
+	NV_DEBUG(drm, "         2a0: %08x\n", t->reg[4]);
+	return 0;
+}
+
+/**
+ * MR generation methods
+ */
+
+static int
+nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
+		    struct nouveau_pm_tbl_entry *e, u8 len,
+		    struct nouveau_pm_memtiming *boot,
+		    struct nouveau_pm_memtiming *t)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	t->drive_strength = 0;
+	if (len < 15) {
+		t->odt = boot->odt;
+	} else {
+		t->odt = e->RAM_FT1 & 0x07;
+	}
+
+	if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
+		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
+		return -ERANGE;
+	}
+
+	if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
+		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
+		return -ERANGE;
+	}
+
+	if (t->odt > 3) {
+		NV_WARN(drm, "(%u) Invalid odt value, assuming disabled: %x",
+			t->id, t->odt);
+		t->odt = 0;
+	}
+
+	t->mr[0] = (boot->mr[0] & 0x100f) |
+		   (e->tCL) << 4 |
+		   (e->tWR - 1) << 9;
+	t->mr[1] = (boot->mr[1] & 0x101fbb) |
+		   (t->odt & 0x1) << 2 |
+		   (t->odt & 0x2) << 5;
+
+	NV_DEBUG(drm, "(%u) MR: %08x", t->id, t->mr[0]);
+	return 0;
+}
+
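+/* translate tWR in memory clocks to the DDR3 MR0 write-recovery
+ * encoding; zero entries mark values with no defined encoding
+ */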
+static const uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
+	0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
+
+static int
+nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
+		    struct nouveau_pm_tbl_entry *e, u8 len,
+		    struct nouveau_pm_memtiming *boot,
+		    struct nouveau_pm_memtiming *t)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u8 cl = e->tCL - 4;
+
+	t->drive_strength = 0;
+	if (len < 15) {
+		t->odt = boot->odt;
+	} else {
+		t->odt = e->RAM_FT1 & 0x07;
+	}
+
+	if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
+		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
+		return -ERANGE;
+	}
+
+	if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
+		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
+		return -ERANGE;
+	}
+
+	if (e->tCWL < 5) {
+		NV_WARN(drm, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
+		return -ERANGE;
+	}
+
+	t->mr[0] = (boot->mr[0] & 0x180b) |
+		   /* CAS */
+		   (cl & 0x7) << 4 |
+		   (cl & 0x8) >> 1 |
+		   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
+	t->mr[1] = (boot->mr[1] & 0x101dbb) |
+		   (t->odt & 0x1) << 2 |
+		   (t->odt & 0x2) << 5 |
+		   (t->odt & 0x4) << 7;
+	t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;
+
+	NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
+	return 0;
+}
+
+static const uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
+	0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
+static const uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
+	0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
+
+static int
+nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
+		     struct nouveau_pm_tbl_entry *e, u8 len,
+		     struct nouveau_pm_memtiming *boot,
+		     struct nouveau_pm_memtiming *t)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (len < 15) {
+		t->drive_strength = boot->drive_strength;
+		t->odt = boot->odt;
+	} else {
+		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
+		t->odt = e->RAM_FT1 & 0x07;
+	}
+
+	if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
+		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
+		return -ERANGE;
+	}
+
+	if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
+		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
+		return -ERANGE;
+	}
+
+	if (t->odt > 3) {
+		NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
+			t->id, t->odt);
+		t->odt = 0;
+	}
+
+	t->mr[0] = (boot->mr[0] & 0xe0b) |
+		   /* CAS */
+		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
+		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
+	t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
+		   (t->odt << 2) |
+		   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
+	t->mr[2] = boot->mr[2];
+
+	NV_DEBUG(drm, "(%u) MR: %08x %08x %08x", t->id,
+		      t->mr[0], t->mr[1], t->mr[2]);
+	return 0;
+}
+
+static int
+nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
+		     struct nouveau_pm_tbl_entry *e, u8 len,
+		     struct nouveau_pm_memtiming *boot,
+		     struct nouveau_pm_memtiming *t)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (len < 15) {
+		t->drive_strength = boot->drive_strength;
+		t->odt = boot->odt;
+	} else {
+		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
+		t->odt = e->RAM_FT1 & 0x03;
+	}
+
+	if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
+		NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
+		return -ERANGE;
+	}
+
+	if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
+		NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
+		return -ERANGE;
+	}
+
+	if (t->odt > 3) {
+		NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
+			t->id, t->odt);
+		t->odt = 0;
+	}
+
+	t->mr[0] = (boot->mr[0] & 0x007) |
+		   ((e->tCL - 5) << 3) |
+		   ((e->tWR - 4) << 8);
+	t->mr[1] = (boot->mr[1] & 0x1007f0) |
+		   t->drive_strength |
+		   (t->odt << 2);
+
+	NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
+	return 0;
+}
+
+int
+nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
+			struct nouveau_pm_memtiming *t)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_pm_memtiming *boot = &pm->boot.timing;
+	struct nouveau_pm_tbl_entry *e;
+	u8 ver, len, *ptr, *ramcfg;
+	int ret;
+
+	ptr = nouveau_perf_timing(dev, freq, &ver, &len);
+	if (!ptr || ptr[0] == 0x00) {
+		*t = *boot;
+		return 0;
+	}
+	e = (struct nouveau_pm_tbl_entry *)ptr;
+
+	t->tCWL = boot->tCWL;
+
+	switch (device->card_type) {
+	case NV_40:
+		ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
+		break;
+	case NV_50:
+		ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
+		break;
+	case NV_C0:
+	case NV_D0:
+		ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
+		break;
+	default:
+		ret = -ENODEV;
+		break;
+	}
+
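+	/* multiplying by !ret routes to the default case (-EINVAL) when
+	 * the timing calculation above failed, skipping MR generation
+	 */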
+	switch (pfb->ram.type * !ret) {
+	case NV_MEM_TYPE_GDDR3:
+		ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
+		break;
+	case NV_MEM_TYPE_GDDR5:
+		ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
+		break;
+	case NV_MEM_TYPE_DDR2:
+		ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
+		break;
+	case NV_MEM_TYPE_DDR3:
+		ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
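+	/* mirror the ramcfg block's DLL-disable flag into the MR1 DLL
+	 * bit (0x40 on GDDR3, 0x01 on the DDR types)
+	 */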
+	ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
+	if (ramcfg) {
+		int dll_off;
+
+		if (ver == 0x00)
+			dll_off = !!(ramcfg[3] & 0x04);
+		else
+			dll_off = !!(ramcfg[2] & 0x40);
+
+		switch (pfb->ram.type) {
+		case NV_MEM_TYPE_GDDR3:
+			t->mr[1] &= ~0x00000040;
+			t->mr[1] |=  0x00000040 * dll_off;
+			break;
+		default:
+			t->mr[1] &= ~0x00000001;
+			t->mr[1] |=  0x00000001 * dll_off;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+void
+nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	u32 timing_base, timing_regs, mr_base;
+	int i;
+
+	if (device->card_type >= NV_C0) {
+		timing_base = 0x10f290;
+		mr_base = 0x10f300;
+	} else {
+		timing_base = 0x100220;
+		mr_base = 0x1002c0;
+	}
+
+	t->id = -1;
+
+	switch (device->card_type) {
+	case NV_50:
+		timing_regs = 9;
+		break;
+	case NV_C0:
+	case NV_D0:
+		timing_regs = 5;
+		break;
+	case NV_30:
+	case NV_40:
+		timing_regs = 3;
+		break;
+	default:
+		timing_regs = 0;
+		return;
+	}
+	for (i = 0; i < timing_regs; i++)
+		t->reg[i] = nv_rd32(device, timing_base + (0x04 * i));
+
+	t->tCWL = 0;
+	if (device->card_type < NV_C0) {
+		t->tCWL = ((nv_rd32(device, 0x100228) & 0x0f000000) >> 24) + 1;
+	} else if (device->card_type <= NV_D0) {
+		t->tCWL = ((nv_rd32(device, 0x10f294) & 0x00000f80) >> 7);
+	}
+
+	t->mr[0] = nv_rd32(device, mr_base);
+	t->mr[1] = nv_rd32(device, mr_base + 0x04);
+	t->mr[2] = nv_rd32(device, mr_base + 0x20);
+	t->mr[3] = nv_rd32(device, mr_base + 0x24);
+
+	t->odt = 0;
+	t->drive_strength = 0;
+
+	switch (pfb->ram.type) {
+	case NV_MEM_TYPE_DDR3:
+		t->odt |= (t->mr[1] & 0x200) >> 7;
+	case NV_MEM_TYPE_DDR2:
+		t->odt |= (t->mr[1] & 0x04) >> 2 |
+			  (t->mr[1] & 0x40) >> 5;
+		break;
+	case NV_MEM_TYPE_GDDR3:
+	case NV_MEM_TYPE_GDDR5:
+		t->drive_strength = t->mr[1] & 0x03;
+		t->odt = (t->mr[1] & 0x0c) >> 2;
+		break;
+	default:
+		break;
+	}
+}
+
+int
+nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
+		 struct nouveau_pm_level *perflvl)
+{
+	struct nouveau_drm *drm = nouveau_drm(exec->dev);
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	struct nouveau_pm_memtiming *info = &perflvl->timing;
+	u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
+	u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
+	u32 mr1_dlloff;
+
+	switch (pfb->ram.type) {
+	case NV_MEM_TYPE_DDR2:
+		tDLLK = 2000;
+		mr1_dlloff = 0x00000001;
+		break;
+	case NV_MEM_TYPE_DDR3:
+		tDLLK = 12000;
+		tCKSRE = 2000;
+		tXS = 1000;
+		mr1_dlloff = 0x00000001;
+		break;
+	case NV_MEM_TYPE_GDDR3:
+		tDLLK = 40000;
+		mr1_dlloff = 0x00000040;
+		break;
+	default:
+		NV_ERROR(drm, "cannot reclock unsupported memtype\n");
+		return -ENODEV;
+	}
+
+	/* fetch current MRs */
+	switch (pfb->ram.type) {
+	case NV_MEM_TYPE_GDDR3:
+	case NV_MEM_TYPE_DDR3:
+		mr[2] = exec->mrg(exec, 2);
+	default:
+		mr[1] = exec->mrg(exec, 1);
+		mr[0] = exec->mrg(exec, 0);
+		break;
+	}
+
+	/* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh  */
+	if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
+		exec->precharge(exec);
+		exec->mrs (exec, 1, mr[1] | mr1_dlloff);
+		exec->wait(exec, tMRD);
+	}
+
+	/* enter self-refresh mode */
+	exec->precharge(exec);
+	exec->refresh(exec);
+	exec->refresh(exec);
+	exec->refresh_auto(exec, false);
+	exec->refresh_self(exec, true);
+	exec->wait(exec, tCKSRE);
+
+	/* modify input clock frequency */
+	exec->clock_set(exec);
+
+	/* exit self-refresh mode */
+	exec->wait(exec, tCKSRX);
+	exec->precharge(exec);
+	exec->refresh_self(exec, false);
+	exec->refresh_auto(exec, true);
+	exec->wait(exec, tXS);
+	exec->wait(exec, tXS);
+
+	/* update MRs */
+	if (mr[2] != info->mr[2]) {
+		exec->mrs (exec, 2, info->mr[2]);
+		exec->wait(exec, tMRD);
+	}
+
+	if (mr[1] != info->mr[1]) {
+		/* need to keep DLL off until later, at least on GDDR3 */
+		exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
+		exec->wait(exec, tMRD);
+	}
+
+	if (mr[0] != info->mr[0]) {
+		exec->mrs (exec, 0, info->mr[0]);
+		exec->wait(exec, tMRD);
+	}
+
+	/* update PFB timing registers */
+	exec->timing_set(exec);
+
+	/* DLL (enable + ) reset */
+	if (!(info->mr[1] & mr1_dlloff)) {
+		if (mr[1] & mr1_dlloff) {
+			exec->mrs (exec, 1, info->mr[1]);
+			exec->wait(exec, tMRD);
+		}
+		exec->mrs (exec, 0, info->mr[0] | 0x00000100);
+		exec->wait(exec, tMRD);
+		exec->mrs (exec, 0, info->mr[0] | 0x00000000);
+		exec->wait(exec, tMRD);
+		exec->wait(exec, tDLLK);
+		if (pfb->ram.type == NV_MEM_TYPE_GDDR3)
+			exec->precharge(exec);
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_perf.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_perf.c
new file mode 100644
index 0000000..4fe883c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -0,0 +1,428 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <drm/drmP.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "nouveau_pm.h"
+
+static u8 *
+nouveau_perf_table(struct drm_device *dev, u8 *ver)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	struct bit_entry P;
+
+	if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) {
+		u8 *perf = ROMPTR(dev, P.data[0]);
+		if (perf) {
+			*ver = perf[0];
+			return perf;
+		}
+	}
+
+	if (bios->type == NVBIOS_BMP) {
+		if (bios->data[bios->offset + 6] >= 0x25) {
+			u8 *perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
+			if (perf) {
+				*ver = perf[1];
+				return perf;
+			}
+		}
+	}
+
+	return NULL;
+}
+
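+/* look up perf table entry 'idx', accounting for layout changes
+ * between versions: v1x entries are plain fixed-size records, while
+ * v2x/v40 entries are followed by 'cnt' sub-entries of 'len' bytes
+ */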
+static u8 *
+nouveau_perf_entry(struct drm_device *dev, int idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	u8 *perf = nouveau_perf_table(dev, ver);
+	if (perf) {
+		if (*ver >= 0x12 && *ver < 0x20 && idx < perf[2]) {
+			*hdr = perf[3];
+			*cnt = 0;
+			*len = 0;
+			return perf + perf[0] + idx * perf[3];
+		} else
+		if (*ver >= 0x20 && *ver < 0x40 && idx < perf[2]) {
+			*hdr = perf[3];
+			*cnt = perf[4];
+			*len = perf[5];
+			return perf + perf[1] + idx * (*hdr + (*cnt * *len));
+		} else
+		if (*ver >= 0x40 && *ver < 0x41 && idx < perf[5]) {
+			*hdr = perf[2];
+			*cnt = perf[4];
+			*len = perf[3];
+			return perf + perf[1] + idx * (*hdr + (*cnt * *len));
+		}
+	}
+	return NULL;
+}
+
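+/* find the RAM configuration block for a memory frequency: BIT 'P'
+ * version 2 BIOSes carry a dedicated rammap table indexed by MHz
+ * range, older ones derive it from the matching perf table entry
+ */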
+u8 *
+nouveau_perf_rammap(struct drm_device *dev, u32 freq,
+		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct bit_entry P;
+	u8 *perf, i = 0;
+
+	if (!bit_table(dev, 'P', &P) && P.version == 2) {
+		u8 *rammap = ROMPTR(dev, P.data[4]);
+		if (rammap) {
+			u8 *ramcfg = rammap + rammap[1];
+
+			*ver = rammap[0];
+			*hdr = rammap[2];
+			*cnt = rammap[4];
+			*len = rammap[3];
+
+			freq /= 1000;
+			for (i = 0; i < rammap[5]; i++) {
+				if (freq >= ROM16(ramcfg[0]) &&
+				    freq <= ROM16(ramcfg[2]))
+					return ramcfg;
+
+				ramcfg += *hdr + (*cnt * *len);
+			}
+		}
+
+		return NULL;
+	}
+
+	if (nv_device(drm->device)->chipset == 0x49 ||
+	    nv_device(drm->device)->chipset == 0x4b)
+		freq /= 2;
+
+	while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) {
+		if (*ver >= 0x20 && *ver < 0x25) {
+			if (perf[0] != 0xff && freq <= ROM16(perf[11]) * 1000)
+				break;
+		} else
+		if (*ver >= 0x25 && *ver < 0x40) {
+			if (perf[0] != 0xff && freq <= ROM16(perf[12]) * 1000)
+				break;
+		}
+	}
+
+	if (perf) {
+		u8 *ramcfg = perf + *hdr;
+		*ver = 0x00;
+		*hdr = 0;
+		return ramcfg;
+	}
+
+	return NULL;
+}
+
+u8 *
+nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	u8 strap, hdr, cnt;
+	u8 *rammap;
+
+	strap = (nv_rd32(device, 0x101000) & 0x0000003c) >> 2;
+	if (bios->ram_restrict_tbl_ptr)
+		strap = bios->data[bios->ram_restrict_tbl_ptr + strap];
+
+	rammap = nouveau_perf_rammap(dev, freq, ver, &hdr, &cnt, len);
+	if (rammap && strap < cnt)
+		return rammap + hdr + (strap * *len);
+
+	return NULL;
+}
+
+u8 *
+nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	struct bit_entry P;
+	u8 *perf, *timing = NULL;
+	u8 i = 0, hdr, cnt;
+
+	if (bios->type == NVBIOS_BMP) {
+		while ((perf = nouveau_perf_entry(dev, i++, ver, &hdr, &cnt,
+						  len)) && *ver == 0x15) {
+			if (freq <= ROM32(perf[5]) * 20) {
+				*ver = 0x00;
+				*len = 14;
+				return perf + 41;
+			}
+		}
+		return NULL;
+	}
+
+	if (!bit_table(dev, 'P', &P)) {
+		if (P.version == 1)
+			timing = ROMPTR(dev, P.data[4]);
+		else
+		if (P.version == 2)
+			timing = ROMPTR(dev, P.data[8]);
+	}
+
+	if (timing && timing[0] == 0x10) {
+		u8 *ramcfg = nouveau_perf_ramcfg(dev, freq, ver, len);
+		if (ramcfg && ramcfg[1] < timing[2]) {
+			*ver = timing[0];
+			*len = timing[3];
+			return timing + timing[1] + (ramcfg[1] * timing[3]);
+		}
+	}
+
+	return NULL;
+}
+
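+/* BMP BIOSes older than 0x25 expose only a single memory clock via
+ * the legacy memclock table, so register exactly one perflvl
+ */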
+static void
+legacy_perf_init(struct drm_device *dev)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	char *perf, *entry, *bmp = &bios->data[bios->offset];
+	int headerlen, use_straps;
+
+	if (bmp[5] < 0x5 || bmp[6] < 0x14) {
+		NV_DEBUG(drm, "BMP version too old for perf\n");
+		return;
+	}
+
+	perf = ROMPTR(dev, bmp[0x73]);
+	if (!perf) {
+		NV_DEBUG(drm, "No memclock table pointer found.\n");
+		return;
+	}
+
+	switch (perf[0]) {
+	case 0x12:
+	case 0x14:
+	case 0x18:
+		use_straps = 0;
+		headerlen = 1;
+		break;
+	case 0x01:
+		use_straps = perf[1] & 1;
+		headerlen = (use_straps ? 8 : 2);
+		break;
+	default:
+		NV_WARN(drm, "Unknown memclock table version %x.\n", perf[0]);
+		return;
+	}
+
+	entry = perf + headerlen;
+	if (use_straps)
+		entry += (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
+
+	sprintf(pm->perflvl[0].name, "performance_level_0");
+	pm->perflvl[0].memory = ROM16(entry[0]) * 20;
+	pm->nr_perflvl = 1;
+}
+
+static void
+nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct bit_entry P;
+	u8 *vmap;
+	int id;
+
+	id = perflvl->volt_min;
+	perflvl->volt_min = 0;
+
+	/* boards using voltage table version <0x40 store the voltage
+	 * level directly in the perflvl entry as a multiple of 10mV
+	 */
+	if (drm->pm->voltage.version < 0x40) {
+		perflvl->volt_min = id * 10000;
+		perflvl->volt_max = perflvl->volt_min;
+		return;
+	}
+
+	/* on newer ones, the perflvl stores an index into yet another
+	 * vbios table containing a min/max voltage value for the perflvl
+	 */
+	if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) {
+		NV_DEBUG(drm, "where's our volt map table ptr? %d %d\n",
+			 P.version, P.length);
+		return;
+	}
+
+	vmap = ROMPTR(dev, P.data[32]);
+	if (!vmap) {
+		NV_DEBUG(drm, "volt map table pointer invalid\n");
+		return;
+	}
+
+	if (id < vmap[3]) {
+		vmap += vmap[1] + (vmap[2] * id);
+		perflvl->volt_min = ROM32(vmap[0]);
+		perflvl->volt_max = ROM32(vmap[4]);
+	}
+}
+
+void
+nouveau_perf_init(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nvbios *bios = &drm->vbios;
+	u8 *perf, ver, hdr, cnt, len;
+	int ret, vid, i = -1;
+
+	if (bios->type == NVBIOS_BMP && bios->data[bios->offset + 6] < 0x25) {
+		legacy_perf_init(dev);
+		return;
+	}
+
+	perf = nouveau_perf_table(dev, &ver);
+
+	while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) {
+		struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
+
+		if (perf[0] == 0xff)
+			continue;
+
+		switch (ver) {
+		case 0x12:
+		case 0x13:
+		case 0x15:
+			perflvl->fanspeed = perf[55];
+			if (hdr > 56)
+				perflvl->volt_min = perf[56];
+			perflvl->core = ROM32(perf[1]) * 10;
+			perflvl->memory = ROM32(perf[5]) * 20;
+			break;
+		case 0x21:
+		case 0x23:
+		case 0x24:
+			perflvl->fanspeed = perf[4];
+			perflvl->volt_min = perf[5];
+			perflvl->shader = ROM16(perf[6]) * 1000;
+			perflvl->core = perflvl->shader;
+			perflvl->core += (signed char)perf[8] * 1000;
+			if (nv_device(drm->device)->chipset == 0x49 ||
+			    nv_device(drm->device)->chipset == 0x4b)
+				perflvl->memory = ROM16(perf[11]) * 1000;
+			else
+				perflvl->memory = ROM16(perf[11]) * 2000;
+			break;
+		case 0x25:
+			perflvl->fanspeed = perf[4];
+			perflvl->volt_min = perf[5];
+			perflvl->core = ROM16(perf[6]) * 1000;
+			perflvl->shader = ROM16(perf[10]) * 1000;
+			perflvl->memory = ROM16(perf[12]) * 1000;
+			break;
+		case 0x30:
+			perflvl->memscript = ROM16(perf[2]);
+		case 0x35:
+			perflvl->fanspeed = perf[6];
+			perflvl->volt_min = perf[7];
+			perflvl->core = ROM16(perf[8]) * 1000;
+			perflvl->shader = ROM16(perf[10]) * 1000;
+			perflvl->memory = ROM16(perf[12]) * 1000;
+			perflvl->vdec = ROM16(perf[16]) * 1000;
+			perflvl->dom6 = ROM16(perf[20]) * 1000;
+			break;
+		case 0x40:
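+			/* v0x40: clocks live in fixed-position sub-entries,
+			 * low 12 bits in MHz, scaled to kHz here */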
+#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000)
+			perflvl->fanspeed = 0; /*XXX*/
+			perflvl->volt_min = perf[2];
+			if (nv_device(drm->device)->card_type == NV_50) {
+				perflvl->core   = subent(0);
+				perflvl->shader = subent(1);
+				perflvl->memory = subent(2);
+				perflvl->vdec   = subent(3);
+				perflvl->unka0  = subent(4);
+			} else {
+				perflvl->hub06  = subent(0);
+				perflvl->hub01  = subent(1);
+				perflvl->copy   = subent(2);
+				perflvl->shader = subent(3);
+				perflvl->rop    = subent(4);
+				perflvl->memory = subent(5);
+				perflvl->vdec   = subent(6);
+				perflvl->daemon = subent(10);
+				perflvl->hub07  = subent(11);
+				perflvl->core   = perflvl->shader / 2;
+			}
+			break;
+		}
+
+		/* make sure vid is valid */
+		nouveau_perf_voltage(dev, perflvl);
+		if (pm->voltage.supported && perflvl->volt_min) {
+			vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
+			if (vid < 0) {
+				NV_DEBUG(drm, "perflvl %d, bad vid\n", i);
+				continue;
+			}
+		}
+
+		/* get the corresponding memory timings */
+		ret = nouveau_mem_timing_calc(dev, perflvl->memory,
+					          &perflvl->timing);
+		if (ret) {
+			NV_DEBUG(drm, "perflvl %d, bad timing: %d\n", i, ret);
+			continue;
+		}
+
+		snprintf(perflvl->name, sizeof(perflvl->name),
+			 "performance_level_%d", i);
+		perflvl->id = i;
+
+		snprintf(perflvl->profile.name, sizeof(perflvl->profile.name),
+			 "%d", perflvl->id);
+		perflvl->profile.func = &nouveau_pm_static_profile_func;
+		list_add_tail(&perflvl->profile.head, &pm->profiles);
+
+		pm->nr_perflvl++;
+	}
+}
+
+void
+nouveau_perf_fini(struct drm_device *dev)
+{
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_pm.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_pm.c
new file mode 100644
index 0000000..936b442
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -0,0 +1,1180 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#include <drm/drmP.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_pm.h"
+
+#include <subdev/gpio.h>
+#include <subdev/timer.h>
+#include <subdev/therm.h>
+
+MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
+static char *nouveau_perflvl;
+module_param_named(perflvl, nouveau_perflvl, charp, 0400);
+
+MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
+static int nouveau_perflvl_wr;
+module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
+
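+/* apply the non-clock settings (fan speed, voltage) for a perflvl
+ * change; called with (cur, new) before reclocking and with
+ * (new, cur) after, so increases happen before the clock change and
+ * decreases after it
+ */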
+static int
+nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+		       struct nouveau_pm_level *a, struct nouveau_pm_level *b)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	/*XXX: not on all boards, we should control based on temperature
+	 *     on recent boards..  or maybe on some other factor we don't
+	 *     know about?
+	 */
+	if (therm && therm->fan_set &&
+		a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
+		ret = therm->fan_set(therm, perflvl->fanspeed);
+		if (ret && ret != -ENODEV) {
+			NV_ERROR(drm, "fanspeed set failed: %d\n", ret);
+		}
+	}
+
+	if (pm->voltage.supported && pm->voltage_set) {
+		if (perflvl->volt_min && b->volt_min > a->volt_min) {
+			ret = pm->voltage_set(dev, perflvl->volt_min);
+			if (ret) {
+				NV_ERROR(drm, "voltage set failed: %d\n", ret);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int
+nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	void *state;
+	int ret;
+
+	if (perflvl == pm->cur)
+		return 0;
+
+	ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
+	if (ret)
+		return ret;
+
+	state = pm->clocks_pre(dev, perflvl);
+	if (IS_ERR(state)) {
+		ret = PTR_ERR(state);
+		goto error;
+	}
+	ret = pm->clocks_set(dev, state);
+	if (ret)
+		goto error;
+
+	ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
+	if (ret)
+		return ret;
+
+	pm->cur = perflvl;
+	return 0;
+
+error:
+	/* restore the fan speed and voltage before leaving */
+	nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
+	return ret;
+}
+
+void
+nouveau_pm_trigger(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_timer *ptimer = nouveau_timer(drm->device);
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_pm_profile *profile = NULL;
+	struct nouveau_pm_level *perflvl = NULL;
+	int ret;
+
+	/* select power profile based on current power source */
+	if (power_supply_is_system_supplied())
+		profile = pm->profile_ac;
+	else
+		profile = pm->profile_dc;
+
+	if (profile != pm->profile) {
+		pm->profile->func->fini(pm->profile);
+		pm->profile = profile;
+		pm->profile->func->init(pm->profile);
+	}
+
+	/* select performance level based on profile */
+	perflvl = profile->func->select(profile);
+
+	/* change perflvl, if necessary */
+	if (perflvl != pm->cur) {
+		u64 time0 = ptimer->read(ptimer);
+
+		NV_INFO(drm, "setting performance level: %d\n", perflvl->id);
+		ret = nouveau_pm_perflvl_set(dev, perflvl);
+		if (ret)
+			NV_INFO(drm, "> reclocking failed: %d\n", ret);
+
+		NV_INFO(drm, "> reclocking took %lluns\n",
+			     ptimer->read(ptimer) - time0);
+	}
+}
+
+static struct nouveau_pm_profile *
+profile_find(struct drm_device *dev, const char *string)
+{
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_pm_profile *profile;
+
+	list_for_each_entry(profile, &pm->profiles, head) {
+		if (!strncmp(profile->name, string, sizeof(profile->name)))
+			return profile;
+	}
+
+	return NULL;
+}
+
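+/* parse a "performance_level" write of the form "<ac>[,<dc>]" and
+ * select the profiles used on AC and battery power; the
+ * perflvl_wr=7777 module option must be set to allow changes
+ */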
+static int
+nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
+{
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_pm_profile *ac = NULL, *dc = NULL;
+	char string[16], *cur = string, *ptr;
+
+	/* safety precaution, for now */
+	if (nouveau_perflvl_wr != 7777)
+		return -EPERM;
+
+	strncpy(string, profile, sizeof(string));
+	string[sizeof(string) - 1] = 0;
+	if ((ptr = strchr(string, '\n')))
+		*ptr = '\0';
+
+	ptr = strsep(&cur, ",");
+	if (ptr)
+		ac = profile_find(dev, ptr);
+
+	ptr = strsep(&cur, ",");
+	if (ptr)
+		dc = profile_find(dev, ptr);
+	else
+		dc = ac;
+
+	if (ac == NULL || dc == NULL)
+		return -EINVAL;
+
+	pm->profile_ac = ac;
+	pm->profile_dc = dc;
+	nouveau_pm_trigger(dev);
+	return 0;
+}
+
+static void
+nouveau_pm_static_dummy(struct nouveau_pm_profile *profile)
+{
+}
+
+static struct nouveau_pm_level *
+nouveau_pm_static_select(struct nouveau_pm_profile *profile)
+{
+	return container_of(profile, struct nouveau_pm_level, profile);
+}
+
+const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = {
+	.destroy = nouveau_pm_static_dummy,
+	.init = nouveau_pm_static_dummy,
+	.fini = nouveau_pm_static_dummy,
+	.select = nouveau_pm_static_select,
+};
+
+static int
+nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	memset(perflvl, 0, sizeof(*perflvl));
+
+	if (pm->clocks_get) {
+		ret = pm->clocks_get(dev, perflvl);
+		if (ret)
+			return ret;
+	}
+
+	if (pm->voltage.supported && pm->voltage_get) {
+		ret = pm->voltage_get(dev);
+		if (ret > 0) {
+			perflvl->volt_min = ret;
+			perflvl->volt_max = ret;
+		}
+	}
+
+	if (therm && therm->fan_get) {
+		ret = therm->fan_get(therm);
+		if (ret >= 0)
+			perflvl->fanspeed = ret;
+	}
+
+	nouveau_mem_timing_read(dev, &perflvl->timing);
+	return 0;
+}
+
+static void
+nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
+{
+	char c[16], s[16], v[32], f[16], m[16];
+
+	c[0] = '\0';
+	if (perflvl->core)
+		snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000);
+
+	s[0] = '\0';
+	if (perflvl->shader)
+		snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
+
+	m[0] = '\0';
+	if (perflvl->memory)
+		snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000);
+
+	v[0] = '\0';
+	if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) {
+		snprintf(v, sizeof(v), " voltage %dmV-%dmV",
+			 perflvl->volt_min / 1000, perflvl->volt_max / 1000);
+	} else
+	if (perflvl->volt_min) {
+		snprintf(v, sizeof(v), " voltage %dmV",
+			 perflvl->volt_min / 1000);
+	}
+
+	f[0] = '\0';
+	if (perflvl->fanspeed)
+		snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
+
+	snprintf(ptr, len, "%s%s%s%s%s\n", c, s, m, v, f);
+}
+
+static ssize_t
+nouveau_pm_get_perflvl_info(struct device *d,
+			    struct device_attribute *a, char *buf)
+{
+	struct nouveau_pm_level *perflvl =
+		container_of(a, struct nouveau_pm_level, dev_attr);
+	char *ptr = buf;
+	int len = PAGE_SIZE;
+
+	snprintf(ptr, len, "%d:", perflvl->id);
+	ptr += strlen(buf);
+	len -= strlen(buf);
+
+	nouveau_pm_perflvl_info(perflvl, ptr, len);
+	return strlen(buf);
+}
+
+static ssize_t
+nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_pm_level cur;
+	int len = PAGE_SIZE, ret;
+	char *ptr = buf;
+
+	snprintf(ptr, len, "profile: %s, %s\nc:",
+		 pm->profile_ac->name, pm->profile_dc->name);
+	ptr += strlen(buf);
+	len -= strlen(buf);
+
+	ret = nouveau_pm_perflvl_get(dev, &cur);
+	if (ret == 0)
+		nouveau_pm_perflvl_info(&cur, ptr, len);
+	return strlen(buf);
+}
+
+static ssize_t
+nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a,
+		       const char *buf, size_t count)
+{
+	struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
+	int ret;
+
+	ret = nouveau_pm_profile_set(dev, buf);
+	if (ret)
+		return ret;
+	return strlen(buf);
+}
+
+static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
+		   nouveau_pm_get_perflvl, nouveau_pm_set_perflvl);
+
+static int
+nouveau_sysfs_init(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct device *d = &dev->pdev->dev;
+	int ret, i;
+
+	ret = device_create_file(d, &dev_attr_performance_level);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < pm->nr_perflvl; i++) {
+		struct nouveau_pm_level *perflvl = &pm->perflvl[i];
+
+		perflvl->dev_attr.attr.name = perflvl->name;
+		perflvl->dev_attr.attr.mode = S_IRUGO;
+		perflvl->dev_attr.show = nouveau_pm_get_perflvl_info;
+		perflvl->dev_attr.store = NULL;
+		sysfs_attr_init(&perflvl->dev_attr.attr);
+
+		ret = device_create_file(d, &perflvl->dev_attr);
+		if (ret) {
+			NV_ERROR(drm, "failed perflvl %d sysfs: %d\n",
+				 perflvl->id, ret);
+			perflvl->dev_attr.attr.name = NULL;
+			nouveau_pm_fini(dev);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+nouveau_sysfs_fini(struct drm_device *dev)
+{
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct device *d = &dev->pdev->dev;
+	int i;
+
+	device_remove_file(d, &dev_attr_performance_level);
+	for (i = 0; i < pm->nr_perflvl; i++) {
+		struct nouveau_pm_level *pl = &pm->perflvl[i];
+
+		if (!pl->dev_attr.attr.name)
+			break;
+
+		device_remove_file(d, &pl->dev_attr);
+	}
+}
+
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+static ssize_t
+nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int temp = therm->temp_get(therm);
+
+	if (temp < 0)
+		return temp;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
+						  NULL, 0);
+
+static ssize_t
+nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d,
+					 struct device_attribute *a, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", 100);
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, S_IRUGO,
+			  nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
+				     struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	      therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
+					 struct device_attribute *a,
+					 const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_temp1_auto_point1_temp,
+			  nouveau_hwmon_set_temp1_auto_point1_temp, 0);
+
+static ssize_t
+nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
+					  struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
+					      struct device_attribute *a,
+					      const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_temp1_auto_point1_temp_hyst,
+			  nouveau_hwmon_set_temp1_auto_point1_temp_hyst, 0);
+
+static ssize_t
+nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
+						const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK, value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
+						  nouveau_hwmon_set_max_temp,
+						  0);
+
+static ssize_t
+nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
+			    char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
+						const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_max_temp_hyst,
+			  nouveau_hwmon_set_max_temp_hyst, 0);
+
+static ssize_t
+nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
+							char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
+							    const char *buf,
+								size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL, value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
+						nouveau_hwmon_critical_temp,
+						nouveau_hwmon_set_critical_temp,
+						0);
+
+static ssize_t
+nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
+							char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_critical_temp_hyst(struct device *d,
+				     struct device_attribute *a,
+				     const char *buf,
+				     size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_critical_temp_hyst,
+			  nouveau_hwmon_set_critical_temp_hyst, 0);
+static ssize_t
+nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
+							char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	       therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
+							    const char *buf,
+								size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN, value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO | S_IWUSR,
+					nouveau_hwmon_emergency_temp,
+					nouveau_hwmon_set_emergency_temp,
+					0);
+
+static ssize_t
+nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
+							char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	  therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
+}
+static ssize_t
+nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
+				      struct device_attribute *a,
+				      const char *buf,
+				      size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST,
+			value / 1000);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO | S_IWUSR,
+					nouveau_hwmon_emergency_temp_hyst,
+					nouveau_hwmon_set_emergency_temp_hyst,
+					0);
+
+static ssize_t nouveau_hwmon_show_name(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	return sprintf(buf, "nouveau\n");
+}
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0);
+
+static ssize_t nouveau_hwmon_show_update_rate(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	return sprintf(buf, "1000\n");
+}
+static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
+						nouveau_hwmon_show_update_rate,
+						NULL, 0);
+
+static ssize_t
+nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
+			      char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
+}
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
+			  NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm1_enable(struct device *d,
+			   struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
+			   const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+	int ret;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return -EINVAL;
+
+	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE, value);
+	if (ret)
+		return ret;
+	else
+		return count;
+}
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1_enable,
+			  nouveau_hwmon_set_pwm1_enable, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->fan_get(therm);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
+		       const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret = -ENODEV;
+	long value;
+
+	if (nouveau_perflvl_wr != 7777)
+		return -EPERM;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return -EINVAL;
+
+	ret = therm->fan_set(therm, value);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1,
+			  nouveau_hwmon_set_pwm1, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm1_min(struct device *d,
+			   struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
+			   const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+	int ret;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return -EINVAL;
+
+	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY, value);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1_min,
+			  nouveau_hwmon_set_pwm1_min, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm1_max(struct device *d,
+			   struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	int ret;
+
+	ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
+			   const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	long value;
+	int ret;
+
+	if (kstrtol(buf, 10, &value) == -EINVAL)
+		return -EINVAL;
+
+	ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY, value);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
+			  nouveau_hwmon_get_pwm1_max,
+			  nouveau_hwmon_set_pwm1_max, 0);
+
+static struct attribute *hwmon_default_attributes[] = {
+	&sensor_dev_attr_name.dev_attr.attr,
+	&sensor_dev_attr_update_rate.dev_attr.attr,
+	NULL
+};
+static struct attribute *hwmon_temp_attributes[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_temp1_auto_point1_temp_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp1_max.dev_attr.attr,
+	&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
+	&sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
+	NULL
+};
+static struct attribute *hwmon_fan_rpm_attributes[] = {
+	&sensor_dev_attr_fan1_input.dev_attr.attr,
+	NULL
+};
+static struct attribute *hwmon_pwm_fan_attributes[] = {
+	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm1.dev_attr.attr,
+	&sensor_dev_attr_pwm1_min.dev_attr.attr,
+	&sensor_dev_attr_pwm1_max.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group hwmon_default_attrgroup = {
+	.attrs = hwmon_default_attributes,
+};
+static const struct attribute_group hwmon_temp_attrgroup = {
+	.attrs = hwmon_temp_attributes,
+};
+static const struct attribute_group hwmon_fan_rpm_attrgroup = {
+	.attrs = hwmon_fan_rpm_attributes,
+};
+static const struct attribute_group hwmon_pwm_fan_attrgroup = {
+	.attrs = hwmon_pwm_fan_attributes,
+};
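+
+/* The attributes are split into groups so nouveau_hwmon_init() below can
+ * register only what the board actually supports: the default group is
+ * always created, while the temperature, PWM-fan and fan-RPM groups are
+ * conditional. */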
+#endif
+
+static int
+nouveau_hwmon_init(struct drm_device *dev)
+{
+	struct nouveau_pm *pm = nouveau_pm(dev);
+
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_therm *therm = nouveau_therm(drm->device);
+	struct device *hwmon_dev;
+	int ret = 0;
+
+	if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
+		return -ENODEV;
+
+	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
+	if (IS_ERR(hwmon_dev)) {
+		ret = PTR_ERR(hwmon_dev);
+		NV_ERROR(drm, "Unable to register hwmon device: %d\n", ret);
+		return ret;
+	}
+	dev_set_drvdata(hwmon_dev, dev);
+
+	/* set the default attributes */
+	ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup);
+	if (ret)
+		goto error;
+
+	/* if the card has a working thermal sensor */
+	if (therm->temp_get(therm) >= 0) {
+		ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
+		if (ret)
+			goto error;
+	}
+
+	/* if the card has a pwm fan */
+	/*XXX: incorrect, need better detection for this, some boards have
+	 *     the gpio entries for pwm fan control even when there's no
+	 *     actual fan connected to it... therm table? */
+	if (therm->fan_get && therm->fan_get(therm) >= 0) {
+		ret = sysfs_create_group(&hwmon_dev->kobj,
+					 &hwmon_pwm_fan_attrgroup);
+		if (ret)
+			goto error;
+	}
+
+	/* if the card can read the fan rpm */
+	if (therm->fan_sense(therm) >= 0) {
+		ret = sysfs_create_group(&hwmon_dev->kobj,
+					 &hwmon_fan_rpm_attrgroup);
+		if (ret)
+			goto error;
+	}
+
+	pm->hwmon = hwmon_dev;
+
+	return 0;
+
+error:
+	NV_ERROR(drm, "Unable to create some hwmon sysfs files: %d\n", ret);
+	hwmon_device_unregister(hwmon_dev);
+	pm->hwmon = NULL;
+	return ret;
+#else
+	pm->hwmon = NULL;
+	return 0;
+#endif
+}
+
+static void
+nouveau_hwmon_fini(struct drm_device *dev)
+{
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+	struct nouveau_pm *pm = nouveau_pm(dev);
+
+	if (pm->hwmon) {
+		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup);
+		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup);
+		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
+		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
+
+		hwmon_device_unregister(pm->hwmon);
+	}
+#endif
+}
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
+static int
+nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
+{
+	struct nouveau_pm *pm = container_of(nb, struct nouveau_pm, acpi_nb);
+	struct nouveau_drm *drm = nouveau_drm(pm->dev);
+	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+	if (strcmp(entry->device_class, "ac_adapter") == 0) {
+		bool ac = power_supply_is_system_supplied();
+
+		NV_DEBUG(drm, "power supply changed: %s\n", ac ? "AC" : "DC");
+		nouveau_pm_trigger(pm->dev);
+	}
+
+	return NOTIFY_OK;
+}
+#endif
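+
+/* On an ac_adapter event, nouveau_pm_trigger() re-evaluates the active
+ * profile, switching between pm->profile_ac and pm->profile_dc (both
+ * default to the boot profile until overridden).
+ */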
+
+int
+nouveau_pm_init(struct drm_device *dev)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_pm *pm;
+	char info[256];
+	int ret, i;
+
+	pm = drm->pm = kzalloc(sizeof(*pm), GFP_KERNEL);
+	if (!pm)
+		return -ENOMEM;
+
+	pm->dev = dev;
+
+	if (device->card_type < NV_40) {
+		pm->clocks_get = nv04_pm_clocks_get;
+		pm->clocks_pre = nv04_pm_clocks_pre;
+		pm->clocks_set = nv04_pm_clocks_set;
+		if (nouveau_gpio(drm->device)) {
+			pm->voltage_get = nouveau_voltage_gpio_get;
+			pm->voltage_set = nouveau_voltage_gpio_set;
+		}
+	} else
+	if (device->card_type < NV_50) {
+		pm->clocks_get = nv40_pm_clocks_get;
+		pm->clocks_pre = nv40_pm_clocks_pre;
+		pm->clocks_set = nv40_pm_clocks_set;
+		pm->voltage_get = nouveau_voltage_gpio_get;
+		pm->voltage_set = nouveau_voltage_gpio_set;
+	} else
+	if (device->card_type < NV_C0) {
+		if (device->chipset <  0xa3 ||
+		    device->chipset == 0xaa ||
+		    device->chipset == 0xac) {
+			pm->clocks_get = nv50_pm_clocks_get;
+			pm->clocks_pre = nv50_pm_clocks_pre;
+			pm->clocks_set = nv50_pm_clocks_set;
+		} else {
+			pm->clocks_get = nva3_pm_clocks_get;
+			pm->clocks_pre = nva3_pm_clocks_pre;
+			pm->clocks_set = nva3_pm_clocks_set;
+		}
+		pm->voltage_get = nouveau_voltage_gpio_get;
+		pm->voltage_set = nouveau_voltage_gpio_set;
+	} else
+	if (device->card_type < NV_E0) {
+		pm->clocks_get = nvc0_pm_clocks_get;
+		pm->clocks_pre = nvc0_pm_clocks_pre;
+		pm->clocks_set = nvc0_pm_clocks_set;
+		pm->voltage_get = nouveau_voltage_gpio_get;
+		pm->voltage_set = nouveau_voltage_gpio_set;
+	}
+
+	/* parse aux tables from vbios */
+	nouveau_volt_init(dev);
+
+	INIT_LIST_HEAD(&pm->profiles);
+
+	/* determine current ("boot") performance level */
+	ret = nouveau_pm_perflvl_get(dev, &pm->boot);
+	if (ret) {
+		NV_ERROR(drm, "failed to determine boot perflvl\n");
+		return ret;
+	}
+
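+	/* pm was kzalloc'd, so a 4-byte copy still leaves "boot" NUL-terminated */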
+	strncpy(pm->boot.name, "boot", 4);
+	strncpy(pm->boot.profile.name, "boot", 4);
+	pm->boot.profile.func = &nouveau_pm_static_profile_func;
+
+	list_add(&pm->boot.profile.head, &pm->profiles);
+
+	pm->profile_ac = &pm->boot.profile;
+	pm->profile_dc = &pm->boot.profile;
+	pm->profile = &pm->boot.profile;
+	pm->cur = &pm->boot;
+
+	/* add performance levels from vbios */
+	nouveau_perf_init(dev);
+
+	/* display available performance levels */
+	NV_INFO(drm, "%d available performance level(s)\n", pm->nr_perflvl);
+	for (i = 0; i < pm->nr_perflvl; i++) {
+		nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
+		NV_INFO(drm, "%d:%s", pm->perflvl[i].id, info);
+	}
+
+	nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
+	NV_INFO(drm, "c:%s", info);
+
+	/* switch performance levels now if requested */
+	if (nouveau_perflvl != NULL)
+		nouveau_pm_profile_set(dev, nouveau_perflvl);
+
+	nouveau_sysfs_init(dev);
+	nouveau_hwmon_init(dev);
+#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
+	pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
+	register_acpi_notifier(&pm->acpi_nb);
+#endif
+
+	return 0;
+}
+
+void
+nouveau_pm_fini(struct drm_device *dev)
+{
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_pm_profile *profile, *tmp;
+
+	list_for_each_entry_safe(profile, tmp, &pm->profiles, head) {
+		list_del(&profile->head);
+		profile->func->destroy(profile);
+	}
+
+	if (pm->cur != &pm->boot)
+		nouveau_pm_perflvl_set(dev, &pm->boot);
+
+	nouveau_perf_fini(dev);
+	nouveau_volt_fini(dev);
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
+	unregister_acpi_notifier(&pm->acpi_nb);
+#endif
+	nouveau_hwmon_fini(dev);
+	nouveau_sysfs_fini(dev);
+
+	nouveau_drm(dev)->pm = NULL;
+	kfree(pm);
+}
+
+void
+nouveau_pm_resume(struct drm_device *dev)
+{
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_pm_level *perflvl;
+
+	if (!pm->cur || pm->cur == &pm->boot)
+		return;
+
+	perflvl = pm->cur;
+	pm->cur = &pm->boot;
+	nouveau_pm_perflvl_set(dev, perflvl);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_pm.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_pm.h
new file mode 100644
index 0000000..73b789c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_PM_H__
+#define __NOUVEAU_PM_H__
+
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+
+struct nouveau_pm_voltage_level {
+	u32 voltage; /* microvolts */
+	u8  vid;
+};
+
+struct nouveau_pm_voltage {
+	bool supported;
+	u8 version;
+	u8 vid_mask;
+
+	struct nouveau_pm_voltage_level *level;
+	int nr_level;
+};
+
+/* Exclusive upper limits */
+#define NV_MEM_CL_DDR2_MAX 8
+#define NV_MEM_WR_DDR2_MAX 9
+#define NV_MEM_CL_DDR3_MAX 17
+#define NV_MEM_WR_DDR3_MAX 17
+#define NV_MEM_CL_GDDR3_MAX 16
+#define NV_MEM_WR_GDDR3_MAX 18
+#define NV_MEM_CL_GDDR5_MAX 21
+#define NV_MEM_WR_GDDR5_MAX 20
+
+struct nouveau_pm_memtiming {
+	int id;
+
+	u32 reg[9];
+	u32 mr[4];
+
+	u8 tCWL;
+
+	u8 odt;
+	u8 drive_strength;
+};
+
+struct nouveau_pm_tbl_header {
+	u8 version;
+	u8 header_len;
+	u8 entry_cnt;
+	u8 entry_len;
+};
+
+struct nouveau_pm_tbl_entry {
+	u8 tWR;
+	u8 tWTR;
+	u8 tCL;
+	u8 tRC;
+	u8 empty_4;
+	u8 tRFC;	/* Byte 5 */
+	u8 empty_6;
+	u8 tRAS;	/* Byte 7 */
+	u8 empty_8;
+	u8 tRP;		/* Byte 9 */
+	u8 tRCDRD;
+	u8 tRCDWR;
+	u8 tRRD;
+	u8 tUNK_13;
+	u8 RAM_FT1;		/* 14, a bitmask of random RAM features */
+	u8 empty_15;
+	u8 tUNK_16;
+	u8 empty_17;
+	u8 tUNK_18;
+	u8 tCWL;
+	u8 tUNK_20, tUNK_21;
+};
+
+struct nouveau_pm_profile;
+struct nouveau_pm_profile_func {
+	void (*destroy)(struct nouveau_pm_profile *);
+	void (*init)(struct nouveau_pm_profile *);
+	void (*fini)(struct nouveau_pm_profile *);
+	struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
+};
+
+struct nouveau_pm_profile {
+	const struct nouveau_pm_profile_func *func;
+	struct list_head head;
+	char name[8];
+};
+
+#define NOUVEAU_PM_MAX_LEVEL 8
+struct nouveau_pm_level {
+	struct nouveau_pm_profile profile;
+	struct device_attribute dev_attr;
+	char name[32];
+	int id;
+
+	struct nouveau_pm_memtiming timing;
+	u32 memory;
+	u16 memscript;
+
+	u32 core;
+	u32 shader;
+	u32 rop;
+	u32 copy;
+	u32 daemon;
+	u32 vdec;
+	u32 dom6;
+	u32 unka0;	/* nva3:nvc0 */
+	u32 hub01;	/* nvc0- */
+	u32 hub06;	/* nvc0- */
+	u32 hub07;	/* nvc0- */
+
+	u32 volt_min; /* microvolts */
+	u32 volt_max;
+	u8  fanspeed;
+};
+
+struct nouveau_pm_temp_sensor_constants {
+	u16 offset_constant;
+	s16 offset_mult;
+	s16 offset_div;
+	s16 slope_mult;
+	s16 slope_div;
+};
+
+struct nouveau_pm_threshold_temp {
+	s16 critical;
+	s16 down_clock;
+};
+
+struct nouveau_pm {
+	struct drm_device *dev;
+
+	struct nouveau_pm_voltage voltage;
+	struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
+	int nr_perflvl;
+	struct nouveau_pm_temp_sensor_constants sensor_constants;
+	struct nouveau_pm_threshold_temp threshold_temp;
+
+	struct nouveau_pm_profile *profile_ac;
+	struct nouveau_pm_profile *profile_dc;
+	struct nouveau_pm_profile *profile;
+	struct list_head profiles;
+
+	struct nouveau_pm_level boot;
+	struct nouveau_pm_level *cur;
+
+	struct device *hwmon;
+	struct notifier_block acpi_nb;
+
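+	/* three-phase reclock: clocks_get() reads the current state,
+	 * clocks_pre() pre-computes a chipset-specific state object and
+	 * clocks_set() commits it */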
+	int  (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
+	void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
+	int (*clocks_set)(struct drm_device *, void *);
+
+	int (*voltage_get)(struct drm_device *);
+	int (*voltage_set)(struct drm_device *, int voltage);
+};
+
+static inline struct nouveau_pm *
+nouveau_pm(struct drm_device *dev)
+{
+	return nouveau_drm(dev)->pm;
+}
+
+struct nouveau_mem_exec_func {
+	struct drm_device *dev;
+	void (*precharge)(struct nouveau_mem_exec_func *);
+	void (*refresh)(struct nouveau_mem_exec_func *);
+	void (*refresh_auto)(struct nouveau_mem_exec_func *, bool);
+	void (*refresh_self)(struct nouveau_mem_exec_func *, bool);
+	void (*wait)(struct nouveau_mem_exec_func *, u32 nsec);
+	u32  (*mrg)(struct nouveau_mem_exec_func *, int mr);
+	void (*mrs)(struct nouveau_mem_exec_func *, int mr, u32 data);
+	void (*clock_set)(struct nouveau_mem_exec_func *);
+	void (*timing_set)(struct nouveau_mem_exec_func *);
+	void *priv;
+};
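+
+/* nouveau_mem_exec() drives these hooks in the usual DRAM reclock order
+ * (roughly: precharge, stop auto-refresh, enter self-refresh, switch the
+ * clock, rewrite mode registers and timings, then resume refresh). */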
+
+/* nouveau_mem.c */
+int  nouveau_mem_exec(struct nouveau_mem_exec_func *,
+		      struct nouveau_pm_level *);
+
+/* nouveau_pm.c */
+int  nouveau_pm_init(struct drm_device *dev);
+void nouveau_pm_fini(struct drm_device *dev);
+void nouveau_pm_resume(struct drm_device *dev);
+extern const struct nouveau_pm_profile_func nouveau_pm_static_profile_func;
+void nouveau_pm_trigger(struct drm_device *dev);
+
+/* nouveau_volt.c */
+void nouveau_volt_init(struct drm_device *);
+void nouveau_volt_fini(struct drm_device *);
+int  nouveau_volt_vid_lookup(struct drm_device *, int voltage);
+int  nouveau_volt_lvl_lookup(struct drm_device *, int vid);
+int  nouveau_voltage_gpio_get(struct drm_device *);
+int  nouveau_voltage_gpio_set(struct drm_device *, int voltage);
+
+/* nouveau_perf.c */
+void nouveau_perf_init(struct drm_device *);
+void nouveau_perf_fini(struct drm_device *);
+u8 *nouveau_perf_rammap(struct drm_device *, u32 freq, u8 *ver,
+			u8 *hdr, u8 *cnt, u8 *len);
+u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
+u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
+
+/* nouveau_mem.c */
+void nouveau_mem_timing_init(struct drm_device *);
+void nouveau_mem_timing_fini(struct drm_device *);
+
+/* nv04_pm.c */
+int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nv04_pm_clocks_set(struct drm_device *, void *);
+
+/* nv40_pm.c */
+int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nv40_pm_clocks_set(struct drm_device *, void *);
+int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
+int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
+
+/* nv50_pm.c */
+int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nv50_pm_clocks_set(struct drm_device *, void *);
+int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
+int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
+
+/* nva3_pm.c */
+int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nva3_pm_clocks_set(struct drm_device *, void *);
+
+/* nvc0_pm.c */
+int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nvc0_pm_clocks_set(struct drm_device *, void *);
+
+/* nouveau_mem.c */
+int  nouveau_mem_timing_calc(struct drm_device *, u32 freq,
+			     struct nouveau_pm_memtiming *);
+void nouveau_mem_timing_read(struct drm_device *,
+			     struct nouveau_pm_memtiming *);
+
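+/* Thin inline wrapper around clock->pll_calc(); note that it never writes
+ * *fN (fractional N), so callers must initialise fN themselves. */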
+static inline int
+nva3_calc_pll(struct drm_device *dev, struct nvbios_pll *pll, u32 freq,
+	      int *N, int *fN, int *M, int *P)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_clock *clk = nouveau_clock(device);
+	struct nouveau_pll_vals pv;
+	int ret;
+
+	ret = clk->pll_calc(clk, pll, freq, &pv);
+	*N = pv.N1;
+	*M = pv.M1;
+	*P = pv.log2P;
+	return ret;
+}
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_prime.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_prime.c
new file mode 100644
index 0000000..f53e108
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
+
+#include <drm/drmP.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_gem.h"
+
+struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+	int npages = nvbo->bo.num_pages;
+
+	return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
+}
+
+void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+	int ret;
+
+	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
+			  &nvbo->dma_buf_vmap);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return nvbo->dma_buf_vmap.virtual;
+}
+
+void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+
+	ttm_bo_kunmap(&nvbo->dma_buf_vmap);
+}
+
+struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
+							 size_t size,
+							 struct sg_table *sg)
+{
+	struct nouveau_bo *nvbo;
+	u32 flags = 0;
+	int ret;
+
+	flags = TTM_PL_FLAG_TT;
+
+	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
+			     sg, &nvbo);
+	if (ret)
+		return ERR_PTR(ret);
+
+	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
+	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
+	if (!nvbo->gem) {
+		nouveau_bo_ref(NULL, &nvbo);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	nvbo->gem->driver_private = nvbo;
+	return nvbo->gem;
+}
+
+int nouveau_gem_prime_pin(struct drm_gem_object *obj)
+{
+	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
+	int ret;
+
+	/* pin buffer into GTT */
+	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
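+
+/* Rough PRIME flow: on export the DRM core pins the buffer via
+ * nouveau_gem_prime_pin() and fetches its backing pages with
+ * nouveau_gem_prime_get_sg_table(); on import,
+ * nouveau_gem_prime_import_sg_table() wraps the foreign sg_table in a
+ * GART-only TTM buffer so the GPU can reach it. */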
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_reg.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_reg.h
new file mode 100644
index 0000000..43a96b9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -0,0 +1,858 @@
+
+#define NV04_PFB_BOOT_0						0x00100000
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT			0x00000003
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB			0x00000000
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB			0x00000001
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB			0x00000002
+#	define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB			0x00000003
+#	define NV04_PFB_BOOT_0_RAM_WIDTH_128			0x00000004
+#	define NV04_PFB_BOOT_0_RAM_TYPE				0x00000028
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT		0x00000000
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT		0x00000008
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK	0x00000010
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT		0x00000018
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT		0x00000020
+#	define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16		0x00000028
+#	define NV04_PFB_BOOT_0_UMA_ENABLE			0x00000100
+#	define NV04_PFB_BOOT_0_UMA_SIZE				0x0000f000
+#define NV04_PFB_DEBUG_0					0x00100080
+#	define NV04_PFB_DEBUG_0_PAGE_MODE			0x00000001
+#	define NV04_PFB_DEBUG_0_REFRESH_OFF			0x00000010
+#	define NV04_PFB_DEBUG_0_REFRESH_COUNTX64		0x00003f00
+#	define NV04_PFB_DEBUG_0_REFRESH_SLOW_CLK		0x00004000
+#	define NV04_PFB_DEBUG_0_SAFE_MODE			0x00008000
+#	define NV04_PFB_DEBUG_0_ALOM_ENABLE			0x00010000
+#	define NV04_PFB_DEBUG_0_CASOE				0x00100000
+#	define NV04_PFB_DEBUG_0_CKE_INVERT			0x10000000
+#	define NV04_PFB_DEBUG_0_REFINC				0x20000000
+#	define NV04_PFB_DEBUG_0_SAVE_POWER_OFF			0x40000000
+#define NV04_PFB_CFG0						0x00100200
+#	define NV04_PFB_CFG0_SCRAMBLE				0x20000000
+#define NV04_PFB_CFG1						0x00100204
+#define NV04_PFB_FIFO_DATA					0x0010020c
+#	define NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK		0xfff00000
+#	define NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_SHIFT		20
+#define NV10_PFB_REFCTRL					0x00100210
+#	define NV10_PFB_REFCTRL_VALID_1				(1 << 31)
+#define NV04_PFB_PAD						0x0010021c
+#	define NV04_PFB_PAD_CKE_NORMAL				(1 << 0)
+#define NV10_PFB_TILE(i)                              (0x00100240 + (i*16))
+#define NV10_PFB_TILE__SIZE					8
+#define NV10_PFB_TLIMIT(i)                            (0x00100244 + (i*16))
+#define NV10_PFB_TSIZE(i)                             (0x00100248 + (i*16))
+#define NV10_PFB_TSTATUS(i)                           (0x0010024c + (i*16))
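+/* each of the 8 tile regions packs TILE/TLIMIT/TSIZE/TSTATUS at a 16-byte
+ * stride, e.g. NV10_PFB_TILE(2) = 0x00100240 + 2*16 = 0x00100260 */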
+#define NV04_PFB_REF						0x001002d0
+#	define NV04_PFB_REF_CMD_REFRESH				(1 << 0)
+#define NV04_PFB_PRE						0x001002d4
+#	define NV04_PFB_PRE_CMD_PRECHARGE			(1 << 0)
+#define NV20_PFB_ZCOMP(i)                              (0x00100300 + 4*(i))
+#	define NV20_PFB_ZCOMP_MODE_32				(4 << 24)
+#	define NV20_PFB_ZCOMP_EN				(1 << 31)
+#	define NV25_PFB_ZCOMP_MODE_16				(1 << 20)
+#	define NV25_PFB_ZCOMP_MODE_32				(2 << 20)
+#define NV10_PFB_CLOSE_PAGE2					0x0010033c
+#define NV04_PFB_SCRAMBLE(i)                         (0x00100400 + 4 * (i))
+#define NV40_PFB_TILE(i)                              (0x00100600 + (i*16))
+#define NV40_PFB_TILE__SIZE_0					12
+#define NV40_PFB_TILE__SIZE_1					15
+#define NV40_PFB_TLIMIT(i)                            (0x00100604 + (i*16))
+#define NV40_PFB_TSIZE(i)                             (0x00100608 + (i*16))
+#define NV40_PFB_TSTATUS(i)                           (0x0010060c + (i*16))
+#define NV40_PFB_UNK_800					0x00100800
+
+#define NV_PEXTDEV_BOOT_0					0x00101000
+#define NV_PEXTDEV_BOOT_0_RAMCFG				0x0000003c
+#	define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT		(8 << 12)
+#define NV_PEXTDEV_BOOT_3					0x0010100c
+
+#define NV_RAMIN                                           0x00700000
+
+#define NV_RAMHT_HANDLE_OFFSET                             0
+#define NV_RAMHT_CONTEXT_OFFSET                            4
+#    define NV_RAMHT_CONTEXT_VALID                         (1<<31)
+#    define NV_RAMHT_CONTEXT_CHANNEL_SHIFT                 24
+#    define NV_RAMHT_CONTEXT_ENGINE_SHIFT                  16
+#        define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE           0
+#        define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS           1
+#    define NV_RAMHT_CONTEXT_INSTANCE_SHIFT                0
+#    define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT               23
+#    define NV40_RAMHT_CONTEXT_ENGINE_SHIFT                20
+#    define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT              0
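+
+/* A RAMHT entry is 8 bytes: the object handle at +0 and a context word at
+ * +4.  Pre-NV40 the context packs (instance | engine << 16 | channel << 24
+ * | VALID); NV40 moves channel/engine to bits 23/20 as defined above.
+ */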
+
+/* Some object classes we care about in the drm */
+#define NV_CLASS_DMA_FROM_MEMORY                           0x00000002
+#define NV_CLASS_DMA_TO_MEMORY                             0x00000003
+#define NV_CLASS_NULL                                      0x00000030
+#define NV_CLASS_DMA_IN_MEMORY                             0x0000003D
+
+#define NV03_USER(i)                             (0x00800000+(i*NV03_USER_SIZE))
+#define NV03_USER__SIZE                                                       16
+#define NV10_USER__SIZE                                                       32
+#define NV03_USER_SIZE                                                0x00010000
+#define NV03_USER_DMA_PUT(i)                     (0x00800040+(i*NV03_USER_SIZE))
+#define NV03_USER_DMA_PUT__SIZE                                               16
+#define NV10_USER_DMA_PUT__SIZE                                               32
+#define NV03_USER_DMA_GET(i)                     (0x00800044+(i*NV03_USER_SIZE))
+#define NV03_USER_DMA_GET__SIZE                                               16
+#define NV10_USER_DMA_GET__SIZE                                               32
+#define NV03_USER_REF_CNT(i)                     (0x00800048+(i*NV03_USER_SIZE))
+#define NV03_USER_REF_CNT__SIZE                                               16
+#define NV10_USER_REF_CNT__SIZE                                               32
+
+#define NV40_USER(i)                             (0x00c00000+(i*NV40_USER_SIZE))
+#define NV40_USER_SIZE                                                0x00001000
+#define NV40_USER_DMA_PUT(i)                     (0x00c00040+(i*NV40_USER_SIZE))
+#define NV40_USER_DMA_PUT__SIZE                                               32
+#define NV40_USER_DMA_GET(i)                     (0x00c00044+(i*NV40_USER_SIZE))
+#define NV40_USER_DMA_GET__SIZE                                               32
+#define NV40_USER_REF_CNT(i)                     (0x00c00048+(i*NV40_USER_SIZE))
+#define NV40_USER_REF_CNT__SIZE                                               32
+
+#define NV50_USER(i)                             (0x00c00000+(i*NV50_USER_SIZE))
+#define NV50_USER_SIZE                                                0x00002000
+#define NV50_USER_DMA_PUT(i)                     (0x00c00040+(i*NV50_USER_SIZE))
+#define NV50_USER_DMA_PUT__SIZE                                              128
+#define NV50_USER_DMA_GET(i)                     (0x00c00044+(i*NV50_USER_SIZE))
+#define NV50_USER_DMA_GET__SIZE                                              128
+#define NV50_USER_REF_CNT(i)                     (0x00c00048+(i*NV50_USER_SIZE))
+#define NV50_USER_REF_CNT__SIZE                                              128
+
+#define NV03_FIFO_SIZE                                     0x8000UL
+
+#define NV03_PMC_BOOT_0                                    0x00000000
+#define NV03_PMC_BOOT_1                                    0x00000004
+#define NV03_PMC_INTR_0                                    0x00000100
+#    define NV_PMC_INTR_0_PFIFO_PENDING                        (1<<8)
+#    define NV_PMC_INTR_0_PGRAPH_PENDING                      (1<<12)
+#    define NV_PMC_INTR_0_NV50_I2C_PENDING                    (1<<21)
+#    define NV_PMC_INTR_0_CRTC0_PENDING                       (1<<24)
+#    define NV_PMC_INTR_0_CRTC1_PENDING                       (1<<25)
+#    define NV_PMC_INTR_0_NV50_DISPLAY_PENDING                (1<<26)
+#    define NV_PMC_INTR_0_CRTCn_PENDING                       (3<<24)
+#define NV03_PMC_INTR_EN_0                                 0x00000140
+#    define NV_PMC_INTR_EN_0_MASTER_ENABLE                     (1<<0)
+#define NV03_PMC_ENABLE                                    0x00000200
+#    define NV_PMC_ENABLE_PFIFO                                (1<<8)
+#    define NV_PMC_ENABLE_PGRAPH                              (1<<12)
+/* Disabling the bit below breaks newer (G7X-only?) mobile chipsets:
+ * the card will hang early in the X init process.
+ */
+#    define NV_PMC_ENABLE_UNK13                               (1<<13)
+#define NV40_PMC_GRAPH_UNITS				   0x00001540
+#define NV40_PMC_BACKLIGHT				   0x000015f0
+#	define NV40_PMC_BACKLIGHT_MASK			   0x001f0000
+#define NV40_PMC_1700                                      0x00001700
+#define NV40_PMC_1704                                      0x00001704
+#define NV40_PMC_1708                                      0x00001708
+#define NV40_PMC_170C                                      0x0000170C
+
+/* probably PMC? */
+#define NV50_PUNK_BAR0_PRAMIN                              0x00001700
+#define NV50_PUNK_BAR_CFG_BASE                             0x00001704
+#define NV50_PUNK_BAR_CFG_BASE_VALID                          (1<<30)
+#define NV50_PUNK_BAR1_CTXDMA                              0x00001708
+#define NV50_PUNK_BAR1_CTXDMA_VALID                           (1<<31)
+#define NV50_PUNK_BAR3_CTXDMA                              0x0000170C
+#define NV50_PUNK_BAR3_CTXDMA_VALID                           (1<<31)
+#define NV50_PUNK_UNK1710                                  0x00001710
+
+#define NV04_PBUS_PCI_NV_1                                 0x00001804
+#define NV04_PBUS_PCI_NV_19                                0x0000184C
+#define NV04_PBUS_PCI_NV_20				0x00001850
+#	define NV04_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED		(0 << 0)
+#	define NV04_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED		(1 << 0)
+
+#define NV04_PTIMER_INTR_0                                 0x00009100
+#define NV04_PTIMER_INTR_EN_0                              0x00009140
+#define NV04_PTIMER_NUMERATOR                              0x00009200
+#define NV04_PTIMER_DENOMINATOR                            0x00009210
+#define NV04_PTIMER_TIME_0                                 0x00009400
+#define NV04_PTIMER_TIME_1                                 0x00009410
+#define NV04_PTIMER_ALARM_0                                0x00009420
+
+#define NV04_PGRAPH_DEBUG_0                                0x00400080
+#define NV04_PGRAPH_DEBUG_1                                0x00400084
+#define NV04_PGRAPH_DEBUG_2                                0x00400088
+#define NV04_PGRAPH_DEBUG_3                                0x0040008c
+#define NV10_PGRAPH_DEBUG_4                                0x00400090
+#define NV03_PGRAPH_INTR                                   0x00400100
+#define NV03_PGRAPH_NSTATUS                                0x00400104
+#    define NV04_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<11)
+#    define NV04_PGRAPH_NSTATUS_INVALID_STATE                 (1<<12)
+#    define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<13)
+#    define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<14)
+#    define NV10_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<23)
+#    define NV10_PGRAPH_NSTATUS_INVALID_STATE                 (1<<24)
+#    define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<25)
+#    define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<26)
+#define NV03_PGRAPH_NSOURCE                                0x00400108
+#    define NV03_PGRAPH_NSOURCE_NOTIFICATION                   (1<<0)
+#    define NV03_PGRAPH_NSOURCE_DATA_ERROR                     (1<<1)
+#    define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR               (1<<2)
+#    define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION                (1<<3)
+#    define NV03_PGRAPH_NSOURCE_LIMIT_COLOR                    (1<<4)
+#    define NV03_PGRAPH_NSOURCE_LIMIT_ZETA                     (1<<5)
+#    define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD                   (1<<6)
+#    define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION               (1<<7)
+#    define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION               (1<<8)
+#    define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION               (1<<9)
+#    define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION               (1<<10)
+#    define NV03_PGRAPH_NSOURCE_STATE_INVALID                 (1<<11)
+#    define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY                 (1<<12)
+#    define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE                 (1<<13)
+#    define NV03_PGRAPH_NSOURCE_METHOD_CNT                    (1<<14)
+#    define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION              (1<<15)
+#    define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION            (1<<16)
+#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A                   (1<<17)
+#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B                   (1<<18)
+#define NV03_PGRAPH_INTR_EN                                0x00400140
+#define NV40_PGRAPH_INTR_EN                                0x0040013C
+#    define NV_PGRAPH_INTR_NOTIFY                              (1<<0)
+#    define NV_PGRAPH_INTR_MISSING_HW                          (1<<4)
+#    define NV_PGRAPH_INTR_CONTEXT_SWITCH                     (1<<12)
+#    define NV_PGRAPH_INTR_BUFFER_NOTIFY                      (1<<16)
+#    define NV_PGRAPH_INTR_ERROR                              (1<<20)
+#define NV10_PGRAPH_CTX_CONTROL                            0x00400144
+#define NV10_PGRAPH_CTX_USER                               0x00400148
+#define NV10_PGRAPH_CTX_SWITCH(i)                         (0x0040014C + 0x4*(i))
+#define NV04_PGRAPH_CTX_SWITCH1                            0x00400160
+#define NV10_PGRAPH_CTX_CACHE(i, j)                       (0x00400160	\
+							   + 0x4*(i) + 0x20*(j))
+#define NV04_PGRAPH_CTX_SWITCH2                            0x00400164
+#define NV04_PGRAPH_CTX_SWITCH3                            0x00400168
+#define NV04_PGRAPH_CTX_SWITCH4                            0x0040016C
+#define NV04_PGRAPH_CTX_CONTROL                            0x00400170
+#define NV04_PGRAPH_CTX_USER                               0x00400174
+#define NV04_PGRAPH_CTX_CACHE1                             0x00400180
+#define NV03_PGRAPH_CTX_CONTROL                            0x00400190
+#define NV03_PGRAPH_CTX_USER                               0x00400194
+#define NV04_PGRAPH_CTX_CACHE2                             0x004001A0
+#define NV04_PGRAPH_CTX_CACHE3                             0x004001C0
+#define NV04_PGRAPH_CTX_CACHE4                             0x004001E0
+#define NV40_PGRAPH_CTXCTL_0304                            0x00400304
+#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX                   0x00000001
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT                      0x00400308
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK              0xff000000
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT                     24
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK              0x00ffffff
+#define NV40_PGRAPH_CTXCTL_0310                            0x00400310
+#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE                  0x00000020
+#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD                  0x00000040
+#define NV40_PGRAPH_CTXCTL_030C                            0x0040030c
+#define NV40_PGRAPH_CTXCTL_UCODE_INDEX                     0x00400324
+#define NV40_PGRAPH_CTXCTL_UCODE_DATA                      0x00400328
+#define NV40_PGRAPH_CTXCTL_CUR                             0x0040032c
+#define NV40_PGRAPH_CTXCTL_CUR_LOADED                      0x01000000
+#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE                    0x000FFFFF
+#define NV40_PGRAPH_CTXCTL_NEXT                            0x00400330
+#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE                   0x000fffff
+#define NV50_PGRAPH_CTXCTL_CUR                             0x0040032c
+#define NV50_PGRAPH_CTXCTL_CUR_LOADED                      0x80000000
+#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE                    0x00ffffff
+#define NV50_PGRAPH_CTXCTL_NEXT                            0x00400330
+#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE                   0x00ffffff
+#define NV03_PGRAPH_ABS_X_RAM                              0x00400400
+#define NV03_PGRAPH_ABS_Y_RAM                              0x00400480
+#define NV03_PGRAPH_X_MISC                                 0x00400500
+#define NV03_PGRAPH_Y_MISC                                 0x00400504
+#define NV04_PGRAPH_VALID1                                 0x00400508
+#define NV04_PGRAPH_SOURCE_COLOR                           0x0040050C
+#define NV04_PGRAPH_MISC24_0                               0x00400510
+#define NV03_PGRAPH_XY_LOGIC_MISC0                         0x00400514
+#define NV03_PGRAPH_XY_LOGIC_MISC1                         0x00400518
+#define NV03_PGRAPH_XY_LOGIC_MISC2                         0x0040051C
+#define NV03_PGRAPH_XY_LOGIC_MISC3                         0x00400520
+#define NV03_PGRAPH_CLIPX_0                                0x00400524
+#define NV03_PGRAPH_CLIPX_1                                0x00400528
+#define NV03_PGRAPH_CLIPY_0                                0x0040052C
+#define NV03_PGRAPH_CLIPY_1                                0x00400530
+#define NV03_PGRAPH_ABS_ICLIP_XMAX                         0x00400534
+#define NV03_PGRAPH_ABS_ICLIP_YMAX                         0x00400538
+#define NV03_PGRAPH_ABS_UCLIP_XMIN                         0x0040053C
+#define NV03_PGRAPH_ABS_UCLIP_YMIN                         0x00400540
+#define NV03_PGRAPH_ABS_UCLIP_XMAX                         0x00400544
+#define NV03_PGRAPH_ABS_UCLIP_YMAX                         0x00400548
+#define NV03_PGRAPH_ABS_UCLIPA_XMIN                        0x00400560
+#define NV03_PGRAPH_ABS_UCLIPA_YMIN                        0x00400564
+#define NV03_PGRAPH_ABS_UCLIPA_XMAX                        0x00400568
+#define NV03_PGRAPH_ABS_UCLIPA_YMAX                        0x0040056C
+#define NV04_PGRAPH_MISC24_1                               0x00400570
+#define NV04_PGRAPH_MISC24_2                               0x00400574
+#define NV04_PGRAPH_VALID2                                 0x00400578
+#define NV04_PGRAPH_PASSTHRU_0                             0x0040057C
+#define NV04_PGRAPH_PASSTHRU_1                             0x00400580
+#define NV04_PGRAPH_PASSTHRU_2                             0x00400584
+#define NV10_PGRAPH_DIMX_TEXTURE                           0x00400588
+#define NV10_PGRAPH_WDIMX_TEXTURE                          0x0040058C
+#define NV04_PGRAPH_COMBINE_0_ALPHA                        0x00400590
+#define NV04_PGRAPH_COMBINE_0_COLOR                        0x00400594
+#define NV04_PGRAPH_COMBINE_1_ALPHA                        0x00400598
+#define NV04_PGRAPH_COMBINE_1_COLOR                        0x0040059C
+#define NV04_PGRAPH_FORMAT_0                               0x004005A8
+#define NV04_PGRAPH_FORMAT_1                               0x004005AC
+#define NV04_PGRAPH_FILTER_0                               0x004005B0
+#define NV04_PGRAPH_FILTER_1                               0x004005B4
+#define NV03_PGRAPH_MONO_COLOR0                            0x00400600
+#define NV04_PGRAPH_ROP3                                   0x00400604
+#define NV04_PGRAPH_BETA_AND                               0x00400608
+#define NV04_PGRAPH_BETA_PREMULT                           0x0040060C
+#define NV04_PGRAPH_LIMIT_VIOL_PIX                         0x00400610
+#define NV04_PGRAPH_FORMATS                                0x00400618
+#define NV10_PGRAPH_DEBUG_2                                0x00400620
+#define NV04_PGRAPH_BOFFSET0                               0x00400640
+#define NV04_PGRAPH_BOFFSET1                               0x00400644
+#define NV04_PGRAPH_BOFFSET2                               0x00400648
+#define NV04_PGRAPH_BOFFSET3                               0x0040064C
+#define NV04_PGRAPH_BOFFSET4                               0x00400650
+#define NV04_PGRAPH_BOFFSET5                               0x00400654
+#define NV04_PGRAPH_BBASE0                                 0x00400658
+#define NV04_PGRAPH_BBASE1                                 0x0040065C
+#define NV04_PGRAPH_BBASE2                                 0x00400660
+#define NV04_PGRAPH_BBASE3                                 0x00400664
+#define NV04_PGRAPH_BBASE4                                 0x00400668
+#define NV04_PGRAPH_BBASE5                                 0x0040066C
+#define NV04_PGRAPH_BPITCH0                                0x00400670
+#define NV04_PGRAPH_BPITCH1                                0x00400674
+#define NV04_PGRAPH_BPITCH2                                0x00400678
+#define NV04_PGRAPH_BPITCH3                                0x0040067C
+#define NV04_PGRAPH_BPITCH4                                0x00400680
+#define NV04_PGRAPH_BLIMIT0                                0x00400684
+#define NV04_PGRAPH_BLIMIT1                                0x00400688
+#define NV04_PGRAPH_BLIMIT2                                0x0040068C
+#define NV04_PGRAPH_BLIMIT3                                0x00400690
+#define NV04_PGRAPH_BLIMIT4                                0x00400694
+#define NV04_PGRAPH_BLIMIT5                                0x00400698
+#define NV04_PGRAPH_BSWIZZLE2                              0x0040069C
+#define NV04_PGRAPH_BSWIZZLE5                              0x004006A0
+#define NV03_PGRAPH_STATUS                                 0x004006B0
+#define NV04_PGRAPH_STATUS                                 0x00400700
+#    define NV40_PGRAPH_STATUS_SYNC_STALL                  0x00004000
+#define NV04_PGRAPH_TRAPPED_ADDR                           0x00400704
+#define NV04_PGRAPH_TRAPPED_DATA                           0x00400708
+#define NV04_PGRAPH_SURFACE                                0x0040070C
+#define NV10_PGRAPH_TRAPPED_DATA_HIGH                      0x0040070C
+#define NV04_PGRAPH_STATE                                  0x00400710
+#define NV10_PGRAPH_SURFACE                                0x00400710
+#define NV04_PGRAPH_NOTIFY                                 0x00400714
+#define NV10_PGRAPH_STATE                                  0x00400714
+#define NV10_PGRAPH_NOTIFY                                 0x00400718
+
+#define NV04_PGRAPH_FIFO                                   0x00400720
+
+#define NV04_PGRAPH_BPIXEL                                 0x00400724
+#define NV10_PGRAPH_RDI_INDEX                              0x00400750
+#define NV04_PGRAPH_FFINTFC_ST2                            0x00400754
+#define NV10_PGRAPH_RDI_DATA                               0x00400754
+#define NV04_PGRAPH_DMA_PITCH                              0x00400760
+#define NV10_PGRAPH_FFINTFC_FIFO_PTR                       0x00400760
+#define NV04_PGRAPH_DVD_COLORFMT                           0x00400764
+#define NV10_PGRAPH_FFINTFC_ST2                            0x00400764
+#define NV04_PGRAPH_SCALED_FORMAT                          0x00400768
+#define NV10_PGRAPH_FFINTFC_ST2_DL                         0x00400768
+#define NV10_PGRAPH_FFINTFC_ST2_DH                         0x0040076c
+#define NV10_PGRAPH_DMA_PITCH                              0x00400770
+#define NV10_PGRAPH_DVD_COLORFMT                           0x00400774
+#define NV10_PGRAPH_SCALED_FORMAT                          0x00400778
+#define NV20_PGRAPH_CHANNEL_CTX_TABLE                      0x00400780
+#define NV20_PGRAPH_CHANNEL_CTX_POINTER                    0x00400784
+#define NV20_PGRAPH_CHANNEL_CTX_XFER                       0x00400788
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD                  0x00000001
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE                  0x00000002
+#define NV04_PGRAPH_PATT_COLOR0                            0x00400800
+#define NV04_PGRAPH_PATT_COLOR1                            0x00400804
+#define NV04_PGRAPH_PATTERN                                0x00400808
+#define NV04_PGRAPH_PATTERN_SHAPE                          0x00400810
+#define NV04_PGRAPH_CHROMA                                 0x00400814
+#define NV04_PGRAPH_CONTROL0                               0x00400818
+#define NV04_PGRAPH_CONTROL1                               0x0040081C
+#define NV04_PGRAPH_CONTROL2                               0x00400820
+#define NV04_PGRAPH_BLEND                                  0x00400824
+#define NV04_PGRAPH_STORED_FMT                             0x00400830
+#define NV04_PGRAPH_PATT_COLORRAM                          0x00400900
+#define NV20_PGRAPH_TILE(i)                                (0x00400900 + (i*16))
+#define NV20_PGRAPH_TLIMIT(i)                              (0x00400904 + (i*16))
+#define NV20_PGRAPH_TSIZE(i)                               (0x00400908 + (i*16))
+#define NV20_PGRAPH_TSTATUS(i)                             (0x0040090C + (i*16))
+#define NV20_PGRAPH_ZCOMP(i)                               (0x00400980 + 4*(i))
+#define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
+#define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
+#define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
+#define NV10_PGRAPH_TSTATUS(i)                             (0x00400B0C + (i*16))
+#define NV04_PGRAPH_U_RAM                                  0x00400D00
+#define NV47_PGRAPH_TILE(i)                                (0x00400D00 + (i*16))
+#define NV47_PGRAPH_TLIMIT(i)                              (0x00400D04 + (i*16))
+#define NV47_PGRAPH_TSIZE(i)                               (0x00400D08 + (i*16))
+#define NV47_PGRAPH_TSTATUS(i)                             (0x00400D0C + (i*16))
+#define NV04_PGRAPH_V_RAM                                  0x00400D40
+#define NV04_PGRAPH_W_RAM                                  0x00400D80
+#define NV10_PGRAPH_COMBINER0_IN_ALPHA                     0x00400E40
+#define NV10_PGRAPH_COMBINER1_IN_ALPHA                     0x00400E44
+#define NV10_PGRAPH_COMBINER0_IN_RGB                       0x00400E48
+#define NV10_PGRAPH_COMBINER1_IN_RGB                       0x00400E4C
+#define NV10_PGRAPH_COMBINER_COLOR0                        0x00400E50
+#define NV10_PGRAPH_COMBINER_COLOR1                        0x00400E54
+#define NV10_PGRAPH_COMBINER0_OUT_ALPHA                    0x00400E58
+#define NV10_PGRAPH_COMBINER1_OUT_ALPHA                    0x00400E5C
+#define NV10_PGRAPH_COMBINER0_OUT_RGB                      0x00400E60
+#define NV10_PGRAPH_COMBINER1_OUT_RGB                      0x00400E64
+#define NV10_PGRAPH_COMBINER_FINAL0                        0x00400E68
+#define NV10_PGRAPH_COMBINER_FINAL1                        0x00400E6C
+#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL                  0x00400F00
+#define NV10_PGRAPH_WINDOWCLIP_VERTICAL                    0x00400F20
+#define NV10_PGRAPH_XFMODE0                                0x00400F40
+#define NV10_PGRAPH_XFMODE1                                0x00400F44
+#define NV10_PGRAPH_GLOBALSTATE0                           0x00400F48
+#define NV10_PGRAPH_GLOBALSTATE1                           0x00400F4C
+#define NV10_PGRAPH_PIPE_ADDRESS                           0x00400F50
+#define NV10_PGRAPH_PIPE_DATA                              0x00400F54
+#define NV04_PGRAPH_DMA_START_0                            0x00401000
+#define NV04_PGRAPH_DMA_START_1                            0x00401004
+#define NV04_PGRAPH_DMA_LENGTH                             0x00401008
+#define NV04_PGRAPH_DMA_MISC                               0x0040100C
+#define NV04_PGRAPH_DMA_DATA_0                             0x00401020
+#define NV04_PGRAPH_DMA_DATA_1                             0x00401024
+#define NV04_PGRAPH_DMA_RM                                 0x00401030
+#define NV04_PGRAPH_DMA_A_XLATE_INST                       0x00401040
+#define NV04_PGRAPH_DMA_A_CONTROL                          0x00401044
+#define NV04_PGRAPH_DMA_A_LIMIT                            0x00401048
+#define NV04_PGRAPH_DMA_A_TLB_PTE                          0x0040104C
+#define NV04_PGRAPH_DMA_A_TLB_TAG                          0x00401050
+#define NV04_PGRAPH_DMA_A_ADJ_OFFSET                       0x00401054
+#define NV04_PGRAPH_DMA_A_OFFSET                           0x00401058
+#define NV04_PGRAPH_DMA_A_SIZE                             0x0040105C
+#define NV04_PGRAPH_DMA_A_Y_SIZE                           0x00401060
+#define NV04_PGRAPH_DMA_B_XLATE_INST                       0x00401080
+#define NV04_PGRAPH_DMA_B_CONTROL                          0x00401084
+#define NV04_PGRAPH_DMA_B_LIMIT                            0x00401088
+#define NV04_PGRAPH_DMA_B_TLB_PTE                          0x0040108C
+#define NV04_PGRAPH_DMA_B_TLB_TAG                          0x00401090
+#define NV04_PGRAPH_DMA_B_ADJ_OFFSET                       0x00401094
+#define NV04_PGRAPH_DMA_B_OFFSET                           0x00401098
+#define NV04_PGRAPH_DMA_B_SIZE                             0x0040109C
+#define NV04_PGRAPH_DMA_B_Y_SIZE                           0x004010A0
+#define NV40_PGRAPH_TILE1(i)                               (0x00406900 + (i*16))
+#define NV40_PGRAPH_TLIMIT1(i)                             (0x00406904 + (i*16))
+#define NV40_PGRAPH_TSIZE1(i)                              (0x00406908 + (i*16))
+#define NV40_PGRAPH_TSTATUS1(i)                            (0x0040690C + (i*16))
+
+
+/* It's a guess that this works on NV03. Confirmed on NV04, though */
+#define NV04_PFIFO_DELAY_0                                 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
+#define NV03_PFIFO_INTR_0                                  0x00002100
+#define NV03_PFIFO_INTR_EN_0                               0x00002140
+#    define NV_PFIFO_INTR_CACHE_ERROR                          (1<<0)
+#    define NV_PFIFO_INTR_RUNOUT                               (1<<4)
+#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                      (1<<8)
+#    define NV_PFIFO_INTR_DMA_PUSHER                          (1<<12)
+#    define NV_PFIFO_INTR_DMA_PT                              (1<<16)
+#    define NV_PFIFO_INTR_SEMAPHORE                           (1<<20)
+#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                     (1<<24)
+#define NV03_PFIFO_RAMHT                                   0x00002210
+#define NV03_PFIFO_RAMFC                                   0x00002214
+#define NV03_PFIFO_RAMRO                                   0x00002218
+#define NV40_PFIFO_RAMFC                                   0x00002220
+#define NV03_PFIFO_CACHES                                  0x00002500
+#define NV04_PFIFO_MODE                                    0x00002504
+#define NV04_PFIFO_DMA                                     0x00002508
+#define NV04_PFIFO_SIZE                                    0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c)                        (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE                                128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED                  (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                        (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
+#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
+#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
+#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
+#define NV03_PFIFO_CACHE1_PUT                              0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
+#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
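+    /* trigger encoding: TRIG_n_BYTES == ((n / 8) - 1) << 3 */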
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
+#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
+#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
+#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
+#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
+#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED            0x00000010
+#    define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY              0x00001000
+#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
+#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
+#define NV04_PFIFO_CACHE1_HASH                             0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
+#define NV03_PFIFO_CACHE1_GET                              0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
+#define NV40_PFIFO_UNK32E4                                 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i)*8)
+#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i)*8)
+#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i)*8)
+#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i)*8)
+
+#define NV_CRTC0_INTSTAT                                   0x00600100
+#define NV_CRTC0_INTEN                                     0x00600140
+#define NV_CRTC1_INTSTAT                                   0x00602100
+#define NV_CRTC1_INTEN                                     0x00602140
+#    define NV_CRTC_INTR_VBLANK                                (1<<0)
+
+#define NV04_PRAMIN						0x00700000
+
+/* FIFO commands. These are neither registers nor masks. */
+#define NV03_FIFO_CMD_JUMP                                 0x20000000
+#define NV03_FIFO_CMD_JUMP_OFFSET_MASK                     0x1ffffffc
+#define NV03_FIFO_CMD_REWIND                               (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK))
+
+/* This is a partial import from rules-ng; a few things may be duplicated.
+ * Eventually we should import everything from rules-ng.
+ * For the moment, check rules-ng for documentation.
+ */
+
+#define NV50_PMC                                            0x00000000
+#define NV50_PMC__LEN                                              0x1
+#define NV50_PMC__ESIZE                                         0x2000
+#    define NV50_PMC_BOOT_0                                 0x00000000
+#        define NV50_PMC_BOOT_0_REVISION                    0x000000ff
+#        define NV50_PMC_BOOT_0_REVISION__SHIFT                      0
+#        define NV50_PMC_BOOT_0_ARCH                        0x0ff00000
+#        define NV50_PMC_BOOT_0_ARCH__SHIFT                         20
+#    define NV50_PMC_INTR_0                                 0x00000100
+#        define NV50_PMC_INTR_0_PFIFO                           (1<<8)
+#        define NV50_PMC_INTR_0_PGRAPH                         (1<<12)
+#        define NV50_PMC_INTR_0_PTIMER                         (1<<20)
+#        define NV50_PMC_INTR_0_HOTPLUG                        (1<<21)
+#        define NV50_PMC_INTR_0_DISPLAY                        (1<<26)
+#    define NV50_PMC_INTR_EN_0                              0x00000140
+#        define NV50_PMC_INTR_EN_0_MASTER                       (1<<0)
+#            define NV50_PMC_INTR_EN_0_MASTER_DISABLED          (0<<0)
+#            define NV50_PMC_INTR_EN_0_MASTER_ENABLED           (1<<0)
+#    define NV50_PMC_ENABLE                                 0x00000200
+#        define NV50_PMC_ENABLE_PFIFO                           (1<<8)
+#        define NV50_PMC_ENABLE_PGRAPH                         (1<<12)
+
+#define NV50_PCONNECTOR                                     0x0000e000
+#define NV50_PCONNECTOR__LEN                                       0x1
+#define NV50_PCONNECTOR__ESIZE                                  0x1000
+#    define NV50_PCONNECTOR_HOTPLUG_INTR                    0x0000e050
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C0          (1<<0)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C1          (1<<1)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C2          (1<<2)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C3          (1<<3)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C0       (1<<16)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C1       (1<<17)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C2       (1<<18)
+#        define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C3       (1<<19)
+#    define NV50_PCONNECTOR_HOTPLUG_CTRL                    0x0000e054
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C0          (1<<0)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C1          (1<<1)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C2          (1<<2)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C3          (1<<3)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C0       (1<<16)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C1       (1<<17)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C2       (1<<18)
+#        define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C3       (1<<19)
+#    define NV50_PCONNECTOR_HOTPLUG_STATE                   0x0000e104
+#        define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C0 (1<<2)
+#        define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C1 (1<<6)
+#        define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C2 (1<<10)
+#        define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C3 (1<<14)
+#    define NV50_PCONNECTOR_I2C_PORT_0                      0x0000e138
+#    define NV50_PCONNECTOR_I2C_PORT_1                      0x0000e150
+#    define NV50_PCONNECTOR_I2C_PORT_2                      0x0000e168
+#    define NV50_PCONNECTOR_I2C_PORT_3                      0x0000e180
+#    define NV50_PCONNECTOR_I2C_PORT_4                      0x0000e240
+#    define NV50_PCONNECTOR_I2C_PORT_5                      0x0000e258
+
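+/* DisplayPort AUX channel registers; one block per channel, stride 0x50 */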
+#define NV50_AUXCH_DATA_OUT(i, n)            ((n) * 4 + (i) * 0x50 + 0x0000e4c0)
+#define NV50_AUXCH_DATA_OUT__SIZE                                             4
+#define NV50_AUXCH_DATA_IN(i, n)             ((n) * 4 + (i) * 0x50 + 0x0000e4d0)
+#define NV50_AUXCH_DATA_IN__SIZE                                              4
+#define NV50_AUXCH_ADDR(i)                             ((i) * 0x50 + 0x0000e4e0)
+#define NV50_AUXCH_CTRL(i)                             ((i) * 0x50 + 0x0000e4e4)
+#define NV50_AUXCH_CTRL_LINKSTAT                                     0x01000000
+#define NV50_AUXCH_CTRL_LINKSTAT_NOT_READY                           0x00000000
+#define NV50_AUXCH_CTRL_LINKSTAT_READY                               0x01000000
+#define NV50_AUXCH_CTRL_LINKEN                                       0x00100000
+#define NV50_AUXCH_CTRL_LINKEN_DISABLED                              0x00000000
+#define NV50_AUXCH_CTRL_LINKEN_ENABLED                               0x00100000
+#define NV50_AUXCH_CTRL_EXEC                                         0x00010000
+#define NV50_AUXCH_CTRL_EXEC_COMPLETE                                0x00000000
+#define NV50_AUXCH_CTRL_EXEC_IN_PROCESS                              0x00010000
+#define NV50_AUXCH_CTRL_CMD                                          0x0000f000
+#define NV50_AUXCH_CTRL_CMD_SHIFT                                            12
+#define NV50_AUXCH_CTRL_LEN                                          0x0000000f
+#define NV50_AUXCH_CTRL_LEN_SHIFT                                             0
+#define NV50_AUXCH_STAT(i)                             ((i) * 0x50 + 0x0000e4e8)
+#define NV50_AUXCH_STAT_STATE                                        0x10000000
+#define NV50_AUXCH_STAT_STATE_NOT_READY                              0x00000000
+#define NV50_AUXCH_STAT_STATE_READY                                  0x10000000
+#define NV50_AUXCH_STAT_REPLY                                        0x000f0000
+#define NV50_AUXCH_STAT_REPLY_AUX                                    0x00030000
+#define NV50_AUXCH_STAT_REPLY_AUX_ACK                                0x00000000
+#define NV50_AUXCH_STAT_REPLY_AUX_NACK                               0x00010000
+#define NV50_AUXCH_STAT_REPLY_AUX_DEFER                              0x00020000
+#define NV50_AUXCH_STAT_REPLY_I2C                                    0x000c0000
+#define NV50_AUXCH_STAT_REPLY_I2C_ACK                                0x00000000
+#define NV50_AUXCH_STAT_REPLY_I2C_NACK                               0x00040000
+#define NV50_AUXCH_STAT_REPLY_I2C_DEFER                              0x00080000
+#define NV50_AUXCH_STAT_COUNT                                        0x0000001f
+
+#define NV50_PBUS                                           0x00088000
+#define NV50_PBUS__LEN                                             0x1
+#define NV50_PBUS__ESIZE                                        0x1000
+#    define NV50_PBUS_PCI_ID                                0x00088000
+#        define NV50_PBUS_PCI_ID_VENDOR_ID                  0x0000ffff
+#        define NV50_PBUS_PCI_ID_VENDOR_ID__SHIFT                    0
+#        define NV50_PBUS_PCI_ID_DEVICE_ID                  0xffff0000
+#        define NV50_PBUS_PCI_ID_DEVICE_ID__SHIFT                   16
+
+#define NV50_PFB                                            0x00100000
+#define NV50_PFB__LEN                                              0x1
+#define NV50_PFB__ESIZE                                         0x1000
+
+#define NV50_PEXTDEV                                        0x00101000
+#define NV50_PEXTDEV__LEN                                          0x1
+#define NV50_PEXTDEV__ESIZE                                     0x1000
+
+#define NV50_PROM                                           0x00300000
+#define NV50_PROM__LEN                                             0x1
+#define NV50_PROM__ESIZE                                       0x10000
+
+#define NV50_PGRAPH                                         0x00400000
+#define NV50_PGRAPH__LEN                                           0x1
+#define NV50_PGRAPH__ESIZE                                     0x10000
+
+#define NV50_PDISPLAY                                                0x00610000
+#define NV50_PDISPLAY_OBJECTS                                        0x00610010
+#define NV50_PDISPLAY_INTR_0                                         0x00610020
+#define NV50_PDISPLAY_INTR_1                                         0x00610024
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC                             0x0000000c
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_SHIFT                                2
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(n)                   (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0                           0x00000004
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1                           0x00000008
+#define NV50_PDISPLAY_INTR_1_CLK_UNK10                               0x00000010
+#define NV50_PDISPLAY_INTR_1_CLK_UNK20                               0x00000020
+#define NV50_PDISPLAY_INTR_1_CLK_UNK40                               0x00000040
+#define NV50_PDISPLAY_INTR_EN_0                                      0x00610028
+#define NV50_PDISPLAY_INTR_EN_1                                      0x0061002c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC                          0x0000000c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n)                 (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0                        0x00000004
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1                        0x00000008
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10                            0x00000010
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20                            0x00000020
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40                            0x00000040
+#define NV50_PDISPLAY_UNK30_CTRL                                     0x00610030
+#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0                        0x00000200
+#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1                        0x00000400
+#define NV50_PDISPLAY_UNK30_CTRL_PENDING                             0x80000000
+#define NV50_PDISPLAY_TRAPPED_ADDR(i)                  ((i) * 0x08 + 0x00610080)
+#define NV50_PDISPLAY_TRAPPED_DATA(i)                  ((i) * 0x08 + 0x00610084)
+#define NV50_PDISPLAY_EVO_CTRL(i)                      ((i) * 0x10 + 0x00610200)
+#define NV50_PDISPLAY_EVO_CTRL_DMA                                   0x00000010
+#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED                          0x00000000
+#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED                           0x00000010
+#define NV50_PDISPLAY_EVO_DMA_CB(i)                    ((i) * 0x10 + 0x00610204)
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION                            0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM                       0x00000000
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM                     0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_VALID                               0x00000001
+#define NV50_PDISPLAY_EVO_UNK2(i)                      ((i) * 0x10 + 0x00610208)
+#define NV50_PDISPLAY_EVO_HASH_TAG(i)                  ((i) * 0x10 + 0x0061020c)
+
+#define NV50_PDISPLAY_CURSOR                                         0x00610270
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)           ((i) * 0x10 + 0x00610270)
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON                         0x00000001
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS                     0x00030000
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE              0x00010000
+
+#define NV50_PDISPLAY_PIO_CTRL                                       0x00610300
+#define NV50_PDISPLAY_PIO_CTRL_PENDING                               0x80000000
+#define NV50_PDISPLAY_PIO_CTRL_MTHD                                  0x00001ffc
+#define NV50_PDISPLAY_PIO_CTRL_ENABLED                               0x00000001
+#define NV50_PDISPLAY_PIO_DATA                                       0x00610304
+
+#define NV50_PDISPLAY_CRTC_P(i, r)        ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
+#define NV50_PDISPLAY_CRTC_C(i, r)    (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
+#define NV50_PDISPLAY_CRTC_UNK_0A18 /* mthd 0x0900 */                0x00610a18
+#define NV50_PDISPLAY_CRTC_CLUT_MODE                                 0x00610a24
+#define NV50_PDISPLAY_CRTC_INTERLACE                                 0x00610a48
+#define NV50_PDISPLAY_CRTC_SCALE_CTRL                                0x00610a50
+#define NV50_PDISPLAY_CRTC_CURSOR_CTRL                               0x00610a58
+#define NV50_PDISPLAY_CRTC_UNK0A78 /* mthd 0x0904 */                 0x00610a78
+#define NV50_PDISPLAY_CRTC_UNK0AB8                                   0x00610ab8
+#define NV50_PDISPLAY_CRTC_DEPTH                                     0x00610ac8
+#define NV50_PDISPLAY_CRTC_CLOCK                                     0x00610ad0
+#define NV50_PDISPLAY_CRTC_COLOR_CTRL                                0x00610ae0
+#define NV50_PDISPLAY_CRTC_SYNC_START_TO_BLANK_END                   0x00610ae8
+#define NV50_PDISPLAY_CRTC_MODE_UNK1                                 0x00610af0
+#define NV50_PDISPLAY_CRTC_DISPLAY_TOTAL                             0x00610af8
+#define NV50_PDISPLAY_CRTC_SYNC_DURATION                             0x00610b00
+#define NV50_PDISPLAY_CRTC_MODE_UNK2                                 0x00610b08
+#define NV50_PDISPLAY_CRTC_UNK_0B10 /* mthd 0x0828 */                0x00610b10
+#define NV50_PDISPLAY_CRTC_FB_SIZE                                   0x00610b18
+#define NV50_PDISPLAY_CRTC_FB_PITCH                                  0x00610b20
+#define NV50_PDISPLAY_CRTC_FB_PITCH_LINEAR                           0x00100000
+#define NV50_PDISPLAY_CRTC_FB_POS                                    0x00610b28
+#define NV50_PDISPLAY_CRTC_SCALE_CENTER_OFFSET                       0x00610b38
+#define NV50_PDISPLAY_CRTC_REAL_RES                                  0x00610b40
+#define NV50_PDISPLAY_CRTC_SCALE_RES1                                0x00610b48
+#define NV50_PDISPLAY_CRTC_SCALE_RES2                                0x00610b50
+
+#define NV50_PDISPLAY_DAC_MODE_CTRL_P(i)                (0x00610b58 + (i) * 0x8)
+#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i)                (0x00610b5c + (i) * 0x8)
+#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i)                (0x00610b70 + (i) * 0x8)
+#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i)                (0x00610b74 + (i) * 0x8)
+#define NV50_PDISPLAY_EXT_MODE_CTRL_P(i)                (0x00610b80 + (i) * 0x8)
+#define NV50_PDISPLAY_EXT_MODE_CTRL_C(i)                (0x00610b84 + (i) * 0x8)
+#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i)               (0x00610bdc + (i) * 0x8)
+#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i)               (0x00610be0 + (i) * 0x8)
+#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i)                (0x00610794 + (i) * 0x8)
+#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i)                (0x00610798 + (i) * 0x8)
+
+#define NV50_PDISPLAY_CRTC_CLK                                       0x00614000
+#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i)                 ((i) * 0x800 + 0x614100)
+#define NV50_PDISPLAY_CRTC_CLK_CTRL1_CONNECTED                       0x00000600
+#define NV50_PDISPLAY_CRTC_CLK_VPLL_A(i)                ((i) * 0x800 + 0x614104)
+#define NV50_PDISPLAY_CRTC_CLK_VPLL_B(i)                ((i) * 0x800 + 0x614108)
+#define NV50_PDISPLAY_CRTC_CLK_CTRL2(i)                 ((i) * 0x800 + 0x614200)
+
+#define NV50_PDISPLAY_DAC_CLK                                        0x00614000
+#define NV50_PDISPLAY_DAC_CLK_CTRL2(i)                  ((i) * 0x800 + 0x614280)
+
+#define NV50_PDISPLAY_SOR_CLK                                        0x00614000
+#define NV50_PDISPLAY_SOR_CLK_CTRL2(i)                  ((i) * 0x800 + 0x614300)
+
+#define NV50_PDISPLAY_VGACRTC(r)                                ((r) + 0x619400)
+
+#define NV50_PDISPLAY_DAC                                            0x0061a000
+#define NV50_PDISPLAY_DAC_DPMS_CTRL(i)                (0x0061a004 + (i) * 0x800)
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF                        0x00000001
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF                        0x00000004
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED                          0x00000010
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_OFF                              0x00000040
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING                          0x80000000
+#define NV50_PDISPLAY_DAC_LOAD_CTRL(i)                (0x0061a00c + (i) * 0x800)
+#define NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE                           0x00100000
+#define NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT                          0x38000000
+#define NV50_PDISPLAY_DAC_LOAD_CTRL_DONE                             0x80000000
+#define NV50_PDISPLAY_DAC_CLK_CTRL1(i)                (0x0061a010 + (i) * 0x800)
+#define NV50_PDISPLAY_DAC_CLK_CTRL1_CONNECTED                        0x00000600
+
+#define NV50_PDISPLAY_SOR                                            0x0061c000
+#define NV50_PDISPLAY_SOR_DPMS_CTRL(i)                (0x0061c004 + (i) * 0x800)
+#define NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING                          0x80000000
+#define NV50_PDISPLAY_SOR_DPMS_CTRL_ON                               0x00000001
+#define NV50_PDISPLAY_SOR_CLK_CTRL1(i)                (0x0061c008 + (i) * 0x800)
+#define NV50_PDISPLAY_SOR_CLK_CTRL1_CONNECTED                        0x00000600
+#define NV50_PDISPLAY_SOR_DPMS_STATE(i)               (0x0061c030 + (i) * 0x800)
+#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE                          0x00030000
+#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED                         0x00080000
+#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT                            0x10000000
+#define NV50_PDISP_SOR_PWM_DIV(i)                     (0x0061c080 + (i) * 0x800)
+#define NV50_PDISP_SOR_PWM_CTL(i)                     (0x0061c084 + (i) * 0x800)
+#define NV50_PDISP_SOR_PWM_CTL_NEW                                   0x80000000
+#define NVA3_PDISP_SOR_PWM_CTL_UNK                                   0x40000000
+#define NV50_PDISP_SOR_PWM_CTL_VAL                                   0x000007ff
+#define NVA3_PDISP_SOR_PWM_CTL_VAL                                   0x00ffffff
+#define NV50_SOR_DP_CTRL(i, l)           (0x0061c10c + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_CTRL_ENABLED                                     0x00000001
+#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED                      0x00004000
+#define NV50_SOR_DP_CTRL_LANE_MASK                                   0x001f0000
+#define NV50_SOR_DP_CTRL_LANE_0_ENABLED                              0x00010000
+#define NV50_SOR_DP_CTRL_LANE_1_ENABLED                              0x00020000
+#define NV50_SOR_DP_CTRL_LANE_2_ENABLED                              0x00040000
+#define NV50_SOR_DP_CTRL_LANE_3_ENABLED                              0x00080000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN                            0x0f000000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED                   0x00000000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1                          0x01000000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2                          0x02000000
+#define NV50_SOR_DP_UNK118(i, l)         (0x0061c118 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK120(i, l)         (0x0061c120 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_SCFG(i, l)           (0x0061c128 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK130(i, l)         (0x0061c130 + (i) * 0x800 + (l) * 0x80)
+
+#define NV50_PDISPLAY_USER(i)                        ((i) * 0x1000 + 0x00640000)
+#define NV50_PDISPLAY_USER_PUT(i)                    ((i) * 0x1000 + 0x00640000)
+#define NV50_PDISPLAY_USER_GET(i)                    ((i) * 0x1000 + 0x00640004)
+
+#define NV50_PDISPLAY_CURSOR_USER                                    0x00647000
+#define NV50_PDISPLAY_CURSOR_USER_POS_CTRL(i)        ((i) * 0x1000 + 0x00647080)
+#define NV50_PDISPLAY_CURSOR_USER_POS(i)             ((i) * 0x1000 + 0x00647084)
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_sgdma.c
new file mode 100644
index 0000000..ca5492a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -0,0 +1,112 @@
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+
+#include <subdev/fb.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_ttm.h"
+
+struct nouveau_sgdma_be {
+	/* this has to be the first field so the populate/unpopulate hooks
+	 * in nouveau_bo.c work properly; otherwise they would have to be
+	 * moved here
+	 */
+	struct ttm_dma_tt ttm;
+	struct drm_device *dev;
+	struct nouveau_mem *node;
+};
+
+static void
+nouveau_sgdma_destroy(struct ttm_tt *ttm)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+
+	if (ttm) {
+		ttm_dma_tt_fini(&nvbe->ttm);
+		kfree(nvbe);
+	}
+}
+
+static int
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+	struct nouveau_mem *node = mem->mm_node;
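+	/* size in bytes: GPU small pages are 4 KiB, hence the shift by 12 */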
+	u64 size = mem->num_pages << 12;
+
+	if (ttm->sg) {
+		node->sg = ttm->sg;
+		nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
+	} else {
+		node->pages = nvbe->ttm.dma_address;
+		nouveau_vm_map_sg(&node->vma[0], 0, size, node);
+	}
+
+	nvbe->node = node;
+	return 0;
+}
+
+static int
+nv04_sgdma_unbind(struct ttm_tt *ttm)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+	nouveau_vm_unmap(&nvbe->node->vma[0]);
+	return 0;
+}
+
+static struct ttm_backend_func nv04_sgdma_backend = {
+	.bind			= nv04_sgdma_bind,
+	.unbind			= nv04_sgdma_unbind,
+	.destroy		= nouveau_sgdma_destroy
+};
+
+static int
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+	struct nouveau_mem *node = mem->mm_node;
+
+	/* noop: bound in move_notify() */
+	if (ttm->sg)
+		node->sg = ttm->sg;
+	else
+		node->pages = nvbe->ttm.dma_address;
+	return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_tt *ttm)
+{
+	/* noop: unbound in move_notify() */
+	return 0;
+}
+
+static struct ttm_backend_func nv50_sgdma_backend = {
+	.bind			= nv50_sgdma_bind,
+	.unbind			= nv50_sgdma_unbind,
+	.destroy		= nouveau_sgdma_destroy
+};
+
+struct ttm_tt *
+nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+			 unsigned long size, uint32_t page_flags,
+			 struct page *dummy_read_page)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nouveau_sgdma_be *nvbe;
+
+	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
+	if (!nvbe)
+		return NULL;
+
+	nvbe->dev = drm->dev;
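+	/* pre-NV50 binds/unbinds mappings explicitly; NV50+ maps in move_notify() */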
+	if (nv_device(drm->device)->card_type < NV_50)
+		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
+	else
+		nvbe->ttm.ttm.func = &nv50_sgdma_backend;
+
+	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(nvbe);
+		return NULL;
+	}
+	return &nvbe->ttm.ttm;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_ttm.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_ttm.c
new file mode 100644
index 0000000..f19a15a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/instmem.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
+
+static int
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	man->priv = pfb;
+	return 0;
+}
+
+static int
+nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+{
+	man->priv = NULL;
+	return 0;
+}
+
+static inline void
+nouveau_mem_node_cleanup(struct nouveau_mem *node)
+{
+	if (node->vma[0].node) {
+		nouveau_vm_unmap(&node->vma[0]);
+		nouveau_vm_put(&node->vma[0]);
+	}
+
+	if (node->vma[1].node) {
+		nouveau_vm_unmap(&node->vma[1]);
+		nouveau_vm_put(&node->vma[1]);
+	}
+}
+
+static void
+nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
+			 struct ttm_mem_reg *mem)
+{
+	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	nouveau_mem_node_cleanup(mem->mm_node);
+	pfb->ram.put(pfb, (struct nouveau_mem **)&mem->mm_node);
+}
+
+static int
+nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *bo,
+			 struct ttm_placement *placement,
+			 struct ttm_mem_reg *mem)
+{
+	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_mem *node;
+	u32 size_nc = 0;
+	int ret;
+
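+	/* non-contiguous buffers may be satisfied in page-sized chunks */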
+	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
+		size_nc = 1 << nvbo->page_shift;
+
+	ret = pfb->ram.get(pfb, mem->num_pages << PAGE_SHIFT,
+			   mem->page_alignment << PAGE_SHIFT, size_nc,
+			   (nvbo->tile_flags >> 8) & 0x3ff, &node);
+	if (ret) {
+		mem->mm_node = NULL;
+		return (ret == -ENOSPC) ? 0 : ret;
+	}
+
+	node->page_shift = nvbo->page_shift;
+
+	mem->mm_node = node;
+	mem->start   = node->offset >> PAGE_SHIFT;
+	return 0;
+}
+
+static void
+nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+	struct nouveau_fb *pfb = man->priv;
+	struct nouveau_mm *mm = &pfb->vram;
+	struct nouveau_mm_node *r;
+	u32 total = 0, free = 0;
+
+	mutex_lock(&mm->mutex);
+	list_for_each_entry(r, &mm->nodes, nl_entry) {
+		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
+		       prefix, r->type, ((u64)r->offset << 12),
+		       (((u64)r->offset + r->length) << 12));
+
+		total += r->length;
+		if (!r->type)
+			free += r->length;
+	}
+	mutex_unlock(&mm->mutex);
+
+	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
+	       prefix, (u64)total << 12, (u64)free << 12);
+	printk(KERN_DEBUG "%s  block: 0x%08x\n",
+	       prefix, mm->block_size << 12);
+}
+
+const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+	nouveau_vram_manager_init,
+	nouveau_vram_manager_fini,
+	nouveau_vram_manager_new,
+	nouveau_vram_manager_del,
+	nouveau_vram_manager_debug
+};
+
+static int
+nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+	return 0;
+}
+
+static int
+nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+	return 0;
+}
+
+static void
+nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
+			 struct ttm_mem_reg *mem)
+{
+	nouveau_mem_node_cleanup(mem->mm_node);
+	kfree(mem->mm_node);
+	mem->mm_node = NULL;
+}
+
+static int
+nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *bo,
+			 struct ttm_placement *placement,
+			 struct ttm_mem_reg *mem)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_mem *node;
+
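+	/* cap single allocations below the 512 MiB GART aperture limit */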
+	if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
+		return -ENOMEM;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+	node->page_shift = 12;
+
+	switch (nv_device(drm->device)->card_type) {
+	case NV_50:
+		if (nv_device(drm->device)->chipset != 0x50)
+			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
+		break;
+	case NV_C0:
+	case NV_D0:
+	case NV_E0:
+		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
+		break;
+	default:
+		break;
+	}
+
+	mem->mm_node = node;
+	mem->start   = 0;
+	return 0;
+}
+
+static void
+nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nouveau_gart_manager = {
+	nouveau_gart_manager_init,
+	nouveau_gart_manager_fini,
+	nouveau_gart_manager_new,
+	nouveau_gart_manager_del,
+	nouveau_gart_manager_debug
+};
+
+#include <core/subdev/vm/nv04.h>
+static int
+nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+	struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
+	struct nv04_vmmgr_priv *priv = (void *)vmm;
+	struct nouveau_vm *vm = NULL;
+	nouveau_vm_ref(priv->vm, &vm, NULL);
+	man->priv = vm;
+	return 0;
+}
+
+static int
+nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+	struct nouveau_vm *vm = man->priv;
+	nouveau_vm_ref(NULL, &vm, NULL);
+	man->priv = NULL;
+	return 0;
+}
+
+static void
+nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
+{
+	struct nouveau_mem *node = mem->mm_node;
+	if (node->vma[0].node)
+		nouveau_vm_put(&node->vma[0]);
+	kfree(mem->mm_node);
+	mem->mm_node = NULL;
+}
+
+static int
+nv04_gart_manager_new(struct ttm_mem_type_manager *man,
+		      struct ttm_buffer_object *bo,
+		      struct ttm_placement *placement,
+		      struct ttm_mem_reg *mem)
+{
+	struct nouveau_mem *node;
+	int ret;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	node->page_shift = 12;
+
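+	/* reserve GPU virtual address space; nv04_sgdma_bind() maps the pages */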
+	ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
+			     NV_MEM_ACCESS_RW, &node->vma[0]);
+	if (ret) {
+		kfree(node);
+		return ret;
+	}
+
+	mem->mm_node = node;
+	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
+	return 0;
+}
+
+static void
+nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nv04_gart_manager = {
+	nv04_gart_manager_init,
+	nv04_gart_manager_fini,
+	nv04_gart_manager_new,
+	nv04_gart_manager_del,
+	nv04_gart_manager_debug
+};
+
+int
+nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+		return drm_mmap(filp, vma);
+
+	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
+}
+
+static int
+nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void
+nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+int
+nouveau_ttm_global_init(struct nouveau_drm *drm)
+{
+	struct drm_global_reference *global_ref;
+	int ret;
+
+	global_ref = &drm->ttm.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &nouveau_ttm_mem_global_init;
+	global_ref->release = &nouveau_ttm_mem_global_release;
+
+	ret = drm_global_item_ref(global_ref);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed setting up TTM memory accounting\n");
+		drm->ttm.mem_global_ref.release = NULL;
+		return ret;
+	}
+
+	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
+	global_ref = &drm->ttm.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+
+	ret = drm_global_item_ref(global_ref);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed setting up TTM BO subsystem\n");
+		drm_global_item_unref(&drm->ttm.mem_global_ref);
+		drm->ttm.mem_global_ref.release = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+void
+nouveau_ttm_global_release(struct nouveau_drm *drm)
+{
+	if (drm->ttm.mem_global_ref.release == NULL)
+		return;
+
+	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
+	drm_global_item_unref(&drm->ttm.mem_global_ref);
+	drm->ttm.mem_global_ref.release = NULL;
+}
+
+int
+nouveau_ttm_init(struct nouveau_drm *drm)
+{
+	struct drm_device *dev = drm->dev;
+	u32 bits;
+	int ret;
+
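+	/* fall back to 32-bit DMA on AGP, or if the host can't address the mask */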
+	bits = nouveau_vmmgr(drm->device)->dma_bits;
+	if (drm->agp.stat == ENABLED ||
+	    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
+		bits = 32;
+
+	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
+	if (ret)
+		return ret;
+
+	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
+	if (ret)
+		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+
+	ret = nouveau_ttm_global_init(drm);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_device_init(&drm->ttm.bdev,
+				  drm->ttm.bo_global_ref.ref.object,
+				  &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
+				  bits <= 32);
+	if (ret) {
+		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
+		return ret;
+	}
+
+	/* VRAM init */
+	drm->gem.vram_available  = nouveau_fb(drm->device)->ram.size;
+	drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;
+
+	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
+			      drm->gem.vram_available >> PAGE_SHIFT);
+	if (ret) {
+		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
+		return ret;
+	}
+
+	drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
+				     pci_resource_len(dev->pdev, 1),
+				     DRM_MTRR_WC);
+
+	/* GART init */
+	if (drm->agp.stat != ENABLED) {
+		drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
+		if (drm->gem.gart_available > 512 * 1024 * 1024)
+			drm->gem.gart_available = 512 * 1024 * 1024;
+	} else {
+		drm->gem.gart_available = drm->agp.size;
+	}
+
+	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
+			      drm->gem.gart_available >> PAGE_SHIFT);
+	if (ret) {
+		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
+		return ret;
+	}
+
+	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
+	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
+	return 0;
+}
+
+void
+nouveau_ttm_fini(struct nouveau_drm *drm)
+{
+	mutex_lock(&drm->dev->struct_mutex);
+	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
+	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
+	mutex_unlock(&drm->dev->struct_mutex);
+
+	ttm_bo_device_release(&drm->ttm.bdev);
+
+	nouveau_ttm_global_release(drm);
+
+	if (drm->ttm.mtrr >= 0) {
+		drm_mtrr_del(drm->ttm.mtrr,
+			     pci_resource_start(drm->dev->pdev, 1),
+			     pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
+		drm->ttm.mtrr = -1;
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_ttm.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_ttm.h
new file mode 100644
index 0000000..25b0de4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -0,0 +1,25 @@
+#ifndef __NOUVEAU_TTM_H__
+#define __NOUVEAU_TTM_H__
+
+static inline struct nouveau_drm *
+nouveau_bdev(struct ttm_bo_device *bd)
+{
+	return container_of(bd, struct nouveau_drm, ttm.bdev);
+}
+
+extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
+extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
+extern const struct ttm_mem_type_manager_func nv04_gart_manager;
+
+struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *,
+					unsigned long size, u32 page_flags,
+					struct page *dummy_read_page);
+
+int  nouveau_ttm_init(struct nouveau_drm *drm);
+void nouveau_ttm_fini(struct nouveau_drm *drm);
+int  nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
+
+int  nouveau_ttm_global_init(struct nouveau_drm *);
+void nouveau_ttm_global_release(struct nouveau_drm *);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_vga.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_vga.c
new file mode 100644
index 0000000..25d3495
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -0,0 +1,98 @@
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_acpi.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_vga.h"
+
+static unsigned int
+nouveau_vga_set_decode(void *priv, bool state)
+{
+	struct nouveau_device *device = nouveau_dev(priv);
+
+	if (device->chipset >= 0x40)
+		nv_wr32(device, 0x088054, state);
+	else
+		nv_wr32(device, 0x001854, state);
+
+	if (state)
+		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+	else
+		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+static void
+nouveau_switcheroo_set_state(struct pci_dev *pdev,
+			     enum vga_switcheroo_state state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	if (state == VGA_SWITCHEROO_ON) {
+		printk(KERN_INFO "VGA switcheroo: switched nouveau on\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+		nouveau_pmops_resume(&pdev->dev);
+		drm_kms_helper_poll_enable(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
+	} else {
+		printk(KERN_INFO "VGA switcheroo: switched nouveau off\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+		drm_kms_helper_poll_disable(dev);
+		nouveau_switcheroo_optimus_dsm();
+		nouveau_pmops_suspend(&pdev->dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+	}
+}
+
+static void
+nouveau_switcheroo_reprobe(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	nouveau_fbcon_output_poll_changed(dev);
+}
+
+static bool
+nouveau_switcheroo_can_switch(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	bool can_switch;
+
+	spin_lock(&dev->count_lock);
+	can_switch = (dev->open_count == 0);
+	spin_unlock(&dev->count_lock);
+	return can_switch;
+}
+
+static const struct vga_switcheroo_client_ops
+nouveau_switcheroo_ops = {
+	.set_gpu_state = nouveau_switcheroo_set_state,
+	.reprobe = nouveau_switcheroo_reprobe,
+	.can_switch = nouveau_switcheroo_can_switch,
+};
+
+void
+nouveau_vga_init(struct nouveau_drm *drm)
+{
+	struct drm_device *dev = drm->dev;
+	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
+	vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
+}
+
+void
+nouveau_vga_fini(struct nouveau_drm *drm)
+{
+	struct drm_device *dev = drm->dev;
+	vga_switcheroo_unregister_client(dev->pdev);
+	vga_client_register(dev->pdev, NULL, NULL, NULL);
+}
+
+void
+nouveau_vga_lastclose(struct drm_device *dev)
+{
+	vga_switcheroo_process_delayed_switch();
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_vga.h b/linux-imx/drivers/gpu/drm/nouveau/nouveau_vga.h
new file mode 100644
index 0000000..ea3ad69
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_vga.h
@@ -0,0 +1,8 @@
+#ifndef __NOUVEAU_VGA_H__
+#define __NOUVEAU_VGA_H__
+
+void nouveau_vga_init(struct nouveau_drm *);
+void nouveau_vga_fini(struct nouveau_drm *);
+void nouveau_vga_lastclose(struct drm_device *dev);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nouveau_volt.c b/linux-imx/drivers/gpu/drm/nouveau/nouveau_volt.c
new file mode 100644
index 0000000..9976414
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <drm/drmP.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_pm.h"
+
+#include <subdev/bios/gpio.h>
+#include <subdev/gpio.h>
+
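+/* GPIO function tags carrying the voltage ID (VID) bits, lowest bit first */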
+static const enum dcb_gpio_func_name vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
+static int nr_vidtag = ARRAY_SIZE(vidtag);
+
+int
+nouveau_voltage_gpio_get(struct drm_device *dev)
+{
+	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(device);
+	u8 vid = 0;
+	int i;
+
+	for (i = 0; i < nr_vidtag; i++) {
+		if (!(volt->vid_mask & (1 << i)))
+			continue;
+
+		vid |= gpio->get(gpio, 0, vidtag[i], 0xff) << i;
+	}
+
+	return nouveau_volt_lvl_lookup(dev, vid);
+}
+
+int
+nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(device);
+	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
+	int vid, i;
+
+	vid = nouveau_volt_vid_lookup(dev, voltage);
+	if (vid < 0)
+		return vid;
+
+	for (i = 0; i < nr_vidtag; i++) {
+		if (!(volt->vid_mask & (1 << i)))
+			continue;
+
+		gpio->set(gpio, 0, vidtag[i], 0xff, !!(vid & (1 << i)));
+	}
+
+	return 0;
+}
+
+int
+nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
+{
+	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
+	int i;
+
+	for (i = 0; i < volt->nr_level; i++) {
+		if (volt->level[i].voltage == voltage)
+			return volt->level[i].vid;
+	}
+
+	return -ENOENT;
+}
+
+int
+nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
+{
+	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
+	int i;
+
+	for (i = 0; i < volt->nr_level; i++) {
+		if (volt->level[i].vid == vid)
+			return volt->level[i].voltage;
+	}
+
+	return -ENOENT;
+}
+
+void
+nouveau_volt_init(struct drm_device *dev)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+	struct nouveau_pm *pm = nouveau_pm(dev);
+	struct nouveau_pm_voltage *voltage = &pm->voltage;
+	struct nvbios *bios = &drm->vbios;
+	struct dcb_gpio_func func;
+	struct bit_entry P;
+	u8 *volt = NULL, *entry;
+	int i, headerlen, recordlen, entries, vidmask, vidshift;
+
+	if (bios->type == NVBIOS_BIT) {
+		if (bit_table(dev, 'P', &P))
+			return;
+
+		if (P.version == 1)
+			volt = ROMPTR(dev, P.data[16]);
+		else if (P.version == 2)
+			volt = ROMPTR(dev, P.data[12]);
+		else
+			NV_WARN(drm, "unknown voltage table for BIT P %d\n",
+				P.version);
+	} else {
+		if (bios->data[bios->offset + 6] < 0x27) {
+			NV_DEBUG(drm, "BMP version too old for voltage\n");
+			return;
+		}
+
+		volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
+	}
+
+	if (!volt) {
+		NV_DEBUG(drm, "voltage table pointer invalid\n");
+		return;
+	}
+
+	switch (volt[0]) {
+	case 0x10:
+	case 0x11:
+	case 0x12:
+		headerlen = 5;
+		recordlen = volt[1];
+		entries   = volt[2];
+		vidshift  = 0;
+		vidmask   = volt[4];
+		break;
+	case 0x20:
+		headerlen = volt[1];
+		recordlen = volt[3];
+		entries   = volt[2];
+		vidshift  = 0; /* may have a vid shift like version 0x30? */
+		vidmask   = volt[5];
+		break;
+	case 0x30:
+		headerlen = volt[1];
+		recordlen = volt[2];
+		entries   = volt[3];
+		vidmask   = volt[4];
+		/* no longer certain what volt[5] is; if it's related to
+		 * the vid shift then it's definitely not a function of
+		 * how many bits are set.
+		 *
+		 * after looking at a number of nva3+ vbios images, they
+		 * all seem likely to have a static shift of 2. let's go
+		 * with that for now until proven otherwise.
+		 */
+		vidshift  = 2;
+		break;
+	case 0x40:
+		headerlen = volt[1];
+		recordlen = volt[2];
+		entries   = volt[3]; /* not a clue what the entries are for.. */
+		vidmask   = volt[11]; /* guess.. */
+		vidshift  = 0;
+		break;
+	default:
+		NV_WARN(drm, "voltage table 0x%02x unknown\n", volt[0]);
+		return;
+	}
+
+	/* validate vid mask */
+	voltage->vid_mask = vidmask;
+	if (!voltage->vid_mask)
+		return;
+
+	i = 0;
+	while (vidmask) {
+		if (i >= nr_vidtag) {
+			NV_DEBUG(drm, "vid bit %d unknown\n", i);
+			return;
+		}
+
+		if (gpio && gpio->find(gpio, 0, vidtag[i], 0xff, &func)) {
+			NV_DEBUG(drm, "vid bit %d has no gpio tag\n", i);
+			return;
+		}
+
+		vidmask >>= 1;
+		i++;
+	}
+
+	/* parse vbios entries into common format */
+	voltage->version = volt[0];
+	if (voltage->version < 0x40) {
+		voltage->nr_level = entries;
+		voltage->level =
+			kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
+		if (!voltage->level)
+			return;
+
+		entry = volt + headerlen;
+		for (i = 0; i < entries; i++, entry += recordlen) {
+			voltage->level[i].voltage = entry[0] * 10000;
+			voltage->level[i].vid     = entry[1] >> vidshift;
+		}
+	} else {
+		u32 volt_uv = ROM32(volt[4]);
+		s16 step_uv = ROM16(volt[8]);
+		u8 vid;
+
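+		/* version 0x40: voltage is linear in vid, base + vid * step */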
+		voltage->nr_level = voltage->vid_mask + 1;
+		voltage->level = kcalloc(voltage->nr_level,
+					 sizeof(*voltage->level), GFP_KERNEL);
+		if (!voltage->level)
+			return;
+
+		for (vid = 0; vid <= voltage->vid_mask; vid++) {
+			voltage->level[vid].voltage = volt_uv;
+			voltage->level[vid].vid = vid;
+			volt_uv += step_uv;
+		}
+	}
+
+	voltage->supported = true;
+}
+
+void
+nouveau_volt_fini(struct drm_device *dev)
+{
+	struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
+
+	kfree(volt->level);
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nv04_fbcon.c b/linux-imx/drivers/gpu/drm/nouveau/nv04_fbcon.c
new file mode 100644
index 0000000..77dcc9c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright 2009 Ben Skeggs
+ * Copyright 2008 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/object.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_fbcon.h"
+
+int
+nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, 4);
+	if (ret)
+		return ret;
+
+	BEGIN_NV04(chan, NvSubImageBlit, 0x0300, 3);
+	OUT_RING(chan, (region->sy << 16) | region->sx);
+	OUT_RING(chan, (region->dy << 16) | region->dx);
+	OUT_RING(chan, (region->height << 16) | region->width);
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, 7);
+	if (ret)
+		return ret;
+
+	BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
+	OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
+	BEGIN_NV04(chan, NvSubGdiRect, 0x03fc, 1);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+		OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+	else
+		OUT_RING(chan, rect->color);
+	BEGIN_NV04(chan, NvSubGdiRect, 0x0400, 2);
+	OUT_RING(chan, (rect->dx << 16) | rect->dy);
+	OUT_RING(chan, (rect->width << 16) | rect->height);
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
+	uint32_t fg;
+	uint32_t bg;
+	uint32_t dsize;
+	uint32_t width;
+	uint32_t *data = (uint32_t *)image->data;
+	int ret;
+
+	if (image->depth != 1)
+		return -ENODEV;
+
+	ret = RING_SPACE(chan, 8);
+	if (ret)
+		return ret;
+
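+	/* monochrome bitmap: rows padded to 8 pixels, sent as 32-bit words */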
+	width = ALIGN(image->width, 8);
+	dsize = ALIGN(width * image->height, 32) >> 5;
+
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+		fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
+		bg = ((uint32_t *) info->pseudo_palette)[image->bg_color];
+	} else {
+		fg = image->fg_color;
+		bg = image->bg_color;
+	}
+
+	BEGIN_NV04(chan, NvSubGdiRect, 0x0be4, 7);
+	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+	OUT_RING(chan, ((image->dy + image->height) << 16) |
+			 ((image->dx + image->width) & 0xffff));
+	OUT_RING(chan, bg);
+	OUT_RING(chan, fg);
+	OUT_RING(chan, (image->height << 16) | width);
+	OUT_RING(chan, (image->height << 16) | image->width);
+	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+
+	while (dsize) {
+		int iter_len = dsize > 128 ? 128 : dsize;
+
+		ret = RING_SPACE(chan, iter_len + 1);
+		if (ret)
+			return ret;
+
+		BEGIN_NV04(chan, NvSubGdiRect, 0x0c00, iter_len);
+		OUT_RINGp(chan, data, iter_len);
+		data += iter_len;
+		dsize -= iter_len;
+	}
+
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nv04_fbcon_accel_init(struct fb_info *info)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_channel *chan = drm->channel;
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_object *object;
+	int surface_fmt, pattern_fmt, rect_fmt;
+	int ret;
+
+	switch (info->var.bits_per_pixel) {
+	case 8:
+		surface_fmt = 1;
+		pattern_fmt = 3;
+		rect_fmt = 3;
+		break;
+	case 16:
+		surface_fmt = 4;
+		pattern_fmt = 1;
+		rect_fmt = 1;
+		break;
+	case 32:
+		switch (info->var.transp.length) {
+		case 0: /* depth 24 */
+		case 8: /* depth 32 */
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		surface_fmt = 6;
+		pattern_fmt = 3;
+		rect_fmt = 3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
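+	/* create the 2D engine objects; class IDs depend on GPU generation */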
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvCtxSurf2D,
+				 device->card_type >= NV_10 ? 0x0062 : 0x0042,
+				 NULL, 0, &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvClipRect,
+				 0x0019, NULL, 0, &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvRop,
+				 0x0043, NULL, 0, &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImagePatt,
+				 0x0044, NULL, 0, &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvGdiRect,
+				 0x004a, NULL, 0, &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImageBlit,
+				 device->chipset >= 0x11 ? 0x009f : 0x005f,
+				 NULL, 0, &object);
+	if (ret)
+		return ret;
+
+	if (RING_SPACE(chan, 49)) {
+		nouveau_fbcon_gpu_lockup(info);
+		return 0;
+	}
+
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
+	OUT_RING(chan, NvCtxSurf2D);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2);
+	OUT_RING(chan, NvDmaFB);
+	OUT_RING(chan, NvDmaFB);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4);
+	OUT_RING(chan, surface_fmt);
+	OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
+	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
+	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
+
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
+	OUT_RING(chan, NvRop);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1);
+	OUT_RING(chan, 0x55);
+
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
+	OUT_RING(chan, NvImagePatt);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8);
+	OUT_RING(chan, pattern_fmt);
+#ifdef __BIG_ENDIAN
+	OUT_RING(chan, 2);
+#else
+	OUT_RING(chan, 1);
+#endif
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 1);
+	OUT_RING(chan, ~0);
+	OUT_RING(chan, ~0);
+	OUT_RING(chan, ~0);
+	OUT_RING(chan, ~0);
+
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
+	OUT_RING(chan, NvClipRect);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
+
+	BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1);
+	OUT_RING(chan, NvImageBlit);
+	BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1);
+	OUT_RING(chan, NvCtxSurf2D);
+	BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
+	OUT_RING(chan, 3);
+
+	BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
+	OUT_RING(chan, NvGdiRect);
+	BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1);
+	OUT_RING(chan, NvCtxSurf2D);
+	BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2);
+	OUT_RING(chan, NvImagePatt);
+	OUT_RING(chan, NvRop);
+	BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1);
+	OUT_RING(chan, 1);
+	BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1);
+	OUT_RING(chan, rect_fmt);
+	BEGIN_NV04(chan, NvSubGdiRect, 0x02fc, 1);
+	OUT_RING(chan, 3);
+
+	FIRE_RING(chan);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nv04_fence.c b/linux-imx/drivers/gpu/drm/nouveau/nv04_fence.c
new file mode 100644
index 0000000..94eadd1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/fifo.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_fence.h"
+
+struct nv04_fence_chan {
+	struct nouveau_fence_chan base;
+};
+
+struct nv04_fence_priv {
+	struct nouveau_fence_priv base;
+};
+
+static int
+nv04_fence_emit(struct nouveau_fence *fence)
+{
+	struct nouveau_channel *chan = fence->channel;
+	int ret = RING_SPACE(chan, 2);
+	if (ret == 0) {
+		BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
+		OUT_RING  (chan, fence->sequence);
+		FIRE_RING (chan);
+	}
+	return ret;
+}
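+/* nv04 has no per-channel scratch page for fences, so emit simply queues the
+ * sequence number via a software-object method (0x0150 on NvSubSw) and read
+ * recovers it from the PFIFO channel's reference counter below.  A rough
+ * sketch of the resulting flow (caller and variable names illustrative, not
+ * the exact nouveau_fence core):
+ *
+ *	fence->sequence = ++next_sequence;	   names illustrative
+ *	fctx->base.emit(fence);			   queues the 0x0150 write
+ *	while (fctx->base.read(chan) < fence->sequence)
+ *		cpu_relax();			   poll until PFIFO catches up
+ */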
+
+static int
+nv04_fence_sync(struct nouveau_fence *fence,
+		struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+	return -ENODEV;
+}
+
+static u32
+nv04_fence_read(struct nouveau_channel *chan)
+{
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	return atomic_read(&fifo->refcnt);
+}
+
+static void
+nv04_fence_context_del(struct nouveau_channel *chan)
+{
+	struct nv04_fence_chan *fctx = chan->fence;
+	nouveau_fence_context_del(&fctx->base);
+	chan->fence = NULL;
+	kfree(fctx);
+}
+
+static int
+nv04_fence_context_new(struct nouveau_channel *chan)
+{
+	struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (fctx) {
+		nouveau_fence_context_new(&fctx->base);
+		fctx->base.emit = nv04_fence_emit;
+		fctx->base.sync = nv04_fence_sync;
+		fctx->base.read = nv04_fence_read;
+		chan->fence = fctx;
+		return 0;
+	}
+	return -ENOMEM;
+}
+
+static void
+nv04_fence_destroy(struct nouveau_drm *drm)
+{
+	struct nv04_fence_priv *priv = drm->fence;
+	drm->fence = NULL;
+	kfree(priv);
+}
+
+int
+nv04_fence_create(struct nouveau_drm *drm)
+{
+	struct nv04_fence_priv *priv;
+
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.dtor = nv04_fence_destroy;
+	priv->base.context_new = nv04_fence_context_new;
+	priv->base.context_del = nv04_fence_context_del;
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nv04_pm.c b/linux-imx/drivers/gpu/drm/nouveau/nv04_pm.c
new file mode 100644
index 0000000..27afc0e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <drm/drmP.h>
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "dispnv04/hw.h"
+#include "nouveau_pm.h"
+
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+
+int
+nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	int ret;
+
+	ret = nouveau_hw_get_clock(dev, PLL_CORE);
+	if (ret < 0)
+		return ret;
+	perflvl->core = ret;
+
+	ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
+	if (ret < 0)
+		return ret;
+	perflvl->memory = ret;
+
+	return 0;
+}
+
+struct nv04_pm_clock {
+	struct nvbios_pll pll;
+	struct nouveau_pll_vals calc;
+};
+
+struct nv04_pm_state {
+	struct nv04_pm_clock core;
+	struct nv04_pm_clock memory;
+};
+
+static int
+calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nouveau_clock *pclk = nouveau_clock(device);
+	int ret;
+
+	ret = nvbios_pll_parse(bios, id, &clk->pll);
+	if (ret)
+		return ret;
+
+	ret = pclk->pll_calc(pclk, &clk->pll, khz, &clk->calc);
+	if (!ret)
+		return -EINVAL;
+
+	return 0;
+}
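+/* nouveau's pll_calc() hands back the frequency it actually achieved (or 0
+ * when no usable M/N/P combination exists for the request), which is why the
+ * zero result is mapped to -EINVAL above rather than treated as success.
+ */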
+
+void *
+nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	struct nv04_pm_state *info;
+	int ret;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
+	if (ret)
+		goto error;
+
+	if (perflvl->memory) {
+		ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
+		if (ret)
+			goto error;
+	}
+
+	return info;
+error:
+	kfree(info);
+	return ERR_PTR(ret);
+}
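+/* The pre/set split follows the pattern the nouveau_pm core expects here:
+ * _pre() does all the fallible work (allocation, BIOS parsing, PLL math) and
+ * returns either a state blob or an ERR_PTR(), while _set() below only
+ * touches hardware and always consumes (kfrees) the blob handed to it.
+ */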
+
+static void
+prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_clock *pclk = nouveau_clock(device);
+	u32 reg = clk->pll.reg;
+
+	/* thank the insane nouveau_hw_setpll() interface for this */
+	if (device->card_type >= NV_40)
+		reg += 4;
+
+	pclk->pll_prog(pclk, reg, &clk->calc);
+}
+
+int
+nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_timer *ptimer = nouveau_timer(device);
+	struct nv04_pm_state *state = pre_state;
+
+	prog_pll(dev, &state->core);
+
+	if (state->memory.pll.reg) {
+		prog_pll(dev, &state->memory);
+		if (device->card_type < NV_30) {
+			if (device->card_type == NV_20)
+				nv_mask(device, 0x1002c4, 0, 1 << 20);
+
+			/* Reset the DLLs */
+			nv_mask(device, 0x1002c0, 0, 1 << 8);
+		}
+	}
+
+	nv_ofuncs(ptimer)->init(nv_object(ptimer));
+
+	kfree(state);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nv10_fence.c b/linux-imx/drivers/gpu/drm/nouveau/nv10_fence.c
new file mode 100644
index 0000000..06f434f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nv10_fence.h"
+
+int
+nv10_fence_emit(struct nouveau_fence *fence)
+{
+	struct nouveau_channel *chan = fence->channel;
+	int ret = RING_SPACE(chan, 2);
+	if (ret == 0) {
+		BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
+		OUT_RING  (chan, fence->sequence);
+		FIRE_RING (chan);
+	}
+	return ret;
+}
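+/* From nv10 onwards the REF_CNT method updates a per-channel reference count
+ * that software can read back directly (nv10_fence_read() below fetches it
+ * from offset 0x0048 of the channel object), so no software object is needed
+ * as on nv04.
+ */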
+
+
+static int
+nv10_fence_sync(struct nouveau_fence *fence,
+		struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+	return -ENODEV;
+}
+
+u32
+nv10_fence_read(struct nouveau_channel *chan)
+{
+	return nv_ro32(chan->object, 0x0048);
+}
+
+void
+nv10_fence_context_del(struct nouveau_channel *chan)
+{
+	struct nv10_fence_chan *fctx = chan->fence;
+	nouveau_fence_context_del(&fctx->base);
+	chan->fence = NULL;
+	kfree(fctx);
+}
+
+int
+nv10_fence_context_new(struct nouveau_channel *chan)
+{
+	struct nv10_fence_chan *fctx;
+
+	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return -ENOMEM;
+
+	nouveau_fence_context_new(&fctx->base);
+	fctx->base.emit = nv10_fence_emit;
+	fctx->base.read = nv10_fence_read;
+	fctx->base.sync = nv10_fence_sync;
+	return 0;
+}
+
+void
+nv10_fence_destroy(struct nouveau_drm *drm)
+{
+	struct nv10_fence_priv *priv = drm->fence;
+	if (priv->bo) {
+		nouveau_bo_unmap(priv->bo);
+		nouveau_bo_unpin(priv->bo);
+	}
+	nouveau_bo_ref(NULL, &priv->bo);
+	drm->fence = NULL;
+	kfree(priv);
+}
+
+int
+nv10_fence_create(struct nouveau_drm *drm)
+{
+	struct nv10_fence_priv *priv;
+
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.dtor = nv10_fence_destroy;
+	priv->base.context_new = nv10_fence_context_new;
+	priv->base.context_del = nv10_fence_context_del;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nv10_fence.h b/linux-imx/drivers/gpu/drm/nouveau/nv10_fence.h
new file mode 100644
index 0000000..e5d9204
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -0,0 +1,19 @@
+#ifndef __NV10_FENCE_H_
+#define __NV10_FENCE_H_
+
+#include <core/os.h>
+#include "nouveau_fence.h"
+#include "nouveau_bo.h"
+
+struct nv10_fence_chan {
+	struct nouveau_fence_chan base;
+};
+
+struct nv10_fence_priv {
+	struct nouveau_fence_priv base;
+	struct nouveau_bo *bo;
+	spinlock_t lock;
+	u32 sequence;
+};
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nv17_fence.c b/linux-imx/drivers/gpu/drm/nouveau/nv17_fence.c
new file mode 100644
index 0000000..22aa996
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nv10_fence.h"
+
+int
+nv17_fence_sync(struct nouveau_fence *fence,
+		struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+	struct nv10_fence_priv *priv = chan->drm->fence;
+	u32 value;
+	int ret;
+
+	if (!mutex_trylock(&prev->cli->mutex))
+		return -EBUSY;
+
+	spin_lock(&priv->lock);
+	value = priv->sequence;
+	priv->sequence += 2;
+	spin_unlock(&priv->lock);
+
+	ret = RING_SPACE(prev, 5);
+	if (!ret) {
+		BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+		OUT_RING  (prev, NvSema);
+		OUT_RING  (prev, 0);
+		OUT_RING  (prev, value + 0);
+		OUT_RING  (prev, value + 1);
+		FIRE_RING (prev);
+	}
+
+	if (!ret && !(ret = RING_SPACE(chan, 5))) {
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+		OUT_RING  (chan, NvSema);
+		OUT_RING  (chan, 0);
+		OUT_RING  (chan, value + 1);
+		OUT_RING  (chan, value + 2);
+		FIRE_RING (chan);
+	}
+
+	mutex_unlock(&prev->cli->mutex);
+	return 0;
+}
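+/* The handshake above orders the two channels with a hardware semaphore in
+ * the shared fence buffer (method order per the NV11 semaphore class:
+ * ctxdma, offset, acquire, release).  Schematically:
+ *
+ *	prev: ACQUIRE(value)		RELEASE(value + 1)
+ *	chan: ACQUIRE(value + 1)	RELEASE(value + 2)
+ *
+ * so chan cannot proceed until prev's release of value + 1 has executed.
+ */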
+
+static int
+nv17_fence_context_new(struct nouveau_channel *chan)
+{
+	struct nv10_fence_priv *priv = chan->drm->fence;
+	struct nv10_fence_chan *fctx;
+	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
+	struct nouveau_object *object;
+	u32 start = mem->start * PAGE_SIZE;
+	u32 limit = start + mem->size - 1;
+	int ret = 0;
+
+	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return -ENOMEM;
+
+	nouveau_fence_context_new(&fctx->base);
+	fctx->base.emit = nv10_fence_emit;
+	fctx->base.read = nv10_fence_read;
+	fctx->base.sync = nv17_fence_sync;
+
+	ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+				 NvSema, 0x0002,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = start,
+					.limit = limit,
+				 }, sizeof(struct nv_dma_class),
+				 &object);
+	if (ret)
+		nv10_fence_context_del(chan);
+	return ret;
+}
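+/* NvSema is a DMA object windowing the priv->bo fence buffer in VRAM; the
+ * start/limit computed above simply cover that buffer, letting the semaphore
+ * methods in nv17_fence_sync() address it at offset 0.
+ */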
+
+void
+nv17_fence_resume(struct nouveau_drm *drm)
+{
+	struct nv10_fence_priv *priv = drm->fence;
+
+	nouveau_bo_wr32(priv->bo, 0, priv->sequence);
+}
+
+int
+nv17_fence_create(struct nouveau_drm *drm)
+{
+	struct nv10_fence_priv *priv;
+	int ret = 0;
+
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.dtor = nv10_fence_destroy;
+	priv->base.resume = nv17_fence_resume;
+	priv->base.context_new = nv17_fence_context_new;
+	priv->base.context_del = nv10_fence_context_del;
+	spin_lock_init(&priv->lock);
+
+	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, NULL, &priv->bo);
+	if (!ret) {
+		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+		if (!ret) {
+			ret = nouveau_bo_map(priv->bo);
+			if (ret)
+				nouveau_bo_unpin(priv->bo);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &priv->bo);
+	}
+
+	if (ret) {
+		nv10_fence_destroy(drm);
+		return ret;
+	}
+
+	nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nv40_pm.c b/linux-imx/drivers/gpu/drm/nouveau/nv40_pm.c
new file mode 100644
index 0000000..3af5bcd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <drm/drmP.h>
+#include "nouveau_drm.h"
+#include "nouveau_bios.h"
+#include "nouveau_pm.h"
+#include "dispnv04/hw.h"
+
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+
+#define min2(a,b) ((a) < (b) ? (a) : (b))
+
+static u32
+read_pll_1(struct drm_device *dev, u32 reg)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, reg + 0x00);
+	int P = (ctrl & 0x00070000) >> 16;
+	int N = (ctrl & 0x0000ff00) >> 8;
+	int M = (ctrl & 0x000000ff) >> 0;
+	u32 ref = 27000, clk = 0;
+
+	if (ctrl & 0x80000000)
+		clk = ref * N / M;
+
+	return clk >> P;
+}
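+/* Single-stage PLL readback: clk = (ref * N / M) >> P against a 27 MHz
+ * reference.  E.g. N = 200, M = 13, P = 1 gives
+ * (27000 * 200 / 13) >> 1 = 415384 >> 1 = 207692 kHz (~208 MHz).
+ * (Example coefficients illustrative only.)
+ */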
+
+static u32
+read_pll_2(struct drm_device *dev, u32 reg)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, reg + 0x00);
+	u32 coef = nv_rd32(device, reg + 0x04);
+	int N2 = (coef & 0xff000000) >> 24;
+	int M2 = (coef & 0x00ff0000) >> 16;
+	int N1 = (coef & 0x0000ff00) >> 8;
+	int M1 = (coef & 0x000000ff) >> 0;
+	int P = (ctrl & 0x00070000) >> 16;
+	u32 ref = 27000, clk = 0;
+
+	if ((ctrl & 0x80000000) && M1) {
+		clk = ref * N1 / M1;
+		if ((ctrl & 0x40000100) == 0x40000000) {
+			if (M2)
+				clk = clk * N2 / M2;
+			else
+				clk = 0;
+		}
+	}
+
+	return clk >> P;
+}
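+/* Two-stage variant: clk = ((ref * N1 / M1) * N2 / M2) >> P, with the second
+ * stage only applied when the control register flags it as enabled
+ * ((ctrl & 0x40000100) == 0x40000000 above).
+ */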
+
+static u32
+read_clk(struct drm_device *dev, u32 src)
+{
+	switch (src) {
+	case 3:
+		return read_pll_2(dev, 0x004000);
+	case 2:
+		return read_pll_1(dev, 0x004008);
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+int
+nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, 0x00c040);
+
+	perflvl->core   = read_clk(dev, (ctrl & 0x00000003) >> 0);
+	perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
+	perflvl->memory = read_pll_2(dev, 0x4020);
+	return 0;
+}
+
+struct nv40_pm_state {
+	u32 ctrl;
+	u32 npll_ctrl;
+	u32 npll_coef;
+	u32 spll;
+	u32 mpll_ctrl;
+	u32 mpll_coef;
+};
+
+static int
+nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
+	      u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nouveau_clock *pclk = nouveau_clock(device);
+	struct nouveau_pll_vals coef;
+	int ret;
+
+	ret = nvbios_pll_parse(bios, reg, pll);
+	if (ret)
+		return ret;
+
+	if (clk < pll->vco1.max_freq)
+		pll->vco2.max_freq = 0;
+
+	ret = pclk->pll_calc(pclk, pll, clk, &coef);
+	if (ret == 0)
+		return -ERANGE;
+
+	*N1 = coef.N1;
+	*M1 = coef.M1;
+	if (N2 && M2) {
+		if (pll->vco2.max_freq) {
+			*N2 = coef.N2;
+			*M2 = coef.M2;
+		} else {
+			*N2 = 1;
+			*M2 = 1;
+		}
+	}
+	*log2P = coef.log2P;
+	return 0;
+}
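+/* Callers can tell a single-stage solution from a two-stage one by the
+ * N2 == M2 convention (both forced to 1 above when VCO2 is unused);
+ * nv40_pm_clocks_pre() below relies on exactly that to choose between the
+ * 0x80000100 and 0xc0000000 PLL control encodings.
+ */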
+
+void *
+nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	struct nv40_pm_state *info;
+	struct nvbios_pll pll;
+	int N1, N2, M1, M2, log2P;
+	int ret;
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	/* core/geometric clock */
+	ret = nv40_calc_pll(dev, 0x004000, &pll, perflvl->core,
+			    &N1, &M1, &N2, &M2, &log2P);
+	if (ret < 0)
+		goto out;
+
+	if (N2 == M2) {
+		info->npll_ctrl = 0x80000100 | (log2P << 16);
+		info->npll_coef = (N1 << 8) | M1;
+	} else {
+		info->npll_ctrl = 0xc0000000 | (log2P << 16);
+		info->npll_coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+	}
+
+	/* use the second PLL for shader/rop clock, if it differs from core */
+	if (perflvl->shader && perflvl->shader != perflvl->core) {
+		ret = nv40_calc_pll(dev, 0x004008, &pll, perflvl->shader,
+				    &N1, &M1, NULL, NULL, &log2P);
+		if (ret < 0)
+			goto out;
+
+		info->spll = 0xc0000000 | (log2P << 16) | (N1 << 8) | M1;
+		info->ctrl = 0x00000223;
+	} else {
+		info->spll = 0x00000000;
+		info->ctrl = 0x00000333;
+	}
+
+	/* memory clock */
+	if (!perflvl->memory) {
+		info->mpll_ctrl = 0x00000000;
+		goto out;
+	}
+
+	ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
+			    &N1, &M1, &N2, &M2, &log2P);
+	if (ret < 0)
+		goto out;
+
+	info->mpll_ctrl  = 0x80000000 | (log2P << 16);
+	info->mpll_ctrl |= min2(pll.bias_p + log2P, pll.max_p) << 20;
+	if (N2 == M2) {
+		info->mpll_ctrl |= 0x00000100;
+		info->mpll_coef  = (N1 << 8) | M1;
+	} else {
+		info->mpll_ctrl |= 0x40000000;
+		info->mpll_coef  = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1;
+	}
+
+out:
+	if (ret < 0) {
+		kfree(info);
+		info = ERR_PTR(ret);
+	}
+	return info;
+}
+
+static bool
+nv40_pm_gr_idle(void *data)
+{
+	struct drm_device *dev = data;
+	struct nouveau_device *device = nouveau_dev(dev);
+
+	if ((nv_rd32(device, 0x400760) & 0x000000f0) >> 4 !=
+	    (nv_rd32(device, 0x400760) & 0x0000000f))
+		return false;
+
+	if (nv_rd32(device, 0x400700))
+		return false;
+
+	return true;
+}
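+/* Idle heuristic: the two low nibbles of 0x400760 (apparently a get/put
+ * style pair) must match, and the 0x400700 status register must read back
+ * zero.
+ */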
+
+int
+nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_fifo *pfifo = nouveau_fifo(device);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nv40_pm_state *info = pre_state;
+	unsigned long flags;
+	struct bit_entry M;
+	u32 crtc_mask = 0;
+	u8 sr1[2];
+	int i, ret = -EAGAIN;
+
+	/* determine which CRTCs are active, fetch VGA_SR1 for each */
+	for (i = 0; i < 2; i++) {
+		u32 vbl = nv_rd32(device, 0x600808 + (i * 0x2000));
+		u32 cnt = 0;
+		do {
+			if (vbl != nv_rd32(device, 0x600808 + (i * 0x2000))) {
+				nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+				sr1[i] = nv_rd08(device, 0x0c03c5 + (i * 0x2000));
+				if (!(sr1[i] & 0x20))
+					crtc_mask |= (1 << i);
+				break;
+			}
+			udelay(1);
+		} while (cnt++ < 32);
+	}
+
+	/* halt and idle engines */
+	pfifo->pause(pfifo, &flags);
+
+	if (!nv_wait_cb(device, nv40_pm_gr_idle, dev))
+		goto resume;
+
+	ret = 0;
+
+	/* set engine clocks */
+	nv_mask(device, 0x00c040, 0x00000333, 0x00000000);
+	nv_wr32(device, 0x004004, info->npll_coef);
+	nv_mask(device, 0x004000, 0xc0070100, info->npll_ctrl);
+	nv_mask(device, 0x004008, 0xc007ffff, info->spll);
+	mdelay(5);
+	nv_mask(device, 0x00c040, 0x00000333, info->ctrl);
+
+	if (!info->mpll_ctrl)
+		goto resume;
+
+	/* wait for vblank start on active crtcs, disable memory access */
+	for (i = 0; i < 2; i++) {
+		if (!(crtc_mask & (1 << i)))
+			continue;
+		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
+		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+		nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+		nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
+	}
+
+	/* prepare ram for reclocking */
+	nv_wr32(device, 0x1002d4, 0x00000001); /* precharge */
+	nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+	nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+	nv_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
+	nv_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
+
+	/* change the PLL of each memory partition */
+	nv_mask(device, 0x00c040, 0x0000c000, 0x00000000);
+	switch (nv_device(drm->device)->chipset) {
+	case 0x40:
+	case 0x45:
+	case 0x41:
+	case 0x42:
+	case 0x47:
+		nv_mask(device, 0x004044, 0xc0771100, info->mpll_ctrl);
+		nv_mask(device, 0x00402c, 0xc0771100, info->mpll_ctrl);
+		nv_wr32(device, 0x004048, info->mpll_coef);
+		nv_wr32(device, 0x004030, info->mpll_coef);
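+		/* fall through */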
+	case 0x43:
+	case 0x49:
+	case 0x4b:
+		nv_mask(device, 0x004038, 0xc0771100, info->mpll_ctrl);
+		nv_wr32(device, 0x00403c, info->mpll_coef);
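+		/* fall through */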
+	default:
+		nv_mask(device, 0x004020, 0xc0771100, info->mpll_ctrl);
+		nv_wr32(device, 0x004024, info->mpll_coef);
+		break;
+	}
+	udelay(100);
+	nv_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
+
+	/* re-enable normal operation of memory controller */
+	nv_wr32(device, 0x1002dc, 0x00000000);
+	nv_mask(device, 0x100210, 0x80000000, 0x80000000);
+	udelay(100);
+
+	/* execute memory reset script from vbios */
+	if (!bit_table(dev, 'M', &M))
+		nouveau_bios_run_init_table(dev, ROM16(M.data[0]), NULL, 0);
+
+	/* make sure we're in vblank (hopefully the same one as before), and
+	 * then re-enable crtc memory access
+	 */
+	for (i = 0; i < 2; i++) {
+		if (!(crtc_mask & (1 << i)))
+			continue;
+		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+		nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+		nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
+	}
+
+	/* resume engines */
+resume:
+	pfifo->start(pfifo, &flags);
+	kfree(info);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nv50_display.c b/linux-imx/drivers/gpu/drm/nouveau/nv50_display.c
new file mode 100644
index 0000000..dd5e01f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nv50_display.c
@@ -0,0 +1,2300 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_gem.h"
+#include "nouveau_connector.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+#include "nouveau_fence.h"
+#include "nv50_display.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+#include <subdev/i2c.h>
+
+#define EVO_DMA_NR 9
+
+#define EVO_MASTER  (0x00)
+#define EVO_FLIP(c) (0x01 + (c))
+#define EVO_OVLY(c) (0x05 + (c))
+#define EVO_OIMM(c) (0x09 + (c))
+#define EVO_CURS(c) (0x0d + (c))
+
+/* offsets in shared sync bo of various structures */
+#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
+#define EVO_MAST_NTFY     EVO_SYNC(      0, 0x00)
+#define EVO_FLIP_SEM0(c)  EVO_SYNC((c) + 1, 0x00)
+#define EVO_FLIP_SEM1(c)  EVO_SYNC((c) + 1, 0x10)
+
+#define EVO_CORE_HANDLE      (0xd1500000)
+#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
+#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff))
+#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) |                               \
+			      (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8))
+
+/******************************************************************************
+ * EVO channel
+ *****************************************************************************/
+
+struct nv50_chan {
+	struct nouveau_object *user;
+	u32 handle;
+};
+
+static int
+nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head,
+		 void *data, u32 size, struct nv50_chan *chan)
+{
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	const u32 oclass = EVO_CHAN_OCLASS(bclass, core);
+	const u32 handle = EVO_CHAN_HANDLE(bclass, head);
+	int ret;
+
+	ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle,
+				 oclass, data, size, &chan->user);
+	if (ret)
+		return ret;
+
+	chan->handle = handle;
+	return 0;
+}
+
+static void
+nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan)
+{
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	if (chan->handle)
+		nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle);
+}
+
+/******************************************************************************
+ * PIO EVO channel
+ *****************************************************************************/
+
+struct nv50_pioc {
+	struct nv50_chan base;
+};
+
+static void
+nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc)
+{
+	nv50_chan_destroy(core, &pioc->base);
+}
+
+static int
+nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head,
+		 void *data, u32 size, struct nv50_pioc *pioc)
+{
+	return nv50_chan_create(core, bclass, head, data, size, &pioc->base);
+}
+
+/******************************************************************************
+ * DMA EVO channel
+ *****************************************************************************/
+
+struct nv50_dmac {
+	struct nv50_chan base;
+	dma_addr_t handle;
+	u32 *ptr;
+
+	/* Protects against concurrent pushbuf access to this channel, lock is
+	 * grabbed by evo_wait (if the pushbuf reservation is successful) and
+	 * dropped again by evo_kick. */
+	struct mutex lock;
+};
+
+static void
+nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac)
+{
+	if (dmac->ptr) {
+		struct pci_dev *pdev = nv_device(core)->pdev;
+		pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
+	}
+
+	nv50_chan_destroy(core, &dmac->base);
+}
+
+static int
+nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+	struct nouveau_fb *pfb = nouveau_fb(core);
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	struct nouveau_object *object;
+	int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+				     NV_DMA_IN_MEMORY_CLASS,
+				     &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NV50_DMA_CONF0_ENABLE |
+					         NV50_DMA_CONF0_PART_256,
+				     }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, parent, NvEvoFB16,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NV50_DMA_CONF0_ENABLE | 0x70 |
+					         NV50_DMA_CONF0_PART_256,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, parent, NvEvoFB32,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NV50_DMA_CONF0_ENABLE | 0x7a |
+					         NV50_DMA_CONF0_PART_256,
+				 }, sizeof(struct nv_dma_class), &object);
+	return ret;
+}
+
+static int
+nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+	struct nouveau_fb *pfb = nouveau_fb(core);
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	struct nouveau_object *object;
+	int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+				     NV_DMA_IN_MEMORY_CLASS,
+				     &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVC0_DMA_CONF0_ENABLE,
+				     }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, parent, NvEvoFB16,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, parent, NvEvoFB32,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+				 }, sizeof(struct nv_dma_class), &object);
+	return ret;
+}
+
+static int
+nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+	struct nouveau_fb *pfb = nouveau_fb(core);
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	struct nouveau_object *object;
+	int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+				     NV_DMA_IN_MEMORY_CLASS,
+				     &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVD0_DMA_CONF0_ENABLE |
+						 NVD0_DMA_CONF0_PAGE_LP,
+				     }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, parent, NvEvoFB32,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe |
+						 NVD0_DMA_CONF0_PAGE_LP,
+				 }, sizeof(struct nv_dma_class), &object);
+	return ret;
+}
+
+static int
+nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
+		 void *data, u32 size, u64 syncbuf,
+		 struct nv50_dmac *dmac)
+{
+	struct nouveau_fb *pfb = nouveau_fb(core);
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	struct nouveau_object *object;
+	u32 pushbuf = *(u32 *)data;
+	int ret;
+
+	mutex_init(&dmac->lock);
+
+	dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
+					&dmac->handle);
+	if (!dmac->ptr)
+		return -ENOMEM;
+
+	ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf,
+				 NV_DMA_FROM_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_PCI_US |
+						 NV_DMA_ACCESS_RD,
+					.start = dmac->handle + 0x0000,
+					.limit = dmac->handle + 0x0fff,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = syncbuf + 0x0000,
+					.limit = syncbuf + 0x0fff,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	if (nv_device(core)->card_type < NV_C0)
+		ret = nv50_dmac_create_fbdma(core, dmac->base.handle);
+	else
+	if (nv_device(core)->card_type < NV_D0)
+		ret = nvc0_dmac_create_fbdma(core, dmac->base.handle);
+	else
+		ret = nvd0_dmac_create_fbdma(core, dmac->base.handle);
+	return ret;
+}
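+/* DMA channel bring-up, in order: a CPU-coherent page for the pushbuf, a
+ * FROM_MEMORY ctxdma so EVO can fetch it over PCI, the channel itself, then
+ * ctxdmas for the sync buffer and the whole of VRAM, plus the per-generation
+ * framebuffer ctxdmas (the nv50/nvc0/nvd0 variants above differ only in
+ * their conf0 encodings).
+ */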
+
+struct nv50_mast {
+	struct nv50_dmac base;
+};
+
+struct nv50_curs {
+	struct nv50_pioc base;
+};
+
+struct nv50_sync {
+	struct nv50_dmac base;
+	u32 addr;
+	u32 data;
+};
+
+struct nv50_ovly {
+	struct nv50_dmac base;
+};
+
+struct nv50_oimm {
+	struct nv50_pioc base;
+};
+
+struct nv50_head {
+	struct nouveau_crtc base;
+	struct nv50_curs curs;
+	struct nv50_sync sync;
+	struct nv50_ovly ovly;
+	struct nv50_oimm oimm;
+};
+
+#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
+#define nv50_curs(c) (&nv50_head(c)->curs)
+#define nv50_sync(c) (&nv50_head(c)->sync)
+#define nv50_ovly(c) (&nv50_head(c)->ovly)
+#define nv50_oimm(c) (&nv50_head(c)->oimm)
+#define nv50_chan(c) (&(c)->base.base)
+#define nv50_vers(c) nv_mclass(nv50_chan(c)->user)
+
+struct nv50_disp {
+	struct nouveau_object *core;
+	struct nv50_mast mast;
+
+	u32 modeset;
+
+	struct nouveau_bo *sync;
+};
+
+static struct nv50_disp *
+nv50_disp(struct drm_device *dev)
+{
+	return nouveau_display(dev)->priv;
+}
+
+#define nv50_mast(d) (&nv50_disp(d)->mast)
+
+static struct drm_crtc *
+nv50_display_crtc_get(struct drm_encoder *encoder)
+{
+	return nouveau_encoder(encoder)->crtc;
+}
+
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
+static u32 *
+evo_wait(void *evoc, int nr)
+{
+	struct nv50_dmac *dmac = evoc;
+	u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
+
+	mutex_lock(&dmac->lock);
+	if (put + nr >= (PAGE_SIZE / 4) - 8) {
+		dmac->ptr[put] = 0x20000000;
+
+		nv_wo32(dmac->base.user, 0x0000, 0x00000000);
+		if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+			mutex_unlock(&dmac->lock);
+			NV_ERROR(dmac->base.user, "channel stalled\n");
+			return NULL;
+		}
+
+		put = 0;
+	}
+
+	return dmac->ptr + put;
+}
+
+static void
+evo_kick(u32 *push, void *evoc)
+{
+	struct nv50_dmac *dmac = evoc;
+	nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+	mutex_unlock(&dmac->lock);
+}
+
+#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
+#define evo_data(p,d)   *((p)++) = (d)
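+/* EVO methods are encoded as ((count << 18) | method); e.g. the common
+ * "update" request evo_mthd(push, 0x0080, 1) emits the header 0x00040080
+ * followed by one data word.
+ */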
+
+static bool
+evo_sync_wait(void *data)
+{
+	if (nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000)
+		return true;
+	usleep_range(1, 2);
+	return false;
+}
+
+static int
+evo_sync(struct drm_device *dev)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nv50_disp *disp = nv50_disp(dev);
+	struct nv50_mast *mast = nv50_mast(dev);
+	u32 *push = evo_wait(mast, 8);
+	if (push) {
+		nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+		evo_mthd(push, 0x0080, 2);
+		evo_data(push, 0x00000000);
+		evo_data(push, 0x00000000);
+		evo_kick(push, mast);
+		if (nv_wait_cb(device, evo_sync_wait, disp->sync))
+			return 0;
+	}
+
+	return -EBUSY;
+}
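+/* evo_sync() uses the core channel's notifier word in the shared sync bo:
+ * method 0x0084 points the notify at EVO_MAST_NTFY, the 0x0080 update kicks
+ * the channel, and completion is detected once the word flips non-zero.
+ */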
+
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
+struct nouveau_bo *
+nv50_display_crtc_sema(struct drm_device *dev, int crtc)
+{
+	return nv50_disp(dev)->sync;
+}
+
+struct nv50_display_flip {
+	struct nv50_disp *disp;
+	struct nv50_sync *chan;
+};
+
+static bool
+nv50_display_flip_wait(void *data)
+{
+	struct nv50_display_flip *flip = data;
+	if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
+					      flip->chan->data)
+		return true;
+	usleep_range(1, 2);
+	return false;
+}
+
+void
+nv50_display_flip_stop(struct drm_crtc *crtc)
+{
+	struct nouveau_device *device = nouveau_dev(crtc->dev);
+	struct nv50_display_flip flip = {
+		.disp = nv50_disp(crtc->dev),
+		.chan = nv50_sync(crtc),
+	};
+	u32 *push;
+
+	push = evo_wait(flip.chan, 8);
+	if (push) {
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x0094, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x00c0, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x0080, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, flip.chan);
+	}
+
+	nv_wait_cb(device, nv50_display_flip_wait, &flip);
+}
+
+int
+nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		       struct nouveau_channel *chan, u32 swap_interval)
+{
+	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_sync *sync = nv50_sync(crtc);
+	int head = nv_crtc->index, ret;
+	u32 *push;
+
+	swap_interval <<= 4;
+	if (swap_interval == 0)
+		swap_interval |= 0x100;
+	if (chan == NULL)
+		evo_sync(crtc->dev);
+
+	push = evo_wait(sync, 128);
+	if (unlikely(push == NULL))
+		return -EBUSY;
+
+	if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
+		ret = RING_SPACE(chan, 8);
+		if (ret)
+			return ret;
+
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
+		OUT_RING  (chan, NvEvoSema0 + head);
+		OUT_RING  (chan, sync->addr ^ 0x10);
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+		OUT_RING  (chan, sync->data + 1);
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+		OUT_RING  (chan, sync->addr);
+		OUT_RING  (chan, sync->data);
+	} else
+	if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+		u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
+		ret = RING_SPACE(chan, 12);
+		if (ret)
+			return ret;
+
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+		OUT_RING  (chan, chan->vram);
+		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING  (chan, upper_32_bits(addr ^ 0x10));
+		OUT_RING  (chan, lower_32_bits(addr ^ 0x10));
+		OUT_RING  (chan, sync->data + 1);
+		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING  (chan, upper_32_bits(addr));
+		OUT_RING  (chan, lower_32_bits(addr));
+		OUT_RING  (chan, sync->data);
+		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
+	} else
+	if (chan) {
+		u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
+		ret = RING_SPACE(chan, 10);
+		if (ret)
+			return ret;
+
+		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING  (chan, upper_32_bits(addr ^ 0x10));
+		OUT_RING  (chan, lower_32_bits(addr ^ 0x10));
+		OUT_RING  (chan, sync->data + 1);
+		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
+				 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING  (chan, upper_32_bits(addr));
+		OUT_RING  (chan, lower_32_bits(addr));
+		OUT_RING  (chan, sync->data);
+		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
+				 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+	}
+
+	if (chan) {
+		sync->addr ^= 0x10;
+		sync->data++;
+		FIRE_RING (chan);
+	}
+
+	/* queue the flip */
+	evo_mthd(push, 0x0100, 1);
+	evo_data(push, 0xfffe0000);
+	evo_mthd(push, 0x0084, 1);
+	evo_data(push, swap_interval);
+	if (!(swap_interval & 0x00000100)) {
+		evo_mthd(push, 0x00e0, 1);
+		evo_data(push, 0x40000000);
+	}
+	evo_mthd(push, 0x0088, 4);
+	evo_data(push, sync->addr);
+	evo_data(push, sync->data++);
+	evo_data(push, sync->data);
+	evo_data(push, NvEvoSync);
+	evo_mthd(push, 0x00a0, 2);
+	evo_data(push, 0x00000000);
+	evo_data(push, 0x00000000);
+	evo_mthd(push, 0x00c0, 1);
+	evo_data(push, nv_fb->r_dma);
+	evo_mthd(push, 0x0110, 2);
+	evo_data(push, 0x00000000);
+	evo_data(push, 0x00000000);
+	if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) {
+		evo_mthd(push, 0x0800, 5);
+		evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+		evo_data(push, 0);
+		evo_data(push, (fb->height << 16) | fb->width);
+		evo_data(push, nv_fb->r_pitch);
+		evo_data(push, nv_fb->r_format);
+	} else {
+		evo_mthd(push, 0x0400, 5);
+		evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+		evo_data(push, 0);
+		evo_data(push, (fb->height << 16) | fb->width);
+		evo_data(push, nv_fb->r_pitch);
+		evo_data(push, nv_fb->r_format);
+	}
+	evo_mthd(push, 0x0080, 1);
+	evo_data(push, 0x00000000);
+	evo_kick(push, sync);
+	return 0;
+}
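+/* Flips ping-pong between two semaphore slots in the shared sync bo:
+ * sync->addr is XORed with 0x10 on every flip (EVO_FLIP_SEM0/EVO_FLIP_SEM1
+ * differ by exactly that offset) while sync->data keeps incrementing, giving
+ * each flip a fresh (slot, value) pair for the channel/EVO handshake queued
+ * above.
+ */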
+
+/******************************************************************************
+ * CRTC
+ *****************************************************************************/
+static int
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	struct nouveau_connector *nv_connector;
+	struct drm_connector *connector;
+	u32 *push, mode = 0x00;
+
+	nv_connector = nouveau_crtc_connector_get(nv_crtc);
+	connector = &nv_connector->base;
+	if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+		if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+			mode = DITHERING_MODE_DYNAMIC2X2;
+	} else {
+		mode = nv_connector->dithering_mode;
+	}
+
+	if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+		if (connector->display_info.bpc >= 8)
+			mode |= DITHERING_DEPTH_8BPC;
+	} else {
+		mode |= nv_connector->dithering_depth;
+	}
+
+	push = evo_wait(mast, 4);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
+			evo_data(push, mode);
+		} else
+		if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
+			evo_data(push, mode);
+		} else {
+			evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
+			evo_data(push, mode);
+		}
+
+		if (update) {
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, mast);
+	}
+
+	return 0;
+}
+
+static int
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
+	struct drm_crtc *crtc = &nv_crtc->base;
+	struct nouveau_connector *nv_connector;
+	int mode = DRM_MODE_SCALE_NONE;
+	u32 oX, oY, *push;
+
+	/* start off at the resolution we programmed the crtc for; this
+	 * effectively handles NONE/FULL scaling
+	 */
+	nv_connector = nouveau_crtc_connector_get(nv_crtc);
+	if (nv_connector && nv_connector->native_mode)
+		mode = nv_connector->scaling_mode;
+
+	if (mode != DRM_MODE_SCALE_NONE)
+		omode = nv_connector->native_mode;
+	else
+		omode = umode;
+
+	oX = omode->hdisplay;
+	oY = omode->vdisplay;
+	if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+		oY *= 2;
+
+	/* add overscan compensation if necessary; this keeps the aspect
+	 * ratio the same as the backend mode unless overridden by the
+	 * user setting both hborder and vborder properties.
+	 */
+	if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+			     (nv_connector->underscan == UNDERSCAN_AUTO &&
+			      nv_connector->edid &&
+			      drm_detect_hdmi_monitor(nv_connector->edid)))) {
+		u32 bX = nv_connector->underscan_hborder;
+		u32 bY = nv_connector->underscan_vborder;
+		u32 aspect = (oY << 19) / oX;
+
+		if (bX) {
+			oX -= (bX * 2);
+			if (bY) oY -= (bY * 2);
+			else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
+		} else {
+			oX -= (oX >> 4) + 32;
+			if (bY) oY -= (bY * 2);
+			else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
+		}
+	}
+
+	/* handle CENTER/ASPECT scaling, taking into account the areas
+	 * removed already for overscan compensation
+	 */
+	switch (mode) {
+	case DRM_MODE_SCALE_CENTER:
+		oX = min((u32)umode->hdisplay, oX);
+		oY = min((u32)umode->vdisplay, oY);
+		/* fall-through */
+	case DRM_MODE_SCALE_ASPECT:
+		if (oY < oX) {
+			u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+			oX = ((oY * aspect) + (aspect / 2)) >> 19;
+		} else {
+			u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+			oY = ((oX * aspect) + (aspect / 2)) >> 19;
+		}
+		break;
+	default:
+		break;
+	}
+
+	push = evo_wait(mast, 8);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			/*XXX: SCALE_CTRL_ACTIVE??? */
+			evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, (oY << 16) | oX);
+			evo_data(push, (oY << 16) | oX);
+			evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+		} else {
+			evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
+			evo_data(push, (oY << 16) | oX);
+			evo_data(push, (oY << 16) | oX);
+			evo_data(push, (oY << 16) | oX);
+			evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+		}
+
+		evo_kick(push, mast);
+
+		if (update) {
+			nv50_display_flip_stop(crtc);
+			nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+		}
+	}
+
+	return 0;
+}
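+/* The underscan path above works in 19-bit fixed point:
+ * aspect = (oY << 19) / oX.  E.g. a 1920x1080 backend mode gives
+ * aspect = (1080 << 19) / 1920 = 294912; shrinking to oX = 1800 then yields
+ * oY = (1800 * 294912 + 294912 / 2) >> 19 = 1012, preserving roughly 16:9.
+ * (Example dimensions illustrative only.)
+ */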
+
+static int
+nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	u32 *push, hue, vib;
+	int adj;
+
+	adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
+	vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
+	hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+
+	push = evo_wait(mast, 16);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, (hue << 20) | (vib << 8));
+		} else {
+			evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, (hue << 20) | (vib << 8));
+		}
+
+		if (update) {
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, mast);
+	}
+
+	return 0;
+}
+
+static int
+nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
+		    int x, int y, bool update)
+{
+	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	u32 *push;
+
+	push = evo_wait(mast, 16);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, nvfb->nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
+			evo_data(push, (fb->height << 16) | fb->width);
+			evo_data(push, nvfb->r_pitch);
+			evo_data(push, nvfb->r_format);
+			evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, (y << 16) | x);
+			if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) {
+				evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+				evo_data(push, nvfb->r_dma);
+			}
+		} else {
+			evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, nvfb->nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
+			evo_data(push, (fb->height << 16) | fb->width);
+			evo_data(push, nvfb->r_pitch);
+			evo_data(push, nvfb->r_format);
+			evo_data(push, nvfb->r_dma);
+			evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, (y << 16) | x);
+		}
+
+		if (update) {
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, mast);
+	}
+
+	nv_crtc->fb.tile_flags = nvfb->r_dma;
+	return 0;
+}
+
+static void
+nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	u32 *push = evo_wait(mast, 16);
+	if (push) {
+		if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0x85000000);
+			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+		} else
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0x85000000);
+			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, NvEvoVRAM);
+		} else {
+			evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
+			evo_data(push, 0x85000000);
+			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, NvEvoVRAM);
+		}
+		evo_kick(push, mast);
+	}
+}
+
+static void
+nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	u32 *push = evo_wait(mast, 16);
+	if (push) {
+		if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x05000000);
+		} else
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x05000000);
+			evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+		} else {
+			evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x05000000);
+			evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, mast);
+	}
+}
+
+static void
+nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+
+	if (show)
+		nv50_crtc_cursor_show(nv_crtc);
+	else
+		nv50_crtc_cursor_hide(nv_crtc);
+
+	if (update) {
+		u32 *push = evo_wait(mast, 2);
+		if (push) {
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+			evo_kick(push, mast);
+		}
+	}
+}
+
+static void
+nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void
+nv50_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_mast *mast = nv50_mast(crtc->dev);
+	u32 *push;
+
+	nv50_display_flip_stop(crtc);
+
+	push = evo_wait(mast, 2);
+	if (push) {
+		if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x40000000);
+		} else
+		if (nv50_vers(mast) <  NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x40000000);
+			evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+		} else {
+			evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x03000000);
+			evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000);
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv50_crtc_cursor_show_hide(nv_crtc, false, false);
+}
+
+static void
+nv50_crtc_commit(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_mast *mast = nv50_mast(crtc->dev);
+	u32 *push;
+
+	push = evo_wait(mast, 32);
+	if (push) {
+		if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, NvEvoVRAM_LP);
+			evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0xc0000000);
+			evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+		} else
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, nv_crtc->fb.tile_flags);
+			evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0xc0000000);
+			evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, NvEvoVRAM);
+		} else {
+			evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, nv_crtc->fb.tile_flags);
+			evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+			evo_data(push, 0x83000000);
+			evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+			evo_data(push, 0x00000000);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, NvEvoVRAM);
+			evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0xffffff00);
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
+	nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+}
+
+static bool
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
+		     struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static int
+nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+	int ret;
+
+	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	if (old_fb) {
+		nvfb = nouveau_framebuffer(old_fb);
+		nouveau_bo_unpin(nvfb->nvbo);
+	}
+
+	return 0;
+}
+
+static int
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+		   struct drm_display_mode *mode, int x, int y,
+		   struct drm_framebuffer *old_fb)
+{
+	struct nv50_mast *mast = nv50_mast(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nouveau_connector *nv_connector;
+	u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+	u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+	u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+	u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+	u32 vblan2e = 0, vblan2s = 1;
+	u32 *push;
+	int ret;
+
+	hactive = mode->htotal;
+	hsynce  = mode->hsync_end - mode->hsync_start - 1;
+	hbackp  = mode->htotal - mode->hsync_end;
+	hblanke = hsynce + hbackp;
+	hfrontp = mode->hsync_start - mode->hdisplay;
+	hblanks = mode->htotal - hfrontp - 1;
+
+	vactive = mode->vtotal * vscan / ilace;
+	vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+	vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+	vblanke = vsynce + vbackp;
+	vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+	vblanks = vactive - vfrontp - 1;
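+	/* interlaced modes need a second blanking region for the other
+	 * field, and report the combined height of both fields
+	 */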
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		vblan2e = vactive + vsynce + vbackp;
+		vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+		vactive = (vactive * 2) + 1;
+	}
+
+	ret = nv50_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
+
+	push = evo_wait(mast, 64);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0x00800000 | mode->clock);
+			evo_data(push, (ilace == 2) ? 2 : 0);
+			evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
+			evo_data(push, 0x00000000);
+			evo_data(push, (vactive << 16) | hactive);
+			evo_data(push, ( vsynce << 16) | hsynce);
+			evo_data(push, (vblanke << 16) | hblanke);
+			evo_data(push, (vblanks << 16) | hblanks);
+			evo_data(push, (vblan2e << 16) | vblan2s);
+			evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0x00000311);
+			evo_data(push, 0x00000100);
+		} else {
+			evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
+			evo_data(push, 0x00000000);
+			evo_data(push, (vactive << 16) | hactive);
+			evo_data(push, ( vsynce << 16) | hsynce);
+			evo_data(push, (vblanke << 16) | hblanke);
+			evo_data(push, (vblanks << 16) | hblanks);
+			evo_data(push, (vblan2e << 16) | vblan2s);
+			evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000); /* ??? */
+			evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
+			evo_data(push, mode->clock * 1000);
+			evo_data(push, 0x00200000); /* ??? */
+			evo_data(push, mode->clock * 1000);
+			evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+			evo_data(push, 0x00000311);
+			evo_data(push, 0x00000100);
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv_connector = nouveau_crtc_connector_get(nv_crtc);
+	nv50_crtc_set_dither(nv_crtc, false);
+	nv50_crtc_set_scale(nv_crtc, false);
+	nv50_crtc_set_color_vibrance(nv_crtc, false);
+	nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
+	return 0;
+}
+
+static int
+nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+			struct drm_framebuffer *old_fb)
+{
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	int ret;
+
+	if (!crtc->fb) {
+		NV_DEBUG(drm, "No FB bound\n");
+		return 0;
+	}
+
+	ret = nv50_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
+
+	nv50_display_flip_stop(crtc);
+	nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+	nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+	return 0;
+}
+
+static int
+nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+			       struct drm_framebuffer *fb, int x, int y,
+			       enum mode_set_atomic state)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	nv50_display_flip_stop(crtc);
+	nv50_crtc_set_image(nv_crtc, fb, x, y, true);
+	return 0;
+}
+
+static void
+nv50_crtc_lut_load(struct drm_crtc *crtc)
+{
+	struct nv50_disp *disp = nv50_disp(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+	int i;
+
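+	/* lut entries are 14-bit; pre-NVD0 cores use 8-byte entries,
+	 * NVD0 and up use 32-byte entries with values biased by 0x6000
+	 */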
+	for (i = 0; i < 256; i++) {
+		u16 r = nv_crtc->lut.r[i] >> 2;
+		u16 g = nv_crtc->lut.g[i] >> 2;
+		u16 b = nv_crtc->lut.b[i] >> 2;
+
+		if (nv_mclass(disp->core) < NVD0_DISP_CLASS) {
+			writew(r + 0x0000, lut + (i * 0x08) + 0);
+			writew(g + 0x0000, lut + (i * 0x08) + 2);
+			writew(b + 0x0000, lut + (i * 0x08) + 4);
+		} else {
+			writew(r + 0x6000, lut + (i * 0x20) + 0);
+			writew(g + 0x6000, lut + (i * 0x20) + 2);
+			writew(b + 0x6000, lut + (i * 0x20) + 4);
+		}
+	}
+}
+
+static int
+nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+		     uint32_t handle, uint32_t width, uint32_t height)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_gem_object *gem;
+	struct nouveau_bo *nvbo;
+	bool visible = (handle != 0);
+	int i, ret = 0;
+
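+	/* the hardware cursor is a fixed 64x64 image; copy the new
+	 * image word-by-word into the per-head cursor buffer
+	 */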
+	if (visible) {
+		if (width != 64 || height != 64)
+			return -EINVAL;
+
+		gem = drm_gem_object_lookup(dev, file_priv, handle);
+		if (unlikely(!gem))
+			return -ENOENT;
+		nvbo = nouveau_gem_object(gem);
+
+		ret = nouveau_bo_map(nvbo);
+		if (ret == 0) {
+			for (i = 0; i < 64 * 64; i++) {
+				u32 v = nouveau_bo_rd32(nvbo, i);
+				nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
+			}
+			nouveau_bo_unmap(nvbo);
+		}
+
+		drm_gem_object_unreference_unlocked(gem);
+	}
+
+	if (visible != nv_crtc->cursor.visible) {
+		nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
+		nv_crtc->cursor.visible = visible;
+	}
+
+	return ret;
+}
+
+static int
+nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct nv50_curs *curs = nv50_curs(crtc);
+	struct nv50_chan *chan = nv50_chan(curs);
+	nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff));
+	nv_wo32(chan->user, 0x0080, 0x00000000);
+	return 0;
+}
+
+static void
+nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+		    uint32_t start, uint32_t size)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	u32 end = min_t(u32, start + size, 256);
+	u32 i;
+
+	for (i = start; i < end; i++) {
+		nv_crtc->lut.r[i] = r[i];
+		nv_crtc->lut.g[i] = g[i];
+		nv_crtc->lut.b[i] = b[i];
+	}
+
+	nv50_crtc_lut_load(crtc);
+}
+
+static void
+nv50_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_disp *disp = nv50_disp(crtc->dev);
+	struct nv50_head *head = nv50_head(crtc);
+	nv50_dmac_destroy(disp->core, &head->ovly.base);
+	nv50_pioc_destroy(disp->core, &head->oimm.base);
+	nv50_dmac_destroy(disp->core, &head->sync.base);
+	nv50_pioc_destroy(disp->core, &head->curs.base);
+	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+	if (nv_crtc->cursor.nvbo)
+		nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+	nouveau_bo_unmap(nv_crtc->lut.nvbo);
+	if (nv_crtc->lut.nvbo)
+		nouveau_bo_unpin(nv_crtc->lut.nvbo);
+	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+	drm_crtc_cleanup(crtc);
+	kfree(crtc);
+}
+
+static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
+	.dpms = nv50_crtc_dpms,
+	.prepare = nv50_crtc_prepare,
+	.commit = nv50_crtc_commit,
+	.mode_fixup = nv50_crtc_mode_fixup,
+	.mode_set = nv50_crtc_mode_set,
+	.mode_set_base = nv50_crtc_mode_set_base,
+	.mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
+	.load_lut = nv50_crtc_lut_load,
+};
+
+static const struct drm_crtc_funcs nv50_crtc_func = {
+	.cursor_set = nv50_crtc_cursor_set,
+	.cursor_move = nv50_crtc_cursor_move,
+	.gamma_set = nv50_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = nv50_crtc_destroy,
+	.page_flip = nouveau_crtc_page_flip,
+};
+
+static void
+nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+}
+
+static void
+nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+}
+
+static int
+nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
+{
+	struct nv50_disp *disp = nv50_disp(dev);
+	struct nv50_head *head;
+	struct drm_crtc *crtc;
+	int ret, i;
+
+	head = kzalloc(sizeof(*head), GFP_KERNEL);
+	if (!head)
+		return -ENOMEM;
+
+	head->base.index = index;
+	head->base.set_dither = nv50_crtc_set_dither;
+	head->base.set_scale = nv50_crtc_set_scale;
+	head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
+	head->base.color_vibrance = 50;
+	head->base.vibrant_hue = 0;
+	head->base.cursor.set_offset = nv50_cursor_set_offset;
+	head->base.cursor.set_pos = nv50_cursor_set_pos;
+	for (i = 0; i < 256; i++) {
+		head->base.lut.r[i] = i << 8;
+		head->base.lut.g[i] = i << 8;
+		head->base.lut.b[i] = i << 8;
+	}
+
+	crtc = &head->base.base;
+	drm_crtc_init(dev, crtc, &nv50_crtc_func);
+	drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
+	ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, NULL, &head->base.lut.nvbo);
+	if (!ret) {
+		ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
+		if (!ret) {
+			ret = nouveau_bo_map(head->base.lut.nvbo);
+			if (ret)
+				nouveau_bo_unpin(head->base.lut.nvbo);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &head->base.lut.nvbo);
+	}
+
+	if (ret)
+		goto out;
+
+	nv50_crtc_lut_load(crtc);
+
+	/* allocate cursor resources */
+	ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index,
+			      &(struct nv50_display_curs_class) {
+					.head = index,
+			      }, sizeof(struct nv50_display_curs_class),
+			      &head->curs.base);
+	if (ret)
+		goto out;
+
+	ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, NULL, &head->base.cursor.nvbo);
+	if (!ret) {
+		ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
+		if (!ret) {
+			ret = nouveau_bo_map(head->base.cursor.nvbo);
+			if (ret)
+				nouveau_bo_unpin(head->base.cursor.nvbo);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
+	}
+
+	if (ret)
+		goto out;
+
+	/* allocate page flip / sync resources */
+	ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index,
+			      &(struct nv50_display_sync_class) {
+					.pushbuf = EVO_PUSH_HANDLE(SYNC, index),
+					.head = index,
+			      }, sizeof(struct nv50_display_sync_class),
+			      disp->sync->bo.offset, &head->sync.base);
+	if (ret)
+		goto out;
+
+	head->sync.addr = EVO_FLIP_SEM0(index);
+	head->sync.data = 0x00000000;
+
+	/* allocate overlay resources */
+	ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
+			      &(struct nv50_display_oimm_class) {
+					.head = index,
+			      }, sizeof(struct nv50_display_oimm_class),
+			      &head->oimm.base);
+	if (ret)
+		goto out;
+
+	ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index,
+			      &(struct nv50_display_ovly_class) {
+					.pushbuf = EVO_PUSH_HANDLE(OVLY, index),
+					.head = index,
+			      }, sizeof(struct nv50_display_ovly_class),
+			      disp->sync->bo.offset, &head->ovly.base);
+	if (ret)
+		goto out;
+
+out:
+	if (ret)
+		nv50_crtc_destroy(crtc);
+	return ret;
+}
+
+/******************************************************************************
+ * DAC
+ *****************************************************************************/
+static void
+nv50_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+	int or = nv_encoder->or;
+	u32 dpms_ctrl;
+
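+	/* bit 0 appears to cut hsync and bit 2 vsync, giving the usual
+	 * DPMS standby/suspend/off sync combinations
+	 */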
+	dpms_ctrl = 0x00000000;
+	if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
+		dpms_ctrl |= 0x00000001;
+	if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
+		dpms_ctrl |= 0x00000004;
+
+	nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl);
+}
+
+static bool
+nv50_dac_mode_fixup(struct drm_encoder *encoder,
+		    const struct drm_display_mode *mode,
+		    struct drm_display_mode *adjusted_mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_connector *nv_connector;
+
+	nv_connector = nouveau_encoder_connector_get(nv_encoder);
+	if (nv_connector && nv_connector->native_mode) {
+		if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+			int id = adjusted_mode->base.id;
+			*adjusted_mode = *nv_connector->native_mode;
+			adjusted_mode->base.id = id;
+		}
+	}
+
+	return true;
+}
+
+static void
+nv50_dac_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+		  struct drm_display_mode *adjusted_mode)
+{
+	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	u32 *push;
+
+	nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	push = evo_wait(mast, 8);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			u32 syncs = 0x00000000;
+
+			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+				syncs |= 0x00000001;
+			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+				syncs |= 0x00000002;
+
+			evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
+			evo_data(push, 1 << nv_crtc->index);
+			evo_data(push, syncs);
+		} else {
+			u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+			u32 syncs = 0x00000001;
+
+			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+				syncs |= 0x00000008;
+			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+				syncs |= 0x00000010;
+
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				magic |= 0x00000001;
+
+			evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+			evo_data(push, syncs);
+			evo_data(push, magic);
+			evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
+			evo_data(push, 1 << nv_crtc->index);
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv_encoder->crtc = encoder->crtc;
+}
+
+static void
+nv50_dac_disconnect(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	const int or = nv_encoder->or;
+	u32 *push;
+
+	if (nv_encoder->crtc) {
+		nv50_crtc_prepare(nv_encoder->crtc);
+
+		push = evo_wait(mast, 4);
+		if (push) {
+			if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+				evo_mthd(push, 0x0400 + (or * 0x080), 1);
+				evo_data(push, 0x00000000);
+			} else {
+				evo_mthd(push, 0x0180 + (or * 0x020), 1);
+				evo_data(push, 0x00000000);
+			}
+			evo_kick(push, mast);
+		}
+	}
+
+	nv_encoder->crtc = NULL;
+}
+
+static enum drm_connector_status
+nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+	int ret, or = nouveau_encoder(encoder)->or;
+	u32 load = nouveau_drm(encoder->dev)->vbios.dactestval;
+	if (load == 0)
+		load = 340;
+
+	ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
+	if (ret || load != 7)
+		return connector_status_disconnected;
+
+	return connector_status_connected;
+}
+
+static void
+nv50_dac_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
+	.dpms = nv50_dac_dpms,
+	.mode_fixup = nv50_dac_mode_fixup,
+	.prepare = nv50_dac_disconnect,
+	.commit = nv50_dac_commit,
+	.mode_set = nv50_dac_mode_set,
+	.disable = nv50_dac_disconnect,
+	.get_crtc = nv50_display_crtc_get,
+	.detect = nv50_dac_detect
+};
+
+static const struct drm_encoder_funcs nv50_dac_func = {
+	.destroy = nv50_dac_destroy,
+};
+
+static int
+nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_encoder *nv_encoder;
+	struct drm_encoder *encoder;
+	int type = DRM_MODE_ENCODER_DAC;
+
+	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+	if (!nv_encoder)
+		return -ENOMEM;
+	nv_encoder->dcb = dcbe;
+	nv_encoder->or = ffs(dcbe->or) - 1;
+	nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
+
+	encoder = to_drm_encoder(nv_encoder);
+	encoder->possible_crtcs = dcbe->heads;
+	encoder->possible_clones = 0;
+	drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type);
+	drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+	return 0;
+}
+
+/******************************************************************************
+ * Audio
+ *****************************************************************************/
+static void
+nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_connector *nv_connector;
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+	nv_connector = nouveau_encoder_connector_get(nv_encoder);
+	if (!drm_detect_monitor_audio(nv_connector->edid))
+		return;
+
+	drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+
+	nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or,
+			    nv_connector->base.eld,
+			    nv_connector->base.eld[2] * 4);
+}
+
+static void
+nv50_audio_disconnect(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+	nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct nouveau_connector *nv_connector;
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+	const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+	u32 rekey = 56; /* binary driver, and tegra constant */
+	u32 max_ac_packet;
+
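+	/* work out how many 32-byte audio/auxiliary packets fit into
+	 * the hblank period once the rekey and fixed overhead are
+	 * subtracted
+	 */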
+	nv_connector = nouveau_encoder_connector_get(nv_encoder);
+	if (!drm_detect_hdmi_monitor(nv_connector->edid))
+		return;
+
+	max_ac_packet  = mode->htotal - mode->hdisplay;
+	max_ac_packet -= rekey;
+	max_ac_packet -= 18; /* constant from tegra */
+	max_ac_packet /= 32;
+
+	nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff,
+			    NV84_DISP_SOR_HDMI_PWR_STATE_ON |
+			    (max_ac_packet << 16) | rekey);
+
+	nv50_audio_mode_set(encoder, mode);
+}
+
+static void
+nv50_hdmi_disconnect(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+	const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+
+	nv50_audio_disconnect(encoder);
+
+	nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000);
+}
+
+/******************************************************************************
+ * SOR
+ *****************************************************************************/
+static void
+nv50_sor_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct nv50_disp *disp = nv50_disp(dev);
+	struct drm_encoder *partner;
+	int or = nv_encoder->or;
+
+	nv_encoder->last_dpms = mode;
+
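+	/* an OR can be shared by multiple encoders; don't power it
+	 * down while a sharing encoder is still active
+	 */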
+	list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
+		struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
+
+		if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
+			continue;
+
+		if (nv_partner != nv_encoder &&
+		    nv_partner->dcb->or == nv_encoder->dcb->or) {
+			if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
+				return;
+			break;
+		}
+	}
+
+	nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
+}
+
+static bool
+nv50_sor_mode_fixup(struct drm_encoder *encoder,
+		    const struct drm_display_mode *mode,
+		    struct drm_display_mode *adjusted_mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_connector *nv_connector;
+
+	nv_connector = nouveau_encoder_connector_get(nv_encoder);
+	if (nv_connector && nv_connector->native_mode) {
+		if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+			int id = adjusted_mode->base.id;
+			*adjusted_mode = *nv_connector->native_mode;
+			adjusted_mode->base.id = id;
+		}
+	}
+
+	return true;
+}
+
+static void
+nv50_sor_disconnect(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	const int or = nv_encoder->or;
+	u32 *push;
+
+	if (nv_encoder->crtc) {
+		nv50_crtc_prepare(nv_encoder->crtc);
+
+		push = evo_wait(mast, 4);
+		if (push) {
+			if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+				evo_mthd(push, 0x0600 + (or * 0x40), 1);
+				evo_data(push, 0x00000000);
+			} else {
+				evo_mthd(push, 0x0200 + (or * 0x20), 1);
+				evo_data(push, 0x00000000);
+			}
+			evo_kick(push, mast);
+		}
+
+		nv50_hdmi_disconnect(encoder);
+	}
+
+	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+	nv_encoder->crtc = NULL;
+}
+
+static void
+nv50_sor_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+		  struct drm_display_mode *mode)
+{
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct nouveau_connector *nv_connector;
+	struct nvbios *bios = &drm->vbios;
+	u32 *push, lvds = 0;
+	u8 owner = 1 << nv_crtc->index;
+	u8 proto = 0xf;
+	u8 depth = 0x0;
+
+	nv_connector = nouveau_encoder_connector_get(nv_encoder);
+	switch (nv_encoder->dcb->type) {
+	case DCB_OUTPUT_TMDS:
+		if (nv_encoder->dcb->sorconf.link & 1) {
+			if (mode->clock < 165000)
+				proto = 0x1;
+			else
+				proto = 0x5;
+		} else {
+			proto = 0x2;
+		}
+
+		nv50_hdmi_mode_set(encoder, mode);
+		break;
+	case DCB_OUTPUT_LVDS:
+		proto = 0x0;
+
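+		/* select the vbios lvds script: 0x0100 requests dual-link,
+		 * 0x0200 a 24-bit panel, based on straps or the edid
+		 */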
+		if (bios->fp_no_ddc) {
+			if (bios->fp.dual_link)
+				lvds |= 0x0100;
+			if (bios->fp.if_is_24bit)
+				lvds |= 0x0200;
+		} else {
+			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+				if (((u8 *)nv_connector->edid)[121] == 2)
+					lvds |= 0x0100;
+			} else
+			if (mode->clock >= bios->fp.duallink_transition_clk) {
+				lvds |= 0x0100;
+			}
+
+			if (lvds & 0x0100) {
+				if (bios->fp.strapless_is_24bit & 2)
+					lvds |= 0x0200;
+			} else {
+				if (bios->fp.strapless_is_24bit & 1)
+					lvds |= 0x0200;
+			}
+
+			if (nv_connector->base.display_info.bpc == 8)
+				lvds |= 0x0200;
+		}
+
+		nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds);
+		break;
+	case DCB_OUTPUT_DP:
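+		/* link bandwidth needed: pixel clock * bits-per-pixel / 8 */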
+		if (nv_connector->base.display_info.bpc == 6) {
+			nv_encoder->dp.datarate = mode->clock * 18 / 8;
+			depth = 0x2;
+		} else
+		if (nv_connector->base.display_info.bpc == 8) {
+			nv_encoder->dp.datarate = mode->clock * 24 / 8;
+			depth = 0x5;
+		} else {
+			nv_encoder->dp.datarate = mode->clock * 30 / 8;
+			depth = 0x6;
+		}
+
+		if (nv_encoder->dcb->sorconf.link & 1)
+			proto = 0x8;
+		else
+			proto = 0x9;
+		break;
+	default:
+		BUG_ON(1);
+		break;
+	}
+
+	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	push = evo_wait(nv50_mast(dev), 8);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			u32 ctrl = (depth << 16) | (proto << 8) | owner;
+			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+				ctrl |= 0x00001000;
+			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+				ctrl |= 0x00002000;
+			evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
+			evo_data(push, ctrl);
+		} else {
+			u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+			u32 syncs = 0x00000001;
+
+			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+				syncs |= 0x00000008;
+			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+				syncs |= 0x00000010;
+
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				magic |= 0x00000001;
+
+			evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+			evo_data(push, syncs | (depth << 6));
+			evo_data(push, magic);
+			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1);
+			evo_data(push, owner | (proto << 8));
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv_encoder->crtc = encoder->crtc;
+}
+
+static void
+nv50_sor_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
+	.dpms = nv50_sor_dpms,
+	.mode_fixup = nv50_sor_mode_fixup,
+	.prepare = nv50_sor_disconnect,
+	.commit = nv50_sor_commit,
+	.mode_set = nv50_sor_mode_set,
+	.disable = nv50_sor_disconnect,
+	.get_crtc = nv50_display_crtc_get,
+};
+
+static const struct drm_encoder_funcs nv50_sor_func = {
+	.destroy = nv50_sor_destroy,
+};
+
+static int
+nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_encoder *nv_encoder;
+	struct drm_encoder *encoder;
+	int type;
+
+	switch (dcbe->type) {
+	case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
+	case DCB_OUTPUT_TMDS:
+	case DCB_OUTPUT_DP:
+	default:
+		type = DRM_MODE_ENCODER_TMDS;
+		break;
+	}
+
+	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+	if (!nv_encoder)
+		return -ENOMEM;
+	nv_encoder->dcb = dcbe;
+	nv_encoder->or = ffs(dcbe->or) - 1;
+	nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
+	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+
+	encoder = to_drm_encoder(nv_encoder);
+	encoder->possible_crtcs = dcbe->heads;
+	encoder->possible_clones = 0;
+	drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type);
+	drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+	return 0;
+}
+
+/******************************************************************************
+ * PIOR
+ *****************************************************************************/
+
+static void
+nv50_pior_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+	u32 mthd = (nv_encoder->dcb->type << 12) | nv_encoder->or;
+	u32 ctrl = (mode == DRM_MODE_DPMS_ON);
+	nv_call(disp->core, NV50_DISP_PIOR_PWR + mthd, ctrl);
+}
+
+static bool
+nv50_pior_mode_fixup(struct drm_encoder *encoder,
+		     const struct drm_display_mode *mode,
+		     struct drm_display_mode *adjusted_mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_connector *nv_connector;
+
+	nv_connector = nouveau_encoder_connector_get(nv_encoder);
+	if (nv_connector && nv_connector->native_mode) {
+		if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+			int id = adjusted_mode->base.id;
+			*adjusted_mode = *nv_connector->native_mode;
+			adjusted_mode->base.id = id;
+		}
+	}
+
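+	/* the clock fed to external (PIOR) encoders appears to run at
+	 * twice the pixel rate
+	 */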
+	adjusted_mode->clock *= 2;
+	return true;
+}
+
+static void
+nv50_pior_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+		   struct drm_display_mode *adjusted_mode)
+{
+	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+	struct nouveau_connector *nv_connector;
+	u8 owner = 1 << nv_crtc->index;
+	u8 proto, depth;
+	u32 *push;
+
+	nv_connector = nouveau_encoder_connector_get(nv_encoder);
+	switch (nv_connector->base.display_info.bpc) {
+	case 10: depth = 0x6; break;
+	case  8: depth = 0x5; break;
+	case  6: depth = 0x2; break;
+	default: depth = 0x0; break;
+	}
+
+	switch (nv_encoder->dcb->type) {
+	case DCB_OUTPUT_TMDS:
+	case DCB_OUTPUT_DP:
+		proto = 0x0;
+		break;
+	default:
+		BUG_ON(1);
+		break;
+	}
+
+	nv50_pior_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	push = evo_wait(mast, 8);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			u32 ctrl = (depth << 16) | (proto << 8) | owner;
+			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+				ctrl |= 0x00001000;
+			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+				ctrl |= 0x00002000;
+			evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
+			evo_data(push, ctrl);
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv_encoder->crtc = encoder->crtc;
+}
+
+static void
+nv50_pior_disconnect(struct drm_encoder *encoder)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nv50_mast *mast = nv50_mast(encoder->dev);
+	const int or = nv_encoder->or;
+	u32 *push;
+
+	if (nv_encoder->crtc) {
+		nv50_crtc_prepare(nv_encoder->crtc);
+
+		push = evo_wait(mast, 4);
+		if (push) {
+			if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+				evo_mthd(push, 0x0700 + (or * 0x040), 1);
+				evo_data(push, 0x00000000);
+			}
+			evo_kick(push, mast);
+		}
+	}
+
+	nv_encoder->crtc = NULL;
+}
+
+static void
+nv50_pior_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_pior_hfunc = {
+	.dpms = nv50_pior_dpms,
+	.mode_fixup = nv50_pior_mode_fixup,
+	.prepare = nv50_pior_disconnect,
+	.commit = nv50_pior_commit,
+	.mode_set = nv50_pior_mode_set,
+	.disable = nv50_pior_disconnect,
+	.get_crtc = nv50_display_crtc_get,
+};
+
+static const struct drm_encoder_funcs nv50_pior_func = {
+	.destroy = nv50_pior_destroy,
+};
+
+static int
+nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_i2c_port *ddc = NULL;
+	struct nouveau_encoder *nv_encoder;
+	struct drm_encoder *encoder;
+	int type;
+
+	switch (dcbe->type) {
+	case DCB_OUTPUT_TMDS:
+		ddc  = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(dcbe->extdev));
+		type = DRM_MODE_ENCODER_TMDS;
+		break;
+	case DCB_OUTPUT_DP:
+		ddc  = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(dcbe->extdev));
+		type = DRM_MODE_ENCODER_TMDS;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+	if (!nv_encoder)
+		return -ENOMEM;
+	nv_encoder->dcb = dcbe;
+	nv_encoder->or = ffs(dcbe->or) - 1;
+	nv_encoder->i2c = ddc;
+
+	encoder = to_drm_encoder(nv_encoder);
+	encoder->possible_crtcs = dcbe->heads;
+	encoder->possible_clones = 0;
+	drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type);
+	drm_encoder_helper_add(encoder, &nv50_pior_hfunc);
+
+	drm_mode_connector_attach_encoder(connector, encoder);
+	return 0;
+}
+
+/******************************************************************************
+ * Init
+ *****************************************************************************/
+void
+nv50_display_fini(struct drm_device *dev)
+{
+}
+
+int
+nv50_display_init(struct drm_device *dev)
+{
+	struct nv50_disp *disp = nv50_disp(dev);
+	struct drm_crtc *crtc;
+	u32 *push;
+
+	push = evo_wait(nv50_mast(dev), 32);
+	if (!push)
+		return -EBUSY;
+
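+	/* reset the page-flip semaphores to their completed state so
+	 * that flips don't stall after suspend/resume
+	 */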
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nv50_sync *sync = nv50_sync(crtc);
+		nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
+	}
+
+	evo_mthd(push, 0x0088, 1);
+	evo_data(push, NvEvoSync);
+	evo_kick(push, nv50_mast(dev));
+	return 0;
+}
+
+void
+nv50_display_destroy(struct drm_device *dev)
+{
+	struct nv50_disp *disp = nv50_disp(dev);
+
+	nv50_dmac_destroy(disp->core, &disp->mast.base);
+
+	nouveau_bo_unmap(disp->sync);
+	if (disp->sync)
+		nouveau_bo_unpin(disp->sync);
+	nouveau_bo_ref(NULL, &disp->sync);
+
+	nouveau_display(dev)->priv = NULL;
+	kfree(disp);
+}
+
+int
+nv50_display_create(struct drm_device *dev)
+{
+	static const u16 oclass[] = {
+		NVF0_DISP_CLASS,
+		NVE0_DISP_CLASS,
+		NVD0_DISP_CLASS,
+		NVA3_DISP_CLASS,
+		NV94_DISP_CLASS,
+		NVA0_DISP_CLASS,
+		NV84_DISP_CLASS,
+		NV50_DISP_CLASS,
+	};
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_table *dcb = &drm->vbios.dcb;
+	struct drm_connector *connector, *tmp;
+	struct nv50_disp *disp;
+	struct dcb_output *dcbe;
+	int crtcs, ret, i;
+
+	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+	if (!disp)
+		return -ENOMEM;
+
+	nouveau_display(dev)->priv = disp;
+	nouveau_display(dev)->dtor = nv50_display_destroy;
+	nouveau_display(dev)->init = nv50_display_init;
+	nouveau_display(dev)->fini = nv50_display_fini;
+
+	/* small shared memory area we use for notifiers and semaphores */
+	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, NULL, &disp->sync);
+	if (!ret) {
+		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+		if (!ret) {
+			ret = nouveau_bo_map(disp->sync);
+			if (ret)
+				nouveau_bo_unpin(disp->sync);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &disp->sync);
+	}
+
+	if (ret)
+		goto out;
+
+	/* attempt to allocate a supported evo display class */
+	ret = -ENODEV;
+	for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
+		ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
+					 0xd1500000, oclass[i], NULL, 0,
+					 &disp->core);
+	}
+
+	if (ret)
+		goto out;
+
+	/* allocate master evo channel */
+	ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
+			      &(struct nv50_display_mast_class) {
+					.pushbuf = EVO_PUSH_HANDLE(MAST, 0),
+			      }, sizeof(struct nv50_display_mast_class),
+			      disp->sync->bo.offset, &disp->mast.base);
+	if (ret)
+		goto out;
+
+	/* create crtc objects to represent the hw heads */
+	if (nv_mclass(disp->core) >= NVD0_DISP_CLASS)
+		crtcs = nv_rd32(device, 0x022448);
+	else
+		crtcs = 2;
+
+	for (i = 0; i < crtcs; i++) {
+		ret = nv50_crtc_create(dev, disp->core, i);
+		if (ret)
+			goto out;
+	}
+
+	/* create encoder/connector objects based on VBIOS DCB table */
+	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
+		connector = nouveau_connector_create(dev, dcbe->connector);
+		if (IS_ERR(connector))
+			continue;
+
+		if (dcbe->location == DCB_LOC_ON_CHIP) {
+			switch (dcbe->type) {
+			case DCB_OUTPUT_TMDS:
+			case DCB_OUTPUT_LVDS:
+			case DCB_OUTPUT_DP:
+				ret = nv50_sor_create(connector, dcbe);
+				break;
+			case DCB_OUTPUT_ANALOG:
+				ret = nv50_dac_create(connector, dcbe);
+				break;
+			default:
+				ret = -ENODEV;
+				break;
+			}
+		} else {
+			ret = nv50_pior_create(connector, dcbe);
+		}
+
+		if (ret) {
+			NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
+				     dcbe->location, dcbe->type,
+				     ffs(dcbe->or) - 1, ret);
+			ret = 0;
+		}
+	}
+
+	/* cull any connectors we created that don't have an encoder */
+	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
+		if (connector->encoder_ids[0])
+			continue;
+
+		NV_WARN(drm, "%s has no encoders, removing\n",
+			drm_get_connector_name(connector));
+		connector->funcs->destroy(connector);
+	}
+
+out:
+	if (ret)
+		nv50_display_destroy(dev);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nv50_display.h b/linux-imx/drivers/gpu/drm/nouveau/nv50_display.h
new file mode 100644
index 0000000..70da347
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nv50_display.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NV50_DISPLAY_H__
+#define __NV50_DISPLAY_H__
+
+#include "nouveau_display.h"
+#include "nouveau_crtc.h"
+#include "nouveau_reg.h"
+
+int  nv50_display_create(struct drm_device *);
+void nv50_display_destroy(struct drm_device *);
+int  nv50_display_init(struct drm_device *);
+void nv50_display_fini(struct drm_device *);
+
+void nv50_display_flip_stop(struct drm_crtc *);
+int  nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+			    struct nouveau_channel *, u32 swap_interval);
+
+struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
+
+#endif /* __NV50_DISPLAY_H__ */
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nv50_fbcon.c b/linux-imx/drivers/gpu/drm/nouveau/nv50_fbcon.c
new file mode 100644
index 0000000..52068a0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_fbcon.h"
+
+int
+nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+	if (ret)
+		return ret;
+
+	if (rect->rop != ROP_COPY) {
+		BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
+		OUT_RING(chan, 1);
+	}
+	BEGIN_NV04(chan, NvSub2D, 0x0588, 1);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+		OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+	else
+		OUT_RING(chan, rect->color);
+	BEGIN_NV04(chan, NvSub2D, 0x0600, 4);
+	OUT_RING(chan, rect->dx);
+	OUT_RING(chan, rect->dy);
+	OUT_RING(chan, rect->dx + rect->width);
+	OUT_RING(chan, rect->dy + rect->height);
+	if (rect->rop != ROP_COPY) {
+		BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
+		OUT_RING(chan, 3);
+	}
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, 12);
+	if (ret)
+		return ret;
+
+	BEGIN_NV04(chan, NvSub2D, 0x0110, 1);
+	OUT_RING(chan, 0);
+	BEGIN_NV04(chan, NvSub2D, 0x08b0, 4);
+	OUT_RING(chan, region->dx);
+	OUT_RING(chan, region->dy);
+	OUT_RING(chan, region->width);
+	OUT_RING(chan, region->height);
+	BEGIN_NV04(chan, NvSub2D, 0x08d0, 4);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, region->sx);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, region->sy);
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
+	uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+	uint32_t *palette = info->pseudo_palette;
+	int ret;
+
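+	/* the 2D engine can only color-expand 1bpp source images */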
+	if (image->depth != 1)
+		return -ENODEV;
+
+	ret = RING_SPACE(chan, 11);
+	if (ret)
+		return ret;
+
+	width = ALIGN(image->width, 32);
+	dwords = (width * image->height) >> 5;
+
+	BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+		OUT_RING(chan, palette[image->bg_color] | mask);
+		OUT_RING(chan, palette[image->fg_color] | mask);
+	} else {
+		OUT_RING(chan, image->bg_color);
+		OUT_RING(chan, image->fg_color);
+	}
+	BEGIN_NV04(chan, NvSub2D, 0x0838, 2);
+	OUT_RING(chan, image->width);
+	OUT_RING(chan, image->height);
+	BEGIN_NV04(chan, NvSub2D, 0x0850, 4);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, image->dx);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, image->dy);
+
+	while (dwords) {
+		int push = dwords > 2047 ? 2047 : dwords;
+
+		ret = RING_SPACE(chan, push + 1);
+		if (ret)
+			return ret;
+
+		dwords -= push;
+
+		BEGIN_NI04(chan, NvSub2D, 0x0860, push);
+		OUT_RINGp(chan, data, push);
+		data += push;
+	}
+
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nv50_fbcon_accel_init(struct fb_info *info)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+	struct drm_device *dev = nfbdev->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_channel *chan = drm->channel;
+	struct nouveau_object *object;
+	int ret, format;
+
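+	/* translate the framebuffer depth into a 2D surface format */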
+	switch (info->var.bits_per_pixel) {
+	case 8:
+		format = 0xf3;
+		break;
+	case 15:
+		format = 0xf8;
+		break;
+	case 16:
+		format = 0xe8;
+		break;
+	case 32:
+		switch (info->var.transp.length) {
+		case 0: /* depth 24 */
+		case 8: /* depth 32, just use 24.. */
+			format = 0xe6;
+			break;
+		case 2: /* depth 30 */
+			format = 0xd1;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
+				 0x502d, NULL, 0, &object);
+	if (ret)
+		return ret;
+
+	ret = RING_SPACE(chan, 59);
+	if (ret) {
+		nouveau_fbcon_gpu_lockup(info);
+		return ret;
+	}
+
+	BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
+	OUT_RING(chan, Nv2D);
+	BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
+	OUT_RING(chan, NvDmaFB);
+	OUT_RING(chan, NvDmaFB);
+	OUT_RING(chan, NvDmaFB);
+	BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
+	OUT_RING(chan, 0);
+	BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
+	OUT_RING(chan, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x02ac, 1);
+	OUT_RING(chan, 3);
+	BEGIN_NV04(chan, NvSub2D, 0x02a0, 1);
+	OUT_RING(chan, 0x55);
+	BEGIN_NV04(chan, NvSub2D, 0x08c0, 4);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 1);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0580, 2);
+	OUT_RING(chan, 4);
+	OUT_RING(chan, format);
+	BEGIN_NV04(chan, NvSub2D, 0x02e8, 2);
+	OUT_RING(chan, 2);
+	OUT_RING(chan, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0804, 1);
+	OUT_RING(chan, format);
+	BEGIN_NV04(chan, NvSub2D, 0x0800, 1);
+	OUT_RING(chan, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0808, 3);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x081c, 1);
+	OUT_RING(chan, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0840, 4);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 1);
+	OUT_RING(chan, 0);
+	OUT_RING(chan, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0200, 2);
+	OUT_RING(chan, format);
+	OUT_RING(chan, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0214, 5);
+	OUT_RING(chan, info->fix.line_length);
+	OUT_RING(chan, info->var.xres_virtual);
+	OUT_RING(chan, info->var.yres_virtual);
+	OUT_RING(chan, upper_32_bits(fb->vma.offset));
+	OUT_RING(chan, lower_32_bits(fb->vma.offset));
+	BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
+	OUT_RING(chan, format);
+	OUT_RING(chan, 1);
+	BEGIN_NV04(chan, NvSub2D, 0x0244, 5);
+	OUT_RING(chan, info->fix.line_length);
+	OUT_RING(chan, info->var.xres_virtual);
+	OUT_RING(chan, info->var.yres_virtual);
+	OUT_RING(chan, upper_32_bits(fb->vma.offset));
+	OUT_RING(chan, lower_32_bits(fb->vma.offset));
+
+	return 0;
+}
+
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nv50_fence.c b/linux-imx/drivers/gpu/drm/nouveau/nv50_fence.c
new file mode 100644
index 0000000..0ee3638
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nv10_fence.h"
+
+#include "nv50_display.h"
+
+static int
+nv50_fence_context_new(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->drm->dev;
+	struct nv10_fence_priv *priv = chan->drm->fence;
+	struct nv10_fence_chan *fctx;
+	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
+	struct nouveau_object *object;
+	u32 start = mem->start * PAGE_SIZE;
+	u32 limit = start + mem->size - 1;
+	int ret, i;
+
+	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return -ENOMEM;
+
+	nouveau_fence_context_new(&fctx->base);
+	fctx->base.emit = nv10_fence_emit;
+	fctx->base.read = nv10_fence_read;
+	fctx->base.sync = nv17_fence_sync;
+
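+	/* dma object covering the fence buffer in vram */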
+	ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+				 NvSema, 0x003d,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = start,
+					.limit = limit,
+				 }, sizeof(struct nv_dma_class),
+				 &object);
+
+	/* dma objects for display sync channel semaphore blocks */
+	for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+		u32 start = bo->bo.mem.start * PAGE_SIZE;
+		u32 limit = start + bo->bo.mem.size - 1;
+
+		ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+					 NvEvoSema0 + i, 0x003d,
+					 &(struct nv_dma_class) {
+						.flags = NV_DMA_TARGET_VRAM |
+							 NV_DMA_ACCESS_RDWR,
+						.start = start,
+						.limit = limit,
+					 }, sizeof(struct nv_dma_class),
+					 &object);
+	}
+
+	if (ret)
+		nv10_fence_context_del(chan);
+	return ret;
+}
+
+int
+nv50_fence_create(struct nouveau_drm *drm)
+{
+	struct nv10_fence_priv *priv;
+	int ret = 0;
+
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.dtor = nv10_fence_destroy;
+	priv->base.resume = nv17_fence_resume;
+	priv->base.context_new = nv50_fence_context_new;
+	priv->base.context_del = nv10_fence_context_del;
+	spin_lock_init(&priv->lock);
+
+	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, NULL, &priv->bo);
+	if (!ret) {
+		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+		if (!ret) {
+			ret = nouveau_bo_map(priv->bo);
+			if (ret)
+				nouveau_bo_unpin(priv->bo);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &priv->bo);
+	}
+
+	if (ret) {
+		nv10_fence_destroy(drm);
+		return ret;
+	}
+
+	nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nv50_pm.c b/linux-imx/drivers/gpu/drm/nouveau/nv50_pm.c
new file mode 100644
index 0000000..69620e3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -0,0 +1,855 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <drm/drmP.h>
+#include "nouveau_drm.h"
+#include "nouveau_bios.h"
+#include "dispnv04/hw.h"
+#include "nouveau_pm.h"
+#include "nouveau_hwsq.h"
+
+#include "nv50_display.h"
+
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+enum clk_src {
+	clk_src_crystal,
+	clk_src_href,
+	clk_src_hclk,
+	clk_src_hclkm3,
+	clk_src_hclkm3d2,
+	clk_src_host,
+	clk_src_nvclk,
+	clk_src_sclk,
+	clk_src_mclk,
+	clk_src_vdec,
+	clk_src_dom6
+};
+
+static u32 read_clk(struct drm_device *, enum clk_src);
+
+static u32
+read_div(struct drm_device *dev)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	switch (nv_device(drm->device)->chipset) {
+	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
+	case 0x84:
+	case 0x86:
+	case 0x98:
+	case 0xa0:
+		return nv_rd32(device, 0x004700);
+	case 0x92:
+	case 0x94:
+	case 0x96:
+		return nv_rd32(device, 0x004800);
+	default:
+		return 0x00000000;
+	}
+}
+
+static u32
+read_pll_src(struct drm_device *dev, u32 base)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 coef, ref = read_clk(dev, clk_src_crystal);
+	u32 rsel = nv_rd32(device, 0x00e18c);
+	int P, N, M, id;
+
+	switch (nv_device(drm->device)->chipset) {
+	case 0x50:
+	case 0xa0:
+		switch (base) {
+		case 0x4020:
+		case 0x4028: id = !!(rsel & 0x00000004); break;
+		case 0x4008: id = !!(rsel & 0x00000008); break;
+		case 0x4030: id = 0; break;
+		default:
+			NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
+			return 0;
+		}
+
+		coef = nv_rd32(device, 0x00e81c + (id * 0x0c));
+		ref *=  (coef & 0x01000000) ? 2 : 4;
+		P    =  (coef & 0x00070000) >> 16;
+		N    = ((coef & 0x0000ff00) >> 8) + 1;
+		M    = ((coef & 0x000000ff) >> 0) + 1;
+		break;
+	case 0x84:
+	case 0x86:
+	case 0x92:
+		coef = nv_rd32(device, 0x00e81c);
+		P    = (coef & 0x00070000) >> 16;
+		N    = (coef & 0x0000ff00) >> 8;
+		M    = (coef & 0x000000ff) >> 0;
+		break;
+	case 0x94:
+	case 0x96:
+	case 0x98:
+		rsel = nv_rd32(device, 0x00c050);
+		switch (base) {
+		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
+		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
+		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
+		case 0x4030: rsel = 3; break;
+		default:
+			NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
+			return 0;
+		}
+
+		switch (rsel) {
+		case 0: id = 1; break;
+		case 1: return read_clk(dev, clk_src_crystal);
+		case 2: return read_clk(dev, clk_src_href);
+		case 3: id = 0; break;
+		}
+
+		coef =  nv_rd32(device, 0x00e81c + (id * 0x28));
+		P    = (nv_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
+		P   += (coef & 0x00070000) >> 16;
+		N    = (coef & 0x0000ff00) >> 8;
+		M    = (coef & 0x000000ff) >> 0;
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	if (M)
+		return (ref * N / M) >> P;
+	return 0;
+}
+
+static u32
+read_pll_ref(struct drm_device *dev, u32 base)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 src, mast = nv_rd32(device, 0x00c040);
+
+	switch (base) {
+	case 0x004028:
+		src = !!(mast & 0x00200000);
+		break;
+	case 0x004020:
+		src = !!(mast & 0x00400000);
+		break;
+	case 0x004008:
+		src = !!(mast & 0x00010000);
+		break;
+	case 0x004030:
+		src = !!(mast & 0x02000000);
+		break;
+	case 0x00e810:
+		return read_clk(dev, clk_src_crystal);
+	default:
+		NV_ERROR(drm, "bad pll 0x%06x\n", base);
+		return 0;
+	}
+
+	if (src)
+		return read_clk(dev, clk_src_href);
+	return read_pll_src(dev, base);
+}
+
+static u32
+read_pll(struct drm_device *dev, u32 base)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 mast = nv_rd32(device, 0x00c040);
+	u32 ctrl = nv_rd32(device, base + 0);
+	u32 coef = nv_rd32(device, base + 4);
+	u32 ref = read_pll_ref(dev, base);
+	u32 clk = 0;
+	int N1, N2, M1, M2;
+
+	if (base == 0x004028 && (mast & 0x00100000)) {
+		/* wtf, appears to only disable post-divider on nva0 */
+		if (nv_device(drm->device)->chipset != 0xa0)
+			return read_clk(dev, clk_src_dom6);
+	}
+
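+	/* two-stage pll: N1/M1 form the first stage, with the N2/M2
+	 * second stage only engaged for certain ctrl values
+	 */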
+	N2 = (coef & 0xff000000) >> 24;
+	M2 = (coef & 0x00ff0000) >> 16;
+	N1 = (coef & 0x0000ff00) >> 8;
+	M1 = (coef & 0x000000ff);
+	if ((ctrl & 0x80000000) && M1) {
+		clk = ref * N1 / M1;
+		if ((ctrl & 0x40000100) == 0x40000000) {
+			if (M2)
+				clk = clk * N2 / M2;
+			else
+				clk = 0;
+		}
+	}
+
+	return clk;
+}
+
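+/* follow the clock tree selected by the 0x00c040 (clock source
+ * select) register back to a root clock, recursing through plls
+ * and dividers as required
+ */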
+static u32
+read_clk(struct drm_device *dev, enum clk_src src)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 mast = nv_rd32(device, 0x00c040);
+	u32 P = 0;
+
+	switch (src) {
+	case clk_src_crystal:
+		return device->crystal;
+	case clk_src_href:
+		return 100000; /* PCIE reference clock */
+	case clk_src_hclk:
+		return read_clk(dev, clk_src_href) * 27778 / 10000;
+	case clk_src_hclkm3:
+		return read_clk(dev, clk_src_hclk) * 3;
+	case clk_src_hclkm3d2:
+		return read_clk(dev, clk_src_hclk) * 3 / 2;
+	case clk_src_host:
+		switch (mast & 0x30000000) {
+		case 0x00000000: return read_clk(dev, clk_src_href);
+		case 0x10000000: break;
+		case 0x20000000: /* !0x50 */
+		case 0x30000000: return read_clk(dev, clk_src_hclk);
+		}
+		break;
+	case clk_src_nvclk:
+		if (!(mast & 0x00100000))
+			P = (nv_rd32(device, 0x004028) & 0x00070000) >> 16;
+		switch (mast & 0x00000003) {
+		case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
+		case 0x00000001: return read_clk(dev, clk_src_dom6);
+		case 0x00000002: return read_pll(dev, 0x004020) >> P;
+		case 0x00000003: return read_pll(dev, 0x004028) >> P;
+		}
+		break;
+	case clk_src_sclk:
+		P = (nv_rd32(device, 0x004020) & 0x00070000) >> 16;
+		switch (mast & 0x00000030) {
+		case 0x00000000:
+			if (mast & 0x00000080)
+				return read_clk(dev, clk_src_host) >> P;
+			return read_clk(dev, clk_src_crystal) >> P;
+		case 0x00000010: break;
+		case 0x00000020: return read_pll(dev, 0x004028) >> P;
+		case 0x00000030: return read_pll(dev, 0x004020) >> P;
+		}
+		break;
+	case clk_src_mclk:
+		P = (nv_rd32(device, 0x004008) & 0x00070000) >> 16;
+		if (nv_rd32(device, 0x004008) & 0x00000200) {
+			switch (mast & 0x0000c000) {
+			case 0x00000000:
+				return read_clk(dev, clk_src_crystal) >> P;
+			case 0x00008000:
+			case 0x0000c000:
+				return read_clk(dev, clk_src_href) >> P;
+			}
+		} else {
+			return read_pll(dev, 0x004008) >> P;
+		}
+		break;
+	case clk_src_vdec:
+		P = (read_div(dev) & 0x00000700) >> 8;
+		switch (nv_device(drm->device)->chipset) {
+		case 0x84:
+		case 0x86:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+		case 0xa0:
+			switch (mast & 0x00000c00) {
+			case 0x00000000:
+				if (nv_device(drm->device)->chipset == 0xa0) /* wtf?? */
+					return read_clk(dev, clk_src_nvclk) >> P;
+				return read_clk(dev, clk_src_crystal) >> P;
+			case 0x00000400:
+				return 0;
+			case 0x00000800:
+				if (mast & 0x01000000)
+					return read_pll(dev, 0x004028) >> P;
+				return read_pll(dev, 0x004030) >> P;
+			case 0x00000c00:
+				return read_clk(dev, clk_src_nvclk) >> P;
+			}
+			break;
+		case 0x98:
+			switch (mast & 0x00000c00) {
+			case 0x00000000:
+				return read_clk(dev, clk_src_nvclk) >> P;
+			case 0x00000400:
+				return 0;
+			case 0x00000800:
+				return read_clk(dev, clk_src_hclkm3d2) >> P;
+			case 0x00000c00:
+				return read_clk(dev, clk_src_mclk) >> P;
+			}
+			break;
+		}
+		break;
+	case clk_src_dom6:
+		switch (nv_device(drm->device)->chipset) {
+		case 0x50:
+		case 0xa0:
+			return read_pll(dev, 0x00e810) >> 2;
+		case 0x84:
+		case 0x86:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+		case 0x98:
+			P = (read_div(dev) & 0x00000007) >> 0;
+			switch (mast & 0x0c000000) {
+			case 0x00000000: return read_clk(dev, clk_src_href);
+			case 0x04000000: break;
+			case 0x08000000: return read_clk(dev, clk_src_hclk);
+			case 0x0c000000:
+				return read_clk(dev, clk_src_hclkm3) >> P;
+			}
+			break;
+		default:
+			break;
+		}
+	default:
+		break;
+	}
+
+	NV_DEBUG(drm, "unknown clock source %d 0x%08x\n", src, mast);
+	return 0;
+}
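+
+/* Worked example for the derived clocks above (values assumed, not read
+ * from any particular board): with the fixed 100000kHz pcie reference,
+ *
+ *   hclk     = 100000 * 27778 / 10000 = 277780kHz (~277.78MHz)
+ *   hclkm3   = 277780 * 3             = 833340kHz
+ *   hclkm3d2 = 277780 * 3 / 2         = 416670kHz
+ *
+ * and a source such as clk_src_nvclk with P=1 then halves whatever base
+ * clock the 0x00c040 mux selects.
+ */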
+
+int
+nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	if (nv_device(drm->device)->chipset == 0xaa ||
+	    nv_device(drm->device)->chipset == 0xac)
+		return 0;
+
+	perflvl->core   = read_clk(dev, clk_src_nvclk);
+	perflvl->shader = read_clk(dev, clk_src_sclk);
+	perflvl->memory = read_clk(dev, clk_src_mclk);
+	if (nv_device(drm->device)->chipset != 0x50) {
+		perflvl->vdec = read_clk(dev, clk_src_vdec);
+		perflvl->dom6 = read_clk(dev, clk_src_dom6);
+	}
+
+	return 0;
+}
+
+struct nv50_pm_state {
+	struct nouveau_pm_level *perflvl;
+	struct hwsq_ucode eclk_hwsq;
+	struct hwsq_ucode mclk_hwsq;
+	u32 mscript;
+	u32 mmast;
+	u32 mctrl;
+	u32 mcoef;
+};
+
+static u32
+calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
+	 u32 clk, int *N1, int *M1, int *log2P)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nouveau_clock *pclk = nouveau_clock(device);
+	struct nouveau_pll_vals coef;
+	int ret;
+
+	ret = nvbios_pll_parse(bios, reg, pll);
+	if (ret)
+		return 0;
+
+	pll->vco2.max_freq = 0;
+	pll->refclk = read_pll_ref(dev, reg);
+	if (!pll->refclk)
+		return 0;
+
+	ret = pclk->pll_calc(pclk, pll, clk, &coef);
+	if (ret == 0)
+		return 0;
+
+	*N1 = coef.N1;
+	*M1 = coef.M1;
+	*log2P = coef.log2P;
+	return ret;
+}
+
+static inline u32
+calc_div(u32 src, u32 target, int *div)
+{
+	u32 clk0 = src, clk1 = src;
+	for (*div = 0; *div <= 7; (*div)++) {
+		if (clk0 <= target) {
+			clk1 = clk0 << (*div ? 1 : 0);
+			break;
+		}
+		clk0 >>= 1;
+	}
+
+	if (target - clk0 <= clk1 - target)
+		return clk0;
+	(*div)--;
+	return clk1;
+}
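+
+/* For illustration (inputs assumed): calc_div(src=800000, target=300000)
+ * halves src until it drops to clk0=200000 at *div=2, remembers the last
+ * value above target as clk1=400000, and returns whichever is closer to
+ * target.  Both are 100000kHz away here, so the lower clock (200000kHz,
+ * *div=2) wins.
+ */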
+
+static inline u32
+clk_same(u32 a, u32 b)
+{
+	return ((a / 1000) == (b / 1000));
+}
+
+static void
+mclk_precharge(struct nouveau_mem_exec_func *exec)
+{
+	struct nv50_pm_state *info = exec->priv;
+	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+
+	hwsq_wr32(hwsq, 0x1002d4, 0x00000001);
+}
+
+static void
+mclk_refresh(struct nouveau_mem_exec_func *exec)
+{
+	struct nv50_pm_state *info = exec->priv;
+	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+
+	hwsq_wr32(hwsq, 0x1002d0, 0x00000001);
+}
+
+static void
+mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
+{
+	struct nv50_pm_state *info = exec->priv;
+	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+
+	hwsq_wr32(hwsq, 0x100210, enable ? 0x80000000 : 0x00000000);
+}
+
+static void
+mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
+{
+	struct nv50_pm_state *info = exec->priv;
+	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+
+	hwsq_wr32(hwsq, 0x1002dc, enable ? 0x00000001 : 0x00000000);
+}
+
+static void
+mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
+{
+	struct nv50_pm_state *info = exec->priv;
+	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+
+	if (nsec > 1000)
+		hwsq_usec(hwsq, (nsec + 500) / 1000);
+}
+
+static u32
+mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	if (mr <= 1)
+		return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
+	if (mr <= 3)
+		return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
+	return 0;
+}
+
+static void
+mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	struct nv50_pm_state *info = exec->priv;
+	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+
+	if (mr <= 1) {
+		if (pfb->ram.ranks > 1)
+			hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data);
+		hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data);
+	} else
+	if (mr <= 3) {
+		if (pfb->ram.ranks > 1)
+			hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data);
+		hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data);
+	}
+}
+
+static void
+mclk_clock_set(struct nouveau_mem_exec_func *exec)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nv50_pm_state *info = exec->priv;
+	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+	u32 ctrl = nv_rd32(device, 0x004008);
+
+	info->mmast = nv_rd32(device, 0x00c040);
+	info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */
+	info->mmast |=  0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
+
+	hwsq_wr32(hwsq, 0xc040, info->mmast);
+	hwsq_wr32(hwsq, 0x4008, ctrl | 0x00000200); /* bypass MPLL */
+	if (info->mctrl & 0x80000000)
+		hwsq_wr32(hwsq, 0x400c, info->mcoef);
+	hwsq_wr32(hwsq, 0x4008, info->mctrl);
+}
+
+static void
+mclk_timing_set(struct nouveau_mem_exec_func *exec)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nv50_pm_state *info = exec->priv;
+	struct nouveau_pm_level *perflvl = info->perflvl;
+	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+	int i;
+
+	for (i = 0; i < 9; i++) {
+		u32 reg = 0x100220 + (i * 4);
+		u32 val = nv_rd32(device, reg);
+		if (val != perflvl->timing.reg[i])
+			hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]);
+	}
+}
+
+static int
+calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+	  struct nv50_pm_state *info)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
+	struct nouveau_mem_exec_func exec = {
+		.dev = dev,
+		.precharge = mclk_precharge,
+		.refresh = mclk_refresh,
+		.refresh_auto = mclk_refresh_auto,
+		.refresh_self = mclk_refresh_self,
+		.wait = mclk_wait,
+		.mrg = mclk_mrg,
+		.mrs = mclk_mrs,
+		.clock_set = mclk_clock_set,
+		.timing_set = mclk_timing_set,
+		.priv = info
+	};
+	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
+	struct nvbios_pll pll;
+	int N, M, P;
+	int ret;
+
+	/* use pcie refclock if possible, otherwise use mpll.  parse the pll
+	 * limits up front so that pll.bias_p is valid on both paths
+	 */
+	ret = nvbios_pll_parse(nouveau_bios(device), 0x4008, &pll);
+	if (ret)
+		return -EINVAL;
+
+	info->mctrl  = nv_rd32(device, 0x004008);
+	info->mctrl &= ~0x81ff0200;
+	if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) {
+		info->mctrl |= 0x00000200 | (pll.bias_p << 19);
+	} else {
+		ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P);
+		if (ret == 0)
+			return -EINVAL;
+
+		info->mctrl |= 0x80000000 | (P << 22) | (P << 16);
+		info->mctrl |= pll.bias_p << 19;
+		info->mcoef  = (N << 8) | M;
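+		/* e.g. (values assumed, not from a real vbios): N=110, M=3
+		 * against a 27MHz reference gives 27000 * 110 / 3 = 990000kHz,
+		 * packed as mcoef = (110 << 8) | 3 = 0x6e03.
+		 */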
+	}
+
+	/* build the ucode which will reclock the memory for us */
+	hwsq_init(hwsq);
+	if (crtc_mask) {
+		hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
+		hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
+	}
+	if (nv_device(drm->device)->chipset >= 0x92)
+		hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
+	hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
+	hwsq_op5f(hwsq, 0x00, 0x01); /* wait for access disabled? */
+
+	ret = nouveau_mem_exec(&exec, perflvl);
+	if (ret)
+		return ret;
+
+	hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
+	hwsq_op5f(hwsq, 0x00, 0x00); /* wait for access enabled? */
+	if (nv_device(drm->device)->chipset >= 0x92)
+		hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
+	hwsq_fini(hwsq);
+	return 0;
+}
+
+void *
+nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nv50_pm_state *info;
+	struct hwsq_ucode *hwsq;
+	struct nvbios_pll pll;
+	u32 out, mast, divs, ctrl;
+	int clk, ret = -EINVAL;
+	int N, M, P1, P2;
+
+	if (nv_device(drm->device)->chipset == 0xaa ||
+	    nv_device(drm->device)->chipset == 0xac)
+		return ERR_PTR(-ENODEV);
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+	info->perflvl = perflvl;
+	info->mmast = nv_rd32(device, 0x00c040); /* default when memory isn't reclocked */
+
+	/* memory: build hwsq ucode which we'll use to reclock memory.
+	 *         use pcie refclock if possible, otherwise use mpll */
+	info->mclk_hwsq.len = 0;
+	if (perflvl->memory) {
+		ret = calc_mclk(dev, perflvl, info);
+		if (ret)
+			goto error;
+		info->mscript = perflvl->memscript;
+	}
+
+	divs = read_div(dev);
+	mast = info->mmast;
+
+	/* start building HWSQ script for engine reclocking */
+	hwsq = &info->eclk_hwsq;
+	hwsq_init(hwsq);
+	hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
+	hwsq_op5f(hwsq, 0x00, 0x01); /* wait for access disabled? */
+
+	/* vdec/dom6: switch to "safe" clocks temporarily */
+	if (perflvl->vdec) {
+		mast &= ~0x00000c00;
+		divs &= ~0x00000700;
+	}
+
+	if (perflvl->dom6) {
+		mast &= ~0x0c000000;
+		divs &= ~0x00000007;
+	}
+
+	hwsq_wr32(hwsq, 0x00c040, mast);
+
+	/* vdec: avoid modifying xpll until we know exactly how the other
+	 * clock domains work; I suspect at least some of them can also be
+	 * tied to xpll...
+	 */
+	if (perflvl->vdec) {
+		/* see how close we can get using nvclk as a source */
+		clk = calc_div(perflvl->core, perflvl->vdec, &P1);
+
+		/* see how close we can get using xpll/hclk as a source */
+		if (nv_device(drm->device)->chipset != 0x98)
+			out = read_pll(dev, 0x004030);
+		else
+			out = read_clk(dev, clk_src_hclkm3d2);
+		out = calc_div(out, perflvl->vdec, &P2);
+
+		/* select whichever gets us closest */
+		if (abs((int)perflvl->vdec - clk) <=
+		    abs((int)perflvl->vdec - out)) {
+			if (nv_device(drm->device)->chipset != 0x98)
+				mast |= 0x00000c00;
+			divs |= P1 << 8;
+		} else {
+			mast |= 0x00000800;
+			divs |= P2 << 8;
+		}
+	}
+
+	/* dom6: no idea what this clock drives, but we're limited to various
+	 * combinations of the host clock frequency
+	 */
+	if (perflvl->dom6) {
+		if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
+			mast |= 0x00000000;
+		} else
+		if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
+			mast |= 0x08000000;
+		} else {
+			clk = read_clk(dev, clk_src_hclk) * 3;
+			clk = calc_div(clk, perflvl->dom6, &P1);
+
+			mast |= 0x0c000000;
+			divs |= P1;
+		}
+	}
+
+	/* vdec/dom6: complete switch to new clocks */
+	switch (nv_device(drm->device)->chipset) {
+	case 0x92:
+	case 0x94:
+	case 0x96:
+		hwsq_wr32(hwsq, 0x004800, divs);
+		break;
+	default:
+		hwsq_wr32(hwsq, 0x004700, divs);
+		break;
+	}
+
+	hwsq_wr32(hwsq, 0x00c040, mast);
+
+	/* core/shader: make sure sclk/nvclk are disconnected from their
+	 * PLLs (nvclk to dom6, sclk to hclk)
+	 */
+	if (nv_device(drm->device)->chipset < 0x92)
+		mast = (mast & ~0x001000b0) | 0x00100080;
+	else
+		mast = (mast & ~0x000000b3) | 0x00000081;
+
+	hwsq_wr32(hwsq, 0x00c040, mast);
+
+	/* core: for the moment at least, always use nvpll */
+	clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
+	if (clk == 0)
+		goto error;
+
+	ctrl  = nv_rd32(device, 0x004028) & ~0xc03f0100;
+	mast &= ~0x00100000;
+	mast |= 3;
+
+	hwsq_wr32(hwsq, 0x004028, 0x80000000 | (P1 << 19) | (P1 << 16) | ctrl);
+	hwsq_wr32(hwsq, 0x00402c, (N << 8) | M);
+
+	/* shader: tie to nvclk if possible, otherwise use spll.  We have to
+	 * be very careful that the shader clock is at least twice the core
+	 * clock, or some chipsets will be very unhappy.  I expect most or
+	 * all of these cases to be handled by tying to nvclk, but corner
+	 * cases are possible.
+	 */
+	ctrl = nv_rd32(device, 0x004020) & ~0xc03f0100;
+
+	/* note: this tests the old P1 from the nvclk calculation and then
+	 * decrements it - a shader clock at exactly 2x core can reuse
+	 * nvclk's pll with a post-divider of P1 - 1
+	 */
+	if (P1-- && perflvl->shader == (perflvl->core << 1)) {
+		hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
+		hwsq_wr32(hwsq, 0x00c040, 0x00000020 | mast);
+	} else {
+		clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
+		if (clk == 0)
+			goto error;
+		ctrl |= 0x80000000;
+
+		hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
+		hwsq_wr32(hwsq, 0x004024, (N << 8) | M);
+		hwsq_wr32(hwsq, 0x00c040, 0x00000030 | mast);
+	}
+
+	hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
+	hwsq_op5f(hwsq, 0x00, 0x00); /* wait for access enabled? */
+	hwsq_fini(hwsq);
+
+	return info;
+error:
+	kfree(info);
+	return ERR_PTR(ret);
+}
+
+static int
+prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 hwsq_data, hwsq_kick;
+	int i;
+
+	if (nv_device(drm->device)->chipset < 0x94) {
+		hwsq_data = 0x001400;
+		hwsq_kick = 0x00000003;
+	} else {
+		hwsq_data = 0x080000;
+		hwsq_kick = 0x00000001;
+	}
+	/* upload hwsq ucode */
+	nv_mask(device, 0x001098, 0x00000008, 0x00000000);
+	nv_wr32(device, 0x001304, 0x00000000);
+	if (nv_device(drm->device)->chipset >= 0x92)
+		nv_wr32(device, 0x001318, 0x00000000);
+	for (i = 0; i < hwsq->len / 4; i++)
+		nv_wr32(device, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
+	nv_mask(device, 0x001098, 0x00000018, 0x00000018);
+
+	/* launch, and wait for completion */
+	nv_wr32(device, 0x00130c, hwsq_kick);
+	if (!nv_wait(device, 0x001308, 0x00000100, 0x00000000)) {
+		NV_ERROR(drm, "hwsq ucode exec timed out\n");
+		NV_ERROR(drm, "0x001308: 0x%08x\n", nv_rd32(device, 0x001308));
+		for (i = 0; i < hwsq->len / 4; i++) {
+			NV_ERROR(drm, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
+				 nv_rd32(device, 0x001400 + (i * 4)));
+		}
+
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int
+nv50_pm_clocks_set(struct drm_device *dev, void *data)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nv50_pm_state *info = data;
+	struct bit_entry M;
+	int ret = -EBUSY;
+
+	/* halt and idle execution engines */
+	nv_mask(device, 0x002504, 0x00000001, 0x00000001);
+	if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010))
+		goto resume;
+	if (!nv_wait(device, 0x00251c, 0x0000003f, 0x0000003f))
+		goto resume;
+
+	/* program memory clock, if necessary - must come before engine clock
+	 * reprogramming due to how we construct the hwsq scripts in pre()
+	 */
+#define nouveau_bios_init_exec(a,b) nouveau_bios_run_init_table((a), (b), NULL, 0)
+	if (info->mclk_hwsq.len) {
+		/* run the vbios' memory-related init scripts; exact purpose unknown */
+		if (!bit_table(dev, 'M', &M) && M.version == 1) {
+			if (M.length >= 6)
+				nouveau_bios_init_exec(dev, ROM16(M.data[5]));
+			if (M.length >= 8)
+				nouveau_bios_init_exec(dev, ROM16(M.data[7]));
+			if (M.length >= 10)
+				nouveau_bios_init_exec(dev, ROM16(M.data[9]));
+			nouveau_bios_init_exec(dev, info->mscript);
+		}
+
+		ret = prog_hwsq(dev, &info->mclk_hwsq);
+		if (ret)
+			goto resume;
+	}
+
+	/* program engine clocks */
+	ret = prog_hwsq(dev, &info->eclk_hwsq);
+
+resume:
+	nv_mask(device, 0x002504, 0x00000001, 0x00000000);
+	kfree(info);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nv84_fence.c b/linux-imx/drivers/gpu/drm/nouveau/nv84_fence.c
new file mode 100644
index 0000000..9fd475c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/class.h>
+
+#include <engine/fifo.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_fence.h"
+
+#include "nv50_display.h"
+
+u64
+nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
+{
+	struct nv84_fence_chan *fctx = chan->fence;
+	return fctx->dispc_vma[crtc].offset;
+}
+
+static int
+nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
+{
+	int ret = RING_SPACE(chan, 8);
+	if (ret == 0) {
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+		OUT_RING  (chan, chan->vram);
+		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
+		OUT_RING  (chan, upper_32_bits(virtual));
+		OUT_RING  (chan, lower_32_bits(virtual));
+		OUT_RING  (chan, sequence);
+		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+		OUT_RING  (chan, 0x00000000);
+		FIRE_RING (chan);
+	}
+	return ret;
+}
+
+static int
+nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
+{
+	int ret = RING_SPACE(chan, 7);
+	if (ret == 0) {
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+		OUT_RING  (chan, chan->vram);
+		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING  (chan, upper_32_bits(virtual));
+		OUT_RING  (chan, lower_32_bits(virtual));
+		OUT_RING  (chan, sequence);
+		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
+		FIRE_RING (chan);
+	}
+	return ret;
+}
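+
+/* The helpers above use the nv84 semaphore methods: emit32 performs a
+ * 64-bit-addressed WRITE_LONG of the fence sequence, and sync32 issues
+ * ACQUIRE_GEQUAL, stalling the channel until the value at the given
+ * virtual address reaches the awaited sequence.
+ */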
+
+static int
+nv84_fence_emit(struct nouveau_fence *fence)
+{
+	struct nouveau_channel *chan = fence->channel;
+	struct nv84_fence_chan *fctx = chan->fence;
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	u64 addr = fifo->chid * 16;
+
+	if (fence->sysmem)
+		addr += fctx->vma_gart.offset;
+	else
+		addr += fctx->vma.offset;
+
+	return fctx->base.emit32(chan, addr, fence->sequence);
+}
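+
+/* Each channel owns a 16-byte slot in the fence buffer, indexed by fifo
+ * channel id: chid 0 lives at offset 0, chid 5 at offset 80, and so on.
+ * nv84_fence_read() below fetches the same slot as a 32-bit word, hence
+ * the chid * 16 / 4 word offset.
+ */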
+
+static int
+nv84_fence_sync(struct nouveau_fence *fence,
+		struct nouveau_channel *prev, struct nouveau_channel *chan)
+{
+	struct nv84_fence_chan *fctx = chan->fence;
+	struct nouveau_fifo_chan *fifo = (void *)prev->object;
+	u64 addr = fifo->chid * 16;
+
+	if (fence->sysmem)
+		addr += fctx->vma_gart.offset;
+	else
+		addr += fctx->vma.offset;
+
+	return fctx->base.sync32(chan, addr, fence->sequence);
+}
+
+static u32
+nv84_fence_read(struct nouveau_channel *chan)
+{
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nv84_fence_priv *priv = chan->drm->fence;
+	return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
+}
+
+static void
+nv84_fence_context_del(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->drm->dev;
+	struct nv84_fence_priv *priv = chan->drm->fence;
+	struct nv84_fence_chan *fctx = chan->fence;
+	int i;
+
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+		nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
+	}
+
+	nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
+	nouveau_bo_vma_del(priv->bo, &fctx->vma);
+	nouveau_fence_context_del(&fctx->base);
+	chan->fence = NULL;
+	kfree(fctx);
+}
+
+int
+nv84_fence_context_new(struct nouveau_channel *chan)
+{
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nouveau_client *client = nouveau_client(fifo);
+	struct nv84_fence_priv *priv = chan->drm->fence;
+	struct nv84_fence_chan *fctx;
+	int ret, i;
+
+	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return -ENOMEM;
+
+	nouveau_fence_context_new(&fctx->base);
+	fctx->base.emit = nv84_fence_emit;
+	fctx->base.sync = nv84_fence_sync;
+	fctx->base.read = nv84_fence_read;
+	fctx->base.emit32 = nv84_fence_emit32;
+	fctx->base.sync32 = nv84_fence_sync32;
+
+	ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
+	if (ret == 0) {
+		ret = nouveau_bo_vma_add(priv->bo_gart, client->vm,
+					&fctx->vma_gart);
+	}
+
+	/* map display semaphore buffers into channel's vm */
+	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
+		ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
+	}
+
+	nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
+
+	if (ret)
+		nv84_fence_context_del(chan);
+	return ret;
+}
+
+static bool
+nv84_fence_suspend(struct nouveau_drm *drm)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+	struct nv84_fence_priv *priv = drm->fence;
+	int i;
+
+	priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
+	if (priv->suspend) {
+		for (i = 0; i <= pfifo->max; i++)
+			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
+	}
+
+	return priv->suspend != NULL;
+}
+
+static void
+nv84_fence_resume(struct nouveau_drm *drm)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+	struct nv84_fence_priv *priv = drm->fence;
+	int i;
+
+	if (priv->suspend) {
+		for (i = 0; i <= pfifo->max; i++)
+			nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
+		vfree(priv->suspend);
+		priv->suspend = NULL;
+	}
+}
+
+static void
+nv84_fence_destroy(struct nouveau_drm *drm)
+{
+	struct nv84_fence_priv *priv = drm->fence;
+	nouveau_bo_unmap(priv->bo_gart);
+	if (priv->bo_gart)
+		nouveau_bo_unpin(priv->bo_gart);
+	nouveau_bo_ref(NULL, &priv->bo_gart);
+	nouveau_bo_unmap(priv->bo);
+	if (priv->bo)
+		nouveau_bo_unpin(priv->bo);
+	nouveau_bo_ref(NULL, &priv->bo);
+	drm->fence = NULL;
+	kfree(priv);
+}
+
+int
+nv84_fence_create(struct nouveau_drm *drm)
+{
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+	struct nv84_fence_priv *priv;
+	int ret;
+
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.dtor = nv84_fence_destroy;
+	priv->base.suspend = nv84_fence_suspend;
+	priv->base.resume = nv84_fence_resume;
+	priv->base.context_new = nv84_fence_context_new;
+	priv->base.context_del = nv84_fence_context_del;
+
+	init_waitqueue_head(&priv->base.waiting);
+	priv->base.uevent = true;
+
+	ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
+	if (ret == 0) {
+		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+		if (ret == 0) {
+			ret = nouveau_bo_map(priv->bo);
+			if (ret)
+				nouveau_bo_unpin(priv->bo);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &priv->bo);
+	}
+
+	if (ret == 0)
+		ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+				     TTM_PL_FLAG_TT, 0, 0, NULL,
+				     &priv->bo_gart);
+	if (ret == 0) {
+		ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
+		if (ret == 0) {
+			ret = nouveau_bo_map(priv->bo_gart);
+			if (ret)
+				nouveau_bo_unpin(priv->bo_gart);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &priv->bo_gart);
+	}
+
+	if (ret)
+		nv84_fence_destroy(drm);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nva3_pm.c b/linux-imx/drivers/gpu/drm/nouveau/nva3_pm.c
new file mode 100644
index 0000000..863f010
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <drm/drmP.h>
+#include "nouveau_drm.h"
+#include "nouveau_bios.h"
+#include "nouveau_pm.h"
+
+#include <subdev/bios/pll.h>
+#include <subdev/bios.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+static u32 read_clk(struct drm_device *, int, bool);
+static u32 read_pll(struct drm_device *, int, u32);
+
+static u32
+read_vco(struct drm_device *dev, int clk)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 sctl = nv_rd32(device, 0x4120 + (clk * 4));
+	if ((sctl & 0x00000030) != 0x00000030)
+		return read_pll(dev, 0x41, 0x00e820);
+	return read_pll(dev, 0x42, 0x00e8a0);
+}
+
+static u32
+read_clk(struct drm_device *dev, int clk, bool ignore_en)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 sctl, sdiv, sclk;
+
+	/* refclk for the 0xe8xx plls is a fixed frequency */
+	if (clk >= 0x40) {
+		if (nv_device(drm->device)->chipset == 0xaf) {
+			/* no joke.. seriously.. sigh.. */
+			return nv_rd32(device, 0x00471c) * 1000;
+		}
+
+		return device->crystal;
+	}
+
+	sctl = nv_rd32(device, 0x4120 + (clk * 4));
+	if (!ignore_en && !(sctl & 0x00000100))
+		return 0;
+
+	switch (sctl & 0x00003000) {
+	case 0x00000000:
+		return device->crystal;
+	case 0x00002000:
+		if (sctl & 0x00000040)
+			return 108000;
+		return 100000;
+	case 0x00003000:
+		sclk = read_vco(dev, clk);
+		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
+		return (sclk * 2) / sdiv;
+	default:
+		return 0;
+	}
+}
+
+static u32
+read_pll(struct drm_device *dev, int clk, u32 pll)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, pll + 0);
+	u32 sclk = 0, P = 1, N = 1, M = 1;
+
+	if (!(ctrl & 0x00000008)) {
+		if (ctrl & 0x00000001) {
+			u32 coef = nv_rd32(device, pll + 4);
+			M = (coef & 0x000000ff) >> 0;
+			N = (coef & 0x0000ff00) >> 8;
+			P = (coef & 0x003f0000) >> 16;
+
+			/* no post-divider on these.. */
+			if ((pll & 0x00ff00) == 0x00e800)
+				P = 1;
+
+			sclk = read_clk(dev, 0x00 + clk, false);
+		}
+	} else {
+		sclk = read_clk(dev, 0x10 + clk, false);
+	}
+
+	if (M * P)
+		return sclk * N / (M * P);
+	return 0;
+}
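+
+/* Worked example (register values assumed): coef = 0x00025d02 decodes to
+ * M=2, N=0x5d (93), P=2, so a 27000kHz input yields
+ * 27000 * 93 / (2 * 2) = 627750kHz.  On the 0xe8xx plls the post-divider
+ * is forced to 1, and the same coef would yield 1255500kHz.
+ */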
+
+struct creg {
+	u32 clk;
+	u32 pll;
+};
+
+static int
+calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
+{
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nvbios_pll limits;
+	u32 oclk, sclk, sdiv;
+	int P, N, M, diff;
+	int ret;
+
+	reg->pll = 0;
+	reg->clk = 0;
+	if (!khz) {
+		NV_DEBUG(drm, "no clock for 0x%04x/0x%02x\n", pll, clk);
+		return 0;
+	}
+
+	switch (khz) {
+	case 27000:
+		reg->clk = 0x00000100;
+		return khz;
+	case 100000:
+		reg->clk = 0x00002100;
+		return khz;
+	case 108000:
+		reg->clk = 0x00002140;
+		return khz;
+	default:
+		sclk = read_vco(dev, clk);
+		sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
+		/* if the clock has a PLL attached, and we can get to within
+		 * [-2, 3) MHz of the target using a divider, we'll disable
+		 * the PLL and use the divider instead.
+		 *
+		 * divider can go as low as 2, limited here because NVIDIA
+		 * and the VBIOS on my NVA8 seem to prefer using the PLL
+		 * for 810MHz - is there a good reason?
+		 */
+		if (sdiv > 4) {
+			oclk = (sclk * 2) / sdiv;
+			diff = khz - oclk;
+			if (!pll || (diff >= -2000 && diff < 3000)) {
+				reg->clk = (((sdiv - 2) << 16) | 0x00003100);
+				return oclk;
+			}
+		}
+
+		if (!pll) {
+			NV_ERROR(drm, "bad freq %02x: %d %d\n", clk, khz, sclk);
+			return -ERANGE;
+		}
+
+		break;
+	}
+
+	ret = nvbios_pll_parse(bios, pll, &limits);
+	if (ret)
+		return ret;
+
+	limits.refclk = read_clk(dev, clk - 0x10, true);
+	if (!limits.refclk)
+		return -EINVAL;
+
+	ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
+	if (ret >= 0) {
+		reg->clk = nv_rd32(device, 0x4120 + (clk * 4));
+		reg->pll = (P << 16) | (N << 8) | M;
+	}
+
+	return ret;
+}
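+
+/* Divider-path example for calc_clk() (vco value assumed): with a
+ * 2000000kHz vco and khz=400000, sdiv = min(4000000 / 397001, 65) = 10,
+ * so oclk = 4000000 / 10 = 400000kHz exactly; the pll is bypassed and
+ * reg->clk becomes ((10 - 2) << 16) | 0x00003100.
+ */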
+
+static void
+prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	const u32 src0 = 0x004120 + (clk * 4);
+	const u32 src1 = 0x004160 + (clk * 4);
+	const u32 ctrl = pll + 0;
+	const u32 coef = pll + 4;
+
+	if (!reg->clk && !reg->pll) {
+		NV_DEBUG(drm, "no clock for %02x\n", clk);
+		return;
+	}
+
+	if (reg->pll) {
+		nv_mask(device, src0, 0x00000101, 0x00000101);
+		nv_wr32(device, coef, reg->pll);
+		nv_mask(device, ctrl, 0x00000015, 0x00000015);
+		nv_mask(device, ctrl, 0x00000010, 0x00000000);
+		nv_wait(device, ctrl, 0x00020000, 0x00020000);
+		nv_mask(device, ctrl, 0x00000010, 0x00000010);
+		nv_mask(device, ctrl, 0x00000008, 0x00000000);
+		nv_mask(device, src1, 0x00000100, 0x00000000);
+		nv_mask(device, src1, 0x00000001, 0x00000000);
+	} else {
+		nv_mask(device, src1, 0x003f3141, 0x00000101 | reg->clk);
+		nv_mask(device, ctrl, 0x00000018, 0x00000018);
+		udelay(20);
+		nv_mask(device, ctrl, 0x00000001, 0x00000000);
+		nv_mask(device, src0, 0x00000100, 0x00000000);
+		nv_mask(device, src0, 0x00000001, 0x00000000);
+	}
+}
+
+static void
+prog_clk(struct drm_device *dev, int clk, struct creg *reg)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	if (!reg->clk) {
+		NV_DEBUG(drm, "no clock for %02x\n", clk);
+		return;
+	}
+
+	nv_mask(device, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
+}
+
+int
+nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	perflvl->core   = read_pll(dev, 0x00, 0x4200);
+	perflvl->shader = read_pll(dev, 0x01, 0x4220);
+	perflvl->memory = read_pll(dev, 0x02, 0x4000);
+	perflvl->unka0  = read_clk(dev, 0x20, false);
+	perflvl->vdec   = read_clk(dev, 0x21, false);
+	perflvl->daemon = read_clk(dev, 0x25, false);
+	perflvl->copy   = perflvl->core;
+	return 0;
+}
+
+struct nva3_pm_state {
+	struct nouveau_pm_level *perflvl;
+
+	struct creg nclk;
+	struct creg sclk;
+	struct creg vdec;
+	struct creg unka0;
+
+	struct creg mclk;
+	u8 *rammap;
+	u8  rammap_ver;
+	u8  rammap_len;
+	u8 *ramcfg;
+	u8  ramcfg_len;
+	u32 r004018;
+	u32 r100760;
+};
+
+void *
+nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	struct nva3_pm_state *info;
+	u8 ramcfg_cnt;
+	int ret;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk);
+	if (ret < 0)
+		goto out;
+
+	ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk);
+	if (ret < 0)
+		goto out;
+
+	ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk);
+	if (ret < 0)
+		goto out;
+
+	ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0);
+	if (ret < 0)
+		goto out;
+
+	ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec);
+	if (ret < 0)
+		goto out;
+
+	info->rammap = nouveau_perf_rammap(dev, perflvl->memory,
+					   &info->rammap_ver,
+					   &info->rammap_len,
+					   &ramcfg_cnt, &info->ramcfg_len);
+	if (info->rammap_ver != 0x10 || info->rammap_len < 5)
+		info->rammap = NULL;
+
+	info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory,
+					   &info->rammap_ver,
+					   &info->ramcfg_len);
+	if (info->rammap_ver != 0x10)
+		info->ramcfg = NULL;
+
+	info->perflvl = perflvl;
+out:
+	if (ret < 0) {
+		kfree(info);
+		info = ERR_PTR(ret);
+	}
+	return info;
+}
+
+static bool
+nva3_pm_grcp_idle(void *data)
+{
+	struct drm_device *dev = data;
+	struct nouveau_device *device = nouveau_dev(dev);
+
+	if (!(nv_rd32(device, 0x400304) & 0x00000001))
+		return true;
+	if (nv_rd32(device, 0x400308) == 0x0050001c)
+		return true;
+	return false;
+}
+
+static void
+mclk_precharge(struct nouveau_mem_exec_func *exec)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x1002d4, 0x00000001);
+}
+
+static void
+mclk_refresh(struct nouveau_mem_exec_func *exec)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x1002d0, 0x00000001);
+}
+
+static void
+mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x100210, enable ? 0x80000000 : 0x00000000);
+}
+
+static void
+mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x1002dc, enable ? 0x00000001 : 0x00000000);
+}
+
+static void
+mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	volatile u32 post = nv_rd32(device, 0); (void)post; /* read-back to flush posted writes before delaying */
+	udelay((nsec + 500) / 1000);
+}
+
+static u32
+mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	if (mr <= 1)
+		return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
+	if (mr <= 3)
+		return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
+	return 0;
+}
+
+static void
+mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	if (mr <= 1) {
+		if (pfb->ram.ranks > 1)
+			nv_wr32(device, 0x1002c8 + ((mr - 0) * 4), data);
+		nv_wr32(device, 0x1002c0 + ((mr - 0) * 4), data);
+	} else
+	if (mr <= 3) {
+		if (pfb->ram.ranks > 1)
+			nv_wr32(device, 0x1002e8 + ((mr - 2) * 4), data);
+		nv_wr32(device, 0x1002e0 + ((mr - 2) * 4), data);
+	}
+}
+
+static void
+mclk_clock_set(struct nouveau_mem_exec_func *exec)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nva3_pm_state *info = exec->priv;
+	u32 ctrl;
+
+	ctrl = nv_rd32(device, 0x004000);
+	if (!(ctrl & 0x00000008) && info->mclk.pll) {
+		nv_wr32(device, 0x004000, (ctrl |=  0x00000008));
+		nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
+		nv_wr32(device, 0x004018, 0x00001000);
+		nv_wr32(device, 0x004000, (ctrl &= ~0x00000001));
+		nv_wr32(device, 0x004004, info->mclk.pll);
+		nv_wr32(device, 0x004000, (ctrl |=  0x00000001));
+		udelay(64);
+		nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
+		udelay(20);
+	} else
+	if (!info->mclk.pll) {
+		nv_mask(device, 0x004168, 0x003f3040, info->mclk.clk);
+		nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
+		nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
+		nv_wr32(device, 0x004018, 0x0000d000 | info->r004018);
+	}
+
+	if (info->rammap) {
+		if (info->ramcfg && (info->rammap[4] & 0x08)) {
+			u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
+				      info->ramcfg[5];
+			u32 unk5a4 = ROM16(info->ramcfg[7]);
+			u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
+				     (info->ramcfg[3] & 0x0f) << 16 |
+				     (info->ramcfg[9] & 0x0f) |
+				     0x80000000;
+			nv_wr32(device, 0x1005a0, unk5a0);
+			nv_wr32(device, 0x1005a4, unk5a4);
+			nv_wr32(device, 0x10f804, unk804);
+			nv_mask(device, 0x10053c, 0x00001000, 0x00000000);
+		} else {
+			nv_mask(device, 0x10053c, 0x00001000, 0x00001000);
+			nv_mask(device, 0x10f804, 0x80000000, 0x00000000);
+			nv_mask(device, 0x100760, 0x22222222, info->r100760);
+			nv_mask(device, 0x1007a0, 0x22222222, info->r100760);
+			nv_mask(device, 0x1007e0, 0x22222222, info->r100760);
+		}
+	}
+
+	if (info->mclk.pll) {
+		nv_mask(device, 0x1110e0, 0x00088000, 0x00011000);
+		nv_wr32(device, 0x004000, (ctrl &= ~0x00000008));
+	}
+}
+
+static void
+mclk_timing_set(struct nouveau_mem_exec_func *exec)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nva3_pm_state *info = exec->priv;
+	struct nouveau_pm_level *perflvl = info->perflvl;
+	int i;
+
+	for (i = 0; i < 9; i++)
+		nv_wr32(device, 0x100220 + (i * 4), perflvl->timing.reg[i]);
+
+	if (info->ramcfg) {
+		u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
+		nv_mask(device, 0x100200, 0x00001000, data);
+	}
+
+	if (info->ramcfg) {
+		u32 unk714 = nv_rd32(device, 0x100714) & ~0xf0000010;
+		u32 unk718 = nv_rd32(device, 0x100718) & ~0x00000100;
+		u32 unk71c = nv_rd32(device, 0x10071c) & ~0x00000100;
+		if ( (info->ramcfg[2] & 0x20))
+			unk714 |= 0xf0000000;
+		if (!(info->ramcfg[2] & 0x04))
+			unk714 |= 0x00000010;
+		nv_wr32(device, 0x100714, unk714);
+
+		if (info->ramcfg[2] & 0x01)
+			unk71c |= 0x00000100;
+		nv_wr32(device, 0x10071c, unk71c);
+
+		if (info->ramcfg[2] & 0x02)
+			unk718 |= 0x00000100;
+		nv_wr32(device, 0x100718, unk718);
+
+		if (info->ramcfg[2] & 0x10)
+			nv_wr32(device, 0x111100, 0x48000000); /*XXX*/
+	}
+}
+
+static void
+prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_mem_exec_func exec = {
+		.dev = dev,
+		.precharge = mclk_precharge,
+		.refresh = mclk_refresh,
+		.refresh_auto = mclk_refresh_auto,
+		.refresh_self = mclk_refresh_self,
+		.wait = mclk_wait,
+		.mrg = mclk_mrg,
+		.mrs = mclk_mrs,
+		.clock_set = mclk_clock_set,
+		.timing_set = mclk_timing_set,
+		.priv = info
+	};
+	u32 ctrl;
+
+	/* XXX: where the fuck does 750MHz come from? */
+	if (info->perflvl->memory <= 750000) {
+		info->r004018 = 0x10000000;
+		info->r100760 = 0x22222222;
+	}
+
+	ctrl = nv_rd32(device, 0x004000);
+	if (ctrl & 0x00000008) {
+		if (info->mclk.pll) {
+			nv_mask(device, 0x004128, 0x00000101, 0x00000101);
+			nv_wr32(device, 0x004004, info->mclk.pll);
+			nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
+			nv_wr32(device, 0x004000, (ctrl &= 0xffffffef));
+			nv_wait(device, 0x004000, 0x00020000, 0x00020000);
+			nv_wr32(device, 0x004000, (ctrl |= 0x00000010));
+			nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
+			nv_wr32(device, 0x004000, (ctrl |= 0x00000004));
+		}
+	} else {
+		u32 ssel = 0x00000101;
+		if (info->mclk.clk)
+			ssel |= info->mclk.clk;
+		else
+			ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
+		nv_mask(device, 0x004168, 0x003f3141, ssel);
+	}
+
+	if (info->ramcfg) {
+		if (info->ramcfg[2] & 0x10) {
+			nv_mask(device, 0x111104, 0x00000600, 0x00000000);
+		} else {
+			nv_mask(device, 0x111100, 0x40000000, 0x40000000);
+			nv_mask(device, 0x111104, 0x00000180, 0x00000000);
+		}
+	}
+	if (info->rammap && !(info->rammap[4] & 0x02))
+		nv_mask(device, 0x100200, 0x00000800, 0x00000000);
+	nv_wr32(device, 0x611200, 0x00003300);
+	if (info->ramcfg && !(info->ramcfg[2] & 0x10))
+		nv_wr32(device, 0x111100, 0x4c020000); /*XXX*/
+
+	nouveau_mem_exec(&exec, info->perflvl);
+
+	nv_wr32(device, 0x611200, 0x00003330);
+	if (info->rammap && (info->rammap[4] & 0x02))
+		nv_mask(device, 0x100200, 0x00000800, 0x00000800);
+	if (info->ramcfg) {
+		if (info->ramcfg[2] & 0x10) {
+			nv_mask(device, 0x111104, 0x00000180, 0x00000180);
+			nv_mask(device, 0x111100, 0x40000000, 0x00000000);
+		} else {
+			nv_mask(device, 0x111104, 0x00000600, 0x00000600);
+		}
+	}
+
+	if (info->mclk.pll) {
+		nv_mask(device, 0x004168, 0x00000001, 0x00000000);
+		nv_mask(device, 0x004168, 0x00000100, 0x00000000);
+	} else {
+		nv_mask(device, 0x004000, 0x00000001, 0x00000000);
+		nv_mask(device, 0x004128, 0x00000001, 0x00000000);
+		nv_mask(device, 0x004128, 0x00000100, 0x00000000);
+	}
+}
+
+int
+nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nva3_pm_state *info = pre_state;
+	int ret = -EAGAIN;
+
+	/* prevent any new grctx switches from starting */
+	nv_wr32(device, 0x400324, 0x00000000);
+	nv_wr32(device, 0x400328, 0x0050001c); /* wait flag 0x1c */
+	/* wait for any pending grctx switches to complete */
+	if (!nv_wait_cb(device, nva3_pm_grcp_idle, dev)) {
+		NV_ERROR(drm, "pm: ctxprog didn't go idle\n");
+		goto cleanup;
+	}
+	/* freeze PFIFO */
+	nv_mask(device, 0x002504, 0x00000001, 0x00000001);
+	if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010)) {
+		NV_ERROR(drm, "pm: fifo didn't go idle\n");
+		goto cleanup;
+	}
+
+	prog_pll(dev, 0x00, 0x004200, &info->nclk);
+	prog_pll(dev, 0x01, 0x004220, &info->sclk);
+	prog_clk(dev, 0x20, &info->unka0);
+	prog_clk(dev, 0x21, &info->vdec);
+
+	if (info->mclk.clk || info->mclk.pll)
+		prog_mem(dev, info);
+
+	ret = 0;
+
+cleanup:
+	/* unfreeze PFIFO */
+	nv_mask(device, 0x002504, 0x00000001, 0x00000000);
+	/* restore ctxprog to normal */
+	nv_wr32(device, 0x400324, 0x00000000);
+	nv_wr32(device, 0x400328, 0x0070009c); /* set flag 0x1c */
+	/* unblock it if necessary */
+	if (nv_rd32(device, 0x400308) == 0x0050001c)
+		nv_mask(device, 0x400824, 0x10000000, 0x10000000);
+	kfree(info);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/linux-imx/drivers/gpu/drm/nouveau/nvc0_fbcon.c
new file mode 100644
index 0000000..9dcd30f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_fbcon.h"
+
+int
+nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+	if (ret)
+		return ret;
+
+	if (rect->rop != ROP_COPY) {
+		BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
+		OUT_RING  (chan, 1);
+	}
+	BEGIN_NVC0(chan, NvSub2D, 0x0588, 1);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+		OUT_RING  (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+	else
+		OUT_RING  (chan, rect->color);
+	BEGIN_NVC0(chan, NvSub2D, 0x0600, 4);
+	OUT_RING  (chan, rect->dx);
+	OUT_RING  (chan, rect->dy);
+	OUT_RING  (chan, rect->dx + rect->width);
+	OUT_RING  (chan, rect->dy + rect->height);
+	if (rect->rop != ROP_COPY) {
+		BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
+		OUT_RING  (chan, 3);
+	}
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, 12);
+	if (ret)
+		return ret;
+
+	BEGIN_NVC0(chan, NvSub2D, 0x0110, 1);
+	OUT_RING  (chan, 0);
+	BEGIN_NVC0(chan, NvSub2D, 0x08b0, 4);
+	OUT_RING  (chan, region->dx);
+	OUT_RING  (chan, region->dy);
+	OUT_RING  (chan, region->width);
+	OUT_RING  (chan, region->height);
+	BEGIN_NVC0(chan, NvSub2D, 0x08d0, 4);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, region->sx);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, region->sy);
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
+	uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+	uint32_t *palette = info->pseudo_palette;
+	int ret;
+
+	if (image->depth != 1)
+		return -ENODEV;
+
+	ret = RING_SPACE(chan, 11);
+	if (ret)
+		return ret;
+
+	width = ALIGN(image->width, 32);
+	dwords = (width * image->height) >> 5;
+
+	BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+		OUT_RING  (chan, palette[image->bg_color] | mask);
+		OUT_RING  (chan, palette[image->fg_color] | mask);
+	} else {
+		OUT_RING  (chan, image->bg_color);
+		OUT_RING  (chan, image->fg_color);
+	}
+	BEGIN_NVC0(chan, NvSub2D, 0x0838, 2);
+	OUT_RING  (chan, image->width);
+	OUT_RING  (chan, image->height);
+	BEGIN_NVC0(chan, NvSub2D, 0x0850, 4);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, image->dx);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, image->dy);
+
+	while (dwords) {
+		int push = dwords > 2047 ? 2047 : dwords;
+
+		ret = RING_SPACE(chan, push + 1);
+		if (ret)
+			return ret;
+
+		dwords -= push;
+
+		BEGIN_NIC0(chan, NvSub2D, 0x0860, push);
+		OUT_RINGp(chan, data, push);
+		data += push;
+	}
+
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_accel_init(struct fb_info *info)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_channel *chan = drm->channel;
+	struct nouveau_object *object;
+	int ret, format;
+
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
+				 0x902d, NULL, 0, &object);
+	if (ret)
+		return ret;
+
+	switch (info->var.bits_per_pixel) {
+	case 8:
+		format = 0xf3;
+		break;
+	case 15:
+		format = 0xf8;
+		break;
+	case 16:
+		format = 0xe8;
+		break;
+	case 32:
+		switch (info->var.transp.length) {
+		case 0: /* depth 24 */
+		case 8: /* depth 32, just use 24.. */
+			format = 0xe6;
+			break;
+		case 2: /* depth 30 */
+			format = 0xd1;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = RING_SPACE(chan, 60);
+	if (ret) {
+		WARN_ON(1);
+		nouveau_fbcon_gpu_lockup(info);
+		return ret;
+	}
+
+	BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
+	OUT_RING  (chan, 0x0000902d);
+	BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
+	OUT_RING  (chan, 0);
+	BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, NvSub2D, 0x02ac, 1);
+	OUT_RING  (chan, 3);
+	BEGIN_NVC0(chan, NvSub2D, 0x02a0, 1);
+	OUT_RING  (chan, 0x55);
+	BEGIN_NVC0(chan, NvSub2D, 0x08c0, 4);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, NvSub2D, 0x0580, 2);
+	OUT_RING  (chan, 4);
+	OUT_RING  (chan, format);
+	BEGIN_NVC0(chan, NvSub2D, 0x02e8, 2);
+	OUT_RING  (chan, 2);
+	OUT_RING  (chan, 1);
+
+	BEGIN_NVC0(chan, NvSub2D, 0x0804, 1);
+	OUT_RING  (chan, format);
+	BEGIN_NVC0(chan, NvSub2D, 0x0800, 1);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, NvSub2D, 0x0808, 3);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, NvSub2D, 0x081c, 1);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, NvSub2D, 0x0840, 4);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, NvSub2D, 0x0200, 10);
+	OUT_RING  (chan, format);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, info->fix.line_length);
+	OUT_RING  (chan, info->var.xres_virtual);
+	OUT_RING  (chan, info->var.yres_virtual);
+	OUT_RING  (chan, upper_32_bits(fb->vma.offset));
+	OUT_RING  (chan, lower_32_bits(fb->vma.offset));
+	BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
+	OUT_RING  (chan, format);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, info->fix.line_length);
+	OUT_RING  (chan, info->var.xres_virtual);
+	OUT_RING  (chan, info->var.yres_virtual);
+	OUT_RING  (chan, upper_32_bits(fb->vma.offset));
+	OUT_RING  (chan, lower_32_bits(fb->vma.offset));
+	FIRE_RING (chan);
+
+	return 0;
+}
+
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nvc0_fence.c b/linux-imx/drivers/gpu/drm/nouveau/nvc0_fence.c
new file mode 100644
index 0000000..9566267
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/class.h>
+
+#include <engine/fifo.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_fence.h"
+
+#include "nv50_display.h"
+
+static int
+nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
+{
+	int ret = RING_SPACE(chan, 6);
+	if (ret == 0) {
+		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
+		OUT_RING  (chan, upper_32_bits(virtual));
+		OUT_RING  (chan, lower_32_bits(virtual));
+		OUT_RING  (chan, sequence);
+		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+		OUT_RING  (chan, 0x00000000);
+		FIRE_RING (chan);
+	}
+	return ret;
+}
+
+static int
+nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
+{
+	int ret = RING_SPACE(chan, 5);
+	if (ret == 0) {
+		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING  (chan, upper_32_bits(virtual));
+		OUT_RING  (chan, lower_32_bits(virtual));
+		OUT_RING  (chan, sequence);
+		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
+				 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+		FIRE_RING (chan);
+	}
+	return ret;
+}
+
+static int
+nvc0_fence_context_new(struct nouveau_channel *chan)
+{
+	int ret = nv84_fence_context_new(chan);
+	if (ret == 0) {
+		struct nv84_fence_chan *fctx = chan->fence;
+		fctx->base.emit32 = nvc0_fence_emit32;
+		fctx->base.sync32 = nvc0_fence_sync32;
+	}
+	return ret;
+}
+
+int
+nvc0_fence_create(struct nouveau_drm *drm)
+{
+	int ret = nv84_fence_create(drm);
+	if (ret == 0) {
+		struct nv84_fence_priv *priv = drm->fence;
+		priv->base.context_new = nvc0_fence_context_new;
+	}
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/nouveau/nvc0_pm.c b/linux-imx/drivers/gpu/drm/nouveau/nvc0_pm.c
new file mode 100644
index 0000000..0d34eb5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -0,0 +1,599 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nouveau_drm.h"
+#include "nouveau_bios.h"
+#include "nouveau_pm.h"
+
+#include <subdev/bios/pll.h>
+#include <subdev/bios.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+static u32 read_div(struct drm_device *, int, u32, u32);
+static u32 read_pll(struct drm_device *, u32);
+
+static u32
+read_vco(struct drm_device *dev, u32 dsrc)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ssrc = nv_rd32(device, dsrc);
+	if (!(ssrc & 0x00000100))
+		return read_pll(dev, 0x00e800);
+	return read_pll(dev, 0x00e820);
+}
+
+static u32
+read_pll(struct drm_device *dev, u32 pll)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, pll + 0);
+	u32 coef = nv_rd32(device, pll + 4);
+	u32 P = (coef & 0x003f0000) >> 16;
+	u32 N = (coef & 0x0000ff00) >> 8;
+	u32 M = (coef & 0x000000ff) >> 0;
+	u32 sclk, doff;
+
+	if (!(ctrl & 0x00000001))
+		return 0;
+
+	switch (pll & 0xfff000) {
+	case 0x00e000:
+		sclk = 27000;
+		P = 1;
+		break;
+	case 0x137000:
+		doff = (pll - 0x137000) / 0x20;
+		sclk = read_div(dev, doff, 0x137120, 0x137140);
+		break;
+	case 0x132000:
+		switch (pll) {
+		case 0x132000:
+			sclk = read_pll(dev, 0x132020);
+			break;
+		case 0x132020:
+			sclk = read_div(dev, 0, 0x137320, 0x137330);
+			break;
+		default:
+			return 0;
+		}
+		break;
+	default:
+		return 0;
+	}
+
+	return sclk * N / M / P;
+}
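+
+/* e.g. (coef assumed): coef = 0x00017102 on an engine pll with a 27000kHz
+ * refclk decodes to P=1, N=0x71 (113), M=2, giving
+ * 27000 * 113 / 2 / 1 = 1525500kHz.
+ */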
+
+static u32
+read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ssrc = nv_rd32(device, dsrc + (doff * 4));
+	u32 sctl = nv_rd32(device, dctl + (doff * 4));
+
+	switch (ssrc & 0x00000003) {
+	case 0:
+		if ((ssrc & 0x00030000) != 0x00030000)
+			return 27000;
+		return 108000;
+	case 2:
+		return 100000;
+	case 3:
+		if (sctl & 0x80000000) {
+			u32 sclk = read_vco(dev, dsrc + (doff * 4));
+			u32 sdiv = (sctl & 0x0000003f) + 2;
+			return (sclk * 2) / sdiv;
+		}
+
+		return read_vco(dev, dsrc + (doff * 4));
+	default:
+		return 0;
+	}
+}
+
+static u32
+read_mem(struct drm_device *dev)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ssel = nv_rd32(device, 0x1373f0);
+	if (ssel & 0x00000001)
+		return read_div(dev, 0, 0x137300, 0x137310);
+	return read_pll(dev, 0x132000);
+}
+
+static u32
+read_clk(struct drm_device *dev, int clk)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 sctl = nv_rd32(device, 0x137250 + (clk * 4));
+	u32 ssel = nv_rd32(device, 0x137100);
+	u32 sclk, sdiv;
+
+	if (ssel & (1 << clk)) {
+		if (clk < 7)
+			sclk = read_pll(dev, 0x137000 + (clk * 0x20));
+		else
+			sclk = read_pll(dev, 0x1370e0);
+		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
+	} else {
+		sclk = read_div(dev, clk, 0x137160, 0x1371d0);
+		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
+	}
+
+	if (sctl & 0x80000000)
+		return (sclk * 2) / sdiv;
+	return sclk;
+}
+
+int
+nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	perflvl->shader = read_clk(dev, 0x00);
+	perflvl->core   = perflvl->shader / 2;
+	perflvl->memory = read_mem(dev);
+	perflvl->rop    = read_clk(dev, 0x01);
+	perflvl->hub07  = read_clk(dev, 0x02);
+	perflvl->hub06  = read_clk(dev, 0x07);
+	perflvl->hub01  = read_clk(dev, 0x08);
+	perflvl->copy   = read_clk(dev, 0x09);
+	perflvl->daemon = read_clk(dev, 0x0c);
+	perflvl->vdec   = read_clk(dev, 0x0e);
+	return 0;
+}
+
+struct nvc0_pm_clock {
+	u32 freq;
+	u32 ssel;
+	u32 mdiv;
+	u32 dsrc;
+	u32 ddiv;
+	u32 coef;
+};
+
+struct nvc0_pm_state {
+	struct nouveau_pm_level *perflvl;
+	struct nvc0_pm_clock eng[16];
+	struct nvc0_pm_clock mem;
+};
+
+static u32
+calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
+{
+	u32 div = min((ref * 2) / freq, (u32)65);
+	if (div < 2)
+		div = 2;
+
+	*ddiv = div - 2;
+	return (ref * 2) / div;
+}
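+
+/* For illustration (ref assumed): calc_div(ref=1620000, freq=405000) picks
+ * div = min(3240000 / 405000, 65) = 8, stores *ddiv = 6 and returns
+ * 3240000 / 8 = 405000kHz - an exact hit.
+ */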
+
+static u32
+calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+{
+	u32 sclk;
+
+	/* use one of the fixed frequencies if possible */
+	*ddiv = 0x00000000;
+	switch (freq) {
+	case  27000:
+	case 108000:
+		*dsrc = 0x00000000;
+		if (freq == 108000)
+			*dsrc |= 0x00030000;
+		return freq;
+	case 100000:
+		*dsrc = 0x00000002;
+		return freq;
+	default:
+		*dsrc = 0x00000003;
+		break;
+	}
+
+	/* otherwise, calculate the closest divider */
+	sclk = read_vco(dev, clk);
+	if (clk < 7)
+		sclk = calc_div(dev, clk, sclk, freq, ddiv);
+	return sclk;
+}
+
+static u32
+calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nvbios_pll limits;
+	int N, M, P, ret;
+
+	ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
+	if (ret)
+		return 0;
+
+	limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
+	if (!limits.refclk)
+		return 0;
+
+	ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
+	if (ret <= 0)
+		return 0;
+
+	*coef = (P << 16) | (N << 8) | M;
+	return ret;
+}
+
+/* A (likely rather simplified and incomplete) view of the clock tree
+ *
+ * Key:
+ *
+ * S: source select
+ * D: divider
+ * P: pll
+ * F: switch
+ *
+ * Engine clocks:
+ *
+ * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
+ *                      (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
+ *
+ * Not all registers exist for all clocks.  For example: clocks >= 8 don't
+ * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
+ * they have the divider at 1371d0, though the source selection at 137160
+ * still exists.  You must use the divider at 137250 for these instead.
+ *
+ * Memory clock:
+ *
+ * TBD, read_mem() above is likely very wrong...
+ *
+ */
+
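+/* 0x00004387 marks the clocks with a usable PLL path (bits 0, 1, 2, 7, 8,
+ * 9 and 14); as noted above, clocks >= 8 don't own a PLL and ride on clock
+ * 7's PLL at 0x1370e0 instead.
+ */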
+static int
+calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
+{
+	u32 src0, div0, div1D, div1P = 0;
+	u32 clk0, clk1 = 0;
+
+	/* invalid clock domain */
+	if (!freq)
+		return 0;
+
+	/* first possible path, using only dividers */
+	clk0 = calc_src(dev, clk, freq, &src0, &div0);
+	clk0 = calc_div(dev, clk, clk0, freq, &div1D);
+
+	/* see if we can get any closer using PLLs */
+	if (clk0 != freq && (0x00004387 & (1 << clk))) {
+		if (clk < 7)
+			clk1 = calc_pll(dev, clk, freq, &info->coef);
+		else
+			clk1 = read_pll(dev, 0x1370e0);
+		clk1 = calc_div(dev, clk, clk1, freq, &div1P);
+	}
+
+	/* select the method which gets closest to target freq */
+	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
+		info->dsrc = src0;
+		if (div0) {
+			info->ddiv |= 0x80000000;
+			info->ddiv |= div0 << 8;
+			info->ddiv |= div0;
+		}
+		if (div1D) {
+			info->mdiv |= 0x80000000;
+			info->mdiv |= div1D;
+		}
+		info->ssel = 0;
+		info->freq = clk0;
+	} else {
+		if (div1P) {
+			info->mdiv |= 0x80000000;
+			info->mdiv |= div1P << 8;
+		}
+		info->ssel = (1 << clk);
+		info->freq = clk1;
+	}
+
+	return 0;
+}
+
+static int
+calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nvbios_pll pll;
+	int N, M, P, ret;
+	u32 ctrl;
+
+	/* mclk pll input freq comes from another pll, make sure it's on */
+	ctrl = nv_rd32(device, 0x132020);
+	if (!(ctrl & 0x00000001)) {
+		/* if not, program it to 567MHz.  nfi where this value comes
+		 * from - it looks like it's in the pll limits table for
+		 * 132000 but the binary driver ignores all my attempts to
+		 * change this value.
+		 */
+		nv_wr32(device, 0x137320, 0x00000103);
+		nv_wr32(device, 0x137330, 0x81200606);
+		nv_wait(device, 0x132020, 0x00010000, 0x00010000);
+		nv_wr32(device, 0x132024, 0x0001150f);
+		nv_mask(device, 0x132020, 0x00000001, 0x00000001);
+		nv_wait(device, 0x137390, 0x00020000, 0x00020000);
+		nv_mask(device, 0x132020, 0x00000004, 0x00000004);
+	}
+
+	/* for the moment, until the clock tree is better understood, use
+	 * pll mode for all clock frequencies
+	 */
+	ret = nvbios_pll_parse(bios, 0x132000, &pll);
+	if (ret == 0) {
+		pll.refclk = read_pll(dev, 0x132020);
+		if (pll.refclk) {
+			ret = nva3_calc_pll(dev, &pll, freq, &N, NULL, &M, &P);
+			if (ret > 0) {
+				info->coef = (P << 16) | (N << 8) | M;
+				return 0;
+			}
+		}
+	}
+
+	return -EINVAL;
+}
+
+void *
+nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nvc0_pm_state *info;
+	int ret;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ERR_PTR(-ENOMEM);
+
+	/* NFI why this is still in the performance table, the ROPCs appear
+	 * to get their clock from clock 2 ("hub07", actually hub05 on this
+	 * chip, but, anyway...) as well.  nvatiming confirms hub05 and ROP
+	 * are always the same freq with the binary driver even when the
+	 * performance table says they should differ.
+	 */
+	if (device->chipset == 0xd9)
+		perflvl->rop = 0;
+
+	if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
+	    (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
+	    (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
+	    (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
+	    (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
+	    (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
+	    (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
+	    (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
+		kfree(info);
+		return ERR_PTR(ret);
+	}
+
+	if (perflvl->memory) {
+		ret = calc_mem(dev, &info->mem, perflvl->memory);
+		if (ret) {
+			kfree(info);
+			return ERR_PTR(ret);
+		}
+	}
+
+	info->perflvl = perflvl;
+	return info;
+}
+
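+/* Reprogramming sequence: set up the bypass source/divider, switch the
+ * clock to bypass (non-pll) mode, disable and, if needed, reprogram and
+ * re-enable the PLL, then switch back and program the final post-divider.
+ */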
+static void
+prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+
+	/* program dividers at 137160/1371d0 first */
+	if (clk < 7 && !info->ssel) {
+		nv_mask(device, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+		nv_wr32(device, 0x137160 + (clk * 0x04), info->dsrc);
+	}
+
+	/* switch clock to non-pll mode */
+	nv_mask(device, 0x137100, (1 << clk), 0x00000000);
+	nv_wait(device, 0x137100, (1 << clk), 0x00000000);
+
+	/* reprogram pll */
+	if (clk < 7) {
+		/* make sure it's disabled first... */
+		u32 base = 0x137000 + (clk * 0x20);
+		u32 ctrl = nv_rd32(device, base + 0x00);
+		if (ctrl & 0x00000001) {
+			nv_mask(device, base + 0x00, 0x00000004, 0x00000000);
+			nv_mask(device, base + 0x00, 0x00000001, 0x00000000);
+		}
+		/* program it to new values, if necessary */
+		if (info->ssel) {
+			nv_wr32(device, base + 0x04, info->coef);
+			nv_mask(device, base + 0x00, 0x00000001, 0x00000001);
+			nv_wait(device, base + 0x00, 0x00020000, 0x00020000);
+			nv_mask(device, base + 0x00, 0x00020004, 0x00000004);
+		}
+	}
+
+	/* select pll/non-pll mode, and program final clock divider */
+	nv_mask(device, 0x137100, (1 << clk), info->ssel);
+	nv_wait(device, 0x137100, (1 << clk), info->ssel);
+	nv_mask(device, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+}
+
+static void
+mclk_precharge(struct nouveau_mem_exec_func *exec)
+{
+}
+
+static void
+mclk_refresh(struct nouveau_mem_exec_func *exec)
+{
+}
+
+static void
+mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x10f210, enable ? 0x80000000 : 0x00000000);
+}
+
+static void
+mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
+{
+}
+
+static void
+mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
+{
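+	/* round nanoseconds to the nearest microsecond for udelay() */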
+	udelay((nsec + 500) / 1000);
+}
+
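+/* Mode-register windows: for non-GDDR5 memory MR0/MR1 sit at 0x10f300/4
+ * and MR2/MR3 at 0x10f320/4 (second rank mirrored at +8); for GDDR5, MR0
+ * is at 0x10f300, MR1-MR7 at 0x10f32c + mr*4, and MR15 at 0x10f34c.
+ */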
+static u32
+mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	if (pfb->ram.type != NV_MEM_TYPE_GDDR5) {
+		if (mr <= 1)
+			return nv_rd32(device, 0x10f300 + ((mr - 0) * 4));
+		return nv_rd32(device, 0x10f320 + ((mr - 2) * 4));
+	} else {
+		if (mr == 0)
+			return nv_rd32(device, 0x10f300 + (mr * 4));
+		else
+		if (mr <= 7)
+			return nv_rd32(device, 0x10f32c + (mr * 4));
+		return nv_rd32(device, 0x10f34c);
+	}
+}
+
+static void
+mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	if (pfb->ram.type != NV_MEM_TYPE_GDDR5) {
+		if (mr <= 1) {
+			nv_wr32(device, 0x10f300 + ((mr - 0) * 4), data);
+			if (pfb->ram.ranks > 1)
+				nv_wr32(device, 0x10f308 + ((mr - 0) * 4), data);
+		} else
+		if (mr <= 3) {
+			nv_wr32(device, 0x10f320 + ((mr - 2) * 4), data);
+			if (pfb->ram.ranks > 1)
+				nv_wr32(device, 0x10f328 + ((mr - 2) * 4), data);
+		}
+	} else {
+		if      (mr ==  0) nv_wr32(device, 0x10f300 + (mr * 4), data);
+		else if (mr <=  7) nv_wr32(device, 0x10f32c + (mr * 4), data);
+		else if (mr == 15) nv_wr32(device, 0x10f34c, data);
+	}
+}
+
+static void
+mclk_clock_set(struct nouveau_mem_exec_func *exec)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nvc0_pm_state *info = exec->priv;
+	u32 ctrl = nv_rd32(device, 0x132000);
+
+	nv_wr32(device, 0x137360, 0x00000001);
+	nv_wr32(device, 0x137370, 0x00000000);
+	nv_wr32(device, 0x137380, 0x00000000);
+	if (ctrl & 0x00000001)
+		nv_wr32(device, 0x132000, (ctrl &= ~0x00000001));
+
+	nv_wr32(device, 0x132004, info->mem.coef);
+	nv_wr32(device, 0x132000, (ctrl |= 0x00000001));
+	nv_wait(device, 0x137390, 0x00000002, 0x00000002);
+	nv_wr32(device, 0x132018, 0x00005000);
+
+	nv_wr32(device, 0x137370, 0x00000001);
+	nv_wr32(device, 0x137380, 0x00000001);
+	nv_wr32(device, 0x137360, 0x00000000);
+}
+
+static void
+mclk_timing_set(struct nouveau_mem_exec_func *exec)
+{
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nvc0_pm_state *info = exec->priv;
+	struct nouveau_pm_level *perflvl = info->perflvl;
+	int i;
+
+	for (i = 0; i < 5; i++)
+		nv_wr32(device, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
+}
+
+static void
+prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_mem_exec_func exec = {
+		.dev = dev,
+		.precharge = mclk_precharge,
+		.refresh = mclk_refresh,
+		.refresh_auto = mclk_refresh_auto,
+		.refresh_self = mclk_refresh_self,
+		.wait = mclk_wait,
+		.mrg = mclk_mrg,
+		.mrs = mclk_mrs,
+		.clock_set = mclk_clock_set,
+		.timing_set = mclk_timing_set,
+		.priv = info
+	};
+
+	if (device->chipset < 0xd0)
+		nv_wr32(device, 0x611200, 0x00003300);
+	else
+		nv_wr32(device, 0x62c000, 0x03030000);
+
+	nouveau_mem_exec(&exec, info->perflvl);
+
+	if (device->chipset < 0xd0)
+		nv_wr32(device, 0x611200, 0x00003330);
+	else
+		nv_wr32(device, 0x62c000, 0x03030300);
+}
+
+int
+nvc0_pm_clocks_set(struct drm_device *dev, void *data)
+{
+	struct nvc0_pm_state *info = data;
+	int i;
+
+	if (info->mem.coef)
+		prog_mem(dev, info);
+
+	for (i = 0; i < 16; i++) {
+		if (!info->eng[i].freq)
+			continue;
+		prog_clk(dev, i, &info->eng[i]);
+	}
+
+	kfree(info);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/Kconfig b/linux-imx/drivers/gpu/drm/omapdrm/Kconfig
new file mode 100644
index 0000000..09f65dc
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/Kconfig
@@ -0,0 +1,25 @@
+
+config DRM_OMAP
+	tristate "OMAP DRM"
+	depends on DRM && !FB_OMAP2
+	depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
+	depends on OMAP2_DSS
+	select DRM_KMS_HELPER
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_SYS_FOPS
+	default n
+	help
+	  DRM display driver for OMAP2/3/4 based boards.
+
+config DRM_OMAP_NUM_CRTCS
+	int "Number of CRTCs"
+	range 1 10
+	default 1  if ARCH_OMAP2 || ARCH_OMAP3
+	default 2  if ARCH_OMAP4
+	depends on DRM_OMAP
+	help
+	  Select the number of video overlays which can be used as framebuffers.
+	  The remaining overlays are reserved for video.
+
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/Makefile b/linux-imx/drivers/gpu/drm/omapdrm/Makefile
new file mode 100644
index 0000000..d85e058
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/Makefile
@@ -0,0 +1,24 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI)
+#
+
+ccflags-y := -Iinclude/drm -Werror
+omapdrm-y := omap_drv.o \
+	omap_irq.o \
+	omap_debugfs.o \
+	omap_crtc.o \
+	omap_plane.o \
+	omap_encoder.o \
+	omap_connector.o \
+	omap_fb.o \
+	omap_fbdev.o \
+	omap_gem.o \
+	omap_gem_dmabuf.o \
+	omap_dmm_tiler.o \
+	tcm-sita.o
+
+# temporary:
+omapdrm-y += omap_gem_helpers.o
+
+obj-$(CONFIG_DRM_OMAP)	+= omapdrm.o
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/TODO b/linux-imx/drivers/gpu/drm/omapdrm/TODO
new file mode 100644
index 0000000..4d8c18a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/TODO
@@ -0,0 +1,23 @@
+TODO
+. Where should we do eviction (detach_pages())?  We aren't necessarily
+  accessing the pages via a GART, so maybe we need some other threshold
+  to put a cap on the # of pages that can be pin'd.
+  . Use mm_shrinker to trigger unpinning pages.
+  . This is mainly theoretical since most of these devices don't actually
+    have swap or a hard drive.
+. GEM/shmem backed pages can have existing mappings (kernel linear map,
+  etc..), which isn't really ideal.
+. Revisit GEM sync object infrastructure.. TTM has some framework for this
+  already.  Possibly this could be refactored out and made more common?
+  There should be some way to do this with less wheel-reinvention.
+  . This can be handled by the dma-buf fence/reservation stuff when it
+    lands
+
+Userspace:
+. git://anongit.freedesktop.org/xorg/driver/xf86-video-omap
+
+Currently tested on
+. OMAP3530 beagleboard
+. OMAP4430 pandaboard
+. OMAP4460 pandaboard
+. OMAP5432 uEVM
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_connector.c b/linux-imx/drivers/gpu/drm/omapdrm/omap_connector.c
new file mode 100644
index 0000000..912759d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -0,0 +1,319 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_connector.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * connector funcs
+ */
+
+#define to_omap_connector(x) container_of(x, struct omap_connector, base)
+
+struct omap_connector {
+	struct drm_connector base;
+	struct omap_dss_device *dssdev;
+	struct drm_encoder *encoder;
+};
+
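+/* DRM modes express sync geometry cumulatively: hsync_start = hdisplay +
+ * front porch, hsync_end = hsync_start + sync width, htotal = hsync_end +
+ * back porch (and likewise vertically).  E.g. a 1280x720 mode with
+ * hfp=110, hsw=40, hbp=220 gets hsync_start=1390, hsync_end=1430,
+ * htotal=1650.
+ */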
+void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+		struct omap_video_timings *timings)
+{
+	mode->clock = timings->pixel_clock;
+
+	mode->hdisplay = timings->x_res;
+	mode->hsync_start = mode->hdisplay + timings->hfp;
+	mode->hsync_end = mode->hsync_start + timings->hsw;
+	mode->htotal = mode->hsync_end + timings->hbp;
+
+	mode->vdisplay = timings->y_res;
+	mode->vsync_start = mode->vdisplay + timings->vfp;
+	mode->vsync_end = mode->vsync_start + timings->vsw;
+	mode->vtotal = mode->vsync_end + timings->vbp;
+
+	mode->flags = 0;
+
+	if (timings->interlace)
+		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+	if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
+		mode->flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+	if (timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
+		mode->flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		mode->flags |= DRM_MODE_FLAG_NVSYNC;
+}
+
+void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+		struct drm_display_mode *mode)
+{
+	timings->pixel_clock = mode->clock;
+
+	timings->x_res = mode->hdisplay;
+	timings->hfp = mode->hsync_start - mode->hdisplay;
+	timings->hsw = mode->hsync_end - mode->hsync_start;
+	timings->hbp = mode->htotal - mode->hsync_end;
+
+	timings->y_res = mode->vdisplay;
+	timings->vfp = mode->vsync_start - mode->vdisplay;
+	timings->vsw = mode->vsync_end - mode->vsync_start;
+	timings->vbp = mode->vtotal - mode->vsync_end;
+
+	timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
+	else
+		timings->hsync_level = OMAPDSS_SIG_ACTIVE_LOW;
+
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		timings->vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
+	else
+		timings->vsync_level = OMAPDSS_SIG_ACTIVE_LOW;
+
+	timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+	timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+	timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
+}
+
+static enum drm_connector_status omap_connector_detect(
+		struct drm_connector *connector, bool force)
+{
+	struct omap_connector *omap_connector = to_omap_connector(connector);
+	struct omap_dss_device *dssdev = omap_connector->dssdev;
+	struct omap_dss_driver *dssdrv = dssdev->driver;
+	enum drm_connector_status ret;
+
+	if (dssdrv->detect) {
+		if (dssdrv->detect(dssdev))
+			ret = connector_status_connected;
+		else
+			ret = connector_status_disconnected;
+	} else if (dssdev->type == OMAP_DISPLAY_TYPE_DPI ||
+			dssdev->type == OMAP_DISPLAY_TYPE_DBI ||
+			dssdev->type == OMAP_DISPLAY_TYPE_SDI ||
+			dssdev->type == OMAP_DISPLAY_TYPE_DSI) {
+		ret = connector_status_connected;
+	} else {
+		ret = connector_status_unknown;
+	}
+
+	VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);
+
+	return ret;
+}
+
+static void omap_connector_destroy(struct drm_connector *connector)
+{
+	struct omap_connector *omap_connector = to_omap_connector(connector);
+	struct omap_dss_device *dssdev = omap_connector->dssdev;
+
+	DBG("%s", omap_connector->dssdev->name);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(omap_connector);
+
+	omap_dss_put_device(dssdev);
+}
+
+#define MAX_EDID  512
+
+static int omap_connector_get_modes(struct drm_connector *connector)
+{
+	struct omap_connector *omap_connector = to_omap_connector(connector);
+	struct omap_dss_device *dssdev = omap_connector->dssdev;
+	struct omap_dss_driver *dssdrv = dssdev->driver;
+	struct drm_device *dev = connector->dev;
+	int n = 0;
+
+	DBG("%s", omap_connector->dssdev->name);
+
+	/* if the display exposes EDID, then we parse that in the normal way to
+	 * build a table of supported modes.. otherwise (i.e. fixed resolution
+	 * LCD panels) we just return a single mode corresponding to the
+	 * currently configured timings:
+	 */
+	if (dssdrv->read_edid) {
+		void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
+
+		if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
+				drm_edid_is_valid(edid)) {
+			drm_mode_connector_update_edid_property(
+					connector, edid);
+			n = drm_add_edid_modes(connector, edid);
+		} else {
+			drm_mode_connector_update_edid_property(
+					connector, NULL);
+		}
+		kfree(edid);
+	} else {
+		struct drm_display_mode *mode = drm_mode_create(dev);
+		struct omap_video_timings timings = {0};
+
+		dssdrv->get_timings(dssdev, &timings);
+
+		copy_timings_omap_to_drm(mode, &timings);
+
+		mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+		drm_mode_set_name(mode);
+		drm_mode_probed_add(connector, mode);
+
+		n = 1;
+	}
+
+	return n;
+}
+
+static int omap_connector_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
+	struct omap_connector *omap_connector = to_omap_connector(connector);
+	struct omap_dss_device *dssdev = omap_connector->dssdev;
+	struct omap_dss_driver *dssdrv = dssdev->driver;
+	struct omap_video_timings timings = {0};
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *new_mode;
+	int r, ret = MODE_BAD;
+
+	copy_timings_drm_to_omap(&timings, mode);
+	mode->vrefresh = drm_mode_vrefresh(mode);
+
+	/*
+	 * if the panel driver doesn't have a check_timings, it's most likely
+	 * a fixed resolution panel, check if the timings match with the
+	 * panel's timings
+	 */
+	if (dssdrv->check_timings) {
+		r = dssdrv->check_timings(dssdev, &timings);
+	} else {
+		struct omap_video_timings t = {0};
+
+		dssdrv->get_timings(dssdev, &t);
+
+		if (memcmp(&timings, &t, sizeof(struct omap_video_timings)))
+			r = -EINVAL;
+		else
+			r = 0;
+	}
+
+	if (!r) {
+		/* check if vrefresh is still valid */
+		new_mode = drm_mode_duplicate(dev, mode);
+		new_mode->clock = timings.pixel_clock;
+		new_mode->vrefresh = 0;
+		if (mode->vrefresh == drm_mode_vrefresh(new_mode))
+			ret = MODE_OK;
+		drm_mode_destroy(dev, new_mode);
+	}
+
+	DBG("connector: mode %s: "
+			"%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+			(ret == MODE_OK) ? "valid" : "invalid",
+			mode->base.id, mode->name, mode->vrefresh, mode->clock,
+			mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal,
+			mode->vdisplay, mode->vsync_start,
+			mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+	return ret;
+}
+
+struct drm_encoder *omap_connector_attached_encoder(
+		struct drm_connector *connector)
+{
+	struct omap_connector *omap_connector = to_omap_connector(connector);
+	return omap_connector->encoder;
+}
+
+static const struct drm_connector_funcs omap_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = omap_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = omap_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
+	.get_modes = omap_connector_get_modes,
+	.mode_valid = omap_connector_mode_valid,
+	.best_encoder = omap_connector_attached_encoder,
+};
+
+/* flush an area of the framebuffer (in the case of a manual-update display
+ * that is not automatically flushed)
+ */
+void omap_connector_flush(struct drm_connector *connector,
+		int x, int y, int w, int h)
+{
+	struct omap_connector *omap_connector = to_omap_connector(connector);
+
+	/* TODO: enable when supported in dss */
+	VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
+}
+
+/* initialize connector */
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+		int connector_type, struct omap_dss_device *dssdev,
+		struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = NULL;
+	struct omap_connector *omap_connector;
+
+	DBG("%s", dssdev->name);
+
+	omap_dss_get_device(dssdev);
+
+	omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
+	if (!omap_connector)
+		goto fail;
+
+	omap_connector->dssdev = dssdev;
+	omap_connector->encoder = encoder;
+
+	connector = &omap_connector->base;
+
+	drm_connector_init(dev, connector, &omap_connector_funcs,
+				connector_type);
+	drm_connector_helper_add(connector, &omap_connector_helper_funcs);
+
+#if 0 /* enable when dss2 supports hotplug */
+	if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_HPD)
+		connector->polled = 0;
+	else
+#endif
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+				DRM_CONNECTOR_POLL_DISCONNECT;
+
+	connector->interlace_allowed = 1;
+	connector->doublescan_allowed = 0;
+
+	drm_sysfs_connector_add(connector);
+
+	return connector;
+
+fail:
+	if (connector)
+		omap_connector_destroy(connector);
+
+	return NULL;
+}
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_crtc.c b/linux-imx/drivers/gpu/drm/omapdrm/omap_crtc.c
new file mode 100644
index 0000000..79b200a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -0,0 +1,661 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_crtc.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include <drm/drm_mode.h>
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
+
+struct omap_crtc {
+	struct drm_crtc base;
+	struct drm_plane *plane;
+
+	const char *name;
+	int pipe;
+	enum omap_channel channel;
+	struct omap_overlay_manager_info info;
+
+	/*
+	 * Temporary: eventually this will go away, but it is needed
+	 * for now to keep the outputs happy.  (They only need
+	 * mgr->id.)  Eventually this will be replaced w/ something
+	 * more common-panel-framework-y
+	 */
+	struct omap_overlay_manager mgr;
+
+	struct omap_video_timings timings;
+	bool enabled;
+	bool full_update;
+
+	struct omap_drm_apply apply;
+
+	struct omap_drm_irq apply_irq;
+	struct omap_drm_irq error_irq;
+
+	/* list of in-progress applies: */
+	struct list_head pending_applies;
+
+	/* list of queued applies: */
+	struct list_head queued_applies;
+
+	/* for handling queued and in-progress applies: */
+	struct work_struct apply_work;
+
+	/* if there is a pending flip, these will be non-null: */
+	struct drm_pending_vblank_event *event;
+	struct drm_framebuffer *old_fb;
+
+	/* for handling page flips without caring about what context
+	 * the callback is called from.  Possibly we should just
+	 * make omap_gem always call the cb from the worker so
+	 * we don't have to care about this..
+	 *
+	 * XXX maybe fold into apply_work??
+	 */
+	struct work_struct page_flip_work;
+};
+
+uint32_t pipe2vbl(struct drm_crtc *crtc)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+	return dispc_mgr_get_vsync_irq(omap_crtc->channel);
+}
+
+/*
+ * Manager-ops, callbacks from output when they need to configure
+ * the upstream part of the video pipe.
+ *
+ * Most of these we can ignore until we add support for command-mode
+ * panels.. for video-mode the crtc-helpers already do an adequate
+ * job of sequencing the setup of the video pipe in the proper order
+ */
+
+/* we can probably ignore these until we support command-mode panels: */
+static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
+{
+}
+
+static int omap_crtc_enable(struct omap_overlay_manager *mgr)
+{
+	return 0;
+}
+
+static void omap_crtc_disable(struct omap_overlay_manager *mgr)
+{
+}
+
+static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
+		const struct omap_video_timings *timings)
+{
+	struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+	DBG("%s", omap_crtc->name);
+	omap_crtc->timings = *timings;
+	omap_crtc->full_update = true;
+}
+
+static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr,
+		const struct dss_lcd_mgr_config *config)
+{
+	struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+	DBG("%s", omap_crtc->name);
+	dispc_mgr_set_lcd_config(omap_crtc->channel, config);
+}
+
+static int omap_crtc_register_framedone_handler(
+		struct omap_overlay_manager *mgr,
+		void (*handler)(void *), void *data)
+{
+	return 0;
+}
+
+static void omap_crtc_unregister_framedone_handler(
+		struct omap_overlay_manager *mgr,
+		void (*handler)(void *), void *data)
+{
+}
+
+static const struct dss_mgr_ops mgr_ops = {
+		.start_update = omap_crtc_start_update,
+		.enable = omap_crtc_enable,
+		.disable = omap_crtc_disable,
+		.set_timings = omap_crtc_set_timings,
+		.set_lcd_config = omap_crtc_set_lcd_config,
+		.register_framedone_handler = omap_crtc_register_framedone_handler,
+		.unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
+};
+
+/*
+ * CRTC funcs:
+ */
+
+static void omap_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+	DBG("%s", omap_crtc->name);
+
+	WARN_ON(omap_crtc->apply_irq.registered);
+	omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
+	omap_crtc->plane->funcs->destroy(omap_crtc->plane);
+	drm_crtc_cleanup(crtc);
+
+	kfree(omap_crtc);
+}
+
+static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct omap_drm_private *priv = crtc->dev->dev_private;
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	bool enabled = (mode == DRM_MODE_DPMS_ON);
+	int i;
+
+	DBG("%s: %d", omap_crtc->name, mode);
+
+	if (enabled != omap_crtc->enabled) {
+		omap_crtc->enabled = enabled;
+		omap_crtc->full_update = true;
+		omap_crtc_apply(crtc, &omap_crtc->apply);
+
+		/* also apply the dpms mode to our private plane: */
+		WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
+
+		/* and any attached overlay planes: */
+		for (i = 0; i < priv->num_planes; i++) {
+			struct drm_plane *plane = priv->planes[i];
+			if (plane->crtc == crtc)
+				WARN_ON(omap_plane_dpms(plane, mode));
+		}
+	}
+}
+
+static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static int omap_crtc_mode_set(struct drm_crtc *crtc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode,
+		int x, int y,
+		struct drm_framebuffer *old_fb)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+	mode = adjusted_mode;
+
+	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+			omap_crtc->name, mode->base.id, mode->name,
+			mode->vrefresh, mode->clock,
+			mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal,
+			mode->vdisplay, mode->vsync_start,
+			mode->vsync_end, mode->vtotal,
+			mode->type, mode->flags);
+
+	copy_timings_drm_to_omap(&omap_crtc->timings, mode);
+	omap_crtc->full_update = true;
+
+	return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+			0, 0, mode->hdisplay, mode->vdisplay,
+			x << 16, y << 16,
+			mode->hdisplay << 16, mode->vdisplay << 16,
+			NULL, NULL);
+}
+
+static void omap_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	DBG("%s", omap_crtc->name);
+	omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_crtc_commit(struct drm_crtc *crtc)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	DBG("%s", omap_crtc->name);
+	omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+		struct drm_framebuffer *old_fb)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	struct drm_plane *plane = omap_crtc->plane;
+	struct drm_display_mode *mode = &crtc->mode;
+
+	return omap_plane_mode_set(plane, crtc, crtc->fb,
+			0, 0, mode->hdisplay, mode->vdisplay,
+			x << 16, y << 16,
+			mode->hdisplay << 16, mode->vdisplay << 16,
+			NULL, NULL);
+}
+
+static void omap_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void vblank_cb(void *arg)
+{
+	struct drm_crtc *crtc = arg;
+	struct drm_device *dev = crtc->dev;
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	/* wakeup userspace */
+	if (omap_crtc->event)
+		drm_send_vblank_event(dev, omap_crtc->pipe, omap_crtc->event);
+
+	omap_crtc->event = NULL;
+	omap_crtc->old_fb = NULL;
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static void page_flip_worker(struct work_struct *work)
+{
+	struct omap_crtc *omap_crtc =
+			container_of(work, struct omap_crtc, page_flip_work);
+	struct drm_crtc *crtc = &omap_crtc->base;
+	struct drm_display_mode *mode = &crtc->mode;
+	struct drm_gem_object *bo;
+
+	mutex_lock(&crtc->mutex);
+	omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+			0, 0, mode->hdisplay, mode->vdisplay,
+			crtc->x << 16, crtc->y << 16,
+			mode->hdisplay << 16, mode->vdisplay << 16,
+			vblank_cb, crtc);
+	mutex_unlock(&crtc->mutex);
+
+	bo = omap_framebuffer_bo(crtc->fb, 0);
+	drm_gem_object_unreference_unlocked(bo);
+}
+
+static void page_flip_cb(void *arg)
+{
+	struct drm_crtc *crtc = arg;
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	struct omap_drm_private *priv = crtc->dev->dev_private;
+
+	/* avoid assumptions about what ctxt we are called from: */
+	queue_work(priv->wq, &omap_crtc->page_flip_work);
+}
+
+static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
+		 struct drm_framebuffer *fb,
+		 struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	struct drm_gem_object *bo;
+
+	DBG("%d -> %d (event=%p)", crtc->fb ? crtc->fb->base.id : -1,
+			fb->base.id, event);
+
+	if (omap_crtc->old_fb) {
+		dev_err(dev->dev, "already a pending flip\n");
+		return -EINVAL;
+	}
+
+	omap_crtc->event = event;
+	crtc->fb = fb;
+
+	/*
+	 * Hold a reference temporarily until the crtc is updated
+	 * and takes the reference to the bo.  This avoids it
+	 * getting freed from under us:
+	 */
+	bo = omap_framebuffer_bo(fb, 0);
+	drm_gem_object_reference(bo);
+
+	omap_gem_op_async(bo, OMAP_GEM_READ, page_flip_cb, crtc);
+
+	return 0;
+}
+
+static int omap_crtc_set_property(struct drm_crtc *crtc,
+		struct drm_property *property, uint64_t val)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	struct omap_drm_private *priv = crtc->dev->dev_private;
+
+	if (property == priv->rotation_prop) {
+		crtc->invert_dimensions =
+				!!(val & ((1LL << DRM_ROTATE_90) | (1LL << DRM_ROTATE_270)));
+	}
+
+	return omap_plane_set_property(omap_crtc->plane, property, val);
+}
+
+static const struct drm_crtc_funcs omap_crtc_funcs = {
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = omap_crtc_destroy,
+	.page_flip = omap_crtc_page_flip_locked,
+	.set_property = omap_crtc_set_property,
+};
+
+static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
+	.dpms = omap_crtc_dpms,
+	.mode_fixup = omap_crtc_mode_fixup,
+	.mode_set = omap_crtc_mode_set,
+	.prepare = omap_crtc_prepare,
+	.commit = omap_crtc_commit,
+	.mode_set_base = omap_crtc_mode_set_base,
+	.load_lut = omap_crtc_load_lut,
+};
+
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	return &omap_crtc->timings;
+}
+
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	return omap_crtc->channel;
+}
+
+static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+	struct omap_crtc *omap_crtc =
+			container_of(irq, struct omap_crtc, error_irq);
+	struct drm_crtc *crtc = &omap_crtc->base;
+	DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
+	/* to avoid an irq flood, unregister the irq until the next vblank */
+	omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+	struct omap_crtc *omap_crtc =
+			container_of(irq, struct omap_crtc, apply_irq);
+	struct drm_crtc *crtc = &omap_crtc->base;
+
+	if (!omap_crtc->error_irq.registered)
+		omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+
+	if (!dispc_mgr_go_busy(omap_crtc->channel)) {
+		struct omap_drm_private *priv =
+				crtc->dev->dev_private;
+		DBG("%s: apply done", omap_crtc->name);
+		omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+		queue_work(priv->wq, &omap_crtc->apply_work);
+	}
+}
+
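+/* Apply lifecycle: omap_crtc_apply() puts an apply on the queued list and
+ * kicks this worker; the worker calls pre_apply() on each queued apply,
+ * moves it to the pending list and sets the GO bit.  When the GO bit
+ * clears, apply_irq re-queues the worker, which then runs post_apply() on
+ * the now-completed applies.
+ */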
+static void apply_worker(struct work_struct *work)
+{
+	struct omap_crtc *omap_crtc =
+			container_of(work, struct omap_crtc, apply_work);
+	struct drm_crtc *crtc = &omap_crtc->base;
+	struct drm_device *dev = crtc->dev;
+	struct omap_drm_apply *apply, *n;
+	bool need_apply;
+
+	/*
+	 * Synchronize everything on mode_config.mutex, to keep
+	 * the callbacks and list modification all serialized
+	 * with respect to modesetting ioctls from userspace.
+	 */
+	mutex_lock(&crtc->mutex);
+	dispc_runtime_get();
+
+	/*
+	 * If a previous update is still pending, wait.. when the
+	 * pending update completes, we get kicked again.
+	 */
+	if (omap_crtc->apply_irq.registered)
+		goto out;
+
+	/* finish up the previous applies: */
+	list_for_each_entry_safe(apply, n,
+			&omap_crtc->pending_applies, pending_node) {
+		apply->post_apply(apply);
+		list_del(&apply->pending_node);
+	}
+
+	need_apply = !list_empty(&omap_crtc->queued_applies);
+
+	/* then handle the next round of queued applies: */
+	list_for_each_entry_safe(apply, n,
+			&omap_crtc->queued_applies, queued_node) {
+		apply->pre_apply(apply);
+		list_del(&apply->queued_node);
+		apply->queued = false;
+		list_add_tail(&apply->pending_node,
+				&omap_crtc->pending_applies);
+	}
+
+	if (need_apply) {
+		enum omap_channel channel = omap_crtc->channel;
+
+		DBG("%s: GO", omap_crtc->name);
+
+		if (dispc_mgr_is_enabled(channel)) {
+			omap_irq_register(dev, &omap_crtc->apply_irq);
+			dispc_mgr_go(channel);
+		} else {
+			struct omap_drm_private *priv = dev->dev_private;
+			queue_work(priv->wq, &omap_crtc->apply_work);
+		}
+	}
+
+out:
+	dispc_runtime_put();
+	mutex_unlock(&crtc->mutex);
+}
+
+int omap_crtc_apply(struct drm_crtc *crtc,
+		struct omap_drm_apply *apply)
+{
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+	WARN_ON(!mutex_is_locked(&crtc->mutex));
+
+	/* no need to queue it again if it is already queued: */
+	if (apply->queued)
+		return 0;
+
+	apply->queued = true;
+	list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
+
+	/*
+	 * If there are no currently pending updates, then go ahead and
+	 * kick the worker immediately, otherwise it will run again when
+	 * the current update finishes.
+	 */
+	if (list_empty(&omap_crtc->pending_applies)) {
+		struct omap_drm_private *priv = crtc->dev->dev_private;
+		queue_work(priv->wq, &omap_crtc->apply_work);
+	}
+
+	return 0;
+}
+
+/* called only from apply */
+static void set_enabled(struct drm_crtc *crtc, bool enable)
+{
+	struct drm_device *dev = crtc->dev;
+	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+	enum omap_channel channel = omap_crtc->channel;
+	struct omap_irq_wait *wait = NULL;
+
+	if (dispc_mgr_is_enabled(channel) == enable)
+		return;
+
+	/* ignore sync-lost irqs during enable/disable */
+	omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
+	if (dispc_mgr_get_framedone_irq(channel)) {
+		if (!enable) {
+			wait = omap_irq_wait_init(dev,
+					dispc_mgr_get_framedone_irq(channel), 1);
+		}
+	} else {
+		/*
+		 * When we disable digit output, we need to wait until fields
+		 * are done.  Otherwise the DSS is still working, and turning
+		 * off the clocks prevents DSS from going to OFF mode. And when
+		 * enabling, we need to wait for the extra sync losts
+		 */
+		wait = omap_irq_wait_init(dev,
+				dispc_mgr_get_vsync_irq(channel), 2);
+	}
+
+	dispc_mgr_enable(channel, enable);
+
+	if (wait) {
+		int ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
+		if (ret) {
+			dev_err(dev->dev, "%s: timeout waiting for %s\n",
+					omap_crtc->name, enable ? "enable" : "disable");
+		}
+	}
+
+	omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
+{
+	struct omap_crtc *omap_crtc =
+			container_of(apply, struct omap_crtc, apply);
+	struct drm_crtc *crtc = &omap_crtc->base;
+	struct drm_encoder *encoder = NULL;
+
+	DBG("%s: enabled=%d, full=%d", omap_crtc->name,
+			omap_crtc->enabled, omap_crtc->full_update);
+
+	if (omap_crtc->full_update) {
+		struct omap_drm_private *priv = crtc->dev->dev_private;
+		int i;
+		for (i = 0; i < priv->num_encoders; i++) {
+			if (priv->encoders[i]->crtc == crtc) {
+				encoder = priv->encoders[i];
+				break;
+			}
+		}
+	}
+
+	if (!omap_crtc->enabled) {
+		set_enabled(&omap_crtc->base, false);
+		if (encoder)
+			omap_encoder_set_enabled(encoder, false);
+	} else {
+		if (encoder) {
+			omap_encoder_set_enabled(encoder, false);
+			omap_encoder_update(encoder, &omap_crtc->mgr,
+					&omap_crtc->timings);
+			omap_encoder_set_enabled(encoder, true);
+			omap_crtc->full_update = false;
+		}
+
+		dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
+		dispc_mgr_set_timings(omap_crtc->channel,
+				&omap_crtc->timings);
+		set_enabled(&omap_crtc->base, true);
+	}
+
+	omap_crtc->full_update = false;
+}
+
+static void omap_crtc_post_apply(struct omap_drm_apply *apply)
+{
+	/* nothing needed for post-apply */
+}
+
+static const char *channel_names[] = {
+		[OMAP_DSS_CHANNEL_LCD] = "lcd",
+		[OMAP_DSS_CHANNEL_DIGIT] = "tv",
+		[OMAP_DSS_CHANNEL_LCD2] = "lcd2",
+};
+
+/* initialize crtc */
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+		struct drm_plane *plane, enum omap_channel channel, int id)
+{
+	struct drm_crtc *crtc = NULL;
+	struct omap_crtc *omap_crtc;
+	struct omap_overlay_manager_info *info;
+
+	DBG("%s", channel_names[channel]);
+
+	omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
+	if (!omap_crtc)
+		goto fail;
+
+	crtc = &omap_crtc->base;
+
+	INIT_WORK(&omap_crtc->page_flip_work, page_flip_worker);
+	INIT_WORK(&omap_crtc->apply_work, apply_worker);
+
+	INIT_LIST_HEAD(&omap_crtc->pending_applies);
+	INIT_LIST_HEAD(&omap_crtc->queued_applies);
+
+	omap_crtc->apply.pre_apply  = omap_crtc_pre_apply;
+	omap_crtc->apply.post_apply = omap_crtc_post_apply;
+
+	omap_crtc->channel = channel;
+	omap_crtc->plane = plane;
+	omap_crtc->plane->crtc = crtc;
+	omap_crtc->name = channel_names[channel];
+	omap_crtc->pipe = id;
+
+	omap_crtc->apply_irq.irqmask = pipe2vbl(crtc);
+	omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
+
+	omap_crtc->error_irq.irqmask =
+			dispc_mgr_get_sync_lost_irq(channel);
+	omap_crtc->error_irq.irq = omap_crtc_error_irq;
+	omap_irq_register(dev, &omap_crtc->error_irq);
+
+	/* temporary: */
+	omap_crtc->mgr.id = channel;
+
+	dss_install_mgr_ops(&mgr_ops);
+
+	/* TODO: fix hard-coded setup.. add properties! */
+	info = &omap_crtc->info;
+	info->default_color = 0x00000000;
+	info->trans_key = 0x00000000;
+	info->trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+	info->trans_enabled = false;
+
+	drm_crtc_init(dev, crtc, &omap_crtc_funcs);
+	drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
+
+	omap_plane_install_properties(omap_crtc->plane, &crtc->base);
+
+	return crtc;
+
+fail:
+	if (crtc)
+		omap_crtc_destroy(crtc);
+
+	return NULL;
+}
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_debugfs.c b/linux-imx/drivers/gpu/drm/omapdrm/omap_debugfs.c
new file mode 100644
index 0000000..c27f59d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -0,0 +1,125 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_debugfs.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+#include "drm_fb_helper.h"
+
+
+#ifdef CONFIG_DEBUG_FS
+
+static int gem_show(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct omap_drm_private *priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "All Objects:\n");
+	omap_gem_describe_objects(&priv->obj_list, m);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int mm_show(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	return drm_mm_dump_table(m, dev->mm_private);
+}
+
+static int fb_show(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct omap_drm_private *priv = dev->dev_private;
+	struct drm_framebuffer *fb;
+
+	seq_printf(m, "fbcon ");
+	omap_framebuffer_describe(priv->fbdev->fb, m);
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+		if (fb == priv->fbdev->fb)
+			continue;
+
+		seq_printf(m, "user ");
+		omap_framebuffer_describe(fb, m);
+	}
+	mutex_unlock(&dev->mode_config.fb_lock);
+
+	return 0;
+}
+
+/* list of debugfs files that are applicable to all devices */
+static struct drm_info_list omap_debugfs_list[] = {
+	{"gem", gem_show, 0},
+	{"mm", mm_show, 0},
+	{"fb", fb_show, 0},
+};
+
+/* list of debugfs files that are specific to devices with dmm/tiler */
+static struct drm_info_list omap_dmm_debugfs_list[] = {
+	{"tiler_map", tiler_map_show, 0},
+};
+
+int omap_debugfs_init(struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	ret = drm_debugfs_create_files(omap_debugfs_list,
+			ARRAY_SIZE(omap_debugfs_list),
+			minor->debugfs_root, minor);
+
+	if (ret) {
+		dev_err(dev->dev, "could not install omap_debugfs_list\n");
+		return ret;
+	}
+
+	if (dmm_is_available())
+		ret = drm_debugfs_create_files(omap_dmm_debugfs_list,
+				ARRAY_SIZE(omap_dmm_debugfs_list),
+				minor->debugfs_root, minor);
+
+	if (ret) {
+		dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+void omap_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(omap_debugfs_list,
+			ARRAY_SIZE(omap_debugfs_list), minor);
+	if (dmm_is_available())
+		drm_debugfs_remove_files(omap_dmm_debugfs_list,
+				ARRAY_SIZE(omap_dmm_debugfs_list), minor);
+}
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/linux-imx/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
new file mode 100644
index 0000000..58bcd6a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
@@ -0,0 +1,188 @@
+/*
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rob Clark <rob@ti.com>
+ *         Andy Gross <andy.gross@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef OMAP_DMM_PRIV_H
+#define OMAP_DMM_PRIV_H
+
+#define DMM_REVISION          0x000
+#define DMM_HWINFO            0x004
+#define DMM_LISA_HWINFO       0x008
+#define DMM_DMM_SYSCONFIG     0x010
+#define DMM_LISA_LOCK         0x01C
+#define DMM_LISA_MAP__0       0x040
+#define DMM_LISA_MAP__1       0x044
+#define DMM_TILER_HWINFO      0x208
+#define DMM_TILER_OR__0       0x220
+#define DMM_TILER_OR__1       0x224
+#define DMM_PAT_HWINFO        0x408
+#define DMM_PAT_GEOMETRY      0x40C
+#define DMM_PAT_CONFIG        0x410
+#define DMM_PAT_VIEW__0       0x420
+#define DMM_PAT_VIEW__1       0x424
+#define DMM_PAT_VIEW_MAP__0   0x440
+#define DMM_PAT_VIEW_MAP_BASE 0x460
+#define DMM_PAT_IRQ_EOI       0x478
+#define DMM_PAT_IRQSTATUS_RAW 0x480
+#define DMM_PAT_IRQSTATUS     0x490
+#define DMM_PAT_IRQENABLE_SET 0x4A0
+#define DMM_PAT_IRQENABLE_CLR 0x4B0
+#define DMM_PAT_STATUS__0     0x4C0
+#define DMM_PAT_STATUS__1     0x4C4
+#define DMM_PAT_STATUS__2     0x4C8
+#define DMM_PAT_STATUS__3     0x4CC
+#define DMM_PAT_DESCR__0      0x500
+#define DMM_PAT_DESCR__1      0x510
+#define DMM_PAT_DESCR__2      0x520
+#define DMM_PAT_DESCR__3      0x530
+#define DMM_PEG_HWINFO        0x608
+#define DMM_PEG_PRIO          0x620
+#define DMM_PEG_PRIO_PAT      0x640
+
+#define DMM_IRQSTAT_DST			(1<<0)
+#define DMM_IRQSTAT_LST			(1<<1)
+#define DMM_IRQSTAT_ERR_INV_DSC		(1<<2)
+#define DMM_IRQSTAT_ERR_INV_DATA	(1<<3)
+#define DMM_IRQSTAT_ERR_UPD_AREA	(1<<4)
+#define DMM_IRQSTAT_ERR_UPD_CTRL	(1<<5)
+#define DMM_IRQSTAT_ERR_UPD_DATA	(1<<6)
+#define DMM_IRQSTAT_ERR_LUT_MISS	(1<<7)
+
+#define DMM_IRQSTAT_ERR_MASK	(DMM_IRQSTAT_ERR_INV_DSC | \
+				DMM_IRQSTAT_ERR_INV_DATA | \
+				DMM_IRQSTAT_ERR_UPD_AREA | \
+				DMM_IRQSTAT_ERR_UPD_CTRL | \
+				DMM_IRQSTAT_ERR_UPD_DATA | \
+				DMM_IRQSTAT_ERR_LUT_MISS)
+
+#define DMM_PATSTATUS_READY		(1<<0)
+#define DMM_PATSTATUS_VALID		(1<<1)
+#define DMM_PATSTATUS_RUN		(1<<2)
+#define DMM_PATSTATUS_DONE		(1<<3)
+#define DMM_PATSTATUS_LINKED		(1<<4)
+#define DMM_PATSTATUS_BYPASSED		(1<<7)
+#define DMM_PATSTATUS_ERR_INV_DESCR	(1<<10)
+#define DMM_PATSTATUS_ERR_INV_DATA	(1<<11)
+#define DMM_PATSTATUS_ERR_UPD_AREA	(1<<12)
+#define DMM_PATSTATUS_ERR_UPD_CTRL	(1<<13)
+#define DMM_PATSTATUS_ERR_UPD_DATA	(1<<14)
+#define DMM_PATSTATUS_ERR_ACCESS	(1<<15)
+
+/* note: don't treat DMM_PATSTATUS_ERR_ACCESS as an error */
+#define DMM_PATSTATUS_ERR	(DMM_PATSTATUS_ERR_INV_DESCR | \
+				DMM_PATSTATUS_ERR_INV_DATA | \
+				DMM_PATSTATUS_ERR_UPD_AREA | \
+				DMM_PATSTATUS_ERR_UPD_CTRL | \
+				DMM_PATSTATUS_ERR_UPD_DATA)
+
+
+
+enum {
+	PAT_STATUS,
+	PAT_DESCR
+};
+
+struct pat_ctrl {
+	u32 start:4;
+	u32 dir:4;
+	u32 lut_id:8;
+	u32 sync:12;
+	u32 ini:4;
+};
+
+struct pat {
+	uint32_t next_pa;
+	struct pat_area area;
+	struct pat_ctrl ctrl;
+	uint32_t data_pa;
+};
+
+#define DMM_FIXED_RETRY_COUNT 1000
+
+/* create refill buffer big enough to refill all slots, plus 3 descriptors..
+ * 3 descriptors is probably the worst-case for # of 2d-slices in a 1d area,
+ * but I guess you don't hit that worst case at the same time as full area
+ * refill
+ */
+#define DESCR_SIZE 128
+#define REFILL_BUFFER_SIZE ((4 * 128 * 256) + (3 * DESCR_SIZE))
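+/* i.e. a 4-byte PAT entry per slot for a full 128 x 256 LUT (128 KiB),
+ * plus three 128-byte descriptors */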
+
+/* For OMAP5, a fixed offset is added to all Y coordinates for 1D buffers.
+ * This is used in programming to address the upper portion of the LUT
+ */
+#define OMAP5_LUT_OFFSET       128
+
+struct dmm;
+
+struct dmm_txn {
+	void *engine_handle;
+	struct tcm *tcm;
+
+	uint8_t *current_va;
+	dma_addr_t current_pa;
+
+	struct pat *last_pat;
+};
+
+struct refill_engine {
+	int id;
+	struct dmm *dmm;
+	struct tcm *tcm;
+
+	uint8_t *refill_va;
+	dma_addr_t refill_pa;
+
+	/* only one trans per engine for now */
+	struct dmm_txn txn;
+
+	bool async;
+
+	wait_queue_head_t wait_for_refill;
+
+	struct list_head idle_node;
+};
+
+struct dmm {
+	struct device *dev;
+	void __iomem *base;
+	int irq;
+
+	struct page *dummy_page;
+	dma_addr_t dummy_pa;
+
+	void *refill_va;
+	dma_addr_t refill_pa;
+
+	/* refill engines */
+	wait_queue_head_t engine_queue;
+	struct list_head idle_head;
+	struct refill_engine *engines;
+	int num_engines;
+	atomic_t engine_counter;
+
+	/* container information */
+	int container_width;
+	int container_height;
+	int lut_width;
+	int lut_height;
+	int num_lut;
+
+	/* array of LUT - TCM containers */
+	struct tcm **tcm;
+
+	/* allocation list and lock */
+	struct list_head alloc_head;
+};
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/linux-imx/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
new file mode 100644
index 0000000..9b794c9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -0,0 +1,986 @@
+/*
+ * DMM IOMMU driver support functions for TI OMAP processors.
+ *
+ * Author: Rob Clark <rob@ti.com>
+ *         Andy Gross <andy.gross@ti.com>
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h> /* platform_device() */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/time.h>
+#include <linux/list.h>
+
+#include "omap_dmm_tiler.h"
+#include "omap_dmm_priv.h"
+
+#define DMM_DRIVER_NAME "dmm"
+
+/* mappings for associating views to luts */
+static struct tcm *containers[TILFMT_NFORMATS];
+static struct dmm *omap_dmm;
+
+/* global spinlock for protecting lists */
+static DEFINE_SPINLOCK(list_lock);
+
+/* Geometry table */
+#define GEOM(xshift, yshift, bytes_per_pixel) { \
+		.x_shft = (xshift), \
+		.y_shft = (yshift), \
+		.cpp    = (bytes_per_pixel), \
+		.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
+		.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
+	}
+
+static const struct {
+	uint32_t x_shft;	/* unused X-bits (as part of bpp) */
+	uint32_t y_shft;	/* unused Y-bits (as part of bpp) */
+	uint32_t cpp;		/* bytes/chars per pixel */
+	uint32_t slot_w;	/* width of each slot (in pixels) */
+	uint32_t slot_h;	/* height of each slot (in pixels) */
+} geom[TILFMT_NFORMATS] = {
+		[TILFMT_8BIT]  = GEOM(0, 0, 1),
+		[TILFMT_16BIT] = GEOM(0, 1, 2),
+		[TILFMT_32BIT] = GEOM(1, 1, 4),
+		[TILFMT_PAGE]  = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
+};
+
+
+/* lookup table for registers w/ per-engine instances */
+static const uint32_t reg[][4] = {
+		[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
+				DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
+		[PAT_DESCR]  = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
+				DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
+};
+
+/* simple allocator to grab next 16 byte aligned memory from txn */
+static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
+{
+	void *ptr;
+	struct refill_engine *engine = txn->engine_handle;
+
+	/* dmm programming requires 16 byte aligned addresses */
+	txn->current_pa = round_up(txn->current_pa, 16);
+	txn->current_va = (void *)round_up((long)txn->current_va, 16);
+
+	ptr = txn->current_va;
+	*pa = txn->current_pa;
+
+	txn->current_pa += sz;
+	txn->current_va += sz;
+
+	BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);
+
+	return ptr;
+}
+
+/* check status and spin until wait_mask comes true */
+static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
+{
+	struct dmm *dmm = engine->dmm;
+	uint32_t r = 0, err, i;
+
+	i = DMM_FIXED_RETRY_COUNT;
+	while (true) {
+		r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
+		err = r & DMM_PATSTATUS_ERR;
+		if (err)
+			return -EFAULT;
+
+		if ((r & wait_mask) == wait_mask)
+			break;
+
+		if (--i == 0)
+			return -ETIMEDOUT;
+
+		udelay(1);
+	}
+
+	return 0;
+}
+
+static void release_engine(struct refill_engine *engine)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	list_add(&engine->idle_node, &omap_dmm->idle_head);
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	atomic_inc(&omap_dmm->engine_counter);
+	wake_up_interruptible(&omap_dmm->engine_queue);
+}
+
+static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
+{
+	struct dmm *dmm = arg;
+	uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
+	int i;
+
+	/* ack IRQ */
+	writel(status, dmm->base + DMM_PAT_IRQSTATUS);
+
+	for (i = 0; i < dmm->num_engines; i++) {
+		if (status & DMM_IRQSTAT_LST) {
+			wake_up_interruptible(&dmm->engines[i].wait_for_refill);
+
+			if (dmm->engines[i].async)
+				release_engine(&dmm->engines[i]);
+		}
+
+		status >>= 8;
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * Get a handle for a DMM transaction
+ */
+static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
+{
+	struct dmm_txn *txn = NULL;
+	struct refill_engine *engine = NULL;
+	int ret;
+	unsigned long flags;
+
+
+	/* wait until an engine is available */
+	ret = wait_event_interruptible(omap_dmm->engine_queue,
+		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
+	if (ret)
+		return ERR_PTR(ret);
+
+	/* grab an idle engine */
+	spin_lock_irqsave(&list_lock, flags);
+	if (!list_empty(&dmm->idle_head)) {
+		engine = list_entry(dmm->idle_head.next, struct refill_engine,
+					idle_node);
+		list_del(&engine->idle_node);
+	}
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	BUG_ON(!engine);
+
+	txn = &engine->txn;
+	engine->tcm = tcm;
+	txn->engine_handle = engine;
+	txn->last_pat = NULL;
+	txn->current_va = engine->refill_va;
+	txn->current_pa = engine->refill_pa;
+
+	return txn;
+}
+
+/**
+ * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
+ * corresponding slot is cleared (i.e. dummy_pa is programmed)
+ */
+static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+		struct page **pages, uint32_t npages, uint32_t roll)
+{
+	dma_addr_t pat_pa = 0;
+	uint32_t *data;
+	struct pat *pat;
+	struct refill_engine *engine = txn->engine_handle;
+	int columns = (1 + area->x1 - area->x0);
+	int rows = (1 + area->y1 - area->y0);
+	int i = columns*rows;
+
+	pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
+
+	if (txn->last_pat)
+		txn->last_pat->next_pa = (uint32_t)pat_pa;
+
+	pat->area = *area;
+
+	/* adjust Y coordinates based off of container parameters */
+	pat->area.y0 += engine->tcm->y_offset;
+	pat->area.y1 += engine->tcm->y_offset;
+
+	pat->ctrl = (struct pat_ctrl){
+			.start = 1,
+			.lut_id = engine->tcm->lut_id,
+		};
+
+	data = alloc_dma(txn, 4*i, &pat->data_pa);
+
+	while (i--) {
+		int n = i + roll;
+		if (n >= npages)
+			n -= npages;
+		data[i] = (pages && pages[n]) ?
+			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
+	}
+
+	txn->last_pat = pat;
+
+	return;
+}
+
+/**
+ * Commit the DMM transaction.
+ */
+static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
+{
+	int ret = 0;
+	struct refill_engine *engine = txn->engine_handle;
+	struct dmm *dmm = engine->dmm;
+
+	if (!txn->last_pat) {
+		dev_err(engine->dmm->dev, "need at least one txn\n");
+		ret = -EINVAL;
+		goto cleanup;
+	}
+
+	txn->last_pat->next_pa = 0;
+
+	/* write to PAT_DESCR to clear out any pending transaction */
+	writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
+
+	/* wait for engine ready: */
+	ret = wait_status(engine, DMM_PATSTATUS_READY);
+	if (ret) {
+		ret = -EFAULT;
+		goto cleanup;
+	}
+
+	/* mark as async so the IRQ handler knows to put the engine back on
+	 * the idle list once the refill completes */
+	engine->async = !wait;
+
+	/* kick reload */
+	writel(engine->refill_pa,
+		dmm->base + reg[PAT_DESCR][engine->id]);
+
+	if (wait) {
+		if (wait_event_interruptible_timeout(engine->wait_for_refill,
+				wait_status(engine, DMM_PATSTATUS_READY) == 0,
+				msecs_to_jiffies(1)) <= 0) {
+			dev_err(dmm->dev, "timed out waiting for done\n");
+			ret = -ETIMEDOUT;
+		}
+	}
+
+cleanup:
+	/* only place engine back on list if we are done with it */
+	if (ret || wait)
+		release_engine(engine);
+
+	return ret;
+}
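+
+/* A transaction's life cycle, as used by fill() below:
+ *
+ *	txn = dmm_txn_init(omap_dmm, area->tcm);
+ *	dmm_txn_append(txn, &p_area, pages, npages, roll);  (one per slice)
+ *	ret = dmm_txn_commit(txn, wait);
+ *
+ * The engine is returned to the idle list by dmm_txn_commit() itself on
+ * error or for synchronous commits, and by the IRQ handler once the refill
+ * finishes for asynchronous ones.
+ */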
+
+/*
+ * DMM programming
+ */
+static int fill(struct tcm_area *area, struct page **pages,
+		uint32_t npages, uint32_t roll, bool wait)
+{
+	int ret = 0;
+	struct tcm_area slice, area_s;
+	struct dmm_txn *txn;
+
+	txn = dmm_txn_init(omap_dmm, area->tcm);
+	if (IS_ERR_OR_NULL(txn))
+		return -ENOMEM;
+
+	tcm_for_each_slice(slice, *area, area_s) {
+		struct pat_area p_area = {
+				.x0 = slice.p0.x,  .y0 = slice.p0.y,
+				.x1 = slice.p1.x,  .y1 = slice.p1.y,
+		};
+
+		dmm_txn_append(txn, &p_area, pages, npages, roll);
+
+		roll += tcm_sizeof(slice);
+	}
+
+	ret = dmm_txn_commit(txn, wait);
+
+	return ret;
+}
+
+/*
+ * Pin/unpin
+ */
+
+/* note: slots for which pages[i] == NULL are filled w/ dummy page */
+int tiler_pin(struct tiler_block *block, struct page **pages,
+		uint32_t npages, uint32_t roll, bool wait)
+{
+	int ret;
+
+	ret = fill(&block->area, pages, npages, roll, wait);
+
+	if (ret)
+		tiler_unpin(block);
+
+	return ret;
+}
+
+int tiler_unpin(struct tiler_block *block)
+{
+	return fill(&block->area, NULL, 0, 0, false);
+}
+
+/*
+ * Reserve/release
+ */
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
+		uint16_t h, uint16_t align)
+{
+	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+	u32 min_align = 128;
+	int ret;
+	unsigned long flags;
+
+	if (!block)
+		return ERR_PTR(-ENOMEM);
+
+	BUG_ON(!validfmt(fmt));
+
+	/* convert width/height to slots */
+	w = DIV_ROUND_UP(w, geom[fmt].slot_w);
+	h = DIV_ROUND_UP(h, geom[fmt].slot_h);
+
+	/* convert alignment to slots */
+	min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
+	align = ALIGN(align, min_align);
+	align /= geom[fmt].slot_w * geom[fmt].cpp;
+
+	block->fmt = fmt;
+
+	ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
+	if (ret) {
+		kfree(block);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* add to allocation list */
+	spin_lock_irqsave(&list_lock, flags);
+	list_add(&block->alloc_node, &omap_dmm->alloc_head);
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	return block;
+}
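+
+/* Worked example for the slot conversion above, assuming (illustratively)
+ * that the geom[] table gives TILFMT_8BIT 64x64 slots with cpp = 1: a
+ * request of w = 100, h = 50 converts to 2x1 slots, and an alignment
+ * request of 256 bytes stays at 256 (above the 128 byte minimum) and
+ * converts to 256 / (64 * 1) = 4 slots.
+ */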
+
+struct tiler_block *tiler_reserve_1d(size_t size)
+{
+	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	unsigned long flags;
+
+	if (!block)
+		return ERR_PTR(-ENOMEM);
+
+	block->fmt = TILFMT_PAGE;
+
+	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
+				&block->area)) {
+		kfree(block);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_irqsave(&list_lock, flags);
+	list_add(&block->alloc_node, &omap_dmm->alloc_head);
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	return block;
+}
+
+/* note: if you have pin'd pages, you should have already unpin'd first! */
+int tiler_release(struct tiler_block *block)
+{
+	int ret = tcm_free(&block->area);
+	unsigned long flags;
+
+	if (block->area.tcm)
+		dev_err(omap_dmm->dev, "failed to release block\n");
+
+	spin_lock_irqsave(&list_lock, flags);
+	list_del(&block->alloc_node);
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	kfree(block);
+	return ret;
+}
+
+/*
+ * Utils
+ */
+
+/* calculate the tiler space address of a pixel in a view orientation...
+ * below description copied from the display subsystem section of TRM:
+ *
+ * When the TILER is addressed, the bits:
+ *   [28:27] = 0x0 for 8-bit tiled
+ *             0x1 for 16-bit tiled
+ *             0x2 for 32-bit tiled
+ *             0x3 for page mode
+ *   [31:29] = 0x0 for 0-degree view
+ *             0x1 for 180-degree view + mirroring
+ *             0x2 for 0-degree view + mirroring
+ *             0x3 for 180-degree view
+ *             0x4 for 270-degree view + mirroring
+ *             0x5 for 270-degree view
+ *             0x6 for 90-degree view
+ *             0x7 for 90-degree view + mirroring
+ * Otherwise the bits indicate the corresponding bit address used to access
+ * the SDRAM.
+ */
+static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
+{
+	u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
+
+	x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
+	y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
+	alignment = geom[fmt].x_shft + geom[fmt].y_shft;
+
+	/* validate coordinate */
+	x_mask = MASK(x_bits);
+	y_mask = MASK(y_bits);
+
+	if (x > x_mask || y > y_mask) {
+		DBG("invalid coords: %u > %u || %u > %u",
+				x, x_mask, y, y_mask);
+		return 0;
+	}
+
+	/* account for mirroring */
+	if (orient & MASK_X_INVERT)
+		x ^= x_mask;
+	if (orient & MASK_Y_INVERT)
+		y ^= y_mask;
+
+	/* get coordinate address */
+	if (orient & MASK_XY_FLIP)
+		tmp = ((x << y_bits) + y);
+	else
+		tmp = ((y << x_bits) + x);
+
+	return TIL_ADDR((tmp << alignment), orient, fmt);
+}
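+
+/* Example: TILFMT_32BIT is 2, so for a 32-bit tiled view at 0-degree
+ * orientation the returned address is ((y << x_bits) + x) << alignment
+ * with 0x2 placed in bits [28:27] by TIL_ADDR(), matching the TRM table
+ * above.  The per-format x_shft/y_shft values come from the geom[] table
+ * defined earlier in this file.
+ */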
+
+dma_addr_t tiler_ssptr(struct tiler_block *block)
+{
+	BUG_ON(!validfmt(block->fmt));
+
+	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
+			block->area.p0.x * geom[block->fmt].slot_w,
+			block->area.p0.y * geom[block->fmt].slot_h);
+}
+
+dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
+		uint32_t x, uint32_t y)
+{
+	struct tcm_pt *p = &block->area.p0;
+	BUG_ON(!validfmt(block->fmt));
+
+	return tiler_get_address(block->fmt, orient,
+			(p->x * geom[block->fmt].slot_w) + x,
+			(p->y * geom[block->fmt].slot_h) + y);
+}
+
+void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
+{
+	BUG_ON(!validfmt(fmt));
+	*w = round_up(*w, geom[fmt].slot_w);
+	*h = round_up(*h, geom[fmt].slot_h);
+}
+
+uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
+{
+	BUG_ON(!validfmt(fmt));
+
+	if (orient & MASK_XY_FLIP)
+		return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
+	else
+		return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
+}
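+
+/* For example, with CONT_WIDTH_BITS = 14, a format whose y_shft is 0
+ * (which the geom[] table is expected to give for 8-bit tiled) has an
+ * unflipped stride of 1 << 14 = 16384 bytes; an XY-flipped view strides
+ * by 1 << (13 + x_shft) bytes instead.
+ */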
+
+size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+{
+	tiler_align(fmt, &w, &h);
+	return geom[fmt].cpp * w * h;
+}
+
+size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+{
+	BUG_ON(!validfmt(fmt));
+	return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
+}
+
+bool dmm_is_available(void)
+{
+	return omap_dmm ? true : false;
+}
+
+static int omap_dmm_remove(struct platform_device *dev)
+{
+	struct tiler_block *block, *_block;
+	int i;
+	unsigned long flags;
+
+	if (omap_dmm) {
+		/* free all area regions */
+		spin_lock_irqsave(&list_lock, flags);
+		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
+					alloc_node) {
+			list_del(&block->alloc_node);
+			kfree(block);
+		}
+		spin_unlock_irqrestore(&list_lock, flags);
+
+		for (i = 0; i < omap_dmm->num_lut; i++)
+			if (omap_dmm->tcm && omap_dmm->tcm[i])
+				omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
+		kfree(omap_dmm->tcm);
+
+		kfree(omap_dmm->engines);
+		if (omap_dmm->refill_va)
+			dma_free_writecombine(omap_dmm->dev,
+				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+				omap_dmm->refill_va,
+				omap_dmm->refill_pa);
+		if (omap_dmm->dummy_page)
+			__free_page(omap_dmm->dummy_page);
+
+		if (omap_dmm->irq > 0)
+			free_irq(omap_dmm->irq, omap_dmm);
+
+		iounmap(omap_dmm->base);
+		kfree(omap_dmm);
+		omap_dmm = NULL;
+	}
+
+	return 0;
+}
+
+static int omap_dmm_probe(struct platform_device *dev)
+{
+	int ret = -EFAULT, i;
+	struct tcm_area area = {0};
+	u32 hwinfo, pat_geom;
+	struct resource *mem;
+
+	omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
+	if (!omap_dmm) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* initialize lists */
+	INIT_LIST_HEAD(&omap_dmm->alloc_head);
+	INIT_LIST_HEAD(&omap_dmm->idle_head);
+
+	init_waitqueue_head(&omap_dmm->engine_queue);
+
+	/* lookup hwmod data - base address and irq */
+	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&dev->dev, "failed to get base address resource\n");
+		goto fail;
+	}
+
+	omap_dmm->base = ioremap(mem->start, SZ_2K);
+
+	if (!omap_dmm->base) {
+		dev_err(&dev->dev, "failed to get dmm base address\n");
+		goto fail;
+	}
+
+	omap_dmm->irq = platform_get_irq(dev, 0);
+	if (omap_dmm->irq < 0) {
+		dev_err(&dev->dev, "failed to get IRQ resource\n");
+		goto fail;
+	}
+
+	omap_dmm->dev = &dev->dev;
+
+	hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
+	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
+	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
+	omap_dmm->container_width = 256;
+	omap_dmm->container_height = 128;
+
+	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
+
+	/* read out actual LUT width and height */
+	pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
+	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
+	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
+
+	/* on OMAP5 the LUT has twice the container height and is split into
+	 * a separate container, so account for one extra LUT */
+	if (omap_dmm->lut_height != omap_dmm->container_height)
+		omap_dmm->num_lut++;
+
+	/* initialize DMM registers */
+	writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
+	writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
+	writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
+	writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
+	writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
+	writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
+
+	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
+				"omap_dmm_irq_handler", omap_dmm);
+
+	if (ret) {
+		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
+			omap_dmm->irq, ret);
+		omap_dmm->irq = -1;
+		goto fail;
+	}
+
+	/* Enable all interrupts for each refill engine except
+	 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
+	 * about because we want to be able to refill live scanout
+	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
+	 * we just generally don't care about.
+	 */
+	writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
+
+	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
+	if (!omap_dmm->dummy_page) {
+		dev_err(&dev->dev, "could not allocate dummy page\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* set dma mask for device */
+	/* NOTE: this is a workaround for the hwmod not initializing properly */
+	dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
+
+	/* alloc refill memory */
+	omap_dmm->refill_va = dma_alloc_writecombine(&dev->dev,
+				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+				&omap_dmm->refill_pa, GFP_KERNEL);
+	if (!omap_dmm->refill_va) {
+		dev_err(&dev->dev, "could not allocate refill memory\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* alloc engines */
+	omap_dmm->engines = kcalloc(omap_dmm->num_engines,
+				    sizeof(struct refill_engine), GFP_KERNEL);
+	if (!omap_dmm->engines) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	for (i = 0; i < omap_dmm->num_engines; i++) {
+		omap_dmm->engines[i].id = i;
+		omap_dmm->engines[i].dmm = omap_dmm;
+		omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
+						(REFILL_BUFFER_SIZE * i);
+		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
+						(REFILL_BUFFER_SIZE * i);
+		init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
+
+		list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
+	}
+
+	omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
+				GFP_KERNEL);
+	if (!omap_dmm->tcm) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* init containers */
+	/* Each LUT is associated with a TCM (container manager).  We use the
+	   lut_id to identify the correct LUT for programming during refill
+	   operations */
+	for (i = 0; i < omap_dmm->num_lut; i++) {
+		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
+						omap_dmm->container_height,
+						NULL);
+
+		if (!omap_dmm->tcm[i]) {
+			dev_err(&dev->dev, "failed to allocate container\n");
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		omap_dmm->tcm[i]->lut_id = i;
+	}
+
+	/* assign access mode containers to applicable tcm container */
+	/* OMAP 4 has 1 container for all 4 views */
+	/* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
+	containers[TILFMT_8BIT] = omap_dmm->tcm[0];
+	containers[TILFMT_16BIT] = omap_dmm->tcm[0];
+	containers[TILFMT_32BIT] = omap_dmm->tcm[0];
+
+	if (omap_dmm->container_height != omap_dmm->lut_height) {
+		/* second LUT is used for PAGE mode.  Programming must use a
+		   y offset that is added to all y coordinates.  LUT id is still
+		   0, because it is the same LUT, just the upper 128 lines */
+		containers[TILFMT_PAGE] = omap_dmm->tcm[1];
+		omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
+		omap_dmm->tcm[1]->lut_id = 0;
+	} else {
+		containers[TILFMT_PAGE] = omap_dmm->tcm[0];
+	}
+
+	area = (struct tcm_area) {
+		.tcm = NULL,
+		.p1.x = omap_dmm->container_width - 1,
+		.p1.y = omap_dmm->container_height - 1,
+	};
+
+	/* initialize all LUTs to dummy page entries */
+	for (i = 0; i < omap_dmm->num_lut; i++) {
+		area.tcm = omap_dmm->tcm[i];
+		if (fill(&area, NULL, 0, 0, true))
+			dev_err(omap_dmm->dev, "refill failed\n");
+	}
+
+	dev_info(omap_dmm->dev, "initialized all PAT entries\n");
+
+	return 0;
+
+fail:
+	if (omap_dmm_remove(dev))
+		dev_err(&dev->dev, "cleanup failed\n");
+	return ret;
+}
+
+/*
+ * debugfs support
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
+				"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+static const char *special = ".,:;'\"`~!^-+";
+
+static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
+							char c, bool ovw)
+{
+	int x, y;
+	for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
+		for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
+			if (map[y][x] == ' ' || ovw)
+				map[y][x] = c;
+}
+
+static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
+									char c)
+{
+	map[p->y / ydiv][p->x / xdiv] = c;
+}
+
+static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
+{
+	return map[p->y / ydiv][p->x / xdiv];
+}
+
+static int map_width(int xdiv, int x0, int x1)
+{
+	return (x1 / xdiv) - (x0 / xdiv) + 1;
+}
+
+static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
+{
+	char *p = map[yd] + (x0 / xdiv);
+	int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
+	if (w >= 0) {
+		p += w;
+		while (*nice)
+			*p++ = *nice++;
+	}
+}
+
+static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
+							struct tcm_area *a)
+{
+	sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
+	if (a->p0.y + 1 < a->p1.y) {
+		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
+							256 - 1);
+	} else if (a->p0.y < a->p1.y) {
+		if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
+			text_map(map, xdiv, nice, a->p0.y / ydiv,
+					a->p0.x + xdiv,	256 - 1);
+		else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
+			text_map(map, xdiv, nice, a->p1.y / ydiv,
+					0, a->p1.x - xdiv);
+	} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
+		text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
+	}
+}
+
+static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
+							struct tcm_area *a)
+{
+	sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
+	if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
+		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
+							a->p0.x, a->p1.x);
+}
+
+int tiler_map_show(struct seq_file *s, void *arg)
+{
+	int xdiv = 2, ydiv = 1;
+	char **map = NULL, *global_map;
+	struct tiler_block *block;
+	struct tcm_area a, p;
+	int i;
+	const char *m2d = alphabet;
+	const char *a2d = special;
+	const char *m2dp = m2d, *a2dp = a2d;
+	char nice[128];
+	int h_adj;
+	int w_adj;
+	unsigned long flags;
+	int lut_idx;
+
+	if (!omap_dmm) {
+		/* early return if dmm/tiler device is not initialized */
+		return 0;
+	}
+
+	h_adj = omap_dmm->container_height / ydiv;
+	w_adj = omap_dmm->container_width / xdiv;
+
+	map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
+	global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);
+
+	if (!map || !global_map)
+		goto error;
+
+	for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
+		memset(map, 0, h_adj * sizeof(*map));
+		memset(global_map, ' ', (w_adj + 1) * h_adj);
+
+		for (i = 0; i < omap_dmm->container_height; i++) {
+			map[i] = global_map + i * (w_adj + 1);
+			map[i][w_adj] = 0;
+		}
+
+		spin_lock_irqsave(&list_lock, flags);
+
+		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
+			if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
+				if (block->fmt != TILFMT_PAGE) {
+					fill_map(map, xdiv, ydiv, &block->area,
+						*m2dp, true);
+					if (!*++a2dp)
+						a2dp = a2d;
+					if (!*++m2dp)
+						m2dp = m2d;
+					map_2d_info(map, xdiv, ydiv, nice,
+							&block->area);
+				} else {
+					bool start = read_map_pt(map, xdiv,
+						ydiv, &block->area.p0) == ' ';
+					bool end = read_map_pt(map, xdiv, ydiv,
+							&block->area.p1) == ' ';
+
+					tcm_for_each_slice(a, block->area, p)
+						fill_map(map, xdiv, ydiv, &a,
+							'=', true);
+					fill_map_pt(map, xdiv, ydiv,
+							&block->area.p0,
+							start ? '<' : 'X');
+					fill_map_pt(map, xdiv, ydiv,
+							&block->area.p1,
+							end ? '>' : 'X');
+					map_1d_info(map, xdiv, ydiv, nice,
+							&block->area);
+				}
+			}
+		}
+
+		spin_unlock_irqrestore(&list_lock, flags);
+
+		if (s) {
+			seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
+			for (i = 0; i < 128; i++)
+				seq_printf(s, "%03d:%s\n", i, map[i]);
+			seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
+		} else {
+			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
+				lut_idx);
+			for (i = 0; i < 128; i++)
+				dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
+			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
+				lut_idx);
+		}
+	}
+
+error:
+	kfree(map);
+	kfree(global_map);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int omap_dmm_resume(struct device *dev)
+{
+	struct tcm_area area;
+	int i;
+
+	if (!omap_dmm)
+		return -ENODEV;
+
+	area = (struct tcm_area) {
+		.tcm = NULL,
+		.p1.x = omap_dmm->container_width - 1,
+		.p1.y = omap_dmm->container_height - 1,
+	};
+
+	/* initialize all LUTs to dummy page entries */
+	for (i = 0; i < omap_dmm->num_lut; i++) {
+		area.tcm = omap_dmm->tcm[i];
+		if (fill(&area, NULL, 0, 0, true))
+			dev_err(dev, "refill failed");
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops omap_dmm_pm_ops = {
+	.resume = omap_dmm_resume,
+};
+#endif
+
+struct platform_driver omap_dmm_driver = {
+	.probe = omap_dmm_probe,
+	.remove = omap_dmm_remove,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DMM_DRIVER_NAME,
+#ifdef CONFIG_PM
+		.pm = &omap_dmm_pm_ops,
+#endif
+	},
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
+MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
+MODULE_ALIAS("platform:" DMM_DRIVER_NAME);
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/linux-imx/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
new file mode 100644
index 0000000..4fdd61e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rob Clark <rob@ti.com>
+ *         Andy Gross <andy.gross@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef OMAP_DMM_TILER_H
+#define OMAP_DMM_TILER_H
+
+#include "omap_drv.h"
+#include "tcm.h"
+
+enum tiler_fmt {
+	TILFMT_8BIT = 0,
+	TILFMT_16BIT,
+	TILFMT_32BIT,
+	TILFMT_PAGE,
+	TILFMT_NFORMATS
+};
+
+struct pat_area {
+	u32 x0:8;
+	u32 y0:8;
+	u32 x1:8;
+	u32 y1:8;
+};
+
+struct tiler_block {
+	struct list_head alloc_node;	/* node for global block list */
+	struct tcm_area area;		/* area */
+	enum tiler_fmt fmt;		/* format */
+};
+
+/* bits representing the same slot in DMM-TILER hw-block */
+#define SLOT_WIDTH_BITS         6
+#define SLOT_HEIGHT_BITS        6
+
+/* bits reserved to describe coordinates in DMM-TILER hw-block */
+#define CONT_WIDTH_BITS         14
+#define CONT_HEIGHT_BITS        13
+
+/* calculated constants */
+#define TILER_PAGE              (1 << (SLOT_WIDTH_BITS + SLOT_HEIGHT_BITS))
+#define TILER_WIDTH             (1 << (CONT_WIDTH_BITS - SLOT_WIDTH_BITS))
+#define TILER_HEIGHT            (1 << (CONT_HEIGHT_BITS - SLOT_HEIGHT_BITS))
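+
+/* with the bit widths above these work out to: TILER_PAGE = 4096 bytes,
+ * TILER_WIDTH = 256 slots and TILER_HEIGHT = 128 slots, matching the
+ * 256x128 container dimensions programmed in omap_dmm_probe() */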
+
+/*
+Table 15-11. Coding and Description of TILER Orientations
+S Y X	Description				Alternate description
+0 0 0	0-degree view				Natural view
+0 0 1	0-degree view with vertical mirror 	180-degree view with horizontal mirror
+0 1 0	0-degree view with horizontal mirror 	180-degree view with vertical mirror
+0 1 1	180-degree view
+1 0 0	90-degree view with vertical mirror	270-degree view with horizontal mirror
+1 0 1	270-degree view
+1 1 0	90-degree view
+1 1 1	90-degree view with horizontal mirror	270-degree view with vertical mirror
+ */
+#define MASK_XY_FLIP		(1 << 31)
+#define MASK_Y_INVERT		(1 << 30)
+#define MASK_X_INVERT		(1 << 29)
+#define SHIFT_ACC_MODE		27
+#define MASK_ACC_MODE		3
+
+#define MASK(bits) ((1 << (bits)) - 1)
+
+#define TILVIEW_8BIT    0x60000000u
+#define TILVIEW_16BIT   (TILVIEW_8BIT  + VIEW_SIZE)
+#define TILVIEW_32BIT   (TILVIEW_16BIT + VIEW_SIZE)
+#define TILVIEW_PAGE    (TILVIEW_32BIT + VIEW_SIZE)
+#define TILVIEW_END     (TILVIEW_PAGE  + VIEW_SIZE)
+
+/* create tsptr by adding view orientation and access mode */
+#define TIL_ADDR(x, orient, a)\
+	((u32) (x) | (orient) | ((a) << SHIFT_ACC_MODE))
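+
+/* e.g. an illustrative 90-degree, 16-bit tiled pointer could be formed as:
+ *
+ *	u32 tsptr = TIL_ADDR(offset, MASK_XY_FLIP | MASK_Y_INVERT,
+ *			TILFMT_16BIT);
+ *
+ * matching the S=1 Y=1 X=0 (90-degree view) row of the table above.
+ */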
+
+#ifdef CONFIG_DEBUG_FS
+int tiler_map_show(struct seq_file *s, void *arg);
+#endif
+
+/* pin/unpin */
+int tiler_pin(struct tiler_block *block, struct page **pages,
+		uint32_t npages, uint32_t roll, bool wait);
+int tiler_unpin(struct tiler_block *block);
+
+/* reserve/release */
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h,
+				uint16_t align);
+struct tiler_block *tiler_reserve_1d(size_t size);
+int tiler_release(struct tiler_block *block);
+
+/* utilities */
+dma_addr_t tiler_ssptr(struct tiler_block *block);
+dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
+		uint32_t x, uint32_t y);
+uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient);
+size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
+size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
+void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
+bool dmm_is_available(void);
+
+extern struct platform_driver omap_dmm_driver;
+
+/* GEM bo flags -> tiler fmt */
+static inline enum tiler_fmt gem2fmt(uint32_t flags)
+{
+	switch (flags & OMAP_BO_TILED) {
+	case OMAP_BO_TILED_8:
+		return TILFMT_8BIT;
+	case OMAP_BO_TILED_16:
+		return TILFMT_16BIT;
+	case OMAP_BO_TILED_32:
+		return TILFMT_32BIT;
+	default:
+		return TILFMT_PAGE;
+	}
+}
+
+static inline bool validfmt(enum tiler_fmt fmt)
+{
+	switch (fmt) {
+	case TILFMT_8BIT:
+	case TILFMT_16BIT:
+	case TILFMT_32BIT:
+	case TILFMT_PAGE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_drv.c b/linux-imx/drivers/gpu/drm/omapdrm/omap_drv.c
new file mode 100644
index 0000000..826586f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -0,0 +1,712 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_drv.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "omap_dmm_tiler.h"
+
+#define DRIVER_NAME		MODULE_NAME
+#define DRIVER_DESC		"OMAP DRM"
+#define DRIVER_DATE		"20110917"
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		0
+#define DRIVER_PATCHLEVEL	0
+
+static int num_crtc = CONFIG_DRM_OMAP_NUM_CRTCS;
+
+MODULE_PARM_DESC(num_crtc, "Number of overlays to use as CRTCs");
+module_param(num_crtc, int, 0600);
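+
+/* For example (illustrative), booting with omapdrm.num_crtc=2 on the
+ * kernel command line (or "modprobe omapdrm num_crtc=2") limits the driver
+ * to two CRTCs, leaving the remaining overlays available as drm planes.
+ */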
+
+/*
+ * mode config funcs
+ */
+
+/* Notes about mapping DSS and DRM entities:
+ *    CRTC:        overlay
+ *    encoder:     manager.. with some extension to allow one primary CRTC
+ *                 and zero or more video CRTC's to be mapped to one encoder?
+ *    connector:   dssdev.. manager can be attached/detached from different
+ *                 devices
+ */
+
+static void omap_fb_output_poll_changed(struct drm_device *dev)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	DBG("dev=%p", dev);
+	if (priv->fbdev)
+		drm_fb_helper_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs omap_mode_config_funcs = {
+	.fb_create = omap_framebuffer_create,
+	.output_poll_changed = omap_fb_output_poll_changed,
+};
+
+static int get_connector_type(struct omap_dss_device *dssdev)
+{
+	switch (dssdev->type) {
+	case OMAP_DISPLAY_TYPE_HDMI:
+		return DRM_MODE_CONNECTOR_HDMIA;
+	case OMAP_DISPLAY_TYPE_DPI:
+		if (!strcmp(dssdev->name, "dvi"))
+			return DRM_MODE_CONNECTOR_DVID;
+		/* fallthrough */
+	default:
+		return DRM_MODE_CONNECTOR_Unknown;
+	}
+}
+
+static bool channel_used(struct drm_device *dev, enum omap_channel channel)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < priv->num_crtcs; i++) {
+		struct drm_crtc *crtc = priv->crtcs[i];
+
+		if (omap_crtc_channel(crtc) == channel)
+			return true;
+	}
+
+	return false;
+}
+
+static int omap_modeset_init(struct drm_device *dev)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	struct omap_dss_device *dssdev = NULL;
+	int num_ovls = dss_feat_get_num_ovls();
+	int num_mgrs = dss_feat_get_num_mgrs();
+	int num_crtcs;
+	int i, id = 0;
+
+	drm_mode_config_init(dev);
+
+	omap_drm_irq_install(dev);
+
+	/*
+	 * We usually don't want to create a CRTC for each manager, at least
+	 * not until we have a way to expose private planes to userspace.
+	 * Otherwise there would not be enough video pipes left for drm planes.
+	 * We use the num_crtc argument to limit the number of crtcs we create.
+	 */
+	num_crtcs = min3(num_crtc, num_mgrs, num_ovls);
+
+	dssdev = NULL;
+
+	for_each_dss_dev(dssdev) {
+		struct drm_connector *connector;
+		struct drm_encoder *encoder;
+		enum omap_channel channel;
+
+		if (!dssdev->driver) {
+			dev_warn(dev->dev, "%s has no driver.. skipping it\n",
+					dssdev->name);
+			continue;
+		}
+
+		if (!(dssdev->driver->get_timings ||
+					dssdev->driver->read_edid)) {
+			dev_warn(dev->dev, "%s driver does not support "
+				"get_timings or read_edid.. skipping it!\n",
+				dssdev->name);
+			continue;
+		}
+
+		encoder = omap_encoder_init(dev, dssdev);
+
+		if (!encoder) {
+			dev_err(dev->dev, "could not create encoder: %s\n",
+					dssdev->name);
+			return -ENOMEM;
+		}
+
+		connector = omap_connector_init(dev,
+				get_connector_type(dssdev), dssdev, encoder);
+
+		if (!connector) {
+			dev_err(dev->dev, "could not create connector: %s\n",
+					dssdev->name);
+			return -ENOMEM;
+		}
+
+		BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
+		BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
+
+		priv->encoders[priv->num_encoders++] = encoder;
+		priv->connectors[priv->num_connectors++] = connector;
+
+		drm_mode_connector_attach_encoder(connector, encoder);
+
+		/*
+		 * if we have reached the limit of the crtcs we are allowed to
+		 * create, let's not try to look for a crtc for this
+		 * panel/encoder and onwards; we will, of course, populate the
+		 * possible_crtcs field for all the encoders with the final
+		 * set of crtcs we create
+		 */
+		if (id == num_crtcs)
+			continue;
+
+		/*
+		 * get the recommended DISPC channel for this encoder. For now,
+	 * we only try to create a crtc out of the recommended channel; the
+		 * other possible channels to which the encoder can connect are
+		 * not considered.
+		 */
+		channel = dssdev->output->dispc_channel;
+
+		/*
+		 * if this channel hasn't already been taken by a previously
+		 * allocated crtc, we create a new crtc for it
+		 */
+		if (!channel_used(dev, channel)) {
+			struct drm_plane *plane;
+			struct drm_crtc *crtc;
+
+			plane = omap_plane_init(dev, id, true);
+			crtc = omap_crtc_init(dev, plane, channel, id);
+
+			BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+			priv->crtcs[id] = crtc;
+			priv->num_crtcs++;
+
+			priv->planes[id] = plane;
+			priv->num_planes++;
+
+			id++;
+		}
+	}
+
+	/*
+	 * we have allocated crtcs according to the needs of the panels/encoders;
+	 * add more crtcs here if needed
+	 */
+	for (; id < num_crtcs; id++) {
+
+		/* find a free manager for this crtc */
+		for (i = 0; i < num_mgrs; i++) {
+			if (!channel_used(dev, i)) {
+				struct drm_plane *plane;
+				struct drm_crtc *crtc;
+
+				plane = omap_plane_init(dev, id, true);
+				crtc = omap_crtc_init(dev, plane, i, id);
+
+				BUG_ON(priv->num_crtcs >=
+					ARRAY_SIZE(priv->crtcs));
+
+				priv->crtcs[id] = crtc;
+				priv->num_crtcs++;
+
+				priv->planes[id] = plane;
+				priv->num_planes++;
+
+				break;
+			}
+		}
+
+		if (i == num_mgrs) {
+			/* this shouldn't really happen */
+			dev_err(dev->dev, "no managers left for crtc\n");
+			return -ENOMEM;
+		}
+	}
+
+	/*
+	 * Create normal planes for the remaining overlays:
+	 */
+	for (; id < num_ovls; id++) {
+		struct drm_plane *plane = omap_plane_init(dev, id, false);
+
+		BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
+		priv->planes[priv->num_planes++] = plane;
+	}
+
+	for (i = 0; i < priv->num_encoders; i++) {
+		struct drm_encoder *encoder = priv->encoders[i];
+		struct omap_dss_device *dssdev =
+					omap_encoder_get_dssdev(encoder);
+
+		/* figure out which crtc's we can connect the encoder to: */
+		encoder->possible_crtcs = 0;
+		for (id = 0; id < priv->num_crtcs; id++) {
+			struct drm_crtc *crtc = priv->crtcs[id];
+			enum omap_channel crtc_channel;
+			enum omap_dss_output_id supported_outputs;
+
+			crtc_channel = omap_crtc_channel(crtc);
+			supported_outputs =
+				dss_feat_get_supported_outputs(crtc_channel);
+
+			if (supported_outputs & dssdev->output->id)
+				encoder->possible_crtcs |= (1 << id);
+		}
+	}
+
+	DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n",
+		priv->num_planes, priv->num_crtcs, priv->num_encoders,
+		priv->num_connectors);
+
+	dev->mode_config.min_width = 32;
+	dev->mode_config.min_height = 32;
+
+	/* note: eventually will need some cpu_is_omapXYZ() type stuff here
+	 * to fill in these limits properly on different OMAP generations..
+	 */
+	dev->mode_config.max_width = 2048;
+	dev->mode_config.max_height = 2048;
+
+	dev->mode_config.funcs = &omap_mode_config_funcs;
+
+	return 0;
+}
+
+static void omap_modeset_free(struct drm_device *dev)
+{
+	drm_mode_config_cleanup(dev);
+}
+
+/*
+ * drm ioctl funcs
+ */
+
+
+static int ioctl_get_param(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	struct drm_omap_param *args = data;
+
+	DBG("%p: param=%llu", dev, args->param);
+
+	switch (args->param) {
+	case OMAP_PARAM_CHIPSET_ID:
+		args->value = priv->omaprev;
+		break;
+	default:
+		DBG("unknown parameter %lld", args->param);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ioctl_set_param(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_omap_param *args = data;
+
+	switch (args->param) {
+	default:
+		DBG("unknown parameter %lld", args->param);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ioctl_gem_new(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_omap_gem_new *args = data;
+	VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
+			args->size.bytes, args->flags);
+	return omap_gem_new_handle(dev, file_priv, args->size,
+			args->flags, &args->handle);
+}
+
+static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_omap_gem_cpu_prep *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (!obj)
+		return -ENOENT;
+
+	ret = omap_gem_op_sync(obj, args->op);
+
+	if (!ret)
+		ret = omap_gem_op_start(obj, args->op);
+
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
+}
+
+static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_omap_gem_cpu_fini *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (!obj)
+		return -ENOENT;
+
+	/* XXX flushy, flushy */
+	ret = 0;
+
+	if (!ret)
+		ret = omap_gem_op_finish(obj, args->op);
+
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
+}
+
+static int ioctl_gem_info(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_omap_gem_info *args = data;
+	struct drm_gem_object *obj;
+	int ret = 0;
+
+	VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (!obj)
+		return -ENOENT;
+
+	args->size = omap_gem_mmap_size(obj);
+	args->offset = omap_gem_mmap_offset(obj);
+
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
+}
+
+static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+	DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
+};
+
+/*
+ * drm driver funcs
+ */
+
+/**
+ * load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ *   - initialize the memory manager
+ *   - allocate initial config memory
+ *   - setup the DRM framebuffer with the allocated memory
+ */
+static int dev_load(struct drm_device *dev, unsigned long flags)
+{
+	struct omap_drm_platform_data *pdata = dev->dev->platform_data;
+	struct omap_drm_private *priv;
+	int ret;
+
+	DBG("load: dev=%p", dev);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->omaprev = pdata->omaprev;
+
+	dev->dev_private = priv;
+
+	priv->wq = alloc_ordered_workqueue("omapdrm", 0);
+
+	INIT_LIST_HEAD(&priv->obj_list);
+
+	omap_gem_init(dev);
+
+	ret = omap_modeset_init(dev);
+	if (ret) {
+		dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
+		dev->dev_private = NULL;
+		kfree(priv);
+		return ret;
+	}
+
+	ret = drm_vblank_init(dev, priv->num_crtcs);
+	if (ret)
+		dev_warn(dev->dev, "could not init vblank\n");
+
+	priv->fbdev = omap_fbdev_init(dev);
+	if (!priv->fbdev) {
+		dev_warn(dev->dev, "omap_fbdev_init failed\n");
+		/* well, limp along without an fbdev.. maybe X11 will work? */
+	}
+
+	/* store off drm_device for use in pm ops */
+	dev_set_drvdata(dev->dev, dev);
+
+	drm_kms_helper_poll_init(dev);
+
+	return 0;
+}
+
+static int dev_unload(struct drm_device *dev)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+
+	DBG("unload: dev=%p", dev);
+
+	drm_kms_helper_poll_fini(dev);
+	drm_vblank_cleanup(dev);
+	omap_drm_irq_uninstall(dev);
+
+	omap_fbdev_free(dev);
+	omap_modeset_free(dev);
+	omap_gem_deinit(dev);
+
+	flush_workqueue(priv->wq);
+	destroy_workqueue(priv->wq);
+
+	kfree(dev->dev_private);
+	dev->dev_private = NULL;
+
+	dev_set_drvdata(dev->dev, NULL);
+
+	return 0;
+}
+
+static int dev_open(struct drm_device *dev, struct drm_file *file)
+{
+	file->driver_priv = NULL;
+
+	DBG("open: dev=%p, file=%p", dev, file);
+
+	return 0;
+}
+
+static int dev_firstopen(struct drm_device *dev)
+{
+	DBG("firstopen: dev=%p", dev);
+	return 0;
+}
+
+/**
+ * lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited.  In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ */
+static void dev_lastclose(struct drm_device *dev)
+{
+	int i;
+
+	/* we don't support vga-switcheroo.. so just make sure the fbdev
+	 * mode is active
+	 */
+	struct omap_drm_private *priv = dev->dev_private;
+	int ret;
+
+	DBG("lastclose: dev=%p", dev);
+
+	if (priv->rotation_prop) {
+		/* need to restore default rotation state.. not sure
+		 * if there is a cleaner way to restore properties to
+		 * default state?  Maybe a flag that properties should
+		 * automatically be restored to default state on
+		 * lastclose?
+		 */
+		for (i = 0; i < priv->num_crtcs; i++) {
+			drm_object_property_set_value(&priv->crtcs[i]->base,
+					priv->rotation_prop, 0);
+		}
+
+		for (i = 0; i < priv->num_planes; i++) {
+			drm_object_property_set_value(&priv->planes[i]->base,
+					priv->rotation_prop, 0);
+		}
+	}
+
+	drm_modeset_lock_all(dev);
+	ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+	drm_modeset_unlock_all(dev);
+	if (ret)
+		DBG("failed to restore crtc mode");
+}
+
+static void dev_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	DBG("preclose: dev=%p", dev);
+}
+
+static void dev_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	DBG("postclose: dev=%p, file=%p", dev, file);
+}
+
+static const struct vm_operations_struct omap_gem_vm_ops = {
+	.fault = omap_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static const struct file_operations omapdriver_fops = {
+		.owner = THIS_MODULE,
+		.open = drm_open,
+		.unlocked_ioctl = drm_ioctl,
+		.release = drm_release,
+		.mmap = omap_gem_mmap,
+		.poll = drm_poll,
+		.fasync = drm_fasync,
+		.read = drm_read,
+		.llseek = noop_llseek,
+};
+
+static struct drm_driver omap_drm_driver = {
+		.driver_features =
+				DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+		.load = dev_load,
+		.unload = dev_unload,
+		.open = dev_open,
+		.firstopen = dev_firstopen,
+		.lastclose = dev_lastclose,
+		.preclose = dev_preclose,
+		.postclose = dev_postclose,
+		.get_vblank_counter = drm_vblank_count,
+		.enable_vblank = omap_irq_enable_vblank,
+		.disable_vblank = omap_irq_disable_vblank,
+		.irq_preinstall = omap_irq_preinstall,
+		.irq_postinstall = omap_irq_postinstall,
+		.irq_uninstall = omap_irq_uninstall,
+		.irq_handler = omap_irq_handler,
+#ifdef CONFIG_DEBUG_FS
+		.debugfs_init = omap_debugfs_init,
+		.debugfs_cleanup = omap_debugfs_cleanup,
+#endif
+		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+		.gem_prime_export = omap_gem_prime_export,
+		.gem_prime_import = omap_gem_prime_import,
+		.gem_init_object = omap_gem_init_object,
+		.gem_free_object = omap_gem_free_object,
+		.gem_vm_ops = &omap_gem_vm_ops,
+		.dumb_create = omap_gem_dumb_create,
+		.dumb_map_offset = omap_gem_dumb_map_offset,
+		.dumb_destroy = omap_gem_dumb_destroy,
+		.ioctls = ioctls,
+		.num_ioctls = DRM_OMAP_NUM_IOCTLS,
+		.fops = &omapdriver_fops,
+		.name = DRIVER_NAME,
+		.desc = DRIVER_DESC,
+		.date = DRIVER_DATE,
+		.major = DRIVER_MAJOR,
+		.minor = DRIVER_MINOR,
+		.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int pdev_suspend(struct platform_device *pDevice, pm_message_t state)
+{
+	DBG("");
+	return 0;
+}
+
+static int pdev_resume(struct platform_device *device)
+{
+	DBG("");
+	return 0;
+}
+
+static void pdev_shutdown(struct platform_device *device)
+{
+	DBG("");
+}
+
+static int pdev_probe(struct platform_device *device)
+{
+	if (!omapdss_is_initialized())
+		return -EPROBE_DEFER;
+
+	DBG("%s", device->name);
+	return drm_platform_init(&omap_drm_driver, device);
+}
+
+static int pdev_remove(struct platform_device *device)
+{
+	DBG("");
+	drm_platform_exit(&omap_drm_driver, device);
+
+	platform_driver_unregister(&omap_dmm_driver);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops omapdrm_pm_ops = {
+	.resume = omap_gem_resume,
+};
+#endif
+
+static struct platform_driver pdev = {
+		.driver = {
+			.name = DRIVER_NAME,
+			.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+			.pm = &omapdrm_pm_ops,
+#endif
+		},
+		.probe = pdev_probe,
+		.remove = pdev_remove,
+		.suspend = pdev_suspend,
+		.resume = pdev_resume,
+		.shutdown = pdev_shutdown,
+};
+
+static int __init omap_drm_init(void)
+{
+	DBG("init");
+	if (platform_driver_register(&omap_dmm_driver)) {
+		/* we can continue on without DMM.. so not fatal */
+		pr_err("DMM registration failed\n");
+	}
+	return platform_driver_register(&pdev);
+}
+
+static void __exit omap_drm_fini(void)
+{
+	DBG("fini");
+	platform_driver_unregister(&pdev);
+}
+
+/* need late_initcall() so we load after dss_driver's are loaded */
+late_initcall(omap_drm_init);
+module_exit(omap_drm_fini);
+
+MODULE_AUTHOR("Rob Clark <rob@ti.com>");
+MODULE_DESCRIPTION("OMAP DRM Display Driver");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_drv.h b/linux-imx/drivers/gpu/drm/omapdrm/omap_drv.h
new file mode 100644
index 0000000..215a20d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -0,0 +1,303 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_drv.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_DRV_H__
+#define __OMAP_DRV_H__
+
+#include <video/omapdss.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/omap_drm.h>
+#include <linux/platform_data/omap_drm.h>
+
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
+
+#define MODULE_NAME     "omapdrm"
+
+/* max # of mapper-id's that can be assigned.. todo, come up with a better
+ * (but still inexpensive) way to store/access per-buffer mapper private
+ * data..
+ */
+#define MAX_MAPPERS 2
+
+/* parameters which describe (unrotated) coordinates of scanout within a fb: */
+struct omap_drm_window {
+	uint32_t rotation;
+	int32_t  crtc_x, crtc_y;		/* signed because can be offscreen */
+	uint32_t crtc_w, crtc_h;
+	uint32_t src_x, src_y;
+	uint32_t src_w, src_h;
+};
+
+/* Once GO bit is set, we can't make further updates to shadowed registers
+ * until the GO bit is cleared.  So various parts in the kms code that need
+ * to update shadowed registers queue up a pair of callbacks, pre_apply
+ * which is called before setting GO bit, and post_apply that is called
+ * after GO bit is cleared.  The crtc manages the queuing, and everyone
+ * else goes thru omap_crtc_apply() using these callbacks so that the
+ * code which has to deal w/ GO bit state is centralized.
+ */
+struct omap_drm_apply {
+	struct list_head pending_node, queued_node;
+	bool queued;
+	void (*pre_apply)(struct omap_drm_apply *apply);
+	void (*post_apply)(struct omap_drm_apply *apply);
+};
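+
+/* A sketch of the intended usage; my_pre_apply/my_post_apply are
+ * hypothetical, the real queuing lives in the crtc code:
+ *
+ *	static struct omap_drm_apply my_apply = {
+ *		.pre_apply  = my_pre_apply,	(before the GO bit is set)
+ *		.post_apply = my_post_apply,	(after the GO bit clears)
+ *	};
+ *	...
+ *	omap_crtc_apply(crtc, &my_apply);
+ */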
+
+/* For transiently registering for different DSS irqs that various parts
+ * of the KMS code need during setup/configuration.  These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct omap_drm_irq {
+	struct list_head node;
+	uint32_t irqmask;
+	bool registered;
+	void (*irq)(struct omap_drm_irq *irq, uint32_t irqstatus);
+};
+
+/* For KMS code that needs to wait for a certain # of IRQs:
+ */
+struct omap_irq_wait;
+struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
+		uint32_t irqmask, int count);
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+		unsigned long timeout);
+
+struct omap_drm_private {
+	uint32_t omaprev;
+
+	unsigned int num_crtcs;
+	struct drm_crtc *crtcs[8];
+
+	unsigned int num_planes;
+	struct drm_plane *planes[8];
+
+	unsigned int num_encoders;
+	struct drm_encoder *encoders[8];
+
+	unsigned int num_connectors;
+	struct drm_connector *connectors[8];
+
+	struct drm_fb_helper *fbdev;
+
+	struct workqueue_struct *wq;
+
+	/* list of GEM objects: */
+	struct list_head obj_list;
+
+	bool has_dmm;
+
+	/* properties: */
+	struct drm_property *rotation_prop;
+	struct drm_property *zorder_prop;
+
+	/* irq handling: */
+	struct list_head irq_list;    /* list of omap_drm_irq */
+	uint32_t vblank_mask;         /* irq bits set for userspace vblank */
+	struct omap_drm_irq error_handler;
+};
+
+/* this should probably be in drm-core to standardize amongst drivers */
+#define DRM_ROTATE_0	0
+#define DRM_ROTATE_90	1
+#define DRM_ROTATE_180	2
+#define DRM_ROTATE_270	3
+#define DRM_REFLECT_X	4
+#define DRM_REFLECT_Y	5
+
+#ifdef CONFIG_DEBUG_FS
+int omap_debugfs_init(struct drm_minor *minor);
+void omap_debugfs_cleanup(struct drm_minor *minor);
+void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
+void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
+void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
+#endif
+
+#ifdef CONFIG_PM
+int omap_gem_resume(struct device *dev);
+#endif
+
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id);
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id);
+irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
+void omap_irq_preinstall(struct drm_device *dev);
+int omap_irq_postinstall(struct drm_device *dev);
+void omap_irq_uninstall(struct drm_device *dev);
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
+int omap_drm_irq_uninstall(struct drm_device *dev);
+int omap_drm_irq_install(struct drm_device *dev);
+
+struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
+void omap_fbdev_free(struct drm_device *dev);
+
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
+int omap_crtc_apply(struct drm_crtc *crtc,
+		struct omap_drm_apply *apply);
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+		struct drm_plane *plane, enum omap_channel channel, int id);
+
+struct drm_plane *omap_plane_init(struct drm_device *dev,
+		int plane_id, bool private_plane);
+int omap_plane_dpms(struct drm_plane *plane, int mode);
+int omap_plane_mode_set(struct drm_plane *plane,
+		struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		int crtc_x, int crtc_y,
+		unsigned int crtc_w, unsigned int crtc_h,
+		uint32_t src_x, uint32_t src_y,
+		uint32_t src_w, uint32_t src_h,
+		void (*fxn)(void *), void *arg);
+void omap_plane_install_properties(struct drm_plane *plane,
+		struct drm_mode_object *obj);
+int omap_plane_set_property(struct drm_plane *plane,
+		struct drm_property *property, uint64_t val);
+
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+		struct omap_dss_device *dssdev);
+int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled);
+int omap_encoder_update(struct drm_encoder *encoder,
+		struct omap_overlay_manager *mgr,
+		struct omap_video_timings *timings);
+
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+		int connector_type, struct omap_dss_device *dssdev,
+		struct drm_encoder *encoder);
+struct drm_encoder *omap_connector_attached_encoder(
+		struct drm_connector *connector);
+void omap_connector_flush(struct drm_connector *connector,
+		int x, int y, int w, int h);
+
+void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+		struct omap_video_timings *timings);
+void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+		struct drm_display_mode *mode);
+
+uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
+		uint32_t max_formats, enum omap_color_mode supported_modes);
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+		struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+		struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
+struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
+int omap_framebuffer_replace(struct drm_framebuffer *a,
+		struct drm_framebuffer *b, void *arg,
+		void (*unpin)(void *arg, struct drm_gem_object *bo));
+void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
+		struct omap_drm_window *win, struct omap_overlay_info *info);
+struct drm_connector *omap_framebuffer_get_next_connector(
+		struct drm_framebuffer *fb, struct drm_connector *from);
+void omap_framebuffer_flush(struct drm_framebuffer *fb,
+		int x, int y, int w, int h);
+
+void omap_gem_init(struct drm_device *dev);
+void omap_gem_deinit(struct drm_device *dev);
+
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+		union omap_gem_size gsize, uint32_t flags);
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+		union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
+void omap_gem_free_object(struct drm_gem_object *obj);
+int omap_gem_init_object(struct drm_gem_object *obj);
+void *omap_gem_vaddr(struct drm_gem_object *obj);
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+		uint32_t handle, uint64_t *offset);
+int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+		uint32_t handle);
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+		struct drm_mode_create_dumb *args);
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int omap_gem_mmap_obj(struct drm_gem_object *obj,
+		struct vm_area_struct *vma);
+int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
+		void (*fxn)(void *arg), void *arg);
+int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
+void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff);
+void omap_gem_dma_sync(struct drm_gem_object *obj,
+		enum dma_data_direction dir);
+int omap_gem_get_paddr(struct drm_gem_object *obj,
+		dma_addr_t *paddr, bool remap);
+int omap_gem_put_paddr(struct drm_gem_object *obj);
+int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
+		bool remap);
+int omap_gem_put_pages(struct drm_gem_object *obj);
+uint32_t omap_gem_flags(struct drm_gem_object *obj);
+int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
+		int x, int y, dma_addr_t *paddr);
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
+size_t omap_gem_mmap_size(struct drm_gem_object *obj);
+int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h);
+int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
+
+struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
+		struct drm_gem_object *obj, int flags);
+struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
+		struct dma_buf *buffer);
+
+static inline int align_pitch(int pitch, int width, int bpp)
+{
+	int bytespp = (bpp + 7) / 8;
+	/* in case someone tries to feed us a completely bogus stride: */
+	pitch = max(pitch, width * bytespp);
+	/* PVR needs alignment to 8 pixels.. right now that is the most
+	 * restrictive stride requirement..
+	 */
+	return ALIGN(pitch, 8 * bytespp);
+}
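+
+/* e.g. align_pitch(0, 100, 32): bytespp = 4, so the minimum pitch is
+ * 100 * 4 = 400 bytes, rounded up to ALIGN(400, 8 * 4) = 416 bytes */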
+
+/* map crtc to vblank mask */
+uint32_t pipe2vbl(struct drm_crtc *crtc);
+struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
+
+/* should these be made into common util helpers?
+ */
+
+static inline int objects_lookup(struct drm_device *dev,
+		struct drm_file *filp, uint32_t pixel_format,
+		struct drm_gem_object **bos, uint32_t *handles)
+{
+	int i, n = drm_format_num_planes(pixel_format);
+
+	for (i = 0; i < n; i++) {
+		bos[i] = drm_gem_object_lookup(dev, filp, handles[i]);
+		if (!bos[i])
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	while (--i >= 0)
+		drm_gem_object_unreference_unlocked(bos[i]);
+
+	return -ENOENT;
+}
+
+#endif /* __OMAP_DRV_H__ */
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_encoder.c b/linux-imx/drivers/gpu/drm/omapdrm/omap_encoder.c
new file mode 100644
index 0000000..c29451b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -0,0 +1,188 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_encoder.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#include <linux/list.h>
+
+
+/*
+ * encoder funcs
+ */
+
+#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
+
+/* The encoder and connector both map to the same dssdev.. the encoder
+ * handles the 'active' parts, ie. anything that modifies the state
+ * of the hw, and the connector handles the 'read-only' parts, like
+ * detecting connection and reading edid.
+ */
+struct omap_encoder {
+	struct drm_encoder base;
+	struct omap_dss_device *dssdev;
+};
+
+struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder)
+{
+	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+
+	return omap_encoder->dssdev;
+}
+
+static void omap_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+	drm_encoder_cleanup(encoder);
+	kfree(omap_encoder);
+}
+
+static const struct drm_encoder_funcs omap_encoder_funcs = {
+	.destroy = omap_encoder_destroy,
+};
+
+/*
+ * The CRTC drm_crtc_helper_set_mode() doesn't really give us the right
+ * order.. the easiest way to work around this for now is to make all
+ * the encoder helpers no-ops and have the omap_crtc code take care
+ * of the sequencing and call us at the right points.
+ *
+ * Eventually, to handle connecting CRTCs to different encoders properly,
+ * either the CRTC helpers need to change or we need to replace
+ * drm_crtc_helper_set_mode(), but let's wait until atomic-modeset for
+ * that.
+ */
+
+static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void omap_encoder_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void omap_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void omap_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
+	.dpms = omap_encoder_dpms,
+	.mode_fixup = omap_encoder_mode_fixup,
+	.mode_set = omap_encoder_mode_set,
+	.prepare = omap_encoder_prepare,
+	.commit = omap_encoder_commit,
+};
+
+/*
+ * Instead of relying on the helpers for modeset, the omap_crtc code
+ * calls these functions in the proper sequence.
+ */
+
+int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled)
+{
+	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+	struct omap_dss_device *dssdev = omap_encoder->dssdev;
+	struct omap_dss_driver *dssdrv = dssdev->driver;
+
+	if (enabled) {
+		return dssdrv->enable(dssdev);
+	} else {
+		dssdrv->disable(dssdev);
+		return 0;
+	}
+}
+
+int omap_encoder_update(struct drm_encoder *encoder,
+		struct omap_overlay_manager *mgr,
+		struct omap_video_timings *timings)
+{
+	struct drm_device *dev = encoder->dev;
+	struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+	struct omap_dss_device *dssdev = omap_encoder->dssdev;
+	struct omap_dss_driver *dssdrv = dssdev->driver;
+	int ret;
+
+	dssdev->output->manager = mgr;
+
+	if (dssdrv->check_timings) {
+		ret = dssdrv->check_timings(dssdev, timings);
+	} else {
+		struct omap_video_timings t = {0};
+
+		dssdrv->get_timings(dssdev, &t);
+
+		if (memcmp(timings, &t, sizeof(struct omap_video_timings)))
+			ret = -EINVAL;
+		else
+			ret = 0;
+	}
+
+	if (ret) {
+		dev_err(dev->dev, "could not set timings: %d\n", ret);
+		return ret;
+	}
+
+	if (dssdrv->set_timings)
+		dssdrv->set_timings(dssdev, timings);
+
+	return 0;
+}
+
+/* initialize encoder */
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+		struct omap_dss_device *dssdev)
+{
+	struct drm_encoder *encoder = NULL;
+	struct omap_encoder *omap_encoder;
+
+	omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
+	if (!omap_encoder)
+		goto fail;
+
+	omap_encoder->dssdev = dssdev;
+
+	encoder = &omap_encoder->base;
+
+	drm_encoder_init(dev, encoder, &omap_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+	drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
+
+	return encoder;
+
+fail:
+	if (encoder)
+		omap_encoder_destroy(encoder);
+
+	return NULL;
+}
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_fb.c b/linux-imx/drivers/gpu/drm/omapdrm/omap_fb.c
new file mode 100644
index 0000000..8031402
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -0,0 +1,471 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_fb.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * framebuffer funcs
+ */
+
+/* per-format info: */
+struct format {
+	enum omap_color_mode dss_format;
+	uint32_t pixel_format;
+	struct {
+		int stride_bpp;           /* this times width is stride */
+		int sub_y;                /* sub-sample in y dimension */
+	} planes[4];
+	bool yuv;
+};
+
+static const struct format formats[] = {
+	/* 16bpp [A]RGB: */
+	{ OMAP_DSS_COLOR_RGB16,       DRM_FORMAT_RGB565,   {{2, 1}}, false }, /* RGB16-565 */
+	{ OMAP_DSS_COLOR_RGB12U,      DRM_FORMAT_RGBX4444, {{2, 1}}, false }, /* RGB12x-4444 */
+	{ OMAP_DSS_COLOR_RGBX16,      DRM_FORMAT_XRGB4444, {{2, 1}}, false }, /* xRGB12-4444 */
+	{ OMAP_DSS_COLOR_RGBA16,      DRM_FORMAT_RGBA4444, {{2, 1}}, false }, /* RGBA12-4444 */
+	{ OMAP_DSS_COLOR_ARGB16,      DRM_FORMAT_ARGB4444, {{2, 1}}, false }, /* ARGB16-4444 */
+	{ OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555, {{2, 1}}, false }, /* xRGB15-1555 */
+	{ OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555, {{2, 1}}, false }, /* ARGB16-1555 */
+	/* 24bpp RGB: */
+	{ OMAP_DSS_COLOR_RGB24P,      DRM_FORMAT_RGB888,   {{3, 1}}, false }, /* RGB24-888 */
+	/* 32bpp [A]RGB: */
+	{ OMAP_DSS_COLOR_RGBX32,      DRM_FORMAT_RGBX8888, {{4, 1}}, false }, /* RGBx24-8888 */
+	{ OMAP_DSS_COLOR_RGB24U,      DRM_FORMAT_XRGB8888, {{4, 1}}, false }, /* xRGB24-8888 */
+	{ OMAP_DSS_COLOR_RGBA32,      DRM_FORMAT_RGBA8888, {{4, 1}}, false }, /* RGBA32-8888 */
+	{ OMAP_DSS_COLOR_ARGB32,      DRM_FORMAT_ARGB8888, {{4, 1}}, false }, /* ARGB32-8888 */
+	/* YUV: */
+	{ OMAP_DSS_COLOR_NV12,        DRM_FORMAT_NV12,     {{1, 1}, {1, 2}}, true },
+	{ OMAP_DSS_COLOR_YUV2,        DRM_FORMAT_YUYV,     {{2, 1}}, true },
+	{ OMAP_DSS_COLOR_UYVY,        DRM_FORMAT_UYVY,     {{2, 1}}, true },
+};
+
+/* convert from overlay's pixel formats bitmask to an array of fourcc's */
+uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
+		uint32_t max_formats, enum omap_color_mode supported_modes)
+{
+	uint32_t nformats = 0;
+	int i = 0;
+
+	for (i = 0; i < ARRAY_SIZE(formats) && nformats < max_formats; i++)
+		if (formats[i].dss_format & supported_modes)
+			pixel_formats[nformats++] = formats[i].pixel_format;
+
+	return nformats;
+}
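+
+/* Eg. an overlay whose supported_modes include OMAP_DSS_COLOR_RGB16 and
+ * OMAP_DSS_COLOR_ARGB32 yields { DRM_FORMAT_RGB565, DRM_FORMAT_ARGB8888 }
+ * (in table order) and a return value of 2.
+ */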
+
+/* per-plane info for the fb: */
+struct plane {
+	struct drm_gem_object *bo;
+	uint32_t pitch;
+	uint32_t offset;
+	dma_addr_t paddr;
+};
+
+#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
+
+struct omap_framebuffer {
+	struct drm_framebuffer base;
+	const struct format *format;
+	struct plane planes[4];
+};
+
+static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
+		struct drm_file *file_priv,
+		unsigned int *handle)
+{
+	struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+	return drm_gem_handle_create(file_priv,
+			omap_fb->planes[0].bo, handle);
+}
+
+static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+	int i, n = drm_format_num_planes(fb->pixel_format);
+
+	DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+
+	drm_framebuffer_cleanup(fb);
+
+	for (i = 0; i < n; i++) {
+		struct plane *plane = &omap_fb->planes[i];
+		if (plane->bo)
+			drm_gem_object_unreference_unlocked(plane->bo);
+	}
+
+	kfree(omap_fb);
+}
+
+static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
+		struct drm_file *file_priv, unsigned flags, unsigned color,
+		struct drm_clip_rect *clips, unsigned num_clips)
+{
+	int i;
+
+	for (i = 0; i < num_clips; i++) {
+		omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
+					clips[i].x2 - clips[i].x1,
+					clips[i].y2 - clips[i].y1);
+	}
+
+	return 0;
+}
+
+static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
+	.create_handle = omap_framebuffer_create_handle,
+	.destroy = omap_framebuffer_destroy,
+	.dirty = omap_framebuffer_dirty,
+};
+
+static uint32_t get_linear_addr(struct plane *plane,
+		const struct format *format, int n, int x, int y)
+{
+	uint32_t offset;
+
+	offset = plane->offset +
+			(x * format->planes[n].stride_bpp) +
+			(y * plane->pitch / format->planes[n].sub_y);
+
+	return plane->paddr + offset;
+}
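+
+/* Worked example: for the NV12 UV plane (n=1: stride_bpp=1, sub_y=2) at
+ * x=4, y=10 with a 256-byte pitch, the result is
+ * paddr + offset + (4 * 1) + (10 * 256 / 2) = paddr + offset + 1284.
+ */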
+
+/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
+ */
+void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
+		struct omap_drm_window *win, struct omap_overlay_info *info)
+{
+	struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+	const struct format *format = omap_fb->format;
+	struct plane *plane = &omap_fb->planes[0];
+	uint32_t x, y, orient = 0;
+
+	info->color_mode = format->dss_format;
+
+	info->pos_x      = win->crtc_x;
+	info->pos_y      = win->crtc_y;
+	info->out_width  = win->crtc_w;
+	info->out_height = win->crtc_h;
+	info->width      = win->src_w;
+	info->height     = win->src_h;
+
+	x = win->src_x;
+	y = win->src_y;
+
+	if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
+		uint32_t w = win->src_w;
+		uint32_t h = win->src_h;
+
+		switch (win->rotation & 0xf) {
+		default:
+			dev_err(fb->dev->dev, "invalid rotation: %02x",
+					(uint32_t)win->rotation);
+			/* fallthru to default to no rotation */
+		case 0:
+		case BIT(DRM_ROTATE_0):
+			orient = 0;
+			break;
+		case BIT(DRM_ROTATE_90):
+			orient = MASK_XY_FLIP | MASK_X_INVERT;
+			break;
+		case BIT(DRM_ROTATE_180):
+			orient = MASK_X_INVERT | MASK_Y_INVERT;
+			break;
+		case BIT(DRM_ROTATE_270):
+			orient = MASK_XY_FLIP | MASK_Y_INVERT;
+			break;
+		}
+
+		if (win->rotation & BIT(DRM_REFLECT_X))
+			orient ^= MASK_X_INVERT;
+
+		if (win->rotation & BIT(DRM_REFLECT_Y))
+			orient ^= MASK_Y_INVERT;
+
+		/* adjust x,y offset for flip/invert: */
+		if (orient & MASK_XY_FLIP)
+			swap(w, h);
+		if (orient & MASK_Y_INVERT)
+			y += h - 1;
+		if (orient & MASK_X_INVERT)
+			x += w - 1;
+
+		omap_gem_rotated_paddr(plane->bo, orient, x, y, &info->paddr);
+		info->rotation_type = OMAP_DSS_ROT_TILER;
+		info->screen_width  = omap_gem_tiled_stride(plane->bo, orient);
+	} else {
+		info->paddr         = get_linear_addr(plane, format, 0, x, y);
+		info->rotation_type = OMAP_DSS_ROT_DMA;
+		info->screen_width  = plane->pitch;
+	}
+
+	/* convert to pixels: */
+	info->screen_width /= format->planes[0].stride_bpp;
+
+	if (format->dss_format == OMAP_DSS_COLOR_NV12) {
+		plane = &omap_fb->planes[1];
+
+		if (info->rotation_type == OMAP_DSS_ROT_TILER) {
+			WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED));
+			omap_gem_rotated_paddr(plane->bo, orient,
+					x/2, y/2, &info->p_uv_addr);
+		} else {
+			info->p_uv_addr = get_linear_addr(plane, format, 1, x, y);
+		}
+	} else {
+		info->p_uv_addr = 0;
+	}
+}
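+
+/* Eg. a TILED bo with win->rotation == BIT(DRM_ROTATE_90) gets
+ * orient = MASK_XY_FLIP | MASK_X_INVERT: w/h are swapped first, then
+ * x += w - 1 (the post-swap w, ie. the original src height) before the
+ * rotated paddr lookup.
+ */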
+
+/* Unpin 'a' (if not NULL) and pin 'b' (if not NULL).  Note that buffers
+ * to unpin are just pushed to the unpin fifo so that the caller can
+ * defer the actual unpin until vblank.
+ *
+ * Note if this fails (ie. something went very wrong!), all buffers are
+ * unpinned, and the caller disables the overlay.  We could have tried
+ * to revert back to the previous set of pinned buffers but if things are
+ * hosed there is no guarantee that would succeed.
+ */
+int omap_framebuffer_replace(struct drm_framebuffer *a,
+		struct drm_framebuffer *b, void *arg,
+		void (*unpin)(void *arg, struct drm_gem_object *bo))
+{
+	int ret = 0, i, na, nb;
+	struct omap_framebuffer *ofba = to_omap_framebuffer(a);
+	struct omap_framebuffer *ofbb = to_omap_framebuffer(b);
+	uint32_t pinned_mask = 0;
+
+	na = a ? drm_format_num_planes(a->pixel_format) : 0;
+	nb = b ? drm_format_num_planes(b->pixel_format) : 0;
+
+	for (i = 0; i < max(na, nb); i++) {
+		struct plane *pa, *pb;
+
+		pa = (i < na) ? &ofba->planes[i] : NULL;
+		pb = (i < nb) ? &ofbb->planes[i] : NULL;
+
+		if (pa)
+			unpin(arg, pa->bo);
+
+		if (pb && !ret) {
+			ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true);
+			if (!ret) {
+				omap_gem_dma_sync(pb->bo, DMA_TO_DEVICE);
+				pinned_mask |= (1 << i);
+			}
+		}
+	}
+
+	if (ret) {
+		/* something went wrong.. unpin what has been pinned */
+		for (i = 0; i < nb; i++) {
+			if (pinned_mask & (1 << i)) {
+				struct plane *pb = &ofbb->planes[i];
+				unpin(arg, pb->bo);
+			}
+		}
+	}
+
+	return ret;
+}
+
+struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
+{
+	struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+	if (p >= drm_format_num_planes(fb->pixel_format))
+		return NULL;
+	return omap_fb->planes[p].bo;
+}
+
+/* iterate thru all the connectors, returning the ones whose crtc is
+ * currently scanning out this fb..
+ */
+struct drm_connector *omap_framebuffer_get_next_connector(
+		struct drm_framebuffer *fb, struct drm_connector *from)
+{
+	struct drm_device *dev = fb->dev;
+	struct list_head *connector_list = &dev->mode_config.connector_list;
+	struct drm_connector *connector = from;
+
+	if (!from)
+		return list_first_entry(connector_list, typeof(*from), head);
+
+	list_for_each_entry_from(connector, connector_list, head) {
+		if (connector != from) {
+			struct drm_encoder *encoder = connector->encoder;
+			struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+			if (crtc && crtc->fb == fb)
+				return connector;
+		}
+	}
+
+	return NULL;
+}
+
+/* flush an area of the framebuffer (needed for manual-update displays,
+ * which are not automatically flushed)
+ */
+void omap_framebuffer_flush(struct drm_framebuffer *fb,
+		int x, int y, int w, int h)
+{
+	struct drm_connector *connector = NULL;
+
+	VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
+
+	while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
+		/* only consider connectors that are part of a chain */
+		if (connector->encoder && connector->encoder->crtc) {
+			/* TODO: maybe this should propagate thru the crtc,
+			 * which could do the coordinate translation..
+			 */
+			struct drm_crtc *crtc = connector->encoder->crtc;
+			int cx = max(0, x - crtc->x);
+			int cy = max(0, y - crtc->y);
+			int cw = w + (x - crtc->x) - cx;
+			int ch = h + (y - crtc->y) - cy;
+
+			omap_connector_flush(connector, cx, cy, cw, ch);
+		}
+	}
+}
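+
+/* Clip example: flushing x=10, w=100 against a crtc at crtc->x = 50 gives
+ * cx = max(0, 10 - 50) = 0 and cw = 100 + (10 - 50) - 0 = 60, ie. only the
+ * 60 columns that actually overlap the crtc get flushed.
+ */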
+
+#ifdef CONFIG_DEBUG_FS
+void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+	struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+	int i, n = drm_format_num_planes(fb->pixel_format);
+
+	seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
+			(char *)&fb->pixel_format);
+
+	for (i = 0; i < n; i++) {
+		struct plane *plane = &omap_fb->planes[i];
+		seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
+				i, plane->offset, plane->pitch);
+		omap_gem_describe(plane->bo, m);
+	}
+}
+#endif
+
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+		struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *bos[4];
+	struct drm_framebuffer *fb;
+	int ret;
+
+	ret = objects_lookup(dev, file, mode_cmd->pixel_format,
+			bos, mode_cmd->handles);
+	if (ret)
+		return ERR_PTR(ret);
+
+	fb = omap_framebuffer_init(dev, mode_cmd, bos);
+	if (IS_ERR(fb)) {
+		int i, n = drm_format_num_planes(mode_cmd->pixel_format);
+		for (i = 0; i < n; i++)
+			drm_gem_object_unreference_unlocked(bos[i]);
+	}
+	return fb;
+}
+
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+		struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+{
+	struct omap_framebuffer *omap_fb;
+	struct drm_framebuffer *fb = NULL;
+	const struct format *format = NULL;
+	int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+
+	DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
+			(char *)&mode_cmd->pixel_format);
+
+	for (i = 0; i < ARRAY_SIZE(formats); i++) {
+		if (formats[i].pixel_format == mode_cmd->pixel_format) {
+			format = &formats[i];
+			break;
+		}
+	}
+
+	if (!format) {
+		dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+				(char *)&mode_cmd->pixel_format);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
+	if (!omap_fb) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	fb = &omap_fb->base;
+	omap_fb->format = format;
+
+	for (i = 0; i < n; i++) {
+		struct plane *plane = &omap_fb->planes[i];
+		int size, pitch = mode_cmd->pitches[i];
+
+		if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) {
+			dev_err(dev->dev, "provided buffer pitch is too small! %d < %d\n",
+					pitch, mode_cmd->width * format->planes[i].stride_bpp);
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		size = pitch * mode_cmd->height / format->planes[i].sub_y;
+
+		if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) {
+			dev_err(dev->dev, "provided buffer object is too small! %d < %d\n",
+					bos[i]->size - mode_cmd->offsets[i], size);
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		plane->bo     = bos[i];
+		plane->offset = mode_cmd->offsets[i];
+		plane->pitch  = pitch;
+		plane->paddr  = 0;
+	}
+
+	drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+	ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
+	if (ret) {
+		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+		goto fail;
+	}
+
+	DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
+	return fb;
+
+fail:
+	if (fb)
+		omap_framebuffer_destroy(fb);
+
+	return ERR_PTR(ret);
+}
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_fbdev.c b/linux-imx/drivers/gpu/drm/omapdrm/omap_fbdev.c
new file mode 100644
index 0000000..b11ce60
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -0,0 +1,397 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_fbdev.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+
+MODULE_PARM_DESC(ywrap, "Enable ywrap scrolling (omap44xx and later, default 'y')");
+static bool ywrap_enabled = true;
+module_param_named(ywrap, ywrap_enabled, bool, 0644);
+
+/*
+ * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+ */
+
+#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base)
+
+struct omap_fbdev {
+	struct drm_fb_helper base;
+	struct drm_framebuffer *fb;
+	struct drm_gem_object *bo;
+	bool ywrap_enabled;
+
+	/* for deferred dmm roll when getting called in atomic ctx */
+	struct work_struct work;
+};
+
+static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
+static struct drm_fb_helper *get_fb(struct fb_info *fbi);
+
+static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	ssize_t res;
+
+	res = fb_sys_write(fbi, buf, count, ppos);
+	omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres);
+
+	return res;
+}
+
+static void omap_fbdev_fillrect(struct fb_info *fbi,
+		const struct fb_fillrect *rect)
+{
+	sys_fillrect(fbi, rect);
+	omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height);
+}
+
+static void omap_fbdev_copyarea(struct fb_info *fbi,
+		const struct fb_copyarea *area)
+{
+	sys_copyarea(fbi, area);
+	omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height);
+}
+
+static void omap_fbdev_imageblit(struct fb_info *fbi,
+		const struct fb_image *image)
+{
+	sys_imageblit(fbi, image);
+	omap_fbdev_flush(fbi, image->dx, image->dy,
+				image->width, image->height);
+}
+
+static void pan_worker(struct work_struct *work)
+{
+	struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
+	struct fb_info *fbi = fbdev->base.fbdev;
+	int npages;
+
+	/* DMM roll shifts in 4K pages: */
+	npages = fbi->fix.line_length >> PAGE_SHIFT;
+	omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages);
+}
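+
+/* Example of the math above, assuming 4k pages: a 2048px-wide 32bpp fb has
+ * line_length = 8192, so npages = 2 pages per row, and yoffset = 100 rolls
+ * the DMM mapping by 200 pages.
+ */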
+
+static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
+		struct fb_info *fbi)
+{
+	struct drm_fb_helper *helper = get_fb(fbi);
+	struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+
+	if (!helper)
+		goto fallback;
+
+	if (!fbdev->ywrap_enabled)
+		goto fallback;
+
+	if (drm_can_sleep()) {
+		pan_worker(&fbdev->work);
+	} else {
+		struct omap_drm_private *priv = helper->dev->dev_private;
+		queue_work(priv->wq, &fbdev->work);
+	}
+
+	return 0;
+
+fallback:
+	return drm_fb_helper_pan_display(var, fbi);
+}
+
+static struct fb_ops omap_fb_ops = {
+	.owner = THIS_MODULE,
+
+	/* Note: to properly handle manual update displays, we wrap the
+	 * basic fbdev ops which write to the framebuffer
+	 */
+	.fb_read = fb_sys_read,
+	.fb_write = omap_fbdev_write,
+	.fb_fillrect = omap_fbdev_fillrect,
+	.fb_copyarea = omap_fbdev_copyarea,
+	.fb_imageblit = omap_fbdev_imageblit,
+
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_pan_display = omap_fbdev_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int omap_fbdev_create(struct drm_fb_helper *helper,
+		struct drm_fb_helper_surface_size *sizes)
+{
+	struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+	struct drm_device *dev = helper->dev;
+	struct omap_drm_private *priv = dev->dev_private;
+	struct drm_framebuffer *fb = NULL;
+	union omap_gem_size gsize;
+	struct fb_info *fbi = NULL;
+	struct drm_mode_fb_cmd2 mode_cmd = {0};
+	dma_addr_t paddr;
+	int ret;
+
+	/* only doing ARGB32 since this is what is needed to alpha-blend
+	 * with video overlays:
+	 */
+	sizes->surface_bpp = 32;
+	sizes->surface_depth = 32;
+
+	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
+			sizes->surface_height, sizes->surface_bpp,
+			sizes->fb_width, sizes->fb_height);
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+			sizes->surface_depth);
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+
+	mode_cmd.pitches[0] = align_pitch(
+			mode_cmd.width * ((sizes->surface_bpp + 7) / 8),
+			mode_cmd.width, sizes->surface_bpp);
+
+	fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
+	if (fbdev->ywrap_enabled) {
+		/* need to align pitch to page size if using DMM scrolling */
+		mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE);
+	}
+
+	/* allocate backing bo */
+	gsize = (union omap_gem_size){
+		.bytes = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height),
+	};
+	DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index);
+	fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
+	if (!fbdev->bo) {
+		dev_err(dev->dev, "failed to allocate buffer object\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	fb = omap_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
+	if (IS_ERR(fb)) {
+		dev_err(dev->dev, "failed to allocate fb\n");
+		/* note: if fb creation failed, we can't rely on fb destroy
+		 * to unref the bo:
+		 */
+		drm_gem_object_unreference(fbdev->bo);
+		ret = PTR_ERR(fb);
+		goto fail;
+	}
+
+	/* note: this keeps the bo pinned.. which is perhaps not ideal,
+	 * but is needed as long as we use fb_mmap() to mmap to userspace
+	 * (since this happens using fix.smem_start).  Possibly we could
+	 * implement our own mmap using GEM mmap support to avoid this
+	 * (non-tiled buffer doesn't need to be pinned for fbcon to write
+	 * to it).  Then we just need to be sure that we are able to re-
+	 * pin it in case of an oops.
+	 */
+	ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
+	if (ret) {
+		dev_err(dev->dev,
+			"could not map (paddr)!  Skipping framebuffer alloc\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	fbi = framebuffer_alloc(0, dev->dev);
+	if (!fbi) {
+		dev_err(dev->dev, "failed to allocate fb info\n");
+		ret = -ENOMEM;
+		goto fail_unlock;
+	}
+
+	DBG("fbi=%p, dev=%p", fbi, dev);
+
+	fbdev->fb = fb;
+	helper->fb = fb;
+	helper->fbdev = fbi;
+
+	fbi->par = helper;
+	fbi->flags = FBINFO_DEFAULT;
+	fbi->fbops = &omap_fb_ops;
+
+	strcpy(fbi->fix.id, MODULE_NAME);
+
+	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto fail_unlock;
+	}
+
+	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+
+	dev->mode_config.fb_base = paddr;
+
+	fbi->screen_base = omap_gem_vaddr(fbdev->bo);
+	fbi->screen_size = fbdev->bo->size;
+	fbi->fix.smem_start = paddr;
+	fbi->fix.smem_len = fbdev->bo->size;
+
+	/* if we have DMM, then we can use it for scrolling by just
+	 * shuffling pages around in DMM rather than doing sw blit.
+	 */
+	if (fbdev->ywrap_enabled) {
+		DRM_INFO("Enabling DMM ywrap scrolling\n");
+		fbi->flags |= FBINFO_HWACCEL_YWRAP | FBINFO_READS_FAST;
+		fbi->fix.ywrapstep = 1;
+	}
+
+	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+	DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+
+fail_unlock:
+	mutex_unlock(&dev->struct_mutex);
+fail:
+
+	if (ret) {
+		if (fbi)
+			framebuffer_release(fbi);
+		if (fb) {
+			drm_framebuffer_unregister_private(fb);
+			drm_framebuffer_remove(fb);
+		}
+	}
+
+	return ret;
+}
+
+static void omap_crtc_fb_gamma_set(struct drm_crtc *crtc,
+		u16 red, u16 green, u16 blue, int regno)
+{
+	DBG("fbdev: set gamma");
+}
+
+static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
+		u16 *red, u16 *green, u16 *blue, int regno)
+{
+	DBG("fbdev: get gamma");
+}
+
+static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
+	.gamma_set = omap_crtc_fb_gamma_set,
+	.gamma_get = omap_crtc_fb_gamma_get,
+	.fb_probe = omap_fbdev_create,
+};
+
+static struct drm_fb_helper *get_fb(struct fb_info *fbi)
+{
+	if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
+		/* these are not the fb's you're looking for */
+		return NULL;
+	}
+	return fbi->par;
+}
+
+/* flush an area of the framebuffer (needed for manual-update displays,
+ * which are not automatically flushed)
+ */
+static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h)
+{
+	struct drm_fb_helper *helper = get_fb(fbi);
+
+	if (!helper)
+		return;
+
+	VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi);
+
+	omap_framebuffer_flush(helper->fb, x, y, w, h);
+}
+
+/* initialize fbdev helper */
+struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	struct omap_fbdev *fbdev = NULL;
+	struct drm_fb_helper *helper;
+	int ret = 0;
+
+	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+	if (!fbdev)
+		goto fail;
+
+	INIT_WORK(&fbdev->work, pan_worker);
+
+	helper = &fbdev->base;
+
+	helper->funcs = &omap_fb_helper_funcs;
+
+	ret = drm_fb_helper_init(dev, helper,
+			priv->num_crtcs, priv->num_connectors);
+	if (ret) {
+		dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+		goto fail;
+	}
+
+	drm_fb_helper_single_add_all_connectors(helper);
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
+	drm_fb_helper_initial_config(helper, 32);
+
+	priv->fbdev = helper;
+
+	return helper;
+
+fail:
+	kfree(fbdev);
+	return NULL;
+}
+
+void omap_fbdev_free(struct drm_device *dev)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	struct drm_fb_helper *helper = priv->fbdev;
+	struct omap_fbdev *fbdev;
+	struct fb_info *fbi;
+
+	DBG();
+
+	fbi = helper->fbdev;
+
+	/* only cleanup framebuffer if it is present */
+	if (fbi) {
+		unregister_framebuffer(fbi);
+		framebuffer_release(fbi);
+	}
+
+	drm_fb_helper_fini(helper);
+
+	fbdev = to_omap_fbdev(priv->fbdev);
+
+	/* this will free the backing object */
+	if (fbdev->fb) {
+		drm_framebuffer_unregister_private(fbdev->fb);
+		drm_framebuffer_remove(fbdev->fb);
+	}
+
+	kfree(fbdev);
+
+	priv->fbdev = NULL;
+}
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_gem.c b/linux-imx/drivers/gpu/drm/omapdrm/omap_gem.c
new file mode 100644
index 0000000..ebbdf41
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -0,0 +1,1507 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_gem.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include <linux/spinlock.h>
+#include <linux/shmem_fs.h>
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+/* remove these once drm core helpers are merged */
+struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+		bool dirty, bool accessed);
+int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+
+/*
+ * GEM buffer object implementation.
+ */
+
+#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
+
+/* note: we use upper 8 bits of flags for driver-internal flags: */
+#define OMAP_BO_DMA			0x01000000	/* actually is physically contiguous */
+#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
+#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */
+
+
+struct omap_gem_object {
+	struct drm_gem_object base;
+
+	struct list_head mm_list;
+
+	uint32_t flags;
+
+	/** width/height for tiled formats (rounded up to slot boundaries) */
+	uint16_t width, height;
+
+	/** roll applied when mapping to DMM */
+	uint32_t roll;
+
+	/**
+	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
+	 * is set and the paddr is valid.  Also if the buffer is remapped in
+	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
+	 * the physical address and OMAP_BO_DMA is not set, then you should
+	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
+	 * not removed from under your feet.
+	 *
+	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA capable
+	 * buffer is requested, but doesn't guarantee that it is.  Use the
+	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
+	 * physical address.
+	 */
+	dma_addr_t paddr;
+
+	/**
+	 * # of users of paddr
+	 */
+	uint32_t paddr_cnt;
+
+	/**
+	 * tiler block used when buffer is remapped in DMM/TILER.
+	 */
+	struct tiler_block *block;
+
+	/**
+	 * Array of backing pages, if allocated.  Note that pages are never
+	 * allocated for buffers originally allocated from contiguous memory
+	 */
+	struct page **pages;
+
+	/** addresses corresponding to pages in above array */
+	dma_addr_t *addrs;
+
+	/**
+	 * Virtual address, if mapped.
+	 */
+	void *vaddr;
+
+	/**
+	 * sync-object allocated on demand (if needed)
+	 *
+	 * Per-buffer sync-object for tracking pending and completed hw/dma
+	 * read and write operations.  The layout in memory is dictated by
+	 * the SGX firmware, which uses this information to stall the command
+	 * stream if a surface is not ready yet.
+	 *
+	 * Note that when buffer is used by SGX, the sync-object needs to be
+	 * allocated from a special heap of sync-objects.  This way many sync
+	 * objects can be packed in a page, and not waste GPU virtual address
+	 * space.  Because of this we have to have a omap_gem_set_sync_object()
+	 * API to allow replacement of the syncobj after it has (potentially)
+	 * already been allocated.  A bit ugly but I haven't thought of a
+	 * better alternative.
+	 */
+	struct {
+		uint32_t write_pending;
+		uint32_t write_complete;
+		uint32_t read_pending;
+		uint32_t read_complete;
+	} *sync;
+};
+
+static int get_pages(struct drm_gem_object *obj, struct page ***pages);
+static uint64_t mmap_offset(struct drm_gem_object *obj);
+
+/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
+ * not necessarily pinned in TILER all the time, and (b) when they are
+ * they are not necessarily page aligned, we reserve one or more small
+ * regions in each of the 2d containers to use as a user-GART where we
+ * can create a second page-aligned mapping of parts of the buffer
+ * being accessed from userspace.
+ *
+ * Note that we could optimize slightly when we know that multiple
+ * tiler containers are backed by the same PAT.. but I'll leave that
+ * for later..
+ */
+#define NUM_USERGART_ENTRIES 2
+struct usergart_entry {
+	struct tiler_block *block;	/* the reserved tiler block */
+	dma_addr_t paddr;
+	struct drm_gem_object *obj;	/* the current pinned obj */
+	pgoff_t obj_pgoff;		/* page offset of obj currently
+					   mapped in */
+};
+static struct {
+	struct usergart_entry entry[NUM_USERGART_ENTRIES];
+	int height;				/* height in rows */
+	int height_shift;		/* ilog2(height in rows) */
+	int slot_shift;			/* ilog2(width per slot) */
+	int stride_pfn;			/* stride in pages */
+	int last;				/* index of last used entry */
+} *usergart;
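+
+/* Eg. with NUM_USERGART_ENTRIES = 2 and a slot height of 64 rows (the
+ * example used in fault_2d below), each 2d container reserves two 64-page
+ * regions; faults cycle thru them round-robin via 'last', evicting
+ * whatever bo was previously mapped there.
+ */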
+
+static void evict_entry(struct drm_gem_object *obj,
+		enum tiler_fmt fmt, struct usergart_entry *entry)
+{
+	if (obj->dev->dev_mapping) {
+		struct omap_gem_object *omap_obj = to_omap_bo(obj);
+		int n = usergart[fmt].height;
+		size_t size = PAGE_SIZE * n;
+		loff_t off = mmap_offset(obj) +
+				(entry->obj_pgoff << PAGE_SHIFT);
+		const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+		if (m > 1) {
+			int i;
+			/* if stride > PAGE_SIZE then sparse mapping: */
+			for (i = n; i > 0; i--) {
+				unmap_mapping_range(obj->dev->dev_mapping,
+						off, PAGE_SIZE, 1);
+				off += PAGE_SIZE * m;
+			}
+		} else {
+			unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+		}
+	}
+
+	entry->obj = NULL;
+}
+
+/* Evict a buffer from usergart, if it is mapped there */
+static void evict(struct drm_gem_object *obj)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+	if (omap_obj->flags & OMAP_BO_TILED) {
+		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+		int i;
+
+		if (!usergart)
+			return;
+
+		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
+			struct usergart_entry *entry = &usergart[fmt].entry[i];
+			if (entry->obj == obj)
+				evict_entry(obj, fmt, entry);
+		}
+	}
+}
+
+/* GEM objects can either be allocated from contiguous memory (in which
+ * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  Non-
+ * contiguous buffers can still be remapped in TILER/DMM if they need to
+ * be contiguous.. but we don't rely on that all the time: to reduce
+ * pressure on TILER/DMM space, a buffer that we know at allocation time
+ * will need to be scanned out can instead be allocated contiguously up
+ * front.
+ */
+static inline bool is_shmem(struct drm_gem_object *obj)
+{
+	return obj->filp != NULL;
+}
+
+/**
+ * shmem buffers that are mapped cached can simulate coherency by using
+ * page faulting to keep track of dirty pages
+ */
+static inline bool is_cached_coherent(struct drm_gem_object *obj)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	return is_shmem(obj) &&
+		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
+}
+
+static DEFINE_SPINLOCK(sync_lock);
+
+/** ensure backing pages are allocated */
+static int omap_gem_attach_pages(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	struct page **pages;
+	int npages = obj->size >> PAGE_SHIFT;
+	int i, ret;
+	dma_addr_t *addrs;
+
+	WARN_ON(omap_obj->pages);
+
+	/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
+	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
+	 * we actually want CMA memory for it all anyways..
+	 */
+	pages = _drm_gem_get_pages(obj, GFP_KERNEL);
+	if (IS_ERR(pages)) {
+		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
+		return PTR_ERR(pages);
+	}
+
+	/* for non-cached buffers, ensure the new pages are clean because
+	 * DSS, GPU, etc. are not cache coherent:
+	 */
+	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
+		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
+		if (!addrs) {
+			ret = -ENOMEM;
+			goto free_pages;
+		}
+
+		for (i = 0; i < npages; i++) {
+			addrs[i] = dma_map_page(dev->dev, pages[i],
+					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+		}
+	} else {
+		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
+		if (!addrs) {
+			ret = -ENOMEM;
+			goto free_pages;
+		}
+	}
+
+	omap_obj->addrs = addrs;
+	omap_obj->pages = pages;
+
+	return 0;
+
+free_pages:
+	_drm_gem_put_pages(obj, pages, true, false);
+
+	return ret;
+}
+
+/** release backing pages */
+static void omap_gem_detach_pages(struct drm_gem_object *obj)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+	/* for non-cached buffers, ensure the new pages are clean because
+	 * DSS, GPU, etc. are not cache coherent:
+	 */
+	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
+		int i, npages = obj->size >> PAGE_SHIFT;
+		for (i = 0; i < npages; i++) {
+			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
+					PAGE_SIZE, DMA_BIDIRECTIONAL);
+		}
+	}
+
+	kfree(omap_obj->addrs);
+	omap_obj->addrs = NULL;
+
+	_drm_gem_put_pages(obj, omap_obj->pages, true, false);
+	omap_obj->pages = NULL;
+}
+
+/* get buffer flags */
+uint32_t omap_gem_flags(struct drm_gem_object *obj)
+{
+	return to_omap_bo(obj)->flags;
+}
+
+/** get mmap offset */
+static uint64_t mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (!obj->map_list.map) {
+		/* Make it mmapable */
+		size_t size = omap_gem_mmap_size(obj);
+		int ret = _drm_gem_create_mmap_offset_size(obj, size);
+
+		if (ret) {
+			dev_err(dev->dev, "could not allocate mmap offset\n");
+			return 0;
+		}
+	}
+
+	return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
+{
+	uint64_t offset;
+	mutex_lock(&obj->dev->struct_mutex);
+	offset = mmap_offset(obj);
+	mutex_unlock(&obj->dev->struct_mutex);
+	return offset;
+}
+
+/** get mmap size */
+size_t omap_gem_mmap_size(struct drm_gem_object *obj)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	size_t size = obj->size;
+
+	if (omap_obj->flags & OMAP_BO_TILED) {
+		/* for tiled buffers, the virtual size has stride rounded up
+		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
+		 * 32kb later!).  But we don't back the entire buffer with
+		 * pages, only the valid picture part.. so need to adjust for
+		 * this in the size used to mmap and generate mmap offset
+		 */
+		size = tiler_vsize(gem2fmt(omap_obj->flags),
+				omap_obj->width, omap_obj->height);
+	}
+
+	return size;
+}
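+
+/* Eg. a tiled 8-bit buffer 600 bytes wide mmaps with each row padded out
+ * to a 4kb virtual stride (per the comment above), so the mmap size is
+ * roughly 4096 * height rather than 600 * height.
+ */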
+
+/* get tiled size, returns -EINVAL if not tiled buffer */
+int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	if (omap_obj->flags & OMAP_BO_TILED) {
+		*w = omap_obj->width;
+		*h = omap_obj->height;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+/* Normal handling for the case of faulting in non-tiled buffers */
+static int fault_1d(struct drm_gem_object *obj,
+		struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	unsigned long pfn;
+	pgoff_t pgoff;
+
+	/* We don't use vmf->pgoff since that has the fake offset: */
+	pgoff = ((unsigned long)vmf->virtual_address -
+			vma->vm_start) >> PAGE_SHIFT;
+
+	if (omap_obj->pages) {
+		omap_gem_cpu_sync(obj, pgoff);
+		pfn = page_to_pfn(omap_obj->pages[pgoff]);
+	} else {
+		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
+		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
+	}
+
+	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+			pfn, pfn << PAGE_SHIFT);
+
+	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+}
+
+/* Special handling for the case of faulting in 2d tiled buffers */
+static int fault_2d(struct drm_gem_object *obj,
+		struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	struct usergart_entry *entry;
+	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+	struct page *pages[64];  /* XXX is this too much to have on stack? */
+	unsigned long pfn;
+	pgoff_t pgoff, base_pgoff;
+	void __user *vaddr;
+	int i, ret, slots;
+
+	/*
+	 * Note the height of the slot is also equal to the number of pages
+	 * that need to be mapped in to fill a 4kb-wide CPU page.  If the slot
+	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
+	 */
+	const int n = usergart[fmt].height;
+	const int n_shift = usergart[fmt].height_shift;
+
+	/*
+	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
+	 * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
+	 * into account in some of the math, so figure out virtual stride
+	 * in pages
+	 */
+	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+
+	/* We don't use vmf->pgoff since that has the fake offset: */
+	pgoff = ((unsigned long)vmf->virtual_address -
+			vma->vm_start) >> PAGE_SHIFT;
+
+	/*
+	 * Actual address we start mapping at is rounded down to previous slot
+	 * boundary in the y direction:
+	 */
+	base_pgoff = round_down(pgoff, m << n_shift);
+
+	/* figure out buffer width in slots */
+	slots = omap_obj->width >> usergart[fmt].slot_shift;
+
+	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+
+	entry = &usergart[fmt].entry[usergart[fmt].last];
+
+	/* evict previous buffer using this usergart entry, if any: */
+	if (entry->obj)
+		evict_entry(entry->obj, fmt, entry);
+
+	entry->obj = obj;
+	entry->obj_pgoff = base_pgoff;
+
+	/* now convert base_pgoff to phys offset from virt offset: */
+	base_pgoff = (base_pgoff >> n_shift) * slots;
+
+	/* for wider-than 4k.. figure out which part of the slot-row we want: */
+	if (m > 1) {
+		int off = pgoff % m;
+		entry->obj_pgoff += off;
+		base_pgoff /= m;
+		slots = min(slots - (off << n_shift), n);
+		base_pgoff += off << n_shift;
+		vaddr += off << PAGE_SHIFT;
+	}
+
+	/*
+	 * Map in pages. Beyond the valid pixel part of the buffer, we set
+	 * pages[i] to NULL to get a dummy page mapped in.. if someone
+	 * reads/writes it they will get random/undefined content, but at
+	 * least it won't be corrupting whatever other random page used to
+	 * be mapped in, or other undefined behavior.
+	 */
+	memcpy(pages, &omap_obj->pages[base_pgoff],
+			sizeof(struct page *) * slots);
+	memset(pages + slots, 0,
+			sizeof(struct page *) * (n - slots));
+
+	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
+	if (ret) {
+		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
+		return ret;
+	}
+
+	pfn = entry->paddr >> PAGE_SHIFT;
+
+	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+			pfn, pfn << PAGE_SHIFT);
+
+	for (i = n; i > 0; i--) {
+		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
+		pfn += usergart[fmt].stride_pfn;
+		vaddr += PAGE_SIZE * m;
+	}
+
+	/* simple round-robin: */
+	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;
+
+	return 0;
+}
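+
+/* A worked fault_2d example, assuming m = 1, n = 64 (n_shift = 6): a fault
+ * at pgoff 70 rounds down to base_pgoff = 64, and the phys offset becomes
+ * (64 >> 6) * slots = 1 * slots, ie. the start of the second slot-row.
+ */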
+
+/**
+ * omap_gem_fault		-	pagefault handler for GEM objects
+ * @vma: the VMA of the GEM object
+ * @vmf: fault detail
+ *
+ * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+ * does most of the work for us including the actual map/unmap calls
+ * but we need to do the actual page work.
+ *
+ * The VMA was set up by GEM. In doing so it also ensured that the
+ * vma->vm_private_data points to the GEM object that is backing this
+ * mapping.
+ */
+int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	struct drm_device *dev = obj->dev;
+	struct page **pages;
+	int ret;
+
+	/* Make sure we don't parallel update on a fault, nor move or remove
+	 * something from beneath our feet
+	 */
+	mutex_lock(&dev->struct_mutex);
+
+	/* if a shmem backed object, make sure we have pages attached now */
+	ret = get_pages(obj, &pages);
+	if (ret)
+		goto fail;
+
+	/* where should we do corresponding put_pages().. we are mapping
+	 * the original page, rather than thru a GART, so we can't rely
+	 * on eviction to trigger this.  But munmap() of all mappings should
+	 * probably trigger put_pages()?
+	 */
+
+	if (omap_obj->flags & OMAP_BO_TILED)
+		ret = fault_2d(obj, vma, vmf);
+	else
+		ret = fault_1d(obj, vma, vmf);
+
+fail:
+	mutex_unlock(&dev->struct_mutex);
+	switch (ret) {
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
+}
+
+/** We override mainly to fix up some of the vm mapping flags.. */
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret) {
+		DBG("mmap failed: %d", ret);
+		return ret;
+	}
+
+	return omap_gem_mmap_obj(vma->vm_private_data, vma);
+}
+
+int omap_gem_mmap_obj(struct drm_gem_object *obj,
+		struct vm_area_struct *vma)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+
+	if (omap_obj->flags & OMAP_BO_WC) {
+		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
+		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+	} else {
+		/*
+		 * We do have some private objects, at least for scanout buffers
+		 * on hardware without DMM/TILER.  But these are allocated write-
+		 * combine
+		 */
+		if (WARN_ON(!obj->filp))
+			return -EINVAL;
+
+		/*
+		 * Shunt off cached objs to shmem file so they have their own
+		 * address_space (so unmap_mapping_range does what we want,
+		 * in particular in the case of mmap'd dmabufs)
+		 */
+		fput(vma->vm_file);
+		vma->vm_pgoff = 0;
+		vma->vm_file  = get_file(obj->filp);
+
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	}
+
+	return 0;
+}
+
+
+/**
+ * omap_gem_dumb_create	-	create a dumb buffer
+ * @file: our client file
+ * @dev: our device
+ * @args: the requested arguments copied from userspace
+ *
+ * Allocate a buffer suitable for use for a frame buffer of the
+ * form described by user space. Give userspace a handle by which
+ * to reference it.
+ */
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+		struct drm_mode_create_dumb *args)
+{
+	union omap_gem_size gsize;
+
+	/* in case someone tries to feed us a completely bogus stride: */
+	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
+	args->size = PAGE_ALIGN(args->pitch * args->height);
+
+	gsize = (union omap_gem_size){
+		.bytes = args->size,
+	};
+
+	return omap_gem_new_handle(dev, file, gsize,
+			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
+}
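+
+/* Eg. a 1920x1080 32bpp dumb buffer: align_pitch() yields a 7680-byte
+ * pitch (already 32-byte aligned) and args->size becomes
+ * PAGE_ALIGN(7680 * 1080) = 8294400 bytes, ie. exactly 2025 4k pages.
+ */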
+
+/**
+ * omap_gem_dumb_destroy	-	destroy a dumb buffer
+ * @file: client file
+ * @dev: our DRM device
+ * @handle: the object handle
+ *
+ * Destroy a handle that was created via omap_gem_dumb_create.
+ */
+int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+		uint32_t handle)
+{
+	/* No special work needed, drop the reference and see what falls out */
+	return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * omap_gem_dumb_map_offset	-	buffer mapping for dumb interface
+ * @file: our drm client file
+ * @dev: drm device
+ * @handle: GEM handle to the object (from dumb_create)
+ * @offset: returned fake mmap offset
+ *
+ * Do the necessary setup to allow the mapping of the frame buffer
+ * into user memory. We don't have to do much here at the moment.
+ */
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+		uint32_t handle, uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int ret = 0;
+
+	/* GEM does all our handle to object mapping */
+	obj = drm_gem_object_lookup(dev, file, handle);
+	if (obj == NULL) {
+		ret = -ENOENT;
+		goto fail;
+	}
+
+	*offset = omap_gem_mmap_offset(obj);
+
+	drm_gem_object_unreference_unlocked(obj);
+
+fail:
+	return ret;
+}
+
+/* Set scrolling position.  This allows us to implement fast scrolling
+ * for console.
+ *
+ * Call only from non-atomic contexts.
+ */
+int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	uint32_t npages = obj->size >> PAGE_SHIFT;
+	int ret = 0;
+
+	if (roll > npages) {
+		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
+		return -EINVAL;
+	}
+
+	omap_obj->roll = roll;
+
+	mutex_lock(&obj->dev->struct_mutex);
+
+	/* if we aren't mapped yet, we don't need to do anything */
+	if (omap_obj->block) {
+		struct page **pages;
+		ret = get_pages(obj, &pages);
+		if (ret)
+			goto fail;
+		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
+		if (ret)
+			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
+	}
+
+fail:
+	mutex_unlock(&obj->dev->struct_mutex);
+
+	return ret;
+}
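+
+/* For example, pan_worker() in omap_fbdev.c uses this: it converts the
+ * fb's yoffset into pages (yoffset * pages-per-row) and rolls the DMM
+ * mapping, so a pan becomes a re-pin instead of a sw blit.
+ */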
+
+/* Sync the buffer for CPU access.. note pages should already be
+ * attached, ie. omap_gem_get_pages()
+ */
+void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
+{
+	struct drm_device *dev = obj->dev;
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
+		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
+				PAGE_SIZE, DMA_BIDIRECTIONAL);
+		omap_obj->addrs[pgoff] = 0;
+	}
+}
+
+/* sync the buffer for DMA access */
+void omap_gem_dma_sync(struct drm_gem_object *obj,
+		enum dma_data_direction dir)
+{
+	struct drm_device *dev = obj->dev;
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+	if (is_cached_coherent(obj)) {
+		int i, npages = obj->size >> PAGE_SHIFT;
+		struct page **pages = omap_obj->pages;
+		bool dirty = false;
+
+		for (i = 0; i < npages; i++) {
+			if (!omap_obj->addrs[i]) {
+				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
+						PAGE_SIZE, DMA_BIDIRECTIONAL);
+				dirty = true;
+			}
+		}
+
+		if (dirty) {
+			unmap_mapping_range(obj->filp->f_mapping, 0,
+					omap_gem_mmap_size(obj), 1);
+		}
+	}
+}
+
+/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
+ * already contiguous, remap it to pin in physically contiguous memory.. (ie.
+ * map in TILER)
+ */
+int omap_gem_get_paddr(struct drm_gem_object *obj,
+		dma_addr_t *paddr, bool remap)
+{
+	struct omap_drm_private *priv = obj->dev->dev_private;
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	int ret = 0;
+
+	mutex_lock(&obj->dev->struct_mutex);
+
+	if (remap && is_shmem(obj) && priv->has_dmm) {
+		if (omap_obj->paddr_cnt == 0) {
+			struct page **pages;
+			uint32_t npages = obj->size >> PAGE_SHIFT;
+			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+			struct tiler_block *block;
+
+			BUG_ON(omap_obj->block);
+
+			ret = get_pages(obj, &pages);
+			if (ret)
+				goto fail;
+
+			if (omap_obj->flags & OMAP_BO_TILED) {
+				block = tiler_reserve_2d(fmt,
+						omap_obj->width,
+						omap_obj->height, 0);
+			} else {
+				block = tiler_reserve_1d(obj->size);
+			}
+
+			if (IS_ERR(block)) {
+				ret = PTR_ERR(block);
+				dev_err(obj->dev->dev,
+					"could not remap: %d (%d)\n", ret, fmt);
+				goto fail;
+			}
+
+			/* TODO: enable async refill.. */
+			ret = tiler_pin(block, pages, npages,
+					omap_obj->roll, true);
+			if (ret) {
+				tiler_release(block);
+				dev_err(obj->dev->dev,
+						"could not pin: %d\n", ret);
+				goto fail;
+			}
+
+			omap_obj->paddr = tiler_ssptr(block);
+			omap_obj->block = block;
+
+			DBG("got paddr: %08x", omap_obj->paddr);
+		}
+
+		omap_obj->paddr_cnt++;
+
+		*paddr = omap_obj->paddr;
+	} else if (omap_obj->flags & OMAP_BO_DMA) {
+		*paddr = omap_obj->paddr;
+	} else {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+fail:
+	mutex_unlock(&obj->dev->struct_mutex);
+
+	return ret;
+}
+
+/* Release physical address when DMA is no longer being performed.. this
+ * could potentially unpin and unmap buffers from TILER
+ */
+int omap_gem_put_paddr(struct drm_gem_object *obj)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	int ret = 0;
+
+	mutex_lock(&obj->dev->struct_mutex);
+	if (omap_obj->paddr_cnt > 0) {
+		omap_obj->paddr_cnt--;
+		if (omap_obj->paddr_cnt == 0) {
+			ret = tiler_unpin(omap_obj->block);
+			if (ret) {
+				dev_err(obj->dev->dev,
+					"could not unpin pages: %d\n", ret);
+				goto fail;
+			}
+			ret = tiler_release(omap_obj->block);
+			if (ret) {
+				dev_err(obj->dev->dev,
+					"could not release unmap: %d\n", ret);
+			}
+			omap_obj->block = NULL;
+		}
+	}
+fail:
+	mutex_unlock(&obj->dev->struct_mutex);
+	return ret;
+}
+
+/* Get rotated scanout address (only valid if already pinned), at the
+ * specified orientation and x,y offset from top-left corner of buffer
+ * (only valid for tiled 2d buffers)
+ */
+int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
+		int x, int y, dma_addr_t *paddr)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	int ret = -EINVAL;
+
+	mutex_lock(&obj->dev->struct_mutex);
+	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
+			(omap_obj->flags & OMAP_BO_TILED)) {
+		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
+		ret = 0;
+	}
+	mutex_unlock(&obj->dev->struct_mutex);
+	return ret;
+}
+
+/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
+int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	int ret = -EINVAL;
+	if (omap_obj->flags & OMAP_BO_TILED)
+		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
+	return ret;
+}
+
+/* acquire pages when needed (for example, for DMA where physically
+ * contiguous buffer is not required)
+ */
+static int get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	int ret = 0;
+
+	if (is_shmem(obj) && !omap_obj->pages) {
+		ret = omap_gem_attach_pages(obj);
+		if (ret) {
+			dev_err(obj->dev->dev, "could not attach pages\n");
+			return ret;
+		}
+	}
+
+	/* TODO: even phys-contig.. we should have a list of pages? */
+	*pages = omap_obj->pages;
+
+	return 0;
+}
+
+/* if !remap, and we don't have pages backing, then fail, rather than
+ * increasing the pin count (which we don't really do yet anyways,
+ * because we don't support swapping pages back out).  And 'remap'
+ * might not be quite the right name, but I wanted to keep it working
+ * similarly to omap_gem_get_paddr().  Note though that mutex is not
+ * acquired if !remap (because this can be called in atomic ctxt),
+ * but probably omap_gem_get_paddr() should be changed to work in the
+ * same way.  If !remap, a matching omap_gem_put_pages() call is not
+ * required (and should not be made).
+ */
+int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
+		bool remap)
+{
+	int ret;
+	if (!remap) {
+		struct omap_gem_object *omap_obj = to_omap_bo(obj);
+		if (!omap_obj->pages)
+			return -ENOMEM;
+		*pages = omap_obj->pages;
+		return 0;
+	}
+	mutex_lock(&obj->dev->struct_mutex);
+	ret = get_pages(obj, pages);
+	mutex_unlock(&obj->dev->struct_mutex);
+	return ret;
+}
+
+/* release pages when DMA no longer being performed */
+int omap_gem_put_pages(struct drm_gem_object *obj)
+{
+	/* do something here if we dynamically attach/detach pages.. at
+	 * least they would no longer need to be pinned if everyone has
+	 * released the pages..
+	 */
+	return 0;
+}
+
+/* Get kernel virtual address for CPU access.. this more or less only
+ * exists for omap_fbdev.  This should be called with struct_mutex
+ * held.
+ */
+void *omap_gem_vaddr(struct drm_gem_object *obj)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	if (!omap_obj->vaddr) {
+		struct page **pages;
+		int ret = get_pages(obj, &pages);
+		if (ret)
+			return ERR_PTR(ret);
+		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+	}
+	return omap_obj->vaddr;
+}
+
+#ifdef CONFIG_PM
+/* re-pin objects in DMM in resume path: */
+int omap_gem_resume(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct omap_drm_private *priv = drm_dev->dev_private;
+	struct omap_gem_object *omap_obj;
+	int ret = 0;
+
+	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
+		if (omap_obj->block) {
+			struct drm_gem_object *obj = &omap_obj->base;
+			uint32_t npages = obj->size >> PAGE_SHIFT;
+			WARN_ON(!omap_obj->pages);  /* this can't happen */
+			ret = tiler_pin(omap_obj->block,
+					omap_obj->pages, npages,
+					omap_obj->roll, true);
+			if (ret) {
+				dev_err(dev, "could not repin: %d\n", ret);
+				return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+{
+	struct drm_device *dev = obj->dev;
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	uint64_t off = 0;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (obj->map_list.map)
+		off = (uint64_t)obj->map_list.hash.key;
+
+	seq_printf(m, "%08x: %2d (%2d) %08llx %08zx (%2d) %p %4d",
+			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
+			off, omap_obj->paddr, omap_obj->paddr_cnt,
+			omap_obj->vaddr, omap_obj->roll);
+
+	if (omap_obj->flags & OMAP_BO_TILED) {
+		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
+		if (omap_obj->block) {
+			struct tcm_area *area = &omap_obj->block->area;
+			seq_printf(m, " (%dx%d, %dx%d)",
+					area->p0.x, area->p0.y,
+					area->p1.x, area->p1.y);
+		}
+	} else {
+		seq_printf(m, " %zu", obj->size);
+	}
+
+	seq_printf(m, "\n");
+}
+
+void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
+{
+	struct omap_gem_object *omap_obj;
+	int count = 0;
+	size_t size = 0;
+
+	list_for_each_entry(omap_obj, list, mm_list) {
+		struct drm_gem_object *obj = &omap_obj->base;
+		seq_printf(m, "   ");
+		omap_gem_describe(obj, m);
+		count++;
+		size += obj->size;
+	}
+
+	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+}
+#endif
+
+/* Buffer Synchronization:
+ */
+
+struct omap_gem_sync_waiter {
+	struct list_head list;
+	struct omap_gem_object *omap_obj;
+	enum omap_gem_op op;
+	uint32_t read_target, write_target;
+	/* notify called w/ sync_lock held */
+	void (*notify)(void *arg);
+	void *arg;
+};
+
+/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
+ * the read and/or write target count is achieved, which can call a user
+ * callback (ex. to kick 3d and/or 2d), wake up a blocked task (prep for
+ * cpu access), etc.
+ */
+static LIST_HEAD(waiters);
+
+static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
+{
+	struct omap_gem_object *omap_obj = waiter->omap_obj;
+	if ((waiter->op & OMAP_GEM_READ) &&
+			(omap_obj->sync->read_complete < waiter->read_target))
+		return true;
+	if ((waiter->op & OMAP_GEM_WRITE) &&
+			(omap_obj->sync->write_complete < waiter->write_target))
+		return true;
+	return false;
+}
+
+/* macro for sync debug.. */
+#define SYNCDBG 0
+#define SYNC(fmt, ...) do { if (SYNCDBG) \
+		printk(KERN_ERR "%s:%d: "fmt"\n", \
+				__func__, __LINE__, ##__VA_ARGS__); \
+	} while (0)
+
+
+static void sync_op_update(void)
+{
+	struct omap_gem_sync_waiter *waiter, *n;
+	list_for_each_entry_safe(waiter, n, &waiters, list) {
+		if (!is_waiting(waiter)) {
+			list_del(&waiter->list);
+			SYNC("notify: %p", waiter);
+			waiter->notify(waiter->arg);
+			kfree(waiter);
+		}
+	}
+}
+
+static inline int sync_op(struct drm_gem_object *obj,
+		enum omap_gem_op op, bool start)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	int ret = 0;
+
+	spin_lock(&sync_lock);
+
+	if (!omap_obj->sync) {
+		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
+		if (!omap_obj->sync) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+	}
+
+	if (start) {
+		if (op & OMAP_GEM_READ)
+			omap_obj->sync->read_pending++;
+		if (op & OMAP_GEM_WRITE)
+			omap_obj->sync->write_pending++;
+	} else {
+		if (op & OMAP_GEM_READ)
+			omap_obj->sync->read_complete++;
+		if (op & OMAP_GEM_WRITE)
+			omap_obj->sync->write_complete++;
+		sync_op_update();
+	}
+
+unlock:
+	spin_unlock(&sync_lock);
+
+	return ret;
+}
+
+/* it is a bit lame to handle updates in this sort of polling way, but
+ * in case of PVR, the GPU can directly update read/write complete
+ * values, and not really tell us which ones it updated.. this also
+ * means that sync_lock is not quite sufficient.  So we'll need to
+ * do something a bit better when it comes time to add support for
+ * separate 2d hw..
+ */
+void omap_gem_op_update(void)
+{
+	spin_lock(&sync_lock);
+	sync_op_update();
+	spin_unlock(&sync_lock);
+}
+
+/* mark the start of read and/or write operation */
+int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+	return sync_op(obj, op, true);
+}
+
+int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+	return sync_op(obj, op, false);
+}
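+
+/* Illustrative sketch (not part of the original driver): bracketing a hw
+ * write with the op counters, then blocking the CPU until it completes:
+ *
+ *	omap_gem_op_start(obj, OMAP_GEM_WRITE);    // hw begins writing
+ *	... kick hardware; from its completion path: ...
+ *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
+ *
+ *	omap_gem_op_sync(obj, OMAP_GEM_WRITE);     // CPU waits for the writes
+ */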
+
+static DECLARE_WAIT_QUEUE_HEAD(sync_event);
+
+static void sync_notify(void *arg)
+{
+	struct task_struct **waiter_task = arg;
+	*waiter_task = NULL;
+	wake_up_all(&sync_event);
+}
+
+int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	int ret = 0;
+	if (omap_obj->sync) {
+		struct task_struct *waiter_task = current;
+		struct omap_gem_sync_waiter *waiter =
+				kzalloc(sizeof(*waiter), GFP_KERNEL);
+
+		if (!waiter)
+			return -ENOMEM;
+
+		waiter->omap_obj = omap_obj;
+		waiter->op = op;
+		waiter->read_target = omap_obj->sync->read_pending;
+		waiter->write_target = omap_obj->sync->write_pending;
+		waiter->notify = sync_notify;
+		waiter->arg = &waiter_task;
+
+		spin_lock(&sync_lock);
+		if (is_waiting(waiter)) {
+			SYNC("waited: %p", waiter);
+			list_add_tail(&waiter->list, &waiters);
+			spin_unlock(&sync_lock);
+			ret = wait_event_interruptible(sync_event,
+					(waiter_task == NULL));
+			spin_lock(&sync_lock);
+			if (waiter_task) {
+				SYNC("interrupted: %p", waiter);
+				/* we were interrupted */
+				list_del(&waiter->list);
+				waiter_task = NULL;
+			} else {
+				/* freed in sync_op_update() */
+				waiter = NULL;
+			}
+		}
+		spin_unlock(&sync_lock);
+
+		kfree(waiter);
+	}
+	return ret;
+}
+
+/* call fxn(arg), either synchronously or asynchronously if the op
+ * is currently blocked..  fxn() can be called from any context
+ *
+ * (TODO for now fxn is called back from whichever context calls
+ * omap_gem_op_update().. but this could be better defined later
+ * if needed)
+ *
+ * TODO more code in common w/ _sync()..
+ */
+int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
+		void (*fxn)(void *arg), void *arg)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	if (omap_obj->sync) {
+		struct omap_gem_sync_waiter *waiter =
+				kzalloc(sizeof(*waiter), GFP_ATOMIC);
+
+		if (!waiter)
+			return -ENOMEM;
+
+		waiter->omap_obj = omap_obj;
+		waiter->op = op;
+		waiter->read_target = omap_obj->sync->read_pending;
+		waiter->write_target = omap_obj->sync->write_pending;
+		waiter->notify = fxn;
+		waiter->arg = arg;
+
+		spin_lock(&sync_lock);
+		if (is_waiting(waiter)) {
+			SYNC("waited: %p", waiter);
+			list_add_tail(&waiter->list, &waiters);
+			spin_unlock(&sync_lock);
+			return 0;
+		}
+
+		spin_unlock(&sync_lock);
+	}
+
+	/* no waiting.. */
+	fxn(arg);
+
+	return 0;
+}
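+
+/* Illustrative sketch (not part of the original driver): deferring work
+ * until pending ops complete; 'kick_scanout' is a hypothetical callback:
+ *
+ *	static void kick_scanout(void *arg) { ... }
+ *	...
+ *	omap_gem_op_async(obj, OMAP_GEM_WRITE, kick_scanout, crtc);
+ *
+ * If nothing is pending, the callback runs synchronously before the call
+ * returns; otherwise it runs later from omap_gem_op_update().
+ */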
+
+/* special API so PVR can update the buffer to use a sync-object allocated
+ * from its sync-obj heap.  Only used for a newly allocated (from PVR's
+ * perspective) sync-object, so we overwrite the new syncobj w/ values
+ * from the already allocated syncobj (if there is one)
+ */
+int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
+{
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+	int ret = 0;
+
+	spin_lock(&sync_lock);
+
+	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
+		/* clearing a previously set syncobj */
+		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
+				  GFP_ATOMIC);
+		if (!syncobj) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
+		omap_obj->sync = syncobj;
+	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
+		/* replacing an existing syncobj */
+		if (omap_obj->sync) {
+			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
+			kfree(omap_obj->sync);
+		}
+		omap_obj->flags |= OMAP_BO_EXT_SYNC;
+		omap_obj->sync = syncobj;
+	}
+
+unlock:
+	spin_unlock(&sync_lock);
+	return ret;
+}
+
+int omap_gem_init_object(struct drm_gem_object *obj)
+{
+	return -EINVAL;          /* unused */
+}
+
+/* don't call directly.. called from GEM core when it is time to actually
+ * free the object..
+ */
+void omap_gem_free_object(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+	evict(obj);
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	list_del(&omap_obj->mm_list);
+
+	if (obj->map_list.map)
+		drm_gem_free_mmap_offset(obj);
+
+	/* this means the object is still pinned.. which really should
+	 * not happen.  I think..
+	 */
+	WARN_ON(omap_obj->paddr_cnt > 0);
+
+	/* don't free externally allocated backing memory */
+	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
+		if (omap_obj->pages)
+			omap_gem_detach_pages(obj);
+
+		if (!is_shmem(obj)) {
+			dma_free_writecombine(dev->dev, obj->size,
+					omap_obj->vaddr, omap_obj->paddr);
+		} else if (omap_obj->vaddr) {
+			vunmap(omap_obj->vaddr);
+		}
+	}
+
+	/* don't free externally allocated syncobj */
+	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
+		kfree(omap_obj->sync);
+
+	drm_gem_object_release(obj);
+
+	kfree(obj);
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	obj = omap_gem_new(dev, gsize, flags);
+	if (!obj)
+		return -ENOMEM;
+
+	ret = drm_gem_handle_create(file, obj, handle);
+	if (ret) {
+		drm_gem_object_release(obj);
+		kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
+		return ret;
+	}
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(obj);
+
+	return 0;
+}
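+
+/* Illustrative sketch (not part of the original driver): an ioctl handler
+ * might use the helper above roughly like this, with a hypothetical 'args':
+ *
+ *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(args->size) };
+ *	return omap_gem_new_handle(dev, file, gsize,
+ *			OMAP_BO_WC, &args->handle);
+ */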
+
+/* GEM buffer object constructor */
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+		union omap_gem_size gsize, uint32_t flags)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	struct omap_gem_object *omap_obj;
+	struct drm_gem_object *obj = NULL;
+	size_t size;
+	int ret;
+
+	if (flags & OMAP_BO_TILED) {
+		if (!usergart) {
+			dev_err(dev->dev, "Tiled buffers require DMM\n");
+			goto fail;
+		}
+
+		/* tiled buffers are always shmem paged backed.. when they are
+		 * scanned out, they are remapped into DMM/TILER
+		 */
+		flags &= ~OMAP_BO_SCANOUT;
+
+		/* currently don't allow cached buffers.. there is some caching
+		 * stuff that needs to be handled better
+		 */
+		flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
+		flags |= OMAP_BO_WC;
+
+		/* align dimensions to slot boundaries... */
+		tiler_align(gem2fmt(flags),
+				&gsize.tiled.width, &gsize.tiled.height);
+
+		/* ...and calculate size based on aligned dimensions */
+		size = tiler_size(gem2fmt(flags),
+				gsize.tiled.width, gsize.tiled.height);
+	} else {
+		size = PAGE_ALIGN(gsize.bytes);
+	}
+
+	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
+	if (!omap_obj)
+		goto fail;
+
+	list_add(&omap_obj->mm_list, &priv->obj_list);
+
+	obj = &omap_obj->base;
+
+	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
+		/* attempt to allocate contiguous memory if we don't
+		 * have DMM for remapping discontiguous buffers
+		 */
+		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
+				&omap_obj->paddr, GFP_KERNEL);
+		if (omap_obj->vaddr)
+			flags |= OMAP_BO_DMA;
+
+	}
+
+	omap_obj->flags = flags;
+
+	if (flags & OMAP_BO_TILED) {
+		omap_obj->width = gsize.tiled.width;
+		omap_obj->height = gsize.tiled.height;
+	}
+
+	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
+		ret = drm_gem_private_object_init(dev, obj, size);
+	else
+		ret = drm_gem_object_init(dev, obj, size);
+
+	if (ret)
+		goto fail;
+
+	return obj;
+
+fail:
+	if (obj)
+		omap_gem_free_object(obj);
+
+	return NULL;
+}
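+
+/* Illustrative sketch (not part of the original driver): a tiled buffer is
+ * described via the union's 'tiled' member rather than 'bytes', assuming
+ * the OMAP_BO_TILED_16 flag from the uapi header:
+ *
+ *	union omap_gem_size gsize = {
+ *		.tiled = { .width = 1920, .height = 1080 },
+ *	};
+ *	struct drm_gem_object *obj =
+ *		omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
+ */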
+
+/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
+void omap_gem_init(struct drm_device *dev)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	const enum tiler_fmt fmts[] = {
+			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
+	};
+	int i, j;
+
+	if (!dmm_is_available()) {
+		/* DMM only supported on OMAP4 and later, so this isn't fatal */
+		dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
+		return;
+	}
+
+	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
+	if (!usergart)
+		return;
+
+	/* reserve 4k aligned/wide regions for userspace mappings: */
+	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
+		uint16_t h = 1, w = PAGE_SIZE >> i;
+		tiler_align(fmts[i], &w, &h);
+		/* note: since each region is 1 4kb page wide, and minimum
+		 * number of rows, the height ends up being the same as the
+		 * # of pages in the region
+		 */
+		usergart[i].height = h;
+		usergart[i].height_shift = ilog2(h);
+		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
+		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
+		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
+			struct usergart_entry *entry = &usergart[i].entry[j];
+			struct tiler_block *block =
+					tiler_reserve_2d(fmts[i], w, h,
+							PAGE_SIZE);
+			if (IS_ERR(block)) {
+				dev_err(dev->dev,
+						"reserve failed: %d, %d, %ld\n",
+						i, j, PTR_ERR(block));
+				return;
+			}
+			entry->paddr = tiler_ssptr(block);
+			entry->block = block;
+
+			DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
+					entry->paddr,
+					usergart[i].stride_pfn << PAGE_SHIFT);
+		}
+	}
+
+	priv->has_dmm = true;
+}
+
+void omap_gem_deinit(struct drm_device *dev)
+{
+	/* I believe we can rely on there being no more outstanding GEM
+	 * objects which could depend on usergart/dmm at this point.
+	 */
+	kfree(usergart);
+}
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/linux-imx/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
new file mode 100644
index 0000000..be7cd97
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -0,0 +1,224 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include <linux/dma-buf.h>
+
+static struct sg_table *omap_gem_map_dma_buf(
+		struct dma_buf_attachment *attachment,
+		enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attachment->dmabuf->priv;
+	struct sg_table *sg;
+	dma_addr_t paddr;
+	int ret;
+
+	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+	if (!sg)
+		return ERR_PTR(-ENOMEM);
+
+	/* camera, etc, need physically contiguous.. but we need a
+	 * better way to know this..
+	 */
+	ret = omap_gem_get_paddr(obj, &paddr, true);
+	if (ret)
+		goto out;
+
+	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
+	if (ret)
+		goto out;
+
+	sg_init_table(sg->sgl, 1);
+	sg_dma_len(sg->sgl) = obj->size;
+	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
+	sg_dma_address(sg->sgl) = paddr;
+
+	/* this should be after _get_paddr() to ensure we have pages attached */
+	omap_gem_dma_sync(obj, dir);
+
+	return sg;
+out:
+	kfree(sg);
+	return ERR_PTR(ret);
+}
+
+static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+		struct sg_table *sg, enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attachment->dmabuf->priv;
+	omap_gem_put_paddr(obj);
+	sg_free_table(sg);
+	kfree(sg);
+}
+
+static void omap_gem_dmabuf_release(struct dma_buf *buffer)
+{
+	struct drm_gem_object *obj = buffer->priv;
+	/* release reference that was taken when dmabuf was exported
+	 * in omap_gem_prime_set()..
+	 */
+	drm_gem_object_unreference_unlocked(obj);
+}
+
+
+static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
+		size_t start, size_t len, enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = buffer->priv;
+	struct page **pages;
+	if (omap_gem_flags(obj) & OMAP_BO_TILED) {
+		/* TODO we would need to pin at least part of the buffer to
+		 * get de-tiled view.  For now just reject it.
+		 */
+		return -ENOMEM;
+	}
+	/* make sure we have the pages: */
+	return omap_gem_get_pages(obj, &pages, true);
+}
+
+static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
+		size_t start, size_t len, enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = buffer->priv;
+	omap_gem_put_pages(obj);
+}
+
+
+static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
+		unsigned long page_num)
+{
+	struct drm_gem_object *obj = buffer->priv;
+	struct page **pages;
+	omap_gem_get_pages(obj, &pages, false);
+	omap_gem_cpu_sync(obj, page_num);
+	return kmap_atomic(pages[page_num]);
+}
+
+static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
+		unsigned long page_num, void *addr)
+{
+	kunmap_atomic(addr);
+}
+
+static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
+		unsigned long page_num)
+{
+	struct drm_gem_object *obj = buffer->priv;
+	struct page **pages;
+	omap_gem_get_pages(obj, &pages, false);
+	omap_gem_cpu_sync(obj, page_num);
+	return kmap(pages[page_num]);
+}
+
+static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
+		unsigned long page_num, void *addr)
+{
+	struct drm_gem_object *obj = buffer->priv;
+	struct page **pages;
+	omap_gem_get_pages(obj, &pages, false);
+	kunmap(pages[page_num]);
+}
+
+/*
+ * TODO maybe we can split up drm_gem_mmap to avoid duplicating
+ * some here.. or at least have a drm_dmabuf_mmap helper.
+ */
+static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
+		struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = buffer->priv;
+
+	if (WARN_ON(!obj->filp))
+		return -EINVAL;
+
+	/* Check for valid size. */
+	if (omap_gem_mmap_size(obj) < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!obj->dev->driver->gem_vm_ops)
+		return -EINVAL;
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_ops = obj->dev->driver->gem_vm_ops;
+	vma->vm_private_data = obj;
+	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	vma->vm_ops->open(vma);
+
+	return omap_gem_mmap_obj(obj, vma);
+}
+
+static struct dma_buf_ops omap_dmabuf_ops = {
+		.map_dma_buf = omap_gem_map_dma_buf,
+		.unmap_dma_buf = omap_gem_unmap_dma_buf,
+		.release = omap_gem_dmabuf_release,
+		.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
+		.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
+		.kmap_atomic = omap_gem_dmabuf_kmap_atomic,
+		.kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
+		.kmap = omap_gem_dmabuf_kmap,
+		.kunmap = omap_gem_dmabuf_kunmap,
+		.mmap = omap_gem_dmabuf_mmap,
+};
+
+struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
+		struct drm_gem_object *obj, int flags)
+{
+	return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags);
+}
+
+struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
+		struct dma_buf *buffer)
+{
+	struct drm_gem_object *obj;
+
+	/* is this one of our own objects? */
+	if (buffer->ops == &omap_dmabuf_ops) {
+		obj = buffer->priv;
+		/* is it from our device? */
+		if (obj->dev == dev) {
+			/*
+			 * Importing a dmabuf exported from our own gem
+			 * increases the refcount on the gem itself instead of
+			 * the f_count of the dmabuf.
+			 */
+			drm_gem_object_reference(obj);
+			return obj;
+		}
+	}
+
+	/*
+	 * TODO add support for importing buffers from other devices..
+	 * for now we don't need this but would be nice to add eventually
+	 */
+	return ERR_PTR(-EINVAL);
+}
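+
+/* Illustrative sketch (not part of the original driver): exporting a GEM
+ * object and importing the resulting dma-buf on the same device hands back
+ * the original object with an extra reference (O_RDWR as the file flags is
+ * an assumption here):
+ *
+ *	struct dma_buf *buf = omap_gem_prime_export(dev, obj, O_RDWR);
+ *	struct drm_gem_object *again = omap_gem_prime_import(dev, buf);
+ *	// again == obj on success
+ */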
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_gem_helpers.c b/linux-imx/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
new file mode 100644
index 0000000..f9eb679
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
@@ -0,0 +1,169 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* temporary copy of drm_gem_{get,put}_pages() until the
+ * "drm/gem: add functions to get/put pages" patch is merged..
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/shmem_fs.h>
+
+#include <drm/drmP.h>
+
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+	struct inode *inode;
+	struct address_space *mapping;
+	struct page *p, **pages;
+	int i, npages;
+
+	/* This is the shared memory object that backs the GEM resource */
+	inode = file_inode(obj->filp);
+	mapping = inode->i_mapping;
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (pages == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	gfpmask |= mapping_gfp_mask(mapping);
+
+	for (i = 0; i < npages; i++) {
+		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		if (IS_ERR(p))
+			goto fail;
+		pages[i] = p;
+
+		/* There is a hypothetical issue w/ drivers that require
+		 * buffer memory in the low 4GB.. if the pages are un-
+		 * pinned, and swapped out, they can end up swapped back
+		 * in above 4GB.  If pages are already in memory, then
+		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
+		 * even if the already in-memory page disobeys the mask.
+		 *
+		 * It is only a theoretical issue today, because none of
+		 * the devices with this limitation can be populated with
+		 * enough memory to trigger the issue.  But this BUG_ON()
+		 * is here as a reminder in case the problem with
+		 * shmem_read_mapping_page_gfp() isn't solved by the time
+		 * it does become a real issue.
+		 *
+		 * See this thread: http://lkml.org/lkml/2011/7/11/238
+		 */
+		BUG_ON((gfpmask & __GFP_DMA32) &&
+				(page_to_pfn(p) >= 0x00100000UL));
+	}
+
+	return pages;
+
+fail:
+	while (i--)
+		page_cache_release(pages[i]);
+
+	drm_free_large(pages);
+	return ERR_CAST(p);
+}
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ */
+void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+		bool dirty, bool accessed)
+{
+	int i, npages;
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	for (i = 0; i < npages; i++) {
+		if (dirty)
+			set_page_dirty(pages[i]);
+
+		if (accessed)
+			mark_page_accessed(pages[i]);
+
+		/* Undo the reference we took when populating the table */
+		page_cache_release(pages[i]);
+	}
+
+	drm_free_large(pages);
+}
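+
+/* Illustrative sketch (not part of the original driver): the two helpers
+ * above pair up around CPU/DMA access to a shmem-backed object:
+ *
+ *	struct page **pages = _drm_gem_get_pages(obj, GFP_KERNEL);
+ *	if (IS_ERR(pages))
+ *		return PTR_ERR(pages);
+ *	... read/write the pages ...
+ *	_drm_gem_put_pages(obj, pages, true, false);  // dirty, not accessed
+ */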
+
+int
+_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list;
+	struct drm_local_map *map;
+	int ret = 0;
+
+	/* Set the object up for mmap'ing */
+	list = &obj->map_list;
+	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+	if (!list->map)
+		return -ENOMEM;
+
+	map = list->map;
+	map->type = _DRM_GEM;
+	map->size = size;
+	map->handle = obj;
+
+	/* Get a DRM GEM mmap offset allocated... */
+	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+			size / PAGE_SIZE, 0, 0);
+
+	if (!list->file_offset_node) {
+		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+		ret = -ENOSPC;
+		goto out_free_list;
+	}
+
+	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+			size / PAGE_SIZE, 0);
+	if (!list->file_offset_node) {
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
+	list->hash.key = list->file_offset_node->start;
+	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+	if (ret) {
+		DRM_ERROR("failed to add to map hash\n");
+		goto out_free_mm;
+	}
+
+	return 0;
+
+out_free_mm:
+	drm_mm_put_block(list->file_offset_node);
+out_free_list:
+	kfree(list->map);
+	list->map = NULL;
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_irq.c b/linux-imx/drivers/gpu/drm/omapdrm/omap_irq.c
new file mode 100644
index 0000000..9263db1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -0,0 +1,327 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_irq.c
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+static DEFINE_SPINLOCK(list_lock);
+
+static void omap_irq_error_handler(struct omap_drm_irq *irq,
+		uint32_t irqstatus)
+{
+	DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+/* call with list_lock and dispc runtime held */
+static void omap_irq_update(struct drm_device *dev)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	struct omap_drm_irq *irq;
+	uint32_t irqmask = priv->vblank_mask;
+
+	BUG_ON(!spin_is_locked(&list_lock));
+
+	list_for_each_entry(irq, &priv->irq_list, node)
+		irqmask |= irq->irqmask;
+
+	DBG("irqmask=%08x", irqmask);
+
+	dispc_write_irqenable(irqmask);
+	dispc_read_irqenable();        /* flush posted write */
+}
+
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	unsigned long flags;
+
+	dispc_runtime_get();
+	spin_lock_irqsave(&list_lock, flags);
+
+	if (!WARN_ON(irq->registered)) {
+		irq->registered = true;
+		list_add(&irq->node, &priv->irq_list);
+		omap_irq_update(dev);
+	}
+
+	spin_unlock_irqrestore(&list_lock, flags);
+	dispc_runtime_put();
+}
+
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+	unsigned long flags;
+
+	dispc_runtime_get();
+	spin_lock_irqsave(&list_lock, flags);
+
+	if (!WARN_ON(!irq->registered)) {
+		irq->registered = false;
+		list_del(&irq->node);
+		omap_irq_update(dev);
+	}
+
+	spin_unlock_irqrestore(&list_lock, flags);
+	dispc_runtime_put();
+}
+
+struct omap_irq_wait {
+	struct omap_drm_irq irq;
+	int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+	struct omap_irq_wait *wait =
+			container_of(irq, struct omap_irq_wait, irq);
+	wait->count--;
+	wake_up_all(&wait_event);
+}
+
+struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
+		uint32_t irqmask, int count)
+{
+	struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+
+	if (!wait)
+		return NULL;
+
+	wait->irq.irq = wait_irq;
+	wait->irq.irqmask = irqmask;
+	wait->count = count;
+	omap_irq_register(dev, &wait->irq);
+	return wait;
+}
+
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+		unsigned long timeout)
+{
+	int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout);
+	omap_irq_unregister(dev, &wait->irq);
+	kfree(wait);
+	if (ret == 0)
+		return -ETIMEDOUT;
+	return 0;
+}
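+
+/* Illustrative sketch (not part of the original driver): waiting for two
+ * frame-done interrupts with a 100ms timeout, assuming the
+ * DISPC_IRQ_FRAMEDONE bit from omapdss:
+ *
+ *	struct omap_irq_wait *wait =
+ *		omap_irq_wait_init(dev, DISPC_IRQ_FRAMEDONE, 2);
+ *	if (omap_irq_wait(dev, wait, msecs_to_jiffies(100)))
+ *		dev_err(dev->dev, "timed out waiting for framedone\n");
+ */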
+
+/**
+ * enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc_id: which CRTC's vblank interrupt to enable
+ *
+ * Enable vblank interrupts for @crtc.  If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	struct drm_crtc *crtc = priv->crtcs[crtc_id];
+	unsigned long flags;
+
+	DBG("dev=%p, crtc=%d", dev, crtc_id);
+
+	dispc_runtime_get();
+	spin_lock_irqsave(&list_lock, flags);
+	priv->vblank_mask |= pipe2vbl(crtc);
+	omap_irq_update(dev);
+	spin_unlock_irqrestore(&list_lock, flags);
+	dispc_runtime_put();
+
+	return 0;
+}
+
+/**
+ * disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc_id: which CRTC's vblank interrupt to disable
+ *
+ * Disable vblank interrupts for @crtc.  If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	struct drm_crtc *crtc = priv->crtcs[crtc_id];
+	unsigned long flags;
+
+	DBG("dev=%p, crtc=%d", dev, crtc_id);
+
+	dispc_runtime_get();
+	spin_lock_irqsave(&list_lock, flags);
+	priv->vblank_mask &= ~pipe2vbl(crtc);
+	omap_irq_update(dev);
+	spin_unlock_irqrestore(&list_lock, flags);
+	dispc_runtime_put();
+}
+
+irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	struct omap_drm_private *priv = dev->dev_private;
+	struct omap_drm_irq *handler, *n;
+	unsigned long flags;
+	unsigned int id;
+	u32 irqstatus;
+
+	irqstatus = dispc_read_irqstatus();
+	dispc_clear_irqstatus(irqstatus);
+	dispc_read_irqstatus();        /* flush posted write */
+
+	VERB("irqs: %08x", irqstatus);
+
+	for (id = 0; id < priv->num_crtcs; id++) {
+		struct drm_crtc *crtc = priv->crtcs[id];
+
+		if (irqstatus & pipe2vbl(crtc))
+			drm_handle_vblank(dev, id);
+	}
+
+	spin_lock_irqsave(&list_lock, flags);
+	list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
+		if (handler->irqmask & irqstatus) {
+			spin_unlock_irqrestore(&list_lock, flags);
+			handler->irq(handler, handler->irqmask & irqstatus);
+			spin_lock_irqsave(&list_lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+void omap_irq_preinstall(struct drm_device *dev)
+{
+	DBG("dev=%p", dev);
+	dispc_runtime_get();
+	dispc_clear_irqstatus(0xffffffff);
+	dispc_runtime_put();
+}
+
+int omap_irq_postinstall(struct drm_device *dev)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	struct omap_drm_irq *error_handler = &priv->error_handler;
+
+	DBG("dev=%p", dev);
+
+	INIT_LIST_HEAD(&priv->irq_list);
+
+	error_handler->irq = omap_irq_error_handler;
+	error_handler->irqmask = DISPC_IRQ_OCP_ERR;
+
+	/* for now ignore DISPC_IRQ_SYNC_LOST_DIGIT.. really I think
+	 * we just need to ignore it while enabling tv-out
+	 */
+	error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
+
+	omap_irq_register(dev, error_handler);
+
+	return 0;
+}
+
+void omap_irq_uninstall(struct drm_device *dev)
+{
+	DBG("dev=%p", dev);
+	/* TODO: we probably need to call drm_irq_uninstall() somewhere too */
+}
+
+/*
+ * We need a special version, instead of just using drm_irq_install(),
+ * because we need to register the irq via omapdss.  Once omapdss and
+ * omapdrm are merged together we can assign the dispc hwmod data to
+ * ourselves and drop these and just use drm_irq_{install,uninstall}()
+ */
+
+int omap_drm_irq_install(struct drm_device *dev)
+{
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (dev->irq_enabled) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EBUSY;
+	}
+	dev->irq_enabled = 1;
+	mutex_unlock(&dev->struct_mutex);
+
+	/* Before installing handler */
+	if (dev->driver->irq_preinstall)
+		dev->driver->irq_preinstall(dev);
+
+	ret = dispc_request_irq(dev->driver->irq_handler, dev);
+
+	if (ret < 0) {
+		mutex_lock(&dev->struct_mutex);
+		dev->irq_enabled = 0;
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	/* After installing handler */
+	if (dev->driver->irq_postinstall)
+		ret = dev->driver->irq_postinstall(dev);
+
+	if (ret < 0) {
+		mutex_lock(&dev->struct_mutex);
+		dev->irq_enabled = 0;
+		mutex_unlock(&dev->struct_mutex);
+		dispc_free_irq(dev);
+	}
+
+	return ret;
+}
+
+int omap_drm_irq_uninstall(struct drm_device *dev)
+{
+	unsigned long irqflags;
+	int irq_enabled, i;
+
+	mutex_lock(&dev->struct_mutex);
+	irq_enabled = dev->irq_enabled;
+	dev->irq_enabled = 0;
+	mutex_unlock(&dev->struct_mutex);
+
+	/*
+	 * Wake up any waiters so they don't hang.
+	 */
+	if (dev->num_crtcs) {
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+		for (i = 0; i < dev->num_crtcs; i++) {
+			DRM_WAKEUP(&dev->vbl_queue[i]);
+			dev->vblank_enabled[i] = 0;
+			dev->last_vblank[i] =
+				dev->driver->get_vblank_counter(dev, i);
+		}
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+	}
+
+	if (!irq_enabled)
+		return -EINVAL;
+
+	if (dev->driver->irq_uninstall)
+		dev->driver->irq_uninstall(dev);
+
+	dispc_free_irq(dev);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/omap_plane.c b/linux-imx/drivers/gpu/drm/omapdrm/omap_plane.c
new file mode 100644
index 0000000..8d225d7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -0,0 +1,454 @@
+/*
+ * drivers/gpu/drm/omapdrm/omap_plane.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kfifo.h>
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+/* some hackery because omapdss has an 'enum omap_plane' (which would be
+ * better named omap_plane_id).. and compiler seems unhappy about having
+ * both a 'struct omap_plane' and 'enum omap_plane'
+ */
+#define omap_plane _omap_plane
+
+/*
+ * plane funcs
+ */
+
+struct callback {
+	void (*fxn)(void *);
+	void *arg;
+};
+
+#define to_omap_plane(x) container_of(x, struct omap_plane, base)
+
+struct omap_plane {
+	struct drm_plane base;
+	int id;  /* TODO rename omap_plane -> omap_plane_id in omapdss so I can use the enum */
+	const char *name;
+	struct omap_overlay_info info;
+	struct omap_drm_apply apply;
+
+	/* position/orientation of scanout within the fb: */
+	struct omap_drm_window win;
+	bool enabled;
+
+	/* last fb that we pinned: */
+	struct drm_framebuffer *pinned_fb;
+
+	uint32_t nformats;
+	uint32_t formats[32];
+
+	struct omap_drm_irq error_irq;
+
+	/* set of bo's pending unpin until next post_apply() */
+	DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
+
+	/* XXX maybe get rid of this and handle vblank in crtc too? */
+	struct callback apply_done_cb;
+};
+
+static void unpin(void *arg, struct drm_gem_object *bo)
+{
+	struct drm_plane *plane = arg;
+	struct omap_plane *omap_plane = to_omap_plane(plane);
+
+	if (kfifo_put(&omap_plane->unpin_fifo,
+			(const struct drm_gem_object **)&bo)) {
+		/* also hold a ref so it isn't free'd while pinned */
+		drm_gem_object_reference(bo);
+	} else {
+		dev_err(plane->dev->dev, "unpin fifo full!\n");
+		omap_gem_put_paddr(bo);
+	}
+}
+
+/* update which fb (if any) is pinned for scanout */
+static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
+{
+	struct omap_plane *omap_plane = to_omap_plane(plane);
+	struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
+
+	if (pinned_fb != fb) {
+		int ret;
+
+		DBG("%p -> %p", pinned_fb, fb);
+
+		if (fb)
+			drm_framebuffer_reference(fb);
+
+		ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
+
+		if (pinned_fb)
+			drm_framebuffer_unreference(pinned_fb);
+
+		if (ret) {
+			dev_err(plane->dev->dev, "could not swap %p -> %p\n",
+					omap_plane->pinned_fb, fb);
+			if (fb)
+				drm_framebuffer_unreference(fb);
+			omap_plane->pinned_fb = NULL;
+			return ret;
+		}
+
+		omap_plane->pinned_fb = fb;
+	}
+
+	return 0;
+}
+
+static void omap_plane_pre_apply(struct omap_drm_apply *apply)
+{
+	struct omap_plane *omap_plane =
+			container_of(apply, struct omap_plane, apply);
+	struct omap_drm_window *win = &omap_plane->win;
+	struct drm_plane *plane = &omap_plane->base;
+	struct drm_device *dev = plane->dev;
+	struct omap_overlay_info *info = &omap_plane->info;
+	struct drm_crtc *crtc = plane->crtc;
+	enum omap_channel channel;
+	bool enabled = omap_plane->enabled && crtc;
+	bool ilace, replication;
+	int ret;
+
+	DBG("%s, enabled=%d", omap_plane->name, enabled);
+
+	/* if fb has changed, pin new fb: */
+	update_pin(plane, enabled ? plane->fb : NULL);
+
+	if (!enabled) {
+		dispc_ovl_enable(omap_plane->id, false);
+		return;
+	}
+
+	channel = omap_crtc_channel(crtc);
+
+	/* update scanout: */
+	omap_framebuffer_update_scanout(plane->fb, win, info);
+
+	DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
+			info->out_width, info->out_height,
+			info->screen_width);
+	DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
+			info->paddr, info->p_uv_addr);
+
+	/* TODO: */
+	ilace = false;
+	replication = false;
+
+	/* and finally, update omapdss: */
+	ret = dispc_ovl_setup(omap_plane->id, info,
+			replication, omap_crtc_timings(crtc), false);
+	if (ret) {
+		dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret);
+		return;
+	}
+
+	dispc_ovl_enable(omap_plane->id, true);
+	dispc_ovl_set_channel_out(omap_plane->id, channel);
+}
+
+static void omap_plane_post_apply(struct omap_drm_apply *apply)
+{
+	struct omap_plane *omap_plane =
+			container_of(apply, struct omap_plane, apply);
+	struct drm_plane *plane = &omap_plane->base;
+	struct omap_overlay_info *info = &omap_plane->info;
+	struct drm_gem_object *bo = NULL;
+	struct callback cb;
+
+	cb = omap_plane->apply_done_cb;
+	omap_plane->apply_done_cb.fxn = NULL;
+
+	while (kfifo_get(&omap_plane->unpin_fifo, &bo)) {
+		omap_gem_put_paddr(bo);
+		drm_gem_object_unreference_unlocked(bo);
+	}
+
+	if (cb.fxn)
+		cb.fxn(cb.arg);
+
+	if (omap_plane->enabled) {
+		omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
+				info->out_width, info->out_height);
+	}
+}
+
+static int apply(struct drm_plane *plane)
+{
+	if (plane->crtc) {
+		struct omap_plane *omap_plane = to_omap_plane(plane);
+		return omap_crtc_apply(plane->crtc, &omap_plane->apply);
+	}
+	return 0;
+}
+
+int omap_plane_mode_set(struct drm_plane *plane,
+		struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		int crtc_x, int crtc_y,
+		unsigned int crtc_w, unsigned int crtc_h,
+		uint32_t src_x, uint32_t src_y,
+		uint32_t src_w, uint32_t src_h,
+		void (*fxn)(void *), void *arg)
+{
+	struct omap_plane *omap_plane = to_omap_plane(plane);
+	struct omap_drm_window *win = &omap_plane->win;
+
+	win->crtc_x = crtc_x;
+	win->crtc_y = crtc_y;
+	win->crtc_w = crtc_w;
+	win->crtc_h = crtc_h;
+
+	/* src values are in Q16 fixed point, convert to integer: */
+	win->src_x = src_x >> 16;
+	win->src_y = src_y >> 16;
+	win->src_w = src_w >> 16;
+	win->src_h = src_h >> 16;
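+	/* e.g. an illustrative Q16 src_w of 0x00500000 (80.0) becomes 80 */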
+
+	if (fxn) {
+		/* omap_crtc should ensure that a new page flip
+		 * isn't permitted while there is one pending:
+		 */
+		BUG_ON(omap_plane->apply_done_cb.fxn);
+
+		omap_plane->apply_done_cb.fxn = fxn;
+		omap_plane->apply_done_cb.arg = arg;
+	}
+
+	plane->fb = fb;
+	plane->crtc = crtc;
+
+	return apply(plane);
+}
+
+static int omap_plane_update(struct drm_plane *plane,
+		struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		int crtc_x, int crtc_y,
+		unsigned int crtc_w, unsigned int crtc_h,
+		uint32_t src_x, uint32_t src_y,
+		uint32_t src_w, uint32_t src_h)
+{
+	struct omap_plane *omap_plane = to_omap_plane(plane);
+	omap_plane->enabled = true;
+
+	if (plane->fb)
+		drm_framebuffer_unreference(plane->fb);
+
+	drm_framebuffer_reference(fb);
+
+	return omap_plane_mode_set(plane, crtc, fb,
+			crtc_x, crtc_y, crtc_w, crtc_h,
+			src_x, src_y, src_w, src_h,
+			NULL, NULL);
+}
+
+static int omap_plane_disable(struct drm_plane *plane)
+{
+	struct omap_plane *omap_plane = to_omap_plane(plane);
+	omap_plane->win.rotation = BIT(DRM_ROTATE_0);
+	return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_plane_destroy(struct drm_plane *plane)
+{
+	struct omap_plane *omap_plane = to_omap_plane(plane);
+
+	DBG("%s", omap_plane->name);
+
+	omap_irq_unregister(plane->dev, &omap_plane->error_irq);
+
+	omap_plane_disable(plane);
+	drm_plane_cleanup(plane);
+
+	WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo));
+	kfifo_free(&omap_plane->unpin_fifo);
+
+	kfree(omap_plane);
+}
+
+int omap_plane_dpms(struct drm_plane *plane, int mode)
+{
+	struct omap_plane *omap_plane = to_omap_plane(plane);
+	bool enabled = (mode == DRM_MODE_DPMS_ON);
+	int ret = 0;
+
+	if (enabled != omap_plane->enabled) {
+		omap_plane->enabled = enabled;
+		ret = apply(plane);
+	}
+
+	return ret;
+}
+
+/* helper to install properties which are common to planes and crtcs */
+void omap_plane_install_properties(struct drm_plane *plane,
+		struct drm_mode_object *obj)
+{
+	struct drm_device *dev = plane->dev;
+	struct omap_drm_private *priv = dev->dev_private;
+	struct drm_property *prop;
+
+	if (priv->has_dmm) {
+		prop = priv->rotation_prop;
+		if (!prop) {
+			const struct drm_prop_enum_list props[] = {
+					{ DRM_ROTATE_0,   "rotate-0" },
+					{ DRM_ROTATE_90,  "rotate-90" },
+					{ DRM_ROTATE_180, "rotate-180" },
+					{ DRM_ROTATE_270, "rotate-270" },
+					{ DRM_REFLECT_X,  "reflect-x" },
+					{ DRM_REFLECT_Y,  "reflect-y" },
+			};
+			prop = drm_property_create_bitmask(dev, 0, "rotation",
+					props, ARRAY_SIZE(props));
+			if (prop == NULL)
+				return;
+			priv->rotation_prop = prop;
+		}
+		drm_object_attach_property(obj, prop, 0);
+	}
+
+	prop = priv->zorder_prop;
+	if (!prop) {
+		prop = drm_property_create_range(dev, 0, "zorder", 0, 3);
+		if (prop == NULL)
+			return;
+		priv->zorder_prop = prop;
+	}
+	drm_object_attach_property(obj, prop, 0);
+}
+
+int omap_plane_set_property(struct drm_plane *plane,
+		struct drm_property *property, uint64_t val)
+{
+	struct omap_plane *omap_plane = to_omap_plane(plane);
+	struct omap_drm_private *priv = plane->dev->dev_private;
+	int ret = -EINVAL;
+
+	if (property == priv->rotation_prop) {
+		DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val);
+		omap_plane->win.rotation = val;
+		ret = apply(plane);
+	} else if (property == priv->zorder_prop) {
+		DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val);
+		omap_plane->info.zorder = val;
+		ret = apply(plane);
+	}
+
+	return ret;
+}
+
+static const struct drm_plane_funcs omap_plane_funcs = {
+		.update_plane = omap_plane_update,
+		.disable_plane = omap_plane_disable,
+		.destroy = omap_plane_destroy,
+		.set_property = omap_plane_set_property,
+};
+
+static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+	struct omap_plane *omap_plane =
+			container_of(irq, struct omap_plane, error_irq);
+	DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus);
+}
+
+static const char *plane_names[] = {
+		[OMAP_DSS_GFX] = "gfx",
+		[OMAP_DSS_VIDEO1] = "vid1",
+		[OMAP_DSS_VIDEO2] = "vid2",
+		[OMAP_DSS_VIDEO3] = "vid3",
+};
+
+static const uint32_t error_irqs[] = {
+		[OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+		[OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+		[OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+		[OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+};
+
+/* initialize plane */
+struct drm_plane *omap_plane_init(struct drm_device *dev,
+		int id, bool private_plane)
+{
+	struct omap_drm_private *priv = dev->dev_private;
+	struct drm_plane *plane = NULL;
+	struct omap_plane *omap_plane;
+	struct omap_overlay_info *info;
+	int ret;
+
+	DBG("%s: priv=%d", plane_names[id], private_plane);
+
+	omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
+	if (!omap_plane)
+		goto fail;
+
+	ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
+	if (ret) {
+		dev_err(dev->dev, "could not allocate unpin FIFO\n");
+		goto fail;
+	}
+
+	omap_plane->nformats = omap_framebuffer_get_formats(
+			omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
+			dss_feat_get_supported_color_modes(id));
+	omap_plane->id = id;
+	omap_plane->name = plane_names[id];
+
+	plane = &omap_plane->base;
+
+	omap_plane->apply.pre_apply  = omap_plane_pre_apply;
+	omap_plane->apply.post_apply = omap_plane_post_apply;
+
+	omap_plane->error_irq.irqmask = error_irqs[id];
+	omap_plane->error_irq.irq = omap_plane_error_irq;
+	omap_irq_register(dev, &omap_plane->error_irq);
+
+	drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs,
+			omap_plane->formats, omap_plane->nformats, private_plane);
+
+	omap_plane_install_properties(plane, &plane->base);
+
+	/* get our starting configuration, set defaults for parameters
+	 * we don't currently use, etc:
+	 */
+	info = &omap_plane->info;
+	info->rotation_type = OMAP_DSS_ROT_DMA;
+	info->rotation = OMAP_DSS_ROT_0;
+	info->global_alpha = 0xff;
+	info->mirror = 0;
+
+	/* Set defaults depending on whether we are a CRTC or overlay
+	 * layer.
+	 * TODO add ioctl to give userspace an API to change this.. this
+	 * will come in a subsequent patch.
+	 */
+	if (private_plane)
+		omap_plane->info.zorder = 0;
+	else
+		omap_plane->info.zorder = id;
+
+	return plane;
+
+fail:
+	if (plane)
+		omap_plane_destroy(plane);
+
+	return NULL;
+}
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/tcm-sita.c b/linux-imx/drivers/gpu/drm/omapdrm/tcm-sita.c
new file mode 100644
index 0000000..efb6095
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -0,0 +1,703 @@
+/*
+ * tcm-sita.c
+ *
+ * SImple Tiler Allocator (SiTA): 2D and 1D allocation (reservation) algorithm
+ *
+ * Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
+ *          Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "tcm-sita.h"
+
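+/* note: ALIGN_DOWN assumes a power-of-two align; e.g. ALIGN_DOWN(37, 16) == 32 */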
+#define ALIGN_DOWN(value, align) ((value) & ~((align) - 1))
+
+/* Individual selection criteria for different scan areas */
+static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL;
+static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE;
+
+/*********************************************
+ *	TCM API - Sita Implementation
+ *********************************************/
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+			   struct tcm_area *area);
+static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area);
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area);
+static void sita_deinit(struct tcm *tcm);
+
+/*********************************************
+ *	Main Scanner functions
+ *********************************************/
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+				   struct tcm_area *area);
+
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+			struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+			struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+			struct tcm_area *field, struct tcm_area *area);
+
+/*********************************************
+ *	Support Infrastructure Methods
+ *********************************************/
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h);
+
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+			    struct tcm_area *field, s32 criteria,
+			    struct score *best);
+
+static void get_nearness_factor(struct tcm_area *field,
+				struct tcm_area *candidate,
+				struct nearness_factor *nf);
+
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+			       struct neighbor_stats *stat);
+
+static void fill_area(struct tcm *tcm,
+				struct tcm_area *area, struct tcm_area *parent);
+
+
+/*********************************************/
+
+/*********************************************
+ *	Utility Methods
+ *********************************************/
+struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
+{
+	struct tcm *tcm;
+	struct sita_pvt *pvt;
+	struct tcm_area area = {0};
+	s32 i;
+
+	if (width == 0 || height == 0)
+		return NULL;
+
+	tcm = kzalloc(sizeof(*tcm), GFP_KERNEL);
+	pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
+	if (!tcm || !pvt)
+		goto error;
+
+	/* Updating the pointers to SiTA implementation APIs */
+	tcm->height = height;
+	tcm->width = width;
+	tcm->reserve_2d = sita_reserve_2d;
+	tcm->reserve_1d = sita_reserve_1d;
+	tcm->free = sita_free;
+	tcm->deinit = sita_deinit;
+	tcm->pvt = (void *)pvt;
+
+	spin_lock_init(&(pvt->lock));
+
+	/* Creating the tcm map */
+	pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL);
+	if (!pvt->map)
+		goto error;
+
+	for (i = 0; i < tcm->width; i++) {
+		pvt->map[i] =
+			kmalloc(sizeof(**pvt->map) * tcm->height,
+								GFP_KERNEL);
+		if (pvt->map[i] == NULL) {
+			while (i--)
+				kfree(pvt->map[i]);
+			kfree(pvt->map);
+			goto error;
+		}
+	}
+
+	if (attr && attr->x <= tcm->width && attr->y <= tcm->height) {
+		pvt->div_pt.x = attr->x;
+		pvt->div_pt.y = attr->y;
+
+	} else {
+		/* Defaulting to 3:1 ratio on width for 2D area split */
+		/* Defaulting to 3:1 ratio on height for 2D and 1D split */
+		pvt->div_pt.x = (tcm->width * 3) / 4;
+		pvt->div_pt.y = (tcm->height * 3) / 4;
+	}
+
+	spin_lock(&(pvt->lock));
+	assign(&area, 0, 0, width - 1, height - 1);
+	fill_area(tcm, &area, NULL);
+	spin_unlock(&(pvt->lock));
+	return tcm;
+
+error:
+	kfree(tcm);
+	kfree(pvt);
+	return NULL;
+}
+
+static void sita_deinit(struct tcm *tcm)
+{
+	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+	struct tcm_area area = {0};
+	s32 i;
+
+	area.p1.x = tcm->width - 1;
+	area.p1.y = tcm->height - 1;
+
+	spin_lock(&(pvt->lock));
+	fill_area(tcm, &area, NULL);
+	spin_unlock(&(pvt->lock));
+
+	/* the map was allocated with one row pointer per unit of width */
+	for (i = 0; i < tcm->width; i++)
+		kfree(pvt->map[i]);
+	kfree(pvt->map);
+	kfree(pvt);
+}
+
+/**
+ * Reserve a 1D area in the container
+ *
+ * @param num_slots	size of 1D area
+ * @param area		pointer to the area that will be populated with the
+ *			reserved area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
+			   struct tcm_area *area)
+{
+	s32 ret;
+	struct tcm_area field = {0};
+	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+	spin_lock(&(pvt->lock));
+
+	/* Scanning entire container */
+	assign(&field, tcm->width - 1, tcm->height - 1, 0, 0);
+
+	ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area);
+	if (!ret)
+		/* update map */
+		fill_area(tcm, area, area);
+
+	spin_unlock(&(pvt->lock));
+	return ret;
+}
+
+/**
+ * Reserve a 2D area in the container
+ *
+ * @param w	width
+ * @param h	height
+ * @param area	pointer to the area that will be populated with the reserved
+ *		area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+			   struct tcm_area *area)
+{
+	s32 ret;
+	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+	/* not supporting more than 64 as alignment */
+	if (align > 64)
+		return -EINVAL;
+
+	/* we prefer 1, 32 and 64 as alignment */
+	align = align <= 1 ? 1 : align <= 32 ? 32 : 64;
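+	/* e.g. a requested align of 8 snaps to 32, and 48 snaps to 64 */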
+
+	spin_lock(&(pvt->lock));
+	ret = scan_areas_and_find_fit(tcm, w, h, align, area);
+	if (!ret)
+		/* update map */
+		fill_area(tcm, area, area);
+
+	spin_unlock(&(pvt->lock));
+	return ret;
+}
+
+/**
+ * Unreserve a previously allocated 2D or 1D area
+ * @param area	area to be freed
+ * @return 0 - success
+ */
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
+{
+	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+	spin_lock(&(pvt->lock));
+
+	/* check that this is in fact an existing area */
+	WARN_ON(pvt->map[area->p0.x][area->p0.y] != area ||
+		pvt->map[area->p1.x][area->p1.y] != area);
+
+	/* Clear the contents of the associated tiles in the map */
+	fill_area(tcm, area, NULL);
+
+	spin_unlock(&(pvt->lock));
+
+	return 0;
+}
+
+/**
+ * Note: In general the coordinates in the scan field are relative to the
+ * scan sweep direction.  The scan origin (e.g. top-left corner) will always
+ * be the p0 member of the field.  Therefore, for a scan from the top-left,
+ * p0.x <= p1.x and p0.y <= p1.y; whereas, for a scan from the bottom-right,
+ * p1.x <= p0.x and p1.y <= p0.y.
+ */
+
+/**
+ * Raster scan horizontally right to left from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w	width of desired area
+ * @param h	height of desired area
+ * @param align	desired area alignment
+ * @param area	pointer to the area that will be set to the best position
+ * @param field	area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+			struct tcm_area *field, struct tcm_area *area)
+{
+	s32 x, y;
+	s16 start_x, end_x, start_y, end_y, found_x = -1;
+	struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+	struct score best = {{0}, {0}, {0}, 0};
+
+	start_x = field->p0.x;
+	end_x = field->p1.x;
+	start_y = field->p0.y;
+	end_y = field->p1.y;
+
+	/* check scan area co-ordinates */
+	if (field->p0.x < field->p1.x ||
+	    field->p1.y < field->p0.y)
+		return -EINVAL;
+
+	/* check if allocation would fit in scan area */
+	if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y))
+		return -ENOSPC;
+
+	/* adjust start_x and end_y, as allocation would not fit beyond */
+	start_x = ALIGN_DOWN(start_x - w + 1, align); /* + 1 to be inclusive */
+	end_y = end_y - h + 1;
+
+	/* check if allocation would still fit in scan area */
+	if (start_x < end_x)
+		return -ENOSPC;
+
+	/* scan field top-to-bottom, right-to-left */
+	for (y = start_y; y <= end_y; y++) {
+		for (x = start_x; x >= end_x; x -= align) {
+			if (is_area_free(map, x, y, w, h)) {
+				found_x = x;
+
+				/* update best candidate */
+				if (update_candidate(tcm, x, y, w, h, field,
+							CR_R2L_T2B, &best))
+					goto done;
+
+				/* raise the lower x bound */
+				end_x = x + 1;
+				break;
+			} else if (map[x][y] && map[x][y]->is2d) {
+				/* step over 2D areas */
+				x = ALIGN(map[x][y]->p0.x - w + 1, align);
+			}
+		}
+
+		/* break if you find a free area shouldering the scan field */
+		if (found_x == start_x)
+			break;
+	}
+
+	if (!best.a.tcm)
+		return -ENOSPC;
+done:
+	assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+	return 0;
+}
+
+/**
+ * Raster scan horizontally left to right from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w	width of desired area
+ * @param h	height of desired area
+ * @param align	desired area alignment
+ * @param area	pointer to the area that will be set to the best position
+ * @param field	area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+			struct tcm_area *field, struct tcm_area *area)
+{
+	s32 x, y;
+	s16 start_x, end_x, start_y, end_y, found_x = -1;
+	struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+	struct score best = {{0}, {0}, {0}, 0};
+
+	start_x = field->p0.x;
+	end_x = field->p1.x;
+	start_y = field->p0.y;
+	end_y = field->p1.y;
+
+	/* check scan area co-ordinates */
+	if (field->p1.x < field->p0.x ||
+	    field->p1.y < field->p0.y)
+		return -EINVAL;
+
+	/* check if allocation would fit in scan area */
+	if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y))
+		return -ENOSPC;
+
+	start_x = ALIGN(start_x, align);
+
+	/* check if allocation would still fit in scan area */
+	if (w > LEN(end_x, start_x))
+		return -ENOSPC;
+
+	/* adjust end_x and end_y, as allocation would not fit beyond */
+	end_x = end_x - w + 1; /* + 1 to be inclusive */
+	end_y = end_y - h + 1;
+
+	/* scan field top-to-bottom, left-to-right */
+	for (y = start_y; y <= end_y; y++) {
+		for (x = start_x; x <= end_x; x += align) {
+			if (is_area_free(map, x, y, w, h)) {
+				found_x = x;
+
+				/* update best candidate */
+				if (update_candidate(tcm, x, y, w, h, field,
+							CR_L2R_T2B, &best))
+					goto done;
+				/* change upper x bound */
+				end_x = x - 1;
+
+				break;
+			} else if (map[x][y] && map[x][y]->is2d) {
+				/* step over 2D areas */
+				x = ALIGN_DOWN(map[x][y]->p1.x, align);
+			}
+		}
+
+		/* break if you find a free area shouldering the scan field */
+		if (found_x == start_x)
+			break;
+	}
+
+	if (!best.a.tcm)
+		return -ENOSPC;
+done:
+	assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+	return 0;
+}
+
+/**
+ * Raster scan horizontally right to left from bottom to top to find a place
+ * for a 1D area of given size inside a scan field.
+ *
+ * @param num_slots	size of desired area
+ * @param area		pointer to the area that will be set to the best
+ *			position
+ * @param field		area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+				struct tcm_area *field, struct tcm_area *area)
+{
+	s32 found = 0;
+	s16 x, y;
+	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+	struct tcm_area *p;
+
+	/* check scan area co-ordinates */
+	if (field->p0.y < field->p1.y)
+		return -EINVAL;
+
+	/*
+	 * Currently we only support a full-width 1D scan field, which
+	 * makes sense since 1D slot-ordering spans the full container
+	 * width.
+	 */
+	if (tcm->width != field->p0.x - field->p1.x + 1)
+		return -EINVAL;
+
+	/* check if allocation would fit in scan area */
+	if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y))
+		return -ENOSPC;
+
+	x = field->p0.x;
+	y = field->p0.y;
+
+	/* find num_slots consecutive free slots to the left */
+	while (found < num_slots) {
+		if (y < 0)
+			return -ENOSPC;
+
+		/* remember bottom-right corner */
+		if (found == 0) {
+			area->p1.x = x;
+			area->p1.y = y;
+		}
+
+		/* skip busy regions */
+		p = pvt->map[x][y];
+		if (p) {
+			/* move to left of 2D areas, top left of 1D */
+			x = p->p0.x;
+			if (!p->is2d)
+				y = p->p0.y;
+
+			/* start over */
+			found = 0;
+		} else {
+			/* count consecutive free slots */
+			found++;
+			if (found == num_slots)
+				break;
+		}
+
+		/* move to the left */
+		if (x == 0)
+			y--;
+		x = (x ? : tcm->width) - 1;
+
+	}
+
+	/* set top-left corner */
+	area->p0.x = x;
+	area->p0.y = y;
+	return 0;
+}
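+
+/*
+ * Example of the wrap-around step above (illustrative only): in a
+ * 256-slot-wide container, moving left from slot (0, y) continues at
+ * slot (255, y - 1); once x reaches 0, y is decremented and
+ * x = (x ? : tcm->width) - 1 evaluates to 255.
+ */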
+
+/**
+ * Find a place for a 2D area of given size inside a scan field based on its
+ * alignment needs.
+ *
+ * @param w	width of desired area
+ * @param h	height of desired area
+ * @param align	desired area alignment
+ * @param area	pointer to the area that will be set to the best position
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+				   struct tcm_area *area)
+{
+	s32 ret = 0;
+	struct tcm_area field = {0};
+	u16 boundary_x, boundary_y;
+	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+	if (align > 1) {
+		/* prefer top-left corner */
+		boundary_x = pvt->div_pt.x - 1;
+		boundary_y = pvt->div_pt.y - 1;
+
+		/* expand width and height if needed */
+		if (w > pvt->div_pt.x)
+			boundary_x = tcm->width - 1;
+		if (h > pvt->div_pt.y)
+			boundary_y = tcm->height - 1;
+
+		assign(&field, 0, 0, boundary_x, boundary_y);
+		ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+
+		/* scan whole container if failed, but do not scan 2x */
+		if (ret != 0 && (boundary_x != tcm->width - 1 ||
+				 boundary_y != tcm->height - 1)) {
+			/* scan the entire container if nothing found */
+			assign(&field, 0, 0, tcm->width - 1, tcm->height - 1);
+			ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+		}
+	} else if (align == 1) {
+		/* prefer top-right corner */
+		boundary_x = pvt->div_pt.x;
+		boundary_y = pvt->div_pt.y - 1;
+
+		/* expand width and height if needed */
+		if (w > (tcm->width - pvt->div_pt.x))
+			boundary_x = 0;
+		if (h > pvt->div_pt.y)
+			boundary_y = tcm->height - 1;
+
+		assign(&field, tcm->width - 1, 0, boundary_x, boundary_y);
+		ret = scan_r2l_t2b(tcm, w, h, align, &field, area);
+
+		/* scan whole container if failed, but do not scan 2x */
+		if (ret != 0 && (boundary_x != 0 ||
+				 boundary_y != tcm->height - 1)) {
+			/* scan the entire container if nothing found */
+			assign(&field, tcm->width - 1, 0, 0, tcm->height - 1);
+			ret = scan_r2l_t2b(tcm, w, h, align, &field,
+					   area);
+		}
+	}
+
+	return ret;
+}
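+
+/*
+ * Illustration of the division-point policy above (values hypothetical):
+ * with div_pt = (192, 96) in a 256x128 container, aligned (align > 1)
+ * areas are first sought left-to-right within (0, 0)..(191, 95), while
+ * unaligned (align == 1) areas are sought right-to-left within
+ * (255, 0)..(192, 95); only if that fails is the whole container
+ * scanned.
+ */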
+
+/* check if an entire area is free */
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h)
+{
+	u16 x = 0, y = 0;
+	for (y = y0; y < y0 + h; y++) {
+		for (x = x0; x < x0 + w; x++) {
+			if (map[x][y])
+				return false;
+		}
+	}
+	return true;
+}
+
+/* fills an area with a parent tcm_area */
+static void fill_area(struct tcm *tcm, struct tcm_area *area,
+			struct tcm_area *parent)
+{
+	s32 x, y;
+	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+	struct tcm_area a, a_;
+
+	/* set area's tcm; otherwise, enumerator considers it invalid */
+	area->tcm = tcm;
+
+	tcm_for_each_slice(a, *area, a_) {
+		for (x = a.p0.x; x <= a.p1.x; ++x)
+			for (y = a.p0.y; y <= a.p1.y; ++y)
+				pvt->map[x][y] = parent;
+
+	}
+}
+
+/**
+ * Compares a candidate area to the current best area, and if it is a better
+ * fit, it updates the best to this one.
+ *
+ * @param x0, y0, w, h		left, top, width, height of candidate area
+ * @param field			scan field
+ * @param criteria		scan criteria
+ * @param best			best candidate and its scores
+ *
+ * @return 1 (true) if the candidate area is known to be the final best, so no
+ * more searching should be performed
+ */
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+			    struct tcm_area *field, s32 criteria,
+			    struct score *best)
+{
+	struct score me;	/* score for area */
+
+	/*
+	 * NOTE: For horizontal bias we always take the first found, because our
+	 * scan is horizontal-raster-based and the first candidate will always
+	 * have the horizontal bias.
+	 */
+	bool first = criteria & CR_BIAS_HORIZONTAL;
+
+	assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1);
+
+	/* calculate score for current candidate */
+	if (!first) {
+		get_neighbor_stats(tcm, &me.a, &me.n);
+		me.neighs = me.n.edge + me.n.busy;
+		get_nearness_factor(field, &me.a, &me.f);
+	}
+
+	/* the 1st candidate is always the best */
+	if (!best->a.tcm)
+		goto better;
+
+	BUG_ON(first);
+
+	/* diagonal balance check */
+	if ((criteria & CR_DIAGONAL_BALANCE) &&
+		best->neighs <= me.neighs &&
+		(best->neighs < me.neighs ||
+		 /* this implies that neighs and occupied match */
+		 best->n.busy < me.n.busy ||
+		 (best->n.busy == me.n.busy &&
+		  /* check the nearness factor */
+		  best->f.x + best->f.y > me.f.x + me.f.y)))
+		goto better;
+
+	/* not better, keep going */
+	return 0;
+
+better:
+	/* save current area as best */
+	memcpy(best, &me, sizeof(me));
+	best->a.tcm = tcm;
+	return first;
+}
+
+/**
+ * Calculate the nearness factor of an area in a search field.  The nearness
+ * factor is smaller if the area is closer to the search origin.
+ */
+static void get_nearness_factor(struct tcm_area *field, struct tcm_area *area,
+				struct nearness_factor *nf)
+{
+	/*
+	 * Using signed math as field coordinates may be reversed if
+	 * search direction is right-to-left or bottom-to-top.
+	 */
+	nf->x = (s32)(area->p0.x - field->p0.x) * 1000 /
+		(field->p1.x - field->p0.x);
+	nf->y = (s32)(area->p0.y - field->p0.y) * 1000 /
+		(field->p1.y - field->p0.y);
+}
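+
+/*
+ * Example (illustrative, for a left-to-right field): with the field
+ * spanning x = 0..15, an area with p0.x == 4 gets
+ * nf->x = 4 * 1000 / 15 = 266, while an area at the scan origin gets 0.
+ */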
+
+/* get neighbor statistics */
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+			 struct neighbor_stats *stat)
+{
+	s16 x = 0, y = 0;
+	struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+	/* Clear any existing values */
+	memset(stat, 0, sizeof(*stat));
+
+	/* process top & bottom edges */
+	for (x = area->p0.x; x <= area->p1.x; x++) {
+		if (area->p0.y == 0)
+			stat->edge++;
+		else if (pvt->map[x][area->p0.y - 1])
+			stat->busy++;
+
+		if (area->p1.y == tcm->height - 1)
+			stat->edge++;
+		else if (pvt->map[x][area->p1.y + 1])
+			stat->busy++;
+	}
+
+	/* process left & right edges */
+	for (y = area->p0.y; y <= area->p1.y; ++y) {
+		if (area->p0.x == 0)
+			stat->edge++;
+		else if (pvt->map[area->p0.x - 1][y])
+			stat->busy++;
+
+		if (area->p1.x == tcm->width - 1)
+			stat->edge++;
+		else if (pvt->map[area->p1.x + 1][y])
+			stat->busy++;
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/tcm-sita.h b/linux-imx/drivers/gpu/drm/omapdrm/tcm-sita.h
new file mode 100644
index 0000000..0444f86
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/tcm-sita.h
@@ -0,0 +1,95 @@
+/*
+ * tcm_sita.h
+ *
+ * SImple Tiler Allocator (SiTA) private structures.
+ *
+ * Author: Ravi Ramachandra <r.ramachandra@ti.com>
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ *   its contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TCM_SITA_H
+#define _TCM_SITA_H
+
+#include "tcm.h"
+
+/* length between two coordinates */
+#define LEN(a, b) ((a) > (b) ? (a) - (b) + 1 : (b) - (a) + 1)
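+
+/* e.g. LEN(9, 2) == LEN(2, 9) == 8: inclusive and order-independent */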
+
+enum criteria {
+	CR_MAX_NEIGHS		= 0x01,
+	CR_FIRST_FOUND		= 0x10,
+	CR_BIAS_HORIZONTAL	= 0x20,
+	CR_BIAS_VERTICAL	= 0x40,
+	CR_DIAGONAL_BALANCE	= 0x80
+};
+
+/* nearness to the beginning of the search field from 0 to 1000 */
+struct nearness_factor {
+	s32 x;
+	s32 y;
+};
+
+/*
+ * Statistics on immediately neighboring slots.  Edge is the number of
+ * border segments that are also border segments of the container.  Busy
+ * refers to the number of neighbors that are occupied.
+ */
+struct neighbor_stats {
+	u16 edge;
+	u16 busy;
+};
+
+/* structure to keep the score of a potential allocation */
+struct score {
+	struct nearness_factor	f;
+	struct neighbor_stats	n;
+	struct tcm_area		a;
+	u16    neighs;		/* number of busy neighbors */
+};
+
+struct sita_pvt {
+	spinlock_t lock;	/* spinlock to protect access */
+	struct tcm_pt div_pt;	/* divider point splitting container */
+	struct tcm_area ***map;	/* pointers to the parent area for each slot */
+};
+
+/* assign coordinates to area */
+static inline
+void assign(struct tcm_area *a, u16 x0, u16 y0, u16 x1, u16 y1)
+{
+	a->p0.x = x0;
+	a->p0.y = y0;
+	a->p1.x = x1;
+	a->p1.y = y1;
+}
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/omapdrm/tcm.h b/linux-imx/drivers/gpu/drm/omapdrm/tcm.h
new file mode 100644
index 0000000..a8d5ce4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/omapdrm/tcm.h
@@ -0,0 +1,328 @@
+/*
+ * tcm.h
+ *
+ * TILER container manager specification and support functions for TI
+ * TILER driver.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ *   its contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TCM_H
+#define TCM_H
+
+struct tcm;
+
+/* point */
+struct tcm_pt {
+	u16 x;
+	u16 y;
+};
+
+/* 1d or 2d area */
+struct tcm_area {
+	bool is2d;		/* whether area is 1d or 2d */
+	struct tcm    *tcm;	/* parent */
+	struct tcm_pt  p0;
+	struct tcm_pt  p1;
+};
+
+struct tcm {
+	u16 width, height;	/* container dimensions */
+	int lut_id;		/* Lookup table identifier */
+
+	unsigned int y_offset;	/* offset to use for y coordinates */
+
+	/* 'pvt' structure shall contain any tcm details (attr) along with
+	linked list of allocated areas and mutex for mutually exclusive access
+	to the list.  It may also contain copies of width and height to notice
+	any changes to the publicly available width and height fields. */
+	void *pvt;
+
+	/* function table */
+	s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u8 align,
+			  struct tcm_area *area);
+	s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
+	s32 (*free)      (struct tcm *tcm, struct tcm_area *area);
+	void (*deinit)   (struct tcm *tcm);
+};
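+
+/*
+ * Illustrative sketch (handler names other than sita_free() are
+ * assumptions): an implementation such as sita_init() returns a
+ * struct tcm whose function table points at its own handlers, e.g.
+ *
+ *	tcm->reserve_2d = sita_reserve_2d;
+ *	tcm->reserve_1d = sita_reserve_1d;
+ *	tcm->free       = sita_free;
+ *	tcm->deinit     = sita_deinit;
+ */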
+
+/*=============================================================================
+    BASIC TILER CONTAINER MANAGER INTERFACE
+=============================================================================*/
+
+/*
+ * NOTE:
+ *
+ * Since some basic parameter checking is done outside the TCM algorithms,
+ * TCM implementations do NOT have to check the following:
+ *
+ *   whether the area pointer is NULL
+ *   whether width and height fit within the container
+ *   whether the number of pages exceeds the size of the container
+ *
+ */
+
+struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr);
+
+
+/**
+ * Deinitialize tiler container manager.
+ *
+ * @param tcm	Pointer to container manager.
+ *
+ * The call should free as much memory as possible and remain
+ * meaningful even on failure.
+ */
+static inline void tcm_deinit(struct tcm *tcm)
+{
+	if (tcm)
+		tcm->deinit(tcm);
+}
+
+/**
+ * Reserves a 2D area in the container.
+ *
+ * @param tcm		Pointer to container manager.
+ * @param height	Height(in pages) of area to be reserved.
+ * @param width		Width(in pages) of area to be reserved.
+ * @param align		Alignment requirement for top-left corner of area. Not
+ *			all values may be supported by the container manager,
+ *			but it must support 0 (1), 32 and 64.
+ *			A value of 0 is equivalent to 1.
+ * @param area		Pointer to where the reserved area should be stored.
+ *
+ * @return 0 on success.  Non-0 error code on failure.  Also,
+ *	   the tcm field of the area will be set to NULL on
+ *	   failure.  Some error codes: -ENODEV: invalid manager,
+ *	   -EINVAL: invalid area, -ENOMEM: not enough space for
+ *	    allocation.
+ */
+static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
+				 u16 align, struct tcm_area *area)
+{
+	/* perform rudimentary error checking */
+	s32 res = tcm  == NULL ? -ENODEV :
+		(area == NULL || width == 0 || height == 0 ||
+		 /* align must be a power of 2 */
+		 (align & (align - 1))) ? -EINVAL :
+		(height > tcm->height || width > tcm->width) ? -ENOMEM : 0;
+
+	if (!res) {
+		area->is2d = true;
+		res = tcm->reserve_2d(tcm, height, width, align, area);
+		area->tcm = res ? NULL : tcm;
+	}
+
+	return res;
+}
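+
+/*
+ * Usage sketch (illustrative, assuming a container previously created
+ * with sita_init()):
+ *
+ *	struct tcm_area area;
+ *
+ *	if (!tcm_reserve_2d(tcm, 64, 32, 32, &area)) {
+ *		... use the 64x32 slot area anchored at area.p0 ...
+ *		tcm_free(&area);
+ *	}
+ */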
+
+/**
+ * Reserves a 1D area in the container.
+ *
+ * @param tcm		Pointer to container manager.
+ * @param slots		Number of (contiguous) slots to reserve.
+ * @param area		Pointer to where the reserved area should be stored.
+ *
+ * @return 0 on success.  Non-0 error code on failure.  Also,
+ *	   the tcm field of the area will be set to NULL on
+ *	   failure.  Some error codes: -ENODEV: invalid manager,
+ *	   -EINVAL: invalid area, -ENOMEM: not enough space for
+ *	    allocation.
+ */
+static inline s32 tcm_reserve_1d(struct tcm *tcm, u32 slots,
+				 struct tcm_area *area)
+{
+	/* perform rudimentary error checking */
+	s32 res = tcm  == NULL ? -ENODEV :
+		(area == NULL || slots == 0) ? -EINVAL :
+		slots > (tcm->width * (u32) tcm->height) ? -ENOMEM : 0;
+
+	if (!res) {
+		area->is2d = false;
+		res = tcm->reserve_1d(tcm, slots, area);
+		area->tcm = res ? NULL : tcm;
+	}
+
+	return res;
+}
+
+/**
+ * Free a previously reserved area from the container.
+ *
+ * @param area	Pointer to area reserved by a prior call to
+ *		tcm_reserve_1d or tcm_reserve_2d call, whether
+ *		it was successful or not. (Note: all fields of
+ *		the structure must match.)
+ *
+ * @return 0 on success.  Non-0 error code on failure.  Also, the tcm
+ *	   field of the area is set to NULL on success to avoid subsequent
+ *	   freeing.  This call will succeed even if supplied with
+ *	   an area from a failed reserve call.
+ */
+static inline s32 tcm_free(struct tcm_area *area)
+{
+	s32 res = 0; /* free succeeds by default */
+
+	if (area && area->tcm) {
+		res = area->tcm->free(area->tcm, area);
+		if (res == 0)
+			area->tcm = NULL;
+	}
+
+	return res;
+}
+
+/*=============================================================================
+    HELPER FUNCTION FOR ANY TILER CONTAINER MANAGER
+=============================================================================*/
+
+/**
+ * This method slices off the topmost 2D slice from the parent area, and stores
+ * it in the 'slice' parameter.  The 'parent' parameter will get modified to
+ * contain the remaining portion of the area.  If the whole parent area can
+ * fit in a 2D slice, its tcm pointer is set to NULL to mark that it is no
+ * longer a valid area.
+ *
+ * @param parent	Pointer to a VALID parent area that will get modified
+ * @param slice		Pointer to the slice area that will get modified
+ */
+static inline void tcm_slice(struct tcm_area *parent, struct tcm_area *slice)
+{
+	*slice = *parent;
+
+	/* check if we need to slice */
+	if (slice->tcm && !slice->is2d &&
+		slice->p0.y != slice->p1.y &&
+		(slice->p0.x || (slice->p1.x != slice->tcm->width - 1))) {
+		/* set end point of slice (start always remains) */
+		slice->p1.x = slice->tcm->width - 1;
+		slice->p1.y = (slice->p0.x) ? slice->p0.y : slice->p1.y - 1;
+		/* adjust remaining area */
+		parent->p0.x = 0;
+		parent->p0.y = slice->p1.y + 1;
+	} else {
+		/* mark this as the last slice */
+		parent->tcm = NULL;
+	}
+}
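+
+/*
+ * Example (illustrative): in a 16-slot-wide container, a 1D area from
+ * p0 = (10, 2) to p1 = (5, 4) slices into three 2D pieces:
+ * (10, 2)..(15, 2), then (0, 3)..(15, 3), then (0, 4)..(5, 4).
+ */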
+
+/* Verify if a tcm area is logically valid */
+static inline bool tcm_area_is_valid(struct tcm_area *area)
+{
+	return area && area->tcm &&
+		/* coordinate bounds */
+		area->p1.x < area->tcm->width &&
+		area->p1.y < area->tcm->height &&
+		area->p0.y <= area->p1.y &&
+		/* 1D coordinate relationship + p0.x check */
+		((!area->is2d &&
+		  area->p0.x < area->tcm->width &&
+		  area->p0.x + area->p0.y * area->tcm->width <=
+		  area->p1.x + area->p1.y * area->tcm->width) ||
+		 /* 2D coordinate relationship */
+		 (area->is2d &&
+		  area->p0.x <= area->p1.x));
+}
+
+/* see if a coordinate is within an area */
+static inline bool __tcm_is_in(struct tcm_pt *p, struct tcm_area *a)
+{
+	u16 i;
+
+	if (a->is2d) {
+		return p->x >= a->p0.x && p->x <= a->p1.x &&
+		       p->y >= a->p0.y && p->y <= a->p1.y;
+	} else {
+		i = p->x + p->y * a->tcm->width;
+		return i >= a->p0.x + a->p0.y * a->tcm->width &&
+		       i <= a->p1.x + a->p1.y * a->tcm->width;
+	}
+}
+
+/* calculate area width */
+static inline u16 __tcm_area_width(struct tcm_area *area)
+{
+	return area->p1.x - area->p0.x + 1;
+}
+
+/* calculate area height */
+static inline u16 __tcm_area_height(struct tcm_area *area)
+{
+	return area->p1.y - area->p0.y + 1;
+}
+
+/* calculate number of slots in an area */
+static inline u16 __tcm_sizeof(struct tcm_area *area)
+{
+	return area->is2d ?
+		__tcm_area_width(area) * __tcm_area_height(area) :
+		(area->p1.x - area->p0.x + 1) + (area->p1.y - area->p0.y) *
+							area->tcm->width;
+}
+#define tcm_sizeof(area) __tcm_sizeof(&(area))
+#define tcm_awidth(area) __tcm_area_width(&(area))
+#define tcm_aheight(area) __tcm_area_height(&(area))
+#define tcm_is_in(pt, area) __tcm_is_in(&(pt), &(area))
+
+/* limit a 1D area to the first N pages */
+static inline s32 tcm_1d_limit(struct tcm_area *a, u32 num_pg)
+{
+	if (__tcm_sizeof(a) < num_pg)
+		return -ENOMEM;
+	if (!num_pg)
+		return -EINVAL;
+
+	a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width;
+	a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width);
+	return 0;
+}
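+
+/*
+ * Example (illustrative): limiting a 1D area starting at p0 = (10, 2)
+ * in a 16-slot-wide container to 8 pages yields
+ * p1 = ((10 + 8 - 1) % 16, 2 + (10 + 8 - 1) / 16) = (1, 3).
+ */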
+
+/**
+ * Iterate through 2D slices of a valid area. Behaves
+ * syntactically as a for(;;) statement.
+ *
+ * @param var		Name of a local variable of type 'struct
+ *			tcm_area' that will get modified to
+ *			contain each slice.
+ * @param area		The VALID parent area.  This structure
+ *			will not get modified throughout the loop.
+ * @param safe		Name of a local variable of type 'struct
+ *			tcm_area' used as temporary storage; it is
+ *			consumed by the iteration.
+ *
+ */
+#define tcm_for_each_slice(var, area, safe) \
+	for (safe = area, \
+	     tcm_slice(&safe, &var); \
+	     var.tcm; tcm_slice(&safe, &var))
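+
+/*
+ * Usage sketch (illustrative): given a valid 'struct tcm_area area',
+ * summing its slots slice by slice is equivalent to tcm_sizeof(area):
+ *
+ *	struct tcm_area slice, safe;
+ *	u32 slots = 0;
+ *
+ *	tcm_for_each_slice(slice, area, safe)
+ *		slots += tcm_sizeof(slice);
+ */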
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/qxl/Kconfig b/linux-imx/drivers/gpu/drm/qxl/Kconfig
new file mode 100644
index 0000000..d6c1279
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/Kconfig
@@ -0,0 +1,11 @@
+config DRM_QXL
+	tristate "QXL virtual GPU"
+	depends on DRM && PCI
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_DEFERRED_IO
+	select DRM_KMS_HELPER
+	select DRM_TTM
+	help
+	  QXL virtual GPU for Spice virtualization desktop integration.
+	  Do not enable this driver unless your distro ships a
+	  corresponding X.org QXL driver that can handle kernel
+	  modesetting.
diff --git a/linux-imx/drivers/gpu/drm/qxl/Makefile b/linux-imx/drivers/gpu/drm/qxl/Makefile
new file mode 100644
index 0000000..ea046ba
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o \
+	 qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o \
+	 qxl_dumb.o qxl_ioctl.o qxl_fence.o qxl_release.o
+
+obj-$(CONFIG_DRM_QXL) += qxl.o
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_cmd.c b/linux-imx/drivers/gpu/drm/qxl/qxl_cmd.c
new file mode 100644
index 0000000..f867714
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -0,0 +1,694 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+/* QXL cmd/ring handling */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
+
+struct ring {
+	struct qxl_ring_header      header;
+	uint8_t                     elements[0];
+};
+
+struct qxl_ring {
+	struct ring	       *ring;
+	int			element_size;
+	int			n_elements;
+	int			prod_notify;
+	wait_queue_head_t      *push_event;
+	spinlock_t             lock;
+};
+
+void qxl_ring_free(struct qxl_ring *ring)
+{
+	kfree(ring);
+}
+
+struct qxl_ring *
+qxl_ring_create(struct qxl_ring_header *header,
+		int element_size,
+		int n_elements,
+		int prod_notify,
+		bool set_prod_notify,
+		wait_queue_head_t *push_event)
+{
+	struct qxl_ring *ring;
+
+	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring)
+		return NULL;
+
+	ring->ring = (struct ring *)header;
+	ring->element_size = element_size;
+	ring->n_elements = n_elements;
+	ring->prod_notify = prod_notify;
+	ring->push_event = push_event;
+	if (set_prod_notify)
+		header->notify_on_prod = ring->n_elements;
+	spin_lock_init(&ring->lock);
+	return ring;
+}
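+
+/*
+ * Illustrative sketch (qdev field names are assumptions here; the ring
+ * layout is defined in qxl_dev.h): the driver builds its command ring
+ * on top of the device ram header, roughly as
+ *
+ *	qdev->command_ring =
+ *		qxl_ring_create(&qdev->ram_header->cmd_ring_hdr,
+ *				sizeof(struct qxl_command),
+ *				QXL_COMMAND_RING_SIZE,
+ *				qdev->io_base + QXL_IO_NOTIFY_CMD,
+ *				false, &qdev->display_event);
+ */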
+
+static int qxl_check_header(struct qxl_ring *ring)
+{
+	int ret;
+	struct qxl_ring_header *header = &(ring->ring->header);
+	unsigned long flags;
+	spin_lock_irqsave(&ring->lock, flags);
+	ret = header->prod - header->cons < header->num_items;
+	if (ret == 0)
+		header->notify_on_cons = header->cons + 1;
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return ret;
+}
+
+static int qxl_check_idle(struct qxl_ring *ring)
+{
+	int ret;
+	struct qxl_ring_header *header = &(ring->ring->header);
+	unsigned long flags;
+	spin_lock_irqsave(&ring->lock, flags);
+	ret = header->prod == header->cons;
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return ret;
+}
+
+int qxl_ring_push(struct qxl_ring *ring,
+		  const void *new_elt, bool interruptible)
+{
+	struct qxl_ring_header *header = &(ring->ring->header);
+	uint8_t *elt;
+	int idx, ret;
+	unsigned long flags;
+	spin_lock_irqsave(&ring->lock, flags);
+	if (header->prod - header->cons == header->num_items) {
+		header->notify_on_cons = header->cons + 1;
+		mb();
+		spin_unlock_irqrestore(&ring->lock, flags);
+		if (!drm_can_sleep()) {
+			while (!qxl_check_header(ring))
+				udelay(1);
+		} else {
+			if (interruptible) {
+				ret = wait_event_interruptible(*ring->push_event,
+							       qxl_check_header(ring));
+				if (ret)
+					return ret;
+			} else {
+				wait_event(*ring->push_event,
+					   qxl_check_header(ring));
+			}
+
+		}
+		spin_lock_irqsave(&ring->lock, flags);
+	}
+
+	idx = header->prod & (ring->n_elements - 1);
+	elt = ring->ring->elements + idx * ring->element_size;
+
+	memcpy((void *)elt, new_elt, ring->element_size);
+
+	header->prod++;
+
+	mb();
+
+	if (header->prod == header->notify_on_prod)
+		outb(0, ring->prod_notify);
+
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return 0;
+}
+
+static bool qxl_ring_pop(struct qxl_ring *ring,
+			 void *element)
+{
+	volatile struct qxl_ring_header *header = &(ring->ring->header);
+	volatile uint8_t *ring_elt;
+	int idx;
+	unsigned long flags;
+	spin_lock_irqsave(&ring->lock, flags);
+	if (header->cons == header->prod) {
+		header->notify_on_prod = header->cons + 1;
+		spin_unlock_irqrestore(&ring->lock, flags);
+		return false;
+	}
+
+	idx = header->cons & (ring->n_elements - 1);
+	ring_elt = ring->ring->elements + idx * ring->element_size;
+
+	memcpy(element, (void *)ring_elt, ring->element_size);
+
+	header->cons++;
+
+	spin_unlock_irqrestore(&ring->lock, flags);
+	return true;
+}
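+
+/*
+ * Note on indexing (illustrative): prod and cons only ever increase and
+ * are masked with (n_elements - 1), so n_elements must be a power of
+ * two; e.g. with 32 elements, prod == 33 maps to slot 33 & 31 == 1.
+ */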
+
+int
+qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+			      uint32_t type, bool interruptible)
+{
+	struct qxl_command cmd;
+
+	cmd.type = type;
+	cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+
+	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
+}
+
+int
+qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+			     uint32_t type, bool interruptible)
+{
+	struct qxl_command cmd;
+
+	cmd.type = type;
+	cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+
+	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
+}
+
+bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
+{
+	if (!qxl_check_idle(qdev->release_ring)) {
+		queue_work(qdev->gc_queue, &qdev->gc_work);
+		if (flush)
+			flush_work(&qdev->gc_work);
+		return true;
+	}
+	return false;
+}
+
+int qxl_garbage_collect(struct qxl_device *qdev)
+{
+	struct qxl_release *release;
+	uint64_t id, next_id;
+	int i = 0;
+	int ret;
+	union qxl_release_info *info;
+
+	while (qxl_ring_pop(qdev->release_ring, &id)) {
+		QXL_INFO(qdev, "popped %lld\n", id);
+		while (id) {
+			release = qxl_release_from_id_locked(qdev, id);
+			if (release == NULL)
+				break;
+
+			ret = qxl_release_reserve(qdev, release, false);
+			if (ret) {
+				qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
+				DRM_ERROR("failed to reserve release %lld\n", id);
+			}
+
+			info = qxl_release_map(qdev, release);
+			next_id = info->next;
+			qxl_release_unmap(qdev, release, info);
+
+			qxl_release_unreserve(qdev, release);
+			QXL_INFO(qdev, "popped %lld, next %lld\n", id,
+				next_id);
+
+			switch (release->type) {
+			case QXL_RELEASE_DRAWABLE:
+			case QXL_RELEASE_SURFACE_CMD:
+			case QXL_RELEASE_CURSOR_CMD:
+				break;
+			default:
+				DRM_ERROR("unexpected release type\n");
+				break;
+			}
+			id = next_id;
+
+			qxl_release_free(qdev, release);
+			++i;
+		}
+	}
+
+	QXL_INFO(qdev, "%s: %lld\n", __func__, i);
+
+	return i;
+}
+
+int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+			  struct qxl_bo **_bo)
+{
+	struct qxl_bo *bo;
+	int ret;
+
+	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
+			    QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+	if (ret) {
+		DRM_ERROR("failed to allocate VRAM BO\n");
+		return ret;
+	}
+	ret = qxl_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		goto out_unref;
+
+	*_bo = bo;
+	return 0;
+out_unref:
+	qxl_bo_unref(&bo);
+	return ret;
+}
+
+static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
+{
+	int irq_num;
+	long addr = qdev->io_base + port;
+	int ret;
+
+	mutex_lock(&qdev->async_io_mutex);
+	irq_num = atomic_read(&qdev->irq_received_io_cmd);
+	if (qdev->last_sent_io_cmd > irq_num) {
+		if (intr)
+			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+		else
+			ret = wait_event_timeout(qdev->io_cmd_event,
+						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+		/* 0 is timeout; just bail, the "hw" has gone away */
+		if (ret <= 0)
+			goto out;
+		irq_num = atomic_read(&qdev->irq_received_io_cmd);
+	}
+	outb(val, addr);
+	qdev->last_sent_io_cmd = irq_num + 1;
+	if (intr)
+		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+	else
+		ret = wait_event_timeout(qdev->io_cmd_event,
+					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+out:
+	if (ret > 0)
+		ret = 0;
+	mutex_unlock(&qdev->async_io_mutex);
+	return ret;
+}
+
+static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
+{
+	int ret;
+
+restart:
+	ret = wait_for_io_cmd_user(qdev, val, port, false);
+	if (ret == -ERESTARTSYS)
+		goto restart;
+}
+
+int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
+			const struct qxl_rect *area)
+{
+	int surface_id;
+	uint32_t surface_width, surface_height;
+	int ret;
+
+	if (!surf->hw_surf_alloc)
+		DRM_ERROR("got io update area with no hw surface\n");
+
+	if (surf->is_primary)
+		surface_id = 0;
+	else
+		surface_id = surf->surface_id;
+	surface_width = surf->surf.width;
+	surface_height = surf->surf.height;
+
+	if (area->left < 0 || area->top < 0 ||
+	    area->right > surface_width || area->bottom > surface_height) {
+		qxl_io_log(qdev, "%s: not doing area update for "
+			   "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
+			   area->top, area->right, area->bottom, surface_width, surface_height);
+		return -EINVAL;
+	}
+	mutex_lock(&qdev->update_area_mutex);
+	qdev->ram_header->update_area = *area;
+	qdev->ram_header->update_surface = surface_id;
+	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
+	mutex_unlock(&qdev->update_area_mutex);
+	return ret;
+}
+
+void qxl_io_notify_oom(struct qxl_device *qdev)
+{
+	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
+}
+
+void qxl_io_flush_release(struct qxl_device *qdev)
+{
+	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
+}
+
+void qxl_io_flush_surfaces(struct qxl_device *qdev)
+{
+	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
+}
+
+
+void qxl_io_destroy_primary(struct qxl_device *qdev)
+{
+	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
+}
+
+void qxl_io_create_primary(struct qxl_device *qdev, unsigned width,
+			   unsigned height, unsigned offset, struct qxl_bo *bo)
+{
+	struct qxl_surface_create *create;
+
+	QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
+		 qdev->ram_header);
+	create = &qdev->ram_header->create_surface;
+	create->format = bo->surf.format;
+	create->width = width;
+	create->height = height;
+	create->stride = bo->surf.stride;
+	create->mem = qxl_bo_physical_address(qdev, bo, offset);
+
+	QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
+		 bo->kptr);
+
+	create->flags = QXL_SURF_FLAG_KEEP_DATA;
+	create->type = QXL_SURF_TYPE_PRIMARY;
+
+	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
+}
+
+void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
+{
+	QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
+	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
+}
+
+void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
+	va_end(args);
+	/*
+	 * Do not do a DRM output here - this will call printk, which will
+	 * call back into qxl for rendering (qxl_fb)
+	 */
+	outb(0, qdev->io_base + QXL_IO_LOG);
+}
+
+void qxl_io_reset(struct qxl_device *qdev)
+{
+	outb(0, qdev->io_base + QXL_IO_RESET);
+}
+
+void qxl_io_monitors_config(struct qxl_device *qdev)
+{
+	qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
+		   qdev->monitors_config ?
+		   qdev->monitors_config->count : -1,
+		   qdev->monitors_config && qdev->monitors_config->count ?
+		   qdev->monitors_config->heads[0].width : -1,
+		   qdev->monitors_config && qdev->monitors_config->count ?
+		   qdev->monitors_config->heads[0].height : -1,
+		   qdev->monitors_config && qdev->monitors_config->count ?
+		   qdev->monitors_config->heads[0].x : -1,
+		   qdev->monitors_config && qdev->monitors_config->count ?
+		   qdev->monitors_config->heads[0].y : -1
+		   );
+
+	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
+}
+
+int qxl_surface_id_alloc(struct qxl_device *qdev,
+		      struct qxl_bo *surf)
+{
+	uint32_t handle;
+	int idr_ret;
+	int count = 0;
+again:
+	idr_preload(GFP_ATOMIC);
+	spin_lock(&qdev->surf_id_idr_lock);
+	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
+	spin_unlock(&qdev->surf_id_idr_lock);
+	idr_preload_end();
+	if (idr_ret < 0)
+		return idr_ret;
+	handle = idr_ret;
+
+	if (handle >= qdev->rom->n_surfaces) {
+		count++;
+		spin_lock(&qdev->surf_id_idr_lock);
+		idr_remove(&qdev->surf_id_idr, handle);
+		spin_unlock(&qdev->surf_id_idr_lock);
+		qxl_reap_surface_id(qdev, 2);
+		goto again;
+	}
+	surf->surface_id = handle;
+
+	spin_lock(&qdev->surf_id_idr_lock);
+	qdev->last_alloced_surf_id = handle;
+	spin_unlock(&qdev->surf_id_idr_lock);
+	return 0;
+}
+
+void qxl_surface_id_dealloc(struct qxl_device *qdev,
+			    uint32_t surface_id)
+{
+	spin_lock(&qdev->surf_id_idr_lock);
+	idr_remove(&qdev->surf_id_idr, surface_id);
+	spin_unlock(&qdev->surf_id_idr_lock);
+}
+
+int qxl_hw_surface_alloc(struct qxl_device *qdev,
+			 struct qxl_bo *surf,
+			 struct ttm_mem_reg *new_mem)
+{
+	struct qxl_surface_cmd *cmd;
+	struct qxl_release *release;
+	int ret;
+
+	if (surf->hw_surf_alloc)
+		return 0;
+
+	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
+						 NULL,
+						 &release);
+	if (ret)
+		return ret;
+
+	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_SURFACE_CMD_CREATE;
+	cmd->u.surface_create.format = surf->surf.format;
+	cmd->u.surface_create.width = surf->surf.width;
+	cmd->u.surface_create.height = surf->surf.height;
+	cmd->u.surface_create.stride = surf->surf.stride;
+	if (new_mem) {
+		int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
+		struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
+
+		/* TODO - need to hold one of the locks to read tbo.offset */
+		cmd->u.surface_create.data = slot->high_bits;
+
+		cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
+	} else
+		cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
+	cmd->surface_id = surf->surface_id;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	surf->surf_create = release;
+
+	/* no need to add a release to the fence for this bo,
+	   since it is only released when we ask to destroy the surface
+	   and it would never signal otherwise */
+	qxl_fence_releaseable(qdev, release);
+
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+	qxl_release_unreserve(qdev, release);
+
+	surf->hw_surf_alloc = true;
+	spin_lock(&qdev->surf_id_idr_lock);
+	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
+	spin_unlock(&qdev->surf_id_idr_lock);
+	return 0;
+}
+
+int qxl_hw_surface_dealloc(struct qxl_device *qdev,
+			   struct qxl_bo *surf)
+{
+	struct qxl_surface_cmd *cmd;
+	struct qxl_release *release;
+	int ret;
+	int id;
+
+	if (!surf->hw_surf_alloc)
+		return 0;
+
+	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
+						 surf->surf_create,
+						 &release);
+	if (ret)
+		return ret;
+
+	surf->surf_create = NULL;
+	/* remove the surface from the idr, but not the surface id yet */
+	spin_lock(&qdev->surf_id_idr_lock);
+	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
+	spin_unlock(&qdev->surf_id_idr_lock);
+	surf->hw_surf_alloc = false;
+
+	id = surf->surface_id;
+	surf->surface_id = 0;
+
+	release->surface_release_id = id;
+	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_SURFACE_CMD_DESTROY;
+	cmd->surface_id = id;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	qxl_fence_releaseable(qdev, release);
+
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
+
+	qxl_release_unreserve(qdev, release);
+
+
+	return 0;
+}
+
+int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
+{
+	struct qxl_rect rect;
+	int ret;
+
+	/* if we are evicting, we need to make sure the surface is up
+	   to date */
+	rect.left = 0;
+	rect.right = surf->surf.width;
+	rect.top = 0;
+	rect.bottom = surf->surf.height;
+retry:
+	ret = qxl_io_update_area(qdev, surf, &rect);
+	if (ret == -ERESTARTSYS)
+		goto retry;
+	return ret;
+}
+
+static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
+{
+	/* no need to update area if we are just freeing the surface normally */
+	if (do_update_area)
+		qxl_update_surface(qdev, surf);
+
+	/* nuke the surface id at the hw */
+	qxl_hw_surface_dealloc(qdev, surf);
+}
+
+void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
+{
+	mutex_lock(&qdev->surf_evict_mutex);
+	qxl_surface_evict_locked(qdev, surf, do_update_area);
+	mutex_unlock(&qdev->surf_evict_mutex);
+}
+
+static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
+{
+	int ret;
+
+	ret = qxl_bo_reserve(surf, false);
+	if (ret == -EBUSY)
+		return -EBUSY;
+
+	if (surf->fence.num_active_releases > 0 && stall == false) {
+		qxl_bo_unreserve(surf);
+		return -EBUSY;
+	}
+
+	if (stall)
+		mutex_unlock(&qdev->surf_evict_mutex);
+
+	spin_lock(&surf->tbo.bdev->fence_lock);
+	ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
+	spin_unlock(&surf->tbo.bdev->fence_lock);
+
+	if (stall)
+		mutex_lock(&qdev->surf_evict_mutex);
+	if (ret == -EBUSY) {
+		qxl_bo_unreserve(surf);
+		return -EBUSY;
+	}
+
+	qxl_surface_evict_locked(qdev, surf, true);
+	qxl_bo_unreserve(surf);
+	return 0;
+}
+
+static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
+{
+	int num_reaped = 0;
+	int i, ret;
+	bool stall = false;
+	int start = 0;
+
+	mutex_lock(&qdev->surf_evict_mutex);
+again:
+
+	spin_lock(&qdev->surf_id_idr_lock);
+	start = qdev->last_alloced_surf_id + 1;
+	spin_unlock(&qdev->surf_id_idr_lock);
+
+	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
+		void *objptr;
+		int surfid = i % qdev->rom->n_surfaces;
+
+		/* this avoids the case where the object is in the
+		   idr but has been evicted halfway - it makes
+		   the idr lookup atomic with the eviction */
+		spin_lock(&qdev->surf_id_idr_lock);
+		objptr = idr_find(&qdev->surf_id_idr, surfid);
+		spin_unlock(&qdev->surf_id_idr_lock);
+
+		if (!objptr)
+			continue;
+
+		ret = qxl_reap_surf(qdev, objptr, stall);
+		if (ret == 0)
+			num_reaped++;
+		if (num_reaped >= max_to_reap)
+			break;
+	}
+	if (num_reaped == 0 && stall == false) {
+		stall = true;
+		goto again;
+	}
+
+	mutex_unlock(&qdev->surf_evict_mutex);
+	if (num_reaped) {
+		usleep_range(500, 1000);
+		qxl_queue_garbage_collect(qdev, true);
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_debugfs.c b/linux-imx/drivers/gpu/drm/qxl/qxl_debugfs.c
new file mode 100644
index 0000000..c3c2bbd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ *  Alon Levy <alevy@redhat.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "drmP.h"
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+
+#if defined(CONFIG_DEBUG_FS)
+static int
+qxl_debugfs_irq_received(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct qxl_device *qdev = node->minor->dev->dev_private;
+
+	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
+	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
+	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
+	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
+	seq_printf(m, "%d\n", qdev->irq_received_error);
+	return 0;
+}
+
+static int
+qxl_debugfs_buffers_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct qxl_device *qdev = node->minor->dev->dev_private;
+	struct qxl_bo *bo;
+
+	list_for_each_entry(bo, &qdev->gem.objects, list) {
+		seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n",
+			   (unsigned long)bo->gem_base.size, bo->pin_count,
+			   bo->tbo.sync_obj, bo->fence.num_active_releases);
+	}
+	return 0;
+}
+
+static struct drm_info_list qxl_debugfs_list[] = {
+	{ "irq_received", qxl_debugfs_irq_received, 0, NULL },
+	{ "qxl_buffers", qxl_debugfs_buffers_info, 0, NULL },
+};
+#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
+#endif
+
+int
+qxl_debugfs_init(struct drm_minor *minor)
+{
+#if defined(CONFIG_DEBUG_FS)
+	drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
+				 minor->debugfs_root, minor);
+#endif
+	return 0;
+}
+
+void
+qxl_debugfs_takedown(struct drm_minor *minor)
+{
+#if defined(CONFIG_DEBUG_FS)
+	drm_debugfs_remove_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
+				 minor);
+#endif
+}
+
+int qxl_debugfs_add_files(struct qxl_device *qdev,
+			  struct drm_info_list *files,
+			  unsigned nfiles)
+{
+	unsigned i;
+
+	for (i = 0; i < qdev->debugfs_count; i++) {
+		if (qdev->debugfs[i].files == files) {
+			/* Already registered */
+			return 0;
+		}
+	}
+
+	i = qdev->debugfs_count + 1;
+	if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
+		DRM_ERROR("Reached maximum number of debugfs components.\n");
+		DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
+		return -EINVAL;
+	}
+	qdev->debugfs[qdev->debugfs_count].files = files;
+	qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
+	qdev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+	drm_debugfs_create_files(files, nfiles,
+				 qdev->ddev->control->debugfs_root,
+				 qdev->ddev->control);
+	drm_debugfs_create_files(files, nfiles,
+				 qdev->ddev->primary->debugfs_root,
+				 qdev->ddev->primary);
+#endif
+	return 0;
+}
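+
+/*
+ * Usage sketch (illustrative, names hypothetical): a component
+ * registers its files once from its init path, e.g.
+ *
+ *	static struct drm_info_list my_debugfs_list[] = {
+ *		{ "my_state", my_debugfs_show, 0, NULL },
+ *	};
+ *
+ *	qxl_debugfs_add_files(qdev, my_debugfs_list,
+ *			      ARRAY_SIZE(my_debugfs_list));
+ */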
+
+void qxl_debugfs_remove_files(struct qxl_device *qdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	unsigned i;
+
+	for (i = 0; i < qdev->debugfs_count; i++) {
+		drm_debugfs_remove_files(qdev->debugfs[i].files,
+					 qdev->debugfs[i].num_files,
+					 qdev->ddev->control);
+		drm_debugfs_remove_files(qdev->debugfs[i].files,
+					 qdev->debugfs[i].num_files,
+					 qdev->ddev->primary);
+	}
+#endif
+}
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_dev.h b/linux-imx/drivers/gpu/drm/qxl/qxl_dev.h
new file mode 100644
index 0000000..94c5aec
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_dev.h
@@ -0,0 +1,879 @@
+/*
+   Copyright (C) 2009 Red Hat, Inc.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+	 notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above copyright
+	 notice, this list of conditions and the following disclaimer in
+	 the documentation and/or other materials provided with the
+	 distribution.
+       * Neither the name of the copyright holder nor the names of its
+	 contributors may be used to endorse or promote products derived
+	 from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
+   IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+   TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+   PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+
+#ifndef H_QXL_DEV
+#define H_QXL_DEV
+
+#include <linux/types.h>
+
+/*
+ * from spice-protocol
+ * Release 0.10.0
+ */
+
+/* enums.h */
+
+enum SpiceImageType {
+	SPICE_IMAGE_TYPE_BITMAP,
+	SPICE_IMAGE_TYPE_QUIC,
+	SPICE_IMAGE_TYPE_RESERVED,
+	SPICE_IMAGE_TYPE_LZ_PLT = 100,
+	SPICE_IMAGE_TYPE_LZ_RGB,
+	SPICE_IMAGE_TYPE_GLZ_RGB,
+	SPICE_IMAGE_TYPE_FROM_CACHE,
+	SPICE_IMAGE_TYPE_SURFACE,
+	SPICE_IMAGE_TYPE_JPEG,
+	SPICE_IMAGE_TYPE_FROM_CACHE_LOSSLESS,
+	SPICE_IMAGE_TYPE_ZLIB_GLZ_RGB,
+	SPICE_IMAGE_TYPE_JPEG_ALPHA,
+
+	SPICE_IMAGE_TYPE_ENUM_END
+};
+
+enum SpiceBitmapFmt {
+	SPICE_BITMAP_FMT_INVALID,
+	SPICE_BITMAP_FMT_1BIT_LE,
+	SPICE_BITMAP_FMT_1BIT_BE,
+	SPICE_BITMAP_FMT_4BIT_LE,
+	SPICE_BITMAP_FMT_4BIT_BE,
+	SPICE_BITMAP_FMT_8BIT,
+	SPICE_BITMAP_FMT_16BIT,
+	SPICE_BITMAP_FMT_24BIT,
+	SPICE_BITMAP_FMT_32BIT,
+	SPICE_BITMAP_FMT_RGBA,
+
+	SPICE_BITMAP_FMT_ENUM_END
+};
+
+enum SpiceSurfaceFmt {
+	SPICE_SURFACE_FMT_INVALID,
+	SPICE_SURFACE_FMT_1_A,
+	SPICE_SURFACE_FMT_8_A = 8,
+	SPICE_SURFACE_FMT_16_555 = 16,
+	SPICE_SURFACE_FMT_32_xRGB = 32,
+	SPICE_SURFACE_FMT_16_565 = 80,
+	SPICE_SURFACE_FMT_32_ARGB = 96,
+
+	SPICE_SURFACE_FMT_ENUM_END
+};
+
+enum SpiceClipType {
+	SPICE_CLIP_TYPE_NONE,
+	SPICE_CLIP_TYPE_RECTS,
+
+	SPICE_CLIP_TYPE_ENUM_END
+};
+
+enum SpiceRopd {
+	SPICE_ROPD_INVERS_SRC = (1 << 0),
+	SPICE_ROPD_INVERS_BRUSH = (1 << 1),
+	SPICE_ROPD_INVERS_DEST = (1 << 2),
+	SPICE_ROPD_OP_PUT = (1 << 3),
+	SPICE_ROPD_OP_OR = (1 << 4),
+	SPICE_ROPD_OP_AND = (1 << 5),
+	SPICE_ROPD_OP_XOR = (1 << 6),
+	SPICE_ROPD_OP_BLACKNESS = (1 << 7),
+	SPICE_ROPD_OP_WHITENESS = (1 << 8),
+	SPICE_ROPD_OP_INVERS = (1 << 9),
+	SPICE_ROPD_INVERS_RES = (1 << 10),
+
+	SPICE_ROPD_MASK = 0x7ff
+};
+
+enum SpiceBrushType {
+	SPICE_BRUSH_TYPE_NONE,
+	SPICE_BRUSH_TYPE_SOLID,
+	SPICE_BRUSH_TYPE_PATTERN,
+
+	SPICE_BRUSH_TYPE_ENUM_END
+};
+
+enum SpiceCursorType {
+	SPICE_CURSOR_TYPE_ALPHA,
+	SPICE_CURSOR_TYPE_MONO,
+	SPICE_CURSOR_TYPE_COLOR4,
+	SPICE_CURSOR_TYPE_COLOR8,
+	SPICE_CURSOR_TYPE_COLOR16,
+	SPICE_CURSOR_TYPE_COLOR24,
+	SPICE_CURSOR_TYPE_COLOR32,
+
+	SPICE_CURSOR_TYPE_ENUM_END
+};
+
+/* qxl_dev.h */
+
+#pragma pack(push, 1)
+
+#define REDHAT_PCI_VENDOR_ID 0x1b36
+
+/* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */
+#define QXL_DEVICE_ID_STABLE 0x0100
+
+enum {
+	QXL_REVISION_STABLE_V04 = 0x01,
+	QXL_REVISION_STABLE_V06 = 0x02,
+	QXL_REVISION_STABLE_V10 = 0x03,
+	QXL_REVISION_STABLE_V12 = 0x04,
+};
+
+#define QXL_DEVICE_ID_DEVEL 0x01ff
+#define QXL_REVISION_DEVEL 0x01
+
+#define QXL_ROM_MAGIC (*(uint32_t *)"QXRO")
+#define QXL_RAM_MAGIC (*(uint32_t *)"QXRA")
+
+enum {
+	QXL_RAM_RANGE_INDEX,
+	QXL_VRAM_RANGE_INDEX,
+	QXL_ROM_RANGE_INDEX,
+	QXL_IO_RANGE_INDEX,
+
+	QXL_PCI_RANGES
+};
+
+/* qxl-1 compat: append only */
+enum {
+	QXL_IO_NOTIFY_CMD,
+	QXL_IO_NOTIFY_CURSOR,
+	QXL_IO_UPDATE_AREA,
+	QXL_IO_UPDATE_IRQ,
+	QXL_IO_NOTIFY_OOM,
+	QXL_IO_RESET,
+	QXL_IO_SET_MODE,                  /* qxl-1 */
+	QXL_IO_LOG,
+	/* appended for qxl-2 */
+	QXL_IO_MEMSLOT_ADD,
+	QXL_IO_MEMSLOT_DEL,
+	QXL_IO_DETACH_PRIMARY,
+	QXL_IO_ATTACH_PRIMARY,
+	QXL_IO_CREATE_PRIMARY,
+	QXL_IO_DESTROY_PRIMARY,
+	QXL_IO_DESTROY_SURFACE_WAIT,
+	QXL_IO_DESTROY_ALL_SURFACES,
+	/* appended for qxl-3 */
+	QXL_IO_UPDATE_AREA_ASYNC,
+	QXL_IO_MEMSLOT_ADD_ASYNC,
+	QXL_IO_CREATE_PRIMARY_ASYNC,
+	QXL_IO_DESTROY_PRIMARY_ASYNC,
+	QXL_IO_DESTROY_SURFACE_ASYNC,
+	QXL_IO_DESTROY_ALL_SURFACES_ASYNC,
+	QXL_IO_FLUSH_SURFACES_ASYNC,
+	QXL_IO_FLUSH_RELEASE,
+	/* appended for qxl-4 */
+	QXL_IO_MONITORS_CONFIG_ASYNC,
+
+	QXL_IO_RANGE_SIZE
+};
+
+typedef uint64_t QXLPHYSICAL;
+typedef int32_t QXLFIXED; /* fixed 28.4 */
+
+struct qxl_point_fix {
+	QXLFIXED x;
+	QXLFIXED y;
+};
+
+struct qxl_point {
+	int32_t x;
+	int32_t y;
+};
+
+struct qxl_point_1_6 {
+	int16_t x;
+	int16_t y;
+};
+
+struct qxl_rect {
+	int32_t top;
+	int32_t left;
+	int32_t bottom;
+	int32_t right;
+};
+
+struct qxl_urect {
+	uint32_t top;
+	uint32_t left;
+	uint32_t bottom;
+	uint32_t right;
+};
+
+/* qxl-1 compat: append only */
+struct qxl_rom {
+	uint32_t magic;
+	uint32_t id;
+	uint32_t update_id;
+	uint32_t compression_level;
+	uint32_t log_level;
+	uint32_t mode;			  /* qxl-1 */
+	uint32_t modes_offset;
+	uint32_t num_io_pages;
+	uint32_t pages_offset;		  /* qxl-1 */
+	uint32_t draw_area_offset;	  /* qxl-1 */
+	uint32_t surface0_area_size;	  /* qxl-1 name: draw_area_size */
+	uint32_t ram_header_offset;
+	uint32_t mm_clock;
+	/* appended for qxl-2 */
+	uint32_t n_surfaces;
+	uint64_t flags;
+	uint8_t slots_start;
+	uint8_t slots_end;
+	uint8_t slot_gen_bits;
+	uint8_t slot_id_bits;
+	uint8_t slot_generation;
+	/* appended for qxl-4 */
+	uint8_t client_present;
+	uint8_t client_capabilities[58];
+	uint32_t client_monitors_config_crc;
+	struct {
+		uint16_t count;
+	uint16_t padding;
+		struct qxl_urect heads[64];
+	} client_monitors_config;
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_mode {
+	uint32_t id;
+	uint32_t x_res;
+	uint32_t y_res;
+	uint32_t bits;
+	uint32_t stride;
+	uint32_t x_mili;
+	uint32_t y_mili;
+	uint32_t orientation;
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_modes {
+	uint32_t n_modes;
+	struct qxl_mode modes[0];
+};
+
+/* qxl-1 compat: append only */
+enum qxl_cmd_type {
+	QXL_CMD_NOP,
+	QXL_CMD_DRAW,
+	QXL_CMD_UPDATE,
+	QXL_CMD_CURSOR,
+	QXL_CMD_MESSAGE,
+	QXL_CMD_SURFACE,
+};
+
+/* qxl-1 compat: fixed */
+struct qxl_command {
+	QXLPHYSICAL data;
+	uint32_t type;
+	uint32_t padding;
+};
+
+#define QXL_COMMAND_FLAG_COMPAT		(1<<0)
+#define QXL_COMMAND_FLAG_COMPAT_16BPP	(2<<0)
+
+struct qxl_command_ext {
+	struct qxl_command cmd;
+	uint32_t group_id;
+	uint32_t flags;
+};
+
+struct qxl_mem_slot {
+	uint64_t mem_start;
+	uint64_t mem_end;
+};
+
+#define QXL_SURF_TYPE_PRIMARY	   0
+
+#define QXL_SURF_FLAG_KEEP_DATA	   (1 << 0)
+
+struct qxl_surface_create {
+	uint32_t width;
+	uint32_t height;
+	int32_t stride;
+	uint32_t format;
+	uint32_t position;
+	uint32_t mouse_mode;
+	uint32_t flags;
+	uint32_t type;
+	QXLPHYSICAL mem;
+};
+
+#define QXL_COMMAND_RING_SIZE 32
+#define QXL_CURSOR_RING_SIZE 32
+#define QXL_RELEASE_RING_SIZE 8
+
+#define QXL_LOG_BUF_SIZE 4096
+
+#define QXL_INTERRUPT_DISPLAY (1 << 0)
+#define QXL_INTERRUPT_CURSOR (1 << 1)
+#define QXL_INTERRUPT_IO_CMD (1 << 2)
+#define QXL_INTERRUPT_ERROR  (1 << 3)
+#define QXL_INTERRUPT_CLIENT (1 << 4)
+#define QXL_INTERRUPT_CLIENT_MONITORS_CONFIG  (1 << 5)
+
+struct qxl_ring_header {
+	uint32_t num_items;
+	uint32_t prod;
+	uint32_t notify_on_prod;
+	uint32_t cons;
+	uint32_t notify_on_cons;
+};
+
+/* qxl-1 compat: append only */
+struct qxl_ram_header {
+	uint32_t magic;
+	uint32_t int_pending;
+	uint32_t int_mask;
+	uint8_t log_buf[QXL_LOG_BUF_SIZE];
+	struct qxl_ring_header  cmd_ring_hdr;
+	struct qxl_command	cmd_ring[QXL_COMMAND_RING_SIZE];
+	struct qxl_ring_header  cursor_ring_hdr;
+	struct qxl_command	cursor_ring[QXL_CURSOR_RING_SIZE];
+	struct qxl_ring_header  release_ring_hdr;
+	uint64_t		release_ring[QXL_RELEASE_RING_SIZE];
+	struct qxl_rect update_area;
+	/* appended for qxl-2 */
+	uint32_t update_surface;
+	struct qxl_mem_slot mem_slot;
+	struct qxl_surface_create create_surface;
+	uint64_t flags;
+
+	/* appended for qxl-4 */
+
+	/* used by QXL_IO_MONITORS_CONFIG_ASYNC */
+	QXLPHYSICAL monitors_config;
+	uint8_t guest_capabilities[64];
+};
+
+union qxl_release_info {
+	uint64_t id;	  /* in  */
+	uint64_t next;	  /* out */
+};
+
+struct qxl_release_info_ext {
+	union qxl_release_info *info;
+	uint32_t group_id;
+};
+
+struct qxl_data_chunk {
+	uint32_t data_size;
+	QXLPHYSICAL prev_chunk;
+	QXLPHYSICAL next_chunk;
+	uint8_t data[0];
+};
+
+struct qxl_message {
+	union qxl_release_info release_info;
+	uint8_t data[0];
+};
+
+struct qxl_compat_update_cmd {
+	union qxl_release_info release_info;
+	struct qxl_rect area;
+	uint32_t update_id;
+};
+
+struct qxl_update_cmd {
+	union qxl_release_info release_info;
+	struct qxl_rect area;
+	uint32_t update_id;
+	uint32_t surface_id;
+};
+
+struct qxl_cursor_header {
+	uint64_t unique;
+	uint16_t type;
+	uint16_t width;
+	uint16_t height;
+	uint16_t hot_spot_x;
+	uint16_t hot_spot_y;
+};
+
+struct qxl_cursor {
+	struct qxl_cursor_header header;
+	uint32_t data_size;
+	struct qxl_data_chunk chunk;
+};
+
+enum {
+	QXL_CURSOR_SET,
+	QXL_CURSOR_MOVE,
+	QXL_CURSOR_HIDE,
+	QXL_CURSOR_TRAIL,
+};
+
+#define QXL_CURSOR_DEVICE_DATA_SIZE 128
+
+struct qxl_cursor_cmd {
+	union qxl_release_info release_info;
+	uint8_t type;
+	union {
+		struct {
+			struct qxl_point_1_6 position;
+			uint8_t visible;
+			QXLPHYSICAL shape;
+		} set;
+		struct {
+			uint16_t length;
+			uint16_t frequency;
+		} trail;
+		struct qxl_point_1_6 position;
+	} u;
+	/* todo: dynamic size from rom */
+	uint8_t device_data[QXL_CURSOR_DEVICE_DATA_SIZE];
+};
+
+enum {
+	QXL_DRAW_NOP,
+	QXL_DRAW_FILL,
+	QXL_DRAW_OPAQUE,
+	QXL_DRAW_COPY,
+	QXL_COPY_BITS,
+	QXL_DRAW_BLEND,
+	QXL_DRAW_BLACKNESS,
+	QXL_DRAW_WHITENESS,
+	QXL_DRAW_INVERS,
+	QXL_DRAW_ROP3,
+	QXL_DRAW_STROKE,
+	QXL_DRAW_TEXT,
+	QXL_DRAW_TRANSPARENT,
+	QXL_DRAW_ALPHA_BLEND,
+	QXL_DRAW_COMPOSITE
+};
+
+struct qxl_raster_glyph {
+	struct qxl_point render_pos;
+	struct qxl_point glyph_origin;
+	uint16_t width;
+	uint16_t height;
+	uint8_t data[0];
+};
+
+struct qxl_string {
+	uint32_t data_size;
+	uint16_t length;
+	uint16_t flags;
+	struct qxl_data_chunk chunk;
+};
+
+struct qxl_copy_bits {
+	struct qxl_point src_pos;
+};
+
+enum qxl_effect_type {
+	QXL_EFFECT_BLEND = 0,
+	QXL_EFFECT_OPAQUE = 1,
+	QXL_EFFECT_REVERT_ON_DUP = 2,
+	QXL_EFFECT_BLACKNESS_ON_DUP = 3,
+	QXL_EFFECT_WHITENESS_ON_DUP = 4,
+	QXL_EFFECT_NOP_ON_DUP = 5,
+	QXL_EFFECT_NOP = 6,
+	QXL_EFFECT_OPAQUE_BRUSH = 7
+};
+
+struct qxl_pattern {
+	QXLPHYSICAL pat;
+	struct qxl_point pos;
+};
+
+struct qxl_brush {
+	uint32_t type;
+	union {
+		uint32_t color;
+		struct qxl_pattern pattern;
+	} u;
+};
+
+struct qxl_q_mask {
+	uint8_t flags;
+	struct qxl_point pos;
+	QXLPHYSICAL bitmap;
+};
+
+struct qxl_fill {
+	struct qxl_brush brush;
+	uint16_t rop_descriptor;
+	struct qxl_q_mask mask;
+};
+
+struct qxl_opaque {
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+	struct qxl_brush brush;
+	uint16_t rop_descriptor;
+	uint8_t scale_mode;
+	struct qxl_q_mask mask;
+};
+
+struct qxl_copy {
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+	uint16_t rop_descriptor;
+	uint8_t scale_mode;
+	struct qxl_q_mask mask;
+};
+
+struct qxl_transparent {
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+	uint32_t src_color;
+	uint32_t true_color;
+};
+
+struct qxl_alpha_blend {
+	uint16_t alpha_flags;
+	uint8_t alpha;
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+};
+
+struct qxl_compat_alpha_blend {
+	uint8_t alpha;
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+};
+
+struct qxl_rop_3 {
+	QXLPHYSICAL src_bitmap;
+	struct qxl_rect src_area;
+	struct qxl_brush brush;
+	uint8_t rop3;
+	uint8_t scale_mode;
+	struct qxl_q_mask mask;
+};
+
+struct qxl_line_attr {
+	uint8_t flags;
+	uint8_t join_style;
+	uint8_t end_style;
+	uint8_t style_nseg;
+	QXLFIXED width;
+	QXLFIXED miter_limit;
+	QXLPHYSICAL style;
+};
+
+struct qxl_stroke {
+	QXLPHYSICAL path;
+	struct qxl_line_attr attr;
+	struct qxl_brush brush;
+	uint16_t fore_mode;
+	uint16_t back_mode;
+};
+
+struct qxl_text {
+	QXLPHYSICAL str;
+	struct qxl_rect back_area;
+	struct qxl_brush fore_brush;
+	struct qxl_brush back_brush;
+	uint16_t fore_mode;
+	uint16_t back_mode;
+};
+
+struct qxl_mask {
+	struct qxl_q_mask mask;
+};
+
+struct qxl_clip {
+	uint32_t type;
+	QXLPHYSICAL data;
+};
+
+enum qxl_operator {
+	QXL_OP_CLEAR			 = 0x00,
+	QXL_OP_SOURCE			 = 0x01,
+	QXL_OP_DST			 = 0x02,
+	QXL_OP_OVER			 = 0x03,
+	QXL_OP_OVER_REVERSE		 = 0x04,
+	QXL_OP_IN			 = 0x05,
+	QXL_OP_IN_REVERSE		 = 0x06,
+	QXL_OP_OUT			 = 0x07,
+	QXL_OP_OUT_REVERSE		 = 0x08,
+	QXL_OP_ATOP			 = 0x09,
+	QXL_OP_ATOP_REVERSE		 = 0x0a,
+	QXL_OP_XOR			 = 0x0b,
+	QXL_OP_ADD			 = 0x0c,
+	QXL_OP_SATURATE			 = 0x0d,
+	/* Note the jump here from 0x0d to 0x30 */
+	QXL_OP_MULTIPLY			 = 0x30,
+	QXL_OP_SCREEN			 = 0x31,
+	QXL_OP_OVERLAY			 = 0x32,
+	QXL_OP_DARKEN			 = 0x33,
+	QXL_OP_LIGHTEN			 = 0x34,
+	QXL_OP_COLOR_DODGE		 = 0x35,
+	QXL_OP_COLOR_BURN		 = 0x36,
+	QXL_OP_HARD_LIGHT		 = 0x37,
+	QXL_OP_SOFT_LIGHT		 = 0x38,
+	QXL_OP_DIFFERENCE		 = 0x39,
+	QXL_OP_EXCLUSION		 = 0x3a,
+	QXL_OP_HSL_HUE			 = 0x3b,
+	QXL_OP_HSL_SATURATION		 = 0x3c,
+	QXL_OP_HSL_COLOR		 = 0x3d,
+	QXL_OP_HSL_LUMINOSITY		 = 0x3e
+};
+
+struct qxl_transform {
+	uint32_t	t00;
+	uint32_t	t01;
+	uint32_t	t02;
+	uint32_t	t10;
+	uint32_t	t11;
+	uint32_t	t12;
+};
+
+/* The flags field has the following bit fields:
+ *
+ *     operator:		[  0 -  7 ]
+ *     src_filter:		[  8 - 10 ]
+ *     mask_filter:		[ 11 - 13 ]
+ *     src_repeat:		[ 14 - 15 ]
+ *     mask_repeat:		[ 16 - 17 ]
+ *     component_alpha:		[ 18 - 18 ]
+ *     reserved:		[ 19 - 31 ]
+ *
+ * The repeat and filter values are those of pixman:
+ *		REPEAT_NONE =		0
+ *		REPEAT_NORMAL =		1
+ *		REPEAT_PAD =		2
+ *		REPEAT_REFLECT =	3
+ *
+ * The filter values are:
+ *		FILTER_NEAREST =	0
+ *		FILTER_BILINEAR	=	1
+ */
+struct qxl_composite {
+	uint32_t		flags;
+
+	QXLPHYSICAL			src;
+	QXLPHYSICAL			src_transform;	/* May be NULL */
+	QXLPHYSICAL			mask;		/* May be NULL */
+	QXLPHYSICAL			mask_transform;	/* May be NULL */
+	struct qxl_point_1_6	src_origin;
+	struct qxl_point_1_6	mask_origin;
+};
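+
+/*
+ * Illustrative accessors for the bit layout documented above; the macro
+ * names are this file's invention, not spice-protocol API.
+ */
+#define QXL_COMPOSITE_OP(flags)			((flags) & 0xff)
+#define QXL_COMPOSITE_SRC_FILTER(flags)		(((flags) >> 8) & 0x7)
+#define QXL_COMPOSITE_MASK_FILTER(flags)	(((flags) >> 11) & 0x7)
+#define QXL_COMPOSITE_SRC_REPEAT(flags)		(((flags) >> 14) & 0x3)
+#define QXL_COMPOSITE_MASK_REPEAT(flags)	(((flags) >> 16) & 0x3)
+#define QXL_COMPOSITE_COMPONENT_ALPHA(flags)	(((flags) >> 18) & 0x1)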
+
+struct qxl_compat_drawable {
+	union qxl_release_info release_info;
+	uint8_t effect;
+	uint8_t type;
+	uint16_t bitmap_offset;
+	struct qxl_rect bitmap_area;
+	struct qxl_rect bbox;
+	struct qxl_clip clip;
+	uint32_t mm_time;
+	union {
+		struct qxl_fill fill;
+		struct qxl_opaque opaque;
+		struct qxl_copy copy;
+		struct qxl_transparent transparent;
+		struct qxl_compat_alpha_blend alpha_blend;
+		struct qxl_copy_bits copy_bits;
+		struct qxl_copy blend;
+		struct qxl_rop_3 rop3;
+		struct qxl_stroke stroke;
+		struct qxl_text text;
+		struct qxl_mask blackness;
+		struct qxl_mask invers;
+		struct qxl_mask whiteness;
+	} u;
+};
+
+struct qxl_drawable {
+	union qxl_release_info release_info;
+	uint32_t surface_id;
+	uint8_t effect;
+	uint8_t type;
+	uint8_t self_bitmap;
+	struct qxl_rect self_bitmap_area;
+	struct qxl_rect bbox;
+	struct qxl_clip clip;
+	uint32_t mm_time;
+	int32_t surfaces_dest[3];
+	struct qxl_rect surfaces_rects[3];
+	union {
+		struct qxl_fill fill;
+		struct qxl_opaque opaque;
+		struct qxl_copy copy;
+		struct qxl_transparent transparent;
+		struct qxl_alpha_blend alpha_blend;
+		struct qxl_copy_bits copy_bits;
+		struct qxl_copy blend;
+		struct qxl_rop_3 rop3;
+		struct qxl_stroke stroke;
+		struct qxl_text text;
+		struct qxl_mask blackness;
+		struct qxl_mask invers;
+		struct qxl_mask whiteness;
+		struct qxl_composite composite;
+	} u;
+};
+
+enum qxl_surface_cmd_type {
+	QXL_SURFACE_CMD_CREATE,
+	QXL_SURFACE_CMD_DESTROY,
+};
+
+struct qxl_surface {
+	uint32_t format;
+	uint32_t width;
+	uint32_t height;
+	int32_t stride;
+	QXLPHYSICAL data;
+};
+
+struct qxl_surface_cmd {
+	union qxl_release_info release_info;
+	uint32_t surface_id;
+	uint8_t type;
+	uint32_t flags;
+	union {
+		struct qxl_surface surface_create;
+	} u;
+};
+
+struct qxl_clip_rects {
+	uint32_t num_rects;
+	struct qxl_data_chunk chunk;
+};
+
+enum {
+	QXL_PATH_BEGIN = (1 << 0),
+	QXL_PATH_END = (1 << 1),
+	QXL_PATH_CLOSE = (1 << 3),
+	QXL_PATH_BEZIER = (1 << 4),
+};
+
+struct qxl_path_seg {
+	uint32_t flags;
+	uint32_t count;
+	struct qxl_point_fix points[0];
+};
+
+struct qxl_path {
+	uint32_t data_size;
+	struct qxl_data_chunk chunk;
+};
+
+enum {
+	QXL_IMAGE_GROUP_DRIVER,
+	QXL_IMAGE_GROUP_DEVICE,
+	QXL_IMAGE_GROUP_RED,
+	QXL_IMAGE_GROUP_DRIVER_DONT_CACHE,
+};
+
+struct qxl_image_id {
+	uint32_t group;
+	uint32_t unique;
+};
+
+union qxl_image_id_union {
+	struct qxl_image_id id;
+	uint64_t value;
+};
+
+enum qxl_image_flags {
+	QXL_IMAGE_CACHE = (1 << 0),
+	QXL_IMAGE_HIGH_BITS_SET = (1 << 1),
+};
+
+enum qxl_bitmap_flags {
+	QXL_BITMAP_DIRECT = (1 << 0),
+	QXL_BITMAP_UNSTABLE = (1 << 1),
+	QXL_BITMAP_TOP_DOWN = (1 << 2), /* == SPICE_BITMAP_FLAGS_TOP_DOWN */
+};
+
+#define QXL_SET_IMAGE_ID(image, _group, _unique) {              \
+	(image)->descriptor.id = (((uint64_t)_unique) << 32) | _group;	\
+}
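+
+/* The packed id round-trips through qxl_image_id_union above: on a
+ * little-endian guest (an assumption, though qxl targets such guests)
+ * QXL_SET_IMAGE_ID(img, QXL_IMAGE_GROUP_DEVICE, 7) yields
+ * id.group == QXL_IMAGE_GROUP_DEVICE and id.unique == 7. */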
+
+struct qxl_image_descriptor {
+	uint64_t id;
+	uint8_t type;
+	uint8_t flags;
+	uint32_t width;
+	uint32_t height;
+};
+
+struct qxl_palette {
+	uint64_t unique;
+	uint16_t num_ents;
+	uint32_t ents[0];
+};
+
+struct qxl_bitmap {
+	uint8_t format;
+	uint8_t flags;
+	uint32_t x;
+	uint32_t y;
+	uint32_t stride;
+	QXLPHYSICAL palette;
+	QXLPHYSICAL data; /* data[0] ? */
+};
+
+struct qxl_surface_id {
+	uint32_t surface_id;
+};
+
+struct qxl_encoder_data {
+	uint32_t data_size;
+	uint8_t data[0];
+};
+
+struct qxl_image {
+	struct qxl_image_descriptor descriptor;
+	union { /* variable length */
+		struct qxl_bitmap bitmap;
+		struct qxl_encoder_data quic;
+		struct qxl_surface_id surface_image;
+	} u;
+};
+
+/* A QXLHead is a single monitor output backed by a QXLSurface.
+ * The x and y offsets are unsigned because they are relative to the
+ * given surface, not to the x, y coordinates of the guest screen
+ * reference frame. */
+struct qxl_head {
+	uint32_t id;
+	uint32_t surface_id;
+	uint32_t width;
+	uint32_t height;
+	uint32_t x;
+	uint32_t y;
+	uint32_t flags;
+};
+
+struct qxl_monitors_config {
+	uint16_t count;
+	uint16_t max_allowed; /* 0 means the driver gives no fixed limit */
+	struct qxl_head heads[0];
+};
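+
+/* A config for n heads occupies
+ *	sizeof(struct qxl_monitors_config) + n * sizeof(struct qxl_head)
+ * bytes; qxl_modeset_init() and qxl_alloc_client_monitors_config()
+ * size their allocations exactly this way. */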
+
+#pragma pack(pop)
+
+#endif /* _H_QXL_DEV */
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_display.c b/linux-imx/drivers/gpu/drm/qxl/qxl_display.c
new file mode 100644
index 0000000..823d29e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_display.c
@@ -0,0 +1,973 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+
+#include "linux/crc32.h"
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+#include "drm_crtc_helper.h"
+
+static void qxl_crtc_set_to_mode(struct qxl_device *qdev,
+				 struct drm_connector *connector,
+				 struct qxl_head *head)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode, *t;
+	int width = head->width;
+	int height = head->height;
+
+	if (width < 320 || height < 240) {
+		qxl_io_log(qdev, "%s: bad head: %dx%d", width, height);
+		width = 1024;
+		height = 768;
+	}
+	if (width * height * 4 > 16*1024*1024) {
+		width = 1024;
+		height = 768;
+	}
+	/* TODO: go over regular modes and removed preferred? */
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
+		drm_mode_remove(connector, mode);
+	mode = drm_cvt_mode(dev, width, height, 60, false, false, false);
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+	mode->status = MODE_OK;
+	drm_mode_probed_add(connector, mode);
+	qxl_io_log(qdev, "%s: %d x %d\n", __func__, width, height);
+}
+
+void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev)
+{
+	struct drm_connector *connector;
+	int i;
+	struct drm_device *dev = qdev->ddev;
+
+	i = 0;
+	qxl_io_log(qdev, "%s: %d, %d\n", __func__,
+		   dev->mode_config.num_connector,
+		   qdev->monitors_config->count);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (i >= qdev->monitors_config->count) {
+			/* crtc will be reported as disabled */
+			continue;
+		}
+		qxl_crtc_set_to_mode(qdev, connector,
+				     &qdev->monitors_config->heads[i]);
+		++i;
+	}
+}
+
+void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
+{
+	if (qdev->client_monitors_config &&
+	    count > qdev->client_monitors_config->count) {
+		kfree(qdev->client_monitors_config);
+		qdev->client_monitors_config = NULL;
+	}
+	if (!qdev->client_monitors_config) {
+		qdev->client_monitors_config = kzalloc(
+				sizeof(struct qxl_monitors_config) +
+				sizeof(struct qxl_head) * count, GFP_KERNEL);
+		if (!qdev->client_monitors_config) {
+			qxl_io_log(qdev,
+				   "%s: allocation failure for %u heads\n",
+				   __func__, count);
+			return;
+		}
+	}
+	qdev->client_monitors_config->count = count;
+}
+
+static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
+{
+	int i;
+	int num_monitors;
+	uint32_t crc;
+
+	BUG_ON(!qdev->monitors_config);
+	num_monitors = qdev->rom->client_monitors_config.count;
+	crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
+		  sizeof(qdev->rom->client_monitors_config));
+	if (crc != qdev->rom->client_monitors_config_crc) {
+		qxl_io_log(qdev, "crc mismatch: have %X (%d) != %X\n", crc,
+			   sizeof(qdev->rom->client_monitors_config),
+			   qdev->rom->client_monitors_config_crc);
+		return 1;
+	}
+	if (num_monitors > qdev->monitors_config->max_allowed) {
+		DRM_INFO("client monitors list will be truncated: %d < %d\n",
+			 qdev->monitors_config->max_allowed, num_monitors);
+		num_monitors = qdev->monitors_config->max_allowed;
+	}
+	qxl_alloc_client_monitors_config(qdev, num_monitors);
+	/* we copy max from the client but it isn't used */
+	qdev->client_monitors_config->max_allowed =
+				qdev->monitors_config->max_allowed;
+	for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) {
+		struct qxl_urect *c_rect =
+			&qdev->rom->client_monitors_config.heads[i];
+		struct qxl_head *client_head =
+			&qdev->client_monitors_config->heads[i];
+		struct qxl_head *head = &qdev->monitors_config->heads[i];
+		client_head->x = head->x = c_rect->left;
+		client_head->y = head->y = c_rect->top;
+		client_head->width = head->width =
+						c_rect->right - c_rect->left;
+		client_head->height = head->height =
+						c_rect->bottom - c_rect->top;
+		client_head->surface_id = head->surface_id = 0;
+		client_head->id = head->id = i;
+		client_head->flags = head->flags = 0;
+		QXL_DEBUG(qdev, "read %dx%d+%d+%d\n", head->width, head->height,
+			  head->x, head->y);
+	}
+	return 0;
+}
+
+void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
+{
+
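+	/* The host updates client_monitors_config asynchronously, so the
+	 * crc guards against torn reads.  Note there is no retry limit: a
+	 * host that never publishes a consistent config would spin here. */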
+	while (qxl_display_copy_rom_client_monitors_config(qdev)) {
+		qxl_io_log(qdev, "failed crc check for client_monitors_config,"
+				 " retrying\n");
+	}
+	qxl_crtc_set_from_monitors_config(qdev);
+	/* fire off a uevent and let userspace tell us what to do */
+	qxl_io_log(qdev, "calling drm_sysfs_hotplug_event\n");
+	drm_sysfs_hotplug_event(qdev->ddev);
+}
+
+static int qxl_add_monitors_config_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_output *output = drm_connector_to_qxl_output(connector);
+	int h = output->index;
+	struct drm_display_mode *mode = NULL;
+	struct qxl_head *head;
+
+	if (!qdev->monitors_config)
+		return 0;
+	head = &qdev->monitors_config->heads[h];
+
+	mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false,
+			    false);
+	mode->type |= DRM_MODE_TYPE_PREFERRED;
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
+static int qxl_add_common_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *mode = NULL;
+	int i;
+	struct mode_size {
+		int w;
+		int h;
+	} common_modes[] = {
+		{ 640,  480},
+		{ 720,  480},
+		{ 800,  600},
+		{ 848,  480},
+		{1024,  768},
+		{1152,  768},
+		{1280,  720},
+		{1280,  800},
+		{1280,  854},
+		{1280,  960},
+		{1280, 1024},
+		{1440,  900},
+		{1400, 1050},
+		{1680, 1050},
+		{1600, 1200},
+		{1920, 1080},
+		{1920, 1200}
+	};
+
+	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+		if (common_modes[i].w < 320 || common_modes[i].h < 200)
+			continue;
+
+		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
+				    60, false, false, false);
+		if (common_modes[i].w == 1024 && common_modes[i].h == 768)
+			mode->type |= DRM_MODE_TYPE_PREFERRED;
+		drm_mode_probed_add(connector, mode);
+	}
+	return i;
+}
+
+static void qxl_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+			       u16 *blue, uint32_t start, uint32_t size)
+{
+	/* TODO */
+}
+
+static void qxl_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	kfree(qxl_crtc);
+}
+
+static void
+qxl_hide_cursor(struct qxl_device *qdev)
+{
+	struct qxl_release *release;
+	struct qxl_cursor_cmd *cmd;
+	int ret;
+
+	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
+					 &release, NULL);
+	if (ret)
+		return;
+
+	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_CURSOR_HIDE;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+	qxl_release_unreserve(qdev, release);
+}
+
+static int qxl_crtc_cursor_set(struct drm_crtc *crtc,
+			       struct drm_file *file_priv,
+			       uint32_t handle,
+			       uint32_t width,
+			       uint32_t height)
+{
+	struct drm_device *dev = crtc->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+	struct drm_gem_object *obj;
+	struct qxl_cursor *cursor;
+	struct qxl_cursor_cmd *cmd;
+	struct qxl_bo *cursor_bo, *user_bo;
+	struct qxl_release *release;
+	void *user_ptr;
+
+	int size = 64*64*4;
+	int ret = 0;
+
+	if (!handle) {
+		qxl_hide_cursor(qdev);
+		return 0;
+	}
+
+	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("cannot find cursor object\n");
+		return -ENOENT;
+	}
+
+	user_bo = gem_to_qxl_bo(obj);
+
+	ret = qxl_bo_reserve(user_bo, false);
+	if (ret)
+		goto out_unref;
+
+	ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+	if (ret)
+		goto out_unreserve;
+
+	ret = qxl_bo_kmap(user_bo, &user_ptr);
+	if (ret)
+		goto out_unpin;
+
+	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+					 QXL_RELEASE_CURSOR_CMD,
+					 &release, NULL);
+	if (ret)
+		goto out_kunmap;
+	ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
+				    &cursor_bo);
+	if (ret)
+		goto out_free_release;
+	ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+	if (ret)
+		goto out_free_bo;
+
+	cursor->header.unique = 0;
+	cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
+	cursor->header.width = 64;
+	cursor->header.height = 64;
+	cursor->header.hot_spot_x = 0;
+	cursor->header.hot_spot_y = 0;
+	cursor->data_size = size;
+	cursor->chunk.next_chunk = 0;
+	cursor->chunk.prev_chunk = 0;
+	cursor->chunk.data_size = size;
+
+	memcpy(cursor->chunk.data, user_ptr, size);
+
+	qxl_bo_kunmap(cursor_bo);
+
+	/* finish with the userspace bo */
+	qxl_bo_kunmap(user_bo);
+	qxl_bo_unpin(user_bo);
+	qxl_bo_unreserve(user_bo);
+	drm_gem_object_unreference_unlocked(obj);
+
+	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_CURSOR_SET;
+	cmd->u.set.position.x = qcrtc->cur_x;
+	cmd->u.set.position.y = qcrtc->cur_y;
+
+	cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
+	qxl_release_add_res(qdev, release, cursor_bo);
+
+	cmd->u.set.visible = 1;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+	qxl_release_unreserve(qdev, release);
+
+	qxl_bo_unreserve(cursor_bo);
+	qxl_bo_unref(&cursor_bo);
+
+	return ret;
+out_free_bo:
+	qxl_bo_unref(&cursor_bo);
+out_free_release:
+	qxl_release_unreserve(qdev, release);
+	qxl_release_free(qdev, release);
+out_kunmap:
+	qxl_bo_kunmap(user_bo);
+out_unpin:
+	qxl_bo_unpin(user_bo);
+out_unreserve:
+	qxl_bo_unreserve(user_bo);
+out_unref:
+	drm_gem_object_unreference_unlocked(obj);
+	return ret;
+}
+
+static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
+				int x, int y)
+{
+	struct drm_device *dev = crtc->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
+	struct qxl_release *release;
+	struct qxl_cursor_cmd *cmd;
+	int ret;
+
+	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
+				   &release, NULL);
+	if (ret)
+		return ret;
+
+	qcrtc->cur_x = x;
+	qcrtc->cur_y = y;
+
+	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+	cmd->type = QXL_CURSOR_MOVE;
+	cmd->u.position.x = qcrtc->cur_x;
+	cmd->u.position.y = qcrtc->cur_y;
+	qxl_release_unmap(qdev, release, &cmd->release_info);
+
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+	qxl_release_unreserve(qdev, release);
+	return 0;
+}
+
+
+static const struct drm_crtc_funcs qxl_crtc_funcs = {
+	.cursor_set = qxl_crtc_cursor_set,
+	.cursor_move = qxl_crtc_cursor_move,
+	.gamma_set = qxl_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = qxl_crtc_destroy,
+};
+
+static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
+
+	if (qxl_fb->obj)
+		drm_gem_object_unreference_unlocked(qxl_fb->obj);
+	drm_framebuffer_cleanup(fb);
+	kfree(qxl_fb);
+}
+
+static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
+					 struct drm_file *file_priv,
+					 unsigned flags, unsigned color,
+					 struct drm_clip_rect *clips,
+					 unsigned num_clips)
+{
+	/* TODO: vmwgfx where this was cribbed from had locking. Why? */
+	struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
+	struct qxl_device *qdev = qxl_fb->base.dev->dev_private;
+	struct drm_clip_rect norect;
+	struct qxl_bo *qobj;
+	int inc = 1;
+
+	qobj = gem_to_qxl_bo(qxl_fb->obj);
+	/* if we aren't primary surface ignore this */
+	if (!qobj->is_primary)
+		return 0;
+
+	if (!num_clips) {
+		num_clips = 1;
+		clips = &norect;
+		norect.x1 = norect.y1 = 0;
+		norect.x2 = fb->width;
+		norect.y2 = fb->height;
+	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+		num_clips /= 2;
+		inc = 2; /* skip source rects */
+	}
+
+	qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color,
+			  clips, num_clips, inc);
+	return 0;
+}
+
+static const struct drm_framebuffer_funcs qxl_fb_funcs = {
+	.destroy = qxl_user_framebuffer_destroy,
+	.dirty = qxl_framebuffer_surface_dirty,
+/*	TODO?
+ *	.create_handle = qxl_user_framebuffer_create_handle, */
+};
+
+int
+qxl_framebuffer_init(struct drm_device *dev,
+		     struct qxl_framebuffer *qfb,
+		     struct drm_mode_fb_cmd2 *mode_cmd,
+		     struct drm_gem_object *obj)
+{
+	int ret;
+
+	qfb->obj = obj;
+	ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs);
+	if (ret) {
+		qfb->obj = NULL;
+		return ret;
+	}
+	drm_helper_mode_fill_fb_struct(&qfb->base, mode_cmd);
+	return 0;
+}
+
+static void qxl_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct qxl_device *qdev = dev->dev_private;
+
+	qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n",
+		   __func__,
+		   mode->hdisplay, mode->vdisplay,
+		   adjusted_mode->hdisplay,
+		   adjusted_mode->vdisplay);
+	return true;
+}
+
+void
+qxl_send_monitors_config(struct qxl_device *qdev)
+{
+	int i;
+
+	BUG_ON(!qdev->ram_header->monitors_config);
+
+	if (qdev->monitors_config->count == 0) {
+		qxl_io_log(qdev, "%s: 0 monitors??\n", __func__);
+		return;
+	}
+	for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
+		struct qxl_head *head = &qdev->monitors_config->heads[i];
+
+		if (head->y > 8192 || head->x > 8192 ||
+		    head->width > 8192 || head->height > 8192) {
+			DRM_ERROR("head %d wrong: %dx%d+%d+%d\n",
+				  i, head->width, head->height,
+				  head->x, head->y);
+			return;
+		}
+	}
+	qxl_io_monitors_config(qdev);
+}
+
+static void qxl_monitors_config_set_single(struct qxl_device *qdev,
+					   unsigned x, unsigned y,
+					   unsigned width, unsigned height)
+{
+	DRM_DEBUG("%dx%d+%d+%d\n", width, height, x, y);
+	qdev->monitors_config->count = 1;
+	qdev->monitors_config->heads[0].x = x;
+	qdev->monitors_config->heads[0].y = y;
+	qdev->monitors_config->heads[0].width = width;
+	qdev->monitors_config->heads[0].height = height;
+}
+
+static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode,
+			       int x, int y,
+			       struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_mode *m = (void *)mode->private;
+	struct qxl_framebuffer *qfb;
+	struct qxl_bo *bo, *old_bo = NULL;
+	uint32_t width, height, base_offset;
+	bool recreate_primary = false;
+	int ret;
+
+	if (!crtc->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (old_fb) {
+		qfb = to_qxl_framebuffer(old_fb);
+		old_bo = gem_to_qxl_bo(qfb->obj);
+	}
+	qfb = to_qxl_framebuffer(crtc->fb);
+	bo = gem_to_qxl_bo(qfb->obj);
+	if (!m)
+		/* and do we care? */
+		DRM_DEBUG("%dx%d: not a native mode\n", x, y);
+	else
+		DRM_DEBUG("%dx%d: qxl id %d\n",
+			  mode->hdisplay, mode->vdisplay, m->id);
+	DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
+		  x, y,
+		  mode->hdisplay, mode->vdisplay,
+		  adjusted_mode->hdisplay,
+		  adjusted_mode->vdisplay);
+
+	recreate_primary = true;
+
+	width = mode->hdisplay;
+	height = mode->vdisplay;
+	base_offset = 0;
+
+	ret = qxl_bo_reserve(bo, false);
+	if (ret != 0)
+		return ret;
+	ret = qxl_bo_pin(bo, bo->type, NULL);
+	if (ret != 0) {
+		qxl_bo_unreserve(bo);
+		return -EINVAL;
+	}
+	qxl_bo_unreserve(bo);
+	if (recreate_primary) {
+		qxl_io_destroy_primary(qdev);
+		qxl_io_log(qdev,
+			   "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
+			   width, height, bo->surf.width,
+			   bo->surf.height, bo->surf.stride, bo->surf.format);
+		qxl_io_create_primary(qdev, width, height, base_offset, bo);
+		bo->is_primary = true;
+	}
+
+	if (old_bo && old_bo != bo) {
+		old_bo->is_primary = false;
+		ret = qxl_bo_reserve(old_bo, false);
+		qxl_bo_unpin(old_bo);
+		qxl_bo_unreserve(old_bo);
+	}
+
+	if (qdev->monitors_config->count == 0) {
+		qxl_monitors_config_set_single(qdev, x, y,
+					       mode->hdisplay,
+					       mode->vdisplay);
+	}
+	return 0;
+}
+
+static void qxl_crtc_prepare(struct drm_crtc *crtc)
+{
+	DRM_DEBUG("current: %dx%d+%d+%d (%d).\n",
+		  crtc->mode.hdisplay, crtc->mode.vdisplay,
+		  crtc->x, crtc->y, crtc->enabled);
+}
+
+static void qxl_crtc_commit(struct drm_crtc *crtc)
+{
+	DRM_DEBUG("\n");
+}
+
+static void qxl_crtc_load_lut(struct drm_crtc *crtc)
+{
+	DRM_DEBUG("\n");
+}
+
+static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
+	.dpms = qxl_crtc_dpms,
+	.mode_fixup = qxl_crtc_mode_fixup,
+	.mode_set = qxl_crtc_mode_set,
+	.prepare = qxl_crtc_prepare,
+	.commit = qxl_crtc_commit,
+	.load_lut = qxl_crtc_load_lut,
+};
+
+static int qdev_crtc_init(struct drm_device *dev, int num_crtc)
+{
+	struct qxl_crtc *qxl_crtc;
+
+	qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
+	if (!qxl_crtc)
+		return -ENOMEM;
+
+	drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
+	drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
+	return 0;
+}
+
+static void qxl_enc_dpms(struct drm_encoder *encoder, int mode)
+{
+	DRM_DEBUG("\n");
+}
+
+static bool qxl_enc_mode_fixup(struct drm_encoder *encoder,
+			       const struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	DRM_DEBUG("\n");
+	return true;
+}
+
+static void qxl_enc_prepare(struct drm_encoder *encoder)
+{
+	DRM_DEBUG("\n");
+}
+
+static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev,
+		struct drm_encoder *encoder)
+{
+	int i;
+	struct qxl_head *head;
+	struct drm_display_mode *mode;
+
+	BUG_ON(!encoder);
+	/* TODO: ugly, do better */
+	for (i = 0 ; i < 32 && (encoder->possible_crtcs != (1 << i)); ++i)
+		;
+	if (i == 32) {
+		DRM_ERROR("encoder has wrong possible_crtcs: %x\n",
+			  encoder->possible_crtcs);
+		return;
+	}
+	if (!qdev->monitors_config ||
+	    qdev->monitors_config->max_allowed <= i) {
+		DRM_ERROR(
+		"head number too large or missing monitors config: %p, %d",
+		qdev->monitors_config,
+		qdev->monitors_config ?
+			qdev->monitors_config->max_allowed : -1);
+		return;
+	}
+	if (!encoder->crtc) {
+		DRM_ERROR("missing crtc on encoder %p\n", encoder);
+		return;
+	}
+	if (i != 0)
+		DRM_DEBUG("missing for multiple monitors: no head holes\n");
+	head = &qdev->monitors_config->heads[i];
+	head->id = i;
+	head->surface_id = 0;
+	if (encoder->crtc->enabled) {
+		mode = &encoder->crtc->mode;
+		head->width = mode->hdisplay;
+		head->height = mode->vdisplay;
+		head->x = encoder->crtc->x;
+		head->y = encoder->crtc->y;
+		if (qdev->monitors_config->count < i + 1)
+			qdev->monitors_config->count = i + 1;
+	} else {
+		head->width = 0;
+		head->height = 0;
+		head->x = 0;
+		head->y = 0;
+	}
+	DRM_DEBUG("setting head %d to +%d+%d %dx%d\n",
+		  i, head->x, head->y, head->width, head->height);
+	head->flags = 0;
+	/* TODO - somewhere else to call this for multiple monitors
+	 * (config_commit?) */
+	qxl_send_monitors_config(qdev);
+}
+
+static void qxl_enc_commit(struct drm_encoder *encoder)
+{
+	struct qxl_device *qdev = encoder->dev->dev_private;
+
+	qxl_write_monitors_config_for_encoder(qdev, encoder);
+	DRM_DEBUG("\n");
+}
+
+static void qxl_enc_mode_set(struct drm_encoder *encoder,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	DRM_DEBUG("\n");
+}
+
+static int qxl_conn_get_modes(struct drm_connector *connector)
+{
+	int ret = 0;
+	struct qxl_device *qdev = connector->dev->dev_private;
+
+	DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config);
+	/* TODO: what should we do here? only show the configured modes for the
+	 * device, or allow the full list, or both? */
+	if (qdev->monitors_config && qdev->monitors_config->count) {
+		ret = qxl_add_monitors_config_modes(connector);
+		if (ret < 0)
+			return ret;
+	}
+	ret += qxl_add_common_modes(connector);
+	return ret;
+}
+
+static int qxl_conn_mode_valid(struct drm_connector *connector,
+			       struct drm_display_mode *mode)
+{
+	/* TODO: is this called for user defined modes? (xrandr --add-mode)
+	 * TODO: check that the mode fits in the framebuffer */
+	DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
+		  mode->vdisplay, mode->status);
+	return MODE_OK;
+}
+
+static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
+{
+	struct qxl_output *qxl_output =
+		drm_connector_to_qxl_output(connector);
+
+	DRM_DEBUG("\n");
+	return &qxl_output->enc;
+}
+
+
+static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
+	.dpms = qxl_enc_dpms,
+	.mode_fixup = qxl_enc_mode_fixup,
+	.prepare = qxl_enc_prepare,
+	.mode_set = qxl_enc_mode_set,
+	.commit = qxl_enc_commit,
+};
+
+static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
+	.get_modes = qxl_conn_get_modes,
+	.mode_valid = qxl_conn_mode_valid,
+	.best_encoder = qxl_best_encoder,
+};
+
+static void qxl_conn_save(struct drm_connector *connector)
+{
+	DRM_DEBUG("\n");
+}
+
+static void qxl_conn_restore(struct drm_connector *connector)
+{
+	DRM_DEBUG("\n");
+}
+
+static enum drm_connector_status qxl_conn_detect(
+			struct drm_connector *connector,
+			bool force)
+{
+	struct qxl_output *output =
+		drm_connector_to_qxl_output(connector);
+	struct drm_device *ddev = connector->dev;
+	struct qxl_device *qdev = ddev->dev_private;
+	int connected;
+
+	/* The first monitor is always connected */
+	connected = (output->index == 0) ||
+		    (qdev->monitors_config &&
+		     qdev->monitors_config->count > output->index);
+
+	DRM_DEBUG("\n");
+	return connected ? connector_status_connected
+			 : connector_status_disconnected;
+}
+
+static int qxl_conn_set_property(struct drm_connector *connector,
+				   struct drm_property *property,
+				   uint64_t value)
+{
+	DRM_DEBUG("\n");
+	return 0;
+}
+
+static void qxl_conn_destroy(struct drm_connector *connector)
+{
+	struct qxl_output *qxl_output =
+		drm_connector_to_qxl_output(connector);
+
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(qxl_output);
+}
+
+static const struct drm_connector_funcs qxl_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.save = qxl_conn_save,
+	.restore = qxl_conn_restore,
+	.detect = qxl_conn_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = qxl_conn_set_property,
+	.destroy = qxl_conn_destroy,
+};
+
+static void qxl_enc_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs qxl_enc_funcs = {
+	.destroy = qxl_enc_destroy,
+};
+
+static int qdev_output_init(struct drm_device *dev, int num_output)
+{
+	struct qxl_output *qxl_output;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
+	if (!qxl_output)
+		return -ENOMEM;
+
+	qxl_output->index = num_output;
+
+	connector = &qxl_output->base;
+	encoder = &qxl_output->enc;
+	drm_connector_init(dev, &qxl_output->base,
+			   &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
+
+	drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
+			 DRM_MODE_ENCODER_VIRTUAL);
+
+	encoder->possible_crtcs = 1 << num_output;
+	drm_mode_connector_attach_encoder(&qxl_output->base,
+					  &qxl_output->enc);
+	drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
+	drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
+
+	drm_sysfs_connector_add(connector);
+	return 0;
+}
+
+static struct drm_framebuffer *
+qxl_user_framebuffer_create(struct drm_device *dev,
+			    struct drm_file *file_priv,
+			    struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct qxl_framebuffer *qxl_fb;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	if (!obj)
+		return NULL;
+
+	qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL);
+	if (qxl_fb == NULL) {
+		drm_gem_object_unreference_unlocked(obj);
+		return NULL;
+	}
+
+	ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj);
+	if (ret) {
+		kfree(qxl_fb);
+		drm_gem_object_unreference_unlocked(obj);
+		return NULL;
+	}
+
+	return &qxl_fb->base;
+}
+
+static const struct drm_mode_config_funcs qxl_mode_funcs = {
+	.fb_create = qxl_user_framebuffer_create,
+};
+
+int qxl_modeset_init(struct qxl_device *qdev)
+{
+	int i;
+	int ret;
+	struct drm_gem_object *gobj;
+	int max_allowed = QXL_NUM_OUTPUTS;
+	int monitors_config_size = sizeof(struct qxl_monitors_config) +
+				   max_allowed * sizeof(struct qxl_head);
+
+	drm_mode_config_init(qdev->ddev);
+	ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
+				    QXL_GEM_DOMAIN_VRAM,
+				    false, false, NULL, &gobj);
+	if (ret) {
+		DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret);
+		return -ENOMEM;
+	}
+	qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
+	qxl_bo_kmap(qdev->monitors_config_bo, NULL);
+	qdev->monitors_config = qdev->monitors_config_bo->kptr;
+	qdev->ram_header->monitors_config =
+		qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
+
+	memset(qdev->monitors_config, 0, monitors_config_size);
+	qdev->monitors_config->max_allowed = max_allowed;
+
+	qdev->ddev->mode_config.funcs = (void *)&qxl_mode_funcs;
+
+	/* modes will be validated against the framebuffer size */
+	qdev->ddev->mode_config.min_width = 320;
+	qdev->ddev->mode_config.min_height = 200;
+	qdev->ddev->mode_config.max_width = 8192;
+	qdev->ddev->mode_config.max_height = 8192;
+
+	qdev->ddev->mode_config.fb_base = qdev->vram_base;
+	for (i = 0 ; i < QXL_NUM_OUTPUTS; ++i) {
+		qdev_crtc_init(qdev->ddev, i);
+		qdev_output_init(qdev->ddev, i);
+	}
+
+	qdev->mode_info.mode_config_initialized = true;
+
+	/* primary surface must be created by this point, to allow
+	 * issuing command queue commands and having them read by
+	 * spice server. */
+	qxl_fbdev_init(qdev);
+	return 0;
+}
+
+void qxl_modeset_fini(struct qxl_device *qdev)
+{
+	qxl_fbdev_fini(qdev);
+	if (qdev->mode_info.mode_config_initialized) {
+		drm_mode_config_cleanup(qdev->ddev);
+		qdev->mode_info.mode_config_initialized = false;
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_draw.c b/linux-imx/drivers/gpu/drm/qxl/qxl_draw.c
new file mode 100644
index 0000000..3c8c3db
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_draw.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/* Returns a pointer to the qxl_rect array inside the freshly allocated
+ * qxl_clip_rects, i.e. at qxl_clip_rects.chunk.data within *clips_bo.
+ * This is kmapped guest memory, *not* the device address; the caller
+ * must fill num_clips rectangles and then kunmap/unreserve/unref the
+ * bo. */
+static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
+					      struct qxl_drawable *drawable,
+					      unsigned num_clips,
+					      struct qxl_bo **clips_bo,
+					      struct qxl_release *release)
+{
+	struct qxl_clip_rects *dev_clips;
+	int ret;
+	int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
+
+	ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
+	if (ret)
+		return NULL;
+
+	ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
+	if (ret) {
+		qxl_bo_unref(clips_bo);
+		return NULL;
+	}
+	dev_clips->num_rects = num_clips;
+	dev_clips->chunk.next_chunk = 0;
+	dev_clips->chunk.prev_chunk = 0;
+	dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips;
+	return (struct qxl_rect *)dev_clips->chunk.data;
+}
+
+static int
+make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
+	      const struct qxl_rect *rect,
+	      struct qxl_release **release)
+{
+	struct qxl_drawable *drawable;
+	int i, ret;
+
+	ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
+					 QXL_RELEASE_DRAWABLE, release,
+					 NULL);
+	if (ret)
+		return ret;
+
+	drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
+	drawable->type = type;
+
+	drawable->surface_id = surface;		/* Only primary for now */
+	drawable->effect = QXL_EFFECT_OPAQUE;
+	drawable->self_bitmap = 0;
+	drawable->self_bitmap_area.top = 0;
+	drawable->self_bitmap_area.left = 0;
+	drawable->self_bitmap_area.bottom = 0;
+	drawable->self_bitmap_area.right = 0;
+	/* FIXME: add clipping */
+	drawable->clip.type = SPICE_CLIP_TYPE_NONE;
+
+	/*
+	 * surfaces_dest[i] should apparently be filled out with the
+	 * surfaces that we depend on, and surface_rects should be
+	 * filled with the rectangles of those surfaces that we
+	 * are going to use.
+	 */
+	for (i = 0; i < 3; ++i)
+		drawable->surfaces_dest[i] = -1;
+
+	if (rect)
+		drawable->bbox = *rect;
+
+	drawable->mm_time = qdev->rom->mm_clock;
+	qxl_release_unmap(qdev, *release, &drawable->release_info);
+	return 0;
+}
+
+static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
+				   const struct qxl_fb_image *qxl_fb_image)
+{
+	struct qxl_device *qdev = qxl_fb_image->qdev;
+	const struct fb_image *fb_image = &qxl_fb_image->fb_image;
+	uint32_t visual = qxl_fb_image->visual;
+	const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
+	struct qxl_palette *pal;
+	int ret;
+	uint32_t fgcolor, bgcolor;
+	static uint64_t unique; /* we make no attempt to actually set this
+				 * correctly globally, since that would require
+				 * tracking all of our palettes. */
+
+	ret = qxl_alloc_bo_reserved(qdev,
+				    sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
+				    palette_bo);
+	if (ret)
+		return ret;
+
+	ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
+	if (ret) {
+		qxl_bo_unref(palette_bo);
+		return ret;
+	}
+	pal->num_ents = 2;
+	pal->unique = unique++;
+	if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
+		/* NB: this is the only used branch currently. */
+		fgcolor = pseudo_palette[fb_image->fg_color];
+		bgcolor = pseudo_palette[fb_image->bg_color];
+	} else {
+		fgcolor = fb_image->fg_color;
+		bgcolor = fb_image->bg_color;
+	}
+	pal->ents[0] = bgcolor;
+	pal->ents[1] = fgcolor;
+	qxl_bo_kunmap(*palette_bo);
+	return 0;
+}
+
+void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
+			int stride /* filled in if 0 */)
+{
+	struct qxl_device *qdev = qxl_fb_image->qdev;
+	struct qxl_drawable *drawable;
+	struct qxl_rect rect;
+	const struct fb_image *fb_image = &qxl_fb_image->fb_image;
+	int x = fb_image->dx;
+	int y = fb_image->dy;
+	int width = fb_image->width;
+	int height = fb_image->height;
+	const char *src = fb_image->data;
+	int depth = fb_image->depth;
+	struct qxl_release *release;
+	struct qxl_bo *image_bo;
+	struct qxl_image *image;
+	int ret;
+
+	if (stride == 0)
+		stride = depth * width / 8;
+
+	rect.left = x;
+	rect.right = x + width;
+	rect.top = y;
+	rect.bottom = y + height;
+
+	ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
+	if (ret)
+		return;
+
+	ret = qxl_image_create(qdev, release, &image_bo,
+			       (const uint8_t *)src, 0, 0,
+			       width, height, depth, stride);
+	if (ret) {
+		qxl_release_unreserve(qdev, release);
+		qxl_release_free(qdev, release);
+		return;
+	}
+
+	if (depth == 1) {
+		struct qxl_bo *palette_bo;
+		void *ptr;
+		ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
+		if (ret) {
+			qxl_bo_unreserve(image_bo);
+			qxl_bo_unref(&image_bo);
+			qxl_release_unreserve(qdev, release);
+			qxl_release_free(qdev, release);
+			return;
+		}
+		qxl_release_add_res(qdev, release, palette_bo);
+
+		ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
+		image = ptr;
+		image->u.bitmap.palette =
+			qxl_bo_physical_address(qdev, palette_bo, 0);
+		qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+		qxl_bo_unreserve(palette_bo);
+		qxl_bo_unref(&palette_bo);
+	}
+
+	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+
+	drawable->u.copy.src_area.top = 0;
+	drawable->u.copy.src_area.bottom = height;
+	drawable->u.copy.src_area.left = 0;
+	drawable->u.copy.src_area.right = width;
+
+	drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
+	drawable->u.copy.scale_mode = 0;
+	drawable->u.copy.mask.flags = 0;
+	drawable->u.copy.mask.pos.x = 0;
+	drawable->u.copy.mask.pos.y = 0;
+	drawable->u.copy.mask.bitmap = 0;
+
+	drawable->u.copy.src_bitmap =
+		qxl_bo_physical_address(qdev, image_bo, 0);
+	qxl_release_unmap(qdev, release, &drawable->release_info);
+
+	qxl_release_add_res(qdev, release, image_bo);
+	qxl_bo_unreserve(image_bo);
+	qxl_bo_unref(&image_bo);
+
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+	qxl_release_unreserve(qdev, release);
+}
+
+/* push a draw command using the given clipping rectangles as
+ * the sources from the shadow framebuffer.
+ *
+ * Right now implementing with a single draw and a clip list. Clip
+ * lists are known to be a problem performance wise, this can be solved
+ * by treating them differently in the server.
+ */
+void qxl_draw_dirty_fb(struct qxl_device *qdev,
+		       struct qxl_framebuffer *qxl_fb,
+		       struct qxl_bo *bo,
+		       unsigned flags, unsigned color,
+		       struct drm_clip_rect *clips,
+		       unsigned num_clips, int inc)
+{
+	/*
+	 * TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
+	 * send a fill command instead, much cheaper.
+	 *
+	 * See include/drm/drm_mode.h
+	 */
+	struct drm_clip_rect *clips_ptr;
+	int i;
+	int left, right, top, bottom;
+	int width, height;
+	struct qxl_drawable *drawable;
+	struct qxl_rect drawable_rect;
+	struct qxl_rect *rects;
+	int stride = qxl_fb->base.pitches[0];
+	/* depth is not actually interesting, we don't mask with it */
+	int depth = qxl_fb->base.bits_per_pixel;
+	uint8_t *surface_base;
+	struct qxl_release *release;
+	struct qxl_bo *image_bo;
+	struct qxl_bo *clips_bo;
+	int ret;
+
+	left = clips->x1;
+	right = clips->x2;
+	top = clips->y1;
+	bottom = clips->y2;
+
+	/* skip the first clip rect */
+	for (i = 1, clips_ptr = clips + inc;
+	     i < num_clips; i++, clips_ptr += inc) {
+		left = min_t(int, left, (int)clips_ptr->x1);
+		right = max_t(int, right, (int)clips_ptr->x2);
+		top = min_t(int, top, (int)clips_ptr->y1);
+		bottom = max_t(int, bottom, (int)clips_ptr->y2);
+	}
+
+	width = right - left;
+	height = bottom - top;
+	drawable_rect.left = left;
+	drawable_rect.right = right;
+	drawable_rect.top = top;
+	drawable_rect.bottom = bottom;
+	ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
+			    &release);
+	if (ret)
+		return;
+
+	ret = qxl_bo_kmap(bo, (void **)&surface_base);
+	if (ret)
+		goto out_unref;
+
+	ret = qxl_image_create(qdev, release, &image_bo, surface_base,
+			       left, top, width, height, depth, stride);
+	qxl_bo_kunmap(bo);
+	if (ret)
+		goto out_unref;
+
+	rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
+	if (!rects) {
+		qxl_bo_unref(&image_bo);
+		goto out_unref;
+	}
+	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+
+	drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
+	drawable->clip.data = qxl_bo_physical_address(qdev,
+						      clips_bo, 0);
+	qxl_release_add_res(qdev, release, clips_bo);
+
+	drawable->u.copy.src_area.top = 0;
+	drawable->u.copy.src_area.bottom = height;
+	drawable->u.copy.src_area.left = 0;
+	drawable->u.copy.src_area.right = width;
+
+	drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
+	drawable->u.copy.scale_mode = 0;
+	drawable->u.copy.mask.flags = 0;
+	drawable->u.copy.mask.pos.x = 0;
+	drawable->u.copy.mask.pos.y = 0;
+	drawable->u.copy.mask.bitmap = 0;
+
+	drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
+	qxl_release_unmap(qdev, release, &drawable->release_info);
+	qxl_release_add_res(qdev, release, image_bo);
+	qxl_bo_unreserve(image_bo);
+	qxl_bo_unref(&image_bo);
+	clips_ptr = clips;
+	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
+		rects[i].left   = clips_ptr->x1;
+		rects[i].right  = clips_ptr->x2;
+		rects[i].top    = clips_ptr->y1;
+		rects[i].bottom = clips_ptr->y2;
+	}
+	qxl_bo_kunmap(clips_bo);
+	qxl_bo_unreserve(clips_bo);
+	qxl_bo_unref(&clips_bo);
+
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+	qxl_release_unreserve(qdev, release);
+	return;
+
+out_unref:
+	qxl_release_unreserve(qdev, release);
+	qxl_release_free(qdev, release);
+}
+
+void qxl_draw_copyarea(struct qxl_device *qdev,
+		       u32 width, u32 height,
+		       u32 sx, u32 sy,
+		       u32 dx, u32 dy)
+{
+	struct qxl_drawable *drawable;
+	struct qxl_rect rect;
+	struct qxl_release *release;
+	int ret;
+
+	rect.left = dx;
+	rect.top = dy;
+	rect.right = dx + width;
+	rect.bottom = dy + height;
+	ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release);
+	if (ret)
+		return;
+
+	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+	drawable->u.copy_bits.src_pos.x = sx;
+	drawable->u.copy_bits.src_pos.y = sy;
+
+	qxl_release_unmap(qdev, release, &drawable->release_info);
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+	qxl_release_unreserve(qdev, release);
+}
+
+void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
+{
+	struct qxl_device *qdev = qxl_draw_fill_rec->qdev;
+	struct qxl_rect rect = qxl_draw_fill_rec->rect;
+	uint32_t color = qxl_draw_fill_rec->color;
+	uint16_t rop = qxl_draw_fill_rec->rop;
+	struct qxl_drawable *drawable;
+	struct qxl_release *release;
+	int ret;
+
+	ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release);
+	if (ret)
+		return;
+
+	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+	drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
+	drawable->u.fill.brush.u.color = color;
+	drawable->u.fill.rop_descriptor = rop;
+	drawable->u.fill.mask.flags = 0;
+	drawable->u.fill.mask.pos.x = 0;
+	drawable->u.fill.mask.pos.y = 0;
+	drawable->u.fill.mask.bitmap = 0;
+
+	qxl_release_unmap(qdev, release, &drawable->release_info);
+	qxl_fence_releaseable(qdev, release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
+	qxl_release_unreserve(qdev, release);
+}
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_drv.c b/linux-imx/drivers/gpu/drm/qxl/qxl_drv.c
new file mode 100644
index 0000000..aa291d8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_drv.c
@@ -0,0 +1,145 @@
+/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */
+/* qxl_drv.c -- QXL driver -*- linux-c -*-
+ *
+ * Copyright 2011 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dave Airlie <airlie@redhat.com>
+ *    Alon Levy <alevy@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm/drm.h"
+
+#include "qxl_drv.h"
+
+extern int qxl_max_ioctls;
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+	{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
+	  0xffff00, 0 },
+	{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8,
+	  0xffff00, 0 },
+	{ 0, 0, 0 },
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int qxl_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, qxl_modeset, int, 0400);
+
+static struct drm_driver qxl_driver;
+static struct pci_driver qxl_pci_driver;
+
+static int
+qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	if (pdev->revision < 4) {
+		DRM_ERROR("qxl too old, doesn't support client_monitors_config,"
+			  " use xf86-video-qxl in user mode");
+		return -EINVAL; /* TODO: ENODEV ? */
+	}
+	return drm_get_pci_dev(pdev, ent, &qxl_driver);
+}
+
+static void
+qxl_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static struct pci_driver qxl_pci_driver = {
+	 .name = DRIVER_NAME,
+	 .id_table = pciidlist,
+	 .probe = qxl_pci_probe,
+	 .remove = qxl_pci_remove,
+};
+
+static const struct file_operations qxl_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.mmap = qxl_mmap,
+};
+
+static struct drm_driver qxl_driver = {
+	.driver_features = DRIVER_GEM | DRIVER_MODESET |
+			   DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+	.dev_priv_size = 0,
+	.load = qxl_driver_load,
+	.unload = qxl_driver_unload,
+
+	.dumb_create = qxl_mode_dumb_create,
+	.dumb_map_offset = qxl_mode_dumb_mmap,
+	.dumb_destroy = qxl_mode_dumb_destroy,
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = qxl_debugfs_init,
+	.debugfs_cleanup = qxl_debugfs_takedown,
+#endif
+	.gem_init_object = qxl_gem_object_init,
+	.gem_free_object = qxl_gem_object_free,
+	.gem_open_object = qxl_gem_object_open,
+	.gem_close_object = qxl_gem_object_close,
+	.fops = &qxl_fops,
+	.ioctls = qxl_ioctls,
+	.irq_handler = qxl_irq_handler,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = 0,
+	.minor = 1,
+	.patchlevel = 0,
+};
+
+static int __init qxl_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && qxl_modeset == -1)
+		return -EINVAL;
+#endif
+
+	if (qxl_modeset == 0)
+		return -EINVAL;
+	qxl_driver.num_ioctls = qxl_max_ioctls;
+	return drm_pci_init(&qxl_driver, &qxl_pci_driver);
+}
+
+static void __exit qxl_exit(void)
+{
+	drm_pci_exit(&qxl_driver, &qxl_pci_driver);
+}
+
+module_init(qxl_init);
+module_exit(qxl_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_drv.h b/linux-imx/drivers/gpu/drm/qxl/qxl_drv.h
new file mode 100644
index 0000000..43d06ab
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_drv.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+
+#ifndef QXL_DRV_H
+#define QXL_DRV_H
+
+/*
+ * Definitions taken from spice-protocol, plus kernel driver specific bits.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
+#include <drm/qxl_drm.h>
+#include "qxl_dev.h"
+
+#define DRIVER_AUTHOR		"Dave Airlie"
+
+#define DRIVER_NAME		"qxl"
+#define DRIVER_DESC		"RH QXL"
+#define DRIVER_DATE		"20120117"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
+
+#define QXL_NUM_OUTPUTS 1
+
+#define QXL_DEBUGFS_MAX_COMPONENTS		32
+
+extern int qxl_log_level;
+
+enum {
+	QXL_INFO_LEVEL = 1,
+	QXL_DEBUG_LEVEL = 2,
+};
+
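+/* Both macros route through qxl_io_log(); messages are emitted only when
+ * qxl_log_level is at least the given level. */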
+#define QXL_INFO(qdev, fmt, ...) do { \
+		if (qxl_log_level >= QXL_INFO_LEVEL) {	\
+			qxl_io_log(qdev, fmt, __VA_ARGS__); \
+		}	\
+	} while (0)
+#define QXL_DEBUG(qdev, fmt, ...) do { \
+		if (qxl_log_level >= QXL_DEBUG_LEVEL) {	\
+			qxl_io_log(qdev, fmt, __VA_ARGS__); \
+		}	\
+	} while (0)
+#define QXL_INFO_ONCE(qdev, fmt, ...) do { \
+		static int done;		\
+		if (!done) {			\
+			done = 1;			\
+			QXL_INFO(qdev, fmt, __VA_ARGS__);	\
+		}						\
+	} while (0)
+
+#define DRM_FILE_OFFSET 0x100000000ULL
+#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
+
+#define QXL_INTERRUPT_MASK (\
+	QXL_INTERRUPT_DISPLAY |\
+	QXL_INTERRUPT_CURSOR |\
+	QXL_INTERRUPT_IO_CMD |\
+	QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
+
+struct qxl_fence {
+	struct qxl_device *qdev;
+	uint32_t num_active_releases;
+	uint32_t *release_ids;
+	struct radix_tree_root tree;
+};
+
+struct qxl_bo {
+	/* Protected by gem.mutex */
+	struct list_head		list;
+	/* Protected by tbo.reserved */
+	u32				placements[3];
+	struct ttm_placement		placement;
+	struct ttm_buffer_object	tbo;
+	struct ttm_bo_kmap_obj		kmap;
+	unsigned			pin_count;
+	void				*kptr;
+	int                             type;
+	/* Constant after initialization */
+	struct drm_gem_object		gem_base;
+	bool is_primary; /* is this now a primary surface */
+	bool hw_surf_alloc;
+	struct qxl_surface surf;
+	uint32_t surface_id;
+	struct qxl_fence fence; /* per bo fence  - list of releases */
+	struct qxl_release *surf_create;
+	atomic_t reserve_count;
+};
+#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+
+struct qxl_gem {
+	struct mutex		mutex;
+	struct list_head	objects;
+};
+
+struct qxl_bo_list {
+	struct list_head lhead;
+	struct qxl_bo *bo;
+};
+
+struct qxl_reloc_list {
+	struct list_head bos;
+};
+
+struct qxl_crtc {
+	struct drm_crtc base;
+	int cur_x;
+	int cur_y;
+};
+
+struct qxl_output {
+	int index;
+	struct drm_connector base;
+	struct drm_encoder enc;
+};
+
+struct qxl_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
+#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
+#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, base)
+#define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base)
+
+struct qxl_mman {
+	struct ttm_bo_global_ref        bo_global_ref;
+	struct drm_global_reference	mem_global_ref;
+	bool				mem_global_referenced;
+	struct ttm_bo_device		bdev;
+};
+
+struct qxl_mode_info {
+	int num_modes;
+	struct qxl_mode *modes;
+	bool mode_config_initialized;
+
+	/* pointer to fbdev info structure */
+	struct qxl_fbdev *qfbdev;
+};
+
+
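+/* One device memory slot: the guest-physical range it covers and the
+ * precomputed high bits (slot id/generation) that get folded into device
+ * addresses. */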
+struct qxl_memslot {
+	uint8_t		generation;
+	uint64_t	start_phys_addr;
+	uint64_t	end_phys_addr;
+	uint64_t	high_bits;
+};
+
+enum {
+	QXL_RELEASE_DRAWABLE,
+	QXL_RELEASE_SURFACE_CMD,
+	QXL_RELEASE_CURSOR_CMD,
+};
+
+/* drm_ prefix to differentiate from qxl_release_info in
+ * spice-protocol/qxl_dev.h */
+#define QXL_MAX_RES 96
+struct qxl_release {
+	int id;
+	int type;
+	int bo_count;
+	uint32_t release_offset;
+	uint32_t surface_release_id;
+	struct qxl_bo *bos[QXL_MAX_RES];
+};
+
+struct qxl_fb_image {
+	struct qxl_device *qdev;
+	uint32_t pseudo_palette[16];
+	struct fb_image fb_image;
+	uint32_t visual;
+};
+
+struct qxl_draw_fill {
+	struct qxl_device *qdev;
+	struct qxl_rect rect;
+	uint32_t color;
+	uint16_t rop;
+};
+
+/*
+ * Debugfs
+ */
+struct qxl_debugfs {
+	struct drm_info_list	*files;
+	unsigned		num_files;
+};
+
+int qxl_debugfs_add_files(struct qxl_device *qdev,
+			  struct drm_info_list *files,
+			  unsigned nfiles);
+int qxl_debugfs_fence_init(struct qxl_device *qdev);
+void qxl_debugfs_remove_files(struct qxl_device *qdev);
+
+struct qxl_device;
+
+struct qxl_device {
+	struct device			*dev;
+	struct drm_device		*ddev;
+	struct pci_dev			*pdev;
+	unsigned long flags;
+
+	resource_size_t vram_base, vram_size;
+	resource_size_t surfaceram_base, surfaceram_size;
+	resource_size_t rom_base, rom_size;
+	struct qxl_rom *rom;
+
+	struct qxl_mode *modes;
+	struct qxl_bo *monitors_config_bo;
+	struct qxl_monitors_config *monitors_config;
+
+	/* last received client_monitors_config */
+	struct qxl_monitors_config *client_monitors_config;
+
+	int io_base;
+	void *ram;
+	struct qxl_mman		mman;
+	struct qxl_gem		gem;
+	struct qxl_mode_info mode_info;
+
+	struct fb_info			*fbdev_info;
+	struct qxl_framebuffer	*fbdev_qfb;
+	void *ram_physical;
+
+	struct qxl_ring *release_ring;
+	struct qxl_ring *command_ring;
+	struct qxl_ring *cursor_ring;
+
+	struct qxl_ram_header *ram_header;
+
+	bool primary_created;
+
+	struct qxl_memslot	*mem_slots;
+	uint8_t		n_mem_slots;
+
+	uint8_t		main_mem_slot;
+	uint8_t		surfaces_mem_slot;
+	uint8_t		slot_id_bits;
+	uint8_t		slot_gen_bits;
+	uint64_t	va_slot_mask;
+
+	struct idr	release_idr;
+	spinlock_t release_idr_lock;
+	struct mutex	async_io_mutex;
+	unsigned int last_sent_io_cmd;
+
+	/* interrupt handling */
+	atomic_t irq_received;
+	atomic_t irq_received_display;
+	atomic_t irq_received_cursor;
+	atomic_t irq_received_io_cmd;
+	unsigned irq_received_error;
+	wait_queue_head_t display_event;
+	wait_queue_head_t cursor_event;
+	wait_queue_head_t io_cmd_event;
+	struct work_struct client_monitors_config_work;
+
+	/* debugfs */
+	struct qxl_debugfs	debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
+	unsigned		debugfs_count;
+
+	struct mutex		update_area_mutex;
+
+	struct idr	surf_id_idr;
+	spinlock_t surf_id_idr_lock;
+	int last_alloced_surf_id;
+
+	struct mutex surf_evict_mutex;
+	struct io_mapping *vram_mapping;
+	struct io_mapping *surface_mapping;
+
+	/* release allocation state */
+	struct mutex release_mutex;
+	struct qxl_bo *current_release_bo[3];
+	int current_release_bo_offset[3];
+
+	struct workqueue_struct *gc_queue;
+	struct work_struct gc_work;
+
+};
+
+/* forward declaration for the QXL_INFO/QXL_DEBUG macros */
+void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
+
+extern struct drm_ioctl_desc qxl_ioctls[];
+extern int qxl_max_ioctls;
+
+int qxl_driver_load(struct drm_device *dev, unsigned long flags);
+int qxl_driver_unload(struct drm_device *dev);
+
+int qxl_modeset_init(struct qxl_device *qdev);
+void qxl_modeset_fini(struct qxl_device *qdev);
+
+int qxl_bo_init(struct qxl_device *qdev);
+void qxl_bo_fini(struct qxl_device *qdev);
+
+struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
+				 int element_size,
+				 int n_elements,
+				 int prod_notify,
+				 bool set_prod_notify,
+				 wait_queue_head_t *push_event);
+void qxl_ring_free(struct qxl_ring *ring);
+
+static inline void *
+qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical)
+{
+	QXL_INFO(qdev, "not implemented (%lu)\n", physical);
+	return NULL;
+}
+
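+/* Device-visible addresses are the bo's GPU offset (plus the caller's
+ * offset) with the owning memslot's high bits folded in on top. */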
+static inline uint64_t
+qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
+			unsigned long offset)
+{
+	int slot_id = bo->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
+	struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
+
+	/* TODO - need to hold one of the locks to read tbo.offset */
+	return slot->high_bits | (bo->tbo.offset + offset);
+}
+
+/* qxl_fb.c */
+#define QXLFB_CONN_LIMIT 1
+
+int qxl_fbdev_init(struct qxl_device *qdev);
+void qxl_fbdev_fini(struct qxl_device *qdev);
+int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
+				  struct drm_file *file_priv,
+				  uint32_t *handle);
+
+/* qxl_display.c */
+int
+qxl_framebuffer_init(struct drm_device *dev,
+		     struct qxl_framebuffer *rfb,
+		     struct drm_mode_fb_cmd2 *mode_cmd,
+		     struct drm_gem_object *obj);
+void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
+void qxl_send_monitors_config(struct qxl_device *qdev);
+
+/* used by qxl_debugfs only */
+void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
+void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count);
+
+/* qxl_gem.c */
+int qxl_gem_init(struct qxl_device *qdev);
+void qxl_gem_fini(struct qxl_device *qdev);
+int qxl_gem_object_create(struct qxl_device *qdev, int size,
+			  int alignment, int initial_domain,
+			  bool discardable, bool kernel,
+			  struct qxl_surface *surf,
+			  struct drm_gem_object **obj);
+int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+			  uint64_t *gpu_addr);
+void qxl_gem_object_unpin(struct drm_gem_object *obj);
+int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+				      struct drm_file *file_priv,
+				      u32 domain,
+				      size_t size,
+				      struct qxl_surface *surf,
+				      struct qxl_bo **qobj,
+				      uint32_t *handle);
+int qxl_gem_object_init(struct drm_gem_object *obj);
+void qxl_gem_object_free(struct drm_gem_object *gobj);
+int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+void qxl_gem_object_close(struct drm_gem_object *obj,
+			  struct drm_file *file_priv);
+void qxl_bo_force_delete(struct qxl_device *qdev);
+int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+
+/* qxl_dumb.c */
+int qxl_mode_dumb_create(struct drm_file *file_priv,
+			 struct drm_device *dev,
+			 struct drm_mode_create_dumb *args);
+int qxl_mode_dumb_destroy(struct drm_file *file_priv,
+			  struct drm_device *dev,
+			  uint32_t handle);
+int qxl_mode_dumb_mmap(struct drm_file *filp,
+		       struct drm_device *dev,
+		       uint32_t handle, uint64_t *offset_p);
+
+
+/* qxl ttm */
+int qxl_ttm_init(struct qxl_device *qdev);
+void qxl_ttm_fini(struct qxl_device *qdev);
+int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* qxl image */
+
+int qxl_image_create(struct qxl_device *qdev,
+		     struct qxl_release *release,
+		     struct qxl_bo **image_bo,
+		     const uint8_t *data,
+		     int x, int y, int width, int height,
+		     int depth, int stride);
+void qxl_update_screen(struct qxl_device *qxl);
+
+/* qxl io operations (qxl_cmd.c) */
+
+void qxl_io_create_primary(struct qxl_device *qdev,
+			   unsigned width, unsigned height, unsigned offset,
+			   struct qxl_bo *bo);
+void qxl_io_destroy_primary(struct qxl_device *qdev);
+void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
+void qxl_io_notify_oom(struct qxl_device *qdev);
+
+int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
+		       const struct qxl_rect *area);
+
+void qxl_io_reset(struct qxl_device *qdev);
+void qxl_io_monitors_config(struct qxl_device *qdev);
+int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible);
+void qxl_io_flush_release(struct qxl_device *qdev);
+void qxl_io_flush_surfaces(struct qxl_device *qdev);
+
+int qxl_release_reserve(struct qxl_device *qdev,
+			struct qxl_release *release, bool no_wait);
+void qxl_release_unreserve(struct qxl_device *qdev,
+			   struct qxl_release *release);
+union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
+					struct qxl_release *release);
+void qxl_release_unmap(struct qxl_device *qdev,
+		       struct qxl_release *release,
+		       union qxl_release_info *info);
+/* qxl_bo_add_resource - attach a resource bo to a main bo */
+void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
+
+int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
+				       enum qxl_surface_cmd_type surface_cmd_type,
+				       struct qxl_release *create_rel,
+				       struct qxl_release **release);
+int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+			       int type, struct qxl_release **release,
+			       struct qxl_bo **rbo);
+int qxl_fence_releaseable(struct qxl_device *qdev,
+			  struct qxl_release *release);
+int
+qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+			      uint32_t type, bool interruptible);
+int
+qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
+			     uint32_t type, bool interruptible);
+int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+			  struct qxl_bo **_bo);
+/* qxl drawing commands */
+
+void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
+			int stride /* filled in if 0 */);
+
+void qxl_draw_dirty_fb(struct qxl_device *qdev,
+		       struct qxl_framebuffer *qxl_fb,
+		       struct qxl_bo *bo,
+		       unsigned flags, unsigned color,
+		       struct drm_clip_rect *clips,
+		       unsigned num_clips, int inc);
+
+void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
+
+void qxl_draw_copyarea(struct qxl_device *qdev,
+		       u32 width, u32 height,
+		       u32 sx, u32 sy,
+		       u32 dx, u32 dy);
+
+uint64_t
+qxl_release_alloc(struct qxl_device *qdev, int type,
+		  struct qxl_release **ret);
+
+void qxl_release_free(struct qxl_device *qdev,
+		      struct qxl_release *release);
+void qxl_release_add_res(struct qxl_device *qdev,
+			 struct qxl_release *release,
+			 struct qxl_bo *bo);
+/* used by qxl_debugfs_release */
+struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
+						   uint64_t id);
+
+bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush);
+int qxl_garbage_collect(struct qxl_device *qdev);
+
+/* debugfs */
+
+int qxl_debugfs_init(struct drm_minor *minor);
+void qxl_debugfs_takedown(struct drm_minor *minor);
+
+/* qxl_irq.c */
+int qxl_irq_init(struct qxl_device *qdev);
+irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS);
+
+/* qxl_fb.c */
+int qxl_fb_init(struct qxl_device *qdev);
+
+int qxl_surface_id_alloc(struct qxl_device *qdev,
+			 struct qxl_bo *surf);
+void qxl_surface_id_dealloc(struct qxl_device *qdev,
+			    uint32_t surface_id);
+int qxl_hw_surface_alloc(struct qxl_device *qdev,
+			 struct qxl_bo *surf,
+			 struct ttm_mem_reg *mem);
+int qxl_hw_surface_dealloc(struct qxl_device *qdev,
+			   struct qxl_bo *surf);
+
+int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
+
+struct qxl_drv_surface *
+qxl_surface_lookup(struct drm_device *dev, int surface_id);
+void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
+int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
+
+/* qxl_fence.c */
+int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
+int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
+int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
+void qxl_fence_fini(struct qxl_fence *qfence);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_dumb.c b/linux-imx/drivers/gpu/drm/qxl/qxl_dumb.c
new file mode 100644
index 0000000..847c4ee
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/* dumb ioctls implementation */
+
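+/* Allocate a bo big enough (page-aligned size) for a width x height
+ * buffer at 16 or 32 bpp and return a gem handle to it. */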
+int qxl_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct qxl_bo *qobj;
+	uint32_t handle;
+	int r;
+	struct qxl_surface surf;
+	uint32_t pitch, format;
+	pitch = args->width * ((args->bpp + 1) / 8);
+	args->size = pitch * args->height;
+	args->size = ALIGN(args->size, PAGE_SIZE);
+
+	switch (args->bpp) {
+	case 16:
+		format = SPICE_SURFACE_FMT_16_565;
+		break;
+	case 32:
+		format = SPICE_SURFACE_FMT_32_xRGB;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	surf.width = args->width;
+	surf.height = args->height;
+	surf.stride = pitch;
+	surf.format = format;
+	r = qxl_gem_object_create_with_handle(qdev, file_priv,
+					      QXL_GEM_DOMAIN_VRAM,
+					      args->size, &surf, &qobj,
+					      &handle);
+	if (r)
+		return r;
+	args->pitch = pitch;
+	args->handle = handle;
+	return 0;
+}
+
+int qxl_mode_dumb_destroy(struct drm_file *file_priv,
+			     struct drm_device *dev,
+			     uint32_t handle)
+{
+	return drm_gem_handle_delete(file_priv, handle);
+}
+
+int qxl_mode_dumb_mmap(struct drm_file *file_priv,
+		       struct drm_device *dev,
+		       uint32_t handle, uint64_t *offset_p)
+{
+	struct drm_gem_object *gobj;
+	struct qxl_bo *qobj;
+
+	BUG_ON(!offset_p);
+	gobj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (gobj == NULL)
+		return -ENOENT;
+	qobj = gem_to_qxl_bo(gobj);
+	*offset_p = qxl_bo_mmap_offset(qobj);
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_fb.c b/linux-imx/drivers/gpu/drm/qxl/qxl_fb.c
new file mode 100644
index 0000000..7002de7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_fb.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright © 2013 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+#include <linux/module.h>
+#include <linux/fb.h>
+
+#include "drmP.h"
+#include "drm/drm.h"
+#include "drm/drm_crtc.h"
+#include "drm/drm_crtc_helper.h"
+#include "qxl_drv.h"
+
+#include "qxl_object.h"
+#include "drm_fb_helper.h"
+
+#define QXL_DIRTY_DELAY (HZ / 30)
+
+struct qxl_fbdev {
+	struct drm_fb_helper helper;
+	struct qxl_framebuffer	qfb;
+	struct list_head	fbdev_list;
+	struct qxl_device	*qdev;
+
+	void *shadow;
+	int size;
+
+	/* dirty memory logging */
+	struct {
+		spinlock_t lock;
+		bool active;
+		unsigned x1;
+		unsigned y1;
+		unsigned x2;
+		unsigned y2;
+	} dirty;
+};
+
+static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
+			      struct qxl_device *qdev, struct fb_info *info,
+			      const struct fb_image *image)
+{
+	qxl_fb_image->qdev = qdev;
+	if (info) {
+		qxl_fb_image->visual = info->fix.visual;
+		if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
+		    qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
+			memcpy(&qxl_fb_image->pseudo_palette,
+			       info->pseudo_palette,
+			       sizeof(qxl_fb_image->pseudo_palette));
+	} else {
+		 /* fallback */
+		if (image->depth == 1)
+			qxl_fb_image->visual = FB_VISUAL_MONO10;
+		else
+			qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
+	}
+	if (image) {
+		memcpy(&qxl_fb_image->fb_image, image,
+		       sizeof(qxl_fb_image->fb_image));
+	}
+}
+
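+/* Push the accumulated dirty rectangle out of the shadow buffer to the
+ * device as a single opaque draw, then reset the dirty region. */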
+static void qxl_fb_dirty_flush(struct fb_info *info)
+{
+	struct qxl_fbdev *qfbdev = info->par;
+	struct qxl_device *qdev = qfbdev->qdev;
+	struct qxl_fb_image qxl_fb_image;
+	struct fb_image *image = &qxl_fb_image.fb_image;
+	u32 x1, x2, y1, y2;
+
+	/* TODO: hard coding 32 bpp */
+	int stride = qfbdev->qfb.base.pitches[0];
+
+	x1 = qfbdev->dirty.x1;
+	x2 = qfbdev->dirty.x2;
+	y1 = qfbdev->dirty.y1;
+	y2 = qfbdev->dirty.y2;
+	/*
+	 * we are using a shadow draw buffer, at qfbdev->shadow
+	 */
+	qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
+	image->dx = x1;
+	image->dy = y1;
+	image->width = x2 - x1;
+	image->height = y2 - y1;
+	image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
+					 warnings */
+	image->bg_color = 0;
+	image->depth = 32;	     /* TODO: take from somewhere? */
+	image->cmap.start = 0;
+	image->cmap.len = 0;
+	image->cmap.red = NULL;
+	image->cmap.green = NULL;
+	image->cmap.blue = NULL;
+	image->cmap.transp = NULL;
+	image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);
+
+	qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
+	qxl_draw_opaque_fb(&qxl_fb_image, stride);
+	qfbdev->dirty.x1 = 0;
+	qfbdev->dirty.x2 = 0;
+	qfbdev->dirty.y1 = 0;
+	qfbdev->dirty.y2 = 0;
+}
+
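+/* Deferred I/O callback: mm hands us the shadow pages that were written;
+ * turn the touched page span into full-width dirty rows and flush them. */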
+static void qxl_deferred_io(struct fb_info *info,
+			    struct list_head *pagelist)
+{
+	struct qxl_fbdev *qfbdev = info->par;
+	unsigned long start, end, min, max;
+	struct page *page;
+	int y1, y2;
+
+	min = ULONG_MAX;
+	max = 0;
+	list_for_each_entry(page, pagelist, lru) {
+		start = page->index << PAGE_SHIFT;
+		end = start + PAGE_SIZE - 1;
+		min = min(min, start);
+		max = max(max, end);
+	}
+
+	if (min < max) {
+		y1 = min / info->fix.line_length;
+		y2 = (max / info->fix.line_length) + 1;
+
+		/* TODO: add spin lock? */
+		/* spin_lock_irqsave(&qfbdev->dirty.lock, flags); */
+		qfbdev->dirty.x1 = 0;
+		qfbdev->dirty.y1 = y1;
+		qfbdev->dirty.x2 = info->var.xres;
+		qfbdev->dirty.y2 = y2;
+		/* spin_unlock_irqrestore(&qfbdev->dirty.lock, flags); */
+	}
+
+	qxl_fb_dirty_flush(info);
+}
+
+
+static struct fb_deferred_io qxl_defio = {
+	.delay		= QXL_DIRTY_DELAY,
+	.deferred_io	= qxl_deferred_io,
+};
+
+static void qxl_fb_fillrect(struct fb_info *info,
+			    const struct fb_fillrect *fb_rect)
+{
+	struct qxl_fbdev *qfbdev = info->par;
+	struct qxl_device *qdev = qfbdev->qdev;
+	struct qxl_rect rect;
+	uint32_t color;
+	int x = fb_rect->dx;
+	int y = fb_rect->dy;
+	int width = fb_rect->width;
+	int height = fb_rect->height;
+	uint16_t rop;
+	struct qxl_draw_fill qxl_draw_fill_rec;
+
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+		color = ((u32 *) (info->pseudo_palette))[fb_rect->color];
+	else
+		color = fb_rect->color;
+	rect.left = x;
+	rect.right = x + width;
+	rect.top = y;
+	rect.bottom = y + height;
+	switch (fb_rect->rop) {
+	case ROP_XOR:
+		rop = SPICE_ROPD_OP_XOR;
+		break;
+	case ROP_COPY:
+		rop = SPICE_ROPD_OP_PUT;
+		break;
+	default:
+		pr_err("qxl_fb_fillrect(): unknown rop, "
+		       "defaulting to SPICE_ROPD_OP_PUT\n");
+		rop = SPICE_ROPD_OP_PUT;
+	}
+	qxl_draw_fill_rec.qdev = qdev;
+	qxl_draw_fill_rec.rect = rect;
+	qxl_draw_fill_rec.color = color;
+	qxl_draw_fill_rec.rop = rop;
+	if (!drm_can_sleep()) {
+		qxl_io_log(qdev,
+			"%s: TODO use RCU, mysterious locks with spin_lock\n",
+			__func__);
+		return;
+	}
+	qxl_draw_fill(&qxl_draw_fill_rec);
+}
+
+static void qxl_fb_copyarea(struct fb_info *info,
+			    const struct fb_copyarea *region)
+{
+	struct qxl_fbdev *qfbdev = info->par;
+
+	qxl_draw_copyarea(qfbdev->qdev,
+			  region->width, region->height,
+			  region->sx, region->sy,
+			  region->dx, region->dy);
+}
+
+static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
+{
+	qxl_draw_opaque_fb(qxl_fb_image, 0);
+}
+
+static void qxl_fb_imageblit(struct fb_info *info,
+			     const struct fb_image *image)
+{
+	struct qxl_fbdev *qfbdev = info->par;
+	struct qxl_device *qdev = qfbdev->qdev;
+	struct qxl_fb_image qxl_fb_image;
+
+	if (!drm_can_sleep()) {
+		/* we cannot do any ttm_bo allocation since that will fail on
+		 * ioremap_wc..__get_vm_area_node, so queue the work item
+		 * instead. This can happen from printk inside an interrupt
+		 * context, e.g. smp_apic_timer_interrupt..check_cpu_stall */
+		qxl_io_log(qdev,
+			"%s: TODO use RCU, mysterious locks with spin_lock\n",
+			   __func__);
+		return;
+	}
+
+	/* ensure proper order of rendering operations - TODO: must do this
+	 * for everything. */
+	qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
+	qxl_fb_imageblit_safe(&qxl_fb_image);
+}
+
+int qxl_fb_init(struct qxl_device *qdev)
+{
+	return 0;
+}
+
+static struct fb_ops qxlfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
+	.fb_fillrect = qxl_fb_fillrect,
+	.fb_copyarea = qxl_fb_copyarea,
+	.fb_imageblit = qxl_fb_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+	struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
+	int ret;
+
+	ret = qxl_bo_reserve(qbo, false);
+	if (likely(ret == 0)) {
+		qxl_bo_kunmap(qbo);
+		qxl_bo_unpin(qbo);
+		qxl_bo_unreserve(qbo);
+	}
+	drm_gem_object_unreference_unlocked(gobj);
+}
+
+int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
+				  struct drm_file *file_priv,
+				  uint32_t *handle)
+{
+	int r;
+	struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;
+
+	BUG_ON(!gobj);
+	/* drm_get_handle_create adds a reference - good */
+	r = drm_gem_handle_create(file_priv, gobj, handle);
+	if (r)
+		return r;
+	return 0;
+}
+
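+/* Allocate, pin and kmap the bo that backs the fbdev console surface. */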
+static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
+				      struct drm_mode_fb_cmd2 *mode_cmd,
+				      struct drm_gem_object **gobj_p)
+{
+	struct qxl_device *qdev = qfbdev->qdev;
+	struct drm_gem_object *gobj = NULL;
+	struct qxl_bo *qbo = NULL;
+	int ret;
+	int aligned_size, size;
+	int height = mode_cmd->height;
+	int bpp;
+	int depth;
+
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
+
+	size = mode_cmd->pitches[0] * height;
+	aligned_size = ALIGN(size, PAGE_SIZE);
+	/* TODO: unallocate and reallocate surface0 for real. Hack to just
+	 * have a large enough surface0 for 1024x768 Xorg 32bpp mode */
+	ret = qxl_gem_object_create(qdev, aligned_size, 0,
+				    QXL_GEM_DOMAIN_SURFACE,
+				    false, /* is discardable */
+				    false, /* is kernel (false means device) */
+				    NULL,
+				    &gobj);
+	if (ret) {
+		pr_err("failed to allocate framebuffer (%d)\n",
+		       aligned_size);
+		return -ENOMEM;
+	}
+	qbo = gem_to_qxl_bo(gobj);
+
+	qbo->surf.width = mode_cmd->width;
+	qbo->surf.height = mode_cmd->height;
+	qbo->surf.stride = mode_cmd->pitches[0];
+	qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
+	ret = qxl_bo_reserve(qbo, false);
+	if (unlikely(ret != 0))
+		goto out_unref;
+	ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
+	if (ret) {
+		qxl_bo_unreserve(qbo);
+		goto out_unref;
+	}
+	ret = qxl_bo_kmap(qbo, NULL);
+	qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
+	if (ret)
+		goto out_unref;
+
+	*gobj_p = gobj;
+	return 0;
+out_unref:
+	qxlfb_destroy_pinned_object(gobj);
+	*gobj_p = NULL;
+	return ret;
+}
+
+static int qxlfb_create(struct qxl_fbdev *qfbdev,
+			struct drm_fb_helper_surface_size *sizes)
+{
+	struct qxl_device *qdev = qfbdev->qdev;
+	struct fb_info *info;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_gem_object *gobj = NULL;
+	struct qxl_bo *qbo = NULL;
+	struct device *device = &qdev->pdev->dev;
+	int ret;
+	int size;
+	int bpp = sizes->surface_bpp;
+	int depth = sizes->surface_depth;
+	void *shadow;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+
+	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
+	ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
+	if (ret)
+		return ret;
+	qbo = gem_to_qxl_bo(gobj);
+	QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
+		 mode_cmd.height, mode_cmd.pitches[0]);
+
+	shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
+	/* TODO: what's the usual response to memory allocation errors? */
+	BUG_ON(!shadow);
+	QXL_INFO(qdev,
+	"surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
+		 qxl_bo_gpu_offset(qbo),
+		 qxl_bo_mmap_offset(qbo),
+		 qbo->kptr,
+		 shadow);
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+
+	info = framebuffer_alloc(0, device);
+	if (info == NULL) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
+	info->par = qfbdev;
+
+	qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);
+
+	fb = &qfbdev->qfb.base;
+
+	/* setup helper with fb data */
+	qfbdev->helper.fb = fb;
+	qfbdev->helper.fbdev = info;
+	qfbdev->shadow = shadow;
+	strcpy(info->fix.id, "qxldrmfb");
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+
+	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
+	info->fbops = &qxlfb_ops;
+
+	/*
+	 * TODO: using gobj->size in various places in this function. Not sure
+	 * what the difference between the different sizes is.
+	 */
+	info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
+	info->fix.smem_len = gobj->size;
+	info->screen_base = qfbdev->shadow;
+	info->screen_size = gobj->size;
+
+	drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
+			       sizes->fb_height);
+
+	/* setup aperture base/size for vesafb takeover */
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+	info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
+	info->apertures->ranges[0].size = qdev->vram_size;
+
+	info->fix.mmio_start = 0;
+	info->fix.mmio_len = 0;
+
+	if (info->screen_base == NULL) {
+		ret = -ENOSPC;
+		goto out_unref;
+	}
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
+	info->fbdefio = &qxl_defio;
+	fb_deferred_io_init(info);
+
+	qdev->fbdev_info = info;
+	qdev->fbdev_qfb = &qfbdev->qfb;
+	DRM_INFO("fb mappable at 0x%lX, size %lu\n",  info->fix.smem_start, (unsigned long)info->screen_size);
+	DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
+	return 0;
+
+out_unref:
+	if (qbo) {
+		/* use a local so the error we are returning isn't clobbered */
+		int unref_ret = qxl_bo_reserve(qbo, false);
+		if (likely(unref_ret == 0)) {
+			qxl_bo_kunmap(qbo);
+			qxl_bo_unpin(qbo);
+			qxl_bo_unreserve(qbo);
+		}
+	}
+	/* fb is embedded in qfbdev->qfb, so clean it up but never kfree it */
+	if (fb)
+		drm_framebuffer_cleanup(fb);
+	drm_gem_object_unreference(gobj);
+	return ret;
+}
+
+static int qxl_fb_find_or_create_single(
+		struct drm_fb_helper *helper,
+		struct drm_fb_helper_surface_size *sizes)
+{
+	struct qxl_fbdev *qfbdev = container_of(helper, struct qxl_fbdev, helper);
+	int new_fb = 0;
+	int ret;
+
+	if (!helper->fb) {
+		ret = qxlfb_create(qfbdev, sizes);
+		if (ret)
+			return ret;
+		new_fb = 1;
+	}
+	return new_fb;
+}
+
+static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
+{
+	struct fb_info *info;
+	struct qxl_framebuffer *qfb = &qfbdev->qfb;
+
+	if (qfbdev->helper.fbdev) {
+		info = qfbdev->helper.fbdev;
+
+		unregister_framebuffer(info);
+		framebuffer_release(info);
+	}
+	if (qfb->obj) {
+		qxlfb_destroy_pinned_object(qfb->obj);
+		qfb->obj = NULL;
+	}
+	drm_fb_helper_fini(&qfbdev->helper);
+	vfree(qfbdev->shadow);
+	drm_framebuffer_cleanup(&qfb->base);
+
+	return 0;
+}
+
+static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
+	/* TODO
+	.gamma_set = qxl_crtc_fb_gamma_set,
+	.gamma_get = qxl_crtc_fb_gamma_get,
+	*/
+	.fb_probe = qxl_fb_find_or_create_single,
+};
+
+int qxl_fbdev_init(struct qxl_device *qdev)
+{
+	struct qxl_fbdev *qfbdev;
+	int bpp_sel = 32; /* TODO: parameter from somewhere? */
+	int ret;
+
+	qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
+	if (!qfbdev)
+		return -ENOMEM;
+
+	qfbdev->qdev = qdev;
+	qdev->mode_info.qfbdev = qfbdev;
+	qfbdev->helper.funcs = &qxl_fb_helper_funcs;
+
+	ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
+				 1 /* num_crtc - QXL supports just 1 */,
+				 QXLFB_CONN_LIMIT);
+	if (ret) {
+		kfree(qfbdev);
+		return ret;
+	}
+
+	drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
+	drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
+	return 0;
+}
+
+void qxl_fbdev_fini(struct qxl_device *qdev)
+{
+	if (!qdev->mode_info.qfbdev)
+		return;
+
+	qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
+	kfree(qdev->mode_info.qfbdev);
+	qdev->mode_info.qfbdev = NULL;
+}
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_fence.c b/linux-imx/drivers/gpu/drm/qxl/qxl_fence.c
new file mode 100644
index 0000000..63c6715
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_fence.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+
+#include "qxl_drv.h"
+
+/* QXL fencing -
+
+   When we submit operations to the GPU we pass a release reference to the GPU
+   with them; the release reference is then added to the release ring when
+   the GPU is finished with that particular operation and has removed it from
+   its tree.
+
+   So we can have multiple outstanding non-linear fences per object.
+
+   From a TTM POV we only care whether the object has any outstanding
+   releases on it.
+
+   We wait until all outstanding releases are processed.
+
+   A sync object is just a list of release ids that represent the fences on
+   that buffer.
+
+   We just add new releases onto the sync object attached to the object.
+
+   This currently uses a radix tree to store the list of release ids.
+
+   For some reason the qxl hw every so often fails to release, and then
+   things go wrong.
+*/
+
+
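+/* Track another outstanding release on this bo; takes the ttm fence lock. */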
+int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
+{
+	struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+	spin_lock(&bo->tbo.bdev->fence_lock);
+	radix_tree_insert(&qfence->tree, rel_id, qfence);
+	qfence->num_active_releases++;
+	spin_unlock(&bo->tbo.bdev->fence_lock);
+	return 0;
+}
+
+int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
+{
+	void *ret;
+	int retval = 0;
+	struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+	spin_lock(&bo->tbo.bdev->fence_lock);
+
+	ret = radix_tree_delete(&qfence->tree, rel_id);
+	if (ret == qfence)
+		qfence->num_active_releases--;
+	else {
+		DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id);
+		retval = -ENOENT;
+	}
+	spin_unlock(&bo->tbo.bdev->fence_lock);
+	return retval;
+}
+
+
+int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence)
+{
+	qfence->qdev = qdev;
+	qfence->num_active_releases = 0;
+	INIT_RADIX_TREE(&qfence->tree, GFP_ATOMIC);
+	return 0;
+}
+
+void qxl_fence_fini(struct qxl_fence *qfence)
+{
+	kfree(qfence->release_ids);
+	qfence->num_active_releases = 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_gem.c b/linux-imx/drivers/gpu/drm/qxl/qxl_gem.c
new file mode 100644
index 0000000..a235693
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_gem.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "drmP.h"
+#include "drm/drm.h"
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+int qxl_gem_object_init(struct drm_gem_object *obj)
+{
+	/* nothing to do here */
+	return 0;
+}
+
+void qxl_gem_object_free(struct drm_gem_object *gobj)
+{
+	struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
+
+	if (qobj)
+		qxl_bo_unref(&qobj);
+}
+
+int qxl_gem_object_create(struct qxl_device *qdev, int size,
+			  int alignment, int initial_domain,
+			  bool discardable, bool kernel,
+			  struct qxl_surface *surf,
+			  struct drm_gem_object **obj)
+{
+	struct qxl_bo *qbo;
+	int r;
+
+	*obj = NULL;
+	/* At least align on page size */
+	if (alignment < PAGE_SIZE)
+		alignment = PAGE_SIZE;
+	r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo);
+	if (r) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR(
+			"Failed to allocate GEM object (%d, %d, %u, %d)\n",
+				  size, initial_domain, alignment, r);
+		return r;
+	}
+	*obj = &qbo->gem_base;
+
+	mutex_lock(&qdev->gem.mutex);
+	list_add_tail(&qbo->list, &qdev->gem.objects);
+	mutex_unlock(&qdev->gem.mutex);
+
+	return 0;
+}
+
+int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+				      struct drm_file *file_priv,
+				      u32 domain,
+				      size_t size,
+				      struct qxl_surface *surf,
+				      struct qxl_bo **qobj,
+				      uint32_t *handle)
+{
+	struct drm_gem_object *gobj;
+	int r;
+
+	BUG_ON(!qobj);
+	BUG_ON(!handle);
+
+	r = qxl_gem_object_create(qdev, size, 0,
+				  domain,
+				  false, false, surf,
+				  &gobj);
+	if (r)
+		return r;
+	r = drm_gem_handle_create(file_priv, gobj, handle);
+	if (r)
+		return r;
+	/* drop reference from allocate - handle holds it now */
+	*qobj = gem_to_qxl_bo(gobj);
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
+
+int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+			  uint64_t *gpu_addr)
+{
+	struct qxl_bo *qobj = obj->driver_private;
+	int r;
+
+	r = qxl_bo_reserve(qobj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
+	qxl_bo_unreserve(qobj);
+	return r;
+}
+
+void qxl_gem_object_unpin(struct drm_gem_object *obj)
+{
+	struct qxl_bo *qobj = obj->driver_private;
+	int r;
+
+	r = qxl_bo_reserve(qobj, false);
+	if (likely(r == 0)) {
+		qxl_bo_unpin(qobj);
+		qxl_bo_unreserve(qobj);
+	}
+}
+
+int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+	return 0;
+}
+
+void qxl_gem_object_close(struct drm_gem_object *obj,
+			  struct drm_file *file_priv)
+{
+}
+
+int qxl_gem_init(struct qxl_device *qdev)
+{
+	INIT_LIST_HEAD(&qdev->gem.objects);
+	return 0;
+}
+
+void qxl_gem_fini(struct qxl_device *qdev)
+{
+	qxl_bo_force_delete(qdev);
+}
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_image.c b/linux-imx/drivers/gpu/drm/qxl/qxl_image.c
new file mode 100644
index 0000000..cf85620
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_image.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
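+/*
+ * Build a QXL bitmap image for the device: copy the pixel data page by
+ * page into a data-chunk bo, then fill in a qxl_image descriptor that
+ * points at the chunk by its device physical address.
+ */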
+static int
+qxl_image_create_helper(struct qxl_device *qdev,
+			struct qxl_release *release,
+			struct qxl_bo **image_bo,
+			const uint8_t *data,
+			int width, int height,
+			int depth, unsigned int hash,
+			int stride)
+{
+	struct qxl_image *image;
+	struct qxl_data_chunk *chunk;
+	int i;
+	int chunk_stride;
+	int linesize = width * depth / 8;
+	struct qxl_bo *chunk_bo;
+	int ret;
+	void *ptr;
+	/* Chunk */
+	/* FIXME: Check integer overflow */
+	/* TODO: variable number of chunks */
+	chunk_stride = stride; /* TODO: should use linesize, but it renders
+				  wrong (check the bitmaps are sent correctly
+				  first) */
+	ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
+				    &chunk_bo);
+	if (ret)
+		return ret;
+
+	ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
+	chunk = ptr;
+	chunk->data_size = height * chunk_stride;
+	chunk->prev_chunk = 0;
+	chunk->next_chunk = 0;
+	qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+
+	{
+		void *k_data, *i_data;
+		int remain;
+		int page;
+		int size;
+		if (stride == linesize && chunk_stride == stride) {
+			remain = linesize * height;
+			page = 0;
+			i_data = (void *)data;
+
+			while (remain > 0) {
+				ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);
+
+				if (page == 0) {
+					chunk = ptr;
+					k_data = chunk->data;
+					size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
+				} else {
+					k_data = ptr;
+					size = PAGE_SIZE;
+				}
+				size = min(size, remain);
+
+				memcpy(k_data, i_data, size);
+
+				qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+				i_data += size;
+				remain -= size;
+				page++;
+			}
+		} else {
+			unsigned page_base, page_offset, out_offset;
+			for (i = 0 ; i < height ; ++i) {
+				i_data = (void *)data + i * stride;
+				remain = linesize;
+				out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;
+
+				while (remain > 0) {
+					page_base = out_offset & PAGE_MASK;
+					page_offset = offset_in_page(out_offset);
+					
+					size = min((int)(PAGE_SIZE - page_offset), remain);
+
+					ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
+					k_data = ptr + page_offset;
+					memcpy(k_data, i_data, size);
+					qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
+					remain -= size;
+					i_data += size;
+					out_offset += size;
+				}
+			}
+		}
+	}
+
+	qxl_bo_kunmap(chunk_bo);
+
+	/* Image */
+	ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
+	if (ret)
+		return ret; /* TODO: also unwind chunk_bo on this path */
+
+	ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
+	image = ptr;
+
+	image->descriptor.id = 0;
+	image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;
+
+	image->descriptor.flags = 0;
+	image->descriptor.width = width;
+	image->descriptor.height = height;
+
+	switch (depth) {
+	case 1:
+		/* TODO: BE? check by arch? */
+		image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
+		break;
+	case 24:
+		image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
+		break;
+	case 32:
+		image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
+		break;
+	default:
+		DRM_ERROR("unsupported image bit depth\n");
+		return -EINVAL; /* TODO: cleanup */
+	}
+	image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
+	image->u.bitmap.x = width;
+	image->u.bitmap.y = height;
+	image->u.bitmap.stride = chunk_stride;
+	image->u.bitmap.palette = 0;
+	image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
+	qxl_release_add_res(qdev, release, chunk_bo);
+	qxl_bo_unreserve(chunk_bo);
+	qxl_bo_unref(&chunk_bo);
+
+	qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
+
+	return 0;
+}
+
+int qxl_image_create(struct qxl_device *qdev,
+		     struct qxl_release *release,
+		     struct qxl_bo **image_bo,
+		     const uint8_t *data,
+		     int x, int y, int width, int height,
+		     int depth, int stride)
+{
+	data += y * stride + x * (depth / 8);
+	return qxl_image_create_helper(qdev, release, image_bo, data,
+				       width, height, depth, 0, stride);
+}
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_ioctl.c b/linux-imx/drivers/gpu/drm/qxl/qxl_ioctl.c
new file mode 100644
index 0000000..a30f294
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/*
+ * TODO: allocating a new gem(in qxl_bo) for each request.
+ * This is wasteful since bo's are page aligned.
+ */
+static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_alloc *qxl_alloc = data;
+	int ret;
+	struct qxl_bo *qobj;
+	uint32_t handle;
+	u32 domain = QXL_GEM_DOMAIN_VRAM;
+
+	if (qxl_alloc->size == 0) {
+		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
+		return -EINVAL;
+	}
+	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
+						domain,
+						qxl_alloc->size,
+						NULL,
+						&qobj, &handle);
+	if (ret) {
+		DRM_ERROR("%s: failed to create gem ret=%d\n",
+			  __func__, ret);
+		return ret;
+	}
+	qxl_alloc->handle = handle;
+	return 0;
+}
+
+static int qxl_map_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_map *qxl_map = data;
+
+	return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
+				  &qxl_map->offset);
+}
+
+/*
+ * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
+ * are on vram).
+ * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
+ */
+static void
+apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
+	    struct qxl_bo *src, uint64_t src_off)
+{
+	void *reloc_page;
+
+	reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
+	*(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
+								     src, src_off);
+	qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+}
+
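+/* Like apply_reloc(), but patches in a surface id (0 for the primary). */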
+static void
+apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
+		 struct qxl_bo *src)
+{
+	uint32_t id = 0;
+	void *reloc_page;
+
+	if (src && !src->is_primary)
+		id = src->surface_id;
+
+	reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
+	*(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
+	qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+}
+
+/* return holding the reference to this object */
+static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
+					 struct drm_file *file_priv, uint64_t handle,
+					 struct qxl_reloc_list *reloc_list)
+{
+	struct drm_gem_object *gobj;
+	struct qxl_bo *qobj;
+	int ret;
+
+	gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
+	if (!gobj) {
+		DRM_ERROR("bad bo handle %lld\n", handle);
+		return NULL;
+	}
+	qobj = gem_to_qxl_bo(gobj);
+
+	ret = qxl_bo_list_add(reloc_list, qobj);
+	if (ret)
+		return NULL;
+
+	return qobj;
+}
+
+/*
+ * Usage of execbuffer:
+ * Relocations need to take into account the full QXLDrawable size.
+ * However, the command as passed from user space must *not* contain the initial
+ * QXLReleaseInfo struct (first XXX bytes)
+ */
+static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_execbuffer *execbuffer = data;
+	struct drm_qxl_command user_cmd;
+	int cmd_num;
+	struct qxl_bo *reloc_src_bo;
+	struct qxl_bo *reloc_dst_bo;
+	struct drm_qxl_reloc reloc;
+	void *fb_cmd;
+	int i, ret;
+	struct qxl_reloc_list reloc_list;
+	int unwritten;
+	uint32_t reloc_dst_offset;
+	INIT_LIST_HEAD(&reloc_list.bos);
+
+	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
+		struct qxl_release *release;
+		struct qxl_bo *cmd_bo;
+		int release_type;
+		struct drm_qxl_command *commands =
+			(struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+
+		if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+				       sizeof(user_cmd)))
+			return -EFAULT;
+		switch (user_cmd.type) {
+		case QXL_CMD_DRAW:
+			release_type = QXL_RELEASE_DRAWABLE;
+			break;
+		case QXL_CMD_SURFACE:
+		case QXL_CMD_CURSOR:
+		default:
+			DRM_DEBUG("Only draw commands in execbuffers\n");
+			return -EINVAL;
+		}
+
+		if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
+			return -EINVAL;
+
+		if (!access_ok(VERIFY_READ,
+			       (void *)(unsigned long)user_cmd.command,
+			       user_cmd.command_size))
+			return -EFAULT;
+
+		ret = qxl_alloc_release_reserved(qdev,
+						 sizeof(union qxl_release_info) +
+						 user_cmd.command_size,
+						 release_type,
+						 &release,
+						 &cmd_bo);
+		if (ret)
+			return ret;
+
+		/* TODO copy slow path code from i915 */
+		fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo,
+						 (release->release_offset & PAGE_SIZE));
+		unwritten = __copy_from_user_inatomic_nocache(fb_cmd +
+				sizeof(union qxl_release_info) +
+				(release->release_offset & ~PAGE_SIZE),
+				(void *)(unsigned long)user_cmd.command,
+				user_cmd.command_size);
+		qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
+		if (unwritten) {
+			DRM_ERROR("got unwritten %d\n", unwritten);
+			qxl_release_unreserve(qdev, release);
+			qxl_release_free(qdev, release);
+			return -EFAULT;
+		}
+
+		for (i = 0 ; i < user_cmd.relocs_num; ++i) {
+			if (DRM_COPY_FROM_USER(&reloc,
+					       &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i],
+					       sizeof(reloc))) {
+				qxl_bo_list_unreserve(&reloc_list, true);
+				qxl_release_unreserve(qdev, release);
+				qxl_release_free(qdev, release);
+				return -EFAULT;
+			}
+
+			/* add the bos to the list of bos to validate -
+			   need to validate first then process relocs? */
+			if (reloc.dst_handle) {
+				reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
+								  reloc.dst_handle, &reloc_list);
+				if (!reloc_dst_bo) {
+					qxl_bo_list_unreserve(&reloc_list, true);
+					qxl_release_unreserve(qdev, release);
+					qxl_release_free(qdev, release);
+					return -EINVAL;
+				}
+				reloc_dst_offset = 0;
+			} else {
+				reloc_dst_bo = cmd_bo;
+				reloc_dst_offset = release->release_offset;
+			}
+
+			/* reserve and validate the reloc dst bo */
+			if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
+				reloc_src_bo =
+					qxlhw_handle_to_bo(qdev, file_priv,
+							   reloc.src_handle, &reloc_list);
+				if (!reloc_src_bo) {
+					if (reloc_dst_bo != cmd_bo)
+						drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
+					qxl_bo_list_unreserve(&reloc_list, true);
+					qxl_release_unreserve(qdev, release);
+					qxl_release_free(qdev, release);
+					return -EINVAL;
+				}
+			} else
+				reloc_src_bo = NULL;
+			if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
+				apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
+					    reloc_src_bo, reloc.src_offset);
+			} else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
+				apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
+			} else {
+				DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
+				return -EINVAL;
+			}
+
+			if (reloc_src_bo && reloc_src_bo != cmd_bo) {
+				qxl_release_add_res(qdev, release, reloc_src_bo);
+				drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
+			}
+
+			if (reloc_dst_bo != cmd_bo)
+				drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
+		}
+		qxl_fence_releaseable(qdev, release);
+
+		ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
+		if (ret == -ERESTARTSYS) {
+			qxl_release_unreserve(qdev, release);
+			qxl_release_free(qdev, release);
+			qxl_bo_list_unreserve(&reloc_list, true);
+			return ret;
+		}
+		qxl_release_unreserve(qdev, release);
+	}
+	qxl_bo_list_unreserve(&reloc_list, false);
+	return 0;
+}
+
+static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_update_area *update_area = data;
+	struct qxl_rect area = {.left = update_area->left,
+				.top = update_area->top,
+				.right = update_area->right,
+				.bottom = update_area->bottom};
+	int ret;
+	struct drm_gem_object *gobj = NULL;
+	struct qxl_bo *qobj = NULL;
+
+	if (update_area->left >= update_area->right ||
+	    update_area->top >= update_area->bottom)
+		return -EINVAL;
+
+	gobj = drm_gem_object_lookup(dev, file, update_area->handle);
+	if (gobj == NULL)
+		return -ENOENT;
+
+	qobj = gem_to_qxl_bo(gobj);
+
+	ret = qxl_bo_reserve(qobj, false);
+	if (ret)
+		goto out;
+
+	if (!qobj->pin_count) {
+		qxl_ttm_placement_from_domain(qobj, qobj->type);
+		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
+				      true, false);
+		if (unlikely(ret))
+			goto out;
+	}
+
+	ret = qxl_bo_check_id(qdev, qobj);
+	if (ret)
+		goto out2;
+	if (!qobj->surface_id)
+		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
+	ret = qxl_io_update_area(qdev, qobj, &area);
+
+out2:
+	qxl_bo_unreserve(qobj);
+
+out:
+	drm_gem_object_unreference_unlocked(gobj);
+	return ret;
+}
+
+static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_getparam *param = data;
+
+	switch (param->param) {
+	case QXL_PARAM_NUM_SURFACES:
+		param->value = qdev->rom->n_surfaces;
+		break;
+	case QXL_PARAM_MAX_RELOCS:
+		param->value = QXL_MAX_RES;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_clientcap *param = data;
+	int byte, idx;
+
+	byte = param->index / 8;
+	idx = param->index % 8;
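+	/*
+	 * Illustrative example: index 37 selects byte 4, bit 5 of
+	 * rom->client_capabilities; the ioctl returns 0 iff that bit is set.
+	 */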
+
+	if (qdev->pdev->revision < 4)
+		return -ENOSYS;
+
+	if (byte >= 58)
+		return -ENOSYS;
+
+	if (qdev->rom->client_capabilities[byte] & (1 << idx))
+		return 0;
+	return -ENOSYS;
+}
+
+static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	struct qxl_device *qdev = dev->dev_private;
+	struct drm_qxl_alloc_surf *param = data;
+	struct qxl_bo *qobj;
+	int handle;
+	int ret;
+	int size, actual_stride;
+	struct qxl_surface surf;
+
+	/* work out the size and allocate a bo with a handle */
+	actual_stride = param->stride < 0 ? -param->stride : param->stride;
+	size = actual_stride * param->height + actual_stride;
+
+	surf.format = param->format;
+	surf.width = param->width;
+	surf.height = param->height;
+	surf.stride = param->stride;
+	surf.data = 0;
+
+	ret = qxl_gem_object_create_with_handle(qdev, file,
+						QXL_GEM_DOMAIN_SURFACE,
+						size,
+						&surf,
+						&qobj, &handle);
+	if (ret) {
+		DRM_ERROR("%s: failed to create gem ret=%d\n",
+			  __func__, ret);
+		return ret;
+	}
+	param->handle = handle;
+	return 0;
+}
+
+struct drm_ioctl_desc qxl_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
+							DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
+							DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
+							DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
+							DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+};
+
+int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_irq.c b/linux-imx/drivers/gpu/drm/qxl/qxl_irq.c
new file mode 100644
index 0000000..f4b6b89
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_irq.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+
+irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
+	uint32_t pending;
+
+	pending = xchg(&qdev->ram_header->int_pending, 0);
+
+	if (!pending)
+		return IRQ_NONE;
+
+	atomic_inc(&qdev->irq_received);
+
+	if (pending & QXL_INTERRUPT_DISPLAY) {
+		atomic_inc(&qdev->irq_received_display);
+		wake_up_all(&qdev->display_event);
+		qxl_queue_garbage_collect(qdev, false);
+	}
+	if (pending & QXL_INTERRUPT_CURSOR) {
+		atomic_inc(&qdev->irq_received_cursor);
+		wake_up_all(&qdev->cursor_event);
+	}
+	if (pending & QXL_INTERRUPT_IO_CMD) {
+		atomic_inc(&qdev->irq_received_io_cmd);
+		wake_up_all(&qdev->io_cmd_event);
+	}
+	if (pending & QXL_INTERRUPT_ERROR) {
+		/* TODO: log it, reset device (only way to exit this condition)
+		 * (do it a certain number of times, afterwards admit defeat,
+		 * to avoid endless loops).
+		 */
+		qdev->irq_received_error++;
+		qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__);
+	}
+	if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) {
+		qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n");
+		schedule_work(&qdev->client_monitors_config_work);
+	}
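+	/*
+	 * Re-arm: int_pending was claimed atomically by the xchg above;
+	 * restore the interrupt mask and poke the UPDATE_IRQ port so the
+	 * device may raise the next interrupt.
+	 */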
+	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+	outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ);
+	return IRQ_HANDLED;
+}
+
+static void qxl_client_monitors_config_work_func(struct work_struct *work)
+{
+	struct qxl_device *qdev = container_of(work, struct qxl_device,
+					       client_monitors_config_work);
+
+	qxl_display_read_client_monitors_config(qdev);
+}
+
+int qxl_irq_init(struct qxl_device *qdev)
+{
+	int ret;
+
+	init_waitqueue_head(&qdev->display_event);
+	init_waitqueue_head(&qdev->cursor_event);
+	init_waitqueue_head(&qdev->io_cmd_event);
+	INIT_WORK(&qdev->client_monitors_config_work,
+		  qxl_client_monitors_config_work_func);
+	atomic_set(&qdev->irq_received, 0);
+	atomic_set(&qdev->irq_received_display, 0);
+	atomic_set(&qdev->irq_received_cursor, 0);
+	atomic_set(&qdev->irq_received_io_cmd, 0);
+	qdev->irq_received_error = 0;
+	ret = drm_irq_install(qdev->ddev);
+	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed installing irq: %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_kms.c b/linux-imx/drivers/gpu/drm/qxl/qxl_kms.c
new file mode 100644
index 0000000..e27ce2a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_kms.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/io-mapping.h>
+
+int qxl_log_level;
+
+static void qxl_dump_mode(struct qxl_device *qdev, void *p)
+{
+	struct qxl_mode *m = p;
+	DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
+		      m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
+		      m->y_mili, m->orientation);
+}
+
+static bool qxl_check_device(struct qxl_device *qdev)
+{
+	struct qxl_rom *rom = qdev->rom;
+	int mode_offset;
+	int i;
+
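+	/* the magic is the string "QXRO" read as a little-endian u32 */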
+	if (rom->magic != 0x4f525851) {
+		DRM_ERROR("bad rom signature %x\n", rom->magic);
+		return false;
+	}
+
+	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
+	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
+		 rom->log_level);
+	DRM_INFO("Currently using mode #%d, list at 0x%x\n",
+		 rom->mode, rom->modes_offset);
+	DRM_INFO("%d io pages at offset 0x%x\n",
+		 rom->num_io_pages, rom->pages_offset);
+	DRM_INFO("%d byte draw area at offset 0x%x\n",
+		 rom->surface0_area_size, rom->draw_area_offset);
+
+	qdev->vram_size = rom->surface0_area_size;
+	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
+
+	mode_offset = rom->modes_offset / 4;
+	qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
+	DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
+		 qdev->mode_info.num_modes);
+	qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
+	for (i = 0; i < qdev->mode_info.num_modes; i++)
+		qxl_dump_mode(qdev, qdev->mode_info.modes + i);
+	return true;
+}
+
+static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
+	unsigned long start_phys_addr, unsigned long end_phys_addr)
+{
+	uint64_t high_bits;
+	struct qxl_memslot *slot;
+	uint8_t slot_index;
+	struct qxl_ram_header *ram_header = qdev->ram_header;
+
+	slot_index = qdev->rom->slots_start + slot_index_offset;
+	slot = &qdev->mem_slots[slot_index];
+	slot->start_phys_addr = start_phys_addr;
+	slot->end_phys_addr = end_phys_addr;
+	ram_header->mem_slot.mem_start = slot->start_phys_addr;
+	ram_header->mem_slot.mem_end = slot->end_phys_addr;
+	qxl_io_memslot_add(qdev, slot_index);
+	slot->generation = qdev->rom->slot_generation;
+	high_bits = slot_index << qdev->slot_gen_bits;
+	high_bits |= slot->generation;
+	high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
+	slot->high_bits = high_bits;
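+	/*
+	 * Illustrative example (assuming the ROM advertises 8 slot-id bits
+	 * and 8 generation bits): for slot_index 1, generation 3 this gives
+	 * high_bits = ((1 << 8) | 3) << 48 = 0x0103000000000000, i.e. the
+	 * slot id and generation occupy the top 16 bits of every device
+	 * physical address built for this slot, while the low bits (masked
+	 * by va_slot_mask) carry the offset within the slot.
+	 */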
+	return slot_index;
+}
+
+static void qxl_gc_work(struct work_struct *work)
+{
+	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
+	qxl_garbage_collect(qdev);
+}
+
+int qxl_device_init(struct qxl_device *qdev,
+		    struct drm_device *ddev,
+		    struct pci_dev *pdev,
+		    unsigned long flags)
+{
+	int r;
+
+	qdev->dev = &pdev->dev;
+	qdev->ddev = ddev;
+	qdev->pdev = pdev;
+	qdev->flags = flags;
+
+	mutex_init(&qdev->gem.mutex);
+	mutex_init(&qdev->update_area_mutex);
+	mutex_init(&qdev->release_mutex);
+	mutex_init(&qdev->surf_evict_mutex);
+	INIT_LIST_HEAD(&qdev->gem.objects);
+
+	qdev->rom_base = pci_resource_start(pdev, 2);
+	qdev->rom_size = pci_resource_len(pdev, 2);
+	qdev->vram_base = pci_resource_start(pdev, 0);
+	qdev->surfaceram_base = pci_resource_start(pdev, 1);
+	qdev->surfaceram_size = pci_resource_len(pdev, 1);
+	qdev->io_base = pci_resource_start(pdev, 3);
+
+	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
+	qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
+	DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk)\n",
+		 (unsigned long long)qdev->vram_base,
+		 (unsigned long long)pci_resource_end(pdev, 0),
+		 (int)pci_resource_len(pdev, 0) / 1024 / 1024,
+		 (int)pci_resource_len(pdev, 0) / 1024,
+		 (unsigned long long)qdev->surfaceram_base,
+		 (unsigned long long)pci_resource_end(pdev, 1),
+		 (int)qdev->surfaceram_size / 1024 / 1024,
+		 (int)qdev->surfaceram_size / 1024);
+
+	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
+	if (!qdev->rom) {
+		pr_err("Unable to ioremap ROM\n");
+		return -ENOMEM;
+	}
+
+	qxl_check_device(qdev);
+
+	r = qxl_bo_init(qdev);
+	if (r) {
+		DRM_ERROR("bo init failed %d\n", r);
+		return r;
+	}
+
+	qdev->ram_header = ioremap(qdev->vram_base +
+				   qdev->rom->ram_header_offset,
+				   sizeof(*qdev->ram_header));
+
+	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
+					     sizeof(struct qxl_command),
+					     QXL_COMMAND_RING_SIZE,
+					     qdev->io_base + QXL_IO_NOTIFY_CMD,
+					     false,
+					     &qdev->display_event);
+
+	qdev->cursor_ring = qxl_ring_create(
+				&(qdev->ram_header->cursor_ring_hdr),
+				sizeof(struct qxl_command),
+				QXL_CURSOR_RING_SIZE,
+				qdev->io_base + QXL_IO_NOTIFY_CMD,
+				false,
+				&qdev->cursor_event);
+
+	qdev->release_ring = qxl_ring_create(
+				&(qdev->ram_header->release_ring_hdr),
+				sizeof(uint64_t),
+				QXL_RELEASE_RING_SIZE, 0, true,
+				NULL);
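+	/*
+	 * Three rings live in the ram header: the command and cursor rings
+	 * the driver produces into (kicking the device through an io port),
+	 * and the release ring the device produces into, consumed by the
+	 * garbage-collect work set up below.
+	 */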
+
+	/* TODO - slot initialization should happen on reset. where is our
+	 * reset handler? */
+	qdev->n_mem_slots = qdev->rom->slots_end;
+	qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
+	qdev->slot_id_bits = qdev->rom->slot_id_bits;
+	qdev->va_slot_mask =
+		(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
+
+	qdev->mem_slots =
+		kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
+			GFP_KERNEL);
+	if (!qdev->mem_slots)
+		return -ENOMEM;
+
+	idr_init(&qdev->release_idr);
+	spin_lock_init(&qdev->release_idr_lock);
+
+	idr_init(&qdev->surf_id_idr);
+	spin_lock_init(&qdev->surf_id_idr_lock);
+
+	mutex_init(&qdev->async_io_mutex);
+
+	/* reset the device into a known state - no memslots, no primary
+	 * created, no surfaces. */
+	qxl_io_reset(qdev);
+
+	/* must initialize irq before first async io - slot creation */
+	r = qxl_irq_init(qdev);
+	if (r)
+		return r;
+
+	/*
+	 * Note that the virtual area is surface0. We rely on the single
+	 * ioremap done before.
+	 */
+	qdev->main_mem_slot = setup_slot(qdev, 0,
+		(unsigned long)qdev->vram_base,
+		(unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
+	qdev->surfaces_mem_slot = setup_slot(qdev, 1,
+		(unsigned long)qdev->surfaceram_base,
+		(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
+	DRM_INFO("main mem slot %d [%lx,%x)\n",
+		qdev->main_mem_slot,
+		(unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
+
+
+	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
+	INIT_WORK(&qdev->gc_work, qxl_gc_work);
+
+	r = qxl_fb_init(qdev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static void qxl_device_fini(struct qxl_device *qdev)
+{
+	if (qdev->current_release_bo[0])
+		qxl_bo_unref(&qdev->current_release_bo[0]);
+	if (qdev->current_release_bo[1])
+		qxl_bo_unref(&qdev->current_release_bo[1]);
+	flush_workqueue(qdev->gc_queue);
+	destroy_workqueue(qdev->gc_queue);
+	qdev->gc_queue = NULL;
+
+	qxl_ring_free(qdev->command_ring);
+	qxl_ring_free(qdev->cursor_ring);
+	qxl_ring_free(qdev->release_ring);
+	qxl_bo_fini(qdev);
+	io_mapping_free(qdev->surface_mapping);
+	io_mapping_free(qdev->vram_mapping);
+	iounmap(qdev->ram_header);
+	iounmap(qdev->rom);
+	qdev->rom = NULL;
+	qdev->mode_info.modes = NULL;
+	qdev->mode_info.num_modes = 0;
+	qxl_debugfs_remove_files(qdev);
+}
+
+int qxl_driver_unload(struct drm_device *dev)
+{
+	struct qxl_device *qdev = dev->dev_private;
+
+	if (qdev == NULL)
+		return 0;
+	qxl_modeset_fini(qdev);
+	qxl_device_fini(qdev);
+
+	kfree(qdev);
+	dev->dev_private = NULL;
+	return 0;
+}
+
+int qxl_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct qxl_device *qdev;
+	int r;
+
+	/* require kms */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
+	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
+	if (qdev == NULL)
+		return -ENOMEM;
+
+	dev->dev_private = qdev;
+
+	r = qxl_device_init(qdev, dev, dev->pdev, flags);
+	if (r)
+		goto out;
+
+	r = qxl_modeset_init(qdev);
+	if (r) {
+		/* qxl_driver_unload() already frees qdev */
+		qxl_driver_unload(dev);
+		return r;
+	}
+
+	return 0;
+out:
+	kfree(qdev);
+	return r;
+}
+
+
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_object.c b/linux-imx/drivers/gpu/drm/qxl/qxl_object.c
new file mode 100644
index 0000000..d9b12e7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_object.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/io-mapping.h>
+static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+{
+	struct qxl_bo *bo;
+	struct qxl_device *qdev;
+
+	bo = container_of(tbo, struct qxl_bo, tbo);
+	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+
+	qxl_surface_evict(qdev, bo, false);
+	qxl_fence_fini(&bo->fence);
+	mutex_lock(&qdev->gem.mutex);
+	list_del_init(&bo->list);
+	mutex_unlock(&qdev->gem.mutex);
+	drm_gem_object_release(&bo->gem_base);
+	kfree(bo);
+}
+
+bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &qxl_ttm_bo_destroy)
+		return true;
+	return false;
+}
+
+void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
+{
+	u32 c = 0;
+
+	qbo->placement.fpfn = 0;
+	qbo->placement.lpfn = 0;
+	qbo->placement.placement = qbo->placements;
+	qbo->placement.busy_placement = qbo->placements;
+	if (domain == QXL_GEM_DOMAIN_VRAM)
+		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
+	if (domain == QXL_GEM_DOMAIN_SURFACE)
+		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
+	if (domain == QXL_GEM_DOMAIN_CPU)
+		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	if (!c)
+		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	qbo->placement.num_placement = c;
+	qbo->placement.num_busy_placement = c;
+}
+
+
+int qxl_bo_create(struct qxl_device *qdev,
+		  unsigned long size, bool kernel, u32 domain,
+		  struct qxl_surface *surf,
+		  struct qxl_bo **bo_ptr)
+{
+	struct qxl_bo *bo;
+	enum ttm_bo_type type;
+	int r;
+
+	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
+		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+	if (kernel)
+		type = ttm_bo_type_kernel;
+	else
+		type = ttm_bo_type_device;
+	*bo_ptr = NULL;
+	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
+	if (bo == NULL)
+		return -ENOMEM;
+	size = roundup(size, PAGE_SIZE);
+	r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
+	if (unlikely(r)) {
+		kfree(bo);
+		return r;
+	}
+	bo->gem_base.driver_private = NULL;
+	bo->type = domain;
+	bo->pin_count = 0;
+	bo->surface_id = 0;
+	qxl_fence_init(qdev, &bo->fence);
+	INIT_LIST_HEAD(&bo->list);
+	atomic_set(&bo->reserve_count, 0);
+	if (surf)
+		bo->surf = *surf;
+
+	qxl_ttm_placement_from_domain(bo, domain);
+
+	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
+			&bo->placement, 0, !kernel, NULL, size,
+			NULL, &qxl_ttm_bo_destroy);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS)
+			dev_err(qdev->dev,
+				"object_init failed for (%lu, 0x%08X)\n",
+				size, domain);
+		return r;
+	}
+	*bo_ptr = bo;
+	return 0;
+}
+
+int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
+{
+	bool is_iomem;
+	int r;
+
+	if (bo->kptr) {
+		if (ptr)
+			*ptr = bo->kptr;
+		return 0;
+	}
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	if (r)
+		return r;
+	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+	if (ptr)
+		*ptr = bo->kptr;
+	return 0;
+}
+
+void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
+			      struct qxl_bo *bo, int page_offset)
+{
+	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+	void *rptr;
+	int ret;
+	struct io_mapping *map;
+
+	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		map = qdev->vram_mapping;
+	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+		map = qdev->surface_mapping;
+	else
+		goto fallback;
+
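+	/*
+	 * Fast path: the bo lives in one of the two linear apertures for
+	 * which write-combined io mappings were created at init time, so
+	 * only the single page needed is mapped, atomically.
+	 */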
+	(void) ttm_mem_io_lock(man, false);
+	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
+	ttm_mem_io_unlock(man);
+
+	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
+fallback:
+	if (bo->kptr) {
+		rptr = bo->kptr + (page_offset * PAGE_SIZE);
+		return rptr;
+	}
+
+	ret = qxl_bo_kmap(bo, &rptr);
+	if (ret)
+		return NULL;
+
+	rptr += page_offset * PAGE_SIZE;
+	return rptr;
+}
+
+void qxl_bo_kunmap(struct qxl_bo *bo)
+{
+	if (bo->kptr == NULL)
+		return;
+	bo->kptr = NULL;
+	ttm_bo_kunmap(&bo->kmap);
+}
+
+void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
+			       struct qxl_bo *bo, void *pmap)
+{
+	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+	struct io_mapping *map;
+
+	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+		map = qdev->vram_mapping;
+	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
+		map = qdev->surface_mapping;
+	else
+		goto fallback;
+
+	io_mapping_unmap_atomic(pmap);
+
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
+	ttm_mem_io_unlock(man);
+	return;
+ fallback:
+	qxl_bo_kunmap(bo);
+}
+
+void qxl_bo_unref(struct qxl_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+
+	if ((*bo) == NULL)
+		return;
+	tbo = &((*bo)->tbo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
+}
+
+struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
+{
+	ttm_bo_reference(&bo->tbo);
+	return bo;
+}
+
+int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+{
+	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+	int r, i;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = qxl_bo_gpu_offset(bo);
+		return 0;
+	}
+	qxl_ttm_placement_from_domain(bo, domain);
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	if (likely(r == 0)) {
+		bo->pin_count = 1;
+		if (gpu_addr != NULL)
+			*gpu_addr = qxl_bo_gpu_offset(bo);
+	}
+	if (unlikely(r != 0))
+		dev_err(qdev->dev, "%p pin failed\n", bo);
+	return r;
+}
+
+int qxl_bo_unpin(struct qxl_bo *bo)
+{
+	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+	int r, i;
+
+	if (!bo->pin_count) {
+		dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	if (unlikely(r != 0))
+		dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
+	return r;
+}
+
+void qxl_bo_force_delete(struct qxl_device *qdev)
+{
+	struct qxl_bo *bo, *n;
+
+	if (list_empty(&qdev->gem.objects))
+		return;
+	dev_err(qdev->dev, "Userspace still has active objects !\n");
+	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
+		mutex_lock(&qdev->ddev->struct_mutex);
+		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
+			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+			*((unsigned long *)&bo->gem_base.refcount));
+		mutex_lock(&qdev->gem.mutex);
+		list_del_init(&bo->list);
+		mutex_unlock(&qdev->gem.mutex);
+		/* this should unref the ttm bo */
+		drm_gem_object_unreference(&bo->gem_base);
+		mutex_unlock(&qdev->ddev->struct_mutex);
+	}
+}
+
+int qxl_bo_init(struct qxl_device *qdev)
+{
+	return qxl_ttm_init(qdev);
+}
+
+void qxl_bo_fini(struct qxl_device *qdev)
+{
+	qxl_ttm_fini(qdev);
+}
+
+int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
+{
+	int ret;
+	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
+		/* allocate a surface id for this surface now */
+		ret = qxl_surface_id_alloc(qdev, bo);
+		if (ret)
+			return ret;
+
+		ret = qxl_hw_surface_alloc(qdev, bo, NULL);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
+{
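+	/* note: the 'failed' argument is currently unused - every bo on the
+	 * list is unreserved and the list is freed either way */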
+	struct qxl_bo_list *entry, *sf;
+
+	list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
+		qxl_bo_unreserve(entry->bo);
+		list_del(&entry->lhead);
+		kfree(entry);
+	}
+}
+
+int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
+{
+	struct qxl_bo_list *entry;
+	int ret;
+
+	list_for_each_entry(entry, &reloc_list->bos, lhead) {
+		if (entry->bo == bo)
+			return 0;
+	}
+
+	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->bo = bo;
+	list_add(&entry->lhead, &reloc_list->bos);
+
+	ret = qxl_bo_reserve(bo, false);
+	if (ret)
+		return ret;
+
+	if (!bo->pin_count) {
+		qxl_ttm_placement_from_domain(bo, bo->type);
+		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
+				      true, false);
+		if (ret)
+			return ret;
+	}
+
+	/* allocate a surface for reserved + validated buffers */
+	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
+	if (ret)
+		return ret;
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_object.h b/linux-imx/drivers/gpu/drm/qxl/qxl_object.h
new file mode 100644
index 0000000..b4fd89f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_object.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+#ifndef QXL_OBJECT_H
+#define QXL_OBJECT_H
+
+#include "qxl_drv.h"
+
+static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS) {
+			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+			dev_err(qdev->dev, "%p reserve failed\n", bo);
+		}
+		return r;
+	}
+	return 0;
+}
+
+static inline void qxl_bo_unreserve(struct qxl_bo *bo)
+{
+	ttm_bo_unreserve(&bo->tbo);
+}
+
+static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo)
+{
+	return bo->tbo.offset;
+}
+
+static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
+{
+	return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool qxl_bo_is_reserved(struct qxl_bo *bo)
+{
+	return !!atomic_read(&bo->tbo.reserved);
+}
+
+static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
+{
+	return bo->tbo.addr_space_offset;
+}
+
+static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
+			      bool no_wait)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS) {
+			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+			dev_err(qdev->dev, "%p reserve failed for wait\n",
+				bo);
+		}
+		return r;
+	}
+	spin_lock(&bo->tbo.bdev->fence_lock);
+	if (mem_type)
+		*mem_type = bo->tbo.mem.mem_type;
+	if (bo->tbo.sync_obj)
+		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	spin_unlock(&bo->tbo.bdev->fence_lock);
+	ttm_bo_unreserve(&bo->tbo);
+	return r;
+}
+
+extern int qxl_bo_create(struct qxl_device *qdev,
+			 unsigned long size,
+			 bool kernel, u32 domain,
+			 struct qxl_surface *surf,
+			 struct qxl_bo **bo_ptr);
+extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+extern void qxl_bo_kunmap(struct qxl_bo *bo);
+void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
+void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
+extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
+extern void qxl_bo_unref(struct qxl_bo **bo);
+extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
+extern int qxl_bo_unpin(struct qxl_bo *bo);
+extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
+extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
+
+extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
+extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
+#endif
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_release.c b/linux-imx/drivers/gpu/drm/qxl/qxl_release.c
new file mode 100644
index 0000000..b443d67
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_release.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+/*
+ * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
+ * into 256 byte chunks for now - gives 16 cmds per page.
+ *
+ * use an ida to index into the chunks?
+ */
+/* manage releaseables */
+/* stack them 16 high for now - a drawable object is 191 bytes */
+#define RELEASE_SIZE 256
+#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
+/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
+#define SURFACE_RELEASE_SIZE 128
+#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
+
+static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
+static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
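+/*
+ * Worked example: the 6th drawable release suballocated from the current
+ * bo gets release_offset = 5 * RELEASE_SIZE = 1280; once the per-bo index
+ * reaches releases_per_bo[cur_idx] (16 for drawables and cursor commands,
+ * 32 for surface commands) a fresh page-sized bo is allocated in
+ * qxl_alloc_release_reserved().
+ */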
+uint64_t
+qxl_release_alloc(struct qxl_device *qdev, int type,
+		  struct qxl_release **ret)
+{
+	struct qxl_release *release;
+	int handle;
+	size_t size = sizeof(*release);
+	int idr_ret;
+
+	release = kmalloc(size, GFP_KERNEL);
+	if (!release) {
+		DRM_ERROR("Out of memory\n");
+		return 0;
+	}
+	release->type = type;
+	release->bo_count = 0;
+	release->release_offset = 0;
+	release->surface_release_id = 0;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&qdev->release_idr_lock);
+	idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
+	spin_unlock(&qdev->release_idr_lock);
+	idr_preload_end();
+	handle = idr_ret;
+	if (idr_ret < 0) {
+		kfree(release);
+		goto release_fail;
+	}
+	*ret = release;
+	QXL_INFO(qdev, "allocated release %d\n", handle);
+	release->id = handle;
+release_fail:
+
+	return handle;
+}
+
+void
+qxl_release_free(struct qxl_device *qdev,
+		 struct qxl_release *release)
+{
+	int i;
+
+	QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
+		 release->type, release->bo_count);
+
+	if (release->surface_release_id)
+		qxl_surface_id_dealloc(qdev, release->surface_release_id);
+
+	for (i = 0 ; i < release->bo_count; ++i) {
+		QXL_INFO(qdev, "release %llx\n",
+			release->bos[i]->tbo.addr_space_offset
+						- DRM_FILE_OFFSET);
+		qxl_fence_remove_release(&release->bos[i]->fence, release->id);
+		qxl_bo_unref(&release->bos[i]);
+	}
+	spin_lock(&qdev->release_idr_lock);
+	idr_remove(&qdev->release_idr, release->id);
+	spin_unlock(&qdev->release_idr_lock);
+	kfree(release);
+}
+
+void
+qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
+		    struct qxl_bo *bo)
+{
+	int i;
+	for (i = 0; i < release->bo_count; i++)
+		if (release->bos[i] == bo)
+			return;
+
+	if (release->bo_count >= QXL_MAX_RES) {
+		DRM_ERROR("exceeded max resource on a qxl_release item\n");
+		return;
+	}
+	release->bos[release->bo_count++] = qxl_bo_ref(bo);
+}
+
+static int qxl_release_bo_alloc(struct qxl_device *qdev,
+				struct qxl_bo **bo)
+{
+	int ret;
+	ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL,
+			    bo);
+	return ret;
+}
+
+int qxl_release_reserve(struct qxl_device *qdev,
+			struct qxl_release *release, bool no_wait)
+{
+	int ret;
+	if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
+		ret = qxl_bo_reserve(release->bos[0], no_wait);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+void qxl_release_unreserve(struct qxl_device *qdev,
+			  struct qxl_release *release)
+{
+	if (atomic_dec_and_test(&release->bos[0]->reserve_count))
+		qxl_bo_unreserve(release->bos[0]);
+}
+
+int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
+				       enum qxl_surface_cmd_type surface_cmd_type,
+				       struct qxl_release *create_rel,
+				       struct qxl_release **release)
+{
+	int ret;
+
+	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
+		int idr_ret;
+		struct qxl_bo *bo;
+		union qxl_release_info *info;
+
+		/* stash the release after the create command */
+		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
+		bo = qxl_bo_ref(create_rel->bos[0]);
+
+		(*release)->release_offset = create_rel->release_offset + 64;
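+		/* apparently the destroy release reuses the create command's
+		 * 128-byte chunk, stashed 64 bytes past the create release */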
+
+		qxl_release_add_res(qdev, *release, bo);
+
+		ret = qxl_release_reserve(qdev, *release, false);
+		if (ret) {
+			DRM_ERROR("release reserve failed\n");
+			goto out_unref;
+		}
+		info = qxl_release_map(qdev, *release);
+		info->id = idr_ret;
+		qxl_release_unmap(qdev, *release, info);
+
+
+out_unref:
+		qxl_bo_unref(&bo);
+		return ret;
+	}
+
+	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
+					 QXL_RELEASE_SURFACE_CMD, release, NULL);
+}
+
+int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
+				       int type, struct qxl_release **release,
+				       struct qxl_bo **rbo)
+{
+	struct qxl_bo *bo;
+	int idr_ret;
+	int ret;
+	union qxl_release_info *info;
+	int cur_idx;
+
+	if (type == QXL_RELEASE_DRAWABLE)
+		cur_idx = 0;
+	else if (type == QXL_RELEASE_SURFACE_CMD)
+		cur_idx = 1;
+	else if (type == QXL_RELEASE_CURSOR_CMD)
+		cur_idx = 2;
+	else {
+		DRM_ERROR("got illegal type: %d\n", type);
+		return -EINVAL;
+	}
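+	/*
+	 * cur_idx selects the per-type suballocation stream; it indexes
+	 * release_size_per_bo[]/releases_per_bo[] above: 0 = drawables,
+	 * 1 = surface commands, 2 = cursor commands.
+	 */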
+
+	idr_ret = qxl_release_alloc(qdev, type, release);
+
+	mutex_lock(&qdev->release_mutex);
+	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
+		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
+		qdev->current_release_bo_offset[cur_idx] = 0;
+		qdev->current_release_bo[cur_idx] = NULL;
+	}
+	if (!qdev->current_release_bo[cur_idx]) {
+		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
+		if (ret) {
+			mutex_unlock(&qdev->release_mutex);
+			return ret;
+		}
+
+		/* pin release bos - they are too messy to evict */
+		ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
+		qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
+		qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
+	}
+
+	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
+
+	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
+	qdev->current_release_bo_offset[cur_idx]++;
+
+	if (rbo)
+		*rbo = bo;
+
+	qxl_release_add_res(qdev, *release, bo);
+
+	ret = qxl_release_reserve(qdev, *release, false);
+	mutex_unlock(&qdev->release_mutex);
+	if (ret)
+		goto out_unref;
+
+	info = qxl_release_map(qdev, *release);
+	info->id = idr_ret;
+	qxl_release_unmap(qdev, *release, info);
+
+out_unref:
+	qxl_bo_unref(&bo);
+	return ret;
+}
+
+int qxl_fence_releaseable(struct qxl_device *qdev,
+			  struct qxl_release *release)
+{
+	int i, ret;
+	for (i = 0; i < release->bo_count; i++) {
+		if (!release->bos[i]->tbo.sync_obj)
+			release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
+		ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
+						   uint64_t id)
+{
+	struct qxl_release *release;
+
+	spin_lock(&qdev->release_idr_lock);
+	release = idr_find(&qdev->release_idr, id);
+	spin_unlock(&qdev->release_idr_lock);
+	if (!release) {
+		DRM_ERROR("failed to find id in release_idr\n");
+		return NULL;
+	}
+	if (release->bo_count < 1) {
+		DRM_ERROR("read a released resource with 0 bos\n");
+		return NULL;
+	}
+	return release;
+}
+
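+/*
+ * Note: release bos are a single page, so release_offset is always less
+ * than PAGE_SIZE; "release_offset & PAGE_SIZE" therefore evaluates to 0
+ * (the page index) and "release_offset & ~PAGE_SIZE" to the byte offset
+ * within that page. This masking would need reworking if release bos
+ * ever grew beyond one page.
+ */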
+union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
+					struct qxl_release *release)
+{
+	void *ptr;
+	union qxl_release_info *info;
+	struct qxl_bo *bo = release->bos[0];
+
+	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
+	info = ptr + (release->release_offset & ~PAGE_SIZE);
+	return info;
+}
+
+void qxl_release_unmap(struct qxl_device *qdev,
+		       struct qxl_release *release,
+		       union qxl_release_info *info)
+{
+	struct qxl_bo *bo = release->bos[0];
+	void *ptr;
+
+	ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
+	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
+}
diff --git a/linux-imx/drivers/gpu/drm/qxl/qxl_ttm.c b/linux-imx/drivers/gpu/drm/qxl/qxl_ttm.c
new file mode 100644
index 0000000..3401eb8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -0,0 +1,582 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_page_alloc.h>
+#include <ttm/ttm_module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/qxl_drm.h>
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+#include <linux/delay.h>
+static int qxl_ttm_debugfs_init(struct qxl_device *qdev);
+
+static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
+{
+	struct qxl_mman *mman;
+	struct qxl_device *qdev;
+
+	mman = container_of(bdev, struct qxl_mman, bdev);
+	qdev = container_of(mman, struct qxl_device, mman);
+	return qdev;
+}
+
+static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int qxl_ttm_global_init(struct qxl_device *qdev)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	qdev->mman.mem_global_referenced = false;
+	global_ref = &qdev->mman.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &qxl_ttm_mem_global_init;
+	global_ref->release = &qxl_ttm_mem_global_release;
+
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	qdev->mman.bo_global_ref.mem_glob =
+		qdev->mman.mem_global_ref.object;
+	global_ref = &qdev->mman.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&qdev->mman.mem_global_ref);
+		return r;
+	}
+
+	qdev->mman.mem_global_referenced = true;
+	return 0;
+}
+
+static void qxl_ttm_global_fini(struct qxl_device *qdev)
+{
+	if (qdev->mman.mem_global_referenced) {
+		drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
+		drm_global_item_unref(&qdev->mman.mem_global_ref);
+		qdev->mman.mem_global_referenced = false;
+	}
+}
+
+static struct vm_operations_struct qxl_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops;
+
+static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo;
+	struct qxl_device *qdev;
+	int r;
+
+	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	if (bo == NULL)
+		return VM_FAULT_NOPAGE;
+	qdev = qxl_get_qdev(bo->bdev);
+	r = ttm_vm_ops->fault(vma, vmf);
+	return r;
+}
+
+int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct qxl_device *qdev;
+	int r;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+		pr_info("%s: vma->vm_pgoff (%ld) < DRM_FILE_PAGE_OFFSET\n",
+			__func__, vma->vm_pgoff);
+		return drm_mmap(filp, vma);
+	}
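+	/*
+	 * Offsets below DRM_FILE_PAGE_OFFSET are legacy drm maps handled by
+	 * drm_mmap(); anything above is a TTM object offset and goes through
+	 * ttm_bo_mmap() below.
+	 */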
+
+	file_priv = filp->private_data;
+	qdev = file_priv->minor->dev->dev_private;
+	if (qdev == NULL) {
+		DRM_ERROR(
+		 "filp->private_data->minor->dev->dev_private == NULL\n");
+		return -EINVAL;
+	}
+	QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
+		 __func__, filp->private_data, vma->vm_pgoff);
+
+	r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
+	if (unlikely(r != 0))
+		return r;
+	if (unlikely(ttm_vm_ops == NULL)) {
+		ttm_vm_ops = vma->vm_ops;
+		qxl_ttm_vm_ops = *ttm_vm_ops;
+		qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
+	}
+	vma->vm_ops = &qxl_ttm_vm_ops;
+	return 0;
+}
+
+static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+	return 0;
+}
+
+static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+			     struct ttm_mem_type_manager *man)
+{
+	struct qxl_device *qdev;
+
+	qdev = qxl_get_qdev(bdev);
+
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+	case TTM_PL_PRIV0:
+		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = 0;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			     TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void qxl_evict_flags(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
+{
+	struct qxl_bo *qbo;
+	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+	if (!qxl_ttm_bo_is_qxl_bo(bo)) {
+		placement->fpfn = 0;
+		placement->lpfn = 0;
+		placement->placement = &placements;
+		placement->busy_placement = &placements;
+		placement->num_placement = 1;
+		placement->num_busy_placement = 1;
+		return;
+	}
+	qbo = container_of(bo, struct qxl_bo, tbo);
+	qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
+	*placement = qbo->placement;
+}
+
+static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	return 0;
+}
+
+static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+				  struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct qxl_device *qdev = qxl_get_qdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.is_iomem = true;
+		mem->bus.base = qdev->vram_base;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		break;
+	case TTM_PL_PRIV0:
+		mem->bus.is_iomem = true;
+		mem->bus.base = qdev->surfaceram_base;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
+				struct ttm_mem_reg *mem)
+{
+}
+
+/*
+ * TTM backend functions.
+ */
+struct qxl_ttm_tt {
+	struct ttm_dma_tt		ttm;
+	struct qxl_device		*qdev;
+	u64				offset;
+};
+
+static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
+				struct ttm_mem_reg *bo_mem)
+{
+	struct qxl_ttm_tt *gtt = (void *)ttm;
+
+	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+	if (!ttm->num_pages) {
+		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		     ttm->num_pages, bo_mem, ttm);
+	}
+	/* Not implemented */
+	return -1;
+}
+
+static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+	/* Not implemented */
+	return -1;
+}
+
+static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+	struct qxl_ttm_tt *gtt = (void *)ttm;
+
+	ttm_dma_tt_fini(&gtt->ttm);
+	kfree(gtt);
+}
+
+static struct ttm_backend_func qxl_backend_func = {
+	.bind = &qxl_ttm_backend_bind,
+	.unbind = &qxl_ttm_backend_unbind,
+	.destroy = &qxl_ttm_backend_destroy,
+};
+
+static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	int r;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	r = ttm_pool_populate(ttm);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static void qxl_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	ttm_pool_unpopulate(ttm);
+}
+
+static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
+					unsigned long size, uint32_t page_flags,
+					struct page *dummy_read_page)
+{
+	struct qxl_device *qdev;
+	struct qxl_ttm_tt *gtt;
+
+	qdev = qxl_get_qdev(bdev);
+	gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
+	if (gtt == NULL)
+		return NULL;
+	gtt->ttm.ttm.func = &qxl_backend_func;
+	gtt->qdev = qdev;
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
+			    dummy_read_page)) {
+		kfree(gtt);
+		return NULL;
+	}
+	return &gtt->ttm.ttm;
+}
+
+static void qxl_move_null(struct ttm_buffer_object *bo,
+			     struct ttm_mem_reg *new_mem)
+{
+	struct ttm_mem_reg *old_mem = &bo->mem;
+
+	BUG_ON(old_mem->mm_node != NULL);
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+}
+
+static int qxl_bo_move(struct ttm_buffer_object *bo,
+		       bool evict, bool interruptible,
+		       bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
+{
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+		qxl_move_null(bo, new_mem);
+		return 0;
+	}
+	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+}
+
+
+static int qxl_sync_obj_wait(void *sync_obj,
+			     bool lazy, bool interruptible)
+{
+	struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
+	int count = 0, sc = 0;
+	struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
+
+	if (qfence->num_active_releases == 0)
+		return 0;
+
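+	/*
+	 * Escalation strategy (as far as the flow below goes): pass 0
+	 * flushes the surface if this bo backs one, later passes tell the
+	 * device it is out of memory; each pass runs the garbage collector
+	 * up to 10 times, backs off with a short sleep once sc > 2, and
+	 * finally gives up after ~300 passes with drawables still pending.
+	 */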
+retry:
+	if (sc == 0) {
+		if (bo->type == QXL_GEM_DOMAIN_SURFACE)
+			qxl_update_surface(qfence->qdev, bo);
+	} else if (sc >= 1) {
+		qxl_io_notify_oom(qfence->qdev);
+	}
+
+	sc++;
+
+	for (count = 0; count < 10; count++) {
+		bool ret;
+		ret = qxl_queue_garbage_collect(qfence->qdev, true);
+		if (ret == false)
+			break;
+
+		if (qfence->num_active_releases == 0)
+			return 0;
+	}
+
+	if (qfence->num_active_releases) {
+		bool have_drawable_releases = false;
+		void **slot;
+		struct radix_tree_iter iter;
+		int release_id;
+
+		radix_tree_for_each_slot(slot, &qfence->tree, &iter, 0) {
+			struct qxl_release *release;
+
+			release_id = iter.index;
+			release = qxl_release_from_id_locked(qfence->qdev, release_id);
+			if (release == NULL)
+				continue;
+
+			if (release->type == QXL_RELEASE_DRAWABLE)
+				have_drawable_releases = true;
+		}
+
+		qxl_queue_garbage_collect(qfence->qdev, true);
+
+		if (have_drawable_releases || sc < 4) {
+			if (sc > 2)
+				/* back off */
+				usleep_range(500, 1000);
+			if (have_drawable_releases && sc > 300) {
+				WARN(1, "sync obj pass %d: bo still has outstanding releases (surf_id %d, primary %d, pin %d, size %ld, active %d)\n",
+				     sc, bo->surface_id, bo->is_primary,
+				     bo->pin_count,
+				     (unsigned long)bo->gem_base.size,
+				     qfence->num_active_releases);
+				return -EBUSY;
+			}
+			goto retry;
+		}
+	}
+	return 0;
+}
+
+static int qxl_sync_obj_flush(void *sync_obj)
+{
+	return 0;
+}
+
+static void qxl_sync_obj_unref(void **sync_obj)
+{
+	*sync_obj = NULL;
+}
+
+static void *qxl_sync_obj_ref(void *sync_obj)
+{
+	return sync_obj;
+}
+
+static bool qxl_sync_obj_signaled(void *sync_obj)
+{
+	struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
+	return (qfence->num_active_releases == 0);
+}
+
+static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
+			       struct ttm_mem_reg *new_mem)
+{
+	struct qxl_bo *qbo;
+	struct qxl_device *qdev;
+
+	if (!qxl_ttm_bo_is_qxl_bo(bo))
+		return;
+	qbo = container_of(bo, struct qxl_bo, tbo);
+	qdev = qbo->gem_base.dev->dev_private;
+
+	if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
+		qxl_surface_evict(qdev, qbo, new_mem ? true : false);
+}
+
+static struct ttm_bo_driver qxl_bo_driver = {
+	.ttm_tt_create = &qxl_ttm_tt_create,
+	.ttm_tt_populate = &qxl_ttm_tt_populate,
+	.ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
+	.invalidate_caches = &qxl_invalidate_caches,
+	.init_mem_type = &qxl_init_mem_type,
+	.evict_flags = &qxl_evict_flags,
+	.move = &qxl_bo_move,
+	.verify_access = &qxl_verify_access,
+	.io_mem_reserve = &qxl_ttm_io_mem_reserve,
+	.io_mem_free = &qxl_ttm_io_mem_free,
+	.sync_obj_signaled = &qxl_sync_obj_signaled,
+	.sync_obj_wait = &qxl_sync_obj_wait,
+	.sync_obj_flush = &qxl_sync_obj_flush,
+	.sync_obj_unref = &qxl_sync_obj_unref,
+	.sync_obj_ref = &qxl_sync_obj_ref,
+	.move_notify = &qxl_bo_move_notify,
+};
+
+
+
+int qxl_ttm_init(struct qxl_device *qdev)
+{
+	int r;
+	int num_io_pages; /* != rom->num_io_pages, we include surface0 */
+
+	r = qxl_ttm_global_init(qdev);
+	if (r)
+		return r;
+	/* No other users of the address space, so set it to 0 */
+	r = ttm_bo_device_init(&qdev->mman.bdev,
+			       qdev->mman.bo_global_ref.ref.object,
+			       &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0);
+	if (r) {
+		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+		return r;
+	}
+	/* NOTE: this includes the framebuffer (aka surface 0) */
+	num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
+	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
+			   num_io_pages);
+	if (r) {
+		DRM_ERROR("Failed initializing VRAM heap.\n");
+		return r;
+	}
+	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
+			   qdev->surfaceram_size / PAGE_SIZE);
+	if (r) {
+		DRM_ERROR("Failed initializing Surfaces heap.\n");
+		return r;
+	}
+	DRM_INFO("qxl: %uM of VRAM memory size\n",
+		 (unsigned)qdev->vram_size / (1024 * 1024));
+	DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
+		 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
+		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
+	r = qxl_ttm_debugfs_init(qdev);
+	if (r) {
+		DRM_ERROR("Failed to init debugfs\n");
+		return r;
+	}
+	return 0;
+}
+
+void qxl_ttm_fini(struct qxl_device *qdev)
+{
+	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
+	ttm_bo_device_release(&qdev->mman.bdev);
+	qxl_ttm_global_fini(qdev);
+	DRM_INFO("qxl: ttm finalized\n");
+}
+
+
+#define QXL_DEBUGFS_MEM_TYPES 2
+
+#if defined(CONFIG_DEBUG_FS)
+static int qxl_mm_dump_table(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	struct qxl_device *qdev = dev->dev_private;
+	int ret;
+	struct ttm_bo_global *glob = qdev->mman.bdev.glob;
+
+	spin_lock(&glob->lru_lock);
+	ret = drm_mm_dump_table(m, mm);
+	spin_unlock(&glob->lru_lock);
+	return ret;
+}
+#endif
+
+static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
+	static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
+	unsigned i;
+
+	for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
+		if (i == 0)
+			sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
+		else
+			sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
+		qxl_mem_types_list[i].name = qxl_mem_types_names[i];
+		qxl_mem_types_list[i].show = &qxl_mm_dump_table;
+		qxl_mem_types_list[i].driver_features = 0;
+		if (i == 0)
+			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
+		else
+			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
+
+	}
+	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
+#else
+	return 0;
+#endif
+}
diff --git a/linux-imx/drivers/gpu/drm/r128/Makefile b/linux-imx/drivers/gpu/drm/r128/Makefile
new file mode 100644
index 0000000..1cc72ae
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/r128/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+r128-y   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
+
+r128-$(CONFIG_COMPAT)   += r128_ioc32.o
+
+obj-$(CONFIG_DRM_R128)	+= r128.o
diff --git a/linux-imx/drivers/gpu/drm/r128/r128_cce.c b/linux-imx/drivers/gpu/drm/r128/r128_cce.c
new file mode 100644
index 0000000..d4660cf
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/r128/r128_cce.c
@@ -0,0 +1,937 @@
+/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
+ * Created: Wed Apr  5 19:24:19 2000 by kevin@precisioninsight.com
+ */
+/*
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/r128_drm.h>
+#include "r128_drv.h"
+
+#define R128_FIFO_DEBUG		0
+
+#define FIRMWARE_NAME		"r128/r128_cce.bin"
+
+MODULE_FIRMWARE(FIRMWARE_NAME);
+
+static int R128_READ_PLL(struct drm_device *dev, int addr)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+
+	R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
+	return R128_READ(R128_CLOCK_CNTL_DATA);
+}
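+
+/* PLL registers are not directly memory-mapped; they sit behind an
+ * index/data pair.  A read selects the PLL register in the low five
+ * bits of CLOCK_CNTL_INDEX and fetches it through CLOCK_CNTL_DATA;
+ * writes use the same pair with R128_PLL_WR_EN or'ed into the index
+ * (see the R128_WRITE_PLL() macro in r128_drv.h).  Illustrative use,
+ * as in r128_do_engine_reset() below:
+ *
+ *	mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL);
+ *	R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl | R128_FORCE_GCP);
+ */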
+
+#if R128_FIFO_DEBUG
+static void r128_status(drm_r128_private_t *dev_priv)
+{
+	printk("GUI_STAT           = 0x%08x\n",
+	       (unsigned int)R128_READ(R128_GUI_STAT));
+	printk("PM4_STAT           = 0x%08x\n",
+	       (unsigned int)R128_READ(R128_PM4_STAT));
+	printk("PM4_BUFFER_DL_WPTR = 0x%08x\n",
+	       (unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR));
+	printk("PM4_BUFFER_DL_RPTR = 0x%08x\n",
+	       (unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR));
+	printk("PM4_MICRO_CNTL     = 0x%08x\n",
+	       (unsigned int)R128_READ(R128_PM4_MICRO_CNTL));
+	printk("PM4_BUFFER_CNTL    = 0x%08x\n",
+	       (unsigned int)R128_READ(R128_PM4_BUFFER_CNTL));
+}
+#endif
+
+/* ================================================================
+ * Engine, FIFO control
+ */
+
+static int r128_do_pixcache_flush(drm_r128_private_t *dev_priv)
+{
+	u32 tmp;
+	int i;
+
+	tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL;
+	R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY))
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if R128_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+#endif
+	return -EBUSY;
+}
+
+static int r128_do_wait_for_fifo(drm_r128_private_t *dev_priv, int entries)
+{
+	int i;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
+		if (slots >= entries)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if R128_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+#endif
+	return -EBUSY;
+}
+
+static int r128_do_wait_for_idle(drm_r128_private_t *dev_priv)
+{
+	int i, ret;
+
+	ret = r128_do_wait_for_fifo(dev_priv, 64);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
+			r128_do_pixcache_flush(dev_priv);
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+
+#if R128_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+#endif
+	return -EBUSY;
+}
+
+/* ================================================================
+ * CCE control, initialization
+ */
+
+/* Load the microcode for the CCE */
+static int r128_cce_load_microcode(drm_r128_private_t *dev_priv)
+{
+	struct platform_device *pdev;
+	const struct firmware *fw;
+	const __be32 *fw_data;
+	int rc, i;
+
+	DRM_DEBUG("\n");
+
+	pdev = platform_device_register_simple("r128_cce", 0, NULL, 0);
+	if (IS_ERR(pdev)) {
+		printk(KERN_ERR "r128_cce: Failed to register firmware\n");
+		return PTR_ERR(pdev);
+	}
+	rc = request_firmware(&fw, FIRMWARE_NAME, &pdev->dev);
+	platform_device_unregister(pdev);
+	if (rc) {
+		printk(KERN_ERR "r128_cce: Failed to load firmware \"%s\"\n",
+		       FIRMWARE_NAME);
+		return rc;
+	}
+
+	if (fw->size != 256 * 8) {
+		printk(KERN_ERR
+		       "r128_cce: Bogus length %zu in firmware \"%s\"\n",
+		       fw->size, FIRMWARE_NAME);
+		rc = -EINVAL;
+		goto out_release;
+	}
+
+	r128_do_wait_for_idle(dev_priv);
+
+	fw_data = (const __be32 *)fw->data;
+	R128_WRITE(R128_PM4_MICROCODE_ADDR, 0);
+	for (i = 0; i < 256; i++) {
+		R128_WRITE(R128_PM4_MICROCODE_DATAH,
+			   be32_to_cpup(&fw_data[i * 2]));
+		R128_WRITE(R128_PM4_MICROCODE_DATAL,
+			   be32_to_cpup(&fw_data[i * 2 + 1]));
+	}
+
+out_release:
+	release_firmware(fw);
+	return rc;
+}
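+
+/* The CCE microcode image is 256 instructions of 8 bytes each -- hence
+ * the fw->size == 256 * 8 (2048 byte) check above -- stored big-endian
+ * as (high dword, low dword) pairs.  Instruction i therefore lives in
+ * fw_data[2 * i] and fw_data[2 * i + 1], which the upload loop feeds to
+ * the MICROCODE_DATAH/DATAL registers after resetting the write address
+ * to zero.
+ */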
+
+/* Flush any pending commands to the CCE.  This should only be used just
+ * prior to a wait for idle, as it informs the engine that the command
+ * stream is ending.
+ */
+static void r128_do_cce_flush(drm_r128_private_t *dev_priv)
+{
+	u32 tmp;
+
+	tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR) | R128_PM4_BUFFER_DL_DONE;
+	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp);
+}
+
+/* Wait for the CCE to go idle.
+ */
+int r128_do_cce_idle(drm_r128_private_t *dev_priv)
+{
+	int i;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) {
+			int pm4stat = R128_READ(R128_PM4_STAT);
+			if (((pm4stat & R128_PM4_FIFOCNT_MASK) >=
+			     dev_priv->cce_fifo_size) &&
+			    !(pm4stat & (R128_PM4_BUSY |
+					 R128_PM4_GUI_ACTIVE))) {
+				return r128_do_pixcache_flush(dev_priv);
+			}
+		}
+		DRM_UDELAY(1);
+	}
+
+#if R128_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+	r128_status(dev_priv);
+#endif
+	return -EBUSY;
+}
+
+/* Start the Concurrent Command Engine.
+ */
+static void r128_do_cce_start(drm_r128_private_t *dev_priv)
+{
+	r128_do_wait_for_idle(dev_priv);
+
+	R128_WRITE(R128_PM4_BUFFER_CNTL,
+		   dev_priv->cce_mode | dev_priv->ring.size_l2qw
+		   | R128_PM4_BUFFER_CNTL_NOUPDATE);
+	R128_READ(R128_PM4_BUFFER_ADDR);	/* as per the sample code */
+	R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN);
+
+	dev_priv->cce_running = 1;
+}
+
+/* Reset the Concurrent Command Engine.  This will not flush any pending
+ * commands, so you must wait for the CCE command stream to complete
+ * before calling this routine.
+ */
+static void r128_do_cce_reset(drm_r128_private_t *dev_priv)
+{
+	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
+	R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
+	dev_priv->ring.tail = 0;
+}
+
+/* Stop the Concurrent Command Engine.  This will not flush any pending
+ * commands, so you must flush the command stream and wait for the CCE
+ * to go idle before calling this routine.
+ */
+static void r128_do_cce_stop(drm_r128_private_t *dev_priv)
+{
+	R128_WRITE(R128_PM4_MICRO_CNTL, 0);
+	R128_WRITE(R128_PM4_BUFFER_CNTL,
+		   R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE);
+
+	dev_priv->cce_running = 0;
+}
+
+/* Reset the engine.  This will stop the CCE if it is running.
+ */
+static int r128_do_engine_reset(struct drm_device *dev)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
+
+	r128_do_pixcache_flush(dev_priv);
+
+	clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX);
+	mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL);
+
+	R128_WRITE_PLL(R128_MCLK_CNTL,
+		       mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP);
+
+	gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL);
+
+	/* Taken from the sample code - do not change */
+	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI);
+	R128_READ(R128_GEN_RESET_CNTL);
+	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI);
+	R128_READ(R128_GEN_RESET_CNTL);
+
+	R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl);
+	R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index);
+	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl);
+
+	/* Reset the CCE ring */
+	r128_do_cce_reset(dev_priv);
+
+	/* The CCE is no longer running after an engine reset */
+	dev_priv->cce_running = 0;
+
+	/* Reset any pending vertex, indirect buffers */
+	r128_freelist_reset(dev);
+
+	return 0;
+}
+
+static void r128_cce_init_ring_buffer(struct drm_device *dev,
+				      drm_r128_private_t *dev_priv)
+{
+	u32 ring_start;
+	u32 tmp;
+
+	DRM_DEBUG("\n");
+
+	/* The manual (p. 2) says this address is in "VM space".  This
+	 * means it's an offset from the start of AGP space.
+	 */
+#if __OS_HAS_AGP
+	if (!dev_priv->is_pci)
+		ring_start = dev_priv->cce_ring->offset - dev->agp->base;
+	else
+#endif
+		ring_start = dev_priv->cce_ring->offset -
+		    (unsigned long)dev->sg->virtual;
+
+	R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET);
+
+	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
+	R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
+
+	/* Set watermark control */
+	R128_WRITE(R128_PM4_BUFFER_WM_CNTL,
+		   ((R128_WATERMARK_L / 4) << R128_WMA_SHIFT)
+		   | ((R128_WATERMARK_M / 4) << R128_WMB_SHIFT)
+		   | ((R128_WATERMARK_N / 4) << R128_WMC_SHIFT)
+		   | ((R128_WATERMARK_K / 64) << R128_WB_WM_SHIFT));
+
+	/* Force read.  Why?  Because it's in the examples... */
+	R128_READ(R128_PM4_BUFFER_ADDR);
+
+	/* Turn on bus mastering */
+	tmp = R128_READ(R128_BUS_CNTL) & ~R128_BUS_MASTER_DIS;
+	R128_WRITE(R128_BUS_CNTL, tmp);
+}
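+
+/* Worked example of the watermark packing above, using the constants
+ * from r128_drv.h (L = 16, M = 8, N = 8, K = 128; shifts 0/8/16/24):
+ *
+ *	(16 / 4)   << 0   = 0x00000004
+ *	(8 / 4)    << 8   = 0x00000200
+ *	(8 / 4)    << 16  = 0x00020000
+ *	(128 / 64) << 24  = 0x02000000
+ *	                    ----------
+ *	PM4_BUFFER_WM_CNTL  0x02020204
+ */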
+
+static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
+{
+	drm_r128_private_t *dev_priv;
+	int rc;
+
+	DRM_DEBUG("\n");
+
+	if (dev->dev_private) {
+		DRM_DEBUG("called when already initialized\n");
+		return -EINVAL;
+	}
+
+	dev_priv = kzalloc(sizeof(drm_r128_private_t), GFP_KERNEL);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	dev_priv->is_pci = init->is_pci;
+
+	if (dev_priv->is_pci && !dev->sg) {
+		DRM_ERROR("PCI GART memory not allocated!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->usec_timeout = init->usec_timeout;
+	if (dev_priv->usec_timeout < 1 ||
+	    dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) {
+		DRM_DEBUG("TIMEOUT problem!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->cce_mode = init->cce_mode;
+
+	/* GH: Simple idle check.
+	 */
+	atomic_set(&dev_priv->idle_count, 0);
+
+	/* We don't support anything other than bus-mastering ring mode,
+	 * but the ring can be in either AGP or PCI space for the ring
+	 * read pointer.
+	 */
+	if ((init->cce_mode != R128_PM4_192BM) &&
+	    (init->cce_mode != R128_PM4_128BM_64INDBM) &&
+	    (init->cce_mode != R128_PM4_64BM_128INDBM) &&
+	    (init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM)) {
+		DRM_DEBUG("Bad cce_mode!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+
+	switch (init->cce_mode) {
+	case R128_PM4_NONPM4:
+		dev_priv->cce_fifo_size = 0;
+		break;
+	case R128_PM4_192PIO:
+	case R128_PM4_192BM:
+		dev_priv->cce_fifo_size = 192;
+		break;
+	case R128_PM4_128PIO_64INDBM:
+	case R128_PM4_128BM_64INDBM:
+		dev_priv->cce_fifo_size = 128;
+		break;
+	case R128_PM4_64PIO_128INDBM:
+	case R128_PM4_64BM_128INDBM:
+	case R128_PM4_64PIO_64VCBM_64INDBM:
+	case R128_PM4_64BM_64VCBM_64INDBM:
+	case R128_PM4_64PIO_64VCPIO_64INDPIO:
+		dev_priv->cce_fifo_size = 64;
+		break;
+	}
+
+	switch (init->fb_bpp) {
+	case 16:
+		dev_priv->color_fmt = R128_DATATYPE_RGB565;
+		break;
+	case 32:
+	default:
+		dev_priv->color_fmt = R128_DATATYPE_ARGB8888;
+		break;
+	}
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->front_pitch = init->front_pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->back_pitch = init->back_pitch;
+
+	switch (init->depth_bpp) {
+	case 16:
+		dev_priv->depth_fmt = R128_DATATYPE_RGB565;
+		break;
+	case 24:
+	case 32:
+	default:
+		dev_priv->depth_fmt = R128_DATATYPE_ARGB8888;
+		break;
+	}
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->depth_pitch = init->depth_pitch;
+	dev_priv->span_offset = init->span_offset;
+
+	dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch / 8) << 21) |
+					  (dev_priv->front_offset >> 5));
+	dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch / 8) << 21) |
+					 (dev_priv->back_offset >> 5));
+	dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
+					  (dev_priv->depth_offset >> 5) |
+					  R128_DST_TILE);
+	dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
+					 (dev_priv->span_offset >> 5));
+
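+	/* Each *_pitch_offset_c word above packs a whole surface into one
+	 * register: pitch / 8 in bits 21 and up, offset / 32 in the low
+	 * bits (plus R128_DST_TILE for the depth buffer).  With
+	 * hypothetical values pitch = 1024 and offset = 0 this packs to
+	 * (1024 / 8) << 21 | (0 >> 5) = 0x10000000.
+	 */
+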
+	dev_priv->sarea = drm_getsarea(dev);
+	if (!dev_priv->sarea) {
+		DRM_ERROR("could not find sarea!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
+	if (!dev_priv->mmio) {
+		DRM_ERROR("could not find mmio region!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+	dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
+	if (!dev_priv->cce_ring) {
+		DRM_ERROR("could not find cce ring region!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
+	if (!dev_priv->ring_rptr) {
+		DRM_ERROR("could not find ring read pointer!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+	dev->agp_buffer_token = init->buffers_offset;
+	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+	if (!dev->agp_buffer_map) {
+		DRM_ERROR("could not find dma buffer region!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+
+	if (!dev_priv->is_pci) {
+		dev_priv->agp_textures =
+		    drm_core_findmap(dev, init->agp_textures_offset);
+		if (!dev_priv->agp_textures) {
+			DRM_ERROR("could not find agp texture region!\n");
+			dev->dev_private = (void *)dev_priv;
+			r128_do_cleanup_cce(dev);
+			return -EINVAL;
+		}
+	}
+
+	dev_priv->sarea_priv =
+	    (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+				  init->sarea_priv_offset);
+
+#if __OS_HAS_AGP
+	if (!dev_priv->is_pci) {
+		drm_core_ioremap_wc(dev_priv->cce_ring, dev);
+		drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
+		drm_core_ioremap_wc(dev->agp_buffer_map, dev);
+		if (!dev_priv->cce_ring->handle ||
+		    !dev_priv->ring_rptr->handle ||
+		    !dev->agp_buffer_map->handle) {
+			DRM_ERROR("Could not ioremap agp regions!\n");
+			dev->dev_private = (void *)dev_priv;
+			r128_do_cleanup_cce(dev);
+			return -ENOMEM;
+		}
+	} else
+#endif
+	{
+		dev_priv->cce_ring->handle =
+			(void *)(unsigned long)dev_priv->cce_ring->offset;
+		dev_priv->ring_rptr->handle =
+			(void *)(unsigned long)dev_priv->ring_rptr->offset;
+		dev->agp_buffer_map->handle =
+			(void *)(unsigned long)dev->agp_buffer_map->offset;
+	}
+
+#if __OS_HAS_AGP
+	if (!dev_priv->is_pci)
+		dev_priv->cce_buffers_offset = dev->agp->base;
+	else
+#endif
+		dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
+
+	dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle;
+	dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
+			      + init->ring_size / sizeof(u32));
+	dev_priv->ring.size = init->ring_size;
+	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+
+	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
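+	/* Ring indices are kept in dwords, so e.g. a 64 KiB ring holds
+	 * 16384 dwords and tail_mask comes out as 0x3fff.
+	 */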
+
+	dev_priv->ring.high_mark = 128;
+
+	dev_priv->sarea_priv->last_frame = 0;
+	R128_WRITE(R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
+
+	dev_priv->sarea_priv->last_dispatch = 0;
+	R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch);
+
+#if __OS_HAS_AGP
+	if (dev_priv->is_pci) {
+#endif
+		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
+		dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
+		dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
+		dev_priv->gart_info.addr = NULL;
+		dev_priv->gart_info.bus_addr = 0;
+		dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
+		if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
+			DRM_ERROR("failed to init PCI GART!\n");
+			dev->dev_private = (void *)dev_priv;
+			r128_do_cleanup_cce(dev);
+			return -ENOMEM;
+		}
+		R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
+#if __OS_HAS_AGP
+	}
+#endif
+
+	r128_cce_init_ring_buffer(dev, dev_priv);
+	rc = r128_cce_load_microcode(dev_priv);
+
+	dev->dev_private = (void *)dev_priv;
+
+	r128_do_engine_reset(dev);
+
+	if (rc) {
+		DRM_ERROR("Failed to load firmware!\n");
+		r128_do_cleanup_cce(dev);
+	}
+
+	return rc;
+}
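+
+/* Note the error-handling convention in r128_do_init_cce() above: every
+ * failure path stores the half-initialized dev_priv in dev->dev_private
+ * before calling r128_do_cleanup_cce(), because the cleanup routine
+ * below finds (and kfrees) all of its state through that pointer.
+ */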
+
+int r128_do_cleanup_cce(struct drm_device *dev)
+{
+	/* Make sure interrupts are disabled here because the uninstall ioctl
+	 * may not have been called from userspace and after dev_private
+	 * is freed, it's too late.
+	 */
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+	if (dev->dev_private) {
+		drm_r128_private_t *dev_priv = dev->dev_private;
+
+#if __OS_HAS_AGP
+		if (!dev_priv->is_pci) {
+			if (dev_priv->cce_ring != NULL)
+				drm_core_ioremapfree(dev_priv->cce_ring, dev);
+			if (dev_priv->ring_rptr != NULL)
+				drm_core_ioremapfree(dev_priv->ring_rptr, dev);
+			if (dev->agp_buffer_map != NULL) {
+				drm_core_ioremapfree(dev->agp_buffer_map, dev);
+				dev->agp_buffer_map = NULL;
+			}
+		} else
+#endif
+		{
+			if (dev_priv->gart_info.bus_addr)
+				if (!drm_ati_pcigart_cleanup(dev,
+							&dev_priv->gart_info))
+					DRM_ERROR
+					    ("failed to cleanup PCI GART!\n");
+		}
+
+		kfree(dev->dev_private);
+		dev->dev_private = NULL;
+	}
+
+	return 0;
+}
+
+int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_init_t *init = data;
+
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	switch (init->func) {
+	case R128_INIT_CCE:
+		return r128_do_init_cce(dev, init);
+	case R128_CLEANUP_CCE:
+		return r128_do_cleanup_cce(dev);
+	}
+
+	return -EINVAL;
+}
+
+int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
+		DRM_DEBUG("while CCE running\n");
+		return 0;
+	}
+
+	r128_do_cce_start(dev_priv);
+
+	return 0;
+}
+
+/* Stop the CCE.  The engine must have been idled before calling this
+ * routine.
+ */
+int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_cce_stop_t *stop = data;
+	int ret;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	/* Flush any pending CCE commands.  This ensures any outstanding
+	 * commands are executed by the engine before we turn it off.
+	 */
+	if (stop->flush)
+		r128_do_cce_flush(dev_priv);
+
+	/* If we fail to make the engine go idle, we return an error
+	 * code so that the DRM ioctl wrapper can try again.
+	 */
+	if (stop->idle) {
+		ret = r128_do_cce_idle(dev_priv);
+		if (ret)
+			return ret;
+	}
+
+	/* Finally, we can turn off the CCE.  If the engine isn't idle,
+	 * we will get some dropped triangles as they won't be fully
+	 * rendered before the CCE is shut down.
+	 */
+	r128_do_cce_stop(dev_priv);
+
+	/* Reset the engine */
+	r128_do_engine_reset(dev);
+
+	return 0;
+}
+
+/* Just reset the CCE ring.  Called as part of an X Server engine reset.
+ */
+int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	r128_do_cce_reset(dev_priv);
+
+	/* The CCE is no longer running after an engine reset */
+	dev_priv->cce_running = 0;
+
+	return 0;
+}
+
+int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	if (dev_priv->cce_running)
+		r128_do_cce_flush(dev_priv);
+
+	return r128_do_cce_idle(dev_priv);
+}
+
+int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev->dev_private);
+
+	return r128_do_engine_reset(dev);
+}
+
+int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	return -EINVAL;
+}
+
+/* ================================================================
+ * Freelist management
+ */
+#define R128_BUFFER_USED	0xffffffff
+#define R128_BUFFER_FREE	0
+
+#if 0
+static int r128_freelist_init(struct drm_device *dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	struct drm_buf *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	drm_r128_freelist_t *entry;
+	int i;
+
+	dev_priv->head = kzalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL);
+	if (dev_priv->head == NULL)
+		return -ENOMEM;
+
+	dev_priv->head->age = R128_BUFFER_USED;
+
+	for (i = 0; i < dma->buf_count; i++) {
+		buf = dma->buflist[i];
+		buf_priv = buf->dev_private;
+
+		entry = kmalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL);
+		if (!entry)
+			return -ENOMEM;
+
+		entry->age = R128_BUFFER_FREE;
+		entry->buf = buf;
+
+		/* Link the entry in right after the sentinel head node,
+		 * fixing up the old first entry's prev pointer before the
+		 * head is redirected.
+		 */
+		entry->prev = dev_priv->head;
+		entry->next = dev_priv->head->next;
+		if (!entry->next)
+			dev_priv->tail = entry;
+		else
+			entry->next->prev = entry;
+
+		buf_priv->discard = 0;
+		buf_priv->dispatched = 0;
+		buf_priv->list_entry = entry;
+
+		dev_priv->head->next = entry;
+	}
+
+	return 0;
+}
+#endif
+
+static struct drm_buf *r128_freelist_get(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_buf_priv_t *buf_priv;
+	struct drm_buf *buf;
+	int i, t;
+
+	/* FIXME: Optimize -- use freelist code */
+
+	for (i = 0; i < dma->buf_count; i++) {
+		buf = dma->buflist[i];
+		buf_priv = buf->dev_private;
+		if (!buf->file_priv)
+			return buf;
+	}
+
+	for (t = 0; t < dev_priv->usec_timeout; t++) {
+		u32 done_age = R128_READ(R128_LAST_DISPATCH_REG);
+
+		for (i = 0; i < dma->buf_count; i++) {
+			buf = dma->buflist[i];
+			buf_priv = buf->dev_private;
+			if (buf->pending && buf_priv->age <= done_age) {
+				/* The buffer has been processed, so it
+				 * can now be used.
+				 */
+				buf->pending = 0;
+				return buf;
+			}
+		}
+		DRM_UDELAY(1);
+	}
+
+	DRM_DEBUG("returning NULL!\n");
+	return NULL;
+}
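+
+/* Buffer reclamation is age based: every dispatched buffer is stamped
+ * with an increasing age, and the engine writes the age of the most
+ * recently retired buffer to the LAST_DISPATCH scratch register.  So if
+ * a pending buffer carries age 5 while R128_READ(R128_LAST_DISPATCH_REG)
+ * returns 7, the hardware is provably done with it and it can be handed
+ * out again.
+ */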
+
+void r128_freelist_reset(struct drm_device *dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int i;
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_r128_buf_priv_t *buf_priv = buf->dev_private;
+		buf_priv->age = 0;
+	}
+}
+
+/* ================================================================
+ * CCE command submission
+ */
+
+int r128_wait_ring(drm_r128_private_t *dev_priv, int n)
+{
+	drm_r128_ring_buffer_t *ring = &dev_priv->ring;
+	int i;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		r128_update_ring_snapshot(dev_priv);
+		if (ring->space >= n)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+	/* FIXME: This is being ignored... */
+	DRM_ERROR("failed!\n");
+	return -EBUSY;
+}
+
+static int r128_cce_get_buffers(struct drm_device *dev,
+				struct drm_file *file_priv,
+				struct drm_dma *d)
+{
+	int i;
+	struct drm_buf *buf;
+
+	for (i = d->granted_count; i < d->request_count; i++) {
+		buf = r128_freelist_get(dev);
+		if (!buf)
+			return -EAGAIN;
+
+		buf->file_priv = file_priv;
+
+		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+				     sizeof(buf->idx)))
+			return -EFAULT;
+		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+				     sizeof(buf->total)))
+			return -EFAULT;
+
+		d->granted_count++;
+	}
+	return 0;
+}
+
+int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int ret = 0;
+	struct drm_dma *d = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* Please don't send us buffers.
+	 */
+	if (d->send_count != 0) {
+		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
+			  DRM_CURRENTPID, d->send_count);
+		return -EINVAL;
+	}
+
+	/* We'll send you buffers.
+	 */
+	if (d->request_count < 0 || d->request_count > dma->buf_count) {
+		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+			  DRM_CURRENTPID, d->request_count, dma->buf_count);
+		return -EINVAL;
+	}
+
+	d->granted_count = 0;
+
+	if (d->request_count)
+		ret = r128_cce_get_buffers(dev, file_priv, d);
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/r128/r128_drv.c b/linux-imx/drivers/gpu/drm/r128/r128_drv.c
new file mode 100644
index 0000000..472c38f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/r128/r128_drv.c
@@ -0,0 +1,112 @@
+/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*-
+ * Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rickard E. (Rik) Faith <faith@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/r128_drm.h>
+#include "r128_drv.h"
+
+#include <drm/drm_pciids.h>
+
+static struct pci_device_id pciidlist[] = {
+	r128_PCI_IDS
+};
+
+static const struct file_operations r128_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = r128_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+	.dev_priv_size = sizeof(drm_r128_buf_priv_t),
+	.load = r128_driver_load,
+	.preclose = r128_driver_preclose,
+	.lastclose = r128_driver_lastclose,
+	.get_vblank_counter = r128_get_vblank_counter,
+	.enable_vblank = r128_enable_vblank,
+	.disable_vblank = r128_disable_vblank,
+	.irq_preinstall = r128_driver_irq_preinstall,
+	.irq_postinstall = r128_driver_irq_postinstall,
+	.irq_uninstall = r128_driver_irq_uninstall,
+	.irq_handler = r128_driver_irq_handler,
+	.ioctls = r128_ioctls,
+	.dma_ioctl = r128_cce_buffers,
+	.fops = &r128_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+int r128_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	pci_set_master(dev->pdev);
+	return drm_vblank_init(dev, 1);
+}
+
+static struct pci_driver r128_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+};
+
+static int __init r128_init(void)
+{
+	driver.num_ioctls = r128_max_ioctl;
+
+	return drm_pci_init(&driver, &r128_pci_driver);
+}
+
+static void __exit r128_exit(void)
+{
+	drm_pci_exit(&driver, &r128_pci_driver);
+}
+
+module_init(r128_init);
+module_exit(r128_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/linux-imx/drivers/gpu/drm/r128/r128_drv.h b/linux-imx/drivers/gpu/drm/r128/r128_drv.h
new file mode 100644
index 0000000..930c71b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/r128/r128_drv.h
@@ -0,0 +1,530 @@
+/* r128_drv.h -- Private header for r128 driver -*- linux-c -*-
+ * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com
+ */
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rickard E. (Rik) Faith <faith@valinux.com>
+ *    Kevin E. Martin <martin@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ *    Michel Dänzer <daenzerm@student.ethz.ch>
+ */
+
+#ifndef __R128_DRV_H__
+#define __R128_DRV_H__
+
+/* General customization:
+ */
+#define DRIVER_AUTHOR		"Gareth Hughes, VA Linux Systems Inc."
+
+#define DRIVER_NAME		"r128"
+#define DRIVER_DESC		"ATI Rage 128"
+#define DRIVER_DATE		"20030725"
+
+/* Interface history:
+ *
+ * ??  - ??
+ * 2.4 - Add support for ycbcr textures (no new ioctls)
+ * 2.5 - Add FLIP ioctl, disable FULLSCREEN.
+ */
+#define DRIVER_MAJOR		2
+#define DRIVER_MINOR		5
+#define DRIVER_PATCHLEVEL	0
+
+#define GET_RING_HEAD(dev_priv)		R128_READ(R128_PM4_BUFFER_DL_RPTR)
+
+typedef struct drm_r128_freelist {
+	unsigned int age;
+	struct drm_buf *buf;
+	struct drm_r128_freelist *next;
+	struct drm_r128_freelist *prev;
+} drm_r128_freelist_t;
+
+typedef struct drm_r128_ring_buffer {
+	u32 *start;
+	u32 *end;
+	int size;
+	int size_l2qw;
+
+	u32 tail;
+	u32 tail_mask;
+	int space;
+
+	int high_mark;
+} drm_r128_ring_buffer_t;
+
+typedef struct drm_r128_private {
+	drm_r128_ring_buffer_t ring;
+	drm_r128_sarea_t *sarea_priv;
+
+	int cce_mode;
+	int cce_fifo_size;
+	int cce_running;
+
+	drm_r128_freelist_t *head;
+	drm_r128_freelist_t *tail;
+
+	int usec_timeout;
+	int is_pci;
+	unsigned long cce_buffers_offset;
+
+	atomic_t idle_count;
+
+	int page_flipping;
+	int current_page;
+	u32 crtc_offset;
+	u32 crtc_offset_cntl;
+
+	atomic_t vbl_received;
+
+	u32 color_fmt;
+	unsigned int front_offset;
+	unsigned int front_pitch;
+	unsigned int back_offset;
+	unsigned int back_pitch;
+
+	u32 depth_fmt;
+	unsigned int depth_offset;
+	unsigned int depth_pitch;
+	unsigned int span_offset;
+
+	u32 front_pitch_offset_c;
+	u32 back_pitch_offset_c;
+	u32 depth_pitch_offset_c;
+	u32 span_pitch_offset_c;
+
+	drm_local_map_t *sarea;
+	drm_local_map_t *mmio;
+	drm_local_map_t *cce_ring;
+	drm_local_map_t *ring_rptr;
+	drm_local_map_t *agp_textures;
+	struct drm_ati_pcigart_info gart_info;
+} drm_r128_private_t;
+
+typedef struct drm_r128_buf_priv {
+	u32 age;
+	int prim;
+	int discard;
+	int dispatched;
+	drm_r128_freelist_t *list_entry;
+} drm_r128_buf_priv_t;
+
+extern struct drm_ioctl_desc r128_ioctls[];
+extern int r128_max_ioctl;
+
+				/* r128_cce.c */
+extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
+extern void r128_freelist_reset(struct drm_device *dev);
+
+extern int r128_wait_ring(drm_r128_private_t *dev_priv, int n);
+
+extern int r128_do_cce_idle(drm_r128_private_t *dev_priv);
+extern int r128_do_cleanup_cce(struct drm_device *dev);
+
+extern int r128_enable_vblank(struct drm_device *dev, int crtc);
+extern void r128_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
+extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
+extern void r128_driver_irq_preinstall(struct drm_device *dev);
+extern int r128_driver_irq_postinstall(struct drm_device *dev);
+extern void r128_driver_irq_uninstall(struct drm_device *dev);
+extern void r128_driver_lastclose(struct drm_device *dev);
+extern int r128_driver_load(struct drm_device *dev, unsigned long flags);
+extern void r128_driver_preclose(struct drm_device *dev,
+				 struct drm_file *file_priv);
+
+extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg);
+
+/* Register definitions, register access macros and drmAddMap constants
+ * for Rage 128 kernel driver.
+ */
+
+#define R128_AUX_SC_CNTL		0x1660
+#	define R128_AUX1_SC_EN			(1 << 0)
+#	define R128_AUX1_SC_MODE_OR		(0 << 1)
+#	define R128_AUX1_SC_MODE_NAND		(1 << 1)
+#	define R128_AUX2_SC_EN			(1 << 2)
+#	define R128_AUX2_SC_MODE_OR		(0 << 3)
+#	define R128_AUX2_SC_MODE_NAND		(1 << 3)
+#	define R128_AUX3_SC_EN			(1 << 4)
+#	define R128_AUX3_SC_MODE_OR		(0 << 5)
+#	define R128_AUX3_SC_MODE_NAND		(1 << 5)
+#define R128_AUX1_SC_LEFT		0x1664
+#define R128_AUX1_SC_RIGHT		0x1668
+#define R128_AUX1_SC_TOP		0x166c
+#define R128_AUX1_SC_BOTTOM		0x1670
+#define R128_AUX2_SC_LEFT		0x1674
+#define R128_AUX2_SC_RIGHT		0x1678
+#define R128_AUX2_SC_TOP		0x167c
+#define R128_AUX2_SC_BOTTOM		0x1680
+#define R128_AUX3_SC_LEFT		0x1684
+#define R128_AUX3_SC_RIGHT		0x1688
+#define R128_AUX3_SC_TOP		0x168c
+#define R128_AUX3_SC_BOTTOM		0x1690
+
+#define R128_BRUSH_DATA0		0x1480
+#define R128_BUS_CNTL			0x0030
+#	define R128_BUS_MASTER_DIS		(1 << 6)
+
+#define R128_CLOCK_CNTL_INDEX		0x0008
+#define R128_CLOCK_CNTL_DATA		0x000c
+#	define R128_PLL_WR_EN			(1 << 7)
+#define R128_CONSTANT_COLOR_C		0x1d34
+#define R128_CRTC_OFFSET		0x0224
+#define R128_CRTC_OFFSET_CNTL		0x0228
+#	define R128_CRTC_OFFSET_FLIP_CNTL	(1 << 16)
+
+#define R128_DP_GUI_MASTER_CNTL		0x146c
+#       define R128_GMC_SRC_PITCH_OFFSET_CNTL	(1    <<  0)
+#       define R128_GMC_DST_PITCH_OFFSET_CNTL	(1    <<  1)
+#	define R128_GMC_BRUSH_SOLID_COLOR	(13   <<  4)
+#	define R128_GMC_BRUSH_NONE		(15   <<  4)
+#	define R128_GMC_DST_16BPP		(4    <<  8)
+#	define R128_GMC_DST_24BPP		(5    <<  8)
+#	define R128_GMC_DST_32BPP		(6    <<  8)
+#       define R128_GMC_DST_DATATYPE_SHIFT	8
+#	define R128_GMC_SRC_DATATYPE_COLOR	(3    << 12)
+#	define R128_DP_SRC_SOURCE_MEMORY	(2    << 24)
+#	define R128_DP_SRC_SOURCE_HOST_DATA	(3    << 24)
+#	define R128_GMC_CLR_CMP_CNTL_DIS	(1    << 28)
+#	define R128_GMC_AUX_CLIP_DIS		(1    << 29)
+#	define R128_GMC_WR_MSK_DIS		(1    << 30)
+#	define R128_ROP3_S			0x00cc0000
+#	define R128_ROP3_P			0x00f00000
+#define R128_DP_WRITE_MASK		0x16cc
+#define R128_DST_PITCH_OFFSET_C		0x1c80
+#	define R128_DST_TILE			(1 << 31)
+
+#define R128_GEN_INT_CNTL		0x0040
+#	define R128_CRTC_VBLANK_INT_EN		(1 <<  0)
+#define R128_GEN_INT_STATUS		0x0044
+#	define R128_CRTC_VBLANK_INT		(1 <<  0)
+#	define R128_CRTC_VBLANK_INT_AK		(1 <<  0)
+#define R128_GEN_RESET_CNTL		0x00f0
+#	define R128_SOFT_RESET_GUI		(1 <<  0)
+
+#define R128_GUI_SCRATCH_REG0		0x15e0
+#define R128_GUI_SCRATCH_REG1		0x15e4
+#define R128_GUI_SCRATCH_REG2		0x15e8
+#define R128_GUI_SCRATCH_REG3		0x15ec
+#define R128_GUI_SCRATCH_REG4		0x15f0
+#define R128_GUI_SCRATCH_REG5		0x15f4
+
+#define R128_GUI_STAT			0x1740
+#	define R128_GUI_FIFOCNT_MASK		0x0fff
+#	define R128_GUI_ACTIVE			(1 << 31)
+
+#define R128_MCLK_CNTL			0x000f
+#	define R128_FORCE_GCP			(1 << 16)
+#	define R128_FORCE_PIPE3D_CP		(1 << 17)
+#	define R128_FORCE_RCP			(1 << 18)
+
+#define R128_PC_GUI_CTLSTAT		0x1748
+#define R128_PC_NGUI_CTLSTAT		0x0184
+#	define R128_PC_FLUSH_GUI		(3 << 0)
+#	define R128_PC_RI_GUI			(1 << 2)
+#	define R128_PC_FLUSH_ALL		0x00ff
+#	define R128_PC_BUSY			(1 << 31)
+
+#define R128_PCI_GART_PAGE		0x017c
+#define R128_PRIM_TEX_CNTL_C		0x1cb0
+
+#define R128_SCALE_3D_CNTL		0x1a00
+#define R128_SEC_TEX_CNTL_C		0x1d00
+#define R128_SEC_TEXTURE_BORDER_COLOR_C	0x1d3c
+#define R128_SETUP_CNTL			0x1bc4
+#define R128_STEN_REF_MASK_C		0x1d40
+
+#define R128_TEX_CNTL_C			0x1c9c
+#	define R128_TEX_CACHE_FLUSH		(1 << 23)
+
+#define R128_WAIT_UNTIL			0x1720
+#	define R128_EVENT_CRTC_OFFSET		(1 << 0)
+#define R128_WINDOW_XY_OFFSET		0x1bcc
+
+/* CCE registers
+ */
+#define R128_PM4_BUFFER_OFFSET		0x0700
+#define R128_PM4_BUFFER_CNTL		0x0704
+#	define R128_PM4_MASK			(15 << 28)
+#	define R128_PM4_NONPM4			(0  << 28)
+#	define R128_PM4_192PIO			(1  << 28)
+#	define R128_PM4_192BM			(2  << 28)
+#	define R128_PM4_128PIO_64INDBM		(3  << 28)
+#	define R128_PM4_128BM_64INDBM		(4  << 28)
+#	define R128_PM4_64PIO_128INDBM		(5  << 28)
+#	define R128_PM4_64BM_128INDBM		(6  << 28)
+#	define R128_PM4_64PIO_64VCBM_64INDBM	(7  << 28)
+#	define R128_PM4_64BM_64VCBM_64INDBM	(8  << 28)
+#	define R128_PM4_64PIO_64VCPIO_64INDPIO	(15 << 28)
+#	define R128_PM4_BUFFER_CNTL_NOUPDATE	(1  << 27)
+
+#define R128_PM4_BUFFER_WM_CNTL		0x0708
+#	define R128_WMA_SHIFT			0
+#	define R128_WMB_SHIFT			8
+#	define R128_WMC_SHIFT			16
+#	define R128_WB_WM_SHIFT			24
+
+#define R128_PM4_BUFFER_DL_RPTR_ADDR	0x070c
+#define R128_PM4_BUFFER_DL_RPTR		0x0710
+#define R128_PM4_BUFFER_DL_WPTR		0x0714
+#	define R128_PM4_BUFFER_DL_DONE		(1 << 31)
+
+#define R128_PM4_VC_FPU_SETUP		0x071c
+
+#define R128_PM4_IW_INDOFF		0x0738
+#define R128_PM4_IW_INDSIZE		0x073c
+
+#define R128_PM4_STAT			0x07b8
+#	define R128_PM4_FIFOCNT_MASK		0x0fff
+#	define R128_PM4_BUSY			(1 << 16)
+#	define R128_PM4_GUI_ACTIVE		(1 << 31)
+
+#define R128_PM4_MICROCODE_ADDR		0x07d4
+#define R128_PM4_MICROCODE_RADDR	0x07d8
+#define R128_PM4_MICROCODE_DATAH	0x07dc
+#define R128_PM4_MICROCODE_DATAL	0x07e0
+
+#define R128_PM4_BUFFER_ADDR		0x07f0
+#define R128_PM4_MICRO_CNTL		0x07fc
+#	define R128_PM4_MICRO_FREERUN		(1 << 30)
+
+#define R128_PM4_FIFO_DATA_EVEN		0x1000
+#define R128_PM4_FIFO_DATA_ODD		0x1004
+
+/* CCE command packets
+ */
+#define R128_CCE_PACKET0		0x00000000
+#define R128_CCE_PACKET1		0x40000000
+#define R128_CCE_PACKET2		0x80000000
+#define R128_CCE_PACKET3		0xC0000000
+#	define R128_CNTL_HOSTDATA_BLT		0x00009400
+#	define R128_CNTL_PAINT_MULTI		0x00009A00
+#	define R128_CNTL_BITBLT_MULTI		0x00009B00
+#	define R128_3D_RNDR_GEN_INDX_PRIM	0x00002300
+
+#define R128_CCE_PACKET_MASK		0xC0000000
+#define R128_CCE_PACKET_COUNT_MASK	0x3fff0000
+#define R128_CCE_PACKET0_REG_MASK	0x000007ff
+#define R128_CCE_PACKET1_REG0_MASK	0x000007ff
+#define R128_CCE_PACKET1_REG1_MASK	0x003ff800
+
+#define R128_CCE_VC_CNTL_PRIM_TYPE_NONE		0x00000000
+#define R128_CCE_VC_CNTL_PRIM_TYPE_POINT	0x00000001
+#define R128_CCE_VC_CNTL_PRIM_TYPE_LINE		0x00000002
+#define R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE	0x00000003
+#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST	0x00000004
+#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN	0x00000005
+#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP	0x00000006
+#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2	0x00000007
+#define R128_CCE_VC_CNTL_PRIM_WALK_IND		0x00000010
+#define R128_CCE_VC_CNTL_PRIM_WALK_LIST		0x00000020
+#define R128_CCE_VC_CNTL_PRIM_WALK_RING		0x00000030
+#define R128_CCE_VC_CNTL_NUM_SHIFT		16
+
+#define R128_DATATYPE_VQ		0
+#define R128_DATATYPE_CI4		1
+#define R128_DATATYPE_CI8		2
+#define R128_DATATYPE_ARGB1555		3
+#define R128_DATATYPE_RGB565		4
+#define R128_DATATYPE_RGB888		5
+#define R128_DATATYPE_ARGB8888		6
+#define R128_DATATYPE_RGB332		7
+#define R128_DATATYPE_Y8		8
+#define R128_DATATYPE_RGB8		9
+#define R128_DATATYPE_CI16		10
+#define R128_DATATYPE_YVYU422		11
+#define R128_DATATYPE_VYUY422		12
+#define R128_DATATYPE_AYUV444		14
+#define R128_DATATYPE_ARGB4444		15
+
+/* Constants */
+#define R128_AGP_OFFSET			0x02000000
+
+#define R128_WATERMARK_L		16
+#define R128_WATERMARK_M		8
+#define R128_WATERMARK_N		8
+#define R128_WATERMARK_K		128
+
+#define R128_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+
+#define R128_LAST_FRAME_REG		R128_GUI_SCRATCH_REG0
+#define R128_LAST_DISPATCH_REG		R128_GUI_SCRATCH_REG1
+#define R128_MAX_VB_AGE			0x7fffffff
+#define R128_MAX_VB_VERTS		(0xffff)
+
+#define R128_RING_HIGH_MARK		128
+
+#define R128_PERFORMANCE_BOXES		0
+
+#define R128_PCIGART_TABLE_SIZE         32768
+
+#define R128_READ(reg)		DRM_READ32(dev_priv->mmio, (reg))
+#define R128_WRITE(reg, val)	DRM_WRITE32(dev_priv->mmio, (reg), (val))
+#define R128_READ8(reg)		DRM_READ8(dev_priv->mmio, (reg))
+#define R128_WRITE8(reg, val)	DRM_WRITE8(dev_priv->mmio, (reg), (val))
+
+#define R128_WRITE_PLL(addr, val)					\
+do {									\
+	R128_WRITE8(R128_CLOCK_CNTL_INDEX,				\
+		    ((addr) & 0x1f) | R128_PLL_WR_EN);			\
+	R128_WRITE(R128_CLOCK_CNTL_DATA, (val));			\
+} while (0)
+
+#define CCE_PACKET0(reg, n)		(R128_CCE_PACKET0 |		\
+					 ((n) << 16) | ((reg) >> 2))
+#define CCE_PACKET1(reg0, reg1)		(R128_CCE_PACKET1 |		\
+					 (((reg1) >> 2) << 11) | ((reg0) >> 2))
+#define CCE_PACKET2()			(R128_CCE_PACKET2)
+#define CCE_PACKET3(pkt, n)		(R128_CCE_PACKET3 |		\
+					 (pkt) | ((n) << 16))
+
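+/* Example header encodings with the macros above (register offsets are
+ * encoded in dword units, hence the >> 2):
+ *
+ *	CCE_PACKET0(R128_SCALE_3D_CNTL, 0)
+ *		= 0x00000000 | (0 << 16) | (0x1a00 >> 2) = 0x00000680
+ *	CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 2)
+ *		= 0xC0000000 | 0x00009B00 | (2 << 16)    = 0xC0029B00
+ */
+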
+static __inline__ void r128_update_ring_snapshot(drm_r128_private_t *dev_priv)
+{
+	drm_r128_ring_buffer_t *ring = &dev_priv->ring;
+	ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
+	if (ring->space <= 0)
+		ring->space += ring->size;
+}
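+
+/* The head/tail difference goes negative once the write pointer has
+ * wrapped past the read pointer, so the snapshot folds it back into the
+ * ring: with a 65536-byte ring, head = 100 and tail = 4000 give
+ * (100 - 4000) * 4 = -15600 bytes, corrected to -15600 + 65536 = 49936
+ * bytes of usable space.
+ */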
+
+/* ================================================================
+ * Misc helper macros
+ */
+
+#define DEV_INIT_TEST_WITH_RETURN(_dev_priv)				\
+do {									\
+	if (!_dev_priv) {						\
+		DRM_ERROR("called with no initialization\n");		\
+		return -EINVAL;						\
+	}								\
+} while (0)
+
+#define RING_SPACE_TEST_WITH_RETURN(dev_priv)				\
+do {									\
+	drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i;		\
+	if (ring->space < ring->high_mark) {				\
+		for (i = 0 ; i < dev_priv->usec_timeout ; i++) {	\
+			r128_update_ring_snapshot(dev_priv);		\
+			if (ring->space >= ring->high_mark)		\
+				goto __ring_space_done;			\
+			DRM_UDELAY(1);					\
+		}							\
+		DRM_ERROR("ring space check failed!\n");		\
+		return -EBUSY;						\
+	}								\
+ __ring_space_done:							\
+	;								\
+} while (0)
+
+#define VB_AGE_TEST_WITH_RETURN(dev_priv)				\
+do {									\
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;		\
+	if (sarea_priv->last_dispatch >= R128_MAX_VB_AGE) {		\
+		int __ret = r128_do_cce_idle(dev_priv);			\
+		if (__ret)						\
+			return __ret;					\
+		sarea_priv->last_dispatch = 0;				\
+		r128_freelist_reset(dev);				\
+	}								\
+} while (0)
+
+#define R128_WAIT_UNTIL_PAGE_FLIPPED() do {				\
+	OUT_RING(CCE_PACKET0(R128_WAIT_UNTIL, 0));			\
+	OUT_RING(R128_EVENT_CRTC_OFFSET);				\
+} while (0)
+
+/* ================================================================
+ * Ring control
+ */
+
+#define R128_VERBOSE	0
+
+#define RING_LOCALS							\
+	int write, _nr; unsigned int tail_mask; volatile u32 *ring;
+
+#define BEGIN_RING(n) do {						\
+	if (R128_VERBOSE)						\
+		DRM_INFO("BEGIN_RING(%d)\n", (n));			\
+	if (dev_priv->ring.space <= (n) * sizeof(u32)) {		\
+		COMMIT_RING();						\
+		r128_wait_ring(dev_priv, (n) * sizeof(u32));		\
+	}								\
+	_nr = n; dev_priv->ring.space -= (n) * sizeof(u32);		\
+	ring = dev_priv->ring.start;					\
+	write = dev_priv->ring.tail;					\
+	tail_mask = dev_priv->ring.tail_mask;				\
+} while (0)
+
+/* You can set this to zero if you want.  If the card locks up, you'll
+ * need to keep this set.  It works around a bug in early revs of the
+ * Rage 128 chipset, where the CCE would read 32 dwords past the end of
+ * the ring buffer before wrapping around.
+ */
+#define R128_BROKEN_CCE	1
+
+#define ADVANCE_RING() do {						\
+	if (R128_VERBOSE)						\
+		DRM_INFO("ADVANCE_RING() wr=0x%06x tail=0x%06x\n",	\
+			 write, dev_priv->ring.tail);			\
+	if (R128_BROKEN_CCE && write < 32)				\
+		memcpy(dev_priv->ring.end,				\
+		       dev_priv->ring.start,				\
+		       write * sizeof(u32));				\
+	if (((dev_priv->ring.tail + _nr) & tail_mask) != write)		\
+		DRM_ERROR(						\
+			"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n",	\
+			((dev_priv->ring.tail + _nr) & tail_mask),	\
+			write, __LINE__);				\
+	else								\
+		dev_priv->ring.tail = write;				\
+} while (0)
+
+#define COMMIT_RING() do {						\
+	if (R128_VERBOSE)						\
+		DRM_INFO("COMMIT_RING() tail=0x%06x\n",			\
+			 dev_priv->ring.tail);				\
+	DRM_MEMORYBARRIER();						\
+	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail);	\
+	R128_READ(R128_PM4_BUFFER_DL_WPTR);				\
+} while (0)
+
+#define OUT_RING(x) do {						\
+	if (R128_VERBOSE)						\
+		DRM_INFO("   OUT_RING( 0x%08x ) at 0x%x\n",		\
+			 (unsigned int)(x), write);			\
+	ring[write++] = cpu_to_le32(x);					\
+	write &= tail_mask;						\
+} while (0)
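+
+/* Typical submission sequence built from the macros above
+ * (r128_emit_core() in r128_state.c is a real instance; COMMIT_RING()
+ * is normally issued once per ioctl, after a batch of packets):
+ *
+ *	RING_LOCALS;
+ *
+ *	BEGIN_RING(2);
+ *	OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
+ *	OUT_RING(ctx->scale_3d_cntl);
+ *	ADVANCE_RING();
+ */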
+
+#endif				/* __R128_DRV_H__ */
diff --git a/linux-imx/drivers/gpu/drm/r128/r128_ioc32.c b/linux-imx/drivers/gpu/drm/r128/r128_ioc32.c
new file mode 100644
index 0000000..a954c54
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/r128/r128_ioc32.c
@@ -0,0 +1,214 @@
+/**
+ * \file r128_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the R128 DRM.
+ *
+ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Egbert Eich 2003,2004
+ * Copyright (C) Dave Airlie 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include <drm/drmP.h>
+#include <drm/r128_drm.h>
+
+typedef struct drm_r128_init32 {
+	int func;
+	unsigned int sarea_priv_offset;
+	int is_pci;
+	int cce_mode;
+	int cce_secure;
+	int ring_size;
+	int usec_timeout;
+
+	unsigned int fb_bpp;
+	unsigned int front_offset, front_pitch;
+	unsigned int back_offset, back_pitch;
+	unsigned int depth_bpp;
+	unsigned int depth_offset, depth_pitch;
+	unsigned int span_offset;
+
+	unsigned int fb_offset;
+	unsigned int mmio_offset;
+	unsigned int ring_offset;
+	unsigned int ring_rptr_offset;
+	unsigned int buffers_offset;
+	unsigned int agp_textures_offset;
+} drm_r128_init32_t;
+
+static int compat_r128_init(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	drm_r128_init32_t init32;
+	drm_r128_init_t __user *init;
+
+	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
+		return -EFAULT;
+
+	init = compat_alloc_user_space(sizeof(*init));
+	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
+	    || __put_user(init32.func, &init->func)
+	    || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
+	    || __put_user(init32.is_pci, &init->is_pci)
+	    || __put_user(init32.cce_mode, &init->cce_mode)
+	    || __put_user(init32.cce_secure, &init->cce_secure)
+	    || __put_user(init32.ring_size, &init->ring_size)
+	    || __put_user(init32.usec_timeout, &init->usec_timeout)
+	    || __put_user(init32.fb_bpp, &init->fb_bpp)
+	    || __put_user(init32.front_offset, &init->front_offset)
+	    || __put_user(init32.front_pitch, &init->front_pitch)
+	    || __put_user(init32.back_offset, &init->back_offset)
+	    || __put_user(init32.back_pitch, &init->back_pitch)
+	    || __put_user(init32.depth_bpp, &init->depth_bpp)
+	    || __put_user(init32.depth_offset, &init->depth_offset)
+	    || __put_user(init32.depth_pitch, &init->depth_pitch)
+	    || __put_user(init32.span_offset, &init->span_offset)
+	    || __put_user(init32.fb_offset, &init->fb_offset)
+	    || __put_user(init32.mmio_offset, &init->mmio_offset)
+	    || __put_user(init32.ring_offset, &init->ring_offset)
+	    || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
+	    || __put_user(init32.buffers_offset, &init->buffers_offset)
+	    || __put_user(init32.agp_textures_offset,
+			  &init->agp_textures_offset))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init);
+}
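+
+/* Every compat shim in this file follows the same recipe: copy the
+ * packed 32-bit struct in from userspace, carve out a native-layout
+ * twin with compat_alloc_user_space(), widen each field one by one
+ * (pointers travel as u32 and are rebuilt through an unsigned long
+ * cast), and then re-enter the regular ioctl path under the native
+ * command number.
+ */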
+
+typedef struct drm_r128_depth32 {
+	int func;
+	int n;
+	u32 x;
+	u32 y;
+	u32 buffer;
+	u32 mask;
+} drm_r128_depth32_t;
+
+static int compat_r128_depth(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	drm_r128_depth32_t depth32;
+	drm_r128_depth_t __user *depth;
+
+	if (copy_from_user(&depth32, (void __user *)arg, sizeof(depth32)))
+		return -EFAULT;
+
+	depth = compat_alloc_user_space(sizeof(*depth));
+	if (!access_ok(VERIFY_WRITE, depth, sizeof(*depth))
+	    || __put_user(depth32.func, &depth->func)
+	    || __put_user(depth32.n, &depth->n)
+	    || __put_user((int __user *)(unsigned long)depth32.x, &depth->x)
+	    || __put_user((int __user *)(unsigned long)depth32.y, &depth->y)
+	    || __put_user((unsigned int __user *)(unsigned long)depth32.buffer,
+			  &depth->buffer)
+	    || __put_user((unsigned char __user *)(unsigned long)depth32.mask,
+			  &depth->mask))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
+
+}
+
+typedef struct drm_r128_stipple32 {
+	u32 mask;
+} drm_r128_stipple32_t;
+
+static int compat_r128_stipple(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_r128_stipple32_t stipple32;
+	drm_r128_stipple_t __user *stipple;
+
+	if (copy_from_user(&stipple32, (void __user *)arg, sizeof(stipple32)))
+		return -EFAULT;
+
+	stipple = compat_alloc_user_space(sizeof(*stipple));
+	if (!access_ok(VERIFY_WRITE, stipple, sizeof(*stipple))
+	    || __put_user((unsigned int __user *)(unsigned long)stipple32.mask,
+			  &stipple->mask))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
+}
+
+typedef struct drm_r128_getparam32 {
+	int param;
+	u32 value;
+} drm_r128_getparam32_t;
+
+static int compat_r128_getparam(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_r128_getparam32_t getparam32;
+	drm_r128_getparam_t __user *getparam;
+
+	if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
+		return -EFAULT;
+
+	getparam = compat_alloc_user_space(sizeof(*getparam));
+	if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
+	    || __put_user(getparam32.param, &getparam->param)
+	    || __put_user((void __user *)(unsigned long)getparam32.value,
+			  &getparam->value))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
+}
+
+drm_ioctl_compat_t *r128_compat_ioctls[] = {
+	[DRM_R128_INIT] = compat_r128_init,
+	[DRM_R128_DEPTH] = compat_r128_depth,
+	[DRM_R128_STIPPLE] = compat_r128_stipple,
+	[DRM_R128_GETPARAM] = compat_r128_getparam,
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn = NULL;
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
+		fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+	if (fn != NULL)
+		ret = (*fn) (filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp, cmd, arg);
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/r128/r128_irq.c b/linux-imx/drivers/gpu/drm/r128/r128_irq.c
new file mode 100644
index 0000000..2ea4f09
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/r128/r128_irq.c
@@ -0,0 +1,115 @@
+/* r128_irq.c -- IRQ handling for the ATI Rage 128 -*- linux-c -*- */
+/*
+ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ *    Eric Anholt <anholt@FreeBSD.org>
+ */
+
+#include <drm/drmP.h>
+#include <drm/r128_drm.h>
+#include "r128_drv.h"
+
+u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	const drm_r128_private_t *dev_priv = dev->dev_private;
+
+	if (crtc != 0)
+		return 0;
+
+	return atomic_read(&dev_priv->vbl_received);
+}
+
+irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
+	int status;
+
+	status = R128_READ(R128_GEN_INT_STATUS);
+
+	/* VBLANK interrupt */
+	if (status & R128_CRTC_VBLANK_INT) {
+		R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
+		atomic_inc(&dev_priv->vbl_received);
+		drm_handle_vblank(dev, 0);
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
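+
+/* The chip acknowledges an interrupt source by writing the matching
+ * *_AK bit back into GEN_INT_STATUS, which is why the vblank path above
+ * acks first and only then bumps the counter that
+ * r128_get_vblank_counter() reports to the DRM core.
+ */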
+
+int r128_enable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+
+	if (crtc != 0) {
+		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
+		return -EINVAL;
+	}
+
+	R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
+	return 0;
+}
+
+void r128_disable_vblank(struct drm_device *dev, int crtc)
+{
+	if (crtc != 0)
+		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
+
+	/*
+	 * FIXME: implement proper interrupt disable by using the vblank
+	 * counter register (if available)
+	 *
+	 * R128_WRITE(R128_GEN_INT_CNTL,
+	 *            R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
+	 */
+}
+
+void r128_driver_irq_preinstall(struct drm_device *dev)
+{
+	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
+
+	/* Disable *all* interrupts */
+	R128_WRITE(R128_GEN_INT_CNTL, 0);
+	/* Clear vblank bit if it's already high */
+	R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
+}
+
+int r128_driver_irq_postinstall(struct drm_device *dev)
+{
+	return 0;
+}
+
+void r128_driver_irq_uninstall(struct drm_device *dev)
+{
+	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
+	if (!dev_priv)
+		return;
+
+	/* Disable *all* interrupts */
+	R128_WRITE(R128_GEN_INT_CNTL, 0);
+}
diff --git a/linux-imx/drivers/gpu/drm/r128/r128_state.c b/linux-imx/drivers/gpu/drm/r128/r128_state.c
new file mode 100644
index 0000000..19bb7e6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/r128/r128_state.c
@@ -0,0 +1,1666 @@
+/* r128_state.c -- State support for r128 -*- linux-c -*-
+ * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
+ */
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/r128_drm.h>
+#include "r128_drv.h"
+
+/* ================================================================
+ * CCE hardware state programming functions
+ */
+
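+/* The CCE exposes three auxiliary scissor rectangles (AUX1..AUX3), so at
+ * most three cliprects can be programmed per pass; the dispatch functions
+ * below walk their box lists in batches of three.  The hardware takes
+ * inclusive right/bottom edges, hence the "x2 - 1" / "y2 - 1" below.
+ */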
+static void r128_emit_clip_rects(drm_r128_private_t *dev_priv,
+				 struct drm_clip_rect *boxes, int count)
+{
+	u32 aux_sc_cntl = 0x00000000;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING((count < 3 ? count : 3) * 5 + 2);
+
+	if (count >= 1) {
+		OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
+		OUT_RING(boxes[0].x1);
+		OUT_RING(boxes[0].x2 - 1);
+		OUT_RING(boxes[0].y1);
+		OUT_RING(boxes[0].y2 - 1);
+
+		aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
+	}
+	if (count >= 2) {
+		OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
+		OUT_RING(boxes[1].x1);
+		OUT_RING(boxes[1].x2 - 1);
+		OUT_RING(boxes[1].y1);
+		OUT_RING(boxes[1].y2 - 1);
+
+		aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
+	}
+	if (count >= 3) {
+		OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
+		OUT_RING(boxes[2].x1);
+		OUT_RING(boxes[2].x2 - 1);
+		OUT_RING(boxes[2].y1);
+		OUT_RING(boxes[2].y2 - 1);
+
+		aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
+	}
+
+	OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
+	OUT_RING(aux_sc_cntl);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_core(drm_r128_private_t *dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(2);
+
+	OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
+	OUT_RING(ctx->scale_3d_cntl);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_context(drm_r128_private_t *dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(13);
+
+	OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
+	OUT_RING(ctx->dst_pitch_offset_c);
+	OUT_RING(ctx->dp_gui_master_cntl_c);
+	OUT_RING(ctx->sc_top_left_c);
+	OUT_RING(ctx->sc_bottom_right_c);
+	OUT_RING(ctx->z_offset_c);
+	OUT_RING(ctx->z_pitch_c);
+	OUT_RING(ctx->z_sten_cntl_c);
+	OUT_RING(ctx->tex_cntl_c);
+	OUT_RING(ctx->misc_3d_state_cntl_reg);
+	OUT_RING(ctx->texture_clr_cmp_clr_c);
+	OUT_RING(ctx->texture_clr_cmp_msk_c);
+	OUT_RING(ctx->fog_color_c);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_setup(drm_r128_private_t *dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(3);
+
+	OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
+	OUT_RING(ctx->setup_cntl);
+	OUT_RING(ctx->pm4_vc_fpu_setup);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_masks(drm_r128_private_t *dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(5);
+
+	OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
+	OUT_RING(ctx->dp_write_mask);
+
+	OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
+	OUT_RING(ctx->sten_ref_mask_c);
+	OUT_RING(ctx->plane_3d_mask_c);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_window(drm_r128_private_t *dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(2);
+
+	OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
+	OUT_RING(ctx->window_xy_offset);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_tex0(drm_r128_private_t *dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);
+
+	OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
+			     2 + R128_MAX_TEXTURE_LEVELS));
+	OUT_RING(tex->tex_cntl);
+	OUT_RING(tex->tex_combine_cntl);
+	OUT_RING(ctx->tex_size_pitch_c);
+	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
+		OUT_RING(tex->tex_offset[i]);
+
+	OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
+	OUT_RING(ctx->constant_color_c);
+	OUT_RING(tex->tex_border_color);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_tex1(drm_r128_private_t *dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);
+
+	OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
+	OUT_RING(tex->tex_cntl);
+	OUT_RING(tex->tex_combine_cntl);
+	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
+		OUT_RING(tex->tex_offset[i]);
+
+	OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
+	OUT_RING(tex->tex_border_color);
+
+	ADVANCE_RING();
+}
+
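+/* Flush any state the client marked dirty in the SAREA out to the
+ * hardware.  Each R128_UPLOAD_* bit is cleared as the corresponding
+ * register block is emitted; cliprects are handled separately by the
+ * dispatch functions below.
+ */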
+static void r128_emit_state(drm_r128_private_t *dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int dirty = sarea_priv->dirty;
+
+	DRM_DEBUG("dirty=0x%08x\n", dirty);
+
+	if (dirty & R128_UPLOAD_CORE) {
+		r128_emit_core(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_CORE;
+	}
+
+	if (dirty & R128_UPLOAD_CONTEXT) {
+		r128_emit_context(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
+	}
+
+	if (dirty & R128_UPLOAD_SETUP) {
+		r128_emit_setup(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
+	}
+
+	if (dirty & R128_UPLOAD_MASKS) {
+		r128_emit_masks(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
+	}
+
+	if (dirty & R128_UPLOAD_WINDOW) {
+		r128_emit_window(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
+	}
+
+	if (dirty & R128_UPLOAD_TEX0) {
+		r128_emit_tex0(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
+	}
+
+	if (dirty & R128_UPLOAD_TEX1) {
+		r128_emit_tex1(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
+	}
+
+	/* Turn off the texture cache flushing */
+	sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;
+
+	sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
+}
+
+#if R128_PERFORMANCE_BOXES
+/* ================================================================
+ * Performance monitoring functions
+ */
+
+static void r128_clear_box(drm_r128_private_t *dev_priv,
+			   int x, int y, int w, int h, int r, int g, int b)
+{
+	u32 pitch, offset;
+	u32 fb_bpp, color;
+	RING_LOCALS;
+
+	switch (dev_priv->fb_bpp) {
+	case 16:
+		fb_bpp = R128_GMC_DST_16BPP;
+		color = (((r & 0xf8) << 8) |
+			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
+		break;
+	case 24:
+		fb_bpp = R128_GMC_DST_24BPP;
+		color = ((r << 16) | (g << 8) | b);
+		break;
+	case 32:
+		fb_bpp = R128_GMC_DST_32BPP;
+		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
+		break;
+	default:
+		return;
+	}
+
+	offset = dev_priv->back_offset;
+	pitch = dev_priv->back_pitch >> 3;
+
+	BEGIN_RING(6);
+
+	OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+	OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+		 R128_GMC_BRUSH_SOLID_COLOR |
+		 fb_bpp |
+		 R128_GMC_SRC_DATATYPE_COLOR |
+		 R128_ROP3_P |
+		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);
+
+	OUT_RING((pitch << 21) | (offset >> 5));
+	OUT_RING(color);
+
+	OUT_RING((x << 16) | y);
+	OUT_RING((w << 16) | h);
+
+	ADVANCE_RING();
+}
+
+static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
+{
+	if (atomic_read(&dev_priv->idle_count) == 0)
+		r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
+	else
+		atomic_set(&dev_priv->idle_count, 0);
+}
+
+#endif
+
+/* ================================================================
+ * CCE command dispatch functions
+ */
+
+static void r128_print_dirty(const char *msg, unsigned int flags)
+{
+	DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
+		 msg,
+		 flags,
+		 (flags & R128_UPLOAD_CORE) ? "core, " : "",
+		 (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
+		 (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
+		 (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
+		 (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
+		 (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
+		 (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
+		 (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
+		 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
+}
+
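+/* Clear the given cliprects.  When page flipping is active and the back
+ * buffer is currently being scanned out, the R128_FRONT/R128_BACK flags
+ * are swapped first so that "front" and "back" keep their logical
+ * meaning for the client.
+ */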
+static void r128_cce_dispatch_clear(struct drm_device *dev,
+				    drm_r128_clear_t *clear)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	unsigned int flags = clear->flags;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	if (dev_priv->page_flipping && dev_priv->current_page == 1) {
+		unsigned int tmp = flags;
+
+		flags &= ~(R128_FRONT | R128_BACK);
+		if (tmp & R128_FRONT)
+			flags |= R128_BACK;
+		if (tmp & R128_BACK)
+			flags |= R128_FRONT;
+	}
+
+	for (i = 0; i < nbox; i++) {
+		int x = pbox[i].x1;
+		int y = pbox[i].y1;
+		int w = pbox[i].x2 - x;
+		int h = pbox[i].y2 - y;
+
+		DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
+			  pbox[i].x1, pbox[i].y1, pbox[i].x2,
+			  pbox[i].y2, flags);
+
+		if (flags & (R128_FRONT | R128_BACK)) {
+			BEGIN_RING(2);
+
+			OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
+			OUT_RING(clear->color_mask);
+
+			ADVANCE_RING();
+		}
+
+		if (flags & R128_FRONT) {
+			BEGIN_RING(6);
+
+			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+				 R128_GMC_BRUSH_SOLID_COLOR |
+				 (dev_priv->color_fmt << 8) |
+				 R128_GMC_SRC_DATATYPE_COLOR |
+				 R128_ROP3_P |
+				 R128_GMC_CLR_CMP_CNTL_DIS |
+				 R128_GMC_AUX_CLIP_DIS);
+
+			OUT_RING(dev_priv->front_pitch_offset_c);
+			OUT_RING(clear->clear_color);
+
+			OUT_RING((x << 16) | y);
+			OUT_RING((w << 16) | h);
+
+			ADVANCE_RING();
+		}
+
+		if (flags & R128_BACK) {
+			BEGIN_RING(6);
+
+			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+				 R128_GMC_BRUSH_SOLID_COLOR |
+				 (dev_priv->color_fmt << 8) |
+				 R128_GMC_SRC_DATATYPE_COLOR |
+				 R128_ROP3_P |
+				 R128_GMC_CLR_CMP_CNTL_DIS |
+				 R128_GMC_AUX_CLIP_DIS);
+
+			OUT_RING(dev_priv->back_pitch_offset_c);
+			OUT_RING(clear->clear_color);
+
+			OUT_RING((x << 16) | y);
+			OUT_RING((w << 16) | h);
+
+			ADVANCE_RING();
+		}
+
+		if (flags & R128_DEPTH) {
+			BEGIN_RING(6);
+
+			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+				 R128_GMC_BRUSH_SOLID_COLOR |
+				 (dev_priv->depth_fmt << 8) |
+				 R128_GMC_SRC_DATATYPE_COLOR |
+				 R128_ROP3_P |
+				 R128_GMC_CLR_CMP_CNTL_DIS |
+				 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
+
+			OUT_RING(dev_priv->depth_pitch_offset_c);
+			OUT_RING(clear->clear_depth);
+
+			OUT_RING((x << 16) | y);
+			OUT_RING((w << 16) | h);
+
+			ADVANCE_RING();
+		}
+	}
+}
+
+static void r128_cce_dispatch_swap(struct drm_device *dev)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+#if R128_PERFORMANCE_BOXES
+	/* Do some trivial performance monitoring...
+	 */
+	r128_cce_performance_boxes(dev_priv);
+#endif
+
+	for (i = 0; i < nbox; i++) {
+		int x = pbox[i].x1;
+		int y = pbox[i].y1;
+		int w = pbox[i].x2 - x;
+		int h = pbox[i].y2 - y;
+
+		BEGIN_RING(7);
+
+		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
+		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
+			 R128_GMC_DST_PITCH_OFFSET_CNTL |
+			 R128_GMC_BRUSH_NONE |
+			 (dev_priv->color_fmt << 8) |
+			 R128_GMC_SRC_DATATYPE_COLOR |
+			 R128_ROP3_S |
+			 R128_DP_SRC_SOURCE_MEMORY |
+			 R128_GMC_CLR_CMP_CNTL_DIS |
+			 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
+
+		/* Make this work even if front & back are flipped:
+		 */
+		if (dev_priv->current_page == 0) {
+			OUT_RING(dev_priv->back_pitch_offset_c);
+			OUT_RING(dev_priv->front_pitch_offset_c);
+		} else {
+			OUT_RING(dev_priv->front_pitch_offset_c);
+			OUT_RING(dev_priv->back_pitch_offset_c);
+		}
+
+		OUT_RING((x << 16) | y);
+		OUT_RING((x << 16) | y);
+		OUT_RING((w << 16) | h);
+
+		ADVANCE_RING();
+	}
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	dev_priv->sarea_priv->last_frame++;
+
+	BEGIN_RING(2);
+
+	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
+	OUT_RING(dev_priv->sarea_priv->last_frame);
+
+	ADVANCE_RING();
+}
+
+static void r128_cce_dispatch_flip(struct drm_device *dev)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+	DRM_DEBUG("page=%d pfCurrentPage=%d\n",
+		  dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
+
+#if R128_PERFORMANCE_BOXES
+	/* Do some trivial performance monitoring...
+	 */
+	r128_cce_performance_boxes(dev_priv);
+#endif
+
+	BEGIN_RING(4);
+
+	R128_WAIT_UNTIL_PAGE_FLIPPED();
+	OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));
+
+	if (dev_priv->current_page == 0)
+		OUT_RING(dev_priv->back_offset);
+	else
+		OUT_RING(dev_priv->front_offset);
+
+	ADVANCE_RING();
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	dev_priv->sarea_priv->last_frame++;
+	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
+	    1 - dev_priv->current_page;
+
+	BEGIN_RING(2);
+
+	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
+	OUT_RING(dev_priv->sarea_priv->last_frame);
+
+	ADVANCE_RING();
+}
+
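+/* Emit a vertex buffer.  Since only three scissor rectangles can be
+ * active at once, the same rendering packet is re-emitted once per batch
+ * of up to three cliprects.
+ */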
+static void r128_cce_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int format = sarea_priv->vc_format;
+	int offset = buf->bus_address;
+	int size = buf->used;
+	int prim = buf_priv->prim;
+	int i = 0;
+	RING_LOCALS;
+	DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);
+
+	if (0)
+		r128_print_dirty("dispatch_vertex", sarea_priv->dirty);
+
+	if (buf->used) {
+		buf_priv->dispatched = 1;
+
+		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
+			r128_emit_state(dev_priv);
+
+		do {
+			/* Emit the next set of up to three cliprects */
+			if (i < sarea_priv->nbox) {
+				r128_emit_clip_rects(dev_priv,
+						     &sarea_priv->boxes[i],
+						     sarea_priv->nbox - i);
+			}
+
+			/* Emit the vertex buffer rendering commands */
+			BEGIN_RING(5);
+
+			OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
+			OUT_RING(offset);
+			OUT_RING(size);
+			OUT_RING(format);
+			OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
+				 (size << R128_CCE_VC_CNTL_NUM_SHIFT));
+
+			ADVANCE_RING();
+
+			i += 3;
+		} while (i < sarea_priv->nbox);
+	}
+
+	if (buf_priv->discard) {
+		buf_priv->age = dev_priv->sarea_priv->last_dispatch;
+
+		/* Emit the vertex buffer age */
+		BEGIN_RING(2);
+
+		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
+		OUT_RING(buf_priv->age);
+
+		ADVANCE_RING();
+
+		buf->pending = 1;
+		buf->used = 0;
+		/* FIXME: Check dispatched field */
+		buf_priv->dispatched = 0;
+	}
+
+	dev_priv->sarea_priv->last_dispatch++;
+
+	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
+	sarea_priv->nbox = 0;
+}
+
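+/* Kick off execution of a previously filled indirect buffer: the CCE
+ * fetches "dwords" words starting at the buffer's bus address once the
+ * R128_PM4_IW_INDOFF packet below goes through the ring.
+ */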
+static void r128_cce_dispatch_indirect(struct drm_device *dev,
+				       struct drm_buf *buf, int start, int end)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
+	RING_LOCALS;
+	DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
+
+	if (start != end) {
+		int offset = buf->bus_address + start;
+		int dwords = (end - start + 3) / sizeof(u32);
+
+		/* Indirect buffer data must be an even number of
+		 * dwords, so if we've been given an odd number we must
+		 * pad the data with a Type-2 CCE packet.
+		 */
+		if (dwords & 1) {
+			u32 *data = (u32 *)
+			    ((char *)dev->agp_buffer_map->handle
+			     + buf->offset + start);
+			data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
+		}
+
+		buf_priv->dispatched = 1;
+
+		/* Fire off the indirect buffer */
+		BEGIN_RING(3);
+
+		OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
+		OUT_RING(offset);
+		OUT_RING(dwords);
+
+		ADVANCE_RING();
+	}
+
+	if (buf_priv->discard) {
+		buf_priv->age = dev_priv->sarea_priv->last_dispatch;
+
+		/* Emit the indirect buffer age */
+		BEGIN_RING(2);
+
+		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
+		OUT_RING(buf_priv->age);
+
+		ADVANCE_RING();
+
+		buf->pending = 1;
+		buf->used = 0;
+		/* FIXME: Check dispatched field */
+		buf_priv->dispatched = 0;
+	}
+
+	dev_priv->sarea_priv->last_dispatch++;
+}
+
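+/* Emit an indexed primitive.  The packet header and vertex-count word
+ * are patched directly into the user buffer; with an odd index count the
+ * unused 16-bit index slot in the last dword is masked off (which half
+ * depends on endianness) so the engine never sees a stale index.
+ */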
+static void r128_cce_dispatch_indices(struct drm_device *dev,
+				      struct drm_buf *buf,
+				      int start, int end, int count)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int format = sarea_priv->vc_format;
+	int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
+	int prim = buf_priv->prim;
+	u32 *data;
+	int dwords;
+	int i = 0;
+	RING_LOCALS;
+	DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);
+
+	if (0)
+		r128_print_dirty("dispatch_indices", sarea_priv->dirty);
+
+	if (start != end) {
+		buf_priv->dispatched = 1;
+
+		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
+			r128_emit_state(dev_priv);
+
+		dwords = (end - start + 3) / sizeof(u32);
+
+		data = (u32 *) ((char *)dev->agp_buffer_map->handle
+				+ buf->offset + start);
+
+		data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
+						  dwords - 2));
+
+		data[1] = cpu_to_le32(offset);
+		data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
+		data[3] = cpu_to_le32(format);
+		data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
+				       (count << 16)));
+
+		if (count & 0x1) {
+#ifdef __LITTLE_ENDIAN
+			data[dwords - 1] &= 0x0000ffff;
+#else
+			data[dwords - 1] &= 0xffff0000;
+#endif
+		}
+
+		do {
+			/* Emit the next set of up to three cliprects */
+			if (i < sarea_priv->nbox) {
+				r128_emit_clip_rects(dev_priv,
+						     &sarea_priv->boxes[i],
+						     sarea_priv->nbox - i);
+			}
+
+			r128_cce_dispatch_indirect(dev, buf, start, end);
+
+			i += 3;
+		} while (i < sarea_priv->nbox);
+	}
+
+	if (buf_priv->discard) {
+		buf_priv->age = dev_priv->sarea_priv->last_dispatch;
+
+		/* Emit the vertex buffer age */
+		BEGIN_RING(2);
+
+		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
+		OUT_RING(buf_priv->age);
+
+		ADVANCE_RING();
+
+		buf->pending = 1;
+		/* FIXME: Check dispatched field */
+		buf_priv->dispatched = 0;
+	}
+
+	dev_priv->sarea_priv->last_dispatch++;
+
+	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
+	sarea_priv->nbox = 0;
+}
+
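+/* Host-data blit: the client's buffer is rewritten in place into a
+ * R128_CNTL_HOSTDATA_BLT packet (eight header dwords followed by the
+ * pixel data) and dispatched as an indirect buffer, with pixel cache
+ * flushes on either side so texture uploads never mix with rendering.
+ */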
+static int r128_cce_dispatch_blit(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  drm_r128_blit_t *blit)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	u32 *data;
+	int dword_shift, dwords;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	/* The compiler won't optimize away a division by a variable,
+	 * even if the only legal values are powers of two.  Thus, we'll
+	 * use a shift instead.
+	 */
+	switch (blit->format) {
+	case R128_DATATYPE_ARGB8888:
+		dword_shift = 0;
+		break;
+	case R128_DATATYPE_ARGB1555:
+	case R128_DATATYPE_RGB565:
+	case R128_DATATYPE_ARGB4444:
+	case R128_DATATYPE_YVYU422:
+	case R128_DATATYPE_VYUY422:
+		dword_shift = 1;
+		break;
+	case R128_DATATYPE_CI8:
+	case R128_DATATYPE_RGB8:
+		dword_shift = 2;
+		break;
+	default:
+		DRM_ERROR("invalid blit format %d\n", blit->format);
+		return -EINVAL;
+	}
+
+	/* Flush the pixel cache, and mark the contents as Read Invalid.
+	 * This ensures no pixel data gets mixed up with the texture
+	 * data from the host data blit, otherwise part of the texture
+	 * image may be corrupted.
+	 */
+	BEGIN_RING(2);
+
+	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
+	OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);
+
+	ADVANCE_RING();
+
+	/* Dispatch the indirect buffer.
+	 */
+	buf = dma->buflist[blit->idx];
+	buf_priv = buf->dev_private;
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", blit->idx);
+		return -EINVAL;
+	}
+
+	buf_priv->discard = 1;
+
+	dwords = (blit->width * blit->height) >> dword_shift;
+
+	data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+
+	data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
+	data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
+			       R128_GMC_BRUSH_NONE |
+			       (blit->format << 8) |
+			       R128_GMC_SRC_DATATYPE_COLOR |
+			       R128_ROP3_S |
+			       R128_DP_SRC_SOURCE_HOST_DATA |
+			       R128_GMC_CLR_CMP_CNTL_DIS |
+			       R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));
+
+	data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
+	data[3] = cpu_to_le32(0xffffffff);
+	data[4] = cpu_to_le32(0xffffffff);
+	data[5] = cpu_to_le32((blit->y << 16) | blit->x);
+	data[6] = cpu_to_le32((blit->height << 16) | blit->width);
+	data[7] = cpu_to_le32(dwords);
+
+	buf->used = (dwords + 8) * sizeof(u32);
+
+	r128_cce_dispatch_indirect(dev, buf, 0, buf->used);
+
+	/* Flush the pixel cache after the blit completes.  This ensures
+	 * the texture data is written out to memory before rendering
+	 * continues.
+	 */
+	BEGIN_RING(2);
+
+	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
+	OUT_RING(R128_PC_FLUSH_GUI);
+
+	ADVANCE_RING();
+
+	return 0;
+}
+
+/* ================================================================
+ * Tiled depth buffer management
+ *
+ * FIXME: These should all set the destination write mask for when we
+ * have hardware stencil support.
+ */
+
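+/* Write a horizontal span of depth values.  The span data (and optional
+ * per-pixel mask) live in user memory, so they are copied into kernel
+ * buffers up front; each written pixel is then emitted as a 1x1 solid
+ * fill.
+ */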
+static int r128_cce_dispatch_write_span(struct drm_device *dev,
+					drm_r128_depth_t *depth)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int count, x, y;
+	u32 *buffer;
+	u8 *mask;
+	int i, buffer_size, mask_size;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	count = depth->n;
+	if (count > 4096 || count <= 0)
+		return -EMSGSIZE;
+
+	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
+		return -EFAULT;
+	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
+		return -EFAULT;
+
+	buffer_size = depth->n * sizeof(u32);
+	buffer = kmalloc(buffer_size, GFP_KERNEL);
+	if (buffer == NULL)
+		return -ENOMEM;
+	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
+		kfree(buffer);
+		return -EFAULT;
+	}
+
+	mask_size = depth->n * sizeof(u8);
+	if (depth->mask) {
+		mask = kmalloc(mask_size, GFP_KERNEL);
+		if (mask == NULL) {
+			kfree(buffer);
+			return -ENOMEM;
+		}
+		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
+			kfree(buffer);
+			kfree(mask);
+			return -EFAULT;
+		}
+
+		for (i = 0; i < count; i++, x++) {
+			if (mask[i]) {
+				BEGIN_RING(6);
+
+				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+					 R128_GMC_BRUSH_SOLID_COLOR |
+					 (dev_priv->depth_fmt << 8) |
+					 R128_GMC_SRC_DATATYPE_COLOR |
+					 R128_ROP3_P |
+					 R128_GMC_CLR_CMP_CNTL_DIS |
+					 R128_GMC_WR_MSK_DIS);
+
+				OUT_RING(dev_priv->depth_pitch_offset_c);
+				OUT_RING(buffer[i]);
+
+				OUT_RING((x << 16) | y);
+				OUT_RING((1 << 16) | 1);
+
+				ADVANCE_RING();
+			}
+		}
+
+		kfree(mask);
+	} else {
+		for (i = 0; i < count; i++, x++) {
+			BEGIN_RING(6);
+
+			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+				 R128_GMC_BRUSH_SOLID_COLOR |
+				 (dev_priv->depth_fmt << 8) |
+				 R128_GMC_SRC_DATATYPE_COLOR |
+				 R128_ROP3_P |
+				 R128_GMC_CLR_CMP_CNTL_DIS |
+				 R128_GMC_WR_MSK_DIS);
+
+			OUT_RING(dev_priv->depth_pitch_offset_c);
+			OUT_RING(buffer[i]);
+
+			OUT_RING((x << 16) | y);
+			OUT_RING((1 << 16) | 1);
+
+			ADVANCE_RING();
+		}
+	}
+
+	kfree(buffer);
+
+	return 0;
+}
+
+static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
+					  drm_r128_depth_t *depth)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int count, *x, *y;
+	u32 *buffer;
+	u8 *mask;
+	int i, xbuf_size, ybuf_size, buffer_size, mask_size;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	count = depth->n;
+	if (count > 4096 || count <= 0)
+		return -EMSGSIZE;
+
+	xbuf_size = count * sizeof(*x);
+	ybuf_size = count * sizeof(*y);
+	x = kmalloc(xbuf_size, GFP_KERNEL);
+	if (x == NULL)
+		return -ENOMEM;
+	y = kmalloc(ybuf_size, GFP_KERNEL);
+	if (y == NULL) {
+		kfree(x);
+		return -ENOMEM;
+	}
+	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
+		kfree(x);
+		kfree(y);
+		return -EFAULT;
+	}
+	if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
+		kfree(x);
+		kfree(y);
+		return -EFAULT;
+	}
+
+	buffer_size = depth->n * sizeof(u32);
+	buffer = kmalloc(buffer_size, GFP_KERNEL);
+	if (buffer == NULL) {
+		kfree(x);
+		kfree(y);
+		return -ENOMEM;
+	}
+	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
+		kfree(x);
+		kfree(y);
+		kfree(buffer);
+		return -EFAULT;
+	}
+
+	if (depth->mask) {
+		mask_size = depth->n * sizeof(u8);
+		mask = kmalloc(mask_size, GFP_KERNEL);
+		if (mask == NULL) {
+			kfree(x);
+			kfree(y);
+			kfree(buffer);
+			return -ENOMEM;
+		}
+		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
+			kfree(x);
+			kfree(y);
+			kfree(buffer);
+			kfree(mask);
+			return -EFAULT;
+		}
+
+		for (i = 0; i < count; i++) {
+			if (mask[i]) {
+				BEGIN_RING(6);
+
+				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+					 R128_GMC_BRUSH_SOLID_COLOR |
+					 (dev_priv->depth_fmt << 8) |
+					 R128_GMC_SRC_DATATYPE_COLOR |
+					 R128_ROP3_P |
+					 R128_GMC_CLR_CMP_CNTL_DIS |
+					 R128_GMC_WR_MSK_DIS);
+
+				OUT_RING(dev_priv->depth_pitch_offset_c);
+				OUT_RING(buffer[i]);
+
+				OUT_RING((x[i] << 16) | y[i]);
+				OUT_RING((1 << 16) | 1);
+
+				ADVANCE_RING();
+			}
+		}
+
+		kfree(mask);
+	} else {
+		for (i = 0; i < count; i++) {
+			BEGIN_RING(6);
+
+			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+				 R128_GMC_BRUSH_SOLID_COLOR |
+				 (dev_priv->depth_fmt << 8) |
+				 R128_GMC_SRC_DATATYPE_COLOR |
+				 R128_ROP3_P |
+				 R128_GMC_CLR_CMP_CNTL_DIS |
+				 R128_GMC_WR_MSK_DIS);
+
+			OUT_RING(dev_priv->depth_pitch_offset_c);
+			OUT_RING(buffer[i]);
+
+			OUT_RING((x[i] << 16) | y[i]);
+			OUT_RING((1 << 16) | 1);
+
+			ADVANCE_RING();
+		}
+	}
+
+	kfree(x);
+	kfree(y);
+	kfree(buffer);
+
+	return 0;
+}
+
+static int r128_cce_dispatch_read_span(struct drm_device *dev,
+				       drm_r128_depth_t *depth)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int count, x, y;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	count = depth->n;
+	if (count > 4096 || count <= 0)
+		return -EMSGSIZE;
+
+	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
+		return -EFAULT;
+	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
+		return -EFAULT;
+
+	BEGIN_RING(7);
+
+	OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
+	OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
+		 R128_GMC_DST_PITCH_OFFSET_CNTL |
+		 R128_GMC_BRUSH_NONE |
+		 (dev_priv->depth_fmt << 8) |
+		 R128_GMC_SRC_DATATYPE_COLOR |
+		 R128_ROP3_S |
+		 R128_DP_SRC_SOURCE_MEMORY |
+		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
+
+	OUT_RING(dev_priv->depth_pitch_offset_c);
+	OUT_RING(dev_priv->span_pitch_offset_c);
+
+	OUT_RING((x << 16) | y);
+	OUT_RING((0 << 16) | 0);
+	OUT_RING((count << 16) | 1);
+
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static int r128_cce_dispatch_read_pixels(struct drm_device *dev,
+					 drm_r128_depth_t *depth)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int count, *x, *y;
+	int i, xbuf_size, ybuf_size;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	count = depth->n;
+	if (count > 4096 || count <= 0)
+		return -EMSGSIZE;
+
+	if (count > dev_priv->depth_pitch)
+		count = dev_priv->depth_pitch;
+
+	xbuf_size = count * sizeof(*x);
+	ybuf_size = count * sizeof(*y);
+	x = kmalloc(xbuf_size, GFP_KERNEL);
+	if (x == NULL)
+		return -ENOMEM;
+	y = kmalloc(ybuf_size, GFP_KERNEL);
+	if (y == NULL) {
+		kfree(x);
+		return -ENOMEM;
+	}
+	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
+		kfree(x);
+		kfree(y);
+		return -EFAULT;
+	}
+	if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
+		kfree(x);
+		kfree(y);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < count; i++) {
+		BEGIN_RING(7);
+
+		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
+		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
+			 R128_GMC_DST_PITCH_OFFSET_CNTL |
+			 R128_GMC_BRUSH_NONE |
+			 (dev_priv->depth_fmt << 8) |
+			 R128_GMC_SRC_DATATYPE_COLOR |
+			 R128_ROP3_S |
+			 R128_DP_SRC_SOURCE_MEMORY |
+			 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
+
+		OUT_RING(dev_priv->depth_pitch_offset_c);
+		OUT_RING(dev_priv->span_pitch_offset_c);
+
+		OUT_RING((x[i] << 16) | y[i]);
+		OUT_RING((i << 16) | 0);
+		OUT_RING((1 << 16) | 1);
+
+		ADVANCE_RING();
+	}
+
+	kfree(x);
+	kfree(y);
+
+	return 0;
+}
+
+/* ================================================================
+ * Polygon stipple
+ */
+
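+/* The stipple pattern is a fixed 32x32 bitmap: 32 registers starting at
+ * R128_BRUSH_DATA0, one 32-bit row each.
+ */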
+static void r128_cce_dispatch_stipple(struct drm_device *dev, u32 *stipple)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(33);
+
+	OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
+	for (i = 0; i < 32; i++)
+		OUT_RING(stipple[i]);
+
+	ADVANCE_RING();
+}
+
+/* ================================================================
+ * IOCTL functions
+ */
+
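+/* All ioctls below share the same prologue: the caller must hold the
+ * hardware lock and the CCE must be initialised, and ring space is
+ * checked before any packets are queued (the *_TEST_WITH_RETURN macros
+ * return an error otherwise).
+ */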
+static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_sarea_t *sarea_priv;
+	drm_r128_clear_t *clear = data;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	sarea_priv = dev_priv->sarea_priv;
+
+	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
+
+	r128_cce_dispatch_clear(dev, clear);
+	COMMIT_RING();
+
+	/* Make sure we restore the 3D state next time.
+	 */
+	dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;
+
+	return 0;
+}
+
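+/* Save the CRTC offset registers and point the CRTC at the front buffer
+ * before enabling flipping, so that r128_do_cleanup_pageflip() can
+ * restore the original scanout setup.
+ */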
+static int r128_do_init_pageflip(struct drm_device *dev)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
+	dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);
+
+	R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
+	R128_WRITE(R128_CRTC_OFFSET_CNTL,
+		   dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);
+
+	dev_priv->page_flipping = 1;
+	dev_priv->current_page = 0;
+	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
+
+	return 0;
+}
+
+static int r128_do_cleanup_pageflip(struct drm_device *dev)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
+	R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);
+
+	if (dev_priv->current_page != 0) {
+		r128_cce_dispatch_flip(dev);
+		COMMIT_RING();
+	}
+
+	dev_priv->page_flipping = 0;
+	return 0;
+}
+
+/* Swapping and flipping are different operations and need different
+ * ioctls.  They can and should be intermixed to support multiple 3D
+ * windows.
+ */
+
+static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (!dev_priv->page_flipping)
+		r128_do_init_pageflip(dev);
+
+	r128_cce_dispatch_flip(dev);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
+
+	r128_cce_dispatch_swap(dev);
+	dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
+					R128_UPLOAD_MASKS);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	drm_r128_vertex_t *vertex = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
+		  DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
+
+	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  vertex->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+	if (vertex->prim < 0 ||
+	    vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
+		DRM_ERROR("buffer prim %d\n", vertex->prim);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf = dma->buflist[vertex->idx];
+	buf_priv = buf->dev_private;
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
+		return -EINVAL;
+	}
+
+	buf->used = vertex->count;
+	buf_priv->prim = vertex->prim;
+	buf_priv->discard = vertex->discard;
+
+	r128_cce_dispatch_vertex(dev, buf);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	drm_r128_indices_t *elts = data;
+	int count;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
+		  elts->idx, elts->start, elts->end, elts->discard);
+
+	if (elts->idx < 0 || elts->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  elts->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+	if (elts->prim < 0 ||
+	    elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
+		DRM_ERROR("buffer prim %d\n", elts->prim);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf = dma->buflist[elts->idx];
+	buf_priv = buf->dev_private;
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", elts->idx);
+		return -EINVAL;
+	}
+
+	count = (elts->end - elts->start) / sizeof(u16);
+	elts->start -= R128_INDEX_PRIM_OFFSET;
+
+	if (elts->start & 0x7) {
+		DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
+		return -EINVAL;
+	}
+	if (elts->start < buf->used) {
+		DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
+		return -EINVAL;
+	}
+
+	buf->used = elts->end;
+	buf_priv->prim = elts->prim;
+	buf_priv->discard = elts->discard;
+
+	r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_blit_t *blit = data;
+	int ret;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx);
+
+	if (blit->idx < 0 || blit->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  blit->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	ret = r128_cce_dispatch_blit(dev, file_priv, blit);
+
+	COMMIT_RING();
+	return ret;
+}
+
+static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_depth_t *depth = data;
+	int ret;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	ret = -EINVAL;
+	switch (depth->func) {
+	case R128_WRITE_SPAN:
+		ret = r128_cce_dispatch_write_span(dev, depth);
+		break;
+	case R128_WRITE_PIXELS:
+		ret = r128_cce_dispatch_write_pixels(dev, depth);
+		break;
+	case R128_READ_SPAN:
+		ret = r128_cce_dispatch_read_span(dev, depth);
+		break;
+	case R128_READ_PIXELS:
+		ret = r128_cce_dispatch_read_pixels(dev, depth);
+		break;
+	}
+
+	COMMIT_RING();
+	return ret;
+}
+
+static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_stipple_t *stipple = data;
+	u32 mask[32];
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+		return -EFAULT;
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	r128_cce_dispatch_stipple(dev, mask);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	drm_r128_indirect_t *indirect = data;
+#if 0
+	RING_LOCALS;
+#endif
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
+		  indirect->idx, indirect->start, indirect->end,
+		  indirect->discard);
+
+	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  indirect->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+
+	buf = dma->buflist[indirect->idx];
+	buf_priv = buf->dev_private;
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", indirect->idx);
+		return -EINVAL;
+	}
+
+	if (indirect->start < buf->used) {
+		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
+			  indirect->start, buf->used);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf->used = indirect->end;
+	buf_priv->discard = indirect->discard;
+
+#if 0
+	/* Wait for the 3D stream to idle before the indirect buffer
+	 * containing 2D acceleration commands is processed.
+	 */
+	BEGIN_RING(2);
+	RADEON_WAIT_UNTIL_3D_IDLE();
+	ADVANCE_RING();
+#endif
+
+	/* Dispatch the indirect buffer full of commands from the
+	 * X server.  This is insecure and is thus only available to
+	 * privileged clients.
+	 */
+	r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_getparam_t *param = data;
+	int value;
+
+	DEV_INIT_TEST_WITH_RETURN(dev_priv);
+
+	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+	switch (param->param) {
+	case R128_PARAM_IRQ_NR:
+		value = drm_dev_to_irq(dev);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+	if (dev->dev_private) {
+		drm_r128_private_t *dev_priv = dev->dev_private;
+		if (dev_priv->page_flipping)
+			r128_do_cleanup_pageflip(dev);
+	}
+}
+
+void r128_driver_lastclose(struct drm_device *dev)
+{
+	r128_do_cleanup_cce(dev);
+}
+
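+/* Table mapping R128_* ioctl numbers to handlers.  Entries that can
+ * (re)program the engine or dispatch raw command buffers are restricted
+ * to the X server (DRM_MASTER|DRM_ROOT_ONLY); the rest only require an
+ * authenticated client (DRM_AUTH).
+ */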
+struct drm_ioctl_desc r128_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
+};
+
+int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
diff --git a/linux-imx/drivers/gpu/drm/radeon/Kconfig b/linux-imx/drivers/gpu/drm/radeon/Kconfig
new file mode 100644
index 0000000..970f8e9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/Kconfig
@@ -0,0 +1,8 @@
+config DRM_RADEON_UMS
+	bool "Enable userspace modesetting on radeon (DEPRECATED)"
+	depends on DRM_RADEON
+	help
+	  Choose this option if you still need userspace modesetting.
+
+	  Userspace modesetting has been deprecated for quite some time now, so
+	  enable this only if you have ancient versions of the DDX drivers.
diff --git a/linux-imx/drivers/gpu/drm/radeon/Makefile b/linux-imx/drivers/gpu/drm/radeon/Makefile
new file mode 100644
index 0000000..86c5e36
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/Makefile
@@ -0,0 +1,87 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+hostprogs-y := mkregtable
+clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
+
+quiet_cmd_mkregtable = MKREGTABLE $@
+      cmd_mkregtable = $(obj)/mkregtable $< > $@
+
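+# Each *_reg_safe.h header below is generated from the matching register
+# list in reg_srcs/ by the mkregtable host program; if_changed re-runs
+# the command whenever the register list or the tool itself changes.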
+$(obj)/rn50_reg_safe.h: $(src)/reg_srcs/rn50 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/r100_reg_safe.h: $(src)/reg_srcs/r100 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/r200_reg_safe.h: $(src)/reg_srcs/r200 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/cayman_reg_safe.h: $(src)/reg_srcs/cayman $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
+$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
+
+$(obj)/r200.o: $(obj)/r200_reg_safe.h
+
+$(obj)/rv515.o: $(obj)/rv515_reg_safe.h
+
+$(obj)/r300.o: $(obj)/r300_reg_safe.h
+
+$(obj)/r420.o: $(obj)/r420_reg_safe.h
+
+$(obj)/rs600.o: $(obj)/rs600_reg_safe.h
+
+$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
+
+$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
+
+radeon-y := radeon_drv.o
+
+# add UMS driver
+radeon-$(CONFIG_DRM_RADEON_UMS) += radeon_cp.o radeon_state.o radeon_mem.o \
+	radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o
+
+# add KMS driver
+radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
+	radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
+	atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
+	radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
+	radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
+	radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
+	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
+	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
+	r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
+	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
+	evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
+	atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
+	si_blit_shaders.o radeon_prime.o radeon_uvd.o
+
+radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
+radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
+radeon-$(CONFIG_ACPI) += radeon_acpi.o
+
+obj-$(CONFIG_DRM_RADEON) += radeon.o
+
+CFLAGS_radeon_trace_points.o := -I$(src)
diff --git a/linux-imx/drivers/gpu/drm/radeon/ObjectID.h b/linux-imx/drivers/gpu/drm/radeon/ObjectID.h
new file mode 100644
index 0000000..ca4b038
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/ObjectID.h
@@ -0,0 +1,696 @@
+/*
+* Copyright 2006-2007 Advanced Micro Devices, Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*/
+/* based on stg/asic_reg/drivers/inc/asic_reg/ObjectID.h ver 23 */
+
+#ifndef _OBJECTID_H
+#define _OBJECTID_H
+
+#if defined(_X86_)
+#pragma pack(1)
+#endif
+
+/****************************************************/
+/* Graphics Object Type Definition                  */
+/****************************************************/
+#define GRAPH_OBJECT_TYPE_NONE                    0x0
+#define GRAPH_OBJECT_TYPE_GPU                     0x1
+#define GRAPH_OBJECT_TYPE_ENCODER                 0x2
+#define GRAPH_OBJECT_TYPE_CONNECTOR               0x3
+#define GRAPH_OBJECT_TYPE_ROUTER                  0x4
+/* deleted */
+#define GRAPH_OBJECT_TYPE_DISPLAY_PATH            0x6
+#define GRAPH_OBJECT_TYPE_GENERIC                 0x7
+
+/****************************************************/
+/* Encoder Object ID Definition                     */
+/****************************************************/
+#define ENCODER_OBJECT_ID_NONE                    0x00
+
+/* Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_INTERNAL_LVDS           0x01
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS1          0x02
+#define ENCODER_OBJECT_ID_INTERNAL_TMDS2          0x03
+#define ENCODER_OBJECT_ID_INTERNAL_DAC1           0x04
+#define ENCODER_OBJECT_ID_INTERNAL_DAC2           0x05     /* TV/CV DAC */
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOA          0x06
+#define ENCODER_OBJECT_ID_INTERNAL_SDVOB          0x07
+
+/* External Third Party Encoders */
+#define ENCODER_OBJECT_ID_SI170B                  0x08
+#define ENCODER_OBJECT_ID_CH7303                  0x09
+#define ENCODER_OBJECT_ID_CH7301                  0x0A
+#define ENCODER_OBJECT_ID_INTERNAL_DVO1           0x0B    /* This belongs to Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA          0x0C
+#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB          0x0D
+#define ENCODER_OBJECT_ID_TITFP513                0x0E
+#define ENCODER_OBJECT_ID_INTERNAL_LVTM1          0x0F    /* not used for Radeon */
+#define ENCODER_OBJECT_ID_VT1623                  0x10
+#define ENCODER_OBJECT_ID_HDMI_SI1930             0x11
+#define ENCODER_OBJECT_ID_HDMI_INTERNAL           0x12
+#define ENCODER_OBJECT_ID_ALMOND                  0x22
+#define ENCODER_OBJECT_ID_TRAVIS                  0x23
+#define ENCODER_OBJECT_ID_NUTMEG                  0x22
+/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1   0x13
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1    0x14
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1    0x15
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2    0x16  /* Shared with CV/TV and CRT */
+#define ENCODER_OBJECT_ID_SI178                   0x17  /* External TMDS (dual link, no HDCP) */
+#define ENCODER_OBJECT_ID_MVPU_FPGA               0x18  /* MVPU FPGA chip */
+#define ENCODER_OBJECT_ID_INTERNAL_DDI            0x19
+#define ENCODER_OBJECT_ID_VT1625                  0x1A
+#define ENCODER_OBJECT_ID_HDMI_SI1932             0x1B
+#define ENCODER_OBJECT_ID_DP_AN9801               0x1C
+#define ENCODER_OBJECT_ID_DP_DP501                0x1D
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY         0x1E
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA   0x1F
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1        0x20
+#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2        0x21
+#define ENCODER_OBJECT_ID_INTERNAL_VCE            0x24
+
+#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO    0xFF
+
+/****************************************************/
+/* Connector Object ID Definition                   */
+/****************************************************/
+#define CONNECTOR_OBJECT_ID_NONE                  0x00
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I     0x01
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I       0x02
+#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D     0x03
+#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D       0x04
+#define CONNECTOR_OBJECT_ID_VGA                   0x05
+#define CONNECTOR_OBJECT_ID_COMPOSITE             0x06
+#define CONNECTOR_OBJECT_ID_SVIDEO                0x07
+#define CONNECTOR_OBJECT_ID_YPbPr                 0x08
+#define CONNECTOR_OBJECT_ID_D_CONNECTOR           0x09
+#define CONNECTOR_OBJECT_ID_9PIN_DIN              0x0A  /* Supports both CV & TV */
+#define CONNECTOR_OBJECT_ID_SCART                 0x0B
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A           0x0C
+#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B           0x0D
+#define CONNECTOR_OBJECT_ID_LVDS                  0x0E
+#define CONNECTOR_OBJECT_ID_7PIN_DIN              0x0F
+#define CONNECTOR_OBJECT_ID_PCIE_CONNECTOR        0x10
+#define CONNECTOR_OBJECT_ID_CROSSFIRE             0x11
+#define CONNECTOR_OBJECT_ID_HARDCODE_DVI          0x12
+#define CONNECTOR_OBJECT_ID_DISPLAYPORT           0x13
+#define CONNECTOR_OBJECT_ID_eDP                   0x14
+#define CONNECTOR_OBJECT_ID_MXM                   0x15
+#define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
+
+/* deleted */
+
+/****************************************************/
+/* Router Object ID Definition                      */
+/****************************************************/
+#define ROUTER_OBJECT_ID_NONE                     0x00
+#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL        0x01
+
+/****************************************************/
+/* Generic Object ID Definition                     */
+/****************************************************/
+#define GENERIC_OBJECT_ID_NONE                    0x00
+#define GENERIC_OBJECT_ID_GLSYNC                  0x01
+#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE        0x02
+#define GENERIC_OBJECT_ID_MXM_OPM                 0x03
+/* This object can show up in the Misc Object table; it follows the
+ * ATOM_OBJECT format and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for
+ * the stereo pin.
+ */
+#define GENERIC_OBJECT_ID_STEREO_PIN              0x04
+
+/****************************************************/
+/* Graphics Object ENUM ID Definition               */
+/****************************************************/
+#define GRAPH_OBJECT_ENUM_ID1                     0x01
+#define GRAPH_OBJECT_ENUM_ID2                     0x02
+#define GRAPH_OBJECT_ENUM_ID3                     0x03
+#define GRAPH_OBJECT_ENUM_ID4                     0x04
+#define GRAPH_OBJECT_ENUM_ID5                     0x05
+#define GRAPH_OBJECT_ENUM_ID6                     0x06
+#define GRAPH_OBJECT_ENUM_ID7                     0x07
+
+/****************************************************/
+/* Graphics Object ID Bit definition                */
+/****************************************************/
+#define OBJECT_ID_MASK                            0x00FF
+#define ENUM_ID_MASK                              0x0700
+#define RESERVED1_ID_MASK                         0x0800
+#define OBJECT_TYPE_MASK                          0x7000
+#define RESERVED2_ID_MASK                         0x8000
+
+#define OBJECT_ID_SHIFT                           0x00
+#define ENUM_ID_SHIFT                             0x08
+#define OBJECT_TYPE_SHIFT                         0x0C
+
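+/* A graphics object ID is a 16-bit value laid out as
+ *   [15] reserved | [14:12] type | [11] reserved | [10:8] enum | [7:0] id
+ * For example, ENCODER_INTERNAL_LVDS_ENUM_ID1 below works out to
+ *   (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT) |
+ *   (GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT) |
+ *   (ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT) == 0x2101,
+ * matching the legacy literal kept in the comment block further down.
+ */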
+
+/****************************************************/
+/* Graphics Object family definition                */
+/****************************************************/
+#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
+                                                                           GRAPHICS_OBJECT_ID   << OBJECT_ID_SHIFT)
+/****************************************************/
+/* GPU Object ID definition - Shared with BIOS      */
+/****************************************************/
+#define GPU_ENUM_ID1                            ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
+
+/****************************************************/
+/* Encoder Object ID definition - Shared with BIOS  */
+/****************************************************/
+/*
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1        0x2101      
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1       0x2102
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1       0x2103
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1        0x2104
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1        0x2105
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1       0x2106
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1       0x2107
+#define ENCODER_SIL170B_ENUM_ID1              0x2108  
+#define ENCODER_CH7303_ENUM_ID1               0x2109
+#define ENCODER_CH7301_ENUM_ID1               0x210A
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1        0x210B
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1       0x210C
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1       0x210D
+#define ENCODER_TITFP513_ENUM_ID1             0x210E
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1       0x210F
+#define ENCODER_VT1623_ENUM_ID1               0x2110
+#define ENCODER_HDMI_SI1930_ENUM_ID1          0x2111
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1        0x2112
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1   0x2113
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1    0x2114
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1    0x2115
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    0x2116  
+#define ENCODER_SI178_ENUM_ID1                   0x2117 
+#define ENCODER_MVPU_FPGA_ENUM_ID1               0x2118
+#define ENCODER_INTERNAL_DDI_ENUM_ID1            0x2119
+#define ENCODER_VT1625_ENUM_ID1                  0x211A
+#define ENCODER_HDMI_SI1932_ENUM_ID1             0x211B
+#define ENCODER_DP_AN9801_ENUM_ID1               0x211C
+#define ENCODER_DP_DP501_ENUM_ID1                0x211D
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1         0x211E
+*/
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID2    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+#define ENCODER_SIL170B_ENUM_ID1           ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7303_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7301_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_TITFP513_ENUM_ID1          ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1623_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1930_ENUM_ID1       ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT)  // Shared with CV/TV and CRT
+
+#define ENCODER_SI178_ENUM_ID1                    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)  
+
+#define ENCODER_MVPU_FPGA_ENUM_ID1                ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DDI_ENUM_ID1     (  GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT) 
+
+#define ENCODER_VT1625_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1932_ENUM_ID1       ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_DP501_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_AN9801_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)  
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID2                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID2                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_NUTMEG_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
+
+#define ENCODER_VCE_ENUM_ID1                     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_INTERNAL_VCE << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Connector Object ID definition - Shared with BIOS */
+/****************************************************/
+/*
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1        0x3101
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1          0x3102
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1        0x3103
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1          0x3104
+#define CONNECTOR_VGA_ENUM_ID1                      0x3105
+#define CONNECTOR_COMPOSITE_ENUM_ID1                0x3106
+#define CONNECTOR_SVIDEO_ENUM_ID1                   0x3107
+#define CONNECTOR_YPbPr_ENUM_ID1                    0x3108
+#define CONNECTOR_D_CONNECTOR_ENUM_ID1              0x3109
+#define CONNECTOR_9PIN_DIN_ENUM_ID1                 0x310A
+#define CONNECTOR_SCART_ENUM_ID1                    0x310B
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1              0x310C
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1              0x310D
+#define CONNECTOR_LVDS_ENUM_ID1                     0x310E
+#define CONNECTOR_7PIN_DIN_ENUM_ID1                 0x310F
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1           0x3110
+*/
+#define CONNECTOR_LVDS_ENUM_ID1                ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_ENUM_ID2                ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID1           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID2           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID1              ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID2              ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID1               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID2               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID1               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID2               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1      ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2      ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID1           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID2           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID1        ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID2        ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID3         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID4         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID5         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID6         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_MXM_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_A
+
+#define CONNECTOR_MXM_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_B
+
+#define CONNECTOR_MXM_ENUM_ID3                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_C
+
+#define CONNECTOR_MXM_ENUM_ID4                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_D
+
+#define CONNECTOR_MXM_ENUM_ID5                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_LVDS_TXxx
+
+#define CONNECTOR_MXM_ENUM_ID6                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_LVDS_UXxx
+
+#define CONNECTOR_MXM_ENUM_ID7                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DAC
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Router Object ID definition - Shared with BIOS   */
+/****************************************************/
+#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1      ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
+                                                GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
+
+/* deleted */
+
+/****************************************************/
+/* Generic Object ID definition - Shared with BIOS  */
+/****************************************************/
+#define GENERICOBJECT_GLSYNC_ENUM_ID1           (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_MXM_OPM_ENUM_ID1           (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_STEREO_PIN_ENUM_ID1        (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+
+/****************************************************/
+/* Object Cap definition - Shared with BIOS         */
+/****************************************************/
+#define GRAPHICS_OBJECT_CAP_I2C                 0x00000001L
+#define GRAPHICS_OBJECT_CAP_TABLE_ID            0x00000002L
+
+
+#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID                   0x01
+#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID     0x02
+#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID    0x03
+
+#if defined(_X86_)
+#pragma pack()
+#endif
+
+#endif  /* GRAPHICTYPE */
+
+
+
+
diff --git a/linux-imx/drivers/gpu/drm/radeon/atom-bits.h b/linux-imx/drivers/gpu/drm/radeon/atom-bits.h
new file mode 100644
index 0000000..e8fae5c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/atom-bits.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_BITS_H
+#define ATOM_BITS_H
+
+static inline uint8_t get_u8(void *bios, int ptr)
+{
+    return ((unsigned char *)bios)[ptr];
+}
+#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
+#define CU8(ptr) get_u8(ctx->bios, (ptr))
+static inline uint16_t get_u16(void *bios, int ptr)
+{
+    return get_u8(bios, ptr) | (((uint16_t)get_u8(bios, ptr + 1)) << 8);
+}
+#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
+#define CU16(ptr) get_u16(ctx->bios, (ptr))
+static inline uint32_t get_u32(void *bios, int ptr)
+{
+    return get_u16(bios, ptr) | (((uint32_t)get_u16(bios, ptr + 2)) << 16);
+}
+#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
+#define CU32(ptr) get_u32(ctx->bios, (ptr))
+#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
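+
+/* Illustrative note (added, not in the original header): ATOM tables are
+ * little-endian, so with bios[p] == 0x34 and bios[p + 1] == 0x12,
+ * get_u16(bios, p) returns 0x1234, and get_u32() composes two such words.
+ * The U8/U16/U32 and CU8/CU16/CU32 macros differ only in whether the caller
+ * holds an atom_exec_context (ctx->ctx->bios) or a bare atom_context
+ * (ctx->bios). */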
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/atom-names.h b/linux-imx/drivers/gpu/drm/radeon/atom-names.h
new file mode 100644
index 0000000..6f907a5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/atom-names.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_NAMES_H
+#define ATOM_NAMES_H
+
+#include "atom.h"
+
+#ifdef ATOM_DEBUG
+
+#define ATOM_OP_NAMES_CNT 123
+static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
+"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
+"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
+"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
+"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
+"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
+"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
+"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
+"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
+"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
+"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
+"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
+"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
+"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
+"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
+"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
+"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
+"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
+"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
+"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
+"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
+"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
+"DEBUG", "CTB_DS",
+};
+
+#define ATOM_TABLE_NAMES_CNT 74
+static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
+"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
+"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
+"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
+"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
+"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
+"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
+"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
+"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
+"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
+"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
+"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
+"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
+"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
+"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
+"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
+"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
+"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
+"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
+"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
+"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
+"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
+"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
+"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
+"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
+"MemoryDeviceInit", "EnableYUV",
+};
+
+#define ATOM_IO_NAMES_CNT 5
+static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
+"MM", "PLL", "MC", "PCIE", "PCIE PORT",
+};
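+
+/* Illustrative note (added, not in the original header): when ATOM_DEBUG
+ * tracing is enabled these tables are indexed directly by opcode, command
+ * table number, or IO mode, e.g. atom_op_names[3] == "MOVE_WS". */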
+
+#else
+
+#define ATOM_OP_NAMES_CNT 0
+#define ATOM_TABLE_NAMES_CNT 0
+#define ATOM_IO_NAMES_CNT 0
+
+#endif
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/atom-types.h b/linux-imx/drivers/gpu/drm/radeon/atom-types.h
new file mode 100644
index 0000000..1125b86
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/atom-types.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Dave Airlie
+ */
+
+#ifndef ATOM_TYPES_H
+#define ATOM_TYPES_H
+
+/* sync atom types to kernel types */
+
+typedef uint16_t USHORT;
+typedef uint32_t ULONG;
+typedef uint8_t UCHAR;
+
+
+#ifndef ATOM_BIG_ENDIAN
+#if defined(__BIG_ENDIAN)
+#define ATOM_BIG_ENDIAN 1
+#else
+#define ATOM_BIG_ENDIAN 0
+#endif
+#endif
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/atom.c b/linux-imx/drivers/gpu/drm/radeon/atom.c
new file mode 100644
index 0000000..15da7ef
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/atom.c
@@ -0,0 +1,1416 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define ATOM_DEBUG
+
+#include "atom.h"
+#include "atom-names.h"
+#include "atom-bits.h"
+#include "radeon.h"
+
+#define ATOM_COND_ABOVE		0
+#define ATOM_COND_ABOVEOREQUAL	1
+#define ATOM_COND_ALWAYS	2
+#define ATOM_COND_BELOW		3
+#define ATOM_COND_BELOWOREQUAL	4
+#define ATOM_COND_EQUAL		5
+#define ATOM_COND_NOTEQUAL	6
+
+#define ATOM_PORT_ATI	0
+#define ATOM_PORT_PCI	1
+#define ATOM_PORT_SYSIO	2
+
+#define ATOM_UNIT_MICROSEC	0
+#define ATOM_UNIT_MILLISEC	1
+
+#define PLL_INDEX	2
+#define PLL_DATA	3
+
+typedef struct {
+	struct atom_context *ctx;
+	uint32_t *ps, *ws;
+	int ps_shift;
+	uint16_t start;
+	unsigned last_jump;
+	unsigned long last_jump_jiffies;
+	bool abort;
+} atom_exec_context;
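+
+/* Note (added for clarity, not in the original source): one atom_exec_context
+ * exists per table invocation, including nested CALL_TABLE calls; ps/ws are
+ * the parameter-space and workspace arrays, and last_jump/last_jump_jiffies
+ * let the interpreter detect and abort runaway backward-jump loops. */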
+
+int atom_debug = 0;
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+
+static uint32_t atom_arg_mask[8] = {
+	0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
+	0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
+};
+static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
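+
+/* Worked example (illustrative, added note): align index 2 (ATOM_SRC_WORD8)
+ * selects bits [23:8] of a source: (0xAABBCCDD & 0x00FFFF00) >> 8 == 0xBBCC.
+ * atom_get_src_int() below applies the mask first, then the shift. */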
+
+static int atom_dst_to_src[8][4] = {
+	/* translate destination alignment field to the source alignment encoding */
+	{0, 0, 0, 0},
+	{1, 2, 3, 0},
+	{1, 2, 3, 0},
+	{1, 2, 3, 0},
+	{4, 5, 6, 7},
+	{4, 5, 6, 7},
+	{4, 5, 6, 7},
+	{4, 5, 6, 7},
+};
+static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
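+
+/* Illustrative note (added): atom_dst_to_src maps a destination alignment
+ * (row) plus its 2-bit size selector (column) to a source alignment code;
+ * e.g. a word destination (row 1) with selector 2 yields 3 (ATOM_SRC_WORD16,
+ * bits [31:16]).  atom_def_dst supplies the default selector for opcodes
+ * such as CLEAR that encode only the alignment (see atom_op_clear below). */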
+
+static int debug_depth = 0;
+#ifdef ATOM_DEBUG
+static void debug_print_spaces(int n)
+{
+	while (n--)
+		printk("   ");
+}
+
+#define DEBUG(...) do { if (atom_debug) printk(KERN_DEBUG __VA_ARGS__); } while (0)
+#define SDEBUG(...) do { if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } } while (0)
+#else
+#define DEBUG(...) do { } while (0)
+#define SDEBUG(...) do { } while (0)
+#endif
+
+static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
+				 uint32_t index, uint32_t data)
+{
+	struct radeon_device *rdev = ctx->card->dev->dev_private;
+	uint32_t temp = 0xCDCDCDCD;
+
+	while (1)
+		switch (CU8(base)) {
+		case ATOM_IIO_NOP:
+			base++;
+			break;
+		case ATOM_IIO_READ:
+			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+			base += 3;
+			break;
+		case ATOM_IIO_WRITE:
+			if (rdev->family == CHIP_RV515)
+				(void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
+			base += 3;
+			break;
+		case ATOM_IIO_CLEAR:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2));
+			base += 3;
+			break;
+		case ATOM_IIO_SET:
+			temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2);
+			base += 3;
+			break;
+		case ATOM_IIO_MOVE_INDEX:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
+			temp |= ((index >> CU8(base + 2)) &
+				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_MOVE_DATA:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
+			temp |= ((data >> CU8(base + 2)) &
+				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_MOVE_ATTR:
+			temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3));
+			temp |= ((ctx->io_attr >> CU8(base + 2)) &
+				 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3);
+			base += 4;
+			break;
+		case ATOM_IIO_END:
+			return temp;
+		default:
+			printk(KERN_INFO "Unknown IIO opcode.\n");
+			return 0;
+		}
+}
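+
+/* Illustrative note (added, not in the original source): the smallest valid
+ * IIO program is ATOM_IIO_READ <16-bit reg>, ATOM_IIO_END — load a register
+ * into temp and return it.  Each opcode advances 'base' past its own
+ * operands, which is why the case arms step base by different amounts. */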
+
+static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
+				 int *ptr, uint32_t *saved, int print)
+{
+	uint32_t idx, val = 0xCDCDCDCD, align, arg;
+	struct atom_context *gctx = ctx->ctx;
+	arg = attr & 7;
+	align = (attr >> 3) & 7;
+	switch (arg) {
+	case ATOM_ARG_REG:
+		idx = U16(*ptr);
+		(*ptr) += 2;
+		if (print)
+			DEBUG("REG[0x%04X]", idx);
+		idx += gctx->reg_block;
+		switch (gctx->io_mode) {
+		case ATOM_IO_MM:
+			val = gctx->card->reg_read(gctx->card, idx);
+			break;
+		case ATOM_IO_PCI:
+			printk(KERN_INFO
+			       "PCI registers are not implemented.\n");
+			return 0;
+		case ATOM_IO_SYSIO:
+			printk(KERN_INFO
+			       "SYSIO registers are not implemented.\n");
+			return 0;
+		default:
+			if (!(gctx->io_mode & 0x80)) {
+				printk(KERN_INFO "Bad IO mode.\n");
+				return 0;
+			}
+			if (!gctx->iio[gctx->io_mode & 0x7F]) {
+				printk(KERN_INFO
+				       "Undefined indirect IO read method %d.\n",
+				       gctx->io_mode & 0x7F);
+				return 0;
+			}
+			val =
+			    atom_iio_execute(gctx,
+					     gctx->iio[gctx->io_mode & 0x7F],
+					     idx, 0);
+		}
+		break;
+	case ATOM_ARG_PS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		/* get_unaligned_le32 avoids unaligned accesses from atombios
+		 * tables, noticed on a DEC Alpha. */
+		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+		if (print)
+			DEBUG("PS[0x%02X,0x%04X]", idx, val);
+		break;
+	case ATOM_ARG_WS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if (print)
+			DEBUG("WS[0x%02X]", idx);
+		switch (idx) {
+		case ATOM_WS_QUOTIENT:
+			val = gctx->divmul[0];
+			break;
+		case ATOM_WS_REMAINDER:
+			val = gctx->divmul[1];
+			break;
+		case ATOM_WS_DATAPTR:
+			val = gctx->data_block;
+			break;
+		case ATOM_WS_SHIFT:
+			val = gctx->shift;
+			break;
+		case ATOM_WS_OR_MASK:
+			val = 1 << gctx->shift;
+			break;
+		case ATOM_WS_AND_MASK:
+			val = ~(1 << gctx->shift);
+			break;
+		case ATOM_WS_FB_WINDOW:
+			val = gctx->fb_base;
+			break;
+		case ATOM_WS_ATTRIBUTES:
+			val = gctx->io_attr;
+			break;
+		case ATOM_WS_REGPTR:
+			val = gctx->reg_block;
+			break;
+		default:
+			val = ctx->ws[idx];
+		}
+		break;
+	case ATOM_ARG_ID:
+		idx = U16(*ptr);
+		(*ptr) += 2;
+		if (print) {
+			if (gctx->data_block)
+				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
+			else
+				DEBUG("ID[0x%04X]", idx);
+		}
+		val = U32(idx + gctx->data_block);
+		break;
+	case ATOM_ARG_FB:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
+				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+			val = 0;
+		} else
+			val = gctx->scratch[(gctx->fb_base / 4) + idx];
+		if (print)
+			DEBUG("FB[0x%02X]", idx);
+		break;
+	case ATOM_ARG_IMM:
+		switch (align) {
+		case ATOM_SRC_DWORD:
+			val = U32(*ptr);
+			(*ptr) += 4;
+			if (print)
+				DEBUG("IMM 0x%08X\n", val);
+			return val;
+		case ATOM_SRC_WORD0:
+		case ATOM_SRC_WORD8:
+		case ATOM_SRC_WORD16:
+			val = U16(*ptr);
+			(*ptr) += 2;
+			if (print)
+				DEBUG("IMM 0x%04X\n", val);
+			return val;
+		case ATOM_SRC_BYTE0:
+		case ATOM_SRC_BYTE8:
+		case ATOM_SRC_BYTE16:
+		case ATOM_SRC_BYTE24:
+			val = U8(*ptr);
+			(*ptr)++;
+			if (print)
+				DEBUG("IMM 0x%02X\n", val);
+			return val;
+		}
+		return 0;
+	case ATOM_ARG_PLL:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if (print)
+			DEBUG("PLL[0x%02X]", idx);
+		val = gctx->card->pll_read(gctx->card, idx);
+		break;
+	case ATOM_ARG_MC:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if (print)
+			DEBUG("MC[0x%02X]", idx);
+		val = gctx->card->mc_read(gctx->card, idx);
+		break;
+	}
+	if (saved)
+		*saved = val;
+	val &= atom_arg_mask[align];
+	val >>= atom_arg_shift[align];
+	if (print)
+		switch (align) {
+		case ATOM_SRC_DWORD:
+			DEBUG(".[31:0] -> 0x%08X\n", val);
+			break;
+		case ATOM_SRC_WORD0:
+			DEBUG(".[15:0] -> 0x%04X\n", val);
+			break;
+		case ATOM_SRC_WORD8:
+			DEBUG(".[23:8] -> 0x%04X\n", val);
+			break;
+		case ATOM_SRC_WORD16:
+			DEBUG(".[31:16] -> 0x%04X\n", val);
+			break;
+		case ATOM_SRC_BYTE0:
+			DEBUG(".[7:0] -> 0x%02X\n", val);
+			break;
+		case ATOM_SRC_BYTE8:
+			DEBUG(".[15:8] -> 0x%02X\n", val);
+			break;
+		case ATOM_SRC_BYTE16:
+			DEBUG(".[23:16] -> 0x%02X\n", val);
+			break;
+		case ATOM_SRC_BYTE24:
+			DEBUG(".[31:24] -> 0x%02X\n", val);
+			break;
+		}
+	return val;
+}
+
+static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+	uint32_t align = (attr >> 3) & 7, arg = attr & 7;
+	switch (arg) {
+	case ATOM_ARG_REG:
+	case ATOM_ARG_ID:
+		(*ptr) += 2;
+		break;
+	case ATOM_ARG_PLL:
+	case ATOM_ARG_MC:
+	case ATOM_ARG_PS:
+	case ATOM_ARG_WS:
+	case ATOM_ARG_FB:
+		(*ptr)++;
+		break;
+	case ATOM_ARG_IMM:
+		switch (align) {
+		case ATOM_SRC_DWORD:
+			(*ptr) += 4;
+			return;
+		case ATOM_SRC_WORD0:
+		case ATOM_SRC_WORD8:
+		case ATOM_SRC_WORD16:
+			(*ptr) += 2;
+			return;
+		case ATOM_SRC_BYTE0:
+		case ATOM_SRC_BYTE8:
+		case ATOM_SRC_BYTE16:
+		case ATOM_SRC_BYTE24:
+			(*ptr)++;
+			return;
+		}
+		return;
+	}
+}
+
+static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
+{
+	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
+}
+
+static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
+{
+	uint32_t val = 0xCDCDCDCD;
+
+	switch (align) {
+	case ATOM_SRC_DWORD:
+		val = U32(*ptr);
+		(*ptr) += 4;
+		break;
+	case ATOM_SRC_WORD0:
+	case ATOM_SRC_WORD8:
+	case ATOM_SRC_WORD16:
+		val = U16(*ptr);
+		(*ptr) += 2;
+		break;
+	case ATOM_SRC_BYTE0:
+	case ATOM_SRC_BYTE8:
+	case ATOM_SRC_BYTE16:
+	case ATOM_SRC_BYTE24:
+		val = U8(*ptr);
+		(*ptr)++;
+		break;
+	}
+	return val;
+}
+
+static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+			     int *ptr, uint32_t *saved, int print)
+{
+	return atom_get_src_int(ctx,
+				arg | (atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3),
+				ptr, saved, print);
+}
+
+static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
+{
+	atom_skip_src_int(ctx,
+			  arg | (atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3] << 3),
+			  ptr);
+}
+
+static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+			 int *ptr, uint32_t val, uint32_t saved)
+{
+	uint32_t align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+	uint32_t old_val = val, idx;
+	struct atom_context *gctx = ctx->ctx;
+	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
+	val <<= atom_arg_shift[align];
+	val &= atom_arg_mask[align];
+	saved &= ~atom_arg_mask[align];
+	val |= saved;
+	switch (arg) {
+	case ATOM_ARG_REG:
+		idx = U16(*ptr);
+		(*ptr) += 2;
+		DEBUG("REG[0x%04X]", idx);
+		idx += gctx->reg_block;
+		switch (gctx->io_mode) {
+		case ATOM_IO_MM:
+			if (idx == 0)
+				gctx->card->reg_write(gctx->card, idx,
+						      val << 2);
+			else
+				gctx->card->reg_write(gctx->card, idx, val);
+			break;
+		case ATOM_IO_PCI:
+			printk(KERN_INFO
+			       "PCI registers are not implemented.\n");
+			return;
+		case ATOM_IO_SYSIO:
+			printk(KERN_INFO
+			       "SYSIO registers are not implemented.\n");
+			return;
+		default:
+			if (!(gctx->io_mode & 0x80)) {
+				printk(KERN_INFO "Bad IO mode.\n");
+				return;
+			}
+			if (!gctx->iio[gctx->io_mode & 0x7F]) {
+				printk(KERN_INFO
+				       "Undefined indirect IO write method %d.\n",
+				       gctx->io_mode & 0x7F);
+				return;
+			}
+			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0x7F],
+					 idx, val);
+		}
+		break;
+	case ATOM_ARG_PS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		DEBUG("PS[0x%02X]", idx);
+		ctx->ps[idx] = cpu_to_le32(val);
+		break;
+	case ATOM_ARG_WS:
+		idx = U8(*ptr);
+		(*ptr)++;
+		DEBUG("WS[0x%02X]", idx);
+		switch (idx) {
+		case ATOM_WS_QUOTIENT:
+			gctx->divmul[0] = val;
+			break;
+		case ATOM_WS_REMAINDER:
+			gctx->divmul[1] = val;
+			break;
+		case ATOM_WS_DATAPTR:
+			gctx->data_block = val;
+			break;
+		case ATOM_WS_SHIFT:
+			gctx->shift = val;
+			break;
+		case ATOM_WS_OR_MASK:
+		case ATOM_WS_AND_MASK:
+			break;
+		case ATOM_WS_FB_WINDOW:
+			gctx->fb_base = val;
+			break;
+		case ATOM_WS_ATTRIBUTES:
+			gctx->io_attr = val;
+			break;
+		case ATOM_WS_REGPTR:
+			gctx->reg_block = val;
+			break;
+		default:
+			ctx->ws[idx] = val;
+		}
+		break;
+	case ATOM_ARG_FB:
+		idx = U8(*ptr);
+		(*ptr)++;
+		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
+			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
+				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
+		} else
+			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
+		DEBUG("FB[0x%02X]", idx);
+		break;
+	case ATOM_ARG_PLL:
+		idx = U8(*ptr);
+		(*ptr)++;
+		DEBUG("PLL[0x%02X]", idx);
+		gctx->card->pll_write(gctx->card, idx, val);
+		break;
+	case ATOM_ARG_MC:
+		idx = U8(*ptr);
+		(*ptr)++;
+		DEBUG("MC[0x%02X]", idx);
+		gctx->card->mc_write(gctx->card, idx, val);
+		return;
+	}
+	switch (align) {
+	case ATOM_SRC_DWORD:
+		DEBUG(".[31:0] <- 0x%08X\n", old_val);
+		break;
+	case ATOM_SRC_WORD0:
+		DEBUG(".[15:0] <- 0x%04X\n", old_val);
+		break;
+	case ATOM_SRC_WORD8:
+		DEBUG(".[23:8] <- 0x%04X\n", old_val);
+		break;
+	case ATOM_SRC_WORD16:
+		DEBUG(".[31:16] <- 0x%04X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE0:
+		DEBUG(".[7:0] <- 0x%02X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE8:
+		DEBUG(".[15:8] <- 0x%02X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE16:
+		DEBUG(".[23:16] <- 0x%02X\n", old_val);
+		break;
+	case ATOM_SRC_BYTE24:
+		DEBUG(".[31:24] <- 0x%02X\n", old_val);
+		break;
+	}
+}
+
+static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst += src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst &= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
+{
+	printk(KERN_INFO "ATOM BIOS beeped!\n");
+}
+
+static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int idx = U8((*ptr)++);
+	int r = 0;
+
+	if (idx < ATOM_TABLE_NAMES_CNT)
+		SDEBUG("   table: %d (%s)\n", idx, atom_table_names[idx]);
+	else
+		SDEBUG("   table: %d\n", idx);
+	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
+		r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+	if (r) {
+		ctx->abort = true;
+	}
+}
+
+static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t saved;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
+}
+
+static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	ctx->ctx->cs_equal = (dst == src);
+	ctx->ctx->cs_above = (dst > src);
+	SDEBUG("   result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
+	       ctx->ctx->cs_above ? "GT" : "LE");
+}
+
+static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
+{
+	unsigned count = U8((*ptr)++);
+	SDEBUG("   count: %d\n", count);
+	if (arg == ATOM_UNIT_MICROSEC)
+		udelay(count);
+	else if (!drm_can_sleep())
+		mdelay(count);
+	else
+		msleep(count);
+}
+
+static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	if (src != 0) {
+		ctx->ctx->divmul[0] = dst / src;
+		ctx->ctx->divmul[1] = dst % src;
+	} else {
+		ctx->ctx->divmul[0] = 0;
+		ctx->ctx->divmul[1] = 0;
+	}
+}
+
+static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
+{
+	/* functionally, a nop */
+}
+
+static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int execute = 0, target = U16(*ptr);
+	unsigned long cjiffies;
+
+	(*ptr) += 2;
+	switch (arg) {
+	case ATOM_COND_ABOVE:
+		execute = ctx->ctx->cs_above;
+		break;
+	case ATOM_COND_ABOVEOREQUAL:
+		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
+		break;
+	case ATOM_COND_ALWAYS:
+		execute = 1;
+		break;
+	case ATOM_COND_BELOW:
+		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
+		break;
+	case ATOM_COND_BELOWOREQUAL:
+		execute = !ctx->ctx->cs_above;
+		break;
+	case ATOM_COND_EQUAL:
+		execute = ctx->ctx->cs_equal;
+		break;
+	case ATOM_COND_NOTEQUAL:
+		execute = !ctx->ctx->cs_equal;
+		break;
+	}
+	if (arg != ATOM_COND_ALWAYS)
+		SDEBUG("   taken: %s\n", execute ? "yes" : "no");
+	SDEBUG("   target: 0x%04X\n", target);
+	if (execute) {
+		if (ctx->last_jump == (ctx->start + target)) {
+			cjiffies = jiffies;
+			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+				cjiffies -= ctx->last_jump_jiffies;
+				if (jiffies_to_msecs(cjiffies) > 5000) {
+					DRM_ERROR("atombios stuck in loop for more than 5 seconds, aborting\n");
+					ctx->abort = true;
+				}
+			} else {
+				/* jiffies wrapped around; just wait a little longer */
+				ctx->last_jump_jiffies = jiffies;
+			}
+		} else {
+			ctx->last_jump = ctx->start + target;
+			ctx->last_jump_jiffies = jiffies;
+		}
+		*ptr = ctx->start + target;
+	}
+}
+
+static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, mask, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
+	SDEBUG("   mask: 0x%08x", mask);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst &= mask;
+	dst |= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t src, saved;
+	int dptr = *ptr;
+	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
+		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
+	else {
+		atom_skip_dst(ctx, arg, attr, ptr);
+		saved = 0xCDCDCDCD;
+	}
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
+}
+
+static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	ctx->ctx->divmul[0] = dst * src;
+}
+
+static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
+{
+	/* nothing */
+}
+
+static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst |= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t val = U8((*ptr)++);
+	SDEBUG("POST card output: 0x%02X\n", val);
+}
+
+static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
+{
+	printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+	printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
+{
+	printk(KERN_INFO "unimplemented!\n");
+}
+
+static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int idx = U8(*ptr);
+	(*ptr)++;
+	SDEBUG("   block: %d\n", idx);
+	if (!idx)
+		ctx->ctx->data_block = 0;
+	else if (idx == 255)
+		ctx->ctx->data_block = ctx->start;
+	else
+		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
+	SDEBUG("   base: 0x%04X\n", ctx->ctx->data_block);
+}
+
+static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	SDEBUG("   fb_base: ");
+	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
+}
+
+static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
+{
+	int port;
+	switch (arg) {
+	case ATOM_PORT_ATI:
+		port = U16(*ptr);
+		if (port < ATOM_IO_NAMES_CNT)
+			SDEBUG("   port: %d (%s)\n", port, atom_io_names[port]);
+		else
+			SDEBUG("   port: %d\n", port);
+		if (!port)
+			ctx->ctx->io_mode = ATOM_IO_MM;
+		else
+			ctx->ctx->io_mode = ATOM_IO_IIO | port;
+		(*ptr) += 2;
+		break;
+	case ATOM_PORT_PCI:
+		ctx->ctx->io_mode = ATOM_IO_PCI;
+		(*ptr)++;
+		break;
+	case ATOM_PORT_SYSIO:
+		ctx->ctx->io_mode = ATOM_IO_SYSIO;
+		(*ptr)++;
+		break;
+	}
+}
+
+static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
+{
+	ctx->ctx->reg_block = U16(*ptr);
+	(*ptr) += 2;
+	SDEBUG("   base: 0x%04X\n", ctx->ctx->reg_block);
+}
+
+static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+	SDEBUG("   shift: %d\n", shift);
+	dst <<= shift;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	attr &= 0x38;
+	attr |= atom_def_dst[attr >> 3] << 6;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
+	SDEBUG("   shift: %d\n", shift);
+	dst >>= shift;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	/* the operation needs the full (unshifted) dst value */
+	dst = saved;
+	shift = atom_get_src(ctx, attr, ptr);
+	SDEBUG("   shift: %d\n", shift);
+	dst <<= shift;
+	dst &= atom_arg_mask[dst_align];
+	dst >>= atom_arg_shift[dst_align];
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++), shift;
+	uint32_t saved, dst;
+	int dptr = *ptr;
+	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	/* the operation needs the full (unshifted) dst value */
+	dst = saved;
+	shift = atom_get_src(ctx, attr, ptr);
+	SDEBUG("   shift: %d\n", shift);
+	dst >>= shift;
+	dst &= atom_arg_mask[dst_align];
+	dst >>= atom_arg_shift[dst_align];
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst -= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t src, val, target;
+	SDEBUG("   switch: ");
+	src = atom_get_src(ctx, attr, ptr);
+	while (U16(*ptr) != ATOM_CASE_END)
+		if (U8(*ptr) == ATOM_CASE_MAGIC) {
+			(*ptr)++;
+			SDEBUG("   case: ");
+			val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr);
+			target = U16(*ptr);
+			if (val == src) {
+				SDEBUG("   target: %04X\n", target);
+				*ptr = ctx->start + target;
+				return;
+			}
+			(*ptr) += 2;
+		} else {
+			printk(KERN_INFO "Bad case.\n");
+			return;
+		}
+	(*ptr) += 2;
+}
+
+static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+	SDEBUG("   src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG("   src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	ctx->ctx->cs_equal = ((dst & src) == 0);
+	SDEBUG("   result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
+}
+
+static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src, saved;
+	int dptr = *ptr;
+	SDEBUG("   dst: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst ^= src;
+	SDEBUG("   dst: ");
+	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
+}
+
+static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
+{
+	printk(KERN_INFO "unimplemented!\n");
+}
+
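+/*
+ * Opcode dispatch table, indexed by the opcode byte.  Most operations
+ * come in six variants, one per destination operand type (REG, PS, WS,
+ * FB, PLL, MC); 'arg' carries that type, or a port/condition code,
+ * into the handler.
+ */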
+static struct {
+	void (*func)(atom_exec_context *, int *, int);
+	int arg;
+} opcode_table[ATOM_OP_CNT] = {
+	{ NULL, 0 },
+	{ atom_op_move, ATOM_ARG_REG },
+	{ atom_op_move, ATOM_ARG_PS },
+	{ atom_op_move, ATOM_ARG_WS },
+	{ atom_op_move, ATOM_ARG_FB },
+	{ atom_op_move, ATOM_ARG_PLL },
+	{ atom_op_move, ATOM_ARG_MC },
+	{ atom_op_and, ATOM_ARG_REG },
+	{ atom_op_and, ATOM_ARG_PS },
+	{ atom_op_and, ATOM_ARG_WS },
+	{ atom_op_and, ATOM_ARG_FB },
+	{ atom_op_and, ATOM_ARG_PLL },
+	{ atom_op_and, ATOM_ARG_MC },
+	{ atom_op_or, ATOM_ARG_REG },
+	{ atom_op_or, ATOM_ARG_PS },
+	{ atom_op_or, ATOM_ARG_WS },
+	{ atom_op_or, ATOM_ARG_FB },
+	{ atom_op_or, ATOM_ARG_PLL },
+	{ atom_op_or, ATOM_ARG_MC },
+	{ atom_op_shift_left, ATOM_ARG_REG },
+	{ atom_op_shift_left, ATOM_ARG_PS },
+	{ atom_op_shift_left, ATOM_ARG_WS },
+	{ atom_op_shift_left, ATOM_ARG_FB },
+	{ atom_op_shift_left, ATOM_ARG_PLL },
+	{ atom_op_shift_left, ATOM_ARG_MC },
+	{ atom_op_shift_right, ATOM_ARG_REG },
+	{ atom_op_shift_right, ATOM_ARG_PS },
+	{ atom_op_shift_right, ATOM_ARG_WS },
+	{ atom_op_shift_right, ATOM_ARG_FB },
+	{ atom_op_shift_right, ATOM_ARG_PLL },
+	{ atom_op_shift_right, ATOM_ARG_MC },
+	{ atom_op_mul, ATOM_ARG_REG },
+	{ atom_op_mul, ATOM_ARG_PS },
+	{ atom_op_mul, ATOM_ARG_WS },
+	{ atom_op_mul, ATOM_ARG_FB },
+	{ atom_op_mul, ATOM_ARG_PLL },
+	{ atom_op_mul, ATOM_ARG_MC },
+	{ atom_op_div, ATOM_ARG_REG },
+	{ atom_op_div, ATOM_ARG_PS },
+	{ atom_op_div, ATOM_ARG_WS },
+	{ atom_op_div, ATOM_ARG_FB },
+	{ atom_op_div, ATOM_ARG_PLL },
+	{ atom_op_div, ATOM_ARG_MC },
+	{ atom_op_add, ATOM_ARG_REG },
+	{ atom_op_add, ATOM_ARG_PS },
+	{ atom_op_add, ATOM_ARG_WS },
+	{ atom_op_add, ATOM_ARG_FB },
+	{ atom_op_add, ATOM_ARG_PLL },
+	{ atom_op_add, ATOM_ARG_MC },
+	{ atom_op_sub, ATOM_ARG_REG },
+	{ atom_op_sub, ATOM_ARG_PS },
+	{ atom_op_sub, ATOM_ARG_WS },
+	{ atom_op_sub, ATOM_ARG_FB },
+	{ atom_op_sub, ATOM_ARG_PLL },
+	{ atom_op_sub, ATOM_ARG_MC },
+	{ atom_op_setport, ATOM_PORT_ATI },
+	{ atom_op_setport, ATOM_PORT_PCI },
+	{ atom_op_setport, ATOM_PORT_SYSIO },
+	{ atom_op_setregblock, 0 },
+	{ atom_op_setfbbase, 0 },
+	{ atom_op_compare, ATOM_ARG_REG },
+	{ atom_op_compare, ATOM_ARG_PS },
+	{ atom_op_compare, ATOM_ARG_WS },
+	{ atom_op_compare, ATOM_ARG_FB },
+	{ atom_op_compare, ATOM_ARG_PLL },
+	{ atom_op_compare, ATOM_ARG_MC },
+	{ atom_op_switch, 0 },
+	{ atom_op_jump, ATOM_COND_ALWAYS },
+	{ atom_op_jump, ATOM_COND_EQUAL },
+	{ atom_op_jump, ATOM_COND_BELOW },
+	{ atom_op_jump, ATOM_COND_ABOVE },
+	{ atom_op_jump, ATOM_COND_BELOWOREQUAL },
+	{ atom_op_jump, ATOM_COND_ABOVEOREQUAL },
+	{ atom_op_jump, ATOM_COND_NOTEQUAL },
+	{ atom_op_test, ATOM_ARG_REG },
+	{ atom_op_test, ATOM_ARG_PS },
+	{ atom_op_test, ATOM_ARG_WS },
+	{ atom_op_test, ATOM_ARG_FB },
+	{ atom_op_test, ATOM_ARG_PLL },
+	{ atom_op_test, ATOM_ARG_MC },
+	{ atom_op_delay, ATOM_UNIT_MILLISEC },
+	{ atom_op_delay, ATOM_UNIT_MICROSEC },
+	{ atom_op_calltable, 0 },
+	{ atom_op_repeat, 0 },
+	{ atom_op_clear, ATOM_ARG_REG },
+	{ atom_op_clear, ATOM_ARG_PS },
+	{ atom_op_clear, ATOM_ARG_WS },
+	{ atom_op_clear, ATOM_ARG_FB },
+	{ atom_op_clear, ATOM_ARG_PLL },
+	{ atom_op_clear, ATOM_ARG_MC },
+	{ atom_op_nop, 0 },
+	{ atom_op_eot, 0 },
+	{ atom_op_mask, ATOM_ARG_REG },
+	{ atom_op_mask, ATOM_ARG_PS },
+	{ atom_op_mask, ATOM_ARG_WS },
+	{ atom_op_mask, ATOM_ARG_FB },
+	{ atom_op_mask, ATOM_ARG_PLL },
+	{ atom_op_mask, ATOM_ARG_MC },
+	{ atom_op_postcard, 0 },
+	{ atom_op_beep, 0 },
+	{ atom_op_savereg, 0 },
+	{ atom_op_restorereg, 0 },
+	{ atom_op_setdatablock, 0 },
+	{ atom_op_xor, ATOM_ARG_REG },
+	{ atom_op_xor, ATOM_ARG_PS },
+	{ atom_op_xor, ATOM_ARG_WS },
+	{ atom_op_xor, ATOM_ARG_FB },
+	{ atom_op_xor, ATOM_ARG_PLL },
+	{ atom_op_xor, ATOM_ARG_MC },
+	{ atom_op_shl, ATOM_ARG_REG },
+	{ atom_op_shl, ATOM_ARG_PS },
+	{ atom_op_shl, ATOM_ARG_WS },
+	{ atom_op_shl, ATOM_ARG_FB },
+	{ atom_op_shl, ATOM_ARG_PLL },
+	{ atom_op_shl, ATOM_ARG_MC },
+	{ atom_op_shr, ATOM_ARG_REG },
+	{ atom_op_shr, ATOM_ARG_PS },
+	{ atom_op_shr, ATOM_ARG_WS },
+	{ atom_op_shr, ATOM_ARG_FB },
+	{ atom_op_shr, ATOM_ARG_PLL },
+	{ atom_op_shr, ATOM_ARG_MC },
+	{ atom_op_debug, 0 },
+};
+
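+/*
+ * Core interpreter loop: look up the table's code offset in the master
+ * command table, allocate its workspace, then fetch and dispatch
+ * opcodes until EOT is reached or a handler flags an abort.
+ */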
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
+{
+	int base = CU16(ctx->cmd_table + 4 + 2 * index);
+	int len, ws, ps, ptr;
+	unsigned char op;
+	atom_exec_context ectx;
+	int ret = 0;
+
+	if (!base)
+		return -EINVAL;
+
+	len = CU16(base + ATOM_CT_SIZE_PTR);
+	ws = CU8(base + ATOM_CT_WS_PTR);
+	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
+	ptr = base + ATOM_CT_CODE_PTR;
+
+	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
+
+	ectx.ctx = ctx;
+	ectx.ps_shift = ps / 4;
+	ectx.start = base;
+	ectx.ps = params;
+	ectx.abort = false;
+	ectx.last_jump = 0;
+	if (ws) {
+		ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
+		if (!ectx.ws)
+			return -ENOMEM;
+	} else {
+		ectx.ws = NULL;
+	}
+
+	debug_depth++;
+	while (1) {
+		op = CU8(ptr++);
+		if (op < ATOM_OP_NAMES_CNT)
+			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
+		else
+			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
+		if (ectx.abort) {
+			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
+				base, len, ws, ps, ptr - 1);
+			ret = -EINVAL;
+			goto free;
+		}
+
+		if (op < ATOM_OP_CNT && op > 0)
+			opcode_table[op].func(&ectx, &ptr,
+					      opcode_table[op].arg);
+		else
+			break;
+
+		if (op == ATOM_OP_EOT)
+			break;
+	}
+	debug_depth--;
+	SDEBUG("<<\n");
+
+free:
+	if (ws)
+		kfree(ectx.ws);
+	return ret;
+}
+
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
+{
+	int r;
+
+	mutex_lock(&ctx->mutex);
+	/* reset data block */
+	ctx->data_block = 0;
+	/* reset reg block */
+	ctx->reg_block = 0;
+	/* reset fb window */
+	ctx->fb_base = 0;
+	/* reset io mode */
+	ctx->io_mode = ATOM_IO_MM;
+	/* reset divmul */
+	ctx->divmul[0] = 0;
+	ctx->divmul[1] = 0;
+	r = atom_execute_table_locked(ctx, index, params);
+	mutex_unlock(&ctx->mutex);
+	return r;
+}
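+
+/*
+ * Illustrative call sequence only (not part of the driver); the index
+ * and parameter layout below are hypothetical and table-specific:
+ *
+ *	uint32_t ps[2];
+ *	ps[0] = cpu_to_le32(target_clock);	// PS[0]: first table argument
+ *	ps[1] = 0;				// PS[1]: unused here
+ *	if (atom_execute_table(ctx, index, ps))
+ *		return -EINVAL;			// table missing or aborted
+ */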
+
+static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
+
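+/*
+ * Index the indirect IO (IIO) programs found at 'base'.  Each program
+ * starts with an ATOM_IIO_START byte followed by its port id;
+ * ctx->iio[id] records the offset of the program body, and the
+ * per-opcode lengths in atom_iio_len above are used to skip to the
+ * terminating ATOM_IIO_END.
+ */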
+static void atom_index_iio(struct atom_context *ctx, int base)
+{
+	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
+	if (!ctx->iio)
+		return;
+	while (CU8(base) == ATOM_IIO_START) {
+		ctx->iio[CU8(base + 1)] = base + 2;
+		base += 2;
+		while (CU8(base) != ATOM_IIO_END)
+			base += atom_iio_len[CU8(base)];
+		base += 3;
+	}
+}
+
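+/*
+ * Validate the BIOS image (0xAA55 magic plus the ATI and ATOM
+ * signatures), record the command- and data-table offsets, index the
+ * IIO programs and log the BIOS name string.
+ */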
+struct atom_context *atom_parse(struct card_info *card, void *bios)
+{
+	int base;
+	struct atom_context *ctx =
+	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
+	char *str;
+	char name[512];
+	int i;
+
+	if (!ctx)
+		return NULL;
+
+	ctx->card = card;
+	ctx->bios = bios;
+
+	if (CU16(0) != ATOM_BIOS_MAGIC) {
+		printk(KERN_INFO "Invalid BIOS magic.\n");
+		kfree(ctx);
+		return NULL;
+	}
+	if (strncmp
+	    (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
+	     strlen(ATOM_ATI_MAGIC))) {
+		printk(KERN_INFO "Invalid ATI magic.\n");
+		kfree(ctx);
+		return NULL;
+	}
+
+	base = CU16(ATOM_ROM_TABLE_PTR);
+	if (strncmp
+	    (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
+	     strlen(ATOM_ROM_MAGIC))) {
+		printk(KERN_INFO "Invalid ATOM magic.\n");
+		kfree(ctx);
+		return NULL;
+	}
+
+	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
+	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
+	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
+	if (!ctx->iio) {
+		atom_destroy(ctx);
+		return NULL;
+	}
+
+	str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
+	while (*str && ((*str == '\n') || (*str == '\r')))
+		str++;
+	/* name string isn't always 0 terminated */
+	for (i = 0; i < 511; i++) {
+		name[i] = str[i];
+		if (name[i] < '.' || name[i] > 'z') {
+			name[i] = 0;
+			break;
+		}
+	}
+	printk(KERN_INFO "ATOM BIOS: %s\n", name);
+
+	return ctx;
+}
+
+int atom_asic_init(struct atom_context *ctx)
+{
+	struct radeon_device *rdev = ctx->card->dev->dev_private;
+	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
+	uint32_t ps[16];
+	int ret;
+
+	memset(ps, 0, sizeof(ps));
+
+	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
+	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
+	if (!ps[0] || !ps[1])
+		return 1;
+
+	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
+		return 1;
+	ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+	if (ret)
+		return ret;
+
+	memset(ps, 0, sizeof(ps));
+
+	if (rdev->family < CHIP_R600) {
+		if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
+			atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
+	}
+	return ret;
+}
+
+void atom_destroy(struct atom_context *ctx)
+{
+	kfree(ctx->iio);
+	kfree(ctx);
+}
+
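+/*
+ * Look up data table 'index' in the master data table; if present,
+ * report its size, format/content revisions and offset.
+ */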
+bool atom_parse_data_header(struct atom_context *ctx, int index,
+			    uint16_t *size, uint8_t *frev, uint8_t *crev,
+			    uint16_t *data_start)
+{
+	int offset = index * 2 + 4;
+	int idx = CU16(ctx->data_table + offset);
+	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
+
+	if (!mdt[index])
+		return false;
+
+	if (size)
+		*size = CU16(idx);
+	if (frev)
+		*frev = CU8(idx + 2);
+	if (crev)
+		*crev = CU8(idx + 3);
+	*data_start = idx;
+	return true;
+}
+
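+/* The same lookup for a command table; only the revisions are returned. */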
+bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
+			   uint8_t *crev)
+{
+	int offset = index * 2 + 4;
+	int idx = CU16(ctx->cmd_table + offset);
+	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
+
+	if (!mct[index])
+		return false;
+
+	if (frev)
+		*frev = CU8(idx + 2);
+	if (crev)
+		*crev = CU8(idx + 3);
+	return true;
+}
+
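+/*
+ * Reserve the scratch buffer that backs the firmware's FB window.  The
+ * size comes from the VRAM_UsageByFirmware data table when present;
+ * otherwise a 20 KiB default is used.
+ */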
+int atom_allocate_fb_scratch(struct atom_context *ctx)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
+	uint16_t data_offset;
+	int usage_bytes = 0;
+	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+
+	if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+
+		DRM_DEBUG("atom firmware requested %08x %dkb\n",
+			  le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+			  le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
+
+		usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+	}
+	ctx->scratch_size_bytes = 0;
+	if (usage_bytes == 0)
+		usage_bytes = 20 * 1024;
+	/* allocate some scratch memory */
+	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
+	if (!ctx->scratch)
+		return -ENOMEM;
+	ctx->scratch_size_bytes = usage_bytes;
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/atom.h b/linux-imx/drivers/gpu/drm/radeon/atom.h
new file mode 100644
index 0000000..feba6b8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/atom.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Stanislaw Skowronek
+ */
+
+#ifndef ATOM_H
+#define ATOM_H
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+
+#define ATOM_BIOS_MAGIC		0xAA55
+#define ATOM_ATI_MAGIC_PTR	0x30
+#define ATOM_ATI_MAGIC		" 761295520"
+#define ATOM_ROM_TABLE_PTR	0x48
+
+#define ATOM_ROM_MAGIC		"ATOM"
+#define ATOM_ROM_MAGIC_PTR	4
+
+#define ATOM_ROM_MSG_PTR	0x10
+#define ATOM_ROM_CMD_PTR	0x1E
+#define ATOM_ROM_DATA_PTR	0x20
+
+#define ATOM_CMD_INIT		0
+#define ATOM_CMD_SETSCLK	0x0A
+#define ATOM_CMD_SETMCLK	0x0B
+#define ATOM_CMD_SETPCLK	0x0C
+#define ATOM_CMD_SPDFANCNTL	0x39
+
+#define ATOM_DATA_FWI_PTR	0xC
+#define ATOM_DATA_IIO_PTR	0x32
+
+#define ATOM_FWI_DEFSCLK_PTR	8
+#define ATOM_FWI_DEFMCLK_PTR	0xC
+#define ATOM_FWI_MAXSCLK_PTR	0x24
+#define ATOM_FWI_MAXMCLK_PTR	0x28
+
+#define ATOM_CT_SIZE_PTR	0
+#define ATOM_CT_WS_PTR		4
+#define ATOM_CT_PS_PTR		5
+#define ATOM_CT_PS_MASK		0x7F
+#define ATOM_CT_CODE_PTR	6
+
+#define ATOM_OP_CNT		123
+#define ATOM_OP_EOT		91
+
+#define ATOM_CASE_MAGIC		0x63
+#define ATOM_CASE_END		0x5A5A
+
+#define ATOM_ARG_REG		0
+#define ATOM_ARG_PS		1
+#define ATOM_ARG_WS		2
+#define ATOM_ARG_FB		3
+#define ATOM_ARG_ID		4
+#define ATOM_ARG_IMM		5
+#define ATOM_ARG_PLL		6
+#define ATOM_ARG_MC		7
+
+#define ATOM_SRC_DWORD		0
+#define ATOM_SRC_WORD0		1
+#define ATOM_SRC_WORD8		2
+#define ATOM_SRC_WORD16		3
+#define ATOM_SRC_BYTE0		4
+#define ATOM_SRC_BYTE8		5
+#define ATOM_SRC_BYTE16		6
+#define ATOM_SRC_BYTE24		7
+
+#define ATOM_WS_QUOTIENT	0x40
+#define ATOM_WS_REMAINDER	0x41
+#define ATOM_WS_DATAPTR		0x42
+#define ATOM_WS_SHIFT		0x43
+#define ATOM_WS_OR_MASK		0x44
+#define ATOM_WS_AND_MASK	0x45
+#define ATOM_WS_FB_WINDOW	0x46
+#define ATOM_WS_ATTRIBUTES	0x47
+#define ATOM_WS_REGPTR  	0x48
+
+#define ATOM_IIO_NOP		0
+#define ATOM_IIO_START		1
+#define ATOM_IIO_READ		2
+#define ATOM_IIO_WRITE		3
+#define ATOM_IIO_CLEAR		4
+#define ATOM_IIO_SET		5
+#define ATOM_IIO_MOVE_INDEX	6
+#define ATOM_IIO_MOVE_ATTR	7
+#define ATOM_IIO_MOVE_DATA	8
+#define ATOM_IIO_END		9
+
+#define ATOM_IO_MM		0
+#define ATOM_IO_PCI		1
+#define ATOM_IO_SYSIO		2
+#define ATOM_IO_IIO		0x80
+
+struct card_info {
+	struct drm_device *dev;
+	void (*reg_write)(struct card_info *, uint32_t, uint32_t);     /* filled by driver */
+	uint32_t (*reg_read)(struct card_info *, uint32_t);            /* filled by driver */
+	void (*ioreg_write)(struct card_info *, uint32_t, uint32_t);   /* filled by driver */
+	uint32_t (*ioreg_read)(struct card_info *, uint32_t);          /* filled by driver */
+	void (*mc_write)(struct card_info *, uint32_t, uint32_t);      /* filled by driver */
+	uint32_t (*mc_read)(struct card_info *, uint32_t);             /* filled by driver */
+	void (*pll_write)(struct card_info *, uint32_t, uint32_t);     /* filled by driver */
+	uint32_t (*pll_read)(struct card_info *, uint32_t);            /* filled by driver */
+};
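+
+/*
+ * Illustrative only: the driver must fill every callback before
+ * handing the struct to atom_parse().  The names here are hypothetical.
+ *
+ *	static struct card_info info = {
+ *		.reg_write = my_reg_write,	// MMIO register write
+ *		.reg_read  = my_reg_read,	// MMIO register read
+ *		.pll_write = my_pll_write,	// PLL block write
+ *		...
+ *	};
+ *	ctx = atom_parse(&info, bios_image);
+ */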
+
+struct atom_context {
+	struct card_info *card;
+	struct mutex mutex;
+	void *bios;
+	uint32_t cmd_table, data_table;
+	uint16_t *iio;
+
+	uint16_t data_block;
+	uint32_t fb_base;
+	uint32_t divmul[2];
+	uint16_t io_attr;
+	uint16_t reg_block;
+	uint8_t shift;
+	int cs_equal, cs_above;
+	int io_mode;
+	uint32_t *scratch;
+	int scratch_size_bytes;
+};
+
+extern int atom_debug;
+
+struct atom_context *atom_parse(struct card_info *, void *);
+int atom_execute_table(struct atom_context *, int, uint32_t *);
+int atom_asic_init(struct atom_context *);
+void atom_destroy(struct atom_context *);
+bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
+			    uint8_t *frev, uint8_t *crev, uint16_t *data_start);
+bool atom_parse_cmd_header(struct atom_context *ctx, int index,
+			   uint8_t *frev, uint8_t *crev);
+int atom_allocate_fb_scratch(struct atom_context *ctx);
+#include "atom-types.h"
+#include "atombios.h"
+#include "ObjectID.h"
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/atombios.h b/linux-imx/drivers/gpu/drm/radeon/atombios.h
new file mode 100644
index 0000000..0ee5737
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/atombios.h
@@ -0,0 +1,8012 @@
+/*
+ * Copyright 2006-2007 Advanced Micro Devices, Inc.  
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/****************************************************************************/	
+/*Portion I: Definitions  shared between VBIOS and Driver                   */
+/****************************************************************************/
+
+
+#ifndef _ATOMBIOS_H
+#define _ATOMBIOS_H
+
+#define ATOM_VERSION_MAJOR                   0x00020000
+#define ATOM_VERSION_MINOR                   0x00000002
+
+#define ATOM_HEADER_VERSION (ATOM_VERSION_MAJOR | ATOM_VERSION_MINOR)
+
+/* Endianness must be specified before this header is included:
+ * define ATOM_BIG_ENDIAN to 1 for big-endian or 0 for little-endian
+ * builds; there is no default.
+ */
+#ifndef ATOM_BIG_ENDIAN
+#error Endian not specified
+#endif
+
+#ifdef _H2INC
+  #ifndef ULONG 
+    typedef unsigned long ULONG;
+  #endif
+
+  #ifndef UCHAR
+    typedef unsigned char UCHAR;
+  #endif
+
+  #ifndef USHORT 
+    typedef unsigned short USHORT;
+  #endif
+#endif
+      
+#define ATOM_DAC_A            0 
+#define ATOM_DAC_B            1
+#define ATOM_EXT_DAC          2
+
+#define ATOM_CRTC1            0
+#define ATOM_CRTC2            1
+#define ATOM_CRTC3            2
+#define ATOM_CRTC4            3
+#define ATOM_CRTC5            4
+#define ATOM_CRTC6            5
+#define ATOM_CRTC_INVALID     0xFF
+
+#define ATOM_DIGA             0
+#define ATOM_DIGB             1
+
+#define ATOM_PPLL1            0
+#define ATOM_PPLL2            1
+#define ATOM_DCPLL            2
+#define ATOM_PPLL0            2
+#define ATOM_EXT_PLL1         8
+#define ATOM_EXT_PLL2         9
+#define ATOM_EXT_CLOCK        10
+#define ATOM_PPLL_INVALID     0xFF
+
+#define ENCODER_REFCLK_SRC_P1PLL       0       
+#define ENCODER_REFCLK_SRC_P2PLL       1
+#define ENCODER_REFCLK_SRC_DCPLL       2
+#define ENCODER_REFCLK_SRC_EXTCLK      3
+#define ENCODER_REFCLK_SRC_INVALID     0xFF
+
+#define ATOM_SCALER1          0
+#define ATOM_SCALER2          1
+
+#define ATOM_SCALER_DISABLE   0   
+#define ATOM_SCALER_CENTER    1   
+#define ATOM_SCALER_EXPANSION 2   
+#define ATOM_SCALER_MULTI_EX  3   
+
+#define ATOM_DISABLE          0
+#define ATOM_ENABLE           1
+#define ATOM_LCD_BLOFF                          (ATOM_DISABLE+2)
+#define ATOM_LCD_BLON                           (ATOM_ENABLE+2)
+#define ATOM_LCD_BL_BRIGHTNESS_CONTROL          (ATOM_ENABLE+3)
+#define ATOM_LCD_SELFTEST_START									(ATOM_DISABLE+5)
+#define ATOM_LCD_SELFTEST_STOP									(ATOM_ENABLE+5)
+#define ATOM_ENCODER_INIT			                  (ATOM_DISABLE+7)
+#define ATOM_INIT			                          (ATOM_DISABLE+7)
+#define ATOM_GET_STATUS                         (ATOM_DISABLE+8)
+
+#define ATOM_BLANKING         1
+#define ATOM_BLANKING_OFF     0
+
+#define ATOM_CURSOR1          0
+#define ATOM_CURSOR2          1
+
+#define ATOM_ICON1            0
+#define ATOM_ICON2            1
+
+#define ATOM_CRT1             0
+#define ATOM_CRT2             1
+
+#define ATOM_TV_NTSC          1
+#define ATOM_TV_NTSCJ         2
+#define ATOM_TV_PAL           3
+#define ATOM_TV_PALM          4
+#define ATOM_TV_PALCN         5
+#define ATOM_TV_PALN          6
+#define ATOM_TV_PAL60         7
+#define ATOM_TV_SECAM         8
+#define ATOM_TV_CV            16
+
+#define ATOM_DAC1_PS2         1
+#define ATOM_DAC1_CV          2
+#define ATOM_DAC1_NTSC        3
+#define ATOM_DAC1_PAL         4
+
+#define ATOM_DAC2_PS2         ATOM_DAC1_PS2
+#define ATOM_DAC2_CV          ATOM_DAC1_CV
+#define ATOM_DAC2_NTSC        ATOM_DAC1_NTSC
+#define ATOM_DAC2_PAL         ATOM_DAC1_PAL
+ 
+#define ATOM_PM_ON            0
+#define ATOM_PM_STANDBY       1
+#define ATOM_PM_SUSPEND       2
+#define ATOM_PM_OFF           3
+
+/* Bit0:{=0:single, =1:dual},
+   Bit1 {=0:666RGB, =1:888RGB},
+   Bit2:3:{Grey level}
+   Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}*/
+
+#define ATOM_PANEL_MISC_DUAL               0x00000001
+#define ATOM_PANEL_MISC_888RGB             0x00000002
+#define ATOM_PANEL_MISC_GREY_LEVEL         0x0000000C
+#define ATOM_PANEL_MISC_FPDI               0x00000010
+#define ATOM_PANEL_MISC_GREY_LEVEL_SHIFT   2
+#define ATOM_PANEL_MISC_SPATIAL            0x00000020
+#define ATOM_PANEL_MISC_TEMPORAL           0x00000040
+#define ATOM_PANEL_MISC_API_ENABLED        0x00000080
+
+
+#define MEMTYPE_DDR1              "DDR1"
+#define MEMTYPE_DDR2              "DDR2"
+#define MEMTYPE_DDR3              "DDR3"
+#define MEMTYPE_DDR4              "DDR4"
+
+#define ASIC_BUS_TYPE_PCI         "PCI"
+#define ASIC_BUS_TYPE_AGP         "AGP"
+#define ASIC_BUS_TYPE_PCIE        "PCI_EXPRESS"
+
+/* Maximum size of that FireGL flag string */
+
+#define ATOM_FIREGL_FLAG_STRING     "FGL"             //Flag used to enable FireGL Support
+#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING  3        //sizeof( ATOM_FIREGL_FLAG_STRING )
+
+#define ATOM_FAKE_DESKTOP_STRING    "DSK"             //Flag used to enable mobile ASIC on Desktop
+#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING  ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 
+
+#define ATOM_M54T_FLAG_STRING       "M54T"            //Flag used to enable M54T Support
+#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING    4        //sizeof( ATOM_M54T_FLAG_STRING )
+
+#define HW_ASSISTED_I2C_STATUS_FAILURE          2
+#define HW_ASSISTED_I2C_STATUS_SUCCESS          1
+
+#pragma pack(1)                                       /* BIOS data must use byte alignment */
+
+/*  Define offset to location of ROM header. */
+
+#define OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER		0x00000048L
+#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE				    0x00000002L
+
+#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE    0x94
+#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE   20    /* including the terminator 0x0! */
+#define	OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER		0x002f
+#define	OFFSET_TO_GET_ATOMBIOS_STRINGS_START		0x006e
+
+/* Common header for all ROM data tables.
+  Every table pointed to by _ATOM_MASTER_DATA_TABLE has this common header,
+  and the pointer actually points to this header. */
+
+typedef struct _ATOM_COMMON_TABLE_HEADER
+{
+  USHORT usStructureSize;
+  UCHAR  ucTableFormatRevision;   /*Change it when the parser is no longer backward compatible */
+  UCHAR  ucTableContentRevision;  /*Change it only when the table needs to change but the firmware */
+                                  /*image can't be updated, while the driver needs to carry the new table! */
+}ATOM_COMMON_TABLE_HEADER;
+
+/****************************************************************************/	
+// Structure stores the ROM header.
+/****************************************************************************/	
+typedef struct _ATOM_ROM_HEADER
+{
+  ATOM_COMMON_TABLE_HEADER		sHeader;
+  UCHAR	 uaFirmWareSignature[4];    /*Signature to distinguish between Atombios and non-atombios, 
+                                      atombios should init it as "ATOM", don't change the position */
+  USHORT usBiosRuntimeSegmentAddress;
+  USHORT usProtectedModeInfoOffset;
+  USHORT usConfigFilenameOffset;
+  USHORT usCRC_BlockOffset;
+  USHORT usBIOS_BootupMessageOffset;
+  USHORT usInt10Offset;
+  USHORT usPciBusDevInitCode;
+  USHORT usIoBaseAddress;
+  USHORT usSubsystemVendorID;
+  USHORT usSubsystemID;
+  USHORT usPCI_InfoOffset; 
+  USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
+  USHORT usMasterDataTableOffset;   /*Offset for SW to get all data table offsets, Don't change the position */
+  UCHAR  ucExtendedFunctionCode;
+  UCHAR  ucReserved;
+}ATOM_ROM_HEADER;
+
+/*==============================Command Table Portion==================================== */
+
+#ifdef	UEFI_BUILD
+	#define	UTEMP	USHORT
+	#define	USHORT	void*
+#endif
+
+/****************************************************************************/	
+// Structures used in Command.mtb 
+/****************************************************************************/	
+typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+  USHORT ASIC_Init;                              //Function Table, used by various SW components,latest version 1.1
+  USHORT GetDisplaySurfaceSize;                  //Atomic Table,  Used by Bios when enabling HW ICON
+  USHORT ASIC_RegistersInit;                     //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT VRAM_BlockVenderDetection;              //Atomic Table,  used only by Bios
+  USHORT DIGxEncoderControl;										 //Only used by Bios
+  USHORT MemoryControllerInit;                   //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT EnableCRTCMemReq;                       //Function Table,directly used by various SW components,latest version 2.1
+  USHORT MemoryParamAdjust; 										 //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock if needed
+  USHORT DVOEncoderControl;                      //Function Table,directly used by various SW components,latest version 1.2
+  USHORT GPIOPinControl;												 //Atomic Table,  only used by Bios
+  USHORT SetEngineClock;                         //Function Table,directly used by various SW components,latest version 1.1
+  USHORT SetMemoryClock;                         //Function Table,directly used by various SW components,latest version 1.1
+  USHORT SetPixelClock;                          //Function Table,directly used by various SW components,latest version 1.2  
+  USHORT EnableDispPowerGating;                  //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT ResetMemoryDLL;                         //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT ResetMemoryDevice;                      //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT MemoryPLLInit;                          //Atomic Table,  used only by Bios
+  USHORT AdjustDisplayPll;                       //Atomic Table,  used by various SW components.
+  USHORT AdjustMemoryController;                 //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock                
+  USHORT EnableASIC_StaticPwrMgt;                //Atomic Table,  only used by Bios
+  USHORT ASIC_StaticPwrMgtStatusChange;          //Obsolete ,     only used by Bios   
+  USHORT DAC_LoadDetection;                      //Atomic Table,  directly used by various SW components,latest version 1.2  
+  USHORT LVTMAEncoderControl;                    //Atomic Table,directly used by various SW components,latest version 1.3
+  USHORT HW_Misc_Operation;                      //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT DAC1EncoderControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1  
+  USHORT DAC2EncoderControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT DVOOutputControl;                       //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT CV1OutputControl;                       //Atomic Table,  Obsolete from Ry6xx, use DAC2 Output instead
+  USHORT GetConditionalGoldenSetting;            //Only used by Bios
+  USHORT TVEncoderControl;                       //Function Table,directly used by various SW components,latest version 1.1
+  USHORT PatchMCSetting;                         //only used by BIOS
+  USHORT MC_SEQ_Control;                         //only used by BIOS
+  USHORT TV1OutputControl;                       //Atomic Table,  Obsolete from Ry6xx, use DAC2 Output instead
+  USHORT EnableScaler;                           //Atomic Table,  used only by Bios
+  USHORT BlankCRTC;                              //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT EnableCRTC;                             //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT GetPixelClock;                          //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT EnableVGA_Render;                       //Function Table,directly used by various SW components,latest version 1.1
+  USHORT GetSCLKOverMCLKRatio;                   //Atomic Table,  only used by Bios
+  USHORT SetCRTC_Timing;                         //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT SetCRTC_OverScan;                       //Atomic Table,  used by various SW components,latest version 1.1 
+  USHORT SetCRTC_Replication;                    //Atomic Table,  used only by Bios
+  USHORT SelectCRTC_Source;                      //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT EnableGraphSurfaces;                    //Atomic Table,  used only by Bios
+  USHORT UpdateCRTC_DoubleBufferRegisters;			 //Atomic Table,  used only by Bios
+  USHORT LUT_AutoFill;                           //Atomic Table,  only used by Bios
+  USHORT EnableHW_IconCursor;                    //Atomic Table,  only used by Bios
+  USHORT GetMemoryClock;                         //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT GetEngineClock;                         //Atomic Table,  directly used by various SW components,latest version 1.1 
+  USHORT SetCRTC_UsingDTDTiming;                 //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT ExternalEncoderControl;                 //Atomic Table,  directly used by various SW components,latest version 2.1
+  USHORT LVTMAOutputControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT VRAM_BlockDetectionByStrap;             //Atomic Table,  used only by Bios
+  USHORT MemoryCleanUp;                          //Atomic Table,  only used by Bios    
+  USHORT ProcessI2cChannelTransaction;           //Function Table,only used by Bios
+  USHORT WriteOneByteToHWAssistedI2C;            //Function Table,indirectly used by various SW components 
+  USHORT ReadHWAssistedI2CStatus;                //Atomic Table,  indirectly used by various SW components
+  USHORT SpeedFanControl;                        //Function Table,indirectly used by various SW components,called from ASIC_Init
+  USHORT PowerConnectorDetection;                //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT MC_Synchronization;                     //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT ComputeMemoryEnginePLL;                 //Atomic Table,  indirectly used by various SW components,called from SetMemory/EngineClock
+  USHORT MemoryRefreshConversion;                //Atomic Table,  indirectly used by various SW components,called from SetMemory or SetEngineClock
+  USHORT VRAM_GetCurrentInfoBlock;               //Atomic Table,  used only by Bios
+  USHORT DynamicMemorySettings;                  //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT MemoryTraining;                         //Atomic Table,  used only by Bios
+  USHORT EnableSpreadSpectrumOnPPLL;             //Atomic Table,  directly used by various SW components,latest version 1.2
+  USHORT TMDSAOutputControl;                     //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT SetVoltage;                             //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
+  USHORT DAC1OutputControl;                      //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT DAC2OutputControl;                      //Atomic Table,  directly used by various SW components,latest version 1.1
+  USHORT ComputeMemoryClockParam;                //Function Table,only used by Bios, obsolete soon. Switch to use "ReadEDIDFromHWAssistedI2C"
+  USHORT ClockSource;                            //Atomic Table,  indirectly used by various SW components,called from ASIC_Init
+  USHORT MemoryDeviceInit;                       //Atomic Table,  indirectly used by various SW components,called from SetMemoryClock
+  USHORT GetDispObjectInfo;                      //Atomic Table,  indirectly used by various SW components,called from EnableVGARender
+  USHORT DIG1EncoderControl;                     //Atomic Table,directly used by various SW components,latest version 1.1
+  USHORT DIG2EncoderControl;                     //Atomic Table,directly used by various SW components,latest version 1.1
+  USHORT DIG1TransmitterControl;                 //Atomic Table,directly used by various SW components,latest version 1.1
+  USHORT DIG2TransmitterControl;	               //Atomic Table,directly used by various SW components,latest version 1.1 
+  USHORT ProcessAuxChannelTransaction;					 //Function Table,only used by Bios
+  USHORT DPEncoderService;											 //Function Table,only used by Bios
+  USHORT GetVoltageInfo;                         //Function Table,only used by Bios since SI
+}ATOM_MASTER_LIST_OF_COMMAND_TABLES;   
+
+// For backward compatibility
+#define ReadEDIDFromHWAssistedI2C                ProcessI2cChannelTransaction
+#define DPTranslatorControl                      DIG2EncoderControl
+#define UNIPHYTransmitterControl			     DIG1TransmitterControl
+#define LVTMATransmitterControl				     DIG2TransmitterControl
+#define SetCRTC_DPM_State                        GetConditionalGoldenSetting
+#define SetUniphyInstance                        ASIC_StaticPwrMgtStatusChange
+#define HPDInterruptService                      ReadHWAssistedI2CStatus
+#define EnableVGA_Access                         GetSCLKOverMCLKRatio
+#define EnableYUV                                GetDispObjectInfo                         
+#define DynamicClockGating                       EnableDispPowerGating
+#define SetupHWAssistedI2CStatus                 ComputeMemoryClockParam
+
+#define TMDSAEncoderControl                      PatchMCSetting
+#define LVDSEncoderControl                       MC_SEQ_Control
+#define LCD1OutputControl                        HW_Misc_Operation
+
+
+typedef struct _ATOM_MASTER_COMMAND_TABLE
+{
+  ATOM_COMMON_TABLE_HEADER           sHeader;
+  ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
+}ATOM_MASTER_COMMAND_TABLE;
+
+/****************************************************************************/	
+// Structures used in every command table
+/****************************************************************************/	
+typedef struct _ATOM_TABLE_ATTRIBUTE
+{
+#if ATOM_BIG_ENDIAN
+  USHORT  UpdatedByUtility:1;         //[15]=Table updated by utility flag
+  USHORT  PS_SizeInBytes:7;           //[14:8]=Size of parameter space in Bytes (multiple of a dword), 
+  USHORT  WS_SizeInBytes:8;           //[7:0]=Size of workspace in Bytes (in multiple of a dword), 
+#else
+  USHORT  WS_SizeInBytes:8;           //[7:0]=Size of workspace in Bytes (in multiple of a dword), 
+  USHORT  PS_SizeInBytes:7;           //[14:8]=Size of parameter space in Bytes (multiple of a dword), 
+  USHORT  UpdatedByUtility:1;         //[15]=Table updated by utility flag
+#endif
+}ATOM_TABLE_ATTRIBUTE;
+
+typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS
+{
+  ATOM_TABLE_ATTRIBUTE sbfAccess;
+  USHORT               susAccess;
+}ATOM_TABLE_ATTRIBUTE_ACCESS;
+
+/****************************************************************************/	
+// Common header for all command tables.
+// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. 
+// And the pointer actually points to this header.
+/****************************************************************************/	
+typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
+{
+  ATOM_COMMON_TABLE_HEADER CommonHeader;
+  ATOM_TABLE_ATTRIBUTE     TableAttribute;	
+}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
+
+/****************************************************************************/	
+// Structures used by ComputeMemoryEnginePLLTable
+/****************************************************************************/	
+#define COMPUTE_MEMORY_PLL_PARAM        1
+#define COMPUTE_ENGINE_PLL_PARAM        2
+#define ADJUST_MC_SETTING_PARAM         3
+
+/****************************************************************************/	
+// Structures used by AdjustMemoryControllerTable
+/****************************************************************************/	
+typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block 
+  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
+  ULONG ulClockFreq:24;
+#else
+  ULONG ulClockFreq:24;
+  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
+  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block 
+#endif
+}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
+#define POINTER_RETURN_FLAG             0x80
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+{
+  ULONG   ulClock;        //On return, the re-calculated clock based on the given Fb_div, Post_Div and ref_div
+  UCHAR   ucAction;       //0:reserved //1:Memory //2:Engine  
+  UCHAR   ucReserved;     //may expand to return larger Fbdiv later
+  UCHAR   ucFbDiv;        //return value
+  UCHAR   ucPostDiv;      //return value
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
+{
+  ULONG   ulClock;        //On return, [23:0] holds the real clock
+  UCHAR   ucAction;       //0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. it return ref_div to be written to register
+  USHORT  usFbDiv;		    //return Feedback value to be written to register
+  UCHAR   ucPostDiv;      //return post div to be written to register
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
+#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION   COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+
+
+#define SET_CLOCK_FREQ_MASK                     0x00FFFFFF  //Clock change tables only take bit [23:0] as the requested clock value
+#define USE_NON_BUS_CLOCK_MASK                  0x01000000  //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define USE_MEMORY_SELF_REFRESH_MASK            0x02000000	//Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE   0x04000000  //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define FIRST_TIME_CHANGE_CLOCK									0x08000000	//Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define SKIP_SW_PROGRAM_PLL											0x10000000	//Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
+#define USE_SS_ENABLED_PIXEL_CLOCK  USE_NON_BUS_CLOCK_MASK
+
+#define b3USE_NON_BUS_CLOCK_MASK                  0x01       //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define b3USE_MEMORY_SELF_REFRESH                 0x02	     //Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE   0x04       //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define b3FIRST_TIME_CHANGE_CLOCK									0x08       //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define b3SKIP_SW_PROGRAM_PLL											0x10			 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
+
+typedef struct _ATOM_COMPUTE_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG ulComputeClockFlag:8;                 // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+  ULONG ulClockFreq:24;                       // in unit of 10kHz
+#else
+  ULONG ulClockFreq:24;                       // in unit of 10kHz
+  ULONG ulComputeClockFlag:8;                 // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+#endif
+}ATOM_COMPUTE_CLOCK_FREQ;
+
+typedef struct _ATOM_S_MPLL_FB_DIVIDER
+{
+  USHORT usFbDivFrac;  
+  USHORT usFbDiv;  
+}ATOM_S_MPLL_FB_DIVIDER;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
+{
+  union
+  {
+    ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
+  };
+  UCHAR   ucRefDiv;                           //Output Parameter      
+  UCHAR   ucPostDiv;                          //Output Parameter      
+  UCHAR   ucCntlFlag;                         //Output Parameter      
+  UCHAR   ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+
+// ucCntlFlag
+#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN          1
+#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE            2
+#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE         4
+#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9						8
+
+
+// V4 is only used for APUs where the PLL is outside the GPU
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
+{
+#if ATOM_BIG_ENDIAN
+  ULONG  ucPostDiv;          //return parameter: post divider which is used to program to register directly
+  ULONG  ulClock:24;         //Input= target clock, output = actual clock 
+#else
+  ULONG  ulClock:24;         //Input= target clock, output = actual clock 
+  ULONG  ucPostDiv;          //return parameter: post divider which is used to program to register directly
+#endif
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+{
+  union
+  {
+    ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ULONG ulClockParams;                      //ULONG access for BE
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
+  };
+  UCHAR   ucRefDiv;                           //Output Parameter      
+  UCHAR   ucPostDiv;                          //Output Parameter      
+  union
+  {
+    UCHAR   ucCntlFlag;                       //Output Flags
+    UCHAR   ucInputFlag;                      //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
+  };
+  UCHAR   ucReserved;                       
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
+
+// ucInputFlag
+#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN  1   // 1-StrobeMode, 0-PerformanceMode
+
+// use for ComputeMemoryClockParamTable
+typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
+{
+  union
+  {
+    ULONG  ulClock;         
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output:UPPER_WORD=FB_DIV_INTEGER,  LOWER_WORD=FB_DIV_FRAC shl (16-FB_FRACTION_BITS)
+  };
+  UCHAR   ucDllSpeed;                         //Output 
+  UCHAR   ucPostDiv;                          //Output
+  union{
+    UCHAR   ucInputFlag;                      //Input : ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN: 1-StrobeMode, 0-PerformanceMode
+    UCHAR   ucPllCntlFlag;                    //Output: 
+  };
+  UCHAR   ucBWCntl;                       
+}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1;
+
+// definition of ucInputFlag
+#define MPLL_INPUT_FLAG_STROBE_MODE_EN          0x01
+// definition of ucPllCntlFlag
+#define MPLL_CNTL_FLAG_VCO_MODE_MASK            0x03 
+#define MPLL_CNTL_FLAG_BYPASS_DQ_PLL            0x04
+#define MPLL_CNTL_FLAG_QDR_ENABLE               0x08
+#define MPLL_CNTL_FLAG_AD_HALF_RATE             0x10
+
+//MPLL_CNTL_FLAG_BYPASS_AD_PLL is a misnamed alias kept for compatibility; it should be (and equals) MPLL_CNTL_FLAG_BYPASS_DQ_PLL
+#define MPLL_CNTL_FLAG_BYPASS_AD_PLL            0x04
+
+typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
+{
+  ATOM_COMPUTE_CLOCK_FREQ ulClock;
+  ULONG ulReserved[2];
+}DYNAMICE_MEMORY_SETTINGS_PARAMETER;
+
+typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
+{
+  ATOM_COMPUTE_CLOCK_FREQ ulClock;
+  ULONG ulMemoryClock;
+  ULONG ulReserved;
+}DYNAMICE_ENGINE_SETTINGS_PARAMETER;
+
+/****************************************************************************/	
+// Structures used by SetEngineClockTable
+/****************************************************************************/	
+typedef struct _SET_ENGINE_CLOCK_PARAMETERS
+{
+  ULONG ulTargetEngineClock;          //In 10kHz unit
+}SET_ENGINE_CLOCK_PARAMETERS;
+
+typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
+{
+  ULONG ulTargetEngineClock;          //In 10kHz unit
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_ENGINE_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/	
+// Structures used by SetMemoryClockTable
+/****************************************************************************/	
+typedef struct _SET_MEMORY_CLOCK_PARAMETERS
+{
+  ULONG ulTargetMemoryClock;          //In 10kHz unit
+}SET_MEMORY_CLOCK_PARAMETERS;
+
+typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION
+{
+  ULONG ulTargetMemoryClock;          //In 10kHz unit
+  COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_MEMORY_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/	
+// Structures used by ASIC_Init.ctb
+/****************************************************************************/	
+typedef struct _ASIC_INIT_PARAMETERS
+{
+  ULONG ulDefaultEngineClock;         //In 10kHz unit
+  ULONG ulDefaultMemoryClock;         //In 10kHz unit
+}ASIC_INIT_PARAMETERS;
+
+typedef struct _ASIC_INIT_PS_ALLOCATION
+{
+  ASIC_INIT_PARAMETERS sASICInitClocks;
+  SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure
+}ASIC_INIT_PS_ALLOCATION;
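+
+// Illustrative sketch (hypothetical helper, not part of the VBIOS interface):
+// ASIC_Init takes its default clocks in 10 kHz units, so 500 MHz engine and
+// 800 MHz memory clocks would be passed as below.
+static inline void example_asic_init(ASIC_INIT_PS_ALLOCATION *ps)
+{
+  ps->sASICInitClocks.ulDefaultEngineClock = 50000;   // 500 MHz in 10 kHz units
+  ps->sASICInitClocks.ulDefaultMemoryClock = 80000;   // 800 MHz in 10 kHz units
+  // ps->sReserved needs no initialization (see the comment above).
+}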
+
+/****************************************************************************/	
+// Structure used by DynamicClockGatingTable.ctb
+/****************************************************************************/	
+typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS 
+{
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[3];
+}DYNAMIC_CLOCK_GATING_PARAMETERS;
+#define  DYNAMIC_CLOCK_GATING_PS_ALLOCATION  DYNAMIC_CLOCK_GATING_PARAMETERS
+
+/****************************************************************************/	
+// Structure used by EnableDispPowerGatingTable.ctb
+/****************************************************************************/	
+typedef struct _ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 
+{
+  UCHAR ucDispPipeId;                 // ATOM_CRTC1, ATOM_CRTC2, ...
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[2];
+}ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1;
+
+/****************************************************************************/	
+// Structure used by EnableASIC_StaticPwrMgtTable.ctb
+/****************************************************************************/	
+typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+{
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR ucPadding[3];
+}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
+#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION  ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by DAC_LoadDetectionTable.ctb
+/****************************************************************************/	
+typedef struct _DAC_LOAD_DETECTION_PARAMETERS
+{
+  USHORT usDeviceID;                  //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT}
+  UCHAR  ucDacType;                   //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC}
+  UCHAR  ucMisc;                      //Valid only when table revision is 1.3 or above
+}DAC_LOAD_DETECTION_PARAMETERS;
+
+// DAC_LOAD_DETECTION_PARAMETERS.ucMisc
+#define DAC_LOAD_MISC_YPrPb						0x01
+
+typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION
+{
+  DAC_LOAD_DETECTION_PARAMETERS            sDacload;
+  ULONG                                    Reserved[2];// Don't set this one, allocation for EXT DAC
+}DAC_LOAD_DETECTION_PS_ALLOCATION;
+
+/****************************************************************************/	
+// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb
+/****************************************************************************/	
+typedef struct _DAC_ENCODER_CONTROL_PARAMETERS 
+{
+  USHORT usPixelClock;                // in 10kHz; for BIOS convenience
+  UCHAR  ucDacStandard;               // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0)
+  UCHAR  ucAction;                    // 0: turn off encoder
+                                      // 1: setup and turn on encoder
+                                      // 7: ATOM_ENCODER_INIT Initialize DAC
+}DAC_ENCODER_CONTROL_PARAMETERS;
+
+#define DAC_ENCODER_CONTROL_PS_ALLOCATION  DAC_ENCODER_CONTROL_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by DIG1EncoderControlTable
+//                    DIG2EncoderControlTable
+//                    ExternalEncoderControlTable
+/****************************************************************************/	
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;      // in 10kHz; for BIOS convenience
+  UCHAR  ucConfig;
+                            // [2] Link Select:
+                            // =0: PHY linkA if bfLane<3
+                            // =1: PHY linkB if bfLanes<3
+                            // =0: PHY linkA+B if bfLanes=3
+                            // [3] Transmitter Sel
+                            // =0: UNIPHY or PCIEPHY
+                            // =1: LVTMA
+  UCHAR ucAction;           // =0: turn off encoder
+                            // =1: turn on encoder
+  UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucReserved[2];
+}DIG_ENCODER_CONTROL_PARAMETERS;
+#define DIG_ENCODER_CONTROL_PS_ALLOCATION			  DIG_ENCODER_CONTROL_PARAMETERS
+#define EXTERNAL_ENCODER_CONTROL_PARAMETER			DIG_ENCODER_CONTROL_PARAMETERS
+
+//ucConfig
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK				0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ		0x00
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ		0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ		0x02
+#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK				  0x04
+#define ATOM_ENCODER_CONFIG_LINKA								  0x00
+#define ATOM_ENCODER_CONFIG_LINKB								  0x04
+#define ATOM_ENCODER_CONFIG_LINKA_B							  ATOM_TRANSMITTER_CONFIG_LINKA
+#define ATOM_ENCODER_CONFIG_LINKB_A							  ATOM_ENCODER_CONFIG_LINKB
+#define ATOM_ENCODER_CONFIG_TRANSMITTER_SEL_MASK	0x08
+#define ATOM_ENCODER_CONFIG_UNIPHY							  0x00
+#define ATOM_ENCODER_CONFIG_LVTMA								  0x08
+#define ATOM_ENCODER_CONFIG_TRANSMITTER1				  0x00
+#define ATOM_ENCODER_CONFIG_TRANSMITTER2				  0x08
+#define ATOM_ENCODER_CONFIG_DIGB								  0x80			// VBIOS internal use; outside SW should set this bit to 0
+// ucAction
+// ATOM_ENABLE:  Enable Encoder
+// ATOM_DISABLE: Disable Encoder
+
+//ucEncoderMode
+#define ATOM_ENCODER_MODE_DP											0
+#define ATOM_ENCODER_MODE_LVDS										1
+#define ATOM_ENCODER_MODE_DVI											2
+#define ATOM_ENCODER_MODE_HDMI										3
+#define ATOM_ENCODER_MODE_SDVO										4
+#define ATOM_ENCODER_MODE_DP_AUDIO                5
+#define ATOM_ENCODER_MODE_TV											13
+#define ATOM_ENCODER_MODE_CV											14
+#define ATOM_ENCODER_MODE_CRT											15
+#define ATOM_ENCODER_MODE_DVO											16
+#define ATOM_ENCODER_MODE_DP_SST                  ATOM_ENCODER_MODE_DP    // For DP1.2
+#define ATOM_ENCODER_MODE_DP_MST                  5                       // For DP1.2
+
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:2;
+    UCHAR ucTransmitterSel:2;     // =0: UniphyAB, =1: UniphyCD  =2: UniphyEF
+    UCHAR ucLinkSel:1;            // =0: linkA/C/E =1: linkB/D/F
+    UCHAR ucReserved:1;
+    UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
+#else
+    UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
+    UCHAR ucReserved:1;
+    UCHAR ucLinkSel:1;            // =0: linkA/C/E =1: linkB/D/F
+    UCHAR ucTransmitterSel:2;     // =0: UniphyAB, =1: UniphyCD  =2: UniphyEF
+    UCHAR ucReserved1:2;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V2;
+
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
+{
+  USHORT usPixelClock;      // in 10kHz; for BIOS convenience
+  ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
+  UCHAR ucAction;                                       
+  UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucStatus;           // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS
+  UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V2;
+
+//ucConfig
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK				0x01
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ		  0x00
+#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ		  0x01
+#define ATOM_ENCODER_CONFIG_V2_LINK_SEL_MASK				  0x04
+#define ATOM_ENCODER_CONFIG_V2_LINKA								  0x00
+#define ATOM_ENCODER_CONFIG_V2_LINKB								  0x04
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER_SEL_MASK	  0x18
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER1				    0x00
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2				    0x08
+#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3				    0x10
+
+// ucAction:
+// ATOM_DISABLE
+// ATOM_ENABLE
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START       0x08
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1    0x09
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2    0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3    0x13
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE    0x0b
+#define ATOM_ENCODER_CMD_DP_VIDEO_OFF                 0x0c
+#define ATOM_ENCODER_CMD_DP_VIDEO_ON                  0x0d
+#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS    0x0e
+#define ATOM_ENCODER_CMD_SETUP                        0x0f
+#define ATOM_ENCODER_CMD_SETUP_PANEL_MODE             0x10
+
+// ucStatus
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE    0x10
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE  0x00
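+
+// Illustrative sketch (hypothetical helper, not part of the VBIOS interface):
+// the command codes above imply a DP link-training sequence.  A driver would
+// issue them in the order below, executing the encoder control table after
+// each assignment (execution is left as a placeholder), and finally compare
+// ucStatus against the status defines above.
+static inline void example_dp_link_training(DIG_ENCODER_CONTROL_PARAMETERS_V2 *args)
+{
+  args->ucAction = ATOM_ENCODER_CMD_DP_LINK_TRAINING_START;     // ...execute table...
+  args->ucAction = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;  // ...execute table...
+  args->ucAction = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;  // ...execute table...
+  args->ucAction = ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE;  // ...execute table...
+  args->ucAction = ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS;
+  // ...execute table; on success args->ucStatus should read back as
+  // ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE.
+}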
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=3
+// The ENABLE sub-function of the following table is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is likewise issued by the driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:1;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved:3;
+    UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
+#else
+    UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
+    UCHAR ucReserved:3;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V3;
+
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK				0x03
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ		  0x00
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ		  0x01
+#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL					  0x70
+#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER					  0x00
+#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER					  0x10
+#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER					  0x20
+#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER					  0x30
+#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER					  0x40
+#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER					  0x50
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
+{
+  USHORT usPixelClock;      // in 10kHz; for BIOS convenience
+  ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
+  UCHAR ucAction;                              
+  union {
+    UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+                            // =5: DP audio
+    UCHAR ucPanelMode;      // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
+	                    // =0:     external DP
+	                    // =1:     internal DP2
+	                    // =0x11:  internal DP1 for NutMeg/Travis DP translator
+  };
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucBitPerColor;      // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+  UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V3;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=4
+// starting from NI
+// The ENABLE sub-function of the following table is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is likewise issued by the driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:1;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved:2;
+    UCHAR ucDPLinkRate:2;         // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz    <= Changed comparing to previous version
+#else
+    UCHAR ucDPLinkRate:2;         // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz    <= Changed comparing to previous version
+    UCHAR ucReserved:2;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred as DIGA/B/C/D/E/F)
+    UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V4;
+
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK				0x03
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ		  0x00
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ		  0x01
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ		  0x02
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ		  0x03
+#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL					  0x70
+#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER					  0x00
+#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER					  0x10
+#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER					  0x20
+#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER					  0x30
+#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER					  0x40
+#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER					  0x50
+#define ATOM_ENCODER_CONFIG_V4_DIG6_ENCODER					  0x60
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+{
+  USHORT usPixelClock;      // in 10kHz; for BIOS convenience
+  union{
+  ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR ucAction;                              
+  union {
+    UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+                            // =5: DP audio
+    UCHAR ucPanelMode;      // only valid when ucAction == ATOM_ENCODER_CMD_SETUP_PANEL_MODE
+	                    // =0:     external DP
+	                    // =1:     internal DP2
+	                    // =0x11:  internal DP1 for NutMeg/Travis DP translator
+  };
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucBitPerColor;      // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+  UCHAR  ucHPD_ID;          // HPD ID (1-6); =0 means to skip HPD programming. New compared to the previous version
+}DIG_ENCODER_CONTROL_PARAMETERS_V4;
+
+// define ucBitPerColor: 
+#define PANEL_BPC_UNDEFINE                               0x00
+#define PANEL_6BIT_PER_COLOR                             0x01 
+#define PANEL_8BIT_PER_COLOR                             0x02
+#define PANEL_10BIT_PER_COLOR                            0x03
+#define PANEL_12BIT_PER_COLOR                            0x04
+#define PANEL_16BIT_PER_COLOR                            0x05
+
+//define ucPanelMode
+#define DP_PANEL_MODE_EXTERNAL_DP_MODE                   0x00
+#define DP_PANEL_MODE_INTERNAL_DP2_MODE                  0x01
+#define DP_PANEL_MODE_INTERNAL_DP1_MODE                  0x11
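+
+// Illustrative sketch (hypothetical helper, not part of the VBIOS interface):
+// because DIG_ENCODER_CONTROL_PARAMETERS_V4 exposes the config both as
+// bitfields (acConfig) and as a raw byte (ucConfig), the same selection can
+// be written either way.  Selecting DIG1 with a 2.70 GHz DP link rate:
+static inline void example_encoder_config_v4(DIG_ENCODER_CONTROL_PARAMETERS_V4 *args)
+{
+  // Raw-byte form: 0x10 | 0x01 = 0x11.
+  args->ucConfig = ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER |
+                   ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+  // Equivalent bitfield form:
+  //   args->acConfig.ucDigSel = 1;  args->acConfig.ucDPLinkRate = 1;
+}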
+
+/****************************************************************************/	
+// Structures used by UNIPHYTransmitterControlTable
+//                    LVTMATransmitterControlTable
+//                    DVOOutputControlTable
+/****************************************************************************/	
+typedef struct _ATOM_DP_VS_MODE
+{
+  UCHAR ucLaneSel;
+  UCHAR ucLaneSet;
+}ATOM_DP_VS_MODE;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS
+{
+	union
+	{
+    USHORT usPixelClock;    // in 10kHz; for BIOS convenience
+    USHORT usInitInfo;      // when initializing UNIPHY, the lower 8 bits carry the connector type defined in objectid.h
+    ATOM_DP_VS_MODE asMode; // DP voltage swing mode
+	};
+  UCHAR ucConfig;
+                            // [0]=0: 4 lane Link,
+                            //    =1: 8 lane Link ( Dual Links TMDS )
+                            // [1]=0: InCoherent mode
+                            //    =1: Coherent Mode
+                            // [2] Link Select:
+                            // =0: PHY linkA   if bfLane<3
+                            // =1: PHY linkB   if bfLanes<3
+                            // =0: PHY linkA+B if bfLanes=3
+                            // [5:4] PCIE lane Sel
+                            // =0: lane 0~3 or 0~7
+                            // =1: lane 4~7
+                            // =2: lane 8~11 or 8~15
+                            // =3: lane 12~15
+  UCHAR ucAction;           // =0: turn off encoder
+                            // =1: turn on encoder
+  UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS;
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION		DIG_TRANSMITTER_CONTROL_PARAMETERS					
+
+//ucInitInfo
+#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK	0x00ff			
+
+//ucConfig 
+#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK			0x01
+#define ATOM_TRANSMITTER_CONFIG_COHERENT				0x02
+#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK		0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA						0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKB						0x04
+#define ATOM_TRANSMITTER_CONFIG_LINKA_B					0x00			
+#define ATOM_TRANSMITTER_CONFIG_LINKB_A					0x04
+
+#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK	0x08			// only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER		0x00				// only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER		0x08				// only used when ATOM_TRANSMITTER_ACTION_ENABLE
+
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK			0x30
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL			0x00
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PCIE			0x20
+#define ATOM_TRANSMITTER_CONFIG_CLKSRC_XTALIN		0x30
+#define ATOM_TRANSMITTER_CONFIG_LANE_SEL_MASK		0xc0
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_3				0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_0_7				0x00
+#define ATOM_TRANSMITTER_CONFIG_LANE_4_7				0x40
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_11				0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_8_15				0x80
+#define ATOM_TRANSMITTER_CONFIG_LANE_12_15			0xc0
+
+//ucAction
+#define ATOM_TRANSMITTER_ACTION_DISABLE					       0
+#define ATOM_TRANSMITTER_ACTION_ENABLE					       1
+#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF				       2
+#define ATOM_TRANSMITTER_ACTION_LCD_BLON				       3
+#define ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL  4
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_START		 5
+#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_STOP			 6
+#define ATOM_TRANSMITTER_ACTION_INIT						       7
+#define ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT	       8
+#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT		       9
+#define ATOM_TRANSMITTER_ACTION_SETUP						       10
+#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH           11
+#define ATOM_TRANSMITTER_ACTION_POWER_ON               12
+#define ATOM_TRANSMITTER_ACTION_POWER_OFF              13
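+
+// Illustrative sketch (hypothetical helper, not part of the VBIOS interface):
+// the action codes above imply an ordering: INIT once after boot, SETUP then
+// ENABLE on each modeset, DISABLE on teardown.  Parameter setup only; table
+// execution is a placeholder.
+static inline void example_transmitter_bringup(DIG_TRANSMITTER_CONTROL_PARAMETERS *args)
+{
+  args->ucAction = ATOM_TRANSMITTER_ACTION_INIT;    // ...execute table once after boot...
+  args->ucAction = ATOM_TRANSMITTER_ACTION_SETUP;   // ...execute table...
+  args->ucAction = ATOM_TRANSMITTER_ACTION_ENABLE;  // ...execute table...
+}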
+
+// Following are used for DigTransmitterControlTable ver1.2
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucReserved:1;               
+  UCHAR fDPConnector:1;             //bit4=0: DP connector  =1: non-DP connector
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+  UCHAR fDPConnector:1;             //bit4=0: DP connector  =1: non-DP connector
+  UCHAR ucReserved:1;               
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V2;
+
+//ucConfig 
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR			0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT				          0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK		        0x04
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKA  			            0x00
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKB				            0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK	        0x08
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER		          0x00				// only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER		          0x08				// only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+
+// Bit4
+#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR			        0x10
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1           	0x00	//AB
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2           	0x40	//CD
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3           	0x80	//EF
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2
+{
+	union
+	{
+    USHORT usPixelClock;    // in 10kHz; for BIOS convenience
+    USHORT usInitInfo;      // when initializing UNIPHY, the lower 8 bits carry the connector type defined in objectid.h
+    ATOM_DP_VS_MODE asMode; // DP voltage swing mode
+  };
+  ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
+  UCHAR ucAction;           // defined as ATOM_TRANSMITTER_ACTION_XXX
+  UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V3;
+
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
+{
+	union
+	{
+    USHORT usPixelClock;    // in 10kHz; for BIOS convenience
+    USHORT usInitInfo;      // when initializing UNIPHY, the lower 8 bits carry the connector type defined in objectid.h
+    ATOM_DP_VS_MODE asMode; // DP voltage swing mode
+  };
+  ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig;
+  UCHAR ucAction;           // defined as ATOM_TRANSMITTER_ACTION_XXX
+  UCHAR ucLaneNum;
+  UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3;
+
+//ucConfig 
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR			0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT				          0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK		        0x04
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKA  			            0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKB				            0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK	        0x08
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER		          0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER		          0x08
+
+// Bit5:4
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK 	        0x30
+#define ATOM_TRASMITTER_CONFIG_V3_P1PLL          		        0x00
+#define ATOM_TRASMITTER_CONFIG_V3_P2PLL		                  0x10
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT            0x20
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1           	0x00	//AB
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2           	0x40	//CD
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3           	0x80	//EF
+
+
+/****************************************************************************/	
+// Structures used by UNIPHYTransmitterControlTable V1.4
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=4
+/****************************************************************************/	
+typedef struct _ATOM_DP_VS_MODE_V4
+{
+  UCHAR ucLaneSel;
+ 	union
+ 	{  
+ 	  UCHAR ucLaneSet;
+ 	  struct {
+#if ATOM_BIG_ENDIAN
+ 		  UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
+ 		  UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
+ 		  UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
+#else
+ 		  UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
+ 		  UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
+ 		  UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
+#endif
+ 		};
+ 	}; 
+}ATOM_DP_VS_MODE_V4;
+ 
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V4;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+{
+  union
+  {
+    USHORT usPixelClock;    // in 10kHz; for BIOS convenience
+    USHORT usInitInfo;      // when initializing UNIPHY, the lower 8 bits carry the connector type defined in objectid.h
+    ATOM_DP_VS_MODE_V4 asMode; // DP voltage swing mode; redefined compared to the previous version
+  };
+  union
+  {
+  ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR ucAction;           // defined as ATOM_TRANSMITTER_ACTION_XXX
+  UCHAR ucLaneNum;
+  UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
+
+//ucConfig 
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR			0x01
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT				          0x02
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK		        0x04
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKA  			            0x00			
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKB				            0x04
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK	        0x08
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER		          0x00				 
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER		          0x08				
+// Bit5:4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 	        0x30
+#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL         		        0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL		                0x10
+#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL		                0x20   // New in _V4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT           0x30   // Changed comparing to V3
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1           	0x00	//AB
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2           	0x40	//CD
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3           	0x80	//EF
+
+
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V5
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucReservd1:1;
+  UCHAR ucHPDSel:3;
+  UCHAR ucPhyClkSrcId:2;            
+  UCHAR ucCoherentMode:1;            
+  UCHAR ucReserved:1;
+#else
+  UCHAR ucReserved:1;
+  UCHAR ucCoherentMode:1;            
+  UCHAR ucPhyClkSrcId:2;            
+  UCHAR ucHPDSel:3;
+  UCHAR ucReservd1:1;
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V5;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
+{
+  USHORT usSymClock;                // Encoder clock in 10kHz: (DP mode) = link clock/10; (TMDS/LVDS/HDMI) = pixel clock; (HDMI deep color) = pixel clock * deep_color_ratio
+  UCHAR  ucPhyId;                   // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4= UNIPHYE 5=UNIPHYF
+  UCHAR  ucAction;                  // defined as ATOM_TRANSMITTER_ACTION_xxx
+  UCHAR  ucLaneNum;                 // indicate lane number 1-8
+  UCHAR  ucConnObjId;               // Connector Object Id defined in ObjectId.h
+  UCHAR  ucDigMode;                 // indicate DIG mode
+  union{
+  ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR  ucDigEncoderSel;           // indicate DIG front end encoder 
+  UCHAR  ucDPLaneSet;
+  UCHAR  ucReserved;
+  UCHAR  ucReserved1;
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5;
+
+//ucPhyId
+#define ATOM_PHY_ID_UNIPHYA                                 0  
+#define ATOM_PHY_ID_UNIPHYB                                 1
+#define ATOM_PHY_ID_UNIPHYC                                 2
+#define ATOM_PHY_ID_UNIPHYD                                 3
+#define ATOM_PHY_ID_UNIPHYE                                 4
+#define ATOM_PHY_ID_UNIPHYF                                 5
+#define ATOM_PHY_ID_UNIPHYG                                 6
+
+// ucDigEncoderSel
+#define ATOM_TRANMSITTER_V5__DIGA_SEL                       0x01
+#define ATOM_TRANMSITTER_V5__DIGB_SEL                       0x02
+#define ATOM_TRANMSITTER_V5__DIGC_SEL                       0x04
+#define ATOM_TRANMSITTER_V5__DIGD_SEL                       0x08
+#define ATOM_TRANMSITTER_V5__DIGE_SEL                       0x10
+#define ATOM_TRANMSITTER_V5__DIGF_SEL                       0x20
+#define ATOM_TRANMSITTER_V5__DIGG_SEL                       0x40
+
+// ucDigMode
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP                      0
+#define ATOM_TRANSMITTER_DIGMODE_V5_LVDS                    1
+#define ATOM_TRANSMITTER_DIGMODE_V5_DVI                     2
+#define ATOM_TRANSMITTER_DIGMODE_V5_HDMI                    3
+#define ATOM_TRANSMITTER_DIGMODE_V5_SDVO                    4
+#define ATOM_TRANSMITTER_DIGMODE_V5_DP_MST                  5
+
+// ucDPLaneSet
+#define DP_LANE_SET__0DB_0_4V                               0x00
+#define DP_LANE_SET__0DB_0_6V                               0x01
+#define DP_LANE_SET__0DB_0_8V                               0x02
+#define DP_LANE_SET__0DB_1_2V                               0x03
+#define DP_LANE_SET__3_5DB_0_4V                             0x08  
+#define DP_LANE_SET__3_5DB_0_6V                             0x09
+#define DP_LANE_SET__3_5DB_0_8V                             0x0a
+#define DP_LANE_SET__6DB_0_4V                               0x10
+#define DP_LANE_SET__6DB_0_6V                               0x11
+#define DP_LANE_SET__9_5DB_0_4V                             0x18  
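+
+// Illustrative note (hypothetical helper, not part of the VBIOS interface):
+// the DP_LANE_SET values follow the ATOM_DP_VS_MODE_V4 packing above, with
+// the voltage-swing level in bits[2:0] and the pre-emphasis level in
+// bits[5:3]; e.g. 3.5 dB pre-emphasis (level 1) at 0.6 V swing (level 1) is
+// (1 << 3) | 1 = 0x09 = DP_LANE_SET__3_5DB_0_6V.
+static inline UCHAR example_pack_dp_lane_set(UCHAR pre_emphasis_level, UCHAR voltage_swing_level)
+{
+  return (UCHAR)((pre_emphasis_level << 3) | voltage_swing_level);
+}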
+
+// ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
+// Bit1
+#define ATOM_TRANSMITTER_CONFIG_V5_COHERENT				          0x02
+
+// Bit3:2
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK 	        0x0c
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT		    0x02
+
+#define ATOM_TRANSMITTER_CONFIG_V5_P1PLL         		        0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_P2PLL		                0x04
+#define ATOM_TRANSMITTER_CONFIG_V5_P0PLL		                0x08   
+#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT           0x0c
+// Bit6:4
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK		          0x70
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT		      0x04
+
+#define ATOM_TRANSMITTER_CONFIG_V5_NO_HPD_SEL				        0x00
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL				          0x10
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL				          0x20
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL				          0x30
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL				          0x40
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL				          0x50
+#define ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL				          0x60
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5            DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
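+
+// Illustrative sketch (hypothetical helper, not part of the VBIOS interface):
+// per the usSymClock comment, in DP mode the field is link clock/10 in 10 kHz
+// units, so a 2.70 GHz link gives a 270 MHz symbol clock, i.e. 27000.  A
+// 4-lane DP enable on UNIPHYA fed by the DIGA front end might look like:
+static inline void example_transmitter_v1_5_dp(DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 *args)
+{
+  args->usSymClock      = 27000;                          // 270 MHz in 10 kHz units
+  args->ucPhyId         = ATOM_PHY_ID_UNIPHYA;
+  args->ucAction        = ATOM_TRANSMITTER_ACTION_ENABLE;
+  args->ucLaneNum       = 4;
+  args->ucDigMode       = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+  args->ucDigEncoderSel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+}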
+
+
+/****************************************************************************/	
+// Structures used by ExternalEncoderControlTable V1.3
+// ASIC Families: Evergreen, Llano, NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/	
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
+{
+  union{
+  USHORT usPixelClock;      // pixel clock in 10kHz, valid when ucAction=SETUP/ENABLE_OUTPUT
+  USHORT usConnectorId;     // connector id, valid when ucAction = INIT
+  };
+  UCHAR  ucConfig;          // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT  
+  UCHAR  ucAction;          // 
+  UCHAR  ucEncoderMode;     // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
+  UCHAR  ucLaneNum;         // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT  
+  UCHAR  ucBitPerColor;     // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= DP
+  UCHAR  ucReserved;        
+}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
+
+// ucAction
+#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT         0x00
+#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT          0x01
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT           0x07
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP          0x0f
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF   0x10
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING       0x11
+#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION      0x12
+#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP              0x14
+
+// ucConfig
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK				0x03
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ		  0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ		  0x01
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ		  0x02
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK		    0x70
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1		            0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2		            0x10
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3		            0x20
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
+{
+  EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
+  ULONG ulReserved[2];
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
+
+
+/****************************************************************************/	
+// Structures used by DAC1OutputControlTable
+//                    DAC2OutputControlTable
+//                    LVTMAOutputControlTable  (Before DEC30)
+//                    TMDSAOutputControlTable  (Before DEC30)
+/****************************************************************************/	
+typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+{
+  UCHAR  ucAction;                    // Possible input: ATOM_ENABLE or ATOM_DISABLE
+                                      // When the display is LCD, in addition to the above:
+                                      // ATOM_LCD_BLOFF or ATOM_LCD_BLON or ATOM_LCD_BL_BRIGHTNESS_CONTROL or
+                                      // ATOM_LCD_SELFTEST_START or ATOM_LCD_SELFTEST_STOP
+                                      
+  UCHAR  aucPadding[3];               // padding to DWORD aligned
+}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
+
+#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+
+
+#define CRT1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 
+#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CRT2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS 
+#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define CV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define TV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define TV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DFP2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DFP2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define LCD1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define LCD1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define DVO_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define DVO_OUTPUT_CONTROL_PS_ALLOCATION   DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
+#define DVO_OUTPUT_CONTROL_PARAMETERS_V3	 DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by BlankCRTCTable
+/****************************************************************************/	
+typedef struct _BLANK_CRTC_PARAMETERS
+{
+  UCHAR  ucCRTC;                    	// ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR  ucBlanking;                  // ATOM_BLANKING or ATOM_BLANKINGOFF
+  USHORT usBlackColorRCr;
+  USHORT usBlackColorGY;
+  USHORT usBlackColorBCb;
+}BLANK_CRTC_PARAMETERS;
+#define BLANK_CRTC_PS_ALLOCATION    BLANK_CRTC_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by EnableCRTCTable
+//                    EnableCRTCMemReqTable
+//                    UpdateCRTC_DoubleBufferRegistersTable
+/****************************************************************************/	
+typedef struct _ENABLE_CRTC_PARAMETERS
+{
+  UCHAR ucCRTC;                    	  // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE 
+  UCHAR ucPadding[2];
+}ENABLE_CRTC_PARAMETERS;
+#define ENABLE_CRTC_PS_ALLOCATION   ENABLE_CRTC_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by SetCRTC_OverScanTable
+/****************************************************************************/	
+typedef struct _SET_CRTC_OVERSCAN_PARAMETERS
+{
+  USHORT usOverscanRight;             // right
+  USHORT usOverscanLeft;              // left
+  USHORT usOverscanBottom;            // bottom
+  USHORT usOverscanTop;               // top
+  UCHAR  ucCRTC;                      // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR  ucPadding[3];
+}SET_CRTC_OVERSCAN_PARAMETERS;
+#define SET_CRTC_OVERSCAN_PS_ALLOCATION  SET_CRTC_OVERSCAN_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by SetCRTC_ReplicationTable
+/****************************************************************************/	
+typedef struct _SET_CRTC_REPLICATION_PARAMETERS
+{
+  UCHAR ucH_Replication;              // horizontal replication
+  UCHAR ucV_Replication;              // vertical replication
+  UCHAR usCRTC;                       // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucPadding;
+}SET_CRTC_REPLICATION_PARAMETERS;
+#define SET_CRTC_REPLICATION_PS_ALLOCATION  SET_CRTC_REPLICATION_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by SelectCRTC_SourceTable
+/****************************************************************************/	
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS
+{
+  UCHAR ucCRTC;                    	  // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucDevice;                     // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|....
+  UCHAR ucPadding[2];
+}SELECT_CRTC_SOURCE_PARAMETERS;
+#define SELECT_CRTC_SOURCE_PS_ALLOCATION  SELECT_CRTC_SOURCE_PARAMETERS
+
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2
+{
+  UCHAR ucCRTC;                    	  // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR ucEncoderID;                  // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
+  UCHAR ucEncodeMode;									// Encoding mode, only valid when using DIG1/DIG2/DVO
+  UCHAR ucPadding;
+}SELECT_CRTC_SOURCE_PARAMETERS_V2;
+
+//ucEncoderID
+//#define ASIC_INT_DAC1_ENCODER_ID    						0x00 
+//#define ASIC_INT_TV_ENCODER_ID									0x02
+//#define ASIC_INT_DIG1_ENCODER_ID								0x03
+//#define ASIC_INT_DAC2_ENCODER_ID								0x04
+//#define ASIC_EXT_TV_ENCODER_ID									0x06
+//#define ASIC_INT_DVO_ENCODER_ID									0x07
+//#define ASIC_INT_DIG2_ENCODER_ID								0x09
+//#define ASIC_EXT_DIG_ENCODER_ID									0x05
+
+//ucEncodeMode
+//#define ATOM_ENCODER_MODE_DP										0
+//#define ATOM_ENCODER_MODE_LVDS									1
+//#define ATOM_ENCODER_MODE_DVI										2
+//#define ATOM_ENCODER_MODE_HDMI									3
+//#define ATOM_ENCODER_MODE_SDVO									4
+//#define ATOM_ENCODER_MODE_TV										13
+//#define ATOM_ENCODER_MODE_CV										14
+//#define ATOM_ENCODER_MODE_CRT										15
+
+/****************************************************************************/	
+// Structures used by SetPixelClockTable
+//                    GetPixelClockTable 
+/****************************************************************************/	
+//Major revision=1, Minor revision=1
+typedef struct _PIXEL_CLOCK_PARAMETERS
+{
+  USHORT usPixelClock;                // in 10kHz unit; for BIOS convenience; = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+                                      // 0 means disable PPLL
+  USHORT usRefDiv;                    // Reference divider
+  USHORT usFbDiv;                     // feedback divider
+  UCHAR  ucPostDiv;                   // post divider	
+  UCHAR  ucFracFbDiv;                 // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+  UCHAR  ucRefDivSrc;                 // ATOM_PJITTER or ATOM_NONPJITTER
+  UCHAR  ucCRTC;                      // Which CRTC uses this Ppll
+  UCHAR  ucPadding;
+}PIXEL_CLOCK_PARAMETERS;
+
+//Major revision=1, Minor revision=2, adds ucMiscInfo
+//ucMiscInfo:
+#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
+#define MISC_DEVICE_INDEX_MASK        0xF0
+#define MISC_DEVICE_INDEX_SHIFT       4
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V2
+{
+  USHORT usPixelClock;                // in 10kHz unit; for BIOS convenience; = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+                                      // 0 means disable PPLL
+  USHORT usRefDiv;                    // Reference divider
+  USHORT usFbDiv;                     // feedback divider
+  UCHAR  ucPostDiv;                   // post divider	
+  UCHAR  ucFracFbDiv;                 // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+  UCHAR  ucRefDivSrc;                 // ATOM_PJITTER or ATOM_NONPJITTER
+  UCHAR  ucCRTC;                      // Which CRTC uses this Ppll
+  UCHAR  ucMiscInfo;                  // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog
+}PIXEL_CLOCK_PARAMETERS_V2;
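+
+// Illustrative note (hypothetical helper, not part of the VBIOS interface):
+// plugging numbers into the divider formula from the usPixelClock comment:
+// with a 27 MHz reference (2700 in 10 kHz units), FB_Div=100, Ref_Div=2 and
+// Post_Div=5, the output is 2700*100/(2*5) = 27000, i.e. 270 MHz.
+static inline ULONG example_pll_output_10khz(ULONG ref_clk_10khz, ULONG fb_div,
+                                             ULONG ref_div, ULONG post_div)
+{
+  return (ref_clk_10khz * fb_div) / (ref_div * post_div);
+}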
+
+//Major revision=1, Minor revision=3, structure/definition change
+//ucEncoderMode:
+//ATOM_ENCODER_MODE_DP
+//ATOM_ENCODER_MODE_LVDS
+//ATOM_ENCODER_MODE_DVI
+//ATOM_ENCODER_MODE_HDMI
+//ATOM_ENCODER_MODE_SDVO
+//ATOM_ENCODER_MODE_TV										13
+//ATOM_ENCODER_MODE_CV										14
+//ATOM_ENCODER_MODE_CRT										15
+
+//ucDVOConfig
+//#define DVO_ENCODER_CONFIG_RATE_SEL							0x01
+//#define DVO_ENCODER_CONFIG_DDR_SPEED						0x00
+//#define DVO_ENCODER_CONFIG_SDR_SPEED						0x01
+//#define DVO_ENCODER_CONFIG_OUTPUT_SEL						0x0c
+//#define DVO_ENCODER_CONFIG_LOW12BIT							0x00
+//#define DVO_ENCODER_CONFIG_UPPER12BIT						0x04
+//#define DVO_ENCODER_CONFIG_24BIT								0x08
+
+//ucMiscInfo: also changed, see below
+#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL						0x01
+#define PIXEL_CLOCK_MISC_VGA_MODE										0x02
+#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK							0x04
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1							0x00
+#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2							0x04
+#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK			0x08
+#define PIXEL_CLOCK_MISC_REF_DIV_SRC                    0x10
+// V1.4 for RoadRunner
+#define PIXEL_CLOCK_V4_MISC_SS_ENABLE               0x10
+#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE           0x20
+
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V3
+{
+  USHORT usPixelClock;                // in 10kHz unit; for BIOS convenience; = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+                                      // 0 means disable PPLL. For VGA PPLL, make sure this value is not 0.
+  USHORT usRefDiv;                    // Reference divider
+  USHORT usFbDiv;                     // feedback divider
+  UCHAR  ucPostDiv;                   // post divider	
+  UCHAR  ucFracFbDiv;                 // fractional feedback divider
+  UCHAR  ucPpll;                      // ATOM_PPLL1 or ATOM_PPLL2
+  UCHAR  ucTransmitterId;             // graphic encoder id defined in objectId.h
+	union
+	{
+    UCHAR  ucEncoderMode;             // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/
+    UCHAR  ucDVOConfig;               // when using DVO, need to know SDR/DDR, 12-bit or 24-bit
+	};
+  UCHAR  ucMiscInfo;                  // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel
+                                      // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source
+                                      // bit[4]=0:use XTALIN as the source of reference divider,=1 use the pre-defined clock as the source of reference divider
+}PIXEL_CLOCK_PARAMETERS_V3;
+
+#define PIXEL_CLOCK_PARAMETERS_LAST			PIXEL_CLOCK_PARAMETERS_V2
+#define GET_PIXEL_CLOCK_PS_ALLOCATION		PIXEL_CLOCK_PARAMETERS_LAST
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V5
+{
+  UCHAR  ucCRTC;             // ATOM_CRTC1~6, indicates which CRTC controller
+                             // the pixel clock drives; not used in the DCPLL case.
+  union{
+  UCHAR  ucReserved;
+  UCHAR  ucFracFbDiv;        // [gphan] temporary, to prevent a build problem; remove after the driver code is changed.
+  };
+  USHORT usPixelClock;       // target the pixel clock to drive the CRTC timing
+                             // 0 means disable PPLL/DCPLL. 
+  USHORT usFbDiv;            // feedback divider integer part. 
+  UCHAR  ucPostDiv;          // post divider. 
+  UCHAR  ucRefDiv;           // Reference divider
+  UCHAR  ucPpll;             // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+  UCHAR  ucTransmitterID;    // ASIC encoder id defined in objectId.h, 
+                             // indicate which graphic encoder will be used. 
+  UCHAR  ucEncoderMode;      // Encoder mode: 
+  UCHAR  ucMiscInfo;         // bit[0]= Force program PPLL 
+                             // bit[1]= when VGA timing is used. 
+                             // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+                             // bit[4]= RefClock source for PPLL. 
+                             // =0: XTALIN (default mode)
+	                           // =1: other external clock source, which is pre-defined
+                             //     by VBIOS depend on the feature required.
+                             // bit[7:5]: reserved.
+  ULONG  ulFbDivDecFrac;     // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V5;
+
+#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL					0x01
+#define PIXEL_CLOCK_V5_MISC_VGA_MODE								0x02
+#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK           0x0c
+#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP              0x00
+#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP              0x04
+#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP              0x08
+#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC             0x10
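+
+// Illustrative sketch (hypothetical helper, not part of the VBIOS interface):
+// V5 splits the feedback divider into an integer part (usFbDiv) and a 20-bit
+// decimal fraction (ulFbDivDecFrac), so a divider of 85.5 is passed as
+// usFbDiv = 85 with ulFbDivDecFrac = 500000 (0.500000).  For example, a
+// 148.5 MHz pixel clock on the first CRTC at 24 bpp:
+static inline void example_pixel_clock_v5(PIXEL_CLOCK_PARAMETERS_V5 *args)
+{
+  args->ucCRTC         = 0;                               // ATOM_CRTC1 (value 0 assumed)
+  args->usPixelClock   = 14850;                           // 148.5 MHz in 10 kHz units
+  args->usFbDiv        = 85;
+  args->ulFbDivDecFrac = 500000;                          // fractional part .500000
+  args->ucMiscInfo     = PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+}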
+
+typedef struct _CRTC_PIXEL_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicate the CRTC controller to 
+                              // drive the pixel clock. not used for DCPLL case.
+  ULONG  ulPixelClock:24;     // target the pixel clock to drive the CRTC timing. 
+                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version.
+#else
+  ULONG  ulPixelClock:24;     // target the pixel clock to drive the CRTC timing. 
+                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version.
+  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicate the CRTC controller to 
+                              // drive the pixel clock. not used for DCPLL case.
+#endif
+}CRTC_PIXEL_CLOCK_FREQ;
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V6
+{
+  union{
+    CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;    // pixel clock and CRTC id frequency 
+    ULONG ulDispEngClkFreq;                  // dispclk frequency
+  };
+  USHORT usFbDiv;            // feedback divider integer part. 
+  UCHAR  ucPostDiv;          // post divider. 
+  UCHAR  ucRefDiv;           // Reference divider
+  UCHAR  ucPpll;             // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+  UCHAR  ucTransmitterID;    // ASIC encoder id defined in objectId.h, 
+                             // indicate which graphic encoder will be used. 
+  UCHAR  ucEncoderMode;      // Encoder mode: 
+  UCHAR  ucMiscInfo;         // bit[0]= Force program PPLL 
+                             // bit[1]= when VGA timing is used. 
+                             // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+                             // bit[4]= RefClock source for PPLL. 
+                             // =0: XTALIN (default mode)
+	                           // =1: other external clock source, which is pre-defined                                            
+                             //     by VBIOS depend on the feature required.
+                             // bit[7:5]: reserved.
+  ULONG  ulFbDivDecFrac;     // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V6;
+
+#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL					0x01
+#define PIXEL_CLOCK_V6_MISC_VGA_MODE								0x02
+#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK           0x0c
+#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP              0x00
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP              0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP              0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP              0x0c
+#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC             0x10
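+
+// Illustrative helper (an editorial addition, not part of the original
+// header): fills the CRTC/pixel-clock union of PIXEL_CLOCK_PARAMETERS_V6.
+// The pixel clock occupies the 24-bit ulPixelClock bitfield (in 10KHz units,
+// consistent with the rest of this file), so 0 disables the PPLL/DCPLL. The
+// divider fields are assumed to follow the conventional PLL relation
+//   fout = fref * (usFbDiv + ulFbDivDecFrac/1000000) / (ucRefDiv * ucPostDiv)
+// which this sketch does not verify against VBIOS behavior.
+static inline void atom_example_set_pclk_v6(PIXEL_CLOCK_PARAMETERS_V6 *p,
+                                            ULONG pclk_10khz, UCHAR crtc)
+{
+  p->ulCrtcPclkFreq.ulPixelClock = pclk_10khz;  // 24-bit bitfield
+  p->ulCrtcPclkFreq.ucCRTC = crtc;              // ATOM_CRTC1~6
+}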
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
+{
+  PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2
+{
+  UCHAR  ucStatus;
+  UCHAR  ucRefDivSrc;                 // =1: reference clock source from XTALIN, =0: source from PCIE ref clock
+  UCHAR  ucReserved[2];
+}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
+{
+  PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
+
+/****************************************************************************/	
+// Structures used by AdjustDisplayPllTable
+/****************************************************************************/	
+typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
+{
+	USHORT usPixelClock;
+	UCHAR ucTransmitterID;
+	UCHAR ucEncodeMode;
+	union
+	{
+		UCHAR ucDVOConfig;									//if DVO, need to pass the link rate and whether output is 12-bit low or 24-bit
+		UCHAR ucConfig;											//if non-DVO, not defined yet
+	};
+	UCHAR ucReserved[3];
+}ADJUST_DISPLAY_PLL_PARAMETERS;
+
+#define ADJUST_DISPLAY_CONFIG_SS_ENABLE       0x10
+#define ADJUST_DISPLAY_PLL_PS_ALLOCATION			ADJUST_DISPLAY_PLL_PARAMETERS
+
+typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
+{
+	USHORT usPixelClock;                    // target pixel clock
+	UCHAR ucTransmitterID;                  // GPU transmitter id defined in objectid.h
+	UCHAR ucEncodeMode;                     // encoder mode: CRT, LVDS, DP, TMDS or HDMI
+	UCHAR ucDispPllConfig;                  // display PLL configuration parameter, defined as DISPPLL_CONFIG_XXXX below
+	UCHAR ucExtTransmitterID;               // external encoder id
+	UCHAR ucReserved[2];
+}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
+
+// ucDispPllConfig v1.2 for RoadRunner
+#define DISPPLL_CONFIG_DVO_RATE_SEL                0x0001     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_DDR_SPEED               0x0000     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_SDR_SPEED               0x0001     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_OUTPUT_SEL              0x000c     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_LOW12BIT                0x0000     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_UPPER12BIT              0x0004     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_24BIT                   0x0008     // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_SS_ENABLE                   0x0010     // Only used when ucEncoderMode = DP or LVDS
+#define DISPPLL_CONFIG_COHERENT_MODE               0x0020     // Only used when ucEncoderMode = TMDS or HDMI
+#define DISPPLL_CONFIG_DUAL_LINK                   0x0040     // Only used when ucEncoderMode = TMDS or LVDS
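+
+// Illustrative helper (an editorial addition, not part of the original
+// header): builds ucDispPllConfig of ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
+// for a coherent dual-link TMDS path using the DISPPLL_CONFIG_XXXX flags
+// above. The helper name is hypothetical.
+static inline UCHAR atom_example_disppll_config_dual_tmds(void)
+{
+  return (UCHAR)(DISPPLL_CONFIG_COHERENT_MODE | DISPPLL_CONFIG_DUAL_LINK);
+}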
+
+
+typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3
+{
+  ULONG ulDispPllFreq;                 // returned display PPLL freq used to generate the pixclock and the related idclk, symclk, etc.
+  UCHAR ucRefDiv;                      // if non-zero, used to calculate the other PPLL parameters fb_divider and post_div (if not given)
+  UCHAR ucPostDiv;                     // if non-zero, used to calculate the other PPLL parameter fb_divider
+  UCHAR ucReserved[2];  
+}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3;
+
+typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3
+{
+  union 
+  {
+    ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3  sInput;
+    ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput;
+  };
+} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3;
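+
+// Illustrative usage sketch (an editorial addition): the union above is
+// filled as sInput before the AdjustDisplayPll table executes and read back
+// as sOutput afterwards; the two views alias the same parameter space.
+// 'pclk_10khz', 'encoder_id' and 'encoder_mode' are hypothetical locals.
+//
+//   ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 args;
+//   args.sInput.usPixelClock    = pclk_10khz;
+//   args.sInput.ucTransmitterID = encoder_id;
+//   args.sInput.ucEncodeMode    = encoder_mode;
+//   /* ... execute the AdjustDisplayPll command table ... */
+//   /* args.sOutput.ulDispPllFreq now holds the returned PPLL frequency */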
+
+/****************************************************************************/	
+// Structures used by EnableYUVTable
+/****************************************************************************/	
+typedef struct _ENABLE_YUV_PARAMETERS
+{
+  UCHAR ucEnable;                     // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB)
+  UCHAR ucCRTC;                       // Which CRTC needs this YUV or RGB format
+  UCHAR ucPadding[2];
+}ENABLE_YUV_PARAMETERS;
+#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by GetMemoryClockTable
+/****************************************************************************/	
+typedef struct _GET_MEMORY_CLOCK_PARAMETERS
+{
+  ULONG ulReturnMemoryClock;          // current memory speed in 10KHz unit
+} GET_MEMORY_CLOCK_PARAMETERS;
+#define GET_MEMORY_CLOCK_PS_ALLOCATION  GET_MEMORY_CLOCK_PARAMETERS
+
+/****************************************************************************/	
+// Structures used by GetEngineClockTable
+/****************************************************************************/	
+typedef struct _GET_ENGINE_CLOCK_PARAMETERS
+{
+  ULONG ulReturnEngineClock;          // current engine speed in 10KHz unit
+} GET_ENGINE_CLOCK_PARAMETERS;
+#define GET_ENGINE_CLOCK_PS_ALLOCATION  GET_ENGINE_CLOCK_PARAMETERS
+
+/****************************************************************************/	
+// The following structures and constants may be obsolete
+/****************************************************************************/	
+//Maximum 8 bytes; the data read in will be placed in the parameter space.
+//The read operation is successful when the parameter space is non-zero, otherwise the read operation failed.
+typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+{
+  USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
+  USHORT    usVRAMAddress;      //Address in Frame Buffer where to place raw EDID
+  USHORT    usStatus;           //When used as output: lower byte EDID checksum, high byte hardware status
+                                //When used as input:  lower byte is 'bytes to read', currently limited to 128 bytes or 1 byte
+  UCHAR     ucSlaveAddr;        //Read from which slave
+  UCHAR     ucLineNumber;       //Read from which HW assisted line
+}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
+#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION  READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
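+
+// Illustrative usage sketch (an editorial addition) with assumed values: DDC
+// EDID devices conventionally answer at slave address 0xA0, and per the
+// comments above usStatus doubles as the byte count on input.
+//
+//   READ_EDID_FROM_HW_I2C_DATA_PARAMETERS edid = {0};
+//   edid.usPrescale    = prescale;    // engine clock / I2C clock ratio
+//   edid.usVRAMAddress = fb_offset;   // frame buffer address for the raw EDID
+//   edid.usStatus      = 128;         // input: bytes to read
+//   edid.ucSlaveAddr   = 0xA0;        // assumed DDC slave address
+//   edid.ucLineNumber  = line;        // HW-assisted I2C line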
+
+
+#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE                  0
+#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES              1
+#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK       2
+#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK  3
+#define  ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK       4
+
+typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+{
+  USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
+  USHORT    usByteOffset;       //Write to which byte
+                                //Upper portion of usByteOffset is Format of data 
+                                //1bytePS+offsetPS
+                                //2bytesPS+offsetPS
+                                //blockID+offsetPS
+                                //blockID+offsetID
+                                //blockID+counterID+offsetID
+  UCHAR     ucData;             //PS data1
+  UCHAR     ucStatus;           //Status byte: 1=success, 2=failure; also used as PS data2
+  UCHAR     ucSlaveAddr;        //Write to which slave
+  UCHAR     ucLineNumber;       //Write from which HW assisted line
+}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
+
+#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION  WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
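+
+// Illustrative usage sketch (an editorial addition). The comments above say
+// the upper portion of usByteOffset carries one of the
+// ATOM_WRITE_I2C_FORMAT_XXXX selectors; placing it in the upper byte is an
+// assumption of this sketch, not something the header spells out.
+//
+//   WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS w = {0};
+//   w.usByteOffset = (USHORT)((ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE << 8)
+//                             | offset);   // assumed packing
+//   w.ucData       = value;                // PS data1
+//   w.ucSlaveAddr  = slave;
+//   w.ucLineNumber = line;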
+
+typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
+{
+  USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
+  UCHAR     ucSlaveAddr;        //Write to which slave
+  UCHAR     ucLineNumber;       //Write from which HW assisted line
+}SET_UP_HW_I2C_DATA_PARAMETERS;
+
+
+/**************************************************************************/
+#define SPEED_FAN_CONTROL_PS_ALLOCATION   WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+
+/****************************************************************************/	
+// Structures used by PowerConnectorDetectionTable
+/****************************************************************************/	
+typedef struct	_POWER_CONNECTOR_DETECTION_PARAMETERS
+{
+  UCHAR   ucPowerConnectorStatus;      //Used for return value: 0 = detected, 1 = not detected
+  UCHAR   ucPwrBehaviorId;
+  USHORT  usPwrBudget;                 //how much power the board currently boots to, in units of watts
+}POWER_CONNECTOR_DETECTION_PARAMETERS;
+
+typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION
+{                               
+  UCHAR   ucPowerConnectorStatus;      //Used for return value: 0 = detected, 1 = not detected
+  UCHAR   ucReserved;
+  USHORT  usPwrBudget;                 //how much power the board currently boots to, in units of watts
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION    sReserved;
+}POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
+
+/****************************LVDS SS Command Table Definitions**********************/
+
+/****************************************************************************/	
+// Structures used by EnableSpreadSpectrumOnPPLLTable
+/****************************************************************************/	
+typedef struct	_ENABLE_LVDS_SS_PARAMETERS
+{
+  USHORT  usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;           //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
+  UCHAR   ucSpreadSpectrumStepSize_Delay; //bits[3:2] SS_STEP_SIZE; bits[6:4] SS_DELAY
+  UCHAR   ucEnable;                       //ATOM_ENABLE or ATOM_DISABLE
+  UCHAR   ucPadding[3];
+}ENABLE_LVDS_SS_PARAMETERS;
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct	_ENABLE_LVDS_SS_PARAMETERS_V2
+{
+  USHORT  usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;           //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
+  UCHAR   ucSpreadSpectrumStep;           //
+  UCHAR   ucEnable;                       //ATOM_ENABLE or ATOM_DISABLE
+  UCHAR   ucSpreadSpectrumDelay;
+  UCHAR   ucSpreadSpectrumRange;
+  UCHAR   ucPadding;
+}ENABLE_LVDS_SS_PARAMETERS_V2;
+
+//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS.
+typedef struct	_ENABLE_SPREAD_SPECTRUM_ON_PPLL
+{
+  USHORT  usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;           // Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Others: TBD
+  UCHAR   ucSpreadSpectrumStep;           //
+  UCHAR   ucEnable;                       // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR   ucSpreadSpectrumDelay;
+  UCHAR   ucSpreadSpectrumRange;
+  UCHAR   ucPpll;                         // ATOM_PPLL1/ATOM_PPLL2
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
+{
+  USHORT  usSpreadSpectrumPercentage;
+  UCHAR   ucSpreadSpectrumType;	        // Bit[0]: 0-Down Spread,1-Center Spread. 
+                                        // Bit[1]: 1-Ext. 0-Int. 
+                                        // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+                                        // Bits[7:4] reserved
+  UCHAR   ucEnable;	                    // ATOM_ENABLE or ATOM_DISABLE
+  USHORT  usSpreadSpectrumAmount;      	// Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]    
+  USHORT  usSpreadSpectrumStep;	        // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2;
+
+#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD      0x00
+#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD    0x01
+#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD       0x02
+#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK    0x0c
+#define ATOM_PPLL_SS_TYPE_V2_P1PLL            0x00
+#define ATOM_PPLL_SS_TYPE_V2_P2PLL            0x04
+#define ATOM_PPLL_SS_TYPE_V2_DCPLL            0x08
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK     0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT    0
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK     0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT    8
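+
+// Illustrative helper (an editorial addition, not part of the original
+// header): packs usSpreadSpectrumAmount from its SS_AMOUNT_FBDIV and
+// SS_AMOUNT_NFRAC_SLIP components using the V2 masks and shifts above.
+static inline USHORT atom_example_pack_ss_amount_v2(USHORT fbdiv, USHORT nfrac)
+{
+  return (USHORT)(((fbdiv << ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) &
+                   ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
+                  ((nfrac << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+                   ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK));
+}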
+
+// Used by DCE5.0
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
+{
+  USHORT  usSpreadSpectrumAmountFrac;   // SS_AMOUNT_DSFRAC New in DCE5.0
+  UCHAR   ucSpreadSpectrumType;	        // Bit[0]: 0-Down Spread,1-Center Spread. 
+                                        // Bit[1]: 1-Ext. 0-Int. 
+                                        // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+                                        // Bits[7:4] reserved
+  UCHAR   ucEnable;	                    // ATOM_ENABLE or ATOM_DISABLE
+  USHORT  usSpreadSpectrumAmount;      	// Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]    
+  USHORT  usSpreadSpectrumStep;	        // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
+    
+#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD      0x00
+#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD    0x01
+#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD       0x02
+#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK    0x0c
+#define ATOM_PPLL_SS_TYPE_V3_P1PLL            0x00
+#define ATOM_PPLL_SS_TYPE_V3_P2PLL            0x04
+#define ATOM_PPLL_SS_TYPE_V3_DCPLL            0x08
+#define ATOM_PPLL_SS_TYPE_V3_P0PLL            ATOM_PPLL_SS_TYPE_V3_DCPLL
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK     0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT    0
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK     0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT    8
+
+#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION  ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+/**************************************************************************/
+
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION
+{
+  PIXEL_CLOCK_PARAMETERS sPCLKInput;
+  ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion 
+}SET_PIXEL_CLOCK_PS_ALLOCATION;
+
+#define ENABLE_VGA_RENDER_PS_ALLOCATION   SET_PIXEL_CLOCK_PS_ALLOCATION
+
+/****************************************************************************/	
+// Structures used by ###
+/****************************************************************************/	
+typedef struct	_MEMORY_TRAINING_PARAMETERS
+{
+  ULONG ulTargetMemoryClock;          //In 10Khz unit
+}MEMORY_TRAINING_PARAMETERS;
+#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
+
+
+/****************************LVDS and other encoder command table definitions **********************/
+
+
+/****************************************************************************/	
+// Structures used by LVDSEncoderControlTable   (Before DCE30)
+//                    LVTMAEncoderControlTable  (Before DCE30)
+//                    TMDSAEncoderControlTable  (Before DCE30)
+/****************************************************************************/	
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;  // in 10KHz; for BIOS convenience
+  UCHAR  ucMisc;        // bit0=0: Enable single link
+                        //     =1: Enable dual link
+                        // Bit1=0: 666RGB
+                        //     =1: 888RGB
+  UCHAR  ucAction;      // 0: turn off encoder
+                        // 1: setup and turn on encoder
+}LVDS_ENCODER_CONTROL_PARAMETERS;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION  LVDS_ENCODER_CONTROL_PARAMETERS
+   
+#define TMDS1_ENCODER_CONTROL_PARAMETERS    LVDS_ENCODER_CONTROL_PARAMETERS
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS    TMDS1_ENCODER_CONTROL_PARAMETERS
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
+
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2
+{
+  USHORT usPixelClock;  // in 10KHz; for BIOS convenience
+  UCHAR  ucMisc;        // see PANEL_ENCODER_MISC_xx definitions below
+  UCHAR  ucAction;      // 0: turn off encoder
+                        // 1: setup and turn on encoder
+  UCHAR  ucTruncate;    // bit0=0: Disable truncate
+                        //     =1: Enable truncate
+                        // bit4=0: 666RGB
+                        //     =1: 888RGB
+  UCHAR  ucSpatial;     // bit0=0: Disable spatial dithering
+                        //     =1: Enable spatial dithering
+                        // bit4=0: 666RGB
+                        //     =1: 888RGB
+  UCHAR  ucTemporal;    // bit0=0: Disable temporal dithering
+                        //     =1: Enable temporal dithering
+                        // bit4=0: 666RGB
+                        //     =1: 888RGB
+                        // bit5=0: Gray level 2
+                        //     =1: Gray level 4
+  UCHAR  ucFRC;         // bit4=0: 25FRC_SEL pattern E
+                        //     =1: 25FRC_SEL pattern F
+                        // bit6:5=0: 50FRC_SEL pattern A
+                        //       =1: 50FRC_SEL pattern B
+                        //       =2: 50FRC_SEL pattern C
+                        //       =3: 50FRC_SEL pattern D
+                        // bit7=0: 75FRC_SEL pattern E
+                        //     =1: 75FRC_SEL pattern F
+}LVDS_ENCODER_CONTROL_PARAMETERS_V2;
+
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2  LVDS_ENCODER_CONTROL_PARAMETERS_V2
+   
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2    LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+  
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2    TMDS1_ENCODER_CONTROL_PARAMETERS_V2
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
+
+#define LVDS_ENCODER_CONTROL_PARAMETERS_V3     LVDS_ENCODER_CONTROL_PARAMETERS_V2
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V3  LVDS_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_V3    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS1_ENCODER_CONTROL_PARAMETERS_V3
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
+
+/****************************************************************************/	
+// Structures used by ###
+/****************************************************************************/	
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS
+{                               
+  UCHAR    ucEnable;            // Enable or Disable External TMDS encoder
+  UCHAR    ucMisc;              // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB}
+  UCHAR    ucPadding[2];
+}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION
+{                               
+  ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS    sXTmdsEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION   sReserved;     //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
+
+#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2  LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2
+{                               
+  ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2    sXTmdsEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION      sReserved;     //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION
+{
+  DIG_ENCODER_CONTROL_PARAMETERS            sDigEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
+
+/****************************************************************************/	
+// Structures used by DVOEncoderControlTable
+/****************************************************************************/	
+//ucTableFormatRevision=1,ucTableContentRevision=3
+
+//ucDVOConfig:
+#define DVO_ENCODER_CONFIG_RATE_SEL							0x01
+#define DVO_ENCODER_CONFIG_DDR_SPEED						0x00
+#define DVO_ENCODER_CONFIG_SDR_SPEED						0x01
+#define DVO_ENCODER_CONFIG_OUTPUT_SEL						0x0c
+#define DVO_ENCODER_CONFIG_LOW12BIT							0x00
+#define DVO_ENCODER_CONFIG_UPPER12BIT						0x04
+#define DVO_ENCODER_CONFIG_24BIT								0x08
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
+{
+  USHORT usPixelClock; 
+  UCHAR  ucDVOConfig;
+  UCHAR  ucAction;														//ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+  UCHAR  ucReseved[4];
+}DVO_ENCODER_CONTROL_PARAMETERS_V3;
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3	DVO_ENCODER_CONTROL_PARAMETERS_V3
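+
+// Illustrative usage sketch (an editorial addition): enabling the DVO encoder
+// at SDR speed with 24-bit output, using the ucDVOConfig flags above.
+// 'pclk_10khz' is a hypothetical local.
+//
+//   DVO_ENCODER_CONTROL_PARAMETERS_V3 dvo = {0};
+//   dvo.usPixelClock = pclk_10khz;
+//   dvo.ucDVOConfig  = DVO_ENCODER_CONFIG_SDR_SPEED | DVO_ENCODER_CONFIG_24BIT;
+//   dvo.ucAction     = ATOM_ENABLE;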
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=3: the structure is not changed, but usMisc adds bit 1 as another input:
+// bit1=0: non-coherent mode
+//     =1: coherent mode
+
+//==========================================================================================
+//Only change is here next time when changing encoder parameter definitions again!
+#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST     LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST  LVDS_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS1_ENCODER_CONTROL_PARAMETERS_LAST    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS1_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define TMDS2_ENCODER_CONTROL_PARAMETERS_LAST    LVDS_ENCODER_CONTROL_PARAMETERS_V3
+#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS2_ENCODER_CONTROL_PARAMETERS_LAST
+
+#define DVO_ENCODER_CONTROL_PARAMETERS_LAST      DVO_ENCODER_CONTROL_PARAMETERS
+#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST   DVO_ENCODER_CONTROL_PS_ALLOCATION
+
+//==========================================================================================
+#define PANEL_ENCODER_MISC_DUAL                0x01
+#define PANEL_ENCODER_MISC_COHERENT            0x02
+#define PANEL_ENCODER_MISC_TMDS_LINKB          0x04
+#define PANEL_ENCODER_MISC_HDMI_TYPE           0x08
+
+#define PANEL_ENCODER_ACTION_DISABLE           ATOM_DISABLE
+#define PANEL_ENCODER_ACTION_ENABLE            ATOM_ENABLE
+#define PANEL_ENCODER_ACTION_COHERENTSEQ       (ATOM_ENABLE+1)
+
+#define PANEL_ENCODER_TRUNCATE_EN              0x01
+#define PANEL_ENCODER_TRUNCATE_DEPTH           0x10
+#define PANEL_ENCODER_SPATIAL_DITHER_EN        0x01
+#define PANEL_ENCODER_SPATIAL_DITHER_DEPTH     0x10
+#define PANEL_ENCODER_TEMPORAL_DITHER_EN       0x01
+#define PANEL_ENCODER_TEMPORAL_DITHER_DEPTH    0x10
+#define PANEL_ENCODER_TEMPORAL_LEVEL_4         0x20
+#define PANEL_ENCODER_25FRC_MASK               0x10
+#define PANEL_ENCODER_25FRC_E                  0x00
+#define PANEL_ENCODER_25FRC_F                  0x10
+#define PANEL_ENCODER_50FRC_MASK               0x60
+#define PANEL_ENCODER_50FRC_A                  0x00
+#define PANEL_ENCODER_50FRC_B                  0x20
+#define PANEL_ENCODER_50FRC_C                  0x40
+#define PANEL_ENCODER_50FRC_D                  0x60
+#define PANEL_ENCODER_75FRC_MASK               0x80
+#define PANEL_ENCODER_75FRC_E                  0x00
+#define PANEL_ENCODER_75FRC_F                  0x80
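+
+// Illustrative helper (an editorial addition, not part of the original
+// header): composes the ucTemporal field of LVDS_ENCODER_CONTROL_PARAMETERS_V2
+// for 888RGB temporal dithering at gray level 4, using the flags above.
+static inline UCHAR atom_example_lvds_temporal_888_gray4(void)
+{
+  return (UCHAR)(PANEL_ENCODER_TEMPORAL_DITHER_EN |
+                 PANEL_ENCODER_TEMPORAL_DITHER_DEPTH |   // bit4: 888RGB
+                 PANEL_ENCODER_TEMPORAL_LEVEL_4);        // bit5: gray level 4
+}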
+
+/****************************************************************************/	
+// Structures used by SetVoltageTable
+/****************************************************************************/	
+#define SET_VOLTAGE_TYPE_ASIC_VDDC             1
+#define SET_VOLTAGE_TYPE_ASIC_MVDDC            2
+#define SET_VOLTAGE_TYPE_ASIC_MVDDQ            3
+#define SET_VOLTAGE_TYPE_ASIC_VDDCI            4
+#define SET_VOLTAGE_INIT_MODE                  5
+#define SET_VOLTAGE_GET_MAX_VOLTAGE            6          //Gets the max voltage for the soldered ASIC
+
+#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE       0x1
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_A         0x2
+#define SET_ASIC_VOLTAGE_MODE_SOURCE_B         0x4
+
+#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE      0x0
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL      0x1
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK     0x2
+
+typedef struct	_SET_VOLTAGE_PARAMETERS
+{
+  UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+  UCHAR    ucVoltageMode;               // To set all, to set source A or source B or ...
+  UCHAR    ucVoltageIndex;              // An index to tell which voltage level
+  UCHAR    ucReserved;          
+}SET_VOLTAGE_PARAMETERS;
+
+typedef struct	_SET_VOLTAGE_PARAMETERS_V2
+{
+  UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+  UCHAR    ucVoltageMode;               // Not used; may be used for a state machine for different power modes
+  USHORT   usVoltageLevel;              // real voltage level
+}SET_VOLTAGE_PARAMETERS_V2;
+
+
+typedef struct	_SET_VOLTAGE_PARAMETERS_V1_3
+{
+  UCHAR    ucVoltageType;               // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Indicate action: Set voltage level
+  USHORT   usVoltageLevel;              // real voltage level in units of mV, or Voltage Phase (0, 1, 2, ..)
+}SET_VOLTAGE_PARAMETERS_V1_3;
+
+//ucVoltageType
+#define VOLTAGE_TYPE_VDDC                    1
+#define VOLTAGE_TYPE_MVDDC                   2
+#define VOLTAGE_TYPE_MVDDQ                   3
+#define VOLTAGE_TYPE_VDDCI                   4
+
+//SET_VOLTAGE_PARAMETERS_V1_3.ucVoltageMode
+#define ATOM_SET_VOLTAGE                     0        //Set voltage level
+#define ATOM_INIT_VOLTAGE_REGULATOR          3        //Init regulator
+#define ATOM_SET_VOLTAGE_PHASE               4        //Set Vregulator phase
+#define ATOM_GET_MAX_VOLTAGE                 6        //Get max voltage, not used in SetVoltageTable v1.3
+#define ATOM_GET_VOLTAGE_LEVEL               6        //Get voltage level from virtual voltage ID
+
+// define virtual voltage IDs in usVoltageLevel
+#define ATOM_VIRTUAL_VOLTAGE_ID0             0xff01
+#define ATOM_VIRTUAL_VOLTAGE_ID1             0xff02
+#define ATOM_VIRTUAL_VOLTAGE_ID2             0xff03
+#define ATOM_VIRTUAL_VOLTAGE_ID3             0xff04
+
+typedef struct _SET_VOLTAGE_PS_ALLOCATION
+{
+  SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}SET_VOLTAGE_PS_ALLOCATION;
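+
+// Illustrative usage sketch (an editorial addition): querying the real level
+// behind a virtual (leakage) voltage ID via SET_VOLTAGE_PARAMETERS_V1_3 and
+// the definitions above. That usVoltageLevel holds the result in mV after the
+// table executes is an assumption of this sketch.
+//
+//   SET_VOLTAGE_PARAMETERS_V1_3 v = {0};
+//   v.ucVoltageType  = VOLTAGE_TYPE_VDDC;
+//   v.ucVoltageMode  = ATOM_GET_VOLTAGE_LEVEL;
+//   v.usVoltageLevel = ATOM_VIRTUAL_VOLTAGE_ID0;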
+
+// Newly added from SI for GetVoltageInfoTable, input parameter structure
+typedef struct  _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1
+{
+  UCHAR    ucVoltageType;               // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
+  UCHAR    ucVoltageMode;               // Input: Indicate action: Get voltage info
+  USHORT   usVoltageLevel;              // Input: real voltage level in units of mV, or Voltage Phase (0, 1, 2, ..), or Leakage Id
+  ULONG    ulReserved;
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1;
+
+// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_VID
+typedef struct  _GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+  ULONG    ulVotlageGpioState;
+  ULONG    ulVoltageGPioMask;
+}GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_STATEx_LEAKAGE_VID
+typedef struct  _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
+{
+  USHORT   usVoltageLevel;
+  USHORT   usVoltageId;                                  // Voltage Id programmed in Voltage Regulator
+  ULONG    ulReseved;
+}GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
+
+
+// GetVoltageInfo v1.1 ucVoltageMode
+#define	ATOM_GET_VOLTAGE_VID                0x00
+#define ATOM_GET_VOTLAGE_INIT_SEQ           0x03
+#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID   0x04
+// for SI, this state maps to the 0xff02 voltage state in the Power Play table, which is the power boost state
+#define	ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
+
+// for SI, this state maps to the 0xff01 voltage state in the Power Play table, which is the performance state
+#define	ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
+// undefined power state
+#define	ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
+#define	ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
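+
+// Illustrative usage sketch (an editorial addition): a leakage-VID query
+// against GetVoltageInfoTable, pairing the input structure above with
+// GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1 on return. 'leakage_id' is
+// a hypothetical local.
+//
+//   GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1 q = {0};
+//   q.ucVoltageType  = VOLTAGE_TYPE_VDDC;
+//   q.ucVoltageMode  = ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID;
+//   q.usVoltageLevel = leakage_id;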
+
+/****************************************************************************/	
+// Structures used by TVEncoderControlTable
+/****************************************************************************/	
+typedef struct _TV_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock;                // in 10KHz; for BIOS convenience
+  UCHAR  ucTvStandard;                // See definition "ATOM_TV_NTSC ..."
+  UCHAR  ucAction;                    // 0: turn off encoder
+                                      // 1: setup and turn on encoder
+}TV_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION
+{
+  TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;          
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION    sReserved; // Don't set this one
+}TV_ENCODER_CONTROL_PS_ALLOCATION;
+
+//==============================Data Table Portion====================================
+
+/****************************************************************************/	
+// Structure used in Data.mtb
+/****************************************************************************/	
+typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+{
+  USHORT        UtilityPipeLine;          // Offset for the utility to get parser info; don't change this position!
+  USHORT        MultimediaCapabilityInfo; // Only used by MM Lib, latest version 1.1, not configurable from BIOS; need to include the table to build BIOS
+  USHORT        MultimediaConfigInfo;     // Only used by MM Lib, latest version 2.1, not configurable from BIOS; need to include the table to build BIOS
+  USHORT        StandardVESA_Timing;      // Only used by Bios
+  USHORT        FirmwareInfo;             // Shared by various SW components,latest version 1.4
+  USHORT        PaletteData;              // Only used by BIOS
+  USHORT        LCD_Info;                 // Shared by various SW components,latest version 1.3, was called LVDS_Info 
+  USHORT        DIGTransmitterInfo;       // Internal used by VBIOS only version 3.1
+  USHORT        AnalogTV_Info;            // Shared by various SW components,latest version 1.1 
+  USHORT        SupportedDevicesInfo;     // Will be obsolete from R600
+  USHORT        GPIO_I2C_Info;            // Shared by various SW components,latest version 1.2 will be used from R600           
+  USHORT        VRAM_UsageByFirmware;     // Shared by various SW components,latest version 1.3 will be used from R600
+  USHORT        GPIO_Pin_LUT;             // Shared by various SW components,latest version 1.1
+  USHORT        VESA_ToInternalModeLUT;   // Only used by Bios
+  USHORT        ComponentVideoInfo;       // Shared by various SW components,latest version 2.1 will be used from R600
+  USHORT        PowerPlayInfo;            // Shared by various SW components,latest version 2.1,new design from R600
+  USHORT        CompassionateData;        // Will be obsolete from R600
+  USHORT        SaveRestoreInfo;          // Only used by Bios
+  USHORT        PPLL_SS_Info;             // Shared by various SW components, latest version 1.2; used to be called SS_Info, changed to the new name because of internal ASIC SS info
+  USHORT        OemInfo;                  // Defined and used by external SW, should be obsolete soon
+  USHORT        XTMDS_Info;               // Will be obsolete from R600
+  USHORT        MclkSS_Info;              // Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used
+  USHORT        Object_Header;            // Shared by various SW components,latest version 1.1
+  USHORT        IndirectIOAccess;         // Only used by Bios,this table position can't change at all!!
+  USHORT        MC_InitParameter;         // Only used by command table
+  USHORT        ASIC_VDDC_Info;           // Will be obsolete from R600
+  USHORT        ASIC_InternalSS_Info;     // New table name from R600, used to be called "ASIC_MVDDC_Info"
+  USHORT        TV_VideoMode;             // Only used by command table
+  USHORT        VRAM_Info;                // Only used by command table, latest version 1.3
+  USHORT        MemoryTrainingInfo;       // Used by VBIOS and the Diag utility for memory training purposes since R600. The new table revision starts from 2.1
+  USHORT        IntegratedSystemInfo;     // Shared by various SW components
+  USHORT        ASIC_ProfilingInfo;       // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600
+  USHORT        VoltageObjectInfo;        // Shared by various SW components, latest version 1.1
+  USHORT        PowerSourceInfo;          // Shared by various SW components, latest version 1.1
+}ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+typedef struct _ATOM_MASTER_DATA_TABLE
+{ 
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_MASTER_LIST_OF_DATA_TABLES   ListOfDataTables;
+}ATOM_MASTER_DATA_TABLE;
+
+// For backward compatible 
+#define LVDS_Info                LCD_Info
+#define DAC_Info                 PaletteData
+#define TMDS_Info                DIGTransmitterInfo
+
+/****************************************************************************/	
+// Structure used in MultimediaCapabilityInfoTable
+/****************************************************************************/	
+typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ULONG                    ulSignature;      // HW info table signature string "$ATI"
+  UCHAR                    ucI2C_Type;       // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc)
+  UCHAR                    ucTV_OutInfo;     // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7)
+  UCHAR                    ucVideoPortInfo;  // Provides the video port capabilities
+  UCHAR                    ucHostPortInfo;   // Provides host port configuration information
+}ATOM_MULTIMEDIA_CAPABILITY_INFO;
+
+/****************************************************************************/	
+// Structure used in MultimediaConfigInfoTable
+/****************************************************************************/	
+typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  ULONG                    ulSignature;      // MM info table signature string "$MMT"
+  UCHAR                    ucTunerInfo;      // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5)
+  UCHAR                    ucAudioChipInfo;  // List the audio chip type (3:0) product type (4) and OEM revision (7:5)
+  UCHAR                    ucProductID;      // Defines as OEM ID or ATI board ID dependent on product type setting
+  UCHAR                    ucMiscInfo1;      // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7)
+  UCHAR                    ucMiscInfo2;      // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6)
+  UCHAR                    ucMiscInfo3;      // Video Decoder Type (3:0) Video In Standard/Crystal (7:4)
+  UCHAR                    ucMiscInfo4;      // Video Decoder Host Config (2:0) reserved (7:3)
+  UCHAR                    ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+  UCHAR                    ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+}ATOM_MULTIMEDIA_CONFIG_INFO;
+
+
+/****************************************************************************/	
+// Structures used in FirmwareInfoTable
+/****************************************************************************/	
+
+// usBIOSCapability Definition:
+// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; 
+// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; 
+// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; 
+// Others: Reserved
+#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED         0x0001
+#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT            0x0002
+#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT     0x0004
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT      0x0008		// (valid from v1.1 to v1.4): =1: memclk SS enabled, =0: memclk SS disabled.
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT      0x0010		// (valid from v1.1 to v1.4): =1: engclk SS enabled, =0: engclk SS disabled.
+#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU         0x0020
+#define ATOM_BIOS_INFO_WMI_SUPPORT                  0x0040
+#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM   0x0080
+#define ATOM_BIOS_INFO_HYPERMEMORY_SUPPORT          0x0100
+#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK        0x1E00
+#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
+#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE  0x4000
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT  0x0008		// (valid from v2.1): =1: memclk SS enabled with external SS chip
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT  0x0010		// (valid from v2.1): =1: engclk SS enabled with external SS chip
+
+#ifndef _H2INC
+
+//Please don't add to or expand this bitfield structure below; it will be retired soon!
+typedef struct _ATOM_FIRMWARE_CAPABILITY
+{
+#if ATOM_BIG_ENDIAN
+  USHORT Reserved:1;
+  USHORT SCL2Redefined:1;
+  USHORT PostWithoutModeSet:1;
+  USHORT HyperMemory_Size:4;
+  USHORT HyperMemory_Support:1;
+  USHORT PPMode_Assigned:1;
+  USHORT WMI_SUPPORT:1;
+  USHORT GPUControlsBL:1;
+  USHORT EngineClockSS_Support:1;
+  USHORT MemoryClockSS_Support:1;
+  USHORT ExtendedDesktopSupport:1;
+  USHORT DualCRTC_Support:1;
+  USHORT FirmwarePosted:1;
+#else
+  USHORT FirmwarePosted:1;
+  USHORT DualCRTC_Support:1;
+  USHORT ExtendedDesktopSupport:1;
+  USHORT MemoryClockSS_Support:1;
+  USHORT EngineClockSS_Support:1;
+  USHORT GPUControlsBL:1;
+  USHORT WMI_SUPPORT:1;
+  USHORT PPMode_Assigned:1;
+  USHORT HyperMemory_Support:1;
+  USHORT HyperMemory_Size:4;
+  USHORT PostWithoutModeSet:1;
+  USHORT SCL2Redefined:1;
+  USHORT Reserved:1;
+#endif
+}ATOM_FIRMWARE_CAPABILITY;
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+  ATOM_FIRMWARE_CAPABILITY sbfAccess;
+  USHORT                   susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
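+
+// Illustrative helper (an editorial addition, not part of the original
+// header): the union above lets the same word be read through the bitfield
+// view or by masking susAccess with the ATOM_BIOS_INFO_XXXX flags; the
+// HyperMemory size shift of 9 is derived from the 0x1E00 mask.
+static inline USHORT atom_example_hypermemory_size(ATOM_FIRMWARE_CAPABILITY_ACCESS cap)
+{
+  // Equivalent to cap.sbfAccess.HyperMemory_Size.
+  return (USHORT)((cap.susAccess & ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK) >> 9);
+}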
+
+#else
+
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+  USHORT                   susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
+
+#endif
+
+typedef struct _ATOM_FIRMWARE_INFO
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucPadding[3];               //Don't use them
+  ULONG                           aulReservedForBIOS[3];      //Don't use them
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit, the definitions above can't change!!!
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit	
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit 
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicates the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicates the board design
+}ATOM_FIRMWARE_INFO;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_2
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  UCHAR                           ucPadding[2];               //Don't use them
+  ULONG                           aulReservedForBIOS[2];      //Don't use them
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit	
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit 
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicates the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicates the board design
+}ATOM_FIRMWARE_INFO_V1_2;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_3
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  UCHAR                           ucPadding[2];               //Don't use them
+  ULONG                           aulReservedForBIOS;         //Don't use them
+  ULONG                           ul3DAccelerationEngineClock;//In 10Khz unit
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit	
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit 
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicates the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicates the board design
+}ATOM_FIRMWARE_INFO_V1_3;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_4
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulDriverTargetEngineClock;  //In 10Khz unit
+  ULONG                           ulDriverTargetMemoryClock;  //In 10Khz unit
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulASICMaxEngineClock;       //In 10Khz unit
+  ULONG                           ulASICMaxMemoryClock;       //In 10Khz unit
+  UCHAR                           ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In MV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ul3DAccelerationEngineClock;//In 10Khz unit
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usReferenceClock;           //In 10Khz unit	
+  USHORT                          usPM_RTS_Location;          //RTS PM4 starting location in ROM in 1Kb unit 
+  UCHAR                           ucPM_RTS_StreamSize;        //RTS PM4 packets in Kb unit
+  UCHAR                           ucDesign_ID;                //Indicates the board design
+  UCHAR                           ucMemoryModule_ID;          //Indicates the board design
+}ATOM_FIRMWARE_INFO_V1_4;
+
+//The structure below is to be used from Cypress on
+typedef struct _ATOM_FIRMWARE_INFO_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulReserved1;
+  ULONG                           ulReserved2;
+  ULONG                           ulMaxEngineClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxMemoryClockPLL_Output; //In 10Khz unit
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulBinaryAlteredInfo;        //Was ulASICMaxEngineClock
+  ULONG                           ulDefaultDispEngineClkFreq; //In 10Khz unit
+  UCHAR                           ucReserved1;                //Was ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In MV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ulReserved4;                //Was ulAsicMaximumVoltage
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxEngineClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinEngineClockPLL_Output; //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMaxMemoryClockPLL_Input;  //In 10Khz unit
+  USHORT                          usMinMemoryClockPLL_Output; //In 10Khz unit
+  USHORT                          usMaxPixelClock;            //In 10Khz unit, Max.  Pclk
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMinPixelClockPLL_Output;  //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usCoreReferenceClock;       //In 10Khz unit	
+  USHORT                          usMemoryReferenceClock;     //In 10Khz unit	
+  USHORT                          usUniphyDPModeExtClkFreq;   //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock
+  UCHAR                           ucMemoryModule_ID;          //Indicates the board design
+  UCHAR                           ucReserved4[3];
+}ATOM_FIRMWARE_INFO_V2_1;
+
+//The structure below is to be used from NI on
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_FIRMWARE_INFO_V2_2
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulReserved[2];
+  ULONG                           ulReserved1;                //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
+  ULONG                           ulReserved2;                //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulBinaryAlteredInfo;        //Was ulASICMaxEngineClock
+  ULONG                           ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.          
+  UCHAR                           ucReserved3;                //Was ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In MV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ulReserved4;                //Was ulAsicMaximumVoltage
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  UCHAR                           ucRemoteDisplayConfig;
+  UCHAR                           ucReserved5[3];             //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+  ULONG                           ulReserved6;                //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
+  ULONG                           ulReserved7;                //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
+  USHORT                          usReserved11;               //Was usMaxPixelClock;  //In 10Khz unit, Max.  Pclk used only for DAC
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usBootUpVDDCIVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usCoreReferenceClock;       //In 10Khz unit	
+  USHORT                          usMemoryReferenceClock;     //In 10Khz unit	
+  USHORT                          usUniphyDPModeExtClkFreq;   //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock
+  UCHAR                           ucMemoryModule_ID;          //Indicates the board design
+  UCHAR                           ucReserved9[3];
+  USHORT                          usBootUpMVDDCVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
+  USHORT                          usReserved12;
+  ULONG                           ulReserved10[3];            // New added comparing to previous version
+}ATOM_FIRMWARE_INFO_V2_2;
+
+#define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V2_2
+
+
+// definition of ucRemoteDisplayConfig
+#define REMOTE_DISPLAY_DISABLE                   0x00
+#define REMOTE_DISPLAY_ENABLE                    0x01
+
+/****************************************************************************/	
+// Structures used in IntegratedSystemInfoTable
+/****************************************************************************/	
+#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN      0x2
+#define IGP_CAP_FLAG_AC_CARD               0x4
+#define IGP_CAP_FLAG_SDVO_CARD             0x8
+#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE     0x10
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG	                          ulBootUpEngineClock;		    //in 10kHz unit
+  ULONG	                          ulBootUpMemoryClock;		    //in 10kHz unit
+  ULONG	                          ulMaxSystemMemoryClock;	    //in 10kHz unit
+  ULONG	                          ulMinSystemMemoryClock;	    //in 10kHz unit
+  UCHAR                           ucNumberOfCyclesInPeriodHi;
+  UCHAR                           ucLCDTimingSel;             //=0: not valid; !=0: select this timing descriptor from LCD EDID
+  USHORT                          usReserved1;
+  USHORT                          usInterNBVoltageLow;        //An intermediate PWM value to set the voltage
+  USHORT                          usInterNBVoltageHigh;       //Another intermediate PWM value to set the voltage
+  ULONG	                          ulReserved[2];
+
+  USHORT	                        usFSBClock;			            //In MHz unit
+  USHORT                          usCapabilityFlag;           //Bit0=1 indicates fake HDMI support, Bit1=0/1 for Dynamic clocking disable/enable
+                                                              //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card
+                                                              //Bit[4]==1: P/2 mode, ==0: P/1 mode
+  USHORT	                        usPCIENBCfgReg7;				    //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal
+  USHORT	                        usK8MemoryClock;            //in MHz unit
+  USHORT	                        usK8SyncStartDelay;         //in 0.01 us unit
+  USHORT	                        usK8DataReturnTime;         //in 0.01 us unit
+  UCHAR                           ucMaxNBVoltage;
+  UCHAR                           ucMinNBVoltage;
+  UCHAR                           ucMemoryType;					      //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved
+  UCHAR                           ucNumberOfCyclesInPeriod;		//CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod 
+  UCHAR                           ucStartingPWM_HighTime;     //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime
+  UCHAR                           ucHTLinkWidth;              //16 bit vs. 8 bit
+  UCHAR                           ucMaxNBVoltageHigh;    
+  UCHAR                           ucMinNBVoltageHigh;
+}ATOM_INTEGRATED_SYSTEM_INFO;
+
+/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
+ulBootUpMemoryClock:    For Intel IGP,it's the UMA system memory clock 
+                        For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
+ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
+                        For AMD IGP,for now this can be 0
+ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 
+                        For AMD IGP,for now this can be 0
+
+usFSBClock:             For Intel IGP,it's FSB Freq 
+                        For AMD IGP,it's HT Link Speed
+
+usK8MemoryClock:        For AMD IGP only. For RevF CPU, set it to 200
+usK8SyncStartDelay:     For AMD IGP only. Memory access latency in K8, required for watermark calculation
+usK8DataReturnTime:     For AMD IGP only. Memory access latency in K8, required for watermark calculation
+
+VC:Voltage Control
+ucMaxNBVoltage:         Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltage:         Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+ucNumberOfCyclesInPeriod:   Indicate how many cycles when PWM duty is 100%. low 8 bits of the value. 
+ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0 
+
+ucMaxNBVoltageHigh:     Voltage regulator dependent PWM value. High 8 bits of  the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
+ucMinNBVoltageHigh:     Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
+
+usInterNBVoltageLow:    Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
+usInterNBVoltageHigh:   Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
+*/
+
+
+/*
+The following IGP table is introduced from RS780. SBIOS is supposed to put it in FB before the IGP VBIOS starts VPOST;
+VBIOS then copies the whole structure into its image so all GPU SW components can access this data structure to get whatever they need.
+Enough space is reserved so that the table revision should never need to change. Whenever needed, a GPU SW component can use the reserved portion for new data entries.
+
+SW components can access the IGP system info structure in the same way as before.
+*/
+
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG	                     ulBootUpEngineClock;       //in 10kHz unit
+  ULONG			     ulReserved1[2];            //must be 0x0 for the reserved
+  ULONG	                     ulBootUpUMAClock;          //in 10kHz unit
+  ULONG	                     ulBootUpSidePortClock;     //in 10kHz unit
+  ULONG	                     ulMinSidePortClock;        //in 10kHz unit
+  ULONG			     ulReserved2[6];            //must be 0x0 for the reserved
+  ULONG                      ulSystemConfig;            //see explanation below
+  ULONG                      ulBootUpReqDisplayVector;
+  ULONG                      ulOtherDisplayMisc;
+  ULONG                      ulDDISlot1Config;
+  ULONG                      ulDDISlot2Config;
+  UCHAR                      ucMemoryType;              //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+  UCHAR                      ucUMAChannelNumber;
+  UCHAR                      ucDockingPinBit;
+  UCHAR                      ucDockingPinPolarity;
+  ULONG                      ulDockingPinCFGInfo;
+  ULONG                      ulCPUCapInfo;
+  USHORT                     usNumberOfCyclesInPeriod;
+  USHORT                     usMaxNBVoltage;
+  USHORT                     usMinNBVoltage;
+  USHORT                     usBootUpNBVoltage;
+  ULONG                      ulHTLinkFreq;              //in 10Khz
+  USHORT                     usMinHTLinkWidth;
+  USHORT                     usMaxHTLinkWidth;
+  USHORT                     usUMASyncStartDelay;
+  USHORT                     usUMADataReturnTime;
+  USHORT                     usLinkStatusZeroTime;
+  USHORT                     usDACEfuse;				//for storing bandgap value (for RS880 only)
+  ULONG                      ulHighVoltageHTLinkFreq;     // in 10Khz
+  ULONG                      ulLowVoltageHTLinkFreq;      // in 10Khz
+  USHORT                     usMaxUpStreamHTLinkWidth;
+  USHORT                     usMaxDownStreamHTLinkWidth;
+  USHORT                     usMinUpStreamHTLinkWidth;
+  USHORT                     usMinDownStreamHTLinkWidth;
+  USHORT                     usFirmwareVersion;         //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS and driver should enable FW.
+  USHORT                     usFullT0Time;             // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us.
+  ULONG                      ulReserved3[96];          //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V2;   
+
+/*
+ulBootUpEngineClock:   Boot-up Engine Clock in 10Khz;
+ulBootUpUMAClock:      Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
+ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock
+
+ulSystemConfig:  
+Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode; 
+Bit[1]=1: system boots up at AMD overdrive state or user customized mode. In this case, driver will just stick to this boot-up mode. No other PowerPlay state
+      =0: system boots up at driver control state. Power state depends on PowerPlay table.
+Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
+Bit[3]=1: Only one power state(Performance) will be supported.
+      =0: Multiple power states supported from PowerPlay table.
+Bit[4]=1: CLMC is supported and enabled on current system. 
+      =0: CLMC is not supported or enabled on current system. SBIOS needs to support HT link/freq change through ATIF interface.
+Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.  
+      =0: CDLW is disabled. If CLMC is enabled case, Min HT width will be set equal to Max HT width. If CLMC disabled case, Max HT width will be applied.
+Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored.
+      =0: Voltage settings are determined by the powerplay table.
+Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is a workaround for a CPU (Griffin) performance issue.
+      =0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
+Bit[8]=1: CDLF is supported and enabled on current system.
+      =0: CDLF is not supported or enabled on current system.
+Bit[9]=1: DLL Shut Down feature is enabled on current system.
+      =0: DLL Shut Down feature is not enabled or supported on current system.
+
+ulBootUpReqDisplayVector: This dword is a bit vector indicates what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
+
+ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion;
+                    [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSupportedStd definition;
+
+ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
+      [3:0]  - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
+      [7:4]  - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1 lane 3:0; bit 5=1 lane 7:4; bit 6=1 lane 11:8; bit 7=1 lane 15:12)
+      When a DDI connector is not "paired" (meaning the two connections are mutually exclusive: between chassis and docking, only one of them can be connected at a time),
+      SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example: if
+      one DDI connector is only populated in docking with PCIE lane 8-11, but there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2.
+
+      [15:8] - Lane configuration attribute;
+      [23:16]- Connector type, possible value:
+               CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
+               CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
+               CONNECTOR_OBJECT_ID_HDMI_TYPE_A
+               CONNECTOR_OBJECT_ID_DISPLAYPORT
+               CONNECTOR_OBJECT_ID_eDP
+      [31:24]- Reserved
+
+ulDDISlot2Config: Same as Slot1.
+ucMemoryType: SidePort memory type, set it to 0x0 when Sideport memory is not installed. Driver needs this info to change sideport memory clock. Not for display in CCC.
+For IGP, Hypermemory is the only memory type shown in CCC.
+
+ucUMAChannelNumber:  how many channels for the UMA;
+
+ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin 
+ucDockingPinBit:     which bit in this register to read the pin status;
+ucDockingPinPolarity:Polarity of the pin when docked;
+
+ulCPUCapInfo:        [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
+
+usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
+
+usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode. 
+usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
+                    GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value when ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
+                    PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value when ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
+                    GPU SW doesn't control the mode: usMaxNBVoltage & usMinNBVoltage=0, and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE is a don't-care
+
+usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
+
+ulHTLinkFreq:       Bootup HT link Frequency in 10Khz.
+usMinHTLinkWidth:   Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth. 
+                    If CDLW enabled, both upstream and downstream width should be the same during bootup.
+usMaxHTLinkWidth:   Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth. 
+                    If CDLW enabled, both upstream and downstream width should be the same during bootup.  
+
+usUMASyncStartDelay: Memory access latency, required for watermark calculation 
+usUMADataReturnTime: Memory access latency, required for watermark calculation
+usLinkStatusZeroTime:Memory access latency required for watermark calculation. Set this to 0x0 for K8 CPU; set a proper value in units of 0.01 us
+for Griffin or Greyhound. SBIOS needs to convert to actual time by:
+                     if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
+                     if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
+                     if T0Ttime [5:4]=10b, then usLinkStatusZeroTime=T0Ttime [3:0]*2.0us (0.0 to 30us)
+                     if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
+
+ulHighVoltageHTLinkFreq:     HT link frequency for power state with high voltage. If boot up runs in HT1, this must be 0.
+                             This must be less than or equal to ulHTLinkFreq(bootup frequency). 
+ulLowVoltageHTLinkFreq:      HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
+                             This must be less than or equal to ulHighVoltageHTLinkFreq.
+
+usMaxUpStreamHTLinkWidth:    Asymmetric link width support in the future, to replace usMaxHTLinkWidth. Not used for now.
+usMaxDownStreamHTLinkWidth:  same as above.
+usMinUpStreamHTLinkWidth:    Asymmetric link width support in the future, to replace usMinHTLinkWidth. Not used for now.
+usMinDownStreamHTLinkWidth:  same as above.
+*/
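+// A minimal illustrative sketch (not part of the VBIOS spec; the helper name
+// is hypothetical): expanding the T0Ttime encoding documented above into
+// usLinkStatusZeroTime's 0.01us units.
+#if 0   // example only, not compiled
+static unsigned int t0time_to_0_01us(unsigned char t0time)
+{
+    unsigned char mantissa = t0time & 0x0F;         // T0Ttime[3:0]
+    switch ((t0time >> 4) & 0x03) {                 // T0Ttime[5:4]
+    case 0:  return mantissa * 10;                  // 0.1us steps (0.0 to 1.5us)
+    case 1:  return mantissa * 50;                  // 0.5us steps (0.0 to 7.5us)
+    case 2:  return mantissa * 200;                 // 2.0us steps (0.0 to 30us)
+    default: return mantissa * 2000;                // 20us steps, mantissa 0x0..0xa
+    }
+}
+#endif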
+
+// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo  - CPU type definition 
+#define    INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU             0
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN        1
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND      2
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__K8             3
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH        4
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI         5
+
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE       INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI    // this define reflects the max defined CPU code
+
+#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE                 0x00000001
+#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE             0x00000002
+#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE                  0x00000004 
+#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY         0x00000008
+#define SYSTEM_CONFIG_CLMC_ENABLED                        0x00000010
+#define SYSTEM_CONFIG_CDLW_ENABLED                        0x00000020
+#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED              0x00000040
+#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED            0x00000080
+#define SYSTEM_CONFIG_CDLF_ENABLED                        0x00000100
+#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED                0x00000200
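+
+// Illustrative sketch (hypothetical helper): the usMaxNBVoltage/usMinNBVoltage
+// notes above key off the PWM bit, so a driver-side check might read:
+#if 0   // example only, not compiled
+static int atom_nb_voltage_uses_pwm(const ATOM_INTEGRATED_SYSTEM_INFO_V2 *info)
+{
+    // Nonzero when NB voltage is PWM controlled rather than GPIO controlled.
+    return (info->ulSystemConfig & SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE) != 0;
+}
+#endif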
+
+#define IGP_DDI_SLOT_LANE_CONFIG_MASK                     0x000000FF
+
+#define b0IGP_DDI_SLOT_LANE_MAP_MASK                      0x0F
+#define b0IGP_DDI_SLOT_DOCKING_LANE_MAP_MASK              0xF0
+#define b0IGP_DDI_SLOT_CONFIG_LANE_0_3                    0x01
+#define b0IGP_DDI_SLOT_CONFIG_LANE_4_7                    0x02
+#define b0IGP_DDI_SLOT_CONFIG_LANE_8_11                   0x04
+#define b0IGP_DDI_SLOT_CONFIG_LANE_12_15                  0x08
+
+#define IGP_DDI_SLOT_ATTRIBUTE_MASK                       0x0000FF00
+#define IGP_DDI_SLOT_CONFIG_REVERSED                      0x00000100
+#define b1IGP_DDI_SLOT_CONFIG_REVERSED                    0x01
+
+#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK                  0x00FF0000
+
+// The next IntegratedSystemInfoTable revision after V2 is V5, because the real revision of V2 is 1.4. This revision is used for RR
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG	                     ulBootUpEngineClock;       //in 10kHz unit
+  ULONG                      ulDentistVCOFreq;          //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK. 
+  ULONG                      ulLClockFreq;              //GPU Lclk freq in 10kHz unit, have relationship with NCLK in NorthBridge
+  ULONG	                     ulBootUpUMAClock;          //in 10kHz unit
+  ULONG                      ulReserved1[8];            //must be 0x0 for the reserved
+  ULONG                      ulBootUpReqDisplayVector;
+  ULONG                      ulOtherDisplayMisc;
+  ULONG                      ulReserved2[4];            //must be 0x0 for the reserved
+  ULONG                      ulSystemConfig;            //TBD
+  ULONG                      ulCPUCapInfo;              //TBD
+  USHORT                     usMaxNBVoltage;            //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+  USHORT                     usMinNBVoltage;            //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+  USHORT                     usBootUpNBVoltage;         //boot up NB voltage
+  UCHAR                      ucHtcTmpLmt;               //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD
+  UCHAR                      ucTjOffset;                //bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD
+  ULONG                      ulReserved3[4];            //must be 0x0 for the reserved
+  ULONG                      ulDDISlot1Config;          //see above ulDDISlot1Config definition
+  ULONG                      ulDDISlot2Config;
+  ULONG                      ulDDISlot3Config;
+  ULONG                      ulDDISlot4Config;
+  ULONG                      ulReserved4[4];            //must be 0x0 for the reserved
+  UCHAR                      ucMemoryType;              //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+  UCHAR                      ucUMAChannelNumber;
+  USHORT                     usReserved;
+  ULONG                      ulReserved5[4];            //must be 0x0 for the reserved
+  ULONG                      ulCSR_M3_ARB_CNTL_DEFAULT[10];//arrays with values for CSR M3 arbiter for default
+  ULONG                      ulCSR_M3_ARB_CNTL_UVD[10]; //arrays with values for CSR M3 arbiter for UVD playback
+  ULONG                      ulCSR_M3_ARB_CNTL_FS3D[10];//arrays with values for CSR M3 arbiter for Full Screen 3D applications
+  ULONG                      ulReserved6[61];           //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V5;   
+
+#define ATOM_CRT_INT_ENCODER1_INDEX                       0x00000000
+#define ATOM_LCD_INT_ENCODER1_INDEX                       0x00000001
+#define ATOM_TV_INT_ENCODER1_INDEX                        0x00000002
+#define ATOM_DFP_INT_ENCODER1_INDEX                       0x00000003
+#define ATOM_CRT_INT_ENCODER2_INDEX                       0x00000004
+#define ATOM_LCD_EXT_ENCODER1_INDEX                       0x00000005
+#define ATOM_TV_EXT_ENCODER1_INDEX                        0x00000006
+#define ATOM_DFP_EXT_ENCODER1_INDEX                       0x00000007
+#define ATOM_CV_INT_ENCODER1_INDEX                        0x00000008
+#define ATOM_DFP_INT_ENCODER2_INDEX                       0x00000009
+#define ATOM_CRT_EXT_ENCODER1_INDEX                       0x0000000A
+#define ATOM_CV_EXT_ENCODER1_INDEX                        0x0000000B
+#define ATOM_DFP_INT_ENCODER3_INDEX                       0x0000000C
+#define ATOM_DFP_INT_ENCODER4_INDEX                       0x0000000D
+
+// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable
+#define ASIC_INT_DAC1_ENCODER_ID    											0x00 
+#define ASIC_INT_TV_ENCODER_ID														0x02
+#define ASIC_INT_DIG1_ENCODER_ID													0x03
+#define ASIC_INT_DAC2_ENCODER_ID													0x04
+#define ASIC_EXT_TV_ENCODER_ID														0x06
+#define ASIC_INT_DVO_ENCODER_ID														0x07
+#define ASIC_INT_DIG2_ENCODER_ID													0x09
+#define ASIC_EXT_DIG_ENCODER_ID														0x05
+#define ASIC_EXT_DIG2_ENCODER_ID													0x08
+#define ASIC_INT_DIG3_ENCODER_ID													0x0a
+#define ASIC_INT_DIG4_ENCODER_ID													0x0b
+#define ASIC_INT_DIG5_ENCODER_ID													0x0c
+#define ASIC_INT_DIG6_ENCODER_ID													0x0d
+#define ASIC_INT_DIG7_ENCODER_ID													0x0e
+
+//define Encoder attribute
+#define ATOM_ANALOG_ENCODER																0
+#define ATOM_DIGITAL_ENCODER															1		
+#define ATOM_DP_ENCODER															      2		
+
+#define ATOM_ENCODER_ENUM_MASK                            0x70
+#define ATOM_ENCODER_ENUM_ID1                             0x00
+#define ATOM_ENCODER_ENUM_ID2                             0x10
+#define ATOM_ENCODER_ENUM_ID3                             0x20
+#define ATOM_ENCODER_ENUM_ID4                             0x30
+#define ATOM_ENCODER_ENUM_ID5                             0x40 
+#define ATOM_ENCODER_ENUM_ID6                             0x50
+
+#define ATOM_DEVICE_CRT1_INDEX                            0x00000000
+#define ATOM_DEVICE_LCD1_INDEX                            0x00000001
+#define ATOM_DEVICE_TV1_INDEX                             0x00000002
+#define ATOM_DEVICE_DFP1_INDEX                            0x00000003
+#define ATOM_DEVICE_CRT2_INDEX                            0x00000004
+#define ATOM_DEVICE_LCD2_INDEX                            0x00000005
+#define ATOM_DEVICE_DFP6_INDEX                            0x00000006
+#define ATOM_DEVICE_DFP2_INDEX                            0x00000007
+#define ATOM_DEVICE_CV_INDEX                              0x00000008
+#define ATOM_DEVICE_DFP3_INDEX                            0x00000009
+#define ATOM_DEVICE_DFP4_INDEX                            0x0000000A
+#define ATOM_DEVICE_DFP5_INDEX                            0x0000000B
+
+#define ATOM_DEVICE_RESERVEDC_INDEX                       0x0000000C
+#define ATOM_DEVICE_RESERVEDD_INDEX                       0x0000000D
+#define ATOM_DEVICE_RESERVEDE_INDEX                       0x0000000E
+#define ATOM_DEVICE_RESERVEDF_INDEX                       0x0000000F
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO                    (ATOM_DEVICE_DFP3_INDEX+1)
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2                  ATOM_MAX_SUPPORTED_DEVICE_INFO
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3                  (ATOM_DEVICE_DFP5_INDEX + 1 )
+
+#define ATOM_MAX_SUPPORTED_DEVICE                         (ATOM_DEVICE_RESERVEDF_INDEX+1)
+
+#define ATOM_DEVICE_CRT1_SUPPORT                          (0x1L << ATOM_DEVICE_CRT1_INDEX )
+#define ATOM_DEVICE_LCD1_SUPPORT                          (0x1L << ATOM_DEVICE_LCD1_INDEX )
+#define ATOM_DEVICE_TV1_SUPPORT                           (0x1L << ATOM_DEVICE_TV1_INDEX  )
+#define ATOM_DEVICE_DFP1_SUPPORT                          (0x1L << ATOM_DEVICE_DFP1_INDEX )
+#define ATOM_DEVICE_CRT2_SUPPORT                          (0x1L << ATOM_DEVICE_CRT2_INDEX )
+#define ATOM_DEVICE_LCD2_SUPPORT                          (0x1L << ATOM_DEVICE_LCD2_INDEX )
+#define ATOM_DEVICE_DFP6_SUPPORT                          (0x1L << ATOM_DEVICE_DFP6_INDEX )
+#define ATOM_DEVICE_DFP2_SUPPORT                          (0x1L << ATOM_DEVICE_DFP2_INDEX )
+#define ATOM_DEVICE_CV_SUPPORT                            (0x1L << ATOM_DEVICE_CV_INDEX   )
+#define ATOM_DEVICE_DFP3_SUPPORT                          (0x1L << ATOM_DEVICE_DFP3_INDEX )
+#define ATOM_DEVICE_DFP4_SUPPORT                          (0x1L << ATOM_DEVICE_DFP4_INDEX )
+#define ATOM_DEVICE_DFP5_SUPPORT                          (0x1L << ATOM_DEVICE_DFP5_INDEX )
+
+#define ATOM_DEVICE_CRT_SUPPORT                           (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
+#define ATOM_DEVICE_DFP_SUPPORT                           (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT |  ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT)
+#define ATOM_DEVICE_TV_SUPPORT                            (ATOM_DEVICE_TV1_SUPPORT)
+#define ATOM_DEVICE_LCD_SUPPORT                           (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
+
+#define ATOM_DEVICE_CONNECTOR_TYPE_MASK                   0x000000F0
+#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT                  0x00000004
+#define ATOM_DEVICE_CONNECTOR_VGA                         0x00000001
+#define ATOM_DEVICE_CONNECTOR_DVI_I                       0x00000002
+#define ATOM_DEVICE_CONNECTOR_DVI_D                       0x00000003
+#define ATOM_DEVICE_CONNECTOR_DVI_A                       0x00000004
+#define ATOM_DEVICE_CONNECTOR_SVIDEO                      0x00000005
+#define ATOM_DEVICE_CONNECTOR_COMPOSITE                   0x00000006
+#define ATOM_DEVICE_CONNECTOR_LVDS                        0x00000007
+#define ATOM_DEVICE_CONNECTOR_DIGI_LINK                   0x00000008
+#define ATOM_DEVICE_CONNECTOR_SCART                       0x00000009
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_A                 0x0000000A
+#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_B                 0x0000000B
+#define ATOM_DEVICE_CONNECTOR_CASE_1                      0x0000000E
+#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT                 0x0000000F
+
+
+#define ATOM_DEVICE_DAC_INFO_MASK                         0x0000000F
+#define ATOM_DEVICE_DAC_INFO_SHIFT                        0x00000000
+#define ATOM_DEVICE_DAC_INFO_NODAC                        0x00000000
+#define ATOM_DEVICE_DAC_INFO_DACA                         0x00000001
+#define ATOM_DEVICE_DAC_INFO_DACB                         0x00000002
+#define ATOM_DEVICE_DAC_INFO_EXDAC                        0x00000003
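+
+// Illustrative sketch: connector type and DAC info pack into a single byte, so
+// the masks/shifts above can be combined as below (macro names are hypothetical):
+#if 0   // example only, not compiled
+#define ATOM_DEVICE_CONNECTOR(uc)  (((uc) & ATOM_DEVICE_CONNECTOR_TYPE_MASK) >> \
+                                    ATOM_DEVICE_CONNECTOR_TYPE_SHIFT)
+#define ATOM_DEVICE_DAC(uc)        (((uc) & ATOM_DEVICE_DAC_INFO_MASK) >> \
+                                    ATOM_DEVICE_DAC_INFO_SHIFT)
+#endif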
+
+#define ATOM_DEVICE_I2C_ID_NOI2C                          0x00000000
+
+#define ATOM_DEVICE_I2C_LINEMUX_MASK                      0x0000000F
+#define ATOM_DEVICE_I2C_LINEMUX_SHIFT                     0x00000000
+
+#define ATOM_DEVICE_I2C_ID_MASK                           0x00000070
+#define ATOM_DEVICE_I2C_ID_SHIFT                          0x00000004
+#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE              0x00000001
+#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE                  0x00000002
+#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE                0x00000003    //For IGP RS600
+#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL                 0x00000004    //For IGP RS690
+
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK                 0x00000080
+#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT                0x00000007
+#define	ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C            0x00000000
+#define	ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C            0x00000001
+
+//  usDeviceSupport:
+//  Bits0	= 0 - no CRT1 support= 1- CRT1 is supported
+//  Bit 1	= 0 - no LCD1 support= 1- LCD1 is supported
+//  Bit 2	= 0 - no TV1  support= 1- TV1  is supported
+//  Bit 3	= 0 - no DFP1 support= 1- DFP1 is supported
+//  Bit 4	= 0 - no CRT2 support= 1- CRT2 is supported
+//  Bit 5	= 0 - no LCD2 support= 1- LCD2 is supported
+//  Bit 6	= 0 - no DFP6 support= 1- DFP6 is supported
+//  Bit 7	= 0 - no DFP2 support= 1- DFP2 is supported
+//  Bit 8	= 0 - no CV   support= 1- CV   is supported
+//  Bit 9	= 0 - no DFP3 support= 1- DFP3 is supported
+//  Bit 10      = 0 - no DFP4 support= 1- DFP4 is supported
+//  Bit 11      = 0 - no DFP5 support= 1- DFP5 is supported
+//   
+//  
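+
+// Illustrative sketch (hypothetical helper): the per-device bits above are meant
+// to be tested directly against usDeviceSupport using the ATOM_DEVICE_xxx_SUPPORT
+// masks, e.g.:
+#if 0   // example only, not compiled
+static int atom_supports_any_dfp(USHORT usDeviceSupport)
+{
+    // Nonzero when at least one DFP output is present on the board.
+    return (usDeviceSupport & ATOM_DEVICE_DFP_SUPPORT) != 0;
+}
+#endif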
+
+/****************************************************************************/
+/* Structure used in MclkSS_InfoTable                                       */
+/****************************************************************************/
+//    ucI2C_ConfigID
+//    [7:0] - I2C LINE Associate ID
+//          = 0   - no I2C
+//    [7]   - HW_Cap        = 1,  [6:0]=HW assisted I2C ID(HW line selection)
+//                          = 0,  [6:0]=SW assisted I2C ID
+//    [6-4] - HW_ENGINE_ID  = 1,  HW engine for NON multimedia use
+//                          = 2,  HW engine for Multimedia use
+//                          = 3-7 Reserved for future I2C engines
+//    [3-0] - I2C_LINE_MUX  = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C
+
+typedef struct _ATOM_I2C_ID_CONFIG
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR   bfHW_Capable:1;
+  UCHAR   bfHW_EngineID:3;
+  UCHAR   bfI2C_LineMux:4;
+#else
+  UCHAR   bfI2C_LineMux:4;
+  UCHAR   bfHW_EngineID:3;
+  UCHAR   bfHW_Capable:1;
+#endif
+}ATOM_I2C_ID_CONFIG;
+
+typedef union _ATOM_I2C_ID_CONFIG_ACCESS
+{
+  ATOM_I2C_ID_CONFIG sbfAccess;
+  UCHAR              ucAccess;
+}ATOM_I2C_ID_CONFIG_ACCESS;
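+
+// Illustrative sketch (the use_hw_engine()/use_sw_gpio() callees are placeholders,
+// not defined in this header): a raw ucI2C_ConfigID byte can be loaded through
+// ucAccess and then picked apart through the bitfields:
+#if 0   // example only, not compiled
+static void atom_decode_i2c_id(UCHAR raw)
+{
+    ATOM_I2C_ID_CONFIG_ACCESS id;
+    id.ucAccess = raw;                                  // load the raw byte
+    if (id.sbfAccess.bfHW_Capable)                      // [7]: HW assisted
+        use_hw_engine(id.sbfAccess.bfHW_EngineID,       // [6:4]: engine select
+                      id.sbfAccess.bfI2C_LineMux);      // [3:0]: mux number
+    else
+        use_sw_gpio(id.sbfAccess.bfI2C_LineMux);        // [3:0]: GPIO ID
+}
+#endif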
+   
+
+/****************************************************************************/	
+// Structure used in GPIO_I2C_InfoTable
+/****************************************************************************/	
+typedef struct _ATOM_GPIO_I2C_ASSIGMENT
+{
+  USHORT                    usClkMaskRegisterIndex;
+  USHORT                    usClkEnRegisterIndex;
+  USHORT                    usClkY_RegisterIndex;
+  USHORT                    usClkA_RegisterIndex;
+  USHORT                    usDataMaskRegisterIndex;
+  USHORT                    usDataEnRegisterIndex;
+  USHORT                    usDataY_RegisterIndex;
+  USHORT                    usDataA_RegisterIndex;
+  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+  UCHAR                     ucClkMaskShift;
+  UCHAR                     ucClkEnShift;
+  UCHAR                     ucClkY_Shift;
+  UCHAR                     ucClkA_Shift;
+  UCHAR                     ucDataMaskShift;
+  UCHAR                     ucDataEnShift;
+  UCHAR                     ucDataY_Shift;
+  UCHAR                     ucDataA_Shift;
+  UCHAR                     ucReserved1;
+  UCHAR                     ucReserved2;
+}ATOM_GPIO_I2C_ASSIGMENT;
+
+typedef struct _ATOM_GPIO_I2C_INFO
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  ATOM_GPIO_I2C_ASSIGMENT   asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_GPIO_I2C_INFO;
+
+/****************************************************************************/	
+// Common Structure used in other structures
+/****************************************************************************/	
+
+#ifndef _H2INC
+  
+//Please don't add to or expand the bitfield structure below; it will be retired soon!
+typedef struct _ATOM_MODE_MISC_INFO
+{ 
+#if ATOM_BIG_ENDIAN
+  USHORT Reserved:6;
+  USHORT RGB888:1;
+  USHORT DoubleClock:1;
+  USHORT Interlace:1;
+  USHORT CompositeSync:1;
+  USHORT V_ReplicationBy2:1;
+  USHORT H_ReplicationBy2:1;
+  USHORT VerticalCutOff:1;
+  USHORT VSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT HSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT HorizontalCutOff:1;
+#else
+  USHORT HorizontalCutOff:1;
+  USHORT HSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT VSyncPolarity:1;      //0=Active High, 1=Active Low
+  USHORT VerticalCutOff:1;
+  USHORT H_ReplicationBy2:1;
+  USHORT V_ReplicationBy2:1;
+  USHORT CompositeSync:1;
+  USHORT Interlace:1;
+  USHORT DoubleClock:1;
+  USHORT RGB888:1;
+  USHORT Reserved:6;           
+#endif
+}ATOM_MODE_MISC_INFO;
+  
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{ 
+  ATOM_MODE_MISC_INFO sbfAccess;
+  USHORT              usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+  
+#else
+  
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{ 
+  USHORT              usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+   
+#endif
+
+// usModeMiscInfo-
+#define ATOM_H_CUTOFF           0x01
+#define ATOM_HSYNC_POLARITY     0x02             //0=Active High, 1=Active Low
+#define ATOM_VSYNC_POLARITY     0x04             //0=Active High, 1=Active Low
+#define ATOM_V_CUTOFF           0x08
+#define ATOM_H_REPLICATIONBY2   0x10
+#define ATOM_V_REPLICATIONBY2   0x20
+#define ATOM_COMPOSITESYNC      0x40
+#define ATOM_INTERLACE          0x80
+#define ATOM_DOUBLE_CLOCK_MODE  0x100
+#define ATOM_RGB888_MODE        0x200
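+
+// Illustrative sketch: the flag values above mirror the bit layout of
+// ATOM_MODE_MISC_INFO, so either view of the access union works (the helper
+// name is hypothetical):
+#if 0   // example only, not compiled
+static void atom_mode_set_interlace(ATOM_MODE_MISC_INFO_ACCESS *info)
+{
+    info->usAccess |= ATOM_INTERLACE;     // equivalent to setting the
+                                          // sbfAccess.Interlace bitfield
+}
+#endif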
+
+//usRefreshRate-
+#define ATOM_REFRESH_43         43
+#define ATOM_REFRESH_47         47
+#define ATOM_REFRESH_56         56	
+#define ATOM_REFRESH_60         60
+#define ATOM_REFRESH_65         65
+#define ATOM_REFRESH_70         70
+#define ATOM_REFRESH_72         72
+#define ATOM_REFRESH_75         75
+#define ATOM_REFRESH_85         85
+
+// ATOM_MODE_TIMING data are exactly the same as VESA timing data.
+// Translation from EDID to ATOM_MODE_TIMING, use the following formula.
+//
+//	VESA_HTOTAL			=	VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK
+//						=	EDID_HA + EDID_HBL
+//	VESA_HDISP			=	VESA_ACTIVE	=	EDID_HA
+//	VESA_HSYNC_START	=	VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH
+//						=	EDID_HA + EDID_HSO
+//	VESA_HSYNC_WIDTH	=	VESA_HSYNC_TIME	=	EDID_HSPW
+//	VESA_BORDER			=	EDID_BORDER
+
+/****************************************************************************/	
+// Structure used in SetCRTC_UsingDTDTimingTable
+/****************************************************************************/	
+typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS
+{
+  USHORT  usH_Size;
+  USHORT  usH_Blanking_Time;
+  USHORT  usV_Size;
+  USHORT  usV_Blanking_Time;			
+  USHORT  usH_SyncOffset;
+  USHORT  usH_SyncWidth;
+  USHORT  usV_SyncOffset;
+  USHORT  usV_SyncWidth;
+  ATOM_MODE_MISC_INFO_ACCESS  susModeMiscInfo;  
+  UCHAR   ucH_Border;         // From DFP EDID
+  UCHAR   ucV_Border;
+  UCHAR   ucCRTC;             // ATOM_CRTC1 or ATOM_CRTC2  
+  UCHAR   ucPadding[3];
+}SET_CRTC_USING_DTD_TIMING_PARAMETERS;
+
+/****************************************************************************/	
+// Structure used in SetCRTC_TimingTable
+/****************************************************************************/	
+typedef struct _SET_CRTC_TIMING_PARAMETERS
+{
+  USHORT                      usH_Total;        // horizontal total
+  USHORT                      usH_Disp;         // horizontal display
+  USHORT                      usH_SyncStart;    // horizontal Sync start
+  USHORT                      usH_SyncWidth;    // horizontal Sync width
+  USHORT                      usV_Total;        // vertical total
+  USHORT                      usV_Disp;         // vertical display
+  USHORT                      usV_SyncStart;    // vertical Sync start
+  USHORT                      usV_SyncWidth;    // vertical Sync width
+  ATOM_MODE_MISC_INFO_ACCESS  susModeMiscInfo;
+  UCHAR                       ucCRTC;           // ATOM_CRTC1 or ATOM_CRTC2
+  UCHAR                       ucOverscanRight;  // right
+  UCHAR                       ucOverscanLeft;   // left
+  UCHAR                       ucOverscanBottom; // bottom
+  UCHAR                       ucOverscanTop;    // top
+  UCHAR                       ucReserved;
+}SET_CRTC_TIMING_PARAMETERS;
+#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
+
+/****************************************************************************/	
+// Structure used in StandardVESA_TimingTable
+//                   AnalogTV_InfoTable 
+//                   ComponentVideoInfoTable
+/****************************************************************************/	
+typedef struct _ATOM_MODE_TIMING
+{
+  USHORT  usCRTC_H_Total;
+  USHORT  usCRTC_H_Disp;
+  USHORT  usCRTC_H_SyncStart;
+  USHORT  usCRTC_H_SyncWidth;
+  USHORT  usCRTC_V_Total;
+  USHORT  usCRTC_V_Disp;
+  USHORT  usCRTC_V_SyncStart;
+  USHORT  usCRTC_V_SyncWidth;
+  USHORT  usPixelClock;					                 //in 10Khz unit
+  ATOM_MODE_MISC_INFO_ACCESS  susModeMiscInfo;
+  USHORT  usCRTC_OverscanRight;
+  USHORT  usCRTC_OverscanLeft;
+  USHORT  usCRTC_OverscanBottom;
+  USHORT  usCRTC_OverscanTop;
+  USHORT  usReserve;
+  UCHAR   ucInternalModeNumber;
+  UCHAR   ucRefreshRate;
+}ATOM_MODE_TIMING;
+
+typedef struct _ATOM_DTD_FORMAT
+{
+  USHORT  usPixClk;
+  USHORT  usHActive;
+  USHORT  usHBlanking_Time;
+  USHORT  usVActive;
+  USHORT  usVBlanking_Time;			
+  USHORT  usHSyncOffset;
+  USHORT  usHSyncWidth;
+  USHORT  usVSyncOffset;
+  USHORT  usVSyncWidth;
+  USHORT  usImageHSize;
+  USHORT  usImageVSize;
+  UCHAR   ucHBorder;
+  UCHAR   ucVBorder;
+  ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+  UCHAR   ucInternalModeNumber;
+  UCHAR   ucRefreshRate;
+}ATOM_DTD_FORMAT;
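+
+// Illustrative sketch (hypothetical helper): applying the EDID->VESA formulas
+// documented before SetCRTC_UsingDTDTimingTable to turn an ATOM_DTD_FORMAT
+// into full CRTC timing:
+#if 0   // example only, not compiled
+static void atom_dtd_to_crtc_timing(const ATOM_DTD_FORMAT *dtd,
+                                    SET_CRTC_TIMING_PARAMETERS *crtc)
+{
+    crtc->usH_Total       = dtd->usHActive + dtd->usHBlanking_Time;
+    crtc->usH_Disp        = dtd->usHActive;
+    crtc->usH_SyncStart   = dtd->usHActive + dtd->usHSyncOffset;
+    crtc->usH_SyncWidth   = dtd->usHSyncWidth;
+    crtc->usV_Total       = dtd->usVActive + dtd->usVBlanking_Time;
+    crtc->usV_Disp        = dtd->usVActive;
+    crtc->usV_SyncStart   = dtd->usVActive + dtd->usVSyncOffset;
+    crtc->usV_SyncWidth   = dtd->usVSyncWidth;
+    crtc->susModeMiscInfo = dtd->susModeMiscInfo;
+}
+#endif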
+
+/****************************************************************************/	
+// Structure used in LVDS_InfoTable 
+//  * Need a document to describe this table
+/****************************************************************************/	
+#define SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+#define SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+#define SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+#define SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_LVDS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usModePatchTableOffset;
+  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
+  USHORT              usOffDelayInMs;
+  UCHAR               ucPowerSequenceDigOntoDEin10Ms;
+  UCHAR               ucPowerSequenceDEtoBLOnin10Ms;
+  UCHAR               ucLVDS_Misc;               // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+                                                 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+                                                 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+                                                 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+}ATOM_LVDS_INFO;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_LVDS_INFO_V12
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usExtInfoTableOffset;
+  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
+  USHORT              usOffDelayInMs;
+  UCHAR               ucPowerSequenceDigOntoDEin10Ms;
+  UCHAR               ucPowerSequenceDEtoBLOnin10Ms;
+  UCHAR               ucLVDS_Misc;               // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+                                                 // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+                                                 // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+                                                 // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+  USHORT              usLCDVenderID;
+  USHORT              usLCDProductID;
+  UCHAR               ucLCDPanel_SpecialHandlingCap; 
+  UCHAR               ucPanelInfoSize;           //  start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
+  UCHAR               ucReserved[2];
+}ATOM_LVDS_INFO_V12;
+
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. 
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL 
+#define	LCDPANEL_CAP_READ_EDID                  0x1
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with the multiple supported refresh rates in usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12.
+#define	LCDPANEL_CAP_DRR_SUPPORTED              0x2
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define	LCDPANEL_CAP_eDP                        0x4
+
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6  5  4
+                              //      0  0  0  -  Color bit depth is undefined
+                              //      0  0  1  -  6 Bits per Primary Color
+                              //      0  1  0  -  8 Bits per Primary Color
+                              //      0  1  1  - 10 Bits per Primary Color
+                              //      1  0  0  - 12 Bits per Primary Color
+                              //      1  0  1  - 14 Bits per Primary Color
+                              //      1  1  0  - 16 Bits per Primary Color
+                              //      1  1  1  - Reserved
+
+#define PANEL_COLOR_BIT_DEPTH_MASK    0x70
+
+// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled}   
+#define PANEL_RANDOM_DITHER   0x80
+#define PANEL_RANDOM_DITHER_MASK   0x80
+
+#define ATOM_LVDS_INFO_LAST  ATOM_LVDS_INFO_V12   // no need to change this 
+
+/****************************************************************************/	
+// Structures used by LCD_InfoTable V1.3    Note: previous version was called ATOM_LVDS_INFO_V12
+// ASIC Families:  NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/	
+typedef struct _ATOM_LCD_INFO_V13
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usExtInfoTableOffset;
+  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
+  ULONG               ulReserved0;
+  UCHAR               ucLCD_Misc;                // Reorganized in V13
+                                                 // Bit0: {=0:single, =1:dual},
+                                                 // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888}  // was {=0:666RGB, =1:888RGB},
+                                                 // Bit3:2: {Grey level}
+                                                 // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h) 
+                                                 // Bit7   Reserved.  was for ATOM_PANEL_MISC_API_ENABLED, still need it?  
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+  USHORT              usLCDVenderID;
+  USHORT              usLCDProductID;
+  UCHAR               ucLCDPanel_SpecialHandlingCap;  // Reorganized in V13 
+                                                 // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
+                                                 // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
+                                                 // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1)
+                                                 // Bit7-3: Reserved 
+  UCHAR               ucPanelInfoSize;					 //  start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
+  USHORT              usBacklightPWM;            //  Backlight PWM in Hz. New in _V13
+
+  UCHAR               ucPowerSequenceDIGONtoDE_in4Ms;
+  UCHAR               ucPowerSequenceDEtoVARY_BL_in4Ms;
+  UCHAR               ucPowerSequenceVARY_BLtoDE_in4Ms;
+  UCHAR               ucPowerSequenceDEtoDIGON_in4Ms;
+
+  UCHAR               ucOffDelay_in4Ms;
+  UCHAR               ucPowerSequenceVARY_BLtoBLON_in4Ms;
+  UCHAR               ucPowerSequenceBLONtoVARY_BL_in4Ms;
+  UCHAR               ucReserved1;
+
+  UCHAR               ucDPCD_eDP_CONFIGURATION_CAP;     // dpcd 0dh
+  UCHAR               ucDPCD_MAX_LINK_RATE;             // dpcd 01h
+  UCHAR               ucDPCD_MAX_LANE_COUNT;            // dpcd 02h
+  UCHAR               ucDPCD_MAX_DOWNSPREAD;            // dpcd 03h
+
+  USHORT              usMaxPclkFreqInSingleLink;        // Max PixelClock frequency in single link mode. 
+  UCHAR               uceDPToLVDSRxId;
+  UCHAR               ucLcdReservd;
+  ULONG               ulReserved[2];
+}ATOM_LCD_INFO_V13;  
+
+#define ATOM_LCD_INFO_LAST  ATOM_LCD_INFO_V13    
+
+//Definitions for ucLCD_Misc
+#define ATOM_PANEL_MISC_V13_DUAL                   0x00000001
+#define ATOM_PANEL_MISC_V13_FPDI                   0x00000002
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL             0x0000000C
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT       2
+#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK   0x70
+#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR         0x10
+#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR         0x20
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6  5  4
+                              //      0  0  0  -  Color bit depth is undefined
+                              //      0  0  1  -  6 Bits per Primary Color
+                              //      0  1  0  -  8 Bits per Primary Color
+                              //      0  1  1  - 10 Bits per Primary Color
+                              //      1  0  0  - 12 Bits per Primary Color
+                              //      1  0  1  - 14 Bits per Primary Color
+                              //      1  1  0  - 16 Bits per Primary Color
+                              //      1  1  1  - Reserved
+ 
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. 
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL 
+#define	LCDPANEL_CAP_V13_READ_EDID              0x1        // = LCDPANEL_CAP_READ_EDID no change comparing to previous version
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with the multiple supported refresh rates in usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12.
+#define	LCDPANEL_CAP_V13_DRR_SUPPORTED          0x2        // = LCDPANEL_CAP_DRR_SUPPORTED no change comparing to previous version
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define	LCDPANEL_CAP_V13_eDP                    0x4        // = LCDPANEL_CAP_eDP no change comparing to previous version
+
+//uceDPToLVDSRxId
+#define eDP_TO_LVDS_RX_DISABLE                  0x00       // no eDP->LVDS translator chip 
+#define eDP_TO_LVDS_COMMON_ID                   0x01       // common eDP->LVDS translator chip without AMD SW init
+#define eDP_TO_LVDS_RT_ID                       0x02       // RT translator which requires AMD SW init
+
+typedef struct  _ATOM_PATCH_RECORD_MODE
+{
+  UCHAR     ucRecordType;
+  USHORT    usHDisp;
+  USHORT    usVDisp;
+}ATOM_PATCH_RECORD_MODE;
+
+typedef struct  _ATOM_LCD_RTS_RECORD
+{
+  UCHAR     ucRecordType;
+  UCHAR     ucRTSValue;
+}ATOM_LCD_RTS_RECORD;
+
+//!! If the record below exists, it should always be the first record, for easy use in command tables!!!
+// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead.
+typedef struct  _ATOM_LCD_MODE_CONTROL_CAP
+{
+  UCHAR     ucRecordType;
+  USHORT    usLCDCap;
+}ATOM_LCD_MODE_CONTROL_CAP;
+
+#define LCD_MODE_CAP_BL_OFF                   1
+#define LCD_MODE_CAP_CRTC_OFF                 2
+#define LCD_MODE_CAP_PANEL_OFF                4
+
+typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
+{
+  UCHAR ucRecordType;
+  UCHAR ucFakeEDIDLength;
+  UCHAR ucFakeEDIDString[1];    // This actually has ucFakeEDIDLength elements.
+} ATOM_FAKE_EDID_PATCH_RECORD;
+
+typedef struct  _ATOM_PANEL_RESOLUTION_PATCH_RECORD
+{
+   UCHAR    ucRecordType;
+   USHORT   usHSize;
+   USHORT   usVSize;
+}ATOM_PANEL_RESOLUTION_PATCH_RECORD;
+
+#define LCD_MODE_PATCH_RECORD_MODE_TYPE       1
+#define LCD_RTS_RECORD_TYPE                   2
+#define LCD_CAP_RECORD_TYPE                   3
+#define LCD_FAKE_EDID_PATCH_RECORD_TYPE       4
+#define LCD_PANEL_RESOLUTION_RECORD_TYPE      5
+#define LCD_EDID_OFFSET_PATCH_RECORD_TYPE     6
+#define ATOM_RECORD_END_TYPE                  0xFF
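+
+// Illustrative sketch (assumes this header's byte packing; the helper name is
+// hypothetical): patch records are packed back to back, tagged by ucRecordType
+// and terminated by ATOM_RECORD_END_TYPE, so a minimal walker could read:
+#if 0   // example only, not compiled
+static void atom_walk_patch_records(const UCHAR *rec)
+{
+    while (*rec != ATOM_RECORD_END_TYPE) {
+        switch (*rec) {
+        case LCD_FAKE_EDID_PATCH_RECORD_TYPE: {
+            const ATOM_FAKE_EDID_PATCH_RECORD *r = (const void *)rec;
+            rec += sizeof(*r) + r->ucFakeEDIDLength - 1;   // flexible tail
+            break;
+        }
+        case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+            rec += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
+            break;
+        default:              // unhandled record type: stop walking
+            return;
+        }
+    }
+}
+#endif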
+
+/****************************Spread Spectrum Info Table Definitions **********************/
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT
+{
+  USHORT              usSpreadSpectrumPercentage; 
+  UCHAR               ucSpreadSpectrumType;	    //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext., =0 Int. Bit2=1: PCIE REFCLK SS, =0: internal PPLL SS. Others: TBD
+  UCHAR               ucSS_Step;
+  UCHAR               ucSS_Delay;
+  UCHAR               ucSS_Id;
+  UCHAR               ucRecommendedRef_Div;
+  UCHAR               ucSS_Range;               //it was reserved for V11
+}ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
+
+#define ATOM_MAX_SS_ENTRY                      16
+#define ATOM_DP_SS_ID1                         0x0f1      // SS ID for internal DP stream at 2.7GHz. If ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for the internal DP stream at 1.62GHz as well.
+#define ATOM_DP_SS_ID2                         0x0f2      // SS ID for internal DP stream at 1.62GHz, if it exists in SS_InfoTable.
+#define ATOM_LVLINK_2700MHz_SS_ID              0x0f3      // SS ID for LV link translator chip at 2.7GHz
+#define ATOM_LVLINK_1620MHz_SS_ID              0x0f4      // SS ID for LV link translator chip at 1.62GHz
+
+
+#define ATOM_SS_DOWN_SPREAD_MODE_MASK          0x00000000
+#define ATOM_SS_DOWN_SPREAD_MODE               0x00000000
+#define ATOM_SS_CENTRE_SPREAD_MODE_MASK        0x00000001
+#define ATOM_SS_CENTRE_SPREAD_MODE             0x00000001
+#define ATOM_INTERNAL_SS_MASK                  0x00000000
+#define ATOM_EXTERNAL_SS_MASK                  0x00000002
+#define EXEC_SS_STEP_SIZE_SHIFT                2
+#define EXEC_SS_DELAY_SHIFT                    4    
+#define ACTIVEDATA_TO_BLON_DELAY_SHIFT         4
+
+typedef struct _ATOM_SPREAD_SPECTRUM_INFO
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  ATOM_SPREAD_SPECTRUM_ASSIGNMENT   asSS_Info[ATOM_MAX_SS_ENTRY];
+}ATOM_SPREAD_SPECTRUM_INFO;
+
+/****************************************************************************/	
+// Structure used in AnalogTV_InfoTable (Top level)
+/****************************************************************************/	
+//ucTVBootUpDefaultStd definition:
+
+//ATOM_TV_NTSC                1
+//ATOM_TV_NTSCJ               2
+//ATOM_TV_PAL                 3
+//ATOM_TV_PALM                4
+//ATOM_TV_PALCN               5
+//ATOM_TV_PALN                6
+//ATOM_TV_PAL60               7
+//ATOM_TV_SECAM               8
+
+//ucTVSupportedStd definition:
+#define NTSC_SUPPORT          0x1
+#define NTSCJ_SUPPORT         0x2
+
+#define PAL_SUPPORT           0x4
+#define PALM_SUPPORT          0x8
+#define PALCN_SUPPORT         0x10
+#define PALN_SUPPORT          0x20
+#define PAL60_SUPPORT         0x40
+#define SECAM_SUPPORT         0x80
+
+#define MAX_SUPPORTED_TV_TIMING    2
+
+typedef struct _ATOM_ANALOG_TV_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  UCHAR                    ucTV_SupportedStandard;
+  UCHAR                    ucTV_BootUpDefaultStandard; 
+  UCHAR                    ucExt_TV_ASIC_ID;
+  UCHAR                    ucExt_TV_ASIC_SlaveAddr;
+  /*ATOM_DTD_FORMAT          aModeTimings[MAX_SUPPORTED_TV_TIMING];*/
+  ATOM_MODE_TIMING         aModeTimings[MAX_SUPPORTED_TV_TIMING];
+}ATOM_ANALOG_TV_INFO;
+
+#define MAX_SUPPORTED_TV_TIMING_V1_2    3
+
+typedef struct _ATOM_ANALOG_TV_INFO_V1_2
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  UCHAR                    ucTV_SupportedStandard;
+  UCHAR                    ucTV_BootUpDefaultStandard; 
+  UCHAR                    ucExt_TV_ASIC_ID;
+  UCHAR                    ucExt_TV_ASIC_SlaveAddr;
+  ATOM_DTD_FORMAT          aModeTimings[MAX_SUPPORTED_TV_TIMING_V1_2];
+}ATOM_ANALOG_TV_INFO_V1_2;
+
+typedef struct _ATOM_DPCD_INFO
+{
+  UCHAR   ucRevisionNumber;        //10h : Revision 1.0; 11h : Revision 1.1   
+  UCHAR   ucMaxLinkRate;           //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane
+  UCHAR   ucMaxLane;               //Bits 4:0 = MAX_LANE_COUNT (1/2/4). Bit 7 = ENHANCED_FRAME_CAP 
+  UCHAR   ucMaxDownSpread;         //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec)
+}ATOM_DPCD_INFO;
+
+#define ATOM_DPCD_MAX_LANE_MASK    0x1F
+
+/**************************************************************************/
+// VRAM usage and related definitions
+
+// One chunk of VRAM is used by the BIOS for HWICON surfaces, EDID data, and the
+// current mode timing plus detailed (DTD) and/or STD timing data for EACH device. They can be broken down as below.
+// All the addresses below are offsets from the frame buffer start. They all MUST be Dword aligned!
+// To driver: The physical address of this memory portion = mmFB_START(4K aligned) + ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR
+// To Bios:   ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR -> MM_INDEX
+
+#ifndef VESA_MEMORY_IN_64K_BLOCK
+#define VESA_MEMORY_IN_64K_BLOCK        0x100       //256*64K=16MB (Max. VESA memory is 16MB!)
+#endif
+
+#define ATOM_EDID_RAW_DATASIZE          256         //In Bytes
+#define ATOM_HWICON_SURFACE_SIZE        4096        //In Bytes
+#define ATOM_HWICON_INFOTABLE_SIZE      32
+#define MAX_DTD_MODE_IN_VRAM            6
+#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE  (MAX_DTD_MODE_IN_VRAM*28)    //28= (SIZEOF ATOM_DTD_FORMAT) 
+#define ATOM_STD_MODE_SUPPORT_TBL_SIZE  (32*8)                       //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
+//20 bytes for Encoder Type and DPCD in STD EDID area
+#define DFP_ENCODER_TYPE_OFFSET         (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20)    
+#define ATOM_DP_DPCD_OFFSET             (DFP_ENCODER_TYPE_OFFSET + 4 )        
+
+#define ATOM_HWICON1_SURFACE_ADDR       0
+#define ATOM_HWICON2_SURFACE_ADDR       (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_HWICON_INFOTABLE_ADDR      (ATOM_HWICON2_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
+#define ATOM_CRT1_EDID_ADDR             (ATOM_HWICON_INFOTABLE_ADDR + ATOM_HWICON_INFOTABLE_SIZE)
+#define ATOM_CRT1_DTD_MODE_TBL_ADDR     (ATOM_CRT1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT1_STD_MODE_TBL_ADDR	    (ATOM_CRT1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_LCD1_EDID_ADDR             (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD1_DTD_MODE_TBL_ADDR     (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD1_STD_MODE_TBL_ADDR   	(ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_TV1_DTD_MODE_TBL_ADDR      (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP1_EDID_ADDR             (ATOM_TV1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP1_DTD_MODE_TBL_ADDR     (ATOM_DFP1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP1_STD_MODE_TBL_ADDR	    (ATOM_DFP1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CRT2_EDID_ADDR             (ATOM_DFP1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CRT2_DTD_MODE_TBL_ADDR     (ATOM_CRT2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CRT2_STD_MODE_TBL_ADDR	    (ATOM_CRT2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_LCD2_EDID_ADDR             (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD2_DTD_MODE_TBL_ADDR     (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_LCD2_STD_MODE_TBL_ADDR   	(ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP6_EDID_ADDR             (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP6_DTD_MODE_TBL_ADDR     (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP6_STD_MODE_TBL_ADDR     (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP2_EDID_ADDR             (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP2_DTD_MODE_TBL_ADDR     (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP2_STD_MODE_TBL_ADDR     (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_CV_EDID_ADDR               (ATOM_DFP2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_CV_DTD_MODE_TBL_ADDR       (ATOM_CV_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_CV_STD_MODE_TBL_ADDR       (ATOM_CV_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP3_EDID_ADDR             (ATOM_CV_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP3_DTD_MODE_TBL_ADDR     (ATOM_DFP3_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP3_STD_MODE_TBL_ADDR     (ATOM_DFP3_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP4_EDID_ADDR             (ATOM_DFP3_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP4_DTD_MODE_TBL_ADDR     (ATOM_DFP4_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP4_STD_MODE_TBL_ADDR     (ATOM_DFP4_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DFP5_EDID_ADDR             (ATOM_DFP4_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP5_DTD_MODE_TBL_ADDR     (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP5_STD_MODE_TBL_ADDR     (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_DP_TRAINING_TBL_ADDR       (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+
+#define ATOM_STACK_STORAGE_START        (ATOM_DP_TRAINING_TBL_ADDR + 1024)       
+#define ATOM_STACK_STORAGE_END          (ATOM_STACK_STORAGE_START + 512)
+
+//The size below is in KB! (bytes converted to KB, rounded up to a multiple of 4 KB)
+#define ATOM_VRAM_RESERVE_SIZE         ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
+   
+#define ATOM_VRAM_RESERVE_V2_SIZE      32
+
+#define	ATOM_VRAM_OPERATION_FLAGS_MASK         0xC0000000L
+#define ATOM_VRAM_OPERATION_FLAGS_SHIFT        30
+#define	ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION   0x1
+#define	ATOM_VRAM_BLOCK_NEEDS_RESERVATION      0x0
+
+/***********************************************************************************/	
+// Structure used in VRAM_UsageByFirmwareTable
+// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm
+//        at run time.
+// Note2: From RV770 on, the memory is more than 32-bit addressable, so we change
+//        ucTableFormatRevision=1, ucTableContentRevision=4; the structure remains
+//        exactly the same as 1.1 and 1.2 (1.3 was never used), but ulStartAddrUsedByFirmware
+//        (an offset from the start of the memory address) is KB aligned instead of byte aligned.
+/***********************************************************************************/	
+// Note3:
+/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged
+constant across VGA and non-VGA adapters. For CAIL, the size of the FB access area is known; the only thing missing
+is the offset of the FB access area, so we can have:
+
+If (ulStartAddrUsedByFirmware!=0)
+  FBAccessAreaOffset = ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB;
+  // The reserved area has been claimed by VBIOS including this FB access area;
+  // CAIL doesn't need to reserve any extra area for this purpose.
+else  //Non-VGA case
+  if (FB_Size <= 2GB)
+    FBAccessAreaOffset = FB_Size - usFBUsedbyDrvInKB;
+  else
+    FBAccessAreaOffset = Aper_Size - usFBUsedbyDrvInKB;
+
+CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in the non-VGA case.*/
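+
+/* Illustrative sketch only, not part of the ATOM interface: one possible
+ * transcription of the pseudocode in Note3 above into C. The helper name and
+ * the assumption that all quantities are in KB are ours, not the header's. */
+static inline ULONG CailCalcFBAccessAreaOffsetInKb(ULONG ulStartAddrUsedByFirmware,
+                                                   USHORT usFBUsedbyDrvInKB,
+                                                   ULONG ulFBSizeInKb,
+                                                   ULONG ulAperSizeInKb)
+{
+  if (ulStartAddrUsedByFirmware != 0)               // VGA case: VBIOS already reserved the area
+    return ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB;
+  if (ulFBSizeInKb <= 2*1024*1024)                  // non-VGA case, FB_Size <= 2GB
+    return ulFBSizeInKb - usFBUsedbyDrvInKB;
+  return ulAperSizeInKb - usFBUsedbyDrvInKB;        // non-VGA case, FB_Size > 2GB
+}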
+
+/***********************************************************************************/	
+#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO			1
+
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
+{
+  ULONG   ulStartAddrUsedByFirmware;
+  USHORT  usFirmwareUseInKb;
+  USHORT  usReserved;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO;
+
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_FIRMWARE_VRAM_RESERVE_INFO	asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE;
+
+// version changed to 1.5 to allow the driver to allocate the VRAM area for command table access.
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5
+{
+  ULONG   ulStartAddrUsedByFirmware;
+  USHORT  usFirmwareUseInKb;
+  USHORT  usFBUsedByDrvInKb;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5;
+
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5	asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5;
+
+/****************************************************************************/	
+// Structure used in GPIO_Pin_LUTTable
+/****************************************************************************/	
+typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
+{
+  USHORT                   usGpioPin_AIndex;
+  UCHAR                    ucGpioPinBitShift;
+  UCHAR                    ucGPIO_ID;
+}ATOM_GPIO_PIN_ASSIGNMENT;
+
+typedef struct _ATOM_GPIO_PIN_LUT
+{
+  ATOM_COMMON_TABLE_HEADER  sHeader;
+  ATOM_GPIO_PIN_ASSIGNMENT	asGPIO_Pin[1];
+}ATOM_GPIO_PIN_LUT;
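+
+/* Illustrative sketch only, not part of the ATOM interface: asGPIO_Pin[1] is
+ * a variable-length array, so the usable entry count has to be derived from
+ * the table size in the common header. The helper name is ours. */
+static inline USHORT GetNumOfGpioPinLutEntries(const ATOM_GPIO_PIN_LUT *pLut)
+{
+  return (pLut->sHeader.usStructureSize - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+         sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+}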
+
+/****************************************************************************/	
+// Structure used in ComponentVideoInfoTable	
+/****************************************************************************/	
+#define GPIO_PIN_ACTIVE_HIGH          0x1
+
+#define MAX_SUPPORTED_CV_STANDARDS    5
+
+// definitions for ATOM_GPIO_INFO.ucSettings
+#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK  0x1F    // [4:0]
+#define ATOM_GPIO_SETTINGS_RESERVED_MASK  0x60    // [6:5] = must be zeroed out
+#define ATOM_GPIO_SETTINGS_ACTIVE_MASK    0x80    // [7]
+
+typedef struct _ATOM_GPIO_INFO
+{
+  USHORT  usAOffset;
+  UCHAR   ucSettings;
+  UCHAR   ucReserved;
+}ATOM_GPIO_INFO;
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector)
+#define ATOM_CV_RESTRICT_FORMAT_SELECTION           0x2
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i
+#define ATOM_GPIO_DEFAULT_MODE_EN                   0x80 //[7];
+#define ATOM_GPIO_SETTING_PERMODE_MASK              0x7F //[6:0]
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode
+//Line 3 outputs 5V.
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A       0x01     //represent gpio 3 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B       0x02     //represent gpio 4 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT   0x0   
+
+//Line 3 outputs 2.2V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04     //represent gpio 3 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08     //represent gpio 4 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2     
+
+//Line 3 outputs 0V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A        0x10     //represent gpio 3 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B        0x20     //represent gpio 4 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT    0x4 
+
+#define ATOM_CV_LINE3_ASPECTRATIO_MASK              0x3F     // bit [5:0]
+
+#define ATOM_CV_LINE3_ASPECTRATIO_EXIST             0x80     //bit 7
+
+//GPIO bit index in the gpio-setting-per-mode value; also represents the block no. in the gpio blocks.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A   3   //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B   4   //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+
+
+typedef struct _ATOM_COMPONENT_VIDEO_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  USHORT             usMask_PinRegisterIndex;
+  USHORT             usEN_PinRegisterIndex;
+  USHORT             usY_PinRegisterIndex;
+  USHORT             usA_PinRegisterIndex;
+  UCHAR              ucBitShift;
+  UCHAR              ucPinActiveState;  //ucPinActiveState: Bit0=1 active high, =0 active low
+  ATOM_DTD_FORMAT    sReserved;         // must be zeroed out
+  UCHAR              ucMiscInfo;
+  UCHAR              uc480i;
+  UCHAR              uc480p;
+  UCHAR              uc720p;
+  UCHAR              uc1080i;
+  UCHAR              ucLetterBoxMode;
+  UCHAR              ucReserved[3];
+  UCHAR              ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+  ATOM_GPIO_INFO     aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+  ATOM_DTD_FORMAT    aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR              ucMiscInfo;
+  UCHAR              uc480i;
+  UCHAR              uc480p;
+  UCHAR              uc720p;
+  UCHAR              uc1080i;
+  UCHAR              ucReserved;
+  UCHAR              ucLetterBoxMode;
+  UCHAR              ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+  ATOM_GPIO_INFO     aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+  ATOM_DTD_FORMAT    aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO_V21;
+
+#define ATOM_COMPONENT_VIDEO_INFO_LAST  ATOM_COMPONENT_VIDEO_INFO_V21
+
+/****************************************************************************/	
+// Structure used in object_InfoTable
+/****************************************************************************/	
+typedef struct _ATOM_OBJECT_HEADER
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  USHORT                    usDeviceSupport;
+  USHORT                    usConnectorObjectTableOffset;
+  USHORT                    usRouterObjectTableOffset;
+  USHORT                    usEncoderObjectTableOffset;
+  USHORT                    usProtectionObjectTableOffset; //only available when Protection block is independent.
+  USHORT                    usDisplayPathTableOffset;
+}ATOM_OBJECT_HEADER;
+
+typedef struct _ATOM_OBJECT_HEADER_V3
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  USHORT                    usDeviceSupport;
+  USHORT                    usConnectorObjectTableOffset;
+  USHORT                    usRouterObjectTableOffset;
+  USHORT                    usEncoderObjectTableOffset;
+  USHORT                    usProtectionObjectTableOffset; //only available when Protection block is independent.
+  USHORT                    usDisplayPathTableOffset;
+  USHORT                    usMiscObjectTableOffset;
+}ATOM_OBJECT_HEADER_V3;
+
+typedef struct  _ATOM_DISPLAY_OBJECT_PATH
+{
+  USHORT    usDeviceTag;                                   //supported device 
+  USHORT    usSize;                                        //the size of ATOM_DISPLAY_OBJECT_PATH
+  USHORT    usConnObjectId;                                //Connector Object ID 
+  USHORT    usGPUObjectId;                                 //GPU ID 
+  USHORT    usGraphicObjIds[1];                            //1st Encoder Obj sourced from the GPU, through to the last Graphic Obj that terminates at the connector.
+}ATOM_DISPLAY_OBJECT_PATH;
+
+typedef struct  _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
+{
+  USHORT    usDeviceTag;                                   //supported device 
+  USHORT    usSize;                                        //the size of ATOM_DISPLAY_OBJECT_PATH
+  USHORT    usConnObjectId;                                //Connector Object ID 
+  USHORT    usGPUObjectId;                                 //GPU ID 
+  USHORT    usGraphicObjIds[2];                            //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder 
+}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
+{
+  UCHAR                           ucNumOfDispPath;
+  UCHAR                           ucVersion;
+  UCHAR                           ucPadding[2];
+  ATOM_DISPLAY_OBJECT_PATH        asDispPath[1];
+}ATOM_DISPLAY_OBJECT_PATH_TABLE;
+
+
+typedef struct _ATOM_OBJECT                                //each object has this structure    
+{
+  USHORT              usObjectID;
+  USHORT              usSrcDstTableOffset;
+  USHORT              usRecordOffset;                     //this points to a list of records defined below
+  USHORT              usReserved;
+}ATOM_OBJECT;
+
+typedef struct _ATOM_OBJECT_TABLE                         //Each of the four object table offsets above points to a table of objects, all with this structure
+{
+  UCHAR               ucNumberOfObjects;
+  UCHAR               ucPadding[3];
+  ATOM_OBJECT         asObjects[1];
+}ATOM_OBJECT_TABLE;
+
+typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT         //usSrcDstTableOffset points to this structure
+{
+  UCHAR               ucNumberOfSrc;
+  USHORT              usSrcObjectID[1];
+  UCHAR               ucNumberOfDst;
+  USHORT              usDstObjectID[1];
+}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
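+
+/* Illustrative sketch only, not part of the ATOM interface: because
+ * ucNumberOfSrc is variable, ucNumberOfDst and usDstObjectID[] do not sit at
+ * the declared offsets, so the record has to be walked byte-wise (assuming
+ * the table is byte-packed, as BIOS tables are). The helper name is ours. */
+static inline const UCHAR *GetSrcDstTableDstCountPtr(const UCHAR *pTable)
+{
+  UCHAR ucNumberOfSrc = pTable[0];
+  /* skip ucNumberOfSrc itself plus ucNumberOfSrc USHORT source object IDs */
+  return pTable + 1 + ucNumberOfSrc * sizeof(USHORT);
+}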
+
+
+//Two definitions below are for OPM on MXM module designs
+
+#define EXT_HPDPIN_LUTINDEX_0                   0
+#define EXT_HPDPIN_LUTINDEX_1                   1
+#define EXT_HPDPIN_LUTINDEX_2                   2
+#define EXT_HPDPIN_LUTINDEX_3                   3
+#define EXT_HPDPIN_LUTINDEX_4                   4
+#define EXT_HPDPIN_LUTINDEX_5                   5
+#define EXT_HPDPIN_LUTINDEX_6                   6
+#define EXT_HPDPIN_LUTINDEX_7                   7
+#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES   (EXT_HPDPIN_LUTINDEX_7+1)
+
+#define EXT_AUXDDC_LUTINDEX_0                   0
+#define EXT_AUXDDC_LUTINDEX_1                   1
+#define EXT_AUXDDC_LUTINDEX_2                   2
+#define EXT_AUXDDC_LUTINDEX_3                   3
+#define EXT_AUXDDC_LUTINDEX_4                   4
+#define EXT_AUXDDC_LUTINDEX_5                   5
+#define EXT_AUXDDC_LUTINDEX_6                   6
+#define EXT_AUXDDC_LUTINDEX_7                   7
+#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES   (EXT_AUXDDC_LUTINDEX_7+1)
+
+//ucChannelMapping is defined as follows
+//for DP connector, eDP, DP to VGA/LVDS
+//Bit[1:0]: Defines which pin connects to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDP_Lane3_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane0_Source:2;
+#else
+  UCHAR ucDP_Lane0_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane3_Source:2;
+#endif
+}ATOM_DP_CONN_CHANNEL_MAPPING;
+
+//for DVI/HDMI: in the dual-link case, both links must use the same mapping.
+//Bit[1:0]: Defines which pin connects to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDVI_CLK_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA2_Source:2;
+#else
+  UCHAR ucDVI_DATA2_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_CLK_Source:2;
+#endif
+}ATOM_DVI_CONN_CHANNEL_MAPPING;
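+
+/* Illustrative sketch only, not part of the ATOM interface: decoding the raw
+ * ucChannelMapping byte described above. Each two-bit field selects the GPU
+ * TX pin (0..3) feeding one lane; for DP, lane 0 is in bits [1:0], while for
+ * DVI, data lane 2 is in bits [1:0]. The helper name is ours. */
+static inline UCHAR GetLaneSourceFromChannelMapping(UCHAR ucChannelMapping, UCHAR ucField)
+{
+  return (ucChannelMapping >> (ucField * 2)) & 0x3;  // 0..3 -> GPU pin TX0..TX3
+}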
+
+typedef struct _EXT_DISPLAY_PATH
+{
+  USHORT  usDeviceTag;                    //A bit vector to show what devices are supported 
+  USHORT  usDeviceACPIEnum;               //16bit device ACPI id. 
+  USHORT  usDeviceConnector;              //A physical connector for displays to plug in, using object connector definitions
+  UCHAR   ucExtAUXDDCLutIndex;            //An index into external AUX/DDC channel LUT
+  UCHAR   ucExtHPDPINLutIndex;            //An index into external HPD pin LUT
+  USHORT  usExtEncoderObjId;              //external encoder object id
+  union{
+    UCHAR   ucChannelMapping;                  // ucChannelMapping=0 means the default one-to-one mapping
+    ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
+    ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+  };
+  UCHAR   ucChPNInvert;                   // bit vector for up to 8 lanes, =0: P and N not inverted, =1: P and N inverted
+  USHORT  usCaps;
+  USHORT  usReserved; 
+}EXT_DISPLAY_PATH;
+   
+#define NUMBER_OF_UCHAR_FOR_GUID          16
+#define MAX_NUMBER_OF_EXT_DISPLAY_PATH    7
+
+//usCaps
+#define  EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE          0x01
+
+typedef  struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;
+  UCHAR                    ucGuid [NUMBER_OF_UCHAR_FOR_GUID];     // a GUID is a 16 byte long string
+  EXT_DISPLAY_PATH         sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
+  UCHAR                    ucChecksum;                            // a simple checksum: the byte sum of the whole structure equals 0x0.
+  UCHAR                    uc3DStereoPinId;                       // used for eDP panel
+  UCHAR                    ucRemoteDisplayConfig;
+  UCHAR                    uceDPToLVDSRxId;
+  UCHAR                    Reserved[4];                           // for potential expansion
+}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
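+
+/* Illustrative sketch only, not part of the ATOM interface: per the
+ * ucChecksum comment above, the bytes of the whole structure are expected to
+ * sum to 0x0 (mod 256). The helper name and the use of sizeof(*pInfo) rather
+ * than the header's usStructureSize are our assumptions. */
+static inline UCHAR ExtDisplayConnectionInfoChecksumOk(const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *pInfo)
+{
+  const UCHAR *pByte = (const UCHAR *)pInfo;
+  UCHAR ucSum = 0;
+  USHORT i;
+  for (i = 0; i < sizeof(*pInfo); i++)
+    ucSum += pByte[i];
+  return ucSum == 0;                               // 1 when the checksum holds
+}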
+
+//Related definitions; all records are different but they share a common header
+typedef struct _ATOM_COMMON_RECORD_HEADER
+{
+  UCHAR               ucRecordType;                      //An enum indicating the record type
+  UCHAR               ucRecordSize;                      //The size of the whole record in bytes
+}ATOM_COMMON_RECORD_HEADER;
+
+
+#define ATOM_I2C_RECORD_TYPE                           1         
+#define ATOM_HPD_INT_RECORD_TYPE                       2
+#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE             3
+#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE          4
+#define	ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE	     5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE          6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE      7
+#define ATOM_JTAG_RECORD_TYPE                          8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE              9
+#define ATOM_ENCODER_DVO_CF_RECORD_TYPE               10
+#define ATOM_CONNECTOR_CF_RECORD_TYPE                 11
+#define	ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE	      12
+#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE  13
+#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE	      14
+#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE	15
+#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE          16 //This is for the case when connectors are not known to object table
+#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE          17 //This is for the case when connectors are not known to object table
+#define ATOM_OBJECT_LINK_RECORD_TYPE                   18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
+#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE          19
+#define ATOM_ENCODER_CAP_RECORD_TYPE                   20
+
+
+//Must be updated when a new record type is added; equal to that record's definition!
+#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_ENCODER_CAP_RECORD_TYPE
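+
+/* Illustrative sketch only, not part of the ATOM interface: records hang off
+ * an object at usRecordOffset, back to back, each starting with the common
+ * header and skippable via ucRecordSize. Treating a record type of 0, or one
+ * above ATOM_MAX_OBJECT_RECORD_NUMBER, as the end of the list is our
+ * assumption; the helper name is ours. */
+static inline const ATOM_COMMON_RECORD_HEADER *GetNextObjectRecord(const ATOM_COMMON_RECORD_HEADER *pRecord)
+{
+  const ATOM_COMMON_RECORD_HEADER *pNext = (const ATOM_COMMON_RECORD_HEADER *)
+      ((const UCHAR *)pRecord + pRecord->ucRecordSize);
+  if (pNext->ucRecordType == 0 || pNext->ucRecordType > ATOM_MAX_OBJECT_RECORD_NUMBER)
+    return (const ATOM_COMMON_RECORD_HEADER *)0;   // end of the record list
+  return pNext;
+}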
+
+typedef struct  _ATOM_I2C_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ATOM_I2C_ID_CONFIG          sucI2cId; 
+  UCHAR                       ucI2CAddr;              //The slave address, it's 0 when the record is attached to connector for DDC
+}ATOM_I2C_RECORD;
+
+typedef struct  _ATOM_HPD_INT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucHPDIntGPIOID;         //Corresponding block in GPIO_PIN_INFO table gives the pin info           
+  UCHAR                       ucPlugged_PinState;
+}ATOM_HPD_INT_RECORD;
+
+
+typedef struct  _ATOM_OUTPUT_PROTECTION_RECORD 
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucProtectionFlag;
+  UCHAR                       ucReserved;
+}ATOM_OUTPUT_PROTECTION_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_DEVICE_TAG
+{
+  ULONG                       ulACPIDeviceEnum;       //Reserved for now
+  USHORT                      usDeviceID;             //This Id is same as "ATOM_DEVICE_XXX_SUPPORT"
+  USHORT                      usPadding;
+}ATOM_CONNECTOR_DEVICE_TAG;
+
+typedef struct  _ATOM_CONNECTOR_DEVICE_TAG_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucNumberOfDevice;
+  UCHAR                       ucReserved;
+  ATOM_CONNECTOR_DEVICE_TAG   asDeviceTag[1];         //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
+}ATOM_CONNECTOR_DEVICE_TAG_RECORD;
+
+
+typedef struct  _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR						            ucConfigGPIOID;
+  UCHAR						            ucConfigGPIOState;	    //Set to 1 when it's active high to enable external flow in
+  UCHAR                       ucFlowinGPIPID;
+  UCHAR                       ucExtInGPIPID;
+}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
+
+typedef struct  _ATOM_ENCODER_FPGA_CONTROL_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucCTL1GPIO_ID;
+  UCHAR                       ucCTL1GPIOState;        //Set to 1 when it's active high
+  UCHAR                       ucCTL2GPIO_ID;
+  UCHAR                       ucCTL2GPIOState;        //Set to 1 when it's active high
+  UCHAR                       ucCTL3GPIO_ID;
+  UCHAR                       ucCTL3GPIOState;        //Set to 1 when it's active high
+  UCHAR                       ucCTLFPGA_IN_ID;
+  UCHAR                       ucPadding[3];
+}ATOM_ENCODER_FPGA_CONTROL_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucGPIOID;               //Corresponding block in GPIO_PIN_INFO table gives the pin info 
+  UCHAR                       ucTVActiveState;        //Indicates the pin state (0 or 1) when TV is connected
+}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
+
+typedef struct  _ATOM_JTAG_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucTMSGPIO_ID;
+  UCHAR                       ucTMSGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucTCKGPIO_ID;
+  UCHAR                       ucTCKGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucTDOGPIO_ID;
+  UCHAR                       ucTDOGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucTDIGPIO_ID;
+  UCHAR                       ucTDIGPIOState;         //Set to 1 when it's active high
+  UCHAR                       ucPadding[2];
+}ATOM_JTAG_RECORD;
+
+
+//The following generic object gpio pin control record type will gradually replace the JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above
+typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR
+{
+  UCHAR                       ucGPIOID;               // GPIO_ID, find the corresponding ID in GPIO_LUT table
+  UCHAR                       ucGPIO_PinState;        // Pin state showing how to set-up the pin
+}ATOM_GPIO_PIN_CONTROL_PAIR;
+
+typedef struct  _ATOM_OBJECT_GPIO_CNTL_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucFlags;                // Future expandability
+  UCHAR                       ucNumberOfPins;         // Number of GPIO pins used to control the object
+  ATOM_GPIO_PIN_CONTROL_PAIR  asGpio[1];              // the real gpio pin pairs; the count is given by ucNumberOfPins
+}ATOM_OBJECT_GPIO_CNTL_RECORD;
+
+//Definitions for GPIO pin state 
+#define GPIO_PIN_TYPE_INPUT             0x00
+#define GPIO_PIN_TYPE_OUTPUT            0x10
+#define GPIO_PIN_TYPE_HW_CONTROL        0x20
+
+//For GPIO_PIN_TYPE_OUTPUT the following is defined 
+#define GPIO_PIN_OUTPUT_STATE_MASK      0x01
+#define GPIO_PIN_OUTPUT_STATE_SHIFT     0
+#define GPIO_PIN_STATE_ACTIVE_LOW       0x0
+#define GPIO_PIN_STATE_ACTIVE_HIGH      0x1
+
+// Indexes to GPIO array in GLSync record 
+// GLSync record is for Frame Lock/Gen Lock feature.
+#define ATOM_GPIO_INDEX_GLSYNC_REFCLK    0
+#define ATOM_GPIO_INDEX_GLSYNC_HSYNC     1
+#define ATOM_GPIO_INDEX_GLSYNC_VSYNC     2
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ  3
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT  4
+#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
+#define ATOM_GPIO_INDEX_GLSYNC_V_RESET   6
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_CNTL 7
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_SEL  8
+#define ATOM_GPIO_INDEX_GLSYNC_MAX       9
+
+typedef struct  _ATOM_ENCODER_DVO_CF_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ULONG                       ulStrengthControl;      // DVOA strength control for CF
+  UCHAR                       ucPadding[2];
+}ATOM_ENCODER_DVO_CF_RECORD;
+
+// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+#define ATOM_ENCODER_CAP_RECORD_HBR2                  0x01         // DP1.2 HBR2 is supported by HW encoder
+#define ATOM_ENCODER_CAP_RECORD_HBR2_EN               0x02         // DP1.2 HBR2 setting is qualified and HBR2 can be enabled 
+
+typedef struct  _ATOM_ENCODER_CAP_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  union {
+    USHORT                    usEncoderCap;         
+    struct {
+#if ATOM_BIG_ENDIAN
+      USHORT                  usReserved:14;        // Bits 2-15 may be defined for other capabilities in future
+      USHORT                  usHBR2En:1;           // Bit1 is for DP1.2 HBR2 enable
+      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability. 
+#else
+      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability. 
+      USHORT                  usHBR2En:1;           // Bit1 is for DP1.2 HBR2 enable
+      USHORT                  usReserved:14;        // Bits 2-15 may be defined for other capabilities in future
+#endif
+    };
+  }; 
+}ATOM_ENCODER_CAP_RECORD;                             
+
+// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA   1
+#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB   2
+
+typedef struct  _ATOM_CONNECTOR_CF_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  USHORT                      usMaxPixClk;
+  UCHAR                       ucFlowCntlGpioId;
+  UCHAR                       ucSwapCntlGpioId;
+  UCHAR                       ucConnectedDvoBundle;
+  UCHAR                       ucPadding;
+}ATOM_CONNECTOR_CF_RECORD;
+
+typedef struct  _ATOM_CONNECTOR_HARDCODE_DTD_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+	ATOM_DTD_FORMAT							asTiming;
+}ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
+
+typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;                //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE
+  UCHAR                       ucSubConnectorType;     //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A
+  UCHAR                       ucReserved;
+}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
+
+
+typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD
+{
+	ATOM_COMMON_RECORD_HEADER   sheader;                
+  UCHAR                       ucMuxType;              //decides the number of ucMuxState entries: =0: no pin state, =1: single state with complement, >1: multiple states
+  UCHAR                       ucMuxControlPin;
+  UCHAR                       ucMuxState[2];          //for alignment purposes
+}ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
+
+typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD
+{
+	ATOM_COMMON_RECORD_HEADER   sheader;                
+	UCHAR												ucMuxType;
+	UCHAR												ucMuxControlPin;
+	UCHAR												ucMuxState[2];					//for alignment purposes
+}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
+
+// define ucMuxType
+#define ATOM_ROUTER_MUX_PIN_STATE_MASK								0x0f
+#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT		0x01
+
+typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD     //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  UCHAR                       ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES];  //A fixed-size array which maps external pins to the internal GPIO_PIN_INFO table
+}ATOM_CONNECTOR_HPDPIN_LUT_RECORD;
+
+typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD  //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  ATOM_I2C_ID_CONFIG          ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES];  //A fixed-size array which maps external pins to internal DDC IDs
+}ATOM_CONNECTOR_AUXDDC_LUT_RECORD;
+
+typedef struct _ATOM_OBJECT_LINK_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  USHORT                      usObjectID;         //could be a connector, encoder or other object in object.h
+}ATOM_OBJECT_LINK_RECORD;
+
+typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  USHORT                      usReserved;
+}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
+
+/****************************************************************************/	
+// ASIC voltage data table
+/****************************************************************************/	
+typedef struct  _ATOM_VOLTAGE_INFO_HEADER
+{
+   USHORT   usVDDCBaseLevel;                //In units of 50mV
+   USHORT   usReserved;                     //For possible extension table offset
+   UCHAR    ucNumOfVoltageEntries;
+   UCHAR    ucBytesPerVoltageEntry;
+   UCHAR    ucVoltageStep;                  //Voltage increment of one step, in 0.5mV units
+   UCHAR    ucDefaultVoltageEntry;
+   UCHAR    ucVoltageControlI2cLine;
+   UCHAR    ucVoltageControlAddress;
+   UCHAR    ucVoltageControlOffset;
+}ATOM_VOLTAGE_INFO_HEADER;
+
+typedef struct  _ATOM_VOLTAGE_INFO
+{
+   ATOM_COMMON_TABLE_HEADER	sHeader; 
+   ATOM_VOLTAGE_INFO_HEADER viHeader;
+   UCHAR    ucVoltageEntries[64];            //64 is for allocation; the actual number of entries is ucNumOfVoltageEntries*ucBytesPerVoltageEntry
+}ATOM_VOLTAGE_INFO;
+
+
+typedef struct  _ATOM_VOLTAGE_FORMULA
+{
+   USHORT   usVoltageBaseLevel;             // In units of 1mV
+   USHORT   usVoltageStep;                  // Voltage increment of one step, in 1mV units
+   UCHAR    ucNumOfVoltageEntries;          // Number of voltage entries, which indicates the max voltage
+   UCHAR    ucFlag;                         // bit0=0: step is 1mV, =1: 0.5mV
+   UCHAR    ucBaseVID;                      // if there is no lookup table, VID = BaseVID + (Vol - BaseLevel)/VoltageStep
+   UCHAR    ucReserved;
+   UCHAR    ucVIDAdjustEntries[32];         // 32 is for allocation; the actual number of entries is ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA;
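+
+/* Illustrative sketch only, not part of the ATOM interface: a direct
+ * transcription of the ucBaseVID comment above for the no-lookup-table case,
+ * assuming both values are in mV, usVoltageStep is non-zero, and the 0.5mV
+ * flag in ucFlag is ignored. The helper name is ours. */
+static inline UCHAR VoltageToVid(const ATOM_VOLTAGE_FORMULA *pFormula, USHORT usVoltageInMv)
+{
+  return pFormula->ucBaseVID +
+         (usVoltageInMv - pFormula->usVoltageBaseLevel) / pFormula->usVoltageStep;
+}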
+
+typedef struct  _VOLTAGE_LUT_ENTRY
+{
+	 USHORT		usVoltageCode;									// The Voltage ID, either GPIO or I2C code
+	 USHORT		usVoltageValue;									// The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY;
+
+typedef struct  _ATOM_VOLTAGE_FORMULA_V2
+{
+   UCHAR    ucNumOfVoltageEntries;          // Number of voltage entries, which indicates the max voltage
+	 UCHAR		ucReserved[3];
+	 VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];// 32 is for allocation, the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA_V2;
+
+typedef struct _ATOM_VOLTAGE_CONTROL
+{
+	UCHAR		 ucVoltageControlId;							//Indicate it is controlled by I2C or GPIO or HW state machine		
+  UCHAR    ucVoltageControlI2cLine;
+  UCHAR    ucVoltageControlAddress;
+  UCHAR    ucVoltageControlOffset;	 	
+  USHORT   usGpioPin_AIndex;								//GPIO_PAD register index
+  UCHAR    ucGpioPinBitShift[9];						//at most 8 pins supporting 255 VIDs, terminated with 0xff
+	UCHAR		 ucReserved;
+}ATOM_VOLTAGE_CONTROL;
+
+// Define ucVoltageControlId
+#define	VOLTAGE_CONTROLLED_BY_HW							0x00
+#define	VOLTAGE_CONTROLLED_BY_I2C_MASK				0x7F
+#define	VOLTAGE_CONTROLLED_BY_GPIO						0x80
+#define	VOLTAGE_CONTROL_ID_LM64								0x01									//I2C control, used for R5xx Core Voltage
+#define	VOLTAGE_CONTROL_ID_DAC								0x02									//I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
+#define	VOLTAGE_CONTROL_ID_VT116xM						0x03									//I2C control, used for R6xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DS4402							0x04									
+#define VOLTAGE_CONTROL_ID_UP6266 						0x05									
+#define VOLTAGE_CONTROL_ID_SCORPIO						0x06
+#define	VOLTAGE_CONTROL_ID_VT1556M						0x07									
+#define	VOLTAGE_CONTROL_ID_CHL822x						0x08									
+#define	VOLTAGE_CONTROL_ID_VT1586M						0x09
+#define VOLTAGE_CONTROL_ID_UP1637 						0x0A
+
+typedef struct  _ATOM_VOLTAGE_OBJECT
+{
+   UCHAR    ucVoltageType;                  //Indicates the voltage source: VDDC, MVDDC, MVDDQ or MVDDCI
+   UCHAR    ucSize;                         //Size of the object
+   ATOM_VOLTAGE_CONTROL     asControl;      //describes how to control
+   ATOM_VOLTAGE_FORMULA     asFormula;      //indicates how to convert real voltage to VID
+}ATOM_VOLTAGE_OBJECT;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_V2
+{
+   UCHAR    ucVoltageType;                  //Indicates the voltage source: VDDC, MVDDC, MVDDQ or MVDDCI
+   UCHAR    ucSize;                         //Size of the object
+   ATOM_VOLTAGE_CONTROL     asControl;      //describes how to control
+   ATOM_VOLTAGE_FORMULA_V2  asFormula;      //indicates how to convert real voltage to VID
+}ATOM_VOLTAGE_OBJECT_V2;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_INFO
+{
+   ATOM_COMMON_TABLE_HEADER	sHeader; 
+	 ATOM_VOLTAGE_OBJECT			asVoltageObj[3];	//Info for Voltage control	  	 
+}ATOM_VOLTAGE_OBJECT_INFO;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_INFO_V2
+{
+   ATOM_COMMON_TABLE_HEADER	sHeader; 
+	 ATOM_VOLTAGE_OBJECT_V2			asVoltageObj[3];	//Info for Voltage control	  	 
+}ATOM_VOLTAGE_OBJECT_INFO_V2;
+
+typedef struct  _ATOM_LEAKID_VOLTAGE
+{
+	UCHAR		ucLeakageId;
+	UCHAR		ucReserved;
+	USHORT	usVoltage;
+}ATOM_LEAKID_VOLTAGE;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
+   UCHAR    ucVoltageType;                  //Indicates the voltage source: VDDC, MVDDC, MVDDQ or MVDDCI
+   UCHAR    ucVoltageMode;                  //Indicates the voltage control mode: Init/Set/Leakage/Set phase
+   USHORT   usSize;                         //Size of the object
+}ATOM_VOLTAGE_OBJECT_HEADER_V3;
+
+typedef struct  _VOLTAGE_LUT_ENTRY_V2
+{
+	 ULONG		ulVoltageId;									  // The Voltage ID which is used to program GPIO register
+	 USHORT		usVoltageValue;									// The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY_V2;
+
+typedef struct  _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
+{
+  USHORT  usVoltageLevel;                 // The voltage level, in mV
+  USHORT  usVoltageId;                    // The corresponding voltage ID
+  USHORT  usLeakageId;                    // The leakage ID this entry applies to
+}LEAKAGE_VOLTAGE_LUT_ENTRY_V2;
+
+typedef struct  _ATOM_I2C_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
+   UCHAR	ucVoltageRegulatorId;					  //Indicate Voltage Regulator Id
+   UCHAR    ucVoltageControlI2cLine;
+   UCHAR    ucVoltageControlAddress;
+   UCHAR    ucVoltageControlOffset;	 	
+   ULONG    ulReserved;
+   VOLTAGE_LUT_ENTRY asVolI2cLut[1];        // end with 0xff
+}ATOM_I2C_VOLTAGE_OBJECT_V3;
+
+typedef struct  _ATOM_GPIO_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;   
+   UCHAR    ucVoltageGpioCntlId;         // default is 0, which indicates control through CG VID mode
+   UCHAR    ucGpioEntryNum;              // indicates the number of entries in the Voltage/GPIO value lookup table
+   UCHAR    ucPhaseDelay;                // phase delay in units of microseconds
+   UCHAR    ucReserved;   
+   ULONG    ulGpioMaskVal;               // GPIO Mask value
+   VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];   
+}ATOM_GPIO_VOLTAGE_OBJECT_V3;
+
+typedef struct  _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
+{
+   ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
+   UCHAR    ucLeakageCntlId;             // default is 0
+   UCHAR    ucLeakageEntryNum;           // indicates the number of entries in the LeakageId/Voltage LUT
+   UCHAR    ucReserved[2];               
+   ULONG    ulMaxVoltageLevel;
+   LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];   
+}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
+
+typedef union _ATOM_VOLTAGE_OBJECT_V3{
+  ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
+  ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
+  ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
+}ATOM_VOLTAGE_OBJECT_V3;
+
+typedef struct  _ATOM_VOLTAGE_OBJECT_INFO_V3_1
+{
+   ATOM_COMMON_TABLE_HEADER	sHeader; 
+	 ATOM_VOLTAGE_OBJECT_V3			asVoltageObj[3];	//Info for Voltage control	  	 
+}ATOM_VOLTAGE_OBJECT_INFO_V3_1;
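+
+/* Illustrative sketch only, not part of the ATOM interface: in practice the
+ * number of V3 voltage objects varies, so a lookup walks the table by each
+ * object's usSize (assumed non-zero) until the header's usStructureSize is
+ * exhausted, matching on ucVoltageType/ucVoltageMode. The helper is ours. */
+static inline const ATOM_VOLTAGE_OBJECT_V3 *FindVoltageObjectV3(
+    const ATOM_VOLTAGE_OBJECT_INFO_V3_1 *pInfo, UCHAR ucVoltageType, UCHAR ucVoltageMode)
+{
+  USHORT usOffset = sizeof(ATOM_COMMON_TABLE_HEADER);
+  while (usOffset < pInfo->sHeader.usStructureSize)
+  {
+    const ATOM_VOLTAGE_OBJECT_V3 *pObj = (const ATOM_VOLTAGE_OBJECT_V3 *)
+        ((const UCHAR *)pInfo + usOffset);
+    const ATOM_VOLTAGE_OBJECT_HEADER_V3 *pHdr = &pObj->asGpioVoltageObj.sHeader;
+    if (pHdr->ucVoltageType == ucVoltageType && pHdr->ucVoltageMode == ucVoltageMode)
+      return pObj;
+    usOffset += pHdr->usSize;                      // advance to the next object
+  }
+  return (const ATOM_VOLTAGE_OBJECT_V3 *)0;
+}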
+
+typedef struct  _ATOM_ASIC_PROFILE_VOLTAGE
+{
+	UCHAR		ucProfileId;
+	UCHAR		ucReserved;
+	USHORT	usSize;
+	USHORT	usEfuseSpareStartAddr;
+	USHORT	usFuseIndex[8];												//from LSB to MSB, max 8 bits; terminated with 0xffff if fewer than 8 efuse IDs
+	ATOM_LEAKID_VOLTAGE					asLeakVol[2];			//Leakage ID and related voltage
+}ATOM_ASIC_PROFILE_VOLTAGE;
+
+//ucProfileId
+#define	ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE			1		
+#define	ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE			1
+#define	ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE					2
+
+typedef struct  _ATOM_ASIC_PROFILING_INFO
+{
+  ATOM_COMMON_TABLE_HEADER			asHeader; 
+	ATOM_ASIC_PROFILE_VOLTAGE			asVoltage;
+}ATOM_ASIC_PROFILING_INFO;
+
+typedef struct _ATOM_POWER_SOURCE_OBJECT
+{
+	UCHAR	ucPwrSrcId;													// Power source
+	UCHAR	ucPwrSensorType;										// GPIO, I2C or none
+	UCHAR	ucPwrSensId;											  // if GPIO detect, it is the GPIO id; if I2C detect, it is the I2C id
+	UCHAR	ucPwrSensSlaveAddr;									// Slave address if I2C detect
+	UCHAR ucPwrSensRegIndex;									// I2C register index if I2C detect
+	UCHAR ucPwrSensRegBitMask;								// detects which bit is used if I2C detect
+	UCHAR	ucPwrSensActiveState;								// high active or low active
+	UCHAR	ucReserve[3];												// reserved
+	USHORT usSensPwr;													// in units of watts
+}ATOM_POWER_SOURCE_OBJECT;
+
+typedef struct _ATOM_POWER_SOURCE_INFO
+{
+		ATOM_COMMON_TABLE_HEADER		asHeader;
+		UCHAR												asPwrbehave[16];
+		ATOM_POWER_SOURCE_OBJECT		asPwrObj[1];
+}ATOM_POWER_SOURCE_INFO;
+
+
+//Define ucPwrSrcId
+#define POWERSOURCE_PCIE_ID1						0x00
+#define POWERSOURCE_6PIN_CONNECTOR_ID1	0x01
+#define POWERSOURCE_8PIN_CONNECTOR_ID1	0x02
+#define POWERSOURCE_6PIN_CONNECTOR_ID2	0x04
+#define POWERSOURCE_8PIN_CONNECTOR_ID2	0x08
+
+//define ucPwrSensorId
+#define POWER_SENSOR_ALWAYS							0x00
+#define POWER_SENSOR_GPIO								0x01
+#define POWER_SENSOR_I2C								0x02
+
+typedef struct _ATOM_CLK_VOLT_CAPABILITY
+{
+  ULONG      ulVoltageIndex;                      // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table        
+  ULONG      ulMaximumSupportedCLK;               // Maximum clock supported with specified voltage index, unit in 10kHz
+}ATOM_CLK_VOLT_CAPABILITY;
+
+typedef struct _ATOM_AVAILABLE_SCLK_LIST
+{
+  ULONG      ulSupportedSCLK;               // Maximum clock supported with specified voltage index,  unit in 10kHz
+  USHORT     usVoltageIndex;                // The Voltage Index indicated by FUSE for specified SCLK  
+  USHORT     usVoltageID;                   // The Voltage ID indicated by FUSE for specified SCLK 
+}ATOM_AVAILABLE_SCLK_LIST;
+
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
+#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE             1       // refer to ulSystemConfig bit[0]
+
+// this IntegratedSystemInfoTable is used for Llano/Ontario APUs
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;          
+  ULONG  ulBootUpUMAClock;          
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];            
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulOtherDisplayMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulSB_MMIO_Base_Addr;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;   
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulMinEngineClock;           
+  ULONG  ulSystemConfig;            
+  ULONG  ulCPUCapInfo;              
+  USHORT usNBP0Voltage;               
+  USHORT usNBP1Voltage;
+  USHORT usBootUpNBVoltage;                       
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;     
+  UCHAR  ucMemoryType;  
+  UCHAR  ucUMAChannelNumber;
+  ULONG  ulCSR_M3_ARB_CNTL_DEFAULT[10];  
+  ULONG  ulCSR_M3_ARB_CNTL_UVD[10]; 
+  ULONG  ulCSR_M3_ARB_CNTL_FS3D[10];
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulMinimumNClk;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  SclkDpmBoostMargin;
+  ULONG  SclkDpmThrottleMargin;
+  USHORT SclkDpmTdpLimitPG; 
+  USHORT SclkDpmTdpLimitBoost;
+  ULONG  ulBoostEngineCLock;
+  UCHAR  ulBoostVid_2bit;  
+  UCHAR  EnableBoost;
+  USHORT GnbTdpLimit;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucLVDSReserved;
+  ULONG  ulReserved3[15]; 
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;   
+}ATOM_INTEGRATED_SYSTEM_INFO_V6;   
+
+// ulGPUCapInfo
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE       0x01
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION          0x08
+
+//ucLVDSMisc:                   
+#define SYS_INFO_LVDSMISC__888_FPDI_MODE                                             0x01
+#define SYS_INFO_LVDSMISC__DL_CH_SWAP                                                0x02
+#define SYS_INFO_LVDSMISC__888_BPC                                                   0x04
+#define SYS_INFO_LVDSMISC__OVERRIDE_EN                                               0x08
+#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW                                           0x10
+
+// not used any more
+#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW                                          0x04
+#define SYS_INFO_LVDSMISC__HSYNC_ACTIVE_LOW                                          0x08
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+ulBootUpEngineClock:              VBIOS bootup engine clock frequency, in 10kHz units. If it equals 0, VBIOS uses a pre-defined bootup engine clock.
+ulDentistVCOFreq:                 Dentist VCO clock in 10kHz units.
+ulBootUpUMAClock:                 System memory bootup clock frequency in 10kHz units.
+sDISPCLK_Voltage:                 Report Display clock voltage requirement.
+ 
+ulBootUpReqDisplayVector:         VBIOS boot up display IDs; the following are supported devices in Llano/Ontario projects:
+                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
+                                  ATOM_DEVICE_CRT2_SUPPORT                  0x0010
+                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008 
+                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040 
+                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080       
+                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200       
+                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400        
+                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
+                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
+ulOtherDisplayMisc:               Other display-related flags, not defined yet.
+ulGPUCapInfo:                     bit[0]=0: TMDS/HDMI coherent mode uses cascade PLL mode.
+                                        =1: TMDS/HDMI coherent mode uses single PLL mode.
+                                  bit[3]=0: Enable HW AUX mode detection logic
+                                        =1: Disable HW AUX mode detection logic
+ulSB_MMIO_Base_Addr:              Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz:           When it's set to 0x0 by SBIOS: the LCD backlight is not controlled by the GPU (SW).
+                                  Any attempt to change BL using a VBIOS function or to enable VariBri from the PP table is not effective, since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+
+                                  When it's set to a non-zero frequency, the backlight is controlled by the GPU (SW) in one of the two ways below:
+                                  1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use.
+                                  VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+                                  changing BL using a VBIOS function is functional in both driver and non-driver present environments,
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+                                  2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating
+                                  that BL control from the GPU is expected.
+                                  VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
+                                  Changing BL using a VBIOS function could be functional in both driver and non-driver present environments, but
+                                  it's per platform,
+                                  and enabling VariBri under the driver environment from the PP table is optional.
+
+ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt.
+                                  Threshold-on value to enter the HTC_active state.
+ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt.
+                                  Used to calculate the threshold-off value to exit the HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
+ulMinEngineClock:                 Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled 
+                                        =1: PCIE Power Gating Enabled
+                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                         1: DDR-DLL shut-down feature enabled.
+                                  Bit[2]=0: DDR-PLL Power down feature disabled.
+                                         1: DDR-PLL Power down feature enabled.                                 
+ulCPUCapInfo:                     TBD
+usNBP0Voltage:                    VID for voltage on NB P0 State
+usNBP1Voltage:                    VID for voltage on NB P1 State  
+usBootUpNBVoltage:                Voltage index of GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:          Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                                  to indicate a range.
+                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+ucMemoryType:                     [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber:               Number of system memory channels.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]:    Arrays with values for CSR M3 arbiter for default
+ulCSR_M3_ARB_CNTL_UVD[10]:        Arrays with values for CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]:       Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]:                   Array providing the available list of SCLKs and corresponding voltages, ordered from low to high.
+ulGMCRestoreResetTime:            GMC power restore and GMC reset time used to calculate data reconnection latency. Unit in ns.
+ulMinimumNClk:                    Minimum NCLK speed among all NB P-states, used to calculate data reconnection latency. Unit in 10kHz.
+ulIdleNClk:                       NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage:            PCIE clock spread spectrum percentage in units of 0.01%; 100 means 1%.
+usPCIEClkSSType:                  PCIE clock spread spectrum type. 0 for down spread (default); 1 for center spread.
+usLvdsSSPercentage:               LVDS panel (not including eDP) spread spectrum percentage in units of 0.01%; =0, use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz:          LVDS panel (not including eDP) spread spectrum frequency in units of 10Hz; =0, use VBIOS default setting.
+usHDMISSPercentage:               HDMI spread spectrum percentage in units of 0.01%; 100 means 1%; =0, use VBIOS default setting.
+usHDMISSpreadRateIn10Hz:          HDMI spread spectrum frequency in units of 10Hz; =0, use VBIOS default setting.
+usDVISSPercentage:                DVI spread spectrum percentage in units of 0.01%; 100 means 1%; =0, use VBIOS default setting.
+usDVISSpreadRateIn10Hz:           DVI spread spectrum frequency in units of 10Hz; =0, use VBIOS default setting.
+usMaxLVDSPclkFreqInSingleLink:    Max pixel clock of an LVDS panel in single link; if =0, VBIOS uses the default threshold, currently 85MHz.
+ucLVDSMisc:                       [bit0] LVDS 888-bit panel mode: =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                                  [bit1] LVDS panel lower and upper link mapping: =0: lower and upper link not swapped, =1: lower and upper link are swapped
+                                  [bit2] LVDS 888 bits per color mode: =0: 666 bits per color, =1: 888 bits per color
+                                  [bit3] LVDS parameter override enable: =0: ucLvdsMisc parameters are not used, =1: ucLvdsMisc parameters should be used
+                                  [bit4] Polarity of the signal sent to the digital BLON output pin: =0: not inverted (active high), =1: inverted (active low)
+**********************************************************************************************************************/
+
+// this table is used for Llano/Ontario APUs
+typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
+{
+  ATOM_INTEGRATED_SYSTEM_INFO_V6    sIntegratedSysInfo;   
+  ULONG  ulPowerplayTable[128];  
+}ATOM_FUSION_SYSTEM_INFO_V1; 
+/**********************************************************************************************************************
+  ATOM_FUSION_SYSTEM_INFO_V1 Description
+sIntegratedSysInfo:               refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
+ulPowerplayTable[128]:            This 512-byte memory area is used to save ATOM_PPLIB_POWERPLAYTABLE3, starting from ulPowerplayTable[0]
+**********************************************************************************************************************/ 
+
+// this IntegratedSystemInfoTable is used for Trinity APUs
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  ULONG  ulBootUpEngineClock;
+  ULONG  ulDentistVCOFreq;
+  ULONG  ulBootUpUMAClock;
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];
+  ULONG  ulBootUpReqDisplayVector;
+  ULONG  ulOtherDisplayMisc;
+  ULONG  ulGPUCapInfo;
+  ULONG  ulSB_MMIO_Base_Addr;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulMinEngineClock;
+  ULONG  ulSystemConfig;            
+  ULONG  ulCPUCapInfo;
+  USHORT usNBP0Voltage;               
+  USHORT usNBP1Voltage;
+  USHORT usBootUpNBVoltage;                       
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;     
+  UCHAR  ucMemoryType;  
+  UCHAR  ucUMAChannelNumber;
+  UCHAR  strVBIOSMsg[40];
+  ULONG  ulReserved[20];
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulMinimumNClk;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  SclkDpmBoostMargin;
+  ULONG  SclkDpmThrottleMargin;
+  USHORT SclkDpmTdpLimitPG; 
+  USHORT SclkDpmTdpLimitBoost;
+  ULONG  ulBoostEngineCLock;
+  UCHAR  ulBoostVid_2bit;  
+  UCHAR  EnableBoost;
+  USHORT GnbTdpLimit;
+  USHORT usMaxLVDSPclkFreqInSingleLink;
+  UCHAR  ucLvdsMisc;
+  UCHAR  ucLVDSReserved;
+  UCHAR  ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+  UCHAR  ucLVDSOffToOnDelay_in4Ms;
+  UCHAR  ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+  UCHAR  ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+  UCHAR  ucLVDSReserved1;
+  ULONG  ulLCDBitDepthControlVal;
+  ULONG  ulNbpStateMemclkFreq[4];
+  USHORT usNBP2Voltage;               
+  USHORT usNBP3Voltage;
+  ULONG  ulNbpStateNClkFreq[4];
+  UCHAR  ucNBDPMEnable;
+  UCHAR  ucReserved[3];
+  UCHAR  ucDPMState0VclkFid;
+  UCHAR  ucDPMState0DclkFid;
+  UCHAR  ucDPMState1VclkFid;
+  UCHAR  ucDPMState1DclkFid;
+  UCHAR  ucDPMState2VclkFid;
+  UCHAR  ucDPMState2DclkFid;
+  UCHAR  ucDPMState3VclkFid;
+  UCHAR  ucDPMState3DclkFid;
+  ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V1_7;
+
+// ulOtherDisplayMisc
+#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT            0x01
+#define INTEGRATED_SYSTEM_INFO__GET_BOOTUP_DISPLAY_CALLBACK_FUNC_SUPPORT  0x02
+#define INTEGRATED_SYSTEM_INFO__GET_EXPANSION_CALLBACK_FUNC_SUPPORT       0x04
+#define INTEGRATED_SYSTEM_INFO__FAST_BOOT_SUPPORT                         0x08
+
+// ulGPUCapInfo
+#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE                0x01
+#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE                               0x02
+#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT                         0x08
+
+/**********************************************************************************************************************
+  ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
+ulBootUpEngineClock:              VBIOS bootup engine clock frequency, in 10kHz units. If it equals 0, VBIOS uses a pre-defined bootup engine clock.
+ulDentistVCOFreq:                 Dentist VCO clock in 10kHz units.
+ulBootUpUMAClock:                 System memory bootup clock frequency in 10kHz units.
+sDISPCLK_Voltage:                 Report Display clock voltage requirement.
+ 
+ulBootUpReqDisplayVector:         VBIOS boot up display IDs, following are supported devices in Trinity projects:
+                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
+                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008 
+                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040 
+                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080       
+                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200       
+                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400        
+                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
+                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
+ulOtherDisplayMisc:               bit[0]=0: INT15 callback function Get LCD EDID (ax=4e08, bl=1b) is not supported by SBIOS.
+                                        =1: INT15 callback function Get LCD EDID (ax=4e08, bl=1b) is supported by SBIOS.
+                                  bit[1]=0: INT15 callback function Get boot display (ax=4e08, bl=01h) is not supported by SBIOS
+                                        =1: INT15 callback function Get boot display (ax=4e08, bl=01h) is supported by SBIOS
+                                  bit[2]=0: INT15 callback function Get panel Expansion (ax=4e08, bl=02h) is not supported by SBIOS
+                                        =1: INT15 callback function Get panel Expansion (ax=4e08, bl=02h) is supported by SBIOS
+                                  bit[3]=0: VBIOS fast boot is disabled
+                                        =1: VBIOS fast boot is enabled. (VBIOS skips display device detection on every set mode if an LCD panel is connected and the LID is open)
+ulGPUCapInfo:                     bit[0]=0: TMDS/HDMI Coherent Mode use cascade PLL mode.
+                                        =1: TMDS/HDMI Coherent Mode use signel PLL mode.
+                                  bit[1]=0: DP mode use cascade PLL mode ( New for Trinity )
+                                        =1: DP mode use single PLL mode
+                                  bit[3]=0: Enable AUX HW mode detection logic
+                                        =1: Disable AUX HW mode detection logic
+                                      
+ulSB_MMIO_Base_Addr:              Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz:           When set to 0x0 by SBIOS: the LCD backlight is not controlled by the GPU (SW).
+                                  Any attempt to change BL using a VBIOS function, or to enable VariBri from the PP table, is not effective, since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0.
+
+                                  When set to a non-zero frequency, the backlight is controlled by the GPU (SW) in one of the two ways below:
+                                  1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use.
+                                  VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1. As a result,
+                                  changing BL using a VBIOS function works in both driver and non-driver present environments,
+                                  and enabling VariBri from the PP table under the driver environment is optional.
+
+                                  2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating
+                                  that BL control from the GPU is expected.
+                                  VBIOS will NOT set up the PWM frequency but will still set ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
+                                  Changing BL using a VBIOS function may work in both driver and non-driver present environments, but
+                                  this is platform dependent,
+                                  and enabling VariBri from the PP table under the driver environment is optional.
+
+ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt.
+                                  Threshold-on value to enter the HTC_active state.
+ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt.
+                                  Used to calculate the threshold-off value to exit the HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
+ulMinEngineClock:                 Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled 
+                                        =1: PCIE Power Gating Enabled
+                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                        =1: DDR-DLL shut-down feature enabled.
+                                  Bit[2]=0: DDR-PLL Power down feature disabled.
+                                        =1: DDR-PLL Power down feature enabled.
+ulCPUCapInfo:                     TBD
+usNBP0Voltage:                    VID for voltage on NB P0 State
+usNBP1Voltage:                    VID for voltage on NB P1 State  
+usNBP2Voltage:                    VID for voltage on NB P2 State
+usNBP3Voltage:                    VID for voltage on NB P3 State  
+usBootUpNBVoltage:                Voltage index of the GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:          Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                                  to indicate a range.
+                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+ucMemoryType:                     [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber:               Number of system memory channels.
+ulCSR_M3_ARB_CNTL_DEFAULT[10]:    Array of values for the CSR M3 arbiter for the default case
+ulCSR_M3_ARB_CNTL_UVD[10]:        Array of values for the CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]:       Array of values for the CSR M3 arbiter for full screen 3D applications.
+sAvail_SCLK[5]:                   Array providing the available list of SCLKs and corresponding voltages, ordered from low to high
+ulGMCRestoreResetTime:            GMC power restore and GMC reset time, used to calculate data reconnection latency. Unit in ns.
+ulMinimumNClk:                    Minimum NCLK speed among all NB P-states, used to calculate data reconnection latency. Unit in 10kHz.
+ulIdleNClk:                       NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage:            PCIE clock spread spectrum percentage in units of 0.01%; 100 means 1%.
+usPCIEClkSSType:                  PCIE clock spread spectrum type. 0 for down spread (default); 1 for center spread.
+usLvdsSSPercentage:               LVDS panel ( not including eDP ) spread spectrum percentage in units of 0.01%; =0, use VBIOS default setting.
+usLvdsSSpreadRateIn10Hz:          LVDS panel ( not including eDP ) spread spectrum frequency in units of 10Hz; =0, use VBIOS default setting.
+usHDMISSPercentage:               HDMI spread spectrum percentage in units of 0.01%; 100 means 1%; =0, use VBIOS default setting.
+usHDMISSpreadRateIn10Hz:          HDMI spread spectrum frequency in units of 10Hz; =0, use VBIOS default setting.
+usDVISSPercentage:                DVI spread spectrum percentage in units of 0.01%; 100 means 1%; =0, use VBIOS default setting.
+usDVISSpreadRateIn10Hz:           DVI spread spectrum frequency in units of 10Hz; =0, use VBIOS default setting.
+usMaxLVDSPclkFreqInSingleLink:    Max pixel clock of an LVDS panel in single link; if =0, VBIOS uses the default threshold, currently 85MHz.
+ucLVDSMisc:                       [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
+                                  [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
+                                  [bit2] LVDS 888bit per color mode  =0: 666 bit per color =1:888 bit per color
+                                  [bit3] LVDS parameter override enable  =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
+                                  [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
+ucLVDSPwrOnSeqDIGONtoDE_in4Ms:    LVDS power up sequence time in units of 4ms, time delay from DIGON signal active to data enable signal active ( DE ).
+                                  =0 means use the VBIOS default, which is 8 ( 32ms ). The LVDS power up sequence is: DIGON->DE->VARY_BL->BLON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+ucLVDSPwrOnDEtoVARY_BL_in4Ms:     LVDS power up sequence time in units of 4ms, time delay from DE ( data enable ) active to vary brightness enable signal active ( VARY_BL ).
+                                  =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power up sequence is: DIGON->DE->VARY_BL->BLON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffVARY_BLtoDE_in4Ms:    LVDS power down sequence time in units of 4ms, time delay from vary brightness enable signal ( VARY_BL ) off to data enable ( DE ) signal off.
+                                  =0 means use the VBIOS default, which is 90 ( 360ms ). The LVDS power down sequence is: BLON->VARY_BL->DE->DIGON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffDEtoDIGON_in4Ms:      LVDS power down sequence time in units of 4ms, time delay from data enable ( DE ) signal off to LCDVCC ( DIGON ) off.
+                                  =0 means use the VBIOS default delay, which is 8 ( 32ms ). The LVDS power down sequence is: BLON->VARY_BL->DE->DIGON.
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSOffToOnDelay_in4Ms:         LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active. 
+                                  =0 means to use VBIOS default delay which is 125 ( 500ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOnVARY_BLtoBLON_in4Ms:   LVDS power up sequence time in units of 4ms. Time delay from VARY_BL signal on to BLON signal active.
+                                  =0 means to use VBIOS default delay which is 0 ( 0ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ucLVDSPwrOffBLONtoVARY_BL_in4Ms:  LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off. 
+                                  =0 means to use VBIOS default delay which is 0 ( 0ms ).
+                                  This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
+
+ulNbpStateMemclkFreq[4]:          System memory clock frequency in units of 10kHz in the different NB P-states.
+
+**********************************************************************************************************************/
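+
+// Illustrative sketch (not part of the original ATOM headers): how a driver
+// might consume the HTC fields and the backlight PWM flag documented above.
+// The function name is hypothetical; byte-order conversion is omitted.
+static inline int atom_bl_controlled_by_gpu_v1_7(const ATOM_INTEGRATED_SYSTEM_INFO_V1_7 *info)
+{
+  // HTC_active is entered at ucHtcTmpLmt and exited at ucHtcTmpLmt - ucHtcHystLmt.
+  // usRequestedPWMFreqInHz == 0 means the backlight is not GPU(SW)-controlled;
+  // any non-zero value means GPU control, either via the BL PWM output (case 1)
+  // or via other means such as DPCD, where the value is only a flag (case 2).
+  return info->usRequestedPWMFreqInHz != 0;
+}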
+
+/**************************************************************************/
+// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
+//Memory SS Info Table
+//Define Memory Clock SS chip ID
+#define ICS91719  1
+#define ICS91720  2
+
+//Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol
+typedef struct _ATOM_I2C_DATA_RECORD
+{
+  UCHAR         ucNunberOfBytes;                                              //Indicates how many bytes SW needs to write to the external ASIC for one block, in addition to the "Start" and "Stop" conditions
+  UCHAR         ucI2CData[1];                                                 //I2C data in bytes, should be less than 16 bytes usually
+}ATOM_I2C_DATA_RECORD;
+
+
+//Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information
+typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
+{
+  ATOM_I2C_ID_CONFIG_ACCESS       sucI2cId;               //I2C line and HW/SW assisted cap.
+  UCHAR		                        ucSSChipID;             //SS chip being used
+  UCHAR		                        ucSSChipSlaveAddr;      //Slave Address to set up this SS chip
+  UCHAR                           ucNumOfI2CDataRecords;  //number of data block
+  ATOM_I2C_DATA_RECORD            asI2CData[1];  
+}ATOM_I2C_DEVICE_SETUP_INFO;
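+
+// Illustrative sketch (not part of the original ATOM headers): both
+// ATOM_I2C_DATA_RECORD and ATOM_I2C_DEVICE_SETUP_INFO end in [1]-sized
+// placeholder arrays, so records must be walked by their actual size.
+// Each record occupies 1 + ucNunberOfBytes bytes; the helper name is hypothetical.
+static inline ATOM_I2C_DATA_RECORD *atom_next_i2c_data_record(ATOM_I2C_DATA_RECORD *rec)
+{
+  // advance past the count byte plus the payload bytes of this record
+  return (ATOM_I2C_DATA_RECORD *)((UCHAR *)rec + 1 + rec->ucNunberOfBytes);
+}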
+
+//==========================================================================================
+typedef struct  _ATOM_ASIC_MVDD_INFO
+{
+  ATOM_COMMON_TABLE_HEADER	      sHeader; 
+  ATOM_I2C_DEVICE_SETUP_INFO      asI2CSetup[1];
+}ATOM_ASIC_MVDD_INFO;
+
+//==========================================================================================
+#define ATOM_MCLK_SS_INFO         ATOM_ASIC_MVDD_INFO
+
+//==========================================================================================
+/**************************************************************************/
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT
+{
+  ULONG               ulTargetClockRange;           //Clock out frequency (VCO), in units of 10kHz
+  USHORT              usSpreadSpectrumPercentage;   //in units of 0.01%
+  USHORT              usSpreadRateInKhz;            //modulation frequency, in units of kHz
+  UCHAR               ucClockIndication;            //Indicates which clock source needs SS
+  UCHAR               ucSpreadSpectrumMode;         //Bit1=0: Down Spread, =1: Center Spread.
+  UCHAR               ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT;
+
+//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type.
+//SS is not required or enabled if a match is not found.
+#define ASIC_INTERNAL_MEMORY_SS			1
+#define ASIC_INTERNAL_ENGINE_SS			2
+#define ASIC_INTERNAL_UVD_SS        3
+#define ASIC_INTERNAL_SS_ON_TMDS    4
+#define ASIC_INTERNAL_SS_ON_HDMI    5
+#define ASIC_INTERNAL_SS_ON_LVDS    6
+#define ASIC_INTERNAL_SS_ON_DP      7
+#define ASIC_INTERNAL_SS_ON_DCPLL   8
+#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
+#define ASIC_INTERNAL_VCE_SS        10
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
+{
+  ULONG               ulTargetClockRange;           //For mem/engine/uvd, clock out frequency (VCO), in units of 10kHz
+                                                    //For TMDS/HDMI/LVDS, it is the pixel clock; for DP, it is the link clock ( 27000 or 16200 )
+  USHORT              usSpreadSpectrumPercentage;   //in units of 0.01%
+  USHORT              usSpreadRateIn10Hz;           //modulation frequency, in units of 10Hz
+  UCHAR               ucClockIndication;            //Indicates which clock source needs SS
+  UCHAR               ucSpreadSpectrumMode;         //Bit0=0: Down Spread, =1: Center Spread; bit1=0: internal SS, =1: external SS
+  UCHAR               ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V2;
+
+//ucSpreadSpectrumMode
+//#define ATOM_SS_DOWN_SPREAD_MODE_MASK          0x00000000
+//#define ATOM_SS_DOWN_SPREAD_MODE               0x00000000
+//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK        0x00000001
+//#define ATOM_SS_CENTRE_SPREAD_MODE             0x00000001
+//#define ATOM_INTERNAL_SS_MASK                  0x00000000
+//#define ATOM_EXTERNAL_SS_MASK                  0x00000002
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER	      sHeader; 
+  ATOM_ASIC_SS_ASSIGNMENT		      asSpreadSpectrum[4];
+}ATOM_ASIC_INTERNAL_SS_INFO;
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER	      sHeader; 
+  ATOM_ASIC_SS_ASSIGNMENT_V2      asSpreadSpectrum[1];      //placeholder entry only; actual count is implied by the table size
+}ATOM_ASIC_INTERNAL_SS_INFO_V2;
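+
+// Illustrative sketch (not part of the original ATOM headers): deriving the
+// real entry count from the usStructureSize field of ATOM_COMMON_TABLE_HEADER,
+// then searching by ucClockIndication as described above. The helper name is
+// hypothetical; byte-order conversion is omitted.
+static inline ATOM_ASIC_SS_ASSIGNMENT_V2 *
+atom_find_ss_assignment_v2(ATOM_ASIC_INTERNAL_SS_INFO_V2 *tbl, UCHAR clock_id)
+{
+  int num = (tbl->sHeader.usStructureSize - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+            sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+  int i;
+  for (i = 0; i < num; i++)
+    if (tbl->asSpreadSpectrum[i].ucClockIndication == clock_id)
+      return &tbl->asSpreadSpectrum[i];
+  return (ATOM_ASIC_SS_ASSIGNMENT_V2 *)0;   // no match: SS not required/enabled
+}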
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
+{
+  ULONG               ulTargetClockRange;           //For mem/engine/uvd, clock out frequency (VCO), in units of 10kHz
+                                                    //For TMDS/HDMI/LVDS, it is the pixel clock; for DP, it is the link clock ( 27000 or 16200 )
+  USHORT              usSpreadSpectrumPercentage;   //in units of 0.01%
+  USHORT              usSpreadRateIn10Hz;           //modulation frequency, in units of 10Hz
+  UCHAR               ucClockIndication;            //Indicates which clock source needs SS
+  UCHAR               ucSpreadSpectrumMode;         //Bit0=0: Down Spread, =1: Center Spread; bit1=0: internal SS, =1: external SS
+  UCHAR               ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V3;
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER	      sHeader; 
+  ATOM_ASIC_SS_ASSIGNMENT_V3      asSpreadSpectrum[1];      //placeholder entry only; actual count is implied by the table size
+}ATOM_ASIC_INTERNAL_SS_INFO_V3;
+
+
+//==============================Scratch Pad Definition Portion===============================
+#define ATOM_DEVICE_CONNECT_INFO_DEF  0
+#define ATOM_ROM_LOCATION_DEF         1
+#define ATOM_TV_STANDARD_DEF          2
+#define ATOM_ACTIVE_INFO_DEF          3
+#define ATOM_LCD_INFO_DEF             4
+#define ATOM_DOS_REQ_INFO_DEF         5
+#define ATOM_ACC_CHANGE_INFO_DEF      6
+#define ATOM_DOS_MODE_INFO_DEF        7
+#define ATOM_I2C_CHANNEL_STATUS_DEF   8
+#define ATOM_I2C_CHANNEL_STATUS1_DEF  9
+#define ATOM_INTERNAL_TIMER_DEF       10
+
+// BIOS_0_SCRATCH Definition 
+#define ATOM_S0_CRT1_MONO               0x00000001L
+#define ATOM_S0_CRT1_COLOR              0x00000002L
+#define ATOM_S0_CRT1_MASK               (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE_A         0x00000004L
+#define ATOM_S0_TV1_SVIDEO_A            0x00000008L
+#define ATOM_S0_TV1_MASK_A              (ATOM_S0_TV1_COMPOSITE_A+ATOM_S0_TV1_SVIDEO_A)
+
+#define ATOM_S0_CV_A                    0x00000010L
+#define ATOM_S0_CV_DIN_A                0x00000020L
+#define ATOM_S0_CV_MASK_A               (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
+
+
+#define ATOM_S0_CRT2_MONO               0x00000100L
+#define ATOM_S0_CRT2_COLOR              0x00000200L
+#define ATOM_S0_CRT2_MASK               (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
+
+#define ATOM_S0_TV1_COMPOSITE           0x00000400L
+#define ATOM_S0_TV1_SVIDEO              0x00000800L
+#define ATOM_S0_TV1_SCART               0x00004000L
+#define ATOM_S0_TV1_MASK                (ATOM_S0_TV1_COMPOSITE+ATOM_S0_TV1_SVIDEO+ATOM_S0_TV1_SCART)
+
+#define ATOM_S0_CV                      0x00001000L
+#define ATOM_S0_CV_DIN                  0x00002000L
+#define ATOM_S0_CV_MASK                 (ATOM_S0_CV+ATOM_S0_CV_DIN)
+
+#define ATOM_S0_DFP1                    0x00010000L
+#define ATOM_S0_DFP2                    0x00020000L
+#define ATOM_S0_LCD1                    0x00040000L
+#define ATOM_S0_LCD2                    0x00080000L
+#define ATOM_S0_DFP6                    0x00100000L
+#define ATOM_S0_DFP3                    0x00200000L
+#define ATOM_S0_DFP4                    0x00400000L
+#define ATOM_S0_DFP5                    0x00800000L
+
+#define ATOM_S0_DFP_MASK                (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6)
+
+#define ATOM_S0_FAD_REGISTER_BUG        0x02000000L // If set, indicates we are running a PCIE asic with 
+                                                    // the FAD/HDP reg access bug.  Bit is read by DAL, this is obsolete from RV5xx
+
+#define ATOM_S0_THERMAL_STATE_MASK      0x1C000000L
+#define ATOM_S0_THERMAL_STATE_SHIFT     26
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
+#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29 
+
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC     1
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC     2
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
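+
+// Illustrative sketch (not part of the original ATOM headers): extracting the
+// system power state field from a BIOS_0_SCRATCH value using the mask and
+// shift above; 'scratch0' is a hypothetical register read.
+static inline ULONG atom_s0_system_power_state(ULONG scratch0)
+{
+  // yields one of the ATOM_S0_SYSTEM_POWER_STATE_VALUE_* codes
+  return (scratch0 & ATOM_S0_SYSTEM_POWER_STATE_MASK) >> ATOM_S0_SYSTEM_POWER_STATE_SHIFT;
+}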
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S0_CRT1_MONOb0             0x01
+#define ATOM_S0_CRT1_COLORb0            0x02
+#define ATOM_S0_CRT1_MASKb0             (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
+
+#define ATOM_S0_TV1_COMPOSITEb0         0x04
+#define ATOM_S0_TV1_SVIDEOb0            0x08
+#define ATOM_S0_TV1_MASKb0              (ATOM_S0_TV1_COMPOSITEb0+ATOM_S0_TV1_SVIDEOb0)
+
+#define ATOM_S0_CVb0                    0x10
+#define ATOM_S0_CV_DINb0                0x20
+#define ATOM_S0_CV_MASKb0               (ATOM_S0_CVb0+ATOM_S0_CV_DINb0)
+
+#define ATOM_S0_CRT2_MONOb1             0x01
+#define ATOM_S0_CRT2_COLORb1            0x02
+#define ATOM_S0_CRT2_MASKb1             (ATOM_S0_CRT2_MONOb1+ATOM_S0_CRT2_COLORb1)
+
+#define ATOM_S0_TV1_COMPOSITEb1         0x04
+#define ATOM_S0_TV1_SVIDEOb1            0x08
+#define ATOM_S0_TV1_SCARTb1             0x40
+#define ATOM_S0_TV1_MASKb1              (ATOM_S0_TV1_COMPOSITEb1+ATOM_S0_TV1_SVIDEOb1+ATOM_S0_TV1_SCARTb1)
+
+#define ATOM_S0_CVb1                    0x10
+#define ATOM_S0_CV_DINb1                0x20
+#define ATOM_S0_CV_MASKb1               (ATOM_S0_CVb1+ATOM_S0_CV_DINb1)
+
+#define ATOM_S0_DFP1b2                  0x01
+#define ATOM_S0_DFP2b2                  0x02
+#define ATOM_S0_LCD1b2                  0x04
+#define ATOM_S0_LCD2b2                  0x08
+#define ATOM_S0_DFP6b2                  0x10
+#define ATOM_S0_DFP3b2                  0x20
+#define ATOM_S0_DFP4b2                  0x40
+#define ATOM_S0_DFP5b2                  0x80
+
+
+#define ATOM_S0_THERMAL_STATE_MASKb3    0x1C
+#define ATOM_S0_THERMAL_STATE_SHIFTb3   2
+
+#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
+#define ATOM_S0_LCD1_SHIFT              18
+
+// BIOS_1_SCRATCH Definition
+#define ATOM_S1_ROM_LOCATION_MASK       0x0000FFFFL
+#define ATOM_S1_PCI_BUS_DEV_MASK        0xFFFF0000L
+
+//	BIOS_2_SCRATCH Definition
+#define ATOM_S2_TV1_STANDARD_MASK       0x0000000FL
+#define ATOM_S2_CURRENT_BL_LEVEL_MASK   0x0000FF00L
+#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT  8
+
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK       0x0C000000L
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE     0x10000000L
+
+#define ATOM_S2_DEVICE_DPMS_STATE       0x00010000L
+#define ATOM_S2_VRI_BRIGHT_ENABLE       0x20000000L
+
+#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE     0x0
+#define ATOM_S2_DISPLAY_ROTATION_90_DEGREE    0x1
+#define ATOM_S2_DISPLAY_ROTATION_180_DEGREE   0x2
+#define ATOM_S2_DISPLAY_ROTATION_270_DEGREE   0x3
+#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
+#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK   0xC0000000L
+
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S2_TV1_STANDARD_MASKb0     0x0F
+#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
+#define ATOM_S2_DEVICE_DPMS_STATEb2     0x01
+
+#define ATOM_S2_DEVICE_DPMS_MASKw1      0x3FF
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3     0x0C
+#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3   0x10
+#define ATOM_S2_TMDS_COHERENT_MODEb3    0x10          // used by VBIOS code only, use coherent mode for TMDS/HDMI mode
+#define ATOM_S2_VRI_BRIGHT_ENABLEb3     0x20
+#define ATOM_S2_ROTATION_STATE_MASKb3   0xC0
+
+
+// BIOS_3_SCRATCH Definition
+#define ATOM_S3_CRT1_ACTIVE             0x00000001L
+#define ATOM_S3_LCD1_ACTIVE             0x00000002L
+#define ATOM_S3_TV1_ACTIVE              0x00000004L
+#define ATOM_S3_DFP1_ACTIVE             0x00000008L
+#define ATOM_S3_CRT2_ACTIVE             0x00000010L
+#define ATOM_S3_LCD2_ACTIVE             0x00000020L
+#define ATOM_S3_DFP6_ACTIVE             0x00000040L
+#define ATOM_S3_DFP2_ACTIVE             0x00000080L
+#define ATOM_S3_CV_ACTIVE               0x00000100L
+#define ATOM_S3_DFP3_ACTIVE							0x00000200L
+#define ATOM_S3_DFP4_ACTIVE							0x00000400L
+#define ATOM_S3_DFP5_ACTIVE							0x00000800L
+
+#define ATOM_S3_DEVICE_ACTIVE_MASK      0x00000FFFL
+
+#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE         0x00001000L
+#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
+
+#define ATOM_S3_CRT1_CRTC_ACTIVE        0x00010000L
+#define ATOM_S3_LCD1_CRTC_ACTIVE        0x00020000L
+#define ATOM_S3_TV1_CRTC_ACTIVE         0x00040000L
+#define ATOM_S3_DFP1_CRTC_ACTIVE        0x00080000L
+#define ATOM_S3_CRT2_CRTC_ACTIVE        0x00100000L
+#define ATOM_S3_LCD2_CRTC_ACTIVE        0x00200000L
+#define ATOM_S3_DFP6_CRTC_ACTIVE        0x00400000L
+#define ATOM_S3_DFP2_CRTC_ACTIVE        0x00800000L
+#define ATOM_S3_CV_CRTC_ACTIVE          0x01000000L
+#define ATOM_S3_DFP3_CRTC_ACTIVE				0x02000000L
+#define ATOM_S3_DFP4_CRTC_ACTIVE				0x04000000L
+#define ATOM_S3_DFP5_CRTC_ACTIVE				0x08000000L
+
+#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNG    0x20000000L
+//Below two definitions are not supported in pplib, but in the old powerplay in DAL
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCH   0x40000000L
+#define ATOM_S3_RQST_GPU_USE_MIN_PWR    0x80000000L
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S3_CRT1_ACTIVEb0           0x01
+#define ATOM_S3_LCD1_ACTIVEb0           0x02
+#define ATOM_S3_TV1_ACTIVEb0            0x04
+#define ATOM_S3_DFP1_ACTIVEb0           0x08
+#define ATOM_S3_CRT2_ACTIVEb0           0x10
+#define ATOM_S3_LCD2_ACTIVEb0           0x20
+#define ATOM_S3_DFP6_ACTIVEb0           0x40
+#define ATOM_S3_DFP2_ACTIVEb0           0x80
+#define ATOM_S3_CV_ACTIVEb1             0x01
+#define ATOM_S3_DFP3_ACTIVEb1						0x02
+#define ATOM_S3_DFP4_ACTIVEb1						0x04
+#define ATOM_S3_DFP5_ACTIVEb1						0x08
+
+#define ATOM_S3_ACTIVE_CRTC1w0          0xFFF
+
+#define ATOM_S3_CRT1_CRTC_ACTIVEb2      0x01
+#define ATOM_S3_LCD1_CRTC_ACTIVEb2      0x02
+#define ATOM_S3_TV1_CRTC_ACTIVEb2       0x04
+#define ATOM_S3_DFP1_CRTC_ACTIVEb2      0x08
+#define ATOM_S3_CRT2_CRTC_ACTIVEb2      0x10
+#define ATOM_S3_LCD2_CRTC_ACTIVEb2      0x20
+#define ATOM_S3_DFP6_CRTC_ACTIVEb2      0x40
+#define ATOM_S3_DFP2_CRTC_ACTIVEb2      0x80
+#define ATOM_S3_CV_CRTC_ACTIVEb3        0x01
+#define ATOM_S3_DFP3_CRTC_ACTIVEb3			0x02
+#define ATOM_S3_DFP4_CRTC_ACTIVEb3			0x04
+#define ATOM_S3_DFP5_CRTC_ACTIVEb3			0x08
+
+#define ATOM_S3_ACTIVE_CRTC2w1          0xFFF
+
+// BIOS_4_SCRATCH Definition
+#define ATOM_S4_LCD1_PANEL_ID_MASK      0x000000FFL
+#define ATOM_S4_LCD1_REFRESH_MASK       0x0000FF00L
+#define ATOM_S4_LCD1_REFRESH_SHIFT      8
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S4_LCD1_PANEL_ID_MASKb0	  0x0FF
+#define ATOM_S4_LCD1_REFRESH_MASKb1		  ATOM_S4_LCD1_PANEL_ID_MASKb0
+#define ATOM_S4_VRAM_INFO_MASKb2        ATOM_S4_LCD1_PANEL_ID_MASKb0
+
+// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!!
+#define ATOM_S5_DOS_REQ_CRT1b0          0x01
+#define ATOM_S5_DOS_REQ_LCD1b0          0x02
+#define ATOM_S5_DOS_REQ_TV1b0           0x04
+#define ATOM_S5_DOS_REQ_DFP1b0          0x08
+#define ATOM_S5_DOS_REQ_CRT2b0          0x10
+#define ATOM_S5_DOS_REQ_LCD2b0          0x20
+#define ATOM_S5_DOS_REQ_DFP6b0          0x40
+#define ATOM_S5_DOS_REQ_DFP2b0          0x80
+#define ATOM_S5_DOS_REQ_CVb1            0x01
+#define ATOM_S5_DOS_REQ_DFP3b1					0x02
+#define ATOM_S5_DOS_REQ_DFP4b1					0x04
+#define ATOM_S5_DOS_REQ_DFP5b1					0x08
+
+#define ATOM_S5_DOS_REQ_DEVICEw0        0x0FFF
+
+#define ATOM_S5_DOS_REQ_CRT1            0x0001
+#define ATOM_S5_DOS_REQ_LCD1            0x0002
+#define ATOM_S5_DOS_REQ_TV1             0x0004
+#define ATOM_S5_DOS_REQ_DFP1            0x0008
+#define ATOM_S5_DOS_REQ_CRT2            0x0010
+#define ATOM_S5_DOS_REQ_LCD2            0x0020
+#define ATOM_S5_DOS_REQ_DFP6            0x0040
+#define ATOM_S5_DOS_REQ_DFP2            0x0080
+#define ATOM_S5_DOS_REQ_CV              0x0100
+#define ATOM_S5_DOS_REQ_DFP3            0x0200
+#define ATOM_S5_DOS_REQ_DFP4            0x0400
+#define ATOM_S5_DOS_REQ_DFP5            0x0800
+
+#define ATOM_S5_DOS_FORCE_CRT1b2        ATOM_S5_DOS_REQ_CRT1b0
+#define ATOM_S5_DOS_FORCE_TV1b2         ATOM_S5_DOS_REQ_TV1b0
+#define ATOM_S5_DOS_FORCE_CRT2b2        ATOM_S5_DOS_REQ_CRT2b0
+#define ATOM_S5_DOS_FORCE_CVb3          ATOM_S5_DOS_REQ_CVb1
+#define ATOM_S5_DOS_FORCE_DEVICEw1      (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\
+                                        (ATOM_S5_DOS_FORCE_CVb3<<8))
+
+// BIOS_6_SCRATCH Definition
+#define ATOM_S6_DEVICE_CHANGE           0x00000001L
+#define ATOM_S6_SCALER_CHANGE           0x00000002L
+#define ATOM_S6_LID_CHANGE              0x00000004L
+#define ATOM_S6_DOCKING_CHANGE          0x00000008L
+#define ATOM_S6_ACC_MODE                0x00000010L
+#define ATOM_S6_EXT_DESKTOP_MODE        0x00000020L
+#define ATOM_S6_LID_STATE               0x00000040L
+#define ATOM_S6_DOCK_STATE              0x00000080L
+#define ATOM_S6_CRITICAL_STATE          0x00000100L
+#define ATOM_S6_HW_I2C_BUSY_STATE       0x00000200L
+#define ATOM_S6_THERMAL_STATE_CHANGE    0x00000400L
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS   0x00000800L
+#define ATOM_S6_REQ_LCD_EXPANSION_FULL         0x00001000L //Normal expansion Request bit for LCD
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO  0x00002000L //Aspect ratio expansion Request bit for LCD
+
+#define ATOM_S6_DISPLAY_STATE_CHANGE    0x00004000L        //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion
+#define ATOM_S6_I2C_STATE_CHANGE        0x00008000L        //This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion
+
+#define ATOM_S6_ACC_REQ_CRT1            0x00010000L
+#define ATOM_S6_ACC_REQ_LCD1            0x00020000L
+#define ATOM_S6_ACC_REQ_TV1             0x00040000L
+#define ATOM_S6_ACC_REQ_DFP1            0x00080000L
+#define ATOM_S6_ACC_REQ_CRT2            0x00100000L
+#define ATOM_S6_ACC_REQ_LCD2            0x00200000L
+#define ATOM_S6_ACC_REQ_DFP6            0x00400000L
+#define ATOM_S6_ACC_REQ_DFP2            0x00800000L
+#define ATOM_S6_ACC_REQ_CV              0x01000000L
+#define ATOM_S6_ACC_REQ_DFP3						0x02000000L
+#define ATOM_S6_ACC_REQ_DFP4						0x04000000L
+#define ATOM_S6_ACC_REQ_DFP5						0x08000000L
+
+#define ATOM_S6_ACC_REQ_MASK                0x0FFF0000L
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE    0x10000000L
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH    0x20000000L
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE       0x40000000L
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK  0x80000000L
+
+//Byte aligned definition for BIOS usage
+#define ATOM_S6_DEVICE_CHANGEb0         0x01
+#define ATOM_S6_SCALER_CHANGEb0         0x02
+#define ATOM_S6_LID_CHANGEb0            0x04
+#define ATOM_S6_DOCKING_CHANGEb0        0x08
+#define ATOM_S6_ACC_MODEb0              0x10
+#define ATOM_S6_EXT_DESKTOP_MODEb0      0x20
+#define ATOM_S6_LID_STATEb0             0x40
+#define ATOM_S6_DOCK_STATEb0            0x80
+#define ATOM_S6_CRITICAL_STATEb1        0x01
+#define ATOM_S6_HW_I2C_BUSY_STATEb1     0x02  
+#define ATOM_S6_THERMAL_STATE_CHANGEb1  0x04
+#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
+#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1        0x10    
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20 
+
+#define ATOM_S6_ACC_REQ_CRT1b2          0x01
+#define ATOM_S6_ACC_REQ_LCD1b2          0x02
+#define ATOM_S6_ACC_REQ_TV1b2           0x04
+#define ATOM_S6_ACC_REQ_DFP1b2          0x08
+#define ATOM_S6_ACC_REQ_CRT2b2          0x10
+#define ATOM_S6_ACC_REQ_LCD2b2          0x20
+#define ATOM_S6_ACC_REQ_DFP6b2          0x40
+#define ATOM_S6_ACC_REQ_DFP2b2          0x80
+#define ATOM_S6_ACC_REQ_CVb3            0x01
+#define ATOM_S6_ACC_REQ_DFP3b3          0x02
+#define ATOM_S6_ACC_REQ_DFP4b3          0x04
+#define ATOM_S6_ACC_REQ_DFP5b3          0x08
+
+#define ATOM_S6_ACC_REQ_DEVICEw1        ATOM_S5_DOS_REQ_DEVICEw0
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCHb3 0x20
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGEb3    0x40
+#define ATOM_S6_CONFIG_DISPLAY_CHANGEb3    0x80
+
+#define ATOM_S6_DEVICE_CHANGE_SHIFT             0
+#define ATOM_S6_SCALER_CHANGE_SHIFT             1
+#define ATOM_S6_LID_CHANGE_SHIFT                2
+#define ATOM_S6_DOCKING_CHANGE_SHIFT            3
+#define ATOM_S6_ACC_MODE_SHIFT                  4
+#define ATOM_S6_EXT_DESKTOP_MODE_SHIFT          5
+#define ATOM_S6_LID_STATE_SHIFT                 6
+#define ATOM_S6_DOCK_STATE_SHIFT                7
+#define ATOM_S6_CRITICAL_STATE_SHIFT            8
+#define ATOM_S6_HW_I2C_BUSY_STATE_SHIFT         9
+#define ATOM_S6_THERMAL_STATE_CHANGE_SHIFT      10
+#define ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT     11
+#define ATOM_S6_REQ_SCALER_SHIFT                12
+#define ATOM_S6_REQ_SCALER_ARATIO_SHIFT         13
+#define ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT      14
+#define ATOM_S6_I2C_STATE_CHANGE_SHIFT          15
+#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT  28
+#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH_SHIFT  29
+#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT     30
+#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT     31
+
+// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!!
+#define ATOM_S7_DOS_MODE_TYPEb0             0x03
+#define ATOM_S7_DOS_MODE_VGAb0              0x00
+#define ATOM_S7_DOS_MODE_VESAb0             0x01
+#define ATOM_S7_DOS_MODE_EXTb0              0x02
+#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0      0x0C
+#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0     0xF0
+#define ATOM_S7_DOS_8BIT_DAC_ENb1           0x01
+#define ATOM_S7_DOS_MODE_NUMBERw1           0x0FFFF
+
+#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT       8
+
+// BIOS_8_SCRATCH Definition
+#define ATOM_S8_I2C_CHANNEL_BUSY_MASK       0x0000FFFF
+#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK     0xFFFF0000
+
+#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT      0
+#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT       16
+
+// BIOS_9_SCRATCH Definition
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK  0x0000FFFF
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK  
+#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK    0xFFFF0000
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 
+#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
+#endif
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT   
+#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT   16
+#endif
+
+ 
+#define ATOM_FLAG_SET                         0x20
+#define ATOM_FLAG_CLEAR                       0
+#define CLEAR_ATOM_S6_ACC_MODE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
+#define SET_ATOM_S6_DEVICE_CHANGE             ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE     ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SCALER_CHANGE             ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_LID_CHANGE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_LID_STATE                 ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_LID_STATE               ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_DOCK_CHANGE			          ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_DOCK_STATE                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_DOCK_STATE              ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_THERMAL_STATE_CHANGE      ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE  ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS     ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_CRITICAL_STATE            ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_CRITICAL_STATE          ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_REQ_SCALER                ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)  
+#define CLEAR_ATOM_S6_REQ_SCALER              ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_REQ_SCALER_ARATIO         ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO       ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_I2C_STATE_CHANGE          ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DISPLAY_STATE_CHANGE      ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DEVICE_RECONFIG           ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S0_LCD1                    ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )|  ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
+#define SET_ATOM_S7_DOS_8BIT_DAC_EN           ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN         ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
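+
+// Illustrative note (not part of the original ATOM headers): each SET_/CLEAR_
+// macro above packs the scratch-pad register definition (ATOM_*_DEF) into
+// bits [15:8], the bit position within that register into bits [4:0], and the
+// set/clear request into bit 5 (ATOM_FLAG_SET). A hypothetical decoder:
+static inline void atom_decode_scratch_cmd(USHORT cmd, UCHAR *reg_def, UCHAR *bit_shift, UCHAR *set)
+{
+  *reg_def   = (UCHAR)(cmd >> 8);            // which BIOS_x_SCRATCH definition
+  *bit_shift = (UCHAR)(cmd & 0x1F);          // bit position inside the register
+  *set       = (cmd & ATOM_FLAG_SET) != 0;   // set vs. clear request
+}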
+
+/****************************************************************************/	
+//Portion II: Definitions only used in Driver
+/****************************************************************************/
+
+// Macros used by driver
+#ifdef __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT))
+
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET)  (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
+#else // not __cplusplus
+#define	GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
+
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET)  ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
+#endif // __cplusplus
+
+#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
+#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
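+
+// Illustrative usage (not part of the original ATOM headers): the macro turns
+// a field name of the master table list into a USHORT index, and the revision
+// macros read the 6-bit revision fields of a table header, e.g.:
+//
+//   int idx = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+//   int rev = GET_DATA_TABLE_MAJOR_REVISION(table_header_ptr);
+//
+// where table_header_ptr is a hypothetical ATOM_COMMON_TABLE_HEADER pointer.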
+
+/****************************************************************************/	
+//Portion III: Definitions only used in VBIOS
+/****************************************************************************/
+#define ATOM_DAC_SRC					0x80
+#define ATOM_SRC_DAC1					0
+#define ATOM_SRC_DAC2					0x80
+
+typedef struct _MEMORY_PLLINIT_PARAMETERS
+{
+  ULONG ulTargetMemoryClock; //In 10kHz units
+  UCHAR   ucAction;          //not defined yet
+  UCHAR   ucFbDiv_Hi;        //Fbdiv high byte
+  UCHAR   ucFbDiv;           //FB value
+  UCHAR   ucPostDiv;         //Post divider
+}MEMORY_PLLINIT_PARAMETERS;
+
+#define MEMORY_PLLINIT_PS_ALLOCATION  MEMORY_PLLINIT_PARAMETERS
+
+
+#define	GPIO_PIN_WRITE													0x01			
+#define	GPIO_PIN_READ														0x00
+
+typedef struct  _GPIO_PIN_CONTROL_PARAMETERS
+{
+  UCHAR ucGPIO_ID;           //return value, read from GPIO pins
+  UCHAR ucGPIOBitShift;      //defines which bit in ucGPIOBitVal needs to be updated
+  UCHAR ucGPIOBitVal;        //Set/Reset the bit selected by ucGPIOBitShift
+  UCHAR ucAction;            //=GPIO_PIN_WRITE: Write; =GPIO_PIN_READ: Read
+}GPIO_PIN_CONTROL_PARAMETERS;
+
+typedef struct _ENABLE_SCALER_PARAMETERS
+{
+  UCHAR ucScaler;            // ATOM_SCALER1, ATOM_SCALER2
+  UCHAR ucEnable;            // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION
+  UCHAR ucTVStandard;        // 
+  UCHAR ucPadding[1];
+}ENABLE_SCALER_PARAMETERS; 
+#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS 
+
+//ucEnable:
+#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION    0
+#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION  1
+#define SCALER_ENABLE_2TAP_ALPHA_MODE               2
+#define SCALER_ENABLE_MULTITAP_MODE                 3
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS
+{
+  ULONG  usHWIconHorzVertPosn;        // Hardware icon horizontal and vertical position
+  UCHAR  ucHWIconVertOffset;          // Hardware Icon Vertical offset
+  UCHAR  ucHWIconHorzOffset;          // Hardware Icon Horizontal offset
+  UCHAR  ucSelection;                 // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION
+{
+  ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS  sEnableIcon;
+  ENABLE_CRTC_PARAMETERS                  sReserved;  
+}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS
+{
+  USHORT usHight;                     // Image height
+  USHORT usWidth;                     // Image width
+  UCHAR  ucSurface;                   // Surface 1 or 2	
+  UCHAR  ucPadding[3];
+}ENABLE_GRAPH_SURFACE_PARAMETERS;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2
+{
+  USHORT usHight;                     // Image height
+  USHORT usWidth;                     // Image width
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR  ucPadding[2];
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
+{
+  USHORT usHight;                     // Image height
+  USHORT usWidth;                     // Image width
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  USHORT usDeviceId;                  // Active Device Id for this surface. If no device, set to 0. 
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4
+{
+  USHORT usHight;                     // Image height
+  USHORT usWidth;                     // Image width
+  USHORT usGraphPitch;
+  UCHAR  ucColorDepth;
+  UCHAR  ucPixelFormat;
+  UCHAR  ucSurface;                   // Surface 1 or 2
+  UCHAR  ucEnable;                    // ATOM_ENABLE or ATOM_DISABLE
+  UCHAR  ucModeType;
+  UCHAR  ucReserved;
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4;
+
+// ucEnable
+#define ATOM_GRAPH_CONTROL_SET_PITCH             0x0f
+#define ATOM_GRAPH_CONTROL_SET_DISP_START        0x10
+
+typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
+{
+  ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;          
+  ENABLE_YUV_PS_ALLOCATION        sReserved; // Don't set this one
+}ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
+
+typedef struct _MEMORY_CLEAN_UP_PARAMETERS
+{
+  USHORT  usMemoryStart;                //offset from memory base address, on an 8KB boundary
+  USHORT  usMemorySize;                 //size in 8KB blocks, 8KB aligned
+}MEMORY_CLEAN_UP_PARAMETERS;
+#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
+
+typedef struct  _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
+{
+  USHORT  usX_Size;                     //When used as an input parameter, usX_Size indicates which CRTC
+  USHORT  usY_Size;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS; 
+
+typedef struct  _GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2
+{
+  union{
+    USHORT  usX_Size;                     //When used as an input parameter, usX_Size indicates which CRTC
+    USHORT  usSurface; 
+  };
+  USHORT usY_Size;
+  USHORT usDispXStart;               
+  USHORT usDispYStart;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2; 
+
+
+typedef struct _PALETTE_DATA_CONTROL_PARAMETERS_V3 
+{
+  UCHAR  ucLutId;
+  UCHAR  ucAction;
+  USHORT usLutStartIndex;
+  USHORT usLutLength;
+  USHORT usLutOffsetInVram;
+}PALETTE_DATA_CONTROL_PARAMETERS_V3;
+
+// ucAction:
+#define PALETTE_DATA_AUTO_FILL            1
+#define PALETTE_DATA_READ                 2
+#define PALETTE_DATA_WRITE                3
+
+
+typedef struct _INTERRUPT_SERVICE_PARAMETERS_V2
+{
+  UCHAR  ucInterruptId;
+  UCHAR  ucServiceId;
+  UCHAR  ucStatus;
+  UCHAR  ucReserved;
+}INTERRUPT_SERVICE_PARAMETER_V2;
+
+// ucInterruptId
+#define HDP1_INTERRUPT_ID                 1
+#define HDP2_INTERRUPT_ID                 2
+#define HDP3_INTERRUPT_ID                 3
+#define HDP4_INTERRUPT_ID                 4
+#define HDP5_INTERRUPT_ID                 5
+#define HDP6_INTERRUPT_ID                 6
+#define SW_INTERRUPT_ID                   11   
+
+// ucServiceId
+#define INTERRUPT_SERVICE_GEN_SW_INT      1
+#define INTERRUPT_SERVICE_GET_STATUS      2
+
+ // ucStatus
+#define INTERRUPT_STATUS__INT_TRIGGER     1
+#define INTERRUPT_STATUS__HPD_HIGH        2
+
+typedef struct _INDIRECT_IO_ACCESS
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  UCHAR                    IOAccessSequence[256];
+} INDIRECT_IO_ACCESS;
+
+#define INDIRECT_READ              0x00
+#define INDIRECT_WRITE             0x80
+
+#define INDIRECT_IO_MM             0
+#define INDIRECT_IO_PLL            1
+#define INDIRECT_IO_MC             2
+#define INDIRECT_IO_PCIE           3
+#define INDIRECT_IO_PCIEP          4
+#define INDIRECT_IO_NBMISC         5
+
+#define INDIRECT_IO_PLL_READ       INDIRECT_IO_PLL   | INDIRECT_READ
+#define INDIRECT_IO_PLL_WRITE      INDIRECT_IO_PLL   | INDIRECT_WRITE
+#define INDIRECT_IO_MC_READ        INDIRECT_IO_MC    | INDIRECT_READ
+#define INDIRECT_IO_MC_WRITE       INDIRECT_IO_MC    | INDIRECT_WRITE
+#define INDIRECT_IO_PCIE_READ      INDIRECT_IO_PCIE  | INDIRECT_READ
+#define INDIRECT_IO_PCIE_WRITE     INDIRECT_IO_PCIE  | INDIRECT_WRITE
+#define INDIRECT_IO_PCIEP_READ     INDIRECT_IO_PCIEP | INDIRECT_READ
+#define INDIRECT_IO_PCIEP_WRITE    INDIRECT_IO_PCIEP | INDIRECT_WRITE
+#define INDIRECT_IO_NBMISC_READ    INDIRECT_IO_NBMISC | INDIRECT_READ
+#define INDIRECT_IO_NBMISC_WRITE   INDIRECT_IO_NBMISC | INDIRECT_WRITE
+
+typedef struct _ATOM_OEM_INFO
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+}ATOM_OEM_INFO;
+
+typedef struct _ATOM_TV_MODE
+{
+   UCHAR	ucVMode_Num;			  //Video mode number
+   UCHAR	ucTV_Mode_Num;			//Internal TV mode number
+}ATOM_TV_MODE;
+
+typedef struct _ATOM_BIOS_INT_TVSTD_MODE
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+   USHORT	usTV_Mode_LUT_Offset;	// Pointer to standard to internal number conversion table
+   USHORT	usTV_FIFO_Offset;		  // Pointer to FIFO entry table
+   USHORT	usNTSC_Tbl_Offset;		// Pointer to SDTV_Mode_NTSC table
+   USHORT	usPAL_Tbl_Offset;		  // Pointer to SDTV_Mode_PAL table 
+   USHORT	usCV_Tbl_Offset;		  // Pointer to the CV mode table
+}ATOM_BIOS_INT_TVSTD_MODE;
+
+
+typedef struct _ATOM_TV_MODE_SCALER_PTR
+{
+   USHORT	ucFilter0_Offset;		//Pointer to filter format 0 coefficients
+   USHORT	usFilter1_Offset;		//Pointer to filter format 1 coefficients
+   UCHAR	ucTV_Mode_Num;
+}ATOM_TV_MODE_SCALER_PTR;
+
+typedef struct _ATOM_STANDARD_VESA_TIMING
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT 				 aModeTimings[16];      // 16 is not the real array number, just for initial allocation
+}ATOM_STANDARD_VESA_TIMING;
+
+
+typedef struct _ATOM_STD_FORMAT
+{ 
+  USHORT    usSTD_HDisp;
+  USHORT    usSTD_VDisp;
+  USHORT    usSTD_RefreshRate;
+  USHORT    usReserved;
+}ATOM_STD_FORMAT;
+
+typedef struct _ATOM_VESA_TO_EXTENDED_MODE
+{
+  USHORT  usVESA_ModeNumber;
+  USHORT  usExtendedModeNumber;
+}ATOM_VESA_TO_EXTENDED_MODE;
+
+typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT
+{ 
+  ATOM_COMMON_TABLE_HEADER   sHeader;  
+  ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
+}ATOM_VESA_TO_INTENAL_MODE_LUT;
+
+/*************** ATOM Memory Related Data Structure ***********************/
+typedef struct _ATOM_MEMORY_VENDOR_BLOCK{
+	UCHAR												ucMemoryType;
+	UCHAR												ucMemoryVendor;
+	UCHAR												ucAdjMCId;
+	UCHAR												ucDynClkId;
+	ULONG												ulDllResetClkRange;
+}ATOM_MEMORY_VENDOR_BLOCK;
+
+
+typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{
+#if ATOM_BIG_ENDIAN
+	ULONG												ucMemBlkId:8;
+	ULONG												ulMemClockRange:24;
+#else
+	ULONG												ulMemClockRange:24;
+	ULONG												ucMemBlkId:8;
+#endif
+}ATOM_MEMORY_SETTING_ID_CONFIG;
+
+typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
+{
+  ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
+  ULONG                         ulAccess;
+}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
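+
+// Illustrative sketch (not part of the original ATOM headers): the union above
+// lets the 24/8 bitfield pair be read as a single ULONG. In the assembled
+// dword value, ucMemBlkId occupies bits [31:24] and ulMemClockRange bits
+// [23:0] under either ATOM_BIG_ENDIAN layout. The helper name is hypothetical.
+static inline UCHAR atom_mem_blk_id(ULONG mem_id_dword)
+{
+  return (UCHAR)(mem_id_dword >> 24);   // ucMemBlkId, bits [31:24]
+}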
+
+
+typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
+	ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS			ulMemoryID;
+	ULONG															        aulMemData[1];
+}ATOM_MEMORY_SETTING_DATA_BLOCK;
+
+
+typedef struct _ATOM_INIT_REG_INDEX_FORMAT{
+	 USHORT											usRegIndex;                                     // MC register index
+	 UCHAR											ucPreRegDataLength;                             // offset in ATOM_INIT_REG_BLOCK.asRegDataBuf
+}ATOM_INIT_REG_INDEX_FORMAT;
+
+
+typedef struct _ATOM_INIT_REG_BLOCK{
+	USHORT													usRegIndexTblSize;													//size of asRegIndexBuf
+	USHORT													usRegDataBlkSize;														//size of ATOM_MEMORY_SETTING_DATA_BLOCK
+	ATOM_INIT_REG_INDEX_FORMAT			asRegIndexBuf[1];
+	ATOM_MEMORY_SETTING_DATA_BLOCK	asRegDataBuf[1];
+}ATOM_INIT_REG_BLOCK;
+
+#define END_OF_REG_INDEX_BLOCK  0x0ffff
+#define END_OF_REG_DATA_BLOCK   0x00000000
+#define ATOM_INIT_REG_MASK_FLAG 0x80               //Not used in BIOS
+#define	CLOCK_RANGE_HIGHEST			0x00ffffff
+
+#define VALUE_DWORD             sizeof(ULONG)
+#define VALUE_SAME_AS_ABOVE     0
+#define VALUE_MASK_DWORD        0x84
+
+#define INDEX_ACCESS_RANGE_BEGIN	    (VALUE_DWORD + 1)
+#define INDEX_ACCESS_RANGE_END		    (INDEX_ACCESS_RANGE_BEGIN + 1)
+#define VALUE_INDEX_ACCESS_SINGLE	    (INDEX_ACCESS_RANGE_END + 1)
+//#define ACCESS_MCIODEBUGIND            0x40       //defined in BIOS code
+#define ACCESS_PLACEHOLDER             0x80
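+
+// Illustrative sketch (not part of the original ATOM headers): asRegIndexBuf[]
+// and asRegDataBuf[] above are [1]-sized placeholders; the index list is
+// terminated by an END_OF_REG_INDEX_BLOCK entry, so a walker scans for the
+// terminator. The helper name is hypothetical; byte-order handling is omitted.
+static inline int atom_count_reg_indices(const ATOM_INIT_REG_BLOCK *blk)
+{
+  int n = 0;
+  while (blk->asRegIndexBuf[n].usRegIndex != END_OF_REG_INDEX_BLOCK)
+    n++;                                 // stop at the 0xFFFF terminator entry
+  return n;
+}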
+
+typedef struct _ATOM_MC_INIT_PARAM_TABLE
+{ 
+  ATOM_COMMON_TABLE_HEADER		sHeader;
+  USHORT											usAdjustARB_SEQDataOffset;
+  USHORT											usMCInitMemTypeTblOffset;
+  USHORT											usMCInitCommonTblOffset;
+  USHORT											usMCInitPowerDownTblOffset;
+	ULONG												ulARB_SEQDataBuf[32];
+	ATOM_INIT_REG_BLOCK					asMCInitMemType;
+	ATOM_INIT_REG_BLOCK					asMCInitCommon;
+}ATOM_MC_INIT_PARAM_TABLE;
+
+
+#define _4Mx16              0x2
+#define _4Mx32              0x3
+#define _8Mx16              0x12
+#define _8Mx32              0x13
+#define _16Mx16             0x22
+#define _16Mx32             0x23
+#define _32Mx16             0x32
+#define _32Mx32             0x33
+#define _64Mx8              0x41
+#define _64Mx16             0x42
+#define _64Mx32             0x43
+#define _128Mx8             0x51
+#define _128Mx16            0x52
+#define _256Mx8             0x61
+#define _256Mx16            0x62
+
+#define SAMSUNG             0x1
+#define INFINEON            0x2
+#define ELPIDA              0x3
+#define ETRON               0x4
+#define NANYA               0x5
+#define HYNIX               0x6
+#define MOSEL               0x7
+#define WINBOND             0x8
+#define ESMT                0x9
+#define MICRON              0xF
+
+#define QIMONDA             INFINEON
+#define PROMOS              MOSEL
+#define KRETON              INFINEON
+#define ELIXIR              NANYA
+
+/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
+
+#define UCODE_ROM_START_ADDRESS		0x1b800
+#define	UCODE_SIGNATURE			0x4375434d // 'MCuC' - MC uCode
+
+//uCode block header for reference
+
+typedef struct _MCuCodeHeader
+{
+  ULONG  ulSignature;
+  UCHAR  ucRevision;
+  UCHAR  ucChecksum;
+  UCHAR  ucReserved1;
+  UCHAR  ucReserved2;
+  USHORT usParametersLength;
+  USHORT usUCodeLength;
+  USHORT usReserved1;
+  USHORT usReserved2;
+} MCuCodeHeader;
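+
+// Illustrative sketch (not part of the original ATOM headers): checking for an
+// MC uCode block in the upper 64K of the ROM. 'rom' is a hypothetical pointer
+// to the start of the ROM image; byte-order conversion is omitted.
+static inline int atom_mc_ucode_present(const UCHAR *rom)
+{
+  const MCuCodeHeader *hdr = (const MCuCodeHeader *)(rom + UCODE_ROM_START_ADDRESS);
+  return hdr->ulSignature == UCODE_SIGNATURE;  // 'MCuC'
+}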
+
+//////////////////////////////////////////////////////////////////////////////////
+
+#define ATOM_MAX_NUMBER_OF_VRAM_MODULE	16
+
+#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK	0xF
+typedef struct _ATOM_VRAM_MODULE_V1
+{
+  ULONG                      ulReserved;
+  USHORT                     usEMRSValue;  
+  USHORT                     usMRSValue;
+  USHORT                     usReserved;
+  UCHAR                      ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR                      ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved;
+  UCHAR                      ucMemoryVenderID;  // Predefined, never changes across designs or memory type/vendor
+  UCHAR                      ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+  UCHAR                      ucRow;             // Number of rows, in power of 2
+  UCHAR                      ucColumn;          // Number of columns, in power of 2
+  UCHAR                      ucBank;            // Number of banks
+  UCHAR                      ucRank;            // Number of ranks, in power of 2
+  UCHAR                      ucChannelNum;      // Number of channels
+  UCHAR                      ucChannelConfig;   // [3:0]=indication of channel combination; [7:4]=channel bit width, in power of 2
+  UCHAR                      ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+  UCHAR                      ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+  UCHAR                      ucReserved[2];
+}ATOM_VRAM_MODULE_V1;
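+
+// Illustrative sketch (not part of the original ATOM headers): splitting
+// ucMemoryDeviceCfg into the density and width nibbles encoded by the
+// _4Mx16.._256Mx16 values above, e.g. _8Mx32 == 0x13 -> density 0x1 (8M),
+// width 0x3 (x32). The helper name is hypothetical.
+static inline void atom_vram_device_cfg(UCHAR cfg, UCHAR *density, UCHAR *width)
+{
+  *density = cfg >> 4;    // 0x0:4M, 0x1:8M, 0x2:16M, 0x3:32M, ...
+  *width   = cfg & 0xF;   // 0x0:x4, 0x1:x8, 0x2:x16, 0x3:x32, ...
+}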
+
+
+typedef struct _ATOM_VRAM_MODULE_V2
+{
+  ULONG                      ulReserved;
+  ULONG                      ulFlags;     			// To enable/disable functionalities based on memory type
+  ULONG                      ulEngineClock;     // Override of default engine clock for particular memory type
+  ULONG                      ulMemoryClock;     // Override of default memory clock for particular memory type
+  USHORT                     usEMRS2Value;      // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+  USHORT                     usEMRS3Value;      // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+  USHORT                     usEMRSValue;  
+  USHORT                     usMRSValue;
+  USHORT                     usReserved;
+  UCHAR                      ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR                      ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+  UCHAR                      ucMemoryVenderID;  // Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table gets executed
+  UCHAR                      ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+  UCHAR                      ucRow;             // Number of rows, in power of 2
+  UCHAR                      ucColumn;          // Number of columns, in power of 2
+  UCHAR                      ucBank;            // Number of banks
+  UCHAR                      ucRank;            // Number of ranks, in power of 2
+  UCHAR                      ucChannelNum;      // Number of channels
+  UCHAR                      ucChannelConfig;   // [3:0]=indication of channel combination; [7:4]=channel bit width, in power of 2
+  UCHAR                      ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+  UCHAR                      ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+  UCHAR                      ucRefreshRateFactor;
+  UCHAR                      ucReserved[3];
+}ATOM_VRAM_MODULE_V2;
+
+
+typedef	struct _ATOM_MEMORY_TIMING_FORMAT
+{
+	ULONG											 ulClkRange;				// memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing 	
+  union{
+	  USHORT										 usMRS;							// mode register						
+    USHORT                     usDDR3_MR0;
+  };
+  union{
+	  USHORT										 usEMRS;						// extended mode register
+    USHORT                     usDDR3_MR1;
+  };
+	UCHAR											 ucCL;							// CAS latency
+	UCHAR											 ucWL;							// WRITE Latency				
+	UCHAR											 uctRAS;						// tRAS
+	UCHAR											 uctRC;							// tRC	
+	UCHAR											 uctRFC;						// tRFC
+	UCHAR											 uctRCDR;						// tRCDR	
+	UCHAR											 uctRCDW;						// tRCDW
+	UCHAR											 uctRP;							// tRP
+	UCHAR											 uctRRD;						// tRRD	
+	UCHAR											 uctWR;							// tWR
+	UCHAR											 uctWTR;						// tWTR
+	UCHAR											 uctPDIX;						// tPDIX
+	UCHAR											 uctFAW;						// tFAW
+	UCHAR											 uctAOND;						// tAOND
+  union 
+  {
+    struct {
+	    UCHAR											 ucflag;						// flag to control memory timing calculation. bit0= control EMRS2 Infineon 
+	    UCHAR											 ucReserved;						
+    };
+    USHORT                   usDDR3_MR2;
+  };
+}ATOM_MEMORY_TIMING_FORMAT;
+
+
+typedef	struct _ATOM_MEMORY_TIMING_FORMAT_V1
+{
+	ULONG											 ulClkRange;				// memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing 	
+	USHORT										 usMRS;							// mode register						
+	USHORT										 usEMRS;						// extended mode register
+	UCHAR											 ucCL;							// CAS latency
+	UCHAR											 ucWL;							// WRITE Latency				
+	UCHAR											 uctRAS;						// tRAS
+	UCHAR											 uctRC;							// tRC	
+	UCHAR											 uctRFC;						// tRFC
+	UCHAR											 uctRCDR;						// tRCDR	
+	UCHAR											 uctRCDW;						// tRCDW
+	UCHAR											 uctRP;							// tRP
+	UCHAR											 uctRRD;						// tRRD	
+	UCHAR											 uctWR;							// tWR
+	UCHAR											 uctWTR;						// tWTR
+	UCHAR											 uctPDIX;						// tPDIX
+	UCHAR											 uctFAW;						// tFAW
+	UCHAR											 uctAOND;						// tAOND
+	UCHAR											 ucflag;						// flag to control memory timing calculation. bit0= control EMRS2 Infineon 
+////////////////////////////////////GDDR parameters///////////////////////////////////
+	UCHAR											 uctCCDL;						// 
+	UCHAR											 uctCRCRL;						// 
+	UCHAR											 uctCRCWL;						// 
+	UCHAR											 uctCKE;						// 
+	UCHAR											 uctCKRSE;						// 
+	UCHAR											 uctCKRSX;						// 
+	UCHAR											 uctFAW32;						// 
+	UCHAR											 ucMR5lo;					// 
+	UCHAR											 ucMR5hi;					// 
+	UCHAR											 ucTerminator;
+}ATOM_MEMORY_TIMING_FORMAT_V1;
+
+typedef	struct _ATOM_MEMORY_TIMING_FORMAT_V2
+{
+	ULONG											 ulClkRange;				// memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing 	
+	USHORT										 usMRS;							// mode register						
+	USHORT										 usEMRS;						// extended mode register
+	UCHAR											 ucCL;							// CAS latency
+	UCHAR											 ucWL;							// WRITE Latency				
+	UCHAR											 uctRAS;						// tRAS
+	UCHAR											 uctRC;							// tRC	
+	UCHAR											 uctRFC;						// tRFC
+	UCHAR											 uctRCDR;						// tRCDR	
+	UCHAR											 uctRCDW;						// tRCDW
+	UCHAR											 uctRP;							// tRP
+	UCHAR											 uctRRD;						// tRRD	
+	UCHAR											 uctWR;							// tWR
+	UCHAR											 uctWTR;						// tWTR
+	UCHAR											 uctPDIX;						// tPDIX
+	UCHAR											 uctFAW;						// tFAW
+	UCHAR											 uctAOND;						// tAOND
+	UCHAR											 ucflag;						// flag to control memory timing calculation. bit0= control EMRS2 Infineon 
+////////////////////////////////////GDDR parameters///////////////////////////////////
+	UCHAR											 uctCCDL;						// 
+	UCHAR											 uctCRCRL;						// 
+	UCHAR											 uctCRCWL;						// 
+	UCHAR											 uctCKE;						// 
+	UCHAR											 uctCKRSE;						// 
+	UCHAR											 uctCKRSX;						// 
+	UCHAR											 uctFAW32;						// 
+	UCHAR											 ucMR4lo;					// 
+	UCHAR											 ucMR4hi;					// 
+	UCHAR											 ucMR5lo;					// 
+	UCHAR											 ucMR5hi;					// 
+	UCHAR											 ucTerminator;
+	UCHAR											 ucReserved;	
+}ATOM_MEMORY_TIMING_FORMAT_V2;
+
+typedef	struct _ATOM_MEMORY_FORMAT
+{
+	ULONG											 ulDllDisClock;			// memory DLL will be disabled when target memory clock is below this clock
+  union{
+    USHORT                     usEMRS2Value;      // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+    USHORT                     usDDR3_Reserved;   // Not used for DDR3 memory
+  };
+  union{
+    USHORT                     usEMRS3Value;      // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+    USHORT                     usDDR3_MR3;        // Used for DDR3 memory
+  };
+  UCHAR                      ucMemoryType;      // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+  UCHAR                      ucMemoryVenderID;  // Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table gets executed
+  UCHAR                      ucRow;             // Number of rows, in power of 2;
+  UCHAR                      ucColumn;          // Number of columns, in power of 2;
+  UCHAR                      ucBank;            // Number of banks;
+  UCHAR                      ucRank;            // Number of ranks, in power of 2
+	UCHAR											 ucBurstSize;				// burst size, 0= burst size=4  1= burst size=8
+  UCHAR                      ucDllDisBit;				// position of DLL Enable/Disable bit in EMRS ( Extended Mode Register )
+  UCHAR                      ucRefreshRateFactor;	// memory refresh rate in unit of ms	
+	UCHAR											 ucDensity;					// _8Mx32, _16Mx32, _16Mx16, _32Mx16
+	UCHAR											 ucPreamble;				//[7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR											 ucMemAttrib;				// Memory Device Attribute, like RDBI/WDBI etc
+	ATOM_MEMORY_TIMING_FORMAT	 asMemTiming[5];		//Memory timing blocks, sorted from lower clock to higher clock
+}ATOM_MEMORY_FORMAT;
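
Per the comments, each asMemTiming entry applies while the target memory clock is at or below its ulClkRange bound, and the five entries are sorted ascending. Selecting a timing set is then a linear scan; a hedged sketch (function name hypothetical):

static const ATOM_MEMORY_TIMING_FORMAT *
pick_mem_timing(const ATOM_MEMORY_FORMAT *fmt, ULONG target_clk_10khz)
{
    int i;
    for (i = 0; i < 5; i++)
        if (target_clk_10khz <= fmt->asMemTiming[i].ulClkRange)
            return &fmt->asMemTiming[i];
    return &fmt->asMemTiming[4];   /* clamp to the fastest set */
}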
+
+
+typedef struct _ATOM_VRAM_MODULE_V3
+{
+	ULONG											 ulChannelMapCfg;		// board dependent parameter: Channel combination
+	USHORT										 usSize;						// size of ATOM_VRAM_MODULE_V3
+  USHORT                     usDefaultMVDDQ;		// board dependent parameter:Default Memory Core Voltage
+  USHORT                     usDefaultMVDDC;		// board dependent parameter:Default Memory IO Voltage
+	UCHAR                      ucExtMemoryID;     // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR                      ucChannelNum;      // board dependent parameter:Number of channel;
+	UCHAR											 ucChannelSize;			// board dependent parameter:32bit or 64bit	
+	UCHAR											 ucVREFI;						// board dependent parameter: EXT or INT +160mv to -140mv
+	UCHAR											 ucNPL_RT;					// board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
+	UCHAR											 ucFlag;						// To enable/disable functionalities based on memory type
+	ATOM_MEMORY_FORMAT				 asMemory;					// describes all video memory parameters from the memory spec
+}ATOM_VRAM_MODULE_V3;
+
+
+//ATOM_VRAM_MODULE_V3.ucNPL_RT
+#define NPL_RT_MASK															0x0f
+#define BATTERY_ODT_MASK												0xc0
+
+#define ATOM_VRAM_MODULE		 ATOM_VRAM_MODULE_V3
+
+typedef struct _ATOM_VRAM_MODULE_V4
+{
+  ULONG	  ulChannelMapCfg;	                // board dependent parameter: Channel combination
+  USHORT  usModuleSize;                     // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+  USHORT  usPrivateReserved;                // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;    		            // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucMemoryType;                     // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+  UCHAR   ucChannelNum;                     // Number of channels present in this module config
+  UCHAR   ucChannelWidth;                   // 0 - 32 bits; 1 - 64 bits
+	UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+	UCHAR	  ucFlag;						                // To enable/disable functionalities based on memory type
+	UCHAR	  ucMisc;						                // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
+  UCHAR		ucVREFI;                          // board dependent parameter
+  UCHAR   ucNPL_RT;                         // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
+  UCHAR		ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+
+//Compared with V3, we flatten the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level
+  union{
+    USHORT	usEMRS2Value;                   // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+    USHORT  usDDR3_Reserved;
+  };
+  union{
+    USHORT	usEMRS3Value;                   // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+    USHORT  usDDR3_MR3;                     // Used for DDR3 memory
+  };  
+  UCHAR   ucMemoryVenderID;  		            // Predefined, If not predefined, vendor detection table gets executed
+  UCHAR	  ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR   ucReserved2[2];
+  ATOM_MEMORY_TIMING_FORMAT  asMemTiming[5];//Memory timing blocks, sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V4;
+
+#define VRAM_MODULE_V4_MISC_RANK_MASK       0x3
+#define VRAM_MODULE_V4_MISC_DUAL_RANK       0x1
+#define VRAM_MODULE_V4_MISC_BL_MASK         0x4
+#define VRAM_MODULE_V4_MISC_BL8             0x4
+#define VRAM_MODULE_V4_MISC_DUAL_CS         0x10
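
The VRAM_MODULE_V4_MISC_* masks decode ucMisc as documented on the field itself; an illustrative use (helper names hypothetical):

static int vram_v4_is_dual_rank(const ATOM_VRAM_MODULE_V4 *m)
{
    return (m->ucMisc & VRAM_MODULE_V4_MISC_RANK_MASK) ==
           VRAM_MODULE_V4_MISC_DUAL_RANK;
}

static int vram_v4_burst_length(const ATOM_VRAM_MODULE_V4 *m)
{
    /* bit2: 0 - burst length 4, 1 - burst length 8 */
    return (m->ucMisc & VRAM_MODULE_V4_MISC_BL_MASK) ? 8 : 4;
}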
+
+typedef struct _ATOM_VRAM_MODULE_V5
+{
+  ULONG	  ulChannelMapCfg;	                // board dependent parameter: Channel combination
+  USHORT  usModuleSize;                     // size of ATOM_VRAM_MODULE_V5, make it easy for VBIOS to look for next entry of VRAM_MODULE
+  USHORT  usPrivateReserved;                // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;    		            // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucMemoryType;                     // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+  UCHAR   ucChannelNum;                     // Number of channels present in this module config
+  UCHAR   ucChannelWidth;                   // 0 - 32 bits; 1 - 64 bits
+	UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+	UCHAR	  ucFlag;						                // To enable/disable functionalities based on memory type
+	UCHAR	  ucMisc;						                // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
+  UCHAR		ucVREFI;                          // board dependent parameter
+  UCHAR   ucNPL_RT;                         // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
+  UCHAR		ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+
+//Compared with V3, the struct is flattened by merging ATOM_MEMORY_FORMAT (as is) at the same level
+  USHORT	usEMRS2Value;      		            // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+  USHORT	usEMRS3Value;      		            // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+  UCHAR   ucMemoryVenderID;  		            // Predefined, If not predefined, vendor detection table gets executed
+  UCHAR	  ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR	  ucFIFODepth;			                // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
+  UCHAR   ucCDR_Bandwidth;		   // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+  ATOM_MEMORY_TIMING_FORMAT_V1  asMemTiming[5];//Memory timing blocks, sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V5;
+
+typedef struct _ATOM_VRAM_MODULE_V6
+{
+  ULONG	  ulChannelMapCfg;	                // board dependent parameter: Channel combination
+  USHORT  usModuleSize;                     // size of ATOM_VRAM_MODULE_V6, make it easy for VBIOS to look for next entry of VRAM_MODULE
+  USHORT  usPrivateReserved;                // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;    		            // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+  UCHAR   ucMemoryType;                     // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+  UCHAR   ucChannelNum;                     // Number of channels present in this module config
+  UCHAR   ucChannelWidth;                   // 0 - 32 bits; 1 - 64 bits
+	UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+	UCHAR	  ucFlag;						                // To enable/disable functionalities based on memory type
+	UCHAR	  ucMisc;						                // bit0: 0 - single rank; 1 - dual rank;   bit2: 0 - burstlength 4, 1 - burstlength 8
+  UCHAR		ucVREFI;                          // board dependent parameter
+  UCHAR   ucNPL_RT;                         // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
+  UCHAR		ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+                                            // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+
+//Compared with V3, the struct is flattened by merging ATOM_MEMORY_FORMAT (as is) at the same level
+  USHORT	usEMRS2Value;      		            // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+  USHORT	usEMRS3Value;      		            // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+  UCHAR   ucMemoryVenderID;  		            // Predefined, If not predefined, vendor detection table gets executed
+  UCHAR	  ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR	  ucFIFODepth;			                // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
+  UCHAR   ucCDR_Bandwidth;		   // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+  ATOM_MEMORY_TIMING_FORMAT_V2  asMemTiming[5];//Memory timing blocks, sorted from lower clock to higher clock
+}ATOM_VRAM_MODULE_V6;
+
+typedef struct _ATOM_VRAM_MODULE_V7
+{
+// Design Specific Values
+  ULONG	  ulChannelMapCfg;	                // mmMC_SHARED_CHREMAP
+  USHORT  usModuleSize;                     // Size of ATOM_VRAM_MODULE_V7
+  USHORT  usPrivateReserved;                // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usEnableChannels;                 // bit vector indicating which channels are enabled
+  UCHAR   ucExtMemoryID;                    // Current memory module ID
+  UCHAR   ucMemoryType;                     // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+  UCHAR   ucChannelNum;                     // Number of mem. channels supported in this module
+  UCHAR   ucChannelWidth;                   // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
+  UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+  UCHAR	  ucReserve;                        // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
+  UCHAR	  ucMisc;                           // RANK_OF_THISMEMORY etc.
+  UCHAR	  ucVREFI;                          // Not used.
+  UCHAR   ucNPL_RT;                         // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
+  UCHAR	  ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  USHORT  usSEQSettingOffset;
+  UCHAR   ucReserved;
+// Memory Module specific values
+  USHORT  usEMRS2Value;                     // EMRS2/MR2 Value. 
+  USHORT  usEMRS3Value;                     // EMRS3/MR3 Value.
+  UCHAR   ucMemoryVenderID;                 // [7:4] Revision, [3:0] Vendor code
+  UCHAR	  ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR	  ucFIFODepth;                      // FIFO depth can be detected during vendor detection, here is hardcoded per memory
+  UCHAR   ucCDR_Bandwidth;                  // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+  char    strMemPNString[20];               // part number string, terminated with '\0'.
+}ATOM_VRAM_MODULE_V7;
+
+typedef struct _ATOM_VRAM_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  UCHAR                      ucNumOfVRAMModule;
+  ATOM_VRAM_MODULE           aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_V2;
+
+typedef struct _ATOM_VRAM_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+	USHORT										 usMemAdjustTblOffset;													 // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+	USHORT										 usMemClkPatchTblOffset;												 //	offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+	USHORT										 usRerseved;
+	UCHAR           	         aVID_PinsShift[9];															 // 8 bit strap maximum+terminator
+  UCHAR                      ucNumOfVRAMModule;
+  ATOM_VRAM_MODULE		       aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+	ATOM_INIT_REG_BLOCK				 asMemPatch;																		 // for allocation
+																																						 //	ATOM_INIT_REG_BLOCK				 aMemAdjust;
+}ATOM_VRAM_INFO_V3;
+
+#define	ATOM_VRAM_INFO_LAST	     ATOM_VRAM_INFO_V3
+
+typedef struct _ATOM_VRAM_INFO_V4
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                     usMemAdjustTblOffset;													 // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+  USHORT                     usMemClkPatchTblOffset;												 //	offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+  USHORT										 usRerseved;
+  UCHAR           	         ucMemDQ7_0ByteRemap;													   // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
+  ULONG                      ulMemDQ7_0BitRemap;                             // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21]
+  UCHAR                      ucReservde[4]; 
+  UCHAR                      ucNumOfVRAMModule;
+  ATOM_VRAM_MODULE_V4		     aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+	ATOM_INIT_REG_BLOCK				 asMemPatch;																		 // for allocation
+																																						 //	ATOM_INIT_REG_BLOCK				 aMemAdjust;
+}ATOM_VRAM_INFO_V4;
+
+typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  USHORT                     usMemAdjustTblOffset;													 // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+  USHORT                     usMemClkPatchTblOffset;												 //	offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+  USHORT                     usPerBytePresetOffset;                          // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings
+  USHORT                     usReserved[3];
+  UCHAR                      ucNumOfVRAMModule;                              // number of VRAM modules
+  UCHAR                      ucMemoryClkPatchTblVer;                         // version of memory AC timing register list
+  UCHAR                      ucVramModuleVer;                                // indicate ATOM_VRAM_MODULE version
+  UCHAR                      ucReserved; 
+  ATOM_VRAM_MODULE_V7		     aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_HEADER_V2_1;
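
Because every module entry advertises its own usModuleSize, the aVramInfo array must be walked byte-wise rather than by sizeof(). A sketch under that assumption (and assuming ucVramModuleVer indicates V7 entries):

static const ATOM_VRAM_MODULE_V7 *
nth_vram_module(const ATOM_VRAM_INFO_HEADER_V2_1 *info, UCHAR n)
{
    /* caller must ensure n < info->ucNumOfVRAMModule */
    const UCHAR *p = (const UCHAR *)&info->aVramInfo[0];
    while (n--)
        p += ((const ATOM_VRAM_MODULE_V7 *)p)->usModuleSize;
    return (const ATOM_VRAM_MODULE_V7 *)p;
}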
+
+
+typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+  UCHAR           	         aVID_PinsShift[9];   //8 bit strap maximum+terminator
+}ATOM_VRAM_GPIO_DETECTION_INFO;
+
+
+typedef struct _ATOM_MEMORY_TRAINING_INFO
+{
+	ATOM_COMMON_TABLE_HEADER   sHeader;
+	UCHAR											 ucTrainingLoop;
+	UCHAR											 ucReserved[3];
+	ATOM_INIT_REG_BLOCK				 asMemTrainingSetting;
+}ATOM_MEMORY_TRAINING_INFO;
+
+
+typedef struct SW_I2C_CNTL_DATA_PARAMETERS
+{
+  UCHAR    ucControl;
+  UCHAR    ucData; 
+  UCHAR    ucSatus; 
+  UCHAR    ucTemp; 
+} SW_I2C_CNTL_DATA_PARAMETERS;
+
+#define SW_I2C_CNTL_DATA_PS_ALLOCATION  SW_I2C_CNTL_DATA_PARAMETERS
+
+typedef struct _SW_I2C_IO_DATA_PARAMETERS
+{                               
+  USHORT   GPIO_Info;
+  UCHAR    ucAct; 
+  UCHAR    ucData; 
+ } SW_I2C_IO_DATA_PARAMETERS;
+
+#define SW_I2C_IO_DATA_PS_ALLOCATION  SW_I2C_IO_DATA_PARAMETERS
+
+/****************************SW I2C CNTL DEFINITIONS**********************/
+#define SW_I2C_IO_RESET       0
+#define SW_I2C_IO_GET         1
+#define SW_I2C_IO_DRIVE       2
+#define SW_I2C_IO_SET         3
+#define SW_I2C_IO_START       4
+
+#define SW_I2C_IO_CLOCK       0
+#define SW_I2C_IO_DATA        0x80
+
+#define SW_I2C_IO_ZERO        0
+#define SW_I2C_IO_ONE         0x100
+
+#define SW_I2C_CNTL_READ      0
+#define SW_I2C_CNTL_WRITE     1
+#define SW_I2C_CNTL_START     2
+#define SW_I2C_CNTL_STOP      3
+#define SW_I2C_CNTL_OPEN      4
+#define SW_I2C_CNTL_CLOSE     5
+#define SW_I2C_CNTL_WRITE1BIT 6
+
+//==============================VESA definition Portion===============================
+#define VESA_OEM_PRODUCT_REV			            "01.00"
+#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT	     0xBB	//refer to VBE spec p.32, no TTY support
+#define VESA_MODE_WIN_ATTRIBUTE						     7
+#define VESA_WIN_SIZE											     64
+
+typedef struct _PTR_32_BIT_STRUCTURE
+{
+	USHORT	Offset16;			
+	USHORT	Segment16;				
+} PTR_32_BIT_STRUCTURE;
+
+typedef union _PTR_32_BIT_UNION
+{
+	PTR_32_BIT_STRUCTURE	SegmentOffset;
+	ULONG					        Ptr32_Bit;
+} PTR_32_BIT_UNION;
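
These VBE far pointers hold real-mode segment:offset pairs, so the linear address is segment * 16 + offset. An illustrative conversion (not from the original header):

static ULONG ptr32_to_linear(PTR_32_BIT_UNION p)
{
    return ((ULONG)p.SegmentOffset.Segment16 << 4) + p.SegmentOffset.Offset16;
}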
+
+typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE
+{
+	UCHAR				      VbeSignature[4];
+	USHORT				    VbeVersion;
+	PTR_32_BIT_UNION	OemStringPtr;
+	UCHAR				      Capabilities[4];
+	PTR_32_BIT_UNION	VideoModePtr;
+	USHORT				    TotalMemory;
+} VBE_1_2_INFO_BLOCK_UPDATABLE;
+
+
+typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE
+{
+	VBE_1_2_INFO_BLOCK_UPDATABLE	CommonBlock;
+	USHORT							    OemSoftRev;
+	PTR_32_BIT_UNION				OemVendorNamePtr;
+	PTR_32_BIT_UNION				OemProductNamePtr;
+	PTR_32_BIT_UNION				OemProductRevPtr;
+} VBE_2_0_INFO_BLOCK_UPDATABLE;
+
+typedef union _VBE_VERSION_UNION
+{
+	VBE_2_0_INFO_BLOCK_UPDATABLE	VBE_2_0_InfoBlock;
+	VBE_1_2_INFO_BLOCK_UPDATABLE	VBE_1_2_InfoBlock;
+} VBE_VERSION_UNION;
+
+typedef struct _VBE_INFO_BLOCK
+{
+	VBE_VERSION_UNION			UpdatableVBE_Info;
+	UCHAR						      Reserved[222];
+	UCHAR						      OemData[256];
+} VBE_INFO_BLOCK;
+
+typedef struct _VBE_FP_INFO
+{
+  USHORT	HSize;
+	USHORT	VSize;
+	USHORT	FPType;
+	UCHAR		RedBPP;
+	UCHAR		GreenBPP;
+	UCHAR		BlueBPP;
+	UCHAR		ReservedBPP;
+	ULONG		RsvdOffScrnMemSize;
+	ULONG		RsvdOffScrnMEmPtr;
+	UCHAR		Reserved[14];
+} VBE_FP_INFO;
+
+typedef struct _VESA_MODE_INFO_BLOCK
+{
+// Mandatory information for all VBE revisions
+  USHORT    ModeAttributes;  //			dw	?	; mode attributes
+	UCHAR     WinAAttributes;  //			db	?	; window A attributes
+	UCHAR     WinBAttributes;  //			db	?	; window B attributes
+	USHORT    WinGranularity;  //			dw	?	; window granularity
+	USHORT    WinSize;         //			dw	?	; window size
+	USHORT    WinASegment;     //			dw	?	; window A start segment
+	USHORT    WinBSegment;     //			dw	?	; window B start segment
+	ULONG     WinFuncPtr;      //			dd	?	; real mode pointer to window function
+	USHORT    BytesPerScanLine;//			dw	?	; bytes per scan line
+
+//; Mandatory information for VBE 1.2 and above
+  USHORT    XResolution;      //			dw	?	; horizontal resolution in pixels or characters
+	USHORT    YResolution;      //			dw	?	; vertical resolution in pixels or characters
+	UCHAR     XCharSize;        //			db	?	; character cell width in pixels
+	UCHAR     YCharSize;        //			db	?	; character cell height in pixels
+	UCHAR     NumberOfPlanes;   //			db	?	; number of memory planes
+	UCHAR     BitsPerPixel;     //			db	?	; bits per pixel
+	UCHAR     NumberOfBanks;    //			db	?	; number of banks
+	UCHAR     MemoryModel;      //			db	?	; memory model type
+	UCHAR     BankSize;         //			db	?	; bank size in KB
+	UCHAR     NumberOfImagePages;//		  db	?	; number of images
+	UCHAR     ReservedForPageFunction;//db	1	; reserved for page function
+
+//; Direct Color fields(required for direct/6 and YUV/7 memory models)
+	UCHAR			RedMaskSize;        //		db	?	; size of direct color red mask in bits
+	UCHAR			RedFieldPosition;   //		db	?	; bit position of lsb of red mask
+	UCHAR			GreenMaskSize;      //		db	?	; size of direct color green mask in bits
+	UCHAR			GreenFieldPosition; //		db	?	; bit position of lsb of green mask
+	UCHAR			BlueMaskSize;       //		db	?	; size of direct color blue mask in bits
+	UCHAR			BlueFieldPosition;  //		db	?	; bit position of lsb of blue mask
+	UCHAR			RsvdMaskSize;       //		db	?	; size of direct color reserved mask in bits
+	UCHAR			RsvdFieldPosition;  //		db	?	; bit position of lsb of reserved mask
+	UCHAR			DirectColorModeInfo;//		db	?	; direct color mode attributes
+
+//; Mandatory information for VBE 2.0 and above
+	ULONG			PhysBasePtr;        //		dd	?	; physical address for flat memory frame buffer
+	ULONG			Reserved_1;         //		dd	0	; reserved - always set to 0
+	USHORT		Reserved_2;         //	  dw	0	; reserved - always set to 0
+
+//; Mandatory information for VBE 3.0 and above
+	USHORT		LinBytesPerScanLine;  //	dw	?	; bytes per scan line for linear modes
+	UCHAR			BnkNumberOfImagePages;//	db	?	; number of images for banked modes
+	UCHAR			LinNumberOfImagPages; //	db	?	; number of images for linear modes
+	UCHAR			LinRedMaskSize;       //	db	?	; size of direct color red mask(linear modes)
+	UCHAR			LinRedFieldPosition;  //	db	?	; bit position of lsb of red mask(linear modes)
+	UCHAR			LinGreenMaskSize;     //	db	?	; size of direct color green mask(linear modes)
+	UCHAR			LinGreenFieldPosition;//	db	?	; bit position of lsb of green mask(linear modes)
+	UCHAR			LinBlueMaskSize;      //	db	?	; size of direct color blue mask(linear modes)
+	UCHAR			LinBlueFieldPosition; //	db	?	; bit position of lsb of blue mask(linear modes)
+	UCHAR			LinRsvdMaskSize;      //	db	?	; size of direct color reserved mask(linear modes)
+	UCHAR			LinRsvdFieldPosition; //	db	?	; bit position of lsb of reserved mask(linear modes)
+	ULONG			MaxPixelClock;        //	dd	?	; maximum pixel clock(in Hz) for graphics mode
+	UCHAR			Reserved;             //	db	190 dup (0)
+} VESA_MODE_INFO_BLOCK;
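
For a VBE 3.0 linear mode the visible frame-buffer footprint follows directly from the block above: the linear pitch times the vertical resolution. A small sketch (helper name hypothetical):

static ULONG vbe_linear_fb_bytes(const VESA_MODE_INFO_BLOCK *mi)
{
    /* bytes per scan line (linear modes) * number of scan lines */
    return (ULONG)mi->LinBytesPerScanLine * mi->YResolution;
}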
+
+// BIOS function CALLS
+#define ATOM_BIOS_EXTENDED_FUNCTION_CODE        0xA0	        // ATI Extended Function code
+#define ATOM_BIOS_FUNCTION_COP_MODE             0x00
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY1         0x04
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY2         0x05
+#define ATOM_BIOS_FUNCTION_SHORT_QUERY3         0x06
+#define ATOM_BIOS_FUNCTION_GET_DDC              0x0B   
+#define ATOM_BIOS_FUNCTION_ASIC_DSTATE          0x0E
+#define ATOM_BIOS_FUNCTION_DEBUG_PLAY           0x0F
+#define ATOM_BIOS_FUNCTION_STV_STD              0x16
+#define ATOM_BIOS_FUNCTION_DEVICE_DET           0x17
+#define ATOM_BIOS_FUNCTION_DEVICE_SWITCH        0x18
+
+#define ATOM_BIOS_FUNCTION_PANEL_CONTROL        0x82
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET       0x83
+#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH    0x84
+#define ATOM_BIOS_FUNCTION_HW_ICON              0x8A 
+#define ATOM_BIOS_FUNCTION_SET_CMOS             0x8B
+#define SUB_FUNCTION_UPDATE_DISPLAY_INFO        0x8000          // Sub function 80
+#define SUB_FUNCTION_UPDATE_EXPANSION_INFO      0x8100          // Sub function 81
+
+#define ATOM_BIOS_FUNCTION_DISPLAY_INFO         0x8D
+#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF        0x8E
+#define ATOM_BIOS_FUNCTION_VIDEO_STATE          0x8F 
+#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE    0x0300          // Sub function 03  
+#define ATOM_SUB_FUNCTION_GET_LIDSTATE          0x0700          // Sub function 7
+#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE  0x1400          // Notify caller the current thermal state
+#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300          // Notify caller the current critical state
+#define ATOM_SUB_FUNCTION_SET_LIDSTATE          0x8500          // Sub function 85
+#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900// Sub function 89
+#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT    0x9400          // Notify caller that ADC is supported
+     
+
+#define ATOM_BIOS_FUNCTION_VESA_DPMS            0x4F10          // Set DPMS 
+#define ATOM_SUB_FUNCTION_SET_DPMS              0x0001          // BL: Sub function 01 
+#define ATOM_SUB_FUNCTION_GET_DPMS              0x0002          // BL: Sub function 02 
+#define ATOM_PARAMETER_VESA_DPMS_ON             0x0000          // BH Parameter for DPMS ON.  
+#define ATOM_PARAMETER_VESA_DPMS_STANDBY        0x0100          // BH Parameter for DPMS STANDBY  
+#define ATOM_PARAMETER_VESA_DPMS_SUSPEND        0x0200          // BH Parameter for DPMS SUSPEND
+#define ATOM_PARAMETER_VESA_DPMS_OFF            0x0400          // BH Parameter for DPMS OFF
+#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON      0x0800          // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED)
+
+#define ATOM_BIOS_RETURN_CODE_MASK              0x0000FF00L
+#define ATOM_BIOS_REG_HIGH_MASK                 0x0000FF00L
+#define ATOM_BIOS_REG_LOW_MASK                  0x000000FFL
+
+// structure used for VBIOS only
+
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO
+{
+	USHORT usTransmitterObjId;
+	USHORT usSupportDevice;
+  UCHAR  ucTransmitterCmdTblId;
+	UCHAR  ucConfig;
+	UCHAR  ucEncoderID;					 //available 1st encoder ( default )
+	UCHAR  ucOptionEncoderID;    //available 2nd encoder ( optional )
+	UCHAR  uc2ndEncoderID;
+	UCHAR  ucReserved;
+}ASIC_TRANSMITTER_INFO;
+
+#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE          0x01
+#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE         0x02
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK    0xc4
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A             0x00
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B             0x04
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C             0x40
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D             0x44
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E             0x80
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F             0x84
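
The encoder object is packed into the non-contiguous bits selected by ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK (0xc4), so comparing the masked ucConfig against the ENCODER_A..F codes recovers it. A sketch only (function name hypothetical):

static char transmitter_encoder_letter(const ASIC_TRANSMITTER_INFO *ti)
{
    switch (ti->ucConfig & ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK) {
    case ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A: return 'A';
    case ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B: return 'B';
    case ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C: return 'C';
    case ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D: return 'D';
    case ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E: return 'E';
    case ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F: return 'F';
    default: return '?';
    }
}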
+
+typedef struct _ASIC_ENCODER_INFO
+{
+	UCHAR ucEncoderID;
+	UCHAR ucEncoderConfig;
+  USHORT usEncoderCmdTblId;
+}ASIC_ENCODER_INFO;
+
+typedef struct _ATOM_DISP_OUT_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+	USHORT ptrTransmitterInfo;
+	USHORT ptrEncoderInfo;
+	ASIC_TRANSMITTER_INFO  asTransmitterInfo[1];
+	ASIC_ENCODER_INFO      asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO;
+
+typedef struct _ATOM_DISP_OUT_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+	USHORT ptrTransmitterInfo;
+	USHORT ptrEncoderInfo;
+  USHORT ptrMainCallParserFar;                  // direct address of main parser call in VBIOS binary. 
+	ASIC_TRANSMITTER_INFO  asTransmitterInfo[1];
+	ASIC_ENCODER_INFO      asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO_V2;
+
+
+typedef struct _ATOM_DISP_CLOCK_ID {
+  UCHAR ucPpllId; 
+  UCHAR ucPpllAttribute;
+}ATOM_DISP_CLOCK_ID;
+
+// ucPpllAttribute
+#define CLOCK_SOURCE_SHAREABLE            0x01
+#define CLOCK_SOURCE_DP_MODE              0x02
+#define CLOCK_SOURCE_NONE_DP_MODE         0x04
+
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO_V2
+{
+	USHORT usTransmitterObjId;
+	USHORT usDispClkIdOffset;    // point to clock source id list supported by Encoder Object
+  UCHAR  ucTransmitterCmdTblId;
+	UCHAR  ucConfig;
+	UCHAR  ucEncoderID;					 // available 1st encoder ( default )
+	UCHAR  ucOptionEncoderID;    // available 2nd encoder ( optional )
+	UCHAR  uc2ndEncoderID;
+	UCHAR  ucReserved;
+}ASIC_TRANSMITTER_INFO_V2;
+
+typedef struct _ATOM_DISP_OUT_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+	USHORT ptrTransmitterInfo;
+	USHORT ptrEncoderInfo;
+  USHORT ptrMainCallParserFar;                  // direct address of main parser call in VBIOS binary. 
+  USHORT usReserved;
+  UCHAR  ucDCERevision;   
+  UCHAR  ucMaxDispEngineNum;
+  UCHAR  ucMaxActiveDispEngineNum;
+  UCHAR  ucMaxPPLLNum;
+  UCHAR  ucCoreRefClkSource;                          // value of CORE_REF_CLK_SOURCE
+  UCHAR  ucReserved[3];
+	ASIC_TRANSMITTER_INFO_V2  asTransmitterInfo[1];     // for alignment only
+}ATOM_DISP_OUT_INFO_V3;
+
+typedef enum CORE_REF_CLK_SOURCE{
+  CLOCK_SRC_XTALIN=0,
+  CLOCK_SRC_XO_IN=1,
+  CLOCK_SRC_XO_IN2=2,
+}CORE_REF_CLK_SOURCE;
+
+// DispDevicePriorityInfo
+typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+	USHORT asDevicePriority[16];
+}ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+{
+	USHORT	lpAuxRequest;
+	USHORT  lpDataOut;
+	UCHAR		ucChannelID;
+	union
+	{
+  UCHAR   ucReplyStatus;
+	UCHAR   ucDelay;
+	};
+  UCHAR   ucDataOutLen;
+	UCHAR   ucReserved;
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2
+{
+	USHORT	lpAuxRequest;
+	USHORT  lpDataOut;
+	UCHAR		ucChannelID;
+	union
+	{
+  UCHAR   ucReplyStatus;
+	UCHAR   ucDelay;
+	};
+  UCHAR   ucDataOutLen;
+	UCHAR   ucHPD_ID;                                       //=0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2;
+
+#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION			PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+
+//GetSinkType
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS
+{
+	USHORT ucLinkClock;
+	union 
+	{
+	UCHAR ucConfig;				// for DP training command
+	UCHAR ucI2cId;				// use for GET_SINK_TYPE command
+	};
+	UCHAR ucAction;
+	UCHAR ucStatus;
+	UCHAR ucLaneNum;
+	UCHAR ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS;
+
+// ucAction
+#define ATOM_DP_ACTION_GET_SINK_TYPE							0x01
+/* obsolete */
+#define ATOM_DP_ACTION_TRAINING_START							0x02
+#define ATOM_DP_ACTION_TRAINING_COMPLETE					0x03
+#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL				0x04
+#define ATOM_DP_ACTION_SET_VSWING_PREEMP					0x05
+#define ATOM_DP_ACTION_GET_VSWING_PREEMP					0x06
+#define ATOM_DP_ACTION_BLANKING                   0x07
+
+// ucConfig
+#define ATOM_DP_CONFIG_ENCODER_SEL_MASK						0x03
+#define ATOM_DP_CONFIG_DIG1_ENCODER								0x00
+#define ATOM_DP_CONFIG_DIG2_ENCODER								0x01
+#define ATOM_DP_CONFIG_EXTERNAL_ENCODER						0x02
+#define ATOM_DP_CONFIG_LINK_SEL_MASK							0x04
+#define ATOM_DP_CONFIG_LINK_A											0x00
+#define ATOM_DP_CONFIG_LINK_B											0x04
+/* /obsolete */
+#define DP_ENCODER_SERVICE_PS_ALLOCATION				WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
+{
+	USHORT usExtEncoderObjId;   // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+  UCHAR  ucAuxId;
+  UCHAR  ucAction;
+  UCHAR  ucSinkType;          // Input and output parameter. 
+  UCHAR  ucHPDId;             // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+	UCHAR  ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS_V2;
+
+typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
+{
+  DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
+  PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
+}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
+
+// ucAction
+#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE							0x01
+#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION			    0x02
+
+
+// DP_TRAINING_TABLE
+#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR				ATOM_DP_TRAINING_TBL_ADDR		
+#define DPCD_SET_SS_CNTL_TBL_ADDR													(ATOM_DP_TRAINING_TBL_ADDR + 8 )
+#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR							(ATOM_DP_TRAINING_TBL_ADDR + 16 )
+#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR								(ATOM_DP_TRAINING_TBL_ADDR + 24 )
+#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR								(ATOM_DP_TRAINING_TBL_ADDR + 32)
+#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR							(ATOM_DP_TRAINING_TBL_ADDR + 40)
+#define	DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR							(ATOM_DP_TRAINING_TBL_ADDR + 48)
+#define DP_I2C_AUX_DDC_WRITE_START_TBL_ADDR								(ATOM_DP_TRAINING_TBL_ADDR + 60)
+#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR											(ATOM_DP_TRAINING_TBL_ADDR + 64)
+#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR								(ATOM_DP_TRAINING_TBL_ADDR + 72)
+#define DP_I2C_AUX_DDC_READ_TBL_ADDR											(ATOM_DP_TRAINING_TBL_ADDR + 76)
+#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR                 (ATOM_DP_TRAINING_TBL_ADDR + 80) 
+#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR									(ATOM_DP_TRAINING_TBL_ADDR + 84)
+
+typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+{
+	UCHAR   ucI2CSpeed;
+ 	union
+	{
+   UCHAR ucRegIndex;
+   UCHAR ucStatus;
+	};
+	USHORT  lpI2CDataOut;
+  UCHAR   ucFlag;               
+  UCHAR   ucTransBytes;
+  UCHAR   ucSlaveAddr;
+  UCHAR   ucLineNumber;
+}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
+
+#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION       PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+
+//ucFlag
+#define HW_I2C_WRITE        1
+#define HW_I2C_READ         0
+#define I2C_2BYTE_ADDR      0x02
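
Filling the transaction block for a one-byte register read might look like the sketch below. This is an assumption-labelled example, not driver code: the speed units and the slave-address formatting (7-bit address shifted left) are guesses that must match what the command table actually expects.

static void prep_i2c_read(PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS *p,
                          UCHAR line, UCHAR slave_7bit, UCHAR reg)
{
    p->ucI2CSpeed   = 50;                       /* assumed: 10 kHz units */
    p->ucRegIndex   = reg;                      /* register to read */
    p->lpI2CDataOut = 0;                        /* output buffer offset; caller/firmware specific */
    p->ucFlag       = HW_I2C_READ;
    p->ucTransBytes = 1;
    p->ucSlaveAddr  = (UCHAR)(slave_7bit << 1); /* assumed 8-bit address format */
    p->ucLineNumber = line;
}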
+
+/****************************************************************************/	
+// Structures used by HW_Misc_OperationTable
+/****************************************************************************/	
+typedef struct  _ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 
+{
+  UCHAR  ucCmd;                //  Input: To tell which action to take
+  UCHAR  ucReserved[3];
+  ULONG  ulReserved;
+}ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1; 
+
+typedef struct  _ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1 
+{
+  UCHAR  ucReturnCode;        // Output: Return value based on the action taken
+  UCHAR  ucReserved[3];
+  ULONG  ulReserved;
+}ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1;
+
+// Actions code
+#define  ATOM_GET_SDI_SUPPORT              0xF0
+
+// Return code 
+#define  ATOM_UNKNOWN_CMD                   0
+#define  ATOM_FEATURE_NOT_SUPPORTED         1
+#define  ATOM_FEATURE_SUPPORTED             2
+
+typedef struct _ATOM_HW_MISC_OPERATION_PS_ALLOCATION
+{
+	ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1        sInput_Output;
+	PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS         sReserved; 
+}ATOM_HW_MISC_OPERATION_PS_ALLOCATION;
+
+/****************************************************************************/	
+
+typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+{
+   UCHAR ucHWBlkInst;                // HW block instance, 0, 1, 2, ...
+   UCHAR ucReserved[3]; 
+}SET_HWBLOCK_INSTANCE_PARAMETER_V2;
+
+#define HWBLKINST_INSTANCE_MASK       0x07
+#define HWBLKINST_HWBLK_MASK          0xF0
+#define HWBLKINST_HWBLK_SHIFT         0x04
+
+//ucHWBlock
+#define SELECT_DISP_ENGINE            0
+#define SELECT_DISP_PLL               1
+#define SELECT_DCIO_UNIPHY_LINK0      2
+#define SELECT_DCIO_UNIPHY_LINK1      3
+#define SELECT_DCIO_IMPCAL            4
+#define SELECT_DCIO_DIG               6
+#define SELECT_CRTC_PIXEL_RATE        7
+#define SELECT_VGA_BLK                8
+
+// DIGTransmitterInfoTable structure used to program UNIPHY settings 
+typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{  
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  USHORT usDPVsPreEmphSettingOffset;     // offset of PHY_ANALOG_SETTING_INFO* with DP voltage swing and pre-emphasis for each link clock
+  USHORT usPhyAnalogRegListOffset;       // offset of CLOCK_CONDITION_REGESTER_INFO* with register info for non-DP mode analog settings
+  USHORT usPhyAnalogSettingOffset;       // offset of CLOCK_CONDITION_SETTING_ENTRY* with non-DP mode analog settings for each link clock range
+  USHORT usPhyPllRegListOffset;          // offset of CLOCK_CONDITION_REGESTER_INFO* with PHY PLL register info
+  USHORT usPhyPllSettingOffset;          // offset of CLOCK_CONDITION_SETTING_ENTRY* with PHY PLL settings
+}DIG_TRANSMITTER_INFO_HEADER_V3_1;
+
+typedef struct _CLOCK_CONDITION_REGESTER_INFO{
+  USHORT usRegisterIndex;
+  UCHAR  ucStartBit;
+  UCHAR  ucEndBit;
+}CLOCK_CONDITION_REGESTER_INFO;
+
+typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
+  USHORT usMaxClockFreq;
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  ULONG  ulAnalogSetting[1];
+}CLOCK_CONDITION_SETTING_ENTRY;
+
+typedef struct _CLOCK_CONDITION_SETTING_INFO{
+  USHORT usEntrySize;
+  CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
+}CLOCK_CONDITION_SETTING_INFO;
+
+typedef struct _PHY_CONDITION_REG_VAL{
+  ULONG  ulCondition;
+  ULONG  ulRegVal;
+}PHY_CONDITION_REG_VAL;
+
+typedef struct _PHY_CONDITION_REG_INFO{
+  USHORT usRegIndex;
+  USHORT usSize;
+  PHY_CONDITION_REG_VAL asRegVal[1];
+}PHY_CONDITION_REG_INFO;
+
+typedef struct _PHY_ANALOG_SETTING_INFO{
+  UCHAR  ucEncodeMode;
+  UCHAR  ucPhySel;
+  USHORT usSize;
+  PHY_CONDITION_REG_INFO  asAnalogSetting[1];
+}PHY_ANALOG_SETTING_INFO;
+
+/****************************************************************************/	
+//Portion VI: Definitions for VBIOS MC scratch registers used by the driver
+/****************************************************************************/
+
+#define MC_MISC0__MEMORY_TYPE_MASK    0xF0000000
+#define MC_MISC0__MEMORY_TYPE__GDDR1  0x10000000
+#define MC_MISC0__MEMORY_TYPE__DDR2   0x20000000
+#define MC_MISC0__MEMORY_TYPE__GDDR3  0x30000000
+#define MC_MISC0__MEMORY_TYPE__GDDR4  0x40000000
+#define MC_MISC0__MEMORY_TYPE__GDDR5  0x50000000
+#define MC_MISC0__MEMORY_TYPE__DDR3   0xB0000000
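
Checking the memory type recorded in the MC_MISC0 scratch register is a mask-and-compare; for example (helper name hypothetical):

static int mc_misc0_is_gddr5(ULONG mc_misc0)
{
    return (mc_misc0 & MC_MISC0__MEMORY_TYPE_MASK) ==
           MC_MISC0__MEMORY_TYPE__GDDR5;
}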
+
+/****************************************************************************/	
+//Portion VI: Definitions that are obsolete
+/****************************************************************************/
+
+//==========================================================================================
+//Remove the definitions below when driver is ready!
+typedef struct _ATOM_DAC_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  USHORT                   usMaxFrequency;      // in 10kHz unit
+  USHORT                   usReserved;
+}ATOM_DAC_INFO;
+
+
+typedef struct  _COMPASSIONATE_DATA           
+{
+  ATOM_COMMON_TABLE_HEADER sHeader; 
+
+  //==============================  DAC1 portion
+  UCHAR   ucDAC1_BG_Adjustment;
+  UCHAR   ucDAC1_DAC_Adjustment;
+  USHORT  usDAC1_FORCE_Data;
+  //==============================  DAC2 portion
+  UCHAR   ucDAC2_CRT2_BG_Adjustment;
+  UCHAR   ucDAC2_CRT2_DAC_Adjustment;
+  USHORT  usDAC2_CRT2_FORCE_Data;
+  USHORT  usDAC2_CRT2_MUX_RegisterIndex;
+  UCHAR   ucDAC2_CRT2_MUX_RegisterInfo;     //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR   ucDAC2_NTSC_BG_Adjustment;
+  UCHAR   ucDAC2_NTSC_DAC_Adjustment;
+  USHORT  usDAC2_TV1_FORCE_Data;
+  USHORT  usDAC2_TV1_MUX_RegisterIndex;
+  UCHAR   ucDAC2_TV1_MUX_RegisterInfo;      //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR   ucDAC2_CV_BG_Adjustment;
+  UCHAR   ucDAC2_CV_DAC_Adjustment;
+  USHORT  usDAC2_CV_FORCE_Data;
+  USHORT  usDAC2_CV_MUX_RegisterIndex;
+  UCHAR   ucDAC2_CV_MUX_RegisterInfo;       //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+  UCHAR   ucDAC2_PAL_BG_Adjustment;
+  UCHAR   ucDAC2_PAL_DAC_Adjustment;
+  USHORT  usDAC2_TV2_FORCE_Data;
+}COMPASSIONATE_DATA;
+
+/****************************Supported Device Info Table Definitions**********************/
+//  ucConnectInfo:
+//    [7:4] - connector type
+//      = 1   - VGA connector   
+//      = 2   - DVI-I
+//      = 3   - DVI-D
+//      = 4   - DVI-A
+//      = 5   - SVIDEO
+//      = 6   - COMPOSITE
+//      = 7   - LVDS
+//      = 8   - DIGITAL LINK
+//      = 9   - SCART
+//      = 0xA - HDMI_type A
+//      = 0xB - HDMI_type B
+//      = 0xE - Special case1 (DVI+DIN)
+//      Others=TBD
+//    [3:0] - DAC Associated
+//      = 0   - no DAC
+//      = 1   - DACA
+//      = 2   - DACB
+//      = 3   - External DAC
+//      Others=TBD
+//    
+
+typedef struct _ATOM_CONNECTOR_INFO
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR   bfConnectorType:4;
+  UCHAR   bfAssociatedDAC:4;
+#else
+  UCHAR   bfAssociatedDAC:4;
+  UCHAR   bfConnectorType:4;
+#endif
+}ATOM_CONNECTOR_INFO;
+
+typedef union _ATOM_CONNECTOR_INFO_ACCESS
+{
+  ATOM_CONNECTOR_INFO sbfAccess;
+  UCHAR               ucAccess;
+}ATOM_CONNECTOR_INFO_ACCESS;
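
The union lets the same byte be read raw or through the bit fields, with the ATOM_BIG_ENDIAN guard keeping the nibble order consistent across hosts. An illustrative decode (function name hypothetical):

static void decode_connector(UCHAR raw, UCHAR *type, UCHAR *dac)
{
    ATOM_CONNECTOR_INFO_ACCESS a;
    a.ucAccess = raw;
    *type = a.sbfAccess.bfConnectorType;   /* [7:4] per the table above */
    *dac  = a.sbfAccess.bfAssociatedDAC;   /* [3:0] */
}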
+
+typedef struct _ATOM_CONNECTOR_INFO_I2C
+{
+  ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
+  ATOM_I2C_ID_CONFIG_ACCESS  sucI2cId;
+}ATOM_CONNECTOR_INFO_I2C;
+
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO
+{ 
+  ATOM_COMMON_TABLE_HEADER	sHeader;
+  USHORT                    usDeviceSupport;
+  ATOM_CONNECTOR_INFO_I2C   asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
+}ATOM_SUPPORTED_DEVICES_INFO;
+
+#define NO_INT_SRC_MAPPED       0xFF
+
+typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP
+{
+  UCHAR   ucIntSrcBitmap;
+}ATOM_CONNECTOR_INC_SRC_BITMAP;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2
+{ 
+  ATOM_COMMON_TABLE_HEADER      sHeader;
+  USHORT                        usDeviceSupport;
+  ATOM_CONNECTOR_INFO_I2C       asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+  ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+}ATOM_SUPPORTED_DEVICES_INFO_2;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1
+{ 
+  ATOM_COMMON_TABLE_HEADER      sHeader;
+  USHORT                        usDeviceSupport;
+  ATOM_CONNECTOR_INFO_I2C       asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
+  ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_SUPPORTED_DEVICES_INFO_2d1;
+
+#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1
+
+
+
+typedef struct _ATOM_MISC_CONTROL_INFO
+{
+   USHORT usFrequency;
+   UCHAR  ucPLL_ChargePump;				                // PLL charge-pump gain control
+   UCHAR  ucPLL_DutyCycle;				                // PLL duty cycle control
+   UCHAR  ucPLL_VCO_Gain;				                  // PLL VCO gain control
+   UCHAR  ucPLL_VoltageSwing;			                // PLL driver voltage swing control
+}ATOM_MISC_CONTROL_INFO;  
+
+
+#define ATOM_MAX_MISC_INFO       4
+
+typedef struct _ATOM_TMDS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  USHORT							usMaxFrequency;             // in 10kHz
+  ATOM_MISC_CONTROL_INFO				asMiscInfo[ATOM_MAX_MISC_INFO];
+}ATOM_TMDS_INFO;
+
+
+typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE
+{
+  UCHAR ucTVStandard;     //Same as TV standards defined above, 
+  UCHAR ucPadding[1];
+}ATOM_ENCODER_ANALOG_ATTRIBUTE;
+
+typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE
+{
+  UCHAR ucAttribute;      //Same as other digital encoder attributes defined above
+  UCHAR ucPadding[1];		
+}ATOM_ENCODER_DIGITAL_ATTRIBUTE;
+
+typedef union _ATOM_ENCODER_ATTRIBUTE
+{
+  ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
+  ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
+}ATOM_ENCODER_ATTRIBUTE;
+
+
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS
+{
+  USHORT usPixelClock; 
+  USHORT usEncoderID; 
+  UCHAR  ucDeviceType;												//Use ATOM_DEVICE_xxx1_Index to indicate device type only.	
+  UCHAR  ucAction;														//ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+  ATOM_ENCODER_ATTRIBUTE usDevAttr;     		
+}DVO_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION
+{                               
+  DVO_ENCODER_CONTROL_PARAMETERS    sDVOEncoder;
+  WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION      sReserved;     //Caller doesn't need to init this portion
+}DVO_ENCODER_CONTROL_PS_ALLOCATION;
+
+
+#define ATOM_XTMDS_ASIC_SI164_ID        1
+#define ATOM_XTMDS_ASIC_SI178_ID        2
+#define ATOM_XTMDS_ASIC_TFP513_ID       3
+#define ATOM_XTMDS_SUPPORTED_SINGLELINK 0x00000001
+#define ATOM_XTMDS_SUPPORTED_DUALLINK   0x00000002
+#define ATOM_XTMDS_MVPU_FPGA            0x00000004
+
+                           
+typedef struct _ATOM_XTMDS_INFO
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;  
+  USHORT                     usSingleLinkMaxFrequency; 
+  ATOM_I2C_ID_CONFIG_ACCESS  sucI2cId;           // ID of the I2C line used to control the external chip
+  UCHAR                      ucXtransimitterID;          
+  UCHAR                      ucSupportedLink;    // Bit field, bit0=1, single link supported;bit1=1,dual link supported
+  UCHAR                      ucSequnceAlterID;   // Even with the same external TMDS ASIC, it's possible that the programming sequence alters 
+                                                 // due to design. This ID is used to alert the driver that the sequence is not "standard"!              
+  UCHAR                      ucMasterAddress;    // Address to control Master xTMDS Chip
+  UCHAR                      ucSlaveAddress;     // Address to control Slave xTMDS Chip
+}ATOM_XTMDS_INFO;
+
+typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS
+{  
+  UCHAR ucEnable;                     // ATOM_ENABLE=On or ATOM_DISABLE=Off
+  UCHAR ucDevice;                     // ATOM_DEVICE_DFP1_INDEX....
+  UCHAR ucPadding[2];             
+}DFP_DPMS_STATUS_CHANGE_PARAMETERS;
+
+/****************************Legacy Power Play Table Definitions **********************/
+
+//Definitions for ulPowerPlayMiscInfo
+#define ATOM_PM_MISCINFO_SPLIT_CLOCK                     0x00000000L
+#define ATOM_PM_MISCINFO_USING_MCLK_SRC                  0x00000001L
+#define ATOM_PM_MISCINFO_USING_SCLK_SRC                  0x00000002L
+
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT            0x00000004L
+#define ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH        0x00000008L
+
+#define ATOM_PM_MISCINFO_LOAD_PERFORMANCE_EN             0x00000010L
+
+#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN          0x00000020L
+#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN          0x00000040L
+#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE                 0x00000080L  //When this bit is set, ucVoltageDropIndex is not an index for a GPIO pin, but a voltage ID that SW needs to program  
+ 
+#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN      0x00000100L
+#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN         0x00000200L
+#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN              0x00000400L
+#define ATOM_PM_MISCINFO_LOAD_BALANCE_EN                 0x00000800L
+#define ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE     0x00001000L
+#define ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE 0x00002000L
+#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE            0x00004000L
+
+#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE             0x00008000L
+#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE                 0x00010000L 
+#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE                 0x00020000L
+#define ATOM_PM_MISCINFO_POWER_SAVING_MODE               0x00040000L
+#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE              0x00080000L
+
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK           0x00300000L  //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT          20 
+
+#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE                 0x00400000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2      0x00800000L
+#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4      0x01000000L
+#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN            0x02000000L  //When set, dynamic HDP block is enabled
+#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN        0x04000000L  //When set, dynamic MC host block is enabled
+#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN              0x08000000L  //When set, this mode is for accelerated 3D
+
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK   0x70000000L  //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) 
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT  28
+#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS                0x80000000L
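
Extracting the PowerPlay settings group (1 = Optimal Battery Life ... 5 = Optimal Performance) from ulMiscInfo uses the mask/shift pair above; a one-line sketch (function name hypothetical):

static unsigned int pm_settings_group(ULONG misc_info)
{
    return (unsigned int)((misc_info & ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK) >>
                          ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT);
}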
+
+#define ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE            0x00000001L
+#define ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT          0x00000002L
+#define ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN           0x00000004L
+#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO            0x00000008L
+#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE              0x00000010L
+#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN       0x00000020L
+#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE         0x00000040L  //If this bit is set in multi-pp mode, then the driver will pick the one with the lowest power consumption. 
+                                                                      //If it's not set in any pp mode, the driver will use its default logic to pick a pp mode for video playback
+#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC                0x00000080L
+#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN                0x00000100L
+#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE               0x00000200L 
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct  _ATOM_POWERMODE_INFO
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulReserved1;                // must set to 0
+  ULONG     ulReserved2;                // must set to 0
+  USHORT    usEngineClock;
+  USHORT    usMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to GPIO table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+}ATOM_POWERMODE_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct  _ATOM_POWERMODE_INFO_V2
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulMiscInfo2;                
+  ULONG     ulEngineClock;                
+  ULONG     ulMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to GPIO table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+}ATOM_POWERMODE_INFO_V2;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct  _ATOM_POWERMODE_INFO_V3
+{
+  ULONG     ulMiscInfo;                 //The power level should be arranged in ascending order
+  ULONG     ulMiscInfo2;                
+  ULONG     ulEngineClock;                
+  ULONG     ulMemoryClock;
+  UCHAR     ucVoltageDropIndex;         // index to Core (VDDC) voltage table
+  UCHAR     ucSelectedPanel_RefreshRate;// panel refresh rate
+  UCHAR     ucMinTemperature;
+  UCHAR     ucMaxTemperature;
+  UCHAR     ucNumPciELanes;             // number of PCIE lanes
+  UCHAR     ucVDDCI_VoltageDropIndex;   // index to VDDCI voltage table
+}ATOM_POWERMODE_INFO_V3;
+
+
+#define ATOM_MAX_NUMBEROF_POWER_BLOCK  8
+
+#define ATOM_PP_OVERDRIVE_INTBITMAP_AUXWIN            0x01
+#define ATOM_PP_OVERDRIVE_INTBITMAP_OVERDRIVE         0x02
+
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM63      0x01
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1032   0x02
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1030   0x03
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649   0x04
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64      0x05
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375    0x06
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512   0x07	// Andigilog
+
+
+typedef struct  _ATOM_POWERPLAY_INFO
+{
+  ATOM_COMMON_TABLE_HEADER	sHeader; 
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO;
+
+typedef struct  _ATOM_POWERPLAY_INFO_V2
+{
+  ATOM_COMMON_TABLE_HEADER	sHeader; 
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V2;
+  
+typedef struct  _ATOM_POWERPLAY_INFO_V3
+{
+  ATOM_COMMON_TABLE_HEADER	sHeader; 
+  UCHAR    ucOverdriveThermalController;
+  UCHAR    ucOverdriveI2cLine;
+  UCHAR    ucOverdriveIntBitmap;
+  UCHAR    ucOverdriveControllerAddress;
+  UCHAR    ucSizeOfPowerModeEntry;
+  UCHAR    ucNumOfPowerModeEntries;
+  ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V3;
+
+/* New PPlib */
+/**************************************************************************/
+typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+{
+    UCHAR ucType;           // one of ATOM_PP_THERMALCONTROLLER_*
+    UCHAR ucI2cLine;        // as interpreted by DAL I2C
+    UCHAR ucI2cAddress;
+    UCHAR ucFanParameters;  // Fan Control Parameters.
+    UCHAR ucFanMinRPM;      // Fan Minimum RPM (hundreds) -- for display purposes only.
+    UCHAR ucFanMaxRPM;      // Fan Maximum RPM (hundreds) -- for display purposes only.
+    UCHAR ucReserved;       // ----
+    UCHAR ucFlags;          // to be defined
+} ATOM_PPLIB_THERMALCONTROLLER;
+
+#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_PP_FANPARAMETERS_NOFAN                                 0x80    // No fan is connected to this controller.
+
+#define ATOM_PP_THERMALCONTROLLER_NONE      0
+#define ATOM_PP_THERMALCONTROLLER_LM63      1  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1032   2  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1030   3  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_MUA6649   4  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_LM64      5
+#define ATOM_PP_THERMALCONTROLLER_F75375    6  // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_RV6xx     7
+#define ATOM_PP_THERMALCONTROLLER_RV770     8
+#define ATOM_PP_THERMALCONTROLLER_ADT7473   9
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO     11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103   13  /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO      14  /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS  15
+#define ATOM_PP_THERMALCONTROLLER_SISLANDS  16
+#define ATOM_PP_THERMALCONTROLLER_LM96163   17
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    // EMC2103 Fan Control + Internal Thermal Controller
+
+typedef struct _ATOM_PPLIB_STATE
+{
+    UCHAR ucNonClockStateIndex;
+    UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
+
+
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+    UCHAR   ucFanTableFormat;                // Change this if the table format or version changes, so that the other fields no longer match.
+    UCHAR   ucTHyst;                         // Temperature hysteresis. Integer.
+    USHORT  usTMin;                          // The temperature, in units of 0.01 degrees C, below which we just run at a minimal PWM.
+    USHORT  usTMed;                          // The middle temperature where we change slopes.
+    USHORT  usTHigh;                         // The high point above TMed for adjusting the second slope.
+    USHORT  usPWMMin;                        // The minimum PWM value in percent (0.01% increments).
+    USHORT  usPWMMed;                        // The PWM value (in percent) at TMed.
+    USHORT  usPWMHigh;                       // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
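+
+// Illustrative sketch (not part of the original header): the fields above
+// describe a piecewise-linear fan curve -- flat at usPWMMin below usTMin,
+// one slope from (usTMin, usPWMMin) to (usTMed, usPWMMed), and a second
+// slope up to (usTHigh, usPWMHigh).  A hypothetical interpolation helper,
+// with temperatures in 0.01 C and PWM in 0.01% as documented above
+// (assumes usTMin < usTMed < usTHigh):
+//
+//   static USHORT fan_pwm_for_temp(const ATOM_PPLIB_FANTABLE *t, USHORT temp)
+//   {
+//       if (temp <= t->usTMin)
+//           return t->usPWMMin;
+//       if (temp <= t->usTMed)
+//           return t->usPWMMin + (ULONG)(t->usPWMMed - t->usPWMMin) *
+//                      (temp - t->usTMin) / (t->usTMed - t->usTMin);
+//       if (temp <= t->usTHigh)
+//           return t->usPWMMed + (ULONG)(t->usPWMHigh - t->usPWMMed) *
+//                      (temp - t->usTMed) / (t->usTHigh - t->usTMed);
+//       return t->usPWMHigh;
+//   }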
+
+typedef struct _ATOM_PPLIB_FANTABLE2
+{
+    ATOM_PPLIB_FANTABLE basicTable;
+    USHORT  usTMax;                          // The max temperature
+} ATOM_PPLIB_FANTABLE2;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+    USHORT  usSize;
+    ULONG   ulMaxEngineClock;   // For Overdrive.
+    ULONG   ulMaxMemoryClock;   // For Overdrive.
+    // Add extra system parameters here, always adjust size to include all fields.
+    USHORT  usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
+    USHORT  usUVDTableOffset;   //points to ATOM_PPLIB_UVD_Table
+} ATOM_PPLIB_EXTENDEDHEADER;
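+
+// Note (editor's, hedged): older BIOSes ship shorter extended headers, so a
+// consumer should check usSize before touching the trailing offset fields --
+// a common convention, not something mandated by this header itself.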
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000              // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000   // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000                   // Whether the driver controls VDDCI independently of VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000               // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO          0x00020000               // Whether the driver supports the BACO state.
+
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+      ATOM_COMMON_TABLE_HEADER sHeader;
+
+      UCHAR ucDataRevision;
+
+      UCHAR ucNumStates;
+      UCHAR ucStateEntrySize;
+      UCHAR ucClockInfoSize;
+      UCHAR ucNonClockSize;
+
+      // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+      USHORT usStateArrayOffset;
+
+      // offset from start of this table to array of ASIC-specific structures,
+      // currently ATOM_PPLIB_CLOCK_INFO.
+      USHORT usClockInfoArrayOffset;
+
+      // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+      USHORT usNonClockInfoArrayOffset;
+
+      USHORT usBackbiasTime;    // in microseconds
+      USHORT usVoltageTime;     // in microseconds
+      USHORT usTableSize;       //the size of this structure, or the extended structure
+
+      ULONG ulPlatformCaps;            // See ATOM_PP_PLATFORM_CAP_*
+
+      ATOM_PPLIB_THERMALCONTROLLER    sThermalController;
+
+      USHORT usBootClockInfoOffset;
+      USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
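+
+// Illustrative sketch (not part of the original header): walking the state
+// array through the offsets above.  'table' points at this structure;
+// ucStateEntrySize accounts for the variable-sized ATOM_PPLIB_STATE:
+//
+//   const UCHAR *base = (const UCHAR *)table;
+//   const ATOM_PPLIB_STATE *state;
+//   int i;
+//
+//   for (i = 0; i < table->ucNumStates; i++) {
+//       state = (const ATOM_PPLIB_STATE *)
+//           (base + le16_to_cpu(table->usStateArrayOffset) +
+//            i * table->ucStateEntrySize);
+//       // state->ucNonClockStateIndex indexes the non-clock info array;
+//       // state->ucClockStateIndices[] index the clock info array.
+//   }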
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+    ATOM_PPLIB_POWERPLAYTABLE basicTable;
+    UCHAR   ucNumCustomThermalPolicy;
+    USHORT  usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+    ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+    USHORT                     usFormatID;                      // To be used ONLY by PPGen.
+    USHORT                     usFanTableOffset;
+    USHORT                     usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+    ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+    ULONG                      ulGoldenPPID;                    // PPGen use only     
+    ULONG                      ulGoldenRevision;                // PPGen use only
+    USHORT                     usVddcDependencyOnSCLKOffset;
+    USHORT                     usVddciDependencyOnMCLKOffset;
+    USHORT                     usVddcDependencyOnMCLKOffset;
+    USHORT                     usMaxClockVoltageOnDCOffset;
+    USHORT                     usVddcPhaseShedLimitsTableOffset;    // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
+    USHORT                     usReserved;  
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+    ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+    ULONG                      ulTDPLimit;
+    ULONG                      ulNearTDPLimit;
+    ULONG                      ulSQRampingThreshold;
+    USHORT                     usCACLeakageTableOffset;         // Points to ATOM_PPLIB_CAC_Leakage_Table
+    ULONG                      ulCACLeakage;                    // The iLeakage value for the driver-calculated CAC leakage table
+    USHORT                     usTDPODLimit;
+    USHORT                     usLoadLineSlope;                 // in milliOhms * 100
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE          0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY       1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED      3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE   5
+// 2, 4, 6, 7 are reserved
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT                   0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL                0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE     0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST                   0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED                 0x0080
+#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE          0x0100
+#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE      0x0200
+#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE               0x0400
+#define ATOM_PPLIB_CLASSIFICATION_3DLOW                  0x0800
+#define ATOM_PPLIB_CLASSIFICATION_ACPI                   0x1000
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE               0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE                0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE                0x8000
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2     0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV                      0x0002
+#define ATOM_PPLIB_CLASSIFICATION2_MVC                      0x0004   //Multi-View Codec (BD-3D)
+
+//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
+#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK         0x00000002
+
+// 0 is 2.5Gb/s, 1 is 5Gb/s
+#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK            0x00000004
+#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT           2
+
+// stores (lanes - 1); 1, 2, 4, 8, 12, 16 lanes permitted by the PCIE spec
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK            0x000000F8
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT           3
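+// Decoding sketch: since the field stores (lanes - 1), a consumer recovers
+//   lanes = ((ulCapsAndSettings & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+//            ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;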
+
+// lookup into reduced refresh-rate table
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK  0x00000F00
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
+
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED    0
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ         1
+// 2-15 TBD as needed.
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING        0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS  0x00002000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC                       0x00004000
+
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT                     0x00008000
+
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF               0x00010000
+
+//M3 Arb    // 2 bits; currently 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK                       0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT                      17
+
+#define ATOM_PPLIB_ENABLE_DRR                       0x00080000
+
+// remaining 16 bits are reserved
+
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+    UCHAR   ucMinTemperature;
+    UCHAR   ucMaxTemperature;
+    UCHAR   ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
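+// A consumer can tell the two layouts apart by comparing the table's
+// ucNonClockSize against these sizes (a common convention; the VER1 layout
+// lacks the usClassification2/ulVCLK/ulDCLK fields below).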
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+      USHORT usClassification;
+      UCHAR  ucMinTemperature;
+      UCHAR  ucMaxTemperature;
+      ULONG  ulCapsAndSettings;
+      UCHAR  ucRequiredPower;
+      USHORT usClassification2;
+      ULONG  ulVCLK;
+      ULONG  ulDCLK;
+      UCHAR  ucUnused[5];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usUnused1;
+      USHORT usUnused2;
+
+      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
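+
+// Reassembly sketch (not part of the original header): engine and memory
+// clocks are split into a 16-bit low word and an 8-bit high byte; units
+// are 10 kHz as elsewhere in ATOM:
+//
+//   ULONG sclk = le16_to_cpu(info->usEngineClockLow) |
+//                ((ULONG)info->ucEngineClockHigh << 16);
+//   ULONG mclk = le16_to_cpu(info->usMemoryClockLow) |
+//                ((ULONG)info->ucMemoryClockHigh << 16);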
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2          1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE           2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE    4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF    8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF   16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER         32   // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usVDDCI;
+      USHORT usUnused;
+
+      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
+{
+      USHORT usEngineClockLow;
+      UCHAR  ucEngineClockHigh;
+
+      USHORT usMemoryClockLow;
+      UCHAR  ucMemoryClockHigh;
+
+      USHORT usVDDC;
+      USHORT usVDDCI;
+      UCHAR  ucPCIEGen;
+      UCHAR  ucUnused1;
+
+      ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
+
+} ATOM_PPLIB_SI_CLOCK_INFO;
+
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+{
+      USHORT usLowEngineClockLow;         // Low Engine clock in MHz (the same way as on the R600).
+      UCHAR  ucLowEngineClockHigh;
+      USHORT usHighEngineClockLow;        // High Engine clock in MHz.
+      UCHAR  ucHighEngineClockHigh;
+      USHORT usMemoryClockLow;            // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+      UCHAR  ucMemoryClockHigh;           // Currently unused.
+      UCHAR  ucPadding;                   // For proper alignment and size.
+      USHORT usVDDC;                      // For the 780, use: None, Low, High, Variable
+      UCHAR  ucMaxHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}
+      UCHAR  ucMinHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW is enabled. The minimum downstream width may be larger to meet display bandwidth requirements.
+      USHORT usHTLinkFreq;                // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
+      ULONG  ulFlags; 
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE       0 
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW        1 
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH       2 
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE   3 
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE        0   // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW         1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH        2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE       0 
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW        1 
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH       2 
+
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+      USHORT usEngineClockLow;  //clock frequency & 0xFFFF; units of 10 kHz
+      UCHAR  ucEngineClockHigh; //clock frequency >> 16
+      UCHAR  vddcIndex;         //2-bit VDDC index
+      USHORT tdpLimit;
+      //please initialize to 0
+      USHORT rsv1;
+      //please initialize to 0s
+      ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+      //number of valid dpm levels in this state; Driver uses it to calculate the whole 
+      //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+      UCHAR ucNumDPMLevels;
+      
+      //an index into the array of nonClockInfos
+      UCHAR nonClockInfoIndex;
+      /**
+      * Driver will read the first ucNumDPMLevels in this array
+      */
+      UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct _StateArray{
+    //how many states we have 
+    UCHAR ucNumEntries;
+    
+    ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
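+
+// Traversal sketch (not part of the original header): each entry is
+// variable-sized per the formula above, so the array cannot be indexed
+// directly:
+//
+//   const ATOM_PPLIB_STATE_V2 *s = array->states;
+//   int i;
+//
+//   for (i = 0; i < array->ucNumEntries; i++) {
+//       // ... consume s ...
+//       s = (const ATOM_PPLIB_STATE_V2 *)
+//           ((const UCHAR *)s + sizeof(ATOM_PPLIB_STATE_V2) +
+//            (s->ucNumDPMLevels - 1) * sizeof(UCHAR));
+//   }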
+
+
+typedef struct _ClockInfoArray{
+    //how many clock levels we have
+    UCHAR ucNumEntries;
+    
+    //sizeof(ATOM_PPLIB_CLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    UCHAR clockInfo[1];
+}ClockInfoArray;
+
+typedef struct _NonClockInfoArray{
+    //how many non-clock levels we have; normally the same as the number of states
+    UCHAR ucNumEntries;
+    //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+    USHORT usClockLow;
+    UCHAR  ucClockHigh;
+    USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1];             // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
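+
+// Lookup sketch (hypothetical): each record maps a 24-bit clock, split
+// across usClockLow/ucClockHigh, to a voltage:
+//
+//   ULONG  clk = le16_to_cpu(e->usClockLow) | ((ULONG)e->ucClockHigh << 16);
+//   USHORT v   = le16_to_cpu(e->usVoltage);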
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+    USHORT usVddc;
+    USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1];                  // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Record
+{
+    USHORT usVddc;  // We use this field for the "fake" standardized VDDC for power calculations                                                  
+    ULONG  ulLeakageValue;
+}ATOM_PPLIB_CAC_Leakage_Record;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Table
+{
+    UCHAR ucNumEntries;                                                 // Number of entries.
+    ATOM_PPLIB_CAC_Leakage_Record entries[1];                           // Dynamically allocate entries.
+}ATOM_PPLIB_CAC_Leakage_Table;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
+{
+    USHORT usVoltage;
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+}ATOM_PPLIB_PhaseSheddingLimits_Record;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
+{
+    UCHAR ucNumEntries;                                                 // Number of entries.
+    ATOM_PPLIB_PhaseSheddingLimits_Record entries[1];                   // Dynamically allocate entries.
+}ATOM_PPLIB_PhaseSheddingLimits_Table;
+
+typedef struct _VCEClockInfo{
+    USHORT usEVClkLow;
+    UCHAR  ucEVClkHigh;
+    USHORT usECClkLow;
+    UCHAR  ucECClkHigh;
+}VCEClockInfo;
+
+typedef struct _VCEClockInfoArray{
+    UCHAR ucNumEntries;
+    VCEClockInfo entries[1];
+}VCEClockInfoArray;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
+{
+    USHORT usVoltage;
+    UCHAR  ucVCEClockInfoIndex;
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_VCE_State_Record
+{
+    UCHAR  ucVCEClockInfoIndex;
+    UCHAR  ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
+}ATOM_PPLIB_VCE_State_Record;
+
+typedef struct _ATOM_PPLIB_VCE_State_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_VCE_State_Record entries[1];
+}ATOM_PPLIB_VCE_State_Table;
+
+
+typedef struct _ATOM_PPLIB_VCE_Table
+{
+      UCHAR revid;
+//    VCEClockInfoArray array;
+//    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
+//    ATOM_PPLIB_VCE_State_Table states;
+}ATOM_PPLIB_VCE_Table;
+
+
+typedef struct _UVDClockInfo{
+    USHORT usVClkLow;
+    UCHAR  ucVClkHigh;
+    USHORT usDClkLow;
+    UCHAR  ucDClkHigh;
+}UVDClockInfo;
+
+typedef struct _UVDClockInfoArray{
+    UCHAR ucNumEntries;
+    UVDClockInfo entries[1];
+}UVDClockInfoArray;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
+{
+    USHORT usVoltage;
+    UCHAR  ucUVDClockInfoIndex;
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_UVD_State_Record
+{
+    UCHAR  ucUVDClockInfoIndex;
+    UCHAR  ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
+}ATOM_PPLIB_UVD_State_Record;
+
+typedef struct _ATOM_PPLIB_UVD_State_Table
+{
+    UCHAR numEntries;
+    ATOM_PPLIB_UVD_State_Record entries[1];
+}ATOM_PPLIB_UVD_State_Table;
+
+
+typedef struct _ATOM_PPLIB_UVD_Table
+{
+      UCHAR revid;
+//    UVDClockInfoArray array;
+//    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
+//    ATOM_PPLIB_UVD_State_Table states;
+}ATOM_PPLIB_UVD_Table;
+
+/**************************************************************************/
+
+
+// The following definitions are for compatibility across different SW components.
+#define ATOM_MASTER_DATA_TABLE_REVISION   0x01
+#define Object_Info												Object_Header			
+#define	AdjustARB_SEQ											MC_InitParameter
+#define	VRAM_GPIO_DetectionInfo						VoltageObjectInfo
+#define	ASIC_VDDCI_Info                   ASIC_ProfilingInfo														
+#define ASIC_MVDDQ_Info										MemoryTrainingInfo
+#define SS_Info                           PPLL_SS_Info                      
+#define ASIC_MVDDC_Info                   ASIC_InternalSS_Info
+#define DispDevicePriorityInfo						SaveRestoreInfo
+#define DispOutInfo												TV_VideoMode
+
+
+#define ATOM_ENCODER_OBJECT_TABLE         ATOM_OBJECT_TABLE
+#define ATOM_CONNECTOR_OBJECT_TABLE       ATOM_OBJECT_TABLE
+
+//New device naming; remove these when both DAL and VBIOS are ready
+#define DFP2I_OUTPUT_CONTROL_PARAMETERS    CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1X_OUTPUT_CONTROL_PARAMETERS    CRT1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1X_OUTPUT_CONTROL_PS_ALLOCATION DFP1X_OUTPUT_CONTROL_PARAMETERS
+
+#define DFP1I_OUTPUT_CONTROL_PARAMETERS    DFP1_OUTPUT_CONTROL_PARAMETERS
+#define DFP1I_OUTPUT_CONTROL_PS_ALLOCATION DFP1_OUTPUT_CONTROL_PS_ALLOCATION
+
+#define ATOM_DEVICE_DFP1I_SUPPORT          ATOM_DEVICE_DFP1_SUPPORT
+#define ATOM_DEVICE_DFP1X_SUPPORT          ATOM_DEVICE_DFP2_SUPPORT
+
+#define ATOM_DEVICE_DFP1I_INDEX            ATOM_DEVICE_DFP1_INDEX
+#define ATOM_DEVICE_DFP1X_INDEX            ATOM_DEVICE_DFP2_INDEX
+ 
+#define ATOM_DEVICE_DFP2I_INDEX            0x00000009
+#define ATOM_DEVICE_DFP2I_SUPPORT          (0x1L << ATOM_DEVICE_DFP2I_INDEX)
+
+#define ATOM_S0_DFP1I                      ATOM_S0_DFP1
+#define ATOM_S0_DFP1X                      ATOM_S0_DFP2
+
+#define ATOM_S0_DFP2I                      0x00200000L
+#define ATOM_S0_DFP2Ib2                    0x20
+
+#define ATOM_S2_DFP1I_DPMS_STATE           ATOM_S2_DFP1_DPMS_STATE
+#define ATOM_S2_DFP1X_DPMS_STATE           ATOM_S2_DFP2_DPMS_STATE
+
+#define ATOM_S2_DFP2I_DPMS_STATE           0x02000000L
+#define ATOM_S2_DFP2I_DPMS_STATEb3         0x02
+
+#define ATOM_S3_DFP2I_ACTIVEb1             0x02
+
+#define ATOM_S3_DFP1I_ACTIVE               ATOM_S3_DFP1_ACTIVE 
+#define ATOM_S3_DFP1X_ACTIVE               ATOM_S3_DFP2_ACTIVE
+
+#define ATOM_S3_DFP2I_ACTIVE               0x00000200L
+
+#define ATOM_S3_DFP1I_CRTC_ACTIVE          ATOM_S3_DFP1_CRTC_ACTIVE
+#define ATOM_S3_DFP1X_CRTC_ACTIVE          ATOM_S3_DFP2_CRTC_ACTIVE
+#define ATOM_S3_DFP2I_CRTC_ACTIVE          0x02000000L
+
+#define ATOM_S3_DFP2I_CRTC_ACTIVEb3        0x02
+#define ATOM_S5_DOS_REQ_DFP2Ib1            0x02
+
+#define ATOM_S5_DOS_REQ_DFP2I              0x0200
+#define ATOM_S6_ACC_REQ_DFP1I              ATOM_S6_ACC_REQ_DFP1
+#define ATOM_S6_ACC_REQ_DFP1X              ATOM_S6_ACC_REQ_DFP2
+
+#define ATOM_S6_ACC_REQ_DFP2Ib3            0x02
+#define ATOM_S6_ACC_REQ_DFP2I              0x02000000L
+
+#define TMDS1XEncoderControl               DVOEncoderControl           
+#define DFP1XOutputControl                 DVOOutputControl
+
+#define ExternalDFPOutputControl           DFP1XOutputControl
+#define EnableExternalTMDS_Encoder         TMDS1XEncoderControl
+
+#define DFP1IOutputControl                 TMDSAOutputControl
+#define DFP2IOutputControl                 LVTMAOutputControl      
+
+#define DAC1_ENCODER_CONTROL_PARAMETERS    DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define DAC2_ENCODER_CONTROL_PARAMETERS    DAC_ENCODER_CONTROL_PARAMETERS
+#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
+
+#define ucDac1Standard  ucDacStandard
+#define ucDac2Standard  ucDacStandard  
+
+#define TMDS1EncoderControl TMDSAEncoderControl
+#define TMDS2EncoderControl LVTMAEncoderControl
+
+#define DFP1OutputControl   TMDSAOutputControl
+#define DFP2OutputControl   LVTMAOutputControl
+#define CRT1OutputControl   DAC1OutputControl
+#define CRT2OutputControl   DAC2OutputControl
+
+//These two lines will be removed for sure in a few days; will follow up with Michael V.
+#define EnableLVDS_SS   EnableSpreadSpectrumOnPPLL
+#define ENABLE_LVDS_SS_PARAMETERS_V3  ENABLE_SPREAD_SPECTRUM_ON_PPLL  
+
+//#define ATOM_S2_CRT1_DPMS_STATE         0x00010000L
+//#define ATOM_S2_LCD1_DPMS_STATE	        ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_TV1_DPMS_STATE          ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_DFP1_DPMS_STATE         ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_CRT2_DPMS_STATE         ATOM_S2_CRT1_DPMS_STATE
+
+#define ATOM_S6_ACC_REQ_TV2             0x00400000L
+#define ATOM_DEVICE_TV2_INDEX           0x00000006
+#define ATOM_DEVICE_TV2_SUPPORT         (0x1L << ATOM_DEVICE_TV2_INDEX)
+#define ATOM_S0_TV2                     0x00100000L
+#define ATOM_S3_TV2_ACTIVE              ATOM_S3_DFP6_ACTIVE
+#define ATOM_S3_TV2_CRTC_ACTIVE         ATOM_S3_DFP6_CRTC_ACTIVE
+
+//
+#define ATOM_S2_CRT1_DPMS_STATE         0x00010000L
+#define ATOM_S2_LCD1_DPMS_STATE	        0x00020000L
+#define ATOM_S2_TV1_DPMS_STATE          0x00040000L
+#define ATOM_S2_DFP1_DPMS_STATE         0x00080000L
+#define ATOM_S2_CRT2_DPMS_STATE         0x00100000L
+#define ATOM_S2_LCD2_DPMS_STATE         0x00200000L
+#define ATOM_S2_TV2_DPMS_STATE          0x00400000L
+#define ATOM_S2_DFP2_DPMS_STATE         0x00800000L
+#define ATOM_S2_CV_DPMS_STATE           0x01000000L
+#define ATOM_S2_DFP3_DPMS_STATE					0x02000000L
+#define ATOM_S2_DFP4_DPMS_STATE					0x04000000L
+#define ATOM_S2_DFP5_DPMS_STATE					0x08000000L
+
+#define ATOM_S2_CRT1_DPMS_STATEb2       0x01
+#define ATOM_S2_LCD1_DPMS_STATEb2       0x02
+#define ATOM_S2_TV1_DPMS_STATEb2        0x04
+#define ATOM_S2_DFP1_DPMS_STATEb2       0x08
+#define ATOM_S2_CRT2_DPMS_STATEb2       0x10
+#define ATOM_S2_LCD2_DPMS_STATEb2       0x20
+#define ATOM_S2_TV2_DPMS_STATEb2        0x40
+#define ATOM_S2_DFP2_DPMS_STATEb2       0x80
+#define ATOM_S2_CV_DPMS_STATEb3         0x01
+#define ATOM_S2_DFP3_DPMS_STATEb3				0x02
+#define ATOM_S2_DFP4_DPMS_STATEb3				0x04
+#define ATOM_S2_DFP5_DPMS_STATEb3				0x08
+
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3	0x20
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
+#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3  0x80
+
+/*********************************************************************************/
+
+#pragma pack() // BIOS data must use byte alignment
+
+//
+// AMD ACPI Table
+//
+#pragma pack(1)
+
+typedef struct {
+  ULONG Signature;
+  ULONG TableLength;      //Length
+  UCHAR Revision;
+  UCHAR Checksum;
+  UCHAR OemId[6];
+  UCHAR OemTableId[8];    //UINT64  OemTableId;
+  ULONG OemRevision;
+  ULONG CreatorId;
+  ULONG CreatorRevision;
+} AMD_ACPI_DESCRIPTION_HEADER;
+/*
+//EFI_ACPI_DESCRIPTION_HEADER from AcpiCommon.h
+typedef struct {
+  UINT32  Signature;       //0x0
+  UINT32  Length;          //0x4
+  UINT8   Revision;        //0x8
+  UINT8   Checksum;        //0x9
+  UINT8   OemId[6];        //0xA
+  UINT64  OemTableId;      //0x10
+  UINT32  OemRevision;     //0x18
+  UINT32  CreatorId;       //0x1C
+  UINT32  CreatorRevision; //0x20
+}EFI_ACPI_DESCRIPTION_HEADER;
+*/
+typedef struct {
+  AMD_ACPI_DESCRIPTION_HEADER SHeader;
+  UCHAR TableUUID[16];    //0x24
+  ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
+  ULONG Lib1ImageOffset;  //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
+  ULONG Reserved[4];      //0x3C
+}UEFI_ACPI_VFCT;
+
+typedef struct {
+  ULONG  PCIBus;          //0x4C
+  ULONG  PCIDevice;       //0x50
+  ULONG  PCIFunction;     //0x54
+  USHORT VendorID;        //0x58
+  USHORT DeviceID;        //0x5A
+  USHORT SSVID;           //0x5C
+  USHORT SSID;            //0x5E
+  ULONG  Revision;        //0x60
+  ULONG  ImageLength;     //0x64
+}VFCT_IMAGE_HEADER;
+
+
+typedef struct {
+  VFCT_IMAGE_HEADER	VbiosHeader;
+  UCHAR	VbiosContent[1];
+}GOP_VBIOS_CONTENT;
+
+typedef struct {
+  VFCT_IMAGE_HEADER	Lib1Header;
+  UCHAR	Lib1Content[1];
+}GOP_LIB1_CONTENT;
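+
+// Location sketch (hypothetical): given a mapped VFCT table, the first
+// VBIOS image block sits at VBIOSImageOffset from the table start:
+//
+//   const UEFI_ACPI_VFCT *vfct = ...;  /* mapped ACPI VFCT table */
+//   const GOP_VBIOS_CONTENT *vbios =
+//       (const GOP_VBIOS_CONTENT *)((const UCHAR *)vfct +
+//                                   vfct->VBIOSImageOffset);
+//   /* vbios->VbiosHeader.ImageLength bytes of VbiosContent follow */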
+
+#pragma pack()
+
+
+#endif /* _ATOMBIOS_H */
diff --git a/linux-imx/drivers/gpu/drm/radeon/atombios_crtc.c b/linux-imx/drivers/gpu/drm/radeon/atombios_crtc.c
new file mode 100644
index 0000000..971dd87
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -0,0 +1,1952 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include <drm/drm_fixed.h>
+#include "radeon.h"
+#include "atom.h"
+#include "atom-bits.h"
+
+static void atombios_overscan_setup(struct drm_crtc *crtc,
+				    struct drm_display_mode *mode,
+				    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	SET_CRTC_OVERSCAN_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
+	int a1, a2;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	switch (radeon_crtc->rmx_type) {
+	case RMX_CENTER:
+		args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+		args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+		break;
+	case RMX_ASPECT:
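+		/* Compare source and native aspect ratios by cross-multiplying
+		 * (avoids division): a1 > a2 means the native mode is wider,
+		 * so bars go on the left/right; a2 > a1 means it is taller,
+		 * so bars go on the top/bottom.
+		 */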
+		a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
+		a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
+
+		if (a1 > a2) {
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+		} else if (a2 > a1) {
+			args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+			args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+		}
+		break;
+	case RMX_FULL:
+	default:
+		args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
+		args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
+		break;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_scaler_setup(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	ENABLE_SCALER_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	/* fixme - fill in enc_priv for atom dac */
+	enum radeon_tv_std tv_std = TV_STD_NTSC;
+	bool is_tv = false, is_cv = false;
+
+	if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
+		return;
+
+	if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+		tv_std = tv_dac->tv_std;
+		is_tv = true;
+	}
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucScaler = radeon_crtc->crtc_id;
+
+	if (is_tv) {
+		switch (tv_std) {
+		case TV_STD_NTSC:
+		default:
+			args.ucTVStandard = ATOM_TV_NTSC;
+			break;
+		case TV_STD_PAL:
+			args.ucTVStandard = ATOM_TV_PAL;
+			break;
+		case TV_STD_PAL_M:
+			args.ucTVStandard = ATOM_TV_PALM;
+			break;
+		case TV_STD_PAL_60:
+			args.ucTVStandard = ATOM_TV_PAL60;
+			break;
+		case TV_STD_NTSC_J:
+			args.ucTVStandard = ATOM_TV_NTSCJ;
+			break;
+		case TV_STD_SCART_PAL:
+			args.ucTVStandard = ATOM_TV_PAL; /* ??? */
+			break;
+		case TV_STD_SECAM:
+			args.ucTVStandard = ATOM_TV_SECAM;
+			break;
+		case TV_STD_PAL_CN:
+			args.ucTVStandard = ATOM_TV_PALCN;
+			break;
+		}
+		args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+	} else if (is_cv) {
+		args.ucTVStandard = ATOM_TV_CV;
+		args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
+	} else {
+		switch (radeon_crtc->rmx_type) {
+		case RMX_FULL:
+			args.ucEnable = ATOM_SCALER_EXPANSION;
+			break;
+		case RMX_CENTER:
+			args.ucEnable = ATOM_SCALER_CENTER;
+			break;
+		case RMX_ASPECT:
+			args.ucEnable = ATOM_SCALER_EXPANSION;
+			break;
+		default:
+			if (ASIC_IS_AVIVO(rdev))
+				args.ucEnable = ATOM_SCALER_DISABLE;
+			else
+				args.ucEnable = ATOM_SCALER_CENTER;
+			break;
+		}
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	if ((is_tv || is_cv)
+	    && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) {
+		atom_rv515_force_tv_scaler(rdev, radeon_crtc);
+	}
+}
+
+static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index =
+	    GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
+	ENABLE_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucEnable = lock;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
+	ENABLE_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucEnable = state;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq);
+	ENABLE_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucEnable = state;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
+	BLANK_CRTC_PS_ALLOCATION args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucCRTC = radeon_crtc->crtc_id;
+	args.ucBlanking = state;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
+	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+
+	memset(&args, 0, sizeof(args));
+
+	args.ucDispPipeId = radeon_crtc->crtc_id;
+	args.ucEnable = state;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		radeon_crtc->enabled = true;
+		/* adjust pm to dpms changes BEFORE enabling crtcs */
+		radeon_pm_compute_clocks(rdev);
+		atombios_enable_crtc(crtc, ATOM_ENABLE);
+		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
+			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+		atombios_blank_crtc(crtc, ATOM_DISABLE);
+		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+		radeon_crtc_load_lut(crtc);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+		if (radeon_crtc->enabled)
+			atombios_blank_crtc(crtc, ATOM_ENABLE);
+		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
+			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
+		atombios_enable_crtc(crtc, ATOM_DISABLE);
+		radeon_crtc->enabled = false;
+		/* adjust pm to dpms changes AFTER disabling crtcs */
+		radeon_pm_compute_clocks(rdev);
+		break;
+	}
+}
+
+static void
+atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
+			     struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
+	u16 misc = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (radeon_crtc->h_border * 2));
+	args.usH_Blanking_Time =
+		cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (radeon_crtc->h_border * 2));
+	args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (radeon_crtc->v_border * 2));
+	args.usV_Blanking_Time =
+		cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (radeon_crtc->v_border * 2));
+	args.usH_SyncOffset =
+		cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + radeon_crtc->h_border);
+	args.usH_SyncWidth =
+		cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
+	args.usV_SyncOffset =
+		cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + radeon_crtc->v_border);
+	args.usV_SyncWidth =
+		cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+	args.ucH_Border = radeon_crtc->h_border;
+	args.ucV_Border = radeon_crtc->v_border;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		misc |= ATOM_VSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		misc |= ATOM_HSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_CSYNC)
+		misc |= ATOM_COMPOSITESYNC;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		misc |= ATOM_INTERLACE;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		misc |= ATOM_DOUBLE_CLOCK_MODE;
+
+	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+				     struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
+	u16 misc = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.usH_Total = cpu_to_le16(mode->crtc_htotal);
+	args.usH_Disp = cpu_to_le16(mode->crtc_hdisplay);
+	args.usH_SyncStart = cpu_to_le16(mode->crtc_hsync_start);
+	args.usH_SyncWidth =
+		cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
+	args.usV_Total = cpu_to_le16(mode->crtc_vtotal);
+	args.usV_Disp = cpu_to_le16(mode->crtc_vdisplay);
+	args.usV_SyncStart = cpu_to_le16(mode->crtc_vsync_start);
+	args.usV_SyncWidth =
+		cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
+
+	args.ucOverscanRight = radeon_crtc->h_border;
+	args.ucOverscanLeft = radeon_crtc->h_border;
+	args.ucOverscanBottom = radeon_crtc->v_border;
+	args.ucOverscanTop = radeon_crtc->v_border;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		misc |= ATOM_VSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		misc |= ATOM_HSYNC_POLARITY;
+	if (mode->flags & DRM_MODE_FLAG_CSYNC)
+		misc |= ATOM_COMPOSITESYNC;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		misc |= ATOM_INTERLACE;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		misc |= ATOM_DOUBLE_CLOCK_MODE;
+
+	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
+{
+	u32 ss_cntl;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
+			ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+			WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_PPLL2:
+			ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
+			ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+			WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_DCPLL:
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
+			ss_cntl &= ~1;
+			WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_PPLL2:
+			ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
+			ss_cntl &= ~1;
+			WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
+			break;
+		case ATOM_DCPLL:
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+	}
+}
+
+
+union atom_enable_ss {
+	ENABLE_LVDS_SS_PARAMETERS lvds_ss;
+	ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
+};
+
+static void atombios_crtc_program_ss(struct radeon_device *rdev,
+				     int enable,
+				     int pll_id,
+				     int crtc_id,
+				     struct radeon_atom_ss *ss)
+{
+	unsigned i;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
+	union atom_enable_ss args;
+
+	if (!enable) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (rdev->mode_info.crtcs[i] &&
+			    rdev->mode_info.crtcs[i]->enabled &&
+			    i != crtc_id &&
+			    pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+				/* another crtc is using this pll; don't turn
+				 * off spread spectrum as it might turn off
+				 * the display on the active crtc
+				 */
+				return;
+			}
+		}
+	}
+
+	memset(&args, 0, sizeof(args));
+
+	if (ASIC_IS_DCE5(rdev)) {
+		args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
+		args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
+			break;
+		case ATOM_PPLL2:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
+			break;
+		case ATOM_DCPLL:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
+			break;
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+		args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+		args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+		args.v3.ucEnable = enable;
+		if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
+			args.v3.ucEnable = ATOM_DISABLE;
+	} else if (ASIC_IS_DCE4(rdev)) {
+		args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+		args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
+			break;
+		case ATOM_PPLL2:
+			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
+			break;
+		case ATOM_DCPLL:
+			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
+			break;
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+		args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+		args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
+		args.v2.ucEnable = enable;
+		if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
+			args.v2.ucEnable = ATOM_DISABLE;
+	} else if (ASIC_IS_DCE3(rdev)) {
+		args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+		args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		args.v1.ucSpreadSpectrumStep = ss->step;
+		args.v1.ucSpreadSpectrumDelay = ss->delay;
+		args.v1.ucSpreadSpectrumRange = ss->range;
+		args.v1.ucPpll = pll_id;
+		args.v1.ucEnable = enable;
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
+		    (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+			atombios_disable_ss(rdev, pll_id);
+			return;
+		}
+		args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+		args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		args.lvds_ss_2.ucSpreadSpectrumStep = ss->step;
+		args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay;
+		args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
+		args.lvds_ss_2.ucEnable = enable;
+	} else {
+		if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
+		    (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+			atombios_disable_ss(rdev, pll_id);
+			return;
+		}
+		args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+		args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
+		args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2;
+		args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
+		args.lvds_ss.ucEnable = enable;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union adjust_pixel_clock {
+	ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
+	ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
+};
+
+static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder = radeon_crtc->encoder;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	u32 adjusted_clock = mode->clock;
+	int encoder_mode = atombios_get_encoder_mode(encoder);
+	u32 dp_clock = mode->clock;
+	int bpc = radeon_get_monitor_bpc(connector);
+	bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
+
+	/* reset the pll flags */
+	radeon_crtc->pll_flags = 0;
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		if ((rdev->family == CHIP_RS600) ||
+		    (rdev->family == CHIP_RS690) ||
+		    (rdev->family == CHIP_RS740))
+			radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
+				RADEON_PLL_PREFER_CLOSEST_LOWER);
+
+		if (ASIC_IS_DCE32(rdev) && mode->clock > 200000)	/* range limits??? */
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+		else
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+		if (rdev->family < CHIP_RV770)
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+		/* use frac fb div on APUs */
+		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+		/* use frac fb div on RS780/RS880 */
+		if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+		if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+	} else {
+		radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
+
+		if (mode->clock > 200000)	/* range limits??? */
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+		else
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+	}
+
+	if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
+		if (connector) {
+			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+			struct radeon_connector_atom_dig *dig_connector =
+				radeon_connector->con_priv;
+
+			dp_clock = dig_connector->dp_clock;
+		}
+	}
+
+	/* use recommended ref_div for ss */
+	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+		if (radeon_crtc->ss_enabled) {
+			if (radeon_crtc->ss.refdiv) {
+				radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+				radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
+				if (ASIC_IS_AVIVO(rdev))
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+			}
+		}
+	}
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
+		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
+			adjusted_clock = mode->clock * 2;
+		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+			radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD;
+	} else {
+		if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+			radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+		if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
+			radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+	}
+
+	/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
+	 * accordingly based on the encoder/transmitter to work around
+	 * special hw requirements.
+	 */
+	if (ASIC_IS_DCE3(rdev)) {
+		union adjust_pixel_clock args;
+		u8 frev, crev;
+		int index;
+
+		index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
+		if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+					   &crev))
+			return adjusted_clock;
+
+		memset(&args, 0, sizeof(args));
+
+		switch (frev) {
+		case 1:
+			switch (crev) {
+			case 1:
+			case 2:
+				args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+				args.v1.ucTransmitterID = radeon_encoder->encoder_id;
+				args.v1.ucEncodeMode = encoder_mode;
+				if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
+					args.v1.ucConfig |=
+						ADJUST_DISPLAY_CONFIG_SS_ENABLE;
+
+				atom_execute_table(rdev->mode_info.atom_context,
+						   index, (uint32_t *)&args);
+				adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
+				break;
+			case 3:
+				args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
+				args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
+				args.v3.sInput.ucEncodeMode = encoder_mode;
+				args.v3.sInput.ucDispPllConfig = 0;
+				if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
+					args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_SS_ENABLE;
+				if (ENCODER_MODE_IS_DP(encoder_mode)) {
+					args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_COHERENT_MODE;
+					/* 16200 or 27000 */
+					args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+				} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+					struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+					if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
+						/* deep color support */
+						args.v3.sInput.usPixelClock =
+							cpu_to_le16((mode->clock * bpc / 8) / 10);
+					if (dig->coherent_mode)
+						args.v3.sInput.ucDispPllConfig |=
+							DISPPLL_CONFIG_COHERENT_MODE;
+					if (is_duallink)
+						args.v3.sInput.ucDispPllConfig |=
+							DISPPLL_CONFIG_DUAL_LINK;
+				}
+				if (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
+				    ENCODER_OBJECT_ID_NONE)
+					args.v3.sInput.ucExtTransmitterID =
+						radeon_encoder_get_dp_bridge_encoder_id(encoder);
+				else
+					args.v3.sInput.ucExtTransmitterID = 0;
+
+				atom_execute_table(rdev->mode_info.atom_context,
+						   index, (uint32_t *)&args);
+				adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
+				if (args.v3.sOutput.ucRefDiv) {
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+					radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
+				}
+				if (args.v3.sOutput.ucPostDiv) {
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+					radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV;
+					radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
+				}
+				break;
+			default:
+				DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+				return adjusted_clock;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return adjusted_clock;
+		}
+	}
+	return adjusted_clock;
+}
+
+union set_pixel_clock {
+	SET_PIXEL_CLOCK_PS_ALLOCATION base;
+	PIXEL_CLOCK_PARAMETERS v1;
+	PIXEL_CLOCK_PARAMETERS_V2 v2;
+	PIXEL_CLOCK_PARAMETERS_V3 v3;
+	PIXEL_CLOCK_PARAMETERS_V5 v5;
+	PIXEL_CLOCK_PARAMETERS_V6 v6;
+};
+
+/* on DCE5, make sure the voltage is high enough to support the
+ * required disp clk.
+ */
+static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
+				    u32 dispclk)
+{
+	u8 frev, crev;
+	int index;
+	union set_pixel_clock args;
+
+	memset(&args, 0, sizeof(args));
+
+	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+				   &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 5:
+			/* if the default dcpll clock is specified,
+			 * SetPixelClock provides the dividers
+			 */
+			args.v5.ucCRTC = ATOM_CRTC_INVALID;
+			args.v5.usPixelClock = cpu_to_le16(dispclk);
+			args.v5.ucPpll = ATOM_DCPLL;
+			break;
+		case 6:
+			/* if the default dcpll clock is specified,
+			 * SetPixelClock provides the dividers
+			 */
+			args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
+			if (ASIC_IS_DCE61(rdev))
+				args.v6.ucPpll = ATOM_EXT_PLL1;
+			else if (ASIC_IS_DCE6(rdev))
+				args.v6.ucPpll = ATOM_PPLL0;
+			else
+				args.v6.ucPpll = ATOM_DCPLL;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+		return;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_crtc_program_pll(struct drm_crtc *crtc,
+				      u32 crtc_id,
+				      int pll_id,
+				      u32 encoder_mode,
+				      u32 encoder_id,
+				      u32 clock,
+				      u32 ref_div,
+				      u32 fb_div,
+				      u32 frac_fb_div,
+				      u32 post_div,
+				      int bpc,
+				      bool ss_enabled,
+				      struct radeon_atom_ss *ss)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u8 frev, crev;
+	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+	union set_pixel_clock args;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+				   &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			if (clock == ATOM_DISABLE)
+				return;
+			args.v1.usPixelClock = cpu_to_le16(clock / 10);
+			args.v1.usRefDiv = cpu_to_le16(ref_div);
+			args.v1.usFbDiv = cpu_to_le16(fb_div);
+			args.v1.ucFracFbDiv = frac_fb_div;
+			args.v1.ucPostDiv = post_div;
+			args.v1.ucPpll = pll_id;
+			args.v1.ucCRTC = crtc_id;
+			args.v1.ucRefDivSrc = 1;
+			break;
+		case 2:
+			args.v2.usPixelClock = cpu_to_le16(clock / 10);
+			args.v2.usRefDiv = cpu_to_le16(ref_div);
+			args.v2.usFbDiv = cpu_to_le16(fb_div);
+			args.v2.ucFracFbDiv = frac_fb_div;
+			args.v2.ucPostDiv = post_div;
+			args.v2.ucPpll = pll_id;
+			args.v2.ucCRTC = crtc_id;
+			args.v2.ucRefDivSrc = 1;
+			break;
+		case 3:
+			args.v3.usPixelClock = cpu_to_le16(clock / 10);
+			args.v3.usRefDiv = cpu_to_le16(ref_div);
+			args.v3.usFbDiv = cpu_to_le16(fb_div);
+			args.v3.ucFracFbDiv = frac_fb_div;
+			args.v3.ucPostDiv = post_div;
+			args.v3.ucPpll = pll_id;
+			if (crtc_id == ATOM_CRTC2)
+				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
+			else
+				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
+			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+				args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+			args.v3.ucTransmitterId = encoder_id;
+			args.v3.ucEncoderMode = encoder_mode;
+			break;
+		case 5:
+			args.v5.ucCRTC = crtc_id;
+			args.v5.usPixelClock = cpu_to_le16(clock / 10);
+			args.v5.ucRefDiv = ref_div;
+			args.v5.usFbDiv = cpu_to_le16(fb_div);
+			args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+			args.v5.ucPostDiv = post_div;
+			args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
+			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+				switch (bpc) {
+				case 8:
+				default:
+					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+					break;
+				case 10:
+					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+					break;
+				}
+			}
+			args.v5.ucTransmitterID = encoder_id;
+			args.v5.ucEncoderMode = encoder_mode;
+			args.v5.ucPpll = pll_id;
+			break;
+		case 6:
+			args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
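+			/* v6 packs the crtc id into the top byte and the
+			 * pixel clock (in 10 kHz units) into the low 24 bits
+			 */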
+			args.v6.ucRefDiv = ref_div;
+			args.v6.usFbDiv = cpu_to_le16(fb_div);
+			args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+			args.v6.ucPostDiv = post_div;
+			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
+				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+				switch (bpc) {
+				case 8:
+				default:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+					break;
+				case 10:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
+					break;
+				case 12:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
+					break;
+				case 16:
+					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+					break;
+				}
+			}
+			args.v6.ucTransmitterID = encoder_id;
+			args.v6.ucEncoderMode = encoder_mode;
+			args.v6.ucPpll = pll_id;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+		return;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
+
+	radeon_crtc->bpc = 8;
+	radeon_crtc->ss_enabled = false;
+
+	if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+	    (radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
+		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+		struct drm_connector *connector =
+			radeon_get_connector_for_encoder(radeon_crtc->encoder);
+		struct radeon_connector *radeon_connector =
+			to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+		int dp_clock;
+		radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
+
+		switch (encoder_mode) {
+		case ATOM_ENCODER_MODE_DP_MST:
+		case ATOM_ENCODER_MODE_DP:
+			/* DP/eDP */
+			dp_clock = dig_connector->dp_clock / 10;
+			if (ASIC_IS_DCE4(rdev))
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
+									 ASIC_INTERNAL_SS_ON_DP,
+									 dp_clock);
+			else {
+				if (dp_clock == 16200) {
+					radeon_crtc->ss_enabled =
+						radeon_atombios_get_ppll_ss_info(rdev,
+										 &radeon_crtc->ss,
+										 ATOM_DP_SS_ID2);
+					if (!radeon_crtc->ss_enabled)
+						radeon_crtc->ss_enabled =
+							radeon_atombios_get_ppll_ss_info(rdev,
+											 &radeon_crtc->ss,
+											 ATOM_DP_SS_ID1);
+				} else {
+					radeon_crtc->ss_enabled =
+						radeon_atombios_get_ppll_ss_info(rdev,
+										 &radeon_crtc->ss,
+										 ATOM_DP_SS_ID1);
+				}
+				/* disable spread spectrum on DCE3 DP */
+				radeon_crtc->ss_enabled = false;
+			}
+			break;
+		case ATOM_ENCODER_MODE_LVDS:
+			if (ASIC_IS_DCE4(rdev))
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 dig->lcd_ss_id,
+									 mode->clock / 10);
+			else
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_ppll_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 dig->lcd_ss_id);
+			break;
+		case ATOM_ENCODER_MODE_DVI:
+			if (ASIC_IS_DCE4(rdev))
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 ASIC_INTERNAL_SS_ON_TMDS,
+									 mode->clock / 10);
+			break;
+		case ATOM_ENCODER_MODE_HDMI:
+			if (ASIC_IS_DCE4(rdev))
+				radeon_crtc->ss_enabled =
+					radeon_atombios_get_asic_ss_info(rdev,
+									 &radeon_crtc->ss,
+									 ASIC_INTERNAL_SS_ON_HDMI,
+									 mode->clock / 10);
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* adjust pixel clock as needed */
+	radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode);
+
+	return true;
+}
+
+static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	u32 pll_clock = mode->clock;
+	u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+	struct radeon_pll *pll;
+	int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
+
+	switch (radeon_crtc->pll_id) {
+	case ATOM_PPLL1:
+		pll = &rdev->clock.p1pll;
+		break;
+	case ATOM_PPLL2:
+		pll = &rdev->clock.p2pll;
+		break;
+	case ATOM_DCPLL:
+	case ATOM_PPLL_INVALID:
+	default:
+		pll = &rdev->clock.dcpll;
+		break;
+	}
+
+	/* update pll params */
+	pll->flags = radeon_crtc->pll_flags;
+	pll->reference_div = radeon_crtc->pll_reference_div;
+	pll->post_div = radeon_crtc->pll_post_div;
+
+	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+		/* TV seems to prefer the legacy algo on some boards */
+		radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
+					  &fb_div, &frac_fb_div, &ref_div, &post_div);
+	else if (ASIC_IS_AVIVO(rdev))
+		radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock,
+					 &fb_div, &frac_fb_div, &ref_div, &post_div);
+	else
+		radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
+					  &fb_div, &frac_fb_div, &ref_div, &post_div);
+
+	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id,
+				 radeon_crtc->crtc_id, &radeon_crtc->ss);
+
+	atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+				  encoder_mode, radeon_encoder->encoder_id, mode->clock,
+				  ref_div, fb_div, frac_fb_div, post_div,
+				  radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
+
+	if (radeon_crtc->ss_enabled) {
+		/* calculate ss amount and step size */
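+		/* the feedback divider is carried in tenths (fb_div * 10 +
+		 * frac_fb_div), so "amount" below is the spread in those same
+		 * units; amount / 10 is packed into the integer fbdiv field
+		 * and the remainder (as computed here) into the fractional
+		 * nibble.  This is a reading of the code as written, not of
+		 * the register documentation.
+		 */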
+		if (ASIC_IS_DCE4(rdev)) {
+			u32 step_size;
+			u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
+			radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
+			radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+				ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
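+			/* step size per spread-spectrum update; the 2048
+			 * scale factor and the 125 * 25 / 100 divisor are
+			 * taken verbatim from the hardware encoding (not
+			 * re-derived here), with centre spread stepping
+			 * twice as fast as down spread
+			 */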
+			if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
+				step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+					(125 * 25 * pll->reference_freq / 100);
+			else
+				step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
+					(125 * 25 * pll->reference_freq / 100);
+			radeon_crtc->ss.step = step_size;
+		}
+
+		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id,
+					 radeon_crtc->crtc_id, &radeon_crtc->ss);
+	}
+}
+
+static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 int x, int y, int atomic)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_framebuffer *radeon_fb;
+	struct drm_framebuffer *target_fb;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	uint64_t fb_location;
+	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	unsigned bankw, bankh, mtaspect, tile_split;
+	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+	u32 tmp, viewport_w, viewport_h;
+	int r;
+
+	/* no fb bound */
+	if (!atomic && !crtc->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (atomic) {
+		radeon_fb = to_radeon_framebuffer(fb);
+		target_fb = fb;
+	} else {
+		radeon_fb = to_radeon_framebuffer(crtc->fb);
+		target_fb = crtc->fb;
+	}
+
+	/* If atomic, assume fb object is pinned & idle & fenced and
+	 * just update base pointers
+	 */
+	obj = radeon_fb->obj;
+	rbo = gem_to_radeon_bo(obj);
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+
+	if (atomic)
+		fb_location = radeon_bo_gpu_offset(rbo);
+	else {
+		r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+		if (unlikely(r != 0)) {
+			radeon_bo_unreserve(rbo);
+			return -EINVAL;
+		}
+	}
+
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+
+	switch (target_fb->bits_per_pixel) {
+	case 8:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+		break;
+	case 15:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+		break;
+	case 16:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+		break;
+	case 24:
+	case 32:
+		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+		break;
+	default:
+		DRM_ERROR("Unsupported screen depth %d\n",
+			  target_fb->bits_per_pixel);
+		return -EINVAL;
+	}
+
+	if (tiling_flags & RADEON_TILING_MACRO) {
+		if (rdev->family >= CHIP_TAHITI)
+			tmp = rdev->config.si.tile_config;
+		else if (rdev->family >= CHIP_CAYMAN)
+			tmp = rdev->config.cayman.tile_config;
+		else
+			tmp = rdev->config.evergreen.tile_config;
+
+		switch ((tmp & 0xf0) >> 4) {
+		case 0: /* 4 banks */
+			fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+			break;
+		case 1: /* 8 banks */
+		default:
+			fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+			break;
+		case 2: /* 16 banks */
+			fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+			break;
+		}
+
+		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+
+		evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
+		fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
+		fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
+		fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
+		fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+	} else if (tiling_flags & RADEON_TILING_MICRO)
+		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+
+	if ((rdev->family == CHIP_TAHITI) ||
+	    (rdev->family == CHIP_PITCAIRN))
+		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
+	else if ((rdev->family == CHIP_VERDE) ||
+		 (rdev->family == CHIP_OLAND) ||
+		 (rdev->family == CHIP_HAINAN)) /* for completeness.  HAINAN has no display hw */
+		fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
+
+	switch (radeon_crtc->crtc_id) {
+	case 0:
+		WREG32(AVIVO_D1VGA_CONTROL, 0);
+		break;
+	case 1:
+		WREG32(AVIVO_D2VGA_CONTROL, 0);
+		break;
+	case 2:
+		WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+		break;
+	case 3:
+		WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+		break;
+	case 4:
+		WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+		break;
+	case 5:
+		WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+		break;
+	default:
+		break;
+	}
+
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(fb_location));
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(fb_location));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+	WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+	WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
+
+	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+	WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
+
+	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+	WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+	WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+	WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+	       target_fb->height);
+	x &= ~3;
+	y &= ~1;
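+	/* the viewport start apparently needs 4-pixel horizontal and 2-line
+	 * vertical alignment, hence the masking above
+	 */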
+	WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
+	       (x << 16) | y);
+	viewport_w = crtc->mode.hdisplay;
+	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+	       (viewport_w << 16) | viewport_h);
+
+	/* pageflip setup */
+	/* make sure flip is at vb rather than hb */
+	tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+	tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+	if (!atomic && fb && fb != crtc->fb) {
+		radeon_fb = to_radeon_framebuffer(fb);
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+
+	/* Bytes per pixel may have changed */
+	radeon_bandwidth_update(rdev);
+
+	return 0;
+}
+
+static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
+				  struct drm_framebuffer *fb,
+				  int x, int y, int atomic)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_framebuffer *radeon_fb;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	struct drm_framebuffer *target_fb;
+	uint64_t fb_location;
+	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
+	u32 tmp, viewport_w, viewport_h;
+	int r;
+
+	/* no fb bound */
+	if (!atomic && !crtc->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (atomic) {
+		radeon_fb = to_radeon_framebuffer(fb);
+		target_fb = fb;
+	} else {
+		radeon_fb = to_radeon_framebuffer(crtc->fb);
+		target_fb = crtc->fb;
+	}
+
+	obj = radeon_fb->obj;
+	rbo = gem_to_radeon_bo(obj);
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+
+	/* If atomic, assume fb object is pinned & idle & fenced and
+	 * just update base pointers
+	 */
+	if (atomic)
+		fb_location = radeon_bo_gpu_offset(rbo);
+	else {
+		r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+		if (unlikely(r != 0)) {
+			radeon_bo_unreserve(rbo);
+			return -EINVAL;
+		}
+	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+
+	switch (target_fb->bits_per_pixel) {
+	case 8:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
+		    AVIVO_D1GRPH_CONTROL_8BPP_INDEXED;
+		break;
+	case 15:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+		    AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
+		break;
+	case 16:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+		    AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
+#ifdef __BIG_ENDIAN
+		fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
+		break;
+	case 24:
+	case 32:
+		fb_format =
+		    AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
+		    AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
+#ifdef __BIG_ENDIAN
+		fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
+#endif
+		break;
+	default:
+		DRM_ERROR("Unsupported screen depth %d\n",
+			  target_fb->bits_per_pixel);
+		return -EINVAL;
+	}
+
+	if (rdev->family >= CHIP_R600) {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			fb_format |= R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1;
+		else if (tiling_flags & RADEON_TILING_MICRO)
+			fb_format |= R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1;
+	} else {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
+
+		if (tiling_flags & RADEON_TILING_MICRO)
+			fb_format |= AVIVO_D1GRPH_TILED;
+	}
+
+	if (radeon_crtc->crtc_id == 0)
+		WREG32(AVIVO_D1VGA_CONTROL, 0);
+	else
+		WREG32(AVIVO_D2VGA_CONTROL, 0);
+
+	if (rdev->family >= CHIP_RV770) {
+		if (radeon_crtc->crtc_id) {
+			WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+			WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+		} else {
+			WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+			WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+		}
+	}
+	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32) fb_location);
+	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
+	       radeon_crtc->crtc_offset, (u32) fb_location);
+	WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
+
+	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+	WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
+
+	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+	WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+	WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+	WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+	       target_fb->height);
+	x &= ~3;
+	y &= ~1;
+	WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
+	       (x << 16) | y);
+	viewport_w = crtc->mode.hdisplay;
+	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
+	WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+	       (viewport_w << 16) | viewport_h);
+
+	/* pageflip setup */
+	/* make sure flip is at vb rather than hb */
+	tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+	tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+	if (!atomic && fb && fb != crtc->fb) {
+		radeon_fb = to_radeon_framebuffer(fb);
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+
+	/* Bytes per pixel may have changed */
+	radeon_bandwidth_update(rdev);
+
+	return 0;
+}
+
+int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+			   struct drm_framebuffer *old_fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev))
+		return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
+	else if (ASIC_IS_AVIVO(rdev))
+		return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
+	else
+		return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+				  struct drm_framebuffer *fb,
+				  int x, int y, enum mode_set_atomic state)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev))
+		return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
+	else if (ASIC_IS_AVIVO(rdev))
+		return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
+	else
+		return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+/* properly set additional regs when using atombios */
+static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	u32 disp_merge_cntl;
+
+	switch (radeon_crtc->crtc_id) {
+	case 0:
+		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+		break;
+	case 1:
+		disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+		disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+		WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
+		WREG32(RADEON_FP_H2_SYNC_STRT_WID,   RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
+		WREG32(RADEON_FP_V2_SYNC_STRT_WID,   RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
+		break;
+	}
+}
+
+/**
+ * radeon_get_pll_use_mask - look up a mask of which pplls are in use
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the mask of which PPLLs (Pixel PLLs) are in use.
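+ * Each set bit corresponds to an ATOM_PPLL* id (1 << pll_id), so callers
+ * test availability with, e.g., pll_in_use & (1 << ATOM_PPLL1).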
+ */
+static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct radeon_crtc *test_radeon_crtc;
+	u32 pll_in_use = 0;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+
+		test_radeon_crtc = to_radeon_crtc(test_crtc);
+		if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+			pll_in_use |= (1 << test_radeon_crtc->pll_id);
+	}
+	return pll_in_use;
+}
+
+/**
+ * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
+ * also in DP mode.  For DP, a single PPLL can be used for all DP
+ * crtcs/encoders.
+ */
+static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct radeon_crtc *test_radeon_crtc;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+		test_radeon_crtc = to_radeon_crtc(test_crtc);
+		if (test_radeon_crtc->encoder &&
+		    ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+			/* for DP use the same PLL for all */
+			if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+				return test_radeon_crtc->pll_id;
+		}
+	}
+	return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
+ *
+ * @crtc: drm crtc
+ * @encoder: drm encoder
+ *
+ * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
+ * be shared (i.e., same clock).
+ */
+static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *test_crtc;
+	struct radeon_crtc *test_radeon_crtc;
+	u32 adjusted_clock, test_adjusted_clock;
+
+	adjusted_clock = radeon_crtc->adjusted_clock;
+
+	if (adjusted_clock == 0)
+		return ATOM_PPLL_INVALID;
+
+	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc == test_crtc)
+			continue;
+		test_radeon_crtc = to_radeon_crtc(test_crtc);
+		if (test_radeon_crtc->encoder &&
+		    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+			/* check if we are already driving this connector with another crtc */
+			if (test_radeon_crtc->connector == radeon_crtc->connector) {
+				/* if we are, return that pll */
+				if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+					return test_radeon_crtc->pll_id;
+			}
+			/* for non-DP check the clock */
+			test_adjusted_clock = test_radeon_crtc->adjusted_clock;
+			if ((crtc->mode.clock == test_crtc->mode.clock) &&
+			    (adjusted_clock == test_adjusted_clock) &&
+			    (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
+			    (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
+				return test_radeon_crtc->pll_id;
+		}
+	}
+	return ATOM_PPLL_INVALID;
+}
+
+/**
+ * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
+ *
+ * @crtc: drm crtc
+ *
+ * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
+ * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
+ * monitors a dedicated PPLL must be used.  If a particular board has
+ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+ * as there is no need to program the PLL itself.  If we are not able to
+ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+ * avoid messing up an existing monitor.
+ *
+ * Asic specific PLL information
+ *
+ * DCE 6.1
+ * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
+ * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
+ *
+ * DCE 6.0
+ * - PPLL0 is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 5.0
+ * - DCPLL is available to all UNIPHY (DP only)
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ * DCE 3.0/4.0/4.1
+ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+ *
+ */
+static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	u32 pll_in_use;
+	int pll;
+
+	if (ASIC_IS_DCE61(rdev)) {
+		struct radeon_encoder_atom_dig *dig =
+			radeon_encoder->enc_priv;
+
+		if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
+		    (dig->linkb == false))
+			/* UNIPHY A uses PPLL2 */
+			return ATOM_PPLL2;
+		else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+			/* UNIPHY B/C/D/E/F */
+			if (rdev->clock.dp_extclk)
+				/* skip PPLL programming if using ext clock */
+				return ATOM_PPLL_INVALID;
+			else {
+				/* use the same PPLL for all DP monitors */
+				pll = radeon_get_shared_dp_ppll(crtc);
+				if (pll != ATOM_PPLL_INVALID)
+					return pll;
+			}
+		} else {
+			/* use the same PPLL for all monitors with the same clock */
+			pll = radeon_get_shared_nondp_ppll(crtc);
+			if (pll != ATOM_PPLL_INVALID)
+				return pll;
+		}
+		/* UNIPHY B/C/D/E/F */
+		pll_in_use = radeon_get_pll_use_mask(crtc);
+		if (!(pll_in_use & (1 << ATOM_PPLL0)))
+			return ATOM_PPLL0;
+		if (!(pll_in_use & (1 << ATOM_PPLL1)))
+			return ATOM_PPLL1;
+		DRM_ERROR("unable to allocate a PPLL\n");
+		return ATOM_PPLL_INVALID;
+	} else if (ASIC_IS_DCE41(rdev)) {
+		/* Don't share PLLs on DCE4.1 chips */
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+			if (rdev->clock.dp_extclk)
+				/* skip PPLL programming if using ext clock */
+				return ATOM_PPLL_INVALID;
+		}
+		pll_in_use = radeon_get_pll_use_mask(crtc);
+		if (!(pll_in_use & (1 << ATOM_PPLL1)))
+			return ATOM_PPLL1;
+		if (!(pll_in_use & (1 << ATOM_PPLL2)))
+			return ATOM_PPLL2;
+		DRM_ERROR("unable to allocate a PPLL\n");
+		return ATOM_PPLL_INVALID;
+	} else if (ASIC_IS_DCE4(rdev)) {
+		/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
+		 * depending on the asic:
+		 * DCE4: PPLL or ext clock
+		 * DCE5: PPLL, DCPLL, or ext clock
+		 * DCE6: PPLL, PPLL0, or ext clock
+		 *
+		 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
+		 * PPLL/DCPLL programming and only program the DP DTO for the
+		 * crtc virtual pixel clock.
+		 */
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+			if (rdev->clock.dp_extclk)
+				/* skip PPLL programming if using ext clock */
+				return ATOM_PPLL_INVALID;
+			else if (ASIC_IS_DCE6(rdev))
+				/* use PPLL0 for all DP */
+				return ATOM_PPLL0;
+			else if (ASIC_IS_DCE5(rdev))
+				/* use DCPLL for all DP */
+				return ATOM_DCPLL;
+			else {
+				/* use the same PPLL for all DP monitors */
+				pll = radeon_get_shared_dp_ppll(crtc);
+				if (pll != ATOM_PPLL_INVALID)
+					return pll;
+			}
+		} else {
+			/* use the same PPLL for all monitors with the same clock */
+			pll = radeon_get_shared_nondp_ppll(crtc);
+			if (pll != ATOM_PPLL_INVALID)
+				return pll;
+		}
+		/* all other cases */
+		pll_in_use = radeon_get_pll_use_mask(crtc);
+		if (!(pll_in_use & (1 << ATOM_PPLL1)))
+			return ATOM_PPLL1;
+		if (!(pll_in_use & (1 << ATOM_PPLL2)))
+			return ATOM_PPLL2;
+		DRM_ERROR("unable to allocate a PPLL\n");
+		return ATOM_PPLL_INVALID;
+	} else {
+		/* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
+		/* some atombios implementations (observed on some DCE2/DCE3
+		 * boards) have a bug: the matching between pll and crtc is
+		 * done through PCLK_CRTC[1|2]_CNTL (0x480/0x484), but the
+		 * atombios code uses the pll id (1 or 2) to select which
+		 * register to write, i.e. pll1 uses PCLK_CRTC1_CNTL (0x480)
+		 * and pll2 uses PCLK_CRTC2_CNTL (0x484), and then uses the
+		 * crtc id to choose which value to write.  That is the
+		 * reverse of the register logic, so the only cases that work
+		 * are when the pll id matches the crtc id, or when both
+		 * plls and crtcs are enabled and share the same clock.
+		 *
+		 * So just return the crtc id as if crtc and pll were hard
+		 * linked together even if they aren't.
+		 */
+		return radeon_crtc->crtc_id;
+	}
+}
+
+void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
+{
+	/* always set DCPLL */
+	if (ASIC_IS_DCE6(rdev))
+		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
+	else if (ASIC_IS_DCE4(rdev)) {
+		struct radeon_atom_ss ss;
+		bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+								   ASIC_INTERNAL_SS_ON_DCPLL,
+								   rdev->clock.default_dispclk);
+		if (ss_enabled)
+			atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
+		/* XXX: DCE5, make sure voltage, dispclk is high enough */
+		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
+		if (ss_enabled)
+			atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
+	}
+}
+
+int atombios_crtc_mode_set(struct drm_crtc *crtc,
+			   struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode,
+			   int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder =
+		to_radeon_encoder(radeon_crtc->encoder);
+	bool is_tvcv = false;
+
+	if (radeon_encoder->active_device &
+	    (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+		is_tvcv = true;
+
+	atombios_crtc_set_pll(crtc, adjusted_mode);
+
+	if (ASIC_IS_DCE4(rdev))
+		atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+	else if (ASIC_IS_AVIVO(rdev)) {
+		if (is_tvcv)
+			atombios_crtc_set_timing(crtc, adjusted_mode);
+		else
+			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+	} else {
+		atombios_crtc_set_timing(crtc, adjusted_mode);
+		if (radeon_crtc->crtc_id == 0)
+			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+		radeon_legacy_atom_fixup(crtc);
+	}
+	atombios_crtc_set_base(crtc, x, y, old_fb);
+	atombios_overscan_setup(crtc, mode, adjusted_mode);
+	atombios_scaler_setup(crtc);
+	return 0;
+}
+
+static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+
+	/* assign the encoder to the radeon crtc to avoid repeated lookups later */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			radeon_crtc->encoder = encoder;
+			radeon_crtc->connector = radeon_get_connector_for_encoder(encoder);
+			break;
+		}
+	}
+	if ((radeon_crtc->encoder == NULL) || (radeon_crtc->connector == NULL)) {
+		radeon_crtc->encoder = NULL;
+		radeon_crtc->connector = NULL;
+		return false;
+	}
+	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+		return false;
+	if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
+		return false;
+	/* pick pll */
+	radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+	/* if we can't get a PPLL for a non-DP encoder, fail */
+	if ((radeon_crtc->pll_id == ATOM_PPLL_INVALID) &&
+	    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder)))
+		return false;
+
+	return true;
+}
+
+static void atombios_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* disable crtc pair power gating before programming */
+	if (ASIC_IS_DCE6(rdev))
+		atombios_powergate_crtc(crtc, ATOM_DISABLE);
+
+	atombios_lock_crtc(crtc, ATOM_ENABLE);
+	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void atombios_crtc_commit(struct drm_crtc *crtc)
+{
+	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	atombios_lock_crtc(crtc, ATOM_DISABLE);
+}
+
+static void atombios_crtc_disable(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_atom_ss ss;
+	int i;
+
+	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	if (ASIC_IS_DCE6(rdev))
+		atombios_powergate_crtc(crtc, ATOM_ENABLE);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->mode_info.crtcs[i] &&
+		    rdev->mode_info.crtcs[i]->enabled &&
+		    i != radeon_crtc->crtc_id &&
+		    radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+			/* another crtc is still using this pll,
+			 * so don't turn it off
+			 */
+			goto done;
+		}
+	}
+
+	switch (radeon_crtc->pll_id) {
+	case ATOM_PPLL1:
+	case ATOM_PPLL2:
+		/* disable the ppll */
+		atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+		break;
+	case ATOM_PPLL0:
+		/* disable the ppll */
+		if (ASIC_IS_DCE61(rdev))
+			atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+		break;
+	default:
+		break;
+	}
+done:
+	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+	radeon_crtc->adjusted_clock = 0;
+	radeon_crtc->encoder = NULL;
+	radeon_crtc->connector = NULL;
+}
+
+static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
+	.dpms = atombios_crtc_dpms,
+	.mode_fixup = atombios_crtc_mode_fixup,
+	.mode_set = atombios_crtc_mode_set,
+	.mode_set_base = atombios_crtc_set_base,
+	.mode_set_base_atomic = atombios_crtc_set_base_atomic,
+	.prepare = atombios_crtc_prepare,
+	.commit = atombios_crtc_commit,
+	.load_lut = radeon_crtc_load_lut,
+	.disable = atombios_crtc_disable,
+};
+
+void radeon_atombios_init_crtc(struct drm_device *dev,
+			       struct radeon_crtc *radeon_crtc)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		switch (radeon_crtc->crtc_id) {
+		case 0:
+		default:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+			break;
+		case 1:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+			break;
+		case 2:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+			break;
+		case 3:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+			break;
+		case 4:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+			break;
+		case 5:
+			radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+			break;
+		}
+	} else {
+		if (radeon_crtc->crtc_id == 1)
+			radeon_crtc->crtc_offset =
+				AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+		else
+			radeon_crtc->crtc_offset = 0;
+	}
+	radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+	radeon_crtc->adjusted_clock = 0;
+	radeon_crtc->encoder = NULL;
+	radeon_crtc->connector = NULL;
+	drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/atombios_dp.c b/linux-imx/drivers/gpu/drm/radeon/atombios_dp.c
new file mode 100644
index 0000000..4c05f2b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/atombios_dp.c
@@ -0,0 +1,938 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+#include <drm/drm_dp_helper.h>
+
+/* move these to drm_dp_helper.c/h */
+#define DP_LINK_CONFIGURATION_SIZE 9
+#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
+
+static const char *voltage_names[] = {
+	"0.4V", "0.6V", "0.8V", "1.2V"
+};
+static const char *pre_emph_names[] = {
+	"0dB", "3.5dB", "6dB", "9.5dB"
+};
+
+/***** radeon AUX functions *****/
+
+/* Atom needs data in little endian format,
+ * so swap as appropriate when copying data to
+ * or from atom. Note that atom operates on
+ * dword (32-bit) units.
+ */
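+/* e.g. copying 6 bytes to atom on a big-endian host byteswaps two dwords
+ * in a bounce buffer and then copies only the 6 requested bytes back out
+ */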
+void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+{
+#ifdef __BIG_ENDIAN
+	u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
+	u32 *dst32, *src32;
+	int i;
+
+	memcpy(src_tmp, src, num_bytes);
+	src32 = (u32 *)src_tmp;
+	dst32 = (u32 *)dst_tmp;
+	if (to_le) {
+		for (i = 0; i < ((num_bytes + 3) / 4); i++)
+			dst32[i] = cpu_to_le32(src32[i]);
+		memcpy(dst, dst_tmp, num_bytes);
+	} else {
+		u8 dws = num_bytes & ~3;
+		for (i = 0; i < ((num_bytes + 3) / 4); i++)
+			dst32[i] = le32_to_cpu(src32[i]);
+		memcpy(dst, dst_tmp, dws);
+		if (num_bytes % 4) {
+			for (i = 0; i < (num_bytes % 4); i++)
+				dst[dws+i] = dst_tmp[dws+i];
+		}
+	}
+#else
+	memcpy(dst, src, num_bytes);
+#endif
+}
+
+union aux_channel_transaction {
+	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
+	PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
+};
+
+static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
+				 u8 *send, int send_bytes,
+				 u8 *recv, int recv_size,
+				 u8 delay, u8 *ack)
+{
+	struct drm_device *dev = chan->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	union aux_channel_transaction args;
+	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
+	unsigned char *base;
+	int recv_bytes;
+
+	memset(&args, 0, sizeof(args));
+
+	base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
+
+	radeon_atom_copy_swap(base, send, send_bytes, true);
+
+	args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
+	args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
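+	/* the request payload sits at the start of the scratch area
+	 * (lpAuxRequest = 4) and the reply comes back 16 bytes in
+	 * (lpDataOut = 16 + 4, matching the base + 16 read below); the
+	 * offsets are inferred from how the buffer is used here
+	 */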
+	args.v1.ucDataOutLen = 0;
+	args.v1.ucChannelID = chan->rec.i2c_id;
+	args.v1.ucDelay = delay / 10;
+	if (ASIC_IS_DCE4(rdev))
+		args.v2.ucHPD_ID = chan->rec.hpd;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	*ack = args.v1.ucReplyStatus;
+
+	/* timeout */
+	if (args.v1.ucReplyStatus == 1) {
+		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	/* flags not zero */
+	if (args.v1.ucReplyStatus == 2) {
+		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
+		return -EBUSY;
+	}
+
+	/* error */
+	if (args.v1.ucReplyStatus == 3) {
+		DRM_DEBUG_KMS("dp_aux_ch error\n");
+		return -EIO;
+	}
+
+	recv_bytes = args.v1.ucDataOutLen;
+	if (recv_bytes > recv_size)
+		recv_bytes = recv_size;
+
+	if (recv && recv_size)
+		radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
+
+	return recv_bytes;
+}
+
+static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
+				      u16 address, u8 *send, u8 send_bytes, u8 delay)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	int ret;
+	u8 msg[20];
+	int msg_bytes = send_bytes + 4;
+	u8 ack;
+	unsigned retry;
+
+	if (send_bytes > 16)
+		return -1;
+
+	msg[0] = address;
+	msg[1] = address >> 8;
+	msg[2] = AUX_NATIVE_WRITE << 4;
+	msg[3] = (msg_bytes << 4) | (send_bytes - 1);
+	memcpy(&msg[4], send, send_bytes);
+
+	for (retry = 0; retry < 4; retry++) {
+		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
+					    msg, msg_bytes, NULL, 0, delay, &ack);
+		if (ret == -EBUSY)
+			continue;
+		else if (ret < 0)
+			return ret;
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+			return send_bytes;
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(400);
+		else
+			return -EIO;
+	}
+
+	return -EIO;
+}
+
+static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
+				     u16 address, u8 *recv, int recv_bytes, u8 delay)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 msg[4];
+	int msg_bytes = 4;
+	u8 ack;
+	int ret;
+	unsigned retry;
+
+	msg[0] = address;
+	msg[1] = address >> 8;
+	msg[2] = AUX_NATIVE_READ << 4;
+	msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
+
+	for (retry = 0; retry < 4; retry++) {
+		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
+					    msg, msg_bytes, recv, recv_bytes, delay, &ack);
+		if (ret == -EBUSY)
+			continue;
+		else if (ret < 0)
+			return ret;
+		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+			return ret;
+		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+			udelay(400);
+		else if (ret == 0)
+			return -EPROTO;
+		else
+			return -EIO;
+	}
+
+	return -EIO;
+}
+
+static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
+				 u16 reg, u8 val)
+{
+	radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0);
+}
+
+static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector,
+			       u16 reg)
+{
+	u8 val = 0;
+
+	radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0);
+
+	return val;
+}
+
+int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+			 u8 write_byte, u8 *read_byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
+	u16 address = algo_data->address;
+	u8 msg[5];
+	u8 reply[2];
+	unsigned retry;
+	int msg_bytes;
+	int reply_bytes = 1;
+	int ret;
+	u8 ack;
+
+	/* Set up the command byte */
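+	/* I2C-over-AUX keeps the Middle-Of-Transaction (MOT) bit set on
+	 * every message except the final one (MODE_I2C_STOP) so the sink
+	 * holds the I2C transfer open across AUX packets
+	 */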
+	if (mode & MODE_I2C_READ)
+		msg[2] = AUX_I2C_READ << 4;
+	else
+		msg[2] = AUX_I2C_WRITE << 4;
+
+	if (!(mode & MODE_I2C_STOP))
+		msg[2] |= AUX_I2C_MOT << 4;
+
+	msg[0] = address;
+	msg[1] = address >> 8;
+
+	switch (mode) {
+	case MODE_I2C_WRITE:
+		msg_bytes = 5;
+		msg[3] = msg_bytes << 4;
+		msg[4] = write_byte;
+		break;
+	case MODE_I2C_READ:
+		msg_bytes = 4;
+		msg[3] = msg_bytes << 4;
+		break;
+	default:
+		msg_bytes = 4;
+		msg[3] = 3 << 4;
+		break;
+	}
+
+	for (retry = 0; retry < 4; retry++) {
+		ret = radeon_process_aux_ch(auxch,
+					    msg, msg_bytes, reply, reply_bytes, 0, &ack);
+		if (ret == -EBUSY)
+			continue;
+		else if (ret < 0) {
+			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+			return ret;
+		}
+
+		switch (ack & AUX_NATIVE_REPLY_MASK) {
+		case AUX_NATIVE_REPLY_ACK:
+			/* I2C-over-AUX Reply field is only valid
+			 * when paired with AUX ACK.
+			 */
+			break;
+		case AUX_NATIVE_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_ch native nack\n");
+			return -EREMOTEIO;
+		case AUX_NATIVE_REPLY_DEFER:
+			DRM_DEBUG_KMS("aux_ch native defer\n");
+			udelay(400);
+			continue;
+		default:
+			DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
+			return -EREMOTEIO;
+		}
+
+		switch (ack & AUX_I2C_REPLY_MASK) {
+		case AUX_I2C_REPLY_ACK:
+			if (mode == MODE_I2C_READ)
+				*read_byte = reply[0];
+			return ret;
+		case AUX_I2C_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_i2c nack\n");
+			return -EREMOTEIO;
+		case AUX_I2C_REPLY_DEFER:
+			DRM_DEBUG_KMS("aux_i2c defer\n");
+			udelay(400);
+			break;
+		default:
+			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
+			return -EREMOTEIO;
+		}
+	}
+
+	DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
+	return -EREMOTEIO;
+}
+
+/***** general DP utility functions *****/
+
+#define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_1200
+#define DP_PRE_EMPHASIS_MAX    DP_TRAIN_PRE_EMPHASIS_9_5
+
+static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
+				int lane_count,
+				u8 train_set[4])
+{
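+	/* scan all active lanes, adopt the highest voltage swing and
+	 * pre-emphasis requested by any of them, and program those same
+	 * values on every lane
+	 */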
+	u8 v = 0;
+	u8 p = 0;
+	int lane;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+		u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+		DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
+			  lane,
+			  voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+			  pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+		if (this_v > v)
+			v = this_v;
+		if (this_p > p)
+			p = this_p;
+	}
+
+	if (v >= DP_VOLTAGE_MAX)
+		v |= DP_TRAIN_MAX_SWING_REACHED;
+
+	if (p >= DP_PRE_EMPHASIS_MAX)
+		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+	DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
+		  voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+	for (lane = 0; lane < 4; lane++)
+		train_set[lane] = v | p;
+}
+
+/* convert bits per color (bpc, as reported in the EDID) to bits per
+ * pixel; a bpc of 0 means the EDID didn't specify one, so assume 24 bpp
+ */
+static int convert_bpc_to_bpp(int bpc)
+{
+	if (bpc == 0)
+		return 24;
+	else
+		return bpc * 3;
+}
+
+/* get the max pix clock supported by the link rate and lane num */
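+/* link_rate is the per-lane link symbol clock in kHz (e.g. 162000 for a
+ * 1.62 Gbps lane); each 8b/10b symbol carries 8 data bits, so the payload
+ * bandwidth is link_rate * lane_num * 8 bits/s, and dividing by bpp gives
+ * the max pixel clock in kHz: 162000 * 4 * 8 / 24 = 216000 kHz (216 MHz).
+ */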
+static int dp_get_max_dp_pix_clock(int link_rate,
+				   int lane_num,
+				   int bpp)
+{
+	return (link_rate * lane_num * 8) / bpp;
+}
+
+/***** radeon specific DP functions *****/
+
+static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
+				       u8 dpcd[DP_DPCD_SIZE])
+{
+	int max_link_rate;
+
+	if (radeon_connector_is_dp12_capable(connector))
+		max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
+	else
+		max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
+
+	return max_link_rate;
+}
+
+/* First find the minimum lane count that can carry the pixel clock at
+ * the max link rate (prefer fewer lanes so a lower link rate can be
+ * chosen later), then clamp it to the max lane count the DP panel
+ * supports.
+ */
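+/* Worked example (assumed numbers): a 148.5 MHz mode at 24 bpp on a
+ * 270000 kHz link: 1 lane carries up to 90 MHz and 2 lanes up to 180 MHz,
+ * so lane_num comes out as 2.
+ */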
+static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
+					u8 dpcd[DP_DPCD_SIZE],
+					int pix_clock)
+{
+	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+	int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
+	int max_lane_num = drm_dp_max_lane_count(dpcd);
+	int lane_num;
+	int max_dp_pix_clock;
+
+	for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
+		max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
+		if (pix_clock <= max_dp_pix_clock)
+			break;
+	}
+
+	return lane_num;
+}
+
+static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
+				       u8 dpcd[DP_DPCD_SIZE],
+				       int pix_clock)
+{
+	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+	int lane_num, max_pix_clock;
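+	/* the NUTMEG DP bridge only runs at a fixed 2.7 GHz link rate;
+	 * otherwise prefer the lowest standard rate (1.62, 2.7, then
+	 * 5.4 GHz if DP 1.2 capable) that can carry the mode at the
+	 * chosen lane count
+	 */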
+
+	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG)
+		return 270000;
+
+	lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
+	max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
+	if (pix_clock <= max_pix_clock)
+		return 162000;
+	max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
+	if (pix_clock <= max_pix_clock)
+		return 270000;
+	if (radeon_connector_is_dp12_capable(connector)) {
+		max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
+		if (pix_clock <= max_pix_clock)
+			return 540000;
+	}
+
+	return radeon_dp_get_max_link_rate(connector, dpcd);
+}
+
+static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
+				    int action, int dp_clock,
+				    u8 ucconfig, u8 lane_num)
+{
+	DP_ENCODER_SERVICE_PARAMETERS args;
+	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+
+	memset(&args, 0, sizeof(args));
+	args.ucLinkClock = dp_clock / 10;
+	args.ucConfig = ucconfig;
+	args.ucAction = action;
+	args.ucLaneNum = lane_num;
+	args.ucStatus = 0;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	return args.ucStatus;
+}
+
+u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	struct drm_device *dev = radeon_connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
+					 dig_connector->dp_i2c_bus->rec.i2c_id, 0);
+}
+
+static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 buf[3];
+
+	if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
+		return;
+
+	if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0))
+		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+
+	if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0))
+		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
+			      buf[0], buf[1], buf[2]);
+}
+
+bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 msg[DP_DPCD_SIZE];
+	int ret, i;
+
+	ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
+					DP_DPCD_SIZE, 0);
+	if (ret > 0) {
+		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+		DRM_DEBUG_KMS("DPCD: ");
+		for (i = 0; i < DP_DPCD_SIZE; i++)
+			DRM_DEBUG_KMS("%02x ", msg[i]);
+		DRM_DEBUG_KMS("\n");
+
+		radeon_dp_probe_oui(radeon_connector);
+
+		return true;
+	}
+	dig_connector->dpcd[0] = 0;
+	return false;
+}
+
+int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+			     struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+	u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
+	u8 tmp;
+
+	if (!ASIC_IS_DCE4(rdev))
+		return panel_mode;
+
+	if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
+		/* DP bridge chips */
+		tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
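+		/* bit 0 of DP_EDP_CONFIGURATION_CAP is the alternate
+		 * scrambler reset capability; bridges that set it want the
+		 * internal DP2 panel mode
+		 */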
+		if (tmp & 1)
+			panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+		else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
+			 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
+			panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+		else
+			panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+	} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		/* eDP */
+		tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+		if (tmp & 1)
+			panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+	}
+
+	return panel_mode;
+}
+
+void radeon_dp_set_link_config(struct drm_connector *connector,
+			       const struct drm_display_mode *mode)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *dig_connector;
+
+	if (!radeon_connector->con_priv)
+		return;
+	dig_connector = radeon_connector->con_priv;
+
+	if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+	    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+		dig_connector->dp_clock =
+			radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+		dig_connector->dp_lane_count =
+			radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
+	}
+}
+
+int radeon_dp_mode_valid_helper(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *dig_connector;
+	int dp_clock;
+
+	if (!radeon_connector->con_priv)
+		return MODE_CLOCK_HIGH;
+	dig_connector = radeon_connector->con_priv;
+
+	dp_clock =
+		radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+
+	if ((dp_clock == 540000) &&
+	    (!radeon_connector_is_dp12_capable(connector)))
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
+				      u8 link_status[DP_LINK_STATUS_SIZE])
+{
+	int ret;
+	ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
+					link_status, DP_LINK_STATUS_SIZE, 100);
+	if (ret <= 0) {
+		return false;
+	}
+
+	DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
+	return true;
+}
+
+bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
+{
+	u8 link_status[DP_LINK_STATUS_SIZE];
+	struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+	if (!radeon_dp_get_link_status(radeon_connector, link_status))
+		return false;
+	if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
+		return false;
+	return true;
+}
+
+struct radeon_dp_link_train_info {
+	struct radeon_device *rdev;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	int enc_id;
+	int dp_clock;
+	int dp_lane_count;
+	bool tp3_supported;
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+	u8 train_set[4];
+	u8 link_status[DP_LINK_STATUS_SIZE];
+	u8 tries;
+	bool use_dpencoder;
+};
+
+static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
+{
+	/* set the initial vs/emph on the source */
+	atombios_dig_transmitter_setup(dp_info->encoder,
+				       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
+				       0, dp_info->train_set[0]); /* sets all lanes at once */
+
+	/* set the vs/emph on the sink */
+	radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET,
+				   dp_info->train_set, dp_info->dp_lane_count, 0);
+}
+
+static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
+{
+	int rtp = 0;
+
+	/* set training pattern on the source */
+	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) {
+		switch (tp) {
+		case DP_TRAINING_PATTERN_1:
+			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
+			break;
+		}
+		atombios_dig_encoder_setup(dp_info->encoder, rtp, 0);
+	} else {
+		switch (tp) {
+		case DP_TRAINING_PATTERN_1:
+			rtp = 0;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			rtp = 1;
+			break;
+		}
+		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+					  dp_info->dp_clock, dp_info->enc_id, rtp);
+	}
+
+	/* enable training pattern on the sink */
+	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp);
+}
+
+static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u8 tmp;
+
+	/* power up the sink */
+	if (dp_info->dpcd[0] >= 0x11)
+		radeon_write_dpcd_reg(dp_info->radeon_connector,
+				      DP_SET_POWER, DP_SET_POWER_D0);
+
+	/* possibly enable downspread on the sink */
+	if (dp_info->dpcd[3] & 0x1)
+		radeon_write_dpcd_reg(dp_info->radeon_connector,
+				      DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
+	else
+		radeon_write_dpcd_reg(dp_info->radeon_connector,
+				      DP_DOWNSPREAD_CTRL, 0);
+
+	if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
+	    (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
+		radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
+	}
+
+	/* set the lane count on the sink */
+	tmp = dp_info->dp_lane_count;
+	if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
+
+	/* set the link rate on the sink */
+	tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
+	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
+
+	/* start training on the source */
+	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
+		atombios_dig_encoder_setup(dp_info->encoder,
+					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
+	else
+		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
+					  dp_info->dp_clock, dp_info->enc_id, 0);
+
+	/* disable the training pattern on the sink */
+	radeon_write_dpcd_reg(dp_info->radeon_connector,
+			      DP_TRAINING_PATTERN_SET,
+			      DP_TRAINING_PATTERN_DISABLE);
+
+	return 0;
+}
+
+static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info)
+{
+	udelay(400);
+
+	/* disable the training pattern on the sink */
+	radeon_write_dpcd_reg(dp_info->radeon_connector,
+			      DP_TRAINING_PATTERN_SET,
+			      DP_TRAINING_PATTERN_DISABLE);
+
+	/* disable the training pattern on the source */
+	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
+		atombios_dig_encoder_setup(dp_info->encoder,
+					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
+	else
+		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+					  dp_info->dp_clock, dp_info->enc_id, 0);
+
+	return 0;
+}
+
+static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
+{
+	bool clock_recovery;
+	u8 voltage;
+	int i;
+
+	radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
+	memset(dp_info->train_set, 0, 4);
+	radeon_dp_update_vs_emph(dp_info);
+
+	udelay(400);
+
+	/* clock recovery loop */
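+	/* Follow the sink's requested voltage swing / pre-emphasis until
+	 * every active lane reports CR done; bail out if all lanes already
+	 * run at max swing, or the sink keeps asking for the same voltage.
+	 */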
+	clock_recovery = false;
+	dp_info->tries = 0;
+	voltage = 0xff;
+	while (1) {
+		drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
+
+		if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+			DRM_ERROR("displayport link status failed\n");
+			break;
+		}
+
+		if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+			clock_recovery = true;
+			break;
+		}
+
+		for (i = 0; i < dp_info->dp_lane_count; i++) {
+			if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+				break;
+		}
+		if (i == dp_info->dp_lane_count) {
+			DRM_ERROR("clock recovery reached max voltage\n");
+			break;
+		}
+
+		if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++dp_info->tries;
+			if (dp_info->tries == 5) {
+				DRM_ERROR("clock recovery tried 5 times\n");
+				break;
+			}
+		} else
+			dp_info->tries = 0;
+
+		voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* Compute new train_set as requested by sink */
+		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
+
+		radeon_dp_update_vs_emph(dp_info);
+	}
+	if (!clock_recovery) {
+		DRM_ERROR("clock recovery failed\n");
+		return -1;
+	} else {
+		DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
+			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+			  DP_TRAIN_PRE_EMPHASIS_SHIFT);
+		return 0;
+	}
+}
+
+static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
+{
+	bool channel_eq;
+
+	if (dp_info->tp3_supported)
+		radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
+	else
+		radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);
+
+	/* channel equalization loop */
+	dp_info->tries = 0;
+	channel_eq = false;
+	while (1) {
+		drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
+
+		if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+			DRM_ERROR("displayport link status failed\n");
+			break;
+		}
+
+		if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+			channel_eq = true;
+			break;
+		}
+
+		/* bounded retry: give up after 6 adjustment rounds */
+		if (dp_info->tries > 5) {
+			DRM_ERROR("channel eq failed: 5 tries\n");
+			break;
+		}
+
+		/* Compute new train_set as requested by sink */
+		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
+
+		radeon_dp_update_vs_emph(dp_info);
+		dp_info->tries++;
+	}
+
+	if (!channel_eq) {
+		DRM_ERROR("channel eq failed\n");
+		return -1;
+	} else {
+		DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
+			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
+			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
+		return 0;
+	}
+}
+
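+/*
+ * Top-level training sequence: power up and configure the sink (init),
+ * run clock recovery on TPS1, channel equalization on TPS2 (or TPS3
+ * when supported), then clear the training pattern (finish).
+ */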
+void radeon_dp_link_train(struct drm_encoder *encoder,
+			  struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+	struct radeon_dp_link_train_info dp_info;
+	int index;
+	u8 tmp, frev, crev;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+	dig = radeon_encoder->enc_priv;
+
+	radeon_connector = to_radeon_connector(connector);
+	if (!radeon_connector->con_priv)
+		return;
+	dig_connector = radeon_connector->con_priv;
+
+	if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
+	    (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
+		return;
+
+	/* DPEncoderService tables newer than 1.1 can't properly program the
+	 * training pattern. When facing such a version, use
+	 * DIGxEncoderControl (x == 1 | 2) instead.
+	 */
+	dp_info.use_dpencoder = true;
+	index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+	if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
+		if (crev > 1) {
+			dp_info.use_dpencoder = false;
+		}
+	}
+
+	dp_info.enc_id = 0;
+	if (dig->dig_encoder)
+		dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
+	else
+		dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
+	if (dig->linkb)
+		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B;
+	else
+		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
+
+	tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
+	if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
+		dp_info.tp3_supported = true;
+	else
+		dp_info.tp3_supported = false;
+
+	memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
+	dp_info.rdev = rdev;
+	dp_info.encoder = encoder;
+	dp_info.connector = connector;
+	dp_info.radeon_connector = radeon_connector;
+	dp_info.dp_lane_count = dig_connector->dp_lane_count;
+	dp_info.dp_clock = dig_connector->dp_clock;
+
+	if (radeon_dp_link_train_init(&dp_info))
+		goto done;
+	if (radeon_dp_link_train_cr(&dp_info))
+		goto done;
+	if (radeon_dp_link_train_ce(&dp_info))
+		goto done;
+done:
+	if (radeon_dp_link_train_finish(&dp_info))
+		return;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/atombios_encoders.c b/linux-imx/drivers/gpu/drm/radeon/atombios_encoders.c
new file mode 100644
index 0000000..1b564d7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -0,0 +1,2680 @@
+/*
+ * Copyright 2007-11 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+#include <linux/backlight.h>
+
+extern int atom_debug;
+
+static u8
+radeon_atom_get_backlight_level_from_reg(struct radeon_device *rdev)
+{
+	u8 backlight_level;
+	u32 bios_2_scratch;
+
+	if (rdev->family >= CHIP_R600)
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+	else
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+	backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >>
+			   ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+	return backlight_level;
+}
+
+static void
+radeon_atom_set_backlight_level_to_reg(struct radeon_device *rdev,
+				       u8 backlight_level)
+{
+	u32 bios_2_scratch;
+
+	if (rdev->family >= CHIP_R600)
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+	else
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+	bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+	bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
+			   ATOM_S2_CURRENT_BL_LEVEL_MASK);
+
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+	else
+		WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+}
+
+u8
+atombios_get_backlight_level(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return 0;
+
+	return radeon_atom_get_backlight_level_from_reg(rdev);
+}
+
+void
+atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+{
+	struct drm_encoder *encoder = &radeon_encoder->base;
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_atom_dig *dig;
+	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+	int index;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+	    radeon_encoder->enc_priv) {
+		dig = radeon_encoder->enc_priv;
+		dig->backlight_level = level;
+		radeon_atom_set_backlight_level_to_reg(rdev, dig->backlight_level);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+			if (dig->backlight_level == 0) {
+				args.ucAction = ATOM_LCD_BLOFF;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			} else {
+				args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+				args.ucAction = ATOM_LCD_BLON;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			}
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			if (dig->backlight_level == 0)
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+			else {
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0);
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static u8 radeon_atom_bl_level(struct backlight_device *bd)
+{
+	u8 level;
+
+	/* Convert brightness to hardware level */
+	if (bd->props.brightness < 0)
+		level = 0;
+	else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+		level = RADEON_MAX_BL_LEVEL;
+	else
+		level = bd->props.brightness;
+
+	return level;
+}
+
+static int radeon_atom_backlight_update_status(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+
+	atombios_set_backlight_level(radeon_encoder, radeon_atom_bl_level(bd));
+
+	return 0;
+}
+
+static int radeon_atom_backlight_get_brightness(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	return radeon_atom_get_backlight_level_from_reg(rdev);
+}
+
+static const struct backlight_ops radeon_atom_backlight_ops = {
+	.get_brightness = radeon_atom_backlight_get_brightness,
+	.update_status	= radeon_atom_backlight_update_status,
+};
+
+void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+				struct drm_connector *drm_connector)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd;
+	struct backlight_properties props;
+	struct radeon_backlight_privdata *pdata;
+	struct radeon_encoder_atom_dig *dig;
+	char bl_name[16];
+
+	/* Mac laptops with multiple GPUs use the gmux driver for backlight
+	 * so don't register a backlight device
+	 */
+	if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+	    (rdev->pdev->device == 0x6741))
+		return;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+	if (!rdev->is_atom_bios)
+		return;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
+	if (!pdata) {
+		DRM_ERROR("Memory allocation failed\n");
+		goto error;
+	}
+
+	memset(&props, 0, sizeof(props));
+	props.max_brightness = RADEON_MAX_BL_LEVEL;
+	props.type = BACKLIGHT_RAW;
+	snprintf(bl_name, sizeof(bl_name),
+		 "radeon_bl%d", dev->primary->index);
+	bd = backlight_device_register(bl_name, &drm_connector->kdev,
+				       pdata, &radeon_atom_backlight_ops, &props);
+	if (IS_ERR(bd)) {
+		DRM_ERROR("Backlight registration failed\n");
+		goto error;
+	}
+
+	pdata->encoder = radeon_encoder;
+
+	dig = radeon_encoder->enc_priv;
+	dig->bl_dev = bd;
+
+	bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+	/* Set a reasonable default here if the level is 0; otherwise fbdev
+	 * will attempt to turn the backlight on after console unblanking,
+	 * restore the saved 0, and turn the backlight off again.
+	 */
+	if (bd->props.brightness == 0)
+		bd->props.brightness = RADEON_MAX_BL_LEVEL;
+	bd->props.power = FB_BLANK_UNBLANK;
+	backlight_update_status(bd);
+
+	DRM_INFO("radeon atom DIG backlight initialized\n");
+
+	return;
+
+error:
+	kfree(pdata);
+	return;
+}
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd = NULL;
+	struct radeon_encoder_atom_dig *dig;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+	if (!rdev->is_atom_bios)
+		return;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	dig = radeon_encoder->enc_priv;
+	bd = dig->bl_dev;
+	dig->bl_dev = NULL;
+
+	if (bd) {
+		struct radeon_backlight_privdata *pdata;
+
+		pdata = bl_get_data(bd);
+		backlight_device_unregister(bd);
+		kfree(pdata);
+
+		DRM_INFO("radeon atom LVDS backlight unloaded\n");
+	}
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+				struct drm_connector *drm_connector)
+{
+}
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *encoder)
+{
+}
+
+#endif
+
+/* evil but including atombios.h is much worse */
+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+				struct drm_display_mode *mode);
+
+
+static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* set the active encoder to connector routing */
+	radeon_encoder_set_active_device(encoder);
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+	/* hw bug: interlaced modes need vsync_start at least 2 lines past vdisplay */
+	if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+
+	/* get the native mode for LVDS */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+		radeon_panel_mode_fixup(encoder, adjusted_mode);
+
+	/* get the native mode for TV */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+		if (tv_dac) {
+			if (tv_dac->tv_std == TV_STD_NTSC ||
+			    tv_dac->tv_std == TV_STD_NTSC_J ||
+			    tv_dac->tv_std == TV_STD_PAL_M)
+				radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
+			else
+				radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
+		}
+	}
+
+	if (ASIC_IS_DCE3(rdev) &&
+	    ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+	     (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
+		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+		radeon_dp_set_link_config(connector, adjusted_mode);
+	}
+
+	return true;
+}
+
+static void
+atombios_dac_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	DAC_ENCODER_CONTROL_PS_ALLOCATION args;
+	int index = 0;
+	struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
+		break;
+	}
+
+	args.ucAction = action;
+
+	if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
+		args.ucDacStandard = ATOM_DAC1_PS2;
+	else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+		args.ucDacStandard = ATOM_DAC1_CV;
+	else {
+		switch (dac_info->tv_std) {
+		case TV_STD_PAL:
+		case TV_STD_PAL_M:
+		case TV_STD_SCART_PAL:
+		case TV_STD_SECAM:
+		case TV_STD_PAL_CN:
+			args.ucDacStandard = ATOM_DAC1_PAL;
+			break;
+		case TV_STD_NTSC:
+		case TV_STD_NTSC_J:
+		case TV_STD_PAL_60:
+		default:
+			args.ucDacStandard = ATOM_DAC1_NTSC;
+			break;
+		}
+	}
+	args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void
+atombios_tv_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	TV_ENCODER_CONTROL_PS_ALLOCATION args;
+	int index = 0;
+	struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+	memset(&args, 0, sizeof(args));
+
+	index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
+
+	args.sTVEncoder.ucAction = action;
+
+	if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+		args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
+	else {
+		switch (dac_info->tv_std) {
+		case TV_STD_NTSC:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+			break;
+		case TV_STD_PAL:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
+			break;
+		case TV_STD_PAL_M:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
+			break;
+		case TV_STD_PAL_60:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
+			break;
+		case TV_STD_NTSC_J:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
+			break;
+		case TV_STD_SCART_PAL:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
+			break;
+		case TV_STD_SECAM:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
+			break;
+		case TV_STD_PAL_CN:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
+			break;
+		default:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+			break;
+		}
+	}
+
+	args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 8;
+
+	if (connector)
+		bpc = radeon_get_monitor_bpc(connector);
+
+	switch (bpc) {
+	case 0:
+		return PANEL_BPC_UNDEFINE;
+	case 6:
+		return PANEL_6BIT_PER_COLOR;
+	case 8:
+	default:
+		return PANEL_8BIT_PER_COLOR;
+	case 10:
+		return PANEL_10BIT_PER_COLOR;
+	case 12:
+		return PANEL_12BIT_PER_COLOR;
+	case 16:
+		return PANEL_16BIT_PER_COLOR;
+	}
+}
+
+
+union dvo_encoder_control {
+	ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
+	DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
+	DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
+};
+
+void
+atombios_dvo_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	union dvo_encoder_control args;
+	int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+	uint8_t frev, crev;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
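+	/* AtomBIOS command tables are versioned; frev/crev select the
+	 * parameter-structure layout this BIOS expects */
+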
+	/* some R4xx chips have the wrong frev */
+	if (rdev->family <= CHIP_RV410)
+		frev = 1;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			/* R4xx, R5xx */
+			args.ext_tmds.sXTmdsEncoder.ucEnable = action;
+
+			if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+
+			args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
+			break;
+		case 2:
+			/* RS600/690/740 */
+			args.dvo.sDVOEncoder.ucAction = action;
+			args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			/* DFP1, CRT1, TV1 depending on the type of port */
+			args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
+
+			if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
+			break;
+		case 3:
+			/* R6xx */
+			args.dvo_v3.ucAction = action;
+			args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.dvo_v3.ucDVOConfig = 0; /* XXX */
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union lvds_encoder_control {
+	LVDS_ENCODER_CONTROL_PS_ALLOCATION    v1;
+	LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
+};
+
+void
+atombios_digital_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	union lvds_encoder_control args;
+	int index = 0;
+	int hdmi_detected = 0;
+	uint8_t frev, crev;
+
+	if (!dig)
+		return;
+
+	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+		hdmi_detected = 1;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
+		break;
+	}
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+	case 2:
+		switch (crev) {
+		case 1:
+			args.v1.ucMisc = 0;
+			args.v1.ucAction = action;
+			if (hdmi_detected)
+				args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+				if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+				if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+					args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+			} else {
+				if (dig->linkb)
+					args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+				/*if (pScrn->rgbBits == 8) */
+				args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+			}
+			break;
+		case 2:
+		case 3:
+			args.v2.ucMisc = 0;
+			args.v2.ucAction = action;
+			if (crev == 3) {
+				if (dig->coherent_mode)
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
+			}
+			if (hdmi_detected)
+				args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+			args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v2.ucTruncate = 0;
+			args.v2.ucSpatial = 0;
+			args.v2.ucTemporal = 0;
+			args.v2.ucFRC = 0;
+			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+				if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+				if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
+					args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
+					if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+						args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
+				}
+				if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
+					args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
+					if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
+					if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
+						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
+				}
+			} else {
+				if (dig->linkb)
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+int
+atombios_get_encoder_mode(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+
+	/* dp bridges are always DP */
+	if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
+		return ATOM_ENCODER_MODE_DP;
+
+	/* DVO is always DVO */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
+		return ATOM_ENCODER_MODE_DVO;
+
+	connector = radeon_get_connector_for_encoder(encoder);
+	/* if we don't have an active device yet, just use one of
+	 * the connectors tied to the encoder.
+	 */
+	if (!connector)
+		connector = radeon_get_connector_for_encoder_init(encoder);
+	radeon_connector = to_radeon_connector(connector);
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
+		if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+		    radeon_audio &&
+		    !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+			return ATOM_ENCODER_MODE_HDMI;
+		else if (radeon_connector->use_digital)
+			return ATOM_ENCODER_MODE_DVI;
+		else
+			return ATOM_ENCODER_MODE_CRT;
+		break;
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_HDMIA:
+	default:
+		if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+		    radeon_audio &&
+		    !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+			return ATOM_ENCODER_MODE_HDMI;
+		else
+			return ATOM_ENCODER_MODE_DVI;
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+		return ATOM_ENCODER_MODE_LVDS;
+		break;
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		dig_connector = radeon_connector->con_priv;
+		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+			return ATOM_ENCODER_MODE_DP;
+		else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+			 radeon_audio &&
+			 !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+			return ATOM_ENCODER_MODE_HDMI;
+		else
+			return ATOM_ENCODER_MODE_DVI;
+		break;
+	case DRM_MODE_CONNECTOR_eDP:
+		return ATOM_ENCODER_MODE_DP;
+	case DRM_MODE_CONNECTOR_DVIA:
+	case DRM_MODE_CONNECTOR_VGA:
+		return ATOM_ENCODER_MODE_CRT;
+		break;
+	case DRM_MODE_CONNECTOR_Composite:
+	case DRM_MODE_CONNECTOR_SVIDEO:
+	case DRM_MODE_CONNECTOR_9PinDIN:
+		/* fix me */
+		return ATOM_ENCODER_MODE_TV;
+		/*return ATOM_ENCODER_MODE_CV;*/
+		break;
+	}
+}
+
+/*
+ * DIG Encoder/Transmitter Setup
+ *
+ * DCE 3.0/3.1
+ * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
+ * Supports up to 3 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1 can drive UNIPHY link A or link B
+ * DIG2 can drive UNIPHY link B or LVTMA
+ *
+ * DCE 3.2
+ * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
+ * Supports up to 5 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * DCE 4.0/5.0/6.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 6 DIG encoder blocks.
+ * - DIG to PHY mapping is hardcoded
+ * DIG1 drives UNIPHY0 link A, A+B
+ * DIG2 drives UNIPHY0 link B
+ * DIG3 drives UNIPHY1 link A, A+B
+ * DIG4 drives UNIPHY1 link B
+ * DIG5 drives UNIPHY2 link A, A+B
+ * DIG6 drives UNIPHY2 link B
+ *
+ * DCE 4.1
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 2 DIG encoder blocks.
+ * llano
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ * ontario
+ * DIG1 drives UNIPHY0/1/2 link A
+ * DIG2 drives UNIPHY0/1/2 link B
+ *
+ * Routing
+ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+ * Examples:
+ * crtc0 -> dig2 -> LVTMA   links A+B -> TMDS/HDMI
+ * crtc1 -> dig1 -> UNIPHY0 link  B   -> DP
+ * crtc0 -> dig1 -> UNIPHY2 link  A   -> LVDS
+ * crtc1 -> dig2 -> UNIPHY1 link  B+A -> TMDS/HDMI
+ */
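+
+/*
+ * Illustrative sketch (not part of the imported file): on DCE 4.x/5.x/6.x
+ * the DIG-to-PHY mapping above is hardcoded, so the 0-based DIG encoder
+ * index follows directly from the UNIPHY block and the link in use.
+ */
+static inline int dce4_dig_index_sketch(int uniphy, bool linkb)
+{
+	/* DIG1/3/5 (index 0/2/4) drive link A; DIG2/4/6 (1/3/5) link B */
+	return uniphy * 2 + (linkb ? 1 : 0);
+}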
+
+union dig_encoder_control {
+	DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
+	DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
+	DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+	DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
+};
+
+void
+atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	union dig_encoder_control args;
+	int index = 0;
+	uint8_t frev, crev;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int hpd_id = RADEON_HPD_NONE;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		hpd_id = radeon_connector->hpd.hpd;
+	}
+
+	/* no dig encoder assigned */
+	if (dig->dig_encoder == -1)
+		return;
+
+	memset(&args, 0, sizeof(args));
+
+	if (ASIC_IS_DCE4(rdev))
+		index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
+	else {
+		if (dig->dig_encoder)
+			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+	}
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			args.v1.ucAction = action;
+			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
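+			/* ucPanelMode is only named in the v3 layout; writing
+			 * it (and the V3/V4 ucConfig bits below) through
+			 * another union member looks deliberate, as the byte
+			 * offsets of these fields line up across versions */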
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v3.ucPanelMode = panel_mode;
+			else
+				args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+				args.v1.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v1.ucLaneNum = 8;
+			else
+				args.v1.ucLaneNum = 4;
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
+				break;
+			}
+			if (dig->linkb)
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+			else
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+			break;
+		case 2:
+		case 3:
+			args.v3.ucAction = action;
+			args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v3.ucPanelMode = panel_mode;
+			else
+				args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode))
+				args.v3.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v3.ucLaneNum = 8;
+			else
+				args.v3.ucLaneNum = 4;
+
+			if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+			args.v3.acConfig.ucDigSel = dig->dig_encoder;
+			args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder);
+			break;
+		case 4:
+			args.v4.ucAction = action;
+			args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v4.ucPanelMode = panel_mode;
+			else
+				args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode))
+				args.v4.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v4.ucLaneNum = 8;
+			else
+				args.v4.ucLaneNum = 4;
+
+			if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
+				if (dp_clock == 270000)
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+				else if (dp_clock == 540000)
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
+			}
+			args.v4.acConfig.ucDigSel = dig->dig_encoder;
+			args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
+			if (hpd_id == RADEON_HPD_NONE)
+				args.v4.ucHPD_ID = 0;
+			else
+				args.v4.ucHPD_ID = hpd_id + 1;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union dig_transmitter_control {
+	DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5;
+};
+
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector;
+	union dig_transmitter_control args;
+	int index = 0;
+	uint8_t frev, crev;
+	bool is_dp = false;
+	int pll_id = 0;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int connector_object_id = 0;
+	int igp_lane_info = 0;
+	int dig_encoder = dig->dig_encoder;
+	int hpd_id = RADEON_HPD_NONE;
+
+	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+		connector = radeon_get_connector_for_encoder_init(encoder);
+		/* just needed to avoid bailing in the encoder check.  the encoder
+		 * isn't used for init
+		 */
+		dig_encoder = 0;
+	} else
+		connector = radeon_get_connector_for_encoder(encoder);
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		hpd_id = radeon_connector->hpd.hpd;
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		connector_object_id =
+			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+		igp_lane_info = dig_connector->igp_lane_info;
+	}
+
+	if (encoder->crtc) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+		pll_id = radeon_crtc->pll_id;
+	}
+
+	/* no dig encoder assigned */
+	if (dig_encoder == -1)
+		return;
+
+	if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)))
+		is_dp = true;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+		break;
+	}
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			args.v1.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v1.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v1.asMode.ucLaneSel = lane_num;
+				args.v1.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v1.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+
+			if (dig_encoder)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+			else
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+
+			if ((rdev->flags & RADEON_IS_IGP) &&
+			    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
+				if (is_dp ||
+				    !radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) {
+					if (igp_lane_info & 0x1)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+					else if (igp_lane_info & 0x2)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
+					else if (igp_lane_info & 0x4)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
+					else if (igp_lane_info & 0x8)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
+				} else {
+					if (igp_lane_info & 0x3)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+					else if (igp_lane_info & 0xc)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
+				}
+			}
+
+			if (dig->linkb)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
+			else
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+
+			if (is_dp)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+			}
+			break;
+		case 2:
+			args.v2.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v2.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v2.asMode.ucLaneSel = lane_num;
+				args.v2.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v2.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			args.v2.acConfig.ucEncoderSel = dig_encoder;
+			if (dig->linkb)
+				args.v2.acConfig.ucLinkSel = 1;
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v2.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v2.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v2.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp) {
+				args.v2.acConfig.fCoherentMode = 1;
+				args.v2.acConfig.fDPConnector = 1;
+			} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v2.acConfig.fCoherentMode = 1;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v2.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 3:
+			args.v3.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v3.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v3.asMode.ucLaneSel = lane_num;
+				args.v3.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v3.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			if (is_dp)
+				args.v3.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v3.ucLaneNum = 8;
+			else
+				args.v3.ucLaneNum = 4;
+
+			if (dig->linkb)
+				args.v3.acConfig.ucLinkSel = 1;
+			if (dig_encoder & 1)
+				args.v3.acConfig.ucEncoderSel = 1;
+
+			/* Select the PLL for the PHY
+			 * DP PHY should be clocked from external src if there is
+			 * one.
+			 */
+			/* On DCE4, if there is an external clock, it generates the DP ref clock */
+			if (is_dp && rdev->clock.dp_extclk)
+				args.v3.acConfig.ucRefClkSource = 2; /* external src */
+			else
+				args.v3.acConfig.ucRefClkSource = pll_id;
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v3.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v3.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v3.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp)
+				args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v3.acConfig.fCoherentMode = 1;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v3.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 4:
+			args.v4.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v4.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v4.asMode.ucLaneSel = lane_num;
+				args.v4.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v4.usPixelClock = cpu_to_le16(dp_clock / 10);
+				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			if (is_dp)
+				args.v4.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v4.ucLaneNum = 8;
+			else
+				args.v4.ucLaneNum = 4;
+
+			if (dig->linkb)
+				args.v4.acConfig.ucLinkSel = 1;
+			if (dig_encoder & 1)
+				args.v4.acConfig.ucEncoderSel = 1;
+
+			/* Select the PLL for the PHY
+			 * DP PHY should be clocked from external src if there is
+			 * one.
+			 */
+			/* On DCE5 DCPLL usually generates the DP ref clock */
+			if (is_dp) {
+				if (rdev->clock.dp_extclk)
+					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
+				else
+					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
+			} else
+				args.v4.acConfig.ucRefClkSource = pll_id;
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v4.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v4.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v4.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp)
+				args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v4.acConfig.fCoherentMode = 1;
+				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+					args.v4.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 5:
+			args.v5.ucAction = action;
+			if (is_dp)
+				args.v5.usSymClock = cpu_to_le16(dp_clock / 10);
+			else
+				args.v5.usSymClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				if (dig->linkb)
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYB;
+				else
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYA;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				if (dig->linkb)
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYD;
+				else
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYC;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				if (dig->linkb)
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYF;
+				else
+					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE;
+				break;
+			}
+			if (is_dp)
+				args.v5.ucLaneNum = dp_lane_count;
+			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v5.ucLaneNum = 8;
+			else
+				args.v5.ucLaneNum = 4;
+			args.v5.ucConnObjId = connector_object_id;
+			args.v5.ucDigMode = atombios_get_encoder_mode(encoder);
+
+			if (is_dp && rdev->clock.dp_extclk)
+				args.v5.asConfig.ucPhyClkSrcId = ENCODER_REFCLK_SRC_EXTCLK;
+			else
+				args.v5.asConfig.ucPhyClkSrcId = pll_id;
+
+			if (is_dp)
+				args.v5.asConfig.ucCoherentMode = 1; /* DP requires coherent */
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v5.asConfig.ucCoherentMode = 1;
+			}
+			if (hpd_id == RADEON_HPD_NONE)
+				args.v5.asConfig.ucHPDSel = 0;
+			else
+				args.v5.asConfig.ucHPDSel = hpd_id + 1;
+			args.v5.ucDigEncoderSel = 1 << dig_encoder;
+			args.v5.ucDPLaneSet = lane_set;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+bool
+atombios_set_edp_panel_power(struct drm_connector *connector, int action)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_device *dev = radeon_connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	union dig_transmitter_control args;
+	int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+	uint8_t frev, crev;
+
+	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+		goto done;
+
+	if (!ASIC_IS_DCE4(rdev))
+		goto done;
+
+	if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
+	    (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
+		goto done;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		goto done;
+
+	memset(&args, 0, sizeof(args));
+
+	args.v1.ucAction = action;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* wait for the panel to power up */
+	if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
+		int i;
+
+		for (i = 0; i < 300; i++) {
+			if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+				return true;
+			mdelay(1);
+		}
+		return false;
+	}
+done:
+	return true;
+}
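+
+/*
+ * Illustrative usage sketch (assumed caller, not part of this file): eDP
+ * panels must be powered up before AUX transactions can succeed; the
+ * helper above waits up to ~300 ms for HPD to assert.
+ */
+static bool example_edp_power_on(struct drm_connector *connector)
+{
+	return atombios_set_edp_panel_power(connector,
+					    ATOM_TRANSMITTER_ACTION_POWER_ON);
+}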
+
+union external_encoder_control {
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
+};
+
+static void
+atombios_external_encoder_setup(struct drm_encoder *encoder,
+				struct drm_encoder *ext_encoder,
+				int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
+	union external_encoder_control args;
+	struct drm_connector *connector;
+	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
+	u8 frev, crev;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int connector_object_id = 0;
+	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+
+	if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+		connector = radeon_get_connector_for_encoder_init(encoder);
+	else
+		connector = radeon_get_connector_for_encoder(encoder);
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		connector_object_id =
+			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	}
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		/* no params on frev 1 */
+		break;
+	case 2:
+		switch (crev) {
+		case 1:
+		case 2:
+			args.v1.sDigEncoder.ucAction = action;
+			args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) {
+				if (dp_clock == 270000)
+					args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+				args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
+			} else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v1.sDigEncoder.ucLaneNum = 8;
+			else
+				args.v1.sDigEncoder.ucLaneNum = 4;
+			break;
+		case 3:
+			args.v3.sExtEncoder.ucAction = action;
+			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
+			else
+				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) {
+				if (dp_clock == 270000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+				else if (dp_clock == 540000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+			} else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+				args.v3.sExtEncoder.ucLaneNum = 8;
+			else
+				args.v3.sExtEncoder.ucLaneNum = 4;
+			switch (ext_enum) {
+			case GRAPH_OBJECT_ENUM_ID1:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+				break;
+			case GRAPH_OBJECT_ENUM_ID2:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
+				break;
+			case GRAPH_OBJECT_ENUM_ID3:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
+				break;
+			}
+			args.v3.sExtEncoder.ucBitPerColor = radeon_atom_get_bpc(encoder);
+			break;
+		default:
+			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+		return;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
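+/* enable/disable YUV output for TV/CV via the EnableYUV atom table (used on pre-DCE4 parts) */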
+static void
+atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	ENABLE_YUV_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
+	uint32_t temp, reg;
+
+	memset(&args, 0, sizeof(args));
+
+	if (rdev->family >= CHIP_R600)
+		reg = R600_BIOS_3_SCRATCH;
+	else
+		reg = RADEON_BIOS_3_SCRATCH;
+
+	/* XXX: fix up scratch reg handling */
+	temp = RREG32(reg);
+	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+		WREG32(reg, (ATOM_S3_TV1_ACTIVE |
+			     (radeon_crtc->crtc_id << 18)));
+	else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+		WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
+	else
+		WREG32(reg, 0);
+
+	if (enable)
+		args.ucEnable = ATOM_ENABLE;
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	WREG32(reg, temp);
+}
+
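+/* dpms for non-DIG encoders via the per-device atom output control tables */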
+static void
+radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+	int index = 0;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
+		break;
+	default:
+		return;
+	}
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		args.ucAction = ATOM_ENABLE;
+		/* workaround for DVOOutputControl on some RS690 systems */
+		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
+			u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
+			WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			WREG32(RADEON_BIOS_3_SCRATCH, reg);
+		} else
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			args.ucAction = ATOM_LCD_BLON;
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		}
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		args.ucAction = ATOM_DISABLE;
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			args.ucAction = ATOM_LCD_BLOFF;
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		}
+		break;
+	}
+}
+
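+/* dpms for DIG encoders; the encoder/transmitter enable sequence varies by DCE generation */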
+static void
+radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	struct radeon_connector *radeon_connector = NULL;
+	struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
+
+	if (connector) {
+		radeon_connector = to_radeon_connector(connector);
+		radeon_dig_connector = radeon_connector->con_priv;
+	}
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+			if (!connector)
+				dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+			else
+				dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
+
+			/* setup and enable the encoder */
+			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+			atombios_dig_encoder_setup(encoder,
+						   ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+						   dig->panel_mode);
+			if (ext_encoder) {
+				if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+					atombios_external_encoder_setup(encoder, ext_encoder,
+									EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+			}
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+		} else if (ASIC_IS_DCE4(rdev)) {
+			/* setup and enable the encoder */
+			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+			/* enable the transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+		} else {
+			/* setup and enable the encoder and transmitter */
+			atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+			/* some dce3.x boards have a bug in their transmitter control table.
+			 * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
+			 * does the same thing and more.
+			 */
+			if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+			    (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+		}
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_ON);
+				radeon_dig_connector->edp_on = true;
+			}
+			radeon_dp_link_train(encoder, connector);
+			if (ASIC_IS_DCE4(rdev))
+				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
+		}
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+			/* disable the transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+		} else if (ASIC_IS_DCE4(rdev)) {
+			/* disable the transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+		} else {
+			/* disable the encoder and transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+		}
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+			if (ASIC_IS_DCE4(rdev))
+				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+				radeon_dig_connector->edp_on = false;
+			}
+		}
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+		break;
+	}
+}
+
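+/* dpms for an external encoder (e.g. a DP bridge) attached to a primary encoder */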
+static void
+radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
+			     struct drm_encoder *ext_encoder,
+			     int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	default:
+		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
+		} else
+			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
+		} else
+			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
+		break;
+	}
+}
+
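+/* top-level dpms: dispatch to the avivo or dig path based on the encoder id */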
+static void
+radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+	DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
+		  radeon_encoder->encoder_id, mode, radeon_encoder->devices,
+		  radeon_encoder->active_device);
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		radeon_atom_encoder_dpms_avivo(encoder, mode);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		radeon_atom_encoder_dpms_dig(encoder, mode);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		if (ASIC_IS_DCE5(rdev)) {
+			switch (mode) {
+			case DRM_MODE_DPMS_ON:
+				atombios_dvo_setup(encoder, ATOM_ENABLE);
+				break;
+			case DRM_MODE_DPMS_STANDBY:
+			case DRM_MODE_DPMS_SUSPEND:
+			case DRM_MODE_DPMS_OFF:
+				atombios_dvo_setup(encoder, ATOM_DISABLE);
+				break;
+			}
+		} else if (ASIC_IS_DCE3(rdev))
+			radeon_atom_encoder_dpms_dig(encoder, mode);
+		else
+			radeon_atom_encoder_dpms_avivo(encoder, mode);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		if (ASIC_IS_DCE5(rdev)) {
+			switch (mode) {
+			case DRM_MODE_DPMS_ON:
+				atombios_dac_setup(encoder, ATOM_ENABLE);
+				break;
+			case DRM_MODE_DPMS_STANDBY:
+			case DRM_MODE_DPMS_SUSPEND:
+			case DRM_MODE_DPMS_OFF:
+				atombios_dac_setup(encoder, ATOM_DISABLE);
+				break;
+			}
+		} else
+			radeon_atom_encoder_dpms_avivo(encoder, mode);
+		break;
+	default:
+		return;
+	}
+
+	if (ext_encoder)
+		radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode);
+
+	radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+union crtc_source_param {
+	SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
+	SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
+};
+
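+/* tell the bios which crtc feeds this encoder via the SelectCRTC_Source table */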
+static void
+atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	union crtc_source_param args;
+	int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+	uint8_t frev, crev;
+	struct radeon_encoder_atom_dig *dig;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+		default:
+			if (ASIC_IS_AVIVO(rdev))
+				args.v1.ucCRTC = radeon_crtc->crtc_id;
+			else {
+				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
+					args.v1.ucCRTC = radeon_crtc->crtc_id;
+				} else {
+					args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
+				}
+			}
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+				args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+			case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+				if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
+					args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+			case ENCODER_OBJECT_ID_INTERNAL_DDI:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+				args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+				break;
+			}
+			break;
+		case 2:
+			args.v2.ucCRTC = radeon_crtc->crtc_id;
+			if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
+				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+				if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+				else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
+					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+				else
+					args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+			} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+				args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+			} else {
+				args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+			}
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+				dig = radeon_encoder->enc_priv;
+				switch (dig->dig_encoder) {
+				case 0:
+					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+					break;
+				case 1:
+					args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+					break;
+				case 2:
+					args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+					break;
+				case 3:
+					args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+					break;
+				case 4:
+					args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+					break;
+				case 5:
+					args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+					break;
+				}
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+				args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else
+					args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else
+					args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
+				break;
+			}
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+		return;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* update scratch regs with new routing */
+	radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
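+/* board- and asic-specific fixups applied after the encoder mode set */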
+static void
+atombios_apply_encoder_quirks(struct drm_encoder *encoder,
+			      struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+	/* Funky macbooks */
+	if ((dev->pdev->device == 0x71C5) &&
+	    (dev->pdev->subsystem_vendor == 0x106b) &&
+	    (dev->pdev->subsystem_device == 0x0080)) {
+		if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+			uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
+
+			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+
+			WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
+		}
+	}
+
+	/* the scaler setup clears this on some chips */
+	if (ASIC_IS_AVIVO(rdev) &&
+	    (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
+		if (ASIC_IS_DCE4(rdev)) {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       EVERGREEN_INTERLEAVE_EN);
+			else
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		} else {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       AVIVO_D1MODE_INTERLEAVE_EN);
+			else
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		}
+	}
+}
+
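+/* pick which DIG encoder block drives this encoder; the mapping depends on the DCE generation */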
+static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *test_encoder;
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t dig_enc_in_use = 0;
+
+	if (ASIC_IS_DCE6(rdev)) {
+		/* DCE6 */
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+			if (dig->linkb)
+				return 1;
+			else
+				return 0;
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			if (dig->linkb)
+				return 3;
+			else
+				return 2;
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			if (dig->linkb)
+				return 5;
+			else
+				return 4;
+			break;
+		}
+	} else if (ASIC_IS_DCE4(rdev)) {
+		/* DCE4/5 */
+		if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
+			/* ontario follows DCE4 */
+			if (rdev->family == CHIP_PALM) {
+				if (dig->linkb)
+					return 1;
+				else
+					return 0;
+			} else
+				/* llano follows DCE3.2 */
+				return radeon_crtc->crtc_id;
+		} else {
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				if (dig->linkb)
+					return 1;
+				else
+					return 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				if (dig->linkb)
+					return 3;
+				else
+					return 2;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				if (dig->linkb)
+					return 5;
+				else
+					return 4;
+				break;
+			}
+		}
+	}
+
+	/* on DCE3.2, an encoder can drive any DIG block, so just use the crtc id */
+	if (ASIC_IS_DCE32(rdev)) {
+		return radeon_crtc->crtc_id;
+	}
+
+	/* on DCE3 - LVTMA can only be driven by DIGB */
+	list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_test_encoder;
+
+		if (encoder == test_encoder)
+			continue;
+
+		if (!radeon_encoder_is_digital(test_encoder))
+			continue;
+
+		radeon_test_encoder = to_radeon_encoder(test_encoder);
+		dig = radeon_test_encoder->enc_priv;
+
+		if (dig->dig_encoder >= 0)
+			dig_enc_in_use |= (1 << dig->dig_encoder);
+	}
+
+	if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
+		if (dig_enc_in_use & 0x2)
+			DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
+		return 1;
+	}
+	if (!(dig_enc_in_use & 1))
+		return 0;
+	return 1;
+}
+
+/* This only needs to be called once at startup */
+void
+radeon_atom_encoder_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_encoder *encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+		struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+			break;
+		default:
+			break;
+		}
+
+		if (ext_encoder && (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)))
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+	}
+}
+
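+/* program the encoder for the adjusted mode; DIG encoders are programmed from dpms */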
+static void
+radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	radeon_encoder->pixel_clock = adjusted_mode->clock;
+
+	/* need to call this here rather than in prepare() since we need some crtc info */
+	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
+		if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
+			atombios_yuv_setup(encoder, true);
+		else
+			atombios_yuv_setup(encoder, false);
+	}
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		/* handled in dpms */
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		atombios_dvo_setup(encoder, ATOM_ENABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		atombios_dac_setup(encoder, ATOM_ENABLE);
+		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
+			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+				atombios_tv_setup(encoder, ATOM_ENABLE);
+			else
+				atombios_tv_setup(encoder, ATOM_DISABLE);
+		}
+		break;
+	}
+
+	atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+		if (rdev->asic->display.hdmi_enable)
+			radeon_hdmi_enable(rdev, encoder, true);
+		if (rdev->asic->display.hdmi_setmode)
+			radeon_hdmi_setmode(rdev, encoder, adjusted_mode);
+	}
+}
+
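+/* kick off DAC load detection via the DAC_LoadDetection table; the result lands in the bios scratch regs */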
+static bool
+atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
+				       ATOM_DEVICE_CV_SUPPORT |
+				       ATOM_DEVICE_CRT_SUPPORT)) {
+		DAC_LOAD_DETECTION_PS_ALLOCATION args;
+		int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
+		uint8_t frev, crev;
+
+		memset(&args, 0, sizeof(args));
+
+		if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+			return false;
+
+		args.sDacload.ucMisc = 0;
+
+		if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
+		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
+			args.sDacload.ucDacType = ATOM_DAC_A;
+		else
+			args.sDacload.ucDacType = ATOM_DAC_B;
+
+		if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
+		else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
+		else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
+			if (crev >= 3)
+				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+		} else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
+			if (crev >= 3)
+				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+		}
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		return true;
+	} else
+		return false;
+}
+
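+/* run DAC load detection and decode the result from the BIOS_0_SCRATCH register */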
+static enum drm_connector_status
+radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	uint32_t bios_0_scratch;
+
+	if (!atombios_dac_load_detect(encoder, connector)) {
+		DRM_DEBUG_KMS("detect returned false\n");
+		return connector_status_unknown;
+	}
+
+	if (rdev->family >= CHIP_R600)
+		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+	else
+		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+
+	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+			return connector_status_connected; /* CTV */
+		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+			return connector_status_connected; /* STV */
+	}
+	return connector_status_disconnected;
+}
+
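+/* analog load detection through an external DP bridge; only valid on DCE4+ */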
+static enum drm_connector_status
+radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+	u32 bios_0_scratch;
+
+	if (!ASIC_IS_DCE4(rdev))
+		return connector_status_unknown;
+
+	if (!ext_encoder)
+		return connector_status_unknown;
+
+	if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
+		return connector_status_unknown;
+
+	/* load detect on the dp bridge */
+	atombios_external_encoder_setup(encoder, ext_encoder,
+					EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
+
+	bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+
+	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+			return connector_status_connected; /* CTV */
+		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+			return connector_status_connected; /* STV */
+	}
+	return connector_status_disconnected;
+}
+
+void
+radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
+{
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+	if (ext_encoder)
+		/* ddc_setup on the dp bridge */
+		atombios_external_encoder_setup(encoder, ext_encoder,
+						EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
+
+}
+
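+/* pre-modeset setup: assign a DIG block/afmt, lock the output, and set the crtc routing */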
+static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+	if ((radeon_encoder->active_device &
+	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
+	     ENCODER_OBJECT_ID_NONE)) {
+		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+		if (dig) {
+			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+			if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
+				if (rdev->family >= CHIP_R600)
+					dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
+				else
+					/* RS600/690/740 have only 1 afmt block */
+					dig->afmt = rdev->mode_info.afmt[0];
+			}
+		}
+	}
+
+	radeon_atom_output_lock(encoder, true);
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+		/* select the clock/data port if it uses a router */
+		if (radeon_connector->router.cd_valid)
+			radeon_router_select_cd_port(radeon_connector);
+
+		/* turn eDP panel on for mode set */
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_ON);
+	}
+
+	/* this is needed for the pll/ss setup to work correctly in some cases */
+	atombios_set_encoder_crtc_source(encoder);
+}
+
+static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
+{
+	/* need to call this here as we need the crtc set up */
+	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+	radeon_atom_output_lock(encoder, false);
+}
+
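+/* disable the encoder unless a shared pre-DCE3 encoder is still in use elsewhere */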
+static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig;
+
+	/* check for pre-DCE3 cards with shared encoders;
+	 * can't really use the links individually, so don't disable
+	 * the encoder if it's in use by another connector
+	 */
+	if (!ASIC_IS_DCE3(rdev)) {
+		struct drm_encoder *other_encoder;
+		struct radeon_encoder *other_radeon_encoder;
+
+		list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+			other_radeon_encoder = to_radeon_encoder(other_encoder);
+			if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+			    drm_helper_encoder_in_use(other_encoder))
+				goto disable_done;
+		}
+	}
+
+	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		/* handled in dpms */
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		atombios_dvo_setup(encoder, ATOM_DISABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		atombios_dac_setup(encoder, ATOM_DISABLE);
+		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+			atombios_tv_setup(encoder, ATOM_DISABLE);
+		break;
+	}
+
+disable_done:
+	if (radeon_encoder_is_digital(encoder)) {
+		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+			if (rdev->asic->display.hdmi_enable)
+				radeon_hdmi_enable(rdev, encoder, false);
+		}
+		dig = radeon_encoder->enc_priv;
+		dig->dig_encoder = -1;
+	}
+	radeon_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void radeon_atom_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_mode_set(struct drm_encoder *encoder,
+			 struct drm_display_mode *mode,
+			 struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void radeon_atom_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
+				       const struct drm_display_mode *mode,
+				       struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
+	.dpms = radeon_atom_ext_dpms,
+	.mode_fixup = radeon_atom_ext_mode_fixup,
+	.prepare = radeon_atom_ext_prepare,
+	.mode_set = radeon_atom_ext_mode_set,
+	.commit = radeon_atom_ext_commit,
+	.disable = radeon_atom_ext_disable,
+	/* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
+	.dpms = radeon_atom_encoder_dpms,
+	.mode_fixup = radeon_atom_mode_fixup,
+	.prepare = radeon_atom_encoder_prepare,
+	.mode_set = radeon_atom_encoder_mode_set,
+	.commit = radeon_atom_encoder_commit,
+	.disable = radeon_atom_encoder_disable,
+	.detect = radeon_atom_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
+	.dpms = radeon_atom_encoder_dpms,
+	.mode_fixup = radeon_atom_mode_fixup,
+	.prepare = radeon_atom_encoder_prepare,
+	.mode_set = radeon_atom_encoder_mode_set,
+	.commit = radeon_atom_encoder_commit,
+	.detect = radeon_atom_dac_detect,
+};
+
+void radeon_enc_destroy(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+		radeon_atom_backlight_exit(radeon_encoder);
+	kfree(radeon_encoder->enc_priv);
+	drm_encoder_cleanup(encoder);
+	kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
+	.destroy = radeon_enc_destroy,
+};
+
+static struct radeon_encoder_atom_dac *
+radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
+
+	if (!dac)
+		return NULL;
+
+	dac->tv_std = radeon_atombios_get_tv_info(rdev);
+	return dac;
+}
+
+static struct radeon_encoder_atom_dig *
+radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
+{
+	int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+	struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
+
+	if (!dig)
+		return NULL;
+
+	/* coherent mode by default */
+	dig->coherent_mode = true;
+	dig->dig_encoder = -1;
+
+	dig->linkb = (encoder_enum == 2);
+
+	return dig;
+}
+
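+/* create and register a drm encoder for an atombios encoder object,
+ * or extend an existing one's supported device list
+ */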
+void
+radeon_add_atom_encoder(struct drm_device *dev,
+			uint32_t encoder_enum,
+			uint32_t supported_device,
+			u16 caps)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+
+	/* see if we already added it */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->encoder_enum == encoder_enum) {
+			radeon_encoder->devices |= supported_device;
+			return;
+		}
+	}
+
+	/* add a new one */
+	radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
+	if (!radeon_encoder)
+		return;
+
+	encoder = &radeon_encoder->base;
+	switch (rdev->num_crtc) {
+	case 1:
+		encoder->possible_crtcs = 0x1;
+		break;
+	case 2:
+	default:
+		encoder->possible_crtcs = 0x3;
+		break;
+	case 4:
+		encoder->possible_crtcs = 0xf;
+		break;
+	case 6:
+		encoder->possible_crtcs = 0x3f;
+		break;
+	}
+
+	radeon_encoder->enc_priv = NULL;
+
+	radeon_encoder->encoder_enum = encoder_enum;
+	radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	radeon_encoder->devices = supported_device;
+	radeon_encoder->rmx_type = RMX_OFF;
+	radeon_encoder->underscan_type = UNDERSCAN_OFF;
+	radeon_encoder->is_ext_encoder = false;
+	radeon_encoder->caps = caps;
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			radeon_encoder->rmx_type = RMX_FULL;
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+		} else {
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+		}
+		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			radeon_encoder->rmx_type = RMX_FULL;
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+		} else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+		} else {
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+		}
+		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_SI170B:
+	case ENCODER_OBJECT_ID_CH7303:
+	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+	case ENCODER_OBJECT_ID_TITFP513:
+	case ENCODER_OBJECT_ID_VT1623:
+	case ENCODER_OBJECT_ID_HDMI_SI1930:
+	case ENCODER_OBJECT_ID_TRAVIS:
+	case ENCODER_OBJECT_ID_NUTMEG:
+		/* these are handled by the primary encoders */
+		radeon_encoder->is_ext_encoder = true;
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+		else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+		else
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+		drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
+		break;
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/atombios_i2c.c b/linux-imx/drivers/gpu/drm/radeon/atombios_i2c.c
new file mode 100644
index 0000000..2ca389d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ *
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
+
+#define TARGET_HW_I2C_CLOCK 50
+
+/* these are a limitation of ProcessI2cChannelTransaction, not the hw */
+#define ATOM_MAX_HW_I2C_WRITE 2
+#define ATOM_MAX_HW_I2C_READ  255
+
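+/* run one hw-assisted i2c transaction through the ProcessI2cChannelTransaction table */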
+static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
+				 u8 slave_addr, u8 flags,
+				 u8 *buf, u8 num)
+{
+	struct drm_device *dev = chan->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+	unsigned char *base;
+	u16 out;
+
+	memset(&args, 0, sizeof(args));
+
+	base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+
+	if (flags & HW_I2C_WRITE) {
+		if (num > ATOM_MAX_HW_I2C_WRITE) {
+			DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
+			return -EINVAL;
+		}
+		memcpy(&out, buf, num);
+		args.lpI2CDataOut = cpu_to_le16(out);
+	} else {
+		if (num > ATOM_MAX_HW_I2C_READ) {
+			DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
+			return -EINVAL;
+		}
+	}
+
+	args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+	args.ucRegIndex = 0;
+	args.ucTransBytes = num;
+	args.ucSlaveAddr = slave_addr << 1;
+	args.ucLineNumber = chan->rec.i2c_id;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* error */
+	if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
+		DRM_DEBUG_KMS("hw_i2c error\n");
+		return -EIO;
+	}
+
+	if (!(flags & HW_I2C_WRITE))
+		radeon_atom_copy_swap(buf, base, num, false);
+
+	return 0;
+}
+
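+/* i2c master_xfer implementation: split each message into chunks the atom table can handle */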
+int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+			    struct i2c_msg *msgs, int num)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct i2c_msg *p;
+	int i, remaining, current_count, buffer_offset, max_bytes, ret;
+	u8 buf = 0, flags;
+
+	/* check for bus probe */
+	p = &msgs[0];
+	if ((num == 1) && (p->len == 0)) {
+		ret = radeon_process_i2c_ch(i2c,
+					    p->addr, HW_I2C_WRITE,
+					    &buf, 1);
+		if (ret)
+			return ret;
+		else
+			return num;
+	}
+
+	for (i = 0; i < num; i++) {
+		p = &msgs[i];
+		remaining = p->len;
+		buffer_offset = 0;
+		/* max_bytes is a limitation of ProcessI2cChannelTransaction, not the hw */
+		if (p->flags & I2C_M_RD) {
+			max_bytes = ATOM_MAX_HW_I2C_READ;
+			flags = HW_I2C_READ;
+		} else {
+			max_bytes = ATOM_MAX_HW_I2C_WRITE;
+			flags = HW_I2C_WRITE;
+		}
+		while (remaining) {
+			if (remaining > max_bytes)
+				current_count = max_bytes;
+			else
+				current_count = remaining;
+			ret = radeon_process_i2c_ch(i2c,
+						    p->addr, flags,
+						    &p->buf[buffer_offset], current_count);
+			if (ret)
+				return ret;
+			remaining -= current_count;
+			buffer_offset += current_count;
+		}
+	}
+
+	return num;
+}
+
+u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/avivod.h b/linux-imx/drivers/gpu/drm/radeon/avivod.h
new file mode 100644
index 0000000..3c391e7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/avivod.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef AVIVOD_H
+#define AVIVOD_H
+
+#define	D1CRTC_CONTROL					0x6080
+#define		CRTC_EN						(1 << 0)
+#define	D1CRTC_STATUS					0x609c
+#define	D1CRTC_UPDATE_LOCK				0x60E8
+#define	D1GRPH_PRIMARY_SURFACE_ADDRESS			0x6110
+#define	D1GRPH_SECONDARY_SURFACE_ADDRESS		0x6118
+
+#define	D2CRTC_CONTROL					0x6880
+#define	D2CRTC_STATUS					0x689c
+#define	D2CRTC_UPDATE_LOCK				0x68E8
+#define	D2GRPH_PRIMARY_SURFACE_ADDRESS			0x6910
+#define	D2GRPH_SECONDARY_SURFACE_ADDRESS		0x6918
+
+#define	D1VGA_CONTROL					0x0330
+#define		DVGA_CONTROL_MODE_ENABLE			(1 << 0)
+#define		DVGA_CONTROL_TIMING_SELECT			(1 << 8)
+#define		DVGA_CONTROL_SYNC_POLARITY_SELECT		(1 << 9)
+#define		DVGA_CONTROL_OVERSCAN_TIMING_SELECT		(1 << 10)
+#define		DVGA_CONTROL_OVERSCAN_COLOR_EN			(1 << 16)
+#define		DVGA_CONTROL_ROTATE				(1 << 24)
+#define D2VGA_CONTROL					0x0338
+
+#define	VGA_HDP_CONTROL					0x328
+#define		VGA_MEM_PAGE_SELECT_EN				(1 << 0)
+#define		VGA_MEMORY_DISABLE				(1 << 4)
+#define		VGA_RBBM_LOCK_DISABLE				(1 << 8)
+#define		VGA_SOFT_RESET					(1 << 16)
+#define	VGA_MEMORY_BASE_ADDRESS				0x0310
+#define	VGA_RENDER_CONTROL				0x0300
+#define		VGA_VSTATUS_CNTL_MASK				0x00030000
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/linux-imx/drivers/gpu/drm/radeon/cayman_blit_shaders.c
new file mode 100644
index 0000000..19a0114
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * evergreen cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup.  Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables.  The register state and shaders
+ * were hand generated to support blitting functionality.  See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+
+const u32 cayman_default_state[] =
+{
+	0xc0066900,
+	0x00000000,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000000, /* DB_COUNT_CONTROL */
+	0x00000000, /* DB_DEPTH_VIEW */
+	0x0000002a, /* DB_RENDER_OVERRIDE */
+	0x00000000, /* DB_RENDER_OVERRIDE2 */
+	0x00000000, /* DB_HTILE_DATA_BASE */
+
+	0xc0026900,
+	0x0000000a,
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0036900,
+	0x0000000f,
+	0x00000000, /* DB_DEPTH_INFO */
+	0x00000000, /* DB_Z_INFO */
+	0x00000000, /* DB_STENCIL_INFO */
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00d6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000, /* PA_SC_CLIPRECT_0_BR */
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0xaaaaaaaa, /* PA_SC_EDGERULE */
+	0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0226900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+	0xc0016900,
+	0x000000d4,
+	0x00000000, /* SX_MISC */
+
+	0xc0026900,
+	0x000000d9,
+	0x00000000, /* CP_RINGID */
+	0x00000000, /* CP_VMID */
+
+	0xc0096900,
+	0x00000100,
+	0x00ffffff, /* VGT_MAX_VTX_INDX */
+	0x00000000, /* VGT_MIN_VTX_INDX */
+	0x00000000, /* VGT_INDX_OFFSET */
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+	0x00000000, /* SX_ALPHA_TEST_CONTROL */
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000, /* CB_BLEND_GREEN */
+	0x00000000, /* CB_BLEND_BLUE */
+	0x00000000, /* CB_BLEND_ALPHA */
+
+	0xc0016900,
+	0x00000187,
+	0x00000100, /* SPI_VS_OUT_ID_0 */
+
+	0xc0026900,
+	0x00000191,
+	0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+	0x00000101, /* SPI_PS_INPUT_CNTL_1 */
+
+	0xc0016900,
+	0x000001b1,
+	0x00000000, /* SPI_VS_OUT_CONFIG */
+
+	0xc0106900,
+	0x000001b3,
+	0x20000001, /* SPI_PS_IN_CONTROL_0 */
+	0x00000000, /* SPI_PS_IN_CONTROL_1 */
+	0x00000000, /* SPI_INTERP_CONTROL_0 */
+	0x00000000, /* SPI_INPUT_Z */
+	0x00000000, /* SPI_FOG_CNTL */
+	0x00100000, /* SPI_BARYC_CNTL */
+	0x00000000, /* SPI_PS_IN_CONTROL_2 */
+	0x00000000, /* SPI_COMPUTE_INPUT_CNTL */
+	0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */
+	0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */
+	0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */
+	0x00000000, /* SPI_GPR_MGMT */
+	0x00000000, /* SPI_LDS_MGMT */
+	0x00000000, /* SPI_STACK_MGMT */
+	0x00000000, /* SPI_WAVE_MGMT_1 */
+	0x00000000, /* SPI_WAVE_MGMT_2 */
+
+	0xc0016900,
+	0x000001e0,
+	0x00000000, /* CB_BLEND0_CONTROL */
+
+	0xc00e6900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+	0x00000000, /* DB_EQAA */
+	0x00cc0010, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CONTROL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000004, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+	0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+
+	0xc0026900,
+	0x00000229,
+	0x00000000, /* SQ_PGM_START_FS */
+	0x00000000,
+
+	0xc0016900,
+	0x0000023b,
+	0x00000000, /* SQ_LDS_ALLOC_PS */
+
+	0xc0066900,
+	0x00000240,
+	0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0046900,
+	0x00000247,
+	0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MODE_CNTL_0 */
+	0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000,
+
+	0xc0026900,
+	0x000002ad,
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000,
+
+	0xc0016900,
+	0x000002d5,
+	0x00000000, /* VGT_SHADER_STAGES_EN */
+
+	0xc0016900,
+	0x000002dc,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc0066900,
+	0x000002de,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0026900,
+	0x000002e5,
+	0x00000000, /* VGT_STRMOUT_CONFIG */
+	0x00000000,
+
+	0xc01b6900,
+	0x000002f5,
+	0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
+	0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x00000005, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
+	0xffffffff,
+
+	0xc0026900,
+	0x00000316,
+	0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	0x00000010, /*  */
+};
+
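+/* raw bytecode for the vertex shader used by the cayman blit path;
+ * the #ifdef __BIG_ENDIAN words below select byte-swapped encodings
+ * on big-endian hosts.
+ */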
+const u32 cayman_vs[] =
+{
+	0x00000004,
+	0x80400400,
+	0x0000a03c,
+	0x95000688,
+	0x00004000,
+	0x15000688,
+	0x00000000,
+	0x88000000,
+	0x04000000,
+	0x67961001,
+#ifdef __BIG_ENDIAN
+	0x00020000,
+#else
+	0x00000000,
+#endif
+	0x00000000,
+	0x04000000,
+	0x67961000,
+#ifdef __BIG_ENDIAN
+	0x00020008,
+#else
+	0x00000008,
+#endif
+	0x00000000,
+};
+
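+/* raw bytecode for the pixel shader used by the cayman blit path */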
+const u32 cayman_ps[] =
+{
+	0x00000004,
+	0xa00c0000,
+	0x00000008,
+	0x80400000,
+	0x00000000,
+	0x95000688,
+	0x00000000,
+	0x88000000,
+	0x00380400,
+	0x00146b10,
+	0x00380000,
+	0x20146b10,
+	0x00380400,
+	0x40146b00,
+	0x80380000,
+	0x60146b00,
+	0x00000010,
+	0x000d1000,
+	0xb0800000,
+	0x00000000,
+};
+
+const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
+const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
+const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/linux-imx/drivers/gpu/drm/radeon/cayman_blit_shaders.h b/linux-imx/drivers/gpu/drm/radeon/cayman_blit_shaders.h
new file mode 100644
index 0000000..f5d0e9a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/cayman_blit_shaders.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef CAYMAN_BLIT_SHADERS_H
+#define CAYMAN_BLIT_SHADERS_H
+
+extern const u32 cayman_ps[];
+extern const u32 cayman_vs[];
+extern const u32 cayman_default_state[];
+
+extern const u32 cayman_ps_size, cayman_vs_size;
+extern const u32 cayman_default_size;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/evergreen.c b/linux-imx/drivers/gpu/drm/radeon/evergreen.c
new file mode 100644
index 0000000..e62a9ce
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/evergreen.c
@@ -0,0 +1,5082 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <drm/radeon_drm.h>
+#include "evergreend.h"
+#include "atom.h"
+#include "avivod.h"
+#include "evergreen_reg.h"
+#include "evergreen_blit_shaders.h"
+
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+
+static const u32 crtc_offsets[6] =
+{
+	EVERGREEN_CRTC0_REGISTER_OFFSET,
+	EVERGREEN_CRTC1_REGISTER_OFFSET,
+	EVERGREEN_CRTC2_REGISTER_OFFSET,
+	EVERGREEN_CRTC3_REGISTER_OFFSET,
+	EVERGREEN_CRTC4_REGISTER_OFFSET,
+	EVERGREEN_CRTC5_REGISTER_OFFSET
+};
+
+static void evergreen_gpu_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+				     int ring, u32 cp_int_cntl);
+
+static const u32 evergreen_golden_registers[] =
+{
+	0x3f90, 0xffff0000, 0xff000000,
+	0x9148, 0xffff0000, 0xff000000,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x9b7c, 0xffffffff, 0x00000000,
+	0x8a14, 0xffffffff, 0x00000007,
+	0x8b10, 0xffffffff, 0x00000000,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0xffffffff, 0x000000c2,
+	0x88d4, 0xffffffff, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000,
+	0xc78, 0x00000080, 0x00000080,
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5e78, 0xffffffff, 0x001000f0,
+	0x6104, 0x01000300, 0x00000000,
+	0x5bc0, 0x00300000, 0x00000000,
+	0x7030, 0xffffffff, 0x00000011,
+	0x7c30, 0xffffffff, 0x00000011,
+	0x10830, 0xffffffff, 0x00000011,
+	0x11430, 0xffffffff, 0x00000011,
+	0x12030, 0xffffffff, 0x00000011,
+	0x12c30, 0xffffffff, 0x00000011,
+	0xd02c, 0xffffffff, 0x08421000,
+	0x240c, 0xffffffff, 0x00000380,
+	0x8b24, 0xffffffff, 0x00ff0fff,
+	0x28a4c, 0x06000000, 0x06000000,
+	0x10c, 0x00000001, 0x00000001,
+	0x8d00, 0xffffffff, 0x100e4848,
+	0x8d04, 0xffffffff, 0x00164745,
+	0x8c00, 0xffffffff, 0xe4000003,
+	0x8c04, 0xffffffff, 0x40600060,
+	0x8c08, 0xffffffff, 0x001c001c,
+	0x8cf0, 0xffffffff, 0x08e00620,
+	0x8c20, 0xffffffff, 0x00800080,
+	0x8c24, 0xffffffff, 0x00800080,
+	0x8c18, 0xffffffff, 0x20202078,
+	0x8c1c, 0xffffffff, 0x00001010,
+	0x28350, 0xffffffff, 0x00000000,
+	0xa008, 0xffffffff, 0x00010000,
+	0x5c4, 0xffffffff, 0x00000001,
+	0x9508, 0xffffffff, 0x00000002,
+	0x913c, 0x0000000f, 0x0000000a
+};
+
+static const u32 evergreen_golden_registers2[] =
+{
+	0x2f4c, 0xffffffff, 0x00000000,
+	0x54f4, 0xffffffff, 0x00000000,
+	0x54f0, 0xffffffff, 0x00000000,
+	0x5498, 0xffffffff, 0x00000000,
+	0x549c, 0xffffffff, 0x00000000,
+	0x5494, 0xffffffff, 0x00000000,
+	0x53cc, 0xffffffff, 0x00000000,
+	0x53c8, 0xffffffff, 0x00000000,
+	0x53c4, 0xffffffff, 0x00000000,
+	0x53c0, 0xffffffff, 0x00000000,
+	0x53bc, 0xffffffff, 0x00000000,
+	0x53b8, 0xffffffff, 0x00000000,
+	0x53b4, 0xffffffff, 0x00000000,
+	0x53b0, 0xffffffff, 0x00000000
+};
+
+static const u32 cypress_mgcg_init[] =
+{
+	0x802c, 0xffffffff, 0xc0000000,
+	0x5448, 0xffffffff, 0x00000100,
+	0x55e4, 0xffffffff, 0x00000100,
+	0x160c, 0xffffffff, 0x00000100,
+	0x5644, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x8d58, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x9654, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0x9040, 0xffffffff, 0x00000100,
+	0xa200, 0xffffffff, 0x00000100,
+	0xa204, 0xffffffff, 0x00000100,
+	0xa208, 0xffffffff, 0x00000100,
+	0xa20c, 0xffffffff, 0x00000100,
+	0x971c, 0xffffffff, 0x00000100,
+	0x977c, 0xffffffff, 0x00000100,
+	0x3f80, 0xffffffff, 0x00000100,
+	0xa210, 0xffffffff, 0x00000100,
+	0xa214, 0xffffffff, 0x00000100,
+	0x4d8, 0xffffffff, 0x00000100,
+	0x9784, 0xffffffff, 0x00000100,
+	0x9698, 0xffffffff, 0x00000100,
+	0x4d4, 0xffffffff, 0x00000200,
+	0x30cc, 0xffffffff, 0x00000100,
+	0xd0c0, 0xffffffff, 0xff000100,
+	0x802c, 0xffffffff, 0x40000000,
+	0x915c, 0xffffffff, 0x00010000,
+	0x9160, 0xffffffff, 0x00030002,
+	0x9178, 0xffffffff, 0x00070000,
+	0x917c, 0xffffffff, 0x00030002,
+	0x9180, 0xffffffff, 0x00050004,
+	0x918c, 0xffffffff, 0x00010006,
+	0x9190, 0xffffffff, 0x00090008,
+	0x9194, 0xffffffff, 0x00070000,
+	0x9198, 0xffffffff, 0x00030002,
+	0x919c, 0xffffffff, 0x00050004,
+	0x91a8, 0xffffffff, 0x00010006,
+	0x91ac, 0xffffffff, 0x00090008,
+	0x91b0, 0xffffffff, 0x00070000,
+	0x91b4, 0xffffffff, 0x00030002,
+	0x91b8, 0xffffffff, 0x00050004,
+	0x91c4, 0xffffffff, 0x00010006,
+	0x91c8, 0xffffffff, 0x00090008,
+	0x91cc, 0xffffffff, 0x00070000,
+	0x91d0, 0xffffffff, 0x00030002,
+	0x91d4, 0xffffffff, 0x00050004,
+	0x91e0, 0xffffffff, 0x00010006,
+	0x91e4, 0xffffffff, 0x00090008,
+	0x91e8, 0xffffffff, 0x00000000,
+	0x91ec, 0xffffffff, 0x00070000,
+	0x91f0, 0xffffffff, 0x00030002,
+	0x91f4, 0xffffffff, 0x00050004,
+	0x9200, 0xffffffff, 0x00010006,
+	0x9204, 0xffffffff, 0x00090008,
+	0x9208, 0xffffffff, 0x00070000,
+	0x920c, 0xffffffff, 0x00030002,
+	0x9210, 0xffffffff, 0x00050004,
+	0x921c, 0xffffffff, 0x00010006,
+	0x9220, 0xffffffff, 0x00090008,
+	0x9224, 0xffffffff, 0x00070000,
+	0x9228, 0xffffffff, 0x00030002,
+	0x922c, 0xffffffff, 0x00050004,
+	0x9238, 0xffffffff, 0x00010006,
+	0x923c, 0xffffffff, 0x00090008,
+	0x9240, 0xffffffff, 0x00070000,
+	0x9244, 0xffffffff, 0x00030002,
+	0x9248, 0xffffffff, 0x00050004,
+	0x9254, 0xffffffff, 0x00010006,
+	0x9258, 0xffffffff, 0x00090008,
+	0x925c, 0xffffffff, 0x00070000,
+	0x9260, 0xffffffff, 0x00030002,
+	0x9264, 0xffffffff, 0x00050004,
+	0x9270, 0xffffffff, 0x00010006,
+	0x9274, 0xffffffff, 0x00090008,
+	0x9278, 0xffffffff, 0x00070000,
+	0x927c, 0xffffffff, 0x00030002,
+	0x9280, 0xffffffff, 0x00050004,
+	0x928c, 0xffffffff, 0x00010006,
+	0x9290, 0xffffffff, 0x00090008,
+	0x9294, 0xffffffff, 0x00000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x802c, 0xffffffff, 0x40010000,
+	0x915c, 0xffffffff, 0x00010000,
+	0x9160, 0xffffffff, 0x00030002,
+	0x9178, 0xffffffff, 0x00070000,
+	0x917c, 0xffffffff, 0x00030002,
+	0x9180, 0xffffffff, 0x00050004,
+	0x918c, 0xffffffff, 0x00010006,
+	0x9190, 0xffffffff, 0x00090008,
+	0x9194, 0xffffffff, 0x00070000,
+	0x9198, 0xffffffff, 0x00030002,
+	0x919c, 0xffffffff, 0x00050004,
+	0x91a8, 0xffffffff, 0x00010006,
+	0x91ac, 0xffffffff, 0x00090008,
+	0x91b0, 0xffffffff, 0x00070000,
+	0x91b4, 0xffffffff, 0x00030002,
+	0x91b8, 0xffffffff, 0x00050004,
+	0x91c4, 0xffffffff, 0x00010006,
+	0x91c8, 0xffffffff, 0x00090008,
+	0x91cc, 0xffffffff, 0x00070000,
+	0x91d0, 0xffffffff, 0x00030002,
+	0x91d4, 0xffffffff, 0x00050004,
+	0x91e0, 0xffffffff, 0x00010006,
+	0x91e4, 0xffffffff, 0x00090008,
+	0x91e8, 0xffffffff, 0x00000000,
+	0x91ec, 0xffffffff, 0x00070000,
+	0x91f0, 0xffffffff, 0x00030002,
+	0x91f4, 0xffffffff, 0x00050004,
+	0x9200, 0xffffffff, 0x00010006,
+	0x9204, 0xffffffff, 0x00090008,
+	0x9208, 0xffffffff, 0x00070000,
+	0x920c, 0xffffffff, 0x00030002,
+	0x9210, 0xffffffff, 0x00050004,
+	0x921c, 0xffffffff, 0x00010006,
+	0x9220, 0xffffffff, 0x00090008,
+	0x9224, 0xffffffff, 0x00070000,
+	0x9228, 0xffffffff, 0x00030002,
+	0x922c, 0xffffffff, 0x00050004,
+	0x9238, 0xffffffff, 0x00010006,
+	0x923c, 0xffffffff, 0x00090008,
+	0x9240, 0xffffffff, 0x00070000,
+	0x9244, 0xffffffff, 0x00030002,
+	0x9248, 0xffffffff, 0x00050004,
+	0x9254, 0xffffffff, 0x00010006,
+	0x9258, 0xffffffff, 0x00090008,
+	0x925c, 0xffffffff, 0x00070000,
+	0x9260, 0xffffffff, 0x00030002,
+	0x9264, 0xffffffff, 0x00050004,
+	0x9270, 0xffffffff, 0x00010006,
+	0x9274, 0xffffffff, 0x00090008,
+	0x9278, 0xffffffff, 0x00070000,
+	0x927c, 0xffffffff, 0x00030002,
+	0x9280, 0xffffffff, 0x00050004,
+	0x928c, 0xffffffff, 0x00010006,
+	0x9290, 0xffffffff, 0x00090008,
+	0x9294, 0xffffffff, 0x00000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 redwood_mgcg_init[] =
+{
+	0x802c, 0xffffffff, 0xc0000000,
+	0x5448, 0xffffffff, 0x00000100,
+	0x55e4, 0xffffffff, 0x00000100,
+	0x160c, 0xffffffff, 0x00000100,
+	0x5644, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x8d58, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x9654, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0x9040, 0xffffffff, 0x00000100,
+	0xa200, 0xffffffff, 0x00000100,
+	0xa204, 0xffffffff, 0x00000100,
+	0xa208, 0xffffffff, 0x00000100,
+	0xa20c, 0xffffffff, 0x00000100,
+	0x971c, 0xffffffff, 0x00000100,
+	0x977c, 0xffffffff, 0x00000100,
+	0x3f80, 0xffffffff, 0x00000100,
+	0xa210, 0xffffffff, 0x00000100,
+	0xa214, 0xffffffff, 0x00000100,
+	0x4d8, 0xffffffff, 0x00000100,
+	0x9784, 0xffffffff, 0x00000100,
+	0x9698, 0xffffffff, 0x00000100,
+	0x4d4, 0xffffffff, 0x00000200,
+	0x30cc, 0xffffffff, 0x00000100,
+	0xd0c0, 0xffffffff, 0xff000100,
+	0x802c, 0xffffffff, 0x40000000,
+	0x915c, 0xffffffff, 0x00010000,
+	0x9160, 0xffffffff, 0x00030002,
+	0x9178, 0xffffffff, 0x00070000,
+	0x917c, 0xffffffff, 0x00030002,
+	0x9180, 0xffffffff, 0x00050004,
+	0x918c, 0xffffffff, 0x00010006,
+	0x9190, 0xffffffff, 0x00090008,
+	0x9194, 0xffffffff, 0x00070000,
+	0x9198, 0xffffffff, 0x00030002,
+	0x919c, 0xffffffff, 0x00050004,
+	0x91a8, 0xffffffff, 0x00010006,
+	0x91ac, 0xffffffff, 0x00090008,
+	0x91b0, 0xffffffff, 0x00070000,
+	0x91b4, 0xffffffff, 0x00030002,
+	0x91b8, 0xffffffff, 0x00050004,
+	0x91c4, 0xffffffff, 0x00010006,
+	0x91c8, 0xffffffff, 0x00090008,
+	0x91cc, 0xffffffff, 0x00070000,
+	0x91d0, 0xffffffff, 0x00030002,
+	0x91d4, 0xffffffff, 0x00050004,
+	0x91e0, 0xffffffff, 0x00010006,
+	0x91e4, 0xffffffff, 0x00090008,
+	0x91e8, 0xffffffff, 0x00000000,
+	0x91ec, 0xffffffff, 0x00070000,
+	0x91f0, 0xffffffff, 0x00030002,
+	0x91f4, 0xffffffff, 0x00050004,
+	0x9200, 0xffffffff, 0x00010006,
+	0x9204, 0xffffffff, 0x00090008,
+	0x9294, 0xffffffff, 0x00000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 cedar_golden_registers[] =
+{
+	0x3f90, 0xffff0000, 0xff000000,
+	0x9148, 0xffff0000, 0xff000000,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x9b7c, 0xffffffff, 0x00000000,
+	0x8a14, 0xffffffff, 0x00000007,
+	0x8b10, 0xffffffff, 0x00000000,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0xffffffff, 0x000000c2,
+	0x88d4, 0xffffffff, 0x00000000,
+	0x8974, 0xffffffff, 0x00000000,
+	0xc78, 0x00000080, 0x00000080,
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5e78, 0xffffffff, 0x001000f0,
+	0x6104, 0x01000300, 0x00000000,
+	0x5bc0, 0x00300000, 0x00000000,
+	0x7030, 0xffffffff, 0x00000011,
+	0x7c30, 0xffffffff, 0x00000011,
+	0x10830, 0xffffffff, 0x00000011,
+	0x11430, 0xffffffff, 0x00000011,
+	0xd02c, 0xffffffff, 0x08421000,
+	0x240c, 0xffffffff, 0x00000380,
+	0x8b24, 0xffffffff, 0x00ff0fff,
+	0x28a4c, 0x06000000, 0x06000000,
+	0x10c, 0x00000001, 0x00000001,
+	0x8d00, 0xffffffff, 0x100e4848,
+	0x8d04, 0xffffffff, 0x00164745,
+	0x8c00, 0xffffffff, 0xe4000003,
+	0x8c04, 0xffffffff, 0x40600060,
+	0x8c08, 0xffffffff, 0x001c001c,
+	0x8cf0, 0xffffffff, 0x08e00410,
+	0x8c20, 0xffffffff, 0x00800080,
+	0x8c24, 0xffffffff, 0x00800080,
+	0x8c18, 0xffffffff, 0x20202078,
+	0x8c1c, 0xffffffff, 0x00001010,
+	0x28350, 0xffffffff, 0x00000000,
+	0xa008, 0xffffffff, 0x00010000,
+	0x5c4, 0xffffffff, 0x00000001,
+	0x9508, 0xffffffff, 0x00000002
+};
+
+static const u32 cedar_mgcg_init[] =
+{
+	0x802c, 0xffffffff, 0xc0000000,
+	0x5448, 0xffffffff, 0x00000100,
+	0x55e4, 0xffffffff, 0x00000100,
+	0x160c, 0xffffffff, 0x00000100,
+	0x5644, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x8d58, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x9654, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0x9040, 0xffffffff, 0x00000100,
+	0xa200, 0xffffffff, 0x00000100,
+	0xa204, 0xffffffff, 0x00000100,
+	0xa208, 0xffffffff, 0x00000100,
+	0xa20c, 0xffffffff, 0x00000100,
+	0x971c, 0xffffffff, 0x00000100,
+	0x977c, 0xffffffff, 0x00000100,
+	0x3f80, 0xffffffff, 0x00000100,
+	0xa210, 0xffffffff, 0x00000100,
+	0xa214, 0xffffffff, 0x00000100,
+	0x4d8, 0xffffffff, 0x00000100,
+	0x9784, 0xffffffff, 0x00000100,
+	0x9698, 0xffffffff, 0x00000100,
+	0x4d4, 0xffffffff, 0x00000200,
+	0x30cc, 0xffffffff, 0x00000100,
+	0xd0c0, 0xffffffff, 0xff000100,
+	0x802c, 0xffffffff, 0x40000000,
+	0x915c, 0xffffffff, 0x00010000,
+	0x9178, 0xffffffff, 0x00050000,
+	0x917c, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00010004,
+	0x9190, 0xffffffff, 0x00070006,
+	0x9194, 0xffffffff, 0x00050000,
+	0x9198, 0xffffffff, 0x00030002,
+	0x91a8, 0xffffffff, 0x00010004,
+	0x91ac, 0xffffffff, 0x00070006,
+	0x91e8, 0xffffffff, 0x00000000,
+	0x9294, 0xffffffff, 0x00000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 juniper_mgcg_init[] =
+{
+	0x802c, 0xffffffff, 0xc0000000,
+	0x5448, 0xffffffff, 0x00000100,
+	0x55e4, 0xffffffff, 0x00000100,
+	0x160c, 0xffffffff, 0x00000100,
+	0x5644, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x8d58, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x9654, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0x9040, 0xffffffff, 0x00000100,
+	0xa200, 0xffffffff, 0x00000100,
+	0xa204, 0xffffffff, 0x00000100,
+	0xa208, 0xffffffff, 0x00000100,
+	0xa20c, 0xffffffff, 0x00000100,
+	0x971c, 0xffffffff, 0x00000100,
+	0xd0c0, 0xffffffff, 0xff000100,
+	0x802c, 0xffffffff, 0x40000000,
+	0x915c, 0xffffffff, 0x00010000,
+	0x9160, 0xffffffff, 0x00030002,
+	0x9178, 0xffffffff, 0x00070000,
+	0x917c, 0xffffffff, 0x00030002,
+	0x9180, 0xffffffff, 0x00050004,
+	0x918c, 0xffffffff, 0x00010006,
+	0x9190, 0xffffffff, 0x00090008,
+	0x9194, 0xffffffff, 0x00070000,
+	0x9198, 0xffffffff, 0x00030002,
+	0x919c, 0xffffffff, 0x00050004,
+	0x91a8, 0xffffffff, 0x00010006,
+	0x91ac, 0xffffffff, 0x00090008,
+	0x91b0, 0xffffffff, 0x00070000,
+	0x91b4, 0xffffffff, 0x00030002,
+	0x91b8, 0xffffffff, 0x00050004,
+	0x91c4, 0xffffffff, 0x00010006,
+	0x91c8, 0xffffffff, 0x00090008,
+	0x91cc, 0xffffffff, 0x00070000,
+	0x91d0, 0xffffffff, 0x00030002,
+	0x91d4, 0xffffffff, 0x00050004,
+	0x91e0, 0xffffffff, 0x00010006,
+	0x91e4, 0xffffffff, 0x00090008,
+	0x91e8, 0xffffffff, 0x00000000,
+	0x91ec, 0xffffffff, 0x00070000,
+	0x91f0, 0xffffffff, 0x00030002,
+	0x91f4, 0xffffffff, 0x00050004,
+	0x9200, 0xffffffff, 0x00010006,
+	0x9204, 0xffffffff, 0x00090008,
+	0x9208, 0xffffffff, 0x00070000,
+	0x920c, 0xffffffff, 0x00030002,
+	0x9210, 0xffffffff, 0x00050004,
+	0x921c, 0xffffffff, 0x00010006,
+	0x9220, 0xffffffff, 0x00090008,
+	0x9224, 0xffffffff, 0x00070000,
+	0x9228, 0xffffffff, 0x00030002,
+	0x922c, 0xffffffff, 0x00050004,
+	0x9238, 0xffffffff, 0x00010006,
+	0x923c, 0xffffffff, 0x00090008,
+	0x9240, 0xffffffff, 0x00070000,
+	0x9244, 0xffffffff, 0x00030002,
+	0x9248, 0xffffffff, 0x00050004,
+	0x9254, 0xffffffff, 0x00010006,
+	0x9258, 0xffffffff, 0x00090008,
+	0x925c, 0xffffffff, 0x00070000,
+	0x9260, 0xffffffff, 0x00030002,
+	0x9264, 0xffffffff, 0x00050004,
+	0x9270, 0xffffffff, 0x00010006,
+	0x9274, 0xffffffff, 0x00090008,
+	0x9278, 0xffffffff, 0x00070000,
+	0x927c, 0xffffffff, 0x00030002,
+	0x9280, 0xffffffff, 0x00050004,
+	0x928c, 0xffffffff, 0x00010006,
+	0x9290, 0xffffffff, 0x00090008,
+	0x9294, 0xffffffff, 0x00000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x802c, 0xffffffff, 0xc0000000,
+	0x977c, 0xffffffff, 0x00000100,
+	0x3f80, 0xffffffff, 0x00000100,
+	0xa210, 0xffffffff, 0x00000100,
+	0xa214, 0xffffffff, 0x00000100,
+	0x4d8, 0xffffffff, 0x00000100,
+	0x9784, 0xffffffff, 0x00000100,
+	0x9698, 0xffffffff, 0x00000100,
+	0x4d4, 0xffffffff, 0x00000200,
+	0x30cc, 0xffffffff, 0x00000100,
+	0x802c, 0xffffffff, 0xc0000000
+};
+
+static const u32 supersumo_golden_registers[] =
+{
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5c4, 0xffffffff, 0x00000001,
+	0x7030, 0xffffffff, 0x00000011,
+	0x7c30, 0xffffffff, 0x00000011,
+	0x6104, 0x01000300, 0x00000000,
+	0x5bc0, 0x00300000, 0x00000000,
+	0x8c04, 0xffffffff, 0x40600060,
+	0x8c08, 0xffffffff, 0x001c001c,
+	0x8c20, 0xffffffff, 0x00800080,
+	0x8c24, 0xffffffff, 0x00800080,
+	0x8c18, 0xffffffff, 0x20202078,
+	0x8c1c, 0xffffffff, 0x00001010,
+	0x918c, 0xffffffff, 0x00010006,
+	0x91a8, 0xffffffff, 0x00010006,
+	0x91c4, 0xffffffff, 0x00010006,
+	0x91e0, 0xffffffff, 0x00010006,
+	0x9200, 0xffffffff, 0x00010006,
+	0x9150, 0xffffffff, 0x6e944040,
+	0x917c, 0xffffffff, 0x00030002,
+	0x9180, 0xffffffff, 0x00050004,
+	0x9198, 0xffffffff, 0x00030002,
+	0x919c, 0xffffffff, 0x00050004,
+	0x91b4, 0xffffffff, 0x00030002,
+	0x91b8, 0xffffffff, 0x00050004,
+	0x91d0, 0xffffffff, 0x00030002,
+	0x91d4, 0xffffffff, 0x00050004,
+	0x91f0, 0xffffffff, 0x00030002,
+	0x91f4, 0xffffffff, 0x00050004,
+	0x915c, 0xffffffff, 0x00010000,
+	0x9160, 0xffffffff, 0x00030002,
+	0x3f90, 0xffff0000, 0xff000000,
+	0x9178, 0xffffffff, 0x00070000,
+	0x9194, 0xffffffff, 0x00070000,
+	0x91b0, 0xffffffff, 0x00070000,
+	0x91cc, 0xffffffff, 0x00070000,
+	0x91ec, 0xffffffff, 0x00070000,
+	0x9148, 0xffff0000, 0xff000000,
+	0x9190, 0xffffffff, 0x00090008,
+	0x91ac, 0xffffffff, 0x00090008,
+	0x91c8, 0xffffffff, 0x00090008,
+	0x91e4, 0xffffffff, 0x00090008,
+	0x9204, 0xffffffff, 0x00090008,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x5644, 0xffffffff, 0x00000100,
+	0x9b7c, 0xffffffff, 0x00000000,
+	0x8030, 0xffffffff, 0x0000100a,
+	0x8a14, 0xffffffff, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ff0fff,
+	0x8b10, 0xffffffff, 0x00000000,
+	0x28a4c, 0x06000000, 0x06000000,
+	0x4d8, 0xffffffff, 0x00000100,
+	0x913c, 0xffff000f, 0x0100000a,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0xffffffff, 0x000000c2,
+	0x88d4, 0xffffffff, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000,
+	0xc78, 0x00000080, 0x00000080,
+	0x5e78, 0xffffffff, 0x001000f0,
+	0xd02c, 0xffffffff, 0x08421000,
+	0xa008, 0xffffffff, 0x00010000,
+	0x8d00, 0xffffffff, 0x100e4848,
+	0x8d04, 0xffffffff, 0x00164745,
+	0x8c00, 0xffffffff, 0xe4000003,
+	0x8cf0, 0x1fffffff, 0x08e00620,
+	0x28350, 0xffffffff, 0x00000000,
+	0x9508, 0xffffffff, 0x00000002
+};
+
+static const u32 sumo_golden_registers[] =
+{
+	0x900c, 0x00ffffff, 0x0017071f,
+	0x8c18, 0xffffffff, 0x10101060,
+	0x8c1c, 0xffffffff, 0x00001010,
+	0x8c30, 0x0000000f, 0x00000005,
+	0x9688, 0x0000000f, 0x00000007
+};
+
+static const u32 wrestler_golden_registers[] =
+{
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5c4, 0xffffffff, 0x00000001,
+	0x7030, 0xffffffff, 0x00000011,
+	0x7c30, 0xffffffff, 0x00000011,
+	0x6104, 0x01000300, 0x00000000,
+	0x5bc0, 0x00300000, 0x00000000,
+	0x918c, 0xffffffff, 0x00010006,
+	0x91a8, 0xffffffff, 0x00010006,
+	0x9150, 0xffffffff, 0x6e944040,
+	0x917c, 0xffffffff, 0x00030002,
+	0x9198, 0xffffffff, 0x00030002,
+	0x915c, 0xffffffff, 0x00010000,
+	0x3f90, 0xffff0000, 0xff000000,
+	0x9178, 0xffffffff, 0x00070000,
+	0x9194, 0xffffffff, 0x00070000,
+	0x9148, 0xffff0000, 0xff000000,
+	0x9190, 0xffffffff, 0x00090008,
+	0x91ac, 0xffffffff, 0x00090008,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x929c, 0xffffffff, 0x00000001,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x9b7c, 0xffffffff, 0x00000000,
+	0x8030, 0xffffffff, 0x0000100a,
+	0x8a14, 0xffffffff, 0x00000001,
+	0x8b24, 0xffffffff, 0x00ff0fff,
+	0x8b10, 0xffffffff, 0x00000000,
+	0x28a4c, 0x06000000, 0x06000000,
+	0x4d8, 0xffffffff, 0x00000100,
+	0x913c, 0xffff000f, 0x0100000a,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0xffffffff, 0x000000c2,
+	0x88d4, 0xffffffff, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000,
+	0xc78, 0x00000080, 0x00000080,
+	0x5e78, 0xffffffff, 0x001000f0,
+	0xd02c, 0xffffffff, 0x08421000,
+	0xa008, 0xffffffff, 0x00010000,
+	0x8d00, 0xffffffff, 0x100e4848,
+	0x8d04, 0xffffffff, 0x00164745,
+	0x8c00, 0xffffffff, 0xe4000003,
+	0x8cf0, 0x1fffffff, 0x08e00410,
+	0x28350, 0xffffffff, 0x00000000,
+	0x9508, 0xffffffff, 0x00000002,
+	0x900c, 0xffffffff, 0x0017071f,
+	0x8c18, 0xffffffff, 0x10101060,
+	0x8c1c, 0xffffffff, 0x00001010
+};
+
+static const u32 barts_golden_registers[] =
+{
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5e78, 0x8f311ff1, 0x001000f0,
+	0x3f90, 0xffff0000, 0xff000000,
+	0x9148, 0xffff0000, 0xff000000,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0xc78, 0x00000080, 0x00000080,
+	0xbd4, 0x70073777, 0x00010001,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd0b8, 0x03773777, 0x02011003,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x98f8, 0x33773777, 0x02011003,
+	0x98fc, 0xffffffff, 0x76543210,
+	0x7030, 0x31000311, 0x00000011,
+	0x2f48, 0x00000007, 0x02011003,
+	0x6b28, 0x00000010, 0x00000012,
+	0x7728, 0x00000010, 0x00000012,
+	0x10328, 0x00000010, 0x00000012,
+	0x10f28, 0x00000010, 0x00000012,
+	0x11b28, 0x00000010, 0x00000012,
+	0x12728, 0x00000010, 0x00000012,
+	0x240c, 0x000007ff, 0x00000380,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x10c, 0x00000001, 0x00010003,
+	0xa02c, 0xffffffff, 0x0000009b,
+	0x913c, 0x0000000f, 0x0100000a,
+	0x8d00, 0xffff7f7f, 0x100e4848,
+	0x8d04, 0x00ffffff, 0x00164745,
+	0x8c00, 0xfffc0003, 0xe4000003,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x8c08, 0x00ff00ff, 0x001c001c,
+	0x8cf0, 0x1fff1fff, 0x08e00620,
+	0x8c20, 0x0fff0fff, 0x00800080,
+	0x8c24, 0x0fff0fff, 0x00800080,
+	0x8c18, 0xffffffff, 0x20202078,
+	0x8c1c, 0x0000ffff, 0x00001010,
+	0x28350, 0x00000f01, 0x00000000,
+	0x9508, 0x3700001f, 0x00000002,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0x001f3ae3, 0x000000c2,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 turks_golden_registers[] =
+{
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5e78, 0x8f311ff1, 0x001000f0,
+	0x8c8, 0x00003000, 0x00001070,
+	0x8cc, 0x000fffff, 0x00040035,
+	0x3f90, 0xffff0000, 0xfff00000,
+	0x9148, 0xffff0000, 0xfff00000,
+	0x3f94, 0xffff0000, 0xfff00000,
+	0x914c, 0xffff0000, 0xfff00000,
+	0xc78, 0x00000080, 0x00000080,
+	0xbd4, 0x00073007, 0x00010002,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd0b8, 0x03773777, 0x02010002,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x98f8, 0x33773777, 0x00010002,
+	0x98fc, 0xffffffff, 0x33221100,
+	0x7030, 0x31000311, 0x00000011,
+	0x2f48, 0x33773777, 0x00010002,
+	0x6b28, 0x00000010, 0x00000012,
+	0x7728, 0x00000010, 0x00000012,
+	0x10328, 0x00000010, 0x00000012,
+	0x10f28, 0x00000010, 0x00000012,
+	0x11b28, 0x00000010, 0x00000012,
+	0x12728, 0x00000010, 0x00000012,
+	0x240c, 0x000007ff, 0x00000380,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x10c, 0x00000001, 0x00010003,
+	0xa02c, 0xffffffff, 0x0000009b,
+	0x913c, 0x0000000f, 0x0100000a,
+	0x8d00, 0xffff7f7f, 0x100e4848,
+	0x8d04, 0x00ffffff, 0x00164745,
+	0x8c00, 0xfffc0003, 0xe4000003,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x8c08, 0x00ff00ff, 0x001c001c,
+	0x8cf0, 0x1fff1fff, 0x08e00410,
+	0x8c20, 0x0fff0fff, 0x00800080,
+	0x8c24, 0x0fff0fff, 0x00800080,
+	0x8c18, 0xffffffff, 0x20202078,
+	0x8c1c, 0x0000ffff, 0x00001010,
+	0x28350, 0x00000f01, 0x00000000,
+	0x9508, 0x3700001f, 0x00000002,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0x001f3ae3, 0x000000c2,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 caicos_golden_registers[] =
+{
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5e78, 0x8f311ff1, 0x001000f0,
+	0x8c8, 0x00003420, 0x00001450,
+	0x8cc, 0x000fffff, 0x00040035,
+	0x3f90, 0xffff0000, 0xfffc0000,
+	0x9148, 0xffff0000, 0xfffc0000,
+	0x3f94, 0xffff0000, 0xfffc0000,
+	0x914c, 0xffff0000, 0xfffc0000,
+	0xc78, 0x00000080, 0x00000080,
+	0xbd4, 0x00073007, 0x00010001,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd0b8, 0x03773777, 0x02010001,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x98f8, 0x33773777, 0x02010001,
+	0x98fc, 0xffffffff, 0x33221100,
+	0x7030, 0x31000311, 0x00000011,
+	0x2f48, 0x33773777, 0x02010001,
+	0x6b28, 0x00000010, 0x00000012,
+	0x7728, 0x00000010, 0x00000012,
+	0x10328, 0x00000010, 0x00000012,
+	0x10f28, 0x00000010, 0x00000012,
+	0x11b28, 0x00000010, 0x00000012,
+	0x12728, 0x00000010, 0x00000012,
+	0x240c, 0x000007ff, 0x00000380,
+	0x8a14, 0xf000001f, 0x00000001,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x10c, 0x00000001, 0x00010003,
+	0xa02c, 0xffffffff, 0x0000009b,
+	0x913c, 0x0000000f, 0x0100000a,
+	0x8d00, 0xffff7f7f, 0x100e4848,
+	0x8d04, 0x00ffffff, 0x00164745,
+	0x8c00, 0xfffc0003, 0xe4000003,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x8c08, 0x00ff00ff, 0x001c001c,
+	0x8cf0, 0x1fff1fff, 0x08e00410,
+	0x8c20, 0x0fff0fff, 0x00800080,
+	0x8c24, 0x0fff0fff, 0x00800080,
+	0x8c18, 0xffffffff, 0x20202078,
+	0x8c1c, 0x0000ffff, 0x00001010,
+	0x28350, 0x00000f01, 0x00000000,
+	0x9508, 0x3700001f, 0x00000002,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0x001f3ae3, 0x000000c2,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000
+};
+
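+/**
+ * evergreen_init_golden_registers - program golden register settings.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Programs the family-specific "golden" register sequences (recommended
+ * register values) for the detected asic (evergreen+).
+ */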
+static void evergreen_init_golden_registers(struct radeon_device *rdev)
+{
+	switch (rdev->family) {
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers2,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+		radeon_program_register_sequence(rdev,
+						 cypress_mgcg_init,
+						 (const u32)ARRAY_SIZE(cypress_mgcg_init));
+		break;
+	case CHIP_JUNIPER:
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers2,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+		radeon_program_register_sequence(rdev,
+						 juniper_mgcg_init,
+						 (const u32)ARRAY_SIZE(juniper_mgcg_init));
+		break;
+	case CHIP_REDWOOD:
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers2,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+		radeon_program_register_sequence(rdev,
+						 redwood_mgcg_init,
+						 (const u32)ARRAY_SIZE(redwood_mgcg_init));
+		break;
+	case CHIP_CEDAR:
+		radeon_program_register_sequence(rdev,
+						 cedar_golden_registers,
+						 (const u32)ARRAY_SIZE(cedar_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 evergreen_golden_registers2,
+						 (const u32)ARRAY_SIZE(evergreen_golden_registers2));
+		radeon_program_register_sequence(rdev,
+						 cedar_mgcg_init,
+						 (const u32)ARRAY_SIZE(cedar_mgcg_init));
+		break;
+	case CHIP_PALM:
+		radeon_program_register_sequence(rdev,
+						 wrestler_golden_registers,
+						 (const u32)ARRAY_SIZE(wrestler_golden_registers));
+		break;
+	case CHIP_SUMO:
+		radeon_program_register_sequence(rdev,
+						 supersumo_golden_registers,
+						 (const u32)ARRAY_SIZE(supersumo_golden_registers));
+		break;
+	case CHIP_SUMO2:
+		radeon_program_register_sequence(rdev,
+						 supersumo_golden_registers,
+						 (const u32)ARRAY_SIZE(supersumo_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 sumo_golden_registers,
+						 (const u32)ARRAY_SIZE(sumo_golden_registers));
+		break;
+	case CHIP_BARTS:
+		radeon_program_register_sequence(rdev,
+						 barts_golden_registers,
+						 (const u32)ARRAY_SIZE(barts_golden_registers));
+		break;
+	case CHIP_TURKS:
+		radeon_program_register_sequence(rdev,
+						 turks_golden_registers,
+						 (const u32)ARRAY_SIZE(turks_golden_registers));
+		break;
+	case CHIP_CAICOS:
+		radeon_program_register_sequence(rdev,
+						 caicos_golden_registers,
+						 (const u32)ARRAY_SIZE(caicos_golden_registers));
+		break;
+	default:
+		break;
+	}
+}
+
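+/**
+ * evergreen_tiling_fields - decode tiling flags into surface register fields.
+ *
+ * @tiling_flags: radeon tiling flags for the buffer
+ * @bankw: returned bank width register encoding
+ * @bankh: returned bank height register encoding
+ * @mtaspect: returned macro tile aspect register encoding
+ * @tile_split: returned tile split register encoding
+ *
+ * Extracts the bank width, bank height, macro tile aspect and tile
+ * split fields from the tiling flags and converts each to its
+ * EVERGREEN_ADDR_SURF_* register encoding.
+ */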
+void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
+			     unsigned *bankh, unsigned *mtaspect,
+			     unsigned *tile_split)
+{
+	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
+	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
+	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
+	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
+	switch (*bankw) {
+	default:
+	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
+	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
+	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
+	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
+	}
+	switch (*bankh) {
+	default:
+	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
+	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
+	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
+	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
+	}
+	switch (*mtaspect) {
+	default:
+	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
+	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
+	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
+	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
+	}
+}
+
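+/**
+ * sumo_set_uvd_clock - program a single UVD clock.
+ *
+ * @rdev: radeon_device pointer
+ * @clock: requested clock frequency
+ * @cntl_reg: clock control register to program
+ * @status_reg: status register to poll
+ *
+ * Looks up the dividers for the requested clock via the atom tables,
+ * programs the post divider and polls the status register until the
+ * clock reports stable.
+ * Returns 0 on success, -ETIMEDOUT if the clock does not stabilize.
+ */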
+static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
+			      u32 cntl_reg, u32 status_reg)
+{
+	int r, i;
+	struct atom_clock_dividers dividers;
+
+	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+					   clock, false, &dividers);
+	if (r)
+		return r;
+
+	WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));
+
+	for (i = 0; i < 100; i++) {
+		if (RREG32(status_reg) & DCLK_STATUS)
+			break;
+		mdelay(10);
+	}
+	if (i == 100)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
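+/**
+ * sumo_set_uvd_clocks - set the UVD vclk and dclk (sumo).
+ *
+ * @rdev: radeon_device pointer
+ * @vclk: requested vclk
+ * @dclk: requested dclk
+ *
+ * Programs both UVD clocks and records the resulting frequencies in
+ * CG_SCRATCH1 (vclk in the low 16 bits, dclk in the high 16 bits).
+ * Returns 0 on success, error code on failure.
+ */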
+int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+	int r = 0;
+	u32 cg_scratch = RREG32(CG_SCRATCH1);
+
+	r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
+	if (r)
+		goto done;
+	cg_scratch &= 0xffff0000;
+	cg_scratch |= vclk / 100; /* MHz */
+
+	r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
+	if (r)
+		goto done;
+	cg_scratch &= 0x0000ffff;
+	cg_scratch |= (dclk / 100) << 16; /* MHz */
+
+done:
+	WREG32(CG_SCRATCH1, cg_scratch);
+
+	return r;
+}
+
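+/**
+ * evergreen_set_uvd_clocks - set the UVD vclk and dclk (evergreen).
+ *
+ * @rdev: radeon_device pointer
+ * @vclk: requested vclk
+ * @dclk: requested dclk
+ *
+ * Reprograms the UVD PLL: sources vclk and dclk from bclk, recomputes
+ * the feedback and post dividers, then walks the PLL through its
+ * reset/sleep/divider-programming sequence before switching the clock
+ * selection back to the PLL outputs.  If either requested clock is
+ * zero, the PLL is left in bypass mode and put to sleep.
+ * Returns 0 on success, error code on failure.
+ */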
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+	/* start off with something large */
+	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+	int r;
+
+	/* bypass vclk and dclk with bclk */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	/* put PLL in bypass mode */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+	if (!vclk || !dclk) {
+		/* keep the bypass mode, put PLL to sleep */
+		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+		return 0;
+	}
+
+	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+					  16384, 0x03FFFFFF, 0, 128, 5,
+					  &fb_div, &vclk_div, &dclk_div);
+	if (r)
+		return r;
+
+	/* set VCO_MODE to 1 */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+	/* toggle UPLL_SLEEP to 1 then back to 0 */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+	/* deassert UPLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(1);
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* assert UPLL_RESET again */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+	/* disable spread spectrum. */
+	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+	/* set feedback divider */
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+	/* set ref divider to 0 */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+	if (fb_div < 307200)
+		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+	else
+		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
+
+	/* set PDIV_A and PDIV_B */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+		~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+	/* give the PLL some time to settle */
+	mdelay(15);
+
+	/* deassert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* switch from bypass mode to normal mode */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* switch VCLK and DCLK selection */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
+	return 0;
+}
+
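+/**
+ * evergreen_fix_pci_max_read_req_size - sanitize the PCIe max read request size.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Reads the PCIe device control word and, if the max read request size
+ * field holds an invalid encoding, rewrites it to the 512 byte setting.
+ */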
+void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
+{
+	u16 ctl, v;
+	int err;
+
+	err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
+	if (err)
+		return;
+
+	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
+
+	/* if the BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value,
+	 * fix it to avoid hangs or performance issues
+	 */
+	if ((v == 0) || (v == 6) || (v == 7)) {
+		ctl &= ~PCI_EXP_DEVCTL_READRQ;
+		ctl |= (2 << 12);
+		pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
+	}
+}
+
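+/**
+ * dce4_is_in_vblank - check if the crtc is in the vertical blank period.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to check
+ *
+ * Returns true if the crtc status register reports vertical blank.
+ */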
+static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+	if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+		return true;
+	else
+		return false;
+}
+
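+/**
+ * dce4_is_counter_moving - check if the crtc position counter is advancing.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to check
+ *
+ * Reads the crtc position register twice; if the two reads differ,
+ * the scanout counter is still moving.
+ */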
+static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+	u32 pos1, pos2;
+
+	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+	if (pos1 != pos2)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * dce4_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
+void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+	unsigned i = 0;
+
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+		return;
+
+	/* depending on when we hit vblank, we may be close to active; if so,
+	 * wait for another frame.
+	 */
+	while (dce4_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!dce4_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+
+	while (!dce4_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!dce4_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+}
+
+/**
+ * evergreen_pre_page_flip - pre-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to prepare for pageflip on
+ *
+ * Pre-pageflip callback (evergreen+).
+ * Enables the pageflip irq (vblank irq).
+ */
+void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+/**
+ * evergreen_post_page_flip - post-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to cleanup pageflip on
+ *
+ * Post-pageflip callback (evergreen+).
+ * Disables the pageflip irq (vblank irq).
+ */
+void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+/**
+ * evergreen_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to cleanup pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (evergreen+).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high, when it does, we release the lock, and allow the
+ * double buffered update to take place.
+ * Returns the current update pending status.
+ */
+u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+	int i;
+
+	/* Lock the graphics update lock */
+	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(crtc_base));
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(crtc_base));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+			break;
+		udelay(1);
+	}
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
+}
+
+/**
+ * evergreen_get_temp - thermal sensor callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Reads the on-die thermal sensor (using a juniper-specific calibration
+ * offset where applicable) and returns the temperature in millidegrees
+ * Celsius.
+ */
+int evergreen_get_temp(struct radeon_device *rdev)
+{
+	u32 temp, toffset;
+	int actual_temp = 0;
+
+	if (rdev->family == CHIP_JUNIPER) {
+		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
+			TOFFSET_SHIFT;
+		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
+			TS0_ADC_DOUT_SHIFT;
+
+		if (toffset & 0x100)
+			actual_temp = temp / 2 - (0x200 - toffset);
+		else
+			actual_temp = temp / 2 + toffset;
+
+		actual_temp = actual_temp * 1000;
+
+	} else {
+		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+			ASIC_T_SHIFT;
+
+		if (temp & 0x400)
+			actual_temp = -256;
+		else if (temp & 0x200)
+			actual_temp = 255;
+		else if (temp & 0x100) {
+			actual_temp = temp & 0x1ff;
+			actual_temp |= ~0x1ff;
+		} else
+			actual_temp = temp & 0xff;
+
+		actual_temp = (actual_temp * 1000) / 2;
+	}
+
+	return actual_temp;
+}
+
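+/**
+ * sumo_get_temp - thermal sensor callback (sumo).
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Reads the thermal status register and converts the raw value
+ * (offset by 49) to millidegrees Celsius.
+ */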
+int sumo_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
+	int actual_temp = temp - 49;
+
+	return actual_temp * 1000;
+}
+
+/**
+ * sumo_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (sumo, trinity, SI).
+ * Used for profile mode only.
+ */
+void sumo_pm_init_profile(struct radeon_device *rdev)
+{
+	int idx;
+
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+
+	/* low,mid sh/mh */
+	if (rdev->flags & RADEON_IS_MOBILITY)
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+	else
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+
+	/* high sh/mh */
+	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
+		rdev->pm.power_state[idx].num_clock_modes - 1;
+
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
+		rdev->pm.power_state[idx].num_clock_modes - 1;
+}
+
+/**
+ * btc_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (BTC, cayman).
+ * Used for profile mode only.
+ */
+void btc_pm_init_profile(struct radeon_device *rdev)
+{
+	int idx;
+
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+	/* starting with BTC, there is one state that is used for both
+	 * MH and SH.  The difference is that we always use the high clock index for
+	 * mclk.
+	 */
+	if (rdev->flags & RADEON_IS_MOBILITY)
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+	else
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* mid sh */
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* mid mh */
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+}
+
+/**
+ * evergreen_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, etc.) (evergreen+).
+ */
+void evergreen_pm_misc(struct radeon_device *rdev)
+{
+	int req_ps_idx = rdev->pm.requested_power_state_index;
+	int req_cm_idx = rdev->pm.requested_clock_mode_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+	if (voltage->type == VOLTAGE_SW) {
+		/* 0xff01 is a flag rather than an actual voltage */
+		if (voltage->voltage == 0xff01)
+			return;
+		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
+			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
+			rdev->pm.current_vddc = voltage->voltage;
+			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
+		}
+
+		/* starting with BTC, there is one state that is used for both
+		 * MH and SH.  The difference is that we always use the high clock index for
+		 * mclk and vddci.
+		 */
+		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+		    (rdev->family >= CHIP_BARTS) &&
+		    rdev->pm.active_crtc_count &&
+		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+			voltage = &rdev->pm.power_state[req_ps_idx].
+				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
+
+		/* 0xff01 is a flag rather than an actual voltage */
+		if (voltage->vddci == 0xff01)
+			return;
+		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
+			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
+			rdev->pm.current_vddci = voltage->vddci;
+			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
+		}
+	}
+}
+
+/**
+ * evergreen_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (evergreen+).
+ */
+void evergreen_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+/**
+ * evergreen_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (evergreen+).
+ */
+void evergreen_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+/**
+ * evergreen_hpd_sense - hpd sense callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	bool connected = false;
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_3:
+		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_4:
+		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_5:
+		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_6:
+		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+			connected = true;
+		break;
+	default:
+		break;
+	}
+
+	return connected;
+}
+
+/**
+ * evergreen_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+				enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = evergreen_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_3:
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_4:
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_5:
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_6:
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		if (connected)
+			tmp &= ~DC_HPDx_INT_POLARITY;
+		else
+			tmp |= DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * evergreen_hpd_init - hpd setup callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+void evergreen_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned enabled = 0;
+	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
+		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS to avoid breaking the
+			 * aux dp channel on iMac; this helps (but does not completely
+			 * fix) https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * and also avoids interrupt storms during dpms.
+			 */
+			continue;
+		}
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(DC_HPD1_CONTROL, tmp);
+			break;
+		case RADEON_HPD_2:
+			WREG32(DC_HPD2_CONTROL, tmp);
+			break;
+		case RADEON_HPD_3:
+			WREG32(DC_HPD3_CONTROL, tmp);
+			break;
+		case RADEON_HPD_4:
+			WREG32(DC_HPD4_CONTROL, tmp);
+			break;
+		case RADEON_HPD_5:
+			WREG32(DC_HPD5_CONTROL, tmp);
+			break;
+		case RADEON_HPD_6:
+			WREG32(DC_HPD6_CONTROL, tmp);
+			break;
+		default:
+			break;
+		}
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+		enabled |= 1 << radeon_connector->hpd.hpd;
+	}
+	radeon_irq_kms_enable_hpd(rdev, enabled);
+}
+
+/**
+ * evergreen_hpd_fini - hpd tear down callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+void evergreen_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned disabled = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(DC_HPD1_CONTROL, 0);
+			break;
+		case RADEON_HPD_2:
+			WREG32(DC_HPD2_CONTROL, 0);
+			break;
+		case RADEON_HPD_3:
+			WREG32(DC_HPD3_CONTROL, 0);
+			break;
+		case RADEON_HPD_4:
+			WREG32(DC_HPD4_CONTROL, 0);
+			break;
+		case RADEON_HPD_5:
+			WREG32(DC_HPD5_CONTROL, 0);
+			break;
+		case RADEON_HPD_6:
+			WREG32(DC_HPD6_CONTROL, 0);
+			break;
+		default:
+			break;
+		}
+		disabled |= 1 << radeon_connector->hpd.hpd;
+	}
+	radeon_irq_kms_disable_hpd(rdev, disabled);
+}
+
+/* watermark setup */
+
+static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+					struct radeon_crtc *radeon_crtc,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *other_mode)
+{
+	u32 tmp, buffer_alloc, i;
+	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
+	/*
+	 * Line Buffer Setup
+	 * There are 3 line buffers, each one shared by 2 display controllers.
+	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers.  The partitioning is done via one of four
+	 * preset allocations specified in bits 2:0:
+	 * first display controller
+	 *  0 - first half of lb (3840 * 2)
+	 *  1 - first 3/4 of lb (5760 * 2)
+	 *  2 - whole lb (7680 * 2), other crtc must be disabled
+	 *  3 - first 1/4 of lb (1920 * 2)
+	 * second display controller
+	 *  4 - second half of lb (3840 * 2)
+	 *  5 - second 3/4 of lb (5760 * 2)
+	 *  6 - whole lb (7680 * 2), other crtc must be disabled
+	 *  7 - last 1/4 of lb (1920 * 2)
+	 */
+	/* this can get tricky if we have two large displays on a paired group
+	 * of crtcs.  Ideally for multiple large displays we'd assign them to
+	 * non-linked crtcs for maximum line buffer allocation.
+	 */
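+	/* Illustrative example (not from the spec): with two active heads on
+	 * a linked pair, each crtc gets tmp = 0 (or 4 for the second crtc of
+	 * the pair below), i.e. half of the line buffer, which per the
+	 * returns at the end of this function is 3840 * 2 entries on DCE4
+	 * and 4096 * 2 on DCE5.
+	 */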
+	if (radeon_crtc->base.enabled && mode) {
+		if (other_mode) {
+			tmp = 0; /* 1/2 */
+			buffer_alloc = 1;
+		} else {
+			tmp = 2; /* whole */
+			buffer_alloc = 2;
+		}
+	} else {
+		tmp = 0;
+		buffer_alloc = 0;
+	}
+
+	/* second controller of the pair uses second half of the lb */
+	if (radeon_crtc->crtc_id % 2)
+		tmp += 4;
+	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+
+	if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+		WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+		       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+			    DMIF_BUFFERS_ALLOCATED_COMPLETED)
+				break;
+			udelay(1);
+		}
+	}
+
+	if (radeon_crtc->base.enabled && mode) {
+		switch (tmp) {
+		case 0:
+		case 4:
+		default:
+			if (ASIC_IS_DCE5(rdev))
+				return 4096 * 2;
+			else
+				return 3840 * 2;
+		case 1:
+		case 5:
+			if (ASIC_IS_DCE5(rdev))
+				return 6144 * 2;
+			else
+				return 5760 * 2;
+		case 2:
+		case 6:
+			if (ASIC_IS_DCE5(rdev))
+				return 8192 * 2;
+			else
+				return 7680 * 2;
+		case 3:
+		case 7:
+			if (ASIC_IS_DCE5(rdev))
+				return 2048 * 2;
+			else
+				return 1920 * 2;
+		}
+	}
+
+	/* controller not enabled, so no lb used */
+	return 0;
+}
+
+u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	case 3:
+		return 8;
+	}
+}
+
+struct evergreen_wm_params {
+	u32 dram_channels; /* number of dram channels */
+	u32 yclk;          /* bandwidth per dram data pin in kHz */
+	u32 sclk;          /* engine clock in kHz */
+	u32 disp_clk;      /* display clock in kHz */
+	u32 src_width;     /* viewport width */
+	u32 active_time;   /* active display time in ns */
+	u32 blank_time;    /* blank time in ns */
+	bool interlaced;    /* mode is interlaced */
+	fixed20_12 vsc;    /* vertical scale ratio */
+	u32 num_heads;     /* number of active crtcs */
+	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+	u32 lb_size;       /* line buffer allocated to pipe */
+	u32 vtaps;         /* vertical scaler taps */
+};
+
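+/* The watermark math below uses the drm fixed20_12 type (20.12 fixed point,
+ * see include/drm/drm_fixed.h): dfixed_const(x) encodes the integer x, so
+ * fractions such as 0.7 are built as dfixed_const(7) / dfixed_const(10).
+ */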
+static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
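+	/* e.g. (illustrative numbers): yclk = 1000000 kHz (1 GHz effective)
+	 * and two dram channels give 8 bytes/clk * 1000 MHz * 0.7, roughly
+	 * 5600 MB/s.
+	 */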
+	fixed20_12 dram_efficiency; /* 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	dram_efficiency.full = dfixed_const(7);
+	dram_efficiency.full = dfixed_div(dram_efficiency, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
+	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the display Data return Bandwidth */
+	fixed20_12 return_efficiency; /* 0.8 */
+	fixed20_12 sclk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(10);
+	return_efficiency.full = dfixed_const(8);
+	return_efficiency.full = dfixed_div(return_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, sclk);
+	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the DMIF Request Bandwidth */
+	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+	fixed20_12 disp_clk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	disp_clk.full = dfixed_const(wm->disp_clk);
+	disp_clk.full = dfixed_div(disp_clk, a);
+	a.full = dfixed_const(10);
+	disp_clk_request_efficiency.full = dfixed_const(8);
+	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, disp_clk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
+	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
+	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
+	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
+
+	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the display mode Average Bandwidth
+	 * DisplayMode should contain the source and destination dimensions,
+	 * timing, etc.
+	 */
+	fixed20_12 bpp;
+	fixed20_12 line_time;
+	fixed20_12 src_width;
+	fixed20_12 bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+	line_time.full = dfixed_div(line_time, a);
+	bpp.full = dfixed_const(wm->bytes_per_pixel);
+	src_width.full = dfixed_const(wm->src_width);
+	bandwidth.full = dfixed_mul(src_width, bpp);
+	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+	bandwidth.full = dfixed_div(bandwidth, line_time);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
+{
+	/* First calculate the latency in ns */
+	u32 mc_latency = 2000; /* 2000 ns. */
+	u32 available_bandwidth = evergreen_available_bandwidth(wm);
+	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+		(wm->num_heads * cursor_line_pair_return_time);
+	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
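+	/* available_bandwidth is in MB/s, i.e. bytes/us, so the
+	 * (bytes * 1000) / bandwidth expressions above yield ns; the chunk
+	 * and cursor terms model the worst-case wait for the other heads'
+	 * outstanding requests.
+	 */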
+	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+	fixed20_12 a, b, c;
+
+	if (wm->num_heads == 0)
+		return 0;
+
+	a.full = dfixed_const(2);
+	b.full = dfixed_const(1);
+	if ((wm->vsc.full > a.full) ||
+	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+	    (wm->vtaps >= 5) ||
+	    ((wm->vsc.full >= a.full) && wm->interlaced))
+		max_src_lines_per_dst_line = 4;
+	else
+		max_src_lines_per_dst_line = 2;
+
+	a.full = dfixed_const(available_bandwidth);
+	b.full = dfixed_const(wm->num_heads);
+	a.full = dfixed_div(a, b);
+
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(wm->disp_clk);
+	b.full = dfixed_div(c, b);
+	c.full = dfixed_const(wm->bytes_per_pixel);
+	b.full = dfixed_mul(b, c);
+
+	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
+
+	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(lb_fill_bw);
+	b.full = dfixed_div(c, b);
+	a.full = dfixed_div(a, b);
+	line_fill_time = dfixed_trunc(a);
+
+	if (line_fill_time < wm->active_time)
+		return latency;
+	else
+		return latency + (line_fill_time - wm->active_time);
+
+}
+
+static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+	if (evergreen_average_bandwidth(wm) <=
+	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
+static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
+{
+	if (evergreen_average_bandwidth(wm) <=
+	    (evergreen_available_bandwidth(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
+static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
+{
+	u32 lb_partitions = wm->lb_size / wm->src_width;
+	u32 line_time = wm->active_time + wm->blank_time;
+	u32 latency_tolerant_lines;
+	u32 latency_hiding;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1);
+	if (wm->vsc.full > a.full)
+		latency_tolerant_lines = 1;
+	else {
+		if (lb_partitions <= (wm->vtaps + 1))
+			latency_tolerant_lines = 1;
+		else
+			latency_tolerant_lines = 2;
+	}
+
+	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+	if (evergreen_latency_watermark(wm) <= latency_hiding)
+		return true;
+	else
+		return false;
+}
+
+static void evergreen_program_watermarks(struct radeon_device *rdev,
+					 struct radeon_crtc *radeon_crtc,
+					 u32 lb_size, u32 num_heads)
+{
+	struct drm_display_mode *mode = &radeon_crtc->base.mode;
+	struct evergreen_wm_params wm;
+	u32 pixel_period;
+	u32 line_time = 0;
+	u32 latency_watermark_a = 0, latency_watermark_b = 0;
+	u32 priority_a_mark = 0, priority_b_mark = 0;
+	u32 priority_a_cnt = PRIORITY_OFF;
+	u32 priority_b_cnt = PRIORITY_OFF;
+	u32 pipe_offset = radeon_crtc->crtc_id * 16;
+	u32 tmp, arb_control3;
+	fixed20_12 a, b, c;
+
+	if (radeon_crtc->base.enabled && num_heads && mode) {
+		pixel_period = 1000000 / (u32)mode->clock;
+		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		priority_a_cnt = 0;
+		priority_b_cnt = 0;
+
+		wm.yclk = rdev->pm.current_mclk * 10;
+		wm.sclk = rdev->pm.current_sclk * 10;
+		wm.disp_clk = mode->clock;
+		wm.src_width = mode->crtc_hdisplay;
+		wm.active_time = mode->crtc_hdisplay * pixel_period;
+		wm.blank_time = line_time - wm.active_time;
+		wm.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm.interlaced = true;
+		wm.vsc = radeon_crtc->vsc;
+		wm.vtaps = 1;
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			wm.vtaps = 2;
+		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm.lb_size = lb_size;
+		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
+		wm.num_heads = num_heads;
+
+		/* set for high clocks */
+		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
+		/* set for low clocks */
+		/* wm.yclk = low clk; wm.sclk = low clk */
+		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
+
+		/* possibly force display priority to high */
+		/* should really do this at mode validation time... */
+		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
+		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
+		    !evergreen_check_latency_hiding(&wm) ||
+		    (rdev->disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority to high\n");
+			priority_a_cnt |= PRIORITY_ALWAYS_ON;
+			priority_b_cnt |= PRIORITY_ALWAYS_ON;
+		}
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_a);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_a_mark = dfixed_trunc(c);
+		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_b);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_b_mark = dfixed_trunc(c);
+		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+	}
+
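+	/* LATENCY_WATERMARK_MASK(3) clears both watermark-select bits;
+	 * writing select = 1 and then 2 programs the A and B register sets
+	 * in turn before the original selection is restored below.
+	 */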
+	/* select wm A */
+	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+	tmp = arb_control3;
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(1);
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* select wm B */
+	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(2);
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* restore original selection */
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
+
+	/* write the priority marks */
+	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
+	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
+
+}
+
+/**
+ * evergreen_bandwidth_update - update display watermarks callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Update the display watermarks based on the requested mode(s)
+ * (evergreen+).
+ */
+void evergreen_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 num_heads = 0, lb_size;
+	int i;
+
+	radeon_update_display_priority(rdev);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->mode_info.crtcs[i]->base.enabled)
+			num_heads++;
+	}
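+	/* line buffers are shared between a pair of display controllers, so
+	 * walk the crtcs two at a time and size each half of the pair
+	 * against its partner's mode.
+	 */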
+	for (i = 0; i < rdev->num_crtc; i += 2) {
+		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
+		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
+		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
+		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
+		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
+	}
+}
+
+/**
+ * evergreen_mc_wait_for_idle - wait for MC idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Wait for the MC (memory controller) to be idle.
+ * (evergreen+).
+ * Returns 0 if the MC is idle, -1 if not.
+ */
+int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(SRBM_STATUS) & 0x1F00;
+		if (!tmp)
+			return 0;
+		udelay(1);
+	}
+	return -1;
+}
+
+/*
+ * GART
+ */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+		if (tmp == 2) {
+			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
+			return;
+		}
+		if (tmp) {
+			return;
+		}
+		udelay(1);
+	}
+}
+
+static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int r;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	if (rdev->flags & RADEON_IS_IGP) {
+		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
+		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
+		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
+	} else {
+		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+		if ((rdev->family == CHIP_JUNIPER) ||
+		    (rdev->family == CHIP_CYPRESS) ||
+		    (rdev->family == CHIP_HEMLOCK) ||
+		    (rdev->family == CHIP_BARTS))
+			WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
+	}
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
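+	/* the GART range and the page table base are programmed in units of
+	 * 4KB pages, hence the >> 12 shifts below.
+	 */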
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT1_CNTL, 0);
+
+	evergreen_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* Disable all tables */
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
+{
+	evergreen_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
+
+
+static void evergreen_agp_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+}
+
+void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+	u32 crtc_enabled, tmp, frame_count, blackout;
+	int i, j;
+
+	if (!ASIC_IS_NODCE(rdev)) {
+		save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+		save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+		/* disable VGA render */
+		WREG32(VGA_RENDER_CONTROL, 0);
+	}
+	/* blank the display controllers */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+		if (crtc_enabled) {
+			save->crtc_enabled[i] = true;
+			if (ASIC_IS_DCE6(rdev)) {
+				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+					radeon_wait_for_vblank(rdev, i);
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+				}
+			} else {
+				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
+					radeon_wait_for_vblank(rdev, i);
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+					WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+				}
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+
+			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			save->crtc_enabled[i] = false;
+		} else {
+			save->crtc_enabled[i] = false;
+		}
+	}
+
+	radeon_mc_wait_for_idle(rdev);
+
+	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	if ((blackout & BLACKOUT_MODE_MASK) != 1) {
+		/* Block CPU access */
+		WREG32(BIF_FB_EN, 0);
+		/* blackout the MC */
+		blackout &= ~BLACKOUT_MODE_MASK;
+		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+	}
+	/* wait for the MC to settle */
+	udelay(100);
+
+	/* lock double buffered regs */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+			if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
+				tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (!(tmp & 1)) {
+				tmp |= 1;
+				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+		}
+	}
+}
+
+void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+	u32 tmp, frame_count;
+	int i, j;
+
+	/* update crtc base addresses */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+	}
+
+	if (!ASIC_IS_NODCE(rdev)) {
+		WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+	}
+
+	/* unlock regs and wait for update */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+			if ((tmp & 0x3) != 0) {
+				tmp &= ~0x3;
+				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (tmp & 1) {
+				tmp &= ~1;
+				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+					break;
+				udelay(1);
+			}
+		}
+	}
+
+	/* unblackout the MC */
+	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	tmp &= ~BLACKOUT_MODE_MASK;
+	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+	/* allow CPU access */
+	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			if (ASIC_IS_DCE6(rdev)) {
+				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			} else {
+				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+		}
+	}
+	if (!ASIC_IS_NODCE(rdev)) {
+		/* Unlock vga access */
+		WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+		mdelay(1);
+		WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+	}
+}
+
+void evergreen_mc_program(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	/* Lockout access through VGA aperture*/
+	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	if (rdev->flags & RADEON_IS_AGP) {
+		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+			/* VRAM before AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.vram_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.gtt_end >> 12);
+		} else {
+			/* VRAM after AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.gtt_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.vram_end >> 12);
+		}
+	} else {
+		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+			rdev->mc.vram_start >> 12);
+		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+			rdev->mc.vram_end >> 12);
+	}
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+	/* llano/ontario only */
+	if ((rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2)) {
+		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
+		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
+		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
+		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
+	}
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
+		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+	} else {
+		WREG32(MC_VM_AGP_BASE, 0);
+		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	}
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	evergreen_mc_resume(rdev, &save);
+	/* we need to own VRAM, so turn off the VGA renderer here
+	 * to stop it overwriting our objects */
+	rv515_vga_render_disable(rdev);
+}
+
+/*
+ * CP.
+ */
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	u32 next_rptr;
+
+	/* set to DX10/11 mode */
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
+
+	if (ring->rptr_save_reg) {
+		next_rptr = ring->wptr + 3 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, ((ring->rptr_save_reg -
+					  PACKET3_SET_CONFIG_REG_START) >> 2));
+		radeon_ring_write(ring, next_rptr);
+	} else if (rdev->wb.enabled) {
+		next_rptr = ring->wptr + 5 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+		radeon_ring_write(ring, next_rptr);
+		radeon_ring_write(ring, 0);
+	}
+
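+	/* the INDIRECT_BUFFER packet takes the dword-aligned lower 32 bits
+	 * of the IB address, the upper address bits, and the IB length in
+	 * dwords; on big-endian builds an endian-swap mode is or'ed into
+	 * the low bits of the address dword.
+	 */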
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw);
+}
+
+
+static int evergreen_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	r700_cp_stop(rdev);
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+
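+	/* both microcodes are streamed through an index/data register pair:
+	 * reset the write address, write each big-endian dword, then reset
+	 * the address again when done.
+	 */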
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
+		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+static int evergreen_cp_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r, i;
+	uint32_t cp_me;
+
+	r = radeon_ring_lock(rdev, ring, 7);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
+
+	cp_me = 0xff;
+	WREG32(CP_ME_CNTL, cp_me);
+
+	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+
+	/* setup clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	for (i = 0; i < evergreen_default_size; i++)
+		radeon_ring_write(ring, evergreen_default_state[i]);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	/* set clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
+
+	/* SQ_VTX_BASE_VTX_LOC */
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+
+	/* Clear consts */
+	radeon_ring_write(ring, 0xc0036f00);
+	radeon_ring_write(ring, 0x00000bc4);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+
+	radeon_ring_write(ring, 0xc0026900);
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /*  */
+
+	radeon_ring_unlock_commit(rdev, ring);
+
+	return 0;
+}
+
+static int evergreen_cp_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+				 SOFT_RESET_PA |
+				 SOFT_RESET_SH |
+				 SOFT_RESET_VGT |
+				 SOFT_RESET_SPI |
+				 SOFT_RESET_SX));
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+	RREG32(GRBM_SOFT_RESET);
+
+	/* Set ring buffer size */
+	rb_bufsz = drm_order(ring->ring_size / 8);
+	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
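+	/* RB_BUFSZ/RB_BLKSZ are log2 encoded; drm_order() returns the
+	 * smallest order (log2) that covers the requested size.
+	 */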
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB_CNTL, tmp);
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+	WREG32(CP_RB_RPTR_WR, 0);
+	ring->wptr = 0;
+	WREG32(CP_RB_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB_RPTR_ADDR,
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
+	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+	if (rdev->wb.enabled)
+		WREG32(SCRATCH_UMSK, 0xff);
+	else {
+		tmp |= RB_NO_UPDATE;
+		WREG32(SCRATCH_UMSK, 0);
+	}
+
+	mdelay(1);
+	WREG32(CP_RB_CNTL, tmp);
+
+	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
+	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+	ring->rptr = RREG32(CP_RB_RPTR);
+
+	evergreen_cp_start(rdev);
+	ring->ready = true;
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+	return 0;
+}
+
+/*
+ * Core functions
+ */
+static void evergreen_gpu_init(struct radeon_device *rdev)
+{
+	u32 gb_addr_config;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 sx_debug_1;
+	u32 smx_dc_ctl0;
+	u32 sq_config;
+	u32 sq_lds_resource_mgmt;
+	u32 sq_gpr_resource_mgmt_1;
+	u32 sq_gpr_resource_mgmt_2;
+	u32 sq_gpr_resource_mgmt_3;
+	u32 sq_thread_resource_mgmt;
+	u32 sq_thread_resource_mgmt_2;
+	u32 sq_stack_resource_mgmt_1;
+	u32 sq_stack_resource_mgmt_2;
+	u32 sq_stack_resource_mgmt_3;
+	u32 vgt_cache_invalidation;
+	u32 hdp_host_path_cntl, tmp;
+	u32 disabled_rb_mask;
+	int i, j, num_shader_engines, ps_thread_count;
+
+	switch (rdev->family) {
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		rdev->config.evergreen.num_ses = 2;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 8;
+		rdev->config.evergreen.max_simds = 10;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_JUNIPER:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 10;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_REDWOOD:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 5;
+		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_CEDAR:
+	default:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_PALM:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_SUMO:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		if (rdev->pdev->device == 0x9648)
+			rdev->config.evergreen.max_simds = 3;
+		else if ((rdev->pdev->device == 0x9647) ||
+			 (rdev->pdev->device == 0x964a))
+			rdev->config.evergreen.max_simds = 4;
+		else
+			rdev->config.evergreen.max_simds = 5;
+		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_SUMO2:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_BARTS:
+		rdev->config.evergreen.num_ses = 2;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 8;
+		rdev->config.evergreen.max_simds = 7;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_TURKS:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 6;
+		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_CAICOS:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	evergreen_fix_pci_max_read_req_size(rdev);
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	if ((rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2))
+		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
+	else
+		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	/* setup tiling info dword.  gb_addr_config is not adequate since it does
+	 * not have bank info, so create a custom tiling dword.
+	 * bits 3:0   num_pipes
+	 * bits 7:4   num_banks
+	 * bits 11:8  group_size
+	 * bits 15:12 row_size
+	 */
+	rdev->config.evergreen.tile_config = 0;
+	switch (rdev->config.evergreen.max_tile_pipes) {
+	case 1:
+	default:
+		rdev->config.evergreen.tile_config |= (0 << 0);
+		break;
+	case 2:
+		rdev->config.evergreen.tile_config |= (1 << 0);
+		break;
+	case 4:
+		rdev->config.evergreen.tile_config |= (2 << 0);
+		break;
+	case 8:
+		rdev->config.evergreen.tile_config |= (3 << 0);
+		break;
+	}
+	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
+	if (rdev->flags & RADEON_IS_IGP)
+		rdev->config.evergreen.tile_config |= 1 << 4;
+	else {
+		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+		case 0: /* four banks */
+			rdev->config.evergreen.tile_config |= 0 << 4;
+			break;
+		case 1: /* eight banks */
+			rdev->config.evergreen.tile_config |= 1 << 4;
+			break;
+		case 2: /* sixteen banks */
+		default:
+			rdev->config.evergreen.tile_config |= 2 << 4;
+			break;
+		}
+	}
+	rdev->config.evergreen.tile_config |= 0 << 8;
+	rdev->config.evergreen.tile_config |=
+		((gb_addr_config & 0x30000000) >> 28) << 12;
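+	/* illustrative encoding: a 4-pipe, 8-bank asic with a row_size
+	 * field of 2 yields tile_config =
+	 * (2 << 0) | (1 << 4) | (0 << 8) | (2 << 12) = 0x2012.
+	 */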
+
+	num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+
+	if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
+		u32 efuse_straps_4;
+		u32 efuse_straps_3;
+
+		WREG32(RCU_IND_INDEX, 0x204);
+		efuse_straps_4 = RREG32(RCU_IND_DATA);
+		WREG32(RCU_IND_INDEX, 0x203);
+		efuse_straps_3 = RREG32(RCU_IND_DATA);
+		tmp = (((efuse_straps_4 & 0xf) << 4) |
+		      ((efuse_straps_3 & 0xf0000000) >> 28));
+	} else {
+		tmp = 0;
+		for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
+			u32 rb_disable_bitmap;
+
+			WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+			WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+			rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+			tmp <<= 4;
+			tmp |= rb_disable_bitmap;
+		}
+	}
+	/* enabled rbs are just the ones not disabled :) */
+	disabled_rb_mask = tmp;
+	tmp = 0;
+	for (i = 0; i < rdev->config.evergreen.max_backends; i++)
+		tmp |= (1 << i);
+	/* if all the backends are disabled, fix it up here */
+	if ((disabled_rb_mask & tmp) == tmp) {
+		for (i = 0; i < rdev->config.evergreen.max_backends; i++)
+			disabled_rb_mask &= ~(1 << i);
+	}
+
+	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG, gb_addr_config);
+	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+
+	if ((rdev->config.evergreen.max_backends == 1) &&
+	    (rdev->flags & RADEON_IS_IGP)) {
+		if ((disabled_rb_mask & 3) == 1) {
+			/* RB0 disabled, RB1 enabled */
+			tmp = 0x11111111;
+		} else {
+			/* RB1 disabled, RB0 enabled */
+			tmp = 0x00000000;
+		}
+	} else {
+		tmp = gb_addr_config & NUM_PIPES_MASK;
+		tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+						EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+	}
+	WREG32(GB_BACKEND_MAP, tmp);
+
+	WREG32(CGTS_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+				     ROQ_IB2_START(0x2b)));
+
+	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
+			     SYNC_GRADIENT |
+			     SYNC_WALKER |
+			     SYNC_ALIGNER));
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+
+	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+	if (rdev->family <= CHIP_SUMO2)
+		WREG32(SMX_SAR_CTL0, 0x00010000);
+
+	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
+					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
+					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+	WREG32(SPI_CONFIG_CNTL, 0);
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+				  FETCH_FIFO_HIWATER(0x4) |
+				  DONE_FIFO_HIWATER(0xe0) |
+				  ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+	sq_config = RREG32(SQ_CONFIG);
+	sq_config &= ~(PS_PRIO(3) |
+		       VS_PRIO(3) |
+		       GS_PRIO(3) |
+		       ES_PRIO(3));
+	sq_config |= (VC_ENABLE |
+		      EXPORT_SRC_C |
+		      PS_PRIO(0) |
+		      VS_PRIO(1) |
+		      GS_PRIO(2) |
+		      ES_PRIO(3));
+
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+	case CHIP_CAICOS:
+		/* no vertex cache */
+		sq_config &= ~VC_ENABLE;
+		break;
+	default:
+		break;
+	}
+
+	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
+
+	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
+	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
+	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
+	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
+	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
+
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+		ps_thread_count = 96;
+		break;
+	default:
+		ps_thread_count = 128;
+		break;
+	}
+
+	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
+	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
+
+	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
+
+	WREG32(SQ_CONFIG, sq_config);
+	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
+	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
+	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
+	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
+	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
+	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
+	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+	case CHIP_CAICOS:
+		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
+		break;
+	default:
+		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+		break;
+	}
+	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
+	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
+	WREG32(VGT_OUT_DEALLOC_CNTL, 16);
+
+	WREG32(CB_PERF_CTR0_SEL_0, 0);
+	WREG32(CB_PERF_CTR0_SEL_1, 0);
+	WREG32(CB_PERF_CTR1_SEL_0, 0);
+	WREG32(CB_PERF_CTR1_SEL_1, 0);
+	WREG32(CB_PERF_CTR2_SEL_0, 0);
+	WREG32(CB_PERF_CTR2_SEL_1, 0);
+	WREG32(CB_PERF_CTR3_SEL_0, 0);
+	WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+	/* clear render buffer base addresses */
+	WREG32(CB_COLOR0_BASE, 0);
+	WREG32(CB_COLOR1_BASE, 0);
+	WREG32(CB_COLOR2_BASE, 0);
+	WREG32(CB_COLOR3_BASE, 0);
+	WREG32(CB_COLOR4_BASE, 0);
+	WREG32(CB_COLOR5_BASE, 0);
+	WREG32(CB_COLOR6_BASE, 0);
+	WREG32(CB_COLOR7_BASE, 0);
+	WREG32(CB_COLOR8_BASE, 0);
+	WREG32(CB_COLOR9_BASE, 0);
+	WREG32(CB_COLOR10_BASE, 0);
+	WREG32(CB_COLOR11_BASE, 0);
+
+	/* set the shader const cache sizes to 0 */
+	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
+		WREG32(i, 0);
+	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
+		WREG32(i, 0);
+
+	tmp = RREG32(HDP_MISC_CNTL);
+	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+	WREG32(HDP_MISC_CNTL, tmp);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+	udelay(50);
+}
+
+int evergreen_mc_init(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int chansize, numchan;
+
+	/* Get VRAM information */
+	rdev->mc.vram_is_ddr = true;
+	if ((rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2))
+		tmp = RREG32(FUS_MC_ARB_RAMCFG);
+	else
+		tmp = RREG32(MC_ARB_RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE) {
+		chansize = 16;
+	} else if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	}
+	rdev->mc.vram_width = numchan * chansize;
+	/* Could the aperture size report 0 ? */
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	/* Setup GPU memory space */
+	if ((rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2)) {
+		/* size in bytes on fusion */
+		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+	} else {
+		/* size in MB on evergreen/cayman/tn */
+		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	}
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	r700_vram_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+
+	return 0;
+}
+
+void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
+{
+	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
+		RREG32(GRBM_STATUS));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
+		RREG32(GRBM_STATUS_SE0));
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
+		RREG32(GRBM_STATUS_SE1));
+	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
+		RREG32(SRBM_STATUS));
+	dev_info(rdev->dev, "  SRBM_STATUS2              = 0x%08X\n",
+		RREG32(SRBM_STATUS2));
+	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT1));
+	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+		RREG32(CP_STALLED_STAT2));
+	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
+		RREG32(CP_BUSY_STAT));
+	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
+		RREG32(CP_STAT));
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+	if (rdev->family >= CHIP_CAYMAN) {
+		dev_info(rdev->dev, "  R_00D834_DMA_STATUS_REG   = 0x%08X\n",
+			 RREG32(DMA_STATUS_REG + 0x800));
+	}
+}
+
+bool evergreen_is_display_hung(struct radeon_device *rdev)
+{
+	u32 crtc_hung = 0;
+	u32 crtc_status[6];
+	u32 i, j, tmp;
+
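+	/* Sample the HV counter of every enabled CRTC; a CRTC whose counter
+	 * never advances across ten samples taken 100us apart is considered hung.
+	 */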
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
+			crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+			crtc_hung |= (1 << i);
+		}
+	}
+
+	for (j = 0; j < 10; j++) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (crtc_hung & (1 << i)) {
+				tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+				if (tmp != crtc_status[i])
+					crtc_hung &= ~(1 << i);
+			}
+		}
+		if (crtc_hung == 0)
+			return false;
+		udelay(100);
+	}
+
+	return true;
+}
+
+static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+	u32 reset_mask = 0;
+	u32 tmp;
+
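+	/* translate the busy/pending status bits into a mask of blocks to reset */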
+	/* GRBM_STATUS */
+	tmp = RREG32(GRBM_STATUS);
+	if (tmp & (PA_BUSY | SC_BUSY |
+		   SH_BUSY | SX_BUSY |
+		   TA_BUSY | VGT_BUSY |
+		   DB_BUSY | CB_BUSY |
+		   SPI_BUSY | VGT_BUSY_NO_DMA))
+		reset_mask |= RADEON_RESET_GFX;
+
+	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+		   CP_BUSY | CP_COHERENCY_BUSY))
+		reset_mask |= RADEON_RESET_CP;
+
+	if (tmp & GRBM_EE_BUSY)
+		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+	/* DMA_STATUS_REG */
+	tmp = RREG32(DMA_STATUS_REG);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA;
+
+	/* SRBM_STATUS2 */
+	tmp = RREG32(SRBM_STATUS2);
+	if (tmp & DMA_BUSY)
+		reset_mask |= RADEON_RESET_DMA;
+
+	/* SRBM_STATUS */
+	tmp = RREG32(SRBM_STATUS);
+	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+		reset_mask |= RADEON_RESET_RLC;
+
+	if (tmp & IH_BUSY)
+		reset_mask |= RADEON_RESET_IH;
+
+	if (tmp & SEM_BUSY)
+		reset_mask |= RADEON_RESET_SEM;
+
+	if (tmp & GRBM_RQ_PENDING)
+		reset_mask |= RADEON_RESET_GRBM;
+
+	if (tmp & VMC_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+		   MCC_BUSY | MCD_BUSY))
+		reset_mask |= RADEON_RESET_MC;
+
+	if (evergreen_is_display_hung(rdev))
+		reset_mask |= RADEON_RESET_DISPLAY;
+
+	/* VM_L2_STATUS */
+	tmp = RREG32(VM_L2_STATUS);
+	if (tmp & L2_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
+	return reset_mask;
+}
+
+static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct evergreen_mc_save save;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
+
+	if (reset_mask == 0)
+		return;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	evergreen_print_gpu_status_regs(rdev);
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		/* Disable DMA */
+		tmp = RREG32(DMA_RB_CNTL);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL, tmp);
+	}
+
+	udelay(50);
+
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+		grbm_soft_reset |= SOFT_RESET_DB |
+			SOFT_RESET_CB |
+			SOFT_RESET_PA |
+			SOFT_RESET_SC |
+			SOFT_RESET_SPI |
+			SOFT_RESET_SX |
+			SOFT_RESET_SH |
+			SOFT_RESET_TC |
+			SOFT_RESET_TA |
+			SOFT_RESET_VC |
+			SOFT_RESET_VGT;
+	}
+
+	if (reset_mask & RADEON_RESET_CP) {
+		grbm_soft_reset |= SOFT_RESET_CP |
+			SOFT_RESET_VGT;
+
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+	}
+
+	if (reset_mask & RADEON_RESET_DMA)
+		srbm_soft_reset |= SOFT_RESET_DMA;
+
+	if (reset_mask & RADEON_RESET_DISPLAY)
+		srbm_soft_reset |= SOFT_RESET_DC;
+
+	if (reset_mask & RADEON_RESET_RLC)
+		srbm_soft_reset |= SOFT_RESET_RLC;
+
+	if (reset_mask & RADEON_RESET_SEM)
+		srbm_soft_reset |= SOFT_RESET_SEM;
+
+	if (reset_mask & RADEON_RESET_IH)
+		srbm_soft_reset |= SOFT_RESET_IH;
+
+	if (reset_mask & RADEON_RESET_GRBM)
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+
+	if (reset_mask & RADEON_RESET_VMC)
+		srbm_soft_reset |= SOFT_RESET_VMC;
+
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		if (reset_mask & RADEON_RESET_MC)
+			srbm_soft_reset |= SOFT_RESET_MC;
+	}
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(GRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(GRBM_SOFT_RESET, tmp);
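+		/* read back to ensure the write is posted before the delay */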
+		tmp = RREG32(GRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+	}
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
+	evergreen_mc_resume(rdev, &save);
+	udelay(50);
+
+	evergreen_print_gpu_status_regs(rdev);
+}
+
+int evergreen_asic_reset(struct radeon_device *rdev)
+{
+	u32 reset_mask;
+
+	reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+	if (reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, true);
+
+	evergreen_gpu_soft_reset(rdev, reset_mask);
+
+	reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+	if (!reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, false);
+
+	return 0;
+}
+
+/**
+ * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & (RADEON_RESET_GFX |
+			    RADEON_RESET_COMPUTE |
+			    RADEON_RESET_CP))) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force CP activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * evergreen_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & RADEON_RESET_DMA)) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force ring activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/* Interrupts */
+
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+	if (crtc >= rdev->num_crtc)
+		return 0;
+	else
+		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+}
+
+void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		cayman_cp_int_cntl_setup(rdev, 0,
+					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+		cayman_cp_int_cntl_setup(rdev, 1, 0);
+		cayman_cp_int_cntl_setup(rdev, 2, 0);
+		tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+		WREG32(CAYMAN_DMA1_CNTL, tmp);
+	} else
+		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL, tmp);
+	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	if (rdev->num_crtc >= 4) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
+
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
+
+	/* only one DAC on DCE5, so DACA only exists on earlier parts */
+	if (!ASIC_IS_DCE5(rdev))
+		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+
+	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD1_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD2_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD3_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD4_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD5_INT_CONTROL, tmp);
+	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+	WREG32(DC_HPD6_INT_CONTROL, tmp);
+}
+
+int evergreen_irq_set(struct radeon_device *rdev)
+{
+	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
+	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+	u32 grbm_int_cntl = 0;
+	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+	u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
+	u32 dma_cntl, dma_cntl1 = 0;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+		return -EINVAL;
+	}
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled) {
+		r600_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		evergreen_disable_interrupt_state(rdev);
+		return 0;
+	}
+
+	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+
+	afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+	afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+
+	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		/* enable CP interrupts on all rings */
+		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+		}
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
+			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+		}
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
+			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+		}
+	} else {
+		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+			cp_int_cntl |= RB_INT_ENABLE;
+			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+		}
+	}
+
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+			DRM_DEBUG("r600_irq_set: sw int dma1\n");
+			dma_cntl1 |= TRAP_ENABLE;
+		}
+	}
+
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
+		crtc1 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
+		crtc2 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[2] ||
+	    atomic_read(&rdev->irq.pflip[2])) {
+		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
+		crtc3 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[3] ||
+	    atomic_read(&rdev->irq.pflip[3])) {
+		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
+		crtc4 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[4] ||
+	    atomic_read(&rdev->irq.pflip[4])) {
+		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
+		crtc5 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[5] ||
+	    atomic_read(&rdev->irq.pflip[5])) {
+		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
+		crtc6 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
+		hpd1 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[1]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
+		hpd2 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[2]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
+		hpd3 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[3]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
+		hpd4 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[4]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
+		hpd5 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[5]) {
+		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
+		hpd6 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.afmt[0]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
+		afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[1]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
+		afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[2]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
+		afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[3]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
+		afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[4]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
+		afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[5]) {
+		DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
+		afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
+	}
+
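+	/* now commit the accumulated interrupt enables to the hardware */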
+	if (rdev->family >= CHIP_CAYMAN) {
+		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
+		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
+		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
+	} else
+		WREG32(CP_INT_CNTL, cp_int_cntl);
+
+	WREG32(DMA_CNTL, dma_cntl);
+
+	if (rdev->family >= CHIP_CAYMAN)
+		WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
+
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+	if (rdev->num_crtc >= 4) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+	}
+
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+	}
+
+	WREG32(DC_HPD1_INT_CONTROL, hpd1);
+	WREG32(DC_HPD2_INT_CONTROL, hpd2);
+	WREG32(DC_HPD3_INT_CONTROL, hpd3);
+	WREG32(DC_HPD4_INT_CONTROL, hpd4);
+	WREG32(DC_HPD5_INT_CONTROL, hpd5);
+	WREG32(DC_HPD6_INT_CONTROL, hpd6);
+
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
+
+	return 0;
+}
+
+static void evergreen_irq_ack(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	if (rdev->num_crtc >= 4) {
+		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+	}
+	if (rdev->num_crtc >= 6) {
+		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+	}
+
+	rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
+	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+	if (rdev->num_crtc >= 4) {
+		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+	}
+
+	if (rdev->num_crtc >= 6) {
+		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+	}
+
+	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+		tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+		tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
+	}
+}
+
+static void evergreen_irq_disable(struct radeon_device *rdev)
+{
+	r600_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	evergreen_irq_ack(rdev);
+	evergreen_disable_interrupt_state(rdev);
+}
+
+void evergreen_irq_suspend(struct radeon_device *rdev)
+{
+	evergreen_irq_disable(rdev);
+	r600_rlc_stop(rdev);
+}
+
+static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
+{
+	u32 wptr, tmp;
+
+	if (rdev->wb.enabled)
+		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+	else
+		wptr = RREG32(IH_RB_WPTR);
+
+	if (wptr & RB_OVERFLOW) {
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last vector that was not overwritten (wptr + 16).
+		 * Hopefully this allows us to catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & rdev->ih.ptr_mask);
+}
+
+int evergreen_irq_process(struct radeon_device *rdev)
+{
+	u32 wptr;
+	u32 rptr;
+	u32 src_id, src_data;
+	u32 ring_index;
+	bool queue_hotplug = false;
+	bool queue_hdmi = false;
+
+	if (!rdev->ih.enabled || rdev->shutdown)
+		return IRQ_NONE;
+
+	wptr = evergreen_get_ih_wptr(rdev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&rdev->ih.lock, 1))
+		return IRQ_NONE;
+
+	rptr = rdev->ih.rptr;
+	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+	/* Order reading of wptr vs. reading of IH ring data */
+	rmb();
+
+	/* display interrupts */
+	evergreen_irq_ack(rdev);
+
+	while (rptr != wptr) {
+		/* wptr/rptr are in bytes! */
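+		/* each IH vector is 16 bytes; only the first two DWs
+		 * (src_id and src_data) are decoded here
+		 */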
+		ring_index = rptr / 4;
+		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+
+		switch (src_id) {
+		case 1: /* D1 vblank/vline */
+			switch (src_data) {
+			case 0: /* D1 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[0]) {
+						drm_handle_vblank(rdev->ddev, 0);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[0]))
+						radeon_crtc_handle_flip(rdev, 0);
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D1 vblank\n");
+				}
+				break;
+			case 1: /* D1 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D1 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 2: /* D2 vblank/vline */
+			switch (src_data) {
+			case 0: /* D2 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[1]) {
+						drm_handle_vblank(rdev->ddev, 1);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[1]))
+						radeon_crtc_handle_flip(rdev, 1);
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D2 vblank\n");
+				}
+				break;
+			case 1: /* D2 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D2 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 3: /* D3 vblank/vline */
+			switch (src_data) {
+			case 0: /* D3 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[2]) {
+						drm_handle_vblank(rdev->ddev, 2);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[2]))
+						radeon_crtc_handle_flip(rdev, 2);
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D3 vblank\n");
+				}
+				break;
+			case 1: /* D3 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D3 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 4: /* D4 vblank/vline */
+			switch (src_data) {
+			case 0: /* D4 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[3]) {
+						drm_handle_vblank(rdev->ddev, 3);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[3]))
+						radeon_crtc_handle_flip(rdev, 3);
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D4 vblank\n");
+				}
+				break;
+			case 1: /* D4 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D4 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 5: /* D5 vblank/vline */
+			switch (src_data) {
+			case 0: /* D5 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[4]) {
+						drm_handle_vblank(rdev->ddev, 4);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[4]))
+						radeon_crtc_handle_flip(rdev, 4);
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D5 vblank\n");
+				}
+				break;
+			case 1: /* D5 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D5 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 6: /* D6 vblank/vline */
+			switch (src_data) {
+			case 0: /* D6 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[5]) {
+						drm_handle_vblank(rdev->ddev, 5);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[5]))
+						radeon_crtc_handle_flip(rdev, 5);
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D6 vblank\n");
+				}
+				break;
+			case 1: /* D6 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D6 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 42: /* HPD hotplug */
+			switch (src_data) {
+			case 0:
+				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD1\n");
+				}
+				break;
+			case 1:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD2\n");
+				}
+				break;
+			case 2:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD3\n");
+				}
+				break;
+			case 3:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD4\n");
+				}
+				break;
+			case 4:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD5\n");
+				}
+				break;
+			case 5:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD6\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 44: /* hdmi */
+			switch (src_data) {
+			case 0:
+				if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI0\n");
+				}
+				break;
+			case 1:
+				if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI1\n");
+				}
+				break;
+			case 2:
+				if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI2\n");
+				}
+				break;
+			case 3:
+				if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI3\n");
+				}
+				break;
+			case 4:
+				if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI4\n");
+				}
+				break;
+			case 5:
+				if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI5\n");
+				}
+				break;
+			default:
+				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 124: /* UVD */
+			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+			break;
+		case 146:
+		case 147:
+			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+				RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+				RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+			/* reset addr and status */
+			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+			break;
+		case 176: /* CP_INT in ring buffer */
+		case 177: /* CP_INT in IB1 */
+		case 178: /* CP_INT in IB2 */
+			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 181: /* CP EOP event */
+			DRM_DEBUG("IH: CP EOP\n");
+			if (rdev->family >= CHIP_CAYMAN) {
+				switch (src_data) {
+				case 0:
+					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+					break;
+				case 1:
+					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+					break;
+				case 2:
+					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+					break;
+				}
+			} else
+				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
+		case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: GUI idle\n");
+			break;
+		case 244: /* DMA trap event */
+			if (rdev->family >= CHIP_CAYMAN) {
+				DRM_DEBUG("IH: DMA1 trap\n");
+				radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+			}
+			break;
+		default:
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+
+		/* wptr/rptr are in bytes! */
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
+	}
+	if (queue_hotplug)
+		schedule_work(&rdev->hotplug_work);
+	if (queue_hdmi)
+		schedule_work(&rdev->audio_work);
+	rdev->ih.rptr = rptr;
+	WREG32(IH_RB_RPTR, rdev->ih.rptr);
+	atomic_set(&rdev->ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = evergreen_get_ih_wptr(rdev);
+	if (wptr != rptr)
+		goto restart_ih;
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+				   struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+	/* write the fence */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	radeon_ring_write(ring, fence->seq);
+	/* generate an interrupt */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
+	/* flush HDP */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+	radeon_ring_write(ring, 1);
+}
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+				   struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	if (rdev->wb.enabled) {
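+		/* The WRITE packet below occupies 4 DWs, so the IB packet will
+		 * start at ring->wptr + 4, padded up to an offset of 5 (mod 8)
+		 * so that its 3 DWs end on an 8 DW boundary; next_rptr is the
+		 * ring position just past the IB packet.
+		 */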
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * The packet itself is 3 DWs, so it has to start at an offset of
+	 * 5 (mod 8); pad up to that point with NOPs.
+	 */
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+		       uint64_t src_offset, uint64_t dst_offset,
+		       unsigned num_gpu_pages,
+		       struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
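+	/* each COPY packet is 5 DWs and moves at most 0xfffff DWs; the extra
+	 * 11 DWs leave room for the semaphore sync and the fence.
+	 */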
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFFF)
+			cur_size_in_dw = 0xFFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
+static int evergreen_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	/* enable pcie gen2 link */
+	evergreen_pcie_gen2_enable(rdev);
+
+	evergreen_mc_program(rdev);
+
+	if (ASIC_IS_DCE5(rdev)) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+		r = ni_mc_load_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load MC firmware!\n");
+			return r;
+		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+			r = r600_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	}
+
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	if (rdev->flags & RADEON_IS_AGP) {
+		evergreen_agp_enable(rdev);
+	} else {
+		r = evergreen_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+	evergreen_gpu_init(rdev);
+
+	r = evergreen_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy.copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	r = rv770_uvd_resume(rdev);
+	if (!r) {
+		r = radeon_fence_driver_start_ring(rdev,
+						   R600_RING_TYPE_UVD_INDEX);
+		if (r)
+			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+	}
+
+	if (r)
+		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	evergreen_irq_set(rdev);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR, DMA_RB_WPTR,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+	if (r)
+		return r;
+
+	r = evergreen_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = evergreen_cp_resume(rdev);
+	if (r)
+		return r;
+	r = r600_dma_resume(rdev);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	if (ring->ring_size) {
+		r = radeon_ring_init(rdev, ring, ring->ring_size,
+				     R600_WB_UVD_RPTR_OFFSET,
+				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+				     0, 0xfffff, RADEON_CP_PACKET2);
+		if (!r)
+			r = r600_uvd_init(rdev);
+
+		if (r)
+			DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio init failed\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int evergreen_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* reset the asic, the gfx blocks are often in a bad state
+	 * after the driver is unloaded or after a resume
+	 */
+	if (radeon_asic_reset(rdev))
+		dev_warn(rdev->dev, "GPU reset failed !\n");
+	/* Do not reset the GPU before posting: on rv770 hw, unlike on r500 hw,
+	 * posting will perform the tasks necessary to bring the GPU back
+	 * into good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	/* init golden registers */
+	evergreen_init_golden_registers(rdev);
+
+	rdev->accel_working = true;
+	r = evergreen_startup(rdev);
+	if (r) {
+		DRM_ERROR("evergreen startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+
+	return r;
+}
+
+int evergreen_suspend(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r600_uvd_stop(rdev);
+	radeon_uvd_suspend(rdev);
+	r700_cp_stop(rdev);
+	r600_dma_stop(rdev);
+	evergreen_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	evergreen_pcie_gart_disable(rdev);
+
+	return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does little more
+ * than call the asic-specific functions. This should also allow
+ * us to remove a bunch of callbacks like vram_info.
+ */
+int evergreen_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+	/* reset the asic, the gfx blocks are often in a bad state
+	 * after the driver is unloaded or after a resume
+	 */
+	if (radeon_asic_reset(rdev))
+		dev_warn(rdev->dev, "GPU reset failed!\n");
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* init golden registers */
+	evergreen_init_golden_registers(rdev);
+	/* Initialize scratch registers */
+	r600_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			radeon_agp_disable(rdev);
+	}
+	/* initialize memory controller */
+	r = evergreen_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
+	r = radeon_uvd_init(rdev);
+	if (!r) {
+		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+		r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
+			       4096);
+	}
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = evergreen_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r700_cp_fini(rdev);
+		r600_dma_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		evergreen_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	/* Don't start up if the MC ucode is missing on BTC parts.
+	 * The default clocks and voltages before the MC ucode
+	 * is loaded are not sufficient for advanced operations.
+	 */
+	if (ASIC_IS_DCE5(rdev)) {
+		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+			DRM_ERROR("radeon: MC ucode required for NI+.\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+void evergreen_fini(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r600_blit_fini(rdev);
+	r700_cp_fini(rdev);
+	r600_dma_fini(rdev);
+	r600_irq_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	evergreen_pcie_gart_fini(rdev);
+	r600_uvd_stop(rdev);
+	radeon_uvd_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, speed_cntl;
+
+	if (radeon_pcie_gen2 == 0)
+		return;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+		(rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
+		return;
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	if (speed_cntl & LC_CURRENT_DATA_RATE) {
+		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+		return;
+	}
+
+	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/linux-imx/drivers/gpu/drm/radeon/evergreen_blit_kms.c
new file mode 100644
index 0000000..057c87b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -0,0 +1,729 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include "evergreend.h"
+#include "evergreen_blit_shaders.h"
+#include "cayman_blit_shaders.h"
+#include "radeon_blit_common.h"
+
+/* emits 17 */
+static void
+set_render_target(struct radeon_device *rdev, int format,
+		  int w, int h, u64 gpu_addr)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 cb_color_info;
+	int pitch, slice;
+
+	h = ALIGN(h, 8);
+	if (h < 8)
+		h = 8;
+
+	cb_color_info = CB_FORMAT(format) |
+		CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
+		CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
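+	/* pitch is in units of 8 pixels and slice in units of 64 pixels,
+	 * both encoded minus one.
+	 */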
+	pitch = (w / 8) - 1;
+	slice = ((w * h) / 64) - 1;
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+	radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, pitch);
+	radeon_ring_write(ring, slice);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, cb_color_info);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+}
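+
+/*
+ * Worked example for the encoding above (illustrative, not driver code):
+ * pitch and slice are minus-one encoded, pitch in 8-pixel units and
+ * slice in 64-pixel units.  For a 64x32 target, h is already 8-aligned,
+ * so pitch = (64 / 8) - 1 = 7 and slice = ((64 * 32) / 64) - 1 = 31.
+ * The "emits 17" note counts the packet header, the register offset and
+ * the 15 data dwords.
+ */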
+
+/* emits 5dw */
+static void
+cp_set_surface_sync(struct radeon_device *rdev,
+		    u32 sync_type, u32 size,
+		    u64 mc_addr)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 cp_coher_size;
+
+	if (size == 0xffffffff)
+		cp_coher_size = 0xffffffff;
+	else
+		cp_coher_size = ((size + 255) >> 8);
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		/* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
+		 * to the RB directly. For IBs, the CP programs this as part of the
+		 * surface_sync packet.
+		 */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, sync_type);
+	radeon_ring_write(ring, cp_coher_size);
+	radeon_ring_write(ring, mc_addr >> 8);
+	radeon_ring_write(ring, 10); /* poll interval */
+}
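+
+/*
+ * Worked example (illustrative): the surface_sync packet takes its size
+ * and base in 256-byte units, so cp_coher_size = (size + 255) >> 8
+ * rounds up; a 48-byte sync becomes (48 + 255) >> 8 = 1 and a 512-byte
+ * sync becomes (512 + 255) >> 8 = 2, while 0xffffffff requests a
+ * full-range sync.  mc_addr >> 8 is the matching 256-byte-aligned base.
+ */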
+
+/* emits 11dw + 1 surface sync = 16dw */
+static void
+set_shaders(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u64 gpu_addr;
+
+	/* VS */
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+	radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, 2);
+	radeon_ring_write(ring, 0);
+
+	/* PS */
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+	radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, 1);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 2);
+
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
+}
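+
+/*
+ * Illustrative note: with the evergreen shader sizes in this file, the
+ * 64-byte VS is followed by the PS on the next 256-byte boundary, so the
+ * single 512-byte SH_ACTION sync starting at vs_offset appears to cover
+ * both programs uploaded above.
+ */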
+
+/* emits 10 + 1 sync (5) = 15 */
+static void
+set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
+
+	/* high addr, stride */
+	sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
+		SQ_VTXC_STRIDE(16);
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
+#endif
+	/* xyzw swizzles */
+	sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
+		SQ_VTCX_SEL_Y(SQ_SEL_Y) |
+		SQ_VTCX_SEL_Z(SQ_SEL_Z) |
+		SQ_VTCX_SEL_W(SQ_SEL_W);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+	radeon_ring_write(ring, 0x580);
+	radeon_ring_write(ring, gpu_addr & 0xffffffff);
+	radeon_ring_write(ring, 48 - 1); /* size */
+	radeon_ring_write(ring, sq_vtx_constant_word2);
+	radeon_ring_write(ring, sq_vtx_constant_word3);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
+
+	if ((rdev->family == CHIP_CEDAR) ||
+	    (rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_SUMO) ||
+	    (rdev->family == CHIP_SUMO2) ||
+	    (rdev->family == CHIP_CAICOS))
+		cp_set_surface_sync(rdev,
+				    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
+	else
+		cp_set_surface_sync(rdev,
+				    PACKET3_VC_ACTION_ENA, 48, gpu_addr);
+
+}
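+
+/*
+ * Illustrative decode of the buffer above: SQ_VTXC_STRIDE(16) with a
+ * minus-one encoded size of 48 - 1 describes three 16-byte vertices, one
+ * per corner of the rectlist emitted by draw_auto().  Families without a
+ * vertex cache (Cedar, Palm, Sumo, Sumo2, Caicos) fetch vertex data
+ * through the texture cache, hence the TC_ACTION sync instead of
+ * VC_ACTION.
+ */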
+
+/* emits 10 */
+static void
+set_tex_resource(struct radeon_device *rdev,
+		 int format, int w, int h, int pitch,
+		 u64 gpu_addr, u32 size)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 sq_tex_resource_word0, sq_tex_resource_word1;
+	u32 sq_tex_resource_word4, sq_tex_resource_word7;
+
+	if (h < 1)
+		h = 1;
+
+	sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
+	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
+				  ((w - 1) << 18));
+	sq_tex_resource_word1 = ((h - 1) << 0) |
+				TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+	/* xyzw swizzles */
+	sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
+				TEX_DST_SEL_Y(SQ_SEL_Y) |
+				TEX_DST_SEL_Z(SQ_SEL_Z) |
+				TEX_DST_SEL_W(SQ_SEL_W);
+
+	sq_tex_resource_word7 = format |
+		S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
+
+	cp_set_surface_sync(rdev,
+			    PACKET3_TC_ACTION_ENA, size, gpu_addr);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_tex_resource_word0);
+	radeon_ring_write(ring, sq_tex_resource_word1);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, sq_tex_resource_word4);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_tex_resource_word7);
+}
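+
+/*
+ * Worked example for the word0/word1 packing above (illustrative): for a
+ * 64x64 texture with a 64-texel pitch, ((pitch >> 3) - 1) = 7 lands in
+ * bits 6+ and (w - 1) = 63 in bits 18+ of word0, while (h - 1) = 63 and
+ * the 1D-tiled array mode go into word1.  Base and mip addresses are
+ * both programmed as gpu_addr >> 8, i.e. in 256-byte units.
+ */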
+
+/* emits 12 */
+static void
+set_scissors(struct radeon_device *rdev, int x1, int y1,
+	     int x2, int y2)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	/* work around some hw bugs */
+	if (x2 == 0)
+		x1 = 1;
+	if (y2 == 0)
+		y1 = 1;
+	if (rdev->family >= CHIP_CAYMAN) {
+		if ((x2 == 1) && (y2 == 1))
+			x2 = 2;
+	}
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+}
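+
+/*
+ * Illustrative decode: each TL/BR dword packs x into bits 15:0 and y
+ * into bits 31:16; the (1 << 31) on the generic and window TL dwords
+ * appears to be WINDOW_OFFSET_DISABLE.  The fixups above avoid
+ * zero-sized scissors and, on cayman, a degenerate 1x1 case.
+ */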
+
+/* emits 10 */
+static void
+draw_auto(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, DI_PT_RECTLIST);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 2) |
+#endif
+			  DI_INDEX_SIZE_16_BIT);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+	radeon_ring_write(ring, 1);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+	radeon_ring_write(ring, 3);
+	radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
+
+}
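+
+/*
+ * Illustrative note: DRAW_INDEX_AUTO with DI_SRC_SEL_AUTO_INDEX and a
+ * count of 3 makes the VGT generate indices 0..2 itself, and with the
+ * RECTLIST primitive those three vertices describe one screen-aligned
+ * rectangle, so no index buffer is needed.  The (2 << 2) in the
+ * big-endian case appears to select endian swapping for the index fetch.
+ */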
+
+/* emits 39 */
+static void
+set_default_state(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
+	u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
+	u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
+	int num_ps_gprs, num_vs_gprs, num_temp_gprs;
+	int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
+	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
+	int num_hs_threads, num_ls_threads;
+	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
+	int num_hs_stack_entries, num_ls_stack_entries;
+	u64 gpu_addr;
+	int dwords;
+
+	/* set clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
+
+	if (rdev->family < CHIP_CAYMAN) {
+		switch (rdev->family) {
+		case CHIP_CEDAR:
+		default:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 96;
+			num_vs_threads = 16;
+			num_gs_threads = 16;
+			num_es_threads = 16;
+			num_hs_threads = 16;
+			num_ls_threads = 16;
+			num_ps_stack_entries = 42;
+			num_vs_stack_entries = 42;
+			num_gs_stack_entries = 42;
+			num_es_stack_entries = 42;
+			num_hs_stack_entries = 42;
+			num_ls_stack_entries = 42;
+			break;
+		case CHIP_REDWOOD:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 128;
+			num_vs_threads = 20;
+			num_gs_threads = 20;
+			num_es_threads = 20;
+			num_hs_threads = 20;
+			num_ls_threads = 20;
+			num_ps_stack_entries = 42;
+			num_vs_stack_entries = 42;
+			num_gs_stack_entries = 42;
+			num_es_stack_entries = 42;
+			num_hs_stack_entries = 42;
+			num_ls_stack_entries = 42;
+			break;
+		case CHIP_JUNIPER:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 128;
+			num_vs_threads = 20;
+			num_gs_threads = 20;
+			num_es_threads = 20;
+			num_hs_threads = 20;
+			num_ls_threads = 20;
+			num_ps_stack_entries = 85;
+			num_vs_stack_entries = 85;
+			num_gs_stack_entries = 85;
+			num_es_stack_entries = 85;
+			num_hs_stack_entries = 85;
+			num_ls_stack_entries = 85;
+			break;
+		case CHIP_CYPRESS:
+		case CHIP_HEMLOCK:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 128;
+			num_vs_threads = 20;
+			num_gs_threads = 20;
+			num_es_threads = 20;
+			num_hs_threads = 20;
+			num_ls_threads = 20;
+			num_ps_stack_entries = 85;
+			num_vs_stack_entries = 85;
+			num_gs_stack_entries = 85;
+			num_es_stack_entries = 85;
+			num_hs_stack_entries = 85;
+			num_ls_stack_entries = 85;
+			break;
+		case CHIP_PALM:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 96;
+			num_vs_threads = 16;
+			num_gs_threads = 16;
+			num_es_threads = 16;
+			num_hs_threads = 16;
+			num_ls_threads = 16;
+			num_ps_stack_entries = 42;
+			num_vs_stack_entries = 42;
+			num_gs_stack_entries = 42;
+			num_es_stack_entries = 42;
+			num_hs_stack_entries = 42;
+			num_ls_stack_entries = 42;
+			break;
+		case CHIP_SUMO:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 96;
+			num_vs_threads = 25;
+			num_gs_threads = 25;
+			num_es_threads = 25;
+			num_hs_threads = 25;
+			num_ls_threads = 25;
+			num_ps_stack_entries = 42;
+			num_vs_stack_entries = 42;
+			num_gs_stack_entries = 42;
+			num_es_stack_entries = 42;
+			num_hs_stack_entries = 42;
+			num_ls_stack_entries = 42;
+			break;
+		case CHIP_SUMO2:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 96;
+			num_vs_threads = 25;
+			num_gs_threads = 25;
+			num_es_threads = 25;
+			num_hs_threads = 25;
+			num_ls_threads = 25;
+			num_ps_stack_entries = 85;
+			num_vs_stack_entries = 85;
+			num_gs_stack_entries = 85;
+			num_es_stack_entries = 85;
+			num_hs_stack_entries = 85;
+			num_ls_stack_entries = 85;
+			break;
+		case CHIP_BARTS:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 128;
+			num_vs_threads = 20;
+			num_gs_threads = 20;
+			num_es_threads = 20;
+			num_hs_threads = 20;
+			num_ls_threads = 20;
+			num_ps_stack_entries = 85;
+			num_vs_stack_entries = 85;
+			num_gs_stack_entries = 85;
+			num_es_stack_entries = 85;
+			num_hs_stack_entries = 85;
+			num_ls_stack_entries = 85;
+			break;
+		case CHIP_TURKS:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 128;
+			num_vs_threads = 20;
+			num_gs_threads = 20;
+			num_es_threads = 20;
+			num_hs_threads = 20;
+			num_ls_threads = 20;
+			num_ps_stack_entries = 42;
+			num_vs_stack_entries = 42;
+			num_gs_stack_entries = 42;
+			num_es_stack_entries = 42;
+			num_hs_stack_entries = 42;
+			num_ls_stack_entries = 42;
+			break;
+		case CHIP_CAICOS:
+			num_ps_gprs = 93;
+			num_vs_gprs = 46;
+			num_temp_gprs = 4;
+			num_gs_gprs = 31;
+			num_es_gprs = 31;
+			num_hs_gprs = 23;
+			num_ls_gprs = 23;
+			num_ps_threads = 128;
+			num_vs_threads = 10;
+			num_gs_threads = 10;
+			num_es_threads = 10;
+			num_hs_threads = 10;
+			num_ls_threads = 10;
+			num_ps_stack_entries = 42;
+			num_vs_stack_entries = 42;
+			num_gs_stack_entries = 42;
+			num_es_stack_entries = 42;
+			num_hs_stack_entries = 42;
+			num_ls_stack_entries = 42;
+			break;
+		}
+
+		if ((rdev->family == CHIP_CEDAR) ||
+		    (rdev->family == CHIP_PALM) ||
+		    (rdev->family == CHIP_SUMO) ||
+		    (rdev->family == CHIP_SUMO2) ||
+		    (rdev->family == CHIP_CAICOS))
+			sq_config = 0;
+		else
+			sq_config = VC_ENABLE;
+
+		sq_config |= (EXPORT_SRC_C |
+			      CS_PRIO(0) |
+			      LS_PRIO(0) |
+			      HS_PRIO(0) |
+			      PS_PRIO(0) |
+			      VS_PRIO(1) |
+			      GS_PRIO(2) |
+			      ES_PRIO(3));
+
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
+					  NUM_VS_GPRS(num_vs_gprs) |
+					  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
+					  NUM_ES_GPRS(num_es_gprs));
+		sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
+					  NUM_LS_GPRS(num_ls_gprs));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
+					   NUM_VS_THREADS(num_vs_threads) |
+					   NUM_GS_THREADS(num_gs_threads) |
+					   NUM_ES_THREADS(num_es_threads));
+		sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
+					     NUM_LS_THREADS(num_ls_threads));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+					    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+					    NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+		sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
+					    NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
+
+		/* disable dyn gprs */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, 0);
+
+		/* setup LDS */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, 0x10001000);
+
+		/* SQ config */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+		radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, sq_config);
+		radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+		radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+		radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, sq_thread_resource_mgmt);
+		radeon_ring_write(ring, sq_thread_resource_mgmt_2);
+		radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+		radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+		radeon_ring_write(ring, sq_stack_resource_mgmt_3);
+	}
+
+	/* CONTEXT_CONTROL */
+	radeon_ring_write(ring, 0xc0012800);
+	radeon_ring_write(ring, 0x80000000);
+	radeon_ring_write(ring, 0x80000000);
+
+	/* SQ_VTX_BASE_VTX_LOC */
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+
+	/* SET_SAMPLER */
+	radeon_ring_write(ring, 0xc0036e00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000012);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+
+	/* set to DX10/11 mode */
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
+
+	/* emit an IB pointing at default state */
+	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+	radeon_ring_write(ring, dwords);
+
+}
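+
+/*
+ * Illustrative decode of the raw dwords above: a type-3 packet header is
+ * (3 << 30) | (count << 16) | (opcode << 8), where count + 1 data dwords
+ * follow.  0xc0012800 is opcode 0x28 (CONTEXT_CONTROL) with two dwords,
+ * 0xc0026f00 is opcode 0x6f (SET_CTL_CONST, here SQ_VTX_BASE_VTX_LOC)
+ * with three, and 0xc0036e00 is opcode 0x6e (SET_SAMPLER) with four.
+ * The trailing indirect-buffer packet splits gpu_addr into a
+ * 4-byte-aligned low dword plus an 8-bit high part, with the length
+ * padded to a 16-dword multiple.
+ */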
+
+int evergreen_blit_init(struct radeon_device *rdev)
+{
+	u32 obj_size;
+	int i, r, dwords;
+	void *ptr;
+	u32 packet2s[16];
+	int num_packet2s = 0;
+
+	rdev->r600_blit.primitives.set_render_target = set_render_target;
+	rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
+	rdev->r600_blit.primitives.set_shaders = set_shaders;
+	rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
+	rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
+	rdev->r600_blit.primitives.set_scissors = set_scissors;
+	rdev->r600_blit.primitives.draw_auto = draw_auto;
+	rdev->r600_blit.primitives.set_default_state = set_default_state;
+
+	rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
+	rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
+	rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
+	rdev->r600_blit.ring_size_common += 5; /* done copy */
+	rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
+
+	rdev->r600_blit.ring_size_per_loop = 74;
+	if (rdev->family >= CHIP_CAYMAN)
+		rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
+
+	rdev->r600_blit.max_dim = 16384;
+
+	rdev->r600_blit.state_offset = 0;
+
+	if (rdev->family < CHIP_CAYMAN)
+		rdev->r600_blit.state_len = evergreen_default_size;
+	else
+		rdev->r600_blit.state_len = cayman_default_size;
+
+	dwords = rdev->r600_blit.state_len;
+	while (dwords & 0xf) {
+		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
+		dwords++;
+	}
+
+	obj_size = dwords * 4;
+	obj_size = ALIGN(obj_size, 256);
+
+	rdev->r600_blit.vs_offset = obj_size;
+	if (rdev->family < CHIP_CAYMAN)
+		obj_size += evergreen_vs_size * 4;
+	else
+		obj_size += cayman_vs_size * 4;
+	obj_size = ALIGN(obj_size, 256);
+
+	rdev->r600_blit.ps_offset = obj_size;
+	if (rdev->family < CHIP_CAYMAN)
+		obj_size += evergreen_ps_size * 4;
+	else
+		obj_size += cayman_ps_size * 4;
+	obj_size = ALIGN(obj_size, 256);
+
+	/* pin copy shader into vram if not already initialized */
+	if (!rdev->r600_blit.shader_obj) {
+		r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_VRAM,
+				     NULL, &rdev->r600_blit.shader_obj);
+		if (r) {
+			DRM_ERROR("evergreen failed to allocate shader\n");
+			return r;
+		}
+
+		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+				  &rdev->r600_blit.shader_gpu_addr);
+		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+		if (r) {
+			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+			return r;
+		}
+	}
+
+	DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
+		  obj_size,
+		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
+
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
+	if (r) {
+		DRM_ERROR("failed to map blit object %d\n", r);
+		return r;
+	}
+
+	if (rdev->family < CHIP_CAYMAN) {
+		memcpy_toio(ptr + rdev->r600_blit.state_offset,
+			    evergreen_default_state, rdev->r600_blit.state_len * 4);
+
+		if (num_packet2s)
+			memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+				    packet2s, num_packet2s * 4);
+		for (i = 0; i < evergreen_vs_size; i++)
+			*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
+		for (i = 0; i < evergreen_ps_size; i++)
+			*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
+	} else {
+		memcpy_toio(ptr + rdev->r600_blit.state_offset,
+			    cayman_default_state, rdev->r600_blit.state_len * 4);
+
+		if (num_packet2s)
+			memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+				    packet2s, num_packet2s * 4);
+		for (i = 0; i < cayman_vs_size; i++)
+			*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
+		for (i = 0; i < cayman_ps_size; i++)
+			*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
+	}
+	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+	return 0;
+}
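+
+/*
+ * Worked example of the layout above (state_len value hypothetical): a
+ * 330-dword state table gets 6 PACKET2 NOPs appended to reach 336 dwords
+ * (a 16-dword multiple, presumably the CP's indirect-buffer fetch
+ * granularity), i.e. 1344 bytes, which ALIGN() rounds up to 1536 for
+ * vs_offset; the VS and PS blobs then each start on their own 256-byte
+ * boundary inside the pinned VRAM bo.
+ */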
diff --git a/linux-imx/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/linux-imx/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
new file mode 100644
index 0000000..f85c0af
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * evergreen cards need to use the 3D engine to blit data, which requires
+ * quite a bit of hw state setup.  Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables.  The register state and shaders
+ * were hand generated to support blitting functionality.  See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
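+
+/*
+ * Illustrative decode of the stream format below: each entry is a raw
+ * type-3 SET_CONTEXT_REG packet, (3 << 30) | (count << 16) | (0x69 << 8),
+ * followed by a register index relative to the 0x28000 context-register
+ * base and then the data.  So 0xc0016900, 0x0000023b, 0x00000000 writes
+ * a single 0 to 0x28000 + (0x23b << 2) = 0x288ec, the SQ_LDS_ALLOC_PS
+ * register named in the comment.
+ */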
+
+const u32 evergreen_default_state[] =
+{
+	0xc0016900,
+	0x0000023b,
+	0x00000000, /* SQ_LDS_ALLOC_PS */
+
+	0xc0066900,
+	0x00000240,
+	0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0046900,
+	0x00000247,
+	0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0026900,
+	0x00000010,
+	0x00000000, /* DB_Z_INFO */
+	0x00000000, /* DB_STENCIL_INFO */
+
+	0xc0016900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+
+	0xc0066900,
+	0x00000000,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000000, /* DB_COUNT_CONTROL */
+	0x00000000, /* DB_DEPTH_VIEW */
+	0x0000002a, /* DB_RENDER_OVERRIDE */
+	0x00000000, /* DB_RENDER_OVERRIDE2 */
+	0x00000000, /* DB_HTILE_DATA_BASE */
+
+	0xc0026900,
+	0x0000000a,
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0016900,
+	0x000002dc,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00d6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000, /* PA_SC_CLIPRECT_0_BR */
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0xaaaaaaaa, /* PA_SC_EDGERULE */
+	0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0226900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+	0xc0016900,
+	0x000000d4,
+	0x00000000, /* SX_MISC */
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MODE_CNTL_0 */
+	0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+	0xc0106900,
+	0x00000300,
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x00000005, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */
+	0xffffffff, /* PA_SC_AA_MASK */
+
+	0xc00d6900,
+	0x00000202,
+	0x00cc0010, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CONTROL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000004, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+	0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */
+
+	0xc0066900,
+	0x000002de,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+
+	0xc0016900,
+	0x00000229,
+	0x00000000, /* SQ_PGM_START_FS */
+
+	0xc0016900,
+	0x0000022a,
+	0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+	0xc0096900,
+	0x00000100,
+	0x00ffffff, /* VGT_MAX_VTX_INDX */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /* SX_ALPHA_TEST_CONTROL */
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000, /* CB_BLEND_GREEN */
+	0x00000000, /* CB_BLEND_BLUE */
+	0x00000000, /* CB_BLEND_ALPHA */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000, /*  */
+
+	0xc0026900,
+	0x000002ad,
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000, /*  */
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+	0xc0016900,
+	0x000002d5,
+	0x00000000, /* VGT_SHADER_STAGES_EN */
+
+	0xc0026900,
+	0x000002e5,
+	0x00000000, /* VGT_STRMOUT_CONFIG */
+	0x00000000, /*  */
+
+	0xc0016900,
+	0x000001e0,
+	0x00000000, /* CB_BLEND0_CONTROL */
+
+	0xc0016900,
+	0x000001b1,
+	0x00000000, /* SPI_VS_OUT_CONFIG */
+
+	0xc0016900,
+	0x00000187,
+	0x00000000, /* SPI_VS_OUT_ID_0 */
+
+	0xc0016900,
+	0x00000191,
+	0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+
+	0xc00b6900,
+	0x000001b3,
+	0x20000001, /* SPI_PS_IN_CONTROL_0 */
+	0x00000000, /* SPI_PS_IN_CONTROL_1 */
+	0x00000000, /* SPI_INTERP_CONTROL_0 */
+	0x00000000, /* SPI_INPUT_Z */
+	0x00000000, /* SPI_FOG_CNTL */
+	0x00100000, /* SPI_BARYC_CNTL */
+	0x00000000, /* SPI_PS_IN_CONTROL_2 */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+	0x00000000, /*  */
+
+	0xc0026900,
+	0x00000316,
+	0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	0x00000010, /*  */
+};
+
+const u32 evergreen_vs[] =
+{
+	0x00000004,
+	0x80800400,
+	0x0000a03c,
+	0x95000688,
+	0x00004000,
+	0x15200688,
+	0x00000000,
+	0x00000000,
+	0x3c000000,
+	0x67961001,
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
+	0x00080000,
+#endif
+	0x00000000,
+	0x1c000000,
+	0x67961000,
+#ifdef __BIG_ENDIAN
+	0x00020008,
+#else
+	0x00000008,
+#endif
+	0x00000000,
+};
+
+const u32 evergreen_ps[] =
+{
+	0x00000003,
+	0xa00c0000,
+	0x00000008,
+	0x80400000,
+	0x00000000,
+	0x95200688,
+	0x00380400,
+	0x00146b10,
+	0x00380000,
+	0x20146b10,
+	0x00380400,
+	0x40146b00,
+	0x80380000,
+	0x60146b00,
+	0x00000000,
+	0x00000000,
+	0x00000010,
+	0x000d1000,
+	0xb0800000,
+	0x00000000,
+};
+
+const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
+const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
+const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
diff --git a/linux-imx/drivers/gpu/drm/radeon/evergreen_blit_shaders.h b/linux-imx/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
new file mode 100644
index 0000000..bb8d6c7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef EVERGREEN_BLIT_SHADERS_H
+#define EVERGREEN_BLIT_SHADERS_H
+
+extern const u32 evergreen_ps[];
+extern const u32 evergreen_vs[];
+extern const u32 evergreen_default_state[];
+
+extern const u32 evergreen_ps_size, evergreen_vs_size;
+extern const u32 evergreen_default_size;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/evergreen_cs.c b/linux-imx/drivers/gpu/drm/radeon/evergreen_cs.c
new file mode 100644
index 0000000..c7cac07
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -0,0 +1,3517 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "evergreend.h"
+#include "evergreen_reg_safe.h"
+#include "cayman_reg_safe.h"
+
+#define MAX(a,b)                   (((a)>(b))?(a):(b))
+#define MIN(a,b)                   (((a)<(b))?(a):(b))
+
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+			   struct radeon_cs_reloc **cs_reloc);
+struct evergreen_cs_track {
+	u32			group_size;
+	u32			nbanks;
+	u32			npipes;
+	u32			row_size;
+	/* values we track */
+	u32			nsamples;		/* unused */
+	struct radeon_bo	*cb_color_bo[12];
+	u32			cb_color_bo_offset[12];
+	struct radeon_bo	*cb_color_fmask_bo[8];	/* unused */
+	struct radeon_bo	*cb_color_cmask_bo[8];	/* unused */
+	u32			cb_color_info[12];
+	u32			cb_color_view[12];
+	u32			cb_color_pitch[12];
+	u32			cb_color_slice[12];
+	u32			cb_color_slice_idx[12];
+	u32			cb_color_attrib[12];
+	u32			cb_color_cmask_slice[8];/* unused */
+	u32			cb_color_fmask_slice[8];/* unused */
+	u32			cb_target_mask;
+	u32			cb_shader_mask; /* unused */
+	u32			vgt_strmout_config;
+	u32			vgt_strmout_buffer_config;
+	struct radeon_bo	*vgt_strmout_bo[4];
+	u32			vgt_strmout_bo_offset[4];
+	u32			vgt_strmout_size[4];
+	u32			db_depth_control;
+	u32			db_depth_view;
+	u32			db_depth_slice;
+	u32			db_depth_size;
+	u32			db_z_info;
+	u32			db_z_read_offset;
+	u32			db_z_write_offset;
+	struct radeon_bo	*db_z_read_bo;
+	struct radeon_bo	*db_z_write_bo;
+	u32			db_s_info;
+	u32			db_s_read_offset;
+	u32			db_s_write_offset;
+	struct radeon_bo	*db_s_read_bo;
+	struct radeon_bo	*db_s_write_bo;
+	bool			sx_misc_kill_all_prims;
+	bool			cb_dirty;
+	bool			db_dirty;
+	bool			streamout_dirty;
+	u32			htile_offset;
+	u32			htile_surface;
+	struct radeon_bo	*htile_bo;
+};
+
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+	if (tiling_flags & RADEON_TILING_MACRO)
+		return ARRAY_2D_TILED_THIN1;
+	else if (tiling_flags & RADEON_TILING_MICRO)
+		return ARRAY_1D_TILED_THIN1;
+	else
+		return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+	switch (nbanks) {
+	case 2:
+		return ADDR_SURF_2_BANK;
+	case 4:
+		return ADDR_SURF_4_BANK;
+	case 8:
+	default:
+		return ADDR_SURF_8_BANK;
+	case 16:
+		return ADDR_SURF_16_BANK;
+	}
+}
+
+static void evergreen_cs_track_init(struct evergreen_cs_track *track)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		track->cb_color_fmask_bo[i] = NULL;
+		track->cb_color_cmask_bo[i] = NULL;
+		track->cb_color_cmask_slice[i] = 0;
+		track->cb_color_fmask_slice[i] = 0;
+	}
+
+	for (i = 0; i < 12; i++) {
+		track->cb_color_bo[i] = NULL;
+		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+		track->cb_color_info[i] = 0;
+		track->cb_color_view[i] = 0xFFFFFFFF;
+		track->cb_color_pitch[i] = 0;
+		track->cb_color_slice[i] = 0xfffffff;
+		track->cb_color_slice_idx[i] = 0;
+	}
+	track->cb_target_mask = 0xFFFFFFFF;
+	track->cb_shader_mask = 0xFFFFFFFF;
+	track->cb_dirty = true;
+
+	track->db_depth_slice = 0xffffffff;
+	track->db_depth_view = 0xFFFFC000;
+	track->db_depth_size = 0xFFFFFFFF;
+	track->db_depth_control = 0xFFFFFFFF;
+	track->db_z_info = 0xFFFFFFFF;
+	track->db_z_read_offset = 0xFFFFFFFF;
+	track->db_z_write_offset = 0xFFFFFFFF;
+	track->db_z_read_bo = NULL;
+	track->db_z_write_bo = NULL;
+	track->db_s_info = 0xFFFFFFFF;
+	track->db_s_read_offset = 0xFFFFFFFF;
+	track->db_s_write_offset = 0xFFFFFFFF;
+	track->db_s_read_bo = NULL;
+	track->db_s_write_bo = NULL;
+	track->db_dirty = true;
+	track->htile_bo = NULL;
+	track->htile_offset = 0xFFFFFFFF;
+	track->htile_surface = 0;
+
+	for (i = 0; i < 4; i++) {
+		track->vgt_strmout_size[i] = 0;
+		track->vgt_strmout_bo[i] = NULL;
+		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
+	}
+	track->streamout_dirty = true;
+	track->sx_misc_kill_all_prims = false;
+}
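+
+/*
+ * Illustrative note: the all-ones style defaults above appear to act as
+ * poison values; surface state that the command stream never programs
+ * shows up as an implausible offset/view and is caught by the validation
+ * below instead of silently passing.
+ */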
+
+struct eg_surface {
+	/* value gathered from cs */
+	unsigned	nbx;
+	unsigned	nby;
+	unsigned	format;
+	unsigned	mode;
+	unsigned	nbanks;
+	unsigned	bankw;
+	unsigned	bankh;
+	unsigned	tsplit;
+	unsigned	mtilea;
+	unsigned	nsamples;
+	/* output value */
+	unsigned	bpe;
+	unsigned	layer_size;
+	unsigned	palign;
+	unsigned	halign;
+	unsigned long	base_align;
+};
+
+static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
+					  struct eg_surface *surf,
+					  const char *prefix)
+{
+	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
+	surf->base_align = surf->bpe;
+	surf->palign = 1;
+	surf->halign = 1;
+	return 0;
+}
+
+static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
+						  struct eg_surface *surf,
+						  const char *prefix)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned palign;
+
+	palign = MAX(64, track->group_size / surf->bpe);
+	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
+	surf->base_align = track->group_size;
+	surf->palign = palign;
+	surf->halign = 1;
+	if (surf->nbx & (palign - 1)) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
+				 __func__, __LINE__, prefix, surf->nbx, palign);
+		}
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
+				      struct eg_surface *surf,
+				      const char *prefix)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned palign;
+
+	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
+	palign = MAX(8, palign);
+	surf->layer_size = surf->nbx * surf->nby * surf->bpe;
+	surf->base_align = track->group_size;
+	surf->palign = palign;
+	surf->halign = 8;
+	if ((surf->nbx & (palign - 1))) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
+				 __func__, __LINE__, prefix, surf->nbx, palign,
+				 track->group_size, surf->bpe, surf->nsamples);
+		}
+		return -EINVAL;
+	}
+	if ((surf->nby & (8 - 1))) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
+				 __func__, __LINE__, prefix, surf->nby);
+		}
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
+				      struct eg_surface *surf,
+				      const char *prefix)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned palign, halign, tileb, slice_pt;
+	unsigned mtile_pr, mtile_ps, mtileb;
+
+	tileb = 64 * surf->bpe * surf->nsamples;
+	slice_pt = 1;
+	if (tileb > surf->tsplit) {
+		slice_pt = tileb / surf->tsplit;
+	}
+	tileb = tileb / slice_pt;
+	/* macro tile width & height */
+	palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
+	halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
+	mtileb = (palign / 8) * (halign / 8) * tileb;
+	mtile_pr = surf->nbx / palign;
+	mtile_ps = (mtile_pr * surf->nby) / halign;
+	surf->layer_size = mtile_ps * mtileb * slice_pt;
+	surf->base_align = (palign / 8) * (halign / 8) * tileb;
+	surf->palign = palign;
+	surf->halign = halign;
+
+	if ((surf->nbx & (palign - 1))) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
+				 __func__, __LINE__, prefix, surf->nbx, palign);
+		}
+		return -EINVAL;
+	}
+	if ((surf->nby & (halign - 1))) {
+		if (prefix) {
+			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
+				 __func__, __LINE__, prefix, surf->nby, halign);
+		}
+		return -EINVAL;
+	}
+
+	return 0;
+}
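+
+/*
+ * Worked example with illustrative values: for npipes = 4, nbanks = 8,
+ * bankw = bankh = 1, mtilea = 2, bpe = 4 and one sample, a tile is
+ * 64 * 4 = 256 bytes (below any tile split, so slice_pt = 1); the macro
+ * tile is then palign = 8 * 1 * 4 * 2 = 64 pixels wide,
+ * halign = (8 * 1 * 8) / 2 = 32 rows high, and
+ * mtileb = (64 / 8) * (32 / 8) * 256 = 8192 bytes, which is also the
+ * base alignment the bo offset is checked against.
+ */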
+
+static int evergreen_surface_check(struct radeon_cs_parser *p,
+				   struct eg_surface *surf,
+				   const char *prefix)
+{
+	/* some common values are computed here */
+	surf->bpe = r600_fmt_get_blocksize(surf->format);
+
+	switch (surf->mode) {
+	case ARRAY_LINEAR_GENERAL:
+		return evergreen_surface_check_linear(p, surf, prefix);
+	case ARRAY_LINEAR_ALIGNED:
+		return evergreen_surface_check_linear_aligned(p, surf, prefix);
+	case ARRAY_1D_TILED_THIN1:
+		return evergreen_surface_check_1d(p, surf, prefix);
+	case ARRAY_2D_TILED_THIN1:
+		return evergreen_surface_check_2d(p, surf, prefix);
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
+				__func__, __LINE__, prefix, surf->mode);
+		return -EINVAL;
+	}
+	return -EINVAL;
+}
+
+static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
+					      struct eg_surface *surf,
+					      const char *prefix)
+{
+	switch (surf->mode) {
+	case ARRAY_2D_TILED_THIN1:
+		break;
+	case ARRAY_LINEAR_GENERAL:
+	case ARRAY_LINEAR_ALIGNED:
+	case ARRAY_1D_TILED_THIN1:
+		return 0;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
+				__func__, __LINE__, prefix, surf->mode);
+		return -EINVAL;
+	}
+
+	switch (surf->nbanks) {
+	case 0: surf->nbanks = 2; break;
+	case 1: surf->nbanks = 4; break;
+	case 2: surf->nbanks = 8; break;
+	case 3: surf->nbanks = 16; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
+			 __func__, __LINE__, prefix, surf->nbanks);
+		return -EINVAL;
+	}
+	switch (surf->bankw) {
+	case 0: surf->bankw = 1; break;
+	case 1: surf->bankw = 2; break;
+	case 2: surf->bankw = 4; break;
+	case 3: surf->bankw = 8; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
+			 __func__, __LINE__, prefix, surf->bankw);
+		return -EINVAL;
+	}
+	switch (surf->bankh) {
+	case 0: surf->bankh = 1; break;
+	case 1: surf->bankh = 2; break;
+	case 2: surf->bankh = 4; break;
+	case 3: surf->bankh = 8; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
+			 __func__, __LINE__, prefix, surf->bankh);
+		return -EINVAL;
+	}
+	switch (surf->mtilea) {
+	case 0: surf->mtilea = 1; break;
+	case 1: surf->mtilea = 2; break;
+	case 2: surf->mtilea = 4; break;
+	case 3: surf->mtilea = 8; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
+			 __func__, __LINE__, prefix, surf->mtilea);
+		return -EINVAL;
+	}
+	switch (surf->tsplit) {
+	case 0: surf->tsplit = 64; break;
+	case 1: surf->tsplit = 128; break;
+	case 2: surf->tsplit = 256; break;
+	case 3: surf->tsplit = 512; break;
+	case 4: surf->tsplit = 1024; break;
+	case 5: surf->tsplit = 2048; break;
+	case 6: surf->tsplit = 4096; break;
+	default:
+		dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
+			 __func__, __LINE__, prefix, surf->tsplit);
+		return -EINVAL;
+	}
+	return 0;
+}
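+
+/*
+ * Illustrative summary of the conversions above: the register fields are
+ * log2-style encodings expanded to real counts -- banks as 2 << n
+ * (0..3 -> 2..16), bank width/height and macro tile aspect as 1 << n
+ * (0..3 -> 1..8), and tile split as 64 << n bytes (0..6 -> 64..4096).
+ */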
+
+static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
+{
+	struct evergreen_cs_track *track = p->track;
+	struct eg_surface surf;
+	unsigned pitch, slice, mslice;
+	unsigned long offset;
+	int r;
+
+	mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
+	pitch = track->cb_color_pitch[id];
+	slice = track->cb_color_slice[id];
+	surf.nbx = (pitch + 1) * 8;
+	surf.nby = ((slice + 1) * 64) / surf.nbx;
+	surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
+	surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
+	surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
+	surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
+	surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
+	surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
+	surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
+	surf.nsamples = 1;
+
+	if (!r600_fmt_is_valid_color(surf.format)) {
+		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
+			 __func__, __LINE__, surf.format,
+			id, track->cb_color_info[id]);
+		return -EINVAL;
+	}
+
+	r = evergreen_surface_value_conv_check(p, &surf, "cb");
+	if (r) {
+		return r;
+	}
+
+	r = evergreen_surface_check(p, &surf, "cb");
+	if (r) {
+		dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+			 __func__, __LINE__, id, track->cb_color_pitch[id],
+			 track->cb_color_slice[id], track->cb_color_attrib[id],
+			 track->cb_color_info[id]);
+		return r;
+	}
+
+	offset = track->cb_color_bo_offset[id] << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, id, offset, surf.base_align);
+		return -EINVAL;
+	}
+
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->cb_color_bo[id])) {
+		/* old ddx versions are broken: they allocate the bo with
+		 * w*h*bpp but program the slice with ALIGN(h, 8); catch
+		 * this and patch the command stream.
+		 */
+		if (!surf.mode) {
+			volatile u32 *ib = p->ib.ptr;
+			unsigned long tmp, nby, bsize, size, min = 0;
+
+			/* find the height the ddx wants */
+			if (surf.nby > 8) {
+				min = surf.nby - 8;
+			}
+			bsize = radeon_bo_size(track->cb_color_bo[id]);
+			tmp = track->cb_color_bo_offset[id] << 8;
+			for (nby = surf.nby; nby > min; nby--) {
+				size = nby * surf.nbx * surf.bpe * surf.nsamples;
+				if ((tmp + size * mslice) <= bsize) {
+					break;
+				}
+			}
+			if (nby > min) {
+				surf.nby = nby;
+				slice = ((nby * surf.nbx) / 64) - 1;
+				if (!evergreen_surface_check(p, &surf, "cb")) {
+					/* check if this one works */
+					tmp += surf.layer_size * mslice;
+					if (tmp <= bsize) {
+						ib[track->cb_color_slice_idx[id]] = slice;
+						goto old_ddx_ok;
+					}
+				}
+			}
+		}
+		dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
+			 "offset %d, max layer %d, bo size %ld, slice %d)\n",
+			 __func__, __LINE__, id, surf.layer_size,
+			track->cb_color_bo_offset[id] << 8, mslice,
+			radeon_bo_size(track->cb_color_bo[id]), slice);
+		dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
+			 __func__, __LINE__, surf.nbx, surf.nby,
+			surf.mode, surf.bpe, surf.nsamples,
+			surf.bankw, surf.bankh,
+			surf.tsplit, surf.mtilea);
+		return -EINVAL;
+	}
+old_ddx_ok:
+
+	return 0;
+}
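+
+/*
+ * Illustrative walk-through of the fallback above: a broken ddx
+ * allocates w * h * bpp bytes but programs the slice from ALIGN(h, 8),
+ * so the nby derived from the slice can exceed the real allocation by up
+ * to 8 rows.  The loop retries with nby shrunk one row at a time (at
+ * most 8) until the layers fit the bo, then patches the already-copied
+ * slice dword in the IB through cb_color_slice_idx instead of rejecting
+ * the command stream.
+ */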
+
+static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
+						unsigned nbx, unsigned nby)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned long size;
+
+	if (track->htile_bo == NULL) {
+		dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+				__func__, __LINE__, track->db_z_info);
+		return -EINVAL;
+	}
+
+	if (G_028ABC_LINEAR(track->htile_surface)) {
+		/* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
+		nbx = round_up(nbx, 16 * 8);
+		/* height is npipes htiles aligned == npipes * 8 pixel aligned */
+		nby = round_up(nby, track->npipes * 8);
+	} else {
+		/* always assume 8x8 htile */
+		/* alignment is htile align * 8; the htile alignment varies
+		 * with the number of pipes, the tile width and nby
+		 */
+		switch (track->npipes) {
+		case 8:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = round_up(nbx, 64 * 8);
+			nby = round_up(nby, 64 * 8);
+			break;
+		case 4:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = round_up(nbx, 64 * 8);
+			nby = round_up(nby, 32 * 8);
+			break;
+		case 2:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = round_up(nbx, 32 * 8);
+			nby = round_up(nby, 32 * 8);
+			break;
+		case 1:
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = round_up(nbx, 32 * 8);
+			nby = round_up(nby, 16 * 8);
+			break;
+		default:
+			dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
+					__func__, __LINE__, track->npipes);
+			return -EINVAL;
+		}
+	}
+	/* compute number of htile */
+	nbx = nbx >> 3;
+	nby = nby >> 3;
+	/* size must be aligned on npipes * 2K boundary */
+	size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
+	size += track->htile_offset;
+
+	if (size > radeon_bo_size(track->htile_bo)) {
+		dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+				__func__, __LINE__, radeon_bo_size(track->htile_bo),
+				size, nbx, nby);
+		return -EINVAL;
+	}
+	return 0;
+}
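+
+/*
+ * Worked example (illustrative values): each 4-byte htile covers an 8x8
+ * pixel block.  For a linear 1920x1080 surface on a 2-pipe chip, nbx
+ * rounds to 1920 (already a multiple of 16 * 8) and nby to
+ * round_up(1080, 16) = 1088, giving (1920 >> 3) * (1088 >> 3) * 4 =
+ * 130560 bytes, rounded up to the npipes * 2K boundary = 131072.
+ */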
+
+static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
+{
+	struct evergreen_cs_track *track = p->track;
+	struct eg_surface surf;
+	unsigned pitch, slice, mslice;
+	unsigned long offset;
+	int r;
+
+	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
+	slice = track->db_depth_slice;
+	surf.nbx = (pitch + 1) * 8;
+	surf.nby = ((slice + 1) * 64) / surf.nbx;
+	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
+	surf.format = G_028044_FORMAT(track->db_s_info);
+	surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
+	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
+	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
+	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
+	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
+	surf.nsamples = 1;
+
+	if (surf.format != 1) {
+		dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
+			 __func__, __LINE__, surf.format);
+		return -EINVAL;
+	}
+	/* replace by color format so we can use same code */
+	surf.format = V_028C70_COLOR_8;
+
+	r = evergreen_surface_value_conv_check(p, &surf, "stencil");
+	if (r) {
+		return r;
+	}
+
+	r = evergreen_surface_check(p, &surf, NULL);
+	if (r) {
+		/* old userspace doesn't compute proper depth/stencil alignment;
+		 * check that alignment against a bigger bytes-per-element
+		 * format and only report an error if that alignment is
+		 * wrong too.
+		 */
+		surf.format = V_028C70_COLOR_8_8_8_8;
+		r = evergreen_surface_check(p, &surf, "stencil");
+		if (r) {
+			dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+				 __func__, __LINE__, track->db_depth_size,
+				 track->db_depth_slice, track->db_s_info, track->db_z_info);
+		}
+		return r;
+	}
+
+	offset = track->db_s_read_offset << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, offset, surf.base_align);
+		return -EINVAL;
+	}
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->db_s_read_bo)) {
+		dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
+			 "offset %ld, max layer %d, bo size %ld)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)track->db_s_read_offset << 8, mslice,
+			radeon_bo_size(track->db_s_read_bo));
+		dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
+			 __func__, __LINE__, track->db_depth_size,
+			 track->db_depth_slice, track->db_s_info, track->db_z_info);
+		return -EINVAL;
+	}
+
+	offset = track->db_s_write_offset << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, offset, surf.base_align);
+		return -EINVAL;
+	}
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->db_s_write_bo)) {
+		dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
+			 "offset %ld, max layer %d, bo size %ld)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)track->db_s_write_offset << 8, mslice,
+			radeon_bo_size(track->db_s_write_bo));
+		return -EINVAL;
+	}
+
+	/* hyperz */
+	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
+		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
+		if (r) {
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
+{
+	struct evergreen_cs_track *track = p->track;
+	struct eg_surface surf;
+	unsigned pitch, slice, mslice;
+	unsigned long offset;
+	int r;
+
+	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
+	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
+	slice = track->db_depth_slice;
+	surf.nbx = (pitch + 1) * 8;
+	surf.nby = ((slice + 1) * 64) / surf.nbx;
+	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
+	surf.format = G_028040_FORMAT(track->db_z_info);
+	surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
+	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
+	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
+	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
+	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
+	surf.nsamples = 1;
+
+	switch (surf.format) {
+	case V_028040_Z_16:
+		surf.format = V_028C70_COLOR_16;
+		break;
+	case V_028040_Z_24:
+	case V_028040_Z_32_FLOAT:
+		surf.format = V_028C70_COLOR_8_8_8_8;
+		break;
+	default:
+		dev_warn(p->dev, "%s:%d depth invalid format %d\n",
+			 __func__, __LINE__, surf.format);
+		return -EINVAL;
+	}
+
+	r = evergreen_surface_value_conv_check(p, &surf, "depth");
+	if (r) {
+		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
+			 __func__, __LINE__, track->db_depth_size,
+			 track->db_depth_slice, track->db_z_info);
+		return r;
+	}
+
+	r = evergreen_surface_check(p, &surf, "depth");
+	if (r) {
+		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
+			 __func__, __LINE__, track->db_depth_size,
+			 track->db_depth_slice, track->db_z_info);
+		return r;
+	}
+
+	offset = track->db_z_read_offset << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, offset, surf.base_align);
+		return -EINVAL;
+	}
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->db_z_read_bo)) {
+		dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
+			 "offset %ld, max layer %d, bo size %ld)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)track->db_z_read_offset << 8, mslice,
+			radeon_bo_size(track->db_z_read_bo));
+		return -EINVAL;
+	}
+
+	offset = track->db_z_write_offset << 8;
+	if (offset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, offset, surf.base_align);
+		return -EINVAL;
+	}
+	offset += surf.layer_size * mslice;
+	if (offset > radeon_bo_size(track->db_z_write_bo)) {
+		dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
+			 "offset %ld, max layer %d, bo size %ld)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)track->db_z_write_offset << 8, mslice,
+			radeon_bo_size(track->db_z_write_bo));
+		return -EINVAL;
+	}
+
+	/* hyperz */
+	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
+		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
+		if (r) {
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
+					       struct radeon_bo *texture,
+					       struct radeon_bo *mipmap,
+					       unsigned idx)
+{
+	struct eg_surface surf;
+	unsigned long toffset, moffset;
+	unsigned dim, llevel, mslice, width, height, depth, i;
+	u32 texdw[8];
+	int r;
+
+	texdw[0] = radeon_get_ib_value(p, idx + 0);
+	texdw[1] = radeon_get_ib_value(p, idx + 1);
+	texdw[2] = radeon_get_ib_value(p, idx + 2);
+	texdw[3] = radeon_get_ib_value(p, idx + 3);
+	texdw[4] = radeon_get_ib_value(p, idx + 4);
+	texdw[5] = radeon_get_ib_value(p, idx + 5);
+	texdw[6] = radeon_get_ib_value(p, idx + 6);
+	texdw[7] = radeon_get_ib_value(p, idx + 7);
+	dim = G_030000_DIM(texdw[0]);
+	llevel = G_030014_LAST_LEVEL(texdw[5]);
+	mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
+	width = G_030000_TEX_WIDTH(texdw[0]) + 1;
+	height =  G_030004_TEX_HEIGHT(texdw[1]) + 1;
+	depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
+	surf.format = G_03001C_DATA_FORMAT(texdw[7]);
+	surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
+	surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
+	surf.nby = r600_fmt_get_nblocksy(surf.format, height);
+	surf.mode = G_030004_ARRAY_MODE(texdw[1]);
+	surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
+	surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
+	surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
+	surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
+	surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
+	surf.nsamples = 1;
+	toffset = texdw[2] << 8;
+	moffset = texdw[3] << 8;
+
+	if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
+		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
+			 __func__, __LINE__, surf.format);
+		return -EINVAL;
+	}
+	switch (dim) {
+	case V_030000_SQ_TEX_DIM_1D:
+	case V_030000_SQ_TEX_DIM_2D:
+	case V_030000_SQ_TEX_DIM_CUBEMAP:
+	case V_030000_SQ_TEX_DIM_1D_ARRAY:
+	case V_030000_SQ_TEX_DIM_2D_ARRAY:
+		depth = 1;
+		break;
+	case V_030000_SQ_TEX_DIM_2D_MSAA:
+	case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+		surf.nsamples = 1 << llevel;
+		llevel = 0;
+		depth = 1;
+		break;
+	case V_030000_SQ_TEX_DIM_3D:
+		break;
+	default:
+		dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
+			 __func__, __LINE__, dim);
+		return -EINVAL;
+	}
+
+	r = evergreen_surface_value_conv_check(p, &surf, "texture");
+	if (r) {
+		return r;
+	}
+
+	/* align height */
+	evergreen_surface_check(p, &surf, NULL);
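+	/* the NULL-prefix pass above only computes the alignment values */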
+	surf.nby = ALIGN(surf.nby, surf.halign);
+
+	r = evergreen_surface_check(p, &surf, "texture");
+	if (r) {
+		dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
+			 texdw[5], texdw[6], texdw[7]);
+		return r;
+	}
+
+	/* check texture size */
+	if (toffset & (surf.base_align - 1)) {
+		dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, toffset, surf.base_align);
+		return -EINVAL;
+	}
+	if (surf.nsamples <= 1 && (moffset & (surf.base_align - 1))) {
+		dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
+			 __func__, __LINE__, moffset, surf.base_align);
+		return -EINVAL;
+	}
+	if (dim == SQ_TEX_DIM_3D) {
+		toffset += surf.layer_size * depth;
+	} else {
+		toffset += surf.layer_size * mslice;
+	}
+	if (toffset > radeon_bo_size(texture)) {
+		dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
+			 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
+			 __func__, __LINE__, surf.layer_size,
+			(unsigned long)texdw[2] << 8, mslice,
+			depth, radeon_bo_size(texture),
+			surf.nbx, surf.nby);
+		return -EINVAL;
+	}
+
+	if (!mipmap) {
+		if (llevel) {
+			dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
+				 __func__, __LINE__);
+			return -EINVAL;
+		} else {
+			return 0; /* everything's ok */
+		}
+	}
+
+	/* check mipmap size */
+	for (i = 1; i <= llevel; i++) {
+		unsigned w, h, d;
+
+		w = r600_mip_minify(width, i);
+		h = r600_mip_minify(height, i);
+		d = r600_mip_minify(depth, i);
+		surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
+		surf.nby = r600_fmt_get_nblocksy(surf.format, h);
+
+		switch (surf.mode) {
+		case ARRAY_2D_TILED_THIN1:
+			if (surf.nbx < surf.palign || surf.nby < surf.halign) {
+				surf.mode = ARRAY_1D_TILED_THIN1;
+			}
+			/* recompute alignment */
+			evergreen_surface_check(p, &surf, NULL);
+			break;
+		case ARRAY_LINEAR_GENERAL:
+		case ARRAY_LINEAR_ALIGNED:
+		case ARRAY_1D_TILED_THIN1:
+			break;
+		default:
+			dev_warn(p->dev, "%s:%d invalid array mode %d\n",
+				 __func__, __LINE__, surf.mode);
+			return -EINVAL;
+		}
+		surf.nbx = ALIGN(surf.nbx, surf.palign);
+		surf.nby = ALIGN(surf.nby, surf.halign);
+
+		r = evergreen_surface_check(p, &surf, "mipmap");
+		if (r) {
+			return r;
+		}
+
+		if (dim == SQ_TEX_DIM_3D) {
+			moffset += surf.layer_size * d;
+		} else {
+			moffset += surf.layer_size * mslice;
+		}
+		if (moffset > radeon_bo_size(mipmap)) {
+			dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
+					"offset %ld, coffset %ld, max layer %d, depth %d, "
+					"bo size %ld) level0 (%d %d %d)\n",
+					__func__, __LINE__, i, surf.layer_size,
+					(unsigned long)texdw[3] << 8, moffset, mslice,
+					d, radeon_bo_size(mipmap),
+					width, height, depth);
+			dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
+				 __func__, __LINE__, surf.nbx, surf.nby,
+				surf.mode, surf.bpe, surf.nsamples,
+				surf.bankw, surf.bankh,
+				surf.tsplit, surf.mtilea);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int evergreen_cs_track_check(struct radeon_cs_parser *p)
+{
+	struct evergreen_cs_track *track = p->track;
+	unsigned tmp, i;
+	int r;
+	unsigned buffer_mask = 0;
+
+	/* check streamout */
+	if (track->streamout_dirty && track->vgt_strmout_config) {
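+		/* each enabled stream contributes a 4-bit buffer mask taken
+		 * from its nibble of VGT_STRMOUT_BUFFER_CONFIG
+		 */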
+		for (i = 0; i < 4; i++) {
+			if (track->vgt_strmout_config & (1 << i)) {
+				buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
+			}
+		}
+
+		for (i = 0; i < 4; i++) {
+			if (buffer_mask & (1 << i)) {
+				if (track->vgt_strmout_bo[i]) {
+					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
+							(u64)track->vgt_strmout_size[i];
+					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
+						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
+							  i, offset,
+							  radeon_bo_size(track->vgt_strmout_bo[i]));
+						return -EINVAL;
+					}
+				} else {
+					dev_warn(p->dev, "No buffer for streamout %d\n", i);
+					return -EINVAL;
+				}
+			}
+		}
+		track->streamout_dirty = false;
+	}
+
+	if (track->sx_misc_kill_all_prims)
+		return 0;
+
+	/* check that we have a cb for each enabled target */
+	if (track->cb_dirty) {
+		tmp = track->cb_target_mask;
+		for (i = 0; i < 8; i++) {
+			u32 format = G_028C70_FORMAT(track->cb_color_info[i]);
+
+			if (format != V_028C70_COLOR_INVALID &&
+			    (tmp >> (i * 4)) & 0xF) {
+				/* at least one component is enabled */
+				if (track->cb_color_bo[i] == NULL) {
+					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+					return -EINVAL;
+				}
+				/* check cb */
+				r = evergreen_cs_track_validate_cb(p, i);
+				if (r) {
+					return r;
+				}
+			}
+		}
+		track->cb_dirty = false;
+	}
+
+	if (track->db_dirty) {
+		/* Check stencil buffer */
+		if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
+		    G_028800_STENCIL_ENABLE(track->db_depth_control)) {
+			r = evergreen_cs_track_validate_stencil(p);
+			if (r)
+				return r;
+		}
+		/* Check depth buffer */
+		if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
+		    G_028800_Z_ENABLE(track->db_depth_control)) {
+			r = evergreen_cs_track_validate_depth(p);
+			if (r)
+				return r;
+		}
+		track->db_dirty = false;
+	}
+
+	return 0;
+}
+
+/**
+ * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
+ * @p:		parser structure holding parsing context.
+ *
+ * This is an Evergreen(+)-specific function for parsing VLINE packets.
+ * The real work is done by the r600_cs_common_vline_parse() function.
+ * Here we just set up the ASIC-specific register tables and call
+ * the common implementation function.
+ */
+static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+	static uint32_t vline_start_end[6] = {
+		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
+		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
+		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
+	};
+	static uint32_t vline_status[6] = {
+		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
+	};
+
+	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
+}
+
+static int evergreen_packet0_check(struct radeon_cs_parser *p,
+				   struct radeon_cs_packet *pkt,
+				   unsigned idx, unsigned reg)
+{
+	int r;
+
+	switch (reg) {
+	case EVERGREEN_VLINE_START_END:
+		r = evergreen_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			return r;
+		}
+		break;
+	default:
+		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+		       reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
+				      struct radeon_cs_packet *pkt)
+{
+	unsigned reg, i;
+	unsigned idx;
+	int r;
+
+	idx = pkt->idx + 1;
+	reg = pkt->reg;
+	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+		r = evergreen_packet0_check(p, pkt, idx, reg);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+/**
+ * evergreen_cs_check_reg() - check if register is authorized or not
+ * @p: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ *
+ * This function will test against evergreen_reg_safe_bm and return 0
+ * if the register is safe. If the register is not flagged as safe, this
+ * function will test it against a list of registers needing special handling.
+ */
+static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
+	struct radeon_cs_reloc *reloc;
+	u32 last_reg;
+	u32 m, i, tmp, *ib;
+	int r;
+
+	if (p->rdev->family >= CHIP_CAYMAN)
+		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
+	else
+		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+
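+	/* each u32 of the safe bitmap covers 32 registers (128 bytes of
+	 * register space); a clear bit means the register is always safe,
+	 * a set bit sends it to the special-case handling below
+	 */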
+	i = (reg >> 7);
+	if (i >= last_reg) {
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	m = 1 << ((reg >> 2) & 31);
+	if (p->rdev->family >= CHIP_CAYMAN) {
+		if (!(cayman_reg_safe_bm[i] & m))
+			return 0;
+	} else {
+		if (!(evergreen_reg_safe_bm[i] & m))
+			return 0;
+	}
+	ib = p->ib.ptr;
+	switch (reg) {
+	/* force the following regs to 0 in an attempt to disable the out
+	 * buffer; we would need to understand better how it works in order
+	 * to perform a proper security check on it (Jerome)
+	 */
+	case SQ_ESGS_RING_SIZE:
+	case SQ_GSVS_RING_SIZE:
+	case SQ_ESTMP_RING_SIZE:
+	case SQ_GSTMP_RING_SIZE:
+	case SQ_HSTMP_RING_SIZE:
+	case SQ_LSTMP_RING_SIZE:
+	case SQ_PSTMP_RING_SIZE:
+	case SQ_VSTMP_RING_SIZE:
+	case SQ_ESGS_RING_ITEMSIZE:
+	case SQ_ESTMP_RING_ITEMSIZE:
+	case SQ_GSTMP_RING_ITEMSIZE:
+	case SQ_GSVS_RING_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE_1:
+	case SQ_GS_VERT_ITEMSIZE_2:
+	case SQ_GS_VERT_ITEMSIZE_3:
+	case SQ_GSVS_RING_OFFSET_1:
+	case SQ_GSVS_RING_OFFSET_2:
+	case SQ_GSVS_RING_OFFSET_3:
+	case SQ_HSTMP_RING_ITEMSIZE:
+	case SQ_LSTMP_RING_ITEMSIZE:
+	case SQ_PSTMP_RING_ITEMSIZE:
+	case SQ_VSTMP_RING_ITEMSIZE:
+	case VGT_TF_RING_SIZE:
+		/* get value to populate the IB, don't remove */
+		/*tmp = radeon_get_ib_value(p, idx);
+		  ib[idx] = 0;*/
+		break;
+	case SQ_ESGS_RING_BASE:
+	case SQ_GSVS_RING_BASE:
+	case SQ_ESTMP_RING_BASE:
+	case SQ_GSTMP_RING_BASE:
+	case SQ_HSTMP_RING_BASE:
+	case SQ_LSTMP_RING_BASE:
+	case SQ_PSTMP_RING_BASE:
+	case SQ_VSTMP_RING_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case DB_DEPTH_CONTROL:
+		track->db_depth_control = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case CAYMAN_DB_EQAA:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		break;
+	case CAYMAN_DB_DEPTH_INFO:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		break;
+	case DB_Z_INFO:
+		track->db_z_info = radeon_get_ib_value(p, idx);
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+						"0x%04X\n", reg);
+				return -EINVAL;
+			}
+			ib[idx] &= ~Z_ARRAY_MODE(0xf);
+			track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+			ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+			track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+				unsigned bankw, bankh, mtaspect, tile_split;
+
+				evergreen_tiling_fields(reloc->lobj.tiling_flags,
+							&bankw, &bankh, &mtaspect,
+							&tile_split);
+				ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+				ib[idx] |= DB_TILE_SPLIT(tile_split) |
+						DB_BANK_WIDTH(bankw) |
+						DB_BANK_HEIGHT(bankh) |
+						DB_MACRO_TILE_ASPECT(mtaspect);
+			}
+		}
+		track->db_dirty = true;
+		break;
+	case DB_STENCIL_INFO:
+		track->db_s_info = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case DB_DEPTH_VIEW:
+		track->db_depth_view = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case DB_DEPTH_SIZE:
+		track->db_depth_size = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case R_02805C_DB_DEPTH_SLICE:
+		track->db_depth_slice = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case DB_Z_READ_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_z_read_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->db_z_read_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_Z_WRITE_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_z_write_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->db_z_write_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_STENCIL_READ_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_s_read_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->db_s_read_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_STENCIL_WRITE_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_s_write_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->db_s_write_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case VGT_STRMOUT_CONFIG:
+		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_CONFIG:
+		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_BASE_0:
+	case VGT_STRMOUT_BUFFER_BASE_1:
+	case VGT_STRMOUT_BUFFER_BASE_2:
+	case VGT_STRMOUT_BUFFER_BASE_3:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
+		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->vgt_strmout_bo[tmp] = reloc->robj;
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_SIZE_0:
+	case VGT_STRMOUT_BUFFER_SIZE_1:
+	case VGT_STRMOUT_BUFFER_SIZE_2:
+	case VGT_STRMOUT_BUFFER_SIZE_3:
+		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
+		/* size in register is DWs, convert to bytes */
+		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
+		track->streamout_dirty = true;
+		break;
+	case CP_COHER_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case CB_TARGET_MASK:
+		track->cb_target_mask = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_SHADER_MASK:
+		track->cb_shader_mask = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case PA_SC_AA_CONFIG:
+		if (p->rdev->family >= CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
+		track->nsamples = 1 << tmp;
+		break;
+	case CAYMAN_PA_SC_AA_CONFIG:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
+		track->nsamples = 1 << tmp;
+		break;
+	case CB_COLOR0_VIEW:
+	case CB_COLOR1_VIEW:
+	case CB_COLOR2_VIEW:
+	case CB_COLOR3_VIEW:
+	case CB_COLOR4_VIEW:
+	case CB_COLOR5_VIEW:
+	case CB_COLOR6_VIEW:
+	case CB_COLOR7_VIEW:
+		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
+		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_VIEW:
+	case CB_COLOR9_VIEW:
+	case CB_COLOR10_VIEW:
+	case CB_COLOR11_VIEW:
+		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
+		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_INFO:
+	case CB_COLOR1_INFO:
+	case CB_COLOR2_INFO:
+	case CB_COLOR3_INFO:
+	case CB_COLOR4_INFO:
+	case CB_COLOR5_INFO:
+	case CB_COLOR6_INFO:
+	case CB_COLOR7_INFO:
+		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
+		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+						"0x%04X\n", reg);
+				return -EINVAL;
+			}
+			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+		}
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_INFO:
+	case CB_COLOR9_INFO:
+	case CB_COLOR10_INFO:
+	case CB_COLOR11_INFO:
+		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
+		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+						"0x%04X\n", reg);
+				return -EINVAL;
+			}
+			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+		}
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_PITCH:
+	case CB_COLOR1_PITCH:
+	case CB_COLOR2_PITCH:
+	case CB_COLOR3_PITCH:
+	case CB_COLOR4_PITCH:
+	case CB_COLOR5_PITCH:
+	case CB_COLOR6_PITCH:
+	case CB_COLOR7_PITCH:
+		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
+		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_PITCH:
+	case CB_COLOR9_PITCH:
+	case CB_COLOR10_PITCH:
+	case CB_COLOR11_PITCH:
+		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
+		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_SLICE:
+	case CB_COLOR1_SLICE:
+	case CB_COLOR2_SLICE:
+	case CB_COLOR3_SLICE:
+	case CB_COLOR4_SLICE:
+	case CB_COLOR5_SLICE:
+	case CB_COLOR6_SLICE:
+	case CB_COLOR7_SLICE:
+		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
+		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_color_slice_idx[tmp] = idx;
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_SLICE:
+	case CB_COLOR9_SLICE:
+	case CB_COLOR10_SLICE:
+	case CB_COLOR11_SLICE:
+		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
+		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_color_slice_idx[tmp] = idx;
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_ATTRIB:
+	case CB_COLOR1_ATTRIB:
+	case CB_COLOR2_ATTRIB:
+	case CB_COLOR3_ATTRIB:
+	case CB_COLOR4_ATTRIB:
+	case CB_COLOR5_ATTRIB:
+	case CB_COLOR6_ATTRIB:
+	case CB_COLOR7_ATTRIB:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+				unsigned bankw, bankh, mtaspect, tile_split;
+
+				evergreen_tiling_fields(reloc->lobj.tiling_flags,
+							&bankw, &bankh, &mtaspect,
+							&tile_split);
+				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+				ib[idx] |= CB_TILE_SPLIT(tile_split) |
+					   CB_BANK_WIDTH(bankw) |
+					   CB_BANK_HEIGHT(bankh) |
+					   CB_MACRO_TILE_ASPECT(mtaspect);
+			}
+		}
+		tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
+		track->cb_color_attrib[tmp] = ib[idx];
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_ATTRIB:
+	case CB_COLOR9_ATTRIB:
+	case CB_COLOR10_ATTRIB:
+	case CB_COLOR11_ATTRIB:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+				unsigned bankw, bankh, mtaspect, tile_split;
+
+				evergreen_tiling_fields(reloc->lobj.tiling_flags,
+							&bankw, &bankh, &mtaspect,
+							&tile_split);
+				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+				ib[idx] |= CB_TILE_SPLIT(tile_split) |
+					   CB_BANK_WIDTH(bankw) |
+					   CB_BANK_HEIGHT(bankh) |
+					   CB_MACRO_TILE_ASPECT(mtaspect);
+			}
+		}
+		tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
+		track->cb_color_attrib[tmp] = ib[idx];
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR0_FMASK:
+	case CB_COLOR1_FMASK:
+	case CB_COLOR2_FMASK:
+	case CB_COLOR3_FMASK:
+	case CB_COLOR4_FMASK:
+	case CB_COLOR5_FMASK:
+	case CB_COLOR6_FMASK:
+	case CB_COLOR7_FMASK:
+		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_fmask_bo[tmp] = reloc->robj;
+		break;
+	case CB_COLOR0_CMASK:
+	case CB_COLOR1_CMASK:
+	case CB_COLOR2_CMASK:
+	case CB_COLOR3_CMASK:
+	case CB_COLOR4_CMASK:
+	case CB_COLOR5_CMASK:
+	case CB_COLOR6_CMASK:
+	case CB_COLOR7_CMASK:
+		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_cmask_bo[tmp] = reloc->robj;
+		break;
+	case CB_COLOR0_FMASK_SLICE:
+	case CB_COLOR1_FMASK_SLICE:
+	case CB_COLOR2_FMASK_SLICE:
+	case CB_COLOR3_FMASK_SLICE:
+	case CB_COLOR4_FMASK_SLICE:
+	case CB_COLOR5_FMASK_SLICE:
+	case CB_COLOR6_FMASK_SLICE:
+	case CB_COLOR7_FMASK_SLICE:
+		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
+		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
+		break;
+	case CB_COLOR0_CMASK_SLICE:
+	case CB_COLOR1_CMASK_SLICE:
+	case CB_COLOR2_CMASK_SLICE:
+	case CB_COLOR3_CMASK_SLICE:
+	case CB_COLOR4_CMASK_SLICE:
+	case CB_COLOR5_CMASK_SLICE:
+	case CB_COLOR6_CMASK_SLICE:
+	case CB_COLOR7_CMASK_SLICE:
+		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
+		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
+		break;
+	case CB_COLOR0_BASE:
+	case CB_COLOR1_BASE:
+	case CB_COLOR2_BASE:
+	case CB_COLOR3_BASE:
+	case CB_COLOR4_BASE:
+	case CB_COLOR5_BASE:
+	case CB_COLOR6_BASE:
+	case CB_COLOR7_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
+		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_bo[tmp] = reloc->robj;
+		track->cb_dirty = true;
+		break;
+	case CB_COLOR8_BASE:
+	case CB_COLOR9_BASE:
+	case CB_COLOR10_BASE:
+	case CB_COLOR11_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
+		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_bo[tmp] = reloc->robj;
+		track->cb_dirty = true;
+		break;
+	case DB_HTILE_DATA_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->htile_offset = radeon_get_ib_value(p, idx);
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->htile_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_HTILE_SURFACE:
+		/* 8x8 only */
+		track->htile_surface = radeon_get_ib_value(p, idx);
+		/* force 8x8 htile width and height */
+		ib[idx] |= 3;
+		track->db_dirty = true;
+		break;
+	case CB_IMMED0_BASE:
+	case CB_IMMED1_BASE:
+	case CB_IMMED2_BASE:
+	case CB_IMMED3_BASE:
+	case CB_IMMED4_BASE:
+	case CB_IMMED5_BASE:
+	case CB_IMMED6_BASE:
+	case CB_IMMED7_BASE:
+	case CB_IMMED8_BASE:
+	case CB_IMMED9_BASE:
+	case CB_IMMED10_BASE:
+	case CB_IMMED11_BASE:
+	case SQ_PGM_START_FS:
+	case SQ_PGM_START_ES:
+	case SQ_PGM_START_VS:
+	case SQ_PGM_START_GS:
+	case SQ_PGM_START_PS:
+	case SQ_PGM_START_HS:
+	case SQ_PGM_START_LS:
+	case SQ_CONST_MEM_BASE:
+	case SQ_ALU_CONST_CACHE_GS_0:
+	case SQ_ALU_CONST_CACHE_GS_1:
+	case SQ_ALU_CONST_CACHE_GS_2:
+	case SQ_ALU_CONST_CACHE_GS_3:
+	case SQ_ALU_CONST_CACHE_GS_4:
+	case SQ_ALU_CONST_CACHE_GS_5:
+	case SQ_ALU_CONST_CACHE_GS_6:
+	case SQ_ALU_CONST_CACHE_GS_7:
+	case SQ_ALU_CONST_CACHE_GS_8:
+	case SQ_ALU_CONST_CACHE_GS_9:
+	case SQ_ALU_CONST_CACHE_GS_10:
+	case SQ_ALU_CONST_CACHE_GS_11:
+	case SQ_ALU_CONST_CACHE_GS_12:
+	case SQ_ALU_CONST_CACHE_GS_13:
+	case SQ_ALU_CONST_CACHE_GS_14:
+	case SQ_ALU_CONST_CACHE_GS_15:
+	case SQ_ALU_CONST_CACHE_PS_0:
+	case SQ_ALU_CONST_CACHE_PS_1:
+	case SQ_ALU_CONST_CACHE_PS_2:
+	case SQ_ALU_CONST_CACHE_PS_3:
+	case SQ_ALU_CONST_CACHE_PS_4:
+	case SQ_ALU_CONST_CACHE_PS_5:
+	case SQ_ALU_CONST_CACHE_PS_6:
+	case SQ_ALU_CONST_CACHE_PS_7:
+	case SQ_ALU_CONST_CACHE_PS_8:
+	case SQ_ALU_CONST_CACHE_PS_9:
+	case SQ_ALU_CONST_CACHE_PS_10:
+	case SQ_ALU_CONST_CACHE_PS_11:
+	case SQ_ALU_CONST_CACHE_PS_12:
+	case SQ_ALU_CONST_CACHE_PS_13:
+	case SQ_ALU_CONST_CACHE_PS_14:
+	case SQ_ALU_CONST_CACHE_PS_15:
+	case SQ_ALU_CONST_CACHE_VS_0:
+	case SQ_ALU_CONST_CACHE_VS_1:
+	case SQ_ALU_CONST_CACHE_VS_2:
+	case SQ_ALU_CONST_CACHE_VS_3:
+	case SQ_ALU_CONST_CACHE_VS_4:
+	case SQ_ALU_CONST_CACHE_VS_5:
+	case SQ_ALU_CONST_CACHE_VS_6:
+	case SQ_ALU_CONST_CACHE_VS_7:
+	case SQ_ALU_CONST_CACHE_VS_8:
+	case SQ_ALU_CONST_CACHE_VS_9:
+	case SQ_ALU_CONST_CACHE_VS_10:
+	case SQ_ALU_CONST_CACHE_VS_11:
+	case SQ_ALU_CONST_CACHE_VS_12:
+	case SQ_ALU_CONST_CACHE_VS_13:
+	case SQ_ALU_CONST_CACHE_VS_14:
+	case SQ_ALU_CONST_CACHE_VS_15:
+	case SQ_ALU_CONST_CACHE_HS_0:
+	case SQ_ALU_CONST_CACHE_HS_1:
+	case SQ_ALU_CONST_CACHE_HS_2:
+	case SQ_ALU_CONST_CACHE_HS_3:
+	case SQ_ALU_CONST_CACHE_HS_4:
+	case SQ_ALU_CONST_CACHE_HS_5:
+	case SQ_ALU_CONST_CACHE_HS_6:
+	case SQ_ALU_CONST_CACHE_HS_7:
+	case SQ_ALU_CONST_CACHE_HS_8:
+	case SQ_ALU_CONST_CACHE_HS_9:
+	case SQ_ALU_CONST_CACHE_HS_10:
+	case SQ_ALU_CONST_CACHE_HS_11:
+	case SQ_ALU_CONST_CACHE_HS_12:
+	case SQ_ALU_CONST_CACHE_HS_13:
+	case SQ_ALU_CONST_CACHE_HS_14:
+	case SQ_ALU_CONST_CACHE_HS_15:
+	case SQ_ALU_CONST_CACHE_LS_0:
+	case SQ_ALU_CONST_CACHE_LS_1:
+	case SQ_ALU_CONST_CACHE_LS_2:
+	case SQ_ALU_CONST_CACHE_LS_3:
+	case SQ_ALU_CONST_CACHE_LS_4:
+	case SQ_ALU_CONST_CACHE_LS_5:
+	case SQ_ALU_CONST_CACHE_LS_6:
+	case SQ_ALU_CONST_CACHE_LS_7:
+	case SQ_ALU_CONST_CACHE_LS_8:
+	case SQ_ALU_CONST_CACHE_LS_9:
+	case SQ_ALU_CONST_CACHE_LS_10:
+	case SQ_ALU_CONST_CACHE_LS_11:
+	case SQ_ALU_CONST_CACHE_LS_12:
+	case SQ_ALU_CONST_CACHE_LS_13:
+	case SQ_ALU_CONST_CACHE_LS_14:
+	case SQ_ALU_CONST_CACHE_LS_15:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case SX_MEMORY_EXPORT_BASE:
+		if (p->rdev->family >= CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONFIG_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONFIG_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case CAYMAN_SX_SCATTER_EXPORT_BASE:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+				 "0x%04X\n", reg);
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case SX_MISC:
+		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
+		break;
+	default:
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+	u32 last_reg, m, i;
+
+	if (p->rdev->family >= CHIP_CAYMAN)
+		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
+	else
+		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+
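+	/* same bitmap lookup as evergreen_cs_check_reg(), but with no
+	 * special cases: any flagged register is simply rejected
+	 */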
+	i = (reg >> 7);
+	if (i >= last_reg) {
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return false;
+	}
+	m = 1 << ((reg >> 2) & 31);
+	if (p->rdev->family >= CHIP_CAYMAN) {
+		if (!(cayman_reg_safe_bm[i] & m))
+			return true;
+	} else {
+		if (!(evergreen_reg_safe_bm[i] & m))
+			return true;
+	}
+	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+	return false;
+}
+
+static int evergreen_packet3_check(struct radeon_cs_parser *p,
+				   struct radeon_cs_packet *pkt)
+{
+	struct radeon_cs_reloc *reloc;
+	struct evergreen_cs_track *track;
+	volatile u32 *ib;
+	unsigned idx;
+	unsigned i;
+	unsigned start_reg, end_reg, reg;
+	int r;
+	u32 idx_value;
+
+	track = (struct evergreen_cs_track *)p->track;
+	ib = p->ib.ptr;
+	idx = pkt->idx + 1;
+	idx_value = radeon_get_ib_value(p, idx);
+
+	switch (pkt->opcode) {
+	case PACKET3_SET_PREDICATION:
+	{
+		int pred_op;
+		int tmp;
+		uint64_t offset;
+
+		if (pkt->count != 1) {
+			DRM_ERROR("bad SET PREDICATION\n");
+			return -EINVAL;
+		}
+
+		tmp = radeon_get_ib_value(p, idx + 1);
+		pred_op = (tmp >> 16) & 0x7;
+
+		/* for the clear predicate operation */
+		if (pred_op == 0)
+			return 0;
+
+		if (pred_op > 2) {
+			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+			return -EINVAL;
+		}
+
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad SET PREDICATION\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         (idx_value & 0xfffffff0) +
+		         ((u64)(tmp & 0xff) << 32);
+
+		ib[idx + 0] = offset;
+		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+	}
+	break;
+	case PACKET3_CONTEXT_CONTROL:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad CONTEXT_CONTROL\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_INDEX_TYPE:
+	case PACKET3_NUM_INSTANCES:
+	case PACKET3_CLEAR_STATE:
+		if (pkt->count) {
+			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+			return -EINVAL;
+		}
+		break;
+	case CAYMAN_PACKET3_DEALLOC_STATE:
+		if (p->rdev->family < CHIP_CAYMAN) {
+			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
+			return -EINVAL;
+		}
+		if (pkt->count) {
+			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_INDEX_BASE:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 1) {
+			DRM_ERROR("bad INDEX_BASE\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad INDEX_BASE\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         idx_value +
+		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	}
+	case PACKET3_DRAW_INDEX:
+	{
+		uint64_t offset;
+		if (pkt->count != 3) {
+			DRM_ERROR("bad DRAW_INDEX\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad DRAW_INDEX\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         idx_value +
+		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	}
+	case PACKET3_DRAW_INDEX_2:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 4) {
+			DRM_ERROR("bad DRAW_INDEX_2\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad DRAW_INDEX_2\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         radeon_get_ib_value(p, idx+1) +
+		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+		ib[idx+1] = offset;
+		ib[idx+2] = upper_32_bits(offset) & 0xff;
+
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	}
+	case PACKET3_DRAW_INDEX_AUTO:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_MULTI_AUTO:
+		if (pkt->count != 2) {
+			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_IMMD:
+		if (pkt->count < 2) {
+			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_OFFSET:
+		if (pkt->count != 2) {
+			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_OFFSET_2:
+		if (pkt->count != 3) {
+			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_DISPATCH_DIRECT:
+		if (pkt->count != 3) {
+			DRM_ERROR("bad DISPATCH_DIRECT\n");
+			return -EINVAL;
+		}
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+			return r;
+		}
+		break;
+	case PACKET3_DISPATCH_INDIRECT:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad DISPATCH_INDIRECT\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad DISPATCH_INDIRECT\n");
+			return -EINVAL;
+		}
+		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+		r = evergreen_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_WAIT_REG_MEM:
+		if (pkt->count != 5) {
+			DRM_ERROR("bad WAIT_REG_MEM\n");
+			return -EINVAL;
+		}
+		/* bit 4 is reg (0) or mem (1) */
+		if (idx_value & 0x10) {
+			uint64_t offset;
+
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad WAIT_REG_MEM\n");
+				return -EINVAL;
+			}
+
+			offset = reloc->lobj.gpu_offset +
+			         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		} else if (idx_value & 0x100) {
+			DRM_ERROR("cannot use PFP on REG wait\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_CP_DMA:
+	{
+		u32 command, size, info;
+		u64 offset, tmp;
+		if (pkt->count != 4) {
+			DRM_ERROR("bad CP DMA\n");
+			return -EINVAL;
+		}
+		command = radeon_get_ib_value(p, idx+4);
+		size = command & 0x1fffff;
+		info = radeon_get_ib_value(p, idx+1);
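+		/* bits 29-30 of info are SRC_SEL and bits 20-21 are DST_SEL;
+		 * 0 selects a plain memory address (which needs a reloc), the
+		 * other values pick the GDS/DATA/register paths checked below
+		 */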
+		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+		    ((((info & 0x00300000) >> 20) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+		    ((((info & 0x60000000) >> 29) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+			/* non mem-to-mem copies require a dw-aligned count */
+			if (size % 4) {
+				DRM_ERROR("CP DMA command requires dw count alignment\n");
+				return -EINVAL;
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			/* GDS is ok */
+			if (((info & 0x60000000) >> 29) != 1) {
+				DRM_ERROR("CP DMA SAS not supported\n");
+				return -EINVAL;
+			}
+		} else {
+			if (command & PACKET3_CP_DMA_CMD_SAIC) {
+				DRM_ERROR("CP DMA SAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			/* src address space is memory */
+			if (((info & 0x60000000) >> 29) == 0) {
+				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+				if (r) {
+					DRM_ERROR("bad CP DMA SRC\n");
+					return -EINVAL;
+				}
+
+				tmp = radeon_get_ib_value(p, idx) +
+					((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+				offset = reloc->lobj.gpu_offset + tmp;
+
+				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+					dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+						 tmp + size, radeon_bo_size(reloc->robj));
+					return -EINVAL;
+				}
+
+				ib[idx] = offset;
+				ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+			} else if (((info & 0x60000000) >> 29) != 2) {
+				DRM_ERROR("bad CP DMA SRC_SEL\n");
+				return -EINVAL;
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			/* GDS is ok */
+			if (((info & 0x00300000) >> 20) != 1) {
+				DRM_ERROR("CP DMA DAS not supported\n");
+				return -EINVAL;
+			}
+		} else {
+			/* dst address space is memory */
+			if (command & PACKET3_CP_DMA_CMD_DAIC) {
+				DRM_ERROR("CP DMA DAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			if (((info & 0x00300000) >> 20) == 0) {
+				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+				if (r) {
+					DRM_ERROR("bad CP DMA DST\n");
+					return -EINVAL;
+				}
+
+				tmp = radeon_get_ib_value(p, idx+2) +
+					((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+				offset = reloc->lobj.gpu_offset + tmp;
+
+				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+					dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+						 tmp + size, radeon_bo_size(reloc->robj));
+					return -EINVAL;
+				}
+
+				ib[idx+2] = offset;
+				ib[idx+3] = upper_32_bits(offset) & 0xff;
+			} else {
+				DRM_ERROR("bad CP DMA DST_SEL\n");
+				return -EINVAL;
+			}
+		}
+		break;
+	}
+	case PACKET3_SURFACE_SYNC:
+		if (pkt->count != 3) {
+			DRM_ERROR("bad SURFACE_SYNC\n");
+			return -EINVAL;
+		}
+		/* 0xffffffff/0x0 is flush all cache flag */
+		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+		    radeon_get_ib_value(p, idx + 2) != 0) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad SURFACE_SYNC\n");
+				return -EINVAL;
+			}
+			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		}
+		break;
+	case PACKET3_EVENT_WRITE:
+		if (pkt->count != 2 && pkt->count != 0) {
+			DRM_ERROR("bad EVENT_WRITE\n");
+			return -EINVAL;
+		}
+		if (pkt->count) {
+			uint64_t offset;
+
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad EVENT_WRITE\n");
+				return -EINVAL;
+			}
+			offset = reloc->lobj.gpu_offset +
+			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+			ib[idx+1] = offset & 0xfffffff8;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_EVENT_WRITE_EOP:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 4) {
+			DRM_ERROR("bad EVENT_WRITE_EOP\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad EVENT_WRITE_EOP\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+		ib[idx+1] = offset & 0xfffffffc;
+		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+		break;
+	}
+	case PACKET3_EVENT_WRITE_EOS:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 3) {
+			DRM_ERROR("bad EVENT_WRITE_EOS\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad EVENT_WRITE_EOS\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+		ib[idx+1] = offset & 0xfffffffc;
+		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+		break;
+	}
+	case PACKET3_SET_CONFIG_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			r = evergreen_cs_check_reg(p, reg, idx+1+i);
+			if (r)
+				return r;
+		}
+		break;
+	case PACKET3_SET_CONTEXT_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
+		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			r = evergreen_cs_check_reg(p, reg, idx+1+i);
+			if (r)
+				return r;
+		}
+		break;
+	case PACKET3_SET_RESOURCE:
+		if (pkt->count % 8) {
+			DRM_ERROR("bad SET_RESOURCE\n");
+			return -EINVAL;
+		}
+		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
+		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
+		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
+			DRM_ERROR("bad SET_RESOURCE\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < (pkt->count / 8); i++) {
+			struct radeon_bo *texture, *mipmap;
+			u32 toffset, moffset;
+			u32 size, offset, mip_address, tex_dim;
+
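+			/* each resource slot is 8 dwords; the constant type in
+			 * dword 7 tells a texture apart from a vertex buffer
+			 */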
+			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
+			case SQ_TEX_VTX_VALID_TEXTURE:
+				/* tex base */
+				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE (tex)\n");
+					return -EINVAL;
+				}
+				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+					ib[idx+1+(i*8)+1] |=
+						TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+						unsigned bankw, bankh, mtaspect, tile_split;
+
+						evergreen_tiling_fields(reloc->lobj.tiling_flags,
+									&bankw, &bankh, &mtaspect,
+									&tile_split);
+						ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
+						ib[idx+1+(i*8)+7] |=
+							TEX_BANK_WIDTH(bankw) |
+							TEX_BANK_HEIGHT(bankh) |
+							MACRO_TILE_ASPECT(mtaspect) |
+							TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+					}
+				}
+				texture = reloc->robj;
+				toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+
+				/* tex mip base */
+				tex_dim = ib[idx+1+(i*8)+0] & 0x7;
+				mip_address = ib[idx+1+(i*8)+3];
+
+				if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
+				    !mip_address &&
+				    !radeon_cs_packet_next_is_pkt3_nop(p)) {
+					/* MIP_ADDRESS should point to FMASK for an MSAA texture.
+					 * It should be 0 if FMASK is disabled. */
+					moffset = 0;
+					mipmap = NULL;
+				} else {
+					r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+					if (r) {
+						DRM_ERROR("bad SET_RESOURCE (tex)\n");
+						return -EINVAL;
+					}
+					moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+					mipmap = reloc->robj;
+				}
+
+				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
+				if (r)
+					return r;
+				ib[idx+1+(i*8)+2] += toffset;
+				ib[idx+1+(i*8)+3] += moffset;
+				break;
+			case SQ_TEX_VTX_VALID_BUFFER:
+			{
+				uint64_t offset64;
+				/* vtx base */
+				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
+					return -EINVAL;
+				}
+				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
+				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
+				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+					/* force size to size of the buffer */
+					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
+				}
+
+				offset64 = reloc->lobj.gpu_offset + offset;
+				ib[idx+1+(i*8)+0] = offset64;
+				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
+						    (upper_32_bits(offset64) & 0xff);
+				break;
+			}
+			case SQ_TEX_VTX_INVALID_TEXTURE:
+			case SQ_TEX_VTX_INVALID_BUFFER:
+			default:
+				DRM_ERROR("bad SET_RESOURCE\n");
+				return -EINVAL;
+			}
+		}
+		break;
+	case PACKET3_SET_ALU_CONST:
+		/* XXX fix me ALU const buffers only */
+		break;
+	case PACKET3_SET_BOOL_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
+		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
+		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
+			DRM_ERROR("bad SET_BOOL_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_LOOP_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
+		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
+		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
+			DRM_ERROR("bad SET_LOOP_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_CTL_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
+		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
+		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
+			DRM_ERROR("bad SET_CTL_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_SAMPLER:
+		if (pkt->count % 3) {
+			DRM_ERROR("bad SET_SAMPLER\n");
+			return -EINVAL;
+		}
+		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
+		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
+		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
+			DRM_ERROR("bad SET_SAMPLER\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+		if (pkt->count != 4) {
+			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+			return -EINVAL;
+		}
+		/* Updating memory at DST_ADDRESS. */
+		if (idx_value & 0x1) {
+			u64 offset;
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+1);
+			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+1] = offset;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		/* Reading data from SRC_ADDRESS. */
+		if (((idx_value >> 1) & 0x3) == 2) {
+			u64 offset;
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+3);
+			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+3] = offset;
+			ib[idx+4] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_MEM_WRITE:
+	{
+		u64 offset;
+
+		if (pkt->count != 3) {
+			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+			return -EINVAL;
+		}
+		offset = radeon_get_ib_value(p, idx+0);
+		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+		if (offset & 0x7) {
+			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+			return -EINVAL;
+		}
+		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+				  offset + 8, radeon_bo_size(reloc->robj));
+			return -EINVAL;
+		}
+		offset += reloc->lobj.gpu_offset;
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+		break;
+	}
+	case PACKET3_COPY_DW:
+		if (pkt->count != 4) {
+			DRM_ERROR("bad COPY_DW (invalid count)\n");
+			return -EINVAL;
+		}
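+		/* bit 0 of the control dword selects a memory (1) or register
+		 * (0) source, bit 1 does the same for the destination
+		 */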
+		if (idx_value & 0x1) {
+			u64 offset;
+			/* SRC is memory. */
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+1);
+			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+1] = offset;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		} else {
+			/* SRC is a reg. */
+			reg = radeon_get_ib_value(p, idx+1) << 2;
+			if (!evergreen_is_safe_reg(p, reg, idx+1))
+				return -EINVAL;
+		}
+		if (idx_value & 0x2) {
+			u64 offset;
+			/* DST is memory. */
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+3);
+			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+3] = offset;
+			ib[idx+4] = upper_32_bits(offset) & 0xff;
+		} else {
+			/* DST is a reg. */
+			reg = radeon_get_ib_value(p, idx+3) << 2;
+			if (!evergreen_is_safe_reg(p, reg, idx+3))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_NOP:
+		break;
+	default:
+		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int evergreen_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	struct evergreen_cs_track *track;
+	u32 tmp;
+	int r;
+
+	if (p->track == NULL) {
+		/* initialize tracker, we are in kms */
+		track = kzalloc(sizeof(*track), GFP_KERNEL);
+		if (track == NULL)
+			return -ENOMEM;
+		evergreen_cs_track_init(track);
+		if (p->rdev->family >= CHIP_CAYMAN)
+			tmp = p->rdev->config.cayman.tile_config;
+		else
+			tmp = p->rdev->config.evergreen.tile_config;
+
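+		/* tile_config packs the chip layout in nibbles: bits 0-3 pipe
+		 * count, bits 4-7 bank count, bits 8-11 group size and
+		 * bits 12-15 row size
+		 */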
+		switch (tmp & 0xf) {
+		case 0:
+			track->npipes = 1;
+			break;
+		case 1:
+		default:
+			track->npipes = 2;
+			break;
+		case 2:
+			track->npipes = 4;
+			break;
+		case 3:
+			track->npipes = 8;
+			break;
+		}
+
+		switch ((tmp & 0xf0) >> 4) {
+		case 0:
+			track->nbanks = 4;
+			break;
+		case 1:
+		default:
+			track->nbanks = 8;
+			break;
+		case 2:
+			track->nbanks = 16;
+			break;
+		}
+
+		switch ((tmp & 0xf00) >> 8) {
+		case 0:
+			track->group_size = 256;
+			break;
+		case 1:
+		default:
+			track->group_size = 512;
+			break;
+		}
+
+		switch ((tmp & 0xf000) >> 12) {
+		case 0:
+			track->row_size = 1;
+			break;
+		case 1:
+		default:
+			track->row_size = 2;
+			break;
+		case 2:
+			track->row_size = 4;
+			break;
+		}
+
+		p->track = track;
+	}
+	do {
+		r = radeon_cs_packet_parse(p, &pkt, p->idx);
+		if (r) {
+			kfree(p->track);
+			p->track = NULL;
+			return r;
+		}
+		p->idx += pkt.count + 2;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			r = evergreen_cs_parse_packet0(p, &pkt);
+			break;
+		case RADEON_PACKET_TYPE2:
+			break;
+		case RADEON_PACKET_TYPE3:
+			r = evergreen_packet3_check(p, &pkt);
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+			kfree(p->track);
+			p->track = NULL;
+			return -EINVAL;
+		}
+		if (r) {
+			kfree(p->track);
+			p->track = NULL;
+			return r;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	kfree(p->track);
+	p->track = NULL;
+	return 0;
+}
+
+/**
+ * evergreen_dma_cs_parse() - parse the DMA IB
+ * @p:		parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (Evergreen-Cayman)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+	u32 header, cmd, count, sub_cmd;
+	volatile u32 *ib = p->ib.ptr;
+	u32 idx;
+	u64 src_offset, dst_offset, dst2_offset;
+	int r;
+
+	do {
+		if (p->idx >= ib_chunk->length_dw) {
+			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+				  p->idx, ib_chunk->length_dw);
+			return -EINVAL;
+		}
+		idx = p->idx;
+		header = radeon_get_ib_value(p, idx);
+		cmd = GET_DMA_CMD(header);
+		count = GET_DMA_COUNT(header);
+		sub_cmd = GET_DMA_SUB_CMD(header);
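+		/* every DMA packet starts with a header dword packing the
+		 * command, sub-command and transfer count; those fields drive
+		 * the per-packet checks below
+		 */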
+
+		switch (cmd) {
+		case DMA_PACKET_WRITE:
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_WRITE\n");
+				return -EINVAL;
+			}
+			switch (sub_cmd) {
+			/* tiled */
+			case 8:
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				p->idx += count + 7;
+				break;
+			/* linear */
+			case 0:
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += count + 3;
+				break;
+			default:
+				DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
+				return -EINVAL;
+			}
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+					 dst_offset, radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_COPY:
+			r = r600_dma_cs_next_reloc(p, &src_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			switch (sub_cmd) {
+			/* Copy L2L, DW aligned */
+			case 0x00:
+				/* L2L, dw */
+				src_offset = radeon_get_ib_value(p, idx+2);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += 5;
+				break;
+			/* Copy L2T/T2L */
+			case 0x08:
+				/* detile bit */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					/* tiled src, linear dst */
+					src_offset = radeon_get_ib_value(p, idx+1);
+					src_offset <<= 8;
+					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+					dst_offset = radeon_get_ib_value(p, idx + 7);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+					ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				} else {
+					/* linear src, tiled dst */
+					src_offset = radeon_get_ib_value(p, idx+7);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+					ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset <<= 8;
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				}
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				p->idx += 9;
+				break;
+			/* Copy L2L, byte aligned */
+			case 0x40:
+				/* L2L, byte */
+				src_offset = radeon_get_ib_value(p, idx+2);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+				if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+							src_offset + count, radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+							dst_offset + count, radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+				ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+				ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += 5;
+				break;
+			/* Copy L2L, partial */
+			case 0x41:
+				/* L2L, partial */
+				if (p->family < CHIP_CAYMAN) {
+					DRM_ERROR("L2L Partial is cayman only !\n");
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+				ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+				ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+				p->idx += 9;
+				break;
+			/* Copy L2L, DW aligned, broadcast */
+			case 0x44:
+				/* L2L, dw, broadcast */
+				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+				if (r) {
+					DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+3);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+							dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += 7;
+				break;
+			/* Copy L2T Frame to Field */
+			case 0x48:
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+				if (r) {
+					DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset <<= 8;
+				src_offset = radeon_get_ib_value(p, idx+8);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+							dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+				ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += 10;
+				break;
+			/* Copy L2T/T2L, partial */
+			case 0x49:
+				/* L2T, T2L partial */
+				if (p->family < CHIP_CAYMAN) {
+					DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+					return -EINVAL;
+				}
+				/* detile bit */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					/* tiled src, linear dst */
+					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+					ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				} else {
+					/* linear src, tiled dst */
+					ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				}
+				p->idx += 12;
+				break;
+			/* Copy L2T broadcast */
+			case 0x4b:
+				/* L2T, broadcast */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+				if (r) {
+					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset <<= 8;
+				src_offset = radeon_get_ib_value(p, idx+8);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+							dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+				ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += 10;
+				break;
+			/* Copy L2T/T2L (tile units) */
+			case 0x4c:
+				/* L2T, T2L */
+				/* detile bit */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					/* tiled src, linear dst */
+					src_offset = radeon_get_ib_value(p, idx+1);
+					src_offset <<= 8;
+					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+					dst_offset = radeon_get_ib_value(p, idx+7);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+					ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				} else {
+					/* linear src, tiled dst */
+					src_offset = radeon_get_ib_value(p, idx+7);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+					ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset <<= 8;
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				}
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				p->idx += 9;
+				break;
+			/* Copy T2T, partial (tile units) */
+			case 0x4d:
+				/* T2T partial */
+				if (p->family < CHIP_CAYMAN) {
+					DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+				ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				p->idx += 13;
+				break;
+			/* Copy L2T broadcast (tile units) */
+			case 0x4f:
+				/* L2T, broadcast */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+				if (r) {
+					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset <<= 8;
+				src_offset = radeon_get_ib_value(p, idx+8);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+							src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+							dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+							dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+				ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += 10;
+				break;
+			default:
+				DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_CONSTANT_FILL:
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+				return -EINVAL;
+			}
+			dst_offset = radeon_get_ib_value(p, idx+1);
+			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+					 dst_offset, radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+			p->idx += 4;
+			break;
+		case DMA_PACKET_NOP:
+			p->idx += 1;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+			return -EINVAL;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	return 0;
+}
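+
+/*
+ * A note on the relocation patching above (as read from this code, not from
+ * hardware documentation): userspace pre-fills each address operand with an
+ * offset relative to its buffer object, and the parser adds the BO's GPU
+ * offset on top.  A linear address is split across two dwords, e.g.:
+ *
+ *	ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ *	ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ *
+ * while a tiled address is a single dword in 256-byte units (hence the
+ * ">> 8" / "<< 8" pairs).
+ */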
+
+/* vm parser */
+static bool evergreen_vm_reg_valid(u32 reg)
+{
+	/* context regs are fine */
+	if (reg >= 0x28000)
+		return true;
+
+	/* check config regs */
+	switch (reg) {
+	case WAIT_UNTIL:
+	case GRBM_GFX_INDEX:
+	case CP_STRMOUT_CNTL:
+	case CP_COHER_CNTL:
+	case CP_COHER_SIZE:
+	case VGT_VTX_VECT_EJECT_REG:
+	case VGT_CACHE_INVALIDATION:
+	case VGT_GS_VERTEX_REUSE:
+	case VGT_PRIMITIVE_TYPE:
+	case VGT_INDEX_TYPE:
+	case VGT_NUM_INDICES:
+	case VGT_NUM_INSTANCES:
+	case VGT_COMPUTE_DIM_X:
+	case VGT_COMPUTE_DIM_Y:
+	case VGT_COMPUTE_DIM_Z:
+	case VGT_COMPUTE_START_X:
+	case VGT_COMPUTE_START_Y:
+	case VGT_COMPUTE_START_Z:
+	case VGT_COMPUTE_INDEX:
+	case VGT_COMPUTE_THREAD_GROUP_SIZE:
+	case VGT_HS_OFFCHIP_PARAM:
+	case PA_CL_ENHANCE:
+	case PA_SU_LINE_STIPPLE_VALUE:
+	case PA_SC_LINE_STIPPLE_STATE:
+	case PA_SC_ENHANCE:
+	case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
+	case SQ_DYN_GPR_SIMD_LOCK_EN:
+	case SQ_CONFIG:
+	case SQ_GPR_RESOURCE_MGMT_1:
+	case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
+	case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
+	case SQ_CONST_MEM_BASE:
+	case SQ_STATIC_THREAD_MGMT_1:
+	case SQ_STATIC_THREAD_MGMT_2:
+	case SQ_STATIC_THREAD_MGMT_3:
+	case SPI_CONFIG_CNTL:
+	case SPI_CONFIG_CNTL_1:
+	case TA_CNTL_AUX:
+	case DB_DEBUG:
+	case DB_DEBUG2:
+	case DB_DEBUG3:
+	case DB_DEBUG4:
+	case DB_WATERMARKS:
+	case TD_PS_BORDER_COLOR_INDEX:
+	case TD_PS_BORDER_COLOR_RED:
+	case TD_PS_BORDER_COLOR_GREEN:
+	case TD_PS_BORDER_COLOR_BLUE:
+	case TD_PS_BORDER_COLOR_ALPHA:
+	case TD_VS_BORDER_COLOR_INDEX:
+	case TD_VS_BORDER_COLOR_RED:
+	case TD_VS_BORDER_COLOR_GREEN:
+	case TD_VS_BORDER_COLOR_BLUE:
+	case TD_VS_BORDER_COLOR_ALPHA:
+	case TD_GS_BORDER_COLOR_INDEX:
+	case TD_GS_BORDER_COLOR_RED:
+	case TD_GS_BORDER_COLOR_GREEN:
+	case TD_GS_BORDER_COLOR_BLUE:
+	case TD_GS_BORDER_COLOR_ALPHA:
+	case TD_HS_BORDER_COLOR_INDEX:
+	case TD_HS_BORDER_COLOR_RED:
+	case TD_HS_BORDER_COLOR_GREEN:
+	case TD_HS_BORDER_COLOR_BLUE:
+	case TD_HS_BORDER_COLOR_ALPHA:
+	case TD_LS_BORDER_COLOR_INDEX:
+	case TD_LS_BORDER_COLOR_RED:
+	case TD_LS_BORDER_COLOR_GREEN:
+	case TD_LS_BORDER_COLOR_BLUE:
+	case TD_LS_BORDER_COLOR_ALPHA:
+	case TD_CS_BORDER_COLOR_INDEX:
+	case TD_CS_BORDER_COLOR_RED:
+	case TD_CS_BORDER_COLOR_GREEN:
+	case TD_CS_BORDER_COLOR_BLUE:
+	case TD_CS_BORDER_COLOR_ALPHA:
+	case SQ_ESGS_RING_SIZE:
+	case SQ_GSVS_RING_SIZE:
+	case SQ_ESTMP_RING_SIZE:
+	case SQ_GSTMP_RING_SIZE:
+	case SQ_HSTMP_RING_SIZE:
+	case SQ_LSTMP_RING_SIZE:
+	case SQ_PSTMP_RING_SIZE:
+	case SQ_VSTMP_RING_SIZE:
+	case SQ_ESGS_RING_ITEMSIZE:
+	case SQ_ESTMP_RING_ITEMSIZE:
+	case SQ_GSTMP_RING_ITEMSIZE:
+	case SQ_GSVS_RING_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE:
+	case SQ_GS_VERT_ITEMSIZE_1:
+	case SQ_GS_VERT_ITEMSIZE_2:
+	case SQ_GS_VERT_ITEMSIZE_3:
+	case SQ_GSVS_RING_OFFSET_1:
+	case SQ_GSVS_RING_OFFSET_2:
+	case SQ_GSVS_RING_OFFSET_3:
+	case SQ_HSTMP_RING_ITEMSIZE:
+	case SQ_LSTMP_RING_ITEMSIZE:
+	case SQ_PSTMP_RING_ITEMSIZE:
+	case SQ_VSTMP_RING_ITEMSIZE:
+	case VGT_TF_RING_SIZE:
+	case SQ_ESGS_RING_BASE:
+	case SQ_GSVS_RING_BASE:
+	case SQ_ESTMP_RING_BASE:
+	case SQ_GSTMP_RING_BASE:
+	case SQ_HSTMP_RING_BASE:
+	case SQ_LSTMP_RING_BASE:
+	case SQ_PSTMP_RING_BASE:
+	case SQ_VSTMP_RING_BASE:
+	case CAYMAN_VGT_OFFCHIP_LDS_BASE:
+	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
+		return true;
+	default:
+		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
+		return false;
+	}
+}
+
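+/*
+ * Register operands embedded in type-3 packets are dword offsets; the
+ * checks below multiply by four to turn them into the byte addresses that
+ * evergreen_vm_reg_valid() expects, e.g. for PACKET3_COND_WRITE:
+ *
+ *	reg = ib[idx + 5] * 4;
+ *	if (!evergreen_vm_reg_valid(reg))
+ *		return -EINVAL;
+ */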
+static int evergreen_vm_packet3_check(struct radeon_device *rdev,
+				      u32 *ib, struct radeon_cs_packet *pkt)
+{
+	u32 idx = pkt->idx + 1;
+	u32 idx_value = ib[idx];
+	u32 start_reg, end_reg, reg, i;
+	u32 command, info;
+
+	switch (pkt->opcode) {
+	case PACKET3_NOP:
+	case PACKET3_SET_BASE:
+	case PACKET3_CLEAR_STATE:
+	case PACKET3_INDEX_BUFFER_SIZE:
+	case PACKET3_DISPATCH_DIRECT:
+	case PACKET3_DISPATCH_INDIRECT:
+	case PACKET3_MODE_CONTROL:
+	case PACKET3_SET_PREDICATION:
+	case PACKET3_COND_EXEC:
+	case PACKET3_PRED_EXEC:
+	case PACKET3_DRAW_INDIRECT:
+	case PACKET3_DRAW_INDEX_INDIRECT:
+	case PACKET3_INDEX_BASE:
+	case PACKET3_DRAW_INDEX_2:
+	case PACKET3_CONTEXT_CONTROL:
+	case PACKET3_DRAW_INDEX_OFFSET:
+	case PACKET3_INDEX_TYPE:
+	case PACKET3_DRAW_INDEX:
+	case PACKET3_DRAW_INDEX_AUTO:
+	case PACKET3_DRAW_INDEX_IMMD:
+	case PACKET3_NUM_INSTANCES:
+	case PACKET3_DRAW_INDEX_MULTI_AUTO:
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+	case PACKET3_DRAW_INDEX_OFFSET_2:
+	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+	case PACKET3_MPEG_INDEX:
+	case PACKET3_WAIT_REG_MEM:
+	case PACKET3_MEM_WRITE:
+	case PACKET3_SURFACE_SYNC:
+	case PACKET3_EVENT_WRITE:
+	case PACKET3_EVENT_WRITE_EOP:
+	case PACKET3_EVENT_WRITE_EOS:
+	case PACKET3_SET_CONTEXT_REG:
+	case PACKET3_SET_BOOL_CONST:
+	case PACKET3_SET_LOOP_CONST:
+	case PACKET3_SET_RESOURCE:
+	case PACKET3_SET_SAMPLER:
+	case PACKET3_SET_CTL_CONST:
+	case PACKET3_SET_RESOURCE_OFFSET:
+	case PACKET3_SET_CONTEXT_REG_INDIRECT:
+	case PACKET3_SET_RESOURCE_INDIRECT:
+	case CAYMAN_PACKET3_DEALLOC_STATE:
+		break;
+	case PACKET3_COND_WRITE:
+		if (idx_value & 0x100) {
+			reg = ib[idx + 5] * 4;
+			if (!evergreen_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_COPY_DW:
+		if (idx_value & 0x2) {
+			reg = ib[idx + 3] * 4;
+			if (!evergreen_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_CONFIG_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			if (!evergreen_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_CP_DMA:
+		command = ib[idx + 4];
+		info = ib[idx + 1];
+		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+		    ((((info & 0x00300000) >> 20) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+		    ((((info & 0x60000000) >> 29) == 0) &&
+		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+			/* non mem-to-mem copies require a dw-aligned count */
+			if ((command & 0x1fffff) % 4) {
+				DRM_ERROR("CP DMA command requires dw count alignment\n");
+				return -EINVAL;
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			if (((info & 0x60000000) >> 29) == 0) {
+				start_reg = idx_value << 2;
+				if (command & PACKET3_CP_DMA_CMD_SAIC) {
+					reg = start_reg;
+					if (!evergreen_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad SRC register\n");
+						return -EINVAL;
+					}
+				} else {
+					for (i = 0; i < (command & 0x1fffff); i++) {
+						reg = start_reg + (4 * i);
+						if (!evergreen_vm_reg_valid(reg)) {
+							DRM_ERROR("CP DMA Bad SRC register\n");
+							return -EINVAL;
+						}
+					}
+				}
+			}
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			if (((info & 0x00300000) >> 20) == 0) {
+				start_reg = ib[idx + 2];
+				if (command & PACKET3_CP_DMA_CMD_DAIC) {
+					reg = start_reg;
+					if (!evergreen_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad DST register\n");
+						return -EINVAL;
+					}
+				} else {
+					for (i = 0; i < (command & 0x1fffff); i++) {
+						reg = start_reg + (4 * i);
+						if (!evergreen_vm_reg_valid(reg)) {
+							DRM_ERROR("CP DMA Bad DST register\n");
+							return -EINVAL;
+						}
+					}
+				}
+			}
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
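+
+/*
+ * For reference, the CP_DMA operand layout implied by the masks above (an
+ * inference from this code, not taken from register documentation):
+ *
+ *	src_sel    = (info >> 29) & 0x3;	0 = memory, otherwise GDS/DATA
+ *	dst_sel    = (info >> 20) & 0x3;	0 = memory, otherwise GDS
+ *	byte_count = command & 0x1fffff;
+ */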
+
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	int ret = 0;
+	u32 idx = 0;
+	struct radeon_cs_packet pkt;
+
+	do {
+		pkt.idx = idx;
+		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
+		pkt.one_reg_wr = 0;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			dev_err(rdev->dev, "Packet0 not allowed!\n");
+			ret = -EINVAL;
+			break;
+		case RADEON_PACKET_TYPE2:
+			idx += 1;
+			break;
+		case RADEON_PACKET_TYPE3:
+			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
+			idx += pkt.count + 2;
+			break;
+		default:
+			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+			ret = -EINVAL;
+			break;
+		}
+		if (ret)
+			break;
+	} while (idx < ib->length_dw);
+
+	return ret;
+}
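+
+/*
+ * Packet framing recap for the loop above: a type-2 packet is a single
+ * filler dword, while a type-3 packet occupies pkt.count + 2 dwords (the
+ * header plus count + 1 payload dwords), which is why idx advances by
+ * pkt.count + 2.
+ */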
+
+/**
+ * evergreen_dma_ib_parse() - parse the DMA IB for VM
+ * @rdev: radeon_device pointer
+ * @ib:	radeon_ib pointer
+ *
+ * Parses the DMA IB from the VM CS ioctl and
+ * checks for errors. (Cayman-SI)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	u32 idx = 0;
+	u32 header, cmd, count, sub_cmd;
+
+	do {
+		header = ib->ptr[idx];
+		cmd = GET_DMA_CMD(header);
+		count = GET_DMA_COUNT(header);
+		sub_cmd = GET_DMA_SUB_CMD(header);
+
+		switch (cmd) {
+		case DMA_PACKET_WRITE:
+			switch (sub_cmd) {
+			/* tiled */
+			case 8:
+				idx += count + 7;
+				break;
+			/* linear */
+			case 0:
+				idx += count + 3;
+				break;
+			default:
+				DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_COPY:
+			switch (sub_cmd) {
+			/* Copy L2L, DW aligned */
+			case 0x00:
+				idx += 5;
+				break;
+			/* Copy L2T/T2L */
+			case 0x08:
+				idx += 9;
+				break;
+			/* Copy L2L, byte aligned */
+			case 0x40:
+				idx += 5;
+				break;
+			/* Copy L2L, partial */
+			case 0x41:
+				idx += 9;
+				break;
+			/* Copy L2L, DW aligned, broadcast */
+			case 0x44:
+				idx += 7;
+				break;
+			/* Copy L2T Frame to Field */
+			case 0x48:
+				idx += 10;
+				break;
+			/* Copy L2T/T2L, partial */
+			case 0x49:
+				idx += 12;
+				break;
+			/* Copy L2T broadcast */
+			case 0x4b:
+				idx += 10;
+				break;
+			/* Copy L2T/T2L (tile units) */
+			case 0x4c:
+				idx += 9;
+				break;
+			/* Copy T2T, partial (tile units) */
+			case 0x4d:
+				idx += 13;
+				break;
+			/* Copy L2T broadcast (tile units) */
+			case 0x4f:
+				idx += 10;
+				break;
+			default:
+				DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_CONSTANT_FILL:
+			idx += 4;
+			break;
+		case DMA_PACKET_NOP:
+			idx += 1;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+			return -EINVAL;
+		}
+	} while (idx < ib->length_dw);
+
+	return 0;
+}
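+
+/*
+ * The packet sizes used here must stay in sync with evergreen_dma_cs_parse()
+ * above: this VM variant does no relocation or bounds checking, but it still
+ * has to step over exactly the same number of dwords per packet, e.g. 5 for
+ * an L2L dw-aligned copy and 9 for an L2T/T2L copy.
+ */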
diff --git a/linux-imx/drivers/gpu/drm/radeon/evergreen_hdmi.c b/linux-imx/drivers/gpu/drm/radeon/evergreen_hdmi.c
new file mode 100644
index 0000000..067cc1f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ *          Rafał Miłecki
+ */
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+#include "atom.h"
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr.cts_32khz));
+	WREG32(HDMI_ACR_32_1 + offset, acr.n_32khz);
+
+	WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr.cts_44_1khz));
+	WREG32(HDMI_ACR_44_1 + offset, acr.n_44_1khz);
+
+	WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr.cts_48khz));
+	WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
+}
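+
+/*
+ * The N and CTS values programmed above are related by the HDMI spec as
+ * 128 * audio_rate = f_TMDS * N / CTS.  As a worked example (assuming
+ * f_TMDS equals the pixel clock, i.e. no deep color): for 48 kHz audio on
+ * a 74.25 MHz mode, N = 6144 gives
+ * CTS = 74250000 * 6144 / (128 * 48000) = 74250.
+ */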
+
+static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector = NULL;
+	struct cea_sad *sads;
+	int i, sad_count;
+
+	static const u16 eld_reg_to_type[][2] = {
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+		{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+	};
+
+	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder)
+			radeon_connector = to_radeon_connector(connector);
+	}
+
+	if (!radeon_connector) {
+		DRM_ERROR("Couldn't find encoder's connector\n");
+		return;
+	}
+
+	sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+	if (sad_count < 0) {
+		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+		return;
+	}
+	BUG_ON(!sads);
+
+	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+		u32 value = 0;
+		int j;
+
+		for (j = 0; j < sad_count; j++) {
+			struct cea_sad *sad = &sads[j];
+
+			if (sad->format == eld_reg_to_type[i][1]) {
+				value = MAX_CHANNELS(sad->channels) |
+					DESCRIPTOR_BYTE_2(sad->byte2) |
+					SUPPORTED_FREQUENCIES(sad->freq);
+				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+					value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
+				break;
+			}
+		}
+		WREG32(eld_reg_to_type[i][0], value);
+	}
+
+	kfree(sads);
+}
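+
+/*
+ * Each AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTORx register written above
+ * carries the first Short Audio Descriptor from the monitor's EDID CEA
+ * block that matches its coding type: the maximum channel count, the
+ * format-specific byte 2 and the supported sample frequencies.
+ */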
+
+/*
+ * build a HDMI Video Info Frame
+ */
+static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+						void *buffer, size_t size)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+	uint8_t *frame = buffer + 3;
+	uint8_t *header = buffer;
+
+	WREG32(AFMT_AVI_INFO0 + offset,
+		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+	WREG32(AFMT_AVI_INFO1 + offset,
+		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+	WREG32(AFMT_AVI_INFO2 + offset,
+		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+	WREG32(AFMT_AVI_INFO3 + offset,
+		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
+}
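+
+/*
+ * The buffer handled above is laid out by hdmi_avi_infoframe_pack(): three
+ * header bytes (type, version, length), then the checksum, then the
+ * payload.  "frame" therefore points at the checksum byte, and the version
+ * byte (header[1]) ends up in the top byte of AFMT_AVI_INFO3.
+ */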
+
+static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	u32 base_rate = 24000;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	/* XXX two dtos; generally use dto0 for hdmi */
+	/* Express [24MHz / target pixel clock] as an exact rational
+	 * number (quotient of two integers): DCCG_AUDIO_DTOx_PHASE
+	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+	 */
+	WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
+	WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+	WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+}
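+
+/*
+ * A quick sanity check of the ratio above (clock is in kHz here, like
+ * mode->clock): a 148.5 MHz pixel clock yields phase/module =
+ * 2400000 / 14850000 = 24 / 148.5, i.e. 24 MHz over the pixel clock.
+ */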
+
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+	struct hdmi_avi_infoframe frame;
+	uint32_t offset;
+	ssize_t err;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	/* Silent, r600_hdmi_enable will raise WARN for us */
+	if (!dig->afmt->enabled)
+		return;
+	offset = dig->afmt->offset;
+
+	evergreen_audio_set_dto(encoder, mode->clock);
+
+	WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+	       HDMI_NULL_SEND); /* send null packets when required */
+
+	WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+	WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+	       HDMI_NULL_SEND | /* send null packets when required */
+	       HDMI_GC_SEND | /* send general control packets */
+	       HDMI_GC_CONT); /* send general control packets every frame */
+
+	WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
+	       HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+	       HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+
+	WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
+	       AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
+
+	WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
+	       HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+	WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
+
+	WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+	       HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+	       HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+	WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+	       AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+
+	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
+
+	WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+	       HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
+
+	evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+	WREG32(AFMT_60958_0 + offset,
+	       AFMT_60958_CS_CHANNEL_NUMBER_L(1));
+
+	WREG32(AFMT_60958_1 + offset,
+	       AFMT_60958_CS_CHANNEL_NUMBER_R(2));
+
+	WREG32(AFMT_60958_2 + offset,
+	       AFMT_60958_CS_CHANNEL_NUMBER_2(3) |
+	       AFMT_60958_CS_CHANNEL_NUMBER_3(4) |
+	       AFMT_60958_CS_CHANNEL_NUMBER_4(5) |
+	       AFMT_60958_CS_CHANNEL_NUMBER_5(6) |
+	       AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
+	       AFMT_60958_CS_CHANNEL_NUMBER_7(8));
+
+	/* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */
+
+	WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
+	       AFMT_AUDIO_CHANNEL_ENABLE(0xff));
+
+	/* fglrx sets 0x40 in 0x5f80 here */
+	evergreen_hdmi_write_sad_regs(encoder);
+
+	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+	if (err < 0) {
+		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+		return;
+	}
+
+	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+		return;
+	}
+
+	evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
+
+	WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
+		  HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+		  HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
+
+	WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
+		 HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
+		 ~HDMI_AVI_INFO_LINE_MASK);
+
+	WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
+		  AFMT_AUDIO_SAMPLE_SEND); /* send audio packets */
+
+	/* it's unknown what these bits do exactly, but they're quite useful for debugging */
+	WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+	WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
+	WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
+	WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+}
+
+void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	/* Silent, r600_hdmi_enable will raise WARN for us */
+	if (enable && dig->afmt->enabled)
+		return;
+	if (!enable && !dig->afmt->enabled)
+		return;
+
+	dig->afmt->enabled = enable;
+
+	DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+		  enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/evergreen_reg.h b/linux-imx/drivers/gpu/drm/radeon/evergreen_reg.h
new file mode 100644
index 0000000..881aba2
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __EVERGREEN_REG_H__
+#define __EVERGREEN_REG_H__
+
+/* evergreen */
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS               0x310
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH          0x324
+#define EVERGREEN_D3VGA_CONTROL                         0x3e0
+#define EVERGREEN_D4VGA_CONTROL                         0x3e4
+#define EVERGREEN_D5VGA_CONTROL                         0x3e8
+#define EVERGREEN_D6VGA_CONTROL                         0x3ec
+
+#define EVERGREEN_P1PLL_SS_CNTL                         0x414
+#define EVERGREEN_P2PLL_SS_CNTL                         0x454
+#       define EVERGREEN_PxPLL_SS_EN                    (1 << 12)
+
+#define EVERGREEN_AUDIO_PLL1_MUL			0x5b0
+#define EVERGREEN_AUDIO_PLL1_DIV			0x5b4
+#define EVERGREEN_AUDIO_PLL1_UNK			0x5bc
+
+#define EVERGREEN_AUDIO_ENABLE				0x5e78
+#define EVERGREEN_AUDIO_VENDOR_ID			0x5ec0
+
+/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
+#define EVERGREEN_GRPH_ENABLE                           0x6800
+#define EVERGREEN_GRPH_CONTROL                          0x6804
+#       define EVERGREEN_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define EVERGREEN_GRPH_DEPTH_8BPP                0
+#       define EVERGREEN_GRPH_DEPTH_16BPP               1
+#       define EVERGREEN_GRPH_DEPTH_32BPP               2
+#       define EVERGREEN_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define EVERGREEN_ADDR_SURF_2_BANK               0
+#       define EVERGREEN_ADDR_SURF_4_BANK               1
+#       define EVERGREEN_ADDR_SURF_8_BANK               2
+#       define EVERGREEN_ADDR_SURF_16_BANK              3
+#       define EVERGREEN_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define EVERGREEN_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_1         0
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_2         1
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_4         2
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_8         3
+#       define EVERGREEN_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define EVERGREEN_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define EVERGREEN_GRPH_FORMAT_ARGB1555           0
+#       define EVERGREEN_GRPH_FORMAT_ARGB565            1
+#       define EVERGREEN_GRPH_FORMAT_ARGB4444           2
+#       define EVERGREEN_GRPH_FORMAT_AI88               3
+#       define EVERGREEN_GRPH_FORMAT_MONO16             4
+#       define EVERGREEN_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define EVERGREEN_GRPH_FORMAT_ARGB8888           0
+#       define EVERGREEN_GRPH_FORMAT_ARGB2101010        1
+#       define EVERGREEN_GRPH_FORMAT_32BPP_DIG          2
+#       define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010     3
+#       define EVERGREEN_GRPH_FORMAT_BGRA1010102        4
+#       define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102     5
+#       define EVERGREEN_GRPH_FORMAT_RGB111110          6
+#       define EVERGREEN_GRPH_FORMAT_BGR101111          7
+#       define EVERGREEN_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1        0
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2        1
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4        2
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8        3
+#       define EVERGREEN_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B       0
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B      1
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B      2
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B      3
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define EVERGREEN_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1      4
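+/* e.g. a linear-aligned 32bpp ARGB8888 surface would program
+ *	EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ *	EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888) |
+ *	EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED)
+ */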
+#define EVERGREEN_GRPH_SWAP_CONTROL                     0x680c
+#       define EVERGREEN_GRPH_ENDIAN_SWAP(x)            (((x) & 0x3) << 0)
+#       define EVERGREEN_GRPH_ENDIAN_NONE               0
+#       define EVERGREEN_GRPH_ENDIAN_8IN16              1
+#       define EVERGREEN_GRPH_ENDIAN_8IN32              2
+#       define EVERGREEN_GRPH_ENDIAN_8IN64              3
+#       define EVERGREEN_GRPH_RED_CROSSBAR(x)           (((x) & 0x3) << 4)
+#       define EVERGREEN_GRPH_RED_SEL_R                 0
+#       define EVERGREEN_GRPH_RED_SEL_G                 1
+#       define EVERGREEN_GRPH_RED_SEL_B                 2
+#       define EVERGREEN_GRPH_RED_SEL_A                 3
+#       define EVERGREEN_GRPH_GREEN_CROSSBAR(x)         (((x) & 0x3) << 6)
+#       define EVERGREEN_GRPH_GREEN_SEL_G               0
+#       define EVERGREEN_GRPH_GREEN_SEL_B               1
+#       define EVERGREEN_GRPH_GREEN_SEL_A               2
+#       define EVERGREEN_GRPH_GREEN_SEL_R               3
+#       define EVERGREEN_GRPH_BLUE_CROSSBAR(x)          (((x) & 0x3) << 8)
+#       define EVERGREEN_GRPH_BLUE_SEL_B                0
+#       define EVERGREEN_GRPH_BLUE_SEL_A                1
+#       define EVERGREEN_GRPH_BLUE_SEL_R                2
+#       define EVERGREEN_GRPH_BLUE_SEL_G                3
+#       define EVERGREEN_GRPH_ALPHA_CROSSBAR(x)         (((x) & 0x3) << 10)
+#       define EVERGREEN_GRPH_ALPHA_SEL_A               0
+#       define EVERGREEN_GRPH_ALPHA_SEL_R               1
+#       define EVERGREEN_GRPH_ALPHA_SEL_G               2
+#       define EVERGREEN_GRPH_ALPHA_SEL_B               3
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS          0x6810
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS        0x6814
+#       define EVERGREEN_GRPH_DFQ_ENABLE                (1 << 0)
+#       define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK      0xffffff00
+#define EVERGREEN_GRPH_PITCH                            0x6818
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH     0x681c
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH   0x6820
+#define EVERGREEN_GRPH_SURFACE_OFFSET_X                 0x6824
+#define EVERGREEN_GRPH_SURFACE_OFFSET_Y                 0x6828
+#define EVERGREEN_GRPH_X_START                          0x682c
+#define EVERGREEN_GRPH_Y_START                          0x6830
+#define EVERGREEN_GRPH_X_END                            0x6834
+#define EVERGREEN_GRPH_Y_END                            0x6838
+#define EVERGREEN_GRPH_UPDATE                           0x6844
+#       define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+#       define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL                     0x6848
+#       define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define EVERGREEN_CUR_CONTROL                           0x6998
+#       define EVERGREEN_CURSOR_EN                      (1 << 0)
+#       define EVERGREEN_CURSOR_MODE(x)                 (((x) & 0x3) << 8)
+#       define EVERGREEN_CURSOR_MONO                    0
+#       define EVERGREEN_CURSOR_24_1                    1
+#       define EVERGREEN_CURSOR_24_8_PRE_MULT           2
+#       define EVERGREEN_CURSOR_24_8_UNPRE_MULT         3
+#       define EVERGREEN_CURSOR_2X_MAGNIFY              (1 << 16)
+#       define EVERGREEN_CURSOR_FORCE_MC_ON             (1 << 20)
+#       define EVERGREEN_CURSOR_URGENT_CONTROL(x)       (((x) & 0x7) << 24)
+#       define EVERGREEN_CURSOR_URGENT_ALWAYS           0
+#       define EVERGREEN_CURSOR_URGENT_1_8              1
+#       define EVERGREEN_CURSOR_URGENT_1_4              2
+#       define EVERGREEN_CURSOR_URGENT_3_8              3
+#       define EVERGREEN_CURSOR_URGENT_1_2              4
+#define EVERGREEN_CUR_SURFACE_ADDRESS                   0x699c
+#       define EVERGREEN_CUR_SURFACE_ADDRESS_MASK       0xfffff000
+#define EVERGREEN_CUR_SIZE                              0x69a0
+#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH              0x69a4
+#define EVERGREEN_CUR_POSITION                          0x69a8
+#define EVERGREEN_CUR_HOT_SPOT                          0x69ac
+#define EVERGREEN_CUR_COLOR1                            0x69b0
+#define EVERGREEN_CUR_COLOR2                            0x69b4
+#define EVERGREEN_CUR_UPDATE                            0x69b8
+#       define EVERGREEN_CURSOR_UPDATE_PENDING          (1 << 0)
+#       define EVERGREEN_CURSOR_UPDATE_TAKEN            (1 << 1)
+#       define EVERGREEN_CURSOR_UPDATE_LOCK             (1 << 16)
+#       define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */
+#define EVERGREEN_DC_LUT_RW_MODE                        0x69e0
+#define EVERGREEN_DC_LUT_RW_INDEX                       0x69e4
+#define EVERGREEN_DC_LUT_SEQ_COLOR                      0x69e8
+#define EVERGREEN_DC_LUT_PWL_DATA                       0x69ec
+#define EVERGREEN_DC_LUT_30_COLOR                       0x69f0
+#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE              0x69f4
+#define EVERGREEN_DC_LUT_WRITE_EN_MASK                  0x69f8
+#define EVERGREEN_DC_LUT_AUTOFILL                       0x69fc
+#define EVERGREEN_DC_LUT_CONTROL                        0x6a00
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE              0x6a04
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN             0x6a08
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED               0x6a0c
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE              0x6a10
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN             0x6a14
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED               0x6a18
+
+#define EVERGREEN_DATA_FORMAT                           0x6b00
+#       define EVERGREEN_INTERLEAVE_EN                  (1 << 0)
+#define EVERGREEN_DESKTOP_HEIGHT                        0x6b04
+#define EVERGREEN_VLINE_START_END                       0x6b08
+#define EVERGREEN_VLINE_STATUS                          0x6bb8
+#       define EVERGREEN_VLINE_STAT                     (1 << 12)
+
+#define EVERGREEN_VIEWPORT_START                        0x6d70
+#define EVERGREEN_VIEWPORT_SIZE                         0x6d74
+
+/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define EVERGREEN_CRTC0_REGISTER_OFFSET                 (0x6df0 - 0x6df0)
+#define EVERGREEN_CRTC1_REGISTER_OFFSET                 (0x79f0 - 0x6df0)
+#define EVERGREEN_CRTC2_REGISTER_OFFSET                 (0x105f0 - 0x6df0)
+#define EVERGREEN_CRTC3_REGISTER_OFFSET                 (0x111f0 - 0x6df0)
+#define EVERGREEN_CRTC4_REGISTER_OFFSET                 (0x11df0 - 0x6df0)
+#define EVERGREEN_CRTC5_REGISTER_OFFSET                 (0x129f0 - 0x6df0)
+
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_V_BLANK_START_END                0x6e34
+#define EVERGREEN_CRTC_CONTROL                          0x6e70
+#       define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
+#       define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL                    0x6e74
+#       define EVERGREEN_CRTC_BLANK_DATA_EN             (1 << 8)
+#define EVERGREEN_CRTC_STATUS                           0x6e8c
+#       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
+#define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
+#define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x6ea0
+#define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
+#define EVERGREEN_MASTER_UPDATE_LOCK                    0x6ef4
+#define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
+
+#define EVERGREEN_DC_GPIO_HPD_MASK                      0x64b0
+#define EVERGREEN_DC_GPIO_HPD_A                         0x64b4
+#define EVERGREEN_DC_GPIO_HPD_EN                        0x64b8
+#define EVERGREEN_DC_GPIO_HPD_Y                         0x64bc
+
+/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+#define EVERGREEN_HDMI_BASE				0x7030
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/evergreend.h b/linux-imx/drivers/gpu/drm/radeon/evergreend.h
new file mode 100644
index 0000000..150e318
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/evergreend.h
@@ -0,0 +1,2100 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef EVERGREEND_H
+#define EVERGREEND_H
+
+#define EVERGREEN_MAX_SH_GPRS           256
+#define EVERGREEN_MAX_TEMP_GPRS         16
+#define EVERGREEN_MAX_SH_THREADS        256
+#define EVERGREEN_MAX_SH_STACK_ENTRIES  4096
+#define EVERGREEN_MAX_FRC_EOV_CNT       16384
+#define EVERGREEN_MAX_BACKENDS          8
+#define EVERGREEN_MAX_BACKENDS_MASK     0xFF
+#define EVERGREEN_MAX_SIMDS             16
+#define EVERGREEN_MAX_SIMDS_MASK        0xFFFF
+#define EVERGREEN_MAX_PIPES             8
+#define EVERGREEN_MAX_PIPES_MASK        0xFF
+#define EVERGREEN_MAX_LDS_NUM           0xFFFF
+
+#define CYPRESS_GB_ADDR_CONFIG_GOLDEN        0x02011003
+#define BARTS_GB_ADDR_CONFIG_GOLDEN          0x02011003
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN         0x02011003
+#define JUNIPER_GB_ADDR_CONFIG_GOLDEN        0x02010002
+#define REDWOOD_GB_ADDR_CONFIG_GOLDEN        0x02010002
+#define TURKS_GB_ADDR_CONFIG_GOLDEN          0x02010002
+#define CEDAR_GB_ADDR_CONFIG_GOLDEN          0x02010001
+#define CAICOS_GB_ADDR_CONFIG_GOLDEN         0x02010001
+#define SUMO_GB_ADDR_CONFIG_GOLDEN           0x02010002
+#define SUMO2_GB_ADDR_CONFIG_GOLDEN          0x02010002
+
+/* Registers */
+
+#define RCU_IND_INDEX           			0x100
+#define RCU_IND_DATA            			0x104
+
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL				0x718
+#	define UPLL_RESET_MASK				0x00000001
+#	define UPLL_SLEEP_MASK				0x00000002
+#	define UPLL_BYPASS_EN_MASK			0x00000004
+#	define UPLL_CTLREQ_MASK				0x00000008
+#	define UPLL_REF_DIV_MASK			0x003F0000
+#	define UPLL_VCO_MODE_MASK			0x00000200
+#	define UPLL_CTLACK_MASK				0x40000000
+#	define UPLL_CTLACK2_MASK			0x80000000
+#define CG_UPLL_FUNC_CNTL_2				0x71c
+#	define UPLL_PDIV_A(x)				((x) << 0)
+#	define UPLL_PDIV_A_MASK				0x0000007F
+#	define UPLL_PDIV_B(x)				((x) << 8)
+#	define UPLL_PDIV_B_MASK				0x00007F00
+#	define VCLK_SRC_SEL(x)				((x) << 20)
+#	define VCLK_SRC_SEL_MASK			0x01F00000
+#	define DCLK_SRC_SEL(x)				((x) << 25)
+#	define DCLK_SRC_SEL_MASK			0x3E000000
+#define CG_UPLL_FUNC_CNTL_3				0x720
+#	define UPLL_FB_DIV(x)				((x) << 0)
+#	define UPLL_FB_DIV_MASK				0x01FFFFFF
+#define CG_UPLL_FUNC_CNTL_4				0x854
+#	define UPLL_SPARE_ISPARE9			0x00020000
+#define CG_UPLL_SPREAD_SPECTRUM				0x79c
+#	define SSEN_MASK				0x00000001
+
+/* fusion uvd clocks */
+#define CG_DCLK_CNTL                                    0x610
+#       define DCLK_DIVIDER_MASK                        0x7f
+#       define DCLK_DIR_CNTL_EN                         (1 << 8)
+#define CG_DCLK_STATUS                                  0x614
+#       define DCLK_STATUS                              (1 << 0)
+#define CG_VCLK_CNTL                                    0x618
+#define CG_VCLK_STATUS                                  0x61c
+#define	CG_SCRATCH1					0x820
+
+#define GRBM_GFX_INDEX          			0x802C
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1 << 31)
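+/* e.g. one common pattern is to target writes at every shader engine and
+ * instance at once:
+ *	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+ */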
+#define RLC_GFX_INDEX           			0x3fC4
+#define CC_GC_SHADER_PIPE_CONFIG			0x8950
+#define		WRITE_DIS      				(1 << 0)
+#define CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)     			((x) << 16)
+#define GB_ADDR_CONFIG  				0x98F8
+#define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x0000000f
+#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
+#define		BANK_INTERLEAVE_SIZE(x)			((x) << 8)
+#define		NUM_SHADER_ENGINES(x)			((x) << 12)
+#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
+#define		NUM_GPUS(x)     			((x) << 20)
+#define		MULTI_GPU_TILE_SIZE(x)     		((x) << 24)
+#define		ROW_SIZE(x)             		((x) << 28)
+#define GB_BACKEND_MAP  				0x98FC
+#define DMIF_ADDR_CONFIG  				0xBD4
+#define HDP_ADDR_CONFIG  				0x2F48
+#define HDP_MISC_CNTL  					0x2F4C
+#define		HDP_FLUSH_INVALIDATE_CACHE      	(1 << 0)
+
+#define	CC_SYS_RB_BACKEND_DISABLE			0x3F88
+#define	GC_USER_RB_BACKEND_DISABLE			0x9B7C
+
+#define	CGTS_SYS_TCC_DISABLE				0x3F90
+#define	CGTS_TCC_DISABLE				0x9148
+#define	CGTS_USER_SYS_TCC_DISABLE			0x3F94
+#define	CGTS_USER_TCC_DISABLE				0x914C
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define	BIF_FB_EN						0x5490
+#define		FB_READ_EN					(1 << 0)
+#define		FB_WRITE_EN					(1 << 1)
+
+#define	CP_STRMOUT_CNTL					0x84FC
+
+#define	CP_COHER_CNTL					0x85F0
+#define	CP_COHER_SIZE					0x85F4
+#define	CP_COHER_BASE					0x85F8
+#define	CP_STALLED_STAT1			0x8674
+#define	CP_STALLED_STAT2			0x8678
+#define	CP_BUSY_STAT				0x867C
+#define	CP_STAT						0x8680
+#define CP_ME_CNTL					0x86D8
+#define		CP_ME_HALT					(1 << 28)
+#define		CP_PFP_HALT					(1 << 26)
+#define	CP_ME_RAM_DATA					0xC160
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		STQ_SPLIT(x)					((x) << 0)
+#define	CP_PERFMON_CNTL					0x87FC
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_QUEUE_THRESHOLDS				0x8760
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define	CP_RB_BASE					0xC100
+#define	CP_RB_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define	CP_RB_RPTR					0x8700
+#define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)					((x) << 0)
+#define	CP_RB_RPTR_ADDR_HI				0xC110
+#define	CP_RB_RPTR_WR					0xC108
+#define	CP_RB_WPTR					0xC114
+#define	CP_RB_WPTR_ADDR					0xC118
+#define	CP_RB_WPTR_ADDR_HI				0xC11C
+#define	CP_RB_WPTR_DELAY				0x8704
+#define	CP_SEM_WAIT_TIMER				0x85BC
+#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x85C8
+#define	CP_DEBUG					0xC1FC
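+
+/* Illustration only: CP_RB_CNTL packs log2-encoded ring-buffer and fetch
+ * block sizes plus control flags into one word.  A hedged sketch of
+ * composing it; the log2 encoding follows the driver's CP setup code, and
+ * the helper name is hypothetical.
+ */
+static inline u32 cp_rb_cntl_compose(u32 log2_bufsz, u32 log2_blksz)
+{
+	u32 tmp = RB_BUFSZ(log2_bufsz) | RB_BLKSZ(log2_blksz);
+
+	tmp |= RB_NO_UPDATE;	/* e.g. while (re)initializing the ring */
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;	/* byte-swap ring reads on big-endian hosts */
+#endif
+	return tmp;
+}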
+
+/* Audio clocks */
+#define DCCG_AUDIO_DTO_SOURCE             0x05ac
+#       define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
+#       define DCCG_AUDIO_DTO_SEL         (1 << 4) /* 0=dto0 1=dto1 */
+
+#define DCCG_AUDIO_DTO0_PHASE             0x05b0
+#define DCCG_AUDIO_DTO0_MODULE            0x05b4
+#define DCCG_AUDIO_DTO0_LOAD              0x05b8
+#define DCCG_AUDIO_DTO0_CNTL              0x05bc
+
+#define DCCG_AUDIO_DTO1_PHASE             0x05c0
+#define DCCG_AUDIO_DTO1_MODULE            0x05c4
+#define DCCG_AUDIO_DTO1_LOAD              0x05c8
+#define DCCG_AUDIO_DTO1_CNTL              0x05cc
+
+/* DCE 4.0 AFMT */
+#define HDMI_CONTROL                         0x7030
+#       define HDMI_KEEPOUT_MODE             (1 << 0)
+#       define HDMI_PACKET_GEN_VERSION       (1 << 4) /* 0 = r6xx compat */
+#       define HDMI_ERROR_ACK                (1 << 8)
+#       define HDMI_ERROR_MASK               (1 << 9)
+#       define HDMI_DEEP_COLOR_ENABLE        (1 << 24)
+#       define HDMI_DEEP_COLOR_DEPTH(x)      (((x) & 3) << 28)
+#       define HDMI_24BIT_DEEP_COLOR         0
+#       define HDMI_30BIT_DEEP_COLOR         1
+#       define HDMI_36BIT_DEEP_COLOR         2
+#define HDMI_STATUS                          0x7034
+#       define HDMI_ACTIVE_AVMUTE            (1 << 0)
+#       define HDMI_AUDIO_PACKET_ERROR       (1 << 16)
+#       define HDMI_VBI_PACKET_ERROR         (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL            0x7038
+#       define HDMI_AUDIO_DELAY_EN(x)        (((x) & 3) << 4)
+#       define HDMI_AUDIO_PACKETS_PER_LINE(x)  (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL              0x703c
+#       define HDMI_ACR_SEND                 (1 << 0)
+#       define HDMI_ACR_CONT                 (1 << 1)
+#       define HDMI_ACR_SELECT(x)            (((x) & 3) << 4)
+#       define HDMI_ACR_HW                   0
+#       define HDMI_ACR_32                   1
+#       define HDMI_ACR_44                   2
+#       define HDMI_ACR_48                   3
+#       define HDMI_ACR_SOURCE               (1 << 8) /* 0 - hw; 1 - cts value */
+#       define HDMI_ACR_AUTO_SEND            (1 << 12)
+#       define HDMI_ACR_N_MULTIPLE(x)        (((x) & 7) << 16)
+#       define HDMI_ACR_X1                   1
+#       define HDMI_ACR_X2                   2
+#       define HDMI_ACR_X4                   4
+#       define HDMI_ACR_AUDIO_PRIORITY       (1 << 31)
+#define HDMI_VBI_PACKET_CONTROL              0x7040
+#       define HDMI_NULL_SEND                (1 << 0)
+#       define HDMI_GC_SEND                  (1 << 4)
+#       define HDMI_GC_CONT                  (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0              0x7044
+#       define HDMI_AVI_INFO_SEND            (1 << 0)
+#       define HDMI_AVI_INFO_CONT            (1 << 1)
+#       define HDMI_AUDIO_INFO_SEND          (1 << 4)
+#       define HDMI_AUDIO_INFO_CONT          (1 << 5)
+#       define HDMI_MPEG_INFO_SEND           (1 << 8)
+#       define HDMI_MPEG_INFO_CONT           (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1              0x7048
+#       define HDMI_AVI_INFO_LINE(x)         (((x) & 0x3f) << 0)
+#       define HDMI_AVI_INFO_LINE_MASK       (0x3f << 0)
+#       define HDMI_AUDIO_INFO_LINE(x)       (((x) & 0x3f) << 8)
+#       define HDMI_MPEG_INFO_LINE(x)        (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL          0x704c
+#       define HDMI_GENERIC0_SEND            (1 << 0)
+#       define HDMI_GENERIC0_CONT            (1 << 1)
+#       define HDMI_GENERIC1_SEND            (1 << 4)
+#       define HDMI_GENERIC1_CONT            (1 << 5)
+#       define HDMI_GENERIC0_LINE(x)         (((x) & 0x3f) << 16)
+#       define HDMI_GENERIC1_LINE(x)         (((x) & 0x3f) << 24)
+#define HDMI_GC                              0x7058
+#       define HDMI_GC_AVMUTE                (1 << 0)
+#       define HDMI_GC_AVMUTE_CONT           (1 << 2)
+#define AFMT_AUDIO_PACKET_CONTROL2           0x705c
+#       define AFMT_AUDIO_LAYOUT_OVRD        (1 << 0)
+#       define AFMT_AUDIO_LAYOUT_SELECT      (1 << 1)
+#       define AFMT_60958_CS_SOURCE          (1 << 4)
+#       define AFMT_AUDIO_CHANNEL_ENABLE(x)  (((x) & 0xff) << 8)
+#       define AFMT_DP_AUDIO_STREAM_ID(x)    (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0                       0x7084
+#       define AFMT_AVI_INFO_CHECKSUM(x)     (((x) & 0xff) << 0)
+#       define AFMT_AVI_INFO_S(x)            (((x) & 3) << 8)
+#       define AFMT_AVI_INFO_B(x)            (((x) & 3) << 10)
+#       define AFMT_AVI_INFO_A(x)            (((x) & 1) << 12)
+#       define AFMT_AVI_INFO_Y(x)            (((x) & 3) << 13)
+#       define AFMT_AVI_INFO_Y_RGB           0
+#       define AFMT_AVI_INFO_Y_YCBCR422      1
+#       define AFMT_AVI_INFO_Y_YCBCR444      2
+#       define AFMT_AVI_INFO_Y_A_B_S(x)      (((x) & 0xff) << 8)
+#       define AFMT_AVI_INFO_R(x)            (((x) & 0xf) << 16)
+#       define AFMT_AVI_INFO_M(x)            (((x) & 0x3) << 20)
+#       define AFMT_AVI_INFO_C(x)            (((x) & 0x3) << 22)
+#       define AFMT_AVI_INFO_C_M_R(x)        (((x) & 0xff) << 16)
+#       define AFMT_AVI_INFO_SC(x)           (((x) & 0x3) << 24)
+#       define AFMT_AVI_INFO_Q(x)            (((x) & 0x3) << 26)
+#       define AFMT_AVI_INFO_EC(x)           (((x) & 0x3) << 28)
+#       define AFMT_AVI_INFO_ITC(x)          (((x) & 0x1) << 31)
+#       define AFMT_AVI_INFO_ITC_EC_Q_SC(x)  (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1                       0x7088
+#       define AFMT_AVI_INFO_VIC(x)          (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_PR(x)           (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_CN(x)           (((x) & 0x3) << 12)
+#       define AFMT_AVI_INFO_YQ(x)           (((x) & 0x3) << 14)
+#       define AFMT_AVI_INFO_TOP(x)          (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2                       0x708c
+#       define AFMT_AVI_INFO_BOTTOM(x)       (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_LEFT(x)         (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3                       0x7090
+#       define AFMT_AVI_INFO_RIGHT(x)        (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_VERSION(x)      (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0                      0x7094
+#       define AFMT_MPEG_INFO_CHECKSUM(x)    (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MB0(x)         (((x) & 0xff) << 8)
+#       define AFMT_MPEG_INFO_MB1(x)         (((x) & 0xff) << 16)
+#       define AFMT_MPEG_INFO_MB2(x)         (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1                      0x7098
+#       define AFMT_MPEG_INFO_MB3(x)         (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MF(x)          (((x) & 3) << 8)
+#       define AFMT_MPEG_INFO_FR(x)          (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR                    0x709c
+#define AFMT_GENERIC0_0                      0x70a0
+#define AFMT_GENERIC0_1                      0x70a4
+#define AFMT_GENERIC0_2                      0x70a8
+#define AFMT_GENERIC0_3                      0x70ac
+#define AFMT_GENERIC0_4                      0x70b0
+#define AFMT_GENERIC0_5                      0x70b4
+#define AFMT_GENERIC0_6                      0x70b8
+#define AFMT_GENERIC1_HDR                    0x70bc
+#define AFMT_GENERIC1_0                      0x70c0
+#define AFMT_GENERIC1_1                      0x70c4
+#define AFMT_GENERIC1_2                      0x70c8
+#define AFMT_GENERIC1_3                      0x70cc
+#define AFMT_GENERIC1_4                      0x70d0
+#define AFMT_GENERIC1_5                      0x70d4
+#define AFMT_GENERIC1_6                      0x70d8
+#define HDMI_ACR_32_0                        0x70dc
+#       define HDMI_ACR_CTS_32(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1                        0x70e0
+#       define HDMI_ACR_N_32(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0                        0x70e4
+#       define HDMI_ACR_CTS_44(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1                        0x70e8
+#       define HDMI_ACR_N_44(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0                        0x70ec
+#       define HDMI_ACR_CTS_48(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1                        0x70f0
+#       define HDMI_ACR_N_48(x)              (((x) & 0xfffff) << 0)
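+
+/* Illustration only: the HDMI_ACR_*_0/_1 register pairs above hold the CTS
+ * and N values for audio clock regeneration; the sink reconstructs the audio
+ * clock as f(audio) = f(TMDS) * N / (128 * CTS), and N = 6144 is the
+ * HDMI-recommended value for 48 kHz.  A hedged sketch of computing a CTS to
+ * program via HDMI_ACR_CTS_48(); the helper name and the kHz units are
+ * assumptions of this example.
+ */
+static inline u32 hdmi_acr_cts(u32 tmds_khz, u32 n, u32 rate_khz)
+{
+	return tmds_khz * n / (128 * rate_khz);	/* e.g. (148500, 6144, 48) */
+}
+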
+#define HDMI_ACR_STATUS_0                    0x70f4
+#define HDMI_ACR_STATUS_1                    0x70f8
+#define AFMT_AUDIO_INFO0                     0x70fc
+#       define AFMT_AUDIO_INFO_CHECKSUM(x)   (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_CC(x)         (((x) & 7) << 8)
+#       define AFMT_AUDIO_INFO_CT(x)         (((x) & 0xf) << 11)
+#       define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x)   (((x) & 0xff) << 16)
+#       define AFMT_AUDIO_INFO_CXT(x)        (((x) & 0x1f) << 24)
+#define AFMT_AUDIO_INFO1                     0x7100
+#       define AFMT_AUDIO_INFO_CA(x)         (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_LSV(x)        (((x) & 0xf) << 11)
+#       define AFMT_AUDIO_INFO_DM_INH(x)     (((x) & 1) << 15)
+#       define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#       define AFMT_AUDIO_INFO_LFEBPL(x)     (((x) & 3) << 16)
+#define AFMT_60958_0                         0x7104
+#       define AFMT_60958_CS_A(x)            (((x) & 1) << 0)
+#       define AFMT_60958_CS_B(x)            (((x) & 1) << 1)
+#       define AFMT_60958_CS_C(x)            (((x) & 1) << 2)
+#       define AFMT_60958_CS_D(x)            (((x) & 3) << 3)
+#       define AFMT_60958_CS_MODE(x)         (((x) & 3) << 6)
+#       define AFMT_60958_CS_CATEGORY_CODE(x)      (((x) & 0xff) << 8)
+#       define AFMT_60958_CS_SOURCE_NUMBER(x)      (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_L(x)   (((x) & 0xf) << 20)
+#       define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+#       define AFMT_60958_CS_CLOCK_ACCURACY(x)     (((x) & 3) << 28)
+#define AFMT_60958_1                         0x7108
+#       define AFMT_60958_CS_WORD_LENGTH(x)  (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x)   (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_VALID_L(x)      (((x) & 1) << 16)
+#       define AFMT_60958_CS_VALID_R(x)      (((x) & 1) << 18)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_R(x)   (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL               0x710c
+#       define AFMT_AUDIO_CRC_EN             (1 << 0)
+#define AFMT_RAMP_CONTROL0                   0x7110
+#       define AFMT_RAMP_MAX_COUNT(x)        (((x) & 0xffffff) << 0)
+#       define AFMT_RAMP_DATA_SIGN           (1 << 31)
+#define AFMT_RAMP_CONTROL1                   0x7114
+#       define AFMT_RAMP_MIN_COUNT(x)        (((x) & 0xffffff) << 0)
+#       define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2                   0x7118
+#       define AFMT_RAMP_INC_COUNT(x)        (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3                   0x711c
+#       define AFMT_RAMP_DEC_COUNT(x)        (((x) & 0xffffff) << 0)
+#define AFMT_60958_2                         0x7120
+#       define AFMT_60958_CS_CHANNEL_NUMBER_2(x)   (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_3(x)   (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_4(x)   (((x) & 0xf) << 8)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_5(x)   (((x) & 0xf) << 12)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_6(x)   (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_7(x)   (((x) & 0xf) << 20)
+#define AFMT_STATUS                          0x7128
+#       define AFMT_AUDIO_ENABLE             (1 << 4)
+#       define AFMT_AUDIO_HBR_ENABLE         (1 << 8)
+#       define AFMT_AZ_FORMAT_WTRIG          (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_INT      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG      (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL            0x712c
+#       define AFMT_AUDIO_SAMPLE_SEND        (1 << 0)
+#       define AFMT_RESET_FIFO_WHEN_AUDIO_DIS (1 << 11) /* set to 1 */
+#       define AFMT_AUDIO_TEST_EN            (1 << 12)
+#       define AFMT_AUDIO_CHANNEL_SWAP       (1 << 24)
+#       define AFMT_60958_CS_UPDATE          (1 << 26)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+#       define AFMT_AZ_FORMAT_WTRIG_MASK     (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL              0x7130
+#       define AFMT_GENERIC0_UPDATE          (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0              0x7134
+#       define AFMT_AUDIO_INFO_SOURCE        (1 << 6) /* 0 - sound block; 1 - afmt regs */
+#       define AFMT_AUDIO_INFO_UPDATE        (1 << 7)
+#       define AFMT_MPEG_INFO_UPDATE         (1 << 10)
+#define AFMT_GENERIC0_7                      0x7138
+
+/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0        0x5f84 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1        0x5f88 /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2        0x5f8c /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3        0x5f90 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4        0x5f94 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5        0x5f98 /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6        0x5f9c /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7        0x5fa0 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8        0x5fa4 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9        0x5fa8 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10       0x5fac /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11       0x5fb0 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12       0x5fb4 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13       0x5fb8 /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
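+
+/* Illustration only: each AUDIO_DESCRIPTOR register above carries one short
+ * audio descriptor from the monitor's ELD.  A hedged sketch composing the
+ * LPCM descriptor from CEA-861-style fields; the helper name is
+ * hypothetical.
+ */
+static inline u32 az_lpcm_descriptor_compose(u32 channels, u32 freq_bits,
+					     u32 freq_bits_stereo)
+{
+	return MAX_CHANNELS(channels - 1) |	/* field is channels - 1 */
+	       SUPPORTED_FREQUENCIES(freq_bits) |
+	       SUPPORTED_FREQUENCIES_STEREO(freq_bits_stereo);
+}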
+
+#define AZ_HOT_PLUG_CONTROL                               0x5e78
+#       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
+#       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
+#       define PIN1_JACK_DETECTION_ENABLE                 (1 << 5)
+#       define PIN2_JACK_DETECTION_ENABLE                 (1 << 6)
+#       define PIN3_JACK_DETECTION_ENABLE                 (1 << 7)
+#       define PIN0_UNSOLICITED_RESPONSE_ENABLE           (1 << 8)
+#       define PIN1_UNSOLICITED_RESPONSE_ENABLE           (1 << 9)
+#       define PIN2_UNSOLICITED_RESPONSE_ENABLE           (1 << 10)
+#       define PIN3_UNSOLICITED_RESPONSE_ENABLE           (1 << 11)
+#       define CODEC_HOT_PLUG_ENABLE                      (1 << 12)
+#       define PIN0_AUDIO_ENABLED                         (1 << 24)
+#       define PIN1_AUDIO_ENABLED                         (1 << 25)
+#       define PIN2_AUDIO_ENABLED                         (1 << 26)
+#       define PIN3_AUDIO_ENABLED                         (1 << 27)
+#       define AUDIO_ENABLED                              (1 << 31)
+
+
+#define	GC_USER_SHADER_PIPE_CONFIG			0x8954
+#define		INACTIVE_QD_PIPES(x)				((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_SIMDS(x)				((x) << 16)
+#define		INACTIVE_SIMDS_MASK				0x00FF0000
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1 << 0)
+#define		SOFT_RESET_CB					(1 << 1)
+#define		SOFT_RESET_DB					(1 << 3)
+#define		SOFT_RESET_PA					(1 << 5)
+#define		SOFT_RESET_SC					(1 << 6)
+#define		SOFT_RESET_SPI					(1 << 8)
+#define		SOFT_RESET_SH					(1 << 9)
+#define		SOFT_RESET_SX					(1 << 10)
+#define		SOFT_RESET_TC					(1 << 11)
+#define		SOFT_RESET_TA					(1 << 12)
+#define		SOFT_RESET_VC					(1 << 13)
+#define		SOFT_RESET_VGT					(1 << 14)
+
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		SRBM_RQ_PENDING					(1 << 5)
+#define		CF_RQ_PENDING					(1 << 7)
+#define		PF_RQ_PENDING					(1 << 8)
+#define		GRBM_EE_BUSY					(1 << 10)
+#define		SX_CLEAN					(1 << 11)
+#define		DB_CLEAN					(1 << 12)
+#define		CB_CLEAN					(1 << 13)
+#define		TA_BUSY 					(1 << 14)
+#define		VGT_BUSY_NO_DMA					(1 << 16)
+#define		VGT_BUSY					(1 << 17)
+#define		SX_BUSY 					(1 << 20)
+#define		SH_BUSY 					(1 << 21)
+#define		SPI_BUSY					(1 << 22)
+#define		SC_BUSY 					(1 << 24)
+#define		PA_BUSY 					(1 << 25)
+#define		DB_BUSY 					(1 << 26)
+#define		CP_COHERENCY_BUSY      				(1 << 28)
+#define		CP_BUSY 					(1 << 29)
+#define		CB_BUSY 					(1 << 30)
+#define		GUI_ACTIVE					(1 << 31)
+#define	GRBM_STATUS_SE0					0x8014
+#define	GRBM_STATUS_SE1					0x8018
+#define		SE_SX_CLEAN					(1 << 0)
+#define		SE_DB_CLEAN					(1 << 1)
+#define		SE_CB_CLEAN					(1 << 2)
+#define		SE_TA_BUSY					(1 << 25)
+#define		SE_SX_BUSY					(1 << 26)
+#define		SE_SPI_BUSY					(1 << 27)
+#define		SE_SH_BUSY					(1 << 28)
+#define		SE_SC_BUSY					(1 << 29)
+#define		SE_DB_BUSY					(1 << 30)
+#define		SE_CB_BUSY					(1 << 31)
+/* evergreen */
+#define	CG_THERMAL_CTRL					0x72c
+#define		TOFFSET_MASK			        0x00003FE0
+#define		TOFFSET_SHIFT			        5
+#define	CG_MULT_THERMAL_STATUS				0x740
+#define		ASIC_T(x)			        ((x) << 16)
+#define		ASIC_T_MASK			        0x07FF0000
+#define		ASIC_T_SHIFT			        16
+#define	CG_TS0_STATUS					0x760
+#define		TS0_ADC_DOUT_MASK			0x000003FF
+#define		TS0_ADC_DOUT_SHIFT			0
+/* APU */
+#define	CG_THERMAL_STATUS			        0x678
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+#define HDP_MEM_COHERENCY_FLUSH_CNTL			0x5480
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+#define	HDP_TILING_CONFIG				0x2F3C
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x00003000
+#define MC_SHARED_CHREMAP					0x2008
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+#define		BLACKOUT_MODE_MASK			0x00000007
+
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		BURSTLENGTH_SHIFT				9
+#define		BURSTLENGTH_MASK				0x00000200
+#define		CHANSIZE_OVERRIDE				(1 << 11)
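+
+/* Illustration only: MC_ARB_RAMCFG describes the memory configuration as
+ * packed fields, decoded with the usual mask-then-shift pattern.  A minimal
+ * sketch with a hypothetical helper name.
+ */
+static inline u32 mc_arb_ramcfg_noofbank(u32 ramcfg)
+{
+	return (ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT;
+}
+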
+#define	FUS_MC_ARB_RAMCFG				0x2768
+#define	MC_VM_AGP_TOP					0x2028
+#define	MC_VM_AGP_BOT					0x202C
+#define	MC_VM_AGP_BASE					0x2030
+#define	MC_VM_FB_LOCATION				0x2024
+#define	MC_FUS_VM_FB_OFFSET				0x2898
+#define	MC_VM_MB_L1_TLB0_CNTL				0x2234
+#define	MC_VM_MB_L1_TLB1_CNTL				0x2238
+#define	MC_VM_MB_L1_TLB2_CNTL				0x223C
+#define	MC_VM_MB_L1_TLB3_CNTL				0x2240
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		EFFECTIVE_L1_TLB_SIZE(x)			((x)<<15)
+#define		EFFECTIVE_L1_QUEUE_SIZE(x)			((x)<<18)
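+
+/* Illustration only: the MC_VM_M[BD]_L1_TLB*_CNTL registers share the flag
+ * layout above.  A hedged sketch of a typical enable value in the spirit of
+ * the driver's GART setup; the size arguments are example values and the
+ * helper name is hypothetical.
+ */
+static inline u32 mc_vm_l1_tlb_enable_compose(void)
+{
+	return ENABLE_L1_TLB |
+	       ENABLE_L1_FRAGMENT_PROCESSING |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+	       EFFECTIVE_L1_TLB_SIZE(5) |
+	       EFFECTIVE_L1_QUEUE_SIZE(5);
+}
+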
+#define	MC_VM_MD_L1_TLB0_CNTL				0x2654
+#define	MC_VM_MD_L1_TLB1_CNTL				0x2658
+#define	MC_VM_MD_L1_TLB2_CNTL				0x265C
+#define	MC_VM_MD_L1_TLB3_CNTL				0x2698
+
+#define	FUS_MC_VM_MD_L1_TLB0_CNTL			0x265C
+#define	FUS_MC_VM_MD_L1_TLB1_CNTL			0x2660
+#define	FUS_MC_VM_MD_L1_TLB2_CNTL			0x2664
+
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+#define	PA_SC_ENHANCE					0x8BF0
+#define PA_SC_AA_CONFIG					0x28C04
+#define         MSAA_NUM_SAMPLES_SHIFT                  0
+#define         MSAA_NUM_SAMPLES_MASK                   0x3
+#define PA_SC_CLIPRECT_RULE				0x2820C
+#define	PA_SC_EDGERULE					0x28230
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_PRIM_FIFO_SIZE(x)				((x) << 0)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 12)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 20)
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
+#define PA_SC_LINE_STIPPLE				0x28A0C
+#define	PA_SU_LINE_STIPPLE_VALUE			0x8A60
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+
+#define	SMX_SAR_CTL0					0xA008
+#define	SMX_DC_CTL0					0xA020
+#define		USE_HASH_FUNCTION				(1 << 0)
+#define		NUMBER_OF_SETS(x)				((x) << 1)
+#define		FLUSH_ALL_ON_EVENT				(1 << 10)
+#define		STALL_ON_EVENT					(1 << 11)
+#define	SMX_EVENT_CTL					0xA02C
+#define		ES_FLUSH_CTL(x)					((x) << 0)
+#define		GS_FLUSH_CTL(x)					((x) << 3)
+#define		ACK_FLUSH_CTL(x)				((x) << 6)
+#define		SYNC_FLUSH_CTL					(1 << 8)
+
+#define	SPI_CONFIG_CNTL					0x9100
+#define		GPR_WRITE_PRIORITY(x)				((x) << 0)
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+#define	SPI_INPUT_Z					0x286D8
+#define	SPI_PS_IN_CONTROL_0				0x286CC
+#define		NUM_INTERP(x)					((x)<<0)
+#define		POSITION_ENA					(1<<8)
+#define		POSITION_CENTROID				(1<<9)
+#define		POSITION_ADDR(x)				((x)<<10)
+#define		PARAM_GEN(x)					((x)<<15)
+#define		PARAM_GEN_ADDR(x)				((x)<<19)
+#define		BARYC_SAMPLE_CNTL(x)				((x)<<26)
+#define		PERSP_GRADIENT_ENA				(1<<28)
+#define		LINEAR_GRADIENT_ENA				(1<<29)
+#define		POSITION_SAMPLE					(1<<30)
+#define		BARYC_AT_SAMPLE_ENA				(1<<31)
+
+#define	SQ_CONFIG					0x8C00
+#define		VC_ENABLE					(1 << 0)
+#define		EXPORT_SRC_C					(1 << 1)
+#define		CS_PRIO(x)					((x) << 18)
+#define		LS_PRIO(x)					((x) << 20)
+#define		HS_PRIO(x)					((x) << 22)
+#define		PS_PRIO(x)					((x) << 24)
+#define		VS_PRIO(x)					((x) << 26)
+#define		GS_PRIO(x)					((x) << 28)
+#define		ES_PRIO(x)					((x) << 30)
+#define	SQ_GPR_RESOURCE_MGMT_1				0x8C04
+#define		NUM_PS_GPRS(x)					((x) << 0)
+#define		NUM_VS_GPRS(x)					((x) << 16)
+#define		NUM_CLAUSE_TEMP_GPRS(x)				((x) << 28)
+#define	SQ_GPR_RESOURCE_MGMT_2				0x8C08
+#define		NUM_GS_GPRS(x)					((x) << 0)
+#define		NUM_ES_GPRS(x)					((x) << 16)
+#define	SQ_GPR_RESOURCE_MGMT_3				0x8C0C
+#define		NUM_HS_GPRS(x)					((x) << 0)
+#define		NUM_LS_GPRS(x)					((x) << 16)
+#define	SQ_GLOBAL_GPR_RESOURCE_MGMT_1			0x8C10
+#define	SQ_GLOBAL_GPR_RESOURCE_MGMT_2			0x8C14
+#define	SQ_THREAD_RESOURCE_MGMT				0x8C18
+#define		NUM_PS_THREADS(x)				((x) << 0)
+#define		NUM_VS_THREADS(x)				((x) << 8)
+#define		NUM_GS_THREADS(x)				((x) << 16)
+#define		NUM_ES_THREADS(x)				((x) << 24)
+#define	SQ_THREAD_RESOURCE_MGMT_2			0x8C1C
+#define		NUM_HS_THREADS(x)				((x) << 0)
+#define		NUM_LS_THREADS(x)				((x) << 8)
+#define	SQ_STACK_RESOURCE_MGMT_1			0x8C20
+#define		NUM_PS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_VS_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_STACK_RESOURCE_MGMT_2			0x8C24
+#define		NUM_GS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_ES_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_STACK_RESOURCE_MGMT_3			0x8C28
+#define		NUM_HS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_LS_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_DYN_GPR_CNTL_PS_FLUSH_REQ    		0x8D8C
+#define	SQ_DYN_GPR_SIMD_LOCK_EN    			0x8D94
+#define	SQ_STATIC_THREAD_MGMT_1    			0x8E20
+#define	SQ_STATIC_THREAD_MGMT_2    			0x8E24
+#define	SQ_STATIC_THREAD_MGMT_3    			0x8E28
+#define	SQ_LDS_RESOURCE_MGMT    			0x8E2C
+
+#define	SQ_MS_FIFO_SIZES				0x8CF0
+#define		CACHE_FIFO_SIZE(x)				((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)				((x) << 8)
+#define		DONE_FIFO_HIWATER(x)				((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)			((x) << 24)
+
+#define	SX_DEBUG_1					0x9058
+#define		ENABLE_NEW_SMX_ADDRESS				(1 << 16)
+#define	SX_EXPORT_BUFFER_SIZES				0x900C
+#define		COLOR_BUFFER_SIZE(x)				((x) << 0)
+#define		POSITION_BUFFER_SIZE(x)				((x) << 8)
+#define		SMX_BUFFER_SIZE(x)				((x) << 16)
+#define	SX_MEMORY_EXPORT_BASE				0x9010
+#define	SX_MISC						0x28350
+
+#define CB_PERF_CTR0_SEL_0				0x9A20
+#define CB_PERF_CTR0_SEL_1				0x9A24
+#define CB_PERF_CTR1_SEL_0				0x9A28
+#define CB_PERF_CTR1_SEL_1				0x9A2C
+#define CB_PERF_CTR2_SEL_0				0x9A30
+#define CB_PERF_CTR2_SEL_1				0x9A34
+#define CB_PERF_CTR3_SEL_0				0x9A38
+#define CB_PERF_CTR3_SEL_1				0x9A3C
+
+#define	TA_CNTL_AUX					0x9508
+#define		DISABLE_CUBE_WRAP				(1 << 0)
+#define		DISABLE_CUBE_ANISO				(1 << 1)
+#define		SYNC_GRADIENT					(1 << 24)
+#define		SYNC_WALKER					(1 << 25)
+#define		SYNC_ALIGNER					(1 << 26)
+
+#define	TCP_CHAN_STEER_LO				0x960c
+#define	TCP_CHAN_STEER_HI				0x9610
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x) << 0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+#define	VGT_NUM_INSTANCES				0x8974
+#define	VGT_OUT_DEALLOC_CNTL				0x28C5C
+#define		DEALLOC_DIST_MASK				0x0000007F
+#define	VGT_VERTEX_REUSE_BLOCK_CNTL			0x28C58
+#define		VTX_REUSE_DEPTH_MASK				0x000000FF
+
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define VM_CONTEXT1_CNTL				0x1414
+#define VM_CONTEXT1_CNTL2				0x1434
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153C
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_CONTEXT0_REQUEST_RESPONSE			0x1470
+#define		REQUEST_TYPE(x)					(((x) & 0xf) << 0)
+#define		RESPONSE_TYPE_MASK				0x000000F0
+#define		RESPONSE_TYPE_SHIFT				4
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 14)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		CACHE_UPDATE_MODE(x)				((x) << 6)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+#define	VM_CONTEXT1_PROTECTION_FAULT_ADDR		0x14FC
+#define	VM_CONTEXT1_PROTECTION_FAULT_STATUS		0x14DC
+
+#define	WAIT_UNTIL					0x8040
+
+#define	SRBM_STATUS				        0x0E50
+#define		RLC_RQ_PENDING 				(1 << 3)
+#define		GRBM_RQ_PENDING 			(1 << 5)
+#define		VMC_BUSY 				(1 << 8)
+#define		MCB_BUSY 				(1 << 9)
+#define		MCB_NON_DISPLAY_BUSY 			(1 << 10)
+#define		MCC_BUSY 				(1 << 11)
+#define		MCD_BUSY 				(1 << 12)
+#define		SEM_BUSY 				(1 << 14)
+#define		RLC_BUSY 				(1 << 15)
+#define		IH_BUSY 				(1 << 17)
+#define	SRBM_STATUS2				        0x0EC4
+#define		DMA_BUSY 				(1 << 5)
+#define	SRBM_SOFT_RESET				        0x0E60
+#define		SRBM_SOFT_RESET_ALL_MASK		0x00FEEFA6
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_CG				(1 << 2)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_RLC				(1 << 13)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_DMA				(1 << 20)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB			(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+
+/* display watermarks */
+#define	DC_LB_MEMORY_SPLIT				  0x6b0c
+#define	PRIORITY_A_CNT			                  0x6b18
+#define		PRIORITY_MARK_MASK			  0x7fff
+#define		PRIORITY_OFF				  (1 << 16)
+#define		PRIORITY_ALWAYS_ON			  (1 << 20)
+#define	PRIORITY_B_CNT			                  0x6b1c
+#define	PIPE0_ARBITRATION_CONTROL3			  0x0bf0
+#       define LATENCY_WATERMARK_MASK(x)                  ((x) << 16)
+#define	PIPE0_LATENCY_CONTROL			          0x0bf4
+#       define LATENCY_LOW_WATERMARK(x)                   ((x) << 0)
+#       define LATENCY_HIGH_WATERMARK(x)                  ((x) << 16)
+
+#define	PIPE0_DMIF_BUFFER_CONTROL			  0x0ca0
+#       define DMIF_BUFFERS_ALLOCATED(x)                  ((x) << 0)
+#       define DMIF_BUFFERS_ALLOCATED_COMPLETED           (1 << 4)
+
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
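+
+/* Illustration only: IH_RB_CNTL takes a log2-encoded ring size (IH_IB_SIZE)
+ * and a log2-encoded writeback timer, per the comments above.  A hedged
+ * sketch of composing an enable value; the helper name is hypothetical and
+ * the timer value is just an example.
+ */
+static inline u32 ih_rb_cntl_compose(u32 log2_ring_dwords)
+{
+	return IH_RB_ENABLE |
+	       IH_IB_SIZE(log2_ring_dwords) |
+	       IH_WPTR_WRITEBACK_ENABLE |
+	       IH_WPTR_WRITEBACK_TIMER(4) |
+	       IH_WPTR_OVERFLOW_ENABLE |
+	       IH_WPTR_OVERFLOW_CLEAR;
+}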
+
+#define CP_INT_CNTL                                     0xc124
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define SCRATCH_INT_ENABLE                       (1 << 25)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define IB2_INT_ENABLE                           (1 << 29)
+#       define IB1_INT_ENABLE                           (1 << 30)
+#       define RB_INT_ENABLE                            (1 << 31)
+#define CP_INT_STATUS                                   0xc128
+#       define SCRATCH_INT_STAT                         (1 << 25)
+#       define TIME_STAMP_INT_STAT                      (1 << 26)
+#       define IB2_INT_STAT                             (1 << 29)
+#       define IB1_INT_STAT                             (1 << 30)
+#       define RB_INT_STAT                              (1 << 31)
+
+#define GRBM_INT_CNTL                                   0x8060
+#       define RDERR_INT_ENABLE                         (1 << 0)
+#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT                         0x6e98
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS                                    0x6bb8
+#       define VLINE_OCCURRED                           (1 << 0)
+#       define VLINE_ACK                                (1 << 4)
+#       define VLINE_STAT                               (1 << 12)
+#       define VLINE_INTERRUPT                          (1 << 16)
+#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS                                   0x6bbc
+#       define VBLANK_OCCURRED                          (1 << 0)
+#       define VBLANK_ACK                               (1 << 4)
+#       define VBLANK_STAT                              (1 << 12)
+#       define VBLANK_INTERRUPT                         (1 << 16)
+#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK                                        0x6b40
+#       define VBLANK_INT_MASK                          (1 << 0)
+#       define VLINE_INT_MASK                           (1 << 4)
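+
+/* Illustration only: the six display controllers are not evenly spaced, so
+ * per-CRTC registers (see the offset lists in the comments above) are
+ * reached most naturally through a table of instance offsets rather than a
+ * fixed stride.  A hedged sketch; the array and helper names are
+ * hypothetical.
+ */
+static const u32 example_crtc_offsets[6] = {
+	0x6b40 - 0x6b40,	/* CRTC0 */
+	0x7740 - 0x6b40,	/* CRTC1 */
+	0x10340 - 0x6b40,	/* CRTC2 */
+	0x10f40 - 0x6b40,	/* CRTC3 */
+	0x11b40 - 0x6b40,	/* CRTC4 */
+	0x12740 - 0x6b40,	/* CRTC5 */
+};
+static inline u32 int_mask_reg(unsigned int crtc)
+{
+	return INT_MASK + example_crtc_offsets[crtc];
+}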
+
+#define DISP_INTERRUPT_STATUS                           0x60f4
+#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD1_INTERRUPT                        (1 << 17)
+#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
+#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
+#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
+#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
+#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE                  0x60f8
+#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD2_INTERRUPT                        (1 << 17)
+#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
+#       define DISP_TIMER_INTERRUPT                     (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x60fc
+#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD3_INTERRUPT                        (1 << 17)
+#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x6100
+#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD4_INTERRUPT                        (1 << 17)
+#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x614c
+#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD5_INTERRUPT                        (1 << 17)
+#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x6150
+#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD6_INTERRUPT                        (1 << 17)
+#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x6858
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define	GRPH_INT_CONTROL			        0x685c
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+#define	DACA_AUTODETECT_INT_CONTROL			0x66c8
+#define	DACB_AUTODETECT_INT_CONTROL			0x67c8
+
+#define DC_HPD1_INT_STATUS                              0x601c
+#define DC_HPD2_INT_STATUS                              0x6028
+#define DC_HPD3_INT_STATUS                              0x6034
+#define DC_HPD4_INT_STATUS                              0x6040
+#define DC_HPD5_INT_STATUS                              0x604c
+#define DC_HPD6_INT_STATUS                              0x6058
+#       define DC_HPDx_INT_STATUS                       (1 << 0)
+#       define DC_HPDx_SENSE                            (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
+
+#define DC_HPD1_INT_CONTROL                             0x6020
+#define DC_HPD2_INT_CONTROL                             0x602c
+#define DC_HPD3_INT_CONTROL                             0x6038
+#define DC_HPD4_INT_CONTROL                             0x6044
+#define DC_HPD5_INT_CONTROL                             0x6050
+#define DC_HPD6_INT_CONTROL                             0x605c
+#       define DC_HPDx_INT_ACK                          (1 << 0)
+#       define DC_HPDx_INT_POLARITY                     (1 << 8)
+#       define DC_HPDx_INT_EN                           (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
+#       define DC_HPDx_RX_INT_EN                        (1 << 24)
+
+#define DC_HPD1_CONTROL                                   0x6024
+#define DC_HPD2_CONTROL                                   0x6030
+#define DC_HPD3_CONTROL                                   0x603c
+#define DC_HPD4_CONTROL                                   0x6048
+#define DC_HPD5_CONTROL                                   0x6054
+#define DC_HPD6_CONTROL                                   0x6060
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+#       define DC_HPDx_EN                                 (1 << 28)
+
+/* ASYNC DMA */
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_TILING_CONFIG  				  0xD0B8
+
+#define CAYMAN_DMA1_CNTL                                  0xd82c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, sub_cmd, n) ((((cmd) & 0xF) << 28) |    \
+                                    (((sub_cmd) & 0xFF) << 20) |\
+                                    (((n) & 0xFFFFF) << 0))
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_SUB_CMD(h) (((h) & 0x0ff00000) >> 20)
+
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE                        0x2
+#define	DMA_PACKET_COPY                         0x3
+#define	DMA_PACKET_INDIRECT_BUFFER              0x4
+#define	DMA_PACKET_SEMAPHORE                    0x5
+#define	DMA_PACKET_FENCE                        0x6
+#define	DMA_PACKET_TRAP                         0x7
+#define	DMA_PACKET_SRBM_WRITE                   0x9
+#define	DMA_PACKET_CONSTANT_FILL                0xd
+#define	DMA_PACKET_NOP                          0xf
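+
+/* Illustration only: DMA ring commands are built with DMA_PACKET() and
+ * decoded with the GET_DMA_*() accessors above.  A minimal round-trip
+ * sketch; the helper name is hypothetical.
+ */
+static inline u32 dma_fence_header(void)
+{
+	/* fence packet: no sub-command, zero extra dwords in the count field */
+	return DMA_PACKET(DMA_PACKET_FENCE, 0, 0);
+}
+/* On the decode side, GET_DMA_CMD(dma_fence_header()) == DMA_PACKET_FENCE,
+ * and GET_DMA_SUB_CMD()/GET_DMA_COUNT() both return 0.
+ */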
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     8
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG				0xef4c
+#define UVD_UDEC_DB_ADDR_CONFIG				0xef50
+#define UVD_UDEC_DBW_ADDR_CONFIG			0xef54
+#define UVD_RBC_RB_RPTR					0xf690
+#define UVD_RBC_RB_WPTR					0xf694
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |			\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |			\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
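+
+/* Illustration only: PACKET0(reg, n) emits a header for writing n + 1
+ * consecutive dwords starting at reg, and PACKET3(op, n) carries an opcode
+ * followed by n + 1 payload dwords; PACKET2 additionally relies on the
+ * REG_SET() helper from the driver's common headers.  A minimal sketch with
+ * hypothetical helper names.
+ */
+static inline u32 packet0_one_reg(u32 reg)
+{
+	return PACKET0(reg, 0);		/* count field is dwords - 1 */
+}
+static inline u32 packet3_header(u32 op, u32 payload_dwords)
+{
+	return PACKET3(op, payload_dwords - 1);	/* payload_dwords >= 1 */
+}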
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_SET_BASE				0x11
+#define	PACKET3_CLEAR_STATE				0x12
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
+#define	PACKET3_DISPATCH_DIRECT				0x15
+#define	PACKET3_DISPATCH_INDIRECT			0x16
+#define	PACKET3_INDIRECT_BUFFER_END			0x17
+#define	PACKET3_MODE_CONTROL				0x18
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_DRAW_INDIRECT				0x24
+#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
+#define	PACKET3_INDEX_BASE				0x26
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_DRAW_INDEX_OFFSET			0x29
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDEX				0x2B
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
+#define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_COPY_DW					0x3B
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - DST_ADDR
+		 * 1 - GDS
+		 */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+		 * 1 - GDS
+		 * 2 - DATA
+		 */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
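+
+/* Illustration only: following the six-dword layout above, a CP_DMA that
+ * copies byte_count bytes from src to dst through the ME engine could be
+ * assembled as below.  A hedged sketch; ring emission, alignment, and
+ * address validity are the caller's responsibility, and the helper name is
+ * hypothetical.
+ */
+static inline void cp_dma_copy_words(u32 *pkt, u64 src, u64 dst,
+				      u32 byte_count)
+{
+	pkt[0] = PACKET3(PACKET3_CP_DMA, 4);	/* header plus 5 dwords */
+	pkt[1] = (u32)src;			/* SRC_ADDR_LO */
+	pkt[2] = PACKET3_CP_DMA_SRC_SEL(0) |	/* 0 = SRC_ADDR */
+		 PACKET3_CP_DMA_DST_SEL(0) |	/* 0 = DST_ADDR */
+		 ((u32)(src >> 32) & 0xff);	/* SRC_ADDR_HI [7:0] */
+	pkt[3] = (u32)dst;			/* DST_ADDR_LO */
+	pkt[4] = (u32)(dst >> 32) & 0xff;	/* DST_ADDR_HI [7:0] */
+	pkt[5] = byte_count & 0x1fffff;		/* COMMAND clear, BYTE_COUNT */
+}
+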
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_CB8_DEST_BASE_ENA    (1 << 15)
+#              define PACKET3_CB9_DEST_BASE_ENA    (1 << 16)
+#              define PACKET3_CB10_DEST_BASE_ENA   (1 << 17)
+#              define PACKET3_CB11_DEST_BASE_ENA   (1 << 18)
+#              define PACKET3_FULL_CACHE_ENA       (1 << 20)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_VC_ACTION_ENA        (1 << 24)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_ACTION_ENA        (1 << 27)
+#              define PACKET3_SX_ACTION_ENA        (1 << 28)
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define	PACKET3_EVENT_WRITE_EOS				0x48
+#define	PACKET3_PREAMBLE_CNTL				0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define	PACKET3_RB_OFFSET				0x4B
+#define	PACKET3_ALU_PS_CONST_BUFFER_COPY		0x4C
+#define	PACKET3_ALU_VS_CONST_BUFFER_COPY		0x4D
+#define	PACKET3_ALU_PS_CONST_UPDATE		        0x4E
+#define	PACKET3_ALU_VS_CONST_UPDATE		        0x4F
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_START			0x00008000
+#define		PACKET3_SET_CONFIG_REG_END			0x0000ac00
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_START			0x00028000
+#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
+#define	PACKET3_SET_ALU_CONST				0x6A
+/* alu const buffers only; no reg file */
+#define	PACKET3_SET_BOOL_CONST				0x6B
+#define		PACKET3_SET_BOOL_CONST_START			0x0003a500
+#define		PACKET3_SET_BOOL_CONST_END			0x0003a518
+#define	PACKET3_SET_LOOP_CONST				0x6C
+#define		PACKET3_SET_LOOP_CONST_START			0x0003a200
+#define		PACKET3_SET_LOOP_CONST_END			0x0003a500
+#define	PACKET3_SET_RESOURCE				0x6D
+#define		PACKET3_SET_RESOURCE_START			0x00030000
+#define		PACKET3_SET_RESOURCE_END			0x00038000
+#define	PACKET3_SET_SAMPLER				0x6E
+#define		PACKET3_SET_SAMPLER_START			0x0003c000
+#define		PACKET3_SET_SAMPLER_END				0x0003c600
+#define	PACKET3_SET_CTL_CONST				0x6F
+#define		PACKET3_SET_CTL_CONST_START			0x0003cff0
+#define		PACKET3_SET_CTL_CONST_END			0x0003ff0c
+#define	PACKET3_SET_RESOURCE_OFFSET			0x70
+#define	PACKET3_SET_ALU_CONST_VS			0x71
+#define	PACKET3_SET_ALU_CONST_DI			0x72
+#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
+#define	PACKET3_SET_RESOURCE_INDIRECT			0x74
+#define	PACKET3_SET_APPEND_CNT			        0x75
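+
+/* Illustration only: the SET_*_START/_END pairs above bound each state
+ * block, and the first dword after a SET_* header is the dword index of the
+ * first register written, i.e. (reg - START) >> 2.  A minimal sketch with a
+ * hypothetical helper name.
+ */
+static inline u32 set_context_reg_index(u32 reg)
+{
+	return (reg - PACKET3_SET_CONTEXT_REG_START) >> 2;
+}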
+
+#define	SQ_RESOURCE_CONSTANT_WORD7_0				0x3001c
+#define		S__SQ_CONSTANT_TYPE(x)			(((x) & 3) << 30)
+#define		G__SQ_CONSTANT_TYPE(x)			(((x) >> 30) & 3)
+#define			SQ_TEX_VTX_INVALID_TEXTURE			0x0
+#define			SQ_TEX_VTX_INVALID_BUFFER			0x1
+#define			SQ_TEX_VTX_VALID_TEXTURE			0x2
+#define			SQ_TEX_VTX_VALID_BUFFER				0x3
+
+#define VGT_VTX_VECT_EJECT_REG				0x88b0
+
+#define SQ_CONST_MEM_BASE				0x8df8
+
+#define SQ_ESGS_RING_BASE				0x8c40
+#define SQ_ESGS_RING_SIZE				0x8c44
+#define SQ_GSVS_RING_BASE				0x8c48
+#define SQ_GSVS_RING_SIZE				0x8c4c
+#define SQ_ESTMP_RING_BASE				0x8c50
+#define SQ_ESTMP_RING_SIZE				0x8c54
+#define SQ_GSTMP_RING_BASE				0x8c58
+#define SQ_GSTMP_RING_SIZE				0x8c5c
+#define SQ_VSTMP_RING_BASE				0x8c60
+#define SQ_VSTMP_RING_SIZE				0x8c64
+#define SQ_PSTMP_RING_BASE				0x8c68
+#define SQ_PSTMP_RING_SIZE				0x8c6c
+#define SQ_LSTMP_RING_BASE				0x8e10
+#define SQ_LSTMP_RING_SIZE				0x8e14
+#define SQ_HSTMP_RING_BASE				0x8e18
+#define SQ_HSTMP_RING_SIZE				0x8e1c
+#define VGT_TF_RING_SIZE				0x8988
+
+#define SQ_ESGS_RING_ITEMSIZE				0x28900
+#define SQ_GSVS_RING_ITEMSIZE				0x28904
+#define SQ_ESTMP_RING_ITEMSIZE				0x28908
+#define SQ_GSTMP_RING_ITEMSIZE				0x2890c
+#define SQ_VSTMP_RING_ITEMSIZE				0x28910
+#define SQ_PSTMP_RING_ITEMSIZE				0x28914
+#define SQ_LSTMP_RING_ITEMSIZE				0x28830
+#define SQ_HSTMP_RING_ITEMSIZE				0x28834
+
+#define SQ_GS_VERT_ITEMSIZE				0x2891c
+#define SQ_GS_VERT_ITEMSIZE_1				0x28920
+#define SQ_GS_VERT_ITEMSIZE_2				0x28924
+#define SQ_GS_VERT_ITEMSIZE_3				0x28928
+#define SQ_GSVS_RING_OFFSET_1				0x2892c
+#define SQ_GSVS_RING_OFFSET_2				0x28930
+#define SQ_GSVS_RING_OFFSET_3				0x28934
+
+#define SQ_ALU_CONST_BUFFER_SIZE_PS_0			0x28140
+#define SQ_ALU_CONST_BUFFER_SIZE_HS_0			0x28f80
+
+#define SQ_ALU_CONST_CACHE_PS_0				0x28940
+#define SQ_ALU_CONST_CACHE_PS_1				0x28944
+#define SQ_ALU_CONST_CACHE_PS_2				0x28948
+#define SQ_ALU_CONST_CACHE_PS_3				0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4				0x28950
+#define SQ_ALU_CONST_CACHE_PS_5				0x28954
+#define SQ_ALU_CONST_CACHE_PS_6				0x28958
+#define SQ_ALU_CONST_CACHE_PS_7				0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8				0x28960
+#define SQ_ALU_CONST_CACHE_PS_9				0x28964
+#define SQ_ALU_CONST_CACHE_PS_10			0x28968
+#define SQ_ALU_CONST_CACHE_PS_11			0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12			0x28970
+#define SQ_ALU_CONST_CACHE_PS_13			0x28974
+#define SQ_ALU_CONST_CACHE_PS_14			0x28978
+#define SQ_ALU_CONST_CACHE_PS_15			0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0				0x28980
+#define SQ_ALU_CONST_CACHE_VS_1				0x28984
+#define SQ_ALU_CONST_CACHE_VS_2				0x28988
+#define SQ_ALU_CONST_CACHE_VS_3				0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4				0x28990
+#define SQ_ALU_CONST_CACHE_VS_5				0x28994
+#define SQ_ALU_CONST_CACHE_VS_6				0x28998
+#define SQ_ALU_CONST_CACHE_VS_7				0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8				0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9				0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10			0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11			0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12			0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13			0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14			0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15			0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0				0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1				0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2				0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3				0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4				0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5				0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6				0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7				0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8				0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9				0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10			0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11			0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12			0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13			0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14			0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15			0x289fc
+#define SQ_ALU_CONST_CACHE_HS_0				0x28f00
+#define SQ_ALU_CONST_CACHE_HS_1				0x28f04
+#define SQ_ALU_CONST_CACHE_HS_2				0x28f08
+#define SQ_ALU_CONST_CACHE_HS_3				0x28f0c
+#define SQ_ALU_CONST_CACHE_HS_4				0x28f10
+#define SQ_ALU_CONST_CACHE_HS_5				0x28f14
+#define SQ_ALU_CONST_CACHE_HS_6				0x28f18
+#define SQ_ALU_CONST_CACHE_HS_7				0x28f1c
+#define SQ_ALU_CONST_CACHE_HS_8				0x28f20
+#define SQ_ALU_CONST_CACHE_HS_9				0x28f24
+#define SQ_ALU_CONST_CACHE_HS_10			0x28f28
+#define SQ_ALU_CONST_CACHE_HS_11			0x28f2c
+#define SQ_ALU_CONST_CACHE_HS_12			0x28f30
+#define SQ_ALU_CONST_CACHE_HS_13			0x28f34
+#define SQ_ALU_CONST_CACHE_HS_14			0x28f38
+#define SQ_ALU_CONST_CACHE_HS_15			0x28f3c
+#define SQ_ALU_CONST_CACHE_LS_0				0x28f40
+#define SQ_ALU_CONST_CACHE_LS_1				0x28f44
+#define SQ_ALU_CONST_CACHE_LS_2				0x28f48
+#define SQ_ALU_CONST_CACHE_LS_3				0x28f4c
+#define SQ_ALU_CONST_CACHE_LS_4				0x28f50
+#define SQ_ALU_CONST_CACHE_LS_5				0x28f54
+#define SQ_ALU_CONST_CACHE_LS_6				0x28f58
+#define SQ_ALU_CONST_CACHE_LS_7				0x28f5c
+#define SQ_ALU_CONST_CACHE_LS_8				0x28f60
+#define SQ_ALU_CONST_CACHE_LS_9				0x28f64
+#define SQ_ALU_CONST_CACHE_LS_10			0x28f68
+#define SQ_ALU_CONST_CACHE_LS_11			0x28f6c
+#define SQ_ALU_CONST_CACHE_LS_12			0x28f70
+#define SQ_ALU_CONST_CACHE_LS_13			0x28f74
+#define SQ_ALU_CONST_CACHE_LS_14			0x28f78
+#define SQ_ALU_CONST_CACHE_LS_15			0x28f7c
+
+#define PA_SC_SCREEN_SCISSOR_TL                         0x28030
+#define PA_SC_GENERIC_SCISSOR_TL                        0x28240
+#define PA_SC_WINDOW_SCISSOR_TL                         0x28204
+
+#define VGT_PRIMITIVE_TYPE                              0x8958
+#define VGT_INDEX_TYPE                                  0x895C
+
+#define VGT_NUM_INDICES                                 0x8970
+
+#define VGT_COMPUTE_DIM_X                               0x8990
+#define VGT_COMPUTE_DIM_Y                               0x8994
+#define VGT_COMPUTE_DIM_Z                               0x8998
+#define VGT_COMPUTE_START_X                             0x899C
+#define VGT_COMPUTE_START_Y                             0x89A0
+#define VGT_COMPUTE_START_Z                             0x89A4
+#define VGT_COMPUTE_INDEX                               0x89A8
+#define VGT_COMPUTE_THREAD_GROUP_SIZE                   0x89AC
+#define VGT_HS_OFFCHIP_PARAM                            0x89B0
+
+#define DB_DEBUG					0x9830
+#define DB_DEBUG2					0x9834
+#define DB_DEBUG3					0x9838
+#define DB_DEBUG4					0x983C
+#define DB_WATERMARKS					0x9854
+#define DB_DEPTH_CONTROL				0x28800
+#define R_028800_DB_DEPTH_CONTROL                    0x028800
+#define   S_028800_STENCIL_ENABLE(x)                   (((x) & 0x1) << 0)
+#define   G_028800_STENCIL_ENABLE(x)                   (((x) >> 0) & 0x1)
+#define   C_028800_STENCIL_ENABLE                      0xFFFFFFFE
+#define   S_028800_Z_ENABLE(x)                         (((x) & 0x1) << 1)
+#define   G_028800_Z_ENABLE(x)                         (((x) >> 1) & 0x1)
+#define   C_028800_Z_ENABLE                            0xFFFFFFFD
+#define   S_028800_Z_WRITE_ENABLE(x)                   (((x) & 0x1) << 2)
+#define   G_028800_Z_WRITE_ENABLE(x)                   (((x) >> 2) & 0x1)
+#define   C_028800_Z_WRITE_ENABLE                      0xFFFFFFFB
+#define   S_028800_ZFUNC(x)                            (((x) & 0x7) << 4)
+#define   G_028800_ZFUNC(x)                            (((x) >> 4) & 0x7)
+#define   C_028800_ZFUNC                               0xFFFFFF8F
+#define   S_028800_BACKFACE_ENABLE(x)                  (((x) & 0x1) << 7)
+#define   G_028800_BACKFACE_ENABLE(x)                  (((x) >> 7) & 0x1)
+#define   C_028800_BACKFACE_ENABLE                     0xFFFFFF7F
+#define   S_028800_STENCILFUNC(x)                      (((x) & 0x7) << 8)
+#define   G_028800_STENCILFUNC(x)                      (((x) >> 8) & 0x7)
+#define   C_028800_STENCILFUNC                         0xFFFFF8FF
+#define     V_028800_STENCILFUNC_NEVER                 0x00000000
+#define     V_028800_STENCILFUNC_LESS                  0x00000001
+#define     V_028800_STENCILFUNC_EQUAL                 0x00000002
+#define     V_028800_STENCILFUNC_LEQUAL                0x00000003
+#define     V_028800_STENCILFUNC_GREATER               0x00000004
+#define     V_028800_STENCILFUNC_NOTEQUAL              0x00000005
+#define     V_028800_STENCILFUNC_GEQUAL                0x00000006
+#define     V_028800_STENCILFUNC_ALWAYS                0x00000007
+#define   S_028800_STENCILFAIL(x)                      (((x) & 0x7) << 11)
+#define   G_028800_STENCILFAIL(x)                      (((x) >> 11) & 0x7)
+#define   C_028800_STENCILFAIL                         0xFFFFC7FF
+#define     V_028800_STENCIL_KEEP                      0x00000000
+#define     V_028800_STENCIL_ZERO                      0x00000001
+#define     V_028800_STENCIL_REPLACE                   0x00000002
+#define     V_028800_STENCIL_INCR                      0x00000003
+#define     V_028800_STENCIL_DECR                      0x00000004
+#define     V_028800_STENCIL_INVERT                    0x00000005
+#define     V_028800_STENCIL_INCR_WRAP                 0x00000006
+#define     V_028800_STENCIL_DECR_WRAP                 0x00000007
+#define   S_028800_STENCILZPASS(x)                     (((x) & 0x7) << 14)
+#define   G_028800_STENCILZPASS(x)                     (((x) >> 14) & 0x7)
+#define   C_028800_STENCILZPASS                        0xFFFE3FFF
+#define   S_028800_STENCILZFAIL(x)                     (((x) & 0x7) << 17)
+#define   G_028800_STENCILZFAIL(x)                     (((x) >> 17) & 0x7)
+#define   C_028800_STENCILZFAIL                        0xFFF1FFFF
+#define   S_028800_STENCILFUNC_BF(x)                   (((x) & 0x7) << 20)
+#define   G_028800_STENCILFUNC_BF(x)                   (((x) >> 20) & 0x7)
+#define   C_028800_STENCILFUNC_BF                      0xFF8FFFFF
+#define   S_028800_STENCILFAIL_BF(x)                   (((x) & 0x7) << 23)
+#define   G_028800_STENCILFAIL_BF(x)                   (((x) >> 23) & 0x7)
+#define   C_028800_STENCILFAIL_BF                      0xFC7FFFFF
+#define   S_028800_STENCILZPASS_BF(x)                  (((x) & 0x7) << 26)
+#define   G_028800_STENCILZPASS_BF(x)                  (((x) >> 26) & 0x7)
+#define   C_028800_STENCILZPASS_BF                     0xE3FFFFFF
+#define   S_028800_STENCILZFAIL_BF(x)                  (((x) & 0x7) << 29)
+#define   G_028800_STENCILZFAIL_BF(x)                  (((x) >> 29) & 0x7)
+#define   C_028800_STENCILZFAIL_BF                     0x1FFFFFFF
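+
+/*
+ * Illustrative usage sketch (not part of the original header): throughout
+ * this file the S_ macros shift a field value into position, the G_ macros
+ * extract it, and the C_ constants are AND masks that clear the field.  A
+ * read-modify-write update of one field is thus:
+ *
+ *   v = (v & C_028800_ZFUNC) | S_028800_ZFUNC(f);
+ *
+ * and G_028800_ZFUNC(v) reads the field back.
+ */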
+#define DB_DEPTH_VIEW					0x28008
+#define R_028008_DB_DEPTH_VIEW                       0x00028008
+#define   S_028008_SLICE_START(x)                      (((x) & 0x7FF) << 0)
+#define   G_028008_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
+#define   C_028008_SLICE_START                         0xFFFFF800
+#define   S_028008_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
+#define   G_028008_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
+#define   C_028008_SLICE_MAX                           0xFF001FFF
+#define DB_HTILE_DATA_BASE				0x28014
+#define DB_HTILE_SURFACE				0x28abc
+#define   S_028ABC_HTILE_WIDTH(x)                      (((x) & 0x1) << 0)
+#define   G_028ABC_HTILE_WIDTH(x)                      (((x) >> 0) & 0x1)
+#define   C_028ABC_HTILE_WIDTH                         0xFFFFFFFE
+#define   S_028ABC_HTILE_HEIGHT(x)                     (((x) & 0x1) << 1)
+#define   G_028ABC_HTILE_HEIGHT(x)                     (((x) >> 1) & 0x1)
+#define   C_028ABC_HTILE_HEIGHT                        0xFFFFFFFD
+#define   G_028ABC_LINEAR(x)                           (((x) >> 2) & 0x1)
+#define DB_Z_INFO					0x28040
+#       define Z_ARRAY_MODE(x)                          ((x) << 4)
+#       define DB_TILE_SPLIT(x)                         (((x) & 0x7) << 8)
+#       define DB_NUM_BANKS(x)                          (((x) & 0x3) << 12)
+#       define DB_BANK_WIDTH(x)                         (((x) & 0x3) << 16)
+#       define DB_BANK_HEIGHT(x)                        (((x) & 0x3) << 20)
+#       define DB_MACRO_TILE_ASPECT(x)                  (((x) & 0x3) << 24)
+#define R_028040_DB_Z_INFO                       0x028040
+#define   S_028040_FORMAT(x)                           (((x) & 0x3) << 0)
+#define   G_028040_FORMAT(x)                           (((x) >> 0) & 0x3)
+#define   C_028040_FORMAT                              0xFFFFFFFC
+#define     V_028040_Z_INVALID                     0x00000000
+#define     V_028040_Z_16                          0x00000001
+#define     V_028040_Z_24                          0x00000002
+#define     V_028040_Z_32_FLOAT                    0x00000003
+#define   S_028040_ARRAY_MODE(x)                       (((x) & 0xF) << 4)
+#define   G_028040_ARRAY_MODE(x)                       (((x) >> 4) & 0xF)
+#define   C_028040_ARRAY_MODE                          0xFFFFFF0F
+#define   S_028040_READ_SIZE(x)                        (((x) & 0x1) << 28)
+#define   G_028040_READ_SIZE(x)                        (((x) >> 28) & 0x1)
+#define   C_028040_READ_SIZE                           0xEFFFFFFF
+#define   S_028040_TILE_SURFACE_ENABLE(x)              (((x) & 0x1) << 29)
+#define   G_028040_TILE_SURFACE_ENABLE(x)              (((x) >> 29) & 0x1)
+#define   C_028040_TILE_SURFACE_ENABLE                 0xDFFFFFFF
+#define   S_028040_ZRANGE_PRECISION(x)                 (((x) & 0x1) << 31)
+#define   G_028040_ZRANGE_PRECISION(x)                 (((x) >> 31) & 0x1)
+#define   C_028040_ZRANGE_PRECISION                    0x7FFFFFFF
+#define   S_028040_TILE_SPLIT(x)                       (((x) & 0x7) << 8)
+#define   G_028040_TILE_SPLIT(x)                       (((x) >> 8) & 0x7)
+#define   S_028040_NUM_BANKS(x)                        (((x) & 0x3) << 12)
+#define   G_028040_NUM_BANKS(x)                        (((x) >> 12) & 0x3)
+#define   S_028040_BANK_WIDTH(x)                       (((x) & 0x3) << 16)
+#define   G_028040_BANK_WIDTH(x)                       (((x) >> 16) & 0x3)
+#define   S_028040_BANK_HEIGHT(x)                      (((x) & 0x3) << 20)
+#define   G_028040_BANK_HEIGHT(x)                      (((x) >> 20) & 0x3)
+#define   S_028040_MACRO_TILE_ASPECT(x)                (((x) & 0x3) << 24)
+#define   G_028040_MACRO_TILE_ASPECT(x)                (((x) >> 24) & 0x3)
+#define DB_STENCIL_INFO					0x28044
+#define R_028044_DB_STENCIL_INFO                     0x028044
+#define   S_028044_FORMAT(x)                           (((x) & 0x1) << 0)
+#define   G_028044_FORMAT(x)                           (((x) >> 0) & 0x1)
+#define   C_028044_FORMAT                              0xFFFFFFFE
+#define     V_028044_STENCIL_INVALID                   0x00000000
+#define     V_028044_STENCIL_8                         0x00000001
+#define   G_028044_TILE_SPLIT(x)                       (((x) >> 8) & 0x7)
+#define DB_Z_READ_BASE					0x28048
+#define DB_STENCIL_READ_BASE				0x2804c
+#define DB_Z_WRITE_BASE					0x28050
+#define DB_STENCIL_WRITE_BASE				0x28054
+#define DB_DEPTH_SIZE					0x28058
+#define R_028058_DB_DEPTH_SIZE                       0x028058
+#define   S_028058_PITCH_TILE_MAX(x)                   (((x) & 0x7FF) << 0)
+#define   G_028058_PITCH_TILE_MAX(x)                   (((x) >> 0) & 0x7FF)
+#define   C_028058_PITCH_TILE_MAX                      0xFFFFF800
+#define   S_028058_HEIGHT_TILE_MAX(x)                  (((x) & 0x7FF) << 11)
+#define   G_028058_HEIGHT_TILE_MAX(x)                  (((x) >> 11) & 0x7FF)
+#define   C_028058_HEIGHT_TILE_MAX                     0xFFC007FF
+#define R_02805C_DB_DEPTH_SLICE                      0x02805C
+#define   S_02805C_SLICE_TILE_MAX(x)                   (((x) & 0x3FFFFF) << 0)
+#define   G_02805C_SLICE_TILE_MAX(x)                   (((x) >> 0) & 0x3FFFFF)
+#define   C_02805C_SLICE_TILE_MAX                      0xFFC00000
+
+#define SQ_PGM_START_PS					0x28840
+#define SQ_PGM_START_VS					0x2885c
+#define SQ_PGM_START_GS					0x28874
+#define SQ_PGM_START_ES					0x2888c
+#define SQ_PGM_START_FS					0x288a4
+#define SQ_PGM_START_HS					0x288b8
+#define SQ_PGM_START_LS					0x288d0
+
+#define	VGT_STRMOUT_BUFFER_BASE_0			0x28AD8
+#define	VGT_STRMOUT_BUFFER_BASE_1			0x28AE8
+#define	VGT_STRMOUT_BUFFER_BASE_2			0x28AF8
+#define	VGT_STRMOUT_BUFFER_BASE_3			0x28B08
+#define VGT_STRMOUT_BUFFER_SIZE_0			0x28AD0
+#define VGT_STRMOUT_BUFFER_SIZE_1			0x28AE0
+#define VGT_STRMOUT_BUFFER_SIZE_2			0x28AF0
+#define VGT_STRMOUT_BUFFER_SIZE_3			0x28B00
+#define VGT_STRMOUT_CONFIG				0x28b94
+#define VGT_STRMOUT_BUFFER_CONFIG			0x28b98
+
+#define CB_TARGET_MASK					0x28238
+#define CB_SHADER_MASK					0x2823c
+
+#define GDS_ADDR_BASE					0x28720
+
+#define	CB_IMMED0_BASE					0x28b9c
+#define	CB_IMMED1_BASE					0x28ba0
+#define	CB_IMMED2_BASE					0x28ba4
+#define	CB_IMMED3_BASE					0x28ba8
+#define	CB_IMMED4_BASE					0x28bac
+#define	CB_IMMED5_BASE					0x28bb0
+#define	CB_IMMED6_BASE					0x28bb4
+#define	CB_IMMED7_BASE					0x28bb8
+#define	CB_IMMED8_BASE					0x28bbc
+#define	CB_IMMED9_BASE					0x28bc0
+#define	CB_IMMED10_BASE					0x28bc4
+#define	CB_IMMED11_BASE					0x28bc8
+
+/* all 12 CB blocks have these regs */
+#define	CB_COLOR0_BASE					0x28c60
+#define	CB_COLOR0_PITCH					0x28c64
+#define	CB_COLOR0_SLICE					0x28c68
+#define	CB_COLOR0_VIEW					0x28c6c
+#define R_028C6C_CB_COLOR0_VIEW                      0x00028C6C
+#define   S_028C6C_SLICE_START(x)                      (((x) & 0x7FF) << 0)
+#define   G_028C6C_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
+#define   C_028C6C_SLICE_START                         0xFFFFF800
+#define   S_028C6C_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
+#define   G_028C6C_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
+#define   C_028C6C_SLICE_MAX                           0xFF001FFF
+#define R_028C70_CB_COLOR0_INFO                      0x028C70
+#define   S_028C70_ENDIAN(x)                           (((x) & 0x3) << 0)
+#define   G_028C70_ENDIAN(x)                           (((x) >> 0) & 0x3)
+#define   C_028C70_ENDIAN                              0xFFFFFFFC
+#define   S_028C70_FORMAT(x)                           (((x) & 0x3F) << 2)
+#define   G_028C70_FORMAT(x)                           (((x) >> 2) & 0x3F)
+#define   C_028C70_FORMAT                              0xFFFFFF03
+#define     V_028C70_COLOR_INVALID                     0x00000000
+#define     V_028C70_COLOR_8                           0x00000001
+#define     V_028C70_COLOR_4_4                         0x00000002
+#define     V_028C70_COLOR_3_3_2                       0x00000003
+#define     V_028C70_COLOR_16                          0x00000005
+#define     V_028C70_COLOR_16_FLOAT                    0x00000006
+#define     V_028C70_COLOR_8_8                         0x00000007
+#define     V_028C70_COLOR_5_6_5                       0x00000008
+#define     V_028C70_COLOR_6_5_5                       0x00000009
+#define     V_028C70_COLOR_1_5_5_5                     0x0000000A
+#define     V_028C70_COLOR_4_4_4_4                     0x0000000B
+#define     V_028C70_COLOR_5_5_5_1                     0x0000000C
+#define     V_028C70_COLOR_32                          0x0000000D
+#define     V_028C70_COLOR_32_FLOAT                    0x0000000E
+#define     V_028C70_COLOR_16_16                       0x0000000F
+#define     V_028C70_COLOR_16_16_FLOAT                 0x00000010
+#define     V_028C70_COLOR_8_24                        0x00000011
+#define     V_028C70_COLOR_8_24_FLOAT                  0x00000012
+#define     V_028C70_COLOR_24_8                        0x00000013
+#define     V_028C70_COLOR_24_8_FLOAT                  0x00000014
+#define     V_028C70_COLOR_10_11_11                    0x00000015
+#define     V_028C70_COLOR_10_11_11_FLOAT              0x00000016
+#define     V_028C70_COLOR_11_11_10                    0x00000017
+#define     V_028C70_COLOR_11_11_10_FLOAT              0x00000018
+#define     V_028C70_COLOR_2_10_10_10                  0x00000019
+#define     V_028C70_COLOR_8_8_8_8                     0x0000001A
+#define     V_028C70_COLOR_10_10_10_2                  0x0000001B
+#define     V_028C70_COLOR_X24_8_32_FLOAT              0x0000001C
+#define     V_028C70_COLOR_32_32                       0x0000001D
+#define     V_028C70_COLOR_32_32_FLOAT                 0x0000001E
+#define     V_028C70_COLOR_16_16_16_16                 0x0000001F
+#define     V_028C70_COLOR_16_16_16_16_FLOAT           0x00000020
+#define     V_028C70_COLOR_32_32_32_32                 0x00000022
+#define     V_028C70_COLOR_32_32_32_32_FLOAT           0x00000023
+#define     V_028C70_COLOR_32_32_32_FLOAT              0x00000030
+#define   S_028C70_ARRAY_MODE(x)                       (((x) & 0xF) << 8)
+#define   G_028C70_ARRAY_MODE(x)                       (((x) >> 8) & 0xF)
+#define   C_028C70_ARRAY_MODE                          0xFFFFF0FF
+#define     V_028C70_ARRAY_LINEAR_GENERAL              0x00000000
+#define     V_028C70_ARRAY_LINEAR_ALIGNED              0x00000001
+#define     V_028C70_ARRAY_1D_TILED_THIN1              0x00000002
+#define     V_028C70_ARRAY_2D_TILED_THIN1              0x00000004
+#define   S_028C70_NUMBER_TYPE(x)                      (((x) & 0x7) << 12)
+#define   G_028C70_NUMBER_TYPE(x)                      (((x) >> 12) & 0x7)
+#define   C_028C70_NUMBER_TYPE                         0xFFFF8FFF
+#define     V_028C70_NUMBER_UNORM                      0x00000000
+#define     V_028C70_NUMBER_SNORM                      0x00000001
+#define     V_028C70_NUMBER_USCALED                    0x00000002
+#define     V_028C70_NUMBER_SSCALED                    0x00000003
+#define     V_028C70_NUMBER_UINT                       0x00000004
+#define     V_028C70_NUMBER_SINT                       0x00000005
+#define     V_028C70_NUMBER_SRGB                       0x00000006
+#define     V_028C70_NUMBER_FLOAT                      0x00000007
+#define   S_028C70_COMP_SWAP(x)                        (((x) & 0x3) << 15)
+#define   G_028C70_COMP_SWAP(x)                        (((x) >> 15) & 0x3)
+#define   C_028C70_COMP_SWAP                           0xFFFE7FFF
+#define     V_028C70_SWAP_STD                          0x00000000
+#define     V_028C70_SWAP_ALT                          0x00000001
+#define     V_028C70_SWAP_STD_REV                      0x00000002
+#define     V_028C70_SWAP_ALT_REV                      0x00000003
+#define   S_028C70_FAST_CLEAR(x)                       (((x) & 0x1) << 17)
+#define   G_028C70_FAST_CLEAR(x)                       (((x) >> 17) & 0x1)
+#define   C_028C70_FAST_CLEAR                          0xFFFDFFFF
+#define   S_028C70_COMPRESSION(x)                      (((x) & 0x3) << 18)
+#define   G_028C70_COMPRESSION(x)                      (((x) >> 18) & 0x3)
+#define   C_028C70_COMPRESSION                         0xFFF3FFFF
+#define   S_028C70_BLEND_CLAMP(x)                      (((x) & 0x1) << 19)
+#define   G_028C70_BLEND_CLAMP(x)                      (((x) >> 19) & 0x1)
+#define   C_028C70_BLEND_CLAMP                         0xFFF7FFFF
+#define   S_028C70_BLEND_BYPASS(x)                     (((x) & 0x1) << 20)
+#define   G_028C70_BLEND_BYPASS(x)                     (((x) >> 20) & 0x1)
+#define   C_028C70_BLEND_BYPASS                        0xFFEFFFFF
+#define   S_028C70_SIMPLE_FLOAT(x)                     (((x) & 0x1) << 21)
+#define   G_028C70_SIMPLE_FLOAT(x)                     (((x) >> 21) & 0x1)
+#define   C_028C70_SIMPLE_FLOAT                        0xFFDFFFFF
+#define   S_028C70_ROUND_MODE(x)                       (((x) & 0x1) << 22)
+#define   G_028C70_ROUND_MODE(x)                       (((x) >> 22) & 0x1)
+#define   C_028C70_ROUND_MODE                          0xFFBFFFFF
+#define   S_028C70_TILE_COMPACT(x)                     (((x) & 0x1) << 23)
+#define   G_028C70_TILE_COMPACT(x)                     (((x) >> 23) & 0x1)
+#define   C_028C70_TILE_COMPACT                        0xFF7FFFFF
+#define   S_028C70_SOURCE_FORMAT(x)                    (((x) & 0x3) << 24)
+#define   G_028C70_SOURCE_FORMAT(x)                    (((x) >> 24) & 0x3)
+#define   C_028C70_SOURCE_FORMAT                       0xFCFFFFFF
+#define     V_028C70_EXPORT_4C_32BPC                   0x0
+#define     V_028C70_EXPORT_4C_16BPC                   0x1
+#define     V_028C70_EXPORT_2C_32BPC                   0x2 /* Do not use */
+#define   S_028C70_RAT(x)                              (((x) & 0x1) << 26)
+#define   G_028C70_RAT(x)                              (((x) >> 26) & 0x1)
+#define   C_028C70_RAT                                 0xFBFFFFFF
+#define   S_028C70_RESOURCE_TYPE(x)                    (((x) & 0x7) << 27)
+#define   G_028C70_RESOURCE_TYPE(x)                    (((x) >> 27) & 0x7)
+#define   C_028C70_RESOURCE_TYPE                       0xC7FFFFFF
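+
+/*
+ * Illustrative packing sketch (not part of the original header): a color
+ * buffer info word for, say, an 8_8_8_8 UNORM 1D-tiled target could be
+ * composed from the fields above as
+ *
+ *   S_028C70_FORMAT(V_028C70_COLOR_8_8_8_8) |
+ *   S_028C70_NUMBER_TYPE(V_028C70_NUMBER_UNORM) |
+ *   S_028C70_ARRAY_MODE(V_028C70_ARRAY_1D_TILED_THIN1)
+ */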
+
+#define	CB_COLOR0_INFO					0x28c70
+#	define CB_FORMAT(x)				((x) << 2)
+#       define CB_ARRAY_MODE(x)                         ((x) << 8)
+#       define ARRAY_LINEAR_GENERAL                     0
+#       define ARRAY_LINEAR_ALIGNED                     1
+#       define ARRAY_1D_TILED_THIN1                     2
+#       define ARRAY_2D_TILED_THIN1                     4
+#	define CB_SOURCE_FORMAT(x)			((x) << 24)
+#	define CB_SF_EXPORT_FULL			0
+#	define CB_SF_EXPORT_NORM			1
+#define R_028C74_CB_COLOR0_ATTRIB                      0x028C74
+#define   S_028C74_NON_DISP_TILING_ORDER(x)            (((x) & 0x1) << 4)
+#define   G_028C74_NON_DISP_TILING_ORDER(x)            (((x) >> 4) & 0x1)
+#define   C_028C74_NON_DISP_TILING_ORDER               0xFFFFFFEF
+#define   S_028C74_TILE_SPLIT(x)                       (((x) & 0xf) << 5)
+#define   G_028C74_TILE_SPLIT(x)                       (((x) >> 5) & 0xf)
+#define   S_028C74_NUM_BANKS(x)                        (((x) & 0x3) << 10)
+#define   G_028C74_NUM_BANKS(x)                        (((x) >> 10) & 0x3)
+#define   S_028C74_BANK_WIDTH(x)                       (((x) & 0x3) << 13)
+#define   G_028C74_BANK_WIDTH(x)                       (((x) >> 13) & 0x3)
+#define   S_028C74_BANK_HEIGHT(x)                      (((x) & 0x3) << 16)
+#define   G_028C74_BANK_HEIGHT(x)                      (((x) >> 16) & 0x3)
+#define   S_028C74_MACRO_TILE_ASPECT(x)                (((x) & 0x3) << 19)
+#define   G_028C74_MACRO_TILE_ASPECT(x)                (((x) >> 19) & 0x3)
+#define	CB_COLOR0_ATTRIB				0x28c74
+#       define CB_TILE_SPLIT(x)                         (((x) & 0x7) << 5)
+#       define ADDR_SURF_TILE_SPLIT_64B                 0
+#       define ADDR_SURF_TILE_SPLIT_128B                1
+#       define ADDR_SURF_TILE_SPLIT_256B                2
+#       define ADDR_SURF_TILE_SPLIT_512B                3
+#       define ADDR_SURF_TILE_SPLIT_1KB                 4
+#       define ADDR_SURF_TILE_SPLIT_2KB                 5
+#       define ADDR_SURF_TILE_SPLIT_4KB                 6
+#       define CB_NUM_BANKS(x)                          (((x) & 0x3) << 10)
+#       define ADDR_SURF_2_BANK                         0
+#       define ADDR_SURF_4_BANK                         1
+#       define ADDR_SURF_8_BANK                         2
+#       define ADDR_SURF_16_BANK                        3
+#       define CB_BANK_WIDTH(x)                         (((x) & 0x3) << 13)
+#       define ADDR_SURF_BANK_WIDTH_1                   0
+#       define ADDR_SURF_BANK_WIDTH_2                   1
+#       define ADDR_SURF_BANK_WIDTH_4                   2
+#       define ADDR_SURF_BANK_WIDTH_8                   3
+#       define CB_BANK_HEIGHT(x)                        (((x) & 0x3) << 16)
+#       define ADDR_SURF_BANK_HEIGHT_1                  0
+#       define ADDR_SURF_BANK_HEIGHT_2                  1
+#       define ADDR_SURF_BANK_HEIGHT_4                  2
+#       define ADDR_SURF_BANK_HEIGHT_8                  3
+#       define CB_MACRO_TILE_ASPECT(x)                  (((x) & 0x3) << 19)
+#define	CB_COLOR0_DIM					0x28c78
+/* only CB0-7 blocks have these regs */
+#define	CB_COLOR0_CMASK					0x28c7c
+#define	CB_COLOR0_CMASK_SLICE				0x28c80
+#define	CB_COLOR0_FMASK					0x28c84
+#define	CB_COLOR0_FMASK_SLICE				0x28c88
+#define	CB_COLOR0_CLEAR_WORD0				0x28c8c
+#define	CB_COLOR0_CLEAR_WORD1				0x28c90
+#define	CB_COLOR0_CLEAR_WORD2				0x28c94
+#define	CB_COLOR0_CLEAR_WORD3				0x28c98
+
+#define	CB_COLOR1_BASE					0x28c9c
+#define	CB_COLOR2_BASE					0x28cd8
+#define	CB_COLOR3_BASE					0x28d14
+#define	CB_COLOR4_BASE					0x28d50
+#define	CB_COLOR5_BASE					0x28d8c
+#define	CB_COLOR6_BASE					0x28dc8
+#define	CB_COLOR7_BASE					0x28e04
+#define	CB_COLOR8_BASE					0x28e40
+#define	CB_COLOR9_BASE					0x28e5c
+#define	CB_COLOR10_BASE					0x28e78
+#define	CB_COLOR11_BASE					0x28e94
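+
+/*
+ * Note (illustrative, not in the original header): the CB0-7 register
+ * blocks are spaced 0x3c bytes apart, while CB8-11 are spaced 0x1c apart
+ * because they lack the CMASK/FMASK/CLEAR_WORD registers, so a per-index
+ * base could be computed as
+ *
+ *   reg = (n) < 8 ? CB_COLOR0_BASE + (n) * 0x3c
+ *                 : CB_COLOR8_BASE + ((n) - 8) * 0x1c;
+ */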
+
+#define	CB_COLOR1_PITCH					0x28ca0
+#define	CB_COLOR2_PITCH					0x28cdc
+#define	CB_COLOR3_PITCH					0x28d18
+#define	CB_COLOR4_PITCH					0x28d54
+#define	CB_COLOR5_PITCH					0x28d90
+#define	CB_COLOR6_PITCH					0x28dcc
+#define	CB_COLOR7_PITCH					0x28e08
+#define	CB_COLOR8_PITCH					0x28e44
+#define	CB_COLOR9_PITCH					0x28e60
+#define	CB_COLOR10_PITCH				0x28e7c
+#define	CB_COLOR11_PITCH				0x28e98
+
+#define	CB_COLOR1_SLICE					0x28ca4
+#define	CB_COLOR2_SLICE					0x28ce0
+#define	CB_COLOR3_SLICE					0x28d1c
+#define	CB_COLOR4_SLICE					0x28d58
+#define	CB_COLOR5_SLICE					0x28d94
+#define	CB_COLOR6_SLICE					0x28dd0
+#define	CB_COLOR7_SLICE					0x28e0c
+#define	CB_COLOR8_SLICE					0x28e48
+#define	CB_COLOR9_SLICE					0x28e64
+#define	CB_COLOR10_SLICE				0x28e80
+#define	CB_COLOR11_SLICE				0x28e9c
+
+#define	CB_COLOR1_VIEW					0x28ca8
+#define	CB_COLOR2_VIEW					0x28ce4
+#define	CB_COLOR3_VIEW					0x28d20
+#define	CB_COLOR4_VIEW					0x28d5c
+#define	CB_COLOR5_VIEW					0x28d98
+#define	CB_COLOR6_VIEW					0x28dd4
+#define	CB_COLOR7_VIEW					0x28e10
+#define	CB_COLOR8_VIEW					0x28e4c
+#define	CB_COLOR9_VIEW					0x28e68
+#define	CB_COLOR10_VIEW					0x28e84
+#define	CB_COLOR11_VIEW					0x28ea0
+
+#define	CB_COLOR1_INFO					0x28cac
+#define	CB_COLOR2_INFO					0x28ce8
+#define	CB_COLOR3_INFO					0x28d24
+#define	CB_COLOR4_INFO					0x28d60
+#define	CB_COLOR5_INFO					0x28d9c
+#define	CB_COLOR6_INFO					0x28dd8
+#define	CB_COLOR7_INFO					0x28e14
+#define	CB_COLOR8_INFO					0x28e50
+#define	CB_COLOR9_INFO					0x28e6c
+#define	CB_COLOR10_INFO					0x28e88
+#define	CB_COLOR11_INFO					0x28ea4
+
+#define	CB_COLOR1_ATTRIB				0x28cb0
+#define	CB_COLOR2_ATTRIB				0x28cec
+#define	CB_COLOR3_ATTRIB				0x28d28
+#define	CB_COLOR4_ATTRIB				0x28d64
+#define	CB_COLOR5_ATTRIB				0x28da0
+#define	CB_COLOR6_ATTRIB				0x28ddc
+#define	CB_COLOR7_ATTRIB				0x28e18
+#define	CB_COLOR8_ATTRIB				0x28e54
+#define	CB_COLOR9_ATTRIB				0x28e70
+#define	CB_COLOR10_ATTRIB				0x28e8c
+#define	CB_COLOR11_ATTRIB				0x28ea8
+
+#define	CB_COLOR1_DIM					0x28cb4
+#define	CB_COLOR2_DIM					0x28cf0
+#define	CB_COLOR3_DIM					0x28d2c
+#define	CB_COLOR4_DIM					0x28d68
+#define	CB_COLOR5_DIM					0x28da4
+#define	CB_COLOR6_DIM					0x28de0
+#define	CB_COLOR7_DIM					0x28e1c
+#define	CB_COLOR8_DIM					0x28e58
+#define	CB_COLOR9_DIM					0x28e74
+#define	CB_COLOR10_DIM					0x28e90
+#define	CB_COLOR11_DIM					0x28eac
+
+#define	CB_COLOR1_CMASK					0x28cb8
+#define	CB_COLOR2_CMASK					0x28cf4
+#define	CB_COLOR3_CMASK					0x28d30
+#define	CB_COLOR4_CMASK					0x28d6c
+#define	CB_COLOR5_CMASK					0x28da8
+#define	CB_COLOR6_CMASK					0x28de4
+#define	CB_COLOR7_CMASK					0x28e20
+
+#define	CB_COLOR1_CMASK_SLICE				0x28cbc
+#define	CB_COLOR2_CMASK_SLICE				0x28cf8
+#define	CB_COLOR3_CMASK_SLICE				0x28d34
+#define	CB_COLOR4_CMASK_SLICE				0x28d70
+#define	CB_COLOR5_CMASK_SLICE				0x28dac
+#define	CB_COLOR6_CMASK_SLICE				0x28de8
+#define	CB_COLOR7_CMASK_SLICE				0x28e24
+
+#define	CB_COLOR1_FMASK					0x28cc0
+#define	CB_COLOR2_FMASK					0x28cfc
+#define	CB_COLOR3_FMASK					0x28d38
+#define	CB_COLOR4_FMASK					0x28d74
+#define	CB_COLOR5_FMASK					0x28db0
+#define	CB_COLOR6_FMASK					0x28dec
+#define	CB_COLOR7_FMASK					0x28e28
+
+#define	CB_COLOR1_FMASK_SLICE				0x28cc4
+#define	CB_COLOR2_FMASK_SLICE				0x28d00
+#define	CB_COLOR3_FMASK_SLICE				0x28d3c
+#define	CB_COLOR4_FMASK_SLICE				0x28d78
+#define	CB_COLOR5_FMASK_SLICE				0x28db4
+#define	CB_COLOR6_FMASK_SLICE				0x28df0
+#define	CB_COLOR7_FMASK_SLICE				0x28e2c
+
+#define	CB_COLOR1_CLEAR_WORD0				0x28cc8
+#define	CB_COLOR2_CLEAR_WORD0				0x28d04
+#define	CB_COLOR3_CLEAR_WORD0				0x28d40
+#define	CB_COLOR4_CLEAR_WORD0				0x28d7c
+#define	CB_COLOR5_CLEAR_WORD0				0x28db8
+#define	CB_COLOR6_CLEAR_WORD0				0x28df4
+#define	CB_COLOR7_CLEAR_WORD0				0x28e30
+
+#define	CB_COLOR1_CLEAR_WORD1				0x28ccc
+#define	CB_COLOR2_CLEAR_WORD1				0x28d08
+#define	CB_COLOR3_CLEAR_WORD1				0x28d44
+#define	CB_COLOR4_CLEAR_WORD1				0x28d80
+#define	CB_COLOR5_CLEAR_WORD1				0x28dbc
+#define	CB_COLOR6_CLEAR_WORD1				0x28df8
+#define	CB_COLOR7_CLEAR_WORD1				0x28e34
+
+#define	CB_COLOR1_CLEAR_WORD2				0x28cd0
+#define	CB_COLOR2_CLEAR_WORD2				0x28d0c
+#define	CB_COLOR3_CLEAR_WORD2				0x28d48
+#define	CB_COLOR4_CLEAR_WORD2				0x28d84
+#define	CB_COLOR5_CLEAR_WORD2				0x28dc0
+#define	CB_COLOR6_CLEAR_WORD2				0x28dfc
+#define	CB_COLOR7_CLEAR_WORD2				0x28e38
+
+#define	CB_COLOR1_CLEAR_WORD3				0x28cd4
+#define	CB_COLOR2_CLEAR_WORD3				0x28d10
+#define	CB_COLOR3_CLEAR_WORD3				0x28d4c
+#define	CB_COLOR4_CLEAR_WORD3				0x28d88
+#define	CB_COLOR5_CLEAR_WORD3				0x28dc4
+#define	CB_COLOR6_CLEAR_WORD3				0x28e00
+#define	CB_COLOR7_CLEAR_WORD3				0x28e3c
+
+#define SQ_TEX_RESOURCE_WORD0_0                         0x30000
+#	define TEX_DIM(x)				((x) << 0)
+#	define SQ_TEX_DIM_1D				0
+#	define SQ_TEX_DIM_2D				1
+#	define SQ_TEX_DIM_3D				2
+#	define SQ_TEX_DIM_CUBEMAP			3
+#	define SQ_TEX_DIM_1D_ARRAY			4
+#	define SQ_TEX_DIM_2D_ARRAY			5
+#	define SQ_TEX_DIM_2D_MSAA			6
+#	define SQ_TEX_DIM_2D_ARRAY_MSAA			7
+#define SQ_TEX_RESOURCE_WORD1_0                         0x30004
+#       define TEX_ARRAY_MODE(x)                        ((x) << 28)
+#define SQ_TEX_RESOURCE_WORD2_0                         0x30008
+#define SQ_TEX_RESOURCE_WORD3_0                         0x3000C
+#define SQ_TEX_RESOURCE_WORD4_0                         0x30010
+#	define TEX_DST_SEL_X(x)				((x) << 16)
+#	define TEX_DST_SEL_Y(x)				((x) << 19)
+#	define TEX_DST_SEL_Z(x)				((x) << 22)
+#	define TEX_DST_SEL_W(x)				((x) << 25)
+#	define SQ_SEL_X					0
+#	define SQ_SEL_Y					1
+#	define SQ_SEL_Z					2
+#	define SQ_SEL_W					3
+#	define SQ_SEL_0					4
+#	define SQ_SEL_1					5
+#define SQ_TEX_RESOURCE_WORD5_0                         0x30014
+#define SQ_TEX_RESOURCE_WORD6_0                         0x30018
+#       define TEX_TILE_SPLIT(x)                        (((x) & 0x7) << 29)
+#define SQ_TEX_RESOURCE_WORD7_0                         0x3001c
+#       define MACRO_TILE_ASPECT(x)                     (((x) & 0x3) << 6)
+#       define TEX_BANK_WIDTH(x)                        (((x) & 0x3) << 8)
+#       define TEX_BANK_HEIGHT(x)                       (((x) & 0x3) << 10)
+#       define TEX_NUM_BANKS(x)                         (((x) & 0x3) << 16)
+#define R_030000_SQ_TEX_RESOURCE_WORD0_0             0x030000
+#define   S_030000_DIM(x)                              (((x) & 0x7) << 0)
+#define   G_030000_DIM(x)                              (((x) >> 0) & 0x7)
+#define   C_030000_DIM                                 0xFFFFFFF8
+#define     V_030000_SQ_TEX_DIM_1D                     0x00000000
+#define     V_030000_SQ_TEX_DIM_2D                     0x00000001
+#define     V_030000_SQ_TEX_DIM_3D                     0x00000002
+#define     V_030000_SQ_TEX_DIM_CUBEMAP                0x00000003
+#define     V_030000_SQ_TEX_DIM_1D_ARRAY               0x00000004
+#define     V_030000_SQ_TEX_DIM_2D_ARRAY               0x00000005
+#define     V_030000_SQ_TEX_DIM_2D_MSAA                0x00000006
+#define     V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA          0x00000007
+#define   S_030000_NON_DISP_TILING_ORDER(x)            (((x) & 0x1) << 5)
+#define   G_030000_NON_DISP_TILING_ORDER(x)            (((x) >> 5) & 0x1)
+#define   C_030000_NON_DISP_TILING_ORDER               0xFFFFFFDF
+#define   S_030000_PITCH(x)                            (((x) & 0xFFF) << 6)
+#define   G_030000_PITCH(x)                            (((x) >> 6) & 0xFFF)
+#define   C_030000_PITCH                               0xFFFC003F
+#define   S_030000_TEX_WIDTH(x)                        (((x) & 0x3FFF) << 18)
+#define   G_030000_TEX_WIDTH(x)                        (((x) >> 18) & 0x3FFF)
+#define   C_030000_TEX_WIDTH                           0x0003FFFF
+#define R_030004_SQ_TEX_RESOURCE_WORD1_0             0x030004
+#define   S_030004_TEX_HEIGHT(x)                       (((x) & 0x3FFF) << 0)
+#define   G_030004_TEX_HEIGHT(x)                       (((x) >> 0) & 0x3FFF)
+#define   C_030004_TEX_HEIGHT                          0xFFFFC000
+#define   S_030004_TEX_DEPTH(x)                        (((x) & 0x1FFF) << 14)
+#define   G_030004_TEX_DEPTH(x)                        (((x) >> 14) & 0x1FFF)
+#define   C_030004_TEX_DEPTH                           0xF8003FFF
+#define   S_030004_ARRAY_MODE(x)                       (((x) & 0xF) << 28)
+#define   G_030004_ARRAY_MODE(x)                       (((x) >> 28) & 0xF)
+#define   C_030004_ARRAY_MODE                          0x0FFFFFFF
+#define R_030008_SQ_TEX_RESOURCE_WORD2_0             0x030008
+#define   S_030008_BASE_ADDRESS(x)                     (((x) & 0xFFFFFFFF) << 0)
+#define   G_030008_BASE_ADDRESS(x)                     (((x) >> 0) & 0xFFFFFFFF)
+#define   C_030008_BASE_ADDRESS                        0x00000000
+#define R_03000C_SQ_TEX_RESOURCE_WORD3_0             0x03000C
+#define   S_03000C_MIP_ADDRESS(x)                      (((x) & 0xFFFFFFFF) << 0)
+#define   G_03000C_MIP_ADDRESS(x)                      (((x) >> 0) & 0xFFFFFFFF)
+#define   C_03000C_MIP_ADDRESS                         0x00000000
+#define R_030010_SQ_TEX_RESOURCE_WORD4_0             0x030010
+#define   S_030010_FORMAT_COMP_X(x)                    (((x) & 0x3) << 0)
+#define   G_030010_FORMAT_COMP_X(x)                    (((x) >> 0) & 0x3)
+#define   C_030010_FORMAT_COMP_X                       0xFFFFFFFC
+#define     V_030010_SQ_FORMAT_COMP_UNSIGNED           0x00000000
+#define     V_030010_SQ_FORMAT_COMP_SIGNED             0x00000001
+#define     V_030010_SQ_FORMAT_COMP_UNSIGNED_BIASED    0x00000002
+#define   S_030010_FORMAT_COMP_Y(x)                    (((x) & 0x3) << 2)
+#define   G_030010_FORMAT_COMP_Y(x)                    (((x) >> 2) & 0x3)
+#define   C_030010_FORMAT_COMP_Y                       0xFFFFFFF3
+#define   S_030010_FORMAT_COMP_Z(x)                    (((x) & 0x3) << 4)
+#define   G_030010_FORMAT_COMP_Z(x)                    (((x) >> 4) & 0x3)
+#define   C_030010_FORMAT_COMP_Z                       0xFFFFFFCF
+#define   S_030010_FORMAT_COMP_W(x)                    (((x) & 0x3) << 6)
+#define   G_030010_FORMAT_COMP_W(x)                    (((x) >> 6) & 0x3)
+#define   C_030010_FORMAT_COMP_W                       0xFFFFFF3F
+#define   S_030010_NUM_FORMAT_ALL(x)                   (((x) & 0x3) << 8)
+#define   G_030010_NUM_FORMAT_ALL(x)                   (((x) >> 8) & 0x3)
+#define   C_030010_NUM_FORMAT_ALL                      0xFFFFFCFF
+#define     V_030010_SQ_NUM_FORMAT_NORM                0x00000000
+#define     V_030010_SQ_NUM_FORMAT_INT                 0x00000001
+#define     V_030010_SQ_NUM_FORMAT_SCALED              0x00000002
+#define   S_030010_SRF_MODE_ALL(x)                     (((x) & 0x1) << 10)
+#define   G_030010_SRF_MODE_ALL(x)                     (((x) >> 10) & 0x1)
+#define   C_030010_SRF_MODE_ALL                        0xFFFFFBFF
+#define     V_030010_SRF_MODE_ZERO_CLAMP_MINUS_ONE     0x00000000
+#define     V_030010_SRF_MODE_NO_ZERO                  0x00000001
+#define   S_030010_FORCE_DEGAMMA(x)                    (((x) & 0x1) << 11)
+#define   G_030010_FORCE_DEGAMMA(x)                    (((x) >> 11) & 0x1)
+#define   C_030010_FORCE_DEGAMMA                       0xFFFFF7FF
+#define   S_030010_ENDIAN_SWAP(x)                      (((x) & 0x3) << 12)
+#define   G_030010_ENDIAN_SWAP(x)                      (((x) >> 12) & 0x3)
+#define   C_030010_ENDIAN_SWAP                         0xFFFFCFFF
+#define   S_030010_DST_SEL_X(x)                        (((x) & 0x7) << 16)
+#define   G_030010_DST_SEL_X(x)                        (((x) >> 16) & 0x7)
+#define   C_030010_DST_SEL_X                           0xFFF8FFFF
+#define     V_030010_SQ_SEL_X                          0x00000000
+#define     V_030010_SQ_SEL_Y                          0x00000001
+#define     V_030010_SQ_SEL_Z                          0x00000002
+#define     V_030010_SQ_SEL_W                          0x00000003
+#define     V_030010_SQ_SEL_0                          0x00000004
+#define     V_030010_SQ_SEL_1                          0x00000005
+#define   S_030010_DST_SEL_Y(x)                        (((x) & 0x7) << 19)
+#define   G_030010_DST_SEL_Y(x)                        (((x) >> 19) & 0x7)
+#define   C_030010_DST_SEL_Y                           0xFFC7FFFF
+#define   S_030010_DST_SEL_Z(x)                        (((x) & 0x7) << 22)
+#define   G_030010_DST_SEL_Z(x)                        (((x) >> 22) & 0x7)
+#define   C_030010_DST_SEL_Z                           0xFE3FFFFF
+#define   S_030010_DST_SEL_W(x)                        (((x) & 0x7) << 25)
+#define   G_030010_DST_SEL_W(x)                        (((x) >> 25) & 0x7)
+#define   C_030010_DST_SEL_W                           0xF1FFFFFF
+#define   S_030010_BASE_LEVEL(x)                       (((x) & 0xF) << 28)
+#define   G_030010_BASE_LEVEL(x)                       (((x) >> 28) & 0xF)
+#define   C_030010_BASE_LEVEL                          0x0FFFFFFF
+#define R_030014_SQ_TEX_RESOURCE_WORD5_0             0x030014
+#define   S_030014_LAST_LEVEL(x)                       (((x) & 0xF) << 0)
+#define   G_030014_LAST_LEVEL(x)                       (((x) >> 0) & 0xF)
+#define   C_030014_LAST_LEVEL                          0xFFFFFFF0
+#define   S_030014_BASE_ARRAY(x)                       (((x) & 0x1FFF) << 4)
+#define   G_030014_BASE_ARRAY(x)                       (((x) >> 4) & 0x1FFF)
+#define   C_030014_BASE_ARRAY                          0xFFFE000F
+#define   S_030014_LAST_ARRAY(x)                       (((x) & 0x1FFF) << 17)
+#define   G_030014_LAST_ARRAY(x)                       (((x) >> 17) & 0x1FFF)
+#define   C_030014_LAST_ARRAY                          0xC001FFFF
+#define R_030018_SQ_TEX_RESOURCE_WORD6_0             0x030018
+#define   S_030018_MAX_ANISO(x)                        (((x) & 0x7) << 0)
+#define   G_030018_MAX_ANISO(x)                        (((x) >> 0) & 0x7)
+#define   C_030018_MAX_ANISO                           0xFFFFFFF8
+#define   S_030018_PERF_MODULATION(x)                  (((x) & 0x7) << 3)
+#define   G_030018_PERF_MODULATION(x)                  (((x) >> 3) & 0x7)
+#define   C_030018_PERF_MODULATION                     0xFFFFFFC7
+#define   S_030018_INTERLACED(x)                       (((x) & 0x1) << 6)
+#define   G_030018_INTERLACED(x)                       (((x) >> 6) & 0x1)
+#define   C_030018_INTERLACED                          0xFFFFFFBF
+#define   S_030018_TILE_SPLIT(x)                       (((x) & 0x7) << 29)
+#define   G_030018_TILE_SPLIT(x)                       (((x) >> 29) & 0x7)
+#define R_03001C_SQ_TEX_RESOURCE_WORD7_0             0x03001C
+#define   S_03001C_MACRO_TILE_ASPECT(x)                (((x) & 0x3) << 6)
+#define   G_03001C_MACRO_TILE_ASPECT(x)                (((x) >> 6) & 0x3)
+#define   S_03001C_BANK_WIDTH(x)                       (((x) & 0x3) << 8)
+#define   G_03001C_BANK_WIDTH(x)                       (((x) >> 8) & 0x3)
+#define   S_03001C_BANK_HEIGHT(x)                      (((x) & 0x3) << 10)
+#define   G_03001C_BANK_HEIGHT(x)                      (((x) >> 10) & 0x3)
+#define   S_03001C_NUM_BANKS(x)                        (((x) & 0x3) << 16)
+#define   G_03001C_NUM_BANKS(x)                        (((x) >> 16) & 0x3)
+#define   S_03001C_TYPE(x)                             (((x) & 0x3) << 30)
+#define   G_03001C_TYPE(x)                             (((x) >> 30) & 0x3)
+#define   C_03001C_TYPE                                0x3FFFFFFF
+#define     V_03001C_SQ_TEX_VTX_INVALID_TEXTURE        0x00000000
+#define     V_03001C_SQ_TEX_VTX_INVALID_BUFFER         0x00000001
+#define     V_03001C_SQ_TEX_VTX_VALID_TEXTURE          0x00000002
+#define     V_03001C_SQ_TEX_VTX_VALID_BUFFER           0x00000003
+#define   S_03001C_DATA_FORMAT(x)                      (((x) & 0x3F) << 0)
+#define   G_03001C_DATA_FORMAT(x)                      (((x) >> 0) & 0x3F)
+#define   C_03001C_DATA_FORMAT                         0xFFFFFFC0
+
+#define SQ_VTX_CONSTANT_WORD0_0				0x30000
+#define SQ_VTX_CONSTANT_WORD1_0				0x30004
+#define SQ_VTX_CONSTANT_WORD2_0				0x30008
+#	define SQ_VTXC_BASE_ADDR_HI(x)			((x) << 0)
+#	define SQ_VTXC_STRIDE(x)			((x) << 8)
+#	define SQ_VTXC_ENDIAN_SWAP(x)			((x) << 30)
+#	define SQ_ENDIAN_NONE				0
+#	define SQ_ENDIAN_8IN16				1
+#	define SQ_ENDIAN_8IN32				2
+#define SQ_VTX_CONSTANT_WORD3_0				0x3000C
+#	define SQ_VTCX_SEL_X(x)				((x) << 3)
+#	define SQ_VTCX_SEL_Y(x)				((x) << 6)
+#	define SQ_VTCX_SEL_Z(x)				((x) << 9)
+#	define SQ_VTCX_SEL_W(x)				((x) << 12)
+#define SQ_VTX_CONSTANT_WORD4_0				0x30010
+#define SQ_VTX_CONSTANT_WORD5_0                         0x30014
+#define SQ_VTX_CONSTANT_WORD6_0                         0x30018
+#define SQ_VTX_CONSTANT_WORD7_0                         0x3001c
+
+#define TD_PS_BORDER_COLOR_INDEX                        0xA400
+#define TD_PS_BORDER_COLOR_RED                          0xA404
+#define TD_PS_BORDER_COLOR_GREEN                        0xA408
+#define TD_PS_BORDER_COLOR_BLUE                         0xA40C
+#define TD_PS_BORDER_COLOR_ALPHA                        0xA410
+#define TD_VS_BORDER_COLOR_INDEX                        0xA414
+#define TD_VS_BORDER_COLOR_RED                          0xA418
+#define TD_VS_BORDER_COLOR_GREEN                        0xA41C
+#define TD_VS_BORDER_COLOR_BLUE                         0xA420
+#define TD_VS_BORDER_COLOR_ALPHA                        0xA424
+#define TD_GS_BORDER_COLOR_INDEX                        0xA428
+#define TD_GS_BORDER_COLOR_RED                          0xA42C
+#define TD_GS_BORDER_COLOR_GREEN                        0xA430
+#define TD_GS_BORDER_COLOR_BLUE                         0xA434
+#define TD_GS_BORDER_COLOR_ALPHA                        0xA438
+#define TD_HS_BORDER_COLOR_INDEX                        0xA43C
+#define TD_HS_BORDER_COLOR_RED                          0xA440
+#define TD_HS_BORDER_COLOR_GREEN                        0xA444
+#define TD_HS_BORDER_COLOR_BLUE                         0xA448
+#define TD_HS_BORDER_COLOR_ALPHA                        0xA44C
+#define TD_LS_BORDER_COLOR_INDEX                        0xA450
+#define TD_LS_BORDER_COLOR_RED                          0xA454
+#define TD_LS_BORDER_COLOR_GREEN                        0xA458
+#define TD_LS_BORDER_COLOR_BLUE                         0xA45C
+#define TD_LS_BORDER_COLOR_ALPHA                        0xA460
+#define TD_CS_BORDER_COLOR_INDEX                        0xA464
+#define TD_CS_BORDER_COLOR_RED                          0xA468
+#define TD_CS_BORDER_COLOR_GREEN                        0xA46C
+#define TD_CS_BORDER_COLOR_BLUE                         0xA470
+#define TD_CS_BORDER_COLOR_ALPHA                        0xA474
+
+/* cayman 3D regs */
+#define CAYMAN_VGT_OFFCHIP_LDS_BASE			0x89B4
+#define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS			0x8E48
+#define CAYMAN_DB_EQAA					0x28804
+#define CAYMAN_DB_DEPTH_INFO				0x2803C
+#define CAYMAN_PA_SC_AA_CONFIG				0x28BE0
+#define         CAYMAN_MSAA_NUM_SAMPLES_SHIFT           0
+#define         CAYMAN_MSAA_NUM_SAMPLES_MASK            0x7
+#define CAYMAN_SX_SCATTER_EXPORT_BASE			0x28358
+/* cayman packet3 addition */
+#define	CAYMAN_PACKET3_DEALLOC_STATE			0x14
+
+/* DMA regs common on r6xx/r7xx/evergreen/ni */
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
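+
+/*
+ * Illustrative programming sketch (not part of the original header),
+ * assuming the driver's WREG32() register accessor: DMA_RB_SIZE takes the
+ * log2 of the ring size in dwords, so a 64KB ring (2^14 dwords) could be
+ * enabled with
+ *
+ *   WREG32(DMA_RB_CNTL, DMA_RB_ENABLE | DMA_RB_SIZE(14) |
+ *          DMA_RPTR_WRITEBACK_ENABLE);
+ */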
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/mkregtable.c b/linux-imx/drivers/gpu/drm/radeon/mkregtable.c
new file mode 100644
index 0000000..5a82b6b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/mkregtable.c
@@ -0,0 +1,725 @@
+/* Utility to create the register check tables.
+ * This includes an inlined copy of list.h that is safe for userspace.
+ *
+ * Copyright 2009 Jerome Glisse
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Authors:
+ * 	Jerome Glisse
+ * 	Dave Airlie
+ */
+
+#include <sys/types.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <regex.h>
+#include <libgen.h>
+
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+/* The list iterator macros copied below reference the kernel's prefetch()
+ * hint, which does not exist in userspace; define a no-op stand-in so that
+ * all of them remain compilable here. */
+#define prefetch(x) ((void)(x))
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr:    the pointer to the member.
+ * @type:   the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({          \
+	const typeof(((type *)0)->member)*__mptr = (ptr);    \
+		     (type *)((char *)__mptr - offsetof(type, member)); })
+
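+/*
+ * Illustrative example (not in the original source): given the struct
+ * offset type defined later in this file,
+ *
+ *   struct offset { struct list_head list; unsigned offset; };
+ *
+ * a struct list_head pointer lh embedded in one of them maps back to its
+ * container with:
+ *
+ *   struct offset *o = container_of(lh, struct offset, list);
+ */
+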
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+struct list_head {
+	struct list_head *next, *prev;
+};
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+	struct list_head name = LIST_HEAD_INIT(name)
+
+static inline void INIT_LIST_HEAD(struct list_head *list)
+{
+	list->next = list;
+	list->prev = list;
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+#ifndef CONFIG_DEBUG_LIST
+static inline void __list_add(struct list_head *new,
+			      struct list_head *prev, struct list_head *next)
+{
+	next->prev = new;
+	new->next = next;
+	new->prev = prev;
+	prev->next = new;
+}
+#else
+extern void __list_add(struct list_head *new,
+		       struct list_head *prev, struct list_head *next);
+#endif
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+	__list_add(new, head, head->next);
+}
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+	__list_add(new, head->prev, head);
+}
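+
+/*
+ * Illustrative usage (not in the original source): with LIST_HEAD(h) and an
+ * element e containing a struct list_head member named list,
+ * list_add(&e.list, &h) pushes at the head (stack/LIFO order) while
+ * list_add_tail(&e.list, &h) appends at the tail (queue/FIFO order).
+ */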
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head *prev, struct list_head *next)
+{
+	next->prev = prev;
+	prev->next = next;
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty() on entry does not return true after this; the entry is
+ * in an undefined state.
+ */
+#ifndef CONFIG_DEBUG_LIST
+static inline void list_del(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+	entry->next = (void *)0xDEADBEEF;
+	entry->prev = (void *)0xBEEFDEAD;
+}
+#else
+extern void list_del(struct list_head *entry);
+#endif
+
+/**
+ * list_replace - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * If @old was empty, it will be overwritten.
+ */
+static inline void list_replace(struct list_head *old, struct list_head *new)
+{
+	new->next = old->next;
+	new->next->prev = new;
+	new->prev = old->prev;
+	new->prev->next = new;
+}
+
+static inline void list_replace_init(struct list_head *old,
+				     struct list_head *new)
+{
+	list_replace(old, new);
+	INIT_LIST_HEAD(old);
+}
+
+/**
+ * list_del_init - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static inline void list_del_init(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+	INIT_LIST_HEAD(entry);
+}
+
+/**
+ * list_move - delete from one list and add as another's head
+ * @list: the entry to move
+ * @head: the head that will precede our entry
+ */
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+	__list_del(list->prev, list->next);
+	list_add(list, head);
+}
+
+/**
+ * list_move_tail - delete from one list and add as another's tail
+ * @list: the entry to move
+ * @head: the head that will follow our entry
+ */
+static inline void list_move_tail(struct list_head *list,
+				  struct list_head *head)
+{
+	__list_del(list->prev, list->next);
+	list_add_tail(list, head);
+}
+
+/**
+ * list_is_last - tests whether @list is the last entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_last(const struct list_head *list,
+			       const struct list_head *head)
+{
+	return list->next == head;
+}
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static inline int list_empty(const struct list_head *head)
+{
+	return head->next == head;
+}
+
+/**
+ * list_empty_careful - tests whether a list is empty and not being modified
+ * @head: the list to test
+ *
+ * Description:
+ * tests whether a list is empty _and_ checks that no other CPU might be
+ * in the process of modifying either member (next or prev)
+ *
+ * NOTE: using list_empty_careful() without synchronization
+ * can only be safe if the only activity that can happen
+ * to the list entry is list_del_init(). E.g., it cannot be used
+ * if another CPU could re-list_add() it.
+ */
+static inline int list_empty_careful(const struct list_head *head)
+{
+	struct list_head *next = head->next;
+	return (next == head) && (next == head->prev);
+}
+
+/**
+ * list_is_singular - tests whether a list has just one entry.
+ * @head: the list to test.
+ */
+static inline int list_is_singular(const struct list_head *head)
+{
+	return !list_empty(head) && (head->next == head->prev);
+}
+
+static inline void __list_cut_position(struct list_head *list,
+				       struct list_head *head,
+				       struct list_head *entry)
+{
+	struct list_head *new_first = entry->next;
+	list->next = head->next;
+	list->next->prev = list;
+	list->prev = entry;
+	entry->next = list;
+	head->next = new_first;
+	new_first->prev = head;
+}
+
+/**
+ * list_cut_position - cut a list into two
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *	and if so we won't cut the list
+ *
+ * This helper moves the initial part of @head, up to and
+ * including @entry, from @head to @list.  @entry should be
+ * an element you know is on @head, and @list should be an
+ * empty list or one whose data you do not mind losing.
+ *
+ */
+static inline void list_cut_position(struct list_head *list,
+				     struct list_head *head,
+				     struct list_head *entry)
+{
+	if (list_empty(head))
+		return;
+	if (list_is_singular(head) && (head->next != entry && head != entry))
+		return;
+	if (entry == head)
+		INIT_LIST_HEAD(list);
+	else
+		__list_cut_position(list, head, entry);
+}
+
+static inline void __list_splice(const struct list_head *list,
+				 struct list_head *prev, struct list_head *next)
+{
+	struct list_head *first = list->next;
+	struct list_head *last = list->prev;
+
+	first->prev = prev;
+	prev->next = first;
+
+	last->next = next;
+	next->prev = last;
+}
+
+/**
+ * list_splice - join two lists; this is designed for stacks
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice(const struct list_head *list,
+			       struct list_head *head)
+{
+	if (!list_empty(list))
+		__list_splice(list, head, head->next);
+}
+
+/**
+ * list_splice_tail - join two lists, each list being a queue
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice_tail(struct list_head *list,
+				    struct list_head *head)
+{
+	if (!list_empty(list))
+		__list_splice(list, head->prev, head);
+}
+
+/**
+ * list_splice_init - join two lists and reinitialise the emptied list.
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_init(struct list_head *list,
+				    struct list_head *head)
+{
+	if (!list_empty(list)) {
+		__list_splice(list, head, head->next);
+		INIT_LIST_HEAD(list);
+	}
+}
+
+/**
+ * list_splice_tail_init - join two lists and reinitialise the emptied list
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * Each of the lists is a queue.
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_tail_init(struct list_head *list,
+					 struct list_head *head)
+{
+	if (!list_empty(list)) {
+		__list_splice(list, head->prev, head);
+		INIT_LIST_HEAD(list);
+	}
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr:	the &struct list_head pointer.
+ * @type:	the type of the struct this is embedded in.
+ * @member:	the name of the list_struct within the struct.
+ */
+#define list_entry(ptr, type, member) \
+	container_of(ptr, type, member)
+
+/**
+ * list_first_entry - get the first element from a list
+ * @ptr:	the list head to take the element from.
+ * @type:	the type of the struct this is embedded in.
+ * @member:	the name of the list_struct within the struct.
+ *
+ * Note that the list is expected to be non-empty.
+ */
+#define list_first_entry(ptr, type, member) \
+	list_entry((ptr)->next, type, member)
+
+/**
+ * list_for_each	-	iterate over a list
+ * @pos:	the &struct list_head to use as a loop cursor.
+ * @head:	the head for your list.
+ */
+#define list_for_each(pos, head) \
+	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
+		pos = pos->next)
+
+/**
+ * __list_for_each	-	iterate over a list
+ * @pos:	the &struct list_head to use as a loop cursor.
+ * @head:	the head for your list.
+ *
+ * This variant differs from list_for_each() in that it's the
+ * simplest possible list iteration code, no prefetching is done.
+ * Use this for code that knows the list to be very short (empty
+ * or 1 entry) most of the time.
+ */
+#define __list_for_each(pos, head) \
+	for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ * list_for_each_prev	-	iterate over a list backwards
+ * @pos:	the &struct list_head to use as a loop cursor.
+ * @head:	the head for your list.
+ */
+#define list_for_each_prev(pos, head) \
+	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
+		pos = pos->prev)
+
+/**
+ * list_for_each_safe - iterate over a list safe against removal of list entry
+ * @pos:	the &struct list_head to use as a loop cursor.
+ * @n:		another &struct list_head to use as temporary storage
+ * @head:	the head for your list.
+ */
+#define list_for_each_safe(pos, n, head) \
+	for (pos = (head)->next, n = pos->next; pos != (head); \
+		pos = n, n = pos->next)
+
+/**
+ * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
+ * @pos:	the &struct list_head to use as a loop cursor.
+ * @n:		another &struct list_head to use as temporary storage
+ * @head:	the head for your list.
+ */
+#define list_for_each_prev_safe(pos, n, head) \
+	for (pos = (head)->prev, n = pos->prev; \
+	     prefetch(pos->prev), pos != (head); \
+	     pos = n, n = pos->prev)
+
+/**
+ * list_for_each_entry	-	iterate over list of given type
+ * @pos:	the type * to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ */
+#define list_for_each_entry(pos, head, member)				\
+	for (pos = list_entry((head)->next, typeof(*pos), member);	\
+	     &pos->member != (head); 	\
+	     pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_reverse - iterate backwards over list of given type.
+ * @pos:	the type * to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_reverse(pos, head, member)			\
+	for (pos = list_entry((head)->prev, typeof(*pos), member);	\
+	     prefetch(pos->member.prev), &pos->member != (head); 	\
+	     pos = list_entry(pos->member.prev, typeof(*pos), member))
+
+/**
+ * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
+ * @pos:	the type * to use as a start point
+ * @head:	the head of the list
+ * @member:	the name of the list_struct within the struct.
+ *
+ * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
+ */
+#define list_prepare_entry(pos, head, member) \
+	((pos) ? : list_entry(head, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_continue - continue iteration over list of given type
+ * @pos:	the type * to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ *
+ * Continue to iterate over list of given type, continuing after
+ * the current position.
+ */
+#define list_for_each_entry_continue(pos, head, member) 		\
+	for (pos = list_entry(pos->member.next, typeof(*pos), member);	\
+	     prefetch(pos->member.next), &pos->member != (head);	\
+	     pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_continue_reverse - iterate backwards from the given point
+ * @pos:	the type * to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ *
+ * Start to iterate over list of given type backwards, continuing after
+ * the current position.
+ */
+#define list_for_each_entry_continue_reverse(pos, head, member)		\
+	for (pos = list_entry(pos->member.prev, typeof(*pos), member);	\
+	     prefetch(pos->member.prev), &pos->member != (head);	\
+	     pos = list_entry(pos->member.prev, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_from - iterate over list of given type from the current point
+ * @pos:	the type * to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ *
+ * Iterate over list of given type, continuing from current position.
+ */
+#define list_for_each_entry_from(pos, head, member) 			\
+	for (; prefetch(pos->member.next), &pos->member != (head);	\
+	     pos = list_entry(pos->member.next, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos:	the type * to use as a loop cursor.
+ * @n:		another type * to use as temporary storage
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe(pos, n, head, member)			\
+	for (pos = list_entry((head)->next, typeof(*pos), member),	\
+		n = list_entry(pos->member.next, typeof(*pos), member);	\
+	     &pos->member != (head); 					\
+	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+/**
+ * list_for_each_entry_safe_continue
+ * @pos:	the type * to use as a loop cursor.
+ * @n:		another type * to use as temporary storage
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ *
+ * Iterate over list of given type, continuing after current point,
+ * safe against removal of list entry.
+ */
+#define list_for_each_entry_safe_continue(pos, n, head, member) 		\
+	for (pos = list_entry(pos->member.next, typeof(*pos), member), 		\
+		n = list_entry(pos->member.next, typeof(*pos), member);		\
+	     &pos->member != (head);						\
+	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+/**
+ * list_for_each_entry_safe_from - iterate over list from current point safe against removal
+ * @pos:	the type * to use as a loop cursor.
+ * @n:		another type * to use as temporary storage
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ *
+ * Iterate over list of given type from current point, safe against
+ * removal of list entry.
+ */
+#define list_for_each_entry_safe_from(pos, n, head, member) 			\
+	for (n = list_entry(pos->member.next, typeof(*pos), member);		\
+	     &pos->member != (head);						\
+	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+/**
+ * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
+ * @pos:	the type * to use as a loop cursor.
+ * @n:		another type * to use as temporary storage
+ * @head:	the head for your list.
+ * @member:	the name of the list_struct within the struct.
+ *
+ * Iterate backwards over list of given type, safe against removal
+ * of list entry.
+ */
+#define list_for_each_entry_safe_reverse(pos, n, head, member)		\
+	for (pos = list_entry((head)->prev, typeof(*pos), member),	\
+		n = list_entry(pos->member.prev, typeof(*pos), member);	\
+	     &pos->member != (head); 					\
+	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))
+
+struct offset {
+	struct list_head list;
+	unsigned offset;
+};
+
+struct table {
+	struct list_head offsets;
+	unsigned offset_max;
+	unsigned nentry;
+	unsigned *table;
+	char *gpu_prefix;
+};
+
+static struct offset *offset_new(unsigned o)
+{
+	struct offset *offset;
+
+	offset = (struct offset *)malloc(sizeof(struct offset));
+	if (offset) {
+		INIT_LIST_HEAD(&offset->list);
+		offset->offset = o;
+	}
+	return offset;
+}
+
+static void table_offset_add(struct table *t, struct offset *offset)
+{
+	list_add_tail(&offset->list, &t->offsets);
+}
+
+static void table_init(struct table *t)
+{
+	INIT_LIST_HEAD(&t->offsets);
+	t->offset_max = 0;
+	t->nentry = 0;
+	t->table = NULL;
+}
+
+static void table_print(struct table *t)
+{
+	unsigned nlloop, i, j, n, c, id;
+
+	nlloop = (t->nentry + 3) / 4;
+	c = t->nentry;
+	printf("static const unsigned %s_reg_safe_bm[%d] = {\n", t->gpu_prefix,
+	       t->nentry);
+	for (i = 0, id = 0; i < nlloop; i++) {
+		n = 4;
+		if (n > c)
+			n = c;
+		c -= n;
+		for (j = 0; j < n; j++) {
+			if (j == 0)
+				printf("\t");
+			else
+				printf(" ");
+			printf("0x%08X,", t->table[id++]);
+		}
+		printf("\n");
+	}
+	printf("};\n");
+}
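+
+/*
+ * For reference, table_print() emits the bitmask as a C array, four words
+ * per line.  Output shape (array name and values illustrative only):
+ *
+ *	static const unsigned cayman_reg_safe_bm[N] = {
+ *		0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFBFF, 0xFFFFFFFF,
+ *		...
+ *	};
+ */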
+
+static int table_build(struct table *t)
+{
+	struct offset *offset;
+	unsigned i, m;
+
+	t->nentry = ((t->offset_max >> 2) + 31) / 32;
+	t->table = (unsigned *)malloc(sizeof(unsigned) * t->nentry);
+	if (t->table == NULL)
+		return -1;
+	memset(t->table, 0xff, sizeof(unsigned) * t->nentry);
+	list_for_each_entry(offset, &t->offsets, list) {
+		i = (offset->offset >> 2) / 32;
+		m = (offset->offset >> 2) & 31;
+		m = 1 << m;
+		t->table[i] ^= m;
+	}
+	return 0;
+}
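+
+/*
+ * Worked example for the bit math above: one bit tracks one 32-bit
+ * register dword, so a byte offset is first shifted right by 2.  For
+ * offset 0x8b24: dword index 0x8b24 >> 2 = 0x22c9, which lands in word
+ * 0x22c9 / 32 = 278 at bit 0x22c9 & 31 = 9.  The table starts all-ones
+ * (the 0xff memset) and each offset listed in the auth file has its bit
+ * toggled clear, so set bits remain only for unlisted registers (the
+ * XOR assumes each offset is listed at most once).
+ */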
+
+static char gpu_name[10];
+static int parser_auth(struct table *t, const char *filename)
+{
+	FILE *file;
+	regex_t mask_rex;
+	regmatch_t match[4];
+	char buf[1024];
+	size_t end;
+	int len;
+	int done = 0;
+	int r;
+	unsigned o;
+	struct offset *offset;
+	char last_reg_s[10];
+	int last_reg;
+
+	if (regcomp
+	    (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
+		fprintf(stderr, "Failed to compile regular expression\n");
+		return -1;
+	}
+	file = fopen(filename, "r");
+	if (file == NULL) {
+		fprintf(stderr, "Failed to open: %s\n", filename);
+		return -1;
+	}
+	fseek(file, 0, SEEK_END);
+	end = ftell(file);
+	fseek(file, 0, SEEK_SET);
+
+	/* get header */
+	if (fgets(buf, 1024, file) == NULL) {
+		fclose(file);
+		return -1;
+	}
+
+	/* the first line contains the gpu name followed by
+	 * the last register offset */
+	sscanf(buf, "%s %s", gpu_name, last_reg_s);
+	t->gpu_prefix = gpu_name;
+	last_reg = strtol(last_reg_s, NULL, 16);
+
+	do {
+		if (fgets(buf, 1024, file) == NULL) {
+			fclose(file);
+			return -1;
+		}
+		len = strlen(buf);
+		if (ftell(file) == end)
+			done = 1;
+		if (len) {
+			r = regexec(&mask_rex, buf, 4, match, 0);
+			if (r == REG_NOMATCH) {
+			} else if (r) {
+				fprintf(stderr,
+					"Error matching regular expression %d in %s\n",
+					r, filename);
+				fclose(file);
+				return -1;
+			} else {
+				buf[match[0].rm_eo] = 0;
+				buf[match[1].rm_eo] = 0;
+				buf[match[2].rm_eo] = 0;
+				o = strtol(&buf[match[1].rm_so], NULL, 16);
+				offset = offset_new(o);
+				table_offset_add(t, offset);
+				if (o > t->offset_max)
+					t->offset_max = o;
+			}
+		}
+	} while (!done);
+	fclose(file);
+	if (t->offset_max < last_reg)
+		t->offset_max = last_reg;
+	return table_build(t);
+}
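+
+/*
+ * The auth file parsed above is expected to look roughly like this
+ * (contents illustrative):
+ *
+ *	cayman 0x9ff0
+ *	0x0000802C GRBM_GFX_INDEX
+ *	0x000088B0 VGT_VTX_VECT_EJECT_REG
+ *
+ * The first line supplies the gpu prefix and the highest register offset;
+ * each later line matching "0x<hex> <name>" contributes one offset.
+ */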
+
+int main(int argc, char *argv[])
+{
+	struct table t;
+
+	if (argc != 2) {
+		fprintf(stderr, "Usage: %s <authfile>\n", argv[0]);
+		exit(1);
+	}
+	table_init(&t);
+	if (parser_auth(&t, argv[1])) {
+		fprintf(stderr, "Failed to parse file %s\n", argv[1]);
+		return -1;
+	}
+	table_print(&t);
+	return 0;
+}
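+
+/*
+ * Typical invocation (illustrative): the build runs this host tool over a
+ * register list and captures stdout into a generated header, e.g.
+ *
+ *	mkregtable reg_srcs/cayman > cayman_reg_safe.h
+ */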
diff --git a/linux-imx/drivers/gpu/drm/radeon/ni.c b/linux-imx/drivers/gpu/drm/radeon/ni.c
new file mode 100644
index 0000000..451d788
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/ni.c
@@ -0,0 +1,2489 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <drm/radeon_drm.h>
+#include "nid.h"
+#include "atom.h"
+#include "ni_reg.h"
+#include "cayman_blit_shaders.h"
+
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
+extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+extern void evergreen_mc_program(struct radeon_device *rdev);
+extern void evergreen_irq_suspend(struct radeon_device *rdev);
+extern int evergreen_mc_init(struct radeon_device *rdev);
+extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
+extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+extern void si_rlc_fini(struct radeon_device *rdev);
+extern int si_rlc_init(struct radeon_device *rdev);
+
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define BTC_MC_UCODE_SIZE 6024
+
+#define CAYMAN_PFP_UCODE_SIZE 2176
+#define CAYMAN_PM4_UCODE_SIZE 2176
+#define CAYMAN_RLC_UCODE_SIZE 1024
+#define CAYMAN_MC_UCODE_SIZE 6037
+
+#define ARUBA_RLC_UCODE_SIZE 1536
+
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
+MODULE_FIRMWARE("radeon/BARTS_me.bin");
+MODULE_FIRMWARE("radeon/BARTS_mc.bin");
+MODULE_FIRMWARE("radeon/BTC_rlc.bin");
+MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
+MODULE_FIRMWARE("radeon/TURKS_me.bin");
+MODULE_FIRMWARE("radeon/TURKS_mc.bin");
+MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
+MODULE_FIRMWARE("radeon/CAICOS_me.bin");
+MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
+MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
+MODULE_FIRMWARE("radeon/ARUBA_me.bin");
+MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
+
+
+static const u32 cayman_golden_registers2[] =
+{
+	0x3e5c, 0xffffffff, 0x00000000,
+	0x3e48, 0xffffffff, 0x00000000,
+	0x3e4c, 0xffffffff, 0x00000000,
+	0x3e64, 0xffffffff, 0x00000000,
+	0x3e50, 0xffffffff, 0x00000000,
+	0x3e60, 0xffffffff, 0x00000000
+};
+
+static const u32 cayman_golden_registers[] =
+{
+	0x5eb4, 0xffffffff, 0x00000002,
+	0x5e78, 0x8f311ff1, 0x001000f0,
+	0x3f90, 0xffff0000, 0xff000000,
+	0x9148, 0xffff0000, 0xff000000,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0xc78, 0x00000080, 0x00000080,
+	0xbd4, 0x70073777, 0x00011003,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd0b8, 0x73773777, 0x02011003,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x98f8, 0x33773777, 0x02011003,
+	0x98fc, 0xffffffff, 0x76541032,
+	0x7030, 0x31000311, 0x00000011,
+	0x2f48, 0x33773777, 0x42010001,
+	0x6b28, 0x00000010, 0x00000012,
+	0x7728, 0x00000010, 0x00000012,
+	0x10328, 0x00000010, 0x00000012,
+	0x10f28, 0x00000010, 0x00000012,
+	0x11b28, 0x00000010, 0x00000012,
+	0x12728, 0x00000010, 0x00000012,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x10c, 0x00000001, 0x00010003,
+	0xa02c, 0xffffffff, 0x0000009b,
+	0x913c, 0x0000010f, 0x01000100,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x28350, 0x00000f01, 0x00000000,
+	0x9508, 0x3700001f, 0x00000002,
+	0x960c, 0xffffffff, 0x54763210,
+	0x88c4, 0x001f3ae3, 0x00000082,
+	0x88d0, 0xffffffff, 0x0f40df40,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 dvst_golden_registers2[] =
+{
+	0x8f8, 0xffffffff, 0,
+	0x8fc, 0x00380000, 0,
+	0x8f8, 0xffffffff, 1,
+	0x8fc, 0x0e000000, 0
+};
+
+static const u32 dvst_golden_registers[] =
+{
+	0x690, 0x3fff3fff, 0x20c00033,
+	0x918c, 0x0fff0fff, 0x00010006,
+	0x91a8, 0x0fff0fff, 0x00010006,
+	0x9150, 0xffffdfff, 0x6e944040,
+	0x917c, 0x0fff0fff, 0x00030002,
+	0x9198, 0x0fff0fff, 0x00030002,
+	0x915c, 0x0fff0fff, 0x00010000,
+	0x3f90, 0xffff0001, 0xff000000,
+	0x9178, 0x0fff0fff, 0x00070000,
+	0x9194, 0x0fff0fff, 0x00070000,
+	0x9148, 0xffff0001, 0xff000000,
+	0x9190, 0x0fff0fff, 0x00090008,
+	0x91ac, 0x0fff0fff, 0x00090008,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x929c, 0x00000fff, 0x00000001,
+	0x55e4, 0xff607fff, 0xfc000100,
+	0x8a18, 0xff000fff, 0x00000100,
+	0x8b28, 0xff000fff, 0x00000100,
+	0x9144, 0xfffc0fff, 0x00000100,
+	0x6ed8, 0x00010101, 0x00010000,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0xfffffffe, 0x00000000,
+	0xd0c0, 0xff000fff, 0x00000100,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd0b8, 0x73773777, 0x12010001,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x98f8, 0x73773777, 0x12010001,
+	0x98fc, 0xffffffff, 0x00000010,
+	0x9b7c, 0x00ff0000, 0x00fc0000,
+	0x8030, 0x00001f0f, 0x0000100a,
+	0x2f48, 0x73773777, 0x12010001,
+	0x2408, 0x00030000, 0x000c007f,
+	0x8a14, 0xf000003f, 0x00000007,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x4d8, 0x00000fff, 0x00000100,
+	0xa008, 0xffffffff, 0x00010000,
+	0x913c, 0xffff03ff, 0x01000100,
+	0x8c00, 0x000000ff, 0x00000003,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x8cf0, 0x1fff1fff, 0x08e00410,
+	0x28350, 0x00000f01, 0x00000000,
+	0x9508, 0xf700071f, 0x00000002,
+	0x960c, 0xffffffff, 0x54763210,
+	0x20ef8, 0x01ff01ff, 0x00000002,
+	0x20e98, 0xfffffbff, 0x00200000,
+	0x2015c, 0xffffffff, 0x00000f40,
+	0x88c4, 0x001f3ae3, 0x00000082,
+	0x8978, 0x3fffffff, 0x04050140,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000
+};
+
+static const u32 scrapper_golden_registers[] =
+{
+	0x690, 0x3fff3fff, 0x20c00033,
+	0x918c, 0x0fff0fff, 0x00010006,
+	0x918c, 0x0fff0fff, 0x00010006,
+	0x91a8, 0x0fff0fff, 0x00010006,
+	0x91a8, 0x0fff0fff, 0x00010006,
+	0x9150, 0xffffdfff, 0x6e944040,
+	0x9150, 0xffffdfff, 0x6e944040,
+	0x917c, 0x0fff0fff, 0x00030002,
+	0x917c, 0x0fff0fff, 0x00030002,
+	0x9198, 0x0fff0fff, 0x00030002,
+	0x9198, 0x0fff0fff, 0x00030002,
+	0x915c, 0x0fff0fff, 0x00010000,
+	0x915c, 0x0fff0fff, 0x00010000,
+	0x3f90, 0xffff0001, 0xff000000,
+	0x3f90, 0xffff0001, 0xff000000,
+	0x9178, 0x0fff0fff, 0x00070000,
+	0x9178, 0x0fff0fff, 0x00070000,
+	0x9194, 0x0fff0fff, 0x00070000,
+	0x9194, 0x0fff0fff, 0x00070000,
+	0x9148, 0xffff0001, 0xff000000,
+	0x9148, 0xffff0001, 0xff000000,
+	0x9190, 0x0fff0fff, 0x00090008,
+	0x9190, 0x0fff0fff, 0x00090008,
+	0x91ac, 0x0fff0fff, 0x00090008,
+	0x91ac, 0x0fff0fff, 0x00090008,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x3f94, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x914c, 0xffff0000, 0xff000000,
+	0x929c, 0x00000fff, 0x00000001,
+	0x929c, 0x00000fff, 0x00000001,
+	0x55e4, 0xff607fff, 0xfc000100,
+	0x8a18, 0xff000fff, 0x00000100,
+	0x8a18, 0xff000fff, 0x00000100,
+	0x8b28, 0xff000fff, 0x00000100,
+	0x8b28, 0xff000fff, 0x00000100,
+	0x9144, 0xfffc0fff, 0x00000100,
+	0x9144, 0xfffc0fff, 0x00000100,
+	0x6ed8, 0x00010101, 0x00010000,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0xfffffffe, 0x00000000,
+	0x9838, 0xfffffffe, 0x00000000,
+	0xd0c0, 0xff000fff, 0x00000100,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd02c, 0xbfffff1f, 0x08421000,
+	0xd0b8, 0x73773777, 0x12010001,
+	0xd0b8, 0x73773777, 0x12010001,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x98f8, 0x73773777, 0x12010001,
+	0x98f8, 0x73773777, 0x12010001,
+	0x98fc, 0xffffffff, 0x00000010,
+	0x98fc, 0xffffffff, 0x00000010,
+	0x9b7c, 0x00ff0000, 0x00fc0000,
+	0x9b7c, 0x00ff0000, 0x00fc0000,
+	0x8030, 0x00001f0f, 0x0000100a,
+	0x8030, 0x00001f0f, 0x0000100a,
+	0x2f48, 0x73773777, 0x12010001,
+	0x2f48, 0x73773777, 0x12010001,
+	0x2408, 0x00030000, 0x000c007f,
+	0x8a14, 0xf000003f, 0x00000007,
+	0x8a14, 0xf000003f, 0x00000007,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b24, 0x3fff3fff, 0x00ff0fff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x28a4c, 0x07ffffff, 0x06000000,
+	0x4d8, 0x00000fff, 0x00000100,
+	0x4d8, 0x00000fff, 0x00000100,
+	0xa008, 0xffffffff, 0x00010000,
+	0xa008, 0xffffffff, 0x00010000,
+	0x913c, 0xffff03ff, 0x01000100,
+	0x913c, 0xffff03ff, 0x01000100,
+	0x90e8, 0x001fffff, 0x010400c0,
+	0x8c00, 0x000000ff, 0x00000003,
+	0x8c00, 0x000000ff, 0x00000003,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x8c04, 0xf8ff00ff, 0x40600060,
+	0x8c30, 0x0000000f, 0x00040005,
+	0x8cf0, 0x1fff1fff, 0x08e00410,
+	0x8cf0, 0x1fff1fff, 0x08e00410,
+	0x900c, 0x00ffffff, 0x0017071f,
+	0x28350, 0x00000f01, 0x00000000,
+	0x28350, 0x00000f01, 0x00000000,
+	0x9508, 0xf700071f, 0x00000002,
+	0x9508, 0xf700071f, 0x00000002,
+	0x9688, 0x00300000, 0x0017000f,
+	0x960c, 0xffffffff, 0x54763210,
+	0x960c, 0xffffffff, 0x54763210,
+	0x20ef8, 0x01ff01ff, 0x00000002,
+	0x20e98, 0xfffffbff, 0x00200000,
+	0x2015c, 0xffffffff, 0x00000f40,
+	0x88c4, 0x001f3ae3, 0x00000082,
+	0x88c4, 0x001f3ae3, 0x00000082,
+	0x8978, 0x3fffffff, 0x04050140,
+	0x8978, 0x3fffffff, 0x04050140,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x8974, 0xffffffff, 0x00000000,
+	0x8974, 0xffffffff, 0x00000000
+};
+
+static void ni_init_golden_registers(struct radeon_device *rdev)
+{
+	switch (rdev->family) {
+	case CHIP_CAYMAN:
+		radeon_program_register_sequence(rdev,
+						 cayman_golden_registers,
+						 (const u32)ARRAY_SIZE(cayman_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 cayman_golden_registers2,
+						 (const u32)ARRAY_SIZE(cayman_golden_registers2));
+		break;
+	case CHIP_ARUBA:
+		if ((rdev->pdev->device == 0x9900) ||
+		    (rdev->pdev->device == 0x9901) ||
+		    (rdev->pdev->device == 0x9903) ||
+		    (rdev->pdev->device == 0x9904) ||
+		    (rdev->pdev->device == 0x9905) ||
+		    (rdev->pdev->device == 0x9906) ||
+		    (rdev->pdev->device == 0x9907) ||
+		    (rdev->pdev->device == 0x9908) ||
+		    (rdev->pdev->device == 0x9909) ||
+		    (rdev->pdev->device == 0x990A) ||
+		    (rdev->pdev->device == 0x990B) ||
+		    (rdev->pdev->device == 0x990C) ||
+		    (rdev->pdev->device == 0x990D) ||
+		    (rdev->pdev->device == 0x990E) ||
+		    (rdev->pdev->device == 0x990F) ||
+		    (rdev->pdev->device == 0x9910) ||
+		    (rdev->pdev->device == 0x9913) ||
+		    (rdev->pdev->device == 0x9917) ||
+		    (rdev->pdev->device == 0x9918)) {
+			radeon_program_register_sequence(rdev,
+							 dvst_golden_registers,
+							 (const u32)ARRAY_SIZE(dvst_golden_registers));
+			radeon_program_register_sequence(rdev,
+							 dvst_golden_registers2,
+							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
+		} else {
+			radeon_program_register_sequence(rdev,
+							 scrapper_golden_registers,
+							 (const u32)ARRAY_SIZE(scrapper_golden_registers));
+			radeon_program_register_sequence(rdev,
+							 dvst_golden_registers2,
+							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+#define BTC_IO_MC_REGS_SIZE 29
+
+static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00946a00}
+};
+
+static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00936a00}
+};
+
+static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00916a00}
+};
+
+static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00976b00}
+};
+
+int ni_mc_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	u32 mem_type, running, blackout = 0;
+	u32 *io_mc_regs;
+	int i, ucode_size, regs_size;
+
+	if (!rdev->mc_fw)
+		return -EINVAL;
+
+	switch (rdev->family) {
+	case CHIP_BARTS:
+		io_mc_regs = (u32 *)&barts_io_mc_regs;
+		ucode_size = BTC_MC_UCODE_SIZE;
+		regs_size = BTC_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_TURKS:
+		io_mc_regs = (u32 *)&turks_io_mc_regs;
+		ucode_size = BTC_MC_UCODE_SIZE;
+		regs_size = BTC_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_CAICOS:
+	default:
+		io_mc_regs = (u32 *)&caicos_io_mc_regs;
+		ucode_size = BTC_MC_UCODE_SIZE;
+		regs_size = BTC_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_CAYMAN:
+		io_mc_regs = (u32 *)&cayman_io_mc_regs;
+		ucode_size = CAYMAN_MC_UCODE_SIZE;
+		regs_size = BTC_IO_MC_REGS_SIZE;
+		break;
+	}
+
+	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
+	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
+		if (running) {
+			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
+		}
+
+		/* reset the engine and set to writable */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+		/* load mc io regs */
+		for (i = 0; i < regs_size; i++) {
+			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+		}
+		/* load the MC ucode */
+		fw_data = (const __be32 *)rdev->mc_fw->data;
+		for (i = 0; i < ucode_size; i++)
+			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+
+		/* put the engine back into the active state */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+		/* wait for training to complete */
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
+				break;
+			udelay(1);
+		}
+
+		if (running)
+			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
+	}
+
+	return 0;
+}
+
+int ni_init_microcode(struct radeon_device *rdev)
+{
+	struct platform_device *pdev;
+	const char *chip_name;
+	const char *rlc_chip_name;
+	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+
+	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+	err = IS_ERR(pdev);
+	if (err) {
+		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+		return -EINVAL;
+	}
+
+	switch (rdev->family) {
+	case CHIP_BARTS:
+		chip_name = "BARTS";
+		rlc_chip_name = "BTC";
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+		mc_req_size = BTC_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_TURKS:
+		chip_name = "TURKS";
+		rlc_chip_name = "BTC";
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+		mc_req_size = BTC_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_CAICOS:
+		chip_name = "CAICOS";
+		rlc_chip_name = "BTC";
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+		mc_req_size = BTC_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_CAYMAN:
+		chip_name = "CAYMAN";
+		rlc_chip_name = "CAYMAN";
+		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
+		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
+		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_ARUBA:
+		chip_name = "ARUBA";
+		rlc_chip_name = "ARUBA";
+		/* pfp/me same size as CAYMAN */
+		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
+		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
+		mc_req_size = 0;
+		break;
+	default: BUG();
+	}
+
+	DRM_INFO("Loading %s Microcode\n", chip_name);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->pfp_fw->size != pfp_req_size) {
+		printk(KERN_ERR
+		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->pfp_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->me_fw->size != me_req_size) {
+		printk(KERN_ERR
+		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->me_fw->size, fw_name);
+		err = -EINVAL;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->rlc_fw->size != rlc_req_size) {
+		printk(KERN_ERR
+		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->rlc_fw->size, fw_name);
+		err = -EINVAL;
+	}
+
+	/* no MC ucode on TN */
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+		err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+		if (err)
+			goto out;
+		if (rdev->mc_fw->size != mc_req_size) {
+			printk(KERN_ERR
+			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
+			       rdev->mc_fw->size, fw_name);
+			err = -EINVAL;
+		}
+	}
+out:
+	platform_device_unregister(pdev);
+
+	if (err) {
+		if (err != -EINVAL)
+			printk(KERN_ERR
+			       "ni_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		release_firmware(rdev->pfp_fw);
+		rdev->pfp_fw = NULL;
+		release_firmware(rdev->me_fw);
+		rdev->me_fw = NULL;
+		release_firmware(rdev->rlc_fw);
+		rdev->rlc_fw = NULL;
+		release_firmware(rdev->mc_fw);
+		rdev->mc_fw = NULL;
+	}
+	return err;
+}
+
+/*
+ * Core functions
+ */
+static void cayman_gpu_init(struct radeon_device *rdev)
+{
+	u32 gb_addr_config = 0;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 cgts_tcc_disable;
+	u32 sx_debug_1;
+	u32 smx_dc_ctl0;
+	u32 cgts_sm_ctrl_reg;
+	u32 hdp_host_path_cntl;
+	u32 tmp;
+	u32 disabled_rb_mask;
+	int i, j;
+
+	switch (rdev->family) {
+	case CHIP_CAYMAN:
+		rdev->config.cayman.max_shader_engines = 2;
+		rdev->config.cayman.max_pipes_per_simd = 4;
+		rdev->config.cayman.max_tile_pipes = 8;
+		rdev->config.cayman.max_simds_per_se = 12;
+		rdev->config.cayman.max_backends_per_se = 4;
+		rdev->config.cayman.max_texture_channel_caches = 8;
+		rdev->config.cayman.max_gprs = 256;
+		rdev->config.cayman.max_threads = 256;
+		rdev->config.cayman.max_gs_threads = 32;
+		rdev->config.cayman.max_stack_entries = 512;
+		rdev->config.cayman.sx_num_of_sets = 8;
+		rdev->config.cayman.sx_max_export_size = 256;
+		rdev->config.cayman.sx_max_export_pos_size = 64;
+		rdev->config.cayman.sx_max_export_smx_size = 192;
+		rdev->config.cayman.max_hw_contexts = 8;
+		rdev->config.cayman.sq_num_cf_insts = 2;
+
+		rdev->config.cayman.sc_prim_fifo_size = 0x100;
+		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_ARUBA:
+	default:
+		rdev->config.cayman.max_shader_engines = 1;
+		rdev->config.cayman.max_pipes_per_simd = 4;
+		rdev->config.cayman.max_tile_pipes = 2;
+		if ((rdev->pdev->device == 0x9900) ||
+		    (rdev->pdev->device == 0x9901) ||
+		    (rdev->pdev->device == 0x9905) ||
+		    (rdev->pdev->device == 0x9906) ||
+		    (rdev->pdev->device == 0x9907) ||
+		    (rdev->pdev->device == 0x9908) ||
+		    (rdev->pdev->device == 0x9909) ||
+		    (rdev->pdev->device == 0x990B) ||
+		    (rdev->pdev->device == 0x990C) ||
+		    (rdev->pdev->device == 0x990F) ||
+		    (rdev->pdev->device == 0x9910) ||
+		    (rdev->pdev->device == 0x9917) ||
+		    (rdev->pdev->device == 0x9999) ||
+		    (rdev->pdev->device == 0x999C)) {
+			rdev->config.cayman.max_simds_per_se = 6;
+			rdev->config.cayman.max_backends_per_se = 2;
+			rdev->config.cayman.max_hw_contexts = 8;
+			rdev->config.cayman.sx_max_export_size = 256;
+			rdev->config.cayman.sx_max_export_pos_size = 64;
+			rdev->config.cayman.sx_max_export_smx_size = 192;
+		} else if ((rdev->pdev->device == 0x9903) ||
+			   (rdev->pdev->device == 0x9904) ||
+			   (rdev->pdev->device == 0x990A) ||
+			   (rdev->pdev->device == 0x990D) ||
+			   (rdev->pdev->device == 0x990E) ||
+			   (rdev->pdev->device == 0x9913) ||
+			   (rdev->pdev->device == 0x9918) ||
+			   (rdev->pdev->device == 0x999D)) {
+			rdev->config.cayman.max_simds_per_se = 4;
+			rdev->config.cayman.max_backends_per_se = 2;
+			rdev->config.cayman.max_hw_contexts = 8;
+			rdev->config.cayman.sx_max_export_size = 256;
+			rdev->config.cayman.sx_max_export_pos_size = 64;
+			rdev->config.cayman.sx_max_export_smx_size = 192;
+		} else if ((rdev->pdev->device == 0x9919) ||
+			   (rdev->pdev->device == 0x9990) ||
+			   (rdev->pdev->device == 0x9991) ||
+			   (rdev->pdev->device == 0x9994) ||
+			   (rdev->pdev->device == 0x9995) ||
+			   (rdev->pdev->device == 0x9996) ||
+			   (rdev->pdev->device == 0x999A) ||
+			   (rdev->pdev->device == 0x99A0)) {
+			rdev->config.cayman.max_simds_per_se = 3;
+			rdev->config.cayman.max_backends_per_se = 1;
+			rdev->config.cayman.max_hw_contexts = 4;
+			rdev->config.cayman.sx_max_export_size = 128;
+			rdev->config.cayman.sx_max_export_pos_size = 32;
+			rdev->config.cayman.sx_max_export_smx_size = 96;
+		} else {
+			rdev->config.cayman.max_simds_per_se = 2;
+			rdev->config.cayman.max_backends_per_se = 1;
+			rdev->config.cayman.max_hw_contexts = 4;
+			rdev->config.cayman.sx_max_export_size = 128;
+			rdev->config.cayman.sx_max_export_pos_size = 32;
+			rdev->config.cayman.sx_max_export_smx_size = 96;
+		}
+		rdev->config.cayman.max_texture_channel_caches = 2;
+		rdev->config.cayman.max_gprs = 256;
+		rdev->config.cayman.max_threads = 256;
+		rdev->config.cayman.max_gs_threads = 32;
+		rdev->config.cayman.max_stack_entries = 512;
+		rdev->config.cayman.sx_num_of_sets = 8;
+		rdev->config.cayman.sq_num_cf_insts = 2;
+
+		rdev->config.cayman.sc_prim_fifo_size = 0x40;
+		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	evergreen_fix_pci_max_read_req_size(rdev);
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+	if (rdev->config.cayman.mem_row_size_in_kb > 4)
+		rdev->config.cayman.mem_row_size_in_kb = 4;
+	/* XXX use MC settings? */
+	rdev->config.cayman.shader_engine_tile_size = 32;
+	rdev->config.cayman.num_gpus = 1;
+	rdev->config.cayman.multi_gpu_tile_size = 64;
+
+	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
+	rdev->config.cayman.num_tile_pipes = (1 << tmp);
+	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
+	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
+	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
+	rdev->config.cayman.num_shader_engines = tmp + 1;
+	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
+	rdev->config.cayman.num_gpus = tmp + 1;
+	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
+	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
+	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
+	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
+
+
+	/* setup tiling info dword.  gb_addr_config is not adequate since it does
+	 * not have bank info, so create a custom tiling dword.
+	 * bits 3:0   num_pipes
+	 * bits 7:4   num_banks
+	 * bits 11:8  group_size
+	 * bits 15:12 row_size
+	 */
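+	/* Worked example (values illustrative): with 8 tile pipes (encoded
+	 * as 3 by the switch below) and 8 banks (encoded as 1), bits 3:0 = 3
+	 * and bits 7:4 = 1; bits 11:8 and 15:12 are copied unencoded from
+	 * the PIPE_INTERLEAVE_SIZE and ROW_SIZE fields of gb_addr_config.
+	 */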
+	rdev->config.cayman.tile_config = 0;
+	switch (rdev->config.cayman.num_tile_pipes) {
+	case 1:
+	default:
+		rdev->config.cayman.tile_config |= (0 << 0);
+		break;
+	case 2:
+		rdev->config.cayman.tile_config |= (1 << 0);
+		break;
+	case 4:
+		rdev->config.cayman.tile_config |= (2 << 0);
+		break;
+	case 8:
+		rdev->config.cayman.tile_config |= (3 << 0);
+		break;
+	}
+
+	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
+	if (rdev->flags & RADEON_IS_IGP)
+		rdev->config.cayman.tile_config |= 1 << 4;
+	else {
+		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+		case 0: /* four banks */
+			rdev->config.cayman.tile_config |= 0 << 4;
+			break;
+		case 1: /* eight banks */
+			rdev->config.cayman.tile_config |= 1 << 4;
+			break;
+		case 2: /* sixteen banks */
+		default:
+			rdev->config.cayman.tile_config |= 2 << 4;
+			break;
+		}
+	}
+	rdev->config.cayman.tile_config |=
+		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+	rdev->config.cayman.tile_config |=
+		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
+
+	tmp = 0;
+	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
+		u32 rb_disable_bitmap;
+
+		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+		tmp <<= 4;
+		tmp |= rb_disable_bitmap;
+	}
+	/* enabled rbs are just the ones not disabled :) */
+	disabled_rb_mask = tmp;
+	tmp = 0;
+	for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
+		tmp |= (1 << i);
+	/* if all the backends are disabled, fix it up here */
+	if ((disabled_rb_mask & tmp) == tmp) {
+		for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
+			disabled_rb_mask &= ~(1 << i);
+	}
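+	/* e.g. for Cayman (2 SEs x 4 backends per SE) the full mask is 0xff;
+	 * if the harvest bits claimed every RB was disabled, the mask is
+	 * cleared again so the nominal backends stay usable.
+	 */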
+
+	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	if (ASIC_IS_DCE6(rdev))
+		WREG32(DMIF_ADDR_CALC, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+
+	if ((rdev->config.cayman.max_backends_per_se == 1) &&
+	    (rdev->flags & RADEON_IS_IGP)) {
+		if ((disabled_rb_mask & 3) == 1) {
+			/* RB0 disabled, RB1 enabled */
+			tmp = 0x11111111;
+		} else {
+			/* RB1 disabled, RB0 enabled */
+			tmp = 0x00000000;
+		}
+	} else {
+		tmp = gb_addr_config & NUM_PIPES_MASK;
+		tmp = r6xx_remap_render_backend(rdev, tmp,
+						rdev->config.cayman.max_backends_per_se *
+						rdev->config.cayman.max_shader_engines,
+						CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+	}
+	WREG32(GB_BACKEND_MAP, tmp);
+
+	cgts_tcc_disable = 0xffff0000;
+	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
+		cgts_tcc_disable &= ~(1 << (16 + i));
+	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
+	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
+	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
+	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
+
+	/* reprogram the shader complex */
+	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
+	for (i = 0; i < 16; i++)
+		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
+	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
+	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
+
+	/* need to be explicitly zero-ed */
+	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
+	WREG32(SQ_LSTMP_RING_BASE, 0);
+	WREG32(SQ_HSTMP_RING_BASE, 0);
+	WREG32(SQ_ESTMP_RING_BASE, 0);
+	WREG32(SQ_GSTMP_RING_BASE, 0);
+	WREG32(SQ_VSTMP_RING_BASE, 0);
+	WREG32(SQ_PSTMP_RING_BASE, 0);
+
+	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
+
+	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
+					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
+					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
+
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
+				  FETCH_FIFO_HIWATER(0x4) |
+				  DONE_FIFO_HIWATER(0xe0) |
+				  ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
+	WREG32(SQ_CONFIG, (VC_ENABLE |
+			   EXPORT_SRC_C |
+			   GFX_PRIO(0) |
+			   CS1_PRIO(0) |
+			   CS2_PRIO(1)));
+	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	WREG32(CB_PERF_CTR0_SEL_0, 0);
+	WREG32(CB_PERF_CTR0_SEL_1, 0);
+	WREG32(CB_PERF_CTR1_SEL_0, 0);
+	WREG32(CB_PERF_CTR1_SEL_1, 0);
+	WREG32(CB_PERF_CTR2_SEL_0, 0);
+	WREG32(CB_PERF_CTR2_SEL_1, 0);
+	WREG32(CB_PERF_CTR3_SEL_0, 0);
+	WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+	tmp = RREG32(HDP_MISC_CNTL);
+	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+	WREG32(HDP_MISC_CNTL, tmp);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+	udelay(50);
+}
+
+/*
+ * GART
+ */
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	/* flush hdp cache */
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+	/* bits 0-7 are the VM contexts0-7 */
+	WREG32(VM_INVALIDATE_REQUEST, 1);
+}
+
+static int cayman_pcie_gart_enable(struct radeon_device *rdev)
+{
+	int i, r;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL,
+	       (0xA << 7) |
+	       ENABLE_L1_TLB |
+	       ENABLE_L1_FRAGMENT_PROCESSING |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       ENABLE_ADVANCED_DRIVER_MODEL |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+	/* setup context0 */
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT0_CNTL2, 0);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+	WREG32(0x15D4, 0);
+	WREG32(0x15D8, 0);
+	WREG32(0x15DC, 0);
+
+	/* empty context1-7 */
+	/* Assign the pt base to something valid for now; the pts used for
+	 * the VMs are determined by the application and set up and assigned
+	 * on the fly in the vm part of radeon_gart.c
+	 */
+	for (i = 1; i < 8; i++) {
+		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
+		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
+		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+			rdev->gart.table_addr >> 12);
+	}
+
+	/* enable context1-7 */
+	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+	       (u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT1_CNTL2, 4);
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+	cayman_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void cayman_pcie_gart_disable(struct radeon_device *rdev)
+{
+	/* Disable all tables */
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void cayman_pcie_gart_fini(struct radeon_device *rdev)
+{
+	cayman_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
+
+void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+			      int ring, u32 cp_int_cntl)
+{
+	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+
+	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+	WREG32(CP_INT_CNTL, cp_int_cntl);
+}
+
+/*
+ * CP.
+ */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+			    struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
+
+	/* flush read cache over gart for this vmid */
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
+	radeon_ring_write(ring, 0xFFFFFFFF);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 10); /* poll interval */
+	/* EVENT_WRITE_EOP - flush caches, send int */
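+	/* DATA_SEL(1) selects a 32-bit data write (fence->seq below) and
+	 * INT_SEL(2) an interrupt once that write completes; the fence GPU
+	 * address is split across the next two ring dwords.
+	 */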
+	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, 0);
+}
+
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
+
+	/* set to DX10/11 mode */
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
+
+	if (ring->rptr_save_reg) {
+		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, ((ring->rptr_save_reg -
+					  PACKET3_SET_CONFIG_REG_START) >> 2));
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw |
+			  (ib->vm ? (ib->vm->id << 24) : 0));
+
+	/* flush read cache over gart for this vmid */
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
+	radeon_ring_write(ring, 0xFFFFFFFF);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
+}
+
+void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
+			       struct radeon_ring *ring,
+			       struct radeon_semaphore *semaphore,
+			       bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+	radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+}
+
+static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable)
+		WREG32(CP_ME_CNTL, 0);
+	else {
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+		WREG32(SCRATCH_UMSK, 0);
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	}
+}
+
+static int cayman_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	cayman_cp_enable(rdev, false);
+
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
+		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+static int cayman_cp_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r, i;
+
+	r = radeon_ring_lock(rdev, ring, 7);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
+
+	cayman_cp_enable(rdev, true);
+
+	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+
+	/* setup clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	for (i = 0; i < cayman_default_size; i++)
+		radeon_ring_write(ring, cayman_default_state[i]);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	/* set clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
+
+	/* SQ_VTX_BASE_VTX_LOC */
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+
+	/* Clear consts */
+	radeon_ring_write(ring, 0xc0036f00);
+	radeon_ring_write(ring, 0x00000bc4);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+
+	radeon_ring_write(ring, 0xc0026900);
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /*  */
+
+	radeon_ring_unlock_commit(rdev, ring);
+
+	/* XXX init other rings */
+
+	return 0;
+}
+
+static void cayman_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	cayman_cp_enable(rdev, false);
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+static int cayman_cp_resume(struct radeon_device *rdev)
+{
+	static const int ridx[] = {
+		RADEON_RING_TYPE_GFX_INDEX,
+		CAYMAN_RING_TYPE_CP1_INDEX,
+		CAYMAN_RING_TYPE_CP2_INDEX
+	};
+	static const unsigned cp_rb_cntl[] = {
+		CP_RB0_CNTL,
+		CP_RB1_CNTL,
+		CP_RB2_CNTL,
+	};
+	static const unsigned cp_rb_rptr_addr[] = {
+		CP_RB0_RPTR_ADDR,
+		CP_RB1_RPTR_ADDR,
+		CP_RB2_RPTR_ADDR
+	};
+	static const unsigned cp_rb_rptr_addr_hi[] = {
+		CP_RB0_RPTR_ADDR_HI,
+		CP_RB1_RPTR_ADDR_HI,
+		CP_RB2_RPTR_ADDR_HI
+	};
+	static const unsigned cp_rb_base[] = {
+		CP_RB0_BASE,
+		CP_RB1_BASE,
+		CP_RB2_BASE
+	};
+	struct radeon_ring *ring;
+	int i, r;
+
+	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+				 SOFT_RESET_PA |
+				 SOFT_RESET_SH |
+				 SOFT_RESET_VGT |
+				 SOFT_RESET_SPI |
+				 SOFT_RESET_SX));
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+	RREG32(GRBM_SOFT_RESET);
+
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	WREG32(CP_DEBUG, (1 << 27));
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+	WREG32(SCRATCH_UMSK, 0xff);
+
+	for (i = 0; i < 3; ++i) {
+		uint32_t rb_cntl;
+		uint64_t addr;
+
+		/* Set ring buffer size */
+		ring = &rdev->ring[ridx[i]];
+		rb_cntl = drm_order(ring->ring_size / 8);
+		rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
+#ifdef __BIG_ENDIAN
+		rb_cntl |= BUF_SWAP_32BIT;
+#endif
+		WREG32(cp_rb_cntl[i], rb_cntl);
+
+		/* set the wb address whether it's enabled or not */
+		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
+		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
+		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
+	}
+
+	/* set the rb base addr, this causes an internal reset of ALL rings */
+	for (i = 0; i < 3; ++i) {
+		ring = &rdev->ring[ridx[i]];
+		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
+	}
+
+	for (i = 0; i < 3; ++i) {
+		/* Initialize the ring buffer's read and write pointers */
+		ring = &rdev->ring[ridx[i]];
+		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
+
+		ring->rptr = ring->wptr = 0;
+		WREG32(ring->rptr_reg, ring->rptr);
+		WREG32(ring->wptr_reg, ring->wptr);
+
+		mdelay(1);
+		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
+	}
+
+	/* start the rings */
+	cayman_cp_start(rdev);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	/* this only tests cp0 */
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+	if (r) {
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+		return r;
+	}
+
+	return 0;
+}
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine.  The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things.  It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
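+
+/*
+ * A note on the code below (sketch, not from the original file): each DMA
+ * register is defined once and the engine instance is selected by adding
+ * DMA0_REGISTER_OFFSET or DMA1_REGISTER_OFFSET to it, e.g.
+ *
+ *	rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ */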
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+				struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	if (rdev->wb.enabled) {
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
+	 */
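+	/* The INDIRECT_BUFFER packet emitted below is 3 dwords, so padding
+	 * until (wptr & 7) == 5 makes it end exactly on the boundary:
+	 * 5 + 3 = 8.
+	 */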
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+	u32 rb_cntl;
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+	/* dma0 */
+	rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+	/* dma1 */
+	rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them. (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	u32 rb_cntl, dma_cntl, ib_cntl;
+	u32 rb_bufsz;
+	u32 reg_offset, wb_offset;
+	int i, r;
+
+	/* Reset dma */
+	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	for (i = 0; i < 2; i++) {
+		if (i == 0) {
+			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+			reg_offset = DMA0_REGISTER_OFFSET;
+			wb_offset = R600_WB_DMA_RPTR_OFFSET;
+		} else {
+			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+			reg_offset = DMA1_REGISTER_OFFSET;
+			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+		}
+
+		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+		/* Set ring buffer size in dwords */
+		rb_bufsz = drm_order(ring->ring_size / 4);
+		rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+		/* Initialize the ring buffer's read and write pointers */
+		WREG32(DMA_RB_RPTR + reg_offset, 0);
+		WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+		/* set the wb address whether it's enabled or not */
+		WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+		WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+		if (rdev->wb.enabled)
+			rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+		WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+		/* enable DMA IBs */
+		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+		ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+		WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
+
+		dma_cntl = RREG32(DMA_CNTL + reg_offset);
+		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+		WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+		ring->wptr = 0;
+		WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+		ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
+
+		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+		ring->ready = true;
+
+		r = radeon_ring_test(rdev, ring->idx, ring);
+		if (r) {
+			ring->ready = false;
+			return r;
+		}
+	}
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+	cayman_dma_stop(rdev);
+	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
+static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+	u32 reset_mask = 0;
+	u32 tmp;
+
+	/* GRBM_STATUS */
+	tmp = RREG32(GRBM_STATUS);
+	if (tmp & (PA_BUSY | SC_BUSY |
+		   SH_BUSY | SX_BUSY |
+		   TA_BUSY | VGT_BUSY |
+		   DB_BUSY | CB_BUSY |
+		   GDS_BUSY | SPI_BUSY |
+		   IA_BUSY | IA_BUSY_NO_DMA))
+		reset_mask |= RADEON_RESET_GFX;
+
+	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+		   CP_BUSY | CP_COHERENCY_BUSY))
+		reset_mask |= RADEON_RESET_CP;
+
+	if (tmp & GRBM_EE_BUSY)
+		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+	/* DMA_STATUS_REG 0 */
+	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA;
+
+	/* DMA_STATUS_REG 1 */
+	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA1;
+
+	/* SRBM_STATUS2 */
+	tmp = RREG32(SRBM_STATUS2);
+	if (tmp & DMA_BUSY)
+		reset_mask |= RADEON_RESET_DMA;
+
+	if (tmp & DMA1_BUSY)
+		reset_mask |= RADEON_RESET_DMA1;
+
+	/* SRBM_STATUS */
+	tmp = RREG32(SRBM_STATUS);
+	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+		reset_mask |= RADEON_RESET_RLC;
+
+	if (tmp & IH_BUSY)
+		reset_mask |= RADEON_RESET_IH;
+
+	if (tmp & SEM_BUSY)
+		reset_mask |= RADEON_RESET_SEM;
+
+	if (tmp & GRBM_RQ_PENDING)
+		reset_mask |= RADEON_RESET_GRBM;
+
+	if (tmp & VMC_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+		   MCC_BUSY | MCD_BUSY))
+		reset_mask |= RADEON_RESET_MC;
+
+	if (evergreen_is_display_hung(rdev))
+		reset_mask |= RADEON_RESET_DISPLAY;
+
+	/* VM_L2_STATUS */
+	tmp = RREG32(VM_L2_STATUS);
+	if (tmp & L2_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
+	return reset_mask;
+}
+
+static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct evergreen_mc_save save;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
+
+	if (reset_mask == 0)
+		return;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	evergreen_print_gpu_status_regs(rdev);
+	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
+		 RREG32(0x14F8));
+	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+		 RREG32(0x14D8));
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+		 RREG32(0x14FC));
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+		 RREG32(0x14DC));
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		/* dma0 */
+		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+	}
+
+	if (reset_mask & RADEON_RESET_DMA1) {
+		/* dma1 */
+		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+	}
+
+	udelay(50);
+
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+		grbm_soft_reset = SOFT_RESET_CB |
+			SOFT_RESET_DB |
+			SOFT_RESET_GDS |
+			SOFT_RESET_PA |
+			SOFT_RESET_SC |
+			SOFT_RESET_SPI |
+			SOFT_RESET_SH |
+			SOFT_RESET_SX |
+			SOFT_RESET_TC |
+			SOFT_RESET_TA |
+			SOFT_RESET_VGT |
+			SOFT_RESET_IA;
+	}
+
+	if (reset_mask & RADEON_RESET_CP) {
+		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+	}
+
+	if (reset_mask & RADEON_RESET_DMA)
+		srbm_soft_reset |= SOFT_RESET_DMA;
+
+	if (reset_mask & RADEON_RESET_DMA1)
+		srbm_soft_reset |= SOFT_RESET_DMA1;
+
+	if (reset_mask & RADEON_RESET_DISPLAY)
+		srbm_soft_reset |= SOFT_RESET_DC;
+
+	if (reset_mask & RADEON_RESET_RLC)
+		srbm_soft_reset |= SOFT_RESET_RLC;
+
+	if (reset_mask & RADEON_RESET_SEM)
+		srbm_soft_reset |= SOFT_RESET_SEM;
+
+	if (reset_mask & RADEON_RESET_IH)
+		srbm_soft_reset |= SOFT_RESET_IH;
+
+	if (reset_mask & RADEON_RESET_GRBM)
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+
+	if (reset_mask & RADEON_RESET_VMC)
+		srbm_soft_reset |= SOFT_RESET_VMC;
+
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		if (reset_mask & RADEON_RESET_MC)
+			srbm_soft_reset |= SOFT_RESET_MC;
+	}
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(GRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+	}
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
+	evergreen_mc_resume(rdev, &save);
+	udelay(50);
+
+	evergreen_print_gpu_status_regs(rdev);
+}
+
+int cayman_asic_reset(struct radeon_device *rdev)
+{
+	u32 reset_mask;
+
+	reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+	if (reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, true);
+
+	cayman_gpu_soft_reset(rdev, reset_mask);
+
+	reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+	if (!reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, false);
+
+	return 0;
+}
+
+/**
+ * cayman_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & (RADEON_RESET_GFX |
+			    RADEON_RESET_COMPUTE |
+			    RADEON_RESET_CP))) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force CP activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+	u32 mask;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		mask = RADEON_RESET_DMA;
+	else
+		mask = RADEON_RESET_DMA1;
+
+	if (!(reset_mask & mask)) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force ring activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+static int cayman_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+
+	/* enable pcie gen2 link */
+	evergreen_pcie_gen2_enable(rdev);
+
+	evergreen_mc_program(rdev);
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+
+		r = ni_mc_load_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load MC firmware!\n");
+			return r;
+		}
+	}
+
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	r = cayman_pcie_gart_enable(rdev);
+	if (r)
+		return r;
+	cayman_gpu_init(rdev);
+
+	r = evergreen_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy.copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r);
+	}
+
+	/* allocate rlc buffers */
+	if (rdev->flags & RADEON_IS_IGP) {
+		r = si_rlc_init(rdev);
+		if (r) {
+			DRM_ERROR("Failed to init rlc BOs!\n");
+			return r;
+		}
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = rv770_uvd_resume(rdev);
+	if (!r) {
+		r = radeon_fence_driver_start_ring(rdev,
+						   R600_RING_TYPE_UVD_INDEX);
+		if (r)
+			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+	}
+	if (r)
+		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	evergreen_irq_set(rdev);
+
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     CP_RB0_RPTR, CP_RB0_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+			     DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+			     DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+			     DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
+	r = cayman_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = cayman_cp_resume(rdev);
+	if (r)
+		return r;
+
+	r = cayman_dma_resume(rdev);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	if (ring->ring_size) {
+		r = radeon_ring_init(rdev, ring, ring->ring_size,
+				     R600_WB_UVD_RPTR_OFFSET,
+				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+				     0, 0xfffff, RADEON_CP_PACKET2);
+		if (!r)
+			r = r600_uvd_init(rdev);
+		if (r)
+			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_vm_manager_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = r600_audio_init(rdev);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+int cayman_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Do not reset the GPU before posting: on rv770 hw, unlike r500 hw,
+	 * posting will perform the tasks necessary to bring the GPU back
+	 * into good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	/* init golden registers */
+	ni_init_golden_registers(rdev);
+
+	rdev->accel_working = true;
+	r = cayman_startup(rdev);
+	if (r) {
+		DRM_ERROR("cayman startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+	return r;
+}
+
+int cayman_suspend(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	cayman_cp_enable(rdev, false);
+	cayman_dma_stop(rdev);
+	r600_uvd_stop(rdev);
+	radeon_uvd_suspend(rdev);
+	evergreen_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	cayman_pcie_gart_disable(rdev);
+	return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does little more
+ * than call asic-specific functions. This should also allow the
+ * removal of a number of callback functions, such as vram_info.
+ */
+int cayman_init(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* init golden registers */
+	ni_init_golden_registers(rdev);
+	/* Initialize scratch registers */
+	r600_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* initialize memory controller */
+	r = evergreen_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	r = radeon_uvd_init(rdev);
+	if (!r) {
+		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+		ring->ring_obj = NULL;
+		r600_ring_init(rdev, ring, 4096);
+	}
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = cayman_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		cayman_cp_fini(rdev);
+		cayman_dma_fini(rdev);
+		r600_irq_fini(rdev);
+		if (rdev->flags & RADEON_IS_IGP)
+			si_rlc_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_vm_manager_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		cayman_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	/* Don't start up if the MC ucode is missing.
+	 * The default clocks and voltages before the MC ucode
+ * is loaded are not sufficient for advanced operations.
+	 *
+	 * We can skip this check for TN, because there is no MC
+	 * ucode.
+	 */
+	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+		DRM_ERROR("radeon: MC ucode required for NI+.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void cayman_fini(struct radeon_device *rdev)
+{
+	r600_blit_fini(rdev);
+	cayman_cp_fini(rdev);
+	cayman_dma_fini(rdev);
+	r600_irq_fini(rdev);
+	if (rdev->flags & RADEON_IS_IGP)
+		si_rlc_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	r600_uvd_stop(rdev);
+	radeon_uvd_fini(rdev);
+	cayman_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+/*
+ * vm
+ */
+int cayman_vm_init(struct radeon_device *rdev)
+{
+	/* number of VMs */
+	rdev->vm_manager.nvm = 8;
+	/* base offset of vram pages */
+	if (rdev->flags & RADEON_IS_IGP) {
+		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
+		tmp <<= 22;
+		rdev->vm_manager.vram_base_offset = tmp;
+	} else
+		rdev->vm_manager.vram_base_offset = 0;
+	return 0;
+}
+
+void cayman_vm_fini(struct radeon_device *rdev)
+{
+}
+
+#define R600_ENTRY_VALID   (1 << 0)
+#define R600_PTE_SYSTEM    (1 << 1)
+#define R600_PTE_SNOOPED   (1 << 2)
+#define R600_PTE_READABLE  (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
+{
+	uint32_t r600_flags = 0;
+	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
+	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+	if (flags & RADEON_VM_PAGE_SYSTEM) {
+		r600_flags |= R600_PTE_SYSTEM;
+		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+	}
+	return r600_flags;
+}
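+
+/* Worked example (illustrative): a valid, readable, writeable VRAM page
+ * (RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE)
+ * maps to R600_ENTRY_VALID | R600_PTE_READABLE | R600_PTE_WRITEABLE
+ * = (1 << 0) | (1 << 5) | (1 << 6) = 0x61.
+ */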
+
+/**
+ * cayman_vm_set_page - update the page tables using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the CP (cayman/TN).
+ */
+void cayman_vm_set_page(struct radeon_device *rdev,
+			struct radeon_ib *ib,
+			uint64_t pe,
+			uint64_t addr, unsigned count,
+			uint32_t incr, uint32_t flags)
+{
+	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+	uint64_t value;
+	unsigned ndw;
+
+	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+		while (count) {
+			ndw = 1 + count * 2;
+			if (ndw > 0x3FFF)
+				ndw = 0x3FFF;
+
+			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
+			ib->ptr[ib->length_dw++] = pe;
+			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+			for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+				if (flags & RADEON_VM_PAGE_SYSTEM) {
+					value = radeon_vm_map_gart(rdev, addr);
+					value &= 0xFFFFFFFFFFFFF000ULL;
+				} else if (flags & RADEON_VM_PAGE_VALID) {
+					value = addr;
+				} else {
+					value = 0;
+				}
+				addr += incr;
+				value |= r600_flags;
+				ib->ptr[ib->length_dw++] = value;
+				ib->ptr[ib->length_dw++] = upper_32_bits(value);
+			}
+		}
+	} else {
+		if ((flags & RADEON_VM_PAGE_SYSTEM) ||
+		    (count == 1)) {
+			while (count) {
+				ndw = count * 2;
+				if (ndw > 0xFFFFE)
+					ndw = 0xFFFFE;
+
+				/* for non-physically contiguous pages (system) */
+				ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+				ib->ptr[ib->length_dw++] = pe;
+				ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+				for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+					if (flags & RADEON_VM_PAGE_SYSTEM) {
+						value = radeon_vm_map_gart(rdev, addr);
+						value &= 0xFFFFFFFFFFFFF000ULL;
+					} else if (flags & RADEON_VM_PAGE_VALID) {
+						value = addr;
+					} else {
+						value = 0;
+					}
+					addr += incr;
+					value |= r600_flags;
+					ib->ptr[ib->length_dw++] = value;
+					ib->ptr[ib->length_dw++] = upper_32_bits(value);
+				}
+			}
+			while (ib->length_dw & 0x7)
+				ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+		} else {
+			while (count) {
+				ndw = count * 2;
+				if (ndw > 0xFFFFE)
+					ndw = 0xFFFFE;
+
+				if (flags & RADEON_VM_PAGE_VALID)
+					value = addr;
+				else
+					value = 0;
+				/* for physically contiguous pages (vram) */
+				ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+				ib->ptr[ib->length_dw++] = pe; /* dst addr */
+				ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+				ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+				ib->ptr[ib->length_dw++] = 0;
+				ib->ptr[ib->length_dw++] = value; /* value */
+				ib->ptr[ib->length_dw++] = upper_32_bits(value);
+				ib->ptr[ib->length_dw++] = incr; /* increment size */
+				ib->ptr[ib->length_dw++] = 0;
+				pe += ndw * 4;
+				addr += (ndw / 2) * incr;
+				count -= ndw / 2;
+			}
+		}
+		while (ib->length_dw & 0x7)
+			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+	}
+}
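+
+/* Worked example (illustrative): for 256 physically contiguous VRAM pages
+ * with incr = 4096, the DMA path above emits a single DMA_PTE_PDE_PACKET
+ * with ndw = count * 2 = 512.  That one packet updates ndw / 2 = 256
+ * eight-byte PTEs starting at pe (pe advances by ndw * 4 = 2048 bytes)
+ * while addr advances by (ndw / 2) * incr = 1 MiB.
+ */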
+
+/**
+ * cayman_vm_flush - vm flush using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: radeon ring index
+ * @vm: radeon_vm pointer
+ *
+ * Update the page table base and flush the VM TLB
+ * using the CP (cayman-si).
+ */
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	if (vm == NULL)
+		return;
+
+	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+	radeon_ring_write(ring, 0x1);
+
+	/* bits 0-7 are the VM contexts 0-7 */
+	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
+	radeon_ring_write(ring, 1 << vm->id);
+
+	/* sync PFP to ME, otherwise we might get invalid PFP reads */
+	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+	radeon_ring_write(ring, 0x0);
+}
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	if (vm == NULL)
+		return;
+
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+	radeon_ring_write(ring, 1);
+
+	/* bits 0-7 are the VM contexts 0-7 */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+	radeon_ring_write(ring, 1 << vm->id);
+}
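+
+/* Worked example (illustrative): each SRBM_WRITE above is a 3-dword
+ * sequence: the DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0) header
+ * (0x90000000), a dword combining what appears to be a byte-enable mask
+ * (0xf << 16) with the register's dword address (reg >> 2), and the value
+ * to write.  For VM_INVALIDATE_REQUEST (0x1478) this gives:
+ *
+ *   0x90000000, (0xf << 16) | (0x1478 >> 2) = 0x000F051E, 1 << vm->id
+ */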
+
diff --git a/linux-imx/drivers/gpu/drm/radeon/ni_reg.h b/linux-imx/drivers/gpu/drm/radeon/ni_reg.h
new file mode 100644
index 0000000..5db7b7d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/ni_reg.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __NI_REG_H__
+#define __NI_REG_H__
+
+/* northern islands - DCE5 */
+
+#define NI_INPUT_GAMMA_CONTROL                         0x6840
+#       define NI_GRPH_INPUT_GAMMA_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_INPUT_GAMMA_USE_LUT                  0
+#       define NI_INPUT_GAMMA_BYPASS                   1
+#       define NI_INPUT_GAMMA_SRGB_24                  2
+#       define NI_INPUT_GAMMA_XVYCC_222                3
+#       define NI_OVL_INPUT_GAMMA_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_PRESCALE_GRPH_CONTROL                       0x68b4
+#       define NI_GRPH_PRESCALE_BYPASS                 (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL                        0x68c4
+#       define NI_OVL_PRESCALE_BYPASS                  (1 << 4)
+
+#define NI_INPUT_CSC_CONTROL                           0x68d4
+#       define NI_INPUT_CSC_GRPH_MODE(x)               (((x) & 0x3) << 0)
+#       define NI_INPUT_CSC_BYPASS                     0
+#       define NI_INPUT_CSC_PROG_COEFF                 1
+#       define NI_INPUT_CSC_PROG_SHARED_MATRIXA        2
+#       define NI_INPUT_CSC_OVL_MODE(x)                (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL                          0x68f0
+#       define NI_OUTPUT_CSC_GRPH_MODE(x)              (((x) & 0x7) << 0)
+#       define NI_OUTPUT_CSC_BYPASS                    0
+#       define NI_OUTPUT_CSC_TV_RGB                    1
+#       define NI_OUTPUT_CSC_YCBCR_601                 2
+#       define NI_OUTPUT_CSC_YCBCR_709                 3
+#       define NI_OUTPUT_CSC_PROG_COEFF                4
+#       define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB       5
+#       define NI_OUTPUT_CSC_OVL_MODE(x)               (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL                             0x6960
+#       define NI_GRPH_DEGAMMA_MODE(x)                 (((x) & 0x3) << 0)
+#       define NI_DEGAMMA_BYPASS                       0
+#       define NI_DEGAMMA_SRGB_24                      1
+#       define NI_DEGAMMA_XVYCC_222                    2
+#       define NI_OVL_DEGAMMA_MODE(x)                  (((x) & 0x3) << 4)
+#       define NI_ICON_DEGAMMA_MODE(x)                 (((x) & 0x3) << 8)
+#       define NI_CURSOR_DEGAMMA_MODE(x)               (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL                         0x6964
+#       define NI_GRPH_GAMUT_REMAP_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_GAMUT_REMAP_BYPASS                   0
+#       define NI_GAMUT_REMAP_PROG_COEFF               1
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA      2
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB      3
+#       define NI_OVL_GAMUT_REMAP_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL                             0x6a80
+#       define NI_GRPH_REGAMMA_MODE(x)                 (((x) & 0x7) << 0)
+#       define NI_REGAMMA_BYPASS                       0
+#       define NI_REGAMMA_SRGB_24                      1
+#       define NI_REGAMMA_XVYCC_222                    2
+#       define NI_REGAMMA_PROG_A                       3
+#       define NI_REGAMMA_PROG_B                       4
+#       define NI_OVL_REGAMMA_MODE(x)                  (((x) & 0x7) << 4)
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/nid.h b/linux-imx/drivers/gpu/drm/radeon/nid.h
new file mode 100644
index 0000000..e49f7b4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/nid.h
@@ -0,0 +1,704 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef NI_H
+#define NI_H
+
+#define CAYMAN_MAX_SH_GPRS           256
+#define CAYMAN_MAX_TEMP_GPRS         16
+#define CAYMAN_MAX_SH_THREADS        256
+#define CAYMAN_MAX_SH_STACK_ENTRIES  4096
+#define CAYMAN_MAX_FRC_EOV_CNT       16384
+#define CAYMAN_MAX_BACKENDS          8
+#define CAYMAN_MAX_BACKENDS_MASK     0xFF
+#define CAYMAN_MAX_BACKENDS_PER_SE_MASK 0xF
+#define CAYMAN_MAX_SIMDS             16
+#define CAYMAN_MAX_SIMDS_MASK        0xFFFF
+#define CAYMAN_MAX_SIMDS_PER_SE_MASK 0xFFF
+#define CAYMAN_MAX_PIPES             8
+#define CAYMAN_MAX_PIPES_MASK        0xFF
+#define CAYMAN_MAX_LDS_NUM           0xFFFF
+#define CAYMAN_MAX_TCC               16
+#define CAYMAN_MAX_TCC_MASK          0xFF
+
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN       0x02011003
+#define ARUBA_GB_ADDR_CONFIG_GOLDEN        0x12010001
+
+#define DMIF_ADDR_CONFIG  				0xBD4
+
+/* DCE6 only */
+#define DMIF_ADDR_CALC  				0xC00
+
+#define	SRBM_GFX_CNTL				        0x0E44
+#define		RINGID(x)					(((x) & 0x3) << 0)
+#define		VMID(x)						(((x) & 0x7) << 0)
+#define	SRBM_STATUS				        0x0E50
+#define		RLC_RQ_PENDING 				(1 << 3)
+#define		GRBM_RQ_PENDING 			(1 << 5)
+#define		VMC_BUSY 				(1 << 8)
+#define		MCB_BUSY 				(1 << 9)
+#define		MCB_NON_DISPLAY_BUSY 			(1 << 10)
+#define		MCC_BUSY 				(1 << 11)
+#define		MCD_BUSY 				(1 << 12)
+#define		SEM_BUSY 				(1 << 14)
+#define		RLC_BUSY 				(1 << 15)
+#define		IH_BUSY 				(1 << 17)
+
+#define	SRBM_SOFT_RESET				        0x0E60
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_CG				(1 << 2)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_DMA1				(1 << 6)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_RLC				(1 << 13)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_DMA				(1 << 20)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB			(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+
+#define	SRBM_STATUS2				        0x0EC4
+#define		DMA_BUSY 				(1 << 5)
+#define		DMA1_BUSY 				(1 << 6)
+
+#define VM_CONTEXT0_REQUEST_RESPONSE			0x1470
+#define		REQUEST_TYPE(x)					(((x) & 0xf) << 0)
+#define		RESPONSE_TYPE_MASK				0x000000F0
+#define		RESPONSE_TYPE_SHIFT				4
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE	(1 << 10)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 14)
+#define		CONTEXT1_IDENTITY_ACCESS_MODE(x)		(((x) & 3) << 18)
+/* CONTEXT1_IDENTITY_ACCESS_MODE
+ * 0 physical = logical
+ * 1 logical via context1 page table
+ * 2 inside identity aperture use translation, outside physical = logical
+ * 3 inside identity aperture physical = logical, outside use translation
+ */
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		CACHE_UPDATE_MODE(x)				((x) << 6)
+#define		L2_CACHE_BIGK_ASSOCIATIVITY			(1 << 20)
+#define		L2_CACHE_BIGK_FRAGMENT_SIZE(x)			((x) << 15)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 3)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT	(1 << 6)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 7)
+#define		PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 9)
+#define		PDE0_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 10)
+#define		VALID_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 12)
+#define		VALID_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 13)
+#define		READ_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 15)
+#define		READ_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 16)
+#define		WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 18)
+#define		WRITE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 19)
+#define VM_CONTEXT1_CNTL				0x1414
+#define VM_CONTEXT0_CNTL2				0x1430
+#define VM_CONTEXT1_CNTL2				0x1434
+#define VM_INVALIDATE_REQUEST				0x1478
+#define VM_INVALIDATE_RESPONSE				0x147c
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR	0x151c
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153C
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155C
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x00003000
+#define MC_SHARED_CHREMAP					0x2008
+
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+#define	MC_VM_MX_L1_TLB_CNTL				0x2064
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		ENABLE_ADVANCED_DRIVER_MODEL			(1 << 6)
+#define	FUS_MC_VM_FB_OFFSET				0x2068
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		BURSTLENGTH_SHIFT				9
+#define		BURSTLENGTH_MASK				0x00000200
+#define		CHANSIZE_OVERRIDE				(1 << 11)
+#define MC_SEQ_SUP_CNTL           			0x28c8
+#define		RUN_MASK      				(1 << 0)
+#define MC_SEQ_SUP_PGM           			0x28cc
+#define MC_IO_PAD_CNTL_D0           			0x29d0
+#define		MEM_FALL_OUT_CMD      			(1 << 8)
+#define MC_SEQ_MISC0           				0x2a00
+#define		MC_SEQ_MISC0_GDDR5_SHIFT      		28
+#define		MC_SEQ_MISC0_GDDR5_MASK      		0xf0000000
+#define		MC_SEQ_MISC0_GDDR5_VALUE      		5
+#define MC_SEQ_IO_DEBUG_INDEX           		0x2a44
+#define MC_SEQ_IO_DEBUG_DATA           			0x2a48
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+#define HDP_ADDR_CONFIG  				0x2F48
+#define HDP_MISC_CNTL					0x2F4C
+#define 	HDP_FLUSH_INVALIDATE_CACHE			(1 << 0)
+
+#define	CC_SYS_RB_BACKEND_DISABLE			0x3F88
+#define	GC_USER_SYS_RB_BACKEND_DISABLE			0x3F8C
+#define	CGTS_SYS_TCC_DISABLE				0x3F90
+#define	CGTS_USER_SYS_TCC_DISABLE			0x3F94
+
+#define RLC_GFX_INDEX           			0x3FC4
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL			0x5480
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		RING2_RQ_PENDING				(1 << 4)
+#define		SRBM_RQ_PENDING					(1 << 5)
+#define		RING1_RQ_PENDING				(1 << 6)
+#define		CF_RQ_PENDING					(1 << 7)
+#define		PF_RQ_PENDING					(1 << 8)
+#define		GDS_DMA_RQ_PENDING				(1 << 9)
+#define		GRBM_EE_BUSY					(1 << 10)
+#define		SX_CLEAN					(1 << 11)
+#define		DB_CLEAN					(1 << 12)
+#define		CB_CLEAN					(1 << 13)
+#define		TA_BUSY 					(1 << 14)
+#define		GDS_BUSY 					(1 << 15)
+#define		VGT_BUSY_NO_DMA					(1 << 16)
+#define		VGT_BUSY					(1 << 17)
+#define		IA_BUSY_NO_DMA					(1 << 18)
+#define		IA_BUSY						(1 << 19)
+#define		SX_BUSY 					(1 << 20)
+#define		SH_BUSY 					(1 << 21)
+#define		SPI_BUSY					(1 << 22)
+#define		SC_BUSY 					(1 << 24)
+#define		PA_BUSY 					(1 << 25)
+#define		DB_BUSY 					(1 << 26)
+#define		CP_COHERENCY_BUSY      				(1 << 28)
+#define		CP_BUSY 					(1 << 29)
+#define		CB_BUSY 					(1 << 30)
+#define		GUI_ACTIVE					(1 << 31)
+#define	GRBM_STATUS_SE0					0x8014
+#define	GRBM_STATUS_SE1					0x8018
+#define		SE_SX_CLEAN					(1 << 0)
+#define		SE_DB_CLEAN					(1 << 1)
+#define		SE_CB_CLEAN					(1 << 2)
+#define		SE_VGT_BUSY					(1 << 23)
+#define		SE_PA_BUSY					(1 << 24)
+#define		SE_TA_BUSY					(1 << 25)
+#define		SE_SX_BUSY					(1 << 26)
+#define		SE_SPI_BUSY					(1 << 27)
+#define		SE_SH_BUSY					(1 << 28)
+#define		SE_SC_BUSY					(1 << 29)
+#define		SE_DB_BUSY					(1 << 30)
+#define		SE_CB_BUSY					(1 << 31)
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1 << 0)
+#define		SOFT_RESET_CB					(1 << 1)
+#define		SOFT_RESET_DB					(1 << 3)
+#define		SOFT_RESET_GDS					(1 << 4)
+#define		SOFT_RESET_PA					(1 << 5)
+#define		SOFT_RESET_SC					(1 << 6)
+#define		SOFT_RESET_SPI					(1 << 8)
+#define		SOFT_RESET_SH					(1 << 9)
+#define		SOFT_RESET_SX					(1 << 10)
+#define		SOFT_RESET_TC					(1 << 11)
+#define		SOFT_RESET_TA					(1 << 12)
+#define		SOFT_RESET_VGT					(1 << 14)
+#define		SOFT_RESET_IA					(1 << 15)
+
+#define GRBM_GFX_INDEX          			0x802C
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1 << 31)
+
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+#define	CP_SEM_WAIT_TIMER				0x85BC
+#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x85C8
+#define	CP_COHER_CNTL2					0x85E8
+#define	CP_STALLED_STAT1			0x8674
+#define	CP_STALLED_STAT2			0x8678
+#define	CP_BUSY_STAT				0x867C
+#define	CP_STAT						0x8680
+#define CP_ME_CNTL					0x86D8
+#define		CP_ME_HALT					(1 << 28)
+#define		CP_PFP_HALT					(1 << 26)
+#define	CP_RB2_RPTR					0x86f8
+#define	CP_RB1_RPTR					0x86fc
+#define	CP_RB0_RPTR					0x8700
+#define	CP_RB_WPTR_DELAY				0x8704
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		MEQ1_START(x)				((x) << 0)
+#define		MEQ2_START(x)				((x) << 8)
+#define	CP_PERFMON_CNTL					0x87FC
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x) << 0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+
+#define CC_GC_SHADER_PIPE_CONFIG			0x8950
+#define	GC_USER_SHADER_PIPE_CONFIG			0x8954
+#define		INACTIVE_QD_PIPES(x)				((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_QD_PIPES_SHIFT				8
+#define		INACTIVE_SIMDS(x)				((x) << 16)
+#define		INACTIVE_SIMDS_MASK				0xFFFF0000
+#define		INACTIVE_SIMDS_SHIFT				16
+
+#define VGT_PRIMITIVE_TYPE                              0x8958
+#define	VGT_NUM_INSTANCES				0x8974
+#define VGT_TF_RING_SIZE				0x8988
+#define VGT_OFFCHIP_LDS_BASE				0x89b4
+
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_PRIM_FIFO_SIZE(x)				((x) << 0)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 12)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 20)
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
+
+#define	SQ_CONFIG					0x8C00
+#define		VC_ENABLE					(1 << 0)
+#define		EXPORT_SRC_C					(1 << 1)
+#define		GFX_PRIO(x)					((x) << 2)
+#define		CS1_PRIO(x)					((x) << 4)
+#define		CS2_PRIO(x)					((x) << 6)
+#define	SQ_GPR_RESOURCE_MGMT_1				0x8C04
+#define		NUM_PS_GPRS(x)					((x) << 0)
+#define		NUM_VS_GPRS(x)					((x) << 16)
+#define		NUM_CLAUSE_TEMP_GPRS(x)				((x) << 28)
+#define SQ_ESGS_RING_SIZE				0x8c44
+#define SQ_GSVS_RING_SIZE				0x8c4c
+#define SQ_ESTMP_RING_BASE				0x8c50
+#define SQ_ESTMP_RING_SIZE				0x8c54
+#define SQ_GSTMP_RING_BASE				0x8c58
+#define SQ_GSTMP_RING_SIZE				0x8c5c
+#define SQ_VSTMP_RING_BASE				0x8c60
+#define SQ_VSTMP_RING_SIZE				0x8c64
+#define SQ_PSTMP_RING_BASE				0x8c68
+#define SQ_PSTMP_RING_SIZE				0x8c6c
+#define	SQ_MS_FIFO_SIZES				0x8CF0
+#define		CACHE_FIFO_SIZE(x)				((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)				((x) << 8)
+#define		DONE_FIFO_HIWATER(x)				((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)			((x) << 24)
+#define SQ_LSTMP_RING_BASE				0x8e10
+#define SQ_LSTMP_RING_SIZE				0x8e14
+#define SQ_HSTMP_RING_BASE				0x8e18
+#define SQ_HSTMP_RING_SIZE				0x8e1c
+#define	SQ_DYN_GPR_CNTL_PS_FLUSH_REQ    		0x8D8C
+#define		DYN_GPR_ENABLE					(1 << 8)
+#define SQ_CONST_MEM_BASE				0x8df8
+
+#define	SX_EXPORT_BUFFER_SIZES				0x900C
+#define		COLOR_BUFFER_SIZE(x)				((x) << 0)
+#define		POSITION_BUFFER_SIZE(x)				((x) << 8)
+#define		SMX_BUFFER_SIZE(x)				((x) << 16)
+#define	SX_DEBUG_1					0x9058
+#define		ENABLE_NEW_SMX_ADDRESS				(1 << 16)
+
+#define	SPI_CONFIG_CNTL					0x9100
+#define		GPR_WRITE_PRIORITY(x)				((x) << 0)
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+#define		CRC_SIMD_ID_WADDR_DISABLE			(1 << 8)
+
+#define	CGTS_TCC_DISABLE				0x9148
+#define	CGTS_USER_TCC_DISABLE				0x914C
+#define		TCC_DISABLE_MASK				0xFFFF0000
+#define		TCC_DISABLE_SHIFT				16
+#define	CGTS_SM_CTRL_REG				0x9150
+#define		OVERRIDE				(1 << 21)
+
+#define	TA_CNTL_AUX					0x9508
+#define		DISABLE_CUBE_WRAP				(1 << 0)
+#define		DISABLE_CUBE_ANISO				(1 << 1)
+
+#define	TCP_CHAN_STEER_LO				0x960c
+#define	TCP_CHAN_STEER_HI				0x9610
+
+#define CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)     			((x) << 16)
+#define GB_ADDR_CONFIG  				0x98F8
+#define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x00000007
+#define		NUM_PIPES_SHIFT				0
+#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
+#define		PIPE_INTERLEAVE_SIZE_MASK		0x00000070
+#define		PIPE_INTERLEAVE_SIZE_SHIFT		4
+#define		BANK_INTERLEAVE_SIZE(x)			((x) << 8)
+#define		NUM_SHADER_ENGINES(x)			((x) << 12)
+#define		NUM_SHADER_ENGINES_MASK			0x00003000
+#define		NUM_SHADER_ENGINES_SHIFT		12
+#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
+#define		SHADER_ENGINE_TILE_SIZE_MASK		0x00070000
+#define		SHADER_ENGINE_TILE_SIZE_SHIFT		16
+#define		NUM_GPUS(x)     			((x) << 20)
+#define		NUM_GPUS_MASK				0x00700000
+#define		NUM_GPUS_SHIFT				20
+#define		MULTI_GPU_TILE_SIZE(x)     		((x) << 24)
+#define		MULTI_GPU_TILE_SIZE_MASK		0x03000000
+#define		MULTI_GPU_TILE_SIZE_SHIFT		24
+#define		ROW_SIZE(x)             		((x) << 28)
+#define		ROW_SIZE_MASK				0x30000000
+#define		ROW_SIZE_SHIFT				28
+#define		NUM_LOWER_PIPES(x)			((x) << 30)
+#define		NUM_LOWER_PIPES_MASK			0x40000000
+#define		NUM_LOWER_PIPES_SHIFT			30
+#define GB_BACKEND_MAP  				0x98FC
+
+#define CB_PERF_CTR0_SEL_0				0x9A20
+#define CB_PERF_CTR0_SEL_1				0x9A24
+#define CB_PERF_CTR1_SEL_0				0x9A28
+#define CB_PERF_CTR1_SEL_1				0x9A2C
+#define CB_PERF_CTR2_SEL_0				0x9A30
+#define CB_PERF_CTR2_SEL_1				0x9A34
+#define CB_PERF_CTR3_SEL_0				0x9A38
+#define CB_PERF_CTR3_SEL_1				0x9A3C
+
+#define	GC_USER_RB_BACKEND_DISABLE			0x9B7C
+#define		BACKEND_DISABLE_MASK			0x00FF0000
+#define		BACKEND_DISABLE_SHIFT			16
+
+#define	SMX_DC_CTL0					0xA020
+#define		USE_HASH_FUNCTION				(1 << 0)
+#define		NUMBER_OF_SETS(x)				((x) << 1)
+#define		FLUSH_ALL_ON_EVENT				(1 << 10)
+#define		STALL_ON_EVENT					(1 << 11)
+#define	SMX_EVENT_CTL					0xA02C
+#define		ES_FLUSH_CTL(x)					((x) << 0)
+#define		GS_FLUSH_CTL(x)					((x) << 3)
+#define		ACK_FLUSH_CTL(x)				((x) << 6)
+#define		SYNC_FLUSH_CTL					(1 << 8)
+
+#define	CP_RB0_BASE					0xC100
+#define	CP_RB0_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define	CP_RB0_RPTR_ADDR				0xC10C
+#define	CP_RB0_RPTR_ADDR_HI				0xC110
+#define	CP_RB0_WPTR					0xC114
+
+#define CP_INT_CNTL                                     0xC124
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+
+#define	CP_RB1_BASE					0xC180
+#define	CP_RB1_CNTL					0xC184
+#define	CP_RB1_RPTR_ADDR				0xC188
+#define	CP_RB1_RPTR_ADDR_HI				0xC18C
+#define	CP_RB1_WPTR					0xC190
+#define	CP_RB2_BASE					0xC194
+#define	CP_RB2_CNTL					0xC198
+#define	CP_RB2_RPTR_ADDR				0xC19C
+#define	CP_RB2_RPTR_ADDR_HI				0xC1A0
+#define	CP_RB2_WPTR					0xC1A4
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define	CP_ME_RAM_DATA					0xC160
+#define	CP_DEBUG					0xC1FC
+
+#define VGT_EVENT_INITIATOR                             0x28a90
+#       define CACHE_FLUSH_AND_INV_EVENT_TS                     (0x14 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                        (0x16 << 0)
+
+/*
+ * UVD
+ */
+#define UVD_SEMA_ADDR_LOW				0xEF00
+#define UVD_SEMA_ADDR_HIGH				0xEF04
+#define UVD_SEMA_CMD					0xEF08
+#define UVD_UDEC_ADDR_CONFIG				0xEF4C
+#define UVD_UDEC_DB_ADDR_CONFIG				0xEF50
+#define UVD_UDEC_DBW_ADDR_CONFIG			0xEF54
+#define UVD_RBC_RB_RPTR					0xF690
+#define UVD_RBC_RB_WPTR					0xF694
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |			\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |			\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
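+
+/* Worked examples (illustrative, assuming RADEON_PACKET_TYPE0/TYPE3 take
+ * the usual values 0 and 3):
+ *
+ *   PACKET0(VM_INVALIDATE_REQUEST, 0)
+ *     = (0 << 30) | ((0x1478 >> 2) & 0xFFFF) | (0 << 16) = 0x0000051E
+ *
+ *   PACKET3(PACKET3_PFP_SYNC_ME, 0)
+ *     = (3 << 30) | (0x42 << 8) | (0 << 16) = 0xC0004200
+ */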
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_SET_BASE				0x11
+#define	PACKET3_CLEAR_STATE				0x12
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
+#define	PACKET3_DEALLOC_STATE				0x14
+#define	PACKET3_DISPATCH_DIRECT				0x15
+#define	PACKET3_DISPATCH_INDIRECT			0x16
+#define	PACKET3_INDIRECT_BUFFER_END			0x17
+#define	PACKET3_MODE_CONTROL				0x18
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_DRAW_INDIRECT				0x24
+#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
+#define	PACKET3_INDEX_BASE				0x26
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_DRAW_INDEX_OFFSET			0x29
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDEX				0x2B
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
+#define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
+#define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
+#define	PACKET3_WRITE_DATA				0x37
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_PFP_SYNC_ME				0x42
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_CB8_DEST_BASE_ENA    (1 << 15)
+#              define PACKET3_CB9_DEST_BASE_ENA    (1 << 16)
+#              define PACKET3_CB10_DEST_BASE_ENA   (1 << 17)
+#              define PACKET3_CB11_DEST_BASE_ENA   (1 << 18)
+#              define PACKET3_FULL_CACHE_ENA       (1 << 20)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_ACTION_ENA        (1 << 27)
+#              define PACKET3_SX_ACTION_ENA        (1 << 28)
+#              define PACKET3_ENGINE_ME            (1 << 31)
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define		EVENT_TYPE(x)                           ((x) << 0)
+#define		EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+		 * 1 - ZPASS_DONE
+		 * 2 - SAMPLE_PIPELINESTAT
+		 * 3 - SAMPLE_STREAMOUTSTAT*
+		 * 4 - *S_PARTIAL_FLUSH
+		 * 5 - TS events
+		 */
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define		DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+		 * 1 - send low 32bit data
+		 * 2 - send 64bit data
+		 * 3 - send 64bit counter value
+		 */
+#define		INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+		 * 1 - interrupt only (DATA_SEL = 0)
+		 * 2 - interrupt when data write is confirmed
+		 */
+#define	PACKET3_EVENT_WRITE_EOS				0x48
+#define	PACKET3_PREAMBLE_CNTL				0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define	PACKET3_ALU_PS_CONST_BUFFER_COPY		0x4C
+#define	PACKET3_ALU_VS_CONST_BUFFER_COPY		0x4D
+#define	PACKET3_ALU_PS_CONST_UPDATE		        0x4E
+#define	PACKET3_ALU_VS_CONST_UPDATE		        0x4F
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_START			0x00008000
+#define		PACKET3_SET_CONFIG_REG_END			0x0000ac00
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_START			0x00028000
+#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
+#define	PACKET3_SET_ALU_CONST				0x6A
+/* alu const buffers only; no reg file */
+#define	PACKET3_SET_BOOL_CONST				0x6B
+#define		PACKET3_SET_BOOL_CONST_START			0x0003a500
+#define		PACKET3_SET_BOOL_CONST_END			0x0003a518
+#define	PACKET3_SET_LOOP_CONST				0x6C
+#define		PACKET3_SET_LOOP_CONST_START			0x0003a200
+#define		PACKET3_SET_LOOP_CONST_END			0x0003a500
+#define	PACKET3_SET_RESOURCE				0x6D
+#define		PACKET3_SET_RESOURCE_START			0x00030000
+#define		PACKET3_SET_RESOURCE_END			0x00038000
+#define	PACKET3_SET_SAMPLER				0x6E
+#define		PACKET3_SET_SAMPLER_START			0x0003c000
+#define		PACKET3_SET_SAMPLER_END				0x0003c600
+#define	PACKET3_SET_CTL_CONST				0x6F
+#define		PACKET3_SET_CTL_CONST_START			0x0003cff0
+#define		PACKET3_SET_CTL_CONST_END			0x0003ff0c
+#define	PACKET3_SET_RESOURCE_OFFSET			0x70
+#define	PACKET3_SET_ALU_CONST_VS			0x71
+#define	PACKET3_SET_ALU_CONST_DI			0x72
+#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
+#define	PACKET3_SET_RESOURCE_INDIRECT			0x74
+#define	PACKET3_SET_APPEND_CNT			        0x75
+#define	PACKET3_ME_WRITE				0x7A
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x800 /* not a register */
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#       define CMD_VMID_FORCE                             (1 << 31)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0xd048
+#define DMA_TILING_CONFIG  				  0xd0b8
+#define DMA_MODE                                          0xd0bc
+
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)	((((cmd) & 0xF) << 28) |	\
+					 (((vmid) & 0xF) << 20) |	\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n)		((2 << 28) |			\
+					 (1 << 26) |			\
+					 (1 << 21) |			\
+					 (((n) & 0xFFFFF) << 0))
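+
+/* Worked examples (illustrative):
+ *
+ *   DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0)
+ *     = (0x4 << 28) | (0x0 << 20) | 0x0 = 0x40000000
+ *
+ *   DMA_PTE_PDE_PACKET(512)
+ *     = (2 << 28) | (1 << 26) | (1 << 21) | 512 = 0x24200200
+ */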
+
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_SRBM_WRITE				  0x9
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_NOP					  0xf
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/r100.c b/linux-imx/drivers/gpu/drm/radeon/r100.c
new file mode 100644
index 0000000..46470dd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r100.c
@@ -0,0 +1,4113 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r100d.h"
+#include "rs100d.h"
+#include "rv200d.h"
+#include "rv250d.h"
+#include "atom.h"
+
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+
+#include "r100_reg_safe.h"
+#include "rn50_reg_safe.h"
+
+/* Firmware Names */
+#define FIRMWARE_R100		"radeon/R100_cp.bin"
+#define FIRMWARE_R200		"radeon/R200_cp.bin"
+#define FIRMWARE_R300		"radeon/R300_cp.bin"
+#define FIRMWARE_R420		"radeon/R420_cp.bin"
+#define FIRMWARE_RS690		"radeon/RS690_cp.bin"
+#define FIRMWARE_RS600		"radeon/RS600_cp.bin"
+#define FIRMWARE_R520		"radeon/R520_cp.bin"
+
+MODULE_FIRMWARE(FIRMWARE_R100);
+MODULE_FIRMWARE(FIRMWARE_R200);
+MODULE_FIRMWARE(FIRMWARE_R300);
+MODULE_FIRMWARE(FIRMWARE_R420);
+MODULE_FIRMWARE(FIRMWARE_RS690);
+MODULE_FIRMWARE(FIRMWARE_RS600);
+MODULE_FIRMWARE(FIRMWARE_R520);
+
+#include "r100_track.h"
+
+/* This file gathers functions specific to:
+ * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
+ * and, in some cases, others.
+ */
+
+static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+	if (crtc == 0) {
+		if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
+			return true;
+		else
+			return false;
+	} else {
+		if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
+			return true;
+		else
+			return false;
+	}
+}
+
+static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+	u32 vline1, vline2;
+
+	if (crtc == 0) {
+		vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+		vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+	} else {
+		vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+		vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+	}
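+	/* two back-to-back reads that differ mean the vline counter is
+	 * advancing, i.e. the CRTC timing is actually running */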
+	if (vline1 != vline2)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * r100_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r1xx-r4xx).
+ */
+void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+	unsigned i = 0;
+
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (crtc == 0) {
+		if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
+			return;
+	} else {
+		if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
+			return;
+	}
+
+	/* depending on when we hit vblank, we may be close to active; if so,
+	 * wait for another frame.
+	 */
+	while (r100_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!r100_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+
+	while (!r100_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!r100_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+}
+
+/**
+ * r100_pre_page_flip - pre-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to prepare for pageflip on
+ *
+ * Pre-pageflip callback (r1xx-r4xx).
+ * Enables the pageflip irq (vblank irq).
+ */
+void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+/**
+ * r100_post_page_flip - post-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to cleanup pageflip on
+ *
+ * Post-pageflip callback (r1xx-r4xx).
+ * Disables the pageflip irq (vblank irq).
+ */
+void r100_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+/**
+ * r100_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to cleanup pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (r1xx-r4xx).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high; when it does, we release the lock and allow the
+ * double-buffered update to take place.
+ * Returns the current update pending status.
+ */
+u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+	int i;
+
+	/* Lock the graphics update lock */
+	/* update the scanout addresses */
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+	/* Wait for update_pending to go high. */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+			break;
+		udelay(1);
+	}
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
+}
+
+/**
+ * r100_pm_get_dynpm_state - look up dynpm power state callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Look up the optimal power state based on the
+ * current state of the GPU (r1xx-r5xx).
+ * Used for dynpm only.
+ */
+void r100_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+	int i;
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+
+	switch (rdev->pm.dynpm_planned_action) {
+	case DYNPM_ACTION_MINIMUM:
+		rdev->pm.requested_power_state_index = 0;
+		rdev->pm.dynpm_can_downclock = false;
+		break;
+	case DYNPM_ACTION_DOWNCLOCK:
+		if (rdev->pm.current_power_state_index == 0) {
+			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+			rdev->pm.dynpm_can_downclock = false;
+		} else {
+			if (rdev->pm.active_crtc_count > 1) {
+				for (i = 0; i < rdev->pm.num_power_states; i++) {
+					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+						continue;
+					else if (i >= rdev->pm.current_power_state_index) {
+						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+						break;
+					} else {
+						rdev->pm.requested_power_state_index = i;
+						break;
+					}
+				}
+			} else
+				rdev->pm.requested_power_state_index =
+					rdev->pm.current_power_state_index - 1;
+		}
+		/* don't use the power state if crtcs are active and no display flag is set */
+		if ((rdev->pm.active_crtc_count > 0) &&
+		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
+		     RADEON_PM_MODE_NO_DISPLAY)) {
+			rdev->pm.requested_power_state_index++;
+		}
+		break;
+	case DYNPM_ACTION_UPCLOCK:
+		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+			rdev->pm.dynpm_can_upclock = false;
+		} else {
+			if (rdev->pm.active_crtc_count > 1) {
+				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+						continue;
+					else if (i <= rdev->pm.current_power_state_index) {
+						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+						break;
+					} else {
+						rdev->pm.requested_power_state_index = i;
+						break;
+					}
+				}
+			} else
+				rdev->pm.requested_power_state_index =
+					rdev->pm.current_power_state_index + 1;
+		}
+		break;
+	case DYNPM_ACTION_DEFAULT:
+		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+		rdev->pm.dynpm_can_upclock = false;
+		break;
+	case DYNPM_ACTION_NONE:
+	default:
+		DRM_ERROR("Requested mode for not defined action\n");
+		return;
+	}
+	/* only one clock mode per power state */
+	rdev->pm.requested_clock_mode_index = 0;
+
+	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  pcie_lanes);
+}
+
+/**
+ * r100_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (r1xx-r3xx).
+ * Used for profile mode only.
+ */
+void r100_pm_init_profile(struct radeon_device *rdev)
+{
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* mid sh */
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* mid mh */
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+/**
+ * r100_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, pcie lanes, etc.) (r1xx-r4xx).
+ */
+void r100_pm_misc(struct radeon_device *rdev)
+{
+	int requested_index = rdev->pm.requested_power_state_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
+
+	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp |= voltage->gpio.mask;
+			else
+				tmp &= ~(voltage->gpio.mask);
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		} else {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp &= ~voltage->gpio.mask;
+			else
+				tmp |= voltage->gpio.mask;
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		}
+	}
+
+	sclk_cntl = RREG32_PLL(SCLK_CNTL);
+	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
+	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
+	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
+	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
+		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
+		else
+			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
+		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
+		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
+	} else
+		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
+
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
+		if (voltage->delay) {
+			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
+			switch (voltage->delay) {
+			case 33:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
+				break;
+			case 66:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
+				break;
+			case 99:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
+				break;
+			case 132:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
+				break;
+			}
+		} else
+			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
+	} else
+		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
+
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+		sclk_cntl &= ~FORCE_HDP;
+	else
+		sclk_cntl |= FORCE_HDP;
+
+	WREG32_PLL(SCLK_CNTL, sclk_cntl);
+	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
+	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
+
+	/* set pcie lanes */
+	if ((rdev->flags & RADEON_IS_PCIE) &&
+	    !(rdev->flags & RADEON_IS_IGP) &&
+	    rdev->asic->pm.set_pcie_lanes &&
+	    (ps->pcie_lanes !=
+	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+		radeon_set_pcie_lanes(rdev,
+				      ps->pcie_lanes);
+		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
+	}
+}
+
+/**
+ * r100_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (r1xx-r4xx).
+ */
+void r100_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			if (radeon_crtc->crtc_id) {
+				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+			} else {
+				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+			}
+		}
+	}
+}
+
+/**
+ * r100_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (r1xx-r4xx).
+ */
+void r100_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			if (radeon_crtc->crtc_id) {
+				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+			} else {
+				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+			}
+		}
+	}
+}
+
+/**
+ * r100_gui_idle - gui idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
+ * Returns true if idle, false if not.
+ */
+bool r100_gui_idle(struct radeon_device *rdev)
+{
+	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
+		return false;
+	else
+		return true;
+}
+
+/* hpd for digital panel detect/disconnect */
+/**
+ * r100_hpd_sense - hpd sense callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (r1xx-r4xx).
+ * Returns true if connected, false if not connected.
+ */
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	bool connected = false;
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
+			connected = true;
+		break;
+	default:
+		break;
+	}
+	return connected;
+}
+
+/**
+ * r100_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (r1xx-r4xx).
+ */
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = r100_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(RADEON_FP_GEN_CNTL);
+		if (connected)
+			tmp &= ~RADEON_FP_DETECT_INT_POL;
+		else
+			tmp |= RADEON_FP_DETECT_INT_POL;
+		WREG32(RADEON_FP_GEN_CNTL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(RADEON_FP2_GEN_CNTL);
+		if (connected)
+			tmp &= ~RADEON_FP2_DETECT_INT_POL;
+		else
+			tmp |= RADEON_FP2_DETECT_INT_POL;
+		WREG32(RADEON_FP2_GEN_CNTL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * r100_hpd_init - hpd setup callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup the hpd pins used by the card (r1xx-r4xx).
+ * Set the polarity, and enable the hpd interrupts.
+ */
+void r100_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned enable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		enable |= 1 << radeon_connector->hpd.hpd;
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+	}
+	radeon_irq_kms_enable_hpd(rdev, enable);
+}
+
+/**
+ * r100_hpd_fini - hpd tear down callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the hpd pins used by the card (r1xx-r4xx).
+ * Disable the hpd interrupts.
+ */
+void r100_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned disable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		disable |= 1 << radeon_connector->hpd.hpd;
+	}
+	radeon_irq_kms_disable_hpd(rdev, disable);
+}
+
+/*
+ * PCI GART
+ */
+void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
+{
+	/* TODO: can we do something here? */
+	/* The hw seems to cache only one entry, so we should discard that
+	 * entry; otherwise, if the first GPU GART read hits it, the access
+	 * could end up at the wrong address. */
+}
+
+int r100_pci_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.ptr) {
+		WARN(1, "R100 PCI GART already initialized\n");
+		return 0;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r)
+		return r;
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+	return radeon_gart_table_ram_alloc(rdev);
+}
+
+int r100_pci_gart_enable(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	radeon_gart_restore(rdev);
+	/* discard memory request outside of configured range */
+	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
+	WREG32(RADEON_AIC_CNTL, tmp);
+	/* set address range for PCI address translate */
+	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
+	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
+	/* set PCI GART page-table base address */
+	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
+	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
+	WREG32(RADEON_AIC_CNTL, tmp);
+	r100_pci_gart_tlb_flush(rdev);
+	DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+void r100_pci_gart_disable(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	/* discard memory request outside of configured range */
+	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
+	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
+	WREG32(RADEON_AIC_LO_ADDR, 0);
+	WREG32(RADEON_AIC_HI_ADDR, 0);
+}
+
+int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+	u32 *gtt = rdev->gart.ptr;
+
+	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+		/* valid page indices run from 0 to num_gpu_pages - 1 */
+		return -EINVAL;
+	}
+	gtt[i] = cpu_to_le32(lower_32_bits(addr));
+	return 0;
+}
+
+void r100_pci_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	r100_pci_gart_disable(rdev);
+	radeon_gart_table_ram_free(rdev);
+}
+
+int r100_irq_set(struct radeon_device *rdev)
+{
+	uint32_t tmp = 0;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+		WREG32(R_000040_GEN_INT_CNTL, 0);
+		return -EINVAL;
+	}
+	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+		tmp |= RADEON_SW_INT_ENABLE;
+	}
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		tmp |= RADEON_CRTC_VBLANK_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		tmp |= RADEON_CRTC2_VBLANK_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		tmp |= RADEON_FP_DETECT_MASK;
+	}
+	if (rdev->irq.hpd[1]) {
+		tmp |= RADEON_FP2_DETECT_MASK;
+	}
+	WREG32(RADEON_GEN_INT_CNTL, tmp);
+	return 0;
+}
+
+void r100_irq_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(R_000040_GEN_INT_CNTL, 0);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	tmp = RREG32(R_000044_GEN_INT_STATUS);
+	WREG32(R_000044_GEN_INT_STATUS, tmp);
+}
+
+static uint32_t r100_irq_ack(struct radeon_device *rdev)
+{
+	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
+	uint32_t irq_mask = RADEON_SW_INT_TEST |
+		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
+		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
+
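+	/* writing the latched status bits back acknowledges (clears) them */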
+	if (irqs) {
+		WREG32(RADEON_GEN_INT_STATUS, irqs);
+	}
+	return irqs & irq_mask;
+}
+
+int r100_irq_process(struct radeon_device *rdev)
+{
+	uint32_t status, msi_rearm;
+	bool queue_hotplug = false;
+
+	status = r100_irq_ack(rdev);
+	if (!status) {
+		return IRQ_NONE;
+	}
+	if (rdev->shutdown) {
+		return IRQ_NONE;
+	}
+	while (status) {
+		/* SW interrupt */
+		if (status & RADEON_SW_INT_TEST) {
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+		}
+		/* Vertical blank interrupts */
+		if (status & RADEON_CRTC_VBLANK_STAT) {
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (atomic_read(&rdev->irq.pflip[0]))
+				radeon_crtc_handle_flip(rdev, 0);
+		}
+		if (status & RADEON_CRTC2_VBLANK_STAT) {
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (atomic_read(&rdev->irq.pflip[1]))
+				radeon_crtc_handle_flip(rdev, 1);
+		}
+		if (status & RADEON_FP_DETECT_STAT) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD1\n");
+		}
+		if (status & RADEON_FP2_DETECT_STAT) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD2\n");
+		}
+		status = r100_irq_ack(rdev);
+	}
+	if (queue_hotplug)
+		schedule_work(&rdev->hotplug_work);
+	if (rdev->msi_enabled) {
+		switch (rdev->family) {
+		case CHIP_RS400:
+		case CHIP_RS480:
+			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
+			WREG32(RADEON_AIC_CNTL, msi_rearm);
+			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
+			break;
+		default:
+			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
+			break;
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+	if (crtc == 0)
+		return RREG32(RADEON_CRTC_CRNT_FRAME);
+	else
+		return RREG32(RADEON_CRTC2_CRNT_FRAME);
+}
+
+/* Whoever calls radeon_fence_emit should call ring_lock and ask
+ * for enough space (today the callers are ib schedule and buffer move) */
+void r100_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+
+	/* We have to make sure that the caches are flushed before
+	 * the CPU might read something from VRAM. */
+	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
+	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
+	/* Wait until IDLE & CLEAN */
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+	/* Emit fence sequence & fire IRQ */
+	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+}
+
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *ring,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait)
+{
+	/* Unused on older asics, since we don't have semaphores or multiple rings */
+	BUG();
+}
+
+int r100_copy_blit(struct radeon_device *rdev,
+		   uint64_t src_offset,
+		   uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	uint32_t cur_pages;
+	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
+	uint32_t pitch;
+	uint32_t stride_pixels;
+	unsigned ndw;
+	int num_loops;
+	int r = 0;
+
+	/* radeon limited to 16k stride */
+	stride_bytes &= 0x3fff;
+	/* radeon pitch is /64 */
+	pitch = stride_bytes / 64;
+	stride_pixels = stride_bytes / 4;
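+	/* e.g. with the usual 4096-byte GPU page size: pitch = 4096 / 64 = 64,
+	 * stride_pixels = 4096 / 4 = 1024 (ARGB8888 is 4 bytes per pixel)
+	 */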
+	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
+
+	/* Ask for enough room for blit + flush + fence */
+	ndw = 64 + (10 * num_loops);
+	r = radeon_ring_lock(rdev, ring, ndw);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
+		return -EINVAL;
+	}
+	while (num_gpu_pages > 0) {
+		cur_pages = num_gpu_pages;
+		if (cur_pages > 8191) {
+			cur_pages = 8191;
+		}
+		num_gpu_pages -= cur_pages;
+
+		/* pages run in the Y direction (height);
+		 * page width runs in the X direction (width) */
+		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
+		radeon_ring_write(ring,
+				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+				  RADEON_GMC_SRC_CLIPPING |
+				  RADEON_GMC_DST_CLIPPING |
+				  RADEON_GMC_BRUSH_NONE |
+				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
+				  RADEON_GMC_SRC_DATATYPE_COLOR |
+				  RADEON_ROP3_S |
+				  RADEON_DP_SRC_SOURCE_MEMORY |
+				  RADEON_GMC_CLR_CMP_CNTL_DIS |
+				  RADEON_GMC_WR_MSK_DIS);
+		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
+		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
+		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+		radeon_ring_write(ring, num_gpu_pages);
+		radeon_ring_write(ring, num_gpu_pages);
+		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
+	}
+	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring,
+			  RADEON_WAIT_2D_IDLECLEAN |
+			  RADEON_WAIT_HOST_IDLECLEAN |
+			  RADEON_WAIT_DMA_GUI_IDLE);
+	if (fence) {
+		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+	}
+	radeon_ring_unlock_commit(rdev, ring);
+	return r;
+}
+
+static int r100_cp_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(R_000E40_RBBM_STATUS);
+		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
+			return 0;
+		}
+		udelay(1);
+	}
+	return -1;
+}
+
+void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	int r;
+
+	r = radeon_ring_lock(rdev, ring, 2);
+	if (r) {
+		return;
+	}
+	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+	radeon_ring_write(ring,
+			  RADEON_ISYNC_ANY2D_IDLE3D |
+			  RADEON_ISYNC_ANY3D_IDLE2D |
+			  RADEON_ISYNC_WAIT_IDLEGUI |
+			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+	radeon_ring_unlock_commit(rdev, ring);
+}
+
+
+/* Load the microcode for the CP */
+static int r100_cp_init_microcode(struct radeon_device *rdev)
+{
+	struct platform_device *pdev;
+	const char *fw_name = NULL;
+	int err;
+
+	DRM_DEBUG_KMS("\n");
+
+	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+	err = IS_ERR(pdev);
+	if (err) {
+		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+		return -EINVAL;
+	}
+	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
+	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
+	    (rdev->family == CHIP_RS200)) {
+		DRM_INFO("Loading R100 Microcode\n");
+		fw_name = FIRMWARE_R100;
+	} else if ((rdev->family == CHIP_R200) ||
+		   (rdev->family == CHIP_RV250) ||
+		   (rdev->family == CHIP_RV280) ||
+		   (rdev->family == CHIP_RS300)) {
+		DRM_INFO("Loading R200 Microcode\n");
+		fw_name = FIRMWARE_R200;
+	} else if ((rdev->family == CHIP_R300) ||
+		   (rdev->family == CHIP_R350) ||
+		   (rdev->family == CHIP_RV350) ||
+		   (rdev->family == CHIP_RV380) ||
+		   (rdev->family == CHIP_RS400) ||
+		   (rdev->family == CHIP_RS480)) {
+		DRM_INFO("Loading R300 Microcode\n");
+		fw_name = FIRMWARE_R300;
+	} else if ((rdev->family == CHIP_R420) ||
+		   (rdev->family == CHIP_R423) ||
+		   (rdev->family == CHIP_RV410)) {
+		DRM_INFO("Loading R400 Microcode\n");
+		fw_name = FIRMWARE_R420;
+	} else if ((rdev->family == CHIP_RS690) ||
+		   (rdev->family == CHIP_RS740)) {
+		DRM_INFO("Loading RS690/RS740 Microcode\n");
+		fw_name = FIRMWARE_RS690;
+	} else if (rdev->family == CHIP_RS600) {
+		DRM_INFO("Loading RS600 Microcode\n");
+		fw_name = FIRMWARE_RS600;
+	} else if ((rdev->family == CHIP_RV515) ||
+		   (rdev->family == CHIP_R520) ||
+		   (rdev->family == CHIP_RV530) ||
+		   (rdev->family == CHIP_R580) ||
+		   (rdev->family == CHIP_RV560) ||
+		   (rdev->family == CHIP_RV570)) {
+		DRM_INFO("Loading R500 Microcode\n");
+		fw_name = FIRMWARE_R520;
+	}
+
+	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+	platform_device_unregister(pdev);
+	if (err) {
+		printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
+		       fw_name);
+	} else if (rdev->me_fw->size % 8) {
+		printk(KERN_ERR
+		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->me_fw->size, fw_name);
+		err = -EINVAL;
+		release_firmware(rdev->me_fw);
+		rdev->me_fw = NULL;
+	}
+	return err;
+}
+
+static void r100_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i, size;
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+
+	if (rdev->me_fw) {
+		size = rdev->me_fw->size / 4;
+		fw_data = (const __be32 *)&rdev->me_fw->data[0];
+		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
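+		/* each microcode entry is streamed as a high/low pair of
+		 * big-endian 32-bit words */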
+		for (i = 0; i < size; i += 2) {
+			WREG32(RADEON_CP_ME_RAM_DATAH,
+			       be32_to_cpup(&fw_data[i]));
+			WREG32(RADEON_CP_ME_RAM_DATAL,
+			       be32_to_cpup(&fw_data[i + 1]));
+		}
+	}
+}
+
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	unsigned rb_bufsz;
+	unsigned rb_blksz;
+	unsigned max_fetch;
+	unsigned pre_write_timer;
+	unsigned pre_write_limit;
+	unsigned indirect2_start;
+	unsigned indirect1_start;
+	uint32_t tmp;
+	int r;
+
+	if (r100_debugfs_cp_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for CP !\n");
+	}
+	if (!rdev->me_fw) {
+		r = r100_cp_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	/* Align ring size */
+	rb_bufsz = drm_order(ring_size / 8);
+	ring_size = (1 << (rb_bufsz + 1)) * 4;
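+	/* e.g. a requested ring_size of 1024 * 1024 gives
+	 * rb_bufsz = drm_order(131072) = 17 and an aligned
+	 * ring_size of (1 << 18) * 4 = 1024 * 1024 bytes */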
+	r100_cp_load_microcode(rdev);
+	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
+			     0, 0x7fffff, RADEON_CP_PACKET2);
+	if (r) {
+		return r;
+	}
+	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
+	 * the rptr copy in system ram */
+	rb_blksz = 9;
+	/* cp will read 128 bytes at a time (4 dwords) */
+	max_fetch = 1;
+	ring->align_mask = 16 - 1;
+	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
+	pre_write_timer = 64;
+	/* Force CP_RB_WPTR write if written more than one time before the
+	 * delay expire
+	 */
+	pre_write_limit = 0;
+	/* Setup the cp cache like this (cache size is 96 dwords):
+	 *	RING		0  to 15
+	 *	INDIRECT1	16 to 79
+	 *	INDIRECT2	80 to 95
+	 * So the ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)),
+	 *    the indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords)),
+	 *    the indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)).
+	 * The idea is that most of the gpu commands will go through the
+	 * indirect1 buffer, so it gets the bigger cache.
+	 */
+	indirect2_start = 80;
+	indirect1_start = 16;
+	/* cp setup */
+	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
+	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
+	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
+	       REG_SET(RADEON_MAX_FETCH, max_fetch));
+#ifdef __BIG_ENDIAN
+	tmp |= RADEON_BUF_SWAP_32BIT;
+#endif
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
+
+	/* Set ring address */
+	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
+	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
+	/* Force read & write ptr to 0 */
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	ring->wptr = 0;
+	WREG32(RADEON_CP_RB_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(R_00070C_CP_RB_RPTR_ADDR,
+		S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
+	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
+
+	if (rdev->wb.enabled)
+		WREG32(R_000770_SCRATCH_UMSK, 0xff);
+	else {
+		tmp |= RADEON_RB_NO_UPDATE;
+		WREG32(R_000770_SCRATCH_UMSK, 0);
+	}
+
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	udelay(10);
+	ring->rptr = RREG32(RADEON_CP_RB_RPTR);
+	/* Set cp mode to bus mastering & enable cp*/
+	WREG32(RADEON_CP_CSQ_MODE,
+	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
+	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
+	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
+	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
+	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
+
+	/* at this point everything should be setup correctly to enable master */
+	pci_set_master(rdev->pdev);
+
+	radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+	if (r) {
+		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
+		return r;
+	}
+	ring->ready = true;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	if (!ring->rptr_save_reg /* not resuming from suspend */
+	    && radeon_ring_supports_scratch_reg(rdev, ring)) {
+		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
+		if (r) {
+			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
+			ring->rptr_save_reg = 0;
+		}
+	}
+	return 0;
+}
+
+void r100_cp_fini(struct radeon_device *rdev)
+{
+	if (r100_cp_wait_for_idle(rdev)) {
+		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
+	}
+	/* Disable ring */
+	r100_cp_disable(rdev);
+	radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
+	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+	DRM_INFO("radeon: cp finalized\n");
+}
+
+void r100_cp_disable(struct radeon_device *rdev)
+{
+	/* Disable ring */
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	WREG32(RADEON_CP_CSQ_MODE, 0);
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	WREG32(R_000770_SCRATCH_UMSK, 0);
+	if (r100_gui_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+}
+
+/*
+ * CS functions
+ */
+int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+			    struct radeon_cs_packet *pkt,
+			    unsigned idx,
+			    unsigned reg)
+{
+	int r;
+	u32 tile_flags = 0;
+	u32 tmp;
+	struct radeon_cs_reloc *reloc;
+	u32 value;
+
+	r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+	if (r) {
+		DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+			  idx, reg);
+		radeon_cs_dump_packet(p, pkt);
+		return r;
+	}
+
+	value = radeon_get_ib_value(p, idx);
+	tmp = value & 0x003fffff;
+	tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
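+	/* the low 22 bits hold the offset in 1KB units, hence the >> 10 */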
+
+	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			tile_flags |= RADEON_DST_TILE_MACRO;
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+			if (reg == RADEON_SRC_PITCH_OFFSET) {
+				DRM_ERROR("Cannot src blit from microtiled surface\n");
+				radeon_cs_dump_packet(p, pkt);
+				return -EINVAL;
+			}
+			tile_flags |= RADEON_DST_TILE_MICRO;
+		}
+
+		tmp |= tile_flags;
+		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
+	} else
+		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
+	return 0;
+}
+
+int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+			     struct radeon_cs_packet *pkt,
+			     int idx)
+{
+	unsigned c, i;
+	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
+	int r = 0;
+	volatile uint32_t *ib;
+	u32 idx_value;
+
+	ib = p->ib.ptr;
+	track = (struct r100_cs_track *)p->track;
+	c = radeon_get_ib_value(p, idx++) & 0x1F;
+	if (c > 16) {
+	    DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
+		      pkt->opcode);
+	    radeon_cs_dump_packet(p, pkt);
+	    return -EINVAL;
+	}
+	track->num_arrays = c;
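+	/* arrays come packed two per 3 dwords: the first dword carries both
+	 * element sizes (bits 8..14 and 24..30), the next two carry the two
+	 * buffer addresses; a trailing odd array uses 2 dwords
+	 */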
+	for (i = 0; i < (c - 1); i+=2, idx+=3) {
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n",
+				  pkt->opcode);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		idx_value = radeon_get_ib_value(p, idx);
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+
+		track->arrays[i + 0].esize = idx_value >> 8;
+		track->arrays[i + 0].robj = reloc->robj;
+		track->arrays[i + 0].esize &= 0x7F;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n",
+				  pkt->opcode);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
+		track->arrays[i + 1].robj = reloc->robj;
+		track->arrays[i + 1].esize = idx_value >> 24;
+		track->arrays[i + 1].esize &= 0x7F;
+	}
+	if (c & 1) {
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n",
+					  pkt->opcode);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		idx_value = radeon_get_ib_value(p, idx);
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+		track->arrays[i + 0].robj = reloc->robj;
+		track->arrays[i + 0].esize = idx_value >> 8;
+		track->arrays[i + 0].esize &= 0x7F;
+	}
+	return r;
+}
+
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+			  struct radeon_cs_packet *pkt,
+			  const unsigned *auth, unsigned n,
+			  radeon_packet0_check_t check)
+{
+	unsigned reg;
+	unsigned i, j, m;
+	unsigned idx;
+	int r;
+
+	idx = pkt->idx + 1;
+	reg = pkt->reg;
+	/* Check that the register falls into the register range
+	 * determined by the number of entries (n) in the
+	 * safe register bitmap.
+	 */
+	if (pkt->one_reg_wr) {
+		if ((reg >> 7) > n) {
+			return -EINVAL;
+		}
+	} else {
+		if (((reg + (pkt->count << 2)) >> 7) > n) {
+			return -EINVAL;
+		}
+	}
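+	/* each word of the auth bitmap covers 32 registers (128 bytes of
+	 * register space); e.g. reg 0x1c14 maps to word j = 0x1c14 >> 7 = 56
+	 * and bit m = 1 << ((0x1c14 >> 2) & 31) = 1 << 5
+	 */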
+	for (i = 0; i <= pkt->count; i++, idx++) {
+		j = (reg >> 7);
+		m = 1 << ((reg >> 2) & 31);
+		if (auth[j] & m) {
+			r = check(p, pkt, idx, reg);
+			if (r) {
+				return r;
+			}
+		}
+		if (pkt->one_reg_wr) {
+			if (!(auth[j] & m)) {
+				break;
+			}
+		} else {
+			reg += 4;
+		}
+	}
+	return 0;
+}
+
+/**
+ * r100_cs_packet_parse_vline() - parse userspace VLINE packet
+ * @p:		parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits:
+ * PACKET0 - VLINE_START_END + value
+ * PACKET0 - WAIT_UNTIL + value
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT UNTIL packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case.
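+ *
+ * The six dwords involved, as indexed below:
+ *   ib[h_idx + 0]  PACKET0 header for VLINE_START_END
+ *   ib[h_idx + 1]  start/end value
+ *   ib[h_idx + 2]  PACKET0 header for WAIT_UNTIL
+ *   ib[h_idx + 3]  RADEON_WAIT_CRTC_VLINE
+ *   ib[h_idx + 4]  PACKET3 NOP header
+ *   ib[h_idx + 5]  crtc_id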
+ */
+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_cs_packet p3reloc, waitreloc;
+	int crtc_id;
+	int r;
+	uint32_t header, h_idx, reg;
+	volatile uint32_t *ib;
+
+	ib = p->ib.ptr;
+
+	/* parse the wait until */
+	r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
+	if (r)
+		return r;
+
+	/* check it's a wait until with a single register write (count 0) */
+	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
+	    waitreloc.count != 0) {
+		DRM_ERROR("vline wait had illegal wait until segment\n");
+		return -EINVAL;
+	}
+
+	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
+		DRM_ERROR("vline wait had illegal wait until\n");
+		return -EINVAL;
+	}
+
+	/* jump over the NOP */
+	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
+	if (r)
+		return r;
+
+	h_idx = p->idx - 2;
+	p->idx += waitreloc.count + 2;
+	p->idx += p3reloc.count + 2;
+
+	header = radeon_get_ib_value(p, h_idx);
+	crtc_id = radeon_get_ib_value(p, h_idx + 5);
+	reg = R100_CP_PACKET0_GET_REG(header);
+	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		DRM_ERROR("cannot find crtc %d\n", crtc_id);
+		return -EINVAL;
+	}
+	crtc = obj_to_crtc(obj);
+	radeon_crtc = to_radeon_crtc(crtc);
+	crtc_id = radeon_crtc->crtc_id;
+
+	if (!crtc->enabled) {
+		/* if the CRTC isn't enabled - we need to nop out the wait until */
+		ib[h_idx + 2] = PACKET2(0);
+		ib[h_idx + 3] = PACKET2(0);
+	} else if (crtc_id == 1) {
+		switch (reg) {
+		case AVIVO_D1MODE_VLINE_START_END:
+			header &= ~R300_CP_PACKET0_REG_MASK;
+			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
+			break;
+		case RADEON_CRTC_GUI_TRIG_VLINE:
+			header &= ~R300_CP_PACKET0_REG_MASK;
+			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
+			break;
+		default:
+			DRM_ERROR("unknown crtc reloc\n");
+			return -EINVAL;
+		}
+		ib[h_idx] = header;
+		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
+	}
+
+	return 0;
+}
+
+static int r100_get_vtx_size(uint32_t vtx_fmt)
+{
+	int vtx_size;
+	vtx_size = 2;
+	/* ordered according to bits in spec */
+	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
+		vtx_size += 3;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
+		vtx_size += 3;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
+		vtx_size++;
+	/* blend weight */
+	if (vtx_fmt & (0x7 << 15))
+		vtx_size += (vtx_fmt >> 15) & 0x7;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
+		vtx_size += 3;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
+		vtx_size++;
+	return vtx_size;
+}
+
+static int r100_packet0_check(struct radeon_cs_parser *p,
+			      struct radeon_cs_packet *pkt,
+			      unsigned idx, unsigned reg)
+{
+	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
+	volatile uint32_t *ib;
+	uint32_t tmp;
+	int r;
+	int i, face;
+	u32 tile_flags = 0;
+	u32 idx_value;
+
+	ib = p->ib.ptr;
+	track = (struct r100_cs_track *)p->track;
+
+	idx_value = radeon_get_ib_value(p, idx);
+
+	switch (reg) {
+	case RADEON_CRTC_GUI_TRIG_VLINE:
+		r = r100_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		break;
+		/* FIXME: only allow PACKET3 blit? easier to check for
+		 * out-of-range access */
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+		if (r)
+			return r;
+		break;
+	case RADEON_RB3D_DEPTHOFFSET:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->zb.robj = reloc->robj;
+		track->zb.offset = idx_value;
+		track->zb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case RADEON_RB3D_COLOROFFSET:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->cb[0].robj = reloc->robj;
+		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case RADEON_PP_TXOFFSET_0:
+	case RADEON_PP_TXOFFSET_1:
+	case RADEON_PP_TXOFFSET_2:
+		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= RADEON_TXO_MACRO_TILE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= RADEON_TXO_MICRO_TILE_X2;
+
+			tmp = idx_value & ~(0x7 << 2);
+			tmp |= tile_flags;
+			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+		} else
+			ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_CUBIC_OFFSET_T0_0:
+	case RADEON_PP_CUBIC_OFFSET_T0_1:
+	case RADEON_PP_CUBIC_OFFSET_T0_2:
+	case RADEON_PP_CUBIC_OFFSET_T0_3:
+	case RADEON_PP_CUBIC_OFFSET_T0_4:
+		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->textures[0].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		track->textures[0].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_CUBIC_OFFSET_T1_0:
+	case RADEON_PP_CUBIC_OFFSET_T1_1:
+	case RADEON_PP_CUBIC_OFFSET_T1_2:
+	case RADEON_PP_CUBIC_OFFSET_T1_3:
+	case RADEON_PP_CUBIC_OFFSET_T1_4:
+		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->textures[1].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		track->textures[1].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_CUBIC_OFFSET_T2_0:
+	case RADEON_PP_CUBIC_OFFSET_T2_1:
+	case RADEON_PP_CUBIC_OFFSET_T2_2:
+	case RADEON_PP_CUBIC_OFFSET_T2_3:
+	case RADEON_PP_CUBIC_OFFSET_T2_4:
+		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->textures[2].cube_info[i].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		track->textures[2].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_RE_WIDTH_HEIGHT:
+		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_COLORPITCH:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= RADEON_COLOR_TILE_ENABLE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		} else
+			ib[idx] = idx_value;
+
+		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
+		break;
+	case RADEON_RB3D_DEPTHPITCH:
+		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_CNTL:
+		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+		case 7:
+		case 8:
+		case 9:
+		case 11:
+		case 12:
+			track->cb[0].cpp = 1;
+			break;
+		case 3:
+		case 4:
+		case 15:
+			track->cb[0].cpp = 2;
+			break;
+		case 6:
+			track->cb[0].cpp = 4;
+			break;
+		default:
+			DRM_ERROR("Invalid color buffer format (%d) !\n",
+				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+			return -EINVAL;
+		}
+		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_ZSTENCILCNTL:
+		switch (idx_value & 0xf) {
+		case 0:
+			track->zb.cpp = 2;
+			break;
+		case 2:
+		case 3:
+		case 4:
+		case 5:
+		case 9:
+		case 11:
+			track->zb.cpp = 4;
+			break;
+		default:
+			break;
+		}
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_ZPASS_ADDR:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case RADEON_PP_CNTL:
+		{
+			uint32_t temp = idx_value >> 4;
+			for (i = 0; i < track->num_texture; i++)
+				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
+		}
+		break;
+	case RADEON_SE_VF_CNTL:
+		track->vap_vf_cntl = idx_value;
+		break;
+	case RADEON_SE_VTX_FMT:
+		track->vtx_size = r100_get_vtx_size(idx_value);
+		break;
+	case RADEON_PP_TEX_SIZE_0:
+	case RADEON_PP_TEX_SIZE_1:
+	case RADEON_PP_TEX_SIZE_2:
+		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
+		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
+		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_TEX_PITCH_0:
+	case RADEON_PP_TEX_PITCH_1:
+	case RADEON_PP_TEX_PITCH_2:
+		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
+		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_TXFILTER_0:
+	case RADEON_PP_TXFILTER_1:
+	case RADEON_PP_TXFILTER_2:
+		i = (reg - RADEON_PP_TXFILTER_0) / 24;
+		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
+						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
+		tmp = (idx_value >> 23) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_w = false;
+		tmp = (idx_value >> 27) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_TXFORMAT_0:
+	case RADEON_PP_TXFORMAT_1:
+	case RADEON_PP_TXFORMAT_2:
+		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
+		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
+			track->textures[i].use_pitch = 1;
+		} else {
+			track->textures[i].use_pitch = 0;
+			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+		}
+		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
+			track->textures[i].tex_coord_type = 2;
+		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
+		case RADEON_TXFORMAT_I8:
+		case RADEON_TXFORMAT_RGB332:
+		case RADEON_TXFORMAT_Y8:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case RADEON_TXFORMAT_AI88:
+		case RADEON_TXFORMAT_ARGB1555:
+		case RADEON_TXFORMAT_RGB565:
+		case RADEON_TXFORMAT_ARGB4444:
+		case RADEON_TXFORMAT_VYUY422:
+		case RADEON_TXFORMAT_YVYU422:
+		case RADEON_TXFORMAT_SHADOW16:
+		case RADEON_TXFORMAT_LDUDV655:
+		case RADEON_TXFORMAT_DUDV88:
+			track->textures[i].cpp = 2;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case RADEON_TXFORMAT_ARGB8888:
+		case RADEON_TXFORMAT_RGBA8888:
+		case RADEON_TXFORMAT_SHADOW32:
+		case RADEON_TXFORMAT_LDUDUV8888:
+			track->textures[i].cpp = 4;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case RADEON_TXFORMAT_DXT1:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+			break;
+		case RADEON_TXFORMAT_DXT23:
+		case RADEON_TXFORMAT_DXT45:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+			break;
+		}
+		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
+		break;
+	case RADEON_PP_CUBIC_FACES_0:
+	case RADEON_PP_CUBIC_FACES_1:
+	case RADEON_PP_CUBIC_FACES_2:
+		tmp = idx_value;
+		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
+		for (face = 0; face < 4; face++) {
+			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
+			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
+		}
+		track->tex_dirty = true;
+		break;
+	default:
+		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+		       reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+					 struct radeon_cs_packet *pkt,
+					 struct radeon_bo *robj)
+{
+	unsigned idx;
+	u32 value;
+	idx = pkt->idx + 1;
+	value = radeon_get_ib_value(p, idx + 2);
+	if ((value + 1) > radeon_bo_size(robj)) {
+		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
+			  "(need %u have %lu) !\n",
+			  value + 1,
+			  radeon_bo_size(robj));
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int r100_packet3_check(struct radeon_cs_parser *p,
+			      struct radeon_cs_packet *pkt)
+{
+	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
+	unsigned idx;
+	volatile uint32_t *ib;
+	int r;
+
+	ib = p->ib.ptr;
+	idx = pkt->idx + 1;
+	track = (struct r100_cs_track *)p->track;
+	switch (pkt->opcode) {
+	case PACKET3_3D_LOAD_VBPNTR:
+		r = r100_packet3_load_vbpntr(p, pkt, idx);
+		if (r)
+			return r;
+		break;
+	case PACKET3_INDX_BUFFER:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
+		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
+		if (r) {
+			return r;
+		}
+		break;
+	case 0x23:
+		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
+		track->num_arrays = 1;
+		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
+
+		track->arrays[0].robj = reloc->robj;
+		track->arrays[0].esize = track->vtx_size;
+
+		track->max_indx = radeon_get_ib_value(p, idx+1);
+
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
+		track->immd_dwords = pkt->count - 1;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+	case PACKET3_3D_DRAW_IMMD:
+		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		track->immd_dwords = pkt->count - 1;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing using in-packet vertex data */
+	case PACKET3_3D_DRAW_IMMD_2:
+		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		track->immd_dwords = pkt->count;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing using in-packet vertex data */
+	case PACKET3_3D_DRAW_VBUF_2:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing of vertex buffers setup elsewhere */
+	case PACKET3_3D_DRAW_INDX_2:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing using indices to vertex buffer */
+	case PACKET3_3D_DRAW_VBUF:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing of vertex buffers setup elsewhere */
+	case PACKET3_3D_DRAW_INDX:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
+		/* triggers drawing using indices to vertex buffer */
+	case PACKET3_3D_CLEAR_HIZ:
+	case PACKET3_3D_CLEAR_ZMASK:
+		if (p->rdev->hyperz_filp != p->filp)
+			return -EINVAL;
+		break;
+	case PACKET3_NOP:
+		break;
+	default:
+		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int r100_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	struct r100_cs_track *track;
+	int r;
+
+	track = kzalloc(sizeof(*track), GFP_KERNEL);
+	if (!track)
+		return -ENOMEM;
+	r100_cs_track_clear(p->rdev, track);
+	p->track = track;
+	do {
+		r = radeon_cs_packet_parse(p, &pkt, p->idx);
+		if (r) {
+			return r;
+		}
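+		/* Advance past this packet: one header dword plus
+		 * pkt.count + 1 payload dwords. */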
+		p->idx += pkt.count + 2;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			if (p->rdev->family >= CHIP_R200)
+				r = r100_cs_parse_packet0(p, &pkt,
+					p->rdev->config.r100.reg_safe_bm,
+					p->rdev->config.r100.reg_safe_bm_size,
+					&r200_packet0_check);
+			else
+				r = r100_cs_parse_packet0(p, &pkt,
+					p->rdev->config.r100.reg_safe_bm,
+					p->rdev->config.r100.reg_safe_bm_size,
+					&r100_packet0_check);
+			break;
+		case RADEON_PACKET_TYPE2:
+			break;
+		case RADEON_PACKET_TYPE3:
+			r = r100_packet3_check(p, &pkt);
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n",
+				  pkt.type);
+			return -EINVAL;
+		}
+		if (r)
+			return r;
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+	return 0;
+}
+
+static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
+{
+	DRM_ERROR("pitch                      %d\n", t->pitch);
+	DRM_ERROR("use_pitch                  %d\n", t->use_pitch);
+	DRM_ERROR("width                      %d\n", t->width);
+	DRM_ERROR("width_11                   %d\n", t->width_11);
+	DRM_ERROR("height                     %d\n", t->height);
+	DRM_ERROR("height_11                  %d\n", t->height_11);
+	DRM_ERROR("num levels                 %d\n", t->num_levels);
+	DRM_ERROR("depth                      %d\n", t->txdepth);
+	DRM_ERROR("bpp                        %d\n", t->cpp);
+	DRM_ERROR("coordinate type            %d\n", t->tex_coord_type);
+	DRM_ERROR("width round to power of 2  %d\n", t->roundup_w);
+	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
+	DRM_ERROR("compress format            %d\n", t->compress_format);
+}
+
+static int r100_track_compress_size(int compress_format, int w, int h)
+{
+	int block_width, block_height, block_bytes;
+	int wblocks, hblocks;
+	int min_wblocks;
+	int sz;
+
+	block_width = 4;
+	block_height = 4;
+
+	switch (compress_format) {
+	case R100_TRACK_COMP_DXT1:
+		block_bytes = 8;
+		min_wblocks = 4;
+		break;
+	default:
+	case R100_TRACK_COMP_DXT35:
+		block_bytes = 16;
+		min_wblocks = 2;
+		break;
+	}
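+	/* Sizes are rounded up to whole 4x4 blocks; e.g. a 16x16 DXT1
+	 * texture is 4 x 4 blocks of 8 bytes = 128 bytes. */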
+
+	hblocks = (h + block_height - 1) / block_height;
+	wblocks = (w + block_width - 1) / block_width;
+	if (wblocks < min_wblocks)
+		wblocks = min_wblocks;
+	sz = wblocks * hblocks * block_bytes;
+	return sz;
+}
+
+static int r100_cs_track_cube(struct radeon_device *rdev,
+			      struct r100_cs_track *track, unsigned idx)
+{
+	unsigned face, w, h;
+	struct radeon_bo *cube_robj;
+	unsigned long size;
+	unsigned compress_format = track->textures[idx].compress_format;
+
+	for (face = 0; face < 5; face++) {
+		cube_robj = track->textures[idx].cube_info[face].robj;
+		w = track->textures[idx].cube_info[face].width;
+		h = track->textures[idx].cube_info[face].height;
+
+		if (compress_format) {
+			size = r100_track_compress_size(compress_format, w, h);
+		} else
+			size = w * h;
+		size *= track->textures[idx].cpp;
+
+		size += track->textures[idx].cube_info[face].offset;
+
+		if (size > radeon_bo_size(cube_robj)) {
+			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
+				  size, radeon_bo_size(cube_robj));
+			r100_cs_track_texture_print(&track->textures[idx]);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int r100_cs_track_texture_check(struct radeon_device *rdev,
+				       struct r100_cs_track *track)
+{
+	struct radeon_bo *robj;
+	unsigned long size;
+	unsigned u, i, w, h, d;
+	int ret;
+
+	for (u = 0; u < track->num_texture; u++) {
+		if (!track->textures[u].enabled)
+			continue;
+		if (track->textures[u].lookup_disable)
+			continue;
+		robj = track->textures[u].robj;
+		if (robj == NULL) {
+			DRM_ERROR("No texture bound to unit %u\n", u);
+			return -EINVAL;
+		}
+		size = 0;
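+		/* Accumulate the footprint of every mip level; on RV515+ the
+		 * 11th bit of width/height is stored separately (width_11 /
+		 * height_11) and OR'd back in before halving per level. */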
+		for (i = 0; i <= track->textures[u].num_levels; i++) {
+			if (track->textures[u].use_pitch) {
+				if (rdev->family < CHIP_R300)
+					w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
+				else
+					w = track->textures[u].pitch / (1 << i);
+			} else {
+				w = track->textures[u].width;
+				if (rdev->family >= CHIP_RV515)
+					w |= track->textures[u].width_11;
+				w = w / (1 << i);
+				if (track->textures[u].roundup_w)
+					w = roundup_pow_of_two(w);
+			}
+			h = track->textures[u].height;
+			if (rdev->family >= CHIP_RV515)
+				h |= track->textures[u].height_11;
+			h = h / (1 << i);
+			if (track->textures[u].roundup_h)
+				h = roundup_pow_of_two(h);
+			if (track->textures[u].tex_coord_type == 1) {
+				d = (1 << track->textures[u].txdepth) / (1 << i);
+				if (!d)
+					d = 1;
+			} else {
+				d = 1;
+			}
+			if (track->textures[u].compress_format) {
+				/* compressed textures are block based */
+				size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
+			} else
+				size += w * h * d;
+		}
+		size *= track->textures[u].cpp;
+
+		switch (track->textures[u].tex_coord_type) {
+		case 0:
+		case 1:
+			break;
+		case 2:
+			if (track->separate_cube) {
+				ret = r100_cs_track_cube(rdev, track, u);
+				if (ret)
+					return ret;
+			} else
+				size *= 6;
+			break;
+		default:
+			DRM_ERROR("Invalid texture coordinate type %u for unit "
+				  "%u\n", track->textures[u].tex_coord_type, u);
+			return -EINVAL;
+		}
+		if (size > radeon_bo_size(robj)) {
+			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
+				  "%lu\n", u, size, radeon_bo_size(robj));
+			r100_cs_track_texture_print(&track->textures[u]);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+{
+	unsigned i;
+	unsigned long size;
+	unsigned prim_walk;
+	unsigned nverts;
+	unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
+
+	if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
+	    !track->blend_read_enable)
+		num_cb = 0;
+
+	for (i = 0; i < num_cb; i++) {
+		if (track->cb[i].robj == NULL) {
+			DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
+			return -EINVAL;
+		}
+		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
+		size += track->cb[i].offset;
+		if (size > radeon_bo_size(track->cb[i].robj)) {
+			DRM_ERROR("[drm] Buffer too small for color buffer %d "
+				  "(need %lu have %lu) !\n", i, size,
+				  radeon_bo_size(track->cb[i].robj));
+			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
+				  i, track->cb[i].pitch, track->cb[i].cpp,
+				  track->cb[i].offset, track->maxy);
+			return -EINVAL;
+		}
+	}
+	track->cb_dirty = false;
+
+	if (track->zb_dirty && track->z_enabled) {
+		if (track->zb.robj == NULL) {
+			DRM_ERROR("[drm] No buffer for z buffer !\n");
+			return -EINVAL;
+		}
+		size = track->zb.pitch * track->zb.cpp * track->maxy;
+		size += track->zb.offset;
+		if (size > radeon_bo_size(track->zb.robj)) {
+			DRM_ERROR("[drm] Buffer too small for z buffer "
+				  "(need %lu have %lu) !\n", size,
+				  radeon_bo_size(track->zb.robj));
+			DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
+				  track->zb.pitch, track->zb.cpp,
+				  track->zb.offset, track->maxy);
+			return -EINVAL;
+		}
+	}
+	track->zb_dirty = false;
+
+	if (track->aa_dirty && track->aaresolve) {
+		if (track->aa.robj == NULL) {
+			DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+			return -EINVAL;
+		}
+		/* I believe the format comes from colorbuffer0. */
+		size = track->aa.pitch * track->cb[0].cpp * track->maxy;
+		size += track->aa.offset;
+		if (size > radeon_bo_size(track->aa.robj)) {
+			DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
+				  "(need %lu have %lu) !\n", i, size,
+				  radeon_bo_size(track->aa.robj));
+			DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
+				  i, track->aa.pitch, track->cb[0].cpp,
+				  track->aa.offset, track->maxy);
+			return -EINVAL;
+		}
+	}
+	track->aa_dirty = false;
+
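+	/* Bits 5:4 of VAP_VF_CNTL hold PRIM_WALK (1 = indices, 2 = vertex
+	 * list, 3 = vertices embedded in the packet); bit 14 selects the
+	 * alternate vertex count, otherwise NUM_VERTICES is the top 16 bits. */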
+	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
+	if (track->vap_vf_cntl & (1 << 14)) {
+		nverts = track->vap_alt_nverts;
+	} else {
+		nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
+	}
+	switch (prim_walk) {
+	case 1:
+		for (i = 0; i < track->num_arrays; i++) {
+			size = track->arrays[i].esize * track->max_indx * 4;
+			if (track->arrays[i].robj == NULL) {
+				DRM_ERROR("(PW %u) Vertex array %u no buffer "
+					  "bound\n", prim_walk, i);
+				return -EINVAL;
+			}
+			if (size > radeon_bo_size(track->arrays[i].robj)) {
+				dev_err(rdev->dev, "(PW %u) Vertex array %u "
+					"need %lu dwords have %lu dwords\n",
+					prim_walk, i, size >> 2,
+					radeon_bo_size(track->arrays[i].robj)
+					>> 2);
+				DRM_ERROR("Max indices %u\n", track->max_indx);
+				return -EINVAL;
+			}
+		}
+		break;
+	case 2:
+		for (i = 0; i < track->num_arrays; i++) {
+			size = track->arrays[i].esize * (nverts - 1) * 4;
+			if (track->arrays[i].robj == NULL) {
+				DRM_ERROR("(PW %u) Vertex array %u no buffer "
+					  "bound\n", prim_walk, i);
+				return -EINVAL;
+			}
+			if (size > radeon_bo_size(track->arrays[i].robj)) {
+				dev_err(rdev->dev, "(PW %u) Vertex array %u "
+					"need %lu dwords have %lu dwords\n",
+					prim_walk, i, size >> 2,
+					radeon_bo_size(track->arrays[i].robj)
+					>> 2);
+				return -EINVAL;
+			}
+		}
+		break;
+	case 3:
+		size = track->vtx_size * nverts;
+		if (size != track->immd_dwords) {
+			DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
+				  track->immd_dwords, size);
+			DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
+				  nverts, track->vtx_size);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
+			  prim_walk);
+		return -EINVAL;
+	}
+
+	if (track->tex_dirty) {
+		track->tex_dirty = false;
+		return r100_cs_track_texture_check(rdev, track);
+	}
+	return 0;
+}
+
+void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
+{
+	unsigned i, face;
+
+	track->cb_dirty = true;
+	track->zb_dirty = true;
+	track->tex_dirty = true;
+	track->aa_dirty = true;
+
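+	/* The defaults below are intentionally worst-case so that any state
+	 * the CS never programs still trips the size checks. */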
+	if (rdev->family < CHIP_R300) {
+		track->num_cb = 1;
+		if (rdev->family <= CHIP_RS200)
+			track->num_texture = 3;
+		else
+			track->num_texture = 6;
+		track->maxy = 2048;
+		track->separate_cube = 1;
+	} else {
+		track->num_cb = 4;
+		track->num_texture = 16;
+		track->maxy = 4096;
+		track->separate_cube = 0;
+		track->aaresolve = false;
+		track->aa.robj = NULL;
+	}
+
+	for (i = 0; i < track->num_cb; i++) {
+		track->cb[i].robj = NULL;
+		track->cb[i].pitch = 8192;
+		track->cb[i].cpp = 16;
+		track->cb[i].offset = 0;
+	}
+	track->z_enabled = true;
+	track->zb.robj = NULL;
+	track->zb.pitch = 8192;
+	track->zb.cpp = 4;
+	track->zb.offset = 0;
+	track->vtx_size = 0x7F;
+	track->immd_dwords = 0xFFFFFFFFUL;
+	track->num_arrays = 11;
+	track->max_indx = 0x00FFFFFFUL;
+	for (i = 0; i < track->num_arrays; i++) {
+		track->arrays[i].robj = NULL;
+		track->arrays[i].esize = 0x7F;
+	}
+	for (i = 0; i < track->num_texture; i++) {
+		track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+		track->textures[i].pitch = 16536;
+		track->textures[i].width = 16536;
+		track->textures[i].height = 16536;
+		track->textures[i].width_11 = 1 << 11;
+		track->textures[i].height_11 = 1 << 11;
+		track->textures[i].num_levels = 12;
+		if (rdev->family <= CHIP_RS200) {
+			track->textures[i].tex_coord_type = 0;
+			track->textures[i].txdepth = 0;
+		} else {
+			track->textures[i].txdepth = 16;
+			track->textures[i].tex_coord_type = 1;
+		}
+		track->textures[i].cpp = 64;
+		track->textures[i].robj = NULL;
+		/* CS IB emission code makes sure texture units are disabled */
+		track->textures[i].enabled = false;
+		track->textures[i].lookup_disable = false;
+		track->textures[i].roundup_w = true;
+		track->textures[i].roundup_h = true;
+		if (track->separate_cube)
+			for (face = 0; face < 5; face++) {
+				track->textures[i].cube_info[face].robj = NULL;
+				track->textures[i].cube_info[face].width = 16536;
+				track->textures[i].cube_info[face].height = 16536;
+				track->textures[i].cube_info[face].offset = 0;
+			}
+	}
+}
+
+/*
+ * Global GPU functions
+ */
+static void r100_errata(struct radeon_device *rdev)
+{
+	rdev->pll_errata = 0;
+
+	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
+		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
+	}
+
+	if (rdev->family == CHIP_RV100 ||
+	    rdev->family == CHIP_RS100 ||
+	    rdev->family == CHIP_RS200) {
+		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
+	}
+}
+
+static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
+{
+	unsigned i;
+	uint32_t tmp;
+
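+	/* Poll RBBM_STATUS.FIFOCNT until at least n FIFO entries are free. */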
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
+		if (tmp >= n) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+int r100_gui_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
+		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
+		       " Bad things might happen.\n");
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(RADEON_RBBM_STATUS);
+		if (!(tmp & RADEON_RBBM_ACTIVE)) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+int r100_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & RADEON_MC_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 rbbm_status;
+
+	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force CP activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+void r100_enable_bm(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	/* Enable bus mastering */
+	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+	WREG32(RADEON_BUS_CNTL, tmp);
+}
+
+void r100_bm_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* disable bus mastering */
+	tmp = RREG32(R_000030_BUS_CNTL);
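+	/* Bit 6 (0x40) is BUS_MASTER_DIS; the 0x44 -> 0x42 -> 0x40 sequence
+	 * appears to step two extra control bits down while keeping mastering
+	 * disabled, with a delay between each write. */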
+	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
+	mdelay(1);
+	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
+	mdelay(1);
+	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+	tmp = RREG32(RADEON_BUS_CNTL);
+	mdelay(1);
+	pci_clear_master(rdev->pdev);
+	mdelay(1);
+}
+
+int r100_asic_reset(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+	u32 status, tmp;
+	int ret = 0;
+
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
+	}
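+	/* Reset sequence: stop the CP, disable bus mastering, pulse soft
+	 * reset on the 3D blocks (SE/RE/PP/RB), then reset the CP itself,
+	 * and finally restore PCI state and bus mastering. */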
+	r100_mc_stop(rdev, &save);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* stop CP */
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	WREG32(RADEON_CP_RB_WPTR, 0);
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	/* save PCI state */
+	pci_save_state(rdev->pdev);
+	/* disable bus mastering */
+	r100_bm_disable(rdev);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
+					S_0000F0_SOFT_RESET_RE(1) |
+					S_0000F0_SOFT_RESET_PP(1) |
+					S_0000F0_SOFT_RESET_RB(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* reset CP */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* restore PCI & busmastering */
+	pci_restore_state(rdev->pdev);
+	r100_enable_bm(rdev);
+	/* Check if GPU is idle */
+	if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
+		G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
+		dev_err(rdev->dev, "failed to reset GPU\n");
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeed\n");
+	r100_mc_resume(rdev, &save);
+	return ret;
+}
+
+void r100_set_common_regs(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	bool force_dac2 = false;
+	u32 tmp;
+
+	/* set these so they don't interfere with anything */
+	WREG32(RADEON_OV0_SCALE_CNTL, 0);
+	WREG32(RADEON_SUBPIC_CNTL, 0);
+	WREG32(RADEON_VIPH_CONTROL, 0);
+	WREG32(RADEON_I2C_CNTL_1, 0);
+	WREG32(RADEON_DVI_I2C_CNTL_1, 0);
+	WREG32(RADEON_CAP0_TRIG_CNTL, 0);
+	WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+
+	/* always set up dac2 on rn50 and some rv100 as lots
+	 * of servers seem to wire it up to a VGA port but
+	 * don't report it in the bios connector
+	 * table.
+	 */
+	switch (dev->pdev->device) {
+		/* RN50 */
+	case 0x515e:
+	case 0x5969:
+		force_dac2 = true;
+		break;
+		/* RV100*/
+	case 0x5159:
+	case 0x515a:
+		/* DELL triple head servers */
+		if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
+		    ((dev->pdev->subsystem_device == 0x016c) ||
+		     (dev->pdev->subsystem_device == 0x016d) ||
+		     (dev->pdev->subsystem_device == 0x016e) ||
+		     (dev->pdev->subsystem_device == 0x016f) ||
+		     (dev->pdev->subsystem_device == 0x0170) ||
+		     (dev->pdev->subsystem_device == 0x017d) ||
+		     (dev->pdev->subsystem_device == 0x017e) ||
+		     (dev->pdev->subsystem_device == 0x0183) ||
+		     (dev->pdev->subsystem_device == 0x018a) ||
+		     (dev->pdev->subsystem_device == 0x019a)))
+			force_dac2 = true;
+		break;
+	}
+
+	if (force_dac2) {
+		u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+		u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+		u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+
+		/* For CRT on DAC2, don't turn it on if the BIOS didn't
+		   enable it, even if it's detected.
+		*/
+
+		/* force it to crtc0 */
+		dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
+		dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
+		disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+
+		/* set up the TV DAC */
+		tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
+				 RADEON_TV_DAC_STD_MASK |
+				 RADEON_TV_DAC_RDACPD |
+				 RADEON_TV_DAC_GDACPD |
+				 RADEON_TV_DAC_BDACPD |
+				 RADEON_TV_DAC_BGADJ_MASK |
+				 RADEON_TV_DAC_DACADJ_MASK);
+		tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
+				RADEON_TV_DAC_NHOLD |
+				RADEON_TV_DAC_STD_PS2 |
+				(0x58 << 16));
+
+		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+	}
+
+	/* switch PM block to ACPI mode */
+	tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+	tmp &= ~RADEON_PM_MODE_SEL;
+	WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+}
+
+/*
+ * VRAM info
+ */
+static void r100_vram_get_type(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	rdev->mc.vram_is_ddr = false;
+	if (rdev->flags & RADEON_IS_IGP)
+		rdev->mc.vram_is_ddr = true;
+	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
+		rdev->mc.vram_is_ddr = true;
+	if ((rdev->family == CHIP_RV100) ||
+	    (rdev->family == CHIP_RS100) ||
+	    (rdev->family == CHIP_RS200)) {
+		tmp = RREG32(RADEON_MEM_CNTL);
+		if (tmp & RV100_HALF_MODE) {
+			rdev->mc.vram_width = 32;
+		} else {
+			rdev->mc.vram_width = 64;
+		}
+		if (rdev->flags & RADEON_SINGLE_CRTC) {
+			rdev->mc.vram_width /= 4;
+			rdev->mc.vram_is_ddr = true;
+		}
+	} else if (rdev->family <= CHIP_RV280) {
+		tmp = RREG32(RADEON_MEM_CNTL);
+		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
+			rdev->mc.vram_width = 128;
+		} else {
+			rdev->mc.vram_width = 64;
+		}
+	} else {
+		/* newer IGPs */
+		rdev->mc.vram_width = 128;
+	}
+}
+
+static u32 r100_get_accessible_vram(struct radeon_device *rdev)
+{
+	u32 aper_size;
+	u8 byte;
+
+	aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
+
+	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
+	 * that is, those with the 2nd generation multifunction PCI interface.
+	 */
+	if (rdev->family == CHIP_RV280 ||
+	    rdev->family >= CHIP_RV350) {
+		WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
+		       ~RADEON_HDP_APER_CNTL);
+		DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
+		return aper_size * 2;
+	}
+
+	/* Older cards have all sorts of funny issues to deal with. First
+	 * check if it's a multifunction card by reading the PCI config
+	 * header type... Limit those to one aperture size
+	 */
+	pci_read_config_byte(rdev->pdev, 0xe, &byte);
+	if (byte & 0x80) {
+		DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
+		DRM_INFO("Limiting VRAM to one aperture\n");
+		return aper_size;
+	}
+
+	/* Single function older card. We read HDP_APER_CNTL to see how the BIOS
+	 * has set it up. We don't write this as it's broken on some ASICs, but
+	 * we expect the BIOS to have done the right thing (might be too optimistic...)
+	 */
+	if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
+		return aper_size * 2;
+	return aper_size;
+}
+
+void r100_vram_init_sizes(struct radeon_device *rdev)
+{
+	u64 config_aper_size;
+
+	/* work out accessible VRAM */
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
+	/* FIXME we don't use the second aperture yet when we could use it */
+	if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
+		rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
+	if (rdev->flags & RADEON_IS_IGP) {
+		uint32_t tom;
+		/* read NB_TOM to get the amount of ram stolen for the GPU */
+		tom = RREG32(RADEON_NB_TOM);
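+		/* NB_TOM packs the stolen range in 64KB units: bottom in the
+		 * low word, top in the high word, so
+		 * size = (top - bottom + 1) << 16. */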
+		rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
+		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	} else {
+		rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+		/* Some production boards of m6 will report 0
+		 * if it's 8 MB
+		 */
+		if (rdev->mc.real_vram_size == 0) {
+			rdev->mc.real_vram_size = 8192 * 1024;
+			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+		}
+		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
+		 * Novell bug 204882, along with lots of Ubuntu ones
+		 */
+		if (rdev->mc.aper_size > config_aper_size)
+			config_aper_size = rdev->mc.aper_size;
+
+		if (config_aper_size > rdev->mc.real_vram_size)
+			rdev->mc.mc_vram_size = config_aper_size;
+		else
+			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	}
+}
+
+void r100_vga_set_state(struct radeon_device *rdev, bool state)
+{
+	uint32_t temp;
+
+	temp = RREG32(RADEON_CONFIG_CNTL);
+	if (state == false) {
+		temp &= ~RADEON_CFG_VGA_RAM_EN;
+		temp |= RADEON_CFG_VGA_IO_DIS;
+	} else {
+		temp &= ~RADEON_CFG_VGA_IO_DIS;
+	}
+	WREG32(RADEON_CONFIG_CNTL, temp);
+}
+
+static void r100_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+
+	r100_vram_get_type(rdev);
+	r100_vram_init_sizes(rdev);
+	base = rdev->mc.aper_base;
+	if (rdev->flags & RADEON_IS_IGP)
+		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = 0;
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+/*
+ * Indirect registers accessor
+ */
+void r100_pll_errata_after_index(struct radeon_device *rdev)
+{
+	if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
+		(void)RREG32(RADEON_CLOCK_CNTL_DATA);
+		(void)RREG32(RADEON_CRTC_GEN_CNTL);
+	}
+}
+
+static void r100_pll_errata_after_data(struct radeon_device *rdev)
+{
+	/* This workaround is necessary on RV100, RS100 and RS200 chips,
+	 * or the chip could hang on a subsequent access
+	 */
+	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
+		mdelay(5);
+	}
+
+	/* This function is required to work around a hardware bug in some (all?)
+	 * revisions of the R300.  This workaround should be called after every
+	 * CLOCK_CNTL_INDEX register access.  If not, register reads afterward
+	 * may not be correct.
+	 */
+	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
+		uint32_t save, tmp;
+
+		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
+		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
+		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
+		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
+		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
+	}
+}
+
+uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	uint32_t data;
+
+	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
+	r100_pll_errata_after_index(rdev);
+	data = RREG32(RADEON_CLOCK_CNTL_DATA);
+	r100_pll_errata_after_data(rdev);
+	return data;
+}
+
+void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
+	r100_pll_errata_after_index(rdev);
+	WREG32(RADEON_CLOCK_CNTL_DATA, v);
+	r100_pll_errata_after_data(rdev);
+}
+
+static void r100_set_safe_registers(struct radeon_device *rdev)
+{
+	if (ASIC_IS_RN50(rdev)) {
+		rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
+		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
+	} else if (rdev->family < CHIP_R200) {
+		rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
+		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
+	} else {
+		r200_set_safe_registers(rdev);
+	}
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t reg, value;
+	unsigned i;
+
+	seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
+	seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
+	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+	for (i = 0; i < 64; i++) {
+		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
+		reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
+		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
+		value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
+		seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
+	}
+	return 0;
+}
+
+static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	uint32_t rdp, wdp;
+	unsigned count, i, j;
+
+	radeon_ring_free_size(rdev, ring);
+	rdp = RREG32(RADEON_CP_RB_RPTR);
+	wdp = RREG32(RADEON_CP_RB_WPTR);
+	count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
+	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
+	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
+	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+	seq_printf(m, "%u dwords in ring\n", count);
+	if (ring->ready) {
+		for (j = 0; j <= count; j++) {
+			i = (rdp + j) & ring->ptr_mask;
+			seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+		}
+	}
+	return 0;
+}
+
+static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t csq_stat, csq2_stat, tmp;
+	unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
+	unsigned i;
+
+	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+	seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
+	csq_stat = RREG32(RADEON_CP_CSQ_STAT);
+	csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
+	r_rptr = (csq_stat >> 0) & 0x3ff;
+	r_wptr = (csq_stat >> 10) & 0x3ff;
+	ib1_rptr = (csq_stat >> 20) & 0x3ff;
+	ib1_wptr = (csq2_stat >> 0) & 0x3ff;
+	ib2_rptr = (csq2_stat >> 10) & 0x3ff;
+	ib2_wptr = (csq2_stat >> 20) & 0x3ff;
+	seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
+	seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
+	seq_printf(m, "Ring rptr %u\n", r_rptr);
+	seq_printf(m, "Ring wptr %u\n", r_wptr);
+	seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
+	seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
+	seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
+	seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
+	/* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
+	 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
+	seq_printf(m, "Ring fifo:\n");
+	for (i = 0; i < 256; i++) {
+		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+		tmp = RREG32(RADEON_CP_CSQ_DATA);
+		seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
+	}
+	seq_printf(m, "Indirect1 fifo:\n");
+	for (i = 256; i <= 512; i++) {
+		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+		tmp = RREG32(RADEON_CP_CSQ_DATA);
+		seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
+	}
+	seq_printf(m, "Indirect2 fifo:\n");
+	for (i = 640; i < ib1_wptr; i++) {
+		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
+		tmp = RREG32(RADEON_CP_CSQ_DATA);
+		seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
+	}
+	return 0;
+}
+
+static int r100_debugfs_mc_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(RADEON_CONFIG_MEMSIZE);
+	seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_MC_FB_LOCATION);
+	seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_BUS_CNTL);
+	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_MC_AGP_LOCATION);
+	seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_AGP_BASE);
+	seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_HOST_PATH_CNTL);
+	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
+	tmp = RREG32(0x01D0);
+	seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_AIC_LO_ADDR);
+	seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_AIC_HI_ADDR);
+	seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
+	tmp = RREG32(0x01E4);
+	seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list r100_debugfs_rbbm_list[] = {
+	{"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
+};
+
+static struct drm_info_list r100_debugfs_cp_list[] = {
+	{"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
+	{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
+};
+
+static struct drm_info_list r100_debugfs_mc_info_list[] = {
+	{"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
+};
+#endif
+
+int r100_debugfs_rbbm_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
+#else
+	return 0;
+#endif
+}
+
+int r100_debugfs_cp_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
+#else
+	return 0;
+#endif
+}
+
+int r100_debugfs_mc_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
+int r100_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size)
+{
+	int surf_index = reg * 16;
+	int flags = 0;
+
+	if (rdev->family <= CHIP_RS200) {
+		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
+				 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
+			flags |= RADEON_SURF_TILE_COLOR_BOTH;
+		if (tiling_flags & RADEON_TILING_MACRO)
+			flags |= RADEON_SURF_TILE_COLOR_MACRO;
+	} else if (rdev->family <= CHIP_RV280) {
+		if (tiling_flags & (RADEON_TILING_MACRO))
+			flags |= R200_SURF_TILE_COLOR_MACRO;
+		if (tiling_flags & RADEON_TILING_MICRO)
+			flags |= R200_SURF_TILE_COLOR_MICRO;
+	} else {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			flags |= R300_SURF_TILE_MACRO;
+		if (tiling_flags & RADEON_TILING_MICRO)
+			flags |= R300_SURF_TILE_MICRO;
+	}
+
+	if (tiling_flags & RADEON_TILING_SWAP_16BIT)
+		flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
+	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
+		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
+
+	/* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */
+	if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
+		if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
+			if (ASIC_IS_RN50(rdev))
+				pitch /= 16;
+	}
+
+	/* r100/r200 divide by 16 */
+	if (rdev->family < CHIP_R300)
+		flags |= pitch / 16;
+	else
+		flags |= pitch / 8;
+
+	DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
+	WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
+	WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
+	WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
+	return 0;
+}
+
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
+{
+	int surf_index = reg * 16;
+	WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
+}
+
+void r100_bandwidth_update(struct radeon_device *rdev)
+{
+	fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
+	fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
+	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
+	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
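+	/* fixed20_12 is 20.12 fixed point; dfixed_init_half(x) encodes x.5,
+	 * e.g. the fractional CAS latencies in the tables below. */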
+	fixed20_12 memtcas_ff[8] = {
+		dfixed_init(1),
+		dfixed_init(2),
+		dfixed_init(3),
+		dfixed_init(0),
+		dfixed_init_half(1),
+		dfixed_init_half(2),
+		dfixed_init(0),
+	};
+	fixed20_12 memtcas_rs480_ff[8] = {
+		dfixed_init(0),
+		dfixed_init(1),
+		dfixed_init(2),
+		dfixed_init(3),
+		dfixed_init(0),
+		dfixed_init_half(1),
+		dfixed_init_half(2),
+		dfixed_init_half(3),
+	};
+	fixed20_12 memtcas2_ff[8] = {
+		dfixed_init(0),
+		dfixed_init(1),
+		dfixed_init(2),
+		dfixed_init(3),
+		dfixed_init(4),
+		dfixed_init(5),
+		dfixed_init(6),
+		dfixed_init(7),
+	};
+	fixed20_12 memtrbs[8] = {
+		dfixed_init(1),
+		dfixed_init_half(1),
+		dfixed_init(2),
+		dfixed_init_half(2),
+		dfixed_init(3),
+		dfixed_init_half(3),
+		dfixed_init(4),
+		dfixed_init_half(4)
+	};
+	fixed20_12 memtrbs_r4xx[8] = {
+		dfixed_init(4),
+		dfixed_init(5),
+		dfixed_init(6),
+		dfixed_init(7),
+		dfixed_init(8),
+		dfixed_init(9),
+		dfixed_init(10),
+		dfixed_init(11)
+	};
+	fixed20_12 min_mem_eff;
+	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
+	fixed20_12 cur_latency_mclk, cur_latency_sclk;
+	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
+		disp_drain_rate2, read_return_rate;
+	fixed20_12 time_disp1_drop_priority;
+	int c;
+	int cur_size = 16;       /* in octawords */
+	int critical_point = 0, critical_point2;
+/* 	uint32_t read_return_rate, time_disp1_drop_priority; */
+	int stop_req, max_stop_req;
+	struct drm_display_mode *mode1 = NULL;
+	struct drm_display_mode *mode2 = NULL;
+	uint32_t pixel_bytes1 = 0;
+	uint32_t pixel_bytes2 = 0;
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled) {
+		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
+		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
+	}
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		if (rdev->mode_info.crtcs[1]->base.enabled) {
+			mode2 = &rdev->mode_info.crtcs[1]->base.mode;
+			pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
+		}
+	}
+
+	min_mem_eff.full = dfixed_const_8(0);
+	/* get modes */
+	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
+		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
+		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
+		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
+		/* check crtc enables */
+		if (mode2)
+			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
+		if (mode1)
+			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
+		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
+	}
+
+	/*
+	 * determine if there is enough bw for the current mode
+	 */
+	sclk_ff = rdev->pm.sclk;
+	mclk_ff = rdev->pm.mclk;
+
+	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
+	temp_ff.full = dfixed_const(temp);
+	mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
+
+	pix_clk.full = 0;
+	pix_clk2.full = 0;
+	peak_disp_bw.full = 0;
+	if (mode1) {
+		temp_ff.full = dfixed_const(1000);
+		pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
+		pix_clk.full = dfixed_div(pix_clk, temp_ff);
+		temp_ff.full = dfixed_const(pixel_bytes1);
+		peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
+	}
+	if (mode2) {
+		temp_ff.full = dfixed_const(1000);
+		pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
+		pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
+		temp_ff.full = dfixed_const(pixel_bytes2);
+		peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
+	}
+
+	mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
+	if (peak_disp_bw.full >= mem_bw.full) {
+		DRM_ERROR("You may not have enough display bandwidth for current mode\n"
+			  "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
+	}
+
+	/*  Get values from the EXT_MEM_CNTL register...converting its contents. */
+	temp = RREG32(RADEON_MEM_TIMING_CNTL);
+	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
+		mem_trcd = ((temp >> 2) & 0x3) + 1;
+		mem_trp  = ((temp & 0x3)) + 1;
+		mem_tras = ((temp & 0x70) >> 4) + 1;
+	} else if (rdev->family == CHIP_R300 ||
+		   rdev->family == CHIP_R350) { /* r300, r350 */
+		mem_trcd = (temp & 0x7) + 1;
+		mem_trp = ((temp >> 8) & 0x7) + 1;
+		mem_tras = ((temp >> 11) & 0xf) + 4;
+	} else if (rdev->family == CHIP_RV350 ||
+		   rdev->family <= CHIP_RV380) {
+		/* rv3x0 */
+		mem_trcd = (temp & 0x7) + 3;
+		mem_trp = ((temp >> 8) & 0x7) + 3;
+		mem_tras = ((temp >> 11) & 0xf) + 6;
+	} else if (rdev->family == CHIP_R420 ||
+		   rdev->family == CHIP_R423 ||
+		   rdev->family == CHIP_RV410) {
+		/* r4xx */
+		mem_trcd = (temp & 0xf) + 3;
+		if (mem_trcd > 15)
+			mem_trcd = 15;
+		mem_trp = ((temp >> 8) & 0xf) + 3;
+		if (mem_trp > 15)
+			mem_trp = 15;
+		mem_tras = ((temp >> 12) & 0x1f) + 6;
+		if (mem_tras > 31)
+			mem_tras = 31;
+	} else { /* RV200, R200 */
+		mem_trcd = (temp & 0x7) + 1;
+		mem_trp = ((temp >> 8) & 0x7) + 1;
+		mem_tras = ((temp >> 12) & 0xf) + 4;
+	}
+	/* convert to FF */
+	trcd_ff.full = dfixed_const(mem_trcd);
+	trp_ff.full = dfixed_const(mem_trp);
+	tras_ff.full = dfixed_const(mem_tras);
+
+	/* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
+	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+	data = (temp & (7 << 20)) >> 20;
+	if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
+		if (rdev->family == CHIP_RS480) /* don't think rs400 */
+			tcas_ff = memtcas_rs480_ff[data];
+		else
+			tcas_ff = memtcas_ff[data];
+	} else
+		tcas_ff = memtcas2_ff[data];
+
+	if (rdev->family == CHIP_RS400 ||
+	    rdev->family == CHIP_RS480) {
+		/* extra cas latency stored in bits 23-25 0-4 clocks */
+		data = (temp >> 23) & 0x7;
+		if (data < 5)
+			tcas_ff.full += dfixed_const(data);
+	}
+
+	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
+		/* on the R300, Tcas is included in Trbs.
+		 */
+		temp = RREG32(RADEON_MEM_CNTL);
+		data = (R300_MEM_NUM_CHANNELS_MASK & temp);
+		if (data == 1) {
+			if (R300_MEM_USE_CD_CH_ONLY & temp) {
+				temp = RREG32(R300_MC_IND_INDEX);
+				temp &= ~R300_MC_IND_ADDR_MASK;
+				temp |= R300_MC_READ_CNTL_CD_mcind;
+				WREG32(R300_MC_IND_INDEX, temp);
+				temp = RREG32(R300_MC_IND_DATA);
+				data = (R300_MEM_RBS_POSITION_C_MASK & temp);
+			} else {
+				temp = RREG32(R300_MC_READ_CNTL_AB);
+				data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+			}
+		} else {
+			temp = RREG32(R300_MC_READ_CNTL_AB);
+			data = (R300_MEM_RBS_POSITION_A_MASK & temp);
+		}
+		if (rdev->family == CHIP_RV410 ||
+		    rdev->family == CHIP_R420 ||
+		    rdev->family == CHIP_R423)
+			trbs_ff = memtrbs_r4xx[data];
+		else
+			trbs_ff = memtrbs[data];
+		tcas_ff.full += trbs_ff.full;
+	}
+
+	sclk_eff_ff.full = sclk_ff.full;
+
+	if (rdev->flags & RADEON_IS_AGP) {
+		fixed20_12 agpmode_ff;
+		agpmode_ff.full = dfixed_const(radeon_agpmode);
+		temp_ff.full = dfixed_const_666(16);
+		sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
+	}
+	/* TODO PCIE lanes may affect this - agpmode == 16?? */
+
+	if (ASIC_IS_R300(rdev)) {
+		sclk_delay_ff.full = dfixed_const(250);
+	} else {
+		if ((rdev->family == CHIP_RV100) ||
+		    rdev->flags & RADEON_IS_IGP) {
+			if (rdev->mc.vram_is_ddr)
+				sclk_delay_ff.full = dfixed_const(41);
+			else
+				sclk_delay_ff.full = dfixed_const(33);
+		} else {
+			if (rdev->mc.vram_width == 128)
+				sclk_delay_ff.full = dfixed_const(57);
+			else
+				sclk_delay_ff.full = dfixed_const(41);
+		}
+	}
+
+	mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
+
+	if (rdev->mc.vram_is_ddr) {
+		if (rdev->mc.vram_width == 32) {
+			k1.full = dfixed_const(40);
+			c  = 3;
+		} else {
+			k1.full = dfixed_const(20);
+			c  = 1;
+		}
+	} else {
+		k1.full = dfixed_const(40);
+		c  = 3;
+	}
+
+	temp_ff.full = dfixed_const(2);
+	mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
+	temp_ff.full = dfixed_const(c);
+	mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
+	temp_ff.full = dfixed_const(4);
+	mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
+	mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
+	mc_latency_mclk.full += k1.full;
+
+	mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
+	mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
+
+	/*
+	  HW cursor time assuming worst case of full size colour cursor.
+	*/
+	temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+	temp_ff.full += trcd_ff.full;
+	if (temp_ff.full < tras_ff.full)
+		temp_ff.full = tras_ff.full;
+	cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
+
+	temp_ff.full = dfixed_const(cur_size);
+	cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
+	/*
+	  Find the total latency for the display data.
+	*/
+	disp_latency_overhead.full = dfixed_const(8);
+	disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
+	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
+	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
+
+	if (mc_latency_mclk.full > mc_latency_sclk.full)
+		disp_latency.full = mc_latency_mclk.full;
+	else
+		disp_latency.full = mc_latency_sclk.full;
+
+	/* setup Max GRPH_STOP_REQ default value */
+	if (ASIC_IS_RV100(rdev))
+		max_stop_req = 0x5c;
+	else
+		max_stop_req = 0x7c;
+
+	if (mode1) {
+		/*  CRTC1
+		    Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
+		    GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
+		*/
+		stop_req = mode1->hdisplay * pixel_bytes1 / 16;
+
+		if (stop_req > max_stop_req)
+			stop_req = max_stop_req;
+
+		/*
+		  Find the drain rate of the display buffer.
+		*/
+		temp_ff.full = dfixed_const((16/pixel_bytes1));
+		disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
+
+		/*
+		  Find the critical point of the display buffer.
+		*/
+		crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
+		crit_point_ff.full += dfixed_const_half(0);
+
+		critical_point = dfixed_trunc(crit_point_ff);
+
+		if (rdev->disp_priority == 2) {
+			critical_point = 0;
+		}
+
+		/*
+		  The critical point should never be above max_stop_req-4.  Setting
+		  GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
+		*/
+		if (max_stop_req - critical_point < 4)
+			critical_point = 0;
+
+		if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
+			/* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
+			critical_point = 0x10;
+		}
+
+		temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
+		temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
+		temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+		temp &= ~(RADEON_GRPH_START_REQ_MASK);
+		if ((rdev->family == CHIP_R350) &&
+		    (stop_req > 0x15)) {
+			stop_req -= 0x10;
+		}
+		temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+		temp |= RADEON_GRPH_BUFFER_SIZE;
+		temp &= ~(RADEON_GRPH_CRITICAL_CNTL   |
+			  RADEON_GRPH_CRITICAL_AT_SOF |
+			  RADEON_GRPH_STOP_CNTL);
+		/*
+		  Write the result into the register.
+		*/
+		WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+						       (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+#if 0
+		if ((rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480)) {
+			/* attempt to program RS400 disp regs correctly ??? */
+			temp = RREG32(RS400_DISP1_REG_CNTL);
+			temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
+				  RS400_DISP1_STOP_REQ_LEVEL_MASK);
+			WREG32(RS400_DISP1_REQ_CNTL1, (temp |
+						       (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+						       (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+			temp = RREG32(RS400_DMIF_MEM_CNTL1);
+			temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
+				  RS400_DISP1_CRITICAL_POINT_STOP_MASK);
+			WREG32(RS400_DMIF_MEM_CNTL1, (temp |
+						      (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
+						      (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
+		}
+#endif
+
+		DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n",
+			  /* 	  (unsigned int)info->SavedReg->grph_buffer_cntl, */
+			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
+	}
+
+	if (mode2) {
+		u32 grph2_cntl;
+		stop_req = mode2->hdisplay * pixel_bytes2 / 16;
+
+		if (stop_req > max_stop_req)
+			stop_req = max_stop_req;
+
+		/*
+		  Find the drain rate of the display buffer.
+		*/
+		temp_ff.full = dfixed_const((16/pixel_bytes2));
+		disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
+
+		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
+		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
+		grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
+		grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
+		if ((rdev->family == CHIP_R350) &&
+		    (stop_req > 0x15)) {
+			stop_req -= 0x10;
+		}
+		grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
+		grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
+		grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL   |
+			  RADEON_GRPH_CRITICAL_AT_SOF |
+			  RADEON_GRPH_STOP_CNTL);
+
+		if ((rdev->family == CHIP_RS100) ||
+		    (rdev->family == CHIP_RS200))
+			critical_point2 = 0;
+		else {
+			temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
+			temp_ff.full = dfixed_const(temp);
+			temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
+			if (sclk_ff.full < temp_ff.full)
+				temp_ff.full = sclk_ff.full;
+
+			read_return_rate.full = temp_ff.full;
+
+			if (mode1) {
+				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
+				time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
+			} else {
+				time_disp1_drop_priority.full = 0;
+			}
+			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
+			crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
+			crit_point_ff.full += dfixed_const_half(0);
+
+			critical_point2 = dfixed_trunc(crit_point_ff);
+
+			if (rdev->disp_priority == 2) {
+				critical_point2 = 0;
+			}
+
+			if (max_stop_req - critical_point2 < 4)
+				critical_point2 = 0;
+
+		}
+
+		if (critical_point2 == 0 && rdev->family == CHIP_R300) {
+			/* some R300 cards have problem with this set to 0 */
+			critical_point2 = 0x10;
+		}
+
+		WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
+						  (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
+
+		if ((rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480)) {
+#if 0
+			/* attempt to program RS400 disp2 regs correctly ??? */
+			temp = RREG32(RS400_DISP2_REQ_CNTL1);
+			temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
+				  RS400_DISP2_STOP_REQ_LEVEL_MASK);
+			WREG32(RS400_DISP2_REQ_CNTL1, (temp |
+						       (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
+						       (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
+			temp = RREG32(RS400_DISP2_REQ_CNTL2);
+			temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
+				  RS400_DISP2_CRITICAL_POINT_STOP_MASK);
+			WREG32(RS400_DISP2_REQ_CNTL2, (temp |
+						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
+						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
+#endif
+			WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
+			WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
+			WREG32(RS400_DMIF_MEM_CNTL1,  0x29CA71DC);
+			WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
+		}
+
+		DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
+			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
+	}
+}
+
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
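+	/* Seed a scratch register with a sentinel, have the CP overwrite it
+	 * via a PACKET0 write from the ring, then poll for the new value to
+	 * prove the ring is fetching and executing commands. */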
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ring_lock(rdev, ring, 2);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		radeon_scratch_free(rdev, scratch);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET0(scratch, 0));
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF) {
+			break;
+		}
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test succeeded in %d usecs\n", i);
+	} else {
+		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
+			  scratch, tmp);
+		r = -EINVAL;
+	}
+	radeon_scratch_free(rdev, scratch);
+	return r;
+}
+
+void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+	if (ring->rptr_save_reg) {
+		u32 next_rptr = ring->wptr + 2 + 3;
+		radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
+	radeon_ring_write(ring, ib->gpu_addr);
+	radeon_ring_write(ring, ib->length_dw);
+}
+
+int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_ib ib;
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		goto free_scratch;
+	}
+	ib.ptr[0] = PACKET0(scratch, 0);
+	ib.ptr[1] = 0xDEADBEEF;
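+	/* PACKET2 is a type-2 NOP; pad the IB out to 8 dwords with them. */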
+	ib.ptr[2] = PACKET2(0);
+	ib.ptr[3] = PACKET2(0);
+	ib.ptr[4] = PACKET2(0);
+	ib.ptr[5] = PACKET2(0);
+	ib.ptr[6] = PACKET2(0);
+	ib.ptr[7] = PACKET2(0);
+	ib.length_dw = 8;
+	r = radeon_ib_schedule(rdev, &ib, NULL);
+	if (r) {
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		goto free_ib;
+	}
+	r = radeon_fence_wait(ib.fence, false);
+	if (r) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		goto free_ib;
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF) {
+			break;
+		}
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ib test succeeded in %u usecs\n", i);
+	} else {
+		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
+			  scratch, tmp);
+		r = -EINVAL;
+	}
+free_ib:
+	radeon_ib_free(rdev, &ib);
+free_scratch:
+	radeon_scratch_free(rdev, scratch);
+	return r;
+}
+
+void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
+{
+	/* Shut down the CP; we shouldn't need to, but better safe than
+	 * sorry.
+	 */
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	WREG32(R_000740_CP_CSQ_CNTL, 0);
+
+	/* Save few CRTC registers */
+	save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
+	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
+	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
+	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
+		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
+	}
+
+	/* Disable VGA aperture access */
+	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
+	/* Disable cursor, overlay, crtc */
+	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
+	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
+					S_000054_CRTC_DISPLAY_DIS(1));
+	WREG32(R_000050_CRTC_GEN_CNTL,
+			(C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
+			S_000050_CRTC_DISP_REQ_EN_B(1));
+	WREG32(R_000420_OV0_SCALE_CNTL,
+		C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
+	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
+						S_000360_CUR2_LOCK(1));
+		WREG32(R_0003F8_CRTC2_GEN_CNTL,
+			(C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
+			S_0003F8_CRTC2_DISPLAY_DIS(1) |
+			S_0003F8_CRTC2_DISP_REQ_EN_B(1));
+		WREG32(R_000360_CUR2_OFFSET,
+			C_000360_CUR2_LOCK & save->CUR2_OFFSET);
+	}
+}
+
+void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
+{
+	/* Update base address for crtc */
+	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
+	}
+	/* Restore CRTC registers */
+	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
+	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
+	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
+	}
+}
+
+void r100_vga_render_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG8(R_0003C2_GENMO_WT);
+	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
+}
+
+static void r100_debugfs(struct radeon_device *rdev)
+{
+	int r;
+
+	r = r100_debugfs_mc_info_init(rdev);
+	if (r)
+		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
+}
+
+static void r100_mc_program(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+
+	/* Stop all MC clients */
+	r100_mc_stop(rdev, &save);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(R_00014C_MC_AGP_LOCATION,
+			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+		if (rdev->family > CHIP_RV200)
+			WREG32(R_00015C_AGP_BASE_2,
+				upper_32_bits(rdev->mc.agp_base) & 0xff);
+	} else {
+		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
+		WREG32(R_000170_AGP_BASE, 0);
+		if (rdev->family > CHIP_RV200)
+			WREG32(R_00015C_AGP_BASE_2, 0);
+	}
+	/* Wait for mc idle */
+	if (r100_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
+	/* Program the MC; it is limited to a 32-bit address space */
+	WREG32(R_000148_MC_FB_LOCATION,
+		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	r100_mc_resume(rdev, &save);
+}
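+/*
+ * MC_FB_LOCATION packs the start and top of the framebuffer aperture in
+ * 64KB units (hence the >> 16 above). As a purely illustrative example,
+ * a 128MB aperture at offset 0 (vram_start = 0x00000000, vram_end =
+ * 0x07FFFFFF) would be programmed as
+ * S_000148_MC_FB_START(0x0000) | S_000148_MC_FB_TOP(0x07FF) = 0x07FF0000.
+ */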
+
+static void r100_clock_startup(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (radeon_dynclks != -1 && radeon_dynclks)
+		radeon_legacy_set_clock_gating(rdev, 1);
+	/* We need to force some of the blocks on */
+	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
+		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
+	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+}
+
+static int r100_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
+	r100_mc_program(rdev);
+	/* Resume clock */
+	r100_clock_startup(rdev);
+	/* Initialize GART (initialized after TTM so we can allocate
+	 * memory through TTM, but also finalized after TTM) */
+	r100_enable_bm(rdev);
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r100_irq_set(rdev);
+	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
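+/*
+ * Bring-up order used by r100_startup() above: common registers, memory
+ * controller, clocks, bus mastering/GART, the writeback buffer, fences,
+ * IRQs, the CP ring and finally the IB pool. Later steps depend on the
+ * earlier ones (e.g. the CP ring needs the MC and GART programmed), so
+ * the resume path below reuses this helper rather than duplicating it.
+ */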
+
+int r100_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is not active */
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	r100_clock_startup(rdev);
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	radeon_combios_asic_init(rdev->ddev);
+	/* Resume clock after posting */
+	r100_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = r100_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int r100_suspend(struct radeon_device *rdev)
+{
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	return 0;
+}
+
+void r100_fini(struct radeon_device *rdev)
+{
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+/*
+ * Due to how kexec works, it can leave the hardware fully initialised
+ * when it boots the new kernel. However, running our init sequence with
+ * the CP and writeback already set up causes GPU hangs, at least on the
+ * RN50. So at startup do some quick sanity checks and restore sane
+ * values to avoid this problem.
+ */
+void r100_restore_sanity(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	tmp = RREG32(RADEON_CP_CSQ_CNTL);
+	if (tmp) {
+		WREG32(RADEON_CP_CSQ_CNTL, 0);
+	}
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	if (tmp) {
+		WREG32(RADEON_CP_RB_CNTL, 0);
+	}
+	tmp = RREG32(RADEON_SCRATCH_UMSK);
+	if (tmp) {
+		WREG32(RADEON_SCRATCH_UMSK, 0);
+	}
+}
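+/*
+ * Note: clearing CP_CSQ_CNTL, CP_RB_CNTL and SCRATCH_UMSK here quiesces
+ * any command-queue, ring-buffer and scratch-writeback activity a
+ * previous kernel may have left running, before our own init touches
+ * the hardware.
+ */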
+
+int r100_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Register debugfs file specific to this group of asics */
+	r100_debugfs(rdev);
+	/* Disable VGA */
+	r100_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Sanity-check some registers to avoid hangs, e.g. after kexec */
+	r100_restore_sanity(rdev);
+	/* TODO: disabling VGA properly requires using the VGA request mechanism */
+	/* BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting combios for R100-family GPU\n");
+		return -EINVAL;
+	} else {
+		r = radeon_combios_init(rdev);
+		if (r)
+			return r;
+	}
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* Check whether the card is posted */
+	if (!radeon_boot_test_post_card(rdev))
+		return -EINVAL;
+	/* Set asic errata */
+	r100_errata(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize VRAM */
+	r100_mc_init(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	r100_set_safe_registers(rdev);
+
+	rdev->accel_working = true;
+	r = r100_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init; stop acceleration */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCI)
+			r100_pci_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
+
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+		      bool always_indirect)
+{
+	if (reg < rdev->rmmio_size && !always_indirect)
+		return readl(((void __iomem *)rdev->rmmio) + reg);
+	else {
+		unsigned long flags;
+		uint32_t ret;
+
+		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+		ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+		return ret;
+	}
+}
+
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+		  bool always_indirect)
+{
+	if (reg < rdev->rmmio_size && !always_indirect)
+		writel(v, ((void __iomem *)rdev->rmmio) + reg);
+	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+	}
+}
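+/*
+ * Registers inside the mapped MMIO window (reg < rdev->rmmio_size) are
+ * accessed directly; anything beyond it, or any access with
+ * always_indirect set, goes through the MM_INDEX/MM_DATA register pair.
+ * The mmio_idx_lock keeps the index write and the data access atomic
+ * with respect to concurrent indirect accesses.
+ */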
+
+u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
+{
+	if (reg < rdev->rio_mem_size)
+		return ioread32(rdev->rio_mem + reg);
+	else {
+		iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+		return ioread32(rdev->rio_mem + RADEON_MM_DATA);
+	}
+}
+
+void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	if (reg < rdev->rio_mem_size)
+		iowrite32(v, rdev->rio_mem + reg);
+	else {
+		iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+		iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
+	}
+}
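+/*
+ * PIO analogues of the MMIO accessors above; out-of-window registers use
+ * the same MM_INDEX/MM_DATA indirection. Note that, unlike the MMIO
+ * path, no lock is taken here, so callers presumably have to serialize
+ * indirect PIO accesses themselves.
+ */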
diff --git a/linux-imx/drivers/gpu/drm/radeon/r100_track.h b/linux-imx/drivers/gpu/drm/radeon/r100_track.h
new file mode 100644
index 0000000..eb40888
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r100_track.h
@@ -0,0 +1,97 @@
+
+#define R100_TRACK_MAX_TEXTURE 3
+#define R200_TRACK_MAX_TEXTURE 6
+#define R300_TRACK_MAX_TEXTURE 16
+
+#define R100_MAX_CB 1
+#define R300_MAX_CB 4
+
+/*
+ * CS functions
+ */
+struct r100_cs_track_cb {
+	struct radeon_bo	*robj;
+	unsigned		pitch;
+	unsigned		cpp;
+	unsigned		offset;
+};
+
+struct r100_cs_track_array {
+	struct radeon_bo	*robj;
+	unsigned		esize;
+};
+
+struct r100_cs_cube_info {
+	struct radeon_bo	*robj;
+	unsigned		offset;
+	unsigned		width;
+	unsigned		height;
+};
+
+#define R100_TRACK_COMP_NONE   0
+#define R100_TRACK_COMP_DXT1   1
+#define R100_TRACK_COMP_DXT35  2
+
+struct r100_cs_track_texture {
+	struct radeon_bo	*robj;
+	struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
+	unsigned		pitch;
+	unsigned		width;
+	unsigned		height;
+	unsigned		num_levels;
+	unsigned		cpp;
+	unsigned		tex_coord_type;
+	unsigned		txdepth;
+	unsigned		width_11;
+	unsigned		height_11;
+	bool			use_pitch;
+	bool			enabled;
+	bool                    lookup_disable;
+	bool			roundup_w;
+	bool			roundup_h;
+	unsigned                compress_format;
+};
+
+struct r100_cs_track {
+	unsigned			num_cb;
+	unsigned                        num_texture;
+	unsigned			maxy;
+	unsigned			vtx_size;
+	unsigned			vap_vf_cntl;
+	unsigned			vap_alt_nverts;
+	unsigned			immd_dwords;
+	unsigned			num_arrays;
+	unsigned			max_indx;
+	unsigned			color_channel_mask;
+	struct r100_cs_track_array	arrays[16];
+	struct r100_cs_track_cb 	cb[R300_MAX_CB];
+	struct r100_cs_track_cb 	zb;
+	struct r100_cs_track_cb 	aa;
+	struct r100_cs_track_texture	textures[R300_TRACK_MAX_TEXTURE];
+	bool				z_enabled;
+	bool                            separate_cube;
+	bool				zb_cb_clear;
+	bool				blend_read_enable;
+	bool				cb_dirty;
+	bool				zb_dirty;
+	bool				tex_dirty;
+	bool				aa_dirty;
+	bool				aaresolve;
+};
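+/*
+ * r100_cs_track is the software shadow of the state a command stream has
+ * programmed so far: color buffers, depth buffer, textures and vertex
+ * arrays. The packet checkers fill it in while parsing the stream, and
+ * r100_cs_track_check() (declared below) validates that rendering stays
+ * within the bound buffer objects.
+ */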
+
+int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
+void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track);
+
+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
+
+int r200_packet0_check(struct radeon_cs_parser *p,
+		       struct radeon_cs_packet *pkt,
+		       unsigned idx, unsigned reg);
+
+int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+			    struct radeon_cs_packet *pkt,
+			    unsigned idx,
+			    unsigned reg);
+int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+			     struct radeon_cs_packet *pkt,
+			     int idx);
diff --git a/linux-imx/drivers/gpu/drm/radeon/r100d.h b/linux-imx/drivers/gpu/drm/radeon/r100d.h
new file mode 100644
index 0000000..f0f8ee6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r100d.h
@@ -0,0 +1,869 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R100D_H__
+#define __R100D_H__
+
+#define CP_PACKET0			0x00000000
+#define		PACKET0_BASE_INDEX_SHIFT	0
+#define		PACKET0_BASE_INDEX_MASK		(0x1ffff << 0)
+#define		PACKET0_COUNT_SHIFT		16
+#define		PACKET0_COUNT_MASK		(0x3fff << 16)
+#define CP_PACKET1			0x40000000
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+#define CP_PACKET3			0xC0000000
+#define		PACKET3_IT_OPCODE_SHIFT		8
+#define		PACKET3_IT_OPCODE_MASK		(0xff << 8)
+#define		PACKET3_COUNT_SHIFT		16
+#define		PACKET3_COUNT_MASK		(0x3fff << 16)
+/* PACKET3 op code */
+#define		PACKET3_NOP			0x10
+#define		PACKET3_3D_DRAW_VBUF		0x28
+#define		PACKET3_3D_DRAW_IMMD		0x29
+#define		PACKET3_3D_DRAW_INDX		0x2A
+#define		PACKET3_3D_LOAD_VBPNTR		0x2F
+#define		PACKET3_3D_CLEAR_ZMASK		0x32
+#define		PACKET3_INDX_BUFFER		0x33
+#define		PACKET3_3D_DRAW_VBUF_2		0x34
+#define		PACKET3_3D_DRAW_IMMD_2		0x35
+#define		PACKET3_3D_DRAW_INDX_2		0x36
+#define		PACKET3_3D_CLEAR_HIZ		0x37
+#define		PACKET3_BITBLT_MULTI		0x9B
+
+#define PACKET0(reg, n)	(CP_PACKET0 |					\
+			 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) |	\
+			 REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n)	(CP_PACKET3 |					\
+			 REG_SET(PACKET3_IT_OPCODE, (op)) |		\
+			 REG_SET(PACKET3_COUNT, (n)))
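+/*
+ * Illustrative encodings (these follow directly from the definitions
+ * above, assuming the usual REG_SET(FIELD, v) expansion of
+ * ((v) << FIELD_SHIFT) & FIELD_MASK):
+ *
+ *   PACKET0(0x148, 1)       = 0x00010052  write the two dwords that
+ *                                          follow to regs 0x148 and 0x14C
+ *   PACKET2(0)              = 0x80000000  NOP/padding dword
+ *   PACKET3(PACKET3_NOP, 0) = 0xC0001000  NOP with one data dword
+ *
+ * For PACKET0 and PACKET3 the count field holds (number of dwords - 1).
+ */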
+
+/* Registers */
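+/*
+ * Field-macro naming convention: for a field FOO of register R_xxxxxx,
+ * S_xxxxxx_FOO(v) shifts v into the field's position, G_xxxxxx_FOO(reg)
+ * extracts the field from a register value, and C_xxxxxx_FOO is the
+ * inverse mask used to clear it. The usual read-modify-write idiom is:
+ *
+ *   WREG32(R, (C_xxxxxx_FOO & RREG32(R)) | S_xxxxxx_FOO(v));
+ *
+ * as used by, e.g., r100_mc_stop() for the overlay and cursor bits.
+ */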
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_SE(x)                    (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_SE(x)                    (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_SE                       0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define R_000030_BUS_CNTL                            0x000030
+#define   S_000030_BUS_DBL_RESYNC(x)                   (((x) & 0x1) << 0)
+#define   G_000030_BUS_DBL_RESYNC(x)                   (((x) >> 0) & 0x1)
+#define   C_000030_BUS_DBL_RESYNC                      0xFFFFFFFE
+#define   S_000030_BUS_MSTR_RESET(x)                   (((x) & 0x1) << 1)
+#define   G_000030_BUS_MSTR_RESET(x)                   (((x) >> 1) & 0x1)
+#define   C_000030_BUS_MSTR_RESET                      0xFFFFFFFD
+#define   S_000030_BUS_FLUSH_BUF(x)                    (((x) & 0x1) << 2)
+#define   G_000030_BUS_FLUSH_BUF(x)                    (((x) >> 2) & 0x1)
+#define   C_000030_BUS_FLUSH_BUF                       0xFFFFFFFB
+#define   S_000030_BUS_STOP_REQ_DIS(x)                 (((x) & 0x1) << 3)
+#define   G_000030_BUS_STOP_REQ_DIS(x)                 (((x) >> 3) & 0x1)
+#define   C_000030_BUS_STOP_REQ_DIS                    0xFFFFFFF7
+#define   S_000030_BUS_PM4_READ_COMBINE_EN(x)          (((x) & 0x1) << 4)
+#define   G_000030_BUS_PM4_READ_COMBINE_EN(x)          (((x) >> 4) & 0x1)
+#define   C_000030_BUS_PM4_READ_COMBINE_EN             0xFFFFFFEF
+#define   S_000030_BUS_WRT_COMBINE_EN(x)               (((x) & 0x1) << 5)
+#define   G_000030_BUS_WRT_COMBINE_EN(x)               (((x) >> 5) & 0x1)
+#define   C_000030_BUS_WRT_COMBINE_EN                  0xFFFFFFDF
+#define   S_000030_BUS_MASTER_DIS(x)                   (((x) & 0x1) << 6)
+#define   G_000030_BUS_MASTER_DIS(x)                   (((x) >> 6) & 0x1)
+#define   C_000030_BUS_MASTER_DIS                      0xFFFFFFBF
+#define   S_000030_BIOS_ROM_WRT_EN(x)                  (((x) & 0x1) << 7)
+#define   G_000030_BIOS_ROM_WRT_EN(x)                  (((x) >> 7) & 0x1)
+#define   C_000030_BIOS_ROM_WRT_EN                     0xFFFFFF7F
+#define   S_000030_BM_DAC_CRIPPLE(x)                   (((x) & 0x1) << 8)
+#define   G_000030_BM_DAC_CRIPPLE(x)                   (((x) >> 8) & 0x1)
+#define   C_000030_BM_DAC_CRIPPLE                      0xFFFFFEFF
+#define   S_000030_BUS_NON_PM4_READ_COMBINE_EN(x)      (((x) & 0x1) << 9)
+#define   G_000030_BUS_NON_PM4_READ_COMBINE_EN(x)      (((x) >> 9) & 0x1)
+#define   C_000030_BUS_NON_PM4_READ_COMBINE_EN         0xFFFFFDFF
+#define   S_000030_BUS_XFERD_DISCARD_EN(x)             (((x) & 0x1) << 10)
+#define   G_000030_BUS_XFERD_DISCARD_EN(x)             (((x) >> 10) & 0x1)
+#define   C_000030_BUS_XFERD_DISCARD_EN                0xFFFFFBFF
+#define   S_000030_BUS_SGL_READ_DISABLE(x)             (((x) & 0x1) << 11)
+#define   G_000030_BUS_SGL_READ_DISABLE(x)             (((x) >> 11) & 0x1)
+#define   C_000030_BUS_SGL_READ_DISABLE                0xFFFFF7FF
+#define   S_000030_BIOS_DIS_ROM(x)                     (((x) & 0x1) << 12)
+#define   G_000030_BIOS_DIS_ROM(x)                     (((x) >> 12) & 0x1)
+#define   C_000030_BIOS_DIS_ROM                        0xFFFFEFFF
+#define   S_000030_BUS_PCI_READ_RETRY_EN(x)            (((x) & 0x1) << 13)
+#define   G_000030_BUS_PCI_READ_RETRY_EN(x)            (((x) >> 13) & 0x1)
+#define   C_000030_BUS_PCI_READ_RETRY_EN               0xFFFFDFFF
+#define   S_000030_BUS_AGP_AD_STEPPING_EN(x)           (((x) & 0x1) << 14)
+#define   G_000030_BUS_AGP_AD_STEPPING_EN(x)           (((x) >> 14) & 0x1)
+#define   C_000030_BUS_AGP_AD_STEPPING_EN              0xFFFFBFFF
+#define   S_000030_BUS_PCI_WRT_RETRY_EN(x)             (((x) & 0x1) << 15)
+#define   G_000030_BUS_PCI_WRT_RETRY_EN(x)             (((x) >> 15) & 0x1)
+#define   C_000030_BUS_PCI_WRT_RETRY_EN                0xFFFF7FFF
+#define   S_000030_BUS_RETRY_WS(x)                     (((x) & 0xF) << 16)
+#define   G_000030_BUS_RETRY_WS(x)                     (((x) >> 16) & 0xF)
+#define   C_000030_BUS_RETRY_WS                        0xFFF0FFFF
+#define   S_000030_BUS_MSTR_RD_MULT(x)                 (((x) & 0x1) << 20)
+#define   G_000030_BUS_MSTR_RD_MULT(x)                 (((x) >> 20) & 0x1)
+#define   C_000030_BUS_MSTR_RD_MULT                    0xFFEFFFFF
+#define   S_000030_BUS_MSTR_RD_LINE(x)                 (((x) & 0x1) << 21)
+#define   G_000030_BUS_MSTR_RD_LINE(x)                 (((x) >> 21) & 0x1)
+#define   C_000030_BUS_MSTR_RD_LINE                    0xFFDFFFFF
+#define   S_000030_BUS_SUSPEND(x)                      (((x) & 0x1) << 22)
+#define   G_000030_BUS_SUSPEND(x)                      (((x) >> 22) & 0x1)
+#define   C_000030_BUS_SUSPEND                         0xFFBFFFFF
+#define   S_000030_LAT_16X(x)                          (((x) & 0x1) << 23)
+#define   G_000030_LAT_16X(x)                          (((x) >> 23) & 0x1)
+#define   C_000030_LAT_16X                             0xFF7FFFFF
+#define   S_000030_BUS_RD_DISCARD_EN(x)                (((x) & 0x1) << 24)
+#define   G_000030_BUS_RD_DISCARD_EN(x)                (((x) >> 24) & 0x1)
+#define   C_000030_BUS_RD_DISCARD_EN                   0xFEFFFFFF
+#define   S_000030_ENFRCWRDY(x)                        (((x) & 0x1) << 25)
+#define   G_000030_ENFRCWRDY(x)                        (((x) >> 25) & 0x1)
+#define   C_000030_ENFRCWRDY                           0xFDFFFFFF
+#define   S_000030_BUS_MSTR_WS(x)                      (((x) & 0x1) << 26)
+#define   G_000030_BUS_MSTR_WS(x)                      (((x) >> 26) & 0x1)
+#define   C_000030_BUS_MSTR_WS                         0xFBFFFFFF
+#define   S_000030_BUS_PARKING_DIS(x)                  (((x) & 0x1) << 27)
+#define   G_000030_BUS_PARKING_DIS(x)                  (((x) >> 27) & 0x1)
+#define   C_000030_BUS_PARKING_DIS                     0xF7FFFFFF
+#define   S_000030_BUS_MSTR_DISCONNECT_EN(x)           (((x) & 0x1) << 28)
+#define   G_000030_BUS_MSTR_DISCONNECT_EN(x)           (((x) >> 28) & 0x1)
+#define   C_000030_BUS_MSTR_DISCONNECT_EN              0xEFFFFFFF
+#define   S_000030_SERR_EN(x)                          (((x) & 0x1) << 29)
+#define   G_000030_SERR_EN(x)                          (((x) >> 29) & 0x1)
+#define   C_000030_SERR_EN                             0xDFFFFFFF
+#define   S_000030_BUS_READ_BURST(x)                   (((x) & 0x1) << 30)
+#define   G_000030_BUS_READ_BURST(x)                   (((x) >> 30) & 0x1)
+#define   C_000030_BUS_READ_BURST                      0xBFFFFFFF
+#define   S_000030_BUS_RDY_READ_DLY(x)                 (((x) & 0x1) << 31)
+#define   G_000030_BUS_RDY_READ_DLY(x)                 (((x) >> 31) & 0x1)
+#define   C_000030_BUS_RDY_READ_DLY                    0x7FFFFFFF
+#define R_000040_GEN_INT_CNTL                        0x000040
+#define   S_000040_CRTC_VBLANK(x)                      (((x) & 0x1) << 0)
+#define   G_000040_CRTC_VBLANK(x)                      (((x) >> 0) & 0x1)
+#define   C_000040_CRTC_VBLANK                         0xFFFFFFFE
+#define   S_000040_CRTC_VLINE(x)                       (((x) & 0x1) << 1)
+#define   G_000040_CRTC_VLINE(x)                       (((x) >> 1) & 0x1)
+#define   C_000040_CRTC_VLINE                          0xFFFFFFFD
+#define   S_000040_CRTC_VSYNC(x)                       (((x) & 0x1) << 2)
+#define   G_000040_CRTC_VSYNC(x)                       (((x) >> 2) & 0x1)
+#define   C_000040_CRTC_VSYNC                          0xFFFFFFFB
+#define   S_000040_SNAPSHOT(x)                         (((x) & 0x1) << 3)
+#define   G_000040_SNAPSHOT(x)                         (((x) >> 3) & 0x1)
+#define   C_000040_SNAPSHOT                            0xFFFFFFF7
+#define   S_000040_FP_DETECT(x)                        (((x) & 0x1) << 4)
+#define   G_000040_FP_DETECT(x)                        (((x) >> 4) & 0x1)
+#define   C_000040_FP_DETECT                           0xFFFFFFEF
+#define   S_000040_CRTC2_VLINE(x)                      (((x) & 0x1) << 5)
+#define   G_000040_CRTC2_VLINE(x)                      (((x) >> 5) & 0x1)
+#define   C_000040_CRTC2_VLINE                         0xFFFFFFDF
+#define   S_000040_DMA_VIPH0_INT_EN(x)                 (((x) & 0x1) << 12)
+#define   G_000040_DMA_VIPH0_INT_EN(x)                 (((x) >> 12) & 0x1)
+#define   C_000040_DMA_VIPH0_INT_EN                    0xFFFFEFFF
+#define   S_000040_CRTC2_VSYNC(x)                      (((x) & 0x1) << 6)
+#define   G_000040_CRTC2_VSYNC(x)                      (((x) >> 6) & 0x1)
+#define   C_000040_CRTC2_VSYNC                         0xFFFFFFBF
+#define   S_000040_SNAPSHOT2(x)                        (((x) & 0x1) << 7)
+#define   G_000040_SNAPSHOT2(x)                        (((x) >> 7) & 0x1)
+#define   C_000040_SNAPSHOT2                           0xFFFFFF7F
+#define   S_000040_CRTC2_VBLANK(x)                     (((x) & 0x1) << 9)
+#define   G_000040_CRTC2_VBLANK(x)                     (((x) >> 9) & 0x1)
+#define   C_000040_CRTC2_VBLANK                        0xFFFFFDFF
+#define   S_000040_FP2_DETECT(x)                       (((x) & 0x1) << 10)
+#define   G_000040_FP2_DETECT(x)                       (((x) >> 10) & 0x1)
+#define   C_000040_FP2_DETECT                          0xFFFFFBFF
+#define   S_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) & 0x1) << 11)
+#define   G_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) >> 11) & 0x1)
+#define   C_000040_VSYNC_DIFF_OVER_LIMIT               0xFFFFF7FF
+#define   S_000040_DMA_VIPH1_INT_EN(x)                 (((x) & 0x1) << 13)
+#define   G_000040_DMA_VIPH1_INT_EN(x)                 (((x) >> 13) & 0x1)
+#define   C_000040_DMA_VIPH1_INT_EN                    0xFFFFDFFF
+#define   S_000040_DMA_VIPH2_INT_EN(x)                 (((x) & 0x1) << 14)
+#define   G_000040_DMA_VIPH2_INT_EN(x)                 (((x) >> 14) & 0x1)
+#define   C_000040_DMA_VIPH2_INT_EN                    0xFFFFBFFF
+#define   S_000040_DMA_VIPH3_INT_EN(x)                 (((x) & 0x1) << 15)
+#define   G_000040_DMA_VIPH3_INT_EN(x)                 (((x) >> 15) & 0x1)
+#define   C_000040_DMA_VIPH3_INT_EN                    0xFFFF7FFF
+#define   S_000040_I2C_INT_EN(x)                       (((x) & 0x1) << 17)
+#define   G_000040_I2C_INT_EN(x)                       (((x) >> 17) & 0x1)
+#define   C_000040_I2C_INT_EN                          0xFFFDFFFF
+#define   S_000040_GUI_IDLE(x)                         (((x) & 0x1) << 19)
+#define   G_000040_GUI_IDLE(x)                         (((x) >> 19) & 0x1)
+#define   C_000040_GUI_IDLE                            0xFFF7FFFF
+#define   S_000040_VIPH_INT_EN(x)                      (((x) & 0x1) << 24)
+#define   G_000040_VIPH_INT_EN(x)                      (((x) >> 24) & 0x1)
+#define   C_000040_VIPH_INT_EN                         0xFEFFFFFF
+#define   S_000040_SW_INT_EN(x)                        (((x) & 0x1) << 25)
+#define   G_000040_SW_INT_EN(x)                        (((x) >> 25) & 0x1)
+#define   C_000040_SW_INT_EN                           0xFDFFFFFF
+#define   S_000040_GEYSERVILLE(x)                      (((x) & 0x1) << 27)
+#define   G_000040_GEYSERVILLE(x)                      (((x) >> 27) & 0x1)
+#define   C_000040_GEYSERVILLE                         0xF7FFFFFF
+#define   S_000040_HDCP_AUTHORIZED_INT(x)              (((x) & 0x1) << 28)
+#define   G_000040_HDCP_AUTHORIZED_INT(x)              (((x) >> 28) & 0x1)
+#define   C_000040_HDCP_AUTHORIZED_INT                 0xEFFFFFFF
+#define   S_000040_DVI_I2C_INT(x)                      (((x) & 0x1) << 29)
+#define   G_000040_DVI_I2C_INT(x)                      (((x) >> 29) & 0x1)
+#define   C_000040_DVI_I2C_INT                         0xDFFFFFFF
+#define   S_000040_GUIDMA(x)                           (((x) & 0x1) << 30)
+#define   G_000040_GUIDMA(x)                           (((x) >> 30) & 0x1)
+#define   C_000040_GUIDMA                              0xBFFFFFFF
+#define   S_000040_VIDDMA(x)                           (((x) & 0x1) << 31)
+#define   G_000040_VIDDMA(x)                           (((x) >> 31) & 0x1)
+#define   C_000040_VIDDMA                              0x7FFFFFFF
+#define R_000044_GEN_INT_STATUS                      0x000044
+#define   S_000044_CRTC_VBLANK_STAT(x)                 (((x) & 0x1) << 0)
+#define   G_000044_CRTC_VBLANK_STAT(x)                 (((x) >> 0) & 0x1)
+#define   C_000044_CRTC_VBLANK_STAT                    0xFFFFFFFE
+#define   S_000044_CRTC_VBLANK_STAT_AK(x)              (((x) & 0x1) << 0)
+#define   G_000044_CRTC_VBLANK_STAT_AK(x)              (((x) >> 0) & 0x1)
+#define   C_000044_CRTC_VBLANK_STAT_AK                 0xFFFFFFFE
+#define   S_000044_CRTC_VLINE_STAT(x)                  (((x) & 0x1) << 1)
+#define   G_000044_CRTC_VLINE_STAT(x)                  (((x) >> 1) & 0x1)
+#define   C_000044_CRTC_VLINE_STAT                     0xFFFFFFFD
+#define   S_000044_CRTC_VLINE_STAT_AK(x)               (((x) & 0x1) << 1)
+#define   G_000044_CRTC_VLINE_STAT_AK(x)               (((x) >> 1) & 0x1)
+#define   C_000044_CRTC_VLINE_STAT_AK                  0xFFFFFFFD
+#define   S_000044_CRTC_VSYNC_STAT(x)                  (((x) & 0x1) << 2)
+#define   G_000044_CRTC_VSYNC_STAT(x)                  (((x) >> 2) & 0x1)
+#define   C_000044_CRTC_VSYNC_STAT                     0xFFFFFFFB
+#define   S_000044_CRTC_VSYNC_STAT_AK(x)               (((x) & 0x1) << 2)
+#define   G_000044_CRTC_VSYNC_STAT_AK(x)               (((x) >> 2) & 0x1)
+#define   C_000044_CRTC_VSYNC_STAT_AK                  0xFFFFFFFB
+#define   S_000044_SNAPSHOT_STAT(x)                    (((x) & 0x1) << 3)
+#define   G_000044_SNAPSHOT_STAT(x)                    (((x) >> 3) & 0x1)
+#define   C_000044_SNAPSHOT_STAT                       0xFFFFFFF7
+#define   S_000044_SNAPSHOT_STAT_AK(x)                 (((x) & 0x1) << 3)
+#define   G_000044_SNAPSHOT_STAT_AK(x)                 (((x) >> 3) & 0x1)
+#define   C_000044_SNAPSHOT_STAT_AK                    0xFFFFFFF7
+#define   S_000044_FP_DETECT_STAT(x)                   (((x) & 0x1) << 4)
+#define   G_000044_FP_DETECT_STAT(x)                   (((x) >> 4) & 0x1)
+#define   C_000044_FP_DETECT_STAT                      0xFFFFFFEF
+#define   S_000044_FP_DETECT_STAT_AK(x)                (((x) & 0x1) << 4)
+#define   G_000044_FP_DETECT_STAT_AK(x)                (((x) >> 4) & 0x1)
+#define   C_000044_FP_DETECT_STAT_AK                   0xFFFFFFEF
+#define   S_000044_CRTC2_VLINE_STAT(x)                 (((x) & 0x1) << 5)
+#define   G_000044_CRTC2_VLINE_STAT(x)                 (((x) >> 5) & 0x1)
+#define   C_000044_CRTC2_VLINE_STAT                    0xFFFFFFDF
+#define   S_000044_CRTC2_VLINE_STAT_AK(x)              (((x) & 0x1) << 5)
+#define   G_000044_CRTC2_VLINE_STAT_AK(x)              (((x) >> 5) & 0x1)
+#define   C_000044_CRTC2_VLINE_STAT_AK                 0xFFFFFFDF
+#define   S_000044_CRTC2_VSYNC_STAT(x)                 (((x) & 0x1) << 6)
+#define   G_000044_CRTC2_VSYNC_STAT(x)                 (((x) >> 6) & 0x1)
+#define   C_000044_CRTC2_VSYNC_STAT                    0xFFFFFFBF
+#define   S_000044_CRTC2_VSYNC_STAT_AK(x)              (((x) & 0x1) << 6)
+#define   G_000044_CRTC2_VSYNC_STAT_AK(x)              (((x) >> 6) & 0x1)
+#define   C_000044_CRTC2_VSYNC_STAT_AK                 0xFFFFFFBF
+#define   S_000044_SNAPSHOT2_STAT(x)                   (((x) & 0x1) << 7)
+#define   G_000044_SNAPSHOT2_STAT(x)                   (((x) >> 7) & 0x1)
+#define   C_000044_SNAPSHOT2_STAT                      0xFFFFFF7F
+#define   S_000044_SNAPSHOT2_STAT_AK(x)                (((x) & 0x1) << 7)
+#define   G_000044_SNAPSHOT2_STAT_AK(x)                (((x) >> 7) & 0x1)
+#define   C_000044_SNAPSHOT2_STAT_AK                   0xFFFFFF7F
+#define   S_000044_CAP0_INT_ACTIVE(x)                  (((x) & 0x1) << 8)
+#define   G_000044_CAP0_INT_ACTIVE(x)                  (((x) >> 8) & 0x1)
+#define   C_000044_CAP0_INT_ACTIVE                     0xFFFFFEFF
+#define   S_000044_CRTC2_VBLANK_STAT(x)                (((x) & 0x1) << 9)
+#define   G_000044_CRTC2_VBLANK_STAT(x)                (((x) >> 9) & 0x1)
+#define   C_000044_CRTC2_VBLANK_STAT                   0xFFFFFDFF
+#define   S_000044_CRTC2_VBLANK_STAT_AK(x)             (((x) & 0x1) << 9)
+#define   G_000044_CRTC2_VBLANK_STAT_AK(x)             (((x) >> 9) & 0x1)
+#define   C_000044_CRTC2_VBLANK_STAT_AK                0xFFFFFDFF
+#define   S_000044_FP2_DETECT_STAT(x)                  (((x) & 0x1) << 10)
+#define   G_000044_FP2_DETECT_STAT(x)                  (((x) >> 10) & 0x1)
+#define   C_000044_FP2_DETECT_STAT                     0xFFFFFBFF
+#define   S_000044_FP2_DETECT_STAT_AK(x)               (((x) & 0x1) << 10)
+#define   G_000044_FP2_DETECT_STAT_AK(x)               (((x) >> 10) & 0x1)
+#define   C_000044_FP2_DETECT_STAT_AK                  0xFFFFFBFF
+#define   S_000044_VSYNC_DIFF_OVER_LIMIT_STAT(x)       (((x) & 0x1) << 11)
+#define   G_000044_VSYNC_DIFF_OVER_LIMIT_STAT(x)       (((x) >> 11) & 0x1)
+#define   C_000044_VSYNC_DIFF_OVER_LIMIT_STAT          0xFFFFF7FF
+#define   S_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK(x)    (((x) & 0x1) << 11)
+#define   G_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK(x)    (((x) >> 11) & 0x1)
+#define   C_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK       0xFFFFF7FF
+#define   S_000044_DMA_VIPH0_INT(x)                    (((x) & 0x1) << 12)
+#define   G_000044_DMA_VIPH0_INT(x)                    (((x) >> 12) & 0x1)
+#define   C_000044_DMA_VIPH0_INT                       0xFFFFEFFF
+#define   S_000044_DMA_VIPH0_INT_AK(x)                 (((x) & 0x1) << 12)
+#define   G_000044_DMA_VIPH0_INT_AK(x)                 (((x) >> 12) & 0x1)
+#define   C_000044_DMA_VIPH0_INT_AK                    0xFFFFEFFF
+#define   S_000044_DMA_VIPH1_INT(x)                    (((x) & 0x1) << 13)
+#define   G_000044_DMA_VIPH1_INT(x)                    (((x) >> 13) & 0x1)
+#define   C_000044_DMA_VIPH1_INT                       0xFFFFDFFF
+#define   S_000044_DMA_VIPH1_INT_AK(x)                 (((x) & 0x1) << 13)
+#define   G_000044_DMA_VIPH1_INT_AK(x)                 (((x) >> 13) & 0x1)
+#define   C_000044_DMA_VIPH1_INT_AK                    0xFFFFDFFF
+#define   S_000044_DMA_VIPH2_INT(x)                    (((x) & 0x1) << 14)
+#define   G_000044_DMA_VIPH2_INT(x)                    (((x) >> 14) & 0x1)
+#define   C_000044_DMA_VIPH2_INT                       0xFFFFBFFF
+#define   S_000044_DMA_VIPH2_INT_AK(x)                 (((x) & 0x1) << 14)
+#define   G_000044_DMA_VIPH2_INT_AK(x)                 (((x) >> 14) & 0x1)
+#define   C_000044_DMA_VIPH2_INT_AK                    0xFFFFBFFF
+#define   S_000044_DMA_VIPH3_INT(x)                    (((x) & 0x1) << 15)
+#define   G_000044_DMA_VIPH3_INT(x)                    (((x) >> 15) & 0x1)
+#define   C_000044_DMA_VIPH3_INT                       0xFFFF7FFF
+#define   S_000044_DMA_VIPH3_INT_AK(x)                 (((x) & 0x1) << 15)
+#define   G_000044_DMA_VIPH3_INT_AK(x)                 (((x) >> 15) & 0x1)
+#define   C_000044_DMA_VIPH3_INT_AK                    0xFFFF7FFF
+#define   S_000044_I2C_INT(x)                          (((x) & 0x1) << 17)
+#define   G_000044_I2C_INT(x)                          (((x) >> 17) & 0x1)
+#define   C_000044_I2C_INT                             0xFFFDFFFF
+#define   S_000044_I2C_INT_AK(x)                       (((x) & 0x1) << 17)
+#define   G_000044_I2C_INT_AK(x)                       (((x) >> 17) & 0x1)
+#define   C_000044_I2C_INT_AK                          0xFFFDFFFF
+#define   S_000044_GUI_IDLE_STAT(x)                    (((x) & 0x1) << 19)
+#define   G_000044_GUI_IDLE_STAT(x)                    (((x) >> 19) & 0x1)
+#define   C_000044_GUI_IDLE_STAT                       0xFFF7FFFF
+#define   S_000044_GUI_IDLE_STAT_AK(x)                 (((x) & 0x1) << 19)
+#define   G_000044_GUI_IDLE_STAT_AK(x)                 (((x) >> 19) & 0x1)
+#define   C_000044_GUI_IDLE_STAT_AK                    0xFFF7FFFF
+#define   S_000044_VIPH_INT(x)                         (((x) & 0x1) << 24)
+#define   G_000044_VIPH_INT(x)                         (((x) >> 24) & 0x1)
+#define   C_000044_VIPH_INT                            0xFEFFFFFF
+#define   S_000044_SW_INT(x)                           (((x) & 0x1) << 25)
+#define   G_000044_SW_INT(x)                           (((x) >> 25) & 0x1)
+#define   C_000044_SW_INT                              0xFDFFFFFF
+#define   S_000044_SW_INT_AK(x)                        (((x) & 0x1) << 25)
+#define   G_000044_SW_INT_AK(x)                        (((x) >> 25) & 0x1)
+#define   C_000044_SW_INT_AK                           0xFDFFFFFF
+#define   S_000044_SW_INT_SET(x)                       (((x) & 0x1) << 26)
+#define   G_000044_SW_INT_SET(x)                       (((x) >> 26) & 0x1)
+#define   C_000044_SW_INT_SET                          0xFBFFFFFF
+#define   S_000044_GEYSERVILLE_STAT(x)                 (((x) & 0x1) << 27)
+#define   G_000044_GEYSERVILLE_STAT(x)                 (((x) >> 27) & 0x1)
+#define   C_000044_GEYSERVILLE_STAT                    0xF7FFFFFF
+#define   S_000044_GEYSERVILLE_STAT_AK(x)              (((x) & 0x1) << 27)
+#define   G_000044_GEYSERVILLE_STAT_AK(x)              (((x) >> 27) & 0x1)
+#define   C_000044_GEYSERVILLE_STAT_AK                 0xF7FFFFFF
+#define   S_000044_HDCP_AUTHORIZED_INT_STAT(x)         (((x) & 0x1) << 28)
+#define   G_000044_HDCP_AUTHORIZED_INT_STAT(x)         (((x) >> 28) & 0x1)
+#define   C_000044_HDCP_AUTHORIZED_INT_STAT            0xEFFFFFFF
+#define   S_000044_HDCP_AUTHORIZED_INT_AK(x)           (((x) & 0x1) << 28)
+#define   G_000044_HDCP_AUTHORIZED_INT_AK(x)           (((x) >> 28) & 0x1)
+#define   C_000044_HDCP_AUTHORIZED_INT_AK              0xEFFFFFFF
+#define   S_000044_DVI_I2C_INT_STAT(x)                 (((x) & 0x1) << 29)
+#define   G_000044_DVI_I2C_INT_STAT(x)                 (((x) >> 29) & 0x1)
+#define   C_000044_DVI_I2C_INT_STAT                    0xDFFFFFFF
+#define   S_000044_DVI_I2C_INT_AK(x)                   (((x) & 0x1) << 29)
+#define   G_000044_DVI_I2C_INT_AK(x)                   (((x) >> 29) & 0x1)
+#define   C_000044_DVI_I2C_INT_AK                      0xDFFFFFFF
+#define   S_000044_GUIDMA_STAT(x)                      (((x) & 0x1) << 30)
+#define   G_000044_GUIDMA_STAT(x)                      (((x) >> 30) & 0x1)
+#define   C_000044_GUIDMA_STAT                         0xBFFFFFFF
+#define   S_000044_GUIDMA_AK(x)                        (((x) & 0x1) << 30)
+#define   G_000044_GUIDMA_AK(x)                        (((x) >> 30) & 0x1)
+#define   C_000044_GUIDMA_AK                           0xBFFFFFFF
+#define   S_000044_VIDDMA_STAT(x)                      (((x) & 0x1) << 31)
+#define   G_000044_VIDDMA_STAT(x)                      (((x) >> 31) & 0x1)
+#define   C_000044_VIDDMA_STAT                         0x7FFFFFFF
+#define   S_000044_VIDDMA_AK(x)                        (((x) & 0x1) << 31)
+#define   G_000044_VIDDMA_AK(x)                        (((x) >> 31) & 0x1)
+#define   C_000044_VIDDMA_AK                           0x7FFFFFFF
+#define R_000050_CRTC_GEN_CNTL                       0x000050
+#define   S_000050_CRTC_DBL_SCAN_EN(x)                 (((x) & 0x1) << 0)
+#define   G_000050_CRTC_DBL_SCAN_EN(x)                 (((x) >> 0) & 0x1)
+#define   C_000050_CRTC_DBL_SCAN_EN                    0xFFFFFFFE
+#define   S_000050_CRTC_INTERLACE_EN(x)                (((x) & 0x1) << 1)
+#define   G_000050_CRTC_INTERLACE_EN(x)                (((x) >> 1) & 0x1)
+#define   C_000050_CRTC_INTERLACE_EN                   0xFFFFFFFD
+#define   S_000050_CRTC_C_SYNC_EN(x)                   (((x) & 0x1) << 4)
+#define   G_000050_CRTC_C_SYNC_EN(x)                   (((x) >> 4) & 0x1)
+#define   C_000050_CRTC_C_SYNC_EN                      0xFFFFFFEF
+#define   S_000050_CRTC_PIX_WIDTH(x)                   (((x) & 0xF) << 8)
+#define   G_000050_CRTC_PIX_WIDTH(x)                   (((x) >> 8) & 0xF)
+#define   C_000050_CRTC_PIX_WIDTH                      0xFFFFF0FF
+#define   S_000050_CRTC_ICON_EN(x)                     (((x) & 0x1) << 15)
+#define   G_000050_CRTC_ICON_EN(x)                     (((x) >> 15) & 0x1)
+#define   C_000050_CRTC_ICON_EN                        0xFFFF7FFF
+#define   S_000050_CRTC_CUR_EN(x)                      (((x) & 0x1) << 16)
+#define   G_000050_CRTC_CUR_EN(x)                      (((x) >> 16) & 0x1)
+#define   C_000050_CRTC_CUR_EN                         0xFFFEFFFF
+#define   S_000050_CRTC_VSTAT_MODE(x)                  (((x) & 0x3) << 17)
+#define   G_000050_CRTC_VSTAT_MODE(x)                  (((x) >> 17) & 0x3)
+#define   C_000050_CRTC_VSTAT_MODE                     0xFFF9FFFF
+#define   S_000050_CRTC_CUR_MODE(x)                    (((x) & 0x7) << 20)
+#define   G_000050_CRTC_CUR_MODE(x)                    (((x) >> 20) & 0x7)
+#define   C_000050_CRTC_CUR_MODE                       0xFF8FFFFF
+#define   S_000050_CRTC_EXT_DISP_EN(x)                 (((x) & 0x1) << 24)
+#define   G_000050_CRTC_EXT_DISP_EN(x)                 (((x) >> 24) & 0x1)
+#define   C_000050_CRTC_EXT_DISP_EN                    0xFEFFFFFF
+#define   S_000050_CRTC_EN(x)                          (((x) & 0x1) << 25)
+#define   G_000050_CRTC_EN(x)                          (((x) >> 25) & 0x1)
+#define   C_000050_CRTC_EN                             0xFDFFFFFF
+#define   S_000050_CRTC_DISP_REQ_EN_B(x)               (((x) & 0x1) << 26)
+#define   G_000050_CRTC_DISP_REQ_EN_B(x)               (((x) >> 26) & 0x1)
+#define   C_000050_CRTC_DISP_REQ_EN_B                  0xFBFFFFFF
+#define R_000054_CRTC_EXT_CNTL                       0x000054
+#define   S_000054_CRTC_VGA_XOVERSCAN(x)               (((x) & 0x1) << 0)
+#define   G_000054_CRTC_VGA_XOVERSCAN(x)               (((x) >> 0) & 0x1)
+#define   C_000054_CRTC_VGA_XOVERSCAN                  0xFFFFFFFE
+#define   S_000054_VGA_BLINK_RATE(x)                   (((x) & 0x3) << 1)
+#define   G_000054_VGA_BLINK_RATE(x)                   (((x) >> 1) & 0x3)
+#define   C_000054_VGA_BLINK_RATE                      0xFFFFFFF9
+#define   S_000054_VGA_ATI_LINEAR(x)                   (((x) & 0x1) << 3)
+#define   G_000054_VGA_ATI_LINEAR(x)                   (((x) >> 3) & 0x1)
+#define   C_000054_VGA_ATI_LINEAR                      0xFFFFFFF7
+#define   S_000054_VGA_128KAP_PAGING(x)                (((x) & 0x1) << 4)
+#define   G_000054_VGA_128KAP_PAGING(x)                (((x) >> 4) & 0x1)
+#define   C_000054_VGA_128KAP_PAGING                   0xFFFFFFEF
+#define   S_000054_VGA_TEXT_132(x)                     (((x) & 0x1) << 5)
+#define   G_000054_VGA_TEXT_132(x)                     (((x) >> 5) & 0x1)
+#define   C_000054_VGA_TEXT_132                        0xFFFFFFDF
+#define   S_000054_VGA_XCRT_CNT_EN(x)                  (((x) & 0x1) << 6)
+#define   G_000054_VGA_XCRT_CNT_EN(x)                  (((x) >> 6) & 0x1)
+#define   C_000054_VGA_XCRT_CNT_EN                     0xFFFFFFBF
+#define   S_000054_CRTC_HSYNC_DIS(x)                   (((x) & 0x1) << 8)
+#define   G_000054_CRTC_HSYNC_DIS(x)                   (((x) >> 8) & 0x1)
+#define   C_000054_CRTC_HSYNC_DIS                      0xFFFFFEFF
+#define   S_000054_CRTC_VSYNC_DIS(x)                   (((x) & 0x1) << 9)
+#define   G_000054_CRTC_VSYNC_DIS(x)                   (((x) >> 9) & 0x1)
+#define   C_000054_CRTC_VSYNC_DIS                      0xFFFFFDFF
+#define   S_000054_CRTC_DISPLAY_DIS(x)                 (((x) & 0x1) << 10)
+#define   G_000054_CRTC_DISPLAY_DIS(x)                 (((x) >> 10) & 0x1)
+#define   C_000054_CRTC_DISPLAY_DIS                    0xFFFFFBFF
+#define   S_000054_CRTC_SYNC_TRISTATE(x)               (((x) & 0x1) << 11)
+#define   G_000054_CRTC_SYNC_TRISTATE(x)               (((x) >> 11) & 0x1)
+#define   C_000054_CRTC_SYNC_TRISTATE                  0xFFFFF7FF
+#define   S_000054_CRTC_HSYNC_TRISTATE(x)              (((x) & 0x1) << 12)
+#define   G_000054_CRTC_HSYNC_TRISTATE(x)              (((x) >> 12) & 0x1)
+#define   C_000054_CRTC_HSYNC_TRISTATE                 0xFFFFEFFF
+#define   S_000054_CRTC_VSYNC_TRISTATE(x)              (((x) & 0x1) << 13)
+#define   G_000054_CRTC_VSYNC_TRISTATE(x)              (((x) >> 13) & 0x1)
+#define   C_000054_CRTC_VSYNC_TRISTATE                 0xFFFFDFFF
+#define   S_000054_CRT_ON(x)                           (((x) & 0x1) << 15)
+#define   G_000054_CRT_ON(x)                           (((x) >> 15) & 0x1)
+#define   C_000054_CRT_ON                              0xFFFF7FFF
+#define   S_000054_VGA_CUR_B_TEST(x)                   (((x) & 0x1) << 17)
+#define   G_000054_VGA_CUR_B_TEST(x)                   (((x) >> 17) & 0x1)
+#define   C_000054_VGA_CUR_B_TEST                      0xFFFDFFFF
+#define   S_000054_VGA_PACK_DIS(x)                     (((x) & 0x1) << 18)
+#define   G_000054_VGA_PACK_DIS(x)                     (((x) >> 18) & 0x1)
+#define   C_000054_VGA_PACK_DIS                        0xFFFBFFFF
+#define   S_000054_VGA_MEM_PS_EN(x)                    (((x) & 0x1) << 19)
+#define   G_000054_VGA_MEM_PS_EN(x)                    (((x) >> 19) & 0x1)
+#define   C_000054_VGA_MEM_PS_EN                       0xFFF7FFFF
+#define   S_000054_VCRTC_IDX_MASTER(x)                 (((x) & 0x7F) << 24)
+#define   G_000054_VCRTC_IDX_MASTER(x)                 (((x) >> 24) & 0x7F)
+#define   C_000054_VCRTC_IDX_MASTER                    0x80FFFFFF
+#define R_000148_MC_FB_LOCATION                      0x000148
+#define   S_000148_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000148_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000148_MC_FB_START                         0xFFFF0000
+#define   S_000148_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000148_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000148_MC_FB_TOP                           0x0000FFFF
+#define R_00014C_MC_AGP_LOCATION                     0x00014C
+#define   S_00014C_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_00014C_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_00014C_MC_AGP_START                        0xFFFF0000
+#define   S_00014C_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_00014C_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_00014C_MC_AGP_TOP                          0x0000FFFF
+#define R_000170_AGP_BASE                            0x000170
+#define   S_000170_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000170_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000170_AGP_BASE_ADDR                       0x00000000
+#define R_00023C_DISPLAY_BASE_ADDR                   0x00023C
+#define   S_00023C_DISPLAY_BASE_ADDR(x)                (((x) & 0xFFFFFFFF) << 0)
+#define   G_00023C_DISPLAY_BASE_ADDR(x)                (((x) >> 0) & 0xFFFFFFFF)
+#define   C_00023C_DISPLAY_BASE_ADDR                   0x00000000
+#define R_000260_CUR_OFFSET                          0x000260
+#define   S_000260_CUR_OFFSET(x)                       (((x) & 0x7FFFFFF) << 0)
+#define   G_000260_CUR_OFFSET(x)                       (((x) >> 0) & 0x7FFFFFF)
+#define   C_000260_CUR_OFFSET                          0xF8000000
+#define   S_000260_CUR_LOCK(x)                         (((x) & 0x1) << 31)
+#define   G_000260_CUR_LOCK(x)                         (((x) >> 31) & 0x1)
+#define   C_000260_CUR_LOCK                            0x7FFFFFFF
+#define R_00033C_CRTC2_DISPLAY_BASE_ADDR             0x00033C
+#define   S_00033C_CRTC2_DISPLAY_BASE_ADDR(x)          (((x) & 0xFFFFFFFF) << 0)
+#define   G_00033C_CRTC2_DISPLAY_BASE_ADDR(x)          (((x) >> 0) & 0xFFFFFFFF)
+#define   C_00033C_CRTC2_DISPLAY_BASE_ADDR             0x00000000
+#define R_000360_CUR2_OFFSET                         0x000360
+#define   S_000360_CUR2_OFFSET(x)                      (((x) & 0x7FFFFFF) << 0)
+#define   G_000360_CUR2_OFFSET(x)                      (((x) >> 0) & 0x7FFFFFF)
+#define   C_000360_CUR2_OFFSET                         0xF8000000
+#define   S_000360_CUR2_LOCK(x)                        (((x) & 0x1) << 31)
+#define   G_000360_CUR2_LOCK(x)                        (((x) >> 31) & 0x1)
+#define   C_000360_CUR2_LOCK                           0x7FFFFFFF
+#define R_0003C2_GENMO_WT                            0x0003C2
+#define   S_0003C2_GENMO_MONO_ADDRESS_B(x)             (((x) & 0x1) << 0)
+#define   G_0003C2_GENMO_MONO_ADDRESS_B(x)             (((x) >> 0) & 0x1)
+#define   C_0003C2_GENMO_MONO_ADDRESS_B                0xFE
+#define   S_0003C2_VGA_RAM_EN(x)                       (((x) & 0x1) << 1)
+#define   G_0003C2_VGA_RAM_EN(x)                       (((x) >> 1) & 0x1)
+#define   C_0003C2_VGA_RAM_EN                          0xFD
+#define   S_0003C2_VGA_CKSEL(x)                        (((x) & 0x3) << 2)
+#define   G_0003C2_VGA_CKSEL(x)                        (((x) >> 2) & 0x3)
+#define   C_0003C2_VGA_CKSEL                           0xF3
+#define   S_0003C2_ODD_EVEN_MD_PGSEL(x)                (((x) & 0x1) << 5)
+#define   G_0003C2_ODD_EVEN_MD_PGSEL(x)                (((x) >> 5) & 0x1)
+#define   C_0003C2_ODD_EVEN_MD_PGSEL                   0xDF
+#define   S_0003C2_VGA_HSYNC_POL(x)                    (((x) & 0x1) << 6)
+#define   G_0003C2_VGA_HSYNC_POL(x)                    (((x) >> 6) & 0x1)
+#define   C_0003C2_VGA_HSYNC_POL                       0xBF
+#define   S_0003C2_VGA_VSYNC_POL(x)                    (((x) & 0x1) << 7)
+#define   G_0003C2_VGA_VSYNC_POL(x)                    (((x) >> 7) & 0x1)
+#define   C_0003C2_VGA_VSYNC_POL                       0x7F
+#define R_0003F8_CRTC2_GEN_CNTL                      0x0003F8
+#define   S_0003F8_CRTC2_DBL_SCAN_EN(x)                (((x) & 0x1) << 0)
+#define   G_0003F8_CRTC2_DBL_SCAN_EN(x)                (((x) >> 0) & 0x1)
+#define   C_0003F8_CRTC2_DBL_SCAN_EN                   0xFFFFFFFE
+#define   S_0003F8_CRTC2_INTERLACE_EN(x)               (((x) & 0x1) << 1)
+#define   G_0003F8_CRTC2_INTERLACE_EN(x)               (((x) >> 1) & 0x1)
+#define   C_0003F8_CRTC2_INTERLACE_EN                  0xFFFFFFFD
+#define   S_0003F8_CRTC2_SYNC_TRISTATE(x)              (((x) & 0x1) << 4)
+#define   G_0003F8_CRTC2_SYNC_TRISTATE(x)              (((x) >> 4) & 0x1)
+#define   C_0003F8_CRTC2_SYNC_TRISTATE                 0xFFFFFFEF
+#define   S_0003F8_CRTC2_HSYNC_TRISTATE(x)             (((x) & 0x1) << 5)
+#define   G_0003F8_CRTC2_HSYNC_TRISTATE(x)             (((x) >> 5) & 0x1)
+#define   C_0003F8_CRTC2_HSYNC_TRISTATE                0xFFFFFFDF
+#define   S_0003F8_CRTC2_VSYNC_TRISTATE(x)             (((x) & 0x1) << 6)
+#define   G_0003F8_CRTC2_VSYNC_TRISTATE(x)             (((x) >> 6) & 0x1)
+#define   C_0003F8_CRTC2_VSYNC_TRISTATE                0xFFFFFFBF
+#define   S_0003F8_CRT2_ON(x)                          (((x) & 0x1) << 7)
+#define   G_0003F8_CRT2_ON(x)                          (((x) >> 7) & 0x1)
+#define   C_0003F8_CRT2_ON                             0xFFFFFF7F
+#define   S_0003F8_CRTC2_PIX_WIDTH(x)                  (((x) & 0xF) << 8)
+#define   G_0003F8_CRTC2_PIX_WIDTH(x)                  (((x) >> 8) & 0xF)
+#define   C_0003F8_CRTC2_PIX_WIDTH                     0xFFFFF0FF
+#define   S_0003F8_CRTC2_ICON_EN(x)                    (((x) & 0x1) << 15)
+#define   G_0003F8_CRTC2_ICON_EN(x)                    (((x) >> 15) & 0x1)
+#define   C_0003F8_CRTC2_ICON_EN                       0xFFFF7FFF
+#define   S_0003F8_CRTC2_CUR_EN(x)                     (((x) & 0x1) << 16)
+#define   G_0003F8_CRTC2_CUR_EN(x)                     (((x) >> 16) & 0x1)
+#define   C_0003F8_CRTC2_CUR_EN                        0xFFFEFFFF
+#define   S_0003F8_CRTC2_CUR_MODE(x)                   (((x) & 0x7) << 20)
+#define   G_0003F8_CRTC2_CUR_MODE(x)                   (((x) >> 20) & 0x7)
+#define   C_0003F8_CRTC2_CUR_MODE                      0xFF8FFFFF
+#define   S_0003F8_CRTC2_DISPLAY_DIS(x)                (((x) & 0x1) << 23)
+#define   G_0003F8_CRTC2_DISPLAY_DIS(x)                (((x) >> 23) & 0x1)
+#define   C_0003F8_CRTC2_DISPLAY_DIS                   0xFF7FFFFF
+#define   S_0003F8_CRTC2_EN(x)                         (((x) & 0x1) << 25)
+#define   G_0003F8_CRTC2_EN(x)                         (((x) >> 25) & 0x1)
+#define   C_0003F8_CRTC2_EN                            0xFDFFFFFF
+#define   S_0003F8_CRTC2_DISP_REQ_EN_B(x)              (((x) & 0x1) << 26)
+#define   G_0003F8_CRTC2_DISP_REQ_EN_B(x)              (((x) >> 26) & 0x1)
+#define   C_0003F8_CRTC2_DISP_REQ_EN_B                 0xFBFFFFFF
+#define   S_0003F8_CRTC2_C_SYNC_EN(x)                  (((x) & 0x1) << 27)
+#define   G_0003F8_CRTC2_C_SYNC_EN(x)                  (((x) >> 27) & 0x1)
+#define   C_0003F8_CRTC2_C_SYNC_EN                     0xF7FFFFFF
+#define   S_0003F8_CRTC2_HSYNC_DIS(x)                  (((x) & 0x1) << 28)
+#define   G_0003F8_CRTC2_HSYNC_DIS(x)                  (((x) >> 28) & 0x1)
+#define   C_0003F8_CRTC2_HSYNC_DIS                     0xEFFFFFFF
+#define   S_0003F8_CRTC2_VSYNC_DIS(x)                  (((x) & 0x1) << 29)
+#define   G_0003F8_CRTC2_VSYNC_DIS(x)                  (((x) >> 29) & 0x1)
+#define   C_0003F8_CRTC2_VSYNC_DIS                     0xDFFFFFFF
+#define R_000420_OV0_SCALE_CNTL                      0x000420
+#define   S_000420_OV0_NO_READ_BEHIND_SCAN(x)          (((x) & 0x1) << 1)
+#define   G_000420_OV0_NO_READ_BEHIND_SCAN(x)          (((x) >> 1) & 0x1)
+#define   C_000420_OV0_NO_READ_BEHIND_SCAN             0xFFFFFFFD
+#define   S_000420_OV0_HORZ_PICK_NEAREST(x)            (((x) & 0x1) << 2)
+#define   G_000420_OV0_HORZ_PICK_NEAREST(x)            (((x) >> 2) & 0x1)
+#define   C_000420_OV0_HORZ_PICK_NEAREST               0xFFFFFFFB
+#define   S_000420_OV0_VERT_PICK_NEAREST(x)            (((x) & 0x1) << 3)
+#define   G_000420_OV0_VERT_PICK_NEAREST(x)            (((x) >> 3) & 0x1)
+#define   C_000420_OV0_VERT_PICK_NEAREST               0xFFFFFFF7
+#define   S_000420_OV0_SIGNED_UV(x)                    (((x) & 0x1) << 4)
+#define   G_000420_OV0_SIGNED_UV(x)                    (((x) >> 4) & 0x1)
+#define   C_000420_OV0_SIGNED_UV                       0xFFFFFFEF
+#define   S_000420_OV0_GAMMA_SEL(x)                    (((x) & 0x7) << 5)
+#define   G_000420_OV0_GAMMA_SEL(x)                    (((x) >> 5) & 0x7)
+#define   C_000420_OV0_GAMMA_SEL                       0xFFFFFF1F
+#define   S_000420_OV0_SURFACE_FORMAT(x)               (((x) & 0xF) << 8)
+#define   G_000420_OV0_SURFACE_FORMAT(x)               (((x) >> 8) & 0xF)
+#define   C_000420_OV0_SURFACE_FORMAT                  0xFFFFF0FF
+#define   S_000420_OV0_ADAPTIVE_DEINT(x)               (((x) & 0x1) << 12)
+#define   G_000420_OV0_ADAPTIVE_DEINT(x)               (((x) >> 12) & 0x1)
+#define   C_000420_OV0_ADAPTIVE_DEINT                  0xFFFFEFFF
+#define   S_000420_OV0_CRTC_SEL(x)                     (((x) & 0x1) << 14)
+#define   G_000420_OV0_CRTC_SEL(x)                     (((x) >> 14) & 0x1)
+#define   C_000420_OV0_CRTC_SEL                        0xFFFFBFFF
+#define   S_000420_OV0_BURST_PER_PLANE(x)              (((x) & 0x7F) << 16)
+#define   G_000420_OV0_BURST_PER_PLANE(x)              (((x) >> 16) & 0x7F)
+#define   C_000420_OV0_BURST_PER_PLANE                 0xFF80FFFF
+#define   S_000420_OV0_DOUBLE_BUFFER_REGS(x)           (((x) & 0x1) << 24)
+#define   G_000420_OV0_DOUBLE_BUFFER_REGS(x)           (((x) >> 24) & 0x1)
+#define   C_000420_OV0_DOUBLE_BUFFER_REGS              0xFEFFFFFF
+#define   S_000420_OV0_BANDWIDTH(x)                    (((x) & 0x1) << 26)
+#define   G_000420_OV0_BANDWIDTH(x)                    (((x) >> 26) & 0x1)
+#define   C_000420_OV0_BANDWIDTH                       0xFBFFFFFF
+#define   S_000420_OV0_LIN_TRANS_BYPASS(x)             (((x) & 0x1) << 28)
+#define   G_000420_OV0_LIN_TRANS_BYPASS(x)             (((x) >> 28) & 0x1)
+#define   C_000420_OV0_LIN_TRANS_BYPASS                0xEFFFFFFF
+#define   S_000420_OV0_INT_EMU(x)                      (((x) & 0x1) << 29)
+#define   G_000420_OV0_INT_EMU(x)                      (((x) >> 29) & 0x1)
+#define   C_000420_OV0_INT_EMU                         0xDFFFFFFF
+#define   S_000420_OV0_OVERLAY_EN(x)                   (((x) & 0x1) << 30)
+#define   G_000420_OV0_OVERLAY_EN(x)                   (((x) >> 30) & 0x1)
+#define   C_000420_OV0_OVERLAY_EN                      0xBFFFFFFF
+#define   S_000420_OV0_SOFT_RESET(x)                   (((x) & 0x1) << 31)
+#define   G_000420_OV0_SOFT_RESET(x)                   (((x) >> 31) & 0x1)
+#define   C_000420_OV0_SOFT_RESET                      0x7FFFFFFF
+#define R_00070C_CP_RB_RPTR_ADDR                     0x00070C
+#define   S_00070C_RB_RPTR_SWAP(x)                     (((x) & 0x3) << 0)
+#define   G_00070C_RB_RPTR_SWAP(x)                     (((x) >> 0) & 0x3)
+#define   C_00070C_RB_RPTR_SWAP                        0xFFFFFFFC
+#define   S_00070C_RB_RPTR_ADDR(x)                     (((x) & 0x3FFFFFFF) << 2)
+#define   G_00070C_RB_RPTR_ADDR(x)                     (((x) >> 2) & 0x3FFFFFFF)
+#define   C_00070C_RB_RPTR_ADDR                        0x00000003
+#define R_000740_CP_CSQ_CNTL                         0x000740
+#define   S_000740_CSQ_CNT_PRIMARY(x)                  (((x) & 0xFF) << 0)
+#define   G_000740_CSQ_CNT_PRIMARY(x)                  (((x) >> 0) & 0xFF)
+#define   C_000740_CSQ_CNT_PRIMARY                     0xFFFFFF00
+#define   S_000740_CSQ_CNT_INDIRECT(x)                 (((x) & 0xFF) << 8)
+#define   G_000740_CSQ_CNT_INDIRECT(x)                 (((x) >> 8) & 0xFF)
+#define   C_000740_CSQ_CNT_INDIRECT                    0xFFFF00FF
+#define   S_000740_CSQ_MODE(x)                         (((x) & 0xF) << 28)
+#define   G_000740_CSQ_MODE(x)                         (((x) >> 28) & 0xF)
+#define   C_000740_CSQ_MODE                            0x0FFFFFFF
+#define R_000770_SCRATCH_UMSK                        0x000770
+#define   S_000770_SCRATCH_UMSK(x)                     (((x) & 0x3F) << 0)
+#define   G_000770_SCRATCH_UMSK(x)                     (((x) >> 0) & 0x3F)
+#define   C_000770_SCRATCH_UMSK                        0xFFFFFFC0
+#define   S_000770_SCRATCH_SWAP(x)                     (((x) & 0x3) << 16)
+#define   G_000770_SCRATCH_SWAP(x)                     (((x) >> 16) & 0x3)
+#define   C_000770_SCRATCH_SWAP                        0xFFFCFFFF
+#define R_000774_SCRATCH_ADDR                        0x000774
+#define   S_000774_SCRATCH_ADDR(x)                     (((x) & 0x7FFFFFF) << 5)
+#define   G_000774_SCRATCH_ADDR(x)                     (((x) >> 5) & 0x7FFFFFF)
+#define   C_000774_SCRATCH_ADDR                        0x0000001F
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_SE_BUSY(x)                          (((x) & 0x1) << 20)
+#define   G_000E40_SE_BUSY(x)                          (((x) >> 20) & 0x1)
+#define   C_000E40_SE_BUSY                             0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+
+
+#define R_00000D_SCLK_CNTL                           0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_TCLK_SRC_SEL(x)                     (((x) & 0x7) << 8)
+#define   G_00000D_TCLK_SRC_SEL(x)                     (((x) >> 8) & 0x7)
+#define   C_00000D_TCLK_SRC_SEL                        0xFFFFF8FF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP(x)                       (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP(x)                       (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP                          0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_SE(x)                         (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_SE(x)                         (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_SE                            0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_PB(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_PB(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_PB                            0xFDFFFFFF
+#define   S_00000D_FORCE_TAM(x)                        (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_TAM(x)                        (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_TAM                           0xFBFFFFFF
+#define   S_00000D_FORCE_TDM(x)                        (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TDM(x)                        (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TDM                           0xF7FFFFFF
+#define   S_00000D_FORCE_RB(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_RB                            0xEFFFFFFF
+
+/* PLL regs */
+#define SCLK_CNTL                                      0xd
+#define   FORCE_HDP                                    (1 << 17)
+#define CLK_PWRMGT_CNTL                                0x14
+#define   GLOBAL_PMAN_EN                               (1 << 10)
+#define   DISP_PM                                      (1 << 20)
+#define PLL_PWRMGT_CNTL                                0x15
+#define   MPLL_TURNOFF                                 (1 << 0)
+#define   SPLL_TURNOFF                                 (1 << 1)
+#define   PPLL_TURNOFF                                 (1 << 2)
+#define   P2PLL_TURNOFF                                (1 << 3)
+#define   TVPLL_TURNOFF                                (1 << 4)
+#define   MOBILE_SU                                    (1 << 16)
+#define   SU_SCLK_USE_BCLK                             (1 << 17)
+#define SCLK_CNTL2                                     0x1e
+#define   REDUCED_SPEED_SCLK_MODE                      (1 << 16)
+#define   REDUCED_SPEED_SCLK_SEL(x)                    ((x) << 17)
+#define MCLK_MISC                                      0x1f
+#define   EN_MCLK_TRISTATE_IN_SUSPEND                  (1 << 18)
+#define SCLK_MORE_CNTL                                 0x35
+#define   REDUCED_SPEED_SCLK_EN                        (1 << 16)
+#define   IO_CG_VOLTAGE_DROP                           (1 << 17)
+#define   VOLTAGE_DELAY_SEL(x)                         ((x) << 20)
+#define   VOLTAGE_DROP_SYNC                            (1 << 19)
+
+/* mmreg */
+#define DISP_PWR_MAN                                   0xd08
+#define   DISP_D3_GRPH_RST                             (1 << 18)
+#define   DISP_D3_SUBPIC_RST                           (1 << 19)
+#define   DISP_D3_OV0_RST                              (1 << 20)
+#define   DISP_D1D2_GRPH_RST                           (1 << 21)
+#define   DISP_D1D2_SUBPIC_RST                         (1 << 22)
+#define   DISP_D1D2_OV0_RST                            (1 << 23)
+#define   DISP_DVO_ENABLE_RST                          (1 << 24)
+#define   TV_ENABLE_RST                                (1 << 25)
+#define   AUTO_PWRUP_EN                                (1 << 26)
+
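+/* The S_/G_/C_ triplets earlier in this header follow the usual radeon
+ * register-header convention: S_<reg>_<field>(x) shifts a value into the
+ * field, G_<reg>_<field>(x) extracts it, and C_<reg>_<field> is the AND
+ * mask that clears the field.  A minimal read-modify-write sketch
+ * (illustrative only, not part of this header):
+ *
+ *	u32 tmp = RREG32(R_000770_SCRATCH_UMSK);
+ *	tmp &= C_000770_SCRATCH_UMSK;        (clear the 6-bit field)
+ *	tmp |= S_000770_SCRATCH_UMSK(0x3);   (set the low two bits of it)
+ *	WREG32(R_000770_SCRATCH_UMSK, tmp);
+ */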
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/r200.c b/linux-imx/drivers/gpu/drm/radeon/r200.c
new file mode 100644
index 0000000..b3807ed
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r200.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+
+#include "r100d.h"
+#include "r200_reg_safe.h"
+
+#include "r100_track.h"
+
+static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
+{
+	int vtx_size, i;
+	vtx_size = 2;
+
+	if (vtx_fmt_0 & R200_VTX_Z0)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_W0)
+		vtx_size++;
+	/* blend weight */
+	if (vtx_fmt_0 & (0x7 << R200_VTX_WEIGHT_COUNT_SHIFT))
+		vtx_size += (vtx_fmt_0 >> R200_VTX_WEIGHT_COUNT_SHIFT) & 0x7;
+	if (vtx_fmt_0 & R200_VTX_PV_MATRIX_SEL)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_N0)
+		vtx_size += 3;
+	if (vtx_fmt_0 & R200_VTX_POINT_SIZE)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_DISCRETE_FOG)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_SHININESS_0)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_SHININESS_1)
+		vtx_size++;
+	for (i = 0; i < 8; i++) {
+		int color_size = (vtx_fmt_0 >> (11 + 2*i)) & 0x3;
+		switch (color_size) {
+		case 0: break;
+		case 1: vtx_size++; break;
+		case 2: vtx_size += 3; break;
+		case 3: vtx_size += 4; break;
+		}
+	}
+	if (vtx_fmt_0 & R200_VTX_XY1)
+		vtx_size += 2;
+	if (vtx_fmt_0 & R200_VTX_Z1)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_W1)
+		vtx_size++;
+	if (vtx_fmt_0 & R200_VTX_N1)
+		vtx_size += 3;
+	return vtx_size;
+}
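+
+/* Worked example for the decoder above (illustrative only): a VTX_FMT_0
+ * with R200_VTX_Z0 and R200_VTX_W0 set, one normal (R200_VTX_N0) and a
+ * 4-component color in slot 0 (color_size == 3) gives
+ * 2 (XY) + 1 + 1 + 3 + 4 = 11 dwords per vertex, the value the tracker
+ * later stores in track->vtx_size.
+ */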
+
+int r200_copy_dma(struct radeon_device *rdev,
+		  uint64_t src_offset,
+		  uint64_t dst_offset,
+		  unsigned num_gpu_pages,
+		  struct radeon_fence **fence)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	uint32_t size;
+	uint32_t cur_size;
+	int i, num_loops;
+	int r = 0;
+
+	/* radeon pitch is /64 */
+	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
+	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
+	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+	/* Must wait for 2D idle & clean before DMA or hangs might happen */
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring, (1 << 16));
+	for (i = 0; i < num_loops; i++) {
+		cur_size = size;
+		if (cur_size > 0x1FFFFF) {
+			cur_size = 0x1FFFFF;
+		}
+		size -= cur_size;
+		radeon_ring_write(ring, PACKET0(0x720, 2));
+		radeon_ring_write(ring, src_offset);
+		radeon_ring_write(ring, dst_offset);
+		radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
+		src_offset += cur_size;
+		dst_offset += cur_size;
+	}
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
+	if (fence) {
+		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+	}
+	radeon_ring_unlock_commit(rdev, ring);
+	return r;
+}
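+
+/* Ring-sizing sketch for r200_copy_dma() above (illustrative only): each
+ * DMA packet moves at most 0x1FFFFF bytes (2^21 - 1, just under 2 MiB),
+ * so an 8 MiB copy needs DIV_ROUND_UP(0x800000, 0x1FFFFF) = 5 packets of
+ * 4 dwords each, well within the num_loops * 4 + 64 dwords reserved.
+ */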
+
+
+static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
+{
+	int vtx_size, i, tex_size;
+	vtx_size = 0;
+	for (i = 0; i < 6; i++) {
+		tex_size = (vtx_fmt_1 >> (i * 3)) & 0x7;
+		if (tex_size > 4)
+			continue;
+		vtx_size += tex_size;
+	}
+	return vtx_size;
+}
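+
+/* Example (illustrative only): vtx_fmt_1 == 0x49 encodes a 1-component
+ * texture coordinate for each of units 0-2 (three 3-bit fields of value
+ * 1), adding 3 dwords; any field value above 4 is treated as unused and
+ * skipped.
+ */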
+
+int r200_packet0_check(struct radeon_cs_parser *p,
+		       struct radeon_cs_packet *pkt,
+		       unsigned idx, unsigned reg)
+{
+	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
+	volatile uint32_t *ib;
+	uint32_t tmp;
+	int r;
+	int i;
+	int face;
+	u32 tile_flags = 0;
+	u32 idx_value;
+
+	ib = p->ib.ptr;
+	track = (struct r100_cs_track *)p->track;
+	idx_value = radeon_get_ib_value(p, idx);
+	switch (reg) {
+	case RADEON_CRTC_GUI_TRIG_VLINE:
+		r = r100_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		break;
+		/* FIXME: only allow PACKET3 blit? easier to check for
+		 * out-of-range access */
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+		if (r)
+			return r;
+		break;
+	case RADEON_RB3D_DEPTHOFFSET:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->zb.robj = reloc->robj;
+		track->zb.offset = idx_value;
+		track->zb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case RADEON_RB3D_COLOROFFSET:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->cb[0].robj = reloc->robj;
+		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case R200_PP_TXOFFSET_0:
+	case R200_PP_TXOFFSET_1:
+	case R200_PP_TXOFFSET_2:
+	case R200_PP_TXOFFSET_3:
+	case R200_PP_TXOFFSET_4:
+	case R200_PP_TXOFFSET_5:
+		i = (reg - R200_PP_TXOFFSET_0) / 24;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R200_TXO_MACRO_TILE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R200_TXO_MICRO_TILE;
+
+			tmp = idx_value & ~(0x7 << 2);
+			tmp |= tile_flags;
+			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+		} else
+			ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case R200_PP_CUBIC_OFFSET_F1_0:
+	case R200_PP_CUBIC_OFFSET_F2_0:
+	case R200_PP_CUBIC_OFFSET_F3_0:
+	case R200_PP_CUBIC_OFFSET_F4_0:
+	case R200_PP_CUBIC_OFFSET_F5_0:
+	case R200_PP_CUBIC_OFFSET_F1_1:
+	case R200_PP_CUBIC_OFFSET_F2_1:
+	case R200_PP_CUBIC_OFFSET_F3_1:
+	case R200_PP_CUBIC_OFFSET_F4_1:
+	case R200_PP_CUBIC_OFFSET_F5_1:
+	case R200_PP_CUBIC_OFFSET_F1_2:
+	case R200_PP_CUBIC_OFFSET_F2_2:
+	case R200_PP_CUBIC_OFFSET_F3_2:
+	case R200_PP_CUBIC_OFFSET_F4_2:
+	case R200_PP_CUBIC_OFFSET_F5_2:
+	case R200_PP_CUBIC_OFFSET_F1_3:
+	case R200_PP_CUBIC_OFFSET_F2_3:
+	case R200_PP_CUBIC_OFFSET_F3_3:
+	case R200_PP_CUBIC_OFFSET_F4_3:
+	case R200_PP_CUBIC_OFFSET_F5_3:
+	case R200_PP_CUBIC_OFFSET_F1_4:
+	case R200_PP_CUBIC_OFFSET_F2_4:
+	case R200_PP_CUBIC_OFFSET_F3_4:
+	case R200_PP_CUBIC_OFFSET_F4_4:
+	case R200_PP_CUBIC_OFFSET_F5_4:
+	case R200_PP_CUBIC_OFFSET_F1_5:
+	case R200_PP_CUBIC_OFFSET_F2_5:
+	case R200_PP_CUBIC_OFFSET_F3_5:
+	case R200_PP_CUBIC_OFFSET_F4_5:
+	case R200_PP_CUBIC_OFFSET_F5_5:
+		i = (reg - R200_PP_TXOFFSET_0) / 24;
+		face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->textures[i].cube_info[face - 1].offset = idx_value;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		track->textures[i].cube_info[face - 1].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	case RADEON_RE_WIDTH_HEIGHT:
+		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_COLORPITCH:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= RADEON_COLOR_TILE_ENABLE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		} else
+			ib[idx] = idx_value;
+
+		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
+		break;
+	case RADEON_RB3D_DEPTHPITCH:
+		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_CNTL:
+		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+		case 7:
+		case 8:
+		case 9:
+		case 11:
+		case 12:
+			track->cb[0].cpp = 1;
+			break;
+		case 3:
+		case 4:
+		case 15:
+			track->cb[0].cpp = 2;
+			break;
+		case 6:
+			track->cb[0].cpp = 4;
+			break;
+		default:
+			DRM_ERROR("Invalid color buffer format (%d) !\n",
+				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+			return -EINVAL;
+		}
+		if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
+			DRM_ERROR("No support for depth xy offset in kms\n");
+			return -EINVAL;
+		}
+
+		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_ZSTENCILCNTL:
+		switch (idx_value & 0xf) {
+		case 0:
+			track->zb.cpp = 2;
+			break;
+		case 2:
+		case 3:
+		case 4:
+		case 5:
+		case 9:
+		case 11:
+			track->zb.cpp = 4;
+			break;
+		default:
+			break;
+		}
+		track->zb_dirty = true;
+		break;
+	case RADEON_RB3D_ZPASS_ADDR:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case RADEON_PP_CNTL:
+		{
+			uint32_t temp = idx_value >> 4;
+			for (i = 0; i < track->num_texture; i++)
+				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
+		}
+		break;
+	case RADEON_SE_VF_CNTL:
+		track->vap_vf_cntl = idx_value;
+		break;
+	case 0x210c:
+		/* VAP_VF_MAX_VTX_INDX */
+		track->max_indx = idx_value & 0x00FFFFFFUL;
+		break;
+	case R200_SE_VTX_FMT_0:
+		track->vtx_size = r200_get_vtx_size_0(idx_value);
+		break;
+	case R200_SE_VTX_FMT_1:
+		track->vtx_size += r200_get_vtx_size_1(idx_value);
+		break;
+	case R200_PP_TXSIZE_0:
+	case R200_PP_TXSIZE_1:
+	case R200_PP_TXSIZE_2:
+	case R200_PP_TXSIZE_3:
+	case R200_PP_TXSIZE_4:
+	case R200_PP_TXSIZE_5:
+		i = (reg - R200_PP_TXSIZE_0) / 32;
+		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
+		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
+		break;
+	case R200_PP_TXPITCH_0:
+	case R200_PP_TXPITCH_1:
+	case R200_PP_TXPITCH_2:
+	case R200_PP_TXPITCH_3:
+	case R200_PP_TXPITCH_4:
+	case R200_PP_TXPITCH_5:
+		i = (reg - R200_PP_TXPITCH_0) / 32;
+		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
+		break;
+	case R200_PP_TXFILTER_0:
+	case R200_PP_TXFILTER_1:
+	case R200_PP_TXFILTER_2:
+	case R200_PP_TXFILTER_3:
+	case R200_PP_TXFILTER_4:
+	case R200_PP_TXFILTER_5:
+		i = (reg - R200_PP_TXFILTER_0) / 32;
+		track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK)
+						 >> R200_MAX_MIP_LEVEL_SHIFT);
+		tmp = (idx_value >> 23) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_w = false;
+		tmp = (idx_value >> 27) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
+		break;
+	case R200_PP_TXMULTI_CTL_0:
+	case R200_PP_TXMULTI_CTL_1:
+	case R200_PP_TXMULTI_CTL_2:
+	case R200_PP_TXMULTI_CTL_3:
+	case R200_PP_TXMULTI_CTL_4:
+	case R200_PP_TXMULTI_CTL_5:
+		i = (reg - R200_PP_TXMULTI_CTL_0) / 32;
+		break;
+	case R200_PP_TXFORMAT_X_0:
+	case R200_PP_TXFORMAT_X_1:
+	case R200_PP_TXFORMAT_X_2:
+	case R200_PP_TXFORMAT_X_3:
+	case R200_PP_TXFORMAT_X_4:
+	case R200_PP_TXFORMAT_X_5:
+		i = (reg - R200_PP_TXFORMAT_X_0) / 32;
+		track->textures[i].txdepth = idx_value & 0x7;
+		tmp = (idx_value >> 16) & 0x3;
+		/* 2D, 3D, CUBE */
+		switch (tmp) {
+		case 0:
+		case 3:
+		case 4:
+		case 5:
+		case 6:
+		case 7:
+			/* 1D/2D */
+			track->textures[i].tex_coord_type = 0;
+			break;
+		case 1:
+			/* CUBE */
+			track->textures[i].tex_coord_type = 2;
+			break;
+		case 2:
+			/* 3D */
+			track->textures[i].tex_coord_type = 1;
+			break;
+		}
+		track->tex_dirty = true;
+		break;
+	case R200_PP_TXFORMAT_0:
+	case R200_PP_TXFORMAT_1:
+	case R200_PP_TXFORMAT_2:
+	case R200_PP_TXFORMAT_3:
+	case R200_PP_TXFORMAT_4:
+	case R200_PP_TXFORMAT_5:
+		i = (reg - R200_PP_TXFORMAT_0) / 32;
+		if (idx_value & R200_TXFORMAT_NON_POWER2) {
+			track->textures[i].use_pitch = 1;
+		} else {
+			track->textures[i].use_pitch = 0;
+			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+		}
+		if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
+			track->textures[i].lookup_disable = true;
+		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
+		case R200_TXFORMAT_I8:
+		case R200_TXFORMAT_RGB332:
+		case R200_TXFORMAT_Y8:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R200_TXFORMAT_AI88:
+		case R200_TXFORMAT_ARGB1555:
+		case R200_TXFORMAT_RGB565:
+		case R200_TXFORMAT_ARGB4444:
+		case R200_TXFORMAT_VYUY422:
+		case R200_TXFORMAT_YVYU422:
+		case R200_TXFORMAT_LDVDU655:
+		case R200_TXFORMAT_DVDU88:
+		case R200_TXFORMAT_AVYU4444:
+			track->textures[i].cpp = 2;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R200_TXFORMAT_ARGB8888:
+		case R200_TXFORMAT_RGBA8888:
+		case R200_TXFORMAT_ABGR8888:
+		case R200_TXFORMAT_BGR111110:
+		case R200_TXFORMAT_LDVDU8888:
+			track->textures[i].cpp = 4;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R200_TXFORMAT_DXT1:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+			break;
+		case R200_TXFORMAT_DXT23:
+		case R200_TXFORMAT_DXT45:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+			break;
+		}
+		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
+		break;
+	case R200_PP_CUBIC_FACES_0:
+	case R200_PP_CUBIC_FACES_1:
+	case R200_PP_CUBIC_FACES_2:
+	case R200_PP_CUBIC_FACES_3:
+	case R200_PP_CUBIC_FACES_4:
+	case R200_PP_CUBIC_FACES_5:
+		tmp = idx_value;
+		i = (reg - R200_PP_CUBIC_FACES_0) / 32;
+		for (face = 0; face < 4; face++) {
+			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
+			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
+		}
+		track->tex_dirty = true;
+		break;
+	default:
+		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+		       reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void r200_set_safe_registers(struct radeon_device *rdev)
+{
+	rdev->config.r100.reg_safe_bm = r200_reg_safe_bm;
+	rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm);
+}
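+
+/* r200_reg_safe_bm (from r200_reg_safe.h) is a per-register-dword bitmap
+ * the CS checker consults to decide which PACKET0 writes can pass straight
+ * through and which must be validated by r200_packet0_check() above;
+ * registering it here switches the shared r100 checker over to the r200
+ * register layout.
+ */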
diff --git a/linux-imx/drivers/gpu/drm/radeon/r300.c b/linux-imx/drivers/gpu/drm/radeon/r300.c
new file mode 100644
index 0000000..b9b776f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r300.c
@@ -0,0 +1,1558 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <drm/radeon_drm.h>
+#include "r100_track.h"
+#include "r300d.h"
+#include "rv350d.h"
+#include "r300_reg_safe.h"
+
+/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
+ *
+ * GPU Errata:
+ * - HOST_PATH_CNTL: the r300 family seems to dislike writes to
+ *   HOST_PATH_CNTL issued over MMIO to flush the host path read cache;
+ *   this leads to a HARDLOCKUP.  However, scheduling such a write on the
+ *   ring seems harmless.  I suspect the CP read collides with the flush
+ *   somehow, or maybe the MC; hard to tell. (Jerome Glisse)
+ */
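+
+/* The errata above is why r300_fence_ring_emit() below performs its
+ * RADEON_HOST_PATH_CNTL toggle through ring packets instead of MMIO. */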
+
+/*
+ * rv370,rv380 PCIE GART
+ */
+static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
+void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	int i;
+
+	/* Work around a HW bug: flush twice */
+	for (i = 0; i < 2; i++) {
+		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
+		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+	}
+	mb();
+}
+
+#define R300_PTE_WRITEABLE (1 << 2)
+#define R300_PTE_READABLE  (1 << 3)
+
+int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+	void __iomem *ptr = rdev->gart.ptr;
+
+	/* valid page indices are 0 .. num_gpu_pages - 1 */
+	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+		return -EINVAL;
+	}
+	addr = (lower_32_bits(addr) >> 8) |
+	       ((upper_32_bits(addr) & 0xff) << 24) |
+	       R300_PTE_WRITEABLE | R300_PTE_READABLE;
+	/* On x86 we want this to be CPU endian; on powerpc without HW
+	 * swappers it'll get swapped on the way into VRAM, so there is
+	 * no need for cpu_to_le32 on VRAM tables. */
+	writel(addr, ((void __iomem *)ptr) + (i * 4));
+	return 0;
+}
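+
+/* PTE layout sketch (illustrative only): for a 4 KiB-aligned bus address
+ * such as 0x12_3456_7000, lower_32_bits(addr) >> 8 yields 0x00345670 in
+ * bits 0-23, (upper_32_bits(addr) & 0xff) << 24 puts 0x12 in bits 24-31,
+ * and the low bits freed by the shift hold the R300_PTE_WRITEABLE and
+ * R300_PTE_READABLE permission flags.
+ */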
+
+int rv370_pcie_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj) {
+		WARN(1, "RV370 PCIE GART already initialized\n");
+		return 0;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r)
+		return r;
+	r = rv370_debugfs_pcie_gart_info_init(rdev);
+	if (r)
+		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+	rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+	rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+	return radeon_gart_table_vram_alloc(rdev);
+}
+
+int rv370_pcie_gart_enable(struct radeon_device *rdev)
+{
+	uint32_t table_addr;
+	uint32_t tmp;
+	int r;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+	/* discard memory requests outside of the configured range */
+	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
+	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
+	table_addr = rdev->gart.table_addr;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
+	/* FIXME: setup default page */
+	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
+	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
+	/* Clear error */
+	WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+	tmp |= RADEON_PCIE_TX_GART_EN;
+	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+	rv370_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
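+
+/* Sizing sketch (illustrative only, assuming 4 KiB GPU pages and an
+ * inclusive gtt_end): a 256 MiB GTT starting at gtt_start = 0 spans
+ * START_LO = 0x00000000 to END_LO = 0x0FFFF000 and needs 65536 PTEs,
+ * i.e. a 256 KiB table in VRAM (table_size = num_gpu_pages * 4).
+ */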
+
+void rv370_pcie_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+void rv370_pcie_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	rv370_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+}
+
+void r300_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+
+	/* Whoever calls radeon_fence_emit should call ring_lock and ask
+	 * for enough space (today the callers are IB scheduling and buffer moves) */
+	/* Write SC register so SC & US assert idle */
+	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
+	radeon_ring_write(ring, 0);
+	/* Flush 3D cache */
+	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_ZC_FLUSH);
+	/* Wait until IDLE & CLEAN */
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
+				 RADEON_WAIT_2D_IDLECLEAN |
+				 RADEON_WAIT_DMA_GUI_IDLE));
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
+	/* Emit fence sequence & fire IRQ */
+	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+}
+
+void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	unsigned gb_tile_config;
+	int r;
+
+	/* Sub-pixel accuracy of 1/12 so we can have 4K rendering according to the docs */
+	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
+	switch (rdev->num_gb_pipes) {
+	case 2:
+		gb_tile_config |= R300_PIPE_COUNT_R300;
+		break;
+	case 3:
+		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+		break;
+	case 4:
+		gb_tile_config |= R300_PIPE_COUNT_R420;
+		break;
+	case 1:
+	default:
+		gb_tile_config |= R300_PIPE_COUNT_RV350;
+		break;
+	}
+
+	r = radeon_ring_lock(rdev, ring, 64);
+	if (r) {
+		return;
+	}
+	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+	radeon_ring_write(ring,
+			  RADEON_ISYNC_ANY2D_IDLE3D |
+			  RADEON_ISYNC_ANY3D_IDLE2D |
+			  RADEON_ISYNC_WAIT_IDLEGUI |
+			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+	radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
+	radeon_ring_write(ring, gb_tile_config);
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring,
+			  RADEON_WAIT_2D_IDLECLEAN |
+			  RADEON_WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+	radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(ring,
+			  RADEON_WAIT_2D_IDLECLEAN |
+			  RADEON_WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
+	radeon_ring_write(ring,
+			  ((6 << R300_MS_X0_SHIFT) |
+			   (6 << R300_MS_Y0_SHIFT) |
+			   (6 << R300_MS_X1_SHIFT) |
+			   (6 << R300_MS_Y1_SHIFT) |
+			   (6 << R300_MS_X2_SHIFT) |
+			   (6 << R300_MS_Y2_SHIFT) |
+			   (6 << R300_MSBD0_Y_SHIFT) |
+			   (6 << R300_MSBD0_X_SHIFT)));
+	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
+	radeon_ring_write(ring,
+			  ((6 << R300_MS_X3_SHIFT) |
+			   (6 << R300_MS_Y3_SHIFT) |
+			   (6 << R300_MS_X4_SHIFT) |
+			   (6 << R300_MS_Y4_SHIFT) |
+			   (6 << R300_MS_X5_SHIFT) |
+			   (6 << R300_MS_Y5_SHIFT) |
+			   (6 << R300_MSBD1_SHIFT)));
+	radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
+	radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+	radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
+	radeon_ring_write(ring,
+			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
+	radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
+	radeon_ring_write(ring,
+			  R300_GEOMETRY_ROUND_NEAREST |
+			  R300_COLOR_ROUND_NEAREST);
+	radeon_ring_unlock_commit(rdev, ring);
+}
+
+static void r300_errata(struct radeon_device *rdev)
+{
+	rdev->pll_errata = 0;
+
+	if (rdev->family == CHIP_R300 &&
+	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
+		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
+	}
+}
+
+int r300_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & R300_MC_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+static void r300_gpu_init(struct radeon_device *rdev)
+{
+	uint32_t gb_tile_config, tmp;
+
+	if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
+	    (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
+		/* r300,r350 */
+		rdev->num_gb_pipes = 2;
+	} else {
+		/* rv350,rv370,rv380,r300 AD, r350 AH */
+		rdev->num_gb_pipes = 1;
+	}
+	rdev->num_z_pipes = 1;
+	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
+	switch (rdev->num_gb_pipes) {
+	case 2:
+		gb_tile_config |= R300_PIPE_COUNT_R300;
+		break;
+	case 3:
+		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
+		break;
+	case 4:
+		gb_tile_config |= R300_PIPE_COUNT_R420;
+		break;
+	default:
+	case 1:
+		gb_tile_config |= R300_PIPE_COUNT_RV350;
+		break;
+	}
+	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
+
+	WREG32(R300_RB2D_DSTCACHE_MODE,
+	       R300_DC_AUTOFLUSH_ENABLE |
+	       R300_DC_DC_DISABLE_IGNORE_PE);
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+	if (r300_mc_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait MC idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
+		 rdev->num_gb_pipes, rdev->num_z_pipes);
+}
+
+int r300_asic_reset(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+	u32 status, tmp;
+	int ret = 0;
+
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
+	}
+	r100_mc_stop(rdev, &save);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* stop CP */
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	WREG32(RADEON_CP_RB_WPTR, 0);
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	/* save PCI state */
+	pci_save_state(rdev->pdev);
+	/* disable bus mastering */
+	r100_bm_disable(rdev);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+					S_0000F0_SOFT_RESET_GA(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* Resetting the CP seems to be problematic: sometimes it ends up
+	 * hard locking the computer, but it's necessary for a successful
+	 * reset.  More testing and experimenting is needed on R3XX/R4XX
+	 * to find a reliable solution (if one exists).
+	 */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* restore PCI & busmastering */
+	pci_restore_state(rdev->pdev);
+	r100_enable_bm(rdev);
+	/* Check if GPU is idle */
+	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+		dev_err(rdev->dev, "failed to reset GPU\n");
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeed\n");
+	r100_mc_resume(rdev, &save);
+	return ret;
+}
+
+/*
+ * r300,r350,rv350,rv380 VRAM info
+ */
+void r300_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+	u32 tmp;
+
+	/* DDR for all cards after R300 & IGP */
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32(RADEON_MEM_CNTL);
+	tmp &= R300_MEM_NUM_CHANNELS_MASK;
+	switch (tmp) {
+	case 0: rdev->mc.vram_width = 64; break;
+	case 1: rdev->mc.vram_width = 128; break;
+	case 2: rdev->mc.vram_width = 256; break;
+	default:  rdev->mc.vram_width = 128; break;
+	}
+	r100_vram_init_sizes(rdev);
+	base = rdev->mc.aper_base;
+	if (rdev->flags & RADEON_IS_IGP)
+		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = 0;
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+	uint32_t link_width_cntl, mask;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* FIXME wait for idle */
+
+	switch (lanes) {
+	case 0:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+		break;
+	case 1:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+		break;
+	case 2:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+		break;
+	case 4:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+		break;
+	case 8:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+		break;
+	case 12:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+		break;
+	case 16:
+	default:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+		break;
+	}
+
+	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
+	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
+		return;
+
+	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
+			     RADEON_PCIE_LC_RECONFIG_NOW |
+			     RADEON_PCIE_LC_RECONFIG_LATER |
+			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
+	link_width_cntl |= mask;
+	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
+						     RADEON_PCIE_LC_RECONFIG_NOW));
+
+	/* wait for lane set to complete */
+	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+	while (link_width_cntl == 0xffffffff)
+		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+}
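+
+/* Usage sketch (illustrative only): power-management code could drop the
+ * link width while idle and restore it for rendering, e.g.
+ *
+ *	rv370_set_pcie_lanes(rdev, 1);    (save power)
+ *	...
+ *	rv370_set_pcie_lanes(rdev, 16);   (full bandwidth)
+ *
+ * rv370_get_pcie_lanes() below reads the currently selected width back
+ * from the same PCIE_LC_LINK_WIDTH_CNTL register.
+ */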
+
+int rv370_get_pcie_lanes(struct radeon_device *rdev)
+{
+	u32 link_width_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return 0;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return 0;
+
+	/* FIXME wait for idle */
+
+	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+	case RADEON_PCIE_LC_LINK_WIDTH_X0:
+		return 0;
+	case RADEON_PCIE_LC_LINK_WIDTH_X1:
+		return 1;
+	case RADEON_PCIE_LC_LINK_WIDTH_X2:
+		return 2;
+	case RADEON_PCIE_LC_LINK_WIDTH_X4:
+		return 4;
+	case RADEON_PCIE_LC_LINK_WIDTH_X8:
+		return 8;
+	case RADEON_PCIE_LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
+	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
+	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
+	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
+	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
+	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
+	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
+	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list rv370_pcie_gart_info_list[] = {
+	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
+};
+#endif
+
+static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
+static int r300_packet0_check(struct radeon_cs_parser *p,
+		struct radeon_cs_packet *pkt,
+		unsigned idx, unsigned reg)
+{
+	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
+	volatile uint32_t *ib;
+	uint32_t tmp, tile_flags = 0;
+	unsigned i;
+	int r;
+	u32 idx_value;
+
+	ib = p->ib.ptr;
+	track = (struct r100_cs_track *)p->track;
+	idx_value = radeon_get_ib_value(p, idx);
+
+	switch (reg) {
+	case AVIVO_D1MODE_VLINE_START_END:
+	case RADEON_CRTC_GUI_TRIG_VLINE:
+		r = r100_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		break;
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+		if (r)
+			return r;
+		break;
+	case R300_RB3D_COLOROFFSET0:
+	case R300_RB3D_COLOROFFSET1:
+	case R300_RB3D_COLOROFFSET2:
+	case R300_RB3D_COLOROFFSET3:
+		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->cb[i].robj = reloc->robj;
+		track->cb[i].offset = idx_value;
+		track->cb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case R300_ZB_DEPTHOFFSET:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->zb.robj = reloc->robj;
+		track->zb.offset = idx_value;
+		track->zb_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case R300_TX_OFFSET_0:
+	case R300_TX_OFFSET_0+4:
+	case R300_TX_OFFSET_0+8:
+	case R300_TX_OFFSET_0+12:
+	case R300_TX_OFFSET_0+16:
+	case R300_TX_OFFSET_0+20:
+	case R300_TX_OFFSET_0+24:
+	case R300_TX_OFFSET_0+28:
+	case R300_TX_OFFSET_0+32:
+	case R300_TX_OFFSET_0+36:
+	case R300_TX_OFFSET_0+40:
+	case R300_TX_OFFSET_0+44:
+	case R300_TX_OFFSET_0+48:
+	case R300_TX_OFFSET_0+52:
+	case R300_TX_OFFSET_0+56:
+	case R300_TX_OFFSET_0+60:
+		i = (reg - R300_TX_OFFSET_0) >> 2;
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+
+		if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
+			ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
+				  ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
+		} else {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R300_TXO_MACRO_TILE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R300_TXO_MICRO_TILE;
+			else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+				tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
+
+			tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		}
+		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
+		break;
+	/* Tracked registers */
+	case 0x2084:
+		/* VAP_VF_CNTL */
+		track->vap_vf_cntl = idx_value;
+		break;
+	case 0x20B4:
+		/* VAP_VTX_SIZE */
+		track->vtx_size = idx_value & 0x7F;
+		break;
+	case 0x2134:
+		/* VAP_VF_MAX_VTX_INDX */
+		track->max_indx = idx_value & 0x00FFFFFFUL;
+		break;
+	case 0x2088:
+		/* VAP_ALT_NUM_VERTICES - only valid on r500 */
+		if (p->rdev->family < CHIP_RV515)
+			goto fail;
+		track->vap_alt_nverts = idx_value & 0xFFFFFF;
+		break;
+	case 0x43E4:
+		/* SC_SCISSOR1 */
+		track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
+		if (p->rdev->family < CHIP_RV515) {
+			track->maxy -= 1440;
+		}
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		break;
+	case 0x4E00:
+		/* RB3D_CCTL */
+		if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
+		    p->rdev->cmask_filp != p->filp) {
+			DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
+			return -EINVAL;
+		}
+		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
+		track->cb_dirty = true;
+		break;
+	case 0x4E38:
+	case 0x4E3C:
+	case 0x4E40:
+	case 0x4E44:
+		/* RB3D_COLORPITCH0 */
+		/* RB3D_COLORPITCH1 */
+		/* RB3D_COLORPITCH2 */
+		/* RB3D_COLORPITCH3 */
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					  idx, reg);
+				radeon_cs_dump_packet(p, pkt);
+				return r;
+			}
+
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R300_COLOR_TILE_ENABLE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+			else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+				tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		}
+		i = (reg - 0x4E38) >> 2;
+		track->cb[i].pitch = idx_value & 0x3FFE;
+		switch (((idx_value >> 21) & 0xF)) {
+		case 9:
+		case 11:
+		case 12:
+			track->cb[i].cpp = 1;
+			break;
+		case 3:
+		case 4:
+		case 13:
+		case 15:
+			track->cb[i].cpp = 2;
+			break;
+		case 5:
+			if (p->rdev->family < CHIP_RV515) {
+				DRM_ERROR("Invalid color buffer format (%d)!\n",
+					  ((idx_value >> 21) & 0xF));
+				return -EINVAL;
+			}
+			/* Fall through. */
+		case 6:
+			track->cb[i].cpp = 4;
+			break;
+		case 10:
+			track->cb[i].cpp = 8;
+			break;
+		case 7:
+			track->cb[i].cpp = 16;
+			break;
+		default:
+			DRM_ERROR("Invalid color buffer format (%d) !\n",
+				  ((idx_value >> 21) & 0xF));
+			return -EINVAL;
+		}
+		track->cb_dirty = true;
+		break;
+	case 0x4F00:
+		/* ZB_CNTL */
+		if (idx_value & 2) {
+			track->z_enabled = true;
+		} else {
+			track->z_enabled = false;
+		}
+		track->zb_dirty = true;
+		break;
+	case 0x4F10:
+		/* ZB_FORMAT */
+		switch ((idx_value & 0xF)) {
+		case 0:
+		case 1:
+			track->zb.cpp = 2;
+			break;
+		case 2:
+			track->zb.cpp = 4;
+			break;
+		default:
+			DRM_ERROR("Invalid z buffer format (%d) !\n",
+				  (idx_value & 0xF));
+			return -EINVAL;
+		}
+		track->zb_dirty = true;
+		break;
+	case 0x4F24:
+		/* ZB_DEPTHPITCH */
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+			if (r) {
+				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					  idx, reg);
+				radeon_cs_dump_packet(p, pkt);
+				return r;
+			}
+
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R300_DEPTHMACROTILE_ENABLE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R300_DEPTHMICROTILE_TILED;
+			else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+				tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		}
+		track->zb.pitch = idx_value & 0x3FFC;
+		track->zb_dirty = true;
+		break;
+	case 0x4104:
+		/* TX_ENABLE */
+		for (i = 0; i < 16; i++) {
+			bool enabled;
+
+			enabled = !!(idx_value & (1 << i));
+			track->textures[i].enabled = enabled;
+		}
+		track->tex_dirty = true;
+		break;
+	case 0x44C0:
+	case 0x44C4:
+	case 0x44C8:
+	case 0x44CC:
+	case 0x44D0:
+	case 0x44D4:
+	case 0x44D8:
+	case 0x44DC:
+	case 0x44E0:
+	case 0x44E4:
+	case 0x44E8:
+	case 0x44EC:
+	case 0x44F0:
+	case 0x44F4:
+	case 0x44F8:
+	case 0x44FC:
+		/* TX_FORMAT1_[0-15] */
+		i = (reg - 0x44C0) >> 2;
+		tmp = (idx_value >> 25) & 0x3;
+		track->textures[i].tex_coord_type = tmp;
+		switch ((idx_value & 0x1F)) {
+		case R300_TX_FORMAT_X8:
+		case R300_TX_FORMAT_Y4X4:
+		case R300_TX_FORMAT_Z3Y3X2:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_X16:
+		case R300_TX_FORMAT_FL_I16:
+		case R300_TX_FORMAT_Y8X8:
+		case R300_TX_FORMAT_Z5Y6X5:
+		case R300_TX_FORMAT_Z6Y5X5:
+		case R300_TX_FORMAT_W4Z4Y4X4:
+		case R300_TX_FORMAT_W1Z5Y5X5:
+		case R300_TX_FORMAT_D3DMFT_CxV8U8:
+		case R300_TX_FORMAT_B8G8_B8G8:
+		case R300_TX_FORMAT_G8R8_G8B8:
+			track->textures[i].cpp = 2;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_Y16X16:
+		case R300_TX_FORMAT_FL_I16A16:
+		case R300_TX_FORMAT_Z11Y11X10:
+		case R300_TX_FORMAT_Z10Y11X11:
+		case R300_TX_FORMAT_W8Z8Y8X8:
+		case R300_TX_FORMAT_W2Z10Y10X10:
+		case 0x17:
+		case R300_TX_FORMAT_FL_I32:
+		case 0x1e:
+			track->textures[i].cpp = 4;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_W16Z16Y16X16:
+		case R300_TX_FORMAT_FL_R16G16B16A16:
+		case R300_TX_FORMAT_FL_I32A32:
+			track->textures[i].cpp = 8;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_FL_R32G32B32A32:
+			track->textures[i].cpp = 16;
+			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+			break;
+		case R300_TX_FORMAT_DXT1:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+			break;
+		case R300_TX_FORMAT_ATI2N:
+			if (p->rdev->family < CHIP_R420) {
+				DRM_ERROR("Invalid texture format %u\n",
+					  (idx_value & 0x1F));
+				return -EINVAL;
+			}
+			/* The same rules apply as for DXT3/5. */
+			/* fall through */
+		case R300_TX_FORMAT_DXT3:
+		case R300_TX_FORMAT_DXT5:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+			break;
+		default:
+			DRM_ERROR("Invalid texture format %u\n",
+				  (idx_value & 0x1F));
+			return -EINVAL;
+		}
+		track->tex_dirty = true;
+		break;
+	case 0x4400:
+	case 0x4404:
+	case 0x4408:
+	case 0x440C:
+	case 0x4410:
+	case 0x4414:
+	case 0x4418:
+	case 0x441C:
+	case 0x4420:
+	case 0x4424:
+	case 0x4428:
+	case 0x442C:
+	case 0x4430:
+	case 0x4434:
+	case 0x4438:
+	case 0x443C:
+		/* TX_FILTER0_[0-15] */
+		i = (reg - 0x4400) >> 2;
+		tmp = idx_value & 0x7;
+		if (tmp == 2 || tmp == 4 || tmp == 6) {
+			track->textures[i].roundup_w = false;
+		}
+		tmp = (idx_value >> 3) & 0x7;
+		if (tmp == 2 || tmp == 4 || tmp == 6) {
+			track->textures[i].roundup_h = false;
+		}
+		track->tex_dirty = true;
+		break;
+	case 0x4500:
+	case 0x4504:
+	case 0x4508:
+	case 0x450C:
+	case 0x4510:
+	case 0x4514:
+	case 0x4518:
+	case 0x451C:
+	case 0x4520:
+	case 0x4524:
+	case 0x4528:
+	case 0x452C:
+	case 0x4530:
+	case 0x4534:
+	case 0x4538:
+	case 0x453C:
+		/* TX_FORMAT2_[0-15] */
+		i = (reg - 0x4500) >> 2;
+		tmp = idx_value & 0x3FFF;
+		track->textures[i].pitch = tmp + 1;
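+		/* RV515+ carries an 11th width/height bit in bits 15/16,
+		 * extending the maximum texture size to 4096 texels */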
+		if (p->rdev->family >= CHIP_RV515) {
+			tmp = ((idx_value >> 15) & 1) << 11;
+			track->textures[i].width_11 = tmp;
+			tmp = ((idx_value >> 16) & 1) << 11;
+			track->textures[i].height_11 = tmp;
+
+			/* ATI1N */
+			if (idx_value & (1 << 14)) {
+				/* The same rules apply as for DXT1. */
+				track->textures[i].compress_format =
+					R100_TRACK_COMP_DXT1;
+			}
+		} else if (idx_value & (1 << 14)) {
+			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
+			return -EINVAL;
+		}
+		track->tex_dirty = true;
+		break;
+	case 0x4480:
+	case 0x4484:
+	case 0x4488:
+	case 0x448C:
+	case 0x4490:
+	case 0x4494:
+	case 0x4498:
+	case 0x449C:
+	case 0x44A0:
+	case 0x44A4:
+	case 0x44A8:
+	case 0x44AC:
+	case 0x44B0:
+	case 0x44B4:
+	case 0x44B8:
+	case 0x44BC:
+		/* TX_FORMAT0_[0-15] */
+		i = (reg - 0x4480) >> 2;
+		tmp = idx_value & 0x7FF;
+		track->textures[i].width = tmp + 1;
+		tmp = (idx_value >> 11) & 0x7FF;
+		track->textures[i].height = tmp + 1;
+		tmp = (idx_value >> 26) & 0xF;
+		track->textures[i].num_levels = tmp;
+		tmp = idx_value & (1 << 31);
+		track->textures[i].use_pitch = !!tmp;
+		tmp = (idx_value >> 22) & 0xF;
+		track->textures[i].txdepth = tmp;
+		track->tex_dirty = true;
+		break;
+	case R300_ZB_ZPASS_ADDR:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case 0x4e0c:
+		/* RB3D_COLOR_CHANNEL_MASK */
+		track->color_channel_mask = idx_value;
+		track->cb_dirty = true;
+		break;
+	case 0x43a4:
+		/* SC_HYPERZ_EN */
+		/* r300c emits this register - we need to disable hyperz for it
+		 * without complaining */
+		if (p->rdev->hyperz_filp != p->filp) {
+			if (idx_value & 0x1)
+				ib[idx] = idx_value & ~1;
+		}
+		break;
+	case 0x4f1c:
+		/* ZB_BW_CNTL */
+		track->zb_cb_clear = !!(idx_value & (1 << 5));
+		track->cb_dirty = true;
+		track->zb_dirty = true;
+		if (p->rdev->hyperz_filp != p->filp) {
+			if (idx_value & (R300_HIZ_ENABLE |
+					 R300_RD_COMP_ENABLE |
+					 R300_WR_COMP_ENABLE |
+					 R300_FAST_FILL_ENABLE))
+				goto fail;
+		}
+		break;
+	case 0x4e04:
+		/* RB3D_BLENDCNTL */
+		track->blend_read_enable = !!(idx_value & (1 << 2));
+		track->cb_dirty = true;
+		break;
+	case R300_RB3D_AARESOLVE_OFFSET:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->aa.robj = reloc->robj;
+		track->aa.offset = idx_value;
+		track->aa_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case R300_RB3D_AARESOLVE_PITCH:
+		track->aa.pitch = idx_value & 0x3FFE;
+		track->aa_dirty = true;
+		break;
+	case R300_RB3D_AARESOLVE_CTL:
+		track->aaresolve = idx_value & 0x1;
+		track->aa_dirty = true;
+		break;
+	case 0x4f30: /* ZB_MASK_OFFSET */
+	case 0x4f34: /* ZB_ZMASK_PITCH */
+	case 0x4f44: /* ZB_HIZ_OFFSET */
+	case 0x4f54: /* ZB_HIZ_PITCH */
+		if (idx_value && (p->rdev->hyperz_filp != p->filp))
+			goto fail;
+		break;
+	case 0x4028:
+		/* GB_Z_PEQ_CONFIG - only valid on RV350 and later */
+		if (idx_value && (p->rdev->hyperz_filp != p->filp))
+			goto fail;
+		if (p->rdev->family >= CHIP_RV350)
+			break;
+		goto fail;
+	case 0x4be8:
+		/* valid register only on RV530 */
+		if (p->rdev->family == CHIP_RV530)
+			break;
+		/* fallthrough do not move */
+	default:
+		goto fail;
+	}
+	return 0;
+fail:
+	printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n",
+	       reg, idx, idx_value);
+	return -EINVAL;
+}
+
+static int r300_packet3_check(struct radeon_cs_parser *p,
+			      struct radeon_cs_packet *pkt)
+{
+	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
+	volatile uint32_t *ib;
+	unsigned idx;
+	int r;
+
+	ib = p->ib.ptr;
+	idx = pkt->idx + 1;
+	track = (struct r100_cs_track *)p->track;
+	switch(pkt->opcode) {
+	case PACKET3_3D_LOAD_VBPNTR:
+		r = r100_packet3_load_vbpntr(p, pkt, idx);
+		if (r)
+			return r;
+		break;
+	case PACKET3_INDX_BUFFER:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+			radeon_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
+		if (r) {
+			return r;
+		}
+		break;
+	/* Draw packet */
+	case PACKET3_3D_DRAW_IMMD:
+		/* Number of dwords is vtx_size * (num_vertices - 1).
+		 * PRIM_WALK must be equal to 3: vertex data is embedded
+		 * in the command stream. */
+		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		track->immd_dwords = pkt->count - 1;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_IMMD_2:
+		/* Number of dwords is vtx_size * (num_vertices - 1).
+		 * PRIM_WALK must be equal to 3: vertex data is embedded
+		 * in the command stream. */
+		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		track->immd_dwords = pkt->count;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_VBUF:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_VBUF_2:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_INDX:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_DRAW_INDX_2:
+		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
+		r = r100_cs_track_check(p->rdev, track);
+		if (r) {
+			return r;
+		}
+		break;
+	case PACKET3_3D_CLEAR_HIZ:
+	case PACKET3_3D_CLEAR_ZMASK:
+		if (p->rdev->hyperz_filp != p->filp)
+			return -EINVAL;
+		break;
+	case PACKET3_3D_CLEAR_CMASK:
+		if (p->rdev->cmask_filp != p->filp)
+			return -EINVAL;
+		break;
+	case PACKET3_NOP:
+		break;
+	default:
+		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int r300_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	struct r100_cs_track *track;
+	int r;
+
+	track = kzalloc(sizeof(*track), GFP_KERNEL);
+	if (track == NULL)
+		return -ENOMEM;
+	r100_cs_track_clear(p->rdev, track);
+	p->track = track;
+	do {
+		r = radeon_cs_packet_parse(p, &pkt, p->idx);
+		if (r) {
+			return r;
+		}
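+		/* skip over this packet: it occupies pkt.count + 2 dwords,
+		 * including the header dword */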
+		p->idx += pkt.count + 2;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			r = r100_cs_parse_packet0(p, &pkt,
+						  p->rdev->config.r300.reg_safe_bm,
+						  p->rdev->config.r300.reg_safe_bm_size,
+						  &r300_packet0_check);
+			break;
+		case RADEON_PACKET_TYPE2:
+			break;
+		case RADEON_PACKET_TYPE3:
+			r = r300_packet3_check(p, &pkt);
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+			return -EINVAL;
+		}
+		if (r) {
+			return r;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+	return 0;
+}
+
+void r300_set_reg_safe(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
+}
+
+void r300_mc_program(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+	int r;
+
+	r = r100_debugfs_mc_info_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
+	}
+
+	/* Stops all mc clients */
+	r100_mc_stop(rdev, &save);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(R_00014C_MC_AGP_LOCATION,
+			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
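+		/* AGP_BASE_2 holds bits 39:32 of the AGP aperture base */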
+		WREG32(R_00015C_AGP_BASE_2,
+			upper_32_bits(rdev->mc.agp_base) & 0xff);
+	} else {
+		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
+		WREG32(R_000170_AGP_BASE, 0);
+		WREG32(R_00015C_AGP_BASE_2, 0);
+	}
+	/* Wait for mc idle */
+	if (r300_mc_wait_for_idle(rdev))
+		DRM_INFO("Failed to wait MC idle before programming MC.\n");
+	/* Program MC, should be a 32bits limited address space */
+	WREG32(R_000148_MC_FB_LOCATION,
+		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	r100_mc_resume(rdev, &save);
+}
+
+void r300_clock_startup(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (radeon_dynclks != -1 && radeon_dynclks)
+		radeon_legacy_set_clock_gating(rdev, 1);
+	/* We need to force some of the blocks on */
+	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
+	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+	if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
+		tmp |= S_00000D_FORCE_VAP(1);
+	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
+}
+
+static int r300_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
+	r300_mc_program(rdev);
+	/* Resume clock */
+	r300_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	r300_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	if (rdev->family == CHIP_R300 ||
+	    rdev->family == CHIP_R350 ||
+	    rdev->family == CHIP_RV350)
+		r100_enable_bm(rdev);
+
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int r300_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is disabled */
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	r300_clock_startup(rdev);
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	radeon_combios_asic_init(rdev->ddev);
+	/* Resume clock after posting */
+	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = r300_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int r300_suspend(struct radeon_device *rdev)
+{
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	return 0;
+}
+
+void r300_fini(struct radeon_device *rdev)
+{
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+int r300_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable VGA */
+	r100_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: disable VGA need to use VGA request */
+	/* restore some register to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+		return -EINVAL;
+	} else {
+		r = radeon_combios_init(rdev);
+		if (r)
+			return r;
+	}
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if cards are posted or not */
+	if (!radeon_boot_test_post_card(rdev))
+		return -EINVAL;
+	/* Set asic errata */
+	r300_errata(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r300_mc_init(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	r300_set_reg_safe(rdev);
+
+	rdev->accel_working = true;
+	r = r300_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init; stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCIE)
+			rv370_pcie_gart_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCI)
+			r100_pci_gart_fini(rdev);
+		radeon_agp_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/r300_cmdbuf.c b/linux-imx/drivers/gpu/drm/radeon/r300_cmdbuf.c
new file mode 100644
index 0000000..60170ea
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -0,0 +1,1186 @@
+/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
+ *
+ * Copyright (C) The Weather Channel, Inc.  2002.
+ * Copyright (C) 2004 Nicolai Haehnle.
+ * All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Nicolai Haehnle <prefect_@gmx.net>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_buffer.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+#include "r300_reg.h"
+
+#include <asm/unaligned.h>
+
+#define R300_SIMULTANEOUS_CLIPRECTS		4
+
+/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
+ */
+static const int r300_cliprect_cntl[4] = {
+	0xAAAA,
+	0xEEEE,
+	0xFEFE,
+	0xFFFE
+};
+
+/**
+ * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
+ * buffer, starting with index n.
+ */
+static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
+			       drm_radeon_kcmd_buffer_t *cmdbuf, int n)
+{
+	struct drm_clip_rect box;
+	int nr;
+	int i;
+	RING_LOCALS;
+
+	nr = cmdbuf->nbox - n;
+	if (nr > R300_SIMULTANEOUS_CLIPRECTS)
+		nr = R300_SIMULTANEOUS_CLIPRECTS;
+
+	DRM_DEBUG("%i cliprects\n", nr);
+
+	if (nr) {
+		BEGIN_RING(6 + nr * 2);
+		OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
+
+		for (i = 0; i < nr; ++i) {
+			if (DRM_COPY_FROM_USER
+			    (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
+				DRM_ERROR("copy cliprect faulted\n");
+				return -EFAULT;
+			}
+
+			box.x2--; /* Hardware expects inclusive bottom-right corner */
+			box.y2--;
+
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+				box.x1 = (box.x1) &
+					R300_CLIPRECT_MASK;
+				box.y1 = (box.y1) &
+					R300_CLIPRECT_MASK;
+				box.x2 = (box.x2) &
+					R300_CLIPRECT_MASK;
+				box.y2 = (box.y2) &
+					R300_CLIPRECT_MASK;
+			} else {
+				box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
+					R300_CLIPRECT_MASK;
+				box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
+					R300_CLIPRECT_MASK;
+				box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
+					R300_CLIPRECT_MASK;
+				box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
+					R300_CLIPRECT_MASK;
+			}
+
+			OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
+				 (box.y1 << R300_CLIPRECT_Y_SHIFT));
+			OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
+				 (box.y2 << R300_CLIPRECT_Y_SHIFT));
+		}
+
+		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
+
+		/* TODO/SECURITY: Force scissors to a safe value, otherwise the
+		 * client might be able to trample over memory.
+		 * The impact should be very limited, but I'd rather be safe than
+		 * sorry.
+		 */
+		OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
+		OUT_RING(0);
+		OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
+		ADVANCE_RING();
+	} else {
+		/* Why we allow zero cliprect rendering:
+		 * There are some commands in a command buffer that must be submitted
+		 * even when there are no cliprects, e.g. DMA buffer discard
+		 * or state setting (though state setting could be avoided by
+		 * simulating a loss of context).
+		 *
+		 * Now since the cmdbuf interface is so chaotic right now (and is
+		 * bound to remain that way for a bit until things settle down),
+		 * it is basically impossible to filter out the commands that are
+		 * necessary and those that aren't.
+		 *
+		 * So I choose the safe way and don't do any filtering at all;
+		 * instead, I simply set up the engine so that all rendering
+		 * can't produce any fragments.
+		 */
+		BEGIN_RING(2);
+		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
+		ADVANCE_RING();
+	}
+
+	/* flush cache and wait for idle clean after cliprect change */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_DC_FLUSH);
+	ADVANCE_RING();
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	ADVANCE_RING();
+	/* set flush flag */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
+	return 0;
+}
+
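+/* One flag byte per 32-bit register in the 64K register address space. */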
+static u8 r300_reg_flags[0x10000 >> 2];
+
+void r300_init_reg_flags(struct drm_device *dev)
+{
+	int i;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	memset(r300_reg_flags, 0, 0x10000 >> 2);
+#define ADD_RANGE_MARK(reg, count,mark) \
+		for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
+			r300_reg_flags[i]|=(mark);
+
+#define MARK_SAFE		1
+#define MARK_CHECK_OFFSET	2
+
+#define ADD_RANGE(reg, count)	ADD_RANGE_MARK(reg, count, MARK_SAFE)
+
+	/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
+	ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
+	ADD_RANGE(R300_VAP_CNTL, 1);
+	ADD_RANGE(R300_SE_VTE_CNTL, 2);
+	ADD_RANGE(0x2134, 2);
+	ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
+	ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
+	ADD_RANGE(0x21DC, 1);
+	ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
+	ADD_RANGE(R300_VAP_CLIP_X_0, 4);
+	ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
+	ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
+	ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
+	ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
+	ADD_RANGE(R300_GB_ENABLE, 1);
+	ADD_RANGE(R300_GB_MSPOS0, 5);
+	ADD_RANGE(R300_TX_INVALTAGS, 1);
+	ADD_RANGE(R300_TX_ENABLE, 1);
+	ADD_RANGE(0x4200, 4);
+	ADD_RANGE(0x4214, 1);
+	ADD_RANGE(R300_RE_POINTSIZE, 1);
+	ADD_RANGE(0x4230, 3);
+	ADD_RANGE(R300_RE_LINE_CNT, 1);
+	ADD_RANGE(R300_RE_UNK4238, 1);
+	ADD_RANGE(0x4260, 3);
+	ADD_RANGE(R300_RE_SHADE, 4);
+	ADD_RANGE(R300_RE_POLYGON_MODE, 5);
+	ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
+	ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
+	ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
+	ADD_RANGE(R300_RE_CULL_CNTL, 1);
+	ADD_RANGE(0x42C0, 2);
+	ADD_RANGE(R300_RS_CNTL_0, 2);
+
+	ADD_RANGE(R300_SU_REG_DEST, 1);
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530)
+		ADD_RANGE(RV530_FG_ZBREG_DEST, 1);
+
+	ADD_RANGE(R300_SC_HYPERZ, 2);
+	ADD_RANGE(0x43E8, 1);
+
+	ADD_RANGE(0x46A4, 5);
+
+	ADD_RANGE(R300_RE_FOG_STATE, 1);
+	ADD_RANGE(R300_FOG_COLOR_R, 3);
+	ADD_RANGE(R300_PP_ALPHA_TEST, 2);
+	ADD_RANGE(0x4BD8, 1);
+	ADD_RANGE(R300_PFS_PARAM_0_X, 64);
+	ADD_RANGE(0x4E00, 1);
+	ADD_RANGE(R300_RB3D_CBLEND, 2);
+	ADD_RANGE(R300_RB3D_COLORMASK, 1);
+	ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
+	ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);	/* check offset */
+	ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
+	ADD_RANGE(0x4E50, 9);
+	ADD_RANGE(0x4E88, 1);
+	ADD_RANGE(0x4EA0, 2);
+	ADD_RANGE(R300_ZB_CNTL, 3);
+	ADD_RANGE(R300_ZB_FORMAT, 4);
+	ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);	/* check offset */
+	ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
+	ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
+	ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
+	ADD_RANGE(R300_ZB_ZPASS_DATA, 2); /* ZB_ZPASS_DATA, ZB_ZPASS_ADDR */
+
+	ADD_RANGE(R300_TX_FILTER_0, 16);
+	ADD_RANGE(R300_TX_FILTER1_0, 16);
+	ADD_RANGE(R300_TX_SIZE_0, 16);
+	ADD_RANGE(R300_TX_FORMAT_0, 16);
+	ADD_RANGE(R300_TX_PITCH_0, 16);
+	/* Texture offset is dangerous and needs more checking */
+	ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
+	ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
+	ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
+
+	/* Sporadic registers used as primitives are emitted */
+	ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
+	ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
+	ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
+	ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+		ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
+		ADD_RANGE(R500_US_CONFIG, 2);
+		ADD_RANGE(R500_US_CODE_ADDR, 3);
+		ADD_RANGE(R500_US_FC_CTRL, 1);
+		ADD_RANGE(R500_RS_IP_0, 16);
+		ADD_RANGE(R500_RS_INST_0, 16);
+		ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
+		ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
+		ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
+	} else {
+		ADD_RANGE(R300_PFS_CNTL_0, 3);
+		ADD_RANGE(R300_PFS_NODE_0, 4);
+		ADD_RANGE(R300_PFS_TEXI_0, 64);
+		ADD_RANGE(R300_PFS_INSTR0_0, 64);
+		ADD_RANGE(R300_PFS_INSTR1_0, 64);
+		ADD_RANGE(R300_PFS_INSTR2_0, 64);
+		ADD_RANGE(R300_PFS_INSTR3_0, 64);
+		ADD_RANGE(R300_RS_INTERP_0, 8);
+		ADD_RANGE(R300_RS_ROUTE_0, 8);
+	}
+}
+
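+/* Returns -1 if the range lies outside the 64K register space, 1 if any
+ * register in it needs a careful per-dword check, and 0 if the whole
+ * range is marked safe. */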
+static __inline__ int r300_check_range(unsigned reg, int count)
+{
+	int i;
+	if (reg & ~0xffff)
+		return -1;
+	for (i = (reg >> 2); i < (reg >> 2) + count; i++)
+		if (r300_reg_flags[i] != MARK_SAFE)
+			return 1;
+	return 0;
+}
+
+static __inline__ int
+r300_emit_carefully_checked_packet0(drm_radeon_private_t *dev_priv,
+				    drm_radeon_kcmd_buffer_t *cmdbuf,
+				    drm_r300_cmd_header_t header)
+{
+	int reg;
+	int sz;
+	int i;
+	u32 *value;
+	RING_LOCALS;
+
+	sz = header.packet0.count;
+	reg = (header.packet0.reghi << 8) | header.packet0.reglo;
+
+	if ((sz > 64) || (sz < 0)) {
+		DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
+			 reg, sz);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < sz; i++) {
+		switch (r300_reg_flags[(reg >> 2) + i]) {
+		case MARK_SAFE:
+			break;
+		case MARK_CHECK_OFFSET:
+			value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+			if (!radeon_check_offset(dev_priv, *value)) {
+				DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
+					 reg, sz);
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("Register %04x failed check as flag=%02x\n",
+				reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
+			return -EINVAL;
+		}
+	}
+
+	BEGIN_RING(1 + sz);
+	OUT_RING(CP_PACKET0(reg, sz - 1));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+/**
+ * Emits a packet0 setting arbitrary registers.
+ * Called by r300_do_cp_cmdbuf.
+ *
+ * Note that checks are performed on contents and addresses of the registers
+ */
+static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
+					drm_radeon_kcmd_buffer_t *cmdbuf,
+					drm_r300_cmd_header_t header)
+{
+	int reg;
+	int sz;
+	RING_LOCALS;
+
+	sz = header.packet0.count;
+	reg = (header.packet0.reghi << 8) | header.packet0.reglo;
+
+	if (!sz)
+		return 0;
+
+	if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
+		return -EINVAL;
+
+	if (reg + sz * 4 >= 0x10000) {
+		DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
+			  sz);
+		return -EINVAL;
+	}
+
+	if (r300_check_range(reg, sz)) {
+		/* go and check everything */
+		return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
+							   header);
+	}
+	/* the rest of the data is safe to emit, whatever values the user passed */
+
+	BEGIN_RING(1 + sz);
+	OUT_RING(CP_PACKET0(reg, sz - 1));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+/**
+ * Uploads user-supplied vertex program instructions or parameters onto
+ * the graphics card.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
+				    drm_radeon_kcmd_buffer_t *cmdbuf,
+				    drm_r300_cmd_header_t header)
+{
+	int sz;
+	int addr;
+	RING_LOCALS;
+
+	sz = header.vpu.count;
+	addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
+
+	if (!sz)
+		return 0;
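+	/* each VPU upload element is one 4-dword (16-byte) vector */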
+	if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
+		return -EINVAL;
+
+	/* VAP is very sensitive so we purge cache before we program it
+	 * and we also flush its state before & after */
+	BEGIN_RING(6);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_DC_FLUSH);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+	/* set flush flag */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
+	BEGIN_RING(3 + sz * 4);
+	OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
+	OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
+	ADVANCE_RING();
+
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+/**
+ * Emit a clear packet from userspace.
+ * Called by r300_emit_packet3.
+ */
+static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
+				      drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	RING_LOCALS;
+
+	if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
+		return -EINVAL;
+
+	BEGIN_RING(10);
+	OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
+	OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
+		 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
+	ADVANCE_RING();
+
+	BEGIN_RING(4);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_DC_FLUSH);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	ADVANCE_RING();
+	/* set flush flag */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED;
+
+	return 0;
+}
+
+static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
+					       drm_radeon_kcmd_buffer_t *cmdbuf,
+					       u32 header)
+{
+	int count, i, k;
+#define MAX_ARRAY_PACKET  64
+	u32 *data;
+	u32 narrays;
+	RING_LOCALS;
+
+	count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+
+	if ((count + 1) > MAX_ARRAY_PACKET) {
+		DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
+			  count);
+		return -EINVAL;
+	}
+	/* carefully check packet contents */
+
+	/* We have already read the header so advance the buffer. */
+	drm_buffer_advance(cmdbuf->buffer, 4);
+
+	narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+	k = 0;
+	i = 1;
+	while ((k < narrays) && (i < (count + 1))) {
+		i++;		/* skip attribute field */
+		data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+		if (!radeon_check_offset(dev_priv, *data)) {
+			DRM_ERROR
+			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
+			     k, i);
+			return -EINVAL;
+		}
+		k++;
+		i++;
+		if (k == narrays)
+			break;
+		/* have one more to process, they come in pairs */
+		data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+		if (!radeon_check_offset(dev_priv, *data)) {
+			DRM_ERROR
+			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
+			     k, i);
+			return -EINVAL;
+		}
+		k++;
+		i++;
+	}
+	/* do the counts match what we expect ? */
+	if ((k != narrays) || (i != (count + 1))) {
+		DRM_ERROR
+		    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
+		     k, i, narrays, count + 1);
+		return -EINVAL;
+	}
+
+	/* all clear, output packet */
+
+	BEGIN_RING(count + 2);
+	OUT_RING(header);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
+					     drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+	int count, ret;
+	RING_LOCALS;
+
+	count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+
+	if (*cmd & 0x8000) {
+		u32 offset;
+		u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+		if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+
+			u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+			offset = *cmd2 << 10;
+			ret = !radeon_check_offset(dev_priv, offset);
+			if (ret) {
+				DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
+				return -EINVAL;
+			}
+		}
+
+		if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+		    (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+			u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+			offset = *cmd3 << 10;
+			ret = !radeon_check_offset(dev_priv, offset);
+			if (ret) {
+				DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
+				return -EINVAL;
+			}
+		}
+	}
+
+	BEGIN_RING(count+2);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
+					    drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+	u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+	int count;
+	int expected_count;
+	RING_LOCALS;
+
+	count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+
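+	/* cmd1 holds the VAP_VF_CNTL value: the index count sits in the top
+	 * 16 bits, and 16-bit indices pack two per dword, so round up */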
+	expected_count = *cmd1 >> 16;
+	if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
+		expected_count = (expected_count + 1) / 2;
+
+	if (count && count != expected_count) {
+		DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
+			count, expected_count);
+		return -EINVAL;
+	}
+
+	BEGIN_RING(count+2);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
+	ADVANCE_RING();
+
+	if (!count) {
+		drm_r300_cmd_header_t stack_header, *header;
+		u32 *cmd1, *cmd2, *cmd3;
+
+		if (drm_buffer_unprocessed(cmdbuf->buffer)
+				< 4*4 + sizeof(stack_header)) {
+			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
+			return -EINVAL;
+		}
+
+		header = drm_buffer_read_object(cmdbuf->buffer,
+				sizeof(stack_header), &stack_header);
+
+		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+		cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+		cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+		cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+
+		if (header->header.cmd_type != R300_CMD_PACKET3 ||
+		    header->packet3.packet != R300_CMD_PACKET3_RAW ||
+		    *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
+			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
+			return -EINVAL;
+		}
+
+		if ((*cmd1 & 0x8000ffff) != 0x80000810) {
+			DRM_ERROR("Invalid indx_buffer reg address %08X\n",
+					*cmd1);
+			return -EINVAL;
+		}
+		if (!radeon_check_offset(dev_priv, *cmd2)) {
+			DRM_ERROR("Invalid indx_buffer offset is %08X\n",
+					*cmd2);
+			return -EINVAL;
+		}
+		if (*cmd3 != expected_count) {
+			DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
+				*cmd3, expected_count);
+			return -EINVAL;
+		}
+
+		BEGIN_RING(4);
+		OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
+		ADVANCE_RING();
+	}
+
+	return 0;
+}
+
+static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
+					    drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	u32 *header;
+	int count;
+	RING_LOCALS;
+
+	if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
+		return -EINVAL;
+
+	/* FIXME: this simply emits a packet without much checking.
+	 * We need to be smarter. */
+
+	/* obtain first word - actual packet3 header */
+	header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+
+	/* Is it packet 3 ? */
+	if ((*header >> 30) != 0x3) {
+		DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
+		return -EINVAL;
+	}
+
+	count = (*header >> 16) & 0x3fff;
+
+	/* Check again now that we know how much data to expect */
+	if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
+		DRM_ERROR
+		    ("Expected packet3 of length %d but have only %d bytes left\n",
+		     (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
+		return -EINVAL;
+	}
+
+	/* Is it a packet type we know about ? */
+	switch (*header & 0xff00) {
+	case RADEON_3D_LOAD_VBPNTR:	/* load vertex array pointers */
+		return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);
+
+	case RADEON_CNTL_BITBLT_MULTI:
+		return r300_emit_bitblt_multi(dev_priv, cmdbuf);
+
+	case RADEON_CP_INDX_BUFFER:
+		DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
+		return -EINVAL;
+	case RADEON_CP_3D_DRAW_IMMD_2:
+		/* triggers drawing using in-packet vertex data */
+	case RADEON_CP_3D_DRAW_VBUF_2:
+		/* triggers drawing of vertex buffers setup elsewhere */
+		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
+					   RADEON_PURGE_EMITED);
+		break;
+	case RADEON_CP_3D_DRAW_INDX_2:
+		/* triggers drawing using indices to vertex buffer */
+		/* whenever we send vertex we clear flush & purge */
+		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
+					   RADEON_PURGE_EMITED);
+		return r300_emit_draw_indx_2(dev_priv, cmdbuf);
+	case RADEON_WAIT_FOR_IDLE:
+	case RADEON_CP_NOP:
+		/* these packets are safe */
+		break;
+	default:
+		DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
+		return -EINVAL;
+	}
+
+	BEGIN_RING(count + 2);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+/**
+ * Emit a rendering packet3 from userspace.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
+					drm_radeon_kcmd_buffer_t *cmdbuf,
+					drm_r300_cmd_header_t header)
+{
+	int n;
+	int ret;
+	int orig_iter = cmdbuf->buffer->iterator;
+
+	/* This is a do-while-loop so that we run the interior at least once,
+	 * even if cmdbuf->nbox is 0. See r300_emit_cliprects for the rationale.
+	 */
+	n = 0;
+	do {
+		if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
+			ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
+			if (ret)
+				return ret;
+
+			cmdbuf->buffer->iterator = orig_iter;
+		}
+
+		switch (header.packet3.packet) {
+		case R300_CMD_PACKET3_CLEAR:
+			DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
+			ret = r300_emit_clear(dev_priv, cmdbuf);
+			if (ret) {
+				DRM_ERROR("r300_emit_clear failed\n");
+				return ret;
+			}
+			break;
+
+		case R300_CMD_PACKET3_RAW:
+			DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
+			ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
+			if (ret) {
+				DRM_ERROR("r300_emit_raw_packet3 failed\n");
+				return ret;
+			}
+			break;
+
+		default:
+			DRM_ERROR("bad packet3 type %i at byte %d\n",
+				  header.packet3.packet,
+				  cmdbuf->buffer->iterator - (int)sizeof(header));
+			return -EINVAL;
+		}
+
+		n += R300_SIMULTANEOUS_CLIPRECTS;
+	} while (n < cmdbuf->nbox);
+
+	return 0;
+}
+
+/* Some of the R300 chips seem to be extremely touchy about the two registers
+ * that are configured in r300_pacify.
+ * Among the worst offenders is the R300 ND (0x4E44): when userspace
+ * sends a command buffer that contains only state setting commands and a
+ * vertex program/parameter upload sequence, this will eventually lead to a
+ * lockup, unless the sequence is bracketed by calls to r300_pacify.
+ * So we should take great care to *always* call r300_pacify before
+ * *anything* 3D related, and again afterwards. This is what the
+ * call bracket in r300_do_cp_cmdbuf is for.
+ */
+
+/**
+ * Emit the sequence to pacify R300.
+ */
+static void r300_pacify(drm_radeon_private_t *dev_priv)
+{
+	uint32_t cache_z, cache_3d, cache_2d;
+	RING_LOCALS;
+
+	cache_z = R300_ZC_FLUSH;
+	cache_2d = R300_RB2D_DC_FLUSH;
+	cache_3d = R300_RB3D_DC_FLUSH;
+	if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
+		/* we can purge: primitives were drawn since the last purge */
+		cache_z |= R300_ZC_FREE;
+		cache_2d |= R300_RB2D_DC_FREE;
+		cache_3d |= R300_RB3D_DC_FREE;
+	}
+
+	/* flush & purge zbuffer */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
+	OUT_RING(cache_z);
+	ADVANCE_RING();
+	/* flush & purge 3d */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(cache_3d);
+	ADVANCE_RING();
+	/* flush & purge texture */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+	/* FIXME: is this one really needed? */
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
+	OUT_RING(0);
+	ADVANCE_RING();
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
+	ADVANCE_RING();
+	/* flush & purge 2d through E2 as RB2D will trigger lockup */
+	BEGIN_RING(4);
+	OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(cache_2d);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
+		 RADEON_WAIT_HOST_IDLECLEAN);
+	ADVANCE_RING();
+	/* set flush & purge flags */
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
+}
+
+/**
+ * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
+ * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
+ * be careful about how this function is called.
+ */
+static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
+{
+	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+
+	buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
+	buf->pending = 1;
+	buf->used = 0;
+}
+
+static void r300_cmd_wait(drm_radeon_private_t *dev_priv,
+			  drm_r300_cmd_header_t header)
+{
+	u32 wait_until;
+	RING_LOCALS;
+
+	if (!header.wait.flags)
+		return;
+
+	wait_until = 0;
+
+	switch(header.wait.flags) {
+	case R300_WAIT_2D:
+		wait_until = RADEON_WAIT_2D_IDLE;
+		break;
+	case R300_WAIT_3D:
+		wait_until = RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_3D:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+		wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	default:
+		return;
+	}
+
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(wait_until);
+	ADVANCE_RING();
+}
+
+static int r300_scratch(drm_radeon_private_t *dev_priv,
+			drm_radeon_kcmd_buffer_t *cmdbuf,
+			drm_r300_cmd_header_t header)
+{
+	u32 *ref_age_base;
+	u32 i, *buf_idx, h_pending;
+	u64 *ptr_addr;
+	u64 stack_ptr_addr;
+	RING_LOCALS;
+
+	if (drm_buffer_unprocessed(cmdbuf->buffer) <
+	    (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
+		return -EINVAL;
+	}
+
+	if (header.scratch.reg >= 5) {
+		return -EINVAL;
+	}
+
+	dev_priv->scratch_ages[header.scratch.reg]++;
+
+	ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
+			sizeof(stack_ptr_addr), &stack_ptr_addr);
+	ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr);
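+	/* ref_age_base points into userspace memory laid out as one
+	 * { age, pending } u32 pair (8 bytes) per buffer */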
+
+	for (i = 0; i < header.scratch.n_bufs; i++) {
+		buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+		*buf_idx *= 2; /* 8 bytes per buf */
+
+		if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
+				&dev_priv->scratch_ages[header.scratch.reg],
+				sizeof(u32)))
+			return -EINVAL;
+
+		if (DRM_COPY_FROM_USER(&h_pending,
+				ref_age_base + *buf_idx + 1,
+				sizeof(u32)))
+			return -EINVAL;
+
+		if (h_pending == 0)
+			return -EINVAL;
+
+		h_pending--;
+
+		if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
+					&h_pending,
+					sizeof(u32)))
+			return -EINVAL;
+
+		drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
+	}
+
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0));
+	OUT_RING(dev_priv->scratch_ages[header.scratch.reg]);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+/**
+ * Uploads user-supplied vertex program instructions or parameters onto
+ * the graphics card.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
+				       drm_radeon_kcmd_buffer_t *cmdbuf,
+				       drm_r300_cmd_header_t header)
+{
+	int sz;
+	int addr;
+	int type;
+	int isclamp;
+	int stride;
+	RING_LOCALS;
+
+	sz = header.r500fp.count;
+	/* the address is 9 bits (0-8); bit 0 of adrhi_flags supplies the high address bit */
+	addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
+
+	type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
+	isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
+
+	addr |= (type << 16);
+	addr |= (isclamp << 17);
+
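+	/* constant uploads are 4 dwords per vector, instruction uploads 6 */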
+	stride = type ? 4 : 6;
+
+	DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
+	if (!sz)
+		return 0;
+	if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
+		return -EINVAL;
+
+	BEGIN_RING(3 + sz * stride);
+	OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
+	OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);
+
+	ADVANCE_RING();
+
+	return 0;
+}
+
+/**
+ * Parses and validates a user-supplied command buffer and emits appropriate
+ * commands on the DMA ring buffer.
+ * Called by the ioctl handler function radeon_cp_cmdbuf.
+ */
+int r300_do_cp_cmdbuf(struct drm_device *dev,
+		      struct drm_file *file_priv,
+		      drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf = NULL;
+	int emit_dispatch_age = 0;
+	int ret = 0;
+
+	DRM_DEBUG("\n");
+
+	/* pacify */
+	r300_pacify(dev_priv);
+
+	if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
+		ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
+		if (ret)
+			goto cleanup;
+	}
+
+	while (drm_buffer_unprocessed(cmdbuf->buffer)
+			>= sizeof(drm_r300_cmd_header_t)) {
+		int idx;
+		drm_r300_cmd_header_t *header, stack_header;
+
+		header = drm_buffer_read_object(cmdbuf->buffer,
+				sizeof(stack_header), &stack_header);
+
+		switch (header->header.cmd_type) {
+		case R300_CMD_PACKET0:
+			DRM_DEBUG("R300_CMD_PACKET0\n");
+			ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
+			if (ret) {
+				DRM_ERROR("r300_emit_packet0 failed\n");
+				goto cleanup;
+			}
+			break;
+
+		case R300_CMD_VPU:
+			DRM_DEBUG("R300_CMD_VPU\n");
+			ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
+			if (ret) {
+				DRM_ERROR("r300_emit_vpu failed\n");
+				goto cleanup;
+			}
+			break;
+
+		case R300_CMD_PACKET3:
+			DRM_DEBUG("R300_CMD_PACKET3\n");
+			ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
+			if (ret) {
+				DRM_ERROR("r300_emit_packet3 failed\n");
+				goto cleanup;
+			}
+			break;
+
+		case R300_CMD_END3D:
+			DRM_DEBUG("R300_CMD_END3D\n");
+			/* TODO:
+			   Ideally the userspace driver should not need to issue
+			   this call, i.e. the drm driver should issue it
+			   automatically and prevent lockups.
+
+			   In practice, we do not understand why this call is
+			   needed and what it does (except for some vague guesses
+			   that it has to do with cache coherence), and so the
+			   userspace driver does it.
+
+			   Once we are sure which uses prevent lockups, the code
+			   could be moved into the kernel and the userspace driver
+			   would no longer need to use this command.
+
+			   Note that issuing this command does not hurt anything
+			   except, possibly, performance. */
+			r300_pacify(dev_priv);
+			break;
+
+		case R300_CMD_CP_DELAY:
+			/* simple enough, we can do it here */
+			DRM_DEBUG("R300_CMD_CP_DELAY\n");
+			{
+				int i;
+				RING_LOCALS;
+
+				BEGIN_RING(header->delay.count);
+				for (i = 0; i < header->delay.count; i++)
+					OUT_RING(RADEON_CP_PACKET2);
+				ADVANCE_RING();
+			}
+			break;
+
+		case R300_CMD_DMA_DISCARD:
+			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
+			idx = header->dma.buf_idx;
+			if (idx < 0 || idx >= dma->buf_count) {
+				DRM_ERROR("buffer index %d (of %d max)\n",
+					  idx, dma->buf_count - 1);
+				ret = -EINVAL;
+				goto cleanup;
+			}
+
+			buf = dma->buflist[idx];
+			if (buf->file_priv != file_priv || buf->pending) {
+				DRM_ERROR("bad buffer %p %p %d\n",
+					  buf->file_priv, file_priv,
+					  buf->pending);
+				ret = -EINVAL;
+				goto cleanup;
+			}
+
+			emit_dispatch_age = 1;
+			r300_discard_buffer(dev, file_priv->master, buf);
+			break;
+
+		case R300_CMD_WAIT:
+			DRM_DEBUG("R300_CMD_WAIT\n");
+			r300_cmd_wait(dev_priv, *header);
+			break;
+
+		case R300_CMD_SCRATCH:
+			DRM_DEBUG("R300_CMD_SCRATCH\n");
+			ret = r300_scratch(dev_priv, cmdbuf, *header);
+			if (ret) {
+				DRM_ERROR("r300_scratch failed\n");
+				goto cleanup;
+			}
+			break;
+
+		case R300_CMD_R500FP:
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
+				DRM_ERROR("Calling r500 command on r300 card\n");
+				ret = -EINVAL;
+				goto cleanup;
+			}
+			DRM_DEBUG("R300_CMD_R500FP\n");
+			ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
+			if (ret) {
+				DRM_ERROR("r300_emit_r500fp failed\n");
+				goto cleanup;
+			}
+			break;
+		default:
+			DRM_ERROR("bad cmd_type %i at byte %d\n",
+				  header->header.cmd_type,
+				  cmdbuf->buffer->iterator - (int)sizeof(*header));
+			ret = -EINVAL;
+			goto cleanup;
+		}
+	}
+
+	DRM_DEBUG("END\n");
+
+cleanup:
+	r300_pacify(dev_priv);
+
+	/* We emit the vertex buffer age here, outside the pacifier "brackets"
+	 * for two reasons:
+	 *  (1) This may coalesce multiple age emissions into a single one and
+	 *  (2) more importantly, some chips lock up hard when scratch registers
+	 *      are written inside the pacifier bracket.
+	 */
+	if (emit_dispatch_age) {
+		RING_LOCALS;
+
+		/* Emit the vertex buffer age */
+		BEGIN_RING(2);
+		RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch);
+		ADVANCE_RING();
+	}
+
+	COMMIT_RING();
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/r300_reg.h b/linux-imx/drivers/gpu/drm/radeon/r300_reg.h
new file mode 100644
index 0000000..00c0d2b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r300_reg.h
@@ -0,0 +1,1789 @@
+/*
+ * Copyright 2005 Nicolai Haehnle et al.
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Nicolai Haehnle
+ *          Jerome Glisse
+ */
+#ifndef _R300_REG_H_
+#define _R300_REG_H_
+
+#define R300_SURF_TILE_MACRO (1<<16)
+#define R300_SURF_TILE_MICRO (2<<16)
+#define R300_SURF_TILE_BOTH (3<<16)
+
+
+#define R300_MC_INIT_MISC_LAT_TIMER	0x180
+#	define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT	0
+#	define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT	4
+#	define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT	8
+#	define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT	12
+#	define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT	16
+#	define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT	20
+#	define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT	24
+#	define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT	28
+
+#define R300_MC_INIT_GFX_LAT_TIMER	0x154
+#	define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT	0
+#	define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT	4
+#	define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT	8
+#	define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT	12
+#	define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT	16
+#	define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT	20
+#	define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT	24
+#	define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT	28
+
+/*
+ * This file contains registers and constants for the R300. They have been
+ * found mostly by examining command buffers captured using glxtest, as well
+ * as by extrapolating some known registers and constants from the R200.
+ * I am fairly certain that they are correct unless stated otherwise
+ * in comments.
+ */
+
+#define R300_SE_VPORT_XSCALE                0x1D98
+#define R300_SE_VPORT_XOFFSET               0x1D9C
+#define R300_SE_VPORT_YSCALE                0x1DA0
+#define R300_SE_VPORT_YOFFSET               0x1DA4
+#define R300_SE_VPORT_ZSCALE                0x1DA8
+#define R300_SE_VPORT_ZOFFSET               0x1DAC
+
+
+/*
+ * Vertex Array Processing (VAP) Control
+ * Stolen from r200 code from Christoph Brill (It's a guess!)
+ */
+#define R300_VAP_CNTL	0x2080
+
+/* This register is written directly and also starts data section
+ * in many 3d CP_PACKET3's
+ */
+#define R300_VAP_VF_CNTL	0x2084
+#	define	R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT              0
+#	define  R300_VAP_VF_CNTL__PRIM_NONE                     (0<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_POINTS                   (1<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINES                    (2<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINE_STRIP               (3<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLES                (4<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN             (5<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP           (6<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINE_LOOP                (12<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_QUADS                    (13<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_QUAD_STRIP               (14<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_POLYGON                  (15<<0)
+
+#	define	R300_VAP_VF_CNTL__PRIM_WALK__SHIFT              4
+	/* State based - direct writes to registers trigger vertex
+	   generation */
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED         (0<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_INDICES             (1<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST         (2<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED     (3<<4)
+
+	/* I don't think I've seen these three used. */
+#	define	R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT            6
+#	define	R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT     9
+#	define	R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT        10
+
+	/* index size - when not set the indices are assumed to be 16 bit */
+#	define	R300_VAP_VF_CNTL__INDEX_SIZE_32bit              (1<<11)
+	/* number of vertices */
+#	define	R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT           16
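+
+/* Illustrative sketch, not from any documentation: assuming the field
+ * guesses above are right, an indexed list of 12 triangles (36 vertices)
+ * would use a VF_CNTL word like
+ *
+ *	u32 vf_cntl = R300_VAP_VF_CNTL__PRIM_TRIANGLES |
+ *		      R300_VAP_VF_CNTL__PRIM_WALK_INDICES |
+ *		      (36 << R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT);
+ */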
+
+/* BEGIN: Wild guesses */
+#define R300_VAP_OUTPUT_VTX_FMT_0           0x2090
+#       define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT     (1<<0)
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT   (1<<1)
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
+
+#define R300_VAP_OUTPUT_VTX_FMT_1           0x2094
+	/* each of the following is 3 bits wide and specifies the number
+	   of components */
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
+/* END: Wild guesses */
+
+#define R300_SE_VTE_CNTL                  0x20b0
+#	define     R300_VPORT_X_SCALE_ENA                0x00000001
+#	define     R300_VPORT_X_OFFSET_ENA               0x00000002
+#	define     R300_VPORT_Y_SCALE_ENA                0x00000004
+#	define     R300_VPORT_Y_OFFSET_ENA               0x00000008
+#	define     R300_VPORT_Z_SCALE_ENA                0x00000010
+#	define     R300_VPORT_Z_OFFSET_ENA               0x00000020
+#	define     R300_VTX_XY_FMT                       0x00000100
+#	define     R300_VTX_Z_FMT                        0x00000200
+#	define     R300_VTX_W0_FMT                       0x00000400
+#	define     R300_VTX_W0_NORMALIZE                 0x00000800
+#	define     R300_VTX_ST_DENORMALIZED              0x00001000
+
+/* BEGIN: Vertex data assembly - lots of uncertainties */
+
+/* gap */
+
+#define R300_VAP_CNTL_STATUS              0x2140
+#	define R300_VC_NO_SWAP                  (0 << 0)
+#	define R300_VC_16BIT_SWAP               (1 << 0)
+#	define R300_VC_32BIT_SWAP               (2 << 0)
+#	define R300_VAP_TCL_BYPASS		(1 << 8)
+
+/* gap */
+
+/* Where do we get our vertex data?
+ *
+ * Vertex data comes either from immediate mode registers or from
+ * vertex arrays.
+ * There appears to be no mixed mode (though we can force the pitch of
+ * vertex arrays to 0, effectively reusing the same element over and over
+ * again).
+ *
+ * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
+ * if these registers influence vertex array processing.
+ *
+ * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
+ *
+ * In both cases, vertex attributes are then passed through INPUT_ROUTE.
+ *
+ * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
+ * into the vertex processor's input registers.
+ * The first word routes the first input, the second word the second, etc.
+ * The corresponding input is routed into the register with the given index.
+ * The list is ended by a word with INPUT_ROUTE_END set.
+ *
+ * Always set COMPONENTS_4 in immediate mode.
+ */
+
+#define R300_VAP_INPUT_ROUTE_0_0            0x2150
+#       define R300_INPUT_ROUTE_COMPONENTS_1     (0 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_2     (1 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_3     (2 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_4     (3 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_RGBA  (4 << 0) /* GUESS */
+#       define R300_VAP_INPUT_ROUTE_IDX_SHIFT    8
+#       define R300_VAP_INPUT_ROUTE_IDX_MASK     (31 << 8) /* GUESS */
+#       define R300_VAP_INPUT_ROUTE_END          (1 << 13)
+#       define R300_INPUT_ROUTE_IMMEDIATE_MODE   (0 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_FLOAT            (1 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_UNSIGNED_BYTE    (2 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_FLOAT_COLOR      (3 << 14) /* GUESS */
+#define R300_VAP_INPUT_ROUTE_0_1            0x2154
+#define R300_VAP_INPUT_ROUTE_0_2            0x2158
+#define R300_VAP_INPUT_ROUTE_0_3            0x215C
+#define R300_VAP_INPUT_ROUTE_0_4            0x2160
+#define R300_VAP_INPUT_ROUTE_0_5            0x2164
+#define R300_VAP_INPUT_ROUTE_0_6            0x2168
+#define R300_VAP_INPUT_ROUTE_0_7            0x216C
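+
+/* Sketch, leaning on the GUESSed fields above: a single 4-component
+ * float attribute routed into VAP input register 0 would be a one-word
+ * list, carrying END on the same word:
+ *
+ *	u32 route0 = R300_INPUT_ROUTE_COMPONENTS_4 |
+ *		     (0 << R300_VAP_INPUT_ROUTE_IDX_SHIFT) |
+ *		     R300_INPUT_ROUTE_FLOAT |
+ *		     R300_VAP_INPUT_ROUTE_END;
+ */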
+
+/* gap */
+
+/* Notes:
+ *  - always set up to produce at least two attributes:
+ *    if the vertex program uses only position, fglrx will set normal, too
+ *  - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal.
+ */
+#define R300_VAP_INPUT_CNTL_0               0x2180
+#       define R300_INPUT_CNTL_0_COLOR           0x00000001
+#define R300_VAP_INPUT_CNTL_1               0x2184
+#       define R300_INPUT_CNTL_POS               0x00000001
+#       define R300_INPUT_CNTL_NORMAL            0x00000002
+#       define R300_INPUT_CNTL_COLOR             0x00000004
+#       define R300_INPUT_CNTL_TC0               0x00000400
+#       define R300_INPUT_CNTL_TC1               0x00000800
+#       define R300_INPUT_CNTL_TC2               0x00001000 /* GUESS */
+#       define R300_INPUT_CNTL_TC3               0x00002000 /* GUESS */
+#       define R300_INPUT_CNTL_TC4               0x00004000 /* GUESS */
+#       define R300_INPUT_CNTL_TC5               0x00008000 /* GUESS */
+#       define R300_INPUT_CNTL_TC6               0x00010000 /* GUESS */
+#       define R300_INPUT_CNTL_TC7               0x00020000 /* GUESS */
+
+/* gap */
+
+/* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0
+ * are set to a swizzling bit pattern, other words are 0.
+ *
+ * In immediate mode, the pattern is always set to xyzw. In vertex array
+ * mode, the swizzling pattern is e.g. used to set zw components in texture
+ * coordinates with only two components.
+ */
+#define R300_VAP_INPUT_ROUTE_1_0            0x21E0
+#       define R300_INPUT_ROUTE_SELECT_X    0
+#       define R300_INPUT_ROUTE_SELECT_Y    1
+#       define R300_INPUT_ROUTE_SELECT_Z    2
+#       define R300_INPUT_ROUTE_SELECT_W    3
+#       define R300_INPUT_ROUTE_SELECT_ZERO 4
+#       define R300_INPUT_ROUTE_SELECT_ONE  5
+#       define R300_INPUT_ROUTE_SELECT_MASK 7
+#       define R300_INPUT_ROUTE_X_SHIFT     0
+#       define R300_INPUT_ROUTE_Y_SHIFT     3
+#       define R300_INPUT_ROUTE_Z_SHIFT     6
+#       define R300_INPUT_ROUTE_W_SHIFT     9
+#       define R300_INPUT_ROUTE_ENABLE      (15 << 12)
+#define R300_VAP_INPUT_ROUTE_1_1            0x21E4
+#define R300_VAP_INPUT_ROUTE_1_2            0x21E8
+#define R300_VAP_INPUT_ROUTE_1_3            0x21EC
+#define R300_VAP_INPUT_ROUTE_1_4            0x21F0
+#define R300_VAP_INPUT_ROUTE_1_5            0x21F4
+#define R300_VAP_INPUT_ROUTE_1_6            0x21F8
+#define R300_VAP_INPUT_ROUTE_1_7            0x21FC
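+
+/* Sketch: the plain xyzw pattern used in immediate mode would be
+ *
+ *	u32 route1 = (R300_INPUT_ROUTE_SELECT_X << R300_INPUT_ROUTE_X_SHIFT) |
+ *		     (R300_INPUT_ROUTE_SELECT_Y << R300_INPUT_ROUTE_Y_SHIFT) |
+ *		     (R300_INPUT_ROUTE_SELECT_Z << R300_INPUT_ROUTE_Z_SHIFT) |
+ *		     (R300_INPUT_ROUTE_SELECT_W << R300_INPUT_ROUTE_W_SHIFT) |
+ *		     R300_INPUT_ROUTE_ENABLE;
+ */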
+
+/* END: Vertex data assembly */
+
+/* gap */
+
+/* BEGIN: Upload vertex program and data */
+
+/*
+ * The programmable vertex shader unit has a memory bank of unknown size
+ * that can be written to in 16 byte units by writing the address into
+ * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
+ *
+ * Pointers into the memory bank are always in multiples of 16 bytes.
+ *
+ * The memory bank is divided into areas with fixed meaning.
+ *
+ * Starting at address UPLOAD_PROGRAM: Vertex program instructions.
+ * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
+ * whereas the difference between known addresses suggests size 512.
+ *
+ * Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
+ * Native reported limits and the VPI layout suggest size 256, whereas
+ * difference between known addresses suggests size 512.
+ *
+ * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
+ * floating point pointsize. The exact purpose of this state is uncertain,
+ * as there is also the R300_RE_POINTSIZE register.
+ *
+ * Multiple vertex programs and parameter sets can be loaded at once,
+ * which could explain the size discrepancy.
+ */
+#define R300_VAP_PVS_UPLOAD_ADDRESS         0x2200
+#       define R300_PVS_UPLOAD_PROGRAM           0x00000000
+#       define R300_PVS_UPLOAD_PARAMETERS        0x00000200
+#       define R300_PVS_UPLOAD_POINTSIZE         0x00000406
+
+/* gap */
+
+#define R300_VAP_PVS_UPLOAD_DATA            0x2208
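+
+/* Upload sketch; reg_write() is a hypothetical MMIO helper, not a real
+ * function in this driver. To store one VPI instruction (4 DWORDs) into
+ * program slot n, set the address once and stream the data:
+ *
+ *	reg_write(R300_VAP_PVS_UPLOAD_ADDRESS, R300_PVS_UPLOAD_PROGRAM + n);
+ *	for (i = 0; i < 4; i++)
+ *		reg_write(R300_VAP_PVS_UPLOAD_DATA, instr[i]);
+ */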
+
+/* END: Upload vertex program and data */
+
+/* gap */
+
+/* I do not know the purpose of this register. However, I do know that
+ * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
+ * for normal rendering.
+ */
+#define R300_VAP_UNKNOWN_221C               0x221C
+#       define R300_221C_NORMAL                  0x00000000
+#       define R300_221C_CLEAR                   0x0001C000
+
+/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first
+ * plane is per-pixel and the second plane is per-vertex.
+ *
+ * This was determined by experimentation alone but I believe it is correct.
+ *
+ * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest.
+ */
+#define R300_VAP_CLIP_X_0                   0x2220
+#define R300_VAP_CLIP_X_1                   0x2224
+#define R300_VAP_CLIP_Y_0                   0x2228
+#define R300_VAP_CLIP_Y_1                   0x2230
+
+/* gap */
+
+/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
+ * rendering commands and overwriting vertex program parameters.
+ * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
+ * avoids bugs caused by still-running shaders reading bad data from memory.
+ */
+#define R300_VAP_PVS_STATE_FLUSH_REG        0x2284
+
+/* Absolutely no clue what this register is about. */
+#define R300_VAP_UNKNOWN_2288               0x2288
+#       define R300_2288_R300                    0x00750000 /* -- nh */
+#       define R300_2288_RV350                   0x0000FFFF /* -- Vladimir */
+
+/* gap */
+
+/* Addresses are relative to the vertex program instruction area of the
+ * memory bank. PROGRAM_END points to the last instruction of the active
+ * program.
+ *
+ * The meaning of the two UNKNOWN fields is obviously not known. However,
+ * experiments so far have shown that both *must* point to an instruction
+ * inside the vertex program, otherwise the GPU locks up.
+ *
+ * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
+ * R300_PVS_CNTL_1_POS_END_SHIFT points to the instruction where the last
+ * write to position takes place.
+ *
+ * Most likely this is used to skip the rest of the program in cases
+ * where a group of vertices isn't visible. For some reason this "section"
+ * sometimes also accepts other instructions that have no relationship with
+ * position calculations.
+ */
+#define R300_VAP_PVS_CNTL_1                 0x22D0
+#       define R300_PVS_CNTL_1_PROGRAM_START_SHIFT   0
+#       define R300_PVS_CNTL_1_POS_END_SHIFT         10
+#       define R300_PVS_CNTL_1_PROGRAM_END_SHIFT     20
+/* Addresses are relative to the vertex program parameters area. */
+#define R300_VAP_PVS_CNTL_2                 0x22D4
+#       define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
+#       define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT  16
+#define R300_VAP_PVS_CNTL_3	           0x22D8
+#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
+#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
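+
+/* Sketch based on the description above: for an 8-instruction program
+ * whose last write to position is instruction 5, one would presumably
+ * program
+ *
+ *	u32 cntl1 = (0 << R300_PVS_CNTL_1_PROGRAM_START_SHIFT) |
+ *		    (5 << R300_PVS_CNTL_1_POS_END_SHIFT) |
+ *		    (7 << R300_PVS_CNTL_1_PROGRAM_END_SHIFT);
+ */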
+
+/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
+ * immediate vertices.
+ */
+#define R300_VAP_VTX_COLOR_R                0x2464
+#define R300_VAP_VTX_COLOR_G                0x2468
+#define R300_VAP_VTX_COLOR_B                0x246C
+#define R300_VAP_VTX_POS_0_X_1              0x2490 /* used for glVertex2*() */
+#define R300_VAP_VTX_POS_0_Y_1              0x2494
+#define R300_VAP_VTX_COLOR_PKD              0x249C /* RGBA */
+#define R300_VAP_VTX_POS_0_X_2              0x24A0 /* used for glVertex3*() */
+#define R300_VAP_VTX_POS_0_Y_2              0x24A4
+#define R300_VAP_VTX_POS_0_Z_2              0x24A8
+/* write 0 to indicate end of packet? */
+#define R300_VAP_VTX_END_OF_PKT             0x24AC
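+
+/* Immediate-mode sketch; the write ordering is a guess, and reg_write()
+ * and f2u() (float to raw bits) are hypothetical helpers:
+ *
+ *	reg_write(R300_VAP_VTX_COLOR_PKD, 0xffff0000);	(packed ARGB)
+ *	reg_write(R300_VAP_VTX_POS_0_X_1, f2u(x));
+ *	reg_write(R300_VAP_VTX_POS_0_Y_1, f2u(y));
+ *	reg_write(R300_VAP_VTX_END_OF_PKT, 0);
+ */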
+
+/* gap */
+
+/* These are values from r300_reg/r300_reg.h - they are known to be correct
+ * and are here so we can use one register file instead of several
+ * - Vladimir
+ */
+#define R300_GB_VAP_RASTER_VTX_FMT_0	0x4000
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT	(1<<0)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT	(1<<1)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT	(1<<2)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT	(1<<3)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT	(1<<4)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE	(0xf<<5)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT	(0x1<<16)
+
+#define R300_GB_VAP_RASTER_VTX_FMT_1	0x4004
+	/* each of the following is 3 bits wide and specifies the number
+	   of components */
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT	0
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT	3
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT	6
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT	9
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT	12
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT	15
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT	18
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT	21
+
+/* UNK31 seems to enable point-to-quad transformation on textures
+ * (or something closely related to that).
+ * This bit is rather fatal at the time being due to shortcomings on the
+ * pixel shader side.
+ */
+#define R300_GB_ENABLE	0x4008
+#	define R300_GB_POINT_STUFF_ENABLE	(1<<0)
+#	define R300_GB_LINE_STUFF_ENABLE	(1<<1)
+#	define R300_GB_TRIANGLE_STUFF_ENABLE	(1<<2)
+#	define R300_GB_STENCIL_AUTO_ENABLE	(1<<4)
+#	define R300_GB_UNK31			(1<<31)
+	/* each of the following is 2 bits wide */
+#define R300_GB_TEX_REPLICATE	0
+#define R300_GB_TEX_ST		1
+#define R300_GB_TEX_STR		2
+#	define R300_GB_TEX0_SOURCE_SHIFT	16
+#	define R300_GB_TEX1_SOURCE_SHIFT	18
+#	define R300_GB_TEX2_SOURCE_SHIFT	20
+#	define R300_GB_TEX3_SOURCE_SHIFT	22
+#	define R300_GB_TEX4_SOURCE_SHIFT	24
+#	define R300_GB_TEX5_SOURCE_SHIFT	26
+#	define R300_GB_TEX6_SOURCE_SHIFT	28
+#	define R300_GB_TEX7_SOURCE_SHIFT	30
+
+/* MSPOS - positions for multisample antialiasing (?) */
+#define R300_GB_MSPOS0	0x4010
+	/* shifts - each of the fields is 4 bits */
+#	define R300_GB_MSPOS0__MS_X0_SHIFT	0
+#	define R300_GB_MSPOS0__MS_Y0_SHIFT	4
+#	define R300_GB_MSPOS0__MS_X1_SHIFT	8
+#	define R300_GB_MSPOS0__MS_Y1_SHIFT	12
+#	define R300_GB_MSPOS0__MS_X2_SHIFT	16
+#	define R300_GB_MSPOS0__MS_Y2_SHIFT	20
+#	define R300_GB_MSPOS0__MSBD0_Y		24
+#	define R300_GB_MSPOS0__MSBD0_X		28
+
+#define R300_GB_MSPOS1	0x4014
+#	define R300_GB_MSPOS1__MS_X3_SHIFT	0
+#	define R300_GB_MSPOS1__MS_Y3_SHIFT	4
+#	define R300_GB_MSPOS1__MS_X4_SHIFT	8
+#	define R300_GB_MSPOS1__MS_Y4_SHIFT	12
+#	define R300_GB_MSPOS1__MS_X5_SHIFT	16
+#	define R300_GB_MSPOS1__MS_Y5_SHIFT	20
+#	define R300_GB_MSPOS1__MSBD1		24
+
+
+#define R300_GB_TILE_CONFIG	0x4018
+#	define R300_GB_TILE_ENABLE	(1<<0)
+#	define R300_GB_TILE_PIPE_COUNT_RV300	0
+#	define R300_GB_TILE_PIPE_COUNT_R300	(3<<1)
+#	define R300_GB_TILE_PIPE_COUNT_R420	(7<<1)
+#	define R300_GB_TILE_PIPE_COUNT_RV410	(3<<1)
+#	define R300_GB_TILE_SIZE_8		0
+#	define R300_GB_TILE_SIZE_16		(1<<4)
+#	define R300_GB_TILE_SIZE_32		(2<<4)
+#	define R300_GB_SUPER_SIZE_1		(0<<6)
+#	define R300_GB_SUPER_SIZE_2		(1<<6)
+#	define R300_GB_SUPER_SIZE_4		(2<<6)
+#	define R300_GB_SUPER_SIZE_8		(3<<6)
+#	define R300_GB_SUPER_SIZE_16		(4<<6)
+#	define R300_GB_SUPER_SIZE_32		(5<<6)
+#	define R300_GB_SUPER_SIZE_64		(6<<6)
+#	define R300_GB_SUPER_SIZE_128		(7<<6)
+#	define R300_GB_SUPER_X_SHIFT		9	/* 3 bits wide */
+#	define R300_GB_SUPER_Y_SHIFT		12	/* 3 bits wide */
+#	define R300_GB_SUPER_TILE_A		0
+#	define R300_GB_SUPER_TILE_B		(1<<15)
+#	define R300_GB_SUBPIXEL_1_12		0
+#	define R300_GB_SUBPIXEL_1_16		(1<<16)
+
+#define R300_GB_FIFO_SIZE	0x4024
+	/* each of the following is 2 bits wide */
+#define R300_GB_FIFO_SIZE_32	0
+#define R300_GB_FIFO_SIZE_64	1
+#define R300_GB_FIFO_SIZE_128	2
+#define R300_GB_FIFO_SIZE_256	3
+#	define R300_SC_IFIFO_SIZE_SHIFT	0
+#	define R300_SC_TZFIFO_SIZE_SHIFT	2
+#	define R300_SC_BFIFO_SIZE_SHIFT	4
+
+#	define R300_US_OFIFO_SIZE_SHIFT	12
+#	define R300_US_WFIFO_SIZE_SHIFT	14
+	/* the following use the same constants as above, but the meaning
+	   is times 2 (i.e. instead of 32 words it means 64) */
+#	define R300_RS_TFIFO_SIZE_SHIFT	6
+#	define R300_RS_CFIFO_SIZE_SHIFT	8
+#	define R300_US_RAM_SIZE_SHIFT		10
+	/* watermarks, 3 bits wide */
+#	define R300_RS_HIGHWATER_COL_SHIFT	16
+#	define R300_RS_HIGHWATER_TEX_SHIFT	19
+#	define R300_OFIFO_HIGHWATER_SHIFT	22	/* two bits only */
+#	define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT	24
+
+#define R300_GB_SELECT	0x401C
+#	define R300_GB_FOG_SELECT_C0A		0
+#	define R300_GB_FOG_SELECT_C1A		1
+#	define R300_GB_FOG_SELECT_C2A		2
+#	define R300_GB_FOG_SELECT_C3A		3
+#	define R300_GB_FOG_SELECT_1_1_W	4
+#	define R300_GB_FOG_SELECT_Z		5
+#	define R300_GB_DEPTH_SELECT_Z		0
+#	define R300_GB_DEPTH_SELECT_1_1_W	(1<<3)
+#	define R300_GB_W_SELECT_1_W		0
+#	define R300_GB_W_SELECT_1		(1<<4)
+
+#define R300_GB_AA_CONFIG		0x4020
+#	define R300_AA_DISABLE			0x00
+#	define R300_AA_ENABLE			0x01
+#	define R300_AA_SUBSAMPLES_2		0
+#	define R300_AA_SUBSAMPLES_3		(1<<1)
+#	define R300_AA_SUBSAMPLES_4		(2<<1)
+#	define R300_AA_SUBSAMPLES_6		(3<<1)
+
+/* gap */
+
+/* Zero to flush caches. */
+#define R300_TX_INVALTAGS                   0x4100
+#define R300_TX_FLUSH                       0x0
+
+/* The upper enable bits are guessed, based on fglrx reported limits. */
+#define R300_TX_ENABLE                      0x4104
+#       define R300_TX_ENABLE_0                  (1 << 0)
+#       define R300_TX_ENABLE_1                  (1 << 1)
+#       define R300_TX_ENABLE_2                  (1 << 2)
+#       define R300_TX_ENABLE_3                  (1 << 3)
+#       define R300_TX_ENABLE_4                  (1 << 4)
+#       define R300_TX_ENABLE_5                  (1 << 5)
+#       define R300_TX_ENABLE_6                  (1 << 6)
+#       define R300_TX_ENABLE_7                  (1 << 7)
+#       define R300_TX_ENABLE_8                  (1 << 8)
+#       define R300_TX_ENABLE_9                  (1 << 9)
+#       define R300_TX_ENABLE_10                 (1 << 10)
+#       define R300_TX_ENABLE_11                 (1 << 11)
+#       define R300_TX_ENABLE_12                 (1 << 12)
+#       define R300_TX_ENABLE_13                 (1 << 13)
+#       define R300_TX_ENABLE_14                 (1 << 14)
+#       define R300_TX_ENABLE_15                 (1 << 15)
+
+/* The pointsize is given in multiples of 6. The pointsize can be
+ * enormous: Clear() renders a single point that fills the entire
+ * framebuffer.
+ */
+#define R300_RE_POINTSIZE                   0x421C
+#       define R300_POINTSIZE_Y_SHIFT            0
+#       define R300_POINTSIZE_Y_MASK             (0xFFFF << 0) /* GUESS */
+#       define R300_POINTSIZE_X_SHIFT            16
+#       define R300_POINTSIZE_X_MASK             (0xFFFF << 16) /* GUESS */
+#       define R300_POINTSIZE_MAX             (R300_POINTSIZE_Y_MASK / 6)
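+
+/* Sketch: with sizes in multiples of 6, an 8-pixel point would be
+ *
+ *	u32 ps = ((8 * 6) << R300_POINTSIZE_X_SHIFT) |
+ *		 ((8 * 6) << R300_POINTSIZE_Y_SHIFT);
+ */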
+
+/* The line width is given in multiples of 6.
+ * In default mode lines are classified as vertical lines.
+ * HO: horizontal
+ * VE: vertical or horizontal
+ * HO & VE: no classification
+ */
+#define R300_RE_LINE_CNT                      0x4234
+#       define R300_LINESIZE_SHIFT            0
+#       define R300_LINESIZE_MASK             (0xFFFF << 0) /* GUESS */
+#       define R300_LINESIZE_MAX             (R300_LINESIZE_MASK / 6)
+#       define R300_LINE_CNT_HO               (1 << 16)
+#       define R300_LINE_CNT_VE               (1 << 17)
+
+/* Some sort of scale or clamp value for texcoordless textures. */
+#define R300_RE_UNK4238                       0x4238
+
+/* Something shade related */
+#define R300_RE_SHADE                         0x4274
+
+#define R300_RE_SHADE_MODEL                   0x4278
+#	define R300_RE_SHADE_MODEL_SMOOTH     0x3aaaa
+#	define R300_RE_SHADE_MODEL_FLAT       0x39595
+
+/* Dangerous */
+#define R300_RE_POLYGON_MODE                  0x4288
+#	define R300_PM_ENABLED                (1 << 0)
+#	define R300_PM_FRONT_POINT            (0 << 0)
+#	define R300_PM_BACK_POINT             (0 << 0)
+#	define R300_PM_FRONT_LINE             (1 << 4)
+#	define R300_PM_FRONT_FILL             (1 << 5)
+#	define R300_PM_BACK_LINE              (1 << 7)
+#	define R300_PM_BACK_FILL              (1 << 8)
+
+/* Fog parameters */
+#define R300_RE_FOG_SCALE                     0x4294
+#define R300_RE_FOG_START                     0x4298
+
+/* Not sure why there are duplicates of the factor and constant values.
+ * My best guess so far is that there are separate zbiases for test and write.
+ * Ordering might be wrong.
+ * Some of the tests indicate that fgl has a fallback implementation of zbias
+ * via pixel shaders.
+ */
+#define R300_RE_ZBIAS_CNTL                    0x42A0 /* GUESS */
+#define R300_RE_ZBIAS_T_FACTOR                0x42A4
+#define R300_RE_ZBIAS_T_CONSTANT              0x42A8
+#define R300_RE_ZBIAS_W_FACTOR                0x42AC
+#define R300_RE_ZBIAS_W_CONSTANT              0x42B0
+
+/* This register needs to be set to (1<<1) for RV350 to correctly
+ * perform the depth test (see --vb-triangles in r300_demo).
+ * Don't know about other chips. - Vladimir
+ * This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
+ * My guess is that there are two bits for each zbias primitive
+ * (FILL, LINE, POINT): one to enable the depth test and one for the
+ * depth write.
+ * Yet this doesn't explain why depth writes work ...
+ */
+#define R300_RE_OCCLUSION_CNTL		    0x42B4
+#	define R300_OCCLUSION_ON		(1<<1)
+
+#define R300_RE_CULL_CNTL                   0x42B8
+#       define R300_CULL_FRONT                   (1 << 0)
+#       define R300_CULL_BACK                    (1 << 1)
+#       define R300_FRONT_FACE_CCW               (0 << 2)
+#       define R300_FRONT_FACE_CW                (1 << 2)
+
+
+/* BEGIN: Rasterization / Interpolators - many guesses */
+
+/* 0_UNKNOWN_18 has always been set except for clear operations.
+ * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
+ * on the vertex program, *not* the fragment program)
+ */
+#define R300_RS_CNTL_0                      0x4300
+#       define R300_RS_CNTL_TC_CNT_SHIFT         2
+#       define R300_RS_CNTL_TC_CNT_MASK          (7 << 2)
+	/* number of color interpolators used */
+#	define R300_RS_CNTL_CI_CNT_SHIFT         7
+#       define R300_RS_CNTL_0_UNKNOWN_18         (1 << 18)
+	/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n
+	   register. */
+#define R300_RS_CNTL_1                      0x4304
+
+/* gap */
+
+/* Only used for texture coordinates.
+ * Use the source field to route texture coordinate input from the
+ * vertex program to the desired interpolator. Note that the source
+ * field is relative to the outputs the vertex program *actually*
+ * writes. If a vertex program only writes texcoord[1], this will
+ * be source index 0.
+ * Set INTERP_USED on all interpolators that produce data used by
+ * the fragment program. INTERP_USED looks like a swizzling mask,
+ * but I haven't seen it used that way.
+ *
+ * Note: The _UNKNOWN constants are always set in their respective
+ * register. I don't know if this is necessary.
+ */
+#define R300_RS_INTERP_0                    0x4310
+#define R300_RS_INTERP_1                    0x4314
+#       define R300_RS_INTERP_1_UNKNOWN          0x40
+#define R300_RS_INTERP_2                    0x4318
+#       define R300_RS_INTERP_2_UNKNOWN          0x80
+#define R300_RS_INTERP_3                    0x431C
+#       define R300_RS_INTERP_3_UNKNOWN          0xC0
+#define R300_RS_INTERP_4                    0x4320
+#define R300_RS_INTERP_5                    0x4324
+#define R300_RS_INTERP_6                    0x4328
+#define R300_RS_INTERP_7                    0x432C
+#       define R300_RS_INTERP_SRC_SHIFT          2
+#       define R300_RS_INTERP_SRC_MASK           (7 << 2)
+#       define R300_RS_INTERP_USED               0x00D10000
+
+/* These DWORDs control how vertex data is routed into fragment program
+ * registers, after interpolators.
+ */
+#define R300_RS_ROUTE_0                     0x4330
+#define R300_RS_ROUTE_1                     0x4334
+#define R300_RS_ROUTE_2                     0x4338
+#define R300_RS_ROUTE_3                     0x433C /* GUESS */
+#define R300_RS_ROUTE_4                     0x4340 /* GUESS */
+#define R300_RS_ROUTE_5                     0x4344 /* GUESS */
+#define R300_RS_ROUTE_6                     0x4348 /* GUESS */
+#define R300_RS_ROUTE_7                     0x434C /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_0     0
+#       define R300_RS_ROUTE_SOURCE_INTERP_1     1
+#       define R300_RS_ROUTE_SOURCE_INTERP_2     2
+#       define R300_RS_ROUTE_SOURCE_INTERP_3     3
+#       define R300_RS_ROUTE_SOURCE_INTERP_4     4
+#       define R300_RS_ROUTE_SOURCE_INTERP_5     5 /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_6     6 /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_7     7 /* GUESS */
+#       define R300_RS_ROUTE_ENABLE              (1 << 3) /* GUESS */
+#       define R300_RS_ROUTE_DEST_SHIFT          6
+#       define R300_RS_ROUTE_DEST_MASK           (31 << 6) /* GUESS */
+
+/* Special handling for color: When the fragment program uses color,
+ * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
+ * color register index.
+ *
+ * Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
+ * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
+ * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
+ * correct or not. - Oliver.
+ */
+#       define R300_RS_ROUTE_0_COLOR             (1 << 14)
+#       define R300_RS_ROUTE_0_COLOR_DEST_SHIFT  17
+#       define R300_RS_ROUTE_0_COLOR_DEST_MASK   (31 << 17) /* GUESS */
+/* As above, but for secondary color */
+#		define R300_RS_ROUTE_1_COLOR1            (1 << 14)
+#		define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
+#		define R300_RS_ROUTE_1_COLOR1_DEST_MASK  (31 << 17)
+#		define R300_RS_ROUTE_1_UNKNOWN11         (1 << 11)
+/* END: Rasterization / Interpolators - many guesses */
+
+/* Hierarchical Z Enable */
+#define R300_SC_HYPERZ                   0x43a4
+#	define R300_SC_HYPERZ_DISABLE     (0 << 0)
+#	define R300_SC_HYPERZ_ENABLE      (1 << 0)
+#	define R300_SC_HYPERZ_MIN         (0 << 1)
+#	define R300_SC_HYPERZ_MAX         (1 << 1)
+#	define R300_SC_HYPERZ_ADJ_256     (0 << 2)
+#	define R300_SC_HYPERZ_ADJ_128     (1 << 2)
+#	define R300_SC_HYPERZ_ADJ_64      (2 << 2)
+#	define R300_SC_HYPERZ_ADJ_32      (3 << 2)
+#	define R300_SC_HYPERZ_ADJ_16      (4 << 2)
+#	define R300_SC_HYPERZ_ADJ_8       (5 << 2)
+#	define R300_SC_HYPERZ_ADJ_4       (6 << 2)
+#	define R300_SC_HYPERZ_ADJ_2       (7 << 2)
+#	define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
+#	define R300_SC_HYPERZ_HZ_Z0MIN    (1 << 5)
+#	define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
+#	define R300_SC_HYPERZ_HZ_Z0MAX    (1 << 6)
+
+#define R300_SC_EDGERULE                 0x43a8
+
+/* BEGIN: Scissors and cliprects */
+
+/* There are four clipping rectangles. Their corner coordinates are inclusive.
+ * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
+ * on whether the pixel is inside cliprects 0-3, respectively. For example,
+ * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
+ * the number 3 (binary 0011).
+ * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
+ * the pixel is rasterized.
+ *
+ * In addition to this, there is a scissors rectangle. Only pixels inside the
+ * scissors rectangle are drawn. (coordinates are inclusive)
+ *
+ * For some reason, the top-left corner of the framebuffer is at (1440, 1440)
+ * for the purpose of clipping and scissors.
+ */
+#define R300_RE_CLIPRECT_TL_0               0x43B0
+#define R300_RE_CLIPRECT_BR_0               0x43B4
+#define R300_RE_CLIPRECT_TL_1               0x43B8
+#define R300_RE_CLIPRECT_BR_1               0x43BC
+#define R300_RE_CLIPRECT_TL_2               0x43C0
+#define R300_RE_CLIPRECT_BR_2               0x43C4
+#define R300_RE_CLIPRECT_TL_3               0x43C8
+#define R300_RE_CLIPRECT_BR_3               0x43CC
+#       define R300_CLIPRECT_OFFSET              1440
+#       define R300_CLIPRECT_MASK                0x1FFF
+#       define R300_CLIPRECT_X_SHIFT             0
+#       define R300_CLIPRECT_X_MASK              (0x1FFF << 0)
+#       define R300_CLIPRECT_Y_SHIFT             13
+#       define R300_CLIPRECT_Y_MASK              (0x1FFF << 13)
+#define R300_RE_CLIPRECT_CNTL               0x43D0
+#       define R300_CLIP_OUT                     (1 << 0)
+#       define R300_CLIP_0                       (1 << 1)
+#       define R300_CLIP_1                       (1 << 2)
+#       define R300_CLIP_10                      (1 << 3)
+#       define R300_CLIP_2                       (1 << 4)
+#       define R300_CLIP_20                      (1 << 5)
+#       define R300_CLIP_21                      (1 << 6)
+#       define R300_CLIP_210                     (1 << 7)
+#       define R300_CLIP_3                       (1 << 8)
+#       define R300_CLIP_30                      (1 << 9)
+#       define R300_CLIP_31                      (1 << 10)
+#       define R300_CLIP_310                     (1 << 11)
+#       define R300_CLIP_32                      (1 << 12)
+#       define R300_CLIP_320                     (1 << 13)
+#       define R300_CLIP_321                     (1 << 14)
+#       define R300_CLIP_3210                    (1 << 15)
+
+/* gap */
+
+#define R300_RE_SCISSORS_TL                 0x43E0
+#define R300_RE_SCISSORS_BR                 0x43E4
+#       define R300_SCISSORS_OFFSET              1440
+#       define R300_SCISSORS_X_SHIFT             0
+#       define R300_SCISSORS_X_MASK              (0x1FFF << 0)
+#       define R300_SCISSORS_Y_SHIFT             13
+#       define R300_SCISSORS_Y_MASK              (0x1FFF << 13)
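+
+/* Sketch: given the 1440 bias described above, a scissors rectangle with
+ * inclusive corners (x1, y1) and (x2, y2) would be packed as
+ *
+ *	u32 tl = ((x1 + R300_SCISSORS_OFFSET) << R300_SCISSORS_X_SHIFT) |
+ *		 ((y1 + R300_SCISSORS_OFFSET) << R300_SCISSORS_Y_SHIFT);
+ *	u32 br = ((x2 + R300_SCISSORS_OFFSET) << R300_SCISSORS_X_SHIFT) |
+ *		 ((y2 + R300_SCISSORS_OFFSET) << R300_SCISSORS_Y_SHIFT);
+ */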
+/* END: Scissors and cliprects */
+
+/* BEGIN: Texture specification */
+
+/*
+ * The texture specification dwords are grouped by meaning and not by texture
+ * unit. This means that e.g. the offset for texture image unit N is found in
+ * register TX_OFFSET_0 + (4*N).
+ */
+#define R300_TX_FILTER_0                    0x4400
+#       define R300_TX_REPEAT                    0
+#       define R300_TX_MIRRORED                  1
+#       define R300_TX_CLAMP                     4
+#       define R300_TX_CLAMP_TO_EDGE             2
+#       define R300_TX_CLAMP_TO_BORDER           6
+#       define R300_TX_WRAP_S_SHIFT              0
+#       define R300_TX_WRAP_S_MASK               (7 << 0)
+#       define R300_TX_WRAP_T_SHIFT              3
+#       define R300_TX_WRAP_T_MASK               (7 << 3)
+#       define R300_TX_WRAP_Q_SHIFT              6
+#       define R300_TX_WRAP_Q_MASK               (7 << 6)
+#       define R300_TX_MAG_FILTER_NEAREST        (1 << 9)
+#       define R300_TX_MAG_FILTER_LINEAR         (2 << 9)
+#       define R300_TX_MAG_FILTER_MASK           (3 << 9)
+#       define R300_TX_MIN_FILTER_NEAREST        (1 << 11)
+#       define R300_TX_MIN_FILTER_LINEAR         (2 << 11)
+#	define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST       (5  <<  11)
+#	define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR        (9  <<  11)
+#	define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST        (6  <<  11)
+#	define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR         (10 <<  11)
+
+/* NOTE: NEAREST doesn't seem to exist.
+ * I'm not setting MAG_FILTER_MASK and (3 << 11) for all
+ * anisotropy modes because that would void the selected mag filter.
+ */
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST             (0 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_LINEAR              (0 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  (2 << 13)
+#       define R300_TX_MIN_FILTER_MASK   ( (15 << 11) | (3 << 13) )
+#	define R300_TX_MAX_ANISO_1_TO_1  (0 << 21)
+#	define R300_TX_MAX_ANISO_2_TO_1  (2 << 21)
+#	define R300_TX_MAX_ANISO_4_TO_1  (4 << 21)
+#	define R300_TX_MAX_ANISO_8_TO_1  (6 << 21)
+#	define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
+#	define R300_TX_MAX_ANISO_MASK    (14 << 21)
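+
+/* Sketch: a classic trilinear setup with repeat wrapping on all three
+ * coordinates (R300_TX_REPEAT is 0; the shifts are shown for clarity):
+ *
+ *	u32 filter = (R300_TX_REPEAT << R300_TX_WRAP_S_SHIFT) |
+ *		     (R300_TX_REPEAT << R300_TX_WRAP_T_SHIFT) |
+ *		     (R300_TX_REPEAT << R300_TX_WRAP_Q_SHIFT) |
+ *		     R300_TX_MAG_FILTER_LINEAR |
+ *		     R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR;
+ */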
+
+#define R300_TX_FILTER1_0                      0x4440
+#	define R300_CHROMA_KEY_MODE_DISABLE    0
+#	define R300_CHROMA_KEY_FORCE	       1
+#	define R300_CHROMA_KEY_BLEND           2
+#	define R300_MC_ROUND_NORMAL            (0<<2)
+#	define R300_MC_ROUND_MPEG4             (1<<2)
+#	define R300_LOD_BIAS_MASK	    0x1fff
+#	define R300_EDGE_ANISO_EDGE_DIAG       (0<<13)
+#	define R300_EDGE_ANISO_EDGE_ONLY       (1<<13)
+#	define R300_MC_COORD_TRUNCATE_DISABLE  (0<<14)
+#	define R300_MC_COORD_TRUNCATE_MPEG     (1<<14)
+#	define R300_TX_TRI_PERF_0_8            (0<<15)
+#	define R300_TX_TRI_PERF_1_8            (1<<15)
+#	define R300_TX_TRI_PERF_1_4            (2<<15)
+#	define R300_TX_TRI_PERF_3_8            (3<<15)
+#	define R300_ANISO_THRESHOLD_MASK       (7<<17)
+
+#define R300_TX_SIZE_0                      0x4480
+#       define R300_TX_WIDTHMASK_SHIFT           0
+#       define R300_TX_WIDTHMASK_MASK            (2047 << 0)
+#       define R300_TX_HEIGHTMASK_SHIFT          11
+#       define R300_TX_HEIGHTMASK_MASK           (2047 << 11)
+#       define R300_TX_UNK23                     (1 << 23)
+#       define R300_TX_MAX_MIP_LEVEL_SHIFT       26
+#       define R300_TX_MAX_MIP_LEVEL_MASK        (0xf << 26)
+#       define R300_TX_SIZE_PROJECTED            (1<<30)
+#       define R300_TX_SIZE_TXPITCH_EN           (1<<31)
+#define R300_TX_FORMAT_0                    0x44C0
+	/* The interpretation of the format word by Wladimir van der Laan */
+	/* The X, Y, Z and W refer to the layout of the components.
+	   They are given meanings as R, G, B and Alpha by the swizzle
+	   specification */
+#	define R300_TX_FORMAT_X8		    0x0
+#	define R300_TX_FORMAT_X16		    0x1
+#	define R300_TX_FORMAT_Y4X4		    0x2
+#	define R300_TX_FORMAT_Y8X8		    0x3
+#	define R300_TX_FORMAT_Y16X16		    0x4
+#	define R300_TX_FORMAT_Z3Y3X2		    0x5
+#	define R300_TX_FORMAT_Z5Y6X5		    0x6
+#	define R300_TX_FORMAT_Z6Y5X5		    0x7
+#	define R300_TX_FORMAT_Z11Y11X10		    0x8
+#	define R300_TX_FORMAT_Z10Y11X11		    0x9
+#	define R300_TX_FORMAT_W4Z4Y4X4		    0xA
+#	define R300_TX_FORMAT_W1Z5Y5X5		    0xB
+#	define R300_TX_FORMAT_W8Z8Y8X8		    0xC
+#	define R300_TX_FORMAT_W2Z10Y10X10	    0xD
+#	define R300_TX_FORMAT_W16Z16Y16X16	    0xE
+#	define R300_TX_FORMAT_DXT1		    0xF
+#	define R300_TX_FORMAT_DXT3		    0x10
+#	define R300_TX_FORMAT_DXT5		    0x11
+#	define R300_TX_FORMAT_D3DMFT_CxV8U8	    0x12     /* no swizzle */
+#	define R300_TX_FORMAT_A8R8G8B8		    0x13     /* no swizzle */
+#	define R300_TX_FORMAT_B8G8_B8G8		    0x14     /* no swizzle */
+#	define R300_TX_FORMAT_G8R8_G8B8		    0x15     /* no swizzle */
+	/* 0x16 - some 16 bit green format.. ?? */
+#	define R300_TX_FORMAT_UNK25		   (1 << 25) /* no swizzle */
+#	define R300_TX_FORMAT_CUBIC_MAP		   (1 << 26)
+
+	/* gap */
+	/* Floating point formats */
+	/* Note - hardware supports both 16 and 32 bit floating point */
+#	define R300_TX_FORMAT_FL_I16		    0x18
+#	define R300_TX_FORMAT_FL_I16A16		    0x19
+#	define R300_TX_FORMAT_FL_R16G16B16A16	    0x1A
+#	define R300_TX_FORMAT_FL_I32		    0x1B
+#	define R300_TX_FORMAT_FL_I32A32		    0x1C
+#	define R300_TX_FORMAT_FL_R32G32B32A32	    0x1D
+#	define R300_TX_FORMAT_ATI2N		    0x1F
+	/* alpha modes, convenience mostly */
+	/* if you have alpha, pick the constant appropriate to the
+	   number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */
+#	define R300_TX_FORMAT_ALPHA_1CH		    0x000
+#	define R300_TX_FORMAT_ALPHA_2CH		    0x200
+#	define R300_TX_FORMAT_ALPHA_4CH		    0x600
+#	define R300_TX_FORMAT_ALPHA_NONE	    0xA00
+	/* Swizzling */
+	/* constants */
+#	define R300_TX_FORMAT_X		0
+#	define R300_TX_FORMAT_Y		1
+#	define R300_TX_FORMAT_Z		2
+#	define R300_TX_FORMAT_W		3
+#	define R300_TX_FORMAT_ZERO	4
+#	define R300_TX_FORMAT_ONE	5
+	/* 2.0*Z, everything above 1.0 is set to 0.0 */
+#	define R300_TX_FORMAT_CUT_Z	6
+	/* 2.0*W, everything above 1.0 is set to 0.0 */
+#	define R300_TX_FORMAT_CUT_W	7
+
+#	define R300_TX_FORMAT_B_SHIFT	18
+#	define R300_TX_FORMAT_G_SHIFT	15
+#	define R300_TX_FORMAT_R_SHIFT	12
+#	define R300_TX_FORMAT_A_SHIFT	9
+	/* Convenience macro to take care of layout and swizzling */
+#	define R300_EASY_TX_FORMAT(B, G, R, A, FMT)	(		\
+		((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT)		\
+		| ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT)	\
+		| ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT)	\
+		| ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT)	\
+		| (R300_TX_FORMAT_##FMT)				\
+		)
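+	/* Usage sketch: a conventional RGBA8888 texture (W8Z8Y8X8 layout
+	   with an identity swizzle) would be described as
+	   R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8). */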
+	/* These can be ORed with the result of R300_EASY_TX_FORMAT().
+	   We don't really know what they do; take values from a
+	   constant color? */
+#	define R300_TX_FORMAT_CONST_X		(1<<5)
+#	define R300_TX_FORMAT_CONST_Y		(2<<5)
+#	define R300_TX_FORMAT_CONST_Z		(4<<5)
+#	define R300_TX_FORMAT_CONST_W		(8<<5)
+
+#	define R300_TX_FORMAT_YUV_MODE		0x00800000
+
+#define R300_TX_PITCH_0			    0x4500 /* obviously missing in the gap */
+#define R300_TX_OFFSET_0                    0x4540
+	/* BEGIN: Guess from R200 */
+#       define R300_TXO_ENDIAN_NO_SWAP           (0 << 0)
+#       define R300_TXO_ENDIAN_BYTE_SWAP         (1 << 0)
+#       define R300_TXO_ENDIAN_WORD_SWAP         (2 << 0)
+#       define R300_TXO_ENDIAN_HALFDW_SWAP       (3 << 0)
+#       define R300_TXO_MACRO_TILE               (1 << 2)
+#       define R300_TXO_MICRO_TILE               (1 << 3)
+#       define R300_TXO_MICRO_TILE_SQUARE        (2 << 3)
+#       define R300_TXO_OFFSET_MASK              0xffffffe0
+#       define R300_TXO_OFFSET_SHIFT             5
+	/* END: Guess from R200 */
+
+/* 32 bit chroma key */
+#define R300_TX_CHROMA_KEY_0                      0x4580
+/* ff00ff00 == { 0, 1.0, 0, 1.0 } */
+#define R300_TX_BORDER_COLOR_0              0x45C0
+
+/* END: Texture specification */
+
+/* BEGIN: Fragment program instruction set */
+
+/* Fragment programs are written directly into register space.
+ * There are separate instruction streams for texture instructions and ALU
+ * instructions.
+ * In order to synchronize these streams, the program is divided into up
+ * to 4 nodes. Each node begins with a number of TEX operations, followed
+ * by a number of ALU operations.
+ * The first node can have zero TEX ops; all subsequent nodes must have
+ * at least one TEX op.
+ * All nodes must have at least one ALU op.
+ *
+ * The index of the last node is stored in PFS_CNTL_0: A value of 0 means
+ * 1 node, a value of 3 means 4 nodes.
+ * The total number of instructions is defined in PFS_CNTL_2. The offsets are
+ * offsets into the respective instruction streams, while *_END points to the
+ * last instruction relative to this offset.
+ */
+#define R300_PFS_CNTL_0                     0x4600
+#       define R300_PFS_CNTL_LAST_NODES_SHIFT    0
+#       define R300_PFS_CNTL_LAST_NODES_MASK     (3 << 0)
+#       define R300_PFS_CNTL_FIRST_NODE_HAS_TEX  (1 << 3)
+#define R300_PFS_CNTL_1                     0x4604
+/* There is an unshifted value here which has so far always been equal to the
+ * index of the highest used temporary register.
+ */
+#define R300_PFS_CNTL_2                     0x4608
+#       define R300_PFS_CNTL_ALU_OFFSET_SHIFT    0
+#       define R300_PFS_CNTL_ALU_OFFSET_MASK     (63 << 0)
+#       define R300_PFS_CNTL_ALU_END_SHIFT       6
+#       define R300_PFS_CNTL_ALU_END_MASK        (63 << 6)
+#       define R300_PFS_CNTL_TEX_OFFSET_SHIFT    12
+#       define R300_PFS_CNTL_TEX_OFFSET_MASK     (31 << 12) /* GUESS */
+#       define R300_PFS_CNTL_TEX_END_SHIFT       18
+#       define R300_PFS_CNTL_TEX_END_MASK        (31 << 18) /* GUESS */
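+
+/* Sketch: a single-node program with one TEX op and two ALU ops, both
+ * streams starting at offset 0, would presumably use
+ *
+ *	u32 cntl0 = (0 << R300_PFS_CNTL_LAST_NODES_SHIFT) |
+ *		    R300_PFS_CNTL_FIRST_NODE_HAS_TEX;
+ *	u32 cntl2 = (0 << R300_PFS_CNTL_ALU_OFFSET_SHIFT) |
+ *		    (1 << R300_PFS_CNTL_ALU_END_SHIFT) |
+ *		    (0 << R300_PFS_CNTL_TEX_OFFSET_SHIFT) |
+ *		    (0 << R300_PFS_CNTL_TEX_END_SHIFT);
+ */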
+
+/* gap */
+
+/* Nodes are stored backwards. The last active node is always stored in
+ * PFS_NODE_3.
+ * Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
+ * first node is stored in NODE_2, the second node is stored in NODE_3.
+ *
+ * Offsets are relative to the master offset from PFS_CNTL_2.
+ */
+#define R300_PFS_NODE_0                     0x4610
+#define R300_PFS_NODE_1                     0x4614
+#define R300_PFS_NODE_2                     0x4618
+#define R300_PFS_NODE_3                     0x461C
+#       define R300_PFS_NODE_ALU_OFFSET_SHIFT    0
+#       define R300_PFS_NODE_ALU_OFFSET_MASK     (63 << 0)
+#       define R300_PFS_NODE_ALU_END_SHIFT       6
+#       define R300_PFS_NODE_ALU_END_MASK        (63 << 6)
+#       define R300_PFS_NODE_TEX_OFFSET_SHIFT    12
+#       define R300_PFS_NODE_TEX_OFFSET_MASK     (31 << 12)
+#       define R300_PFS_NODE_TEX_END_SHIFT       17
+#       define R300_PFS_NODE_TEX_END_MASK        (31 << 17)
+#		define R300_PFS_NODE_OUTPUT_COLOR        (1 << 22)
+#		define R300_PFS_NODE_OUTPUT_DEPTH        (1 << 23)
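+
+/* Sketch of the backwards storage: the single node of the PFS_CNTL
+ * sketch above would live in PFS_NODE_3, with NODE_0 through NODE_2
+ * left at zero:
+ *
+ *	u32 node3 = (0 << R300_PFS_NODE_ALU_OFFSET_SHIFT) |
+ *		    (1 << R300_PFS_NODE_ALU_END_SHIFT) |
+ *		    (0 << R300_PFS_NODE_TEX_OFFSET_SHIFT) |
+ *		    (0 << R300_PFS_NODE_TEX_END_SHIFT) |
+ *		    R300_PFS_NODE_OUTPUT_COLOR;
+ */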
+
+/* TEX
+ * As far as I can tell, texture instructions cannot write into output
+ * registers directly. A subsequent ALU instruction is always necessary,
+ * even if it's just MAD o0, r0, 1, 0
+ */
+#define R300_PFS_TEXI_0                     0x4620
+#	define R300_FPITX_SRC_SHIFT              0
+#	define R300_FPITX_SRC_MASK               (31 << 0)
+	/* GUESS */
+#	define R300_FPITX_SRC_CONST              (1 << 5)
+#	define R300_FPITX_DST_SHIFT              6
+#	define R300_FPITX_DST_MASK               (31 << 6)
+#	define R300_FPITX_IMAGE_SHIFT            11
+	/* GUESS based on layout and native limits */
+#       define R300_FPITX_IMAGE_MASK             (15 << 11)
+/* Unsure if these are opcodes, or some kind of bitfield, but this is how
+ * they were set when I checked
+ */
+#	define R300_FPITX_OPCODE_SHIFT		15
+#		define R300_FPITX_OP_TEX	1
+#		define R300_FPITX_OP_KIL	2
+#		define R300_FPITX_OP_TXP	3
+#		define R300_FPITX_OP_TXB	4
+#	define R300_FPITX_OPCODE_MASK           (7 << 15)
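+
+/* Sketch, assuming the GUESSed fields: "TEX temp2, texcoord0, image1"
+ * would be encoded roughly as
+ *
+ *	u32 texi = (0 << R300_FPITX_SRC_SHIFT) |
+ *		   (2 << R300_FPITX_DST_SHIFT) |
+ *		   (1 << R300_FPITX_IMAGE_SHIFT) |
+ *		   (R300_FPITX_OP_TEX << R300_FPITX_OPCODE_SHIFT);
+ */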
+
+/* ALU
+ * The ALU instruction register blocks are enumerated according to the
+ * order in which fglrx writes them. I assume there is space for 64
+ * instructions, since each block has space for a maximum of 64 DWORDs,
+ * and this matches reported native limits.
+ *
+ * The basic functional block seems to be one MAD for each color and alpha,
+ * and an adder that adds all components after the MUL.
+ *  - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
+ *  - DP4: Use OUTC_DP4, OUTA_DP4
+ *  - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
+ *  - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
+ *  - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1
+ *  - CMP: If ARG2 < 0, return ARG1, else return ARG0
+ *  - FLR: use FRC+MAD
+ *  - XPD: use MAD+MAD
+ *  - SGE, SLT: use MAD+CMP
+ *  - RSQ: use ABS modifier for argument
+ *  - Use OUTC_REPL_ALPHA to write results of an alpha-only operation
+ *    (e.g. RCP) into color register
+ *  - apparently, there's no quick DST operation
+ *  - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
+ *  - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
+ *  - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
+ *
+ * Operand selection
+ * First stage selects three sources from the available registers and
+ * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
+ * fglrx sorts the three source fields: Registers before constants,
+ * lower indices before higher indices; I do not know whether this is
+ * necessary.
+ *
+ * fglrx fills unused sources with "read constant 0".
+ * According to specs, you cannot select more than two different constants.
+ *
+ * Second stage selects the operands from the sources. This is defined in
+ * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
+ * zero and one.
+ * Swizzling and negation happens in this stage, as well.
+ *
+ * Important: Color and alpha seem to be mostly separate, i.e. their sources
+ * selection appears to be fully independent (the register storage is probably
+ * physically split into a color and an alpha section).
+ * However (because of the apparent physical split), there is some interaction
+ * WRT swizzling. If, for example, you want to load an R component into an
+ * Alpha operand, this R component is taken from a *color* source, not from
+ * an alpha source. The corresponding register doesn't even have to appear in
+ * the alpha sources list. (I hope this all makes sense to you)
+ *
+ * Destination selection
+ * The destination register index is in FPI1 (color) and FPI3 (alpha)
+ * together with enable bits.
+ * There are separate enable bits for writing into temporary registers
+ * (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_*
+ * /DSTA_OUTPUT). You can write to both at once, or not write at all (the
+ * same index must be used for both).
+ *
+ * Note: There is a special form for LRP
+ *  - Argument order is the same as in ARB_fragment_program.
+ *  - Operation is MAD
+ *  - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
+ *  - Set FPI0/FPI2_SPECIAL_LRP
+ * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD
+ */
+#define R300_PFS_INSTR1_0                   0x46C0
+#       define R300_FPI1_SRC0C_SHIFT             0
+#       define R300_FPI1_SRC0C_MASK              (31 << 0)
+#       define R300_FPI1_SRC0C_CONST             (1 << 5)
+#       define R300_FPI1_SRC1C_SHIFT             6
+#       define R300_FPI1_SRC1C_MASK              (31 << 6)
+#       define R300_FPI1_SRC1C_CONST             (1 << 11)
+#       define R300_FPI1_SRC2C_SHIFT             12
+#       define R300_FPI1_SRC2C_MASK              (31 << 12)
+#       define R300_FPI1_SRC2C_CONST             (1 << 17)
+#       define R300_FPI1_SRC_MASK                0x0003ffff
+#       define R300_FPI1_DSTC_SHIFT              18
+#       define R300_FPI1_DSTC_MASK               (31 << 18)
+#		define R300_FPI1_DSTC_REG_MASK_SHIFT     23
+#       define R300_FPI1_DSTC_REG_X              (1 << 23)
+#       define R300_FPI1_DSTC_REG_Y              (1 << 24)
+#       define R300_FPI1_DSTC_REG_Z              (1 << 25)
+#		define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT  26
+#       define R300_FPI1_DSTC_OUTPUT_X           (1 << 26)
+#       define R300_FPI1_DSTC_OUTPUT_Y           (1 << 27)
+#       define R300_FPI1_DSTC_OUTPUT_Z           (1 << 28)
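+
+/* Sketch of the color half of the "MAD o0, r0, 1, 0" move mentioned in
+ * the TEX notes; the operand constants come from the INSTR0 list below.
+ * This is an interpretation, not verified. Unused source fields are left
+ * at zero here; fglrx would fill them with "read constant 0".
+ *
+ *	u32 instr1 = (0 << R300_FPI1_SRC0C_SHIFT) |
+ *		     (0 << R300_FPI1_DSTC_SHIFT) |
+ *		     R300_FPI1_DSTC_OUTPUT_X | R300_FPI1_DSTC_OUTPUT_Y |
+ *		     R300_FPI1_DSTC_OUTPUT_Z;
+ *	u32 instr0 = (R300_FPI0_ARGC_SRC0C_XYZ << R300_FPI0_ARG0C_SHIFT) |
+ *		     (R300_FPI0_ARGC_ONE << R300_FPI0_ARG1C_SHIFT) |
+ *		     (R300_FPI0_ARGC_ZERO << R300_FPI0_ARG2C_SHIFT) |
+ *		     R300_FPI0_OUTC_MAD;
+ */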
+
+#define R300_PFS_INSTR3_0                   0x47C0
+#       define R300_FPI3_SRC0A_SHIFT             0
+#       define R300_FPI3_SRC0A_MASK              (31 << 0)
+#       define R300_FPI3_SRC0A_CONST             (1 << 5)
+#       define R300_FPI3_SRC1A_SHIFT             6
+#       define R300_FPI3_SRC1A_MASK              (31 << 6)
+#       define R300_FPI3_SRC1A_CONST             (1 << 11)
+#       define R300_FPI3_SRC2A_SHIFT             12
+#       define R300_FPI3_SRC2A_MASK              (31 << 12)
+#       define R300_FPI3_SRC2A_CONST             (1 << 17)
+#       define R300_FPI3_SRC_MASK                0x0003ffff
+#       define R300_FPI3_DSTA_SHIFT              18
+#       define R300_FPI3_DSTA_MASK               (31 << 18)
+#       define R300_FPI3_DSTA_REG                (1 << 23)
+#       define R300_FPI3_DSTA_OUTPUT             (1 << 24)
+#		define R300_FPI3_DSTA_DEPTH              (1 << 27)
+
+#define R300_PFS_INSTR0_0                   0x48C0
+#       define R300_FPI0_ARGC_SRC0C_XYZ          0
+#       define R300_FPI0_ARGC_SRC0C_XXX          1
+#       define R300_FPI0_ARGC_SRC0C_YYY          2
+#       define R300_FPI0_ARGC_SRC0C_ZZZ          3
+#       define R300_FPI0_ARGC_SRC1C_XYZ          4
+#       define R300_FPI0_ARGC_SRC1C_XXX          5
+#       define R300_FPI0_ARGC_SRC1C_YYY          6
+#       define R300_FPI0_ARGC_SRC1C_ZZZ          7
+#       define R300_FPI0_ARGC_SRC2C_XYZ          8
+#       define R300_FPI0_ARGC_SRC2C_XXX          9
+#       define R300_FPI0_ARGC_SRC2C_YYY          10
+#       define R300_FPI0_ARGC_SRC2C_ZZZ          11
+#       define R300_FPI0_ARGC_SRC0A              12
+#       define R300_FPI0_ARGC_SRC1A              13
+#       define R300_FPI0_ARGC_SRC2A              14
+#       define R300_FPI0_ARGC_SRC1C_LRP          15
+#       define R300_FPI0_ARGC_ZERO               20
+#       define R300_FPI0_ARGC_ONE                21
+	/* GUESS */
+#       define R300_FPI0_ARGC_HALF               22
+#       define R300_FPI0_ARGC_SRC0C_YZX          23
+#       define R300_FPI0_ARGC_SRC1C_YZX          24
+#       define R300_FPI0_ARGC_SRC2C_YZX          25
+#       define R300_FPI0_ARGC_SRC0C_ZXY          26
+#       define R300_FPI0_ARGC_SRC1C_ZXY          27
+#       define R300_FPI0_ARGC_SRC2C_ZXY          28
+#       define R300_FPI0_ARGC_SRC0CA_WZY         29
+#       define R300_FPI0_ARGC_SRC1CA_WZY         30
+#       define R300_FPI0_ARGC_SRC2CA_WZY         31
+
+#       define R300_FPI0_ARG0C_SHIFT             0
+#       define R300_FPI0_ARG0C_MASK              (31 << 0)
+#       define R300_FPI0_ARG0C_NEG               (1 << 5)
+#       define R300_FPI0_ARG0C_ABS               (1 << 6)
+#       define R300_FPI0_ARG1C_SHIFT             7
+#       define R300_FPI0_ARG1C_MASK              (31 << 7)
+#       define R300_FPI0_ARG1C_NEG               (1 << 12)
+#       define R300_FPI0_ARG1C_ABS               (1 << 13)
+#       define R300_FPI0_ARG2C_SHIFT             14
+#       define R300_FPI0_ARG2C_MASK              (31 << 14)
+#       define R300_FPI0_ARG2C_NEG               (1 << 19)
+#       define R300_FPI0_ARG2C_ABS               (1 << 20)
+#       define R300_FPI0_SPECIAL_LRP             (1 << 21)
+#       define R300_FPI0_OUTC_MAD                (0 << 23)
+#       define R300_FPI0_OUTC_DP3                (1 << 23)
+#       define R300_FPI0_OUTC_DP4                (2 << 23)
+#       define R300_FPI0_OUTC_MIN                (4 << 23)
+#       define R300_FPI0_OUTC_MAX                (5 << 23)
+#       define R300_FPI0_OUTC_CMPH               (7 << 23)
+#       define R300_FPI0_OUTC_CMP                (8 << 23)
+#       define R300_FPI0_OUTC_FRC                (9 << 23)
+#       define R300_FPI0_OUTC_REPL_ALPHA         (10 << 23)
+#       define R300_FPI0_OUTC_SAT                (1 << 30)
+#       define R300_FPI0_INSERT_NOP              (1 << 31)
+
+#define R300_PFS_INSTR2_0                   0x49C0
+#       define R300_FPI2_ARGA_SRC0C_X            0
+#       define R300_FPI2_ARGA_SRC0C_Y            1
+#       define R300_FPI2_ARGA_SRC0C_Z            2
+#       define R300_FPI2_ARGA_SRC1C_X            3
+#       define R300_FPI2_ARGA_SRC1C_Y            4
+#       define R300_FPI2_ARGA_SRC1C_Z            5
+#       define R300_FPI2_ARGA_SRC2C_X            6
+#       define R300_FPI2_ARGA_SRC2C_Y            7
+#       define R300_FPI2_ARGA_SRC2C_Z            8
+#       define R300_FPI2_ARGA_SRC0A              9
+#       define R300_FPI2_ARGA_SRC1A              10
+#       define R300_FPI2_ARGA_SRC2A              11
+#       define R300_FPI2_ARGA_SRC1A_LRP          15
+#       define R300_FPI2_ARGA_ZERO               16
+#       define R300_FPI2_ARGA_ONE                17
+	/* GUESS */
+#       define R300_FPI2_ARGA_HALF               18
+#       define R300_FPI2_ARG0A_SHIFT             0
+#       define R300_FPI2_ARG0A_MASK              (31 << 0)
+#       define R300_FPI2_ARG0A_NEG               (1 << 5)
+	/* GUESS */
+#	define R300_FPI2_ARG0A_ABS		 (1 << 6)
+#       define R300_FPI2_ARG1A_SHIFT             7
+#       define R300_FPI2_ARG1A_MASK              (31 << 7)
+#       define R300_FPI2_ARG1A_NEG               (1 << 12)
+	/* GUESS */
+#	define R300_FPI2_ARG1A_ABS		 (1 << 13)
+#       define R300_FPI2_ARG2A_SHIFT             14
+#       define R300_FPI2_ARG2A_MASK              (31 << 14)
+#       define R300_FPI2_ARG2A_NEG               (1 << 19)
+	/* GUESS */
+#	define R300_FPI2_ARG2A_ABS		 (1 << 20)
+#       define R300_FPI2_SPECIAL_LRP             (1 << 21)
+#       define R300_FPI2_OUTA_MAD                (0 << 23)
+#       define R300_FPI2_OUTA_DP4                (1 << 23)
+#       define R300_FPI2_OUTA_MIN                (2 << 23)
+#       define R300_FPI2_OUTA_MAX                (3 << 23)
+#       define R300_FPI2_OUTA_CMP                (6 << 23)
+#       define R300_FPI2_OUTA_FRC                (7 << 23)
+#       define R300_FPI2_OUTA_EX2                (8 << 23)
+#       define R300_FPI2_OUTA_LG2                (9 << 23)
+#       define R300_FPI2_OUTA_RCP                (10 << 23)
+#       define R300_FPI2_OUTA_RSQ                (11 << 23)
+#       define R300_FPI2_OUTA_SAT                (1 << 30)
+#       define R300_FPI2_UNKNOWN_31              (1 << 31)
+/* END: Fragment program instruction set */
+
+/* Fog state and color */
+#define R300_RE_FOG_STATE                   0x4BC0
+#       define R300_FOG_ENABLE                   (1 << 0)
+#	define R300_FOG_MODE_LINEAR              (0 << 1)
+#	define R300_FOG_MODE_EXP                 (1 << 1)
+#	define R300_FOG_MODE_EXP2                (2 << 1)
+#	define R300_FOG_MODE_MASK                (3 << 1)
+#define R300_FOG_COLOR_R                    0x4BC8
+#define R300_FOG_COLOR_G                    0x4BCC
+#define R300_FOG_COLOR_B                    0x4BD0
+
+#define R300_PP_ALPHA_TEST                  0x4BD4
+#       define R300_REF_ALPHA_MASK               0x000000ff
+#       define R300_ALPHA_TEST_FAIL              (0 << 8)
+#       define R300_ALPHA_TEST_LESS              (1 << 8)
+#       define R300_ALPHA_TEST_LEQUAL            (3 << 8)
+#       define R300_ALPHA_TEST_EQUAL             (2 << 8)
+#       define R300_ALPHA_TEST_GEQUAL            (6 << 8)
+#       define R300_ALPHA_TEST_GREATER           (4 << 8)
+#       define R300_ALPHA_TEST_NEQUAL            (5 << 8)
+#       define R300_ALPHA_TEST_PASS              (7 << 8)
+#       define R300_ALPHA_TEST_OP_MASK           (7 << 8)
+#       define R300_ALPHA_TEST_ENABLE            (1 << 11)
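+
+/* Sketch: the equivalent of glAlphaFunc(GL_GREATER, 0.5) would
+ * presumably be
+ *
+ *	u32 at = 0x80 |			(ref value, 0.5 in 8 bits)
+ *		 R300_ALPHA_TEST_GREATER |
+ *		 R300_ALPHA_TEST_ENABLE;
+ */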
+
+/* gap */
+
+/* Fragment program parameters in 7.16 floating point */
+#define R300_PFS_PARAM_0_X                  0x4C00
+#define R300_PFS_PARAM_0_Y                  0x4C04
+#define R300_PFS_PARAM_0_Z                  0x4C08
+#define R300_PFS_PARAM_0_W                  0x4C0C
+/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
+#define R300_PFS_PARAM_31_X                 0x4DF0
+#define R300_PFS_PARAM_31_Y                 0x4DF4
+#define R300_PFS_PARAM_31_Z                 0x4DF8
+#define R300_PFS_PARAM_31_W                 0x4DFC
+
+/* Notes:
+ * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in
+ *   the application
+ * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND
+ *   are set to the same function (both registers are always set up
+ *   completely in any case)
+ * - Most blend flags are simply copied from R200 and not tested yet
+ */
+#define R300_RB3D_CBLEND                    0x4E04
+#define R300_RB3D_ABLEND                    0x4E08
+/* the following only appear in CBLEND */
+#       define R300_BLEND_ENABLE                     (1 << 0)
+#       define R300_BLEND_UNKNOWN                    (3 << 1)
+#       define R300_BLEND_NO_SEPARATE                (1 << 3)
+/* the following are shared between CBLEND and ABLEND */
+#       define R300_FCN_MASK                         (3  << 12)
+#       define R300_COMB_FCN_ADD_CLAMP               (0  << 12)
+#       define R300_COMB_FCN_ADD_NOCLAMP             (1  << 12)
+#       define R300_COMB_FCN_SUB_CLAMP               (2  << 12)
+#       define R300_COMB_FCN_SUB_NOCLAMP             (3  << 12)
+#       define R300_COMB_FCN_MIN                     (4  << 12)
+#       define R300_COMB_FCN_MAX                     (5  << 12)
+#       define R300_COMB_FCN_RSUB_CLAMP              (6  << 12)
+#       define R300_COMB_FCN_RSUB_NOCLAMP            (7  << 12)
+#       define R300_BLEND_GL_ZERO                    (32)
+#       define R300_BLEND_GL_ONE                     (33)
+#       define R300_BLEND_GL_SRC_COLOR               (34)
+#       define R300_BLEND_GL_ONE_MINUS_SRC_COLOR     (35)
+#       define R300_BLEND_GL_DST_COLOR               (36)
+#       define R300_BLEND_GL_ONE_MINUS_DST_COLOR     (37)
+#       define R300_BLEND_GL_SRC_ALPHA               (38)
+#       define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA     (39)
+#       define R300_BLEND_GL_DST_ALPHA               (40)
+#       define R300_BLEND_GL_ONE_MINUS_DST_ALPHA     (41)
+#       define R300_BLEND_GL_SRC_ALPHA_SATURATE      (42)
+#       define R300_BLEND_GL_CONST_COLOR             (43)
+#       define R300_BLEND_GL_ONE_MINUS_CONST_COLOR   (44)
+#       define R300_BLEND_GL_CONST_ALPHA             (45)
+#       define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA   (46)
+#       define R300_BLEND_MASK                       (63)
+#       define R300_SRC_BLEND_SHIFT                  (16)
+#       define R300_DST_BLEND_SHIFT                  (24)
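+/* Example (not from the original header): a sketch of how a typical
+ * GL_SRC_ALPHA / GL_ONE_MINUS_SRC_ALPHA blend value might be composed
+ * from the definitions above; the factor codes and shifts are taken
+ * as-is from this file and have not been verified on hardware.
+ */
+#if 0 /* illustration only, not compiled */
+static inline u32 r300_example_cblend(void)
+{
+	return R300_BLEND_ENABLE | R300_BLEND_NO_SEPARATE |
+	       R300_COMB_FCN_ADD_CLAMP |
+	       (R300_BLEND_GL_SRC_ALPHA << R300_SRC_BLEND_SHIFT) |
+	       (R300_BLEND_GL_ONE_MINUS_SRC_ALPHA << R300_DST_BLEND_SHIFT);
+}
+#endif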
+#define R300_RB3D_BLEND_COLOR               0x4E10
+#define R300_RB3D_COLORMASK                 0x4E0C
+#       define R300_COLORMASK0_B                 (1<<0)
+#       define R300_COLORMASK0_G                 (1<<1)
+#       define R300_COLORMASK0_R                 (1<<2)
+#       define R300_COLORMASK0_A                 (1<<3)
+
+/* gap */
+
+#define R300_RB3D_COLOROFFSET0              0x4E28
+#       define R300_COLOROFFSET_MASK             0xFFFFFFF0 /* GUESS */
+#define R300_RB3D_COLOROFFSET1              0x4E2C /* GUESS */
+#define R300_RB3D_COLOROFFSET2              0x4E30 /* GUESS */
+#define R300_RB3D_COLOROFFSET3              0x4E34 /* GUESS */
+
+/* gap */
+
+/* Bit 16: Larger tiles
+ * Bit 17: 4x2 tiles
+ * Bit 18: Extremely weird tile-like pattern, but with some pixels duplicated?
+ */
+#define R300_RB3D_COLORPITCH0               0x4E38
+#       define R300_COLORPITCH_MASK              0x00001FF8 /* GUESS */
+#       define R300_COLOR_TILE_ENABLE            (1 << 16) /* GUESS */
+#       define R300_COLOR_MICROTILE_ENABLE       (1 << 17) /* GUESS */
+#       define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17)
+#       define R300_COLOR_ENDIAN_NO_SWAP         (0 << 18) /* GUESS */
+#       define R300_COLOR_ENDIAN_WORD_SWAP       (1 << 18) /* GUESS */
+#       define R300_COLOR_ENDIAN_DWORD_SWAP      (2 << 18) /* GUESS */
+#       define R300_COLOR_FORMAT_RGB565          (2 << 22)
+#       define R300_COLOR_FORMAT_ARGB8888        (3 << 22)
+#define R300_RB3D_COLORPITCH1               0x4E3C /* GUESS */
+#define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
+#define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
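+/* Example (not from the original header): a guess at a COLORPITCH0
+ * value for a linear ARGB8888 surface, using the (partly GUESSed)
+ * fields above; pitch_px is a hypothetical pitch value.
+ */
+#if 0 /* illustration only, not compiled */
+static inline u32 r300_example_colorpitch(u32 pitch_px)
+{
+	return (pitch_px & R300_COLORPITCH_MASK) | R300_COLOR_FORMAT_ARGB8888;
+}
+#endif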
+
+#define R300_RB3D_AARESOLVE_OFFSET          0x4E80
+#define R300_RB3D_AARESOLVE_PITCH           0x4E84
+#define R300_RB3D_AARESOLVE_CTL             0x4E88
+/* gap */
+
+/* Guess by Vladimir.
+ * Set to 0A before 3D operations, set to 02 afterwards.
+ */
+/*#define R300_RB3D_DSTCACHE_CTLSTAT          0x4E4C*/
+#       define R300_RB3D_DSTCACHE_UNKNOWN_02             0x00000002
+#       define R300_RB3D_DSTCACHE_UNKNOWN_0A             0x0000000A
+
+/* gap */
+/* There seems to be no "write only" setting, so use Z-test = ALWAYS
+ * for this.
+ * Bit (1<<8) is the "test" bit, so a plain write is 6. - vd
+ */
+#define R300_ZB_CNTL                             0x4F00
+#	define R300_STENCIL_ENABLE		 (1 << 0)
+#	define R300_Z_ENABLE		         (1 << 1)
+#	define R300_Z_WRITE_ENABLE		 (1 << 2)
+#	define R300_Z_SIGNED_COMPARE		 (1 << 3)
+#	define R300_STENCIL_FRONT_BACK		 (1 << 4)
+
+#define R300_ZB_ZSTENCILCNTL                   0x4f04
+	/* functions */
+#	define R300_ZS_NEVER			0
+#	define R300_ZS_LESS			1
+#	define R300_ZS_LEQUAL			2
+#	define R300_ZS_EQUAL			3
+#	define R300_ZS_GEQUAL			4
+#	define R300_ZS_GREATER			5
+#	define R300_ZS_NOTEQUAL			6
+#	define R300_ZS_ALWAYS			7
+#       define R300_ZS_MASK                     7
+	/* operations */
+#	define R300_ZS_KEEP			0
+#	define R300_ZS_ZERO			1
+#	define R300_ZS_REPLACE			2
+#	define R300_ZS_INCR			3
+#	define R300_ZS_DECR			4
+#	define R300_ZS_INVERT			5
+#	define R300_ZS_INCR_WRAP		6
+#	define R300_ZS_DECR_WRAP		7
+#	define R300_Z_FUNC_SHIFT		0
+	/* front and back refer to operations done for front
+	   and back faces, i.e. separate stencil function support */
+#	define R300_S_FRONT_FUNC_SHIFT	        3
+#	define R300_S_FRONT_SFAIL_OP_SHIFT	6
+#	define R300_S_FRONT_ZPASS_OP_SHIFT	9
+#	define R300_S_FRONT_ZFAIL_OP_SHIFT      12
+#	define R300_S_BACK_FUNC_SHIFT           15
+#	define R300_S_BACK_SFAIL_OP_SHIFT       18
+#	define R300_S_BACK_ZPASS_OP_SHIFT       21
+#	define R300_S_BACK_ZFAIL_OP_SHIFT       24
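+/* Example (not from the original header): a sketch of how a
+ * ZB_ZSTENCILCNTL value might be built from the function and operation
+ * codes above, assuming the shift layout documented here: depth test
+ * LEQUAL, front stencil ALWAYS with KEEP for all three operations.
+ */
+#if 0 /* illustration only, not compiled */
+static inline u32 r300_example_zstencilcntl(void)
+{
+	return (R300_ZS_LEQUAL << R300_Z_FUNC_SHIFT) |
+	       (R300_ZS_ALWAYS << R300_S_FRONT_FUNC_SHIFT) |
+	       (R300_ZS_KEEP << R300_S_FRONT_SFAIL_OP_SHIFT) |
+	       (R300_ZS_KEEP << R300_S_FRONT_ZPASS_OP_SHIFT) |
+	       (R300_ZS_KEEP << R300_S_FRONT_ZFAIL_OP_SHIFT);
+}
+#endif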
+
+#define R300_ZB_STENCILREFMASK                        0x4f08
+#	define R300_STENCILREF_SHIFT       0
+#	define R300_STENCILREF_MASK        0x000000ff
+#	define R300_STENCILMASK_SHIFT      8
+#	define R300_STENCILMASK_MASK       0x0000ff00
+#	define R300_STENCILWRITEMASK_SHIFT 16
+#	define R300_STENCILWRITEMASK_MASK  0x00ff0000
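+/* Example (not from the original header): packing the reference value,
+ * compare mask and write mask into ZB_STENCILREFMASK, assuming the
+ * shifts/masks above; ref, mask and wmask are hypothetical inputs.
+ */
+#if 0 /* illustration only, not compiled */
+static inline u32 r300_example_stencilrefmask(u32 ref, u32 mask, u32 wmask)
+{
+	return ((ref << R300_STENCILREF_SHIFT) & R300_STENCILREF_MASK) |
+	       ((mask << R300_STENCILMASK_SHIFT) & R300_STENCILMASK_MASK) |
+	       ((wmask << R300_STENCILWRITEMASK_SHIFT) &
+		R300_STENCILWRITEMASK_MASK);
+}
+#endif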
+
+/* gap */
+
+#define R300_ZB_FORMAT                             0x4f10
+#	define R300_DEPTHFORMAT_16BIT_INT_Z   (0 << 0)
+#	define R300_DEPTHFORMAT_16BIT_13E3    (1 << 0)
+#	define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL   (2 << 0)
+/* reserved up to (15 << 0) */
+#	define R300_INVERT_13E3_LEADING_ONES  (0 << 4)
+#	define R300_INVERT_13E3_LEADING_ZEROS (1 << 4)
+
+#define R300_ZB_ZTOP                             0x4F14
+#	define R300_ZTOP_DISABLE                 (0 << 0)
+#	define R300_ZTOP_ENABLE                  (1 << 0)
+
+/* gap */
+
+#define R300_ZB_ZCACHE_CTLSTAT            0x4f18
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT      (0 << 0)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT       (0 << 1)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE            (1 << 1)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE            (0 << 31)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY            (1 << 31)
+
+#define R300_ZB_BW_CNTL                     0x4f1c
+#	define R300_HIZ_DISABLE                              (0 << 0)
+#	define R300_HIZ_ENABLE                               (1 << 0)
+#	define R300_HIZ_MIN                                  (0 << 1)
+#	define R300_HIZ_MAX                                  (1 << 1)
+#	define R300_FAST_FILL_DISABLE                        (0 << 2)
+#	define R300_FAST_FILL_ENABLE                         (1 << 2)
+#	define R300_RD_COMP_DISABLE                          (0 << 3)
+#	define R300_RD_COMP_ENABLE                           (1 << 3)
+#	define R300_WR_COMP_DISABLE                          (0 << 4)
+#	define R300_WR_COMP_ENABLE                           (1 << 4)
+#	define R300_ZB_CB_CLEAR_RMW                          (0 << 5)
+#	define R300_ZB_CB_CLEAR_CACHE_LINEAR                 (1 << 5)
+#	define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE   (0 << 6)
+#	define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE    (1 << 6)
+
+#	define R500_ZEQUAL_OPTIMIZE_ENABLE                   (0 << 7)
+#	define R500_ZEQUAL_OPTIMIZE_DISABLE                  (1 << 7)
+#	define R500_SEQUAL_OPTIMIZE_ENABLE                   (0 << 8)
+#	define R500_SEQUAL_OPTIMIZE_DISABLE                  (1 << 8)
+
+#	define R500_BMASK_ENABLE                             (0 << 10)
+#	define R500_BMASK_DISABLE                            (1 << 10)
+#	define R500_HIZ_EQUAL_REJECT_DISABLE                 (0 << 11)
+#	define R500_HIZ_EQUAL_REJECT_ENABLE                  (1 << 11)
+#	define R500_HIZ_FP_EXP_BITS_DISABLE                  (0 << 12)
+#	define R500_HIZ_FP_EXP_BITS_1                        (1 << 12)
+#	define R500_HIZ_FP_EXP_BITS_2                        (2 << 12)
+#	define R500_HIZ_FP_EXP_BITS_3                        (3 << 12)
+#	define R500_HIZ_FP_EXP_BITS_4                        (4 << 12)
+#	define R500_HIZ_FP_EXP_BITS_5                        (5 << 12)
+#	define R500_HIZ_FP_INVERT_LEADING_ONES               (0 << 15)
+#	define R500_HIZ_FP_INVERT_LEADING_ZEROS              (1 << 15)
+#	define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE      (0 << 16)
+#	define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE     (1 << 16)
+#	define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE           (0 << 17)
+#	define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE          (1 << 17)
+#	define R500_PEQ_PACKING_DISABLE                      (0 << 18)
+#	define R500_PEQ_PACKING_ENABLE                       (1 << 18)
+#	define R500_COVERED_PTR_MASKING_DISABLE              (0 << 18)
+#	define R500_COVERED_PTR_MASKING_ENABLE               (1 << 18)
+
+
+/* gap */
+
+/* Z Buffer Address Offset.
+ * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles.
+ */
+#define R300_ZB_DEPTHOFFSET               0x4f20
+
+/* Z Buffer Pitch and Endian Control */
+#define R300_ZB_DEPTHPITCH                0x4f24
+#       define R300_DEPTHPITCH_MASK              0x00003FFC
+#       define R300_DEPTHMACROTILE_DISABLE      (0 << 16)
+#       define R300_DEPTHMACROTILE_ENABLE       (1 << 16)
+#       define R300_DEPTHMICROTILE_LINEAR       (0 << 17)
+#       define R300_DEPTHMICROTILE_TILED        (1 << 17)
+#       define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17)
+#       define R300_DEPTHENDIAN_NO_SWAP         (0 << 18)
+#       define R300_DEPTHENDIAN_WORD_SWAP       (1 << 18)
+#       define R300_DEPTHENDIAN_DWORD_SWAP      (2 << 18)
+#       define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18)
+
+/* Z Buffer Clear Value */
+#define R300_ZB_DEPTHCLEARVALUE                  0x4f28
+
+#define R300_ZB_ZMASK_OFFSET			 0x4f30
+#define R300_ZB_ZMASK_PITCH			 0x4f34
+#define R300_ZB_ZMASK_WRINDEX			 0x4f38
+#define R300_ZB_ZMASK_DWORD			 0x4f3c
+#define R300_ZB_ZMASK_RDINDEX			 0x4f40
+
+/* Hierarchical Z Memory Offset */
+#define R300_ZB_HIZ_OFFSET                       0x4f44
+
+/* Hierarchical Z Write Index */
+#define R300_ZB_HIZ_WRINDEX                      0x4f48
+
+/* Hierarchical Z Data */
+#define R300_ZB_HIZ_DWORD                        0x4f4c
+
+/* Hierarchical Z Read Index */
+#define R300_ZB_HIZ_RDINDEX                      0x4f50
+
+/* Hierarchical Z Pitch */
+#define R300_ZB_HIZ_PITCH                        0x4f54
+
+/* Z Buffer Z Pass Counter Data */
+#define R300_ZB_ZPASS_DATA                       0x4f58
+
+/* Z Buffer Z Pass Counter Address */
+#define R300_ZB_ZPASS_ADDR                       0x4f5c
+
+/* Depth buffer X and Y coordinate offset */
+#define R300_ZB_DEPTHXY_OFFSET                   0x4f60
+#	define R300_DEPTHX_OFFSET_SHIFT  1
+#	define R300_DEPTHX_OFFSET_MASK   0x000007FE
+#	define R300_DEPTHY_OFFSET_SHIFT  17
+#	define R300_DEPTHY_OFFSET_MASK   0x07FE0000
+
+/* Sets the FIFO sizes */
+#define R500_ZB_FIFO_SIZE                        0x4fd0
+#	define R500_OP_FIFO_SIZE_FULL   (0 << 0)
+#	define R500_OP_FIFO_SIZE_HALF   (1 << 0)
+#	define R500_OP_FIFO_SIZE_QUATER (2 << 0)
+#	define R500_OP_FIFO_SIZE_EIGTHS (4 << 0)
+
+/* Stencil Reference Value and Mask for backfacing quads */
+/* R300_ZB_STENCILREFMASK handles front face */
+#define R500_ZB_STENCILREFMASK_BF                0x4fd4
+#	define R500_STENCILREF_SHIFT       0
+#	define R500_STENCILREF_MASK        0x000000ff
+#	define R500_STENCILMASK_SHIFT      8
+#	define R500_STENCILMASK_MASK       0x0000ff00
+#	define R500_STENCILWRITEMASK_SHIFT 16
+#	define R500_STENCILWRITEMASK_MASK  0x00ff0000
+
+/* BEGIN: Vertex program instruction set */
+
+/* Every instruction is four dwords long:
+ *  DWORD 0: output and opcode
+ *  DWORD 1: first argument
+ *  DWORD 2: second argument
+ *  DWORD 3: third argument
+ *
+ * Notes:
+ *  - ABS r, a is implemented as MAX r, a, -a
+ *  - MOV is implemented as ADD to zero
+ *  - XPD is implemented as MUL + MAD
+ *  - FLR is implemented as FRC + ADD
+ *  - apparently, fglrx tries to schedule instructions so that there is at
+ *    least one instruction between the write to a temporary and the first
+ *    read from said temporary; however, violations of this scheduling are
+ *    allowed
+ *  - register indices seem to be unrelated to OpenGL aliasing to
+ *    conventional state
+ *  - only one attribute and one parameter can be loaded at a time; however,
+ *    the same attribute/parameter can be used for more than one argument
+ *  - the second software argument for POW is the third hardware argument
+ *    (no idea why)
+ *  - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
+ *
+ * There is some magic surrounding LIT:
+ *   The single argument is replicated across all three inputs, but swizzled:
+ *     First argument: xyzy
+ *     Second argument: xyzx
+ *     Third argument: xyzw
+ *   Whenever the result is used later in the fragment program, fglrx forces
+ *   x and w to be 1.0 in the input selection; I don't know whether this is
+ *   strictly necessary
+ */
+#define R300_VPI_OUT_OP_DOT                     (1 << 0)
+#define R300_VPI_OUT_OP_MUL                     (2 << 0)
+#define R300_VPI_OUT_OP_ADD                     (3 << 0)
+#define R300_VPI_OUT_OP_MAD                     (4 << 0)
+#define R300_VPI_OUT_OP_DST                     (5 << 0)
+#define R300_VPI_OUT_OP_FRC                     (6 << 0)
+#define R300_VPI_OUT_OP_MAX                     (7 << 0)
+#define R300_VPI_OUT_OP_MIN                     (8 << 0)
+#define R300_VPI_OUT_OP_SGE                     (9 << 0)
+#define R300_VPI_OUT_OP_SLT                     (10 << 0)
+	/* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
+#define R300_VPI_OUT_OP_UNK12                   (12 << 0)
+#define R300_VPI_OUT_OP_ARL                     (13 << 0)
+#define R300_VPI_OUT_OP_EXP                     (65 << 0)
+#define R300_VPI_OUT_OP_LOG                     (66 << 0)
+	/* Used in fog computations, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK67                   (67 << 0)
+#define R300_VPI_OUT_OP_LIT                     (68 << 0)
+#define R300_VPI_OUT_OP_POW                     (69 << 0)
+#define R300_VPI_OUT_OP_RCP                     (70 << 0)
+#define R300_VPI_OUT_OP_RSQ                     (72 << 0)
+	/* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK73                   (73 << 0)
+#define R300_VPI_OUT_OP_EX2                     (75 << 0)
+#define R300_VPI_OUT_OP_LG2                     (76 << 0)
+#define R300_VPI_OUT_OP_MAD_2                   (128 << 0)
+	/* all temps, vector(scalar, vector, vector) */
+#define R300_VPI_OUT_OP_UNK129                  (129 << 0)
+
+#define R300_VPI_OUT_REG_CLASS_TEMPORARY        (0 << 8)
+#define R300_VPI_OUT_REG_CLASS_ADDR             (1 << 8)
+#define R300_VPI_OUT_REG_CLASS_RESULT           (2 << 8)
+#define R300_VPI_OUT_REG_CLASS_MASK             (31 << 8)
+
+#define R300_VPI_OUT_REG_INDEX_SHIFT            13
+	/* GUESS based on fglrx native limits */
+#define R300_VPI_OUT_REG_INDEX_MASK             (31 << 13)
+
+#define R300_VPI_OUT_WRITE_X                    (1 << 20)
+#define R300_VPI_OUT_WRITE_Y                    (1 << 21)
+#define R300_VPI_OUT_WRITE_Z                    (1 << 22)
+#define R300_VPI_OUT_WRITE_W                    (1 << 23)
+
+#define R300_VPI_IN_REG_CLASS_TEMPORARY         (0 << 0)
+#define R300_VPI_IN_REG_CLASS_ATTRIBUTE         (1 << 0)
+#define R300_VPI_IN_REG_CLASS_PARAMETER         (2 << 0)
+#define R300_VPI_IN_REG_CLASS_NONE              (9 << 0)
+#define R300_VPI_IN_REG_CLASS_MASK              (31 << 0)
+
+#define R300_VPI_IN_REG_INDEX_SHIFT             5
+	/* GUESS based on fglrx native limits */
+#define R300_VPI_IN_REG_INDEX_MASK              (255 << 5)
+
+/* The R300 can select components from the input register arbitrarily.
+ * Use the following constants, shifted left by the shift of the
+ * component slot you want to fill (see the *_SHIFT values below).
+ */
+#define R300_VPI_IN_SELECT_X    0
+#define R300_VPI_IN_SELECT_Y    1
+#define R300_VPI_IN_SELECT_Z    2
+#define R300_VPI_IN_SELECT_W    3
+#define R300_VPI_IN_SELECT_ZERO 4
+#define R300_VPI_IN_SELECT_ONE  5
+#define R300_VPI_IN_SELECT_MASK 7
+
+#define R300_VPI_IN_X_SHIFT                     13
+#define R300_VPI_IN_Y_SHIFT                     16
+#define R300_VPI_IN_Z_SHIFT                     19
+#define R300_VPI_IN_W_SHIFT                     22
+
+#define R300_VPI_IN_NEG_X                       (1 << 25)
+#define R300_VPI_IN_NEG_Y                       (1 << 26)
+#define R300_VPI_IN_NEG_Z                       (1 << 27)
+#define R300_VPI_IN_NEG_W                       (1 << 28)
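+/* Example (not from the original header): a sketch of an
+ * identity-swizzled argument dword reading temporary register `index',
+ * assuming the class, index and component-select layout above.
+ */
+#if 0 /* illustration only, not compiled */
+static inline u32 r300_example_vpi_arg(u32 index)
+{
+	return R300_VPI_IN_REG_CLASS_TEMPORARY |
+	       (index << R300_VPI_IN_REG_INDEX_SHIFT) |
+	       (R300_VPI_IN_SELECT_X << R300_VPI_IN_X_SHIFT) |
+	       (R300_VPI_IN_SELECT_Y << R300_VPI_IN_Y_SHIFT) |
+	       (R300_VPI_IN_SELECT_Z << R300_VPI_IN_Z_SHIFT) |
+	       (R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT);
+}
+#endif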
+/* END: Vertex program instruction set */
+
+/* BEGIN: Packet 3 commands */
+
+/* A primitive emission dword. */
+#define R300_PRIM_TYPE_NONE                     (0 << 0)
+#define R300_PRIM_TYPE_POINT                    (1 << 0)
+#define R300_PRIM_TYPE_LINE                     (2 << 0)
+#define R300_PRIM_TYPE_LINE_STRIP               (3 << 0)
+#define R300_PRIM_TYPE_TRI_LIST                 (4 << 0)
+#define R300_PRIM_TYPE_TRI_FAN                  (5 << 0)
+#define R300_PRIM_TYPE_TRI_STRIP                (6 << 0)
+#define R300_PRIM_TYPE_TRI_TYPE2                (7 << 0)
+#define R300_PRIM_TYPE_RECT_LIST                (8 << 0)
+#define R300_PRIM_TYPE_3VRT_POINT_LIST          (9 << 0)
+#define R300_PRIM_TYPE_3VRT_LINE_LIST           (10 << 0)
+	/* GUESS (based on r200) */
+#define R300_PRIM_TYPE_POINT_SPRITES            (11 << 0)
+#define R300_PRIM_TYPE_LINE_LOOP                (12 << 0)
+#define R300_PRIM_TYPE_QUADS                    (13 << 0)
+#define R300_PRIM_TYPE_QUAD_STRIP               (14 << 0)
+#define R300_PRIM_TYPE_POLYGON                  (15 << 0)
+#define R300_PRIM_TYPE_MASK                     0xF
+#define R300_PRIM_WALK_IND                      (1 << 4)
+#define R300_PRIM_WALK_LIST                     (2 << 4)
+#define R300_PRIM_WALK_RING                     (3 << 4)
+#define R300_PRIM_WALK_MASK                     (3 << 4)
+	/* GUESS (based on r200) */
+#define R300_PRIM_COLOR_ORDER_BGRA              (0 << 6)
+#define R300_PRIM_COLOR_ORDER_RGBA              (1 << 6)
+#define R300_PRIM_NUM_VERTICES_SHIFT            16
+#define R300_PRIM_NUM_VERTICES_MASK             0xffff
+
+/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
+ * Two parameter dwords:
+ * 0. The first parameter appears to always be 0.
+ * 1. The second parameter is a standard primitive emission dword.
+ */
+#define R300_PACKET3_3D_DRAW_VBUF           0x00002800
+
+/* Specify the full set of vertex arrays as (address, stride).
+ * The first parameter is the number of vertex arrays specified.
+ * The rest of the command is a variable length list of blocks, where
+ * each block is three dwords long and specifies two arrays.
+ * The first dword of a block is split into two words: the less
+ * significant word refers to the first array, the more significant
+ * word to the second array in the block.
+ * The low byte of each word contains the size of an array entry in dwords,
+ * the high byte contains the stride of the array.
+ * The second dword of a block contains the pointer to the first array,
+ * the third dword of a block contains the pointer to the second array.
+ * Note that if the total number of arrays is odd, the third dword of
+ * the last block is omitted.
+ */
+#define R300_PACKET3_3D_LOAD_VBPNTR         0x00002F00
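+/* Example (not from the original header): how the first dword of a
+ * two-array block might be packed per the description above, with the
+ * entry size in the low byte and the stride in the high byte of each
+ * word; size0/stride0/size1/stride1 are hypothetical per-array values
+ * in dwords.
+ */
+#if 0 /* illustration only, not compiled */
+static inline u32 r300_example_vbpntr_dword0(u32 size0, u32 stride0,
+					     u32 size1, u32 stride1)
+{
+	return (size0 | (stride0 << 8)) | ((size1 | (stride1 << 8)) << 16);
+}
+#endif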
+
+#define R300_PACKET3_INDX_BUFFER            0x00003300
+#    define R300_EB_UNK1_SHIFT                      24
+#    define R300_EB_UNK1                    (0x80<<24)
+#    define R300_EB_UNK2                        0x0810
+#define R300_PACKET3_3D_DRAW_VBUF_2         0x00003400
+#define R300_PACKET3_3D_DRAW_INDX_2         0x00003600
+
+/* END: Packet 3 commands */
+
+
+/* Color formats for 2D packets */
+#define R300_CP_COLOR_FORMAT_CI8	2
+#define R300_CP_COLOR_FORMAT_ARGB1555	3
+#define R300_CP_COLOR_FORMAT_RGB565	4
+#define R300_CP_COLOR_FORMAT_ARGB8888	6
+#define R300_CP_COLOR_FORMAT_RGB332	7
+#define R300_CP_COLOR_FORMAT_RGB8	9
+#define R300_CP_COLOR_FORMAT_ARGB4444	15
+
+/*
+ * CP type-3 packets
+ */
+#define R300_CP_CMD_BITBLT_MULTI	0xC0009B00
+
+#define R500_VAP_INDEX_OFFSET		0x208c
+
+#define R500_GA_US_VECTOR_INDEX         0x4250
+#define R500_GA_US_VECTOR_DATA          0x4254
+
+#define R500_RS_IP_0                    0x4074
+#define R500_RS_INST_0                  0x4320
+
+#define R500_US_CONFIG                  0x4600
+
+#define R500_US_FC_CTRL			0x4624
+#define R500_US_CODE_ADDR		0x4630
+
+#define R500_RB3D_COLOR_CLEAR_VALUE_AR  0x46c0
+#define R500_RB3D_CONSTANT_COLOR_AR     0x4ef8
+
+#define R300_SU_REG_DEST                0x42c8
+#define RV530_FG_ZBREG_DEST             0x4be8
+#define R300_ZB_ZPASS_DATA              0x4f58
+#define R300_ZB_ZPASS_ADDR              0x4f5c
+
+#endif /* _R300_REG_H */
diff --git a/linux-imx/drivers/gpu/drm/radeon/r300d.h b/linux-imx/drivers/gpu/drm/radeon/r300d.h
new file mode 100644
index 0000000..ff229a0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r300d.h
@@ -0,0 +1,343 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R300D_H__
+#define __R300D_H__
+
+#define CP_PACKET0			0x00000000
+#define		PACKET0_BASE_INDEX_SHIFT	0
+#define		PACKET0_BASE_INDEX_MASK		(0x1ffff << 0)
+#define		PACKET0_COUNT_SHIFT		16
+#define		PACKET0_COUNT_MASK		(0x3fff << 16)
+#define CP_PACKET1			0x40000000
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+#define CP_PACKET3			0xC0000000
+#define		PACKET3_IT_OPCODE_SHIFT		8
+#define		PACKET3_IT_OPCODE_MASK		(0xff << 8)
+#define		PACKET3_COUNT_SHIFT		16
+#define		PACKET3_COUNT_MASK		(0x3fff << 16)
+/* PACKET3 op code */
+#define		PACKET3_NOP			0x10
+#define		PACKET3_3D_DRAW_VBUF		0x28
+#define		PACKET3_3D_DRAW_IMMD		0x29
+#define		PACKET3_3D_DRAW_INDX		0x2A
+#define		PACKET3_3D_LOAD_VBPNTR		0x2F
+#define		PACKET3_3D_CLEAR_ZMASK		0x32
+#define		PACKET3_INDX_BUFFER		0x33
+#define		PACKET3_3D_DRAW_VBUF_2		0x34
+#define		PACKET3_3D_DRAW_IMMD_2		0x35
+#define		PACKET3_3D_DRAW_INDX_2		0x36
+#define		PACKET3_3D_CLEAR_HIZ		0x37
+#define		PACKET3_3D_CLEAR_CMASK		0x38
+#define		PACKET3_BITBLT_MULTI		0x9B
+
+#define PACKET0(reg, n)	(CP_PACKET0 |					\
+			 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) |	\
+			 REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n)	(CP_PACKET3 |					\
+			 REG_SET(PACKET3_IT_OPCODE, (op)) |		\
+			 REG_SET(PACKET3_COUNT, (n)))
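+/* Example (not from the original header): the r420.c added later in
+ * this diff, e.g. r420_cp_errata_init(), emits PACKET0(R300_CP_RESYNC_ADDR, 1)
+ * followed by two data dwords, which suggests the count field encodes
+ * (number of dwords - 1). A sketch, where scratch_addr is a hypothetical
+ * scratch register offset:
+ */
+#if 0 /* illustration only, not compiled */
+	radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
+	radeon_ring_write(ring, scratch_addr);	/* first data dword */
+	radeon_ring_write(ring, 0xDEADBEEF);	/* second data dword */
+#endif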
+
+/* Registers */
+#define R_000148_MC_FB_LOCATION                      0x000148
+#define   S_000148_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000148_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000148_MC_FB_START                         0xFFFF0000
+#define   S_000148_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000148_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000148_MC_FB_TOP                           0x0000FFFF
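+/* Note (not from the original header): the S_/G_/C_ prefixes follow the
+ * usual radeon convention: S_ packs a field value, G_ extracts it, and
+ * C_ is the complement mask used to clear the field. A read-modify-write
+ * sketch, assuming the RREG32/WREG32 accessors from the radeon headers
+ * and a hypothetical new_start value:
+ */
+#if 0 /* illustration only, not compiled */
+	u32 tmp = RREG32(R_000148_MC_FB_LOCATION);
+	tmp &= C_000148_MC_FB_START;		/* clear MC_FB_START field */
+	tmp |= S_000148_MC_FB_START(new_start);	/* pack the new value */
+	WREG32(R_000148_MC_FB_LOCATION, tmp);
+#endif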
+#define R_00014C_MC_AGP_LOCATION                     0x00014C
+#define   S_00014C_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_00014C_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_00014C_MC_AGP_START                        0xFFFF0000
+#define   S_00014C_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_00014C_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_00014C_MC_AGP_TOP                          0x0000FFFF
+#define R_00015C_AGP_BASE_2                          0x00015C
+#define   S_00015C_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_00015C_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_00015C_AGP_BASE_ADDR_2                     0xFFFFFFF0
+#define R_000170_AGP_BASE                            0x000170
+#define   S_000170_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000170_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000170_AGP_BASE_ADDR                       0x00000000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
+
+#define R_00000D_SCLK_CNTL                           0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 3)
+#define   G_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 3) & 0x1)
+#define   C_00000D_CP_MAX_DYN_STOP_LAT                 0xFFFFFFF7
+#define   S_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 4)
+#define   G_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) >> 4) & 0x1)
+#define   C_00000D_HDP_MAX_DYN_STOP_LAT                0xFFFFFFEF
+#define   S_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 5)
+#define   G_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) >> 5) & 0x1)
+#define   C_00000D_TV_MAX_DYN_STOP_LAT                 0xFFFFFFDF
+#define   S_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 6)
+#define   G_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 6) & 0x1)
+#define   C_00000D_E2_MAX_DYN_STOP_LAT                 0xFFFFFFBF
+#define   S_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 7)
+#define   G_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) >> 7) & 0x1)
+#define   C_00000D_SE_MAX_DYN_STOP_LAT                 0xFFFFFF7F
+#define   S_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 8)
+#define   G_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 8) & 0x1)
+#define   C_00000D_IDCT_MAX_DYN_STOP_LAT               0xFFFFFEFF
+#define   S_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 9)
+#define   G_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) >> 9) & 0x1)
+#define   C_00000D_VIP_MAX_DYN_STOP_LAT                0xFFFFFDFF
+#define   S_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 10)
+#define   G_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) >> 10) & 0x1)
+#define   C_00000D_RE_MAX_DYN_STOP_LAT                 0xFFFFFBFF
+#define   S_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 11)
+#define   G_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) >> 11) & 0x1)
+#define   C_00000D_PB_MAX_DYN_STOP_LAT                 0xFFFFF7FF
+#define   S_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 12)
+#define   G_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) >> 12) & 0x1)
+#define   C_00000D_TAM_MAX_DYN_STOP_LAT                0xFFFFEFFF
+#define   S_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 13)
+#define   G_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) >> 13) & 0x1)
+#define   C_00000D_TDM_MAX_DYN_STOP_LAT                0xFFFFDFFF
+#define   S_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 14)
+#define   G_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) >> 14) & 0x1)
+#define   C_00000D_RB_MAX_DYN_STOP_LAT                 0xFFFFBFFF
+#define   S_00000D_FORCE_DISP2(x)                      (((x) & 0x1) << 15)
+#define   G_00000D_FORCE_DISP2(x)                      (((x) >> 15) & 0x1)
+#define   C_00000D_FORCE_DISP2                         0xFFFF7FFF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP1(x)                      (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP1(x)                      (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP1                         0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_SE(x)                         (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_SE(x)                         (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_SE                            0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_PB(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_PB(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_PB                            0xFDFFFFFF
+#define   S_00000D_FORCE_TAM(x)                        (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_TAM(x)                        (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_TAM                           0xFBFFFFFF
+#define   S_00000D_FORCE_TDM(x)                        (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TDM(x)                        (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TDM                           0xF7FFFFFF
+#define   S_00000D_FORCE_RB(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_RB                            0xEFFFFFFF
+#define   S_00000D_FORCE_TV_SCLK(x)                    (((x) & 0x1) << 29)
+#define   G_00000D_FORCE_TV_SCLK(x)                    (((x) >> 29) & 0x1)
+#define   C_00000D_FORCE_TV_SCLK                       0xDFFFFFFF
+#define   S_00000D_FORCE_SUBPIC(x)                     (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SUBPIC(x)                     (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SUBPIC                        0xBFFFFFFF
+#define   S_00000D_FORCE_OV0(x)                        (((x) & 0x1) << 31)
+#define   G_00000D_FORCE_OV0(x)                        (((x) >> 31) & 0x1)
+#define   C_00000D_FORCE_OV0                           0x7FFFFFFF
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/r420.c b/linux-imx/drivers/gpu/drm/radeon/r420.c
new file mode 100644
index 0000000..4e796ec
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r420.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "r100d.h"
+#include "r420d.h"
+#include "r420_reg_safe.h"
+
+void r420_pm_init_profile(struct radeon_device *rdev)
+{
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* mid sh */
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* mid mh */
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+static void r420_set_reg_safe(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
+}
+
+void r420_pipes_init(struct radeon_device *rdev)
+{
+	unsigned tmp;
+	unsigned gb_pipe_select;
+	unsigned num_pipes;
+
+	/* GA_ENHANCE workaround for the TCL deadlock issue */
+	WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
+	       (1 << 2) | (1 << 3));
+	/* add idle wait as per freedesktop.org bug 24041 */
+	if (r100_gui_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+	/* get max number of pipes */
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
+
+	/* SE chips have 1 pipe */
+	if ((rdev->pdev->device == 0x5e4c) ||
+	    (rdev->pdev->device == 0x5e4f))
+		num_pipes = 1;
+
+	rdev->num_gb_pipes = num_pipes;
+	tmp = 0;
+	switch (num_pipes) {
+	default:
+		/* force to 1 pipe */
+		num_pipes = 1;
+	case 1:
+		tmp = (0 << 1);
+		break;
+	case 2:
+		tmp = (3 << 1);
+		break;
+	case 3:
+		tmp = (6 << 1);
+		break;
+	case 4:
+		tmp = (7 << 1);
+		break;
+	}
+	WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
+	/* Sub-pixel precision of 1/12 so we can have 4K rendering, per the docs */
+	tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
+	WREG32(R300_GB_TILE_CONFIG, tmp);
+	if (r100_gui_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
+
+	WREG32(R300_RB2D_DSTCACHE_MODE,
+	       RREG32(R300_RB2D_DSTCACHE_MODE) |
+	       R300_DC_AUTOFLUSH_ENABLE |
+	       R300_DC_DC_DISABLE_IGNORE_PE);
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait GUI idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+
+	if (rdev->family == CHIP_RV530) {
+		tmp = RREG32(RV530_GB_PIPE_SELECT2);
+		if ((tmp & 3) == 3)
+			rdev->num_z_pipes = 2;
+		else
+			rdev->num_z_pipes = 1;
+	} else
+		rdev->num_z_pipes = 1;
+
+	DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
+		 rdev->num_gb_pipes, rdev->num_z_pipes);
+}
+
+u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
+{
+	u32 r;
+
+	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
+	r = RREG32(R_0001FC_MC_IND_DATA);
+	return r;
+}
+
+void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
+		S_0001F8_MC_IND_WR_EN(1));
+	WREG32(R_0001FC_MC_IND_DATA, v);
+}
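+
+/* A minimal usage sketch (not from the original source): MC registers
+ * behind the indirect interface can be read-modify-written through the
+ * two helpers above, e.g.:
+ *
+ *	tmp = r420_mc_rreg(rdev, reg);
+ *	r420_mc_wreg(rdev, reg, tmp | some_bit);
+ *
+ * where reg and some_bit are hypothetical.
+ */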
+
+static void r420_debugfs(struct radeon_device *rdev)
+{
+	if (r100_debugfs_rbbm_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+	}
+	if (r420_debugfs_pipes_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for pipes !\n");
+	}
+}
+
+static void r420_clock_resume(struct radeon_device *rdev)
+{
+	u32 sclk_cntl;
+
+	if (radeon_dynclks != -1 && radeon_dynclks)
+		radeon_atom_set_clock_gating(rdev, 1);
+	sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
+	sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+	if (rdev->family == CHIP_R420)
+		sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1);
+	WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
+}
+
+static void r420_cp_errata_init(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+	/* RV410 and R420 can lock up if CP DMA to host memory happens
+	 * while the 2D engine is busy.
+	 *
+	 * The proper workaround is to queue a RESYNC at the beginning
+	 * of the CP init, apparently.
+	 */
+	radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
+	radeon_ring_lock(rdev, ring, 8);
+	radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
+	radeon_ring_write(ring, rdev->config.r300.resync_scratch);
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
+}
+
+static void r420_cp_errata_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+	/* Catch the RESYNC we dispatched all the way back,
+	 * at the very beginning of the CP init.
+	 */
+	radeon_ring_lock(rdev, ring, 8);
+	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, R300_RB3D_DC_FINISH);
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
+}
+
+static int r420_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
+	r300_mc_program(rdev);
+	/* Resume clock */
+	r420_clock_resume(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+	r420_pipes_init(rdev);
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+	r420_cp_errata_init(rdev);
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int r420_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is not active */
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	r420_clock_resume(rdev);
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if the card is posted or not */
+	if (rdev->is_atom_bios) {
+		atom_asic_init(rdev->mode_info.atom_context);
+	} else {
+		radeon_combios_asic_init(rdev->ddev);
+	}
+	/* Resume clock after posting */
+	r420_clock_resume(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = r420_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int r420_suspend(struct radeon_device *rdev)
+{
+	r420_cp_errata_fini(rdev);
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_disable(rdev);
+	return 0;
+}
+
+void r420_fini(struct radeon_device *rdev)
+{
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	if (rdev->is_atom_bios) {
+		radeon_atombios_fini(rdev);
+	} else {
+		radeon_combios_fini(rdev);
+	}
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+int r420_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: disable VGA; need to use VGA request */
+	/* restore some register to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r) {
+			return r;
+		}
+	} else {
+		r = radeon_combios_init(rdev);
+		if (r) {
+			return r;
+		}
+	}
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if the card is posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r300_mc_init(rdev);
+	r420_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r) {
+		return r;
+	}
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r) {
+		return r;
+	}
+	if (rdev->family == CHIP_R420)
+		r100_enable_bm(rdev);
+
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	if (rdev->flags & RADEON_IS_PCI) {
+		r = r100_pci_gart_init(rdev);
+		if (r)
+			return r;
+	}
+	r420_set_reg_safe(rdev);
+
+	rdev->accel_working = true;
+	r = r420_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init, so stop acceleration */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCIE)
+			rv370_pcie_gart_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCI)
+			r100_pci_gart_fini(rdev);
+		radeon_agp_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(R400_GB_PIPE_SELECT);
+	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
+	tmp = RREG32(R300_GB_TILE_CONFIG);
+	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list r420_pipes_info_list[] = {
+	{"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
+};
+#endif
+
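+/*
+ * Registers the pipe-config dump above with the DRM debugfs support.
+ * With debugfs mounted at the usual location, the entry should appear as
+ * /sys/kernel/debug/dri/<minor>/r420_pipes_info.
+ */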
+int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r420_pipes_info_list,
+					ARRAY_SIZE(r420_pipes_info_list));
+#else
+	return 0;
+#endif
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/r420d.h b/linux-imx/drivers/gpu/drm/radeon/r420d.h
new file mode 100644
index 0000000..fc78d31
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r420d.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef R420D_H
+#define R420D_H
+
+#define R_0001F8_MC_IND_INDEX                        0x0001F8
+#define   S_0001F8_MC_IND_ADDR(x)                      (((x) & 0x7F) << 0)
+#define   G_0001F8_MC_IND_ADDR(x)                      (((x) >> 0) & 0x7F)
+#define   C_0001F8_MC_IND_ADDR                         0xFFFFFF80
+#define   S_0001F8_MC_IND_WR_EN(x)                     (((x) & 0x1) << 8)
+#define   G_0001F8_MC_IND_WR_EN(x)                     (((x) >> 8) & 0x1)
+#define   C_0001F8_MC_IND_WR_EN                        0xFFFFFEFF
+#define R_0001FC_MC_IND_DATA                         0x0001FC
+#define   S_0001FC_MC_IND_DATA(x)                      (((x) & 0xFFFFFFFF) << 0)
+#define   G_0001FC_MC_IND_DATA(x)                      (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0001FC_MC_IND_DATA                         0x00000000
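+/*
+ * The S_/G_/C_ macros follow the usual radeon register-description
+ * convention: S_<off>_<field>(x) shifts a value into the field,
+ * G_<off>_<field>(v) extracts it from a register value, and
+ * C_<off>_<field> is the AND-mask that clears it.  A hedged sketch of an
+ * indirect MC register write built from them (variable names illustrative):
+ *
+ *	WREG32(R_0001F8_MC_IND_INDEX,
+ *	       S_0001F8_MC_IND_ADDR(addr) | S_0001F8_MC_IND_WR_EN(1));
+ *	WREG32(R_0001FC_MC_IND_DATA, val);
+ *	WREG32(R_0001F8_MC_IND_INDEX, 0);
+ */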
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
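+/*
+ * Example (hedged sketch): poll the top-level CP busy bit until the
+ * command processor drains, in the style of the r100 wait-for-idle
+ * helpers:
+ *
+ *	for (i = 0; i < rdev->usec_timeout; i++) {
+ *		if (!G_0007C0_CP_BUSY(RREG32(R_0007C0_CP_STAT)))
+ *			return 0;
+ *		udelay(1);
+ *	}
+ *	return -1;
+ */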
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+
+/* CLK registers */
+#define R_00000D_SCLK_CNTL                           0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 3)
+#define   G_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 3) & 0x1)
+#define   C_00000D_CP_MAX_DYN_STOP_LAT                 0xFFFFFFF7
+#define   S_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 4)
+#define   G_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) >> 4) & 0x1)
+#define   C_00000D_HDP_MAX_DYN_STOP_LAT                0xFFFFFFEF
+#define   S_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 5)
+#define   G_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) >> 5) & 0x1)
+#define   C_00000D_TV_MAX_DYN_STOP_LAT                 0xFFFFFFDF
+#define   S_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 6)
+#define   G_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 6) & 0x1)
+#define   C_00000D_E2_MAX_DYN_STOP_LAT                 0xFFFFFFBF
+#define   S_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 7)
+#define   G_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) >> 7) & 0x1)
+#define   C_00000D_SE_MAX_DYN_STOP_LAT                 0xFFFFFF7F
+#define   S_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 8)
+#define   G_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 8) & 0x1)
+#define   C_00000D_IDCT_MAX_DYN_STOP_LAT               0xFFFFFEFF
+#define   S_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 9)
+#define   G_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) >> 9) & 0x1)
+#define   C_00000D_VIP_MAX_DYN_STOP_LAT                0xFFFFFDFF
+#define   S_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 10)
+#define   G_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) >> 10) & 0x1)
+#define   C_00000D_RE_MAX_DYN_STOP_LAT                 0xFFFFFBFF
+#define   S_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 11)
+#define   G_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) >> 11) & 0x1)
+#define   C_00000D_PB_MAX_DYN_STOP_LAT                 0xFFFFF7FF
+#define   S_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 12)
+#define   G_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) >> 12) & 0x1)
+#define   C_00000D_TAM_MAX_DYN_STOP_LAT                0xFFFFEFFF
+#define   S_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 13)
+#define   G_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) >> 13) & 0x1)
+#define   C_00000D_TDM_MAX_DYN_STOP_LAT                0xFFFFDFFF
+#define   S_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 14)
+#define   G_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) >> 14) & 0x1)
+#define   C_00000D_RB_MAX_DYN_STOP_LAT                 0xFFFFBFFF
+#define   S_00000D_FORCE_DISP2(x)                      (((x) & 0x1) << 15)
+#define   G_00000D_FORCE_DISP2(x)                      (((x) >> 15) & 0x1)
+#define   C_00000D_FORCE_DISP2                         0xFFFF7FFF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP1(x)                      (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP1(x)                      (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP1                         0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_VAP(x)                        (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_VAP(x)                        (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_VAP                           0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_SR(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_SR(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_SR                            0xFDFFFFFF
+#define   S_00000D_FORCE_PX(x)                         (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_PX(x)                         (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_PX                            0xFBFFFFFF
+#define   S_00000D_FORCE_TX(x)                         (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TX(x)                         (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TX                            0xF7FFFFFF
+#define   S_00000D_FORCE_US(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_US(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_US                            0xEFFFFFFF
+#define   S_00000D_FORCE_TV_SCLK(x)                    (((x) & 0x1) << 29)
+#define   G_00000D_FORCE_TV_SCLK(x)                    (((x) >> 29) & 0x1)
+#define   C_00000D_FORCE_TV_SCLK                       0xDFFFFFFF
+#define   S_00000D_FORCE_SU(x)                         (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SU(x)                         (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SU                            0xBFFFFFFF
+#define   S_00000D_FORCE_OV0(x)                        (((x) & 0x1) << 31)
+#define   G_00000D_FORCE_OV0(x)                        (((x) >> 31) & 0x1)
+#define   C_00000D_FORCE_OV0                           0x7FFFFFFF
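+/*
+ * The FORCE_* bits override dynamic clock gating for individual blocks.
+ * r420_clock_resume() in r420.c uses them in a read-modify-write on this
+ * PLL-indexed register, roughly:
+ *
+ *	sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
+ *	sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+ *	WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
+ */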
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/r500_reg.h b/linux-imx/drivers/gpu/drm/radeon/r500_reg.h
new file mode 100644
index 0000000..1dd0d32
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r500_reg.h
@@ -0,0 +1,800 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R500_REG_H__
+#define __R500_REG_H__
+
+/* pipe config regs */
+#define R300_GA_POLY_MODE				0x4288
+#       define R300_FRONT_PTYPE_POINT                   (0 << 4)
+#       define R300_FRONT_PTYPE_LINE                    (1 << 4)
+#       define R300_FRONT_PTYPE_TRIANGE                 (2 << 4)
+#       define R300_BACK_PTYPE_POINT                    (0 << 7)
+#       define R300_BACK_PTYPE_LINE                     (1 << 7)
+#       define R300_BACK_PTYPE_TRIANGE                  (2 << 7)
+#define R300_GA_ROUND_MODE				0x428c
+#       define R300_GEOMETRY_ROUND_TRUNC                (0 << 0)
+#       define R300_GEOMETRY_ROUND_NEAREST              (1 << 0)
+#       define R300_COLOR_ROUND_TRUNC                   (0 << 2)
+#       define R300_COLOR_ROUND_NEAREST                 (1 << 2)
+#define R300_GB_MSPOS0				        0x4010
+#       define R300_MS_X0_SHIFT                         0
+#       define R300_MS_Y0_SHIFT                         4
+#       define R300_MS_X1_SHIFT                         8
+#       define R300_MS_Y1_SHIFT                         12
+#       define R300_MS_X2_SHIFT                         16
+#       define R300_MS_Y2_SHIFT                         20
+#       define R300_MSBD0_Y_SHIFT                       24
+#       define R300_MSBD0_X_SHIFT                       28
+#define R300_GB_MSPOS1				        0x4014
+#       define R300_MS_X3_SHIFT                         0
+#       define R300_MS_Y3_SHIFT                         4
+#       define R300_MS_X4_SHIFT                         8
+#       define R300_MS_Y4_SHIFT                         12
+#       define R300_MS_X5_SHIFT                         16
+#       define R300_MS_Y5_SHIFT                         20
+#       define R300_MSBD1_SHIFT                         24
+
+#define R300_GA_ENHANCE				        0x4274
+#       define R300_GA_DEADLOCK_CNTL                    (1 << 0)
+#       define R300_GA_FASTSYNC_CNTL                    (1 << 1)
+#define R300_RB3D_DSTCACHE_CTLSTAT              0x4e4c
+#	define R300_RB3D_DC_FLUSH		(2 << 0)
+#	define R300_RB3D_DC_FREE		(2 << 2)
+#	define R300_RB3D_DC_FINISH		(1 << 4)
+#define R300_RB3D_ZCACHE_CTLSTAT			0x4f18
+#       define R300_ZC_FLUSH                            (1 << 0)
+#       define R300_ZC_FREE                             (1 << 1)
+#       define R300_ZC_FLUSH_ALL                        0x3
+#define R400_GB_PIPE_SELECT             0x402c
+#define R500_DYN_SCLK_PWMEM_PIPE        0x000d /* PLL */
+#define R500_SU_REG_DEST                0x42c8
+#define R300_GB_TILE_CONFIG             0x4018
+#       define R300_ENABLE_TILING       (1 << 0)
+#       define R300_PIPE_COUNT_RV350    (0 << 1)
+#       define R300_PIPE_COUNT_R300     (3 << 1)
+#       define R300_PIPE_COUNT_R420_3P  (6 << 1)
+#       define R300_PIPE_COUNT_R420     (7 << 1)
+#       define R300_TILE_SIZE_8         (0 << 4)
+#       define R300_TILE_SIZE_16        (1 << 4)
+#       define R300_TILE_SIZE_32        (2 << 4)
+#       define R300_SUBPIXEL_1_12       (0 << 16)
+#       define R300_SUBPIXEL_1_16       (1 << 16)
+#define R300_DST_PIPE_CONFIG            0x170c
+#       define R300_PIPE_AUTO_CONFIG    (1 << 31)
+#define R300_RB2D_DSTCACHE_MODE         0x3428
+#       define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
+#       define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
+
+#define RADEON_CP_STAT		0x7C0
+#define RADEON_RBBM_CMDFIFO_ADDR	0xE70
+#define RADEON_RBBM_CMDFIFO_DATA	0xE74
+#define RADEON_ISYNC_CNTL		0x1724
+#	define RADEON_ISYNC_ANY2D_IDLE3D	(1 << 0)
+#	define RADEON_ISYNC_ANY3D_IDLE2D	(1 << 1)
+#	define RADEON_ISYNC_TRIG2D_IDLE3D	(1 << 2)
+#	define RADEON_ISYNC_TRIG3D_IDLE2D	(1 << 3)
+#	define RADEON_ISYNC_WAIT_IDLEGUI	(1 << 4)
+#	define RADEON_ISYNC_CPSCRATCH_IDLEGUI	(1 << 5)
+
+#define RS480_NB_MC_INDEX               0x168
+#	define RS480_NB_MC_IND_WR_EN	(1 << 8)
+#define RS480_NB_MC_DATA                0x16c
+
+/*
+ * RS690
+ */
+#define RS690_MCCFG_FB_LOCATION		0x100
+#define		RS690_MC_FB_START_MASK		0x0000FFFF
+#define		RS690_MC_FB_START_SHIFT		0
+#define		RS690_MC_FB_TOP_MASK		0xFFFF0000
+#define		RS690_MC_FB_TOP_SHIFT		16
+#define RS690_MCCFG_AGP_LOCATION	0x101
+#define		RS690_MC_AGP_START_MASK		0x0000FFFF
+#define		RS690_MC_AGP_START_SHIFT	0
+#define		RS690_MC_AGP_TOP_MASK		0xFFFF0000
+#define		RS690_MC_AGP_TOP_SHIFT		16
+#define RS690_MCCFG_AGP_BASE		0x102
+#define RS690_MCCFG_AGP_BASE_2		0x103
+#define RS690_MC_INIT_MISC_LAT_TIMER            0x104
+#define RS690_HDP_FB_LOCATION		0x0134
+#define RS690_MC_INDEX				0x78
+#	define RS690_MC_INDEX_MASK		0x1ff
+#	define RS690_MC_INDEX_WR_EN		(1 << 9)
+#	define RS690_MC_INDEX_WR_ACK		0x7f
+#define RS690_MC_DATA				0x7c
+#define RS690_MC_STATUS                         0x90
+#define RS690_MC_STATUS_IDLE                    (1 << 0)
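+/*
+ * RS690 MC registers sit behind the MC_INDEX/MC_DATA pair.  A hedged
+ * sketch of the indirect read used by the rs690 MC accessors:
+ *
+ *	WREG32(RS690_MC_INDEX, reg & RS690_MC_INDEX_MASK);
+ *	r = RREG32(RS690_MC_DATA);
+ *	WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
+ */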
+#define RS480_AGP_BASE_2		0x0164
+#define RS480_MC_MISC_CNTL              0x18
+#	define RS480_DISABLE_GTW	(1 << 1)
+#	define RS480_GART_INDEX_REG_EN	(1 << 12)
+#	define RS690_BLOCK_GFX_D3_EN	(1 << 14)
+#define RS480_GART_FEATURE_ID           0x2b
+#	define RS480_HANG_EN	        (1 << 11)
+#	define RS480_TLB_ENABLE	        (1 << 18)
+#	define RS480_P2P_ENABLE	        (1 << 19)
+#	define RS480_GTW_LAC_EN	        (1 << 25)
+#	define RS480_2LEVEL_GART	(0 << 30)
+#	define RS480_1LEVEL_GART	(1 << 30)
+#	define RS480_PDC_EN	        (1 << 31)
+#define RS480_GART_BASE                 0x2c
+#define RS480_GART_CACHE_CNTRL          0x2e
+#	define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
+#define RS480_AGP_ADDRESS_SPACE_SIZE    0x38
+#	define RS480_GART_EN	        (1 << 0)
+#	define RS480_VA_SIZE_32MB	(0 << 1)
+#	define RS480_VA_SIZE_64MB	(1 << 1)
+#	define RS480_VA_SIZE_128MB	(2 << 1)
+#	define RS480_VA_SIZE_256MB	(3 << 1)
+#	define RS480_VA_SIZE_512MB	(4 << 1)
+#	define RS480_VA_SIZE_1GB	(5 << 1)
+#	define RS480_VA_SIZE_2GB	(6 << 1)
+#define RS480_AGP_MODE_CNTL             0x39
+#	define RS480_POST_GART_Q_SIZE	(1 << 18)
+#	define RS480_NONGART_SNOOP	(1 << 19)
+#	define RS480_AGP_RD_BUF_SIZE	(1 << 20)
+#	define RS480_REQ_TYPE_SNOOP_SHIFT 22
+#	define RS480_REQ_TYPE_SNOOP_MASK  0x3
+#	define RS480_REQ_TYPE_SNOOP_DIS	(1 << 24)
+
+#define RS690_AIC_CTRL_SCRATCH		0x3A
+#	define RS690_DIS_OUT_OF_PCI_GART_ACCESS	(1 << 1)
+
+/*
+ * RS600
+ */
+#define RS600_MC_STATUS                         0x0
+#define RS600_MC_STATUS_IDLE                    (1 << 0)
+#       define RS600_MC_IDLE                    (1 << 1)
+#define RS600_MC_INDEX                          0x70
+#       define RS600_MC_ADDR_MASK               0xffff
+#       define RS600_MC_IND_SEQ_RBS_0           (1 << 16)
+#       define RS600_MC_IND_SEQ_RBS_1           (1 << 17)
+#       define RS600_MC_IND_SEQ_RBS_2           (1 << 18)
+#       define RS600_MC_IND_SEQ_RBS_3           (1 << 19)
+#       define RS600_MC_IND_AIC_RBS             (1 << 20)
+#       define RS600_MC_IND_CITF_ARB0           (1 << 21)
+#       define RS600_MC_IND_CITF_ARB1           (1 << 22)
+#       define RS600_MC_IND_WR_EN               (1 << 23)
+#define RS600_MC_DATA                           0x74
+#define RS600_MC_FB_LOCATION                    0x4
+#define		RS600_MC_FB_START_MASK		0x0000FFFF
+#define		RS600_MC_FB_START_SHIFT		0
+#define		RS600_MC_FB_TOP_MASK		0xFFFF0000
+#define		RS600_MC_FB_TOP_SHIFT		16
+#define RS600_MC_AGP_LOCATION                   0x5
+#define		RS600_MC_AGP_START_MASK		0x0000FFFF
+#define		RS600_MC_AGP_START_SHIFT	0
+#define		RS600_MC_AGP_TOP_MASK		0xFFFF0000
+#define		RS600_MC_AGP_TOP_SHIFT		16
+#define RS600_MC_AGP_BASE                          0x6
+#define RS600_MC_AGP_BASE_2                        0x7
+#define RS600_MC_CNTL1                          0x9
+#       define RS600_ENABLE_PAGE_TABLES         (1 << 26)
+#define RS600_MC_PT0_CNTL                       0x100
+#       define RS600_ENABLE_PT                  (1 << 0)
+#       define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15)
+#       define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21)
+#       define RS600_INVALIDATE_ALL_L1_TLBS     (1 << 28)
+#       define RS600_INVALIDATE_L2_CACHE        (1 << 29)
+#define RS600_MC_PT0_CONTEXT0_CNTL              0x102
+#       define RS600_ENABLE_PAGE_TABLE          (1 << 0)
+#       define RS600_PAGE_TABLE_TYPE_FLAT       (0 << 1)
+#define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR   0x112
+#define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR  0x114
+#define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c
+#define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR    0x12c
+#define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR   0x13c
+#define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR     0x14c
+#define RS600_MC_PT0_CLIENT0_CNTL               0x16c
+#       define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE       (1 << 0)
+#       define RS600_TRANSLATION_MODE_OVERRIDE              (1 << 1)
+#       define RS600_SYSTEM_ACCESS_MODE_MASK                (3 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_PA_ONLY             (0 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP         (1 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_IN_SYS              (2 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS          (3 << 8)
+#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH        (0 << 10)
+#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE       (1 << 10)
+#       define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11)
+#       define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14)
+#       define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
+#       define RS600_INVALIDATE_L1_TLB          (1 << 20)
+/* rs600/rs690/rs740 */
+#	define RS600_BUS_MASTER_DIS		(1 << 14)
+#	define RS600_MSI_REARM		        (1 << 20)
+/* see RS400_MSI_REARM in AIC_CNTL for rs480 */
+
+
+
+#define RV515_MC_FB_LOCATION		0x01
+#define		RV515_MC_FB_START_MASK		0x0000FFFF
+#define		RV515_MC_FB_START_SHIFT		0
+#define		RV515_MC_FB_TOP_MASK		0xFFFF0000
+#define		RV515_MC_FB_TOP_SHIFT		16
+#define RV515_MC_AGP_LOCATION		0x02
+#define		RV515_MC_AGP_START_MASK		0x0000FFFF
+#define		RV515_MC_AGP_START_SHIFT	0
+#define		RV515_MC_AGP_TOP_MASK		0xFFFF0000
+#define		RV515_MC_AGP_TOP_SHIFT		16
+#define RV515_MC_AGP_BASE		0x03
+#define RV515_MC_AGP_BASE_2		0x04
+
+#define R520_MC_FB_LOCATION		0x04
+#define		R520_MC_FB_START_MASK		0x0000FFFF
+#define		R520_MC_FB_START_SHIFT		0
+#define		R520_MC_FB_TOP_MASK		0xFFFF0000
+#define		R520_MC_FB_TOP_SHIFT		16
+#define R520_MC_AGP_LOCATION		0x05
+#define		R520_MC_AGP_START_MASK		0x0000FFFF
+#define		R520_MC_AGP_START_SHIFT		0
+#define		R520_MC_AGP_TOP_MASK		0xFFFF0000
+#define		R520_MC_AGP_TOP_SHIFT		16
+#define R520_MC_AGP_BASE		0x06
+#define R520_MC_AGP_BASE_2		0x07
+
+
+#define AVIVO_MC_INDEX						0x0070
+#define R520_MC_STATUS 0x00
+#define R520_MC_STATUS_IDLE (1<<1)
+#define RV515_MC_STATUS 0x08
+#define RV515_MC_STATUS_IDLE (1<<4)
+#define RV515_MC_INIT_MISC_LAT_TIMER            0x09
+#define AVIVO_MC_DATA						0x0074
+
+#define R520_MC_IND_INDEX 0x70
+#define R520_MC_IND_WR_EN (1 << 24)
+#define R520_MC_IND_DATA  0x74
+
+#define RV515_MC_CNTL          0x5
+#	define RV515_MEM_NUM_CHANNELS_MASK  0x3
+#define R520_MC_CNTL0          0x8
+#	define R520_MEM_NUM_CHANNELS_MASK  (0x3 << 24)
+#	define R520_MEM_NUM_CHANNELS_SHIFT  24
+#	define R520_MC_CHANNEL_SIZE  (1 << 23)
+
+#define AVIVO_CP_DYN_CNTL                              0x000f /* PLL */
+#       define AVIVO_CP_FORCEON                        (1 << 0)
+#define AVIVO_E2_DYN_CNTL                              0x0011 /* PLL */
+#       define AVIVO_E2_FORCEON                        (1 << 0)
+#define AVIVO_IDCT_DYN_CNTL                            0x0013 /* PLL */
+#       define AVIVO_IDCT_FORCEON                      (1 << 0)
+
+#define AVIVO_HDP_FB_LOCATION 0x134
+
+#define AVIVO_VGA_RENDER_CONTROL				0x0300
+#       define AVIVO_VGA_VSTATUS_CNTL_MASK                      (3 << 16)
+#define AVIVO_D1VGA_CONTROL					0x0330
+#       define AVIVO_DVGA_CONTROL_MODE_ENABLE (1<<0)
+#       define AVIVO_DVGA_CONTROL_TIMING_SELECT (1<<8)
+#       define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1<<9)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1<<10)
+#       define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1<<16)
+#       define AVIVO_DVGA_CONTROL_ROTATE (1<<24)
+#define AVIVO_D2VGA_CONTROL					0x0338
+
+#define AVIVO_EXT1_PPLL_REF_DIV_SRC                             0x400
+#define AVIVO_EXT1_PPLL_REF_DIV                                 0x404
+#define AVIVO_EXT1_PPLL_UPDATE_LOCK                             0x408
+#define AVIVO_EXT1_PPLL_UPDATE_CNTL                             0x40c
+
+#define AVIVO_EXT2_PPLL_REF_DIV_SRC                             0x410
+#define AVIVO_EXT2_PPLL_REF_DIV                                 0x414
+#define AVIVO_EXT2_PPLL_UPDATE_LOCK                             0x418
+#define AVIVO_EXT2_PPLL_UPDATE_CNTL                             0x41c
+
+#define AVIVO_EXT1_PPLL_FB_DIV                                   0x430
+#define AVIVO_EXT2_PPLL_FB_DIV                                   0x434
+
+#define AVIVO_EXT1_PPLL_POST_DIV_SRC                                 0x438
+#define AVIVO_EXT1_PPLL_POST_DIV                                     0x43c
+
+#define AVIVO_EXT2_PPLL_POST_DIV_SRC                                 0x440
+#define AVIVO_EXT2_PPLL_POST_DIV                                     0x444
+
+#define AVIVO_EXT1_PPLL_CNTL                                    0x448
+#define AVIVO_EXT2_PPLL_CNTL                                    0x44c
+
+#define AVIVO_P1PLL_CNTL                                        0x450
+#define AVIVO_P2PLL_CNTL                                        0x454
+#define AVIVO_P1PLL_INT_SS_CNTL                                 0x458
+#define AVIVO_P2PLL_INT_SS_CNTL                                 0x45c
+#define AVIVO_P1PLL_TMDSA_CNTL                                  0x460
+#define AVIVO_P2PLL_LVTMA_CNTL                                  0x464
+
+#define AVIVO_PCLK_CRTC1_CNTL                                   0x480
+#define AVIVO_PCLK_CRTC2_CNTL                                   0x484
+
+#define AVIVO_D1CRTC_H_TOTAL					0x6000
+#define AVIVO_D1CRTC_H_BLANK_START_END                          0x6004
+#define AVIVO_D1CRTC_H_SYNC_A                                   0x6008
+#define AVIVO_D1CRTC_H_SYNC_A_CNTL                              0x600c
+#define AVIVO_D1CRTC_H_SYNC_B                                   0x6010
+#define AVIVO_D1CRTC_H_SYNC_B_CNTL                              0x6014
+
+#define AVIVO_D1CRTC_V_TOTAL					0x6020
+#define AVIVO_D1CRTC_V_BLANK_START_END                          0x6024
+#define AVIVO_D1CRTC_V_SYNC_A                                   0x6028
+#define AVIVO_D1CRTC_V_SYNC_A_CNTL                              0x602c
+#define AVIVO_D1CRTC_V_SYNC_B                                   0x6030
+#define AVIVO_D1CRTC_V_SYNC_B_CNTL                              0x6034
+
+#define AVIVO_D1CRTC_CONTROL                                    0x6080
+#       define AVIVO_CRTC_EN                                    (1 << 0)
+#       define AVIVO_CRTC_DISP_READ_REQUEST_DISABLE             (1 << 24)
+#define AVIVO_D1CRTC_BLANK_CONTROL                              0x6084
+#define AVIVO_D1CRTC_INTERLACE_CONTROL                          0x6088
+#define AVIVO_D1CRTC_INTERLACE_STATUS                           0x608c
+#define AVIVO_D1CRTC_STATUS                                     0x609c
+#       define AVIVO_D1CRTC_V_BLANK                             (1 << 0)
+#define AVIVO_D1CRTC_STATUS_POSITION                            0x60a0
+#define AVIVO_D1CRTC_FRAME_COUNT                                0x60a4
+#define AVIVO_D1CRTC_STATUS_HV_COUNT                            0x60ac
+#define AVIVO_D1CRTC_STEREO_CONTROL                             0x60c4
+
+#define AVIVO_D1MODE_MASTER_UPDATE_LOCK                         0x60e0
+#define AVIVO_D1MODE_MASTER_UPDATE_MODE                         0x60e4
+#define AVIVO_D1CRTC_UPDATE_LOCK                                0x60e8
+
+/* master controls */
+#define AVIVO_DC_CRTC_MASTER_EN                                 0x60f8
+#define AVIVO_DC_CRTC_TV_CONTROL                                0x60fc
+
+#define AVIVO_D1GRPH_ENABLE                                     0x6100
+#define AVIVO_D1GRPH_CONTROL                                    0x6104
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_8BPP                  (0 << 0)
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_16BPP                 (1 << 0)
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_32BPP                 (2 << 0)
+#       define AVIVO_D1GRPH_CONTROL_DEPTH_64BPP                 (3 << 0)
+
+#       define AVIVO_D1GRPH_CONTROL_8BPP_INDEXED                (0 << 8)
+
+#       define AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555              (0 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_RGB565                (1 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444              (2 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_AI88                  (3 << 8)
+#       define AVIVO_D1GRPH_CONTROL_16BPP_MONO16                (4 << 8)
+
+#       define AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888              (0 << 8)
+#       define AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010           (1 << 8)
+#       define AVIVO_D1GRPH_CONTROL_32BPP_DIGITAL               (2 << 8)
+#       define AVIVO_D1GRPH_CONTROL_32BPP_8B_ARGB2101010        (3 << 8)
+
+
+#       define AVIVO_D1GRPH_CONTROL_64BPP_ARGB16161616          (0 << 8)
+
+#       define AVIVO_D1GRPH_SWAP_RB                             (1 << 16)
+#       define AVIVO_D1GRPH_TILED                               (1 << 20)
+#       define AVIVO_D1GRPH_MACRO_ADDRESS_MODE                  (1 << 21)
+
+#       define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL            (0 << 20)
+#       define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED            (1 << 20)
+#       define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1            (2 << 20)
+#       define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1            (4 << 20)
+
+/* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2
+ * block and vice versa.  This applies to GRPH, CUR, etc.
+ */
+#define AVIVO_D1GRPH_LUT_SEL                                    0x6108
+#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
+#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x6914
+#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x6114
+#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS                  0x6118
+#define R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x691c
+#define R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH              0x611c
+#define AVIVO_D1GRPH_PITCH                                      0x6120
+#define AVIVO_D1GRPH_SURFACE_OFFSET_X                           0x6124
+#define AVIVO_D1GRPH_SURFACE_OFFSET_Y                           0x6128
+#define AVIVO_D1GRPH_X_START                                    0x612c
+#define AVIVO_D1GRPH_Y_START                                    0x6130
+#define AVIVO_D1GRPH_X_END                                      0x6134
+#define AVIVO_D1GRPH_Y_END                                      0x6138
+#define AVIVO_D1GRPH_UPDATE                                     0x6144
+#       define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING              (1 << 2)
+#       define AVIVO_D1GRPH_UPDATE_LOCK                         (1 << 16)
+#define AVIVO_D1GRPH_FLIP_CONTROL                               0x6148
+#       define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN         (1 << 0)
+
+#define AVIVO_D1CUR_CONTROL                     0x6400
+#       define AVIVO_D1CURSOR_EN                (1 << 0)
+#       define AVIVO_D1CURSOR_MODE_SHIFT        8
+#       define AVIVO_D1CURSOR_MODE_MASK         (3 << 8)
+#       define AVIVO_D1CURSOR_MODE_24BPP        2
+#define AVIVO_D1CUR_SURFACE_ADDRESS             0x6408
+#define R700_D1CUR_SURFACE_ADDRESS_HIGH         0x6c0c
+#define R700_D2CUR_SURFACE_ADDRESS_HIGH         0x640c
+#define AVIVO_D1CUR_SIZE                        0x6410
+#define AVIVO_D1CUR_POSITION                    0x6414
+#define AVIVO_D1CUR_HOT_SPOT                    0x6418
+#define AVIVO_D1CUR_UPDATE                      0x6424
+#       define AVIVO_D1CURSOR_UPDATE_LOCK       (1 << 16)
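+/*
+ * Cursor programming is latched: display code sets the update lock while
+ * reprogramming position/size and clears it again to let the new values
+ * take effect, roughly:
+ *
+ *	cur_lock = RREG32(AVIVO_D1CUR_UPDATE + crtc_offset);
+ *	cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
+ *	WREG32(AVIVO_D1CUR_UPDATE + crtc_offset, cur_lock);
+ */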
+
+#define AVIVO_DC_LUT_RW_SELECT                  0x6480
+#define AVIVO_DC_LUT_RW_MODE                    0x6484
+#define AVIVO_DC_LUT_RW_INDEX                   0x6488
+#define AVIVO_DC_LUT_SEQ_COLOR                  0x648c
+#define AVIVO_DC_LUT_PWL_DATA                   0x6490
+#define AVIVO_DC_LUT_30_COLOR                   0x6494
+#define AVIVO_DC_LUT_READ_PIPE_SELECT           0x6498
+#define AVIVO_DC_LUT_WRITE_EN_MASK              0x649c
+#define AVIVO_DC_LUT_AUTOFILL                   0x64a0
+
+#define AVIVO_DC_LUTA_CONTROL                   0x64c0
+#define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE         0x64c4
+#define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN        0x64c8
+#define AVIVO_DC_LUTA_BLACK_OFFSET_RED          0x64cc
+#define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE         0x64d0
+#define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN        0x64d4
+#define AVIVO_DC_LUTA_WHITE_OFFSET_RED          0x64d8
+
+#define AVIVO_DC_LB_MEMORY_SPLIT                0x6520
+#       define AVIVO_DC_LB_MEMORY_SPLIT_MASK    0x3
+#       define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT   0
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF  0
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q    1
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY        2
+#       define AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q    3
+#       define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
+#       define AVIVO_DC_LB_DISP1_END_ADR_SHIFT  4
+#       define AVIVO_DC_LB_DISP1_END_ADR_MASK   0x7ff
+
+#define AVIVO_D1MODE_DATA_FORMAT                0x6528
+#       define AVIVO_D1MODE_INTERLEAVE_EN       (1 << 0)
+#define AVIVO_D1MODE_DESKTOP_HEIGHT             0x652C
+#define AVIVO_D1MODE_VBLANK_STATUS              0x6534
+#       define AVIVO_VBLANK_ACK                 (1 << 4)
+#define AVIVO_D1MODE_VLINE_START_END            0x6538
+#define AVIVO_D1MODE_VLINE_STATUS               0x653c
+#       define AVIVO_D1MODE_VLINE_STAT          (1 << 12)
+#define AVIVO_DxMODE_INT_MASK                   0x6540
+#       define AVIVO_D1MODE_INT_MASK            (1 << 0)
+#       define AVIVO_D2MODE_INT_MASK            (1 << 8)
+#define AVIVO_D1MODE_VIEWPORT_START             0x6580
+#define AVIVO_D1MODE_VIEWPORT_SIZE              0x6584
+#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT    0x6588
+#define AVIVO_D1MODE_EXT_OVERSCAN_TOP_BOTTOM    0x658c
+
+#define AVIVO_D1SCL_SCALER_ENABLE               0x6590
+#define AVIVO_D1SCL_SCALER_TAP_CONTROL		0x6594
+#define AVIVO_D1SCL_UPDATE                      0x65cc
+#       define AVIVO_D1SCL_UPDATE_LOCK          (1 << 16)
+
+/* second crtc */
+#define AVIVO_D2CRTC_H_TOTAL					0x6800
+#define AVIVO_D2CRTC_H_BLANK_START_END                          0x6804
+#define AVIVO_D2CRTC_H_SYNC_A                                   0x6808
+#define AVIVO_D2CRTC_H_SYNC_A_CNTL                              0x680c
+#define AVIVO_D2CRTC_H_SYNC_B                                   0x6810
+#define AVIVO_D2CRTC_H_SYNC_B_CNTL                              0x6814
+
+#define AVIVO_D2CRTC_V_TOTAL					0x6820
+#define AVIVO_D2CRTC_V_BLANK_START_END                          0x6824
+#define AVIVO_D2CRTC_V_SYNC_A                                   0x6828
+#define AVIVO_D2CRTC_V_SYNC_A_CNTL                              0x682c
+#define AVIVO_D2CRTC_V_SYNC_B                                   0x6830
+#define AVIVO_D2CRTC_V_SYNC_B_CNTL                              0x6834
+
+#define AVIVO_D2CRTC_CONTROL                                    0x6880
+#define AVIVO_D2CRTC_BLANK_CONTROL                              0x6884
+#define AVIVO_D2CRTC_INTERLACE_CONTROL                          0x6888
+#define AVIVO_D2CRTC_INTERLACE_STATUS                           0x688c
+#define AVIVO_D2CRTC_STATUS_POSITION                            0x68a0
+#define AVIVO_D2CRTC_FRAME_COUNT                                0x68a4
+#define AVIVO_D2CRTC_STEREO_CONTROL                             0x68c4
+
+#define AVIVO_D2GRPH_ENABLE                                     0x6900
+#define AVIVO_D2GRPH_CONTROL                                    0x6904
+#define AVIVO_D2GRPH_LUT_SEL                                    0x6908
+#define AVIVO_D2GRPH_PRIMARY_SURFACE_ADDRESS                    0x6910
+#define AVIVO_D2GRPH_SECONDARY_SURFACE_ADDRESS                  0x6918
+#define AVIVO_D2GRPH_PITCH                                      0x6920
+#define AVIVO_D2GRPH_SURFACE_OFFSET_X                           0x6924
+#define AVIVO_D2GRPH_SURFACE_OFFSET_Y                           0x6928
+#define AVIVO_D2GRPH_X_START                                    0x692c
+#define AVIVO_D2GRPH_Y_START                                    0x6930
+#define AVIVO_D2GRPH_X_END                                      0x6934
+#define AVIVO_D2GRPH_Y_END                                      0x6938
+#define AVIVO_D2GRPH_UPDATE                                     0x6944
+#define AVIVO_D2GRPH_FLIP_CONTROL                               0x6948
+
+#define AVIVO_D2CUR_CONTROL                     0x6c00
+#define AVIVO_D2CUR_SURFACE_ADDRESS             0x6c08
+#define AVIVO_D2CUR_SIZE                        0x6c10
+#define AVIVO_D2CUR_POSITION                    0x6c14
+
+#define AVIVO_D2MODE_VBLANK_STATUS              0x6d34
+#define AVIVO_D2MODE_VLINE_START_END            0x6d38
+#define AVIVO_D2MODE_VLINE_STATUS               0x6d3c
+#define AVIVO_D2MODE_VIEWPORT_START             0x6d80
+#define AVIVO_D2MODE_VIEWPORT_SIZE              0x6d84
+#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT    0x6d88
+#define AVIVO_D2MODE_EXT_OVERSCAN_TOP_BOTTOM    0x6d8c
+
+#define AVIVO_D2SCL_SCALER_ENABLE               0x6d90
+#define AVIVO_D2SCL_SCALER_TAP_CONTROL		0x6d94
+
+#define AVIVO_DDIA_BIT_DEPTH_CONTROL				0x7214
+
+#define AVIVO_DACA_ENABLE					0x7800
+#	define AVIVO_DAC_ENABLE				(1 << 0)
+#define AVIVO_DACA_SOURCE_SELECT				0x7804
+#       define AVIVO_DAC_SOURCE_CRTC1                   (0 << 0)
+#       define AVIVO_DAC_SOURCE_CRTC2                   (1 << 0)
+#       define AVIVO_DAC_SOURCE_TV                      (2 << 0)
+
+#define AVIVO_DACA_FORCE_OUTPUT_CNTL				0x783c
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_FORCE_DATA_EN             (1 << 0)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT            (8)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE             (1 << 0)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN            (1 << 1)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_RED              (1 << 2)
+# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY       (1 << 24)
+#define AVIVO_DACA_POWERDOWN					0x7850
+# define AVIVO_DACA_POWERDOWN_POWERDOWN                         (1 << 0)
+# define AVIVO_DACA_POWERDOWN_BLUE                              (1 << 8)
+# define AVIVO_DACA_POWERDOWN_GREEN                             (1 << 16)
+# define AVIVO_DACA_POWERDOWN_RED                               (1 << 24)
+
+#define AVIVO_DACB_ENABLE					0x7a00
+#define AVIVO_DACB_SOURCE_SELECT				0x7a04
+#define AVIVO_DACB_FORCE_OUTPUT_CNTL				0x7a3c
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_FORCE_DATA_EN             (1 << 0)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT            (8)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE             (1 << 0)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN            (1 << 1)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_RED              (1 << 2)
+# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY       (1 << 24)
+#define AVIVO_DACB_POWERDOWN					0x7a50
+# define AVIVO_DACB_POWERDOWN_POWERDOWN                         (1 << 0)
+# define AVIVO_DACB_POWERDOWN_BLUE                              (1 << 8)
+# define AVIVO_DACB_POWERDOWN_GREEN                             (1 << 16)
+# define AVIVO_DACB_POWERDOWN_RED                               (1 << 24)
+
+#define AVIVO_TMDSA_CNTL                    0x7880
+#   define AVIVO_TMDSA_CNTL_ENABLE               (1 << 0)
+#   define AVIVO_TMDSA_CNTL_HDMI_EN              (1 << 2)
+#   define AVIVO_TMDSA_CNTL_HPD_MASK             (1 << 4)
+#   define AVIVO_TMDSA_CNTL_HPD_SELECT           (1 << 8)
+#   define AVIVO_TMDSA_CNTL_SYNC_PHASE           (1 << 12)
+#   define AVIVO_TMDSA_CNTL_PIXEL_ENCODING       (1 << 16)
+#   define AVIVO_TMDSA_CNTL_DUAL_LINK_ENABLE     (1 << 24)
+#   define AVIVO_TMDSA_CNTL_SWAP                 (1 << 28)
+#define AVIVO_TMDSA_SOURCE_SELECT				0x7884
+/* 0x78a8 appears to be some kind of (reasonably tolerant) clock;
+ * 0x78d0 definitely hits the transmitter and is definitely a clock. */
+/* MYSTERY1: this appears to control dithering? */
+#define AVIVO_TMDSA_BIT_DEPTH_CONTROL		0x7894
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN           (1 << 0)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH        (1 << 4)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN     (1 << 8)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH  (1 << 12)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN    (1 << 16)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL        (1 << 24)
+#   define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
+#define AVIVO_TMDSA_DCBALANCER_CONTROL                  0x78d0
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_EN                  (1 << 0)
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_EN             (1 << 8)
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_IN_SHIFT       (16)
+#   define AVIVO_TMDSA_DCBALANCER_CONTROL_FORCE               (1 << 24)
+#define AVIVO_TMDSA_DATA_SYNCHRONIZATION                0x78d8
+#   define AVIVO_TMDSA_DATA_SYNCHRONIZATION_DSYNSEL           (1 << 0)
+#   define AVIVO_TMDSA_DATA_SYNCHRONIZATION_PFREQCHG          (1 << 8)
+#define AVIVO_TMDSA_CLOCK_ENABLE            0x7900
+#define AVIVO_TMDSA_TRANSMITTER_ENABLE              0x7904
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX0_ENABLE          (1 << 0)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKC0EN             (1 << 1)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD00EN            (1 << 2)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD01EN            (1 << 3)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD02EN            (1 << 4)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX1_ENABLE          (1 << 8)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD10EN            (1 << 10)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD11EN            (1 << 11)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD12EN            (1 << 12)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX_ENABLE_HPD_MASK  (1 << 16)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK     (1 << 17)
+#   define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK     (1 << 18)
+
+#define AVIVO_TMDSA_TRANSMITTER_CONTROL				0x7910
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_ENABLE	(1 << 0)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_RESET	(1 << 1)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT	(2)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_IDSCKSEL	        (1 << 4)
+#       define AVIVO_TMDSA_TRANSMITTER_CONTROL_BGSLEEP          (1 << 5)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN	(1 << 6)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK	        (1 << 8)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS	(1 << 13)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK	        (1 << 14)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS	(1 << 15)
+#       define AVIVO_TMDSA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_BYPASS_PLL	(1 << 28)
+#       define AVIVO_TMDSA_TRANSMITTER_CONTROL_USE_CLK_DATA     (1 << 29)
+#	define AVIVO_TMDSA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL	(1 << 31)
+
+#define AVIVO_LVTMA_CNTL					0x7a80
+#   define AVIVO_LVTMA_CNTL_ENABLE               (1 << 0)
+#   define AVIVO_LVTMA_CNTL_HDMI_EN              (1 << 2)
+#   define AVIVO_LVTMA_CNTL_HPD_MASK             (1 << 4)
+#   define AVIVO_LVTMA_CNTL_HPD_SELECT           (1 << 8)
+#   define AVIVO_LVTMA_CNTL_SYNC_PHASE           (1 << 12)
+#   define AVIVO_LVTMA_CNTL_PIXEL_ENCODING       (1 << 16)
+#   define AVIVO_LVTMA_CNTL_DUAL_LINK_ENABLE     (1 << 24)
+#   define AVIVO_LVTMA_CNTL_SWAP                 (1 << 28)
+#define AVIVO_LVTMA_SOURCE_SELECT                               0x7a84
+#define AVIVO_LVTMA_COLOR_FORMAT                                0x7a88
+#define AVIVO_LVTMA_BIT_DEPTH_CONTROL                           0x7a94
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN           (1 << 0)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH        (1 << 4)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN     (1 << 8)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH  (1 << 12)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN    (1 << 16)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL        (1 << 24)
+#   define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
+
+
+
+#define AVIVO_LVTMA_DCBALANCER_CONTROL                  0x7ad0
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_EN                  (1 << 0)
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_EN             (1 << 8)
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_IN_SHIFT       (16)
+#   define AVIVO_LVTMA_DCBALANCER_CONTROL_FORCE               (1 << 24)
+
+#define AVIVO_LVTMA_DATA_SYNCHRONIZATION                0x78d8
+#   define AVIVO_LVTMA_DATA_SYNCHRONIZATION_DSYNSEL           (1 << 0)
+#   define AVIVO_LVTMA_DATA_SYNCHRONIZATION_PFREQCHG          (1 << 8)
+#define R500_LVTMA_CLOCK_ENABLE			0x7b00
+#define R600_LVTMA_CLOCK_ENABLE			0x7b04
+
+#define R500_LVTMA_TRANSMITTER_ENABLE              0x7b04
+#define R600_LVTMA_TRANSMITTER_ENABLE              0x7b08
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC0EN             (1 << 1)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD00EN            (1 << 2)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD01EN            (1 << 3)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD02EN            (1 << 4)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD03EN            (1 << 5)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC1EN             (1 << 9)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD10EN            (1 << 10)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD11EN            (1 << 11)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD12EN            (1 << 12)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK     (1 << 17)
+#   define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK     (1 << 18)
+
+#define R500_LVTMA_TRANSMITTER_CONTROL			        0x7b10
+#define R600_LVTMA_TRANSMITTER_CONTROL			        0x7b14
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_ENABLE	  (1 << 0)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_RESET	  (1 << 1)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_IDSCKSEL	          (1 << 4)
+#       define AVIVO_LVTMA_TRANSMITTER_CONTROL_BGSLEEP            (1 << 5)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN	  (1 << 6)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK	          (1 << 8)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS	  (1 << 13)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK	          (1 << 14)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS	  (1 << 15)
+#       define AVIVO_LVTMA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT  (16)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_BYPASS_PLL	  (1 << 28)
+#       define AVIVO_LVTMA_TRANSMITTER_CONTROL_USE_CLK_DATA       (1 << 29)
+#	define AVIVO_LVTMA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31)
+
+#define R500_LVTMA_PWRSEQ_CNTL						0x7af0
+#define R600_LVTMA_PWRSEQ_CNTL						0x7af4
+#	define AVIVO_LVTMA_PWRSEQ_EN					    (1 << 0)
+#	define AVIVO_LVTMA_PWRSEQ_PLL_ENABLE_MASK			    (1 << 2)
+#	define AVIVO_LVTMA_PWRSEQ_PLL_RESET_MASK			    (1 << 3)
+#	define AVIVO_LVTMA_PWRSEQ_TARGET_STATE				    (1 << 4)
+#	define AVIVO_LVTMA_SYNCEN					    (1 << 8)
+#	define AVIVO_LVTMA_SYNCEN_OVRD					    (1 << 9)
+#	define AVIVO_LVTMA_SYNCEN_POL					    (1 << 10)
+#	define AVIVO_LVTMA_DIGON					    (1 << 16)
+#	define AVIVO_LVTMA_DIGON_OVRD					    (1 << 17)
+#	define AVIVO_LVTMA_DIGON_POL					    (1 << 18)
+#	define AVIVO_LVTMA_BLON						    (1 << 24)
+#	define AVIVO_LVTMA_BLON_OVRD					    (1 << 25)
+#	define AVIVO_LVTMA_BLON_POL					    (1 << 26)
+
+#define R500_LVTMA_PWRSEQ_STATE                        0x7af4
+#define R600_LVTMA_PWRSEQ_STATE                        0x7af8
+#       define AVIVO_LVTMA_PWRSEQ_STATE_TARGET_STATE_R          (1 << 0)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_DIGON                   (1 << 1)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_SYNCEN                  (1 << 2)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_BLON                    (1 << 3)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_DONE                    (1 << 4)
+#       define AVIVO_LVTMA_PWRSEQ_STATE_STATUS_SHIFT            (8)
+
+#define AVIVO_LVDS_BACKLIGHT_CNTL			0x7af8
+#	define AVIVO_LVDS_BACKLIGHT_CNTL_EN			(1 << 0)
+#	define AVIVO_LVDS_BACKLIGHT_LEVEL_MASK		0x0000ff00
+#	define AVIVO_LVDS_BACKLIGHT_LEVEL_SHIFT		8
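+/* Illustrative read-modify-write of the backlight level field (a sketch,
+ * not a helper in this driver; 'level' stands for a caller-supplied value):
+ *
+ *	tmp = RREG32(AVIVO_LVDS_BACKLIGHT_CNTL);
+ *	tmp &= ~AVIVO_LVDS_BACKLIGHT_LEVEL_MASK;
+ *	tmp |= (level << AVIVO_LVDS_BACKLIGHT_LEVEL_SHIFT) &
+ *	       AVIVO_LVDS_BACKLIGHT_LEVEL_MASK;
+ *	WREG32(AVIVO_LVDS_BACKLIGHT_CNTL, tmp | AVIVO_LVDS_BACKLIGHT_CNTL_EN);
+ */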
+
+#define AVIVO_DVOA_BIT_DEPTH_CONTROL			0x7988
+
+#define AVIVO_DC_GPIO_HPD_A                 0x7e94
+#define AVIVO_DC_GPIO_HPD_Y                 0x7e9c
+
+#define AVIVO_DC_I2C_STATUS1				0x7d30
+#	define AVIVO_DC_I2C_DONE			(1 << 0)
+#	define AVIVO_DC_I2C_NACK			(1 << 1)
+#	define AVIVO_DC_I2C_HALT			(1 << 2)
+#	define AVIVO_DC_I2C_GO			        (1 << 3)
+#define AVIVO_DC_I2C_RESET 				0x7d34
+#	define AVIVO_DC_I2C_SOFT_RESET			(1 << 0)
+#	define AVIVO_DC_I2C_ABORT			(1 << 8)
+#define AVIVO_DC_I2C_CONTROL1 				0x7d38
+#	define AVIVO_DC_I2C_START			(1 << 0)
+#	define AVIVO_DC_I2C_STOP			(1 << 1)
+#	define AVIVO_DC_I2C_RECEIVE			(1 << 2)
+#	define AVIVO_DC_I2C_EN			        (1 << 8)
+#	define AVIVO_DC_I2C_PIN_SELECT(x)		((x) << 16)
+#	define AVIVO_SEL_DDC1			        0
+#	define AVIVO_SEL_DDC2			        1
+#	define AVIVO_SEL_DDC3			        2
+#define AVIVO_DC_I2C_CONTROL2 				0x7d3c
+#	define AVIVO_DC_I2C_ADDR_COUNT(x)		((x) << 0)
+#	define AVIVO_DC_I2C_DATA_COUNT(x)		((x) << 8)
+#define AVIVO_DC_I2C_CONTROL3 				0x7d40
+#	define AVIVO_DC_I2C_DATA_DRIVE_EN		(1 << 0)
+#	define AVIVO_DC_I2C_DATA_DRIVE_SEL		(1 << 1)
+#	define AVIVO_DC_I2C_CLK_DRIVE_EN		(1 << 7)
+#	define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x)      ((x) << 8)
+#	define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x)	((x) << 16)
+#	define AVIVO_DC_I2C_TIME_LIMIT(x)		((x) << 24)
+#define AVIVO_DC_I2C_DATA 				0x7d44
+#define AVIVO_DC_I2C_INTERRUPT_CONTROL 			0x7d48
+#	define AVIVO_DC_I2C_INTERRUPT_STATUS		(1 << 0)
+#	define AVIVO_DC_I2C_INTERRUPT_AK		(1 << 8)
+#	define AVIVO_DC_I2C_INTERRUPT_ENABLE		(1 << 16)
+#define AVIVO_DC_I2C_ARBITRATION 			0x7d50
+#	define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C		(1 << 0)
+#	define AVIVO_DC_I2C_SW_CAN_USE_I2C		(1 << 1)
+#	define AVIVO_DC_I2C_SW_DONE_USING_I2C		(1 << 8)
+#	define AVIVO_DC_I2C_HW_NEEDS_I2C		(1 << 9)
+#	define AVIVO_DC_I2C_ABORT_HDCP_I2C		(1 << 16)
+#	define AVIVO_DC_I2C_HW_USING_I2C		(1 << 17)
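+/* Sketch of the software/hardware bus handshake implied by the arbitration
+ * bits above (illustration only, not driver code): request the bus, poll
+ * for the grant, run the transfer, then release the bus:
+ *
+ *	WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
+ *	while (!(RREG32(AVIVO_DC_I2C_ARBITRATION) & AVIVO_DC_I2C_SW_CAN_USE_I2C))
+ *		udelay(1);	(a real implementation would bound this loop)
+ *	... program CONTROL1/2/3 and AVIVO_DC_I2C_DATA, set AVIVO_DC_I2C_GO,
+ *	    then wait for AVIVO_DC_I2C_DONE in AVIVO_DC_I2C_STATUS1 ...
+ *	WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
+ */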
+
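+/* each DDC line is exposed as a MASK/A/EN/Y register quad; in the usual
+ * radeon GPIO naming, A holds the driven output value, EN the output
+ * enable and Y the sampled input value */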
+#define AVIVO_DC_GPIO_DDC1_MASK 		        0x7e40
+#define AVIVO_DC_GPIO_DDC1_A 		                0x7e44
+#define AVIVO_DC_GPIO_DDC1_EN 		                0x7e48
+#define AVIVO_DC_GPIO_DDC1_Y 		                0x7e4c
+
+#define AVIVO_DC_GPIO_DDC2_MASK 		        0x7e50
+#define AVIVO_DC_GPIO_DDC2_A 		                0x7e54
+#define AVIVO_DC_GPIO_DDC2_EN 		                0x7e58
+#define AVIVO_DC_GPIO_DDC2_Y 		                0x7e5c
+
+#define AVIVO_DC_GPIO_DDC3_MASK 		        0x7e60
+#define AVIVO_DC_GPIO_DDC3_A 		                0x7e64
+#define AVIVO_DC_GPIO_DDC3_EN 		                0x7e68
+#define AVIVO_DC_GPIO_DDC3_Y 		                0x7e6c
+
+#define AVIVO_DISP_INTERRUPT_STATUS                             0x7edc
+#       define AVIVO_D1_VBLANK_INTERRUPT                        (1 << 4)
+#       define AVIVO_D2_VBLANK_INTERRUPT                        (1 << 5)
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/r520.c b/linux-imx/drivers/gpu/drm/radeon/r520.c
new file mode 100644
index 0000000..e1aece7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r520.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "r520d.h"
+
+/* This file gathers functions specific to: r520, rv530, rv560, rv570, r580 */
+
+int r520_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32_MC(R520_MC_STATUS);
+		if (tmp & R520_MC_STATUS_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+static void r520_gpu_init(struct radeon_device *rdev)
+{
+	unsigned pipe_select_current, gb_pipe_select, tmp;
+
+	rv515_vga_render_disable(rdev);
+	/*
+	 * DST_PIPE_CONFIG		0x170C
+	 * GB_TILE_CONFIG		0x4018
+	 * GB_FIFO_SIZE			0x4024
+	 * GB_PIPE_SELECT		0x402C
+	 * GB_PIPE_SELECT2              0x4124
+	 *	Z_PIPE_SHIFT			0
+	 *	Z_PIPE_MASK			0x000000003
+	 * GB_FIFO_SIZE2                0x4128
+	 *	SC_SFIFO_SIZE_SHIFT		0
+	 *	SC_SFIFO_SIZE_MASK		0x000000003
+	 *	SC_MFIFO_SIZE_SHIFT		2
+	 *	SC_MFIFO_SIZE_MASK		0x00000000C
+	 *	FG_SFIFO_SIZE_SHIFT		4
+	 *	FG_SFIFO_SIZE_MASK		0x000000030
+	 *	ZB_MFIFO_SIZE_SHIFT		6
+	 *	ZB_MFIFO_SIZE_MASK		0x0000000C0
+	 * GA_ENHANCE			0x4274
+	 * SU_REG_DEST			0x42C8
+	 */
+	/* workaround for RV530 */
+	if (rdev->family == CHIP_RV530) {
+		WREG32(0x4128, 0xFF);
+	}
+	r420_pipes_init(rdev);
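+	/* derive the pipe setup: bits 3:2 of DST_PIPE_CONFIG select the
+	 * current pipe and bits 11:8 of GB_PIPE_SELECT describe the pipe
+	 * configuration; both are combined into a single PLL-space write */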
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	pipe_select_current = (tmp >> 2) & 3;
+	tmp = (1 << pipe_select_current) |
+	      (((gb_pipe_select >> 8) & 0xF) << 4);
+	WREG32_PLL(0x000D, tmp);
+	if (r520_mc_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait for MC idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+}
+
+static void r520_vram_get_type(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	rdev->mc.vram_width = 128;
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32_MC(R520_MC_CNTL0);
+	switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
+	case 0:
+		rdev->mc.vram_width = 32;
+		break;
+	case 1:
+		rdev->mc.vram_width = 64;
+		break;
+	case 2:
+		rdev->mc.vram_width = 128;
+		break;
+	case 3:
+		rdev->mc.vram_width = 256;
+		break;
+	default:
+		rdev->mc.vram_width = 128;
+		break;
+	}
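+	/* e.g. a MEM_NUM_CHANNELS field of 2 with R520_MC_CHANNEL_SIZE set
+	 * yields a 128 * 2 = 256 bit wide memory interface */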
+	if (tmp & R520_MC_CHANNEL_SIZE)
+		rdev->mc.vram_width *= 2;
+}
+
+static void r520_mc_init(struct radeon_device *rdev)
+{
+	r520_vram_get_type(rdev);
+	r100_vram_init_sizes(rdev);
+	radeon_vram_location(rdev, &rdev->mc, 0);
+	rdev->mc.gtt_base_align = 0;
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+static void r520_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (r520_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Timed out waiting for MC idle before updating MC.\n");
+	/* Write VRAM size in case we are limiting it */
+	WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+	/* Program MC, should be a 32bits limited address space */
+	WREG32_MC(R_000004_MC_FB_LOCATION,
+			S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
+			S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32_MC(R_000005_MC_AGP_LOCATION,
+			S_000005_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+			S_000005_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+		WREG32_MC(R_000006_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+		WREG32_MC(R_000007_AGP_BASE_2,
+			S_000007_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
+	} else {
+		WREG32_MC(R_000005_MC_AGP_LOCATION, 0xFFFFFFFF);
+		WREG32_MC(R_000006_AGP_BASE, 0);
+		WREG32_MC(R_000007_AGP_BASE_2, 0);
+	}
+
+	rv515_mc_resume(rdev, &save);
+}
+
+static int r520_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	r520_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	r520_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int r520_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is disabled */
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	rv515_clock_startup(rdev);
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	atom_asic_init(rdev->mode_info.atom_context);
+	/* Resume clock after posting */
+	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = r520_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int r520_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* restore some register to sane defaults */
+	r100_restore_sanity(rdev);
+	/* TODO: disable VGA need to use VGA request */
+	/* BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for R520 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if cards are posted or not */
+	if (!radeon_boot_test_post_card(rdev))
+		return -EINVAL;
+
+	if (!radeon_card_posted(rdev) && rdev->bios) {
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r520_mc_init(rdev);
+	rv515_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rv370_pcie_gart_init(rdev);
+	if (r)
+		return r;
+	rv515_set_safe_registers(rdev);
+
+	rdev->accel_working = true;
+	r = r520_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init, so stop acceleration */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rv370_pcie_gart_fini(rdev);
+		radeon_agp_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/r520d.h b/linux-imx/drivers/gpu/drm/radeon/r520d.h
new file mode 100644
index 0000000..61af61f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r520d.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R520D_H__
+#define __R520D_H__
+
+/* Registers */
+#define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
+#define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
+#define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0000F8_CONFIG_MEMSIZE                      0x00000000
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_RBBM_HIBUSY(x)                      (((x) & 0x1) << 28)
+#define   G_000E40_RBBM_HIBUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_000E40_RBBM_HIBUSY                         0xEFFFFFFF
+#define   S_000E40_SKID_CFBUSY(x)                      (((x) & 0x1) << 29)
+#define   G_000E40_SKID_CFBUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_000E40_SKID_CFBUSY                         0xDFFFFFFF
+#define   S_000E40_VAP_VF_BUSY(x)                      (((x) & 0x1) << 30)
+#define   G_000E40_VAP_VF_BUSY(x)                      (((x) >> 30) & 0x1)
+#define   C_000E40_VAP_VF_BUSY                         0xBFFFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+
+
+#define R_000004_MC_FB_LOCATION                      0x000004
+#define   S_000004_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000004_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000004_MC_FB_START                         0xFFFF0000
+#define   S_000004_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000004_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000004_MC_FB_TOP                           0x0000FFFF
+#define R_000005_MC_AGP_LOCATION                     0x000005
+#define   S_000005_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000005_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000005_MC_AGP_START                        0xFFFF0000
+#define   S_000005_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_000005_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_000005_MC_AGP_TOP                          0x0000FFFF
+#define R_000006_AGP_BASE                            0x000006
+#define   S_000006_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000006_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000006_AGP_BASE_ADDR                       0x00000000
+#define R_000007_AGP_BASE_2                          0x000007
+#define   S_000007_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_000007_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_000007_AGP_BASE_ADDR_2                     0xFFFFFFF0
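+
+/*
+ * Field macro convention used throughout this header: S_<reg>_<field>(x)
+ * shifts a value into the field, G_<reg>_<field>(x) extracts it from a
+ * register value and C_<reg>_<field> is the AND-mask that clears it.
+ * An illustrative read-modify-write (fb_start is a made-up variable):
+ *
+ *	tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
+ *	tmp &= C_000004_MC_FB_START;
+ *	tmp |= S_000004_MC_FB_START(fb_start >> 16);
+ *	WREG32_MC(R_000004_MC_FB_LOCATION, tmp);
+ */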
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/r600.c b/linux-imx/drivers/gpu/drm/radeon/r600.c
new file mode 100644
index 0000000..4cf21ec
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r600.c
@@ -0,0 +1,4838 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_mode.h"
+#include "r600d.h"
+#include "atom.h"
+#include "avivod.h"
+
+#define PFP_UCODE_SIZE 576
+#define PM4_UCODE_SIZE 1792
+#define RLC_UCODE_SIZE 768
+#define R700_PFP_UCODE_SIZE 848
+#define R700_PM4_UCODE_SIZE 1360
+#define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define CAYMAN_RLC_UCODE_SIZE 1024
+#define ARUBA_RLC_UCODE_SIZE 1536
+
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/R600_pfp.bin");
+MODULE_FIRMWARE("radeon/R600_me.bin");
+MODULE_FIRMWARE("radeon/RV610_pfp.bin");
+MODULE_FIRMWARE("radeon/RV610_me.bin");
+MODULE_FIRMWARE("radeon/RV630_pfp.bin");
+MODULE_FIRMWARE("radeon/RV630_me.bin");
+MODULE_FIRMWARE("radeon/RV620_pfp.bin");
+MODULE_FIRMWARE("radeon/RV620_me.bin");
+MODULE_FIRMWARE("radeon/RV635_pfp.bin");
+MODULE_FIRMWARE("radeon/RV635_me.bin");
+MODULE_FIRMWARE("radeon/RV670_pfp.bin");
+MODULE_FIRMWARE("radeon/RV670_me.bin");
+MODULE_FIRMWARE("radeon/RS780_pfp.bin");
+MODULE_FIRMWARE("radeon/RS780_me.bin");
+MODULE_FIRMWARE("radeon/RV770_pfp.bin");
+MODULE_FIRMWARE("radeon/RV770_me.bin");
+MODULE_FIRMWARE("radeon/RV730_pfp.bin");
+MODULE_FIRMWARE("radeon/RV730_me.bin");
+MODULE_FIRMWARE("radeon/RV710_pfp.bin");
+MODULE_FIRMWARE("radeon/RV710_me.bin");
+MODULE_FIRMWARE("radeon/R600_rlc.bin");
+MODULE_FIRMWARE("radeon/R700_rlc.bin");
+MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
+MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
+MODULE_FIRMWARE("radeon/PALM_pfp.bin");
+MODULE_FIRMWARE("radeon/PALM_me.bin");
+MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
+MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO_me.bin");
+MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
+MODULE_FIRMWARE("radeon/SUMO2_me.bin");
+
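+/* per-CRTC register offsets: adding crtc_offsets[crtc] to a D1CRTC register
+ * address yields the matching register of the selected CRTC */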
+static const u32 crtc_offsets[2] =
+{
+	0,
+	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
+int r600_debugfs_mc_info_init(struct radeon_device *rdev);
+
+/* r600,rv610,rv630,rv620,rv635,rv670 */
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+static void r600_gpu_init(struct radeon_device *rdev);
+void r600_fini(struct radeon_device *rdev);
+void r600_irq_disable(struct radeon_device *rdev);
+static void r600_pcie_gen2_enable(struct radeon_device *rdev);
+
+/**
+ * r600_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r6xx, IGPs, APUs).
+ */
+u32 r600_get_xclk(struct radeon_device *rdev)
+{
+	return rdev->clock.spll.reference_freq;
+}
+
+/* get temperature in millidegrees */
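+/* the ASIC_T field is a 9-bit two's complement value; e.g. a raw field of
+ * 0x1f0 decodes as 0xf0 - 256 = -16 degrees C, i.e. -16000 millidegrees */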
+int rv6xx_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
+		ASIC_T_SHIFT;
+	int actual_temp = temp & 0xff;
+
+	if (temp & 0x100)
+		actual_temp -= 256;
+
+	return actual_temp * 1000;
+}
+
+void r600_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+	int i;
+
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+
+	/* power state array is low to high, default is first */
+	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
+		int min_power_state_index = 0;
+
+		if (rdev->pm.num_power_states > 2)
+			min_power_state_index = 1;
+
+		switch (rdev->pm.dynpm_planned_action) {
+		case DYNPM_ACTION_MINIMUM:
+			rdev->pm.requested_power_state_index = min_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_downclock = false;
+			break;
+		case DYNPM_ACTION_DOWNCLOCK:
+			if (rdev->pm.current_power_state_index == min_power_state_index) {
+				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+				rdev->pm.dynpm_can_downclock = false;
+			} else {
+				if (rdev->pm.active_crtc_count > 1) {
+					for (i = 0; i < rdev->pm.num_power_states; i++) {
+						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+							continue;
+						else if (i >= rdev->pm.current_power_state_index) {
+							rdev->pm.requested_power_state_index =
+								rdev->pm.current_power_state_index;
+							break;
+						} else {
+							rdev->pm.requested_power_state_index = i;
+							break;
+						}
+					}
+				} else {
+					if (rdev->pm.current_power_state_index == 0)
+						rdev->pm.requested_power_state_index =
+							rdev->pm.num_power_states - 1;
+					else
+						rdev->pm.requested_power_state_index =
+							rdev->pm.current_power_state_index - 1;
+				}
+			}
+			rdev->pm.requested_clock_mode_index = 0;
+			/* don't use the power state if crtcs are active and no display flag is set */
+			if ((rdev->pm.active_crtc_count > 0) &&
+			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			     clock_info[rdev->pm.requested_clock_mode_index].flags &
+			     RADEON_PM_MODE_NO_DISPLAY)) {
+				rdev->pm.requested_power_state_index++;
+			}
+			break;
+		case DYNPM_ACTION_UPCLOCK:
+			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+				rdev->pm.dynpm_can_upclock = false;
+			} else {
+				if (rdev->pm.active_crtc_count > 1) {
+					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+							continue;
+						else if (i <= rdev->pm.current_power_state_index) {
+							rdev->pm.requested_power_state_index =
+								rdev->pm.current_power_state_index;
+							break;
+						} else {
+							rdev->pm.requested_power_state_index = i;
+							break;
+						}
+					}
+				} else
+					rdev->pm.requested_power_state_index =
+						rdev->pm.current_power_state_index + 1;
+			}
+			rdev->pm.requested_clock_mode_index = 0;
+			break;
+		case DYNPM_ACTION_DEFAULT:
+			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_upclock = false;
+			break;
+		case DYNPM_ACTION_NONE:
+		default:
+			DRM_ERROR("Requested mode for undefined action\n");
+			return;
+		}
+	} else {
+		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
+		/* for now just select the first power state and switch between clock modes */
+		/* power state array is low to high, default is first (0) */
+		if (rdev->pm.active_crtc_count > 1) {
+			rdev->pm.requested_power_state_index = -1;
+			/* start at 1 as we don't want the default mode */
+			for (i = 1; i < rdev->pm.num_power_states; i++) {
+				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+					continue;
+				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
+					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
+					rdev->pm.requested_power_state_index = i;
+					break;
+				}
+			}
+			/* if nothing selected, grab the default state. */
+			if (rdev->pm.requested_power_state_index == -1)
+				rdev->pm.requested_power_state_index = 0;
+		} else
+			rdev->pm.requested_power_state_index = 1;
+
+		switch (rdev->pm.dynpm_planned_action) {
+		case DYNPM_ACTION_MINIMUM:
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_downclock = false;
+			break;
+		case DYNPM_ACTION_DOWNCLOCK:
+			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+				if (rdev->pm.current_clock_mode_index == 0) {
+					rdev->pm.requested_clock_mode_index = 0;
+					rdev->pm.dynpm_can_downclock = false;
+				} else
+					rdev->pm.requested_clock_mode_index =
+						rdev->pm.current_clock_mode_index - 1;
+			} else {
+				rdev->pm.requested_clock_mode_index = 0;
+				rdev->pm.dynpm_can_downclock = false;
+			}
+			/* don't use the power state if crtcs are active and no display flag is set */
+			if ((rdev->pm.active_crtc_count > 0) &&
+			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			     clock_info[rdev->pm.requested_clock_mode_index].flags &
+			     RADEON_PM_MODE_NO_DISPLAY)) {
+				rdev->pm.requested_clock_mode_index++;
+			}
+			break;
+		case DYNPM_ACTION_UPCLOCK:
+			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
+				if (rdev->pm.current_clock_mode_index ==
+				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
+					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
+					rdev->pm.dynpm_can_upclock = false;
+				} else
+					rdev->pm.requested_clock_mode_index =
+						rdev->pm.current_clock_mode_index + 1;
+			} else {
+				rdev->pm.requested_clock_mode_index =
+					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
+				rdev->pm.dynpm_can_upclock = false;
+			}
+			break;
+		case DYNPM_ACTION_DEFAULT:
+			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+			rdev->pm.requested_clock_mode_index = 0;
+			rdev->pm.dynpm_can_upclock = false;
+			break;
+		case DYNPM_ACTION_NONE:
+		default:
+			DRM_ERROR("Requested mode for undefined action\n");
+			return;
+		}
+	}
+
+	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  pcie_lanes);
+}
+
+void rs780_pm_init_profile(struct radeon_device *rdev)
+{
+	if (rdev->pm.num_power_states == 2) {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else if (rdev->pm.num_power_states == 3) {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else {
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	}
+}
+
+void r600_pm_init_profile(struct radeon_device *rdev)
+{
+	int idx;
+
+	if (rdev->family == CHIP_R600) {
+		/* XXX */
+		/* default */
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+		/* low sh */
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+		/* mid sh */
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+		/* high sh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+		/* low mh */
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+		/* mid mh */
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+		/* high mh */
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+	} else {
+		if (rdev->pm.num_power_states < 4) {
+			/* default */
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+			/* low sh */
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+			/* mid sh */
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+			/* high sh */
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+			/* low mh */
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+			/* mid mh */
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+			/* high mh */
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+		} else {
+			/* default */
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+			/* low sh */
+			if (rdev->flags & RADEON_IS_MOBILITY)
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+			else
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+			/* mid sh */
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+			/* high sh */
+			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+			/* low mh */
+			if (rdev->flags & RADEON_IS_MOBILITY)
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+			else
+				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+			/* mid mh */
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+			/* high mh */
+			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+		}
+	}
+}
+
+void r600_pm_misc(struct radeon_device *rdev)
+{
+	int req_ps_idx = rdev->pm.requested_power_state_index;
+	int req_cm_idx = rdev->pm.requested_clock_mode_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+		/* 0xff01 is a flag rather than an actual voltage */
+		if (voltage->voltage == 0xff01)
+			return;
+		if (voltage->voltage != rdev->pm.current_vddc) {
+			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
+			rdev->pm.current_vddc = voltage->voltage;
+			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
+		}
+	}
+}
+
+bool r600_gui_idle(struct radeon_device *rdev)
+{
+	return !(RREG32(GRBM_STATUS) & GUI_ACTIVE);
+}
+
+/* hpd for digital panel detect/disconnect */
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	bool connected = false;
+
+	if (ASIC_IS_DCE3(rdev)) {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_2:
+			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_3:
+			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_4:
+			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+			/* DCE 3.2 */
+		case RADEON_HPD_5:
+			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_6:
+			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+				connected = true;
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_2:
+			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+				connected = true;
+			break;
+		case RADEON_HPD_3:
+			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+				connected = true;
+			break;
+		default:
+			break;
+		}
+	}
+	return connected;
+}
+
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = r600_hpd_sense(rdev, hpd);
+
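+	/* program the interrupt polarity to fire on the opposite of the
+	 * current sense, so the next plug/unplug transition is reported */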
+	if (ASIC_IS_DCE3(rdev)) {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			tmp = RREG32(DC_HPD1_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD1_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_2:
+			tmp = RREG32(DC_HPD2_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD2_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_3:
+			tmp = RREG32(DC_HPD3_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD3_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_4:
+			tmp = RREG32(DC_HPD4_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD4_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_5:
+			tmp = RREG32(DC_HPD5_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD5_INT_CONTROL, tmp);
+			break;
+			/* DCE 3.2 */
+		case RADEON_HPD_6:
+			tmp = RREG32(DC_HPD6_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HPDx_INT_POLARITY;
+			else
+				tmp |= DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD6_INT_CONTROL, tmp);
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (hpd) {
+		case RADEON_HPD_1:
+			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			else
+				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_2:
+			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			else
+				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+			break;
+		case RADEON_HPD_3:
+			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+			if (connected)
+				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			else
+				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+void r600_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned enable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS; this avoids
+			 * breaking the aux dp channel on iMacs and helps (but
+			 * does not completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 */
+			continue;
+		}
+		if (ASIC_IS_DCE3(rdev)) {
+			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
+			if (ASIC_IS_DCE32(rdev))
+				tmp |= DC_HPDx_EN;
+
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HPD1_CONTROL, tmp);
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HPD2_CONTROL, tmp);
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HPD3_CONTROL, tmp);
+				break;
+			case RADEON_HPD_4:
+				WREG32(DC_HPD4_CONTROL, tmp);
+				break;
+				/* DCE 3.2 */
+			case RADEON_HPD_5:
+				WREG32(DC_HPD5_CONTROL, tmp);
+				break;
+			case RADEON_HPD_6:
+				WREG32(DC_HPD6_CONTROL, tmp);
+				break;
+			default:
+				break;
+			}
+		} else {
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+				break;
+			default:
+				break;
+			}
+		}
+		enable |= 1 << radeon_connector->hpd.hpd;
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+	}
+	radeon_irq_kms_enable_hpd(rdev, enable);
+}
+
+void r600_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned disable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		if (ASIC_IS_DCE3(rdev)) {
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HPD1_CONTROL, 0);
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HPD2_CONTROL, 0);
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HPD3_CONTROL, 0);
+				break;
+			case RADEON_HPD_4:
+				WREG32(DC_HPD4_CONTROL, 0);
+				break;
+				/* DCE 3.2 */
+			case RADEON_HPD_5:
+				WREG32(DC_HPD5_CONTROL, 0);
+				break;
+			case RADEON_HPD_6:
+				WREG32(DC_HPD6_CONTROL, 0);
+				break;
+			default:
+				break;
+			}
+		} else {
+			switch (radeon_connector->hpd.hpd) {
+			case RADEON_HPD_1:
+				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
+				break;
+			case RADEON_HPD_2:
+				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
+				break;
+			case RADEON_HPD_3:
+				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
+				break;
+			default:
+				break;
+			}
+		}
+		disable |= 1 << radeon_connector->hpd.hpd;
+	}
+	radeon_irq_kms_disable_hpd(rdev, disable);
+}
+
+/*
+ * R600 PCIE GART
+ */
+void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	/* flush hdp cache so updates hit vram */
+	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+	    !(rdev->flags & RADEON_IS_AGP)) {
+		void __iomem *ptr = (void *)rdev->gart.ptr;
+		u32 tmp;
+
+		/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
+		 * rather than writing to HDP_REG_COHERENCY_FLUSH_CNTL.
+		 * The workaround seems to cause problems on some AGP cards,
+		 * so just use the old method for them.
+		 */
+		WREG32(HDP_DEBUG1, 0);
+		tmp = readl((void __iomem *)ptr);
+	} else
+		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
+	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read the invalidation request response; 2 means the
+		 * flush failed, any other non-zero value means done */
+		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+		if (tmp == 2) {
+			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
+			return;
+		}
+		if (tmp) {
+			return;
+		}
+		udelay(1);
+	}
+}
+
+int r600_pcie_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj) {
+		WARN(1, "R600 PCIE GART already initialized\n");
+		return 0;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r)
+		return r;
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
+	return radeon_gart_table_vram_alloc(rdev);
+}
+
+static int r600_pcie_gart_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int r, i;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+		ENABLE_WAIT_L2_QUERY;
+	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
+	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	for (i = 1; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+	r600_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void r600_pcie_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	/* Disable all tables */
+	for (i = 0; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+	/* Disable L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+	/* Setup L1 TLB control */
+	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+		ENABLE_WAIT_L2_QUERY;
+	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void r600_pcie_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	r600_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+}
+
+static void r600_agp_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
+	/* Setup TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
+		ENABLE_WAIT_L2_QUERY;
+	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
+	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
+	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
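+	/* unlike r600_pcie_gart_enable(), leave every VM context disabled:
+	 * AGP traffic goes through the aperture rather than the GART
+	 * page table */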
+	for (i = 0; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
+int r600_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read SRBM_STATUS and check the MC busy bits */
+		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
+		if (!tmp)
+			return 0;
+		udelay(1);
+	}
+	return -1;
+}
+
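+/* RS780/RS880 MC registers are reached through the usual index/data
+ * pair: the register offset goes into MC_INDEX (with MC_IND_WR_EN set
+ * for writes), the payload moves through MC_DATA, and the index is
+ * cleared again afterwards.  A read-modify-write of an MC register
+ * would look like:
+ *
+ *	v = rs780_mc_rreg(rdev, mc_reg);
+ *	rs780_mc_wreg(rdev, mc_reg, v | some_bit);
+ *
+ * (mc_reg and some_bit are placeholders, not real register names.)
+ */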
+uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	uint32_t r;
+
+	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
+	r = RREG32(R_0028FC_MC_DATA);
+	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+	return r;
+}
+
+void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
+		S_0028F8_MC_IND_WR_EN(1));
+	WREG32(R_0028FC_MC_DATA, v);
+	WREG32(R_0028F8_MC_INDEX, 0x7F);
+}
+
+static void r600_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	/* Lockout access through VGA aperture (doesn't exist before R600) */
+	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	if (rdev->flags & RADEON_IS_AGP) {
+		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+			/* VRAM before AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.vram_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.gtt_end >> 12);
+		} else {
+			/* VRAM after AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.gtt_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.vram_end >> 12);
+		}
+	} else {
+		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
+		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
+	}
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
+		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
+		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+	} else {
+		WREG32(MC_VM_AGP_BASE, 0);
+		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	}
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	rv515_mc_resume(rdev, &save);
+	/* we need to own VRAM, so turn off the VGA renderer here
+	 * to stop it overwriting our objects */
+	rv515_vga_render_disable(rdev);
+}
+
+/**
+ * r600_vram_gtt_location - try to find VRAM & GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function tries to place VRAM at the same place as in the CPU
+ * (PCI) address space, as some GPUs seem to have issues when it is
+ * reprogrammed to a different address.
+ *
+ * If there is not enough space to fit the non-visible VRAM after the
+ * aperture, then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP, then place VRAM adjacent to the AGP aperture,
+ * as we need them to be contiguous from the GPU's point of view so
+ * that we can program the GPU to catch accesses outside of them
+ * (weird GPU policy, see ??).
+ *
+ * This function never fails; the worst case is limiting VRAM or GTT.
+ *
+ * Note: GTT start, end, and size should be initialized before calling
+ * this function on AGP platforms.
+ */
+static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_bf, size_af;
+
+	if (mc->mc_vram_size > 0xE0000000) {
+		/* leave room for at least 512M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xE0000000;
+		mc->mc_vram_size = 0xE0000000;
+	}
+	if (rdev->flags & RADEON_IS_AGP) {
+		size_bf = mc->gtt_start;
+		size_af = mc->mc_mask - mc->gtt_end;
+		if (size_bf > size_af) {
+			if (mc->mc_vram_size > size_bf) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_bf;
+				mc->mc_vram_size = size_bf;
+			}
+			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+		} else {
+			if (mc->mc_vram_size > size_af) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_af;
+				mc->mc_vram_size = size_af;
+			}
+			mc->vram_start = mc->gtt_end + 1;
+		}
+		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+				mc->mc_vram_size >> 20, mc->vram_start,
+				mc->vram_end, mc->real_vram_size >> 20);
+	} else {
+		u64 base = 0;
+		if (rdev->flags & RADEON_IS_IGP) {
+			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
+			base <<= 24;
+		}
+		radeon_vram_location(rdev, &rdev->mc, base);
+		rdev->mc.gtt_base_align = 0;
+		radeon_gtt_location(rdev, mc);
+	}
+}
+
+static int r600_mc_init(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int chansize, numchan;
+	uint32_t h_addr, l_addr;
+	unsigned long long k8_addr;
+
+	/* Get VRAM information */
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32(RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE) {
+		chansize = 16;
+	} else if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	}
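+	/* bus width in bits = number of channels * bits per channel */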
+	rdev->mc.vram_width = numchan * chansize;
+	/* Could the aperture size report 0? */
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	/* Setup GPU memory space */
+	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	r600_vram_gtt_location(rdev, &rdev->mc);
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		rs690_pm_info(rdev);
+		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
+		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+			/* Use K8 direct mapping for fast fb access. */
+			rdev->fastfb_working = false;
+			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
+			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
+			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+			{
+				/* FastFB shall be used with UMA memory.  Here it
+				 * is simply disabled when sideport memory is
+				 * present.
+				 */
+				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
+						(unsigned long long)rdev->mc.aper_base, k8_addr);
+					rdev->mc.aper_base = (resource_size_t)k8_addr;
+					rdev->fastfb_working = true;
+				}
+			}
+		}
+	}
+
+	radeon_update_bandwidth_info(rdev);
+	return 0;
+}
+
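+/**
+ * r600_vram_scratch_init - set up the VRAM scratch page
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate a single GPU page in VRAM, pin it and kmap it for CPU
+ * access.  It serves as the default system aperture address in
+ * r600_mc_program() and as the CPU-visible target written by
+ * r600_dma_ring_test().  Returns 0 on success, error on failure.
+ */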
+int r600_vram_scratch_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->vram_scratch.robj == NULL) {
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
+				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+				     NULL, &rdev->vram_scratch.robj);
+		if (r) {
+			return r;
+		}
+	}
+
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->vram_scratch.robj,
+			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+		return r;
+	}
+	r = radeon_bo_kmap(rdev->vram_scratch.robj,
+				(void **)&rdev->vram_scratch.ptr);
+	if (r)
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+	radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+	return r;
+}
+
+void r600_vram_scratch_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->vram_scratch.robj == NULL) {
+		return;
+	}
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_kunmap(rdev->vram_scratch.robj);
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+	}
+	radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
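+/**
+ * r600_set_bios_scratch_engine_hung - flag a GUI engine hang for the BIOS
+ *
+ * @rdev: radeon_device pointer
+ * @hung: whether the engine is considered hung
+ *
+ * Set or clear ATOM_S3_ASIC_GUI_ENGINE_HUNG in the BIOS scratch
+ * register so the reset path can record the engine state.
+ */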
+void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
+{
+	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
+
+	if (hung)
+		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+	else
+		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+
+	WREG32(R600_BIOS_3_SCRATCH, tmp);
+}
+
+static void r600_print_gpu_status_regs(struct radeon_device *rdev)
+{
+	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
+		 RREG32(R_008010_GRBM_STATUS));
+	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
+		 RREG32(R_008014_GRBM_STATUS2));
+	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
+		 RREG32(R_000E50_SRBM_STATUS));
+	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+		 RREG32(CP_STALLED_STAT1));
+	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+		 RREG32(CP_STALLED_STAT2));
+	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
+		 RREG32(CP_BUSY_STAT));
+	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
+		 RREG32(CP_STAT));
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+}
+
+static bool r600_is_display_hung(struct radeon_device *rdev)
+{
+	u32 crtc_hung = 0;
+	u32 crtc_status[2];
+	u32 i, j, tmp;
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
+			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+			crtc_hung |= (1 << i);
+		}
+	}
+
+	for (j = 0; j < 10; j++) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (crtc_hung & (1 << i)) {
+				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+				if (tmp != crtc_status[i])
+					crtc_hung &= ~(1 << i);
+			}
+		}
+		if (crtc_hung == 0)
+			return false;
+		udelay(100);
+	}
+
+	return true;
+}
+
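+/**
+ * r600_gpu_check_soft_reset - build a mask of hung blocks
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Inspect GRBM_STATUS, DMA_STATUS_REG and SRBM_STATUS and return a
+ * RADEON_RESET_* mask of the blocks that appear hung, for
+ * r600_gpu_soft_reset() to act on.
+ */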
+static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+	u32 reset_mask = 0;
+	u32 tmp;
+
+	/* GRBM_STATUS */
+	tmp = RREG32(R_008010_GRBM_STATUS);
+	if (rdev->family >= CHIP_RV770) {
+		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+			reset_mask |= RADEON_RESET_GFX;
+	} else {
+		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
+		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
+		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
+		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
+		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
+			reset_mask |= RADEON_RESET_GFX;
+	}
+
+	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
+	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
+		reset_mask |= RADEON_RESET_CP;
+
+	if (G_008010_GRBM_EE_BUSY(tmp))
+		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+	/* DMA_STATUS_REG */
+	tmp = RREG32(DMA_STATUS_REG);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA;
+
+	/* SRBM_STATUS */
+	tmp = RREG32(R_000E50_SRBM_STATUS);
+	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
+		reset_mask |= RADEON_RESET_RLC;
+
+	if (G_000E50_IH_BUSY(tmp))
+		reset_mask |= RADEON_RESET_IH;
+
+	if (G_000E50_SEM_BUSY(tmp))
+		reset_mask |= RADEON_RESET_SEM;
+
+	if (G_000E50_GRBM_RQ_PENDING(tmp))
+		reset_mask |= RADEON_RESET_GRBM;
+
+	if (G_000E50_VMC_BUSY(tmp))
+		reset_mask |= RADEON_RESET_VMC;
+
+	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
+	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
+	    G_000E50_MCDW_BUSY(tmp))
+		reset_mask |= RADEON_RESET_MC;
+
+	if (r600_is_display_hung(rdev))
+		reset_mask |= RADEON_RESET_DISPLAY;
+
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
+	return reset_mask;
+}
+
+static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct rv515_mc_save save;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
+
+	if (reset_mask == 0)
+		return;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	r600_print_gpu_status_regs(rdev);
+
+	/* Disable CP parsing/prefetching */
+	if (rdev->family >= CHIP_RV770)
+		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+	else
+		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+
+	/* disable the RLC */
+	WREG32(RLC_CNTL, 0);
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		/* Disable DMA */
+		tmp = RREG32(DMA_RB_CNTL);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL, tmp);
+	}
+
+	mdelay(50);
+
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+		if (rdev->family >= CHIP_RV770)
+			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
+				S_008020_SOFT_RESET_CB(1) |
+				S_008020_SOFT_RESET_PA(1) |
+				S_008020_SOFT_RESET_SC(1) |
+				S_008020_SOFT_RESET_SPI(1) |
+				S_008020_SOFT_RESET_SX(1) |
+				S_008020_SOFT_RESET_SH(1) |
+				S_008020_SOFT_RESET_TC(1) |
+				S_008020_SOFT_RESET_TA(1) |
+				S_008020_SOFT_RESET_VC(1) |
+				S_008020_SOFT_RESET_VGT(1);
+		else
+			grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
+				S_008020_SOFT_RESET_DB(1) |
+				S_008020_SOFT_RESET_CB(1) |
+				S_008020_SOFT_RESET_PA(1) |
+				S_008020_SOFT_RESET_SC(1) |
+				S_008020_SOFT_RESET_SMX(1) |
+				S_008020_SOFT_RESET_SPI(1) |
+				S_008020_SOFT_RESET_SX(1) |
+				S_008020_SOFT_RESET_SH(1) |
+				S_008020_SOFT_RESET_TC(1) |
+				S_008020_SOFT_RESET_TA(1) |
+				S_008020_SOFT_RESET_VC(1) |
+				S_008020_SOFT_RESET_VGT(1);
+	}
+
+	if (reset_mask & RADEON_RESET_CP) {
+		grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
+			S_008020_SOFT_RESET_VGT(1);
+
+		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+	}
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		if (rdev->family >= CHIP_RV770)
+			srbm_soft_reset |= RV770_SOFT_RESET_DMA;
+		else
+			srbm_soft_reset |= SOFT_RESET_DMA;
+	}
+
+	if (reset_mask & RADEON_RESET_RLC)
+		srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
+
+	if (reset_mask & RADEON_RESET_SEM)
+		srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
+
+	if (reset_mask & RADEON_RESET_IH)
+		srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
+
+	if (reset_mask & RADEON_RESET_GRBM)
+		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
+
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		if (reset_mask & RADEON_RESET_MC)
+			srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
+	}
+
+	if (reset_mask & RADEON_RESET_VMC)
+		srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+	}
+
+	/* Wait a little for things to settle down */
+	mdelay(1);
+
+	rv515_mc_resume(rdev, &save);
+	udelay(50);
+
+	r600_print_gpu_status_regs(rdev);
+}
+
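+/**
+ * r600_asic_reset - soft reset the GPU
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check which blocks are hung, mark the GUI engine hung in the BIOS
+ * scratch register, soft reset the offending blocks, and clear the
+ * flag again if the re-check comes back clean.  Always returns 0.
+ */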
+int r600_asic_reset(struct radeon_device *rdev)
+{
+	u32 reset_mask;
+
+	reset_mask = r600_gpu_check_soft_reset(rdev);
+
+	if (reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, true);
+
+	r600_gpu_soft_reset(rdev, reset_mask);
+
+	reset_mask = r600_gpu_check_soft_reset(rdev);
+
+	if (!reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, false);
+
+	return 0;
+}
+
+/**
+ * r600_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & (RADEON_RESET_GFX |
+			    RADEON_RESET_COMPUTE |
+			    RADEON_RESET_CP))) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force CP activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & RADEON_RESET_DMA)) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force ring activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
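+/**
+ * r6xx_remap_render_backend - build the render backend map
+ *
+ * @rdev: radeon_device pointer
+ * @tiling_pipe_num: log2 of the number of rendering pipes
+ * @max_rb_num: maximum render backends for this asic
+ * @total_max_rb_num: maximum render backends across the family
+ * @disabled_rb_mask: mask of disabled render backends
+ *
+ * Distribute the enabled render backends evenly over the rendering
+ * pipes and pack their indices into a backend-map register value
+ * (2-bit fields on r6xx/r7xx, 4-bit fields on evergreen+).
+ */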
+u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+			      u32 tiling_pipe_num,
+			      u32 max_rb_num,
+			      u32 total_max_rb_num,
+			      u32 disabled_rb_mask)
+{
+	u32 rendering_pipe_num, rb_num_width, req_rb_num;
+	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
+	u32 data = 0, mask = 1 << (max_rb_num - 1);
+	unsigned i, j;
+
+	/* mask out the RBs that don't exist on that asic */
+	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+	/* make sure at least one RB is available */
+	if ((tmp & 0xff) != 0xff)
+		disabled_rb_mask = tmp;
+
+	rendering_pipe_num = 1 << tiling_pipe_num;
+	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
+	BUG_ON(rendering_pipe_num < req_rb_num);
+
+	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
+	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
+
+	if (rdev->family <= CHIP_RV740) {
+		/* r6xx/r7xx */
+		rb_num_width = 2;
+	} else {
+		/* eg+ */
+		rb_num_width = 4;
+	}
+
+	for (i = 0; i < max_rb_num; i++) {
+		if (!(mask & disabled_rb_mask)) {
+			for (j = 0; j < pipe_rb_ratio; j++) {
+				data <<= rb_num_width;
+				data |= max_rb_num - i - 1;
+			}
+			if (pipe_rb_remain) {
+				data <<= rb_num_width;
+				data |= max_rb_num - i - 1;
+				pipe_rb_remain--;
+			}
+		}
+		mask >>= 1;
+	}
+
+	return data;
+}
+
+int r600_count_pipe_bits(uint32_t val)
+{
+	return hweight32(val);
+}
+
+static void r600_gpu_init(struct radeon_device *rdev)
+{
+	u32 tiling_config;
+	u32 ramcfg;
+	u32 cc_rb_backend_disable;
+	u32 cc_gc_shader_pipe_config;
+	u32 tmp;
+	int i, j;
+	u32 sq_config;
+	u32 sq_gpr_resource_mgmt_1 = 0;
+	u32 sq_gpr_resource_mgmt_2 = 0;
+	u32 sq_thread_resource_mgmt = 0;
+	u32 sq_stack_resource_mgmt_1 = 0;
+	u32 sq_stack_resource_mgmt_2 = 0;
+	u32 disabled_rb_mask;
+
+	rdev->config.r600.tiling_group_size = 256;
+	switch (rdev->family) {
+	case CHIP_R600:
+		rdev->config.r600.max_pipes = 4;
+		rdev->config.r600.max_tile_pipes = 8;
+		rdev->config.r600.max_simds = 4;
+		rdev->config.r600.max_backends = 4;
+		rdev->config.r600.max_gprs = 256;
+		rdev->config.r600.max_threads = 192;
+		rdev->config.r600.max_stack_entries = 256;
+		rdev->config.r600.max_hw_contexts = 8;
+		rdev->config.r600.max_gs_threads = 16;
+		rdev->config.r600.sx_max_export_size = 128;
+		rdev->config.r600.sx_max_export_pos_size = 16;
+		rdev->config.r600.sx_max_export_smx_size = 128;
+		rdev->config.r600.sq_num_cf_insts = 2;
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		rdev->config.r600.max_pipes = 2;
+		rdev->config.r600.max_tile_pipes = 2;
+		rdev->config.r600.max_simds = 3;
+		rdev->config.r600.max_backends = 1;
+		rdev->config.r600.max_gprs = 128;
+		rdev->config.r600.max_threads = 192;
+		rdev->config.r600.max_stack_entries = 128;
+		rdev->config.r600.max_hw_contexts = 8;
+		rdev->config.r600.max_gs_threads = 4;
+		rdev->config.r600.sx_max_export_size = 128;
+		rdev->config.r600.sx_max_export_pos_size = 16;
+		rdev->config.r600.sx_max_export_smx_size = 128;
+		rdev->config.r600.sq_num_cf_insts = 2;
+		break;
+	case CHIP_RV610:
+	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
+		rdev->config.r600.max_pipes = 1;
+		rdev->config.r600.max_tile_pipes = 1;
+		rdev->config.r600.max_simds = 2;
+		rdev->config.r600.max_backends = 1;
+		rdev->config.r600.max_gprs = 128;
+		rdev->config.r600.max_threads = 192;
+		rdev->config.r600.max_stack_entries = 128;
+		rdev->config.r600.max_hw_contexts = 4;
+		rdev->config.r600.max_gs_threads = 4;
+		rdev->config.r600.sx_max_export_size = 128;
+		rdev->config.r600.sx_max_export_pos_size = 16;
+		rdev->config.r600.sx_max_export_smx_size = 128;
+		rdev->config.r600.sq_num_cf_insts = 1;
+		break;
+	case CHIP_RV670:
+		rdev->config.r600.max_pipes = 4;
+		rdev->config.r600.max_tile_pipes = 4;
+		rdev->config.r600.max_simds = 4;
+		rdev->config.r600.max_backends = 4;
+		rdev->config.r600.max_gprs = 192;
+		rdev->config.r600.max_threads = 192;
+		rdev->config.r600.max_stack_entries = 256;
+		rdev->config.r600.max_hw_contexts = 8;
+		rdev->config.r600.max_gs_threads = 16;
+		rdev->config.r600.sx_max_export_size = 128;
+		rdev->config.r600.sx_max_export_pos_size = 16;
+		rdev->config.r600.sx_max_export_smx_size = 128;
+		rdev->config.r600.sq_num_cf_insts = 2;
+		break;
+	default:
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	/* Setup tiling */
+	tiling_config = 0;
+	ramcfg = RREG32(RAMCFG);
+	switch (rdev->config.r600.max_tile_pipes) {
+	case 1:
+		tiling_config |= PIPE_TILING(0);
+		break;
+	case 2:
+		tiling_config |= PIPE_TILING(1);
+		break;
+	case 4:
+		tiling_config |= PIPE_TILING(2);
+		break;
+	case 8:
+		tiling_config |= PIPE_TILING(3);
+		break;
+	default:
+		break;
+	}
+	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
+	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+
+	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
+	if (tmp > 3) {
+		tiling_config |= ROW_TILING(3);
+		tiling_config |= SAMPLE_SPLIT(3);
+	} else {
+		tiling_config |= ROW_TILING(tmp);
+		tiling_config |= SAMPLE_SPLIT(tmp);
+	}
+	tiling_config |= BANK_SWAPS(1);
+
+	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+	tmp = R6XX_MAX_BACKENDS -
+		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
+	if (tmp < rdev->config.r600.max_backends) {
+		rdev->config.r600.max_backends = tmp;
+	}
+
+	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
+	tmp = R6XX_MAX_PIPES -
+		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
+	if (tmp < rdev->config.r600.max_pipes) {
+		rdev->config.r600.max_pipes = tmp;
+	}
+	tmp = R6XX_MAX_SIMDS -
+		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
+	if (tmp < rdev->config.r600.max_simds) {
+		rdev->config.r600.max_simds = tmp;
+	}
+
+	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
+					R6XX_MAX_BACKENDS, disabled_rb_mask);
+	tiling_config |= tmp << 16;
+	rdev->config.r600.backend_map = tmp;
+
+	rdev->config.r600.tile_config = tiling_config;
+	WREG32(GB_TILING_CONFIG, tiling_config);
+	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
+	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
+	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
+
+	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
+	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
+
+	/* Setup some CP states */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
+	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
+
+	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
+			     SYNC_WALKER | SYNC_ALIGNER));
+	/* Setup various GPU states */
+	if (rdev->family == CHIP_RV670)
+		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
+
+	tmp = RREG32(SX_DEBUG_1);
+	tmp |= SMX_EVENT_RELEASE;
+	if ((rdev->family > CHIP_R600))
+		tmp |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, tmp);
+
+	if (((rdev->family) == CHIP_R600) ||
+	    ((rdev->family) == CHIP_RV630) ||
+	    ((rdev->family) == CHIP_RV610) ||
+	    ((rdev->family) == CHIP_RV620) ||
+	    ((rdev->family) == CHIP_RS780) ||
+	    ((rdev->family) == CHIP_RS880)) {
+		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
+	} else {
+		WREG32(DB_DEBUG, 0);
+	}
+	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
+			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
+
+	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+	WREG32(VGT_NUM_INSTANCES, 0);
+
+	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
+
+	tmp = RREG32(SQ_MS_FIFO_SIZES);
+	if (((rdev->family) == CHIP_RV610) ||
+	    ((rdev->family) == CHIP_RV620) ||
+	    ((rdev->family) == CHIP_RS780) ||
+	    ((rdev->family) == CHIP_RS880)) {
+		tmp = (CACHE_FIFO_SIZE(0xa) |
+		       FETCH_FIFO_HIWATER(0xa) |
+		       DONE_FIFO_HIWATER(0xe0) |
+		       ALU_UPDATE_FIFO_HIWATER(0x8));
+	} else if (((rdev->family) == CHIP_R600) ||
+		   ((rdev->family) == CHIP_RV630)) {
+		tmp &= ~DONE_FIFO_HIWATER(0xff);
+		tmp |= DONE_FIFO_HIWATER(0x4);
+	}
+	WREG32(SQ_MS_FIFO_SIZES, tmp);
+
+	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
+	 */
+	sq_config = RREG32(SQ_CONFIG);
+	sq_config &= ~(PS_PRIO(3) |
+		       VS_PRIO(3) |
+		       GS_PRIO(3) |
+		       ES_PRIO(3));
+	sq_config |= (DX9_CONSTS |
+		      VC_ENABLE |
+		      PS_PRIO(0) |
+		      VS_PRIO(1) |
+		      GS_PRIO(2) |
+		      ES_PRIO(3));
+
+	if ((rdev->family) == CHIP_R600) {
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
+					  NUM_VS_GPRS(124) |
+					  NUM_CLAUSE_TEMP_GPRS(4));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
+					  NUM_ES_GPRS(0));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
+					   NUM_VS_THREADS(48) |
+					   NUM_GS_THREADS(4) |
+					   NUM_ES_THREADS(4));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
+					    NUM_VS_STACK_ENTRIES(128));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
+					    NUM_ES_STACK_ENTRIES(0));
+	} else if (((rdev->family) == CHIP_RV610) ||
+		   ((rdev->family) == CHIP_RV620) ||
+		   ((rdev->family) == CHIP_RS780) ||
+		   ((rdev->family) == CHIP_RS880)) {
+		/* no vertex cache */
+		sq_config &= ~VC_ENABLE;
+
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
+					  NUM_VS_GPRS(44) |
+					  NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
+					  NUM_ES_GPRS(17));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
+					   NUM_VS_THREADS(78) |
+					   NUM_GS_THREADS(4) |
+					   NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
+					    NUM_VS_STACK_ENTRIES(40));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
+					    NUM_ES_STACK_ENTRIES(16));
+	} else if (((rdev->family) == CHIP_RV630) ||
+		   ((rdev->family) == CHIP_RV635)) {
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
+					  NUM_VS_GPRS(44) |
+					  NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
+					  NUM_ES_GPRS(18));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
+					   NUM_VS_THREADS(78) |
+					   NUM_GS_THREADS(4) |
+					   NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
+					    NUM_VS_STACK_ENTRIES(40));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
+					    NUM_ES_STACK_ENTRIES(16));
+	} else if ((rdev->family) == CHIP_RV670) {
+		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
+					  NUM_VS_GPRS(44) |
+					  NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
+					  NUM_ES_GPRS(17));
+		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
+					   NUM_VS_THREADS(78) |
+					   NUM_GS_THREADS(4) |
+					   NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
+					    NUM_VS_STACK_ENTRIES(64));
+		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
+					    NUM_ES_STACK_ENTRIES(64));
+	}
+
+	WREG32(SQ_CONFIG, sq_config);
+	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
+	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
+	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+
+	if (((rdev->family) == CHIP_RV610) ||
+	    ((rdev->family) == CHIP_RV620) ||
+	    ((rdev->family) == CHIP_RS780) ||
+	    ((rdev->family) == CHIP_RS880)) {
+		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
+	} else {
+		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
+	}
+
+	/* More default values. 2D/3D driver should adjust as needed */
+	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
+					 S1_X(0x4) | S1_Y(0xc)));
+	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
+					 S1_X(0x2) | S1_Y(0x2) |
+					 S2_X(0xa) | S2_Y(0x6) |
+					 S3_X(0x6) | S3_Y(0xa)));
+	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
+					     S1_X(0x4) | S1_Y(0xc) |
+					     S2_X(0x1) | S2_Y(0x6) |
+					     S3_X(0xa) | S3_Y(0xe)));
+	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
+					     S5_X(0x0) | S5_Y(0x0) |
+					     S6_X(0xb) | S6_Y(0x4) |
+					     S7_X(0x7) | S7_Y(0x8)));
+
+	WREG32(VGT_STRMOUT_EN, 0);
+	tmp = rdev->config.r600.max_pipes * 16;
+	switch (rdev->family) {
+	case CHIP_RV610:
+	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
+		tmp += 32;
+		break;
+	case CHIP_RV670:
+		tmp += 128;
+		break;
+	default:
+		break;
+	}
+	if (tmp > 256) {
+		tmp = 256;
+	}
+	WREG32(VGT_ES_PER_GS, 128);
+	WREG32(VGT_GS_PER_ES, tmp);
+	WREG32(VGT_GS_PER_VS, 2);
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+
+	/* more default values. 2D/3D driver should adjust as needed */
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+	WREG32(VGT_STRMOUT_EN, 0);
+	WREG32(SX_MISC, 0);
+	WREG32(PA_SC_MODE_CNTL, 0);
+	WREG32(PA_SC_AA_CONFIG, 0);
+	WREG32(PA_SC_LINE_STIPPLE, 0);
+	WREG32(SPI_INPUT_Z, 0);
+	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
+	WREG32(CB_COLOR7_FRAG, 0);
+
+	/* Clear render buffer base addresses */
+	WREG32(CB_COLOR0_BASE, 0);
+	WREG32(CB_COLOR1_BASE, 0);
+	WREG32(CB_COLOR2_BASE, 0);
+	WREG32(CB_COLOR3_BASE, 0);
+	WREG32(CB_COLOR4_BASE, 0);
+	WREG32(CB_COLOR5_BASE, 0);
+	WREG32(CB_COLOR6_BASE, 0);
+	WREG32(CB_COLOR7_BASE, 0);
+	WREG32(CB_COLOR7_FRAG, 0);
+
+	switch (rdev->family) {
+	case CHIP_RV610:
+	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
+		tmp = TC_L2_SIZE(8);
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		tmp = TC_L2_SIZE(4);
+		break;
+	case CHIP_R600:
+		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
+		break;
+	default:
+		tmp = TC_L2_SIZE(0);
+		break;
+	}
+	WREG32(TC_CNTL, tmp);
+
+	tmp = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, tmp);
+
+	tmp = RREG32(ARB_POP);
+	tmp |= ENABLE_TC128;
+	WREG32(ARB_POP, tmp);
+
+	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
+			       NUM_CLIP_SEQ(3)));
+	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
+	WREG32(VC_ENHANCE, 0);
+}
+
+
+/*
+ * Indirect register accessors
+ */
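+/* PCIE_PORT_INDEX/PCIE_PORT_DATA form an index/data pair; the dummy
+ * RREG32() calls below read the register back to flush the posted
+ * write before the following access.
+ */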
+u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
+{
+	u32 r;
+
+	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+	(void)RREG32(PCIE_PORT_INDEX);
+	r = RREG32(PCIE_PORT_DATA);
+	return r;
+}
+
+void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
+	(void)RREG32(PCIE_PORT_INDEX);
+	WREG32(PCIE_PORT_DATA, (v));
+	(void)RREG32(PCIE_PORT_DATA);
+}
+
+/*
+ * CP & Ring
+ */
+void r600_cp_stop(struct radeon_device *rdev)
+{
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+	WREG32(SCRATCH_UMSK, 0);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+}
+
+int r600_init_microcode(struct radeon_device *rdev)
+{
+	struct platform_device *pdev;
+	const char *chip_name;
+	const char *rlc_chip_name;
+	size_t pfp_req_size, me_req_size, rlc_req_size;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+
+	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+	err = IS_ERR(pdev);
+	if (err) {
+		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+		return -EINVAL;
+	}
+
+	switch (rdev->family) {
+	case CHIP_R600:
+		chip_name = "R600";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV610:
+		chip_name = "RV610";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV630:
+		chip_name = "RV630";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV620:
+		chip_name = "RV620";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV635:
+		chip_name = "RV635";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV670:
+		chip_name = "RV670";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RS780:
+	case CHIP_RS880:
+		chip_name = "RS780";
+		rlc_chip_name = "R600";
+		break;
+	case CHIP_RV770:
+		chip_name = "RV770";
+		rlc_chip_name = "R700";
+		break;
+	case CHIP_RV730:
+	case CHIP_RV740:
+		chip_name = "RV730";
+		rlc_chip_name = "R700";
+		break;
+	case CHIP_RV710:
+		chip_name = "RV710";
+		rlc_chip_name = "R700";
+		break;
+	case CHIP_CEDAR:
+		chip_name = "CEDAR";
+		rlc_chip_name = "CEDAR";
+		break;
+	case CHIP_REDWOOD:
+		chip_name = "REDWOOD";
+		rlc_chip_name = "REDWOOD";
+		break;
+	case CHIP_JUNIPER:
+		chip_name = "JUNIPER";
+		rlc_chip_name = "JUNIPER";
+		break;
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		chip_name = "CYPRESS";
+		rlc_chip_name = "CYPRESS";
+		break;
+	case CHIP_PALM:
+		chip_name = "PALM";
+		rlc_chip_name = "SUMO";
+		break;
+	case CHIP_SUMO:
+		chip_name = "SUMO";
+		rlc_chip_name = "SUMO";
+		break;
+	case CHIP_SUMO2:
+		chip_name = "SUMO2";
+		rlc_chip_name = "SUMO";
+		break;
+	default: BUG();
+	}
+
+	if (rdev->family >= CHIP_CEDAR) {
+		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+	} else if (rdev->family >= CHIP_RV770) {
+		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
+		me_req_size = R700_PM4_UCODE_SIZE * 4;
+		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
+	} else {
+		pfp_req_size = PFP_UCODE_SIZE * 4;
+		me_req_size = PM4_UCODE_SIZE * 12;
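+		/* r6xx ME entries are three dwords wide, hence *12 bytes
+		 * here and the PM4_UCODE_SIZE * 3 dword loop when the
+		 * ucode is loaded */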
+		rlc_req_size = RLC_UCODE_SIZE * 4;
+	}
+
+	DRM_INFO("Loading %s Microcode\n", chip_name);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->pfp_fw->size != pfp_req_size) {
+		printk(KERN_ERR
+		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->pfp_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->me_fw->size != me_req_size) {
+		printk(KERN_ERR
+		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->me_fw->size, fw_name);
+		err = -EINVAL;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->rlc_fw->size != rlc_req_size) {
+		printk(KERN_ERR
+		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->rlc_fw->size, fw_name);
+		err = -EINVAL;
+	}
+
+out:
+	platform_device_unregister(pdev);
+
+	if (err) {
+		if (err != -EINVAL)
+			printk(KERN_ERR
+			       "r600_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		release_firmware(rdev->pfp_fw);
+		rdev->pfp_fw = NULL;
+		release_firmware(rdev->me_fw);
+		rdev->me_fw = NULL;
+		release_firmware(rdev->rlc_fw);
+		rdev->rlc_fw = NULL;
+	}
+	return err;
+}
+
+static int r600_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	r600_cp_stop(rdev);
+
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+
+	/* Reset cp */
+	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+
+	WREG32(CP_ME_RAM_WADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
+		WREG32(CP_ME_RAM_DATA,
+		       be32_to_cpup(fw_data++));
+
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA,
+		       be32_to_cpup(fw_data++));
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+int r600_cp_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+	uint32_t cp_me;
+
+	r = radeon_ring_lock(rdev, ring, 7);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	if (rdev->family >= CHIP_RV770) {
+		radeon_ring_write(ring, 0x0);
+		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
+	} else {
+		radeon_ring_write(ring, 0x3);
+		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
+	}
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
+
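+	/* write CP_ME_CNTL with the halt bits clear so the micro engine
+	 * starts running */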
+	cp_me = 0xff;
+	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
+	return 0;
+}
+
+int r600_cp_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset cp */
+	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+
+	/* Set ring buffer size */
+	rb_bufsz = drm_order(ring->ring_size / 8);
+	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB_CNTL, tmp);
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+	WREG32(CP_RB_RPTR_WR, 0);
+	ring->wptr = 0;
+	WREG32(CP_RB_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB_RPTR_ADDR,
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
+	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+	if (rdev->wb.enabled)
+		WREG32(SCRATCH_UMSK, 0xff);
+	else {
+		tmp |= RB_NO_UPDATE;
+		WREG32(SCRATCH_UMSK, 0);
+	}
+
+	mdelay(1);
+	WREG32(CP_RB_CNTL, tmp);
+
+	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
+	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+
+	ring->rptr = RREG32(CP_RB_RPTR);
+
+	r600_cp_start(rdev);
+	ring->ready = true;
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+	return 0;
+}
+
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
+{
+	u32 rb_bufsz;
+	int r;
+
+	/* Align ring size */
+	rb_bufsz = drm_order(ring_size / 8);
+	ring_size = (1 << (rb_bufsz + 1)) * 4;
+	ring->ring_size = ring_size;
+	ring->align_mask = 16 - 1;
+
+	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
+		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
+		if (r) {
+			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
+			ring->rptr_save_reg = 0;
+		}
+	}
+}
+
+void r600_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r600_cp_stop(rdev);
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine.  The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things.  It also
+ * has support for tiling/detiling of buffers.
+ */
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+	u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+	rb_cntl &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, rb_cntl);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	u32 rb_cntl, dma_cntl, ib_cntl;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset dma */
+	if (rdev->family >= CHIP_RV770)
+		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+	else
+		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+	/* Set ring buffer size in dwords */
+	rb_bufsz = drm_order(ring->ring_size / 4);
+	rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+	WREG32(DMA_RB_CNTL, rb_cntl);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(DMA_RB_RPTR, 0);
+	WREG32(DMA_RB_WPTR, 0);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(DMA_RB_RPTR_ADDR_HI,
+	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+	WREG32(DMA_RB_RPTR_ADDR_LO,
+	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+	if (rdev->wb.enabled)
+		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+	/* enable DMA IBs */
+	ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+	ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+	WREG32(DMA_IB_CNTL, ib_cntl);
+
+	dma_cntl = RREG32(DMA_CNTL);
+	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+	WREG32(DMA_CNTL, dma_cntl);
+
+	if (rdev->family >= CHIP_RV770)
+		WREG32(DMA_MODE, 1);
+
+	ring->wptr = 0;
+	WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+	ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+	ring->ready = true;
+
+	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+	return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+	r600_dma_stop(rdev);
+	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
+
+/*
+ * UVD
+ */
+int r600_uvd_rbc_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	uint64_t rptr_addr;
+	uint32_t rb_bufsz, tmp;
+	int r;
+
+	rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
+
+	if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
+		DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
+		return -EINVAL;
+	}
+
+	/* force RBC into idle state */
+	WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+	/* Set the write pointer delay */
+	WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
+
+	/* set the wb address */
+	WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
+
+	/* program the 4GB memory segment for rptr and ring buffer */
+	WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
+				   (0x7 << 16) | (0x1 << 31));
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(UVD_RBC_RB_RPTR, 0x0);
+
+	ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
+	WREG32(UVD_RBC_RB_WPTR, ring->wptr);
+
+	/* set the ring address */
+	WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
+
+	/* Set ring buffer size */
+	rb_bufsz = drm_order(ring->ring_size);
+	rb_bufsz = (0x1 << 8) | rb_bufsz;
+	WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
+
+	ring->ready = true;
+	r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
+	if (r) {
+		ring->ready = false;
+		return r;
+	}
+
+	r = radeon_ring_lock(rdev, ring, 10);
+	if (r) {
+		DRM_ERROR("radeon: failed to lock UVD ring (%d).\n", r);
+		return r;
+	}
+
+	tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
+	radeon_ring_write(ring, tmp);
+	radeon_ring_write(ring, 0xFFFFF);
+
+	tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
+	radeon_ring_write(ring, tmp);
+	radeon_ring_write(ring, 0xFFFFF);
+
+	tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
+	radeon_ring_write(ring, tmp);
+	radeon_ring_write(ring, 0xFFFFF);
+
+	/* Clear timeout status bits */
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
+	radeon_ring_write(ring, 0x8);
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
+	radeon_ring_write(ring, 3);
+
+	radeon_ring_unlock_commit(rdev, ring);
+
+	return 0;
+}
+
+void r600_uvd_stop(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+
+	/* force RBC into idle state */
+	WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+	/* Stall UMC and register bus before resetting VCPU */
+	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+	mdelay(1);
+
+	/* put VCPU into reset */
+	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+	mdelay(5);
+
+	/* disable VCPU clock */
+	WREG32(UVD_VCPU_CNTL, 0x0);
+
+	/* Unstall UMC and register bus */
+	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
+	ring->ready = false;
+}
+
+int r600_uvd_init(struct radeon_device *rdev)
+{
+	int i, j, r;
+	/* disable byte swapping */
+	u32 lmi_swap_cntl = 0;
+	u32 mp_swap_cntl = 0;
+
+	/* raise clocks while booting up the VCPU */
+	radeon_set_uvd_clocks(rdev, 53300, 40000);
+
+	/* disable clock gating */
+	WREG32(UVD_CGC_GATE, 0);
+
+	/* disable interrupts */
+	WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
+
+	/* Stall UMC and register bus before resetting VCPU */
+	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+	mdelay(1);
+
+	/* put LMI, VCPU, RBC etc... into reset */
+	WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
+	       LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
+	       CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
+	mdelay(5);
+
+	/* take UVD block out of reset */
+	WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
+	mdelay(5);
+
+	/* initialize UVD memory controller */
+	WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
+			     (1 << 21) | (1 << 9) | (1 << 20));
+
+#ifdef __BIG_ENDIAN
+	/* swap (8 in 32) RB and IB */
+	lmi_swap_cntl = 0xa;
+	mp_swap_cntl = 0;
+#endif
+	WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+	WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
+
+	WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
+	WREG32(UVD_MPC_SET_MUXA1, 0x0);
+	WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
+	WREG32(UVD_MPC_SET_MUXB1, 0x0);
+	WREG32(UVD_MPC_SET_ALU, 0);
+	WREG32(UVD_MPC_SET_MUX, 0x88);
+
+	/* take all subblocks out of reset, except VCPU */
+	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+	mdelay(5);
+
+	/* enable VCPU clock */
+	WREG32(UVD_VCPU_CNTL,  1 << 9);
+
+	/* enable UMC */
+	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+
+	/* boot up the VCPU */
+	WREG32(UVD_SOFT_RESET, 0);
+	mdelay(10);
+
+	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
+	for (i = 0; i < 10; ++i) {
+		uint32_t status;
+		for (j = 0; j < 100; ++j) {
+			status = RREG32(UVD_STATUS);
+			if (status & 2)
+				break;
+			mdelay(10);
+		}
+		r = 0;
+		if (status & 2)
+			break;
+
+		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+		WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
+		mdelay(10);
+		WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
+		mdelay(10);
+		r = -1;
+	}
+
+	if (r) {
+		DRM_ERROR("UVD not responding, giving up!!!\n");
+		radeon_set_uvd_clocks(rdev, 0, 0);
+		return r;
+	}
+
+	/* enable interrupts */
+	WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
+
+	r = r600_uvd_rbc_start(rdev);
+	if (!r)
+		DRM_INFO("UVD initialized successfully.\n");
+
+	/* lower clocks again */
+	radeon_set_uvd_clocks(rdev, 0, 0);
+
+	return r;
+}
+
+/*
+ * GPU scratch registers helpers function.
+ */
+void r600_scratch_init(struct radeon_device *rdev)
+{
+	int i;
+
+	rdev->scratch.num_reg = 7;
+	rdev->scratch.reg_base = SCRATCH_REG0;
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		rdev->scratch.free[i] = true;
+		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
+	}
+}
+
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ring_lock(rdev, ring, 3);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
+		radeon_scratch_free(rdev, scratch);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+			  ring->idx, scratch, tmp);
+		r = -EINVAL;
+	}
+	radeon_scratch_free(rdev, scratch);
+	return r;
+}
+
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value
+ * to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	unsigned i;
+	int r;
+	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+	u32 tmp;
+
+	if (!ptr) {
+		DRM_ERROR("invalid vram scratch pointer\n");
+		return -EINVAL;
+	}
+
+	tmp = 0xCAFEDEAD;
+	writel(tmp, ptr);
+
+	r = radeon_ring_lock(rdev, ring, 4);
+	if (r) {
+		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+		return r;
+	}
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = readl(ptr);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+			  ring->idx, tmp);
+		r = -EINVAL;
+	}
+	return r;
+}
+
+int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
+	r = radeon_ring_lock(rdev, ring, 3);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
+			  ring->idx, r);
+		return r;
+	}
+	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(UVD_CONTEXT_ID);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ring test on %d succeeded in %d usecs\n",
+			 ring->idx, i);
+	} else {
+		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+			  ring->idx, tmp);
+		r = -EINVAL;
+	}
+	return r;
+}
+
+/*
+ * CP fences/semaphores
+ */
+
+void r600_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+		PACKET3_SH_ACTION_ENA;
+
+	if (rdev->family >= CHIP_RV770)
+		cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
+
+	if (rdev->wb.use_event) {
+		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+		/* flush read cache over gart */
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(ring, cp_coher_cntl);
+		radeon_ring_write(ring, 0xFFFFFFFF);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 10); /* poll interval */
+		/* EVENT_WRITE_EOP - flush caches, send int */
+		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+		radeon_ring_write(ring, addr & 0xffffffff);
+		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+		radeon_ring_write(ring, fence->seq);
+		radeon_ring_write(ring, 0);
+	} else {
+		/* flush read cache over gart */
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(ring, cp_coher_cntl);
+		radeon_ring_write(ring, 0xFFFFFFFF);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 10); /* poll interval */
+		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+		/* wait for 3D idle clean */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+		/* Emit fence sequence & fire IRQ */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+		radeon_ring_write(ring, fence->seq);
+		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
+		radeon_ring_write(ring, RB_INT_STAT);
+	}
+}
+
+void r600_uvd_fence_emit(struct radeon_device *rdev,
+			 struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+	radeon_ring_write(ring, 2);
+}
+
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *ring,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+	if (rdev->family < CHIP_CAYMAN)
+		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
+
+	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+}
+
+/*
+ * DMA fences/semaphores
+ */
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+			      struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* write the fence */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	radeon_ring_write(ring, lower_32_bits(fence->seq));
+	/* generate an interrupt */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+				  struct radeon_ring *ring,
+				  struct radeon_semaphore *semaphore,
+				  bool emit_wait)
+{
+	u64 addr = semaphore->gpu_addr;
+	u32 s = emit_wait ? 0 : 1;
+
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
+
+void r600_uvd_semaphore_emit(struct radeon_device *rdev,
+			     struct radeon_ring *ring,
+			     struct radeon_semaphore *semaphore,
+			     bool emit_wait)
+{
+	uint64_t addr = semaphore->gpu_addr;
+
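+	/* The 8-byte-aligned semaphore address is split into two 20-bit
+	 * halves (bits 3..22 and 23..42), covering a 43-bit space. */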
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+	radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+	radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+	radeon_ring_write(ring, emit_wait ? 1 : 0);
+}
+
+int r600_copy_blit(struct radeon_device *rdev,
+		   uint64_t src_offset,
+		   uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	struct radeon_sa_bo *vb = NULL;
+	int r;
+
+	r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
+	if (r) {
+		return r;
+	}
+	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
+	r600_blit_done_copy(rdev, fence, vb, sem);
+	return 0;
+}
+
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (r6xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+		  uint64_t src_offset, uint64_t dst_offset,
+		  unsigned num_gpu_pages,
+		  struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
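+	/* each DMA_PACKET_COPY moves at most 0xFFFE dwords (~256 KiB);
+	 * e.g. a 1 MiB copy (0x40000 dwords) needs
+	 * DIV_ROUND_UP(0x40000, 0xFFFE) = 5 packets */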
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFE)
+			cur_size_in_dw = 0xFFFE;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+					 (upper_32_bits(src_offset) & 0xff)));
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
+int r600_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size)
+{
+	/* FIXME: implement */
+	return 0;
+}
+
+void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
+{
+	/* FIXME: implement */
+}
+
+static int r600_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	/* enable pcie gen2 link */
+	r600_pcie_gen2_enable(rdev);
+
+	r600_mc_program(rdev);
+
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	if (rdev->flags & RADEON_IS_AGP) {
+		r600_agp_enable(rdev);
+	} else {
+		r = r600_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+	r600_gpu_init(rdev);
+	r = r600_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy.copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	r600_irq_set(rdev);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR, DMA_RB_WPTR,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
+	r = r600_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = r600_cp_resume(rdev);
+	if (r)
+		return r;
+
+	r = r600_dma_resume(rdev);
+	if (r)
+		return r;
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio init failed\n");
+		return r;
+	}
+
+	return 0;
+}
+
+void r600_vga_set_state(struct radeon_device *rdev, bool state)
+{
+	uint32_t temp;
+
+	temp = RREG32(CONFIG_CNTL);
+	if (state == false) {
+		temp &= ~(1<<0);
+		temp |= (1<<1);
+	} else {
+		temp &= ~(1<<1);
+	}
+	WREG32(CONFIG_CNTL, temp);
+}
+
+int r600_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
+	 * posting performs the tasks necessary to bring the GPU back into
+	 * good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	rdev->accel_working = true;
+	r = r600_startup(rdev);
+	if (r) {
+		DRM_ERROR("r600 startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+
+	return r;
+}
+
+int r600_suspend(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r600_cp_stop(rdev);
+	r600_dma_stop(rdev);
+	r600_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	r600_pcie_gart_disable(rdev);
+
+	return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does little more
+ * than call ASIC-specific functions. This should also allow us
+ * to remove a bunch of callbacks like vram_info.
+ */
+int r600_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (r600_debugfs_mc_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for mc!\n");
+	}
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* Initialize scratch registers */
+	r600_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			radeon_agp_disable(rdev);
+	}
+	r = r600_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = r600_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r600_cp_fini(rdev);
+		r600_dma_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		r600_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	return 0;
+}
+
+void r600_fini(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r600_blit_fini(rdev);
+	r600_cp_fini(rdev);
+	r600_dma_fini(rdev);
+	r600_irq_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	r600_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+
+/*
+ * CS stuff
+ */
+void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	u32 next_rptr;
+
+	if (ring->rptr_save_reg) {
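+		/* next_rptr: 3 dwords for this scratch-reg write plus the
+		 * 4-dword INDIRECT_BUFFER packet emitted below */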
+		next_rptr = ring->wptr + 3 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, ((ring->rptr_save_reg -
+					 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+		radeon_ring_write(ring, next_rptr);
+	} else if (rdev->wb.enabled) {
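+		/* next_rptr: 5 dwords for the MEM_WRITE packet plus the
+		 * 4-dword INDIRECT_BUFFER packet emitted below */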
+		next_rptr = ring->wptr + 5 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+		radeon_ring_write(ring, next_rptr);
+		radeon_ring_write(ring, 0);
+	}
+
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw);
+}
+
+void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
+	radeon_ring_write(ring, ib->gpu_addr);
+	radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
+	radeon_ring_write(ring, ib->length_dw);
+}
+
+int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_ib ib;
+	uint32_t scratch;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	r = radeon_scratch_get(rdev, &scratch);
+	if (r) {
+		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		goto free_scratch;
+	}
+	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	ib.ptr[2] = 0xDEADBEEF;
+	ib.length_dw = 3;
+	r = radeon_ib_schedule(rdev, &ib, NULL);
+	if (r) {
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		goto free_ib;
+	}
+	r = radeon_fence_wait(ib.fence, false);
+	if (r) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		goto free_ib;
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+	} else {
+		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
+			  scratch, tmp);
+		r = -EINVAL;
+	}
+free_ib:
+	radeon_ib_free(rdev, &ib);
+free_scratch:
+	radeon_scratch_free(rdev, scratch);
+	return r;
+}
+
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_ib ib;
+	unsigned i;
+	int r;
+	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+	u32 tmp = 0;
+
+	if (!ptr) {
+		DRM_ERROR("invalid vram scratch pointer\n");
+		return -EINVAL;
+	}
+
+	tmp = 0xCAFEDEAD;
+	writel(tmp, ptr);
+
+	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+	if (r) {
+		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+		return r;
+	}
+
+	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+	ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+	ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+	ib.ptr[3] = 0xDEADBEEF;
+	ib.length_dw = 4;
+
+	r = radeon_ib_schedule(rdev, &ib, NULL);
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+		return r;
+	}
+	r = radeon_fence_wait(ib.fence, false);
+	if (r) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		return r;
+	}
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = readl(ptr);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i < rdev->usec_timeout) {
+		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+	} else {
+		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+		r = -EINVAL;
+	}
+	radeon_ib_free(rdev, &ib);
+	return r;
+}
+
+int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	struct radeon_fence *fence = NULL;
+	int r;
+
+	r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+	if (r) {
+		DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+	if (r) {
+		DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
+		goto error;
+	}
+
+	r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
+	if (r) {
+		DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
+		goto error;
+	}
+
+	r = radeon_fence_wait(fence, false);
+	if (r) {
+		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+		goto error;
+	}
+	DRM_INFO("ib test on ring %d succeeded\n",  ring->idx);
+error:
+	radeon_fence_unref(&fence);
+	radeon_set_uvd_clocks(rdev, 0, 0);
+	return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	if (rdev->wb.enabled) {
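+		/* next_rptr must land after this 4-dword write packet, any
+		 * NOP padding up to the 5-mod-8 boundary, and the 3-dword
+		 * INDIRECT_BUFFER packet emitted below */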
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
+	 */
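+	/* e.g. with wptr % 8 == 2, three NOPs advance it to 5 and the
+	 * 3-dword INDIRECT_BUFFER packet then ends exactly on dword 8 */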
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+/*
+ * Interrupts
+ *
+ * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
+ * the same as the CP ring buffer, but in reverse.  Rather than the CPU
+ * writing to the ring and the GPU consuming, the GPU writes to the ring
+ * and host consumes.  As the host irq handler processes interrupts, it
+ * increments the rptr.  When the rptr catches up with the wptr, all the
+ * current interrupts have been processed.
+ */
+
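+/* A minimal sketch of the consumer model described above (illustrative
+ * only; the real loop, with acking, locking and overflow handling, is
+ * r600_irq_process() below, and handle_entry() is a placeholder):
+ *
+ *	rptr = rdev->ih.rptr;
+ *	wptr = r600_get_ih_wptr(rdev);
+ *	while (rptr != wptr) {
+ *		handle_entry(&rdev->ih.ring[rptr / 4]);
+ *		rptr = (rptr + 16) & rdev->ih.ptr_mask;
+ *	}
+ *	WREG32(IH_RB_RPTR, rptr);
+ */
+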
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
+{
+	u32 rb_bufsz;
+
+	/* Align ring size */
+	rb_bufsz = drm_order(ring_size / 4);
+	ring_size = (1 << rb_bufsz) * 4;
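+	/* e.g. the default 64 KiB request: drm_order(65536 / 4) = 14, so
+	 * ring_size = (1 << 14) * 4 = 65536, unchanged */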
+	rdev->ih.ring_size = ring_size;
+	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
+	rdev->ih.rptr = 0;
+}
+
+int r600_ih_ring_alloc(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Allocate ring buffer */
+	if (rdev->ih.ring_obj == NULL) {
+		r = radeon_bo_create(rdev, rdev->ih.ring_size,
+				     PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_GTT,
+				     NULL, &rdev->ih.ring_obj);
+		if (r) {
+			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
+			return r;
+		}
+		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->ih.ring_obj,
+				  RADEON_GEM_DOMAIN_GTT,
+				  &rdev->ih.gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(rdev->ih.ring_obj);
+			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
+			return r;
+		}
+		r = radeon_bo_kmap(rdev->ih.ring_obj,
+				   (void **)&rdev->ih.ring);
+		radeon_bo_unreserve(rdev->ih.ring_obj);
+		if (r) {
+			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
+			return r;
+		}
+	}
+	return 0;
+}
+
+void r600_ih_ring_fini(struct radeon_device *rdev)
+{
+	int r;
+	if (rdev->ih.ring_obj) {
+		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->ih.ring_obj);
+			radeon_bo_unpin(rdev->ih.ring_obj);
+			radeon_bo_unreserve(rdev->ih.ring_obj);
+		}
+		radeon_bo_unref(&rdev->ih.ring_obj);
+		rdev->ih.ring = NULL;
+		rdev->ih.ring_obj = NULL;
+	}
+}
+
+void r600_rlc_stop(struct radeon_device *rdev)
+{
+	if ((rdev->family >= CHIP_RV770) &&
+	    (rdev->family <= CHIP_RV740)) {
+		/* r7xx asics need to soft reset RLC before halting */
+		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
+		RREG32(SRBM_SOFT_RESET);
+		mdelay(15);
+		WREG32(SRBM_SOFT_RESET, 0);
+		RREG32(SRBM_SOFT_RESET);
+	}
+
+	WREG32(RLC_CNTL, 0);
+}
+
+static void r600_rlc_start(struct radeon_device *rdev)
+{
+	WREG32(RLC_CNTL, RLC_ENABLE);
+}
+
+static int r600_rlc_init(struct radeon_device *rdev)
+{
+	u32 i;
+	const __be32 *fw_data;
+
+	if (!rdev->rlc_fw)
+		return -EINVAL;
+
+	r600_rlc_stop(rdev);
+
+	WREG32(RLC_HB_CNTL, 0);
+
+	if (rdev->family == CHIP_ARUBA) {
+		WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+		WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+	}
+	if (rdev->family <= CHIP_CAYMAN) {
+		WREG32(RLC_HB_BASE, 0);
+		WREG32(RLC_HB_RPTR, 0);
+		WREG32(RLC_HB_WPTR, 0);
+	}
+	if (rdev->family <= CHIP_CAICOS) {
+		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+	}
+	WREG32(RLC_MC_CNTL, 0);
+	WREG32(RLC_UCODE_CNTL, 0);
+
+	fw_data = (const __be32 *)rdev->rlc_fw->data;
+	if (rdev->family >= CHIP_ARUBA) {
+		for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else if (rdev->family >= CHIP_CAYMAN) {
+		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else if (rdev->family >= CHIP_CEDAR) {
+		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else if (rdev->family >= CHIP_RV770) {
+		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	} else {
+		for (i = 0; i < RLC_UCODE_SIZE; i++) {
+			WREG32(RLC_UCODE_ADDR, i);
+			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+		}
+	}
+	WREG32(RLC_UCODE_ADDR, 0);
+
+	r600_rlc_start(rdev);
+
+	return 0;
+}
+
+static void r600_enable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_cntl = RREG32(IH_CNTL);
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+	ih_cntl |= ENABLE_INTR;
+	ih_rb_cntl |= IH_RB_ENABLE;
+	WREG32(IH_CNTL, ih_cntl);
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	rdev->ih.enabled = true;
+}
+
+void r600_disable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+	u32 ih_cntl = RREG32(IH_CNTL);
+
+	ih_rb_cntl &= ~IH_RB_ENABLE;
+	ih_cntl &= ~ENABLE_INTR;
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	WREG32(IH_CNTL, ih_cntl);
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+	rdev->ih.enabled = false;
+	rdev->ih.rptr = 0;
+}
+
+static void r600_disable_interrupt_state(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL, tmp);
+	WREG32(GRBM_INT_CNTL, 0);
+	WREG32(DxMODE_INT_MASK, 0);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
+	if (ASIC_IS_DCE3(rdev)) {
+		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
+		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
+		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+		if (ASIC_IS_DCE32(rdev)) {
+			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD5_INT_CONTROL, tmp);
+			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+			WREG32(DC_HPD6_INT_CONTROL, tmp);
+			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
+		} else {
+			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+			tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
+		}
+	} else {
+		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+		tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+		WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+		tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+		WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
+	}
+}
+
+int r600_irq_init(struct radeon_device *rdev)
+{
+	int ret = 0;
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+	/* allocate ring */
+	ret = r600_ih_ring_alloc(rdev);
+	if (ret)
+		return ret;
+
+	/* disable irqs */
+	r600_disable_interrupts(rdev);
+
+	/* init rlc */
+	ret = r600_rlc_init(rdev);
+	if (ret) {
+		r600_ih_ring_fini(rdev);
+		return ret;
+	}
+
+	/* setup interrupt control */
+	/* set dummy read address to ring address */
+	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+	interrupt_cntl = RREG32(INTERRUPT_CNTL);
+	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+	 */
+	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+	WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
+	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+
+	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+		      IH_WPTR_OVERFLOW_CLEAR |
+		      (rb_bufsz << 1));
+
+	if (rdev->wb.enabled)
+		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+	/* set the writeback address whether it's enabled or not */
+	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
+
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+
+	/* Default settings for IH_CNTL (disabled at first) */
+	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
+	/* RPTR_REARM only works if msi's are enabled */
+	if (rdev->msi_enabled)
+		ih_cntl |= RPTR_REARM;
+	WREG32(IH_CNTL, ih_cntl);
+
+	/* force the active interrupt state to all disabled */
+	if (rdev->family >= CHIP_CEDAR)
+		evergreen_disable_interrupt_state(rdev);
+	else
+		r600_disable_interrupt_state(rdev);
+
+	/* at this point everything should be setup correctly to enable master */
+	pci_set_master(rdev->pdev);
+
+	/* enable irqs */
+	r600_enable_interrupts(rdev);
+
+	return ret;
+}
+
+void r600_irq_suspend(struct radeon_device *rdev)
+{
+	r600_irq_disable(rdev);
+	r600_rlc_stop(rdev);
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+	r600_irq_suspend(rdev);
+	r600_ih_ring_fini(rdev);
+}
+
+int r600_irq_set(struct radeon_device *rdev)
+{
+	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 mode_int = 0;
+	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+	u32 grbm_int_cntl = 0;
+	u32 hdmi0, hdmi1;
+	u32 d1grph = 0, d2grph = 0;
+	u32 dma_cntl;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+		return -EINVAL;
+	}
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled) {
+		r600_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		r600_disable_interrupt_state(rdev);
+		return 0;
+	}
+
+	if (ASIC_IS_DCE3(rdev)) {
+		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		if (ASIC_IS_DCE32(rdev)) {
+			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
+		} else {
+			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+		}
+	} else {
+		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
+	}
+	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
+	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int\n");
+		cp_int_cntl |= RB_INT_ENABLE;
+		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+	}
+
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		DRM_DEBUG("r600_irq_set: vblank 0\n");
+		mode_int |= D1MODE_VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		DRM_DEBUG("r600_irq_set: vblank 1\n");
+		mode_int |= D2MODE_VBLANK_INT_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		DRM_DEBUG("r600_irq_set: hpd 1\n");
+		hpd1 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[1]) {
+		DRM_DEBUG("r600_irq_set: hpd 2\n");
+		hpd2 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[2]) {
+		DRM_DEBUG("r600_irq_set: hpd 3\n");
+		hpd3 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[3]) {
+		DRM_DEBUG("r600_irq_set: hpd 4\n");
+		hpd4 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[4]) {
+		DRM_DEBUG("r600_irq_set: hpd 5\n");
+		hpd5 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[5]) {
+		DRM_DEBUG("r600_irq_set: hpd 6\n");
+		hpd6 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.afmt[0]) {
+		DRM_DEBUG("r600_irq_set: hdmi 0\n");
+		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
+	}
+	if (rdev->irq.afmt[1]) {
+		DRM_DEBUG("r600_irq_set: hdmi 1\n");
+		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
+	}
+
+	WREG32(CP_INT_CNTL, cp_int_cntl);
+	WREG32(DMA_CNTL, dma_cntl);
+	WREG32(DxMODE_INT_MASK, mode_int);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+	if (ASIC_IS_DCE3(rdev)) {
+		WREG32(DC_HPD1_INT_CONTROL, hpd1);
+		WREG32(DC_HPD2_INT_CONTROL, hpd2);
+		WREG32(DC_HPD3_INT_CONTROL, hpd3);
+		WREG32(DC_HPD4_INT_CONTROL, hpd4);
+		if (ASIC_IS_DCE32(rdev)) {
+			WREG32(DC_HPD5_INT_CONTROL, hpd5);
+			WREG32(DC_HPD6_INT_CONTROL, hpd6);
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
+		} else {
+			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
+		}
+	} else {
+		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
+		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
+	}
+
+	return 0;
+}
+
+static void r600_irq_ack(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (ASIC_IS_DCE3(rdev)) {
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+		if (ASIC_IS_DCE32(rdev)) {
+			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
+			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
+		} else {
+			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
+		}
+	} else {
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
+		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
+		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
+	}
+	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
+
+	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
+		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
+		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
+		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
+		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+		if (ASIC_IS_DCE3(rdev)) {
+			tmp = RREG32(DC_HPD1_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD1_INT_CONTROL, tmp);
+		} else {
+			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		}
+	}
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+		if (ASIC_IS_DCE3(rdev)) {
+			tmp = RREG32(DC_HPD2_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD2_INT_CONTROL, tmp);
+		} else {
+			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		}
+	}
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+		if (ASIC_IS_DCE3(rdev)) {
+			tmp = RREG32(DC_HPD3_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD3_INT_CONTROL, tmp);
+		} else {
+			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+		}
+	}
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (ASIC_IS_DCE32(rdev)) {
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+			tmp = RREG32(DC_HPD5_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD5_INT_CONTROL, tmp);
+		}
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+			tmp = RREG32(DC_HPD6_INT_CONTROL);
+			tmp |= DC_HPDx_INT_ACK;
+			WREG32(DC_HPD6_INT_CONTROL, tmp);
+		}
+		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
+			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
+			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
+		}
+		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
+			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
+			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
+			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
+		}
+	} else {
+		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
+			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
+		}
+		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+			if (ASIC_IS_DCE3(rdev)) {
+				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
+				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
+			} else {
+				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
+				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
+				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
+			}
+		}
+	}
+}
+
+void r600_irq_disable(struct radeon_device *rdev)
+{
+	r600_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	r600_irq_ack(rdev);
+	r600_disable_interrupt_state(rdev);
+}
+
+static u32 r600_get_ih_wptr(struct radeon_device *rdev)
+{
+	u32 wptr, tmp;
+
+	if (rdev->wb.enabled)
+		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+	else
+		wptr = RREG32(IH_RB_WPTR);
+
+	if (wptr & RB_OVERFLOW) {
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last vector that was not overwritten (wptr + 16).
+		 * Hopefully this allows us to catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & rdev->ih.ptr_mask);
+}
+
+/*        r600 IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0]    - interrupt source id
+ * [31:8]   - reserved
+ * [59:32]  - interrupt source data
+ * [127:60]  - reserved
+ *
+ * The basic interrupt vector entries
+ * are decoded as follows:
+ * src_id  src_data  description
+ *      1         0  D1 Vblank
+ *      1         1  D1 Vline
+ *      5         0  D2 Vblank
+ *      5         1  D2 Vline
+ *     19         0  FP Hot plug detection A
+ *     19         1  FP Hot plug detection B
+ *     19         2  DAC A auto-detection
+ *     19         3  DAC B auto-detection
+ *     21         4  HDMI block A
+ *     21         5  HDMI block B
+ *    176         -  CP_INT RB
+ *    177         -  CP_INT IB1
+ *    178         -  CP_INT IB2
+ *    181         -  EOP Interrupt
+ *    233         -  GUI Idle
+ *
+ * Note, these are based on r600 and may need to be
+ * adjusted or added to on newer asics
+ */
+
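+/* Worked example of the layout above: a D1 vblank arrives as
+ * dw0 = 0x00000001 (src_id 1) and dw1 = 0x00000000 (src_data 0),
+ * with the remaining two dwords reserved; r600_irq_process() below
+ * extracts the fields as ring[i] & 0xff and ring[i + 1] & 0xfffffff.
+ */
+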
+int r600_irq_process(struct radeon_device *rdev)
+{
+	u32 wptr;
+	u32 rptr;
+	u32 src_id, src_data;
+	u32 ring_index;
+	bool queue_hotplug = false;
+	bool queue_hdmi = false;
+
+	if (!rdev->ih.enabled || rdev->shutdown)
+		return IRQ_NONE;
+
+	/* No MSIs, need a dummy read to flush PCI DMAs */
+	if (!rdev->msi_enabled)
+		RREG32(IH_RB_WPTR);
+
+	wptr = r600_get_ih_wptr(rdev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&rdev->ih.lock, 1))
+		return IRQ_NONE;
+
+	rptr = rdev->ih.rptr;
+	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+	/* Order reading of wptr vs. reading of IH ring data */
+	rmb();
+
+	/* display interrupts */
+	r600_irq_ack(rdev);
+
+	while (rptr != wptr) {
+		/* wptr/rptr are in bytes! */
+		ring_index = rptr / 4;
+		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+
+		switch (src_id) {
+		case 1: /* D1 vblank/vline */
+			switch (src_data) {
+			case 0: /* D1 vblank */
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[0]) {
+						drm_handle_vblank(rdev->ddev, 0);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[0]))
+						radeon_crtc_handle_flip(rdev, 0);
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D1 vblank\n");
+				}
+				break;
+			case 1: /* D1 vline */
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D1 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 5: /* D2 vblank/vline */
+			switch (src_data) {
+			case 0: /* D2 vblank */
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[1]) {
+						drm_handle_vblank(rdev->ddev, 1);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[1]))
+						radeon_crtc_handle_flip(rdev, 1);
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D2 vblank\n");
+				}
+				break;
+			case 1: /* D2 vline */
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D2 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 19: /* HPD/DAC hotplug */
+			switch (src_data) {
+			case 0:
+				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD1\n");
+				}
+				break;
+			case 1:
+				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD2\n");
+				}
+				break;
+			case 4:
+				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD3\n");
+				}
+				break;
+			case 5:
+				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD4\n");
+				}
+				break;
+			case 10:
+				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD5\n");
+				}
+				break;
+			case 12:
+				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD6\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 21: /* hdmi */
+			switch (src_data) {
+			case 4:
+				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI0\n");
+				}
+				break;
+			case 5:
+				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+					queue_hdmi = true;
+					DRM_DEBUG("IH: HDMI1\n");
+				}
+				break;
+			default:
+				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 124: /* UVD */
+			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+			break;
+		case 176: /* CP_INT in ring buffer */
+		case 177: /* CP_INT in IB1 */
+		case 178: /* CP_INT in IB2 */
+			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 181: /* CP EOP event */
+			DRM_DEBUG("IH: CP EOP\n");
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
+		case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: GUI idle\n");
+			break;
+		default:
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+
+		/* wptr/rptr are in bytes! */
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
+	}
+	if (queue_hotplug)
+		schedule_work(&rdev->hotplug_work);
+	if (queue_hdmi)
+		schedule_work(&rdev->audio_work);
+	rdev->ih.rptr = rptr;
+	WREG32(IH_RB_RPTR, rdev->ih.rptr);
+	atomic_set(&rdev->ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = r600_get_ih_wptr(rdev);
+	if (wptr != rptr)
+		goto restart_ih;
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int r600_debugfs_mc_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
+	DREG32_SYS(m, rdev, VM_L2_STATUS);
+	return 0;
+}
+
+static struct drm_info_list r600_mc_info_list[] = {
+	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
+};
+#endif
+
+int r600_debugfs_mc_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
+#else
+	return 0;
+#endif
+}
+
+/**
+ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * @rdev: radeon device structure
+ * @bo: buffer object struct which userspace is waiting for idle
+ *
+ * Some R6XX/R7XX chips don't seem to take into account the HDP flush
+ * performed through the ring buffer, which leads to rendering corruption
+ * (see http://bugzilla.kernel.org/show_bug.cgi?id=15186). To avoid this,
+ * we perform the HDP flush directly by writing the register through MMIO.
+ */
+void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+{
+	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
+	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
+	 * This seems to cause problems on some AGP cards. Just use the old
+	 * method for them.
+	 */
+	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
+		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+		u32 tmp;
+
+		WREG32(HDP_DEBUG1, 0);
+		tmp = readl((void __iomem *)ptr);
+	} else {
+		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+	}
+}
+
+void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+	u32 link_width_cntl, mask;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	radeon_gui_idle(rdev);
+
+	switch (lanes) {
+	case 0:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+		break;
+	case 1:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+		break;
+	case 2:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+		break;
+	case 4:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+		break;
+	case 8:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+		break;
+	case 12:
+		/* not actually supported */
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+		break;
+	case 16:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+		break;
+	default:
+		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
+		return;
+	}
+
+	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
+	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
+	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
+			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+
+	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+}
+
+int r600_get_pcie_lanes(struct radeon_device *rdev)
+{
+	u32 link_width_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return 0;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return 0;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return 0;
+
+	radeon_gui_idle(rdev);
+
+	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+	case RADEON_PCIE_LC_LINK_WIDTH_X1:
+		return 1;
+	case RADEON_PCIE_LC_LINK_WIDTH_X2:
+		return 2;
+	case RADEON_PCIE_LC_LINK_WIDTH_X4:
+		return 4;
+	case RADEON_PCIE_LC_LINK_WIDTH_X8:
+		return 8;
+	case RADEON_PCIE_LC_LINK_WIDTH_X12:
+		/* not actually supported */
+		return 12;
+	case RADEON_PCIE_LC_LINK_WIDTH_X0:
+	case RADEON_PCIE_LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
+}
+
+static void r600_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
+	u16 link_cntl2;
+
+	if (radeon_pcie_gen2 == 0)
+		return;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	/* only RV6xx+ chips are supported */
+	if (rdev->family <= CHIP_R600)
+		return;
+
+	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+		(rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
+		return;
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	if (speed_cntl & LC_CURRENT_DATA_RATE) {
+		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+		return;
+	}
+
+	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
+	/* 55 nm r6xx asics */
+	if ((rdev->family == CHIP_RV670) ||
+	    (rdev->family == CHIP_RV620) ||
+	    (rdev->family == CHIP_RV635)) {
+		/* advertise upconfig capability */
+		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+					     LC_RECONFIG_ARC_MISSING_ESCAPE);
+			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
+			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		} else {
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		}
+	}
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+		/* 55 nm r6xx asics */
+		if ((rdev->family == CHIP_RV670) ||
+		    (rdev->family == CHIP_RV620) ||
+		    (rdev->family == CHIP_RV635)) {
+			WREG32(MM_CFGREGS_CNTL, 0x8);
+			link_cntl2 = RREG32(0x4088);
+			WREG32(MM_CFGREGS_CNTL, 0);
+			/* not supported yet */
+			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
+				return;
+		}
+
+		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
+		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
+		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
+		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
+		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		tmp = RREG32(0x541c);
+		WREG32(0x541c, tmp | 0x8);
+		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+		link_cntl2 = RREG16(0x4088);
+		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+		link_cntl2 |= 0x2;
+		WREG16(0x4088, link_cntl2);
+		WREG32(MM_CFGREGS_CNTL, 0);
+
+		if ((rdev->family == CHIP_RV670) ||
+		    (rdev->family == CHIP_RV620) ||
+		    (rdev->family == CHIP_RV635)) {
+			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
+			training_cntl &= ~LC_POINT_7_PLUS_EN;
+			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
+		} else {
+			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+		}
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
+
+/**
+ * r600_get_gpu_clock_counter - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (R6xx-cayman).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
+{
+	uint64_t clock;
+
+	mutex_lock(&rdev->gpu_clock_mutex);
+	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	mutex_unlock(&rdev->gpu_clock_mutex);
+	return clock;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/r600_audio.c b/linux-imx/drivers/gpu/drm/radeon/r600_audio.c
new file mode 100644
index 0000000..c92eb86
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r600_audio.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_reg.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/*
+ * check if enc_priv stores radeon_encoder_atom_dig
+ */
+static bool radeon_dig_encoder(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		return true;
+	}
+	return false;
+}
+
+/*
+ * check if the chipset is supported
+ */
+static int r600_audio_chipset_supported(struct radeon_device *rdev)
+{
+	return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev);
+}
+
+struct r600_audio r600_audio_status(struct radeon_device *rdev)
+{
+	struct r600_audio status;
+	uint32_t value;
+
+	value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
+
+	/* number of channels */
+	status.channels = (value & 0x7) + 1;
+
+	/* bits per sample */
+	switch ((value & 0xF0) >> 4) {
+	case 0x0:
+		status.bits_per_sample = 8;
+		break;
+	case 0x1:
+		status.bits_per_sample = 16;
+		break;
+	case 0x2:
+		status.bits_per_sample = 20;
+		break;
+	case 0x3:
+		status.bits_per_sample = 24;
+		break;
+	case 0x4:
+		status.bits_per_sample = 32;
+		break;
+	default:
+		dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
+			(int)value);
+		status.bits_per_sample = 16;
+	}
+
+	/* current sampling rate in Hz */
+	if (value & 0x4000)
+		status.rate = 44100;
+	else
+		status.rate = 48000;
+	status.rate *= ((value >> 11) & 0x7) + 1;
+	status.rate /= ((value >> 8) & 0x7) + 1;
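+	/*
+	 * Illustrative decode, assuming the bit layout used above:
+	 * value = 0x4900 -> base 44100 Hz (bit 14 set), multiplier
+	 * ((0x4900 >> 11) & 0x7) + 1 = 2, divider ((0x4900 >> 8) & 0x7) + 1 = 2,
+	 * so status.rate = 44100 * 2 / 2 = 44100 Hz.
+	 */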
+
+	value = RREG32(R600_AUDIO_STATUS_BITS);
+
+	/* IEC 60958 status bits */
+	status.status_bits = value & 0xff;
+
+	/* IEC 60958 category code */
+	status.category_code = (value >> 8) & 0xff;
+
+	return status;
+}
+
+/*
+ * update all hdmi interfaces with current audio parameters
+ */
+void r600_audio_update_hdmi(struct work_struct *work)
+{
+	struct radeon_device *rdev = container_of(work, struct radeon_device,
+						  audio_work);
+	struct drm_device *dev = rdev->ddev;
+	struct r600_audio audio_status = r600_audio_status(rdev);
+	struct drm_encoder *encoder;
+	bool changed = false;
+
+	if (rdev->audio_status.channels != audio_status.channels ||
+	    rdev->audio_status.rate != audio_status.rate ||
+	    rdev->audio_status.bits_per_sample != audio_status.bits_per_sample ||
+	    rdev->audio_status.status_bits != audio_status.status_bits ||
+	    rdev->audio_status.category_code != audio_status.category_code) {
+		rdev->audio_status = audio_status;
+		changed = true;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (!radeon_dig_encoder(encoder))
+			continue;
+		if (changed || r600_hdmi_buffer_status_changed(encoder))
+			r600_hdmi_update_audio_settings(encoder);
+	}
+}
+
+/*
+ * turn on/off audio engine
+ */
+static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
+{
+	u32 value = 0;
+	DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
+	if (ASIC_IS_DCE4(rdev)) {
+		if (enable) {
+			value |= 0x81000000; /* Required to enable audio */
+			value |= 0x0e1000f0; /* fglrx sets that too */
+		}
+		WREG32(EVERGREEN_AUDIO_ENABLE, value);
+	} else {
+		WREG32_P(R600_AUDIO_ENABLE,
+			 enable ? 0x81000000 : 0x0, ~0x81000000);
+	}
+	rdev->audio_enabled = enable;
+}
+
+/*
+ * initialize the audio vars
+ */
+int r600_audio_init(struct radeon_device *rdev)
+{
+	if (!radeon_audio || !r600_audio_chipset_supported(rdev))
+		return 0;
+
+	r600_audio_engine_enable(rdev, true);
+
+	rdev->audio_status.channels = -1;
+	rdev->audio_status.rate = -1;
+	rdev->audio_status.bits_per_sample = -1;
+	rdev->audio_status.status_bits = 0;
+	rdev->audio_status.category_code = 0;
+
+	return 0;
+}
+
+/*
+ * shut down the audio engine
+ * TODO: How to do this correctly on SMP systems?
+ */
+void r600_audio_fini(struct radeon_device *rdev)
+{
+	if (!rdev->audio_enabled)
+		return;
+
+	r600_audio_engine_enable(rdev, false);
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/r600_blit.c b/linux-imx/drivers/gpu/drm/radeon/r600_blit.c
new file mode 100644
index 0000000..f651881
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r600_blit.c
@@ -0,0 +1,843 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+#include "r600_blit_shaders.h"
+
+#define DI_PT_RECTLIST        0x11
+#define DI_INDEX_SIZE_16_BIT  0x0
+#define DI_SRC_SEL_AUTO_INDEX 0x2
+
+#define FMT_8                 0x1
+#define FMT_5_6_5             0x8
+#define FMT_8_8_8_8           0x1a
+#define COLOR_8               0x1
+#define COLOR_5_6_5           0x8
+#define COLOR_8_8_8_8         0x1a
+
+static void
+set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
+{
+	u32 cb_color_info;
+	int pitch, slice;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	h = ALIGN(h, 8);
+	if (h < 8)
+		h = 8;
+
+	cb_color_info = ((format << 2) | (1 << 27));
+	pitch = (w / 8) - 1;
+	slice = ((w * h) / 64) - 1;
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) &&
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) {
+		BEGIN_RING(21 + 2);
+		OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+		OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+		OUT_RING(gpu_addr >> 8);
+		OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
+		OUT_RING(2 << 0);
+	} else {
+		BEGIN_RING(21);
+		OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+		OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+		OUT_RING(gpu_addr >> 8);
+	}
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_CB_COLOR0_SIZE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING((pitch << 0) | (slice << 10));
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_CB_COLOR0_VIEW - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(0);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_CB_COLOR0_INFO - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(cb_color_info);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_CB_COLOR0_TILE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(0);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_CB_COLOR0_FRAG - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(0);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_CB_COLOR0_MASK - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(0);
+
+	ADVANCE_RING();
+}
+
+static void
+cp_set_surface_sync(drm_radeon_private_t *dev_priv,
+		    u32 sync_type, u32 size, u64 mc_addr)
+{
+	u32 cp_coher_size;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	if (size == 0xffffffff)
+		cp_coher_size = 0xffffffff;
+	else
+		cp_coher_size = ((size + 255) >> 8);
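+	/* the size field counts 256-byte units, rounded up: e.g. size = 4096
+	 * -> (4096 + 255) >> 8 = 16 units, while 0xffffffff asks the CP to
+	 * flush the full address range */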
+
+	BEGIN_RING(5);
+	OUT_RING(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
+	OUT_RING(sync_type);
+	OUT_RING(cp_coher_size);
+	OUT_RING((mc_addr >> 8));
+	OUT_RING(10); /* poll interval */
+	ADVANCE_RING();
+}
+
+static void
+set_shaders(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	u64 gpu_addr;
+	int i;
+	u32 *vs, *ps;
+	uint32_t sq_pgm_resources;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	/* load shaders */
+	vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset);
+	ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
+
+	for (i = 0; i < r6xx_vs_size; i++)
+		vs[i] = cpu_to_le32(r6xx_vs[i]);
+	for (i = 0; i < r6xx_ps_size; i++)
+		ps[i] = cpu_to_le32(r6xx_ps[i]);
+
+	dev_priv->blit_vb->used = 512;
+
+	gpu_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset;
+
+	/* setup shader regs */
+	sq_pgm_resources = (1 << 0);
+
+	BEGIN_RING(9 + 12);
+	/* VS */
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_START_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(gpu_addr >> 8);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_RESOURCES_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(sq_pgm_resources);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_CF_OFFSET_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(0);
+
+	/* PS */
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_START_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING((gpu_addr + 256) >> 8);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_RESOURCES_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(sq_pgm_resources | (1 << 28));
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_EXPORTS_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(2);
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
+	OUT_RING((R600_SQ_PGM_CF_OFFSET_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING(0);
+	ADVANCE_RING();
+
+	cp_set_surface_sync(dev_priv,
+			    R600_SH_ACTION_ENA, 512, gpu_addr);
+}
+
+static void
+set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
+{
+	uint32_t sq_vtx_constant_word2;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
+
+	BEGIN_RING(9);
+	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
+	OUT_RING(0x460);
+	OUT_RING(gpu_addr & 0xffffffff);
+	OUT_RING(48 - 1);
+	OUT_RING(sq_vtx_constant_word2);
+	OUT_RING(1 << 0);
+	OUT_RING(0);
+	OUT_RING(0);
+	OUT_RING(R600_SQ_TEX_VTX_VALID_BUFFER << 30);
+	ADVANCE_RING();
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
+		cp_set_surface_sync(dev_priv,
+				    R600_TC_ACTION_ENA, 48, gpu_addr);
+	else
+		cp_set_surface_sync(dev_priv,
+				    R600_VC_ACTION_ENA, 48, gpu_addr);
+}
+
+static void
+set_tex_resource(drm_radeon_private_t *dev_priv,
+		 int format, int w, int h, int pitch, u64 gpu_addr)
+{
+	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	if (h < 1)
+		h = 1;
+
+	sq_tex_resource_word0 = (1 << 0);
+	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
+				  ((w - 1) << 19));
+
+	sq_tex_resource_word1 = (format << 26);
+	sq_tex_resource_word1 |= ((h - 1) << 0);
+
+	sq_tex_resource_word4 = ((1 << 14) |
+				 (0 << 16) |
+				 (1 << 19) |
+				 (2 << 22) |
+				 (3 << 25));
+
+	BEGIN_RING(9);
+	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
+	OUT_RING(0);
+	OUT_RING(sq_tex_resource_word0);
+	OUT_RING(sq_tex_resource_word1);
+	OUT_RING(gpu_addr >> 8);
+	OUT_RING(gpu_addr >> 8);
+	OUT_RING(sq_tex_resource_word4);
+	OUT_RING(0);
+	OUT_RING(R600_SQ_TEX_VTX_VALID_TEXTURE << 30);
+	ADVANCE_RING();
+}
+
+static void
+set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(12);
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
+	OUT_RING((R600_PA_SC_SCREEN_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING((x1 << 0) | (y1 << 16));
+	OUT_RING((x2 << 0) | (y2 << 16));
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
+	OUT_RING((R600_PA_SC_GENERIC_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
+	OUT_RING((x2 << 0) | (y2 << 16));
+
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
+	OUT_RING((R600_PA_SC_WINDOW_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
+	OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
+	OUT_RING((x2 << 0) | (y2 << 16));
+	ADVANCE_RING();
+}
+
+static void
+draw_auto(drm_radeon_private_t *dev_priv)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(10);
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
+	OUT_RING((R600_VGT_PRIMITIVE_TYPE - R600_SET_CONFIG_REG_OFFSET) >> 2);
+	OUT_RING(DI_PT_RECTLIST);
+
+	OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
+#ifdef __BIG_ENDIAN
+	OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
+#else
+	OUT_RING(DI_INDEX_SIZE_16_BIT);
+#endif
+
+	OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
+	OUT_RING(1);
+
+	OUT_RING(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
+	OUT_RING(3);
+	OUT_RING(DI_SRC_SEL_AUTO_INDEX);
+
+	ADVANCE_RING();
+	COMMIT_RING();
+}
+
+static void
+set_default_state(drm_radeon_private_t *dev_priv)
+{
+	int i;
+	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
+	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
+	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
+	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
+	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
+	RING_LOCALS;
+
+	switch ((dev_priv->flags & RADEON_FAMILY_MASK)) {
+	case CHIP_R600:
+		num_ps_gprs = 192;
+		num_vs_gprs = 56;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 136;
+		num_vs_threads = 48;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 128;
+		num_vs_stack_entries = 128;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		num_ps_gprs = 84;
+		num_vs_gprs = 36;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 144;
+		num_vs_threads = 40;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 40;
+		num_vs_stack_entries = 40;
+		num_gs_stack_entries = 32;
+		num_es_stack_entries = 16;
+		break;
+	case CHIP_RV610:
+	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	default:
+		num_ps_gprs = 84;
+		num_vs_gprs = 36;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 136;
+		num_vs_threads = 48;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 40;
+		num_vs_stack_entries = 40;
+		num_gs_stack_entries = 32;
+		num_es_stack_entries = 16;
+		break;
+	case CHIP_RV670:
+		num_ps_gprs = 144;
+		num_vs_gprs = 40;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 136;
+		num_vs_threads = 48;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 40;
+		num_vs_stack_entries = 40;
+		num_gs_stack_entries = 32;
+		num_es_stack_entries = 16;
+		break;
+	case CHIP_RV770:
+		num_ps_gprs = 192;
+		num_vs_gprs = 56;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 188;
+		num_vs_threads = 60;
+		num_gs_threads = 0;
+		num_es_threads = 0;
+		num_ps_stack_entries = 256;
+		num_vs_stack_entries = 256;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	case CHIP_RV730:
+	case CHIP_RV740:
+		num_ps_gprs = 84;
+		num_vs_gprs = 36;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 188;
+		num_vs_threads = 60;
+		num_gs_threads = 0;
+		num_es_threads = 0;
+		num_ps_stack_entries = 128;
+		num_vs_stack_entries = 128;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	case CHIP_RV710:
+		num_ps_gprs = 192;
+		num_vs_gprs = 56;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 144;
+		num_vs_threads = 48;
+		num_gs_threads = 0;
+		num_es_threads = 0;
+		num_ps_stack_entries = 128;
+		num_vs_stack_entries = 128;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	}
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
+		sq_config = 0;
+	else
+		sq_config = R600_VC_ENABLE;
+
+	sq_config |= (R600_DX9_CONSTS |
+		      R600_ALU_INST_PREFER_VECTOR |
+		      R600_PS_PRIO(0) |
+		      R600_VS_PRIO(1) |
+		      R600_GS_PRIO(2) |
+		      R600_ES_PRIO(3));
+
+	sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(num_ps_gprs) |
+				  R600_NUM_VS_GPRS(num_vs_gprs) |
+				  R600_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+	sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(num_gs_gprs) |
+				  R600_NUM_ES_GPRS(num_es_gprs));
+	sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(num_ps_threads) |
+				   R600_NUM_VS_THREADS(num_vs_threads) |
+				   R600_NUM_GS_THREADS(num_gs_threads) |
+				   R600_NUM_ES_THREADS(num_es_threads));
+	sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+				    R600_NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+	sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+				    R600_NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
+		BEGIN_RING(r7xx_default_size + 10);
+		for (i = 0; i < r7xx_default_size; i++)
+			OUT_RING(r7xx_default_state[i]);
+	} else {
+		BEGIN_RING(r6xx_default_size + 10);
+		for (i = 0; i < r6xx_default_size; i++)
+			OUT_RING(r6xx_default_state[i]);
+	}
+	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
+	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
+	/* SQ config */
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 6));
+	OUT_RING((R600_SQ_CONFIG - R600_SET_CONFIG_REG_OFFSET) >> 2);
+	OUT_RING(sq_config);
+	OUT_RING(sq_gpr_resource_mgmt_1);
+	OUT_RING(sq_gpr_resource_mgmt_2);
+	OUT_RING(sq_thread_resource_mgmt);
+	OUT_RING(sq_stack_resource_mgmt_1);
+	OUT_RING(sq_stack_resource_mgmt_2);
+	ADVANCE_RING();
+}
+
+static int r600_nomm_get_vb(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	dev_priv->blit_vb = radeon_freelist_get(dev);
+	if (!dev_priv->blit_vb) {
+		DRM_ERROR("Unable to allocate vertex buffer for blit\n");
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+static void r600_nomm_put_vb(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	dev_priv->blit_vb->used = 0;
+	radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb);
+}
+
+static void *r600_nomm_get_vb_ptr(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	return (((char *)dev->agp_buffer_map->handle +
+		 dev_priv->blit_vb->offset + dev_priv->blit_vb->used));
+}
+
+int
+r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int ret;
+	DRM_DEBUG("\n");
+
+	ret = r600_nomm_get_vb(dev);
+	if (ret)
+		return ret;
+
+	dev_priv->blit_vb->file_priv = file_priv;
+
+	set_default_state(dev_priv);
+	set_shaders(dev);
+
+	return 0;
+}
+
+void
+r600_done_blit_copy(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(5);
+	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
+	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
+	/* wait for 3D idle clean */
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
+	OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
+	OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);
+
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	r600_nomm_put_vb(dev);
+}
+
+void
+r600_blit_copy(struct drm_device *dev,
+	       uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
+	       int size_bytes)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int max_bytes;
+	u64 vb_addr;
+	u32 *vb;
+
+	vb = r600_nomm_get_vb_ptr(dev);
+
+	if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
+		max_bytes = 8192;
+
+		while (size_bytes) {
+			int cur_size = size_bytes;
+			int src_x = src_gpu_addr & 255;
+			int dst_x = dst_gpu_addr & 255;
+			int h = 1;
+			src_gpu_addr = src_gpu_addr & ~255;
+			dst_gpu_addr = dst_gpu_addr & ~255;
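+			/* the low 8 bits of each address were peeled off as an
+			 * x offset above; the bases are now 256-byte aligned
+			 * as the blit requires, e.g. 0x10047 -> base 0x10000,
+			 * src_x = 0x47 */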
+
+			if (!src_x && !dst_x) {
+				h = (cur_size / max_bytes);
+				if (h > 8192)
+					h = 8192;
+				if (h == 0)
+					h = 1;
+				else
+					cur_size = max_bytes;
+			} else {
+				if (cur_size > max_bytes)
+					cur_size = max_bytes;
+				if (cur_size > (max_bytes - dst_x))
+					cur_size = (max_bytes - dst_x);
+				if (cur_size > (max_bytes - src_x))
+					cur_size = (max_bytes - src_x);
+			}
+
+			if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
+				r600_nomm_put_vb(dev);
+				r600_nomm_get_vb(dev);
+				if (!dev_priv->blit_vb)
+					return;
+				set_shaders(dev);
+				vb = r600_nomm_get_vb_ptr(dev);
+			}
+
+			vb[0] = int2float(dst_x);
+			vb[1] = 0;
+			vb[2] = int2float(src_x);
+			vb[3] = 0;
+
+			vb[4] = int2float(dst_x);
+			vb[5] = int2float(h);
+			vb[6] = int2float(src_x);
+			vb[7] = int2float(h);
+
+			vb[8] = int2float(dst_x + cur_size);
+			vb[9] = int2float(h);
+			vb[10] = int2float(src_x + cur_size);
+			vb[11] = int2float(h);
+
+			/* src */
+			set_tex_resource(dev_priv, FMT_8,
+					 src_x + cur_size, h, src_x + cur_size,
+					 src_gpu_addr);
+
+			cp_set_surface_sync(dev_priv,
+					    R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+			/* dst */
+			set_render_target(dev_priv, COLOR_8,
+					  dst_x + cur_size, h,
+					  dst_gpu_addr);
+
+			/* scissors */
+			set_scissors(dev_priv, dst_x, 0, dst_x + cur_size, h);
+
+			/* Vertex buffer setup */
+			vb_addr = dev_priv->gart_buffers_offset +
+				dev_priv->blit_vb->offset +
+				dev_priv->blit_vb->used;
+			set_vtx_resource(dev_priv, vb_addr);
+
+			/* draw */
+			draw_auto(dev_priv);
+
+			cp_set_surface_sync(dev_priv,
+					    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
+					    cur_size * h, dst_gpu_addr);
+
+			vb += 12;
+			dev_priv->blit_vb->used += 12 * 4;
+
+			src_gpu_addr += cur_size * h;
+			dst_gpu_addr += cur_size * h;
+			size_bytes -= cur_size * h;
+		}
+	} else {
+		max_bytes = 8192 * 4;
+
+		while (size_bytes) {
+			int cur_size = size_bytes;
+			int src_x = (src_gpu_addr & 255);
+			int dst_x = (dst_gpu_addr & 255);
+			int h = 1;
+			src_gpu_addr = src_gpu_addr & ~255;
+			dst_gpu_addr = dst_gpu_addr & ~255;
+
+			if (!src_x && !dst_x) {
+				h = (cur_size / max_bytes);
+				if (h > 8192)
+					h = 8192;
+				if (h == 0)
+					h = 1;
+				else
+					cur_size = max_bytes;
+			} else {
+				if (cur_size > max_bytes)
+					cur_size = max_bytes;
+				if (cur_size > (max_bytes - dst_x))
+					cur_size = (max_bytes - dst_x);
+				if (cur_size > (max_bytes - src_x))
+					cur_size = (max_bytes - src_x);
+			}
+
+			if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
+				r600_nomm_put_vb(dev);
+				r600_nomm_get_vb(dev);
+				if (!dev_priv->blit_vb)
+					return;
+
+				set_shaders(dev);
+				vb = r600_nomm_get_vb_ptr(dev);
+			}
+
+			vb[0] = int2float(dst_x / 4);
+			vb[1] = 0;
+			vb[2] = int2float(src_x / 4);
+			vb[3] = 0;
+
+			vb[4] = int2float(dst_x / 4);
+			vb[5] = int2float(h);
+			vb[6] = int2float(src_x / 4);
+			vb[7] = int2float(h);
+
+			vb[8] = int2float((dst_x + cur_size) / 4);
+			vb[9] = int2float(h);
+			vb[10] = int2float((src_x + cur_size) / 4);
+			vb[11] = int2float(h);
+
+			/* src */
+			set_tex_resource(dev_priv, FMT_8_8_8_8,
+					 (src_x + cur_size) / 4,
+					 h, (src_x + cur_size) / 4,
+					 src_gpu_addr);
+
+			cp_set_surface_sync(dev_priv,
+					    R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
+
+			/* dst */
+			set_render_target(dev_priv, COLOR_8_8_8_8,
+					  (dst_x + cur_size) / 4, h,
+					  dst_gpu_addr);
+
+			/* scissors */
+			set_scissors(dev_priv, (dst_x / 4), 0, ((dst_x + cur_size) / 4), h);
+
+			/* Vertex buffer setup */
+			vb_addr = dev_priv->gart_buffers_offset +
+				dev_priv->blit_vb->offset +
+				dev_priv->blit_vb->used;
+			set_vtx_resource(dev_priv, vb_addr);
+
+			/* draw */
+			draw_auto(dev_priv);
+
+			cp_set_surface_sync(dev_priv,
+					    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
+					    cur_size * h, dst_gpu_addr);
+
+			vb += 12;
+			dev_priv->blit_vb->used += 12 * 4;
+
+			src_gpu_addr += cur_size * h;
+			dst_gpu_addr += cur_size * h;
+			size_bytes -= cur_size * h;
+		}
+	}
+}
+
+void
+r600_blit_swap(struct drm_device *dev,
+	       uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
+	       int sx, int sy, int dx, int dy,
+	       int w, int h, int src_pitch, int dst_pitch, int cpp)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int cb_format, tex_format;
+	int sx2, sy2, dx2, dy2;
+	u64 vb_addr;
+	u32 *vb;
+
+	if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
+		r600_nomm_put_vb(dev);
+		r600_nomm_get_vb(dev);
+		if (!dev_priv->blit_vb)
+			return;
+
+		set_shaders(dev);
+	}
+	vb = r600_nomm_get_vb_ptr(dev);
+
+	sx2 = sx + w;
+	sy2 = sy + h;
+	dx2 = dx + w;
+	dy2 = dy + h;
+
+	vb[0] = int2float(dx);
+	vb[1] = int2float(dy);
+	vb[2] = int2float(sx);
+	vb[3] = int2float(sy);
+
+	vb[4] = int2float(dx);
+	vb[5] = int2float(dy2);
+	vb[6] = int2float(sx);
+	vb[7] = int2float(sy2);
+
+	vb[8] = int2float(dx2);
+	vb[9] = int2float(dy2);
+	vb[10] = int2float(sx2);
+	vb[11] = int2float(sy2);
+
+	switch(cpp) {
+	case 4:
+		cb_format = COLOR_8_8_8_8;
+		tex_format = FMT_8_8_8_8;
+		break;
+	case 2:
+		cb_format = COLOR_5_6_5;
+		tex_format = FMT_5_6_5;
+		break;
+	default:
+		cb_format = COLOR_8;
+		tex_format = FMT_8;
+		break;
+	}
+
+	/* src */
+	set_tex_resource(dev_priv, tex_format,
+			 src_pitch / cpp,
+			 sy2, src_pitch / cpp,
+			 src_gpu_addr);
+
+	cp_set_surface_sync(dev_priv,
+			    R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr);
+
+	/* dst */
+	set_render_target(dev_priv, cb_format,
+			  dst_pitch / cpp, dy2,
+			  dst_gpu_addr);
+
+	/* scissors */
+	set_scissors(dev_priv, dx, dy, dx2, dy2);
+
+	/* Vertex buffer setup */
+	vb_addr = dev_priv->gart_buffers_offset +
+		dev_priv->blit_vb->offset +
+		dev_priv->blit_vb->used;
+	set_vtx_resource(dev_priv, vb_addr);
+
+	/* draw */
+	draw_auto(dev_priv);
+
+	cp_set_surface_sync(dev_priv,
+			    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
+			    dst_pitch * dy2, dst_gpu_addr);
+
+	dev_priv->blit_vb->used += 12 * 4;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/r600_blit_kms.c b/linux-imx/drivers/gpu/drm/radeon/r600_blit_kms.c
new file mode 100644
index 0000000..9fb5780
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -0,0 +1,785 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include "r600d.h"
+#include "r600_blit_shaders.h"
+#include "radeon_blit_common.h"
+
+/* 23 bits of float fractional data */
+#define I2F_FRAC_BITS  23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts unsigned integer into 32-bit IEEE floating point representation.
+ * Will be exact from 0 to 2^24.  Above that, we round towards zero
+ * as the fractional bits will not fit in a float.  (It would be better to
+ * round towards even as the fpu does, but that is slower.)
+ */
+__pure uint32_t int2float(uint32_t x)
+{
+	uint32_t msb, exponent, fraction;
+
+	/* Zero is special */
+	if (!x) return 0;
+
+	/* Get location of the most significant bit */
+	msb = __fls(x);
+
+	/*
+	 * Use a rotate instead of a shift because that works both leftwards
+	 * and rightwards due to the mod(32) behaviour.  This means we don't
+	 * need to check to see if we are above 2^24 or not.
+	 */
+	fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+	exponent = (127 + msb) << I2F_FRAC_BITS;
+
+	return fraction + exponent;
+}
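+
+/*
+ * Worked example (illustrative): x = 6 has msb = 2, so
+ * fraction = ror32(6, (2 - 23) & 0x1f) & I2F_MASK = 0x400000 and
+ * exponent = (127 + 2) << 23 = 0x40800000; the sum 0x40C00000 is the
+ * IEEE-754 encoding of 6.0f.
+ */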
+
+/* emits 21 on rv770+, 23 on r600 */
+static void
+set_render_target(struct radeon_device *rdev, int format,
+		  int w, int h, u64 gpu_addr)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 cb_color_info;
+	int pitch, slice;
+
+	h = ALIGN(h, 8);
+	if (h < 8)
+		h = 8;
+
+	cb_color_info = CB_FORMAT(format) |
+		CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
+		CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+	pitch = (w / 8) - 1;
+	slice = ((w * h) / 64) - 1;
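+	/* pitch is programmed in units of 8 pixels and slice in units of
+	 * 64 pixels, both as max-style (minus one) fields: e.g. w = h = 64
+	 * gives pitch = 7, slice = 63 */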
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+
+	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
+		radeon_ring_write(ring, 2 << 0);
+	}
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, (pitch << 0) | (slice << 10));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, cb_color_info);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
+}
+
+/* emits 5dw */
+static void
+cp_set_surface_sync(struct radeon_device *rdev,
+		    u32 sync_type, u32 size,
+		    u64 mc_addr)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 cp_coher_size;
+
+	if (size == 0xffffffff)
+		cp_coher_size = 0xffffffff;
+	else
+		cp_coher_size = ((size + 255) >> 8);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, sync_type);
+	radeon_ring_write(ring, cp_coher_size);
+	radeon_ring_write(ring, mc_addr >> 8);
+	radeon_ring_write(ring, 10); /* poll interval */
+}
+
+/* emits 21dw + 1 surface sync = 26dw */
+static void
+set_shaders(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u64 gpu_addr;
+	u32 sq_pgm_resources;
+
+	/* setup shader regs */
+	sq_pgm_resources = (1 << 0);
+
+	/* VS */
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, sq_pgm_resources);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
+
+	/* PS */
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 2);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+	radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, 0);
+
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
+}
+
+/* emits 9 + 1 sync (5) = 14 */
+static void
+set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 sq_vtx_constant_word2;
+
+	sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
+		SQ_VTXC_STRIDE(16);
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |=  SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
+#endif
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+	radeon_ring_write(ring, 0x460);
+	radeon_ring_write(ring, gpu_addr & 0xffffffff);
+	radeon_ring_write(ring, 48 - 1);
+	radeon_ring_write(ring, sq_vtx_constant_word2);
+	radeon_ring_write(ring, 1 << 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
+
+	if ((rdev->family == CHIP_RV610) ||
+	    (rdev->family == CHIP_RV620) ||
+	    (rdev->family == CHIP_RS780) ||
+	    (rdev->family == CHIP_RS880) ||
+	    (rdev->family == CHIP_RV710))
+		cp_set_surface_sync(rdev,
+				    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
+	else
+		cp_set_surface_sync(rdev,
+				    PACKET3_VC_ACTION_ENA, 48, gpu_addr);
+}
+
+/* emits 9 */
+static void
+set_tex_resource(struct radeon_device *rdev,
+		 int format, int w, int h, int pitch,
+		 u64 gpu_addr, u32 size)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
+
+	if (h < 1)
+		h = 1;
+
+	sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
+		S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+	sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) |
+		S_038000_TEX_WIDTH(w - 1);
+
+	sq_tex_resource_word1 = S_038004_DATA_FORMAT(format);
+	sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1);
+
+	sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) |
+		S_038010_DST_SEL_X(SQ_SEL_X) |
+		S_038010_DST_SEL_Y(SQ_SEL_Y) |
+		S_038010_DST_SEL_Z(SQ_SEL_Z) |
+		S_038010_DST_SEL_W(SQ_SEL_W);
+
+	cp_set_surface_sync(rdev,
+			    PACKET3_TC_ACTION_ENA, size, gpu_addr);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_tex_resource_word0);
+	radeon_ring_write(ring, sq_tex_resource_word1);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, sq_tex_resource_word4);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
+}
+
+/* emits 12 */
+static void
+set_scissors(struct radeon_device *rdev, int x1, int y1,
+	     int x2, int y2)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+}
+
+/* emits 10 */
+static void
+draw_auto(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, DI_PT_RECTLIST);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 2) |
+#endif
+			  DI_INDEX_SIZE_16_BIT);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+	radeon_ring_write(ring, 1);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+	radeon_ring_write(ring, 3);
+	radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
+}
+
+/* emits 14 */
+static void
+set_default_state(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
+	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
+	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
+	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
+	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
+	u64 gpu_addr;
+	int dwords;
+
+	switch (rdev->family) {
+	case CHIP_R600:
+		num_ps_gprs = 192;
+		num_vs_gprs = 56;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 136;
+		num_vs_threads = 48;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 128;
+		num_vs_stack_entries = 128;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		num_ps_gprs = 84;
+		num_vs_gprs = 36;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 144;
+		num_vs_threads = 40;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 40;
+		num_vs_stack_entries = 40;
+		num_gs_stack_entries = 32;
+		num_es_stack_entries = 16;
+		break;
+	case CHIP_RV610:
+	case CHIP_RV620:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	default:
+		num_ps_gprs = 84;
+		num_vs_gprs = 36;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 136;
+		num_vs_threads = 48;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 40;
+		num_vs_stack_entries = 40;
+		num_gs_stack_entries = 32;
+		num_es_stack_entries = 16;
+		break;
+	case CHIP_RV670:
+		num_ps_gprs = 144;
+		num_vs_gprs = 40;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 136;
+		num_vs_threads = 48;
+		num_gs_threads = 4;
+		num_es_threads = 4;
+		num_ps_stack_entries = 40;
+		num_vs_stack_entries = 40;
+		num_gs_stack_entries = 32;
+		num_es_stack_entries = 16;
+		break;
+	case CHIP_RV770:
+		num_ps_gprs = 192;
+		num_vs_gprs = 56;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 188;
+		num_vs_threads = 60;
+		num_gs_threads = 0;
+		num_es_threads = 0;
+		num_ps_stack_entries = 256;
+		num_vs_stack_entries = 256;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	case CHIP_RV730:
+	case CHIP_RV740:
+		num_ps_gprs = 84;
+		num_vs_gprs = 36;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 188;
+		num_vs_threads = 60;
+		num_gs_threads = 0;
+		num_es_threads = 0;
+		num_ps_stack_entries = 128;
+		num_vs_stack_entries = 128;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	case CHIP_RV710:
+		num_ps_gprs = 192;
+		num_vs_gprs = 56;
+		num_temp_gprs = 4;
+		num_gs_gprs = 0;
+		num_es_gprs = 0;
+		num_ps_threads = 144;
+		num_vs_threads = 48;
+		num_gs_threads = 0;
+		num_es_threads = 0;
+		num_ps_stack_entries = 128;
+		num_vs_stack_entries = 128;
+		num_gs_stack_entries = 0;
+		num_es_stack_entries = 0;
+		break;
+	}
+
+	if ((rdev->family == CHIP_RV610) ||
+	    (rdev->family == CHIP_RV620) ||
+	    (rdev->family == CHIP_RS780) ||
+	    (rdev->family == CHIP_RS880) ||
+	    (rdev->family == CHIP_RV710))
+		sq_config = 0;
+	else
+		sq_config = VC_ENABLE;
+
+	sq_config |= (DX9_CONSTS |
+		      ALU_INST_PREFER_VECTOR |
+		      PS_PRIO(0) |
+		      VS_PRIO(1) |
+		      GS_PRIO(2) |
+		      ES_PRIO(3));
+
+	sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
+				  NUM_VS_GPRS(num_vs_gprs) |
+				  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
+	sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
+				  NUM_ES_GPRS(num_es_gprs));
+	sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
+				   NUM_VS_THREADS(num_vs_threads) |
+				   NUM_GS_THREADS(num_gs_threads) |
+				   NUM_ES_THREADS(num_es_threads));
+	sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
+				    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
+	sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
+				    NUM_ES_STACK_ENTRIES(num_es_stack_entries));
+
+	/* emit an IB pointing at default state */
+	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+	radeon_ring_write(ring, dwords);
+
+	/* SQ config */
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
+	radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, sq_config);
+	radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+	radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+	radeon_ring_write(ring, sq_thread_resource_mgmt);
+	radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+	radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+}
+
+int r600_blit_init(struct radeon_device *rdev)
+{
+	u32 obj_size;
+	int i, r, dwords;
+	void *ptr;
+	u32 packet2s[16];
+	int num_packet2s = 0;
+
+	rdev->r600_blit.primitives.set_render_target = set_render_target;
+	rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
+	rdev->r600_blit.primitives.set_shaders = set_shaders;
+	rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
+	rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
+	rdev->r600_blit.primitives.set_scissors = set_scissors;
+	rdev->r600_blit.primitives.draw_auto = draw_auto;
+	rdev->r600_blit.primitives.set_default_state = set_default_state;
+
+	rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
+	rdev->r600_blit.ring_size_common += 40; /* shaders + def state */
+	rdev->r600_blit.ring_size_common += 5; /* done copy */
+	rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
+
+	rdev->r600_blit.ring_size_per_loop = 76;
+	/* set_render_target emits 2 extra dwords on rv6xx */
+	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
+		rdev->r600_blit.ring_size_per_loop += 2;
+
+	rdev->r600_blit.max_dim = 8192;
+
+	rdev->r600_blit.state_offset = 0;
+
+	if (rdev->family >= CHIP_RV770)
+		rdev->r600_blit.state_len = r7xx_default_size;
+	else
+		rdev->r600_blit.state_len = r6xx_default_size;
+
+	dwords = rdev->r600_blit.state_len;
+	while (dwords & 0xf) {
+		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
+		dwords++;
+	}
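+
+	/* pad the state block to a 16-dword boundary with type-2 NOPs so the
+	 * IB length emitted by set_default_state() stays aligned */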
+
+	obj_size = dwords * 4;
+	obj_size = ALIGN(obj_size, 256);
+
+	rdev->r600_blit.vs_offset = obj_size;
+	obj_size += r6xx_vs_size * 4;
+	obj_size = ALIGN(obj_size, 256);
+
+	rdev->r600_blit.ps_offset = obj_size;
+	obj_size += r6xx_ps_size * 4;
+	obj_size = ALIGN(obj_size, 256);
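+
+	/*
+	 * Resulting layout of the shader BO, each section aligned to 256
+	 * bytes:
+	 *   state_offset (0) : default state + PACKET2 padding
+	 *   vs_offset        : vertex shader
+	 *   ps_offset        : pixel shader
+	 */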
+
+	/* pin copy shader into vram if not already initialized */
+	if (rdev->r600_blit.shader_obj == NULL) {
+		r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_VRAM,
+				     NULL, &rdev->r600_blit.shader_obj);
+		if (r) {
+			DRM_ERROR("r600 failed to allocate shader\n");
+			return r;
+		}
+
+		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+				  &rdev->r600_blit.shader_gpu_addr);
+		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+		if (r) {
+			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+			return r;
+		}
+	}
+
+	DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
+		  obj_size,
+		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
+
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
+	if (r) {
+		DRM_ERROR("failed to map blit object %d\n", r);
+		return r;
+	}
+	if (rdev->family >= CHIP_RV770)
+		memcpy_toio(ptr + rdev->r600_blit.state_offset,
+			    r7xx_default_state, rdev->r600_blit.state_len * 4);
+	else
+		memcpy_toio(ptr + rdev->r600_blit.state_offset,
+			    r6xx_default_state, rdev->r600_blit.state_len * 4);
+	if (num_packet2s)
+		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+			    packet2s, num_packet2s * 4);
+	for (i = 0; i < r6xx_vs_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
+	for (i = 0; i < r6xx_ps_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
+	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+	return 0;
+}
+
+void r600_blit_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	if (rdev->r600_blit.shader_obj == NULL)
+		return;
+	/* If we can't reserve the bo, unref should be enough to destroy
+	 * it when it becomes idle.
+	 */
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (!r) {
+		radeon_bo_unpin(rdev->r600_blit.shader_obj);
+		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+	}
+	radeon_bo_unref(&rdev->r600_blit.shader_obj);
+}
+
+static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
+				      int *width, int *height, int max_dim)
+{
+	unsigned max_pages;
+	unsigned pages = num_gpu_pages;
+	int w, h;
+
+	if (num_gpu_pages == 0) {
+		/* not supposed to be called with no pages, but just in case */
+		h = 0;
+		w = 0;
+		pages = 0;
+		WARN_ON(1);
+	} else {
+		int rect_order = 2;
+		h = RECT_UNIT_H;
+		while (num_gpu_pages / rect_order) {
+			h *= 2;
+			rect_order *= 4;
+			if (h >= max_dim) {
+				h = max_dim;
+				break;
+			}
+		}
+		max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H);
+		if (pages > max_pages)
+			pages = max_pages;
+		w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h;
+		w = (w / RECT_UNIT_W) * RECT_UNIT_W;
+		pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H);
+		BUG_ON(pages == 0);
+	}
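+
+	/*
+	 * Illustrative shape of the result: h starts at RECT_UNIT_H and
+	 * doubles while rect_order quadruples, so h grows roughly with
+	 * sqrt(num_gpu_pages) (clamped to max_dim), and w is then chosen,
+	 * in RECT_UNIT_W steps, so that w * h covers at most `pages` units.
+	 */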
+
+	DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages);
+
+	/* return width and height only if the caller wants it */
+	if (height)
+		*height = h;
+	if (width)
+		*width = w;
+
+	return pages;
+}
+
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+			   struct radeon_fence **fence, struct radeon_sa_bo **vb,
+			   struct radeon_semaphore **sem)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+	int ring_size;
+	int num_loops = 0;
+	int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
+
+	/* num loops */
+	while (num_gpu_pages) {
+		num_gpu_pages -=
+			r600_blit_create_rect(num_gpu_pages, NULL, NULL,
+					      rdev->r600_blit.max_dim);
+		num_loops++;
+	}
+
+	/* 48 bytes of vertex data per loop */
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
+			     (num_loops*48)+256, 256, true);
+	if (r) {
+		return r;
+	}
+
+	r = radeon_semaphore_create(rdev, sem);
+	if (r) {
+		radeon_sa_bo_free(rdev, vb, NULL);
+		return r;
+	}
+
+	/* size the ring: per-loop dwords plus the common overhead */
+	ring_size = num_loops * dwords_per_loop;
+	ring_size += rdev->r600_blit.ring_size_common;
+	r = radeon_ring_lock(rdev, ring, ring_size);
+	if (r) {
+		radeon_sa_bo_free(rdev, vb, NULL);
+		radeon_semaphore_free(rdev, sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
+		radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
+					    RADEON_RING_TYPE_GFX_INDEX);
+		radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
+	} else {
+		radeon_semaphore_free(rdev, sem, NULL);
+	}
+
+	rdev->r600_blit.primitives.set_default_state(rdev);
+	rdev->r600_blit.primitives.set_shaders(rdev);
+	return 0;
+}
+
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
+			 struct radeon_sa_bo *vb, struct radeon_semaphore *sem)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+
+	r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_sa_bo_free(rdev, &vb, *fence);
+	radeon_semaphore_free(rdev, &sem, *fence);
+}
+
+void r600_kms_blit_copy(struct radeon_device *rdev,
+			u64 src_gpu_addr, u64 dst_gpu_addr,
+			unsigned num_gpu_pages,
+			struct radeon_sa_bo *vb)
+{
+	u64 vb_gpu_addr;
+	u32 *vb_cpu_addr;
+
+	DRM_DEBUG("emitting copy %16llx %16llx %d\n",
+		  src_gpu_addr, dst_gpu_addr, num_gpu_pages);
+	vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
+	vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
+
+	while (num_gpu_pages) {
+		int w, h;
+		unsigned size_in_bytes;
+		unsigned pages_per_loop =
+			r600_blit_create_rect(num_gpu_pages, &w, &h,
+					      rdev->r600_blit.max_dim);
+
+		size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
+		DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
+
+		vb_cpu_addr[0] = 0;
+		vb_cpu_addr[1] = 0;
+		vb_cpu_addr[2] = 0;
+		vb_cpu_addr[3] = 0;
+
+		vb_cpu_addr[4] = 0;
+		vb_cpu_addr[5] = int2float(h);
+		vb_cpu_addr[6] = 0;
+		vb_cpu_addr[7] = int2float(h);
+
+		vb_cpu_addr[8] = int2float(w);
+		vb_cpu_addr[9] = int2float(h);
+		vb_cpu_addr[10] = int2float(w);
+		vb_cpu_addr[11] = int2float(h);
+
+		rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
+							    w, h, w, src_gpu_addr, size_in_bytes);
+		rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
+							     w, h, dst_gpu_addr);
+		rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
+		rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
+		rdev->r600_blit.primitives.draw_auto(rdev);
+		rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
+				    PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
+				    size_in_bytes, dst_gpu_addr);
+
+		vb_cpu_addr += 12;
+		vb_gpu_addr += 4*12;
+		src_gpu_addr += size_in_bytes;
+		dst_gpu_addr += size_in_bytes;
+		num_gpu_pages -= pages_per_loop;
+	}
+}
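+
+/*
+ * Usage sketch, illustrative only: the three entry points above are meant
+ * to be driven in sequence by a copy helper such as r600_copy_blit(),
+ * roughly as follows.
+ */
+#if 0	/* example only, not built */
+static int example_copy_blit(struct radeon_device *rdev,
+			     u64 src_gpu_addr, u64 dst_gpu_addr,
+			     unsigned num_gpu_pages,
+			     struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	struct radeon_sa_bo *vb = NULL;
+	int r;
+
+	/* reserve ring space, the vertex buffer and a semaphore */
+	r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
+	if (r)
+		return r;
+
+	/* one textured-quad draw per max_dim-limited rectangle */
+	r600_kms_blit_copy(rdev, src_gpu_addr, dst_gpu_addr, num_gpu_pages, vb);
+
+	/* emit the fence and release the temporary resources */
+	r600_blit_done_copy(rdev, fence, vb, sem);
+	return 0;
+}
+#endif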
diff --git a/linux-imx/drivers/gpu/drm/radeon/r600_blit_shaders.c b/linux-imx/drivers/gpu/drm/radeon/r600_blit_shaders.c
new file mode 100644
index 0000000..34c8b23
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -0,0 +1,719 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * R6xx+ cards need to use the 3D engine to blit data, which requires
+ * quite a bit of hw state setup.  Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables.  The register state and shaders
+ * were hand generated to support blitting functionality.  See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
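+
+/*
+ * Table format note: each group below is a raw CP PACKET3 header followed
+ * by its payload.  For example, 0xc0016900 decodes as packet type 3
+ * (bits 31:30), a count of 1 (bits 29:16, payload length minus one) and
+ * opcode 0x69, SET_CONTEXT_REG (bits 15:8), i.e. one register-offset
+ * dword followed by one value dword.  0xc0016800 is the same with
+ * opcode 0x68, SET_CONFIG_REG.
+ */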
+
+const u32 r6xx_default_state[] =
+{
+	0xc0002400, /* START_3D_CMDBUF */
+	0x00000000,
+
+	0xc0012800, /* CONTEXT_CONTROL */
+	0x80000000,
+	0x80000000,
+
+	0xc0016800,
+	0x00000010,
+	0x00008000, /* WAIT_UNTIL */
+
+	0xc0016800,
+	0x00000542,
+	0x07000003, /* TA_CNTL_AUX */
+
+	0xc0016800,
+	0x000005c5,
+	0x00000000, /* VC_ENHANCE */
+
+	0xc0016800,
+	0x00000363,
+	0x00000000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
+
+	0xc0016800,
+	0x0000060c,
+	0x82000000, /* DB_DEBUG */
+
+	0xc0016800,
+	0x0000060e,
+	0x01020204, /* DB_WATERMARKS */
+
+	0xc0026f00,
+	0x00000000,
+	0x00000000, /* SQ_VTX_BASE_VTX_LOC */
+	0x00000000, /* SQ_VTX_START_INST_LOC */
+
+	0xc0096900,
+	0x0000022a,
+	0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0016900,
+	0x00000004,
+	0x00000000, /* DB_DEPTH_INFO */
+
+	0xc0026900,
+	0x0000000a,
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0016900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+
+	0xc0026900,
+	0x00000343,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000040, /* DB_RENDER_OVERRIDE */
+
+	0xc0016900,
+	0x00000351,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc00f6900,
+	0x00000100,
+	0x00000800, /* VGT_MAX_VTX_INDX */
+	0x00000000, /* VGT_MIN_VTX_INDX */
+	0x00000000, /* VGT_INDX_OFFSET */
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+	0x00000000, /* SX_ALPHA_TEST_CONTROL */
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000, /* CB_FOG_RED */
+	0x00000000,
+	0x00000000,
+	0x00000000, /* DB_STENCILREFMASK */
+	0x00000000, /* DB_STENCILREFMASK_BF */
+	0x00000000, /* SX_ALPHA_REF */
+
+	0xc0046900,
+	0x0000030c,
+	0x01000000, /* CB_CLRCMP_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0046900,
+	0x00000048,
+	0x3f800000, /* CB_CLEAR_RED */
+	0x00000000,
+	0x3f800000,
+	0x3f800000,
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00a6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000, /* PA_SC_EDGERULE */
+
+	0xc0406900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MPASS_PS_CNTL */
+	0x00004010, /* PA_SC_MODE_CNTL */
+
+	0xc0096900,
+	0x00000300,
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x0000002d, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000,
+	0x3f800000,
+	0x3f800000,
+	0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
+	0x00000000,
+
+	0xc0016900,
+	0x00000312,
+	0xffffffff, /* PA_SC_AA_MASK */
+
+	0xc0066900,
+	0x0000037e,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
+	0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
+	0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
+	0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
+	0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
+
+	0xc0046900,
+	0x000001b6,
+	0x00000000, /* SPI_INPUT_Z */
+	0x00000000, /* SPI_FOG_CNTL */
+	0x00000000, /* SPI_FOG_FUNC_SCALE */
+	0x00000000, /* SPI_FOG_FUNC_BIAS */
+
+	0xc0016900,
+	0x00000225,
+	0x00000000, /* SQ_PGM_START_FS */
+
+	0xc0016900,
+	0x00000229,
+	0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+	0xc0016900,
+	0x00000237,
+	0x00000000, /* SQ_PGM_CF_OFFSET_FS */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
+	0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
+	0x00000000, /* VGT_HOS_REUSE_DEPTH */
+	0x00000000, /* VGT_GROUP_PRIM_TYPE */
+	0x00000000, /* VGT_GROUP_FIRST_DECR */
+	0x00000000, /* VGT_GROUP_DECR */
+	0x00000000, /* VGT_GROUP_VECT_0_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_1_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
+
+	0xc0036900,
+	0x000002ac,
+	0x00000000, /* VGT_STRMOUT_EN */
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000, /* VGT_VTX_CNT_EN */
+
+	0xc0016900,
+	0x000000d4,
+	0x00000000, /* SX_MISC */
+
+	0xc0016900,
+	0x000002c8,
+	0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+	0xc0076900,
+	0x00000202,
+	0x00cc0000, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CNTL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000244, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+
+	0xc0026900,
+	0x0000008e,
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0016900,
+	0x000001e8,
+	0x00000001, /* CB_SHADER_CONTROL */
+
+	0xc0016900,
+	0x00000185,
+	0x00000000, /* SPI_VS_OUT_ID_0 */
+
+	0xc0016900,
+	0x00000191,
+	0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
+
+	0xc0056900,
+	0x000001b1,
+	0x00000000, /* SPI_VS_OUT_CONFIG */
+	0x00000000, /* SPI_THREAD_GROUPING */
+	0x00000001, /* SPI_PS_IN_CONTROL_0 */
+	0x00000000, /* SPI_PS_IN_CONTROL_1 */
+	0x00000000, /* SPI_INTERP_CONTROL_0 */
+
+	0xc0036e00, /* SET_SAMPLER */
+	0x00000000,
+	0x00000012,
+	0x00000000,
+	0x00000000,
+};
+
+const u32 r7xx_default_state[] =
+{
+	0xc0012800, /* CONTEXT_CONTROL */
+	0x80000000,
+	0x80000000,
+
+	0xc0016800,
+	0x00000010,
+	0x00008000, /* WAIT_UNTIL */
+
+	0xc0016800,
+	0x00000542,
+	0x07000002, /* TA_CNTL_AUX */
+
+	0xc0016800,
+	0x000005c5,
+	0x00000000, /* VC_ENHANCE */
+
+	0xc0016800,
+	0x00000363,
+	0x00004000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
+
+	0xc0016800,
+	0x0000060c,
+	0x00000000, /* DB_DEBUG */
+
+	0xc0016800,
+	0x0000060e,
+	0x00420204, /* DB_WATERMARKS */
+
+	0xc0026f00,
+	0x00000000,
+	0x00000000, /* SQ_VTX_BASE_VTX_LOC */
+	0x00000000, /* SQ_VTX_START_INST_LOC */
+
+	0xc0096900,
+	0x0000022a,
+	0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0016900,
+	0x00000004,
+	0x00000000, /* DB_DEPTH_INFO */
+
+	0xc0026900,
+	0x0000000a,
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0016900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+
+	0xc0026900,
+	0x00000343,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000000, /* DB_RENDER_OVERRIDE */
+
+	0xc0016900,
+	0x00000351,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc0096900,
+	0x00000100,
+	0x00000800, /* VGT_MAX_VTX_INDX */
+	0x00000000, /* VGT_MIN_VTX_INDX */
+	0x00000000, /* VGT_INDX_OFFSET */
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+	0x00000000, /* SX_ALPHA_TEST_CONTROL */
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0036900,
+	0x0000010c,
+	0x00000000, /* DB_STENCILREFMASK */
+	0x00000000, /* DB_STENCILREFMASK_BF */
+	0x00000000, /* SX_ALPHA_REF */
+
+	0xc0046900,
+	0x0000030c, /* CB_CLRCMP_CNTL */
+	0x01000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00a6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0xaaaaaaaa, /* PA_SC_EDGERULE */
+
+	0xc0406900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+	0x00000000,
+	0x3f800000,
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MPASS_PS_CNTL */
+	0x00514000, /* PA_SC_MODE_CNTL */
+
+	0xc0096900,
+	0x00000300,
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x0000002d, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000,
+	0x3f800000,
+	0x3f800000,
+	0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
+	0x00000000,
+
+	0xc0016900,
+	0x00000312,
+	0xffffffff, /* PA_SC_AA_MASK */
+
+	0xc0066900,
+	0x0000037e,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
+	0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
+	0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
+	0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
+	0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
+
+	0xc0046900,
+	0x000001b6,
+	0x00000000, /* SPI_INPUT_Z */
+	0x00000000, /* SPI_FOG_CNTL */
+	0x00000000, /* SPI_FOG_FUNC_SCALE */
+	0x00000000, /* SPI_FOG_FUNC_BIAS */
+
+	0xc0016900,
+	0x00000225,
+	0x00000000, /* SQ_PGM_START_FS */
+
+	0xc0016900,
+	0x00000229,
+	0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+	0xc0016900,
+	0x00000237,
+	0x00000000, /* SQ_PGM_CF_OFFSET_FS */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
+	0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
+	0x00000000, /* VGT_HOS_REUSE_DEPTH */
+	0x00000000, /* VGT_GROUP_PRIM_TYPE */
+	0x00000000, /* VGT_GROUP_FIRST_DECR */
+	0x00000000, /* VGT_GROUP_DECR */
+	0x00000000, /* VGT_GROUP_VECT_0_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_1_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
+	0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
+
+	0xc0036900,
+	0x000002ac,
+	0x00000000, /* VGT_STRMOUT_EN */
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000, /* VGT_VTX_CNT_EN */
+
+	0xc0016900,
+	0x000000d4,
+	0x00000000, /* SX_MISC */
+
+	0xc0016900,
+	0x000002c8,
+	0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+	0xc0076900,
+	0x00000202,
+	0x00cc0000, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CNTL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000244, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+
+	0xc0026900,
+	0x0000008e,
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0016900,
+	0x000001e8,
+	0x00000001, /* CB_SHADER_CONTROL */
+
+	0xc0016900,
+	0x00000185,
+	0x00000000, /* SPI_VS_OUT_ID_0 */
+
+	0xc0016900,
+	0x00000191,
+	0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
+
+	0xc0056900,
+	0x000001b1,
+	0x00000000, /* SPI_VS_OUT_CONFIG */
+	0x00000001, /* SPI_THREAD_GROUPING */
+	0x00000001, /* SPI_PS_IN_CONTROL_0 */
+	0x00000000, /* SPI_PS_IN_CONTROL_1 */
+	0x00000000, /* SPI_INTERP_CONTROL_0 */
+
+	0xc0036e00, /* SET_SAMPLER */
+	0x00000000,
+	0x00000012,
+	0x00000000,
+	0x00000000,
+};
+
+/* same for r6xx/r7xx */
+const u32 r6xx_vs[] =
+{
+	0x00000004,
+	0x81000000,
+	0x0000203c,
+	0x94000b08,
+	0x00004000,
+	0x14200b1a,
+	0x00000000,
+	0x00000000,
+	0x3c000000,
+	0x68cd1000,
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
+	0x00080000,
+#endif
+	0x00000000,
+};
+
+const u32 r6xx_ps[] =
+{
+	0x00000002,
+	0x80800000,
+	0x00000000,
+	0x94200688,
+	0x00000010,
+	0x000d1000,
+	0xb0800000,
+	0x00000000,
+};
+
+const u32 r6xx_ps_size = ARRAY_SIZE(r6xx_ps);
+const u32 r6xx_vs_size = ARRAY_SIZE(r6xx_vs);
+const u32 r6xx_default_size = ARRAY_SIZE(r6xx_default_state);
+const u32 r7xx_default_size = ARRAY_SIZE(r7xx_default_state);
diff --git a/linux-imx/drivers/gpu/drm/radeon/r600_blit_shaders.h b/linux-imx/drivers/gpu/drm/radeon/r600_blit_shaders.h
new file mode 100644
index 0000000..2f3ce7a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef R600_BLIT_SHADERS_H
+#define R600_BLIT_SHADERS_H
+
+extern const u32 r6xx_ps[];
+extern const u32 r6xx_vs[];
+extern const u32 r7xx_default_state[];
+extern const u32 r6xx_default_state[];
+
+extern const u32 r6xx_ps_size, r6xx_vs_size;
+extern const u32 r6xx_default_size, r7xx_default_size;
+
+__pure uint32_t int2float(uint32_t x);
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/r600_cp.c b/linux-imx/drivers/gpu/drm/radeon/r600_cp.c
new file mode 100644
index 0000000..1c51c08
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r600_cp.c
@@ -0,0 +1,2660 @@
+/*
+ * Copyright 2008-2009 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Dave Airlie <airlied@redhat.com>
+ *     Alex Deucher <alexander.deucher@amd.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+#define PFP_UCODE_SIZE 576
+#define PM4_UCODE_SIZE 1792
+#define R700_PFP_UCODE_SIZE 848
+#define R700_PM4_UCODE_SIZE 1360
+
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/R600_pfp.bin");
+MODULE_FIRMWARE("radeon/R600_me.bin");
+MODULE_FIRMWARE("radeon/RV610_pfp.bin");
+MODULE_FIRMWARE("radeon/RV610_me.bin");
+MODULE_FIRMWARE("radeon/RV630_pfp.bin");
+MODULE_FIRMWARE("radeon/RV630_me.bin");
+MODULE_FIRMWARE("radeon/RV620_pfp.bin");
+MODULE_FIRMWARE("radeon/RV620_me.bin");
+MODULE_FIRMWARE("radeon/RV635_pfp.bin");
+MODULE_FIRMWARE("radeon/RV635_me.bin");
+MODULE_FIRMWARE("radeon/RV670_pfp.bin");
+MODULE_FIRMWARE("radeon/RV670_me.bin");
+MODULE_FIRMWARE("radeon/RS780_pfp.bin");
+MODULE_FIRMWARE("radeon/RS780_me.bin");
+MODULE_FIRMWARE("radeon/RV770_pfp.bin");
+MODULE_FIRMWARE("radeon/RV770_me.bin");
+MODULE_FIRMWARE("radeon/RV730_pfp.bin");
+MODULE_FIRMWARE("radeon/RV730_me.bin");
+MODULE_FIRMWARE("radeon/RV710_pfp.bin");
+MODULE_FIRMWARE("radeon/RV710_me.bin");
+
+
+int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
+			unsigned family, u32 *ib, int *l);
+void r600_cs_legacy_init(void);
+
+
+# define ATI_PCIGART_PAGE_SIZE		4096	/**< PCI GART page size */
+# define ATI_PCIGART_PAGE_MASK		(~(ATI_PCIGART_PAGE_SIZE-1))
+
+#define R600_PTE_VALID     (1 << 0)
+#define R600_PTE_SYSTEM    (1 << 1)
+#define R600_PTE_SNOOPED   (1 << 2)
+#define R600_PTE_READABLE  (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+/* MAX values used for gfx init */
+#define R6XX_MAX_SH_GPRS           256
+#define R6XX_MAX_TEMP_GPRS         16
+#define R6XX_MAX_SH_THREADS        256
+#define R6XX_MAX_SH_STACK_ENTRIES  4096
+#define R6XX_MAX_BACKENDS          8
+#define R6XX_MAX_BACKENDS_MASK     0xff
+#define R6XX_MAX_SIMDS             8
+#define R6XX_MAX_SIMDS_MASK        0xff
+#define R6XX_MAX_PIPES             8
+#define R6XX_MAX_PIPES_MASK        0xff
+
+#define R7XX_MAX_SH_GPRS           256
+#define R7XX_MAX_TEMP_GPRS         16
+#define R7XX_MAX_SH_THREADS        256
+#define R7XX_MAX_SH_STACK_ENTRIES  4096
+#define R7XX_MAX_BACKENDS          8
+#define R7XX_MAX_BACKENDS_MASK     0xff
+#define R7XX_MAX_SIMDS             16
+#define R7XX_MAX_SIMDS_MASK        0xffff
+#define R7XX_MAX_PIPES             8
+#define R7XX_MAX_PIPES_MASK        0xff
+
+static int r600_do_wait_for_fifo(drm_radeon_private_t *dev_priv, int entries)
+{
+	int i;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		int slots;
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+			slots = (RADEON_READ(R600_GRBM_STATUS)
+				 & R700_CMDFIFO_AVAIL_MASK);
+		else
+			slots = (RADEON_READ(R600_GRBM_STATUS)
+				 & R600_CMDFIFO_AVAIL_MASK);
+		if (slots >= entries)
+			return 0;
+		DRM_UDELAY(1);
+	}
+	DRM_INFO("wait for fifo failed, status: 0x%08X 0x%08X\n",
+		 RADEON_READ(R600_GRBM_STATUS),
+		 RADEON_READ(R600_GRBM_STATUS2));
+
+	return -EBUSY;
+}
+
+static int r600_do_wait_for_idle(drm_radeon_private_t *dev_priv)
+{
+	int i, ret;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+		ret = r600_do_wait_for_fifo(dev_priv, 8);
+	else
+		ret = r600_do_wait_for_fifo(dev_priv, 16);
+	if (ret)
+		return ret;
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		if (!(RADEON_READ(R600_GRBM_STATUS) & R600_GUI_ACTIVE))
+			return 0;
+		DRM_UDELAY(1);
+	}
+	DRM_INFO("wait idle failed, status: 0x%08X 0x%08X\n",
+		 RADEON_READ(R600_GRBM_STATUS),
+		 RADEON_READ(R600_GRBM_STATUS2));
+
+	return -EBUSY;
+}
+
+void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+	struct drm_sg_mem *entry = dev->sg;
+	int max_pages;
+	int pages;
+	int i;
+
+	if (!entry)
+		return;
+
+	if (gart_info->bus_addr) {
+		max_pages = (gart_info->table_size / sizeof(u64));
+		pages = (entry->pages <= max_pages)
+		  ? entry->pages : max_pages;
+
+		for (i = 0; i < pages; i++) {
+			if (!entry->busaddr[i])
+				break;
+			pci_unmap_page(dev->pdev, entry->busaddr[i],
+				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		}
+		if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+			gart_info->bus_addr = 0;
+	}
+}
+
+/* R600 page table setup */
+int r600_page_table_init(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info;
+	struct drm_local_map *map = &gart_info->mapping;
+	struct drm_sg_mem *entry = dev->sg;
+	int ret = 0;
+	int i, j;
+	int pages;
+	u64 page_base;
+	dma_addr_t entry_addr;
+	int max_ati_pages, max_real_pages, gart_idx;
+
+	/* okay, the page table is available - let's populate it */
+	max_ati_pages = (gart_info->table_size / sizeof(u64));
+	max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
+
+	pages = (entry->pages <= max_real_pages) ?
+		entry->pages : max_real_pages;
+
+	memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u64));
+
+	gart_idx = 0;
+	for (i = 0; i < pages; i++) {
+		entry->busaddr[i] = pci_map_page(dev->pdev,
+						 entry->pagelist[i], 0,
+						 PAGE_SIZE,
+						 PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
+			DRM_ERROR("unable to map PCIGART pages!\n");
+			r600_page_table_cleanup(dev, gart_info);
+			goto done;
+		}
+		entry_addr = entry->busaddr[i];
+		for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+			page_base = (u64) entry_addr & ATI_PCIGART_PAGE_MASK;
+			page_base |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+			page_base |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+
+			DRM_WRITE64(map, gart_idx * sizeof(u64), page_base);
+
+			gart_idx++;
+
+			if ((i % 128) == 0)
+				DRM_DEBUG("page entry %d: 0x%016llx\n",
+				    i, (unsigned long long)page_base);
+			entry_addr += ATI_PCIGART_PAGE_SIZE;
+		}
+	}
+	ret = 1;
+done:
+	return ret;
+}
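+
+/*
+ * Worked example: with all five R600_PTE_* flags above, a system page at
+ * bus address 0x12345000 is entered as
+ *
+ *	0x12345000 | R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED |
+ *	R600_PTE_READABLE | R600_PTE_WRITEABLE == 0x12345067
+ *
+ * i.e. the low 12 bits of the 4 KiB-aligned address are reused as flags.
+ */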
+
+static void r600_vm_flush_gart_range(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	u32 resp, countdown = 1000;
+	RADEON_WRITE(R600_VM_CONTEXT0_INVALIDATION_LOW_ADDR, dev_priv->gart_vm_start >> 12);
+	RADEON_WRITE(R600_VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+	RADEON_WRITE(R600_VM_CONTEXT0_REQUEST_RESPONSE, 2);
+
+	do {
+		resp = RADEON_READ(R600_VM_CONTEXT0_REQUEST_RESPONSE);
+		countdown--;
+		DRM_UDELAY(1);
+	} while (((resp & 0xf0) == 0) && countdown);
+}
+
+static void r600_vm_init(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	/* initialise the VM to use the page table constructed above */
+	u32 vm_c0, i;
+	u32 mc_rd_a;
+	u32 vm_l2_cntl, vm_l2_cntl3;
+	/* set up the PCIe system aperture */
+	RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start >> 12);
+	RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+	RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+
+	/* setup MC RD a */
+	mc_rd_a = R600_MCD_L1_TLB | R600_MCD_L1_FRAG_PROC | R600_MCD_SYSTEM_ACCESS_MODE_IN_SYS |
+		R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | R600_MCD_EFFECTIVE_L1_TLB_SIZE(5) |
+		R600_MCD_EFFECTIVE_L1_QUEUE_SIZE(5) | R600_MCD_WAIT_L2_QUERY;
+
+	RADEON_WRITE(R600_MCD_RD_A_CNTL, mc_rd_a);
+	RADEON_WRITE(R600_MCD_RD_B_CNTL, mc_rd_a);
+
+	RADEON_WRITE(R600_MCD_WR_A_CNTL, mc_rd_a);
+	RADEON_WRITE(R600_MCD_WR_B_CNTL, mc_rd_a);
+
+	RADEON_WRITE(R600_MCD_RD_GFX_CNTL, mc_rd_a);
+	RADEON_WRITE(R600_MCD_WR_GFX_CNTL, mc_rd_a);
+
+	RADEON_WRITE(R600_MCD_RD_SYS_CNTL, mc_rd_a);
+	RADEON_WRITE(R600_MCD_WR_SYS_CNTL, mc_rd_a);
+
+	RADEON_WRITE(R600_MCD_RD_HDP_CNTL, mc_rd_a | R600_MCD_L1_STRICT_ORDERING);
+	RADEON_WRITE(R600_MCD_WR_HDP_CNTL, mc_rd_a /*| R600_MCD_L1_STRICT_ORDERING*/);
+
+	RADEON_WRITE(R600_MCD_RD_PDMA_CNTL, mc_rd_a);
+	RADEON_WRITE(R600_MCD_WR_PDMA_CNTL, mc_rd_a);
+
+	RADEON_WRITE(R600_MCD_RD_SEM_CNTL, mc_rd_a | R600_MCD_SEMAPHORE_MODE);
+	RADEON_WRITE(R600_MCD_WR_SEM_CNTL, mc_rd_a);
+
+	vm_l2_cntl = R600_VM_L2_CACHE_EN | R600_VM_L2_FRAG_PROC | R600_VM_ENABLE_PTE_CACHE_LRU_W;
+	vm_l2_cntl |= R600_VM_L2_CNTL_QUEUE_SIZE(7);
+	RADEON_WRITE(R600_VM_L2_CNTL, vm_l2_cntl);
+
+	RADEON_WRITE(R600_VM_L2_CNTL2, 0);
+	vm_l2_cntl3 = (R600_VM_L2_CNTL3_BANK_SELECT_0(0) |
+		       R600_VM_L2_CNTL3_BANK_SELECT_1(1) |
+		       R600_VM_L2_CNTL3_CACHE_UPDATE_MODE(2));
+	RADEON_WRITE(R600_VM_L2_CNTL3, vm_l2_cntl3);
+
+	vm_c0 = R600_VM_ENABLE_CONTEXT | R600_VM_PAGE_TABLE_DEPTH_FLAT;
+
+	RADEON_WRITE(R600_VM_CONTEXT0_CNTL, vm_c0);
+
+	vm_c0 &= ~R600_VM_ENABLE_CONTEXT;
+
+	/* disable all other contexts */
+	for (i = 1; i < 8; i++)
+		RADEON_WRITE(R600_VM_CONTEXT0_CNTL + (i * 4), vm_c0);
+
+	RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, dev_priv->gart_info.bus_addr >> 12);
+	RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_START_ADDR, dev_priv->gart_vm_start >> 12);
+	RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+
+	r600_vm_flush_gart_range(dev);
+}
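+
+/*
+ * Sizing note: with R600_VM_PAGE_TABLE_DEPTH_FLAT, context 0 needs one
+ * 64-bit PTE per 4 KiB GPU page, so a 64 MiB GART aperture, for example,
+ * takes (64 << 20) / 4096 * 8 = 128 KiB of table space at
+ * dev_priv->gart_info.bus_addr.
+ */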
+
+static int r600_cp_init_microcode(drm_radeon_private_t *dev_priv)
+{
+	struct platform_device *pdev;
+	const char *chip_name;
+	size_t pfp_req_size, me_req_size;
+	char fw_name[30];
+	int err;
+
+	pdev = platform_device_register_simple("r600_cp", 0, NULL, 0);
+	err = IS_ERR(pdev);
+	if (err) {
+		printk(KERN_ERR "r600_cp: Failed to register firmware\n");
+		return -EINVAL;
+	}
+
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_R600:  chip_name = "R600";  break;
+	case CHIP_RV610: chip_name = "RV610"; break;
+	case CHIP_RV630: chip_name = "RV630"; break;
+	case CHIP_RV620: chip_name = "RV620"; break;
+	case CHIP_RV635: chip_name = "RV635"; break;
+	case CHIP_RV670: chip_name = "RV670"; break;
+	case CHIP_RS780:
+	case CHIP_RS880: chip_name = "RS780"; break;
+	case CHIP_RV770: chip_name = "RV770"; break;
+	case CHIP_RV730:
+	case CHIP_RV740: chip_name = "RV730"; break;
+	case CHIP_RV710: chip_name = "RV710"; break;
+	default:         BUG();
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
+		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
+		me_req_size = R700_PM4_UCODE_SIZE * 4;
+	} else {
+		pfp_req_size = PFP_UCODE_SIZE * 4;
+		me_req_size = PM4_UCODE_SIZE * 12;
+	}
+
+	DRM_INFO("Loading %s CP Microcode\n", chip_name);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+	err = request_firmware(&dev_priv->pfp_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (dev_priv->pfp_fw->size != pfp_req_size) {
+		printk(KERN_ERR
+		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
+		       dev_priv->pfp_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+	err = request_firmware(&dev_priv->me_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (dev_priv->me_fw->size != me_req_size) {
+		printk(KERN_ERR
+		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
+		       dev_priv->me_fw->size, fw_name);
+		err = -EINVAL;
+	}
+out:
+	platform_device_unregister(pdev);
+
+	if (err) {
+		if (err != -EINVAL)
+			printk(KERN_ERR
+			       "r600_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		release_firmware(dev_priv->pfp_fw);
+		dev_priv->pfp_fw = NULL;
+		release_firmware(dev_priv->me_fw);
+		dev_priv->me_fw = NULL;
+	}
+	return err;
+}
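+
+/*
+ * Size check rationale: the R6xx ME (PM4) microcode packs three dwords
+ * per instruction, so its file size is PM4_UCODE_SIZE * 3 * 4 =
+ * PM4_UCODE_SIZE * 12 bytes, matching the PM4_UCODE_SIZE * 3 dword load
+ * loop in r600_cp_load_microcode() below.  The PFP image and both R7xx
+ * images are plain dword streams, hence the * 4 above.
+ */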
+
+static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!dev_priv->me_fw || !dev_priv->pfp_fw)
+		return;
+
+	r600_do_cp_stop(dev_priv);
+
+	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
+		     R600_RB_NO_UPDATE |
+		     R600_RB_BLKSZ(15) |
+		     R600_RB_BUFSZ(3));
+
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
+	RADEON_READ(R600_GRBM_SOFT_RESET);
+	mdelay(15);
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+
+	fw_data = (const __be32 *)dev_priv->me_fw->data;
+	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
+		RADEON_WRITE(R600_CP_ME_RAM_DATA,
+			     be32_to_cpup(fw_data++));
+
+	fw_data = (const __be32 *)dev_priv->pfp_fw->data;
+	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < PFP_UCODE_SIZE; i++)
+		RADEON_WRITE(R600_CP_PFP_UCODE_DATA,
+			     be32_to_cpup(fw_data++));
+
+	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+	RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0);
+}
+
+static void r700_vm_init(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	/* initialise the VM to use the page table constructed above */
+	u32 vm_c0, i;
+	u32 mc_vm_md_l1;
+	u32 vm_l2_cntl, vm_l2_cntl3;
+	/* set up the PCIe system aperture */
+	RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start >> 12);
+	RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+	RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+
+	mc_vm_md_l1 = R700_ENABLE_L1_TLB |
+	    R700_ENABLE_L1_FRAGMENT_PROCESSING |
+	    R700_SYSTEM_ACCESS_MODE_IN_SYS |
+	    R700_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+	    R700_EFFECTIVE_L1_TLB_SIZE(5) |
+	    R700_EFFECTIVE_L1_QUEUE_SIZE(5);
+
+	RADEON_WRITE(R700_MC_VM_MD_L1_TLB0_CNTL, mc_vm_md_l1);
+	RADEON_WRITE(R700_MC_VM_MD_L1_TLB1_CNTL, mc_vm_md_l1);
+	RADEON_WRITE(R700_MC_VM_MD_L1_TLB2_CNTL, mc_vm_md_l1);
+	RADEON_WRITE(R700_MC_VM_MB_L1_TLB0_CNTL, mc_vm_md_l1);
+	RADEON_WRITE(R700_MC_VM_MB_L1_TLB1_CNTL, mc_vm_md_l1);
+	RADEON_WRITE(R700_MC_VM_MB_L1_TLB2_CNTL, mc_vm_md_l1);
+	RADEON_WRITE(R700_MC_VM_MB_L1_TLB3_CNTL, mc_vm_md_l1);
+
+	vm_l2_cntl = R600_VM_L2_CACHE_EN | R600_VM_L2_FRAG_PROC | R600_VM_ENABLE_PTE_CACHE_LRU_W;
+	vm_l2_cntl |= R700_VM_L2_CNTL_QUEUE_SIZE(7);
+	RADEON_WRITE(R600_VM_L2_CNTL, vm_l2_cntl);
+
+	RADEON_WRITE(R600_VM_L2_CNTL2, 0);
+	vm_l2_cntl3 = R700_VM_L2_CNTL3_BANK_SELECT(0) | R700_VM_L2_CNTL3_CACHE_UPDATE_MODE(2);
+	RADEON_WRITE(R600_VM_L2_CNTL3, vm_l2_cntl3);
+
+	vm_c0 = R600_VM_ENABLE_CONTEXT | R600_VM_PAGE_TABLE_DEPTH_FLAT;
+
+	RADEON_WRITE(R600_VM_CONTEXT0_CNTL, vm_c0);
+
+	vm_c0 &= ~R600_VM_ENABLE_CONTEXT;
+
+	/* disable all other contexts */
+	for (i = 1; i < 8; i++)
+		RADEON_WRITE(R600_VM_CONTEXT0_CNTL + (i * 4), vm_c0);
+
+	RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, dev_priv->gart_info.bus_addr >> 12);
+	RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_START_ADDR, dev_priv->gart_vm_start >> 12);
+	RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12);
+
+	r600_vm_flush_gart_range(dev);
+}
+
+static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!dev_priv->me_fw || !dev_priv->pfp_fw)
+		return;
+
+	r600_do_cp_stop(dev_priv);
+
+	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
+		     R600_RB_NO_UPDATE |
+		     R600_RB_BLKSZ(15) |
+		     R600_RB_BUFSZ(3));
+
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
+	RADEON_READ(R600_GRBM_SOFT_RESET);
+	mdelay(15);
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+
+	fw_data = (const __be32 *)dev_priv->pfp_fw->data;
+	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
+		RADEON_WRITE(R600_CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+
+	fw_data = (const __be32 *)dev_priv->me_fw->data;
+	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
+		RADEON_WRITE(R600_CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+
+	RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
+	RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
+	RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0);
+}
+
+static void r600_test_writeback(drm_radeon_private_t *dev_priv)
+{
+	u32 tmp;
+
+	/* Start with assuming that writeback doesn't work */
+	dev_priv->writeback_works = 0;
+
+	/* Writeback doesn't seem to work everywhere, test it here and possibly
+	 * enable it if it appears to work
+	 */
+	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(1), 0);
+
+	RADEON_WRITE(R600_SCRATCH_REG1, 0xdeadbeef);
+
+	for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
+		u32 val;
+
+		val = radeon_read_ring_rptr(dev_priv, R600_SCRATCHOFF(1));
+		if (val == 0xdeadbeef)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (tmp < dev_priv->usec_timeout) {
+		dev_priv->writeback_works = 1;
+		DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
+	} else {
+		dev_priv->writeback_works = 0;
+		DRM_INFO("writeback test failed\n");
+	}
+	if (radeon_no_wb == 1) {
+		dev_priv->writeback_works = 0;
+		DRM_INFO("writeback forced off\n");
+	}
+
+	if (!dev_priv->writeback_works) {
+		/* Disable writeback to avoid unnecessary bus master transfer */
+		RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+			     R600_BUF_SWAP_32BIT |
+#endif
+			     RADEON_READ(R600_CP_RB_CNTL) |
+			     R600_RB_NO_UPDATE);
+		RADEON_WRITE(R600_SCRATCH_UMSK, 0);
+	}
+}
+
+int r600_do_engine_reset(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	u32 cp_ptr, cp_me_cntl, cp_rb_cntl;
+
+	DRM_INFO("Resetting GPU\n");
+
+	cp_ptr = RADEON_READ(R600_CP_RB_WPTR);
+	cp_me_cntl = RADEON_READ(R600_CP_ME_CNTL);
+	RADEON_WRITE(R600_CP_ME_CNTL, R600_CP_ME_HALT);
+
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0x7fff);
+	RADEON_READ(R600_GRBM_SOFT_RESET);
+	DRM_UDELAY(50);
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+	RADEON_READ(R600_GRBM_SOFT_RESET);
+
+	RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
+	cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
+	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
+		     R600_RB_RPTR_WR_ENA);
+
+	RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
+	RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
+	RADEON_WRITE(R600_CP_RB_CNTL, cp_rb_cntl);
+	RADEON_WRITE(R600_CP_ME_CNTL, cp_me_cntl);
+
+	/* Reset the CP ring */
+	r600_do_cp_reset(dev_priv);
+
+	/* The CP is no longer running after an engine reset */
+	dev_priv->cp_running = 0;
+
+	/* Reset any pending vertex, indirect buffers */
+	radeon_freelist_reset(dev);
+
+	return 0;
+}
+
+static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+					     u32 num_backends,
+					     u32 backend_disable_mask)
+{
+	u32 backend_map = 0;
+	u32 enabled_backends_mask;
+	u32 enabled_backends_count;
+	u32 cur_pipe;
+	u32 swizzle_pipe[R6XX_MAX_PIPES];
+	u32 cur_backend;
+	u32 i;
+
+	if (num_tile_pipes > R6XX_MAX_PIPES)
+		num_tile_pipes = R6XX_MAX_PIPES;
+	if (num_tile_pipes < 1)
+		num_tile_pipes = 1;
+	if (num_backends > R6XX_MAX_BACKENDS)
+		num_backends = R6XX_MAX_BACKENDS;
+	if (num_backends < 1)
+		num_backends = 1;
+
+	enabled_backends_mask = 0;
+	enabled_backends_count = 0;
+	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
+		if (((backend_disable_mask >> i) & 1) == 0) {
+			enabled_backends_mask |= (1 << i);
+			++enabled_backends_count;
+		}
+		if (enabled_backends_count == num_backends)
+			break;
+	}
+
+	if (enabled_backends_count == 0) {
+		enabled_backends_mask = 1;
+		enabled_backends_count = 1;
+	}
+
+	if (enabled_backends_count != num_backends)
+		num_backends = enabled_backends_count;
+
+	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
+	switch (num_tile_pipes) {
+	case 1:
+		swizzle_pipe[0] = 0;
+		break;
+	case 2:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 1;
+		break;
+	case 3:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 1;
+		swizzle_pipe[2] = 2;
+		break;
+	case 4:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 1;
+		swizzle_pipe[2] = 2;
+		swizzle_pipe[3] = 3;
+		break;
+	case 5:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 1;
+		swizzle_pipe[2] = 2;
+		swizzle_pipe[3] = 3;
+		swizzle_pipe[4] = 4;
+		break;
+	case 6:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 2;
+		swizzle_pipe[2] = 4;
+		swizzle_pipe[3] = 5;
+		swizzle_pipe[4] = 1;
+		swizzle_pipe[5] = 3;
+		break;
+	case 7:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 2;
+		swizzle_pipe[2] = 4;
+		swizzle_pipe[3] = 6;
+		swizzle_pipe[4] = 1;
+		swizzle_pipe[5] = 3;
+		swizzle_pipe[6] = 5;
+		break;
+	case 8:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 2;
+		swizzle_pipe[2] = 4;
+		swizzle_pipe[3] = 6;
+		swizzle_pipe[4] = 1;
+		swizzle_pipe[5] = 3;
+		swizzle_pipe[6] = 5;
+		swizzle_pipe[7] = 7;
+		break;
+	}
+
+	cur_backend = 0;
+	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+		while (((1 << cur_backend) & enabled_backends_mask) == 0)
+			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
+
+		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
+
+		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
+	}
+
+	return backend_map;
+}
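+
+/*
+ * Worked example: for num_tile_pipes = 4, num_backends = 4 and no
+ * disabled backends, swizzle_pipe is {0, 1, 2, 3} and the loop assigns
+ * backend n to pipe n, giving
+ *
+ *	backend_map = (0 << 0) | (1 << 2) | (2 << 4) | (3 << 6) = 0xe4
+ *
+ * i.e. two bits of backend id per tile pipe.
+ */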
+
+static int r600_count_pipe_bits(uint32_t val)
+{
+	return hweight32(val);
+}
+
+static void r600_gfx_init(struct drm_device *dev,
+			  drm_radeon_private_t *dev_priv)
+{
+	int i, j, num_qd_pipes;
+	u32 sx_debug_1;
+	u32 tc_cntl;
+	u32 arb_pop;
+	u32 num_gs_verts_per_thread;
+	u32 vgt_gs_per_es;
+	u32 gs_prim_buffer_depth = 0;
+	u32 sq_ms_fifo_sizes;
+	u32 sq_config;
+	u32 sq_gpr_resource_mgmt_1 = 0;
+	u32 sq_gpr_resource_mgmt_2 = 0;
+	u32 sq_thread_resource_mgmt = 0;
+	u32 sq_stack_resource_mgmt_1 = 0;
+	u32 sq_stack_resource_mgmt_2 = 0;
+	u32 hdp_host_path_cntl;
+	u32 backend_map;
+	u32 gb_tiling_config = 0;
+	u32 cc_rb_backend_disable;
+	u32 cc_gc_shader_pipe_config;
+	u32 ramcfg;
+
+	/* setup chip specs */
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_R600:
+		dev_priv->r600_max_pipes = 4;
+		dev_priv->r600_max_tile_pipes = 8;
+		dev_priv->r600_max_simds = 4;
+		dev_priv->r600_max_backends = 4;
+		dev_priv->r600_max_gprs = 256;
+		dev_priv->r600_max_threads = 192;
+		dev_priv->r600_max_stack_entries = 256;
+		dev_priv->r600_max_hw_contexts = 8;
+		dev_priv->r600_max_gs_threads = 16;
+		dev_priv->r600_sx_max_export_size = 128;
+		dev_priv->r600_sx_max_export_pos_size = 16;
+		dev_priv->r600_sx_max_export_smx_size = 128;
+		dev_priv->r600_sq_num_cf_insts = 2;
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		dev_priv->r600_max_pipes = 2;
+		dev_priv->r600_max_tile_pipes = 2;
+		dev_priv->r600_max_simds = 3;
+		dev_priv->r600_max_backends = 1;
+		dev_priv->r600_max_gprs = 128;
+		dev_priv->r600_max_threads = 192;
+		dev_priv->r600_max_stack_entries = 128;
+		dev_priv->r600_max_hw_contexts = 8;
+		dev_priv->r600_max_gs_threads = 4;
+		dev_priv->r600_sx_max_export_size = 128;
+		dev_priv->r600_sx_max_export_pos_size = 16;
+		dev_priv->r600_sx_max_export_smx_size = 128;
+		dev_priv->r600_sq_num_cf_insts = 2;
+		break;
+	case CHIP_RV610:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	case CHIP_RV620:
+		dev_priv->r600_max_pipes = 1;
+		dev_priv->r600_max_tile_pipes = 1;
+		dev_priv->r600_max_simds = 2;
+		dev_priv->r600_max_backends = 1;
+		dev_priv->r600_max_gprs = 128;
+		dev_priv->r600_max_threads = 192;
+		dev_priv->r600_max_stack_entries = 128;
+		dev_priv->r600_max_hw_contexts = 4;
+		dev_priv->r600_max_gs_threads = 4;
+		dev_priv->r600_sx_max_export_size = 128;
+		dev_priv->r600_sx_max_export_pos_size = 16;
+		dev_priv->r600_sx_max_export_smx_size = 128;
+		dev_priv->r600_sq_num_cf_insts = 1;
+		break;
+	case CHIP_RV670:
+		dev_priv->r600_max_pipes = 4;
+		dev_priv->r600_max_tile_pipes = 4;
+		dev_priv->r600_max_simds = 4;
+		dev_priv->r600_max_backends = 4;
+		dev_priv->r600_max_gprs = 192;
+		dev_priv->r600_max_threads = 192;
+		dev_priv->r600_max_stack_entries = 256;
+		dev_priv->r600_max_hw_contexts = 8;
+		dev_priv->r600_max_gs_threads = 16;
+		dev_priv->r600_sx_max_export_size = 128;
+		dev_priv->r600_sx_max_export_pos_size = 16;
+		dev_priv->r600_sx_max_export_smx_size = 128;
+		dev_priv->r600_sq_num_cf_insts = 2;
+		break;
+	default:
+		break;
+	}
+
+	/* Initialize HDP */
+	j = 0;
+	for (i = 0; i < 32; i++) {
+		RADEON_WRITE((0x2c14 + j), 0x00000000);
+		RADEON_WRITE((0x2c18 + j), 0x00000000);
+		RADEON_WRITE((0x2c1c + j), 0x00000000);
+		RADEON_WRITE((0x2c20 + j), 0x00000000);
+		RADEON_WRITE((0x2c24 + j), 0x00000000);
+		j += 0x18;
+	}
+
+	RADEON_WRITE(R600_GRBM_CNTL, R600_GRBM_READ_TIMEOUT(0xff));
+
+	/* setup tiling, simd, pipe config */
+	ramcfg = RADEON_READ(R600_RAMCFG);
+
+	switch (dev_priv->r600_max_tile_pipes) {
+	case 1:
+		gb_tiling_config |= R600_PIPE_TILING(0);
+		break;
+	case 2:
+		gb_tiling_config |= R600_PIPE_TILING(1);
+		break;
+	case 4:
+		gb_tiling_config |= R600_PIPE_TILING(2);
+		break;
+	case 8:
+		gb_tiling_config |= R600_PIPE_TILING(3);
+		break;
+	default:
+		break;
+	}
+
+	gb_tiling_config |= R600_BANK_TILING((ramcfg >> R600_NOOFBANK_SHIFT) & R600_NOOFBANK_MASK);
+
+	gb_tiling_config |= R600_GROUP_SIZE(0);
+
+	if (((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK) > 3) {
+		gb_tiling_config |= R600_ROW_TILING(3);
+		gb_tiling_config |= R600_SAMPLE_SPLIT(3);
+	} else {
+		gb_tiling_config |=
+			R600_ROW_TILING(((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK));
+		gb_tiling_config |=
+			R600_SAMPLE_SPLIT(((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK));
+	}
+
+	gb_tiling_config |= R600_BANK_SWAPS(1);
+
+	cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+	cc_rb_backend_disable |=
+		R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
+
+	cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+	cc_gc_shader_pipe_config |=
+		R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK);
+	cc_gc_shader_pipe_config |=
+		R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK);
+
+	backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+							(R6XX_MAX_BACKENDS -
+							 r600_count_pipe_bits((cc_rb_backend_disable &
+									       R6XX_MAX_BACKENDS_MASK) >> 16)),
+							(cc_rb_backend_disable >> 16));
+	gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+
+	RADEON_WRITE(R600_GB_TILING_CONFIG,      gb_tiling_config);
+	RADEON_WRITE(R600_DCP_TILING_CONFIG,    (gb_tiling_config & 0xffff));
+	RADEON_WRITE(R600_HDP_TILING_CONFIG,    (gb_tiling_config & 0xffff));
+	if (gb_tiling_config & 0xc0) {
+		dev_priv->r600_group_size = 512;
+	} else {
+		dev_priv->r600_group_size = 256;
+	}
+	dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
+	if (gb_tiling_config & 0x30) {
+		dev_priv->r600_nbanks = 8;
+	} else {
+		dev_priv->r600_nbanks = 4;
+	}
+
+	RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
+	RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
+	RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+
+	num_qd_pipes =
+		R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
+	RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
+	RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
+
+	/* set HW defaults for 3D engine */
+	RADEON_WRITE(R600_CP_QUEUE_THRESHOLDS, (R600_ROQ_IB1_START(0x16) |
+						R600_ROQ_IB2_START(0x2b)));
+
+	RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, (R600_MEQ_END(0x40) |
+					      R600_ROQ_END(0x40)));
+
+	RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO |
+					R600_SYNC_GRADIENT |
+					R600_SYNC_WALKER |
+					R600_SYNC_ALIGNER));
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV670)
+		RADEON_WRITE(R600_ARB_GDEC_RD_CNTL, 0x00000021);
+
+	sx_debug_1 = RADEON_READ(R600_SX_DEBUG_1);
+	sx_debug_1 |= R600_SMX_EVENT_RELEASE;
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600)
+		sx_debug_1 |= R600_ENABLE_NEW_SMX_ADDRESS;
+	RADEON_WRITE(R600_SX_DEBUG_1, sx_debug_1);
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880))
+		RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE);
+	else
+		RADEON_WRITE(R600_DB_DEBUG, 0);
+
+	RADEON_WRITE(R600_DB_WATERMARKS, (R600_DEPTH_FREE(4) |
+					  R600_DEPTH_FLUSH(16) |
+					  R600_DEPTH_PENDING_FREE(4) |
+					  R600_DEPTH_CACHELINE_FREE(16)));
+	RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
+	RADEON_WRITE(R600_VGT_NUM_INSTANCES, 0);
+
+	RADEON_WRITE(R600_SPI_CONFIG_CNTL, R600_GPR_WRITE_PRIORITY(0));
+	RADEON_WRITE(R600_SPI_CONFIG_CNTL_1, R600_VTX_DONE_DELAY(0));
+
+	sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES);
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) {
+		sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) |
+				    R600_FETCH_FIFO_HIWATER(0xa) |
+				    R600_DONE_FIFO_HIWATER(0xe0) |
+				    R600_ALU_UPDATE_FIFO_HIWATER(0x8));
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630)) {
+		sq_ms_fifo_sizes &= ~R600_DONE_FIFO_HIWATER(0xff);
+		sq_ms_fifo_sizes |= R600_DONE_FIFO_HIWATER(0x4);
+	}
+	RADEON_WRITE(R600_SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
+
+	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
+	 */
+	sq_config = RADEON_READ(R600_SQ_CONFIG);
+	sq_config &= ~(R600_PS_PRIO(3) |
+		       R600_VS_PRIO(3) |
+		       R600_GS_PRIO(3) |
+		       R600_ES_PRIO(3));
+	sq_config |= (R600_DX9_CONSTS |
+		      R600_VC_ENABLE |
+		      R600_PS_PRIO(0) |
+		      R600_VS_PRIO(1) |
+		      R600_GS_PRIO(2) |
+		      R600_ES_PRIO(3));
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) {
+		sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(124) |
+					  R600_NUM_VS_GPRS(124) |
+					  R600_NUM_CLAUSE_TEMP_GPRS(4));
+		sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(0) |
+					  R600_NUM_ES_GPRS(0));
+		sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(136) |
+					   R600_NUM_VS_THREADS(48) |
+					   R600_NUM_GS_THREADS(4) |
+					   R600_NUM_ES_THREADS(4));
+		sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(128) |
+					    R600_NUM_VS_STACK_ENTRIES(128));
+		sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(0) |
+					    R600_NUM_ES_STACK_ENTRIES(0));
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) {
+		/* no vertex cache */
+		sq_config &= ~R600_VC_ENABLE;
+
+		sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
+					  R600_NUM_VS_GPRS(44) |
+					  R600_NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(17) |
+					  R600_NUM_ES_GPRS(17));
+		sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
+					   R600_NUM_VS_THREADS(78) |
+					   R600_NUM_GS_THREADS(4) |
+					   R600_NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(40) |
+					    R600_NUM_VS_STACK_ENTRIES(40));
+		sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(32) |
+					    R600_NUM_ES_STACK_ENTRIES(16));
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV635)) {
+		sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
+					  R600_NUM_VS_GPRS(44) |
+					  R600_NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(18) |
+					  R600_NUM_ES_GPRS(18));
+		sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
+					   R600_NUM_VS_THREADS(78) |
+					   R600_NUM_GS_THREADS(4) |
+					   R600_NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(40) |
+					    R600_NUM_VS_STACK_ENTRIES(40));
+		sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(32) |
+					    R600_NUM_ES_STACK_ENTRIES(16));
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV670) {
+		sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) |
+					  R600_NUM_VS_GPRS(44) |
+					  R600_NUM_CLAUSE_TEMP_GPRS(2));
+		sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(17) |
+					  R600_NUM_ES_GPRS(17));
+		sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) |
+					   R600_NUM_VS_THREADS(78) |
+					   R600_NUM_GS_THREADS(4) |
+					   R600_NUM_ES_THREADS(31));
+		sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(64) |
+					    R600_NUM_VS_STACK_ENTRIES(64));
+		sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(64) |
+					    R600_NUM_ES_STACK_ENTRIES(64));
+	}
+
+	RADEON_WRITE(R600_SQ_CONFIG, sq_config);
+	RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
+	RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
+	RADEON_WRITE(R600_SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+	RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
+	RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880))
+		RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY));
+	else
+		RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC));
+
+	RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_2S, (R600_S0_X(0xc) |
+						    R600_S0_Y(0x4) |
+						    R600_S1_X(0x4) |
+						    R600_S1_Y(0xc)));
+	RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_4S, (R600_S0_X(0xe) |
+						    R600_S0_Y(0xe) |
+						    R600_S1_X(0x2) |
+						    R600_S1_Y(0x2) |
+						    R600_S2_X(0xa) |
+						    R600_S2_Y(0x6) |
+						    R600_S3_X(0x6) |
+						    R600_S3_Y(0xa)));
+	RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_8S_WD0, (R600_S0_X(0xe) |
+							R600_S0_Y(0xb) |
+							R600_S1_X(0x4) |
+							R600_S1_Y(0xc) |
+							R600_S2_X(0x1) |
+							R600_S2_Y(0x6) |
+							R600_S3_X(0xa) |
+							R600_S3_Y(0xe)));
+	RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_8S_WD1, (R600_S4_X(0x6) |
+							R600_S4_Y(0x1) |
+							R600_S5_X(0x0) |
+							R600_S5_Y(0x0) |
+							R600_S6_X(0xb) |
+							R600_S6_Y(0x4) |
+							R600_S7_X(0x7) |
+							R600_S7_Y(0x8)));
+
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_R600:
+	case CHIP_RV630:
+	case CHIP_RV635:
+		gs_prim_buffer_depth = 0;
+		break;
+	case CHIP_RV610:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	case CHIP_RV620:
+		gs_prim_buffer_depth = 32;
+		break;
+	case CHIP_RV670:
+		gs_prim_buffer_depth = 128;
+		break;
+	default:
+		break;
+	}
+
+	num_gs_verts_per_thread = dev_priv->r600_max_pipes * 16;
+	vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
+	/* Max value for this is 256 */
+	if (vgt_gs_per_es > 256)
+		vgt_gs_per_es = 256;
+
+	RADEON_WRITE(R600_VGT_ES_PER_GS, 128);
+	RADEON_WRITE(R600_VGT_GS_PER_ES, vgt_gs_per_es);
+	RADEON_WRITE(R600_VGT_GS_PER_VS, 2);
+	RADEON_WRITE(R600_VGT_GS_VERTEX_REUSE, 16);
+
+	/* more default values. 2D/3D driver should adjust as needed */
+	RADEON_WRITE(R600_PA_SC_LINE_STIPPLE_STATE, 0);
+	RADEON_WRITE(R600_VGT_STRMOUT_EN, 0);
+	RADEON_WRITE(R600_SX_MISC, 0);
+	RADEON_WRITE(R600_PA_SC_MODE_CNTL, 0);
+	RADEON_WRITE(R600_PA_SC_AA_CONFIG, 0);
+	RADEON_WRITE(R600_PA_SC_LINE_STIPPLE, 0);
+	RADEON_WRITE(R600_SPI_INPUT_Z, 0);
+	RADEON_WRITE(R600_SPI_PS_IN_CONTROL_0, R600_NUM_INTERP(2));
+	RADEON_WRITE(R600_CB_COLOR7_FRAG, 0);
+
+	/* clear render buffer base addresses */
+	RADEON_WRITE(R600_CB_COLOR0_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR1_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR2_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR3_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR4_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR5_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR6_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR7_BASE, 0);
+
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_RV610:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	case CHIP_RV620:
+		tc_cntl = R600_TC_L2_SIZE(8);
+		break;
+	case CHIP_RV630:
+	case CHIP_RV635:
+		tc_cntl = R600_TC_L2_SIZE(4);
+		break;
+	case CHIP_R600:
+		tc_cntl = R600_TC_L2_SIZE(0) | R600_L2_DISABLE_LATE_HIT;
+		break;
+	default:
+		tc_cntl = R600_TC_L2_SIZE(0);
+		break;
+	}
+
+	RADEON_WRITE(R600_TC_CNTL, tc_cntl);
+
+	hdp_host_path_cntl = RADEON_READ(R600_HDP_HOST_PATH_CNTL);
+	RADEON_WRITE(R600_HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	arb_pop = RADEON_READ(R600_ARB_POP);
+	arb_pop |= R600_ENABLE_TC128;
+	RADEON_WRITE(R600_ARB_POP, arb_pop);
+
+	RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
+	RADEON_WRITE(R600_PA_CL_ENHANCE, (R600_CLIP_VTX_REORDER_ENA |
+					  R600_NUM_CLIP_SEQ(3)));
+	RADEON_WRITE(R600_PA_SC_ENHANCE, R600_FORCE_EOV_MAX_CLK_CNT(4095));
+}
+
+static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv,
+					     u32 num_tile_pipes,
+					     u32 num_backends,
+					     u32 backend_disable_mask)
+{
+	u32 backend_map = 0;
+	u32 enabled_backends_mask;
+	u32 enabled_backends_count;
+	u32 cur_pipe;
+	u32 swizzle_pipe[R7XX_MAX_PIPES];
+	u32 cur_backend;
+	u32 i;
+	bool force_no_swizzle;
+
+	if (num_tile_pipes > R7XX_MAX_PIPES)
+		num_tile_pipes = R7XX_MAX_PIPES;
+	if (num_tile_pipes < 1)
+		num_tile_pipes = 1;
+	if (num_backends > R7XX_MAX_BACKENDS)
+		num_backends = R7XX_MAX_BACKENDS;
+	if (num_backends < 1)
+		num_backends = 1;
+
+	enabled_backends_mask = 0;
+	enabled_backends_count = 0;
+	for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
+		if (((backend_disable_mask >> i) & 1) == 0) {
+			enabled_backends_mask |= (1 << i);
+			++enabled_backends_count;
+		}
+		if (enabled_backends_count == num_backends)
+			break;
+	}
+
+	if (enabled_backends_count == 0) {
+		enabled_backends_mask = 1;
+		enabled_backends_count = 1;
+	}
+
+	if (enabled_backends_count != num_backends)
+		num_backends = enabled_backends_count;
+
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+		force_no_swizzle = false;
+		break;
+	case CHIP_RV710:
+	case CHIP_RV740:
+	default:
+		force_no_swizzle = true;
+		break;
+	}
+
+	memset(swizzle_pipe, 0, sizeof(swizzle_pipe));
+	switch (num_tile_pipes) {
+	case 1:
+		swizzle_pipe[0] = 0;
+		break;
+	case 2:
+		swizzle_pipe[0] = 0;
+		swizzle_pipe[1] = 1;
+		break;
+	case 3:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 1;
+		}
+		break;
+	case 4:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 3;
+			swizzle_pipe[3] = 1;
+		}
+		break;
+	case 5:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+			swizzle_pipe[4] = 4;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 4;
+			swizzle_pipe[3] = 1;
+			swizzle_pipe[4] = 3;
+		}
+		break;
+	case 6:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+			swizzle_pipe[4] = 4;
+			swizzle_pipe[5] = 5;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 4;
+			swizzle_pipe[3] = 5;
+			swizzle_pipe[4] = 3;
+			swizzle_pipe[5] = 1;
+		}
+		break;
+	case 7:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+			swizzle_pipe[4] = 4;
+			swizzle_pipe[5] = 5;
+			swizzle_pipe[6] = 6;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 4;
+			swizzle_pipe[3] = 6;
+			swizzle_pipe[4] = 3;
+			swizzle_pipe[5] = 1;
+			swizzle_pipe[6] = 5;
+		}
+		break;
+	case 8:
+		if (force_no_swizzle) {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 1;
+			swizzle_pipe[2] = 2;
+			swizzle_pipe[3] = 3;
+			swizzle_pipe[4] = 4;
+			swizzle_pipe[5] = 5;
+			swizzle_pipe[6] = 6;
+			swizzle_pipe[7] = 7;
+		} else {
+			swizzle_pipe[0] = 0;
+			swizzle_pipe[1] = 2;
+			swizzle_pipe[2] = 4;
+			swizzle_pipe[3] = 6;
+			swizzle_pipe[4] = 3;
+			swizzle_pipe[5] = 1;
+			swizzle_pipe[6] = 7;
+			swizzle_pipe[7] = 5;
+		}
+		break;
+	}
+
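+	/* e.g. num_tile_pipes = 4 with backends 0 and 1 enabled and swizzle
+	 * map {0, 2, 3, 1}: backends 0,1,0,1 land at bit offsets 0,4,6,2,
+	 * giving backend_map = 0x14.
+	 */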
+	cur_backend = 0;
+	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+		while (((1 << cur_backend) & enabled_backends_mask) == 0)
+			cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
+
+		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
+
+		cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
+	}
+
+	return backend_map;
+}
+
+static void r700_gfx_init(struct drm_device *dev,
+			  drm_radeon_private_t *dev_priv)
+{
+	int i, j, num_qd_pipes;
+	u32 ta_aux_cntl;
+	u32 sx_debug_1;
+	u32 smx_dc_ctl0;
+	u32 db_debug3;
+	u32 num_gs_verts_per_thread;
+	u32 vgt_gs_per_es;
+	u32 gs_prim_buffer_depth = 0;
+	u32 sq_ms_fifo_sizes;
+	u32 sq_config;
+	u32 sq_thread_resource_mgmt;
+	u32 hdp_host_path_cntl;
+	u32 sq_dyn_gpr_size_simd_ab_0;
+	u32 backend_map;
+	u32 gb_tiling_config = 0;
+	u32 cc_rb_backend_disable;
+	u32 cc_gc_shader_pipe_config;
+	u32 mc_arb_ramcfg;
+	u32 db_debug4;
+
+	/* setup chip specs */
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_RV770:
+		dev_priv->r600_max_pipes = 4;
+		dev_priv->r600_max_tile_pipes = 8;
+		dev_priv->r600_max_simds = 10;
+		dev_priv->r600_max_backends = 4;
+		dev_priv->r600_max_gprs = 256;
+		dev_priv->r600_max_threads = 248;
+		dev_priv->r600_max_stack_entries = 512;
+		dev_priv->r600_max_hw_contexts = 8;
+		dev_priv->r600_max_gs_threads = 16 * 2;
+		dev_priv->r600_sx_max_export_size = 128;
+		dev_priv->r600_sx_max_export_pos_size = 16;
+		dev_priv->r600_sx_max_export_smx_size = 112;
+		dev_priv->r600_sq_num_cf_insts = 2;
+
+		dev_priv->r700_sx_num_of_sets = 7;
+		dev_priv->r700_sc_prim_fifo_size = 0xf9;
+		dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
+		dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+		break;
+	case CHIP_RV730:
+		dev_priv->r600_max_pipes = 2;
+		dev_priv->r600_max_tile_pipes = 4;
+		dev_priv->r600_max_simds = 8;
+		dev_priv->r600_max_backends = 2;
+		dev_priv->r600_max_gprs = 128;
+		dev_priv->r600_max_threads = 248;
+		dev_priv->r600_max_stack_entries = 256;
+		dev_priv->r600_max_hw_contexts = 8;
+		dev_priv->r600_max_gs_threads = 16 * 2;
+		dev_priv->r600_sx_max_export_size = 256;
+		dev_priv->r600_sx_max_export_pos_size = 32;
+		dev_priv->r600_sx_max_export_smx_size = 224;
+		dev_priv->r600_sq_num_cf_insts = 2;
+
+		dev_priv->r700_sx_num_of_sets = 7;
+		dev_priv->r700_sc_prim_fifo_size = 0xf9;
+		dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
+		dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+		if (dev_priv->r600_sx_max_export_pos_size > 16) {
+			dev_priv->r600_sx_max_export_pos_size -= 16;
+			dev_priv->r600_sx_max_export_smx_size += 16;
+		}
+		break;
+	case CHIP_RV710:
+		dev_priv->r600_max_pipes = 2;
+		dev_priv->r600_max_tile_pipes = 2;
+		dev_priv->r600_max_simds = 2;
+		dev_priv->r600_max_backends = 1;
+		dev_priv->r600_max_gprs = 256;
+		dev_priv->r600_max_threads = 192;
+		dev_priv->r600_max_stack_entries = 256;
+		dev_priv->r600_max_hw_contexts = 4;
+		dev_priv->r600_max_gs_threads = 8 * 2;
+		dev_priv->r600_sx_max_export_size = 128;
+		dev_priv->r600_sx_max_export_pos_size = 16;
+		dev_priv->r600_sx_max_export_smx_size = 112;
+		dev_priv->r600_sq_num_cf_insts = 1;
+
+		dev_priv->r700_sx_num_of_sets = 7;
+		dev_priv->r700_sc_prim_fifo_size = 0x40;
+		dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
+		dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+		break;
+	case CHIP_RV740:
+		dev_priv->r600_max_pipes = 4;
+		dev_priv->r600_max_tile_pipes = 4;
+		dev_priv->r600_max_simds = 8;
+		dev_priv->r600_max_backends = 4;
+		dev_priv->r600_max_gprs = 256;
+		dev_priv->r600_max_threads = 248;
+		dev_priv->r600_max_stack_entries = 512;
+		dev_priv->r600_max_hw_contexts = 8;
+		dev_priv->r600_max_gs_threads = 16 * 2;
+		dev_priv->r600_sx_max_export_size = 256;
+		dev_priv->r600_sx_max_export_pos_size = 32;
+		dev_priv->r600_sx_max_export_smx_size = 224;
+		dev_priv->r600_sq_num_cf_insts = 2;
+
+		dev_priv->r700_sx_num_of_sets = 7;
+		dev_priv->r700_sc_prim_fifo_size = 0x100;
+		dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
+		dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
+
+		if (dev_priv->r600_sx_max_export_pos_size > 16) {
+			dev_priv->r600_sx_max_export_pos_size -= 16;
+			dev_priv->r600_sx_max_export_smx_size += 16;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Initialize HDP */
+	j = 0;
+	for (i = 0; i < 32; i++) {
+		RADEON_WRITE((0x2c14 + j), 0x00000000);
+		RADEON_WRITE((0x2c18 + j), 0x00000000);
+		RADEON_WRITE((0x2c1c + j), 0x00000000);
+		RADEON_WRITE((0x2c20 + j), 0x00000000);
+		RADEON_WRITE((0x2c24 + j), 0x00000000);
+		j += 0x18;
+	}
+
+	RADEON_WRITE(R600_GRBM_CNTL, R600_GRBM_READ_TIMEOUT(0xff));
+
+	/* setup tiling, simd, pipe config */
+	mc_arb_ramcfg = RADEON_READ(R700_MC_ARB_RAMCFG);
+
+	switch (dev_priv->r600_max_tile_pipes) {
+	case 1:
+		gb_tiling_config |= R600_PIPE_TILING(0);
+		break;
+	case 2:
+		gb_tiling_config |= R600_PIPE_TILING(1);
+		break;
+	case 4:
+		gb_tiling_config |= R600_PIPE_TILING(2);
+		break;
+	case 8:
+		gb_tiling_config |= R600_PIPE_TILING(3);
+		break;
+	default:
+		break;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)
+		gb_tiling_config |= R600_BANK_TILING(1);
+	else
+		gb_tiling_config |= R600_BANK_TILING((mc_arb_ramcfg >> R700_NOOFBANK_SHIFT) & R700_NOOFBANK_MASK);
+
+	gb_tiling_config |= R600_GROUP_SIZE(0);
+
+	if (((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK) > 3) {
+		gb_tiling_config |= R600_ROW_TILING(3);
+		gb_tiling_config |= R600_SAMPLE_SPLIT(3);
+	} else {
+		gb_tiling_config |=
+			R600_ROW_TILING(((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK));
+		gb_tiling_config |=
+			R600_SAMPLE_SPLIT(((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK));
+	}
+
+	gb_tiling_config |= R600_BANK_SWAPS(1);
+
+	cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+	cc_rb_backend_disable |=
+		R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
+
+	cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+	cc_gc_shader_pipe_config |=
+		R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK);
+	cc_gc_shader_pipe_config |=
+		R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+		backend_map = 0x28;
+	else
+		backend_map = r700_get_tile_pipe_to_backend_map(dev_priv,
+								dev_priv->r600_max_tile_pipes,
+								(R7XX_MAX_BACKENDS -
+								 r600_count_pipe_bits((cc_rb_backend_disable &
+										       R7XX_MAX_BACKENDS_MASK) >> 16)),
+								(cc_rb_backend_disable >> 16));
+	gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+
+	RADEON_WRITE(R600_GB_TILING_CONFIG,      gb_tiling_config);
+	RADEON_WRITE(R600_DCP_TILING_CONFIG,    (gb_tiling_config & 0xffff));
+	RADEON_WRITE(R600_HDP_TILING_CONFIG,    (gb_tiling_config & 0xffff));
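+	/* Decode what was just programmed: PIPE_TILING (log2 pipes) is in
+	 * bits [3:1], BANK_TILING in bits [5:4] and GROUP_SIZE in bits [7:6]
+	 * of GB_TILING_CONFIG.
+	 */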
+	if (gb_tiling_config & 0xc0) {
+		dev_priv->r600_group_size = 512;
+	} else {
+		dev_priv->r600_group_size = 256;
+	}
+	dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
+	if (gb_tiling_config & 0x30) {
+		dev_priv->r600_nbanks = 8;
+	} else {
+		dev_priv->r600_nbanks = 4;
+	}
+
+	RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
+	RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
+	RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+
+	RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+	RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
+	RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
+	RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
+	RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
+
+	num_qd_pipes =
+		R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
+	RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
+	RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
+
+	/* set HW defaults for 3D engine */
+	RADEON_WRITE(R600_CP_QUEUE_THRESHOLDS, (R600_ROQ_IB1_START(0x16) |
+						R600_ROQ_IB2_START(0x2b)));
+
+	RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30));
+
+	ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX);
+	RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO);
+
+	sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1);
+	sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS;
+	RADEON_WRITE(R700_SX_DEBUG_1, sx_debug_1);
+
+	smx_dc_ctl0 = RADEON_READ(R600_SMX_DC_CTL0);
+	smx_dc_ctl0 &= ~R700_CACHE_DEPTH(0x1ff);
+	smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1);
+	RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740)
+		RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
+						  R700_GS_FLUSH_CTL(4) |
+						  R700_ACK_FLUSH_CTL(3) |
+						  R700_SYNC_FLUSH_CTL));
+
+	db_debug3 = RADEON_READ(R700_DB_DEBUG3);
+	db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f);
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_RV770:
+	case CHIP_RV740:
+		db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f);
+		break;
+	case CHIP_RV710:
+	case CHIP_RV730:
+	default:
+		db_debug3 |= R700_DB_CLK_OFF_DELAY(2);
+		break;
+	}
+	RADEON_WRITE(R700_DB_DEBUG3, db_debug3);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) {
+		db_debug4 = RADEON_READ(RV700_DB_DEBUG4);
+		db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER;
+		RADEON_WRITE(RV700_DB_DEBUG4, db_debug4);
+	}
+
+	RADEON_WRITE(R600_SX_EXPORT_BUFFER_SIZES, (R600_COLOR_BUFFER_SIZE((dev_priv->r600_sx_max_export_size / 4) - 1) |
+						   R600_POSITION_BUFFER_SIZE((dev_priv->r600_sx_max_export_pos_size / 4) - 1) |
+						   R600_SMX_BUFFER_SIZE((dev_priv->r600_sx_max_export_smx_size / 4) - 1)));
+
+	RADEON_WRITE(R700_PA_SC_FIFO_SIZE_R7XX, (R700_SC_PRIM_FIFO_SIZE(dev_priv->r700_sc_prim_fifo_size) |
+						 R700_SC_HIZ_TILE_FIFO_SIZE(dev_priv->r700_sc_hiz_tile_fifo_size) |
+						 R700_SC_EARLYZ_TILE_FIFO_SIZE(dev_priv->r700_sc_earlyz_tile_fifo_fize)));
+
+	RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
+
+	RADEON_WRITE(R600_VGT_NUM_INSTANCES, 1);
+
+	RADEON_WRITE(R600_SPI_CONFIG_CNTL, R600_GPR_WRITE_PRIORITY(0));
+
+	RADEON_WRITE(R600_SPI_CONFIG_CNTL_1, R600_VTX_DONE_DELAY(4));
+
+	RADEON_WRITE(R600_CP_PERFMON_CNTL, 0);
+
+	sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(16 * dev_priv->r600_sq_num_cf_insts) |
+			    R600_DONE_FIFO_HIWATER(0xe0) |
+			    R600_ALU_UPDATE_FIFO_HIWATER(0x8));
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+		sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
+		break;
+	case CHIP_RV740:
+	default:
+		sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
+		break;
+	}
+	RADEON_WRITE(R600_SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
+
+	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
+	 */
+	sq_config = RADEON_READ(R600_SQ_CONFIG);
+	sq_config &= ~(R600_PS_PRIO(3) |
+		       R600_VS_PRIO(3) |
+		       R600_GS_PRIO(3) |
+		       R600_ES_PRIO(3));
+	sq_config |= (R600_DX9_CONSTS |
+		      R600_VC_ENABLE |
+		      R600_EXPORT_SRC_C |
+		      R600_PS_PRIO(0) |
+		      R600_VS_PRIO(1) |
+		      R600_GS_PRIO(2) |
+		      R600_ES_PRIO(3));
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)
+		/* no vertex cache */
+		sq_config &= ~R600_VC_ENABLE;
+
+	RADEON_WRITE(R600_SQ_CONFIG, sq_config);
+
+	RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_1,  (R600_NUM_PS_GPRS((dev_priv->r600_max_gprs * 24)/64) |
+						    R600_NUM_VS_GPRS((dev_priv->r600_max_gprs * 24)/64) |
+						    R600_NUM_CLAUSE_TEMP_GPRS(((dev_priv->r600_max_gprs * 24)/64)/2)));
+
+	RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_2,  (R600_NUM_GS_GPRS((dev_priv->r600_max_gprs * 7)/64) |
+						    R600_NUM_ES_GPRS((dev_priv->r600_max_gprs * 7)/64)));
+
+	sq_thread_resource_mgmt = (R600_NUM_PS_THREADS((dev_priv->r600_max_threads * 4)/8) |
+				   R600_NUM_VS_THREADS((dev_priv->r600_max_threads * 2)/8) |
+				   R600_NUM_ES_THREADS((dev_priv->r600_max_threads * 1)/8));
+	if (((dev_priv->r600_max_threads * 1) / 8) > dev_priv->r600_max_gs_threads)
+		sq_thread_resource_mgmt |= R600_NUM_GS_THREADS(dev_priv->r600_max_gs_threads);
+	else
+		sq_thread_resource_mgmt |= R600_NUM_GS_THREADS((dev_priv->r600_max_gs_threads * 1)/8);
+	RADEON_WRITE(R600_SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+
+	RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_1, (R600_NUM_PS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4) |
+						     R600_NUM_VS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4)));
+
+	RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_2, (R600_NUM_GS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4) |
+						     R600_NUM_ES_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4)));
+
+	sq_dyn_gpr_size_simd_ab_0 = (R700_SIMDA_RING0((dev_priv->r600_max_gprs * 38)/64) |
+				     R700_SIMDA_RING1((dev_priv->r600_max_gprs * 38)/64) |
+				     R700_SIMDB_RING0((dev_priv->r600_max_gprs * 38)/64) |
+				     R700_SIMDB_RING1((dev_priv->r600_max_gprs * 38)/64));
+
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
+	RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);
+
+	RADEON_WRITE(R700_PA_SC_FORCE_EOV_MAX_CNTS, (R700_FORCE_EOV_MAX_CLK_CNT(4095) |
+						     R700_FORCE_EOV_MAX_REZ_CNT(255)));
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710)
+		RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, (R600_CACHE_INVALIDATION(R600_TC_ONLY) |
+							   R700_AUTO_INVLD_EN(R700_ES_AND_GS_AUTO)));
+	else
+		RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, (R600_CACHE_INVALIDATION(R600_VC_AND_TC) |
+							   R700_AUTO_INVLD_EN(R700_ES_AND_GS_AUTO)));
+
+	switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV740:
+		gs_prim_buffer_depth = 384;
+		break;
+	case CHIP_RV710:
+		gs_prim_buffer_depth = 128;
+		break;
+	default:
+		break;
+	}
+
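+	/* e.g. RV770: 4 pipes * 16 = 64 GS verts per thread; 384 + 64 = 448
+	 * is then clamped to the 256 maximum below.
+	 */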
+	num_gs_verts_per_thread = dev_priv->r600_max_pipes * 16;
+	vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
+	/* Max value for this is 256 */
+	if (vgt_gs_per_es > 256)
+		vgt_gs_per_es = 256;
+
+	RADEON_WRITE(R600_VGT_ES_PER_GS, 128);
+	RADEON_WRITE(R600_VGT_GS_PER_ES, vgt_gs_per_es);
+	RADEON_WRITE(R600_VGT_GS_PER_VS, 2);
+
+	/* more default values. 2D/3D driver should adjust as needed */
+	RADEON_WRITE(R600_VGT_GS_VERTEX_REUSE, 16);
+	RADEON_WRITE(R600_PA_SC_LINE_STIPPLE_STATE, 0);
+	RADEON_WRITE(R600_VGT_STRMOUT_EN, 0);
+	RADEON_WRITE(R600_SX_MISC, 0);
+	RADEON_WRITE(R600_PA_SC_MODE_CNTL, 0);
+	RADEON_WRITE(R700_PA_SC_EDGERULE, 0xaaaaaaaa);
+	RADEON_WRITE(R600_PA_SC_AA_CONFIG, 0);
+	RADEON_WRITE(R600_PA_SC_CLIPRECT_RULE, 0xffff);
+	RADEON_WRITE(R600_PA_SC_LINE_STIPPLE, 0);
+	RADEON_WRITE(R600_SPI_INPUT_Z, 0);
+	RADEON_WRITE(R600_SPI_PS_IN_CONTROL_0, R600_NUM_INTERP(2));
+	RADEON_WRITE(R600_CB_COLOR7_FRAG, 0);
+
+	/* clear render buffer base addresses */
+	RADEON_WRITE(R600_CB_COLOR0_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR1_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR2_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR3_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR4_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR5_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR6_BASE, 0);
+	RADEON_WRITE(R600_CB_COLOR7_BASE, 0);
+
+	RADEON_WRITE(R700_TCP_CNTL, 0);
+
+	hdp_host_path_cntl = RADEON_READ(R600_HDP_HOST_PATH_CNTL);
+	RADEON_WRITE(R600_HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0);
+
+	RADEON_WRITE(R600_PA_CL_ENHANCE, (R600_CLIP_VTX_REORDER_ENA |
+					  R600_NUM_CLIP_SEQ(3)));
+}
+
+static void r600_cp_init_ring_buffer(struct drm_device *dev,
+				       drm_radeon_private_t *dev_priv,
+				       struct drm_file *file_priv)
+{
+	struct drm_radeon_master_private *master_priv;
+	u32 ring_start;
+	u64 rptr_addr;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+		r700_gfx_init(dev, dev_priv);
+	else
+		r600_gfx_init(dev, dev_priv);
+
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
+	RADEON_READ(R600_GRBM_SOFT_RESET);
+	mdelay(15);
+	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+
+	/* Set ring buffer size */
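+	/* The CP_RB_CNTL size fields are log2 values in quad-word (8-byte)
+	 * units; size_l2qw and rptr_update_l2qw are derived with
+	 * drm_order(bytes / 8) when the ring is set up.
+	 */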
+#ifdef __BIG_ENDIAN
+	RADEON_WRITE(R600_CP_RB_CNTL,
+		     R600_BUF_SWAP_32BIT |
+		     R600_RB_NO_UPDATE |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#else
+	RADEON_WRITE(R600_CP_RB_CNTL,
+		     RADEON_RB_NO_UPDATE |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#endif
+
+	RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0);
+
+	/* Set the write pointer delay */
+	RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
+
+#ifdef __BIG_ENDIAN
+	RADEON_WRITE(R600_CP_RB_CNTL,
+		     R600_BUF_SWAP_32BIT |
+		     R600_RB_NO_UPDATE |
+		     R600_RB_RPTR_WR_ENA |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#else
+	RADEON_WRITE(R600_CP_RB_CNTL,
+		     R600_RB_NO_UPDATE |
+		     R600_RB_RPTR_WR_ENA |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#endif
+
+	/* Initialize the ring buffer's read and write pointers */
+	RADEON_WRITE(R600_CP_RB_RPTR_WR, 0);
+	RADEON_WRITE(R600_CP_RB_WPTR, 0);
+	SET_RING_HEAD(dev_priv, 0);
+	dev_priv->ring.tail = 0;
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		rptr_addr = dev_priv->ring_rptr->offset
+			- dev->agp->base +
+			dev_priv->gart_vm_start;
+	} else
+#endif
+	{
+		rptr_addr = dev_priv->ring_rptr->offset
+			- ((unsigned long) dev->sg->virtual)
+			+ dev_priv->gart_vm_start;
+	}
+	RADEON_WRITE(R600_CP_RB_RPTR_ADDR, (rptr_addr & 0xfffffffc));
+	RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr));
+
+#ifdef __BIG_ENDIAN
+	RADEON_WRITE(R600_CP_RB_CNTL,
+		     RADEON_BUF_SWAP_32BIT |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#else
+	RADEON_WRITE(R600_CP_RB_CNTL,
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#endif
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		/* XXX */
+		radeon_write_agp_base(dev_priv, dev->agp->base);
+
+		/* XXX */
+		radeon_write_agp_location(dev_priv,
+			     (((dev_priv->gart_vm_start - 1 +
+				dev_priv->gart_size) & 0xffff0000) |
+			      (dev_priv->gart_vm_start >> 16)));
+
+		ring_start = (dev_priv->cp_ring->offset
+			      - dev->agp->base
+			      + dev_priv->gart_vm_start);
+	} else
+#endif
+		ring_start = (dev_priv->cp_ring->offset
+			      - (unsigned long)dev->sg->virtual
+			      + dev_priv->gart_vm_start);
+
+	RADEON_WRITE(R600_CP_RB_BASE, ring_start >> 8);
+
+	RADEON_WRITE(R600_CP_ME_CNTL, 0xff);
+
+	RADEON_WRITE(R600_CP_DEBUG, (1 << 27) | (1 << 28));
+
+	/* Initialize the scratch register pointer.  This will cause
+	 * the scratch register values to be written out to memory
+	 * whenever they are updated.
+	 *
+	 * We simply put this behind the ring read pointer, this works
+	 * with PCI GART as well as (whatever kind of) AGP GART
+	 */
+	{
+		u64 scratch_addr;
+
+		scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
+		scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
+		scratch_addr += R600_SCRATCH_REG_OFFSET;
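+		/* R600_SCRATCH_ADDR holds the address in 256-byte units */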
+		scratch_addr >>= 8;
+		scratch_addr &= 0xffffffff;
+
+		RADEON_WRITE(R600_SCRATCH_ADDR, (uint32_t)scratch_addr);
+	}
+
+	RADEON_WRITE(R600_SCRATCH_UMSK, 0x7);
+
+	/* Turn on bus mastering */
+	radeon_enable_bm(dev_priv);
+
+	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(0), 0);
+	RADEON_WRITE(R600_LAST_FRAME_REG, 0);
+
+	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(1), 0);
+	RADEON_WRITE(R600_LAST_DISPATCH_REG, 0);
+
+	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(2), 0);
+	RADEON_WRITE(R600_LAST_CLEAR_REG, 0);
+
+	/* reset sarea copies of these */
+	master_priv = file_priv->master->driver_priv;
+	if (master_priv->sarea_priv) {
+		master_priv->sarea_priv->last_frame = 0;
+		master_priv->sarea_priv->last_dispatch = 0;
+		master_priv->sarea_priv->last_clear = 0;
+	}
+
+	r600_do_wait_for_idle(dev_priv);
+}
+
+int r600_do_cleanup_cp(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	/* Make sure interrupts are disabled here because the uninstall ioctl
+	 * may not have been called from userspace and after dev_private
+	 * is freed, it's too late.
+	 */
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		if (dev_priv->cp_ring != NULL) {
+			drm_core_ioremapfree(dev_priv->cp_ring, dev);
+			dev_priv->cp_ring = NULL;
+		}
+		if (dev_priv->ring_rptr != NULL) {
+			drm_core_ioremapfree(dev_priv->ring_rptr, dev);
+			dev_priv->ring_rptr = NULL;
+		}
+		if (dev->agp_buffer_map != NULL) {
+			drm_core_ioremapfree(dev->agp_buffer_map, dev);
+			dev->agp_buffer_map = NULL;
+		}
+	} else
+#endif
+	{
+
+		if (dev_priv->gart_info.bus_addr)
+			r600_page_table_cleanup(dev, &dev_priv->gart_info);
+
+		if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) {
+			drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
+			dev_priv->gart_info.addr = NULL;
+		}
+	}
+	/* only clear to the start of flags */
+	memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
+
+	return 0;
+}
+
+int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
+		    struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+
+	DRM_DEBUG("\n");
+
+	mutex_init(&dev_priv->cs_mutex);
+	r600_cs_legacy_init();
+	/* if we require the new memory map but don't have it, fail */
+	if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
+		DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
+		DRM_DEBUG("Forcing AGP card to PCI mode\n");
+		dev_priv->flags &= ~RADEON_IS_AGP;
+		/* The writeback test succeeds, but when writeback is enabled,
+		 * the ring buffer read ptr update fails after first 128 bytes.
+		 */
+		radeon_no_wb = 1;
+	} else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
+		 && !init->is_pci) {
+		DRM_DEBUG("Restoring AGP flag\n");
+		dev_priv->flags |= RADEON_IS_AGP;
+	}
+
+	dev_priv->usec_timeout = init->usec_timeout;
+	if (dev_priv->usec_timeout < 1 ||
+	    dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
+		DRM_DEBUG("TIMEOUT problem!\n");
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	/* Enable vblank on CRTC1 for older X servers
+	 */
+	dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
+	dev_priv->do_boxes = 0;
+	dev_priv->cp_mode = init->cp_mode;
+
+	/* We don't support anything other than bus-mastering ring mode,
+	 * but the ring can be in either AGP or PCI space for the ring
+	 * read pointer.
+	 */
+	if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
+	    (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
+		DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	switch (init->fb_bpp) {
+	case 16:
+		dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
+		break;
+	case 32:
+	default:
+		dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
+		break;
+	}
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->front_pitch = init->front_pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->back_pitch = init->back_pitch;
+
+	dev_priv->ring_offset = init->ring_offset;
+	dev_priv->ring_rptr_offset = init->ring_rptr_offset;
+	dev_priv->buffers_offset = init->buffers_offset;
+	dev_priv->gart_textures_offset = init->gart_textures_offset;
+
+	master_priv->sarea = drm_getsarea(dev);
+	if (!master_priv->sarea) {
+		DRM_ERROR("could not find sarea!\n");
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
+	if (!dev_priv->cp_ring) {
+		DRM_ERROR("could not find cp ring region!\n");
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
+	if (!dev_priv->ring_rptr) {
+		DRM_ERROR("could not find ring read pointer!\n");
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+	dev->agp_buffer_token = init->buffers_offset;
+	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+	if (!dev->agp_buffer_map) {
+		DRM_ERROR("could not find dma buffer region!\n");
+		r600_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	if (init->gart_textures_offset) {
+		dev_priv->gart_textures =
+		    drm_core_findmap(dev, init->gart_textures_offset);
+		if (!dev_priv->gart_textures) {
+			DRM_ERROR("could not find GART texture region!\n");
+			r600_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+	}
+
+#if __OS_HAS_AGP
+	/* XXX */
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		drm_core_ioremap_wc(dev_priv->cp_ring, dev);
+		drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
+		drm_core_ioremap_wc(dev->agp_buffer_map, dev);
+		if (!dev_priv->cp_ring->handle ||
+		    !dev_priv->ring_rptr->handle ||
+		    !dev->agp_buffer_map->handle) {
+			DRM_ERROR("could not find ioremap agp regions!\n");
+			r600_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+	} else
+#endif
+	{
+		dev_priv->cp_ring->handle = (void *)(unsigned long)dev_priv->cp_ring->offset;
+		dev_priv->ring_rptr->handle =
+			(void *)(unsigned long)dev_priv->ring_rptr->offset;
+		dev->agp_buffer_map->handle =
+			(void *)(unsigned long)dev->agp_buffer_map->offset;
+
+		DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
+			  dev_priv->cp_ring->handle);
+		DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
+			  dev_priv->ring_rptr->handle);
+		DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
+			  dev->agp_buffer_map->handle);
+	}
+
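+	/* The FB location register packs base and top as 16-bit fields with
+	 * 16MB (1 << 24) granularity: base = low word << 24, and the size is
+	 * (top word << 24) + 16MB - base.
+	 */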
+	dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 24;
+	dev_priv->fb_size =
+		(((radeon_read_fb_location(dev_priv) & 0xffff0000u) << 8) + 0x1000000)
+		- dev_priv->fb_location;
+
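+	/* pitch_offset encoding: pitch / 64 in bits [31:22], the 1KB-aligned
+	 * offset (offset >> 10) in the low 22 bits.
+	 */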
+	dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
+					((dev_priv->front_offset
+					  + dev_priv->fb_location) >> 10));
+
+	dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
+				       ((dev_priv->back_offset
+					 + dev_priv->fb_location) >> 10));
+
+	dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
+					((dev_priv->depth_offset
+					  + dev_priv->fb_location) >> 10));
+
+	dev_priv->gart_size = init->gart_size;
+
+	/* Now let's set the memory map ... */
+	if (dev_priv->new_memmap) {
+		u32 base = 0;
+
+		DRM_INFO("Setting GART location based on new memory map\n");
+
+		/* If using AGP, try to locate the AGP aperture at the same
+		 * location in the card and on the bus, though we have to
+		 * align it down.
+		 */
+#if __OS_HAS_AGP
+		/* XXX */
+		if (dev_priv->flags & RADEON_IS_AGP) {
+			base = dev->agp->base;
+			/* Check if valid */
+			if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
+			    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
+				DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
+					 dev->agp->base);
+				base = 0;
+			}
+		}
+#endif
+		/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
+		if (base == 0) {
+			base = dev_priv->fb_location + dev_priv->fb_size;
+			if (base < dev_priv->fb_location ||
+			    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
+				base = dev_priv->fb_location
+					- dev_priv->gart_size;
+		}
+		dev_priv->gart_vm_start = base & 0xffc00000u;
+		if (dev_priv->gart_vm_start != base)
+			DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
+				 base, dev_priv->gart_vm_start);
+	}
+
+#if __OS_HAS_AGP
+	/* XXX */
+	if (dev_priv->flags & RADEON_IS_AGP)
+		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+						 - dev->agp->base
+						 + dev_priv->gart_vm_start);
+	else
+#endif
+		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+						 - (unsigned long)dev->sg->virtual
+						 + dev_priv->gart_vm_start);
+
+	DRM_DEBUG("fb 0x%08x size %d\n",
+		  (unsigned int) dev_priv->fb_location,
+		  (unsigned int) dev_priv->fb_size);
+	DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
+	DRM_DEBUG("dev_priv->gart_vm_start 0x%08x\n",
+		  (unsigned int) dev_priv->gart_vm_start);
+	DRM_DEBUG("dev_priv->gart_buffers_offset 0x%08lx\n",
+		  dev_priv->gart_buffers_offset);
+
+	dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
+	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+			      + init->ring_size / sizeof(u32));
+	dev_priv->ring.size = init->ring_size;
+	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+
+	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
+	dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8);
+
+	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
+	dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16);
+
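+	/* this assumes the ring size is a power of two */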
+	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
+
+	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		/* XXX turn off pcie gart */
+	} else
+#endif
+	{
+		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
+		/* we need the GART offset supplied from userspace */
+		if (!dev_priv->pcigart_offset_set) {
+			DRM_ERROR("Need gart offset from userspace\n");
+			r600_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+
+		DRM_DEBUG("Using gart offset 0x%08lx\n", dev_priv->pcigart_offset);
+
+		dev_priv->gart_info.bus_addr =
+			dev_priv->pcigart_offset + dev_priv->fb_location;
+		dev_priv->gart_info.mapping.offset =
+			dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
+		dev_priv->gart_info.mapping.size =
+			dev_priv->gart_info.table_size;
+
+		drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
+		if (!dev_priv->gart_info.mapping.handle) {
+			DRM_ERROR("ioremap failed.\n");
+			r600_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+
+		dev_priv->gart_info.addr =
+			dev_priv->gart_info.mapping.handle;
+
+		DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
+			  dev_priv->gart_info.addr,
+			  dev_priv->pcigart_offset);
+
+		if (!r600_page_table_init(dev)) {
+			DRM_ERROR("Failed to init GART table\n");
+			r600_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+			r700_vm_init(dev);
+		else
+			r600_vm_init(dev);
+	}
+
+	if (!dev_priv->me_fw || !dev_priv->pfp_fw) {
+		int err = r600_cp_init_microcode(dev_priv);
+		if (err) {
+			DRM_ERROR("Failed to load firmware!\n");
+			r600_do_cleanup_cp(dev);
+			return err;
+		}
+	}
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+		r700_cp_load_microcode(dev_priv);
+	else
+		r600_cp_load_microcode(dev_priv);
+
+	r600_cp_init_ring_buffer(dev, dev_priv, file_priv);
+
+	dev_priv->last_buf = 0;
+
+	r600_do_engine_reset(dev);
+	r600_test_writeback(dev_priv);
+
+	return 0;
+}
+
+int r600_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
+		r700_vm_init(dev);
+		r700_cp_load_microcode(dev_priv);
+	} else {
+		r600_vm_init(dev);
+		r600_cp_load_microcode(dev_priv);
+	}
+	r600_cp_init_ring_buffer(dev, dev_priv, file_priv);
+	r600_do_engine_reset(dev);
+
+	return 0;
+}
+
+/* Wait for the CP to go idle.
+ */
+int r600_do_cp_idle(drm_radeon_private_t *dev_priv)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(5);
+	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
+	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
+	/* wait for 3D idle clean */
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
+	OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
+	OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);
+
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	return r600_do_wait_for_idle(dev_priv);
+}
+
+/* Start the Command Processor.
+ */
+void r600_do_cp_start(drm_radeon_private_t *dev_priv)
+{
+	u32 cp_me;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(7);
+	OUT_RING(CP_PACKET3(R600_IT_ME_INITIALIZE, 5));
+	OUT_RING(0x00000001);
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)
+		OUT_RING(0x00000003);
+	else
+		OUT_RING(0x00000000);
+	OUT_RING((dev_priv->r600_max_hw_contexts - 1));
+	OUT_RING(R600_ME_INITIALIZE_DEVICE_ID(1));
+	OUT_RING(0x00000000);
+	OUT_RING(0x00000000);
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	/* set the mux and reset the halt bit */
+	cp_me = 0xff;
+	RADEON_WRITE(R600_CP_ME_CNTL, cp_me);
+
+	dev_priv->cp_running = 1;
+}
+
+void r600_do_cp_reset(drm_radeon_private_t *dev_priv)
+{
+	u32 cur_read_ptr;
+	DRM_DEBUG("\n");
+
+	cur_read_ptr = RADEON_READ(R600_CP_RB_RPTR);
+	RADEON_WRITE(R600_CP_RB_WPTR, cur_read_ptr);
+	SET_RING_HEAD(dev_priv, cur_read_ptr);
+	dev_priv->ring.tail = cur_read_ptr;
+}
+
+void r600_do_cp_stop(drm_radeon_private_t *dev_priv)
+{
+	uint32_t cp_me;
+
+	DRM_DEBUG("\n");
+
+	cp_me = 0xff | R600_CP_ME_HALT;
+
+	RADEON_WRITE(R600_CP_ME_CNTL, cp_me);
+
+	dev_priv->cp_running = 0;
+}
+
+int r600_cp_dispatch_indirect(struct drm_device *dev,
+			      struct drm_buf *buf, int start, int end)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	if (start != end) {
+		unsigned long offset = (dev_priv->gart_buffers_offset
+					+ buf->offset + start);
+		int dwords = (end - start + 3) / sizeof(u32);
+
+		DRM_DEBUG("dwords:%d\n", dwords);
+		DRM_DEBUG("offset 0x%lx\n", offset);
+
+		/* Indirect buffer data must be a multiple of 16 dwords.
+		 * pad the data with a Type-2 CP packet.
+		 */
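+		/* e.g. a 10-dword buffer gets six RADEON_CP_PACKET2
+		 * (type-2 no-op) dwords appended here.
+		 */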
+		while (dwords & 0xf) {
+			u32 *data = (u32 *)
+			    ((char *)dev->agp_buffer_map->handle
+			     + buf->offset + start);
+			data[dwords++] = RADEON_CP_PACKET2;
+		}
+
+		/* Fire off the indirect buffer */
+		BEGIN_RING(4);
+		OUT_RING(CP_PACKET3(R600_IT_INDIRECT_BUFFER, 2));
+		OUT_RING((offset & 0xfffffffc));
+		OUT_RING((upper_32_bits(offset) & 0xff));
+		OUT_RING(dwords);
+		ADVANCE_RING();
+	}
+
+	return 0;
+}
+
+void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_master *master = file_priv->master;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int i, cpp, src_pitch, dst_pitch;
+	uint64_t src, dst;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	if (dev_priv->color_fmt == RADEON_COLOR_FORMAT_ARGB8888)
+		cpp = 4;
+	else
+		cpp = 2;
+
+	if (sarea_priv->pfCurrentPage == 0) {
+		src_pitch = dev_priv->back_pitch;
+		dst_pitch = dev_priv->front_pitch;
+		src = dev_priv->back_offset + dev_priv->fb_location;
+		dst = dev_priv->front_offset + dev_priv->fb_location;
+	} else {
+		src_pitch = dev_priv->front_pitch;
+		dst_pitch = dev_priv->back_pitch;
+		src = dev_priv->front_offset + dev_priv->fb_location;
+		dst = dev_priv->back_offset + dev_priv->fb_location;
+	}
+
+	if (r600_prepare_blit_copy(dev, file_priv)) {
+		DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
+		return;
+	}
+	for (i = 0; i < nbox; i++) {
+		int x = pbox[i].x1;
+		int y = pbox[i].y1;
+		int w = pbox[i].x2 - x;
+		int h = pbox[i].y2 - y;
+
+		DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
+
+		r600_blit_swap(dev,
+			       src, dst,
+			       x, y, x, y, w, h,
+			       src_pitch, dst_pitch, cpp);
+	}
+	r600_done_blit_copy(dev);
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	sarea_priv->last_frame++;
+
+	BEGIN_RING(3);
+	R600_FRAME_AGE(sarea_priv->last_frame);
+	ADVANCE_RING();
+}
+
+int r600_cp_dispatch_texture(struct drm_device *dev,
+			     struct drm_file *file_priv,
+			     drm_radeon_texture_t *tex,
+			     drm_radeon_tex_image_t *image)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_buf *buf;
+	u32 *buffer;
+	const u8 __user *data;
+	int size, pass_size;
+	u64 src_offset, dst_offset;
+
+	if (!radeon_check_offset(dev_priv, tex->offset)) {
+		DRM_ERROR("Invalid destination offset\n");
+		return -EINVAL;
+	}
+
+	/* this might fail for zero-sized uploads - are those illegal? */
+	if (!radeon_check_offset(dev_priv, tex->offset + tex->height * tex->pitch - 1)) {
+		DRM_ERROR("Invalid final destination offset\n");
+		return -EINVAL;
+	}
+
+	size = tex->height * tex->pitch;
+
+	if (size == 0)
+		return 0;
+
+	dst_offset = tex->offset;
+
+	if (r600_prepare_blit_copy(dev, file_priv)) {
+		DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
+		return -EAGAIN;
+	}
+	do {
+		data = (const u8 __user *)image->data;
+		pass_size = size;
+
+		buf = radeon_freelist_get(dev);
+		if (!buf) {
+			DRM_DEBUG("EAGAIN\n");
+			if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+				return -EFAULT;
+			return -EAGAIN;
+		}
+
+		if (pass_size > buf->total)
+			pass_size = buf->total;
+
+		/* Dispatch the indirect buffer.
+		 */
+		buffer =
+		    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+
+		if (DRM_COPY_FROM_USER(buffer, data, pass_size)) {
+			DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);
+			return -EFAULT;
+		}
+
+		buf->file_priv = file_priv;
+		buf->used = pass_size;
+		src_offset = dev_priv->gart_buffers_offset + buf->offset;
+
+		r600_blit_copy(dev, src_offset, dst_offset, pass_size);
+
+		radeon_cp_discard_buffer(dev, file_priv->master, buf);
+
+		/* Update the input parameters for next time */
+		image->data = (const u8 __user *)image->data + pass_size;
+		dst_offset += pass_size;
+		size -= pass_size;
+	} while (size > 0);
+	r600_done_blit_copy(dev);
+
+	return 0;
+}
+
+/*
+ * Legacy cs ioctl
+ */
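+/* A cs id is a 24-bit sequence counter in the low bits plus an 8-bit wrap
+ * counter in the top byte.
+ */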
+static u32 radeon_cs_id_get(struct drm_radeon_private *radeon)
+{
+	/* FIXME: check if wrap affects the last reported wrap & sequence */
+	radeon->cs_id_scnt = (radeon->cs_id_scnt + 1) & 0x00FFFFFF;
+	if (!radeon->cs_id_scnt) {
+		/* increment wrap counter */
+		radeon->cs_id_wcnt += 0x01000000;
+		/* a valid sequence counter starts at 1 */
+		radeon->cs_id_scnt = 1;
+	}
+	return (radeon->cs_id_scnt | radeon->cs_id_wcnt);
+}
+
+static void r600_cs_id_emit(drm_radeon_private_t *dev_priv, u32 *id)
+{
+	RING_LOCALS;
+
+	*id = radeon_cs_id_get(dev_priv);
+
+	/* SCRATCH 2 */
+	BEGIN_RING(3);
+	R600_CLEAR_AGE(*id);
+	ADVANCE_RING();
+	COMMIT_RING();
+}
+
+static int r600_ib_get(struct drm_device *dev,
+			struct drm_file *fpriv,
+			struct drm_buf **buffer)
+{
+	struct drm_buf *buf;
+
+	*buffer = NULL;
+	buf = radeon_freelist_get(dev);
+	if (!buf) {
+		return -EBUSY;
+	}
+	buf->file_priv = fpriv;
+	*buffer = buf;
+	return 0;
+}
+
+static void r600_ib_free(struct drm_device *dev, struct drm_buf *buf,
+			struct drm_file *fpriv, int l, int r)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (buf) {
+		if (!r)
+			r600_cp_dispatch_indirect(dev, buf, 0, l * 4);
+		radeon_cp_discard_buffer(dev, fpriv->master, buf);
+		COMMIT_RING();
+	}
+}
+
+int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv)
+{
+	struct drm_radeon_private *dev_priv = dev->dev_private;
+	struct drm_radeon_cs *cs = data;
+	struct drm_buf *buf;
+	unsigned family;
+	int l, r = 0;
+	u32 *ib, cs_id = 0;
+
+	if (dev_priv == NULL) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+	family = dev_priv->flags & RADEON_FAMILY_MASK;
+	if (family < CHIP_R600) {
+		DRM_ERROR("cs ioctl valid only for R6XX & R7XX in legacy mode\n");
+		return -EINVAL;
+	}
+	mutex_lock(&dev_priv->cs_mutex);
+	/* get ib */
+	r = r600_ib_get(dev, fpriv, &buf);
+	if (r) {
+		DRM_ERROR("ib_get failed\n");
+		goto out;
+	}
+	ib = dev->agp_buffer_map->handle + buf->offset;
+	/* now parse command stream */
+	r = r600_cs_legacy(dev, data, fpriv, family, ib, &l);
+	if (r)
+		goto out;
+
+out:
+	r600_ib_free(dev, buf, fpriv, l, r);
+	/* emit cs id sequence */
+	r600_cs_id_emit(dev_priv, &cs_id);
+	cs->cs_id = cs_id;
+	mutex_unlock(&dev_priv->cs_mutex);
+	return r;
+}
+
+void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size)
+{
+	struct drm_radeon_private *dev_priv = dev->dev_private;
+
+	*npipes = dev_priv->r600_npipes;
+	*nbanks = dev_priv->r600_nbanks;
+	*group_size = dev_priv->r600_group_size;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/r600_cs.c b/linux-imx/drivers/gpu/drm/radeon/r600_cs.c
new file mode 100644
index 0000000..745e66e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r600_cs.c
@@ -0,0 +1,2624 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/kernel.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "r600d.h"
+#include "r600_reg_safe.h"
+
+static int r600_nomm;
+extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
+
+
+struct r600_cs_track {
+	/* configuration we mirror so that we can use the same code for KMS and UMS */
+	u32			group_size;
+	u32			nbanks;
+	u32			npipes;
+	/* values we track */
+	u32			sq_config;
+	u32			log_nsamples;
+	u32			nsamples;
+	u32			cb_color_base_last[8];
+	struct radeon_bo	*cb_color_bo[8];
+	u64			cb_color_bo_mc[8];
+	u64			cb_color_bo_offset[8];
+	struct radeon_bo	*cb_color_frag_bo[8];
+	u64			cb_color_frag_offset[8];
+	struct radeon_bo	*cb_color_tile_bo[8];
+	u64			cb_color_tile_offset[8];
+	u32			cb_color_mask[8];
+	u32			cb_color_info[8];
+	u32			cb_color_view[8];
+	u32			cb_color_size_idx[8]; /* unused */
+	u32			cb_target_mask;
+	u32			cb_shader_mask;  /* unused */
+	bool			is_resolve;
+	u32			cb_color_size[8];
+	u32			vgt_strmout_en;
+	u32			vgt_strmout_buffer_en;
+	struct radeon_bo	*vgt_strmout_bo[4];
+	u64			vgt_strmout_bo_mc[4]; /* unused */
+	u32			vgt_strmout_bo_offset[4];
+	u32			vgt_strmout_size[4];
+	u32			db_depth_control;
+	u32			db_depth_info;
+	u32			db_depth_size_idx;
+	u32			db_depth_view;
+	u32			db_depth_size;
+	u32			db_offset;
+	struct radeon_bo	*db_bo;
+	u64			db_bo_mc;
+	bool			sx_misc_kill_all_prims;
+	bool			cb_dirty;
+	bool			db_dirty;
+	bool			streamout_dirty;
+	struct radeon_bo	*htile_bo;
+	u64			htile_offset;
+	u32			htile_surface;
+};
+
+#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
+#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
+#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4,  0, CHIP_R600 }
+#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
+#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8,  0, CHIP_R600 }
+#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
+#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
+#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }
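+/* initializer order: { blockwidth, blockheight, blocksize, valid_color,
+ * min_family }, matching struct gpu_formats below
+ */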
+
+struct gpu_formats {
+	unsigned blockwidth;
+	unsigned blockheight;
+	unsigned blocksize;
+	unsigned valid_color;
+	enum radeon_family min_family;
+};
+
+static const struct gpu_formats color_formats_table[] = {
+	/* 8 bit */
+	FMT_8_BIT(V_038004_COLOR_8, 1),
+	FMT_8_BIT(V_038004_COLOR_4_4, 1),
+	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
+	FMT_8_BIT(V_038004_FMT_1, 0),
+
+	/* 16-bit */
+	FMT_16_BIT(V_038004_COLOR_16, 1),
+	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
+	FMT_16_BIT(V_038004_COLOR_8_8, 1),
+	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
+	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
+	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
+	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
+	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
+
+	/* 24-bit */
+	FMT_24_BIT(V_038004_FMT_8_8_8),
+
+	/* 32-bit */
+	FMT_32_BIT(V_038004_COLOR_32, 1),
+	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_16_16, 1),
+	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_8_24, 1),
+	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_24_8, 1),
+	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
+	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
+	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
+	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
+	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
+	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
+	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
+	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
+	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
+
+	/* 48-bit */
+	FMT_48_BIT(V_038004_FMT_16_16_16),
+	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
+
+	/* 64-bit */
+	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
+	FMT_64_BIT(V_038004_COLOR_32_32, 1),
+	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
+	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
+	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
+
+	FMT_96_BIT(V_038004_FMT_32_32_32),
+	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
+
+	/* 128-bit */
+	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
+	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
+
+	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
+	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
+
+	/* block compressed formats */
+	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
+	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
+	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
+	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
+	[V_038004_FMT_BC5] = { 4, 4, 16, 0 },
+	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */
+	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR }, /* Evergreen-only */
+
+	/* The other Evergreen formats */
+	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR },
+};
+
+bool r600_fmt_is_valid_color(u32 format)
+{
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return false;
+
+	if (color_formats_table[format].valid_color)
+		return true;
+
+	return false;
+}
+
+bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
+{
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return false;
+
+	if (family < color_formats_table[format].min_family)
+		return false;
+
+	if (color_formats_table[format].blockwidth > 0)
+		return true;
+
+	return false;
+}
+
+int r600_fmt_get_blocksize(u32 format)
+{
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return 0;
+
+	return color_formats_table[format].blocksize;
+}
+
+int r600_fmt_get_nblocksx(u32 format, u32 w)
+{
+	unsigned bw;
+
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return 0;
+
+	bw = color_formats_table[format].blockwidth;
+	if (bw == 0)
+		return 0;
+
+	return (w + bw - 1) / bw;
+}
+
+int r600_fmt_get_nblocksy(u32 format, u32 h)
+{
+	unsigned bh;
+
+	if (format >= ARRAY_SIZE(color_formats_table))
+		return 0;
+
+	bh = color_formats_table[format].blockheight;
+	if (bh == 0)
+		return 0;
+
+	return (h + bh - 1) / bh;
+}
+
+struct array_mode_checker {
+	int array_mode;
+	u32 group_size;
+	u32 nbanks;
+	u32 npipes;
+	u32 nsamples;
+	u32 blocksize;
+};
+
+/* returns alignment in pixels for pitch/height/depth and bytes for base */
+static int r600_get_array_mode_alignment(struct array_mode_checker *values,
+						u32 *pitch_align,
+						u32 *height_align,
+						u32 *depth_align,
+						u64 *base_align)
+{
+	u32 tile_width = 8;
+	u32 tile_height = 8;
+	u32 macro_tile_width = values->nbanks;
+	u32 macro_tile_height = values->npipes;
+	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
+	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
+
+	switch (values->array_mode) {
+	case ARRAY_LINEAR_GENERAL:
+		/* technically tile_width/_height for pitch/height */
+		*pitch_align = 1; /* tile_width */
+		*height_align = 1; /* tile_height */
+		*depth_align = 1;
+		*base_align = 1;
+		break;
+	case ARRAY_LINEAR_ALIGNED:
+		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
+		*height_align = 1;
+		*depth_align = 1;
+		*base_align = values->group_size;
+		break;
+	case ARRAY_1D_TILED_THIN1:
+		*pitch_align = max((u32)tile_width,
+				   (u32)(values->group_size /
+					 (tile_height * values->blocksize * values->nsamples)));
+		*height_align = tile_height;
+		*depth_align = 1;
+		*base_align = values->group_size;
+		break;
+	case ARRAY_2D_TILED_THIN1:
+		*pitch_align = max((u32)macro_tile_width * tile_width,
+				(u32)((values->group_size * values->nbanks) /
+				(values->blocksize * values->nsamples * tile_width)));
+		*height_align = macro_tile_height * tile_height;
+		*depth_align = 1;
+		*base_align = max(macro_tile_bytes,
+				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void r600_cs_track_init(struct r600_cs_track *track)
+{
+	int i;
+
+	/* assume DX9 mode */
+	track->sq_config = DX9_CONSTS;
+	for (i = 0; i < 8; i++) {
+		track->cb_color_base_last[i] = 0;
+		track->cb_color_size[i] = 0;
+		track->cb_color_size_idx[i] = 0;
+		track->cb_color_info[i] = 0;
+		track->cb_color_view[i] = 0xFFFFFFFF;
+		track->cb_color_bo[i] = NULL;
+		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
+		track->cb_color_frag_bo[i] = NULL;
+		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
+		track->cb_color_tile_bo[i] = NULL;
+		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
+		track->cb_color_mask[i] = 0xFFFFFFFF;
+	}
+	track->is_resolve = false;
+	track->nsamples = 16;
+	track->log_nsamples = 4;
+	track->cb_target_mask = 0xFFFFFFFF;
+	track->cb_shader_mask = 0xFFFFFFFF;
+	track->cb_dirty = true;
+	track->db_bo = NULL;
+	track->db_bo_mc = 0xFFFFFFFF;
+	/* assume the biggest format and that htile is enabled */
+	track->db_depth_info = 7 | (1 << 25);
+	track->db_depth_view = 0xFFFFC000;
+	track->db_depth_size = 0xFFFFFFFF;
+	track->db_depth_size_idx = 0;
+	track->db_depth_control = 0xFFFFFFFF;
+	track->db_dirty = true;
+	track->htile_bo = NULL;
+	track->htile_offset = 0xFFFFFFFF;
+	track->htile_surface = 0;
+
+	for (i = 0; i < 4; i++) {
+		track->vgt_strmout_size[i] = 0;
+		track->vgt_strmout_bo[i] = NULL;
+		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
+		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
+	}
+	track->streamout_dirty = true;
+	track->sx_misc_kill_all_prims = false;
+}
+
+static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+{
+	struct r600_cs_track *track = p->track;
+	u32 slice_tile_max, size, tmp;
+	u32 height, height_align, pitch, pitch_align, depth_align;
+	u64 base_offset, base_align;
+	struct array_mode_checker array_check;
+	volatile u32 *ib = p->ib.ptr;
+	unsigned array_mode;
+	u32 format;
+	/* When resolve is used, the second colorbuffer always has 1 sample. */
+	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
+
+	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
+	format = G_0280A0_FORMAT(track->cb_color_info[i]);
+	if (!r600_fmt_is_valid_color(format)) {
+		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
+			 __func__, __LINE__, format,
+			i, track->cb_color_info[i]);
+		return -EINVAL;
+	}
+	/* pitch in pixels */
+	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
+	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
+	slice_tile_max *= 64;
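+	/* SLICE_TILE_MAX counts 8x8 tiles, so multiply by 64 to get pixels */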
+	height = slice_tile_max / pitch;
+	if (height > 8192)
+		height = 8192;
+	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
+
+	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
+	array_check.array_mode = array_mode;
+	array_check.group_size = track->group_size;
+	array_check.nbanks = track->nbanks;
+	array_check.npipes = track->npipes;
+	array_check.nsamples = nsamples;
+	array_check.blocksize = r600_fmt_get_blocksize(format);
+	if (r600_get_array_mode_alignment(&array_check,
+					  &pitch_align, &height_align, &depth_align, &base_align)) {
+		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+			 track->cb_color_info[i]);
+		return -EINVAL;
+	}
+	switch (array_mode) {
+	case V_0280A0_ARRAY_LINEAR_GENERAL:
+		break;
+	case V_0280A0_ARRAY_LINEAR_ALIGNED:
+		break;
+	case V_0280A0_ARRAY_1D_TILED_THIN1:
+		/* avoid breaking userspace */
+		if (height > 7)
+			height &= ~0x7;
+		break;
+	case V_0280A0_ARRAY_2D_TILED_THIN1:
+		break;
+	default:
+		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+			G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+			track->cb_color_info[i]);
+		return -EINVAL;
+	}
+
+	if (!IS_ALIGNED(pitch, pitch_align)) {
+		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, array_mode);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(height, height_align)) {
+		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, height, height_align, array_mode);
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(base_offset, base_align)) {
+		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
+			 base_offset, base_align, array_mode);
+		return -EINVAL;
+	}
+
+	/* check offset */
+	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
+	      r600_fmt_get_blocksize(format) * nsamples;
+	switch (array_mode) {
+	default:
+	case V_0280A0_ARRAY_LINEAR_GENERAL:
+	case V_0280A0_ARRAY_LINEAR_ALIGNED:
+		tmp += track->cb_color_view[i] & 0xFF;
+		break;
+	case V_0280A0_ARRAY_1D_TILED_THIN1:
+	case V_0280A0_ARRAY_2D_TILED_THIN1:
+		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
+		break;
+	}
+	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
+		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
+			/* the initial DDX does bad things with the CB size
+			 * occasionally: it rounds the height up too far for
+			 * the slice tile max while the BO is smaller.
+			 * r600c,g also seem to flush at bad times in some
+			 * apps, resulting in bogus values here. So for linear
+			 * just allow anything to avoid breaking broken
+			 * userspace.
+			 */
+		} else {
+			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
+				 __func__, i, array_mode,
+				 track->cb_color_bo_offset[i], tmp,
+				 radeon_bo_size(track->cb_color_bo[i]),
+				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
+				 r600_fmt_get_nblocksy(format, height),
+				 r600_fmt_get_blocksize(format));
+			return -EINVAL;
+		}
+	}
+	/* limit max tile */
+	tmp = (height * pitch) >> 6;
+	if (tmp < slice_tile_max)
+		slice_tile_max = tmp;
+	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
+		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
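+	/* write the clamped value back into the IB at the recorded
+	 * CB_COLOR[0-7]_SIZE slot so the GPU never sees a SLICE_TILE_MAX
+	 * larger than what the checks above allow */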
+	ib[track->cb_color_size_idx[i]] = tmp;
+
+	/* FMASK/CMASK */
+	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
+	case V_0280A0_TILE_DISABLE:
+		break;
+	case V_0280A0_FRAG_ENABLE:
+		if (track->nsamples > 1) {
+			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
+			/* the tile size is 8x8, but the size is in units of
+			 * bits; for bytes, just multiply by 8. */
+			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
+
+			if (bytes + track->cb_color_frag_offset[i] >
+			    radeon_bo_size(track->cb_color_frag_bo[i])) {
+				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
+					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+					 __func__, tile_max, bytes,
+					 track->cb_color_frag_offset[i],
+					 radeon_bo_size(track->cb_color_frag_bo[i]));
+				return -EINVAL;
+			}
+		}
+		/* fall through */
+	case V_0280A0_CLEAR_ENABLE:
+	{
+		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
+		/* One block = 128x128 pixels, one 8x8 tile has 4 bits, so
+		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
+		uint32_t bytes = (block_max + 1) * 128;
+
+		if (bytes + track->cb_color_tile_offset[i] >
+		    radeon_bo_size(track->cb_color_tile_bo[i])) {
+			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
+				 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+				 __func__, block_max, bytes,
+				 track->cb_color_tile_offset[i],
+				 radeon_bo_size(track->cb_color_tile_bo[i]));
+			return -EINVAL;
+		}
+		break;
+	}
+	default:
+		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
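+/* Validate the depth/stencil state: decode DB_DEPTH_SIZE/INFO, check the
+ * format, tiling and alignment, verify the depth BO is large enough and,
+ * when hyper-z is enabled, that the htile surface fits as well.
+ */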
+static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
+{
+	struct r600_cs_track *track = p->track;
+	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
+	u32 height_align, pitch_align, depth_align;
+	u32 pitch = 8192;
+	u32 height = 8192;
+	u64 base_offset, base_align;
+	struct array_mode_checker array_check;
+	int array_mode;
+	volatile u32 *ib = p->ib.ptr;
+
+	if (track->db_bo == NULL) {
+		dev_warn(p->dev, "z/stencil with no depth buffer\n");
+		return -EINVAL;
+	}
+	switch (G_028010_FORMAT(track->db_depth_info)) {
+	case V_028010_DEPTH_16:
+		bpe = 2;
+		break;
+	case V_028010_DEPTH_X8_24:
+	case V_028010_DEPTH_8_24:
+	case V_028010_DEPTH_X8_24_FLOAT:
+	case V_028010_DEPTH_8_24_FLOAT:
+	case V_028010_DEPTH_32_FLOAT:
+		bpe = 4;
+		break;
+	case V_028010_DEPTH_X24_8_32_FLOAT:
+		bpe = 8;
+		break;
+	default:
+		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+		return -EINVAL;
+	}
+	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+		if (!track->db_depth_size_idx) {
+			dev_warn(p->dev, "z/stencil buffer size not set\n");
+			return -EINVAL;
+		}
+		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
+		tmp = (tmp / bpe) >> 6;
+		if (!tmp) {
+			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+					track->db_depth_size, bpe, track->db_offset,
+					radeon_bo_size(track->db_bo));
+			return -EINVAL;
+		}
+		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
+	} else {
+		size = radeon_bo_size(track->db_bo);
+		/* pitch in pixels */
+		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
+		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+		slice_tile_max *= 64;
+		height = slice_tile_max / pitch;
+		if (height > 8192)
+			height = 8192;
+		base_offset = track->db_bo_mc + track->db_offset;
+		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
+		array_check.array_mode = array_mode;
+		array_check.group_size = track->group_size;
+		array_check.nbanks = track->nbanks;
+		array_check.npipes = track->npipes;
+		array_check.nsamples = track->nsamples;
+		array_check.blocksize = bpe;
+		if (r600_get_array_mode_alignment(&array_check,
+					&pitch_align, &height_align, &depth_align, &base_align)) {
+			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+					G_028010_ARRAY_MODE(track->db_depth_info),
+					track->db_depth_info);
+			return -EINVAL;
+		}
+		switch (array_mode) {
+		case V_028010_ARRAY_1D_TILED_THIN1:
+			/* don't break userspace */
+			height &= ~0x7;
+			break;
+		case V_028010_ARRAY_2D_TILED_THIN1:
+			break;
+		default:
+			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+					G_028010_ARRAY_MODE(track->db_depth_info),
+					track->db_depth_info);
+			return -EINVAL;
+		}
+
+		if (!IS_ALIGNED(pitch, pitch_align)) {
+			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+					__func__, __LINE__, pitch, pitch_align, array_mode);
+			return -EINVAL;
+		}
+		if (!IS_ALIGNED(height, height_align)) {
+			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+					__func__, __LINE__, height, height_align, array_mode);
+			return -EINVAL;
+		}
+		if (!IS_ALIGNED(base_offset, base_align)) {
+			dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
+					base_offset, base_align, array_mode);
+			return -EINVAL;
+		}
+
+		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
+		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
+		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
+			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+					array_mode,
+					track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+					radeon_bo_size(track->db_bo));
+			return -EINVAL;
+		}
+	}
+
+	/* hyperz */
+	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
+		unsigned long size;
+		unsigned nbx, nby;
+
+		if (track->htile_bo == NULL) {
+			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+				 __func__, __LINE__, track->db_depth_info);
+			return -EINVAL;
+		}
+		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
+				 __func__, __LINE__, track->db_depth_size);
+			return -EINVAL;
+		}
+
+		nbx = pitch;
+		nby = height;
+		if (G_028D24_LINEAR(track->htile_surface)) {
+			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
+			nbx = round_up(nbx, 16 * 8);
+			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
+			nby = round_up(nby, track->npipes * 8);
+		} else {
+			/* always assume 8x8 htile */
+			/* alignment is htile align * 8; htile align varies
+			 * with the number of pipes, the tile width and nby
+			 */
+			switch (track->npipes) {
+			case 8:
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
+				nbx = round_up(nbx, 64 * 8);
+				nby = round_up(nby, 64 * 8);
+				break;
+			case 4:
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
+				nbx = round_up(nbx, 64 * 8);
+				nby = round_up(nby, 32 * 8);
+				break;
+			case 2:
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
+				nbx = round_up(nbx, 32 * 8);
+				nby = round_up(nby, 32 * 8);
+				break;
+			case 1:
+				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
+				nbx = round_up(nbx, 32 * 8);
+				nby = round_up(nby, 16 * 8);
+				break;
+			default:
+				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
+					 __func__, __LINE__, track->npipes);
+				return -EINVAL;
+			}
+		}
+		/* compute number of htile */
+		nbx = nbx >> 3;
+		nby = nby >> 3;
+		/* size must be aligned on npipes * 2K boundary */
+		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
+		size += track->htile_offset;
+
+		if (size > radeon_bo_size(track->htile_bo)) {
+			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
+				 size, nbx, nby);
+			return -EINVAL;
+		}
+	}
+
+	track->db_dirty = false;
+	return 0;
+}
+
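+/* Top-level state check run before each draw: validates the streamout
+ * buffers, every enabled colorbuffer and the depth buffer, using the
+ * dirty flags to skip state that hasn't changed since the last check.
+ */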
+static int r600_cs_track_check(struct radeon_cs_parser *p)
+{
+	struct r600_cs_track *track = p->track;
+	u32 tmp;
+	int r, i;
+
+	/* on legacy kernels we don't perform the advanced checks */
+	if (p->rdev == NULL)
+		return 0;
+
+	/* check streamout */
+	if (track->streamout_dirty && track->vgt_strmout_en) {
+		for (i = 0; i < 4; i++) {
+			if (track->vgt_strmout_buffer_en & (1 << i)) {
+				if (track->vgt_strmout_bo[i]) {
+					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
+						(u64)track->vgt_strmout_size[i];
+					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
+						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
+							  i, offset,
+							  radeon_bo_size(track->vgt_strmout_bo[i]));
+						return -EINVAL;
+					}
+				} else {
+					dev_warn(p->dev, "No buffer for streamout %d\n", i);
+					return -EINVAL;
+				}
+			}
+		}
+		track->streamout_dirty = false;
+	}
+
+	if (track->sx_misc_kill_all_prims)
+		return 0;
+
+	/* check that we have a cb for each enabled target; we don't check
+	 * shader_mask because it seems mesa isn't always setting it :(
+	 */
+	if (track->cb_dirty) {
+		tmp = track->cb_target_mask;
+
+		/* We must check both colorbuffers for RESOLVE. */
+		if (track->is_resolve) {
+			tmp |= 0xff;
+		}
+
+		for (i = 0; i < 8; i++) {
+			u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
+
+			if (format != V_0280A0_COLOR_INVALID &&
+			    (tmp >> (i * 4)) & 0xF) {
+				/* at least one component is enabled */
+				if (track->cb_color_bo[i] == NULL) {
+					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+					return -EINVAL;
+				}
+				/* perform rewrite of CB_COLOR[0-7]_SIZE */
+				r = r600_cs_track_validate_cb(p, i);
+				if (r)
+					return r;
+			}
+		}
+		track->cb_dirty = false;
+	}
+
+	/* Check depth buffer */
+	if (track->db_dirty &&
+	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
+	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+	     G_028800_Z_ENABLE(track->db_depth_control))) {
+		r = r600_cs_track_validate_db(p);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+/**
+ * r600_cs_packet_parse_vline() - parse userspace VLINE packet
+ * @p:		parser structure holding parsing context.
+ *
+ * This is an R600-specific function for parsing VLINE packets.
+ * The real work is done by the r600_cs_common_vline_parse() function.
+ * Here we just set up the ASIC-specific register table and call
+ * the common implementation.
+ */
+static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+	static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
+					      AVIVO_D2MODE_VLINE_START_END};
+	static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
+					   AVIVO_D2MODE_VLINE_STATUS};
+
+	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
+}
+
+/**
+ * r600_cs_common_vline_parse() - common vline parser
+ * @p:			parser structure holding parsing context.
+ * @vline_start_end:    table of vline_start_end registers
+ * @vline_status:       table of vline_status registers
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET3 - WAIT_REG_MEM poll vline status reg
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT_REG_MEM packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case. This function is common for all ASICs that
+ * are R600 and newer. The parsing algorithm is the same, and only
+ * differs in which registers are used.
+ *
+ * The caller is the ASIC-specific function which passes the parser
+ * context and the ASIC-specific register table.
+ */
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+			       uint32_t *vline_start_end,
+			       uint32_t *vline_status)
+{
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_cs_packet p3reloc, wait_reg_mem;
+	int crtc_id;
+	int r;
+	uint32_t header, h_idx, reg, wait_reg_mem_info;
+	volatile uint32_t *ib;
+
+	ib = p->ib.ptr;
+
+	/* parse the WAIT_REG_MEM */
+	r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
+	if (r)
+		return r;
+
+	/* check it's a WAIT_REG_MEM */
+	if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
+	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
+		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+		return -EINVAL;
+	}
+
+	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
+	/* bit 4 is reg (0) or mem (1) */
+	if (wait_reg_mem_info & 0x10) {
+		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
+		return -EINVAL;
+	}
+	/* bit 8 is me (0) or pfp (1) */
+	if (wait_reg_mem_info & 0x100) {
+		DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
+		return -EINVAL;
+	}
+	/* waiting for value to be equal */
+	if ((wait_reg_mem_info & 0x7) != 0x3) {
+		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+		return -EINVAL;
+	}
+	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
+		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+		return -EINVAL;
+	}
+
+	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
+		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+		return -EINVAL;
+	}
+
+	/* jump over the NOP */
+	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+	if (r)
+		return r;
+
+	h_idx = p->idx - 2;
+	p->idx += wait_reg_mem.count + 2;
+	p->idx += p3reloc.count + 2;
+
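+	/* h_idx points at the original PACKET0 header: h_idx + 1 is the
+	 * VLINE_START_END value, h_idx + 2 starts the 7-dword WAIT_REG_MEM
+	 * and h_idx + 2 + 7 + 1 is the crtc_id carried in the NOP payload.
+	 */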
+	header = radeon_get_ib_value(p, h_idx);
+	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
+	reg = R600_CP_PACKET0_GET_REG(header);
+
+	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		DRM_ERROR("cannot find crtc %d\n", crtc_id);
+		return -EINVAL;
+	}
+	crtc = obj_to_crtc(obj);
+	radeon_crtc = to_radeon_crtc(crtc);
+	crtc_id = radeon_crtc->crtc_id;
+
+	if (!crtc->enabled) {
+		/* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
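+		/* the WAIT_REG_MEM packet is 7 dwords (header + 6 payload),
+		 * so replace all 7 with type-2 NOPs */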
+		ib[h_idx + 2] = PACKET2(0);
+		ib[h_idx + 3] = PACKET2(0);
+		ib[h_idx + 4] = PACKET2(0);
+		ib[h_idx + 5] = PACKET2(0);
+		ib[h_idx + 6] = PACKET2(0);
+		ib[h_idx + 7] = PACKET2(0);
+		ib[h_idx + 8] = PACKET2(0);
+	} else if (reg == vline_start_end[0]) {
+		header &= ~R600_CP_PACKET0_REG_MASK;
+		header |= vline_start_end[crtc_id] >> 2;
+		ib[h_idx] = header;
+		ib[h_idx + 4] = vline_status[crtc_id] >> 2;
+	} else {
+		DRM_ERROR("unknown crtc reloc\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int r600_packet0_check(struct radeon_cs_parser *p,
+				struct radeon_cs_packet *pkt,
+				unsigned idx, unsigned reg)
+{
+	int r;
+
+	switch (reg) {
+	case AVIVO_D1MODE_VLINE_START_END:
+		r = r600_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			return r;
+		}
+		break;
+	default:
+		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+		       reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
+				struct radeon_cs_packet *pkt)
+{
+	unsigned reg, i;
+	unsigned idx;
+	int r;
+
+	idx = pkt->idx + 1;
+	reg = pkt->reg;
+	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+		r = r600_packet0_check(p, pkt, idx, reg);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+/**
+ * r600_cs_check_reg() - check if register is authorized or not
+ * @p: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ *
+ * This function will test against r600_reg_safe_bm and return 0
+ * if the register is safe. If the register is not flagged as safe,
+ * this function will test it against a list of registers needing
+ * special handling.
+ */
+static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
+	struct radeon_cs_reloc *reloc;
+	u32 m, i, tmp, *ib;
+	int r;
+
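+	/* each 32-bit word of r600_reg_safe_bm covers 32 dword registers
+	 * (128 bytes of register space); a set bit means the register is
+	 * not automatically safe and needs the special handling below */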
+	i = (reg >> 7);
+	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	m = 1 << ((reg >> 2) & 31);
+	if (!(r600_reg_safe_bm[i] & m))
+		return 0;
+	ib = p->ib.ptr;
+	switch (reg) {
+	/* force the following regs to 0 in an attempt to disable the out
+	 * buffer; we would need to understand better how it works before we
+	 * can perform a real security check on it (Jerome)
+	 */
+	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
+	case R_008C44_SQ_ESGS_RING_SIZE:
+	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
+	case R_008C54_SQ_ESTMP_RING_SIZE:
+	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
+	case R_008C74_SQ_FBUF_RING_SIZE:
+	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
+	case R_008C5C_SQ_GSTMP_RING_SIZE:
+	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
+	case R_008C4C_SQ_GSVS_RING_SIZE:
+	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
+	case R_008C6C_SQ_PSTMP_RING_SIZE:
+	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
+	case R_008C7C_SQ_REDUC_RING_SIZE:
+	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
+	case R_008C64_SQ_VSTMP_RING_SIZE:
+	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
+		/* get the value to populate the IB, don't remove */
+		tmp = radeon_get_ib_value(p, idx);
+		ib[idx] = 0;
+		break;
+	case SQ_CONFIG:
+		track->sq_config = radeon_get_ib_value(p, idx);
+		break;
+	case R_028800_DB_DEPTH_CONTROL:
+		track->db_depth_control = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case R_028010_DB_DEPTH_INFO:
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
+		    radeon_cs_packet_next_is_pkt3_nop(p)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					 "0x%04X\n", reg);
+				return -EINVAL;
+			}
+			track->db_depth_info = radeon_get_ib_value(p, idx);
+			ib[idx] &= C_028010_ARRAY_MODE;
+			track->db_depth_info &= C_028010_ARRAY_MODE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
+				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
+			} else {
+				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+			}
+		} else {
+			track->db_depth_info = radeon_get_ib_value(p, idx);
+		}
+		track->db_dirty = true;
+		break;
+	case R_028004_DB_DEPTH_VIEW:
+		track->db_depth_view = radeon_get_ib_value(p, idx);
+		track->db_dirty = true;
+		break;
+	case R_028000_DB_DEPTH_SIZE:
+		track->db_depth_size = radeon_get_ib_value(p, idx);
+		track->db_depth_size_idx = idx;
+		track->db_dirty = true;
+		break;
+	case R_028AB0_VGT_STRMOUT_EN:
+		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
+		track->streamout_dirty = true;
+		break;
+	case R_028B20_VGT_STRMOUT_BUFFER_EN:
+		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_BASE_0:
+	case VGT_STRMOUT_BUFFER_BASE_1:
+	case VGT_STRMOUT_BUFFER_BASE_2:
+	case VGT_STRMOUT_BUFFER_BASE_3:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
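+		/* the BASE registers are 16 bytes apart, so this yields the
+		 * streamout buffer index */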
+		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
+		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->vgt_strmout_bo[tmp] = reloc->robj;
+		track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
+		track->streamout_dirty = true;
+		break;
+	case VGT_STRMOUT_BUFFER_SIZE_0:
+	case VGT_STRMOUT_BUFFER_SIZE_1:
+	case VGT_STRMOUT_BUFFER_SIZE_2:
+	case VGT_STRMOUT_BUFFER_SIZE_3:
+		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
+		/* size in register is DWs, convert to bytes */
+		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
+		track->streamout_dirty = true;
+		break;
+	case CP_COHER_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case R_028238_CB_TARGET_MASK:
+		track->cb_target_mask = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case R_02823C_CB_SHADER_MASK:
+		track->cb_shader_mask = radeon_get_ib_value(p, idx);
+		break;
+	case R_028C04_PA_SC_AA_CONFIG:
+		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
+		track->log_nsamples = tmp;
+		track->nsamples = 1 << tmp;
+		track->cb_dirty = true;
+		break;
+	case R_028808_CB_COLOR_CONTROL:
+		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
+		track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
+		track->cb_dirty = true;
+		break;
+	case R_0280A0_CB_COLOR0_INFO:
+	case R_0280A4_CB_COLOR1_INFO:
+	case R_0280A8_CB_COLOR2_INFO:
+	case R_0280AC_CB_COLOR3_INFO:
+	case R_0280B0_CB_COLOR4_INFO:
+	case R_0280B4_CB_COLOR5_INFO:
+	case R_0280B8_CB_COLOR6_INFO:
+	case R_0280BC_CB_COLOR7_INFO:
+		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
+		     radeon_cs_packet_next_is_pkt3_nop(p)) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+				return -EINVAL;
+			}
+			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
+				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
+			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
+				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
+			}
+		} else {
+			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+		}
+		track->cb_dirty = true;
+		break;
+	case R_028080_CB_COLOR0_VIEW:
+	case R_028084_CB_COLOR1_VIEW:
+	case R_028088_CB_COLOR2_VIEW:
+	case R_02808C_CB_COLOR3_VIEW:
+	case R_028090_CB_COLOR4_VIEW:
+	case R_028094_CB_COLOR5_VIEW:
+	case R_028098_CB_COLOR6_VIEW:
+	case R_02809C_CB_COLOR7_VIEW:
+		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
+		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_dirty = true;
+		break;
+	case R_028060_CB_COLOR0_SIZE:
+	case R_028064_CB_COLOR1_SIZE:
+	case R_028068_CB_COLOR2_SIZE:
+	case R_02806C_CB_COLOR3_SIZE:
+	case R_028070_CB_COLOR4_SIZE:
+	case R_028074_CB_COLOR5_SIZE:
+	case R_028078_CB_COLOR6_SIZE:
+	case R_02807C_CB_COLOR7_SIZE:
+		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
+		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
+		track->cb_color_size_idx[tmp] = idx;
+		track->cb_dirty = true;
+		break;
+		/* These registers were added late; there is userspace which
+		 * does provide relocations for them but sets a 0 offset. In
+		 * order to avoid breaking old userspace we detect this and
+		 * set the address to point to the last CB_COLOR0_BASE. Note
+		 * that if userspace doesn't set CB_COLOR0_BASE before this
+		 * register we will report an error. Old userspace always
+		 * sets CB_COLOR0_BASE before any of these.
+		 */
+	case R_0280E0_CB_COLOR0_FRAG:
+	case R_0280E4_CB_COLOR1_FRAG:
+	case R_0280E8_CB_COLOR2_FRAG:
+	case R_0280EC_CB_COLOR3_FRAG:
+	case R_0280F0_CB_COLOR4_FRAG:
+	case R_0280F4_CB_COLOR5_FRAG:
+	case R_0280F8_CB_COLOR6_FRAG:
+	case R_0280FC_CB_COLOR7_FRAG:
+		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
+		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
+			if (!track->cb_color_base_last[tmp]) {
+				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+				return -EINVAL;
+			}
+			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
+			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
+			ib[idx] = track->cb_color_base_last[tmp];
+		} else {
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+				return -EINVAL;
+			}
+			track->cb_color_frag_bo[tmp] = reloc->robj;
+			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
+			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		}
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
+		}
+		break;
+	case R_0280C0_CB_COLOR0_TILE:
+	case R_0280C4_CB_COLOR1_TILE:
+	case R_0280C8_CB_COLOR2_TILE:
+	case R_0280CC_CB_COLOR3_TILE:
+	case R_0280D0_CB_COLOR4_TILE:
+	case R_0280D4_CB_COLOR5_TILE:
+	case R_0280D8_CB_COLOR6_TILE:
+	case R_0280DC_CB_COLOR7_TILE:
+		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
+		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
+			if (!track->cb_color_base_last[tmp]) {
+				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+				return -EINVAL;
+			}
+			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
+			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
+			ib[idx] = track->cb_color_base_last[tmp];
+		} else {
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+				return -EINVAL;
+			}
+			track->cb_color_tile_bo[tmp] = reloc->robj;
+			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
+			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		}
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
+		}
+		break;
+	case R_028100_CB_COLOR0_MASK:
+	case R_028104_CB_COLOR1_MASK:
+	case R_028108_CB_COLOR2_MASK:
+	case R_02810C_CB_COLOR3_MASK:
+	case R_028110_CB_COLOR4_MASK:
+	case R_028114_CB_COLOR5_MASK:
+	case R_028118_CB_COLOR6_MASK:
+	case R_02811C_CB_COLOR7_MASK:
+		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
+		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
+		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+			track->cb_dirty = true;
+		}
+		break;
+	case CB_COLOR0_BASE:
+	case CB_COLOR1_BASE:
+	case CB_COLOR2_BASE:
+	case CB_COLOR3_BASE:
+	case CB_COLOR4_BASE:
+	case CB_COLOR5_BASE:
+	case CB_COLOR6_BASE:
+	case CB_COLOR7_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		tmp = (reg - CB_COLOR0_BASE) / 4;
+		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->cb_color_base_last[tmp] = ib[idx];
+		track->cb_color_bo[tmp] = reloc->robj;
+		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
+		track->cb_dirty = true;
+		break;
+	case DB_DEPTH_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->db_offset = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->db_bo = reloc->robj;
+		track->db_bo_mc = reloc->lobj.gpu_offset;
+		track->db_dirty = true;
+		break;
+	case DB_HTILE_DATA_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		track->htile_bo = reloc->robj;
+		track->db_dirty = true;
+		break;
+	case DB_HTILE_SURFACE:
+		track->htile_surface = radeon_get_ib_value(p, idx);
+		/* force 8x8 htile width and height */
+		ib[idx] |= 3;
+		track->db_dirty = true;
+		break;
+	case SQ_PGM_START_FS:
+	case SQ_PGM_START_ES:
+	case SQ_PGM_START_VS:
+	case SQ_PGM_START_GS:
+	case SQ_PGM_START_PS:
+	case SQ_ALU_CONST_CACHE_GS_0:
+	case SQ_ALU_CONST_CACHE_GS_1:
+	case SQ_ALU_CONST_CACHE_GS_2:
+	case SQ_ALU_CONST_CACHE_GS_3:
+	case SQ_ALU_CONST_CACHE_GS_4:
+	case SQ_ALU_CONST_CACHE_GS_5:
+	case SQ_ALU_CONST_CACHE_GS_6:
+	case SQ_ALU_CONST_CACHE_GS_7:
+	case SQ_ALU_CONST_CACHE_GS_8:
+	case SQ_ALU_CONST_CACHE_GS_9:
+	case SQ_ALU_CONST_CACHE_GS_10:
+	case SQ_ALU_CONST_CACHE_GS_11:
+	case SQ_ALU_CONST_CACHE_GS_12:
+	case SQ_ALU_CONST_CACHE_GS_13:
+	case SQ_ALU_CONST_CACHE_GS_14:
+	case SQ_ALU_CONST_CACHE_GS_15:
+	case SQ_ALU_CONST_CACHE_PS_0:
+	case SQ_ALU_CONST_CACHE_PS_1:
+	case SQ_ALU_CONST_CACHE_PS_2:
+	case SQ_ALU_CONST_CACHE_PS_3:
+	case SQ_ALU_CONST_CACHE_PS_4:
+	case SQ_ALU_CONST_CACHE_PS_5:
+	case SQ_ALU_CONST_CACHE_PS_6:
+	case SQ_ALU_CONST_CACHE_PS_7:
+	case SQ_ALU_CONST_CACHE_PS_8:
+	case SQ_ALU_CONST_CACHE_PS_9:
+	case SQ_ALU_CONST_CACHE_PS_10:
+	case SQ_ALU_CONST_CACHE_PS_11:
+	case SQ_ALU_CONST_CACHE_PS_12:
+	case SQ_ALU_CONST_CACHE_PS_13:
+	case SQ_ALU_CONST_CACHE_PS_14:
+	case SQ_ALU_CONST_CACHE_PS_15:
+	case SQ_ALU_CONST_CACHE_VS_0:
+	case SQ_ALU_CONST_CACHE_VS_1:
+	case SQ_ALU_CONST_CACHE_VS_2:
+	case SQ_ALU_CONST_CACHE_VS_3:
+	case SQ_ALU_CONST_CACHE_VS_4:
+	case SQ_ALU_CONST_CACHE_VS_5:
+	case SQ_ALU_CONST_CACHE_VS_6:
+	case SQ_ALU_CONST_CACHE_VS_7:
+	case SQ_ALU_CONST_CACHE_VS_8:
+	case SQ_ALU_CONST_CACHE_VS_9:
+	case SQ_ALU_CONST_CACHE_VS_10:
+	case SQ_ALU_CONST_CACHE_VS_11:
+	case SQ_ALU_CONST_CACHE_VS_12:
+	case SQ_ALU_CONST_CACHE_VS_13:
+	case SQ_ALU_CONST_CACHE_VS_14:
+	case SQ_ALU_CONST_CACHE_VS_15:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case SX_MEMORY_EXPORT_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONFIG_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		break;
+	case SX_MISC:
+		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
+		break;
+	default:
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return -EINVAL;
+	}
+	return 0;
+}
+
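+/* Minify a texture dimension for the given mip level; levels above the
+ * base are rounded up to a power of two to match the hardware's mip
+ * chain layout.
+ */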
+unsigned r600_mip_minify(unsigned size, unsigned level)
+{
+	unsigned val;
+
+	val = max(1U, size >> level);
+	if (level > 0)
+		val = roundup_pow_of_two(val);
+	return val;
+}
+
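+/* Compute the level-0 size and the mip-chain size of a texture by walking
+ * levels blevel..llevel, applying the per-level block and height alignment
+ * and scaling by the depth or the number of faces; when blevel is 0 the
+ * level-0 size is subtracted so mipmap_size covers only levels > 0.
+ */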
+static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
+			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
+			      unsigned block_align, unsigned height_align, unsigned base_align,
+			      unsigned *l0_size, unsigned *mipmap_size)
+{
+	unsigned offset, i, level;
+	unsigned width, height, depth, size;
+	unsigned blocksize;
+	unsigned nbx, nby;
+	unsigned nlevels = llevel - blevel + 1;
+
+	*l0_size = -1;
+	blocksize = r600_fmt_get_blocksize(format);
+
+	w0 = r600_mip_minify(w0, 0);
+	h0 = r600_mip_minify(h0, 0);
+	d0 = r600_mip_minify(d0, 0);
+	for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+		width = r600_mip_minify(w0, i);
+		nbx = r600_fmt_get_nblocksx(format, width);
+
+		nbx = round_up(nbx, block_align);
+
+		height = r600_mip_minify(h0, i);
+		nby = r600_fmt_get_nblocksy(format, height);
+		nby = round_up(nby, height_align);
+
+		depth = r600_mip_minify(d0, i);
+
+		size = nbx * nby * blocksize * nsamples;
+		if (nfaces)
+			size *= nfaces;
+		else
+			size *= depth;
+
+		if (i == 0)
+			*l0_size = size;
+
+		if (i == 0 || i == 1)
+			offset = round_up(offset, base_align);
+
+		offset += size;
+	}
+	*mipmap_size = offset;
+	if (llevel == 0)
+		*mipmap_size = *l0_size;
+	if (!blevel)
+		*mipmap_size -= *l0_size;
+}
+
+/**
+ * r600_check_texture_resource() - check texture resource validity
+ * @p: parser structure holding parsing context
+ * @idx: index into the cs buffer
+ * @texture: texture's bo structure
+ * @mipmap: mipmap's bo structure
+ * @base_offset: base offset for the texture data
+ * @mip_offset: base offset for the mipmap data
+ * @tiling_flags: tiling flags from the relocation
+ *
+ * This function will check that the resource has valid fields and that
+ * the texture and mipmap bo objects are big enough to cover this resource.
+ */
+static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
+					      struct radeon_bo *texture,
+					      struct radeon_bo *mipmap,
+					      u64 base_offset,
+					      u64 mip_offset,
+					      u32 tiling_flags)
+{
+	struct r600_cs_track *track = p->track;
+	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
+	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
+	u32 height_align, pitch, pitch_align, depth_align;
+	u32 barray, larray;
+	u64 base_align;
+	struct array_mode_checker array_check;
+	u32 format;
+	bool is_array;
+
+	/* on legacy kernels we don't perform the advanced checks */
+	if (p->rdev == NULL)
+		return 0;
+
+	/* convert to bytes */
+	base_offset <<= 8;
+	mip_offset <<= 8;
+
+	word0 = radeon_get_ib_value(p, idx + 0);
+	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+		else if (tiling_flags & RADEON_TILING_MICRO)
+			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+	}
+	word1 = radeon_get_ib_value(p, idx + 1);
+	word2 = radeon_get_ib_value(p, idx + 2) << 8;
+	word3 = radeon_get_ib_value(p, idx + 3) << 8;
+	word4 = radeon_get_ib_value(p, idx + 4);
+	word5 = radeon_get_ib_value(p, idx + 5);
+	dim = G_038000_DIM(word0);
+	w0 = G_038000_TEX_WIDTH(word0) + 1;
+	pitch = (G_038000_PITCH(word0) + 1) * 8;
+	h0 = G_038004_TEX_HEIGHT(word1) + 1;
+	d0 = G_038004_TEX_DEPTH(word1);
+	format = G_038004_DATA_FORMAT(word1);
+	blevel = G_038010_BASE_LEVEL(word4);
+	llevel = G_038014_LAST_LEVEL(word5);
+	/* pitch in texels */
+	array_check.array_mode = G_038000_TILE_MODE(word0);
+	array_check.group_size = track->group_size;
+	array_check.nbanks = track->nbanks;
+	array_check.npipes = track->npipes;
+	array_check.nsamples = 1;
+	array_check.blocksize = r600_fmt_get_blocksize(format);
+	nfaces = 1;
+	is_array = false;
+	switch (dim) {
+	case V_038000_SQ_TEX_DIM_1D:
+	case V_038000_SQ_TEX_DIM_2D:
+	case V_038000_SQ_TEX_DIM_3D:
+		break;
+	case V_038000_SQ_TEX_DIM_CUBEMAP:
+		if (p->family >= CHIP_RV770)
+			nfaces = 8;
+		else
+			nfaces = 6;
+		break;
+	case V_038000_SQ_TEX_DIM_1D_ARRAY:
+	case V_038000_SQ_TEX_DIM_2D_ARRAY:
+		is_array = true;
+		break;
+	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+		is_array = true;
+		/* fall through */
+	case V_038000_SQ_TEX_DIM_2D_MSAA:
+		array_check.nsamples = 1 << llevel;
+		llevel = 0;
+		break;
+	default:
+		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
+		return -EINVAL;
+	}
+	if (!r600_fmt_is_valid_texture(format, p->family)) {
+		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
+			 __func__, __LINE__, format);
+		return -EINVAL;
+	}
+
+	if (r600_get_array_mode_alignment(&array_check,
+					  &pitch_align, &height_align, &depth_align, &base_align)) {
+		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
+			 __func__, __LINE__, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+
+	/* XXX check height as well... */
+
+	if (!IS_ALIGNED(pitch, pitch_align)) {
+		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(base_offset, base_align)) {
+		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
+			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+	if (!IS_ALIGNED(mip_offset, base_align)) {
+		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
+			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
+		return -EINVAL;
+	}
+
+	if (blevel > llevel) {
+		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
+			 blevel, llevel);
+	}
+	if (is_array) {
+		barray = G_038014_BASE_ARRAY(word5);
+		larray = G_038014_LAST_ARRAY(word5);
+
+		nfaces = larray - barray + 1;
+	}
+	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
+			  pitch_align, height_align, base_align,
+			  &l0_size, &mipmap_size);
+	/* the value read from the IB gives us the offset into the texture bo */
+	if ((l0_size + word2) > radeon_bo_size(texture)) {
+		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
+			 w0, h0, pitch_align, height_align,
+			 array_check.array_mode, format, word2,
+			 l0_size, radeon_bo_size(texture));
+		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
+		return -EINVAL;
+	}
+	/* the value read from the IB gives us the offset into the mipmap bo */
+	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
+		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
+	}
+	return 0;
+}
+
+static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+	u32 m, i;
+
+	i = (reg >> 7);
+	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
+		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+		return false;
+	}
+	m = 1 << ((reg >> 2) & 31);
+	if (!(r600_reg_safe_bm[i] & m))
+		return true;
+	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+	return false;
+}
+
+static int r600_packet3_check(struct radeon_cs_parser *p,
+				struct radeon_cs_packet *pkt)
+{
+	struct radeon_cs_reloc *reloc;
+	struct r600_cs_track *track;
+	volatile u32 *ib;
+	unsigned idx;
+	unsigned i;
+	unsigned start_reg, end_reg, reg;
+	int r;
+	u32 idx_value;
+
+	track = (struct r600_cs_track *)p->track;
+	ib = p->ib.ptr;
+	idx = pkt->idx + 1;
+	idx_value = radeon_get_ib_value(p, idx);
+
+	switch (pkt->opcode) {
+	case PACKET3_SET_PREDICATION:
+	{
+		int pred_op;
+		int tmp;
+		uint64_t offset;
+
+		if (pkt->count != 1) {
+			DRM_ERROR("bad SET PREDICATION\n");
+			return -EINVAL;
+		}
+
+		tmp = radeon_get_ib_value(p, idx + 1);
+		pred_op = (tmp >> 16) & 0x7;
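+		/* op 0 clears the predicate, ops 1 and 2 set it;
+		 * anything else is rejected below */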
+
+		/* for the clear predicate operation */
+		if (pred_op == 0)
+			return 0;
+
+		if (pred_op > 2) {
+			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+			return -EINVAL;
+		}
+
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			DRM_ERROR("bad SET PREDICATION\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         (idx_value & 0xfffffff0) +
+		         ((u64)(tmp & 0xff) << 32);
+
+		ib[idx + 0] = offset;
+		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+	}
+	break;
+
+	case PACKET3_START_3D_CMDBUF:
+		if (p->family >= CHIP_RV770 || pkt->count) {
+			DRM_ERROR("bad START_3D\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_CONTEXT_CONTROL:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad CONTEXT_CONTROL\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_INDEX_TYPE:
+	case PACKET3_NUM_INSTANCES:
+		if (pkt->count) {
+			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_DRAW_INDEX:
+	{
+		uint64_t offset;
+		if (pkt->count != 3) {
+			DRM_ERROR("bad DRAW_INDEX\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			DRM_ERROR("bad DRAW_INDEX\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         idx_value +
+		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+
+		r = r600_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	}
+	case PACKET3_DRAW_INDEX_AUTO:
+		if (pkt->count != 1) {
+			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+			return -EINVAL;
+		}
+		r = r600_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+			return r;
+		}
+		break;
+	case PACKET3_DRAW_INDEX_IMMD_BE:
+	case PACKET3_DRAW_INDEX_IMMD:
+		if (pkt->count < 2) {
+			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+			return -EINVAL;
+		}
+		r = r600_cs_track_check(p);
+		if (r) {
+			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+			return r;
+		}
+		break;
+	case PACKET3_WAIT_REG_MEM:
+		if (pkt->count != 5) {
+			DRM_ERROR("bad WAIT_REG_MEM\n");
+			return -EINVAL;
+		}
+		/* bit 4 is reg (0) or mem (1) */
+		if (idx_value & 0x10) {
+			uint64_t offset;
+
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad WAIT_REG_MEM\n");
+				return -EINVAL;
+			}
+
+			offset = reloc->lobj.gpu_offset +
+			         (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
+			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		} else if (idx_value & 0x100) {
+			DRM_ERROR("cannot use PFP on REG wait\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_CP_DMA:
+	{
+		u32 command, size;
+		u64 offset, tmp;
+		if (pkt->count != 4) {
+			DRM_ERROR("bad CP DMA\n");
+			return -EINVAL;
+		}
+		command = radeon_get_ib_value(p, idx+4);
+		size = command & 0x1fffff;
+		if (command & PACKET3_CP_DMA_CMD_SAS) {
+			/* src address space is register */
+			DRM_ERROR("CP DMA SAS not supported\n");
+			return -EINVAL;
+		} else {
+			if (command & PACKET3_CP_DMA_CMD_SAIC) {
+				DRM_ERROR("CP DMA SAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			/* src address space is memory */
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad CP DMA SRC\n");
+				return -EINVAL;
+			}
+
+			tmp = radeon_get_ib_value(p, idx) +
+				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+			offset = reloc->lobj.gpu_offset + tmp;
+
+			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+				dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+					 tmp + size, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+
+			ib[idx] = offset;
+			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+		}
+		if (command & PACKET3_CP_DMA_CMD_DAS) {
+			/* dst address space is register */
+			DRM_ERROR("CP DMA DAS not supported\n");
+			return -EINVAL;
+		} else {
+			/* dst address space is memory */
+			if (command & PACKET3_CP_DMA_CMD_DAIC) {
+				DRM_ERROR("CP DMA DAIC only supported for registers\n");
+				return -EINVAL;
+			}
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad CP DMA DST\n");
+				return -EINVAL;
+			}
+
+			tmp = radeon_get_ib_value(p, idx+2) +
+				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+			offset = reloc->lobj.gpu_offset + tmp;
+
+			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+				dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+					 tmp + size, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+
+			ib[idx+2] = offset;
+			ib[idx+3] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	}
+	case PACKET3_SURFACE_SYNC:
+		if (pkt->count != 3) {
+			DRM_ERROR("bad SURFACE_SYNC\n");
+			return -EINVAL;
+		}
+		/* 0xffffffff/0x0 is flush all cache flag */
+		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
+		    radeon_get_ib_value(p, idx + 2) != 0) {
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad SURFACE_SYNC\n");
+				return -EINVAL;
+			}
+			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		}
+		break;
+	case PACKET3_EVENT_WRITE:
+		if (pkt->count != 2 && pkt->count != 0) {
+			DRM_ERROR("bad EVENT_WRITE\n");
+			return -EINVAL;
+		}
+		if (pkt->count) {
+			uint64_t offset;
+
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad EVENT_WRITE\n");
+				return -EINVAL;
+			}
+			offset = reloc->lobj.gpu_offset +
+			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+			ib[idx+1] = offset & 0xfffffff8;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_EVENT_WRITE_EOP:
+	{
+		uint64_t offset;
+
+		if (pkt->count != 4) {
+			DRM_ERROR("bad EVENT_WRITE_EOP\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			DRM_ERROR("bad EVENT_WRITE\n");
+			return -EINVAL;
+		}
+
+		offset = reloc->lobj.gpu_offset +
+		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+
+		ib[idx+1] = offset & 0xfffffffc;
+		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+		break;
+	}
+	case PACKET3_SET_CONFIG_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
+		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			r = r600_cs_check_reg(p, reg, idx+1+i);
+			if (r)
+				return r;
+		}
+		break;
+	case PACKET3_SET_CONTEXT_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
+		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			r = r600_cs_check_reg(p, reg, idx+1+i);
+			if (r)
+				return r;
+		}
+		break;
+	case PACKET3_SET_RESOURCE:
+		if (pkt->count % 7) {
+			DRM_ERROR("bad SET_RESOURCE\n");
+			return -EINVAL;
+		}
+		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
+		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
+		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
+			DRM_ERROR("bad SET_RESOURCE\n");
+			return -EINVAL;
+		}
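+		/* each SET_RESOURCE record is 7 dwords on r600, hence the
+		 * count % 7 check above */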
+		for (i = 0; i < (pkt->count / 7); i++) {
+			struct radeon_bo *texture, *mipmap;
+			u32 size, offset, base_offset, mip_offset;
+
+			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
+			case SQ_TEX_VTX_VALID_TEXTURE:
+				/* tex base */
+				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE\n");
+					return -EINVAL;
+				}
+				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+				}
+				texture = reloc->robj;
+				/* tex mip base */
+				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE\n");
+					return -EINVAL;
+				}
+				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+				mipmap = reloc->robj;
+				r = r600_check_texture_resource(p,  idx+(i*7)+1,
+								texture, mipmap,
+								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
+								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
+								reloc->lobj.tiling_flags);
+				if (r)
+					return r;
+				ib[idx+1+(i*7)+2] += base_offset;
+				ib[idx+1+(i*7)+3] += mip_offset;
+				break;
+			case SQ_TEX_VTX_VALID_BUFFER:
+			{
+				uint64_t offset64;
+				/* vtx base */
+				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+				if (r) {
+					DRM_ERROR("bad SET_RESOURCE\n");
+					return -EINVAL;
+				}
+				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
+				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
+				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+					/* force size to size of the buffer */
+					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
+						 size + offset, radeon_bo_size(reloc->robj));
+					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
+				}
+
+				offset64 = reloc->lobj.gpu_offset + offset;
+				ib[idx+1+(i*7)+0] = offset64;
+				ib[idx+1+(i*7)+2] = (ib[idx+1+(i*7)+2] & 0xffffff00) |
+						    (upper_32_bits(offset64) & 0xff);
+				break;
+			}
+			case SQ_TEX_VTX_INVALID_TEXTURE:
+			case SQ_TEX_VTX_INVALID_BUFFER:
+			default:
+				DRM_ERROR("bad SET_RESOURCE\n");
+				return -EINVAL;
+			}
+		}
+		break;
+	case PACKET3_SET_ALU_CONST:
+		if (track->sq_config & DX9_CONSTS) {
+			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
+			end_reg = 4 * pkt->count + start_reg - 4;
+			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
+			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
+			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
+				DRM_ERROR("bad SET_ALU_CONST\n");
+				return -EINVAL;
+			}
+		}
+		break;
+	case PACKET3_SET_BOOL_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
+		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
+		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
+			DRM_ERROR("bad SET_BOOL_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_LOOP_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
+		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
+		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
+			DRM_ERROR("bad SET_LOOP_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_CTL_CONST:
+		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
+		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
+		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
+			DRM_ERROR("bad SET_CTL_CONST\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_SAMPLER:
+		if (pkt->count % 3) {
+			DRM_ERROR("bad SET_SAMPLER\n");
+			return -EINVAL;
+		}
+		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
+		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
+		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
+			DRM_ERROR("bad SET_SAMPLER\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_STRMOUT_BASE_UPDATE:
+		/* RS780 and RS880 also need this */
+		if (p->family < CHIP_RS780) {
+			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+			return -EINVAL;
+		}
+		if (pkt->count != 1) {
+			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+			return -EINVAL;
+		}
+		if (idx_value > 3) {
+			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+			return -EINVAL;
+		}
+		{
+			u64 offset;
+
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+				return -EINVAL;
+			}
+
+			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+				return -EINVAL;
+			}
+
+			offset = radeon_get_ib_value(p, idx+1) << 8;
+			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+					  offset, track->vgt_strmout_bo_offset[idx_value]);
+				return -EINVAL;
+			}
+
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+		}
+		break;
+	case PACKET3_SURFACE_BASE_UPDATE:
+		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
+			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+			return -EINVAL;
+		}
+		if (pkt->count) {
+			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+			return -EINVAL;
+		}
+		break;
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+		if (pkt->count != 4) {
+			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+			return -EINVAL;
+		}
+		/* Updating memory at DST_ADDRESS. */
+		if (idx_value & 0x1) {
+			u64 offset;
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+1);
+			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+1] = offset;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		}
+		/* Reading data from SRC_ADDRESS. */
+		if (((idx_value >> 1) & 0x3) == 2) {
+			u64 offset;
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+3);
+			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+3] = offset;
+			ib[idx+4] = upper_32_bits(offset) & 0xff;
+		}
+		break;
+	case PACKET3_MEM_WRITE:
+	{
+		u64 offset;
+
+		if (pkt->count != 3) {
+			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+			return -EINVAL;
+		}
+		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+		if (r) {
+			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+			return -EINVAL;
+		}
+		offset = radeon_get_ib_value(p, idx+0);
+		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
+		if (offset & 0x7) {
+			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+			return -EINVAL;
+		}
+		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
+			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+				  offset + 8, radeon_bo_size(reloc->robj));
+			return -EINVAL;
+		}
+		offset += reloc->lobj.gpu_offset;
+		ib[idx+0] = offset;
+		ib[idx+1] = upper_32_bits(offset) & 0xff;
+		break;
+	}
+	case PACKET3_COPY_DW:
+		if (pkt->count != 4) {
+			DRM_ERROR("bad COPY_DW (invalid count)\n");
+			return -EINVAL;
+		}
+		if (idx_value & 0x1) {
+			u64 offset;
+			/* SRC is memory. */
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+1);
+			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+1] = offset;
+			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		} else {
+			/* SRC is a reg. */
+			reg = radeon_get_ib_value(p, idx+1) << 2;
+			if (!r600_is_safe_reg(p, reg, idx+1))
+				return -EINVAL;
+		}
+		if (idx_value & 0x2) {
+			u64 offset;
+			/* DST is memory. */
+			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+			if (r) {
+				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+				return -EINVAL;
+			}
+			offset = radeon_get_ib_value(p, idx+3);
+			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+					  offset + 4, radeon_bo_size(reloc->robj));
+				return -EINVAL;
+			}
+			offset += reloc->lobj.gpu_offset;
+			ib[idx+3] = offset;
+			ib[idx+4] = upper_32_bits(offset) & 0xff;
+		} else {
+			/* DST is a reg. */
+			reg = radeon_get_ib_value(p, idx+3) << 2;
+			if (!r600_is_safe_reg(p, reg, idx+3))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_NOP:
+		break;
+	default:
+		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int r600_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	struct r600_cs_track *track;
+	int r;
+
+	if (p->track == NULL) {
+		/* initialize tracker, we are in kms */
+		track = kzalloc(sizeof(*track), GFP_KERNEL);
+		if (track == NULL)
+			return -ENOMEM;
+		r600_cs_track_init(track);
+		if (p->rdev->family < CHIP_RV770) {
+			track->npipes = p->rdev->config.r600.tiling_npipes;
+			track->nbanks = p->rdev->config.r600.tiling_nbanks;
+			track->group_size = p->rdev->config.r600.tiling_group_size;
+		} else if (p->rdev->family <= CHIP_RV740) {
+			track->npipes = p->rdev->config.rv770.tiling_npipes;
+			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
+			track->group_size = p->rdev->config.rv770.tiling_group_size;
+		}
+		p->track = track;
+	}
+	do {
+		r = radeon_cs_packet_parse(p, &pkt, p->idx);
+		if (r) {
+			kfree(p->track);
+			p->track = NULL;
+			return r;
+		}
+		p->idx += pkt.count + 2;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			r = r600_cs_parse_packet0(p, &pkt);
+			break;
+		case RADEON_PACKET_TYPE2:
+			break;
+		case RADEON_PACKET_TYPE3:
+			r = r600_packet3_check(p, &pkt);
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+			kfree(p->track);
+			p->track = NULL;
+			return -EINVAL;
+		}
+		if (r) {
+			kfree(p->track);
+			p->track = NULL;
+			return r;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	kfree(p->track);
+	p->track = NULL;
+	return 0;
+}
+
+#ifdef CONFIG_DRM_RADEON_UMS
+
+/**
+ * cs_parser_fini() - clean parser states
+ * @parser:	parser structure holding parsing context.
+ * @error:	error number
+ *
+ * If error is set, unvalidate the buffers; otherwise just free the
+ * memory used by the parsing context.
+ **/
+static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+{
+	unsigned i;
+
+	kfree(parser->relocs);
+	for (i = 0; i < parser->nchunks; i++) {
+		kfree(parser->chunks[i].kdata);
+		if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
+			kfree(parser->chunks[i].kpage[0]);
+			kfree(parser->chunks[i].kpage[1]);
+		}
+	}
+	kfree(parser->chunks);
+	kfree(parser->chunks_array);
+}
+
+static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
+{
+	if (p->chunk_relocs_idx == -1) {
+		return 0;
+	}
+	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+	if (p->relocs == NULL) {
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
+			unsigned family, u32 *ib, int *l)
+{
+	struct radeon_cs_parser parser;
+	struct radeon_cs_chunk *ib_chunk;
+	struct r600_cs_track *track;
+	int r;
+
+	/* initialize tracker */
+	track = kzalloc(sizeof(*track), GFP_KERNEL);
+	if (track == NULL)
+		return -ENOMEM;
+	r600_cs_track_init(track);
+	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
+	/* initialize parser */
+	memset(&parser, 0, sizeof(struct radeon_cs_parser));
+	parser.filp = filp;
+	parser.dev = &dev->pdev->dev;
+	parser.rdev = NULL;
+	parser.family = family;
+	parser.track = track;
+	parser.ib.ptr = ib;
+	r = radeon_cs_parser_init(&parser, data);
+	if (r) {
+		DRM_ERROR("Failed to initialize parser !\n");
+		r600_cs_parser_fini(&parser, r);
+		return r;
+	}
+	r = r600_cs_parser_relocs_legacy(&parser);
+	if (r) {
+		DRM_ERROR("Failed to parse relocation !\n");
+		r600_cs_parser_fini(&parser, r);
+		return r;
+	}
+	/* Copy the packet into the IB, the parser will read from the
+	 * input memory (cached) and write to the IB (which can be
+	 * uncached). */
+	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+	parser.ib.length_dw = ib_chunk->length_dw;
+	*l = parser.ib.length_dw;
+	r = r600_cs_parse(&parser);
+	if (r) {
+		DRM_ERROR("Invalid command stream !\n");
+		r600_cs_parser_fini(&parser, r);
+		return r;
+	}
+	r = radeon_cs_finish_pages(&parser);
+	if (r) {
+		DRM_ERROR("Invalid command stream !\n");
+		r600_cs_parser_fini(&parser, r);
+		return r;
+	}
+	r600_cs_parser_fini(&parser, r);
+	return r;
+}
+
+void r600_cs_legacy_init(void)
+{
+	r600_nomm = 1;
+}
+
+#endif
+
+/*
+ *  DMA
+ */
+/**
+ * r600_dma_cs_next_reloc() - parse next reloc
+ * @p:		parser structure holding parsing context.
+ * @cs_reloc:		reloc information
+ *
+ * Return the next reloc, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+			   struct radeon_cs_reloc **cs_reloc)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	unsigned idx;
+
+	*cs_reloc = NULL;
+	if (p->chunk_relocs_idx == -1) {
+		DRM_ERROR("No relocation chunk !\n");
+		return -EINVAL;
+	}
+	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	idx = p->dma_reloc_idx;
+	if (idx >= p->nrelocs) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, p->nrelocs);
+		return -EINVAL;
+	}
+	*cs_reloc = p->relocs_ptr[idx];
+	p->dma_reloc_idx++;
+	return 0;
+}
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
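+
+/* Illustrative decode of a hypothetical header word: 0x40800010 splits
+ * as cmd = 0x4 (bits 31:28), count = 0x0010 dwords (bits 15:0) and
+ * tiled = 1 (bit 23).
+ */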
+
+/**
+ * r600_dma_cs_parse() - parse the DMA IB
+ * @p:		parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (R6xx-R7xx)
+ * Returns 0 for success and an error on failure.
+ **/
+int r600_dma_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	struct radeon_cs_reloc *src_reloc, *dst_reloc;
+	u32 header, cmd, count, tiled;
+	volatile u32 *ib = p->ib.ptr;
+	u32 idx, idx_value;
+	u64 src_offset, dst_offset;
+	int r;
+
+	do {
+		if (p->idx >= ib_chunk->length_dw) {
+			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+				  p->idx, ib_chunk->length_dw);
+			return -EINVAL;
+		}
+		idx = p->idx;
+		header = radeon_get_ib_value(p, idx);
+		cmd = GET_DMA_CMD(header);
+		count = GET_DMA_COUNT(header);
+		tiled = GET_DMA_T(header);
+
+		switch (cmd) {
+		case DMA_PACKET_WRITE:
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_WRITE\n");
+				return -EINVAL;
+			}
+			if (tiled) {
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				p->idx += count + 5;
+			} else {
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
+
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += count + 3;
+			}
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_COPY:
+			r = r600_dma_cs_next_reloc(p, &src_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_COPY\n");
+				return -EINVAL;
+			}
+			if (tiled) {
+				idx_value = radeon_get_ib_value(p, idx + 2);
+				/* detile bit */
+				if (idx_value & (1 << 31)) {
+					/* tiled src, linear dst */
+					src_offset = radeon_get_ib_value(p, idx+1);
+					src_offset <<= 8;
+					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+					dst_offset = radeon_get_ib_value(p, idx+5);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				} else {
+					/* linear src, tiled dst */
+					src_offset = radeon_get_ib_value(p, idx+5);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset <<= 8;
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				}
+				p->idx += 7;
+			} else {
+				if (p->family >= CHIP_RV770) {
+					src_offset = radeon_get_ib_value(p, idx+2);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+					p->idx += 5;
+				} else {
+					src_offset = radeon_get_ib_value(p, idx+2);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
+
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+					ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
+					p->idx += 4;
+				}
+			}
+			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+				dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
+					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+				return -EINVAL;
+			}
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA copy dst buffer too small (%llu %lu)\n",
+					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			break;
+		case DMA_PACKET_CONSTANT_FILL:
+			if (p->family < CHIP_RV770) {
+				DRM_ERROR("Constant Fill is 7xx only !\n");
+				return -EINVAL;
+			}
+			r = r600_dma_cs_next_reloc(p, &dst_reloc);
+			if (r) {
+				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+				return -EINVAL;
+			}
+			dst_offset = radeon_get_ib_value(p, idx+1);
+			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
+			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+				return -EINVAL;
+			}
+			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+			p->idx += 4;
+			break;
+		case DMA_PACKET_NOP:
+			p->idx += 1;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+			return -EINVAL;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+	for (r = 0; r < p->ib.length_dw; r++) {
+		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
+		mdelay(1);
+	}
+#endif
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/r600_hdmi.c b/linux-imx/drivers/gpu/drm/radeon/r600_hdmi.c
new file mode 100644
index 0000000..5df7116
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -0,0 +1,561 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#include <linux/hdmi.h>
+#include <linux/gcd.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+#include "atom.h"
+
+/*
+ * HDMI color format
+ */
+enum r600_hdmi_color_format {
+	RGB = 0,
+	YCC_422 = 1,
+	YCC_444 = 2
+};
+
+/*
+ * IEC60958 status bits
+ */
+enum r600_hdmi_iec_status_bits {
+	AUDIO_STATUS_DIG_ENABLE   = 0x01,
+	AUDIO_STATUS_V            = 0x02,
+	AUDIO_STATUS_VCFG         = 0x04,
+	AUDIO_STATUS_EMPHASIS     = 0x08,
+	AUDIO_STATUS_COPYRIGHT    = 0x10,
+	AUDIO_STATUS_NONAUDIO     = 0x20,
+	AUDIO_STATUS_PROFESSIONAL = 0x40,
+	AUDIO_STATUS_LEVEL        = 0x80
+};
+
+static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
+    /*	     32kHz	  44.1kHz	48kHz    */
+    /* Clock      N     CTS      N     CTS      N     CTS */
+    {  25175,  4096,  25175, 28224, 125875,  6144,  25175 }, /*  25.20/1.001 MHz */
+    {  25200,  4096,  25200,  6272,  28000,  6144,  25200 }, /*  25.20       MHz */
+    {  27000,  4096,  27000,  6272,  30000,  6144,  27000 }, /*  27.00       MHz */
+    {  27027,  4096,  27027,  6272,  30030,  6144,  27027 }, /*  27.00*1.001 MHz */
+    {  54000,  4096,  54000,  6272,  60000,  6144,  54000 }, /*  54.00       MHz */
+    {  54054,  4096,  54054,  6272,  60060,  6144,  54054 }, /*  54.00*1.001 MHz */
+    {  74176,  4096,  74176,  5733,  75335,  6144,  74176 }, /*  74.25/1.001 MHz */
+    {  74250,  4096,  74250,  6272,  82500,  6144,  74250 }, /*  74.25       MHz */
+    { 148352,  4096, 148352,  5733, 150670,  6144, 148352 }, /* 148.50/1.001 MHz */
+    { 148500,  4096, 148500,  6272, 165000,  6144, 148500 }, /* 148.50       MHz */
+};
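+
+/* ACR background: the sink regenerates the audio clock such that
+ * 128*audio_rate = pixel_clock * N / CTS (per the HDMI spec); e.g. the
+ * 25.20 MHz / 32 kHz entry satisfies 25200000 * 4096 / 25200 = 128 * 32000.
+ */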
+
+
+/*
+ * calculate CTS and N values if they are not found in the table
+ */
+static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
+{
+	int n, cts;
+	unsigned long div, mul;
+
+	/* Safe, but overly large values */
+	n = 128 * freq;
+	cts = clock * 1000;
+
+	/* Smallest valid fraction */
+	div = gcd(n, cts);
+
+	n /= div;
+	cts /= div;
+
+	/*
+	 * The optimal N is 128*freq/1000. Calculate the closest larger
+	 * value that doesn't truncate any bits.
+	 */
+	mul = ((128*freq/1000) + (n-1))/n;
+
+	n *= mul;
+	cts *= mul;
+
+	/* Check that we are in spec (not always possible) */
+	if (n < (128*freq/1500))
+		printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
+	if (n > (128*freq/300))
+		printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
+
+	*N = n;
+	*CTS = cts;
+
+	DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
+		  *N, *CTS, freq);
+}
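+
+/* Worked example of the fallback path (numbers chosen to match a table
+ * entry above): clock = 74176, freq = 48000 gives n = 6144000 and
+ * cts = 74176000 with gcd = 64000, reducing to n = 96, cts = 1159;
+ * then mul = (6144 + 95) / 96 = 64, yielding N = 6144, CTS = 74176.
+ */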
+
+struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
+{
+	struct radeon_hdmi_acr res;
+	u8 i;
+
+	/* Precalculated values for common clocks */
+	for (i = 0; i < ARRAY_SIZE(r600_hdmi_predefined_acr); i++) {
+		if (r600_hdmi_predefined_acr[i].clock == clock)
+			return r600_hdmi_predefined_acr[i];
+	}
+
+	/* And odd clocks get manually calculated */
+	r600_hdmi_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
+	r600_hdmi_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
+	r600_hdmi_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
+
+	return res;
+}
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
+	WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
+
+	WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
+	WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
+
+	WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
+	WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
+}
+
+/*
+ * build an HDMI Video Info Frame
+ */
+static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+					   void *buffer, size_t size)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+	uint8_t *frame = buffer + 3;
+	uint8_t *header = buffer;
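+
+	/* The packed infoframe begins with a 3-byte header (type, version,
+	 * length), so frame points at the checksum byte followed by the
+	 * payload; header[1] (the version byte) lands in the top byte of
+	 * HDMI0_AVI_INFO3 below.
+	 */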
+
+	WREG32(HDMI0_AVI_INFO0 + offset,
+		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+	WREG32(HDMI0_AVI_INFO1 + offset,
+		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+	WREG32(HDMI0_AVI_INFO2 + offset,
+		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+	WREG32(HDMI0_AVI_INFO3 + offset,
+		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
+}
+
+/*
+ * build an Audio Info Frame
+ */
+static void r600_hdmi_update_audio_infoframe(struct drm_encoder *encoder,
+					     const void *buffer, size_t size)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+	const u8 *frame = buffer + 3;
+
+	WREG32(HDMI0_AUDIO_INFO0 + offset,
+		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+	WREG32(HDMI0_AUDIO_INFO1 + offset,
+		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
+}
+
+/*
+ * test if audio buffer is filled enough to start playing
+ */
+static bool r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+
+	return (RREG32(HDMI0_STATUS + offset) & 0x10) != 0;
+}
+
+/*
+ * has the buffer status changed since the last call?
+ */
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	int status, result;
+
+	if (!dig->afmt || !dig->afmt->enabled)
+		return 0;
+
+	status = r600_hdmi_is_audio_buffer_filled(encoder);
+	result = dig->afmt->last_buffer_filled_status != status;
+	dig->afmt->last_buffer_filled_status = status;
+
+	return result;
+}
+
+/*
+ * write the audio workaround status to the hardware
+ */
+static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	uint32_t offset = dig->afmt->offset;
+	bool hdmi_audio_workaround = false; /* FIXME */
+	u32 value;
+
+	if (!hdmi_audio_workaround ||
+	    r600_hdmi_is_audio_buffer_filled(encoder))
+		value = 0; /* disable workaround */
+	else
+		value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
+	WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
+		 value, ~HDMI0_AUDIO_TEST_EN);
+}
+
+void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u32 base_rate = 24000;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	/* There are two DTOs, selected by DCCG_AUDIO_DTO_SELECT.
+	 * It doesn't matter which one you use, so just use the first one.
+	 */
+	/* XXX two dtos; generally use dto0 for hdmi */
+	/* Express [24MHz / target pixel clock] as an exact rational
+	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
+	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+	 */
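+	/* Illustrative numbers: a 74.25 MHz pixel clock (clock = 74250)
+	 * gives PHASE = 24000 * 100 and MODULE = 74250 * 100, i.e. the
+	 * exact ratio 24 MHz / 74.25 MHz.
+	 */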
+	if (ASIC_IS_DCE32(rdev)) {
+		if (dig->dig_encoder == 0) {
+			WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+			WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+		} else {
+			WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+			WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+		}
+	} else {
+		/* according to the reg specs, this should be DCE3.2 only, but in
+		 * practice it seems to cover DCE2.0/3.0/3.1 as well.
+		 */
+		if (dig->dig_encoder == 0) {
+			WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
+			WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+		} else {
+			WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
+			WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
+			WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+		}
+	}
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+	struct hdmi_avi_infoframe frame;
+	uint32_t offset;
+	ssize_t err;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	/* Silent, r600_hdmi_enable will raise WARN for us */
+	if (!dig->afmt->enabled)
+		return;
+	offset = dig->afmt->offset;
+
+	r600_audio_set_dto(encoder, mode->clock);
+
+	WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+	       HDMI0_NULL_SEND); /* send null packets when required */
+
+	WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+	if (ASIC_IS_DCE32(rdev)) {
+		WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+		       HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+		       HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+		WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+		       AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+		       AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+	} else {
+		WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+		       HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+		       HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+		       HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+		       HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+	}
+
+	WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+	       HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
+	       HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
+
+	WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+	       HDMI0_NULL_SEND | /* send null packets when required */
+	       HDMI0_GC_SEND | /* send general control packets */
+	       HDMI0_GC_CONT); /* send general control packets every frame */
+
+	/* TODO: HDMI0_AUDIO_INFO_UPDATE */
+	WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
+	       HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+	       HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+	       HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+	       HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
+
+	WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
+	       HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
+	       HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+	WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
+
+	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+	if (err < 0) {
+		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+		return;
+	}
+
+	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+		return;
+	}
+
+	r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
+	r600_hdmi_update_ACR(encoder, mode->clock);
+
+	/* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
+	WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+	WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
+	WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
+	WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
+
+	r600_hdmi_audio_workaround(encoder);
+}
+
+/*
+ * update settings with current parameters from audio engine
+ */
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct r600_audio audio = r600_audio_status(rdev);
+	uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
+	struct hdmi_audio_infoframe frame;
+	uint32_t offset;
+	uint32_t iec;
+	ssize_t err;
+
+	if (!dig->afmt || !dig->afmt->enabled)
+		return;
+	offset = dig->afmt->offset;
+
+	DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
+		 r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
+		  audio.channels, audio.rate, audio.bits_per_sample);
+	DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
+		  (int)audio.status_bits, (int)audio.category_code);
+
+	iec = 0;
+	if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
+		iec |= 1 << 0;
+	if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
+		iec |= 1 << 1;
+	if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
+		iec |= 1 << 2;
+	if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
+		iec |= 1 << 3;
+
+	iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
+
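+	/* The 4-bit codes below appear to be the IEC 60958-3 channel-status
+	 * sampling-frequency field (0x0 = 44.1 kHz, 0x2 = 48 kHz,
+	 * 0x3 = 32 kHz, and so on), assuming the usual IEC 60958 encoding.
+	 */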
+	switch (audio.rate) {
+	case 32000:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
+		break;
+	case 44100:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
+		break;
+	case 48000:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
+		break;
+	case 88200:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
+		break;
+	case 96000:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
+		break;
+	case 176400:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
+		break;
+	case 192000:
+		iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
+		break;
+	}
+
+	WREG32(HDMI0_60958_0 + offset, iec);
+
+	iec = 0;
+	switch (audio.bits_per_sample) {
+	case 16:
+		iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
+		break;
+	case 20:
+		iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
+		break;
+	case 24:
+		iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
+		break;
+	}
+	if (audio.status_bits & AUDIO_STATUS_V)
+		iec |= 0x5 << 16;
+	WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
+
+	err = hdmi_audio_infoframe_init(&frame);
+	if (err < 0) {
+		DRM_ERROR("failed to setup audio infoframe\n");
+		return;
+	}
+
+	frame.channels = audio.channels;
+
+	err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		DRM_ERROR("failed to pack audio infoframe\n");
+		return;
+	}
+
+	r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
+	r600_hdmi_audio_workaround(encoder);
+}
+
+/*
+ * enable or disable the HDMI engine
+ */
+void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u32 hdmi = HDMI0_ERROR_ACK;
+
+	if (!dig || !dig->afmt)
+		return;
+
+	/* Silently ignore redundant enable/disable requests */
+	if (enable && dig->afmt->enabled)
+		return;
+	if (!enable && !dig->afmt->enabled)
+		return;
+
+	/* Older chipsets require setting HDMI and routing manually */
+	if (!ASIC_IS_DCE3(rdev)) {
+		if (enable)
+			hdmi |= HDMI0_ENABLE;
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+			if (enable) {
+				WREG32_OR(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN);
+				hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
+			} else {
+				WREG32_AND(AVIVO_TMDSA_CNTL, ~AVIVO_TMDSA_CNTL_HDMI_EN);
+			}
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+			if (enable) {
+				WREG32_OR(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN);
+				hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+			} else {
+				WREG32_AND(AVIVO_LVTMA_CNTL, ~AVIVO_LVTMA_CNTL_HDMI_EN);
+			}
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_DDI:
+			if (enable) {
+				WREG32_OR(DDIA_CNTL, DDIA_HDMI_EN);
+				hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+			} else {
+				WREG32_AND(DDIA_CNTL, ~DDIA_HDMI_EN);
+			}
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+			if (enable)
+				hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
+			break;
+		default:
+			dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+				radeon_encoder->encoder_id);
+			break;
+		}
+		WREG32(HDMI0_CONTROL + dig->afmt->offset, hdmi);
+	}
+
+	if (rdev->irq.installed) {
+		/* if irq is available use it */
+		/* XXX: shouldn't need this on any asics.  Double check DCE2/3 */
+		if (enable)
+			radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
+		else
+			radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
+	}
+
+	dig->afmt->enabled = enable;
+
+	DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+		  enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
+}
+
diff --git a/linux-imx/drivers/gpu/drm/radeon/r600_reg.h b/linux-imx/drivers/gpu/drm/radeon/r600_reg.h
new file mode 100644
index 0000000..909219b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r600_reg.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __R600_REG_H__
+#define __R600_REG_H__
+
+#define R600_PCIE_PORT_INDEX                0x0038
+#define R600_PCIE_PORT_DATA                 0x003c
+
+#define R600_MC_VM_FB_LOCATION			0x2180
+#define		R600_MC_FB_BASE_MASK			0x0000FFFF
+#define		R600_MC_FB_BASE_SHIFT			0
+#define		R600_MC_FB_TOP_MASK			0xFFFF0000
+#define		R600_MC_FB_TOP_SHIFT			16
+#define R600_MC_VM_AGP_TOP			0x2184
+#define		R600_MC_AGP_TOP_MASK			0x0003FFFF
+#define		R600_MC_AGP_TOP_SHIFT			0
+#define R600_MC_VM_AGP_BOT			0x2188
+#define		R600_MC_AGP_BOT_MASK			0x0003FFFF
+#define		R600_MC_AGP_BOT_SHIFT			0
+#define R600_MC_VM_AGP_BASE			0x218c
+#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR	0x2190
+#define		R600_LOGICAL_PAGE_NUMBER_MASK		0x000FFFFF
+#define		R600_LOGICAL_PAGE_NUMBER_SHIFT		0
+#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR	0x2194
+#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR	0x2198
+
+#define R700_MC_VM_FB_LOCATION			0x2024
+#define		R700_MC_FB_BASE_MASK			0x0000FFFF
+#define		R700_MC_FB_BASE_SHIFT			0
+#define		R700_MC_FB_TOP_MASK			0xFFFF0000
+#define		R700_MC_FB_TOP_SHIFT			16
+#define R700_MC_VM_AGP_TOP			0x2028
+#define		R700_MC_AGP_TOP_MASK			0x0003FFFF
+#define		R700_MC_AGP_TOP_SHIFT			0
+#define R700_MC_VM_AGP_BOT			0x202c
+#define		R700_MC_AGP_BOT_MASK			0x0003FFFF
+#define		R700_MC_AGP_BOT_SHIFT			0
+#define R700_MC_VM_AGP_BASE			0x2030
+#define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR	0x2034
+#define		R700_LOGICAL_PAGE_NUMBER_MASK		0x000FFFFF
+#define		R700_LOGICAL_PAGE_NUMBER_SHIFT		0
+#define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR	0x2038
+#define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR	0x203c
+
+#define R600_RAMCFG				       0x2408
+#       define R600_CHANSIZE                           (1 << 7)
+#       define R600_CHANSIZE_OVERRIDE                  (1 << 10)
+
+
+#define R600_GENERAL_PWRMGT                                        0x618
+#	define R600_OPEN_DRAIN_PADS				   (1 << 11)
+
+#define R600_LOWER_GPIO_ENABLE                                     0x710
+#define R600_CTXSW_VID_LOWER_GPIO_CNTL                             0x718
+#define R600_HIGH_VID_LOWER_GPIO_CNTL                              0x71c
+#define R600_MEDIUM_VID_LOWER_GPIO_CNTL                            0x720
+#define R600_LOW_VID_LOWER_GPIO_CNTL                               0x724
+
+#define R600_D1GRPH_SWAP_CONTROL                               0x610C
+#       define R600_D1GRPH_SWAP_ENDIAN_NONE                    (0 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_16BIT                   (1 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_32BIT                   (2 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_64BIT                   (3 << 0)
+
+#define R600_HDP_NONSURFACE_BASE                                0x2c04
+
+#define R600_BUS_CNTL                                           0x5420
+#       define R600_BIOS_ROM_DIS                                (1 << 1)
+#define R600_CONFIG_CNTL                                        0x5424
+#define R600_CONFIG_MEMSIZE                                     0x5428
+#define R600_CONFIG_F0_BASE                                     0x542C
+#define R600_CONFIG_APER_SIZE                                   0x5430
+
+#define	R600_BIF_FB_EN						0x5490
+#define		R600_FB_READ_EN					(1 << 0)
+#define		R600_FB_WRITE_EN				(1 << 1)
+
+#define R600_CITF_CNTL           				0x200c
+#define		R600_BLACKOUT_MASK				0x00000003
+
+#define R700_MC_CITF_CNTL           				0x25c0
+
+#define R600_ROM_CNTL                              0x1600
+#       define R600_SCK_OVERWRITE                  (1 << 1)
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
+#       define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK  (0xf << 28)
+
+#define R600_CG_SPLL_FUNC_CNTL                     0x600
+#       define R600_SPLL_BYPASS_EN                 (1 << 3)
+#define R600_CG_SPLL_STATUS                        0x60c
+#       define R600_SPLL_CHG_STATUS                (1 << 1)
+
+#define R600_BIOS_0_SCRATCH               0x1724
+#define R600_BIOS_1_SCRATCH               0x1728
+#define R600_BIOS_2_SCRATCH               0x172c
+#define R600_BIOS_3_SCRATCH               0x1730
+#define R600_BIOS_4_SCRATCH               0x1734
+#define R600_BIOS_5_SCRATCH               0x1738
+#define R600_BIOS_6_SCRATCH               0x173c
+#define R600_BIOS_7_SCRATCH               0x1740
+
+/* Audio: these regs were reverse engineered,
+ * so the chance is high that the naming is wrong.
+ * R6xx+ ??? */
+
+/* Audio clocks */
+#define R600_AUDIO_PLL1_MUL               0x0514
+#define R600_AUDIO_PLL1_DIV               0x0518
+#define R600_AUDIO_PLL2_MUL               0x0524
+#define R600_AUDIO_PLL2_DIV               0x0528
+#define R600_AUDIO_CLK_SRCSEL             0x0534
+
+/* Audio general */
+#define R600_AUDIO_ENABLE                 0x7300
+#define R600_AUDIO_TIMING                 0x7344
+
+/* Audio params */
+#define R600_AUDIO_VENDOR_ID              0x7380
+#define R600_AUDIO_REVISION_ID            0x7384
+#define R600_AUDIO_ROOT_NODE_COUNT        0x7388
+#define R600_AUDIO_NID1_NODE_COUNT        0x738c
+#define R600_AUDIO_NID1_TYPE              0x7390
+#define R600_AUDIO_SUPPORTED_SIZE_RATE    0x7394
+#define R600_AUDIO_SUPPORTED_CODEC        0x7398
+#define R600_AUDIO_SUPPORTED_POWER_STATES 0x739c
+#define R600_AUDIO_NID2_CAPS              0x73a0
+#define R600_AUDIO_NID3_CAPS              0x73a4
+#define R600_AUDIO_NID3_PIN_CAPS          0x73a8
+
+/* Audio conn list */
+#define R600_AUDIO_CONN_LIST_LEN          0x73ac
+#define R600_AUDIO_CONN_LIST              0x73b0
+
+/* Audio verbs */
+#define R600_AUDIO_RATE_BPS_CHANNEL       0x73c0
+#define R600_AUDIO_PLAYING                0x73c4
+#define R600_AUDIO_IMPLEMENTATION_ID      0x73c8
+#define R600_AUDIO_CONFIG_DEFAULT         0x73cc
+#define R600_AUDIO_PIN_SENSE              0x73d0
+#define R600_AUDIO_PIN_WIDGET_CNTL        0x73d4
+#define R600_AUDIO_STATUS_BITS            0x73d8
+
+#define DCE2_HDMI_OFFSET0		(0x7400 - 0x7400)
+#define DCE2_HDMI_OFFSET1		(0x7700 - 0x7400)
+/* DCE3.2 second instance starts at 0x7800 */
+#define DCE3_HDMI_OFFSET0		(0x7400 - 0x7400)
+#define DCE3_HDMI_OFFSET1		(0x7800 - 0x7400)
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/r600d.h b/linux-imx/drivers/gpu/drm/radeon/r600d.h
new file mode 100644
index 0000000..eb28716
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/r600d.h
@@ -0,0 +1,2003 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef R600D_H
+#define R600D_H
+
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define R6XX_MAX_SH_GPRS			256
+#define R6XX_MAX_TEMP_GPRS			16
+#define R6XX_MAX_SH_THREADS			256
+#define R6XX_MAX_SH_STACK_ENTRIES		4096
+#define R6XX_MAX_BACKENDS			8
+#define R6XX_MAX_BACKENDS_MASK			0xff
+#define R6XX_MAX_SIMDS				8
+#define R6XX_MAX_SIMDS_MASK			0xff
+#define R6XX_MAX_PIPES				8
+#define R6XX_MAX_PIPES_MASK			0xff
+
+/* PTE flags */
+#define PTE_VALID				(1 << 0)
+#define PTE_SYSTEM				(1 << 1)
+#define PTE_SNOOPED				(1 << 2)
+#define PTE_READABLE				(1 << 5)
+#define PTE_WRITEABLE				(1 << 6)
+
+/* tiling bits */
+#define     ARRAY_LINEAR_GENERAL              0x00000000
+#define     ARRAY_LINEAR_ALIGNED              0x00000001
+#define     ARRAY_1D_TILED_THIN1              0x00000002
+#define     ARRAY_2D_TILED_THIN1              0x00000004
+
+/* Registers */
+#define	ARB_POP						0x2418
+#define 	ENABLE_TC128					(1 << 30)
+#define	ARB_GDEC_RD_CNTL				0x246C
+
+#define	CC_GC_SHADER_PIPE_CONFIG			0x8950
+#define	CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)				((x) << 16)
+
+#define R_028808_CB_COLOR_CONTROL			0x28808
+#define   S_028808_SPECIAL_OP(x)                       (((x) & 0x7) << 4)
+#define   G_028808_SPECIAL_OP(x)                       (((x) >> 4) & 0x7)
+#define   C_028808_SPECIAL_OP                          0xFFFFFF8F
+#define     V_028808_SPECIAL_NORMAL                     0x00
+#define     V_028808_SPECIAL_DISABLE                    0x01
+#define     V_028808_SPECIAL_RESOLVE_BOX                0x07
+
+#define	CB_COLOR0_BASE					0x28040
+#define	CB_COLOR1_BASE					0x28044
+#define	CB_COLOR2_BASE					0x28048
+#define	CB_COLOR3_BASE					0x2804C
+#define	CB_COLOR4_BASE					0x28050
+#define	CB_COLOR5_BASE					0x28054
+#define	CB_COLOR6_BASE					0x28058
+#define	CB_COLOR7_BASE					0x2805C
+#define	CB_COLOR7_FRAG					0x280FC
+
+#define CB_COLOR0_SIZE                                  0x28060
+#define CB_COLOR0_VIEW                                  0x28080
+#define R_028080_CB_COLOR0_VIEW                      0x028080
+#define   S_028080_SLICE_START(x)                      (((x) & 0x7FF) << 0)
+#define   G_028080_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
+#define   C_028080_SLICE_START                         0xFFFFF800
+#define   S_028080_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
+#define   G_028080_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
+#define   C_028080_SLICE_MAX                           0xFF001FFF
+#define R_028084_CB_COLOR1_VIEW                      0x028084
+#define R_028088_CB_COLOR2_VIEW                      0x028088
+#define R_02808C_CB_COLOR3_VIEW                      0x02808C
+#define R_028090_CB_COLOR4_VIEW                      0x028090
+#define R_028094_CB_COLOR5_VIEW                      0x028094
+#define R_028098_CB_COLOR6_VIEW                      0x028098
+#define R_02809C_CB_COLOR7_VIEW                      0x02809C
+#define R_028100_CB_COLOR0_MASK                      0x028100
+#define   S_028100_CMASK_BLOCK_MAX(x)                  (((x) & 0xFFF) << 0)
+#define   G_028100_CMASK_BLOCK_MAX(x)                  (((x) >> 0) & 0xFFF)
+#define   C_028100_CMASK_BLOCK_MAX                     0xFFFFF000
+#define   S_028100_FMASK_TILE_MAX(x)                   (((x) & 0xFFFFF) << 12)
+#define   G_028100_FMASK_TILE_MAX(x)                   (((x) >> 12) & 0xFFFFF)
+#define   C_028100_FMASK_TILE_MAX                      0x00000FFF
+#define R_028104_CB_COLOR1_MASK                      0x028104
+#define R_028108_CB_COLOR2_MASK                      0x028108
+#define R_02810C_CB_COLOR3_MASK                      0x02810C
+#define R_028110_CB_COLOR4_MASK                      0x028110
+#define R_028114_CB_COLOR5_MASK                      0x028114
+#define R_028118_CB_COLOR6_MASK                      0x028118
+#define R_02811C_CB_COLOR7_MASK                      0x02811C
+#define CB_COLOR0_INFO                                  0x280a0
+#	define CB_FORMAT(x)				((x) << 2)
+#       define CB_ARRAY_MODE(x)                         ((x) << 8)
+#	define CB_SOURCE_FORMAT(x)			((x) << 27)
+#	define CB_SF_EXPORT_FULL			0
+#	define CB_SF_EXPORT_NORM			1
+#define CB_COLOR0_TILE                                  0x280c0
+#define CB_COLOR0_FRAG                                  0x280e0
+#define CB_COLOR0_MASK                                  0x28100
+
+#define SQ_ALU_CONST_CACHE_PS_0				0x28940
+#define SQ_ALU_CONST_CACHE_PS_1				0x28944
+#define SQ_ALU_CONST_CACHE_PS_2				0x28948
+#define SQ_ALU_CONST_CACHE_PS_3				0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4				0x28950
+#define SQ_ALU_CONST_CACHE_PS_5				0x28954
+#define SQ_ALU_CONST_CACHE_PS_6				0x28958
+#define SQ_ALU_CONST_CACHE_PS_7				0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8				0x28960
+#define SQ_ALU_CONST_CACHE_PS_9				0x28964
+#define SQ_ALU_CONST_CACHE_PS_10			0x28968
+#define SQ_ALU_CONST_CACHE_PS_11			0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12			0x28970
+#define SQ_ALU_CONST_CACHE_PS_13			0x28974
+#define SQ_ALU_CONST_CACHE_PS_14			0x28978
+#define SQ_ALU_CONST_CACHE_PS_15			0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0				0x28980
+#define SQ_ALU_CONST_CACHE_VS_1				0x28984
+#define SQ_ALU_CONST_CACHE_VS_2				0x28988
+#define SQ_ALU_CONST_CACHE_VS_3				0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4				0x28990
+#define SQ_ALU_CONST_CACHE_VS_5				0x28994
+#define SQ_ALU_CONST_CACHE_VS_6				0x28998
+#define SQ_ALU_CONST_CACHE_VS_7				0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8				0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9				0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10			0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11			0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12			0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13			0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14			0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15			0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0				0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1				0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2				0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3				0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4				0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5				0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6				0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7				0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8				0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9				0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10			0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11			0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12			0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13			0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14			0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15			0x289fc
+
+#define	CONFIG_MEMSIZE					0x5428
+#define CONFIG_CNTL					0x5424
+#define	CP_STALLED_STAT1			0x8674
+#define	CP_STALLED_STAT2			0x8678
+#define	CP_BUSY_STAT				0x867C
+#define	CP_STAT						0x8680
+#define	CP_COHER_BASE					0x85F8
+#define	CP_DEBUG					0xC1FC
+#define	R_0086D8_CP_ME_CNTL			0x86D8
+#define		S_0086D8_CP_PFP_HALT(x)			(((x) & 1)<<26)
+#define		C_0086D8_CP_PFP_HALT(x)			((x) & 0xFBFFFFFF)
+#define		S_0086D8_CP_ME_HALT(x)			(((x) & 1)<<28)
+#define		C_0086D8_CP_ME_HALT(x)			((x) & 0xEFFFFFFF)
+#define	CP_ME_RAM_DATA					0xC160
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		MEQ_END(x)					((x) << 16)
+#define		ROQ_END(x)					((x) << 24)
+#define	CP_PERFMON_CNTL					0x87FC
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_QUEUE_THRESHOLDS				0x8760
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define	CP_RB_BASE					0xC100
+#define	CP_RB_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define	CP_RB_RPTR					0x8700
+#define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)					((x) << 0)
+#define	CP_RB_RPTR_ADDR_HI				0xC110
+#define	CP_RB_RPTR_WR					0xC108
+#define	CP_RB_WPTR					0xC114
+#define	CP_RB_WPTR_ADDR					0xC118
+#define	CP_RB_WPTR_ADDR_HI				0xC11C
+#define	CP_RB_WPTR_DELAY				0x8704
+#define	CP_ROQ_IB1_STAT					0x8784
+#define	CP_ROQ_IB2_STAT					0x8788
+#define	CP_SEM_WAIT_TIMER				0x85BC
+
+#define	DB_DEBUG					0x9830
+#define		PREZ_MUST_WAIT_FOR_POSTZ_DONE			(1 << 31)
+#define	DB_DEPTH_BASE					0x2800C
+#define	DB_HTILE_DATA_BASE				0x28014
+#define	DB_HTILE_SURFACE				0x28D24
+#define   S_028D24_HTILE_WIDTH(x)                      (((x) & 0x1) << 0)
+#define   G_028D24_HTILE_WIDTH(x)                      (((x) >> 0) & 0x1)
+#define   C_028D24_HTILE_WIDTH                         0xFFFFFFFE
+#define   S_028D24_HTILE_HEIGHT(x)                      (((x) & 0x1) << 1)
+#define   G_028D24_HTILE_HEIGHT(x)                      (((x) >> 1) & 0x1)
+#define   C_028D24_HTILE_HEIGHT                         0xFFFFFFFD
+#define   G_028D24_LINEAR(x)                           (((x) >> 2) & 0x1)
+#define	DB_WATERMARKS					0x9838
+#define		DEPTH_FREE(x)					((x) << 0)
+#define		DEPTH_FLUSH(x)					((x) << 5)
+#define		DEPTH_PENDING_FREE(x)				((x) << 15)
+#define		DEPTH_CACHELINE_FREE(x)				((x) << 20)
+
+#define	DCP_TILING_CONFIG				0x6CA0
+#define		PIPE_TILING(x)					((x) << 1)
+#define 	BANK_TILING(x)					((x) << 4)
+#define		GROUP_SIZE(x)					((x) << 6)
+#define		ROW_TILING(x)					((x) << 8)
+#define		BANK_SWAPS(x)					((x) << 11)
+#define		SAMPLE_SPLIT(x)					((x) << 14)
+#define		BACKEND_MAP(x)					((x) << 16)
+
+#define GB_TILING_CONFIG				0x98F0
+#define     PIPE_TILING__SHIFT              1
+#define     PIPE_TILING__MASK               0x0000000e
+
+#define	GC_USER_SHADER_PIPE_CONFIG			0x8954
+#define		INACTIVE_QD_PIPES(x)				((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_SIMDS(x)				((x) << 16)
+#define		INACTIVE_SIMDS_MASK				0x00FF0000
+
+#define SQ_CONFIG                                         0x8c00
+#       define VC_ENABLE                                  (1 << 0)
+#       define EXPORT_SRC_C                               (1 << 1)
+#       define DX9_CONSTS                                 (1 << 2)
+#       define ALU_INST_PREFER_VECTOR                     (1 << 3)
+#       define DX10_CLAMP                                 (1 << 4)
+#       define CLAUSE_SEQ_PRIO(x)                         ((x) << 8)
+#       define PS_PRIO(x)                                 ((x) << 24)
+#       define VS_PRIO(x)                                 ((x) << 26)
+#       define GS_PRIO(x)                                 ((x) << 28)
+#       define ES_PRIO(x)                                 ((x) << 30)
+#define SQ_GPR_RESOURCE_MGMT_1                            0x8c04
+#       define NUM_PS_GPRS(x)                             ((x) << 0)
+#       define NUM_VS_GPRS(x)                             ((x) << 16)
+#       define NUM_CLAUSE_TEMP_GPRS(x)                    ((x) << 28)
+#define SQ_GPR_RESOURCE_MGMT_2                            0x8c08
+#       define NUM_GS_GPRS(x)                             ((x) << 0)
+#       define NUM_ES_GPRS(x)                             ((x) << 16)
+#define SQ_THREAD_RESOURCE_MGMT                           0x8c0c
+#       define NUM_PS_THREADS(x)                          ((x) << 0)
+#       define NUM_VS_THREADS(x)                          ((x) << 8)
+#       define NUM_GS_THREADS(x)                          ((x) << 16)
+#       define NUM_ES_THREADS(x)                          ((x) << 24)
+#define SQ_STACK_RESOURCE_MGMT_1                          0x8c10
+#       define NUM_PS_STACK_ENTRIES(x)                    ((x) << 0)
+#       define NUM_VS_STACK_ENTRIES(x)                    ((x) << 16)
+#define SQ_STACK_RESOURCE_MGMT_2                          0x8c14
+#       define NUM_GS_STACK_ENTRIES(x)                    ((x) << 0)
+#       define NUM_ES_STACK_ENTRIES(x)                    ((x) << 16)
+#define SQ_ESGS_RING_BASE                               0x8c40
+#define SQ_GSVS_RING_BASE                               0x8c48
+#define SQ_ESTMP_RING_BASE                              0x8c50
+#define SQ_GSTMP_RING_BASE                              0x8c58
+#define SQ_VSTMP_RING_BASE                              0x8c60
+#define SQ_PSTMP_RING_BASE                              0x8c68
+#define SQ_FBUF_RING_BASE                               0x8c70
+#define SQ_REDUC_RING_BASE                              0x8c78
+
+#define GRBM_CNTL                                       0x8000
+#       define GRBM_READ_TIMEOUT(x)                     ((x) << 0)
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000001F
+#define		GUI_ACTIVE					(1<<31)
+#define	GRBM_STATUS2					0x8014
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1<<0)
+
+#define	CG_THERMAL_STATUS				0x7F4
+#define		ASIC_T(x)			        ((x) << 0)
+#define		ASIC_T_MASK			        0x1FF
+#define		ASIC_T_SHIFT			        0
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+#define	HDP_TILING_CONFIG				0x2F3C
+#define HDP_DEBUG1                                      0x2F34
+
+#define MC_VM_AGP_TOP					0x2184
+#define MC_VM_AGP_BOT					0x2188
+#define	MC_VM_AGP_BASE					0x218C
+#define MC_VM_FB_LOCATION				0x2180
+#define MC_VM_L1_TLB_MCD_RD_A_CNTL			0x219C
+#define 	ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L1_STRICT_ORDERING			(1 << 2)
+#define		SYSTEM_ACCESS_MODE_MASK				0x000000C0
+#define		SYSTEM_ACCESS_MODE_SHIFT			6
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 6)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 6)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 6)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 6)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 8)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE	(1 << 8)
+#define		ENABLE_SEMAPHORE_MODE				(1 << 10)
+#define		ENABLE_WAIT_L2_QUERY				(1 << 11)
+#define		EFFECTIVE_L1_TLB_SIZE(x)			(((x) & 7) << 12)
+#define		EFFECTIVE_L1_TLB_SIZE_MASK			0x00007000
+#define		EFFECTIVE_L1_TLB_SIZE_SHIFT			12
+#define		EFFECTIVE_L1_QUEUE_SIZE(x)			(((x) & 7) << 15)
+#define		EFFECTIVE_L1_QUEUE_SIZE_MASK			0x00038000
+#define		EFFECTIVE_L1_QUEUE_SIZE_SHIFT			15
+#define MC_VM_L1_TLB_MCD_RD_B_CNTL			0x21A0
+#define MC_VM_L1_TLB_MCB_RD_GFX_CNTL			0x21FC
+#define MC_VM_L1_TLB_MCB_RD_HDP_CNTL			0x2204
+#define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL			0x2208
+#define MC_VM_L1_TLB_MCB_RD_SEM_CNTL			0x220C
+#define	MC_VM_L1_TLB_MCB_RD_SYS_CNTL			0x2200
+#define MC_VM_L1_TLB_MCD_WR_A_CNTL			0x21A4
+#define MC_VM_L1_TLB_MCD_WR_B_CNTL			0x21A8
+#define MC_VM_L1_TLB_MCB_WR_GFX_CNTL			0x2210
+#define MC_VM_L1_TLB_MCB_WR_HDP_CNTL			0x2218
+#define MC_VM_L1_TLB_MCB_WR_PDMA_CNTL			0x221C
+#define MC_VM_L1_TLB_MCB_WR_SEM_CNTL			0x2220
+#define MC_VM_L1_TLB_MCB_WR_SYS_CNTL			0x2214
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2190
+#define		LOGICAL_PAGE_NUMBER_MASK			0x000FFFFF
+#define		LOGICAL_PAGE_NUMBER_SHIFT			0
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2194
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x2198
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+#define PA_SC_AA_CONFIG					0x28C04
+#define	PA_SC_AA_SAMPLE_LOCS_2S				0x8B40
+#define	PA_SC_AA_SAMPLE_LOCS_4S				0x8B44
+#define	PA_SC_AA_SAMPLE_LOCS_8S_WD0			0x8B48
+#define	PA_SC_AA_SAMPLE_LOCS_8S_WD1			0x8B4C
+#define		S0_X(x)						((x) << 0)
+#define		S0_Y(x)						((x) << 4)
+#define		S1_X(x)						((x) << 8)
+#define		S1_Y(x)						((x) << 12)
+#define		S2_X(x)						((x) << 16)
+#define		S2_Y(x)						((x) << 20)
+#define		S3_X(x)						((x) << 24)
+#define		S3_Y(x)						((x) << 28)
+#define		S4_X(x)						((x) << 0)
+#define		S4_Y(x)						((x) << 4)
+#define		S5_X(x)						((x) << 8)
+#define		S5_Y(x)						((x) << 12)
+#define		S6_X(x)						((x) << 16)
+#define		S6_Y(x)						((x) << 20)
+#define		S7_X(x)						((x) << 24)
+#define		S7_Y(x)						((x) << 28)
+#define PA_SC_CLIPRECT_RULE				0x2820c
+#define	PA_SC_ENHANCE					0x8BF0
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_TILE_CNT(x)			((x) << 12)
+#define PA_SC_LINE_STIPPLE				0x28A0C
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+#define PA_SC_MODE_CNTL					0x28A4C
+#define	PA_SC_MULTI_CHIP_CNTL				0x8B20
+
+#define PA_SC_SCREEN_SCISSOR_TL                         0x28030
+#define PA_SC_GENERIC_SCISSOR_TL                        0x28240
+#define PA_SC_WINDOW_SCISSOR_TL                         0x28204
+
+#define	PCIE_PORT_INDEX					0x0038
+#define	PCIE_PORT_DATA					0x003C
+
+#define CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x00003000
+
+#define RAMCFG						0x2408
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000001
+#define		NOOFRANK_SHIFT					1
+#define		NOOFRANK_MASK					0x00000002
+#define		NOOFROWS_SHIFT					2
+#define		NOOFROWS_MASK					0x0000001C
+#define		NOOFCOLS_SHIFT					5
+#define		NOOFCOLS_MASK					0x00000060
+#define		CHANSIZE_SHIFT					7
+#define		CHANSIZE_MASK					0x00000080
+#define		BURSTLENGTH_SHIFT				8
+#define		BURSTLENGTH_MASK				0x00000100
+#define		CHANSIZE_OVERRIDE				(1 << 10)
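+/* Illustrative decode (assumption): memory-controller init derives the
+ * channel width in bits from these fields, roughly:
+ *
+ *	tmp = RREG32(RAMCFG);
+ *	if (tmp & CHANSIZE_OVERRIDE)
+ *		chansize = 16;
+ *	else if (tmp & CHANSIZE_MASK)
+ *		chansize = 64;
+ *	else
+ *		chansize = 32;
+ */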
+
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+
+#define	SPI_CONFIG_CNTL					0x9100
+#define		GPR_WRITE_PRIORITY(x)				((x) << 0)
+#define		DISABLE_INTERP_1				(1 << 5)
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+#define	SPI_INPUT_Z					0x286D8
+#define	SPI_PS_IN_CONTROL_0				0x286CC
+#define		NUM_INTERP(x)					((x)<<0)
+#define		POSITION_ENA					(1<<8)
+#define		POSITION_CENTROID				(1<<9)
+#define		POSITION_ADDR(x)				((x)<<10)
+#define		PARAM_GEN(x)					((x)<<15)
+#define		PARAM_GEN_ADDR(x)				((x)<<19)
+#define		BARYC_SAMPLE_CNTL(x)				((x)<<26)
+#define		PERSP_GRADIENT_ENA				(1<<28)
+#define		LINEAR_GRADIENT_ENA				(1<<29)
+#define		POSITION_SAMPLE					(1<<30)
+#define		BARYC_AT_SAMPLE_ENA				(1<<31)
+#define	SPI_PS_IN_CONTROL_1				0x286D0
+#define		GEN_INDEX_PIX					(1<<0)
+#define		GEN_INDEX_PIX_ADDR(x)				((x)<<1)
+#define		FRONT_FACE_ENA					(1<<8)
+#define		FRONT_FACE_CHAN(x)				((x)<<9)
+#define		FRONT_FACE_ALL_BITS				(1<<11)
+#define		FRONT_FACE_ADDR(x)				((x)<<12)
+#define		FOG_ADDR(x)					((x)<<17)
+#define		FIXED_PT_POSITION_ENA				(1<<24)
+#define		FIXED_PT_POSITION_ADDR(x)			((x)<<25)
+
+#define	SQ_MS_FIFO_SIZES				0x8CF0
+#define		CACHE_FIFO_SIZE(x)				((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)				((x) << 8)
+#define		DONE_FIFO_HIWATER(x)				((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)			((x) << 24)
+#define	SQ_PGM_START_ES					0x28880
+#define	SQ_PGM_START_FS					0x28894
+#define	SQ_PGM_START_GS					0x2886C
+#define	SQ_PGM_START_PS					0x28840
+#define SQ_PGM_RESOURCES_PS                             0x28850
+#define SQ_PGM_EXPORTS_PS                               0x28854
+#define SQ_PGM_CF_OFFSET_PS                             0x288cc
+#define	SQ_PGM_START_VS					0x28858
+#define SQ_PGM_RESOURCES_VS                             0x28868
+#define SQ_PGM_CF_OFFSET_VS                             0x288d0
+
+#define SQ_VTX_CONSTANT_WORD0_0				0x30000
+#define SQ_VTX_CONSTANT_WORD1_0				0x30004
+#define SQ_VTX_CONSTANT_WORD2_0				0x30008
+#	define SQ_VTXC_BASE_ADDR_HI(x)			((x) << 0)
+#	define SQ_VTXC_STRIDE(x)			((x) << 8)
+#	define SQ_VTXC_ENDIAN_SWAP(x)			((x) << 30)
+#	define SQ_ENDIAN_NONE				0
+#	define SQ_ENDIAN_8IN16				1
+#	define SQ_ENDIAN_8IN32				2
+#define SQ_VTX_CONSTANT_WORD3_0				0x3000c
+#define	SQ_VTX_CONSTANT_WORD6_0				0x38018
+#define		S__SQ_VTX_CONSTANT_TYPE(x)			(((x) & 3) << 30)
+#define		G__SQ_VTX_CONSTANT_TYPE(x)			(((x) >> 30) & 3)
+#define			SQ_TEX_VTX_INVALID_TEXTURE			0x0
+#define			SQ_TEX_VTX_INVALID_BUFFER			0x1
+#define			SQ_TEX_VTX_VALID_TEXTURE			0x2
+#define			SQ_TEX_VTX_VALID_BUFFER				0x3
+
+
+#define	SX_MISC						0x28350
+#define	SX_MEMORY_EXPORT_BASE				0x9010
+#define	SX_DEBUG_1					0x9054
+#define		SMX_EVENT_RELEASE				(1 << 0)
+#define		ENABLE_NEW_SMX_ADDRESS				(1 << 16)
+
+#define	TA_CNTL_AUX					0x9508
+#define		DISABLE_CUBE_WRAP				(1 << 0)
+#define		DISABLE_CUBE_ANISO				(1 << 1)
+#define		SYNC_GRADIENT					(1 << 24)
+#define		SYNC_WALKER					(1 << 25)
+#define		SYNC_ALIGNER					(1 << 26)
+#define		BILINEAR_PRECISION_6_BIT			(0 << 31)
+#define		BILINEAR_PRECISION_8_BIT			(1 << 31)
+
+#define	TC_CNTL						0x9608
+#define		TC_L2_SIZE(x)					((x)<<5)
+#define		L2_DISABLE_LATE_HIT				(1<<9)
+
+#define	VC_ENHANCE					0x9714
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x)<<0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define	VGT_DMA_BASE					0x287E8
+#define	VGT_DMA_BASE_HI					0x287E4
+#define	VGT_ES_PER_GS					0x88CC
+#define	VGT_GS_PER_ES					0x88C8
+#define	VGT_GS_PER_VS					0x88E8
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+#define VGT_PRIMITIVE_TYPE                              0x8958
+#define	VGT_NUM_INSTANCES				0x8974
+#define	VGT_OUT_DEALLOC_CNTL				0x28C5C
+#define		DEALLOC_DIST_MASK				0x0000007F
+#define	VGT_STRMOUT_BASE_OFFSET_0			0x28B10
+#define	VGT_STRMOUT_BASE_OFFSET_1			0x28B14
+#define	VGT_STRMOUT_BASE_OFFSET_2			0x28B18
+#define	VGT_STRMOUT_BASE_OFFSET_3			0x28B1c
+#define	VGT_STRMOUT_BASE_OFFSET_HI_0			0x28B44
+#define	VGT_STRMOUT_BASE_OFFSET_HI_1			0x28B48
+#define	VGT_STRMOUT_BASE_OFFSET_HI_2			0x28B4c
+#define	VGT_STRMOUT_BASE_OFFSET_HI_3			0x28B50
+#define	VGT_STRMOUT_BUFFER_BASE_0			0x28AD8
+#define	VGT_STRMOUT_BUFFER_BASE_1			0x28AE8
+#define	VGT_STRMOUT_BUFFER_BASE_2			0x28AF8
+#define	VGT_STRMOUT_BUFFER_BASE_3			0x28B08
+#define	VGT_STRMOUT_BUFFER_OFFSET_0			0x28ADC
+#define	VGT_STRMOUT_BUFFER_OFFSET_1			0x28AEC
+#define	VGT_STRMOUT_BUFFER_OFFSET_2			0x28AFC
+#define	VGT_STRMOUT_BUFFER_OFFSET_3			0x28B0C
+#define VGT_STRMOUT_BUFFER_SIZE_0			0x28AD0
+#define VGT_STRMOUT_BUFFER_SIZE_1			0x28AE0
+#define VGT_STRMOUT_BUFFER_SIZE_2			0x28AF0
+#define VGT_STRMOUT_BUFFER_SIZE_3			0x28B00
+
+#define	VGT_STRMOUT_EN					0x28AB0
+#define	VGT_VERTEX_REUSE_BLOCK_CNTL			0x28C58
+#define		VTX_REUSE_DEPTH_MASK				0x000000FF
+#define VGT_EVENT_INITIATOR                             0x28a90
+#       define CACHE_FLUSH_AND_INV_EVENT_TS                     (0x14 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                        (0x16 << 0)
+
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define VM_CONTEXT0_INVALIDATION_LOW_ADDR		0x1490
+#define VM_CONTEXT0_INVALIDATION_HIGH_ADDR		0x14B0
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x1574
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x1594
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x15B4
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1554
+#define VM_CONTEXT0_REQUEST_RESPONSE			0x1470
+#define		REQUEST_TYPE(x)					(((x) & 0xf) << 0)
+#define		RESPONSE_TYPE_MASK				0x000000F0
+#define		RESPONSE_TYPE_SHIFT				4
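+/* Illustrative sketch (assumption): a page-table flush posts a request
+ * through this register and polls the response field for completion:
+ *
+ *	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+ *	resp = (RREG32(VM_CONTEXT0_REQUEST_RESPONSE) &
+ *		RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+ */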
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 13)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT_0(x)				(((x) & 0x1f) << 0)
+#define		BANK_SELECT_1(x)				(((x) & 0x1f) << 5)
+#define		L2_CACHE_UPDATE_MODE(x)				(((x) & 3) << 10)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+
+#define	WAIT_UNTIL					0x8040
+#define         WAIT_2D_IDLE_bit                                (1 << 14)
+#define         WAIT_3D_IDLE_bit                                (1 << 15)
+#define         WAIT_2D_IDLECLEAN_bit                           (1 << 16)
+#define         WAIT_3D_IDLECLEAN_bit                           (1 << 17)
+
+/* async DMA */
+#define DMA_TILING_CONFIG                                 0x3ec4
+#define DMA_CONFIG                                        0x3e4c
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
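+/* Illustrative sketch (assumption; order_base_2() from <linux/log2.h>):
+ * the ring size is programmed as the log2 of the dword count, e.g. a
+ * 64KB ring holds 16384 dwords:
+ *
+ *	rb_bufsz = order_base_2(ring_size / 4);
+ *	WREG32(DMA_RB_CNTL, DMA_RB_ENABLE | DMA_RB_SIZE(rb_bufsz));
+ */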
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0xd048
+#define DMA_MODE                                          0xd0bc
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_CONSTANT_FILL			  0xd /* 7xx only */
+#define	DMA_PACKET_NOP					  0xf
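+/* Illustrative sketch (assumption; radeon_ring_write() from the driver):
+ * every async DMA command starts with a header built from DMA_PACKET(),
+ * e.g. a fence packet:
+ *
+ *	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ */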
+
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_RB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
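+/* Illustrative sketch (assumption): the IH read path checks for ring
+ * overflow before consuming entries, along the lines of:
+ *
+ *	wptr = RREG32(IH_RB_WPTR);
+ *	if (wptr & RB_OVERFLOW)
+ *		WREG32(IH_RB_CNTL, RREG32(IH_RB_CNTL) | IH_WPTR_OVERFLOW_CLEAR);
+ *	wptr &= WPTR_OFFSET_MASK;
+ */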
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
+
+#define RLC_CNTL                                          0x3f00
+#       define RLC_ENABLE                                 (1 << 0)
+#define RLC_HB_BASE                                       0x3f10
+#define RLC_HB_CNTL                                       0x3f0c
+#define RLC_HB_RPTR                                       0x3f20
+#define RLC_HB_WPTR                                       0x3f1c
+#define RLC_HB_WPTR_LSB_ADDR                              0x3f14
+#define RLC_HB_WPTR_MSB_ADDR                              0x3f18
+#define RLC_GPU_CLOCK_COUNT_LSB				  0x3f38
+#define RLC_GPU_CLOCK_COUNT_MSB				  0x3f3c
+#define RLC_CAPTURE_GPU_CLOCK_COUNT			  0x3f40
+#define RLC_MC_CNTL                                       0x3f44
+#define RLC_UCODE_CNTL                                    0x3f48
+#define RLC_UCODE_ADDR                                    0x3f2c
+#define RLC_UCODE_DATA                                    0x3f30
+
+/* new for TN */
+#define TN_RLC_SAVE_AND_RESTORE_BASE                      0x3f10
+#define TN_RLC_CLEAR_STATE_RESTORE_BASE                   0x3f20
+
+#define SRBM_SOFT_RESET                                   0xe60
+#       define SOFT_RESET_DMA                             (1 << 12)
+#       define SOFT_RESET_RLC                             (1 << 13)
+#       define SOFT_RESET_UVD                             (1 << 18)
+#       define RV770_SOFT_RESET_DMA                       (1 << 20)
+
+#define CP_INT_CNTL                                       0xc124
+#       define CNTX_BUSY_INT_ENABLE                       (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                      (1 << 20)
+#       define SCRATCH_INT_ENABLE                         (1 << 25)
+#       define TIME_STAMP_INT_ENABLE                      (1 << 26)
+#       define IB2_INT_ENABLE                             (1 << 29)
+#       define IB1_INT_ENABLE                             (1 << 30)
+#       define RB_INT_ENABLE                              (1 << 31)
+#define CP_INT_STATUS                                     0xc128
+#       define SCRATCH_INT_STAT                           (1 << 25)
+#       define TIME_STAMP_INT_STAT                        (1 << 26)
+#       define IB2_INT_STAT                               (1 << 29)
+#       define IB1_INT_STAT                               (1 << 30)
+#       define RB_INT_STAT                                (1 << 31)
+
+#define GRBM_INT_CNTL                                     0x8060
+#       define RDERR_INT_ENABLE                           (1 << 0)
+#       define WAIT_COUNT_TIMEOUT_INT_ENABLE              (1 << 1)
+#       define GUI_IDLE_INT_ENABLE                        (1 << 19)
+
+#define INTERRUPT_CNTL                                    0x5468
+#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
+#       define IH_DUMMY_RD_EN                             (1 << 1)
+#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
+#       define GEN_IH_INT_EN                              (1 << 8)
+#define INTERRUPT_CNTL2                                   0x546c
+
+#define D1MODE_VBLANK_STATUS                              0x6534
+#define D2MODE_VBLANK_STATUS                              0x6d34
+#       define DxMODE_VBLANK_OCCURRED                     (1 << 0)
+#       define DxMODE_VBLANK_ACK                          (1 << 4)
+#       define DxMODE_VBLANK_STAT                         (1 << 12)
+#       define DxMODE_VBLANK_INTERRUPT                    (1 << 16)
+#       define DxMODE_VBLANK_INTERRUPT_TYPE               (1 << 17)
+#define D1MODE_VLINE_STATUS                               0x653c
+#define D2MODE_VLINE_STATUS                               0x6d3c
+#       define DxMODE_VLINE_OCCURRED                      (1 << 0)
+#       define DxMODE_VLINE_ACK                           (1 << 4)
+#       define DxMODE_VLINE_STAT                          (1 << 12)
+#       define DxMODE_VLINE_INTERRUPT                     (1 << 16)
+#       define DxMODE_VLINE_INTERRUPT_TYPE                (1 << 17)
+#define DxMODE_INT_MASK                                   0x6540
+#       define D1MODE_VBLANK_INT_MASK                     (1 << 0)
+#       define D1MODE_VLINE_INT_MASK                      (1 << 4)
+#       define D2MODE_VBLANK_INT_MASK                     (1 << 8)
+#       define D2MODE_VLINE_INT_MASK                      (1 << 12)
+#define DCE3_DISP_INTERRUPT_STATUS                        0x7ddc
+#       define DC_HPD1_INTERRUPT                          (1 << 18)
+#       define DC_HPD2_INTERRUPT                          (1 << 19)
+#define DISP_INTERRUPT_STATUS                             0x7edc
+#       define LB_D1_VLINE_INTERRUPT                      (1 << 2)
+#       define LB_D2_VLINE_INTERRUPT                      (1 << 3)
+#       define LB_D1_VBLANK_INTERRUPT                     (1 << 4)
+#       define LB_D2_VBLANK_INTERRUPT                     (1 << 5)
+#       define DACA_AUTODETECT_INTERRUPT                  (1 << 16)
+#       define DACB_AUTODETECT_INTERRUPT                  (1 << 17)
+#       define DC_HOT_PLUG_DETECT1_INTERRUPT              (1 << 18)
+#       define DC_HOT_PLUG_DETECT2_INTERRUPT              (1 << 19)
+#       define DC_I2C_SW_DONE_INTERRUPT                   (1 << 20)
+#       define DC_I2C_HW_DONE_INTERRUPT                   (1 << 21)
+#define DISP_INTERRUPT_STATUS_CONTINUE                    0x7ee8
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE               0x7de8
+#       define DC_HPD4_INTERRUPT                          (1 << 14)
+#       define DC_HPD4_RX_INTERRUPT                       (1 << 15)
+#       define DC_HPD3_INTERRUPT                          (1 << 28)
+#       define DC_HPD1_RX_INTERRUPT                       (1 << 29)
+#       define DC_HPD2_RX_INTERRUPT                       (1 << 30)
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2              0x7dec
+#       define DC_HPD3_RX_INTERRUPT                       (1 << 0)
+#       define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT       (1 << 1)
+#       define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT      (1 << 2)
+#       define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT       (1 << 3)
+#       define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT      (1 << 4)
+#       define AUX1_SW_DONE_INTERRUPT                     (1 << 5)
+#       define AUX1_LS_DONE_INTERRUPT                     (1 << 6)
+#       define AUX2_SW_DONE_INTERRUPT                     (1 << 7)
+#       define AUX2_LS_DONE_INTERRUPT                     (1 << 8)
+#       define AUX3_SW_DONE_INTERRUPT                     (1 << 9)
+#       define AUX3_LS_DONE_INTERRUPT                     (1 << 10)
+#       define AUX4_SW_DONE_INTERRUPT                     (1 << 11)
+#       define AUX4_LS_DONE_INTERRUPT                     (1 << 12)
+#       define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT   (1 << 13)
+#       define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT   (1 << 14)
+/* DCE 3.2 */
+#       define AUX5_SW_DONE_INTERRUPT                     (1 << 15)
+#       define AUX5_LS_DONE_INTERRUPT                     (1 << 16)
+#       define AUX6_SW_DONE_INTERRUPT                     (1 << 17)
+#       define AUX6_LS_DONE_INTERRUPT                     (1 << 18)
+#       define DC_HPD5_INTERRUPT                          (1 << 19)
+#       define DC_HPD5_RX_INTERRUPT                       (1 << 20)
+#       define DC_HPD6_INTERRUPT                          (1 << 21)
+#       define DC_HPD6_RX_INTERRUPT                       (1 << 22)
+
+#define DACA_AUTO_DETECT_CONTROL                          0x7828
+#define DACB_AUTO_DETECT_CONTROL                          0x7a28
+#define DCE3_DACA_AUTO_DETECT_CONTROL                     0x7028
+#define DCE3_DACB_AUTO_DETECT_CONTROL                     0x7128
+#       define DACx_AUTODETECT_MODE(x)                    ((x) << 0)
+#       define DACx_AUTODETECT_MODE_NONE                  0
+#       define DACx_AUTODETECT_MODE_CONNECT               1
+#       define DACx_AUTODETECT_MODE_DISCONNECT            2
+#       define DACx_AUTODETECT_FRAME_TIME_COUNTER(x)      ((x) << 8)
+/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */
+#       define DACx_AUTODETECT_CHECK_MASK(x)              ((x) << 16)
+
+#define DCE3_DACA_AUTODETECT_INT_CONTROL                  0x7038
+#define DCE3_DACB_AUTODETECT_INT_CONTROL                  0x7138
+#define DACA_AUTODETECT_INT_CONTROL                       0x7838
+#define DACB_AUTODETECT_INT_CONTROL                       0x7a38
+#       define DACx_AUTODETECT_ACK                        (1 << 0)
+#       define DACx_AUTODETECT_INT_ENABLE                 (1 << 16)
+
+#define DC_HOT_PLUG_DETECT1_CONTROL                       0x7d00
+#define DC_HOT_PLUG_DETECT2_CONTROL                       0x7d10
+#define DC_HOT_PLUG_DETECT3_CONTROL                       0x7d24
+#       define DC_HOT_PLUG_DETECTx_EN                     (1 << 0)
+
+#define DC_HOT_PLUG_DETECT1_INT_STATUS                    0x7d04
+#define DC_HOT_PLUG_DETECT2_INT_STATUS                    0x7d14
+#define DC_HOT_PLUG_DETECT3_INT_STATUS                    0x7d28
+#       define DC_HOT_PLUG_DETECTx_INT_STATUS             (1 << 0)
+#       define DC_HOT_PLUG_DETECTx_SENSE                  (1 << 1)
+
+/* DCE 3.0 */
+#define DC_HPD1_INT_STATUS                                0x7d00
+#define DC_HPD2_INT_STATUS                                0x7d0c
+#define DC_HPD3_INT_STATUS                                0x7d18
+#define DC_HPD4_INT_STATUS                                0x7d24
+/* DCE 3.2 */
+#define DC_HPD5_INT_STATUS                                0x7dc0
+#define DC_HPD6_INT_STATUS                                0x7df4
+#       define DC_HPDx_INT_STATUS                         (1 << 0)
+#       define DC_HPDx_SENSE                              (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                      (1 << 8)
+
+#define DC_HOT_PLUG_DETECT1_INT_CONTROL                   0x7d08
+#define DC_HOT_PLUG_DETECT2_INT_CONTROL                   0x7d18
+#define DC_HOT_PLUG_DETECT3_INT_CONTROL                   0x7d2c
+#       define DC_HOT_PLUG_DETECTx_INT_ACK                (1 << 0)
+#       define DC_HOT_PLUG_DETECTx_INT_POLARITY           (1 << 8)
+#       define DC_HOT_PLUG_DETECTx_INT_EN                 (1 << 16)
+/* DCE 3.0 */
+#define DC_HPD1_INT_CONTROL                               0x7d04
+#define DC_HPD2_INT_CONTROL                               0x7d10
+#define DC_HPD3_INT_CONTROL                               0x7d1c
+#define DC_HPD4_INT_CONTROL                               0x7d28
+/* DCE 3.2 */
+#define DC_HPD5_INT_CONTROL                               0x7dc4
+#define DC_HPD6_INT_CONTROL                               0x7df8
+#       define DC_HPDx_INT_ACK                            (1 << 0)
+#       define DC_HPDx_INT_POLARITY                       (1 << 8)
+#       define DC_HPDx_INT_EN                             (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                         (1 << 20)
+#       define DC_HPDx_RX_INT_EN                          (1 << 24)
+
+/* DCE 3.0 */
+#define DC_HPD1_CONTROL                                   0x7d08
+#define DC_HPD2_CONTROL                                   0x7d14
+#define DC_HPD3_CONTROL                                   0x7d20
+#define DC_HPD4_CONTROL                                   0x7d2c
+/* DCE 3.2 */
+#define DC_HPD5_CONTROL                                   0x7dc8
+#define DC_HPD6_CONTROL                                   0x7dfc
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+/* DCE 3.2 */
+#       define DC_HPDx_EN                                 (1 << 28)
+
+#define D1GRPH_INTERRUPT_STATUS                           0x6158
+#define D2GRPH_INTERRUPT_STATUS                           0x6958
+#       define DxGRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define DxGRPH_PFLIP_INT_CLEAR                     (1 << 8)
+#define D1GRPH_INTERRUPT_CONTROL                          0x615c
+#define D2GRPH_INTERRUPT_CONTROL                          0x695c
+#       define DxGRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define DxGRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+/* PCIE link registers; entries marked PCIE_P sit in the indirect space
+ * accessed via PCIE_PORT_INDEX/PCIE_PORT_DATA */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#       define LC_POINT_7_PLUS_EN                         (1 << 6)
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     3
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
+/* Audio clocks DCE 2.0/3.0 */
+#define AUDIO_DTO                         0x7340
+#       define AUDIO_DTO_PHASE(x)         (((x) & 0xffff) << 0)
+#       define AUDIO_DTO_MODULE(x)        (((x) & 0xffff) << 16)
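+/* Illustrative sketch (assumption): the DTO synthesizes the audio
+ * reference clock as a PHASE/MODULE fraction of the display clock, e.g.:
+ *
+ *	WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
+ *			  AUDIO_DTO_MODULE(dispclk / 10));
+ */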
+
+/* Audio clocks DCE 3.2 */
+#define DCCG_AUDIO_DTO0_PHASE             0x0514
+#define DCCG_AUDIO_DTO0_MODULE            0x0518
+#define DCCG_AUDIO_DTO0_LOAD              0x051c
+#       define DTO_LOAD                   (1 << 31)
+#define DCCG_AUDIO_DTO0_CNTL              0x0520
+
+#define DCCG_AUDIO_DTO1_PHASE             0x0524
+#define DCCG_AUDIO_DTO1_MODULE            0x0528
+#define DCCG_AUDIO_DTO1_LOAD              0x052c
+#define DCCG_AUDIO_DTO1_CNTL              0x0530
+
+#define DCCG_AUDIO_DTO_SELECT             0x0534
+
+/* digital blocks */
+#define TMDSA_CNTL                       0x7880
+#       define TMDSA_HDMI_EN             (1 << 2)
+#define LVTMA_CNTL                       0x7a80
+#       define LVTMA_HDMI_EN             (1 << 2)
+#define DDIA_CNTL                        0x7200
+#       define DDIA_HDMI_EN              (1 << 2)
+#define DIG0_CNTL                        0x75a0
+#       define DIG_MODE(x)               (((x) & 7) << 8)
+#       define DIG_MODE_DP               0
+#       define DIG_MODE_LVDS             1
+#       define DIG_MODE_TMDS_DVI         2
+#       define DIG_MODE_TMDS_HDMI        3
+#       define DIG_MODE_SDVO             4
+#define DIG1_CNTL                        0x79a0
+
+/* rs6xx/rs740 and r6xx share the same HDMI blocks; however, rs6xx has only
+ * one instance of the blocks while r6xx has two.  DCE 3.0 cards are slightly
+ * different due to the new DIG blocks, but they also have two instances.
+ * On DCE 3.0 the HDMI blocks are part of each DIG encoder.
+ */
+
+/* rs6xx/rs740/r6xx/dce3 */
+#define HDMI0_CONTROL                0x7400
+/* rs6xx/rs740/r6xx */
+#       define HDMI0_ENABLE          (1 << 0)
+#       define HDMI0_STREAM(x)       (((x) & 3) << 2)
+#       define HDMI0_STREAM_TMDSA    0
+#       define HDMI0_STREAM_LVTMA    1
+#       define HDMI0_STREAM_DVOA     2
+#       define HDMI0_STREAM_DDIA     3
+/* rs6xx/r6xx/dce3 */
+#       define HDMI0_ERROR_ACK       (1 << 8)
+#       define HDMI0_ERROR_MASK      (1 << 9)
+#define HDMI0_STATUS                 0x7404
+#       define HDMI0_ACTIVE_AVMUTE   (1 << 0)
+#       define HDMI0_AUDIO_ENABLE    (1 << 4)
+#       define HDMI0_AZ_FORMAT_WTRIG     (1 << 28)
+#       define HDMI0_AZ_FORMAT_WTRIG_INT (1 << 29)
+#define HDMI0_AUDIO_PACKET_CONTROL   0x7408
+#       define HDMI0_AUDIO_SAMPLE_SEND  (1 << 0)
+#       define HDMI0_AUDIO_DELAY_EN(x)  (((x) & 3) << 4)
+#       define HDMI0_AUDIO_SEND_MAX_PACKETS  (1 << 8)
+#       define HDMI0_AUDIO_TEST_EN         (1 << 12)
+#       define HDMI0_AUDIO_PACKETS_PER_LINE(x)  (((x) & 0x1f) << 16)
+#       define HDMI0_AUDIO_CHANNEL_SWAP    (1 << 24)
+#       define HDMI0_60958_CS_UPDATE       (1 << 26)
+#       define HDMI0_AZ_FORMAT_WTRIG_MASK  (1 << 28)
+#       define HDMI0_AZ_FORMAT_WTRIG_ACK   (1 << 29)
+#define HDMI0_AUDIO_CRC_CONTROL      0x740c
+#       define HDMI0_AUDIO_CRC_EN    (1 << 0)
+#define HDMI0_VBI_PACKET_CONTROL     0x7410
+#       define HDMI0_NULL_SEND       (1 << 0)
+#       define HDMI0_GC_SEND         (1 << 4)
+#       define HDMI0_GC_CONT         (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI0_INFOFRAME_CONTROL0     0x7414
+#       define HDMI0_AVI_INFO_SEND   (1 << 0)
+#       define HDMI0_AVI_INFO_CONT   (1 << 1)
+#       define HDMI0_AUDIO_INFO_SEND (1 << 4)
+#       define HDMI0_AUDIO_INFO_CONT (1 << 5)
+#       define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+#       define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
+#       define HDMI0_MPEG_INFO_SEND  (1 << 8)
+#       define HDMI0_MPEG_INFO_CONT  (1 << 9)
+#       define HDMI0_MPEG_INFO_UPDATE  (1 << 10)
+#define HDMI0_INFOFRAME_CONTROL1     0x7418
+#       define HDMI0_AVI_INFO_LINE(x)  (((x) & 0x3f) << 0)
+#       define HDMI0_AUDIO_INFO_LINE(x)  (((x) & 0x3f) << 8)
+#       define HDMI0_MPEG_INFO_LINE(x)  (((x) & 0x3f) << 16)
+#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
+#       define HDMI0_GENERIC0_SEND   (1 << 0)
+#       define HDMI0_GENERIC0_CONT   (1 << 1)
+#       define HDMI0_GENERIC0_UPDATE (1 << 2)
+#       define HDMI0_GENERIC1_SEND   (1 << 4)
+#       define HDMI0_GENERIC1_CONT   (1 << 5)
+#       define HDMI0_GENERIC0_LINE(x)  (((x) & 0x3f) << 16)
+#       define HDMI0_GENERIC1_LINE(x)  (((x) & 0x3f) << 24)
+#define HDMI0_GC                     0x7428
+#       define HDMI0_GC_AVMUTE       (1 << 0)
+#define HDMI0_AVI_INFO0              0x7454
+#       define HDMI0_AVI_INFO_CHECKSUM(x)  (((x) & 0xff) << 0)
+#       define HDMI0_AVI_INFO_S(x)   (((x) & 3) << 8)
+#       define HDMI0_AVI_INFO_B(x)   (((x) & 3) << 10)
+#       define HDMI0_AVI_INFO_A(x)   (((x) & 1) << 12)
+#       define HDMI0_AVI_INFO_Y(x)   (((x) & 3) << 13)
+#       define HDMI0_AVI_INFO_Y_RGB       0
+#       define HDMI0_AVI_INFO_Y_YCBCR422  1
+#       define HDMI0_AVI_INFO_Y_YCBCR444  2
+#       define HDMI0_AVI_INFO_Y_A_B_S(x)   (((x) & 0xff) << 8)
+#       define HDMI0_AVI_INFO_R(x)   (((x) & 0xf) << 16)
+#       define HDMI0_AVI_INFO_M(x)   (((x) & 0x3) << 20)
+#       define HDMI0_AVI_INFO_C(x)   (((x) & 0x3) << 22)
+#       define HDMI0_AVI_INFO_C_M_R(x)   (((x) & 0xff) << 16)
+#       define HDMI0_AVI_INFO_SC(x)  (((x) & 0x3) << 24)
+#       define HDMI0_AVI_INFO_ITC_EC_Q_SC(x)  (((x) & 0xff) << 24)
+#define HDMI0_AVI_INFO1              0x7458
+#       define HDMI0_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* do not use with AVI infoframe v1 */
+#       define HDMI0_AVI_INFO_PR(x)  (((x) & 0xf) << 8) /* do not use with AVI infoframe v1 */
+#       define HDMI0_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO2              0x745c
+#       define HDMI0_AVI_INFO_BOTTOM(x)  (((x) & 0xffff) << 0)
+#       define HDMI0_AVI_INFO_LEFT(x)    (((x) & 0xffff) << 16)
+#define HDMI0_AVI_INFO3              0x7460
+#       define HDMI0_AVI_INFO_RIGHT(x)    (((x) & 0xffff) << 0)
+#       define HDMI0_AVI_INFO_VERSION(x)  (((x) & 3) << 24)
+#define HDMI0_MPEG_INFO0             0x7464
+#       define HDMI0_MPEG_INFO_CHECKSUM(x)  (((x) & 0xff) << 0)
+#       define HDMI0_MPEG_INFO_MB0(x)  (((x) & 0xff) << 8)
+#       define HDMI0_MPEG_INFO_MB1(x)  (((x) & 0xff) << 16)
+#       define HDMI0_MPEG_INFO_MB2(x)  (((x) & 0xff) << 24)
+#define HDMI0_MPEG_INFO1             0x7468
+#       define HDMI0_MPEG_INFO_MB3(x)  (((x) & 0xff) << 0)
+#       define HDMI0_MPEG_INFO_MF(x)   (((x) & 3) << 8)
+#       define HDMI0_MPEG_INFO_FR(x)   (((x) & 1) << 12)
+#define HDMI0_GENERIC0_HDR           0x746c
+#define HDMI0_GENERIC0_0             0x7470
+#define HDMI0_GENERIC0_1             0x7474
+#define HDMI0_GENERIC0_2             0x7478
+#define HDMI0_GENERIC0_3             0x747c
+#define HDMI0_GENERIC0_4             0x7480
+#define HDMI0_GENERIC0_5             0x7484
+#define HDMI0_GENERIC0_6             0x7488
+#define HDMI0_GENERIC1_HDR           0x748c
+#define HDMI0_GENERIC1_0             0x7490
+#define HDMI0_GENERIC1_1             0x7494
+#define HDMI0_GENERIC1_2             0x7498
+#define HDMI0_GENERIC1_3             0x749c
+#define HDMI0_GENERIC1_4             0x74a0
+#define HDMI0_GENERIC1_5             0x74a4
+#define HDMI0_GENERIC1_6             0x74a8
+#define HDMI0_ACR_32_0               0x74ac
+#       define HDMI0_ACR_CTS_32(x)   (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_32_1               0x74b0
+#       define HDMI0_ACR_N_32(x)   (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_44_0               0x74b4
+#       define HDMI0_ACR_CTS_44(x)   (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_44_1               0x74b8
+#       define HDMI0_ACR_N_44(x)   (((x) & 0xfffff) << 0)
+#define HDMI0_ACR_48_0               0x74bc
+#       define HDMI0_ACR_CTS_48(x)   (((x) & 0xfffff) << 12)
+#define HDMI0_ACR_48_1               0x74c0
+#       define HDMI0_ACR_N_48(x)   (((x) & 0xfffff) << 0)
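+/* Illustrative note (assumption): these registers hold the HDMI Audio
+ * Clock Regeneration N/CTS pairs per sample rate; the HDMI spec's
+ * recommended N for 48 kHz audio is 6144, e.g.:
+ *
+ *	WREG32(HDMI0_ACR_48_1, HDMI0_ACR_N_48(6144));
+ */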
+#define HDMI0_ACR_STATUS_0           0x74c4
+#define HDMI0_ACR_STATUS_1           0x74c8
+#define HDMI0_AUDIO_INFO0            0x74cc
+#       define HDMI0_AUDIO_INFO_CHECKSUM(x)  (((x) & 0xff) << 0)
+#       define HDMI0_AUDIO_INFO_CC(x)  (((x) & 7) << 8)
+#define HDMI0_AUDIO_INFO1            0x74d0
+#       define HDMI0_AUDIO_INFO_CA(x)  (((x) & 0xff) << 0)
+#       define HDMI0_AUDIO_INFO_LSV(x)  (((x) & 0xf) << 11)
+#       define HDMI0_AUDIO_INFO_DM_INH(x)  (((x) & 1) << 15)
+#       define HDMI0_AUDIO_INFO_DM_INH_LSV(x)  (((x) & 0xff) << 8)
+#define HDMI0_60958_0                0x74d4
+#       define HDMI0_60958_CS_A(x)   (((x) & 1) << 0)
+#       define HDMI0_60958_CS_B(x)   (((x) & 1) << 1)
+#       define HDMI0_60958_CS_C(x)   (((x) & 1) << 2)
+#       define HDMI0_60958_CS_D(x)   (((x) & 3) << 3)
+#       define HDMI0_60958_CS_MODE(x)   (((x) & 3) << 6)
+#       define HDMI0_60958_CS_CATEGORY_CODE(x)      (((x) & 0xff) << 8)
+#       define HDMI0_60958_CS_SOURCE_NUMBER(x)      (((x) & 0xf) << 16)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_L(x)   (((x) & 0xf) << 20)
+#       define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+#       define HDMI0_60958_CS_CLOCK_ACCURACY(x)     (((x) & 3) << 28)
+#define HDMI0_60958_1                0x74d8
+#       define HDMI0_60958_CS_WORD_LENGTH(x)        (((x) & 0xf) << 0)
+#       define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x)   (((x) & 0xf) << 4)
+#       define HDMI0_60958_CS_VALID_L(x)   (((x) & 1) << 16)
+#       define HDMI0_60958_CS_VALID_R(x)   (((x) & 1) << 18)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_R(x)   (((x) & 0xf) << 20)
+#define HDMI0_ACR_PACKET_CONTROL     0x74dc
+#       define HDMI0_ACR_SEND        (1 << 0)
+#       define HDMI0_ACR_CONT        (1 << 1)
+#       define HDMI0_ACR_SELECT(x)   (((x) & 3) << 4)
+#       define HDMI0_ACR_HW          0
+#       define HDMI0_ACR_32          1
+#       define HDMI0_ACR_44          2
+#       define HDMI0_ACR_48          3
+#       define HDMI0_ACR_SOURCE      (1 << 8) /* 0 - hw; 1 - cts value */
+#       define HDMI0_ACR_AUTO_SEND   (1 << 12)
+#define HDMI0_RAMP_CONTROL0          0x74e0
+#       define HDMI0_RAMP_MAX_COUNT(x)   (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL1          0x74e4
+#       define HDMI0_RAMP_MIN_COUNT(x)   (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL2          0x74e8
+#       define HDMI0_RAMP_INC_COUNT(x)   (((x) & 0xffffff) << 0)
+#define HDMI0_RAMP_CONTROL3          0x74ec
+#       define HDMI0_RAMP_DEC_COUNT(x)   (((x) & 0xffffff) << 0)
+/* HDMI0_60958_2 is r7xx only */
+#define HDMI0_60958_2                0x74f0
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_2(x)   (((x) & 0xf) << 0)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_3(x)   (((x) & 0xf) << 4)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_4(x)   (((x) & 0xf) << 8)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_5(x)   (((x) & 0xf) << 12)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_6(x)   (((x) & 0xf) << 16)
+#       define HDMI0_60958_CS_CHANNEL_NUMBER_7(x)   (((x) & 0xf) << 20)
+/* r6xx only; second instance starts at 0x7700 */
+#define HDMI1_CONTROL                0x7700
+#define HDMI1_STATUS                 0x7704
+#define HDMI1_AUDIO_PACKET_CONTROL   0x7708
+/* DCE3; second instance starts at 0x7800 NOT 0x7700 */
+#define DCE3_HDMI1_CONTROL                0x7800
+#define DCE3_HDMI1_STATUS                 0x7804
+#define DCE3_HDMI1_AUDIO_PACKET_CONTROL   0x7808
+/* DCE3.2 (for interrupts) */
+#define AFMT_STATUS                          0x7600
+#       define AFMT_AUDIO_ENABLE             (1 << 4)
+#       define AFMT_AZ_FORMAT_WTRIG          (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_INT      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG      (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL            0x7604
+#       define AFMT_AUDIO_SAMPLE_SEND        (1 << 0)
+#       define AFMT_AUDIO_TEST_EN            (1 << 12)
+#       define AFMT_AUDIO_CHANNEL_SWAP       (1 << 24)
+#       define AFMT_60958_CS_UPDATE          (1 << 26)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+#       define AFMT_AZ_FORMAT_WTRIG_MASK     (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
+
+/*
+ * UVD
+ */
+#define UVD_SEMA_ADDR_LOW				0xef00
+#define UVD_SEMA_ADDR_HIGH				0xef04
+#define UVD_SEMA_CMD					0xef08
+
+#define UVD_GPCOM_VCPU_CMD				0xef0c
+#define UVD_GPCOM_VCPU_DATA0				0xef10
+#define UVD_GPCOM_VCPU_DATA1				0xef14
+#define UVD_ENGINE_CNTL					0xef18
+
+#define UVD_SEMA_CNTL					0xf400
+#define UVD_RB_ARB_CTRL					0xf480
+
+#define UVD_LMI_EXT40_ADDR				0xf498
+#define UVD_CGC_GATE					0xf4a8
+#define UVD_LMI_CTRL2					0xf4f4
+#define UVD_MASTINT_EN					0xf500
+#define UVD_LMI_ADDR_EXT				0xf594
+#define UVD_LMI_CTRL					0xf598
+#define UVD_LMI_SWAP_CNTL				0xf5b4
+#define UVD_MP_SWAP_CNTL				0xf5bc
+#define UVD_MPC_CNTL					0xf5dc
+#define UVD_MPC_SET_MUXA0				0xf5e4
+#define UVD_MPC_SET_MUXA1				0xf5e8
+#define UVD_MPC_SET_MUXB0				0xf5ec
+#define UVD_MPC_SET_MUXB1				0xf5f0
+#define UVD_MPC_SET_MUX					0xf5f4
+#define UVD_MPC_SET_ALU					0xf5f8
+
+#define UVD_VCPU_CNTL					0xf660
+#define UVD_SOFT_RESET					0xf680
+#define		RBC_SOFT_RESET					(1<<0)
+#define		LBSI_SOFT_RESET					(1<<1)
+#define		LMI_SOFT_RESET					(1<<2)
+#define		VCPU_SOFT_RESET					(1<<3)
+#define		CSM_SOFT_RESET					(1<<5)
+#define		CXW_SOFT_RESET					(1<<6)
+#define		TAP_SOFT_RESET					(1<<7)
+#define		LMI_UMC_SOFT_RESET				(1<<13)
+#define UVD_RBC_IB_BASE					0xf684
+#define UVD_RBC_IB_SIZE					0xf688
+#define UVD_RBC_RB_BASE					0xf68c
+#define UVD_RBC_RB_RPTR					0xf690
+#define UVD_RBC_RB_WPTR					0xf694
+#define UVD_RBC_RB_WPTR_CNTL				0xf698
+
+#define UVD_STATUS					0xf6bc
+
+#define UVD_SEMA_TIMEOUT_STATUS				0xf6c0
+#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL		0xf6c4
+#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL		0xf6c8
+#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL		0xf6cc
+
+#define UVD_RBC_RB_CNTL					0xf6a4
+#define UVD_RBC_RB_RPTR_ADDR				0xf6a8
+
+#define UVD_CONTEXT_ID					0xf6f4
+
+#	define UPLL_CTLREQ_MASK				0x00000008
+#	define UPLL_CTLACK_MASK				0x40000000
+#	define UPLL_CTLACK2_MASK			0x80000000
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |			\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |			\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
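+/* Illustrative sketch (assumption; radeon_ring_write() from the driver):
+ * a type-0 packet writes n+1 consecutive registers starting at reg, e.g.
+ * a single scratch-register write:
+ *
+ *	radeon_ring_write(ring, PACKET0(SCRATCH_REG0, 0));
+ *	radeon_ring_write(ring, 0xDEADBEEF);
+ */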
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_INDIRECT_BUFFER_END			0x17
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_START_3D_CMDBUF				0x24
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_DRAW_INDEX_IMMD_BE			0x29
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDEX				0x2B
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_INDIRECT_BUFFER_MP			0x38
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#              define PACKET3_SEM_WAIT_ON_SIGNAL    (0x1 << 12)
+#              define PACKET3_SEM_SEL_SIGNAL	    (0x6 << 29)
+#              define PACKET3_SEM_SEL_WAIT	    (0x7 << 29)
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_COPY_DW					0x3B
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO [31:0]
+ * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
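+/* Illustrative sketch (assumption): a CP DMA copy follows the 6-dword
+ * layout documented above; COMMAND flags are OR'ed into the last dword:
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
+ *	radeon_ring_write(ring, lower_32_bits(src));
+ *	radeon_ring_write(ring, PACKET3_CP_DMA_CP_SYNC | (upper_32_bits(src) & 0xff));
+ *	radeon_ring_write(ring, lower_32_bits(dst));
+ *	radeon_ring_write(ring, upper_32_bits(dst) & 0xff);
+ *	radeon_ring_write(ring, byte_count);
+ */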
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_FULL_CACHE_ENA       (1 << 20) /* r7xx+ only */
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_VC_ACTION_ENA        (1 << 24)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_ACTION_ENA        (1 << 27)
+#              define PACKET3_SMX_ACTION_ENA       (1 << 28)
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define		EVENT_TYPE(x)                           ((x) << 0)
+#define		EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+		 * 1 - ZPASS_DONE
+		 * 2 - SAMPLE_PIPELINESTAT
+		 * 3 - SAMPLE_STREAMOUTSTAT*
+		 * 4 - *S_PARTIAL_FLUSH
+		 * 5 - TS events
+		 */
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define		DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+		 * 1 - send low 32bit data
+		 * 2 - send 64bit data
+		 * 3 - send 64bit counter value
+		 */
+#define		INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+		 * 1 - interrupt only (DATA_SEL = 0)
+		 * 2 - interrupt when data write is confirmed
+		 */
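+/* Illustrative sketch (assumption): a fence is typically emitted with
+ * EVENT_WRITE_EOP, writing a 32-bit value and raising an interrupt once
+ * the write is confirmed:
+ *
+ *	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ *	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+ *	radeon_ring_write(ring, lower_32_bits(addr));
+ *	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+ *	radeon_ring_write(ring, fence_value);
+ *	radeon_ring_write(ring, 0);
+ */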
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_OFFSET			0x00008000
+#define		PACKET3_SET_CONFIG_REG_END			0x0000ac00
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_OFFSET			0x00028000
+#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
+#define	PACKET3_SET_ALU_CONST				0x6A
+#define		PACKET3_SET_ALU_CONST_OFFSET			0x00030000
+#define		PACKET3_SET_ALU_CONST_END			0x00032000
+#define	PACKET3_SET_BOOL_CONST				0x6B
+#define		PACKET3_SET_BOOL_CONST_OFFSET			0x0003e380
+#define		PACKET3_SET_BOOL_CONST_END			0x00040000
+#define	PACKET3_SET_LOOP_CONST				0x6C
+#define		PACKET3_SET_LOOP_CONST_OFFSET			0x0003e200
+#define		PACKET3_SET_LOOP_CONST_END			0x0003e380
+#define	PACKET3_SET_RESOURCE				0x6D
+#define		PACKET3_SET_RESOURCE_OFFSET			0x00038000
+#define		PACKET3_SET_RESOURCE_END			0x0003c000
+#define	PACKET3_SET_SAMPLER				0x6E
+#define		PACKET3_SET_SAMPLER_OFFSET			0x0003c000
+#define		PACKET3_SET_SAMPLER_END				0x0003cff0
+#define	PACKET3_SET_CTL_CONST				0x6F
+#define		PACKET3_SET_CTL_CONST_OFFSET			0x0003cff0
+#define		PACKET3_SET_CTL_CONST_END			0x0003e200
+#define	PACKET3_STRMOUT_BASE_UPDATE			0x72 /* r7xx */
+#define	PACKET3_SURFACE_BASE_UPDATE			0x73
+
+#define R_000011_K8_FB_LOCATION                 0x11
+#define R_000012_MC_MISC_UMA_CNTL               0x12
+#define   G_000012_K8_ADDR_EXT(x)               (((x) >> 0) & 0xFF)
+#define R_0028F8_MC_INDEX			0x28F8
+#define   	S_0028F8_MC_IND_ADDR(x)                 (((x) & 0x1FF) << 0)
+#define   	C_0028F8_MC_IND_ADDR                    0xFFFFFE00
+#define   	S_0028F8_MC_IND_WR_EN(x)                (((x) & 0x1) << 9)
+#define R_0028FC_MC_DATA                        0x28FC
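+/* Editorial note (assumption) on the macro convention used below:
+ * S_<off>_<FIELD>(x) shifts a value into the field, G_<off>_<FIELD>(x)
+ * extracts it from a register value, and C_<off>_<FIELD> is the AND-mask
+ * that clears the field, e.g.:
+ *
+ *	tmp = (tmp & C_0028F8_MC_IND_ADDR) | S_0028F8_MC_IND_ADDR(addr);
+ */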
+
+#define	R_008020_GRBM_SOFT_RESET		0x8020
+#define		S_008020_SOFT_RESET_CP(x)		(((x) & 1) << 0)
+#define		S_008020_SOFT_RESET_CB(x)		(((x) & 1) << 1)
+#define		S_008020_SOFT_RESET_CR(x)		(((x) & 1) << 2)
+#define		S_008020_SOFT_RESET_DB(x)		(((x) & 1) << 3)
+#define		S_008020_SOFT_RESET_PA(x)		(((x) & 1) << 5)
+#define		S_008020_SOFT_RESET_SC(x)		(((x) & 1) << 6)
+#define		S_008020_SOFT_RESET_SMX(x)		(((x) & 1) << 7)
+#define		S_008020_SOFT_RESET_SPI(x)		(((x) & 1) << 8)
+#define		S_008020_SOFT_RESET_SH(x)		(((x) & 1) << 9)
+#define		S_008020_SOFT_RESET_SX(x)		(((x) & 1) << 10)
+#define		S_008020_SOFT_RESET_TC(x)		(((x) & 1) << 11)
+#define		S_008020_SOFT_RESET_TA(x)		(((x) & 1) << 12)
+#define		S_008020_SOFT_RESET_VC(x)		(((x) & 1) << 13)
+#define		S_008020_SOFT_RESET_VGT(x)		(((x) & 1) << 14)
+#define	R_008010_GRBM_STATUS			0x8010
+#define		S_008010_CMDFIFO_AVAIL(x)		(((x) & 0x1F) << 0)
+#define		S_008010_CP_RQ_PENDING(x)		(((x) & 1) << 6)
+#define		S_008010_CF_RQ_PENDING(x)		(((x) & 1) << 7)
+#define		S_008010_PF_RQ_PENDING(x)		(((x) & 1) << 8)
+#define		S_008010_GRBM_EE_BUSY(x)		(((x) & 1) << 10)
+#define		S_008010_VC_BUSY(x)			(((x) & 1) << 11)
+#define		S_008010_DB03_CLEAN(x)			(((x) & 1) << 12)
+#define		S_008010_CB03_CLEAN(x)			(((x) & 1) << 13)
+#define		S_008010_VGT_BUSY_NO_DMA(x)		(((x) & 1) << 16)
+#define		S_008010_VGT_BUSY(x)			(((x) & 1) << 17)
+#define		S_008010_TA03_BUSY(x)			(((x) & 1) << 18)
+#define		S_008010_TC_BUSY(x)			(((x) & 1) << 19)
+#define		S_008010_SX_BUSY(x)			(((x) & 1) << 20)
+#define		S_008010_SH_BUSY(x)			(((x) & 1) << 21)
+#define		S_008010_SPI03_BUSY(x)			(((x) & 1) << 22)
+#define		S_008010_SMX_BUSY(x)			(((x) & 1) << 23)
+#define		S_008010_SC_BUSY(x)			(((x) & 1) << 24)
+#define		S_008010_PA_BUSY(x)			(((x) & 1) << 25)
+#define		S_008010_DB03_BUSY(x)			(((x) & 1) << 26)
+#define		S_008010_CR_BUSY(x)			(((x) & 1) << 27)
+#define		S_008010_CP_COHERENCY_BUSY(x)		(((x) & 1) << 28)
+#define		S_008010_CP_BUSY(x)			(((x) & 1) << 29)
+#define		S_008010_CB03_BUSY(x)			(((x) & 1) << 30)
+#define		S_008010_GUI_ACTIVE(x)			(((x) & 1) << 31)
+#define		G_008010_CMDFIFO_AVAIL(x)		(((x) >> 0) & 0x1F)
+#define		G_008010_CP_RQ_PENDING(x)		(((x) >> 6) & 1)
+#define		G_008010_CF_RQ_PENDING(x)		(((x) >> 7) & 1)
+#define		G_008010_PF_RQ_PENDING(x)		(((x) >> 8) & 1)
+#define		G_008010_GRBM_EE_BUSY(x)		(((x) >> 10) & 1)
+#define		G_008010_VC_BUSY(x)			(((x) >> 11) & 1)
+#define		G_008010_DB03_CLEAN(x)			(((x) >> 12) & 1)
+#define		G_008010_CB03_CLEAN(x)			(((x) >> 13) & 1)
+#define		G_008010_TA_BUSY(x)			(((x) >> 14) & 1)
+#define		G_008010_VGT_BUSY_NO_DMA(x)		(((x) >> 16) & 1)
+#define		G_008010_VGT_BUSY(x)			(((x) >> 17) & 1)
+#define		G_008010_TA03_BUSY(x)			(((x) >> 18) & 1)
+#define		G_008010_TC_BUSY(x)			(((x) >> 19) & 1)
+#define		G_008010_SX_BUSY(x)			(((x) >> 20) & 1)
+#define		G_008010_SH_BUSY(x)			(((x) >> 21) & 1)
+#define		G_008010_SPI03_BUSY(x)			(((x) >> 22) & 1)
+#define		G_008010_SMX_BUSY(x)			(((x) >> 23) & 1)
+#define		G_008010_SC_BUSY(x)			(((x) >> 24) & 1)
+#define		G_008010_PA_BUSY(x)			(((x) >> 25) & 1)
+#define		G_008010_DB03_BUSY(x)			(((x) >> 26) & 1)
+#define		G_008010_CR_BUSY(x)			(((x) >> 27) & 1)
+#define		G_008010_CP_COHERENCY_BUSY(x)		(((x) >> 28) & 1)
+#define		G_008010_CP_BUSY(x)			(((x) >> 29) & 1)
+#define		G_008010_CB03_BUSY(x)			(((x) >> 30) & 1)
+#define		G_008010_GUI_ACTIVE(x)			(((x) >> 31) & 1)
+#define	R_008014_GRBM_STATUS2			0x8014
+#define		S_008014_CR_CLEAN(x)			(((x) & 1) << 0)
+#define		S_008014_SMX_CLEAN(x)			(((x) & 1) << 1)
+#define		S_008014_SPI0_BUSY(x)			(((x) & 1) << 8)
+#define		S_008014_SPI1_BUSY(x)			(((x) & 1) << 9)
+#define		S_008014_SPI2_BUSY(x)			(((x) & 1) << 10)
+#define		S_008014_SPI3_BUSY(x)			(((x) & 1) << 11)
+#define		S_008014_TA0_BUSY(x)			(((x) & 1) << 12)
+#define		S_008014_TA1_BUSY(x)			(((x) & 1) << 13)
+#define		S_008014_TA2_BUSY(x)			(((x) & 1) << 14)
+#define		S_008014_TA3_BUSY(x)			(((x) & 1) << 15)
+#define		S_008014_DB0_BUSY(x)			(((x) & 1) << 16)
+#define		S_008014_DB1_BUSY(x)			(((x) & 1) << 17)
+#define		S_008014_DB2_BUSY(x)			(((x) & 1) << 18)
+#define		S_008014_DB3_BUSY(x)			(((x) & 1) << 19)
+#define		S_008014_CB0_BUSY(x)			(((x) & 1) << 20)
+#define		S_008014_CB1_BUSY(x)			(((x) & 1) << 21)
+#define		S_008014_CB2_BUSY(x)			(((x) & 1) << 22)
+#define		S_008014_CB3_BUSY(x)			(((x) & 1) << 23)
+#define		G_008014_CR_CLEAN(x)			(((x) >> 0) & 1)
+#define		G_008014_SMX_CLEAN(x)			(((x) >> 1) & 1)
+#define		G_008014_SPI0_BUSY(x)			(((x) >> 8) & 1)
+#define		G_008014_SPI1_BUSY(x)			(((x) >> 9) & 1)
+#define		G_008014_SPI2_BUSY(x)			(((x) >> 10) & 1)
+#define		G_008014_SPI3_BUSY(x)			(((x) >> 11) & 1)
+#define		G_008014_TA0_BUSY(x)			(((x) >> 12) & 1)
+#define		G_008014_TA1_BUSY(x)			(((x) >> 13) & 1)
+#define		G_008014_TA2_BUSY(x)			(((x) >> 14) & 1)
+#define		G_008014_TA3_BUSY(x)			(((x) >> 15) & 1)
+#define		G_008014_DB0_BUSY(x)			(((x) >> 16) & 1)
+#define		G_008014_DB1_BUSY(x)			(((x) >> 17) & 1)
+#define		G_008014_DB2_BUSY(x)			(((x) >> 18) & 1)
+#define		G_008014_DB3_BUSY(x)			(((x) >> 19) & 1)
+#define		G_008014_CB0_BUSY(x)			(((x) >> 20) & 1)
+#define		G_008014_CB1_BUSY(x)			(((x) >> 21) & 1)
+#define		G_008014_CB2_BUSY(x)			(((x) >> 22) & 1)
+#define		G_008014_CB3_BUSY(x)			(((x) >> 23) & 1)
+#define	R_000E50_SRBM_STATUS				0x0E50
+#define		G_000E50_RLC_RQ_PENDING(x)		(((x) >> 3) & 1)
+#define		G_000E50_RCU_RQ_PENDING(x)		(((x) >> 4) & 1)
+#define		G_000E50_GRBM_RQ_PENDING(x)		(((x) >> 5) & 1)
+#define		G_000E50_HI_RQ_PENDING(x)		(((x) >> 6) & 1)
+#define		G_000E50_IO_EXTERN_SIGNAL(x)		(((x) >> 7) & 1)
+#define		G_000E50_VMC_BUSY(x)			(((x) >> 8) & 1)
+#define		G_000E50_MCB_BUSY(x)			(((x) >> 9) & 1)
+#define		G_000E50_MCDZ_BUSY(x)			(((x) >> 10) & 1)
+#define		G_000E50_MCDY_BUSY(x)			(((x) >> 11) & 1)
+#define		G_000E50_MCDX_BUSY(x)			(((x) >> 12) & 1)
+#define		G_000E50_MCDW_BUSY(x)			(((x) >> 13) & 1)
+#define		G_000E50_SEM_BUSY(x)			(((x) >> 14) & 1)
+#define		G_000E50_RLC_BUSY(x)			(((x) >> 15) & 1)
+#define		G_000E50_IH_BUSY(x)			(((x) >> 17) & 1)
+#define		G_000E50_BIF_BUSY(x)			(((x) >> 29) & 1)
+#define	R_000E60_SRBM_SOFT_RESET			0x0E60
+#define		S_000E60_SOFT_RESET_BIF(x)		(((x) & 1) << 1)
+#define		S_000E60_SOFT_RESET_CG(x)		(((x) & 1) << 2)
+#define		S_000E60_SOFT_RESET_CMC(x)		(((x) & 1) << 3)
+#define		S_000E60_SOFT_RESET_CSC(x)		(((x) & 1) << 4)
+#define		S_000E60_SOFT_RESET_DC(x)		(((x) & 1) << 5)
+#define		S_000E60_SOFT_RESET_GRBM(x)		(((x) & 1) << 8)
+#define		S_000E60_SOFT_RESET_HDP(x)		(((x) & 1) << 9)
+#define		S_000E60_SOFT_RESET_IH(x)		(((x) & 1) << 10)
+#define		S_000E60_SOFT_RESET_MC(x)		(((x) & 1) << 11)
+#define		S_000E60_SOFT_RESET_RLC(x)		(((x) & 1) << 13)
+#define		S_000E60_SOFT_RESET_ROM(x)		(((x) & 1) << 14)
+#define		S_000E60_SOFT_RESET_SEM(x)		(((x) & 1) << 15)
+#define		S_000E60_SOFT_RESET_TSC(x)		(((x) & 1) << 16)
+#define		S_000E60_SOFT_RESET_VMC(x)		(((x) & 1) << 17)
+
+#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL		0x5480
+
+#define R_028C04_PA_SC_AA_CONFIG                     0x028C04
+#define   S_028C04_MSAA_NUM_SAMPLES(x)                 (((x) & 0x3) << 0)
+#define   G_028C04_MSAA_NUM_SAMPLES(x)                 (((x) >> 0) & 0x3)
+#define   C_028C04_MSAA_NUM_SAMPLES                    0xFFFFFFFC
+#define   S_028C04_AA_MASK_CENTROID_DTMN(x)            (((x) & 0x1) << 4)
+#define   G_028C04_AA_MASK_CENTROID_DTMN(x)            (((x) >> 4) & 0x1)
+#define   C_028C04_AA_MASK_CENTROID_DTMN               0xFFFFFFEF
+#define   S_028C04_MAX_SAMPLE_DIST(x)                  (((x) & 0xF) << 13)
+#define   G_028C04_MAX_SAMPLE_DIST(x)                  (((x) >> 13) & 0xF)
+#define   C_028C04_MAX_SAMPLE_DIST                     0xFFFE1FFF
+#define R_0280E0_CB_COLOR0_FRAG                      0x0280E0
+#define   S_0280E0_BASE_256B(x)                        (((x) & 0xFFFFFFFF) << 0)
+#define   G_0280E0_BASE_256B(x)                        (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0280E0_BASE_256B                           0x00000000
+#define R_0280E4_CB_COLOR1_FRAG                      0x0280E4
+#define R_0280E8_CB_COLOR2_FRAG                      0x0280E8
+#define R_0280EC_CB_COLOR3_FRAG                      0x0280EC
+#define R_0280F0_CB_COLOR4_FRAG                      0x0280F0
+#define R_0280F4_CB_COLOR5_FRAG                      0x0280F4
+#define R_0280F8_CB_COLOR6_FRAG                      0x0280F8
+#define R_0280FC_CB_COLOR7_FRAG                      0x0280FC
+#define R_0280C0_CB_COLOR0_TILE                      0x0280C0
+#define   S_0280C0_BASE_256B(x)                        (((x) & 0xFFFFFFFF) << 0)
+#define   G_0280C0_BASE_256B(x)                        (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0280C0_BASE_256B                           0x00000000
+#define R_0280C4_CB_COLOR1_TILE                      0x0280C4
+#define R_0280C8_CB_COLOR2_TILE                      0x0280C8
+#define R_0280CC_CB_COLOR3_TILE                      0x0280CC
+#define R_0280D0_CB_COLOR4_TILE                      0x0280D0
+#define R_0280D4_CB_COLOR5_TILE                      0x0280D4
+#define R_0280D8_CB_COLOR6_TILE                      0x0280D8
+#define R_0280DC_CB_COLOR7_TILE                      0x0280DC
+#define R_0280A0_CB_COLOR0_INFO                      0x0280A0
+#define   S_0280A0_ENDIAN(x)                           (((x) & 0x3) << 0)
+#define   G_0280A0_ENDIAN(x)                           (((x) >> 0) & 0x3)
+#define   C_0280A0_ENDIAN                              0xFFFFFFFC
+#define   S_0280A0_FORMAT(x)                           (((x) & 0x3F) << 2)
+#define   G_0280A0_FORMAT(x)                           (((x) >> 2) & 0x3F)
+#define   C_0280A0_FORMAT                              0xFFFFFF03
+#define     V_0280A0_COLOR_INVALID                     0x00000000
+#define     V_0280A0_COLOR_8                           0x00000001
+#define     V_0280A0_COLOR_4_4                         0x00000002
+#define     V_0280A0_COLOR_3_3_2                       0x00000003
+#define     V_0280A0_COLOR_16                          0x00000005
+#define     V_0280A0_COLOR_16_FLOAT                    0x00000006
+#define     V_0280A0_COLOR_8_8                         0x00000007
+#define     V_0280A0_COLOR_5_6_5                       0x00000008
+#define     V_0280A0_COLOR_6_5_5                       0x00000009
+#define     V_0280A0_COLOR_1_5_5_5                     0x0000000A
+#define     V_0280A0_COLOR_4_4_4_4                     0x0000000B
+#define     V_0280A0_COLOR_5_5_5_1                     0x0000000C
+#define     V_0280A0_COLOR_32                          0x0000000D
+#define     V_0280A0_COLOR_32_FLOAT                    0x0000000E
+#define     V_0280A0_COLOR_16_16                       0x0000000F
+#define     V_0280A0_COLOR_16_16_FLOAT                 0x00000010
+#define     V_0280A0_COLOR_8_24                        0x00000011
+#define     V_0280A0_COLOR_8_24_FLOAT                  0x00000012
+#define     V_0280A0_COLOR_24_8                        0x00000013
+#define     V_0280A0_COLOR_24_8_FLOAT                  0x00000014
+#define     V_0280A0_COLOR_10_11_11                    0x00000015
+#define     V_0280A0_COLOR_10_11_11_FLOAT              0x00000016
+#define     V_0280A0_COLOR_11_11_10                    0x00000017
+#define     V_0280A0_COLOR_11_11_10_FLOAT              0x00000018
+#define     V_0280A0_COLOR_2_10_10_10                  0x00000019
+#define     V_0280A0_COLOR_8_8_8_8                     0x0000001A
+#define     V_0280A0_COLOR_10_10_10_2                  0x0000001B
+#define     V_0280A0_COLOR_X24_8_32_FLOAT              0x0000001C
+#define     V_0280A0_COLOR_32_32                       0x0000001D
+#define     V_0280A0_COLOR_32_32_FLOAT                 0x0000001E
+#define     V_0280A0_COLOR_16_16_16_16                 0x0000001F
+#define     V_0280A0_COLOR_16_16_16_16_FLOAT           0x00000020
+#define     V_0280A0_COLOR_32_32_32_32                 0x00000022
+#define     V_0280A0_COLOR_32_32_32_32_FLOAT           0x00000023
+#define   S_0280A0_ARRAY_MODE(x)                       (((x) & 0xF) << 8)
+#define   G_0280A0_ARRAY_MODE(x)                       (((x) >> 8) & 0xF)
+#define   C_0280A0_ARRAY_MODE                          0xFFFFF0FF
+#define     V_0280A0_ARRAY_LINEAR_GENERAL              0x00000000
+#define     V_0280A0_ARRAY_LINEAR_ALIGNED              0x00000001
+#define     V_0280A0_ARRAY_1D_TILED_THIN1              0x00000002
+#define     V_0280A0_ARRAY_2D_TILED_THIN1              0x00000004
+#define   S_0280A0_NUMBER_TYPE(x)                      (((x) & 0x7) << 12)
+#define   G_0280A0_NUMBER_TYPE(x)                      (((x) >> 12) & 0x7)
+#define   C_0280A0_NUMBER_TYPE                         0xFFFF8FFF
+#define   S_0280A0_READ_SIZE(x)                        (((x) & 0x1) << 15)
+#define   G_0280A0_READ_SIZE(x)                        (((x) >> 15) & 0x1)
+#define   C_0280A0_READ_SIZE                           0xFFFF7FFF
+#define   S_0280A0_COMP_SWAP(x)                        (((x) & 0x3) << 16)
+#define   G_0280A0_COMP_SWAP(x)                        (((x) >> 16) & 0x3)
+#define   C_0280A0_COMP_SWAP                           0xFFFCFFFF
+#define   S_0280A0_TILE_MODE(x)                        (((x) & 0x3) << 18)
+#define   G_0280A0_TILE_MODE(x)                        (((x) >> 18) & 0x3)
+#define   C_0280A0_TILE_MODE                           0xFFF3FFFF
+#define     V_0280A0_TILE_DISABLE			0
+#define     V_0280A0_CLEAR_ENABLE			1
+#define     V_0280A0_FRAG_ENABLE			2
+#define   S_0280A0_BLEND_CLAMP(x)                      (((x) & 0x1) << 20)
+#define   G_0280A0_BLEND_CLAMP(x)                      (((x) >> 20) & 0x1)
+#define   C_0280A0_BLEND_CLAMP                         0xFFEFFFFF
+#define   S_0280A0_CLEAR_COLOR(x)                      (((x) & 0x1) << 21)
+#define   G_0280A0_CLEAR_COLOR(x)                      (((x) >> 21) & 0x1)
+#define   C_0280A0_CLEAR_COLOR                         0xFFDFFFFF
+#define   S_0280A0_BLEND_BYPASS(x)                     (((x) & 0x1) << 22)
+#define   G_0280A0_BLEND_BYPASS(x)                     (((x) >> 22) & 0x1)
+#define   C_0280A0_BLEND_BYPASS                        0xFFBFFFFF
+#define   S_0280A0_BLEND_FLOAT32(x)                    (((x) & 0x1) << 23)
+#define   G_0280A0_BLEND_FLOAT32(x)                    (((x) >> 23) & 0x1)
+#define   C_0280A0_BLEND_FLOAT32                       0xFF7FFFFF
+#define   S_0280A0_SIMPLE_FLOAT(x)                     (((x) & 0x1) << 24)
+#define   G_0280A0_SIMPLE_FLOAT(x)                     (((x) >> 24) & 0x1)
+#define   C_0280A0_SIMPLE_FLOAT                        0xFEFFFFFF
+#define   S_0280A0_ROUND_MODE(x)                       (((x) & 0x1) << 25)
+#define   G_0280A0_ROUND_MODE(x)                       (((x) >> 25) & 0x1)
+#define   C_0280A0_ROUND_MODE                          0xFDFFFFFF
+#define   S_0280A0_TILE_COMPACT(x)                     (((x) & 0x1) << 26)
+#define   G_0280A0_TILE_COMPACT(x)                     (((x) >> 26) & 0x1)
+#define   C_0280A0_TILE_COMPACT                        0xFBFFFFFF
+#define   S_0280A0_SOURCE_FORMAT(x)                    (((x) & 0x1) << 27)
+#define   G_0280A0_SOURCE_FORMAT(x)                    (((x) >> 27) & 0x1)
+#define   C_0280A0_SOURCE_FORMAT                       0xF7FFFFFF
+#define R_0280A4_CB_COLOR1_INFO                      0x0280A4
+#define R_0280A8_CB_COLOR2_INFO                      0x0280A8
+#define R_0280AC_CB_COLOR3_INFO                      0x0280AC
+#define R_0280B0_CB_COLOR4_INFO                      0x0280B0
+#define R_0280B4_CB_COLOR5_INFO                      0x0280B4
+#define R_0280B8_CB_COLOR6_INFO                      0x0280B8
+#define R_0280BC_CB_COLOR7_INFO                      0x0280BC
+#define R_028060_CB_COLOR0_SIZE                      0x028060
+#define   S_028060_PITCH_TILE_MAX(x)                   (((x) & 0x3FF) << 0)
+#define   G_028060_PITCH_TILE_MAX(x)                   (((x) >> 0) & 0x3FF)
+#define   C_028060_PITCH_TILE_MAX                      0xFFFFFC00
+#define   S_028060_SLICE_TILE_MAX(x)                   (((x) & 0xFFFFF) << 10)
+#define   G_028060_SLICE_TILE_MAX(x)                   (((x) >> 10) & 0xFFFFF)
+#define   C_028060_SLICE_TILE_MAX                      0xC00003FF
+#define R_028064_CB_COLOR1_SIZE                      0x028064
+#define R_028068_CB_COLOR2_SIZE                      0x028068
+#define R_02806C_CB_COLOR3_SIZE                      0x02806C
+#define R_028070_CB_COLOR4_SIZE                      0x028070
+#define R_028074_CB_COLOR5_SIZE                      0x028074
+#define R_028078_CB_COLOR6_SIZE                      0x028078
+#define R_02807C_CB_COLOR7_SIZE                      0x02807C
+#define R_028238_CB_TARGET_MASK                      0x028238
+#define   S_028238_TARGET0_ENABLE(x)                   (((x) & 0xF) << 0)
+#define   G_028238_TARGET0_ENABLE(x)                   (((x) >> 0) & 0xF)
+#define   C_028238_TARGET0_ENABLE                      0xFFFFFFF0
+#define   S_028238_TARGET1_ENABLE(x)                   (((x) & 0xF) << 4)
+#define   G_028238_TARGET1_ENABLE(x)                   (((x) >> 4) & 0xF)
+#define   C_028238_TARGET1_ENABLE                      0xFFFFFF0F
+#define   S_028238_TARGET2_ENABLE(x)                   (((x) & 0xF) << 8)
+#define   G_028238_TARGET2_ENABLE(x)                   (((x) >> 8) & 0xF)
+#define   C_028238_TARGET2_ENABLE                      0xFFFFF0FF
+#define   S_028238_TARGET3_ENABLE(x)                   (((x) & 0xF) << 12)
+#define   G_028238_TARGET3_ENABLE(x)                   (((x) >> 12) & 0xF)
+#define   C_028238_TARGET3_ENABLE                      0xFFFF0FFF
+#define   S_028238_TARGET4_ENABLE(x)                   (((x) & 0xF) << 16)
+#define   G_028238_TARGET4_ENABLE(x)                   (((x) >> 16) & 0xF)
+#define   C_028238_TARGET4_ENABLE                      0xFFF0FFFF
+#define   S_028238_TARGET5_ENABLE(x)                   (((x) & 0xF) << 20)
+#define   G_028238_TARGET5_ENABLE(x)                   (((x) >> 20) & 0xF)
+#define   C_028238_TARGET5_ENABLE                      0xFF0FFFFF
+#define   S_028238_TARGET6_ENABLE(x)                   (((x) & 0xF) << 24)
+#define   G_028238_TARGET6_ENABLE(x)                   (((x) >> 24) & 0xF)
+#define   C_028238_TARGET6_ENABLE                      0xF0FFFFFF
+#define   S_028238_TARGET7_ENABLE(x)                   (((x) & 0xF) << 28)
+#define   G_028238_TARGET7_ENABLE(x)                   (((x) >> 28) & 0xF)
+#define   C_028238_TARGET7_ENABLE                      0x0FFFFFFF
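+/* Each render target gets a 4-bit channel-enable nibble in the target mask.
+ * An illustrative value enabling all four channels on targets 0 and 1
+ * (a sketch, not taken from driver code):
+ *
+ *	u32 mask = S_028238_TARGET0_ENABLE(0xF) |
+ *		   S_028238_TARGET1_ENABLE(0xF);
+ */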
+#define R_02823C_CB_SHADER_MASK                      0x02823C
+#define   S_02823C_OUTPUT0_ENABLE(x)                   (((x) & 0xF) << 0)
+#define   G_02823C_OUTPUT0_ENABLE(x)                   (((x) >> 0) & 0xF)
+#define   C_02823C_OUTPUT0_ENABLE                      0xFFFFFFF0
+#define   S_02823C_OUTPUT1_ENABLE(x)                   (((x) & 0xF) << 4)
+#define   G_02823C_OUTPUT1_ENABLE(x)                   (((x) >> 4) & 0xF)
+#define   C_02823C_OUTPUT1_ENABLE                      0xFFFFFF0F
+#define   S_02823C_OUTPUT2_ENABLE(x)                   (((x) & 0xF) << 8)
+#define   G_02823C_OUTPUT2_ENABLE(x)                   (((x) >> 8) & 0xF)
+#define   C_02823C_OUTPUT2_ENABLE                      0xFFFFF0FF
+#define   S_02823C_OUTPUT3_ENABLE(x)                   (((x) & 0xF) << 12)
+#define   G_02823C_OUTPUT3_ENABLE(x)                   (((x) >> 12) & 0xF)
+#define   C_02823C_OUTPUT3_ENABLE                      0xFFFF0FFF
+#define   S_02823C_OUTPUT4_ENABLE(x)                   (((x) & 0xF) << 16)
+#define   G_02823C_OUTPUT4_ENABLE(x)                   (((x) >> 16) & 0xF)
+#define   C_02823C_OUTPUT4_ENABLE                      0xFFF0FFFF
+#define   S_02823C_OUTPUT5_ENABLE(x)                   (((x) & 0xF) << 20)
+#define   G_02823C_OUTPUT5_ENABLE(x)                   (((x) >> 20) & 0xF)
+#define   C_02823C_OUTPUT5_ENABLE                      0xFF0FFFFF
+#define   S_02823C_OUTPUT6_ENABLE(x)                   (((x) & 0xF) << 24)
+#define   G_02823C_OUTPUT6_ENABLE(x)                   (((x) >> 24) & 0xF)
+#define   C_02823C_OUTPUT6_ENABLE                      0xF0FFFFFF
+#define   S_02823C_OUTPUT7_ENABLE(x)                   (((x) & 0xF) << 28)
+#define   G_02823C_OUTPUT7_ENABLE(x)                   (((x) >> 28) & 0xF)
+#define   C_02823C_OUTPUT7_ENABLE                      0x0FFFFFFF
+#define R_028AB0_VGT_STRMOUT_EN                      0x028AB0
+#define   S_028AB0_STREAMOUT(x)                        (((x) & 0x1) << 0)
+#define   G_028AB0_STREAMOUT(x)                        (((x) >> 0) & 0x1)
+#define   C_028AB0_STREAMOUT                           0xFFFFFFFE
+#define R_028B20_VGT_STRMOUT_BUFFER_EN               0x028B20
+#define   S_028B20_BUFFER_0_EN(x)                      (((x) & 0x1) << 0)
+#define   G_028B20_BUFFER_0_EN(x)                      (((x) >> 0) & 0x1)
+#define   C_028B20_BUFFER_0_EN                         0xFFFFFFFE
+#define   S_028B20_BUFFER_1_EN(x)                      (((x) & 0x1) << 1)
+#define   G_028B20_BUFFER_1_EN(x)                      (((x) >> 1) & 0x1)
+#define   C_028B20_BUFFER_1_EN                         0xFFFFFFFD
+#define   S_028B20_BUFFER_2_EN(x)                      (((x) & 0x1) << 2)
+#define   G_028B20_BUFFER_2_EN(x)                      (((x) >> 2) & 0x1)
+#define   C_028B20_BUFFER_2_EN                         0xFFFFFFFB
+#define   S_028B20_BUFFER_3_EN(x)                      (((x) & 0x1) << 3)
+#define   G_028B20_BUFFER_3_EN(x)                      (((x) >> 3) & 0x1)
+#define   C_028B20_BUFFER_3_EN                         0xFFFFFFF7
+#define   S_028B20_SIZE(x)                             (((x) & 0xFFFFFFFF) << 0)
+#define   G_028B20_SIZE(x)                             (((x) >> 0) & 0xFFFFFFFF)
+#define   C_028B20_SIZE                                0x00000000
+#define R_038000_SQ_TEX_RESOURCE_WORD0_0             0x038000
+#define   S_038000_DIM(x)                              (((x) & 0x7) << 0)
+#define   G_038000_DIM(x)                              (((x) >> 0) & 0x7)
+#define   C_038000_DIM                                 0xFFFFFFF8
+#define     V_038000_SQ_TEX_DIM_1D                     0x00000000
+#define     V_038000_SQ_TEX_DIM_2D                     0x00000001
+#define     V_038000_SQ_TEX_DIM_3D                     0x00000002
+#define     V_038000_SQ_TEX_DIM_CUBEMAP                0x00000003
+#define     V_038000_SQ_TEX_DIM_1D_ARRAY               0x00000004
+#define     V_038000_SQ_TEX_DIM_2D_ARRAY               0x00000005
+#define     V_038000_SQ_TEX_DIM_2D_MSAA                0x00000006
+#define     V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA          0x00000007
+#define   S_038000_TILE_MODE(x)                        (((x) & 0xF) << 3)
+#define   G_038000_TILE_MODE(x)                        (((x) >> 3) & 0xF)
+#define   C_038000_TILE_MODE                           0xFFFFFF87
+#define     V_038000_ARRAY_LINEAR_GENERAL              0x00000000
+#define     V_038000_ARRAY_LINEAR_ALIGNED              0x00000001
+#define     V_038000_ARRAY_1D_TILED_THIN1              0x00000002
+#define     V_038000_ARRAY_2D_TILED_THIN1              0x00000004
+#define   S_038000_TILE_TYPE(x)                        (((x) & 0x1) << 7)
+#define   G_038000_TILE_TYPE(x)                        (((x) >> 7) & 0x1)
+#define   C_038000_TILE_TYPE                           0xFFFFFF7F
+#define   S_038000_PITCH(x)                            (((x) & 0x7FF) << 8)
+#define   G_038000_PITCH(x)                            (((x) >> 8) & 0x7FF)
+#define   C_038000_PITCH                               0xFFF800FF
+#define   S_038000_TEX_WIDTH(x)                        (((x) & 0x1FFF) << 19)
+#define   G_038000_TEX_WIDTH(x)                        (((x) >> 19) & 0x1FFF)
+#define   C_038000_TEX_WIDTH                           0x0007FFFF
+#define R_038004_SQ_TEX_RESOURCE_WORD1_0             0x038004
+#define   S_038004_TEX_HEIGHT(x)                       (((x) & 0x1FFF) << 0)
+#define   G_038004_TEX_HEIGHT(x)                       (((x) >> 0) & 0x1FFF)
+#define   C_038004_TEX_HEIGHT                          0xFFFFE000
+#define   S_038004_TEX_DEPTH(x)                        (((x) & 0x1FFF) << 13)
+#define   G_038004_TEX_DEPTH(x)                        (((x) >> 13) & 0x1FFF)
+#define   C_038004_TEX_DEPTH                           0xFC001FFF
+#define   S_038004_DATA_FORMAT(x)                      (((x) & 0x3F) << 26)
+#define   G_038004_DATA_FORMAT(x)                      (((x) >> 26) & 0x3F)
+#define   C_038004_DATA_FORMAT                         0x03FFFFFF
+#define     V_038004_COLOR_INVALID                     0x00000000
+#define     V_038004_COLOR_8                           0x00000001
+#define     V_038004_COLOR_4_4                         0x00000002
+#define     V_038004_COLOR_3_3_2                       0x00000003
+#define     V_038004_COLOR_16                          0x00000005
+#define     V_038004_COLOR_16_FLOAT                    0x00000006
+#define     V_038004_COLOR_8_8                         0x00000007
+#define     V_038004_COLOR_5_6_5                       0x00000008
+#define     V_038004_COLOR_6_5_5                       0x00000009
+#define     V_038004_COLOR_1_5_5_5                     0x0000000A
+#define     V_038004_COLOR_4_4_4_4                     0x0000000B
+#define     V_038004_COLOR_5_5_5_1                     0x0000000C
+#define     V_038004_COLOR_32                          0x0000000D
+#define     V_038004_COLOR_32_FLOAT                    0x0000000E
+#define     V_038004_COLOR_16_16                       0x0000000F
+#define     V_038004_COLOR_16_16_FLOAT                 0x00000010
+#define     V_038004_COLOR_8_24                        0x00000011
+#define     V_038004_COLOR_8_24_FLOAT                  0x00000012
+#define     V_038004_COLOR_24_8                        0x00000013
+#define     V_038004_COLOR_24_8_FLOAT                  0x00000014
+#define     V_038004_COLOR_10_11_11                    0x00000015
+#define     V_038004_COLOR_10_11_11_FLOAT              0x00000016
+#define     V_038004_COLOR_11_11_10                    0x00000017
+#define     V_038004_COLOR_11_11_10_FLOAT              0x00000018
+#define     V_038004_COLOR_2_10_10_10                  0x00000019
+#define     V_038004_COLOR_8_8_8_8                     0x0000001A
+#define     V_038004_COLOR_10_10_10_2                  0x0000001B
+#define     V_038004_COLOR_X24_8_32_FLOAT              0x0000001C
+#define     V_038004_COLOR_32_32                       0x0000001D
+#define     V_038004_COLOR_32_32_FLOAT                 0x0000001E
+#define     V_038004_COLOR_16_16_16_16                 0x0000001F
+#define     V_038004_COLOR_16_16_16_16_FLOAT           0x00000020
+#define     V_038004_COLOR_32_32_32_32                 0x00000022
+#define     V_038004_COLOR_32_32_32_32_FLOAT           0x00000023
+#define     V_038004_FMT_1                             0x00000025
+#define     V_038004_FMT_GB_GR                         0x00000027
+#define     V_038004_FMT_BG_RG                         0x00000028
+#define     V_038004_FMT_32_AS_8                       0x00000029
+#define     V_038004_FMT_32_AS_8_8                     0x0000002A
+#define     V_038004_FMT_5_9_9_9_SHAREDEXP             0x0000002B
+#define     V_038004_FMT_8_8_8                         0x0000002C
+#define     V_038004_FMT_16_16_16                      0x0000002D
+#define     V_038004_FMT_16_16_16_FLOAT                0x0000002E
+#define     V_038004_FMT_32_32_32                      0x0000002F
+#define     V_038004_FMT_32_32_32_FLOAT                0x00000030
+#define     V_038004_FMT_BC1                           0x00000031
+#define     V_038004_FMT_BC2                           0x00000032
+#define     V_038004_FMT_BC3                           0x00000033
+#define     V_038004_FMT_BC4                           0x00000034
+#define     V_038004_FMT_BC5                           0x00000035
+#define     V_038004_FMT_BC6                           0x00000036
+#define     V_038004_FMT_BC7                           0x00000037
+#define     V_038004_FMT_32_AS_32_32_32_32             0x00000038
+#define R_038010_SQ_TEX_RESOURCE_WORD4_0             0x038010
+#define   S_038010_FORMAT_COMP_X(x)                    (((x) & 0x3) << 0)
+#define   G_038010_FORMAT_COMP_X(x)                    (((x) >> 0) & 0x3)
+#define   C_038010_FORMAT_COMP_X                       0xFFFFFFFC
+#define   S_038010_FORMAT_COMP_Y(x)                    (((x) & 0x3) << 2)
+#define   G_038010_FORMAT_COMP_Y(x)                    (((x) >> 2) & 0x3)
+#define   C_038010_FORMAT_COMP_Y                       0xFFFFFFF3
+#define   S_038010_FORMAT_COMP_Z(x)                    (((x) & 0x3) << 4)
+#define   G_038010_FORMAT_COMP_Z(x)                    (((x) >> 4) & 0x3)
+#define   C_038010_FORMAT_COMP_Z                       0xFFFFFFCF
+#define   S_038010_FORMAT_COMP_W(x)                    (((x) & 0x3) << 6)
+#define   G_038010_FORMAT_COMP_W(x)                    (((x) >> 6) & 0x3)
+#define   C_038010_FORMAT_COMP_W                       0xFFFFFF3F
+#define   S_038010_NUM_FORMAT_ALL(x)                   (((x) & 0x3) << 8)
+#define   G_038010_NUM_FORMAT_ALL(x)                   (((x) >> 8) & 0x3)
+#define   C_038010_NUM_FORMAT_ALL                      0xFFFFFCFF
+#define   S_038010_SRF_MODE_ALL(x)                     (((x) & 0x1) << 10)
+#define   G_038010_SRF_MODE_ALL(x)                     (((x) >> 10) & 0x1)
+#define   C_038010_SRF_MODE_ALL                        0xFFFFFBFF
+#define   S_038010_FORCE_DEGAMMA(x)                    (((x) & 0x1) << 11)
+#define   G_038010_FORCE_DEGAMMA(x)                    (((x) >> 11) & 0x1)
+#define   C_038010_FORCE_DEGAMMA                       0xFFFFF7FF
+#define   S_038010_ENDIAN_SWAP(x)                      (((x) & 0x3) << 12)
+#define   G_038010_ENDIAN_SWAP(x)                      (((x) >> 12) & 0x3)
+#define   C_038010_ENDIAN_SWAP                         0xFFFFCFFF
+#define   S_038010_REQUEST_SIZE(x)                     (((x) & 0x3) << 14)
+#define   G_038010_REQUEST_SIZE(x)                     (((x) >> 14) & 0x3)
+#define   C_038010_REQUEST_SIZE                        0xFFFF3FFF
+#define   S_038010_DST_SEL_X(x)                        (((x) & 0x7) << 16)
+#define   G_038010_DST_SEL_X(x)                        (((x) >> 16) & 0x7)
+#define   C_038010_DST_SEL_X                           0xFFF8FFFF
+#define   S_038010_DST_SEL_Y(x)                        (((x) & 0x7) << 19)
+#define   G_038010_DST_SEL_Y(x)                        (((x) >> 19) & 0x7)
+#define   C_038010_DST_SEL_Y                           0xFFC7FFFF
+#define   S_038010_DST_SEL_Z(x)                        (((x) & 0x7) << 22)
+#define   G_038010_DST_SEL_Z(x)                        (((x) >> 22) & 0x7)
+#define   C_038010_DST_SEL_Z                           0xFE3FFFFF
+#define   S_038010_DST_SEL_W(x)                        (((x) & 0x7) << 25)
+#define   G_038010_DST_SEL_W(x)                        (((x) >> 25) & 0x7)
+#define   C_038010_DST_SEL_W                           0xF1FFFFFF
+#	define SQ_SEL_X					0
+#	define SQ_SEL_Y					1
+#	define SQ_SEL_Z					2
+#	define SQ_SEL_W					3
+#	define SQ_SEL_0					4
+#	define SQ_SEL_1					5
+#define   S_038010_BASE_LEVEL(x)                       (((x) & 0xF) << 28)
+#define   G_038010_BASE_LEVEL(x)                       (((x) >> 28) & 0xF)
+#define   C_038010_BASE_LEVEL                          0x0FFFFFFF
+#define R_038014_SQ_TEX_RESOURCE_WORD5_0             0x038014
+#define   S_038014_LAST_LEVEL(x)                       (((x) & 0xF) << 0)
+#define   G_038014_LAST_LEVEL(x)                       (((x) >> 0) & 0xF)
+#define   C_038014_LAST_LEVEL                          0xFFFFFFF0
+#define   S_038014_BASE_ARRAY(x)                       (((x) & 0x1FFF) << 4)
+#define   G_038014_BASE_ARRAY(x)                       (((x) >> 4) & 0x1FFF)
+#define   C_038014_BASE_ARRAY                          0xFFFE000F
+#define   S_038014_LAST_ARRAY(x)                       (((x) & 0x1FFF) << 17)
+#define   G_038014_LAST_ARRAY(x)                       (((x) >> 17) & 0x1FFF)
+#define   C_038014_LAST_ARRAY                          0xC001FFFF
+#define R_0288A8_SQ_ESGS_RING_ITEMSIZE               0x0288A8
+#define   S_0288A8_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288A8_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288A8_ITEMSIZE                            0xFFFF8000
+#define R_008C44_SQ_ESGS_RING_SIZE                   0x008C44
+#define   S_008C44_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C44_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C44_MEM_SIZE                            0x00000000
+#define R_0288B0_SQ_ESTMP_RING_ITEMSIZE              0x0288B0
+#define   S_0288B0_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288B0_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288B0_ITEMSIZE                            0xFFFF8000
+#define R_008C54_SQ_ESTMP_RING_SIZE                  0x008C54
+#define   S_008C54_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C54_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C54_MEM_SIZE                            0x00000000
+#define R_0288C0_SQ_FBUF_RING_ITEMSIZE               0x0288C0
+#define   S_0288C0_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288C0_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288C0_ITEMSIZE                            0xFFFF8000
+#define R_008C74_SQ_FBUF_RING_SIZE                   0x008C74
+#define   S_008C74_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C74_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C74_MEM_SIZE                            0x00000000
+#define R_0288B4_SQ_GSTMP_RING_ITEMSIZE              0x0288B4
+#define   S_0288B4_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288B4_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288B4_ITEMSIZE                            0xFFFF8000
+#define R_008C5C_SQ_GSTMP_RING_SIZE                  0x008C5C
+#define   S_008C5C_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C5C_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C5C_MEM_SIZE                            0x00000000
+#define R_0288AC_SQ_GSVS_RING_ITEMSIZE               0x0288AC
+#define   S_0288AC_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288AC_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288AC_ITEMSIZE                            0xFFFF8000
+#define R_008C4C_SQ_GSVS_RING_SIZE                   0x008C4C
+#define   S_008C4C_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C4C_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C4C_MEM_SIZE                            0x00000000
+#define R_0288BC_SQ_PSTMP_RING_ITEMSIZE              0x0288BC
+#define   S_0288BC_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288BC_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288BC_ITEMSIZE                            0xFFFF8000
+#define R_008C6C_SQ_PSTMP_RING_SIZE                  0x008C6C
+#define   S_008C6C_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C6C_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C6C_MEM_SIZE                            0x00000000
+#define R_0288C4_SQ_REDUC_RING_ITEMSIZE              0x0288C4
+#define   S_0288C4_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288C4_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288C4_ITEMSIZE                            0xFFFF8000
+#define R_008C7C_SQ_REDUC_RING_SIZE                  0x008C7C
+#define   S_008C7C_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C7C_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C7C_MEM_SIZE                            0x00000000
+#define R_0288B8_SQ_VSTMP_RING_ITEMSIZE              0x0288B8
+#define   S_0288B8_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288B8_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288B8_ITEMSIZE                            0xFFFF8000
+#define R_008C64_SQ_VSTMP_RING_SIZE                  0x008C64
+#define   S_008C64_MEM_SIZE(x)                         (((x) & 0xFFFFFFFF) << 0)
+#define   G_008C64_MEM_SIZE(x)                         (((x) >> 0) & 0xFFFFFFFF)
+#define   C_008C64_MEM_SIZE                            0x00000000
+#define R_0288C8_SQ_GS_VERT_ITEMSIZE                 0x0288C8
+#define   S_0288C8_ITEMSIZE(x)                         (((x) & 0x7FFF) << 0)
+#define   G_0288C8_ITEMSIZE(x)                         (((x) >> 0) & 0x7FFF)
+#define   C_0288C8_ITEMSIZE                            0xFFFF8000
+#define R_028010_DB_DEPTH_INFO                       0x028010
+#define   S_028010_FORMAT(x)                           (((x) & 0x7) << 0)
+#define   G_028010_FORMAT(x)                           (((x) >> 0) & 0x7)
+#define   C_028010_FORMAT                              0xFFFFFFF8
+#define     V_028010_DEPTH_INVALID                     0x00000000
+#define     V_028010_DEPTH_16                          0x00000001
+#define     V_028010_DEPTH_X8_24                       0x00000002
+#define     V_028010_DEPTH_8_24                        0x00000003
+#define     V_028010_DEPTH_X8_24_FLOAT                 0x00000004
+#define     V_028010_DEPTH_8_24_FLOAT                  0x00000005
+#define     V_028010_DEPTH_32_FLOAT                    0x00000006
+#define     V_028010_DEPTH_X24_8_32_FLOAT              0x00000007
+#define   S_028010_READ_SIZE(x)                        (((x) & 0x1) << 3)
+#define   G_028010_READ_SIZE(x)                        (((x) >> 3) & 0x1)
+#define   C_028010_READ_SIZE                           0xFFFFFFF7
+#define   S_028010_ARRAY_MODE(x)                       (((x) & 0xF) << 15)
+#define   G_028010_ARRAY_MODE(x)                       (((x) >> 15) & 0xF)
+#define   C_028010_ARRAY_MODE                          0xFFF87FFF
+#define     V_028010_ARRAY_1D_TILED_THIN1              0x00000002
+#define     V_028010_ARRAY_2D_TILED_THIN1              0x00000004
+#define   S_028010_TILE_SURFACE_ENABLE(x)              (((x) & 0x1) << 25)
+#define   G_028010_TILE_SURFACE_ENABLE(x)              (((x) >> 25) & 0x1)
+#define   C_028010_TILE_SURFACE_ENABLE                 0xFDFFFFFF
+#define   S_028010_TILE_COMPACT(x)                     (((x) & 0x1) << 26)
+#define   G_028010_TILE_COMPACT(x)                     (((x) >> 26) & 0x1)
+#define   C_028010_TILE_COMPACT                        0xFBFFFFFF
+#define   S_028010_ZRANGE_PRECISION(x)                 (((x) & 0x1) << 31)
+#define   G_028010_ZRANGE_PRECISION(x)                 (((x) >> 31) & 0x1)
+#define   C_028010_ZRANGE_PRECISION                    0x7FFFFFFF
+#define R_028000_DB_DEPTH_SIZE                       0x028000
+#define   S_028000_PITCH_TILE_MAX(x)                   (((x) & 0x3FF) << 0)
+#define   G_028000_PITCH_TILE_MAX(x)                   (((x) >> 0) & 0x3FF)
+#define   C_028000_PITCH_TILE_MAX                      0xFFFFFC00
+#define   S_028000_SLICE_TILE_MAX(x)                   (((x) & 0xFFFFF) << 10)
+#define   G_028000_SLICE_TILE_MAX(x)                   (((x) >> 10) & 0xFFFFF)
+#define   C_028000_SLICE_TILE_MAX                      0xC00003FF
+#define R_028004_DB_DEPTH_VIEW                       0x028004
+#define   S_028004_SLICE_START(x)                      (((x) & 0x7FF) << 0)
+#define   G_028004_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
+#define   C_028004_SLICE_START                         0xFFFFF800
+#define   S_028004_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
+#define   G_028004_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
+#define   C_028004_SLICE_MAX                           0xFF001FFF
+#define R_028800_DB_DEPTH_CONTROL                    0x028800
+#define   S_028800_STENCIL_ENABLE(x)                   (((x) & 0x1) << 0)
+#define   G_028800_STENCIL_ENABLE(x)                   (((x) >> 0) & 0x1)
+#define   C_028800_STENCIL_ENABLE                      0xFFFFFFFE
+#define   S_028800_Z_ENABLE(x)                         (((x) & 0x1) << 1)
+#define   G_028800_Z_ENABLE(x)                         (((x) >> 1) & 0x1)
+#define   C_028800_Z_ENABLE                            0xFFFFFFFD
+#define   S_028800_Z_WRITE_ENABLE(x)                   (((x) & 0x1) << 2)
+#define   G_028800_Z_WRITE_ENABLE(x)                   (((x) >> 2) & 0x1)
+#define   C_028800_Z_WRITE_ENABLE                      0xFFFFFFFB
+#define   S_028800_ZFUNC(x)                            (((x) & 0x7) << 4)
+#define   G_028800_ZFUNC(x)                            (((x) >> 4) & 0x7)
+#define   C_028800_ZFUNC                               0xFFFFFF8F
+#define   S_028800_BACKFACE_ENABLE(x)                  (((x) & 0x1) << 7)
+#define   G_028800_BACKFACE_ENABLE(x)                  (((x) >> 7) & 0x1)
+#define   C_028800_BACKFACE_ENABLE                     0xFFFFFF7F
+#define   S_028800_STENCILFUNC(x)                      (((x) & 0x7) << 8)
+#define   G_028800_STENCILFUNC(x)                      (((x) >> 8) & 0x7)
+#define   C_028800_STENCILFUNC                         0xFFFFF8FF
+#define   S_028800_STENCILFAIL(x)                      (((x) & 0x7) << 11)
+#define   G_028800_STENCILFAIL(x)                      (((x) >> 11) & 0x7)
+#define   C_028800_STENCILFAIL                         0xFFFFC7FF
+#define   S_028800_STENCILZPASS(x)                     (((x) & 0x7) << 14)
+#define   G_028800_STENCILZPASS(x)                     (((x) >> 14) & 0x7)
+#define   C_028800_STENCILZPASS                        0xFFFE3FFF
+#define   S_028800_STENCILZFAIL(x)                     (((x) & 0x7) << 17)
+#define   G_028800_STENCILZFAIL(x)                     (((x) >> 17) & 0x7)
+#define   C_028800_STENCILZFAIL                        0xFFF1FFFF
+#define   S_028800_STENCILFUNC_BF(x)                   (((x) & 0x7) << 20)
+#define   G_028800_STENCILFUNC_BF(x)                   (((x) >> 20) & 0x7)
+#define   C_028800_STENCILFUNC_BF                      0xFF8FFFFF
+#define   S_028800_STENCILFAIL_BF(x)                   (((x) & 0x7) << 23)
+#define   G_028800_STENCILFAIL_BF(x)                   (((x) >> 23) & 0x7)
+#define   C_028800_STENCILFAIL_BF                      0xFC7FFFFF
+#define   S_028800_STENCILZPASS_BF(x)                  (((x) & 0x7) << 26)
+#define   G_028800_STENCILZPASS_BF(x)                  (((x) >> 26) & 0x7)
+#define   C_028800_STENCILZPASS_BF                     0xE3FFFFFF
+#define   S_028800_STENCILZFAIL_BF(x)                  (((x) & 0x7) << 29)
+#define   G_028800_STENCILZFAIL_BF(x)                  (((x) >> 29) & 0x7)
+#define   C_028800_STENCILZFAIL_BF                     0x1FFFFFFF
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon.h b/linux-imx/drivers/gpu/drm/radeon/radeon.h
new file mode 100644
index 0000000..d4ff48c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon.h
@@ -0,0 +1,2079 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RADEON_H__
+#define __RADEON_H__
+
+/* TODO: Things that still need to be done:
+ *	- surface allocator & initializer: (a bit like the scratch regs) should
+ *	  initialize the HDP_ stuff on RS600, R600, R700 hw, and anything
+ *	  else related to surfaces
+ *	- WB: write back stuff (handle it a bit like the scratch regs)
+ *	- Vblank: look at Jesse's rework and decide what we should do
+ *	- r600/r700: gart & cp
+ *	- cs: clean up the cs ioctl, use bitmaps & things like that.
+ *	- power management stuff
+ *	- Barrier in gart code
+ *	- Unmappable vram ?
+ *	- TESTING, TESTING, TESTING
+ */
+
+/* Initialization path:
+ *  We expect that acceleration initialization might fail for various
+ *  reasons even though we work hard to make it work on most
+ *  configurations. In order to still have a working userspace in such a
+ *  situation, the init path must succeed up to the memory controller
+ *  initialization point. Failures before this point are considered
+ *  fatal errors. Here is the init callchain:
+ *      radeon_device_init  performs common structure and mutex initialization
+ *      asic_init           sets up the GPU memory layout and performs all
+ *                          one-time initialization (failures in this
+ *                          function are considered fatal)
+ *      asic_startup        sets up GPU acceleration; to follow the
+ *                          guideline, the first thing this function
+ *                          should do is set up the GPU memory
+ *                          controller (only MC setup failures are
+ *                          considered fatal)
+ */
+
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+#include <ttm/ttm_execbuf_util.h>
+
+#include "radeon_family.h"
+#include "radeon_mode.h"
+#include "radeon_reg.h"
+
+/*
+ * Module parameters.
+ */
+extern int radeon_no_wb;
+extern int radeon_modeset;
+extern int radeon_dynclks;
+extern int radeon_r4xx_atom;
+extern int radeon_agpmode;
+extern int radeon_vram_limit;
+extern int radeon_gart_size;
+extern int radeon_benchmarking;
+extern int radeon_testing;
+extern int radeon_connector_table;
+extern int radeon_tv;
+extern int radeon_audio;
+extern int radeon_disp_priority;
+extern int radeon_hw_i2c;
+extern int radeon_pcie_gen2;
+extern int radeon_msi;
+extern int radeon_lockup_timeout;
+extern int radeon_fastfb;
+
+/*
+ * Copied from radeon_drv.h so we don't have to include both and end up
+ * with conflicting symbols.
+ */
+#define RADEON_MAX_USEC_TIMEOUT			100000	/* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
+#define RADEON_IB_POOL_SIZE			16
+#define RADEON_DEBUGFS_MAX_COMPONENTS		32
+#define RADEONFB_CONN_LIMIT			4
+#define RADEON_BIOS_NUM_SCRATCH			8
+
+/* max number of rings */
+#define RADEON_NUM_RINGS			6
+
+/* fence seq is set to this number when signaled */
+#define RADEON_FENCE_SIGNALED_SEQ		0LL
+
+/* internal ring indices */
+/* r1xx+ has gfx CP ring */
+#define RADEON_RING_TYPE_GFX_INDEX	0
+
+/* cayman has 2 compute CP rings */
+#define CAYMAN_RING_TYPE_CP1_INDEX	1
+#define CAYMAN_RING_TYPE_CP2_INDEX	2
+
+/* R600+ has an async dma ring */
+#define R600_RING_TYPE_DMA_INDEX		3
+/* cayman adds a second async dma ring */
+#define CAYMAN_RING_TYPE_DMA1_INDEX		4
+
+/* R600+ */
+#define R600_RING_TYPE_UVD_INDEX	5
+
+/* hardcode these limits for now */
+#define RADEON_VA_IB_OFFSET			(1 << 20)
+#define RADEON_VA_RESERVED_SIZE			(8 << 20)
+#define RADEON_IB_VM_MAX_SIZE			(64 << 10)
+
+/* reset flags */
+#define RADEON_RESET_GFX			(1 << 0)
+#define RADEON_RESET_COMPUTE			(1 << 1)
+#define RADEON_RESET_DMA			(1 << 2)
+#define RADEON_RESET_CP				(1 << 3)
+#define RADEON_RESET_GRBM			(1 << 4)
+#define RADEON_RESET_DMA1			(1 << 5)
+#define RADEON_RESET_RLC			(1 << 6)
+#define RADEON_RESET_SEM			(1 << 7)
+#define RADEON_RESET_IH				(1 << 8)
+#define RADEON_RESET_VMC			(1 << 9)
+#define RADEON_RESET_MC				(1 << 10)
+#define RADEON_RESET_DISPLAY			(1 << 11)
+
+/*
+ * Errata workarounds.
+ */
+enum radeon_pll_errata {
+	CHIP_ERRATA_R300_CG             = 0x00000001,
+	CHIP_ERRATA_PLL_DUMMYREADS      = 0x00000002,
+	CHIP_ERRATA_PLL_DELAY           = 0x00000004
+};
+
+
+struct radeon_device;
+
+
+/*
+ * BIOS.
+ */
+bool radeon_get_bios(struct radeon_device *rdev);
+
+/*
+ * Dummy page
+ */
+struct radeon_dummy_page {
+	struct page	*page;
+	dma_addr_t	addr;
+};
+int radeon_dummy_page_init(struct radeon_device *rdev);
+void radeon_dummy_page_fini(struct radeon_device *rdev);
+
+
+/*
+ * Clocks
+ */
+struct radeon_clock {
+	struct radeon_pll p1pll;
+	struct radeon_pll p2pll;
+	struct radeon_pll dcpll;
+	struct radeon_pll spll;
+	struct radeon_pll mpll;
+	/* 10 kHz units */
+	uint32_t default_mclk;
+	uint32_t default_sclk;
+	uint32_t default_dispclk;
+	uint32_t dp_extclk;
+	uint32_t max_pixel_clock;
+};
+
+/*
+ * Power management
+ */
+int radeon_pm_init(struct radeon_device *rdev);
+void radeon_pm_fini(struct radeon_device *rdev);
+void radeon_pm_compute_clocks(struct radeon_device *rdev);
+void radeon_pm_suspend(struct radeon_device *rdev);
+void radeon_pm_resume(struct radeon_device *rdev);
+void radeon_combios_get_power_modes(struct radeon_device *rdev);
+void radeon_atombios_get_power_modes(struct radeon_device *rdev);
+int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
+				   u8 clock_type,
+				   u32 clock,
+				   bool strobe_mode,
+				   struct atom_clock_dividers *dividers);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
+void rs690_pm_info(struct radeon_device *rdev);
+extern int rv6xx_get_temp(struct radeon_device *rdev);
+extern int rv770_get_temp(struct radeon_device *rdev);
+extern int evergreen_get_temp(struct radeon_device *rdev);
+extern int sumo_get_temp(struct radeon_device *rdev);
+extern int si_get_temp(struct radeon_device *rdev);
+extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
+				    unsigned *bankh, unsigned *mtaspect,
+				    unsigned *tile_split);
+
+/*
+ * Fences.
+ */
+struct radeon_fence_driver {
+	uint32_t			scratch_reg;
+	uint64_t			gpu_addr;
+	volatile uint32_t		*cpu_addr;
+	/* sync_seq is protected by ring emission lock */
+	uint64_t			sync_seq[RADEON_NUM_RINGS];
+	atomic64_t			last_seq;
+	unsigned long			last_activity;
+	bool				initialized;
+};
+
+struct radeon_fence {
+	struct radeon_device		*rdev;
+	struct kref			kref;
+	/* protected by radeon_fence.lock */
+	uint64_t			seq;
+	/* RB, DMA, etc. */
+	unsigned			ring;
+};
+
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
+int radeon_fence_driver_init(struct radeon_device *rdev);
+void radeon_fence_driver_fini(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev);
+int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
+void radeon_fence_process(struct radeon_device *rdev, int ring);
+bool radeon_fence_signaled(struct radeon_fence *fence);
+int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_any(struct radeon_device *rdev,
+			  struct radeon_fence **fences,
+			  bool intr);
+struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
+void radeon_fence_unref(struct radeon_fence **fence);
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
+void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
+static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
+						      struct radeon_fence *b)
+{
+	if (!a) {
+		return b;
+	}
+
+	if (!b) {
+		return a;
+	}
+
+	BUG_ON(a->ring != b->ring);
+
+	if (a->seq > b->seq) {
+		return a;
+	} else {
+		return b;
+	}
+}
+
+static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
+					   struct radeon_fence *b)
+{
+	if (!a) {
+		return false;
+	}
+
+	if (!b) {
+		return true;
+	}
+
+	BUG_ON(a->ring != b->ring);
+
+	return a->seq < b->seq;
+}
+
+/*
+ * Tiling registers
+ */
+struct radeon_surface_reg {
+	struct radeon_bo *bo;
+};
+
+#define RADEON_GEM_MAX_SURFACES 8
+
+/*
+ * TTM.
+ */
+struct radeon_mman {
+	struct ttm_bo_global_ref        bo_global_ref;
+	struct drm_global_reference	mem_global_ref;
+	struct ttm_bo_device		bdev;
+	bool				mem_global_referenced;
+	bool				initialized;
+};
+
+/* bo virtual address in a specific vm */
+struct radeon_bo_va {
+	/* protected by bo being reserved */
+	struct list_head		bo_list;
+	uint64_t			soffset;
+	uint64_t			eoffset;
+	uint32_t			flags;
+	bool				valid;
+	unsigned			ref_count;
+
+	/* protected by vm mutex */
+	struct list_head		vm_list;
+
+	/* constant after initialization */
+	struct radeon_vm		*vm;
+	struct radeon_bo		*bo;
+};
+
+struct radeon_bo {
+	/* Protected by gem.mutex */
+	struct list_head		list;
+	/* Protected by tbo.reserved */
+	u32				placements[3];
+	struct ttm_placement		placement;
+	struct ttm_buffer_object	tbo;
+	struct ttm_bo_kmap_obj		kmap;
+	unsigned			pin_count;
+	void				*kptr;
+	u32				tiling_flags;
+	u32				pitch;
+	int				surface_reg;
+	/* list of all virtual addresses with which this bo
+	 * is associated
+	 */
+	struct list_head		va;
+	/* Constant after initialization */
+	struct radeon_device		*rdev;
+	struct drm_gem_object		gem_base;
+
+	struct ttm_bo_kmap_obj		dma_buf_vmap;
+	pid_t				pid;
+};
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
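+/* gem_to_radeon_bo() recovers the radeon_bo that embeds a given
+ * drm_gem_object via container_of, e.g. (illustrative):
+ *
+ *	struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
+ */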
+
+struct radeon_bo_list {
+	struct ttm_validate_buffer tv;
+	struct radeon_bo	*bo;
+	uint64_t		gpu_offset;
+	bool			written;
+	unsigned		domain;
+	unsigned		alt_domain;
+	u32			tiling_flags;
+};
+
+int radeon_gem_debugfs_init(struct radeon_device *rdev);
+
+/* Sub-allocation manager; it has to be protected by another lock.
+ * By design this is a helper for other parts of the driver, such as
+ * the indirect buffers or semaphores, which both have their own
+ * locking.
+ *
+ * The principle is simple: we keep a list of sub-allocations in offset
+ * order (the first entry has offset == 0, the last entry has the
+ * highest offset).
+ *
+ * When allocating a new object we first check whether there is room at
+ * the end, i.e. total_size - (last_object_offset + last_object_size) >=
+ * alloc_size. If so, we allocate the new object there.
+ *
+ * When there is not enough room at the end, we start waiting on
+ * each sub-object until we reach object_offset + object_size >=
+ * alloc_size; that object then becomes the sub-object we return.
+ *
+ * Alignment can't be bigger than the page size.
+ *
+ * Holes are not considered for allocation, to keep things simple.
+ * The assumption is that there won't be any holes (all objects use
+ * the same alignment).
+ */
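+/* A minimal sketch of the end-of-buffer check described above, using field
+ * names from struct radeon_sa_manager below ("last" is a hypothetical
+ * pointer to the sub-allocation with the highest offset):
+ *
+ *	if (sa_manager->size - last->eoffset >= alloc_size)
+ *		place the new object at offset last->eoffset;
+ */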
+struct radeon_sa_manager {
+	wait_queue_head_t	wq;
+	struct radeon_bo	*bo;
+	struct list_head	*hole;
+	struct list_head	flist[RADEON_NUM_RINGS];
+	struct list_head	olist;
+	unsigned		size;
+	uint64_t		gpu_addr;
+	void			*cpu_ptr;
+	uint32_t		domain;
+	uint32_t		align;
+};
+
+struct radeon_sa_bo;
+
+/* sub-allocation buffer */
+struct radeon_sa_bo {
+	struct list_head		olist;
+	struct list_head		flist;
+	struct radeon_sa_manager	*manager;
+	unsigned			soffset;
+	unsigned			eoffset;
+	struct radeon_fence		*fence;
+};
+
+/*
+ * GEM objects.
+ */
+struct radeon_gem {
+	struct mutex		mutex;
+	struct list_head	objects;
+};
+
+int radeon_gem_init(struct radeon_device *rdev);
+void radeon_gem_fini(struct radeon_device *rdev);
+int radeon_gem_object_create(struct radeon_device *rdev, int size,
+				int alignment, int initial_domain,
+				bool discardable, bool kernel,
+				struct drm_gem_object **obj);
+
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+			  struct drm_device *dev,
+			  uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+			     struct drm_device *dev,
+			     uint32_t handle);
+
+/*
+ * Semaphores.
+ */
+/* everything here is constant */
+struct radeon_semaphore {
+	struct radeon_sa_bo		*sa_bo;
+	signed				waiters;
+	uint64_t			gpu_addr;
+};
+
+int radeon_semaphore_create(struct radeon_device *rdev,
+			    struct radeon_semaphore **semaphore);
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+				  struct radeon_semaphore *semaphore);
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+				struct radeon_semaphore *semaphore);
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+				struct radeon_semaphore *semaphore,
+				int signaler, int waiter);
+void radeon_semaphore_free(struct radeon_device *rdev,
+			   struct radeon_semaphore **semaphore,
+			   struct radeon_fence *fence);
+
+/*
+ * GART structures, functions & helpers
+ */
+struct radeon_mc;
+
+#define RADEON_GPU_PAGE_SIZE 4096
+#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
+#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
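+/* For example, RADEON_GPU_PAGE_ALIGN(5000) adds RADEON_GPU_PAGE_MASK (4095)
+ * and masks off the low 12 bits, rounding 5000 up to 8192. */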
+
+struct radeon_gart {
+	dma_addr_t			table_addr;
+	struct radeon_bo		*robj;
+	void				*ptr;
+	unsigned			num_gpu_pages;
+	unsigned			num_cpu_pages;
+	unsigned			table_size;
+	struct page			**pages;
+	dma_addr_t			*pages_addr;
+	bool				ready;
+};
+
+int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
+void radeon_gart_table_ram_free(struct radeon_device *rdev);
+int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
+void radeon_gart_table_vram_free(struct radeon_device *rdev);
+int radeon_gart_table_vram_pin(struct radeon_device *rdev);
+void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
+int radeon_gart_init(struct radeon_device *rdev);
+void radeon_gart_fini(struct radeon_device *rdev);
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+			int pages);
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+		     int pages, struct page **pagelist,
+		     dma_addr_t *dma_addr);
+void radeon_gart_restore(struct radeon_device *rdev);
+
+
+/*
+ * GPU MC structures, functions & helpers
+ */
+struct radeon_mc {
+	resource_size_t		aper_size;
+	resource_size_t		aper_base;
+	resource_size_t		agp_base;
+	/* for some chips with <= 32MB we need to lie
+	 * about vram size near mc fb location */
+	u64			mc_vram_size;
+	u64			visible_vram_size;
+	u64			gtt_size;
+	u64			gtt_start;
+	u64			gtt_end;
+	u64			vram_start;
+	u64			vram_end;
+	unsigned		vram_width;
+	u64			real_vram_size;
+	int			vram_mtrr;
+	bool			vram_is_ddr;
+	bool			igp_sideport_enabled;
+	u64                     gtt_base_align;
+	u64                     mc_mask;
+};
+
+bool radeon_combios_sideport_present(struct radeon_device *rdev);
+bool radeon_atombios_sideport_present(struct radeon_device *rdev);
+
+/*
+ * GPU scratch registers structures, functions & helpers
+ */
+struct radeon_scratch {
+	unsigned		num_reg;
+	uint32_t                reg_base;
+	bool			free[32];
+	uint32_t		reg[32];
+};
+
+int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
+void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
+
+
+/*
+ * IRQS.
+ */
+
+struct radeon_unpin_work {
+	struct work_struct work;
+	struct radeon_device *rdev;
+	int crtc_id;
+	struct radeon_fence *fence;
+	struct drm_pending_vblank_event *event;
+	struct radeon_bo *old_rbo;
+	u64 new_crtc_base;
+};
+
+struct r500_irq_stat_regs {
+	u32 disp_int;
+	u32 hdmi0_status;
+};
+
+struct r600_irq_stat_regs {
+	u32 disp_int;
+	u32 disp_int_cont;
+	u32 disp_int_cont2;
+	u32 d1grph_int;
+	u32 d2grph_int;
+	u32 hdmi0_status;
+	u32 hdmi1_status;
+};
+
+struct evergreen_irq_stat_regs {
+	u32 disp_int;
+	u32 disp_int_cont;
+	u32 disp_int_cont2;
+	u32 disp_int_cont3;
+	u32 disp_int_cont4;
+	u32 disp_int_cont5;
+	u32 d1grph_int;
+	u32 d2grph_int;
+	u32 d3grph_int;
+	u32 d4grph_int;
+	u32 d5grph_int;
+	u32 d6grph_int;
+	u32 afmt_status1;
+	u32 afmt_status2;
+	u32 afmt_status3;
+	u32 afmt_status4;
+	u32 afmt_status5;
+	u32 afmt_status6;
+};
+
+union radeon_irq_stat_regs {
+	struct r500_irq_stat_regs r500;
+	struct r600_irq_stat_regs r600;
+	struct evergreen_irq_stat_regs evergreen;
+};
+
+#define RADEON_MAX_HPD_PINS 6
+#define RADEON_MAX_CRTCS 6
+#define RADEON_MAX_AFMT_BLOCKS 6
+
+struct radeon_irq {
+	bool				installed;
+	spinlock_t			lock;
+	atomic_t			ring_int[RADEON_NUM_RINGS];
+	bool				crtc_vblank_int[RADEON_MAX_CRTCS];
+	atomic_t			pflip[RADEON_MAX_CRTCS];
+	wait_queue_head_t		vblank_queue;
+	bool				hpd[RADEON_MAX_HPD_PINS];
+	bool				afmt[RADEON_MAX_AFMT_BLOCKS];
+	union radeon_irq_stat_regs	stat_regs;
+};
+
+int radeon_irq_kms_init(struct radeon_device *rdev);
+void radeon_irq_kms_fini(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
+void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
+void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+
+/*
+ * CP & rings.
+ */
+
+struct radeon_ib {
+	struct radeon_sa_bo		*sa_bo;
+	uint32_t			length_dw;
+	uint64_t			gpu_addr;
+	uint32_t			*ptr;
+	int				ring;
+	struct radeon_fence		*fence;
+	struct radeon_vm		*vm;
+	bool				is_const_ib;
+	struct radeon_fence		*sync_to[RADEON_NUM_RINGS];
+	struct radeon_semaphore		*semaphore;
+};
+
+struct radeon_ring {
+	struct radeon_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr;
+	unsigned		rptr_offs;
+	unsigned		rptr_reg;
+	unsigned		rptr_save_reg;
+	u64			next_rptr_gpu_addr;
+	volatile u32		*next_rptr_cpu_addr;
+	unsigned		wptr;
+	unsigned		wptr_old;
+	unsigned		wptr_reg;
+	unsigned		ring_size;
+	unsigned		ring_free_dw;
+	int			count_dw;
+	unsigned long		last_activity;
+	unsigned		last_rptr;
+	uint64_t		gpu_addr;
+	uint32_t		align_mask;
+	uint32_t		ptr_mask;
+	bool			ready;
+	u32			ptr_reg_shift;
+	u32			ptr_reg_mask;
+	u32			nop;
+	u32			idx;
+	u64			last_semaphore_signal_addr;
+	u64			last_semaphore_wait_addr;
+};
+
+/*
+ * VM
+ */
+
+/* maximum number of VMIDs */
+#define RADEON_NUM_VM	16
+
+/* defines the number of bits in the page table versus the page directory;
+ * a page is 4KB, so we have a 12-bit offset, 9 bits in the page
+ * table, and the remaining 19 bits in the page directory */
+#define RADEON_VM_BLOCK_SIZE   9
+
+/* number of entries in page table */
+#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
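+/* An illustrative decomposition of a GPU virtual address under this
+ * layout (sketch only; the index names are hypothetical):
+ *
+ *	pd_index = addr >> (RADEON_GPU_PAGE_SHIFT + RADEON_VM_BLOCK_SIZE);
+ *	pt_index = (addr >> RADEON_GPU_PAGE_SHIFT) & (RADEON_VM_PTE_COUNT - 1);
+ *	page_off = addr & RADEON_GPU_PAGE_MASK;
+ */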
+
+struct radeon_vm {
+	struct list_head		list;
+	struct list_head		va;
+	unsigned			id;
+
+	/* contains the page directory */
+	struct radeon_sa_bo		*page_directory;
+	uint64_t			pd_gpu_addr;
+
+	/* array of page tables, one for each page directory entry */
+	struct radeon_sa_bo		**page_tables;
+
+	struct mutex			mutex;
+	/* last fence for cs using this vm */
+	struct radeon_fence		*fence;
+	/* last flush or NULL if we still need to flush */
+	struct radeon_fence		*last_flush;
+};
+
+struct radeon_vm_manager {
+	struct mutex			lock;
+	struct list_head		lru_vm;
+	struct radeon_fence		*active[RADEON_NUM_VM];
+	struct radeon_sa_manager	sa_manager;
+	uint32_t			max_pfn;
+	/* number of VMIDs */
+	unsigned			nvm;
+	/* vram base address for page table entries */
+	u64				vram_base_offset;
+	/* is vm enabled? */
+	bool				enabled;
+};
+
+/*
+ * file private structure
+ */
+struct radeon_fpriv {
+	struct radeon_vm		vm;
+};
+
+/*
+ * R6xx+ IH ring
+ */
+struct r600_ih {
+	struct radeon_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr;
+	unsigned		ring_size;
+	uint64_t		gpu_addr;
+	uint32_t		ptr_mask;
+	atomic_t		lock;
+	bool                    enabled;
+};
+
+struct r600_blit_cp_primitives {
+	void (*set_render_target)(struct radeon_device *rdev, int format,
+				  int w, int h, u64 gpu_addr);
+	void (*cp_set_surface_sync)(struct radeon_device *rdev,
+				    u32 sync_type, u32 size,
+				    u64 mc_addr);
+	void (*set_shaders)(struct radeon_device *rdev);
+	void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
+	void (*set_tex_resource)(struct radeon_device *rdev,
+				 int format, int w, int h, int pitch,
+				 u64 gpu_addr, u32 size);
+	void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
+			     int x2, int y2);
+	void (*draw_auto)(struct radeon_device *rdev);
+	void (*set_default_state)(struct radeon_device *rdev);
+};
+
+struct r600_blit {
+	struct radeon_bo	*shader_obj;
+	struct r600_blit_cp_primitives primitives;
+	int max_dim;
+	int ring_size_common;
+	int ring_size_per_loop;
+	u64 shader_gpu_addr;
+	u32 vs_offset, ps_offset;
+	u32 state_offset;
+	u32 state_len;
+};
+
+/*
+ * SI RLC stuff
+ */
+struct si_rlc {
+	/* for power gating */
+	struct radeon_bo	*save_restore_obj;
+	uint64_t		save_restore_gpu_addr;
+	/* for clear state */
+	struct radeon_bo	*clear_state_obj;
+	uint64_t		clear_state_gpu_addr;
+};
+
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+		  struct radeon_ib *ib, struct radeon_vm *vm,
+		  unsigned size);
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+		       struct radeon_ib *const_ib);
+int radeon_ib_pool_init(struct radeon_device *rdev);
+void radeon_ib_pool_fini(struct radeon_device *rdev);
+int radeon_ib_ring_tests(struct radeon_device *rdev);
+/* Ring access between begin & end cannot sleep */
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+				      struct radeon_ring *ring);
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_undo(struct radeon_ring *ring);
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
+void radeon_ring_lockup_update(struct radeon_ring *ring);
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
+			    uint32_t **data);
+int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
+			unsigned size, uint32_t *data);
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
+		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
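+
+/*
+ * Usage sketch for the ring API above (illustrative only; the packet
+ * contents are made up):
+ *
+ *	r = radeon_ring_lock(rdev, ring, 2);	reserves 2 dwords, takes the lock
+ *	if (r)
+ *		return r;
+ *	radeon_ring_write(ring, header);
+ *	radeon_ring_write(ring, payload);
+ *	radeon_ring_unlock_commit(rdev, ring);	bumps wptr, drops the lock
+ *
+ * On a mid-packet failure, radeon_ring_unlock_undo() rolls the write
+ * pointer back instead of committing the partial packet.
+ */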
+
+
+/* r600 async dma */
+void r600_dma_stop(struct radeon_device *rdev);
+int r600_dma_resume(struct radeon_device *rdev);
+void r600_dma_fini(struct radeon_device *rdev);
+
+void cayman_dma_stop(struct radeon_device *rdev);
+int cayman_dma_resume(struct radeon_device *rdev);
+void cayman_dma_fini(struct radeon_device *rdev);
+
+/*
+ * CS.
+ */
+struct radeon_cs_reloc {
+	struct drm_gem_object		*gobj;
+	struct radeon_bo		*robj;
+	struct radeon_bo_list		lobj;
+	uint32_t			handle;
+	uint32_t			flags;
+};
+
+struct radeon_cs_chunk {
+	uint32_t		chunk_id;
+	uint32_t		length_dw;
+	int			kpage_idx[2];
+	uint32_t		*kpage[2];
+	uint32_t		*kdata;
+	void __user		*user_ptr;
+	int			last_copied_page;
+	int			last_page_index;
+};
+
+struct radeon_cs_parser {
+	struct device		*dev;
+	struct radeon_device	*rdev;
+	struct drm_file		*filp;
+	/* chunks */
+	unsigned		nchunks;
+	struct radeon_cs_chunk	*chunks;
+	uint64_t		*chunks_array;
+	/* IB */
+	unsigned		idx;
+	/* relocations */
+	unsigned		nrelocs;
+	struct radeon_cs_reloc	*relocs;
+	struct radeon_cs_reloc	**relocs_ptr;
+	struct list_head	validated;
+	unsigned		dma_reloc_idx;
+	/* indices of various chunks */
+	int			chunk_ib_idx;
+	int			chunk_relocs_idx;
+	int			chunk_flags_idx;
+	int			chunk_const_ib_idx;
+	struct radeon_ib	ib;
+	struct radeon_ib	const_ib;
+	void			*track;
+	unsigned		family;
+	int			parser_error;
+	u32			cs_flags;
+	u32			ring;
+	s32			priority;
+};
+
+extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
+extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
+
+struct radeon_cs_packet {
+	unsigned	idx;
+	unsigned	type;
+	unsigned	reg;
+	unsigned	opcode;
+	int		count;
+	unsigned	one_reg_wr;
+};
+
+typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
+				      struct radeon_cs_packet *pkt,
+				      unsigned idx, unsigned reg);
+typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
+				      struct radeon_cs_packet *pkt);
+
+
+/*
+ * AGP
+ */
+int radeon_agp_init(struct radeon_device *rdev);
+void radeon_agp_resume(struct radeon_device *rdev);
+void radeon_agp_suspend(struct radeon_device *rdev);
+void radeon_agp_fini(struct radeon_device *rdev);
+
+
+/*
+ * Writeback
+ */
+struct radeon_wb {
+	struct radeon_bo	*wb_obj;
+	volatile uint32_t	*wb;
+	uint64_t		gpu_addr;
+	bool                    enabled;
+	bool                    use_event;
+};
+
+#define RADEON_WB_SCRATCH_OFFSET 0
+#define RADEON_WB_RING0_NEXT_RPTR 256
+#define RADEON_WB_CP_RPTR_OFFSET 1024
+#define RADEON_WB_CP1_RPTR_OFFSET 1280
+#define RADEON_WB_CP2_RPTR_OFFSET 1536
+#define R600_WB_DMA_RPTR_OFFSET   1792
+#define R600_WB_IH_WPTR_OFFSET   2048
+#define CAYMAN_WB_DMA1_RPTR_OFFSET   2304
+#define R600_WB_UVD_RPTR_OFFSET  2560
+#define R600_WB_EVENT_OFFSET     3072
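+
+/*
+ * Usage sketch (assumes the writeback layout above): when writeback is
+ * enabled the ring code reads its read pointer from the wb page instead
+ * of an MMIO register, roughly:
+ *
+ *	if (rdev->wb.enabled)
+ *		rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
+ *	else
+ *		rptr = RREG32(rptr_reg);	rptr_reg is illustrative
+ */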
+
+/**
+ * struct radeon_pm - power management data
+ * @max_bandwidth:      maximum bandwidth the gpu has (MByte/s)
+ * @igp_sideport_mclk:  sideport memory clock MHz (rs690,rs740,rs780,rs880)
+ * @igp_system_mclk:    system clock MHz (rs690,rs740,rs780,rs880)
+ * @igp_ht_link_clk:    ht link clock MHz (rs690,rs740,rs780,rs880)
+ * @igp_ht_link_width:  ht link width in bits (rs690,rs740,rs780,rs880)
+ * @k8_bandwidth:       k8 bandwidth the gpu has (MByte/s) (IGP)
+ * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
+ * @ht_bandwidth:       ht bandwidth the gpu has (MByte/s) (IGP)
+ * @core_bandwidth:     core GPU bandwidth the gpu has (MByte/s) (IGP)
+ * @sclk:          	GPU clock MHz (core bandwidth depends on this clock)
+ * @needed_bandwidth:   current bandwidth needs
+ *
+ * Tracks the data needed to make power management decisions.
+ * The bandwidth need is used to determine the minimum GPU and memory
+ * clocks. The relation between gpu/memory clock and available bandwidth
+ * is hardware dependent (memory type, bus size, efficiency, ...).
+ */
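+
+/*
+ * Decision sketch (pseudo-code; the real comparisons go through the
+ * fixed20_12 helpers): dynpm compares the bandwidth the active modes
+ * need against what the current clocks provide and plans one of the
+ * radeon_dynpm_action values below:
+ *
+ *	if (needed_bandwidth > core_bandwidth)
+ *		planned_action = DYNPM_ACTION_UPCLOCK;
+ *	else if (gpu has been idle for a while)
+ *		planned_action = DYNPM_ACTION_DOWNCLOCK;
+ */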
+
+enum radeon_pm_method {
+	PM_METHOD_PROFILE,
+	PM_METHOD_DYNPM,
+};
+
+enum radeon_dynpm_state {
+	DYNPM_STATE_DISABLED,
+	DYNPM_STATE_MINIMUM,
+	DYNPM_STATE_PAUSED,
+	DYNPM_STATE_ACTIVE,
+	DYNPM_STATE_SUSPENDED,
+};
+enum radeon_dynpm_action {
+	DYNPM_ACTION_NONE,
+	DYNPM_ACTION_MINIMUM,
+	DYNPM_ACTION_DOWNCLOCK,
+	DYNPM_ACTION_UPCLOCK,
+	DYNPM_ACTION_DEFAULT
+};
+
+enum radeon_voltage_type {
+	VOLTAGE_NONE = 0,
+	VOLTAGE_GPIO,
+	VOLTAGE_VDDC,
+	VOLTAGE_SW
+};
+
+enum radeon_pm_state_type {
+	POWER_STATE_TYPE_DEFAULT,
+	POWER_STATE_TYPE_POWERSAVE,
+	POWER_STATE_TYPE_BATTERY,
+	POWER_STATE_TYPE_BALANCED,
+	POWER_STATE_TYPE_PERFORMANCE,
+};
+
+enum radeon_pm_profile_type {
+	PM_PROFILE_DEFAULT,
+	PM_PROFILE_AUTO,
+	PM_PROFILE_LOW,
+	PM_PROFILE_MID,
+	PM_PROFILE_HIGH,
+};
+
+#define PM_PROFILE_DEFAULT_IDX 0
+#define PM_PROFILE_LOW_SH_IDX  1
+#define PM_PROFILE_MID_SH_IDX  2
+#define PM_PROFILE_HIGH_SH_IDX 3
+#define PM_PROFILE_LOW_MH_IDX  4
+#define PM_PROFILE_MID_MH_IDX  5
+#define PM_PROFILE_HIGH_MH_IDX 6
+#define PM_PROFILE_MAX         7
+
+struct radeon_pm_profile {
+	int dpms_off_ps_idx;
+	int dpms_on_ps_idx;
+	int dpms_off_cm_idx;
+	int dpms_on_cm_idx;
+};
+
+enum radeon_int_thermal_type {
+	THERMAL_TYPE_NONE,
+	THERMAL_TYPE_RV6XX,
+	THERMAL_TYPE_RV770,
+	THERMAL_TYPE_EVERGREEN,
+	THERMAL_TYPE_SUMO,
+	THERMAL_TYPE_NI,
+	THERMAL_TYPE_SI,
+};
+
+struct radeon_voltage {
+	enum radeon_voltage_type type;
+	/* gpio voltage */
+	struct radeon_gpio_rec gpio;
+	u32 delay; /* delay in usec from voltage drop to sclk change */
+	bool active_high; /* voltage drop is active when bit is high */
+	/* VDDC voltage */
+	u8 vddc_id; /* index into vddc voltage table */
+	u8 vddci_id; /* index into vddci voltage table */
+	bool vddci_enabled;
+	/* r6xx+ sw */
+	u16 voltage;
+	/* evergreen+ vddci */
+	u16 vddci;
+};
+
+/* clock mode flags */
+#define RADEON_PM_MODE_NO_DISPLAY          (1 << 0)
+
+struct radeon_pm_clock_info {
+	/* memory clock */
+	u32 mclk;
+	/* engine clock */
+	u32 sclk;
+	/* voltage info */
+	struct radeon_voltage voltage;
+	/* standardized clock flags */
+	u32 flags;
+};
+
+/* state flags */
+#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
+
+struct radeon_power_state {
+	enum radeon_pm_state_type type;
+	struct radeon_pm_clock_info *clock_info;
+	/* number of valid clock modes in this power state */
+	int num_clock_modes;
+	struct radeon_pm_clock_info *default_clock_mode;
+	/* standardized state flags */
+	u32 flags;
+	u32 misc; /* vbios specific flags */
+	u32 misc2; /* vbios specific flags */
+	int pcie_lanes; /* pcie lanes */
+};
+
+/*
+ * Some modes are overclocked by a very small amount; accept them
+ */
+#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
+
+struct radeon_pm {
+	struct mutex		mutex;
+	/* write locked while reprogramming mclk */
+	struct rw_semaphore	mclk_lock;
+	u32			active_crtcs;
+	int			active_crtc_count;
+	int			req_vblank;
+	bool			vblank_sync;
+	fixed20_12		max_bandwidth;
+	fixed20_12		igp_sideport_mclk;
+	fixed20_12		igp_system_mclk;
+	fixed20_12		igp_ht_link_clk;
+	fixed20_12		igp_ht_link_width;
+	fixed20_12		k8_bandwidth;
+	fixed20_12		sideport_bandwidth;
+	fixed20_12		ht_bandwidth;
+	fixed20_12		core_bandwidth;
+	fixed20_12		sclk;
+	fixed20_12		mclk;
+	fixed20_12		needed_bandwidth;
+	struct radeon_power_state *power_state;
+	/* number of valid power states */
+	int                     num_power_states;
+	int                     current_power_state_index;
+	int                     current_clock_mode_index;
+	int                     requested_power_state_index;
+	int                     requested_clock_mode_index;
+	int                     default_power_state_index;
+	u32                     current_sclk;
+	u32                     current_mclk;
+	u16                     current_vddc;
+	u16                     current_vddci;
+	u32                     default_sclk;
+	u32                     default_mclk;
+	u16                     default_vddc;
+	u16                     default_vddci;
+	struct radeon_i2c_chan *i2c_bus;
+	/* selected pm method */
+	enum radeon_pm_method     pm_method;
+	/* dynpm power management */
+	struct delayed_work	dynpm_idle_work;
+	enum radeon_dynpm_state	dynpm_state;
+	enum radeon_dynpm_action	dynpm_planned_action;
+	unsigned long		dynpm_action_timeout;
+	bool                    dynpm_can_upclock;
+	bool                    dynpm_can_downclock;
+	/* profile-based power management */
+	enum radeon_pm_profile_type profile;
+	int                     profile_index;
+	struct radeon_pm_profile profiles[PM_PROFILE_MAX];
+	/* internal thermal controller on rv6xx+ */
+	enum radeon_int_thermal_type int_thermal_type;
+	struct device	        *int_hwmon_dev;
+};
+
+int radeon_pm_get_type_index(struct radeon_device *rdev,
+			     enum radeon_pm_state_type ps_type,
+			     int instance);
+/*
+ * UVD
+ */
+#define RADEON_MAX_UVD_HANDLES	10
+#define RADEON_UVD_STACK_SIZE	(1024*1024)
+#define RADEON_UVD_HEAP_SIZE	(1024*1024)
+
+struct radeon_uvd {
+	struct radeon_bo	*vcpu_bo;
+	void			*cpu_addr;
+	uint64_t		gpu_addr;
+	void			*saved_bo;
+	atomic_t		handles[RADEON_MAX_UVD_HANDLES];
+	struct drm_file		*filp[RADEON_MAX_UVD_HANDLES];
+	struct delayed_work	idle_work;
+};
+
+int radeon_uvd_init(struct radeon_device *rdev);
+void radeon_uvd_fini(struct radeon_device *rdev);
+int radeon_uvd_suspend(struct radeon_device *rdev);
+int radeon_uvd_resume(struct radeon_device *rdev);
+int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
+			      uint32_t handle, struct radeon_fence **fence);
+int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
+			       uint32_t handle, struct radeon_fence **fence);
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
+void radeon_uvd_free_handles(struct radeon_device *rdev,
+			     struct drm_file *filp);
+int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
+void radeon_uvd_note_usage(struct radeon_device *rdev);
+int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+				  unsigned vclk, unsigned dclk,
+				  unsigned vco_min, unsigned vco_max,
+				  unsigned fb_factor, unsigned fb_mask,
+				  unsigned pd_min, unsigned pd_max,
+				  unsigned pd_even,
+				  unsigned *optimal_fb_div,
+				  unsigned *optimal_vclk_div,
+				  unsigned *optimal_dclk_div);
+int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
+                                unsigned cg_upll_func_cntl);
+
+struct r600_audio {
+	int			channels;
+	int			rate;
+	int			bits_per_sample;
+	u8			status_bits;
+	u8			category_code;
+};
+
+/*
+ * Benchmarking
+ */
+void radeon_benchmark(struct radeon_device *rdev, int test_number);
+
+
+/*
+ * Testing
+ */
+void radeon_test_moves(struct radeon_device *rdev);
+void radeon_test_ring_sync(struct radeon_device *rdev,
+			   struct radeon_ring *cpA,
+			   struct radeon_ring *cpB);
+void radeon_test_syncing(struct radeon_device *rdev);
+
+
+/*
+ * Debugfs
+ */
+struct radeon_debugfs {
+	struct drm_info_list	*files;
+	unsigned		num_files;
+};
+
+int radeon_debugfs_add_files(struct radeon_device *rdev,
+			     struct drm_info_list *files,
+			     unsigned nfiles);
+int radeon_debugfs_fence_init(struct radeon_device *rdev);
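+
+/*
+ * Usage sketch (the names here are hypothetical): components register a
+ * static list of drm_info_list entries once at init time:
+ *
+ *	static struct drm_info_list foo_info_list[] = {
+ *		{ "foo_state", foo_debugfs_show, 0, NULL },
+ *	};
+ *
+ *	radeon_debugfs_add_files(rdev, foo_info_list, ARRAY_SIZE(foo_info_list));
+ */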
+
+
+/*
+ * ASIC specific functions.
+ */
+struct radeon_asic {
+	int (*init)(struct radeon_device *rdev);
+	void (*fini)(struct radeon_device *rdev);
+	int (*resume)(struct radeon_device *rdev);
+	int (*suspend)(struct radeon_device *rdev);
+	void (*vga_set_state)(struct radeon_device *rdev, bool state);
+	int (*asic_reset)(struct radeon_device *rdev);
+	/* ioctl hw specific callback. Some hw might want to perform a special
+	 * operation on specific ioctls. For instance, on wait idle some hw
+	 * might want to perform an HDP flush through MMIO as it seems that
+	 * some R6XX/R7XX hw doesn't take HDP flushes into account if
+	 * programmed through the ring.
+	 */
+	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+	/* check if 3D engine is idle */
+	bool (*gui_idle)(struct radeon_device *rdev);
+	/* wait for mc_idle */
+	int (*mc_wait_for_idle)(struct radeon_device *rdev);
+	/* get the reference clock */
+	u32 (*get_xclk)(struct radeon_device *rdev);
+	/* get the gpu clock counter */
+	uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
+	/* gart */
+	struct {
+		void (*tlb_flush)(struct radeon_device *rdev);
+		int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
+	} gart;
+	struct {
+		int (*init)(struct radeon_device *rdev);
+		void (*fini)(struct radeon_device *rdev);
+
+		u32 pt_ring_index;
+		void (*set_page)(struct radeon_device *rdev,
+				 struct radeon_ib *ib,
+				 uint64_t pe,
+				 uint64_t addr, unsigned count,
+				 uint32_t incr, uint32_t flags);
+	} vm;
+	/* ring specific callbacks */
+	struct {
+		void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+		int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+		void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+		void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+				       struct radeon_semaphore *semaphore, bool emit_wait);
+		int (*cs_parse)(struct radeon_cs_parser *p);
+		void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
+		int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+		int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+		bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
+		void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+	} ring[RADEON_NUM_RINGS];
+	/* irqs */
+	struct {
+		int (*set)(struct radeon_device *rdev);
+		int (*process)(struct radeon_device *rdev);
+	} irq;
+	/* displays */
+	struct {
+		/* display watermarks */
+		void (*bandwidth_update)(struct radeon_device *rdev);
+		/* get frame count */
+		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
+		/* wait for vblank */
+		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
+		/* set backlight level */
+		void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
+		/* get backlight level */
+		u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
+		/* audio callbacks */
+		void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
+		void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
+	} display;
+	/* copy functions for bo handling */
+	struct {
+		int (*blit)(struct radeon_device *rdev,
+			    uint64_t src_offset,
+			    uint64_t dst_offset,
+			    unsigned num_gpu_pages,
+			    struct radeon_fence **fence);
+		u32 blit_ring_index;
+		int (*dma)(struct radeon_device *rdev,
+			   uint64_t src_offset,
+			   uint64_t dst_offset,
+			   unsigned num_gpu_pages,
+			   struct radeon_fence **fence);
+		u32 dma_ring_index;
+		/* method used for bo copy */
+		int (*copy)(struct radeon_device *rdev,
+			    uint64_t src_offset,
+			    uint64_t dst_offset,
+			    unsigned num_gpu_pages,
+			    struct radeon_fence **fence);
+		/* ring used for bo copies */
+		u32 copy_ring_index;
+	} copy;
+	/* surfaces */
+	struct {
+		int (*set_reg)(struct radeon_device *rdev, int reg,
+				       uint32_t tiling_flags, uint32_t pitch,
+				       uint32_t offset, uint32_t obj_size);
+		void (*clear_reg)(struct radeon_device *rdev, int reg);
+	} surface;
+	/* hotplug detect */
+	struct {
+		void (*init)(struct radeon_device *rdev);
+		void (*fini)(struct radeon_device *rdev);
+		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+	} hpd;
+	/* power management */
+	struct {
+		void (*misc)(struct radeon_device *rdev);
+		void (*prepare)(struct radeon_device *rdev);
+		void (*finish)(struct radeon_device *rdev);
+		void (*init_profile)(struct radeon_device *rdev);
+		void (*get_dynpm_state)(struct radeon_device *rdev);
+		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
+		void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
+		void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
+		int (*get_pcie_lanes)(struct radeon_device *rdev);
+		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
+		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+		int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
+	} pm;
+	/* pageflipping */
+	struct {
+		void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+		u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+		void (*post_page_flip)(struct radeon_device *rdev, int crtc);
+	} pflip;
+};
+
+/*
+ * Asic structures
+ */
+struct r100_asic {
+	const unsigned		*reg_safe_bm;
+	unsigned		reg_safe_bm_size;
+	u32			hdp_cntl;
+};
+
+struct r300_asic {
+	const unsigned		*reg_safe_bm;
+	unsigned		reg_safe_bm_size;
+	u32			resync_scratch;
+	u32			hdp_cntl;
+};
+
+struct r600_asic {
+	unsigned		max_pipes;
+	unsigned		max_tile_pipes;
+	unsigned		max_simds;
+	unsigned		max_backends;
+	unsigned		max_gprs;
+	unsigned		max_threads;
+	unsigned		max_stack_entries;
+	unsigned		max_hw_contexts;
+	unsigned		max_gs_threads;
+	unsigned		sx_max_export_size;
+	unsigned		sx_max_export_pos_size;
+	unsigned		sx_max_export_smx_size;
+	unsigned		sq_num_cf_insts;
+	unsigned		tiling_nbanks;
+	unsigned		tiling_npipes;
+	unsigned		tiling_group_size;
+	unsigned		tile_config;
+	unsigned		backend_map;
+};
+
+struct rv770_asic {
+	unsigned		max_pipes;
+	unsigned		max_tile_pipes;
+	unsigned		max_simds;
+	unsigned		max_backends;
+	unsigned		max_gprs;
+	unsigned		max_threads;
+	unsigned		max_stack_entries;
+	unsigned		max_hw_contexts;
+	unsigned		max_gs_threads;
+	unsigned		sx_max_export_size;
+	unsigned		sx_max_export_pos_size;
+	unsigned		sx_max_export_smx_size;
+	unsigned		sq_num_cf_insts;
+	unsigned		sx_num_of_sets;
+	unsigned		sc_prim_fifo_size;
+	unsigned		sc_hiz_tile_fifo_size;
+	unsigned		sc_earlyz_tile_fifo_fize;
+	unsigned		tiling_nbanks;
+	unsigned		tiling_npipes;
+	unsigned		tiling_group_size;
+	unsigned		tile_config;
+	unsigned		backend_map;
+};
+
+struct evergreen_asic {
+	unsigned num_ses;
+	unsigned max_pipes;
+	unsigned max_tile_pipes;
+	unsigned max_simds;
+	unsigned max_backends;
+	unsigned max_gprs;
+	unsigned max_threads;
+	unsigned max_stack_entries;
+	unsigned max_hw_contexts;
+	unsigned max_gs_threads;
+	unsigned sx_max_export_size;
+	unsigned sx_max_export_pos_size;
+	unsigned sx_max_export_smx_size;
+	unsigned sq_num_cf_insts;
+	unsigned sx_num_of_sets;
+	unsigned sc_prim_fifo_size;
+	unsigned sc_hiz_tile_fifo_size;
+	unsigned sc_earlyz_tile_fifo_size;
+	unsigned tiling_nbanks;
+	unsigned tiling_npipes;
+	unsigned tiling_group_size;
+	unsigned tile_config;
+	unsigned backend_map;
+};
+
+struct cayman_asic {
+	unsigned max_shader_engines;
+	unsigned max_pipes_per_simd;
+	unsigned max_tile_pipes;
+	unsigned max_simds_per_se;
+	unsigned max_backends_per_se;
+	unsigned max_texture_channel_caches;
+	unsigned max_gprs;
+	unsigned max_threads;
+	unsigned max_gs_threads;
+	unsigned max_stack_entries;
+	unsigned sx_num_of_sets;
+	unsigned sx_max_export_size;
+	unsigned sx_max_export_pos_size;
+	unsigned sx_max_export_smx_size;
+	unsigned max_hw_contexts;
+	unsigned sq_num_cf_insts;
+	unsigned sc_prim_fifo_size;
+	unsigned sc_hiz_tile_fifo_size;
+	unsigned sc_earlyz_tile_fifo_size;
+
+	unsigned num_shader_engines;
+	unsigned num_shader_pipes_per_simd;
+	unsigned num_tile_pipes;
+	unsigned num_simds_per_se;
+	unsigned num_backends_per_se;
+	unsigned backend_disable_mask_per_asic;
+	unsigned backend_map;
+	unsigned num_texture_channel_caches;
+	unsigned mem_max_burst_length_bytes;
+	unsigned mem_row_size_in_kb;
+	unsigned shader_engine_tile_size;
+	unsigned num_gpus;
+	unsigned multi_gpu_tile_size;
+
+	unsigned tile_config;
+};
+
+struct si_asic {
+	unsigned max_shader_engines;
+	unsigned max_tile_pipes;
+	unsigned max_cu_per_sh;
+	unsigned max_sh_per_se;
+	unsigned max_backends_per_se;
+	unsigned max_texture_channel_caches;
+	unsigned max_gprs;
+	unsigned max_gs_threads;
+	unsigned max_hw_contexts;
+	unsigned sc_prim_fifo_size_frontend;
+	unsigned sc_prim_fifo_size_backend;
+	unsigned sc_hiz_tile_fifo_size;
+	unsigned sc_earlyz_tile_fifo_size;
+
+	unsigned num_tile_pipes;
+	unsigned num_backends_per_se;
+	unsigned backend_disable_mask_per_asic;
+	unsigned backend_map;
+	unsigned num_texture_channel_caches;
+	unsigned mem_max_burst_length_bytes;
+	unsigned mem_row_size_in_kb;
+	unsigned shader_engine_tile_size;
+	unsigned num_gpus;
+	unsigned multi_gpu_tile_size;
+
+	unsigned tile_config;
+	uint32_t tile_mode_array[32];
+};
+
+union radeon_asic_config {
+	struct r300_asic	r300;
+	struct r100_asic	r100;
+	struct r600_asic	r600;
+	struct rv770_asic	rv770;
+	struct evergreen_asic	evergreen;
+	struct cayman_asic	cayman;
+	struct si_asic		si;
+};
+
+/*
+ * asic initialization from radeon_asic.c
+ */
+void radeon_agp_disable(struct radeon_device *rdev);
+int radeon_asic_init(struct radeon_device *rdev);
+
+
+/*
+ * IOCTL.
+ */
+int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *filp);
+int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *filp);
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp);
+int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp);
+
+/* VRAM scratch page for HDP bug, default vram page */
+struct r600_vram_scratch {
+	struct radeon_bo		*robj;
+	volatile uint32_t		*ptr;
+	u64				gpu_addr;
+};
+
+/*
+ * ACPI
+ */
+struct radeon_atif_notification_cfg {
+	bool enabled;
+	int command_code;
+};
+
+struct radeon_atif_notifications {
+	bool display_switch;
+	bool expansion_mode_change;
+	bool thermal_state;
+	bool forced_power_state;
+	bool system_power_state;
+	bool display_conf_change;
+	bool px_gfx_switch;
+	bool brightness_change;
+	bool dgpu_display_event;
+};
+
+struct radeon_atif_functions {
+	bool system_params;
+	bool sbios_requests;
+	bool select_active_disp;
+	bool lid_state;
+	bool get_tv_standard;
+	bool set_tv_standard;
+	bool get_panel_expansion_mode;
+	bool set_panel_expansion_mode;
+	bool temperature_change;
+	bool graphics_device_types;
+};
+
+struct radeon_atif {
+	struct radeon_atif_notifications notifications;
+	struct radeon_atif_functions functions;
+	struct radeon_atif_notification_cfg notification_cfg;
+	struct radeon_encoder *encoder_for_bl;
+};
+
+struct radeon_atcs_functions {
+	bool get_ext_state;
+	bool pcie_perf_req;
+	bool pcie_dev_rdy;
+	bool pcie_bus_width;
+};
+
+struct radeon_atcs {
+	struct radeon_atcs_functions functions;
+};
+
+/*
+ * Core structure, functions and helpers.
+ */
+typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
+typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
+
+struct radeon_device {
+	struct device			*dev;
+	struct drm_device		*ddev;
+	struct pci_dev			*pdev;
+	struct rw_semaphore		exclusive_lock;
+	/* ASIC */
+	union radeon_asic_config	config;
+	enum radeon_family		family;
+	unsigned long			flags;
+	int				usec_timeout;
+	enum radeon_pll_errata		pll_errata;
+	int				num_gb_pipes;
+	int				num_z_pipes;
+	int				disp_priority;
+	/* BIOS */
+	uint8_t				*bios;
+	bool				is_atom_bios;
+	uint16_t			bios_header_start;
+	struct radeon_bo		*stollen_vga_memory;
+	/* Register mmio */
+	resource_size_t			rmmio_base;
+	resource_size_t			rmmio_size;
+	/* protects concurrent MM_INDEX/DATA based register access */
+	spinlock_t mmio_idx_lock;
+	void __iomem			*rmmio;
+	radeon_rreg_t			mc_rreg;
+	radeon_wreg_t			mc_wreg;
+	radeon_rreg_t			pll_rreg;
+	radeon_wreg_t			pll_wreg;
+	uint32_t                        pcie_reg_mask;
+	radeon_rreg_t			pciep_rreg;
+	radeon_wreg_t			pciep_wreg;
+	/* io port */
+	void __iomem                    *rio_mem;
+	resource_size_t			rio_mem_size;
+	struct radeon_clock             clock;
+	struct radeon_mc		mc;
+	struct radeon_gart		gart;
+	struct radeon_mode_info		mode_info;
+	struct radeon_scratch		scratch;
+	struct radeon_mman		mman;
+	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
+	wait_queue_head_t		fence_queue;
+	struct mutex			ring_lock;
+	struct radeon_ring		ring[RADEON_NUM_RINGS];
+	bool				ib_pool_ready;
+	struct radeon_sa_manager	ring_tmp_bo;
+	struct radeon_irq		irq;
+	struct radeon_asic		*asic;
+	struct radeon_gem		gem;
+	struct radeon_pm		pm;
+	struct radeon_uvd		uvd;
+	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
+	struct radeon_wb		wb;
+	struct radeon_dummy_page	dummy_page;
+	bool				shutdown;
+	bool				suspend;
+	bool				need_dma32;
+	bool				accel_working;
+	bool				fastfb_working; /* IGP feature*/
+	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
+	const struct firmware *me_fw;	/* all family ME firmware */
+	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
+	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
+	const struct firmware *mc_fw;	/* NI MC firmware */
+	const struct firmware *ce_fw;	/* SI CE firmware */
+	const struct firmware *uvd_fw;	/* UVD firmware */
+	struct r600_blit r600_blit;
+	struct r600_vram_scratch vram_scratch;
+	int msi_enabled; /* msi enabled */
+	struct r600_ih ih; /* r6/700 interrupt ring */
+	struct si_rlc rlc;
+	struct work_struct hotplug_work;
+	struct work_struct audio_work;
+	int num_crtc; /* number of crtcs */
+	struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
+	bool audio_enabled;
+	bool has_uvd;
+	struct r600_audio audio_status; /* audio stuff */
+	struct notifier_block acpi_nb;
+	/* only one userspace can use Hyperz features or CMASK at a time */
+	struct drm_file *hyperz_filp;
+	struct drm_file *cmask_filp;
+	/* i2c buses */
+	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
+	/* debugfs */
+	struct radeon_debugfs	debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
+	unsigned 		debugfs_count;
+	/* virtual memory */
+	struct radeon_vm_manager	vm_manager;
+	struct mutex			gpu_clock_mutex;
+	/* ACPI interface */
+	struct radeon_atif		atif;
+	struct radeon_atcs		atcs;
+};
+
+int radeon_device_init(struct radeon_device *rdev,
+		       struct drm_device *ddev,
+		       struct pci_dev *pdev,
+		       uint32_t flags);
+void radeon_device_fini(struct radeon_device *rdev);
+int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
+
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+		      bool always_indirect);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+		  bool always_indirect);
+u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
+void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+
+/*
+ * Cast helper
+ */
+#define to_radeon_fence(p) ((struct radeon_fence *)(p))
+
+/*
+ * Registers read & write functions.
+ */
+#define RREG8(reg) readb((rdev->rmmio) + (reg))
+#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
+#define RREG16(reg) readw((rdev->rmmio) + (reg))
+#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
+#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
+#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
+#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
+#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
+#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
+#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
+#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
+#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
+#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
+#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
+#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
+#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
+#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
+#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
+#define WREG32_P(reg, val, mask)				\
+	do {							\
+		uint32_t tmp_ = RREG32(reg);			\
+		tmp_ &= (mask);					\
+		tmp_ |= ((val) & ~(mask));			\
+		WREG32(reg, tmp_);				\
+	} while (0)
+#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
+#define WREG32_PLL_P(reg, val, mask)				\
+	do {							\
+		uint32_t tmp_ = RREG32_PLL(reg);		\
+		tmp_ &= (mask);					\
+		tmp_ |= ((val) & ~(mask));			\
+		WREG32_PLL(reg, tmp_);				\
+	} while (0)
+#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
+#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
+#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
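+
+/*
+ * Note on WREG32_P() semantics (worked example): the mask selects the
+ * bits to PRESERVE from the current register value and val supplies the
+ * rest, so WREG32_P(REG, 0x5, ~0xf) keeps everything above the low
+ * nibble and writes 0x5 into it. WREG32_AND()/WREG32_OR() above are the
+ * two degenerate cases of this read-modify-write.
+ */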
+
+/*
+ * Indirect registers accessor
+ */
+static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	uint32_t r;
+
+	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
+	r = RREG32(RADEON_PCIE_DATA);
+	return r;
+}
+
+static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
+	WREG32(RADEON_PCIE_DATA, (v));
+}
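+
+/*
+ * The index/data pair above is a single shared window into the PCIE
+ * register space: writing RADEON_PCIE_INDEX selects the register and the
+ * following RADEON_PCIE_DATA access hits it, so two such sequences must
+ * not interleave. The mc/pll accessors reached through rdev->mc_rreg,
+ * rdev->pll_rreg and friends follow the same pattern.
+ */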
+
+void r100_pll_errata_after_index(struct radeon_device *rdev);
+
+
+/*
+ * ASICs helpers.
+ */
+#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
+			    (rdev->pdev->device == 0x5969))
+#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
+		(rdev->family == CHIP_RV200) || \
+		(rdev->family == CHIP_RS100) || \
+		(rdev->family == CHIP_RS200) || \
+		(rdev->family == CHIP_RV250) || \
+		(rdev->family == CHIP_RV280) || \
+		(rdev->family == CHIP_RS300))
+#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300)  ||	\
+		(rdev->family == CHIP_RV350) ||			\
+		(rdev->family == CHIP_R350)  ||			\
+		(rdev->family == CHIP_RV380) ||			\
+		(rdev->family == CHIP_R420)  ||			\
+		(rdev->family == CHIP_R423)  ||			\
+		(rdev->family == CHIP_RV410) ||			\
+		(rdev->family == CHIP_RS400) ||			\
+		(rdev->family == CHIP_RS480))
+#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
+		(rdev->ddev->pdev->device == 0x9443) || \
+		(rdev->ddev->pdev->device == 0x944B) || \
+		(rdev->ddev->pdev->device == 0x9506) || \
+		(rdev->ddev->pdev->device == 0x9509) || \
+		(rdev->ddev->pdev->device == 0x950F) || \
+		(rdev->ddev->pdev->device == 0x689C) || \
+		(rdev->ddev->pdev->device == 0x689D))
+#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
+#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600)  ||	\
+			    (rdev->family == CHIP_RS690)  ||	\
+			    (rdev->family == CHIP_RS740)  ||	\
+			    (rdev->family >= CHIP_R600))
+#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
+#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
+#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
+#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
+			     (rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
+#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
+#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
+			     (rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
+#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
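+
+/*
+ * Usage sketch: code gates hardware paths on these helpers rather than on
+ * raw family checks (the setup functions below are hypothetical):
+ *
+ *	if (ASIC_IS_DCE4(rdev))
+ *		setup_dce4_watermarks(rdev);
+ *	else if (ASIC_IS_AVIVO(rdev))
+ *		setup_avivo_watermarks(rdev);
+ */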
+
+/*
+ * BIOS helpers.
+ */
+#define RBIOS8(i) (rdev->bios[i])
+#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
+#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
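+
+/*
+ * These assemble little-endian values from the BIOS image one byte at a
+ * time, e.g. (sketch) reading the 16-bit header pointer at offset 0x48:
+ *
+ *	rdev->bios_header_start = RBIOS16(0x48);
+ */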
+
+int radeon_combios_init(struct radeon_device *rdev);
+void radeon_combios_fini(struct radeon_device *rdev);
+int radeon_atombios_init(struct radeon_device *rdev);
+void radeon_atombios_fini(struct radeon_device *rdev);
+
+
+/*
+ * RING helpers.
+ */
+#if DRM_DEBUG_CODE == 0
+static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+	ring->ring[ring->wptr++] = v;	/* store the dword, advance wptr */
+	ring->wptr &= ring->ptr_mask;	/* wrap around the ring buffer */
+	ring->count_dw--;		/* dwords left in the current reservation */
+	ring->ring_free_dw--;		/* dwords free in the whole ring */
+}
+#else
+/* With debugging this is just too big to inline */
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
+#endif
+
+/*
+ * ASICs macro.
+ */
+#define radeon_init(rdev) (rdev)->asic->init((rdev))
+#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
+#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
+#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
+#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
+#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
+#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
+#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
+#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
+#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
+#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
+#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
+#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
+#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
+#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
+#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
+#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
+#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
+#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
+#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
+#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
+#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
+#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
+#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
+#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
+#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
+#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
+#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
+#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
+#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
+#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
+#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
+#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
+#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
+#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
+#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
+#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
+#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
+#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
+#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
+#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
+#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
+#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
+#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
+#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
+#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
+#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
+#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
+#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
+#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
+#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
+#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
+#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
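+
+/*
+ * These macros simply indirect through the per-ASIC function table so the
+ * generic code never branches on the family. For example (sketch):
+ *
+ *	radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring)
+ *
+ * expands to rdev->asic->ring[RADEON_RING_TYPE_GFX_INDEX].ring_test(rdev, ring).
+ */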
+
+/* Common functions */
+/* AGP */
+extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
+extern void radeon_agp_disable(struct radeon_device *rdev);
+extern int radeon_modeset_init(struct radeon_device *rdev);
+extern void radeon_modeset_fini(struct radeon_device *rdev);
+extern bool radeon_card_posted(struct radeon_device *rdev);
+extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
+extern void radeon_update_display_priority(struct radeon_device *rdev);
+extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
+extern void radeon_scratch_init(struct radeon_device *rdev);
+extern void radeon_wb_fini(struct radeon_device *rdev);
+extern int radeon_wb_init(struct radeon_device *rdev);
+extern void radeon_wb_disable(struct radeon_device *rdev);
+extern void radeon_surface_init(struct radeon_device *rdev);
+extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
+extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
+extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
+extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
+extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+extern int radeon_resume_kms(struct drm_device *dev);
+extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
+extern void radeon_program_register_sequence(struct radeon_device *rdev,
+					     const u32 *registers,
+					     const u32 array_size);
+
+/*
+ * vm
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev);
+void radeon_vm_manager_fini(struct radeon_device *rdev);
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+				       struct radeon_vm *vm, int ring);
+void radeon_vm_fence(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_fence *fence);
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+			    struct radeon_vm *vm,
+			    struct radeon_bo *bo,
+			    struct ttm_mem_reg *mem);
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+			     struct radeon_bo *bo);
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+				       struct radeon_bo *bo);
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+				      struct radeon_vm *vm,
+				      struct radeon_bo *bo);
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+			  struct radeon_bo_va *bo_va,
+			  uint64_t offset,
+			  uint32_t flags);
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+		     struct radeon_bo_va *bo_va);
+
+/* audio */
+void r600_audio_update_hdmi(struct work_struct *work);
+
+/*
+ * R600 vram scratch functions
+ */
+int r600_vram_scratch_init(struct radeon_device *rdev);
+void r600_vram_scratch_fini(struct radeon_device *rdev);
+
+/*
+ * r600 cs checking helper
+ */
+unsigned r600_mip_minify(unsigned size, unsigned level);
+bool r600_fmt_is_valid_color(u32 format);
+bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
+int r600_fmt_get_blocksize(u32 format);
+int r600_fmt_get_nblocksx(u32 format, u32 w);
+int r600_fmt_get_nblocksy(u32 format, u32 h);
+
+/*
+ * r600 functions used by radeon_encoder.c
+ */
+struct radeon_hdmi_acr {
+	u32 clock;
+
+	int n_32khz;
+	int cts_32khz;
+
+	int n_44_1khz;
+	int cts_44_1khz;
+
+	int n_48khz;
+	int cts_48khz;
+};
+
+extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
+
+extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+				     u32 tiling_pipe_num,
+				     u32 max_rb_num,
+				     u32 total_max_rb_num,
+				     u32 enabled_rb_mask);
+
+/*
+ * evergreen functions used by radeon_encoder.c
+ */
+
+extern int ni_init_microcode(struct radeon_device *rdev);
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
+
+/* radeon_acpi.c */
+#if defined(CONFIG_ACPI)
+extern int radeon_acpi_init(struct radeon_device *rdev);
+extern void radeon_acpi_fini(struct radeon_device *rdev);
+#else
+static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
+static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
+#endif
+
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+			   struct radeon_cs_packet *pkt,
+			   unsigned idx);
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+			   struct radeon_cs_packet *pkt);
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+				struct radeon_cs_reloc **cs_reloc,
+				int nomm);
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+			       uint32_t *vline_start_end,
+			       uint32_t *vline_status);
+
+#include "radeon_object.h"
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_acpi.c b/linux-imx/drivers/gpu/drm/radeon/radeon_acpi.c
new file mode 100644
index 0000000..196d28d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -0,0 +1,649 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/video.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "radeon.h"
+#include "radeon_acpi.h"
+#include "atom.h"
+
+#include <linux/vga_switcheroo.h>
+
+#define ACPI_AC_CLASS           "ac_adapter"
+
+extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev);
+
+struct atif_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 notification_mask;	/* supported notifications mask */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
+struct atif_system_params {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u32 valid_mask;		/* valid flags mask */
+	u32 flags;		/* flags */
+	u8 command_code;	/* notify command code */
+} __packed;
+
+struct atif_sbios_requests {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u32 pending;		/* pending sbios requests */
+	u8 panel_exp_mode;	/* panel expansion mode */
+	u8 thermal_gfx;		/* thermal state: target gfx controller */
+	u8 thermal_state;	/* thermal state: state id (0: exit state, non-0: state) */
+	u8 forced_power_gfx;	/* forced power state: target gfx controller */
+	u8 forced_power_state;	/* forced power state: state id */
+	u8 system_power_src;	/* system power source */
+	u8 backlight_level;	/* panel backlight level (0-255) */
+} __packed;
+
+#define ATIF_NOTIFY_MASK	0x3
+#define ATIF_NOTIFY_NONE	0
+#define ATIF_NOTIFY_81		1
+#define ATIF_NOTIFY_N		2
+
+struct atcs_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
+/**
+ * radeon_atif_call - call an ATIF method
+ *
+ * @handle: acpi handle
+ * @function: the ATIF function to execute
+ * @params: ATIF function params
+ *
+ * Executes the requested ATIF function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *radeon_atif_call(acpi_handle handle, int function,
+		struct acpi_buffer *params)
+{
+	acpi_status status;
+	union acpi_object atif_arg_elements[2];
+	struct acpi_object_list atif_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	atif_arg.count = 2;
+	atif_arg.pointer = &atif_arg_elements[0];
+
+	atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atif_arg_elements[0].integer.value = function;
+
+	if (params) {
+		atif_arg_elements[1].type = ACPI_TYPE_BUFFER;
+		atif_arg_elements[1].buffer.length = params->length;
+		atif_arg_elements[1].buffer.pointer = params->pointer;
+	} else {
+		/* We need a second fake parameter */
+		atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
+		atif_arg_elements[1].integer.value = 0;
+	}
+
+	status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+
+	/* Fail only if calling the method fails and ATIF is supported */
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+				 acpi_format_exception(status));
+		kfree(buffer.pointer);
+		return NULL;
+	}
+
+	return buffer.pointer;
+}
+
+/**
+ * radeon_atif_parse_notification - parse supported notifications
+ *
+ * @n: supported notifications struct
+ * @mask: supported notifications mask from ATIF
+ *
+ * Use the supported notifications mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications
+ * are supported (all asics).
+ */
+static void radeon_atif_parse_notification(struct radeon_atif_notifications *n, u32 mask)
+{
+	n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
+	n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
+	n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
+	n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
+	n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
+	n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
+	n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
+	n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
+	n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+}
+
+/**
+ * radeon_atif_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATIF
+ *
+ * Use the supported functions mask from ATIF function
+ * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atif_parse_functions(struct radeon_atif_functions *f, u32 mask)
+{
+	f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
+	f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
+	f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
+	f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
+	f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
+	f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
+	f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
+	f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
+	f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
+	f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+}
+
+/**
+ * radeon_atif_verify_interface - verify ATIF
+ *
+ * @handle: acpi handle
+ * @atif: radeon atif struct
+ *
+ * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
+ * to initialize ATIF and determine what features are supported
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_verify_interface(acpi_handle handle,
+		struct radeon_atif *atif)
+{
+	union acpi_object *info;
+	struct atif_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 12) {
+		DRM_INFO("ATIF buffer is too small: %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
+
+	memcpy(&output, info->buffer.pointer, size);
+
+	/* TODO: check version? */
+	DRM_DEBUG_DRIVER("ATIF version %u\n", output.version);
+
+	radeon_atif_parse_notification(&atif->notifications, output.notification_mask);
+	radeon_atif_parse_functions(&atif->functions, output.function_bits);
+
+out:
+	kfree(info);
+	return err;
+}
+
+/**
+ * radeon_atif_get_notification_params - determine notify configuration
+ *
+ * @handle: acpi handle
+ * @n: atif notification configuration struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
+ * to determine if a notifier is used and if so which one
+ * (all asics).  This is either Notify(VGA, 0x81) or Notify(VGA, n)
+ * where n is specified in the result if a notifier is used.
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_get_notification_params(acpi_handle handle,
+		struct radeon_atif_notification_cfg *n)
+{
+	union acpi_object *info;
+	struct atif_system_params params;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+	if (!info) {
+		err = -EIO;
+		goto out;
+	}
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 10) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	memset(&params, 0, sizeof(params));
+	size = min(sizeof(params), size);
+	memcpy(&params, info->buffer.pointer, size);
+
+	DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n",
+			params.valid_mask, params.flags);
+	params.flags &= params.valid_mask;
+
+	if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) {
+		n->enabled = false;
+		n->command_code = 0;
+	} else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) {
+		n->enabled = true;
+		n->command_code = 0x81;
+	} else {
+		if (size < 11) {
+			err = -EINVAL;
+			goto out;
+		}
+		n->enabled = true;
+		n->command_code = params.command_code;
+	}
+
+out:
+	DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n",
+			(n->enabled ? "enabled" : "disabled"),
+			n->command_code);
+	kfree(info);
+	return err;
+}
+
+/**
+ * radeon_atif_get_sbios_requests - get requested sbios event
+ *
+ * @handle: acpi handle
+ * @req: atif sbios request struct
+ *
+ * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
+ * to determine what requests the sbios is making to the driver
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atif_get_sbios_requests(acpi_handle handle,
+		struct atif_sbios_requests *req)
+{
+	union acpi_object *info;
+	size_t size;
+	int count = 0;
+
+	info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+	if (!info)
+		return -EIO;
+
+	size = *(u16 *)info->buffer.pointer;
+	if (size < 0xd) {
+		count = -EINVAL;
+		goto out;
+	}
+	memset(req, 0, sizeof(*req));
+
+	size = min(sizeof(*req), size);
+	memcpy(req, info->buffer.pointer, size);
+	DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
+
+	count = hweight32(req->pending);
+
+out:
+	kfree(info);
+	return count;
+}
+
+/**
+ * radeon_atif_handler - handle ATIF notify requests
+ *
+ * @rdev: radeon_device pointer
+ * @event: atif sbios request struct
+ *
+ * Checks the acpi event and if it matches an atif event,
+ * handles it.
+ * Returns NOTIFY code
+ */
+int radeon_atif_handler(struct radeon_device *rdev,
+		struct acpi_bus_event *event)
+{
+	struct radeon_atif *atif = &rdev->atif;
+	struct atif_sbios_requests req;
+	acpi_handle handle;
+	int count;
+
+	DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
+			event->device_class, event->type);
+
+	if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+		return NOTIFY_DONE;
+
+	if (!atif->notification_cfg.enabled ||
+			event->type != atif->notification_cfg.command_code)
+		/* Not our event */
+		return NOTIFY_DONE;
+
+	/* Check pending SBIOS requests */
+	handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+	count = radeon_atif_get_sbios_requests(handle, &req);
+
+	if (count <= 0)
+		return NOTIFY_DONE;
+
+	DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+
+	if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
+		struct radeon_encoder *enc = atif->encoder_for_bl;
+
+		if (enc) {
+			DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+					req.backlight_level);
+
+			radeon_set_backlight_level(rdev, enc, req.backlight_level);
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+			if (rdev->is_atom_bios) {
+				struct radeon_encoder_atom_dig *dig = enc->enc_priv;
+				backlight_force_update(dig->bl_dev,
+						       BACKLIGHT_UPDATE_HOTKEY);
+			} else {
+				struct radeon_encoder_lvds *dig = enc->enc_priv;
+				backlight_force_update(dig->bl_dev,
+						       BACKLIGHT_UPDATE_HOTKEY);
+			}
+#endif
+		}
+	}
+	/* TODO: check other events */
+
+	/* We've handled the event, stop the notifier chain. The ACPI interface
+	 * overloads ACPI_VIDEO_NOTIFY_PROBE, and we don't want to send that to
+	 * userspace if the event was generated only to signal an SBIOS
+	 * request.
+	 */
+	return NOTIFY_BAD;
+}
+
+/**
+ * radeon_atcs_call - call an ATCS method
+ *
+ * @handle: acpi handle
+ * @function: the ATCS function to execute
+ * @params: ATCS function params
+ *
+ * Executes the requested ATCS function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *radeon_atcs_call(acpi_handle handle, int function,
+					   struct acpi_buffer *params)
+{
+	acpi_status status;
+	union acpi_object atcs_arg_elements[2];
+	struct acpi_object_list atcs_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	atcs_arg.count = 2;
+	atcs_arg.pointer = &atcs_arg_elements[0];
+
+	atcs_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atcs_arg_elements[0].integer.value = function;
+
+	if (params) {
+		atcs_arg_elements[1].type = ACPI_TYPE_BUFFER;
+		atcs_arg_elements[1].buffer.length = params->length;
+		atcs_arg_elements[1].buffer.pointer = params->pointer;
+	} else {
+		/* We need a second fake parameter */
+		atcs_arg_elements[1].type = ACPI_TYPE_INTEGER;
+		atcs_arg_elements[1].integer.value = 0;
+	}
+
+	status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
+
+	/* Fail only if calling the method fails and ATCS is supported */
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n",
+				 acpi_format_exception(status));
+		kfree(buffer.pointer);
+		return NULL;
+	}
+
+	return buffer.pointer;
+}
+
+/**
+ * radeon_atcs_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATCS
+ *
+ * Use the supported functions mask from ATCS function
+ * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atcs_parse_functions(struct radeon_atcs_functions *f, u32 mask)
+{
+	f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED;
+	f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
+	f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
+	f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
+}
+
+/**
+ * radeon_atcs_verify_interface - verify ATCS
+ *
+ * @handle: acpi handle
+ * @atcs: radeon atcs struct
+ *
+ * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
+ * to initialize ATCS and determine what features are supported
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atcs_verify_interface(acpi_handle handle,
+					struct radeon_atcs *atcs)
+{
+	union acpi_object *info;
+	struct atcs_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 8) {
+		DRM_INFO("ATCS buffer is too small: %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
+
+	memcpy(&output, info->buffer.pointer, size);
+
+	/* TODO: check version? */
+	DRM_DEBUG_DRIVER("ATCS version %u\n", output.version);
+
+	radeon_atcs_parse_functions(&atcs->functions, output.function_bits);
+
+out:
+	kfree(info);
+	return err;
+}
+
+/**
+ * radeon_acpi_event - handle notify events
+ *
+ * @nb: notifier block
+ * @val: notifier action (unused)
+ * @data: acpi event
+ *
+ * Calls relevant radeon functions in response to various
+ * acpi events.
+ * Returns NOTIFY code
+ */
+static int radeon_acpi_event(struct notifier_block *nb,
+			     unsigned long val,
+			     void *data)
+{
+	struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
+	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+	if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
+		if (power_supply_is_system_supplied() > 0)
+			DRM_DEBUG_DRIVER("pm: AC\n");
+		else
+			DRM_DEBUG_DRIVER("pm: DC\n");
+
+		radeon_pm_acpi_event_handler(rdev);
+	}
+
+	/* Check for pending SBIOS requests */
+	return radeon_atif_handler(rdev, entry);
+}
+
+/* Call all ACPI methods here */
+/**
+ * radeon_acpi_init - init driver acpi support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Verifies the AMD ACPI interfaces and registers with the acpi
+ * notifier chain (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_acpi_init(struct radeon_device *rdev)
+{
+	acpi_handle handle;
+	struct radeon_atif *atif = &rdev->atif;
+	struct radeon_atcs *atcs = &rdev->atcs;
+	int ret;
+
+	/* Get the device handle */
+	handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+
+	/* No need to proceed if we're sure that ATIF is not supported */
+	if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+		return 0;
+
+	/* Call the ATCS method */
+	ret = radeon_atcs_verify_interface(handle, atcs);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
+	}
+
+	/* Call the ATIF method */
+	ret = radeon_atif_verify_interface(handle, atif);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+		goto out;
+	}
+
+	if (atif->notifications.brightness_change) {
+		struct drm_encoder *tmp;
+		struct radeon_encoder *target = NULL;
+
+		/* Find the encoder controlling the brightness */
+		list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list,
+				head) {
+			struct radeon_encoder *enc = to_radeon_encoder(tmp);
+
+			if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+			    enc->enc_priv) {
+				if (rdev->is_atom_bios) {
+					struct radeon_encoder_atom_dig *dig = enc->enc_priv;
+					if (dig->bl_dev) {
+						target = enc;
+						break;
+					}
+				} else {
+					struct radeon_encoder_lvds *dig = enc->enc_priv;
+					if (dig->bl_dev) {
+						target = enc;
+						break;
+					}
+				}
+			}
+		}
+
+		atif->encoder_for_bl = target;
+		if (!target) {
+			/* Brightness change notification is enabled, but we
+			 * didn't find a backlight controller; this should
+			 * never happen.
+			 */
+			DRM_ERROR("Cannot find a backlight controller\n");
+		}
+	}
+
+	if (atif->functions.sbios_requests && !atif->functions.system_params) {
+		/* XXX check this workaround: if the sbios request function is
+		 * present, we have to see how it's configured in the system
+		 * params
+		 */
+		atif->functions.system_params = true;
+	}
+
+	if (atif->functions.system_params) {
+		ret = radeon_atif_get_notification_params(handle,
+				&atif->notification_cfg);
+		if (ret) {
+			DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
+					ret);
+			/* Disable notification */
+			atif->notification_cfg.enabled = false;
+		}
+	}
+
+out:
+	rdev->acpi_nb.notifier_call = radeon_acpi_event;
+	register_acpi_notifier(&rdev->acpi_nb);
+
+	return ret;
+}
+
+/**
+ * radeon_acpi_fini - tear down driver acpi support
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unregisters with the acpi notifier chain (all asics).
+ */
+void radeon_acpi_fini(struct radeon_device *rdev)
+{
+	unregister_acpi_notifier(&rdev->acpi_nb);
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_acpi.h b/linux-imx/drivers/gpu/drm/radeon/radeon_acpi.h
new file mode 100644
index 0000000..be4af76
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_acpi.h
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef RADEON_ACPI_H
+#define RADEON_ACPI_H
+
+struct radeon_device;
+struct acpi_bus_event;
+
+int radeon_atif_handler(struct radeon_device *rdev,
+		struct acpi_bus_event *event);
+
+/* AMD hw uses four ACPI control methods:
+ * 1. ATIF
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATIF provides an entry point for the gfx driver to interact with the sbios.
+ * The AMD ACPI notification mechanism uses Notify (VGA, 0x81) or a custom
+ * notification. Which notification is used is indicated by the ATIF Control
+ * Method GET_SYSTEM_PARAMETERS. When the driver receives Notify (VGA, 0x81) or
+ * a custom notification it invokes ATIF Control Method GET_SYSTEM_BIOS_REQUESTS
+ * to identify pending System BIOS requests and associated parameters. For
+ * example, if one of the pending requests is DISPLAY_SWITCH_REQUEST, the driver
+ * will perform display device detection and invoke ATIF Control Method
+ * SELECT_ACTIVE_DISPLAYS.
+ *
+ * 2. ATPX
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATPX methods are used on PowerXpress systems to handle mux switching and
+ * discrete GPU power control.
+ *
+ * 3. ATRM
+ * ARG0: (ACPI_INTEGER) offset of vbios rom data
+ * ARG1: (ACPI_BUFFER) size of the buffer to fill (up to 4K).
+ * OUTPUT: (ACPI_BUFFER) output buffer
+ * ATRM provides an interface to access the discrete GPU vbios image on
+ * PowerXpress systems with multiple GPUs.
+ *
+ * 4. ATCS
+ * ARG0: (ACPI_INTEGER) function code
+ * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
+ * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
+ * ATCS provides an interface to AMD chipset specific functionality.
+ *
+ */
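+/* Illustrative sketch (not driver code): all four methods share the same
+ * calling convention, so a call reduces to packing ARG0/ARG1 into an
+ * acpi_object_list and evaluating the method by name; params/params_len
+ * below are placeholders:
+ *
+ *	union acpi_object args[2];
+ *	struct acpi_object_list arg_list = { 2, args };
+ *	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ *
+ *	args[0].type = ACPI_TYPE_INTEGER;
+ *	args[0].integer.value = function;
+ *	args[1].type = ACPI_TYPE_BUFFER;
+ *	args[1].buffer.length = params_len;
+ *	args[1].buffer.pointer = params;
+ *	acpi_evaluate_object(handle, "ATIF", &arg_list, &out);
+ *
+ * radeon_atif_call()/radeon_atcs_call() in radeon_acpi.c follow this pattern.
+ */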
+/* ATIF */
+#define ATIF_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATIF_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported notifications mask
+ * DWORD - supported functions bit vector
+ */
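+/* A minimal sketch of this output as a packed struct (field names are
+ * illustrative; the driver consumes it via struct atif_verify_interface
+ * in radeon_acpi.c):
+ *
+ *	struct atif_verify_interface_out {
+ *		u16 size;		// includes the size field itself
+ *		u16 version;
+ *		u32 notification_mask;
+ *		u32 function_bits;
+ *	} __packed;
+ */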
+/* Notifications mask */
+#       define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED               (1 << 0)
+#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED        (1 << 1)
+#       define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED         (1 << 2)
+#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED    (1 << 3)
+#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED   (1 << 4)
+#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED          (1 << 5)
+#       define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED                (1 << 6)
+#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED      (1 << 7)
+#       define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED                   (1 << 8)
+/* supported functions vector */
+#       define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED               (1 << 0)
+#       define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED            (1 << 1)
+#       define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED              (1 << 2)
+#       define ATIF_GET_LID_STATE_SUPPORTED                       (1 << 3)
+#       define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED           (1 << 4)
+#       define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED             (1 << 5)
+#       define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED  (1 << 6)
+#       define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED    (1 << 7)
+#       define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED     (1 << 12)
+#       define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED           (1 << 14)
+#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS                        0x1
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ *
+ * OR
+ *
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ * BYTE  - notify command code
+ *
+ * flags
+ * bits 1:0:
+ * 0 - Notify(VGA, 0x81) is not used for notification
+ * 1 - Notify(VGA, 0x81) is used for notification
+ * 2 - Notify(VGA, n) is used for notification where
+ * n (0xd0-0xd9) is specified in notify command code.
+ * bit 2:
+ * 1 - lid changes not reported through int10
+ */
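+/* Illustrative decode of bits 1:0 (the driver does the equivalent in
+ * radeon_atif_get_notification_params() using ATIF_NOTIFY_MASK; notify,
+ * code and command_code are placeholders):
+ *
+ *	switch (flags & 0x3) {
+ *	case 0: notify = false; break;
+ *	case 1: code = 0x81; break;
+ *	case 2: code = command_code; break;
+ *	}
+ */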
+#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS                     0x2
+/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - pending sbios requests
+ * BYTE  - panel expansion mode
+ * BYTE  - thermal state: target gfx controller
+ * BYTE  - thermal state: state id (0: exit state, non-0: state)
+ * BYTE  - forced power state: target gfx controller
+ * BYTE  - forced power state: state id
+ * BYTE  - system power source
+ * BYTE  - panel backlight level (0-255)
+ */
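+/* Illustrative packed-struct sketch of this output (field names assumed;
+ * the driver's struct atif_sbios_requests in radeon_acpi.c covers the
+ * same layout):
+ *
+ *	struct atif_sbios_requests_out {
+ *		u16 size;		// includes the size field itself
+ *		u32 pending;		// pending sbios requests
+ *		u8 panel_exp_mode;
+ *		u8 thermal_gfx;
+ *		u8 thermal_state;
+ *		u8 forced_power_gfx;
+ *		u8 forced_power_state;
+ *		u8 system_power_src;
+ *		u8 backlight_level;	// 0-255
+ *	} __packed;
+ */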
+/* pending sbios requests */
+#       define ATIF_DISPLAY_SWITCH_REQUEST                         (1 << 0)
+#       define ATIF_EXPANSION_MODE_CHANGE_REQUEST                  (1 << 1)
+#       define ATIF_THERMAL_STATE_CHANGE_REQUEST                   (1 << 2)
+#       define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST              (1 << 3)
+#       define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST             (1 << 4)
+#       define ATIF_DISPLAY_CONF_CHANGE_REQUEST                    (1 << 5)
+#       define ATIF_PX_GFX_SWITCH_REQUEST                          (1 << 6)
+#       define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST                (1 << 7)
+#       define ATIF_DGPU_DISPLAY_EVENT                             (1 << 8)
+/* panel expansion mode */
+#       define ATIF_PANEL_EXPANSION_DISABLE                        0
+#       define ATIF_PANEL_EXPANSION_FULL                           1
+#       define ATIF_PANEL_EXPANSION_ASPECT                         2
+/* target gfx controller */
+#       define ATIF_TARGET_GFX_SINGLE                              0
+#       define ATIF_TARGET_GFX_PX_IGPU                             1
+#       define ATIF_TARGET_GFX_PX_DGPU                             2
+/* system power source */
+#       define ATIF_POWER_SOURCE_AC                                1
+#       define ATIF_POWER_SOURCE_DC                                2
+#       define ATIF_POWER_SOURCE_RESTRICTED_AC_1                   3
+#       define ATIF_POWER_SOURCE_RESTRICTED_AC_2                   4
+#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS                       0x3
+/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - selected displays
+ * WORD  - connected displays
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - selected displays
+ */
+#       define ATIF_LCD1                                           (1 << 0)
+#       define ATIF_CRT1                                           (1 << 1)
+#       define ATIF_TV                                             (1 << 2)
+#       define ATIF_DFP1                                           (1 << 3)
+#       define ATIF_CRT2                                           (1 << 4)
+#       define ATIF_LCD2                                           (1 << 5)
+#       define ATIF_DFP2                                           (1 << 7)
+#       define ATIF_CV                                             (1 << 8)
+#       define ATIF_DFP3                                           (1 << 9)
+#       define ATIF_DFP4                                           (1 << 10)
+#       define ATIF_DFP5                                           (1 << 11)
+#       define ATIF_DFP6                                           (1 << 12)
+#define ATIF_FUNCTION_GET_LID_STATE                                0x4
+/* ARG0: ATIF_FUNCTION_GET_LID_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - lid state (0: open, 1: closed)
+ *
+ * GET_LID_STATE only works at boot and resume; for general lid
+ * status, use the kernel-provided status
+ */
+#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS                    0x5
+/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - 0
+ * BYTE  - TV standard
+ */
+#       define ATIF_TV_STD_NTSC                                    0
+#       define ATIF_TV_STD_PAL                                     1
+#       define ATIF_TV_STD_PALM                                    2
+#       define ATIF_TV_STD_PAL60                                   3
+#       define ATIF_TV_STD_NTSCJ                                   4
+#       define ATIF_TV_STD_PALCN                                   5
+#       define ATIF_TV_STD_PALN                                    6
+#       define ATIF_TV_STD_SCART_RGB                               9
+#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS                      0x6
+/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - 0
+ * BYTE  - TV standard
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS           0x7
+/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - panel expansion mode
+ */
+#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS             0x8
+/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - panel expansion mode
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION              0xD
+/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - gfx controller id
+ * BYTE  - current temperature (degrees Celsius)
+ * OUTPUT: none
+ */
+#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES                    0xF
+/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of gfx devices
+ * WORD  - device structure size in bytes (excludes device size field)
+ * DWORD - flags         \
+ * WORD  - bus number     } repeated structure
+ * WORD  - device number /
+ */
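+/* Illustrative walk over the repeated device records (placeholders, not
+ * driver code); buf points at the output buffer, num_devices/rec_size
+ * come from the two leading WORDs:
+ *
+ *	u8 *rec = buf + 4;
+ *	for (i = 0; i < num_devices; i++, rec += rec_size) {
+ *		u32 flags = le32_to_cpup((__le32 *)rec);
+ *		u16 bus   = le16_to_cpup((__le16 *)(rec + 4));
+ *		u16 dev   = le16_to_cpup((__le16 *)(rec + 6));
+ *	}
+ */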
+/* flags */
+#       define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE                   (1 << 0)
+#       define ATIF_XGP_PORT                                       (1 << 1)
+#       define ATIF_VGA_ENABLED_GRAPHICS_DEVICE                    (1 << 2)
+#       define ATIF_XGP_PORT_IN_DOCK                               (1 << 3)
+
+/* ATPX */
+#define ATPX_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATPX_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported functions bit vector
+ */
+/* supported functions vector */
+#       define ATPX_GET_PX_PARAMETERS_SUPPORTED                    (1 << 0)
+#       define ATPX_POWER_CONTROL_SUPPORTED                        (1 << 1)
+#       define ATPX_DISPLAY_MUX_CONTROL_SUPPORTED                  (1 << 2)
+#       define ATPX_I2C_MUX_CONTROL_SUPPORTED                      (1 << 3)
+#       define ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED (1 << 4)
+#       define ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED   (1 << 5)
+#       define ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED       (1 << 7)
+#       define ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED          (1 << 8)
+#define ATPX_FUNCTION_GET_PX_PARAMETERS                            0x1
+/* ARG0: ATPX_FUNCTION_GET_PX_PARAMETERS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags
+ */
+/* flags */
+#       define ATPX_LVDS_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 0)
+#       define ATPX_CRT1_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 1)
+#       define ATPX_DVI1_I2C_AVAILABLE_TO_BOTH_GPUS                (1 << 2)
+#       define ATPX_CRT1_RGB_SIGNAL_MUXED                          (1 << 3)
+#       define ATPX_TV_SIGNAL_MUXED                                (1 << 4)
+#       define ATPX_DFP_SIGNAL_MUXED                               (1 << 5)
+#       define ATPX_SEPARATE_MUX_FOR_I2C                           (1 << 6)
+#       define ATPX_DYNAMIC_PX_SUPPORTED                           (1 << 7)
+#       define ATPX_ACF_NOT_SUPPORTED                              (1 << 8)
+#       define ATPX_FIXED_NOT_SUPPORTED                            (1 << 9)
+#       define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED               (1 << 10)
+#       define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS                    (1 << 11)
+#define ATPX_FUNCTION_POWER_CONTROL                                0x2
+/* ARG0: ATPX_FUNCTION_POWER_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - dGPU power state (0: power off, 1: power on)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_DISPLAY_MUX_CONTROL                          0x3
+/* ARG0: ATPX_FUNCTION_DISPLAY_MUX_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - display mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#       define ATPX_INTEGRATED_GPU                                 0
+#       define ATPX_DISCRETE_GPU                                   1
+#define ATPX_FUNCTION_I2C_MUX_CONTROL                              0x4
+/* ARG0: ATPX_FUNCTION_I2C_MUX_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - i2c/aux/hpd mux control (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION    0x5
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION      0x6
+/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - target gpu (0: iGPU, 1: dGPU)
+ * OUTPUT: none
+ */
+#define ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING               0x8
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of display connectors
+ * WORD  - connector structure size in bytes (excludes connector size field)
+ * BYTE  - flags                                                     \
+ * BYTE  - ATIF display vector bit position                           } repeated
+ * BYTE  - adapter id (0: iGPU, 1-n: dGPU ordered by pcie bus number) } structure
+ * WORD  - connector ACPI id                                         /
+ */
+/* flags */
+#       define ATPX_DISPLAY_OUTPUT_SUPPORTED_BY_ADAPTER_ID_DEVICE  (1 << 0)
+#       define ATPX_DISPLAY_HPD_SUPPORTED_BY_ADAPTER_ID_DEVICE     (1 << 1)
+#       define ATPX_DISPLAY_I2C_SUPPORTED_BY_ADAPTER_ID_DEVICE     (1 << 2)
+#define ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS                  0x9
+/* ARG0: ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - number of HPD/DDC ports
+ * WORD  - port structure size in bytes (excludes port size field)
+ * BYTE  - ATIF display vector bit position \
+ * BYTE  - hpd id                            } repeated structure
+ * BYTE  - ddc id                           /
+ *
+ * available on A+A systems only
+ */
+/* hpd id */
+#       define ATPX_HPD_NONE                                       0
+#       define ATPX_HPD1                                           1
+#       define ATPX_HPD2                                           2
+#       define ATPX_HPD3                                           3
+#       define ATPX_HPD4                                           4
+#       define ATPX_HPD5                                           5
+#       define ATPX_HPD6                                           6
+/* ddc id */
+#       define ATPX_DDC_NONE                                       0
+#       define ATPX_DDC1                                           1
+#       define ATPX_DDC2                                           2
+#       define ATPX_DDC3                                           3
+#       define ATPX_DDC4                                           4
+#       define ATPX_DDC5                                           5
+#       define ATPX_DDC6                                           6
+#       define ATPX_DDC7                                           7
+#       define ATPX_DDC8                                           8
+
+/* ATCS */
+#define ATCS_FUNCTION_VERIFY_INTERFACE                             0x0
+/* ARG0: ATCS_FUNCTION_VERIFY_INTERFACE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - version
+ * DWORD - supported functions bit vector
+ */
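+/* This layout corresponds to struct atcs_verify_interface consumed by
+ * radeon_atcs_verify_interface() in radeon_acpi.c. */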
+/* supported functions vector */
+#       define ATCS_GET_EXTERNAL_STATE_SUPPORTED                   (1 << 0)
+#       define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED             (1 << 1)
+#       define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED       (1 << 2)
+#       define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED                   (1 << 3)
+#define ATCS_FUNCTION_GET_EXTERNAL_STATE                           0x1
+/* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE
+ * ARG1: none
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * DWORD - valid flags mask
+ * DWORD - flags (0: undocked, 1: docked)
+ */
+/* flags */
+#       define ATCS_DOCKED                                         (1 << 0)
+#define ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST                     0x2
+/* ARG0: ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * WORD  - valid flags mask
+ * WORD  - flags
+ * BYTE  - request type
+ * BYTE  - performance request
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - return value
+ */
+/* flags */
+#       define ATCS_ADVERTISE_CAPS                                 (1 << 0)
+#       define ATCS_WAIT_FOR_COMPLETION                            (1 << 1)
+/* request type */
+#       define ATCS_PCIE_LINK_SPEED                                1
+/* performance request */
+#       define ATCS_REMOVE                                         0
+#       define ATCS_FORCE_LOW_POWER                                1
+#       define ATCS_PERF_LEVEL_1                                   2 /* PCIE Gen 1 */
+#       define ATCS_PERF_LEVEL_2                                   3 /* PCIE Gen 2 */
+#       define ATCS_PERF_LEVEL_3                                   4 /* PCIE Gen 3 */
+/* return value */
+#       define ATCS_REQUEST_REFUSED                                1
+#       define ATCS_REQUEST_COMPLETE                               2
+#       define ATCS_REQUEST_IN_PROGRESS                            3
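+/* Sketch of the client id packing described above (illustrative; pdev is
+ * an assumed struct pci_dev pointer). PCI devfn already encodes the dev
+ * num in bits 7:3 and the func num in bits 2:0:
+ *
+ *	u16 client_id = (pdev->bus->number << 8) | pdev->devfn;
+ */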
+#define ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION               0x3
+/* ARG0: ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION
+ * ARG1: none
+ * OUTPUT: none
+ */
+#define ATCS_FUNCTION_SET_PCIE_BUS_WIDTH                           0x4
+/* ARG0: ATCS_FUNCTION_SET_PCIE_BUS_WIDTH
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * BYTE  - number of active lanes
+ * OUTPUT:
+ * WORD  - structure size in bytes (includes size field)
+ * BYTE  - number of active lanes
+ */
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_agp.c b/linux-imx/drivers/gpu/drm/radeon/radeon_agp.c
new file mode 100644
index 0000000..4243334
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_agp.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dave Airlie
+ *    Jerome Glisse <glisse@freedesktop.org>
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include <drm/radeon_drm.h>
+
+#if __OS_HAS_AGP
+
+struct radeon_agpmode_quirk {
+	u32 hostbridge_vendor;
+	u32 hostbridge_device;
+	u32 chip_vendor;
+	u32 chip_device;
+	u32 subsys_vendor;
+	u32 subsys_device;
+	u32 default_mode;
+};
+
+static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
+	/* Intel E7505 Memory Controller Hub / RV350 AR [Radeon 9600XT] Needs AGPMode 4 (deb #515326) */
+	{ PCI_VENDOR_ID_INTEL, 0x2550, PCI_VENDOR_ID_ATI, 0x4152, 0x1458, 0x4038, 4},
+	/* Intel 82865G/PE/P DRAM Controller/Host-Hub / Mobility 9800 Needs AGPMode 4 (deb #462590) */
+	{ PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x4a4e, PCI_VENDOR_ID_DELL, 0x5106, 4},
+	/* Intel 82865G/PE/P DRAM Controller/Host-Hub / RV280 [Radeon 9200 SE] Needs AGPMode 4 (lp #300304) */
+	{ PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x5964,
+		0x148c, 0x2073, 4},
+	/* Intel 82855PM Processor to I/O Controller / Mobility M6 LY Needs AGPMode 1 (deb #467235) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c59,
+		PCI_VENDOR_ID_IBM, 0x052f, 1},
+	/* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
+		PCI_VENDOR_ID_IBM, 0x0550, 1},
+	/* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
+		PCI_VENDOR_ID_IBM, 0x0530, 1},
+	/* Intel 82855PM host bridge / FireGL Mobility T2 RV350 Needs AGPMode 2 (fdo #20647) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e54,
+		PCI_VENDOR_ID_IBM, 0x054f, 2},
+	/* Intel 82855PM host bridge / Mobility M9+ / VaioPCG-V505DX Needs AGPMode 2 (fdo #17928) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
+		PCI_VENDOR_ID_SONY, 0x816b, 2},
+	/* Intel 82855PM Processor to I/O Controller / Mobility M9+ Needs AGPMode 8 (phoronix forum) */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
+		PCI_VENDOR_ID_SONY, 0x8195, 8},
+	/* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
+	{ PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
+		PCI_VENDOR_ID_DELL, 0x00e3, 2},
+	/* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
+		PCI_VENDOR_ID_DELL, 0x0149, 1},
+	/* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */
+	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+		PCI_VENDOR_ID_IBM, 0x0531, 1},
+	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+		0x1025, 0x0061, 1},
+	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #203007) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+		0x1025, 0x0064, 1},
+	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #141551) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+		PCI_VENDOR_ID_ASUSTEK, 0x1942, 1},
+	/* Intel 82852/82855 host bridge / Mobility 9600/9700 Needs AGPMode 1 (deb #510208) */
+	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+		0x10cf, 0x127f, 1},
+	/* ASRock K7VT4A+ AGP 8x / ATI Radeon 9250 AGP Needs AGPMode 4 (lp #133192) */
+	{ 0x1849, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
+		0x1787, 0x5960, 4},
+	/* VIA K8M800 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (fdo #12544) */
+	{ PCI_VENDOR_ID_VIA, 0x0204, PCI_VENDOR_ID_ATI, 0x5960,
+		0x17af, 0x2020, 4},
+	/* VIA KT880 Host Bridge / RV350 [Radeon 9550] Needs AGPMode 4 (fdo #19981) */
+	{ PCI_VENDOR_ID_VIA, 0x0269, PCI_VENDOR_ID_ATI, 0x4153,
+		PCI_VENDOR_ID_ASUSTEK, 0x003c, 4},
+	/* VIA VT8363 Host Bridge / R200 QL [Radeon 8500] Needs AGPMode 2 (lp #141551) */
+	{ PCI_VENDOR_ID_VIA, 0x0305, PCI_VENDOR_ID_ATI, 0x514c,
+		PCI_VENDOR_ID_ATI, 0x013a, 2},
+	/* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 (deb #515512) */
+	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
+		PCI_VENDOR_ID_ASUSTEK, 0x004c, 2},
+	/* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 */
+	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
+		PCI_VENDOR_ID_ASUSTEK, 0x0054, 2},
+	/* VIA VT8377 Host Bridge / R200 QM [Radeon 9100] Needs AGPMode 4 (deb #461144) */
+	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x514d,
+		0x174b, 0x7149, 4},
+	/* VIA VT8377 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (lp #312693) */
+	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
+		0x1462, 0x0380, 4},
+	/* VIA VT8377 Host Bridge / RV280 Needs AGPMode 4 (ati ML) */
+	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5964,
+		0x148c, 0x2073, 4},
+	/* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
+	{ PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
+		PCI_VENDOR_ID_SONY, 0x8175, 1},
+	/* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */
+	{ PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47,
+		PCI_VENDOR_ID_ATI, 0x0152, 2},
+	{ 0, 0, 0, 0, 0, 0, 0 },
+};
+#endif
+
+int radeon_agp_init(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+	struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
+	struct drm_agp_mode mode;
+	struct drm_agp_info info;
+	uint32_t agp_status;
+	int default_mode;
+	bool is_v3;
+	int ret;
+
+	/* Acquire AGP. */
+	ret = drm_agp_acquire(rdev->ddev);
+	if (ret) {
+		DRM_ERROR("Unable to acquire AGP: %d\n", ret);
+		return ret;
+	}
+
+	ret = drm_agp_info(rdev->ddev, &info);
+	if (ret) {
+		drm_agp_release(rdev->ddev);
+		DRM_ERROR("Unable to get AGP info: %d\n", ret);
+		return ret;
+	}
+
+	if (rdev->ddev->agp->agp_info.aper_size < 32) {
+		drm_agp_release(rdev->ddev);
+		dev_warn(rdev->dev, "AGP aperture too small (%zuM), "
+			"need at least 32M, disabling AGP\n",
+			rdev->ddev->agp->agp_info.aper_size);
+		return -EINVAL;
+	}
+
+	mode.mode = info.mode;
+	/* chips with the agp to pcie bridge don't have the AGP_STATUS register;
+	 * just use whatever mode the host sets up.
+	 */
+	if (rdev->family <= CHIP_RV350)
+		agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+	else
+		agp_status = mode.mode;
+	is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
+
+	if (is_v3) {
+		default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4;
+	} else {
+		if (agp_status & RADEON_AGP_4X_MODE) {
+			default_mode = 4;
+		} else if (agp_status & RADEON_AGP_2X_MODE) {
+			default_mode = 2;
+		} else {
+			default_mode = 1;
+		}
+	}
+
+	/* Apply AGPMode Quirks */
+	while (p && p->chip_device != 0) {
+		if (info.id_vendor == p->hostbridge_vendor &&
+		    info.id_device == p->hostbridge_device &&
+		    rdev->pdev->vendor == p->chip_vendor &&
+		    rdev->pdev->device == p->chip_device &&
+		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+		    rdev->pdev->subsystem_device == p->subsys_device) {
+			default_mode = p->default_mode;
+		}
+		++p;
+	}
+
+	if (radeon_agpmode > 0) {
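+		/* a user-requested mode must be a power of two within the
+		 * valid range for this AGP generation; (x & (x - 1)) != 0
+		 * rejects non-powers-of-two */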
+		if ((radeon_agpmode < (is_v3 ? 4 : 1)) ||
+		    (radeon_agpmode > (is_v3 ? 8 : 4)) ||
+		    (radeon_agpmode & (radeon_agpmode - 1))) {
+			DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n",
+				  radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4",
+				  default_mode);
+			radeon_agpmode = default_mode;
+		} else {
+			DRM_INFO("AGP mode requested: %d\n", radeon_agpmode);
+		}
+	} else {
+		radeon_agpmode = default_mode;
+	}
+
+	mode.mode &= ~RADEON_AGP_MODE_MASK;
+	if (is_v3) {
+		switch (radeon_agpmode) {
+		case 8:
+			mode.mode |= RADEON_AGPv3_8X_MODE;
+			break;
+		case 4:
+		default:
+			mode.mode |= RADEON_AGPv3_4X_MODE;
+			break;
+		}
+	} else {
+		switch (radeon_agpmode) {
+		case 4:
+			mode.mode |= RADEON_AGP_4X_MODE;
+			break;
+		case 2:
+			mode.mode |= RADEON_AGP_2X_MODE;
+			break;
+		case 1:
+		default:
+			mode.mode |= RADEON_AGP_1X_MODE;
+			break;
+		}
+	}
+
+	mode.mode &= ~RADEON_AGP_FW_MODE; /* disable AGP fast writes */
+	ret = drm_agp_enable(rdev->ddev, mode);
+	if (ret) {
+		DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
+		drm_agp_release(rdev->ddev);
+		return ret;
+	}
+
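+	/* the AGP aperture size is reported in MB; shift by 20 to get the
+	 * GTT size in bytes */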
+	rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
+	rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
+	rdev->mc.gtt_start = rdev->mc.agp_base;
+	rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
+	dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+		rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end);
+
+	/* workaround some hw issues */
+	if (rdev->family < CHIP_R200) {
+		WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000);
+	}
+	return 0;
+#else
+	return 0;
+#endif
+}
+
+void radeon_agp_resume(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+	int r;
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			dev_warn(rdev->dev, "radeon AGP reinit failed\n");
+	}
+#endif
+}
+
+void radeon_agp_fini(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+	if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
+		drm_agp_release(rdev->ddev);
+	}
+#endif
+}
+
+void radeon_agp_suspend(struct radeon_device *rdev)
+{
+	radeon_agp_fini(rdev);
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_asic.c b/linux-imx/drivers/gpu/drm/radeon/radeon_asic.c
new file mode 100644
index 0000000..de36c47
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_asic.c
@@ -0,0 +1,2097 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+#include <linux/console.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/*
+ * Register accessor functions.
+ */
+/**
+ * radeon_invalid_rreg - dummy reg read function
+ *
+ * @rdev: radeon device pointer
+ * @reg: offset of register
+ *
+ * Dummy register read function.  Used for register blocks
+ * that certain asics don't have (all asics).
+ * Returns the value in the register.
+ */
+static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+	BUG_ON(1);
+	return 0;
+}
+
+/**
+ * radeon_invalid_wreg - dummy reg write function
+ *
+ * @rdev: radeon device pointer
+ * @reg: offset of register
+ * @v: value to write to the register
+ *
+ * Dummy register write function.  Used for register blocks
+ * that certain asics don't have (all asics).
+ */
+static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
+		  reg, v);
+	BUG_ON(1);
+}
+
+/**
+ * radeon_register_accessor_init - sets up the register accessor callbacks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Sets up the register accessor callbacks for various register
+ * apertures.  Not all asics have all apertures (all asics).
+ */
+static void radeon_register_accessor_init(struct radeon_device *rdev)
+{
+	rdev->mc_rreg = &radeon_invalid_rreg;
+	rdev->mc_wreg = &radeon_invalid_wreg;
+	rdev->pll_rreg = &radeon_invalid_rreg;
+	rdev->pll_wreg = &radeon_invalid_wreg;
+	rdev->pciep_rreg = &radeon_invalid_rreg;
+	rdev->pciep_wreg = &radeon_invalid_wreg;
+
+	/* Don't change the order as we are overriding accessors. */
+	if (rdev->family < CHIP_RV515) {
+		rdev->pcie_reg_mask = 0xff;
+	} else {
+		rdev->pcie_reg_mask = 0x7ff;
+	}
+	/* FIXME: not sure here */
+	if (rdev->family <= CHIP_R580) {
+		rdev->pll_rreg = &r100_pll_rreg;
+		rdev->pll_wreg = &r100_pll_wreg;
+	}
+	if (rdev->family >= CHIP_R420) {
+		rdev->mc_rreg = &r420_mc_rreg;
+		rdev->mc_wreg = &r420_mc_wreg;
+	}
+	if (rdev->family >= CHIP_RV515) {
+		rdev->mc_rreg = &rv515_mc_rreg;
+		rdev->mc_wreg = &rv515_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
+		rdev->mc_rreg = &rs400_mc_rreg;
+		rdev->mc_wreg = &rs400_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+		rdev->mc_rreg = &rs690_mc_rreg;
+		rdev->mc_wreg = &rs690_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS600) {
+		rdev->mc_rreg = &rs600_mc_rreg;
+		rdev->mc_wreg = &rs600_mc_wreg;
+	}
+	if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+		rdev->mc_rreg = &rs780_mc_rreg;
+		rdev->mc_wreg = &rs780_mc_wreg;
+	}
+	if (rdev->family >= CHIP_R600) {
+		rdev->pciep_rreg = &r600_pciep_rreg;
+		rdev->pciep_wreg = &r600_pciep_wreg;
+	}
+}
+
+
+/* helper to disable agp */
+/**
+ * radeon_agp_disable - AGP disable helper function
+ *
+ * @rdev: radeon device pointer
+ *
+ * Removes AGP flags and changes the gart callbacks on AGP
+ * cards when using the internal gart rather than AGP (all asics).
+ */
+void radeon_agp_disable(struct radeon_device *rdev)
+{
+	rdev->flags &= ~RADEON_IS_AGP;
+	if (rdev->family >= CHIP_R600) {
+		DRM_INFO("Forcing AGP to PCIE mode\n");
+		rdev->flags |= RADEON_IS_PCIE;
+	} else if (rdev->family >= CHIP_RV515 ||
+			rdev->family == CHIP_RV380 ||
+			rdev->family == CHIP_RV410 ||
+			rdev->family == CHIP_R423) {
+		DRM_INFO("Forcing AGP to PCIE mode\n");
+		rdev->flags |= RADEON_IS_PCIE;
+		rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+		rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+	} else {
+		DRM_INFO("Forcing AGP to PCI mode\n");
+		rdev->flags |= RADEON_IS_PCI;
+		rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+		rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+	}
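+	/* radeon_gart_size module parameter is in MB */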
+	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+}
+
+/*
+ * ASIC
+ */
+static struct radeon_asic r100_asic = {
+	.init = &r100_init,
+	.fini = &r100_fini,
+	.suspend = &r100_suspend,
+	.resume = &r100_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r100_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r100_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.set_page = &r100_pci_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r100_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r100_cs_parse,
+			.ring_start = &r100_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = NULL,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &r100_pre_page_flip,
+		.page_flip = &r100_page_flip,
+		.post_page_flip = &r100_post_page_flip,
+	},
+};
+
+static struct radeon_asic r200_asic = {
+	.init = &r100_init,
+	.fini = &r100_fini,
+	.suspend = &r100_suspend,
+	.resume = &r100_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r100_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r100_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.set_page = &r100_pci_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r100_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r100_cs_parse,
+			.ring_start = &r100_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &r100_pre_page_flip,
+		.page_flip = &r100_page_flip,
+		.post_page_flip = &r100_post_page_flip,
+	},
+};
+
+static struct radeon_asic r300_asic = {
+	.init = &r300_init,
+	.fini = &r300_fini,
+	.suspend = &r300_suspend,
+	.resume = &r300_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r300_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r300_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &r100_pci_gart_tlb_flush,
+		.set_page = &r100_pci_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &r300_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &r100_pre_page_flip,
+		.page_flip = &r100_page_flip,
+		.post_page_flip = &r100_post_page_flip,
+	},
+};
+
+static struct radeon_asic r300_asic_pcie = {
+	.init = &r300_init,
+	.fini = &r300_fini,
+	.suspend = &r300_suspend,
+	.resume = &r300_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r300_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r300_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.set_page = &rv370_pcie_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &r300_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &r100_pre_page_flip,
+		.page_flip = &r100_page_flip,
+		.post_page_flip = &r100_post_page_flip,
+	},
+};
+
+static struct radeon_asic r420_asic = {
+	.init = &r420_init,
+	.fini = &r420_fini,
+	.suspend = &r420_suspend,
+	.resume = &r420_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r300_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r300_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.set_page = &rv370_pcie_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &r300_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &r100_pre_page_flip,
+		.page_flip = &r100_page_flip,
+		.post_page_flip = &r100_post_page_flip,
+	},
+};
+
+static struct radeon_asic rs400_asic = {
+	.init = &rs400_init,
+	.fini = &rs400_fini,
+	.suspend = &rs400_suspend,
+	.resume = &rs400_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &r300_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &rs400_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rs400_gart_tlb_flush,
+		.set_page = &rs400_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &r300_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r100_irq_set,
+		.process = &r100_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &r100_bandwidth_update,
+		.get_vblank_counter = &r100_get_vblank_counter,
+		.wait_for_vblank = &r100_wait_for_vblank,
+		.set_backlight_level = &radeon_legacy_set_backlight_level,
+		.get_backlight_level = &radeon_legacy_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r100_hpd_init,
+		.fini = &r100_hpd_fini,
+		.sense = &r100_hpd_sense,
+		.set_polarity = &r100_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r100_pm_misc,
+		.prepare = &r100_pm_prepare,
+		.finish = &r100_pm_finish,
+		.init_profile = &r100_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_legacy_get_engine_clock,
+		.set_engine_clock = &radeon_legacy_set_engine_clock,
+		.get_memory_clock = &radeon_legacy_get_memory_clock,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_legacy_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &r100_pre_page_flip,
+		.page_flip = &r100_page_flip,
+		.post_page_flip = &r100_post_page_flip,
+	},
+};
+
+static struct radeon_asic rs600_asic = {
+	.init = &rs600_init,
+	.fini = &rs600_fini,
+	.suspend = &rs600_suspend,
+	.resume = &rs600_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &rs600_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &rs600_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rs600_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &r300_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &rs600_irq_set,
+		.process = &rs600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rs600_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+		.hdmi_enable = &r600_hdmi_enable,
+		.hdmi_setmode = &r600_hdmi_setmode,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &rs600_hpd_init,
+		.fini = &rs600_hpd_fini,
+		.sense = &rs600_hpd_sense,
+		.set_polarity = &rs600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rs600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rs600_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic rs690_asic = {
+	.init = &rs690_init,
+	.fini = &rs690_fini,
+	.suspend = &rs690_suspend,
+	.resume = &rs690_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &rs600_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &rs690_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rs400_gart_tlb_flush,
+		.set_page = &rs400_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &r300_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &rs600_irq_set,
+		.process = &rs600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rs690_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+		.hdmi_enable = &r600_hdmi_enable,
+		.hdmi_setmode = &r600_hdmi_setmode,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r200_copy_dma,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &rs600_hpd_init,
+		.fini = &rs600_hpd_fini,
+		.sense = &rs600_hpd_sense,
+		.set_polarity = &rs600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rs600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rs600_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic rv515_asic = {
+	.init = &rv515_init,
+	.fini = &rv515_fini,
+	.suspend = &rv515_suspend,
+	.resume = &rv515_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &rs600_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &rv515_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.set_page = &rv370_pcie_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &rv515_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &rs600_irq_set,
+		.process = &rs600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rv515_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &rs600_hpd_init,
+		.fini = &rs600_hpd_fini,
+		.sense = &rs600_hpd_sense,
+		.set_polarity = &rs600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rs600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rs600_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic r520_asic = {
+	.init = &r520_init,
+	.fini = &rv515_fini,
+	.suspend = &rv515_suspend,
+	.resume = &r520_resume,
+	.vga_set_state = &r100_vga_set_state,
+	.asic_reset = &rs600_asic_reset,
+	.ioctl_wait_idle = NULL,
+	.gui_idle = &r100_gui_idle,
+	.mc_wait_for_idle = &r520_mc_wait_for_idle,
+	.gart = {
+		.tlb_flush = &rv370_pcie_gart_tlb_flush,
+		.set_page = &rv370_pcie_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r100_ring_ib_execute,
+			.emit_fence = &r300_fence_ring_emit,
+			.emit_semaphore = &r100_semaphore_ring_emit,
+			.cs_parse = &r300_cs_parse,
+			.ring_start = &rv515_ring_start,
+			.ring_test = &r100_ring_test,
+			.ib_test = &r100_ib_test,
+			.is_lockup = &r100_gpu_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &rs600_irq_set,
+		.process = &rs600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rv515_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r100_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r200_copy_dma,
+		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.copy = &r100_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r100_set_surface_reg,
+		.clear_reg = r100_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &rs600_hpd_init,
+		.fini = &rs600_hpd_fini,
+		.sense = &rs600_hpd_sense,
+		.set_polarity = &rs600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rs600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r420_pm_init_profile,
+		.get_dynpm_state = &r100_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &rv370_get_pcie_lanes,
+		.set_pcie_lanes = &rv370_set_pcie_lanes,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rs600_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic r600_asic = {
+	.init = &r600_init,
+	.fini = &r600_fini,
+	.suspend = &r600_suspend,
+	.resume = &r600_resume,
+	.vga_set_state = &r600_vga_set_state,
+	.asic_reset = &r600_asic_reset,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &r600_mc_wait_for_idle,
+	.get_xclk = &r600_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.gart = {
+		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r600_ring_ib_execute,
+			.emit_fence = &r600_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &r600_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &r600_gfx_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &r600_dma_ring_ib_execute,
+			.emit_fence = &r600_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &r600_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r600_irq_set,
+		.process = &r600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rv515_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+		.hdmi_enable = &r600_hdmi_enable,
+		.hdmi_setmode = &r600_hdmi_setmode,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r600_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &r600_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r600_hpd_init,
+		.fini = &r600_hpd_fini,
+		.sense = &r600_hpd_sense,
+		.set_polarity = &r600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r600_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = NULL,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rs600_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic rs780_asic = {
+	.init = &r600_init,
+	.fini = &r600_fini,
+	.suspend = &r600_suspend,
+	.resume = &r600_resume,
+	.vga_set_state = &r600_vga_set_state,
+	.asic_reset = &r600_asic_reset,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &r600_mc_wait_for_idle,
+	.get_xclk = &r600_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.gart = {
+		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r600_ring_ib_execute,
+			.emit_fence = &r600_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &r600_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &r600_gfx_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &r600_dma_ring_ib_execute,
+			.emit_fence = &r600_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &r600_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
+		}
+	},
+	.irq = {
+		.set = &r600_irq_set,
+		.process = &r600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rs690_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+		.hdmi_enable = &r600_hdmi_enable,
+		.hdmi_setmode = &r600_hdmi_setmode,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &r600_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &r600_copy_blit,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r600_hpd_init,
+		.fini = &r600_hpd_fini,
+		.sense = &r600_hpd_sense,
+		.set_polarity = &r600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &r600_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &rs780_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = NULL,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rs600_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic rv770_asic = {
+	.init = &rv770_init,
+	.fini = &rv770_fini,
+	.suspend = &rv770_suspend,
+	.resume = &rv770_resume,
+	.asic_reset = &r600_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &r600_mc_wait_for_idle,
+	.get_xclk = &rv770_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.gart = {
+		.tlb_flush = &r600_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &r600_ring_ib_execute,
+			.emit_fence = &r600_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &r600_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &r600_gfx_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &r600_dma_ring_ib_execute,
+			.emit_fence = &r600_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &r600_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &r600_dma_is_lockup,
+		},
+		[R600_RING_TYPE_UVD_INDEX] = {
+			.ib_execute = &r600_uvd_ib_execute,
+			.emit_fence = &r600_uvd_fence_emit,
+			.emit_semaphore = &r600_uvd_semaphore_emit,
+			.cs_parse = &radeon_uvd_cs_parse,
+			.ring_test = &r600_uvd_ring_test,
+			.ib_test = &r600_uvd_ib_test,
+			.is_lockup = &radeon_ring_test_lockup,
+		}
+	},
+	.irq = {
+		.set = &r600_irq_set,
+		.process = &r600_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &rv515_bandwidth_update,
+		.get_vblank_counter = &rs600_get_vblank_counter,
+		.wait_for_vblank = &avivo_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+		.hdmi_enable = &r600_hdmi_enable,
+		.hdmi_setmode = &r600_hdmi_setmode,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &rv770_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &rv770_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &r600_hpd_init,
+		.fini = &r600_hpd_fini,
+		.sense = &r600_hpd_sense,
+		.set_polarity = &r600_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &rv770_pm_misc,
+		.prepare = &rs600_pm_prepare,
+		.finish = &rs600_pm_finish,
+		.init_profile = &r600_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = &radeon_atom_set_clock_gating,
+		.set_uvd_clocks = &rv770_set_uvd_clocks,
+	},
+	.pflip = {
+		.pre_page_flip = &rs600_pre_page_flip,
+		.page_flip = &rv770_page_flip,
+		.post_page_flip = &rs600_post_page_flip,
+	},
+};
+
+static struct radeon_asic evergreen_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.asic_reset = &evergreen_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &rv770_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.gart = {
+		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &evergreen_ring_ib_execute,
+			.emit_fence = &r600_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gfx_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &evergreen_dma_ring_ib_execute,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &evergreen_dma_is_lockup,
+		},
+		[R600_RING_TYPE_UVD_INDEX] = {
+			.ib_execute = &r600_uvd_ib_execute,
+			.emit_fence = &r600_uvd_fence_emit,
+			.emit_semaphore = &r600_uvd_semaphore_emit,
+			.cs_parse = &radeon_uvd_cs_parse,
+			.ring_test = &r600_uvd_ring_test,
+			.ib_test = &r600_uvd_ib_test,
+			.is_lockup = &radeon_ring_test_lockup,
+		}
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &evergreen_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+		.hdmi_enable = &evergreen_hdmi_enable,
+		.hdmi_setmode = &evergreen_hdmi_setmode,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &r600_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &evergreen_set_uvd_clocks,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
+static struct radeon_asic sumo_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.asic_reset = &evergreen_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &r600_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.gart = {
+		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &evergreen_ring_ib_execute,
+			.emit_fence = &r600_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gfx_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &evergreen_dma_ring_ib_execute,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &evergreen_dma_is_lockup,
+		},
+		[R600_RING_TYPE_UVD_INDEX] = {
+			.ib_execute = &r600_uvd_ib_execute,
+			.emit_fence = &r600_uvd_fence_emit,
+			.emit_semaphore = &r600_uvd_semaphore_emit,
+			.cs_parse = &radeon_uvd_cs_parse,
+			.ring_test = &r600_uvd_ring_test,
+			.ib_test = &r600_uvd_ib_test,
+			.is_lockup = &radeon_ring_test_lockup,
+		}
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &evergreen_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+		.hdmi_enable = &evergreen_hdmi_enable,
+		.hdmi_setmode = &evergreen_hdmi_setmode,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &sumo_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = NULL,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &sumo_set_uvd_clocks,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
+static struct radeon_asic btc_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.asic_reset = &evergreen_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &rv770_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.gart = {
+		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &evergreen_ring_ib_execute,
+			.emit_fence = &r600_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &evergreen_gfx_is_lockup,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &evergreen_dma_ring_ib_execute,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &evergreen_dma_is_lockup,
+		},
+		[R600_RING_TYPE_UVD_INDEX] = {
+			.ib_execute = &r600_uvd_ib_execute,
+			.emit_fence = &r600_uvd_fence_emit,
+			.emit_semaphore = &r600_uvd_semaphore_emit,
+			.cs_parse = &radeon_uvd_cs_parse,
+			.ring_test = &r600_uvd_ring_test,
+			.ib_test = &r600_uvd_ib_test,
+			.is_lockup = &radeon_ring_test_lockup,
+		}
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &evergreen_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+		.hdmi_enable = &evergreen_hdmi_enable,
+		.hdmi_setmode = &evergreen_hdmi_setmode,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &btc_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &evergreen_set_uvd_clocks,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
+static struct radeon_asic cayman_asic = {
+	.init = &cayman_init,
+	.fini = &cayman_fini,
+	.suspend = &cayman_suspend,
+	.resume = &cayman_resume,
+	.asic_reset = &cayman_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &rv770_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.gart = {
+		.tlb_flush = &cayman_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.vm = {
+		.init = &cayman_vm_init,
+		.fini = &cayman_vm_fini,
+		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.set_page = &cayman_vm_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &cayman_ring_ib_execute,
+			.ib_parse = &evergreen_ib_parse,
+			.emit_fence = &cayman_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &cayman_gfx_is_lockup,
+			.vm_flush = &cayman_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_CP1_INDEX] = {
+			.ib_execute = &cayman_ring_ib_execute,
+			.ib_parse = &evergreen_ib_parse,
+			.emit_fence = &cayman_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &cayman_gfx_is_lockup,
+			.vm_flush = &cayman_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_CP2_INDEX] = {
+			.ib_execute = &cayman_ring_ib_execute,
+			.ib_parse = &evergreen_ib_parse,
+			.emit_fence = &cayman_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &cayman_gfx_is_lockup,
+			.vm_flush = &cayman_vm_flush,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &cayman_dma_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &cayman_dma_vm_flush,
+		},
+		[R600_RING_TYPE_UVD_INDEX] = {
+			.ib_execute = &r600_uvd_ib_execute,
+			.emit_fence = &r600_uvd_fence_emit,
+			.emit_semaphore = &cayman_uvd_semaphore_emit,
+			.cs_parse = &radeon_uvd_cs_parse,
+			.ring_test = &r600_uvd_ring_test,
+			.ib_test = &r600_uvd_ib_test,
+			.is_lockup = &radeon_ring_test_lockup,
+		}
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &evergreen_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+		.hdmi_enable = &evergreen_hdmi_enable,
+		.hdmi_setmode = &evergreen_hdmi_setmode,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &btc_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &evergreen_set_uvd_clocks,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
+static struct radeon_asic trinity_asic = {
+	.init = &cayman_init,
+	.fini = &cayman_fini,
+	.suspend = &cayman_suspend,
+	.resume = &cayman_resume,
+	.asic_reset = &cayman_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &r600_get_xclk,
+	.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
+	.gart = {
+		.tlb_flush = &cayman_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.vm = {
+		.init = &cayman_vm_init,
+		.fini = &cayman_vm_fini,
+		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.set_page = &cayman_vm_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &cayman_ring_ib_execute,
+			.ib_parse = &evergreen_ib_parse,
+			.emit_fence = &cayman_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &cayman_gfx_is_lockup,
+			.vm_flush = &cayman_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_CP1_INDEX] = {
+			.ib_execute = &cayman_ring_ib_execute,
+			.ib_parse = &evergreen_ib_parse,
+			.emit_fence = &cayman_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &cayman_gfx_is_lockup,
+			.vm_flush = &cayman_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_CP2_INDEX] = {
+			.ib_execute = &cayman_ring_ib_execute,
+			.ib_parse = &evergreen_ib_parse,
+			.emit_fence = &cayman_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = &evergreen_cs_parse,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &cayman_gfx_is_lockup,
+			.vm_flush = &cayman_vm_flush,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &cayman_dma_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = &evergreen_dma_cs_parse,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &cayman_dma_is_lockup,
+			.vm_flush = &cayman_dma_vm_flush,
+		},
+		[R600_RING_TYPE_UVD_INDEX] = {
+			.ib_execute = &r600_uvd_ib_execute,
+			.emit_fence = &r600_uvd_fence_emit,
+			.emit_semaphore = &cayman_uvd_semaphore_emit,
+			.cs_parse = &radeon_uvd_cs_parse,
+			.ring_test = &r600_uvd_ring_test,
+			.ib_test = &r600_uvd_ib_test,
+			.is_lockup = &radeon_ring_test_lockup,
+		}
+	},
+	.irq = {
+		.set = &evergreen_irq_set,
+		.process = &evergreen_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &dce6_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = &r600_copy_blit,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &evergreen_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &evergreen_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &sumo_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = NULL,
+		.set_memory_clock = NULL,
+		.get_pcie_lanes = NULL,
+		.set_pcie_lanes = NULL,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &sumo_set_uvd_clocks,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
+static struct radeon_asic si_asic = {
+	.init = &si_init,
+	.fini = &si_fini,
+	.suspend = &si_suspend,
+	.resume = &si_resume,
+	.asic_reset = &si_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.gui_idle = &r600_gui_idle,
+	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
+	.get_xclk = &si_get_xclk,
+	.get_gpu_clock_counter = &si_get_gpu_clock_counter,
+	.gart = {
+		.tlb_flush = &si_pcie_gart_tlb_flush,
+		.set_page = &rs600_gart_set_page,
+	},
+	.vm = {
+		.init = &si_vm_init,
+		.fini = &si_vm_fini,
+		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.set_page = &si_vm_set_page,
+	},
+	.ring = {
+		[RADEON_RING_TYPE_GFX_INDEX] = {
+			.ib_execute = &si_ring_ib_execute,
+			.ib_parse = &si_ib_parse,
+			.emit_fence = &si_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &si_gfx_is_lockup,
+			.vm_flush = &si_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_CP1_INDEX] = {
+			.ib_execute = &si_ring_ib_execute,
+			.ib_parse = &si_ib_parse,
+			.emit_fence = &si_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &si_gfx_is_lockup,
+			.vm_flush = &si_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_CP2_INDEX] = {
+			.ib_execute = &si_ring_ib_execute,
+			.ib_parse = &si_ib_parse,
+			.emit_fence = &si_fence_ring_emit,
+			.emit_semaphore = &r600_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_ring_test,
+			.ib_test = &r600_ib_test,
+			.is_lockup = &si_gfx_is_lockup,
+			.vm_flush = &si_vm_flush,
+		},
+		[R600_RING_TYPE_DMA_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &si_dma_is_lockup,
+			.vm_flush = &si_dma_vm_flush,
+		},
+		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
+			.ib_execute = &cayman_dma_ring_ib_execute,
+			.ib_parse = &evergreen_dma_ib_parse,
+			.emit_fence = &evergreen_dma_fence_ring_emit,
+			.emit_semaphore = &r600_dma_semaphore_ring_emit,
+			.cs_parse = NULL,
+			.ring_test = &r600_dma_ring_test,
+			.ib_test = &r600_dma_ib_test,
+			.is_lockup = &si_dma_is_lockup,
+			.vm_flush = &si_dma_vm_flush,
+		},
+		[R600_RING_TYPE_UVD_INDEX] = {
+			.ib_execute = &r600_uvd_ib_execute,
+			.emit_fence = &r600_uvd_fence_emit,
+			.emit_semaphore = &cayman_uvd_semaphore_emit,
+			.cs_parse = &radeon_uvd_cs_parse,
+			.ring_test = &r600_uvd_ring_test,
+			.ib_test = &r600_uvd_ib_test,
+			.is_lockup = &radeon_ring_test_lockup,
+		}
+	},
+	.irq = {
+		.set = &si_irq_set,
+		.process = &si_irq_process,
+	},
+	.display = {
+		.bandwidth_update = &dce6_bandwidth_update,
+		.get_vblank_counter = &evergreen_get_vblank_counter,
+		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
+	},
+	.copy = {
+		.blit = NULL,
+		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.dma = &si_copy_dma,
+		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &si_copy_dma,
+		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+	},
+	.surface = {
+		.set_reg = r600_set_surface_reg,
+		.clear_reg = r600_clear_surface_reg,
+	},
+	.hpd = {
+		.init = &evergreen_hpd_init,
+		.fini = &evergreen_hpd_fini,
+		.sense = &evergreen_hpd_sense,
+		.set_polarity = &evergreen_hpd_set_polarity,
+	},
+	.pm = {
+		.misc = &evergreen_pm_misc,
+		.prepare = &evergreen_pm_prepare,
+		.finish = &evergreen_pm_finish,
+		.init_profile = &sumo_pm_init_profile,
+		.get_dynpm_state = &r600_pm_get_dynpm_state,
+		.get_engine_clock = &radeon_atom_get_engine_clock,
+		.set_engine_clock = &radeon_atom_set_engine_clock,
+		.get_memory_clock = &radeon_atom_get_memory_clock,
+		.set_memory_clock = &radeon_atom_set_memory_clock,
+		.get_pcie_lanes = &r600_get_pcie_lanes,
+		.set_pcie_lanes = &r600_set_pcie_lanes,
+		.set_clock_gating = NULL,
+		.set_uvd_clocks = &si_set_uvd_clocks,
+	},
+	.pflip = {
+		.pre_page_flip = &evergreen_pre_page_flip,
+		.page_flip = &evergreen_page_flip,
+		.post_page_flip = &evergreen_post_page_flip,
+	},
+};
+
+/**
+ * radeon_asic_init - register asic specific callbacks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Registers the appropriate asic-specific callbacks for each
+ * chip family.  Also sets other asic-specific info like the number
+ * of crtcs and the register aperture accessors (all asics).
+ * Returns 0 for success.
+ */
+int radeon_asic_init(struct radeon_device *rdev)
+{
+	radeon_register_accessor_init(rdev);
+
+	/* set the number of crtcs */
+	if (rdev->flags & RADEON_SINGLE_CRTC)
+		rdev->num_crtc = 1;
+	else
+		rdev->num_crtc = 2;
+
+	rdev->has_uvd = false;
+
+	switch (rdev->family) {
+	case CHIP_R100:
+	case CHIP_RV100:
+	case CHIP_RS100:
+	case CHIP_RV200:
+	case CHIP_RS200:
+		rdev->asic = &r100_asic;
+		break;
+	case CHIP_R200:
+	case CHIP_RV250:
+	case CHIP_RS300:
+	case CHIP_RV280:
+		rdev->asic = &r200_asic;
+		break;
+	case CHIP_R300:
+	case CHIP_R350:
+	case CHIP_RV350:
+	case CHIP_RV380:
+		if (rdev->flags & RADEON_IS_PCIE)
+			rdev->asic = &r300_asic_pcie;
+		else
+			rdev->asic = &r300_asic;
+		break;
+	case CHIP_R420:
+	case CHIP_R423:
+	case CHIP_RV410:
+		rdev->asic = &r420_asic;
+		/* handle BIOS-less Macs: fall back to the legacy clock and backlight accessors */
+		if (rdev->bios == NULL) {
+			rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock;
+			rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
+			rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
+			rdev->asic->pm.set_memory_clock = NULL;
+			rdev->asic->display.set_backlight_level = &radeon_legacy_set_backlight_level;
+		}
+		break;
+	case CHIP_RS400:
+	case CHIP_RS480:
+		rdev->asic = &rs400_asic;
+		break;
+	case CHIP_RS600:
+		rdev->asic = &rs600_asic;
+		break;
+	case CHIP_RS690:
+	case CHIP_RS740:
+		rdev->asic = &rs690_asic;
+		break;
+	case CHIP_RV515:
+		rdev->asic = &rv515_asic;
+		break;
+	case CHIP_R520:
+	case CHIP_RV530:
+	case CHIP_RV560:
+	case CHIP_RV570:
+	case CHIP_R580:
+		rdev->asic = &r520_asic;
+		break;
+	case CHIP_R600:
+	case CHIP_RV610:
+	case CHIP_RV630:
+	case CHIP_RV620:
+	case CHIP_RV635:
+	case CHIP_RV670:
+		rdev->asic = &r600_asic;
+		if (rdev->family == CHIP_R600)
+			rdev->has_uvd = false;
+		else
+			rdev->has_uvd = true;
+		break;
+	case CHIP_RS780:
+	case CHIP_RS880:
+		rdev->asic = &rs780_asic;
+		rdev->has_uvd = true;
+		break;
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+	case CHIP_RV740:
+		rdev->asic = &rv770_asic;
+		rdev->has_uvd = true;
+		break;
+	case CHIP_CEDAR:
+	case CHIP_REDWOOD:
+	case CHIP_JUNIPER:
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		/* set num crtcs */
+		if (rdev->family == CHIP_CEDAR)
+			rdev->num_crtc = 4;
+		else
+			rdev->num_crtc = 6;
+		rdev->asic = &evergreen_asic;
+		rdev->has_uvd = true;
+		break;
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+		rdev->asic = &sumo_asic;
+		rdev->has_uvd = true;
+		break;
+	case CHIP_BARTS:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
+		/* set num crtcs */
+		if (rdev->family == CHIP_CAICOS)
+			rdev->num_crtc = 4;
+		else
+			rdev->num_crtc = 6;
+		rdev->asic = &btc_asic;
+		rdev->has_uvd = true;
+		break;
+	case CHIP_CAYMAN:
+		rdev->asic = &cayman_asic;
+		/* set num crtcs */
+		rdev->num_crtc = 6;
+		rdev->has_uvd = true;
+		break;
+	case CHIP_ARUBA:
+		rdev->asic = &trinity_asic;
+		/* set num crtcs */
+		rdev->num_crtc = 4;
+		rdev->has_uvd = true;
+		break;
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+	case CHIP_OLAND:
+	case CHIP_HAINAN:
+		rdev->asic = &si_asic;
+		/* set num crtcs */
+		if (rdev->family == CHIP_HAINAN)
+			rdev->num_crtc = 0;
+		else if (rdev->family == CHIP_OLAND)
+			rdev->num_crtc = 2;
+		else
+			rdev->num_crtc = 6;
+		if (rdev->family == CHIP_HAINAN)
+			rdev->has_uvd = false;
+		else
+			rdev->has_uvd = true;
+		break;
+	default:
+		/* FIXME: not supported yet */
+		return -EINVAL;
+	}
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		rdev->asic->pm.get_memory_clock = NULL;
+		rdev->asic->pm.set_memory_clock = NULL;
+	}
+
+	return 0;
+}
+
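The tables above exist so the rest of the driver never branches on the chip family at call sites: radeon_asic_init() installs one table in rdev->asic, and everything else reaches the hardware through it. In the driver this indirection is wrapped in one-line macros in radeon.h; the sketch below shows the pattern with illustrative macro and caller names, inferred from the structure layout registered above rather than quoted from that header.

	/* Minimal sketch of the dispatch pattern; not part of the patch.
	 * Macro and function names here are illustrative -- the real
	 * wrappers live in radeon.h. */
	#define radeon_asic_reset(rdev)      (rdev)->asic->asic_reset((rdev))
	#define radeon_gart_tlb_flush(rdev)  (rdev)->asic->gart.tlb_flush((rdev))

	static int example_gpu_reset(struct radeon_device *rdev)
	{
		/* Resolves to r300_asic_reset(), rs600_asic_reset(), ...,
		 * depending on which table radeon_asic_init() selected. */
		return radeon_asic_reset(rdev);
	}
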
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_asic.h b/linux-imx/drivers/gpu/drm/radeon/radeon_asic.h
new file mode 100644
index 0000000..34223fc
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_asic.h
@@ -0,0 +1,555 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RADEON_ASIC_H__
+#define __RADEON_ASIC_H__
+
+/*
+ * common functions
+ */
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
+void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev);
+void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+
+uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
+void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev);
+void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+
+void atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
+u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
+void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
+u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
+
+
+/*
+ * r100,rv100,rs100,rv200,rs200
+ */
+struct r100_mc_save {
+	u32	GENMO_WT;
+	u32	CRTC_EXT_CNTL;
+	u32	CRTC_GEN_CNTL;
+	u32	CRTC2_GEN_CNTL;
+	u32	CUR_OFFSET;
+	u32	CUR2_OFFSET;
+};
+int r100_init(struct radeon_device *rdev);
+void r100_fini(struct radeon_device *rdev);
+int r100_suspend(struct radeon_device *rdev);
+int r100_resume(struct radeon_device *rdev);
+void r100_vga_set_state(struct radeon_device *rdev, bool state);
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int r100_asic_reset(struct radeon_device *rdev);
+u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
+void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+int r100_irq_set(struct radeon_device *rdev);
+int r100_irq_process(struct radeon_device *rdev);
+void r100_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence);
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *cp,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait);
+int r100_cs_parse(struct radeon_cs_parser *p);
+void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
+int r100_copy_blit(struct radeon_device *rdev,
+		   uint64_t src_offset,
+		   uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence);
+int r100_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size);
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
+void r100_bandwidth_update(struct radeon_device *rdev);
+void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+void r100_hpd_init(struct radeon_device *rdev);
+void r100_hpd_fini(struct radeon_device *rdev);
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd);
+int r100_debugfs_rbbm_init(struct radeon_device *rdev);
+int r100_debugfs_cp_init(struct radeon_device *rdev);
+void r100_cp_disable(struct radeon_device *rdev);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+void r100_cp_fini(struct radeon_device *rdev);
+int r100_pci_gart_init(struct radeon_device *rdev);
+void r100_pci_gart_fini(struct radeon_device *rdev);
+int r100_pci_gart_enable(struct radeon_device *rdev);
+void r100_pci_gart_disable(struct radeon_device *rdev);
+int r100_debugfs_mc_info_init(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void r100_irq_disable(struct radeon_device *rdev);
+void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_vram_init_sizes(struct radeon_device *rdev);
+int r100_cp_reset(struct radeon_device *rdev);
+void r100_vga_render_disable(struct radeon_device *rdev);
+void r100_restore_sanity(struct radeon_device *rdev);
+int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+					 struct radeon_cs_packet *pkt,
+					 struct radeon_bo *robj);
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+			  struct radeon_cs_packet *pkt,
+			  const unsigned *auth, unsigned n,
+			  radeon_packet0_check_t check);
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+			 struct radeon_cs_packet *pkt,
+			 unsigned idx);
+void r100_enable_bm(struct radeon_device *rdev);
+void r100_set_common_regs(struct radeon_device *rdev);
+void r100_bm_disable(struct radeon_device *rdev);
+extern bool r100_gui_idle(struct radeon_device *rdev);
+extern void r100_pm_misc(struct radeon_device *rdev);
+extern void r100_pm_prepare(struct radeon_device *rdev);
+extern void r100_pm_finish(struct radeon_device *rdev);
+extern void r100_pm_init_profile(struct radeon_device *rdev);
+extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
+extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * r200,rv250,rs300,rv280
+ */
+extern int r200_copy_dma(struct radeon_device *rdev,
+			 uint64_t src_offset,
+			 uint64_t dst_offset,
+			 unsigned num_gpu_pages,
+			 struct radeon_fence **fence);
+void r200_set_safe_registers(struct radeon_device *rdev);
+
+/*
+ * r300,r350,rv350,rv380
+ */
+extern int r300_init(struct radeon_device *rdev);
+extern void r300_fini(struct radeon_device *rdev);
+extern int r300_suspend(struct radeon_device *rdev);
+extern int r300_resume(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev);
+extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+extern void r300_fence_ring_emit(struct radeon_device *rdev,
+				struct radeon_fence *fence);
+extern int r300_cs_parse(struct radeon_cs_parser *p);
+extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
+extern void r300_set_reg_safe(struct radeon_device *rdev);
+extern void r300_mc_program(struct radeon_device *rdev);
+extern void r300_mc_init(struct radeon_device *rdev);
+extern void r300_clock_startup(struct radeon_device *rdev);
+extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
+extern int rv370_pcie_gart_init(struct radeon_device *rdev);
+extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
+extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
+extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
+
+/*
+ * r420,r423,rv410
+ */
+extern int r420_init(struct radeon_device *rdev);
+extern void r420_fini(struct radeon_device *rdev);
+extern int r420_suspend(struct radeon_device *rdev);
+extern int r420_resume(struct radeon_device *rdev);
+extern void r420_pm_init_profile(struct radeon_device *rdev);
+extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
+extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
+extern void r420_pipes_init(struct radeon_device *rdev);
+
+/*
+ * rs400,rs480
+ */
+extern int rs400_init(struct radeon_device *rdev);
+extern void rs400_fini(struct radeon_device *rdev);
+extern int rs400_suspend(struct radeon_device *rdev);
+extern int rs400_resume(struct radeon_device *rdev);
+void rs400_gart_tlb_flush(struct radeon_device *rdev);
+int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int rs400_gart_init(struct radeon_device *rdev);
+int rs400_gart_enable(struct radeon_device *rdev);
+void rs400_gart_adjust_size(struct radeon_device *rdev);
+void rs400_gart_disable(struct radeon_device *rdev);
+void rs400_gart_fini(struct radeon_device *rdev);
+extern int rs400_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * rs600.
+ */
+extern int rs600_asic_reset(struct radeon_device *rdev);
+extern int rs600_init(struct radeon_device *rdev);
+extern void rs600_fini(struct radeon_device *rdev);
+extern int rs600_suspend(struct radeon_device *rdev);
+extern int rs600_resume(struct radeon_device *rdev);
+int rs600_irq_set(struct radeon_device *rdev);
+int rs600_irq_process(struct radeon_device *rdev);
+void rs600_irq_disable(struct radeon_device *rdev);
+u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
+void rs600_gart_tlb_flush(struct radeon_device *rdev);
+int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rs600_bandwidth_update(struct radeon_device *rdev);
+void rs600_hpd_init(struct radeon_device *rdev);
+void rs600_hpd_fini(struct radeon_device *rdev);
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+			    enum radeon_hpd_id hpd);
+extern void rs600_pm_misc(struct radeon_device *rdev);
+extern void rs600_pm_prepare(struct radeon_device *rdev);
+extern void rs600_pm_finish(struct radeon_device *rdev);
+extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
+void rs600_set_safe_registers(struct radeon_device *rdev);
+extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
+extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * rs690,rs740
+ */
+int rs690_init(struct radeon_device *rdev);
+void rs690_fini(struct radeon_device *rdev);
+int rs690_resume(struct radeon_device *rdev);
+int rs690_suspend(struct radeon_device *rdev);
+uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rs690_bandwidth_update(struct radeon_device *rdev);
+void rs690_line_buffer_adjust(struct radeon_device *rdev,
+					struct drm_display_mode *mode1,
+					struct drm_display_mode *mode2);
+extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * rv515
+ */
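+/* display state saved around memory controller reprogramming (rv515_mc_stop/rv515_mc_resume) */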
+struct rv515_mc_save {
+	u32 vga_render_control;
+	u32 vga_hdp_control;
+	bool crtc_enabled[2];
+};
+
+int rv515_init(struct radeon_device *rdev);
+void rv515_fini(struct radeon_device *rdev);
+uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+void rv515_bandwidth_update(struct radeon_device *rdev);
+int rv515_resume(struct radeon_device *rdev);
+int rv515_suspend(struct radeon_device *rdev);
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
+void rv515_vga_render_disable(struct radeon_device *rdev);
+void rv515_set_safe_registers(struct radeon_device *rdev);
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_clock_startup(struct radeon_device *rdev);
+void rv515_debugfs(struct radeon_device *rdev);
+int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * r520,rv530,rv560,rv570,r580
+ */
+int r520_init(struct radeon_device *rdev);
+int r520_resume(struct radeon_device *rdev);
+int r520_mc_wait_for_idle(struct radeon_device *rdev);
+
+/*
+ * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
+ */
+int r600_init(struct radeon_device *rdev);
+void r600_fini(struct radeon_device *rdev);
+int r600_suspend(struct radeon_device *rdev);
+int r600_resume(struct radeon_device *rdev);
+void r600_vga_set_state(struct radeon_device *rdev, bool state);
+int r600_wb_init(struct radeon_device *rdev);
+void r600_wb_fini(struct radeon_device *rdev);
+void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
+uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
+void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int r600_cs_parse(struct radeon_cs_parser *p);
+int r600_dma_cs_parse(struct radeon_cs_parser *p);
+void r600_fence_ring_emit(struct radeon_device *rdev,
+			  struct radeon_fence *fence);
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_ring *cp,
+			      struct radeon_semaphore *semaphore,
+			      bool emit_wait);
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+			      struct radeon_fence *fence);
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+				  struct radeon_ring *ring,
+				  struct radeon_semaphore *semaphore,
+				  bool emit_wait);
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_asic_reset(struct radeon_device *rdev);
+int r600_set_surface_reg(struct radeon_device *rdev, int reg,
+			 uint32_t tiling_flags, uint32_t pitch,
+			 uint32_t offset, uint32_t obj_size);
+void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_copy_blit(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages, struct radeon_fence **fence);
+int r600_copy_dma(struct radeon_device *rdev,
+		  uint64_t src_offset, uint64_t dst_offset,
+		  unsigned num_gpu_pages, struct radeon_fence **fence);
+void r600_hpd_init(struct radeon_device *rdev);
+void r600_hpd_fini(struct radeon_device *rdev);
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd);
+extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+extern bool r600_gui_idle(struct radeon_device *rdev);
+extern void r600_pm_misc(struct radeon_device *rdev);
+extern void r600_pm_init_profile(struct radeon_device *rdev);
+extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+extern void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int r600_get_pcie_lanes(struct radeon_device *rdev);
+bool r600_card_posted(struct radeon_device *rdev);
+void r600_cp_stop(struct radeon_device *rdev);
+int r600_cp_start(struct radeon_device *rdev);
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
+int r600_cp_resume(struct radeon_device *rdev);
+void r600_cp_fini(struct radeon_device *rdev);
+int r600_count_pipe_bits(uint32_t val);
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+int r600_pcie_gart_init(struct radeon_device *rdev);
+void r600_scratch_init(struct radeon_device *rdev);
+int r600_blit_init(struct radeon_device *rdev);
+void r600_blit_fini(struct radeon_device *rdev);
+int r600_init_microcode(struct radeon_device *rdev);
+/* r600 irq */
+int r600_irq_process(struct radeon_device *rdev);
+int r600_irq_init(struct radeon_device *rdev);
+void r600_irq_fini(struct radeon_device *rdev);
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_irq_set(struct radeon_device *rdev);
+void r600_irq_suspend(struct radeon_device *rdev);
+void r600_disable_interrupts(struct radeon_device *rdev);
+void r600_rlc_stop(struct radeon_device *rdev);
+/* r600 audio */
+int r600_audio_init(struct radeon_device *rdev);
+struct r600_audio r600_audio_status(struct radeon_device *rdev);
+void r600_audio_fini(struct radeon_device *rdev);
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+/* r600 blit */
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+			   struct radeon_fence **fence, struct radeon_sa_bo **vb,
+			   struct radeon_semaphore **sem);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
+			 struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
+void r600_kms_blit_copy(struct radeon_device *rdev,
+			u64 src_gpu_addr, u64 dst_gpu_addr,
+			unsigned num_gpu_pages,
+			struct radeon_sa_bo *vb);
+u32 r600_get_xclk(struct radeon_device *rdev);
+uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
+
+/* uvd */
+int r600_uvd_init(struct radeon_device *rdev);
+int r600_uvd_rbc_start(struct radeon_device *rdev);
+void r600_uvd_stop(struct radeon_device *rdev);
+int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void r600_uvd_fence_emit(struct radeon_device *rdev,
+			 struct radeon_fence *fence);
+void r600_uvd_semaphore_emit(struct radeon_device *rdev,
+			     struct radeon_ring *ring,
+			     struct radeon_semaphore *semaphore,
+			     bool emit_wait);
+void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+
+/*
+ * rv770,rv730,rv710,rv740
+ */
+int rv770_init(struct radeon_device *rdev);
+void rv770_fini(struct radeon_device *rdev);
+int rv770_suspend(struct radeon_device *rdev);
+int rv770_resume(struct radeon_device *rdev);
+void rv770_pm_misc(struct radeon_device *rdev);
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+void r700_cp_stop(struct radeon_device *rdev);
+void r700_cp_fini(struct radeon_device *rdev);
+int rv770_copy_dma(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence);
+u32 rv770_get_xclk(struct radeon_device *rdev);
+int rv770_uvd_resume(struct radeon_device *rdev);
+int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+
+/*
+ * evergreen
+ */
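+/* display state saved while the memory controller is reprogrammed */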
+struct evergreen_mc_save {
+	u32 vga_render_control;
+	u32 vga_hdp_control;
+	bool crtc_enabled[RADEON_MAX_CRTCS];
+};
+
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int evergreen_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+int evergreen_suspend(struct radeon_device *rdev);
+int evergreen_resume(struct radeon_device *rdev);
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int evergreen_asic_reset(struct radeon_device *rdev);
+void evergreen_bandwidth_update(struct radeon_device *rdev);
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+void evergreen_hpd_init(struct radeon_device *rdev);
+void evergreen_hpd_fini(struct radeon_device *rdev);
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+				enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+int evergreen_irq_process(struct radeon_device *rdev);
+extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
+extern void evergreen_pm_misc(struct radeon_device *rdev);
+extern void evergreen_pm_prepare(struct radeon_device *rdev);
+extern void evergreen_pm_finish(struct radeon_device *rdev);
+extern void sumo_pm_init_profile(struct radeon_device *rdev);
+extern void btc_pm_init_profile(struct radeon_device *rdev);
+int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
+extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
+void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+int evergreen_blit_init(struct radeon_device *rdev);
+int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+				   struct radeon_fence *fence);
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+				   struct radeon_ib *ib);
+int evergreen_copy_dma(struct radeon_device *rdev,
+		       uint64_t src_offset, uint64_t dst_offset,
+		       unsigned num_gpu_pages,
+		       struct radeon_fence **fence);
+void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+
+/*
+ * cayman
+ */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+			    struct radeon_fence *fence);
+void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
+			       struct radeon_ring *ring,
+			       struct radeon_semaphore *semaphore,
+			       bool emit_wait);
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int cayman_init(struct radeon_device *rdev);
+void cayman_fini(struct radeon_device *rdev);
+int cayman_suspend(struct radeon_device *rdev);
+int cayman_resume(struct radeon_device *rdev);
+int cayman_asic_reset(struct radeon_device *rdev);
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int cayman_vm_init(struct radeon_device *rdev);
+void cayman_vm_fini(struct radeon_device *rdev);
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
+void cayman_vm_set_page(struct radeon_device *rdev,
+			struct radeon_ib *ib,
+			uint64_t pe,
+			uint64_t addr, unsigned count,
+			uint32_t incr, uint32_t flags);
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+				struct radeon_ib *ib);
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+
+/* DCE6 - SI */
+void dce6_bandwidth_update(struct radeon_device *rdev);
+
+/*
+ * si
+ */
+void si_fence_ring_emit(struct radeon_device *rdev,
+			struct radeon_fence *fence);
+void si_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int si_init(struct radeon_device *rdev);
+void si_fini(struct radeon_device *rdev);
+int si_suspend(struct radeon_device *rdev);
+int si_resume(struct radeon_device *rdev);
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+int si_asic_reset(struct radeon_device *rdev);
+void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int si_irq_set(struct radeon_device *rdev);
+int si_irq_process(struct radeon_device *rdev);
+int si_vm_init(struct radeon_device *rdev);
+void si_vm_fini(struct radeon_device *rdev);
+void si_vm_set_page(struct radeon_device *rdev,
+		    struct radeon_ib *ib,
+		    uint64_t pe,
+		    uint64_t addr, unsigned count,
+		    uint32_t incr, uint32_t flags);
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int si_copy_dma(struct radeon_device *rdev,
+		uint64_t src_offset, uint64_t dst_offset,
+		unsigned num_gpu_pages,
+		struct radeon_fence **fence);
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+u32 si_get_xclk(struct radeon_device *rdev);
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
+int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_atombios.c b/linux-imx/drivers/gpu/drm/radeon/radeon_atombios.c
new file mode 100644
index 0000000..efb06e3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -0,0 +1,3319 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+
+/* from radeon_encoder.c */
+extern uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+			uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+extern void
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
+			uint32_t supported_device, u16 caps);
+
+/* from radeon_connector.c */
+extern void
+radeon_add_atom_connector(struct drm_device *dev,
+			  uint32_t connector_id,
+			  uint32_t supported_device,
+			  int connector_type,
+			  struct radeon_i2c_bus_rec *i2c_bus,
+			  uint32_t igp_lane_info,
+			  uint16_t connector_object_id,
+			  struct radeon_hpd *hpd,
+			  struct radeon_router *router);
+
+/* from radeon_legacy_encoder.c */
+extern void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
+			  uint32_t supported_device);
+
+/* local */
+static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
+				    u16 voltage_id, u16 *voltage);
+
+union atom_supported_devices {
+	struct _ATOM_SUPPORTED_DEVICES_INFO info;
+	struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
+	struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
+};
+
+static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
+					  ATOM_GPIO_I2C_ASSIGMENT *gpio,
+					  u8 index)
+{
+	/* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
+	if ((rdev->family == CHIP_R420) ||
+	    (rdev->family == CHIP_R423) ||
+	    (rdev->family == CHIP_RV410)) {
+		if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
+		    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
+		    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
+			gpio->ucClkMaskShift = 0x19;
+			gpio->ucDataMaskShift = 0x18;
+		}
+	}
+
+	/* some evergreen boards have bad data for this entry */
+	if (ASIC_IS_DCE4(rdev)) {
+		if ((index == 7) &&
+		    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
+		    (gpio->sucI2cId.ucAccess == 0)) {
+			gpio->sucI2cId.ucAccess = 0x97;
+			gpio->ucDataMaskShift = 8;
+			gpio->ucDataEnShift = 8;
+			gpio->ucDataY_Shift = 8;
+			gpio->ucDataA_Shift = 8;
+		}
+	}
+
+	/* some DCE3 boards have bad data for this entry */
+	if (ASIC_IS_DCE3(rdev)) {
+		if ((index == 4) &&
+		    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
+		    (gpio->sucI2cId.ucAccess == 0x94))
+			gpio->sucI2cId.ucAccess = 0x14;
+	}
+}
+
+static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
+{
+	struct radeon_i2c_bus_rec i2c;
+
+	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+
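+	/* register indices in the ATOM table are DWORD offsets; convert them to byte offsets */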
+	i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+	i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+	i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+	i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+	i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+	i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+	i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+	i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+	i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+	i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+	i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+	i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+	i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+	i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+	i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+	i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+	if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+		i2c.hw_capable = true;
+	else
+		i2c.hw_capable = false;
+
+	if (gpio->sucI2cId.ucAccess == 0xa0)
+		i2c.mm_i2c = true;
+	else
+		i2c.mm_i2c = false;
+
+	i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+	if (i2c.mask_clk_reg)
+		i2c.valid = true;
+	else
+		i2c.valid = false;
+
+	return i2c;
+}
+
+static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
+							       uint8_t id)
+{
+	struct atom_context *ctx = rdev->mode_info.atom_context;
+	ATOM_GPIO_I2C_ASSIGMENT *gpio;
+	struct radeon_i2c_bus_rec i2c;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+	struct _ATOM_GPIO_I2C_INFO *i2c_info;
+	uint16_t data_offset, size;
+	int i, num_indices;
+
+	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+	i2c.valid = false;
+
+	if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+		for (i = 0; i < num_indices; i++) {
+			gpio = &i2c_info->asGPIO_Info[i];
+
+			radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
+
+			if (gpio->sucI2cId.ucAccess == id) {
+				i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
+				break;
+			}
+		}
+	}
+
+	return i2c;
+}
+
+void radeon_atombios_i2c_init(struct radeon_device *rdev)
+{
+	struct atom_context *ctx = rdev->mode_info.atom_context;
+	ATOM_GPIO_I2C_ASSIGMENT *gpio;
+	struct radeon_i2c_bus_rec i2c;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+	struct _ATOM_GPIO_I2C_INFO *i2c_info;
+	uint16_t data_offset, size;
+	int i, num_indices;
+	char stmp[32];
+
+	if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+		for (i = 0; i < num_indices; i++) {
+			gpio = &i2c_info->asGPIO_Info[i];
+
+			radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
+
+			i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
+
+			if (i2c.valid) {
+				sprintf(stmp, "0x%x", i2c.i2c_id);
+				rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
+			}
+		}
+	}
+}
+
+static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
+							u8 id)
+{
+	struct atom_context *ctx = rdev->mode_info.atom_context;
+	struct radeon_gpio_rec gpio;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
+	struct _ATOM_GPIO_PIN_LUT *gpio_info;
+	ATOM_GPIO_PIN_ASSIGNMENT *pin;
+	u16 data_offset, size;
+	int i, num_indices;
+
+	memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
+	gpio.valid = false;
+
+	if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+		gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
+
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+
+		for (i = 0; i < num_indices; i++) {
+			pin = &gpio_info->asGPIO_Pin[i];
+			if (id == pin->ucGPIO_ID) {
+				gpio.id = pin->ucGPIO_ID;
+				gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
+				gpio.mask = (1 << pin->ucGpioPinBitShift);
+				gpio.valid = true;
+				break;
+			}
+		}
+	}
+
+	return gpio;
+}
+
+static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
+							    struct radeon_gpio_rec *gpio)
+{
+	struct radeon_hpd hpd;
+	u32 reg;
+
+	memset(&hpd, 0, sizeof(struct radeon_hpd));
+
+	if (ASIC_IS_DCE6(rdev))
+		reg = SI_DC_GPIO_HPD_A;
+	else if (ASIC_IS_DCE4(rdev))
+		reg = EVERGREEN_DC_GPIO_HPD_A;
+	else
+		reg = AVIVO_DC_GPIO_HPD_A;
+
+	hpd.gpio = *gpio;
+	if (gpio->reg == reg) {
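+		/* each hotplug pin is a fixed bit within the DC_GPIO_HPD_A register */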
+		switch (gpio->mask) {
+		case (1 << 0):
+			hpd.hpd = RADEON_HPD_1;
+			break;
+		case (1 << 8):
+			hpd.hpd = RADEON_HPD_2;
+			break;
+		case (1 << 16):
+			hpd.hpd = RADEON_HPD_3;
+			break;
+		case (1 << 24):
+			hpd.hpd = RADEON_HPD_4;
+			break;
+		case (1 << 26):
+			hpd.hpd = RADEON_HPD_5;
+			break;
+		case (1 << 28):
+			hpd.hpd = RADEON_HPD_6;
+			break;
+		default:
+			hpd.hpd = RADEON_HPD_NONE;
+			break;
+		}
+	} else
+		hpd.hpd = RADEON_HPD_NONE;
+	return hpd;
+}
+
+static bool radeon_atom_apply_quirks(struct drm_device *dev,
+				     uint32_t supported_device,
+				     int *connector_type,
+				     struct radeon_i2c_bus_rec *i2c_bus,
+				     uint16_t *line_mux,
+				     struct radeon_hpd *hpd)
+{
+	/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x791e) &&
+	    (dev->pdev->subsystem_vendor == 0x1043) &&
+	    (dev->pdev->subsystem_device == 0x826d)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+	}
+
+	/* Asrock RS600 board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x7941) &&
+	    (dev->pdev->subsystem_vendor == 0x1849) &&
+	    (dev->pdev->subsystem_device == 0x7941)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+	}
+
+	/* MSI K9A2GM V2/V3 board has no HDMI or DVI */
+	if ((dev->pdev->device == 0x796e) &&
+	    (dev->pdev->subsystem_vendor == 0x1462) &&
+	    (dev->pdev->subsystem_device == 0x7302)) {
+		if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			return false;
+	}
+
+	/* Abit F-I90HD (reported by ciaranm on #radeonhd) - this board has no DVI */
+	if ((dev->pdev->device == 0x7941) &&
+	    (dev->pdev->subsystem_vendor == 0x147b) &&
+	    (dev->pdev->subsystem_device == 0x2412)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_DVII)
+			return false;
+	}
+
+	/* Falcon NW laptop lists the VGA DDC line for LVDS */
+	if ((dev->pdev->device == 0x5653) &&
+	    (dev->pdev->subsystem_vendor == 0x1462) &&
+	    (dev->pdev->subsystem_device == 0x0291)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			i2c_bus->valid = false;
+			*line_mux = 53;
+		}
+	}
+
+	/* HIS X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pdev->device == 0x7146) &&
+	    (dev->pdev->subsystem_vendor == 0x17af) &&
+	    (dev->pdev->subsystem_device == 0x2058)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
+	/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pdev->device == 0x7142) &&
+	    (dev->pdev->subsystem_vendor == 0x1458) &&
+	    (dev->pdev->subsystem_device == 0x2134)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
+	/* Funky macbooks */
+	if ((dev->pdev->device == 0x71C5) &&
+	    (dev->pdev->subsystem_vendor == 0x106b) &&
+	    (dev->pdev->subsystem_device == 0x0080)) {
+		if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
+		    (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+			return false;
+		if (supported_device == ATOM_DEVICE_CRT2_SUPPORT)
+			*line_mux = 0x90;
+	}
+
+	/* mac rv630, rv730, others */
+	if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+	    (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+		*connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+		*line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
+	}
+
+	/* ASUS HD 3600 XT board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x9598) &&
+	    (dev->pdev->subsystem_vendor == 0x1043) &&
+	    (dev->pdev->subsystem_device == 0x01da)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+		}
+	}
+
+	/* ASUS HD 3600 board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x9598) &&
+	    (dev->pdev->subsystem_vendor == 0x1043) &&
+	    (dev->pdev->subsystem_device == 0x01e4)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+		}
+	}
+
+	/* ASUS HD 3450 board lists the DVI port as HDMI */
+	if ((dev->pdev->device == 0x95C5) &&
+	    (dev->pdev->subsystem_vendor == 0x1043) &&
+	    (dev->pdev->subsystem_device == 0x01e2)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+		}
+	}
+
+	/* some BIOSes seem to report DAC on HDMI - usually this is a board with
+	 * HDMI + VGA reporting as HDMI
+	 */
+	if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+		if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) {
+			*connector_type = DRM_MODE_CONNECTOR_VGA;
+			*line_mux = 0;
+		}
+	}
+
+	/* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port
+	 * on the laptop and a DVI port on the docking station and
+	 * both share the same encoder, hpd pin, and ddc line.
+	 * So while the bios table is technically correct,
+	 * we drop the DVI port here since xrandr has no concept of
+	 * encoders and will try and drive both connectors
+	 * with different crtcs which isn't possible on the hardware
+	 * side and leaves no crtcs for LVDS or VGA.
+	 */
+	if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) &&
+	    (dev->pdev->subsystem_vendor == 0x1025) &&
+	    (dev->pdev->subsystem_device == 0x013c)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+		    (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
+			/* actually it's a DVI-D port not DVI-I */
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+			return false;
+		}
+	}
+
+	/* XFX Pine Group device rv730 reports no VGA DDC lines
+	 * even though they are wired up to record 0x93
+	 */
+	if ((dev->pdev->device == 0x9498) &&
+	    (dev->pdev->subsystem_vendor == 0x1682) &&
+	    (dev->pdev->subsystem_device == 0x2452) &&
+	    (i2c_bus->valid == false) &&
+	    !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
+		struct radeon_device *rdev = dev->dev_private;
+		*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+	}
+
+	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+	if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
+	    (dev->pdev->subsystem_vendor == 0x1734) &&
+	    (dev->pdev->subsystem_device == 0x11bd)) {
+		if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+			*line_mux = 0x3103;
+		} else if (*connector_type == DRM_MODE_CONNECTOR_DVID) {
+			*connector_type = DRM_MODE_CONNECTOR_DVII;
+		}
+	}
+
+	return true;
+}
+
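+/* indexed by the connector type from the ATOM SupportedDevices table */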
+const int supported_devices_connector_convert[] = {
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_VGA,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_DVIA,
+	DRM_MODE_CONNECTOR_SVIDEO,
+	DRM_MODE_CONNECTOR_Composite,
+	DRM_MODE_CONNECTOR_LVDS,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_HDMIA,
+	DRM_MODE_CONNECTOR_HDMIB,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_9PinDIN,
+	DRM_MODE_CONNECTOR_DisplayPort
+};
+
+const uint16_t supported_devices_connector_object_id_convert[] = {
+	CONNECTOR_OBJECT_ID_NONE,
+	CONNECTOR_OBJECT_ID_VGA,
+	CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */
+	CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D, /* not all boards support DL */
+	CONNECTOR_OBJECT_ID_VGA, /* technically DVI-A */
+	CONNECTOR_OBJECT_ID_COMPOSITE,
+	CONNECTOR_OBJECT_ID_SVIDEO,
+	CONNECTOR_OBJECT_ID_LVDS,
+	CONNECTOR_OBJECT_ID_9PIN_DIN,
+	CONNECTOR_OBJECT_ID_9PIN_DIN,
+	CONNECTOR_OBJECT_ID_DISPLAYPORT,
+	CONNECTOR_OBJECT_ID_HDMI_TYPE_A,
+	CONNECTOR_OBJECT_ID_HDMI_TYPE_B,
+	CONNECTOR_OBJECT_ID_SVIDEO
+};
+
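+/* indexed by the connector object id from the ATOM Object_Header table */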
+const int object_connector_convert[] = {
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_VGA,
+	DRM_MODE_CONNECTOR_Composite,
+	DRM_MODE_CONNECTOR_SVIDEO,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_9PinDIN,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_HDMIA,
+	DRM_MODE_CONNECTOR_HDMIB,
+	DRM_MODE_CONNECTOR_LVDS,
+	DRM_MODE_CONNECTOR_9PinDIN,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_DisplayPort,
+	DRM_MODE_CONNECTOR_eDP,
+	DRM_MODE_CONNECTOR_Unknown
+};
+
+bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct atom_context *ctx = mode_info->atom_context;
+	int index = GetIndexIntoMasterTable(DATA, Object_Header);
+	u16 size, data_offset;
+	u8 frev, crev;
+	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+	ATOM_ENCODER_OBJECT_TABLE *enc_obj;
+	ATOM_OBJECT_TABLE *router_obj;
+	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+	ATOM_OBJECT_HEADER *obj_header;
+	int i, j, k, path_size, device_support;
+	int connector_type;
+	u16 igp_lane_info, conn_id, connector_object_id;
+	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_router router;
+	struct radeon_gpio_rec gpio;
+	struct radeon_hpd hpd;
+
+	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+		return false;
+
+	if (crev < 2)
+		return false;
+
+	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
+	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
+	    (ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usDisplayPathTableOffset));
+	con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
+	    (ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+	enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
+	    (ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usEncoderObjectTableOffset));
+	router_obj = (ATOM_OBJECT_TABLE *)
+		(ctx->bios + data_offset +
+		 le16_to_cpu(obj_header->usRouterObjectTableOffset));
+	device_support = le16_to_cpu(obj_header->usDeviceSupport);
+
+	path_size = 0;
+	for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
+		uint8_t *addr = (uint8_t *) path_obj->asDispPath;
+		ATOM_DISPLAY_OBJECT_PATH *path;
+		addr += path_size;
+		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
+		path_size += le16_to_cpu(path->usSize);
+
+		if (device_support & le16_to_cpu(path->usDeviceTag)) {
+			uint8_t con_obj_id, con_obj_num, con_obj_type;
+
+			con_obj_id =
+			    (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
+			    >> OBJECT_ID_SHIFT;
+			con_obj_num =
+			    (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
+			    >> ENUM_ID_SHIFT;
+			con_obj_type =
+			    (le16_to_cpu(path->usConnObjectId) &
+			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+			/* TODO CV support */
+			if (le16_to_cpu(path->usDeviceTag) ==
+				ATOM_DEVICE_CV_SUPPORT)
+				continue;
+
+			/* IGP chips */
+			if ((rdev->flags & RADEON_IS_IGP) &&
+			    (con_obj_id ==
+			     CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) {
+				uint16_t igp_offset = 0;
+				ATOM_INTEGRATED_SYSTEM_INFO_V2 *igp_obj;
+
+				index =
+				    GetIndexIntoMasterTable(DATA,
+							    IntegratedSystemInfo);
+
+				if (atom_parse_data_header(ctx, index, &size, &frev,
+							   &crev, &igp_offset)) {
+
+					if (crev >= 2) {
+						igp_obj = (ATOM_INTEGRATED_SYSTEM_INFO_V2 *)
+							(ctx->bios + igp_offset);
+
+						if (igp_obj) {
+							uint32_t slot_config, ct;
+
+							if (con_obj_num == 1)
+								slot_config = le32_to_cpu(igp_obj->ulDDISlot1Config);
+							else
+								slot_config = le32_to_cpu(igp_obj->ulDDISlot2Config);
+
+							ct = (slot_config >> 16) & 0xff;
+							connector_type = object_connector_convert[ct];
+							connector_object_id = ct;
+							igp_lane_info = slot_config & 0xffff;
+						} else
+							continue;
+					} else
+						continue;
+				} else {
+					igp_lane_info = 0;
+					connector_type =
+						object_connector_convert[con_obj_id];
+					connector_object_id = con_obj_id;
+				}
+			} else {
+				igp_lane_info = 0;
+				connector_type =
+				    object_connector_convert[con_obj_id];
+				connector_object_id = con_obj_id;
+			}
+
+			if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+				continue;
+
+			router.ddc_valid = false;
+			router.cd_valid = false;
+			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
+				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
+
+				grph_obj_id =
+				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+				     OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+				grph_obj_num =
+				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+				     ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+				grph_obj_type =
+				    (le16_to_cpu(path->usGraphicObjIds[j]) &
+				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+
+				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
+					for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
+						u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
+						if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
+							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+								(ctx->bios + data_offset +
+								 le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
+							ATOM_ENCODER_CAP_RECORD *cap_record;
+							u16 caps = 0;
+
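+							/* records are variable length; a zero size/type
+							 * or out-of-range type ends the list
+							 */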
+							while (record->ucRecordSize > 0 &&
+							       record->ucRecordType > 0 &&
+							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+								switch (record->ucRecordType) {
+								case ATOM_ENCODER_CAP_RECORD_TYPE:
+									cap_record = (ATOM_ENCODER_CAP_RECORD *)
+										record;
+									caps = le16_to_cpu(cap_record->usEncoderCap);
+									break;
+								}
+								record = (ATOM_COMMON_RECORD_HEADER *)
+									((char *)record + record->ucRecordSize);
+							}
+							radeon_add_atom_encoder(dev,
+										encoder_obj,
+										le16_to_cpu(path->usDeviceTag),
+										caps);
+						}
+					}
+				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
+					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
+						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
+						if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
+							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+								(ctx->bios + data_offset +
+								 le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
+							ATOM_I2C_RECORD *i2c_record;
+							ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+							ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+							ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
+							ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
+								(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
+								(ctx->bios + data_offset +
+								 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
+							u8 *num_dst_objs = (u8 *)
+								((u8 *)router_src_dst_table + 1 +
+								 (router_src_dst_table->ucNumberOfSrc * 2));
+							u16 *dst_objs = (u16 *)(num_dst_objs + 1);
+							int enum_id;
+
+							router.router_id = router_obj_id;
+							for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
+								if (le16_to_cpu(path->usConnObjectId) ==
+								    le16_to_cpu(dst_objs[enum_id]))
+									break;
+							}
+
+							while (record->ucRecordSize > 0 &&
+							       record->ucRecordType > 0 &&
+							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+								switch (record->ucRecordType) {
+								case ATOM_I2C_RECORD_TYPE:
+									i2c_record =
+										(ATOM_I2C_RECORD *)
+										record;
+									i2c_config =
+										(ATOM_I2C_ID_CONFIG_ACCESS *)
+										&i2c_record->sucI2cId;
+									router.i2c_info =
+										radeon_lookup_i2c_gpio(rdev,
+												       i2c_config->ucAccess);
+									router.i2c_addr = i2c_record->ucI2CAddr >> 1;
+									break;
+								case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
+									ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
+										record;
+									router.ddc_valid = true;
+									router.ddc_mux_type = ddc_path->ucMuxType;
+									router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+									router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+									break;
+								case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+									cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+										record;
+									router.cd_valid = true;
+									router.cd_mux_type = cd_path->ucMuxType;
+									router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+									router.cd_mux_state = cd_path->ucMuxState[enum_id];
+									break;
+								}
+								record = (ATOM_COMMON_RECORD_HEADER *)
+									((char *)record + record->ucRecordSize);
+							}
+						}
+					}
+				}
+			}
+
+			/* look up gpio for ddc, hpd */
+			ddc_bus.valid = false;
+			hpd.hpd = RADEON_HPD_NONE;
+			if ((le16_to_cpu(path->usDeviceTag) &
+			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
+				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
+					if (le16_to_cpu(path->usConnObjectId) ==
+					    le16_to_cpu(con_obj->asObjects[j].
+							usObjectID)) {
+						ATOM_COMMON_RECORD_HEADER *record =
+							(ATOM_COMMON_RECORD_HEADER *)
+							(ctx->bios + data_offset +
+							 le16_to_cpu(con_obj->asObjects[j].usRecordOffset));
+						ATOM_I2C_RECORD *i2c_record;
+						ATOM_HPD_INT_RECORD *hpd_record;
+						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+
+						while (record->ucRecordSize > 0 &&
+						       record->ucRecordType > 0 &&
+						       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+							switch (record->ucRecordType) {
+							case ATOM_I2C_RECORD_TYPE:
+								i2c_record =
+								    (ATOM_I2C_RECORD *)
+									record;
+								i2c_config =
+									(ATOM_I2C_ID_CONFIG_ACCESS *)
+									&i2c_record->sucI2cId;
+								ddc_bus = radeon_lookup_i2c_gpio(rdev,
+												 i2c_config->ucAccess);
+								break;
+							case ATOM_HPD_INT_RECORD_TYPE:
+								hpd_record =
+									(ATOM_HPD_INT_RECORD *)
+									record;
+								gpio = radeon_lookup_gpio(rdev,
+											  hpd_record->ucHPDIntGPIOID);
+								hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+								hpd.plugged_state = hpd_record->ucPlugged_PinState;
+								break;
+							}
+							record = (ATOM_COMMON_RECORD_HEADER *)
+								((char *)record + record->ucRecordSize);
+						}
+						break;
+					}
+				}
+			}
+
+			/* needed for aux chan transactions */
+			ddc_bus.hpd = hpd.hpd;
+
+			conn_id = le16_to_cpu(path->usConnObjectId);
+
+			if (!radeon_atom_apply_quirks
+			    (dev, le16_to_cpu(path->usDeviceTag), &connector_type,
+			     &ddc_bus, &conn_id, &hpd))
+				continue;
+
+			radeon_add_atom_connector(dev,
+						  conn_id,
+						  le16_to_cpu(path->usDeviceTag),
+						  connector_type, &ddc_bus,
+						  igp_lane_info,
+						  connector_object_id,
+						  &hpd,
+						  &router);
+
+		}
+	}
+
+	radeon_link_encoder_connector(dev);
+
+	return true;
+}
+
+static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
+						 int connector_type,
+						 uint16_t devices)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		return supported_devices_connector_object_id_convert
+			[connector_type];
+	} else if (((connector_type == DRM_MODE_CONNECTOR_DVII) ||
+		    (connector_type == DRM_MODE_CONNECTOR_DVID)) &&
+		   (devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+		struct radeon_mode_info *mode_info = &rdev->mode_info;
+		struct atom_context *ctx = mode_info->atom_context;
+		int index = GetIndexIntoMasterTable(DATA, XTMDS_Info);
+		uint16_t size, data_offset;
+		uint8_t frev, crev;
+		ATOM_XTMDS_INFO *xtmds;
+
+		if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
+			xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
+
+			if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
+				if (connector_type == DRM_MODE_CONNECTOR_DVII)
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+				else
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+			} else {
+				if (connector_type == DRM_MODE_CONNECTOR_DVII)
+					return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+				else
+					return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+			}
+		} else
+			return supported_devices_connector_object_id_convert
+				[connector_type];
+	} else {
+		return supported_devices_connector_object_id_convert
+			[connector_type];
+	}
+}
+
+struct bios_connector {
+	bool valid;
+	uint16_t line_mux;
+	uint16_t devices;
+	int connector_type;
+	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_hpd hpd;
+};
+
+bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct atom_context *ctx = mode_info->atom_context;
+	int index = GetIndexIntoMasterTable(DATA, SupportedDevicesInfo);
+	uint16_t size, data_offset;
+	uint8_t frev, crev;
+	uint16_t device_support;
+	uint8_t dac;
+	union atom_supported_devices *supported_devices;
+	int i, j, max_device;
+	struct bios_connector *bios_connectors;
+	size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
+	struct radeon_router router;
+
+	router.ddc_valid = false;
+	router.cd_valid = false;
+
+	bios_connectors = kzalloc(bc_size, GFP_KERNEL);
+	if (!bios_connectors)
+		return false;
+
+	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
+				    &data_offset)) {
+		kfree(bios_connectors);
+		return false;
+	}
+
+	supported_devices =
+	    (union atom_supported_devices *)(ctx->bios + data_offset);
+
+	device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
+
+	if (frev > 1)
+		max_device = ATOM_MAX_SUPPORTED_DEVICE;
+	else
+		max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
+
+	for (i = 0; i < max_device; i++) {
+		ATOM_CONNECTOR_INFO_I2C ci =
+		    supported_devices->info.asConnInfo[i];
+
+		bios_connectors[i].valid = false;
+
+		if (!(device_support & (1 << i))) {
+			continue;
+		}
+
+		if (i == ATOM_DEVICE_CV_INDEX) {
+			DRM_DEBUG_KMS("Skipping Component Video\n");
+			continue;
+		}
+
+		bios_connectors[i].connector_type =
+		    supported_devices_connector_convert[ci.sucConnectorInfo.sbfAccess.bfConnectorType];
+
+		if (bios_connectors[i].connector_type ==
+		    DRM_MODE_CONNECTOR_Unknown)
+			continue;
+
+		dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
+
+		bios_connectors[i].line_mux =
+			ci.sucI2cId.ucAccess;
+
+		/* give TV unique connector ids */
+		if (i == ATOM_DEVICE_TV1_INDEX) {
+			bios_connectors[i].ddc_bus.valid = false;
+			bios_connectors[i].line_mux = 50;
+		} else if (i == ATOM_DEVICE_TV2_INDEX) {
+			bios_connectors[i].ddc_bus.valid = false;
+			bios_connectors[i].line_mux = 51;
+		} else if (i == ATOM_DEVICE_CV_INDEX) {
+			bios_connectors[i].ddc_bus.valid = false;
+			bios_connectors[i].line_mux = 52;
+		} else
+			bios_connectors[i].ddc_bus =
+			    radeon_lookup_i2c_gpio(rdev,
+						   bios_connectors[i].line_mux);
+
+		if ((crev > 1) && (frev > 1)) {
+			u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
+			switch (isb) {
+			case 0x4:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+				break;
+			case 0xa:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+				break;
+			default:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+				break;
+			}
+		} else {
+			if (i == ATOM_DEVICE_DFP1_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+			else if (i == ATOM_DEVICE_DFP2_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+			else
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+		}
+
+		/* Always set the connector type to VGA for CRT1/CRT2. If they are
+		 * shared with a DVI port, we'll pick up the DVI connector when we
+		 * merge the outputs.  Some BIOSes incorrectly list VGA ports as DVI.
+		 */
+		if (i == ATOM_DEVICE_CRT1_INDEX || i == ATOM_DEVICE_CRT2_INDEX)
+			bios_connectors[i].connector_type =
+			    DRM_MODE_CONNECTOR_VGA;
+
+		if (!radeon_atom_apply_quirks
+		    (dev, (1 << i), &bios_connectors[i].connector_type,
+		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
+		     &bios_connectors[i].hpd))
+			continue;
+
+		bios_connectors[i].valid = true;
+		bios_connectors[i].devices = (1 << i);
+
+		if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
+			radeon_add_atom_encoder(dev,
+						radeon_get_encoder_enum(dev,
+								      (1 << i),
+								      dac),
+						(1 << i),
+						0);
+		else
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									(1 << i),
+									dac),
+						  (1 << i));
+	}
+
+	/* combine shared connectors */
+	for (i = 0; i < max_device; i++) {
+		if (bios_connectors[i].valid) {
+			for (j = 0; j < max_device; j++) {
+				if (bios_connectors[j].valid && (i != j)) {
+					if (bios_connectors[i].line_mux ==
+					    bios_connectors[j].line_mux) {
+						/* make sure not to combine LVDS */
+						if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+							bios_connectors[i].line_mux = 53;
+							bios_connectors[i].ddc_bus.valid = false;
+							continue;
+						}
+						if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+							bios_connectors[j].line_mux = 53;
+							bios_connectors[j].ddc_bus.valid = false;
+							continue;
+						}
+						/* combine analog and digital for DVI-I */
+						if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+						     (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) ||
+						    ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+						     (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) {
+							bios_connectors[i].devices |=
+								bios_connectors[j].devices;
+							bios_connectors[i].connector_type =
+								DRM_MODE_CONNECTOR_DVII;
+							if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT))
+								bios_connectors[i].hpd =
+									bios_connectors[j].hpd;
+							bios_connectors[j].valid = false;
+						}
+					}
+				}
+			}
+		}
+	}
+
+	/* add the connectors */
+	for (i = 0; i < max_device; i++) {
+		if (bios_connectors[i].valid) {
+			uint16_t connector_object_id =
+				atombios_get_connector_object_id(dev,
+						      bios_connectors[i].connector_type,
+						      bios_connectors[i].devices);
+			radeon_add_atom_connector(dev,
+						  bios_connectors[i].line_mux,
+						  bios_connectors[i].devices,
+						  bios_connectors[i].connector_type,
+						  &bios_connectors[i].ddc_bus,
+						  0,
+						  connector_object_id,
+						  &bios_connectors[i].hpd,
+						  &router);
+		}
+	}
+
+	radeon_link_encoder_connector(dev);
+
+	kfree(bios_connectors);
+	return true;
+}
+
+union firmware_info {
+	ATOM_FIRMWARE_INFO info;
+	ATOM_FIRMWARE_INFO_V1_2 info_12;
+	ATOM_FIRMWARE_INFO_V1_3 info_13;
+	ATOM_FIRMWARE_INFO_V1_4 info_14;
+	ATOM_FIRMWARE_INFO_V2_1 info_21;
+	ATOM_FIRMWARE_INFO_V2_2 info_22;
+};
+
+bool radeon_atom_get_clock_info(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	union firmware_info *firmware_info;
+	uint8_t frev, crev;
+	struct radeon_pll *p1pll = &rdev->clock.p1pll;
+	struct radeon_pll *p2pll = &rdev->clock.p2pll;
+	struct radeon_pll *dcpll = &rdev->clock.dcpll;
+	struct radeon_pll *spll = &rdev->clock.spll;
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+	uint16_t data_offset;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		firmware_info =
+			(union firmware_info *)(mode_info->atom_context->bios +
+						data_offset);
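+		/* most clock values in the FirmwareInfo table are in 10 kHz units;
+		 * the LCD PLL limits below are in MHz, hence the * 100
+		 */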
+		/* pixel clocks */
+		p1pll->reference_freq =
+		    le16_to_cpu(firmware_info->info.usReferenceClock);
+		p1pll->reference_div = 0;
+
+		if (crev < 2)
+			p1pll->pll_out_min =
+				le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+		else
+			p1pll->pll_out_min =
+				le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
+		p1pll->pll_out_max =
+		    le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+
+		if (crev >= 4) {
+			p1pll->lcd_pll_out_min =
+				le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+			if (p1pll->lcd_pll_out_min == 0)
+				p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+			p1pll->lcd_pll_out_max =
+				le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+			if (p1pll->lcd_pll_out_max == 0)
+				p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+		} else {
+			p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+			p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+		}
+
+		if (p1pll->pll_out_min == 0) {
+			if (ASIC_IS_AVIVO(rdev))
+				p1pll->pll_out_min = 64800;
+			else
+				p1pll->pll_out_min = 20000;
+		}
+
+		p1pll->pll_in_min =
+		    le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
+		p1pll->pll_in_max =
+		    le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);
+
+		*p2pll = *p1pll;
+
+		/* system clock */
+		if (ASIC_IS_DCE4(rdev))
+			spll->reference_freq =
+				le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
+		else
+			spll->reference_freq =
+				le16_to_cpu(firmware_info->info.usReferenceClock);
+		spll->reference_div = 0;
+
+		spll->pll_out_min =
+		    le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
+		spll->pll_out_max =
+		    le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);
+
+		/* a zero minimum means the table didn't provide one; use a sane default */
+		if (spll->pll_out_min == 0) {
+			if (ASIC_IS_AVIVO(rdev))
+				spll->pll_out_min = 64800;
+			else
+				spll->pll_out_min = 20000;
+		}
+
+		spll->pll_in_min =
+		    le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
+		spll->pll_in_max =
+		    le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
+
+		/* memory clock */
+		if (ASIC_IS_DCE4(rdev))
+			mpll->reference_freq =
+				le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
+		else
+			mpll->reference_freq =
+				le16_to_cpu(firmware_info->info.usReferenceClock);
+		mpll->reference_div = 0;
+
+		mpll->pll_out_min =
+		    le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
+		mpll->pll_out_max =
+		    le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);
+
+		/* a zero minimum means the table didn't provide one; use a sane default */
+		if (mpll->pll_out_min == 0) {
+			if (ASIC_IS_AVIVO(rdev))
+				mpll->pll_out_min = 64800;
+			else
+				mpll->pll_out_min = 20000;
+		}
+
+		mpll->pll_in_min =
+		    le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
+		mpll->pll_in_max =
+		    le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);
+
+		rdev->clock.default_sclk =
+		    le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
+		rdev->clock.default_mclk =
+		    le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
+
+		if (ASIC_IS_DCE4(rdev)) {
+			rdev->clock.default_dispclk =
+				le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
+			if (rdev->clock.default_dispclk == 0) {
+				if (ASIC_IS_DCE5(rdev))
+					rdev->clock.default_dispclk = 54000; /* 540 MHz */
+				else
+					rdev->clock.default_dispclk = 60000; /* 600 MHz */
+			}
+			rdev->clock.dp_extclk =
+				le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
+		}
+		*dcpll = *p1pll;
+
+		rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
+		if (rdev->clock.max_pixel_clock == 0)
+			rdev->clock.max_pixel_clock = 40000;
+
+		/* not technically a clock, but... */
+		rdev->mode_info.firmware_flags =
+			le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
+
+		return true;
+	}
+
+	return false;
+}
+
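+/* overlay of the known IntegratedSystemInfo table revisions */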
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+};
+
+bool radeon_atombios_sideport_present(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	union igp_info *igp_info;
+	u8 frev, crev;
+	u16 data_offset;
+
+	/* sideport is AMD only; RS600 is the Intel-platform IGP */
+	if (rdev->family == CHIP_RS600)
+		return false;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)(mode_info->atom_context->bios +
+				      data_offset);
+		switch (crev) {
+		case 1:
+			if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
+				return true;
+			break;
+		case 2:
+			if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
+				return true;
+			break;
+		default:
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+			break;
+		}
+	}
+	return false;
+}
+
+bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+				   struct radeon_encoder_int_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, TMDS_Info);
+	uint16_t data_offset;
+	struct _ATOM_TMDS_INFO *tmds_info;
+	uint8_t frev, crev;
+	uint16_t maxfreq;
+	int i;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		tmds_info =
+			(struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
+						   data_offset);
+
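+		/*
+		 * Each entry packs the PLL settings into one value:
+		 * charge pump [5:0], VCO gain [11:6], duty cycle [15:12],
+		 * voltage swing [19:16].
+		 */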
+		maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
+		for (i = 0; i < 4; i++) {
+			tmds->tmds_pll[i].freq =
+			    le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency);
+			tmds->tmds_pll[i].value =
+			    tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f;
+			tmds->tmds_pll[i].value |=
+			    (tmds_info->asMiscInfo[i].
+			     ucPLL_VCO_Gain & 0x3f) << 6;
+			tmds->tmds_pll[i].value |=
+			    (tmds_info->asMiscInfo[i].
+			     ucPLL_DutyCycle & 0xf) << 12;
+			tmds->tmds_pll[i].value |=
+			    (tmds_info->asMiscInfo[i].
+			     ucPLL_VoltageSwing & 0xf) << 16;
+
+			DRM_DEBUG_KMS("TMDS PLL From ATOMBIOS %u %x\n",
+				  tmds->tmds_pll[i].freq,
+				  tmds->tmds_pll[i].value);
+
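+			/* the entry at the table's max frequency is the catch-all */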
+			if (maxfreq == tmds->tmds_pll[i].freq) {
+				tmds->tmds_pll[i].freq = 0xffffffff;
+				break;
+			}
+		}
+		return true;
+	}
+	return false;
+}
+
+bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+				      struct radeon_atom_ss *ss,
+				      int id)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
+	uint16_t data_offset, size;
+	struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
+	uint8_t frev, crev;
+	int i, num_indices;
+
+	memset(ss, 0, sizeof(struct radeon_atom_ss));
+	if (atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		ss_info =
+			(struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
+
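+		/* the entry count is implied by the table size */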
+		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+			sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+
+		for (i = 0; i < num_indices; i++) {
+			if (ss_info->asSS_Info[i].ucSS_Id == id) {
+				ss->percentage =
+					le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
+				ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
+				ss->step = ss_info->asSS_Info[i].ucSS_Step;
+				ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
+				ss->range = ss_info->asSS_Info[i].ucSS_Range;
+				ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+				return true;
+			}
+		}
+	}
+	return false;
+}
+
+static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
+						 struct radeon_atom_ss *ss,
+						 int id)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	u16 data_offset, size;
+	union igp_info *igp_info;
+	u8 frev, crev;
+	u16 percentage = 0, rate = 0;
+
+	/* get any igp specific overrides */
+	if (atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (union igp_info *)
+			(mode_info->atom_context->bios + data_offset);
+		switch (crev) {
+		case 6:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
+			break;
+		case 7:
+			switch (id) {
+			case ASIC_INTERNAL_SS_ON_TMDS:
+				percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_HDMI:
+				percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
+				break;
+			case ASIC_INTERNAL_SS_ON_LVDS:
+				percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
+				rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
+				break;
+			}
+			break;
+		default:
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+			break;
+		}
+		if (percentage)
+			ss->percentage = percentage;
+		if (rate)
+			ss->rate = rate;
+	}
+}
+
+union asic_ss_info {
+	struct _ATOM_ASIC_INTERNAL_SS_INFO info;
+	struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
+	struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
+};
+
+bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+				      struct radeon_atom_ss *ss,
+				      int id, u32 clock)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+	uint16_t data_offset, size;
+	union asic_ss_info *ss_info;
+	uint8_t frev, crev;
+	int i, num_indices;
+
+	memset(ss, 0, sizeof(struct radeon_atom_ss));
+	if (atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+
+		ss_info =
+			(union asic_ss_info *)(mode_info->atom_context->bios + data_offset);
+
+		switch (frev) {
+		case 1:
+			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+				sizeof(ATOM_ASIC_SS_ASSIGNMENT);
+
+			for (i = 0; i < num_indices; i++) {
+				if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
+					ss->percentage =
+						le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+					ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
+					return true;
+				}
+			}
+			break;
+		case 2:
+			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+			for (i = 0; i < num_indices; i++) {
+				if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
+					ss->percentage =
+						le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+					ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+					return true;
+				}
+			}
+			break;
+		case 3:
+			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+			for (i = 0; i < num_indices; i++) {
+				if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
+				    (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
+					ss->percentage =
+						le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+					ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+					ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+					if (rdev->flags & RADEON_IS_IGP)
+						radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
+					return true;
+				}
+			}
+			break;
+		default:
+			DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
+			break;
+		}
+
+	}
+	return false;
+}
+
+union lvds_info {
+	struct _ATOM_LVDS_INFO info;
+	struct _ATOM_LVDS_INFO_V12 info_12;
+};
+
+struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
+							      radeon_encoder
+							      *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
+	uint16_t data_offset, misc;
+	union lvds_info *lvds_info;
+	uint8_t frev, crev;
+	struct radeon_encoder_atom_dig *lvds = NULL;
+	int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		lvds_info =
+			(union lvds_info *)(mode_info->atom_context->bios + data_offset);
+		lvds =
+		    kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
+
+		if (!lvds)
+			return NULL;
+
+		lvds->native_mode.clock =
+		    le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
+		lvds->native_mode.hdisplay =
+		    le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
+		lvds->native_mode.vdisplay =
+		    le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
+		lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
+		lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
+		lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
+		lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
+		lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
+		lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
+		lvds->panel_pwr_delay =
+		    le16_to_cpu(lvds_info->info.usOffDelayInMs);
+		lvds->lcd_misc = lvds_info->info.ucLVDS_Misc;
+
+		misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
+		if (misc & ATOM_VSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+		if (misc & ATOM_HSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+		if (misc & ATOM_COMPOSITESYNC)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
+		if (misc & ATOM_INTERLACE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+		if (misc & ATOM_DOUBLE_CLOCK_MODE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+
+		lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
+		lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
+
+		/* set crtc values */
+		drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+
+		lvds->lcd_ss_id = lvds_info->info.ucSS_Id;
+
+		encoder->native_mode = lvds->native_mode;
+
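+		/* ENUM_ID 2 selects the second link of the LVDS encoder */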
+		lvds->linkb = (encoder_enum == 2);
+
+		/* parse the lcd record table */
+		if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
+			ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
+			ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
+			bool bad_record = false;
+			u8 *record;
+
+			if ((frev == 1) && (crev < 2))
+				/* absolute */
+				record = (u8 *)(mode_info->atom_context->bios +
+						le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+			else
+				/* relative */
+				record = (u8 *)(mode_info->atom_context->bios +
+						data_offset +
+						le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+			while (*record != ATOM_RECORD_END_TYPE) {
+				switch (*record) {
+				case LCD_MODE_PATCH_RECORD_MODE_TYPE:
+					record += sizeof(ATOM_PATCH_RECORD_MODE);
+					break;
+				case LCD_RTS_RECORD_TYPE:
+					record += sizeof(ATOM_LCD_RTS_RECORD);
+					break;
+				case LCD_CAP_RECORD_TYPE:
+					record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
+					break;
+				case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
+					fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
+					if (fake_edid_record->ucFakeEDIDLength) {
+						struct edid *edid;
+						int edid_size =
+							max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
+						edid = kmalloc(edid_size, GFP_KERNEL);
+						if (edid) {
+							memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
+							       fake_edid_record->ucFakeEDIDLength);
+
+							if (drm_edid_is_valid(edid)) {
+								rdev->mode_info.bios_hardcoded_edid = edid;
+								rdev->mode_info.bios_hardcoded_edid_size = edid_size;
+							} else
+								kfree(edid);
+						}
+					}
+					record += fake_edid_record->ucFakeEDIDLength ?
+						fake_edid_record->ucFakeEDIDLength + 2 :
+						sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+					break;
+				case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+					panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
+					lvds->native_mode.width_mm = panel_res_record->usHSize;
+					lvds->native_mode.height_mm = panel_res_record->usVSize;
+					record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
+					break;
+				default:
+					DRM_ERROR("Bad LCD record %d\n", *record);
+					bad_record = true;
+					break;
+				}
+				if (bad_record)
+					break;
+			}
+		}
+	}
+	return lvds;
+}
+
+struct radeon_encoder_primary_dac *
+radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, CompassionateData);
+	uint16_t data_offset;
+	struct _COMPASSIONATE_DATA *dac_info;
+	uint8_t frev, crev;
+	uint8_t bg, dac;
+	struct radeon_encoder_primary_dac *p_dac = NULL;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		dac_info = (struct _COMPASSIONATE_DATA *)
+			(mode_info->atom_context->bios + data_offset);
+
+		p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
+
+		if (!p_dac)
+			return NULL;
+
+		bg = dac_info->ucDAC1_BG_Adjustment;
+		dac = dac_info->ucDAC1_DAC_Adjustment;
+		p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+
+	}
+	return p_dac;
+}
+
+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+				struct drm_display_mode *mode)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	ATOM_ANALOG_TV_INFO *tv_info;
+	ATOM_ANALOG_TV_INFO_V1_2 *tv_info_v1_2;
+	ATOM_DTD_FORMAT *dtd_timings;
+	int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
+	u8 frev, crev;
+	u16 data_offset, misc;
+
+	if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL,
+				    &frev, &crev, &data_offset))
+		return false;
+
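+	/* crev 1 stores raw CRTC timings; crev 2 uses EDID-style DTDs */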
+	switch (crev) {
+	case 1:
+		tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
+		if (index >= MAX_SUPPORTED_TV_TIMING)
+			return false;
+
+		mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
+		mode->crtc_hdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp);
+		mode->crtc_hsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart);
+		mode->crtc_hsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart) +
+			le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth);
+
+		mode->crtc_vtotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total);
+		mode->crtc_vdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp);
+		mode->crtc_vsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart);
+		mode->crtc_vsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart) +
+			le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth);
+
+		mode->flags = 0;
+		misc = le16_to_cpu(tv_info->aModeTimings[index].susModeMiscInfo.usAccess);
+		if (misc & ATOM_VSYNC_POLARITY)
+			mode->flags |= DRM_MODE_FLAG_NVSYNC;
+		if (misc & ATOM_HSYNC_POLARITY)
+			mode->flags |= DRM_MODE_FLAG_NHSYNC;
+		if (misc & ATOM_COMPOSITESYNC)
+			mode->flags |= DRM_MODE_FLAG_CSYNC;
+		if (misc & ATOM_INTERLACE)
+			mode->flags |= DRM_MODE_FLAG_INTERLACE;
+		if (misc & ATOM_DOUBLE_CLOCK_MODE)
+			mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+		mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
+
+		if (index == 1) {
+			/* PAL timings appear to have wrong values for totals */
+			mode->crtc_htotal -= 1;
+			mode->crtc_vtotal -= 1;
+		}
+		break;
+	case 2:
+		tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset);
+		if (index >= MAX_SUPPORTED_TV_TIMING_V1_2)
+			return false;
+
+		dtd_timings = &tv_info_v1_2->aModeTimings[index];
+		mode->crtc_htotal = le16_to_cpu(dtd_timings->usHActive) +
+			le16_to_cpu(dtd_timings->usHBlanking_Time);
+		mode->crtc_hdisplay = le16_to_cpu(dtd_timings->usHActive);
+		mode->crtc_hsync_start = le16_to_cpu(dtd_timings->usHActive) +
+			le16_to_cpu(dtd_timings->usHSyncOffset);
+		mode->crtc_hsync_end = mode->crtc_hsync_start +
+			le16_to_cpu(dtd_timings->usHSyncWidth);
+
+		mode->crtc_vtotal = le16_to_cpu(dtd_timings->usVActive) +
+			le16_to_cpu(dtd_timings->usVBlanking_Time);
+		mode->crtc_vdisplay = le16_to_cpu(dtd_timings->usVActive);
+		mode->crtc_vsync_start = le16_to_cpu(dtd_timings->usVActive) +
+			le16_to_cpu(dtd_timings->usVSyncOffset);
+		mode->crtc_vsync_end = mode->crtc_vsync_start +
+			le16_to_cpu(dtd_timings->usVSyncWidth);
+
+		mode->flags = 0;
+		misc = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess);
+		if (misc & ATOM_VSYNC_POLARITY)
+			mode->flags |= DRM_MODE_FLAG_NVSYNC;
+		if (misc & ATOM_HSYNC_POLARITY)
+			mode->flags |= DRM_MODE_FLAG_NHSYNC;
+		if (misc & ATOM_COMPOSITESYNC)
+			mode->flags |= DRM_MODE_FLAG_CSYNC;
+		if (misc & ATOM_INTERLACE)
+			mode->flags |= DRM_MODE_FLAG_INTERLACE;
+		if (misc & ATOM_DOUBLE_CLOCK_MODE)
+			mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+		mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
+		break;
+	}
+	return true;
+}
+
+enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
+	uint16_t data_offset;
+	uint8_t frev, crev;
+	struct _ATOM_ANALOG_TV_INFO *tv_info;
+	enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+
+		tv_info = (struct _ATOM_ANALOG_TV_INFO *)
+			(mode_info->atom_context->bios + data_offset);
+
+		switch (tv_info->ucTV_BootUpDefaultStandard) {
+		case ATOM_TV_NTSC:
+			tv_std = TV_STD_NTSC;
+			DRM_DEBUG_KMS("Default TV standard: NTSC\n");
+			break;
+		case ATOM_TV_NTSCJ:
+			tv_std = TV_STD_NTSC_J;
+			DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
+			break;
+		case ATOM_TV_PAL:
+			tv_std = TV_STD_PAL;
+			DRM_DEBUG_KMS("Default TV standard: PAL\n");
+			break;
+		case ATOM_TV_PALM:
+			tv_std = TV_STD_PAL_M;
+			DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
+			break;
+		case ATOM_TV_PALN:
+			tv_std = TV_STD_PAL_N;
+			DRM_DEBUG_KMS("Default TV standard: PAL-N\n");
+			break;
+		case ATOM_TV_PALCN:
+			tv_std = TV_STD_PAL_CN;
+			DRM_DEBUG_KMS("Default TV standard: PAL-CN\n");
+			break;
+		case ATOM_TV_PAL60:
+			tv_std = TV_STD_PAL_60;
+			DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
+			break;
+		case ATOM_TV_SECAM:
+			tv_std = TV_STD_SECAM;
+			DRM_DEBUG_KMS("Default TV standard: SECAM\n");
+			break;
+		default:
+			tv_std = TV_STD_NTSC;
+			DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
+			break;
+		}
+	}
+	return tv_std;
+}
+
+struct radeon_encoder_tv_dac *
+radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, CompassionateData);
+	uint16_t data_offset;
+	struct _COMPASSIONATE_DATA *dac_info;
+	uint8_t frev, crev;
+	uint8_t bg, dac;
+	struct radeon_encoder_tv_dac *tv_dac = NULL;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+
+		dac_info = (struct _COMPASSIONATE_DATA *)
+			(mode_info->atom_context->bios + data_offset);
+
+		tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
+
+		if (!tv_dac)
+			return NULL;
+
+		bg = dac_info->ucDAC2_CRT2_BG_Adjustment;
+		dac = dac_info->ucDAC2_CRT2_DAC_Adjustment;
+		tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+		bg = dac_info->ucDAC2_PAL_BG_Adjustment;
+		dac = dac_info->ucDAC2_PAL_DAC_Adjustment;
+		tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+		bg = dac_info->ucDAC2_NTSC_BG_Adjustment;
+		dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
+		tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+
+		tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
+	}
+	return tv_dac;
+}
+
+static const char *thermal_controller_names[] = {
+	"NONE",
+	"lm63",
+	"adm1032",
+	"adm1030",
+	"max6649",
+	"lm64",
+	"f75375",
+	"asc7xxx",
+};
+
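+/* indexed by ATOM_PP_THERMALCONTROLLER_* */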
+static const char *pp_lib_thermal_controller_names[] = {
+	"NONE",
+	"lm63",
+	"adm1032",
+	"adm1030",
+	"max6649",
+	"lm64",
+	"f75375",
+	"RV6xx",
+	"RV770",
+	"adt7473",
+	"NONE",
+	"External GPIO",
+	"Evergreen",
+	"emc2103",
+	"Sumo",
+	"Northern Islands",
+	"Southern Islands",
+	"lm96163",
+};
+
+union power_info {
+	struct _ATOM_POWERPLAY_INFO info;
+	struct _ATOM_POWERPLAY_INFO_V2 info_2;
+	struct _ATOM_POWERPLAY_INFO_V3 info_3;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
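+/* per-family clock info layouts used by the PPLib tables */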
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev,
+						 int state_index,
+						 u32 misc, u32 misc2)
+{
+	rdev->pm.power_state[state_index].misc = misc;
+	rdev->pm.power_state[state_index].misc2 = misc2;
+	/* order matters! later flags override the type set by earlier ones */
+	if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_POWERSAVE;
+	if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+	if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+	if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+	if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_PERFORMANCE;
+		rdev->pm.power_state[state_index].flags &=
+			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+	}
+	if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+	if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index;
+		rdev->pm.power_state[state_index].default_clock_mode =
+			&rdev->pm.power_state[state_index].clock_info[0];
+	} else if (state_index == 0) {
+		rdev->pm.power_state[state_index].clock_info[0].flags |=
+			RADEON_PM_MODE_NO_DISPLAY;
+	}
+}
+
+static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	u32 misc, misc2 = 0;
+	int num_modes = 0, i;
+	int state_index = 0;
+	struct radeon_i2c_bus_rec i2c_bus;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	/* add the i2c bus for thermal/fan chip */
+	if ((power_info->info.ucOverdriveThermalController > 0) &&
+	    (power_info->info.ucOverdriveThermalController < ARRAY_SIZE(thermal_controller_names))) {
+		DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+			 thermal_controller_names[power_info->info.ucOverdriveThermalController],
+			 power_info->info.ucOverdriveControllerAddress >> 1);
+		i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
+		rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+		if (rdev->pm.i2c_bus) {
+			struct i2c_board_info info = { };
+			const char *name = thermal_controller_names[power_info->info.
+								    ucOverdriveThermalController];
+			info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+			strlcpy(info.type, name, sizeof(info.type));
+			i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+		}
+	}
+	num_modes = power_info->info.ucNumOfPowerModeEntries;
+	if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+		num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+	if (num_modes == 0)
+		return state_index;
+	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
+	if (!rdev->pm.power_state)
+		return state_index;
+	/* last mode is usually default, array is low to high */
+	for (i = 0; i < num_modes; i++) {
+		rdev->pm.power_state[state_index].clock_info =
+			kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+		if (!rdev->pm.power_state[state_index].clock_info)
+			return state_index;
+		rdev->pm.power_state[state_index].num_clock_modes = 1;
+		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+		switch (frev) {
+		case 1:
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_lookup_gpio(rdev,
+							   power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
+				rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+					!!(misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH);
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0);
+			state_index++;
+			break;
+		case 2:
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
+			misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_lookup_gpio(rdev,
+							   power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
+				rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+					!!(misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH);
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+			state_index++;
+			break;
+		case 3:
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
+			misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_lookup_gpio(rdev,
+							   power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
+				rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+					!!(misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH);
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
+				if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
+					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
+						true;
+					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
+						power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
+				}
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+			state_index++;
+			break;
+		}
+	}
+	/* last mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[state_index - 1].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index - 1;
+		rdev->pm.power_state[state_index - 1].default_clock_mode =
+			&rdev->pm.power_state[state_index - 1].clock_info[0];
+		rdev->pm.power_state[state_index - 1].flags &=
+			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+		rdev->pm.power_state[state_index - 1].misc = 0;
+		rdev->pm.power_state[state_index - 1].misc2 = 0;
+	}
+	return state_index;
+}
+
+static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev,
+							 ATOM_PPLIB_THERMALCONTROLLER *controller)
+{
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	/* add the i2c bus for thermal/fan chip */
+	if (controller->ucType > 0) {
+		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_SI;
+		} else if ((controller->ucType ==
+			    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+			   (controller->ucType ==
+			    ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
+			   (controller->ucType ==
+			    ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
+			DRM_INFO("Special thermal controller config\n");
+		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
+			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+				 pp_lib_thermal_controller_names[controller->ucType],
+				 controller->ucI2cAddress >> 1,
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
+			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+			if (rdev->pm.i2c_bus) {
+				struct i2c_board_info info = { };
+				const char *name = pp_lib_thermal_controller_names[controller->ucType];
+				info.addr = controller->ucI2cAddress >> 1;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+			}
+		} else {
+			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+				 controller->ucType,
+				 controller->ucI2cAddress >> 1,
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		}
+	}
+}
+
+static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
+						 u16 *vddc, u16 *vddci)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	u8 frev, crev;
+	u16 data_offset;
+	union firmware_info *firmware_info;
+
+	*vddc = 0;
+	*vddci = 0;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		firmware_info =
+			(union firmware_info *)(mode_info->atom_context->bios +
+						data_offset);
+		*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+		if ((frev == 2) && (crev >= 2))
+			*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
+	}
+}
+
+static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
+						       int state_index, int mode_index,
+						       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
+{
+	int j;
+	u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
+	u16 vddc, vddci;
+
+	radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
+
+	rdev->pm.power_state[state_index].misc = misc;
+	rdev->pm.power_state[state_index].misc2 = misc2;
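+	/* the caps field stores the lane count minus one */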
+	rdev->pm.power_state[state_index].pcie_lanes =
+		((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+		 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+	switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_PERFORMANCE;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+		if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+			rdev->pm.power_state[state_index].type =
+				POWER_STATE_TYPE_PERFORMANCE;
+		break;
+	}
+	rdev->pm.power_state[state_index].flags = 0;
+	if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+		rdev->pm.power_state[state_index].flags |=
+			RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+	if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index;
+		rdev->pm.power_state[state_index].default_clock_mode =
+			&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+		if ((rdev->family >= CHIP_BARTS) && !(rdev->flags & RADEON_IS_IGP)) {
+			/* NI chips post without MC ucode, so default clocks are strobe mode only */
+			rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
+			rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
+			rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+			rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
+		} else {
+			/* patch the table values with the default sclk/mclk from firmware info */
+			for (j = 0; j < mode_index; j++) {
+				rdev->pm.power_state[state_index].clock_info[j].mclk =
+					rdev->clock.default_mclk;
+				rdev->pm.power_state[state_index].clock_info[j].sclk =
+					rdev->clock.default_sclk;
+				if (vddc)
+					rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+						vddc;
+			}
+		}
+	}
+}
+
+static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
+						   int state_index, int mode_index,
+						   union pplib_clock_info *clock_info)
+{
+	u32 sclk, mclk;
+	u16 vddc;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (rdev->family >= CHIP_PALM) {
+			sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+			sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		} else {
+			sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
+			sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
+			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		}
+	} else if (rdev->family >= CHIP_TAHITI) {
+		sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
+		sclk |= clock_info->si.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
+		mclk |= clock_info->si.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_SW;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+			le16_to_cpu(clock_info->si.usVDDC);
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+			le16_to_cpu(clock_info->si.usVDDCI);
+	} else if (rdev->family >= CHIP_CEDAR) {
+		sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
+		sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
+		mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_SW;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+			le16_to_cpu(clock_info->evergreen.usVDDC);
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+			le16_to_cpu(clock_info->evergreen.usVDDCI);
+	} else {
+		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
+		sclk |= clock_info->r600.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
+		mclk |= clock_info->r600.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_SW;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+			le16_to_cpu(clock_info->r600.usVDDC);
+	}
+
+	/* patch up vddc if necessary */
+	switch (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage) {
+	case ATOM_VIRTUAL_VOLTAGE_ID0:
+	case ATOM_VIRTUAL_VOLTAGE_ID1:
+	case ATOM_VIRTUAL_VOLTAGE_ID2:
+	case ATOM_VIRTUAL_VOLTAGE_ID3:
+		if (radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC,
+					     rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage,
+					     &vddc) == 0)
+			rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
+		break;
+	default:
+		break;
+	}
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		/* skip invalid modes */
+		if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
+			return false;
+	} else {
+		/* skip invalid modes */
+		if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+		    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+			return false;
+	}
+	return true;
+}
+
+static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j;
+	int state_index = 0, mode_index = 0;
+	union pplib_clock_info *clock_info;
+	bool valid;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+	if (power_info->pplib.ucNumStates == 0)
+		return state_index;
+	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+				       power_info->pplib.ucNumStates, GFP_KERNEL);
+	if (!rdev->pm.power_state)
+		return state_index;
+	/* first mode is usually default, followed by low to high */
+	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
+		mode_index = 0;
+		power_state = (union pplib_power_state *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
+			 i * power_info->pplib.ucStateEntrySize);
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+			 (power_state->v1.ucNonClockStateIndex *
+			  power_info->pplib.ucNonClockSize));
+		rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
+							     ((power_info->pplib.ucStateEntrySize - 1) ?
+							      (power_info->pplib.ucStateEntrySize - 1) : 1),
+							     GFP_KERNEL);
+		if (!rdev->pm.power_state[i].clock_info)
+			return state_index;
+		if (power_info->pplib.ucStateEntrySize - 1) {
+			for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+				clock_info = (union pplib_clock_info *)
+					(mode_info->atom_context->bios + data_offset +
+					 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+					 (power_state->v1.ucClockStateIndices[j] *
+					  power_info->pplib.ucClockInfoSize));
+				valid = radeon_atombios_parse_pplib_clock_info(rdev,
+									       state_index, mode_index,
+									       clock_info);
+				if (valid)
+					mode_index++;
+			}
+		} else {
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				rdev->clock.default_mclk;
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				rdev->clock.default_sclk;
+			mode_index++;
+		}
+		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+		if (mode_index) {
+			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+								   non_clock_info);
+			state_index++;
+		}
+	}
+	/* if multiple clock modes, mark the lowest as no display */
+	for (i = 0; i < state_index; i++) {
+		if (rdev->pm.power_state[i].num_clock_modes > 1)
+			rdev->pm.power_state[i].clock_info[0].flags |=
+				RADEON_PM_MODE_NO_DISPLAY;
+	}
+	/* first mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[0].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = 0;
+		rdev->pm.power_state[0].default_clock_mode =
+			&rdev->pm.power_state[0].clock_info[0];
+	}
+	return state_index;
+}
+
+static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, non_clock_array_index, clock_array_index;
+	int state_index = 0, mode_index = 0;
+	union pplib_clock_info *clock_info;
+	struct _StateArray *state_array;
+	struct _ClockInfoArray *clock_info_array;
+	struct _NonClockInfoArray *non_clock_info_array;
+	bool valid;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	u8 *power_state_offset;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
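+	/*
+	 * Format 6 splits states, clock info and non-clock info into
+	 * three separately indexed arrays.
+	 */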
+	state_array = (struct _StateArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
+	clock_info_array = (struct _ClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+	non_clock_info_array = (struct _NonClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+	if (state_array->ucNumEntries == 0)
+		return state_index;
+	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+				       state_array->ucNumEntries, GFP_KERNEL);
+	if (!rdev->pm.power_state)
+		return state_index;
+	power_state_offset = (u8 *)state_array->states;
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		mode_index = 0;
+		power_state = (union pplib_power_state *)power_state_offset;
+		non_clock_array_index = power_state->v2.nonClockInfoIndex;
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+		rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
+							     (power_state->v2.ucNumDPMLevels ?
+							      power_state->v2.ucNumDPMLevels : 1),
+							     GFP_KERNEL);
+		if (!rdev->pm.power_state[i].clock_info)
+			return state_index;
+		if (power_state->v2.ucNumDPMLevels) {
+			for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+				clock_array_index = power_state->v2.clockInfoIndex[j];
+				clock_info = (union pplib_clock_info *)
+					&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+				valid = radeon_atombios_parse_pplib_clock_info(rdev,
+									       state_index, mode_index,
+									       clock_info);
+				if (valid)
+					mode_index++;
+			}
+		} else {
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				rdev->clock.default_mclk;
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				rdev->clock.default_sclk;
+			mode_index++;
+		}
+		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+		if (mode_index) {
+			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+								   non_clock_info);
+			state_index++;
+		}
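+		/* each state entry is 2 header bytes plus one index per DPM level */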
+		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+	}
+	/* if multiple clock modes, mark the lowest as no display */
+	for (i = 0; i < state_index; i++) {
+		if (rdev->pm.power_state[i].num_clock_modes > 1)
+			rdev->pm.power_state[i].clock_info[0].flags |=
+				RADEON_PM_MODE_NO_DISPLAY;
+	}
+	/* first mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[0].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = 0;
+		rdev->pm.power_state[0].default_clock_mode =
+			&rdev->pm.power_state[0].clock_info[0];
+	}
+	return state_index;
+}
+
+void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+	int state_index = 0;
+
+	rdev->pm.default_power_state_index = -1;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		switch (frev) {
+		case 1:
+		case 2:
+		case 3:
+			state_index = radeon_atombios_parse_power_table_1_3(rdev);
+			break;
+		case 4:
+		case 5:
+			state_index = radeon_atombios_parse_power_table_4_5(rdev);
+			break;
+		case 6:
+			state_index = radeon_atombios_parse_power_table_6(rdev);
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (state_index == 0) {
+		rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
+		if (rdev->pm.power_state) {
+			rdev->pm.power_state[0].clock_info =
+				kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+			if (rdev->pm.power_state[0].clock_info) {
+				/* add the default mode */
+				rdev->pm.power_state[state_index].type =
+					POWER_STATE_TYPE_DEFAULT;
+				rdev->pm.power_state[state_index].num_clock_modes = 1;
+				rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+				rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+				rdev->pm.power_state[state_index].default_clock_mode =
+					&rdev->pm.power_state[state_index].clock_info[0];
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+				rdev->pm.power_state[state_index].pcie_lanes = 16;
+				rdev->pm.default_power_state_index = state_index;
+				rdev->pm.power_state[state_index].flags = 0;
+				state_index++;
+			}
+		}
+	}
+
+	rdev->pm.num_power_states = state_index;
+
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
+	if (rdev->pm.default_power_state_index >= 0)
+		rdev->pm.current_vddc =
+			rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+	else
+		rdev->pm.current_vddc = 0;
+}
+
+union get_clock_dividers {
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
+	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
+};
+
+int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
+				   u8 clock_type,
+				   u32 clock,
+				   bool strobe_mode,
+				   struct atom_clock_dividers *dividers)
+{
+	union get_clock_dividers args;
+	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
+	u8 frev, crev;
+
+	memset(&args, 0, sizeof(args));
+	memset(dividers, 0, sizeof(struct atom_clock_dividers));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (crev) {
+	case 1:
+		/* r4xx, r5xx */
+		args.v1.ucAction = clock_type;
+		args.v1.ulClock = cpu_to_le32(clock);	/* 10 kHz */
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		dividers->post_div = args.v1.ucPostDiv;
+		dividers->fb_div = args.v1.ucFbDiv;
+		dividers->enable_post_div = true;
+		break;
+	case 2:
+	case 3:
+		/* r6xx, r7xx, evergreen, ni */
+		if (rdev->family <= CHIP_RV770) {
+			args.v2.ucAction = clock_type;
+			args.v2.ulClock = cpu_to_le32(clock);	/* 10 kHz */
+
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+			dividers->post_div = args.v2.ucPostDiv;
+			dividers->fb_div = le16_to_cpu(args.v2.usFbDiv);
+			dividers->ref_div = args.v2.ucAction;
+			if (rdev->family == CHIP_RV770) {
+				dividers->enable_post_div = (le32_to_cpu(args.v2.ulClock) & (1 << 24)) ?
+					true : false;
+				dividers->vco_mode = (le32_to_cpu(args.v2.ulClock) & (1 << 25)) ? 1 : 0;
+			} else
+				dividers->enable_post_div = (dividers->fb_div & 1) ? true : false;
+		} else {
+			if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
+				args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+				dividers->post_div = args.v3.ucPostDiv;
+				dividers->enable_post_div = (args.v3.ucCntlFlag &
+							     ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+				dividers->enable_dithen = (args.v3.ucCntlFlag &
+							   ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+				dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+				dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
+				dividers->ref_div = args.v3.ucRefDiv;
+				dividers->vco_mode = (args.v3.ucCntlFlag &
+						      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+			} else {
+				args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
+				if (strobe_mode)
+					args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
+
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+				dividers->post_div = args.v5.ucPostDiv;
+				dividers->enable_post_div = (args.v5.ucCntlFlag &
+							     ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
+				dividers->enable_dithen = (args.v5.ucCntlFlag &
+							   ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
+				dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
+				dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
+				dividers->ref_div = args.v5.ucRefDiv;
+				dividers->vco_mode = (args.v5.ucCntlFlag &
+						      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
+			}
+		}
+		break;
+	case 4:
+		/* fusion */
+		args.v4.ulClock = cpu_to_le32(clock);	/* 10 kHz */
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		dividers->post_div = args.v4.ucPostDiv;
+		dividers->real_clock = le32_to_cpu(args.v4.ulClock);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
+{
+	DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
+
+	args.ucEnable = enable;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
+{
+	GET_ENGINE_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	return le32_to_cpu(args.ulReturnEngineClock);
+}
+
+uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
+{
+	GET_MEMORY_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	return le32_to_cpu(args.ulReturnMemoryClock);
+}
+
+void radeon_atom_set_engine_clock(struct radeon_device *rdev,
+				  uint32_t eng_clock)
+{
+	SET_ENGINE_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
+
+	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 kHz */
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+void radeon_atom_set_memory_clock(struct radeon_device *rdev,
+				  uint32_t mem_clock)
+{
+	SET_MEMORY_CLOCK_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 kHz */
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
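+/*
+ * Usage sketch (illustrative only, not part of the original file):
+ * read the current engine clock back and bump it with the helpers
+ * above.  Values are in 10 kHz units, so 60000 corresponds to 600 MHz.
+ */
+#if 0 /* example only */
+static void radeon_example_bump_engine_clock(struct radeon_device *rdev)
+{
+	uint32_t sclk = radeon_atom_get_engine_clock(rdev);
+
+	if (sclk < 60000)	/* below 600 MHz */
+		radeon_atom_set_engine_clock(rdev, 60000);
+}
+#endif
+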
+union set_voltage {
+	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
+	struct _SET_VOLTAGE_PARAMETERS v1;
+	struct _SET_VOLTAGE_PARAMETERS_V2 v2;
+	struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
+};
+
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev, volt_index = voltage_level;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	/* 0xff01 is a flag rather than an actual voltage */
+	if (voltage_level == 0xff01)
+		return;
+
+	switch (crev) {
+	case 1:
+		args.v1.ucVoltageType = voltage_type;
+		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
+		args.v1.ucVoltageIndex = volt_index;
+		break;
+	case 2:
+		args.v2.ucVoltageType = voltage_type;
+		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
+		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
+		break;
+	case 3:
+		args.v3.ucVoltageType = voltage_type;
+		args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
+		args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
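+/*
+ * Usage sketch (illustrative only): program the core (VDDC) voltage.
+ * SET_VOLTAGE_TYPE_ASIC_VDDC comes from atombios.h and the level is
+ * normally expressed in mV; note that 0xff01 is rejected above as a
+ * flag value rather than a real voltage.
+ */
+#if 0 /* example only */
+static void radeon_example_set_vddc(struct radeon_device *rdev)
+{
+	radeon_atom_set_voltage(rdev, 1100, SET_VOLTAGE_TYPE_ASIC_VDDC);
+}
+#endif
+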
+static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
+				    u16 voltage_id, u16 *voltage)
+{
+	union set_voltage args;
+	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+	u8 frev, crev;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return -EINVAL;
+
+	switch (crev) {
+	case 1:
+		return -EINVAL;
+	case 2:
+		args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+		args.v2.ucVoltageMode = 0;
+		args.v2.usVoltageLevel = 0;
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*voltage = le16_to_cpu(args.v2.usVoltageLevel);
+		break;
+	case 3:
+		args.v3.ucVoltageType = voltage_type;
+		args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
+		args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		*voltage = le16_to_cpu(args.v3.usVoltageLevel);
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t bios_2_scratch, bios_6_scratch;
+
+	if (rdev->family >= CHIP_R600) {
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+	} else {
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+	}
+
+	/* let the bios control the backlight */
+	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
+
+	/* tell the bios not to handle mode switching */
+	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
+
+	/* clear the vbios dpms state */
+	if (ASIC_IS_DCE4(rdev))
+		bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
+
+	if (rdev->family >= CHIP_R600) {
+		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+	} else {
+		WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+	}
+
+}
+
+void radeon_save_bios_scratch_regs(struct radeon_device *rdev)
+{
+	uint32_t scratch_reg;
+	int i;
+
+	if (rdev->family >= CHIP_R600)
+		scratch_reg = R600_BIOS_0_SCRATCH;
+	else
+		scratch_reg = RADEON_BIOS_0_SCRATCH;
+
+	for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++)
+		rdev->bios_scratch[i] = RREG32(scratch_reg + (i * 4));
+}
+
+void radeon_restore_bios_scratch_regs(struct radeon_device *rdev)
+{
+	uint32_t scratch_reg;
+	int i;
+
+	if (rdev->family >= CHIP_R600)
+		scratch_reg = R600_BIOS_0_SCRATCH;
+	else
+		scratch_reg = RADEON_BIOS_0_SCRATCH;
+
+	for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++)
+		WREG32(scratch_reg + (i * 4), rdev->bios_scratch[i]);
+}
+
+void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t bios_6_scratch;
+
+	if (rdev->family >= CHIP_R600)
+		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+	else
+		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+	if (lock) {
+		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
+		bios_6_scratch &= ~ATOM_S6_ACC_MODE;
+	} else {
+		bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
+		bios_6_scratch |= ATOM_S6_ACC_MODE;
+	}
+
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+	else
+		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
+
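+/*
+ * Usage sketch (illustrative only): bracket a reconfiguration so the
+ * VBIOS leaves the outputs alone while the driver touches them.
+ */
+#if 0 /* example only */
+static void radeon_example_reprogram_output(struct drm_encoder *encoder)
+{
+	radeon_atom_output_lock(encoder, true);
+	/* ... reprogram the CRTC/encoder here ... */
+	radeon_atom_output_lock(encoder, false);
+}
+#endif
+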
+/* at some point we may want to break this out into individual functions */
+void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+				       struct drm_encoder *encoder,
+				       bool connected)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector =
+	    to_radeon_connector(connector);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch;
+
+	if (rdev->family >= CHIP_R600) {
+		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+		bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
+		bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
+	} else {
+		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+		bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
+		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+	}
+
+	if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("TV1 connected\n");
+			bios_3_scratch |= ATOM_S3_TV1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_TV1;
+		} else {
+			DRM_DEBUG_KMS("TV1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_TV1_MASK;
+			bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CV connected\n");
+			bios_3_scratch |= ATOM_S3_CV_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_CV;
+		} else {
+			DRM_DEBUG_KMS("CV disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_CV_MASK;
+			bios_3_scratch &= ~ATOM_S3_CV_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("LCD1 connected\n");
+			bios_0_scratch |= ATOM_S0_LCD1;
+			bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
+		} else {
+			DRM_DEBUG_KMS("LCD1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_LCD1;
+			bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT1 connected\n");
+			bios_0_scratch |= ATOM_S0_CRT1_COLOR;
+			bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
+		} else {
+			DRM_DEBUG_KMS("CRT1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
+			bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT2 connected\n");
+			bios_0_scratch |= ATOM_S0_CRT2_COLOR;
+			bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
+		} else {
+			DRM_DEBUG_KMS("CRT2 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
+			bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP1 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP1;
+			bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
+		} else {
+			DRM_DEBUG_KMS("DFP1 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP1;
+			bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP2 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP2;
+			bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
+		} else {
+			DRM_DEBUG_KMS("DFP2 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP2;
+			bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP3 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP3;
+			bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
+		} else {
+			DRM_DEBUG_KMS("DFP3 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP3;
+			bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP4 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP4;
+			bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
+		} else {
+			DRM_DEBUG_KMS("DFP4 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP4;
+			bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP5 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP5;
+			bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
+		} else {
+			DRM_DEBUG_KMS("DFP5 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP5;
+			bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP6 connected\n");
+			bios_0_scratch |= ATOM_S0_DFP6;
+			bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
+			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
+		} else {
+			DRM_DEBUG_KMS("DFP6 disconnected\n");
+			bios_0_scratch &= ~ATOM_S0_DFP6;
+			bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
+			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
+		}
+	}
+
+	if (rdev->family >= CHIP_R600) {
+		WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
+		WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
+		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+	} else {
+		WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
+		WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
+		WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+	}
+}
+
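+/*
+ * Usage sketch (illustrative only): connector detection code reports
+ * hotplug results back to the VBIOS through the scratch registers,
+ * e.g. after a successful probe:
+ *
+ *	radeon_atombios_connected_scratch_regs(connector, encoder, true);
+ */
+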
+void
+radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_3_scratch;
+
+	if (ASIC_IS_DCE4(rdev))
+		return;
+
+	if (rdev->family >= CHIP_R600)
+		bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
+	else
+		bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
+
+	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_TV1_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 18);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_CV_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 24);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_CRT1_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 16);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_CRT2_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 20);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_LCD1_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 17);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_DFP1_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 19);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_DFP2_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 23);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
+		bios_3_scratch &= ~ATOM_S3_DFP3_CRTC_ACTIVE;
+		bios_3_scratch |= (crtc << 25);
+	}
+
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
+	else
+		WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
+}
+
+void
+radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_2_scratch;
+
+	if (ASIC_IS_DCE4(rdev))
+		return;
+
+	if (rdev->family >= CHIP_R600)
+		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+	else
+		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
+
+	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_TV1_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_TV1_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_CV_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_CV_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_CRT1_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_CRT1_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_CRT2_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_CRT2_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_LCD1_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_LCD1_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP1_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP1_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP2_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP2_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP3_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP3_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP4_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP4_DPMS_STATE;
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) {
+		if (on)
+			bios_2_scratch &= ~ATOM_S2_DFP5_DPMS_STATE;
+		else
+			bios_2_scratch |= ATOM_S2_DFP5_DPMS_STATE;
+	}
+
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+	else
+		WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/linux-imx/drivers/gpu/drm/radeon/radeon_atpx_handler.c
new file mode 100644
index 0000000..8c44ef5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2010 Red Hat Inc.
+ * Author : Dave Airlie <airlied@redhat.com>
+ *
+ * Licensed under GPLv2
+ *
+ * ATPX support for hybrid systems with both Intel and ATI GPUs
+ */
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <linux/pci.h>
+
+#include "radeon_acpi.h"
+
+struct radeon_atpx_functions {
+	bool px_params;
+	bool power_cntl;
+	bool disp_mux_cntl;
+	bool i2c_mux_cntl;
+	bool switch_start;
+	bool switch_end;
+	bool disp_connectors_mapping;
+	bool disp_detection_ports;
+};
+
+struct radeon_atpx {
+	acpi_handle handle;
+	struct radeon_atpx_functions functions;
+};
+
+static struct radeon_atpx_priv {
+	bool atpx_detected;
+	/* ACPI handle for the device that also exposes the ATPX method */
+	acpi_handle dhandle;
+	struct radeon_atpx atpx;
+} radeon_atpx_priv;
+
+struct atpx_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
+
+struct atpx_px_params {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u32 valid_flags;	/* which flags are valid */
+	u32 flags;		/* flags */
+} __packed;
+
+struct atpx_power_control {
+	u16 size;
+	u8 dgpu_state;
+} __packed;
+
+struct atpx_mux {
+	u16 size;
+	u16 mux;
+} __packed;
+
+/**
+ * radeon_atpx_call - call an ATPX method
+ *
+ * @handle: acpi handle
+ * @function: the ATPX function to execute
+ * @params: ATPX function params
+ *
+ * Executes the requested ATPX function (all asics).
+ * Returns a pointer to the acpi output buffer.
+ */
+static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
+					   struct acpi_buffer *params)
+{
+	acpi_status status;
+	union acpi_object atpx_arg_elements[2];
+	struct acpi_object_list atpx_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	atpx_arg.count = 2;
+	atpx_arg.pointer = &atpx_arg_elements[0];
+
+	atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atpx_arg_elements[0].integer.value = function;
+
+	if (params) {
+		atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
+		atpx_arg_elements[1].buffer.length = params->length;
+		atpx_arg_elements[1].buffer.pointer = params->pointer;
+	} else {
+		/* We need a second fake parameter */
+		atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
+		atpx_arg_elements[1].integer.value = 0;
+	}
+
+	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
+
+	/* Fail only if calling the method fails and ATPX is supported */
+	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+		printk("failed to evaluate ATPX got %s\n",
+		       acpi_format_exception(status));
+		kfree(buffer.pointer);
+		return NULL;
+	}
+
+	return buffer.pointer;
+}
+
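+/*
+ * Usage sketch (illustrative only): call an ATPX function that takes
+ * no input buffer and free the returned ACPI object, mirroring what
+ * radeon_atpx_verify_interface() below does.
+ */
+#if 0 /* example only */
+static void radeon_example_atpx_query(struct radeon_atpx *atpx)
+{
+	union acpi_object *info;
+
+	info = radeon_atpx_call(atpx->handle,
+				ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return;
+	/* info->buffer.pointer holds the output structure */
+	kfree(info);
+}
+#endif
+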
+/**
+ * radeon_atpx_parse_functions - parse supported functions
+ *
+ * @f: supported functions struct
+ * @mask: supported functions mask from ATPX
+ *
+ * Use the supported functions mask from ATPX function
+ * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions
+ * are supported (all asics).
+ */
+static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mask)
+{
+	f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED;
+	f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED;
+	f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED;
+	f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED;
+	f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
+	f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
+	f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
+	f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
+}
+
+/**
+ * radeon_atpx_validate - validate ATPX functions
+ *
+ * @atpx: radeon atpx struct
+ *
+ * Validate that required functions are enabled (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int radeon_atpx_validate(struct radeon_atpx *atpx)
+{
+	/* make sure required functions are enabled */
+	/* dGPU power control is required */
+	atpx->functions.power_cntl = true;
+
+	if (atpx->functions.px_params) {
+		union acpi_object *info;
+		struct atpx_px_params output;
+		size_t size;
+		u32 valid_bits;
+
+		info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
+		if (!info)
+			return -EIO;
+
+		memset(&output, 0, sizeof(output));
+
+		size = *(u16 *) info->buffer.pointer;
+		if (size < 10) {
+			printk("ATPX buffer is too small: %zu\n", size);
+			kfree(info);
+			return -EINVAL;
+		}
+		size = min(sizeof(output), size);
+
+		memcpy(&output, info->buffer.pointer, size);
+
+		valid_bits = output.flags & output.valid_flags;
+		/* if separate mux flag is set, mux controls are required */
+		if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+			atpx->functions.i2c_mux_cntl = true;
+			atpx->functions.disp_mux_cntl = true;
+		}
+		/* if any outputs are muxed, mux controls are required */
+		if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+				  ATPX_TV_SIGNAL_MUXED |
+				  ATPX_DFP_SIGNAL_MUXED))
+			atpx->functions.disp_mux_cntl = true;
+
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * radeon_atpx_verify_interface - verify ATPX
+ *
+ * @atpx: radeon atpx struct
+ *
+ * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
+ * to initialize ATPX and determine what features are supported
+ * (all asics).
+ * returns 0 on success, error on failure.
+ */
+static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
+{
+	union acpi_object *info;
+	struct atpx_verify_interface output;
+	size_t size;
+	int err = 0;
+
+	info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
+	if (!info)
+		return -EIO;
+
+	memset(&output, 0, sizeof(output));
+
+	size = *(u16 *) info->buffer.pointer;
+	if (size < 8) {
+		printk("ATPX buffer is too small: %zu\n", size);
+		err = -EINVAL;
+		goto out;
+	}
+	size = min(sizeof(output), size);
+
+	memcpy(&output, info->buffer.pointer, size);
+
+	/* TODO: check version? */
+	printk("ATPX version %u, functions 0x%08x\n",
+	       output.version, output.function_bits);
+
+	radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
+
+out:
+	kfree(info);
+	return err;
+}
+
+/**
+ * radeon_atpx_set_discrete_state - power up/down discrete GPU
+ *
+ * @atpx: atpx info struct
+ * @state: discrete GPU state (0 = power down, 1 = power up)
+ *
+ * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to
+ * power down/up the discrete GPU (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_power_control input;
+
+	if (atpx->functions.power_cntl) {
+		input.size = 3;
+		input.dgpu_state = state;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_POWER_CONTROL,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * radeon_atpx_switch_disp_mux - switch display mux
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to
+ * switch the display mux between the discrete GPU and integrated GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_disp_mux(struct radeon_atpx *atpx, u16 mux_id)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.disp_mux_cntl) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_DISPLAY_MUX_CONTROL,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * radeon_atpx_switch_i2c_mux - switch i2c/hpd mux
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to
+ * switch the i2c/hpd mux between the discrete GPU and integrated GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_i2c_mux(struct radeon_atpx *atpx, u16 mux_id)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.i2c_mux_cntl) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_I2C_MUX_CONTROL,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * radeon_atpx_switch_start - notify the sbios of a GPU switch
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX
+ * function to notify the sbios that a switch between the discrete GPU and
+ * integrated GPU has begun (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_start(struct radeon_atpx *atpx, u16 mux_id)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.switch_start) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * radeon_atpx_switch_end - notify the sbios of a GPU switch
+ *
+ * @atpx: atpx info struct
+ * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
+ *
+ * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX
+ * function to notify the sbios that a switch between the discrete GPU and
+ * integrated GPU has ended (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switch_end(struct radeon_atpx *atpx, u16 mux_id)
+{
+	struct acpi_buffer params;
+	union acpi_object *info;
+	struct atpx_mux input;
+
+	if (atpx->functions.switch_end) {
+		input.size = 4;
+		input.mux = mux_id;
+		params.length = input.size;
+		params.pointer = &input;
+		info = radeon_atpx_call(atpx->handle,
+					ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION,
+					&params);
+		if (!info)
+			return -EIO;
+		kfree(info);
+	}
+	return 0;
+}
+
+/**
+ * radeon_atpx_switchto - switch to the requested GPU
+ *
+ * @id: GPU to switch to
+ *
+ * Execute the necessary ATPX functions to switch between the discrete GPU and
+ * integrated GPU (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
+{
+	u16 gpu_id;
+
+	if (id == VGA_SWITCHEROO_IGD)
+		gpu_id = ATPX_INTEGRATED_GPU;
+	else
+		gpu_id = ATPX_DISCRETE_GPU;
+
+	radeon_atpx_switch_start(&radeon_atpx_priv.atpx, gpu_id);
+	radeon_atpx_switch_disp_mux(&radeon_atpx_priv.atpx, gpu_id);
+	radeon_atpx_switch_i2c_mux(&radeon_atpx_priv.atpx, gpu_id);
+	radeon_atpx_switch_end(&radeon_atpx_priv.atpx, gpu_id);
+
+	return 0;
+}
+
+/**
+ * radeon_atpx_power_state - power down/up the requested GPU
+ *
+ * @id: GPU to power down/up
+ * @state: requested power state (0 = off, 1 = on)
+ *
+ * Execute the necessary ATPX function to power down/up the discrete GPU
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
+				   enum vga_switcheroo_state state)
+{
+	/* on w500 ACPI can't change intel gpu state */
+	if (id == VGA_SWITCHEROO_IGD)
+		return 0;
+
+	radeon_atpx_set_discrete_state(&radeon_atpx_priv.atpx, state);
+	return 0;
+}
+
+/**
+ * radeon_atpx_pci_probe_handle - look up the ATPX handle
+ *
+ * @pdev: pci device
+ *
+ * Look up the ATPX handles (all asics).
+ * Returns true if the handles are found, false if not.
+ */
+static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
+{
+	acpi_handle dhandle, atpx_handle;
+	acpi_status status;
+
+	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+	if (!dhandle)
+		return false;
+
+	status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
+	if (ACPI_FAILURE(status))
+		return false;
+
+	radeon_atpx_priv.dhandle = dhandle;
+	radeon_atpx_priv.atpx.handle = atpx_handle;
+	return true;
+}
+
+/**
+ * radeon_atpx_init - verify the ATPX interface
+ *
+ * Verify the ATPX interface (all asics).
+ * Returns 0 on success, error on failure.
+ */
+static int radeon_atpx_init(void)
+{
+	int r;
+
+	/* set up the ATPX handle */
+	r = radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
+	if (r)
+		return r;
+
+	/* validate the atpx setup */
+	r = radeon_atpx_validate(&radeon_atpx_priv.atpx);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+/**
+ * radeon_atpx_get_client_id - get the client id
+ *
+ * @pdev: pci device
+ *
+ * Look up whether we are the integrated or discrete GPU (all asics).
+ * Returns the client id.
+ */
+static int radeon_atpx_get_client_id(struct pci_dev *pdev)
+{
+	if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+		return VGA_SWITCHEROO_IGD;
+	else
+		return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler radeon_atpx_handler = {
+	.switchto = radeon_atpx_switchto,
+	.power_state = radeon_atpx_power_state,
+	.init = radeon_atpx_init,
+	.get_client_id = radeon_atpx_get_client_id,
+};
+
+/**
+ * radeon_atpx_detect - detect whether we have PX
+ *
+ * Check if we have a PX system (all asics).
+ * Returns true if we have a PX system, false if not.
+ */
+static bool radeon_atpx_detect(void)
+{
+	char acpi_method_name[255] = { 0 };
+	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+	struct pci_dev *pdev = NULL;
+	bool has_atpx = false;
+	int vga_count = 0;
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		vga_count++;
+
+		has_atpx |= radeon_atpx_pci_probe_handle(pdev);
+	}
+
+	/* some newer PX laptops mark the dGPU as a non-VGA display device */
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+		vga_count++;
+
+		has_atpx |= radeon_atpx_pci_probe_handle(pdev);
+	}
+
+	if (has_atpx && vga_count == 2) {
+		acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
+		printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
+		       acpi_method_name);
+		radeon_atpx_priv.atpx_detected = true;
+		return true;
+	}
+	return false;
+}
+
+/**
+ * radeon_register_atpx_handler - register with vga_switcheroo
+ *
+ * Register the PX callbacks with vga_switcheroo (all asics).
+ */
+void radeon_register_atpx_handler(void)
+{
+	bool r;
+
+	/* detect if we have any ATPX + 2 VGA in the system */
+	r = radeon_atpx_detect();
+	if (!r)
+		return;
+
+	vga_switcheroo_register_handler(&radeon_atpx_handler);
+}
+
+/**
+ * radeon_unregister_atpx_handler - unregister with vga_switcheroo
+ *
+ * Unregister the PX callbacks with vga_switcheroo (all asics).
+ */
+void radeon_unregister_atpx_handler(void)
+{
+	vga_switcheroo_unregister_handler();
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_benchmark.c b/linux-imx/drivers/gpu/drm/radeon/radeon_benchmark.c
new file mode 100644
index 0000000..6e05a2e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+#define RADEON_BENCHMARK_COPY_BLIT 1
+#define RADEON_BENCHMARK_COPY_DMA  0
+
+#define RADEON_BENCHMARK_ITERATIONS 1024
+#define RADEON_BENCHMARK_COMMON_MODES_N 17
+
+static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
+				    uint64_t saddr, uint64_t daddr,
+				    int flag, int n)
+{
+	unsigned long start_jiffies;
+	unsigned long end_jiffies;
+	struct radeon_fence *fence = NULL;
+	int i, r;
+
+	start_jiffies = jiffies;
+	for (i = 0; i < n; i++) {
+		switch (flag) {
+		case RADEON_BENCHMARK_COPY_DMA:
+			r = radeon_copy_dma(rdev, saddr, daddr,
+					    size / RADEON_GPU_PAGE_SIZE,
+					    &fence);
+			break;
+		case RADEON_BENCHMARK_COPY_BLIT:
+			r = radeon_copy_blit(rdev, saddr, daddr,
+					     size / RADEON_GPU_PAGE_SIZE,
+					     &fence);
+			break;
+		default:
+			DRM_ERROR("Unknown copy method\n");
+			r = -EINVAL;
+		}
+		if (r)
+			goto exit_do_move;
+		r = radeon_fence_wait(fence, false);
+		if (r)
+			goto exit_do_move;
+		radeon_fence_unref(&fence);
+	}
+	end_jiffies = jiffies;
+	r = jiffies_to_msecs(end_jiffies - start_jiffies);
+
+exit_do_move:
+	if (fence)
+		radeon_fence_unref(&fence);
+	return r;
+}
+
+
+static void radeon_benchmark_log_results(int n, unsigned size,
+					 unsigned int time,
+					 unsigned sdomain, unsigned ddomain,
+					 char *kind)
+{
+	unsigned int throughput = (n * (size >> 10)) / time;
+	DRM_INFO("radeon: %s %u bo moves of %u kB from"
+		 " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
+		 kind, n, size >> 10, sdomain, ddomain, time,
+		 throughput * 8, throughput);
+}
+
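+/*
+ * Worked example of the arithmetic above (illustrative): n = 1024
+ * moves of a 1 MiB buffer (size >> 10 = 1024 KiB) finishing in 500 ms
+ * give (1024 * 1024) / 500 = 2097 KiB/ms, which is logged as roughly
+ * 2097 MB/s and 16776 Mb/s.
+ */
+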
+static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
+				  unsigned sdomain, unsigned ddomain)
+{
+	struct radeon_bo *dobj = NULL;
+	struct radeon_bo *sobj = NULL;
+	uint64_t saddr, daddr;
+	int r, n;
+	int time;
+
+	n = RADEON_BENCHMARK_ITERATIONS;
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
+	if (r) {
+		goto out_cleanup;
+	}
+	r = radeon_bo_reserve(sobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(sobj, sdomain, &saddr);
+	radeon_bo_unreserve(sobj);
+	if (r) {
+		goto out_cleanup;
+	}
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
+	if (r) {
+		goto out_cleanup;
+	}
+	r = radeon_bo_reserve(dobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(dobj, ddomain, &daddr);
+	radeon_bo_unreserve(dobj);
+	if (r) {
+		goto out_cleanup;
+	}
+
+	if (rdev->asic->copy.dma) {
+		time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+						RADEON_BENCHMARK_COPY_DMA, n);
+		if (time < 0)
+			goto out_cleanup;
+		if (time > 0)
+			radeon_benchmark_log_results(n, size, time,
+						     sdomain, ddomain, "dma");
+	}
+
+	if (rdev->asic->copy.blit) {
+		time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+						RADEON_BENCHMARK_COPY_BLIT, n);
+		if (time < 0)
+			goto out_cleanup;
+		if (time > 0)
+			radeon_benchmark_log_results(n, size, time,
+						     sdomain, ddomain, "blit");
+	}
+
+out_cleanup:
+	if (sobj) {
+		r = radeon_bo_reserve(sobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(sobj);
+			radeon_bo_unreserve(sobj);
+		}
+		radeon_bo_unref(&sobj);
+	}
+	if (dobj) {
+		r = radeon_bo_reserve(dobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(dobj);
+			radeon_bo_unreserve(dobj);
+		}
+		radeon_bo_unref(&dobj);
+	}
+
+	if (r) {
+		DRM_ERROR("Error while benchmarking BO move.\n");
+	}
+}
+
+void radeon_benchmark(struct radeon_device *rdev, int test_number)
+{
+	int i;
+	int common_modes[RADEON_BENCHMARK_COMMON_MODES_N] = {
+		640 * 480 * 4,
+		720 * 480 * 4,
+		800 * 600 * 4,
+		848 * 480 * 4,
+		1024 * 768 * 4,
+		1152 * 768 * 4,
+		1280 * 720 * 4,
+		1280 * 800 * 4,
+		1280 * 854 * 4,
+		1280 * 960 * 4,
+		1280 * 1024 * 4,
+		1440 * 900 * 4,
+		1400 * 1050 * 4,
+		1680 * 1050 * 4,
+		1600 * 1200 * 4,
+		1920 * 1080 * 4,
+		1920 * 1200 * 4
+	};
+
+	switch (test_number) {
+	case 1:
+		/* simple test, VRAM to GTT and GTT to VRAM */
+		radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
+				      RADEON_GEM_DOMAIN_VRAM);
+		radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
+				      RADEON_GEM_DOMAIN_GTT);
+		break;
+	case 2:
+		/* simple test, VRAM to VRAM */
+		radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
+				      RADEON_GEM_DOMAIN_VRAM);
+		break;
+	case 3:
+		/* GTT to VRAM, buffer size sweep, powers of 2 */
+		for (i = 1; i <= 16384; i <<= 1)
+			radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_GTT,
+					      RADEON_GEM_DOMAIN_VRAM);
+		break;
+	case 4:
+		/* VRAM to GTT, buffer size sweep, powers of 2 */
+		for (i = 1; i <= 16384; i <<= 1)
+			radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_VRAM,
+					      RADEON_GEM_DOMAIN_GTT);
+		break;
+	case 5:
+		/* VRAM to VRAM, buffer size sweep, powers of 2 */
+		for (i = 1; i <= 16384; i <<= 1)
+			radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_VRAM,
+					      RADEON_GEM_DOMAIN_VRAM);
+		break;
+	case 6:
+		/* GTT to VRAM, buffer size sweep, common modes */
+		for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+			radeon_benchmark_move(rdev, common_modes[i],
+					      RADEON_GEM_DOMAIN_GTT,
+					      RADEON_GEM_DOMAIN_VRAM);
+		break;
+	case 7:
+		/* VRAM to GTT, buffer size sweep, common modes */
+		for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+			radeon_benchmark_move(rdev, common_modes[i],
+					      RADEON_GEM_DOMAIN_VRAM,
+					      RADEON_GEM_DOMAIN_GTT);
+		break;
+	case 8:
+		/* VRAM to VRAM, buffer size sweep, common modes */
+		for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+			radeon_benchmark_move(rdev, common_modes[i],
+					      RADEON_GEM_DOMAIN_VRAM,
+					      RADEON_GEM_DOMAIN_VRAM);
+		break;
+
+	default:
+		DRM_ERROR("Unknown benchmark\n");
+	}
+}
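+
+/*
+ * Usage sketch (illustrative only): the driver normally triggers these
+ * benchmarks at init time from a module parameter; a direct call for
+ * test 3 (GTT to VRAM, power-of-two size sweep) would be:
+ *
+ *	radeon_benchmark(rdev, 3);
+ */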
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_bios.c b/linux-imx/drivers/gpu/drm/radeon/radeon_bios.c
new file mode 100644
index 0000000..b131520
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_bios.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+/*
+ * BIOS.
+ */
+
+/* If you boot an IGP board with a discrete card as the primary,
+ * the IGP rom is not accessible via the rom bar as the IGP rom is
+ * part of the system bios.  On boot, the system bios puts a
+ * copy of the igp rom at the start of vram if a discrete card is
+ * present.
+ */
+static bool igp_read_bios_from_vram(struct radeon_device *rdev)
+{
+	uint8_t __iomem *bios;
+	resource_size_t vram_base;
+	resource_size_t size = 256 * 1024; /* ??? */
+
+	if (!(rdev->flags & RADEON_IS_IGP))
+		if (!radeon_card_posted(rdev))
+			return false;
+
+	rdev->bios = NULL;
+	vram_base = pci_resource_start(rdev->pdev, 0);
+	bios = ioremap(vram_base, size);
+	if (!bios) {
+		return false;
+	}
+
+	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+		iounmap(bios);
+		return false;
+	}
+	rdev->bios = kmalloc(size, GFP_KERNEL);
+	if (rdev->bios == NULL) {
+		iounmap(bios);
+		return false;
+	}
+	memcpy_fromio(rdev->bios, bios, size);
+	iounmap(bios);
+	return true;
+}
+
+static bool radeon_read_bios(struct radeon_device *rdev)
+{
+	uint8_t __iomem *bios;
+	size_t size;
+
+	rdev->bios = NULL;
+	/* XXX: some cards may return 0 for rom size? ddx has a workaround */
+	bios = pci_map_rom(rdev->pdev, &size);
+	if (!bios) {
+		return false;
+	}
+
+	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+		pci_unmap_rom(rdev->pdev, bios);
+		return false;
+	}
+	rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+	if (rdev->bios == NULL) {
+		pci_unmap_rom(rdev->pdev, bios);
+		return false;
+	}
+	pci_unmap_rom(rdev->pdev, bios);
+	return true;
+}
+
+static bool radeon_read_platform_bios(struct radeon_device *rdev)
+{
+	uint8_t __iomem *bios;
+	size_t size;
+
+	rdev->bios = NULL;
+
+	bios = pci_platform_rom(rdev->pdev, &size);
+	if (!bios) {
+		return false;
+	}
+
+	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+		return false;
+	}
+	rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+	if (rdev->bios == NULL) {
+		return false;
+	}
+
+	return true;
+}
+
+#ifdef CONFIG_ACPI
+/* ATRM is used to get the BIOS on the discrete cards in
+ * dual-gpu systems.
+ */
+/* retrieve the ROM in 4k blocks */
+#define ATRM_BIOS_PAGE 4096
+/**
+ * radeon_atrm_call - fetch a chunk of the vbios
+ *
+ * @atrm_handle: acpi ATRM handle
+ * @bios: vbios image pointer
+ * @offset: offset of vbios image data to fetch
+ * @len: length of vbios image data to fetch
+ *
+ * Executes ATRM to fetch a chunk of the discrete
+ * vbios image on PX systems (all asics).
+ * Returns the length of the buffer fetched.
+ */
+static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+			    int offset, int len)
+{
+	acpi_status status;
+	union acpi_object atrm_arg_elements[2], *obj;
+	struct acpi_object_list atrm_arg;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+	atrm_arg.count = 2;
+	atrm_arg.pointer = &atrm_arg_elements[0];
+
+	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+	atrm_arg_elements[0].integer.value = offset;
+
+	atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+	atrm_arg_elements[1].integer.value = len;
+
+	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+	if (ACPI_FAILURE(status)) {
+		printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+		return -ENODEV;
+	}
+
+	obj = (union acpi_object *)buffer.pointer;
+	memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
+	len = obj->buffer.length;
+	kfree(buffer.pointer);
+	return len;
+}
+
+static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+	int ret;
+	int size = 256 * 1024;
+	int i;
+	struct pci_dev *pdev = NULL;
+	acpi_handle dhandle, atrm_handle;
+	acpi_status status;
+	bool found = false;
+
+	/* ATRM is for the discrete card only */
+	if (rdev->flags & RADEON_IS_IGP)
+		return false;
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+		if (!dhandle)
+			continue;
+
+		status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+		if (!ACPI_FAILURE(status)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+			dhandle = ACPI_HANDLE(&pdev->dev);
+			if (!dhandle)
+				continue;
+
+			status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+			if (!ACPI_FAILURE(status)) {
+				found = true;
+				break;
+			}
+		}
+	}
+
+	if (!found)
+		return false;
+
+	rdev->bios = kmalloc(size, GFP_KERNEL);
+	if (!rdev->bios) {
+		DRM_ERROR("Unable to allocate bios\n");
+		return false;
+	}
+
+	for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
+		ret = radeon_atrm_call(atrm_handle,
+				       rdev->bios,
+				       (i * ATRM_BIOS_PAGE),
+				       ATRM_BIOS_PAGE);
+		if (ret < ATRM_BIOS_PAGE)
+			break;
+	}
+
+	if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+		kfree(rdev->bios);
+		rdev->bios = NULL;
+		return false;
+	}
+	return true;
+}
+#else
+static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+	return false;
+}
+#endif
+
+static bool ni_read_disabled_bios(struct radeon_device *rdev)
+{
+	u32 bus_cntl;
+	u32 d1vga_control;
+	u32 d2vga_control;
+	u32 vga_render_control;
+	u32 rom_cntl;
+	bool r;
+
+	bus_cntl = RREG32(R600_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	rom_cntl = RREG32(R600_ROM_CNTL);
+
+	/* enable the rom */
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+	if (!ASIC_IS_NODCE(rdev)) {
+		/* Disable VGA mode */
+		WREG32(AVIVO_D1VGA_CONTROL,
+		       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+					  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+		WREG32(AVIVO_D2VGA_CONTROL,
+		       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+					  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+		WREG32(AVIVO_VGA_RENDER_CONTROL,
+		       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+	}
+	WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(R600_BUS_CNTL, bus_cntl);
+	if (!ASIC_IS_NODCE(rdev)) {
+		WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+		WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+		WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	}
+	WREG32(R600_ROM_CNTL, rom_cntl);
+	return r;
+}
+
+static bool r700_read_disabled_bios(struct radeon_device *rdev)
+{
+	uint32_t viph_control;
+	uint32_t bus_cntl;
+	uint32_t d1vga_control;
+	uint32_t d2vga_control;
+	uint32_t vga_render_control;
+	uint32_t rom_cntl;
+	uint32_t cg_spll_func_cntl = 0;
+	uint32_t cg_spll_status;
+	bool r;
+
+	viph_control = RREG32(RADEON_VIPH_CONTROL);
+	bus_cntl = RREG32(R600_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	rom_cntl = RREG32(R600_ROM_CNTL);
+
+	/* disable VIP */
+	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+	/* enable the rom */
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+	/* Disable VGA mode */
+	WREG32(AVIVO_D1VGA_CONTROL,
+	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_D2VGA_CONTROL,
+	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_VGA_RENDER_CONTROL,
+	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+	if (rdev->family == CHIP_RV730) {
+		cg_spll_func_cntl = RREG32(R600_CG_SPLL_FUNC_CNTL);
+
+		/* enable bypass mode */
+		WREG32(R600_CG_SPLL_FUNC_CNTL, (cg_spll_func_cntl |
+						R600_SPLL_BYPASS_EN));
+
+		/* wait for SPLL_CHG_STATUS to change to 1 */
+		cg_spll_status = 0;
+		while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
+			cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
+
+		WREG32(R600_ROM_CNTL, (rom_cntl & ~R600_SCK_OVERWRITE));
+	} else
+		WREG32(R600_ROM_CNTL, (rom_cntl | R600_SCK_OVERWRITE));
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	if (rdev->family == CHIP_RV730) {
+		WREG32(R600_CG_SPLL_FUNC_CNTL, cg_spll_func_cntl);
+
+		/* wait for SPLL_CHG_STATUS to change to 1 */
+		cg_spll_status = 0;
+		while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
+			cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
+	}
+	WREG32(RADEON_VIPH_CONTROL, viph_control);
+	WREG32(R600_BUS_CNTL, bus_cntl);
+	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	WREG32(R600_ROM_CNTL, rom_cntl);
+	return r;
+}
+
+static bool r600_read_disabled_bios(struct radeon_device *rdev)
+{
+	uint32_t viph_control;
+	uint32_t bus_cntl;
+	uint32_t d1vga_control;
+	uint32_t d2vga_control;
+	uint32_t vga_render_control;
+	uint32_t rom_cntl;
+	uint32_t general_pwrmgt;
+	uint32_t low_vid_lower_gpio_cntl;
+	uint32_t medium_vid_lower_gpio_cntl;
+	uint32_t high_vid_lower_gpio_cntl;
+	uint32_t ctxsw_vid_lower_gpio_cntl;
+	uint32_t lower_gpio_enable;
+	bool r;
+
+	viph_control = RREG32(RADEON_VIPH_CONTROL);
+	bus_cntl = RREG32(R600_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	rom_cntl = RREG32(R600_ROM_CNTL);
+	general_pwrmgt = RREG32(R600_GENERAL_PWRMGT);
+	low_vid_lower_gpio_cntl = RREG32(R600_LOW_VID_LOWER_GPIO_CNTL);
+	medium_vid_lower_gpio_cntl = RREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL);
+	high_vid_lower_gpio_cntl = RREG32(R600_HIGH_VID_LOWER_GPIO_CNTL);
+	ctxsw_vid_lower_gpio_cntl = RREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL);
+	lower_gpio_enable = RREG32(R600_LOWER_GPIO_ENABLE);
+
+	/* disable VIP */
+	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+	/* enable the rom */
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+	/* Disable VGA mode */
+	WREG32(AVIVO_D1VGA_CONTROL,
+	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_D2VGA_CONTROL,
+	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_VGA_RENDER_CONTROL,
+	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+	WREG32(R600_ROM_CNTL,
+	       ((rom_cntl & ~R600_SCK_PRESCALE_CRYSTAL_CLK_MASK) |
+		(1 << R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT) |
+		R600_SCK_OVERWRITE));
+
+	WREG32(R600_GENERAL_PWRMGT, (general_pwrmgt & ~R600_OPEN_DRAIN_PADS));
+	WREG32(R600_LOW_VID_LOWER_GPIO_CNTL,
+	       (low_vid_lower_gpio_cntl & ~0x400));
+	WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL,
+	       (medium_vid_lower_gpio_cntl & ~0x400));
+	WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL,
+	       (high_vid_lower_gpio_cntl & ~0x400));
+	WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL,
+	       (ctxsw_vid_lower_gpio_cntl & ~0x400));
+	WREG32(R600_LOWER_GPIO_ENABLE, (lower_gpio_enable | 0x400));
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(RADEON_VIPH_CONTROL, viph_control);
+	WREG32(R600_BUS_CNTL, bus_cntl);
+	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	WREG32(R600_ROM_CNTL, rom_cntl);
+	WREG32(R600_GENERAL_PWRMGT, general_pwrmgt);
+	WREG32(R600_LOW_VID_LOWER_GPIO_CNTL, low_vid_lower_gpio_cntl);
+	WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL, medium_vid_lower_gpio_cntl);
+	WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL, high_vid_lower_gpio_cntl);
+	WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL, ctxsw_vid_lower_gpio_cntl);
+	WREG32(R600_LOWER_GPIO_ENABLE, lower_gpio_enable);
+	return r;
+}
+
+static bool avivo_read_disabled_bios(struct radeon_device *rdev)
+{
+	uint32_t seprom_cntl1;
+	uint32_t viph_control;
+	uint32_t bus_cntl;
+	uint32_t d1vga_control;
+	uint32_t d2vga_control;
+	uint32_t vga_render_control;
+	uint32_t gpiopad_a;
+	uint32_t gpiopad_en;
+	uint32_t gpiopad_mask;
+	bool r;
+
+	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
+	viph_control = RREG32(RADEON_VIPH_CONTROL);
+	bus_cntl = RREG32(RV370_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+	gpiopad_en = RREG32(RADEON_GPIOPAD_EN);
+	gpiopad_mask = RREG32(RADEON_GPIOPAD_MASK);
+
+	WREG32(RADEON_SEPROM_CNTL1,
+	       ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
+		(0xc << RADEON_SCK_PRESCALE_SHIFT)));
+	WREG32(RADEON_GPIOPAD_A, 0);
+	WREG32(RADEON_GPIOPAD_EN, 0);
+	WREG32(RADEON_GPIOPAD_MASK, 0);
+
+	/* disable VIP */
+	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+
+	/* enable the rom */
+	WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+
+	/* Disable VGA mode */
+	WREG32(AVIVO_D1VGA_CONTROL,
+	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_D2VGA_CONTROL,
+	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_VGA_RENDER_CONTROL,
+	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
+	WREG32(RADEON_VIPH_CONTROL, viph_control);
+	WREG32(RV370_BUS_CNTL, bus_cntl);
+	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	WREG32(RADEON_GPIOPAD_A, gpiopad_a);
+	WREG32(RADEON_GPIOPAD_EN, gpiopad_en);
+	WREG32(RADEON_GPIOPAD_MASK, gpiopad_mask);
+	return r;
+}
+
+static bool legacy_read_disabled_bios(struct radeon_device *rdev)
+{
+	uint32_t seprom_cntl1;
+	uint32_t viph_control;
+	uint32_t bus_cntl;
+	uint32_t crtc_gen_cntl;
+	uint32_t crtc2_gen_cntl;
+	uint32_t crtc_ext_cntl;
+	uint32_t fp2_gen_cntl;
+	bool r;
+
+	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
+	viph_control = RREG32(RADEON_VIPH_CONTROL);
+	if (rdev->flags & RADEON_IS_PCIE)
+		bus_cntl = RREG32(RV370_BUS_CNTL);
+	else
+		bus_cntl = RREG32(RADEON_BUS_CNTL);
+	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
+	crtc2_gen_cntl = 0;
+	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+	fp2_gen_cntl = 0;
+
+	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	}
+
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+	}
+
+	WREG32(RADEON_SEPROM_CNTL1,
+	       ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
+		(0xc << RADEON_SCK_PRESCALE_SHIFT)));
+
+	/* disable VIP */
+	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
+
+	/* enable the rom */
+	if (rdev->flags & RADEON_IS_PCIE)
+		WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+	else
+		WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+
+	/* Turn off mem requests and CRTC for both controllers */
+	WREG32(RADEON_CRTC_GEN_CNTL,
+	       ((crtc_gen_cntl & ~RADEON_CRTC_EN) |
+		(RADEON_CRTC_DISP_REQ_EN_B |
+		 RADEON_CRTC_EXT_DISP_EN)));
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(RADEON_CRTC2_GEN_CNTL,
+		       ((crtc2_gen_cntl & ~RADEON_CRTC2_EN) |
+			RADEON_CRTC2_DISP_REQ_EN_B));
+	}
+	/* Turn off CRTC */
+	WREG32(RADEON_CRTC_EXT_CNTL,
+	       ((crtc_ext_cntl & ~RADEON_CRTC_CRT_ON) |
+		(RADEON_CRTC_SYNC_TRISTAT |
+		 RADEON_CRTC_DISPLAY_DIS)));
+
+	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+		WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
+	}
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
+	WREG32(RADEON_VIPH_CONTROL, viph_control);
+	if (rdev->flags & RADEON_IS_PCIE)
+		WREG32(RV370_BUS_CNTL, bus_cntl);
+	else
+		WREG32(RADEON_BUS_CNTL, bus_cntl);
+	WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
+		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+	}
+	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+	}
+	return r;
+}
+
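+/* Pick the family-specific sequence for fetching the ROM image while
+ * the display hardware is shut off.
+ */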
+static bool radeon_read_disabled_bios(struct radeon_device *rdev)
+{
+	if (rdev->flags & RADEON_IS_IGP)
+		return igp_read_bios_from_vram(rdev);
+	else if (rdev->family >= CHIP_BARTS)
+		return ni_read_disabled_bios(rdev);
+	else if (rdev->family >= CHIP_RV770)
+		return r700_read_disabled_bios(rdev);
+	else if (rdev->family >= CHIP_R600)
+		return r600_read_disabled_bios(rdev);
+	else if (rdev->family >= CHIP_RS600)
+		return avivo_read_disabled_bios(rdev);
+	else
+		return legacy_read_disabled_bios(rdev);
+}
+
+#ifdef CONFIG_ACPI
+static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+	bool ret = false;
+	struct acpi_table_header *hdr;
+	acpi_size tbl_size;
+	UEFI_ACPI_VFCT *vfct;
+	GOP_VBIOS_CONTENT *vbios;
+	VFCT_IMAGE_HEADER *vhdr;
+
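+	/* The ACPI VFCT table lets platform firmware hand the driver a copy
+	 * of the video BIOS when it cannot be read from the device directly.
+	 */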
+	if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+		return false;
+	if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+		DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+		goto out_unmap;
+	}
+
+	vfct = (UEFI_ACPI_VFCT *)hdr;
+	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
+		DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+		goto out_unmap;
+	}
+
+	vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
+	vhdr = &vbios->VbiosHeader;
+	DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
+			vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
+			vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
+
+	if (vhdr->PCIBus != rdev->pdev->bus->number ||
+	    vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
+	    vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
+	    vhdr->VendorID != rdev->pdev->vendor ||
+	    vhdr->DeviceID != rdev->pdev->device) {
+		DRM_INFO("ACPI VFCT table is not for this card\n");
+		goto out_unmap;
+	}
+
+	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
+		DRM_ERROR("ACPI VFCT image truncated\n");
+		goto out_unmap;
+	}
+
+	rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
+	ret = !!rdev->bios;
+
+out_unmap:
+	return ret;
+}
+#else
+static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+	return false;
+}
+#endif
+
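+/* Fetch the video BIOS image, trying each source in turn: ATRM, the ACPI
+ * VFCT table, a copy in VRAM (IGPs), the PCI ROM, the family-specific
+ * disabled-ROM path, and finally the platform ROM.  The image is then
+ * sanity checked (0x55 0xaa signature, x86 code type) and classified as
+ * ATOM or legacy COMBIOS.
+ */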
+bool radeon_get_bios(struct radeon_device *rdev)
+{
+	bool r;
+	uint16_t tmp;
+
+	r = radeon_atrm_get_bios(rdev);
+	if (!r)
+		r = radeon_acpi_vfct_bios(rdev);
+	if (!r)
+		r = igp_read_bios_from_vram(rdev);
+	if (!r)
+		r = radeon_read_bios(rdev);
+	if (!r)
+		r = radeon_read_disabled_bios(rdev);
+	if (!r)
+		r = radeon_read_platform_bios(rdev);
+	if (!r || rdev->bios == NULL) {
+		DRM_ERROR("Unable to locate a BIOS ROM\n");
+		rdev->bios = NULL;
+		return false;
+	}
+	if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+		printk(KERN_ERR "BIOS signature incorrect %x %x\n",
+		       rdev->bios[0], rdev->bios[1]);
+		goto free_bios;
+	}
+
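+	/* 0x18 holds the pointer to the PCI Data Structure; code type 0 at
+	 * offset 0x14 identifies an x86 BIOS image.
+	 */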
+	tmp = RBIOS16(0x18);
+	if (RBIOS8(tmp + 0x14) != 0x0) {
+		DRM_INFO("Not an x86 BIOS ROM, not using.\n");
+		goto free_bios;
+	}
+
+	rdev->bios_header_start = RBIOS16(0x48);
+	if (!rdev->bios_header_start) {
+		goto free_bios;
+	}
+	tmp = rdev->bios_header_start + 4;
+	if (!memcmp(rdev->bios + tmp, "ATOM", 4) ||
+	    !memcmp(rdev->bios + tmp, "MOTA", 4)) {
+		rdev->is_atom_bios = true;
+	} else {
+		rdev->is_atom_bios = false;
+	}
+
+	DRM_DEBUG("%sBIOS detected\n", rdev->is_atom_bios ? "ATOM" : "COM");
+	return true;
+free_bios:
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+	return false;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_blit_common.h b/linux-imx/drivers/gpu/drm/radeon/radeon_blit_common.h
new file mode 100644
index 0000000..4ecbe72
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_blit_common.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ * Copyright 2012 Alcatel-Lucent, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RADEON_BLIT_COMMON_H__
+#define __RADEON_BLIT_COMMON_H__
+
+#define DI_PT_RECTLIST        0x11
+#define DI_INDEX_SIZE_16_BIT  0x0
+#define DI_SRC_SEL_AUTO_INDEX 0x2
+
+#define FMT_8                 0x1
+#define FMT_5_6_5             0x8
+#define FMT_8_8_8_8           0x1a
+#define COLOR_8               0x1
+#define COLOR_5_6_5           0x8
+#define COLOR_8_8_8_8         0x1a
+
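+/* One rect unit is RECT_UNIT_W x RECT_UNIT_H pixels; at 4 bytes per pixel
+ * it covers exactly RADEON_GPU_PAGE_SIZE bytes.
+ */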
+#define RECT_UNIT_H           32
+#define RECT_UNIT_W           (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
+
+#endif /* __RADEON_BLIT_COMMON_H__ */
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_clocks.c b/linux-imx/drivers/gpu/drm/radeon/radeon_clocks.c
new file mode 100644
index 0000000..38e396d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -0,0 +1,912 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+/* Return the current engine clock (SCLK) in 10 kHz units. */
+uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
+{
+	struct radeon_pll *spll = &rdev->clock.spll;
+	uint32_t fb_div, ref_div, post_div, sclk;
+
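+	/* sclk = (2 * fb_div * reference_freq) / ref_div, reduced by the
+	 * post divider selected in SCLK_CNTL below.
+	 */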
+	fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+	fb_div = (fb_div >> RADEON_SPLL_FB_DIV_SHIFT) & RADEON_SPLL_FB_DIV_MASK;
+	fb_div <<= 1;
+	fb_div *= spll->reference_freq;
+
+	ref_div =
+	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (ref_div == 0)
+		return 0;
+
+	sclk = fb_div / ref_div;
+
+	post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
+	if (post_div == 2)
+		sclk >>= 1;
+	else if (post_div == 3)
+		sclk >>= 2;
+	else if (post_div == 4)
+		sclk >>= 3;
+
+	return sclk;
+}
+
+/* Return the current memory clock (MCLK) in 10 kHz units. */
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
+{
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+	uint32_t fb_div, ref_div, post_div, mclk;
+
+	fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+	fb_div = (fb_div >> RADEON_MPLL_FB_DIV_SHIFT) & RADEON_MPLL_FB_DIV_MASK;
+	fb_div <<= 1;
+	fb_div *= mpll->reference_freq;
+
+	ref_div =
+	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (ref_div == 0)
+		return 0;
+
+	mclk = fb_div / ref_div;
+
+	post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
+	if (post_div == 2)
+		mclk >>= 1;
+	else if (post_div == 3)
+		mclk >>= 2;
+	else if (post_div == 4)
+		mclk >>= 3;
+
+	return mclk;
+}
+
+#ifdef CONFIG_OF
+/*
+ * Read XTAL (ref clock), SCLK and MCLK from the Open Firmware device
+ * tree. Hopefully the ATI OF driver was kind enough to fill these in.
+ */
+static bool radeon_read_clocks_OF(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct device_node *dp = rdev->pdev->dev.of_node;
+	const u32 *val;
+	struct radeon_pll *p1pll = &rdev->clock.p1pll;
+	struct radeon_pll *p2pll = &rdev->clock.p2pll;
+	struct radeon_pll *spll = &rdev->clock.spll;
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+
+	if (dp == NULL)
+		return false;
+	val = of_get_property(dp, "ATY,RefCLK", NULL);
+	if (!val || !*val) {
+		printk(KERN_WARNING "radeonfb: No ATY,RefCLK property!\n");
+		return false;
+	}
+	p1pll->reference_freq = p2pll->reference_freq = (*val) / 10;
+	p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+	if (p1pll->reference_div < 2)
+		p1pll->reference_div = 12;
+	p2pll->reference_div = p1pll->reference_div;
+
+	/* These aren't in the device-tree */
+	if (rdev->family >= CHIP_R420) {
+		p1pll->pll_in_min = 100;
+		p1pll->pll_in_max = 1350;
+		p1pll->pll_out_min = 20000;
+		p1pll->pll_out_max = 50000;
+		p2pll->pll_in_min = 100;
+		p2pll->pll_in_max = 1350;
+		p2pll->pll_out_min = 20000;
+		p2pll->pll_out_max = 50000;
+	} else {
+		p1pll->pll_in_min = 40;
+		p1pll->pll_in_max = 500;
+		p1pll->pll_out_min = 12500;
+		p1pll->pll_out_max = 35000;
+		p2pll->pll_in_min = 40;
+		p2pll->pll_in_max = 500;
+		p2pll->pll_out_min = 12500;
+		p2pll->pll_out_max = 35000;
+	}
+	/* not sure what the max should be in all cases */
+	rdev->clock.max_pixel_clock = 35000;
+
+	spll->reference_freq = mpll->reference_freq = p1pll->reference_freq;
+	spll->reference_div = mpll->reference_div =
+		RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+			    RADEON_M_SPLL_REF_DIV_MASK;
+
+	val = of_get_property(dp, "ATY,SCLK", NULL);
+	if (val && *val)
+		rdev->clock.default_sclk = (*val) / 10;
+	else
+		rdev->clock.default_sclk =
+			radeon_legacy_get_engine_clock(rdev);
+
+	val = of_get_property(dp, "ATY,MCLK", NULL);
+	if (val && *val)
+		rdev->clock.default_mclk = (*val) / 10;
+	else
+		rdev->clock.default_mclk =
+			radeon_legacy_get_memory_clock(rdev);
+
+	DRM_INFO("Using device-tree clock info\n");
+
+	return true;
+}
+#else
+static bool radeon_read_clocks_OF(struct drm_device *dev)
+{
+	return false;
+}
+#endif /* CONFIG_OF */
+
+void radeon_get_clock_info(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_pll *p1pll = &rdev->clock.p1pll;
+	struct radeon_pll *p2pll = &rdev->clock.p2pll;
+	struct radeon_pll *dcpll = &rdev->clock.dcpll;
+	struct radeon_pll *spll = &rdev->clock.spll;
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+	int ret;
+
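+	/* Prefer the ATOM/COMBIOS tables, then the OF device tree, and fall
+	 * back to generic per-family defaults.
+	 */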
+	if (rdev->is_atom_bios)
+		ret = radeon_atom_get_clock_info(dev);
+	else
+		ret = radeon_combios_get_clock_info(dev);
+	if (!ret)
+		ret = radeon_read_clocks_OF(dev);
+
+	if (ret) {
+		if (p1pll->reference_div < 2) {
+			if (!ASIC_IS_AVIVO(rdev)) {
+				u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
+				if (ASIC_IS_R300(rdev))
+					p1pll->reference_div =
+						(tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
+				else
+					p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
+				if (p1pll->reference_div < 2)
+					p1pll->reference_div = 12;
+			} else
+				p1pll->reference_div = 12;
+		}
+		if (p2pll->reference_div < 2)
+			p2pll->reference_div = 12;
+		if (rdev->family < CHIP_RS600) {
+			if (spll->reference_div < 2)
+				spll->reference_div =
+					RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+					RADEON_M_SPLL_REF_DIV_MASK;
+		}
+		if (mpll->reference_div < 2)
+			mpll->reference_div = spll->reference_div;
+	} else {
+		if (ASIC_IS_AVIVO(rdev)) {
+			/* TODO FALLBACK */
+		} else {
+			DRM_INFO("Using generic clock info\n");
+
+			/* may need to be per card */
+			rdev->clock.max_pixel_clock = 35000;
+
+			if (rdev->flags & RADEON_IS_IGP) {
+				p1pll->reference_freq = 1432;
+				p2pll->reference_freq = 1432;
+				spll->reference_freq = 1432;
+				mpll->reference_freq = 1432;
+			} else {
+				p1pll->reference_freq = 2700;
+				p2pll->reference_freq = 2700;
+				spll->reference_freq = 2700;
+				mpll->reference_freq = 2700;
+			}
+			p1pll->reference_div =
+			    RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+			if (p1pll->reference_div < 2)
+				p1pll->reference_div = 12;
+			p2pll->reference_div = p1pll->reference_div;
+
+			if (rdev->family >= CHIP_R420) {
+				p1pll->pll_in_min = 100;
+				p1pll->pll_in_max = 1350;
+				p1pll->pll_out_min = 20000;
+				p1pll->pll_out_max = 50000;
+				p2pll->pll_in_min = 100;
+				p2pll->pll_in_max = 1350;
+				p2pll->pll_out_min = 20000;
+				p2pll->pll_out_max = 50000;
+			} else {
+				p1pll->pll_in_min = 40;
+				p1pll->pll_in_max = 500;
+				p1pll->pll_out_min = 12500;
+				p1pll->pll_out_max = 35000;
+				p2pll->pll_in_min = 40;
+				p2pll->pll_in_max = 500;
+				p2pll->pll_out_min = 12500;
+				p2pll->pll_out_max = 35000;
+			}
+
+			spll->reference_div =
+			    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+			    RADEON_M_SPLL_REF_DIV_MASK;
+			mpll->reference_div = spll->reference_div;
+			rdev->clock.default_sclk =
+			    radeon_legacy_get_engine_clock(rdev);
+			rdev->clock.default_mclk =
+			    radeon_legacy_get_memory_clock(rdev);
+		}
+	}
+
+	/* pixel clocks */
+	if (ASIC_IS_AVIVO(rdev)) {
+		p1pll->min_post_div = 2;
+		p1pll->max_post_div = 0x7f;
+		p1pll->min_frac_feedback_div = 0;
+		p1pll->max_frac_feedback_div = 9;
+		p2pll->min_post_div = 2;
+		p2pll->max_post_div = 0x7f;
+		p2pll->min_frac_feedback_div = 0;
+		p2pll->max_frac_feedback_div = 9;
+	} else {
+		p1pll->min_post_div = 1;
+		p1pll->max_post_div = 16;
+		p1pll->min_frac_feedback_div = 0;
+		p1pll->max_frac_feedback_div = 0;
+		p2pll->min_post_div = 1;
+		p2pll->max_post_div = 12;
+		p2pll->min_frac_feedback_div = 0;
+		p2pll->max_frac_feedback_div = 0;
+	}
+
+	/* dcpll is DCE4 only */
+	dcpll->min_post_div = 2;
+	dcpll->max_post_div = 0x7f;
+	dcpll->min_frac_feedback_div = 0;
+	dcpll->max_frac_feedback_div = 9;
+	dcpll->min_ref_div = 2;
+	dcpll->max_ref_div = 0x3ff;
+	dcpll->min_feedback_div = 4;
+	dcpll->max_feedback_div = 0xfff;
+	dcpll->best_vco = 0;
+
+	p1pll->min_ref_div = 2;
+	p1pll->max_ref_div = 0x3ff;
+	p1pll->min_feedback_div = 4;
+	p1pll->max_feedback_div = 0x7ff;
+	p1pll->best_vco = 0;
+
+	p2pll->min_ref_div = 2;
+	p2pll->max_ref_div = 0x3ff;
+	p2pll->min_feedback_div = 4;
+	p2pll->max_feedback_div = 0x7ff;
+	p2pll->best_vco = 0;
+
+	/* system clock */
+	spll->min_post_div = 1;
+	spll->max_post_div = 1;
+	spll->min_ref_div = 2;
+	spll->max_ref_div = 0xff;
+	spll->min_feedback_div = 4;
+	spll->max_feedback_div = 0xff;
+	spll->best_vco = 0;
+
+	/* memory clock */
+	mpll->min_post_div = 1;
+	mpll->max_post_div = 1;
+	mpll->min_ref_div = 2;
+	mpll->max_ref_div = 0xff;
+	mpll->min_feedback_div = 4;
+	mpll->max_feedback_div = 0xff;
+	mpll->best_vco = 0;
+
+	if (!rdev->clock.default_sclk)
+		rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
+	if ((!rdev->clock.default_mclk) && rdev->asic->pm.get_memory_clock)
+		rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
+
+	rdev->pm.current_sclk = rdev->clock.default_sclk;
+	rdev->pm.current_mclk = rdev->clock.default_mclk;
+
+}
+
+/* Compute SPLL dividers for the requested clock (in 10 kHz units) and
+ * return the clock that will actually be programmed.
+ */
+static uint32_t calc_eng_mem_clock(struct radeon_device *rdev,
+				   uint32_t req_clock,
+				   int *fb_div, int *post_div)
+{
+	struct radeon_pll *spll = &rdev->clock.spll;
+	int ref_div = spll->reference_div;
+
+	if (!ref_div)
+		ref_div =
+		    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+		    RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (req_clock < 15000) {
+		*post_div = 8;
+		req_clock *= 8;
+	} else if (req_clock < 30000) {
+		*post_div = 4;
+		req_clock *= 4;
+	} else if (req_clock < 60000) {
+		*post_div = 2;
+		req_clock *= 2;
+	} else
+		*post_div = 1;
+
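+	/* fb_div = round(req_clock * ref_div / (2 * reference_freq)) */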
+	req_clock *= ref_div;
+	req_clock += spll->reference_freq;
+	req_clock /= (2 * spll->reference_freq);
+
+	*fb_div = req_clock & 0xff;
+
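+	/* Recompute the clock the chosen dividers will actually produce. */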
+	req_clock = (req_clock & 0xffff) << 1;
+	req_clock *= spll->reference_freq;
+	req_clock /= ref_div;
+	req_clock /= *post_div;
+
+	return req_clock;
+}
+
+/* Reprogram the SPLL for a new engine clock, given in 10 kHz units. */
+void radeon_legacy_set_engine_clock(struct radeon_device *rdev,
+				    uint32_t eng_clock)
+{
+	uint32_t tmp;
+	int fb_div, post_div;
+
+	/* XXX: wait for idle */
+
+	eng_clock = calc_eng_mem_clock(rdev, eng_clock, &fb_div, &post_div);
+
+	tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+	tmp &= ~RADEON_DONT_USE_XTALIN;
+	WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+
+	tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+	tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
+	WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+	udelay(10);
+
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp |= RADEON_SPLL_SLEEP;
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	udelay(2);
+
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp |= RADEON_SPLL_RESET;
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	udelay(200);
+
+	tmp = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
+	tmp &= ~(RADEON_SPLL_FB_DIV_MASK << RADEON_SPLL_FB_DIV_SHIFT);
+	tmp |= (fb_div & RADEON_SPLL_FB_DIV_MASK) << RADEON_SPLL_FB_DIV_SHIFT;
+	WREG32_PLL(RADEON_M_SPLL_REF_FB_DIV, tmp);
+
+	/* XXX: verify on different asics */
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp &= ~RADEON_SPLL_PVG_MASK;
+	if ((eng_clock * post_div) >= 90000)
+		tmp |= (0x7 << RADEON_SPLL_PVG_SHIFT);
+	else
+		tmp |= (0x4 << RADEON_SPLL_PVG_SHIFT);
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp &= ~RADEON_SPLL_SLEEP;
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	udelay(2);
+
+	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
+	tmp &= ~RADEON_SPLL_RESET;
+	WREG32_PLL(RADEON_SPLL_CNTL, tmp);
+
+	udelay(200);
+
+	tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+	tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
+	switch (post_div) {
+	case 1:
+	default:
+		tmp |= 1;
+		break;
+	case 2:
+		tmp |= 2;
+		break;
+	case 4:
+		tmp |= 3;
+		break;
+	case 8:
+		tmp |= 4;
+		break;
+	}
+	WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+	udelay(20);
+
+	tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+	tmp |= RADEON_DONT_USE_XTALIN;
+	WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+
+	udelay(10);
+}
+
+void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+{
+	uint32_t tmp;
+
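+	/* enable: clear the FORCEON bits so clocks can gate dynamically;
+	 * disable: force every engine clock on.
+	 */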
+	if (enable) {
+		if (rdev->flags & RADEON_SINGLE_CRTC) {
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			if ((RREG32(RADEON_CONFIG_CNTL) &
+			     RADEON_CFG_ATI_REV_ID_MASK) >
+			    RADEON_CFG_ATI_REV_A13) {
+				tmp &=
+				    ~(RADEON_SCLK_FORCE_CP |
+				      RADEON_SCLK_FORCE_RB);
+			}
+			tmp &=
+			    ~(RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1 |
+			      RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_SE |
+			      RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_RE |
+			      RADEON_SCLK_FORCE_PB | RADEON_SCLK_FORCE_TAM |
+			      RADEON_SCLK_FORCE_TDM);
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+		} else if (ASIC_IS_R300(rdev)) {
+			if ((rdev->family == CHIP_RS400) ||
+			    (rdev->family == CHIP_RS480)) {
+				tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+				tmp &=
+				    ~(RADEON_SCLK_FORCE_DISP2 |
+				      RADEON_SCLK_FORCE_CP |
+				      RADEON_SCLK_FORCE_HDP |
+				      RADEON_SCLK_FORCE_DISP1 |
+				      RADEON_SCLK_FORCE_TOP |
+				      RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
+				      | RADEON_SCLK_FORCE_IDCT |
+				      RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
+				      | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
+				      | R300_SCLK_FORCE_US |
+				      RADEON_SCLK_FORCE_TV_SCLK |
+				      R300_SCLK_FORCE_SU |
+				      RADEON_SCLK_FORCE_OV0);
+				tmp |= RADEON_DYN_STOP_LAT_MASK;
+				tmp |=
+				    RADEON_SCLK_FORCE_TOP |
+				    RADEON_SCLK_FORCE_VIP;
+				WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+				tmp &= ~RADEON_SCLK_MORE_FORCEON;
+				tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
+				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+				tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+					RADEON_PIXCLK_DAC_ALWAYS_ONb);
+				WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+				tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+					RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+					RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+					R300_DVOCLK_ALWAYS_ONb |
+					RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+					RADEON_PIXCLK_GV_ALWAYS_ONb |
+					R300_PIXCLK_DVO_ALWAYS_ONb |
+					RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+					RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+					R300_PIXCLK_TRANS_ALWAYS_ONb |
+					R300_PIXCLK_TVO_ALWAYS_ONb |
+					R300_P2G2CLK_ALWAYS_ONb |
+					R300_P2G2CLK_DAC_ALWAYS_ONb);
+				WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+			} else if (rdev->family >= CHIP_RV350) {
+				tmp = RREG32_PLL(R300_SCLK_CNTL2);
+				tmp &= ~(R300_SCLK_FORCE_TCL |
+					 R300_SCLK_FORCE_GA |
+					 R300_SCLK_FORCE_CBA);
+				tmp |= (R300_SCLK_TCL_MAX_DYN_STOP_LAT |
+					R300_SCLK_GA_MAX_DYN_STOP_LAT |
+					R300_SCLK_CBA_MAX_DYN_STOP_LAT);
+				WREG32_PLL(R300_SCLK_CNTL2, tmp);
+
+				tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+				tmp &=
+				    ~(RADEON_SCLK_FORCE_DISP2 |
+				      RADEON_SCLK_FORCE_CP |
+				      RADEON_SCLK_FORCE_HDP |
+				      RADEON_SCLK_FORCE_DISP1 |
+				      RADEON_SCLK_FORCE_TOP |
+				      RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
+				      | RADEON_SCLK_FORCE_IDCT |
+				      RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
+				      | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
+				      | R300_SCLK_FORCE_US |
+				      RADEON_SCLK_FORCE_TV_SCLK |
+				      R300_SCLK_FORCE_SU |
+				      RADEON_SCLK_FORCE_OV0);
+				tmp |= RADEON_DYN_STOP_LAT_MASK;
+				WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+				tmp &= ~RADEON_SCLK_MORE_FORCEON;
+				tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
+				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+				tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+					RADEON_PIXCLK_DAC_ALWAYS_ONb);
+				WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+				tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+					RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+					RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+					R300_DVOCLK_ALWAYS_ONb |
+					RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+					RADEON_PIXCLK_GV_ALWAYS_ONb |
+					R300_PIXCLK_DVO_ALWAYS_ONb |
+					RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+					RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+					R300_PIXCLK_TRANS_ALWAYS_ONb |
+					R300_PIXCLK_TVO_ALWAYS_ONb |
+					R300_P2G2CLK_ALWAYS_ONb |
+					R300_P2G2CLK_DAC_ALWAYS_ONb);
+				WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+
+				tmp = RREG32_PLL(RADEON_MCLK_MISC);
+				tmp |= (RADEON_MC_MCLK_DYN_ENABLE |
+					RADEON_IO_MCLK_DYN_ENABLE);
+				WREG32_PLL(RADEON_MCLK_MISC, tmp);
+
+				tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+				tmp |= (RADEON_FORCEON_MCLKA |
+					RADEON_FORCEON_MCLKB);
+
+				tmp &= ~(RADEON_FORCEON_YCLKA |
+					 RADEON_FORCEON_YCLKB |
+					 RADEON_FORCEON_MC);
+
+				/* Some vbios releases set the DISABLE_MC_MCLKA
+				 * and DISABLE_MC_MCLKB bits in the vbios table.
+				 * Setting these bits causes a hardware hang
+				 * when reading video memory with dynamic
+				 * clocking enabled.
+				 */
+				if ((tmp & R300_DISABLE_MC_MCLKA) &&
+				    (tmp & R300_DISABLE_MC_MCLKB)) {
+					/* If both bits are set, then check the active channels */
+					tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+					if (rdev->mc.vram_width == 64) {
+						if (RREG32(RADEON_MEM_CNTL) &
+						    R300_MEM_USE_CD_CH_ONLY)
+							tmp &=
+							    ~R300_DISABLE_MC_MCLKB;
+						else
+							tmp &=
+							    ~R300_DISABLE_MC_MCLKA;
+					} else {
+						tmp &= ~(R300_DISABLE_MC_MCLKA |
+							 R300_DISABLE_MC_MCLKB);
+					}
+				}
+
+				WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+			} else {
+				tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+				tmp &= ~(R300_SCLK_FORCE_VAP);
+				tmp |= RADEON_SCLK_FORCE_CP;
+				WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+				mdelay(15);
+
+				tmp = RREG32_PLL(R300_SCLK_CNTL2);
+				tmp &= ~(R300_SCLK_FORCE_TCL |
+					 R300_SCLK_FORCE_GA |
+					 R300_SCLK_FORCE_CBA);
+				WREG32_PLL(R300_SCLK_CNTL2, tmp);
+			}
+		} else {
+			tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
+
+			tmp &= ~(RADEON_ACTIVE_HILO_LAT_MASK |
+				 RADEON_DISP_DYN_STOP_LAT_MASK |
+				 RADEON_DYN_STOP_MODE_MASK);
+
+			tmp |= (RADEON_ENGIN_DYNCLK_MODE |
+				(0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
+			WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
+			mdelay(15);
+
+			tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+			tmp |= RADEON_SCLK_DYN_START_CNTL;
+			WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+			mdelay(15);
+
+			/* When DRI is enabled, setting DYN_STOP_LAT to zero
+			 * can cause some R200s to lock up randomly; leave it
+			 * as set by the BIOS.
+			 */
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			/*tmp &= RADEON_SCLK_SRC_SEL_MASK; */
+			tmp &= ~RADEON_SCLK_FORCEON_MASK;
+
+			/* RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300 */
+			if (((rdev->family == CHIP_RV250) &&
+			     ((RREG32(RADEON_CONFIG_CNTL) &
+			       RADEON_CFG_ATI_REV_ID_MASK) <
+			      RADEON_CFG_ATI_REV_A13))
+			    || ((rdev->family == CHIP_RV100)
+				&&
+				((RREG32(RADEON_CONFIG_CNTL) &
+				  RADEON_CFG_ATI_REV_ID_MASK) <=
+				 RADEON_CFG_ATI_REV_A13))) {
+				tmp |= RADEON_SCLK_FORCE_CP;
+				tmp |= RADEON_SCLK_FORCE_VIP;
+			}
+
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+			if ((rdev->family == CHIP_RV200) ||
+			    (rdev->family == CHIP_RV250) ||
+			    (rdev->family == CHIP_RV280)) {
+				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+				tmp &= ~RADEON_SCLK_MORE_FORCEON;
+
+				/* RV200::A11 A12 RV250::A11 A12 */
+				if (((rdev->family == CHIP_RV200) ||
+				     (rdev->family == CHIP_RV250)) &&
+				    ((RREG32(RADEON_CONFIG_CNTL) &
+				      RADEON_CFG_ATI_REV_ID_MASK) <
+				     RADEON_CFG_ATI_REV_A13)) {
+					tmp |= RADEON_SCLK_MORE_FORCEON;
+				}
+				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+				mdelay(15);
+			}
+
+			/* RV200::A11 A12, RV250::A11 A12 */
+			if (((rdev->family == CHIP_RV200) ||
+			     (rdev->family == CHIP_RV250)) &&
+			    ((RREG32(RADEON_CONFIG_CNTL) &
+			      RADEON_CFG_ATI_REV_ID_MASK) <
+			     RADEON_CFG_ATI_REV_A13)) {
+				tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+				tmp |= RADEON_TCL_BYPASS_DISABLE;
+				WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+			}
+			mdelay(15);
+
+			/* enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
+			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
+				RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+				RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+				RADEON_PIXCLK_GV_ALWAYS_ONb |
+				RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
+				RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+				RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+			mdelay(15);
+
+			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+			tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+				RADEON_PIXCLK_DAC_ALWAYS_ONb);
+
+			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+			mdelay(15);
+		}
+	} else {
+		/* Turn everything OFF (ForceON to everything) */
+		if (rdev->flags & RADEON_SINGLE_CRTC) {
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_HDP |
+				RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_TOP
+				| RADEON_SCLK_FORCE_E2 | RADEON_SCLK_FORCE_SE |
+				RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_VIP |
+				RADEON_SCLK_FORCE_RE | RADEON_SCLK_FORCE_PB |
+				RADEON_SCLK_FORCE_TAM | RADEON_SCLK_FORCE_TDM |
+				RADEON_SCLK_FORCE_RB);
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+		} else if ((rdev->family == CHIP_RS400) ||
+			   (rdev->family == CHIP_RS480)) {
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
+				RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
+				| RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
+				R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
+				RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
+				R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
+				R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
+				R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+			tmp |= RADEON_SCLK_MORE_FORCEON;
+			WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_DAC_ALWAYS_ONb |
+				 R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
+			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+				 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+				 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+				 R300_DVOCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+				 RADEON_PIXCLK_GV_ALWAYS_ONb |
+				 R300_PIXCLK_DVO_ALWAYS_ONb |
+				 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+				 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+				 R300_PIXCLK_TRANS_ALWAYS_ONb |
+				 R300_PIXCLK_TVO_ALWAYS_ONb |
+				 R300_P2G2CLK_ALWAYS_ONb |
+				 R300_P2G2CLK_DAC_ALWAYS_ONb |
+				 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+		} else if (rdev->family >= CHIP_RV350) {
+			/* for RV350/M10, no delays are required. */
+			tmp = RREG32_PLL(R300_SCLK_CNTL2);
+			tmp |= (R300_SCLK_FORCE_TCL |
+				R300_SCLK_FORCE_GA | R300_SCLK_FORCE_CBA);
+			WREG32_PLL(R300_SCLK_CNTL2, tmp);
+
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
+				RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
+				| RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
+				R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
+				RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
+				R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
+				R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
+				R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+			tmp |= RADEON_SCLK_MORE_FORCEON;
+			WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+			tmp |= (RADEON_FORCEON_MCLKA |
+				RADEON_FORCEON_MCLKB |
+				RADEON_FORCEON_YCLKA |
+				RADEON_FORCEON_YCLKB | RADEON_FORCEON_MC);
+			WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_DAC_ALWAYS_ONb |
+				 R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
+			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+				 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+				 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
+				 R300_DVOCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+				 RADEON_PIXCLK_GV_ALWAYS_ONb |
+				 R300_PIXCLK_DVO_ALWAYS_ONb |
+				 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+				 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
+				 R300_PIXCLK_TRANS_ALWAYS_ONb |
+				 R300_PIXCLK_TVO_ALWAYS_ONb |
+				 R300_P2G2CLK_ALWAYS_ONb |
+				 R300_P2G2CLK_DAC_ALWAYS_ONb |
+				 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+		} else {
+			tmp = RREG32_PLL(RADEON_SCLK_CNTL);
+			tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_E2);
+			tmp |= RADEON_SCLK_FORCE_SE;
+
+			if (rdev->flags & RADEON_SINGLE_CRTC) {
+				tmp |= (RADEON_SCLK_FORCE_RB |
+					RADEON_SCLK_FORCE_TDM |
+					RADEON_SCLK_FORCE_TAM |
+					RADEON_SCLK_FORCE_PB |
+					RADEON_SCLK_FORCE_RE |
+					RADEON_SCLK_FORCE_VIP |
+					RADEON_SCLK_FORCE_IDCT |
+					RADEON_SCLK_FORCE_TOP |
+					RADEON_SCLK_FORCE_DISP1 |
+					RADEON_SCLK_FORCE_DISP2 |
+					RADEON_SCLK_FORCE_HDP);
+			} else if ((rdev->family == CHIP_R300) ||
+				   (rdev->family == CHIP_R350)) {
+				tmp |= (RADEON_SCLK_FORCE_HDP |
+					RADEON_SCLK_FORCE_DISP1 |
+					RADEON_SCLK_FORCE_DISP2 |
+					RADEON_SCLK_FORCE_TOP |
+					RADEON_SCLK_FORCE_IDCT |
+					RADEON_SCLK_FORCE_VIP);
+			}
+			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+
+			mdelay(16);
+
+			if ((rdev->family == CHIP_R300) ||
+			    (rdev->family == CHIP_R350)) {
+				tmp = RREG32_PLL(R300_SCLK_CNTL2);
+				tmp |= (R300_SCLK_FORCE_TCL |
+					R300_SCLK_FORCE_GA |
+					R300_SCLK_FORCE_CBA);
+				WREG32_PLL(R300_SCLK_CNTL2, tmp);
+				mdelay(16);
+			}
+
+			if (rdev->flags & RADEON_IS_IGP) {
+				tmp = RREG32_PLL(RADEON_MCLK_CNTL);
+				tmp &= ~(RADEON_FORCEON_MCLKA |
+					 RADEON_FORCEON_YCLKA);
+				WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+				mdelay(16);
+			}
+
+			if ((rdev->family == CHIP_RV200) ||
+			    (rdev->family == CHIP_RV250) ||
+			    (rdev->family == CHIP_RV280)) {
+				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+				tmp |= RADEON_SCLK_MORE_FORCEON;
+				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+				mdelay(16);
+			}
+
+			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
+				 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
+				 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
+				 RADEON_PIXCLK_GV_ALWAYS_ONb |
+				 RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
+				 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
+				 RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+			mdelay(16);
+
+			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+				 RADEON_PIXCLK_DAC_ALWAYS_ONb);
+			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+		}
+	}
+}
+
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_combios.c b/linux-imx/drivers/gpu/drm/radeon/radeon_combios.c
new file mode 100644
index 0000000..68ce360
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_combios.c
@@ -0,0 +1,3619 @@
+/*
+ * Copyright 2004 ATI Technologies Inc., Markham, Ontario
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+#ifdef CONFIG_PPC_PMAC
+/* not sure which of these are needed */
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#endif /* CONFIG_PPC_PMAC */
+
+/* from radeon_encoder.c */
+extern uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+			uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+
+/* from radeon_connector.c */
+extern void
+radeon_add_legacy_connector(struct drm_device *dev,
+			    uint32_t connector_id,
+			    uint32_t supported_device,
+			    int connector_type,
+			    struct radeon_i2c_bus_rec *i2c_bus,
+			    uint16_t connector_object_id,
+			    struct radeon_hpd *hpd);
+
+/* from radeon_legacy_encoder.c */
+extern void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
+			  uint32_t supported_device);
+
+/* old legacy ATI BIOS routines */
+
+/* COMBIOS table offsets */
+enum radeon_combios_table_offset {
+	/* absolute offset tables */
+	COMBIOS_ASIC_INIT_1_TABLE,
+	COMBIOS_BIOS_SUPPORT_TABLE,
+	COMBIOS_DAC_PROGRAMMING_TABLE,
+	COMBIOS_MAX_COLOR_DEPTH_TABLE,
+	COMBIOS_CRTC_INFO_TABLE,
+	COMBIOS_PLL_INFO_TABLE,
+	COMBIOS_TV_INFO_TABLE,
+	COMBIOS_DFP_INFO_TABLE,
+	COMBIOS_HW_CONFIG_INFO_TABLE,
+	COMBIOS_MULTIMEDIA_INFO_TABLE,
+	COMBIOS_TV_STD_PATCH_TABLE,
+	COMBIOS_LCD_INFO_TABLE,
+	COMBIOS_MOBILE_INFO_TABLE,
+	COMBIOS_PLL_INIT_TABLE,
+	COMBIOS_MEM_CONFIG_TABLE,
+	COMBIOS_SAVE_MASK_TABLE,
+	COMBIOS_HARDCODED_EDID_TABLE,
+	COMBIOS_ASIC_INIT_2_TABLE,
+	COMBIOS_CONNECTOR_INFO_TABLE,
+	COMBIOS_DYN_CLK_1_TABLE,
+	COMBIOS_RESERVED_MEM_TABLE,
+	COMBIOS_EXT_TMDS_INFO_TABLE,
+	COMBIOS_MEM_CLK_INFO_TABLE,
+	COMBIOS_EXT_DAC_INFO_TABLE,
+	COMBIOS_MISC_INFO_TABLE,
+	COMBIOS_CRT_INFO_TABLE,
+	COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE,
+	COMBIOS_COMPONENT_VIDEO_INFO_TABLE,
+	COMBIOS_FAN_SPEED_INFO_TABLE,
+	COMBIOS_OVERDRIVE_INFO_TABLE,
+	COMBIOS_OEM_INFO_TABLE,
+	COMBIOS_DYN_CLK_2_TABLE,
+	COMBIOS_POWER_CONNECTOR_INFO_TABLE,
+	COMBIOS_I2C_INFO_TABLE,
+	/* relative offset tables */
+	COMBIOS_ASIC_INIT_3_TABLE,	/* offset from misc info */
+	COMBIOS_ASIC_INIT_4_TABLE,	/* offset from misc info */
+	COMBIOS_DETECTED_MEM_TABLE,	/* offset from misc info */
+	COMBIOS_ASIC_INIT_5_TABLE,	/* offset from misc info */
+	COMBIOS_RAM_RESET_TABLE,	/* offset from mem config */
+	COMBIOS_POWERPLAY_INFO_TABLE,	/* offset from mobile info */
+	COMBIOS_GPIO_INFO_TABLE,	/* offset from mobile info */
+	COMBIOS_LCD_DDC_INFO_TABLE,	/* offset from mobile info */
+	COMBIOS_TMDS_POWER_TABLE,	/* offset from mobile info */
+	COMBIOS_TMDS_POWER_ON_TABLE,	/* offset from tmds power */
+	COMBIOS_TMDS_POWER_OFF_TABLE,	/* offset from tmds power */
+};
+
+enum radeon_combios_ddc {
+	DDC_NONE_DETECTED,
+	DDC_MONID,
+	DDC_DVI,
+	DDC_VGA,
+	DDC_CRT2,
+	DDC_LCD,
+	DDC_GPIO,
+};
+
+enum radeon_combios_connector {
+	CONNECTOR_NONE_LEGACY,
+	CONNECTOR_PROPRIETARY_LEGACY,
+	CONNECTOR_CRT_LEGACY,
+	CONNECTOR_DVI_I_LEGACY,
+	CONNECTOR_DVI_D_LEGACY,
+	CONNECTOR_CTV_LEGACY,
+	CONNECTOR_STV_LEGACY,
+	CONNECTOR_UNSUPPORTED_LEGACY
+};
+
+const int legacy_connector_convert[] = {
+	DRM_MODE_CONNECTOR_Unknown,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_VGA,
+	DRM_MODE_CONNECTOR_DVII,
+	DRM_MODE_CONNECTOR_DVID,
+	DRM_MODE_CONNECTOR_Composite,
+	DRM_MODE_CONNECTOR_SVIDEO,
+	DRM_MODE_CONNECTOR_Unknown,
+};
+
+static uint16_t combios_get_table_offset(struct drm_device *dev,
+					 enum radeon_combios_table_offset table)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	int rev, size;
+	uint16_t offset = 0, check_offset;
+
+	if (!rdev->bios)
+		return 0;
+
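+	/* Absolute tables are located via a 16-bit pointer at a fixed offset
+	 * (check_offset) in the COMBIOS header; relative tables are chained
+	 * off other tables and set 'offset' directly.
+	 */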
+	switch (table) {
+		/* absolute offset tables */
+	case COMBIOS_ASIC_INIT_1_TABLE:
+		check_offset = 0xc;
+		break;
+	case COMBIOS_BIOS_SUPPORT_TABLE:
+		check_offset = 0x14;
+		break;
+	case COMBIOS_DAC_PROGRAMMING_TABLE:
+		check_offset = 0x2a;
+		break;
+	case COMBIOS_MAX_COLOR_DEPTH_TABLE:
+		check_offset = 0x2c;
+		break;
+	case COMBIOS_CRTC_INFO_TABLE:
+		check_offset = 0x2e;
+		break;
+	case COMBIOS_PLL_INFO_TABLE:
+		check_offset = 0x30;
+		break;
+	case COMBIOS_TV_INFO_TABLE:
+		check_offset = 0x32;
+		break;
+	case COMBIOS_DFP_INFO_TABLE:
+		check_offset = 0x34;
+		break;
+	case COMBIOS_HW_CONFIG_INFO_TABLE:
+		check_offset = 0x36;
+		break;
+	case COMBIOS_MULTIMEDIA_INFO_TABLE:
+		check_offset = 0x38;
+		break;
+	case COMBIOS_TV_STD_PATCH_TABLE:
+		check_offset = 0x3e;
+		break;
+	case COMBIOS_LCD_INFO_TABLE:
+		check_offset = 0x40;
+		break;
+	case COMBIOS_MOBILE_INFO_TABLE:
+		check_offset = 0x42;
+		break;
+	case COMBIOS_PLL_INIT_TABLE:
+		check_offset = 0x46;
+		break;
+	case COMBIOS_MEM_CONFIG_TABLE:
+		check_offset = 0x48;
+		break;
+	case COMBIOS_SAVE_MASK_TABLE:
+		check_offset = 0x4a;
+		break;
+	case COMBIOS_HARDCODED_EDID_TABLE:
+		check_offset = 0x4c;
+		break;
+	case COMBIOS_ASIC_INIT_2_TABLE:
+		check_offset = 0x4e;
+		break;
+	case COMBIOS_CONNECTOR_INFO_TABLE:
+		check_offset = 0x50;
+		break;
+	case COMBIOS_DYN_CLK_1_TABLE:
+		check_offset = 0x52;
+		break;
+	case COMBIOS_RESERVED_MEM_TABLE:
+		check_offset = 0x54;
+		break;
+	case COMBIOS_EXT_TMDS_INFO_TABLE:
+		check_offset = 0x58;
+		break;
+	case COMBIOS_MEM_CLK_INFO_TABLE:
+		check_offset = 0x5a;
+		break;
+	case COMBIOS_EXT_DAC_INFO_TABLE:
+		check_offset = 0x5c;
+		break;
+	case COMBIOS_MISC_INFO_TABLE:
+		check_offset = 0x5e;
+		break;
+	case COMBIOS_CRT_INFO_TABLE:
+		check_offset = 0x60;
+		break;
+	case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
+		check_offset = 0x62;
+		break;
+	case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
+		check_offset = 0x64;
+		break;
+	case COMBIOS_FAN_SPEED_INFO_TABLE:
+		check_offset = 0x66;
+		break;
+	case COMBIOS_OVERDRIVE_INFO_TABLE:
+		check_offset = 0x68;
+		break;
+	case COMBIOS_OEM_INFO_TABLE:
+		check_offset = 0x6a;
+		break;
+	case COMBIOS_DYN_CLK_2_TABLE:
+		check_offset = 0x6c;
+		break;
+	case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
+		check_offset = 0x6e;
+		break;
+	case COMBIOS_I2C_INFO_TABLE:
+		check_offset = 0x70;
+		break;
+		/* relative offset tables */
+	case COMBIOS_ASIC_INIT_3_TABLE:	/* offset from misc info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+		if (check_offset) {
+			rev = RBIOS8(check_offset);
+			if (rev > 0) {
+				check_offset = RBIOS16(check_offset + 0x3);
+				if (check_offset)
+					offset = check_offset;
+			}
+		}
+		break;
+	case COMBIOS_ASIC_INIT_4_TABLE:	/* offset from misc info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+		if (check_offset) {
+			rev = RBIOS8(check_offset);
+			if (rev > 0) {
+				check_offset = RBIOS16(check_offset + 0x5);
+				if (check_offset)
+					offset = check_offset;
+			}
+		}
+		break;
+	case COMBIOS_DETECTED_MEM_TABLE:	/* offset from misc info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+		if (check_offset) {
+			rev = RBIOS8(check_offset);
+			if (rev > 0) {
+				check_offset = RBIOS16(check_offset + 0x7);
+				if (check_offset)
+					offset = check_offset;
+			}
+		}
+		break;
+	case COMBIOS_ASIC_INIT_5_TABLE:	/* offset from misc info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
+		if (check_offset) {
+			rev = RBIOS8(check_offset);
+			if (rev == 2) {
+				check_offset = RBIOS16(check_offset + 0x9);
+				if (check_offset)
+					offset = check_offset;
+			}
+		}
+		break;
+	case COMBIOS_RAM_RESET_TABLE:	/* offset from mem config */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
+		if (check_offset) {
+			while (RBIOS8(check_offset++));
+			check_offset += 2;
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_POWERPLAY_INFO_TABLE:	/* offset from mobile info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x11);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_GPIO_INFO_TABLE:	/* offset from mobile info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x13);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_LCD_DDC_INFO_TABLE:	/* offset from mobile info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x15);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_TMDS_POWER_TABLE:	/* offset from mobile info */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x17);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_TMDS_POWER_ON_TABLE:	/* offset from tmds power */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x2);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	case COMBIOS_TMDS_POWER_OFF_TABLE:	/* offset from tmds power */
+		check_offset =
+		    combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
+		if (check_offset) {
+			check_offset = RBIOS16(check_offset + 0x4);
+			if (check_offset)
+				offset = check_offset;
+		}
+		break;
+	default:
+		check_offset = 0;
+		break;
+	}
+
+	size = RBIOS8(rdev->bios_header_start + 0x6);
+	/* check absolute offset tables */
+	if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
+		offset = RBIOS16(rdev->bios_header_start + check_offset);
+
+	return offset;
+}
+
+bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
+{
+	int edid_info, size;
+	struct edid *edid;
+	unsigned char *raw;
+	edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
+	if (!edid_info)
+		return false;
+
+	raw = rdev->bios + edid_info;
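+	/* byte 0x7e of the base EDID block holds the extension block count */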
+	size = EDID_LENGTH * (raw[0x7e] + 1);
+	edid = kmalloc(size, GFP_KERNEL);
+	if (edid == NULL)
+		return false;
+
+	memcpy((unsigned char *)edid, raw, size);
+
+	if (!drm_edid_is_valid(edid)) {
+		kfree(edid);
+		return false;
+	}
+
+	rdev->mode_info.bios_hardcoded_edid = edid;
+	rdev->mode_info.bios_hardcoded_edid_size = size;
+	return true;
+}
+
+/* this is used for atom LCDs as well */
+struct edid *
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
+{
+	struct edid *edid;
+
+	if (rdev->mode_info.bios_hardcoded_edid) {
+		edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
+		if (edid) {
+			memcpy((unsigned char *)edid,
+			       (unsigned char *)rdev->mode_info.bios_hardcoded_edid,
+			       rdev->mode_info.bios_hardcoded_edid_size);
+			return edid;
+		}
+	}
+	return NULL;
+}
+
+static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
+						       enum radeon_combios_ddc ddc,
+						       u32 clk_mask,
+						       u32 data_mask)
+{
+	struct radeon_i2c_bus_rec i2c;
+	int ddc_line = 0;
+
+	/* ddc id            = mask reg
+	 * DDC_NONE_DETECTED = none
+	 * DDC_DVI           = RADEON_GPIO_DVI_DDC
+	 * DDC_VGA           = RADEON_GPIO_VGA_DDC
+	 * DDC_LCD           = RADEON_GPIOPAD_MASK
+	 * DDC_GPIO          = RADEON_MDGPIO_MASK
+	 * r1xx
+	 * DDC_MONID         = RADEON_GPIO_MONID
+	 * DDC_CRT2          = RADEON_GPIO_CRT2_DDC
+	 * r200
+	 * DDC_MONID         = RADEON_GPIO_MONID
+	 * DDC_CRT2          = RADEON_GPIO_DVI_DDC
+	 * r300/r350
+	 * DDC_MONID         = RADEON_GPIO_DVI_DDC
+	 * DDC_CRT2          = RADEON_GPIO_DVI_DDC
+	 * rv2xx/rv3xx
+	 * DDC_MONID         = RADEON_GPIO_MONID
+	 * DDC_CRT2          = RADEON_GPIO_MONID
+	 * rs3xx/rs4xx
+	 * DDC_MONID         = RADEON_GPIOPAD_MASK
+	 * DDC_CRT2          = RADEON_GPIO_MONID
+	 */
+	switch (ddc) {
+	case DDC_NONE_DETECTED:
+	default:
+		ddc_line = 0;
+		break;
+	case DDC_DVI:
+		ddc_line = RADEON_GPIO_DVI_DDC;
+		break;
+	case DDC_VGA:
+		ddc_line = RADEON_GPIO_VGA_DDC;
+		break;
+	case DDC_LCD:
+		ddc_line = RADEON_GPIOPAD_MASK;
+		break;
+	case DDC_GPIO:
+		ddc_line = RADEON_MDGPIO_MASK;
+		break;
+	case DDC_MONID:
+		if (rdev->family == CHIP_RS300 ||
+		    rdev->family == CHIP_RS400 ||
+		    rdev->family == CHIP_RS480)
+			ddc_line = RADEON_GPIOPAD_MASK;
+		else if (rdev->family == CHIP_R300 ||
+			 rdev->family == CHIP_R350) {
+			ddc_line = RADEON_GPIO_DVI_DDC;
+			ddc = DDC_DVI;
+		} else
+			ddc_line = RADEON_GPIO_MONID;
+		break;
+	case DDC_CRT2:
+		if (rdev->family == CHIP_R200 ||
+		    rdev->family == CHIP_R300 ||
+		    rdev->family == CHIP_R350) {
+			ddc_line = RADEON_GPIO_DVI_DDC;
+			ddc = DDC_DVI;
+		} else if (rdev->family == CHIP_RS300 ||
+			   rdev->family == CHIP_RS400 ||
+			   rdev->family == CHIP_RS480)
+			ddc_line = RADEON_GPIO_MONID;
+		else if (rdev->family >= CHIP_RV350) {
+			ddc_line = RADEON_GPIO_MONID;
+			ddc = DDC_MONID;
+		} else
+			ddc_line = RADEON_GPIO_CRT2_DDC;
+		break;
+	}
+
+	if (ddc_line == RADEON_GPIOPAD_MASK) {
+		i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
+		i2c.mask_data_reg = RADEON_GPIOPAD_MASK;
+		i2c.a_clk_reg = RADEON_GPIOPAD_A;
+		i2c.a_data_reg = RADEON_GPIOPAD_A;
+		i2c.en_clk_reg = RADEON_GPIOPAD_EN;
+		i2c.en_data_reg = RADEON_GPIOPAD_EN;
+		i2c.y_clk_reg = RADEON_GPIOPAD_Y;
+		i2c.y_data_reg = RADEON_GPIOPAD_Y;
+	} else if (ddc_line == RADEON_MDGPIO_MASK) {
+		i2c.mask_clk_reg = RADEON_MDGPIO_MASK;
+		i2c.mask_data_reg = RADEON_MDGPIO_MASK;
+		i2c.a_clk_reg = RADEON_MDGPIO_A;
+		i2c.a_data_reg = RADEON_MDGPIO_A;
+		i2c.en_clk_reg = RADEON_MDGPIO_EN;
+		i2c.en_data_reg = RADEON_MDGPIO_EN;
+		i2c.y_clk_reg = RADEON_MDGPIO_Y;
+		i2c.y_data_reg = RADEON_MDGPIO_Y;
+	} else {
+		i2c.mask_clk_reg = ddc_line;
+		i2c.mask_data_reg = ddc_line;
+		i2c.a_clk_reg = ddc_line;
+		i2c.a_data_reg = ddc_line;
+		i2c.en_clk_reg = ddc_line;
+		i2c.en_data_reg = ddc_line;
+		i2c.y_clk_reg = ddc_line;
+		i2c.y_data_reg = ddc_line;
+	}
+
+	if (clk_mask && data_mask) {
+		/* system specific masks */
+		i2c.mask_clk_mask = clk_mask;
+		i2c.mask_data_mask = data_mask;
+		i2c.a_clk_mask = clk_mask;
+		i2c.a_data_mask = data_mask;
+		i2c.en_clk_mask = clk_mask;
+		i2c.en_data_mask = data_mask;
+		i2c.y_clk_mask = clk_mask;
+		i2c.y_data_mask = data_mask;
+	} else if ((ddc_line == RADEON_GPIOPAD_MASK) ||
+		   (ddc_line == RADEON_MDGPIO_MASK)) {
+		/* default gpiopad masks */
+		i2c.mask_clk_mask = (0x20 << 8);
+		i2c.mask_data_mask = 0x80;
+		i2c.a_clk_mask = (0x20 << 8);
+		i2c.a_data_mask = 0x80;
+		i2c.en_clk_mask = (0x20 << 8);
+		i2c.en_data_mask = 0x80;
+		i2c.y_clk_mask = (0x20 << 8);
+		i2c.y_data_mask = 0x80;
+	} else {
+		/* default masks for ddc pads */
+		i2c.mask_clk_mask = RADEON_GPIO_MASK_1;
+		i2c.mask_data_mask = RADEON_GPIO_MASK_0;
+		i2c.a_clk_mask = RADEON_GPIO_A_1;
+		i2c.a_data_mask = RADEON_GPIO_A_0;
+		i2c.en_clk_mask = RADEON_GPIO_EN_1;
+		i2c.en_data_mask = RADEON_GPIO_EN_0;
+		i2c.y_clk_mask = RADEON_GPIO_Y_1;
+		i2c.y_data_mask = RADEON_GPIO_Y_0;
+	}
+
+	switch (rdev->family) {
+	case CHIP_R100:
+	case CHIP_RV100:
+	case CHIP_RS100:
+	case CHIP_RV200:
+	case CHIP_RS200:
+	case CHIP_RS300:
+		switch (ddc_line) {
+		case RADEON_GPIO_DVI_DDC:
+			i2c.hw_capable = true;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	case CHIP_R200:
+		switch (ddc_line) {
+		case RADEON_GPIO_DVI_DDC:
+		case RADEON_GPIO_MONID:
+			i2c.hw_capable = true;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	case CHIP_RV250:
+	case CHIP_RV280:
+		switch (ddc_line) {
+		case RADEON_GPIO_VGA_DDC:
+		case RADEON_GPIO_DVI_DDC:
+		case RADEON_GPIO_CRT2_DDC:
+			i2c.hw_capable = true;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	case CHIP_R300:
+	case CHIP_R350:
+		switch (ddc_line) {
+		case RADEON_GPIO_VGA_DDC:
+		case RADEON_GPIO_DVI_DDC:
+			i2c.hw_capable = true;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	case CHIP_RV350:
+	case CHIP_RV380:
+	case CHIP_RS400:
+	case CHIP_RS480:
+		switch (ddc_line) {
+		case RADEON_GPIO_VGA_DDC:
+		case RADEON_GPIO_DVI_DDC:
+			i2c.hw_capable = true;
+			break;
+		case RADEON_GPIO_MONID:
+			/* hw i2c on RADEON_GPIO_MONID doesn't seem to work
+			 * reliably on some pre-r4xx hardware; not sure why.
+			 */
+			i2c.hw_capable = false;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
+		break;
+	default:
+		i2c.hw_capable = false;
+		break;
+	}
+	i2c.mm_i2c = false;
+
+	i2c.i2c_id = ddc;
+	i2c.hpd = RADEON_HPD_NONE;
+
+	if (ddc_line)
+		i2c.valid = true;
+	else
+		i2c.valid = false;
+
+	return i2c;
+}
+
+static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct radeon_i2c_bus_rec i2c;
+	u16 offset;
+	u8 id, blocks, clk, data;
+	int i;
+
+	i2c.valid = false;
+
+	offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+	if (offset) {
+		blocks = RBIOS8(offset + 2);
+		for (i = 0; i < blocks; i++) {
+			id = RBIOS8(offset + 3 + (i * 5) + 0);
+			if (id == 136) {
+				clk = RBIOS8(offset + 3 + (i * 5) + 3);
+				data = RBIOS8(offset + 3 + (i * 5) + 4);
+				/* gpiopad */
+				i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
+							    (1 << clk), (1 << data));
+				break;
+			}
+		}
+	}
+	return i2c;
+}
+
+void radeon_combios_i2c_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct radeon_i2c_bus_rec i2c;
+
+	/* actual hw pads
+	 * r1xx/rs2xx/rs3xx
+	 * 0x60, 0x64, 0x68, 0x6c, gpiopads, mm
+	 * r200
+	 * 0x60, 0x64, 0x68, mm
+	 * r300/r350
+	 * 0x60, 0x64, mm
+	 * rv2xx/rv3xx/rs4xx
+	 * 0x60, 0x64, 0x68, gpiopads, mm
+	 */
+
+	/* 0x60 */
+	i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+	rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC");
+	/* 0x64 */
+	i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+	rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC");
+
+	/* mm i2c */
+	i2c.valid = true;
+	i2c.hw_capable = true;
+	i2c.mm_i2c = true;
+	i2c.i2c_id = 0xa0;
+	rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C");
+
+	if (rdev->family == CHIP_R300 ||
+	    rdev->family == CHIP_R350) {
+		/* only 2 sw i2c pads */
+	} else if (rdev->family == CHIP_RS300 ||
+		   rdev->family == CHIP_RS400 ||
+		   rdev->family == CHIP_RS480) {
+		/* 0x68 */
+		i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+
+		/* gpiopad */
+		i2c = radeon_combios_get_i2c_info_from_table(rdev);
+		if (i2c.valid)
+			rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
+	} else if ((rdev->family == CHIP_R200) ||
+		   (rdev->family >= CHIP_R300)) {
+		/* 0x68 */
+		i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+	} else {
+		/* 0x68 */
+		i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+		/* 0x6c */
+		i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "CRT2_DDC");
+	}
+}
+
+bool radeon_combios_get_clock_info(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t pll_info;
+	struct radeon_pll *p1pll = &rdev->clock.p1pll;
+	struct radeon_pll *p2pll = &rdev->clock.p2pll;
+	struct radeon_pll *spll = &rdev->clock.spll;
+	struct radeon_pll *mpll = &rdev->clock.mpll;
+	int8_t rev;
+	uint16_t sclk, mclk;
+
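+	/* COMBIOS PLL info stores clocks in 10 kHz units */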
+	pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
+	if (pll_info) {
+		rev = RBIOS8(pll_info);
+
+		/* pixel clocks */
+		p1pll->reference_freq = RBIOS16(pll_info + 0xe);
+		p1pll->reference_div = RBIOS16(pll_info + 0x10);
+		p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
+		p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
+		p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+		p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+
+		if (rev > 9) {
+			p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
+			p1pll->pll_in_max = RBIOS32(pll_info + 0x3a);
+		} else {
+			p1pll->pll_in_min = 40;
+			p1pll->pll_in_max = 500;
+		}
+		*p2pll = *p1pll;
+
+		/* system clock */
+		spll->reference_freq = RBIOS16(pll_info + 0x1a);
+		spll->reference_div = RBIOS16(pll_info + 0x1c);
+		spll->pll_out_min = RBIOS32(pll_info + 0x1e);
+		spll->pll_out_max = RBIOS32(pll_info + 0x22);
+
+		if (rev > 10) {
+			spll->pll_in_min = RBIOS32(pll_info + 0x48);
+			spll->pll_in_max = RBIOS32(pll_info + 0x4c);
+		} else {
+			/* ??? */
+			spll->pll_in_min = 40;
+			spll->pll_in_max = 500;
+		}
+
+		/* memory clock */
+		mpll->reference_freq = RBIOS16(pll_info + 0x26);
+		mpll->reference_div = RBIOS16(pll_info + 0x28);
+		mpll->pll_out_min = RBIOS32(pll_info + 0x2a);
+		mpll->pll_out_max = RBIOS32(pll_info + 0x2e);
+
+		if (rev > 10) {
+			mpll->pll_in_min = RBIOS32(pll_info + 0x5a);
+			mpll->pll_in_max = RBIOS32(pll_info + 0x5e);
+		} else {
+			/* ??? */
+			mpll->pll_in_min = 40;
+			mpll->pll_in_max = 500;
+		}
+
+		/* default sclk/mclk */
+		sclk = RBIOS16(pll_info + 0xa);
+		mclk = RBIOS16(pll_info + 0x8);
+		if (sclk == 0)
+			sclk = 200 * 100;
+		if (mclk == 0)
+			mclk = 200 * 100;
+
+		rdev->clock.default_sclk = sclk;
+		rdev->clock.default_mclk = mclk;
+
+		if (RBIOS32(pll_info + 0x16))
+			rdev->clock.max_pixel_clock = RBIOS32(pll_info + 0x16);
+		else
+			rdev->clock.max_pixel_clock = 35000; /* 350 MHz; might need something ASIC-specific */
+
+		return true;
+	}
+	return false;
+}
+
+bool radeon_combios_sideport_present(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	u16 igp_info;
+
+	/* sideport memory is AMD-only; RS400 is the Intel-chipset IGP variant */
+	if (rdev->family == CHIP_RS400)
+		return false;
+
+	igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
+
+	if (igp_info) {
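+		/* a nonzero field at offset 4 indicates sideport memory is present */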
+		if (RBIOS16(igp_info + 0x4))
+			return true;
+	}
+	return false;
+}
+
+static const uint32_t default_primarydac_adj[CHIP_LAST] = {
+	0x00000808,		/* r100  */
+	0x00000808,		/* rv100 */
+	0x00000808,		/* rs100 */
+	0x00000808,		/* rv200 */
+	0x00000808,		/* rs200 */
+	0x00000808,		/* r200  */
+	0x00000808,		/* rv250 */
+	0x00000000,		/* rs300 */
+	0x00000808,		/* rv280 */
+	0x00000808,		/* r300  */
+	0x00000808,		/* r350  */
+	0x00000808,		/* rv350 */
+	0x00000808,		/* rv380 */
+	0x00000808,		/* r420  */
+	0x00000808,		/* r423  */
+	0x00000808,		/* rv410 */
+	0x00000000,		/* rs400 */
+	0x00000000,		/* rs480 */
+};
+
+static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
+							  struct radeon_encoder_primary_dac *p_dac)
+{
+	p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
+}
+
+struct radeon_encoder_primary_dac *
+radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t dac_info;
+	uint8_t rev, bg, dac;
+	struct radeon_encoder_primary_dac *p_dac = NULL;
+	int found = 0;
+
+	p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
+			GFP_KERNEL);
+
+	if (!p_dac)
+		return NULL;
+
+	/* check CRT table */
+	dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+	if (dac_info) {
+		rev = RBIOS8(dac_info) & 0x3;
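+		/* bg/dac adjust values; rev < 2 packs both nibbles into one byte */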
+		if (rev < 2) {
+			bg = RBIOS8(dac_info + 0x2) & 0xf;
+			dac = (RBIOS8(dac_info + 0x2) >> 4) & 0xf;
+			p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+		} else {
+			bg = RBIOS8(dac_info + 0x2) & 0xf;
+			dac = RBIOS8(dac_info + 0x3) & 0xf;
+			p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+		}
+		/* if either value is zero, fall back to the defaults table */
+		if ((dac == 0) || (bg == 0))
+			found = 0;
+		else
+			found = 1;
+	}
+
+	/* quirks */
+	/* Radeon 7000 (RV100) */
+	if (((dev->pdev->device == 0x5159) &&
+	    (dev->pdev->subsystem_vendor == 0x174B) &&
+	    (dev->pdev->subsystem_device == 0x7c28)) ||
+	/* Radeon 9100 (R200) */
+	   ((dev->pdev->device == 0x514D) &&
+	    (dev->pdev->subsystem_vendor == 0x174B) &&
+	    (dev->pdev->subsystem_device == 0x7149))) {
+		/* vbios value is bad, use the default */
+		found = 0;
+	}
+
+	if (!found) /* fallback to defaults */
+		radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
+
+	return p_dac;
+}
+
+enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	uint16_t tv_info;
+	enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+	tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+	if (tv_info) {
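+		/* a valid TV info table is tagged with 'T' at offset 6 */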
+		if (RBIOS8(tv_info + 6) == 'T') {
+			switch (RBIOS8(tv_info + 7) & 0xf) {
+			case 1:
+				tv_std = TV_STD_NTSC;
+				DRM_DEBUG_KMS("Default TV standard: NTSC\n");
+				break;
+			case 2:
+				tv_std = TV_STD_PAL;
+				DRM_DEBUG_KMS("Default TV standard: PAL\n");
+				break;
+			case 3:
+				tv_std = TV_STD_PAL_M;
+				DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
+				break;
+			case 4:
+				tv_std = TV_STD_PAL_60;
+				DRM_DEBUG_KMS("Default TV standard: PAL-60\n");
+				break;
+			case 5:
+				tv_std = TV_STD_NTSC_J;
+				DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
+				break;
+			case 6:
+				tv_std = TV_STD_SCART_PAL;
+				DRM_DEBUG_KMS("Default TV standard: SCART-PAL\n");
+				break;
+			default:
+				tv_std = TV_STD_NTSC;
+				DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n");
+				break;
+			}
+
+			switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
+			case 0:
+				DRM_DEBUG_KMS("29.498928713 MHz TV ref clk\n");
+				break;
+			case 1:
+				DRM_DEBUG_KMS("28.636360000 MHz TV ref clk\n");
+				break;
+			case 2:
+				DRM_DEBUG_KMS("14.318180000 MHz TV ref clk\n");
+				break;
+			case 3:
+				DRM_DEBUG_KMS("27.000000000 MHz TV ref clk\n");
+				break;
+			default:
+				break;
+			}
+		}
+	}
+	return tv_std;
+}
+
+static const uint32_t default_tvdac_adj[CHIP_LAST] = {
+	0x00000000,		/* r100  */
+	0x00280000,		/* rv100 */
+	0x00000000,		/* rs100 */
+	0x00880000,		/* rv200 */
+	0x00000000,		/* rs200 */
+	0x00000000,		/* r200  */
+	0x00770000,		/* rv250 */
+	0x00290000,		/* rs300 */
+	0x00560000,		/* rv280 */
+	0x00780000,		/* r300  */
+	0x00770000,		/* r350  */
+	0x00780000,		/* rv350 */
+	0x00780000,		/* rv380 */
+	0x01080000,		/* r420  */
+	0x01080000,		/* r423  */
+	0x01080000,		/* rv410 */
+	0x00780000,		/* rs400 */
+	0x00780000,		/* rs480 */
+};
+
+static void radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev,
+						     struct radeon_encoder_tv_dac *tv_dac)
+{
+	tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family];
+	if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250))
+		tv_dac->ps2_tvdac_adj = 0x00880000;
+	tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+	tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+}
+
+struct radeon_encoder_tv_dac *
+radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t dac_info;
+	uint8_t rev, bg, dac;
+	struct radeon_encoder_tv_dac *tv_dac = NULL;
+	int found = 0;
+
+	tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
+	if (!tv_dac)
+		return NULL;
+
+	/* first check TV table */
+	dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+	if (dac_info) {
+		rev = RBIOS8(dac_info + 0x3);
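+		/* each adjust value packs bg into bits 16-19 and dac into bits 20-23 */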
+		if (rev > 4) {
+			bg = RBIOS8(dac_info + 0xc) & 0xf;
+			dac = RBIOS8(dac_info + 0xd) & 0xf;
+			tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+			bg = RBIOS8(dac_info + 0xe) & 0xf;
+			dac = RBIOS8(dac_info + 0xf) & 0xf;
+			tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+			bg = RBIOS8(dac_info + 0x10) & 0xf;
+			dac = RBIOS8(dac_info + 0x11) & 0xf;
+			tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+			/* if the values are all zeros, use the table */
+			if (tv_dac->ps2_tvdac_adj)
+				found = 1;
+		} else if (rev > 1) {
+			bg = RBIOS8(dac_info + 0xc) & 0xf;
+			dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
+			tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
+
+			bg = RBIOS8(dac_info + 0xd) & 0xf;
+			dac = (RBIOS8(dac_info + 0xd) >> 4) & 0xf;
+			tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
+
+			bg = RBIOS8(dac_info + 0xe) & 0xf;
+			dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
+			tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+			/* if the values are all zeros, use the table */
+			if (tv_dac->ps2_tvdac_adj)
+				found = 1;
+		}
+		tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
+	}
+	if (!found) {
+		/* then check CRT table */
+		dac_info =
+		    combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+		if (dac_info) {
+			rev = RBIOS8(dac_info) & 0x3;
+			if (rev < 2) {
+				bg = RBIOS8(dac_info + 0x3) & 0xf;
+				dac = (RBIOS8(dac_info + 0x3) >> 4) & 0xf;
+				tv_dac->ps2_tvdac_adj =
+				    (bg << 16) | (dac << 20);
+				tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+				tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+				/* if the values are all zeros, use the table */
+				if (tv_dac->ps2_tvdac_adj)
+					found = 1;
+			} else {
+				bg = RBIOS8(dac_info + 0x4) & 0xf;
+				dac = RBIOS8(dac_info + 0x5) & 0xf;
+				tv_dac->ps2_tvdac_adj =
+				    (bg << 16) | (dac << 20);
+				tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
+				tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
+				/* if the values are all zeros, use the table */
+				if (tv_dac->ps2_tvdac_adj)
+					found = 1;
+			}
+		} else {
+			DRM_INFO("No TV DAC info found in BIOS\n");
+		}
+	}
+
+	if (!found) /* fallback to defaults */
+		radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
+
+	return tv_dac;
+}
+
+static struct radeon_encoder_lvds *
+radeon_legacy_get_lvds_info_from_regs(struct radeon_device *rdev)
+{
+	struct radeon_encoder_lvds *lvds = NULL;
+	uint32_t fp_vert_stretch, fp_horz_stretch;
+	uint32_t ppll_div_sel, ppll_val;
+	uint32_t lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
+
+	lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
+
+	if (!lvds)
+		return NULL;
+
+	fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH);
+	fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH);
+
+	/* These should be fail-safe defaults, fingers crossed */
+	lvds->panel_pwr_delay = 200;
+	lvds->panel_vcc_delay = 2000;
+
+	lvds->lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	lvds->panel_digon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) & 0xf;
+	lvds->panel_blon_delay = (lvds_ss_gen_cntl >> RADEON_LVDS_PWRSEQ_DELAY2_SHIFT) & 0xf;
+
+	if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE)
+		lvds->native_mode.vdisplay =
+		    ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >>
+		     RADEON_VERT_PANEL_SHIFT) + 1;
+	else
+		lvds->native_mode.vdisplay =
+		    (RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1;
+
+	if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE)
+		lvds->native_mode.hdisplay =
+		    (((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >>
+		      RADEON_HORZ_PANEL_SHIFT) + 1) * 8;
+	else
+		lvds->native_mode.hdisplay =
+		    ((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8;
+
+	if ((lvds->native_mode.hdisplay < 640) ||
+	    (lvds->native_mode.vdisplay < 480)) {
+		lvds->native_mode.hdisplay = 640;
+		lvds->native_mode.vdisplay = 480;
+	}
+
+	ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3;
+	ppll_val = RREG32_PLL(RADEON_PPLL_DIV_0 + ppll_div_sel);
+	if ((ppll_val & 0x000707ff) == 0x1bb) {
+		lvds->use_bios_dividers = false;
+	} else {
+		lvds->panel_ref_divider =
+		    RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+		lvds->panel_post_divider = (ppll_val >> 16) & 0x7;
+		lvds->panel_fb_divider = ppll_val & 0x7ff;
+
+		if ((lvds->panel_ref_divider != 0) &&
+		    (lvds->panel_fb_divider > 3))
+			lvds->use_bios_dividers = true;
+	}
+	lvds->panel_vcc_delay = 200;
+
+	DRM_INFO("Panel info derived from registers\n");
+	DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
+		 lvds->native_mode.vdisplay);
+
+	return lvds;
+}
+
+struct radeon_encoder_lvds *
+radeon_combios_get_lvds_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t lcd_info;
+	uint32_t panel_setup;
+	char stmp[30];
+	int tmp, i;
+	struct radeon_encoder_lvds *lvds = NULL;
+
+	lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
+
+	if (lcd_info) {
+		lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
+
+		if (!lvds)
+			return NULL;
+
+		for (i = 0; i < 24; i++)
+			stmp[i] = RBIOS8(lcd_info + i + 1);
+		stmp[24] = 0;
+
+		DRM_INFO("Panel ID String: %s\n", stmp);
+
+		lvds->native_mode.hdisplay = RBIOS16(lcd_info + 0x19);
+		lvds->native_mode.vdisplay = RBIOS16(lcd_info + 0x1b);
+
+		DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.hdisplay,
+			 lvds->native_mode.vdisplay);
+
+		lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
+		lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
+
+		lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
+		lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
+		lvds->panel_blon_delay = (RBIOS16(lcd_info + 0x38) >> 4) & 0xf;
+
+		lvds->panel_ref_divider = RBIOS16(lcd_info + 0x2e);
+		lvds->panel_post_divider = RBIOS8(lcd_info + 0x30);
+		lvds->panel_fb_divider = RBIOS16(lcd_info + 0x31);
+		if ((lvds->panel_ref_divider != 0) &&
+		    (lvds->panel_fb_divider > 3))
+			lvds->use_bios_dividers = true;
+
+		panel_setup = RBIOS32(lcd_info + 0x39);
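+		/* translate the BIOS panel_setup bitfield into LVDS_GEN_CNTL flags */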
+		lvds->lvds_gen_cntl = 0xff00;
+		if (panel_setup & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_FORMAT;
+
+		if ((panel_setup >> 4) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_TYPE;
+
+		switch ((panel_setup >> 8) & 0x7) {
+		case 0:
+			lvds->lvds_gen_cntl |= RADEON_LVDS_NO_FM;
+			break;
+		case 1:
+			lvds->lvds_gen_cntl |= RADEON_LVDS_2_GREY;
+			break;
+		case 2:
+			lvds->lvds_gen_cntl |= RADEON_LVDS_4_GREY;
+			break;
+		default:
+			break;
+		}
+
+		if ((panel_setup >> 16) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_FP_POL_LOW;
+
+		if ((panel_setup >> 17) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_LP_POL_LOW;
+
+		if ((panel_setup >> 18) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_DTM_POL_LOW;
+
+		if ((panel_setup >> 23) & 0x1)
+			lvds->lvds_gen_cntl |= RADEON_LVDS_BL_CLK_SEL;
+
+		lvds->lvds_gen_cntl |= (panel_setup & 0xf0000000);
+
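+		/* walk up to 32 mode-record pointers at offset 64, looking for an
+		 * entry matching the native panel size, and derive full timings */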
+		for (i = 0; i < 32; i++) {
+			tmp = RBIOS16(lcd_info + 64 + i * 2);
+			if (tmp == 0)
+				break;
+
+			if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
+			    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+				lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+					(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+				lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+					(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+				lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+					(RBIOS8(tmp + 23) * 8);
+
+				lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
+					(RBIOS16(tmp + 24) - RBIOS16(tmp + 26));
+				lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
+					((RBIOS16(tmp + 28) & 0x7ff) - RBIOS16(tmp + 26));
+				lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
+					((RBIOS16(tmp + 28) & 0xf800) >> 11);
+
+				lvds->native_mode.clock = RBIOS16(tmp + 9) * 10;
+				lvds->native_mode.flags = 0;
+				/* set crtc values */
+				drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+
+			}
+		}
+	} else {
+		DRM_INFO("No panel info found in BIOS\n");
+		lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
+	}
+
+	if (lvds)
+		encoder->native_mode = lvds->native_mode;
+	return lvds;
+}
+
+static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
+	{{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_R100  */
+	{{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_RV100 */
+	{{0, 0}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RS100 */
+	{{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_RV200 */
+	{{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_RS200 */
+	{{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_R200  */
+	{{15500, 0x81b}, {0xffffffff, 0x83f}, {0, 0}, {0, 0}},	/* CHIP_RV250 */
+	{{0, 0}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RS300 */
+	{{13000, 0x400f4}, {15000, 0x400f7}, {0xffffffff, 0x40111}, {0, 0}},	/* CHIP_RV280 */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R300  */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R350  */
+	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RV350 */
+	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RV380 */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R420  */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R423  */
+	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RV410 */
+	{{0, 0}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RS400 */
+	{{0, 0}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RS480 */
+};
+
+bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+					    struct radeon_encoder_int_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		tmds->tmds_pll[i].value =
+			default_tmds_pll[rdev->family][i].value;
+		tmds->tmds_pll[i].freq = default_tmds_pll[rdev->family][i].freq;
+	}
+
+	return true;
+}
+
+bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+					      struct radeon_encoder_int_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t tmds_info;
+	int i, n;
+	uint8_t ver;
+
+	tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
+
+	if (tmds_info) {
+		ver = RBIOS8(tmds_info);
+		DRM_DEBUG_KMS("DFP table revision: %d\n", ver);
+		if (ver == 3) {
+			n = RBIOS8(tmds_info + 5) + 1;
+			if (n > 4)
+				n = 4;
+			for (i = 0; i < n; i++) {
+				tmds->tmds_pll[i].value =
+				    RBIOS32(tmds_info + i * 10 + 0x08);
+				tmds->tmds_pll[i].freq =
+				    RBIOS16(tmds_info + i * 10 + 0x10);
+				DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
+					  tmds->tmds_pll[i].freq,
+					  tmds->tmds_pll[i].value);
+			}
+		} else if (ver == 4) {
+			int stride = 0;
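+			/* rev 4 packs the first PLL record in 10 bytes and each
+			 * subsequent record in 6 */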
+			n = RBIOS8(tmds_info + 5) + 1;
+			if (n > 4)
+				n = 4;
+			for (i = 0; i < n; i++) {
+				tmds->tmds_pll[i].value =
+				    RBIOS32(tmds_info + stride + 0x08);
+				tmds->tmds_pll[i].freq =
+				    RBIOS16(tmds_info + stride + 0x10);
+				if (i == 0)
+					stride += 10;
+				else
+					stride += 6;
+				DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
+					  tmds->tmds_pll[i].freq,
+					  tmds->tmds_pll[i].value);
+			}
+		}
+	} else {
+		DRM_INFO("No TMDS info found in BIOS\n");
+		return false;
+	}
+	return true;
+}
+
+bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+						struct radeon_encoder_ext_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	/* default for macs */
+	i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+	tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+
+	/* XXX some macs have duallink chips */
+	switch (rdev->mode_info.connector_table) {
+	case CT_POWERBOOK_EXTERNAL:
+	case CT_MINI_EXTERNAL:
+	default:
+		tmds->dvo_chip = DVO_SIL164;
+		tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+		break;
+	}
+
+	return true;
+}
+
+bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+						  struct radeon_encoder_ext_tmds *tmds)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t offset;
+	uint8_t ver;
+	enum radeon_combios_ddc gpio;
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	tmds->i2c_bus = NULL;
+	if (rdev->flags & RADEON_IS_IGP) {
+		i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+		tmds->dvo_chip = DVO_SIL164;
+		tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+	} else {
+		offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+		if (offset) {
+			ver = RBIOS8(offset);
+			DRM_DEBUG_KMS("External TMDS Table revision: %d\n", ver);
+			tmds->slave_addr = RBIOS8(offset + 4 + 2);
+			tmds->slave_addr >>= 1; /* 7 bit addressing */
+			gpio = RBIOS8(offset + 4 + 3);
+			if (gpio == DDC_LCD) {
+				/* MM i2c */
+				i2c_bus.valid = true;
+				i2c_bus.hw_capable = true;
+				i2c_bus.mm_i2c = true;
+				i2c_bus.i2c_id = 0xa0;
+			} else {
+				i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
+			}
+			tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+		}
+	}
+
+	if (!tmds->i2c_bus) {
+		DRM_INFO("No valid Ext TMDS info found in BIOS\n");
+		return false;
+	}
+
+	return true;
+}
+
+bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_i2c_bus_rec ddc_i2c;
+	struct radeon_hpd hpd;
+
+	rdev->mode_info.connector_table = radeon_connector_table;
+	if (rdev->mode_info.connector_table == CT_NONE) {
+#ifdef CONFIG_PPC_PMAC
+		if (of_machine_is_compatible("PowerBook3,3")) {
+			/* powerbook with VGA */
+			rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
+		} else if (of_machine_is_compatible("PowerBook3,4") ||
+			   of_machine_is_compatible("PowerBook3,5")) {
+			/* powerbook with internal tmds */
+			rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
+		} else if (of_machine_is_compatible("PowerBook5,1") ||
+			   of_machine_is_compatible("PowerBook5,2") ||
+			   of_machine_is_compatible("PowerBook5,3") ||
+			   of_machine_is_compatible("PowerBook5,4") ||
+			   of_machine_is_compatible("PowerBook5,5")) {
+			/* powerbook with external single link tmds (sil164) */
+			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+		} else if (of_machine_is_compatible("PowerBook5,6")) {
+			/* powerbook with external dual or single link tmds */
+			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+		} else if (of_machine_is_compatible("PowerBook5,7") ||
+			   of_machine_is_compatible("PowerBook5,8") ||
+			   of_machine_is_compatible("PowerBook5,9")) {
+			/* PowerBook6,2 ? */
+			/* powerbook with external dual link tmds (sil1178?) */
+			rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
+		} else if (of_machine_is_compatible("PowerBook4,1") ||
+			   of_machine_is_compatible("PowerBook4,2") ||
+			   of_machine_is_compatible("PowerBook4,3") ||
+			   of_machine_is_compatible("PowerBook6,3") ||
+			   of_machine_is_compatible("PowerBook6,5") ||
+			   of_machine_is_compatible("PowerBook6,7")) {
+			/* ibook */
+			rdev->mode_info.connector_table = CT_IBOOK;
+		} else if (of_machine_is_compatible("PowerMac3,5")) {
+			/* PowerMac G4 Silver radeon 7500 */
+			rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
+		} else if (of_machine_is_compatible("PowerMac4,4")) {
+			/* emac */
+			rdev->mode_info.connector_table = CT_EMAC;
+		} else if (of_machine_is_compatible("PowerMac10,1")) {
+			/* mini with internal tmds */
+			rdev->mode_info.connector_table = CT_MINI_INTERNAL;
+		} else if (of_machine_is_compatible("PowerMac10,2")) {
+			/* mini with external tmds */
+			rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
+		} else if (of_machine_is_compatible("PowerMac12,1")) {
+			/* PowerMac8,1 ? */
+			/* imac g5 isight */
+			rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
+		} else if ((rdev->pdev->device == 0x4a48) &&
+			   (rdev->pdev->subsystem_vendor == 0x1002) &&
+			   (rdev->pdev->subsystem_device == 0x4a48)) {
+			/* Mac X800 */
+			rdev->mode_info.connector_table = CT_MAC_X800;
+		} else if ((of_machine_is_compatible("PowerMac7,2") ||
+			    of_machine_is_compatible("PowerMac7,3")) &&
+			   (rdev->pdev->device == 0x4150) &&
+			   (rdev->pdev->subsystem_vendor == 0x1002) &&
+			   (rdev->pdev->subsystem_device == 0x4150)) {
+			/* Mac G5 tower 9600 */
+			rdev->mode_info.connector_table = CT_MAC_G5_9600;
+		} else if ((rdev->pdev->device == 0x4c66) &&
+			   (rdev->pdev->subsystem_vendor == 0x1002) &&
+			   (rdev->pdev->subsystem_device == 0x4c66)) {
+			/* SAM440ep RV250 embedded board */
+			rdev->mode_info.connector_table = CT_SAM440EP;
+		} else
+#endif /* CONFIG_PPC_PMAC */
+#ifdef CONFIG_PPC64
+		if (ASIC_IS_RN50(rdev))
+			rdev->mode_info.connector_table = CT_RN50_POWER;
+		else
+#endif
+			rdev->mode_info.connector_table = CT_GENERIC;
+	}
+
+	switch (rdev->mode_info.connector_table) {
+	case CT_GENERIC:
+		DRM_INFO("Connector Table: %d (generic)\n",
+			 rdev->mode_info.connector_table);
+		/* these are the most common settings */
+		if (rdev->flags & RADEON_SINGLE_CRTC) {
+			/* VGA - primary dac */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT1_SUPPORT,
+									1),
+						  ATOM_DEVICE_CRT1_SUPPORT);
+			radeon_add_legacy_connector(dev, 0,
+						    ATOM_DEVICE_CRT1_SUPPORT,
+						    DRM_MODE_CONNECTOR_VGA,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_VGA,
+						    &hpd);
+		} else if (rdev->flags & RADEON_IS_MOBILITY) {
+			/* LVDS */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_LCD1_SUPPORT,
+									0),
+						  ATOM_DEVICE_LCD1_SUPPORT);
+			radeon_add_legacy_connector(dev, 0,
+						    ATOM_DEVICE_LCD1_SUPPORT,
+						    DRM_MODE_CONNECTOR_LVDS,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_LVDS,
+						    &hpd);
+
+			/* VGA - primary dac */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT1_SUPPORT,
+									1),
+						  ATOM_DEVICE_CRT1_SUPPORT);
+			radeon_add_legacy_connector(dev, 1,
+						    ATOM_DEVICE_CRT1_SUPPORT,
+						    DRM_MODE_CONNECTOR_VGA,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_VGA,
+						    &hpd);
+		} else {
+			/* DVI-I - tv dac, int tmds */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+			hpd.hpd = RADEON_HPD_1;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_DFP1_SUPPORT,
+									0),
+						  ATOM_DEVICE_DFP1_SUPPORT);
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT2_SUPPORT,
+									2),
+						  ATOM_DEVICE_CRT2_SUPPORT);
+			radeon_add_legacy_connector(dev, 0,
+						    ATOM_DEVICE_DFP1_SUPPORT |
+						    ATOM_DEVICE_CRT2_SUPPORT,
+						    DRM_MODE_CONNECTOR_DVII,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+						    &hpd);
+
+			/* VGA - primary dac */
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT1_SUPPORT,
+									1),
+						  ATOM_DEVICE_CRT1_SUPPORT);
+			radeon_add_legacy_connector(dev, 1,
+						    ATOM_DEVICE_CRT1_SUPPORT,
+						    DRM_MODE_CONNECTOR_VGA,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_VGA,
+						    &hpd);
+		}
+
+		if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
+			/* TV - tv dac */
+			ddc_i2c.valid = false;
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_TV1_SUPPORT,
+									2),
+						  ATOM_DEVICE_TV1_SUPPORT);
+			radeon_add_legacy_connector(dev, 2,
+						    ATOM_DEVICE_TV1_SUPPORT,
+						    DRM_MODE_CONNECTOR_SVIDEO,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_SVIDEO,
+						    &hpd);
+		}
+		break;
+	case CT_IBOOK:
+		DRM_INFO("Connector Table: %d (ibook)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* VGA - TV DAC */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_POWERBOOK_EXTERNAL:
+		DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* DVI-I - primary dac, ext tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP2_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		/* XXX some are SL */
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_POWERBOOK_INTERNAL:
+		DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* DVI-I - primary dac, int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_POWERBOOK_VGA:
+		DRM_INFO("Connector Table: %d (powerbook vga)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_MINI_EXTERNAL:
+		DRM_INFO("Connector Table: %d (mini external tmds)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI-I - tv dac, ext tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP2_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		/* XXX are any DL? */
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_MINI_INTERNAL:
+		DRM_INFO("Connector Table: %d (mini internal tmds)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI-I - tv dac, int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_IMAC_G5_ISIGHT:
+		DRM_INFO("Connector Table: %d (imac g5 isight)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI-D - int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+					    &hpd);
+		/* VGA - tv dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_EMAC:
+		DRM_INFO("Connector Table: %d (emac)\n",
+			 rdev->mode_info.connector_table);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* VGA - tv dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_RN50_POWER:
+		DRM_INFO("Connector Table: %d (rn50-power)\n",
+			 rdev->mode_info.connector_table);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		break;
+	case CT_MAC_X800:
+		DRM_INFO("Connector Table: %d (mac x800)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI - primary dac, internal tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT1_SUPPORT,
+								  1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* DVI - tv dac, dvo */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP2_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT2_SUPPORT,
+								  2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+					    &hpd);
+		break;
+	case CT_MAC_G5_9600:
+		DRM_INFO("Connector Table: %d (mac g5 9600)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI - tv dac, dvo */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP2_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT2_SUPPORT,
+								  2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* ADC - primary dac, internal tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT1_SUPPORT,
+								  1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_SAM440EP:
+		DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
+			 rdev->mode_info.connector_table);
+		/* LVDS */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_LCD1_SUPPORT,
+								0),
+					  ATOM_DEVICE_LCD1_SUPPORT);
+		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_LVDS,
+					    &hpd);
+		/* DVI-I - secondary dac, int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2,
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	case CT_MAC_G4_SILVER:
+		DRM_INFO("Connector Table: %d (mac g4 silver)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI-I - tv dac, int tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_DFP1_SUPPORT,
+								0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT2_SUPPORT,
+								2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* VGA - primary dac */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_CRT1_SUPPORT,
+								1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_VGA,
+					    &hpd);
+		/* TV - TV DAC */
+		ddc_i2c.valid = false;
+		hpd.hpd = RADEON_HPD_NONE;
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								ATOM_DEVICE_TV1_SUPPORT,
+								2),
+					  ATOM_DEVICE_TV1_SUPPORT);
+		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+					    DRM_MODE_CONNECTOR_SVIDEO,
+					    &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SVIDEO,
+					    &hpd);
+		break;
+	default:
+		DRM_INFO("Connector table: %d (invalid)\n",
+			 rdev->mode_info.connector_table);
+		return false;
+	}
+
+	radeon_link_encoder_connector(dev);
+
+	return true;
+}
+
+static bool radeon_apply_legacy_quirks(struct drm_device *dev,
+				       int bios_index,
+				       enum radeon_combios_connector *legacy_connector,
+				       struct radeon_i2c_bus_rec *ddc_i2c,
+				       struct radeon_hpd *hpd)
+{
+	/* Certain IBM chipset RN50s have a BIOS reporting two VGAs, one with
+	 * VGA DDC and one with CRT2 DDC - drop the CRT2 DDC one. */
+	if (dev->pdev->device == 0x515e &&
+	    dev->pdev->subsystem_vendor == 0x1014) {
+		if (*legacy_connector == CONNECTOR_CRT_LEGACY &&
+		    ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+			return false;
+	}
+
+	/* X300 card with extra non-existent DVI port */
+	if (dev->pdev->device == 0x5B60 &&
+	    dev->pdev->subsystem_vendor == 0x17af &&
+	    dev->pdev->subsystem_device == 0x201e && bios_index == 2) {
+		if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
+			return false;
+	}
+
+	return true;
+}
+
+static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
+{
+	/* Acer 5102 has non-existent TV port */
+	if (dev->pdev->device == 0x5975 &&
+	    dev->pdev->subsystem_vendor == 0x1025 &&
+	    dev->pdev->subsystem_device == 0x009f)
+		return false;
+
+	/* HP dc5750 has non-existent TV port */
+	if (dev->pdev->device == 0x5974 &&
+	    dev->pdev->subsystem_vendor == 0x103c &&
+	    dev->pdev->subsystem_device == 0x280a)
+		return false;
+
+	/* MSI S270 has non-existent TV port */
+	if (dev->pdev->device == 0x5955 &&
+	    dev->pdev->subsystem_vendor == 0x1462 &&
+	    dev->pdev->subsystem_device == 0x0131)
+		return false;
+
+	return true;
+}
+
+static uint16_t combios_check_dl_dvi(struct drm_device *dev, int is_dvi_d)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t ext_tmds_info;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (is_dvi_d)
+			return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+		else
+			return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+	}
+	ext_tmds_info = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+	if (ext_tmds_info) {
+		uint8_t rev = RBIOS8(ext_tmds_info);
+		uint8_t flags = RBIOS8(ext_tmds_info + 4 + 5);
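+		/* rev 3+ ext TMDS tables imply dual link; older revs flag it in bit 0 */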
+		if (rev >= 3) {
+			if (is_dvi_d)
+				return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+			else
+				return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+		} else {
+			if (flags & 1) {
+				if (is_dvi_d)
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+				else
+					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+			}
+		}
+	}
+	if (is_dvi_d)
+		return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+	else
+		return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+}
+
+bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t conn_info, entry, devices;
+	uint16_t tmp, connector_object_id;
+	enum radeon_combios_ddc ddc_type;
+	enum radeon_combios_connector connector;
+	int i = 0;
+	struct radeon_i2c_bus_rec ddc_i2c;
+	struct radeon_hpd hpd;
+
+	conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
+	if (conn_info) {
+		for (i = 0; i < 4; i++) {
+			entry = conn_info + 2 + i * 2;
+
+			tmp = RBIOS16(entry);
+			if (!tmp)
+				break;
+
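+			/* connector type lives in bits 12-15 and ddc type in bits
+			 * 8-11; the low byte carries dac/tmds selector bits */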
+			connector = (tmp >> 12) & 0xf;
+
+			ddc_type = (tmp >> 8) & 0xf;
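+			/* ddc_type 5 selects the gpiopad entry from the COMBIOS i2c table */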
+			if (ddc_type == 5)
+				ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev);
+			else
+				ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
+
+			switch (connector) {
+			case CONNECTOR_PROPRIETARY_LEGACY:
+			case CONNECTOR_DVI_I_LEGACY:
+			case CONNECTOR_DVI_D_LEGACY:
+				if ((tmp >> 4) & 0x1)
+					hpd.hpd = RADEON_HPD_2;
+				else
+					hpd.hpd = RADEON_HPD_1;
+				break;
+			default:
+				hpd.hpd = RADEON_HPD_NONE;
+				break;
+			}
+
+			if (!radeon_apply_legacy_quirks(dev, i, &connector,
+							&ddc_i2c, &hpd))
+				continue;
+
+			switch (connector) {
+			case CONNECTOR_PROPRIETARY_LEGACY:
+				if ((tmp >> 4) & 0x1)
+					devices = ATOM_DEVICE_DFP2_SUPPORT;
+				else
+					devices = ATOM_DEVICE_DFP1_SUPPORT;
+				radeon_add_legacy_encoder(dev,
+							  radeon_get_encoder_enum
+							  (dev, devices, 0),
+							  devices);
+				radeon_add_legacy_connector(dev, i, devices,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+							    &hpd);
+				break;
+			case CONNECTOR_CRT_LEGACY:
+				if (tmp & 0x1) {
+					devices = ATOM_DEVICE_CRT2_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_CRT2_SUPPORT,
+								   2),
+								  ATOM_DEVICE_CRT2_SUPPORT);
+				} else {
+					devices = ATOM_DEVICE_CRT1_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_CRT1_SUPPORT,
+								   1),
+								  ATOM_DEVICE_CRT1_SUPPORT);
+				}
+				radeon_add_legacy_connector(dev,
+							    i,
+							    devices,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    CONNECTOR_OBJECT_ID_VGA,
+							    &hpd);
+				break;
+			case CONNECTOR_DVI_I_LEGACY:
+				devices = 0;
+				if (tmp & 0x1) {
+					devices |= ATOM_DEVICE_CRT2_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_CRT2_SUPPORT,
+								   2),
+								  ATOM_DEVICE_CRT2_SUPPORT);
+				} else {
+					devices |= ATOM_DEVICE_CRT1_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_CRT1_SUPPORT,
+								   1),
+								  ATOM_DEVICE_CRT1_SUPPORT);
+				}
+				/* RV100 board with the external TMDS bit mis-set.
+				 * It actually uses internal TMDS, so clear the bit.
+				 */
+				if (dev->pdev->device == 0x5159 &&
+				    dev->pdev->subsystem_vendor == 0x1014 &&
+				    dev->pdev->subsystem_device == 0x029A) {
+					tmp &= ~(1 << 4);
+				}
+				if ((tmp >> 4) & 0x1) {
+					devices |= ATOM_DEVICE_DFP2_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_DFP2_SUPPORT,
+								   0),
+								  ATOM_DEVICE_DFP2_SUPPORT);
+					connector_object_id = combios_check_dl_dvi(dev, 0);
+				} else {
+					devices |= ATOM_DEVICE_DFP1_SUPPORT;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum
+								  (dev,
+								   ATOM_DEVICE_DFP1_SUPPORT,
+								   0),
+								  ATOM_DEVICE_DFP1_SUPPORT);
+					connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+				}
+				radeon_add_legacy_connector(dev,
+							    i,
+							    devices,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    connector_object_id,
+							    &hpd);
+				break;
+			case CONNECTOR_DVI_D_LEGACY:
+				if ((tmp >> 4) & 0x1) {
+					devices = ATOM_DEVICE_DFP2_SUPPORT;
+					connector_object_id = combios_check_dl_dvi(dev, 1);
+				} else {
+					devices = ATOM_DEVICE_DFP1_SUPPORT;
+					connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+				}
+				radeon_add_legacy_encoder(dev,
+							  radeon_get_encoder_enum
+							  (dev, devices, 0),
+							  devices);
+				radeon_add_legacy_connector(dev, i, devices,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    connector_object_id,
+							    &hpd);
+				break;
+			case CONNECTOR_CTV_LEGACY:
+			case CONNECTOR_STV_LEGACY:
+				radeon_add_legacy_encoder(dev,
+							  radeon_get_encoder_enum
+							  (dev,
+							   ATOM_DEVICE_TV1_SUPPORT,
+							   2),
+							  ATOM_DEVICE_TV1_SUPPORT);
+				radeon_add_legacy_connector(dev, i,
+							    ATOM_DEVICE_TV1_SUPPORT,
+							    legacy_connector_convert
+							    [connector],
+							    &ddc_i2c,
+							    CONNECTOR_OBJECT_ID_SVIDEO,
+							    &hpd);
+				break;
+			default:
+				DRM_ERROR("Unknown connector type: %d\n",
+					  connector);
+				continue;
+			}
+
+		}
+	} else {
+		uint16_t tmds_info =
+		    combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
+		if (tmds_info) {
+			DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
+
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_CRT1_SUPPORT,
+									1),
+						  ATOM_DEVICE_CRT1_SUPPORT);
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_DFP1_SUPPORT,
+									0),
+						  ATOM_DEVICE_DFP1_SUPPORT);
+
+			ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+			hpd.hpd = RADEON_HPD_1;
+			radeon_add_legacy_connector(dev,
+						    0,
+						    ATOM_DEVICE_CRT1_SUPPORT |
+						    ATOM_DEVICE_DFP1_SUPPORT,
+						    DRM_MODE_CONNECTOR_DVII,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+						    &hpd);
+		} else {
+			uint16_t crt_info =
+				combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+			if (crt_info) {
+				DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
+				radeon_add_legacy_encoder(dev,
+							  radeon_get_encoder_enum(dev,
+										ATOM_DEVICE_CRT1_SUPPORT,
+										1),
+							  ATOM_DEVICE_CRT1_SUPPORT);
+				ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+				hpd.hpd = RADEON_HPD_NONE;
+				radeon_add_legacy_connector(dev,
+							    0,
+							    ATOM_DEVICE_CRT1_SUPPORT,
+							    DRM_MODE_CONNECTOR_VGA,
+							    &ddc_i2c,
+							    CONNECTOR_OBJECT_ID_VGA,
+							    &hpd);
+			} else {
+				DRM_DEBUG_KMS("No connector info found\n");
+				return false;
+			}
+		}
+	}
+
+	if (rdev->flags & RADEON_IS_MOBILITY || rdev->flags & RADEON_IS_IGP) {
+		uint16_t lcd_info =
+		    combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
+		if (lcd_info) {
+			uint16_t lcd_ddc_info =
+			    combios_get_table_offset(dev,
+						     COMBIOS_LCD_DDC_INFO_TABLE);
+
+			radeon_add_legacy_encoder(dev,
+						  radeon_get_encoder_enum(dev,
+									ATOM_DEVICE_LCD1_SUPPORT,
+									0),
+						  ATOM_DEVICE_LCD1_SUPPORT);
+
+			if (lcd_ddc_info) {
+				ddc_type = RBIOS8(lcd_ddc_info + 2);
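+				/* DDC_LCD and DDC_GPIO entries carry explicit clk and
+				 * data GPIO masks at offsets 3 and 7 */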
+				switch (ddc_type) {
+				case DDC_LCD:
+					ddc_i2c =
+						combios_setup_i2c_bus(rdev,
+								      DDC_LCD,
+								      RBIOS32(lcd_ddc_info + 3),
+								      RBIOS32(lcd_ddc_info + 7));
+					radeon_i2c_add(rdev, &ddc_i2c, "LCD");
+					break;
+				case DDC_GPIO:
+					ddc_i2c =
+						combios_setup_i2c_bus(rdev,
+								      DDC_GPIO,
+								      RBIOS32(lcd_ddc_info + 3),
+								      RBIOS32(lcd_ddc_info + 7));
+					radeon_i2c_add(rdev, &ddc_i2c, "LCD");
+					break;
+				default:
+					ddc_i2c =
+						combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
+					break;
+				}
+				DRM_DEBUG_KMS("LCD DDC Info Table found!\n");
+			} else {
+				ddc_i2c.valid = false;
+			}
+
+			hpd.hpd = RADEON_HPD_NONE;
+			radeon_add_legacy_connector(dev,
+						    5,
+						    ATOM_DEVICE_LCD1_SUPPORT,
+						    DRM_MODE_CONNECTOR_LVDS,
+						    &ddc_i2c,
+						    CONNECTOR_OBJECT_ID_LVDS,
+						    &hpd);
+		}
+	}
+
+	/* check TV table */
+	if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
+		uint32_t tv_info =
+		    combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+		if (tv_info) {
+			if (RBIOS8(tv_info + 6) == 'T') {
+				if (radeon_apply_legacy_tv_quirks(dev)) {
+					hpd.hpd = RADEON_HPD_NONE;
+					ddc_i2c.valid = false;
+					radeon_add_legacy_encoder(dev,
+								  radeon_get_encoder_enum(dev,
+											  ATOM_DEVICE_TV1_SUPPORT,
+											  2),
+								  ATOM_DEVICE_TV1_SUPPORT);
+					radeon_add_legacy_connector(dev, 6,
+								    ATOM_DEVICE_TV1_SUPPORT,
+								    DRM_MODE_CONNECTOR_SVIDEO,
+								    &ddc_i2c,
+								    CONNECTOR_OBJECT_ID_SVIDEO,
+								    &hpd);
+				}
+			}
+		}
+	}
+
+	radeon_link_encoder_connector(dev);
+
+	return true;
+}
+
+static const char *thermal_controller_names[] = {
+	"NONE",
+	"lm63",
+	"adm1032",
+};
+
+void radeon_combios_get_power_modes(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	u16 offset, misc, misc2 = 0;
+	u8 rev, blocks, tmp;
+	int state_index = 0;
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	rdev->pm.default_power_state_index = -1;
+
+	/* allocate 2 power states */
+	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
+	if (rdev->pm.power_state) {
+		/* allocate 1 clock mode per state */
+		rdev->pm.power_state[0].clock_info =
+			kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+		rdev->pm.power_state[1].clock_info =
+			kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+		if (!rdev->pm.power_state[0].clock_info ||
+		    !rdev->pm.power_state[1].clock_info)
+			goto pm_failed;
+	} else
+		goto pm_failed;
+
+	/* check for a thermal chip */
+	offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
+	if (offset) {
+		u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0;
+
+		rev = RBIOS8(offset);
+
+		if (rev == 0) {
+			thermal_controller = RBIOS8(offset + 3);
+			gpio = RBIOS8(offset + 4) & 0x3f;
+			i2c_addr = RBIOS8(offset + 5);
+		} else if (rev == 1) {
+			thermal_controller = RBIOS8(offset + 4);
+			gpio = RBIOS8(offset + 5) & 0x3f;
+			i2c_addr = RBIOS8(offset + 6);
+		} else if (rev == 2) {
+			thermal_controller = RBIOS8(offset + 4);
+			gpio = RBIOS8(offset + 5) & 0x3f;
+			i2c_addr = RBIOS8(offset + 6);
+			clk_bit = RBIOS8(offset + 0xa);
+			data_bit = RBIOS8(offset + 0xb);
+		}
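+		/* the table stores an 8-bit i2c bus address; the i2c core
+		 * uses 7-bit addresses, hence the >> 1 below
+		 */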
+		if ((thermal_controller > 0) && (thermal_controller < 3)) {
+			DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+				 thermal_controller_names[thermal_controller],
+				 i2c_addr >> 1);
+			if (gpio == DDC_LCD) {
+				/* MM i2c */
+				i2c_bus.valid = true;
+				i2c_bus.hw_capable = true;
+				i2c_bus.mm_i2c = true;
+				i2c_bus.i2c_id = 0xa0;
+			} else if (gpio == DDC_GPIO)
+				i2c_bus = combios_setup_i2c_bus(rdev, gpio, 1 << clk_bit, 1 << data_bit);
+			else
+				i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
+			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+			if (rdev->pm.i2c_bus) {
+				struct i2c_board_info info = { };
+				const char *name = thermal_controller_names[thermal_controller];
+				info.addr = i2c_addr >> 1;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+			}
+		}
+	} else {
+		/* boards with a thermal chip, but no overdrive table */
+
+		/* Asus 9600xt has an f75375 on the monid bus */
+		if ((dev->pdev->device == 0x4152) &&
+		    (dev->pdev->subsystem_vendor == 0x1043) &&
+		    (dev->pdev->subsystem_device == 0xc002)) {
+			i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+			if (rdev->pm.i2c_bus) {
+				struct i2c_board_info info = { };
+				const char *name = "f75375";
+				info.addr = 0x28;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+				DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+					 name, info.addr);
+			}
+		}
+	}
+
+	if (rdev->flags & RADEON_IS_MOBILITY) {
+		offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
+		if (offset) {
+			rev = RBIOS8(offset);
+			blocks = RBIOS8(offset + 0x2);
+			/* power mode 0 tends to be the only valid one */
+			rdev->pm.power_state[state_index].num_clock_modes = 1;
+			rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
+			rdev->pm.power_state[state_index].clock_info[0].sclk = RBIOS32(offset + 0x5 + 0x6);
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				goto default_mode;
+			rdev->pm.power_state[state_index].type =
+				POWER_STATE_TYPE_BATTERY;
+			misc = RBIOS16(offset + 0x5 + 0x0);
+			if (rev > 4)
+				misc2 = RBIOS16(offset + 0x5 + 0xe);
+			rdev->pm.power_state[state_index].misc = misc;
+			rdev->pm.power_state[state_index].misc2 = misc2;
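+			/* misc bit 2: core voltage is GPIO controlled;
+			 * misc bit 3: the voltage GPIO is active high
+			 */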
+			if (misc & 0x4) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
+				if (misc & 0x8)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = true;
+				if (rev < 6) {
+					rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+						RBIOS16(offset + 0x5 + 0xb) * 4;
+					tmp = RBIOS8(offset + 0x5 + 0xd);
+					rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+				} else {
+					u8 entries = RBIOS8(offset + 0x5 + 0xb);
+					u16 voltage_table_offset = RBIOS16(offset + 0x5 + 0xc);
+					if (entries && voltage_table_offset) {
+						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+							RBIOS16(voltage_table_offset) * 4;
+						tmp = RBIOS8(voltage_table_offset + 0x2);
+						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+					} else
+						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = false;
+				}
+				switch ((misc2 & 0x700) >> 8) {
+				case 0:
+				default:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 0;
+					break;
+				case 1:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 33;
+					break;
+				case 2:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 66;
+					break;
+				case 3:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 99;
+					break;
+				case 4:
+					rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 132;
+					break;
+				}
+			} else
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+			if (rev > 6)
+				rdev->pm.power_state[state_index].pcie_lanes =
+					RBIOS8(offset + 0x5 + 0x10);
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			state_index++;
+		} else {
+			/* XXX figure out some good default low power mode for mobility cards w/out power tables */
+		}
+	} else {
+		/* XXX figure out some good default low power mode for desktop cards */
+	}
+
+default_mode:
+	/* add the default mode */
+	rdev->pm.power_state[state_index].type =
+		POWER_STATE_TYPE_DEFAULT;
+	rdev->pm.power_state[state_index].num_clock_modes = 1;
+	rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+	rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+	rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
+	if ((state_index > 0) &&
+	    (rdev->pm.power_state[0].clock_info[0].voltage.type == VOLTAGE_GPIO))
+		rdev->pm.power_state[state_index].clock_info[0].voltage =
+			rdev->pm.power_state[0].clock_info[0].voltage;
+	else
+		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+	rdev->pm.power_state[state_index].pcie_lanes = 16;
+	rdev->pm.power_state[state_index].flags = 0;
+	rdev->pm.default_power_state_index = state_index;
+	rdev->pm.num_power_states = state_index + 1;
+
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
+	return;
+
+pm_failed:
+	rdev->pm.default_power_state_index = state_index;
+	rdev->pm.num_power_states = 0;
+
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
+}
+
+void radeon_external_tmds_setup(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+	if (!tmds)
+		return;
+
+	switch (tmds->dvo_chip) {
+	case DVO_SIL164:
+		/* sil 164 */
+		radeon_i2c_put_byte(tmds->i2c_bus, tmds->slave_addr, 0x08, 0x30);
+		radeon_i2c_put_byte(tmds->i2c_bus, tmds->slave_addr, 0x09, 0x00);
+		radeon_i2c_put_byte(tmds->i2c_bus, tmds->slave_addr, 0x0a, 0x90);
+		radeon_i2c_put_byte(tmds->i2c_bus, tmds->slave_addr, 0x0c, 0x89);
+		radeon_i2c_put_byte(tmds->i2c_bus, tmds->slave_addr, 0x08, 0x3b);
+		break;
+	case DVO_SIL1178:
+		/* sil 1178 - untested */
+		/*
+		 * 0x0f, 0x44
+		 * 0x0f, 0x4c
+		 * 0x0e, 0x01
+		 * 0x0a, 0x80
+		 * 0x09, 0x30
+		 * 0x0c, 0xc9
+		 * 0x0d, 0x70
+		 * 0x08, 0x32
+		 * 0x08, 0x33
+		 */
+		break;
+	default:
+		break;
+	}
+}
+
+bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint16_t offset;
+	uint8_t blocks, slave_addr, rev;
+	uint32_t index, id;
+	uint32_t reg, val, and_mask, or_mask;
+	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+	if (!tmds)
+		return false;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
+		if (offset) {
+			rev = RBIOS8(offset);
+			if (rev > 1) {
+				blocks = RBIOS8(offset + 3);
+				index = offset + 4;
+				while (blocks > 0) {
+					id = RBIOS16(index);
+					index += 2;
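+					/* upper 3 bits of each entry select
+					 * the op, lower 13 bits hold its
+					 * argument (usually a register dword
+					 * offset)
+					 */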
+					switch (id >> 13) {
+					case 0:
+						reg = (id & 0x1fff) * 4;
+						val = RBIOS32(index);
+						index += 4;
+						WREG32(reg, val);
+						break;
+					case 2:
+						reg = (id & 0x1fff) * 4;
+						and_mask = RBIOS32(index);
+						index += 4;
+						or_mask = RBIOS32(index);
+						index += 4;
+						val = RREG32(reg);
+						val = (val & and_mask) | or_mask;
+						WREG32(reg, val);
+						break;
+					case 3:
+						val = RBIOS16(index);
+						index += 2;
+						udelay(val);
+						break;
+					case 4:
+						val = RBIOS16(index);
+						index += 2;
+						mdelay(val);
+						break;
+					case 6:
+						slave_addr = id & 0xff;
+						slave_addr >>= 1; /* 7 bit addressing */
+						index++;
+						reg = RBIOS8(index);
+						index++;
+						val = RBIOS8(index);
+						index++;
+						radeon_i2c_put_byte(tmds->i2c_bus,
+								    slave_addr,
+								    reg, val);
+						break;
+					default:
+						DRM_ERROR("Unknown id %d\n", id >> 13);
+						break;
+					}
+					blocks--;
+				}
+				return true;
+			}
+		}
+	} else {
+		offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+		if (offset) {
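+			/* entries start 10 bytes into the table; the list
+			 * is terminated by an id of 0xffff
+			 */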
+			index = offset + 10;
+			id = RBIOS16(index);
+			while (id != 0xffff) {
+				index += 2;
+				switch (id >> 13) {
+				case 0:
+					reg = (id & 0x1fff) * 4;
+					val = RBIOS32(index);
+					index += 4;
+					WREG32(reg, val);
+					break;
+				case 2:
+					reg = (id & 0x1fff) * 4;
+					and_mask = RBIOS32(index);
+					index += 4;
+					or_mask = RBIOS32(index);
+					index += 4;
+					val = RREG32(reg);
+					val = (val & and_mask) | or_mask;
+					WREG32(reg, val);
+					break;
+				case 4:
+					val = RBIOS16(index);
+					index += 2;
+					udelay(val);
+					break;
+				case 5:
+					reg = id & 0x1fff;
+					and_mask = RBIOS32(index);
+					index += 4;
+					or_mask = RBIOS32(index);
+					index += 4;
+					val = RREG32_PLL(reg);
+					val = (val & and_mask) | or_mask;
+					WREG32_PLL(reg, val);
+					break;
+				case 6:
+					reg = id & 0x1fff;
+					val = RBIOS8(index);
+					index += 1;
+					radeon_i2c_put_byte(tmds->i2c_bus,
+							    tmds->slave_addr,
+							    reg, val);
+					break;
+				default:
+					DRM_ERROR("Unknown id %d\n", id >> 13);
+					break;
+				}
+				id = RBIOS16(index);
+			}
+			return true;
+		}
+	}
+	return false;
+}
+
+static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (offset) {
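+		/* each entry starts with a 16-bit header: upper 3 bits are
+		 * the command, lower 13 bits the register offset; a zero
+		 * header terminates the table
+		 */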
+		while (RBIOS16(offset)) {
+			uint16_t cmd = ((RBIOS16(offset) & 0xe000) >> 13);
+			uint32_t addr = (RBIOS16(offset) & 0x1fff);
+			uint32_t val, and_mask, or_mask;
+			uint32_t tmp;
+
+			offset += 2;
+			switch (cmd) {
+			case 0:
+			case 1:
+				val = RBIOS32(offset);
+				offset += 4;
+				WREG32(addr, val);
+				break;
+			case 2:
+			case 3:
+				and_mask = RBIOS32(offset);
+				offset += 4;
+				or_mask = RBIOS32(offset);
+				offset += 4;
+				tmp = RREG32(addr);
+				tmp &= and_mask;
+				tmp |= or_mask;
+				WREG32(addr, tmp);
+				break;
+			case 4:
+				val = RBIOS16(offset);
+				offset += 2;
+				udelay(val);
+				break;
+			case 5:
+				val = RBIOS16(offset);
+				offset += 2;
+				switch (addr) {
+				case 8:
+					while (val--) {
+						if (!(RREG32_PLL(RADEON_CLK_PWRMGT_CNTL) &
+						      RADEON_MC_BUSY))
+							break;
+					}
+					break;
+				case 9:
+					while (val--) {
+						if ((RREG32(RADEON_MC_STATUS) &
+						     RADEON_MC_IDLE))
+							break;
+					}
+					break;
+				default:
+					break;
+				}
+				break;
+			default:
+				break;
+			}
+		}
+	}
+}
+
+static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (offset) {
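+		/* PLL table entries start with a single byte: upper 2 bits
+		 * are the command, lower 6 bits the PLL register index; a
+		 * zero byte terminates the table
+		 */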
+		while (RBIOS8(offset)) {
+			uint8_t cmd = ((RBIOS8(offset) & 0xc0) >> 6);
+			uint8_t addr = (RBIOS8(offset) & 0x3f);
+			uint32_t val, shift, tmp;
+			uint32_t and_mask, or_mask;
+
+			offset++;
+			switch (cmd) {
+			case 0:
+				val = RBIOS32(offset);
+				offset += 4;
+				WREG32_PLL(addr, val);
+				break;
+			case 1:
+				shift = RBIOS8(offset) * 8;
+				offset++;
+				and_mask = RBIOS8(offset) << shift;
+				and_mask |= ~(0xff << shift);
+				offset++;
+				or_mask = RBIOS8(offset) << shift;
+				offset++;
+				tmp = RREG32_PLL(addr);
+				tmp &= and_mask;
+				tmp |= or_mask;
+				WREG32_PLL(addr, tmp);
+				break;
+			case 2:
+			case 3:
+				tmp = 1000;
+				switch (addr) {
+				case 1:
+					udelay(150);
+					break;
+				case 2:
+					mdelay(1);
+					break;
+				case 3:
+					while (tmp--) {
+						if (!(RREG32_PLL(RADEON_CLK_PWRMGT_CNTL) &
+						      RADEON_MC_BUSY))
+							break;
+					}
+					break;
+				case 4:
+					while (tmp--) {
+						if (RREG32_PLL(RADEON_CLK_PWRMGT_CNTL) &
+						    RADEON_DLL_READY)
+							break;
+					}
+					break;
+				case 5:
+					tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
+					if (tmp & RADEON_CG_NO1_DEBUG_0) {
+#if 0
+						uint32_t mclk_cntl =
+							RREG32_PLL(RADEON_MCLK_CNTL);
+						mclk_cntl &= 0xffff0000;
+						/* mclk_cntl |= 0x00001111; */ /* ??? */
+						WREG32_PLL(RADEON_MCLK_CNTL, mclk_cntl);
+						mdelay(10);
+#endif
+						WREG32_PLL(RADEON_CLK_PWRMGT_CNTL,
+							   tmp & ~RADEON_CG_NO1_DEBUG_0);
+						mdelay(10);
+					}
+					break;
+				default:
+					break;
+				}
+				break;
+			default:
+				break;
+			}
+		}
+	}
+}
+
+static void combios_parse_ram_reset_table(struct drm_device *dev,
+					  uint16_t offset)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	if (offset) {
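+		/* byte stream terminated by 0xff: opcode 0x0f waits for
+		 * memory power-up to complete, anything else programs
+		 * RADEON_MEM_SDRAM_MODE_REG
+		 */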
+		uint8_t val = RBIOS8(offset);
+		while (val != 0xff) {
+			offset++;
+
+			if (val == 0x0f) {
+				uint32_t channel_complete_mask;
+
+				if (ASIC_IS_R300(rdev))
+					channel_complete_mask =
+					    R300_MEM_PWRUP_COMPLETE;
+				else
+					channel_complete_mask =
+					    RADEON_MEM_PWRUP_COMPLETE;
+				tmp = 20000;
+				while (tmp--) {
+					if ((RREG32(RADEON_MEM_STR_CNTL) &
+					     channel_complete_mask) ==
+					    channel_complete_mask)
+						break;
+				}
+			} else {
+				uint32_t or_mask = RBIOS16(offset);
+				offset += 2;
+
+				tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+				tmp &= RADEON_SDRAM_MODE_MASK;
+				tmp |= or_mask;
+				WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
+
+				or_mask = val << 24;
+				tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
+				tmp &= RADEON_B3MEM_RESET_MASK;
+				tmp |= or_mask;
+				WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
+			}
+			val = RBIOS8(offset);
+		}
+	}
+}
+
+static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
+				   int mem_addr_mapping)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t mem_cntl;
+	uint32_t mem_size;
+	uint32_t addr = 0;
+
+	mem_cntl = RREG32(RADEON_MEM_CNTL);
+	if (mem_cntl & RV100_HALF_MODE)
+		ram /= 2;
+	mem_size = ram;
+	mem_cntl &= ~(0xff << 8);
+	mem_cntl |= (mem_addr_mapping & 0xff) << 8;
+	WREG32(RADEON_MEM_CNTL, mem_cntl);
+	RREG32(RADEON_MEM_CNTL);
+
+	/* sdram reset ? */
+
+	/* something like this????  */
+	while (ram--) {
+		addr = ram * 1024 * 1024;
+		/* write to each page */
+		WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
+		/* read back and verify */
+		if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
+			return 0;
+	}
+
+	return mem_size;
+}
+
+static void combios_write_ram_size(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint8_t rev;
+	uint16_t offset;
+	uint32_t mem_size = 0;
+	uint32_t mem_cntl = 0;
+
+	/* should do something smarter here I guess... */
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	/* first check detected mem table */
+	offset = combios_get_table_offset(dev, COMBIOS_DETECTED_MEM_TABLE);
+	if (offset) {
+		rev = RBIOS8(offset);
+		if (rev < 3) {
+			mem_cntl = RBIOS32(offset + 1);
+			mem_size = RBIOS16(offset + 5);
+			if ((rdev->family < CHIP_R200) &&
+			    !ASIC_IS_RN50(rdev))
+				WREG32(RADEON_MEM_CNTL, mem_cntl);
+		}
+	}
+
+	if (!mem_size) {
+		offset =
+		    combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
+		if (offset) {
+			rev = RBIOS8(offset - 1);
+			if (rev < 1) {
+				if ((rdev->family < CHIP_R200)
+				    && !ASIC_IS_RN50(rdev)) {
+					int ram = 0;
+					int mem_addr_mapping = 0;
+
+					while (RBIOS8(offset)) {
+						ram = RBIOS8(offset);
+						mem_addr_mapping =
+						    RBIOS8(offset + 1);
+						if (mem_addr_mapping != 0x25)
+							ram *= 2;
+						mem_size =
+						    combios_detect_ram(dev, ram,
+								       mem_addr_mapping);
+						if (mem_size)
+							break;
+						offset += 2;
+					}
+				} else
+					mem_size = RBIOS8(offset);
+			} else {
+				mem_size = RBIOS8(offset);
+				mem_size *= 2;	/* convert to MB */
+			}
+		}
+	}
+
+	mem_size *= (1024 * 1024);	/* convert to bytes */
+	WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
+}
+
+void radeon_combios_asic_init(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint16_t table;
+
+	/* port hardcoded mac stuff from radeonfb */
+	if (rdev->bios == NULL)
+		return;
+
+	/* ASIC INIT 1 */
+	table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_1_TABLE);
+	if (table)
+		combios_parse_mmio_table(dev, table);
+
+	/* PLL INIT */
+	table = combios_get_table_offset(dev, COMBIOS_PLL_INIT_TABLE);
+	if (table)
+		combios_parse_pll_table(dev, table);
+
+	/* ASIC INIT 2 */
+	table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_2_TABLE);
+	if (table)
+		combios_parse_mmio_table(dev, table);
+
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		/* ASIC INIT 4 */
+		table =
+		    combios_get_table_offset(dev, COMBIOS_ASIC_INIT_4_TABLE);
+		if (table)
+			combios_parse_mmio_table(dev, table);
+
+		/* RAM RESET */
+		table = combios_get_table_offset(dev, COMBIOS_RAM_RESET_TABLE);
+		if (table)
+			combios_parse_ram_reset_table(dev, table);
+
+		/* ASIC INIT 3 */
+		table =
+		    combios_get_table_offset(dev, COMBIOS_ASIC_INIT_3_TABLE);
+		if (table)
+			combios_parse_mmio_table(dev, table);
+
+		/* write CONFIG_MEMSIZE */
+		combios_write_ram_size(dev);
+	}
+
+	/* quirks for some rs4xx laptops (HP nx6125, HP dv5000, and
+	 * Compaq Presario V5245EU) to make them resume
+	 * - they hang on resume inside the dynclk 1 table, so skip it.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    rdev->pdev->subsystem_vendor == 0x103c &&
+	    (rdev->pdev->subsystem_device == 0x308b ||
+	     rdev->pdev->subsystem_device == 0x30a4 ||
+	     rdev->pdev->subsystem_device == 0x30ae))
+		return;
+
+	/* DYN CLK 1 */
+	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+	if (table)
+		combios_parse_pll_table(dev, table);
+
+}
+void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t bios_0_scratch, bios_6_scratch, bios_7_scratch;
+
+	bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+	bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+	bios_7_scratch = RREG32(RADEON_BIOS_7_SCRATCH);
+
+	/* let the bios control the backlight */
+	bios_0_scratch &= ~RADEON_DRIVER_BRIGHTNESS_EN;
+
+	/* tell the bios not to handle mode switching */
+	bios_6_scratch |= (RADEON_DISPLAY_SWITCHING_DIS |
+			   RADEON_ACC_MODE_CHANGE);
+
+	/* tell the bios a driver is loaded */
+	bios_7_scratch |= RADEON_DRV_LOADED;
+
+	WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
+	WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+	WREG32(RADEON_BIOS_7_SCRATCH, bios_7_scratch);
+}
+
+void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t bios_6_scratch;
+
+	bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+	if (lock)
+		bios_6_scratch |= RADEON_DRIVER_CRITICAL;
+	else
+		bios_6_scratch &= ~RADEON_DRIVER_CRITICAL;
+
+	WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
+
+void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+				      struct drm_encoder *encoder,
+				      bool connected)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector =
+	    to_radeon_connector(connector);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_4_scratch = RREG32(RADEON_BIOS_4_SCRATCH);
+	uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
+
+	if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("TV1 connected\n");
+			/* fix me */
+			bios_4_scratch |= RADEON_TV1_ATTACHED_SVIDEO;
+			/*save->bios_4_scratch |= RADEON_TV1_ATTACHED_COMP; */
+			bios_5_scratch |= RADEON_TV1_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_TV1;
+		} else {
+			DRM_DEBUG_KMS("TV1 disconnected\n");
+			bios_4_scratch &= ~RADEON_TV1_ATTACHED_MASK;
+			bios_5_scratch &= ~RADEON_TV1_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_TV1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("LCD1 connected\n");
+			bios_4_scratch |= RADEON_LCD1_ATTACHED;
+			bios_5_scratch |= RADEON_LCD1_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_LCD1;
+		} else {
+			DRM_DEBUG_KMS("LCD1 disconnected\n");
+			bios_4_scratch &= ~RADEON_LCD1_ATTACHED;
+			bios_5_scratch &= ~RADEON_LCD1_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_LCD1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT1 connected\n");
+			bios_4_scratch |= RADEON_CRT1_ATTACHED_COLOR;
+			bios_5_scratch |= RADEON_CRT1_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_CRT1;
+		} else {
+			DRM_DEBUG_KMS("CRT1 disconnected\n");
+			bios_4_scratch &= ~RADEON_CRT1_ATTACHED_MASK;
+			bios_5_scratch &= ~RADEON_CRT1_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_CRT1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("CRT2 connected\n");
+			bios_4_scratch |= RADEON_CRT2_ATTACHED_COLOR;
+			bios_5_scratch |= RADEON_CRT2_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_CRT2;
+		} else {
+			DRM_DEBUG_KMS("CRT2 disconnected\n");
+			bios_4_scratch &= ~RADEON_CRT2_ATTACHED_MASK;
+			bios_5_scratch &= ~RADEON_CRT2_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_CRT2;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP1 connected\n");
+			bios_4_scratch |= RADEON_DFP1_ATTACHED;
+			bios_5_scratch |= RADEON_DFP1_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_DFP1;
+		} else {
+			DRM_DEBUG_KMS("DFP1 disconnected\n");
+			bios_4_scratch &= ~RADEON_DFP1_ATTACHED;
+			bios_5_scratch &= ~RADEON_DFP1_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_DFP1;
+		}
+	}
+	if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
+	    (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
+		if (connected) {
+			DRM_DEBUG_KMS("DFP2 connected\n");
+			bios_4_scratch |= RADEON_DFP2_ATTACHED;
+			bios_5_scratch |= RADEON_DFP2_ON;
+			bios_5_scratch |= RADEON_ACC_REQ_DFP2;
+		} else {
+			DRM_DEBUG_KMS("DFP2 disconnected\n");
+			bios_4_scratch &= ~RADEON_DFP2_ATTACHED;
+			bios_5_scratch &= ~RADEON_DFP2_ON;
+			bios_5_scratch &= ~RADEON_ACC_REQ_DFP2;
+		}
+	}
+	WREG32(RADEON_BIOS_4_SCRATCH, bios_4_scratch);
+	WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
+}
+
+void
+radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
+
+	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		bios_5_scratch &= ~RADEON_TV1_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_TV1_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		bios_5_scratch &= ~RADEON_CRT1_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_CRT1_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		bios_5_scratch &= ~RADEON_CRT2_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_CRT2_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+		bios_5_scratch &= ~RADEON_LCD1_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_LCD1_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
+		bios_5_scratch &= ~RADEON_DFP1_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_DFP1_CRTC_SHIFT);
+	}
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
+		bios_5_scratch &= ~RADEON_DFP2_CRTC_MASK;
+		bios_5_scratch |= (crtc << RADEON_DFP2_CRTC_SHIFT);
+	}
+	WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
+}
+
+void
+radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
+
+	if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+		if (on)
+			bios_6_scratch |= RADEON_TV_DPMS_ON;
+		else
+			bios_6_scratch &= ~RADEON_TV_DPMS_ON;
+	}
+	if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+		if (on)
+			bios_6_scratch |= RADEON_CRT_DPMS_ON;
+		else
+			bios_6_scratch &= ~RADEON_CRT_DPMS_ON;
+	}
+	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+		if (on)
+			bios_6_scratch |= RADEON_LCD_DPMS_ON;
+		else
+			bios_6_scratch &= ~RADEON_LCD_DPMS_ON;
+	}
+	if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+		if (on)
+			bios_6_scratch |= RADEON_DFP_DPMS_ON;
+		else
+			bios_6_scratch &= ~RADEON_DFP_DPMS_ON;
+	}
+	WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_connectors.c b/linux-imx/drivers/gpu/drm/radeon/radeon_connectors.c
new file mode 100644
index 0000000..fc604fc
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -0,0 +1,2051 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+extern void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+				      struct drm_encoder *encoder,
+				      bool connected);
+extern void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+				       struct drm_encoder *encoder,
+				       bool connected);
+
+void radeon_connector_hotplug(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	/* bail if the connector does not have hpd pin, e.g.,
+	 * VGA, TV, etc.
+	 */
+	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
+		return;
+
+	radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+
+	/* if the connector is already off, don't turn it back on */
+	if (connector->dpms != DRM_MODE_DPMS_ON)
+		return;
+
+	/* just deal with DP (not eDP) here. */
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		/* if existing sink type was not DP no need to retrain */
+		if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
+			return;
+
+		/* first get sink type as it may be reset after (un)plug */
+		dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+		/* don't do anything if sink is not display port, i.e.,
+		 * passive dp->(dvi|hdmi) adaptor
+		 */
+		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+			int saved_dpms = connector->dpms;
+			/* Only turn off the display if it's physically disconnected */
+			if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+			} else if (radeon_dp_needs_link_train(radeon_connector)) {
+				/* set it to OFF so that drm_helper_connector_dpms()
+				 * won't return immediately since the current state
+				 * is ON at this point.
+				 */
+				connector->dpms = DRM_MODE_DPMS_OFF;
+				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+			}
+			connector->dpms = saved_dpms;
+		}
+	}
+}
+
+static void radeon_property_change_mode(struct drm_encoder *encoder)
+{
+	struct drm_crtc *crtc = encoder->crtc;
+
+	if (crtc && crtc->enabled) {
+		drm_crtc_helper_set_mode(crtc, &crtc->mode,
+					 crtc->x, crtc->y, crtc->fb);
+	}
+}
+
+int radeon_get_monitor_bpc(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *dig_connector;
+	int bpc = 8;
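+	/* assume 8 bits per color unless the display info or the
+	 * panel's lcd_misc flags indicate otherwise below
+	 */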
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIB:
+		if (radeon_connector->use_digital) {
+			if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+				if (connector->display_info.bpc)
+					bpc = connector->display_info.bpc;
+			}
+		}
+		break;
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_HDMIA:
+		if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+			if (connector->display_info.bpc)
+				bpc = connector->display_info.bpc;
+		}
+		break;
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		dig_connector = radeon_connector->con_priv;
+		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
+		    drm_detect_hdmi_monitor(radeon_connector->edid)) {
+			if (connector->display_info.bpc)
+				bpc = connector->display_info.bpc;
+		}
+		break;
+	case DRM_MODE_CONNECTOR_eDP:
+	case DRM_MODE_CONNECTOR_LVDS:
+		if (connector->display_info.bpc)
+			bpc = connector->display_info.bpc;
+		else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+			struct drm_connector_helper_funcs *connector_funcs =
+				connector->helper_private;
+			struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+			if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
+				bpc = 6;
+			else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
+				bpc = 8;
+		}
+		break;
+	}
+	return bpc;
+}
+
+static void
+radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *best_encoder = NULL;
+	struct drm_encoder *encoder = NULL;
+	struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+	struct drm_mode_object *obj;
+	bool connected;
+	int i;
+
+	best_encoder = connector_funcs->best_encoder(connector);
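+	/* only the encoder actually driving this connector is flagged as
+	 * connected in the BIOS scratch registers; the rest are marked
+	 * disconnected
+	 */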
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev,
+					   connector->encoder_ids[i],
+					   DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+
+		if ((encoder == best_encoder) && (status == connector_status_connected))
+			connected = true;
+		else
+			connected = false;
+
+		if (rdev->is_atom_bios)
+			radeon_atombios_connected_scratch_regs(connector, encoder, connected);
+		else
+			radeon_combios_connected_scratch_regs(connector, encoder, connected);
+
+	}
+
+static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
+{
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+	int i;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+		if (encoder->encoder_type == encoder_type)
+			return encoder;
+	}
+	return NULL;
+}
+
+static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	/* pick the encoder ids */
+	if (enc_id) {
+		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			return NULL;
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	return NULL;
+}
+
+/*
+ * radeon_connector_analog_encoder_conflict_solve
+ * - search for other connectors sharing this encoder.
+ *   If priority is true, set them disconnected if this one is connected;
+ *   if priority is false, set us disconnected if they are connected.
+ */
+static enum drm_connector_status
+radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
+					       struct drm_encoder *encoder,
+					       enum drm_connector_status current_status,
+					       bool priority)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_connector *conflict;
+	struct radeon_connector *radeon_conflict;
+	int i;
+
+	list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
+		if (conflict == connector)
+			continue;
+
+		radeon_conflict = to_radeon_connector(conflict);
+		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+			if (conflict->encoder_ids[i] == 0)
+				break;
+
+			/* if the IDs match */
+			if (conflict->encoder_ids[i] == encoder->base.id) {
+				if (conflict->status != connector_status_connected)
+					continue;
+
+				if (radeon_conflict->use_digital)
+					continue;
+
+				if (priority) {
+					DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+					DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
+					conflict->status = connector_status_disconnected;
+					radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
+				} else {
+					DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
+					DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
+					current_status = connector_status_disconnected;
+				}
+				break;
+			}
+		}
+	}
+	return current_status;
+}
+
+static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+	if (native_mode->hdisplay != 0 &&
+	    native_mode->vdisplay != 0 &&
+	    native_mode->clock != 0) {
+		mode = drm_mode_duplicate(dev, native_mode);
+		mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+		drm_mode_set_name(mode);
+
+		DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name);
+	} else if (native_mode->hdisplay != 0 &&
+		   native_mode->vdisplay != 0) {
+		/* Mac laptops without an EDID */
+		/* Note that this is not necessarily the exact panel mode,
+		 * but an approximation based on the cvt formula.  For these
+		 * systems we should ideally read the mode info out of the
+		 * registers or add a mode table, but this works and is much
+		 * simpler.
+		 */
+		mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+		mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+		DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
+	}
+	return mode;
+}
+
+static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+	int i;
+	struct mode_size {
+		int w;
+		int h;
+	} common_modes[] = {
+		{ 640,  480},
+		{ 720,  480},
+		{ 800,  600},
+		{ 848,  480},
+		{1024,  768},
+		{1152,  768},
+		{1280,  720},
+		{1280,  800},
+		{1280,  854},
+		{1280,  960},
+		{1280, 1024},
+		{1440,  900},
+		{1400, 1050},
+		{1680, 1050},
+		{1600, 1200},
+		{1920, 1080},
+		{1920, 1200}
+	};
+
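+	/* filter the common modes: TV is capped at 1024x768, panels only
+	 * get modes no larger than (and distinct from) the native mode,
+	 * and anything below 320x200 is dropped
+	 */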
+	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
+			if (common_modes[i].w > 1024 ||
+			    common_modes[i].h > 768)
+				continue;
+		}
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			if (common_modes[i].w > native_mode->hdisplay ||
+			    common_modes[i].h > native_mode->vdisplay ||
+			    (common_modes[i].w == native_mode->hdisplay &&
+			     common_modes[i].h == native_mode->vdisplay))
+				continue;
+		}
+		if (common_modes[i].w < 320 || common_modes[i].h < 200)
+			continue;
+
+		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+		drm_mode_probed_add(connector, mode);
+	}
+}
+
+static int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
+				  uint64_t val)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+
+	if (property == rdev->mode_info.coherent_mode_property) {
+		struct radeon_encoder_atom_dig *dig;
+		bool new_coherent_mode;
+
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (!radeon_encoder->enc_priv)
+			return 0;
+
+		dig = radeon_encoder->enc_priv;
+		new_coherent_mode = val ? true : false;
+		if (dig->coherent_mode != new_coherent_mode) {
+			dig->coherent_mode = new_coherent_mode;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.underscan_property) {
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_encoder->underscan_type != val) {
+			radeon_encoder->underscan_type = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.underscan_hborder_property) {
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_encoder->underscan_hborder != val) {
+			radeon_encoder->underscan_hborder = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.underscan_vborder_property) {
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		if (radeon_encoder->underscan_vborder != val) {
+			radeon_encoder->underscan_vborder = val;
+			radeon_property_change_mode(&radeon_encoder->base);
+		}
+	}
+
+	if (property == rdev->mode_info.tv_std_property) {
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC);
+		if (!encoder) {
+			encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_DAC);
+		}
+
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (!radeon_encoder->enc_priv)
+			return 0;
+		if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
+			struct radeon_encoder_atom_dac *dac_int;
+			dac_int = radeon_encoder->enc_priv;
+			dac_int->tv_std = val;
+		} else {
+			struct radeon_encoder_tv_dac *dac_int;
+			dac_int = radeon_encoder->enc_priv;
+			dac_int->tv_std = val;
+		}
+		radeon_property_change_mode(&radeon_encoder->base);
+	}
+
+	if (property == rdev->mode_info.load_detect_property) {
+		struct radeon_connector *radeon_connector =
+			to_radeon_connector(connector);
+
+		radeon_connector->dac_load_detect = !!val;
+	}
+
+	if (property == rdev->mode_info.tmds_pll_property) {
+		struct radeon_encoder_int_tmds *tmds = NULL;
+		bool ret = false;
+		/* need to find digital encoder on connector */
+		encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+		if (!encoder)
+			return 0;
+
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		tmds = radeon_encoder->enc_priv;
+		if (!tmds)
+			return 0;
+
+		if (val == 0) {
+			if (rdev->is_atom_bios)
+				ret = radeon_atombios_get_tmds_info(radeon_encoder, tmds);
+			else
+				ret = radeon_legacy_get_tmds_info_from_combios(radeon_encoder, tmds);
+		}
+		if (val == 1 || ret == false) {
+			radeon_legacy_get_tmds_info_from_table(radeon_encoder, tmds);
+		}
+		radeon_property_change_mode(&radeon_encoder->base);
+	}
+
+	return 0;
+}
+
+static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
+					  struct drm_connector *connector)
+{
+	struct radeon_encoder *radeon_encoder =	to_radeon_encoder(encoder);
+	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+	struct drm_display_mode *t, *mode;
+
+	/* If the EDID preferred mode doesn't match the native mode, use it */
+	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+			if (mode->hdisplay != native_mode->hdisplay ||
+			    mode->vdisplay != native_mode->vdisplay)
+				memcpy(native_mode, mode, sizeof(*mode));
+		}
+	}
+
+	/* Try to get native mode details from EDID if necessary */
+	if (!native_mode->clock) {
+		list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+			if (mode->hdisplay == native_mode->hdisplay &&
+			    mode->vdisplay == native_mode->vdisplay) {
+				*native_mode = *mode;
+				drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
+				DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
+				break;
+			}
+		}
+	}
+
+	if (!native_mode->clock) {
+		DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
+		radeon_encoder->rmx_type = RMX_OFF;
+	}
+}
+
+static int radeon_lvds_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *encoder;
+	int ret = 0;
+	struct drm_display_mode *mode;
+
+	if (radeon_connector->ddc_bus) {
+		ret = radeon_ddc_get_modes(radeon_connector);
+		if (ret > 0) {
+			encoder = radeon_best_single_encoder(connector);
+			if (encoder) {
+				radeon_fixup_lvds_native_mode(encoder, connector);
+				/* add scaled modes */
+				radeon_add_common_modes(encoder, connector);
+			}
+			return ret;
+		}
+	}
+
+	encoder = radeon_best_single_encoder(connector);
+	if (!encoder)
+		return 0;
+
+	/* we have no EDID modes */
+	mode = radeon_fp_native_mode(encoder);
+	if (mode) {
+		ret = 1;
+		drm_mode_probed_add(connector, mode);
+		/* add the width/height from vbios tables if available */
+		connector->display_info.width_mm = mode->width_mm;
+		connector->display_info.height_mm = mode->height_mm;
+		/* add scaled modes */
+		radeon_add_common_modes(encoder, connector);
+	}
+
+	return ret;
+}
+
+static int radeon_lvds_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+	if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
+		return MODE_PANEL;
+
+	if (encoder) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+		/* AVIVO hardware supports downscaling modes larger than the panel
+		 * to the panel size, but I'm not sure this is desirable.
+		 */
+		if ((mode->hdisplay > native_mode->hdisplay) ||
+		    (mode->vdisplay > native_mode->vdisplay))
+			return MODE_PANEL;
+
+		/* if scaling is disabled, block non-native modes */
+		if (radeon_encoder->rmx_type == RMX_OFF) {
+			if ((mode->hdisplay != native_mode->hdisplay) ||
+			    (mode->vdisplay != native_mode->vdisplay))
+				return MODE_PANEL;
+		}
+	}
+
+	return MODE_OK;
+}
+
+static enum drm_connector_status
+radeon_lvds_detect(struct drm_connector *connector, bool force)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+
+	if (encoder) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+		/* check if panel is valid */
+		if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
+			ret = connector_status_connected;
+	}
+
+	/* check for edid as well */
+	if (radeon_connector->edid)
+		ret = connector_status_connected;
+	else {
+		if (radeon_connector->ddc_bus) {
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      &radeon_connector->ddc_bus->adapter);
+			if (radeon_connector->edid)
+				ret = connector_status_connected;
+		}
+	}
+	/* check acpi lid status ??? */
+
+	radeon_connector_update_scratch_regs(connector, ret);
+	return ret;
+}
+
+static void radeon_connector_destroy(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	kfree(radeon_connector->edid);
+	kfree(radeon_connector->con_priv);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static int radeon_lvds_set_property(struct drm_connector *connector,
+				    struct drm_property *property,
+				    uint64_t value)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_encoder *radeon_encoder;
+	enum radeon_rmx_type rmx_type;
+
+	DRM_DEBUG_KMS("\n");
+	if (property != dev->mode_config.scaling_mode_property)
+		return 0;
+
+	if (connector->encoder)
+		radeon_encoder = to_radeon_encoder(connector->encoder);
+	else {
+		struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+		radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
+	}
+
+	switch (value) {
+	case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
+	case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
+	case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+	default:
+	case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+	}
+	if (radeon_encoder->rmx_type == rmx_type)
+		return 0;
+
+	radeon_encoder->rmx_type = rmx_type;
+
+	radeon_property_change_mode(&radeon_encoder->base);
+	return 0;
+}
+
+
+static const struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
+	.get_modes = radeon_lvds_get_modes,
+	.mode_valid = radeon_lvds_mode_valid,
+	.best_encoder = radeon_best_single_encoder,
+};
+
+static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_lvds_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = radeon_connector_destroy,
+	.set_property = radeon_lvds_set_property,
+};
+
+static int radeon_vga_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	return radeon_ddc_get_modes(radeon_connector);
+}
+
+static int radeon_vga_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* XXX check mode bandwidth */
+
+	if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static enum drm_connector_status
+radeon_vga_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	bool dret = false;
+	enum drm_connector_status ret = connector_status_disconnected;
+
+	encoder = radeon_best_single_encoder(connector);
+	if (!encoder)
+		ret = connector_status_disconnected;
+
+	if (radeon_connector->ddc_bus)
+		dret = radeon_ddc_probe(radeon_connector, false);
+	if (dret) {
+		radeon_connector->detected_by_load = false;
+		if (radeon_connector->edid) {
+			kfree(radeon_connector->edid);
+			radeon_connector->edid = NULL;
+		}
+		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+
+		if (!radeon_connector->edid) {
+			DRM_ERROR("%s: probed a monitor but got no or invalid EDID\n",
+				  drm_get_connector_name(connector));
+			ret = connector_status_connected;
+		} else {
+			radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+
+			/* some oems have boards with separate digital and analog connectors
+			 * with a shared ddc line (often vga + hdmi)
+			 */
+			if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
+				kfree(radeon_connector->edid);
+				radeon_connector->edid = NULL;
+				ret = connector_status_disconnected;
+			} else
+				ret = connector_status_connected;
+		}
+	} else {
+
+		/* if we aren't forcing don't do destructive polling */
+		if (!force) {
+			/* only return the previous status if we last
+			 * detected a monitor via load.
+			 */
+			if (radeon_connector->detected_by_load)
+				return connector->status;
+			else
+				return ret;
+		}
+
+		if (radeon_connector->dac_load_detect && encoder) {
+			encoder_funcs = encoder->helper_private;
+			ret = encoder_funcs->detect(encoder, connector);
+			if (ret != connector_status_disconnected)
+				radeon_connector->detected_by_load = true;
+		}
+	}
+
+	if (ret == connector_status_connected)
+		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
+
+	/* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
+	 * vbios to deal with KVMs. If we have one and are not able to detect a monitor
+	 * by other means, assume the CRT is connected and use that EDID.
+	 */
+	if ((!rdev->is_atom_bios) &&
+	    (ret == connector_status_disconnected) &&
+	    rdev->mode_info.bios_hardcoded_edid_size) {
+		ret = connector_status_connected;
+	}
+
+	radeon_connector_update_scratch_regs(connector, ret);
+	return ret;
+}
+
+static const struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
+	.get_modes = radeon_vga_get_modes,
+	.mode_valid = radeon_vga_mode_valid,
+	.best_encoder = radeon_best_single_encoder,
+};
+
+static const struct drm_connector_funcs radeon_vga_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_vga_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = radeon_connector_destroy,
+	.set_property = radeon_connector_set_property,
+};
+
+static int radeon_tv_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_display_mode *tv_mode;
+	struct drm_encoder *encoder;
+
+	encoder = radeon_best_single_encoder(connector);
+	if (!encoder)
+		return 0;
+
+	/* avivo chips can scale any mode */
+	if (rdev->family >= CHIP_RS600)
+		/* add scaled modes */
+		radeon_add_common_modes(encoder, connector);
+	else {
+		/* only 800x600 is supported right now on pre-avivo chips */
+		tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
+		tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+		drm_mode_probed_add(connector, tv_mode);
+	}
+	return 1;
+}
+
+static int radeon_tv_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
+		return MODE_CLOCK_RANGE;
+	return MODE_OK;
+}
+
+static enum drm_connector_status
+radeon_tv_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+
+	if (!radeon_connector->dac_load_detect)
+		return ret;
+
+	encoder = radeon_best_single_encoder(connector);
+	if (!encoder)
+		ret = connector_status_disconnected;
+	else {
+		encoder_funcs = encoder->helper_private;
+		ret = encoder_funcs->detect(encoder, connector);
+	}
+	if (ret == connector_status_connected)
+		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
+	radeon_connector_update_scratch_regs(connector, ret);
+	return ret;
+}
+
+static const struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
+	.get_modes = radeon_tv_get_modes,
+	.mode_valid = radeon_tv_mode_valid,
+	.best_encoder = radeon_best_single_encoder,
+};
+
+static const struct drm_connector_funcs radeon_tv_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_tv_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = radeon_connector_destroy,
+	.set_property = radeon_connector_set_property,
+};
+
+static int radeon_dvi_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	int ret;
+
+	ret = radeon_ddc_get_modes(radeon_connector);
+	return ret;
+}
+
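+/* Returns true when the hot-plug pin still reports the same state as the
+ * cached connector status, letting a non-forced detect skip the more
+ * expensive DDC and load-detect paths. HPD is only trusted on R600 and
+ * newer ASICs, and only when the connector has an HPD pin assigned.
+ */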
+static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	enum drm_connector_status status;
+
+	/* We only trust HPD on R600 and newer ASICs. */
+	if (rdev->family >= CHIP_R600 &&
+	    radeon_connector->hpd.hpd != RADEON_HPD_NONE) {
+		if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+			status = connector_status_connected;
+		else
+			status = connector_status_disconnected;
+		if (connector->status == status)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * DVI is complicated:
+ * do a DDC probe; if the DDC probe passes, get the full EDID so
+ * we can do analog/digital monitor detection at this point.
+ * If the monitor is an analog monitor, or we got no DDC,
+ * we need to find the DAC encoder object for this connector.
+ * If we got no DDC, we do load detection on the DAC encoder object.
+ * If we got analog DDC, or load detection passes on the DAC encoder,
+ * we have to check whether this analog encoder is shared with anyone
+ * else (e.g. TV); if it's shared, we have to set the other connector
+ * to disconnected.
+ */
+static enum drm_connector_status
+radeon_dvi_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *encoder = NULL;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct drm_mode_object *obj;
+	int i;
+	enum drm_connector_status ret = connector_status_disconnected;
+	bool dret = false, broken_edid = false;
+
+	if (!force && radeon_check_hpd_status_unchanged(connector))
+		return connector->status;
+
+	if (radeon_connector->ddc_bus)
+		dret = radeon_ddc_probe(radeon_connector, false);
+	if (dret) {
+		radeon_connector->detected_by_load = false;
+		if (radeon_connector->edid) {
+			kfree(radeon_connector->edid);
+			radeon_connector->edid = NULL;
+		}
+		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+
+		if (!radeon_connector->edid) {
+			DRM_ERROR("%s: probed a monitor but got no or invalid EDID\n",
+					drm_get_connector_name(connector));
+			/* rs690 seems to have a problem with connectors not existing and
+			 * always returning a block of 0's. If we see this, just stop
+			 * polling on this output. */
+			if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
+				ret = connector_status_disconnected;
+				DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
+				radeon_connector->ddc_bus = NULL;
+			} else {
+				ret = connector_status_connected;
+				broken_edid = true; /* defer use_digital to later */
+			}
+		} else {
+			radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+
+			/* some OEMs have boards with separate digital and analog connectors
+			 * with a shared DDC line (often VGA + HDMI)
+			 */
+			if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
+				kfree(radeon_connector->edid);
+				radeon_connector->edid = NULL;
+				ret = connector_status_disconnected;
+			} else
+				ret = connector_status_connected;
+
+			/* This gets complicated.  We have boards with VGA + HDMI with a
+			 * shared DDC line and we have boards with DVI-D + HDMI with a shared
+			 * DDC line.  The latter is more complex because with DVI<->HDMI adapters
+			 * you don't really know what's connected to which port as both are digital.
+			 */
+			if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
+				struct drm_connector *list_connector;
+				struct radeon_connector *list_radeon_connector;
+				list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+					if (connector == list_connector)
+						continue;
+					list_radeon_connector = to_radeon_connector(list_connector);
+					if (list_radeon_connector->shared_ddc &&
+					    (list_radeon_connector->ddc_bus->rec.i2c_id ==
+					     radeon_connector->ddc_bus->rec.i2c_id)) {
+						/* cases where both connectors are digital */
+						if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
+							/* hpd is our only option in this case */
+							if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+								kfree(radeon_connector->edid);
+								radeon_connector->edid = NULL;
+								ret = connector_status_disconnected;
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
+		goto out;
+
+	/* DVI-D and HDMI-A are digital only */
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA))
+		goto out;
+
+	/* if we aren't forcing, don't do destructive polling */
+	if (!force) {
+		/* only return the previous status if we last
+		 * detected a monitor via load detection.
+		 */
+		if (radeon_connector->detected_by_load)
+			ret = connector->status;
+		goto out;
+	}
+
+	/* find analog encoder */
+	if (radeon_connector->dac_load_detect) {
+		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+			if (connector->encoder_ids[i] == 0)
+				break;
+
+			obj = drm_mode_object_find(connector->dev,
+						   connector->encoder_ids[i],
+						   DRM_MODE_OBJECT_ENCODER);
+			if (!obj)
+				continue;
+
+			encoder = obj_to_encoder(obj);
+
+			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
+			    encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
+				continue;
+
+			encoder_funcs = encoder->helper_private;
+			if (encoder_funcs->detect) {
+				if (!broken_edid) {
+					if (ret != connector_status_connected) {
+						/* deal with analog monitors without DDC */
+						ret = encoder_funcs->detect(encoder, connector);
+						if (ret == connector_status_connected) {
+							radeon_connector->use_digital = false;
+						}
+						if (ret != connector_status_disconnected)
+							radeon_connector->detected_by_load = true;
+					}
+				} else {
+					enum drm_connector_status lret;
+					/* assume digital unless load detected otherwise */
+					radeon_connector->use_digital = true;
+					lret = encoder_funcs->detect(encoder, connector);
+					DRM_DEBUG_KMS("load_detect %x returned: %x\n",
+						      encoder->encoder_type, lret);
+					if (lret == connector_status_connected)
+						radeon_connector->use_digital = false;
+				}
+				break;
+			}
+		}
+	}
+
+	if ((ret == connector_status_connected) && (radeon_connector->use_digital == false) &&
+	    encoder) {
+		ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
+	}
+
+	/* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
+	 * vbios to deal with KVMs. If we have one and are not able to detect a monitor
+	 * by other means, assume the DFP is connected and use that EDID.  In most
+	 * cases the DVI port is actually a virtual KVM port connected to the service
+	 * processor.
+	 */
+out:
+	if ((!rdev->is_atom_bios) &&
+	    (ret == connector_status_disconnected) &&
+	    rdev->mode_info.bios_hardcoded_edid_size) {
+		radeon_connector->use_digital = true;
+		ret = connector_status_connected;
+	}
+
+	/* updated in get modes as well since we need to know if it's analog or digital */
+	radeon_connector_update_scratch_regs(connector, ret);
+	return ret;
+}
+
+/* we need to be smart in here about which encoder to pick */
+static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+	int i;
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+
+		if (radeon_connector->use_digital == true) {
+			if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
+				return encoder;
+		} else {
+			if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
+			    encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
+				return encoder;
+		}
+	}
+
+	/* see if we have a default encoder  TODO */
+
+	/* then check use digital */
+	/* pick the first one */
+	if (enc_id) {
+		obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			return NULL;
+		encoder = obj_to_encoder(obj);
+		return encoder;
+	}
+	return NULL;
+}
+
+static void radeon_dvi_force(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	if (connector->force == DRM_FORCE_ON)
+		radeon_connector->use_digital = false;
+	if (connector->force == DRM_FORCE_ON_DIGITAL)
+		radeon_connector->use_digital = true;
+}
+
+static int radeon_dvi_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	/* XXX check mode bandwidth */
+
+	/* clocks over 135 MHz have heat issues with DVI on RV100 */
+	if (radeon_connector->use_digital &&
+	    (rdev->family == CHIP_RV100) &&
+	    (mode->clock > 135000))
+		return MODE_CLOCK_HIGH;
+
+	if (radeon_connector->use_digital && (mode->clock > 165000)) {
+		if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
+		    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
+		    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
+			return MODE_OK;
+		else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
+			if (ASIC_IS_DCE6(rdev)) {
+				/* HDMI 1.3+ supports max clock of 340 MHz */
+				if (mode->clock > 340000)
+					return MODE_CLOCK_HIGH;
+				else
+					return MODE_OK;
+			} else
+				return MODE_CLOCK_HIGH;
+		} else
+			return MODE_CLOCK_HIGH;
+	}
+
+	/* check against the max pixel clock */
+	if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
+	.get_modes = radeon_dvi_get_modes,
+	.mode_valid = radeon_dvi_mode_valid,
+	.best_encoder = radeon_dvi_encoder,
+};
+
+static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dvi_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_connector_set_property,
+	.destroy = radeon_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
+static void radeon_dp_connector_destroy(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+	kfree(radeon_connector->edid);
+	if (radeon_dig_connector->dp_i2c_bus)
+		radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
+	kfree(radeon_connector->con_priv);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
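+/* On eDP the AUX/DDC channel is only usable while the panel is powered, so
+ * the EDID fetch is bracketed with panel power on/off unless the panel is
+ * already on. If DDC yields no modes, fall back to the native mode from
+ * the vbios tables before giving up.
+ */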
+static int radeon_dp_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+	int ret;
+
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+		struct drm_display_mode *mode;
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+			if (!radeon_dig_connector->edp_on)
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_ON);
+			ret = radeon_ddc_get_modes(radeon_connector);
+			if (!radeon_dig_connector->edp_on)
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+		} else {
+			/* need to setup ddc on the bridge */
+			if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+			    ENCODER_OBJECT_ID_NONE) {
+				if (encoder)
+					radeon_atom_ext_encoder_setup_ddc(encoder);
+			}
+			ret = radeon_ddc_get_modes(radeon_connector);
+		}
+
+		if (ret > 0) {
+			if (encoder) {
+				radeon_fixup_lvds_native_mode(encoder, connector);
+				/* add scaled modes */
+				radeon_add_common_modes(encoder, connector);
+			}
+			return ret;
+		}
+
+		if (!encoder)
+			return 0;
+
+		/* we have no EDID modes */
+		mode = radeon_fp_native_mode(encoder);
+		if (mode) {
+			ret = 1;
+			drm_mode_probed_add(connector, mode);
+			/* add the width/height from vbios tables if available */
+			connector->display_info.width_mm = mode->width_mm;
+			connector->display_info.height_mm = mode->height_mm;
+			/* add scaled modes */
+			radeon_add_common_modes(encoder, connector);
+		}
+	} else {
+		/* need to setup ddc on the bridge */
+		if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+			ENCODER_OBJECT_ID_NONE) {
+			if (encoder)
+				radeon_atom_ext_encoder_setup_ddc(encoder);
+		}
+		ret = radeon_ddc_get_modes(radeon_connector);
+	}
+
+	return ret;
+}
+
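+/* Walk the encoders attached to this connector and return the object id of
+ * the first external DP bridge found (TRAVIS: DP to LVDS, NUTMEG: DP to
+ * VGA), or ENCODER_OBJECT_ID_NONE if the connector is driven directly.
+ */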
+u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
+{
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	int i;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+		radeon_encoder = to_radeon_encoder(encoder);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_TRAVIS:
+		case ENCODER_OBJECT_ID_NUTMEG:
+			return radeon_encoder->encoder_id;
+		default:
+			break;
+		}
+	}
+
+	return ENCODER_OBJECT_ID_NONE;
+}
+
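+/* Returns true if any encoder attached to this connector advertises HBR2
+ * (5.4 Gbps per lane) support via the ATOM encoder cap record.
+ */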
+bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
+{
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	int i;
+	bool found = false;
+
+	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+		if (connector->encoder_ids[i] == 0)
+			break;
+
+		obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+		if (!obj)
+			continue;
+
+		encoder = obj_to_encoder(obj);
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
+			found = true;
+	}
+
+	return found;
+}
+
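+/* DP 1.2 needs a DCE5+ display engine, a default display clock of at least
+ * 53900 (presumably in 10 kHz units, i.e. 539 MHz) and an encoder capable
+ * of HBR2 link rates.
+ */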
+bool radeon_connector_is_dp12_capable(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE5(rdev) &&
+	    (rdev->clock.default_dispclk >= 53900) &&
+	    radeon_connector_encoder_is_hbr2(connector)) {
+		return true;
+	}
+
+	return false;
+}
+
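+/* DP detection depends on the connector flavour: eDP/LVDS panels are
+ * judged by a sane native mode plus a DPCD read with the panel powered,
+ * DP bridges by an AUX DDC probe (with load detection as a fallback), and
+ * regular DP/HDMI ports by HPD plus a DPCD or non-AUX DDC probe.
+ */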
+static enum drm_connector_status
+radeon_dp_detect(struct drm_connector *connector, bool force)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+	if (!force && radeon_check_hpd_status_unchanged(connector))
+		return connector->status;
+
+	if (radeon_connector->edid) {
+		kfree(radeon_connector->edid);
+		radeon_connector->edid = NULL;
+	}
+
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+		if (encoder) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+			/* check if panel is valid */
+			if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
+				ret = connector_status_connected;
+		}
+		/* eDP is always DP */
+		radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+		if (!radeon_dig_connector->edp_on)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_ON);
+		if (radeon_dp_getdpcd(radeon_connector))
+			ret = connector_status_connected;
+		if (!radeon_dig_connector->edp_on)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+	} else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+		   ENCODER_OBJECT_ID_NONE) {
+		/* DP bridges are always DP */
+		radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+		/* get the DPCD from the bridge */
+		radeon_dp_getdpcd(radeon_connector);
+
+		if (encoder) {
+			/* setup ddc on the bridge */
+			radeon_atom_ext_encoder_setup_ddc(encoder);
+			/* bridge chips are always aux */
+			if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
+				ret = connector_status_connected;
+			else if (radeon_connector->dac_load_detect) { /* try load detection */
+				struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+				ret = encoder_funcs->detect(encoder, connector);
+			}
+		}
+	} else {
+		radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+		if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+			ret = connector_status_connected;
+			if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+				radeon_dp_getdpcd(radeon_connector);
+		} else {
+			if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+				if (radeon_dp_getdpcd(radeon_connector))
+					ret = connector_status_connected;
+			} else {
+				/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
+				if (radeon_ddc_probe(radeon_connector, false))
+					ret = connector_status_connected;
+			}
+		}
+	}
+
+	radeon_connector_update_scratch_regs(connector, ret);
+	return ret;
+}
+
+static int radeon_dp_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+	/* XXX check mode bandwidth */
+
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+		struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+
+		if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
+			return MODE_PANEL;
+
+		if (encoder) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+
+			/* AVIVO hardware supports downscaling modes larger than the panel
+			 * to the panel size, but I'm not sure this is desirable.
+			 */
+			if ((mode->hdisplay > native_mode->hdisplay) ||
+			    (mode->vdisplay > native_mode->vdisplay))
+				return MODE_PANEL;
+
+			/* if scaling is disabled, block non-native modes */
+			if (radeon_encoder->rmx_type == RMX_OFF) {
+				if ((mode->hdisplay != native_mode->hdisplay) ||
+				    (mode->vdisplay != native_mode->vdisplay))
+					return MODE_PANEL;
+			}
+		}
+		return MODE_OK;
+	} else {
+		if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+			return radeon_dp_mode_valid_helper(connector, mode);
+		else
+			return MODE_OK;
+	}
+}
+
+static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
+	.get_modes = radeon_dp_get_modes,
+	.mode_valid = radeon_dp_mode_valid,
+	.best_encoder = radeon_dvi_encoder,
+};
+
+static const struct drm_connector_funcs radeon_dp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_connector_set_property,
+	.destroy = radeon_dp_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
+static const struct drm_connector_funcs radeon_edp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_lvds_set_property,
+	.destroy = radeon_dp_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
+static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_lvds_set_property,
+	.destroy = radeon_dp_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
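+/* Create and register a connector described by the ATOM BIOS tables. If a
+ * connector with this id was already added, only the new supported device
+ * bits are ORed in; shared DDC lines and external DP bridge encoders are
+ * detected up front so the proper funcs/helpers can be selected below.
+ */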
+void
+radeon_add_atom_connector(struct drm_device *dev,
+			  uint32_t connector_id,
+			  uint32_t supported_device,
+			  int connector_type,
+			  struct radeon_i2c_bus_rec *i2c_bus,
+			  uint32_t igp_lane_info,
+			  uint16_t connector_object_id,
+			  struct radeon_hpd *hpd,
+			  struct radeon_router *router)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *radeon_dig_connector;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	uint32_t subpixel_order = SubPixelNone;
+	bool shared_ddc = false;
+	bool is_dp_bridge = false;
+
+	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+		return;
+
+	/* if the user selected tv=0, don't try to add the connector */
+	if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+	     (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+	     (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+	    (radeon_tv == 0))
+		return;
+
+	/* see if we already added it */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_connector->connector_id == connector_id) {
+			radeon_connector->devices |= supported_device;
+			return;
+		}
+		if (radeon_connector->ddc_bus && i2c_bus->valid) {
+			if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
+				radeon_connector->shared_ddc = true;
+				shared_ddc = true;
+			}
+			if (radeon_connector->router_bus && router->ddc_valid &&
+			    (radeon_connector->router.router_id == router->router_id)) {
+				radeon_connector->shared_ddc = false;
+				shared_ddc = false;
+			}
+		}
+	}
+
+	/* check if it's a dp bridge */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->devices & supported_device) {
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_TRAVIS:
+			case ENCODER_OBJECT_ID_NUTMEG:
+				is_dp_bridge = true;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
+	if (!radeon_connector)
+		return;
+
+	connector = &radeon_connector->base;
+
+	radeon_connector->connector_id = connector_id;
+	radeon_connector->devices = supported_device;
+	radeon_connector->shared_ddc = shared_ddc;
+	radeon_connector->connector_object_id = connector_object_id;
+	radeon_connector->hpd = *hpd;
+
+	radeon_connector->router = *router;
+	if (router->ddc_valid || router->cd_valid) {
+		radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
+		if (!radeon_connector->router_bus)
+			DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
+	}
+
+	if (is_dp_bridge) {
+		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+		if (!radeon_dig_connector)
+			goto failed;
+		radeon_dig_connector->igp_lane_info = igp_lane_info;
+		radeon_connector->con_priv = radeon_dig_connector;
+		if (i2c_bus->valid) {
+			/* add DP i2c bus */
+			if (connector_type == DRM_MODE_CONNECTOR_eDP)
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
+			else
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+			if (!radeon_dig_connector->dp_i2c_bus)
+				DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		switch (connector_type) {
+		case DRM_MODE_CONNECTOR_VGA:
+		case DRM_MODE_CONNECTOR_DVIA:
+		default:
+			drm_connector_init(dev, &radeon_connector->base,
+					   &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base,
+						 &radeon_dp_connector_helper_funcs);
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			break;
+		case DRM_MODE_CONNECTOR_DVII:
+		case DRM_MODE_CONNECTOR_DVID:
+		case DRM_MODE_CONNECTOR_HDMIA:
+		case DRM_MODE_CONNECTOR_HDMIB:
+		case DRM_MODE_CONNECTOR_DisplayPort:
+			drm_connector_init(dev, &radeon_connector->base,
+					   &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base,
+						 &radeon_dp_connector_helper_funcs);
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.underscan_property,
+						      UNDERSCAN_OFF);
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.underscan_hborder_property,
+						      0);
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.underscan_vborder_property,
+						      0);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+				radeon_connector->dac_load_detect = true;
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.load_detect_property,
+							      1);
+			}
+			break;
+		case DRM_MODE_CONNECTOR_LVDS:
+		case DRM_MODE_CONNECTOR_eDP:
+			drm_connector_init(dev, &radeon_connector->base,
+					   &radeon_lvds_bridge_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base,
+						 &radeon_dp_connector_helper_funcs);
+			drm_object_attach_property(&radeon_connector->base.base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		}
+	} else {
+		switch (connector_type) {
+		case DRM_MODE_CONNECTOR_VGA:
+			drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			/* no HPD on analog connectors */
+			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			break;
+		case DRM_MODE_CONNECTOR_DVIA:
+			drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			/* no HPD on analog connectors */
+			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+			connector->interlace_allowed = true;
+			connector->doublescan_allowed = true;
+			break;
+		case DRM_MODE_CONNECTOR_DVII:
+		case DRM_MODE_CONNECTOR_DVID:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			subpixel_order = SubPixelHorizontalRGB;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.coherent_mode_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_property,
+							      UNDERSCAN_OFF);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_hborder_property,
+							      0);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_vborder_property,
+							      0);
+			}
+			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+				radeon_connector->dac_load_detect = true;
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.load_detect_property,
+							      1);
+			}
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_DVII)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_HDMIA:
+		case DRM_MODE_CONNECTOR_HDMIB:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.coherent_mode_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_property,
+							      UNDERSCAN_OFF);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_hborder_property,
+							      0);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_vborder_property,
+							      0);
+			}
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = true;
+			if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				connector->doublescan_allowed = true;
+			else
+				connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_DisplayPort:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				/* add DP i2c bus */
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+				if (!radeon_dig_connector->dp_i2c_bus)
+					DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			subpixel_order = SubPixelHorizontalRGB;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.coherent_mode_property,
+						      1);
+			if (ASIC_IS_AVIVO(rdev)) {
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_property,
+							      UNDERSCAN_OFF);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_hborder_property,
+							      0);
+				drm_object_attach_property(&radeon_connector->base.base,
+							      rdev->mode_info.underscan_vborder_property,
+							      0);
+			}
+			connector->interlace_allowed = true;
+			/* in theory with a DP to VGA converter... */
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_eDP:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				/* add DP i2c bus */
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
+				if (!radeon_dig_connector->dp_i2c_bus)
+					DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_object_attach_property(&radeon_connector->base.base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_SVIDEO:
+		case DRM_MODE_CONNECTOR_Composite:
+		case DRM_MODE_CONNECTOR_9PinDIN:
+			drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.tv_std_property,
+						      radeon_atombios_get_tv_info(rdev));
+			/* no HPD on analog connectors */
+			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		case DRM_MODE_CONNECTOR_LVDS:
+			radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+			if (!radeon_dig_connector)
+				goto failed;
+			radeon_dig_connector->igp_lane_info = igp_lane_info;
+			radeon_connector->con_priv = radeon_dig_connector;
+			drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
+			drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+			if (i2c_bus->valid) {
+				radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+				if (!radeon_connector->ddc_bus)
+					DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+			}
+			drm_object_attach_property(&radeon_connector->base.base,
+						      dev->mode_config.scaling_mode_property,
+						      DRM_MODE_SCALE_FULLSCREEN);
+			subpixel_order = SubPixelHorizontalRGB;
+			connector->interlace_allowed = false;
+			connector->doublescan_allowed = false;
+			break;
+		}
+	}
+
+	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+		if (i2c_bus->valid)
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	} else
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	connector->display_info.subpixel_order = subpixel_order;
+	drm_sysfs_connector_add(connector);
+	return;
+
+failed:
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+void
+radeon_add_legacy_connector(struct drm_device *dev,
+			    uint32_t connector_id,
+			    uint32_t supported_device,
+			    int connector_type,
+			    struct radeon_i2c_bus_rec *i2c_bus,
+			    uint16_t connector_object_id,
+			    struct radeon_hpd *hpd)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	uint32_t subpixel_order = SubPixelNone;
+
+	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
+		return;
+
+	/* if the user selected tv=0, don't try to add the connector */
+	if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+	     (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+	     (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+	    (radeon_tv == 0))
+		return;
+
+	/* see if we already added it */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_connector->connector_id == connector_id) {
+			radeon_connector->devices |= supported_device;
+			return;
+		}
+	}
+
+	radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
+	if (!radeon_connector)
+		return;
+
+	connector = &radeon_connector->base;
+
+	radeon_connector->connector_id = connector_id;
+	radeon_connector->devices = supported_device;
+	radeon_connector->connector_object_id = connector_object_id;
+	radeon_connector->hpd = *hpd;
+
+	switch (connector_type) {
+	case DRM_MODE_CONNECTOR_VGA:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+		if (i2c_bus->valid) {
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		radeon_connector->dac_load_detect = true;
+		drm_object_attach_property(&radeon_connector->base.base,
+					      rdev->mode_info.load_detect_property,
+					      1);
+		/* no HPD on analog connectors */
+		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+		connector->interlace_allowed = true;
+		connector->doublescan_allowed = true;
+		break;
+	case DRM_MODE_CONNECTOR_DVIA:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
+		if (i2c_bus->valid) {
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		radeon_connector->dac_load_detect = true;
+		drm_object_attach_property(&radeon_connector->base.base,
+					      rdev->mode_info.load_detect_property,
+					      1);
+		/* no HPD on analog connectors */
+		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+		connector->interlace_allowed = true;
+		connector->doublescan_allowed = true;
+		break;
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_DVID:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+		if (i2c_bus->valid) {
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+			radeon_connector->dac_load_detect = true;
+			drm_object_attach_property(&radeon_connector->base.base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+		}
+		subpixel_order = SubPixelHorizontalRGB;
+		connector->interlace_allowed = true;
+		if (connector_type == DRM_MODE_CONNECTOR_DVII)
+			connector->doublescan_allowed = true;
+		else
+			connector->doublescan_allowed = false;
+		break;
+	case DRM_MODE_CONNECTOR_SVIDEO:
+	case DRM_MODE_CONNECTOR_Composite:
+	case DRM_MODE_CONNECTOR_9PinDIN:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+		radeon_connector->dac_load_detect = true;
+		/* The RS400, RC410 and RS480 chipsets seem to report a lot
+		 * of false positives on load detect. We haven't yet found a
+		 * way to make load detect reliable on those chipsets, so
+		 * just disable it for TV.
+		 */
+		if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
+			radeon_connector->dac_load_detect = false;
+		drm_object_attach_property(&radeon_connector->base.base,
+					      rdev->mode_info.load_detect_property,
+					      radeon_connector->dac_load_detect);
+		drm_object_attach_property(&radeon_connector->base.base,
+					      rdev->mode_info.tv_std_property,
+					      radeon_combios_get_tv_info(rdev));
+		/* no HPD on analog connectors */
+		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+		connector->interlace_allowed = false;
+		connector->doublescan_allowed = false;
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+		drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
+		drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
+		if (i2c_bus->valid) {
+			radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
+			if (!radeon_connector->ddc_bus)
+				DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+		}
+		drm_object_attach_property(&radeon_connector->base.base,
+					      dev->mode_config.scaling_mode_property,
+					      DRM_MODE_SCALE_FULLSCREEN);
+		subpixel_order = SubPixelHorizontalRGB;
+		connector->interlace_allowed = false;
+		connector->doublescan_allowed = false;
+		break;
+	}
+
+	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+		if (i2c_bus->valid)
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+	} else
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+	connector->display_info.subpixel_order = subpixel_order;
+	drm_sysfs_connector_add(connector);
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_cp.c b/linux-imx/drivers/gpu/drm/radeon/radeon_cp.c
new file mode 100644
index 0000000..efc4f64
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_cp.c
@@ -0,0 +1,2243 @@
+/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
+/*
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * Copyright 2007 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Kevin E. Martin <martin@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+#include "r300_reg.h"
+
+#define RADEON_FIFO_DEBUG	0
+
+/* Firmware Names */
+#define FIRMWARE_R100		"radeon/R100_cp.bin"
+#define FIRMWARE_R200		"radeon/R200_cp.bin"
+#define FIRMWARE_R300		"radeon/R300_cp.bin"
+#define FIRMWARE_R420		"radeon/R420_cp.bin"
+#define FIRMWARE_RS690		"radeon/RS690_cp.bin"
+#define FIRMWARE_RS600		"radeon/RS600_cp.bin"
+#define FIRMWARE_R520		"radeon/R520_cp.bin"
+
+MODULE_FIRMWARE(FIRMWARE_R100);
+MODULE_FIRMWARE(FIRMWARE_R200);
+MODULE_FIRMWARE(FIRMWARE_R300);
+MODULE_FIRMWARE(FIRMWARE_R420);
+MODULE_FIRMWARE(FIRMWARE_RS690);
+MODULE_FIRMWARE(FIRMWARE_RS600);
+MODULE_FIRMWARE(FIRMWARE_R520);
+
+static int radeon_do_cleanup_cp(struct drm_device * dev);
+static void radeon_do_cp_start(drm_radeon_private_t * dev_priv);
+
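+/* Ring read-pointer accessors. With AGP the rptr lives in a mapped DRM
+ * region accessed via DRM_READ32/DRM_WRITE32; otherwise it sits in
+ * little-endian host memory, hence the explicit le32 conversions.
+ */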
+u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off)
+{
+	u32 val;
+
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		val = DRM_READ32(dev_priv->ring_rptr, off);
+	} else {
+		val = *(((volatile u32 *)
+			 dev_priv->ring_rptr->handle) +
+			(off / sizeof(u32)));
+		val = le32_to_cpu(val);
+	}
+	return val;
+}
+
+u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv)
+{
+	if (dev_priv->writeback_works)
+		return radeon_read_ring_rptr(dev_priv, 0);
+	else {
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			return RADEON_READ(R600_CP_RB_RPTR);
+		else
+			return RADEON_READ(RADEON_CP_RB_RPTR);
+	}
+}
+
+void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val)
+{
+	if (dev_priv->flags & RADEON_IS_AGP)
+		DRM_WRITE32(dev_priv->ring_rptr, off, val);
+	else
+		*(((volatile u32 *) dev_priv->ring_rptr->handle) +
+		  (off / sizeof(u32))) = cpu_to_le32(val);
+}
+
+void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val)
+{
+	radeon_write_ring_rptr(dev_priv, 0, val);
+}
+
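+/* Scratch registers are mirrored into the ring read-back area when
+ * writeback works; otherwise read the registers directly, using the R600+
+ * or pre-R600 offsets as appropriate.
+ */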
+u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
+{
+	if (dev_priv->writeback_works) {
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			return radeon_read_ring_rptr(dev_priv,
+						     R600_SCRATCHOFF(index));
+		else
+			return radeon_read_ring_rptr(dev_priv,
+						     RADEON_SCRATCHOFF(index));
+	} else {
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			return RADEON_READ(R600_SCRATCH_REG0 + 4*index);
+		else
+			return RADEON_READ(RADEON_SCRATCH_REG0 + 4*index);
+	}
+}
+
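+/* Memory-controller registers on these parts are reached indirectly:
+ * write the register number to an index register, then read the result
+ * from the matching data register. Each IGP/R5xx variant has its own
+ * index/data pair and mask, hence the per-family helpers below.
+ */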
+static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	u32 ret;
+	RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
+	ret = RADEON_READ(R520_MC_IND_DATA);
+	RADEON_WRITE(R520_MC_IND_INDEX, 0);
+	return ret;
+}
+
+static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	u32 ret;
+	RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff);
+	ret = RADEON_READ(RS480_NB_MC_DATA);
+	RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);
+	return ret;
+}
+
+static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	u32 ret;
+	RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
+	ret = RADEON_READ(RS690_MC_DATA);
+	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
+	return ret;
+}
+
+static u32 RS600_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	u32 ret;
+	RADEON_WRITE(RS600_MC_INDEX, ((addr & RS600_MC_ADDR_MASK) |
+				      RS600_MC_IND_CITF_ARB0));
+	ret = RADEON_READ(RS600_MC_DATA);
+	return ret;
+}
+
+static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+		return RS690_READ_MCIND(dev_priv, addr);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+		return RS600_READ_MCIND(dev_priv, addr);
+	else
+		return RS480_READ_MCIND(dev_priv, addr);
+}
+
+u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
+{
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+		return RADEON_READ(R700_MC_VM_FB_LOCATION);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return RADEON_READ(R600_MC_VM_FB_LOCATION);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+		return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+		return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+		return RS600_READ_MCIND(dev_priv, RS600_MC_FB_LOCATION);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+		return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
+	else
+		return RADEON_READ(RADEON_MC_FB_LOCATION);
+}
+
+static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
+{
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+		RADEON_WRITE(R700_MC_VM_FB_LOCATION, fb_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		RADEON_WRITE(R600_MC_VM_FB_LOCATION, fb_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+		R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+		RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+		RS600_WRITE_MCIND(RS600_MC_FB_LOCATION, fb_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+		R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
+	else
+		RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
+}
+
+void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
+{
+	/* R6xx/R7xx: AGP_TOP and BOT are actually 18 bits each */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
+		RADEON_WRITE(R700_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */
+		RADEON_WRITE(R700_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff);
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+		RADEON_WRITE(R600_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */
+		RADEON_WRITE(R600_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff);
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+		R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+		RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+		RS600_WRITE_MCIND(RS600_MC_AGP_LOCATION, agp_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+		R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
+	else
+		RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
+}
+
+void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
+{
+	u32 agp_base_hi = upper_32_bits(agp_base);
+	u32 agp_base_lo = agp_base & 0xffffffff;
+	u32 r6xx_agp_base = (agp_base >> 22) & 0x3ffff;
+
+	/* R6xx/R7xx must be aligned to a 4MB boundary */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
+		RADEON_WRITE(R700_MC_VM_AGP_BASE, r6xx_agp_base);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		RADEON_WRITE(R600_MC_VM_AGP_BASE, r6xx_agp_base);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
+		R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
+		R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+		RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
+		RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
+		RS600_WRITE_MCIND(RS600_AGP_BASE, agp_base_lo);
+		RS600_WRITE_MCIND(RS600_AGP_BASE_2, agp_base_hi);
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
+		R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
+		R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
+		RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
+		RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
+	} else {
+		RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
+			RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
+	}
+}
+
+void radeon_enable_bm(struct drm_radeon_private *dev_priv)
+{
+	u32 tmp;
+	/* Turn on bus mastering */
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+		/* rs600/rs690/rs740 */
+		tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+		RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
+		/* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+		tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+		RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+	} /* PCIE cards appear to not need this */
+}
+
+static int RADEON_READ_PLL(struct drm_device * dev, int addr)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
+	return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
+}
+
+static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
+{
+	RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
+	return RADEON_READ(RADEON_PCIE_DATA);
+}
+
+#if RADEON_FIFO_DEBUG
+static void radeon_status(drm_radeon_private_t * dev_priv)
+{
+	printk("%s:\n", __func__);
+	printk("RBBM_STATUS = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
+	printk("CP_RB_RTPR = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR));
+	printk("CP_RB_WTPR = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR));
+	printk("AIC_CNTL = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_CNTL));
+	printk("AIC_STAT = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_STAT));
+	printk("AIC_PT_BASE = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE));
+	printk("TLB_ADDR = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR));
+	printk("TLB_DATA = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA));
+}
+#endif
+
+/* ================================================================
+ * Engine, FIFO control
+ */
+
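+/* Flush the destination cache and spin (up to usec_timeout microseconds)
+ * for the busy bit to clear. Only R100/R200-class parts (up to RV280) are
+ * flushed here; on newer parts flushing from this path can lock the chip
+ * up, so it is skipped.
+ */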
+static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
+{
+	u32 tmp;
+	int i;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {
+		tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
+		tmp |= RADEON_RB3D_DC_FLUSH_ALL;
+		RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
+
+		for (i = 0; i < dev_priv->usec_timeout; i++) {
+			if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
+			      & RADEON_RB3D_DC_BUSY)) {
+				return 0;
+			}
+			DRM_UDELAY(1);
+		}
+	} else {
+		/* don't flush or purge the cache here, or the chip may lock up */
+		return 0;
+	}
+
+#if RADEON_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+	radeon_status(dev_priv);
+#endif
+	return -EBUSY;
+}
+
+static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
+{
+	int i;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		int slots = (RADEON_READ(RADEON_RBBM_STATUS)
+			     & RADEON_RBBM_FIFOCNT_MASK);
+		if (slots >= entries)
+			return 0;
+		DRM_UDELAY(1);
+	}
+	DRM_DEBUG("wait for fifo failed status : 0x%08X 0x%08X\n",
+		 RADEON_READ(RADEON_RBBM_STATUS),
+		 RADEON_READ(R300_VAP_CNTL_STATUS));
+
+#if RADEON_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+	radeon_status(dev_priv);
+#endif
+	return -EBUSY;
+}
+
+static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
+{
+	int i, ret;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	ret = radeon_do_wait_for_fifo(dev_priv, 64);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		if (!(RADEON_READ(RADEON_RBBM_STATUS)
+		      & RADEON_RBBM_ACTIVE)) {
+			radeon_do_pixcache_flush(dev_priv);
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	DRM_DEBUG("wait idle failed status : 0x%08X 0x%08X\n",
+		 RADEON_READ(RADEON_RBBM_STATUS),
+		 RADEON_READ(R300_VAP_CNTL_STATUS));
+
+#if RADEON_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+	radeon_status(dev_priv);
+#endif
+	return -EBUSY;
+}
+
+static void radeon_init_pipes(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	uint32_t gb_tile_config, gb_pipe_sel = 0;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
+		uint32_t z_pipe_sel = RADEON_READ(RV530_GB_PIPE_SELECT2);
+		if ((z_pipe_sel & 3) == 3)
+			dev_priv->num_z_pipes = 2;
+		else
+			dev_priv->num_z_pipes = 1;
+	} else
+		dev_priv->num_z_pipes = 1;
+
+	/* RS4xx/RS6xx/R4xx/R5xx */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
+		gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
+		dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
+		/* SE cards have 1 pipe */
+		if ((dev->pdev->device == 0x5e4c) ||
+		    (dev->pdev->device == 0x5e4f))
+			dev_priv->num_gb_pipes = 1;
+	} else {
+		/* R3xx */
+		if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
+		     dev->pdev->device != 0x4144) ||
+		    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 &&
+		     dev->pdev->device != 0x4148)) {
+			dev_priv->num_gb_pipes = 2;
+		} else {
+			/* RV3xx/R300 AD/R350 AH */
+			dev_priv->num_gb_pipes = 1;
+		}
+	}
+	DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes);
+
+	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/);
+
+	switch (dev_priv->num_gb_pipes) {
+	case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break;
+	case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break;
+	case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break;
+	default:
+	case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+		RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4));
+		RADEON_WRITE(R300_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1));
+	}
+	RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config);
+	radeon_do_wait_for_idle(dev_priv);
+	RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG);
+	RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) |
+					       R300_DC_AUTOFLUSH_ENABLE |
+					       R300_DC_DC_DISABLE_IGNORE_PE));
+}
+
+/* ================================================================
+ * CP control, initialization
+ */
+
+/* Load the microcode for the CP */
+static int radeon_cp_init_microcode(drm_radeon_private_t *dev_priv)
+{
+	struct platform_device *pdev;
+	const char *fw_name = NULL;
+	int err;
+
+	DRM_DEBUG("\n");
+
+	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+	err = IS_ERR(pdev);
+	if (err) {
+		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+		return -EINVAL;
+	}
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) {
+		DRM_INFO("Loading R100 Microcode\n");
+		fw_name = FIRMWARE_R100;
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) {
+		DRM_INFO("Loading R200 Microcode\n");
+		fw_name = FIRMWARE_R200;
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
+		DRM_INFO("Loading R300 Microcode\n");
+		fw_name = FIRMWARE_R300;
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
+		DRM_INFO("Loading R400 Microcode\n");
+		fw_name = FIRMWARE_R420;
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+		DRM_INFO("Loading RS690/RS740 Microcode\n");
+		fw_name = FIRMWARE_RS690;
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
+		DRM_INFO("Loading RS600 Microcode\n");
+		fw_name = FIRMWARE_RS600;
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) {
+		DRM_INFO("Loading R500 Microcode\n");
+		fw_name = FIRMWARE_R520;
+	}
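+	/* Note: for a family not matched above fw_name stays NULL, and the
+	 * request_firmware() call below cannot succeed; the legacy CP code is
+	 * presumably never initialized for such chips.
+	 */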
+
+	err = request_firmware(&dev_priv->me_fw, fw_name, &pdev->dev);
+	platform_device_unregister(pdev);
+	if (err) {
+		printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
+		       fw_name);
+	} else if (dev_priv->me_fw->size % 8) {
+		printk(KERN_ERR
+		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
+		       dev_priv->me_fw->size, fw_name);
+		err = -EINVAL;
+		release_firmware(dev_priv->me_fw);
+		dev_priv->me_fw = NULL;
+	}
+	return err;
+}
+
+static void radeon_cp_load_microcode(drm_radeon_private_t *dev_priv)
+{
+	const __be32 *fw_data;
+	int i, size;
+
+	radeon_do_wait_for_idle(dev_priv);
+
+	if (dev_priv->me_fw) {
+		size = dev_priv->me_fw->size / 4;
+		fw_data = (const __be32 *)&dev_priv->me_fw->data[0];
+		RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
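+		/* The microcode image is a stream of big-endian dword pairs;
+		 * each pair goes out through the DATAH/DATAL ports, which
+		 * appear to auto-increment the ME RAM address programmed to
+		 * zero above.
+		 */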
+		for (i = 0; i < size; i += 2) {
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
+				     be32_to_cpup(&fw_data[i]));
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
+				     be32_to_cpup(&fw_data[i + 1]));
+		}
+	}
+}
+
+/* Flush any pending commands to the CP.  This should only be used just
+ * prior to a wait for idle, as it informs the engine that the command
+ * stream is ending.
+ */
+static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv)
+{
+	DRM_DEBUG("\n");
+#if 0
+	u32 tmp;
+
+	tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);
+	RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
+#endif
+}
+
+/* Wait for the CP to go idle.
+ */
+int radeon_do_cp_idle(drm_radeon_private_t * dev_priv)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(6);
+
+	RADEON_PURGE_CACHE();
+	RADEON_PURGE_ZCACHE();
+	RADEON_WAIT_UNTIL_IDLE();
+
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	return radeon_do_wait_for_idle(dev_priv);
+}
+
+/* Start the Command Processor.
+ */
+static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	radeon_do_wait_for_idle(dev_priv);
+
+	RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);
+
+	dev_priv->cp_running = 1;
+
+	/* On r420, any DMA from the CP to system memory while 2D is active
+	 * can cause a hang.  The workaround is to queue a CP RESYNC token.
+	 */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) {
+		BEGIN_RING(3);
+		OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 1));
+		OUT_RING(5); /* scratch reg 5 */
+		OUT_RING(0xdeadbeef);
+		ADVANCE_RING();
+		COMMIT_RING();
+	}
+
+	BEGIN_RING(8);
+	/* isync can only be written through the CP on r5xx, so write it here */
+	OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0));
+	OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D |
+		 RADEON_ISYNC_ANY3D_IDLE2D |
+		 RADEON_ISYNC_WAIT_IDLEGUI |
+		 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+	RADEON_PURGE_CACHE();
+	RADEON_PURGE_ZCACHE();
+	RADEON_WAIT_UNTIL_IDLE();
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
+}
+
+/* Reset the Command Processor.  This will not flush any pending
+ * commands, so you must wait for the CP command stream to complete
+ * before calling this routine.
+ */
+static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv)
+{
+	u32 cur_read_ptr;
+	DRM_DEBUG("\n");
+
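+	/* Making the write pointer equal to the current read pointer makes
+	 * the ring appear empty to the CP, without disturbing anything the
+	 * hardware has already fetched.
+	 */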
+	cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
+	RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
+	SET_RING_HEAD(dev_priv, cur_read_ptr);
+	dev_priv->ring.tail = cur_read_ptr;
+}
+
+/* Stop the Command Processor.  This will not flush any pending
+ * commands, so you must flush the command stream and wait for the CP
+ * to go idle before calling this routine.
+ */
+static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	/* finish the pending CP_RESYNC token */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) {
+		BEGIN_RING(2);
+		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+		OUT_RING(R300_RB3D_DC_FINISH);
+		ADVANCE_RING();
+		COMMIT_RING();
+		radeon_do_wait_for_idle(dev_priv);
+	}
+
+	RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);
+
+	dev_priv->cp_running = 0;
+}
+
+/* Reset the engine.  This will stop the CP if it is running.
+ */
+static int radeon_do_engine_reset(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset;
+	DRM_DEBUG("\n");
+
+	radeon_do_pixcache_flush(dev_priv);
+
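+	/* On chips up to RV410, force all memory-related clocks on for the
+	 * duration of the soft reset so the memory controller keeps running
+	 * while the engine blocks are reset; the saved values are restored
+	 * below once the reset bits have been toggled.
+	 */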
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
+		/* may need something similar for newer chips */
+		clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
+		mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
+
+		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
+						    RADEON_FORCEON_MCLKA |
+						    RADEON_FORCEON_MCLKB |
+						    RADEON_FORCEON_YCLKA |
+						    RADEON_FORCEON_YCLKB |
+						    RADEON_FORCEON_MC |
+						    RADEON_FORCEON_AIC));
+	}
+
+	rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
+					      RADEON_SOFT_RESET_CP |
+					      RADEON_SOFT_RESET_HI |
+					      RADEON_SOFT_RESET_SE |
+					      RADEON_SOFT_RESET_RE |
+					      RADEON_SOFT_RESET_PP |
+					      RADEON_SOFT_RESET_E2 |
+					      RADEON_SOFT_RESET_RB));
+	RADEON_READ(RADEON_RBBM_SOFT_RESET);
+	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
+					      ~(RADEON_SOFT_RESET_CP |
+						RADEON_SOFT_RESET_HI |
+						RADEON_SOFT_RESET_SE |
+						RADEON_SOFT_RESET_RE |
+						RADEON_SOFT_RESET_PP |
+						RADEON_SOFT_RESET_E2 |
+						RADEON_SOFT_RESET_RB)));
+	RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
+		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
+		RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
+		RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
+	}
+
+	/* setup the raster pipes */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
+		radeon_init_pipes(dev);
+
+	/* Reset the CP ring */
+	radeon_do_cp_reset(dev_priv);
+
+	/* The CP is no longer running after an engine reset */
+	dev_priv->cp_running = 0;
+
+	/* Reset any pending vertex, indirect buffers */
+	radeon_freelist_reset(dev);
+
+	return 0;
+}
+
+static void radeon_cp_init_ring_buffer(struct drm_device * dev,
+				       drm_radeon_private_t *dev_priv,
+				       struct drm_file *file_priv)
+{
+	struct drm_radeon_master_private *master_priv;
+	u32 ring_start, cur_read_ptr;
+
+	/* Initialize the memory controller.  With the new memory map, the fb
+	 * location is not changed; it should have been properly initialized
+	 * already.  Part of the problem is that the code below is bogus,
+	 * assuming the GART is always appended to the fb, which is not
+	 * necessarily the case.
+	 */
+	if (!dev_priv->new_memmap)
+		radeon_write_fb_location(dev_priv,
+			     ((dev_priv->gart_vm_start - 1) & 0xffff0000)
+			     | (dev_priv->fb_location >> 16));
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		radeon_write_agp_base(dev_priv, dev->agp->base);
+
+		radeon_write_agp_location(dev_priv,
+			     (((dev_priv->gart_vm_start - 1 +
+				dev_priv->gart_size) & 0xffff0000) |
+			      (dev_priv->gart_vm_start >> 16)));
+
+		ring_start = (dev_priv->cp_ring->offset
+			      - dev->agp->base
+			      + dev_priv->gart_vm_start);
+	} else
+#endif
+		ring_start = (dev_priv->cp_ring->offset
+			      - (unsigned long)dev->sg->virtual
+			      + dev_priv->gart_vm_start);
+
+	RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);
+
+	/* Set the write pointer delay */
+	RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);
+
+	/* Initialize the ring buffer's read and write pointers */
+	cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
+	RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
+	SET_RING_HEAD(dev_priv, cur_read_ptr);
+	dev_priv->ring.tail = cur_read_ptr;
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
+			     dev_priv->ring_rptr->offset
+			     - dev->agp->base + dev_priv->gart_vm_start);
+	} else
+#endif
+	{
+		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
+			     dev_priv->ring_rptr->offset
+			     - ((unsigned long) dev->sg->virtual)
+			     + dev_priv->gart_vm_start);
+	}
+
+	/* Set ring buffer size */
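+	/* CP_RB_CNTL takes all three sizes as log2 values: the ring size in
+	 * quadwords (8 bytes), the read-pointer writeback interval in
+	 * quadwords, and the fetch size in octwords (16 bytes).  As an
+	 * illustration only: a 1 MB ring would give size_l2qw =
+	 * log2(1048576 / 8) = 17, and the defaults set up in
+	 * radeon_do_init_cp() give rptr_update_l2qw = log2(4096 / 8) = 9 and
+	 * fetch_size_l2ow = log2(32 / 16) = 1.
+	 */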
+#ifdef __BIG_ENDIAN
+	RADEON_WRITE(RADEON_CP_RB_CNTL,
+		     RADEON_BUF_SWAP_32BIT |
+		     (dev_priv->ring.fetch_size_l2ow << 18) |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#else
+	RADEON_WRITE(RADEON_CP_RB_CNTL,
+		     (dev_priv->ring.fetch_size_l2ow << 18) |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#endif
+
+	/* Initialize the scratch register pointer.  This will cause
+	 * the scratch register values to be written out to memory
+	 * whenever they are updated.
+	 *
+	 * We simply put this behind the ring read pointer; this works
+	 * with PCI GART as well as (whatever kind of) AGP GART
+	 */
+	RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
+		     + RADEON_SCRATCH_REG_OFFSET);
+
+	RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
+
+	radeon_enable_bm(dev_priv);
+
+	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(0), 0);
+	RADEON_WRITE(RADEON_LAST_FRAME_REG, 0);
+
+	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0);
+	RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0);
+
+	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(2), 0);
+	RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0);
+
+	/* reset sarea copies of these */
+	master_priv = file_priv->master->driver_priv;
+	if (master_priv->sarea_priv) {
+		master_priv->sarea_priv->last_frame = 0;
+		master_priv->sarea_priv->last_dispatch = 0;
+		master_priv->sarea_priv->last_clear = 0;
+	}
+
+	radeon_do_wait_for_idle(dev_priv);
+
+	/* Sync everything up */
+	RADEON_WRITE(RADEON_ISYNC_CNTL,
+		     (RADEON_ISYNC_ANY2D_IDLE3D |
+		      RADEON_ISYNC_ANY3D_IDLE2D |
+		      RADEON_ISYNC_WAIT_IDLEGUI |
+		      RADEON_ISYNC_CPSCRATCH_IDLEGUI));
+}
+
+static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
+{
+	u32 tmp;
+
+	/* Start with assuming that writeback doesn't work */
+	dev_priv->writeback_works = 0;
+
+	/* Writeback doesn't seem to work everywhere; test it here and only
+	 * enable it if it appears to work.
+	 */
+	radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0);
+
+	RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
+
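+	/* If writeback is functional the chip itself copies the 0xdeadbeef
+	 * value into the shadow location in the ring read-pointer area, so
+	 * polling that memory tells us whether bus-master writes from the
+	 * GPU are actually arriving.
+	 */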
+	for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
+		u32 val;
+
+		val = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
+		if (val == 0xdeadbeef)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (tmp < dev_priv->usec_timeout) {
+		dev_priv->writeback_works = 1;
+		DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
+	} else {
+		dev_priv->writeback_works = 0;
+		DRM_INFO("writeback test failed\n");
+	}
+	if (radeon_no_wb == 1) {
+		dev_priv->writeback_works = 0;
+		DRM_INFO("writeback forced off\n");
+	}
+
+	if (!dev_priv->writeback_works) {
+		/* Disable writeback to avoid unnecessary bus master transfer */
+		RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) |
+			     RADEON_RB_NO_UPDATE);
+		RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
+	}
+}
+
+/* Enable or disable IGP GART on the chip */
+static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
+{
+	u32 temp;
+
+	if (on) {
+		DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
+			  dev_priv->gart_vm_start,
+			  (long)dev_priv->gart_info.bus_addr,
+			  dev_priv->gart_size);
+
+		temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
+		if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
+			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
+							     RS690_BLOCK_GFX_D3_EN));
+		else
+			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
+
+		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
+							       RS480_VA_SIZE_32MB));
+
+		temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID);
+		IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN |
+							RS480_TLB_ENABLE |
+							RS480_GTW_LAC_EN |
+							RS480_1LEVEL_GART));
+
+		temp = dev_priv->gart_info.bus_addr & 0xfffff000;
+		temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4;
+		IGP_WRITE_MCIND(RS480_GART_BASE, temp);
+
+		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL);
+		IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
+						      RS480_REQ_TYPE_SNOOP_DIS));
+
+		radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);
+
+		dev_priv->gart_size = 32*1024*1024;
+		temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
+			 0xffff0000) | (dev_priv->gart_vm_start >> 16));
+
+		radeon_write_agp_location(dev_priv, temp);
+
+		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE);
+		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
+							       RS480_VA_SIZE_32MB));
+
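+		/* Flush the GART TLB: wait for any invalidate already in
+		 * flight to finish, trigger a fresh invalidate, wait for it
+		 * to complete, then explicitly clear the bit.  Both polling
+		 * loops rely on the hardware apparently clearing
+		 * RS480_GART_CACHE_INVALIDATE when it is done.
+		 */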
+		do {
+			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
+			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
+				break;
+			DRM_UDELAY(1);
+		} while (1);
+
+		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL,
+				RS480_GART_CACHE_INVALIDATE);
+
+		do {
+			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
+			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
+				break;
+			DRM_UDELAY(1);
+		} while (1);
+
+		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0);
+	} else {
+		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
+	}
+}
+
+/* Enable or disable IGP GART on the chip */
+static void rs600_set_igpgart(drm_radeon_private_t *dev_priv, int on)
+{
+	u32 temp;
+	int i;
+
+	if (on) {
+		DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
+			 dev_priv->gart_vm_start,
+			 (long)dev_priv->gart_info.bus_addr,
+			 dev_priv->gart_size);
+
+		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
+						    RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
+		for (i = 0; i < 19; i++)
+			IGP_WRITE_MCIND(RS600_MC_PT0_CLIENT0_CNTL + i,
+					(RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
+					 RS600_SYSTEM_ACCESS_MODE_IN_SYS |
+					 RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH |
+					 RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
+					 RS600_ENABLE_FRAGMENT_PROCESSING |
+					 RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
+
+		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL, (RS600_ENABLE_PAGE_TABLE |
+							     RS600_PAGE_TABLE_TYPE_FLAT));
+
+		/* disable all other contexts */
+		for (i = 1; i < 8; i++)
+			IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
+
+		/* setup the page table aperture */
+		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
+				dev_priv->gart_info.bus_addr);
+		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR,
+				dev_priv->gart_vm_start);
+		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR,
+				(dev_priv->gart_vm_start + dev_priv->gart_size - 1));
+		IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+
+		/* setup the system aperture */
+		IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR,
+				dev_priv->gart_vm_start);
+		IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR,
+				(dev_priv->gart_vm_start + dev_priv->gart_size - 1));
+
+		/* enable page tables */
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (temp | RS600_ENABLE_PT));
+
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1);
+		IGP_WRITE_MCIND(RS600_MC_CNTL1, (temp | RS600_ENABLE_PAGE_TABLES));
+
+		/* invalidate the cache */
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+
+		temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
+		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+
+		temp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
+		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+
+		temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
+		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp);
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL);
+
+	} else {
+		IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, 0);
+		temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1);
+		temp &= ~RS600_ENABLE_PAGE_TABLES;
+		IGP_WRITE_MCIND(RS600_MC_CNTL1, temp);
+	}
+}
+
+static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
+{
+	u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
+	if (on) {
+
+		DRM_DEBUG("programming pcie %08X %08lX %08X\n",
+			  dev_priv->gart_vm_start,
+			  (long)dev_priv->gart_info.bus_addr,
+			  dev_priv->gart_size);
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
+				  dev_priv->gart_vm_start);
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
+				  dev_priv->gart_info.bus_addr);
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
+				  dev_priv->gart_vm_start);
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
+				  dev_priv->gart_vm_start +
+				  dev_priv->gart_size - 1);
+
+		radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
+
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
+				  RADEON_PCIE_TX_GART_EN);
+	} else {
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
+				  tmp & ~RADEON_PCIE_TX_GART_EN);
+	}
+}
+
+/* Enable or disable PCI GART on the chip */
+static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
+{
+	u32 tmp;
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) ||
+	    (dev_priv->flags & RADEON_IS_IGPGART)) {
+		radeon_set_igpgart(dev_priv, on);
+		return;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) {
+		rs600_set_igpgart(dev_priv, on);
+		return;
+	}
+
+	if (dev_priv->flags & RADEON_IS_PCIE) {
+		radeon_set_pciegart(dev_priv, on);
+		return;
+	}
+
+	tmp = RADEON_READ(RADEON_AIC_CNTL);
+
+	if (on) {
+		RADEON_WRITE(RADEON_AIC_CNTL,
+			     tmp | RADEON_PCIGART_TRANSLATE_EN);
+
+		/* set PCI GART page-table base address
+		 */
+		RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);
+
+		/* set address range for PCI address translate
+		 */
+		RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
+		RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start
+			     + dev_priv->gart_size - 1);
+
+		/* Turn off AGP aperture -- is this required for PCI GART?
+		 */
+		radeon_write_agp_location(dev_priv, 0xffffffc0);
+		RADEON_WRITE(RADEON_AGP_COMMAND, 0);	/* clear AGP_COMMAND */
+	} else {
+		RADEON_WRITE(RADEON_AIC_CNTL,
+			     tmp & ~RADEON_PCIGART_TRANSLATE_EN);
+	}
+}
+
+static int radeon_setup_pcigart_surface(drm_radeon_private_t *dev_priv)
+{
+	struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info;
+	struct radeon_virt_surface *vp;
+	int i;
+
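+	/* Cover the GART table with a surface entry whose flags are zero,
+	 * presumably so that no tiling or byte-swapping surface setup can
+	 * apply to accesses that fall inside the table's address range.
+	 */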
+	for (i = 0; i < RADEON_MAX_SURFACES * 2; i++) {
+		if (!dev_priv->virt_surfaces[i].file_priv ||
+		    dev_priv->virt_surfaces[i].file_priv == PCIGART_FILE_PRIV)
+			break;
+	}
+	if (i >= 2 * RADEON_MAX_SURFACES)
+		return -ENOMEM;
+	vp = &dev_priv->virt_surfaces[i];
+
+	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+		struct radeon_surface *sp = &dev_priv->surfaces[i];
+		if (sp->refcount)
+			continue;
+
+		vp->surface_index = i;
+		vp->lower = gart_info->bus_addr;
+		vp->upper = vp->lower + gart_info->table_size;
+		vp->flags = 0;
+		vp->file_priv = PCIGART_FILE_PRIV;
+
+		sp->refcount = 1;
+		sp->lower = vp->lower;
+		sp->upper = vp->upper;
+		sp->flags = 0;
+
+		RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, sp->flags);
+		RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * i, sp->lower);
+		RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * i, sp->upper);
+		return 0;
+	}
+
+	return -ENOMEM;
+}
+
+static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
+			     struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+
+	DRM_DEBUG("\n");
+
+	/* if we require new memory map but we don't have it fail */
+	if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
+		DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
+		DRM_DEBUG("Forcing AGP card to PCI mode\n");
+		dev_priv->flags &= ~RADEON_IS_AGP;
+	} else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
+		   && !init->is_pci) {
+		DRM_DEBUG("Restoring AGP flag\n");
+		dev_priv->flags |= RADEON_IS_AGP;
+	}
+
+	if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
+		DRM_ERROR("PCI GART memory not allocated!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->usec_timeout = init->usec_timeout;
+	if (dev_priv->usec_timeout < 1 ||
+	    dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
+		DRM_DEBUG("TIMEOUT problem!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	/* Enable vblank on CRTC1 for older X servers
+	 */
+	dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
+
+	switch(init->func) {
+	case RADEON_INIT_R200_CP:
+		dev_priv->microcode_version = UCODE_R200;
+		break;
+	case RADEON_INIT_R300_CP:
+		dev_priv->microcode_version = UCODE_R300;
+		break;
+	default:
+		dev_priv->microcode_version = UCODE_R100;
+	}
+
+	dev_priv->do_boxes = 0;
+	dev_priv->cp_mode = init->cp_mode;
+
+	/* We don't support anything other than bus-mastering ring mode,
+	 * but the ring can be in either AGP or PCI space for the ring
+	 * read pointer.
+	 */
+	if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
+	    (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
+		DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	switch (init->fb_bpp) {
+	case 16:
+		dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
+		break;
+	case 32:
+	default:
+		dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
+		break;
+	}
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->front_pitch = init->front_pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->back_pitch = init->back_pitch;
+
+	switch (init->depth_bpp) {
+	case 16:
+		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
+		break;
+	case 32:
+	default:
+		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
+		break;
+	}
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->depth_pitch = init->depth_pitch;
+
+	/* Hardware state for depth clears.  Remove this if/when we no
+	 * longer clear the depth buffer with a 3D rectangle.  Hard-code
+	 * all values to prevent unwanted 3D state from slipping through
+	 * and screwing with the clear operation.
+	 */
+	dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
+					   (dev_priv->color_fmt << 10) |
+					   (dev_priv->microcode_version ==
+					    UCODE_R100 ? RADEON_ZBLOCK16 : 0));
+
+	dev_priv->depth_clear.rb3d_zstencilcntl =
+	    (dev_priv->depth_fmt |
+	     RADEON_Z_TEST_ALWAYS |
+	     RADEON_STENCIL_TEST_ALWAYS |
+	     RADEON_STENCIL_S_FAIL_REPLACE |
+	     RADEON_STENCIL_ZPASS_REPLACE |
+	     RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);
+
+	dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
+					 RADEON_BFACE_SOLID |
+					 RADEON_FFACE_SOLID |
+					 RADEON_FLAT_SHADE_VTX_LAST |
+					 RADEON_DIFFUSE_SHADE_FLAT |
+					 RADEON_ALPHA_SHADE_FLAT |
+					 RADEON_SPECULAR_SHADE_FLAT |
+					 RADEON_FOG_SHADE_FLAT |
+					 RADEON_VTX_PIX_CENTER_OGL |
+					 RADEON_ROUND_MODE_TRUNC |
+					 RADEON_ROUND_PREC_8TH_PIX);
+
+	dev_priv->ring_offset = init->ring_offset;
+	dev_priv->ring_rptr_offset = init->ring_rptr_offset;
+	dev_priv->buffers_offset = init->buffers_offset;
+	dev_priv->gart_textures_offset = init->gart_textures_offset;
+
+	master_priv->sarea = drm_getsarea(dev);
+	if (!master_priv->sarea) {
+		DRM_ERROR("could not find sarea!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
+	if (!dev_priv->cp_ring) {
+		DRM_ERROR("could not find cp ring region!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
+	if (!dev_priv->ring_rptr) {
+		DRM_ERROR("could not find ring read pointer!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+	dev->agp_buffer_token = init->buffers_offset;
+	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+	if (!dev->agp_buffer_map) {
+		DRM_ERROR("could not find dma buffer region!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	if (init->gart_textures_offset) {
+		dev_priv->gart_textures =
+		    drm_core_findmap(dev, init->gart_textures_offset);
+		if (!dev_priv->gart_textures) {
+			DRM_ERROR("could not find GART texture region!\n");
+			radeon_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+	}
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		drm_core_ioremap_wc(dev_priv->cp_ring, dev);
+		drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
+		drm_core_ioremap_wc(dev->agp_buffer_map, dev);
+		if (!dev_priv->cp_ring->handle ||
+		    !dev_priv->ring_rptr->handle ||
+		    !dev->agp_buffer_map->handle) {
+			DRM_ERROR("could not find ioremap agp regions!\n");
+			radeon_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+	} else
+#endif
+	{
+		dev_priv->cp_ring->handle =
+			(void *)(unsigned long)dev_priv->cp_ring->offset;
+		dev_priv->ring_rptr->handle =
+			(void *)(unsigned long)dev_priv->ring_rptr->offset;
+		dev->agp_buffer_map->handle =
+			(void *)(unsigned long)dev->agp_buffer_map->offset;
+
+		DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
+			  dev_priv->cp_ring->handle);
+		DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
+			  dev_priv->ring_rptr->handle);
+		DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
+			  dev->agp_buffer_map->handle);
+	}
+
+	dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
+	dev_priv->fb_size =
+		((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
+		- dev_priv->fb_location;
+
+	dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
+					((dev_priv->front_offset
+					  + dev_priv->fb_location) >> 10));
+
+	dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
+				       ((dev_priv->back_offset
+					 + dev_priv->fb_location) >> 10));
+
+	dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
+					((dev_priv->depth_offset
+					  + dev_priv->fb_location) >> 10));
+
+	dev_priv->gart_size = init->gart_size;
+
+	/* Now let's set the memory map ... */
+	if (dev_priv->new_memmap) {
+		u32 base = 0;
+
+		DRM_INFO("Setting GART location based on new memory map\n");
+
+		/* If using AGP, try to locate the AGP aperture at the same
+		 * location in the card and on the bus, though we have to
+		 * align it down.
+		 */
+#if __OS_HAS_AGP
+		if (dev_priv->flags & RADEON_IS_AGP) {
+			base = dev->agp->base;
+			/* Check if valid */
+			if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
+			    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
+				DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
+					 dev->agp->base);
+				base = 0;
+			}
+		}
+#endif
+		/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
+		if (base == 0) {
+			base = dev_priv->fb_location + dev_priv->fb_size;
+			if (base < dev_priv->fb_location ||
+			    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
+				base = dev_priv->fb_location
+					- dev_priv->gart_size;
+		}
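+		/* The GART aperture start is aligned down to a 4 MB boundary
+		 * (0xffc00000 masks off the low 22 bits); for example, a base
+		 * of 0x08810000 would become 0x08800000.
+		 */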
+		dev_priv->gart_vm_start = base & 0xffc00000u;
+		if (dev_priv->gart_vm_start != base)
+			DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
+				 base, dev_priv->gart_vm_start);
+	} else {
+		DRM_INFO("Setting GART location based on old memory map\n");
+		dev_priv->gart_vm_start = dev_priv->fb_location +
+			RADEON_READ(RADEON_CONFIG_APER_SIZE);
+	}
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP)
+		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+						 - dev->agp->base
+						 + dev_priv->gart_vm_start);
+	else
+#endif
+		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+					- (unsigned long)dev->sg->virtual
+					+ dev_priv->gart_vm_start);
+
+	DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
+	DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
+	DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
+		  dev_priv->gart_buffers_offset);
+
+	dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
+	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+			      + init->ring_size / sizeof(u32));
+	dev_priv->ring.size = init->ring_size;
+	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+
+	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
+	dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
+
+	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
+	dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
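+	/* The mask below only works because the ring size is a power of two,
+	 * which the log2 values programmed into CP_RB_CNTL already require.
+	 */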
+	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
+
+	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		/* Turn off PCI GART */
+		radeon_set_pcigart(dev_priv, 0);
+	} else
+#endif
+	{
+		u32 sctrl;
+		int ret;
+
+		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
+		/* if we have an offset set from userspace */
+		if (dev_priv->pcigart_offset_set) {
+			dev_priv->gart_info.bus_addr =
+				(resource_size_t)dev_priv->pcigart_offset + dev_priv->fb_location;
+			dev_priv->gart_info.mapping.offset =
+			    dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
+			dev_priv->gart_info.mapping.size =
+			    dev_priv->gart_info.table_size;
+
+			drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
+			dev_priv->gart_info.addr =
+			    dev_priv->gart_info.mapping.handle;
+
+			if (dev_priv->flags & RADEON_IS_PCIE)
+				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
+			else
+				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
+			dev_priv->gart_info.gart_table_location =
+			    DRM_ATI_GART_FB;
+
+			DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
+				  dev_priv->gart_info.addr,
+				  dev_priv->pcigart_offset);
+		} else {
+			if (dev_priv->flags & RADEON_IS_IGPGART)
+				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
+			else
+				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
+			dev_priv->gart_info.gart_table_location =
+			    DRM_ATI_GART_MAIN;
+			dev_priv->gart_info.addr = NULL;
+			dev_priv->gart_info.bus_addr = 0;
+			if (dev_priv->flags & RADEON_IS_PCIE) {
+				DRM_ERROR
+				    ("Cannot use PCI Express without GART in FB memory\n");
+				radeon_do_cleanup_cp(dev);
+				return -EINVAL;
+			}
+		}
+
+		sctrl = RADEON_READ(RADEON_SURFACE_CNTL);
+		RADEON_WRITE(RADEON_SURFACE_CNTL, 0);
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+			ret = r600_page_table_init(dev);
+		else
+			ret = drm_ati_pcigart_init(dev, &dev_priv->gart_info);
+		RADEON_WRITE(RADEON_SURFACE_CNTL, sctrl);
+
+		if (!ret) {
+			DRM_ERROR("failed to init PCI GART!\n");
+			radeon_do_cleanup_cp(dev);
+			return -ENOMEM;
+		}
+
+		ret = radeon_setup_pcigart_surface(dev_priv);
+		if (ret) {
+			DRM_ERROR("failed to setup GART surface!\n");
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+				r600_page_table_cleanup(dev, &dev_priv->gart_info);
+			else
+				drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info);
+			radeon_do_cleanup_cp(dev);
+			return ret;
+		}
+
+		/* Turn on PCI GART */
+		radeon_set_pcigart(dev_priv, 1);
+	}
+
+	if (!dev_priv->me_fw) {
+		int err = radeon_cp_init_microcode(dev_priv);
+		if (err) {
+			DRM_ERROR("Failed to load firmware!\n");
+			radeon_do_cleanup_cp(dev);
+			return err;
+		}
+	}
+	radeon_cp_load_microcode(dev_priv);
+	radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
+
+	dev_priv->last_buf = 0;
+
+	radeon_do_engine_reset(dev);
+	radeon_test_writeback(dev_priv);
+
+	return 0;
+}
+
+static int radeon_do_cleanup_cp(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	/* Make sure interrupts are disabled here, because the uninstall ioctl
+	 * may not have been called from userspace, and after dev_private
+	 * is freed it's too late.
+	 */
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		if (dev_priv->cp_ring != NULL) {
+			drm_core_ioremapfree(dev_priv->cp_ring, dev);
+			dev_priv->cp_ring = NULL;
+		}
+		if (dev_priv->ring_rptr != NULL) {
+			drm_core_ioremapfree(dev_priv->ring_rptr, dev);
+			dev_priv->ring_rptr = NULL;
+		}
+		if (dev->agp_buffer_map != NULL) {
+			drm_core_ioremapfree(dev->agp_buffer_map, dev);
+			dev->agp_buffer_map = NULL;
+		}
+	} else
+#endif
+	{
+
+		if (dev_priv->gart_info.bus_addr) {
+			/* Turn off PCI GART */
+			radeon_set_pcigart(dev_priv, 0);
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)
+				r600_page_table_cleanup(dev, &dev_priv->gart_info);
+			else {
+				if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
+					DRM_ERROR("failed to cleanup PCI GART!\n");
+			}
+		}
+
+		if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
+		{
+			drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
+			dev_priv->gart_info.addr = NULL;
+		}
+	}
+	/* only clear to the start of flags */
+	memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
+
+	return 0;
+}
+
+/* This code will reinit the Radeon CP hardware after a resume from disk.
+ * AFAIK, it would be very difficult to pickle the state at suspend time, so
+ * here we make sure that all Radeon hardware initialisation is re-done without
+ * affecting running applications.
+ *
+ * Charl P. Botha <http://cpbotha.net>
+ */
+static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (!dev_priv) {
+		DRM_ERROR("Called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("Starting radeon_do_resume_cp()\n");
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		/* Turn off PCI GART */
+		radeon_set_pcigart(dev_priv, 0);
+	} else
+#endif
+	{
+		/* Turn on PCI GART */
+		radeon_set_pcigart(dev_priv, 1);
+	}
+
+	radeon_cp_load_microcode(dev_priv);
+	radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
+
+	dev_priv->have_z_offset = 0;
+	radeon_do_engine_reset(dev);
+	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
+
+	DRM_DEBUG("radeon_do_resume_cp() complete\n");
+
+	return 0;
+}
+
+int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_init_t *init = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (init->func == RADEON_INIT_R300_CP)
+		r300_init_reg_flags(dev);
+
+	switch (init->func) {
+	case RADEON_INIT_CP:
+	case RADEON_INIT_R200_CP:
+	case RADEON_INIT_R300_CP:
+		return radeon_do_init_cp(dev, init, file_priv);
+	case RADEON_INIT_R600_CP:
+		return r600_do_init_cp(dev, init, file_priv);
+	case RADEON_CLEANUP_CP:
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			return r600_do_cleanup_cp(dev);
+		else
+			return radeon_do_cleanup_cp(dev);
+	}
+
+	return -EINVAL;
+}
+
+int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (dev_priv->cp_running) {
+		DRM_DEBUG("while CP running\n");
+		return 0;
+	}
+	if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
+		DRM_DEBUG("called with bogus CP mode (%d)\n",
+			  dev_priv->cp_mode);
+		return 0;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		r600_do_cp_start(dev_priv);
+	else
+		radeon_do_cp_start(dev_priv);
+
+	return 0;
+}
+
+/* Stop the CP.  The engine must have been idled before calling this
+ * routine.
+ */
+int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_cp_stop_t *stop = data;
+	int ret;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv->cp_running)
+		return 0;
+
+	/* Flush any pending CP commands.  This ensures any outstanding
+	 * commands are executed by the engine before we turn it off.
+	 */
+	if (stop->flush) {
+		radeon_do_cp_flush(dev_priv);
+	}
+
+	/* If we fail to make the engine go idle, we return an error
+	 * code so that the DRM ioctl wrapper can try again.
+	 */
+	if (stop->idle) {
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			ret = r600_do_cp_idle(dev_priv);
+		else
+			ret = radeon_do_cp_idle(dev_priv);
+		if (ret)
+			return ret;
+	}
+
+	/* Finally, we can turn off the CP.  If the engine isn't idle,
+	 * we will get some dropped triangles as they won't be fully
+	 * rendered before the CP is shut down.
+	 */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		r600_do_cp_stop(dev_priv);
+	else
+		radeon_do_cp_stop(dev_priv);
+
+	/* Reset the engine */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		r600_do_engine_reset(dev);
+	else
+		radeon_do_engine_reset(dev);
+
+	return 0;
+}
+
+void radeon_do_release(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int i, ret;
+
+	if (dev_priv) {
+		if (dev_priv->cp_running) {
+			/* Stop the cp */
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+				while ((ret = r600_do_cp_idle(dev_priv)) != 0) {
+					DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
+#ifdef __linux__
+					schedule();
+#else
+					tsleep(&ret, PZERO, "rdnrel", 1);
+#endif
+				}
+			} else {
+				while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
+					DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
+#ifdef __linux__
+					schedule();
+#else
+					tsleep(&ret, PZERO, "rdnrel", 1);
+#endif
+				}
+			}
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+				r600_do_cp_stop(dev_priv);
+				r600_do_engine_reset(dev);
+			} else {
+				radeon_do_cp_stop(dev_priv);
+				radeon_do_engine_reset(dev);
+			}
+		}
+
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_R600) {
+			/* Disable *all* interrupts */
+			if (dev_priv->mmio)	/* remove this after permanent addmaps */
+				RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+
+			if (dev_priv->mmio) {	/* remove all surfaces */
+				for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+					RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
+					RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
+						     16 * i, 0);
+					RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
+						     16 * i, 0);
+				}
+			}
+		}
+
+		/* Free memory heap structures */
+		radeon_mem_takedown(&(dev_priv->gart_heap));
+		radeon_mem_takedown(&(dev_priv->fb_heap));
+
+		/* deallocate kernel resources */
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			r600_do_cleanup_cp(dev);
+		else
+			radeon_do_cleanup_cp(dev);
+		release_firmware(dev_priv->me_fw);
+		dev_priv->me_fw = NULL;
+		release_firmware(dev_priv->pfp_fw);
+		dev_priv->pfp_fw = NULL;
+	}
+}
+
+/* Just reset the CP ring.  Called as part of an X Server engine reset.
+ */
+int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv) {
+		DRM_DEBUG("called before init done\n");
+		return -EINVAL;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		r600_do_cp_reset(dev_priv);
+	else
+		radeon_do_cp_reset(dev_priv);
+
+	/* The CP is no longer running after an engine reset */
+	dev_priv->cp_running = 0;
+
+	return 0;
+}
+
+int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return r600_do_cp_idle(dev_priv);
+	else
+		return radeon_do_cp_idle(dev_priv);
+}
+
+/* Added by Charl P. Botha to call radeon_do_resume_cp().
+ */
+int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return r600_do_resume_cp(dev, file_priv);
+	else
+		return radeon_do_resume_cp(dev, file_priv);
+}
+
+int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return r600_do_engine_reset(dev);
+	else
+		return radeon_do_engine_reset(dev);
+}
+
+/* ================================================================
+ * Fullscreen mode
+ */
+
+/* KW: Deprecated to say the least:
+ */
+int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	return 0;
+}
+
+/* ================================================================
+ * Freelist management
+ */
+
+/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
+ *   bufs until freelist code is used.  Note this hides a problem with
+ *   the scratch register (used to keep track of the last buffer
+ *   completed) being written to before the last buffer has actually
+ *   completed rendering.
+ *
+ * KW:  It's also a good way to find free buffers quickly.
+ *
+ * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
+ * sleep.  However, bugs in older versions of radeon_accel.c mean that
+ * we essentially have to do this, else old clients will break.
+ *
+ * However, it does leave open a potential deadlock where all the
+ * buffers are held by other clients, which can't release them because
+ * they can't get the lock.
+ */
+
+struct drm_buf *radeon_freelist_get(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_buf_priv_t *buf_priv;
+	struct drm_buf *buf;
+	int i, t;
+	int start;
+
+	if (++dev_priv->last_buf >= dma->buf_count)
+		dev_priv->last_buf = 0;
+
+	start = dev_priv->last_buf;
+
+	for (t = 0; t < dev_priv->usec_timeout; t++) {
+		u32 done_age = GET_SCRATCH(dev_priv, 1);
+		DRM_DEBUG("done_age = %d\n", done_age);
+		for (i = 0; i < dma->buf_count; i++) {
+			buf = dma->buflist[start];
+			buf_priv = buf->dev_private;
+			if (buf->file_priv == NULL || (buf->pending &&
+						       buf_priv->age <=
+						       done_age)) {
+				dev_priv->stats.requested_bufs++;
+				buf->pending = 0;
+				return buf;
+			}
+			if (++start >= dma->buf_count)
+				start = 0;
+		}
+
+		if (t) {
+			DRM_UDELAY(1);
+			dev_priv->stats.freelist_loops++;
+		}
+	}
+
+	return NULL;
+}
+
+void radeon_freelist_reset(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	dev_priv->last_buf = 0;
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+		buf_priv->age = 0;
+	}
+}
+
+/* ================================================================
+ * CP command submission
+ */
+
+int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
+{
+	drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
+	int i;
+	u32 last_head = GET_RING_HEAD(dev_priv);
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		u32 head = GET_RING_HEAD(dev_priv);
+
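+		/* Free space is the gap from our tail up to the hardware's
+		 * read head, wrapped around the ring; e.g. in a 64 KB ring
+		 * (16384 dwords), head = 4096 and tail = 8192 leave
+		 * (4096 - 8192) * 4 + 65536 = 49152 bytes of space.
+		 */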
+		ring->space = (head - ring->tail) * sizeof(u32);
+		if (ring->space <= 0)
+			ring->space += ring->size;
+		if (ring->space > n)
+			return 0;
+
+		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+		if (head != last_head)
+			i = 0;
+		last_head = head;
+
+		DRM_UDELAY(1);
+	}
+
+	/* FIXME: This return value is ignored in the BEGIN_RING macro! */
+#if RADEON_FIFO_DEBUG
+	radeon_status(dev_priv);
+	DRM_ERROR("failed!\n");
+#endif
+	return -EBUSY;
+}
+
+static int radeon_cp_get_buffers(struct drm_device *dev,
+				 struct drm_file *file_priv,
+				 struct drm_dma * d)
+{
+	int i;
+	struct drm_buf *buf;
+
+	for (i = d->granted_count; i < d->request_count; i++) {
+		buf = radeon_freelist_get(dev);
+		if (!buf)
+			return -EBUSY;	/* NOTE: broken client */
+
+		buf->file_priv = file_priv;
+
+		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+				     sizeof(buf->idx)))
+			return -EFAULT;
+		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+				     sizeof(buf->total)))
+			return -EFAULT;
+
+		d->granted_count++;
+	}
+	return 0;
+}
+
+int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int ret = 0;
+	struct drm_dma *d = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* Please don't send us buffers.
+	 */
+	if (d->send_count != 0) {
+		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
+			  DRM_CURRENTPID, d->send_count);
+		return -EINVAL;
+	}
+
+	/* We'll send you buffers.
+	 */
+	if (d->request_count < 0 || d->request_count > dma->buf_count) {
+		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+			  DRM_CURRENTPID, d->request_count, dma->buf_count);
+		return -EINVAL;
+	}
+
+	d->granted_count = 0;
+
+	if (d->request_count) {
+		ret = radeon_cp_get_buffers(dev, file_priv, d);
+	}
+
+	return ret;
+}
+
+int radeon_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	drm_radeon_private_t *dev_priv;
+	int ret = 0;
+
+	dev_priv = kzalloc(sizeof(drm_radeon_private_t), GFP_KERNEL);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	dev->dev_private = (void *)dev_priv;
+	dev_priv->flags = flags;
+
+	switch (flags & RADEON_FAMILY_MASK) {
+	case CHIP_R100:
+	case CHIP_RV200:
+	case CHIP_R200:
+	case CHIP_R300:
+	case CHIP_R350:
+	case CHIP_R420:
+	case CHIP_R423:
+	case CHIP_RV410:
+	case CHIP_RV515:
+	case CHIP_R520:
+	case CHIP_RV570:
+	case CHIP_R580:
+		dev_priv->flags |= RADEON_HAS_HIERZ;
+		break;
+	default:
+		/* all other chips have no hierarchical z buffer */
+		break;
+	}
+
+	pci_set_master(dev->pdev);
+
+	if (drm_pci_device_is_agp(dev))
+		dev_priv->flags |= RADEON_IS_AGP;
+	else if (pci_is_pcie(dev->pdev))
+		dev_priv->flags |= RADEON_IS_PCIE;
+	else
+		dev_priv->flags |= RADEON_IS_PCI;
+
+	ret = drm_addmap(dev, pci_resource_start(dev->pdev, 2),
+			 pci_resource_len(dev->pdev, 2), _DRM_REGISTERS,
+			 _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
+	if (ret != 0)
+		return ret;
+
+	ret = drm_vblank_init(dev, 2);
+	if (ret) {
+		radeon_driver_unload(dev);
+		return ret;
+	}
+
+	DRM_DEBUG("%s card detected\n",
+		  ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
+	return ret;
+}
+
+int radeon_master_create(struct drm_device *dev, struct drm_master *master)
+{
+	struct drm_radeon_master_private *master_priv;
+	unsigned long sareapage;
+	int ret;
+
+	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
+	if (!master_priv)
+		return -ENOMEM;
+
+	/* prebuild the SAREA */
+	sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
+	ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
+			 &master_priv->sarea);
+	if (ret) {
+		DRM_ERROR("SAREA setup failed\n");
+		kfree(master_priv);
+		return ret;
+	}
+	master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
+	master_priv->sarea_priv->pfCurrentPage = 0;
+
+	master->driver_priv = master_priv;
+	return 0;
+}
+
+void radeon_master_destroy(struct drm_device *dev, struct drm_master *master)
+{
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+
+	if (!master_priv)
+		return;
+
+	if (master_priv->sarea_priv &&
+	    master_priv->sarea_priv->pfCurrentPage != 0)
+		radeon_cp_dispatch_flip(dev, master);
+
+	master_priv->sarea_priv = NULL;
+	if (master_priv->sarea)
+		drm_rmmap_locked(dev, master_priv->sarea);
+
+	kfree(master_priv);
+
+	master->driver_priv = NULL;
+}
+
+/* Create mappings for registers and framebuffer so userland doesn't necessarily
+ * have to find them.
+ */
+int radeon_driver_firstopen(struct drm_device *dev)
+{
+	int ret;
+	drm_local_map_t *map;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
+
+	dev_priv->fb_aper_offset = pci_resource_start(dev->pdev, 0);
+	ret = drm_addmap(dev, dev_priv->fb_aper_offset,
+			 pci_resource_len(dev->pdev, 0), _DRM_FRAME_BUFFER,
+			 _DRM_WRITE_COMBINING, &map);
+	if (ret != 0)
+		return ret;
+
+	return 0;
+}
+
+int radeon_driver_unload(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+
+	drm_rmmap(dev, dev_priv->mmio);
+
+	kfree(dev_priv);
+
+	dev->dev_private = NULL;
+	return 0;
+}
+
+void radeon_commit_ring(drm_radeon_private_t *dev_priv)
+{
+	int i;
+	u32 *ring;
+	int tail_aligned;
+
+	/* check if the ring is padded out to 16-dword alignment */
+
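+	/* For example, with RADEON_RING_ALIGN = 16 (as the comment above
+	 * suggests), a tail of 35 gives tail_aligned = 3, so 13 CP_PACKET2
+	 * no-ops are written to pad the tail out to dword 48.
+	 */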
+	tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1);
+	if (tail_aligned) {
+		int num_p2 = RADEON_RING_ALIGN - tail_aligned;
+
+		ring = dev_priv->ring.start;
+		/* pad with some CP_PACKET2 */
+		for (i = 0; i < num_p2; i++)
+			ring[dev_priv->ring.tail + i] = CP_PACKET2();
+
+		dev_priv->ring.tail += i;
+
+		dev_priv->ring.space -= num_p2 * sizeof(u32);
+	}
+
+	dev_priv->ring.tail &= dev_priv->ring.tail_mask;
+
+	DRM_MEMORYBARRIER();
+	GET_RING_HEAD(dev_priv);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+		RADEON_WRITE(R600_CP_RB_WPTR, dev_priv->ring.tail);
+		/* read from PCI bus to ensure correct posting */
+		RADEON_READ(R600_CP_RB_RPTR);
+	} else {
+		RADEON_WRITE(RADEON_CP_RB_WPTR, dev_priv->ring.tail);
+		/* read from PCI bus to ensure correct posting */
+		RADEON_READ(RADEON_CP_RB_RPTR);
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_cs.c b/linux-imx/drivers/gpu/drm/radeon/radeon_cs.c
new file mode 100644
index 0000000..60af3cd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_cs.c
@@ -0,0 +1,817 @@
+/*
+ * Copyright 2008 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+{
+	struct drm_device *ddev = p->rdev->ddev;
+	struct radeon_cs_chunk *chunk;
+	unsigned i, j;
+	bool duplicate;
+
+	if (p->chunk_relocs_idx == -1) {
+		return 0;
+	}
+	chunk = &p->chunks[p->chunk_relocs_idx];
+	p->dma_reloc_idx = 0;
+	/* FIXME: we assume that each reloc uses 4 dwords */
+	p->nrelocs = chunk->length_dw / 4;
+	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
+	if (p->relocs_ptr == NULL) {
+		return -ENOMEM;
+	}
+	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+	if (p->relocs == NULL) {
+		return -ENOMEM;
+	}
+	for (i = 0; i < p->nrelocs; i++) {
+		struct drm_radeon_cs_reloc *r;
+
+		duplicate = false;
+		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
+		for (j = 0; j < i; j++) {
+			if (r->handle == p->relocs[j].handle) {
+				p->relocs_ptr[i] = &p->relocs[j];
+				duplicate = true;
+				break;
+			}
+		}
+		if (duplicate) {
+			p->relocs[i].handle = 0;
+			continue;
+		}
+
+		p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
+							  r->handle);
+		if (p->relocs[i].gobj == NULL) {
+			DRM_ERROR("gem object lookup failed 0x%x\n",
+				  r->handle);
+			return -ENOENT;
+		}
+		p->relocs_ptr[i] = &p->relocs[i];
+		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+		p->relocs[i].lobj.bo = p->relocs[i].robj;
+		p->relocs[i].lobj.written = !!r->write_domain;
+
+		/* the first reloc of a UVD job is the msg and that must be in
+		   VRAM; also put everything into VRAM on AGP cards to avoid
+		   image corruption */
+		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
+		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
+			/* TODO: is this still needed for NI+ ? */
+			p->relocs[i].lobj.domain =
+				RADEON_GEM_DOMAIN_VRAM;
+
+			p->relocs[i].lobj.alt_domain =
+				RADEON_GEM_DOMAIN_VRAM;
+
+		} else {
+			uint32_t domain = r->write_domain ?
+				r->write_domain : r->read_domains;
+
+			if (domain & RADEON_GEM_DOMAIN_CPU) {
+				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
+					  "for command submission\n");
+				return -EINVAL;
+			}
+
+			p->relocs[i].lobj.domain = domain;
+			if (domain == RADEON_GEM_DOMAIN_VRAM)
+				domain |= RADEON_GEM_DOMAIN_GTT;
+			p->relocs[i].lobj.alt_domain = domain;
+		}
+
+		p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
+		p->relocs[i].handle = r->handle;
+
+		radeon_bo_list_add_object(&p->relocs[i].lobj,
+					  &p->validated);
+	}
+	return radeon_bo_list_validate(&p->validated, p->ring);
+}
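+
+/*
+ * Editor's note: the "4 dwords per reloc" assumption above reflects the
+ * userspace-visible reloc layout, roughly as sketched below (field names
+ * follow the radeon UAPI of this era; treat as illustrative).
+ */
+#if 0
+struct drm_radeon_cs_reloc_sketch {
+	uint32_t handle;	/* GEM handle of the buffer object */
+	uint32_t read_domains;	/* domains the BO may be read from */
+	uint32_t write_domain;	/* domain the BO may be written to */
+	uint32_t flags;
+};	/* 4 dwords, hence p->nrelocs = length_dw / 4 */
+#endif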
+
+static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
+{
+	p->priority = priority;
+
+	switch (ring) {
+	default:
+		DRM_ERROR("unknown ring id: %d\n", ring);
+		return -EINVAL;
+	case RADEON_CS_RING_GFX:
+		p->ring = RADEON_RING_TYPE_GFX_INDEX;
+		break;
+	case RADEON_CS_RING_COMPUTE:
+		if (p->rdev->family >= CHIP_TAHITI) {
+			if (p->priority > 0)
+				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
+			else
+				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
+		} else
+			p->ring = RADEON_RING_TYPE_GFX_INDEX;
+		break;
+	case RADEON_CS_RING_DMA:
+		if (p->rdev->family >= CHIP_CAYMAN) {
+			if (p->priority > 0)
+				p->ring = R600_RING_TYPE_DMA_INDEX;
+			else
+				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
+		} else if (p->rdev->family >= CHIP_R600) {
+			p->ring = R600_RING_TYPE_DMA_INDEX;
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_CS_RING_UVD:
+		p->ring = R600_RING_TYPE_UVD_INDEX;
+		break;
+	}
+	return 0;
+}
+
+static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
+{
+	int i;
+
+	for (i = 0; i < p->nrelocs; i++) {
+		if (!p->relocs[i].robj)
+			continue;
+
+		radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
+	}
+}
+
+/* XXX: note that this is called from the legacy UMS CS ioctl as well */
+int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+{
+	struct drm_radeon_cs *cs = data;
+	uint64_t *chunk_array_ptr;
+	unsigned size, i;
+	u32 ring = RADEON_CS_RING_GFX;
+	s32 priority = 0;
+
+	if (!cs->num_chunks) {
+		return 0;
+	}
+	/* get chunks */
+	INIT_LIST_HEAD(&p->validated);
+	p->idx = 0;
+	p->ib.sa_bo = NULL;
+	p->ib.semaphore = NULL;
+	p->const_ib.sa_bo = NULL;
+	p->const_ib.semaphore = NULL;
+	p->chunk_ib_idx = -1;
+	p->chunk_relocs_idx = -1;
+	p->chunk_flags_idx = -1;
+	p->chunk_const_ib_idx = -1;
+	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
+	if (p->chunks_array == NULL) {
+		return -ENOMEM;
+	}
+	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
+	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
+			       sizeof(uint64_t)*cs->num_chunks)) {
+		return -EFAULT;
+	}
+	p->cs_flags = 0;
+	p->nchunks = cs->num_chunks;
+	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
+	if (p->chunks == NULL) {
+		return -ENOMEM;
+	}
+	for (i = 0; i < p->nchunks; i++) {
+		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
+		struct drm_radeon_cs_chunk user_chunk;
+		uint32_t __user *cdata;
+
+		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
+		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
+				       sizeof(struct drm_radeon_cs_chunk))) {
+			return -EFAULT;
+		}
+		p->chunks[i].length_dw = user_chunk.length_dw;
+		p->chunks[i].kdata = NULL;
+		p->chunks[i].chunk_id = user_chunk.chunk_id;
+		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
+			p->chunk_relocs_idx = i;
+		}
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+			p->chunk_ib_idx = i;
+			/* zero length IB isn't useful */
+			if (p->chunks[i].length_dw == 0)
+				return -EINVAL;
+		}
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+			p->chunk_const_ib_idx = i;
+			/* zero length CONST IB isn't useful */
+			if (p->chunks[i].length_dw == 0)
+				return -EINVAL;
+		}
+		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+			p->chunk_flags_idx = i;
+			/* zero length flags aren't useful */
+			if (p->chunks[i].length_dw == 0)
+				return -EINVAL;
+		}
+
+		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
+		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
+		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
+			size = p->chunks[i].length_dw * sizeof(uint32_t);
+			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
+			if (p->chunks[i].kdata == NULL) {
+				return -ENOMEM;
+			}
+			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
+					       p->chunks[i].user_ptr, size)) {
+				return -EFAULT;
+			}
+			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+				p->cs_flags = p->chunks[i].kdata[0];
+				if (p->chunks[i].length_dw > 1)
+					ring = p->chunks[i].kdata[1];
+				if (p->chunks[i].length_dw > 2)
+					priority = (s32)p->chunks[i].kdata[2];
+			}
+		}
+	}
+
+	/* these are KMS only */
+	if (p->rdev) {
+		if ((p->cs_flags & RADEON_CS_USE_VM) &&
+		    !p->rdev->vm_manager.enabled) {
+			DRM_ERROR("VM not active on asic!\n");
+			return -EINVAL;
+		}
+
+		if (radeon_cs_get_ring(p, ring, priority))
+			return -EINVAL;
+
+		/* we only support VM on some SI+ rings */
+		if ((p->rdev->asic->ring[p->ring].cs_parse == NULL) &&
+		   ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+			DRM_ERROR("Ring %d requires VM!\n", p->ring);
+			return -EINVAL;
+		}
+	}
+
+	/* deal with non-vm */
+	if ((p->chunk_ib_idx != -1) &&
+	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
+	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
+		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+			DRM_ERROR("cs IB too big: %d\n",
+				  p->chunks[p->chunk_ib_idx].length_dw);
+			return -EINVAL;
+		}
+		if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
+			p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+			p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+				kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+				kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+				p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
+				p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
+				return -ENOMEM;
+			}
+		}
+		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
+		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
+		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
+		p->chunks[p->chunk_ib_idx].last_page_index =
+			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
+	}
+
+	return 0;
+}
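+
+/*
+ * Editor's note: a sketch of the two-level indirection the init path above
+ * walks (assumed layout, for illustration only):
+ *
+ *   struct drm_radeon_cs          chunk pointer array      per-chunk headers
+ *   +------------+   chunks      +------------+
+ *   | num_chunks | ------------> | u64 ptr[0] | --> { chunk_id, length_dw,
+ *   | chunks     |               | u64 ptr[1] |       chunk_data (user ptr) }
+ *   +------------+               |    ...     |
+ *                                +------------+
+ *
+ * RELOCS and FLAGS chunks are copied to kernel memory (kdata) up front;
+ * the IB chunk stays in userspace and is paged in on demand.
+ */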
+
+/**
+ * radeon_cs_parser_fini() - clean parser states
+ * @parser:	parser structure holding parsing context.
+ * @error:	error number
+ *
+ * If error is set, back off the buffer reservations; otherwise fence the
+ * validated buffers. In both cases free the memory used by the parsing
+ * context.
+ **/
+static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+{
+	unsigned i;
+
+	if (!error) {
+		ttm_eu_fence_buffer_objects(&parser->validated,
+					    parser->ib.fence);
+	} else {
+		ttm_eu_backoff_reservation(&parser->validated);
+	}
+
+	if (parser->relocs != NULL) {
+		for (i = 0; i < parser->nrelocs; i++) {
+			if (parser->relocs[i].gobj)
+				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+		}
+	}
+	kfree(parser->track);
+	kfree(parser->relocs);
+	kfree(parser->relocs_ptr);
+	for (i = 0; i < parser->nchunks; i++) {
+		kfree(parser->chunks[i].kdata);
+		if ((parser->rdev->flags & RADEON_IS_AGP)) {
+			kfree(parser->chunks[i].kpage[0]);
+			kfree(parser->chunks[i].kpage[1]);
+		}
+	}
+	kfree(parser->chunks);
+	kfree(parser->chunks_array);
+	radeon_ib_free(parser->rdev, &parser->ib);
+	radeon_ib_free(parser->rdev, &parser->const_ib);
+}
+
+static int radeon_cs_ib_chunk(struct radeon_device *rdev,
+			      struct radeon_cs_parser *parser)
+{
+	struct radeon_cs_chunk *ib_chunk;
+	int r;
+
+	if (parser->chunk_ib_idx == -1)
+		return 0;
+
+	if (parser->cs_flags & RADEON_CS_USE_VM)
+		return 0;
+
+	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+	/* Copy the packet into the IB, the parser will read from the
+	 * input memory (cached) and write to the IB (which can be
+	 * uncached).
+	 */
+	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
+			   NULL, ib_chunk->length_dw * 4);
+	if (r) {
+		DRM_ERROR("Failed to get ib !\n");
+		return r;
+	}
+	parser->ib.length_dw = ib_chunk->length_dw;
+	r = radeon_cs_parse(rdev, parser->ring, parser);
+	if (r || parser->parser_error) {
+		DRM_ERROR("Invalid command stream !\n");
+		return r;
+	}
+	r = radeon_cs_finish_pages(parser);
+	if (r) {
+		DRM_ERROR("Invalid command stream !\n");
+		return r;
+	}
+	radeon_cs_sync_rings(parser);
+	r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+	if (r) {
+		DRM_ERROR("Failed to schedule IB !\n");
+	}
+	return r;
+}
+
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+				   struct radeon_vm *vm)
+{
+	struct radeon_device *rdev = parser->rdev;
+	struct radeon_bo_list *lobj;
+	struct radeon_bo *bo;
+	int r;
+
+	r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
+	if (r) {
+		return r;
+	}
+	list_for_each_entry(lobj, &parser->validated, tv.head) {
+		bo = lobj->bo;
+		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
+				 struct radeon_cs_parser *parser)
+{
+	struct radeon_cs_chunk *ib_chunk;
+	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	int r;
+
+	if (parser->chunk_ib_idx == -1)
+		return 0;
+	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
+		return 0;
+
+	if ((rdev->family >= CHIP_TAHITI) &&
+	    (parser->chunk_const_ib_idx != -1)) {
+		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
+			return -EINVAL;
+		}
+		r =  radeon_ib_get(rdev, parser->ring, &parser->const_ib,
+				   vm, ib_chunk->length_dw * 4);
+		if (r) {
+			DRM_ERROR("Failed to get const ib !\n");
+			return r;
+		}
+		parser->const_ib.is_const_ib = true;
+		parser->const_ib.length_dw = ib_chunk->length_dw;
+		/* Copy the packet into the IB */
+		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
+				       ib_chunk->length_dw * 4)) {
+			return -EFAULT;
+		}
+		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
+		if (r) {
+			return r;
+		}
+	}
+
+	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+		return -EINVAL;
+	}
+	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
+			   vm, ib_chunk->length_dw * 4);
+	if (r) {
+		DRM_ERROR("Failed to get ib !\n");
+		return r;
+	}
+	parser->ib.length_dw = ib_chunk->length_dw;
+	/* Copy the packet into the IB */
+	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
+			       ib_chunk->length_dw * 4)) {
+		return -EFAULT;
+	}
+	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
+	if (r) {
+		return r;
+	}
+
+	mutex_lock(&rdev->vm_manager.lock);
+	mutex_lock(&vm->mutex);
+	r = radeon_vm_alloc_pt(rdev, vm);
+	if (r) {
+		goto out;
+	}
+	r = radeon_bo_vm_update_pte(parser, vm);
+	if (r) {
+		goto out;
+	}
+	radeon_cs_sync_rings(parser);
+	radeon_ib_sync_to(&parser->ib, vm->fence);
+	radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
+		rdev, vm, parser->ring));
+
+	if ((rdev->family >= CHIP_TAHITI) &&
+	    (parser->chunk_const_ib_idx != -1)) {
+		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+	} else {
+		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+	}
+
+	if (!r) {
+		radeon_vm_fence(rdev, vm, parser->ib.fence);
+	}
+
+out:
+	radeon_vm_add_to_lru(rdev, vm);
+	mutex_unlock(&vm->mutex);
+	mutex_unlock(&rdev->vm_manager.lock);
+	return r;
+}
+
+static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
+{
+	if (r == -EDEADLK) {
+		r = radeon_gpu_reset(rdev);
+		if (!r)
+			r = -EAGAIN;
+	}
+	return r;
+}
+
+int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_cs_parser parser;
+	int r;
+
+	down_read(&rdev->exclusive_lock);
+	if (!rdev->accel_working) {
+		up_read(&rdev->exclusive_lock);
+		return -EBUSY;
+	}
+	/* initialize parser */
+	memset(&parser, 0, sizeof(struct radeon_cs_parser));
+	parser.filp = filp;
+	parser.rdev = rdev;
+	parser.dev = rdev->dev;
+	parser.family = rdev->family;
+	r = radeon_cs_parser_init(&parser, data);
+	if (r) {
+		DRM_ERROR("Failed to initialize parser !\n");
+		radeon_cs_parser_fini(&parser, r);
+		up_read(&rdev->exclusive_lock);
+		r = radeon_cs_handle_lockup(rdev, r);
+		return r;
+	}
+	r = radeon_cs_parser_relocs(&parser);
+	if (r) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("Failed to parse relocation %d!\n", r);
+		radeon_cs_parser_fini(&parser, r);
+		up_read(&rdev->exclusive_lock);
+		r = radeon_cs_handle_lockup(rdev, r);
+		return r;
+	}
+
+	if (parser.ring == R600_RING_TYPE_UVD_INDEX)
+		radeon_uvd_note_usage(rdev);
+
+	r = radeon_cs_ib_chunk(rdev, &parser);
+	if (r) {
+		goto out;
+	}
+	r = radeon_cs_ib_vm_chunk(rdev, &parser);
+	if (r) {
+		goto out;
+	}
+out:
+	radeon_cs_parser_fini(&parser, r);
+	up_read(&rdev->exclusive_lock);
+	r = radeon_cs_handle_lockup(rdev, r);
+	return r;
+}
+
+int radeon_cs_finish_pages(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+	int i;
+	int size = PAGE_SIZE;
+
+	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
+		if (i == ibc->last_page_index) {
+			size = (ibc->length_dw * 4) % PAGE_SIZE;
+			if (size == 0)
+				size = PAGE_SIZE;
+		}
+
+		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
+				       ibc->user_ptr + (i * PAGE_SIZE),
+				       size))
+			return -EFAULT;
+	}
+	return 0;
+}
+
+static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
+{
+	int new_page;
+	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+	int i;
+	int size = PAGE_SIZE;
+	bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
+		false : true;
+
+	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
+		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
+				       ibc->user_ptr + (i * PAGE_SIZE),
+				       PAGE_SIZE)) {
+			p->parser_error = -EFAULT;
+			return 0;
+		}
+	}
+
+	if (pg_idx == ibc->last_page_index) {
+		size = (ibc->length_dw * 4) % PAGE_SIZE;
+		if (size == 0)
+			size = PAGE_SIZE;
+	}
+
+	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
+	if (copy1)
+		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
+
+	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
+			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
+			       size)) {
+		p->parser_error = -EFAULT;
+		return 0;
+	}
+
+	/* copy to IB for non single case */
+	if (!copy1)
+		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
+
+	ibc->last_copied_page = pg_idx;
+	ibc->kpage_idx[new_page] = pg_idx;
+
+	return new_page;
+}
+
+u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+	u32 pg_idx, pg_offset;
+	u32 idx_value = 0;
+	int new_page;
+
+	pg_idx = (idx * 4) / PAGE_SIZE;
+	pg_offset = (idx * 4) % PAGE_SIZE;
+
+	if (ibc->kpage_idx[0] == pg_idx)
+		return ibc->kpage[0][pg_offset/4];
+	if (ibc->kpage_idx[1] == pg_idx)
+		return ibc->kpage[1][pg_offset/4];
+
+	new_page = radeon_cs_update_pages(p, pg_idx);
+	if (new_page < 0) {
+		p->parser_error = new_page;
+		return 0;
+	}
+
+	idx_value = ibc->kpage[new_page][pg_offset/4];
+	return idx_value;
+}
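+
+/*
+ * Editor's note: kpage[0]/kpage[1] above form a two-entry cache of IB
+ * pages copied from userspace; radeon_cs_update_pages() evicts whichever
+ * slot holds the older (lower-indexed) page.  A minimal sketch of that
+ * victim selection, for illustration only:
+ */
+#if 0
+static int pick_victim_slot(int kpage_idx0, int kpage_idx1)
+{
+	/* reuse the slot caching the lower page index */
+	return kpage_idx0 < kpage_idx1 ? 0 : 1;
+}
+#endif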
+
+/**
+ * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @p:		parser structure holding parsing context.
+ * @pkt:	where to store packet information
+ * @idx:	index of the packet header within the ib chunk
+ *
+ * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
+ * packet is bigger than the remaining ib size, or if the packet type is
+ * unknown.
+ **/
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+			   struct radeon_cs_packet *pkt,
+			   unsigned idx)
+{
+	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	struct radeon_device *rdev = p->rdev;
+	uint32_t header;
+
+	if (idx >= ib_chunk->length_dw) {
+		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+			  idx, ib_chunk->length_dw);
+		return -EINVAL;
+	}
+	header = radeon_get_ib_value(p, idx);
+	pkt->idx = idx;
+	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
+	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
+	pkt->one_reg_wr = 0;
+	switch (pkt->type) {
+	case RADEON_PACKET_TYPE0:
+		if (rdev->family < CHIP_R600) {
+			pkt->reg = R100_CP_PACKET0_GET_REG(header);
+			pkt->one_reg_wr =
+				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
+		} else
+			pkt->reg = R600_CP_PACKET0_GET_REG(header);
+		break;
+	case RADEON_PACKET_TYPE3:
+		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
+		break;
+	case RADEON_PACKET_TYPE2:
+		pkt->count = -1;
+		break;
+	default:
+		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+		return -EINVAL;
+	}
+	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+		return -EINVAL;
+	}
+	return 0;
+}
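+
+/*
+ * Editor's note: the header dword decoded above packs its fields roughly
+ * as sketched below (bit positions as commonly documented for the radeon
+ * CP; treat as illustrative):
+ *
+ *   [31:30] packet type (0, 2 or 3)
+ *   [29:16] count (payload dwords minus one)
+ *   type 0: low bits hold the register index (plus ONE_REG_WR on pre-r600)
+ *   type 3: [15:8] opcode
+ */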
+
+/**
+ * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
+ * @p:		structure holding the parser context.
+ *
+ * Check if the next packet is a type-3 NOP (relocation) packet.
+ **/
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet p3reloc;
+	int r;
+
+	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r)
+		return false;
+	if (p3reloc.type != RADEON_PACKET_TYPE3)
+		return false;
+	if (p3reloc.opcode != RADEON_PACKET3_NOP)
+		return false;
+	return true;
+}
+
+/**
+ * radeon_cs_dump_packet() - dump raw packet context
+ * @p:		structure holding the parser context.
+ * @pkt:	structure holding the packet.
+ *
+ * Used mostly for debugging and error reporting.
+ **/
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+			   struct radeon_cs_packet *pkt)
+{
+	volatile uint32_t *ib;
+	unsigned i;
+	unsigned idx;
+
+	ib = p->ib.ptr;
+	idx = pkt->idx;
+	for (i = 0; i <= (pkt->count + 1); i++, idx++)
+		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+}
+
+/**
+ * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
+ * @p:		parser structure holding parsing context.
+ * @cs_reloc:	where to store the resolved relocation pointer
+ * @nomm:	non-zero for the legacy (no memory management) path
+ *
+ * Check if the next packet is a relocation packet3 and, if so, resolve
+ * the relocation entry it references (computing the GPU offset directly
+ * from the reloc chunk on the legacy path).
+ **/
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+				struct radeon_cs_reloc **cs_reloc,
+				int nomm)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	struct radeon_cs_packet p3reloc;
+	unsigned idx;
+	int r;
+
+	if (p->chunk_relocs_idx == -1) {
+		DRM_ERROR("No relocation chunk !\n");
+		return -EINVAL;
+	}
+	*cs_reloc = NULL;
+	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r)
+		return r;
+	p->idx += p3reloc.count + 2;
+	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
+	    p3reloc.opcode != RADEON_PACKET3_NOP) {
+		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+			  p3reloc.idx);
+		radeon_cs_dump_packet(p, &p3reloc);
+		return -EINVAL;
+	}
+	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+	if (idx >= relocs_chunk->length_dw) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, relocs_chunk->length_dw);
+		radeon_cs_dump_packet(p, &p3reloc);
+		return -EINVAL;
+	}
+	/* FIXME: we assume reloc size is 4 dwords */
+	if (nomm) {
+		*cs_reloc = p->relocs;
+		(*cs_reloc)->lobj.gpu_offset =
+			(u64)relocs_chunk->kdata[idx + 3] << 32;
+		(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+	} else
+		*cs_reloc = p->relocs_ptr[(idx / 4)];
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_cursor.c b/linux-imx/drivers/gpu/drm/radeon/radeon_cursor.c
new file mode 100644
index 0000000..b097d5b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
+{
+	struct radeon_device *rdev = crtc->dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	uint32_t cur_lock;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
+		if (lock)
+			cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+		else
+			cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+		WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
+		if (lock)
+			cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
+		else
+			cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
+		WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+	} else {
+		cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
+		if (lock)
+			cur_lock |= RADEON_CUR_LOCK;
+		else
+			cur_lock &= ~RADEON_CUR_LOCK;
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
+	}
+}
+
+static void radeon_hide_cursor(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
+			   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+			   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
+			   (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+	} else {
+		u32 reg;
+		switch (radeon_crtc->crtc_id) {
+		case 0:
+			reg = RADEON_CRTC_GEN_CNTL;
+			break;
+		case 1:
+			reg = RADEON_CRTC2_GEN_CNTL;
+			break;
+		default:
+			return;
+		}
+		WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
+	}
+}
+
+static void radeon_show_cursor(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
+		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
+		       EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+		       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
+		WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
+		       (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+	} else {
+		switch (radeon_crtc->crtc_id) {
+		case 0:
+			WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+			break;
+		case 1:
+			WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+			break;
+		default:
+			return;
+		}
+
+		WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
+					  (RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
+			 ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
+	}
+}
+
+static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
+			      uint64_t gpu_addr)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+		       upper_32_bits(gpu_addr));
+		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+		       gpu_addr & 0xffffffff);
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		if (rdev->family >= CHIP_RV770) {
+			if (radeon_crtc->crtc_id)
+				WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+			else
+				WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+		}
+		WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+		       gpu_addr & 0xffffffff);
+	} else {
+		radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
+		/* offset is from DISP(2)_BASE_ADDRESS */
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
+	}
+}
+
+int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+			   struct drm_file *file_priv,
+			   uint32_t handle,
+			   uint32_t width,
+			   uint32_t height)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+	struct drm_gem_object *obj;
+	struct radeon_bo *robj;
+	uint64_t gpu_addr;
+	int ret;
+
+	if (!handle) {
+		/* turn off cursor */
+		radeon_hide_cursor(crtc);
+		obj = NULL;
+		goto unpin;
+	}
+
+	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+		return -EINVAL;
+	}
+
+	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
+		return -ENOENT;
+	}
+
+	robj = gem_to_radeon_bo(obj);
+	ret = radeon_bo_reserve(robj, false);
+	if (unlikely(ret != 0))
+		goto fail;
+	/* Only 27 bit offset for legacy cursor */
+	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+				       &gpu_addr);
+	radeon_bo_unreserve(robj);
+	if (ret)
+		goto fail;
+
+	radeon_crtc->cursor_width = width;
+	radeon_crtc->cursor_height = height;
+
+	radeon_lock_cursor(crtc, true);
+	radeon_set_cursor(crtc, obj, gpu_addr);
+	radeon_show_cursor(crtc);
+	radeon_lock_cursor(crtc, false);
+
+unpin:
+	if (radeon_crtc->cursor_bo) {
+		robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+		ret = radeon_bo_reserve(robj, false);
+		if (likely(ret == 0)) {
+			radeon_bo_unpin(robj);
+			radeon_bo_unreserve(robj);
+		}
+		drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+	}
+
+	radeon_crtc->cursor_bo = obj;
+	return 0;
+fail:
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
+}
+
+int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+			    int x, int y)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
+	int xorigin = 0, yorigin = 0;
+	int w = radeon_crtc->cursor_width;
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		/* avivo cursors are offset into the total surface */
+		x += crtc->x;
+		y += crtc->y;
+	}
+	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+	if (x < 0) {
+		xorigin = min(-x, CURSOR_WIDTH - 1);
+		x = 0;
+	}
+	if (y < 0) {
+		yorigin = min(-y, CURSOR_HEIGHT - 1);
+		y = 0;
+	}
+
+	/* fixed on DCE6 and newer */
+	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
+		int i = 0;
+		struct drm_crtc *crtc_p;
+
+		/*
+		 * avivo cursor image can't end on 128 pixel boundary or
+		 * go past the end of the frame if both crtcs are enabled
+		 *
+		 * NOTE: It is safe to access crtc->enabled of other crtcs
+		 * without holding either the mode_config lock or the other
+		 * crtc's lock as long as write access to this flag _always_
+		 * grabs all locks.
+		 */
+		list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
+			if (crtc_p->enabled)
+				i++;
+		}
+		if (i > 1) {
+			int cursor_end, frame_end;
+
+			cursor_end = x - xorigin + w;
+			frame_end = crtc->x + crtc->mode.crtc_hdisplay;
+			if (cursor_end >= frame_end) {
+				w = w - (cursor_end - frame_end);
+				if (!(frame_end & 0x7f))
+					w--;
+			} else {
+				if (!(cursor_end & 0x7f))
+					w--;
+			}
+			if (w <= 0) {
+				w = 1;
+				cursor_end = x - xorigin + w;
+				if (!(cursor_end & 0x7f)) {
+					x--;
+					WARN_ON_ONCE(x < 0);
+				}
+			}
+		}
+	}
+
+	radeon_lock_cursor(crtc, true);
+	if (ASIC_IS_DCE4(rdev)) {
+		WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
+		WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+		WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
+		       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
+		WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+		WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
+		       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+	} else {
+		if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
+			y *= 2;
+
+		WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
+		       (RADEON_CUR_LOCK
+			| (xorigin << 16)
+			| yorigin));
+		WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
+		       (RADEON_CUR_LOCK
+			| (x << 16)
+			| y));
+		/* offset is from DISP(2)_BASE_ADDRESS */
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
+								      (yorigin * 256)));
+	}
+	radeon_lock_cursor(crtc, false);
+
+	return 0;
+}
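+
+/*
+ * Editor's note: a worked example of the pre-DCE6 avivo workaround above.
+ * With a 64-pixel-wide cursor at x = 64 (xorigin = 0), cursor_end is
+ * 64 + 64 = 128, a multiple of 128 (cursor_end & 0x7f == 0), so the
+ * programmed width is trimmed to 63 to keep the cursor image from ending
+ * exactly on a 128-pixel boundary while more than one crtc is enabled.
+ */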
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_device.c b/linux-imx/drivers/gpu/drm/radeon/radeon_device.c
new file mode 100644
index 0000000..8df1525
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_device.c
@@ -0,0 +1,1549 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/console.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/efi.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+static const char radeon_family_name[][16] = {
+	"R100",
+	"RV100",
+	"RS100",
+	"RV200",
+	"RS200",
+	"R200",
+	"RV250",
+	"RS300",
+	"RV280",
+	"R300",
+	"R350",
+	"RV350",
+	"RV380",
+	"R420",
+	"R423",
+	"RV410",
+	"RS400",
+	"RS480",
+	"RS600",
+	"RS690",
+	"RS740",
+	"RV515",
+	"R520",
+	"RV530",
+	"RV560",
+	"RV570",
+	"R580",
+	"R600",
+	"RV610",
+	"RV630",
+	"RV670",
+	"RV620",
+	"RV635",
+	"RS780",
+	"RS880",
+	"RV770",
+	"RV730",
+	"RV710",
+	"RV740",
+	"CEDAR",
+	"REDWOOD",
+	"JUNIPER",
+	"CYPRESS",
+	"HEMLOCK",
+	"PALM",
+	"SUMO",
+	"SUMO2",
+	"BARTS",
+	"TURKS",
+	"CAICOS",
+	"CAYMAN",
+	"ARUBA",
+	"TAHITI",
+	"PITCAIRN",
+	"VERDE",
+	"OLAND",
+	"HAINAN",
+	"LAST",
+};
+
+/**
+ * radeon_program_register_sequence - program an array of registers.
+ *
+ * @rdev: radeon_device pointer
+ * @registers: pointer to the register array
+ * @array_size: size of the register array
+ *
+ * Programs an array of registers with AND and OR masks.
+ * This is a helper for setting golden registers.
+ */
+void radeon_program_register_sequence(struct radeon_device *rdev,
+				      const u32 *registers,
+				      const u32 array_size)
+{
+	u32 tmp, reg, and_mask, or_mask;
+	int i;
+
+	if (array_size % 3)
+		return;
+
+	for (i = 0; i < array_size; i += 3) {
+		reg = registers[i + 0];
+		and_mask = registers[i + 1];
+		or_mask = registers[i + 2];
+
+		if (and_mask == 0xffffffff) {
+			tmp = or_mask;
+		} else {
+			tmp = RREG32(reg);
+			tmp &= ~and_mask;
+			tmp |= or_mask;
+		}
+		WREG32(reg, tmp);
+	}
+}
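+
+/*
+ * Editor's note: a hypothetical golden-register table in the
+ * (reg, and_mask, or_mask) triplet format consumed above; an and_mask of
+ * 0xffffffff means "write or_mask verbatim", otherwise the and_mask bits
+ * are cleared before or_mask is set.  Offsets are made up for
+ * illustration.
+ */
+#if 0
+static const u32 example_golden_registers[] = {
+	0x9a10, 0xffffffff, 0x00018208,	/* plain write of or_mask */
+	0x3f90, 0xffff0000, 0x0000ff00,	/* clear high half, set bits 15:8 */
+};
+#endif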
+
+/**
+ * radeon_surface_init - Clear GPU surface registers.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clear GPU surface registers (r1xx-r5xx).
+ */
+void radeon_surface_init(struct radeon_device *rdev)
+{
+	/* FIXME: check this out */
+	if (rdev->family < CHIP_R600) {
+		int i;
+
+		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+			if (rdev->surface_regs[i].bo)
+				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+			else
+				radeon_clear_surface_reg(rdev, i);
+		}
+		/* enable surfaces */
+		WREG32(RADEON_SURFACE_CNTL, 0);
+	}
+}
+
+/*
+ * GPU scratch registers helpers function.
+ */
+/**
+ * radeon_scratch_init - Init scratch register driver information.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init CP scratch register driver information (r1xx-r5xx)
+ */
+void radeon_scratch_init(struct radeon_device *rdev)
+{
+	int i;
+
+	/* FIXME: check this out */
+	if (rdev->family < CHIP_R300) {
+		rdev->scratch.num_reg = 5;
+	} else {
+		rdev->scratch.num_reg = 7;
+	}
+	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		rdev->scratch.free[i] = true;
+		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
+	}
+}
+
+/**
+ * radeon_scratch_get - Allocate a scratch register
+ *
+ * @rdev: radeon_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Allocate a CP scratch register for use by the driver (all asics).
+ * Returns 0 on success or -EINVAL on failure.
+ */
+int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
+{
+	int i;
+
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		if (rdev->scratch.free[i]) {
+			rdev->scratch.free[i] = false;
+			*reg = rdev->scratch.reg[i];
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+/**
+ * radeon_scratch_free - Free a scratch register
+ *
+ * @rdev: radeon_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Free a CP scratch register allocated for use by the driver (all asics)
+ */
+void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
+{
+	int i;
+
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		if (rdev->scratch.reg[i] == reg) {
+			rdev->scratch.free[i] = true;
+			return;
+		}
+	}
+}
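+
+/*
+ * Editor's note: typical pairing of the two helpers above (illustrative
+ * fragment; error handling elided):
+ */
+#if 0
+	uint32_t scratch;
+
+	if (radeon_scratch_get(rdev, &scratch))
+		return -EINVAL;
+	/* ... have the CP write a token to the register, poll it ... */
+	radeon_scratch_free(rdev, scratch);
+#endif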
+
+/*
+ * radeon_wb_*()
+ * Writeback is the method by which the GPU updates special pages
+ * in memory with the status of certain GPU events (fences, ring pointers,
+ * etc.).
+ */
+
+/**
+ * radeon_wb_disable - Disable Writeback
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disables Writeback (all asics).  Used for suspend.
+ */
+void radeon_wb_disable(struct radeon_device *rdev)
+{
+	rdev->wb.enabled = false;
+}
+
+/**
+ * radeon_wb_fini - Disable Writeback and free memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disables Writeback and frees the Writeback memory (all asics).
+ * Used at driver shutdown.
+ */
+void radeon_wb_fini(struct radeon_device *rdev)
+{
+	radeon_wb_disable(rdev);
+	if (rdev->wb.wb_obj) {
+		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
+			radeon_bo_kunmap(rdev->wb.wb_obj);
+			radeon_bo_unpin(rdev->wb.wb_obj);
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+		}
+		radeon_bo_unref(&rdev->wb.wb_obj);
+		rdev->wb.wb = NULL;
+		rdev->wb.wb_obj = NULL;
+	}
+}
+
+/**
+ * radeon_wb_init - Init Writeback driver info and allocate memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes writeback and allocates the writeback memory (all asics).
+ * Used at driver startup.
+ * Returns 0 on success or an error code on failure.
+ */
+int radeon_wb_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->wb.wb_obj == NULL) {
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
+			return r;
+		}
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
+			radeon_wb_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+				&rdev->wb.gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+			radeon_wb_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+			radeon_wb_fini(rdev);
+			return r;
+		}
+	}
+
+	/* clear wb memory */
+	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
+	/* disable event_write fences */
+	rdev->wb.use_event = false;
+	/* disabled via module param */
+	if (radeon_no_wb == 1) {
+		rdev->wb.enabled = false;
+	} else {
+		if (rdev->flags & RADEON_IS_AGP) {
+			/* often unreliable on AGP */
+			rdev->wb.enabled = false;
+		} else if (rdev->family < CHIP_R300) {
+			/* often unreliable on pre-r300 */
+			rdev->wb.enabled = false;
+		} else {
+			rdev->wb.enabled = true;
+			/* event_write fences are only available on r600+ */
+			if (rdev->family >= CHIP_R600) {
+				rdev->wb.use_event = true;
+			}
+		}
+	}
+	/* always use writeback/events on NI, APUs */
+	if (rdev->family >= CHIP_PALM) {
+		rdev->wb.enabled = true;
+		rdev->wb.use_event = true;
+	}
+
+	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
+
+	return 0;
+}
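+
+/*
+ * Editor's note: the enablement logic above reduces to this summary
+ * (illustrative):
+ *
+ *   radeon_no_wb=1        -> WB off
+ *   AGP or pre-R300       -> WB off (historically unreliable)
+ *   R300 .. R5xx          -> WB on, event fences off
+ *   R600 and newer        -> WB on, event fences on
+ *   PALM and newer (APUs) -> WB and events forced on regardless
+ */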
+
+/**
+ * radeon_vram_location - try to find VRAM location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ * @base: base address at which to put VRAM
+ *
+ * This function tries to place VRAM at the base address provided as a
+ * parameter (which is so far either the PCI aperture address or, for IGP,
+ * the TOM base address).
+ *
+ * If there is not enough space to fit the invisible VRAM in the 32-bit
+ * address space, then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP and the AGP aperture doesn't allow us to have room
+ * for all the VRAM, then we restrict the VRAM to the PCI aperture size
+ * and print a warning.
+ *
+ * This function never fails; the worst case is limiting VRAM.
+ *
+ * Note: GTT start, end and size should be initialized before calling this
+ * function on AGP platforms.
+ *
+ * Note: we don't explicitly enforce VRAM start to be aligned on VRAM size;
+ * this shouldn't be a problem as we are using the PCI aperture as a
+ * reference.  Otherwise this would be needed for rv280, all r3xx, and all
+ * r4xx, but not IGP.
+ *
+ * Note: we use mc_vram_size because on some boards we need to program the
+ * mc to cover the whole aperture even if the VRAM size is smaller than
+ * the aperture size (Novell bug 204882, along with lots of Ubuntu ones).
+ *
+ * Note: when limiting vram it's safe to overwrite real_vram_size, because
+ * we are not in the case where real_vram_size is smaller than mc_vram_size
+ * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots
+ * of Ubuntu ones).
+ *
+ * Note: the IGP TOM addr should be the same as the aperture addr; we don't
+ * explicitly check for that though.
+ *
+ * FIXME: when reducing VRAM size align new size on power of 2.
+ */
+void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
+{
+	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
+
+	mc->vram_start = base;
+	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
+		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+		mc->real_vram_size = mc->aper_size;
+		mc->mc_vram_size = mc->aper_size;
+	}
+	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
+		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+		mc->real_vram_size = mc->aper_size;
+		mc->mc_vram_size = mc->aper_size;
+	}
+	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+	if (limit && limit < mc->real_vram_size)
+		mc->real_vram_size = limit;
+	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
+			mc->mc_vram_size >> 20, mc->vram_start,
+			mc->vram_end, mc->real_vram_size >> 20);
+}
+
+/**
+ * radeon_gtt_location - try to find GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function tries to place GTT before or after VRAM.
+ *
+ * If the GTT size is bigger than the space left, then we adjust the GTT
+ * size.  Thus this function never fails.
+ *
+ * FIXME: when reducing GTT size align new size on power of 2.
+ */
+void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_af, size_bf;
+
+	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+	size_bf = mc->vram_start & ~mc->gtt_base_align;
+	if (size_bf > size_af) {
+		if (mc->gtt_size > size_bf) {
+			dev_warn(rdev->dev, "limiting GTT\n");
+			mc->gtt_size = size_bf;
+		}
+		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
+	} else {
+		if (mc->gtt_size > size_af) {
+			dev_warn(rdev->dev, "limiting GTT\n");
+			mc->gtt_size = size_af;
+		}
+		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+	}
+	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
+			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
+}
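+
+/*
+ * Editor's note: the placement above yields one of two rough layouts in
+ * the MC address space (illustrative):
+ *
+ *   GTT before VRAM:  | GTT ........ | VRAM ........ |
+ *   GTT after VRAM:   | VRAM ....... | GTT ......... |
+ *
+ * whichever side of VRAM has more room wins, and GTT is shrunk to fit if
+ * necessary.
+ */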
+
+/*
+ * GPU helpers function.
+ */
+/**
+ * radeon_card_posted - check if the hw has already been initialized
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the asic has been initialized (all asics).
+ * Used at driver startup.
+ * Returns true if initialized or false if not.
+ */
+bool radeon_card_posted(struct radeon_device *rdev)
+{
+	uint32_t reg;
+
+	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
+	if (efi_enabled(EFI_BOOT) &&
+	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+	    (rdev->family < CHIP_R600))
+		return false;
+
+	if (ASIC_IS_NODCE(rdev))
+		goto check_memsize;
+
+	/* first check CRTCs */
+	if (ASIC_IS_DCE4(rdev)) {
+		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+		if (rdev->num_crtc >= 4) {
+			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+		}
+		if (rdev->num_crtc >= 6) {
+			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+		}
+		if (reg & EVERGREEN_CRTC_MASTER_EN)
+			return true;
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
+		      RREG32(AVIVO_D2CRTC_CONTROL);
+		if (reg & AVIVO_CRTC_EN) {
+			return true;
+		}
+	} else {
+		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
+		      RREG32(RADEON_CRTC2_GEN_CNTL);
+		if (reg & RADEON_CRTC_EN) {
+			return true;
+		}
+	}
+
+check_memsize:
+	/* then check MEM_SIZE, in case the crtcs are off */
+	if (rdev->family >= CHIP_R600)
+		reg = RREG32(R600_CONFIG_MEMSIZE);
+	else
+		reg = RREG32(RADEON_CONFIG_MEMSIZE);
+
+	if (reg)
+		return true;
+
+	return false;
+}
+
+/**
+ * radeon_update_bandwidth_info - update display bandwidth params
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Used when sclk/mclk are switched or display modes are set.
+ * params are used to calculate display watermarks (all asics)
+ */
+void radeon_update_bandwidth_info(struct radeon_device *rdev)
+{
+	fixed20_12 a;
+	u32 sclk = rdev->pm.current_sclk;
+	u32 mclk = rdev->pm.current_mclk;
+
+	/* sclk/mclk in Mhz */
+	a.full = dfixed_const(100);
+	rdev->pm.sclk.full = dfixed_const(sclk);
+	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+	rdev->pm.mclk.full = dfixed_const(mclk);
+	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		a.full = dfixed_const(16);
+		/* core_bandwidth = sclk(Mhz) * 16 */
+		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
+	}
+}
+
+/**
+ * radeon_boot_test_post_card - check and possibly initialize the hw
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the asic is initialized and if not, attempt to initialize
+ * it (all asics).
+ * Returns true if initialized or false if not.
+ */
+bool radeon_boot_test_post_card(struct radeon_device *rdev)
+{
+	if (radeon_card_posted(rdev))
+		return true;
+
+	if (rdev->bios) {
+		DRM_INFO("GPU not posted. posting now...\n");
+		if (rdev->is_atom_bios)
+			atom_asic_init(rdev->mode_info.atom_context);
+		else
+			radeon_combios_asic_init(rdev->ddev);
+		return true;
+	} else {
+		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+		return false;
+	}
+}
+
+/**
+ * radeon_dummy_page_init - init dummy page used by the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate the dummy page used by the driver (all asics).
+ * This dummy page is used by the driver as a filler for gart entries
+ * when pages are taken out of the GART
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int radeon_dummy_page_init(struct radeon_device *rdev)
+{
+	if (rdev->dummy_page.page)
+		return 0;
+	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
+	if (rdev->dummy_page.page == NULL)
+		return -ENOMEM;
+	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
+					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
+		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
+		__free_page(rdev->dummy_page.page);
+		rdev->dummy_page.page = NULL;
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * radeon_dummy_page_fini - free dummy page used by the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the dummy page used by the driver (all asics).
+ */
+void radeon_dummy_page_fini(struct radeon_device *rdev)
+{
+	if (rdev->dummy_page.page == NULL)
+		return;
+	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
+			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	__free_page(rdev->dummy_page.page);
+	rdev->dummy_page.page = NULL;
+}
+
+
+/* ATOM accessor methods */
+/*
+ * ATOM is an interpreted byte code stored in tables in the vbios.  The
+ * driver registers callbacks to access registers and the interpreter
+ * in the driver parses the tables and executes them to program specific
+ * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
+ * atombios.h, and atom.c
+ */
+
+/**
+ * cail_pll_read - read PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the PLL register.
+ */
+static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+	uint32_t r;
+
+	r = rdev->pll_rreg(rdev, reg);
+	return r;
+}
+
+/**
+ * cail_pll_write - write PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ * @val: value to write to the pll register
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+
+	rdev->pll_wreg(rdev, reg, val);
+}
+
+/**
+ * cail_mc_read - read MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MC register.
+ */
+static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+	uint32_t r;
+
+	r = rdev->mc_rreg(rdev, reg);
+	return r;
+}
+
+/**
+ * cail_mc_write - write MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ * @val: value to write to the MC register
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+
+	rdev->mc_wreg(rdev, reg, val);
+}
+
+/**
+ * cail_reg_write - write MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ * @val: value to write to the MMIO register
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+
+	WREG32(reg*4, val);
+}
+
+/**
+ * cail_reg_read - read MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MMIO register.
+ */
+static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+	uint32_t r;
+
+	r = RREG32(reg*4);
+	return r;
+}
+
+/**
+ * cail_ioreg_write - write IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ * @val: value to write to the IO register
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+
+	WREG32_IO(reg*4, val);
+}
+
+/**
+ * cail_ioreg_read - read IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the IO register.
+ */
+static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
+{
+	struct radeon_device *rdev = info->dev->dev_private;
+	uint32_t r;
+
+	r = RREG32_IO(reg*4);
+	return r;
+}
+
+/**
+ * radeon_atombios_init - init the driver info and callbacks for atombios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes the driver info and register access callbacks for the
+ * ATOM interpreter (r4xx+).
+ * Returns 0 on success, -ENOMEM on failure.
+ * Called at driver startup.
+ */
+int radeon_atombios_init(struct radeon_device *rdev)
+{
+	struct card_info *atom_card_info =
+	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
+
+	if (!atom_card_info)
+		return -ENOMEM;
+
+	rdev->mode_info.atom_card_info = atom_card_info;
+	atom_card_info->dev = rdev->ddev;
+	atom_card_info->reg_read = cail_reg_read;
+	atom_card_info->reg_write = cail_reg_write;
+	/* needed for iio ops */
+	if (rdev->rio_mem) {
+		atom_card_info->ioreg_read = cail_ioreg_read;
+		atom_card_info->ioreg_write = cail_ioreg_write;
+	} else {
+		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
+		atom_card_info->ioreg_read = cail_reg_read;
+		atom_card_info->ioreg_write = cail_reg_write;
+	}
+	atom_card_info->mc_read = cail_mc_read;
+	atom_card_info->mc_write = cail_mc_write;
+	atom_card_info->pll_read = cail_pll_read;
+	atom_card_info->pll_write = cail_pll_write;
+
+	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
+	if (!rdev->mode_info.atom_context) {
+		radeon_atombios_fini(rdev);
+		return -ENOMEM;
+	}
+
+	mutex_init(&rdev->mode_info.atom_context->mutex);
+	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
+	return 0;
+}
+
+/**
+ * radeon_atombios_fini - free the driver info and callbacks for atombios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the driver info and register access callbacks for the ATOM
+ * interpreter (r4xx+).
+ * Called at driver shutdown.
+ */
+void radeon_atombios_fini(struct radeon_device *rdev)
+{
+	if (rdev->mode_info.atom_context) {
+		kfree(rdev->mode_info.atom_context->scratch);
+	}
+	kfree(rdev->mode_info.atom_context);
+	rdev->mode_info.atom_context = NULL;
+	kfree(rdev->mode_info.atom_card_info);
+	rdev->mode_info.atom_card_info = NULL;
+}
+
+/* COMBIOS */
+/*
+ * COMBIOS is the bios format prior to ATOM. It provides
+ * command tables similar to ATOM, but doesn't have a unified
+ * parser.  See radeon_combios.c
+ */
+
+/**
+ * radeon_combios_init - init the driver info for combios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes the driver info for combios (r1xx-r3xx).
+ * Returns 0 on success.
+ * Called at driver startup.
+ */
+int radeon_combios_init(struct radeon_device *rdev)
+{
+	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
+	return 0;
+}
+
+/**
+ * radeon_combios_fini - free the driver info for combios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the driver info for combios (r1xx-r3xx).
+ * Called at driver shutdown.
+ */
+void radeon_combios_fini(struct radeon_device *rdev)
+{
+}
+
+/* if we get transitioned to only one device, take VGA back */
+/**
+ * radeon_vga_set_decode - enable/disable vga decode
+ *
+ * @cookie: radeon_device pointer
+ * @state: enable/disable vga decode
+ *
+ * Enable/disable vga decode (all asics).
+ * Returns VGA resource flags.
+ */
+static unsigned int radeon_vga_set_decode(void *cookie, bool state)
+{
+	struct radeon_device *rdev = cookie;
+	radeon_vga_set_state(rdev, state);
+	if (state)
+		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+	else
+		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+/**
+ * radeon_check_pot_argument - check that argument is a power of two
+ *
+ * @arg: value to check
+ *
+ * Validates that a certain argument is a power of two (all asics).
+ * Returns true if argument is valid.
+ */
+static bool radeon_check_pot_argument(int arg)
+{
+	return (arg & (arg - 1)) == 0;
+}
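+/*
+ * Informational, not driver logic: a power of two has exactly one bit
+ * set, so clearing the lowest set bit via (arg & (arg - 1)) must yield
+ * zero.  E.g. 64 & 63 == 0b1000000 & 0b0111111 == 0, while
+ * 96 & 95 == 0b1100000 & 0b1011111 == 0b1000000 != 0.  Note that 0 also
+ * passes the check, which keeps radeon_vram_limit == 0 (no limit) valid.
+ */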
+
+/**
+ * radeon_check_arguments - validate module params
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Validates certain module parameters and updates
+ * the associated values used by the driver (all asics).
+ */
+static void radeon_check_arguments(struct radeon_device *rdev)
+{
+	/* vramlimit must be a power of two */
+	if (!radeon_check_pot_argument(radeon_vram_limit)) {
+		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
+				radeon_vram_limit);
+		radeon_vram_limit = 0;
+	}
+
+	/* gtt size must be power of two and greater or equal to 32M */
+	if (radeon_gart_size < 32) {
+		dev_warn(rdev->dev, "gart size (%d) too small, forcing to 512M\n",
+				radeon_gart_size);
+		radeon_gart_size = 512;
+
+	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
+		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
+				radeon_gart_size);
+		radeon_gart_size = 512;
+	}
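+	/*
+	 * Informational: radeon_gart_size is in MiB; shifting left by 20
+	 * converts MiB to bytes (e.g. 512 << 20 == 0x20000000, i.e. 512 MiB).
+	 */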
+	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
+
+	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
+	switch (radeon_agpmode) {
+	case -1:
+	case 0:
+	case 1:
+	case 2:
+	case 4:
+	case 8:
+		break;
+	default:
+		dev_warn(rdev->dev, "invalid AGP mode %d (valid modes: "
+				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
+		radeon_agpmode = 0;
+		break;
+	}
+}
+
+/**
+ * radeon_switcheroo_quirk_long_wakeup - return true if a longer d3 delay
+ * is needed for wakeup.
+ *
+ * @pdev: pci dev pointer
+ */
+static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
+{
+	/* 6600m in a macbook pro */
+	if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+	    pdev->subsystem_device == 0x00e2) {
+		printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * radeon_switcheroo_set_state - set switcheroo state
+ *
+ * @pdev: pci dev pointer
+ * @state: vga switcheroo state
+ *
+ * Callback for the switcheroo driver.  Suspends or resumes the asic
+ * before it is powered down or after it is powered up using ACPI methods.
+ */
+static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+	if (state == VGA_SWITCHEROO_ON) {
+		unsigned d3_delay = dev->pdev->d3_delay;
+
+		printk(KERN_INFO "radeon: switched on\n");
+		/* don't suspend or resume card normally */
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
+			dev->pdev->d3_delay = 20;
+
+		radeon_resume_kms(dev);
+
+		dev->pdev->d3_delay = d3_delay;
+
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
+		drm_kms_helper_poll_enable(dev);
+	} else {
+		printk(KERN_INFO "radeon: switched off\n");
+		drm_kms_helper_poll_disable(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+		radeon_suspend_kms(dev, pmm);
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+	}
+}
+
+/**
+ * radeon_switcheroo_can_switch - see if switcheroo state can change
+ *
+ * @pdev: pci dev pointer
+ *
+ * Callback for the switcheroo driver.  Checks whether the switcheroo
+ * state can be changed.
+ * Returns true if the state can be changed, false if not.
+ */
+static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	bool can_switch;
+
+	spin_lock(&dev->count_lock);
+	can_switch = (dev->open_count == 0);
+	spin_unlock(&dev->count_lock);
+	return can_switch;
+}
+
+static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
+	.set_gpu_state = radeon_switcheroo_set_state,
+	.reprobe = NULL,
+	.can_switch = radeon_switcheroo_can_switch,
+};
+
+/**
+ * radeon_device_init - initialize the driver
+ *
+ * @rdev: radeon_device pointer
+ * @ddev: drm dev pointer
+ * @pdev: pci dev pointer
+ * @flags: driver flags
+ *
+ * Initializes the driver info and hw (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver startup.
+ */
+int radeon_device_init(struct radeon_device *rdev,
+		       struct drm_device *ddev,
+		       struct pci_dev *pdev,
+		       uint32_t flags)
+{
+	int r, i;
+	int dma_bits;
+
+	rdev->shutdown = false;
+	rdev->dev = &pdev->dev;
+	rdev->ddev = ddev;
+	rdev->pdev = pdev;
+	rdev->flags = flags;
+	rdev->family = flags & RADEON_FAMILY_MASK;
+	rdev->is_atom_bios = false;
+	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
+	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+	rdev->accel_working = false;
+	/* set up ring ids */
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		rdev->ring[i].idx = i;
+	}
+
+	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
+		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+		pdev->subsystem_vendor, pdev->subsystem_device);
+
+	/* mutex initializations are all done here so we
+	 * can call these functions again without locking issues */
+	mutex_init(&rdev->ring_lock);
+	mutex_init(&rdev->dc_hw_i2c_mutex);
+	atomic_set(&rdev->ih.lock, 0);
+	mutex_init(&rdev->gem.mutex);
+	mutex_init(&rdev->pm.mutex);
+	mutex_init(&rdev->gpu_clock_mutex);
+	init_rwsem(&rdev->pm.mclk_lock);
+	init_rwsem(&rdev->exclusive_lock);
+	init_waitqueue_head(&rdev->irq.vblank_queue);
+	r = radeon_gem_init(rdev);
+	if (r)
+		return r;
+	/* initialize vm here */
+	mutex_init(&rdev->vm_manager.lock);
+	/* Adjust VM size here.
+	 * Currently set to 4GB ((1 << 20) 4k pages).
+	 * Max GPUVM size for cayman and SI is 40 bits.
+	 */
+	rdev->vm_manager.max_pfn = 1 << 20;
+	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
+
+	/* Set asic functions */
+	r = radeon_asic_init(rdev);
+	if (r)
+		return r;
+	radeon_check_arguments(rdev);
+
+	/* all of the newer IGP chips have an internal gart.
+	 * However, some rs4xx report as AGP, so remove that flag here.
+	 */
+	if ((rdev->family >= CHIP_RS400) &&
+	    (rdev->flags & RADEON_IS_IGP)) {
+		rdev->flags &= ~RADEON_IS_AGP;
+	}
+
+	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
+		radeon_agp_disable(rdev);
+	}
+
+	/* Set the internal MC address mask
+	 * This is the max address of the GPU's
+	 * internal address space.
+	 */
+	if (rdev->family >= CHIP_CAYMAN)
+		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+	else if (rdev->family >= CHIP_CEDAR)
+		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
+	else
+		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
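+	/*
+	 * Informational: each mask is simply 2^n - 1 for an n-bit MC, so the
+	 * three cases above cover 1 TiB (40-bit), 64 GiB (36-bit) and
+	 * 4 GiB (32-bit) of GPU-internal address space respectively.
+	 */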
+
+	/* set DMA mask + need_dma32 flags.
+	 * PCIE - can handle 40-bits.
+	 * IGP - can handle 40-bits
+	 * AGP - generally dma32 is safest
+	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
+	 */
+	rdev->need_dma32 = false;
+	if (rdev->flags & RADEON_IS_AGP)
+		rdev->need_dma32 = true;
+	if ((rdev->flags & RADEON_IS_PCI) &&
+	    (rdev->family <= CHIP_RS740))
+		rdev->need_dma32 = true;
+
+	dma_bits = rdev->need_dma32 ? 32 : 40;
+	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+	if (r) {
+		rdev->need_dma32 = true;
+		dma_bits = 32;
+		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
+	}
+	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+	if (r) {
+		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
+	}
+
+	/* Registers mapping */
+	/* TODO: block userspace mapping of io register */
+	spin_lock_init(&rdev->mmio_idx_lock);
+	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
+	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
+	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
+	if (rdev->rmmio == NULL) {
+		return -ENOMEM;
+	}
+	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
+	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
+
+	/* io port mapping */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
+			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
+			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
+			break;
+		}
+	}
+	if (rdev->rio_mem == NULL)
+		DRM_ERROR("Unable to find PCI I/O BAR\n");
+
+	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
+	/* this will fail for cards that aren't VGA class devices, just
+	 * ignore it */
+	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
+	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
+
+	r = radeon_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
+
+	r = radeon_gem_debugfs_init(rdev);
+	if (r) {
+		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+	}
+
+	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
+		/* Acceleration is not working on this AGP card; try again
+		 * with a fallback to PCI or PCIE GART
+		 */
+		radeon_asic_reset(rdev);
+		radeon_fini(rdev);
+		radeon_agp_disable(rdev);
+		r = radeon_init(rdev);
+		if (r)
+			return r;
+	}
+	if (radeon_testing & 1) {
+		if (rdev->accel_working)
+			radeon_test_moves(rdev);
+		else
+			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
+	}
+	if (radeon_testing & 2) {
+		if (rdev->accel_working)
+			radeon_test_syncing(rdev);
+		else
+			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
+	}
+	if (radeon_benchmarking) {
+		if (rdev->accel_working)
+			radeon_benchmark(rdev, radeon_benchmarking);
+		else
+			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
+	}
+	return 0;
+}
+
+static void radeon_debugfs_remove_files(struct radeon_device *rdev);
+
+/**
+ * radeon_device_fini - tear down the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the driver info (all asics).
+ * Called at driver shutdown.
+ */
+void radeon_device_fini(struct radeon_device *rdev)
+{
+	DRM_INFO("radeon: finishing device.\n");
+	rdev->shutdown = true;
+	/* evict vram memory */
+	radeon_bo_evict_vram(rdev);
+	radeon_fini(rdev);
+	vga_switcheroo_unregister_client(rdev->pdev);
+	vga_client_register(rdev->pdev, NULL, NULL, NULL);
+	if (rdev->rio_mem)
+		pci_iounmap(rdev->pdev, rdev->rio_mem);
+	rdev->rio_mem = NULL;
+	iounmap(rdev->rmmio);
+	rdev->rmmio = NULL;
+	radeon_debugfs_remove_files(rdev);
+}
+
+
+/*
+ * Suspend & resume.
+ */
+/**
+ * radeon_suspend_kms - initiate device suspend
+ *
+ * @dev: drm dev pointer
+ * @state: suspend state
+ *
+ * Puts the hw in the suspend state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver suspend.
+ */
+int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
+{
+	struct radeon_device *rdev;
+	struct drm_crtc *crtc;
+	struct drm_connector *connector;
+	int i, r;
+	bool force_completion = false;
+
+	if (dev == NULL || dev->dev_private == NULL) {
+		return -ENODEV;
+	}
+	if (state.event == PM_EVENT_PRETHAW) {
+		return 0;
+	}
+	rdev = dev->dev_private;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	drm_kms_helper_poll_disable(dev);
+
+	/* turn off display hw */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	}
+
+	/* unpin the front buffers */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
+		struct radeon_bo *robj;
+
+		if (rfb == NULL || rfb->obj == NULL) {
+			continue;
+		}
+		robj = gem_to_radeon_bo(rfb->obj);
+		/* don't unpin kernel fb objects */
+		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
+			r = radeon_bo_reserve(robj, false);
+			if (r == 0) {
+				radeon_bo_unpin(robj);
+				radeon_bo_unreserve(robj);
+			}
+		}
+	}
+	/* evict vram memory */
+	radeon_bo_evict_vram(rdev);
+
+	mutex_lock(&rdev->ring_lock);
+	/* wait for gpu to finish processing current batch */
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		r = radeon_fence_wait_empty_locked(rdev, i);
+		if (r) {
+			/* delay GPU reset to resume */
+			force_completion = true;
+		}
+	}
+	if (force_completion) {
+		radeon_fence_driver_force_completion(rdev);
+	}
+	mutex_unlock(&rdev->ring_lock);
+
+	radeon_save_bios_scratch_regs(rdev);
+
+	radeon_pm_suspend(rdev);
+	radeon_suspend(rdev);
+	radeon_hpd_fini(rdev);
+	/* evict remaining vram memory */
+	radeon_bo_evict_vram(rdev);
+
+	radeon_agp_suspend(rdev);
+
+	pci_save_state(dev->pdev);
+	if (state.event == PM_EVENT_SUSPEND) {
+		/* Shut down the device */
+		pci_disable_device(dev->pdev);
+		pci_set_power_state(dev->pdev, PCI_D3hot);
+	}
+	console_lock();
+	radeon_fbdev_set_suspend(rdev, 1);
+	console_unlock();
+	return 0;
+}
+
+/**
+ * radeon_resume_kms - initiate device resume
+ *
+ * @dev: drm dev pointer
+ *
+ * Bring the hw back to operating state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver resume.
+ */
+int radeon_resume_kms(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct radeon_device *rdev = dev->dev_private;
+	int r;
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
+	console_lock();
+	pci_set_power_state(dev->pdev, PCI_D0);
+	pci_restore_state(dev->pdev);
+	if (pci_enable_device(dev->pdev)) {
+		console_unlock();
+		return -1;
+	}
+	/* resume AGP if in use */
+	radeon_agp_resume(rdev);
+	radeon_resume(rdev);
+
+	r = radeon_ib_ring_tests(rdev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
+
+	radeon_pm_resume(rdev);
+	radeon_restore_bios_scratch_regs(rdev);
+
+	radeon_fbdev_set_suspend(rdev, 0);
+	console_unlock();
+
+	/* init dig PHYs, disp eng pll */
+	if (rdev->is_atom_bios) {
+		radeon_atom_encoder_init(rdev);
+		radeon_atom_disp_eng_pll_init(rdev);
+		/* turn on the BL */
+		if (rdev->mode_info.bl_encoder) {
+			u8 bl_level = radeon_get_backlight_level(rdev,
+								 rdev->mode_info.bl_encoder);
+			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
+						   bl_level);
+		}
+	}
+	/* reset hpd state */
+	radeon_hpd_init(rdev);
+	/* blat the mode back in */
+	drm_helper_resume_force_mode(dev);
+	/* turn on display hw */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+	}
+
+	drm_kms_helper_poll_enable(dev);
+	return 0;
+}
+
+/**
+ * radeon_gpu_reset - reset the asic
+ *
+ * @rdev: radeon device pointer
+ *
+ * Attempts to reset the GPU if it has hung (all asics).
+ * Returns 0 for success or an error on failure.
+ */
+int radeon_gpu_reset(struct radeon_device *rdev)
+{
+	unsigned ring_sizes[RADEON_NUM_RINGS];
+	uint32_t *ring_data[RADEON_NUM_RINGS];
+
+	bool saved = false;
+
+	int i, r;
+	int resched;
+
+	down_write(&rdev->exclusive_lock);
+	radeon_save_bios_scratch_regs(rdev);
+	/* block TTM */
+	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+	radeon_suspend(rdev);
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
+						   &ring_data[i]);
+		if (ring_sizes[i]) {
+			saved = true;
+			dev_info(rdev->dev, "Saved %d dwords of commands "
+				 "on ring %d.\n", ring_sizes[i], i);
+		}
+	}
+
+retry:
+	r = radeon_asic_reset(rdev);
+	if (!r) {
+		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
+		radeon_resume(rdev);
+	}
+
+	radeon_restore_bios_scratch_regs(rdev);
+
+	if (!r) {
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			radeon_ring_restore(rdev, &rdev->ring[i],
+					    ring_sizes[i], ring_data[i]);
+			ring_sizes[i] = 0;
+			ring_data[i] = NULL;
+		}
+
+		r = radeon_ib_ring_tests(rdev);
+		if (r) {
+			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
+			if (saved) {
+				saved = false;
+				radeon_suspend(rdev);
+				goto retry;
+			}
+		}
+	} else {
+		radeon_fence_driver_force_completion(rdev);
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			kfree(ring_data[i]);
+		}
+	}
+
+	drm_helper_resume_force_mode(rdev->ddev);
+
+	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+	if (r) {
+		/* bad news, how do we tell userspace? */
+		dev_info(rdev->dev, "GPU reset failed\n");
+	}
+
+	up_write(&rdev->exclusive_lock);
+	return r;
+}
+
+
+/*
+ * Debugfs
+ */
+int radeon_debugfs_add_files(struct radeon_device *rdev,
+			     struct drm_info_list *files,
+			     unsigned nfiles)
+{
+	unsigned i;
+
+	for (i = 0; i < rdev->debugfs_count; i++) {
+		if (rdev->debugfs[i].files == files) {
+			/* Already registered */
+			return 0;
+		}
+	}
+
+	i = rdev->debugfs_count + 1;
+	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
+		DRM_ERROR("Reached maximum number of debugfs components.\n");
+		DRM_ERROR("Please report this so we can increase "
+		          "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
+		return -EINVAL;
+	}
+	rdev->debugfs[rdev->debugfs_count].files = files;
+	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
+	rdev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+	drm_debugfs_create_files(files, nfiles,
+				 rdev->ddev->control->debugfs_root,
+				 rdev->ddev->control);
+	drm_debugfs_create_files(files, nfiles,
+				 rdev->ddev->primary->debugfs_root,
+				 rdev->ddev->primary);
+#endif
+	return 0;
+}
+
+static void radeon_debugfs_remove_files(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	unsigned i;
+
+	for (i = 0; i < rdev->debugfs_count; i++) {
+		drm_debugfs_remove_files(rdev->debugfs[i].files,
+					 rdev->debugfs[i].num_files,
+					 rdev->ddev->control);
+		drm_debugfs_remove_files(rdev->debugfs[i].files,
+					 rdev->debugfs[i].num_files,
+					 rdev->ddev->primary);
+	}
+#endif
+}
+
+#if defined(CONFIG_DEBUG_FS)
+int radeon_debugfs_init(struct drm_minor *minor)
+{
+	return 0;
+}
+
+void radeon_debugfs_cleanup(struct drm_minor *minor)
+{
+}
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_display.c b/linux-imx/drivers/gpu/drm/radeon/radeon_display.c
new file mode 100644
index 0000000..a84de32
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_display.c
@@ -0,0 +1,1672 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include "atom.h"
+#include <asm/div64.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+static void avivo_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i;
+
+	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+	WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);
+
+	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+	WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
+	WREG32(AVIVO_DC_LUT_RW_MODE, 0);
+	WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);
+
+	WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
+	for (i = 0; i < 256; i++) {
+		WREG32(AVIVO_DC_LUT_30_COLOR,
+			     (radeon_crtc->lut_r[i] << 20) |
+			     (radeon_crtc->lut_g[i] << 10) |
+			     (radeon_crtc->lut_b[i] << 0));
+	}
+
+	WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
+}
+
+static void dce4_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i;
+
+	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
+	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+	for (i = 0; i < 256; i++) {
+		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+		       (radeon_crtc->lut_r[i] << 20) |
+		       (radeon_crtc->lut_g[i] << 10) |
+		       (radeon_crtc->lut_b[i] << 0));
+	}
+}
+
+static void dce5_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i;
+
+	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+
+	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+	WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
+	       NI_GRPH_PRESCALE_BYPASS);
+	WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
+	       NI_OVL_PRESCALE_BYPASS);
+	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
+	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+	for (i = 0; i < 256; i++) {
+		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+		       (radeon_crtc->lut_r[i] << 20) |
+		       (radeon_crtc->lut_g[i] << 10) |
+		       (radeon_crtc->lut_b[i] << 0));
+	}
+
+	WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+	WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+	WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+	WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
+		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
+	WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
+
+}
+
+static void legacy_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i;
+	uint32_t dac2_cntl;
+
+	dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+	if (radeon_crtc->crtc_id == 0)
+		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
+	else
+		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
+	WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+
+	WREG8(RADEON_PALETTE_INDEX, 0);
+	for (i = 0; i < 256; i++) {
+		WREG32(RADEON_PALETTE_30_DATA,
+			     (radeon_crtc->lut_r[i] << 20) |
+			     (radeon_crtc->lut_g[i] << 10) |
+			     (radeon_crtc->lut_b[i] << 0));
+	}
+}
+
+void radeon_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (!crtc->enabled)
+		return;
+
+	if (ASIC_IS_DCE5(rdev))
+		dce5_crtc_load_lut(crtc);
+	else if (ASIC_IS_DCE4(rdev))
+		dce4_crtc_load_lut(crtc);
+	else if (ASIC_IS_AVIVO(rdev))
+		avivo_crtc_load_lut(crtc);
+	else
+		legacy_crtc_load_lut(crtc);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			      u16 blue, int regno)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	radeon_crtc->lut_r[regno] = red >> 6;
+	radeon_crtc->lut_g[regno] = green >> 6;
+	radeon_crtc->lut_b[regno] = blue >> 6;
+}
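+/*
+ * Informational: the CRTC LUT entries written by the *_load_lut helpers
+ * above are 10 bits per component, so the 16-bit fbcon values are
+ * narrowed with ">> 6" here (65535 >> 6 == 1023) and widened back with
+ * "<< 6" in the getter below; only the 6 least significant bits are lost.
+ */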
+
+/** Gets the color ramps on behalf of fbcon */
+void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			      u16 *blue, int regno)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	*red = radeon_crtc->lut_r[regno] << 6;
+	*green = radeon_crtc->lut_g[regno] << 6;
+	*blue = radeon_crtc->lut_b[regno] << 6;
+}
+
+static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				  u16 *blue, uint32_t start, uint32_t size)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	int end = (start + size > 256) ? 256 : start + size;
+	int i;
+
+	/* userspace palettes are always correct as is */
+	for (i = start; i < end; i++) {
+		radeon_crtc->lut_r[i] = red[i] >> 6;
+		radeon_crtc->lut_g[i] = green[i] >> 6;
+		radeon_crtc->lut_b[i] = blue[i] >> 6;
+	}
+	radeon_crtc_load_lut(crtc);
+}
+
+static void radeon_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	kfree(radeon_crtc);
+}
+
+/*
+ * Handle unpin events outside the interrupt handler proper.
+ */
+static void radeon_unpin_work_func(struct work_struct *__work)
+{
+	struct radeon_unpin_work *work =
+		container_of(__work, struct radeon_unpin_work, work);
+	int r;
+
+	/* unpin of the old buffer */
+	r = radeon_bo_reserve(work->old_rbo, false);
+	if (likely(r == 0)) {
+		r = radeon_bo_unpin(work->old_rbo);
+		if (unlikely(r != 0)) {
+			DRM_ERROR("failed to unpin buffer after flip\n");
+		}
+		radeon_bo_unreserve(work->old_rbo);
+	} else {
+		DRM_ERROR("failed to reserve buffer after flip\n");
+	}
+
+	drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+	kfree(work);
+}
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	struct radeon_unpin_work *work;
+	unsigned long flags;
+	u32 update_pending;
+	int vpos, hpos;
+
+	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+	work = radeon_crtc->unpin_work;
+	if (work == NULL ||
+	    (work->fence && !radeon_fence_signaled(work->fence))) {
+		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+		return;
+	}
+	/* New pageflip, or just completion of a previous one? */
+	if (!radeon_crtc->deferred_flip_completion) {
+		/* do the flip (mmio) */
+		update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
+	} else {
+		/* This is just a completion of a flip queued in crtc
+		 * at last invocation. Make sure we go directly to
+		 * completion routine.
+		 */
+		update_pending = 0;
+		radeon_crtc->deferred_flip_completion = 0;
+	}
+
+	/* Has the pageflip already completed in crtc, or is it certain
+	 * to complete in this vblank?
+	 */
+	if (update_pending &&
+	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+							       &vpos, &hpos)) &&
+	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
+	     (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
+		/* crtc didn't flip in this target vblank interval,
+		 * but flip is pending in crtc. Based on the current
+		 * scanout position we know that the current frame is
+		 * (nearly) complete and the flip will (likely)
+		 * complete before the start of the next frame.
+		 */
+		update_pending = 0;
+	}
+	if (update_pending) {
+		/* crtc didn't flip in this target vblank interval,
+		 * but flip is pending in crtc. It will complete in
+		 * the next vblank interval, so complete the flip at
+		 * the next vblank irq.
+		 */
+		radeon_crtc->deferred_flip_completion = 1;
+		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+		return;
+	}
+
+	/* Pageflip (will be) certainly completed in this vblank. Clean up. */
+	radeon_crtc->unpin_work = NULL;
+
+	/* wakeup userspace */
+	if (work->event)
+		drm_send_vblank_event(rdev->ddev, crtc_id, work->event);
+
+	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+
+	drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+	radeon_fence_unref(&work->fence);
+	radeon_post_page_flip(work->rdev, work->crtc_id);
+	schedule_work(&work->work);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_framebuffer *old_radeon_fb;
+	struct radeon_framebuffer *new_radeon_fb;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	struct radeon_unpin_work *work;
+	unsigned long flags;
+	u32 tiling_flags, pitch_pixels;
+	u64 base;
+	int r;
+
+	work = kzalloc(sizeof(*work), GFP_KERNEL);
+	if (work == NULL)
+		return -ENOMEM;
+
+	work->event = event;
+	work->rdev = rdev;
+	work->crtc_id = radeon_crtc->crtc_id;
+	old_radeon_fb = to_radeon_framebuffer(crtc->fb);
+	new_radeon_fb = to_radeon_framebuffer(fb);
+	/* schedule unpin of the old buffer */
+	obj = old_radeon_fb->obj;
+	/* take a reference to the old object */
+	drm_gem_object_reference(obj);
+	rbo = gem_to_radeon_bo(obj);
+	work->old_rbo = rbo;
+	obj = new_radeon_fb->obj;
+	rbo = gem_to_radeon_bo(obj);
+
+	spin_lock(&rbo->tbo.bdev->fence_lock);
+	if (rbo->tbo.sync_obj)
+		work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
+	spin_unlock(&rbo->tbo.bdev->fence_lock);
+
+	INIT_WORK(&work->work, radeon_unpin_work_func);
+
+	/* We borrow the event spin lock for protecting unpin_work */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (radeon_crtc->unpin_work) {
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+		r = -EBUSY;
+		goto unlock_free;
+	}
+	radeon_crtc->unpin_work = work;
+	radeon_crtc->deferred_flip_completion = 0;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	/* pin the new buffer */
+	DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
+			 work->old_rbo, rbo);
+
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0)) {
+		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+		goto pflip_cleanup;
+	}
+	/* Only 27 bit offset for legacy CRTC */
+	r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
+				     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
+		r = -EINVAL;
+		DRM_ERROR("failed to pin new rbo buffer before flip\n");
+		goto pflip_cleanup;
+	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+
+	if (!ASIC_IS_AVIVO(rdev)) {
+		/* crtc offset is from display base addr not FB location */
+		base -= radeon_crtc->legacy_display_base_addr;
+		pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);
+
+		if (tiling_flags & RADEON_TILING_MACRO) {
+			if (ASIC_IS_R300(rdev)) {
+				base &= ~0x7ff;
+			} else {
+				int byteshift = fb->bits_per_pixel >> 4;
+				int tile_addr = (((crtc->y >> 3) * pitch_pixels +  crtc->x) >> (8 - byteshift)) << 11;
+				base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
+			}
+		} else {
+			int offset = crtc->y * pitch_pixels + crtc->x;
+			switch (fb->bits_per_pixel) {
+			case 8:
+			default:
+				offset *= 1;
+				break;
+			case 15:
+			case 16:
+				offset *= 2;
+				break;
+			case 24:
+				offset *= 3;
+				break;
+			case 32:
+				offset *= 4;
+				break;
+			}
+			base += offset;
+		}
+		base &= ~7;
+	}
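+	/*
+	 * Informational example for the non-tiled legacy path above: with
+	 * a 32 bpp framebuffer and pitch_pixels == 1024, panning to
+	 * (x, y) == (8, 2) adds (2 * 1024 + 8) * 4 == 8224 bytes to base,
+	 * which "base &= ~7" then keeps 8-byte aligned.
+	 */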
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	work->new_crtc_base = base;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	/* update crtc fb */
+	crtc->fb = fb;
+
+	r = drm_vblank_get(dev, radeon_crtc->crtc_id);
+	if (r) {
+		DRM_ERROR("failed to get vblank before flip\n");
+		goto pflip_cleanup1;
+	}
+
+	/* set the proper interrupt */
+	radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
+
+	return 0;
+
+pflip_cleanup1:
+	if (unlikely(radeon_bo_reserve(rbo, false) != 0)) {
+		DRM_ERROR("failed to reserve new rbo in error path\n");
+		goto pflip_cleanup;
+	}
+	if (unlikely(radeon_bo_unpin(rbo) != 0)) {
+		DRM_ERROR("failed to unpin new rbo in error path\n");
+	}
+	radeon_bo_unreserve(rbo);
+
+pflip_cleanup:
+	spin_lock_irqsave(&dev->event_lock, flags);
+	radeon_crtc->unpin_work = NULL;
+unlock_free:
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
+	radeon_fence_unref(&work->fence);
+	kfree(work);
+
+	return r;
+}
+
+static const struct drm_crtc_funcs radeon_crtc_funcs = {
+	.cursor_set = radeon_crtc_cursor_set,
+	.cursor_move = radeon_crtc_cursor_move,
+	.gamma_set = radeon_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = radeon_crtc_destroy,
+	.page_flip = radeon_crtc_page_flip,
+};
+
+static void radeon_crtc_init(struct drm_device *dev, int index)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc;
+	int i;
+
+	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+	if (radeon_crtc == NULL)
+		return;
+
+	drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
+	radeon_crtc->crtc_id = index;
+	rdev->mode_info.crtcs[index] = radeon_crtc;
+
+#if 0
+	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
+	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
+	radeon_crtc->mode_set.num_connectors = 0;
+#endif
+
+	for (i = 0; i < 256; i++) {
+		radeon_crtc->lut_r[i] = i << 2;
+		radeon_crtc->lut_g[i] = i << 2;
+		radeon_crtc->lut_b[i] = i << 2;
+	}
+
+	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
+		radeon_atombios_init_crtc(dev, radeon_crtc);
+	else
+		radeon_legacy_init_crtc(dev, radeon_crtc);
+}
+
+static const char *encoder_names[37] = {
+	"NONE",
+	"INTERNAL_LVDS",
+	"INTERNAL_TMDS1",
+	"INTERNAL_TMDS2",
+	"INTERNAL_DAC1",
+	"INTERNAL_DAC2",
+	"INTERNAL_SDVOA",
+	"INTERNAL_SDVOB",
+	"SI170B",
+	"CH7303",
+	"CH7301",
+	"INTERNAL_DVO1",
+	"EXTERNAL_SDVOA",
+	"EXTERNAL_SDVOB",
+	"TITFP513",
+	"INTERNAL_LVTM1",
+	"VT1623",
+	"HDMI_SI1930",
+	"HDMI_INTERNAL",
+	"INTERNAL_KLDSCP_TMDS1",
+	"INTERNAL_KLDSCP_DVO1",
+	"INTERNAL_KLDSCP_DAC1",
+	"INTERNAL_KLDSCP_DAC2",
+	"SI178",
+	"MVPU_FPGA",
+	"INTERNAL_DDI",
+	"VT1625",
+	"HDMI_SI1932",
+	"DP_AN9801",
+	"DP_DP501",
+	"INTERNAL_UNIPHY",
+	"INTERNAL_KLDSCP_LVTMA",
+	"INTERNAL_UNIPHY1",
+	"INTERNAL_UNIPHY2",
+	"NUTMEG",
+	"TRAVIS",
+	"INTERNAL_VCE"
+};
+
+static const char *hpd_names[6] = {
+	"HPD1",
+	"HPD2",
+	"HPD3",
+	"HPD4",
+	"HPD5",
+	"HPD6",
+};
+
+static void radeon_print_display_setup(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+	uint32_t devices;
+	int i = 0;
+
+	DRM_INFO("Radeon Display Connectors\n");
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		DRM_INFO("Connector %d:\n", i);
+		DRM_INFO("  %s\n", drm_get_connector_name(connector));
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
+		if (radeon_connector->ddc_bus) {
+			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+				 radeon_connector->ddc_bus->rec.mask_clk_reg,
+				 radeon_connector->ddc_bus->rec.mask_data_reg,
+				 radeon_connector->ddc_bus->rec.a_clk_reg,
+				 radeon_connector->ddc_bus->rec.a_data_reg,
+				 radeon_connector->ddc_bus->rec.en_clk_reg,
+				 radeon_connector->ddc_bus->rec.en_data_reg,
+				 radeon_connector->ddc_bus->rec.y_clk_reg,
+				 radeon_connector->ddc_bus->rec.y_data_reg);
+			if (radeon_connector->router.ddc_valid)
+				DRM_INFO("  DDC Router 0x%x/0x%x\n",
+					 radeon_connector->router.ddc_mux_control_pin,
+					 radeon_connector->router.ddc_mux_state);
+			if (radeon_connector->router.cd_valid)
+				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
+					 radeon_connector->router.cd_mux_control_pin,
+					 radeon_connector->router.cd_mux_state);
+		} else {
+			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
+		}
+		DRM_INFO("  Encoders:\n");
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			radeon_encoder = to_radeon_encoder(encoder);
+			devices = radeon_encoder->devices & radeon_connector->devices;
+			if (devices) {
+				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
+					DRM_INFO("    CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
+					DRM_INFO("    CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
+					DRM_INFO("    LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
+					DRM_INFO("    DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
+					DRM_INFO("    DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
+					DRM_INFO("    DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
+					DRM_INFO("    DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
+					DRM_INFO("    DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
+					DRM_INFO("    DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_TV1_SUPPORT)
+					DRM_INFO("    TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
+				if (devices & ATOM_DEVICE_CV_SUPPORT)
+					DRM_INFO("    CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
+			}
+		}
+		i++;
+	}
+}
+
+static bool radeon_setup_enc_conn(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	bool ret = false;
+
+	if (rdev->bios) {
+		if (rdev->is_atom_bios) {
+			ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
+			if (!ret)
+				ret = radeon_get_atom_connector_info_from_object_table(dev);
+		} else {
+			ret = radeon_get_legacy_connector_info_from_bios(dev);
+			if (!ret)
+				ret = radeon_get_legacy_connector_info_from_table(dev);
+		}
+	} else {
+		if (!ASIC_IS_AVIVO(rdev))
+			ret = radeon_get_legacy_connector_info_from_table(dev);
+	}
+	if (ret) {
+		radeon_setup_encoder_clones(dev);
+		radeon_print_display_setup(dev);
+	}
+
+	return ret;
+}
+
+int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+{
+	struct drm_device *dev = radeon_connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int ret = 0;
+
+	/* don't leak the edid if we already fetched it in detect() */
+	if (radeon_connector->edid)
+		goto got_edid;
+
+	/* on hw with routers, select right port */
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
+
+	if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+	    ENCODER_OBJECT_ID_NONE) {
+		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+		if (dig->dp_i2c_bus)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      &dig->dp_i2c_bus->adapter);
+	} else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+		   (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
+		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      &dig->dp_i2c_bus->adapter);
+		else if (radeon_connector->ddc_bus && !radeon_connector->edid)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      &radeon_connector->ddc_bus->adapter);
+	} else {
+		if (radeon_connector->ddc_bus && !radeon_connector->edid)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      &radeon_connector->ddc_bus->adapter);
+	}
+
+	if (!radeon_connector->edid) {
+		if (rdev->is_atom_bios) {
+			/* some laptops provide a hardcoded edid in rom for LCDs */
+			if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+			     (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
+				radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+		} else {
+			/* some servers provide a hardcoded edid in rom for KVMs */
+			radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+		}
+	}
+	if (radeon_connector->edid) {
+got_edid:
+		drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+		ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+		drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
+		return ret;
+	}
+	drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+	return 0;
+}
+
+/* avivo */
+static void avivo_get_fb_div(struct radeon_pll *pll,
+			     u32 target_clock,
+			     u32 post_div,
+			     u32 ref_div,
+			     u32 *fb_div,
+			     u32 *frac_fb_div)
+{
+	u32 tmp = post_div * ref_div;
+
+	tmp *= target_clock;
+	*fb_div = tmp / pll->reference_freq;
+	*frac_fb_div = tmp % pll->reference_freq;
+
+	if (*fb_div > pll->max_feedback_div)
+		*fb_div = pll->max_feedback_div;
+	else if (*fb_div < pll->min_feedback_div)
+		*fb_div = pll->min_feedback_div;
+}
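+/*
+ * Informational example (illustrative values): the VCO runs at
+ * reference_freq * fb_div / ref_div and the PLL output is VCO / post_div,
+ * so fb_div works out to target_clock * post_div * ref_div /
+ * reference_freq.  With reference_freq == 2700, target_clock == 15400,
+ * post_div == 4 and ref_div == 2, tmp == 123200, giving fb_div == 45
+ * with a remainder (raw frac_fb_div) of 1700.
+ */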
+
+static u32 avivo_get_post_div(struct radeon_pll *pll,
+			      u32 target_clock)
+{
+	u32 vco, post_div, tmp;
+
+	if (pll->flags & RADEON_PLL_USE_POST_DIV)
+		return pll->post_div;
+
+	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+		if (pll->flags & RADEON_PLL_IS_LCD)
+			vco = pll->lcd_pll_out_min;
+		else
+			vco = pll->pll_out_min;
+	} else {
+		if (pll->flags & RADEON_PLL_IS_LCD)
+			vco = pll->lcd_pll_out_max;
+		else
+			vco = pll->pll_out_max;
+	}
+
+	post_div = vco / target_clock;
+	tmp = vco % target_clock;
+
+	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+		if (tmp)
+			post_div++;
+	} else {
+		if (!tmp)
+			post_div--;
+	}
+
+	if (post_div > pll->max_post_div)
+		post_div = pll->max_post_div;
+	else if (post_div < pll->min_post_div)
+		post_div = pll->min_post_div;
+
+	return post_div;
+}
+
+#define MAX_TOLERANCE 10
+
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+			      u32 freq,
+			      u32 *dot_clock_p,
+			      u32 *fb_div_p,
+			      u32 *frac_fb_div_p,
+			      u32 *ref_div_p,
+			      u32 *post_div_p)
+{
+	u32 target_clock = freq / 10;
+	u32 post_div = avivo_get_post_div(pll, target_clock);
+	u32 ref_div = pll->min_ref_div;
+	u32 fb_div = 0, frac_fb_div = 0, tmp;
+
+	if (pll->flags & RADEON_PLL_USE_REF_DIV)
+		ref_div = pll->reference_div;
+
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+		avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
+		frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
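+		/*
+		 * Informational: frac_fb_div now holds hundredths of the
+		 * feedback divider; the two checks below round it to a
+		 * single decimal digit, e.g. 62 -> 6 (a 45.62 divider is
+		 * programmed as 45.6) and 95 -> 10, which carries into
+		 * fb_div.
+		 */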
+		if (frac_fb_div >= 5) {
+			frac_fb_div -= 5;
+			frac_fb_div = frac_fb_div / 10;
+			frac_fb_div++;
+		}
+		if (frac_fb_div >= 10) {
+			fb_div++;
+			frac_fb_div = 0;
+		}
+	} else {
+		while (ref_div <= pll->max_ref_div) {
+			avivo_get_fb_div(pll, target_clock, post_div, ref_div,
+					 &fb_div, &frac_fb_div);
+			if (frac_fb_div >= (pll->reference_freq / 2))
+				fb_div++;
+			frac_fb_div = 0;
+			tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
+			tmp = (tmp * 10000) / target_clock;
+
+			if (tmp >= (10000 - MAX_TOLERANCE) &&
+			    tmp <= (10000 + MAX_TOLERANCE))
+				break;
+			ref_div++;
+		}
+	}
+
+	*dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
+		(ref_div * post_div * 10);
+	*fb_div_p = fb_div;
+	*frac_fb_div_p = frac_fb_div;
+	*ref_div_p = ref_div;
+	*post_div_p = post_div;
+	DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+		      *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
+}
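+/*
+ * Informational: the dot clock computed above folds the fractional digit
+ * back in as reference_freq * (10 * fb_div + frac_fb_div) /
+ * (ref_div * post_div * 10); for the illustrative 45.6 example with
+ * reference_freq == 2700, ref_div == 2 and post_div == 4 this gives
+ * 2700 * 456 / 80 == 15390.
+ */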
+
+/* pre-avivo */
+static inline uint32_t radeon_div(uint64_t n, uint32_t d)
+{
+	uint64_t mod;
+
+	n += d / 2;
+
+	mod = do_div(n, d);
+	return n;
+}
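+/*
+ * Informational: adding d / 2 before dividing makes radeon_div() round
+ * to nearest instead of truncating, e.g. radeon_div(7, 2) evaluates
+ * (7 + 1) / 2 == 4.  do_div() divides n in place and returns the
+ * remainder, which is discarded here.
+ */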
+
+void radeon_compute_pll_legacy(struct radeon_pll *pll,
+			       uint64_t freq,
+			       uint32_t *dot_clock_p,
+			       uint32_t *fb_div_p,
+			       uint32_t *frac_fb_div_p,
+			       uint32_t *ref_div_p,
+			       uint32_t *post_div_p)
+{
+	uint32_t min_ref_div = pll->min_ref_div;
+	uint32_t max_ref_div = pll->max_ref_div;
+	uint32_t min_post_div = pll->min_post_div;
+	uint32_t max_post_div = pll->max_post_div;
+	uint32_t min_fractional_feed_div = 0;
+	uint32_t max_fractional_feed_div = 0;
+	uint32_t best_vco = pll->best_vco;
+	uint32_t best_post_div = 1;
+	uint32_t best_ref_div = 1;
+	uint32_t best_feedback_div = 1;
+	uint32_t best_frac_feedback_div = 0;
+	uint32_t best_freq = -1;
+	uint32_t best_error = 0xffffffff;
+	uint32_t best_vco_diff = 1;
+	uint32_t post_div;
+	u32 pll_out_min, pll_out_max;
+
+	DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
+	freq = freq * 1000;
+
+	if (pll->flags & RADEON_PLL_IS_LCD) {
+		pll_out_min = pll->lcd_pll_out_min;
+		pll_out_max = pll->lcd_pll_out_max;
+	} else {
+		pll_out_min = pll->pll_out_min;
+		pll_out_max = pll->pll_out_max;
+	}
+
+	if (pll_out_min > 64800)
+		pll_out_min = 64800;
+
+	if (pll->flags & RADEON_PLL_USE_REF_DIV)
+		min_ref_div = max_ref_div = pll->reference_div;
+	else {
+		while (min_ref_div < max_ref_div-1) {
+			uint32_t mid = (min_ref_div + max_ref_div) / 2;
+			uint32_t pll_in = pll->reference_freq / mid;
+			if (pll_in < pll->pll_in_min)
+				max_ref_div = mid;
+			else if (pll_in > pll->pll_in_max)
+				min_ref_div = mid;
+			else
+				break;
+		}
+	}
+
+	if (pll->flags & RADEON_PLL_USE_POST_DIV)
+		min_post_div = max_post_div = pll->post_div;
+
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+		min_fractional_feed_div = pll->min_frac_feedback_div;
+		max_fractional_feed_div = pll->max_frac_feedback_div;
+	}
+
+	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
+		uint32_t ref_div;
+
+		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+			continue;
+
+		/* legacy radeons only have a few post_divs */
+		if (pll->flags & RADEON_PLL_LEGACY) {
+			if ((post_div == 5) ||
+			    (post_div == 7) ||
+			    (post_div == 9) ||
+			    (post_div == 10) ||
+			    (post_div == 11) ||
+			    (post_div == 13) ||
+			    (post_div == 14) ||
+			    (post_div == 15))
+				continue;
+		}
+
+		for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
+			uint32_t feedback_div, current_freq = 0, error, vco_diff;
+			uint32_t pll_in = pll->reference_freq / ref_div;
+			uint32_t min_feed_div = pll->min_feedback_div;
+			uint32_t max_feed_div = pll->max_feedback_div + 1;
+
+			if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
+				continue;
+
+			while (min_feed_div < max_feed_div) {
+				uint32_t vco;
+				uint32_t min_frac_feed_div = min_fractional_feed_div;
+				uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
+				uint32_t frac_feedback_div;
+				uint64_t tmp;
+
+				feedback_div = (min_feed_div + max_feed_div) / 2;
+
+				tmp = (uint64_t)pll->reference_freq * feedback_div;
+				vco = radeon_div(tmp, ref_div);
+
+				if (vco < pll_out_min) {
+					min_feed_div = feedback_div + 1;
+					continue;
+				} else if (vco > pll_out_max) {
+					max_feed_div = feedback_div;
+					continue;
+				}
+
+				while (min_frac_feed_div < max_frac_feed_div) {
+					frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
+					tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
+					tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
+					current_freq = radeon_div(tmp, ref_div * post_div);
+
+					if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
+						if (freq < current_freq)
+							error = 0xffffffff;
+						else
+							error = freq - current_freq;
+					} else
+						error = abs(current_freq - freq);
+					vco_diff = abs(vco - best_vco);
+
+					if ((best_vco == 0 && error < best_error) ||
+					    (best_vco != 0 &&
+					     ((best_error > 100 && error < best_error - 100) ||
+					      (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
+						best_post_div = post_div;
+						best_ref_div = ref_div;
+						best_feedback_div = feedback_div;
+						best_frac_feedback_div = frac_feedback_div;
+						best_freq = current_freq;
+						best_error = error;
+						best_vco_diff = vco_diff;
+					} else if (current_freq == freq) {
+						if (best_freq == -1) {
+							best_post_div = post_div;
+							best_ref_div = ref_div;
+							best_feedback_div = feedback_div;
+							best_frac_feedback_div = frac_feedback_div;
+							best_freq = current_freq;
+							best_error = error;
+							best_vco_diff = vco_diff;
+						} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
+							   ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
+							best_post_div = post_div;
+							best_ref_div = ref_div;
+							best_feedback_div = feedback_div;
+							best_frac_feedback_div = frac_feedback_div;
+							best_freq = current_freq;
+							best_error = error;
+							best_vco_diff = vco_diff;
+						}
+					}
+					if (current_freq < freq)
+						min_frac_feed_div = frac_feedback_div + 1;
+					else
+						max_frac_feed_div = frac_feedback_div;
+				}
+				if (current_freq < freq)
+					min_feed_div = feedback_div + 1;
+				else
+					max_feed_div = feedback_div;
+			}
+		}
+	}
+
+	*dot_clock_p = best_freq / 10000;
+	*fb_div_p = best_feedback_div;
+	*frac_fb_div_p = best_frac_feedback_div;
+	*ref_div_p = best_ref_div;
+	*post_div_p = best_post_div;
+	DRM_DEBUG_KMS("%lld %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+		      (long long)freq,
+		      best_freq / 1000, best_feedback_div, best_frac_feedback_div,
+		      best_ref_div, best_post_div);
+
+}
+
+static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
+
+	if (radeon_fb->obj) {
+		drm_gem_object_unreference_unlocked(radeon_fb->obj);
+	}
+	drm_framebuffer_cleanup(fb);
+	kfree(radeon_fb);
+}
+
+static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+						  struct drm_file *file_priv,
+						  unsigned int *handle)
+{
+	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
+
+	return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs radeon_fb_funcs = {
+	.destroy = radeon_user_framebuffer_destroy,
+	.create_handle = radeon_user_framebuffer_create_handle,
+};
+
+int
+radeon_framebuffer_init(struct drm_device *dev,
+			struct radeon_framebuffer *rfb,
+			struct drm_mode_fb_cmd2 *mode_cmd,
+			struct drm_gem_object *obj)
+{
+	int ret;
+	rfb->obj = obj;
+	drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
+	ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+	if (ret) {
+		rfb->obj = NULL;
+		return ret;
+	}
+	return 0;
+}
+
+static struct drm_framebuffer *
+radeon_user_framebuffer_create(struct drm_device *dev,
+			       struct drm_file *file_priv,
+			       struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct radeon_framebuffer *radeon_fb;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+	if (obj ==  NULL) {
+		dev_err(&dev->pdev->dev, "No GEM object associated with handle 0x%08X, "
+			"can't create framebuffer\n", mode_cmd->handles[0]);
+		return ERR_PTR(-ENOENT);
+	}
+
+	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+	if (radeon_fb == NULL) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+	if (ret) {
+		kfree(radeon_fb);
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(ret);
+	}
+
+	return &radeon_fb->base;
+}
+
+static void radeon_output_poll_changed(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	radeon_fb_output_poll_changed(rdev);
+}
+
+static const struct drm_mode_config_funcs radeon_mode_funcs = {
+	.fb_create = radeon_user_framebuffer_create,
+	.output_poll_changed = radeon_output_poll_changed
+};
+
+static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
+{	{ 0, "driver" },
+	{ 1, "bios" },
+};
+
+static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
+{	{ TV_STD_NTSC, "ntsc" },
+	{ TV_STD_PAL, "pal" },
+	{ TV_STD_PAL_M, "pal-m" },
+	{ TV_STD_PAL_60, "pal-60" },
+	{ TV_STD_NTSC_J, "ntsc-j" },
+	{ TV_STD_SCART_PAL, "scart-pal" },
+	{ TV_STD_PAL_CN, "pal-cn" },
+	{ TV_STD_SECAM, "secam" },
+};
+
+static struct drm_prop_enum_list radeon_underscan_enum_list[] =
+{	{ UNDERSCAN_OFF, "off" },
+	{ UNDERSCAN_ON, "on" },
+	{ UNDERSCAN_AUTO, "auto" },
+};
+
+static int radeon_modeset_create_props(struct radeon_device *rdev)
+{
+	int sz;
+
+	if (rdev->is_atom_bios) {
+		rdev->mode_info.coherent_mode_property =
+			drm_property_create_range(rdev->ddev, 0, "coherent", 0, 1);
+		if (!rdev->mode_info.coherent_mode_property)
+			return -ENOMEM;
+	}
+
+	if (!ASIC_IS_AVIVO(rdev)) {
+		sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
+		rdev->mode_info.tmds_pll_property =
+			drm_property_create_enum(rdev->ddev, 0,
+					    "tmds_pll",
+					    radeon_tmds_pll_enum_list, sz);
+	}
+
+	rdev->mode_info.load_detect_property =
+		drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
+	if (!rdev->mode_info.load_detect_property)
+		return -ENOMEM;
+
+	drm_mode_create_scaling_mode_property(rdev->ddev);
+
+	sz = ARRAY_SIZE(radeon_tv_std_enum_list);
+	rdev->mode_info.tv_std_property =
+		drm_property_create_enum(rdev->ddev, 0,
+				    "tv standard",
+				    radeon_tv_std_enum_list, sz);
+
+	sz = ARRAY_SIZE(radeon_underscan_enum_list);
+	rdev->mode_info.underscan_property =
+		drm_property_create_enum(rdev->ddev, 0,
+				    "underscan",
+				    radeon_underscan_enum_list, sz);
+
+	rdev->mode_info.underscan_hborder_property =
+		drm_property_create_range(rdev->ddev, 0,
+					"underscan hborder", 0, 128);
+	if (!rdev->mode_info.underscan_hborder_property)
+		return -ENOMEM;
+
+	rdev->mode_info.underscan_vborder_property =
+		drm_property_create_range(rdev->ddev, 0,
+					"underscan vborder", 0, 128);
+	if (!rdev->mode_info.underscan_vborder_property)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void radeon_update_display_priority(struct radeon_device *rdev)
+{
+	/* adjustment options for the display watermarks */
+	if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
+		/* set display priority to high for r3xx, rv515 chips
+		 * this avoids flickering due to underflow to the
+		 * display controllers during heavy acceleration.
+		 * Don't force high on rs4xx igp chips as it seems to
+		 * affect the sound card.  See kernel bug 15982.
+		 */
+		if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) &&
+		    !(rdev->flags & RADEON_IS_IGP))
+			rdev->disp_priority = 2;
+		else
+			rdev->disp_priority = 0;
+	} else {
+		rdev->disp_priority = radeon_disp_priority;
+	}
+}
+
+/*
+ * Allocate hdmi structs and determine register offsets
+ */
+static void radeon_afmt_init(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
+		rdev->mode_info.afmt[i] = NULL;
+
+	if (ASIC_IS_DCE6(rdev)) {
+		/* todo */
+	} else if (ASIC_IS_DCE4(rdev)) {
+		/* DCE4/5 has 6 audio blocks tied to DIG encoders */
+		/* DCE4.1 has 2 audio blocks tied to DIG encoders */
+		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+		if (rdev->mode_info.afmt[0]) {
+			rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+			rdev->mode_info.afmt[0]->id = 0;
+		}
+		rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+		if (rdev->mode_info.afmt[1]) {
+			rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+			rdev->mode_info.afmt[1]->id = 1;
+		}
+		if (!ASIC_IS_DCE41(rdev)) {
+			rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+			if (rdev->mode_info.afmt[2]) {
+				rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+				rdev->mode_info.afmt[2]->id = 2;
+			}
+			rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+			if (rdev->mode_info.afmt[3]) {
+				rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+				rdev->mode_info.afmt[3]->id = 3;
+			}
+			rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+			if (rdev->mode_info.afmt[4]) {
+				rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+				rdev->mode_info.afmt[4]->id = 4;
+			}
+			rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+			if (rdev->mode_info.afmt[5]) {
+				rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+				rdev->mode_info.afmt[5]->id = 5;
+			}
+		}
+	} else if (ASIC_IS_DCE3(rdev)) {
+		/* DCE3.x has 2 audio blocks tied to DIG encoders */
+		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+		if (rdev->mode_info.afmt[0]) {
+			rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
+			rdev->mode_info.afmt[0]->id = 0;
+		}
+		rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+		if (rdev->mode_info.afmt[1]) {
+			rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
+			rdev->mode_info.afmt[1]->id = 1;
+		}
+	} else if (ASIC_IS_DCE2(rdev)) {
+		/* DCE2 has at least 1 routable audio block */
+		rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+		if (rdev->mode_info.afmt[0]) {
+			rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
+			rdev->mode_info.afmt[0]->id = 0;
+		}
+		/* r6xx has 2 routable audio blocks */
+		if (rdev->family >= CHIP_R600) {
+			rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+			if (rdev->mode_info.afmt[1]) {
+				rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
+				rdev->mode_info.afmt[1]->id = 1;
+			}
+		}
+	}
+}
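+
+/*
+ * Summary of the allocations above (derived from the code, for reference;
+ * how the offset field is consumed is an assumption based on its name):
+ *
+ *	DCE4/5:	afmt[0..5], offsets EVERGREEN_CRTC0..5_REGISTER_OFFSET
+ *	DCE4.1:	afmt[0..1] only (2 audio blocks)
+ *	DCE3.x:	afmt[0..1], offsets DCE3_HDMI_OFFSET0/1
+ *	DCE2:	afmt[0] at DCE2_HDMI_OFFSET0; afmt[1] on r6xx+ at DCE2_HDMI_OFFSET1
+ *
+ * offset is presumably added by the HDMI/audio code to its per-block
+ * register addresses; id simply numbers the block.
+ */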
+
+static void radeon_afmt_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
+		kfree(rdev->mode_info.afmt[i]);
+		rdev->mode_info.afmt[i] = NULL;
+	}
+}
+
+int radeon_modeset_init(struct radeon_device *rdev)
+{
+	int i;
+	int ret;
+
+	drm_mode_config_init(rdev->ddev);
+	rdev->mode_info.mode_config_initialized = true;
+
+	rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
+
+	if (ASIC_IS_DCE5(rdev)) {
+		rdev->ddev->mode_config.max_width = 16384;
+		rdev->ddev->mode_config.max_height = 16384;
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		rdev->ddev->mode_config.max_width = 8192;
+		rdev->ddev->mode_config.max_height = 8192;
+	} else {
+		rdev->ddev->mode_config.max_width = 4096;
+		rdev->ddev->mode_config.max_height = 4096;
+	}
+
+	rdev->ddev->mode_config.preferred_depth = 24;
+	rdev->ddev->mode_config.prefer_shadow = 1;
+
+	rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
+
+	ret = radeon_modeset_create_props(rdev);
+	if (ret) {
+		return ret;
+	}
+
+	/* init i2c buses */
+	radeon_i2c_init(rdev);
+
+	/* check combios for a valid hardcoded EDID - Sun servers */
+	if (!rdev->is_atom_bios) {
+		/* check for hardcoded EDID in BIOS */
+		radeon_combios_check_hardcoded_edid(rdev);
+	}
+
+	/* allocate crtcs */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		radeon_crtc_init(rdev->ddev, i);
+	}
+
+	/* okay we should have all the bios connectors */
+	ret = radeon_setup_enc_conn(rdev->ddev);
+	if (!ret) {
+		return ret;
+	}
+
+	/* init dig PHYs, disp eng pll */
+	if (rdev->is_atom_bios) {
+		radeon_atom_encoder_init(rdev);
+		radeon_atom_disp_eng_pll_init(rdev);
+	}
+
+	/* initialize hpd */
+	radeon_hpd_init(rdev);
+
+	/* setup afmt */
+	radeon_afmt_init(rdev);
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+
+	radeon_fbdev_init(rdev);
+	drm_kms_helper_poll_init(rdev->ddev);
+
+	return 0;
+}
+
+void radeon_modeset_fini(struct radeon_device *rdev)
+{
+	radeon_fbdev_fini(rdev);
+	kfree(rdev->mode_info.bios_hardcoded_edid);
+	radeon_pm_fini(rdev);
+
+	if (rdev->mode_info.mode_config_initialized) {
+		radeon_afmt_fini(rdev);
+		drm_kms_helper_poll_fini(rdev->ddev);
+		radeon_hpd_fini(rdev);
+		drm_mode_config_cleanup(rdev->ddev);
+		rdev->mode_info.mode_config_initialized = false;
+	}
+	/* free i2c buses */
+	radeon_i2c_fini(rdev);
+}
+
+static bool is_hdtv_mode(const struct drm_display_mode *mode)
+{
+	/* try and guess if this is a tv or a monitor */
+	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
+	    (mode->vdisplay == 576) || /* 576p */
+	    (mode->vdisplay == 720) || /* 720p */
+	    (mode->vdisplay == 1080)) /* 1080p */
+		return true;
+	else
+		return false;
+}
+
+bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_encoder *radeon_encoder;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	bool first = true;
+	u32 src_v = 1, dst_v = 1;
+	u32 src_h = 1, dst_h = 1;
+
+	radeon_crtc->h_border = 0;
+	radeon_crtc->v_border = 0;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		radeon_encoder = to_radeon_encoder(encoder);
+		connector = radeon_get_connector_for_encoder(encoder);
+		radeon_connector = to_radeon_connector(connector);
+
+		if (first) {
+			/* set scaling */
+			if (radeon_encoder->rmx_type == RMX_OFF)
+				radeon_crtc->rmx_type = RMX_OFF;
+			else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
+				 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
+				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+			else
+				radeon_crtc->rmx_type = RMX_OFF;
+			/* copy native mode */
+			memcpy(&radeon_crtc->native_mode,
+			       &radeon_encoder->native_mode,
+				sizeof(struct drm_display_mode));
+			src_v = crtc->mode.vdisplay;
+			dst_v = radeon_crtc->native_mode.vdisplay;
+			src_h = crtc->mode.hdisplay;
+			dst_h = radeon_crtc->native_mode.hdisplay;
+
+			/* fix up for overscan on hdmi */
+			if (ASIC_IS_AVIVO(rdev) &&
+			    (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
+			    ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
+			     ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
+			      drm_detect_hdmi_monitor(radeon_connector->edid) &&
+			      is_hdtv_mode(mode)))) {
+				if (radeon_encoder->underscan_hborder != 0)
+					radeon_crtc->h_border = radeon_encoder->underscan_hborder;
+				else
+					radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
+				if (radeon_encoder->underscan_vborder != 0)
+					radeon_crtc->v_border = radeon_encoder->underscan_vborder;
+				else
+					radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
+				radeon_crtc->rmx_type = RMX_FULL;
+				src_v = crtc->mode.vdisplay;
+				dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
+				src_h = crtc->mode.hdisplay;
+				dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
+			}
+			first = false;
+		} else {
+			if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
+				/* WARNING: Right now this can't happen, but
+				 * in the future we need to check that scaling
+				 * is consistent across different encoders
+				 * (i.e. all encoders can work with the same
+				 * scaling).
+				 */
+				DRM_ERROR("Scaling not consistent across encoders.\n");
+				return false;
+			}
+		}
+	}
+	if (radeon_crtc->rmx_type != RMX_OFF) {
+		fixed20_12 a, b;
+		a.full = dfixed_const(src_v);
+		b.full = dfixed_const(dst_v);
+		radeon_crtc->vsc.full = dfixed_div(a, b);
+		a.full = dfixed_const(src_h);
+		b.full = dfixed_const(dst_h);
+		radeon_crtc->hsc.full = dfixed_div(a, b);
+	} else {
+		radeon_crtc->vsc.full = dfixed_const(1);
+		radeon_crtc->hsc.full = dfixed_const(1);
+	}
+	return true;
+}
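+
+/*
+ * Worked example for the scale factors above (a sketch; the exact rounding
+ * is whatever dfixed_div() in drm_fixed.h does for 20.12 fixed point):
+ *
+ *	src_v = 1080, dst_v = 1200
+ *	vsc = dfixed_div(dfixed_const(1080), dfixed_const(1200))
+ *	    = 0.9, i.e. 0.9 * 4096 ~= 3686 (0xe66)
+ *
+ * A factor below 1.0 means the source is scaled up to the native mode;
+ * RMX_OFF pins both factors to exactly 1.0 (dfixed_const(1) == 0x1000).
+ */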
+
+/*
+ * Retrieve current video scanout position of crtc on a given gpu.
+ *
+ * \param dev Device to query.
+ * \param crtc Crtc to query.
+ * \param *vpos Location where vertical scanout position should be stored.
+ * \param *hpos Location where horizontal scanout position should be stored.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * \return Flags, or'ed together as follows:
+ *
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * this flag means that returned position may be offset by a constant but
+ * unknown small number of scanlines wrt. real scanout position.
+ *
+ */
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
+{
+	u32 stat_crtc = 0, vbl = 0, position = 0;
+	int vbl_start, vbl_end, vtotal, ret = 0;
+	bool in_vbl = true;
+
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (ASIC_IS_DCE4(rdev)) {
+		if (crtc == 0) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC0_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC0_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 1) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC1_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC1_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 2) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC2_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC2_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 3) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC3_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC3_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 4) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC4_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC4_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 5) {
+			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
+				     EVERGREEN_CRTC5_REGISTER_OFFSET);
+			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
+					  EVERGREEN_CRTC5_REGISTER_OFFSET);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+	} else if (ASIC_IS_AVIVO(rdev)) {
+		if (crtc == 0) {
+			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
+			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 1) {
+			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
+			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+	} else {
+		/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
+		if (crtc == 0) {
+			/* Assume vbl_end == 0, get vbl_start from
+			 * upper 16 bits.
+			 */
+			vbl = (RREG32(RADEON_CRTC_V_TOTAL_DISP) &
+				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+			/* Only retrieve vpos from upper 16 bits, set hpos == 0. */
+			position = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+			stat_crtc = RREG32(RADEON_CRTC_STATUS);
+			if (!(stat_crtc & 1))
+				in_vbl = false;
+
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+		if (crtc == 1) {
+			vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
+				RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
+			position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
+			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
+			if (!(stat_crtc & 1))
+				in_vbl = false;
+
+			ret |= DRM_SCANOUTPOS_VALID;
+		}
+	}
+
+	/* Decode into vertical and horizontal scanout position. */
+	*vpos = position & 0x1fff;
+	*hpos = (position >> 16) & 0x1fff;
+
+	/* Valid vblank area boundaries from gpu retrieved? */
+	if (vbl > 0) {
+		/* Yes: Decode. */
+		ret |= DRM_SCANOUTPOS_ACCURATE;
+		vbl_start = vbl & 0x1fff;
+		vbl_end = (vbl >> 16) & 0x1fff;
+	} else {
+		/* No: Fake something reasonable which gives at least ok results. */
+		vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
+		vbl_end = 0;
+	}
+
+	/* Test scanout position against vblank region. */
+	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
+		in_vbl = false;
+
+	/* Check if inside vblank area and apply corrective offsets:
+	 * vpos will then be >=0 in video scanout area, but negative
+	 * within vblank area, counting down the number of lines until
+	 * start of scanout.
+	 */
+
+	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
+	if (in_vbl && (*vpos >= vbl_start)) {
+		vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+		*vpos = *vpos - vtotal;
+	}
+
+	/* Correct for shifted end of vbl at vbl_end. */
+	*vpos = *vpos - vbl_end;
+
+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_INVBL;
+
+	return ret;
+}
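+
+/*
+ * Hypothetical caller (illustration only, not part of the driver), showing
+ * how the combined return value is meant to be interpreted:
+ *
+ *	int vpos, hpos, flags;
+ *
+ *	flags = radeon_get_crtc_scanoutpos(dev, 0, &vpos, &hpos);
+ *	if (!(flags & DRM_SCANOUTPOS_VALID))
+ *		return;		query failed for this crtc
+ *	if (flags & DRM_SCANOUTPOS_INVBL)
+ *		...		in vblank; vpos is negative, counting
+ *				down to the start of active scanout
+ *	else
+ *		...		vpos/hpos give the active scanout position
+ */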
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_drv.c b/linux-imx/drivers/gpu/drm/radeon/radeon_drv.c
new file mode 100644
index 0000000..094e7e5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_drv.c
@@ -0,0 +1,505 @@
+/**
+ * \file radeon_drv.c
+ * ATI Radeon driver
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+#include <drm/drm_pciids.h>
+#include <linux/console.h>
+#include <linux/module.h>
+
+
+/*
+ * KMS wrapper.
+ * - 2.0.0 - initial interface
+ * - 2.1.0 - add square tiling interface
+ * - 2.2.0 - add r6xx/r7xx const buffer support
+ * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
+ * - 2.4.0 - add crtc id query
+ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
+ * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
+ * - 2.7.0 - fixups for r600 2D tiling support (no external ABI change), add eg dyn gpr regs
+ * - 2.8.0 - pageflip support, r500 US_FORMAT regs, r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
+ * - 2.9.0 - r600 tiling (s3tc, rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
+ * - 2.10.0 - fusion 2D tiling
+ * - 2.11.0 - backend map, initial compute support for the CS checker
+ * - 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
+ * - 2.13.0 - virtual memory support, streamout
+ * - 2.14.0 - add evergreen tiling information
+ * - 2.15.0 - add max_pipes query
+ * - 2.16.0 - fix evergreen 2D tiled surface calculation
+ * - 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
+ * - 2.18.0 - r600-eg: allow "invalid" DB formats
+ * - 2.19.0 - r600-eg: MSAA textures
+ * - 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
+ * - 2.21.0 - r600-r700: FMASK and CMASK
+ * - 2.22.0 - r600 only: RESOLVE_BOX allowed
+ * - 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
+ * - 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
+ * - 2.25.0 - eg+: new info request for num SE and num SH
+ * - 2.26.0 - r600-eg: fix htile size computation
+ * - 2.27.0 - r600-SI: Add CS ioctl support for async DMA
+ * - 2.28.0 - r600-eg: Add MEM_WRITE packet support
+ * - 2.29.0 - R500 FP16 color clear registers
+ * - 2.30.0 - fix for FMASK texturing
+ * - 2.31.0 - Add fastfb support for rs690
+ * - 2.32.0 - new info request for rings working
+ * - 2.33.0 - Add SI tiling mode array query
+ */
+#define KMS_DRIVER_MAJOR	2
+#define KMS_DRIVER_MINOR	33
+#define KMS_DRIVER_PATCHLEVEL	0
+int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
+int radeon_driver_unload_kms(struct drm_device *dev);
+int radeon_driver_firstopen_kms(struct drm_device *dev);
+void radeon_driver_lastclose_kms(struct drm_device *dev);
+int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
+void radeon_driver_postclose_kms(struct drm_device *dev,
+				 struct drm_file *file_priv);
+void radeon_driver_preclose_kms(struct drm_device *dev,
+				struct drm_file *file_priv);
+int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+int radeon_resume_kms(struct drm_device *dev);
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
+int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
+void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+				    int *max_error,
+				    struct timeval *vblank_time,
+				    unsigned flags);
+void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
+int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
+void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
+irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
+int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int radeon_gem_object_init(struct drm_gem_object *obj);
+void radeon_gem_object_free(struct drm_gem_object *obj);
+int radeon_gem_object_open(struct drm_gem_object *obj,
+				struct drm_file *file_priv);
+void radeon_gem_object_close(struct drm_gem_object *obj,
+				struct drm_file *file_priv);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+				      int *vpos, int *hpos);
+extern struct drm_ioctl_desc radeon_ioctls_kms[];
+extern int radeon_max_kms_ioctl;
+int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+			  struct drm_device *dev,
+			  uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+			     struct drm_device *dev,
+			     uint32_t handle);
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+							size_t size,
+							struct sg_table *sg);
+int radeon_gem_prime_pin(struct drm_gem_object *obj);
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
+				    unsigned long arg);
+
+#if defined(CONFIG_DEBUG_FS)
+int radeon_debugfs_init(struct drm_minor *minor);
+void radeon_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+#endif
+
+int radeon_no_wb;
+int radeon_modeset = -1;
+int radeon_dynclks = -1;
+int radeon_r4xx_atom = 0;
+int radeon_agpmode = 0;
+int radeon_vram_limit = 0;
+int radeon_gart_size = 512; /* default gart size */
+int radeon_benchmarking = 0;
+int radeon_testing = 0;
+int radeon_connector_table = 0;
+int radeon_tv = 1;
+int radeon_audio = 0;
+int radeon_disp_priority = 0;
+int radeon_hw_i2c = 0;
+int radeon_pcie_gen2 = -1;
+int radeon_msi = -1;
+int radeon_lockup_timeout = 10000;
+int radeon_fastfb = 0;
+
+MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
+module_param_named(no_wb, radeon_no_wb, int, 0444);
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, radeon_modeset, int, 0400);
+
+MODULE_PARM_DESC(dynclks, "Disable/Enable dynamic clocks");
+module_param_named(dynclks, radeon_dynclks, int, 0444);
+
+MODULE_PARM_DESC(r4xx_atom, "Enable ATOMBIOS modesetting for R4xx");
+module_param_named(r4xx_atom, radeon_r4xx_atom, int, 0444);
+
+MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing");
+module_param_named(vramlimit, radeon_vram_limit, int, 0600);
+
+MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
+module_param_named(agpmode, radeon_agpmode, int, 0444);
+
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to set up in megabytes (32, 64, etc)");
+module_param_named(gartsize, radeon_gart_size, int, 0600);
+
+MODULE_PARM_DESC(benchmark, "Run benchmark");
+module_param_named(benchmark, radeon_benchmarking, int, 0444);
+
+MODULE_PARM_DESC(test, "Run tests");
+module_param_named(test, radeon_testing, int, 0444);
+
+MODULE_PARM_DESC(connector_table, "Force connector table");
+module_param_named(connector_table, radeon_connector_table, int, 0444);
+
+MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
+module_param_named(tv, radeon_tv, int, 0444);
+
+MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
+module_param_named(audio, radeon_audio, int, 0444);
+
+MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
+module_param_named(disp_priority, radeon_disp_priority, int, 0444);
+
+MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
+module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
+
+MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
+module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
+
+MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(msi, radeon_msi, int, 0444);
+
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
+module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+
+MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)");
+module_param_named(fastfb, radeon_fastfb, int, 0444);
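+
+/*
+ * Usage note (illustrative): all of the above are load-time parameters,
+ * settable via modprobe or the kernel command line, e.g.:
+ *
+ *	modprobe radeon modeset=1 audio=1 agpmode=-1
+ *	radeon.lockup_timeout=0		(kernel command line form)
+ *
+ * Parameters registered with mode 0600 (vramlimit, gartsize) can also be
+ * changed at runtime via /sys/module/radeon/parameters/.
+ */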
+
+static struct pci_device_id pciidlist[] = {
+	radeon_PCI_IDS
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+#ifdef CONFIG_DRM_RADEON_UMS
+
+static int radeon_suspend(struct drm_device *dev, pm_message_t state)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return 0;
+
+	/* Disable *all* interrupts */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+	return 0;
+}
+
+static int radeon_resume(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return 0;
+
+	/* Restore interrupt registers */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+	return 0;
+}
+
+static const struct file_operations radeon_driver_old_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = radeon_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver driver_old = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
+	.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
+	.load = radeon_driver_load,
+	.firstopen = radeon_driver_firstopen,
+	.open = radeon_driver_open,
+	.preclose = radeon_driver_preclose,
+	.postclose = radeon_driver_postclose,
+	.lastclose = radeon_driver_lastclose,
+	.unload = radeon_driver_unload,
+	.suspend = radeon_suspend,
+	.resume = radeon_resume,
+	.get_vblank_counter = radeon_get_vblank_counter,
+	.enable_vblank = radeon_enable_vblank,
+	.disable_vblank = radeon_disable_vblank,
+	.master_create = radeon_master_create,
+	.master_destroy = radeon_master_destroy,
+	.irq_preinstall = radeon_driver_irq_preinstall,
+	.irq_postinstall = radeon_driver_irq_postinstall,
+	.irq_uninstall = radeon_driver_irq_uninstall,
+	.irq_handler = radeon_driver_irq_handler,
+	.ioctls = radeon_ioctls,
+	.dma_ioctl = radeon_cp_buffers,
+	.fops = &radeon_driver_old_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+#endif
+
+static struct drm_driver kms_driver;
+
+static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return -ENOMEM;
+
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
+	kfree(ap);
+
+	return 0;
+}
+
+static int radeon_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	int ret;
+
+	/* Get rid of things like offb */
+	ret = radeon_kick_out_firmware_fb(pdev);
+	if (ret)
+		return ret;
+
+	return drm_get_pci_dev(pdev, ent, &kms_driver);
+}
+
+static void
+radeon_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static int
+radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	return radeon_suspend_kms(dev, state);
+}
+
+static int
+radeon_pci_resume(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	return radeon_resume_kms(dev);
+}
+
+static const struct file_operations radeon_driver_kms_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = radeon_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = radeon_kms_compat_ioctl,
+#endif
+};
+
+static struct drm_driver kms_driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM |
+	    DRIVER_PRIME,
+	.dev_priv_size = 0,
+	.load = radeon_driver_load_kms,
+	.firstopen = radeon_driver_firstopen_kms,
+	.open = radeon_driver_open_kms,
+	.preclose = radeon_driver_preclose_kms,
+	.postclose = radeon_driver_postclose_kms,
+	.lastclose = radeon_driver_lastclose_kms,
+	.unload = radeon_driver_unload_kms,
+	.suspend = radeon_suspend_kms,
+	.resume = radeon_resume_kms,
+	.get_vblank_counter = radeon_get_vblank_counter_kms,
+	.enable_vblank = radeon_enable_vblank_kms,
+	.disable_vblank = radeon_disable_vblank_kms,
+	.get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
+	.get_scanout_position = radeon_get_crtc_scanoutpos,
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = radeon_debugfs_init,
+	.debugfs_cleanup = radeon_debugfs_cleanup,
+#endif
+	.irq_preinstall = radeon_driver_irq_preinstall_kms,
+	.irq_postinstall = radeon_driver_irq_postinstall_kms,
+	.irq_uninstall = radeon_driver_irq_uninstall_kms,
+	.irq_handler = radeon_driver_irq_handler_kms,
+	.ioctls = radeon_ioctls_kms,
+	.gem_init_object = radeon_gem_object_init,
+	.gem_free_object = radeon_gem_object_free,
+	.gem_open_object = radeon_gem_object_open,
+	.gem_close_object = radeon_gem_object_close,
+	.dma_ioctl = radeon_dma_ioctl_kms,
+	.dumb_create = radeon_mode_dumb_create,
+	.dumb_map_offset = radeon_mode_dumb_mmap,
+	.dumb_destroy = radeon_mode_dumb_destroy,
+	.fops = &radeon_driver_kms_fops,
+
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_pin = radeon_gem_prime_pin,
+	.gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
+	.gem_prime_vmap = radeon_gem_prime_vmap,
+	.gem_prime_vunmap = radeon_gem_prime_vunmap,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = KMS_DRIVER_MAJOR,
+	.minor = KMS_DRIVER_MINOR,
+	.patchlevel = KMS_DRIVER_PATCHLEVEL,
+};
+
+static struct drm_driver *driver;
+static struct pci_driver *pdriver;
+
+#ifdef CONFIG_DRM_RADEON_UMS
+static struct pci_driver radeon_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+};
+#endif
+
+static struct pci_driver radeon_kms_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = radeon_pci_probe,
+	.remove = radeon_pci_remove,
+	.suspend = radeon_pci_suspend,
+	.resume = radeon_pci_resume,
+};
+
+static int __init radeon_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && radeon_modeset == -1) {
+		DRM_INFO("VGACON disable radeon kernel modesetting.\n");
+		radeon_modeset = 0;
+	}
+#endif
+	/* default to kernel modesetting unless it was explicitly disabled */
+	if (radeon_modeset == -1)
+		radeon_modeset = 1;
+
+	if (radeon_modeset == 1) {
+		DRM_INFO("radeon kernel modesetting enabled.\n");
+		driver = &kms_driver;
+		pdriver = &radeon_kms_pci_driver;
+		driver->driver_features |= DRIVER_MODESET;
+		driver->num_ioctls = radeon_max_kms_ioctl;
+		radeon_register_atpx_handler();
+
+	} else {
+#ifdef CONFIG_DRM_RADEON_UMS
+		DRM_INFO("radeon userspace modesetting enabled.\n");
+		driver = &driver_old;
+		pdriver = &radeon_pci_driver;
+		driver->driver_features &= ~DRIVER_MODESET;
+		driver->num_ioctls = radeon_max_ioctl;
+#else
+		DRM_ERROR("No UMS support in radeon module!\n");
+		return -EINVAL;
+#endif
+	}
+
+	/* let modprobe override vga console setting */
+	return drm_pci_init(driver, pdriver);
+}
+
+static void __exit radeon_exit(void)
+{
+	drm_pci_exit(driver, pdriver);
+	radeon_unregister_atpx_handler();
+}
+
+module_init(radeon_init);
+module_exit(radeon_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_drv.h b/linux-imx/drivers/gpu/drm/radeon/radeon_drv.h
new file mode 100644
index 0000000..b369d42
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_drv.h
@@ -0,0 +1,2164 @@
+/* radeon_drv.h -- Private header for radeon driver -*- linux-c -*-
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Kevin E. Martin <martin@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __RADEON_DRV_H__
+#define __RADEON_DRV_H__
+
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+
+#include "radeon_family.h"
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"Gareth Hughes, Keith Whitwell, others."
+
+#define DRIVER_NAME		"radeon"
+#define DRIVER_DESC		"ATI Radeon"
+#define DRIVER_DATE		"20080528"
+
+/* Interface history:
+ *
+ * 1.1 - ??
+ * 1.2 - Add vertex2 ioctl (keith)
+ *     - Add stencil capability to clear ioctl (gareth, keith)
+ *     - Increase MAX_TEXTURE_LEVELS (brian)
+ * 1.3 - Add cmdbuf ioctl (keith)
+ *     - Add support for new radeon packets (keith)
+ *     - Add getparam ioctl (keith)
+ *     - Add flip-buffers ioctl, deprecate fullscreen foo (keith).
+ * 1.4 - Add scratch registers to get_param ioctl.
+ * 1.5 - Add r200 packets to cmdbuf ioctl
+ *     - Add r200 function to init ioctl
+ *     - Add 'scalar2' instruction to cmdbuf
+ * 1.6 - Add static GART memory manager
+ *       Add irq handler (won't be turned on unless X server knows to)
+ *       Add irq ioctls and irq_active getparam.
+ *       Add wait command for cmdbuf ioctl
+ *       Add GART offset query for getparam
+ * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5]
+ *       and R200_PP_CUBIC_OFFSET_F1_[0..5].
+ *       Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
+ *       R200_EMIT_PP_CUBIC_OFFSETS_[0..5].  (brian)
+ * 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
+ *       Add 'GET' queries for starting additional clients on different VT's.
+ * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl.
+ *       Add texture rectangle support for r100.
+ * 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which
+ *       clients use to tell the DRM where they think the framebuffer is
+ *       located in the card's address space
+ * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
+ *       and GL_EXT_blend_[func|equation]_separate on r200
+ * 1.12- Add R300 CP microcode support - this just loads the CP on r300
+ *       (No 3D support yet - just microcode loading).
+ * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
+ *     - Add hyperz support, add hyperz flags to clear ioctl.
+ * 1.14- Add support for color tiling
+ *     - Add R100/R200 surface allocation/free support
+ * 1.15- Add support for texture micro tiling
+ *     - Add support for r100 cube maps
+ * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
+ *       texture filtering on r200
+ * 1.17- Add initial support for R300 (3D).
+ * 1.18- Add support for GL_ATI_fragment_shader, new packets
+ *       R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces
+ *       R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
+ *       (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6))
+ * 1.19- Add support for gart table in FB memory and PCIE r300
+ * 1.20- Add support for r300 texrect
+ * 1.21- Add support for card type getparam
+ * 1.22- Add support for texture cache flushes (R300_TX_CNTL)
+ * 1.23- Add new radeon memory map work from benh
+ * 1.24- Add general-purpose packet for manipulating scratch registers (r300)
+ * 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL,
+ *       new packet type)
+ * 1.26- Add support for variable size PCI(E) gart aperture
+ * 1.27- Add support for IGP GART
+ * 1.28- Add support for VBL on CRTC2
+ * 1.29- R500 3D cmd buffer support
+ * 1.30- Add support for occlusion queries
+ * 1.31- Add support for num Z pipes from GET_PARAM
+ * 1.32- fixes for rv740 setup
+ * 1.33- Add r6xx/r7xx const buffer support
+ */
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		33
+#define DRIVER_PATCHLEVEL	0
+
+/* The rest of the file is DEPRECATED! */
+#ifdef CONFIG_DRM_RADEON_UMS
+
+enum radeon_cp_microcode_version {
+	UCODE_R100,
+	UCODE_R200,
+	UCODE_R300,
+};
+
+typedef struct drm_radeon_freelist {
+	unsigned int age;
+	struct drm_buf *buf;
+	struct drm_radeon_freelist *next;
+	struct drm_radeon_freelist *prev;
+} drm_radeon_freelist_t;
+
+typedef struct drm_radeon_ring_buffer {
+	u32 *start;
+	u32 *end;
+	int size;
+	int size_l2qw;
+
+	int rptr_update; /* Double Words */
+	int rptr_update_l2qw; /* log2 Quad Words */
+
+	int fetch_size; /* Double Words */
+	int fetch_size_l2ow; /* log2 Oct Words */
+
+	u32 tail;
+	u32 tail_mask;
+	int space;
+
+	int high_mark;
+} drm_radeon_ring_buffer_t;
+
+typedef struct drm_radeon_depth_clear_t {
+	u32 rb3d_cntl;
+	u32 rb3d_zstencilcntl;
+	u32 se_cntl;
+} drm_radeon_depth_clear_t;
+
+struct drm_radeon_driver_file_fields {
+	int64_t radeon_fb_delta;
+};
+
+struct mem_block {
+	struct mem_block *next;
+	struct mem_block *prev;
+	int start;
+	int size;
+	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+};
+
+struct radeon_surface {
+	int refcount;
+	u32 lower;
+	u32 upper;
+	u32 flags;
+};
+
+struct radeon_virt_surface {
+	int surface_index;
+	u32 lower;
+	u32 upper;
+	u32 flags;
+	struct drm_file *file_priv;
+#define PCIGART_FILE_PRIV	((void *) -1L)
+};
+
+#define RADEON_FLUSH_EMITED	(1 << 0)
+#define RADEON_PURGE_EMITED	(1 << 1)
+
+struct drm_radeon_master_private {
+	drm_local_map_t *sarea;
+	drm_radeon_sarea_t *sarea_priv;
+};
+
+typedef struct drm_radeon_private {
+	drm_radeon_ring_buffer_t ring;
+
+	u32 fb_location;
+	u32 fb_size;
+	int new_memmap;
+
+	int gart_size;
+	u32 gart_vm_start;
+	unsigned long gart_buffers_offset;
+
+	int cp_mode;
+	int cp_running;
+
+	drm_radeon_freelist_t *head;
+	drm_radeon_freelist_t *tail;
+	int last_buf;
+	int writeback_works;
+
+	int usec_timeout;
+
+	int microcode_version;
+
+	struct {
+		u32 boxes;
+		int freelist_timeouts;
+		int freelist_loops;
+		int requested_bufs;
+		int last_frame_reads;
+		int last_clear_reads;
+		int clears;
+		int texture_uploads;
+	} stats;
+
+	int do_boxes;
+	int page_flipping;
+
+	u32 color_fmt;
+	unsigned int front_offset;
+	unsigned int front_pitch;
+	unsigned int back_offset;
+	unsigned int back_pitch;
+
+	u32 depth_fmt;
+	unsigned int depth_offset;
+	unsigned int depth_pitch;
+
+	u32 front_pitch_offset;
+	u32 back_pitch_offset;
+	u32 depth_pitch_offset;
+
+	drm_radeon_depth_clear_t depth_clear;
+
+	unsigned long ring_offset;
+	unsigned long ring_rptr_offset;
+	unsigned long buffers_offset;
+	unsigned long gart_textures_offset;
+
+	drm_local_map_t *sarea;
+	drm_local_map_t *cp_ring;
+	drm_local_map_t *ring_rptr;
+	drm_local_map_t *gart_textures;
+
+	struct mem_block *gart_heap;
+	struct mem_block *fb_heap;
+
+	/* SW interrupt */
+	wait_queue_head_t swi_queue;
+	atomic_t swi_emitted;
+	int vblank_crtc;
+	uint32_t irq_enable_reg;
+	uint32_t r500_disp_irq_reg;
+
+	struct radeon_surface surfaces[RADEON_MAX_SURFACES];
+	struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
+
+	unsigned long pcigart_offset;
+	unsigned int pcigart_offset_set;
+	struct drm_ati_pcigart_info gart_info;
+
+	u32 scratch_ages[5];
+
+	int have_z_offset;
+
+	/* starting from here on, data is preserved across an open */
+	uint32_t flags;		/* see radeon_chip_flags */
+	resource_size_t fb_aper_offset;
+
+	int num_gb_pipes;
+	int num_z_pipes;
+	int track_flush;
+	drm_local_map_t *mmio;
+
+	/* r6xx/r7xx pipe/shader config */
+	int r600_max_pipes;
+	int r600_max_tile_pipes;
+	int r600_max_simds;
+	int r600_max_backends;
+	int r600_max_gprs;
+	int r600_max_threads;
+	int r600_max_stack_entries;
+	int r600_max_hw_contexts;
+	int r600_max_gs_threads;
+	int r600_sx_max_export_size;
+	int r600_sx_max_export_pos_size;
+	int r600_sx_max_export_smx_size;
+	int r600_sq_num_cf_insts;
+	int r700_sx_num_of_sets;
+	int r700_sc_prim_fifo_size;
+	int r700_sc_hiz_tile_fifo_size;
+	int r700_sc_earlyz_tile_fifo_fize; /* sic: "fize" (size); name kept as used elsewhere */
+	int r600_group_size;
+	int r600_npipes;
+	int r600_nbanks;
+
+	struct mutex cs_mutex;
+	u32 cs_id_scnt;
+	u32 cs_id_wcnt;
+	/* r6xx/r7xx drm blit vertex buffer */
+	struct drm_buf *blit_vb;
+
+	/* firmware */
+	const struct firmware *me_fw, *pfp_fw;
+} drm_radeon_private_t;
+
+typedef struct drm_radeon_buf_priv {
+	u32 age;
+} drm_radeon_buf_priv_t;
+
+struct drm_buffer;
+
+typedef struct drm_radeon_kcmd_buffer {
+	int bufsz;
+	struct drm_buffer *buffer;
+	int nbox;
+	struct drm_clip_rect __user *boxes;
+} drm_radeon_kcmd_buffer_t;
+
+extern int radeon_no_wb;
+extern struct drm_ioctl_desc radeon_ioctls[];
+extern int radeon_max_ioctl;
+
+extern u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv);
+extern void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val);
+
+#define GET_RING_HEAD(dev_priv)	radeon_get_ring_head(dev_priv)
+#define SET_RING_HEAD(dev_priv, val) radeon_set_ring_head(dev_priv, val)
+
+/* Check whether the given hardware address is inside the framebuffer or the
+ * GART area.
+ */
+static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
+					  u64 off)
+{
+	u32 fb_start = dev_priv->fb_location;
+	u32 fb_end = fb_start + dev_priv->fb_size - 1;
+	u32 gart_start = dev_priv->gart_vm_start;
+	u32 gart_end = gart_start + dev_priv->gart_size - 1;
+
+	return ((off >= fb_start && off <= fb_end) ||
+		(off >= gart_start && off <= gart_end));
+}
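+
+/*
+ * Example (illustrative values): with fb_location = 0xd0000000,
+ * fb_size = 256MB, gart_vm_start = 0xe0000000 and gart_size = 512MB,
+ * radeon_check_offset() accepts 0xd8000000 (inside VRAM) and 0xe4000000
+ * (inside the GART window) but rejects 0xc0000000, which falls in
+ * neither range.
+ */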
+
+/* radeon_state.c */
+extern void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf);
+
+				/* radeon_cp.c */
+extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
+extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
+extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
+
+extern void radeon_freelist_reset(struct drm_device * dev);
+extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
+
+extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
+
+extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv);
+
+extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
+extern int radeon_presetup(struct drm_device *dev);
+extern int radeon_driver_postcleanup(struct drm_device *dev);
+
+extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern void radeon_mem_takedown(struct mem_block **heap);
+extern void radeon_mem_release(struct drm_file *file_priv,
+			       struct mem_block *heap);
+
+extern void radeon_enable_bm(struct drm_radeon_private *dev_priv);
+extern u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off);
+extern void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val);
+
+				/* radeon_irq.c */
+extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
+extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
+extern void radeon_do_release(struct drm_device * dev);
+extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
+extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
+extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
+extern void radeon_driver_irq_preinstall(struct drm_device * dev);
+extern int radeon_driver_irq_postinstall(struct drm_device *dev);
+extern void radeon_driver_irq_uninstall(struct drm_device * dev);
+extern void radeon_enable_interrupt(struct drm_device *dev);
+extern int radeon_vblank_crtc_get(struct drm_device *dev);
+extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
+
+extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
+extern int radeon_driver_unload(struct drm_device *dev);
+extern int radeon_driver_firstopen(struct drm_device *dev);
+extern void radeon_driver_preclose(struct drm_device *dev,
+				   struct drm_file *file_priv);
+extern void radeon_driver_postclose(struct drm_device *dev,
+				    struct drm_file *file_priv);
+extern void radeon_driver_lastclose(struct drm_device * dev);
+extern int radeon_driver_open(struct drm_device *dev,
+			      struct drm_file *file_priv);
+extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
+				unsigned long arg);
+
+extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
+extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
+extern void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master);
+/* r300_cmdbuf.c */
+extern void r300_init_reg_flags(struct drm_device *dev);
+
+extern int r300_do_cp_cmdbuf(struct drm_device *dev,
+			     struct drm_file *file_priv,
+			     drm_radeon_kcmd_buffer_t *cmdbuf);
+
+/* r600_cp.c */
+extern int r600_do_engine_reset(struct drm_device *dev);
+extern int r600_do_cleanup_cp(struct drm_device *dev);
+extern int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
+			   struct drm_file *file_priv);
+extern int r600_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv);
+extern int r600_do_cp_idle(drm_radeon_private_t *dev_priv);
+extern void r600_do_cp_start(drm_radeon_private_t *dev_priv);
+extern void r600_do_cp_reset(drm_radeon_private_t *dev_priv);
+extern void r600_do_cp_stop(drm_radeon_private_t *dev_priv);
+extern int r600_cp_dispatch_indirect(struct drm_device *dev,
+				     struct drm_buf *buf, int start, int end);
+extern int r600_page_table_init(struct drm_device *dev);
+extern void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
+extern int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv);
+extern void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv);
+extern int r600_cp_dispatch_texture(struct drm_device *dev,
+				    struct drm_file *file_priv,
+				    drm_radeon_texture_t *tex,
+				    drm_radeon_tex_image_t *image);
+/* r600_blit.c */
+extern int r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv);
+extern void r600_done_blit_copy(struct drm_device *dev);
+extern void r600_blit_copy(struct drm_device *dev,
+			   uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
+			   int size_bytes);
+extern void r600_blit_swap(struct drm_device *dev,
+			   uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
+			   int sx, int sy, int dx, int dy,
+			   int w, int h, int src_pitch, int dst_pitch, int cpp);
+
+/* Flags for stats.boxes
+ */
+#define RADEON_BOX_DMA_IDLE      0x1
+#define RADEON_BOX_RING_FULL     0x2
+#define RADEON_BOX_FLIP          0x4
+#define RADEON_BOX_WAIT_IDLE     0x8
+#define RADEON_BOX_TEXTURE_LOAD  0x10
+
+/* Register definitions, register access macros and drmAddMap constants
+ * for Radeon kernel driver.
+ */
+#define RADEON_MM_INDEX		        0x0000
+#define RADEON_MM_DATA		        0x0004
+
+#define RADEON_AGP_COMMAND		0x0f60
+#define RADEON_AGP_COMMAND_PCI_CONFIG   0x0060	/* offset in PCI config */
+#	define RADEON_AGP_ENABLE	(1<<8)
+#define RADEON_AUX_SCISSOR_CNTL		0x26f0
+#	define RADEON_EXCLUSIVE_SCISSOR_0	(1 << 24)
+#	define RADEON_EXCLUSIVE_SCISSOR_1	(1 << 25)
+#	define RADEON_EXCLUSIVE_SCISSOR_2	(1 << 26)
+#	define RADEON_SCISSOR_0_ENABLE		(1 << 28)
+#	define RADEON_SCISSOR_1_ENABLE		(1 << 29)
+#	define RADEON_SCISSOR_2_ENABLE		(1 << 30)
+
+/*
+ * PCIE radeons (rv370/rv380, rv410, r423/r430/r480, r5xx)
+ * don't have an explicit bus mastering disable bit.  It's handled
+ * by the PCI D-states.  PMI_BM_DIS disables D-state bus master
+ * handling, not bus mastering itself.
+ */
+#define RADEON_BUS_CNTL			0x0030
+/* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+#	define RADEON_BUS_MASTER_DIS		(1 << 6)
+/* rs600/rs690/rs740 */
+#	define RS600_BUS_MASTER_DIS		(1 << 14)
+#	define RS600_MSI_REARM		        (1 << 20)
+/* see RS400_MSI_REARM in AIC_CNTL for rs480 */
+
+#define RADEON_BUS_CNTL1		0x0034
+#	define RADEON_PMI_BM_DIS		(1 << 2)
+#	define RADEON_PMI_INT_DIS		(1 << 3)
+
+#define RV370_BUS_CNTL			0x004c
+#	define RV370_PMI_BM_DIS		        (1 << 5)
+#	define RV370_PMI_INT_DIS		(1 << 6)
+
+#define RADEON_MSI_REARM_EN		0x0160
+/* rv370/rv380, rv410, r423/r430/r480, r5xx */
+#	define RV370_MSI_REARM_EN		(1 << 0)
+
+#define RADEON_CLOCK_CNTL_DATA		0x000c
+#	define RADEON_PLL_WR_EN			(1 << 7)
+#define RADEON_CLOCK_CNTL_INDEX		0x0008
+#define RADEON_CONFIG_APER_SIZE		0x0108
+#define RADEON_CONFIG_MEMSIZE		0x00f8
+#define RADEON_CRTC_OFFSET		0x0224
+#define RADEON_CRTC_OFFSET_CNTL		0x0228
+#	define RADEON_CRTC_TILE_EN		(1 << 15)
+#	define RADEON_CRTC_OFFSET_FLIP_CNTL	(1 << 16)
+#define RADEON_CRTC2_OFFSET		0x0324
+#define RADEON_CRTC2_OFFSET_CNTL	0x0328
+
+#define RADEON_PCIE_INDEX               0x0030
+#define RADEON_PCIE_DATA                0x0034
+#define RADEON_PCIE_TX_GART_CNTL	0x10
+#	define RADEON_PCIE_TX_GART_EN		(1 << 0)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO  (1 << 1)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD   (3 << 1)
+#	define RADEON_PCIE_TX_GART_MODE_32_128_CACHE	(0 << 3)
+#	define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE	(1 << 3)
+#	define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN      (1 << 5)
+#	define RADEON_PCIE_TX_GART_INVALIDATE_TLB	(1 << 8)
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
+#define RADEON_PCIE_TX_GART_BASE	0x13
+#define RADEON_PCIE_TX_GART_START_LO	0x14
+#define RADEON_PCIE_TX_GART_START_HI	0x15
+#define RADEON_PCIE_TX_GART_END_LO	0x16
+#define RADEON_PCIE_TX_GART_END_HI	0x17
+
+#define RS480_NB_MC_INDEX               0x168
+#	define RS480_NB_MC_IND_WR_EN	(1 << 8)
+#define RS480_NB_MC_DATA                0x16c
+
+#define RS690_MC_INDEX                  0x78
+#   define RS690_MC_INDEX_MASK          0x1ff
+#   define RS690_MC_INDEX_WR_EN         (1 << 9)
+#   define RS690_MC_INDEX_WR_ACK        0x7f
+#define RS690_MC_DATA                   0x7c
+
+/* MC indirect registers */
+#define RS480_MC_MISC_CNTL              0x18
+#	define RS480_DISABLE_GTW	(1 << 1)
+/* switch between MCIND GART and MM GART registers. 0 = mmgart, 1 = mcind gart */
+#	define RS480_GART_INDEX_REG_EN	(1 << 12)
+#	define RS690_BLOCK_GFX_D3_EN	(1 << 14)
+#define RS480_K8_FB_LOCATION            0x1e
+#define RS480_GART_FEATURE_ID           0x2b
+#	define RS480_HANG_EN	        (1 << 11)
+#	define RS480_TLB_ENABLE	        (1 << 18)
+#	define RS480_P2P_ENABLE	        (1 << 19)
+#	define RS480_GTW_LAC_EN	        (1 << 25)
+#	define RS480_2LEVEL_GART	(0 << 30)
+#	define RS480_1LEVEL_GART	(1 << 30)
+#	define RS480_PDC_EN	        (1 << 31)
+#define RS480_GART_BASE                 0x2c
+#define RS480_GART_CACHE_CNTRL          0x2e
+#	define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
+#define RS480_AGP_ADDRESS_SPACE_SIZE    0x38
+#	define RS480_GART_EN	        (1 << 0)
+#	define RS480_VA_SIZE_32MB	(0 << 1)
+#	define RS480_VA_SIZE_64MB	(1 << 1)
+#	define RS480_VA_SIZE_128MB	(2 << 1)
+#	define RS480_VA_SIZE_256MB	(3 << 1)
+#	define RS480_VA_SIZE_512MB	(4 << 1)
+#	define RS480_VA_SIZE_1GB	(5 << 1)
+#	define RS480_VA_SIZE_2GB	(6 << 1)
+#define RS480_AGP_MODE_CNTL             0x39
+#	define RS480_POST_GART_Q_SIZE	(1 << 18)
+#	define RS480_NONGART_SNOOP	(1 << 19)
+#	define RS480_AGP_RD_BUF_SIZE	(1 << 20)
+#	define RS480_REQ_TYPE_SNOOP_SHIFT 22
+#	define RS480_REQ_TYPE_SNOOP_MASK  0x3
+#	define RS480_REQ_TYPE_SNOOP_DIS	(1 << 24)
+#define RS480_MC_MISC_UMA_CNTL          0x5f
+#define RS480_MC_MCLK_CNTL              0x7a
+#define RS480_MC_UMA_DUALCH_CNTL        0x86
+
+#define RS690_MC_FB_LOCATION            0x100
+#define RS690_MC_AGP_LOCATION           0x101
+#define RS690_MC_AGP_BASE               0x102
+#define RS690_MC_AGP_BASE_2             0x103
+
+#define RS600_MC_INDEX                          0x70
+#       define RS600_MC_ADDR_MASK               0xffff
+#       define RS600_MC_IND_SEQ_RBS_0           (1 << 16)
+#       define RS600_MC_IND_SEQ_RBS_1           (1 << 17)
+#       define RS600_MC_IND_SEQ_RBS_2           (1 << 18)
+#       define RS600_MC_IND_SEQ_RBS_3           (1 << 19)
+#       define RS600_MC_IND_AIC_RBS             (1 << 20)
+#       define RS600_MC_IND_CITF_ARB0           (1 << 21)
+#       define RS600_MC_IND_CITF_ARB1           (1 << 22)
+#       define RS600_MC_IND_WR_EN               (1 << 23)
+#define RS600_MC_DATA                           0x74
+
+#define RS600_MC_STATUS                         0x0
+#       define RS600_MC_IDLE                    (1 << 1)
+#define RS600_MC_FB_LOCATION                    0x4
+#define RS600_MC_AGP_LOCATION                   0x5
+#define RS600_AGP_BASE                          0x6
+#define RS600_AGP_BASE_2                        0x7
+#define RS600_MC_CNTL1                          0x9
+#       define RS600_ENABLE_PAGE_TABLES         (1 << 26)
+#define RS600_MC_PT0_CNTL                       0x100
+#       define RS600_ENABLE_PT                  (1 << 0)
+#       define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15)
+#       define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21)
+#       define RS600_INVALIDATE_ALL_L1_TLBS     (1 << 28)
+#       define RS600_INVALIDATE_L2_CACHE        (1 << 29)
+#define RS600_MC_PT0_CONTEXT0_CNTL              0x102
+#       define RS600_ENABLE_PAGE_TABLE          (1 << 0)
+#       define RS600_PAGE_TABLE_TYPE_FLAT       (0 << 1)
+#define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR   0x112
+#define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR  0x114
+#define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c
+#define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR    0x12c
+#define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR   0x13c
+#define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR     0x14c
+#define RS600_MC_PT0_CLIENT0_CNTL               0x16c
+#       define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE       (1 << 0)
+#       define RS600_TRANSLATION_MODE_OVERRIDE              (1 << 1)
+#       define RS600_SYSTEM_ACCESS_MODE_MASK                (3 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_PA_ONLY             (0 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP         (1 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_IN_SYS              (2 << 8)
+#       define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS          (3 << 8)
+#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH        (0 << 10)
+#       define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE       (1 << 10)
+#       define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11)
+#       define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14)
+#       define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
+#       define RS600_INVALIDATE_L1_TLB          (1 << 20)
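+
+/*
+ * How these combine (hedged sketch, values illustrative): RS600 GART
+ * setup points CONTEXT0 at a flat page table and then enables
+ * translation, roughly:
+ *
+ *	WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL,
+ *		    RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT);
+ *	WRITE_MCIND(RS600_MC_PT0_CNTL, RS600_ENABLE_PT |
+ *		    RS600_EFFECTIVE_L2_CACHE_SIZE(4) |
+ *		    RS600_EFFECTIVE_L2_QUEUE_SIZE(4));
+ *
+ * where WRITE_MCIND is a hypothetical helper for the RS600_MC_INDEX/
+ * RS600_MC_DATA pair above (RS600_MC_IND_WR_EN set for writes).
+ */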
+
+#define R520_MC_IND_INDEX 0x70
+#define R520_MC_IND_WR_EN (1 << 24)
+#define R520_MC_IND_DATA  0x74
+
+#define RV515_MC_FB_LOCATION 0x01
+#define RV515_MC_AGP_LOCATION 0x02
+#define RV515_MC_AGP_BASE     0x03
+#define RV515_MC_AGP_BASE_2   0x04
+
+#define R520_MC_FB_LOCATION 0x04
+#define R520_MC_AGP_LOCATION 0x05
+#define R520_MC_AGP_BASE     0x06
+#define R520_MC_AGP_BASE_2   0x07
+
+#define RADEON_MPP_TB_CONFIG		0x01c0
+#define RADEON_MEM_CNTL			0x0140
+#define RADEON_MEM_SDRAM_MODE_REG	0x0158
+#define RADEON_AGP_BASE_2		0x015c /* r200+ only */
+#define RS480_AGP_BASE_2		0x0164
+#define RADEON_AGP_BASE			0x0170
+
+/* pipe config regs */
+#define R400_GB_PIPE_SELECT             0x402c
+#define RV530_GB_PIPE_SELECT2           0x4124
+#define R500_DYN_SCLK_PWMEM_PIPE        0x000d /* PLL */
+#define R300_GB_TILE_CONFIG             0x4018
+#       define R300_ENABLE_TILING       (1 << 0)
+#       define R300_PIPE_COUNT_RV350    (0 << 1)
+#       define R300_PIPE_COUNT_R300     (3 << 1)
+#       define R300_PIPE_COUNT_R420_3P  (6 << 1)
+#       define R300_PIPE_COUNT_R420     (7 << 1)
+#       define R300_TILE_SIZE_8         (0 << 4)
+#       define R300_TILE_SIZE_16        (1 << 4)
+#       define R300_TILE_SIZE_32        (2 << 4)
+#       define R300_SUBPIXEL_1_12       (0 << 16)
+#       define R300_SUBPIXEL_1_16       (1 << 16)
+#define R300_DST_PIPE_CONFIG            0x170c
+#       define R300_PIPE_AUTO_CONFIG    (1 << 31)
+#define R300_RB2D_DSTCACHE_MODE         0x3428
+#       define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
+#       define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
+
+#define RADEON_RB3D_COLOROFFSET		0x1c40
+#define RADEON_RB3D_COLORPITCH		0x1c48
+
+#define	RADEON_SRC_X_Y			0x1590
+
+#define RADEON_DP_GUI_MASTER_CNTL	0x146c
+#	define RADEON_GMC_SRC_PITCH_OFFSET_CNTL	(1 << 0)
+#	define RADEON_GMC_DST_PITCH_OFFSET_CNTL	(1 << 1)
+#	define RADEON_GMC_BRUSH_SOLID_COLOR	(13 << 4)
+#	define RADEON_GMC_BRUSH_NONE		(15 << 4)
+#	define RADEON_GMC_DST_16BPP		(4 << 8)
+#	define RADEON_GMC_DST_24BPP		(5 << 8)
+#	define RADEON_GMC_DST_32BPP		(6 << 8)
+#	define RADEON_GMC_DST_DATATYPE_SHIFT	8
+#	define RADEON_GMC_SRC_DATATYPE_COLOR	(3 << 12)
+#	define RADEON_DP_SRC_SOURCE_MEMORY	(2 << 24)
+#	define RADEON_DP_SRC_SOURCE_HOST_DATA	(3 << 24)
+#	define RADEON_GMC_CLR_CMP_CNTL_DIS	(1 << 28)
+#	define RADEON_GMC_WR_MSK_DIS		(1 << 30)
+#	define RADEON_ROP3_S			0x00cc0000
+#	define RADEON_ROP3_P			0x00f00000
+#define RADEON_DP_WRITE_MASK		0x16cc
+#define RADEON_SRC_PITCH_OFFSET		0x1428
+#define RADEON_DST_PITCH_OFFSET		0x142c
+#define RADEON_DST_PITCH_OFFSET_C	0x1c80
+#	define RADEON_DST_TILE_LINEAR		(0 << 30)
+#	define RADEON_DST_TILE_MACRO		(1 << 30)
+#	define RADEON_DST_TILE_MICRO		(2 << 30)
+#	define RADEON_DST_TILE_BOTH		(3 << 30)
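+
+/*
+ * Example: the RADEON_DP_GUI_MASTER_CNTL bits above compose one 2D
+ * operation per dword.  A 32bpp solid fill, say, could combine
+ * (illustrative):
+ *
+ *	RADEON_GMC_DST_PITCH_OFFSET_CNTL | RADEON_GMC_BRUSH_SOLID_COLOR |
+ *	RADEON_GMC_DST_32BPP | RADEON_GMC_SRC_DATATYPE_COLOR |
+ *	RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS
+ */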
+
+#define RADEON_SCRATCH_REG0		0x15e0
+#define RADEON_SCRATCH_REG1		0x15e4
+#define RADEON_SCRATCH_REG2		0x15e8
+#define RADEON_SCRATCH_REG3		0x15ec
+#define RADEON_SCRATCH_REG4		0x15f0
+#define RADEON_SCRATCH_REG5		0x15f4
+#define RADEON_SCRATCH_UMSK		0x0770
+#define RADEON_SCRATCH_ADDR		0x0774
+
+#define RADEON_SCRATCHOFF(x)		(RADEON_SCRATCH_REG_OFFSET + 4*(x))
+
+extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
+
+#define GET_SCRATCH(dev_priv, x) radeon_get_scratch(dev_priv, x)
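+
+/*
+ * The scratch registers are written back by the CP, either into the
+ * register file or (for registers enabled in RADEON_SCRATCH_UMSK) into
+ * host memory at RADEON_SCRATCH_ADDR, so e.g. GET_SCRATCH(dev_priv, 0)
+ * is how the driver polls frame/fence progress.
+ */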
+
+#define R600_SCRATCH_REG0		0x8500
+#define R600_SCRATCH_REG1		0x8504
+#define R600_SCRATCH_REG2		0x8508
+#define R600_SCRATCH_REG3		0x850c
+#define R600_SCRATCH_REG4		0x8510
+#define R600_SCRATCH_REG5		0x8514
+#define R600_SCRATCH_REG6		0x8518
+#define R600_SCRATCH_REG7		0x851c
+#define R600_SCRATCH_UMSK		0x8540
+#define R600_SCRATCH_ADDR		0x8544
+
+#define R600_SCRATCHOFF(x)		(R600_SCRATCH_REG_OFFSET + 4*(x))
+
+#define RADEON_GEN_INT_CNTL		0x0040
+#	define RADEON_CRTC_VBLANK_MASK		(1 << 0)
+#	define RADEON_CRTC2_VBLANK_MASK		(1 << 9)
+#	define RADEON_GUI_IDLE_INT_ENABLE	(1 << 19)
+#	define RADEON_SW_INT_ENABLE		(1 << 25)
+
+#define RADEON_GEN_INT_STATUS		0x0044
+#	define RADEON_CRTC_VBLANK_STAT		(1 << 0)
+#	define RADEON_CRTC_VBLANK_STAT_ACK	(1 << 0)
+#	define RADEON_CRTC2_VBLANK_STAT		(1 << 9)
+#	define RADEON_CRTC2_VBLANK_STAT_ACK	(1 << 9)
+#	define RADEON_GUI_IDLE_INT_TEST_ACK     (1 << 19)
+#	define RADEON_SW_INT_TEST		(1 << 25)
+#	define RADEON_SW_INT_TEST_ACK		(1 << 25)
+#	define RADEON_SW_INT_FIRE		(1 << 26)
+#       define R500_DISPLAY_INT_STATUS          (1 << 0)
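+
+/*
+ * Interrupts follow the usual status/ack convention: read
+ * RADEON_GEN_INT_STATUS, then write the corresponding *_ACK bits back
+ * to clear them.  Sketch:
+ *
+ *	u32 stat = RADEON_READ(RADEON_GEN_INT_STATUS);
+ *
+ *	if (stat & RADEON_SW_INT_TEST)
+ *		RADEON_WRITE(RADEON_GEN_INT_STATUS, RADEON_SW_INT_TEST_ACK);
+ */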
+
+#define RADEON_HOST_PATH_CNTL		0x0130
+#	define RADEON_HDP_SOFT_RESET		(1 << 26)
+#	define RADEON_HDP_WC_TIMEOUT_MASK	(7 << 28)
+#	define RADEON_HDP_WC_TIMEOUT_28BCLK	(7 << 28)
+
+#define RADEON_ISYNC_CNTL		0x1724
+#	define RADEON_ISYNC_ANY2D_IDLE3D	(1 << 0)
+#	define RADEON_ISYNC_ANY3D_IDLE2D	(1 << 1)
+#	define RADEON_ISYNC_TRIG2D_IDLE3D	(1 << 2)
+#	define RADEON_ISYNC_TRIG3D_IDLE2D	(1 << 3)
+#	define RADEON_ISYNC_WAIT_IDLEGUI	(1 << 4)
+#	define RADEON_ISYNC_CPSCRATCH_IDLEGUI	(1 << 5)
+
+#define RADEON_RBBM_GUICNTL		0x172c
+#	define RADEON_HOST_DATA_SWAP_NONE	(0 << 0)
+#	define RADEON_HOST_DATA_SWAP_16BIT	(1 << 0)
+#	define RADEON_HOST_DATA_SWAP_32BIT	(2 << 0)
+#	define RADEON_HOST_DATA_SWAP_HDW	(3 << 0)
+
+#define RADEON_MC_AGP_LOCATION		0x014c
+#define RADEON_MC_FB_LOCATION		0x0148
+#define RADEON_MCLK_CNTL		0x0012
+#	define RADEON_FORCEON_MCLKA		(1 << 16)
+#	define RADEON_FORCEON_MCLKB		(1 << 17)
+#	define RADEON_FORCEON_YCLKA		(1 << 18)
+#	define RADEON_FORCEON_YCLKB		(1 << 19)
+#	define RADEON_FORCEON_MC		(1 << 20)
+#	define RADEON_FORCEON_AIC		(1 << 21)
+
+#define RADEON_PP_BORDER_COLOR_0	0x1d40
+#define RADEON_PP_BORDER_COLOR_1	0x1d44
+#define RADEON_PP_BORDER_COLOR_2	0x1d48
+#define RADEON_PP_CNTL			0x1c38
+#	define RADEON_SCISSOR_ENABLE		(1 <<  1)
+#define RADEON_PP_LUM_MATRIX		0x1d00
+#define RADEON_PP_MISC			0x1c14
+#define RADEON_PP_ROT_MATRIX_0		0x1d58
+#define RADEON_PP_TXFILTER_0		0x1c54
+#define RADEON_PP_TXOFFSET_0		0x1c5c
+#define RADEON_PP_TXFILTER_1		0x1c6c
+#define RADEON_PP_TXFILTER_2		0x1c84
+
+#define R300_RB2D_DSTCACHE_CTLSTAT	0x342c /* use R300_DSTCACHE_CTLSTAT */
+#define R300_DSTCACHE_CTLSTAT		0x1714
+#	define R300_RB2D_DC_FLUSH		(3 << 0)
+#	define R300_RB2D_DC_FREE		(3 << 2)
+#	define R300_RB2D_DC_FLUSH_ALL		0xf
+#	define R300_RB2D_DC_BUSY		(1 << 31)
+#define RADEON_RB3D_CNTL		0x1c3c
+#	define RADEON_ALPHA_BLEND_ENABLE	(1 << 0)
+#	define RADEON_PLANE_MASK_ENABLE		(1 << 1)
+#	define RADEON_DITHER_ENABLE		(1 << 2)
+#	define RADEON_ROUND_ENABLE		(1 << 3)
+#	define RADEON_SCALE_DITHER_ENABLE	(1 << 4)
+#	define RADEON_DITHER_INIT		(1 << 5)
+#	define RADEON_ROP_ENABLE		(1 << 6)
+#	define RADEON_STENCIL_ENABLE		(1 << 7)
+#	define RADEON_Z_ENABLE			(1 << 8)
+#	define RADEON_ZBLOCK16			(1 << 15)
+#define RADEON_RB3D_DEPTHOFFSET		0x1c24
+#define RADEON_RB3D_DEPTHCLEARVALUE	0x3230
+#define RADEON_RB3D_DEPTHPITCH		0x1c28
+#define RADEON_RB3D_PLANEMASK		0x1d84
+#define RADEON_RB3D_STENCILREFMASK	0x1d7c
+#define RADEON_RB3D_ZCACHE_MODE		0x3250
+#define RADEON_RB3D_ZCACHE_CTLSTAT	0x3254
+#	define RADEON_RB3D_ZC_FLUSH		(1 << 0)
+#	define RADEON_RB3D_ZC_FREE		(1 << 2)
+#	define RADEON_RB3D_ZC_FLUSH_ALL		0x5
+#	define RADEON_RB3D_ZC_BUSY		(1 << 31)
+#define R300_ZB_ZCACHE_CTLSTAT                  0x4f18
+#	define R300_ZC_FLUSH		        (1 << 0)
+#	define R300_ZC_FREE		        (1 << 1)
+#	define R300_ZC_BUSY		        (1 << 31)
+#define RADEON_RB3D_DSTCACHE_CTLSTAT	0x325c
+#	define RADEON_RB3D_DC_FLUSH		(3 << 0)
+#	define RADEON_RB3D_DC_FREE		(3 << 2)
+#	define RADEON_RB3D_DC_FLUSH_ALL		0xf
+#	define RADEON_RB3D_DC_BUSY		(1 << 31)
+#define R300_RB3D_DSTCACHE_CTLSTAT              0x4e4c
+#	define R300_RB3D_DC_FLUSH		(2 << 0)
+#	define R300_RB3D_DC_FREE		(2 << 2)
+#	define R300_RB3D_DC_FINISH		(1 << 4)
+#define RADEON_RB3D_ZSTENCILCNTL	0x1c2c
+#	define RADEON_Z_TEST_MASK		(7 << 4)
+#	define RADEON_Z_TEST_ALWAYS		(7 << 4)
+#	define RADEON_Z_HIERARCHY_ENABLE	(1 << 8)
+#	define RADEON_STENCIL_TEST_ALWAYS	(7 << 12)
+#	define RADEON_STENCIL_S_FAIL_REPLACE	(2 << 16)
+#	define RADEON_STENCIL_ZPASS_REPLACE	(2 << 20)
+#	define RADEON_STENCIL_ZFAIL_REPLACE	(2 << 24)
+#	define RADEON_Z_COMPRESSION_ENABLE	(1 << 28)
+#	define RADEON_FORCE_Z_DIRTY		(1 << 29)
+#	define RADEON_Z_WRITE_ENABLE		(1 << 30)
+#	define RADEON_Z_DECOMPRESSION_ENABLE	(1 << 31)
+#define RADEON_RBBM_SOFT_RESET		0x00f0
+#	define RADEON_SOFT_RESET_CP		(1 <<  0)
+#	define RADEON_SOFT_RESET_HI		(1 <<  1)
+#	define RADEON_SOFT_RESET_SE		(1 <<  2)
+#	define RADEON_SOFT_RESET_RE		(1 <<  3)
+#	define RADEON_SOFT_RESET_PP		(1 <<  4)
+#	define RADEON_SOFT_RESET_E2		(1 <<  5)
+#	define RADEON_SOFT_RESET_RB		(1 <<  6)
+#	define RADEON_SOFT_RESET_HDP		(1 <<  7)
+/*
+ *   6:0  Available slots in the FIFO
+ *   8    Host Interface active
+ *   9    CP request active
+ *   10   FIFO request active
+ *   11   Host Interface retry active
+ *   12   CP retry active
+ *   13   FIFO retry active
+ *   14   FIFO pipeline busy
+ *   15   Event engine busy
+ *   16   CP command stream busy
+ *   17   2D engine busy
+ *   18   2D portion of render backend busy
+ *   20   3D setup engine busy
+ *   26   GA engine busy
+ *   27   CBA 2D engine busy
+ *   31   2D engine busy or 3D engine busy or FIFO not empty or CP busy or
+ *           command stream queue not empty or Ring Buffer not empty
+ */
+#define RADEON_RBBM_STATUS		0x0e40
+/* 0x1740 is a mirror of RADEON_RBBM_STATUS; the alias is left here for reference: */
+/* #define RADEON_RBBM_STATUS		0x1740 */
+/* bits 6:0 are dword slots available in the cmd fifo */
+#	define RADEON_RBBM_FIFOCNT_MASK		0x007f
+#	define RADEON_HIRQ_ON_RBB	(1 <<  8)
+#	define RADEON_CPRQ_ON_RBB	(1 <<  9)
+#	define RADEON_CFRQ_ON_RBB	(1 << 10)
+#	define RADEON_HIRQ_IN_RTBUF	(1 << 11)
+#	define RADEON_CPRQ_IN_RTBUF	(1 << 12)
+#	define RADEON_CFRQ_IN_RTBUF	(1 << 13)
+#	define RADEON_PIPE_BUSY		(1 << 14)
+#	define RADEON_ENG_EV_BUSY	(1 << 15)
+#	define RADEON_CP_CMDSTRM_BUSY	(1 << 16)
+#	define RADEON_E2_BUSY		(1 << 17)
+#	define RADEON_RB2D_BUSY		(1 << 18)
+#	define RADEON_RB3D_BUSY		(1 << 19) /* not used on r300 */
+#	define RADEON_VAP_BUSY		(1 << 20)
+#	define RADEON_RE_BUSY		(1 << 21) /* not used on r300 */
+#	define RADEON_TAM_BUSY		(1 << 22) /* not used on r300 */
+#	define RADEON_TDM_BUSY		(1 << 23) /* not used on r300 */
+#	define RADEON_PB_BUSY		(1 << 24) /* not used on r300 */
+#	define RADEON_TIM_BUSY		(1 << 25) /* not used on r300 */
+#	define RADEON_GA_BUSY		(1 << 26)
+#	define RADEON_CBA2D_BUSY	(1 << 27)
+#	define RADEON_RBBM_ACTIVE	(1 << 31)
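+
+/*
+ * Idle waits poll this register until the engine quiesces, e.g.
+ * (sketch, assuming the driver's usec_timeout loop style):
+ *
+ *	for (i = 0; i < dev_priv->usec_timeout; i++) {
+ *		if (!(RADEON_READ(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE))
+ *			return 0;
+ *		DRM_UDELAY(1);
+ *	}
+ */
+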
+#define RADEON_RE_LINE_PATTERN		0x1cd0
+#define RADEON_RE_MISC			0x26c4
+#define RADEON_RE_TOP_LEFT		0x26c0
+#define RADEON_RE_WIDTH_HEIGHT		0x1c44
+#define RADEON_RE_STIPPLE_ADDR		0x1cc8
+#define RADEON_RE_STIPPLE_DATA		0x1ccc
+
+#define RADEON_SCISSOR_TL_0		0x1cd8
+#define RADEON_SCISSOR_BR_0		0x1cdc
+#define RADEON_SCISSOR_TL_1		0x1ce0
+#define RADEON_SCISSOR_BR_1		0x1ce4
+#define RADEON_SCISSOR_TL_2		0x1ce8
+#define RADEON_SCISSOR_BR_2		0x1cec
+#define RADEON_SE_COORD_FMT		0x1c50
+#define RADEON_SE_CNTL			0x1c4c
+#	define RADEON_FFACE_CULL_CW		(0 << 0)
+#	define RADEON_BFACE_SOLID		(3 << 1)
+#	define RADEON_FFACE_SOLID		(3 << 3)
+#	define RADEON_FLAT_SHADE_VTX_LAST	(3 << 6)
+#	define RADEON_DIFFUSE_SHADE_FLAT	(1 << 8)
+#	define RADEON_DIFFUSE_SHADE_GOURAUD	(2 << 8)
+#	define RADEON_ALPHA_SHADE_FLAT		(1 << 10)
+#	define RADEON_ALPHA_SHADE_GOURAUD	(2 << 10)
+#	define RADEON_SPECULAR_SHADE_FLAT	(1 << 12)
+#	define RADEON_SPECULAR_SHADE_GOURAUD	(2 << 12)
+#	define RADEON_FOG_SHADE_FLAT		(1 << 14)
+#	define RADEON_FOG_SHADE_GOURAUD		(2 << 14)
+#	define RADEON_VPORT_XY_XFORM_ENABLE	(1 << 24)
+#	define RADEON_VPORT_Z_XFORM_ENABLE	(1 << 25)
+#	define RADEON_VTX_PIX_CENTER_OGL	(1 << 27)
+#	define RADEON_ROUND_MODE_TRUNC		(0 << 28)
+#	define RADEON_ROUND_PREC_8TH_PIX	(1 << 30)
+#define RADEON_SE_CNTL_STATUS		0x2140
+#define RADEON_SE_LINE_WIDTH		0x1db8
+#define RADEON_SE_VPORT_XSCALE		0x1d98
+#define RADEON_SE_ZBIAS_FACTOR		0x1db0
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
+#define RADEON_SE_TCL_OUTPUT_VTX_FMT         0x2254
+#define RADEON_SE_TCL_VECTOR_INDX_REG        0x2200
+#       define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT  16
+#       define RADEON_VEC_INDX_DWORD_COUNT_SHIFT     28
+#define RADEON_SE_TCL_VECTOR_DATA_REG       0x2204
+#define RADEON_SE_TCL_SCALAR_INDX_REG       0x2208
+#       define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT  16
+#define RADEON_SE_TCL_SCALAR_DATA_REG       0x220C
+#define RADEON_SURFACE_ACCESS_FLAGS	0x0bf8
+#define RADEON_SURFACE_ACCESS_CLR	0x0bfc
+#define RADEON_SURFACE_CNTL		0x0b00
+#	define RADEON_SURF_TRANSLATION_DIS	(1 << 8)
+#	define RADEON_NONSURF_AP0_SWP_MASK	(3 << 20)
+#	define RADEON_NONSURF_AP0_SWP_LITTLE	(0 << 20)
+#	define RADEON_NONSURF_AP0_SWP_BIG16	(1 << 20)
+#	define RADEON_NONSURF_AP0_SWP_BIG32	(2 << 20)
+#	define RADEON_NONSURF_AP1_SWP_MASK	(3 << 22)
+#	define RADEON_NONSURF_AP1_SWP_LITTLE	(0 << 22)
+#	define RADEON_NONSURF_AP1_SWP_BIG16	(1 << 22)
+#	define RADEON_NONSURF_AP1_SWP_BIG32	(2 << 22)
+#define RADEON_SURFACE0_INFO		0x0b0c
+#	define RADEON_SURF_PITCHSEL_MASK	(0x1ff << 0)
+#	define RADEON_SURF_TILE_MODE_MASK	(3 << 16)
+#	define RADEON_SURF_TILE_MODE_MACRO	(0 << 16)
+#	define RADEON_SURF_TILE_MODE_MICRO	(1 << 16)
+#	define RADEON_SURF_TILE_MODE_32BIT_Z	(2 << 16)
+#	define RADEON_SURF_TILE_MODE_16BIT_Z	(3 << 16)
+#define RADEON_SURFACE0_LOWER_BOUND	0x0b04
+#define RADEON_SURFACE0_UPPER_BOUND	0x0b08
+#	define RADEON_SURF_ADDRESS_FIXED_MASK	(0x3ff << 0)
+#define RADEON_SURFACE1_INFO		0x0b1c
+#define RADEON_SURFACE1_LOWER_BOUND	0x0b14
+#define RADEON_SURFACE1_UPPER_BOUND	0x0b18
+#define RADEON_SURFACE2_INFO		0x0b2c
+#define RADEON_SURFACE2_LOWER_BOUND	0x0b24
+#define RADEON_SURFACE2_UPPER_BOUND	0x0b28
+#define RADEON_SURFACE3_INFO		0x0b3c
+#define RADEON_SURFACE3_LOWER_BOUND	0x0b34
+#define RADEON_SURFACE3_UPPER_BOUND	0x0b38
+#define RADEON_SURFACE4_INFO		0x0b4c
+#define RADEON_SURFACE4_LOWER_BOUND	0x0b44
+#define RADEON_SURFACE4_UPPER_BOUND	0x0b48
+#define RADEON_SURFACE5_INFO		0x0b5c
+#define RADEON_SURFACE5_LOWER_BOUND	0x0b54
+#define RADEON_SURFACE5_UPPER_BOUND	0x0b58
+#define RADEON_SURFACE6_INFO		0x0b6c
+#define RADEON_SURFACE6_LOWER_BOUND	0x0b64
+#define RADEON_SURFACE6_UPPER_BOUND	0x0b68
+#define RADEON_SURFACE7_INFO		0x0b7c
+#define RADEON_SURFACE7_LOWER_BOUND	0x0b74
+#define RADEON_SURFACE7_UPPER_BOUND	0x0b78
+#define RADEON_SW_SEMAPHORE		0x013c
+
+#define RADEON_WAIT_UNTIL		0x1720
+#	define RADEON_WAIT_CRTC_PFLIP		(1 << 0)
+#	define RADEON_WAIT_2D_IDLE		(1 << 14)
+#	define RADEON_WAIT_3D_IDLE		(1 << 15)
+#	define RADEON_WAIT_2D_IDLECLEAN		(1 << 16)
+#	define RADEON_WAIT_3D_IDLECLEAN		(1 << 17)
+#	define RADEON_WAIT_HOST_IDLECLEAN	(1 << 18)
+
+#define RADEON_RB3D_ZMASKOFFSET		0x3234
+#define RADEON_RB3D_ZSTENCILCNTL	0x1c2c
+#	define RADEON_DEPTH_FORMAT_16BIT_INT_Z	(0 << 0)
+#	define RADEON_DEPTH_FORMAT_24BIT_INT_Z	(2 << 0)
+
+/* CP registers */
+#define RADEON_CP_ME_RAM_ADDR		0x07d4
+#define RADEON_CP_ME_RAM_RADDR		0x07d8
+#define RADEON_CP_ME_RAM_DATAH		0x07dc
+#define RADEON_CP_ME_RAM_DATAL		0x07e0
+
+#define RADEON_CP_RB_BASE		0x0700
+#define RADEON_CP_RB_CNTL		0x0704
+#	define RADEON_BUF_SWAP_32BIT		(2 << 16)
+#	define RADEON_RB_NO_UPDATE		(1 << 27)
+#	define RADEON_RB_RPTR_WR_ENA		(1 << 31)
+#define RADEON_CP_RB_RPTR_ADDR		0x070c
+#define RADEON_CP_RB_RPTR		0x0710
+#define RADEON_CP_RB_WPTR		0x0714
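+
+/*
+ * The ring is a standard producer/consumer pair: the driver writes
+ * packets into the buffer at RADEON_CP_RB_BASE and advances the write
+ * pointer, while the CP consumes from the read pointer (optionally
+ * mirrored to host memory via RADEON_CP_RB_RPTR_ADDR).  Committing new
+ * packets is just (sketch):
+ *
+ *	RADEON_WRITE(RADEON_CP_RB_WPTR, dev_priv->ring.tail);
+ */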
+
+#define RADEON_CP_RB_WPTR_DELAY		0x0718
+#	define RADEON_PRE_WRITE_TIMER_SHIFT	0
+#	define RADEON_PRE_WRITE_LIMIT_SHIFT	23
+
+#define RADEON_CP_IB_BASE		0x0738
+
+#define RADEON_CP_CSQ_CNTL		0x0740
+#	define RADEON_CSQ_CNT_PRIMARY_MASK	(0xff << 0)
+#	define RADEON_CSQ_PRIDIS_INDDIS		(0 << 28)
+#	define RADEON_CSQ_PRIPIO_INDDIS		(1 << 28)
+#	define RADEON_CSQ_PRIBM_INDDIS		(2 << 28)
+#	define RADEON_CSQ_PRIPIO_INDBM		(3 << 28)
+#	define RADEON_CSQ_PRIBM_INDBM		(4 << 28)
+#	define RADEON_CSQ_PRIPIO_INDPIO		(15 << 28)
+
+#define R300_CP_RESYNC_ADDR		0x0778
+#define R300_CP_RESYNC_DATA		0x077c
+
+#define RADEON_AIC_CNTL			0x01d0
+#	define RADEON_PCIGART_TRANSLATE_EN	(1 << 0)
+#	define RS400_MSI_REARM	                (1 << 3)
+#define RADEON_AIC_STAT			0x01d4
+#define RADEON_AIC_PT_BASE		0x01d8
+#define RADEON_AIC_LO_ADDR		0x01dc
+#define RADEON_AIC_HI_ADDR		0x01e0
+#define RADEON_AIC_TLB_ADDR		0x01e4
+#define RADEON_AIC_TLB_DATA		0x01e8
+
+/* CP command packets */
+#define RADEON_CP_PACKET0		0x00000000
+#	define RADEON_ONE_REG_WR		(1 << 15)
+#define RADEON_CP_PACKET1		0x40000000
+#define RADEON_CP_PACKET2		0x80000000
+#define RADEON_CP_PACKET3		0xC0000000
+#       define RADEON_CP_NOP                    0x00001000
+#       define RADEON_CP_NEXT_CHAR              0x00001900
+#       define RADEON_CP_PLY_NEXTSCAN           0x00001D00
+#       define RADEON_CP_SET_SCISSORS           0x00001E00
+	     /* GEN_INDX_PRIM is unsupported starting with R300 */
+#	define RADEON_3D_RNDR_GEN_INDX_PRIM	0x00002300
+#	define RADEON_WAIT_FOR_IDLE		0x00002600
+#	define RADEON_3D_DRAW_VBUF		0x00002800
+#	define RADEON_3D_DRAW_IMMD		0x00002900
+#	define RADEON_3D_DRAW_INDX		0x00002A00
+#       define RADEON_CP_LOAD_PALETTE           0x00002C00
+#	define RADEON_3D_LOAD_VBPNTR		0x00002F00
+#	define RADEON_MPEG_IDCT_MACROBLOCK	0x00003000
+#	define RADEON_MPEG_IDCT_MACROBLOCK_REV	0x00003100
+#	define RADEON_3D_CLEAR_ZMASK		0x00003200
+#	define RADEON_CP_INDX_BUFFER		0x00003300
+#       define RADEON_CP_3D_DRAW_VBUF_2         0x00003400
+#       define RADEON_CP_3D_DRAW_IMMD_2         0x00003500
+#       define RADEON_CP_3D_DRAW_INDX_2         0x00003600
+#	define RADEON_3D_CLEAR_HIZ		0x00003700
+#       define RADEON_CP_3D_CLEAR_CMASK         0x00003802
+#	define RADEON_CNTL_HOSTDATA_BLT		0x00009400
+#	define RADEON_CNTL_PAINT_MULTI		0x00009A00
+#	define RADEON_CNTL_BITBLT_MULTI		0x00009B00
+#	define RADEON_CNTL_SET_SCISSORS		0xC0001E00
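+
+/*
+ * A type-3 packet header combines RADEON_CP_PACKET3 with one of the
+ * opcodes above plus a dword count, along the lines of (sketch; n is
+ * one less than the number of dwords that follow):
+ *
+ *	#define CP_PACKET3(pkt, n)					\
+ *		(RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
+ */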
+
+#       define R600_IT_INDIRECT_BUFFER_END      0x00001700
+#       define R600_IT_SET_PREDICATION          0x00002000
+#       define R600_IT_REG_RMW                  0x00002100
+#       define R600_IT_COND_EXEC                0x00002200
+#       define R600_IT_PRED_EXEC                0x00002300
+#       define R600_IT_START_3D_CMDBUF          0x00002400
+#       define R600_IT_DRAW_INDEX_2             0x00002700
+#       define R600_IT_CONTEXT_CONTROL          0x00002800
+#       define R600_IT_DRAW_INDEX_IMMD_BE       0x00002900
+#       define R600_IT_INDEX_TYPE               0x00002A00
+#       define R600_IT_DRAW_INDEX               0x00002B00
+#       define R600_IT_DRAW_INDEX_AUTO          0x00002D00
+#       define R600_IT_DRAW_INDEX_IMMD          0x00002E00
+#       define R600_IT_NUM_INSTANCES            0x00002F00
+#       define R600_IT_STRMOUT_BUFFER_UPDATE    0x00003400
+#       define R600_IT_INDIRECT_BUFFER_MP       0x00003800
+#       define R600_IT_MEM_SEMAPHORE            0x00003900
+#       define R600_IT_MPEG_INDEX               0x00003A00
+#       define R600_IT_WAIT_REG_MEM             0x00003C00
+#       define R600_IT_MEM_WRITE                0x00003D00
+#       define R600_IT_INDIRECT_BUFFER          0x00003200
+#       define R600_IT_SURFACE_SYNC             0x00004300
+#              define R600_CB0_DEST_BASE_ENA    (1 << 6)
+#              define R600_TC_ACTION_ENA        (1 << 23)
+#              define R600_VC_ACTION_ENA        (1 << 24)
+#              define R600_CB_ACTION_ENA        (1 << 25)
+#              define R600_DB_ACTION_ENA        (1 << 26)
+#              define R600_SH_ACTION_ENA        (1 << 27)
+#              define R600_SMX_ACTION_ENA       (1 << 28)
+#       define R600_IT_ME_INITIALIZE            0x00004400
+#	       define R600_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#       define R600_IT_COND_WRITE               0x00004500
+#       define R600_IT_EVENT_WRITE              0x00004600
+#       define R600_IT_EVENT_WRITE_EOP          0x00004700
+#       define R600_IT_ONE_REG_WRITE            0x00005700
+#       define R600_IT_SET_CONFIG_REG           0x00006800
+#              define R600_SET_CONFIG_REG_OFFSET 0x00008000
+#              define R600_SET_CONFIG_REG_END   0x0000ac00
+#       define R600_IT_SET_CONTEXT_REG          0x00006900
+#              define R600_SET_CONTEXT_REG_OFFSET 0x00028000
+#              define R600_SET_CONTEXT_REG_END  0x00029000
+#       define R600_IT_SET_ALU_CONST            0x00006A00
+#              define R600_SET_ALU_CONST_OFFSET 0x00030000
+#              define R600_SET_ALU_CONST_END    0x00032000
+#       define R600_IT_SET_BOOL_CONST           0x00006B00
+#              define R600_SET_BOOL_CONST_OFFSET 0x0003e380
+#              define R600_SET_BOOL_CONST_END   0x00040000
+#       define R600_IT_SET_LOOP_CONST           0x00006C00
+#              define R600_SET_LOOP_CONST_OFFSET 0x0003e200
+#              define R600_SET_LOOP_CONST_END   0x0003e380
+#       define R600_IT_SET_RESOURCE             0x00006D00
+#              define R600_SET_RESOURCE_OFFSET  0x00038000
+#              define R600_SET_RESOURCE_END     0x0003c000
+#              define R600_SQ_TEX_VTX_INVALID_TEXTURE  0x0
+#              define R600_SQ_TEX_VTX_INVALID_BUFFER   0x1
+#              define R600_SQ_TEX_VTX_VALID_TEXTURE    0x2
+#              define R600_SQ_TEX_VTX_VALID_BUFFER     0x3
+#       define R600_IT_SET_SAMPLER              0x00006E00
+#              define R600_SET_SAMPLER_OFFSET   0x0003c000
+#              define R600_SET_SAMPLER_END      0x0003cff0
+#       define R600_IT_SET_CTL_CONST            0x00006F00
+#              define R600_SET_CTL_CONST_OFFSET 0x0003cff0
+#              define R600_SET_CTL_CONST_END    0x0003e200
+#       define R600_IT_SURFACE_BASE_UPDATE      0x00007300
+
+#define RADEON_CP_PACKET_MASK		0xC0000000
+#define RADEON_CP_PACKET_COUNT_MASK	0x3fff0000
+#define RADEON_CP_PACKET0_REG_MASK	0x000007ff
+#define RADEON_CP_PACKET1_REG0_MASK	0x000007ff
+#define RADEON_CP_PACKET1_REG1_MASK	0x003ff800
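+
+/*
+ * A type-0 header puts the packet type in bits 31:30, the dword count
+ * in the count field and the register dword index in the low bits, so
+ * a header for writing registers starting at byte offset reg can be
+ * built as (sketch; n is one less than the number of data dwords that
+ * follow):
+ *
+ *	#define CP_PACKET0(reg, n)					\
+ *		(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
+ */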
+
+#define RADEON_VTX_Z_PRESENT			(1 << 31)
+#define RADEON_VTX_PKCOLOR_PRESENT		(1 << 3)
+
+#define RADEON_PRIM_TYPE_NONE			(0 << 0)
+#define RADEON_PRIM_TYPE_POINT			(1 << 0)
+#define RADEON_PRIM_TYPE_LINE			(2 << 0)
+#define RADEON_PRIM_TYPE_LINE_STRIP		(3 << 0)
+#define RADEON_PRIM_TYPE_TRI_LIST		(4 << 0)
+#define RADEON_PRIM_TYPE_TRI_FAN		(5 << 0)
+#define RADEON_PRIM_TYPE_TRI_STRIP		(6 << 0)
+#define RADEON_PRIM_TYPE_TRI_TYPE2		(7 << 0)
+#define RADEON_PRIM_TYPE_RECT_LIST		(8 << 0)
+#define RADEON_PRIM_TYPE_3VRT_POINT_LIST	(9 << 0)
+#define RADEON_PRIM_TYPE_3VRT_LINE_LIST		(10 << 0)
+#define RADEON_PRIM_TYPE_MASK                   0xf
+#define RADEON_PRIM_WALK_IND			(1 << 4)
+#define RADEON_PRIM_WALK_LIST			(2 << 4)
+#define RADEON_PRIM_WALK_RING			(3 << 4)
+#define RADEON_COLOR_ORDER_BGRA			(0 << 6)
+#define RADEON_COLOR_ORDER_RGBA			(1 << 6)
+#define RADEON_MAOS_ENABLE			(1 << 7)
+#define RADEON_VTX_FMT_R128_MODE		(0 << 8)
+#define RADEON_VTX_FMT_RADEON_MODE		(1 << 8)
+#define RADEON_NUM_VERTICES_SHIFT		16
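+
+/*
+ * These bits form the control dword of a draw packet: primitive type
+ * in the low nibble, walk mode at bits 5:4, vertex count in the upper
+ * half.  A triangle list pulled from a vertex buffer would look like
+ * (illustrative):
+ *
+ *	RADEON_PRIM_TYPE_TRI_LIST | RADEON_PRIM_WALK_LIST |
+ *	(nr_verts << RADEON_NUM_VERTICES_SHIFT)
+ */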
+
+#define RADEON_COLOR_FORMAT_CI8		2
+#define RADEON_COLOR_FORMAT_ARGB1555	3
+#define RADEON_COLOR_FORMAT_RGB565	4
+#define RADEON_COLOR_FORMAT_ARGB8888	6
+#define RADEON_COLOR_FORMAT_RGB332	7
+#define RADEON_COLOR_FORMAT_RGB8	9
+#define RADEON_COLOR_FORMAT_ARGB4444	15
+
+#define RADEON_TXFORMAT_I8		0
+#define RADEON_TXFORMAT_AI88		1
+#define RADEON_TXFORMAT_RGB332		2
+#define RADEON_TXFORMAT_ARGB1555	3
+#define RADEON_TXFORMAT_RGB565		4
+#define RADEON_TXFORMAT_ARGB4444	5
+#define RADEON_TXFORMAT_ARGB8888	6
+#define RADEON_TXFORMAT_RGBA8888	7
+#define RADEON_TXFORMAT_Y8		8
+#define RADEON_TXFORMAT_VYUY422         10
+#define RADEON_TXFORMAT_YVYU422         11
+#define RADEON_TXFORMAT_DXT1            12
+#define RADEON_TXFORMAT_DXT23           14
+#define RADEON_TXFORMAT_DXT45           15
+
+#define R200_PP_TXCBLEND_0                0x2f00
+#define R200_PP_TXCBLEND_1                0x2f10
+#define R200_PP_TXCBLEND_2                0x2f20
+#define R200_PP_TXCBLEND_3                0x2f30
+#define R200_PP_TXCBLEND_4                0x2f40
+#define R200_PP_TXCBLEND_5                0x2f50
+#define R200_PP_TXCBLEND_6                0x2f60
+#define R200_PP_TXCBLEND_7                0x2f70
+#define R200_SE_TCL_LIGHT_MODEL_CTL_0     0x2268
+#define R200_PP_TFACTOR_0                 0x2ee0
+#define R200_SE_VTX_FMT_0                 0x2088
+#define R200_SE_VAP_CNTL                  0x2080
+#define R200_SE_TCL_MATRIX_SEL_0          0x2230
+#define R200_SE_TCL_TEX_PROC_CTL_2        0x22a8
+#define R200_SE_TCL_UCP_VERT_BLEND_CTL    0x22c0
+#define R200_PP_TXFILTER_5                0x2ca0
+#define R200_PP_TXFILTER_4                0x2c80
+#define R200_PP_TXFILTER_3                0x2c60
+#define R200_PP_TXFILTER_2                0x2c40
+#define R200_PP_TXFILTER_1                0x2c20
+#define R200_PP_TXFILTER_0                0x2c00
+#define R200_PP_TXOFFSET_5                0x2d78
+#define R200_PP_TXOFFSET_4                0x2d60
+#define R200_PP_TXOFFSET_3                0x2d48
+#define R200_PP_TXOFFSET_2                0x2d30
+#define R200_PP_TXOFFSET_1                0x2d18
+#define R200_PP_TXOFFSET_0                0x2d00
+
+#define R200_PP_CUBIC_FACES_0             0x2c18
+#define R200_PP_CUBIC_FACES_1             0x2c38
+#define R200_PP_CUBIC_FACES_2             0x2c58
+#define R200_PP_CUBIC_FACES_3             0x2c78
+#define R200_PP_CUBIC_FACES_4             0x2c98
+#define R200_PP_CUBIC_FACES_5             0x2cb8
+#define R200_PP_CUBIC_OFFSET_F1_0         0x2d04
+#define R200_PP_CUBIC_OFFSET_F2_0         0x2d08
+#define R200_PP_CUBIC_OFFSET_F3_0         0x2d0c
+#define R200_PP_CUBIC_OFFSET_F4_0         0x2d10
+#define R200_PP_CUBIC_OFFSET_F5_0         0x2d14
+#define R200_PP_CUBIC_OFFSET_F1_1         0x2d1c
+#define R200_PP_CUBIC_OFFSET_F2_1         0x2d20
+#define R200_PP_CUBIC_OFFSET_F3_1         0x2d24
+#define R200_PP_CUBIC_OFFSET_F4_1         0x2d28
+#define R200_PP_CUBIC_OFFSET_F5_1         0x2d2c
+#define R200_PP_CUBIC_OFFSET_F1_2         0x2d34
+#define R200_PP_CUBIC_OFFSET_F2_2         0x2d38
+#define R200_PP_CUBIC_OFFSET_F3_2         0x2d3c
+#define R200_PP_CUBIC_OFFSET_F4_2         0x2d40
+#define R200_PP_CUBIC_OFFSET_F5_2         0x2d44
+#define R200_PP_CUBIC_OFFSET_F1_3         0x2d4c
+#define R200_PP_CUBIC_OFFSET_F2_3         0x2d50
+#define R200_PP_CUBIC_OFFSET_F3_3         0x2d54
+#define R200_PP_CUBIC_OFFSET_F4_3         0x2d58
+#define R200_PP_CUBIC_OFFSET_F5_3         0x2d5c
+#define R200_PP_CUBIC_OFFSET_F1_4         0x2d64
+#define R200_PP_CUBIC_OFFSET_F2_4         0x2d68
+#define R200_PP_CUBIC_OFFSET_F3_4         0x2d6c
+#define R200_PP_CUBIC_OFFSET_F4_4         0x2d70
+#define R200_PP_CUBIC_OFFSET_F5_4         0x2d74
+#define R200_PP_CUBIC_OFFSET_F1_5         0x2d7c
+#define R200_PP_CUBIC_OFFSET_F2_5         0x2d80
+#define R200_PP_CUBIC_OFFSET_F3_5         0x2d84
+#define R200_PP_CUBIC_OFFSET_F4_5         0x2d88
+#define R200_PP_CUBIC_OFFSET_F5_5         0x2d8c
+
+#define R200_RE_AUX_SCISSOR_CNTL          0x26f0
+#define R200_SE_VTE_CNTL                  0x20b0
+#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL   0x2250
+#define R200_PP_TAM_DEBUG3                0x2d9c
+#define R200_PP_CNTL_X                    0x2cc4
+#define R200_SE_VAP_CNTL_STATUS           0x2140
+#define R200_RE_SCISSOR_TL_0              0x1cd8
+#define R200_RE_SCISSOR_TL_1              0x1ce0
+#define R200_RE_SCISSOR_TL_2              0x1ce8
+#define R200_RB3D_DEPTHXY_OFFSET          0x1d60
+#define R200_SE_VTX_STATE_CNTL            0x2180
+#define R200_RE_POINTSIZE                 0x2648
+#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254
+
+#define RADEON_PP_TEX_SIZE_0                0x1d04	/* NPOT */
+#define RADEON_PP_TEX_SIZE_1                0x1d0c
+#define RADEON_PP_TEX_SIZE_2                0x1d14
+
+#define RADEON_PP_CUBIC_FACES_0             0x1d24
+#define RADEON_PP_CUBIC_FACES_1             0x1d28
+#define RADEON_PP_CUBIC_FACES_2             0x1d2c
+#define RADEON_PP_CUBIC_OFFSET_T0_0         0x1dd0	/* bits [31:5] */
+#define RADEON_PP_CUBIC_OFFSET_T1_0         0x1e00
+#define RADEON_PP_CUBIC_OFFSET_T2_0         0x1e14
+
+#define RADEON_SE_TCL_STATE_FLUSH           0x2284
+
+#define SE_VAP_CNTL__TCL_ENA_MASK                          0x00000001
+#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK                   0x00010000
+#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT                 0x00000012
+#define SE_VTE_CNTL__VTX_XY_FMT_MASK                       0x00000100
+#define SE_VTE_CNTL__VTX_Z_FMT_MASK                        0x00000200
+#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK                  0x00000001
+#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK                  0x00000002
+#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT               0x0000000b
+#define R200_3D_DRAW_IMMD_2      0xC0003500
+#define R200_SE_VTX_FMT_1                 0x208c
+#define R200_RE_CNTL                      0x1c50
+
+#define R200_RB3D_BLENDCOLOR              0x3218
+
+#define R200_SE_TCL_POINT_SPRITE_CNTL     0x22c4
+
+#define R200_PP_TRI_PERF 0x2cf8
+
+#define R200_PP_AFS_0                     0x2f80
+#define R200_PP_AFS_1                     0x2f00	/* same as txcblend_0 */
+
+#define R200_VAP_PVS_CNTL_1               0x22D0
+
+#define RADEON_CRTC_CRNT_FRAME 0x0214
+#define RADEON_CRTC2_CRNT_FRAME 0x0314
+
+#define R500_D1CRTC_STATUS 0x609c
+#define R500_D2CRTC_STATUS 0x689c
+#define R500_CRTC_V_BLANK (1 << 0)
+
+#define R500_D1CRTC_FRAME_COUNT 0x60a4
+#define R500_D2CRTC_FRAME_COUNT 0x68a4
+
+#define R500_D1MODE_V_COUNTER 0x6530
+#define R500_D2MODE_V_COUNTER 0x6d30
+
+#define R500_D1MODE_VBLANK_STATUS 0x6534
+#define R500_D2MODE_VBLANK_STATUS 0x6d34
+#define R500_VBLANK_OCCURED (1 << 0)
+#define R500_VBLANK_ACK     (1 << 4)
+#define R500_VBLANK_STAT    (1 << 12)
+#define R500_VBLANK_INT     (1 << 16)
+
+#define R500_DxMODE_INT_MASK 0x6540
+#define R500_D1MODE_INT_MASK (1 << 0)
+#define R500_D2MODE_INT_MASK (1 << 8)
+
+#define R500_DISP_INTERRUPT_STATUS 0x7edc
+#define R500_D1_VBLANK_INTERRUPT (1 << 4)
+#define R500_D2_VBLANK_INTERRUPT (1 << 5)
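+
+/*
+ * R500 display interrupts use the same status/ack convention: check
+ * R500_DISP_INTERRUPT_STATUS, then write R500_VBLANK_ACK into the
+ * per-CRTC VBLANK_STATUS register.  Sketch:
+ *
+ *	if (RADEON_READ(R500_DISP_INTERRUPT_STATUS) & R500_D1_VBLANK_INTERRUPT)
+ *		RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
+ */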
+
+/* R6xx/R7xx registers */
+#define R600_MC_VM_FB_LOCATION                                 0x2180
+#define R600_MC_VM_AGP_TOP                                     0x2184
+#define R600_MC_VM_AGP_BOT                                     0x2188
+#define R600_MC_VM_AGP_BASE                                    0x218c
+#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR                    0x2190
+#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR                   0x2194
+#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR                0x2198
+
+#define R700_MC_VM_FB_LOCATION                                 0x2024
+#define R700_MC_VM_AGP_TOP                                     0x2028
+#define R700_MC_VM_AGP_BOT                                     0x202c
+#define R700_MC_VM_AGP_BASE                                    0x2030
+#define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR                    0x2034
+#define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR                   0x2038
+#define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR                0x203c
+
+#define R600_MCD_RD_A_CNTL                                     0x219c
+#define R600_MCD_RD_B_CNTL                                     0x21a0
+
+#define R600_MCD_WR_A_CNTL                                     0x21a4
+#define R600_MCD_WR_B_CNTL                                     0x21a8
+
+#define R600_MCD_RD_SYS_CNTL                                   0x2200
+#define R600_MCD_WR_SYS_CNTL                                   0x2214
+
+#define R600_MCD_RD_GFX_CNTL                                   0x21fc
+#define R600_MCD_RD_HDP_CNTL                                   0x2204
+#define R600_MCD_RD_PDMA_CNTL                                  0x2208
+#define R600_MCD_RD_SEM_CNTL                                   0x220c
+#define R600_MCD_WR_GFX_CNTL                                   0x2210
+#define R600_MCD_WR_HDP_CNTL                                   0x2218
+#define R600_MCD_WR_PDMA_CNTL                                  0x221c
+#define R600_MCD_WR_SEM_CNTL                                   0x2220
+
+#       define R600_MCD_L1_TLB                                 (1 << 0)
+#       define R600_MCD_L1_FRAG_PROC                           (1 << 1)
+#       define R600_MCD_L1_STRICT_ORDERING                     (1 << 2)
+
+#       define R600_MCD_SYSTEM_ACCESS_MODE_MASK                (3 << 6)
+#       define R600_MCD_SYSTEM_ACCESS_MODE_PA_ONLY             (0 << 6)
+#       define R600_MCD_SYSTEM_ACCESS_MODE_USE_SYS_MAP         (1 << 6)
+#       define R600_MCD_SYSTEM_ACCESS_MODE_IN_SYS              (2 << 6)
+#       define R600_MCD_SYSTEM_ACCESS_MODE_NOT_IN_SYS          (3 << 6)
+
+#       define R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU    (0 << 8)
+#       define R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 8)
+
+#       define R600_MCD_SEMAPHORE_MODE                         (1 << 10)
+#       define R600_MCD_WAIT_L2_QUERY                          (1 << 11)
+#       define R600_MCD_EFFECTIVE_L1_TLB_SIZE(x)               ((x) << 12)
+#       define R600_MCD_EFFECTIVE_L1_QUEUE_SIZE(x)             ((x) << 15)
+
+#define R700_MC_VM_MD_L1_TLB0_CNTL                             0x2654
+#define R700_MC_VM_MD_L1_TLB1_CNTL                             0x2658
+#define R700_MC_VM_MD_L1_TLB2_CNTL                             0x265c
+
+#define R700_MC_VM_MB_L1_TLB0_CNTL                             0x2234
+#define R700_MC_VM_MB_L1_TLB1_CNTL                             0x2238
+#define R700_MC_VM_MB_L1_TLB2_CNTL                             0x223c
+#define R700_MC_VM_MB_L1_TLB3_CNTL                             0x2240
+
+#       define R700_ENABLE_L1_TLB                              (1 << 0)
+#       define R700_ENABLE_L1_FRAGMENT_PROCESSING              (1 << 1)
+#       define R700_SYSTEM_ACCESS_MODE_IN_SYS                  (2 << 3)
+#       define R700_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU  (0 << 5)
+#       define R700_EFFECTIVE_L1_TLB_SIZE(x)                   ((x) << 15)
+#       define R700_EFFECTIVE_L1_QUEUE_SIZE(x)                 ((x) << 18)
+
+#define R700_MC_ARB_RAMCFG                                     0x2760
+#       define R700_NOOFBANK_SHIFT                             0
+#       define R700_NOOFBANK_MASK                              0x3
+#       define R700_NOOFRANK_SHIFT                             2
+#       define R700_NOOFRANK_MASK                              0x1
+#       define R700_NOOFROWS_SHIFT                             3
+#       define R700_NOOFROWS_MASK                              0x7
+#       define R700_NOOFCOLS_SHIFT                             6
+#       define R700_NOOFCOLS_MASK                              0x3
+#       define R700_CHANSIZE_SHIFT                             8
+#       define R700_CHANSIZE_MASK                              0x1
+#       define R700_BURSTLENGTH_SHIFT                          9
+#       define R700_BURSTLENGTH_MASK                           0x1
+#define R600_RAMCFG                                            0x2408
+#       define R600_NOOFBANK_SHIFT                             0
+#       define R600_NOOFBANK_MASK                              0x1
+#       define R600_NOOFRANK_SHIFT                             1
+#       define R600_NOOFRANK_MASK                              0x1
+#       define R600_NOOFROWS_SHIFT                             2
+#       define R600_NOOFROWS_MASK                              0x7
+#       define R600_NOOFCOLS_SHIFT                             5
+#       define R600_NOOFCOLS_MASK                              0x3
+#       define R600_CHANSIZE_SHIFT                             7
+#       define R600_CHANSIZE_MASK                              0x1
+#       define R600_BURSTLENGTH_SHIFT                          8
+#       define R600_BURSTLENGTH_MASK                           0x1
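+
+/*
+ * The RAMCFG fields above are plain shift/mask pairs; decoding the
+ * memory configuration is e.g. (sketch):
+ *
+ *	u32 ramcfg = RADEON_READ(R600_RAMCFG);
+ *	u32 banks  = (ramcfg >> R600_NOOFBANK_SHIFT) & R600_NOOFBANK_MASK;
+ *	u32 rows   = (ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK;
+ */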
+
+#define R600_VM_L2_CNTL                                        0x1400
+#       define R600_VM_L2_CACHE_EN                             (1 << 0)
+#       define R600_VM_L2_FRAG_PROC                            (1 << 1)
+#       define R600_VM_ENABLE_PTE_CACHE_LRU_W                  (1 << 9)
+#       define R600_VM_L2_CNTL_QUEUE_SIZE(x)                   ((x) << 13)
+#       define R700_VM_L2_CNTL_QUEUE_SIZE(x)                   ((x) << 14)
+
+#define R600_VM_L2_CNTL2                                       0x1404
+#       define R600_VM_L2_CNTL2_INVALIDATE_ALL_L1_TLBS         (1 << 0)
+#       define R600_VM_L2_CNTL2_INVALIDATE_L2_CACHE            (1 << 1)
+#define R600_VM_L2_CNTL3                                       0x1408
+#       define R600_VM_L2_CNTL3_BANK_SELECT_0(x)               ((x) << 0)
+#       define R600_VM_L2_CNTL3_BANK_SELECT_1(x)               ((x) << 5)
+#       define R600_VM_L2_CNTL3_CACHE_UPDATE_MODE(x)           ((x) << 10)
+#       define R700_VM_L2_CNTL3_BANK_SELECT(x)                 ((x) << 0)
+#       define R700_VM_L2_CNTL3_CACHE_UPDATE_MODE(x)           ((x) << 6)
+
+#define R600_VM_L2_STATUS                                      0x140c
+
+#define R600_VM_CONTEXT0_CNTL                                  0x1410
+#       define R600_VM_ENABLE_CONTEXT                          (1 << 0)
+#       define R600_VM_PAGE_TABLE_DEPTH_FLAT                   (0 << 1)
+
+#define R600_VM_CONTEXT0_CNTL2                                 0x1430
+#define R600_VM_CONTEXT0_REQUEST_RESPONSE                      0x1470
+#define R600_VM_CONTEXT0_INVALIDATION_LOW_ADDR                 0x1490
+#define R600_VM_CONTEXT0_INVALIDATION_HIGH_ADDR                0x14b0
+#define R600_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR                  0x1574
+#define R600_VM_CONTEXT0_PAGE_TABLE_START_ADDR                 0x1594
+#define R600_VM_CONTEXT0_PAGE_TABLE_END_ADDR                   0x15b4
+
+#define R700_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR                  0x153c
+#define R700_VM_CONTEXT0_PAGE_TABLE_START_ADDR                 0x155c
+#define R700_VM_CONTEXT0_PAGE_TABLE_END_ADDR                   0x157c
+
+#define R600_HDP_HOST_PATH_CNTL                                0x2c00
+
+#define R600_GRBM_CNTL                                         0x8000
+#       define R600_GRBM_READ_TIMEOUT(x)                       ((x) << 0)
+
+#define R600_GRBM_STATUS                                       0x8010
+#       define R600_CMDFIFO_AVAIL_MASK                         0x1f
+#       define R700_CMDFIFO_AVAIL_MASK                         0xf
+#       define R600_GUI_ACTIVE                                 (1 << 31)
+#define R600_GRBM_STATUS2                                      0x8014
+#define R600_GRBM_SOFT_RESET                                   0x8020
+#       define R600_SOFT_RESET_CP                              (1 << 0)
+#define R600_WAIT_UNTIL                                        0x8040
+
+#define R600_CP_SEM_WAIT_TIMER                                 0x85bc
+#define R600_CP_ME_CNTL                                        0x86d8
+#       define R600_CP_ME_HALT                                 (1 << 28)
+#define R600_CP_QUEUE_THRESHOLDS                               0x8760
+#       define R600_ROQ_IB1_START(x)                           ((x) << 0)
+#       define R600_ROQ_IB2_START(x)                           ((x) << 8)
+#define R600_CP_MEQ_THRESHOLDS                                 0x8764
+#       define R700_STQ_SPLIT(x)                               ((x) << 0)
+#       define R600_MEQ_END(x)                                 ((x) << 16)
+#       define R600_ROQ_END(x)                                 ((x) << 24)
+#define R600_CP_PERFMON_CNTL                                   0x87fc
+#define R600_CP_RB_BASE                                        0xc100
+#define R600_CP_RB_CNTL                                        0xc104
+#       define R600_RB_BUFSZ(x)                                ((x) << 0)
+#       define R600_RB_BLKSZ(x)                                ((x) << 8)
+#       define R600_BUF_SWAP_32BIT                             (2 << 16)
+#       define R600_RB_NO_UPDATE                               (1 << 27)
+#       define R600_RB_RPTR_WR_ENA                             (1 << 31)
+#define R600_CP_RB_RPTR_WR                                     0xc108
+#define R600_CP_RB_RPTR_ADDR                                   0xc10c
+#define R600_CP_RB_RPTR_ADDR_HI                                0xc110
+#define R600_CP_RB_WPTR                                        0xc114
+#define R600_CP_RB_WPTR_ADDR                                   0xc118
+#define R600_CP_RB_WPTR_ADDR_HI                                0xc11c
+#define R600_CP_RB_RPTR                                        0x8700
+#define R600_CP_RB_WPTR_DELAY                                  0x8704
+#define R600_CP_PFP_UCODE_ADDR                                 0xc150
+#define R600_CP_PFP_UCODE_DATA                                 0xc154
+#define R600_CP_ME_RAM_RADDR                                   0xc158
+#define R600_CP_ME_RAM_WADDR                                   0xc15c
+#define R600_CP_ME_RAM_DATA                                    0xc160
+#define R600_CP_DEBUG                                          0xc1fc
+
+#define R600_PA_CL_ENHANCE                                     0x8a14
+#       define R600_CLIP_VTX_REORDER_ENA                       (1 << 0)
+#       define R600_NUM_CLIP_SEQ(x)                            ((x) << 1)
+#define R600_PA_SC_LINE_STIPPLE_STATE                          0x8b10
+#define R600_PA_SC_MULTI_CHIP_CNTL                             0x8b20
+#define R700_PA_SC_FORCE_EOV_MAX_CNTS                          0x8b24
+#       define R700_FORCE_EOV_MAX_CLK_CNT(x)                   ((x) << 0)
+#       define R700_FORCE_EOV_MAX_REZ_CNT(x)                   ((x) << 16)
+#define R600_PA_SC_AA_SAMPLE_LOCS_2S                           0x8b40
+#define R600_PA_SC_AA_SAMPLE_LOCS_4S                           0x8b44
+#define R600_PA_SC_AA_SAMPLE_LOCS_8S_WD0                       0x8b48
+#define R600_PA_SC_AA_SAMPLE_LOCS_8S_WD1                       0x8b4c
+#       define R600_S0_X(x)                                    ((x) << 0)
+#       define R600_S0_Y(x)                                    ((x) << 4)
+#       define R600_S1_X(x)                                    ((x) << 8)
+#       define R600_S1_Y(x)                                    ((x) << 12)
+#       define R600_S2_X(x)                                    ((x) << 16)
+#       define R600_S2_Y(x)                                    ((x) << 20)
+#       define R600_S3_X(x)                                    ((x) << 24)
+#       define R600_S3_Y(x)                                    ((x) << 28)
+#       define R600_S4_X(x)                                    ((x) << 0)
+#       define R600_S4_Y(x)                                    ((x) << 4)
+#       define R600_S5_X(x)                                    ((x) << 8)
+#       define R600_S5_Y(x)                                    ((x) << 12)
+#       define R600_S6_X(x)                                    ((x) << 16)
+#       define R600_S6_Y(x)                                    ((x) << 20)
+#       define R600_S7_X(x)                                    ((x) << 24)
+#       define R600_S7_Y(x)                                    ((x) << 28)
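+
+/*
+ * The S0..S7 fields pack 4-bit X/Y sample offsets, four samples per
+ * dword (S0..S3 in ..._8S_WD0, S4..S7 in ..._8S_WD1).  A two-sample
+ * pattern, for instance, would be programmed as (values illustrative):
+ *
+ *	RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_2S,
+ *		     R600_S0_X(2) | R600_S0_Y(6) | R600_S1_X(6) | R600_S1_Y(2));
+ */
+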
+#define R600_PA_SC_FIFO_SIZE                                   0x8bd0
+#       define R600_SC_PRIM_FIFO_SIZE(x)                       ((x) << 0)
+#       define R600_SC_HIZ_TILE_FIFO_SIZE(x)                   ((x) << 8)
+#       define R600_SC_EARLYZ_TILE_FIFO_SIZE(x)                ((x) << 16)
+#define R700_PA_SC_FIFO_SIZE_R7XX                              0x8bcc
+#       define R700_SC_PRIM_FIFO_SIZE(x)                       ((x) << 0)
+#       define R700_SC_HIZ_TILE_FIFO_SIZE(x)                   ((x) << 12)
+#       define R700_SC_EARLYZ_TILE_FIFO_SIZE(x)                ((x) << 20)
+#define R600_PA_SC_ENHANCE                                     0x8bf0
+#       define R600_FORCE_EOV_MAX_CLK_CNT(x)                   ((x) << 0)
+#       define R600_FORCE_EOV_MAX_TILE_CNT(x)                  ((x) << 12)
+#define R600_PA_SC_CLIPRECT_RULE                               0x2820c
+#define R700_PA_SC_EDGERULE                                    0x28230
+#define R600_PA_SC_LINE_STIPPLE                                0x28a0c
+#define R600_PA_SC_MODE_CNTL                                   0x28a4c
+#define R600_PA_SC_AA_CONFIG                                   0x28c04
+
+#define R600_SX_EXPORT_BUFFER_SIZES                            0x900c
+#       define R600_COLOR_BUFFER_SIZE(x)                       ((x) << 0)
+#       define R600_POSITION_BUFFER_SIZE(x)                    ((x) << 8)
+#       define R600_SMX_BUFFER_SIZE(x)                         ((x) << 16)
+#define R600_SX_DEBUG_1                                        0x9054
+#       define R600_SMX_EVENT_RELEASE                          (1 << 0)
+#       define R600_ENABLE_NEW_SMX_ADDRESS                     (1 << 16)
+#define R700_SX_DEBUG_1                                        0x9058
+#       define R700_ENABLE_NEW_SMX_ADDRESS                     (1 << 16)
+#define R600_SX_MISC                                           0x28350
+
+#define R600_DB_DEBUG                                          0x9830
+#       define R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE              (1 << 31)
+#define R600_DB_WATERMARKS                                     0x9838
+#       define R600_DEPTH_FREE(x)                              ((x) << 0)
+#       define R600_DEPTH_FLUSH(x)                             ((x) << 5)
+#       define R600_DEPTH_PENDING_FREE(x)                      ((x) << 15)
+#       define R600_DEPTH_CACHELINE_FREE(x)                    ((x) << 20)
+#define R700_DB_DEBUG3                                         0x98b0
+#       define R700_DB_CLK_OFF_DELAY(x)                        ((x) << 11)
+#define RV700_DB_DEBUG4                                        0x9b8c
+#       define RV700_DISABLE_TILE_COVERED_FOR_PS_ITER          (1 << 6)
+
+#define R600_VGT_CACHE_INVALIDATION                            0x88c4
+#       define R600_CACHE_INVALIDATION(x)                      ((x) << 0)
+#       define R600_VC_ONLY                                    0
+#       define R600_TC_ONLY                                    1
+#       define R600_VC_AND_TC                                  2
+#       define R700_AUTO_INVLD_EN(x)                           ((x) << 6)
+#       define R700_NO_AUTO                                    0
+#       define R700_ES_AUTO                                    1
+#       define R700_GS_AUTO                                    2
+#       define R700_ES_AND_GS_AUTO                             3
+#define R600_VGT_GS_PER_ES                                     0x88c8
+#define R600_VGT_ES_PER_GS                                     0x88cc
+#define R600_VGT_GS_PER_VS                                     0x88e8
+#define R600_VGT_GS_VERTEX_REUSE                               0x88d4
+#define R600_VGT_NUM_INSTANCES                                 0x8974
+#define R600_VGT_STRMOUT_EN                                    0x28ab0
+#define R600_VGT_EVENT_INITIATOR                               0x28a90
+#       define R600_CACHE_FLUSH_AND_INV_EVENT                  (0x16 << 0)
+#define R600_VGT_VERTEX_REUSE_BLOCK_CNTL                       0x28c58
+#       define R600_VTX_REUSE_DEPTH_MASK                       0xff
+#define R600_VGT_OUT_DEALLOC_CNTL                              0x28c5c
+#       define R600_DEALLOC_DIST_MASK                          0x7f
+
+#define R600_CB_COLOR0_BASE                                    0x28040
+#define R600_CB_COLOR1_BASE                                    0x28044
+#define R600_CB_COLOR2_BASE                                    0x28048
+#define R600_CB_COLOR3_BASE                                    0x2804c
+#define R600_CB_COLOR4_BASE                                    0x28050
+#define R600_CB_COLOR5_BASE                                    0x28054
+#define R600_CB_COLOR6_BASE                                    0x28058
+#define R600_CB_COLOR7_BASE                                    0x2805c
+#define R600_CB_COLOR7_FRAG                                    0x280fc
+
+#define R600_CB_COLOR0_SIZE                                    0x28060
+#define R600_CB_COLOR0_VIEW                                    0x28080
+#define R600_CB_COLOR0_INFO                                    0x280a0
+#define R600_CB_COLOR0_TILE                                    0x280c0
+#define R600_CB_COLOR0_FRAG                                    0x280e0
+#define R600_CB_COLOR0_MASK                                    0x28100
+
+#define AVIVO_D1MODE_VLINE_START_END                           0x6538
+#define AVIVO_D2MODE_VLINE_START_END                           0x6d38
+#define R600_CP_COHER_BASE                                     0x85f8
+#define R600_DB_DEPTH_BASE                                     0x2800c
+#define R600_SQ_PGM_START_FS                                   0x28894
+#define R600_SQ_PGM_START_ES                                   0x28880
+#define R600_SQ_PGM_START_VS                                   0x28858
+#define R600_SQ_PGM_RESOURCES_VS                               0x28868
+#define R600_SQ_PGM_CF_OFFSET_VS                               0x288d0
+#define R600_SQ_PGM_START_GS                                   0x2886c
+#define R600_SQ_PGM_START_PS                                   0x28840
+#define R600_SQ_PGM_RESOURCES_PS                               0x28850
+#define R600_SQ_PGM_EXPORTS_PS                                 0x28854
+#define R600_SQ_PGM_CF_OFFSET_PS                               0x288cc
+#define R600_VGT_DMA_BASE                                      0x287e8
+#define R600_VGT_DMA_BASE_HI                                   0x287e4
+#define R600_VGT_STRMOUT_BASE_OFFSET_0                         0x28b10
+#define R600_VGT_STRMOUT_BASE_OFFSET_1                         0x28b14
+#define R600_VGT_STRMOUT_BASE_OFFSET_2                         0x28b18
+#define R600_VGT_STRMOUT_BASE_OFFSET_3                         0x28b1c
+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_0                      0x28b44
+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_1                      0x28b48
+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_2                      0x28b4c
+#define R600_VGT_STRMOUT_BASE_OFFSET_HI_3                      0x28b50
+#define R600_VGT_STRMOUT_BUFFER_BASE_0                         0x28ad8
+#define R600_VGT_STRMOUT_BUFFER_BASE_1                         0x28ae8
+#define R600_VGT_STRMOUT_BUFFER_BASE_2                         0x28af8
+#define R600_VGT_STRMOUT_BUFFER_BASE_3                         0x28b08
+#define R600_VGT_STRMOUT_BUFFER_OFFSET_0                       0x28adc
+#define R600_VGT_STRMOUT_BUFFER_OFFSET_1                       0x28aec
+#define R600_VGT_STRMOUT_BUFFER_OFFSET_2                       0x28afc
+#define R600_VGT_STRMOUT_BUFFER_OFFSET_3                       0x28b0c
+
+#define R600_VGT_PRIMITIVE_TYPE                                0x8958
+
+#define R600_PA_SC_SCREEN_SCISSOR_TL                           0x28030
+#define R600_PA_SC_GENERIC_SCISSOR_TL                          0x28240
+#define R600_PA_SC_WINDOW_SCISSOR_TL                           0x28204
+
+#define R600_TC_CNTL                                           0x9608
+#       define R600_TC_L2_SIZE(x)                              ((x) << 5)
+#       define R600_L2_DISABLE_LATE_HIT                        (1 << 9)
+
+#define R600_ARB_POP                                           0x2418
+#       define R600_ENABLE_TC128                               (1 << 30)
+#define R600_ARB_GDEC_RD_CNTL                                  0x246c
+
+#define R600_TA_CNTL_AUX                                       0x9508
+#       define R600_DISABLE_CUBE_WRAP                          (1 << 0)
+#       define R600_DISABLE_CUBE_ANISO                         (1 << 1)
+#       define R700_GETLOD_SELECT(x)                           ((x) << 2)
+#       define R600_SYNC_GRADIENT                              (1 << 24)
+#       define R600_SYNC_WALKER                                (1 << 25)
+#       define R600_SYNC_ALIGNER                               (1 << 26)
+#       define R600_BILINEAR_PRECISION_6_BIT                   (0 << 31)
+#       define R600_BILINEAR_PRECISION_8_BIT                   (1 << 31)
+
+#define R700_TCP_CNTL                                          0x9610
+
+#define R600_SMX_DC_CTL0                                       0xa020
+#       define R700_USE_HASH_FUNCTION                          (1 << 0)
+#       define R700_CACHE_DEPTH(x)                             ((x) << 1)
+#       define R700_FLUSH_ALL_ON_EVENT                         (1 << 10)
+#       define R700_STALL_ON_EVENT                             (1 << 11)
+#define R700_SMX_EVENT_CTL                                     0xa02c
+#       define R700_ES_FLUSH_CTL(x)                            ((x) << 0)
+#       define R700_GS_FLUSH_CTL(x)                            ((x) << 3)
+#       define R700_ACK_FLUSH_CTL(x)                           ((x) << 6)
+#       define R700_SYNC_FLUSH_CTL                             (1 << 8)
+
+#define R600_SQ_CONFIG                                         0x8c00
+#       define R600_VC_ENABLE                                  (1 << 0)
+#       define R600_EXPORT_SRC_C                               (1 << 1)
+#       define R600_DX9_CONSTS                                 (1 << 2)
+#       define R600_ALU_INST_PREFER_VECTOR                     (1 << 3)
+#       define R600_DX10_CLAMP                                 (1 << 4)
+#       define R600_CLAUSE_SEQ_PRIO(x)                         ((x) << 8)
+#       define R600_PS_PRIO(x)                                 ((x) << 24)
+#       define R600_VS_PRIO(x)                                 ((x) << 26)
+#       define R600_GS_PRIO(x)                                 ((x) << 28)
+#       define R600_ES_PRIO(x)                                 ((x) << 30)
+#define R600_SQ_GPR_RESOURCE_MGMT_1                            0x8c04
+#       define R600_NUM_PS_GPRS(x)                             ((x) << 0)
+#       define R600_NUM_VS_GPRS(x)                             ((x) << 16)
+#       define R700_DYN_GPR_ENABLE                             (1 << 27)
+#       define R600_NUM_CLAUSE_TEMP_GPRS(x)                    ((x) << 28)
+#define R600_SQ_GPR_RESOURCE_MGMT_2                            0x8c08
+#       define R600_NUM_GS_GPRS(x)                             ((x) << 0)
+#       define R600_NUM_ES_GPRS(x)                             ((x) << 16)
+#define R600_SQ_THREAD_RESOURCE_MGMT                           0x8c0c
+#       define R600_NUM_PS_THREADS(x)                          ((x) << 0)
+#       define R600_NUM_VS_THREADS(x)                          ((x) << 8)
+#       define R600_NUM_GS_THREADS(x)                          ((x) << 16)
+#       define R600_NUM_ES_THREADS(x)                          ((x) << 24)
+#define R600_SQ_STACK_RESOURCE_MGMT_1                          0x8c10
+#       define R600_NUM_PS_STACK_ENTRIES(x)                    ((x) << 0)
+#       define R600_NUM_VS_STACK_ENTRIES(x)                    ((x) << 16)
+#define R600_SQ_STACK_RESOURCE_MGMT_2                          0x8c14
+#       define R600_NUM_GS_STACK_ENTRIES(x)                    ((x) << 0)
+#       define R600_NUM_ES_STACK_ENTRIES(x)                    ((x) << 16)
+#define R600_SQ_MS_FIFO_SIZES                                  0x8cf0
+#       define R600_CACHE_FIFO_SIZE(x)                         ((x) << 0)
+#       define R600_FETCH_FIFO_HIWATER(x)                      ((x) << 8)
+#       define R600_DONE_FIFO_HIWATER(x)                       ((x) << 16)
+#       define R600_ALU_UPDATE_FIFO_HIWATER(x)                 ((x) << 24)
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_0                         0x8db0
+#       define R700_SIMDA_RING0(x)                             ((x) << 0)
+#       define R700_SIMDA_RING1(x)                             ((x) << 8)
+#       define R700_SIMDB_RING0(x)                             ((x) << 16)
+#       define R700_SIMDB_RING1(x)                             ((x) << 24)
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_1                         0x8db4
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_2                         0x8db8
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_3                         0x8dbc
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_4                         0x8dc0
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_5                         0x8dc4
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_6                         0x8dc8
+#define R700_SQ_DYN_GPR_SIZE_SIMD_AB_7                         0x8dcc
+
+#define R600_SPI_PS_IN_CONTROL_0                               0x286cc
+#       define R600_NUM_INTERP(x)                              ((x) << 0)
+#       define R600_POSITION_ENA                               (1 << 8)
+#       define R600_POSITION_CENTROID                          (1 << 9)
+#       define R600_POSITION_ADDR(x)                           ((x) << 10)
+#       define R600_PARAM_GEN(x)                               ((x) << 15)
+#       define R600_PARAM_GEN_ADDR(x)                          ((x) << 19)
+#       define R600_BARYC_SAMPLE_CNTL(x)                       ((x) << 26)
+#       define R600_PERSP_GRADIENT_ENA                         (1 << 28)
+#       define R600_LINEAR_GRADIENT_ENA                        (1 << 29)
+#       define R600_POSITION_SAMPLE                            (1 << 30)
+#       define R600_BARYC_AT_SAMPLE_ENA                        (1 << 31)
+#define R600_SPI_PS_IN_CONTROL_1                               0x286d0
+#       define R600_GEN_INDEX_PIX                              (1 << 0)
+#       define R600_GEN_INDEX_PIX_ADDR(x)                      ((x) << 1)
+#       define R600_FRONT_FACE_ENA                             (1 << 8)
+#       define R600_FRONT_FACE_CHAN(x)                         ((x) << 9)
+#       define R600_FRONT_FACE_ALL_BITS                        (1 << 11)
+#       define R600_FRONT_FACE_ADDR(x)                         ((x) << 12)
+#       define R600_FOG_ADDR(x)                                ((x) << 17)
+#       define R600_FIXED_PT_POSITION_ENA                      (1 << 24)
+#       define R600_FIXED_PT_POSITION_ADDR(x)                  ((x) << 25)
+#       define R700_POSITION_ULC                               (1 << 30)
+#define R600_SPI_INPUT_Z                                       0x286d8
+
+#define R600_SPI_CONFIG_CNTL                                   0x9100
+#       define R600_GPR_WRITE_PRIORITY(x)                      ((x) << 0)
+#       define R600_DISABLE_INTERP_1                           (1 << 5)
+#define R600_SPI_CONFIG_CNTL_1                                 0x913c
+#       define R600_VTX_DONE_DELAY(x)                          ((x) << 0)
+#       define R600_INTERP_ONE_PRIM_PER_ROW                    (1 << 4)
+
+#define R600_GB_TILING_CONFIG                                  0x98f0
+#       define R600_PIPE_TILING(x)                             ((x) << 1)
+#       define R600_BANK_TILING(x)                             ((x) << 4)
+#       define R600_GROUP_SIZE(x)                              ((x) << 6)
+#       define R600_ROW_TILING(x)                              ((x) << 8)
+#       define R600_BANK_SWAPS(x)                              ((x) << 11)
+#       define R600_SAMPLE_SPLIT(x)                            ((x) << 14)
+#       define R600_BACKEND_MAP(x)                             ((x) << 16)
+#define R600_DCP_TILING_CONFIG                                 0x6ca0
+#define R600_HDP_TILING_CONFIG                                 0x2f3c
+
+#define R600_CC_RB_BACKEND_DISABLE                             0x98f4
+#define R700_CC_SYS_RB_BACKEND_DISABLE                         0x3f88
+#       define R600_BACKEND_DISABLE(x)                         ((x) << 16)
+
+#define R600_CC_GC_SHADER_PIPE_CONFIG                          0x8950
+#define R600_GC_USER_SHADER_PIPE_CONFIG                        0x8954
+#       define R600_INACTIVE_QD_PIPES(x)                       ((x) << 8)
+#       define R600_INACTIVE_QD_PIPES_MASK                     (0xff << 8)
+#       define R600_INACTIVE_SIMDS(x)                          ((x) << 16)
+#       define R600_INACTIVE_SIMDS_MASK                        (0xff << 16)
+
+#define R700_CGTS_SYS_TCC_DISABLE                              0x3f90
+#define R700_CGTS_USER_SYS_TCC_DISABLE                         0x3f94
+#define R700_CGTS_TCC_DISABLE                                  0x9148
+#define R700_CGTS_USER_TCC_DISABLE                             0x914c
+
+/* Constants */
+#define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+
+#define RADEON_LAST_FRAME_REG		RADEON_SCRATCH_REG0
+#define RADEON_LAST_DISPATCH_REG	RADEON_SCRATCH_REG1
+#define RADEON_LAST_CLEAR_REG		RADEON_SCRATCH_REG2
+#define RADEON_LAST_SWI_REG		RADEON_SCRATCH_REG3
+#define RADEON_LAST_DISPATCH		1
+
+#define R600_LAST_FRAME_REG		R600_SCRATCH_REG0
+#define R600_LAST_DISPATCH_REG	        R600_SCRATCH_REG1
+#define R600_LAST_CLEAR_REG		R600_SCRATCH_REG2
+#define R600_LAST_SWI_REG		R600_SCRATCH_REG3
+
+#define RADEON_MAX_VB_AGE		0x7fffffff
+#define RADEON_MAX_VB_VERTS		(0xffff)
+
+#define RADEON_RING_HIGH_MARK		128
+
+#define RADEON_PCIGART_TABLE_SIZE      (32*1024)
+
+#define RADEON_READ(reg)	DRM_READ32(  dev_priv->mmio, (reg) )
+#define RADEON_WRITE(reg, val)                                          \
+do {									\
+	if (reg < 0x10000) {				                \
+		DRM_WRITE32(dev_priv->mmio, (reg), (val));		\
+	} else {                                                        \
+		DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, (reg));	\
+		DRM_WRITE32(dev_priv->mmio, RADEON_MM_DATA, (val));	\
+	}                                                               \
+} while (0)
+#define RADEON_READ8(reg)	DRM_READ8(  dev_priv->mmio, (reg) )
+#define RADEON_WRITE8(reg,val)	DRM_WRITE8( dev_priv->mmio, (reg), (val) )
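+
+/*
+ * Offsets below 0x10000 fall inside the directly mapped register window
+ * and take a plain MMIO store; anything above it is written indirectly
+ * through the RADEON_MM_INDEX/RADEON_MM_DATA pair.  Illustrative use
+ * (not part of the original sources):
+ *
+ *	RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, val);   0x8950, direct
+ *	RADEON_WRITE(R600_SPI_PS_IN_CONTROL_0, val);        0x286cc, indirect
+ */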
+
+#define RADEON_WRITE_PLL(addr, val)					\
+do {									\
+	RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX,				\
+		       ((addr) & 0x1f) | RADEON_PLL_WR_EN );		\
+	RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val));			\
+} while (0)
+
+#define RADEON_WRITE_PCIE(addr, val)					\
+do {									\
+	RADEON_WRITE8(RADEON_PCIE_INDEX,				\
+			((addr) & 0xff));				\
+	RADEON_WRITE(RADEON_PCIE_DATA, (val));			\
+} while (0)
+
+#define R500_WRITE_MCIND(addr, val)					\
+do {								\
+	RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff));	\
+	RADEON_WRITE(R520_MC_IND_DATA, (val));			\
+	RADEON_WRITE(R520_MC_IND_INDEX, 0);	\
+} while (0)
+
+#define RS480_WRITE_MCIND(addr, val)				\
+do {									\
+	RADEON_WRITE(RS480_NB_MC_INDEX,				\
+			((addr) & 0xff) | RS480_NB_MC_IND_WR_EN);	\
+	RADEON_WRITE(RS480_NB_MC_DATA, (val));			\
+	RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);			\
+} while (0)
+
+#define RS690_WRITE_MCIND(addr, val)					\
+do {								\
+	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK));	\
+	RADEON_WRITE(RS690_MC_DATA, val);			\
+	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);	\
+} while (0)
+
+#define RS600_WRITE_MCIND(addr, val)				\
+do {							        \
+	RADEON_WRITE(RS600_MC_INDEX, RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | ((addr) & RS600_MC_ADDR_MASK)); \
+	RADEON_WRITE(RS600_MC_DATA, val);                       \
+} while (0)
+
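+/*
+ * The IGP northbridge variants each use a different index/data register
+ * pair for memory-controller access, so shared code goes through
+ * IGP_WRITE_MCIND() below and lets it dispatch on the chip family bits
+ * kept in dev_priv->flags.
+ */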
+#define IGP_WRITE_MCIND(addr, val)				\
+do {									\
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||   \
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))      \
+		RS690_WRITE_MCIND(addr, val);				\
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600)  \
+		RS600_WRITE_MCIND(addr, val);				\
+	else								\
+		RS480_WRITE_MCIND(addr, val);				\
+} while (0)
+
+#define CP_PACKET0( reg, n )						\
+	(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
+#define CP_PACKET0_TABLE( reg, n )					\
+	(RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2))
+#define CP_PACKET1( reg0, reg1 )					\
+	(RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2))
+#define CP_PACKET2()							\
+	(RADEON_CP_PACKET2)
+#define CP_PACKET3( pkt, n )						\
+	(RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
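+
+/*
+ * A type-0 packet header encodes the dword-aligned register offset in
+ * the low bits and the number of data dwords minus one in bits 16+, so
+ * CP_PACKET0(RADEON_WAIT_UNTIL, 0) below is a header for a single write
+ * to RADEON_WAIT_UNTIL.
+ */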
+
+/* ================================================================
+ * Engine control helper macros
+ */
+
+#define RADEON_WAIT_UNTIL_2D_IDLE() do {				\
+	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
+	OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |				\
+		   RADEON_WAIT_HOST_IDLECLEAN) );			\
+} while (0)
+
+#define RADEON_WAIT_UNTIL_3D_IDLE() do {				\
+	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
+	OUT_RING( (RADEON_WAIT_3D_IDLECLEAN |				\
+		   RADEON_WAIT_HOST_IDLECLEAN) );			\
+} while (0)
+
+#define RADEON_WAIT_UNTIL_IDLE() do {					\
+	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
+	OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |				\
+		   RADEON_WAIT_3D_IDLECLEAN |				\
+		   RADEON_WAIT_HOST_IDLECLEAN) );			\
+} while (0)
+
+#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do {				\
+	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
+	OUT_RING( RADEON_WAIT_CRTC_PFLIP );				\
+} while (0)
+
+#define RADEON_FLUSH_CACHE() do {					\
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
+		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_DC_FLUSH);				\
+	} else {                                                        \
+		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
+		OUT_RING(R300_RB3D_DC_FLUSH);				\
+	}                                                               \
+} while (0)
+
+#define RADEON_PURGE_CACHE() do {					\
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
+		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE);	\
+	} else {                                                        \
+		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
+		OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);	\
+	}                                                               \
+} while (0)
+
+#define RADEON_FLUSH_ZCACHE() do {					\
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
+		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_ZC_FLUSH);				\
+	} else {                                                        \
+		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));	\
+		OUT_RING(R300_ZC_FLUSH);				\
+	}                                                               \
+} while (0)
+
+#define RADEON_PURGE_ZCACHE() do {					\
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
+		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE);			\
+	} else {                                                        \
+		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));	\
+		OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE);				\
+	}                                                               \
+} while (0)
+
+/* ================================================================
+ * Misc helper macros
+ */
+
+/* Perfbox functionality only.
+ */
+#define RING_SPACE_TEST_WITH_RETURN( dev_priv )				\
+do {									\
+	if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) {		\
+		u32 head = GET_RING_HEAD( dev_priv );			\
+		if (head == dev_priv->ring.tail)			\
+			dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE;	\
+	}								\
+} while (0)
+
+#define VB_AGE_TEST_WITH_RETURN( dev_priv )				\
+do {								\
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;	\
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;	\
+	if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) {		\
+		int __ret;						\
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) \
+			__ret = r600_do_cp_idle(dev_priv);		\
+		else							\
+			__ret = radeon_do_cp_idle(dev_priv);		\
+		if ( __ret ) return __ret;				\
+		sarea_priv->last_dispatch = 0;				\
+		radeon_freelist_reset( dev );				\
+	}								\
+} while (0)
+
+#define RADEON_DISPATCH_AGE( age ) do {					\
+	OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) );		\
+	OUT_RING( age );						\
+} while (0)
+
+#define RADEON_FRAME_AGE( age ) do {					\
+	OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) );		\
+	OUT_RING( age );						\
+} while (0)
+
+#define RADEON_CLEAR_AGE( age ) do {					\
+	OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) );		\
+	OUT_RING( age );						\
+} while (0)
+
+#define R600_DISPATCH_AGE(age) do {					\
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));		\
+	OUT_RING((R600_LAST_DISPATCH_REG - R600_SET_CONFIG_REG_OFFSET) >> 2);  \
+	OUT_RING(age);							\
+} while (0)
+
+#define R600_FRAME_AGE(age) do {					\
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));		\
+	OUT_RING((R600_LAST_FRAME_REG - R600_SET_CONFIG_REG_OFFSET) >> 2);  \
+	OUT_RING(age);							\
+} while (0)
+
+#define R600_CLEAR_AGE(age) do {					\
+	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));		\
+	OUT_RING((R600_LAST_CLEAR_REG - R600_SET_CONFIG_REG_OFFSET) >> 2);  \
+	OUT_RING(age);							\
+} while (0)
+
+/* ================================================================
+ * Ring control
+ */
+
+#define RADEON_VERBOSE	0
+
+#define RING_LOCALS	int write, _nr, _align_nr; unsigned int mask; u32 *ring;
+
+#define RADEON_RING_ALIGN 16
+
+#define BEGIN_RING( n ) do {						\
+	if ( RADEON_VERBOSE ) {						\
+		DRM_INFO( "BEGIN_RING( %d )\n", (n));			\
+	}								\
+	_align_nr = RADEON_RING_ALIGN - ((dev_priv->ring.tail + n) & (RADEON_RING_ALIGN-1));	\
+	_align_nr += n;							\
+	if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) {	\
+		COMMIT_RING();						\
+		radeon_wait_ring( dev_priv, _align_nr * sizeof(u32));	\
+	}								\
+	_nr = n; dev_priv->ring.space -= (n) * sizeof(u32);		\
+	ring = dev_priv->ring.start;					\
+	write = dev_priv->ring.tail;					\
+	mask = dev_priv->ring.tail_mask;				\
+} while (0)
+
+#define ADVANCE_RING() do {						\
+	if ( RADEON_VERBOSE ) {						\
+		DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",	\
+			  write, dev_priv->ring.tail );			\
+	}								\
+	if (((dev_priv->ring.tail + _nr) & mask) != write) {		\
+		DRM_ERROR(						\
+			"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n",	\
+			((dev_priv->ring.tail + _nr) & mask),		\
+			write, __LINE__);				\
+	} else								\
+		dev_priv->ring.tail = write;				\
+} while (0)
+
+extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
+
+#define COMMIT_RING() do {						\
+		radeon_commit_ring(dev_priv);				\
+	} while(0)
+
+#define OUT_RING( x ) do {						\
+	if ( RADEON_VERBOSE ) {						\
+		DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",		\
+			   (unsigned int)(x), write );			\
+	}								\
+	ring[write++] = (x);						\
+	write &= mask;							\
+} while (0)
+
+#define OUT_RING_REG( reg, val ) do {					\
+	OUT_RING( CP_PACKET0( reg, 0 ) );				\
+	OUT_RING( val );						\
+} while (0)
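+
+/*
+ * Typical use of the ring macros (an illustrative sketch, not from the
+ * original sources; RING_LOCALS must be declared in the emitting
+ * function):
+ *
+ *	RING_LOCALS;
+ *
+ *	BEGIN_RING(2);
+ *	OUT_RING_REG(RADEON_LAST_FRAME_REG, age);
+ *	ADVANCE_RING();
+ *	COMMIT_RING();
+ */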
+
+#define OUT_RING_TABLE( tab, sz ) do {					\
+	int _size = (sz);					\
+	int *_tab = (int *)(tab);				\
+								\
+	if (write + _size > mask) {				\
+		int _i = (mask+1) - write;			\
+		_size -= _i;					\
+		while (_i > 0 ) {				\
+			*(int *)(ring + write) = *_tab++;	\
+			write++;				\
+			_i--;					\
+		}						\
+		write = 0;					\
+		_tab += _i;					\
+	}							\
+	while (_size > 0) {					\
+		*(ring + write) = *_tab++;			\
+		write++;					\
+		_size--;					\
+	}							\
+	write &= mask;						\
+} while (0)
+
+/**
+ * Copy given number of dwords from drm buffer to the ring buffer.
+ */
+#define OUT_RING_DRM_BUFFER(buf, sz) do {				\
+	int _size = (sz) * 4;						\
+	struct drm_buffer *_buf = (buf);				\
+	int _part_size;							\
+	while (_size > 0) {						\
+		_part_size = _size;					\
+									\
+		if (write + _part_size/4 > mask)			\
+			_part_size = ((mask + 1) - write)*4;		\
+									\
+		if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE)	\
+			_part_size = PAGE_SIZE - drm_buffer_index(_buf);\
+									\
+		memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)]	\
+			[drm_buffer_index(_buf)], _part_size);		\
+									\
+		_size -= _part_size;					\
+		write = (write + _part_size/4) & mask;			\
+		drm_buffer_advance(_buf, _part_size);			\
+	}								\
+} while (0)
+
+
+#endif				/* CONFIG_DRM_RADEON_UMS */
+
+#endif				/* __RADEON_DRV_H__ */
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_encoders.c b/linux-imx/drivers/gpu/drm/radeon/radeon_encoders.c
new file mode 100644
index 0000000..bd4959c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+extern void
+radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+			     struct drm_connector *drm_connector);
+extern void
+radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+			   struct drm_connector *drm_connector);
+
+
+static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *clone_encoder;
+	uint32_t index_mask = 0;
+	int count;
+
+	/* DIG routing gets problematic */
+	if (rdev->family >= CHIP_R600)
+		return index_mask;
+	/* LVDS/TV are too wacky */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return index_mask;
+	/* DVO requires 2x ppll clocks depending on tmds chip */
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
+		return index_mask;
+
+	count = -1;
+	list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
+		count++;
+
+		if (clone_encoder == encoder)
+			continue;
+		if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			continue;
+		if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
+			continue;
+		index_mask |= (1 << count);
+	}
+	return index_mask;
+}
+
+void radeon_setup_encoder_clones(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		encoder->possible_clones = radeon_encoder_clones(encoder);
+	}
+}
+
+uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t ret = 0;
+
+	switch (supported_device) {
+	case ATOM_DEVICE_CRT1_SUPPORT:
+	case ATOM_DEVICE_TV1_SUPPORT:
+	case ATOM_DEVICE_TV2_SUPPORT:
+	case ATOM_DEVICE_CRT2_SUPPORT:
+	case ATOM_DEVICE_CV_SUPPORT:
+		switch (dac) {
+		case 1: /* dac a */
+			if ((rdev->family == CHIP_RS300) ||
+			    (rdev->family == CHIP_RS400) ||
+			    (rdev->family == CHIP_RS480))
+				ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
+			else if (ASIC_IS_AVIVO(rdev))
+				ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
+			else
+				ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
+			break;
+		case 2: /* dac b */
+			if (ASIC_IS_AVIVO(rdev))
+				ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
+			else {
+				/*if (rdev->family == CHIP_R200)
+				  ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+				  else*/
+				ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
+			}
+			break;
+		case 3: /* external dac */
+			if (ASIC_IS_AVIVO(rdev))
+				ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
+			else
+				ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+			break;
+		}
+		break;
+	case ATOM_DEVICE_LCD1_SUPPORT:
+		if (ASIC_IS_AVIVO(rdev))
+			ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
+		else
+			ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
+		break;
+	case ATOM_DEVICE_DFP1_SUPPORT:
+		if ((rdev->family == CHIP_RS300) ||
+		    (rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480))
+			ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+		else if (ASIC_IS_AVIVO(rdev))
+			ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
+		else
+			ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
+		break;
+	case ATOM_DEVICE_LCD2_SUPPORT:
+	case ATOM_DEVICE_DFP2_SUPPORT:
+		if ((rdev->family == CHIP_RS600) ||
+		    (rdev->family == CHIP_RS690) ||
+		    (rdev->family == CHIP_RS740))
+			ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
+		else if (ASIC_IS_AVIVO(rdev))
+			ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
+		else
+			ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
+		break;
+	case ATOM_DEVICE_DFP3_SUPPORT:
+		ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
+		break;
+	}
+
+	return ret;
+}
+
+void
+radeon_link_encoder_connector(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+
+	/* walk the list and link encoders to connectors */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+			radeon_encoder = to_radeon_encoder(encoder);
+			if (radeon_encoder->devices & radeon_connector->devices) {
+				drm_mode_connector_attach_encoder(connector, encoder);
+				if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+					if (rdev->is_atom_bios)
+						radeon_atom_backlight_init(radeon_encoder, connector);
+					else
+						radeon_legacy_backlight_init(radeon_encoder, connector);
+					rdev->mode_info.bl_encoder = radeon_encoder;
+				}
+			}
+		}
+	}
+}
+
+void radeon_encoder_set_active_device(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+			radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices;
+			DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
+				  radeon_encoder->active_device, radeon_encoder->devices,
+				  radeon_connector->devices, encoder->encoder_type);
+		}
+	}
+}
+
+struct drm_connector *
+radeon_get_connector_for_encoder(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_encoder->active_device & radeon_connector->devices)
+			return connector;
+	}
+	return NULL;
+}
+
+struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_encoder->devices & radeon_connector->devices)
+			return connector;
+	}
+	return NULL;
+}
+
+struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *other_encoder;
+	struct radeon_encoder *other_radeon_encoder;
+
+	if (radeon_encoder->is_ext_encoder)
+		return NULL;
+
+	list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+		if (other_encoder == encoder)
+			continue;
+		other_radeon_encoder = to_radeon_encoder(other_encoder);
+		if (other_radeon_encoder->is_ext_encoder &&
+		    (radeon_encoder->devices & other_radeon_encoder->devices))
+			return other_encoder;
+	}
+	return NULL;
+}
+
+u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
+{
+	struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder);
+
+	if (other_encoder) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_TRAVIS:
+		case ENCODER_OBJECT_ID_NUTMEG:
+			return radeon_encoder->encoder_id;
+		default:
+			return ENCODER_OBJECT_ID_NONE;
+		}
+	}
+	return ENCODER_OBJECT_ID_NONE;
+}
+
+void radeon_panel_mode_fixup(struct drm_encoder *encoder,
+			     struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+	unsigned hblank = native_mode->htotal - native_mode->hdisplay;
+	unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
+	unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
+	unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
+	unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
+	unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
+
+	adjusted_mode->clock = native_mode->clock;
+	adjusted_mode->flags = native_mode->flags;
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		adjusted_mode->hdisplay = native_mode->hdisplay;
+		adjusted_mode->vdisplay = native_mode->vdisplay;
+	}
+
+	adjusted_mode->htotal = native_mode->hdisplay + hblank;
+	adjusted_mode->hsync_start = native_mode->hdisplay + hover;
+	adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;
+
+	adjusted_mode->vtotal = native_mode->vdisplay + vblank;
+	adjusted_mode->vsync_start = native_mode->vdisplay + vover;
+	adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;
+
+	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
+		adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
+	}
+
+	adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
+	adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
+	adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;
+
+	adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
+	adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
+	adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
+
+}
+
+bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
+				    u32 pixel_clock)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+
+	connector = radeon_get_connector_for_encoder(encoder);
+	/* if we don't have an active device yet, just use one of
+	 * the connectors tied to the encoder.
+	 */
+	if (!connector)
+		connector = radeon_get_connector_for_encoder_init(encoder);
+	radeon_connector = to_radeon_connector(connector);
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIB:
+		if (radeon_connector->use_digital) {
+			/* HDMI 1.3 supports up to 340 MHz over single link */
+			if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+				if (pixel_clock > 340000)
+					return true;
+				else
+					return false;
+			} else {
+				if (pixel_clock > 165000)
+					return true;
+				else
+					return false;
+			}
+		} else
+			return false;
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_HDMIA:
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		dig_connector = radeon_connector->con_priv;
+		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+			return false;
+		else {
+			/* HDMI 1.3 supports up to 340 MHz over single link */
+			if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+				if (pixel_clock > 340000)
+					return true;
+				else
+					return false;
+			} else {
+				if (pixel_clock > 165000)
+					return true;
+				else
+					return false;
+			}
+		}
+	default:
+		return false;
+	}
+}
+
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_family.h b/linux-imx/drivers/gpu/drm/radeon/radeon_family.h
new file mode 100644
index 0000000..36e9803
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_family.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+
+/* This file defines the CHIP_ family values and chip flags used in the
+ * PCI ID tables.  It is shared between the KMS and non-KMS code because
+ * duplicating it and then updating only one copy is a recipe for bugs.
+ */
+#ifndef RADEON_FAMILY_H
+#define RADEON_FAMILY_H
+/*
+ * Radeon chip families
+ */
+enum radeon_family {
+	CHIP_R100 = 0,
+	CHIP_RV100,
+	CHIP_RS100,
+	CHIP_RV200,
+	CHIP_RS200,
+	CHIP_R200,
+	CHIP_RV250,
+	CHIP_RS300,
+	CHIP_RV280,
+	CHIP_R300,
+	CHIP_R350,
+	CHIP_RV350,
+	CHIP_RV380,
+	CHIP_R420,
+	CHIP_R423,
+	CHIP_RV410,
+	CHIP_RS400,
+	CHIP_RS480,
+	CHIP_RS600,
+	CHIP_RS690,
+	CHIP_RS740,
+	CHIP_RV515,
+	CHIP_R520,
+	CHIP_RV530,
+	CHIP_RV560,
+	CHIP_RV570,
+	CHIP_R580,
+	CHIP_R600,
+	CHIP_RV610,
+	CHIP_RV630,
+	CHIP_RV670,
+	CHIP_RV620,
+	CHIP_RV635,
+	CHIP_RS780,
+	CHIP_RS880,
+	CHIP_RV770,
+	CHIP_RV730,
+	CHIP_RV710,
+	CHIP_RV740,
+	CHIP_CEDAR,
+	CHIP_REDWOOD,
+	CHIP_JUNIPER,
+	CHIP_CYPRESS,
+	CHIP_HEMLOCK,
+	CHIP_PALM,
+	CHIP_SUMO,
+	CHIP_SUMO2,
+	CHIP_BARTS,
+	CHIP_TURKS,
+	CHIP_CAICOS,
+	CHIP_CAYMAN,
+	CHIP_ARUBA,
+	CHIP_TAHITI,
+	CHIP_PITCAIRN,
+	CHIP_VERDE,
+	CHIP_OLAND,
+	CHIP_HAINAN,
+	CHIP_LAST,
+};
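+
+/*
+ * Note: the order of this enum is significant.  The driver gates
+ * features with range checks such as (family >= CHIP_R600) or
+ * (family <= CHIP_RV280), so new chips must be inserted in generation
+ * order before CHIP_LAST.
+ */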
+
+/*
+ * Chip flags
+ */
+enum radeon_chip_flags {
+	RADEON_FAMILY_MASK = 0x0000ffffUL,
+	RADEON_FLAGS_MASK = 0xffff0000UL,
+	RADEON_IS_MOBILITY = 0x00010000UL,
+	RADEON_IS_IGP = 0x00020000UL,
+	RADEON_SINGLE_CRTC = 0x00040000UL,
+	RADEON_IS_AGP = 0x00080000UL,
+	RADEON_HAS_HIERZ = 0x00100000UL,
+	RADEON_IS_PCIE = 0x00200000UL,
+	RADEON_NEW_MEMMAP = 0x00400000UL,
+	RADEON_IS_PCI = 0x00800000UL,
+	RADEON_IS_IGPGART = 0x01000000UL,
+};
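+
+/*
+ * A device's flags word packs the radeon_family value into the low 16
+ * bits and the feature flags above it, so the family is recovered with
+ * (flags & RADEON_FAMILY_MASK), as e.g. IGP_WRITE_MCIND() does.
+ */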
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_fb.c b/linux-imx/drivers/gpu/drm/radeon/radeon_fb.c
new file mode 100644
index 0000000..b174674
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_fb.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     David Airlie
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#include <drm/drm_fb_helper.h>
+
+#include <linux/vga_switcheroo.h>
+
+/* Object hierarchy:
+ * radeon_fbdev contains a drm_fb_helper plus a radeon framebuffer;
+ * the helper holds a pointer to the radeon framebuffer base class.
+ */
+struct radeon_fbdev {
+	struct drm_fb_helper helper;
+	struct radeon_framebuffer rfb;
+	struct list_head fbdev_list;
+	struct radeon_device *rdev;
+};
+
+static struct fb_ops radeonfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = cfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
+{
+	int aligned = width;
+	int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
+	int pitch_mask = 0;
+
+	switch (bpp / 8) {
+	case 1:
+		pitch_mask = align_large ? 255 : 127;
+		break;
+	case 2:
+		pitch_mask = align_large ? 127 : 31;
+		break;
+	case 3:
+	case 4:
+		pitch_mask = align_large ? 63 : 15;
+		break;
+	}
+
+	aligned += pitch_mask;
+	aligned &= ~pitch_mask;
+	return aligned;
+}
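+
+/*
+ * A worked example (not from the original sources): a 1025-pixel-wide
+ * 32 bpp surface on an AVIVO part uses pitch_mask = 63, so the pitch is
+ * aligned up to (1025 + 63) & ~63 = 1088 pixels.
+ */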
+
+static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
+{
+	struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
+	int ret;
+
+	ret = radeon_bo_reserve(rbo, false);
+	if (likely(ret == 0)) {
+		radeon_bo_kunmap(rbo);
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+	drm_gem_object_unreference_unlocked(gobj);
+}
+
+static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+					 struct drm_mode_fb_cmd2 *mode_cmd,
+					 struct drm_gem_object **gobj_p)
+{
+	struct radeon_device *rdev = rfbdev->rdev;
+	struct drm_gem_object *gobj = NULL;
+	struct radeon_bo *rbo = NULL;
+	bool fb_tiled = false; /* useful for testing */
+	u32 tiling_flags = 0;
+	int ret;
+	int aligned_size, size;
+	int height = mode_cmd->height;
+	u32 bpp, depth;
+
+	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+	/* need to align pitch with crtc limits */
+	mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
+						  fb_tiled) * ((bpp + 1) / 8);
+
+	if (rdev->family >= CHIP_R600)
+		height = ALIGN(mode_cmd->height, 8);
+	size = mode_cmd->pitches[0] * height;
+	aligned_size = ALIGN(size, PAGE_SIZE);
+	ret = radeon_gem_object_create(rdev, aligned_size, 0,
+				       RADEON_GEM_DOMAIN_VRAM,
+				       false, true,
+				       &gobj);
+	if (ret) {
+		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
+		       aligned_size);
+		return -ENOMEM;
+	}
+	rbo = gem_to_radeon_bo(gobj);
+
+	if (fb_tiled)
+		tiling_flags = RADEON_TILING_MACRO;
+
+#ifdef __BIG_ENDIAN
+	switch (bpp) {
+	case 32:
+		tiling_flags |= RADEON_TILING_SWAP_32BIT;
+		break;
+	case 16:
+		tiling_flags |= RADEON_TILING_SWAP_16BIT;
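+		/* fall through */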
+	default:
+		break;
+	}
+#endif
+
+	if (tiling_flags) {
+		ret = radeon_bo_set_tiling_flags(rbo,
+						 tiling_flags | RADEON_TILING_SURFACE,
+						 mode_cmd->pitches[0]);
+		if (ret)
+			dev_err(rdev->dev, "FB failed to set tiling flags\n");
+	}
+
+
+	ret = radeon_bo_reserve(rbo, false);
+	if (unlikely(ret != 0))
+		goto out_unref;
+	/* Only 27 bit offset for legacy CRTC */
+	ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
+				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+				       NULL);
+	if (ret) {
+		radeon_bo_unreserve(rbo);
+		goto out_unref;
+	}
+	if (fb_tiled)
+		radeon_bo_check_tiling(rbo, 0, 0);
+	ret = radeon_bo_kmap(rbo, NULL);
+	radeon_bo_unreserve(rbo);
+	if (ret) {
+		goto out_unref;
+	}
+
+	*gobj_p = gobj;
+	return 0;
+out_unref:
+	radeonfb_destroy_pinned_object(gobj);
+	*gobj_p = NULL;
+	return ret;
+}
+
+static int radeonfb_create(struct drm_fb_helper *helper,
+			   struct drm_fb_helper_surface_size *sizes)
+{
+	struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
+	struct radeon_device *rdev = rfbdev->rdev;
+	struct fb_info *info;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_gem_object *gobj = NULL;
+	struct radeon_bo *rbo = NULL;
+	struct device *device = &rdev->pdev->dev;
+	int ret;
+	unsigned long tmp;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+
+	/* avivo can't scanout real 24bpp */
+	if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+		sizes->surface_bpp = 32;
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+	if (ret) {
+		DRM_ERROR("failed to create fbcon object %d\n", ret);
+		return ret;
+	}
+
+	rbo = gem_to_radeon_bo(gobj);
+
+	/* okay, we have an object; now allocate the framebuffer */
+	info = framebuffer_alloc(0, device);
+	if (info == NULL) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
+	info->par = rfbdev;
+
+	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+	if (ret) {
+		DRM_ERROR("failed to initalise framebuffer %d\n", ret);
+		goto out_unref;
+	}
+
+	fb = &rfbdev->rfb.base;
+
+	/* setup helper */
+	rfbdev->helper.fb = fb;
+	rfbdev->helper.fbdev = info;
+
+	memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
+
+	strcpy(info->fix.id, "radeondrmfb");
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &radeonfb_ops;
+
+	tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
+	info->fix.smem_start = rdev->mc.aper_base + tmp;
+	info->fix.smem_len = radeon_bo_size(rbo);
+	info->screen_base = rbo->kptr;
+	info->screen_size = radeon_bo_size(rbo);
+
+	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
+
+	/* setup aperture base/size for vesafb takeover */
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
+	info->apertures->ranges[0].size = rdev->mc.aper_size;
+
+	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+	if (info->screen_base == NULL) {
+		ret = -ENOSPC;
+		goto out_unref;
+	}
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unref;
+	}
+
+	DRM_INFO("fb mappable at 0x%lX\n",  info->fix.smem_start);
+	DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)rdev->mc.aper_base);
+	DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
+	DRM_INFO("fb depth is %d\n", fb->depth);
+	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
+
+	vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
+	return 0;
+
+out_unref:
+	if (fb && ret) {
+		drm_gem_object_unreference(gobj);
+		drm_framebuffer_unregister_private(fb);
+		drm_framebuffer_cleanup(fb);
+		kfree(fb);
+	}
+	return ret;
+}
+
+void radeon_fb_output_poll_changed(struct radeon_device *rdev)
+{
+	drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
+}
+
+static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
+{
+	struct fb_info *info;
+	struct radeon_framebuffer *rfb = &rfbdev->rfb;
+
+	if (rfbdev->helper.fbdev) {
+		info = rfbdev->helper.fbdev;
+
+		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+
+	if (rfb->obj) {
+		radeonfb_destroy_pinned_object(rfb->obj);
+		rfb->obj = NULL;
+	}
+	drm_fb_helper_fini(&rfbdev->helper);
+	drm_framebuffer_unregister_private(&rfb->base);
+	drm_framebuffer_cleanup(&rfb->base);
+
+	return 0;
+}
+
+static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+	.gamma_set = radeon_crtc_fb_gamma_set,
+	.gamma_get = radeon_crtc_fb_gamma_get,
+	.fb_probe = radeonfb_create,
+};
+
+int radeon_fbdev_init(struct radeon_device *rdev)
+{
+	struct radeon_fbdev *rfbdev;
+	int bpp_sel = 32;
+	int ret;
+
+	/* select 8 bpp console on RN50 or cards with 32MB or less of VRAM */
+	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+		bpp_sel = 8;
+
+	rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
+	if (!rfbdev)
+		return -ENOMEM;
+
+	rfbdev->rdev = rdev;
+	rdev->mode_info.rfbdev = rfbdev;
+	rfbdev->helper.funcs = &radeon_fb_helper_funcs;
+
+	ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
+				 rdev->num_crtc,
+				 RADEONFB_CONN_LIMIT);
+	if (ret) {
+		kfree(rfbdev);
+		return ret;
+	}
+
+	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(rdev->ddev);
+
+	drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
+	return 0;
+}
+
+void radeon_fbdev_fini(struct radeon_device *rdev)
+{
+	if (!rdev->mode_info.rfbdev)
+		return;
+
+	radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
+	kfree(rdev->mode_info.rfbdev);
+	rdev->mode_info.rfbdev = NULL;
+}
+
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+{
+	fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+}
+
+int radeon_fbdev_total_size(struct radeon_device *rdev)
+{
+	struct radeon_bo *robj;
+	int size = 0;
+
+	robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);
+	size += radeon_bo_size(robj);
+	return size;
+}
+
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+{
+	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
+		return true;
+	return false;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_fence.c b/linux-imx/drivers/gpu/drm/radeon/radeon_fence.c
new file mode 100644
index 0000000..ddb8f8e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_fence.c
@@ -0,0 +1,949 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ *    Dave Airlie
+ */
+#include <linux/seq_file.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_trace.h"
+
+/*
+ * Fences
+ * Fences mark an event in the GPU's pipeline and are used
+ * for GPU/CPU synchronization.  When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.  Whether
+ * we use a scratch register or memory location depends on the asic
+ * and whether writeback is enabled.
+ */
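+
+/*
+ * Typical driver-internal usage (an illustrative sketch, not from the
+ * original sources): a fence is emitted after work is queued on a ring
+ * and waited on before the CPU reuses the buffers:
+ *
+ *	struct radeon_fence *fence;
+ *
+ *	r = radeon_fence_emit(rdev, &fence, ring);
+ *	...
+ *	r = radeon_fence_wait(fence, false);
+ */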
+
+/**
+ * radeon_fence_write - write a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @seq: sequence number to write
+ * @ring: ring index the fence is associated with
+ *
+ * Writes a fence value to memory or a scratch register (all asics).
+ */
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
+{
+	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+		if (drv->cpu_addr) {
+			*drv->cpu_addr = cpu_to_le32(seq);
+		}
+	} else {
+		WREG32(drv->scratch_reg, seq);
+	}
+}
+
+/**
+ * radeon_fence_read - read a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Reads a fence value from memory or a scratch register (all asics).
+ * Returns the value of the fence read from memory or register.
+ */
+static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
+{
+	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+	u32 seq = 0;
+
+	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+		if (drv->cpu_addr) {
+			seq = le32_to_cpu(*drv->cpu_addr);
+		} else {
+			seq = lower_32_bits(atomic64_read(&drv->last_seq));
+		}
+	} else {
+		seq = RREG32(drv->scratch_reg);
+	}
+	return seq;
+}
+
+/**
+ * radeon_fence_emit - emit a fence on the requested ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ * @ring: ring index the fence is associated with
+ *
+ * Emits a fence command on the requested ring (all asics).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int radeon_fence_emit(struct radeon_device *rdev,
+		      struct radeon_fence **fence,
+		      int ring)
+{
+	/* we are protected by the ring emission mutex */
+	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
+	if ((*fence) == NULL) {
+		return -ENOMEM;
+	}
+	kref_init(&((*fence)->kref));
+	(*fence)->rdev = rdev;
+	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
+	(*fence)->ring = ring;
+	radeon_fence_ring_emit(rdev, ring, *fence);
+	trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
+	return 0;
+}
+
+/**
+ * radeon_fence_process - process a fence
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
+ */
+void radeon_fence_process(struct radeon_device *rdev, int ring)
+{
+	uint64_t seq, last_seq, last_emitted;
+	unsigned count_loop = 0;
+	bool wake = false;
+
+	/* Note: there is a scenario here that could lead to an infinite
+	 * loop, but it is very unlikely to happen.  For it to happen, the
+	 * current polling process would have to be interrupted by another
+	 * process that updates last_seq between our atomic read and the
+	 * xchg below.
+	 *
+	 * Moreover, for this to become an infinite loop, new fences would
+	 * have to be signaled continuously, i.e. radeon_fence_read would
+	 * have to return a different value on every iteration for both the
+	 * polling process and the interrupting process, and the value the
+	 * other process stores as last_seq would have to be higher than
+	 * the seq value we just read.  That means the current process would
+	 * have to be interrupted between radeon_fence_read and the atomic
+	 * xchg every time around the loop.
+	 *
+	 * To be extra safe we count the number of times we loop and bail
+	 * out after 10 iterations, accepting that we may have temporarily
+	 * set last_seq to an older value than the true last seq.
+	 */
+	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+	do {
+		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
+		seq = radeon_fence_read(rdev, ring);
+		seq |= last_seq & 0xffffffff00000000LL;
+		if (seq < last_seq) {
+			seq &= 0xffffffff;
+			seq |= last_emitted & 0xffffffff00000000LL;
+		}
+
+		if (seq <= last_seq || seq > last_emitted) {
+			break;
+		}
+		/* If we loop again we don't want to return without
+		 * checking if a fence is signaled, as it means that the
+		 * seq we just read is different from the previous one.
+		 */
+		wake = true;
+		last_seq = seq;
+		if ((count_loop++) > 10) {
+			/* We looped too many times; bail out, accepting
+			 * that we might have set an older fence seq than
+			 * the current real last seq as signaled by the hw.
+			 */
+			break;
+		}
+	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
+
+	if (wake) {
+		rdev->fence_drv[ring].last_activity = jiffies;
+		wake_up_all(&rdev->fence_queue);
+	}
+}
+
+/**
+ * radeon_fence_destroy - destroy a fence
+ *
+ * @kref: fence kref
+ *
+ * Frees the fence object (all asics).
+ */
+static void radeon_fence_destroy(struct kref *kref)
+{
+	struct radeon_fence *fence;
+
+	fence = container_of(kref, struct radeon_fence, kref);
+	kfree(fence);
+}
+
+/**
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
+ *
+ * @rdev: radeon device pointer
+ * @seq: sequence number
+ * @ring: ring index the fence is associated with
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if the fence has signaled (current fence value
+ * is >= requested value) or false if it has not (current fence
+ * value is < the requested value).  Helper function for
+ * radeon_fence_signaled().
+ */
+static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
+				      u64 seq, unsigned ring)
+{
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+		return true;
+	}
+	/* poll new last sequence at least once */
+	radeon_fence_process(rdev, ring);
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+		return true;
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_signaled - check if a fence has signaled
+ *
+ * @fence: radeon fence object
+ *
+ * Check if the requested fence has signaled (all asics).
+ * Returns true if the fence has signaled or false if it has not.
+ */
+bool radeon_fence_signaled(struct radeon_fence *fence)
+{
+	if (!fence) {
+		return true;
+	}
+	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
+		return true;
+	}
+	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
+		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+		return true;
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_wait_seq - wait for a specific sequence number
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number we want to wait for
+ * @ring: ring index the fence is associated with
+ * @intr: use interruptible sleep
+ * @lock_ring: whether the ring should be locked or not
+ *
+ * Wait for the requested sequence number to be written (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number.  Helper function
+ * for radeon_fence_wait(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ * -EDEADLK is returned when a GPU lockup has been detected and the ring is
+ * marked as not ready so no further jobs get scheduled until a successful
+ * reset.
+ */
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
+				 unsigned ring, bool intr, bool lock_ring)
+{
+	unsigned long timeout, last_activity;
+	uint64_t seq;
+	unsigned i;
+	bool signaled;
+	int r;
+
+	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+		if (!rdev->ring[ring].ready) {
+			return -EBUSY;
+		}
+
+		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
+			/* the normal case, timeout is somewhere before last_activity */
+			timeout = rdev->fence_drv[ring].last_activity - timeout;
+		} else {
+			/* either jiffies wrapped around, or no fence was
+			 * signaled in the last 500ms; either way we just wait
+			 * for the minimum amount and then check for a lockup
+			 */
+			timeout = 1;
+		}
+		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+		/* Save current last activity value, used to check for GPU lockups */
+		last_activity = rdev->fence_drv[ring].last_activity;
+
+		trace_radeon_fence_wait_begin(rdev->ddev, seq);
+		radeon_irq_kms_sw_irq_get(rdev, ring);
+		if (intr) {
+			r = wait_event_interruptible_timeout(rdev->fence_queue,
+				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+				timeout);
+		} else {
+			r = wait_event_timeout(rdev->fence_queue,
+				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+				timeout);
+		}
+		radeon_irq_kms_sw_irq_put(rdev, ring);
+		if (unlikely(r < 0)) {
+			return r;
+		}
+		trace_radeon_fence_wait_end(rdev->ddev, seq);
+
+		if (unlikely(!signaled)) {
+			/* we were interrupted for some reason and fence
+			 * isn't signaled yet, resume waiting */
+			if (r) {
+				continue;
+			}
+
+			/* check if sequence value has changed since last_activity */
+			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+				continue;
+			}
+
+			if (lock_ring) {
+				mutex_lock(&rdev->ring_lock);
+			}
+
+			/* test if somebody else has already decided that this is a lockup */
+			if (last_activity != rdev->fence_drv[ring].last_activity) {
+				if (lock_ring) {
+					mutex_unlock(&rdev->ring_lock);
+				}
+				continue;
+			}
+
+			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+				/* good news we believe it's a lockup */
+				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
+					 target_seq, seq);
+
+				/* change last activity so nobody else thinks there is a lockup */
+				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+					rdev->fence_drv[i].last_activity = jiffies;
+				}
+
+				/* mark the ring as not ready any more */
+				rdev->ring[ring].ready = false;
+				if (lock_ring) {
+					mutex_unlock(&rdev->ring_lock);
+				}
+				return -EDEADLK;
+			}
+
+			if (lock_ring) {
+				mutex_unlock(&rdev->ring_lock);
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * radeon_fence_wait - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested fence to signal (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the fence.
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int radeon_fence_wait(struct radeon_fence *fence, bool intr)
+{
+	int r;
+
+	if (fence == NULL) {
+		WARN(1, "Querying an invalid fence : %p !\n", fence);
+		return -EINVAL;
+	}
+
+	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
+				  fence->ring, intr, true);
+	if (r) {
+		return r;
+	}
+	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+	return 0;
+}
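+
+/* Usage sketch (editorial, not upstream code): a caller typically retries
+ * an interruptible wait on signal delivery and treats -EDEADLK as a
+ * detected lockup, recovering with a reset much like
+ * radeon_gem_handle_lockup() in radeon_gem.c below:
+ *
+ *	r = radeon_fence_wait(fence, true);
+ *	if (r == -ERESTARTSYS)
+ *		return r;	(interrupted by a signal, caller restarts)
+ *	if (r == -EDEADLK)
+ *		r = radeon_gpu_reset(rdev);
+ */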
+
+static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+	unsigned i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_wait_any_seq - wait for a sequence number on any ring
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number(s) we want to wait for
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics).  Sequence number array is indexed by ring id.
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number.  Helper function
+ * for radeon_fence_wait_any(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ */
+static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
+				     u64 *target_seq, bool intr)
+{
+	unsigned long timeout, last_activity, tmp;
+	unsigned i, ring = RADEON_NUM_RINGS;
+	bool signaled;
+	int r;
+
+	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!target_seq[i]) {
+			continue;
+		}
+
+		/* use the most recent one as indicator */
+		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
+			last_activity = rdev->fence_drv[i].last_activity;
+		}
+
+		/* For lockup detection just pick the lowest ring we are
+		 * actively waiting for
+		 */
+		if (i < ring) {
+			ring = i;
+		}
+	}
+
+	/* nothing to wait for? */
+	if (ring == RADEON_NUM_RINGS) {
+		return -ENOENT;
+	}
+
+	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+		if (time_after(last_activity, timeout)) {
+			/* the normal case, timeout is somewhere before last_activity */
+			timeout = last_activity - timeout;
+		} else {
+			/* either jiffies wrapped around, or no fence was signaled
+			 * in the last 500ms; either way, wait the minimum amount
+			 * and then check for a lockup
+			 */
+			timeout = 1;
+		}
+
+		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (target_seq[i]) {
+				radeon_irq_kms_sw_irq_get(rdev, i);
+			}
+		}
+		if (intr) {
+			r = wait_event_interruptible_timeout(rdev->fence_queue,
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+				timeout);
+		} else {
+			r = wait_event_timeout(rdev->fence_queue,
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+				timeout);
+		}
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (target_seq[i]) {
+				radeon_irq_kms_sw_irq_put(rdev, i);
+			}
+		}
+		if (unlikely(r < 0)) {
+			return r;
+		}
+		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
+
+		if (unlikely(!signaled)) {
+			/* we were interrupted for some reason and the fence
+			 * isn't signaled yet; resume waiting */
+			if (r) {
+				continue;
+			}
+
+			mutex_lock(&rdev->ring_lock);
+			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
+				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
+					tmp = rdev->fence_drv[i].last_activity;
+				}
+			}
+			/* test if somebody else has already decided that this is a lockup */
+			if (last_activity != tmp) {
+				last_activity = tmp;
+				mutex_unlock(&rdev->ring_lock);
+				continue;
+			}
+
+			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+				/* good news, we believe it's a lockup */
+				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
+					 target_seq[ring]);
+
+				/* change last activity so nobody else thinks there is a lockup */
+				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+					rdev->fence_drv[i].last_activity = jiffies;
+				}
+
+				/* mark the ring as not ready any more */
+				rdev->ring[ring].ready = false;
+				mutex_unlock(&rdev->ring_lock);
+				return -EDEADLK;
+			}
+			mutex_unlock(&rdev->ring_lock);
+		}
+	}
+	return 0;
+}
+
+/**
+ * radeon_fence_wait_any - wait for a fence to signal on any ring
+ *
+ * @rdev: radeon device pointer
+ * @fences: radeon fence object(s)
+ * @intr: use interruptible sleep
+ *
+ * Wait for any requested fence to signal (all asics).  Fence
+ * array is indexed by ring id.  @intr selects whether to use
+ * interruptible (true) or non-interruptible (false) sleep when
+ * waiting for the fences. Used by the suballocator.
+ * Returns 0 if any fence has passed, error for all other cases.
+ */
+int radeon_fence_wait_any(struct radeon_device *rdev,
+			  struct radeon_fence **fences,
+			  bool intr)
+{
+	uint64_t seq[RADEON_NUM_RINGS];
+	unsigned i;
+	int r;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		seq[i] = 0;
+
+		if (!fences[i]) {
+			continue;
+		}
+
+		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
+			/* something was already signaled */
+			return 0;
+		}
+
+		seq[i] = fences[i]->seq;
+	}
+
+	r = radeon_fence_wait_any_seq(rdev, seq, intr);
+	if (r) {
+		return r;
+	}
+	return 0;
+}
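+
+/* Usage sketch (editorial): the suballocator passes one fence slot per
+ * ring and returns as soon as any of them signals; unused slots stay
+ * NULL (the gfx ring slot and gfx_fence below are hypothetical):
+ *
+ *	struct radeon_fence *fences[RADEON_NUM_RINGS] = {};
+ *
+ *	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
+ *	r = radeon_fence_wait_any(rdev, fences, false);
+ */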
+
+/**
+ * radeon_fence_wait_next_locked - wait for the next fence to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for the next fence on the requested ring to signal (all asics).
+ * Returns 0 if the next fence has passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
+{
+	uint64_t seq;
+
+	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
+		/* nothing to wait for, last_seq is
+		   already the last emitted fence */
+		return -ENOENT;
+	}
+	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+}
+
+/**
+ * radeon_fence_wait_empty_locked - wait for all fences to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for all fences on the requested ring to signal (all asics).
+ * Returns 0 if the fences have passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+{
+	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+	int r;
+
+	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+	if (r) {
+		if (r == -EDEADLK) {
+			return -EDEADLK;
+		}
+		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
+			ring, r);
+	}
+	return 0;
+}
+
+/**
+ * radeon_fence_ref - take a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Take a reference on a fence (all asics).
+ * Returns the fence.
+ */
+struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
+{
+	kref_get(&fence->kref);
+	return fence;
+}
+
+/**
+ * radeon_fence_unref - remove a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Remove a reference on a fence (all asics).
+ */
+void radeon_fence_unref(struct radeon_fence **fence)
+{
+	struct radeon_fence *tmp = *fence;
+
+	*fence = NULL;
+	if (tmp) {
+		kref_put(&tmp->kref, radeon_fence_destroy);
+	}
+}
+
+/**
+ * radeon_fence_count_emitted - get the count of emitted fences
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Get the number of fences emitted on the requested ring (all asics).
+ * Returns the number of emitted fences on the ring.  Used by the
+ * dynpm code to track ring activity.
+ */
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+{
+	uint64_t emitted;
+
+	/* We are not protected by ring lock when reading the last sequence
+	 * but it's ok to report slightly wrong fence count here.
+	 */
+	radeon_fence_process(rdev, ring);
+	emitted = rdev->fence_drv[ring].sync_seq[ring]
+		- atomic64_read(&rdev->fence_drv[ring].last_seq);
+	/* avoid a 32-bit wrap-around */
+	if (emitted > 0x10000000) {
+		emitted = 0x10000000;
+	}
+	return (unsigned)emitted;
+}
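+
+/* Usage sketch (editorial): the dynpm code can treat a ring with no
+ * outstanding fences as idle when deciding whether to reclock
+ * (mark_ring_idle() is a hypothetical helper):
+ *
+ *	if (radeon_fence_count_emitted(rdev, ring) == 0)
+ *		mark_ring_idle(ring);
+ */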
+
+/**
+ * radeon_fence_need_sync - do we need a semaphore
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Check if the fence needs to be synced against another ring
+ * (all asics).  If so, we need to emit a semaphore.
+ * Returns true if we need to sync with another ring, false if
+ * not.
+ */
+bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
+{
+	struct radeon_fence_driver *fdrv;
+
+	if (!fence) {
+		return false;
+	}
+
+	if (fence->ring == dst_ring) {
+		return false;
+	}
+
+	/* we are protected by the ring mutex */
+	fdrv = &fence->rdev->fence_drv[dst_ring];
+	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * radeon_fence_note_sync - record the sync point
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Note the sequence number at which point the fence will
+ * be synced with the requested ring (all asics).
+ */
+void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
+{
+	struct radeon_fence_driver *dst, *src;
+	unsigned i;
+
+	if (!fence) {
+		return;
+	}
+
+	if (fence->ring == dst_ring) {
+		return;
+	}
+
+	/* we are protected by the ring mutex */
+	src = &fence->rdev->fence_drv[fence->ring];
+	dst = &fence->rdev->fence_drv[dst_ring];
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (i == dst_ring) {
+			continue;
+		}
+		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
+	}
+}
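+
+/* Usage sketch (editorial): command submission pairs the two helpers
+ * above; a semaphore is only emitted when the destination ring has not
+ * already synced past the fence (the semaphore plumbing is elided):
+ *
+ *	if (radeon_fence_need_sync(fence, dst_ring)) {
+ *		... emit a semaphore signal/wait pair ...
+ *		radeon_fence_note_sync(fence, dst_ring);
+ *	}
+ */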
+
+/**
+ * radeon_fence_driver_start_ring - make the fence driver
+ * ready for use on the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Make the fence driver ready for processing (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has.
+ * Returns 0 for success, errors for failure.
+ */
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
+{
+	uint64_t index;
+	int r;
+
+	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
+		rdev->fence_drv[ring].scratch_reg = 0;
+		if (ring != R600_RING_TYPE_UVD_INDEX) {
+			index = R600_WB_EVENT_OFFSET + ring * 4;
+			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
+							 index;
+
+		} else {
+			/* put fence directly behind firmware */
+			index = ALIGN(rdev->uvd_fw->size, 8);
+			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
+			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
+		}
+
+	} else {
+		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
+		if (r) {
+			dev_err(rdev->dev, "fence failed to get scratch register\n");
+			return r;
+		}
+		index = RADEON_WB_SCRATCH_OFFSET +
+			rdev->fence_drv[ring].scratch_reg -
+			rdev->scratch.reg_base;
+		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
+	}
+	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
+	rdev->fence_drv[ring].initialized = true;
+	dev_info(rdev->dev, "fence driver on ring %d uses gpu addr 0x%016llx and cpu addr 0x%p\n",
+		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
+	return 0;
+}
+
+/**
+ * radeon_fence_driver_init_ring - init the fence driver
+ * for the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Init the fence driver for the requested ring (all asics).
+ * Helper function for radeon_fence_driver_init().
+ */
+static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
+{
+	int i;
+
+	rdev->fence_drv[ring].scratch_reg = -1;
+	rdev->fence_drv[ring].cpu_addr = NULL;
+	rdev->fence_drv[ring].gpu_addr = 0;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		rdev->fence_drv[ring].sync_seq[i] = 0;
+	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
+	rdev->fence_drv[ring].last_activity = jiffies;
+	rdev->fence_drv[ring].initialized = false;
+}
+
+/**
+ * radeon_fence_driver_init - init the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Init the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * radeon_fence_driver_start_ring().
+ * Returns 0 for success.
+ */
+int radeon_fence_driver_init(struct radeon_device *rdev)
+{
+	int ring;
+
+	init_waitqueue_head(&rdev->fence_queue);
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		radeon_fence_driver_init_ring(rdev, ring);
+	}
+	if (radeon_debugfs_fence_init(rdev)) {
+		dev_err(rdev->dev, "fence debugfs file creation failed\n");
+	}
+	return 0;
+}
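+
+/* Editorial note: the intended bring-up order is one-time common init
+ * followed by a per-ring start, only on the rings the asic actually has,
+ * e.g.:
+ *
+ *	r = radeon_fence_driver_init(rdev);
+ *	...
+ *	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ */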
+
+/**
+ * radeon_fence_driver_fini - tear down the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Tear down the fence driver for all possible rings (all asics).
+ */
+void radeon_fence_driver_fini(struct radeon_device *rdev)
+{
+	int ring, r;
+
+	mutex_lock(&rdev->ring_lock);
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		if (!rdev->fence_drv[ring].initialized)
+			continue;
+		r = radeon_fence_wait_empty_locked(rdev, ring);
+		if (r) {
+			/* no need to trigger GPU reset as we are unloading */
+			radeon_fence_driver_force_completion(rdev);
+		}
+		wake_up_all(&rdev->fence_queue);
+		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+		rdev->fence_drv[ring].initialized = false;
+	}
+	mutex_unlock(&rdev->ring_lock);
+}
+
+/**
+ * radeon_fence_driver_force_completion - force all fence waiters to complete
+ *
+ * @rdev: radeon device pointer
+ *
+ * In case of GPU reset failure make sure no process keeps waiting on a fence
+ * that will never complete.
+ */
+void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+{
+	int ring;
+
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		if (!rdev->fence_drv[ring].initialized)
+			continue;
+		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
+	}
+}
+
+
+/*
+ * Fence debugfs
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i, j;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!rdev->fence_drv[i].initialized)
+			continue;
+
+		seq_printf(m, "--- ring %d ---\n", i);
+		seq_printf(m, "Last signaled fence 0x%016llx\n",
+			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
+		seq_printf(m, "Last emitted        0x%016llx\n",
+			   rdev->fence_drv[i].sync_seq[i]);
+
+		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
+			if (i != j && rdev->fence_drv[j].initialized)
+				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
+					   j, rdev->fence_drv[i].sync_seq[j]);
+		}
+	}
+	return 0;
+}
+
+static struct drm_info_list radeon_debugfs_fence_list[] = {
+	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
+};
+#endif
+
+int radeon_debugfs_fence_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
+#else
+	return 0;
+#endif
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_gart.c b/linux-imx/drivers/gpu/drm/radeon/radeon_gart.c
new file mode 100644
index 0000000..2915a1c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_gart.c
@@ -0,0 +1,1289 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_reg.h"
+
+/*
+ * GART
+ * The GART (Graphics Aperture Remapping Table) is an aperture
+ * in the GPU's address space.  System pages can be mapped into
+ * the aperture and look like contiguous pages from the GPU's
+ * perspective.  A page table maps the pages in the aperture
+ * to the actual backing pages in system memory.
+ *
+ * Radeon GPUs support both an internal GART, as described above,
+ * and AGP.  AGP works similarly, but the GART table is configured
+ * and maintained by the northbridge rather than the driver.
+ * Radeon hw has a separate AGP aperture that is programmed to
+ * point to the AGP aperture provided by the northbridge and the
+ * requests are passed through to the northbridge aperture.
+ * Both AGP and internal GART can be used at the same time, however
+ * that is not currently supported by the driver.
+ *
+ * This file handles the common internal GART management.
+ */
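+
+/* Editorial sketch: how a GART aperture offset maps onto the tables
+ * below (see radeon_gart_bind()/radeon_gart_unbind()).  One CPU page
+ * covers PAGE_SIZE / RADEON_GPU_PAGE_SIZE consecutive GPU pages;
+ * RADEON_GPU_PAGE_SIZE is 4KB here, so the ratio is 1 on 4K-page kernels:
+ *
+ *	t = offset / RADEON_GPU_PAGE_SIZE;		(first GPU pte slot)
+ *	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);	(CPU page index)
+ */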
+
+/*
+ * Common GART table functions.
+ */
+/**
+ * radeon_gart_table_ram_alloc - allocate system ram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * gart table to be in system memory.
+ * Returns 0 for success, -ENOMEM for failure.
+ */
+int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
+{
+	void *ptr;
+
+	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
+				   &rdev->gart.table_addr);
+	if (ptr == NULL) {
+		return -ENOMEM;
+	}
+#ifdef CONFIG_X86
+	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
+	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+		set_memory_uc((unsigned long)ptr,
+			      rdev->gart.table_size >> PAGE_SHIFT);
+	}
+#endif
+	rdev->gart.ptr = ptr;
+	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
+	return 0;
+}
+
+/**
+ * radeon_gart_table_ram_free - free system ram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Free system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * gart table to be in system memory.
+ */
+void radeon_gart_table_ram_free(struct radeon_device *rdev)
+{
+	if (rdev->gart.ptr == NULL) {
+		return;
+	}
+#ifdef CONFIG_X86
+	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
+	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+		set_memory_wb((unsigned long)rdev->gart.ptr,
+			      rdev->gart.table_size >> PAGE_SHIFT);
+	}
+#endif
+	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
+			    (void *)rdev->gart.ptr,
+			    rdev->gart.table_addr);
+	rdev->gart.ptr = NULL;
+	rdev->gart.table_addr = 0;
+}
+
+/**
+ * radeon_gart_table_vram_alloc - allocate vram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate video memory for GART page table
+ * (pcie r4xx, r5xx+).  These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj == NULL) {
+		r = radeon_bo_create(rdev, rdev->gart.table_size,
+				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+				     NULL, &rdev->gart.robj);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+/**
+ * radeon_gart_table_vram_pin - pin gart page table in vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Pin the GART page table in vram so it will not be moved
+ * by the memory manager (pcie r4xx, r5xx+).  These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int radeon_gart_table_vram_pin(struct radeon_device *rdev)
+{
+	uint64_t gpu_addr;
+	int r;
+
+	r = radeon_bo_reserve(rdev->gart.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->gart.robj,
+				RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->gart.robj);
+		return r;
+	}
+	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
+	if (r)
+		radeon_bo_unpin(rdev->gart.robj);
+	radeon_bo_unreserve(rdev->gart.robj);
+	rdev->gart.table_addr = gpu_addr;
+	return r;
+}
+
+/**
+ * radeon_gart_table_vram_unpin - unpin gart page table in vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unpin the GART page table in vram (pcie r4xx, r5xx+).
+ * These asics require the gart table to be in video memory.
+ */
+void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj == NULL) {
+		return;
+	}
+	r = radeon_bo_reserve(rdev->gart.robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_kunmap(rdev->gart.robj);
+		radeon_bo_unpin(rdev->gart.robj);
+		radeon_bo_unreserve(rdev->gart.robj);
+		rdev->gart.ptr = NULL;
+	}
+}
+
+/**
+ * radeon_gart_table_vram_free - free gart page table vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Free the video memory used for the GART page table
+ * (pcie r4xx, r5xx+).  These asics require the gart table to
+ * be in video memory.
+ */
+void radeon_gart_table_vram_free(struct radeon_device *rdev)
+{
+	if (rdev->gart.robj == NULL) {
+		return;
+	}
+	radeon_gart_table_vram_unpin(rdev);
+	radeon_bo_unref(&rdev->gart.robj);
+}
+
+/*
+ * Common gart functions.
+ */
+/**
+ * radeon_gart_unbind - unbind pages from the gart page table
+ *
+ * @rdev: radeon_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to unbind
+ *
+ * Unbinds the requested pages from the gart page table and
+ * replaces them with the dummy page (all asics).
+ */
+void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+			int pages)
+{
+	unsigned t;
+	unsigned p;
+	int i, j;
+	u64 page_base;
+
+	if (!rdev->gart.ready) {
+		WARN(1, "trying to unbind memory from uninitialized GART!\n");
+		return;
+	}
+	t = offset / RADEON_GPU_PAGE_SIZE;
+	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+	for (i = 0; i < pages; i++, p++) {
+		if (rdev->gart.pages[p]) {
+			rdev->gart.pages[p] = NULL;
+			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
+			page_base = rdev->gart.pages_addr[p];
+			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+				if (rdev->gart.ptr) {
+					radeon_gart_set_page(rdev, t, page_base);
+				}
+				page_base += RADEON_GPU_PAGE_SIZE;
+			}
+		}
+	}
+	mb();
+	radeon_gart_tlb_flush(rdev);
+}
+
+/**
+ * radeon_gart_bind - bind pages into the gart page table
+ *
+ * @rdev: radeon_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @pagelist: pages to bind
+ * @dma_addr: DMA addresses of pages
+ *
+ * Binds the requested pages to the gart page table
+ * (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
+{
+	unsigned t;
+	unsigned p;
+	uint64_t page_base;
+	int i, j;
+
+	if (!rdev->gart.ready) {
+		WARN(1, "trying to bind memory to uninitialized GART!\n");
+		return -EINVAL;
+	}
+	t = offset / RADEON_GPU_PAGE_SIZE;
+	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+
+	for (i = 0; i < pages; i++, p++) {
+		rdev->gart.pages_addr[p] = dma_addr[i];
+		rdev->gart.pages[p] = pagelist[i];
+		if (rdev->gart.ptr) {
+			page_base = rdev->gart.pages_addr[p];
+			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+				radeon_gart_set_page(rdev, t, page_base);
+				page_base += RADEON_GPU_PAGE_SIZE;
+			}
+		}
+	}
+	mb();
+	radeon_gart_tlb_flush(rdev);
+	return 0;
+}
+
+/**
+ * radeon_gart_restore - bind all pages in the gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Binds all pages in the gart page table (all asics).
+ * Used to rebuild the gart table on device startup or resume.
+ */
+void radeon_gart_restore(struct radeon_device *rdev)
+{
+	int i, j, t;
+	u64 page_base;
+
+	if (!rdev->gart.ptr) {
+		return;
+	}
+	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
+		page_base = rdev->gart.pages_addr[i];
+		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+			radeon_gart_set_page(rdev, t, page_base);
+			page_base += RADEON_GPU_PAGE_SIZE;
+		}
+	}
+	mb();
+	radeon_gart_tlb_flush(rdev);
+}
+
+/**
+ * radeon_gart_init - init the driver info for managing the gart
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate the dummy page and init the gart driver info (all asics).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_gart_init(struct radeon_device *rdev)
+{
+	int r, i;
+
+	if (rdev->gart.pages) {
+		return 0;
+	}
+	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
+	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
+		DRM_ERROR("Page size is smaller than GPU page size!\n");
+		return -EINVAL;
+	}
+	r = radeon_dummy_page_init(rdev);
+	if (r)
+		return r;
+	/* Compute table size */
+	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
+	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
+	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
+		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
+	/* Allocate pages table */
+	rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
+	if (rdev->gart.pages == NULL) {
+		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
+	rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
+					rdev->gart.num_cpu_pages);
+	if (rdev->gart.pages_addr == NULL) {
+		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
+	/* set GART entry to point to the dummy page by default */
+	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
+		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
+	}
+	return 0;
+}
+
+/**
+ * radeon_gart_fini - tear down the driver info for managing the gart
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the gart driver info and free the dummy page (all asics).
+ */
+void radeon_gart_fini(struct radeon_device *rdev)
+{
+	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+		/* unbind pages */
+		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
+	}
+	rdev->gart.ready = false;
+	vfree(rdev->gart.pages);
+	vfree(rdev->gart.pages_addr);
+	rdev->gart.pages = NULL;
+	rdev->gart.pages_addr = NULL;
+
+	radeon_dummy_page_fini(rdev);
+}
+
+/*
+ * GPUVM
+ * GPUVM is similar to the legacy gart on older asics, however
+ * rather than there being a single global gart table
+ * for the entire GPU, there are multiple VM page tables active
+ * at any given time.  The VM page tables can contain a mix of
+ * vram pages and system memory pages, and system memory pages
+ * can be mapped as snooped (cached system pages) or unsnooped
+ * (uncached system pages).
+ * Each VM has an ID associated with it and there is a page table
+ * associated with each VMID.  When executing a command buffer,
+ * the kernel tells the ring which VMID to use for that command
+ * buffer.  VMIDs are allocated dynamically as commands are submitted.
+ * The userspace drivers maintain their own address space and the kernel
+ * sets up their page tables accordingly when they submit their
+ * command buffers and a VMID is assigned.
+ * Cayman/Trinity support up to 8 active VMs at any given time;
+ * SI supports 16.
+ */
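+
+/* Editorial sketch: a GPUVM address is decoded in two levels, mirroring
+ * radeon_vm_update_pdes()/radeon_vm_update_ptes() below:
+ *
+ *	pfn    = addr / RADEON_GPU_PAGE_SIZE;
+ *	pt_idx = pfn >> RADEON_VM_BLOCK_SIZE;		(page directory slot)
+ *	pte    = pfn & (RADEON_VM_PTE_COUNT - 1);	(slot within the pt)
+ */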
+
+/*
+ * vm helpers
+ *
+ * TODO bind a default page at vm initialization for default address
+ */
+
+/**
+ * radeon_vm_num_pdes - return the number of page directory entries
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the number of page directory entries (cayman+).
+ */
+static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
+{
+	return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
+}
+
+/**
+ * radeon_vm_directory_size - returns the size of the page directory in bytes
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the size of the page directory in bytes (cayman+).
+ */
+static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
+{
+	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
+}
+
+/**
+ * radeon_vm_manager_init - init the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init the vm manager (cayman+).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev)
+{
+	struct radeon_vm *vm;
+	struct radeon_bo_va *bo_va;
+	int r;
+	unsigned size;
+
+	if (!rdev->vm_manager.enabled) {
+		/* allocate enough for 2 full VM pts */
+		size = radeon_vm_directory_size(rdev);
+		size += rdev->vm_manager.max_pfn * 8;
+		size *= 2;
+		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
+					      RADEON_GPU_PAGE_ALIGN(size),
+					      RADEON_GPU_PAGE_SIZE,
+					      RADEON_GEM_DOMAIN_VRAM);
+		if (r) {
+			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
+				(rdev->vm_manager.max_pfn * 8) >> 10);
+			return r;
+		}
+
+		r = radeon_asic_vm_init(rdev);
+		if (r)
+			return r;
+
+		rdev->vm_manager.enabled = true;
+
+		r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
+		if (r)
+			return r;
+	}
+
+	/* restore page table */
+	list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
+		if (vm->page_directory == NULL)
+			continue;
+
+		list_for_each_entry(bo_va, &vm->va, vm_list) {
+			bo_va->valid = false;
+		}
+	}
+	return 0;
+}
+
+/**
+ * radeon_vm_free_pt - free the page table for a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to unbind
+ *
+ * Free the page table of a specific vm (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_free_pt(struct radeon_device *rdev,
+				    struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va;
+	int i;
+
+	if (!vm->page_directory)
+		return;
+
+	list_del_init(&vm->list);
+	radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+
+	list_for_each_entry(bo_va, &vm->va, vm_list) {
+		bo_va->valid = false;
+	}
+
+	if (vm->page_tables == NULL)
+		return;
+
+	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
+		radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
+
+	kfree(vm->page_tables);
+}
+
+/**
+ * radeon_vm_manager_fini - tear down the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the VM manager (cayman+).
+ */
+void radeon_vm_manager_fini(struct radeon_device *rdev)
+{
+	struct radeon_vm *vm, *tmp;
+	int i;
+
+	if (!rdev->vm_manager.enabled)
+		return;
+
+	mutex_lock(&rdev->vm_manager.lock);
+	/* free all allocated page tables */
+	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
+		mutex_lock(&vm->mutex);
+		radeon_vm_free_pt(rdev, vm);
+		mutex_unlock(&vm->mutex);
+	}
+	for (i = 0; i < RADEON_NUM_VM; ++i) {
+		radeon_fence_unref(&rdev->vm_manager.active[i]);
+	}
+	radeon_asic_vm_fini(rdev);
+	mutex_unlock(&rdev->vm_manager.lock);
+
+	radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+	radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
+	rdev->vm_manager.enabled = false;
+}
+
+/**
+ * radeon_vm_evict - evict page table to make room for new one
+ *
+ * @rdev: radeon_device pointer
+ * @vm: VM we want to allocate something for
+ *
+ * Evict a VM from the lru, making sure that it isn't @vm (cayman+).
+ * Returns 0 for success, -ENOMEM for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	struct radeon_vm *vm_evict;
+
+	if (list_empty(&rdev->vm_manager.lru_vm))
+		return -ENOMEM;
+
+	vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
+				    struct radeon_vm, list);
+	if (vm_evict == vm)
+		return -ENOMEM;
+
+	mutex_lock(&vm_evict->mutex);
+	radeon_vm_free_pt(rdev, vm_evict);
+	mutex_unlock(&vm_evict->mutex);
+	return 0;
+}
+
+/**
+ * radeon_vm_alloc_pt - allocates a page table for a VM
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to bind
+ *
+ * Allocate a page table for the requested vm (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	unsigned pd_size, pts_size;
+	u64 *pd_addr;
+	int r;
+
+	if (vm == NULL) {
+		return -EINVAL;
+	}
+
+	if (vm->page_directory != NULL) {
+		return 0;
+	}
+
+retry:
+	pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+			     &vm->page_directory, pd_size,
+			     RADEON_GPU_PAGE_SIZE, false);
+	if (r == -ENOMEM) {
+		r = radeon_vm_evict(rdev, vm);
+		if (r)
+			return r;
+		goto retry;
+
+	} else if (r) {
+		return r;
+	}
+
+	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
+
+	/* Initially clear the page directory */
+	pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
+	memset(pd_addr, 0, pd_size);
+
+	pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
+	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+
+	if (vm->page_tables == NULL) {
+		DRM_ERROR("Cannot allocate memory for page table array\n");
+		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vm_add_to_lru - add VMs page table to LRU list
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to add to LRU
+ *
+ * Add the allocated page table to the LRU list (cayman+).
+ *
+ * Global mutex must be locked!
+ */
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	list_del_init(&vm->list);
+	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+}
+
+/**
+ * radeon_vm_grab_id - allocate the next free VMID
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ *
+ * Allocate an id for the vm (cayman+).
+ * Returns the fence we need to sync to (if any).
+ *
+ * Global and local mutex must be locked!
+ */
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+				       struct radeon_vm *vm, int ring)
+{
+	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
+	unsigned choices[2] = {};
+	unsigned i;
+
+	/* check if the id is still valid */
+	if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
+		return NULL;
+
+	/* we definitely need to flush */
+	radeon_fence_unref(&vm->last_flush);
+
+	/* skip over VMID 0, since it is the system VM */
+	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
+		struct radeon_fence *fence = rdev->vm_manager.active[i];
+
+		if (fence == NULL) {
+			/* found a free one */
+			vm->id = i;
+			return NULL;
+		}
+
+		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
+			best[fence->ring] = fence;
+			choices[fence->ring == ring ? 0 : 1] = i;
+		}
+	}
+
+	for (i = 0; i < 2; ++i) {
+		if (choices[i]) {
+			vm->id = choices[i];
+			return rdev->vm_manager.active[choices[i]];
+		}
+	}
+
+	/* should never happen */
+	BUG();
+	return NULL;
+}
+
+/**
+ * radeon_vm_fence - remember fence for vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to fence
+ * @fence: fence to remember
+ *
+ * Fence the vm (cayman+).
+ * Set the fence used to protect page table and id.
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_fence(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_fence *fence)
+{
+	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
+	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+
+	radeon_fence_unref(&vm->fence);
+	vm->fence = radeon_fence_ref(fence);
+}
+
+/**
+ * radeon_vm_bo_find - find the bo_va for a specific vm & bo
+ *
+ * @vm: requested vm
+ * @bo: requested buffer object
+ *
+ * Find @bo inside the requested vm (cayman+).
+ * Search the @bo's vm list for the requested vm.
+ * Returns the found bo_va or NULL if none is found.
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+				       struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va;
+
+	list_for_each_entry(bo_va, &bo->va, bo_list) {
+		if (bo_va->vm == vm) {
+			return bo_va;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * radeon_vm_bo_add - add a bo to a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ *
+ * Add @bo into the requested vm (cayman+).
+ * Add @bo to the list of bos associated with the vm
+ * Returns newly added bo_va or NULL for failure
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+				      struct radeon_vm *vm,
+				      struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va;
+
+	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+	if (bo_va == NULL) {
+		return NULL;
+	}
+	bo_va->vm = vm;
+	bo_va->bo = bo;
+	bo_va->soffset = 0;
+	bo_va->eoffset = 0;
+	bo_va->flags = 0;
+	bo_va->valid = false;
+	bo_va->ref_count = 1;
+	INIT_LIST_HEAD(&bo_va->bo_list);
+	INIT_LIST_HEAD(&bo_va->vm_list);
+
+	mutex_lock(&vm->mutex);
+	list_add(&bo_va->vm_list, &vm->va);
+	list_add_tail(&bo_va->bo_list, &bo->va);
+	mutex_unlock(&vm->mutex);
+
+	return bo_va;
+}
+
+/**
+ * radeon_vm_bo_set_addr - set the bo's virtual address inside a vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to store the address
+ * @soffset: requested offset of the buffer in the VM address space
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Set offset of @bo_va (cayman+).
+ * Validate and set the offset requested within the vm address space.
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+			  struct radeon_bo_va *bo_va,
+			  uint64_t soffset,
+			  uint32_t flags)
+{
+	uint64_t size = radeon_bo_size(bo_va->bo);
+	uint64_t eoffset, last_offset = 0;
+	struct radeon_vm *vm = bo_va->vm;
+	struct radeon_bo_va *tmp;
+	struct list_head *head;
+	unsigned last_pfn;
+
+	if (soffset) {
+		/* make sure the object fits at this offset */
+		eoffset = soffset + size;
+		if (soffset >= eoffset) {
+			return -EINVAL;
+		}
+
+		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+		if (last_pfn > rdev->vm_manager.max_pfn) {
+			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+				last_pfn, rdev->vm_manager.max_pfn);
+			return -EINVAL;
+		}
+
+	} else {
+		eoffset = last_pfn = 0;
+	}
+
+	mutex_lock(&vm->mutex);
+	head = &vm->va;
+	last_offset = 0;
+	list_for_each_entry(tmp, &vm->va, vm_list) {
+		if (bo_va == tmp) {
+			/* skip over currently modified bo */
+			continue;
+		}
+
+		if (soffset >= last_offset && eoffset <= tmp->soffset) {
+			/* bo can be added before this one */
+			break;
+		}
+		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
+			/* bo and tmp overlap, invalid offset */
+			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
+				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
+				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+			mutex_unlock(&vm->mutex);
+			return -EINVAL;
+		}
+		last_offset = tmp->eoffset;
+		head = &tmp->vm_list;
+	}
+
+	bo_va->soffset = soffset;
+	bo_va->eoffset = eoffset;
+	bo_va->flags = flags;
+	bo_va->valid = false;
+	list_move(&bo_va->vm_list, head);
+
+	mutex_unlock(&vm->mutex);
+	return 0;
+}
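+
+/* Usage sketch (editorial): mapping a bo at a virtual address is a
+ * two-step dance; the actual pte update only happens later via
+ * radeon_vm_bo_update_pte() once the bo is bound (va below is a
+ * hypothetical, page-aligned address chosen by userspace):
+ *
+ *	bo_va = radeon_vm_bo_add(rdev, vm, bo);
+ *	r = radeon_vm_bo_set_addr(rdev, bo_va, va,
+ *				  RADEON_VM_PAGE_READABLE |
+ *				  RADEON_VM_PAGE_WRITEABLE);
+ */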
+
+/**
+ * radeon_vm_map_gart - get the physical address of a gart page
+ *
+ * @rdev: radeon_device pointer
+ * @addr: the unmapped addr
+ *
+ * Look up the physical address of the page that the pte resolves
+ * to (cayman+).
+ * Returns the physical address of the page.
+ */
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
+{
+	uint64_t result;
+
+	/* page table offset */
+	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
+
+	/* in case cpu page size != gpu page size */
+	result |= addr & (~PAGE_MASK);
+
+	return result;
+}
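+
+/* Editorial example: with 4K pages, addr 0x12345 resolves to gart page
+ * 0x12 and keeps the in-page offset 0x345:
+ *
+ *	result  = rdev->gart.pages_addr[0x12345 >> PAGE_SHIFT];
+ *	result |= 0x12345 & ~PAGE_MASK;		(adds back 0x345)
+ */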
+
+/**
+ * radeon_vm_update_pdes - make sure that page directory is valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @ib: indirect buffer to fill with commands
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_update_pdes(struct radeon_device *rdev,
+				 struct radeon_vm *vm,
+				 struct radeon_ib *ib,
+				 uint64_t start, uint64_t end)
+{
+	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
+
+	uint64_t last_pde = ~0, last_pt = ~0;
+	unsigned count = 0;
+	uint64_t pt_idx;
+	int r;
+
+	start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+	end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+
+	/* walk over the address space and update the page directory */
+	for (pt_idx = start; pt_idx <= end; ++pt_idx) {
+		uint64_t pde, pt;
+
+		if (vm->page_tables[pt_idx])
+			continue;
+
+retry:
+		r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+				     &vm->page_tables[pt_idx],
+				     RADEON_VM_PTE_COUNT * 8,
+				     RADEON_GPU_PAGE_SIZE, false);
+
+		if (r == -ENOMEM) {
+			r = radeon_vm_evict(rdev, vm);
+			if (r)
+				return r;
+			goto retry;
+		} else if (r) {
+			return r;
+		}
+
+		pde = vm->pd_gpu_addr + pt_idx * 8;
+
+		pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+
+		if (((last_pde + 8 * count) != pde) ||
+		    ((last_pt + incr * count) != pt)) {
+
+			if (count) {
+				radeon_asic_vm_set_page(rdev, ib, last_pde,
+							last_pt, count, incr,
+							RADEON_VM_PAGE_VALID);
+			}
+
+			count = 1;
+			last_pde = pde;
+			last_pt = pt;
+		} else {
+			++count;
+		}
+	}
+
+	if (count) {
+		radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
+					incr, RADEON_VM_PAGE_VALID);
+
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vm_update_ptes - make sure that page tables are valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @ib: indirect buffer to fill with commands
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_update_ptes(struct radeon_device *rdev,
+				  struct radeon_vm *vm,
+				  struct radeon_ib *ib,
+				  uint64_t start, uint64_t end,
+				  uint64_t dst, uint32_t flags)
+{
+	static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
+
+	uint64_t last_pte = ~0, last_dst = ~0;
+	unsigned count = 0;
+	uint64_t addr;
+
+	start = start / RADEON_GPU_PAGE_SIZE;
+	end = end / RADEON_GPU_PAGE_SIZE;
+
+	/* walk over the address space and update the page tables */
+	for (addr = start; addr < end; ) {
+		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+		unsigned nptes;
+		uint64_t pte;
+
+		if ((addr & ~mask) == (end & ~mask))
+			nptes = end - addr;
+		else
+			nptes = RADEON_VM_PTE_COUNT - (addr & mask);
+
+		pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+		pte += (addr & mask) * 8;
+
+		if ((last_pte + 8 * count) != pte) {
+
+			if (count) {
+				radeon_asic_vm_set_page(rdev, ib, last_pte,
+							last_dst, count,
+							RADEON_GPU_PAGE_SIZE,
+							flags);
+			}
+
+			count = nptes;
+			last_pte = pte;
+			last_dst = dst;
+		} else {
+			count += nptes;
+		}
+
+		addr += nptes;
+		dst += nptes * RADEON_GPU_PAGE_SIZE;
+	}
+
+	if (count) {
+		radeon_asic_vm_set_page(rdev, ib, last_pte,
+					last_dst, count,
+					RADEON_GPU_PAGE_SIZE, flags);
+	}
+}
+
+/**
+ * radeon_vm_bo_update_pte - map a bo into the vm page table
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ * @mem: ttm memory region (NULL to unmap)
+ *
+ * Fill in the page table entries for @bo (cayman+).
+ * Returns 0 for success, -EINVAL for failure.
+ *
+ * Object has to be reserved and the global and local mutex must be locked!
+ */
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+			    struct radeon_vm *vm,
+			    struct radeon_bo *bo,
+			    struct ttm_mem_reg *mem)
+{
+	unsigned ridx = rdev->asic->vm.pt_ring_index;
+	struct radeon_ib ib;
+	struct radeon_bo_va *bo_va;
+	unsigned nptes, npdes, ndw;
+	uint64_t addr;
+	int r;
+
+	/* nothing to do if vm isn't bound */
+	if (vm->page_directory == NULL)
+		return 0;
+
+	bo_va = radeon_vm_bo_find(vm, bo);
+	if (bo_va == NULL) {
+		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+		return -EINVAL;
+	}
+
+	if (!bo_va->soffset) {
+		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
+			bo, vm);
+		return -EINVAL;
+	}
+
+	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
+		return 0;
+
+	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+	if (mem) {
+		addr = mem->start << PAGE_SHIFT;
+		if (mem->mem_type != TTM_PL_SYSTEM) {
+			bo_va->flags |= RADEON_VM_PAGE_VALID;
+			bo_va->valid = true;
+		}
+		if (mem->mem_type == TTM_PL_TT) {
+			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+		} else {
+			addr += rdev->vm_manager.vram_base_offset;
+		}
+	} else {
+		addr = 0;
+		bo_va->valid = false;
+	}
+
+	nptes = radeon_bo_ngpu_pages(bo);
+
+	/* assume two extra pdes in case the mapping overlaps the borders */
+	npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
+
+	/* padding, etc. */
+	ndw = 64;
+
+	if (RADEON_VM_BLOCK_SIZE > 11)
+		/* reserve space for one header for every 2k dwords */
+		ndw += (nptes >> 11) * 4;
+	else
+		/* reserve space for one header for
+		 * every (1 << BLOCK_SIZE) entries */
+		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
+
+	/* reserve space for pte addresses */
+	ndw += nptes * 2;
+
+	/* reserve space for one header for every 2k dwords */
+	ndw += (npdes >> 11) * 4;
+
+	/* reserve space for pde addresses */
+	ndw += npdes * 2;
+
+	/* update too big for an IB */
+	if (ndw > 0xfffff)
+		return -ENOMEM;
+
+	r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+	if (r)
+		return r;
+	ib.length_dw = 0;
+
+	r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		return r;
+	}
+
+	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
+			      addr, bo_va->flags);
+
+	radeon_ib_sync_to(&ib, vm->fence);
+	r = radeon_ib_schedule(rdev, &ib, NULL);
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		return r;
+	}
+	radeon_fence_unref(&vm->fence);
+	vm->fence = radeon_fence_ref(ib.fence);
+	radeon_ib_free(rdev, &ib);
+	radeon_fence_unref(&vm->last_flush);
+
+	return 0;
+}
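+
+/* Editorial example of the dword budget above, assuming the in-tree
+ * RADEON_VM_BLOCK_SIZE of 9 (512 ptes per page table) and a 1MB bo:
+ *
+ *	nptes = 256				(1MB of 4K GPU pages)
+ *	npdes = (256 >> 9) + 2 = 2
+ *	ndw   = 64 + (256 >> 9) * 4 + 256 * 2 + (2 >> 11) * 4 + 2 * 2
+ *	      = 580 dwords, comfortably under the 0xfffff IB limit
+ */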
+
+/**
+ * radeon_vm_bo_rmv - remove a bo from a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: requested bo_va
+ *
+ * Remove @bo_va->bo from the requested vm (cayman+).
+ * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
+ * remove the ptes for @bo_va in the page table.
+ * Returns 0 for success.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+		     struct radeon_bo_va *bo_va)
+{
+	int r = 0;
+
+	mutex_lock(&rdev->vm_manager.lock);
+	mutex_lock(&bo_va->vm->mutex);
+	if (bo_va->soffset) {
+		r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+	}
+	mutex_unlock(&rdev->vm_manager.lock);
+	list_del(&bo_va->vm_list);
+	mutex_unlock(&bo_va->vm->mutex);
+	list_del(&bo_va->bo_list);
+
+	kfree(bo_va);
+	return r;
+}
+
+/**
+ * radeon_vm_bo_invalidate - mark the bo as invalid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ *
+ * Mark @bo as invalid (cayman+).
+ */
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+			     struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va;
+
+	list_for_each_entry(bo_va, &bo->va, bo_list) {
+		bo_va->valid = false;
+	}
+}
+
+/**
+ * radeon_vm_init - initialize a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Init @vm fields (cayman+).
+ */
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	vm->id = 0;
+	vm->fence = NULL;
+	mutex_init(&vm->mutex);
+	INIT_LIST_HEAD(&vm->list);
+	INIT_LIST_HEAD(&vm->va);
+}
+
+/**
+ * radeon_vm_fini - tear down a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Tear down @vm (cayman+).
+ * Unbind the VM and remove all bos from the vm bo list
+ */
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va, *tmp;
+	int r;
+
+	mutex_lock(&rdev->vm_manager.lock);
+	mutex_lock(&vm->mutex);
+	radeon_vm_free_pt(rdev, vm);
+	mutex_unlock(&rdev->vm_manager.lock);
+
+	if (!list_empty(&vm->va)) {
+		dev_err(rdev->dev, "still active bo inside vm\n");
+	}
+	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
+		list_del_init(&bo_va->vm_list);
+		r = radeon_bo_reserve(bo_va->bo, false);
+		if (!r) {
+			list_del_init(&bo_va->bo_list);
+			radeon_bo_unreserve(bo_va->bo);
+			kfree(bo_va);
+		}
+	}
+	radeon_fence_unref(&vm->fence);
+	radeon_fence_unref(&vm->last_flush);
+	mutex_unlock(&vm->mutex);
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_gem.c b/linux-imx/drivers/gpu/drm/radeon/radeon_gem.c
new file mode 100644
index 0000000..aa79603
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_gem.c
@@ -0,0 +1,627 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+int radeon_gem_object_init(struct drm_gem_object *obj)
+{
+	BUG();
+
+	return 0;
+}
+
+void radeon_gem_object_free(struct drm_gem_object *gobj)
+{
+	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
+
+	if (robj) {
+		if (robj->gem_base.import_attach)
+			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+		radeon_bo_unref(&robj);
+	}
+}
+
+int radeon_gem_object_create(struct radeon_device *rdev, int size,
+				int alignment, int initial_domain,
+				bool discardable, bool kernel,
+				struct drm_gem_object **obj)
+{
+	struct radeon_bo *robj;
+	unsigned long max_size;
+	int r;
+
+	*obj = NULL;
+	/* At least align on page size */
+	if (alignment < PAGE_SIZE) {
+		alignment = PAGE_SIZE;
+	}
+
+	/* maximum bo size is the minimum of visible vram and gtt size */
+	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+	if (size > max_size) {
+		printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
+		       __func__, __LINE__, size >> 20, max_size >> 20);
+		return -ENOMEM;
+	}
+
+retry:
+	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
+	if (r) {
+		if (r != -ERESTARTSYS) {
+			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
+				initial_domain |= RADEON_GEM_DOMAIN_GTT;
+				goto retry;
+			}
+			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+				  size, initial_domain, alignment, r);
+		}
+		return r;
+	}
+	*obj = &robj->gem_base;
+	robj->pid = task_pid_nr(current);
+
+	mutex_lock(&rdev->gem.mutex);
+	list_add_tail(&robj->list, &rdev->gem.objects);
+	mutex_unlock(&rdev->gem.mutex);
+
+	return 0;
+}
+
+int radeon_gem_set_domain(struct drm_gem_object *gobj,
+			  uint32_t rdomain, uint32_t wdomain)
+{
+	struct radeon_bo *robj;
+	uint32_t domain;
+	int r;
+
+	/* FIXME: reimplement */
+	robj = gem_to_radeon_bo(gobj);
+	/* work out where to validate the buffer to */
+	domain = wdomain;
+	if (!domain) {
+		domain = rdomain;
+	}
+	if (!domain) {
+		/* Do nothing */
+		printk(KERN_WARNING "Set domain without domain!\n");
+		return 0;
+	}
+	if (domain == RADEON_GEM_DOMAIN_CPU) {
+		/* Asking for cpu access; wait for the object to be idle */
+		r = radeon_bo_wait(robj, NULL, false);
+		if (r) {
+			printk(KERN_ERR "Failed to wait for object!\n");
+			return r;
+		}
+	}
+	return 0;
+}
+
+int radeon_gem_init(struct radeon_device *rdev)
+{
+	INIT_LIST_HEAD(&rdev->gem.objects);
+	return 0;
+}
+
+void radeon_gem_fini(struct radeon_device *rdev)
+{
+	radeon_bo_force_delete(rdev);
+}
+
+/*
+ * Called from drm_gem_handle_create, which appears in both the new and
+ * open ioctl cases.
+ */
+int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+	struct radeon_device *rdev = rbo->rdev;
+	struct radeon_fpriv *fpriv = file_priv->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	struct radeon_bo_va *bo_va;
+	int r;
+
+	if (rdev->family < CHIP_CAYMAN) {
+		return 0;
+	}
+
+	r = radeon_bo_reserve(rbo, false);
+	if (r) {
+		return r;
+	}
+
+	bo_va = radeon_vm_bo_find(vm, rbo);
+	if (!bo_va) {
+		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
+	} else {
+		++bo_va->ref_count;
+	}
+	radeon_bo_unreserve(rbo);
+
+	return 0;
+}
+
+void radeon_gem_object_close(struct drm_gem_object *obj,
+			     struct drm_file *file_priv)
+{
+	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+	struct radeon_device *rdev = rbo->rdev;
+	struct radeon_fpriv *fpriv = file_priv->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	struct radeon_bo_va *bo_va;
+	int r;
+
+	if (rdev->family < CHIP_CAYMAN) {
+		return;
+	}
+
+	r = radeon_bo_reserve(rbo, true);
+	if (r) {
+		dev_err(rdev->dev, "leaking bo va because "
+			"we failed to reserve the bo (%d)\n", r);
+		return;
+	}
+	bo_va = radeon_vm_bo_find(vm, rbo);
+	if (bo_va) {
+		if (--bo_va->ref_count == 0) {
+			radeon_vm_bo_rmv(rdev, bo_va);
+		}
+	}
+	radeon_bo_unreserve(rbo);
+}
+
+static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
+{
+	if (r == -EDEADLK) {
+		r = radeon_gpu_reset(rdev);
+		if (!r)
+			r = -EAGAIN;
+	}
+	return r;
+}
+
+/*
+ * GEM ioctls.
+ */
+int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_info *args = data;
+	struct ttm_mem_type_manager *man;
+	unsigned i;
+
+	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+
+	args->vram_size = rdev->mc.real_vram_size;
+	args->vram_visible = (u64)man->size << PAGE_SHIFT;
+	if (rdev->stollen_vga_memory)
+		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
+	args->vram_visible -= radeon_fbdev_total_size(rdev);
+	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		args->gart_size -= rdev->ring[i].ring_size;
+	return 0;
+}
+
+int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *filp)
+{
+	/* TODO: implement */
+	DRM_ERROR("unimplemented %s\n", __func__);
+	return -ENOSYS;
+}
+
+int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *filp)
+{
+	/* TODO: implement */
+	DRM_ERROR("unimplemented %s\n", __func__);
+	return -ENOSYS;
+}
+
+int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_create *args = data;
+	struct drm_gem_object *gobj;
+	uint32_t handle;
+	int r;
+
+	down_read(&rdev->exclusive_lock);
+	/* create a gem object to contain this object in */
+	args->size = roundup(args->size, PAGE_SIZE);
+	r = radeon_gem_object_create(rdev, args->size, args->alignment,
+					args->initial_domain, false,
+					false, &gobj);
+	if (r) {
+		up_read(&rdev->exclusive_lock);
+		r = radeon_gem_handle_lockup(rdev, r);
+		return r;
+	}
+	r = drm_gem_handle_create(filp, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(gobj);
+	if (r) {
+		up_read(&rdev->exclusive_lock);
+		r = radeon_gem_handle_lockup(rdev, r);
+		return r;
+	}
+	args->handle = handle;
+	up_read(&rdev->exclusive_lock);
+	return 0;
+}
+
+int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	/* transition the BO to a domain -
+	 * just validate the BO into a certain domain */
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_set_domain *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r;
+
+	/* for now if someone requests domain CPU -
+	 * just make sure the buffer is finished with */
+	down_read(&rdev->exclusive_lock);
+
+	/* just do a BO wait for now */
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL) {
+		up_read(&rdev->exclusive_lock);
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+
+	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
+
+	drm_gem_object_unreference_unlocked(gobj);
+	up_read(&rdev->exclusive_lock);
+	r = radeon_gem_handle_lockup(robj->rdev, r);
+	return r;
+}
+
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+			  struct drm_device *dev,
+			  uint32_t handle, uint64_t *offset_p)
+{
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+
+	gobj = drm_gem_object_lookup(dev, filp, handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+	*offset_p = radeon_bo_mmap_offset(robj);
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
+
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	struct drm_radeon_gem_mmap *args = data;
+
+	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
+}
+
+int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_busy *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r;
+	uint32_t cur_placement = 0;
+
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+	r = radeon_bo_wait(robj, &cur_placement, true);
+	switch (cur_placement) {
+	case TTM_PL_VRAM:
+		args->domain = RADEON_GEM_DOMAIN_VRAM;
+		break;
+	case TTM_PL_TT:
+		args->domain = RADEON_GEM_DOMAIN_GTT;
+		break;
+	case TTM_PL_SYSTEM:
+		args->domain = RADEON_GEM_DOMAIN_CPU;
+		break;
+	default:
+		break;
+	}
+	drm_gem_object_unreference_unlocked(gobj);
+	r = radeon_gem_handle_lockup(rdev, r);
+	return r;
+}
+
+int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_wait_idle *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r;
+
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL) {
+		return -ENOENT;
+	}
+	robj = gem_to_radeon_bo(gobj);
+	r = radeon_bo_wait(robj, NULL, false);
+	/* callback hw specific functions if any */
+	if (rdev->asic->ioctl_wait_idle)
+		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
+	drm_gem_object_unreference_unlocked(gobj);
+	r = radeon_gem_handle_lockup(rdev, r);
+	return r;
+}
+
+int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct drm_radeon_gem_set_tiling *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *robj;
+	int r = 0;
+
+	DRM_DEBUG("%d \n", args->handle);
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL)
+		return -ENOENT;
+	robj = gem_to_radeon_bo(gobj);
+	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
+	drm_gem_object_unreference_unlocked(gobj);
+	return r;
+}
+
+int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct drm_radeon_gem_get_tiling *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *rbo;
+	int r = 0;
+
+	DRM_DEBUG("\n");
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL)
+		return -ENOENT;
+	rbo = gem_to_radeon_bo(gobj);
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		goto out;
+	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
+	radeon_bo_unreserve(rbo);
+out:
+	drm_gem_object_unreference_unlocked(gobj);
+	return r;
+}
+
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *filp)
+{
+	struct drm_radeon_gem_va *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_fpriv *fpriv = filp->driver_priv;
+	struct radeon_bo *rbo;
+	struct radeon_bo_va *bo_va;
+	u32 invalid_flags;
+	int r = 0;
+
+	if (!rdev->vm_manager.enabled) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -ENOTTY;
+	}
+
+	/* !! DON'T REMOVE !!
+	 * We don't support vm_id yet. To be sure we don't have broken
+	 * userspace, reject anyone trying to use a non-zero value; that
+	 * way we can start using these fields later without breaking
+	 * existing userspace.
+	 */
+	if (args->vm_id) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	if (args->offset < RADEON_VA_RESERVED_SIZE) {
+		dev_err(&dev->pdev->dev,
+			"offset 0x%lX is in reserved area 0x%X\n",
+			(unsigned long)args->offset,
+			RADEON_VA_RESERVED_SIZE);
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	/* don't remove: we need to force userspace to set the snooped flag,
+	 * otherwise we will end up with broken userspace and won't be able
+	 * to enable this feature without adding a new interface
+	 */
+	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
+	if ((args->flags & invalid_flags)) {
+		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+			args->flags, invalid_flags);
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
+		dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	switch (args->operation) {
+	case RADEON_VA_MAP:
+	case RADEON_VA_UNMAP:
+		break;
+	default:
+		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+			args->operation);
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -EINVAL;
+	}
+
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		return -ENOENT;
+	}
+	rbo = gem_to_radeon_bo(gobj);
+	r = radeon_bo_reserve(rbo, false);
+	if (r) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		drm_gem_object_unreference_unlocked(gobj);
+		return r;
+	}
+	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
+	if (!bo_va) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+		drm_gem_object_unreference_unlocked(gobj);
+		return -ENOENT;
+	}
+
+	switch (args->operation) {
+	case RADEON_VA_MAP:
+		if (bo_va->soffset) {
+			args->operation = RADEON_VA_RESULT_VA_EXIST;
+			args->offset = bo_va->soffset;
+			goto out;
+		}
+		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
+		break;
+	case RADEON_VA_UNMAP:
+		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
+		break;
+	default:
+		break;
+	}
+	args->operation = RADEON_VA_RESULT_OK;
+	if (r) {
+		args->operation = RADEON_VA_RESULT_ERROR;
+	}
+out:
+	radeon_bo_unreserve(rbo);
+	drm_gem_object_unreference_unlocked(gobj);
+	return r;
+}
+
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *dev,
+			    struct drm_mode_create_dumb *args)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_gem_object *gobj;
+	uint32_t handle;
+	int r;
+
+	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+	args->size = args->pitch * args->height;
+	args->size = ALIGN(args->size, PAGE_SIZE);
+
+	r = radeon_gem_object_create(rdev, args->size, 0,
+				     RADEON_GEM_DOMAIN_VRAM,
+				     false, ttm_bo_type_device,
+				     &gobj);
+	if (r)
+		return -ENOMEM;
+
+	r = drm_gem_handle_create(file_priv, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(gobj);
+	if (r) {
+		return r;
+	}
+	args->handle = handle;
+	return 0;
+}
+
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+			     struct drm_device *dev,
+			     uint32_t handle)
+{
+	return drm_gem_handle_delete(file_priv, handle);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_bo *rbo;
+	unsigned i = 0;
+
+	mutex_lock(&rdev->gem.mutex);
+	list_for_each_entry(rbo, &rdev->gem.objects, list) {
+		unsigned domain;
+		const char *placement;
+
+		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
+		switch (domain) {
+		case RADEON_GEM_DOMAIN_VRAM:
+			placement = "VRAM";
+			break;
+		case RADEON_GEM_DOMAIN_GTT:
+			placement = " GTT";
+			break;
+		case RADEON_GEM_DOMAIN_CPU:
+		default:
+			placement = " CPU";
+			break;
+		}
+		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
+			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
+			   placement, (unsigned long)rbo->pid);
+		i++;
+	}
+	mutex_unlock(&rdev->gem.mutex);
+	return 0;
+}
+
+static struct drm_info_list radeon_debugfs_gem_list[] = {
+	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
+};
+#endif
+
+int radeon_gem_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
+#endif
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_i2c.c b/linux-imx/drivers/gpu/drm/radeon/radeon_i2c.c
new file mode 100644
index 0000000..e24ca6a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -0,0 +1,1191 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <linux/export.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+
+extern int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+				   struct i2c_msg *msgs, int num);
+extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
+
+/**
+ * radeon_ddc_probe
+ *
+ * Sanity check the connector's DDC bus by reading the first eight EDID
+ * bytes (over the DP aux channel when @use_aux is set) and validating
+ * them against the fixed EDID header.
+ */
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
+{
+	u8 out = 0x0;
+	u8 buf[8];
+	int ret;
+	struct i2c_msg msgs[] = {
+		{
+			.addr = DDC_ADDR,
+			.flags = 0,
+			.len = 1,
+			.buf = &out,
+		},
+		{
+			.addr = DDC_ADDR,
+			.flags = I2C_M_RD,
+			.len = 8,
+			.buf = buf,
+		}
+	};
+
+	/* on hw with routers, select right port */
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
+
+	if (use_aux) {
+		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+		ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
+	} else {
+		ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
+	}
+
+	if (ret != 2)
+		/* Couldn't find an accessible DDC on this connector */
+		return false;
+	/* Also probe for a valid EDID header.
+	 * An EDID starts with:
+	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
+	 * Only the first 6 bytes must be valid as
+	 * drm_edid_block_valid() can fix the last 2 bytes */
+	if (drm_edid_header_is_valid(buf) < 6) {
+		/* Couldn't find an accessible EDID on this
+		 * connector */
+		return false;
+	}
+	return true;
+}
+
+/* bit banging i2c */
+
+static int pre_xfer(struct i2c_adapter *i2c_adap)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t temp;
+
+	/* RV410 appears to have a bug where the hw i2c engine in reset
+	 * holds the i2c port in a bad state - switch hw i2c away before
+	 * doing DDC - do this for all r200s/r300s/r400s for safety's sake
+	 */
+	if (rec->hw_capable) {
+		if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
+			u32 reg;
+
+			if (rdev->family >= CHIP_RV350)
+				reg = RADEON_GPIO_MONID;
+			else if ((rdev->family == CHIP_R300) ||
+				 (rdev->family == CHIP_R350))
+				reg = RADEON_GPIO_DVI_DDC;
+			else
+				reg = RADEON_GPIO_CRT2_DDC;
+
+			mutex_lock(&rdev->dc_hw_i2c_mutex);
+			if (rec->a_clk_reg == reg) {
+				WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+							       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
+			} else {
+				WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+							       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+			}
+			mutex_unlock(&rdev->dc_hw_i2c_mutex);
+		}
+	}
+
+	/* switch the pads to ddc mode */
+	if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
+		temp = RREG32(rec->mask_clk_reg);
+		temp &= ~(1 << 16);
+		WREG32(rec->mask_clk_reg, temp);
+	}
+
+	/* clear the output pin values */
+	temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
+	WREG32(rec->a_clk_reg, temp);
+
+	temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
+	WREG32(rec->a_data_reg, temp);
+
+	/* set the pins to input */
+	temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, temp);
+
+	temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+	WREG32(rec->en_data_reg, temp);
+
+	/* mask the gpio pins for software use */
+	temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask;
+	WREG32(rec->mask_clk_reg, temp);
+	temp = RREG32(rec->mask_clk_reg);
+
+	temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask;
+	WREG32(rec->mask_data_reg, temp);
+	temp = RREG32(rec->mask_data_reg);
+
+	return 0;
+}
+
+static void post_xfer(struct i2c_adapter *i2c_adap)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t temp;
+
+	/* unmask the gpio pins for software use */
+	temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask;
+	WREG32(rec->mask_clk_reg, temp);
+	temp = RREG32(rec->mask_clk_reg);
+
+	temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
+	WREG32(rec->mask_data_reg, temp);
+	temp = RREG32(rec->mask_data_reg);
+}
+
+static int get_clock(void *i2c_priv)
+{
+	struct radeon_i2c_chan *i2c = i2c_priv;
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* read the value off the pin */
+	val = RREG32(rec->y_clk_reg);
+	val &= rec->y_clk_mask;
+
+	return (val != 0);
+}
+
+static int get_data(void *i2c_priv)
+{
+	struct radeon_i2c_chan *i2c = i2c_priv;
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* read the value off the pin */
+	val = RREG32(rec->y_data_reg);
+	val &= rec->y_data_mask;
+
+	return (val != 0);
+}
+
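+/* Open-drain emulation: the pin is never driven high. A 1 is written by
+ * turning the pin back into an input and letting the bus pull-up raise
+ * the line; a 0 is written by enabling the output with the level latch
+ * already cleared in pre_xfer(). */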
+static void set_clock(void *i2c_priv, int clock)
+{
+	struct radeon_i2c_chan *i2c = i2c_priv;
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* set pin direction */
+	val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+	val |= clock ? 0 : rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, val);
+}
+
+static void set_data(void *i2c_priv, int data)
+{
+	struct radeon_i2c_chan *i2c = i2c_priv;
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	uint32_t val;
+
+	/* set pin direction */
+	val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+	val |= data ? 0 : rec->en_data_mask;
+	WREG32(rec->en_data_reg, val);
+}
+
+/* hw i2c */
+
+static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
+{
+	u32 sclk = rdev->pm.current_sclk;
+	u32 prescale = 0;
+	u32 nm;
+	u8 n, m, loop;
+	int i2c_clock;
+
+	switch (rdev->family) {
+	case CHIP_R100:
+	case CHIP_RV100:
+	case CHIP_RS100:
+	case CHIP_RV200:
+	case CHIP_RS200:
+	case CHIP_R200:
+	case CHIP_RV250:
+	case CHIP_RS300:
+	case CHIP_RV280:
+	case CHIP_R300:
+	case CHIP_R350:
+	case CHIP_RV350:
+		i2c_clock = 60;
+		nm = (sclk * 10) / (i2c_clock * 4);
+		for (loop = 1; loop < 255; loop++) {
+			if ((nm / loop) < loop)
+				break;
+		}
+		n = loop - 1;
+		m = loop - 2;
+		prescale = m | (n << 8);
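+		/* e.g. sclk = 20000 gives nm = 200000 / 240 = 833; the
+		 * loop stops at 29 (833 / 29 = 28 < 29), so n = 28,
+		 * m = 27 and prescale = 0x1c1b */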
+		break;
+	case CHIP_RV380:
+	case CHIP_RS400:
+	case CHIP_RS480:
+	case CHIP_R420:
+	case CHIP_R423:
+	case CHIP_RV410:
+		prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+		break;
+	case CHIP_RS600:
+	case CHIP_RS690:
+	case CHIP_RS740:
+		/* todo */
+		break;
+	case CHIP_RV515:
+	case CHIP_R520:
+	case CHIP_RV530:
+	case CHIP_RV560:
+	case CHIP_RV570:
+	case CHIP_R580:
+		i2c_clock = 50;
+		if (rdev->family == CHIP_R520)
+			prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
+		else
+			prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+		break;
+	case CHIP_R600:
+	case CHIP_RV610:
+	case CHIP_RV630:
+	case CHIP_RV670:
+		/* todo */
+		break;
+	case CHIP_RV620:
+	case CHIP_RV635:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+	case CHIP_RV740:
+		/* todo */
+		break;
+	case CHIP_CEDAR:
+	case CHIP_REDWOOD:
+	case CHIP_JUNIPER:
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		/* todo */
+		break;
+	default:
+		DRM_ERROR("i2c: unhandled radeon chip\n");
+		break;
+	}
+	return prescale;
+}
+
+/* hw i2c engine for r1xx-4xx hardware
+ * hw can buffer up to 15 bytes
+ */
+static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+			    struct i2c_msg *msgs, int num)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	struct i2c_msg *p;
+	int i, j, k, ret = num;
+	u32 prescale;
+	u32 i2c_cntl_0, i2c_cntl_1, i2c_data;
+	u32 tmp, reg;
+
+	mutex_lock(&rdev->dc_hw_i2c_mutex);
+	/* take the pm lock since we need a constant sclk */
+	mutex_lock(&rdev->pm.mutex);
+
+	prescale = radeon_get_i2c_prescale(rdev);
+
+	reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
+	       RADEON_I2C_DRIVE_EN |
+	       RADEON_I2C_START |
+	       RADEON_I2C_STOP |
+	       RADEON_I2C_GO);
+
+	if (rdev->is_atom_bios) {
+		tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+		WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+	}
+
+	if (rec->mm_i2c) {
+		i2c_cntl_0 = RADEON_I2C_CNTL_0;
+		i2c_cntl_1 = RADEON_I2C_CNTL_1;
+		i2c_data = RADEON_I2C_DATA;
+	} else {
+		i2c_cntl_0 = RADEON_DVI_I2C_CNTL_0;
+		i2c_cntl_1 = RADEON_DVI_I2C_CNTL_1;
+		i2c_data = RADEON_DVI_I2C_DATA;
+
+		switch (rdev->family) {
+		case CHIP_R100:
+		case CHIP_RV100:
+		case CHIP_RS100:
+		case CHIP_RV200:
+		case CHIP_RS200:
+		case CHIP_RS300:
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_DVI_DDC:
+				/* no gpio select bit */
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		case CHIP_R200:
+			/* only bit 4 on r200 */
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_DVI_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+				break;
+			case RADEON_GPIO_MONID:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		case CHIP_RV250:
+		case CHIP_RV280:
+			/* bits 3 and 4 */
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_DVI_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+				break;
+			case RADEON_GPIO_VGA_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+				break;
+			case RADEON_GPIO_CRT2_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		case CHIP_R300:
+		case CHIP_R350:
+			/* only bit 4 on r300/r350 */
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_VGA_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+				break;
+			case RADEON_GPIO_DVI_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		case CHIP_RV350:
+		case CHIP_RV380:
+		case CHIP_R420:
+		case CHIP_R423:
+		case CHIP_RV410:
+		case CHIP_RS400:
+		case CHIP_RS480:
+			/* bits 3 and 4 */
+			switch (rec->mask_clk_reg) {
+			case RADEON_GPIO_VGA_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+				break;
+			case RADEON_GPIO_DVI_DDC:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+				break;
+			case RADEON_GPIO_MONID:
+				reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+				break;
+			default:
+				DRM_ERROR("gpio not supported with hw i2c\n");
+				ret = -EINVAL;
+				goto done;
+			}
+			break;
+		default:
+			DRM_ERROR("unsupported asic\n");
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+	/* check for bus probe */
+	p = &msgs[0];
+	if ((num == 1) && (p->len == 0)) {
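+		/* a single zero-length message is a bus probe: run a
+		 * minimal write cycle just to see whether the target
+		 * acks its address */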
+		WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+				    RADEON_I2C_NACK |
+				    RADEON_I2C_HALT |
+				    RADEON_I2C_SOFT_RST));
+		WREG32(i2c_data, (p->addr << 1) & 0xff);
+		WREG32(i2c_data, 0);
+		WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+				    (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+				    RADEON_I2C_EN |
+				    (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+		WREG32(i2c_cntl_0, reg);
+		for (k = 0; k < 32; k++) {
+			udelay(10);
+			tmp = RREG32(i2c_cntl_0);
+			if (tmp & RADEON_I2C_GO)
+				continue;
+			tmp = RREG32(i2c_cntl_0);
+			if (tmp & RADEON_I2C_DONE)
+				break;
+			else {
+				DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+				WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+				ret = -EIO;
+				goto done;
+			}
+		}
+		goto done;
+	}
+
+	for (i = 0; i < num; i++) {
+		p = &msgs[i];
+		for (j = 0; j < p->len; j++) {
+			if (p->flags & I2C_M_RD) {
+				WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+						    RADEON_I2C_NACK |
+						    RADEON_I2C_HALT |
+						    RADEON_I2C_SOFT_RST));
+				WREG32(i2c_data, ((p->addr << 1) & 0xff) | 0x1);
+				WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+						    (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+						    RADEON_I2C_EN |
+						    (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+				WREG32(i2c_cntl_0, reg | RADEON_I2C_RECEIVE);
+				for (k = 0; k < 32; k++) {
+					udelay(10);
+					tmp = RREG32(i2c_cntl_0);
+					if (tmp & RADEON_I2C_GO)
+						continue;
+					tmp = RREG32(i2c_cntl_0);
+					if (tmp & RADEON_I2C_DONE)
+						break;
+					else {
+						DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+						WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+						ret = -EIO;
+						goto done;
+					}
+				}
+				p->buf[j] = RREG32(i2c_data) & 0xff;
+			} else {
+				WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+						    RADEON_I2C_NACK |
+						    RADEON_I2C_HALT |
+						    RADEON_I2C_SOFT_RST));
+				WREG32(i2c_data, (p->addr << 1) & 0xff);
+				WREG32(i2c_data, p->buf[j]);
+				WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+						    (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+						    RADEON_I2C_EN |
+						    (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+				WREG32(i2c_cntl_0, reg);
+				for (k = 0; k < 32; k++) {
+					udelay(10);
+					tmp = RREG32(i2c_cntl_0);
+					if (tmp & RADEON_I2C_GO)
+						continue;
+					tmp = RREG32(i2c_cntl_0);
+					if (tmp & RADEON_I2C_DONE)
+						break;
+					else {
+						DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+						WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+						ret = -EIO;
+						goto done;
+					}
+				}
+			}
+		}
+	}
+
+done:
+	WREG32(i2c_cntl_0, 0);
+	WREG32(i2c_cntl_1, 0);
+	WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+			    RADEON_I2C_NACK |
+			    RADEON_I2C_HALT |
+			    RADEON_I2C_SOFT_RST));
+
+	if (rdev->is_atom_bios) {
+		tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+		tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+		WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+	}
+
+	mutex_unlock(&rdev->pm.mutex);
+	mutex_unlock(&rdev->dc_hw_i2c_mutex);
+
+	return ret;
+}
+
+/* hw i2c engine for r5xx hardware
+ * hw can buffer up to 15 bytes
+ */
+static int r500_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+			    struct i2c_msg *msgs, int num)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	struct i2c_msg *p;
+	int i, j, remaining, current_count, buffer_offset, ret = num;
+	u32 prescale;
+	u32 tmp, reg;
+	u32 saved1, saved2;
+
+	mutex_lock(&rdev->dc_hw_i2c_mutex);
+	/* take the pm lock since we need a constant sclk */
+	mutex_lock(&rdev->pm.mutex);
+
+	prescale = radeon_get_i2c_prescale(rdev);
+
+	/* clear gpio mask bits */
+	tmp = RREG32(rec->mask_clk_reg);
+	tmp &= ~rec->mask_clk_mask;
+	WREG32(rec->mask_clk_reg, tmp);
+	tmp = RREG32(rec->mask_clk_reg);
+
+	tmp = RREG32(rec->mask_data_reg);
+	tmp &= ~rec->mask_data_mask;
+	WREG32(rec->mask_data_reg, tmp);
+	tmp = RREG32(rec->mask_data_reg);
+
+	/* clear pin values */
+	tmp = RREG32(rec->a_clk_reg);
+	tmp &= ~rec->a_clk_mask;
+	WREG32(rec->a_clk_reg, tmp);
+	tmp = RREG32(rec->a_clk_reg);
+
+	tmp = RREG32(rec->a_data_reg);
+	tmp &= ~rec->a_data_mask;
+	WREG32(rec->a_data_reg, tmp);
+	tmp = RREG32(rec->a_data_reg);
+
+	/* set the pins to input */
+	tmp = RREG32(rec->en_clk_reg);
+	tmp &= ~rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, tmp);
+	tmp = RREG32(rec->en_clk_reg);
+
+	tmp = RREG32(rec->en_data_reg);
+	tmp &= ~rec->en_data_mask;
+	WREG32(rec->en_data_reg, tmp);
+	tmp = RREG32(rec->en_data_reg);
+
+	/* flag the hw i2c engine busy and save state we are about to clobber */
+	tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+	WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+	saved1 = RREG32(AVIVO_DC_I2C_CONTROL1);
+	saved2 = RREG32(0x494);
+	WREG32(0x494, saved2 | 0x1);
+
+	WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
+	for (i = 0; i < 50; i++) {
+		udelay(1);
+		if (RREG32(AVIVO_DC_I2C_ARBITRATION) & AVIVO_DC_I2C_SW_CAN_USE_I2C)
+			break;
+	}
+	if (i == 50) {
+		DRM_ERROR("failed to get i2c bus\n");
+		ret = -EBUSY;
+		goto done;
+	}
+
+	reg = AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP | AVIVO_DC_I2C_EN;
+	switch (rec->mask_clk_reg) {
+	case AVIVO_DC_GPIO_DDC1_MASK:
+		reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1);
+		break;
+	case AVIVO_DC_GPIO_DDC2_MASK:
+		reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC2);
+		break;
+	case AVIVO_DC_GPIO_DDC3_MASK:
+		reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC3);
+		break;
+	default:
+		DRM_ERROR("gpio not supported with hw i2c\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* check for bus probe */
+	p = &msgs[0];
+	if ((num == 1) && (p->len == 0)) {
+		WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+					      AVIVO_DC_I2C_NACK |
+					      AVIVO_DC_I2C_HALT));
+		WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+		udelay(1);
+		WREG32(AVIVO_DC_I2C_RESET, 0);
+
+		WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
+		WREG32(AVIVO_DC_I2C_DATA, 0);
+
+		WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+		WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+					       AVIVO_DC_I2C_DATA_COUNT(1) |
+					       (prescale << 16)));
+		WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+		WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+		for (j = 0; j < 200; j++) {
+			udelay(50);
+			tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+			if (tmp & AVIVO_DC_I2C_GO)
+				continue;
+			tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+			if (tmp & AVIVO_DC_I2C_DONE)
+				break;
+			else {
+				DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+				WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+				ret = -EIO;
+				goto done;
+			}
+		}
+		goto done;
+	}
+
+	for (i = 0; i < num; i++) {
+		p = &msgs[i];
+		remaining = p->len;
+		buffer_offset = 0;
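+		/* the engine buffers at most 15 bytes, so e.g. a 40-byte
+		 * message goes out as three transactions of 15, 15 and
+		 * 10 bytes, each with its own start/stop */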
+		if (p->flags & I2C_M_RD) {
+			while (remaining) {
+				if (remaining > 15)
+					current_count = 15;
+				else
+					current_count = remaining;
+				WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+							      AVIVO_DC_I2C_NACK |
+							      AVIVO_DC_I2C_HALT));
+				WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+				udelay(1);
+				WREG32(AVIVO_DC_I2C_RESET, 0);
+
+				WREG32(AVIVO_DC_I2C_DATA, ((p->addr << 1) & 0xff) | 0x1);
+				WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+				WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+							       AVIVO_DC_I2C_DATA_COUNT(current_count) |
+							       (prescale << 16)));
+				WREG32(AVIVO_DC_I2C_CONTROL1, reg | AVIVO_DC_I2C_RECEIVE);
+				WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+				for (j = 0; j < 200; j++) {
+					udelay(50);
+					tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+					if (tmp & AVIVO_DC_I2C_GO)
+						continue;
+					tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+					if (tmp & AVIVO_DC_I2C_DONE)
+						break;
+					else {
+						DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+						WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+						ret = -EIO;
+						goto done;
+					}
+				}
+				for (j = 0; j < current_count; j++)
+					p->buf[buffer_offset + j] = RREG32(AVIVO_DC_I2C_DATA) & 0xff;
+				remaining -= current_count;
+				buffer_offset += current_count;
+			}
+		} else {
+			while (remaining) {
+				if (remaining > 15)
+					current_count = 15;
+				else
+					current_count = remaining;
+				WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+							      AVIVO_DC_I2C_NACK |
+							      AVIVO_DC_I2C_HALT));
+				WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+				udelay(1);
+				WREG32(AVIVO_DC_I2C_RESET, 0);
+
+				WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
+				for (j = 0; j < current_count; j++)
+					WREG32(AVIVO_DC_I2C_DATA, p->buf[buffer_offset + j]);
+
+				WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+				WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+							       AVIVO_DC_I2C_DATA_COUNT(current_count) |
+							       (prescale << 16)));
+				WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+				WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+				for (j = 0; j < 200; j++) {
+					udelay(50);
+					tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+					if (tmp & AVIVO_DC_I2C_GO)
+						continue;
+					tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+					if (tmp & AVIVO_DC_I2C_DONE)
+						break;
+					else {
+						DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+						WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+						ret = -EIO;
+						goto done;
+					}
+				}
+				remaining -= current_count;
+				buffer_offset += current_count;
+			}
+		}
+	}
+
+done:
+	WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+				      AVIVO_DC_I2C_NACK |
+				      AVIVO_DC_I2C_HALT));
+	WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+	udelay(1);
+	WREG32(AVIVO_DC_I2C_RESET, 0);
+
+	WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
+	WREG32(AVIVO_DC_I2C_CONTROL1, saved1);
+	WREG32(0x494, saved2);
+	tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+	tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+	WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+
+	mutex_unlock(&rdev->pm.mutex);
+	mutex_unlock(&rdev->dc_hw_i2c_mutex);
+
+	return ret;
+}
+
+static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+			      struct i2c_msg *msgs, int num)
+{
+	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+	int ret = 0;
+
+	switch (rdev->family) {
+	case CHIP_R100:
+	case CHIP_RV100:
+	case CHIP_RS100:
+	case CHIP_RV200:
+	case CHIP_RS200:
+	case CHIP_R200:
+	case CHIP_RV250:
+	case CHIP_RS300:
+	case CHIP_RV280:
+	case CHIP_R300:
+	case CHIP_R350:
+	case CHIP_RV350:
+	case CHIP_RV380:
+	case CHIP_R420:
+	case CHIP_R423:
+	case CHIP_RV410:
+	case CHIP_RS400:
+	case CHIP_RS480:
+		ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+		break;
+	case CHIP_RS600:
+	case CHIP_RS690:
+	case CHIP_RS740:
+		/* XXX fill in hw i2c implementation */
+		break;
+	case CHIP_RV515:
+	case CHIP_R520:
+	case CHIP_RV530:
+	case CHIP_RV560:
+	case CHIP_RV570:
+	case CHIP_R580:
+		if (rec->mm_i2c)
+			ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+		else
+			ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
+		break;
+	case CHIP_R600:
+	case CHIP_RV610:
+	case CHIP_RV630:
+	case CHIP_RV670:
+		/* XXX fill in hw i2c implementation */
+		break;
+	case CHIP_RV620:
+	case CHIP_RV635:
+	case CHIP_RS780:
+	case CHIP_RS880:
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+	case CHIP_RV740:
+		/* XXX fill in hw i2c implementation */
+		break;
+	case CHIP_CEDAR:
+	case CHIP_REDWOOD:
+	case CHIP_JUNIPER:
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		/* XXX fill in hw i2c implementation */
+		break;
+	default:
+		DRM_ERROR("i2c: unhandled radeon chip\n");
+		ret = -EIO;
+		break;
+	}
+
+	return ret;
+}
+
+static u32 radeon_hw_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm radeon_i2c_algo = {
+	.master_xfer = radeon_hw_i2c_xfer,
+	.functionality = radeon_hw_i2c_func,
+};
+
+static const struct i2c_algorithm radeon_atom_i2c_algo = {
+	.master_xfer = radeon_atom_hw_i2c_xfer,
+	.functionality = radeon_atom_hw_i2c_func,
+};
+
+struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+					  struct radeon_i2c_bus_rec *rec,
+					  const char *name)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_i2c_chan *i2c;
+	int ret;
+
+	/* don't add the mm_i2c bus unless hw_i2c is enabled */
+	if (rec->mm_i2c && (radeon_hw_i2c == 0))
+		return NULL;
+
+	i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
+	if (i2c == NULL)
+		return NULL;
+
+	i2c->rec = *rec;
+	i2c->adapter.owner = THIS_MODULE;
+	i2c->adapter.class = I2C_CLASS_DDC;
+	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->dev = dev;
+	i2c_set_adapdata(&i2c->adapter, i2c);
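+	/* engine selection: with the radeon_hw_i2c option set, pre-AVIVO
+	 * asics (<= RS480) and r5xx use the direct hw engine and DCE3+
+	 * parts go through the atom tables; everything else (the default)
+	 * falls back to gpio bit-banging */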
+	if (rec->mm_i2c ||
+	    (rec->hw_capable &&
+	     radeon_hw_i2c &&
+	     ((rdev->family <= CHIP_RS480) ||
+	      ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
+		/* set the radeon hw i2c adapter */
+		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+			 "Radeon i2c hw bus %s", name);
+		i2c->adapter.algo = &radeon_i2c_algo;
+		ret = i2c_add_adapter(&i2c->adapter);
+		if (ret) {
+			DRM_ERROR("Failed to register hw i2c %s\n", name);
+			goto out_free;
+		}
+	} else if (rec->hw_capable &&
+		   radeon_hw_i2c &&
+		   ASIC_IS_DCE3(rdev)) {
+		/* hw i2c using atom */
+		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+			 "Radeon i2c hw bus %s", name);
+		i2c->adapter.algo = &radeon_atom_i2c_algo;
+		ret = i2c_add_adapter(&i2c->adapter);
+		if (ret) {
+			DRM_ERROR("Failed to register hw i2c %s\n", name);
+			goto out_free;
+		}
+	} else {
+		/* set the radeon bit adapter */
+		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+			 "Radeon i2c bit bus %s", name);
+		i2c->adapter.algo_data = &i2c->algo.bit;
+		i2c->algo.bit.pre_xfer = pre_xfer;
+		i2c->algo.bit.post_xfer = post_xfer;
+		i2c->algo.bit.setsda = set_data;
+		i2c->algo.bit.setscl = set_clock;
+		i2c->algo.bit.getsda = get_data;
+		i2c->algo.bit.getscl = get_clock;
+		i2c->algo.bit.udelay = 10;
+		i2c->algo.bit.timeout = usecs_to_jiffies(2200);	/* from VESA */
+		i2c->algo.bit.data = i2c;
+		ret = i2c_bit_add_bus(&i2c->adapter);
+		if (ret) {
+			DRM_ERROR("Failed to register bit i2c %s\n", name);
+			goto out_free;
+		}
+	}
+
+	return i2c;
+out_free:
+	kfree(i2c);
+	return NULL;
+}
+
+struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+					     struct radeon_i2c_bus_rec *rec,
+					     const char *name)
+{
+	struct radeon_i2c_chan *i2c;
+	int ret;
+
+	i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
+	if (i2c == NULL)
+		return NULL;
+
+	i2c->rec = *rec;
+	i2c->adapter.owner = THIS_MODULE;
+	i2c->adapter.class = I2C_CLASS_DDC;
+	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->dev = dev;
+	snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+		 "Radeon aux bus %s", name);
+	i2c_set_adapdata(&i2c->adapter, i2c);
+	i2c->adapter.algo_data = &i2c->algo.dp;
+	i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
+	i2c->algo.dp.address = 0;
+	ret = i2c_dp_aux_add_bus(&i2c->adapter);
+	if (ret) {
+		DRM_INFO("Failed to register i2c %s\n", name);
+		goto out_free;
+	}
+
+	return i2c;
+out_free:
+	kfree(i2c);
+	return NULL;
+}
+
+void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
+{
+	if (!i2c)
+		return;
+	i2c_del_adapter(&i2c->adapter);
+	kfree(i2c);
+}
+
+/* Add the default buses */
+void radeon_i2c_init(struct radeon_device *rdev)
+{
+	if (radeon_hw_i2c)
+		DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_i2c_init(rdev);
+	else
+		radeon_combios_i2c_init(rdev);
+}
+
+/* Remove all the buses */
+void radeon_i2c_fini(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+		if (rdev->i2c_bus[i]) {
+			radeon_i2c_destroy(rdev->i2c_bus[i]);
+			rdev->i2c_bus[i] = NULL;
+		}
+	}
+}
+
+/* Add additional buses */
+void radeon_i2c_add(struct radeon_device *rdev,
+		    struct radeon_i2c_bus_rec *rec,
+		    const char *name)
+{
+	struct drm_device *dev = rdev->ddev;
+	int i;
+
+	for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+		if (!rdev->i2c_bus[i]) {
+			rdev->i2c_bus[i] = radeon_i2c_create(dev, rec, name);
+			return;
+		}
+	}
+}
+
+/* Look up a bus based on its i2c id */
+struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
+					  struct radeon_i2c_bus_rec *i2c_bus)
+{
+	int i;
+
+	for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+		if (rdev->i2c_bus[i] &&
+		    (rdev->i2c_bus[i]->rec.i2c_id == i2c_bus->i2c_id)) {
+			return rdev->i2c_bus[i];
+		}
+	}
+	return NULL;
+}
+
+struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
+{
+	return NULL;
+}
+
+void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+			 u8 slave_addr,
+			 u8 addr,
+			 u8 *val)
+{
+	u8 out_buf[2];
+	u8 in_buf[2];
+	struct i2c_msg msgs[] = {
+		{
+			.addr = slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
+		*val = in_buf[0];
+		DRM_DEBUG("val = 0x%02x\n", *val);
+	} else {
+		DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
+			  addr, *val);
+	}
+}
+
+void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
+			 u8 slave_addr,
+			 u8 addr,
+			 u8 val)
+{
+	uint8_t out_buf[2];
+	struct i2c_msg msg = {
+		.addr = slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = val;
+
+	if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
+		DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
+			  addr, val);
+}
+
+/* ddc router switching */
+void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
+{
+	u8 val;
+
+	if (!radeon_connector->router.ddc_valid)
+		return;
+
+	if (!radeon_connector->router_bus)
+		return;
+
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, &val);
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, val);
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, &val);
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
+	val |= radeon_connector->router.ddc_mux_state;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, val);
+}
+
+/* clock/data router switching */
+void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
+{
+	u8 val;
+
+	if (!radeon_connector->router.cd_valid)
+		return;
+
+	if (!radeon_connector->router_bus)
+		return;
+
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, val);
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	val |= radeon_connector->router.cd_mux_state;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, val);
+}
+
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_ioc32.c b/linux-imx/drivers/gpu/drm/radeon/radeon_ioc32.c
new file mode 100644
index 0000000..c180df8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -0,0 +1,424 @@
+/**
+ * \file radeon_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the Radeon DRM.
+ *
+ * \author Paul Mackerras <paulus@samba.org>
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+typedef struct drm_radeon_init32 {
+	int func;
+	u32 sarea_priv_offset;
+	int is_pci;
+	int cp_mode;
+	int gart_size;
+	int ring_size;
+	int usec_timeout;
+
+	unsigned int fb_bpp;
+	unsigned int front_offset, front_pitch;
+	unsigned int back_offset, back_pitch;
+	unsigned int depth_bpp;
+	unsigned int depth_offset, depth_pitch;
+
+	u32 fb_offset;
+	u32 mmio_offset;
+	u32 ring_offset;
+	u32 ring_rptr_offset;
+	u32 buffers_offset;
+	u32 gart_textures_offset;
+} drm_radeon_init32_t;
+
+static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
+				 unsigned long arg)
+{
+	drm_radeon_init32_t init32;
+	drm_radeon_init_t __user *init;
+
+	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
+		return -EFAULT;
+
+	init = compat_alloc_user_space(sizeof(*init));
+	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
+	    || __put_user(init32.func, &init->func)
+	    || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
+	    || __put_user(init32.is_pci, &init->is_pci)
+	    || __put_user(init32.cp_mode, &init->cp_mode)
+	    || __put_user(init32.gart_size, &init->gart_size)
+	    || __put_user(init32.ring_size, &init->ring_size)
+	    || __put_user(init32.usec_timeout, &init->usec_timeout)
+	    || __put_user(init32.fb_bpp, &init->fb_bpp)
+	    || __put_user(init32.front_offset, &init->front_offset)
+	    || __put_user(init32.front_pitch, &init->front_pitch)
+	    || __put_user(init32.back_offset, &init->back_offset)
+	    || __put_user(init32.back_pitch, &init->back_pitch)
+	    || __put_user(init32.depth_bpp, &init->depth_bpp)
+	    || __put_user(init32.depth_offset, &init->depth_offset)
+	    || __put_user(init32.depth_pitch, &init->depth_pitch)
+	    || __put_user(init32.fb_offset, &init->fb_offset)
+	    || __put_user(init32.mmio_offset, &init->mmio_offset)
+	    || __put_user(init32.ring_offset, &init->ring_offset)
+	    || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
+	    || __put_user(init32.buffers_offset, &init->buffers_offset)
+	    || __put_user(init32.gart_textures_offset,
+			  &init->gart_textures_offset))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
+}
+
+typedef struct drm_radeon_clear32 {
+	unsigned int flags;
+	unsigned int clear_color;
+	unsigned int clear_depth;
+	unsigned int color_mask;
+	unsigned int depth_mask;	/* misnamed field: should be stencil */
+	u32 depth_boxes;
+} drm_radeon_clear32_t;
+
+static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	drm_radeon_clear32_t clr32;
+	drm_radeon_clear_t __user *clr;
+
+	if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32)))
+		return -EFAULT;
+
+	clr = compat_alloc_user_space(sizeof(*clr));
+	if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr))
+	    || __put_user(clr32.flags, &clr->flags)
+	    || __put_user(clr32.clear_color, &clr->clear_color)
+	    || __put_user(clr32.clear_depth, &clr->clear_depth)
+	    || __put_user(clr32.color_mask, &clr->color_mask)
+	    || __put_user(clr32.depth_mask, &clr->depth_mask)
+	    || __put_user((void __user *)(unsigned long)clr32.depth_boxes,
+			  &clr->depth_boxes))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
+}
+
+typedef struct drm_radeon_stipple32 {
+	u32 mask;
+} drm_radeon_stipple32_t;
+
+static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
+				    unsigned long arg)
+{
+	drm_radeon_stipple32_t __user *argp = (void __user *)arg;
+	drm_radeon_stipple_t __user *request;
+	u32 mask;
+
+	if (get_user(mask, &argp->mask))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user((unsigned int __user *)(unsigned long)mask,
+			  &request->mask))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
+}
+
+typedef struct drm_radeon_tex_image32 {
+	unsigned int x, y;	/* Blit coordinates */
+	unsigned int width, height;
+	u32 data;
+} drm_radeon_tex_image32_t;
+
+typedef struct drm_radeon_texture32 {
+	unsigned int offset;
+	int pitch;
+	int format;
+	int width;		/* Texture image coordinates */
+	int height;
+	u32 image;
+} drm_radeon_texture32_t;
+
+static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
+				    unsigned long arg)
+{
+	drm_radeon_texture32_t req32;
+	drm_radeon_texture_t __user *request;
+	drm_radeon_tex_image32_t img32;
+	drm_radeon_tex_image_t __user *image;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+	if (req32.image == 0)
+		return -EINVAL;
+	if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image,
+			   sizeof(img32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request) + sizeof(*image));
+	if (!access_ok(VERIFY_WRITE, request,
+		       sizeof(*request) + sizeof(*image)))
+		return -EFAULT;
+	image = (drm_radeon_tex_image_t __user *) (request + 1);
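+	/* the texture ioctl carries a nested pointer: the converted image
+	 * struct is placed directly behind the request in the scratch
+	 * area and request->image is pointed at it */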
+
+	if (__put_user(req32.offset, &request->offset)
+	    || __put_user(req32.pitch, &request->pitch)
+	    || __put_user(req32.format, &request->format)
+	    || __put_user(req32.width, &request->width)
+	    || __put_user(req32.height, &request->height)
+	    || __put_user(image, &request->image)
+	    || __put_user(img32.x, &image->x)
+	    || __put_user(img32.y, &image->y)
+	    || __put_user(img32.width, &image->width)
+	    || __put_user(img32.height, &image->height)
+	    || __put_user((const void __user *)(unsigned long)img32.data,
+			  &image->data))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
+}
+
+typedef struct drm_radeon_vertex2_32 {
+	int idx;		/* Index of vertex buffer */
+	int discard;		/* Client finished with buffer? */
+	int nr_states;
+	u32 state;
+	int nr_prims;
+	u32 prim;
+} drm_radeon_vertex2_32_t;
+
+static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
+				    unsigned long arg)
+{
+	drm_radeon_vertex2_32_t req32;
+	drm_radeon_vertex2_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.idx, &request->idx)
+	    || __put_user(req32.discard, &request->discard)
+	    || __put_user(req32.nr_states, &request->nr_states)
+	    || __put_user((void __user *)(unsigned long)req32.state,
+			  &request->state)
+	    || __put_user(req32.nr_prims, &request->nr_prims)
+	    || __put_user((void __user *)(unsigned long)req32.prim,
+			  &request->prim))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
+}
+
+typedef struct drm_radeon_cmd_buffer32 {
+	int bufsz;
+	u32 buf;
+	int nbox;
+	u32 boxes;
+} drm_radeon_cmd_buffer32_t;
+
+static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
+				   unsigned long arg)
+{
+	drm_radeon_cmd_buffer32_t req32;
+	drm_radeon_cmd_buffer_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.bufsz, &request->bufsz)
+	    || __put_user((void __user *)(unsigned long)req32.buf,
+			  &request->buf)
+	    || __put_user(req32.nbox, &request->nbox)
+	    || __put_user((void __user *)(unsigned long)req32.boxes,
+			  &request->boxes))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
+}
+
+typedef struct drm_radeon_getparam32 {
+	int param;
+	u32 value;
+} drm_radeon_getparam32_t;
+
+static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
+				     unsigned long arg)
+{
+	drm_radeon_getparam32_t req32;
+	drm_radeon_getparam_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.param, &request->param)
+	    || __put_user((void __user *)(unsigned long)req32.value,
+			  &request->value))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
+}
+
+typedef struct drm_radeon_mem_alloc32 {
+	int region;
+	int alignment;
+	int size;
+	u32 region_offset;	/* offset from start of fb or GART */
+} drm_radeon_mem_alloc32_t;
+
+static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
+				   unsigned long arg)
+{
+	drm_radeon_mem_alloc32_t req32;
+	drm_radeon_mem_alloc_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.region, &request->region)
+	    || __put_user(req32.alignment, &request->alignment)
+	    || __put_user(req32.size, &request->size)
+	    || __put_user((int __user *)(unsigned long)req32.region_offset,
+			  &request->region_offset))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
+}
+
+typedef struct drm_radeon_irq_emit32 {
+	u32 irq_seq;
+} drm_radeon_irq_emit32_t;
+
+static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	drm_radeon_irq_emit32_t req32;
+	drm_radeon_irq_emit_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user((int __user *)(unsigned long)req32.irq_seq,
+			  &request->irq_seq))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
+}
+
+/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
+#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
+typedef struct drm_radeon_setparam32 {
+	int param;
+	u64 value;
+} __attribute__((packed)) drm_radeon_setparam32_t;
+
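+/* On i386 alignof(u64) is 4, so 'value' sits at offset 4 and the struct
+ * is 12 bytes; the packed attribute reproduces that layout when this
+ * file is built into a 64-bit kernel. */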
+static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
+				     unsigned long arg)
+{
+	drm_radeon_setparam32_t req32;
+	drm_radeon_setparam_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.param, &request->param)
+	    || __put_user((void __user *)(unsigned long)req32.value,
+			  &request->value))
+		return -EFAULT;
+
+	return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
+}
+#else
+#define compat_radeon_cp_setparam NULL
+#endif /* X86_64 || IA64 */
+
+static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
+	[DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
+	[DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
+	[DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
+	[DRM_RADEON_TEXTURE] = compat_radeon_cp_texture,
+	[DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
+	[DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
+	[DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
+	[DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam,
+	[DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
+	[DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn = NULL;
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
+		fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+	if (fn != NULL)
+		ret = (*fn) (filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp, cmd, arg);
+
+	return ret;
+}
+
+long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	ret = drm_ioctl(filp, cmd, arg);
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_irq.c b/linux-imx/drivers/gpu/drm/radeon/radeon_irq.c
new file mode 100644
index 0000000..8d68e97
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_irq.c
@@ -0,0 +1,402 @@
+/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */
+/*
+ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ *    Michel Dänzer <michel@daenzer.net>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (state)
+		dev_priv->irq_enable_reg |= mask;
+	else
+		dev_priv->irq_enable_reg &= ~mask;
+
+	if (dev->irq_enabled)
+		RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+}
+
+static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (state)
+		dev_priv->r500_disp_irq_reg |= mask;
+	else
+		dev_priv->r500_disp_irq_reg &= ~mask;
+
+	if (dev->irq_enabled)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+}
+
+int radeon_enable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+		switch (crtc) {
+		case 0:
+			r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
+			break;
+		case 1:
+			r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
+			break;
+		default:
+			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+				  crtc);
+			return -EINVAL;
+		}
+	} else {
+		switch (crtc) {
+		case 0:
+			radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
+			break;
+		case 1:
+			radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
+			break;
+		default:
+			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+				  crtc);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+void radeon_disable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+		switch (crtc) {
+		case 0:
+			r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
+			break;
+		case 1:
+			r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
+			break;
+		default:
+			DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
+				  crtc);
+			break;
+		}
+	} else {
+		switch (crtc) {
+		case 0:
+			radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
+			break;
+		case 1:
+			radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
+			break;
+		default:
+			DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
+				  crtc);
+			break;
+		}
+	}
+}
+
+static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
+{
+	u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
+	u32 irq_mask = RADEON_SW_INT_TEST;
+
+	*r500_disp_int = 0;
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+		/* vbl interrupts in a different place */
+
+		if (irqs & R500_DISPLAY_INT_STATUS) {
+			/* if a display interrupt */
+			u32 disp_irq;
+
+			disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
+
+			*r500_disp_int = disp_irq;
+			if (disp_irq & R500_D1_VBLANK_INTERRUPT)
+				RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
+			if (disp_irq & R500_D2_VBLANK_INTERRUPT)
+				RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
+		}
+		irq_mask |= R500_DISPLAY_INT_STATUS;
+	} else
+		irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
+
+	irqs &=	irq_mask;
+
+	if (irqs)
+		RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
+
+	return irqs;
+}
+
+/* Interrupts - Used for device synchronization and flushing in the
+ * following circumstances:
+ *
+ * - Exclusive FB access with hw idle:
+ *    - Wait for GUI Idle (?) interrupt, then do normal flush.
+ *
+ * - Frame throttling, NV_fence:
+ *    - Drop marker irqs into the command stream ahead of time.
+ *    - Wait on irqs with the lock *not held*
+ *    - Check each for its termination condition
+ *
+ * - Internally in cp_getbuffer, etc:
+ *    - as above, but wait with lock held???
+ *
+ * NOTE: These functions are misleadingly named -- the irqs aren't
+ * tied to dma at all; this is just a hangover from dri prehistory.
+ */
+
+irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+	u32 stat;
+	u32 r500_disp_int;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return IRQ_NONE;
+
+	/* Only consider the bits we're interested in - others could be used
+	 * outside the DRM
+	 */
+	stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
+	if (!stat)
+		return IRQ_NONE;
+
+	stat &= dev_priv->irq_enable_reg;
+
+	/* SW interrupt */
+	if (stat & RADEON_SW_INT_TEST)
+		DRM_WAKEUP(&dev_priv->swi_queue);
+
+	/* VBLANK interrupt */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+		if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
+			drm_handle_vblank(dev, 0);
+		if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
+			drm_handle_vblank(dev, 1);
+	} else {
+		if (stat & RADEON_CRTC_VBLANK_STAT)
+			drm_handle_vblank(dev, 0);
+		if (stat & RADEON_CRTC2_VBLANK_STAT)
+			drm_handle_vblank(dev, 1);
+	}
+	return IRQ_HANDLED;
+}
+
+static int radeon_emit_irq(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	unsigned int ret;
+	RING_LOCALS;
+
+	atomic_inc(&dev_priv->swi_emitted);
+	ret = atomic_read(&dev_priv->swi_emitted);
+
+	BEGIN_RING(4);
+	OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
+	OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	return ret;
+}
+
+static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
+{
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+	int ret = 0;
+
+	if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
+		return 0;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
+		    RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
+
+	return ret;
+}
+
+u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	if (crtc < 0 || crtc > 1) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
+		if (crtc == 0)
+			return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
+		else
+			return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
+	} else {
+		if (crtc == 0)
+			return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
+		else
+			return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
+	}
+}
+
+/* Needs the lock as it touches the ring.
+ */
+int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_irq_emit_t *emit = data;
+	int result;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return -EINVAL;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	result = radeon_emit_irq(dev);
+
+	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_irq_wait_t *irqwait = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return -EINVAL;
+
+	return radeon_wait_irq(dev, irqwait->irq_seq);
+}
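+
+/*
+ * Illustration only: a hypothetical userspace sketch, assuming the libdrm
+ * uapi headers, of the classic fence pattern built on the two ioctls
+ * above -- emit a software-interrupt marker into the command stream, then
+ * block until the CP has passed it.
+ */
+#if 0
+#include <sys/ioctl.h>
+#include <drm/radeon_drm.h>
+
+static int radeon_cp_fence_sync(int fd)
+{
+	int seq;
+	drm_radeon_irq_emit_t emit = { .irq_seq = &seq };
+	drm_radeon_irq_wait_t wait;
+
+	/* radeon_irq_emit() writes the new sequence number back to us */
+	if (ioctl(fd, DRM_IOCTL_RADEON_IRQ_EMIT, &emit))
+		return -1;
+
+	/* radeon_irq_wait() sleeps until RADEON_LAST_SWI_REG >= seq */
+	wait.irq_seq = seq;
+	return ioctl(fd, DRM_IOCTL_RADEON_IRQ_WAIT, &wait);
+}
+#endif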
+
+/* drm_dma.h hooks
+*/
+void radeon_driver_irq_preinstall(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+	u32 dummy;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return;
+
+	/* Disable *all* interrupts */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+
+	/* Clear bits if they're already high */
+	radeon_acknowledge_irqs(dev_priv, &dummy);
+}
+
+int radeon_driver_irq_postinstall(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+
+	atomic_set(&dev_priv->swi_emitted, 0);
+	DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
+
+	dev->max_vblank_count = 0x001fffff;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return 0;
+
+	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
+
+	return 0;
+}
+
+void radeon_driver_irq_uninstall(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+	if (!dev_priv)
+		return;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		return;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
+	/* Disable *all* interrupts */
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+}
+
+int radeon_vblank_crtc_get(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+
+	return dev_priv->vblank_crtc;
+}
+
+int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
+{
+	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+	if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
+		DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
+		return -EINVAL;
+	}
+	dev_priv->vblank_crtc = (unsigned int)value;
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_irq_kms.c b/linux-imx/drivers/gpu/drm/radeon/radeon_irq_kms.c
new file mode 100644
index 0000000..1fe12ab
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+#define RADEON_WAIT_IDLE_TIMEOUT 200
+
+/**
+ * radeon_driver_irq_handler_kms - irq handler for KMS
+ *
+ * @DRM_IRQ_ARGS: args
+ *
+ * This is the irq handler for the radeon KMS driver (all asics).
+ * radeon_irq_process is a macro that points to the per-asic
+ * irq handler callback.
+ */
+irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	struct radeon_device *rdev = dev->dev_private;
+
+	return radeon_irq_process(rdev);
+}
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+/**
+ * radeon_hotplug_work_func - display hotplug work handler
+ *
+ * @work: work struct
+ *
+ * This is the hot plug event work handler (all asics).
+ * The work gets scheduled from the irq handler if there
+ * was a hot plug interrupt.  It walks the connector table
+ * and calls the hotplug handler for each one, then sends
+ * a drm hotplug event to alert userspace.
+ */
+static void radeon_hotplug_work_func(struct work_struct *work)
+{
+	struct radeon_device *rdev = container_of(work, struct radeon_device,
+						  hotplug_work);
+	struct drm_device *dev = rdev->ddev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+
+	if (mode_config->num_connector) {
+		list_for_each_entry(connector, &mode_config->connector_list, head)
+			radeon_connector_hotplug(connector);
+	}
+	/* Just fire off a uevent and let userspace tell us what to do */
+	drm_helper_hpd_irq_event(dev);
+}
+
+/**
+ * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Gets the hw ready to enable irqs (all asics).
+ * This function disables all interrupt sources on the GPU.
+ */
+void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	unsigned long irqflags;
+	unsigned i;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	/* Disable *all* interrupts */
+	for (i = 0; i < RADEON_NUM_RINGS; i++)
+		atomic_set(&rdev->irq.ring_int[i], 0);
+	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
+		rdev->irq.hpd[i] = false;
+	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+		rdev->irq.crtc_vblank_int[i] = false;
+		atomic_set(&rdev->irq.pflip[i], 0);
+		rdev->irq.afmt[i] = false;
+	}
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	/* Clear bits */
+	radeon_irq_process(rdev);
+}
+
+/**
+ * radeon_driver_irq_postinstall_kms - drm irq postinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Handles setup to be done after enabling irqs (all asics).
+ * Returns 0 on success.
+ */
+int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
+{
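+	/* The CRTC frame counters on these asics are 21 bits wide, so tell
+	 * the DRM core where they wrap. */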
+	dev->max_vblank_count = 0x001fffff;
+	return 0;
+}
+
+/**
+ * radeon_driver_irq_uninstall_kms - drm irq uninstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * This function disables all interrupt sources on the GPU (all asics).
+ */
+void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	unsigned long irqflags;
+	unsigned i;
+
+	if (rdev == NULL) {
+		return;
+	}
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	/* Disable *all* interrupts */
+	for (i = 0; i < RADEON_NUM_RINGS; i++)
+		atomic_set(&rdev->irq.ring_int[i], 0);
+	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
+		rdev->irq.hpd[i] = false;
+	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+		rdev->irq.crtc_vblank_int[i] = false;
+		atomic_set(&rdev->irq.pflip[i], 0);
+		rdev->irq.afmt[i] = false;
+	}
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_msi_ok - asic specific msi checks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Handles asic specific MSI checks to determine if
+ * MSIs should be enabled on a particular chip (all asics).
+ * Returns true if MSIs should be enabled, false if MSIs
+ * should not be enabled.
+ */
+static bool radeon_msi_ok(struct radeon_device *rdev)
+{
+	/* RV370/RV380 was first asic with MSI support */
+	if (rdev->family < CHIP_RV380)
+		return false;
+
+	/* MSIs don't work on AGP */
+	if (rdev->flags & RADEON_IS_AGP)
+		return false;
+
+	/* force MSI on */
+	if (radeon_msi == 1)
+		return true;
+	else if (radeon_msi == 0)
+		return false;
+
+	/* Quirks */
+	/* HP RS690 only seems to work with MSIs. */
+	if ((rdev->pdev->device == 0x791f) &&
+	    (rdev->pdev->subsystem_vendor == 0x103c) &&
+	    (rdev->pdev->subsystem_device == 0x30c2))
+		return true;
+
+	/* Dell RS690 only seems to work with MSIs. */
+	if ((rdev->pdev->device == 0x791f) &&
+	    (rdev->pdev->subsystem_vendor == 0x1028) &&
+	    (rdev->pdev->subsystem_device == 0x01fc))
+		return true;
+
+	/* Dell RS690 only seems to work with MSIs. */
+	if ((rdev->pdev->device == 0x791f) &&
+	    (rdev->pdev->subsystem_vendor == 0x1028) &&
+	    (rdev->pdev->subsystem_device == 0x01fd))
+		return true;
+
+	/* Gateway RS690 only seems to work with MSIs. */
+	if ((rdev->pdev->device == 0x791f) &&
+	    (rdev->pdev->subsystem_vendor == 0x107b) &&
+	    (rdev->pdev->subsystem_device == 0x0185))
+		return true;
+
+	/* try and enable MSIs by default on all RS690s */
+	if (rdev->family == CHIP_RS690)
+		return true;
+
+	/* RV515 seems to have MSI issues where it occasionally loses
+	 * MSI rearms; this leads to lockups and freezes.
+	 * Disable it by default.
+	 */
+	if (rdev->family == CHIP_RV515)
+		return false;
+	if (rdev->flags & RADEON_IS_IGP) {
+		/* APUs work fine with MSIs */
+		if (rdev->family >= CHIP_PALM)
+			return true;
+		/* lots of IGPs have problems with MSIs */
+		return false;
+	}
+
+	return true;
+}
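+
+/*
+ * Note: radeon_msi above is the driver's "msi" module parameter, so the
+ * quirk logic here can be overridden from the kernel command line, e.g.
+ * booting with radeon.msi=0 (force MSIs off) or radeon.msi=1 (force on).
+ */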
+
+/**
+ * radeon_irq_kms_init - init driver interrupt info
+ *
+ * @rdev: radeon device pointer
+ *
+ * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_irq_kms_init(struct radeon_device *rdev)
+{
+	int r = 0;
+
+	spin_lock_init(&rdev->irq.lock);
+	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
+	if (r) {
+		return r;
+	}
+	/* enable msi */
+	rdev->msi_enabled = 0;
+
+	if (radeon_msi_ok(rdev)) {
+		int ret = pci_enable_msi(rdev->pdev);
+		if (!ret) {
+			rdev->msi_enabled = 1;
+			dev_info(rdev->dev, "radeon: using MSI.\n");
+		}
+	}
+	rdev->irq.installed = true;
+	r = drm_irq_install(rdev->ddev);
+	if (r) {
+		rdev->irq.installed = false;
+		return r;
+	}
+
+	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
+
+	DRM_INFO("radeon: irq initialized.\n");
+	return 0;
+}
+
+/**
+ * radeon_irq_kms_fini - tear down driver interrupt info
+ *
+ * @rdev: radeon device pointer
+ *
+ * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
+ */
+void radeon_irq_kms_fini(struct radeon_device *rdev)
+{
+	drm_vblank_cleanup(rdev->ddev);
+	if (rdev->irq.installed) {
+		drm_irq_uninstall(rdev->ddev);
+		rdev->irq.installed = false;
+		if (rdev->msi_enabled)
+			pci_disable_msi(rdev->pdev);
+		flush_work(&rdev->hotplug_work);
+	}
+}
+
+/**
+ * radeon_irq_kms_sw_irq_get - enable software interrupt
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to enable
+ *
+ * Enables the software interrupt for a specific ring (all asics).
+ * The software interrupt is generally used to signal a fence on
+ * a particular ring.
+ */
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
+{
+	unsigned long irqflags;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
+		spin_lock_irqsave(&rdev->irq.lock, irqflags);
+		radeon_irq_set(rdev);
+		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	}
+}
+
+/**
+ * radeon_irq_kms_sw_irq_put - disable software interrupt
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to disable
+ *
+ * Disables the software interrupt for a specific ring (all asics).
+ * The software interrupt is generally used to signal a fence on
+ * a particular ring.
+ */
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
+{
+	unsigned long irqflags;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
+		spin_lock_irqsave(&rdev->irq.lock, irqflags);
+		radeon_irq_set(rdev);
+		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	}
+}
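+
+/*
+ * A minimal sketch of the intended get/put pairing (hypothetical caller,
+ * assuming radeon_fence_signaled() and rdev->fence_queue as used by the
+ * fence code): the ring interrupt stays armed only while at least one
+ * waiter holds a reference.
+ */
+#if 0
+static int radeon_fence_wait_sketch(struct radeon_device *rdev,
+				    struct radeon_fence *fence,
+				    int ring, long timeout)
+{
+	long r;
+
+	radeon_irq_kms_sw_irq_get(rdev, ring);	/* first waiter arms the IRQ */
+	r = wait_event_timeout(rdev->fence_queue,
+			       radeon_fence_signaled(fence), timeout);
+	radeon_irq_kms_sw_irq_put(rdev, ring);	/* last waiter disarms it */
+	return r > 0 ? 0 : -ETIMEDOUT;
+}
+#endif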
+
+/**
+ * radeon_irq_kms_pflip_irq_get - enable pageflip interrupt
+ *
+ * @rdev: radeon device pointer
+ * @crtc: crtc whose interrupt you want to enable
+ *
+ * Enables the pageflip interrupt for a specific crtc (all asics).
+ * For pageflips we use the vblank interrupt source.
+ */
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
+{
+	unsigned long irqflags;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc)
+		return;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
+		spin_lock_irqsave(&rdev->irq.lock, irqflags);
+		radeon_irq_set(rdev);
+		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	}
+}
+
+/**
+ * radeon_irq_kms_pflip_irq_put - disable pageflip interrupt
+ *
+ * @rdev: radeon device pointer
+ * @crtc: crtc whose interrupt you want to disable
+ *
+ * Disables the pageflip interrupt for a specific crtc (all asics).
+ * For pageflips we use the vblank interrupt source.
+ */
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
+{
+	unsigned long irqflags;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc)
+		return;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
+		spin_lock_irqsave(&rdev->irq.lock, irqflags);
+		radeon_irq_set(rdev);
+		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	}
+}
+
+/**
+ * radeon_irq_kms_enable_afmt - enable audio format change interrupt
+ *
+ * @rdev: radeon device pointer
+ * @block: afmt block whose interrupt you want to enable
+ *
+ * Enables the afmt change interrupt for a specific afmt block (all asics).
+ */
+void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
+{
+	unsigned long irqflags;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	rdev->irq.afmt[block] = true;
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_disable_afmt - disable audio format change interrupt
+ *
+ * @rdev: radeon device pointer
+ * @block: afmt block whose interrupt you want to disable
+ *
+ * Disables the afmt change interrupt for a specific afmt block (all asics).
+ */
+void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
+{
+	unsigned long irqflags;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	rdev->irq.afmt[block] = false;
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_enable_hpd - enable hotplug detect interrupt
+ *
+ * @rdev: radeon device pointer
+ * @hpd_mask: mask of hpd pins you want to enable.
+ *
+ * Enables the hotplug detect interrupt for a specific hpd pin (all asics).
+ */
+void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+	unsigned long irqflags;
+	int i;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+		rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_disable_hpd - disable hotplug detect interrupt
+ *
+ * @rdev: radeon device pointer
+ * @hpd_mask: mask of hpd pins you want to disable.
+ *
+ * Disables the hotplug detect interrupt for a specific hpd pin (all asics).
+ */
+void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+	unsigned long irqflags;
+	int i;
+
+	if (!rdev->ddev->irq_enabled)
+		return;
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+		rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_kms.c b/linux-imx/drivers/gpu/drm/radeon/radeon_kms.c
new file mode 100644
index 0000000..1113e8f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_kms.c
@@ -0,0 +1,768 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include <drm/radeon_drm.h>
+#include "radeon_asic.h"
+
+#include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
+
+/**
+ * radeon_driver_unload_kms - Main unload function for KMS.
+ *
+ * @dev: drm dev pointer
+ *
+ * This is the main unload function for KMS (all asics).
+ * It calls radeon_modeset_fini() to tear down the
+ * displays, and radeon_device_fini() to tear down
+ * the rest of the device (CP, writeback, etc.).
+ * Returns 0 on success.
+ */
+int radeon_driver_unload_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (rdev == NULL)
+		return 0;
+	if (rdev->rmmio == NULL)
+		goto done_free;
+	radeon_acpi_fini(rdev);
+	radeon_modeset_fini(rdev);
+	radeon_device_fini(rdev);
+
+done_free:
+	kfree(rdev);
+	dev->dev_private = NULL;
+	return 0;
+}
+
+/**
+ * radeon_driver_load_kms - Main load function for KMS.
+ *
+ * @dev: drm dev pointer
+ * @flags: device flags
+ *
+ * This is the main load function for KMS (all asics).
+ * It calls radeon_device_init() to set up the non-display
+ * parts of the chip (asic init, CP, writeback, etc.), and
+ * radeon_modeset_init() to set up the display parts
+ * (crtcs, encoders, hotplug detect, etc.).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
+{
+	struct radeon_device *rdev;
+	int r, acpi_status;
+
+	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
+	if (rdev == NULL) {
+		return -ENOMEM;
+	}
+	dev->dev_private = (void *)rdev;
+
+	/* update BUS flag */
+	if (drm_pci_device_is_agp(dev)) {
+		flags |= RADEON_IS_AGP;
+	} else if (pci_is_pcie(dev->pdev)) {
+		flags |= RADEON_IS_PCIE;
+	} else {
+		flags |= RADEON_IS_PCI;
+	}
+
+	/* radeon_device_init() should report only fatal errors
+	 * (memory allocation, iomapping, or memory manager
+	 * initialization failures); it must properly initialize the
+	 * GPU MC controller and permit VRAM allocation.
+	 */
+	r = radeon_device_init(rdev, dev, dev->pdev, flags);
+	if (r) {
+		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+		goto out;
+	}
+
+	/* Likewise, modeset init should fail only on fatal errors;
+	 * otherwise it should provide enough functionality for
+	 * shadowfb to run.
+	 */
+	r = radeon_modeset_init(rdev);
+	if (r)
+		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+
+	/* Call ACPI methods: these require modeset init,
+	 * but their failure is not fatal.
+	 */
+	if (!r) {
+		acpi_status = radeon_acpi_init(rdev);
+		if (acpi_status)
+			dev_dbg(&dev->pdev->dev,
+				"Error during ACPI methods call\n");
+	}
+
+out:
+	if (r)
+		radeon_driver_unload_kms(dev);
+	return r;
+}
+
+/**
+ * radeon_set_filp_rights - Set filp right.
+ *
+ * @dev: drm dev pointer
+ * @owner: drm file
+ * @applier: drm file
+ * @value: value
+ *
+ * Sets the filp rights for the device (all asics).
+ */
+static void radeon_set_filp_rights(struct drm_device *dev,
+				   struct drm_file **owner,
+				   struct drm_file *applier,
+				   uint32_t *value)
+{
+	mutex_lock(&dev->struct_mutex);
+	if (*value == 1) {
+		/* wants rights */
+		if (!*owner)
+			*owner = applier;
+	} else if (*value == 0) {
+		/* revokes rights */
+		if (*owner == applier)
+			*owner = NULL;
+	}
+	*value = *owner == applier ? 1 : 0;
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/*
+ * Userspace get information ioctl
+ */
+/**
+ * radeon_info_ioctl - answer a device specific request.
+ *
+ * @rdev: radeon device pointer
+ * @data: request object
+ * @filp: drm filp
+ *
+ * This function is used to pass device specific parameters to the userspace
+ * drivers.  Examples include: pci device id, pipeline params, tiling params,
+ * etc. (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
+int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_info *info = data;
+	struct radeon_mode_info *minfo = &rdev->mode_info;
+	uint32_t *value, value_tmp, *value_ptr, value_size;
+	uint64_t value64;
+	struct drm_crtc *crtc;
+	int i, found;
+
+	value_ptr = (uint32_t *)((unsigned long)info->value);
+	value = &value_tmp;
+	value_size = sizeof(uint32_t);
+
+	switch (info->request) {
+	case RADEON_INFO_DEVICE_ID:
+		*value = dev->pci_device;
+		break;
+	case RADEON_INFO_NUM_GB_PIPES:
+		*value = rdev->num_gb_pipes;
+		break;
+	case RADEON_INFO_NUM_Z_PIPES:
+		*value = rdev->num_z_pipes;
+		break;
+	case RADEON_INFO_ACCEL_WORKING:
+		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
+		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
+			*value = false;
+		else
+			*value = rdev->accel_working;
+		break;
+	case RADEON_INFO_CRTC_FROM_ID:
+		if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+			return -EFAULT;
+		}
+		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
+			crtc = (struct drm_crtc *)minfo->crtcs[i];
+			if (crtc && crtc->base.id == *value) {
+				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+				*value = radeon_crtc->crtc_id;
+				found = 1;
+				break;
+			}
+		}
+		if (!found) {
+			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_ACCEL_WORKING2:
+		*value = rdev->accel_working;
+		break;
+	case RADEON_INFO_TILING_CONFIG:
+		if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.tile_config;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.tile_config;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.tile_config;
+		else if (rdev->family >= CHIP_RV770)
+			*value = rdev->config.rv770.tile_config;
+		else if (rdev->family >= CHIP_R600)
+			*value = rdev->config.r600.tile_config;
+		else {
+			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_WANT_HYPERZ:
+		/* The "value" here is both an input and output parameter.
+		 * If the input value is 1, filp requests hyper-z access.
+		 * If the input value is 0, filp revokes its hyper-z access.
+		 *
+		 * When returning, the value is 1 if filp owns hyper-z access,
+		 * 0 otherwise. */
+		if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+			return -EFAULT;
+		}
+		if (*value >= 2) {
+			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
+			return -EINVAL;
+		}
+		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
+		break;
+	case RADEON_INFO_WANT_CMASK:
+		/* The same logic as Hyper-Z. */
+		if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+			return -EFAULT;
+		}
+		if (*value >= 2) {
+			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
+			return -EINVAL;
+		}
+		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
+		break;
+	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
+		/* return clock value in KHz */
+		if (rdev->asic->get_xclk)
+			*value = radeon_get_xclk(rdev) * 10;
+		else
+			*value = rdev->clock.spll.reference_freq * 10;
+		break;
+	case RADEON_INFO_NUM_BACKENDS:
+		if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.max_backends_per_se *
+				rdev->config.si.max_shader_engines;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.max_backends_per_se *
+				rdev->config.cayman.max_shader_engines;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.max_backends;
+		else if (rdev->family >= CHIP_RV770)
+			*value = rdev->config.rv770.max_backends;
+		else if (rdev->family >= CHIP_R600)
+			*value = rdev->config.r600.max_backends;
+		else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_NUM_TILE_PIPES:
+		if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.max_tile_pipes;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.max_tile_pipes;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.max_tile_pipes;
+		else if (rdev->family >= CHIP_RV770)
+			*value = rdev->config.rv770.max_tile_pipes;
+		else if (rdev->family >= CHIP_R600)
+			*value = rdev->config.r600.max_tile_pipes;
+		else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_FUSION_GART_WORKING:
+		*value = 1;
+		break;
+	case RADEON_INFO_BACKEND_MAP:
+		if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.backend_map;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.backend_map;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.backend_map;
+		else if (rdev->family >= CHIP_RV770)
+			*value = rdev->config.rv770.backend_map;
+		else if (rdev->family >= CHIP_R600)
+			*value = rdev->config.r600.backend_map;
+		else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_VA_START:
+		/* this is where we report if vm is supported or not */
+		if (rdev->family < CHIP_CAYMAN)
+			return -EINVAL;
+		*value = RADEON_VA_RESERVED_SIZE;
+		break;
+	case RADEON_INFO_IB_VM_MAX_SIZE:
+		/* this is where we report if vm is supported or not */
+		if (rdev->family < CHIP_CAYMAN)
+			return -EINVAL;
+		*value = RADEON_IB_VM_MAX_SIZE;
+		break;
+	case RADEON_INFO_MAX_PIPES:
+		if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.max_cu_per_sh;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.max_pipes_per_simd;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.max_pipes;
+		else if (rdev->family >= CHIP_RV770)
+			*value = rdev->config.rv770.max_pipes;
+		else if (rdev->family >= CHIP_R600)
+			*value = rdev->config.r600.max_pipes;
+		else {
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_TIMESTAMP:
+		if (rdev->family < CHIP_R600) {
+			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
+			return -EINVAL;
+		}
+		value = (uint32_t *)&value64;
+		value_size = sizeof(uint64_t);
+		value64 = radeon_get_gpu_clock_counter(rdev);
+		break;
+	case RADEON_INFO_MAX_SE:
+		if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.max_shader_engines;
+		else if (rdev->family >= CHIP_CAYMAN)
+			*value = rdev->config.cayman.max_shader_engines;
+		else if (rdev->family >= CHIP_CEDAR)
+			*value = rdev->config.evergreen.num_ses;
+		else
+			*value = 1;
+		break;
+	case RADEON_INFO_MAX_SH_PER_SE:
+		if (rdev->family >= CHIP_TAHITI)
+			*value = rdev->config.si.max_sh_per_se;
+		else
+			return -EINVAL;
+		break;
+	case RADEON_INFO_FASTFB_WORKING:
+		*value = rdev->fastfb_working;
+		break;
+	case RADEON_INFO_RING_WORKING:
+		if (DRM_COPY_FROM_USER(value, value_ptr, sizeof(uint32_t))) {
+			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+			return -EFAULT;
+		}
+		switch (*value) {
+		case RADEON_CS_RING_GFX:
+		case RADEON_CS_RING_COMPUTE:
+			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
+			break;
+		case RADEON_CS_RING_DMA:
+			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
+			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
+			break;
+		case RADEON_CS_RING_UVD:
+			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case RADEON_INFO_SI_TILE_MODE_ARRAY:
+		if (rdev->family < CHIP_TAHITI) {
+			DRM_DEBUG_KMS("tile mode array is si only!\n");
+			return -EINVAL;
+		}
+		value = rdev->config.si.tile_mode_array;
+		value_size = sizeof(uint32_t)*32;
+		break;
+	case RADEON_INFO_SI_CP_DMA_COMPUTE:
+		*value = 1;
+		break;
+	default:
+		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
+		return -EINVAL;
+	}
+	if (DRM_COPY_TO_USER(value_ptr, (char *)value, value_size)) {
+		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
+		return -EFAULT;
+	}
+	return 0;
+}
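+
+/*
+ * Illustration only: a hypothetical userspace counterpart, assuming the
+ * libdrm uapi headers.  The "value" field carries a user pointer that the
+ * handler above fills with value_size bytes.
+ */
+#if 0
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <drm/radeon_drm.h>
+
+static int radeon_query_device_id(int fd, uint32_t *id)
+{
+	struct drm_radeon_info info = {
+		.request = RADEON_INFO_DEVICE_ID,
+		.value = (uintptr_t)id,
+	};
+
+	return ioctl(fd, DRM_IOCTL_RADEON_INFO, &info);
+}
+#endif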
+
+/*
+ * Outdated mess for old drm with Xorg being in charge (void function now).
+ */
+/**
+ * radeon_driver_firstopen_kms - drm callback for first open
+ *
+ * @dev: drm dev pointer
+ *
+ * Nothing to be done for KMS (all asics).
+ * Returns 0 on success.
+ */
+int radeon_driver_firstopen_kms(struct drm_device *dev)
+{
+	return 0;
+}
+
+/**
+ * radeon_driver_lastclose_kms - drm callback for last close
+ *
+ * @dev: drm dev pointer
+ *
+ * Switch vga switcheroo state after last close (all asics).
+ */
+void radeon_driver_lastclose_kms(struct drm_device *dev)
+{
+	vga_switcheroo_process_delayed_switch();
+}
+
+/**
+ * radeon_driver_open_kms - drm callback for open
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device open, init vm on cayman+ (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	file_priv->driver_priv = NULL;
+
+	/* newer gpus have virtual address space support */
+	if (rdev->family >= CHIP_CAYMAN) {
+		struct radeon_fpriv *fpriv;
+		struct radeon_bo_va *bo_va;
+		int r;
+
+		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+		if (unlikely(!fpriv)) {
+			return -ENOMEM;
+		}
+
+		radeon_vm_init(rdev, &fpriv->vm);
+
+		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+		if (r)
+			return r;
+
+		/* map the ib pool buffer read only into
+		 * virtual address space */
+		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+					 rdev->ring_tmp_bo.bo);
+		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+					  RADEON_VM_PAGE_READABLE |
+					  RADEON_VM_PAGE_SNOOPED);
+
+		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+		if (r) {
+			radeon_vm_fini(rdev, &fpriv->vm);
+			kfree(fpriv);
+			return r;
+		}
+
+		file_priv->driver_priv = fpriv;
+	}
+	return 0;
+}
+
+/**
+ * radeon_driver_postclose_kms - drm callback for post close
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device post close, tear down vm on cayman+ (all asics).
+ */
+void radeon_driver_postclose_kms(struct drm_device *dev,
+				 struct drm_file *file_priv)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* newer gpus have virtual address space support */
+	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
+		struct radeon_fpriv *fpriv = file_priv->driver_priv;
+		struct radeon_bo_va *bo_va;
+		int r;
+
+		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+		if (!r) {
+			bo_va = radeon_vm_bo_find(&fpriv->vm,
+						  rdev->ring_tmp_bo.bo);
+			if (bo_va)
+				radeon_vm_bo_rmv(rdev, bo_va);
+			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+		}
+
+		radeon_vm_fini(rdev, &fpriv->vm);
+		kfree(fpriv);
+		file_priv->driver_priv = NULL;
+	}
+}
+
+/**
+ * radeon_driver_preclose_kms - drm callback for pre close
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
+ * (all asics).
+ */
+void radeon_driver_preclose_kms(struct drm_device *dev,
+				struct drm_file *file_priv)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	if (rdev->hyperz_filp == file_priv)
+		rdev->hyperz_filp = NULL;
+	if (rdev->cmask_filp == file_priv)
+		rdev->cmask_filp = NULL;
+	radeon_uvd_free_handles(rdev, file_priv);
+}
+
+/*
+ * VBlank related functions.
+ */
+/**
+ * radeon_get_vblank_counter_kms - get frame count
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to get the frame count from
+ *
+ * Gets the frame count on the requested crtc (all asics).
+ * Returns frame count on success, -EINVAL on failure.
+ */
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	return radeon_get_vblank_counter(rdev, crtc);
+}
+
+/**
+ * radeon_enable_vblank_kms - enable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to enable vblank interrupt for
+ *
+ * Enable the interrupt on the requested crtc (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
+int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	unsigned long irqflags;
+	int r;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	rdev->irq.crtc_vblank_int[crtc] = true;
+	r = radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+	return r;
+}
+
+/**
+ * radeon_disable_vblank_kms - disable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to disable vblank interrupt for
+ *
+ * Disable the interrupt on the requested crtc (all asics).
+ */
+void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	unsigned long irqflags;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return;
+	}
+
+	spin_lock_irqsave(&rdev->irq.lock, irqflags);
+	rdev->irq.crtc_vblank_int[crtc] = false;
+	radeon_irq_set(rdev);
+	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_get_vblank_timestamp_kms - get vblank timestamp
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to get the timestamp for
+ * @max_error: max error
+ * @vblank_time: time value
+ * @flags: flags passed to the driver
+ *
+ * Gets the timestamp on the requested crtc based on the
+ * scanout position (all asics).
+ * Returns positive status flags on success, negative error on failure.
+ */
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+				    int *max_error,
+				    struct timeval *vblank_time,
+				    unsigned flags)
+{
+	struct drm_crtc *drmcrtc;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (crtc < 0 || crtc >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	/* Get associated drm_crtc: */
+	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+
+	/* Helper routine in DRM core does all the work: */
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+						     vblank_time, flags,
+						     drmcrtc);
+}
+
+/*
+ * IOCTL.
+ */
+int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	/* Not valid in KMS. */
+	return -EINVAL;
+}
+
+#define KMS_INVALID_IOCTL(name)						\
+int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
+{									\
+	DRM_ERROR("invalid ioctl with kms %s\n", __func__);		\
+	return -EINVAL;							\
+}
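+
+/*
+ * For reference, KMS_INVALID_IOCTL(radeon_cp_init_kms) expands to:
+ *
+ *	int radeon_cp_init_kms(struct drm_device *dev, void *data,
+ *			       struct drm_file *file_priv)
+ *	{
+ *		DRM_ERROR("invalid ioctl with kms %s\n", __func__);
+ *		return -EINVAL;
+ *	}
+ */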
+
+/*
+ * All of these ioctls are invalid in the KMS world.
+ */
+KMS_INVALID_IOCTL(radeon_cp_init_kms)
+KMS_INVALID_IOCTL(radeon_cp_start_kms)
+KMS_INVALID_IOCTL(radeon_cp_stop_kms)
+KMS_INVALID_IOCTL(radeon_cp_reset_kms)
+KMS_INVALID_IOCTL(radeon_cp_idle_kms)
+KMS_INVALID_IOCTL(radeon_cp_resume_kms)
+KMS_INVALID_IOCTL(radeon_engine_reset_kms)
+KMS_INVALID_IOCTL(radeon_fullscreen_kms)
+KMS_INVALID_IOCTL(radeon_cp_swap_kms)
+KMS_INVALID_IOCTL(radeon_cp_clear_kms)
+KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
+KMS_INVALID_IOCTL(radeon_cp_indices_kms)
+KMS_INVALID_IOCTL(radeon_cp_texture_kms)
+KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
+KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
+KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
+KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
+KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
+KMS_INVALID_IOCTL(radeon_cp_flip_kms)
+KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
+KMS_INVALID_IOCTL(radeon_mem_free_kms)
+KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
+KMS_INVALID_IOCTL(radeon_irq_emit_kms)
+KMS_INVALID_IOCTL(radeon_irq_wait_kms)
+KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
+KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
+KMS_INVALID_IOCTL(radeon_surface_free_kms)
+
+struct drm_ioctl_desc radeon_ioctls_kms[] = {
+	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
+	/* KMS */
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
+};
+int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/linux-imx/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
new file mode 100644
index 0000000..bc73021
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -0,0 +1,1105 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include <drm/drm_fixed.h>
+#include "radeon.h"
+#include "atom.h"
+
+static void radeon_overscan_setup(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
+	WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
+	WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
+}
+
+static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
+				       struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	int xres = mode->hdisplay;
+	int yres = mode->vdisplay;
+	bool hscale = true, vscale = true;
+	int hsync_wid;
+	int vsync_wid;
+	int hsync_start;
+	int blank_width;
+	u32 scale, inc, crtc_more_cntl;
+	u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active;
+	u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp;
+	u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp;
+	struct drm_display_mode *native_mode = &radeon_crtc->native_mode;
+
+	fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
+		(RADEON_VERT_STRETCH_RESERVED |
+		 RADEON_VERT_AUTO_RATIO_INC);
+	fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
+		(RADEON_HORZ_FP_LOOP_STRETCH |
+		 RADEON_HORZ_AUTO_RATIO_INC);
+
+	crtc_more_cntl = 0;
+	if ((rdev->family == CHIP_RS100) ||
+	    (rdev->family == CHIP_RS200)) {
+		/* This works around an asic bug for RMX; some versions
+		   of the BIOS don't have this register initialized correctly. */
+		crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
+	}
+
+	fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
+				| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+	hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+	if (!hsync_wid)
+		hsync_wid = 1;
+	hsync_start = mode->crtc_hsync_start - 8;
+
+	fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
+			      | ((hsync_wid & 0x3f) << 16)
+			      | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+				 ? RADEON_CRTC_H_SYNC_POL
+				 : 0));
+
+	fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
+				| ((mode->crtc_vdisplay - 1) << 16));
+
+	vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	if (!vsync_wid)
+		vsync_wid = 1;
+
+	fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
+			      | ((vsync_wid & 0x1f) << 16)
+			      | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+				 ? RADEON_CRTC_V_SYNC_POL
+				 : 0));
+
+	fp_horz_vert_active = 0;
+
+	if (native_mode->hdisplay == 0 ||
+	    native_mode->vdisplay == 0) {
+		hscale = false;
+		vscale = false;
+	} else {
+		if (xres > native_mode->hdisplay)
+			xres = native_mode->hdisplay;
+		if (yres > native_mode->vdisplay)
+			yres = native_mode->vdisplay;
+
+		if (xres == native_mode->hdisplay)
+			hscale = false;
+		if (yres == native_mode->vdisplay)
+			vscale = false;
+	}
+
+	switch (radeon_crtc->rmx_type) {
+	case RMX_FULL:
+	case RMX_ASPECT:
+		if (!hscale)
+			fp_horz_stretch |= ((xres/8-1) << 16);
+		else {
+			inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
+			scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
+				/ native_mode->hdisplay + 1;
+			fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
+					RADEON_HORZ_STRETCH_BLEND |
+					RADEON_HORZ_STRETCH_ENABLE |
+					((native_mode->hdisplay/8-1) << 16));
+		}
+
+		if (!vscale)
+			fp_vert_stretch |= ((yres-1) << 12);
+		else {
+			inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
+			scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
+				/ native_mode->vdisplay + 1;
+			fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
+					RADEON_VERT_STRETCH_ENABLE |
+					RADEON_VERT_STRETCH_BLEND |
+					((native_mode->vdisplay-1) << 12));
+		}
+		break;
+	case RMX_CENTER:
+		fp_horz_stretch |= ((xres/8-1) << 16);
+		fp_vert_stretch |= ((yres-1) << 12);
+
+		crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
+				RADEON_CRTC_AUTO_VERT_CENTER_EN);
+
+		blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
+		if (blank_width > 110)
+			blank_width = 110;
+
+		fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
+				| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+		hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+		if (!hsync_wid)
+			hsync_wid = 1;
+
+		fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
+				| ((hsync_wid & 0x3f) << 16)
+				| ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+					? RADEON_CRTC_H_SYNC_POL
+					: 0));
+
+		fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
+				| ((mode->crtc_vdisplay - 1) << 16));
+
+		vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+		if (!vsync_wid)
+			vsync_wid = 1;
+
+		fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
+					| ((vsync_wid & 0x1f) << 16)
+					| ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+						? RADEON_CRTC_V_SYNC_POL
+						: 0)));
+
+		fp_horz_vert_active = (((native_mode->vdisplay) & 0xfff) |
+				(((native_mode->hdisplay / 8) & 0x1ff) << 16));
+		break;
+	case RMX_OFF:
+	default:
+		fp_horz_stretch |= ((xres/8-1) << 16);
+		fp_vert_stretch |= ((yres-1) << 12);
+		break;
+	}
+
+	WREG32(RADEON_FP_HORZ_STRETCH,      fp_horz_stretch);
+	WREG32(RADEON_FP_VERT_STRETCH,      fp_vert_stretch);
+	WREG32(RADEON_CRTC_MORE_CNTL,       crtc_more_cntl);
+	WREG32(RADEON_FP_HORZ_VERT_ACTIVE,  fp_horz_vert_active);
+	WREG32(RADEON_FP_H_SYNC_STRT_WID,   fp_h_sync_strt_wid);
+	WREG32(RADEON_FP_V_SYNC_STRT_WID,   fp_v_sync_strt_wid);
+	WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
+	WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
+}
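+
+/*
+ * Worked example for the RMX_FULL scale computation above (hypothetical
+ * numbers, and assuming RADEON_HORZ_STRETCH_RATIO_MAX is 4096): stretching
+ * a 1024-wide mode onto a 1280-wide panel with inc = 0 gives
+ * scale = 1024 * 4096 / 1280 + 1 = 3277, i.e. roughly 0.8 in the
+ * hardware's fixed-point stretch-ratio format.
+ */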
+
+static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	int i = 0;
+
+	/* FIXME: Certain revisions of R300 can't recover here.  Not sure of
+	   the cause yet, but this workaround will mask the problem for now.
+	   Other chips usually will pass at the very first test, so the
+	   workaround shouldn't have any effect on them. */
+	for (i = 0;
+	     (i < 10000 &&
+	      RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
+	     i++);
+}
+
+static void radeon_pll_write_update(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	while (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
+
+	WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+			   RADEON_PPLL_ATOMIC_UPDATE_W,
+			   ~(RADEON_PPLL_ATOMIC_UPDATE_W));
+}
+
+static void radeon_pll2_wait_for_read_update_complete(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	int i = 0;
+
+	/* FIXME: Certain revisions of R300 can't recover here.  Not sure of
+	   the cause yet, but this workaround will mask the problem for now.
+	   Other chips usually will pass at the very first test, so the
+	   workaround shouldn't have any effect on them. */
+	for (i = 0;
+	     (i < 10000 &&
+	      RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
+	     i++);
+}
+
+static void radeon_pll2_write_update(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	while (RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
+
+	WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
+			   RADEON_P2PLL_ATOMIC_UPDATE_W,
+			   ~(RADEON_P2PLL_ATOMIC_UPDATE_W));
+}
+
+static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
+				       uint16_t fb_div)
+{
+	unsigned int vcoFreq;
+
+	if (!ref_div)
+		return 1;
+
+	vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
+
+	/*
+	 * This is horribly crude: the VCO frequency range is divided into
+	 * 3 parts, each part having a fixed PLL gain value.
+	 */
+	if (vcoFreq >= 30000)
+		/*
+		 * [300..max] MHz : 7
+		 */
+		return 7;
+	else if (vcoFreq >= 18000)
+		/*
+		 * [180..300) MHz : 4
+		 */
+		return 4;
+	else
+		/*
+		 * [0..180) MHz : 1
+		 */
+		return 1;
+}
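+
+/*
+ * Worked example for radeon_compute_pll_gain() (illustrative numbers,
+ * assuming the usual 10 kHz units for ref_freq): with a 27 MHz reference
+ * (ref_freq = 2700), ref_div = 6 and fb_div = 70,
+ *
+ *	vcoFreq = 2700 * 70 / 6 = 31500	(i.e. 315 MHz)
+ *
+ * which lands in the [300..max] MHz band, so the gain returned is 7.
+ */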
+
+static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t crtc_ext_cntl = 0;
+	uint32_t mask;
+
+	if (radeon_crtc->crtc_id)
+		mask = (RADEON_CRTC2_DISP_DIS |
+			RADEON_CRTC2_VSYNC_DIS |
+			RADEON_CRTC2_HSYNC_DIS |
+			RADEON_CRTC2_DISP_REQ_EN_B);
+	else
+		mask = (RADEON_CRTC_DISPLAY_DIS |
+			RADEON_CRTC_VSYNC_DIS |
+			RADEON_CRTC_HSYNC_DIS);
+
+	/*
+	 * On all dual-CRTC GPUs this bit controls the CRTC of the primary DAC.
+	 * Therefore it is set in the DAC DPMS function.
+	 * This is different for GPUs with a single CRTC but a primary and a
+	 * TV DAC: there it controls the single CRTC no matter where it is
+	 * routed. Therefore we set it here.
+	 */
+	if (rdev->flags & RADEON_SINGLE_CRTC)
+		crtc_ext_cntl = RADEON_CRTC_CRT_ON;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		radeon_crtc->enabled = true;
+		/* adjust pm to dpms changes BEFORE enabling crtcs */
+		radeon_pm_compute_clocks(rdev);
+		if (radeon_crtc->crtc_id)
+			WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
+		else {
+			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
+									 RADEON_CRTC_DISP_REQ_EN_B));
+			WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
+		}
+		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+		radeon_crtc_load_lut(crtc);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+		if (radeon_crtc->crtc_id)
+			WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
+		else {
+			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
+										    RADEON_CRTC_DISP_REQ_EN_B));
+			WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~(mask | crtc_ext_cntl));
+		}
+		radeon_crtc->enabled = false;
+		/* adjust pm to dpms changes AFTER disabling crtcs */
+		radeon_pm_compute_clocks(rdev);
+		break;
+	}
+}
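+
+/*
+ * A note on the WREG32_P()/WREG32_PLL_P() masked writes used throughout
+ * this file: they behave as read-modify-write helpers, roughly
+ *
+ *	tmp = RREG32(reg);
+ *	tmp &= mask;
+ *	tmp |= (val & ~mask);
+ *	WREG32(reg, tmp);
+ *
+ * so passing ~(bits) as the mask clears those bits before OR-ing val in.
+ * (A sketch of the helpers' behaviour as used here, not their definition.)
+ */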
+
+int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+			 struct drm_framebuffer *old_fb)
+{
+	return radeon_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				int x, int y, enum mode_set_atomic state)
+{
+	return radeon_crtc_do_set_base(crtc, fb, x, y, 1);
+}
+
+int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+			 struct drm_framebuffer *fb,
+			 int x, int y, int atomic)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_framebuffer *radeon_fb;
+	struct drm_framebuffer *target_fb;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	uint64_t base;
+	uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
+	uint32_t crtc_pitch, pitch_pixels;
+	uint32_t tiling_flags;
+	int format;
+	uint32_t gen_cntl_reg, gen_cntl_val;
+	int r;
+
+	DRM_DEBUG_KMS("\n");
+	/* no fb bound */
+	if (!atomic && !crtc->fb) {
+		DRM_DEBUG_KMS("No FB bound\n");
+		return 0;
+	}
+
+	if (atomic) {
+		radeon_fb = to_radeon_framebuffer(fb);
+		target_fb = fb;
+	}
+	else {
+		radeon_fb = to_radeon_framebuffer(crtc->fb);
+		target_fb = crtc->fb;
+	}
+
+	switch (target_fb->bits_per_pixel) {
+	case 8:
+		format = 2;
+		break;
+	case 15:      /*  555 */
+		format = 3;
+		break;
+	case 16:      /*  565 */
+		format = 4;
+		break;
+	case 24:      /*  RGB */
+		format = 5;
+		break;
+	case 32:      /* xRGB */
+		format = 6;
+		break;
+	default:
+		return false;
+	}
+
+	/* Pin framebuffer & get tiling information */
+	obj = radeon_fb->obj;
+	rbo = gem_to_radeon_bo(obj);
+retry:
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+	/* Only 27 bit offset for legacy CRTC */
+	r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, 1 << 27,
+				     &base);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
+
+		/* On old GPUs like the RN50 with little vram, pinning can fail
+		 * because the current fb is taking all the space needed. So
+		 * instead of unpinning the old buffer after pinning the new
+		 * one, first unpin the old one and then retry pinning the new
+		 * one.
+		 *
+		 * As only the master can set a mode, only the master can pin,
+		 * and it is unlikely the master client will race with itself,
+		 * especially on those old GPUs with a single crtc.
+		 *
+		 * We don't shut down the display controller because the new
+		 * buffer will end up in the same spot.
+		 */
+		if (!atomic && fb && fb != crtc->fb) {
+			struct radeon_bo *old_rbo;
+			unsigned long nsize, osize;
+
+			old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
+			osize = radeon_bo_size(old_rbo);
+			nsize = radeon_bo_size(rbo);
+			if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
+				radeon_bo_unpin(old_rbo);
+				radeon_bo_unreserve(old_rbo);
+				fb = NULL;
+				goto retry;
+			}
+		}
+		return -EINVAL;
+	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+	if (tiling_flags & RADEON_TILING_MICRO)
+		DRM_ERROR("trying to scanout microtiled buffer\n");
+
+	/* if scanout was in GTT this really wouldn't work */
+	/* crtc offset is from display base addr not FB location */
+	radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start;
+
+	base -= radeon_crtc->legacy_display_base_addr;
+
+	crtc_offset_cntl = 0;
+
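+	/*
+	 * Illustrative example for the pitch computation below (numbers not
+	 * from the original source): a 1024x768 surface at 32 bpp with
+	 * pitches[0] = 4096 bytes gives pitch_pixels = 1024 and
+	 * crtc_pitch = (1024 * 32 + 255) / 256 = 128, i.e. the pitch in
+	 * units of 8 pixels, which is then duplicated into the high word.
+	 */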
+	pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+	crtc_pitch  = (((pitch_pixels * target_fb->bits_per_pixel) +
+			((target_fb->bits_per_pixel * 8) - 1)) /
+		       (target_fb->bits_per_pixel * 8));
+	crtc_pitch |= crtc_pitch << 16;
+
+	crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
+	if (tiling_flags & RADEON_TILING_MACRO) {
+		if (ASIC_IS_R300(rdev))
+			crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
+					     R300_CRTC_MICRO_TILE_BUFFER_DIS |
+					     R300_CRTC_MACRO_TILE_EN);
+		else
+			crtc_offset_cntl |= RADEON_CRTC_TILE_EN;
+	} else {
+		if (ASIC_IS_R300(rdev))
+			crtc_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN |
+					      R300_CRTC_MICRO_TILE_BUFFER_DIS |
+					      R300_CRTC_MACRO_TILE_EN);
+		else
+			crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN;
+	}
+
+	if (tiling_flags & RADEON_TILING_MACRO) {
+		if (ASIC_IS_R300(rdev)) {
+			crtc_tile_x0_y0 = x | (y << 16);
+			base &= ~0x7ff;
+		} else {
+			int byteshift = target_fb->bits_per_pixel >> 4;
+			int tile_addr = (((y >> 3) * pitch_pixels +  x) >> (8 - byteshift)) << 11;
+			base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
+			crtc_offset_cntl |= (y % 16);
+		}
+	} else {
+		int offset = y * pitch_pixels + x;
+		switch (target_fb->bits_per_pixel) {
+		case 8:
+			offset *= 1;
+			break;
+		case 15:
+		case 16:
+			offset *= 2;
+			break;
+		case 24:
+			offset *= 3;
+			break;
+		case 32:
+			offset *= 4;
+			break;
+		default:
+			return false;
+		}
+		base += offset;
+	}
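+
+	/*
+	 * Hypothetical example for the pre-R300 macro-tiled path above
+	 * (values chosen only for illustration): at 16 bpp, byteshift = 1;
+	 * with pitch_pixels = 1024, x = 64, y = 10 this gives
+	 *	tile_addr = (((10 >> 3) * 1024 + 64) >> 7) << 11 = 16384
+	 * and base += 16384 + ((64 << 1) % 256) + ((10 % 8) << 8)
+	 *	     = 16384 + 128 + 512.
+	 */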
+
+	base &= ~7;
+
+	if (radeon_crtc->crtc_id == 1)
+		gen_cntl_reg = RADEON_CRTC2_GEN_CNTL;
+	else
+		gen_cntl_reg = RADEON_CRTC_GEN_CNTL;
+
+	gen_cntl_val = RREG32(gen_cntl_reg);
+	gen_cntl_val &= ~(0xf << 8);
+	gen_cntl_val |= (format << 8);
+	gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
+	WREG32(gen_cntl_reg, gen_cntl_val);
+
+	crtc_offset = (u32)base;
+
+	WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr);
+
+	if (ASIC_IS_R300(rdev)) {
+		if (radeon_crtc->crtc_id)
+			WREG32(R300_CRTC2_TILE_X0_Y0, crtc_tile_x0_y0);
+		else
+			WREG32(R300_CRTC_TILE_X0_Y0, crtc_tile_x0_y0);
+	}
+	WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, crtc_offset_cntl);
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
+	WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
+
+	if (!atomic && fb && fb != crtc->fb) {
+		radeon_fb = to_radeon_framebuffer(fb);
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
+	}
+
+	/* Bytes per pixel may have changed */
+	radeon_bandwidth_update(rdev);
+
+	return 0;
+}
+
+static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_encoder *encoder;
+	int format;
+	int hsync_start;
+	int hsync_wid;
+	int vsync_wid;
+	uint32_t crtc_h_total_disp;
+	uint32_t crtc_h_sync_strt_wid;
+	uint32_t crtc_v_total_disp;
+	uint32_t crtc_v_sync_strt_wid;
+	bool is_tv = false;
+
+	DRM_DEBUG_KMS("\n");
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+				is_tv = true;
+				DRM_INFO("crtc %d is connected to a TV\n", radeon_crtc->crtc_id);
+				break;
+			}
+		}
+	}
+
+	switch (crtc->fb->bits_per_pixel) {
+	case 8:
+		format = 2;
+		break;
+	case 15:      /*  555 */
+		format = 3;
+		break;
+	case 16:      /*  565 */
+		format = 4;
+		break;
+	case 24:      /*  RGB */
+		format = 5;
+		break;
+	case 32:      /* xRGB */
+		format = 6;
+		break;
+	default:
+		return false;
+	}
+
+	crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
+			     | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
+
+	hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
+	if (!hsync_wid)
+		hsync_wid = 1;
+	hsync_start = mode->crtc_hsync_start - 8;
+
+	crtc_h_sync_strt_wid = ((hsync_start & 0x1fff)
+				| ((hsync_wid & 0x3f) << 16)
+				| ((mode->flags & DRM_MODE_FLAG_NHSYNC)
+				   ? RADEON_CRTC_H_SYNC_POL
+				   : 0));
+
+	/* This works for double scan mode. */
+	crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
+			     | ((mode->crtc_vdisplay - 1) << 16));
+
+	vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
+	if (!vsync_wid)
+		vsync_wid = 1;
+
+	crtc_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
+				| ((vsync_wid & 0x1f) << 16)
+				| ((mode->flags & DRM_MODE_FLAG_NVSYNC)
+				   ? RADEON_CRTC_V_SYNC_POL
+				   : 0));
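+
+	/*
+	 * Worked example (standard VESA 1024x768@60 timings; illustrative
+	 * only): htotal 1344, hdisplay 1024, hsync 1048-1184, vtotal 806,
+	 * vdisplay 768, vsync 771-777 yields
+	 *	crtc_h_total_disp    = 167  | (127 << 16)
+	 *	crtc_h_sync_strt_wid = 1040 | (17 << 16)  | H_SYNC_POL
+	 *	crtc_v_total_disp    = 805  | (767 << 16)
+	 *	crtc_v_sync_strt_wid = 770  | (6 << 16)   | V_SYNC_POL
+	 * (the POL bits are set because this mode uses negative sync).
+	 */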
+
+	if (radeon_crtc->crtc_id) {
+		uint32_t crtc2_gen_cntl;
+		uint32_t disp2_merge_cntl;
+
+		/* if the TV DAC is enabled for another crtc, keep it enabled */
+		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
+		crtc2_gen_cntl |= ((format << 8)
+				   | RADEON_CRTC2_VSYNC_DIS
+				   | RADEON_CRTC2_HSYNC_DIS
+				   | RADEON_CRTC2_DISP_DIS
+				   | RADEON_CRTC2_DISP_REQ_EN_B
+				   | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
+				      ? RADEON_CRTC2_DBL_SCAN_EN
+				      : 0)
+				   | ((mode->flags & DRM_MODE_FLAG_CSYNC)
+				      ? RADEON_CRTC2_CSYNC_EN
+				      : 0)
+				   | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+				      ? RADEON_CRTC2_INTERLACE_EN
+				      : 0));
+
+		/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+		if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+			crtc2_gen_cntl |= RADEON_CRTC2_EN;
+
+		disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+		disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+
+		WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
+		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+
+		WREG32(RADEON_FP_H2_SYNC_STRT_WID, crtc_h_sync_strt_wid);
+		WREG32(RADEON_FP_V2_SYNC_STRT_WID, crtc_v_sync_strt_wid);
+	} else {
+		uint32_t crtc_gen_cntl;
+		uint32_t crtc_ext_cntl;
+		uint32_t disp_merge_cntl;
+
+		crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
+		crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
+				 | (format << 8)
+				 | RADEON_CRTC_DISP_REQ_EN_B
+				 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
+				    ? RADEON_CRTC_DBL_SCAN_EN
+				    : 0)
+				 | ((mode->flags & DRM_MODE_FLAG_CSYNC)
+				    ? RADEON_CRTC_CSYNC_EN
+				    : 0)
+				 | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+				    ? RADEON_CRTC_INTERLACE_EN
+				    : 0));
+
+		/* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+		if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+			crtc_gen_cntl |= RADEON_CRTC_EN;
+
+		crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+		crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
+				  RADEON_CRTC_VSYNC_DIS |
+				  RADEON_CRTC_HSYNC_DIS |
+				  RADEON_CRTC_DISPLAY_DIS);
+
+		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+
+		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+		WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+		WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	}
+
+	if (is_tv)
+		radeon_legacy_tv_adjust_crtc_reg(encoder, &crtc_h_total_disp,
+						 &crtc_h_sync_strt_wid, &crtc_v_total_disp,
+						 &crtc_v_sync_strt_wid);
+
+	WREG32(RADEON_CRTC_H_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_h_total_disp);
+	WREG32(RADEON_CRTC_H_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_h_sync_strt_wid);
+	WREG32(RADEON_CRTC_V_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_v_total_disp);
+	WREG32(RADEON_CRTC_V_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_v_sync_strt_wid);
+
+	return true;
+}
+
+static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_encoder *encoder;
+	uint32_t feedback_div = 0;
+	uint32_t frac_fb_div = 0;
+	uint32_t reference_div = 0;
+	uint32_t post_divider = 0;
+	uint32_t freq = 0;
+	uint8_t pll_gain;
+	bool use_bios_divs = false;
+	/* PLL registers */
+	uint32_t pll_ref_div = 0;
+	uint32_t pll_fb_post_div = 0;
+	uint32_t htotal_cntl = 0;
+	bool is_tv = false;
+	struct radeon_pll *pll;
+
+	struct {
+		int divider;
+		int bitvalue;
+	} *post_div, post_divs[] = {
+		/* From RAGE 128 VR/RAGE 128 GL Register
+		 * Reference Manual (Technical Reference
+		 * Manual P/N RRG-G04100-C Rev. 0.04), page
+		 * 3-17 (PLL_DIV_[3:0]).
+		 */
+		{  1, 0 },              /* VCLK_SRC                 */
+		{  2, 1 },              /* VCLK_SRC/2               */
+		{  4, 2 },              /* VCLK_SRC/4               */
+		{  8, 3 },              /* VCLK_SRC/8               */
+		{  3, 4 },              /* VCLK_SRC/3               */
+		{ 16, 5 },              /* VCLK_SRC/16              */
+		{  6, 6 },              /* VCLK_SRC/6               */
+		{ 12, 7 },              /* VCLK_SRC/12              */
+		{  0, 0 }
+	};
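+
+	/*
+	 * For example (illustrative): if radeon_compute_pll_legacy() returns
+	 * post_divider == 3, the scan below selects { 3, 4 }, so bitvalue 4
+	 * (VCLK_SRC/3) is what ends up in the high word of pll_fb_post_div.
+	 */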
+
+	if (radeon_crtc->crtc_id)
+		pll = &rdev->clock.p2pll;
+	else
+		pll = &rdev->clock.p1pll;
+
+	pll->flags = RADEON_PLL_LEGACY;
+
+	if (mode->clock > 200000) /* range limits??? */
+		pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+	else
+		pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc == crtc) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+			if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
+				is_tv = true;
+				break;
+			}
+
+			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+				pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
+			if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
+				if (!rdev->is_atom_bios) {
+					struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+					struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+					if (lvds) {
+						if (lvds->use_bios_dividers) {
+							pll_ref_div = lvds->panel_ref_divider;
+							pll_fb_post_div   = (lvds->panel_fb_divider |
+									     (lvds->panel_post_divider << 16));
+							htotal_cntl  = 0;
+							use_bios_divs = true;
+						}
+					}
+				}
+				pll->flags |= RADEON_PLL_USE_REF_DIV;
+			}
+		}
+	}
+
+	DRM_DEBUG_KMS("\n");
+
+	if (!use_bios_divs) {
+		radeon_compute_pll_legacy(pll, mode->clock,
+					  &freq, &feedback_div, &frac_fb_div,
+					  &reference_div, &post_divider);
+
+		for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
+			if (post_div->divider == post_divider)
+				break;
+		}
+
+		if (!post_div->divider)
+			post_div = &post_divs[0];
+
+		DRM_DEBUG_KMS("dc=%u, fd=%d, rd=%d, pd=%d\n",
+			  (unsigned)freq,
+			  feedback_div,
+			  reference_div,
+			  post_divider);
+
+		pll_ref_div   = reference_div;
+#if defined(__powerpc__) && (0) /* TODO */
+		/* apparently programming this otherwise causes a hang??? */
+		if (info->MacModel == RADEON_MAC_IBOOK)
+			pll_fb_post_div = 0x000600ad;
+		else
+#endif
+			pll_fb_post_div     = (feedback_div | (post_div->bitvalue << 16));
+
+		htotal_cntl    = mode->htotal & 0x7;
+
+	}
+
+	pll_gain = radeon_compute_pll_gain(pll->reference_freq,
+					   pll_ref_div & 0x3ff,
+					   pll_fb_post_div & 0x7ff);
+
+	if (radeon_crtc->crtc_id) {
+		uint32_t pixclks_cntl = ((RREG32_PLL(RADEON_PIXCLKS_CNTL) &
+					  ~(RADEON_PIX2CLK_SRC_SEL_MASK)) |
+					 RADEON_PIX2CLK_SRC_SEL_P2PLLCLK);
+
+		if (is_tv) {
+			radeon_legacy_tv_adjust_pll2(encoder, &htotal_cntl,
+						     &pll_ref_div, &pll_fb_post_div,
+						     &pixclks_cntl);
+		}
+
+		WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
+			     RADEON_PIX2CLK_SRC_SEL_CPUCLK,
+			     ~(RADEON_PIX2CLK_SRC_SEL_MASK));
+
+		WREG32_PLL_P(RADEON_P2PLL_CNTL,
+			     RADEON_P2PLL_RESET
+			     | RADEON_P2PLL_ATOMIC_UPDATE_EN
+			     | ((uint32_t)pll_gain << RADEON_P2PLL_PVG_SHIFT),
+			     ~(RADEON_P2PLL_RESET
+			       | RADEON_P2PLL_ATOMIC_UPDATE_EN
+			       | RADEON_P2PLL_PVG_MASK));
+
+		WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
+			     pll_ref_div,
+			     ~RADEON_P2PLL_REF_DIV_MASK);
+
+		WREG32_PLL_P(RADEON_P2PLL_DIV_0,
+			     pll_fb_post_div,
+			     ~RADEON_P2PLL_FB0_DIV_MASK);
+
+		WREG32_PLL_P(RADEON_P2PLL_DIV_0,
+			     pll_fb_post_div,
+			     ~RADEON_P2PLL_POST0_DIV_MASK);
+
+		radeon_pll2_write_update(dev);
+		radeon_pll2_wait_for_read_update_complete(dev);
+
+		WREG32_PLL(RADEON_HTOTAL2_CNTL, htotal_cntl);
+
+		WREG32_PLL_P(RADEON_P2PLL_CNTL,
+			     0,
+			     ~(RADEON_P2PLL_RESET
+			       | RADEON_P2PLL_SLEEP
+			       | RADEON_P2PLL_ATOMIC_UPDATE_EN));
+
+		DRM_DEBUG_KMS("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+			  (unsigned)pll_ref_div,
+			  (unsigned)pll_fb_post_div,
+			  (unsigned)htotal_cntl,
+			  RREG32_PLL(RADEON_P2PLL_CNTL));
+		DRM_DEBUG_KMS("Wrote2: rd=%u, fd=%u, pd=%u\n",
+			  (unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK,
+			  (unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK,
+			  (unsigned)((pll_fb_post_div &
+				      RADEON_P2PLL_POST0_DIV_MASK) >> 16));
+
+		mdelay(50); /* let the clock lock */
+
+		WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
+			     RADEON_PIX2CLK_SRC_SEL_P2PLLCLK,
+			     ~(RADEON_PIX2CLK_SRC_SEL_MASK));
+
+		WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+	} else {
+		uint32_t pixclks_cntl;
+
+		if (is_tv) {
+			pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+			radeon_legacy_tv_adjust_pll1(encoder, &htotal_cntl, &pll_ref_div,
+						     &pll_fb_post_div, &pixclks_cntl);
+		}
+
+		if (rdev->flags & RADEON_IS_MOBILITY) {
+			/* A temporary workaround for the occasional blanking on certain laptop
+			   panels. This appears to be related to the PLL divider registers
+			   (failing to lock?). It occurs even when all dividers are the same
+			   as their old settings. In this case we really don't need to fiddle
+			   with the PLL registers. By doing this we can avoid the blanking
+			   problem with some panels.
+			*/
+			if ((pll_ref_div == (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_REF_DIV_MASK)) &&
+			    (pll_fb_post_div == (RREG32_PLL(RADEON_PPLL_DIV_3) &
+						 (RADEON_PPLL_POST3_DIV_MASK | RADEON_PPLL_FB3_DIV_MASK)))) {
+				WREG32_P(RADEON_CLOCK_CNTL_INDEX,
+					 RADEON_PLL_DIV_SEL,
+					 ~(RADEON_PLL_DIV_SEL));
+				r100_pll_errata_after_index(rdev);
+				return;
+			}
+		}
+
+		WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
+			     RADEON_VCLK_SRC_SEL_CPUCLK,
+			     ~(RADEON_VCLK_SRC_SEL_MASK));
+		WREG32_PLL_P(RADEON_PPLL_CNTL,
+			     RADEON_PPLL_RESET
+			     | RADEON_PPLL_ATOMIC_UPDATE_EN
+			     | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
+			     | ((uint32_t)pll_gain << RADEON_PPLL_PVG_SHIFT),
+			     ~(RADEON_PPLL_RESET
+			       | RADEON_PPLL_ATOMIC_UPDATE_EN
+			       | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
+			       | RADEON_PPLL_PVG_MASK));
+
+		WREG32_P(RADEON_CLOCK_CNTL_INDEX,
+			 RADEON_PLL_DIV_SEL,
+			 ~(RADEON_PLL_DIV_SEL));
+		r100_pll_errata_after_index(rdev);
+
+		if (ASIC_IS_R300(rdev) ||
+		    (rdev->family == CHIP_RS300) ||
+		    (rdev->family == CHIP_RS400) ||
+		    (rdev->family == CHIP_RS480)) {
+			if (pll_ref_div & R300_PPLL_REF_DIV_ACC_MASK) {
+				/* When restoring console mode, use saved PPLL_REF_DIV
+				 * setting.
+				 */
+				WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+					     pll_ref_div,
+					     0);
+			} else {
+				/* R300 uses ref_div_acc field as real ref divider */
+				WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+					     (pll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
+					     ~R300_PPLL_REF_DIV_ACC_MASK);
+			}
+		} else
+			WREG32_PLL_P(RADEON_PPLL_REF_DIV,
+				     pll_ref_div,
+				     ~RADEON_PPLL_REF_DIV_MASK);
+
+		WREG32_PLL_P(RADEON_PPLL_DIV_3,
+			     pll_fb_post_div,
+			     ~RADEON_PPLL_FB3_DIV_MASK);
+
+		WREG32_PLL_P(RADEON_PPLL_DIV_3,
+			     pll_fb_post_div,
+			     ~RADEON_PPLL_POST3_DIV_MASK);
+
+		radeon_pll_write_update(dev);
+		radeon_pll_wait_for_read_update_complete(dev);
+
+		WREG32_PLL(RADEON_HTOTAL_CNTL, htotal_cntl);
+
+		WREG32_PLL_P(RADEON_PPLL_CNTL,
+			     0,
+			     ~(RADEON_PPLL_RESET
+			       | RADEON_PPLL_SLEEP
+			       | RADEON_PPLL_ATOMIC_UPDATE_EN
+			       | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN));
+
+		DRM_DEBUG_KMS("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+			  pll_ref_div,
+			  pll_fb_post_div,
+			  (unsigned)htotal_cntl,
+			  RREG32_PLL(RADEON_PPLL_CNTL));
+		DRM_DEBUG_KMS("Wrote: rd=%d, fd=%d, pd=%d\n",
+			  pll_ref_div & RADEON_PPLL_REF_DIV_MASK,
+			  pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK,
+			  (pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16);
+
+		mdelay(50); /* let the clock lock */
+
+		WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
+			     RADEON_VCLK_SRC_SEL_PPLLCLK,
+			     ~(RADEON_VCLK_SRC_SEL_MASK));
+
+		if (is_tv)
+			WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+	}
+}
+
+static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	return radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode);
+}
+
+static int radeon_crtc_mode_set(struct drm_crtc *crtc,
+				 struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode,
+				 int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	/* TODO TV */
+	radeon_crtc_set_base(crtc, x, y, old_fb);
+	radeon_set_crtc_timing(crtc, adjusted_mode);
+	radeon_set_pll(crtc, adjusted_mode);
+	radeon_overscan_setup(crtc, adjusted_mode);
+	if (radeon_crtc->crtc_id == 0) {
+		radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
+	} else {
+		if (radeon_crtc->rmx_type != RMX_OFF) {
+			/* FIXME: only the first crtc has RMX; what should
+			 * we do?
+			 */
+			DRM_ERROR("Mode needs scaling but only the first crtc can do that.\n");
+		}
+	}
+	return 0;
+}
+
+static void radeon_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *crtci;
+
+	/*
+	 * The hardware wedges sometimes if you reconfigure one CRTC
+	 * whilst another is running (see fdo bug #24611).
+	 */
+	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
+		radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_crtc_commit(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *crtci;
+
+	/*
+	 * Re-enable the CRTCs that should be running.
+	 */
+	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
+		if (crtci->enabled)
+			radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
+	}
+}
+
+static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
+	.dpms = radeon_crtc_dpms,
+	.mode_fixup = radeon_crtc_mode_fixup,
+	.mode_set = radeon_crtc_mode_set,
+	.mode_set_base = radeon_crtc_set_base,
+	.mode_set_base_atomic = radeon_crtc_set_base_atomic,
+	.prepare = radeon_crtc_prepare,
+	.commit = radeon_crtc_commit,
+	.load_lut = radeon_crtc_load_lut,
+};
+
+
+void radeon_legacy_init_crtc(struct drm_device *dev,
+			       struct radeon_crtc *radeon_crtc)
+{
+	if (radeon_crtc->crtc_id == 1)
+		radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP;
+	drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs);
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/linux-imx/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
new file mode 100644
index 0000000..62cd512
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -0,0 +1,1810 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "atom.h"
+#include <linux/backlight.h>
+#ifdef CONFIG_PMAC_BACKLIGHT
+#include <asm/backlight.h>
+#endif
+
+static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder_helper_funcs *encoder_funcs;
+
+	encoder_funcs = encoder->helper_private;
+	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+	radeon_encoder->active_device = 0;
+}
+
+static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
+	int panel_pwr_delay = 2000;
+	bool is_mac = false;
+	uint8_t backlight_level;
+	DRM_DEBUG_KMS("\n");
+
+	lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	backlight_level = (lvds_gen_cntl >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+	if (radeon_encoder->enc_priv) {
+		if (rdev->is_atom_bios) {
+			struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+			panel_pwr_delay = lvds->panel_pwr_delay;
+			if (lvds->bl_dev)
+				backlight_level = lvds->backlight_level;
+		} else {
+			struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+			panel_pwr_delay = lvds->panel_pwr_delay;
+			if (lvds->bl_dev)
+				backlight_level = lvds->backlight_level;
+		}
+	}
+
+	/* Macs (and possibly some x86 OEM systems?) wire up LVDS strangely.
+	 * Taken from radeonfb.
+	 */
+	if ((rdev->mode_info.connector_table == CT_IBOOK) ||
+	    (rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) ||
+	    (rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) ||
+	    (rdev->mode_info.connector_table == CT_POWERBOOK_VGA))
+		is_mac = true;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
+		disp_pwr_man |= RADEON_AUTO_PWRUP_EN;
+		WREG32(RADEON_DISP_PWR_MAN, disp_pwr_man);
+		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+		lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
+		WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+		mdelay(1);
+
+		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+		lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
+		WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+
+		lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS |
+				   RADEON_LVDS_BL_MOD_LEVEL_MASK);
+		lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN |
+				  RADEON_LVDS_DIGON | RADEON_LVDS_BLON |
+				  (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
+		if (is_mac)
+			lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
+		mdelay(panel_pwr_delay);
+		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+		WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
+		lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+		if (is_mac) {
+			lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
+			WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+			lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN);
+		} else {
+			WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+			lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
+		}
+		mdelay(panel_pwr_delay);
+		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+		WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+		mdelay(panel_pwr_delay);
+		break;
+	}
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	DRM_DEBUG("\n");
+
+	if (radeon_encoder->enc_priv) {
+		if (rdev->is_atom_bios) {
+			struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+			lvds->dpms_mode = mode;
+		} else {
+			struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+			lvds->dpms_mode = mode;
+		}
+	}
+
+	radeon_legacy_lvds_update(encoder, mode);
+}
+
+static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON);
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl;
+
+	DRM_DEBUG_KMS("\n");
+
+	lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+	lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
+
+	lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
+	if (rdev->is_atom_bios) {
+		/* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl;
+		 * we need to call that on resume to set up the reg properly.
+		 */
+		radeon_encoder->pixel_clock = adjusted_mode->clock;
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+		lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	} else {
+		struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+		if (lvds) {
+			DRM_DEBUG_KMS("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
+			lvds_gen_cntl = lvds->lvds_gen_cntl;
+			lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
+					      (0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
+			lvds_ss_gen_cntl |= ((lvds->panel_digon_delay << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
+					     (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
+		} else
+			lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	}
+	lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+	lvds_gen_cntl &= ~(RADEON_LVDS_ON |
+			   RADEON_LVDS_BLON |
+			   RADEON_LVDS_EN |
+			   RADEON_LVDS_RST_FM);
+
+	if (ASIC_IS_R300(rdev))
+		lvds_pll_cntl &= ~(R300_LVDS_SRC_SEL_MASK);
+
+	if (radeon_crtc->crtc_id == 0) {
+		if (ASIC_IS_R300(rdev)) {
+			if (radeon_encoder->rmx_type != RMX_OFF)
+				lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX;
+		} else
+			lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2;
+	} else {
+		if (ASIC_IS_R300(rdev))
+			lvds_pll_cntl |= R300_LVDS_SRC_SEL_CRTC2;
+		else
+			lvds_gen_cntl |= RADEON_LVDS_SEL_CRTC2;
+	}
+
+	WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+	WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+	WREG32(RADEON_LVDS_SS_GEN_CNTL, lvds_ss_gen_cntl);
+
+	if (rdev->family == CHIP_RV410)
+		WREG32(RADEON_CLOCK_CNTL_INDEX, 0);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	/* set the active encoder to connector routing */
+	radeon_encoder_set_active_device(encoder);
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+	/* get the native mode for LVDS */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+		radeon_panel_mode_fixup(encoder, adjusted_mode);
+
+	return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
+	.dpms = radeon_legacy_lvds_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_lvds_prepare,
+	.mode_set = radeon_legacy_lvds_mode_set,
+	.commit = radeon_legacy_lvds_commit,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+u8
+radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	u8 backlight_level;
+
+	backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+			   RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+	return backlight_level;
+}
+
+void
+radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int dpms_mode = DRM_MODE_DPMS_ON;
+
+	if (radeon_encoder->enc_priv) {
+		if (rdev->is_atom_bios) {
+			struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+			if (lvds->backlight_level > 0)
+				dpms_mode = lvds->dpms_mode;
+			else
+				dpms_mode = DRM_MODE_DPMS_OFF;
+			lvds->backlight_level = level;
+		} else {
+			struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+			if (lvds->backlight_level > 0)
+				dpms_mode = lvds->dpms_mode;
+			else
+				dpms_mode = DRM_MODE_DPMS_OFF;
+			lvds->backlight_level = level;
+		}
+	}
+
+	radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	uint8_t level;
+
+	/* Convert brightness to hardware level */
+	if (bd->props.brightness < 0)
+		level = 0;
+	else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+		level = RADEON_MAX_BL_LEVEL;
+	else
+		level = bd->props.brightness;
+
+	if (pdata->negative)
+		level = RADEON_MAX_BL_LEVEL - level;
+
+	return level;
+}
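+
+/*
+ * Example (illustrative numbers): with RADEON_MAX_BL_LEVEL == 255, a
+ * requested brightness of 200 maps to a hardware level of 200 on a
+ * normal panel, or 255 - 200 = 55 on a panel with negative sense
+ * (pdata->negative, set up in radeon_legacy_backlight_init() below).
+ */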
+
+static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+
+	radeon_legacy_set_backlight_level(radeon_encoder,
+					  radeon_legacy_lvds_level(bd));
+
+	return 0;
+}
+
+static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint8_t backlight_level;
+
+	backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+			   RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+	return pdata->negative ? RADEON_MAX_BL_LEVEL - backlight_level : backlight_level;
+}
+
+static const struct backlight_ops radeon_backlight_ops = {
+	.get_brightness = radeon_legacy_backlight_get_brightness,
+	.update_status	= radeon_legacy_backlight_update_status,
+};
+
+void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+				  struct drm_connector *drm_connector)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd;
+	struct backlight_properties props;
+	struct radeon_backlight_privdata *pdata;
+	uint8_t backlight_level;
+	char bl_name[16];
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+	if (!pmac_has_backlight_type("ati") &&
+	    !pmac_has_backlight_type("mnca"))
+		return;
+#endif
+
+	pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
+	if (!pdata) {
+		DRM_ERROR("Memory allocation failed\n");
+		goto error;
+	}
+
+	memset(&props, 0, sizeof(props));
+	props.max_brightness = RADEON_MAX_BL_LEVEL;
+	props.type = BACKLIGHT_RAW;
+	snprintf(bl_name, sizeof(bl_name),
+		 "radeon_bl%d", dev->primary->index);
+	bd = backlight_device_register(bl_name, &drm_connector->kdev,
+				       pdata, &radeon_backlight_ops, &props);
+	if (IS_ERR(bd)) {
+		DRM_ERROR("Backlight registration failed\n");
+		goto error;
+	}
+
+	pdata->encoder = radeon_encoder;
+
+	backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
+			   RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
+
+	/* First, try to detect backlight level sense based on the assumption
+	 * that firmware set it up at full brightness
+	 */
+	if (backlight_level == 0)
+		pdata->negative = true;
+	else if (backlight_level == 0xff)
+		pdata->negative = false;
+	else {
+		/* XXX hack... maybe some day we can figure out in what direction
+		 * backlight should work on a given panel?
+		 */
+		pdata->negative = (rdev->family != CHIP_RV200 &&
+				   rdev->family != CHIP_RV250 &&
+				   rdev->family != CHIP_RV280 &&
+				   rdev->family != CHIP_RV350);
+
+#ifdef CONFIG_PMAC_BACKLIGHT
+		pdata->negative = (pdata->negative ||
+				   of_machine_is_compatible("PowerBook4,3") ||
+				   of_machine_is_compatible("PowerBook6,3") ||
+				   of_machine_is_compatible("PowerBook6,5"));
+#endif
+	}
+
+	if (rdev->is_atom_bios) {
+		struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+		lvds->bl_dev = bd;
+	} else {
+		struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+		lvds->bl_dev = bd;
+	}
+
+	bd->props.brightness = radeon_legacy_backlight_get_brightness(bd);
+	bd->props.power = FB_BLANK_UNBLANK;
+	backlight_update_status(bd);
+
+	DRM_INFO("radeon legacy LVDS backlight initialized\n");
+
+	return;
+
+error:
+	kfree(pdata);
+	return;
+}
+
+static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd = NULL;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+	if (rdev->is_atom_bios) {
+		struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
+		bd = lvds->bl_dev;
+		lvds->bl_dev = NULL;
+	} else {
+		struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
+		bd = lvds->bl_dev;
+		lvds->bl_dev = NULL;
+	}
+
+	if (bd) {
+		struct radeon_backlight_privdata *pdata;
+
+		pdata = bl_get_data(bd);
+		backlight_device_unregister(bd);
+		kfree(pdata);
+
+		DRM_INFO("radeon legacy LVDS backlight unloaded\n");
+	}
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+void radeon_legacy_backlight_init(struct radeon_encoder *encoder,
+				  struct drm_connector *drm_connector)
+{
+}
+
+static void radeon_legacy_backlight_exit(struct radeon_encoder *encoder)
+{
+}
+
+#endif
+
+
+static void radeon_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+	if (radeon_encoder->enc_priv) {
+		radeon_legacy_backlight_exit(radeon_encoder);
+		kfree(radeon_encoder->enc_priv);
+	}
+	drm_encoder_cleanup(encoder);
+	kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
+	.destroy = radeon_lvds_enc_destroy,
+};
+
+static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+	uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL);
+	uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+
+	DRM_DEBUG_KMS("\n");
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		crtc_ext_cntl |= RADEON_CRTC_CRT_ON;
+		dac_cntl &= ~RADEON_DAC_PDWN;
+		dac_macro_cntl &= ~(RADEON_DAC_PDWN_R |
+				    RADEON_DAC_PDWN_G |
+				    RADEON_DAC_PDWN_B);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		crtc_ext_cntl &= ~RADEON_CRTC_CRT_ON;
+		dac_cntl |= RADEON_DAC_PDWN;
+		dac_macro_cntl |= (RADEON_DAC_PDWN_R |
+				   RADEON_DAC_PDWN_G |
+				   RADEON_DAC_PDWN_B);
+		break;
+	}
+
+	/* handled in radeon_crtc_dpms() */
+	if (!(rdev->flags & RADEON_SINGLE_CRTC))
+		WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	WREG32(RADEON_DAC_CNTL, dac_cntl);
+	WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder,
+					       struct drm_display_mode *mode,
+					       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (radeon_crtc->crtc_id == 0) {
+		if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
+			disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
+				~(RADEON_DISP_DAC_SOURCE_MASK);
+			WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+		} else {
+			dac2_cntl = RREG32(RADEON_DAC_CNTL2)  & ~(RADEON_DAC2_DAC_CLK_SEL);
+			WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+		}
+	} else {
+		if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
+			disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
+				~(RADEON_DISP_DAC_SOURCE_MASK);
+			disp_output_cntl |= RADEON_DISP_DAC_SOURCE_CRTC2;
+			WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+		} else {
+			dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC_CLK_SEL;
+			WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+		}
+	}
+
+	dac_cntl = (RADEON_DAC_MASK_ALL |
+		    RADEON_DAC_VGA_ADR_EN |
+		    /* TODO 6-bits */
+		    RADEON_DAC_8BIT_EN);
+
+	WREG32_P(RADEON_DAC_CNTL,
+		       dac_cntl,
+		       RADEON_DAC_RANGE_CNTL |
+		       RADEON_DAC_BLANKING);
+
+	if (radeon_encoder->enc_priv) {
+		struct radeon_encoder_primary_dac *p_dac = (struct radeon_encoder_primary_dac *)radeon_encoder->enc_priv;
+		dac_macro_cntl = p_dac->ps2_pdac_adj;
+	} else
+		dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+	dac_macro_cntl |= RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B;
+	WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_encoder *encoder,
+								  struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t vclk_ecp_cntl, crtc_ext_cntl;
+	uint32_t dac_ext_cntl, dac_cntl, dac_macro_cntl, tmp;
+	enum drm_connector_status found = connector_status_disconnected;
+	bool color = true;
+
+	/* Just don't bother on RN50; those chips are often connected to remote
+	 * console hardware and we often fail to load-detect them. So to make
+	 * everyone happy, report the encoder as always connected.
+	 */
+	if (ASIC_IS_RN50(rdev))
+		return connector_status_connected;
+
+	/* save the regs we need */
+	vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+	dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+	dac_cntl = RREG32(RADEON_DAC_CNTL);
+	dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
+
+	tmp = vclk_ecp_cntl &
+		~(RADEON_PIXCLK_ALWAYS_ONb | RADEON_PIXCLK_DAC_ALWAYS_ONb);
+	WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+
+	tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
+	WREG32(RADEON_CRTC_EXT_CNTL, tmp);
+
+	tmp = RADEON_DAC_FORCE_BLANK_OFF_EN |
+		RADEON_DAC_FORCE_DATA_EN;
+
+	if (color)
+		tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
+	else
+		tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
+
+	if (ASIC_IS_R300(rdev))
+		tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+	else if (ASIC_IS_RV100(rdev))
+		tmp |= (0x1ac << RADEON_DAC_FORCE_DATA_SHIFT);
+	else
+		tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
+
+	WREG32(RADEON_DAC_EXT_CNTL, tmp);
+
+	tmp = dac_cntl & ~(RADEON_DAC_RANGE_CNTL_MASK | RADEON_DAC_PDWN);
+	tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
+	WREG32(RADEON_DAC_CNTL, tmp);
+
+	tmp = dac_macro_cntl;
+	tmp &= ~(RADEON_DAC_PDWN_R |
+		 RADEON_DAC_PDWN_G |
+		 RADEON_DAC_PDWN_B);
+
+	WREG32(RADEON_DAC_MACRO_CNTL, tmp);
+
+	mdelay(2);
+
+	if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
+		found = connector_status_connected;
+
+	/* restore the regs we used */
+	WREG32(RADEON_DAC_CNTL, dac_cntl);
+	WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
+	WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	WREG32_PLL(RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl);
+
+	return found;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
+	.dpms = radeon_legacy_primary_dac_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_primary_dac_prepare,
+	.mode_set = radeon_legacy_primary_dac_mode_set,
+	.commit = radeon_legacy_primary_dac_commit,
+	.detect = radeon_legacy_primary_dac_detect,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
+	.destroy = radeon_enc_destroy,
+};
+
+static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL);
+	DRM_DEBUG_KMS("\n");
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		fp_gen_cntl |= (RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+		break;
+	}
+
+	WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
+					    struct drm_display_mode *mode,
+					    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl;
+	int i;
+
+	DRM_DEBUG_KMS("\n");
+
+	tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL);
+	tmp &= 0xfffff;
+	if (rdev->family == CHIP_RV280) {
+		/* bit 22 of TMDS_PLL_CNTL is read-back inverted */
+		tmp ^= (1 << 22);
+		tmds_pll_cntl ^= (1 << 22);
+	}
+
+	if (radeon_encoder->enc_priv) {
+		struct radeon_encoder_int_tmds *tmds = (struct radeon_encoder_int_tmds *)radeon_encoder->enc_priv;
+
+		for (i = 0; i < 4; i++) {
+			if (tmds->tmds_pll[i].freq == 0)
+				break;
+			if ((uint32_t)(mode->clock / 10) < tmds->tmds_pll[i].freq) {
+				tmp = tmds->tmds_pll[i].value;
+				break;
+			}
+		}
+	}
+
+	if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV280)) {
+		if (tmp & 0xfff00000)
+			tmds_pll_cntl = tmp;
+		else {
+			tmds_pll_cntl &= 0xfff00000;
+			tmds_pll_cntl |= tmp;
+		}
+	} else
+		tmds_pll_cntl = tmp;
+
+	tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) &
+		~(RADEON_TMDS_TRANSMITTER_PLLRST);
+
+	if (rdev->family == CHIP_R200 ||
+	    rdev->family == CHIP_R100 ||
+	    ASIC_IS_R300(rdev))
+		tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
+	else /* RV chips got this bit reversed */
+		tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;
+
+	fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
+		       (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
+			RADEON_FP_CRTC_DONT_SHADOW_HEND));
+
+	fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+
+	fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
+			 RADEON_FP_DFP_SYNC_SEL |
+			 RADEON_FP_CRT_SYNC_SEL |
+			 RADEON_FP_CRTC_LOCK_8DOT |
+			 RADEON_FP_USE_SHADOW_EN |
+			 RADEON_FP_CRTC_USE_SHADOW_VEND |
+			 RADEON_FP_CRT_SYNC_ALT);
+
+	if (1) /* FIXME rgbBits == 8 */
+		fp_gen_cntl |= RADEON_FP_PANEL_FORMAT;  /* 24 bit format */
+	else
+		fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT; /* 18 bit format */
+
+	if (radeon_crtc->crtc_id == 0) {
+		if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+			fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+			if (radeon_encoder->rmx_type != RMX_OFF)
+				fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
+			else
+				fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
+		} else
+			fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
+	} else {
+		if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+			fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+			fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
+		} else
+			fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
+	}
+
+	WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
+	WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
+	WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
+	.dpms = radeon_legacy_tmds_int_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_tmds_int_prepare,
+	.mode_set = radeon_legacy_tmds_int_mode_set,
+	.commit = radeon_legacy_tmds_int_commit,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+
+static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
+	.destroy = radeon_enc_destroy,
+};
+
+static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	DRM_DEBUG_KMS("\n");
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		fp2_gen_cntl &= ~RADEON_FP2_BLANK_EN;
+		fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		fp2_gen_cntl |= RADEON_FP2_BLANK_EN;
+		fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+		break;
+	}
+
+	WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+	radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
+					    struct drm_display_mode *mode,
+					    struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t fp2_gen_cntl;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (rdev->is_atom_bios) {
+		radeon_encoder->pixel_clock = adjusted_mode->clock;
+		atombios_dvo_setup(encoder, ATOM_ENABLE);
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	} else {
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+
+		if (1) /* FIXME rgbBits == 8 */
+			fp2_gen_cntl |= RADEON_FP2_PANEL_FORMAT; /* 24 bit format */
+		else
+			fp2_gen_cntl &= ~RADEON_FP2_PANEL_FORMAT; /* 18 bit format */
+
+		fp2_gen_cntl &= ~(RADEON_FP2_ON |
+				  RADEON_FP2_DVO_EN |
+				  RADEON_FP2_DVO_RATE_SEL_SDR);
+
+		/* XXX: these are oem specific */
+		if (ASIC_IS_R300(rdev)) {
+			if ((dev->pdev->device == 0x4850) &&
+			    (dev->pdev->subsystem_vendor == 0x1028) &&
+			    (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
+				fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE;
+			else
+				fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE;
+
+			/*if (mode->clock > 165000)
+			  fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
+		}
+		if (!radeon_combios_external_tmds_setup(encoder))
+			radeon_external_tmds_setup(encoder);
+	}
+
+	if (radeon_crtc->crtc_id == 0) {
+		if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
+			fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
+			if (radeon_encoder->rmx_type != RMX_OFF)
+				fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX;
+			else
+				fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1;
+		} else
+			fp2_gen_cntl &= ~RADEON_FP2_SRC_SEL_CRTC2;
+	} else {
+		if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
+			fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
+			fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
+		} else
+			fp2_gen_cntl |= RADEON_FP2_SRC_SEL_CRTC2;
+	}
+
+	WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	/* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
+	kfree(radeon_encoder->enc_priv);
+	drm_encoder_cleanup(encoder);
+	kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
+	.dpms = radeon_legacy_tmds_ext_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_tmds_ext_prepare,
+	.mode_set = radeon_legacy_tmds_ext_mode_set,
+	.commit = radeon_legacy_tmds_ext_commit,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
+	.destroy = radeon_ext_tmds_enc_destroy,
+};
+
+static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0;
+	uint32_t tv_master_cntl = 0;
+	bool is_tv;
+	DRM_DEBUG_KMS("\n");
+
+	is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
+
+	if (rdev->family == CHIP_R200)
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	else {
+		if (is_tv)
+			tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL);
+		else
+			crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+		tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+	}
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		if (rdev->family == CHIP_R200) {
+			fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+		} else {
+			if (is_tv)
+				tv_master_cntl |= RADEON_TV_ON;
+			else
+				crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON;
+
+			if (rdev->family == CHIP_R420 ||
+			    rdev->family == CHIP_R423 ||
+			    rdev->family == CHIP_RV410)
+				tv_dac_cntl &= ~(R420_TV_DAC_RDACPD |
+						 R420_TV_DAC_GDACPD |
+						 R420_TV_DAC_BDACPD |
+						 RADEON_TV_DAC_BGSLEEP);
+			else
+				tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD |
+						 RADEON_TV_DAC_GDACPD |
+						 RADEON_TV_DAC_BDACPD |
+						 RADEON_TV_DAC_BGSLEEP);
+		}
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (rdev->family == CHIP_R200)
+			fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
+		else {
+			if (is_tv)
+				tv_master_cntl &= ~RADEON_TV_ON;
+			else
+				crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
+
+			if (rdev->family == CHIP_R420 ||
+			    rdev->family == CHIP_R423 ||
+			    rdev->family == CHIP_RV410)
+				tv_dac_cntl |= (R420_TV_DAC_RDACPD |
+						R420_TV_DAC_GDACPD |
+						R420_TV_DAC_BDACPD |
+						RADEON_TV_DAC_BGSLEEP);
+			else
+				tv_dac_cntl |= (RADEON_TV_DAC_RDACPD |
+						RADEON_TV_DAC_GDACPD |
+						RADEON_TV_DAC_BDACPD |
+						RADEON_TV_DAC_BGSLEEP);
+		}
+		break;
+	}
+
+	if (rdev->family == CHIP_R200) {
+		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+	} else {
+		if (is_tv)
+			WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
+		else if (!(rdev->flags & RADEON_SINGLE_CRTC))
+			WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+		/* the single-CRTC case is handled in radeon_crtc_dpms() */
+		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+	}
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+	else
+		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+}
+
+static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, true);
+	else
+		radeon_combios_output_lock(encoder, true);
+	radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder)
+{
+	struct radeon_device *rdev = encoder->dev->dev_private;
+
+	radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+	/* release the output lock taken in prepare */
+	if (rdev->is_atom_bios)
+		radeon_atom_output_lock(encoder, false);
+	else
+		radeon_combios_output_lock(encoder, false);
+}
+
+static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0;
+	uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0, disp_tv_out_cntl = 0;
+	bool is_tv = false;
+
+	DRM_DEBUG_KMS("\n");
+
+	is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
+
+	if (rdev->family != CHIP_R200) {
+		tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+		if (rdev->family == CHIP_R420 ||
+		    rdev->family == CHIP_R423 ||
+		    rdev->family == CHIP_RV410) {
+			tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
+					 RADEON_TV_DAC_BGADJ_MASK |
+					 R420_TV_DAC_DACADJ_MASK |
+					 R420_TV_DAC_RDACPD |
+					 R420_TV_DAC_GDACPD |
+					 R420_TV_DAC_BDACPD |
+					 R420_TV_DAC_TVENABLE);
+		} else {
+			tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
+					 RADEON_TV_DAC_BGADJ_MASK |
+					 RADEON_TV_DAC_DACADJ_MASK |
+					 RADEON_TV_DAC_RDACPD |
+					 RADEON_TV_DAC_GDACPD |
+					 RADEON_TV_DAC_BDACPD);
+		}
+
+		tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
+
+		if (is_tv) {
+			if (tv_dac->tv_std == TV_STD_NTSC ||
+			    tv_dac->tv_std == TV_STD_NTSC_J ||
+			    tv_dac->tv_std == TV_STD_PAL_M ||
+			    tv_dac->tv_std == TV_STD_PAL_60)
+				tv_dac_cntl |= tv_dac->ntsc_tvdac_adj;
+			else
+				tv_dac_cntl |= tv_dac->pal_tvdac_adj;
+
+			if (tv_dac->tv_std == TV_STD_NTSC ||
+			    tv_dac->tv_std == TV_STD_NTSC_J)
+				tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
+			else
+				tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
+		} else
+			tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 |
+					tv_dac->ps2_tvdac_adj);
+
+		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+	}
+
+	if (ASIC_IS_R300(rdev)) {
+		gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
+		disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+	} else if (rdev->family != CHIP_R200)
+		disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+	else /* CHIP_R200 */
+		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+
+	if (rdev->family >= CHIP_R200)
+		disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
+
+	if (is_tv) {
+		uint32_t dac_cntl;
+
+		dac_cntl = RREG32(RADEON_DAC_CNTL);
+		dac_cntl &= ~RADEON_DAC_TVO_EN;
+		WREG32(RADEON_DAC_CNTL, dac_cntl);
+
+		if (ASIC_IS_R300(rdev))
+			gpiopad_a = RREG32(RADEON_GPIOPAD_A) & ~1;
+
+		dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~RADEON_DAC2_DAC2_CLK_SEL;
+		if (radeon_crtc->crtc_id == 0) {
+			if (ASIC_IS_R300(rdev)) {
+				disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+				disp_output_cntl |= (RADEON_DISP_TVDAC_SOURCE_CRTC |
+						     RADEON_DISP_TV_SOURCE_CRTC);
+			}
+			if (rdev->family >= CHIP_R200) {
+				disp_tv_out_cntl &= ~RADEON_DISP_TV_PATH_SRC_CRTC2;
+			} else {
+				disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+			}
+		} else {
+			if (ASIC_IS_R300(rdev)) {
+				disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+				disp_output_cntl |= RADEON_DISP_TV_SOURCE_CRTC;
+			}
+			if (rdev->family >= CHIP_R200) {
+				disp_tv_out_cntl |= RADEON_DISP_TV_PATH_SRC_CRTC2;
+			} else {
+				disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
+			}
+		}
+		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+	} else {
+		dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL;
+
+		if (radeon_crtc->crtc_id == 0) {
+			if (ASIC_IS_R300(rdev)) {
+				disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+				disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC;
+			} else if (rdev->family == CHIP_R200) {
+				fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
+						  RADEON_FP2_DVO_RATE_SEL_SDR);
+			} else
+				disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+		} else {
+			if (ASIC_IS_R300(rdev)) {
+				disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
+				disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+			} else if (rdev->family == CHIP_R200) {
+				fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
+						  RADEON_FP2_DVO_RATE_SEL_SDR);
+				fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
+			} else
+				disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
+		}
+		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+	}
+
+	if (ASIC_IS_R300(rdev)) {
+		WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+		WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+	} else if (rdev->family != CHIP_R200)
+		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+	else /* CHIP_R200 */
+		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+
+	if (rdev->family >= CHIP_R200)
+		WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl);
+
+	if (is_tv)
+		radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode);
+
+	if (rdev->is_atom_bios)
+		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+	else
+		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static bool r300_legacy_tv_detect(struct drm_encoder *encoder,
+				  struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
+	uint32_t disp_output_cntl, gpiopad_a, tmp;
+	bool found = false;
+
+	/* save regs needed */
+	gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+	dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+	dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+	tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+	disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+
+	WREG32_P(RADEON_GPIOPAD_A, 0, ~1);
+
+	WREG32(RADEON_DAC_CNTL2, RADEON_DAC2_DAC2_CLK_SEL);
+
+	WREG32(RADEON_CRTC2_GEN_CNTL,
+	       RADEON_CRTC2_CRT2_ON | RADEON_CRTC2_VSYNC_TRISTAT);
+
+	tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
+	tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+	WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+
+	WREG32(RADEON_DAC_EXT_CNTL,
+	       RADEON_DAC2_FORCE_BLANK_OFF_EN |
+	       RADEON_DAC2_FORCE_DATA_EN |
+	       RADEON_DAC_FORCE_DATA_SEL_RGB |
+	       (0xec << RADEON_DAC_FORCE_DATA_SHIFT));
+
+	WREG32(RADEON_TV_DAC_CNTL,
+	       RADEON_TV_DAC_STD_NTSC |
+	       (8 << RADEON_TV_DAC_BGADJ_SHIFT) |
+	       (6 << RADEON_TV_DAC_DACADJ_SHIFT));
+
+	RREG32(RADEON_TV_DAC_CNTL);
+	mdelay(4);
+
+	WREG32(RADEON_TV_DAC_CNTL,
+	       RADEON_TV_DAC_NBLANK |
+	       RADEON_TV_DAC_NHOLD |
+	       RADEON_TV_MONITOR_DETECT_EN |
+	       RADEON_TV_DAC_STD_NTSC |
+	       (8 << RADEON_TV_DAC_BGADJ_SHIFT) |
+	       (6 << RADEON_TV_DAC_DACADJ_SHIFT));
+
+	RREG32(RADEON_TV_DAC_CNTL);
+	mdelay(6);
+
+	tmp = RREG32(RADEON_TV_DAC_CNTL);
+	if ((tmp & RADEON_TV_DAC_GDACDET) != 0) {
+		found = true;
+		DRM_DEBUG_KMS("S-video TV connection detected\n");
+	} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
+		found = true;
+		DRM_DEBUG_KMS("Composite TV connection detected\n");
+	}
+
+	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+	WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+	WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+	WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+	WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+	WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+	return found;
+}
+
+static bool radeon_legacy_tv_detect(struct drm_encoder *encoder,
+				    struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tv_dac_cntl, dac_cntl2;
+	uint32_t config_cntl, tv_pre_dac_mux_cntl, tv_master_cntl, tmp;
+	bool found = false;
+
+	if (ASIC_IS_R300(rdev))
+		return r300_legacy_tv_detect(encoder, connector);
+
+	dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+	tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL);
+	tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+	config_cntl = RREG32(RADEON_CONFIG_CNTL);
+	tv_pre_dac_mux_cntl = RREG32(RADEON_TV_PRE_DAC_MUX_CNTL);
+
+	tmp = dac_cntl2 & ~RADEON_DAC2_DAC2_CLK_SEL;
+	WREG32(RADEON_DAC_CNTL2, tmp);
+
+	tmp = tv_master_cntl | RADEON_TV_ON;
+	tmp &= ~(RADEON_TV_ASYNC_RST |
+		 RADEON_RESTART_PHASE_FIX |
+		 RADEON_CRT_FIFO_CE_EN |
+		 RADEON_TV_FIFO_CE_EN |
+		 RADEON_RE_SYNC_NOW_SEL_MASK);
+	tmp |= RADEON_TV_FIFO_ASYNC_RST | RADEON_CRT_ASYNC_RST;
+	WREG32(RADEON_TV_MASTER_CNTL, tmp);
+
+	tmp = RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD |
+		RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_NTSC |
+		(8 << RADEON_TV_DAC_BGADJ_SHIFT);
+
+	if (config_cntl & RADEON_CFG_ATI_REV_ID_MASK)
+		tmp |= (4 << RADEON_TV_DAC_DACADJ_SHIFT);
+	else
+		tmp |= (8 << RADEON_TV_DAC_DACADJ_SHIFT);
+	WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+	tmp = RADEON_C_GRN_EN | RADEON_CMP_BLU_EN |
+		RADEON_RED_MX_FORCE_DAC_DATA |
+		RADEON_GRN_MX_FORCE_DAC_DATA |
+		RADEON_BLU_MX_FORCE_DAC_DATA |
+		(0x109 << RADEON_TV_FORCE_DAC_DATA_SHIFT);
+	WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tmp);
+
+	mdelay(3);
+	tmp = RREG32(RADEON_TV_DAC_CNTL);
+	if (tmp & RADEON_TV_DAC_GDACDET) {
+		found = true;
+		DRM_DEBUG_KMS("S-video TV connection detected\n");
+	} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
+		found = true;
+		DRM_DEBUG_KMS("Composite TV connection detected\n");
+	}
+
+	WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tv_pre_dac_mux_cntl);
+	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+	WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
+	WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+	return found;
+}
+
+static bool radeon_legacy_ext_dac_detect(struct drm_encoder *encoder,
+					 struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t gpio_monid, fp2_gen_cntl, disp_output_cntl, crtc2_gen_cntl;
+	uint32_t disp_lin_trans_grph_a, disp_lin_trans_grph_b, disp_lin_trans_grph_c;
+	uint32_t disp_lin_trans_grph_d, disp_lin_trans_grph_e, disp_lin_trans_grph_f;
+	uint32_t tmp, crtc2_h_total_disp, crtc2_v_total_disp;
+	uint32_t crtc2_h_sync_strt_wid, crtc2_v_sync_strt_wid;
+	bool found = false;
+	int i;
+
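+	/* What follows looks like a DVO load detect: drive a fixed pattern
+	 * through the graphics translation unit and a minimal CRTC2 mode out
+	 * the FP2/DVO path, then watch the MONID GPIO for the external
+	 * encoder to report a load.
+	 */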
+	/* save the regs we need */
+	gpio_monid = RREG32(RADEON_GPIO_MONID);
+	fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+	disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+	disp_lin_trans_grph_a = RREG32(RADEON_DISP_LIN_TRANS_GRPH_A);
+	disp_lin_trans_grph_b = RREG32(RADEON_DISP_LIN_TRANS_GRPH_B);
+	disp_lin_trans_grph_c = RREG32(RADEON_DISP_LIN_TRANS_GRPH_C);
+	disp_lin_trans_grph_d = RREG32(RADEON_DISP_LIN_TRANS_GRPH_D);
+	disp_lin_trans_grph_e = RREG32(RADEON_DISP_LIN_TRANS_GRPH_E);
+	disp_lin_trans_grph_f = RREG32(RADEON_DISP_LIN_TRANS_GRPH_F);
+	crtc2_h_total_disp = RREG32(RADEON_CRTC2_H_TOTAL_DISP);
+	crtc2_v_total_disp = RREG32(RADEON_CRTC2_V_TOTAL_DISP);
+	crtc2_h_sync_strt_wid = RREG32(RADEON_CRTC2_H_SYNC_STRT_WID);
+	crtc2_v_sync_strt_wid = RREG32(RADEON_CRTC2_V_SYNC_STRT_WID);
+
+	tmp = RREG32(RADEON_GPIO_MONID);
+	tmp &= ~RADEON_GPIO_A_0;
+	WREG32(RADEON_GPIO_MONID, tmp);
+
+	WREG32(RADEON_FP2_GEN_CNTL, (RADEON_FP2_ON |
+				     RADEON_FP2_PANEL_FORMAT |
+				     R200_FP2_SOURCE_SEL_TRANS_UNIT |
+				     RADEON_FP2_DVO_EN |
+				     R200_FP2_DVO_RATE_SEL_SDR));
+
+	WREG32(RADEON_DISP_OUTPUT_CNTL, (RADEON_DISP_DAC_SOURCE_RMX |
+					 RADEON_DISP_TRANS_MATRIX_GRAPHICS));
+
+	WREG32(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_EN |
+				       RADEON_CRTC2_DISP_REQ_EN_B));
+
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, 0x00000000);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, 0x000003f0);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, 0x00000000);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, 0x000003f0);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, 0x00000000);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, 0x000003f0);
+
+	WREG32(RADEON_CRTC2_H_TOTAL_DISP, 0x01000008);
+	WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, 0x00000800);
+	WREG32(RADEON_CRTC2_V_TOTAL_DISP, 0x00080001);
+	WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, 0x00000080);
+
+	for (i = 0; i < 200; i++) {
+		tmp = RREG32(RADEON_GPIO_MONID);
+		if (tmp & RADEON_GPIO_Y_0) {
+			found = true;
+			break;
+		}
+
+		if (!drm_can_sleep())
+			mdelay(1);
+		else
+			msleep(1);
+	}
+
+	/* restore the regs we used */
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, disp_lin_trans_grph_a);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, disp_lin_trans_grph_b);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, disp_lin_trans_grph_c);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, disp_lin_trans_grph_d);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, disp_lin_trans_grph_e);
+	WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, disp_lin_trans_grph_f);
+	WREG32(RADEON_CRTC2_H_TOTAL_DISP, crtc2_h_total_disp);
+	WREG32(RADEON_CRTC2_V_TOTAL_DISP, crtc2_v_total_disp);
+	WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, crtc2_h_sync_strt_wid);
+	WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, crtc2_v_sync_strt_wid);
+	WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+	WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+	WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+	WREG32(RADEON_GPIO_MONID, gpio_monid);
+
+	return found;
+}
+
+static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder,
+							     struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t crtc2_gen_cntl = 0, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
+	uint32_t gpiopad_a = 0, pixclks_cntl, tmp;
+	uint32_t disp_output_cntl = 0, disp_hw_debug = 0, crtc_ext_cntl = 0;
+	enum drm_connector_status found = connector_status_disconnected;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	bool color = true;
+	struct drm_crtc *crtc;
+
+	/* find out if crtc2 is in use or if this encoder is using it */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+		if ((radeon_crtc->crtc_id == 1) && crtc->enabled) {
+			if (encoder->crtc != crtc) {
+				return connector_status_disconnected;
+			}
+		}
+	}
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_Composite ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_9PinDIN) {
+		bool tv_detect;
+
+		if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT))
+			return connector_status_disconnected;
+
+		tv_detect = radeon_legacy_tv_detect(encoder, connector);
+		if (tv_detect && tv_dac)
+			found = connector_status_connected;
+		return found;
+	}
+
+	/* don't probe if the encoder is being used for something other than CRT output */
+	if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_CRT_SUPPORT)) {
+		DRM_INFO("not detecting due to %08x\n", radeon_encoder->active_device);
+		return connector_status_disconnected;
+	}
+
+	/* the R200 uses an external DAC as its secondary DAC */
+	if (rdev->family == CHIP_R200) {
+		if (radeon_legacy_ext_dac_detect(encoder, connector))
+			found = connector_status_connected;
+		return found;
+	}
+
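+	/* The CRT probe below is a DAC load detect: force a known level out
+	 * of the TV DAC with the comparator enabled, wait ~10 ms, and read
+	 * the DAC2_CMP_OUT* bit that a terminated (connected) line flips.
+	 */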
+	/* save the regs we need */
+	pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+
+	if (rdev->flags & RADEON_SINGLE_CRTC) {
+		crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+	} else {
+		if (ASIC_IS_R300(rdev)) {
+			gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+			disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+		} else {
+			disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+		}
+		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+	}
+	tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+	dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
+	dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
+
+	tmp = pixclks_cntl & ~(RADEON_PIX2CLK_ALWAYS_ONb
+			       | RADEON_PIX2CLK_DAC_ALWAYS_ONb);
+	WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+
+	if (rdev->flags & RADEON_SINGLE_CRTC) {
+		tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
+		WREG32(RADEON_CRTC_EXT_CNTL, tmp);
+	} else {
+		tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
+		tmp |= RADEON_CRTC2_CRT2_ON |
+			(2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
+		WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+
+		if (ASIC_IS_R300(rdev)) {
+			WREG32_P(RADEON_GPIOPAD_A, 1, ~1);
+			tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
+			tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+			WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+		} else {
+			tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
+			WREG32(RADEON_DISP_HW_DEBUG, tmp);
+		}
+	}
+
+	tmp = RADEON_TV_DAC_NBLANK |
+		RADEON_TV_DAC_NHOLD |
+		RADEON_TV_MONITOR_DETECT_EN |
+		RADEON_TV_DAC_STD_PS2;
+
+	WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+	tmp = RADEON_DAC2_FORCE_BLANK_OFF_EN |
+		RADEON_DAC2_FORCE_DATA_EN;
+
+	if (color)
+		tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
+	else
+		tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
+
+	if (ASIC_IS_R300(rdev))
+		tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+	else
+		tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
+
+	WREG32(RADEON_DAC_EXT_CNTL, tmp);
+
+	tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
+	WREG32(RADEON_DAC_CNTL2, tmp);
+
+	mdelay(10);
+
+	if (ASIC_IS_R300(rdev)) {
+		if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
+			found = connector_status_connected;
+	} else {
+		if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUTPUT)
+			found = connector_status_connected;
+	}
+
+	/* restore regs we used */
+	WREG32(RADEON_DAC_CNTL2, dac_cntl2);
+	WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
+	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+
+	if (rdev->flags & RADEON_SINGLE_CRTC) {
+		WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+	} else {
+		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+		if (ASIC_IS_R300(rdev)) {
+			WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+			WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+		} else {
+			WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+		}
+	}
+
+	WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+
+	return found;
+}
+
+static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
+	.dpms = radeon_legacy_tv_dac_dpms,
+	.mode_fixup = radeon_legacy_mode_fixup,
+	.prepare = radeon_legacy_tv_dac_prepare,
+	.mode_set = radeon_legacy_tv_dac_mode_set,
+	.commit = radeon_legacy_tv_dac_commit,
+	.detect = radeon_legacy_tv_dac_detect,
+	.disable = radeon_legacy_encoder_disable,
+};
+
+static const struct drm_encoder_funcs radeon_legacy_tv_dac_enc_funcs = {
+	.destroy = radeon_enc_destroy,
+};
+
+static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_int_tmds *tmds = NULL;
+	bool ret;
+
+	tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+
+	if (!tmds)
+		return NULL;
+
+	if (rdev->is_atom_bios)
+		ret = radeon_atombios_get_tmds_info(encoder, tmds);
+	else
+		ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
+
+	if (!ret)
+		radeon_legacy_get_tmds_info_from_table(encoder, tmds);
+
+	return tmds;
+}
+
+static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_ext_tmds *tmds = NULL;
+	bool ret;
+
+	if (rdev->is_atom_bios)
+		return NULL;
+
+	tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL);
+
+	if (!tmds)
+		return NULL;
+
+	ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
+
+	if (!ret)
+		radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
+
+	return tmds;
+}
+
+void
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+
+	/* see if we already added it */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->encoder_enum == encoder_enum) {
+			radeon_encoder->devices |= supported_device;
+			return;
+		}
+	}
+
+	/* add a new one */
+	radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
+	if (!radeon_encoder)
+		return;
+
+	encoder = &radeon_encoder->base;
+	if (rdev->flags & RADEON_SINGLE_CRTC)
+		encoder->possible_crtcs = 0x1;
+	else
+		encoder->possible_crtcs = 0x3;
+
+	radeon_encoder->enc_priv = NULL;
+
+	radeon_encoder->encoder_enum = encoder_enum;
+	radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	radeon_encoder->devices = supported_device;
+	radeon_encoder->rmx_type = RMX_OFF;
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		encoder->possible_crtcs = 0x1;
+		drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS);
+		drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
+		if (rdev->is_atom_bios)
+			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+		else
+			radeon_encoder->enc_priv = radeon_combios_get_lvds_info(radeon_encoder);
+		radeon_encoder->rmx_type = RMX_FULL;
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS);
+		drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs);
+		radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+		drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC);
+		drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs);
+		if (rdev->is_atom_bios)
+			radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder);
+		else
+			radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+		drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+		drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs);
+		if (rdev->is_atom_bios)
+			radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder);
+		else
+			radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
+		drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
+		if (!rdev->is_atom_bios)
+			radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
+		break;
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/linux-imx/drivers/gpu/drm/radeon/radeon_legacy_tv.c
new file mode 100644
index 0000000..49750d0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -0,0 +1,923 @@
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "radeon.h"
+
+/*
+ * Integrated TV out support based on the GATOS code by
+ * Federico Ulivi <fulivi@lycos.com>
+ */
+
+/*
+ * Limits of h/v positions (hPos & vPos)
+ */
+#define MAX_H_POSITION 5 /* Range: [-5..5], negative is on the left, 0 is default, positive is on the right */
+#define MAX_V_POSITION 5 /* Range: [-5..5], negative is up, 0 is default, positive is down */
+
+/*
+ * Unit for hPos (in TV clock periods)
+ */
+#define H_POS_UNIT 10
+
+/*
+ * Indexes in h. code timing table for horizontal line position adjustment
+ */
+#define H_TABLE_POS1 6
+#define H_TABLE_POS2 8
+
+/*
+ * Limits of hor. size (hSize)
+ */
+#define MAX_H_SIZE 5 /* Range: [-5..5], negative is smaller, positive is larger */
+
+/* tv standard constants */
+#define NTSC_TV_CLOCK_T 233
+#define NTSC_TV_VFTOTAL 1
+#define NTSC_TV_LINES_PER_FRAME 525
+#define NTSC_TV_ZERO_H_SIZE 479166
+#define NTSC_TV_H_SIZE_UNIT 9478
+
+#define PAL_TV_CLOCK_T 188
+#define PAL_TV_VFTOTAL 3
+#define PAL_TV_LINES_PER_FRAME 625
+#define PAL_TV_ZERO_H_SIZE 473200
+#define PAL_TV_H_SIZE_UNIT 9360
+
+/* TV PLL settings for a 27 MHz reference clock */
+#define NTSC_TV_PLL_M_27 22
+#define NTSC_TV_PLL_N_27 175
+#define NTSC_TV_PLL_P_27 5
+
+#define PAL_TV_PLL_M_27 113
+#define PAL_TV_PLL_N_27 668
+#define PAL_TV_PLL_P_27 3
+
+/* TV PLL settings for a 14 MHz reference clock */
+#define NTSC_TV_PLL_M_14 33
+#define NTSC_TV_PLL_N_14 693
+#define NTSC_TV_PLL_P_14 7
+
+#define PAL_TV_PLL_M_14 19
+#define PAL_TV_PLL_N_14 353
+#define PAL_TV_PLL_P_14 5
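+
+/* Assuming the usual fout = fref * N / (M * P), both sets of dividers put
+ * the TV PLL at roughly 12x the colour subcarrier:
+ * 27.0 MHz * 175 / (22 * 5) ~= 42.95 MHz (12 x 3.58 MHz, NTSC) and
+ * 27.0 MHz * 668 / (113 * 3) ~= 53.20 MHz (12 x 4.43 MHz, PAL).
+ */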
+
+#define VERT_LEAD_IN_LINES 2
+#define FRAC_BITS 0xe
+#define FRAC_MASK 0x3fff
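+
+/* FRAC_BITS is 0xe = 14: the vertical scaler works in 2.14 fixed point,
+ * so (1 << FRAC_BITS) represents 1.0 and FRAC_MASK (0x3fff) selects the
+ * fractional part.
+ */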
+
+struct radeon_tv_mode_constants {
+	uint16_t hor_resolution;
+	uint16_t ver_resolution;
+	enum radeon_tv_std standard;
+	uint16_t hor_total;
+	uint16_t ver_total;
+	uint16_t hor_start;
+	uint16_t hor_syncstart;
+	uint16_t ver_syncstart;
+	unsigned def_restart;
+	uint16_t crtcPLL_N;
+	uint8_t  crtcPLL_M;
+	uint8_t  crtcPLL_post_div;
+	unsigned pix_to_tv;
+};
+
+static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = {
+	0x0007,
+	0x003f,
+	0x0263,
+	0x0a24,
+	0x2a6b,
+	0x0a36,
+	0x126d, /* H_TABLE_POS1 */
+	0x1bfe,
+	0x1a8f, /* H_TABLE_POS2 */
+	0x1ec7,
+	0x3863,
+	0x1bfe,
+	0x1bfe,
+	0x1a2a,
+	0x1e95,
+	0x0e31,
+	0x201b,
+	0
+};
+
+static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = {
+	0x2001,
+	0x200d,
+	0x1006,
+	0x0c06,
+	0x1006,
+	0x1818,
+	0x21e3,
+	0x1006,
+	0x0c06,
+	0x1006,
+	0x1817,
+	0x21d4,
+	0x0002,
+	0
+};
+
+static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = {
+	0x0007,
+	0x0058,
+	0x027c,
+	0x0a31,
+	0x2a77,
+	0x0a95,
+	0x124f, /* H_TABLE_POS1 */
+	0x1bfe,
+	0x1b22, /* H_TABLE_POS2 */
+	0x1ef9,
+	0x387c,
+	0x1bfe,
+	0x1bfe,
+	0x1b31,
+	0x1eb5,
+	0x0e43,
+	0x201b,
+	0
+};
+
+static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = {
+	0x2001,
+	0x200c,
+	0x1005,
+	0x0c05,
+	0x1005,
+	0x1401,
+	0x1821,
+	0x2240,
+	0x1005,
+	0x0c05,
+	0x1005,
+	0x1401,
+	0x1822,
+	0x2230,
+	0x0002,
+	0
+};
+
+/**********************************************************************
+ *
+ * availableModes
+ *
+ * Table of all allowed modes for tv output
+ *
+ **********************************************************************/
+static const struct radeon_tv_mode_constants available_tv_modes[] = {
+	{   /* NTSC timing for 27 MHz ref clk */
+		800,                /* horResolution */
+		600,                /* verResolution */
+		TV_STD_NTSC,        /* standard */
+		990,                /* horTotal */
+		740,                /* verTotal */
+		813,                /* horStart */
+		824,                /* horSyncStart */
+		632,                /* verSyncStart */
+		625592,             /* defRestart */
+		592,                /* crtcPLL_N */
+		91,                 /* crtcPLL_M */
+		4,                  /* crtcPLL_postDiv */
+		1022,               /* pixToTV */
+	},
+	{   /* PAL timing for 27 MHz ref clk */
+		800,               /* horResolution */
+		600,               /* verResolution */
+		TV_STD_PAL,        /* standard */
+		1144,              /* horTotal */
+		706,               /* verTotal */
+		812,               /* horStart */
+		824,               /* horSyncStart */
+		669,               /* verSyncStart */
+		696700,            /* defRestart */
+		1382,              /* crtcPLL_N */
+		231,               /* crtcPLL_M */
+		4,                 /* crtcPLL_postDiv */
+		759,               /* pixToTV */
+	},
+	{   /* NTSC timing for 14 MHz ref clk */
+		800,                /* horResolution */
+		600,                /* verResolution */
+		TV_STD_NTSC,        /* standard */
+		1018,               /* horTotal */
+		727,                /* verTotal */
+		813,                /* horStart */
+		840,                /* horSyncStart */
+		633,                /* verSyncStart */
+		630627,             /* defRestart */
+		347,                /* crtcPLL_N */
+		14,                 /* crtcPLL_M */
+		8,                  /* crtcPLL_postDiv */
+		1022,               /* pixToTV */
+	},
+	{   /* PAL timing for 14 MHz ref clk */
+		800,                /* horResolution */
+		600,                /* verResolution */
+		TV_STD_PAL,         /* standard */
+		1131,               /* horTotal */
+		742,                /* verTotal */
+		813,                /* horStart */
+		840,                /* horSyncStart */
+		633,                /* verSyncStart */
+		708369,             /* defRestart */
+		211,                /* crtcPLL_N */
+		9,                  /* crtcPLL_M */
+		8,                  /* crtcPLL_postDiv */
+		759,                /* pixToTV */
+	},
+};
+
+#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
+
+static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(struct radeon_encoder *radeon_encoder,
+									    uint16_t *pll_ref_freq)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	const struct radeon_tv_mode_constants *const_ptr;
+	struct radeon_pll *pll;
+
+	radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
+	if (radeon_crtc->crtc_id == 1)
+		pll = &rdev->clock.p2pll;
+	else
+		pll = &rdev->clock.p1pll;
+
+	if (pll_ref_freq)
+		*pll_ref_freq = pll->reference_freq;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M) {
+		if (pll->reference_freq == 2700)
+			const_ptr = &available_tv_modes[0];
+		else
+			const_ptr = &available_tv_modes[2];
+	} else {
+		if (pll->reference_freq == 2700)
+			const_ptr = &available_tv_modes[1];
+		else
+			const_ptr = &available_tv_modes[3];
+	}
+	return const_ptr;
+}
+
+static long YCOEF_value[5] = { 2, 2, 0, 4, 0 };
+static long YCOEF_EN_value[5] = { 1, 1, 0, 1, 0 };
+static long SLOPE_value[5] = { 1, 2, 2, 4, 8 };
+static long SLOPE_limit[5] = { 6, 5, 4, 3, 2 };
+
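+/* This looks like a PLL lock wait: route the PLL test counter onto the
+ * debug mux, then n_tests times clear the counter byte through the
+ * CLOCK_CNTL_INDEX/DATA window and poll it up to n_wait_loops times until
+ * it reaches cnt_threshold.
+ */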
+static void radeon_wait_pll_lock(struct drm_encoder *encoder, unsigned n_tests,
+				 unsigned n_wait_loops, unsigned cnt_threshold)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t save_pll_test;
+	unsigned int i, j;
+
+	WREG32(RADEON_TEST_DEBUG_MUX, (RREG32(RADEON_TEST_DEBUG_MUX) & 0xffff60ff) | 0x100);
+	save_pll_test = RREG32_PLL(RADEON_PLL_TEST_CNTL);
+	WREG32_PLL(RADEON_PLL_TEST_CNTL, save_pll_test & ~RADEON_PLL_MASK_READ_B);
+
+	WREG8(RADEON_CLOCK_CNTL_INDEX, RADEON_PLL_TEST_CNTL);
+	for (i = 0; i < n_tests; i++) {
+		WREG8(RADEON_CLOCK_CNTL_DATA + 3, 0);
+		for (j = 0; j < n_wait_loops; j++)
+			if (RREG8(RADEON_CLOCK_CNTL_DATA + 3) >= cnt_threshold)
+				break;
+	}
+	WREG32_PLL(RADEON_PLL_TEST_CNTL, save_pll_test);
+	WREG32(RADEON_TEST_DEBUG_MUX, RREG32(RADEON_TEST_DEBUG_MUX) & 0xffffe0ff);
+}
+
+static void radeon_legacy_tv_write_fifo(struct radeon_encoder *radeon_encoder,
+					uint16_t addr, uint32_t value)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+	int i = 0;
+
+	WREG32(RADEON_TV_HOST_WRITE_DATA, value);
+
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr);
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr | RADEON_HOST_FIFO_WT);
+
+	do {
+		tmp = RREG32(RADEON_TV_HOST_RD_WT_CNTL);
+		if ((tmp & RADEON_HOST_FIFO_WT_ACK) == 0)
+			break;
+		i++;
+	} while (i < 10000);
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, 0);
+}
+
+#if 0 /* included for completeness */
+static uint32_t radeon_legacy_tv_read_fifo(struct radeon_encoder *radeon_encoder, uint16_t addr)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+	int i = 0;
+
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr);
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, addr | RADEON_HOST_FIFO_RD);
+
+	do {
+		tmp = RREG32(RADEON_TV_HOST_RD_WT_CNTL);
+		if ((tmp & RADEON_HOST_FIFO_RD_ACK) == 0)
+			break;
+		i++;
+	} while (i < 10000);
+	WREG32(RADEON_TV_HOST_RD_WT_CNTL, 0);
+	return RREG32(RADEON_TV_HOST_READ_DATA);
+}
+#endif
+
+static uint16_t radeon_get_htiming_tables_addr(uint32_t tv_uv_adr)
+{
+	uint16_t h_table;
+
+	switch ((tv_uv_adr & RADEON_HCODE_TABLE_SEL_MASK) >> RADEON_HCODE_TABLE_SEL_SHIFT) {
+	case 0:
+		h_table = RADEON_TV_MAX_FIFO_ADDR_INTERNAL;
+		break;
+	case 1:
+		h_table = ((tv_uv_adr & RADEON_TABLE1_BOT_ADR_MASK) >> RADEON_TABLE1_BOT_ADR_SHIFT) * 2;
+		break;
+	case 2:
+		h_table = ((tv_uv_adr & RADEON_TABLE3_TOP_ADR_MASK) >> RADEON_TABLE3_TOP_ADR_SHIFT) * 2;
+		break;
+	default:
+		h_table = 0;
+		break;
+	}
+	return h_table;
+}
+
+static uint16_t radeon_get_vtiming_tables_addr(uint32_t tv_uv_adr)
+{
+	uint16_t v_table;
+
+	switch ((tv_uv_adr & RADEON_VCODE_TABLE_SEL_MASK) >> RADEON_VCODE_TABLE_SEL_SHIFT) {
+	case 0:
+		v_table = ((tv_uv_adr & RADEON_MAX_UV_ADR_MASK) >> RADEON_MAX_UV_ADR_SHIFT) * 2 + 1;
+		break;
+	case 1:
+		v_table = ((tv_uv_adr & RADEON_TABLE1_BOT_ADR_MASK) >> RADEON_TABLE1_BOT_ADR_SHIFT) * 2 + 1;
+		break;
+	case 2:
+		v_table = ((tv_uv_adr & RADEON_TABLE3_TOP_ADR_MASK) >> RADEON_TABLE3_TOP_ADR_SHIFT) * 2 + 1;
+		break;
+	default:
+		v_table = 0;
+		break;
+	}
+	return v_table;
+}
+
+static void radeon_restore_tv_timing_tables(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	uint16_t h_table, v_table;
+	uint32_t tmp;
+	int i;
+
+	WREG32(RADEON_TV_UV_ADR, tv_dac->tv.tv_uv_adr);
+	h_table = radeon_get_htiming_tables_addr(tv_dac->tv.tv_uv_adr);
+	v_table = radeon_get_vtiming_tables_addr(tv_dac->tv.tv_uv_adr);
+
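+	/* Per the helpers above, the H code timing table sits at even FIFO
+	 * words and is written walking down from h_table, while the V table
+	 * sits at odd words walking up from v_table; each 32-bit write
+	 * packs two 14-bit entries.
+	 */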
+	for (i = 0; i < MAX_H_CODE_TIMING_LEN; i += 2, h_table--) {
+		tmp = ((uint32_t)tv_dac->tv.h_code_timing[i] << 14) | ((uint32_t)tv_dac->tv.h_code_timing[i+1]);
+		radeon_legacy_tv_write_fifo(radeon_encoder, h_table, tmp);
+		if (tv_dac->tv.h_code_timing[i] == 0 || tv_dac->tv.h_code_timing[i + 1] == 0)
+			break;
+	}
+	for (i = 0; i < MAX_V_CODE_TIMING_LEN; i += 2, v_table++) {
+		tmp = ((uint32_t)tv_dac->tv.v_code_timing[i+1] << 14) | ((uint32_t)tv_dac->tv.v_code_timing[i]);
+		radeon_legacy_tv_write_fifo(radeon_encoder, v_table, tmp);
+		if (tv_dac->tv.v_code_timing[i] == 0 || tv_dac->tv.v_code_timing[i + 1] == 0)
+			break;
+	}
+}
+
+static void radeon_legacy_write_tv_restarts(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	WREG32(RADEON_TV_FRESTART, tv_dac->tv.frestart);
+	WREG32(RADEON_TV_HRESTART, tv_dac->tv.hrestart);
+	WREG32(RADEON_TV_VRESTART, tv_dac->tv.vrestart);
+}
+
+static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	struct radeon_crtc *radeon_crtc;
+	int restart;
+	unsigned int h_total, v_total, f_total;
+	int v_offset, h_offset;
+	u16 p1, p2, h_inc;
+	bool h_changed;
+	const struct radeon_tv_mode_constants *const_ptr;
+	struct radeon_pll *pll;
+
+	radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
+	if (radeon_crtc->crtc_id == 1)
+		pll = &rdev->clock.p2pll;
+	else
+		pll = &rdev->clock.p1pll;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+	if (!const_ptr)
+		return false;
+
+	h_total = const_ptr->hor_total;
+	v_total = const_ptr->ver_total;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60)
+		f_total = NTSC_TV_VFTOTAL + 1;
+	else
+		f_total = PAL_TV_VFTOTAL + 1;
+
+	/* adjust positions 1 & 2 in the horizontal code timing table */
+	h_offset = tv_dac->h_pos * H_POS_UNIT;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M) {
+		h_offset -= 50;
+		p1 = hor_timing_NTSC[H_TABLE_POS1];
+		p2 = hor_timing_NTSC[H_TABLE_POS2];
+	} else {
+		p1 = hor_timing_PAL[H_TABLE_POS1];
+		p2 = hor_timing_PAL[H_TABLE_POS2];
+	}
+
+	p1 = (u16)((int)p1 + h_offset);
+	p2 = (u16)((int)p2 - h_offset);
+
+	h_changed = (p1 != tv_dac->tv.h_code_timing[H_TABLE_POS1] ||
+		     p2 != tv_dac->tv.h_code_timing[H_TABLE_POS2]);
+
+	tv_dac->tv.h_code_timing[H_TABLE_POS1] = p1;
+	tv_dac->tv.h_code_timing[H_TABLE_POS2] = p2;
+
+	/* Convert hOffset from n. of TV clock periods to n. of CRTC clock periods (CRTC pixels) */
+	h_offset = (h_offset * (int)(const_ptr->pix_to_tv)) / 1000;
+
+	/* adjust restart */
+	restart = const_ptr->def_restart;
+
+	/*
+	 * convert v_pos TV lines to n. of CRTC pixels
+	 */
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60)
+		v_offset = ((int)(v_total * h_total) * 2 * tv_dac->v_pos) / (int)(NTSC_TV_LINES_PER_FRAME);
+	else
+		v_offset = ((int)(v_total * h_total) * 2 * tv_dac->v_pos) / (int)(PAL_TV_LINES_PER_FRAME);
+
+	restart -= v_offset + h_offset;
+
+	DRM_DEBUG_KMS("compute_restarts: def = %u h = %d v = %d, p1 = %04x, p2 = %04x, restart = %d\n",
+		  const_ptr->def_restart, tv_dac->h_pos, tv_dac->v_pos, p1, p2, restart);
+
+	tv_dac->tv.hrestart = restart % h_total;
+	restart /= h_total;
+	tv_dac->tv.vrestart = restart % v_total;
+	restart /= v_total;
+	tv_dac->tv.frestart = restart % f_total;
+
+	DRM_DEBUG_KMS("compute_restart: F/H/V=%u,%u,%u\n",
+		  (unsigned)tv_dac->tv.frestart,
+		  (unsigned)tv_dac->tv.vrestart,
+		  (unsigned)tv_dac->tv.hrestart);
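+
+	/* For instance, with the NTSC/27 MHz mode above and h_pos = v_pos = 0:
+	 * h_offset ends up -50 TV clocks -> -51 CRTC pixels, so
+	 * restart = 625592 + 51 = 625643, giving hrestart = 625643 % 990 = 953,
+	 * vrestart = (625643 / 990) % 740 = 631 and frestart = 0.
+	 */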
+
+	/* compute h_inc from hsize */
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M)
+		h_inc = (u16)((int)(const_ptr->hor_resolution * 4096 * NTSC_TV_CLOCK_T) /
+			      (tv_dac->h_size * (int)(NTSC_TV_H_SIZE_UNIT) + (int)(NTSC_TV_ZERO_H_SIZE)));
+	else
+		h_inc = (u16)((int)(const_ptr->hor_resolution * 4096 * PAL_TV_CLOCK_T) /
+			      (tv_dac->h_size * (int)(PAL_TV_H_SIZE_UNIT) + (int)(PAL_TV_ZERO_H_SIZE)));
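+
+	/* For NTSC with h_size = 0 this works out to
+	 * 800 * 4096 * 233 / 479166 ~= 1593; larger h_size values shrink
+	 * h_inc, which widens the displayed picture.
+	 */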
+
+	tv_dac->tv.timing_cntl = (tv_dac->tv.timing_cntl & ~RADEON_H_INC_MASK) |
+		((u32)h_inc << RADEON_H_INC_SHIFT);
+
+	DRM_DEBUG_KMS("compute_restart: h_size = %d h_inc = %d\n", tv_dac->h_size, h_inc);
+
+	return h_changed;
+}
+
+void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
+	const struct radeon_tv_mode_constants *const_ptr;
+	struct radeon_crtc *radeon_crtc;
+	int i;
+	uint16_t pll_ref_freq;
+	uint32_t vert_space, flicker_removal, tmp;
+	uint32_t tv_master_cntl, tv_rgb_cntl, tv_dac_cntl;
+	uint32_t tv_modulator_cntl1, tv_modulator_cntl2;
+	uint32_t tv_vscaler_cntl1, tv_vscaler_cntl2;
+	uint32_t tv_pll_cntl, tv_pll_cntl1, tv_ftotal;
+	uint32_t tv_y_fall_cntl, tv_y_rise_cntl, tv_y_saw_tooth_cntl;
+	uint32_t m, n, p;
+	const uint16_t *hor_timing;
+	const uint16_t *vert_timing;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, &pll_ref_freq);
+	if (!const_ptr)
+		return;
+
+	radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+	tv_master_cntl = (RADEON_VIN_ASYNC_RST |
+			  RADEON_CRT_FIFO_CE_EN |
+			  RADEON_TV_FIFO_CE_EN |
+			  RADEON_TV_ON);
+
+	if (!ASIC_IS_R300(rdev))
+		tv_master_cntl |= RADEON_TVCLK_ALWAYS_ONb;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J)
+		tv_master_cntl |= RADEON_RESTART_PHASE_FIX;
+
+	tv_modulator_cntl1 = (RADEON_SLEW_RATE_LIMIT |
+			      RADEON_SYNC_TIP_LEVEL |
+			      RADEON_YFLT_EN |
+			      RADEON_UVFLT_EN |
+			      (6 << RADEON_CY_FILT_BLEND_SHIFT));
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J) {
+		tv_modulator_cntl1 |= (0x46 << RADEON_SET_UP_LEVEL_SHIFT) |
+			(0x3b << RADEON_BLANK_LEVEL_SHIFT);
+		tv_modulator_cntl2 = (-111 & RADEON_TV_U_BURST_LEVEL_MASK) |
+			((0 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
+	} else if (tv_dac->tv_std == TV_STD_SCART_PAL) {
+		tv_modulator_cntl1 |= RADEON_ALT_PHASE_EN;
+		tv_modulator_cntl2 = (0 & RADEON_TV_U_BURST_LEVEL_MASK) |
+			((0 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
+	} else {
+		tv_modulator_cntl1 |= RADEON_ALT_PHASE_EN |
+			(0x3b << RADEON_SET_UP_LEVEL_SHIFT) |
+			(0x3b << RADEON_BLANK_LEVEL_SHIFT);
+		tv_modulator_cntl2 = (-78 & RADEON_TV_U_BURST_LEVEL_MASK) |
+			((62 & RADEON_TV_V_BURST_LEVEL_MASK) << RADEON_TV_V_BURST_LEVEL_SHIFT);
+	}
+
+	tv_rgb_cntl = (RADEON_RGB_DITHER_EN
+		       | RADEON_TVOUT_SCALE_EN
+		       | (0x0b << RADEON_UVRAM_READ_MARGIN_SHIFT)
+		       | (0x07 << RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT)
+		       | RADEON_RGB_ATTEN_SEL(0x3)
+		       | RADEON_RGB_ATTEN_VAL(0xc));
+
+	if (radeon_crtc->crtc_id == 1)
+		tv_rgb_cntl |= RADEON_RGB_SRC_SEL_CRTC2;
+	else {
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			tv_rgb_cntl |= RADEON_RGB_SRC_SEL_RMX;
+		else
+			tv_rgb_cntl |= RADEON_RGB_SRC_SEL_CRTC1;
+	}
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60)
+		vert_space = const_ptr->ver_total * 2 * 10000 / NTSC_TV_LINES_PER_FRAME;
+	else
+		vert_space = const_ptr->ver_total * 2 * 10000 / PAL_TV_LINES_PER_FRAME;
+
+	tmp = RREG32(RADEON_TV_VSCALER_CNTL1);
+	tmp &= 0xe3ff0000;
+	tmp |= (vert_space * (1 << FRAC_BITS) / 10000);
+	tv_vscaler_cntl1 = tmp;
+
+	if (pll_ref_freq == 2700)
+		tv_vscaler_cntl1 |= RADEON_RESTART_FIELD;
+
+	if (const_ptr->hor_resolution == 1024)
+		tv_vscaler_cntl1 |= (4 << RADEON_Y_DEL_W_SIG_SHIFT);
+	else
+		tv_vscaler_cntl1 |= (2 << RADEON_Y_DEL_W_SIG_SHIFT);
+
+	/* scale up for int divide */
+	tmp = const_ptr->ver_total * 2 * 1000;
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60) {
+		tmp /= NTSC_TV_LINES_PER_FRAME;
+	} else {
+		tmp /= PAL_TV_LINES_PER_FRAME;
+	}
+	flicker_removal = (tmp + 500) / 1000;
+
+	if (flicker_removal < 3)
+		flicker_removal = 3;
+	for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
+		if (flicker_removal == SLOPE_limit[i])
+			break;
+	}
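+
+	/* For the 800x600 modes in available_tv_modes this always computes
+	 * flicker_removal = 3, matching SLOPE_limit[3]; the SLOPE/YCOEF
+	 * tables are sized on the assumption that a match is found, so a
+	 * value outside 2..6 would index past their ends.
+	 */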
+
+	tv_y_saw_tooth_cntl = (vert_space * SLOPE_value[i] * (1 << (FRAC_BITS - 1)) +
+				5001) / 10000 / 8 | ((SLOPE_value[i] *
+				(1 << (FRAC_BITS - 1)) / 8) << 16);
+	tv_y_fall_cntl =
+		(YCOEF_EN_value[i] << 17) | ((YCOEF_value[i] * (1 << 8) / 8) << 24) |
+		RADEON_Y_FALL_PING_PONG | (272 * SLOPE_value[i] / 8) * (1 << (FRAC_BITS - 1)) /
+		1024;
+	tv_y_rise_cntl = RADEON_Y_RISE_PING_PONG|
+		(flicker_removal * 1024 - 272) * SLOPE_value[i] / 8 * (1 << (FRAC_BITS - 1)) / 1024;
+
+	tv_vscaler_cntl2 = RREG32(RADEON_TV_VSCALER_CNTL2) & 0x00fffff0;
+	tv_vscaler_cntl2 |= (0x10 << 24) |
+		RADEON_DITHER_MODE |
+		RADEON_Y_OUTPUT_DITHER_EN |
+		RADEON_UV_OUTPUT_DITHER_EN |
+		RADEON_UV_TO_BUF_DITHER_EN;
+
+	tmp = (tv_vscaler_cntl1 >> RADEON_UV_INC_SHIFT) & RADEON_UV_INC_MASK;
+	tmp = ((16384 * 256 * 10) / tmp + 5) / 10;
+	tmp = (tmp << RADEON_UV_OUTPUT_POST_SCALE_SHIFT) | 0x000b0000;
+	tv_dac->tv.timing_cntl = tmp;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60)
+		tv_dac_cntl = tv_dac->ntsc_tvdac_adj;
+	else
+		tv_dac_cntl = tv_dac->pal_tvdac_adj;
+
+	tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J)
+		tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
+	else
+		tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J) {
+		if (pll_ref_freq == 2700) {
+			m = NTSC_TV_PLL_M_27;
+			n = NTSC_TV_PLL_N_27;
+			p = NTSC_TV_PLL_P_27;
+		} else {
+			m = NTSC_TV_PLL_M_14;
+			n = NTSC_TV_PLL_N_14;
+			p = NTSC_TV_PLL_P_14;
+		}
+	} else {
+		if (pll_ref_freq == 2700) {
+			m = PAL_TV_PLL_M_27;
+			n = PAL_TV_PLL_N_27;
+			p = PAL_TV_PLL_P_27;
+		} else {
+			m = PAL_TV_PLL_M_14;
+			n = PAL_TV_PLL_N_14;
+			p = PAL_TV_PLL_P_14;
+		}
+	}
+
+	tv_pll_cntl = (m & RADEON_TV_M0LO_MASK) |
+		(((m >> 8) & RADEON_TV_M0HI_MASK) << RADEON_TV_M0HI_SHIFT) |
+		((n & RADEON_TV_N0LO_MASK) << RADEON_TV_N0LO_SHIFT) |
+		(((n >> 9) & RADEON_TV_N0HI_MASK) << RADEON_TV_N0HI_SHIFT) |
+		((p & RADEON_TV_P_MASK) << RADEON_TV_P_SHIFT);
+
+	tv_pll_cntl1 = (((4 & RADEON_TVPCP_MASK) << RADEON_TVPCP_SHIFT) |
+			((4 & RADEON_TVPVG_MASK) << RADEON_TVPVG_SHIFT) |
+			((1 & RADEON_TVPDC_MASK) << RADEON_TVPDC_SHIFT) |
+			RADEON_TVCLK_SRC_SEL_TVPLL |
+			RADEON_TVPLL_TEST_DIS);
+
+	tv_dac->tv.tv_uv_adr = 0xc8;
+
+	if (tv_dac->tv_std == TV_STD_NTSC ||
+	    tv_dac->tv_std == TV_STD_NTSC_J ||
+	    tv_dac->tv_std == TV_STD_PAL_M ||
+	    tv_dac->tv_std == TV_STD_PAL_60) {
+		tv_ftotal = NTSC_TV_VFTOTAL;
+		hor_timing = hor_timing_NTSC;
+		vert_timing = vert_timing_NTSC;
+	} else {
+		hor_timing = hor_timing_PAL;
+		vert_timing = vert_timing_PAL;
+		tv_ftotal = PAL_TV_VFTOTAL;
+	}
+
+	for (i = 0; i < MAX_H_CODE_TIMING_LEN; i++) {
+		tv_dac->tv.h_code_timing[i] = hor_timing[i];
+		if (tv_dac->tv.h_code_timing[i] == 0)
+			break;
+	}
+
+	for (i = 0; i < MAX_V_CODE_TIMING_LEN; i++) {
+		tv_dac->tv.v_code_timing[i] = vert_timing[i];
+		if (tv_dac->tv.v_code_timing[i] == 0)
+			break;
+	}
+
+	radeon_legacy_tv_init_restarts(encoder);
+
+	/* DAC_CNTL, GPIOPAD_A and DISP_OUTPUT_CNTL (and the choice of
+	 * reference frequency) are handled by the caller,
+	 * radeon_legacy_tv_dac_mode_set().
+	 */
+
+	/* program the TV registers */
+	WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST |
+				       RADEON_CRT_ASYNC_RST | RADEON_TV_FIFO_ASYNC_RST));
+
+	tmp = RREG32(RADEON_TV_DAC_CNTL);
+	tmp &= ~RADEON_TV_DAC_NBLANK;
+	tmp |= RADEON_TV_DAC_BGSLEEP |
+		RADEON_TV_DAC_RDACPD |
+		RADEON_TV_DAC_GDACPD |
+		RADEON_TV_DAC_BDACPD;
+	WREG32(RADEON_TV_DAC_CNTL, tmp);
+
+	/* TV PLL */
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVCLK_SRC_SEL_TVPLL);
+	WREG32_PLL(RADEON_TV_PLL_CNTL, tv_pll_cntl);
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, RADEON_TVPLL_RESET, ~RADEON_TVPLL_RESET);
+
+	radeon_wait_pll_lock(encoder, 200, 800, 135);
+
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVPLL_RESET);
+
+	radeon_wait_pll_lock(encoder, 300, 160, 27);
+	radeon_wait_pll_lock(encoder, 200, 800, 135);
+
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~0xf);
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, RADEON_TVCLK_SRC_SEL_TVPLL, ~RADEON_TVCLK_SRC_SEL_TVPLL);
+
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, (1 << RADEON_TVPDC_SHIFT), ~RADEON_TVPDC_MASK);
+	WREG32_PLL_P(RADEON_TV_PLL_CNTL1, 0, ~RADEON_TVPLL_SLEEP);
+
+	/* TV HV */
+	WREG32(RADEON_TV_RGB_CNTL, tv_rgb_cntl);
+	WREG32(RADEON_TV_HTOTAL, const_ptr->hor_total - 1);
+	WREG32(RADEON_TV_HDISP, const_ptr->hor_resolution - 1);
+	WREG32(RADEON_TV_HSTART, const_ptr->hor_start);
+
+	WREG32(RADEON_TV_VTOTAL, const_ptr->ver_total - 1);
+	WREG32(RADEON_TV_VDISP, const_ptr->ver_resolution - 1);
+	WREG32(RADEON_TV_FTOTAL, tv_ftotal);
+	WREG32(RADEON_TV_VSCALER_CNTL1, tv_vscaler_cntl1);
+	WREG32(RADEON_TV_VSCALER_CNTL2, tv_vscaler_cntl2);
+
+	WREG32(RADEON_TV_Y_FALL_CNTL, tv_y_fall_cntl);
+	WREG32(RADEON_TV_Y_RISE_CNTL, tv_y_rise_cntl);
+	WREG32(RADEON_TV_Y_SAW_TOOTH_CNTL, tv_y_saw_tooth_cntl);
+
+	WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST |
+				       RADEON_CRT_ASYNC_RST));
+
+	/* TV restarts */
+	radeon_legacy_write_tv_restarts(radeon_encoder);
+
+	/* tv timings */
+	radeon_restore_tv_timing_tables(radeon_encoder);
+
+	WREG32(RADEON_TV_MASTER_CNTL, (tv_master_cntl | RADEON_TV_ASYNC_RST));
+
+	/* tv std */
+	WREG32(RADEON_TV_SYNC_CNTL, (RADEON_SYNC_PUB | RADEON_TV_SYNC_IO_DRIVE));
+	WREG32(RADEON_TV_TIMING_CNTL, tv_dac->tv.timing_cntl);
+	WREG32(RADEON_TV_MODULATOR_CNTL1, tv_modulator_cntl1);
+	WREG32(RADEON_TV_MODULATOR_CNTL2, tv_modulator_cntl2);
+	WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, (RADEON_Y_RED_EN |
+					    RADEON_C_GRN_EN |
+					    RADEON_CMP_BLU_EN |
+					    RADEON_DAC_DITHER_EN));
+
+	WREG32(RADEON_TV_CRC_CNTL, 0);
+
+	WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
+
+	WREG32(RADEON_TV_GAIN_LIMIT_SETTINGS, ((0x17f << RADEON_UV_GAIN_LIMIT_SHIFT) |
+					       (0x5ff << RADEON_Y_GAIN_LIMIT_SHIFT)));
+	WREG32(RADEON_TV_LINEAR_GAIN_SETTINGS, ((0x100 << RADEON_UV_GAIN_SHIFT) |
+						(0x100 << RADEON_Y_GAIN_SHIFT)));
+
+	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+}
+
+void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder,
+				      uint32_t *h_total_disp, uint32_t *h_sync_strt_wid,
+				      uint32_t *v_total_disp, uint32_t *v_sync_strt_wid)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	const struct radeon_tv_mode_constants *const_ptr;
+	uint32_t tmp;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+	if (!const_ptr)
+		return;
+
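+	/* The CRTC horizontal fields are in 8-pixel character units; for the
+	 * NTSC/27 MHz mode this packs H disp = 800 / 8 - 1 = 99 and
+	 * H total = 990 / 8 - 1 = 122 into h_total_disp.
+	 */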
+	*h_total_disp = (((const_ptr->hor_resolution / 8) - 1) << RADEON_CRTC_H_DISP_SHIFT) |
+		(((const_ptr->hor_total / 8) - 1) << RADEON_CRTC_H_TOTAL_SHIFT);
+
+	tmp = *h_sync_strt_wid;
+	tmp &= ~(RADEON_CRTC_H_SYNC_STRT_PIX | RADEON_CRTC_H_SYNC_STRT_CHAR);
+	tmp |= (((const_ptr->hor_syncstart / 8) - 1) << RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT) |
+		(const_ptr->hor_syncstart & 7);
+	*h_sync_strt_wid = tmp;
+
+	*v_total_disp = ((const_ptr->ver_resolution - 1) << RADEON_CRTC_V_DISP_SHIFT) |
+		((const_ptr->ver_total - 1) << RADEON_CRTC_V_TOTAL_SHIFT);
+
+	tmp = *v_sync_strt_wid;
+	tmp &= ~RADEON_CRTC_V_SYNC_STRT;
+	tmp |= ((const_ptr->ver_syncstart - 1) << RADEON_CRTC_V_SYNC_STRT_SHIFT);
+	*v_sync_strt_wid = tmp;
+}
+
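+/* This looks like the PPLL post-divider field encoding: a divide-by value
+ * maps to the register code (/1 -> 0, /2 -> 1, /4 -> 2, /8 -> 3, ...), and
+ * callers shift the code into bits 16+ of the feedback-divider word.
+ */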
+static int get_post_div(int value)
+{
+	int post_div;
+	switch (value) {
+	case 1: post_div = 0; break;
+	case 2: post_div = 1; break;
+	case 3: post_div = 4; break;
+	case 4: post_div = 2; break;
+	case 6: post_div = 6; break;
+	case 8: post_div = 3; break;
+	case 12: post_div = 7; break;
+	case 16:
+	default: post_div = 5; break;
+	}
+	return post_div;
+}
+
+void radeon_legacy_tv_adjust_pll1(struct drm_encoder *encoder,
+				  uint32_t *htotal_cntl, uint32_t *ppll_ref_div,
+				  uint32_t *ppll_div_3, uint32_t *pixclks_cntl)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	const struct radeon_tv_mode_constants *const_ptr;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+	if (!const_ptr)
+		return;
+
+	*htotal_cntl = (const_ptr->hor_total & 0x7) | RADEON_HTOT_CNTL_VGA_EN;
+
+	*ppll_ref_div = const_ptr->crtcPLL_M;
+
+	*ppll_div_3 = (const_ptr->crtcPLL_N & 0x7ff) | (get_post_div(const_ptr->crtcPLL_post_div) << 16);
+	*pixclks_cntl &= ~(RADEON_PIX2CLK_SRC_SEL_MASK | RADEON_PIXCLK_TV_SRC_SEL);
+	*pixclks_cntl |= RADEON_PIX2CLK_SRC_SEL_P2PLLCLK;
+}
+
+void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
+				  uint32_t *htotal2_cntl, uint32_t *p2pll_ref_div,
+				  uint32_t *p2pll_div_0, uint32_t *pixclks_cntl)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	const struct radeon_tv_mode_constants *const_ptr;
+
+	const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
+	if (!const_ptr)
+		return;
+
+	*htotal2_cntl = (const_ptr->hor_total & 0x7);
+
+	*p2pll_ref_div = const_ptr->crtcPLL_M;
+
+	*p2pll_div_0 = (const_ptr->crtcPLL_N & 0x7ff) | (get_post_div(const_ptr->crtcPLL_post_div) << 16);
+	*pixclks_cntl &= ~RADEON_PIX2CLK_SRC_SEL_MASK;
+	*pixclks_cntl |= RADEON_PIX2CLK_SRC_SEL_P2PLLCLK | RADEON_PIXCLK_TV_SRC_SEL;
+}
+
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_mem.c b/linux-imx/drivers/gpu/drm/radeon/radeon_mem.c
new file mode 100644
index 0000000..d54d2d7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_mem.c
@@ -0,0 +1,302 @@
+/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
+/*
+ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+/* Very simple allocator for GART memory, working on a static range
+ * already mapped into each client's address space.
+ */
+
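+/* Carve [start, start + size) out of free block 'p': split off any
+ * leading and trailing remainder into new free blocks, then mark the
+ * middle block as owned by 'file_priv'.
+ */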
+static struct mem_block *split_block(struct mem_block *p, int start, int size,
+				     struct drm_file *file_priv)
+{
+	/* Maybe cut off the start of an existing block */
+	if (start > p->start) {
+		struct mem_block *newblock = kmalloc(sizeof(*newblock),
+						     GFP_KERNEL);
+		if (!newblock)
+			goto out;
+		newblock->start = start;
+		newblock->size = p->size - (start - p->start);
+		newblock->file_priv = NULL;
+		newblock->next = p->next;
+		newblock->prev = p;
+		p->next->prev = newblock;
+		p->next = newblock;
+		p->size -= newblock->size;
+		p = newblock;
+	}
+
+	/* Maybe cut off the end of an existing block */
+	if (size < p->size) {
+		struct mem_block *newblock = kmalloc(sizeof(*newblock),
+						     GFP_KERNEL);
+		if (!newblock)
+			goto out;
+		newblock->start = start + size;
+		newblock->size = p->size - size;
+		newblock->file_priv = NULL;
+		newblock->next = p->next;
+		newblock->prev = p;
+		p->next->prev = newblock;
+		p->next = newblock;
+		p->size = size;
+	}
+
+out:
+	/* Our block is in the middle */
+	p->file_priv = file_priv;
+	return p;
+}
+
+static struct mem_block *alloc_block(struct mem_block *heap, int size,
+				     int align2, struct drm_file *file_priv)
+{
+	struct mem_block *p;
+	int mask = (1 << align2) - 1;
+
+	list_for_each(p, heap) {
+		int start = (p->start + mask) & ~mask;
+		if (p->file_priv == NULL && start + size <= p->start + p->size)
+			return split_block(p, start, size, file_priv);
+	}
+
+	return NULL;
+}
+
+static struct mem_block *find_block(struct mem_block *heap, int start)
+{
+	struct mem_block *p;
+
+	list_for_each(p, heap)
+	    if (p->start == start)
+		return p;
+
+	return NULL;
+}
+
+static void free_block(struct mem_block *p)
+{
+	p->file_priv = NULL;
+
+	/* Assumes a single contiguous range.  Needs a special file_priv in
+	 * 'heap' to stop it being subsumed.
+	 */
+	if (p->next->file_priv == NULL) {
+		struct mem_block *q = p->next;
+		p->size += q->size;
+		p->next = q->next;
+		p->next->prev = p;
+		kfree(q);
+	}
+
+	if (p->prev->file_priv == NULL) {
+		struct mem_block *q = p->prev;
+		q->size += p->size;
+		q->next = p->next;
+		q->next->prev = q;
+		kfree(p);
+	}
+}
+
+/* Initialize.  How to check for an uninitialized heap?
+ */
+static int init_heap(struct mem_block **heap, int start, int size)
+{
+	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
+
+	if (!blocks)
+		return -ENOMEM;
+
+	*heap = kzalloc(sizeof(**heap), GFP_KERNEL);
+	if (!*heap) {
+		kfree(blocks);
+		return -ENOMEM;
+	}
+
+	blocks->start = start;
+	blocks->size = size;
+	blocks->file_priv = NULL;
+	blocks->next = blocks->prev = *heap;
+
+	(*heap)->file_priv = (struct drm_file *)-1;
+	(*heap)->next = (*heap)->prev = blocks;
+	return 0;
+}
+
+/* Free all blocks associated with the releasing file.
+ */
+void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
+{
+	struct mem_block *p;
+
+	if (!heap || !heap->next)
+		return;
+
+	list_for_each(p, heap) {
+		if (p->file_priv == file_priv)
+			p->file_priv = NULL;
+	}
+
+	/* Assumes a single contiguous range.  Needs a special file_priv in
+	 * 'heap' to stop it being subsumed.
+	 */
+	list_for_each(p, heap) {
+		while (p->file_priv == NULL && p->next->file_priv == NULL) {
+			struct mem_block *q = p->next;
+			p->size += q->size;
+			p->next = q->next;
+			p->next->prev = p;
+			kfree(q);
+		}
+	}
+}
+
+/* Shutdown.
+ */
+void radeon_mem_takedown(struct mem_block **heap)
+{
+	struct mem_block *p;
+
+	if (!*heap)
+		return;
+
+	for (p = (*heap)->next; p != *heap;) {
+		struct mem_block *q = p;
+		p = p->next;
+		kfree(q);
+	}
+
+	kfree(*heap);
+	*heap = NULL;
+}
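+
+/*
+ * Editor's sketch (not part of the driver, compiled out): how the heap
+ * helpers above fit together.  The 1 MiB range and the caller are
+ * illustrative assumptions.
+ */
+#if 0
+static void example_heap_usage(struct drm_file *file_priv)
+{
+	struct mem_block *heap, *block;
+
+	if (init_heap(&heap, 0, 1 << 20))	/* one free block [0, 1M) */
+		return;
+	/* first fit, 2^12 = 4k aligned */
+	block = alloc_block(heap, 4096, 12, file_priv);
+	if (block)
+		free_block(block);	/* coalesces with free neighbours */
+	radeon_mem_takedown(&heap);
+}
+#endif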
+
+/* IOCTL HANDLERS */
+
+static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
+{
+	switch (region) {
+	case RADEON_MEM_REGION_GART:
+		return &dev_priv->gart_heap;
+	case RADEON_MEM_REGION_FB:
+		return &dev_priv->fb_heap;
+	default:
+		return NULL;
+	}
+}
+
+int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_mem_alloc_t *alloc = data;
+	struct mem_block *block, **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, alloc->region);
+	if (!heap || !*heap)
+		return -EFAULT;
+
+	/* Make things easier on ourselves: all allocations are at least
+	 * 4k aligned (alloc->alignment is the log2 of the byte alignment,
+	 * so 12 means 1 << 12 = 4096).
+	 */
+	if (alloc->alignment < 12)
+		alloc->alignment = 12;
+
+	block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
+
+	if (!block)
+		return -ENOMEM;
+
+	if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+			     sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_mem_free_t *memfree = data;
+	struct mem_block *block, **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, memfree->region);
+	if (!heap || !*heap)
+		return -EFAULT;
+
+	block = find_block(*heap, memfree->region_offset);
+	if (!block)
+		return -EFAULT;
+
+	if (block->file_priv != file_priv)
+		return -EPERM;
+
+	free_block(block);
+	return 0;
+}
+
+int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_mem_init_heap_t *initheap = data;
+	struct mem_block **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, initheap->region);
+	if (!heap)
+		return -EFAULT;
+
+	if (*heap) {
+		DRM_ERROR("heap already initialized?");
+		return -EFAULT;
+	}
+
+	return init_heap(heap, initheap->start, initheap->size);
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_mode.h b/linux-imx/drivers/gpu/drm/radeon/radeon_mode.h
new file mode 100644
index 0000000..69ad4fe
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_mode.h
@@ -0,0 +1,761 @@
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ *                VA Linux Systems Inc., Fremont, California.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Original Authors:
+ *   Kevin E. Martin, Rickard E. Faith, Alan Hourihane
+ *
+ * Kernel port Author: Dave Airlie
+ */
+
+#ifndef RADEON_MODE_H
+#define RADEON_MODE_H
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+struct radeon_bo;
+struct radeon_device;
+
+#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
+#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
+#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
+#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
+
+enum radeon_rmx_type {
+	RMX_OFF,
+	RMX_FULL,
+	RMX_CENTER,
+	RMX_ASPECT
+};
+
+enum radeon_tv_std {
+	TV_STD_NTSC,
+	TV_STD_PAL,
+	TV_STD_PAL_M,
+	TV_STD_PAL_60,
+	TV_STD_NTSC_J,
+	TV_STD_SCART_PAL,
+	TV_STD_SECAM,
+	TV_STD_PAL_CN,
+	TV_STD_PAL_N,
+};
+
+enum radeon_underscan_type {
+	UNDERSCAN_OFF,
+	UNDERSCAN_ON,
+	UNDERSCAN_AUTO,
+};
+
+enum radeon_hpd_id {
+	RADEON_HPD_1 = 0,
+	RADEON_HPD_2,
+	RADEON_HPD_3,
+	RADEON_HPD_4,
+	RADEON_HPD_5,
+	RADEON_HPD_6,
+	RADEON_HPD_NONE = 0xff,
+};
+
+#define RADEON_MAX_I2C_BUS 16
+
+/* radeon gpio-based i2c
+ * 1. "mask" reg and bits
+ *    grabs the gpio pins for software use
+ *    0=not held  1=held
+ * 2. "a" reg and bits
+ *    output pin value
+ *    0=low 1=high
+ * 3. "en" reg and bits
+ *    sets the pin direction
+ *    0=input 1=output
+ * 4. "y" reg and bits
+ *    input pin value
+ *    0=low 1=high
+ */
+struct radeon_i2c_bus_rec {
+	bool valid;
+	/* id used by atom */
+	uint8_t i2c_id;
+	/* hpd pin id used by atom */
+	enum radeon_hpd_id hpd;
+	/* can be used with hw i2c engine */
+	bool hw_capable;
+	/* uses multi-media i2c engine */
+	bool mm_i2c;
+	/* regs and bits */
+	uint32_t mask_clk_reg;
+	uint32_t mask_data_reg;
+	uint32_t a_clk_reg;
+	uint32_t a_data_reg;
+	uint32_t en_clk_reg;
+	uint32_t en_data_reg;
+	uint32_t y_clk_reg;
+	uint32_t y_data_reg;
+	uint32_t mask_clk_mask;
+	uint32_t mask_data_mask;
+	uint32_t a_clk_mask;
+	uint32_t a_data_mask;
+	uint32_t en_clk_mask;
+	uint32_t en_data_mask;
+	uint32_t y_clk_mask;
+	uint32_t y_data_mask;
+};
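+
+/*
+ * Editor's sketch of the register scheme above (not driver code,
+ * compiled out): bit-banging the clock line open-drain style.
+ * WREG32()/RREG32() are the usual radeon MMIO accessors and are
+ * assumed to be in scope here.
+ */
+#if 0
+static void example_set_clock(struct radeon_device *rdev,
+			      struct radeon_i2c_bus_rec *rec, int clock)
+{
+	u32 val;
+
+	/* hold the pin for software use */
+	WREG32(rec->mask_clk_reg,
+	       RREG32(rec->mask_clk_reg) | rec->mask_clk_mask);
+
+	/* en=1 drives the pin (low), en=0 floats the line high */
+	val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+	val |= clock ? 0 : rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, val);
+}
+#endif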
+
+struct radeon_tmds_pll {
+    uint32_t freq;
+    uint32_t value;
+};
+
+#define RADEON_MAX_BIOS_CONNECTOR 16
+
+/* pll flags */
+#define RADEON_PLL_USE_BIOS_DIVS        (1 << 0)
+#define RADEON_PLL_NO_ODD_POST_DIV      (1 << 1)
+#define RADEON_PLL_USE_REF_DIV          (1 << 2)
+#define RADEON_PLL_LEGACY               (1 << 3)
+#define RADEON_PLL_PREFER_LOW_REF_DIV   (1 << 4)
+#define RADEON_PLL_PREFER_HIGH_REF_DIV  (1 << 5)
+#define RADEON_PLL_PREFER_LOW_FB_DIV    (1 << 6)
+#define RADEON_PLL_PREFER_HIGH_FB_DIV   (1 << 7)
+#define RADEON_PLL_PREFER_LOW_POST_DIV  (1 << 8)
+#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
+#define RADEON_PLL_USE_FRAC_FB_DIV      (1 << 10)
+#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
+#define RADEON_PLL_USE_POST_DIV         (1 << 12)
+#define RADEON_PLL_IS_LCD               (1 << 13)
+#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
+
+struct radeon_pll {
+	/* reference frequency */
+	uint32_t reference_freq;
+
+	/* fixed dividers */
+	uint32_t reference_div;
+	uint32_t post_div;
+
+	/* pll in/out limits */
+	uint32_t pll_in_min;
+	uint32_t pll_in_max;
+	uint32_t pll_out_min;
+	uint32_t pll_out_max;
+	uint32_t lcd_pll_out_min;
+	uint32_t lcd_pll_out_max;
+	uint32_t best_vco;
+
+	/* divider limits */
+	uint32_t min_ref_div;
+	uint32_t max_ref_div;
+	uint32_t min_post_div;
+	uint32_t max_post_div;
+	uint32_t min_feedback_div;
+	uint32_t max_feedback_div;
+	uint32_t min_frac_feedback_div;
+	uint32_t max_frac_feedback_div;
+
+	/* flags for the current clock */
+	uint32_t flags;
+
+	/* pll id */
+	uint32_t id;
+};
+
+struct radeon_i2c_chan {
+	struct i2c_adapter adapter;
+	struct drm_device *dev;
+	union {
+		struct i2c_algo_bit_data bit;
+		struct i2c_algo_dp_aux_data dp;
+	} algo;
+	struct radeon_i2c_bus_rec rec;
+};
+
+/* mostly for macs, but really any system without connector tables */
+enum radeon_connector_table {
+	CT_NONE = 0,
+	CT_GENERIC,
+	CT_IBOOK,
+	CT_POWERBOOK_EXTERNAL,
+	CT_POWERBOOK_INTERNAL,
+	CT_POWERBOOK_VGA,
+	CT_MINI_EXTERNAL,
+	CT_MINI_INTERNAL,
+	CT_IMAC_G5_ISIGHT,
+	CT_EMAC,
+	CT_RN50_POWER,
+	CT_MAC_X800,
+	CT_MAC_G5_9600,
+	CT_SAM440EP,
+	CT_MAC_G4_SILVER
+};
+
+enum radeon_dvo_chip {
+	DVO_SIL164,
+	DVO_SIL1178,
+};
+
+struct radeon_fbdev;
+
+struct radeon_afmt {
+	bool enabled;
+	int offset;
+	bool last_buffer_filled_status;
+	int id;
+};
+
+struct radeon_mode_info {
+	struct atom_context *atom_context;
+	struct card_info *atom_card_info;
+	enum radeon_connector_table connector_table;
+	bool mode_config_initialized;
+	struct radeon_crtc *crtcs[6];
+	struct radeon_afmt *afmt[6];
+	/* DVI-I properties */
+	struct drm_property *coherent_mode_property;
+	/* DAC enable load detect */
+	struct drm_property *load_detect_property;
+	/* TV standard */
+	struct drm_property *tv_std_property;
+	/* legacy TMDS PLL detect */
+	struct drm_property *tmds_pll_property;
+	/* underscan */
+	struct drm_property *underscan_property;
+	struct drm_property *underscan_hborder_property;
+	struct drm_property *underscan_vborder_property;
+	/* hardcoded DFP edid from BIOS */
+	struct edid *bios_hardcoded_edid;
+	int bios_hardcoded_edid_size;
+
+	/* pointer to fbdev info structure */
+	struct radeon_fbdev *rfbdev;
+	/* firmware flags */
+	u16 firmware_flags;
+	/* pointer to backlight encoder */
+	struct radeon_encoder *bl_encoder;
+};
+
+#define RADEON_MAX_BL_LEVEL 0xFF
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+struct radeon_backlight_privdata {
+	struct radeon_encoder *encoder;
+	uint8_t negative;
+};
+
+#endif
+
+#define MAX_H_CODE_TIMING_LEN 32
+#define MAX_V_CODE_TIMING_LEN 32
+
+/* we need to store these because reading
+   the code tables back is too expensive */
+struct radeon_tv_regs {
+	uint32_t tv_uv_adr;
+	uint32_t timing_cntl;
+	uint32_t hrestart;
+	uint32_t vrestart;
+	uint32_t frestart;
+	uint16_t h_code_timing[MAX_H_CODE_TIMING_LEN];
+	uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN];
+};
+
+struct radeon_atom_ss {
+	uint16_t percentage;
+	uint8_t type;
+	uint16_t step;
+	uint8_t delay;
+	uint8_t range;
+	uint8_t refdiv;
+	/* asic_ss */
+	uint16_t rate;
+	uint16_t amount;
+};
+
+struct radeon_crtc {
+	struct drm_crtc base;
+	int crtc_id;
+	u16 lut_r[256], lut_g[256], lut_b[256];
+	bool enabled;
+	bool can_tile;
+	uint32_t crtc_offset;
+	struct drm_gem_object *cursor_bo;
+	uint64_t cursor_addr;
+	int cursor_width;
+	int cursor_height;
+	uint32_t legacy_display_base_addr;
+	uint32_t legacy_cursor_offset;
+	enum radeon_rmx_type rmx_type;
+	u8 h_border;
+	u8 v_border;
+	fixed20_12 vsc;
+	fixed20_12 hsc;
+	struct drm_display_mode native_mode;
+	int pll_id;
+	/* page flipping */
+	struct radeon_unpin_work *unpin_work;
+	int deferred_flip_completion;
+	/* pll sharing */
+	struct radeon_atom_ss ss;
+	bool ss_enabled;
+	u32 adjusted_clock;
+	int bpc;
+	u32 pll_reference_div;
+	u32 pll_post_div;
+	u32 pll_flags;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+};
+
+struct radeon_encoder_primary_dac {
+	/* legacy primary dac */
+	uint32_t ps2_pdac_adj;
+};
+
+struct radeon_encoder_lvds {
+	/* legacy lvds */
+	uint16_t panel_vcc_delay;
+	uint8_t  panel_pwr_delay;
+	uint8_t  panel_digon_delay;
+	uint8_t  panel_blon_delay;
+	uint16_t panel_ref_divider;
+	uint8_t  panel_post_divider;
+	uint16_t panel_fb_divider;
+	bool     use_bios_dividers;
+	uint32_t lvds_gen_cntl;
+	/* panel mode */
+	struct drm_display_mode native_mode;
+	struct backlight_device *bl_dev;
+	int      dpms_mode;
+	uint8_t  backlight_level;
+};
+
+struct radeon_encoder_tv_dac {
+	/* legacy tv dac */
+	uint32_t ps2_tvdac_adj;
+	uint32_t ntsc_tvdac_adj;
+	uint32_t pal_tvdac_adj;
+
+	int               h_pos;
+	int               v_pos;
+	int               h_size;
+	int               supported_tv_stds;
+	bool              tv_on;
+	enum radeon_tv_std tv_std;
+	struct radeon_tv_regs tv;
+};
+
+struct radeon_encoder_int_tmds {
+	/* legacy int tmds */
+	struct radeon_tmds_pll tmds_pll[4];
+};
+
+struct radeon_encoder_ext_tmds {
+	/* tmds over dvo */
+	struct radeon_i2c_chan *i2c_bus;
+	uint8_t slave_addr;
+	enum radeon_dvo_chip dvo_chip;
+};
+
+/* spread spectrum */
+struct radeon_encoder_atom_dig {
+	bool linkb;
+	/* atom dig */
+	bool coherent_mode;
+	int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB, etc. */
+	/* atom lvds/edp */
+	uint32_t lcd_misc;
+	uint16_t panel_pwr_delay;
+	uint32_t lcd_ss_id;
+	/* panel mode */
+	struct drm_display_mode native_mode;
+	struct backlight_device *bl_dev;
+	int dpms_mode;
+	uint8_t backlight_level;
+	int panel_mode;
+	struct radeon_afmt *afmt;
+};
+
+struct radeon_encoder_atom_dac {
+	enum radeon_tv_std tv_std;
+};
+
+struct radeon_encoder {
+	struct drm_encoder base;
+	uint32_t encoder_enum;
+	uint32_t encoder_id;
+	uint32_t devices;
+	uint32_t active_device;
+	uint32_t flags;
+	uint32_t pixel_clock;
+	enum radeon_rmx_type rmx_type;
+	enum radeon_underscan_type underscan_type;
+	uint32_t underscan_hborder;
+	uint32_t underscan_vborder;
+	struct drm_display_mode native_mode;
+	void *enc_priv;
+	int audio_polling_active;
+	bool is_ext_encoder;
+	u16 caps;
+};
+
+struct radeon_connector_atom_dig {
+	uint32_t igp_lane_info;
+	/* displayport */
+	struct radeon_i2c_chan *dp_i2c_bus;
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+	u8 dp_sink_type;
+	int dp_clock;
+	int dp_lane_count;
+	bool edp_on;
+};
+
+struct radeon_gpio_rec {
+	bool valid;
+	u8 id;
+	u32 reg;
+	u32 mask;
+};
+
+struct radeon_hpd {
+	enum radeon_hpd_id hpd;
+	u8 plugged_state;
+	struct radeon_gpio_rec gpio;
+};
+
+struct radeon_router {
+	u32 router_id;
+	struct radeon_i2c_bus_rec i2c_info;
+	u8 i2c_addr;
+	/* i2c mux */
+	bool ddc_valid;
+	u8 ddc_mux_type;
+	u8 ddc_mux_control_pin;
+	u8 ddc_mux_state;
+	/* clock/data mux */
+	bool cd_valid;
+	u8 cd_mux_type;
+	u8 cd_mux_control_pin;
+	u8 cd_mux_state;
+};
+
+struct radeon_connector {
+	struct drm_connector base;
+	uint32_t connector_id;
+	uint32_t devices;
+	struct radeon_i2c_chan *ddc_bus;
+	/* some systems have an hdmi and vga port with a shared ddc line */
+	bool shared_ddc;
+	bool use_digital;
+	/* we need to keep the EDID between detect
+	   and get_modes, due to analog/digital/tvencoder switching */
+	struct edid *edid;
+	void *con_priv;
+	bool dac_load_detect;
+	bool detected_by_load; /* if the connection status was determined by load */
+	uint16_t connector_object_id;
+	struct radeon_hpd hpd;
+	struct radeon_router router;
+	struct radeon_i2c_chan *router_bus;
+};
+
+struct radeon_framebuffer {
+	struct drm_framebuffer base;
+	struct drm_gem_object *obj;
+};
+
+#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
+				((em) == ATOM_ENCODER_MODE_DP_MST))
+
+struct atom_clock_dividers {
+	u32 post_div;
+	union {
+		struct {
+#ifdef __BIG_ENDIAN
+			u32 reserved : 6;
+			u32 whole_fb_div : 12;
+			u32 frac_fb_div : 14;
+#else
+			u32 frac_fb_div : 14;
+			u32 whole_fb_div : 12;
+			u32 reserved : 6;
+#endif
+		};
+		u32 fb_div;
+	};
+	u32 ref_div;
+	bool enable_post_div;
+	bool enable_dithen;
+	u32 vco_mode;
+	u32 real_clock;
+};
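+
+/*
+ * Editor's note: the anonymous union above lets fb_div be moved around
+ * as one 32-bit word while the bitfields give endian-safe access to
+ * the whole and fractional parts, e.g. (compiled-out sketch):
+ */
+#if 0
+	struct atom_clock_dividers div;
+
+	div.fb_div = raw_fb_div;	/* write the packed word */
+	whole = div.whole_fb_div;	/* 12-bit integer part */
+	frac = div.frac_fb_div;		/* 14-bit fractional part */
+#endif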
+
+extern enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev);
+extern enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev);
+
+extern struct drm_connector *
+radeon_get_connector_for_encoder(struct drm_encoder *encoder);
+extern struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder);
+extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
+				    u32 pixel_clock);
+
+extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
+extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
+extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
+extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
+extern int radeon_get_monitor_bpc(struct drm_connector *connector);
+
+extern void radeon_connector_hotplug(struct drm_connector *connector);
+extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
+				       struct drm_display_mode *mode);
+extern void radeon_dp_set_link_config(struct drm_connector *connector,
+				      const struct drm_display_mode *mode);
+extern void radeon_dp_link_train(struct drm_encoder *encoder,
+				 struct drm_connector *connector);
+extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
+extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
+extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+				    struct drm_connector *connector);
+extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
+extern void radeon_atom_encoder_init(struct radeon_device *rdev);
+extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev);
+extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
+					   int action, uint8_t lane_num,
+					   uint8_t lane_set);
+extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
+extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
+extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+				u8 write_byte, u8 *read_byte);
+
+extern void radeon_i2c_init(struct radeon_device *rdev);
+extern void radeon_i2c_fini(struct radeon_device *rdev);
+extern void radeon_combios_i2c_init(struct radeon_device *rdev);
+extern void radeon_atombios_i2c_init(struct radeon_device *rdev);
+extern void radeon_i2c_add(struct radeon_device *rdev,
+			   struct radeon_i2c_bus_rec *rec,
+			   const char *name);
+extern struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
+						 struct radeon_i2c_bus_rec *i2c_bus);
+extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+						    struct radeon_i2c_bus_rec *rec,
+						    const char *name);
+extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+						 struct radeon_i2c_bus_rec *rec,
+						 const char *name);
+extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
+extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+				u8 slave_addr,
+				u8 addr,
+				u8 *val);
+extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
+				u8 slave_addr,
+				u8 addr,
+				u8 val);
+extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
+extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
+
+extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
+
+extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
+					     struct radeon_atom_ss *ss,
+					     int id);
+extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
+					     struct radeon_atom_ss *ss,
+					     int id, u32 clock);
+
+extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
+				      uint64_t freq,
+				      uint32_t *dot_clock_p,
+				      uint32_t *fb_div_p,
+				      uint32_t *frac_fb_div_p,
+				      uint32_t *ref_div_p,
+				      uint32_t *post_div_p);
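+
+/*
+ * Editor's sketch of a caller (compiled out, values are illustrative):
+ * the RADEON_PLL_* flags defined earlier in this header steer divider
+ * selection.
+ */
+#if 0
+	pll->flags = RADEON_PLL_LEGACY | RADEON_PLL_PREFER_LOW_REF_DIV;
+	radeon_compute_pll_legacy(pll, mode->clock, &dot_clock, &fb_div,
+				  &frac_fb_div, &ref_div, &post_div);
+#endif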
+
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+				     u32 freq,
+				     u32 *dot_clock_p,
+				     u32 *fb_div_p,
+				     u32 *frac_fb_div_p,
+				     u32 *ref_div_p,
+				     u32 *post_div_p);
+
+extern void radeon_setup_encoder_clones(struct drm_device *dev);
+
+struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
+struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
+extern void atombios_dvo_setup(struct drm_encoder *encoder, int action);
+extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
+extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
+extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action);
+extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
+
+extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
+extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+				   struct drm_framebuffer *old_fb);
+extern int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
+					 int x, int y,
+					 enum mode_set_atomic state);
+extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode,
+				   int x, int y,
+				   struct drm_framebuffer *old_fb);
+extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
+
+extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+				 struct drm_framebuffer *old_fb);
+extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
+				       struct drm_framebuffer *fb,
+				       int x, int y,
+				       enum mode_set_atomic state);
+extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+				   struct drm_framebuffer *fb,
+				   int x, int y, int atomic);
+extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+				  struct drm_file *file_priv,
+				  uint32_t handle,
+				  uint32_t width,
+				  uint32_t height);
+extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+				   int x, int y);
+
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+				      int *vpos, int *hpos);
+
+extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
+extern struct edid *
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
+extern bool radeon_atom_get_clock_info(struct drm_device *dev);
+extern bool radeon_combios_get_clock_info(struct drm_device *dev);
+extern struct radeon_encoder_atom_dig *
+radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
+extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+					  struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+						     struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+						   struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+							 struct radeon_encoder_ext_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+						       struct radeon_encoder_ext_tmds *tmds);
+extern struct radeon_encoder_primary_dac *
+radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_tv_dac *
+radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_lvds *
+radeon_combios_get_lvds_info(struct radeon_encoder *encoder);
+extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_tv_dac *
+radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
+extern struct radeon_encoder_primary_dac *
+radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
+extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
+extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
+extern void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev);
+extern void radeon_save_bios_scratch_regs(struct radeon_device *rdev);
+extern void radeon_restore_bios_scratch_regs(struct radeon_device *rdev);
+extern void
+radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
+extern void
+radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
+extern void
+radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
+extern void
+radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
+extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+				     u16 blue, int regno);
+extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+				     u16 *blue, int regno);
+int radeon_framebuffer_init(struct drm_device *dev,
+			     struct radeon_framebuffer *rfb,
+			     struct drm_mode_fb_cmd2 *mode_cmd,
+			     struct drm_gem_object *obj);
+
+int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
+bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev);
+void radeon_atombios_init_crtc(struct drm_device *dev,
+			       struct radeon_crtc *radeon_crtc);
+void radeon_legacy_init_crtc(struct drm_device *dev,
+			     struct radeon_crtc *radeon_crtc);
+
+void radeon_get_clock_info(struct drm_device *dev);
+
+extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
+extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
+
+void radeon_enc_destroy(struct drm_encoder *encoder);
+void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
+void radeon_combios_asic_init(struct drm_device *dev);
+bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+					const struct drm_display_mode *mode,
+					struct drm_display_mode *adjusted_mode);
+void radeon_panel_mode_fixup(struct drm_encoder *encoder,
+			     struct drm_display_mode *adjusted_mode);
+void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc);
+
+/* legacy tv */
+void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder,
+				      uint32_t *h_total_disp, uint32_t *h_sync_strt_wid,
+				      uint32_t *v_total_disp, uint32_t *v_sync_strt_wid);
+void radeon_legacy_tv_adjust_pll1(struct drm_encoder *encoder,
+				  uint32_t *htotal_cntl, uint32_t *ppll_ref_div,
+				  uint32_t *ppll_div_3, uint32_t *pixclks_cntl);
+void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder,
+				  uint32_t *htotal2_cntl, uint32_t *p2pll_ref_div,
+				  uint32_t *p2pll_div_0, uint32_t *pixclks_cntl);
+void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode);
+
+/* fbdev layer */
+int radeon_fbdev_init(struct radeon_device *rdev);
+void radeon_fbdev_fini(struct radeon_device *rdev);
+void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
+int radeon_fbdev_total_size(struct radeon_device *rdev);
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+
+void radeon_fb_output_poll_changed(struct radeon_device *rdev);
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
+
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_object.c b/linux-imx/drivers/gpu/drm/radeon/radeon_object.c
new file mode 100644
index 0000000..f837279
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_object.c
@@ -0,0 +1,650 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ *    Dave Airlie
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+#include "radeon_trace.h"
+
+int radeon_ttm_init(struct radeon_device *rdev);
+void radeon_ttm_fini(struct radeon_device *rdev);
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
+
+/*
+ * To exclude mutual BO access we rely on bo_reserve exclusion, as all
+ * functions accessing a BO call it.
+ */
+
+void radeon_bo_clear_va(struct radeon_bo *bo)
+{
+	struct radeon_bo_va *bo_va, *tmp;
+
+	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
+		/* remove from all vm address space */
+		radeon_vm_bo_rmv(bo->rdev, bo_va);
+	}
+}
+
+static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+{
+	struct radeon_bo *bo;
+
+	bo = container_of(tbo, struct radeon_bo, tbo);
+	mutex_lock(&bo->rdev->gem.mutex);
+	list_del_init(&bo->list);
+	mutex_unlock(&bo->rdev->gem.mutex);
+	radeon_bo_clear_surface_reg(bo);
+	radeon_bo_clear_va(bo);
+	drm_gem_object_release(&bo->gem_base);
+	kfree(bo);
+}
+
+bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &radeon_ttm_bo_destroy)
+		return true;
+	return false;
+}
+
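+/* Translate a RADEON_GEM_DOMAIN_* bitmask into the TTM placement list
+ * used for validation, falling back to cacheable system memory when no
+ * domain bit is set.
+ */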
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
+{
+	u32 c = 0;
+
+	rbo->placement.fpfn = 0;
+	rbo->placement.lpfn = 0;
+	rbo->placement.placement = rbo->placements;
+	rbo->placement.busy_placement = rbo->placements;
+	if (domain & RADEON_GEM_DOMAIN_VRAM)
+		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+					TTM_PL_FLAG_VRAM;
+	if (domain & RADEON_GEM_DOMAIN_GTT) {
+		if (rbo->rdev->flags & RADEON_IS_AGP) {
+			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+		} else {
+			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+		}
+	}
+	if (domain & RADEON_GEM_DOMAIN_CPU) {
+		if (rbo->rdev->flags & RADEON_IS_AGP) {
+			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+		} else {
+			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+		}
+	}
+	if (!c)
+		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	rbo->placement.num_placement = c;
+	rbo->placement.num_busy_placement = c;
+}
+
+int radeon_bo_create(struct radeon_device *rdev,
+		     unsigned long size, int byte_align, bool kernel, u32 domain,
+		     struct sg_table *sg, struct radeon_bo **bo_ptr)
+{
+	struct radeon_bo *bo;
+	enum ttm_bo_type type;
+	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+	size_t acc_size;
+	int r;
+
+	size = ALIGN(size, PAGE_SIZE);
+
+	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+	if (kernel) {
+		type = ttm_bo_type_kernel;
+	} else if (sg) {
+		type = ttm_bo_type_sg;
+	} else {
+		type = ttm_bo_type_device;
+	}
+	*bo_ptr = NULL;
+
+	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
+				       sizeof(struct radeon_bo));
+
+	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+	if (bo == NULL)
+		return -ENOMEM;
+	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
+	if (unlikely(r)) {
+		kfree(bo);
+		return r;
+	}
+	bo->rdev = rdev;
+	bo->gem_base.driver_private = NULL;
+	bo->surface_reg = -1;
+	INIT_LIST_HEAD(&bo->list);
+	INIT_LIST_HEAD(&bo->va);
+	radeon_ttm_placement_from_domain(bo, domain);
+	/* Kernel allocations are uninterruptible */
+	down_read(&rdev->pm.mclk_lock);
+	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
+			&bo->placement, page_align, !kernel, NULL,
+			acc_size, sg, &radeon_ttm_bo_destroy);
+	up_read(&rdev->pm.mclk_lock);
+	if (unlikely(r != 0)) {
+		return r;
+	}
+	*bo_ptr = bo;
+
+	trace_radeon_bo_create(bo);
+
+	return 0;
+}
+
+int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
+{
+	bool is_iomem;
+	int r;
+
+	if (bo->kptr) {
+		if (ptr) {
+			*ptr = bo->kptr;
+		}
+		return 0;
+	}
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	if (r) {
+		return r;
+	}
+	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+	if (ptr) {
+		*ptr = bo->kptr;
+	}
+	radeon_bo_check_tiling(bo, 0, 0);
+	return 0;
+}
+
+void radeon_bo_kunmap(struct radeon_bo *bo)
+{
+	if (bo->kptr == NULL)
+		return;
+	bo->kptr = NULL;
+	radeon_bo_check_tiling(bo, 0, 0);
+	ttm_bo_kunmap(&bo->kmap);
+}
+
+void radeon_bo_unref(struct radeon_bo **bo)
+{
+	struct ttm_buffer_object *tbo;
+	struct radeon_device *rdev;
+
+	if ((*bo) == NULL)
+		return;
+	rdev = (*bo)->rdev;
+	tbo = &((*bo)->tbo);
+	down_read(&rdev->pm.mclk_lock);
+	ttm_bo_unref(&tbo);
+	up_read(&rdev->pm.mclk_lock);
+	if (tbo == NULL)
+		*bo = NULL;
+}
+
+int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
+			     u64 *gpu_addr)
+{
+	int r, i;
+
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
+
+		if (max_offset != 0) {
+			u64 domain_start;
+
+			if (domain == RADEON_GEM_DOMAIN_VRAM)
+				domain_start = bo->rdev->mc.vram_start;
+			else
+				domain_start = bo->rdev->mc.gtt_start;
+			WARN_ON_ONCE(max_offset <
+				     (radeon_bo_gpu_offset(bo) - domain_start));
+		}
+
+		return 0;
+	}
+	radeon_ttm_placement_from_domain(bo, domain);
+	if (domain == RADEON_GEM_DOMAIN_VRAM) {
+		/* force to pin into visible video ram */
+		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	}
+	if (max_offset) {
+		u64 lpfn = max_offset >> PAGE_SHIFT;
+
+		if (!bo->placement.lpfn)
+			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
+
+		if (lpfn < bo->placement.lpfn)
+			bo->placement.lpfn = lpfn;
+	}
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	if (likely(r == 0)) {
+		bo->pin_count = 1;
+		if (gpu_addr != NULL)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
+	}
+	if (unlikely(r != 0))
+		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
+	return r;
+}
+
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
+{
+	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
+}
+
+int radeon_bo_unpin(struct radeon_bo *bo)
+{
+	int r, i;
+
+	if (!bo->pin_count) {
+		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
+		return 0;
+	}
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	if (unlikely(r != 0))
+		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
+	return r;
+}
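+
+/*
+ * Editor's sketch (compiled out, not driver code): the usual
+ * create/pin lifecycle around the helpers above.  Error handling is
+ * elided and the size/domain are arbitrary; radeon_bo_reserve() and
+ * the radeon_object.h inlines are assumed to be in scope.
+ */
+#if 0
+	struct radeon_bo *bo;
+	u64 gpu_addr;
+
+	radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
+			 RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+	radeon_bo_reserve(bo, false);
+	radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+	radeon_bo_unreserve(bo);
+	/* ... GPU uses gpu_addr ... */
+	radeon_bo_reserve(bo, false);
+	radeon_bo_unpin(bo);
+	radeon_bo_unreserve(bo);
+	radeon_bo_unref(&bo);
+#endif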
+
+int radeon_bo_evict_vram(struct radeon_device *rdev)
+{
+	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
+	if (0 && (rdev->flags & RADEON_IS_IGP)) {
+		if (rdev->mc.igp_sideport_enabled == false)
+			/* Useless to evict on IGP chips */
+			return 0;
+	}
+	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+}
+
+void radeon_bo_force_delete(struct radeon_device *rdev)
+{
+	struct radeon_bo *bo, *n;
+
+	if (list_empty(&rdev->gem.objects)) {
+		return;
+	}
+	dev_err(rdev->dev, "Userspace still has active objects !\n");
+	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+		mutex_lock(&rdev->ddev->struct_mutex);
+		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
+			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+			*((unsigned long *)&bo->gem_base.refcount));
+		mutex_lock(&bo->rdev->gem.mutex);
+		list_del_init(&bo->list);
+		mutex_unlock(&bo->rdev->gem.mutex);
+		/* this should unref the ttm bo */
+		drm_gem_object_unreference(&bo->gem_base);
+		mutex_unlock(&rdev->ddev->struct_mutex);
+	}
+}
+
+int radeon_bo_init(struct radeon_device *rdev)
+{
+	/* Add an MTRR for the VRAM */
+	if (!rdev->fastfb_working) {
+		rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
+			MTRR_TYPE_WRCOMB, 1);
+	}
+	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
+		rdev->mc.mc_vram_size >> 20,
+		(unsigned long long)rdev->mc.aper_size >> 20);
+	DRM_INFO("RAM width %dbits %cDR\n",
+			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
+	return radeon_ttm_init(rdev);
+}
+
+void radeon_bo_fini(struct radeon_device *rdev)
+{
+	radeon_ttm_fini(rdev);
+}
+
+void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+				struct list_head *head)
+{
+	if (lobj->written) {
+		list_add(&lobj->tv.head, head);
+	} else {
+		list_add_tail(&lobj->tv.head, head);
+	}
+}
+
+int radeon_bo_list_validate(struct list_head *head, int ring)
+{
+	struct radeon_bo_list *lobj;
+	struct radeon_bo *bo;
+	u32 domain;
+	int r;
+
+	r = ttm_eu_reserve_buffers(head);
+	if (unlikely(r != 0)) {
+		return r;
+	}
+	list_for_each_entry(lobj, head, tv.head) {
+		bo = lobj->bo;
+		if (!bo->pin_count) {
+			domain = lobj->domain;
+
+		retry:
+			radeon_ttm_placement_from_domain(bo, domain);
+			if (ring == R600_RING_TYPE_UVD_INDEX)
+				radeon_uvd_force_into_uvd_segment(bo);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement,
+						true, false);
+			if (unlikely(r)) {
+				if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
+					domain = lobj->alt_domain;
+					goto retry;
+				}
+				return r;
+			}
+		}
+		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+		lobj->tiling_flags = bo->tiling_flags;
+	}
+	return 0;
+}
+
+int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+			     struct vm_area_struct *vma)
+{
+	return ttm_fbdev_mmap(vma, &bo->tbo);
+}
+
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
+{
+	struct radeon_device *rdev = bo->rdev;
+	struct radeon_surface_reg *reg;
+	struct radeon_bo *old_object;
+	int steal;
+	int i;
+
+	BUG_ON(!radeon_bo_is_reserved(bo));
+
+	if (!bo->tiling_flags)
+		return 0;
+
+	if (bo->surface_reg >= 0) {
+		reg = &rdev->surface_regs[bo->surface_reg];
+		i = bo->surface_reg;
+		goto out;
+	}
+
+	steal = -1;
+	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+
+		reg = &rdev->surface_regs[i];
+		if (!reg->bo)
+			break;
+
+		old_object = reg->bo;
+		if (old_object->pin_count == 0)
+			steal = i;
+	}
+
+	/* if we are all out */
+	if (i == RADEON_GEM_MAX_SURFACES) {
+		if (steal == -1)
+			return -ENOMEM;
+		/* find someone with a surface reg and nuke their BO */
+		reg = &rdev->surface_regs[steal];
+		old_object = reg->bo;
+		/* blow away the mapping */
+		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
+		ttm_bo_unmap_virtual(&old_object->tbo);
+		old_object->surface_reg = -1;
+		i = steal;
+	}
+
+	bo->surface_reg = i;
+	reg->bo = bo;
+
+out:
+	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
+			       bo->tbo.mem.start << PAGE_SHIFT,
+			       bo->tbo.num_pages << PAGE_SHIFT);
+	return 0;
+}
+
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
+{
+	struct radeon_device *rdev = bo->rdev;
+	struct radeon_surface_reg *reg;
+
+	if (bo->surface_reg == -1)
+		return;
+
+	reg = &rdev->surface_regs[bo->surface_reg];
+	radeon_clear_surface_reg(rdev, bo->surface_reg);
+
+	reg->bo = NULL;
+	bo->surface_reg = -1;
+}
+
+int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+				uint32_t tiling_flags, uint32_t pitch)
+{
+	struct radeon_device *rdev = bo->rdev;
+	int r;
+
+	if (rdev->family >= CHIP_CEDAR) {
+		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;
+
+		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
+		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
+		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
+		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
+		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
+		switch (bankw) {
+		case 0:
+		case 1:
+		case 2:
+		case 4:
+		case 8:
+			break;
+		default:
+			return -EINVAL;
+		}
+		switch (bankh) {
+		case 0:
+		case 1:
+		case 2:
+		case 4:
+		case 8:
+			break;
+		default:
+			return -EINVAL;
+		}
+		switch (mtaspect) {
+		case 0:
+		case 1:
+		case 2:
+		case 4:
+		case 8:
+			break;
+		default:
+			return -EINVAL;
+		}
+		if (tilesplit > 6) {
+			return -EINVAL;
+		}
+		if (stilesplit > 6) {
+			return -EINVAL;
+		}
+	}
+	r = radeon_bo_reserve(bo, false);
+	if (unlikely(r != 0))
+		return r;
+	bo->tiling_flags = tiling_flags;
+	bo->pitch = pitch;
+	radeon_bo_unreserve(bo);
+	return 0;
+}
+
+void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+				uint32_t *tiling_flags,
+				uint32_t *pitch)
+{
+	BUG_ON(!radeon_bo_is_reserved(bo));
+	if (tiling_flags)
+		*tiling_flags = bo->tiling_flags;
+	if (pitch)
+		*pitch = bo->pitch;
+}
+
+int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+				bool force_drop)
+{
+	BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);
+
+	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
+		return 0;
+
+	if (force_drop) {
+		radeon_bo_clear_surface_reg(bo);
+		return 0;
+	}
+
+	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+		if (!has_moved)
+			return 0;
+
+		if (bo->surface_reg >= 0)
+			radeon_bo_clear_surface_reg(bo);
+		return 0;
+	}
+
+	if ((bo->surface_reg >= 0) && !has_moved)
+		return 0;
+
+	return radeon_bo_get_surface_reg(bo);
+}
+
+void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *mem)
+{
+	struct radeon_bo *rbo;
+	if (!radeon_ttm_bo_is_radeon_bo(bo))
+		return;
+	rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 1);
+	radeon_vm_bo_invalidate(rbo->rdev, rbo);
+}
+
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	struct radeon_device *rdev;
+	struct radeon_bo *rbo;
+	unsigned long offset, size;
+	int r;
+
+	if (!radeon_ttm_bo_is_radeon_bo(bo))
+		return 0;
+	rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 0);
+	rdev = rbo->rdev;
+	if (bo->mem.mem_type != TTM_PL_VRAM)
+		return 0;
+
+	size = bo->mem.num_pages << PAGE_SHIFT;
+	offset = bo->mem.start << PAGE_SHIFT;
+	if ((offset + size) <= rdev->mc.visible_vram_size)
+		return 0;
+
+	/* hurrah, the memory is not visible! */
+	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	r = ttm_bo_validate(bo, &rbo->placement, false, false);
+	if (unlikely(r == -ENOMEM)) {
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		return ttm_bo_validate(bo, &rbo->placement, false, false);
+	} else if (unlikely(r != 0)) {
+		return r;
+	}
+
+	offset = bo->mem.start << PAGE_SHIFT;
+	/* this should never happen */
+	if ((offset + size) > rdev->mc.visible_vram_size)
+		return -EINVAL;
+
+	return 0;
+}
+
+int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	if (unlikely(r != 0))
+		return r;
+	spin_lock(&bo->tbo.bdev->fence_lock);
+	if (mem_type)
+		*mem_type = bo->tbo.mem.mem_type;
+	if (bo->tbo.sync_obj)
+		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	spin_unlock(&bo->tbo.bdev->fence_lock);
+	ttm_bo_unreserve(&bo->tbo);
+	return r;
+}
+
+/**
+ * radeon_bo_reserve - reserve bo
+ * @bo:		bo structure
+ * @no_intr:	don't return -ERESTARTSYS on pending signal
+ *
+ * Returns:
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ */
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
+{
+	int r;
+
+	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS)
+			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+		return r;
+	}
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_object.h b/linux-imx/drivers/gpu/drm/radeon/radeon_object.h
new file mode 100644
index 0000000..2943823
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_object.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RADEON_OBJECT_H__
+#define __RADEON_OBJECT_H__
+
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+/**
+ * radeon_mem_type_to_domain - return domain corresponding to mem_type
+ * @mem_type:	ttm memory type
+ *
+ * Returns corresponding domain of the ttm mem_type
+ */
+static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
+{
+	switch (mem_type) {
+	case TTM_PL_VRAM:
+		return RADEON_GEM_DOMAIN_VRAM;
+	case TTM_PL_TT:
+		return RADEON_GEM_DOMAIN_GTT;
+	case TTM_PL_SYSTEM:
+		return RADEON_GEM_DOMAIN_CPU;
+	default:
+		break;
+	}
+	return 0;
+}
+
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
+
+static inline void radeon_bo_unreserve(struct radeon_bo *bo)
+{
+	ttm_bo_unreserve(&bo->tbo);
+}
+
+/**
+ * radeon_bo_gpu_offset - return GPU offset of bo
+ * @bo:	radeon object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: the object should either be pinned or reserved when calling this
+ * function; it might be useful to add a check for this when debugging.
+ */
+static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
+{
+	return bo->tbo.offset;
+}
+
+static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
+{
+	return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
+{
+	return ttm_bo_is_reserved(&bo->tbo);
+}
+
+static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
+{
+	return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
+static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
+{
+	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
+/**
+ * radeon_bo_mmap_offset - return mmap offset of bo
+ * @bo:	radeon object for which we query the offset
+ *
+ * Returns mmap offset of the object.
+ *
+ * Note: addr_space_offset is constant after ttm bo init; thus it isn't
+ * protected by any lock.
+ */
+static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
+{
+	return bo->tbo.addr_space_offset;
+}
+
+extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
+			  bool no_wait);
+
+extern int radeon_bo_create(struct radeon_device *rdev,
+			    unsigned long size, int byte_align,
+			    bool kernel, u32 domain,
+			    struct sg_table *sg,
+			    struct radeon_bo **bo_ptr);
+extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
+extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern void radeon_bo_unref(struct radeon_bo **bo);
+extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
+extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
+				    u64 max_offset, u64 *gpu_addr);
+extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+extern void radeon_bo_force_delete(struct radeon_device *rdev);
+extern int radeon_bo_init(struct radeon_device *rdev);
+extern void radeon_bo_fini(struct radeon_device *rdev);
+extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+				struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head, int ring);
+extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+				struct vm_area_struct *vma);
+extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+				u32 tiling_flags, u32 pitch);
+extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+				u32 *tiling_flags, u32 *pitch);
+extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+				bool force_drop);
+extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+					struct ttm_mem_reg *mem);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+
+/*
+ * sub allocation
+ */
+
+static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
+{
+	return sa_bo->manager->gpu_addr + sa_bo->soffset;
+}
+
+static inline void *radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
+{
+	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+}
+
+extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+				     struct radeon_sa_manager *sa_manager,
+				     unsigned size, u32 align, u32 domain);
+extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+				      struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+				      struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+					struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_new(struct radeon_device *rdev,
+			    struct radeon_sa_manager *sa_manager,
+			    struct radeon_sa_bo **sa_bo,
+			    unsigned size, unsigned align, bool block);
+extern void radeon_sa_bo_free(struct radeon_device *rdev,
+			      struct radeon_sa_bo **sa_bo,
+			      struct radeon_fence *fence);
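+
+/*
+ * Typical sub-allocator usage (editor's sketch; sizes, alignment and
+ * the fence are illustrative assumptions):
+ *
+ *	radeon_sa_bo_manager_init(rdev, &mgr, 64 * 1024, 256,
+ *				  RADEON_GEM_DOMAIN_GTT);
+ *	radeon_sa_bo_manager_start(rdev, &mgr);
+ *	radeon_sa_bo_new(rdev, &mgr, &sa_bo, 1024, 256, true);
+ *	... radeon_sa_bo_gpu_addr(sa_bo) / radeon_sa_bo_cpu_addr(sa_bo) ...
+ *	radeon_sa_bo_free(rdev, &sa_bo, fence);
+ */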
+#if defined(CONFIG_DEBUG_FS)
+extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+					 struct seq_file *m);
+#endif
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_pm.c b/linux-imx/drivers/gpu/drm/radeon/radeon_pm.c
new file mode 100644
index 0000000..469ba71
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_pm.c
@@ -0,0 +1,876 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rafał Miłecki <zajec5@gmail.com>
+ *          Alex Deucher <alexdeucher@gmail.com>
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "avivod.h"
+#include "atom.h"
+#include <linux/power_supply.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#define RADEON_IDLE_LOOP_MS 100
+#define RADEON_RECLOCK_DELAY_MS 200
+#define RADEON_WAIT_VBLANK_TIMEOUT 200
+
+static const char *radeon_pm_state_type_name[5] = {
+	"",
+	"Powersave",
+	"Battery",
+	"Balanced",
+	"Performance",
+};
+
+static void radeon_dynpm_idle_work_handler(struct work_struct *work);
+static int radeon_debugfs_pm_init(struct radeon_device *rdev);
+static bool radeon_pm_in_vbl(struct radeon_device *rdev);
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
+static void radeon_pm_update_profile(struct radeon_device *rdev);
+static void radeon_pm_set_clocks(struct radeon_device *rdev);
+
+int radeon_pm_get_type_index(struct radeon_device *rdev,
+			     enum radeon_pm_state_type ps_type,
+			     int instance)
+{
+	int i;
+	int found_instance = -1;
+
+	for (i = 0; i < rdev->pm.num_power_states; i++) {
+		if (rdev->pm.power_state[i].type == ps_type) {
+			found_instance++;
+			if (found_instance == instance)
+				return i;
+		}
+	}
+	/* return default if no match */
+	return rdev->pm.default_power_state_index;
+}
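+
+/* Usage sketch (illustrative, not part of the original file), assuming the
+ * POWER_STATE_TYPE_* enumerators from radeon.h: the helper above returns
+ * the index of the Nth power state of a given type, e.g.
+ *
+ *	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+ *
+ * picks the first performance state and falls back to the default state
+ * index when no state of that type exists.
+ */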
+
+void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
+{
+	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+		if (rdev->pm.profile == PM_PROFILE_AUTO) {
+			mutex_lock(&rdev->pm.mutex);
+			radeon_pm_update_profile(rdev);
+			radeon_pm_set_clocks(rdev);
+			mutex_unlock(&rdev->pm.mutex);
+		}
+	}
+}
+
+static void radeon_pm_update_profile(struct radeon_device *rdev)
+{
+	switch (rdev->pm.profile) {
+	case PM_PROFILE_DEFAULT:
+		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
+		break;
+	case PM_PROFILE_AUTO:
+		if (power_supply_is_system_supplied() > 0) {
+			if (rdev->pm.active_crtc_count > 1)
+				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+			else
+				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
+		} else {
+			if (rdev->pm.active_crtc_count > 1)
+				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
+			else
+				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
+		}
+		break;
+	case PM_PROFILE_LOW:
+		if (rdev->pm.active_crtc_count > 1)
+			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
+		else
+			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
+		break;
+	case PM_PROFILE_MID:
+		if (rdev->pm.active_crtc_count > 1)
+			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
+		else
+			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
+		break;
+	case PM_PROFILE_HIGH:
+		if (rdev->pm.active_crtc_count > 1)
+			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
+		else
+			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
+		break;
+	}
+
+	if (rdev->pm.active_crtc_count == 0) {
+		rdev->pm.requested_power_state_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
+		rdev->pm.requested_clock_mode_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
+	} else {
+		rdev->pm.requested_power_state_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
+		rdev->pm.requested_clock_mode_index =
+			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
+	}
+}
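+
+/* Illustrative summary (not in the original file) of the AUTO mapping
+ * above: on AC power the AUTO profile resolves to the HIGH profile, on
+ * battery to MID, and in each case the multi-head (MH) variant is chosen
+ * when more than one CRTC is active, the single-head (SH) variant
+ * otherwise. With no active CRTCs the profile's dpms_off indices are used
+ * instead of the dpms_on ones.
+ */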
+
+static void radeon_unmap_vram_bos(struct radeon_device *rdev)
+{
+	struct radeon_bo *bo, *n;
+
+	if (list_empty(&rdev->gem.objects))
+		return;
+
+	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+			ttm_bo_unmap_virtual(&bo->tbo);
+	}
+}
+
+static void radeon_sync_with_vblank(struct radeon_device *rdev)
+{
+	if (rdev->pm.active_crtcs) {
+		rdev->pm.vblank_sync = false;
+		wait_event_timeout(
+			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
+			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+	}
+}
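+
+/* Illustrative note (not in the original file): pm.vblank_sync is set and
+ * vblank_queue woken from the vblank interrupt path, so the wait above
+ * blocks until the next vblank on an active CRTC (or the 200ms timeout)
+ * before clocks are changed, which keeps reclocking off the visible part
+ * of the frame.
+ */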
+
+static void radeon_set_power_state(struct radeon_device *rdev)
+{
+	u32 sclk, mclk;
+	bool misc_after = false;
+
+	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+		return;
+
+	if (radeon_gui_idle(rdev)) {
+		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+			clock_info[rdev->pm.requested_clock_mode_index].sclk;
+		if (sclk > rdev->pm.default_sclk)
+			sclk = rdev->pm.default_sclk;
+
+		/* Starting with BTC, there is one state that is used for both
+		 * MH and SH.  The difference is that we always use the high
+		 * clock index for mclk and vddci.
+		 */
+		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+		    (rdev->family >= CHIP_BARTS) &&
+		    rdev->pm.active_crtc_count &&
+		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
+		else
+			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
+				clock_info[rdev->pm.requested_clock_mode_index].mclk;
+
+		if (mclk > rdev->pm.default_mclk)
+			mclk = rdev->pm.default_mclk;
+
+		/* upvolt before raising clocks, downvolt after lowering clocks */
+		if (sclk < rdev->pm.current_sclk)
+			misc_after = true;
+
+		radeon_sync_with_vblank(rdev);
+
+		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+			if (!radeon_pm_in_vbl(rdev))
+				return;
+		}
+
+		radeon_pm_prepare(rdev);
+
+		if (!misc_after)
+			/* voltage, pcie lanes, etc.*/
+			radeon_pm_misc(rdev);
+
+		/* set engine clock */
+		if (sclk != rdev->pm.current_sclk) {
+			radeon_pm_debug_check_in_vbl(rdev, false);
+			radeon_set_engine_clock(rdev, sclk);
+			radeon_pm_debug_check_in_vbl(rdev, true);
+			rdev->pm.current_sclk = sclk;
+			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
+		}
+
+		/* set memory clock */
+		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+			radeon_pm_debug_check_in_vbl(rdev, false);
+			radeon_set_memory_clock(rdev, mclk);
+			radeon_pm_debug_check_in_vbl(rdev, true);
+			rdev->pm.current_mclk = mclk;
+			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
+		}
+
+		if (misc_after)
+			/* voltage, pcie lanes, etc.*/
+			radeon_pm_misc(rdev);
+
+		radeon_pm_finish(rdev);
+
+		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
+		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
+	} else {
+		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
+	}
+}
+
+static void radeon_pm_set_clocks(struct radeon_device *rdev)
+{
+	int i, r;
+
+	/* no need to take locks, etc. if nothing's going to change */
+	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+		return;
+
+	mutex_lock(&rdev->ddev->struct_mutex);
+	down_write(&rdev->pm.mclk_lock);
+	mutex_lock(&rdev->ring_lock);
+
+	/* wait for the rings to drain */
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		struct radeon_ring *ring = &rdev->ring[i];
+		if (!ring->ready)
+			continue;
+		r = radeon_fence_wait_empty_locked(rdev, i);
+		if (r) {
+			/* needs a GPU reset; don't reset here */
+			mutex_unlock(&rdev->ring_lock);
+			up_write(&rdev->pm.mclk_lock);
+			mutex_unlock(&rdev->ddev->struct_mutex);
+			return;
+		}
+	}
+
+	radeon_unmap_vram_bos(rdev);
+
+	if (rdev->irq.installed) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (rdev->pm.active_crtcs & (1 << i)) {
+				rdev->pm.req_vblank |= (1 << i);
+				drm_vblank_get(rdev->ddev, i);
+			}
+		}
+	}
+
+	radeon_set_power_state(rdev);
+
+	if (rdev->irq.installed) {
+		for (i = 0; i < rdev->num_crtc; i++) {
+			if (rdev->pm.req_vblank & (1 << i)) {
+				rdev->pm.req_vblank &= ~(1 << i);
+				drm_vblank_put(rdev->ddev, i);
+			}
+		}
+	}
+
+	/* update display watermarks based on new power state */
+	radeon_update_bandwidth_info(rdev);
+	if (rdev->pm.active_crtc_count)
+		radeon_bandwidth_update(rdev);
+
+	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+
+	mutex_unlock(&rdev->ring_lock);
+	up_write(&rdev->pm.mclk_lock);
+	mutex_unlock(&rdev->ddev->struct_mutex);
+}
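+
+/* Illustrative summary (not in the original file) of the reclock sequence
+ * above:
+ *
+ *	1. take struct_mutex, the mclk rwsem and the ring lock;
+ *	2. wait for every ready ring to drain (bail out on error, since
+ *	   that needs a GPU reset);
+ *	3. unmap CPU mappings of VRAM BOs, since CPU access may not be
+ *	   safe while the memory controller reclocks;
+ *	4. hold vblank references, switch the power state, drop them;
+ *	5. recompute display watermarks for the new clocks.
+ */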
+
+static void radeon_pm_print_states(struct radeon_device *rdev)
+{
+	int i, j;
+	struct radeon_power_state *power_state;
+	struct radeon_pm_clock_info *clock_info;
+
+	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
+	for (i = 0; i < rdev->pm.num_power_states; i++) {
+		power_state = &rdev->pm.power_state[i];
+		DRM_DEBUG_DRIVER("State %d: %s\n", i,
+			radeon_pm_state_type_name[power_state->type]);
+		if (i == rdev->pm.default_power_state_index)
+			DRM_DEBUG_DRIVER("\tDefault");
+		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
+			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
+		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+			DRM_DEBUG_DRIVER("\tSingle display only\n");
+		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
+		for (j = 0; j < power_state->num_clock_modes; j++) {
+			clock_info = &(power_state->clock_info[j]);
+			if (rdev->flags & RADEON_IS_IGP)
+				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
+						 j,
+						 clock_info->sclk * 10);
+			else
+				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
+						 j,
+						 clock_info->sclk * 10,
+						 clock_info->mclk * 10,
+						 clock_info->voltage.voltage);
+		}
+	}
+}
+
+static ssize_t radeon_get_pm_profile(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+	int cp = rdev->pm.profile;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			(cp == PM_PROFILE_AUTO) ? "auto" :
+			(cp == PM_PROFILE_LOW) ? "low" :
+			(cp == PM_PROFILE_MID) ? "mid" :
+			(cp == PM_PROFILE_HIGH) ? "high" : "default");
+}
+
+static ssize_t radeon_set_pm_profile(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf,
+				     size_t count)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+
+	mutex_lock(&rdev->pm.mutex);
+	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+		if (strncmp("default", buf, strlen("default")) == 0)
+			rdev->pm.profile = PM_PROFILE_DEFAULT;
+		else if (strncmp("auto", buf, strlen("auto")) == 0)
+			rdev->pm.profile = PM_PROFILE_AUTO;
+		else if (strncmp("low", buf, strlen("low")) == 0)
+			rdev->pm.profile = PM_PROFILE_LOW;
+		else if (strncmp("mid", buf, strlen("mid")) == 0)
+			rdev->pm.profile = PM_PROFILE_MID;
+		else if (strncmp("high", buf, strlen("high")) == 0)
+			rdev->pm.profile = PM_PROFILE_HIGH;
+		else {
+			count = -EINVAL;
+			goto fail;
+		}
+		radeon_pm_update_profile(rdev);
+		radeon_pm_set_clocks(rdev);
+	} else {
+		count = -EINVAL;
+	}
+
+fail:
+	mutex_unlock(&rdev->pm.mutex);
+
+	return count;
+}
+
+static ssize_t radeon_get_pm_method(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+	int pm = rdev->pm.pm_method;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
+}
+
+static ssize_t radeon_set_pm_method(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t count)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+
+	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
+		mutex_lock(&rdev->pm.mutex);
+		rdev->pm.pm_method = PM_METHOD_DYNPM;
+		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+		mutex_unlock(&rdev->pm.mutex);
+	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
+		mutex_lock(&rdev->pm.mutex);
+		/* disable dynpm */
+		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+		rdev->pm.pm_method = PM_METHOD_PROFILE;
+		mutex_unlock(&rdev->pm.mutex);
+		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+	} else {
+		count = -EINVAL;
+		goto fail;
+	}
+	radeon_pm_compute_clocks(rdev);
+fail:
+	return count;
+}
+
+static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
+static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
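+
+/* Usage sketch (illustrative): the two attributes above appear under the
+ * card's PCI device in sysfs; the exact path depends on the system, e.g.
+ *
+ *	# echo dynpm > /sys/class/drm/card0/device/power_method
+ *	# echo profile > /sys/class/drm/card0/device/power_method
+ *	# echo low > /sys/class/drm/card0/device/power_profile
+ *
+ * Any string other than the recognized keywords is rejected with -EINVAL,
+ * and power_profile writes only take effect while power_method is
+ * "profile".
+ */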
+
+static ssize_t radeon_hwmon_show_temp(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+	int temp;
+
+	switch (rdev->pm.int_thermal_type) {
+	case THERMAL_TYPE_RV6XX:
+		temp = rv6xx_get_temp(rdev);
+		break;
+	case THERMAL_TYPE_RV770:
+		temp = rv770_get_temp(rdev);
+		break;
+	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_NI:
+		temp = evergreen_get_temp(rdev);
+		break;
+	case THERMAL_TYPE_SUMO:
+		temp = sumo_get_temp(rdev);
+		break;
+	case THERMAL_TYPE_SI:
+		temp = si_get_temp(rdev);
+		break;
+	default:
+		temp = 0;
+		break;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
+static ssize_t radeon_hwmon_show_name(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	return sprintf(buf, "radeon\n");
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
+
+static struct attribute *hwmon_attributes[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_name.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group hwmon_attrgroup = {
+	.attrs = hwmon_attributes,
+};
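+
+/* Illustrative note (not in the original file): the group above exports
+ * the internal thermal sensor through the standard hwmon interface, e.g.
+ *
+ *	# cat /sys/class/hwmon/hwmon0/temp1_input
+ *	61000
+ *
+ * where the value follows the hwmon convention of millidegrees Celsius
+ * (the *_get_temp() helpers return millidegrees).
+ */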
+
+static int radeon_hwmon_init(struct radeon_device *rdev)
+{
+	int err = 0;
+
+	rdev->pm.int_hwmon_dev = NULL;
+
+	switch (rdev->pm.int_thermal_type) {
+	case THERMAL_TYPE_RV6XX:
+	case THERMAL_TYPE_RV770:
+	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_NI:
+	case THERMAL_TYPE_SUMO:
+	case THERMAL_TYPE_SI:
+		/* No support for TN yet */
+		if (rdev->family == CHIP_ARUBA)
+			return err;
+		rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
+		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
+			err = PTR_ERR(rdev->pm.int_hwmon_dev);
+			dev_err(rdev->dev,
+				"Unable to register hwmon device: %d\n", err);
+			break;
+		}
+		dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
+		err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
+					 &hwmon_attrgroup);
+		if (err) {
+			dev_err(rdev->dev,
+				"Unable to create hwmon sysfs file: %d\n", err);
+			hwmon_device_unregister(rdev->pm.int_hwmon_dev);
+			rdev->pm.int_hwmon_dev = NULL;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static void radeon_hwmon_fini(struct radeon_device *rdev)
+{
+	if (rdev->pm.int_hwmon_dev) {
+		sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
+		hwmon_device_unregister(rdev->pm.int_hwmon_dev);
+	}
+}
+
+void radeon_pm_suspend(struct radeon_device *rdev)
+{
+	mutex_lock(&rdev->pm.mutex);
+	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
+			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
+	}
+	mutex_unlock(&rdev->pm.mutex);
+
+	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+}
+
+void radeon_pm_resume(struct radeon_device *rdev)
+{
+	/* set up the default clocks if the MC ucode is loaded */
+	if ((rdev->family >= CHIP_BARTS) &&
+	    (rdev->family <= CHIP_CAYMAN) &&
+	    rdev->mc_fw) {
+		if (rdev->pm.default_vddc)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+						SET_VOLTAGE_TYPE_ASIC_VDDC);
+		if (rdev->pm.default_vddci)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+						SET_VOLTAGE_TYPE_ASIC_VDDCI);
+		if (rdev->pm.default_sclk)
+			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+		if (rdev->pm.default_mclk)
+			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+	}
+	/* asic init will reset the default power state */
+	mutex_lock(&rdev->pm.mutex);
+	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+	rdev->pm.current_clock_mode_index = 0;
+	rdev->pm.current_sclk = rdev->pm.default_sclk;
+	rdev->pm.current_mclk = rdev->pm.default_mclk;
+	if (rdev->pm.power_state) {
+		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+	}
+	if ((rdev->pm.pm_method == PM_METHOD_DYNPM) &&
+	    (rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED)) {
+		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+	}
+	mutex_unlock(&rdev->pm.mutex);
+	radeon_pm_compute_clocks(rdev);
+}
+
+int radeon_pm_init(struct radeon_device *rdev)
+{
+	int ret;
+
+	/* default to profile method */
+	rdev->pm.pm_method = PM_METHOD_PROFILE;
+	rdev->pm.profile = PM_PROFILE_DEFAULT;
+	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+	rdev->pm.default_sclk = rdev->clock.default_sclk;
+	rdev->pm.default_mclk = rdev->clock.default_mclk;
+	rdev->pm.current_sclk = rdev->clock.default_sclk;
+	rdev->pm.current_mclk = rdev->clock.default_mclk;
+	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+	if (rdev->bios) {
+		if (rdev->is_atom_bios)
+			radeon_atombios_get_power_modes(rdev);
+		else
+			radeon_combios_get_power_modes(rdev);
+		radeon_pm_print_states(rdev);
+		radeon_pm_init_profile(rdev);
+		/* set up the default clocks if the MC ucode is loaded */
+		if ((rdev->family >= CHIP_BARTS) &&
+		    (rdev->family <= CHIP_CAYMAN) &&
+		    rdev->mc_fw) {
+			if (rdev->pm.default_vddc)
+				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+							SET_VOLTAGE_TYPE_ASIC_VDDC);
+			if (rdev->pm.default_vddci)
+				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+							SET_VOLTAGE_TYPE_ASIC_VDDCI);
+			if (rdev->pm.default_sclk)
+				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+			if (rdev->pm.default_mclk)
+				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+		}
+	}
+
+	/* set up the internal thermal sensor if applicable */
+	ret = radeon_hwmon_init(rdev);
+	if (ret)
+		return ret;
+
+	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
+
+	if (rdev->pm.num_power_states > 1) {
+		/* where's the best place to put these? */
+		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+		if (ret)
+			DRM_ERROR("failed to create device file for power profile\n");
+		ret = device_create_file(rdev->dev, &dev_attr_power_method);
+		if (ret)
+			DRM_ERROR("failed to create device file for power method\n");
+
+		if (radeon_debugfs_pm_init(rdev))
+			DRM_ERROR("Failed to register debugfs file for PM!\n");
+
+		DRM_INFO("radeon: power management initialized\n");
+	}
+
+	return 0;
+}
+
+void radeon_pm_fini(struct radeon_device *rdev)
+{
+	if (rdev->pm.num_power_states > 1) {
+		mutex_lock(&rdev->pm.mutex);
+		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+			rdev->pm.profile = PM_PROFILE_DEFAULT;
+			radeon_pm_update_profile(rdev);
+			radeon_pm_set_clocks(rdev);
+		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+			/* reset default clocks */
+			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
+			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+			radeon_pm_set_clocks(rdev);
+		}
+		mutex_unlock(&rdev->pm.mutex);
+
+		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
+
+		device_remove_file(rdev->dev, &dev_attr_power_profile);
+		device_remove_file(rdev->dev, &dev_attr_power_method);
+	}
+
+	kfree(rdev->pm.power_state);
+
+	radeon_hwmon_fini(rdev);
+}
+
+void radeon_pm_compute_clocks(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+
+	if (rdev->pm.num_power_states < 2)
+		return;
+
+	mutex_lock(&rdev->pm.mutex);
+
+	rdev->pm.active_crtcs = 0;
+	rdev->pm.active_crtc_count = 0;
+	list_for_each_entry(crtc,
+		&ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+			rdev->pm.active_crtc_count++;
+		}
+	}
+
+	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+		radeon_pm_update_profile(rdev);
+		radeon_pm_set_clocks(rdev);
+	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
+		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
+			if (rdev->pm.active_crtc_count > 1) {
+				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
+					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
+					radeon_pm_get_dynpm_state(rdev);
+					radeon_pm_set_clocks(rdev);
+
+					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
+				}
+			} else if (rdev->pm.active_crtc_count == 1) {
+				/* TODO: Increase clocks if needed for current mode */
+
+				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
+					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
+					radeon_pm_get_dynpm_state(rdev);
+					radeon_pm_set_clocks(rdev);
+
+					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
+					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
+				}
+			} else { /* count == 0 */
+				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
+					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
+
+					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
+					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
+					radeon_pm_get_dynpm_state(rdev);
+					radeon_pm_set_clocks(rdev);
+				}
+			}
+		}
+	}
+
+	mutex_unlock(&rdev->pm.mutex);
+}
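+
+/* Illustrative summary (not in the original file) of the dynpm state
+ * machine driven above by the active CRTC count:
+ *
+ *	>1 CRTCs: pause dynpm and force the default state (reclocking with
+ *	          multiple heads scanning out is not handled here);
+ *	 1 CRTC:  (re)activate dynpm and schedule the idle worker;
+ *	 0 CRTCs: drop to the minimum state, nothing is being displayed.
+ */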
+
+static bool radeon_pm_in_vbl(struct radeon_device *rdev)
+{
+	int  crtc, vpos, hpos, vbl_status;
+	bool in_vbl = true;
+
+	/* Iterate over all active CRTCs. All CRTCs must be in vblank,
+	 * otherwise return in_vbl == false.
+	 */
+	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
+		if (rdev->pm.active_crtcs & (1 << crtc)) {
+			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
+			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
+			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
+				in_vbl = false;
+		}
+	}
+
+	return in_vbl;
+}
+
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+{
+	u32 stat_crtc = 0;
+	bool in_vbl = radeon_pm_in_vbl(rdev);
+
+	if (!in_vbl)
+		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
+			 finish ? "exit" : "entry");
+	return in_vbl;
+}
+
+static void radeon_dynpm_idle_work_handler(struct work_struct *work)
+{
+	struct radeon_device *rdev;
+	int resched;
+	rdev = container_of(work, struct radeon_device,
+				pm.dynpm_idle_work.work);
+
+	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+	mutex_lock(&rdev->pm.mutex);
+	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+		int not_processed = 0;
+		int i;
+
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			struct radeon_ring *ring = &rdev->ring[i];
+
+			if (ring->ready) {
+				not_processed += radeon_fence_count_emitted(rdev, i);
+				if (not_processed >= 3)
+					break;
+			}
+		}
+
+		if (not_processed >= 3) { /* should upclock */
+			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
+				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+				   rdev->pm.dynpm_can_upclock) {
+				rdev->pm.dynpm_planned_action =
+					DYNPM_ACTION_UPCLOCK;
+				rdev->pm.dynpm_action_timeout = jiffies +
+					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+			}
+		} else if (not_processed == 0) { /* should downclock */
+			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
+				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
+				   rdev->pm.dynpm_can_downclock) {
+				rdev->pm.dynpm_planned_action =
+					DYNPM_ACTION_DOWNCLOCK;
+				rdev->pm.dynpm_action_timeout = jiffies +
+					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+			}
+		}
+
+		/* Note: the reclock path waits for vblank (see
+		 * radeon_set_power_state()) rather than switching
+		 * immediately, to avoid visible flicker.
+		 */
+		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
+		    jiffies > rdev->pm.dynpm_action_timeout) {
+			radeon_pm_get_dynpm_state(rdev);
+			radeon_pm_set_clocks(rdev);
+		}
+
+		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+	}
+	mutex_unlock(&rdev->pm.mutex);
+	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+}
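+
+/* Illustrative note (not in the original file): the worker above implements
+ * a simple load heuristic. Three or more unprocessed fences across the
+ * ready rings request an upclock; zero requests a downclock; anything in
+ * between leaves the planned action alone. The action only fires once
+ * dynpm_action_timeout (set to jiffies + RADEON_RECLOCK_DELAY_MS when the
+ * action was planned) has passed, which debounces rapid load changes, and
+ * the worker then reschedules itself every RADEON_IDLE_LOOP_MS.
+ */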
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
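+	/* Illustrative note (not in the original file): clocks are kept in
+	 * units of 10 kHz, so the "%u0" format strings below print the raw
+	 * value followed by a literal trailing zero to display kHz.
+	 */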
+	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
+	/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
+	if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
+		seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
+	else
+		seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
+	if (rdev->asic->pm.get_memory_clock)
+		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+	if (rdev->pm.current_vddc)
+		seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
+	if (rdev->asic->pm.get_pcie_lanes)
+		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
+
+	return 0;
+}
+
+static struct drm_info_list radeon_pm_info_list[] = {
+	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
+};
+#endif
+
+static int radeon_debugfs_pm_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
+#else
+	return 0;
+#endif
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_prime.c b/linux-imx/drivers/gpu/drm/radeon/radeon_prime.c
new file mode 100644
index 0000000..4940af7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_prime.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * based on nouveau_prime.c
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+
+#include "radeon.h"
+#include <drm/radeon_drm.h>
+
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(obj);
+	int npages = bo->tbo.num_pages;
+
+	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+}
+
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(obj);
+	int ret;
+
+	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+			  &bo->dma_buf_vmap);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return bo->dma_buf_vmap.virtual;
+}
+
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(obj);
+
+	ttm_bo_kunmap(&bo->dma_buf_vmap);
+}
+
+struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
+							size_t size,
+							struct sg_table *sg)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_bo *bo;
+	int ret;
+
+	ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
+			       RADEON_GEM_DOMAIN_GTT, sg, &bo);
+	if (ret)
+		return ERR_PTR(ret);
+	bo->gem_base.driver_private = bo;
+
+	mutex_lock(&rdev->gem.mutex);
+	list_add_tail(&bo->list, &rdev->gem.objects);
+	mutex_unlock(&rdev->gem.mutex);
+
+	return &bo->gem_base;
+}
+
+int radeon_gem_prime_pin(struct drm_gem_object *obj)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(obj);
+	int ret = 0;
+
+	ret = radeon_bo_reserve(bo, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	/* pin buffer into GTT */
+	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+	if (ret) {
+		radeon_bo_unreserve(bo);
+		return ret;
+	}
+	radeon_bo_unreserve(bo);
+
+	return 0;
+}
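+
+/* Illustrative summary (not in the original file) of the PRIME flow in
+ * this file: on export, the BO is pinned into GTT and its backing TTM
+ * pages are handed out as an sg_table; on import, the sg_table from the
+ * foreign exporter is wrapped in a new GTT-domain BO via radeon_bo_create()
+ * and tracked on rdev->gem.objects like any other GEM object.
+ */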
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_reg.h b/linux-imx/drivers/gpu/drm/radeon/radeon_reg.h
new file mode 100644
index 0000000..7e2c2b7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_reg.h
@@ -0,0 +1,3724 @@
+/*
+ * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
+ *                VA Linux Systems Inc., Fremont, California.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT.  IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR
+ * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Authors:
+ *   Kevin E. Martin <martin@xfree86.org>
+ *   Rickard E. Faith <faith@valinux.com>
+ *   Alan Hourihane <alanh@fairlite.demon.co.uk>
+ *
+ * References:
+ *
+ * !!!! FIXME !!!!
+ *   RAGE 128 VR/ RAGE 128 GL Register Reference Manual (Technical
+ *   Reference Manual P/N RRG-G04100-C Rev. 0.04), ATI Technologies: April
+ *   1999.
+ *
+ * !!!! FIXME !!!!
+ *   RAGE 128 Software Development Manual (Technical Reference Manual P/N
+ *   SDK-G04000 Rev. 0.01), ATI Technologies: June 1999.
+ *
+ */
+
+/* !!!! FIXME !!!!  NOTE: THIS FILE HAS BEEN CONVERTED FROM r128_reg.h
+ * AND CONTAINS REGISTERS AND REGISTER DEFINITIONS THAT ARE NOT CORRECT
+ * ON THE RADEON.  A FULL AUDIT OF THIS CODE IS NEEDED!  */
+#ifndef _RADEON_REG_H_
+#define _RADEON_REG_H_
+
+#include "r300_reg.h"
+#include "r500_reg.h"
+#include "r600_reg.h"
+#include "evergreen_reg.h"
+#include "ni_reg.h"
+#include "si_reg.h"
+
+#define RADEON_MC_AGP_LOCATION		0x014c
+#define		RADEON_MC_AGP_START_MASK	0x0000FFFF
+#define		RADEON_MC_AGP_START_SHIFT	0
+#define		RADEON_MC_AGP_TOP_MASK		0xFFFF0000
+#define		RADEON_MC_AGP_TOP_SHIFT		16
+#define RADEON_MC_FB_LOCATION		0x0148
+#define		RADEON_MC_FB_START_MASK		0x0000FFFF
+#define		RADEON_MC_FB_START_SHIFT	0
+#define		RADEON_MC_FB_TOP_MASK		0xFFFF0000
+#define		RADEON_MC_FB_TOP_SHIFT		16
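+
+/* Illustrative example (not in the original header), using the mask/shift
+ * pairs above: MC_FB_LOCATION packs the framebuffer range in 64K units,
+ * start in the low word, top in the high word. A 256MB aperture starting
+ * at MC address 0 would be programmed roughly as:
+ *
+ *	u32 fb_loc = ((0x0fff << RADEON_MC_FB_TOP_SHIFT) & RADEON_MC_FB_TOP_MASK) |
+ *		     ((0x0000 << RADEON_MC_FB_START_SHIFT) & RADEON_MC_FB_START_MASK);
+ *	WREG32(RADEON_MC_FB_LOCATION, fb_loc);
+ */
+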
+#define RADEON_AGP_BASE_2		0x015c /* r200+ only */
+#define RADEON_AGP_BASE			0x0170
+
+#define ATI_DATATYPE_VQ				0
+#define ATI_DATATYPE_CI4			1
+#define ATI_DATATYPE_CI8			2
+#define ATI_DATATYPE_ARGB1555			3
+#define ATI_DATATYPE_RGB565			4
+#define ATI_DATATYPE_RGB888			5
+#define ATI_DATATYPE_ARGB8888			6
+#define ATI_DATATYPE_RGB332			7
+#define ATI_DATATYPE_Y8				8
+#define ATI_DATATYPE_RGB8			9
+#define ATI_DATATYPE_CI16			10
+#define ATI_DATATYPE_VYUY_422			11
+#define ATI_DATATYPE_YVYU_422			12
+#define ATI_DATATYPE_AYUV_444			14
+#define ATI_DATATYPE_ARGB4444			15
+
+/* Registers for 2D/Video/Overlay */
+#define RADEON_ADAPTER_ID                   0x0f2c /* PCI */
+#define RADEON_AGP_BASE                     0x0170
+#define RADEON_AGP_CNTL                     0x0174
+#       define RADEON_AGP_APER_SIZE_256MB   (0x00 << 0)
+#       define RADEON_AGP_APER_SIZE_128MB   (0x20 << 0)
+#       define RADEON_AGP_APER_SIZE_64MB    (0x30 << 0)
+#       define RADEON_AGP_APER_SIZE_32MB    (0x38 << 0)
+#       define RADEON_AGP_APER_SIZE_16MB    (0x3c << 0)
+#       define RADEON_AGP_APER_SIZE_8MB     (0x3e << 0)
+#       define RADEON_AGP_APER_SIZE_4MB     (0x3f << 0)
+#       define RADEON_AGP_APER_SIZE_MASK    (0x3f << 0)
+#define RADEON_STATUS_PCI_CONFIG            0x06
+#       define RADEON_CAP_LIST              0x100000
+#define RADEON_CAPABILITIES_PTR_PCI_CONFIG  0x34 /* offset in PCI config*/
+#       define RADEON_CAP_PTR_MASK          0xfc /* mask off reserved bits of CAP_PTR */
+#       define RADEON_CAP_ID_NULL           0x00 /* End of capability list */
+#       define RADEON_CAP_ID_AGP            0x02 /* AGP capability ID */
+#       define RADEON_CAP_ID_EXP            0x10 /* PCI Express */
+#define RADEON_AGP_COMMAND                  0x0f60 /* PCI */
+#define RADEON_AGP_COMMAND_PCI_CONFIG       0x0060 /* offset in PCI config*/
+#       define RADEON_AGP_ENABLE            (1<<8)
+#define RADEON_AGP_PLL_CNTL                 0x000b /* PLL */
+#define RADEON_AGP_STATUS                   0x0f5c /* PCI */
+#       define RADEON_AGP_1X_MODE           0x01
+#       define RADEON_AGP_2X_MODE           0x02
+#       define RADEON_AGP_4X_MODE           0x04
+#       define RADEON_AGP_FW_MODE           0x10
+#       define RADEON_AGP_MODE_MASK         0x17
+#       define RADEON_AGPv3_MODE            0x08
+#       define RADEON_AGPv3_4X_MODE         0x01
+#       define RADEON_AGPv3_8X_MODE         0x02
+#define RADEON_ATTRDR                       0x03c1 /* VGA */
+#define RADEON_ATTRDW                       0x03c0 /* VGA */
+#define RADEON_ATTRX                        0x03c0 /* VGA */
+#define RADEON_AUX_SC_CNTL                  0x1660
+#       define RADEON_AUX1_SC_EN            (1 << 0)
+#       define RADEON_AUX1_SC_MODE_OR       (0 << 1)
+#       define RADEON_AUX1_SC_MODE_NAND     (1 << 1)
+#       define RADEON_AUX2_SC_EN            (1 << 2)
+#       define RADEON_AUX2_SC_MODE_OR       (0 << 3)
+#       define RADEON_AUX2_SC_MODE_NAND     (1 << 3)
+#       define RADEON_AUX3_SC_EN            (1 << 4)
+#       define RADEON_AUX3_SC_MODE_OR       (0 << 5)
+#       define RADEON_AUX3_SC_MODE_NAND     (1 << 5)
+#define RADEON_AUX1_SC_BOTTOM               0x1670
+#define RADEON_AUX1_SC_LEFT                 0x1664
+#define RADEON_AUX1_SC_RIGHT                0x1668
+#define RADEON_AUX1_SC_TOP                  0x166c
+#define RADEON_AUX2_SC_BOTTOM               0x1680
+#define RADEON_AUX2_SC_LEFT                 0x1674
+#define RADEON_AUX2_SC_RIGHT                0x1678
+#define RADEON_AUX2_SC_TOP                  0x167c
+#define RADEON_AUX3_SC_BOTTOM               0x1690
+#define RADEON_AUX3_SC_LEFT                 0x1684
+#define RADEON_AUX3_SC_RIGHT                0x1688
+#define RADEON_AUX3_SC_TOP                  0x168c
+#define RADEON_AUX_WINDOW_HORZ_CNTL         0x02d8
+#define RADEON_AUX_WINDOW_VERT_CNTL         0x02dc
+
+#define RADEON_BASE_CODE                    0x0f0b
+#define RADEON_BIOS_0_SCRATCH               0x0010
+#       define RADEON_FP_PANEL_SCALABLE     (1 << 16)
+#       define RADEON_FP_PANEL_SCALE_EN     (1 << 17)
+#       define RADEON_FP_CHIP_SCALE_EN      (1 << 18)
+#       define RADEON_DRIVER_BRIGHTNESS_EN  (1 << 26)
+#       define RADEON_DISPLAY_ROT_MASK      (3 << 28)
+#       define RADEON_DISPLAY_ROT_00        (0 << 28)
+#       define RADEON_DISPLAY_ROT_90        (1 << 28)
+#       define RADEON_DISPLAY_ROT_180       (2 << 28)
+#       define RADEON_DISPLAY_ROT_270       (3 << 28)
+#define RADEON_BIOS_1_SCRATCH               0x0014
+#define RADEON_BIOS_2_SCRATCH               0x0018
+#define RADEON_BIOS_3_SCRATCH               0x001c
+#define RADEON_BIOS_4_SCRATCH               0x0020
+#       define RADEON_CRT1_ATTACHED_MASK    (3 << 0)
+#       define RADEON_CRT1_ATTACHED_MONO    (1 << 0)
+#       define RADEON_CRT1_ATTACHED_COLOR   (2 << 0)
+#       define RADEON_LCD1_ATTACHED         (1 << 2)
+#       define RADEON_DFP1_ATTACHED         (1 << 3)
+#       define RADEON_TV1_ATTACHED_MASK     (3 << 4)
+#       define RADEON_TV1_ATTACHED_COMP     (1 << 4)
+#       define RADEON_TV1_ATTACHED_SVIDEO   (2 << 4)
+#       define RADEON_CRT2_ATTACHED_MASK    (3 << 8)
+#       define RADEON_CRT2_ATTACHED_MONO    (1 << 8)
+#       define RADEON_CRT2_ATTACHED_COLOR   (2 << 8)
+#       define RADEON_DFP2_ATTACHED         (1 << 11)
+#define RADEON_BIOS_5_SCRATCH               0x0024
+#       define RADEON_LCD1_ON               (1 << 0)
+#       define RADEON_CRT1_ON               (1 << 1)
+#       define RADEON_TV1_ON                (1 << 2)
+#       define RADEON_DFP1_ON               (1 << 3)
+#       define RADEON_CRT2_ON               (1 << 5)
+#       define RADEON_CV1_ON                (1 << 6)
+#       define RADEON_DFP2_ON               (1 << 7)
+#       define RADEON_LCD1_CRTC_MASK        (1 << 8)
+#       define RADEON_LCD1_CRTC_SHIFT       8
+#       define RADEON_CRT1_CRTC_MASK        (1 << 9)
+#       define RADEON_CRT1_CRTC_SHIFT       9
+#       define RADEON_TV1_CRTC_MASK         (1 << 10)
+#       define RADEON_TV1_CRTC_SHIFT        10
+#       define RADEON_DFP1_CRTC_MASK        (1 << 11)
+#       define RADEON_DFP1_CRTC_SHIFT       11
+#       define RADEON_CRT2_CRTC_MASK        (1 << 12)
+#       define RADEON_CRT2_CRTC_SHIFT       12
+#       define RADEON_CV1_CRTC_MASK         (1 << 13)
+#       define RADEON_CV1_CRTC_SHIFT        13
+#       define RADEON_DFP2_CRTC_MASK        (1 << 14)
+#       define RADEON_DFP2_CRTC_SHIFT       14
+#       define RADEON_ACC_REQ_LCD1          (1 << 16)
+#       define RADEON_ACC_REQ_CRT1          (1 << 17)
+#       define RADEON_ACC_REQ_TV1           (1 << 18)
+#       define RADEON_ACC_REQ_DFP1          (1 << 19)
+#       define RADEON_ACC_REQ_CRT2          (1 << 21)
+#       define RADEON_ACC_REQ_TV2           (1 << 22)
+#       define RADEON_ACC_REQ_DFP2          (1 << 23)
+#define RADEON_BIOS_6_SCRATCH               0x0028
+#       define RADEON_ACC_MODE_CHANGE       (1 << 2)
+#       define RADEON_EXT_DESKTOP_MODE      (1 << 3)
+#       define RADEON_LCD_DPMS_ON           (1 << 20)
+#       define RADEON_CRT_DPMS_ON           (1 << 21)
+#       define RADEON_TV_DPMS_ON            (1 << 22)
+#       define RADEON_DFP_DPMS_ON           (1 << 23)
+#       define RADEON_DPMS_MASK             (3 << 24)
+#       define RADEON_DPMS_ON               (0 << 24)
+#       define RADEON_DPMS_STANDBY          (1 << 24)
+#       define RADEON_DPMS_SUSPEND          (2 << 24)
+#       define RADEON_DPMS_OFF              (3 << 24)
+#       define RADEON_SCREEN_BLANKING       (1 << 26)
+#       define RADEON_DRIVER_CRITICAL       (1 << 27)
+#       define RADEON_DISPLAY_SWITCHING_DIS (1 << 30)
+#define RADEON_BIOS_7_SCRATCH               0x002c
+#       define RADEON_SYS_HOTKEY            (1 << 10)
+#       define RADEON_DRV_LOADED            (1 << 12)
+#define RADEON_BIOS_ROM                     0x0f30 /* PCI */
+#define RADEON_BIST                         0x0f0f /* PCI */
+#define RADEON_BRUSH_DATA0                  0x1480
+#define RADEON_BRUSH_DATA1                  0x1484
+#define RADEON_BRUSH_DATA10                 0x14a8
+#define RADEON_BRUSH_DATA11                 0x14ac
+#define RADEON_BRUSH_DATA12                 0x14b0
+#define RADEON_BRUSH_DATA13                 0x14b4
+#define RADEON_BRUSH_DATA14                 0x14b8
+#define RADEON_BRUSH_DATA15                 0x14bc
+#define RADEON_BRUSH_DATA16                 0x14c0
+#define RADEON_BRUSH_DATA17                 0x14c4
+#define RADEON_BRUSH_DATA18                 0x14c8
+#define RADEON_BRUSH_DATA19                 0x14cc
+#define RADEON_BRUSH_DATA2                  0x1488
+#define RADEON_BRUSH_DATA20                 0x14d0
+#define RADEON_BRUSH_DATA21                 0x14d4
+#define RADEON_BRUSH_DATA22                 0x14d8
+#define RADEON_BRUSH_DATA23                 0x14dc
+#define RADEON_BRUSH_DATA24                 0x14e0
+#define RADEON_BRUSH_DATA25                 0x14e4
+#define RADEON_BRUSH_DATA26                 0x14e8
+#define RADEON_BRUSH_DATA27                 0x14ec
+#define RADEON_BRUSH_DATA28                 0x14f0
+#define RADEON_BRUSH_DATA29                 0x14f4
+#define RADEON_BRUSH_DATA3                  0x148c
+#define RADEON_BRUSH_DATA30                 0x14f8
+#define RADEON_BRUSH_DATA31                 0x14fc
+#define RADEON_BRUSH_DATA32                 0x1500
+#define RADEON_BRUSH_DATA33                 0x1504
+#define RADEON_BRUSH_DATA34                 0x1508
+#define RADEON_BRUSH_DATA35                 0x150c
+#define RADEON_BRUSH_DATA36                 0x1510
+#define RADEON_BRUSH_DATA37                 0x1514
+#define RADEON_BRUSH_DATA38                 0x1518
+#define RADEON_BRUSH_DATA39                 0x151c
+#define RADEON_BRUSH_DATA4                  0x1490
+#define RADEON_BRUSH_DATA40                 0x1520
+#define RADEON_BRUSH_DATA41                 0x1524
+#define RADEON_BRUSH_DATA42                 0x1528
+#define RADEON_BRUSH_DATA43                 0x152c
+#define RADEON_BRUSH_DATA44                 0x1530
+#define RADEON_BRUSH_DATA45                 0x1534
+#define RADEON_BRUSH_DATA46                 0x1538
+#define RADEON_BRUSH_DATA47                 0x153c
+#define RADEON_BRUSH_DATA48                 0x1540
+#define RADEON_BRUSH_DATA49                 0x1544
+#define RADEON_BRUSH_DATA5                  0x1494
+#define RADEON_BRUSH_DATA50                 0x1548
+#define RADEON_BRUSH_DATA51                 0x154c
+#define RADEON_BRUSH_DATA52                 0x1550
+#define RADEON_BRUSH_DATA53                 0x1554
+#define RADEON_BRUSH_DATA54                 0x1558
+#define RADEON_BRUSH_DATA55                 0x155c
+#define RADEON_BRUSH_DATA56                 0x1560
+#define RADEON_BRUSH_DATA57                 0x1564
+#define RADEON_BRUSH_DATA58                 0x1568
+#define RADEON_BRUSH_DATA59                 0x156c
+#define RADEON_BRUSH_DATA6                  0x1498
+#define RADEON_BRUSH_DATA60                 0x1570
+#define RADEON_BRUSH_DATA61                 0x1574
+#define RADEON_BRUSH_DATA62                 0x1578
+#define RADEON_BRUSH_DATA63                 0x157c
+#define RADEON_BRUSH_DATA7                  0x149c
+#define RADEON_BRUSH_DATA8                  0x14a0
+#define RADEON_BRUSH_DATA9                  0x14a4
+#define RADEON_BRUSH_SCALE                  0x1470
+#define RADEON_BRUSH_Y_X                    0x1474
+#define RADEON_BUS_CNTL                     0x0030
+#       define RADEON_BUS_MASTER_DIS         (1 << 6)
+#       define RADEON_BUS_BIOS_DIS_ROM       (1 << 12)
+#	define RS600_BUS_MASTER_DIS	     (1 << 14)
+#	define RS600_MSI_REARM		     (1 << 20) /* rs600/rs690/rs740 */
+#       define RADEON_BUS_RD_DISCARD_EN      (1 << 24)
+#       define RADEON_BUS_RD_ABORT_EN        (1 << 25)
+#       define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28)
+#       define RADEON_BUS_WRT_BURST          (1 << 29)
+#       define RADEON_BUS_READ_BURST         (1 << 30)
+#define RADEON_BUS_CNTL1                    0x0034
+#       define RADEON_BUS_WAIT_ON_LOCK_EN    (1 << 4)
+#define RV370_BUS_CNTL                      0x004c
+#       define RV370_BUS_BIOS_DIS_ROM        (1 << 2)
+/* rv370/rv380, rv410, r423/r430/r480, r5xx */
+#define RADEON_MSI_REARM_EN		    0x0160
+#	define RV370_MSI_REARM_EN	     (1 << 0)
+
+/* #define RADEON_PCIE_INDEX                   0x0030 */
+/* #define RADEON_PCIE_DATA                    0x0034 */
+#define RADEON_PCIE_LC_LINK_WIDTH_CNTL             0xa2 /* PCIE */
+#       define RADEON_PCIE_LC_LINK_WIDTH_SHIFT     0
+#       define RADEON_PCIE_LC_LINK_WIDTH_MASK      0x7
+#       define RADEON_PCIE_LC_LINK_WIDTH_X0        0
+#       define RADEON_PCIE_LC_LINK_WIDTH_X1        1
+#       define RADEON_PCIE_LC_LINK_WIDTH_X2        2
+#       define RADEON_PCIE_LC_LINK_WIDTH_X4        3
+#       define RADEON_PCIE_LC_LINK_WIDTH_X8        4
+#       define RADEON_PCIE_LC_LINK_WIDTH_X12       5
+#       define RADEON_PCIE_LC_LINK_WIDTH_X16       6
+#       define RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT  4
+#       define RADEON_PCIE_LC_LINK_WIDTH_RD_MASK   0x70
+#       define RADEON_PCIE_LC_RECONFIG_NOW         (1 << 8)
+#       define RADEON_PCIE_LC_RECONFIG_LATER       (1 << 9)
+#       define RADEON_PCIE_LC_SHORT_RECONFIG_EN    (1 << 10)
+#       define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE   (1 << 7)
+#       define R600_PCIE_LC_RENEGOTIATION_SUPPORT  (1 << 9)
+#       define R600_PCIE_LC_RENEGOTIATE_EN         (1 << 10)
+#       define R600_PCIE_LC_SHORT_RECONFIG_EN      (1 << 11)
+#       define R600_PCIE_LC_UPCONFIGURE_SUPPORT    (1 << 12)
+#       define R600_PCIE_LC_UPCONFIGURE_DIS        (1 << 13)
+
+#define R600_TARGET_AND_CURRENT_PROFILE_INDEX      0x70c
+#define R700_TARGET_AND_CURRENT_PROFILE_INDEX      0x66c
+
+#define RADEON_CACHE_CNTL                   0x1724
+#define RADEON_CACHE_LINE                   0x0f0c /* PCI */
+#define RADEON_CAPABILITIES_ID              0x0f50 /* PCI */
+#define RADEON_CAPABILITIES_PTR             0x0f34 /* PCI */
+#define RADEON_CLK_PIN_CNTL                 0x0001 /* PLL */
+#       define RADEON_DONT_USE_XTALIN       (1 << 4)
+#       define RADEON_SCLK_DYN_START_CNTL   (1 << 15)
+#define RADEON_CLOCK_CNTL_DATA              0x000c
+#define RADEON_CLOCK_CNTL_INDEX             0x0008
+#       define RADEON_PLL_WR_EN             (1 << 7)
+#       define RADEON_PLL_DIV_SEL           (3 << 8)
+#       define RADEON_PLL2_DIV_SEL_MASK     (~(3 << 8))
+#define RADEON_CLK_PWRMGT_CNTL              0x0014
+#       define RADEON_ENGIN_DYNCLK_MODE     (1 << 12)
+#       define RADEON_ACTIVE_HILO_LAT_MASK  (3 << 13)
+#       define RADEON_ACTIVE_HILO_LAT_SHIFT 13
+#       define RADEON_DISP_DYN_STOP_LAT_MASK (1 << 12)
+#       define RADEON_MC_BUSY               (1 << 16)
+#       define RADEON_DLL_READY             (1 << 19)
+#       define RADEON_CG_NO1_DEBUG_0        (1 << 24)
+#       define RADEON_CG_NO1_DEBUG_MASK     (0x1f << 24)
+#       define RADEON_DYN_STOP_MODE_MASK    (7 << 21)
+#       define RADEON_TVPLL_PWRMGT_OFF      (1 << 30)
+#       define RADEON_TVCLK_TURNOFF         (1 << 31)
+#define RADEON_PLL_PWRMGT_CNTL              0x0015 /* PLL */
+#	define RADEON_PM_MODE_SEL           (1 << 13)
+#       define RADEON_TCL_BYPASS_DISABLE    (1 << 20)
+#define RADEON_CLR_CMP_CLR_3D               0x1a24
+#define RADEON_CLR_CMP_CLR_DST              0x15c8
+#define RADEON_CLR_CMP_CLR_SRC              0x15c4
+#define RADEON_CLR_CMP_CNTL                 0x15c0
+#       define RADEON_SRC_CMP_EQ_COLOR      (4 <<  0)
+#       define RADEON_SRC_CMP_NEQ_COLOR     (5 <<  0)
+#       define RADEON_CLR_CMP_SRC_SOURCE    (1 << 24)
+#define RADEON_CLR_CMP_MASK                 0x15cc
+#       define RADEON_CLR_CMP_MSK           0xffffffff
+#define RADEON_CLR_CMP_MASK_3D              0x1A28
+#define RADEON_COMMAND                      0x0f04 /* PCI */
+#define RADEON_COMPOSITE_SHADOW_ID          0x1a0c
+#define RADEON_CONFIG_APER_0_BASE           0x0100
+#define RADEON_CONFIG_APER_1_BASE           0x0104
+#define RADEON_CONFIG_APER_SIZE             0x0108
+#define RADEON_CONFIG_BONDS                 0x00e8
+#define RADEON_CONFIG_CNTL                  0x00e0
+#       define RADEON_CFG_VGA_RAM_EN        (1 << 8)
+#       define RADEON_CFG_VGA_IO_DIS        (1 << 9)
+#       define RADEON_CFG_ATI_REV_A11       (0   << 16)
+#       define RADEON_CFG_ATI_REV_A12       (1   << 16)
+#       define RADEON_CFG_ATI_REV_A13       (2   << 16)
+#       define RADEON_CFG_ATI_REV_ID_MASK   (0xf << 16)
+#define RADEON_CONFIG_MEMSIZE               0x00f8
+#define RADEON_CONFIG_MEMSIZE_EMBEDDED      0x0114
+#define RADEON_CONFIG_REG_1_BASE            0x010c
+#define RADEON_CONFIG_REG_APER_SIZE         0x0110
+#define RADEON_CONFIG_XSTRAP                0x00e4
+#define RADEON_CONSTANT_COLOR_C             0x1d34
+#       define RADEON_CONSTANT_COLOR_MASK   0x00ffffff
+#       define RADEON_CONSTANT_COLOR_ONE    0x00ffffff
+#       define RADEON_CONSTANT_COLOR_ZERO   0x00000000
+#define RADEON_CRC_CMDFIFO_ADDR             0x0740
+#define RADEON_CRC_CMDFIFO_DOUT             0x0744
+#define RADEON_GRPH_BUFFER_CNTL             0x02f0
+#       define RADEON_GRPH_START_REQ_MASK          (0x7f)
+#       define RADEON_GRPH_START_REQ_SHIFT         0
+#       define RADEON_GRPH_STOP_REQ_MASK           (0x7f<<8)
+#       define RADEON_GRPH_STOP_REQ_SHIFT          8
+#       define RADEON_GRPH_CRITICAL_POINT_MASK     (0x7f<<16)
+#       define RADEON_GRPH_CRITICAL_POINT_SHIFT    16
+#       define RADEON_GRPH_CRITICAL_CNTL           (1<<28)
+#       define RADEON_GRPH_BUFFER_SIZE             (1<<29)
+#       define RADEON_GRPH_CRITICAL_AT_SOF         (1<<30)
+#       define RADEON_GRPH_STOP_CNTL               (1<<31)
+#define RADEON_GRPH2_BUFFER_CNTL            0x03f0
+#       define RADEON_GRPH2_START_REQ_MASK         (0x7f)
+#       define RADEON_GRPH2_START_REQ_SHIFT         0
+#       define RADEON_GRPH2_STOP_REQ_MASK          (0x7f<<8)
+#       define RADEON_GRPH2_STOP_REQ_SHIFT         8
+#       define RADEON_GRPH2_CRITICAL_POINT_MASK    (0x7f<<16)
+#       define RADEON_GRPH2_CRITICAL_POINT_SHIFT   16
+#       define RADEON_GRPH2_CRITICAL_CNTL          (1<<28)
+#       define RADEON_GRPH2_BUFFER_SIZE            (1<<29)
+#       define RADEON_GRPH2_CRITICAL_AT_SOF        (1<<30)
+#       define RADEON_GRPH2_STOP_CNTL              (1<<31)
+#define RADEON_CRTC_CRNT_FRAME              0x0214
+#define RADEON_CRTC_EXT_CNTL                0x0054
+#       define RADEON_CRTC_VGA_XOVERSCAN    (1 <<  0)
+#       define RADEON_VGA_ATI_LINEAR        (1 <<  3)
+#       define RADEON_XCRT_CNT_EN           (1 <<  6)
+#       define RADEON_CRTC_HSYNC_DIS        (1 <<  8)
+#       define RADEON_CRTC_VSYNC_DIS        (1 <<  9)
+#       define RADEON_CRTC_DISPLAY_DIS      (1 << 10)
+#       define RADEON_CRTC_SYNC_TRISTAT     (1 << 11)
+#       define RADEON_CRTC_CRT_ON           (1 << 15)
+#define RADEON_CRTC_EXT_CNTL_DPMS_BYTE      0x0055
+#       define RADEON_CRTC_HSYNC_DIS_BYTE   (1 <<  0)
+#       define RADEON_CRTC_VSYNC_DIS_BYTE   (1 <<  1)
+#       define RADEON_CRTC_DISPLAY_DIS_BYTE (1 <<  2)
+#define RADEON_CRTC_GEN_CNTL                0x0050
+#       define RADEON_CRTC_DBL_SCAN_EN      (1 <<  0)
+#       define RADEON_CRTC_INTERLACE_EN     (1 <<  1)
+#       define RADEON_CRTC_CSYNC_EN         (1 <<  4)
+#       define RADEON_CRTC_ICON_EN          (1 << 15)
+#       define RADEON_CRTC_CUR_EN           (1 << 16)
+#       define RADEON_CRTC_VSTAT_MODE_MASK  (3 << 17)
+#       define RADEON_CRTC_CUR_MODE_MASK    (7 << 20)
+#       define RADEON_CRTC_CUR_MODE_SHIFT   20
+#       define RADEON_CRTC_CUR_MODE_MONO    0
+#       define RADEON_CRTC_CUR_MODE_24BPP   2
+#       define RADEON_CRTC_EXT_DISP_EN      (1 << 24)
+#       define RADEON_CRTC_EN               (1 << 25)
+#       define RADEON_CRTC_DISP_REQ_EN_B    (1 << 26)
+#define RADEON_CRTC2_GEN_CNTL               0x03f8
+#       define RADEON_CRTC2_DBL_SCAN_EN     (1 <<  0)
+#       define RADEON_CRTC2_INTERLACE_EN    (1 <<  1)
+#       define RADEON_CRTC2_SYNC_TRISTAT    (1 <<  4)
+#       define RADEON_CRTC2_HSYNC_TRISTAT   (1 <<  5)
+#       define RADEON_CRTC2_VSYNC_TRISTAT   (1 <<  6)
+#       define RADEON_CRTC2_CRT2_ON         (1 <<  7)
+#       define RADEON_CRTC2_PIX_WIDTH_SHIFT 8
+#       define RADEON_CRTC2_PIX_WIDTH_MASK  (0xf << 8)
+#       define RADEON_CRTC2_ICON_EN         (1 << 15)
+#       define RADEON_CRTC2_CUR_EN          (1 << 16)
+#       define RADEON_CRTC2_CUR_MODE_MASK   (7 << 20)
+#       define RADEON_CRTC2_DISP_DIS        (1 << 23)
+#       define RADEON_CRTC2_EN              (1 << 25)
+#       define RADEON_CRTC2_DISP_REQ_EN_B   (1 << 26)
+#       define RADEON_CRTC2_CSYNC_EN        (1 << 27)
+#       define RADEON_CRTC2_HSYNC_DIS       (1 << 28)
+#       define RADEON_CRTC2_VSYNC_DIS       (1 << 29)
+#define RADEON_CRTC_MORE_CNTL               0x27c
+#       define RADEON_CRTC_AUTO_HORZ_CENTER_EN (1<<2)
+#       define RADEON_CRTC_AUTO_VERT_CENTER_EN (1<<3)
+#       define RADEON_CRTC_H_CUTOFF_ACTIVE_EN (1<<4)
+#       define RADEON_CRTC_V_CUTOFF_ACTIVE_EN (1<<5)
+#define RADEON_CRTC_GUI_TRIG_VLINE          0x0218
+#define RADEON_CRTC_H_SYNC_STRT_WID         0x0204
+#       define RADEON_CRTC_H_SYNC_STRT_PIX        (0x07  <<  0)
+#       define RADEON_CRTC_H_SYNC_STRT_CHAR       (0x3ff <<  3)
+#       define RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT 3
+#       define RADEON_CRTC_H_SYNC_WID             (0x3f  << 16)
+#       define RADEON_CRTC_H_SYNC_WID_SHIFT       16
+#       define RADEON_CRTC_H_SYNC_POL             (1     << 23)
+#define RADEON_CRTC2_H_SYNC_STRT_WID        0x0304
+#       define RADEON_CRTC2_H_SYNC_STRT_PIX        (0x07  <<  0)
+#       define RADEON_CRTC2_H_SYNC_STRT_CHAR       (0x3ff <<  3)
+#       define RADEON_CRTC2_H_SYNC_STRT_CHAR_SHIFT 3
+#       define RADEON_CRTC2_H_SYNC_WID             (0x3f  << 16)
+#       define RADEON_CRTC2_H_SYNC_WID_SHIFT       16
+#       define RADEON_CRTC2_H_SYNC_POL             (1     << 23)
+#define RADEON_CRTC_H_TOTAL_DISP            0x0200
+#       define RADEON_CRTC_H_TOTAL          (0x03ff << 0)
+#       define RADEON_CRTC_H_TOTAL_SHIFT    0
+#       define RADEON_CRTC_H_DISP           (0x01ff << 16)
+#       define RADEON_CRTC_H_DISP_SHIFT     16
+#define RADEON_CRTC2_H_TOTAL_DISP           0x0300
+#       define RADEON_CRTC2_H_TOTAL         (0x03ff << 0)
+#       define RADEON_CRTC2_H_TOTAL_SHIFT   0
+#       define RADEON_CRTC2_H_DISP          (0x01ff << 16)
+#       define RADEON_CRTC2_H_DISP_SHIFT    16
+
+#define RADEON_CRTC_OFFSET_RIGHT	    0x0220
+#define RADEON_CRTC_OFFSET                  0x0224
+#	define RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET (1<<30)
+#	define RADEON_CRTC_OFFSET__OFFSET_LOCK	   (1<<31)
+
+#define RADEON_CRTC2_OFFSET                 0x0324
+#	define RADEON_CRTC2_OFFSET__GUI_TRIG_OFFSET (1<<30)
+#	define RADEON_CRTC2_OFFSET__OFFSET_LOCK	    (1<<31)
+#define RADEON_CRTC_OFFSET_CNTL             0x0228
+#       define RADEON_CRTC_TILE_LINE_SHIFT              0
+#       define RADEON_CRTC_TILE_LINE_RIGHT_SHIFT        4
+#	define R300_CRTC_X_Y_MODE_EN_RIGHT		(1 << 6)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_MASK   (3 << 7)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_AUTO   (0 << 7)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_SINGLE (1 << 7)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DOUBLE (2 << 7)
+#	define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DIS    (3 << 7)
+#	define R300_CRTC_X_Y_MODE_EN			(1 << 9)
+#	define R300_CRTC_MICRO_TILE_BUFFER_MASK		(3 << 10)
+#	define R300_CRTC_MICRO_TILE_BUFFER_AUTO		(0 << 10)
+#	define R300_CRTC_MICRO_TILE_BUFFER_SINGLE	(1 << 10)
+#	define R300_CRTC_MICRO_TILE_BUFFER_DOUBLE	(2 << 10)
+#	define R300_CRTC_MICRO_TILE_BUFFER_DIS		(3 << 10)
+#	define R300_CRTC_MICRO_TILE_EN_RIGHT		(1 << 12)
+#	define R300_CRTC_MICRO_TILE_EN			(1 << 13)
+#	define R300_CRTC_MACRO_TILE_EN_RIGHT		(1 << 14)
+#       define R300_CRTC_MACRO_TILE_EN                  (1 << 15)
+#       define RADEON_CRTC_TILE_EN_RIGHT                (1 << 14)
+#       define RADEON_CRTC_TILE_EN                      (1 << 15)
+#       define RADEON_CRTC_OFFSET_FLIP_CNTL             (1 << 16)
+#       define RADEON_CRTC_STEREO_OFFSET_EN             (1 << 17)
+#       define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN      (1 << 28)
+#       define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN     (1 << 29)
+
+#define R300_CRTC_TILE_X0_Y0	            0x0350
+#define R300_CRTC2_TILE_X0_Y0	            0x0358
+
+#define RADEON_CRTC2_OFFSET_CNTL            0x0328
+#       define RADEON_CRTC2_OFFSET_FLIP_CNTL (1 << 16)
+#       define RADEON_CRTC2_TILE_EN         (1 << 15)
+#define RADEON_CRTC_PITCH                   0x022c
+#	define RADEON_CRTC_PITCH__SHIFT		 0
+#	define RADEON_CRTC_PITCH__RIGHT_SHIFT	16
+
+#define RADEON_CRTC2_PITCH                  0x032c
+#define RADEON_CRTC_STATUS                  0x005c
+#       define RADEON_CRTC_VBLANK_CUR       (1 <<  0)
+#       define RADEON_CRTC_VBLANK_SAVE      (1 <<  1)
+#       define RADEON_CRTC_VBLANK_SAVE_CLEAR  (1 <<  1)
+#define RADEON_CRTC2_STATUS                  0x03fc
+#       define RADEON_CRTC2_VBLANK_CUR       (1 <<  0)
+#       define RADEON_CRTC2_VBLANK_SAVE      (1 <<  1)
+#       define RADEON_CRTC2_VBLANK_SAVE_CLEAR  (1 <<  1)
+#define RADEON_CRTC_V_SYNC_STRT_WID         0x020c
+#       define RADEON_CRTC_V_SYNC_STRT        (0x7ff <<  0)
+#       define RADEON_CRTC_V_SYNC_STRT_SHIFT  0
+#       define RADEON_CRTC_V_SYNC_WID         (0x1f  << 16)
+#       define RADEON_CRTC_V_SYNC_WID_SHIFT   16
+#       define RADEON_CRTC_V_SYNC_POL         (1     << 23)
+#define RADEON_CRTC2_V_SYNC_STRT_WID        0x030c
+#       define RADEON_CRTC2_V_SYNC_STRT       (0x7ff <<  0)
+#       define RADEON_CRTC2_V_SYNC_STRT_SHIFT 0
+#       define RADEON_CRTC2_V_SYNC_WID        (0x1f  << 16)
+#       define RADEON_CRTC2_V_SYNC_WID_SHIFT  16
+#       define RADEON_CRTC2_V_SYNC_POL        (1     << 23)
+#define RADEON_CRTC_V_TOTAL_DISP            0x0208
+#       define RADEON_CRTC_V_TOTAL          (0x07ff << 0)
+#       define RADEON_CRTC_V_TOTAL_SHIFT    0
+#       define RADEON_CRTC_V_DISP           (0x07ff << 16)
+#       define RADEON_CRTC_V_DISP_SHIFT     16
+#define RADEON_CRTC2_V_TOTAL_DISP           0x0308
+#       define RADEON_CRTC2_V_TOTAL         (0x07ff << 0)
+#       define RADEON_CRTC2_V_TOTAL_SHIFT   0
+#       define RADEON_CRTC2_V_DISP          (0x07ff << 16)
+#       define RADEON_CRTC2_V_DISP_SHIFT    16
+#define RADEON_CRTC_VLINE_CRNT_VLINE        0x0210
+#       define RADEON_CRTC_CRNT_VLINE_MASK  (0x7ff << 16)
+#define RADEON_CRTC2_CRNT_FRAME             0x0314
+#define RADEON_CRTC2_GUI_TRIG_VLINE         0x0318
+#define RADEON_CRTC2_VLINE_CRNT_VLINE       0x0310
+#define RADEON_CRTC8_DATA                   0x03d5 /* VGA, 0x3b5 */
+#define RADEON_CRTC8_IDX                    0x03d4 /* VGA, 0x3b4 */
+#define RADEON_CUR_CLR0                     0x026c
+#define RADEON_CUR_CLR1                     0x0270
+#define RADEON_CUR_HORZ_VERT_OFF            0x0268
+#define RADEON_CUR_HORZ_VERT_POSN           0x0264
+#define RADEON_CUR_OFFSET                   0x0260
+#       define RADEON_CUR_LOCK              (1 << 31)
+#define RADEON_CUR2_CLR0                    0x036c
+#define RADEON_CUR2_CLR1                    0x0370
+#define RADEON_CUR2_HORZ_VERT_OFF           0x0368
+#define RADEON_CUR2_HORZ_VERT_POSN          0x0364
+#define RADEON_CUR2_OFFSET                  0x0360
+#       define RADEON_CUR2_LOCK             (1 << 31)
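+
+/*
+ * Usage sketch (editorial addition): CUR_LOCK freezes hardware cursor
+ * updates until the next vertical retrace, so offset and position can
+ * be reprogrammed without tearing.  A read-modify-write of the lock bit:
+ */
+static inline void radeon_cursor_lock(bool lock)
+{
+        u32 cur = RREG32(RADEON_CUR_OFFSET);
+
+        if (lock)
+                cur |= RADEON_CUR_LOCK;
+        else
+                cur &= ~RADEON_CUR_LOCK;
+        WREG32(RADEON_CUR_OFFSET, cur);
+}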
+
+#define RADEON_DAC_CNTL                     0x0058
+#       define RADEON_DAC_RANGE_CNTL        (3 <<  0)
+#       define RADEON_DAC_RANGE_CNTL_PS2    (2 <<  0)
+#       define RADEON_DAC_RANGE_CNTL_MASK   0x03
+#       define RADEON_DAC_BLANKING          (1 <<  2)
+#       define RADEON_DAC_CMP_EN            (1 <<  3)
+#       define RADEON_DAC_CMP_OUTPUT        (1 <<  7)
+#       define RADEON_DAC_8BIT_EN           (1 <<  8)
+#       define RADEON_DAC_TVO_EN            (1 << 10)
+#       define RADEON_DAC_VGA_ADR_EN        (1 << 13)
+#       define RADEON_DAC_PDWN              (1 << 15)
+#       define RADEON_DAC_MASK_ALL          (0xff << 24)
+#define RADEON_DAC_CNTL2                    0x007c
+#       define RADEON_DAC2_TV_CLK_SEL       (0 <<  1)
+#       define RADEON_DAC2_DAC_CLK_SEL      (1 <<  0)
+#       define RADEON_DAC2_DAC2_CLK_SEL     (1 <<  1)
+#       define RADEON_DAC2_PALETTE_ACC_CTL  (1 <<  5)
+#       define RADEON_DAC2_CMP_EN           (1 <<  7)
+#       define RADEON_DAC2_CMP_OUT_R        (1 <<  8)
+#       define RADEON_DAC2_CMP_OUT_G        (1 <<  9)
+#       define RADEON_DAC2_CMP_OUT_B        (1 << 10)
+#       define RADEON_DAC2_CMP_OUTPUT       (1 << 11)
+#define RADEON_DAC_EXT_CNTL                 0x0280
+#       define RADEON_DAC2_FORCE_BLANK_OFF_EN (1 << 0)
+#       define RADEON_DAC2_FORCE_DATA_EN      (1 << 1)
+#       define RADEON_DAC_FORCE_BLANK_OFF_EN  (1 << 4)
+#       define RADEON_DAC_FORCE_DATA_EN       (1 << 5)
+#       define RADEON_DAC_FORCE_DATA_SEL_MASK (3 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_R    (0 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_G    (1 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_B    (2 << 6)
+#       define RADEON_DAC_FORCE_DATA_SEL_RGB  (3 << 6)
+#       define RADEON_DAC_FORCE_DATA_MASK   0x0003ff00
+#       define RADEON_DAC_FORCE_DATA_SHIFT  8
+#define RADEON_DAC_MACRO_CNTL               0x0d04
+#       define RADEON_DAC_PDWN_R            (1 << 16)
+#       define RADEON_DAC_PDWN_G            (1 << 17)
+#       define RADEON_DAC_PDWN_B            (1 << 18)
+#define RADEON_DISP_PWR_MAN                 0x0d08
+#       define RADEON_DISP_PWR_MAN_D3_CRTC_EN      (1 << 0)
+#       define RADEON_DISP_PWR_MAN_D3_CRTC2_EN     (1 << 4)
+#       define RADEON_DISP_PWR_MAN_DPMS_ON  (0 << 8)
+#       define RADEON_DISP_PWR_MAN_DPMS_STANDBY    (1 << 8)
+#       define RADEON_DISP_PWR_MAN_DPMS_SUSPEND    (2 << 8)
+#       define RADEON_DISP_PWR_MAN_DPMS_OFF (3 << 8)
+#       define RADEON_DISP_D3_RST           (1 << 16)
+#       define RADEON_DISP_D3_REG_RST       (1 << 17)
+#       define RADEON_DISP_D3_GRPH_RST      (1 << 18)
+#       define RADEON_DISP_D3_SUBPIC_RST    (1 << 19)
+#       define RADEON_DISP_D3_OV0_RST       (1 << 20)
+#       define RADEON_DISP_D1D2_GRPH_RST    (1 << 21)
+#       define RADEON_DISP_D1D2_SUBPIC_RST  (1 << 22)
+#       define RADEON_DISP_D1D2_OV0_RST     (1 << 23)
+#       define RADEON_DIG_TMDS_ENABLE_RST   (1 << 24)
+#       define RADEON_TV_ENABLE_RST         (1 << 25)
+#       define RADEON_AUTO_PWRUP_EN         (1 << 26)
+#define RADEON_TV_DAC_CNTL                  0x088c
+#       define RADEON_TV_DAC_NBLANK         (1 << 0)
+#       define RADEON_TV_DAC_NHOLD          (1 << 1)
+#       define RADEON_TV_DAC_PEDESTAL       (1 <<  2)
+#       define RADEON_TV_MONITOR_DETECT_EN  (1 <<  4)
+#       define RADEON_TV_DAC_CMPOUT         (1 <<  5)
+#       define RADEON_TV_DAC_STD_MASK       (3 <<  8)
+#       define RADEON_TV_DAC_STD_PAL        (0 <<  8)
+#       define RADEON_TV_DAC_STD_NTSC       (1 <<  8)
+#       define RADEON_TV_DAC_STD_PS2        (2 <<  8)
+#       define RADEON_TV_DAC_STD_RS343      (3 <<  8)
+#       define RADEON_TV_DAC_BGSLEEP        (1 <<  6)
+#       define RADEON_TV_DAC_BGADJ_MASK     (0xf <<  16)
+#       define RADEON_TV_DAC_BGADJ_SHIFT    16
+#       define RADEON_TV_DAC_DACADJ_MASK    (0xf <<  20)
+#       define RADEON_TV_DAC_DACADJ_SHIFT   20
+#       define RADEON_TV_DAC_RDACPD         (1 <<  24)
+#       define RADEON_TV_DAC_GDACPD         (1 <<  25)
+#       define RADEON_TV_DAC_BDACPD         (1 <<  26)
+#       define RADEON_TV_DAC_RDACDET        (1 << 29)
+#       define RADEON_TV_DAC_GDACDET        (1 << 30)
+#       define RADEON_TV_DAC_BDACDET        (1 << 31)
+#       define R420_TV_DAC_DACADJ_MASK      (0x1f <<  20)
+#       define R420_TV_DAC_RDACPD           (1 <<  25)
+#       define R420_TV_DAC_GDACPD           (1 <<  26)
+#       define R420_TV_DAC_BDACPD           (1 <<  27)
+#       define R420_TV_DAC_TVENABLE         (1 <<  28)
+#define RADEON_DISP_HW_DEBUG                0x0d14
+#       define RADEON_CRT2_DISP1_SEL        (1 <<  5)
+#define RADEON_DISP_OUTPUT_CNTL             0x0d64
+#       define RADEON_DISP_DAC_SOURCE_MASK  0x03
+#       define RADEON_DISP_DAC2_SOURCE_MASK  0x0c
+#       define RADEON_DISP_DAC_SOURCE_CRTC2 0x01
+#       define RADEON_DISP_DAC_SOURCE_RMX   0x02
+#       define RADEON_DISP_DAC_SOURCE_LTU   0x03
+#       define RADEON_DISP_DAC2_SOURCE_CRTC2 0x04
+#       define RADEON_DISP_TVDAC_SOURCE_MASK  (0x03 << 2)
+#       define RADEON_DISP_TVDAC_SOURCE_CRTC  0x0
+#       define RADEON_DISP_TVDAC_SOURCE_CRTC2 (0x01 << 2)
+#       define RADEON_DISP_TVDAC_SOURCE_RMX   (0x02 << 2)
+#       define RADEON_DISP_TVDAC_SOURCE_LTU   (0x03 << 2)
+#       define RADEON_DISP_TRANS_MATRIX_MASK  (0x03 << 4)
+#       define RADEON_DISP_TRANS_MATRIX_ALPHA_MSB (0x00 << 4)
+#       define RADEON_DISP_TRANS_MATRIX_GRAPHICS  (0x01 << 4)
+#       define RADEON_DISP_TRANS_MATRIX_VIDEO     (0x02 << 4)
+#       define RADEON_DISP_TV_SOURCE_CRTC   (1 << 16) /* crtc1 or crtc2 */
+#       define RADEON_DISP_TV_SOURCE_LTU    (0 << 16) /* linear transform unit */
+#define RADEON_DISP_TV_OUT_CNTL             0x0d6c
+#       define RADEON_DISP_TV_PATH_SRC_CRTC2 (1 << 16)
+#       define RADEON_DISP_TV_PATH_SRC_CRTC1 (0 << 16)
+#define RADEON_DAC_CRC_SIG                  0x02cc
+#define RADEON_DAC_DATA                     0x03c9 /* VGA */
+#define RADEON_DAC_MASK                     0x03c6 /* VGA */
+#define RADEON_DAC_R_INDEX                  0x03c7 /* VGA */
+#define RADEON_DAC_W_INDEX                  0x03c8 /* VGA */
+#define RADEON_DDA_CONFIG                   0x02e0
+#define RADEON_DDA_ON_OFF                   0x02e4
+#define RADEON_DEFAULT_OFFSET               0x16e0
+#define RADEON_DEFAULT_PITCH                0x16e4
+#define RADEON_DEFAULT_SC_BOTTOM_RIGHT      0x16e8
+#       define RADEON_DEFAULT_SC_RIGHT_MAX  (0x1fff <<  0)
+#       define RADEON_DEFAULT_SC_BOTTOM_MAX (0x1fff << 16)
+#define RADEON_DESTINATION_3D_CLR_CMP_VAL   0x1820
+#define RADEON_DESTINATION_3D_CLR_CMP_MSK   0x1824
+#define RADEON_DEVICE_ID                    0x0f02 /* PCI */
+#define RADEON_DISP_MISC_CNTL               0x0d00
+#       define RADEON_SOFT_RESET_GRPH_PP    (1 << 0)
+#define RADEON_DISP_MERGE_CNTL		  0x0d60
+#       define RADEON_DISP_ALPHA_MODE_MASK  0x03
+#       define RADEON_DISP_ALPHA_MODE_KEY   0
+#       define RADEON_DISP_ALPHA_MODE_PER_PIXEL 1
+#       define RADEON_DISP_ALPHA_MODE_GLOBAL 2
+#       define RADEON_DISP_RGB_OFFSET_EN    (1 << 8)
+#       define RADEON_DISP_GRPH_ALPHA_MASK  (0xff << 16)
+#       define RADEON_DISP_OV0_ALPHA_MASK   (0xff << 24)
+#	define RADEON_DISP_LIN_TRANS_BYPASS (0x01 << 9)
+#define RADEON_DISP2_MERGE_CNTL		    0x0d68
+#       define RADEON_DISP2_RGB_OFFSET_EN   (1 << 8)
+#define RADEON_DISP_LIN_TRANS_GRPH_A        0x0d80
+#define RADEON_DISP_LIN_TRANS_GRPH_B        0x0d84
+#define RADEON_DISP_LIN_TRANS_GRPH_C        0x0d88
+#define RADEON_DISP_LIN_TRANS_GRPH_D        0x0d8c
+#define RADEON_DISP_LIN_TRANS_GRPH_E        0x0d90
+#define RADEON_DISP_LIN_TRANS_GRPH_F        0x0d98
+#define RADEON_DP_BRUSH_BKGD_CLR            0x1478
+#define RADEON_DP_BRUSH_FRGD_CLR            0x147c
+#define RADEON_DP_CNTL                      0x16c0
+#       define RADEON_DST_X_LEFT_TO_RIGHT   (1 <<  0)
+#       define RADEON_DST_Y_TOP_TO_BOTTOM   (1 <<  1)
+#       define RADEON_DP_DST_TILE_LINEAR    (0 <<  3)
+#       define RADEON_DP_DST_TILE_MACRO     (1 <<  3)
+#       define RADEON_DP_DST_TILE_MICRO     (2 <<  3)
+#       define RADEON_DP_DST_TILE_BOTH      (3 <<  3)
+#define RADEON_DP_CNTL_XDIR_YDIR_YMAJOR     0x16d0
+#       define RADEON_DST_Y_MAJOR             (1 <<  2)
+#       define RADEON_DST_Y_DIR_TOP_TO_BOTTOM (1 << 15)
+#       define RADEON_DST_X_DIR_LEFT_TO_RIGHT (1 << 31)
+#define RADEON_DP_DATATYPE                  0x16c4
+#       define RADEON_HOST_BIG_ENDIAN_EN    (1 << 29)
+#define RADEON_DP_GUI_MASTER_CNTL           0x146c
+#       define RADEON_GMC_SRC_PITCH_OFFSET_CNTL   (1    <<  0)
+#       define RADEON_GMC_DST_PITCH_OFFSET_CNTL   (1    <<  1)
+#       define RADEON_GMC_SRC_CLIPPING            (1    <<  2)
+#       define RADEON_GMC_DST_CLIPPING            (1    <<  3)
+#       define RADEON_GMC_BRUSH_DATATYPE_MASK     (0x0f <<  4)
+#       define RADEON_GMC_BRUSH_8X8_MONO_FG_BG    (0    <<  4)
+#       define RADEON_GMC_BRUSH_8X8_MONO_FG_LA    (1    <<  4)
+#       define RADEON_GMC_BRUSH_1X8_MONO_FG_BG    (4    <<  4)
+#       define RADEON_GMC_BRUSH_1X8_MONO_FG_LA    (5    <<  4)
+#       define RADEON_GMC_BRUSH_32x1_MONO_FG_BG   (6    <<  4)
+#       define RADEON_GMC_BRUSH_32x1_MONO_FG_LA   (7    <<  4)
+#       define RADEON_GMC_BRUSH_32x32_MONO_FG_BG  (8    <<  4)
+#       define RADEON_GMC_BRUSH_32x32_MONO_FG_LA  (9    <<  4)
+#       define RADEON_GMC_BRUSH_8x8_COLOR         (10   <<  4)
+#       define RADEON_GMC_BRUSH_1X8_COLOR         (12   <<  4)
+#       define RADEON_GMC_BRUSH_SOLID_COLOR       (13   <<  4)
+#       define RADEON_GMC_BRUSH_NONE              (15   <<  4)
+#       define RADEON_GMC_DST_8BPP_CI             (2    <<  8)
+#       define RADEON_GMC_DST_15BPP               (3    <<  8)
+#       define RADEON_GMC_DST_16BPP               (4    <<  8)
+#       define RADEON_GMC_DST_24BPP               (5    <<  8)
+#       define RADEON_GMC_DST_32BPP               (6    <<  8)
+#       define RADEON_GMC_DST_8BPP_RGB            (7    <<  8)
+#       define RADEON_GMC_DST_Y8                  (8    <<  8)
+#       define RADEON_GMC_DST_RGB8                (9    <<  8)
+#       define RADEON_GMC_DST_VYUY                (11   <<  8)
+#       define RADEON_GMC_DST_YVYU                (12   <<  8)
+#       define RADEON_GMC_DST_AYUV444             (14   <<  8)
+#       define RADEON_GMC_DST_ARGB4444            (15   <<  8)
+#       define RADEON_GMC_DST_DATATYPE_MASK       (0x0f <<  8)
+#       define RADEON_GMC_DST_DATATYPE_SHIFT      8
+#       define RADEON_GMC_SRC_DATATYPE_MASK       (3    << 12)
+#       define RADEON_GMC_SRC_DATATYPE_MONO_FG_BG (0    << 12)
+#       define RADEON_GMC_SRC_DATATYPE_MONO_FG_LA (1    << 12)
+#       define RADEON_GMC_SRC_DATATYPE_COLOR      (3    << 12)
+#       define RADEON_GMC_BYTE_PIX_ORDER          (1    << 14)
+#       define RADEON_GMC_BYTE_MSB_TO_LSB         (0    << 14)
+#       define RADEON_GMC_BYTE_LSB_TO_MSB         (1    << 14)
+#       define RADEON_GMC_CONVERSION_TEMP         (1    << 15)
+#       define RADEON_GMC_CONVERSION_TEMP_6500    (0    << 15)
+#       define RADEON_GMC_CONVERSION_TEMP_9300    (1    << 15)
+#       define RADEON_GMC_ROP3_MASK               (0xff << 16)
+#       define RADEON_DP_SRC_SOURCE_MASK          (7    << 24)
+#       define RADEON_DP_SRC_SOURCE_MEMORY        (2    << 24)
+#       define RADEON_DP_SRC_SOURCE_HOST_DATA     (3    << 24)
+#       define RADEON_GMC_3D_FCN_EN               (1    << 27)
+#       define RADEON_GMC_CLR_CMP_CNTL_DIS        (1    << 28)
+#       define RADEON_GMC_AUX_CLIP_DIS            (1    << 29)
+#       define RADEON_GMC_WR_MSK_DIS              (1    << 30)
+#       define RADEON_GMC_LD_BRUSH_Y_X            (1    << 31)
+#       define RADEON_ROP3_ZERO             0x00000000
+#       define RADEON_ROP3_DSa              0x00880000
+#       define RADEON_ROP3_SDna             0x00440000
+#       define RADEON_ROP3_S                0x00cc0000
+#       define RADEON_ROP3_DSna             0x00220000
+#       define RADEON_ROP3_D                0x00aa0000
+#       define RADEON_ROP3_DSx              0x00660000
+#       define RADEON_ROP3_DSo              0x00ee0000
+#       define RADEON_ROP3_DSon             0x00110000
+#       define RADEON_ROP3_DSxn             0x00990000
+#       define RADEON_ROP3_Dn               0x00550000
+#       define RADEON_ROP3_SDno             0x00dd0000
+#       define RADEON_ROP3_Sn               0x00330000
+#       define RADEON_ROP3_DSno             0x00bb0000
+#       define RADEON_ROP3_DSan             0x00770000
+#       define RADEON_ROP3_ONE              0x00ff0000
+#       define RADEON_ROP3_DPa              0x00a00000
+#       define RADEON_ROP3_PDna             0x00500000
+#       define RADEON_ROP3_P                0x00f00000
+#       define RADEON_ROP3_DPna             0x000a0000
+#       define RADEON_ROP3_DPx              0x005a0000
+#       define RADEON_ROP3_DPo              0x00fa0000
+#       define RADEON_ROP3_DPon             0x00050000
+#       define RADEON_ROP3_PDxn             0x00a50000
+#       define RADEON_ROP3_PDno             0x00f50000
+#       define RADEON_ROP3_Pn               0x000f0000
+#       define RADEON_ROP3_DPno             0x00af0000
+#       define RADEON_ROP3_DPan             0x005f0000
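+
+/*
+ * Usage sketch (editorial addition): 2D operations combine a brush
+ * type, a destination datatype and a ROP3 code in DP_GUI_MASTER_CNTL.
+ * ROP3_P replicates the pattern (brush) colour, ROP3_S copies the
+ * source.  A plausible solid-fill setup for a 32bpp destination:
+ */
+static inline u32 radeon_gmc_solid_fill_32bpp(void)
+{
+        return RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+               RADEON_GMC_BRUSH_SOLID_COLOR |
+               RADEON_GMC_DST_32BPP |
+               RADEON_GMC_CLR_CMP_CNTL_DIS |
+               RADEON_ROP3_P;
+}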
+#define RADEON_DP_GUI_MASTER_CNTL_C         0x1c84
+#define RADEON_DP_MIX                       0x16c8
+#define RADEON_DP_SRC_BKGD_CLR              0x15dc
+#define RADEON_DP_SRC_FRGD_CLR              0x15d8
+#define RADEON_DP_WRITE_MASK                0x16cc
+#define RADEON_DST_BRES_DEC                 0x1630
+#define RADEON_DST_BRES_ERR                 0x1628
+#define RADEON_DST_BRES_INC                 0x162c
+#define RADEON_DST_BRES_LNTH                0x1634
+#define RADEON_DST_BRES_LNTH_SUB            0x1638
+#define RADEON_DST_HEIGHT                   0x1410
+#define RADEON_DST_HEIGHT_WIDTH             0x143c
+#define RADEON_DST_HEIGHT_WIDTH_8           0x158c
+#define RADEON_DST_HEIGHT_WIDTH_BW          0x15b4
+#define RADEON_DST_HEIGHT_Y                 0x15a0
+#define RADEON_DST_LINE_START               0x1600
+#define RADEON_DST_LINE_END                 0x1604
+#define RADEON_DST_LINE_PATCOUNT            0x1608
+#       define RADEON_BRES_CNTL_SHIFT       8
+#define RADEON_DST_OFFSET                   0x1404
+#define RADEON_DST_PITCH                    0x1408
+#define RADEON_DST_PITCH_OFFSET             0x142c
+#define RADEON_DST_PITCH_OFFSET_C           0x1c80
+#       define RADEON_PITCH_SHIFT           21
+#       define RADEON_DST_TILE_LINEAR       (0 << 30)
+#       define RADEON_DST_TILE_MACRO        (1 << 30)
+#       define RADEON_DST_TILE_MICRO        (2 << 30)
+#       define RADEON_DST_TILE_BOTH         (3 << 30)
+#define RADEON_DST_WIDTH                    0x140c
+#define RADEON_DST_WIDTH_HEIGHT             0x1598
+#define RADEON_DST_WIDTH_X                  0x1588
+#define RADEON_DST_WIDTH_X_INCY             0x159c
+#define RADEON_DST_X                        0x141c
+#define RADEON_DST_X_SUB                    0x15a4
+#define RADEON_DST_X_Y                      0x1594
+#define RADEON_DST_Y                        0x1420
+#define RADEON_DST_Y_SUB                    0x15a8
+#define RADEON_DST_Y_X                      0x1438
+
+#define RADEON_FCP_CNTL                     0x0910
+#      define RADEON_FCP0_SRC_PCICLK             0
+#      define RADEON_FCP0_SRC_PCLK               1
+#      define RADEON_FCP0_SRC_PCLKb              2
+#      define RADEON_FCP0_SRC_HREF               3
+#      define RADEON_FCP0_SRC_GND                4
+#      define RADEON_FCP0_SRC_HREFb              5
+#define RADEON_FLUSH_1                      0x1704
+#define RADEON_FLUSH_2                      0x1708
+#define RADEON_FLUSH_3                      0x170c
+#define RADEON_FLUSH_4                      0x1710
+#define RADEON_FLUSH_5                      0x1714
+#define RADEON_FLUSH_6                      0x1718
+#define RADEON_FLUSH_7                      0x171c
+#define RADEON_FOG_3D_TABLE_START           0x1810
+#define RADEON_FOG_3D_TABLE_END             0x1814
+#define RADEON_FOG_3D_TABLE_DENSITY         0x181c
+#define RADEON_FOG_TABLE_INDEX              0x1a14
+#define RADEON_FOG_TABLE_DATA               0x1a18
+#define RADEON_FP_CRTC_H_TOTAL_DISP         0x0250
+#define RADEON_FP_CRTC_V_TOTAL_DISP         0x0254
+#       define RADEON_FP_CRTC_H_TOTAL_MASK      0x000003ff
+#       define RADEON_FP_CRTC_H_DISP_MASK       0x01ff0000
+#       define RADEON_FP_CRTC_V_TOTAL_MASK      0x00000fff
+#       define RADEON_FP_CRTC_V_DISP_MASK       0x0fff0000
+#       define RADEON_FP_H_SYNC_STRT_CHAR_MASK  0x00001ff8
+#       define RADEON_FP_H_SYNC_WID_MASK        0x003f0000
+#       define RADEON_FP_V_SYNC_STRT_MASK       0x00000fff
+#       define RADEON_FP_V_SYNC_WID_MASK        0x001f0000
+#       define RADEON_FP_CRTC_H_TOTAL_SHIFT     0x00000000
+#       define RADEON_FP_CRTC_H_DISP_SHIFT      0x00000010
+#       define RADEON_FP_CRTC_V_TOTAL_SHIFT     0x00000000
+#       define RADEON_FP_CRTC_V_DISP_SHIFT      0x00000010
+#       define RADEON_FP_H_SYNC_STRT_CHAR_SHIFT 0x00000003
+#       define RADEON_FP_H_SYNC_WID_SHIFT       0x00000010
+#       define RADEON_FP_V_SYNC_STRT_SHIFT      0x00000000
+#       define RADEON_FP_V_SYNC_WID_SHIFT       0x00000010
+#define RADEON_FP_GEN_CNTL                  0x0284
+#       define RADEON_FP_FPON                  (1 <<  0)
+#       define RADEON_FP_BLANK_EN              (1 <<  1)
+#       define RADEON_FP_TMDS_EN               (1 <<  2)
+#       define RADEON_FP_PANEL_FORMAT          (1 <<  3)
+#       define RADEON_FP_EN_TMDS               (1 <<  7)
+#       define RADEON_FP_DETECT_SENSE          (1 <<  8)
+#       define RADEON_FP_DETECT_INT_POL        (1 <<  9)
+#       define R200_FP_SOURCE_SEL_MASK         (3 <<  10)
+#       define R200_FP_SOURCE_SEL_CRTC1        (0 <<  10)
+#       define R200_FP_SOURCE_SEL_CRTC2        (1 <<  10)
+#       define R200_FP_SOURCE_SEL_RMX          (2 <<  10)
+#       define R200_FP_SOURCE_SEL_TRANS        (3 <<  10)
+#       define RADEON_FP_SEL_CRTC1             (0 << 13)
+#       define RADEON_FP_SEL_CRTC2             (1 << 13)
+#       define R300_HPD_SEL(x)                 ((x) << 13)
+#       define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
+#       define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
+#       define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
+#       define RADEON_FP_CRTC_USE_SHADOW_VEND  (1 << 18)
+#       define RADEON_FP_RMX_HVSYNC_CONTROL_EN (1 << 20)
+#       define RADEON_FP_DFP_SYNC_SEL          (1 << 21)
+#       define RADEON_FP_CRTC_LOCK_8DOT        (1 << 22)
+#       define RADEON_FP_CRT_SYNC_SEL          (1 << 23)
+#       define RADEON_FP_USE_SHADOW_EN         (1 << 24)
+#       define RADEON_FP_CRT_SYNC_ALT          (1 << 26)
+#define RADEON_FP2_GEN_CNTL                 0x0288
+#       define RADEON_FP2_BLANK_EN             (1 <<  1)
+#       define RADEON_FP2_ON                   (1 <<  2)
+#       define RADEON_FP2_PANEL_FORMAT         (1 <<  3)
+#       define RADEON_FP2_DETECT_SENSE         (1 <<  8)
+#       define RADEON_FP2_DETECT_INT_POL       (1 <<  9)
+#       define R200_FP2_SOURCE_SEL_MASK        (3 << 10)
+#       define R200_FP2_SOURCE_SEL_CRTC1       (0 << 10)
+#       define R200_FP2_SOURCE_SEL_CRTC2       (1 << 10)
+#       define R200_FP2_SOURCE_SEL_RMX         (2 << 10)
+#       define R200_FP2_SOURCE_SEL_TRANS_UNIT  (3 << 10)
+#       define RADEON_FP2_SRC_SEL_MASK         (3 << 13)
+#       define RADEON_FP2_SRC_SEL_CRTC2        (1 << 13)
+#       define RADEON_FP2_FP_POL               (1 << 16)
+#       define RADEON_FP2_LP_POL               (1 << 17)
+#       define RADEON_FP2_SCK_POL              (1 << 18)
+#       define RADEON_FP2_LCD_CNTL_MASK        (7 << 19)
+#       define RADEON_FP2_PAD_FLOP_EN          (1 << 22)
+#       define RADEON_FP2_CRC_EN               (1 << 23)
+#       define RADEON_FP2_CRC_READ_EN          (1 << 24)
+#       define RADEON_FP2_DVO_EN               (1 << 25)
+#       define RADEON_FP2_DVO_RATE_SEL_SDR     (1 << 26)
+#       define R200_FP2_DVO_RATE_SEL_SDR       (1 << 27)
+#       define R300_FP2_DVO_CLOCK_MODE_SINGLE  (1 << 28)
+#       define R300_FP2_DVO_DUAL_CHANNEL_EN    (1 << 29)
+#define RADEON_FP_H_SYNC_STRT_WID           0x02c4
+#define RADEON_FP_H2_SYNC_STRT_WID          0x03c4
+#define RADEON_FP_HORZ_STRETCH              0x028c
+#define RADEON_FP_HORZ2_STRETCH             0x038c
+#       define RADEON_HORZ_STRETCH_RATIO_MASK 0xffff
+#       define RADEON_HORZ_STRETCH_RATIO_MAX  4096
+#       define RADEON_HORZ_PANEL_SIZE         (0x1ff   << 16)
+#       define RADEON_HORZ_PANEL_SHIFT        16
+#       define RADEON_HORZ_STRETCH_PIXREP     (0      << 25)
+#       define RADEON_HORZ_STRETCH_BLEND      (1      << 26)
+#       define RADEON_HORZ_STRETCH_ENABLE     (1      << 25)
+#       define RADEON_HORZ_AUTO_RATIO         (1      << 27)
+#       define RADEON_HORZ_FP_LOOP_STRETCH    (0x7    << 28)
+#       define RADEON_HORZ_AUTO_RATIO_INC     (1      << 31)
+#define RADEON_FP_HORZ_VERT_ACTIVE          0x0278
+#define RADEON_FP_V_SYNC_STRT_WID           0x02c8
+#define RADEON_FP_VERT_STRETCH              0x0290
+#define RADEON_FP_V2_SYNC_STRT_WID          0x03c8
+#define RADEON_FP_VERT2_STRETCH             0x0390
+#       define RADEON_VERT_PANEL_SIZE          (0xfff << 12)
+#       define RADEON_VERT_PANEL_SHIFT         12
+#       define RADEON_VERT_STRETCH_RATIO_MASK  0xfff
+#       define RADEON_VERT_STRETCH_RATIO_SHIFT 0
+#       define RADEON_VERT_STRETCH_RATIO_MAX   4096
+#       define RADEON_VERT_STRETCH_ENABLE      (1     << 25)
+#       define RADEON_VERT_STRETCH_LINEREP     (0     << 26)
+#       define RADEON_VERT_STRETCH_BLEND       (1     << 26)
+#       define RADEON_VERT_AUTO_RATIO_EN       (1     << 27)
+#	define RADEON_VERT_AUTO_RATIO_INC      (1     << 31)
+#       define RADEON_VERT_STRETCH_RESERVED    0x71000000
+#define RS400_FP_2ND_GEN_CNTL               0x0384
+#       define RS400_FP_2ND_ON              (1 << 0)
+#       define RS400_FP_2ND_BLANK_EN        (1 << 1)
+#       define RS400_TMDS_2ND_EN            (1 << 2)
+#       define RS400_PANEL_FORMAT_2ND       (1 << 3)
+#       define RS400_FP_2ND_EN_TMDS         (1 << 7)
+#       define RS400_FP_2ND_DETECT_SENSE    (1 << 8)
+#       define RS400_FP_2ND_SOURCE_SEL_MASK        (3 << 10)
+#       define RS400_FP_2ND_SOURCE_SEL_CRTC1       (0 << 10)
+#       define RS400_FP_2ND_SOURCE_SEL_CRTC2       (1 << 10)
+#       define RS400_FP_2ND_SOURCE_SEL_RMX         (2 << 10)
+#       define RS400_FP_2ND_DETECT_EN       (1 << 12)
+#       define RS400_HPD_2ND_SEL            (1 << 13)
+#define RS400_FP2_2_GEN_CNTL                0x0388
+#       define RS400_FP2_2_BLANK_EN         (1 << 1)
+#       define RS400_FP2_2_ON               (1 << 2)
+#       define RS400_FP2_2_PANEL_FORMAT     (1 << 3)
+#       define RS400_FP2_2_DETECT_SENSE     (1 << 8)
+#       define RS400_FP2_2_SOURCE_SEL_MASK        (3 << 10)
+#       define RS400_FP2_2_SOURCE_SEL_CRTC1       (0 << 10)
+#       define RS400_FP2_2_SOURCE_SEL_CRTC2       (1 << 10)
+#       define RS400_FP2_2_SOURCE_SEL_RMX         (2 << 10)
+#       define RS400_FP2_2_DVO2_EN          (1 << 25)
+#define RS400_TMDS2_CNTL                    0x0394
+#define RS400_TMDS2_TRANSMITTER_CNTL        0x03a4
+#       define RS400_TMDS2_PLLEN            (1 << 0)
+#       define RS400_TMDS2_PLLRST           (1 << 1)
+
+#define RADEON_GEN_INT_CNTL                 0x0040
+#	define RADEON_CRTC_VBLANK_MASK		(1 << 0)
+#	define RADEON_FP_DETECT_MASK		(1 << 4)
+#	define RADEON_CRTC2_VBLANK_MASK		(1 << 9)
+#	define RADEON_FP2_DETECT_MASK		(1 << 10)
+#	define RADEON_GUI_IDLE_MASK		(1 << 19)
+#	define RADEON_SW_INT_ENABLE		(1 << 25)
+#define RADEON_GEN_INT_STATUS               0x0044
+#	define AVIVO_DISPLAY_INT_STATUS		(1 << 0)
+#	define RADEON_CRTC_VBLANK_STAT		(1 << 0)
+#	define RADEON_CRTC_VBLANK_STAT_ACK	(1 << 0)
+#	define RADEON_FP_DETECT_STAT		(1 << 4)
+#	define RADEON_FP_DETECT_STAT_ACK	(1 << 4)
+#	define RADEON_CRTC2_VBLANK_STAT		(1 << 9)
+#	define RADEON_CRTC2_VBLANK_STAT_ACK	(1 << 9)
+#	define RADEON_FP2_DETECT_STAT		(1 << 10)
+#	define RADEON_FP2_DETECT_STAT_ACK	(1 << 10)
+#	define RADEON_GUI_IDLE_STAT		(1 << 19)
+#	define RADEON_GUI_IDLE_STAT_ACK		(1 << 19)
+#	define RADEON_SW_INT_FIRE		(1 << 26)
+#	define RADEON_SW_INT_TEST		(1 << 25)
+#	define RADEON_SW_INT_TEST_ACK		(1 << 25)
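+
+/*
+ * Usage sketch (editorial addition): the *_STAT and *_ACK names alias
+ * the same bit position - reading GEN_INT_STATUS reports the event,
+ * writing the bit back acknowledges it.
+ */
+static inline void radeon_ack_crtc_vblank(void)
+{
+        u32 status = RREG32(RADEON_GEN_INT_STATUS);
+
+        if (status & RADEON_CRTC_VBLANK_STAT)
+                WREG32(RADEON_GEN_INT_STATUS, RADEON_CRTC_VBLANK_STAT_ACK);
+}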
+#define RADEON_GENENB                       0x03c3 /* VGA */
+#define RADEON_GENFC_RD                     0x03ca /* VGA */
+#define RADEON_GENFC_WT                     0x03da /* VGA, 0x03ba */
+#define RADEON_GENMO_RD                     0x03cc /* VGA */
+#define RADEON_GENMO_WT                     0x03c2 /* VGA */
+#define RADEON_GENS0                        0x03c2 /* VGA */
+#define RADEON_GENS1                        0x03da /* VGA, 0x03ba */
+#define RADEON_GPIO_MONID                   0x0068 /* DDC interface via I2C */ /* DDC3 */
+#define RADEON_GPIO_MONIDB                  0x006c
+#define RADEON_GPIO_CRT2_DDC                0x006c
+#define RADEON_GPIO_DVI_DDC                 0x0064 /* DDC2 */
+#define RADEON_GPIO_VGA_DDC                 0x0060 /* DDC1 */
+#       define RADEON_GPIO_A_0              (1 <<  0)
+#       define RADEON_GPIO_A_1              (1 <<  1)
+#       define RADEON_GPIO_Y_0              (1 <<  8)
+#       define RADEON_GPIO_Y_1              (1 <<  9)
+#       define RADEON_GPIO_Y_SHIFT_0        8
+#       define RADEON_GPIO_Y_SHIFT_1        9
+#       define RADEON_GPIO_EN_0             (1 << 16)
+#       define RADEON_GPIO_EN_1             (1 << 17)
+#       define RADEON_GPIO_MASK_0           (1 << 24) /*??*/
+#       define RADEON_GPIO_MASK_1           (1 << 25) /*??*/
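+
+/*
+ * Usage sketch (editorial addition): the DDC GPIOs implement open-drain
+ * bit-banged I2C - an EN bit enables the output driver (pulling the
+ * line low) and the matching Y bit reads the pin state back.  Which
+ * pin carries SDA versus SCL comes from the video BIOS tables; pin 0
+ * as the data line is an assumption here.
+ */
+static inline int radeon_vga_ddc_get_sda(void)
+{
+        return (RREG32(RADEON_GPIO_VGA_DDC) & RADEON_GPIO_Y_0) != 0;
+}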
+#define RADEON_GRPH8_DATA                   0x03cf /* VGA */
+#define RADEON_GRPH8_IDX                    0x03ce /* VGA */
+#define RADEON_GUI_SCRATCH_REG0             0x15e0
+#define RADEON_GUI_SCRATCH_REG1             0x15e4
+#define RADEON_GUI_SCRATCH_REG2             0x15e8
+#define RADEON_GUI_SCRATCH_REG3             0x15ec
+#define RADEON_GUI_SCRATCH_REG4             0x15f0
+#define RADEON_GUI_SCRATCH_REG5             0x15f4
+
+#define RADEON_HEADER                       0x0f0e /* PCI */
+#define RADEON_HOST_DATA0                   0x17c0
+#define RADEON_HOST_DATA1                   0x17c4
+#define RADEON_HOST_DATA2                   0x17c8
+#define RADEON_HOST_DATA3                   0x17cc
+#define RADEON_HOST_DATA4                   0x17d0
+#define RADEON_HOST_DATA5                   0x17d4
+#define RADEON_HOST_DATA6                   0x17d8
+#define RADEON_HOST_DATA7                   0x17dc
+#define RADEON_HOST_DATA_LAST               0x17e0
+#define RADEON_HOST_PATH_CNTL               0x0130
+#	define RADEON_HP_LIN_RD_CACHE_DIS   (1 << 24)
+#	define RADEON_HDP_READ_BUFFER_INVALIDATE   (1 << 27)
+#       define RADEON_HDP_SOFT_RESET        (1 << 26)
+#       define RADEON_HDP_APER_CNTL         (1 << 23)
+#define RADEON_HTOTAL_CNTL                  0x0009 /* PLL */
+#       define RADEON_HTOT_CNTL_VGA_EN      (1 << 28)
+#define RADEON_HTOTAL2_CNTL                 0x002e /* PLL */
+
+       /* Multimedia I2C bus */
+#define RADEON_I2C_CNTL_0		    0x0090
+#       define RADEON_I2C_DONE              (1 << 0)
+#       define RADEON_I2C_NACK              (1 << 1)
+#       define RADEON_I2C_HALT              (1 << 2)
+#       define RADEON_I2C_SOFT_RST          (1 << 5)
+#       define RADEON_I2C_DRIVE_EN          (1 << 6)
+#       define RADEON_I2C_DRIVE_SEL         (1 << 7)
+#       define RADEON_I2C_START             (1 << 8)
+#       define RADEON_I2C_STOP              (1 << 9)
+#       define RADEON_I2C_RECEIVE           (1 << 10)
+#       define RADEON_I2C_ABORT             (1 << 11)
+#       define RADEON_I2C_GO                (1 << 12)
+#       define RADEON_I2C_PRESCALE_SHIFT    16
+#define RADEON_I2C_CNTL_1                   0x0094
+#       define RADEON_I2C_DATA_COUNT_SHIFT  0
+#       define RADEON_I2C_ADDR_COUNT_SHIFT  4
+#       define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT   8
+#       define RADEON_I2C_SEL               (1 << 16)
+#       define RADEON_I2C_EN                (1 << 17)
+#       define RADEON_I2C_TIME_LIMIT_SHIFT  24
+#define RADEON_I2C_DATA			    0x0098
+
+#define RADEON_DVI_I2C_CNTL_0		    0x02e0
+#       define R200_DVI_I2C_PIN_SEL(x)      ((x) << 3)
+#       define R200_SEL_DDC1                0 /* depends on asic */
+#       define R200_SEL_DDC2                1 /* depends on asic */
+#       define R200_SEL_DDC3                2 /* depends on asic */
+#	define RADEON_SW_WANTS_TO_USE_DVI_I2C (1 << 13)
+#	define RADEON_SW_CAN_USE_DVI_I2C      (1 << 13)
+#	define RADEON_SW_DONE_USING_DVI_I2C   (1 << 14)
+#	define RADEON_HW_NEEDS_DVI_I2C        (1 << 14)
+#	define RADEON_ABORT_HW_DVI_I2C        (1 << 15)
+#	define RADEON_HW_USING_DVI_I2C        (1 << 15)
+#define RADEON_DVI_I2C_CNTL_1               0x02e4
+#define RADEON_DVI_I2C_DATA		    0x02e8
+
+#define RADEON_INTERRUPT_LINE               0x0f3c /* PCI */
+#define RADEON_INTERRUPT_PIN                0x0f3d /* PCI */
+#define RADEON_IO_BASE                      0x0f14 /* PCI */
+
+#define RADEON_LATENCY                      0x0f0d /* PCI */
+#define RADEON_LEAD_BRES_DEC                0x1608
+#define RADEON_LEAD_BRES_LNTH               0x161c
+#define RADEON_LEAD_BRES_LNTH_SUB           0x1624
+#define RADEON_LVDS_GEN_CNTL                0x02d0
+#       define RADEON_LVDS_ON               (1   <<  0)
+#       define RADEON_LVDS_DISPLAY_DIS      (1   <<  1)
+#       define RADEON_LVDS_PANEL_TYPE       (1   <<  2)
+#       define RADEON_LVDS_PANEL_FORMAT     (1   <<  3)
+#       define RADEON_LVDS_NO_FM            (0   <<  4)
+#       define RADEON_LVDS_2_GREY           (1   <<  4)
+#       define RADEON_LVDS_4_GREY           (2   <<  4)
+#       define RADEON_LVDS_RST_FM           (1   <<  6)
+#       define RADEON_LVDS_EN               (1   <<  7)
+#       define RADEON_LVDS_BL_MOD_LEVEL_SHIFT 8
+#       define RADEON_LVDS_BL_MOD_LEVEL_MASK (0xff << 8)
+#       define RADEON_LVDS_BL_MOD_EN        (1   << 16)
+#       define RADEON_LVDS_BL_CLK_SEL       (1   << 17)
+#       define RADEON_LVDS_DIGON            (1   << 18)
+#       define RADEON_LVDS_BLON             (1   << 19)
+#       define RADEON_LVDS_FP_POL_LOW       (1   << 20)
+#       define RADEON_LVDS_LP_POL_LOW       (1   << 21)
+#       define RADEON_LVDS_DTM_POL_LOW      (1   << 22)
+#       define RADEON_LVDS_SEL_CRTC2        (1   << 23)
+#       define RADEON_LVDS_FPDI_EN          (1   << 27)
+#       define RADEON_LVDS_HSYNC_DELAY_SHIFT        28
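+
+/*
+ * Usage sketch (editorial addition): the backlight level is an 8-bit
+ * field inside LVDS_GEN_CNTL; a read-modify-write preserves the other
+ * panel control bits.  Enabling the modulator together with the level
+ * is an assumption, not something this header mandates.
+ */
+static inline void radeon_lvds_set_bl_level(u8 level)
+{
+        u32 lvds = RREG32(RADEON_LVDS_GEN_CNTL);
+
+        lvds &= ~RADEON_LVDS_BL_MOD_LEVEL_MASK;
+        lvds |= (u32)level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT;
+        WREG32(RADEON_LVDS_GEN_CNTL, lvds | RADEON_LVDS_BL_MOD_EN);
+}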
+#define RADEON_LVDS_PLL_CNTL                0x02d4
+#       define RADEON_HSYNC_DELAY_SHIFT     28
+#       define RADEON_HSYNC_DELAY_MASK      (0xf << 28)
+#       define RADEON_LVDS_PLL_EN           (1   << 16)
+#       define RADEON_LVDS_PLL_RESET        (1   << 17)
+#       define R300_LVDS_SRC_SEL_MASK       (3   << 18)
+#       define R300_LVDS_SRC_SEL_CRTC1      (0   << 18)
+#       define R300_LVDS_SRC_SEL_CRTC2      (1   << 18)
+#       define R300_LVDS_SRC_SEL_RMX        (2   << 18)
+#define RADEON_LVDS_SS_GEN_CNTL             0x02ec
+#       define RADEON_LVDS_PWRSEQ_DELAY1_SHIFT     16
+#       define RADEON_LVDS_PWRSEQ_DELAY2_SHIFT     20
+
+#define RADEON_MAX_LATENCY                  0x0f3f /* PCI */
+#define RADEON_DISPLAY_BASE_ADDR            0x23c
+#define RADEON_DISPLAY2_BASE_ADDR           0x33c
+#define RADEON_OV0_BASE_ADDR                0x43c
+#define RADEON_NB_TOM                       0x15c
+#define R300_MC_INIT_MISC_LAT_TIMER         0x180
+#       define R300_MC_DISP0R_INIT_LAT_SHIFT 8
+#       define R300_MC_DISP0R_INIT_LAT_MASK  0xf
+#       define R300_MC_DISP1R_INIT_LAT_SHIFT 12
+#       define R300_MC_DISP1R_INIT_LAT_MASK  0xf
+#define RADEON_MCLK_CNTL                    0x0012 /* PLL */
+#       define RADEON_MCLKA_SRC_SEL_MASK    0x7
+#       define RADEON_FORCEON_MCLKA         (1 << 16)
+#       define RADEON_FORCEON_MCLKB         (1 << 17)
+#       define RADEON_FORCEON_YCLKA         (1 << 18)
+#       define RADEON_FORCEON_YCLKB         (1 << 19)
+#       define RADEON_FORCEON_MC            (1 << 20)
+#       define RADEON_FORCEON_AIC           (1 << 21)
+#       define R300_DISABLE_MC_MCLKA        (1 << 21)
+#       define R300_DISABLE_MC_MCLKB        (1 << 21)
+#define RADEON_MCLK_MISC                    0x001f /* PLL */
+#       define RADEON_MC_MCLK_MAX_DYN_STOP_LAT (1 << 12)
+#       define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
+#       define RADEON_MC_MCLK_DYN_ENABLE    (1 << 14)
+#       define RADEON_IO_MCLK_DYN_ENABLE    (1 << 15)
+
+#define RADEON_GPIOPAD_MASK                 0x0198
+#define RADEON_GPIOPAD_A		    0x019c
+#define RADEON_GPIOPAD_EN                   0x01a0
+#define RADEON_GPIOPAD_Y                    0x01a4
+#define RADEON_MDGPIO_MASK                  0x01a8
+#define RADEON_MDGPIO_A                     0x01ac
+#define RADEON_MDGPIO_EN                    0x01b0
+#define RADEON_MDGPIO_Y                     0x01b4
+
+#define RADEON_MEM_ADDR_CONFIG              0x0148
+#define RADEON_MEM_BASE                     0x0f10 /* PCI */
+#define RADEON_MEM_CNTL                     0x0140
+#       define RADEON_MEM_NUM_CHANNELS_MASK 0x01
+#       define RADEON_MEM_USE_B_CH_ONLY     (1 <<  1)
+#       define RV100_HALF_MODE              (1 <<  3)
+#       define R300_MEM_NUM_CHANNELS_MASK   0x03
+#       define R300_MEM_USE_CD_CH_ONLY      (1 <<  2)
+#define RADEON_MEM_TIMING_CNTL              0x0144 /* EXT_MEM_CNTL */
+#define RADEON_MEM_INIT_LAT_TIMER           0x0154
+#define RADEON_MEM_INTF_CNTL                0x014c
+#define RADEON_MEM_SDRAM_MODE_REG           0x0158
+#       define RADEON_SDRAM_MODE_MASK       0xffff0000
+#       define RADEON_B3MEM_RESET_MASK      0x6fffffff
+#       define RADEON_MEM_CFG_TYPE_DDR      (1 << 30)
+#define RADEON_MEM_STR_CNTL                 0x0150
+#       define RADEON_MEM_PWRUP_COMPL_A     (1 <<  0)
+#       define RADEON_MEM_PWRUP_COMPL_B     (1 <<  1)
+#       define R300_MEM_PWRUP_COMPL_C       (1 <<  2)
+#       define R300_MEM_PWRUP_COMPL_D       (1 <<  3)
+#       define RADEON_MEM_PWRUP_COMPLETE    0x03
+#       define R300_MEM_PWRUP_COMPLETE      0x0f
+#define RADEON_MC_STATUS                    0x0150
+#       define RADEON_MC_IDLE               (1 << 2)
+#       define R300_MC_IDLE                 (1 << 4)
+#define RADEON_MEM_VGA_RP_SEL               0x003c
+#define RADEON_MEM_VGA_WP_SEL               0x0038
+#define RADEON_MIN_GRANT                    0x0f3e /* PCI */
+#define RADEON_MM_DATA                      0x0004
+#define RADEON_MM_INDEX                     0x0000
+#	define RADEON_MM_APER		(1 << 31)
+#define RADEON_MPLL_CNTL                    0x000e /* PLL */
+#define RADEON_MPP_TB_CONFIG                0x01c0 /* ? */
+#define RADEON_MPP_GP_CONFIG                0x01c8 /* ? */
+#define RADEON_SEPROM_CNTL1                 0x01c0
+#       define RADEON_SCK_PRESCALE_SHIFT    24
+#       define RADEON_SCK_PRESCALE_MASK     (0xff << 24)
+#define R300_MC_IND_INDEX                   0x01f8
+#       define R300_MC_IND_ADDR_MASK        0x3f
+#       define R300_MC_IND_WR_EN            (1 << 8)
+#define R300_MC_IND_DATA                    0x01fc
+#define R300_MC_READ_CNTL_AB                0x017c
+#       define R300_MEM_RBS_POSITION_A_MASK 0x03
+#define R300_MC_READ_CNTL_CD_mcind	    0x24
+#       define R300_MEM_RBS_POSITION_C_MASK 0x03
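+
+/*
+ * Usage sketch (editorial addition): on R300-class parts the memory
+ * controller registers behind the MC_IND_INDEX/MC_IND_DATA pair above
+ * are reached indirectly - write the register number (plus WR_EN for
+ * writes) to the index, then move the value through the data port.
+ */
+static inline void r300_mc_ind_write(u32 addr, u32 val)
+{
+        WREG32(R300_MC_IND_INDEX,
+               (addr & R300_MC_IND_ADDR_MASK) | R300_MC_IND_WR_EN);
+        WREG32(R300_MC_IND_DATA, val);
+        WREG32(R300_MC_IND_INDEX, 0);
+}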
+
+#define RADEON_N_VIF_COUNT                  0x0248
+
+#define RADEON_OV0_AUTO_FLIP_CNTL           0x0470
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_NUM        0x00000007
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_REPEAT_FIELD   0x00000008
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_ODD        0x00000010
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_IGNORE_REPEAT_FIELD 0x00000020
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SOFT_EOF_TOGGLE     0x00000040
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_VID_PORT_SELECT     0x00000300
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_P1_FIRST_LINE_EVEN  0x00010000
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_EVEN_DOWN     0x00040000
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_ODD_DOWN      0x00080000
+#       define  RADEON_OV0_AUTO_FLIP_CNTL_FIELD_POL_SOURCE    0x00800000
+
+#define RADEON_OV0_COLOUR_CNTL              0x04E0
+#define RADEON_OV0_DEINTERLACE_PATTERN      0x0474
+#define RADEON_OV0_EXCLUSIVE_HORZ           0x0408
+#       define  RADEON_EXCL_HORZ_START_MASK        0x000000ff
+#       define  RADEON_EXCL_HORZ_END_MASK          0x0000ff00
+#       define  RADEON_EXCL_HORZ_BACK_PORCH_MASK   0x00ff0000
+#       define  RADEON_EXCL_HORZ_EXCLUSIVE_EN      0x80000000
+#define RADEON_OV0_EXCLUSIVE_VERT           0x040C
+#       define  RADEON_EXCL_VERT_START_MASK        0x000003ff
+#       define  RADEON_EXCL_VERT_END_MASK          0x03ff0000
+#define RADEON_OV0_FILTER_CNTL              0x04A0
+#       define RADEON_FILTER_PROGRAMMABLE_COEF            0x0
+#       define RADEON_FILTER_HC_COEF_HORZ_Y               0x1
+#       define RADEON_FILTER_HC_COEF_HORZ_UV              0x2
+#       define RADEON_FILTER_HC_COEF_VERT_Y               0x4
+#       define RADEON_FILTER_HC_COEF_VERT_UV              0x8
+#       define RADEON_FILTER_HARDCODED_COEF               0xf
+#       define RADEON_FILTER_COEF_MASK                    0xf
+
+#define RADEON_OV0_FOUR_TAP_COEF_0          0x04B0
+#define RADEON_OV0_FOUR_TAP_COEF_1          0x04B4
+#define RADEON_OV0_FOUR_TAP_COEF_2          0x04B8
+#define RADEON_OV0_FOUR_TAP_COEF_3          0x04BC
+#define RADEON_OV0_FOUR_TAP_COEF_4          0x04C0
+#define RADEON_OV0_FLAG_CNTL                0x04DC
+#define RADEON_OV0_GAMMA_000_00F            0x0d40
+#define RADEON_OV0_GAMMA_010_01F            0x0d44
+#define RADEON_OV0_GAMMA_020_03F            0x0d48
+#define RADEON_OV0_GAMMA_040_07F            0x0d4c
+#define RADEON_OV0_GAMMA_080_0BF            0x0e00
+#define RADEON_OV0_GAMMA_0C0_0FF            0x0e04
+#define RADEON_OV0_GAMMA_100_13F            0x0e08
+#define RADEON_OV0_GAMMA_140_17F            0x0e0c
+#define RADEON_OV0_GAMMA_180_1BF            0x0e10
+#define RADEON_OV0_GAMMA_1C0_1FF            0x0e14
+#define RADEON_OV0_GAMMA_200_23F            0x0e18
+#define RADEON_OV0_GAMMA_240_27F            0x0e1c
+#define RADEON_OV0_GAMMA_280_2BF            0x0e20
+#define RADEON_OV0_GAMMA_2C0_2FF            0x0e24
+#define RADEON_OV0_GAMMA_300_33F            0x0e28
+#define RADEON_OV0_GAMMA_340_37F            0x0e2c
+#define RADEON_OV0_GAMMA_380_3BF            0x0d50
+#define RADEON_OV0_GAMMA_3C0_3FF            0x0d54
+#define RADEON_OV0_GRAPHICS_KEY_CLR_LOW     0x04EC
+#define RADEON_OV0_GRAPHICS_KEY_CLR_HIGH    0x04F0
+#define RADEON_OV0_H_INC                    0x0480
+#define RADEON_OV0_KEY_CNTL                 0x04F4
+#       define  RADEON_VIDEO_KEY_FN_MASK    0x00000003L
+#       define  RADEON_VIDEO_KEY_FN_FALSE   0x00000000L
+#       define  RADEON_VIDEO_KEY_FN_TRUE    0x00000001L
+#       define  RADEON_VIDEO_KEY_FN_EQ      0x00000002L
+#       define  RADEON_VIDEO_KEY_FN_NE      0x00000003L
+#       define  RADEON_GRAPHIC_KEY_FN_MASK  0x00000030L
+#       define  RADEON_GRAPHIC_KEY_FN_FALSE 0x00000000L
+#       define  RADEON_GRAPHIC_KEY_FN_TRUE  0x00000010L
+#       define  RADEON_GRAPHIC_KEY_FN_EQ    0x00000020L
+#       define  RADEON_GRAPHIC_KEY_FN_NE    0x00000030L
+#       define  RADEON_CMP_MIX_MASK         0x00000100L
+#       define  RADEON_CMP_MIX_OR           0x00000000L
+#       define  RADEON_CMP_MIX_AND          0x00000100L
+#define RADEON_OV0_LIN_TRANS_A              0x0d20
+#define RADEON_OV0_LIN_TRANS_B              0x0d24
+#define RADEON_OV0_LIN_TRANS_C              0x0d28
+#define RADEON_OV0_LIN_TRANS_D              0x0d2c
+#define RADEON_OV0_LIN_TRANS_E              0x0d30
+#define RADEON_OV0_LIN_TRANS_F              0x0d34
+#define RADEON_OV0_P1_BLANK_LINES_AT_TOP    0x0430
+#       define  RADEON_P1_BLNK_LN_AT_TOP_M1_MASK   0x00000fffL
+#       define  RADEON_P1_ACTIVE_LINES_M1          0x0fff0000L
+#define RADEON_OV0_P1_H_ACCUM_INIT          0x0488
+#define RADEON_OV0_P1_V_ACCUM_INIT          0x0428
+#       define  RADEON_OV0_P1_MAX_LN_IN_PER_LN_OUT 0x00000003L
+#       define  RADEON_OV0_P1_V_ACCUM_INIT_MASK    0x01ff8000L
+#define RADEON_OV0_P1_X_START_END           0x0494
+#define RADEON_OV0_P2_X_START_END           0x0498
+#define RADEON_OV0_P23_BLANK_LINES_AT_TOP   0x0434
+#       define  RADEON_P23_BLNK_LN_AT_TOP_M1_MASK  0x000007ffL
+#       define  RADEON_P23_ACTIVE_LINES_M1         0x07ff0000L
+#define RADEON_OV0_P23_H_ACCUM_INIT         0x048C
+#define RADEON_OV0_P23_V_ACCUM_INIT         0x042C
+#define RADEON_OV0_P3_X_START_END           0x049C
+#define RADEON_OV0_REG_LOAD_CNTL            0x0410
+#       define  RADEON_REG_LD_CTL_LOCK                 0x00000001L
+#       define  RADEON_REG_LD_CTL_VBLANK_DURING_LOCK   0x00000002L
+#       define  RADEON_REG_LD_CTL_STALL_GUI_UNTIL_FLIP 0x00000004L
+#       define  RADEON_REG_LD_CTL_LOCK_READBACK        0x00000008L
+#       define  RADEON_REG_LD_CTL_FLIP_READBACK        0x00000010L
+#define RADEON_OV0_SCALE_CNTL               0x0420
+#       define  RADEON_SCALER_HORZ_PICK_NEAREST    0x00000004L
+#       define  RADEON_SCALER_VERT_PICK_NEAREST    0x00000008L
+#       define  RADEON_SCALER_SIGNED_UV            0x00000010L
+#       define  RADEON_SCALER_GAMMA_SEL_MASK       0x00000060L
+#       define  RADEON_SCALER_GAMMA_SEL_BRIGHT     0x00000000L
+#       define  RADEON_SCALER_GAMMA_SEL_G22        0x00000020L
+#       define  RADEON_SCALER_GAMMA_SEL_G18        0x00000040L
+#       define  RADEON_SCALER_GAMMA_SEL_G14        0x00000060L
+#       define  RADEON_SCALER_COMCORE_SHIFT_UP_ONE 0x00000080L
+#       define  RADEON_SCALER_SURFAC_FORMAT        0x00000f00L
+#       define  RADEON_SCALER_SOURCE_15BPP         0x00000300L
+#       define  RADEON_SCALER_SOURCE_16BPP         0x00000400L
+#       define  RADEON_SCALER_SOURCE_32BPP         0x00000600L
+#       define  RADEON_SCALER_SOURCE_YUV9          0x00000900L
+#       define  RADEON_SCALER_SOURCE_YUV12         0x00000A00L
+#       define  RADEON_SCALER_SOURCE_VYUY422       0x00000B00L
+#       define  RADEON_SCALER_SOURCE_YVYU422       0x00000C00L
+#       define  RADEON_SCALER_ADAPTIVE_DEINT       0x00001000L
+#       define  RADEON_SCALER_TEMPORAL_DEINT       0x00002000L
+#       define  RADEON_SCALER_CRTC_SEL             0x00004000L
+#       define  RADEON_SCALER_SMART_SWITCH         0x00008000L
+#       define  RADEON_SCALER_BURST_PER_PLANE      0x007F0000L
+#       define  RADEON_SCALER_DOUBLE_BUFFER        0x01000000L
+#       define  RADEON_SCALER_DIS_LIMIT            0x08000000L
+#       define  RADEON_SCALER_LIN_TRANS_BYPASS     0x10000000L
+#       define  RADEON_SCALER_INT_EMU              0x20000000L
+#       define  RADEON_SCALER_ENABLE               0x40000000L
+#       define  RADEON_SCALER_SOFT_RESET           0x80000000L
+#define RADEON_OV0_STEP_BY                  0x0484
+#define RADEON_OV0_TEST                     0x04F8
+#define RADEON_OV0_V_INC                    0x0424
+#define RADEON_OV0_VID_BUF_PITCH0_VALUE     0x0460
+#define RADEON_OV0_VID_BUF_PITCH1_VALUE     0x0464
+#define RADEON_OV0_VID_BUF0_BASE_ADRS       0x0440
+#       define  RADEON_VIF_BUF0_PITCH_SEL          0x00000001L
+#       define  RADEON_VIF_BUF0_TILE_ADRS          0x00000002L
+#       define  RADEON_VIF_BUF0_BASE_ADRS_MASK     0x03fffff0L
+#       define  RADEON_VIF_BUF0_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF1_BASE_ADRS       0x0444
+#       define  RADEON_VIF_BUF1_PITCH_SEL          0x00000001L
+#       define  RADEON_VIF_BUF1_TILE_ADRS          0x00000002L
+#       define  RADEON_VIF_BUF1_BASE_ADRS_MASK     0x03fffff0L
+#       define  RADEON_VIF_BUF1_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF2_BASE_ADRS       0x0448
+#       define  RADEON_VIF_BUF2_PITCH_SEL          0x00000001L
+#       define  RADEON_VIF_BUF2_TILE_ADRS          0x00000002L
+#       define  RADEON_VIF_BUF2_BASE_ADRS_MASK     0x03fffff0L
+#       define  RADEON_VIF_BUF2_1ST_LINE_LSBS_MASK 0x48000000L
+#define RADEON_OV0_VID_BUF3_BASE_ADRS       0x044C
+#define RADEON_OV0_VID_BUF4_BASE_ADRS       0x0450
+#define RADEON_OV0_VID_BUF5_BASE_ADRS       0x0454
+#define RADEON_OV0_VIDEO_KEY_CLR_HIGH       0x04E8
+#define RADEON_OV0_VIDEO_KEY_CLR_LOW        0x04E4
+#define RADEON_OV0_Y_X_START                0x0400
+#define RADEON_OV0_Y_X_END                  0x0404
+#define RADEON_OV1_Y_X_START                0x0600
+#define RADEON_OV1_Y_X_END                  0x0604
+#define RADEON_OVR_CLR                      0x0230
+#define RADEON_OVR_WID_LEFT_RIGHT           0x0234
+#define RADEON_OVR_WID_TOP_BOTTOM           0x0238
+#define RADEON_OVR2_CLR                     0x0330
+#define RADEON_OVR2_WID_LEFT_RIGHT          0x0334
+#define RADEON_OVR2_WID_TOP_BOTTOM          0x0338
+
+/* first capture unit */
+
+#define RADEON_CAP0_BUF0_OFFSET           0x0920
+#define RADEON_CAP0_BUF1_OFFSET           0x0924
+#define RADEON_CAP0_BUF0_EVEN_OFFSET      0x0928
+#define RADEON_CAP0_BUF1_EVEN_OFFSET      0x092C
+
+#define RADEON_CAP0_BUF_PITCH             0x0930
+#define RADEON_CAP0_V_WINDOW              0x0934
+#define RADEON_CAP0_H_WINDOW              0x0938
+#define RADEON_CAP0_VBI0_OFFSET           0x093C
+#define RADEON_CAP0_VBI1_OFFSET           0x0940
+#define RADEON_CAP0_VBI_V_WINDOW          0x0944
+#define RADEON_CAP0_VBI_H_WINDOW          0x0948
+#define RADEON_CAP0_PORT_MODE_CNTL        0x094C
+#define RADEON_CAP0_TRIG_CNTL             0x0950
+#define RADEON_CAP0_DEBUG                 0x0954
+#define RADEON_CAP0_CONFIG                0x0958
+#       define RADEON_CAP0_CONFIG_CONTINUOS          0x00000001
+#       define RADEON_CAP0_CONFIG_START_FIELD_EVEN   0x00000002
+#       define RADEON_CAP0_CONFIG_START_BUF_GET      0x00000004
+#       define RADEON_CAP0_CONFIG_START_BUF_SET      0x00000008
+#       define RADEON_CAP0_CONFIG_BUF_TYPE_ALT       0x00000010
+#       define RADEON_CAP0_CONFIG_BUF_TYPE_FRAME     0x00000020
+#       define RADEON_CAP0_CONFIG_ONESHOT_MODE_FRAME 0x00000040
+#       define RADEON_CAP0_CONFIG_BUF_MODE_DOUBLE    0x00000080
+#       define RADEON_CAP0_CONFIG_BUF_MODE_TRIPLE    0x00000100
+#       define RADEON_CAP0_CONFIG_MIRROR_EN          0x00000200
+#       define RADEON_CAP0_CONFIG_ONESHOT_MIRROR_EN  0x00000400
+#       define RADEON_CAP0_CONFIG_VIDEO_SIGNED_UV    0x00000800
+#       define RADEON_CAP0_CONFIG_ANC_DECODE_EN      0x00001000
+#       define RADEON_CAP0_CONFIG_VBI_EN             0x00002000
+#       define RADEON_CAP0_CONFIG_SOFT_PULL_DOWN_EN  0x00004000
+#       define RADEON_CAP0_CONFIG_VIP_EXTEND_FLAG_EN 0x00008000
+#       define RADEON_CAP0_CONFIG_FAKE_FIELD_EN      0x00010000
+#       define RADEON_CAP0_CONFIG_ODD_ONE_MORE_LINE  0x00020000
+#       define RADEON_CAP0_CONFIG_EVEN_ONE_MORE_LINE 0x00040000
+#       define RADEON_CAP0_CONFIG_HORZ_DIVIDE_2      0x00080000
+#       define RADEON_CAP0_CONFIG_HORZ_DIVIDE_4      0x00100000
+#       define RADEON_CAP0_CONFIG_VERT_DIVIDE_2      0x00200000
+#       define RADEON_CAP0_CONFIG_VERT_DIVIDE_4      0x00400000
+#       define RADEON_CAP0_CONFIG_FORMAT_BROOKTREE   0x00000000
+#       define RADEON_CAP0_CONFIG_FORMAT_CCIR656     0x00800000
+#       define RADEON_CAP0_CONFIG_FORMAT_ZV          0x01000000
+#       define RADEON_CAP0_CONFIG_FORMAT_VIP         0x01800000
+#       define RADEON_CAP0_CONFIG_FORMAT_TRANSPORT   0x02000000
+#       define RADEON_CAP0_CONFIG_HORZ_DECIMATOR     0x04000000
+#       define RADEON_CAP0_CONFIG_VIDEO_IN_YVYU422   0x00000000
+#       define RADEON_CAP0_CONFIG_VIDEO_IN_VYUY422   0x20000000
+#       define RADEON_CAP0_CONFIG_VBI_DIVIDE_2       0x40000000
+#       define RADEON_CAP0_CONFIG_VBI_DIVIDE_4       0x80000000
+#define RADEON_CAP0_ANC_ODD_OFFSET        0x095C
+#define RADEON_CAP0_ANC_EVEN_OFFSET       0x0960
+#define RADEON_CAP0_ANC_H_WINDOW          0x0964
+#define RADEON_CAP0_VIDEO_SYNC_TEST       0x0968
+#define RADEON_CAP0_ONESHOT_BUF_OFFSET    0x096C
+#define RADEON_CAP0_BUF_STATUS            0x0970
+/* #define RADEON_CAP0_DWNSC_XRATIO          0x0978 */
+/* #define RADEON_CAP0_XSHARPNESS            0x097C */
+#define RADEON_CAP0_VBI2_OFFSET           0x0980
+#define RADEON_CAP0_VBI3_OFFSET           0x0984
+#define RADEON_CAP0_ANC2_OFFSET           0x0988
+#define RADEON_CAP0_ANC3_OFFSET           0x098C
+#define RADEON_VID_BUFFER_CONTROL         0x0900
+
+/* second capture unit */
+
+#define RADEON_CAP1_BUF0_OFFSET           0x0990
+#define RADEON_CAP1_BUF1_OFFSET           0x0994
+#define RADEON_CAP1_BUF0_EVEN_OFFSET      0x0998
+#define RADEON_CAP1_BUF1_EVEN_OFFSET      0x099C
+
+#define RADEON_CAP1_BUF_PITCH             0x09A0
+#define RADEON_CAP1_V_WINDOW              0x09A4
+#define RADEON_CAP1_H_WINDOW              0x09A8
+#define RADEON_CAP1_VBI_ODD_OFFSET        0x09AC
+#define RADEON_CAP1_VBI_EVEN_OFFSET       0x09B0
+#define RADEON_CAP1_VBI_V_WINDOW          0x09B4
+#define RADEON_CAP1_VBI_H_WINDOW          0x09B8
+#define RADEON_CAP1_PORT_MODE_CNTL        0x09BC
+#define RADEON_CAP1_TRIG_CNTL             0x09C0
+#define RADEON_CAP1_DEBUG                 0x09C4
+#define RADEON_CAP1_CONFIG                0x09C8
+#define RADEON_CAP1_ANC_ODD_OFFSET        0x09CC
+#define RADEON_CAP1_ANC_EVEN_OFFSET       0x09D0
+#define RADEON_CAP1_ANC_H_WINDOW          0x09D4
+#define RADEON_CAP1_VIDEO_SYNC_TEST       0x09D8
+#define RADEON_CAP1_ONESHOT_BUF_OFFSET    0x09DC
+#define RADEON_CAP1_BUF_STATUS            0x09E0
+#define RADEON_CAP1_DWNSC_XRATIO          0x09E8
+#define RADEON_CAP1_XSHARPNESS            0x09EC
+
+/* misc multimedia registers */
+
+#define RADEON_IDCT_RUNS                  0x1F80
+#define RADEON_IDCT_LEVELS                0x1F84
+#define RADEON_IDCT_CONTROL               0x1FBC
+#define RADEON_IDCT_AUTH_CONTROL          0x1F88
+#define RADEON_IDCT_AUTH                  0x1F8C
+
+#define RADEON_P2PLL_CNTL                   0x002a /* P2PLL */
+#       define RADEON_P2PLL_RESET                (1 <<  0)
+#       define RADEON_P2PLL_SLEEP                (1 <<  1)
+#       define RADEON_P2PLL_PVG_MASK             (7 << 11)
+#       define RADEON_P2PLL_PVG_SHIFT            11
+#       define RADEON_P2PLL_ATOMIC_UPDATE_EN     (1 << 16)
+#       define RADEON_P2PLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+#       define RADEON_P2PLL_ATOMIC_UPDATE_VSYNC  (1 << 18)
+#define RADEON_P2PLL_DIV_0                  0x002c
+#       define RADEON_P2PLL_FB0_DIV_MASK    0x07ff
+#       define RADEON_P2PLL_POST0_DIV_MASK  0x00070000
+#define RADEON_P2PLL_REF_DIV                0x002B /* PLL */
+#       define RADEON_P2PLL_REF_DIV_MASK    0x03ff
+#       define RADEON_P2PLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
+#       define RADEON_P2PLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
+#       define R300_PPLL_REF_DIV_ACC_MASK   (0x3ff << 18)
+#       define R300_PPLL_REF_DIV_ACC_SHIFT  18
+#define RADEON_PALETTE_DATA                 0x00b4
+#define RADEON_PALETTE_30_DATA              0x00b8
+#define RADEON_PALETTE_INDEX                0x00b0
+#define RADEON_PCI_GART_PAGE                0x017c
+#define RADEON_PIXCLKS_CNTL                 0x002d
+#       define RADEON_PIX2CLK_SRC_SEL_MASK     0x03
+#       define RADEON_PIX2CLK_SRC_SEL_CPUCLK   0x00
+#       define RADEON_PIX2CLK_SRC_SEL_PSCANCLK 0x01
+#       define RADEON_PIX2CLK_SRC_SEL_BYTECLK  0x02
+#       define RADEON_PIX2CLK_SRC_SEL_P2PLLCLK 0x03
+#       define RADEON_PIX2CLK_ALWAYS_ONb       (1<<6)
+#       define RADEON_PIX2CLK_DAC_ALWAYS_ONb   (1<<7)
+#       define RADEON_PIXCLK_TV_SRC_SEL        (1 << 8)
+#       define RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb (1 << 9)
+#       define R300_DVOCLK_ALWAYS_ONb          (1 << 10)
+#       define RADEON_PIXCLK_BLEND_ALWAYS_ONb  (1 << 11)
+#       define RADEON_PIXCLK_GV_ALWAYS_ONb     (1 << 12)
+#       define RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb (1 << 13)
+#       define R300_PIXCLK_DVO_ALWAYS_ONb      (1 << 13)
+#       define RADEON_PIXCLK_LVDS_ALWAYS_ONb   (1 << 14)
+#       define RADEON_PIXCLK_TMDS_ALWAYS_ONb   (1 << 15)
+#       define R300_PIXCLK_TRANS_ALWAYS_ONb    (1 << 16)
+#       define R300_PIXCLK_TVO_ALWAYS_ONb      (1 << 17)
+#       define R300_P2G2CLK_ALWAYS_ONb         (1 << 18)
+#       define R300_P2G2CLK_DAC_ALWAYS_ONb     (1 << 19)
+#       define R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF (1 << 23)
+#define RADEON_PLANE_3D_MASK_C              0x1d44
+#define RADEON_PLL_TEST_CNTL                0x0013 /* PLL */
+#       define RADEON_PLL_MASK_READ_B          (1 << 9)
+#define RADEON_PMI_CAP_ID                   0x0f5c /* PCI */
+#define RADEON_PMI_DATA                     0x0f63 /* PCI */
+#define RADEON_PMI_NXT_CAP_PTR              0x0f5d /* PCI */
+#define RADEON_PMI_PMC_REG                  0x0f5e /* PCI */
+#define RADEON_PMI_PMCSR_REG                0x0f60 /* PCI */
+#define RADEON_PMI_REGISTER                 0x0f5c /* PCI */
+#define RADEON_PPLL_CNTL                    0x0002 /* PLL */
+#       define RADEON_PPLL_RESET                (1 <<  0)
+#       define RADEON_PPLL_SLEEP                (1 <<  1)
+#       define RADEON_PPLL_PVG_MASK             (7 << 11)
+#       define RADEON_PPLL_PVG_SHIFT            11
+#       define RADEON_PPLL_ATOMIC_UPDATE_EN     (1 << 16)
+#       define RADEON_PPLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
+#       define RADEON_PPLL_ATOMIC_UPDATE_VSYNC  (1 << 18)
+#define RADEON_PPLL_DIV_0                   0x0004 /* PLL */
+#define RADEON_PPLL_DIV_1                   0x0005 /* PLL */
+#define RADEON_PPLL_DIV_2                   0x0006 /* PLL */
+#define RADEON_PPLL_DIV_3                   0x0007 /* PLL */
+#       define RADEON_PPLL_FB3_DIV_MASK     0x07ff
+#       define RADEON_PPLL_POST3_DIV_MASK   0x00070000
+#define RADEON_PPLL_REF_DIV                 0x0003 /* PLL */
+#       define RADEON_PPLL_REF_DIV_MASK     0x03ff
+#       define RADEON_PPLL_ATOMIC_UPDATE_R  (1 << 15) /* same as _W */
+#       define RADEON_PPLL_ATOMIC_UPDATE_W  (1 << 15) /* same as _R */
+#define RADEON_PWR_MNGMT_CNTL_STATUS        0x0f60 /* PCI */
+
+#define RADEON_RBBM_GUICNTL                 0x172c
+#       define RADEON_HOST_DATA_SWAP_NONE   (0 << 0)
+#       define RADEON_HOST_DATA_SWAP_16BIT  (1 << 0)
+#       define RADEON_HOST_DATA_SWAP_32BIT  (2 << 0)
+#       define RADEON_HOST_DATA_SWAP_HDW    (3 << 0)
+#define RADEON_RBBM_SOFT_RESET              0x00f0
+#       define RADEON_SOFT_RESET_CP         (1 <<  0)
+#       define RADEON_SOFT_RESET_HI         (1 <<  1)
+#       define RADEON_SOFT_RESET_SE         (1 <<  2)
+#       define RADEON_SOFT_RESET_RE         (1 <<  3)
+#       define RADEON_SOFT_RESET_PP         (1 <<  4)
+#       define RADEON_SOFT_RESET_E2         (1 <<  5)
+#       define RADEON_SOFT_RESET_RB         (1 <<  6)
+#       define RADEON_SOFT_RESET_HDP        (1 <<  7)
+#define RADEON_RBBM_STATUS                  0x0e40
+#       define RADEON_RBBM_FIFOCNT_MASK     0x007f
+#       define RADEON_RBBM_ACTIVE           (1 << 31)
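+
+/*
+ * Illustrative sketch (made-up helper names): idle polling decodes
+ * RBBM_STATUS in two steps.  FIFOCNT reads back the number of free
+ * command FIFO slots; ACTIVE is set while the accelerator is busy.
+ */
+static inline u32 radeon_rbbm_fifo_free(u32 status)
+{
+        return status & RADEON_RBBM_FIFOCNT_MASK;
+}
+
+static inline int radeon_rbbm_engine_active(u32 status)
+{
+        return (status & RADEON_RBBM_ACTIVE) != 0;
+}
+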
+#define RADEON_RB2D_DSTCACHE_CTLSTAT        0x342c
+#       define RADEON_RB2D_DC_FLUSH         (3 << 0)
+#       define RADEON_RB2D_DC_FREE          (3 << 2)
+#       define RADEON_RB2D_DC_FLUSH_ALL     0xf
+#       define RADEON_RB2D_DC_BUSY          (1 << 31)
+#define RADEON_RB2D_DSTCACHE_MODE           0x3428
+#define RADEON_DSTCACHE_CTLSTAT             0x1714
+
+#define RADEON_RB3D_ZCACHE_MODE             0x3250
+#define RADEON_RB3D_ZCACHE_CTLSTAT          0x3254
+#       define RADEON_RB3D_ZC_FLUSH_ALL     0x5
+#define RADEON_RB3D_DSTCACHE_MODE           0x3258
+# define RADEON_RB3D_DC_CACHE_ENABLE            (0)
+# define RADEON_RB3D_DC_2D_CACHE_DISABLE        (1)
+# define RADEON_RB3D_DC_3D_CACHE_DISABLE        (2)
+# define RADEON_RB3D_DC_CACHE_DISABLE           (3)
+# define RADEON_RB3D_DC_2D_CACHE_LINESIZE_128   (1 << 2)
+# define RADEON_RB3D_DC_3D_CACHE_LINESIZE_128   (2 << 2)
+# define RADEON_RB3D_DC_2D_CACHE_AUTOFLUSH      (1 << 8)
+# define RADEON_RB3D_DC_3D_CACHE_AUTOFLUSH      (2 << 8)
+# define R200_RB3D_DC_2D_CACHE_AUTOFREE         (1 << 10)
+# define R200_RB3D_DC_3D_CACHE_AUTOFREE         (2 << 10)
+# define RADEON_RB3D_DC_FORCE_RMW               (1 << 16)
+# define RADEON_RB3D_DC_DISABLE_RI_FILL         (1 << 24)
+# define RADEON_RB3D_DC_DISABLE_RI_READ         (1 << 25)
+
+#define RADEON_RB3D_DSTCACHE_CTLSTAT            0x325C
+# define RADEON_RB3D_DC_FLUSH                   (3 << 0)
+# define RADEON_RB3D_DC_FREE                    (3 << 2)
+# define RADEON_RB3D_DC_FLUSH_ALL               0xf
+# define RADEON_RB3D_DC_BUSY                    (1 << 31)
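+
+/*
+ * Illustrative sketch (made-up helper name): a destination-cache flush
+ * is typically requested by writing RADEON_RB3D_DC_FLUSH_ALL to this
+ * register and then polling until DC_BUSY drops.
+ */
+static inline int radeon_rb3d_dc_busy(u32 ctlstat)
+{
+        return (ctlstat & RADEON_RB3D_DC_BUSY) != 0;
+}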
+
+#define RADEON_REG_BASE                     0x0f18 /* PCI */
+#define RADEON_REGPROG_INF                  0x0f09 /* PCI */
+#define RADEON_REVISION_ID                  0x0f08 /* PCI */
+
+#define RADEON_SC_BOTTOM                    0x164c
+#define RADEON_SC_BOTTOM_RIGHT              0x16f0
+#define RADEON_SC_BOTTOM_RIGHT_C            0x1c8c
+#define RADEON_SC_LEFT                      0x1640
+#define RADEON_SC_RIGHT                     0x1644
+#define RADEON_SC_TOP                       0x1648
+#define RADEON_SC_TOP_LEFT                  0x16ec
+#define RADEON_SC_TOP_LEFT_C                0x1c88
+#       define RADEON_SC_SIGN_MASK_LO       0x8000
+#       define RADEON_SC_SIGN_MASK_HI       0x80000000
+#define RADEON_M_SPLL_REF_FB_DIV            0x000a /* PLL */
+#	define RADEON_M_SPLL_REF_DIV_SHIFT  0
+#	define RADEON_M_SPLL_REF_DIV_MASK   0xff
+#	define RADEON_MPLL_FB_DIV_SHIFT     8
+#	define RADEON_MPLL_FB_DIV_MASK      0xff
+#	define RADEON_SPLL_FB_DIV_SHIFT     16
+#	define RADEON_SPLL_FB_DIV_MASK      0xff
+#define RADEON_SPLL_CNTL                    0x000c /* PLL */
+#       define RADEON_SPLL_SLEEP            (1 << 0)
+#       define RADEON_SPLL_RESET            (1 << 1)
+#       define RADEON_SPLL_PCP_MASK         0x7
+#       define RADEON_SPLL_PCP_SHIFT        8
+#       define RADEON_SPLL_PVG_MASK         0x7
+#       define RADEON_SPLL_PVG_SHIFT        11
+#       define RADEON_SPLL_PDC_MASK         0x3
+#       define RADEON_SPLL_PDC_SHIFT        14
+#define RADEON_SCLK_CNTL                    0x000d /* PLL */
+#       define RADEON_SCLK_SRC_SEL_MASK     0x0007
+#       define RADEON_DYN_STOP_LAT_MASK     0x00007ff8
+#       define RADEON_CP_MAX_DYN_STOP_LAT   0x0008
+#       define RADEON_SCLK_FORCEON_MASK     0xffff8000
+#       define RADEON_SCLK_FORCE_DISP2      (1<<15)
+#       define RADEON_SCLK_FORCE_CP         (1<<16)
+#       define RADEON_SCLK_FORCE_HDP        (1<<17)
+#       define RADEON_SCLK_FORCE_DISP1      (1<<18)
+#       define RADEON_SCLK_FORCE_TOP        (1<<19)
+#       define RADEON_SCLK_FORCE_E2         (1<<20)
+#       define RADEON_SCLK_FORCE_SE         (1<<21)
+#       define RADEON_SCLK_FORCE_IDCT       (1<<22)
+#       define RADEON_SCLK_FORCE_VIP        (1<<23)
+#       define RADEON_SCLK_FORCE_RE         (1<<24)
+#       define RADEON_SCLK_FORCE_PB         (1<<25)
+#       define RADEON_SCLK_FORCE_TAM        (1<<26)
+#       define RADEON_SCLK_FORCE_TDM        (1<<27)
+#       define RADEON_SCLK_FORCE_RB         (1<<28)
+#       define RADEON_SCLK_FORCE_TV_SCLK    (1<<29)
+#       define RADEON_SCLK_FORCE_SUBPIC     (1<<30)
+#       define RADEON_SCLK_FORCE_OV0        (1<<31)
+#       define R300_SCLK_FORCE_VAP          (1<<21)
+#       define R300_SCLK_FORCE_SR           (1<<25)
+#       define R300_SCLK_FORCE_PX           (1<<26)
+#       define R300_SCLK_FORCE_TX           (1<<27)
+#       define R300_SCLK_FORCE_US           (1<<28)
+#       define R300_SCLK_FORCE_SU           (1<<30)
+#define R300_SCLK_CNTL2                     0x1e   /* PLL */
+#       define R300_SCLK_TCL_MAX_DYN_STOP_LAT (1<<10)
+#       define R300_SCLK_GA_MAX_DYN_STOP_LAT  (1<<11)
+#       define R300_SCLK_CBA_MAX_DYN_STOP_LAT (1<<12)
+#       define R300_SCLK_FORCE_TCL          (1<<13)
+#       define R300_SCLK_FORCE_CBA          (1<<14)
+#       define R300_SCLK_FORCE_GA           (1<<15)
+#define RADEON_SCLK_MORE_CNTL               0x0035 /* PLL */
+#       define RADEON_SCLK_MORE_MAX_DYN_STOP_LAT 0x0007
+#       define RADEON_SCLK_MORE_FORCEON     0x0700
+#define RADEON_SDRAM_MODE_REG               0x0158
+#define RADEON_SEQ8_DATA                    0x03c5 /* VGA */
+#define RADEON_SEQ8_IDX                     0x03c4 /* VGA */
+#define RADEON_SNAPSHOT_F_COUNT             0x0244
+#define RADEON_SNAPSHOT_VH_COUNTS           0x0240
+#define RADEON_SNAPSHOT_VIF_COUNT           0x024c
+#define RADEON_SRC_OFFSET                   0x15ac
+#define RADEON_SRC_PITCH                    0x15b0
+#define RADEON_SRC_PITCH_OFFSET             0x1428
+#define RADEON_SRC_SC_BOTTOM                0x165c
+#define RADEON_SRC_SC_BOTTOM_RIGHT          0x16f4
+#define RADEON_SRC_SC_RIGHT                 0x1654
+#define RADEON_SRC_X                        0x1414
+#define RADEON_SRC_X_Y                      0x1590
+#define RADEON_SRC_Y                        0x1418
+#define RADEON_SRC_Y_X                      0x1434
+#define RADEON_STATUS                       0x0f06 /* PCI */
+#define RADEON_SUBPIC_CNTL                  0x0540 /* ? */
+#define RADEON_SUB_CLASS                    0x0f0a /* PCI */
+#define RADEON_SURFACE_CNTL                 0x0b00
+#       define RADEON_SURF_TRANSLATION_DIS  (1 << 8)
+#       define RADEON_NONSURF_AP0_SWP_16BPP (1 << 20)
+#       define RADEON_NONSURF_AP0_SWP_32BPP (1 << 21)
+#       define RADEON_NONSURF_AP1_SWP_16BPP (1 << 22)
+#       define RADEON_NONSURF_AP1_SWP_32BPP (1 << 23)
+#define RADEON_SURFACE0_INFO                0x0b0c
+#       define RADEON_SURF_TILE_COLOR_MACRO (0 << 16)
+#       define RADEON_SURF_TILE_COLOR_BOTH  (1 << 16)
+#       define RADEON_SURF_TILE_DEPTH_32BPP (2 << 16)
+#       define RADEON_SURF_TILE_DEPTH_16BPP (3 << 16)
+#       define R200_SURF_TILE_NONE          (0 << 16)
+#       define R200_SURF_TILE_COLOR_MACRO   (1 << 16)
+#       define R200_SURF_TILE_COLOR_MICRO   (2 << 16)
+#       define R200_SURF_TILE_COLOR_BOTH    (3 << 16)
+#       define R200_SURF_TILE_DEPTH_32BPP   (4 << 16)
+#       define R200_SURF_TILE_DEPTH_16BPP   (5 << 16)
+#       define R300_SURF_TILE_NONE          (0 << 16)
+#       define R300_SURF_TILE_COLOR_MACRO   (1 << 16)
+#       define R300_SURF_TILE_DEPTH_32BPP   (2 << 16)
+#       define RADEON_SURF_AP0_SWP_16BPP    (1 << 20)
+#       define RADEON_SURF_AP0_SWP_32BPP    (1 << 21)
+#       define RADEON_SURF_AP1_SWP_16BPP    (1 << 22)
+#       define RADEON_SURF_AP1_SWP_32BPP    (1 << 23)
+#define RADEON_SURFACE0_LOWER_BOUND         0x0b04
+#define RADEON_SURFACE0_UPPER_BOUND         0x0b08
+#define RADEON_SURFACE1_INFO                0x0b1c
+#define RADEON_SURFACE1_LOWER_BOUND         0x0b14
+#define RADEON_SURFACE1_UPPER_BOUND         0x0b18
+#define RADEON_SURFACE2_INFO                0x0b2c
+#define RADEON_SURFACE2_LOWER_BOUND         0x0b24
+#define RADEON_SURFACE2_UPPER_BOUND         0x0b28
+#define RADEON_SURFACE3_INFO                0x0b3c
+#define RADEON_SURFACE3_LOWER_BOUND         0x0b34
+#define RADEON_SURFACE3_UPPER_BOUND         0x0b38
+#define RADEON_SURFACE4_INFO                0x0b4c
+#define RADEON_SURFACE4_LOWER_BOUND         0x0b44
+#define RADEON_SURFACE4_UPPER_BOUND         0x0b48
+#define RADEON_SURFACE5_INFO                0x0b5c
+#define RADEON_SURFACE5_LOWER_BOUND         0x0b54
+#define RADEON_SURFACE5_UPPER_BOUND         0x0b58
+#define RADEON_SURFACE6_INFO                0x0b6c
+#define RADEON_SURFACE6_LOWER_BOUND         0x0b64
+#define RADEON_SURFACE6_UPPER_BOUND         0x0b68
+#define RADEON_SURFACE7_INFO                0x0b7c
+#define RADEON_SURFACE7_LOWER_BOUND         0x0b74
+#define RADEON_SURFACE7_UPPER_BOUND         0x0b78
+#define RADEON_SW_SEMAPHORE                 0x013c
+
+#define RADEON_TEST_DEBUG_CNTL              0x0120
+#define RADEON_TEST_DEBUG_CNTL__TEST_DEBUG_OUT_EN 0x00000001
+
+#define RADEON_TEST_DEBUG_MUX               0x0124
+#define RADEON_TEST_DEBUG_OUT               0x012c
+#define RADEON_TMDS_PLL_CNTL                0x02a8
+#define RADEON_TMDS_TRANSMITTER_CNTL        0x02a4
+#       define RADEON_TMDS_TRANSMITTER_PLLEN  1
+#       define RADEON_TMDS_TRANSMITTER_PLLRST 2
+#define RADEON_TRAIL_BRES_DEC               0x1614
+#define RADEON_TRAIL_BRES_ERR               0x160c
+#define RADEON_TRAIL_BRES_INC               0x1610
+#define RADEON_TRAIL_X                      0x1618
+#define RADEON_TRAIL_X_SUB                  0x1620
+
+#define RADEON_VCLK_ECP_CNTL                0x0008 /* PLL */
+#       define RADEON_VCLK_SRC_SEL_MASK     0x03
+#       define RADEON_VCLK_SRC_SEL_CPUCLK   0x00
+#       define RADEON_VCLK_SRC_SEL_PSCANCLK 0x01
+#       define RADEON_VCLK_SRC_SEL_BYTECLK  0x02
+#       define RADEON_VCLK_SRC_SEL_PPLLCLK  0x03
+#       define RADEON_PIXCLK_ALWAYS_ONb     (1<<6)
+#       define RADEON_PIXCLK_DAC_ALWAYS_ONb (1<<7)
+#       define R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF (1<<23)
+
+#define RADEON_VENDOR_ID                    0x0f00 /* PCI */
+#define RADEON_VGA_DDA_CONFIG               0x02e8
+#define RADEON_VGA_DDA_ON_OFF               0x02ec
+#define RADEON_VID_BUFFER_CONTROL           0x0900
+#define RADEON_VIDEOMUX_CNTL                0x0190
+
+/* VIP bus */
+#define RADEON_VIPH_CH0_DATA                0x0c00
+#define RADEON_VIPH_CH1_DATA                0x0c04
+#define RADEON_VIPH_CH2_DATA                0x0c08
+#define RADEON_VIPH_CH3_DATA                0x0c0c
+#define RADEON_VIPH_CH0_ADDR                0x0c10
+#define RADEON_VIPH_CH1_ADDR                0x0c14
+#define RADEON_VIPH_CH2_ADDR                0x0c18
+#define RADEON_VIPH_CH3_ADDR                0x0c1c
+#define RADEON_VIPH_CH0_SBCNT               0x0c20
+#define RADEON_VIPH_CH1_SBCNT               0x0c24
+#define RADEON_VIPH_CH2_SBCNT               0x0c28
+#define RADEON_VIPH_CH3_SBCNT               0x0c2c
+#define RADEON_VIPH_CH0_ABCNT               0x0c30
+#define RADEON_VIPH_CH1_ABCNT               0x0c34
+#define RADEON_VIPH_CH2_ABCNT               0x0c38
+#define RADEON_VIPH_CH3_ABCNT               0x0c3c
+#define RADEON_VIPH_CONTROL                 0x0c40
+#       define RADEON_VIP_BUSY 0
+#       define RADEON_VIP_IDLE 1
+#       define RADEON_VIP_RESET 2
+#       define RADEON_VIPH_EN               (1 << 21)
+#define RADEON_VIPH_DV_LAT                  0x0c44
+#define RADEON_VIPH_BM_CHUNK                0x0c48
+#define RADEON_VIPH_DV_INT                  0x0c4c
+#define RADEON_VIPH_TIMEOUT_STAT            0x0c50
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_STAT 0x00000010
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_AK   0x00000010
+#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REGR_DIS 0x01000000
+
+#define RADEON_VIPH_REG_DATA                0x0084
+#define RADEON_VIPH_REG_ADDR                0x0080
+
+
+#define RADEON_WAIT_UNTIL                   0x1720
+#       define RADEON_WAIT_CRTC_PFLIP       (1 << 0)
+#       define RADEON_WAIT_RE_CRTC_VLINE    (1 << 1)
+#       define RADEON_WAIT_FE_CRTC_VLINE    (1 << 2)
+#       define RADEON_WAIT_CRTC_VLINE       (1 << 3)
+#       define RADEON_WAIT_DMA_VID_IDLE     (1 << 8)
+#       define RADEON_WAIT_DMA_GUI_IDLE     (1 << 9)
+#       define RADEON_WAIT_CMDFIFO          (1 << 10) /* wait for CMDFIFO_ENTRIES */
+#       define RADEON_WAIT_OV0_FLIP         (1 << 11)
+#       define RADEON_WAIT_AGP_FLUSH        (1 << 13)
+#       define RADEON_WAIT_2D_IDLE          (1 << 14)
+#       define RADEON_WAIT_3D_IDLE          (1 << 15)
+#       define RADEON_WAIT_2D_IDLECLEAN     (1 << 16)
+#       define RADEON_WAIT_3D_IDLECLEAN     (1 << 17)
+#       define RADEON_WAIT_HOST_IDLECLEAN   (1 << 18)
+#       define RADEON_CMDFIFO_ENTRIES_SHIFT 10
+#       define RADEON_CMDFIFO_ENTRIES_MASK  0x7f
+#       define RADEON_WAIT_VAP_IDLE         (1 << 28)
+#       define RADEON_WAIT_BOTH_CRTC_PFLIP  (1 << 30)
+#       define RADEON_ENG_DISPLAY_SELECT_CRTC0    (0 << 31)
+#       define RADEON_ENG_DISPLAY_SELECT_CRTC1    (1 << 31)
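+
+/*
+ * Illustrative sketch (made-up helper name): stalling the engine until
+ * everything has drained is typically done by writing the three
+ * IDLECLEAN bits together to WAIT_UNTIL.
+ */
+static inline u32 radeon_wait_until_idle_bits(void)
+{
+        return RADEON_WAIT_2D_IDLECLEAN |
+               RADEON_WAIT_3D_IDLECLEAN |
+               RADEON_WAIT_HOST_IDLECLEAN;
+}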
+
+#define RADEON_X_MPLL_REF_FB_DIV            0x000a /* PLL */
+#define RADEON_XCLK_CNTL                    0x000d /* PLL */
+#define RADEON_XDLL_CNTL                    0x000c /* PLL */
+#define RADEON_XPLL_CNTL                    0x000b /* PLL */
+
+
+
+				/* Registers for 3D/TCL */
+#define RADEON_PP_BORDER_COLOR_0            0x1d40
+#define RADEON_PP_BORDER_COLOR_1            0x1d44
+#define RADEON_PP_BORDER_COLOR_2            0x1d48
+#define RADEON_PP_CNTL                      0x1c38
+#       define RADEON_STIPPLE_ENABLE        (1 <<  0)
+#       define RADEON_SCISSOR_ENABLE        (1 <<  1)
+#       define RADEON_PATTERN_ENABLE        (1 <<  2)
+#       define RADEON_SHADOW_ENABLE         (1 <<  3)
+#       define RADEON_TEX_ENABLE_MASK       (0xf << 4)
+#       define RADEON_TEX_0_ENABLE          (1 <<  4)
+#       define RADEON_TEX_1_ENABLE          (1 <<  5)
+#       define RADEON_TEX_2_ENABLE          (1 <<  6)
+#       define RADEON_TEX_3_ENABLE          (1 <<  7)
+#       define RADEON_TEX_BLEND_ENABLE_MASK (0xf << 12)
+#       define RADEON_TEX_BLEND_0_ENABLE    (1 << 12)
+#       define RADEON_TEX_BLEND_1_ENABLE    (1 << 13)
+#       define RADEON_TEX_BLEND_2_ENABLE    (1 << 14)
+#       define RADEON_TEX_BLEND_3_ENABLE    (1 << 15)
+#       define RADEON_PLANAR_YUV_ENABLE     (1 << 20)
+#       define RADEON_SPECULAR_ENABLE       (1 << 21)
+#       define RADEON_FOG_ENABLE            (1 << 22)
+#       define RADEON_ALPHA_TEST_ENABLE     (1 << 23)
+#       define RADEON_ANTI_ALIAS_NONE       (0 << 24)
+#       define RADEON_ANTI_ALIAS_LINE       (1 << 24)
+#       define RADEON_ANTI_ALIAS_POLY       (2 << 24)
+#       define RADEON_ANTI_ALIAS_LINE_POLY  (3 << 24)
+#       define RADEON_BUMP_MAP_ENABLE       (1 << 26)
+#       define RADEON_BUMPED_MAP_T0         (0 << 27)
+#       define RADEON_BUMPED_MAP_T1         (1 << 27)
+#       define RADEON_BUMPED_MAP_T2         (2 << 27)
+#       define RADEON_TEX_3D_ENABLE_0       (1 << 29)
+#       define RADEON_TEX_3D_ENABLE_1       (1 << 30)
+#       define RADEON_MC_ENABLE             (1 << 31)
+#define RADEON_PP_FOG_COLOR                 0x1c18
+#       define RADEON_FOG_COLOR_MASK        0x00ffffff
+#       define RADEON_FOG_VERTEX            (0 << 24)
+#       define RADEON_FOG_TABLE             (1 << 24)
+#       define RADEON_FOG_USE_DEPTH         (0 << 25)
+#       define RADEON_FOG_USE_DIFFUSE_ALPHA (2 << 25)
+#       define RADEON_FOG_USE_SPEC_ALPHA    (3 << 25)
+#define RADEON_PP_LUM_MATRIX                0x1d00
+#define RADEON_PP_MISC                      0x1c14
+#       define RADEON_REF_ALPHA_MASK        0x000000ff
+#       define RADEON_ALPHA_TEST_FAIL       (0 << 8)
+#       define RADEON_ALPHA_TEST_LESS       (1 << 8)
+#       define RADEON_ALPHA_TEST_LEQUAL     (2 << 8)
+#       define RADEON_ALPHA_TEST_EQUAL      (3 << 8)
+#       define RADEON_ALPHA_TEST_GEQUAL     (4 << 8)
+#       define RADEON_ALPHA_TEST_GREATER    (5 << 8)
+#       define RADEON_ALPHA_TEST_NEQUAL     (6 << 8)
+#       define RADEON_ALPHA_TEST_PASS       (7 << 8)
+#       define RADEON_ALPHA_TEST_OP_MASK    (7 << 8)
+#       define RADEON_CHROMA_FUNC_FAIL      (0 << 16)
+#       define RADEON_CHROMA_FUNC_PASS      (1 << 16)
+#       define RADEON_CHROMA_FUNC_NEQUAL    (2 << 16)
+#       define RADEON_CHROMA_FUNC_EQUAL     (3 << 16)
+#       define RADEON_CHROMA_KEY_NEAREST    (0 << 18)
+#       define RADEON_CHROMA_KEY_ZERO       (1 << 18)
+#       define RADEON_SHADOW_ID_AUTO_INC    (1 << 20)
+#       define RADEON_SHADOW_FUNC_EQUAL     (0 << 21)
+#       define RADEON_SHADOW_FUNC_NEQUAL    (1 << 21)
+#       define RADEON_SHADOW_PASS_1         (0 << 22)
+#       define RADEON_SHADOW_PASS_2         (1 << 22)
+#       define RADEON_RIGHT_HAND_CUBE_D3D   (0 << 24)
+#       define RADEON_RIGHT_HAND_CUBE_OGL   (1 << 24)
+#define RADEON_PP_ROT_MATRIX_0              0x1d58
+#define RADEON_PP_ROT_MATRIX_1              0x1d5c
+#define RADEON_PP_TXFILTER_0                0x1c54
+#define RADEON_PP_TXFILTER_1                0x1c6c
+#define RADEON_PP_TXFILTER_2                0x1c84
+#       define RADEON_MAG_FILTER_NEAREST                   (0  <<  0)
+#       define RADEON_MAG_FILTER_LINEAR                    (1  <<  0)
+#       define RADEON_MAG_FILTER_MASK                      (1  <<  0)
+#       define RADEON_MIN_FILTER_NEAREST                   (0  <<  1)
+#       define RADEON_MIN_FILTER_LINEAR                    (1  <<  1)
+#       define RADEON_MIN_FILTER_NEAREST_MIP_NEAREST       (2  <<  1)
+#       define RADEON_MIN_FILTER_NEAREST_MIP_LINEAR        (3  <<  1)
+#       define RADEON_MIN_FILTER_LINEAR_MIP_NEAREST        (6  <<  1)
+#       define RADEON_MIN_FILTER_LINEAR_MIP_LINEAR         (7  <<  1)
+#       define RADEON_MIN_FILTER_ANISO_NEAREST             (8  <<  1)
+#       define RADEON_MIN_FILTER_ANISO_LINEAR              (9  <<  1)
+#       define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 <<  1)
+#       define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  (11 <<  1)
+#       define RADEON_MIN_FILTER_MASK                      (15 <<  1)
+#       define RADEON_MAX_ANISO_1_TO_1                     (0  <<  5)
+#       define RADEON_MAX_ANISO_2_TO_1                     (1  <<  5)
+#       define RADEON_MAX_ANISO_4_TO_1                     (2  <<  5)
+#       define RADEON_MAX_ANISO_8_TO_1                     (3  <<  5)
+#       define RADEON_MAX_ANISO_16_TO_1                    (4  <<  5)
+#       define RADEON_MAX_ANISO_MASK                       (7  <<  5)
+#       define RADEON_LOD_BIAS_MASK                        (0xff <<  8)
+#       define RADEON_LOD_BIAS_SHIFT                       8
+#       define RADEON_MAX_MIP_LEVEL_MASK                   (0x0f << 16)
+#       define RADEON_MAX_MIP_LEVEL_SHIFT                  16
+#       define RADEON_YUV_TO_RGB                           (1  << 20)
+#       define RADEON_YUV_TEMPERATURE_COOL                 (0  << 21)
+#       define RADEON_YUV_TEMPERATURE_HOT                  (1  << 21)
+#       define RADEON_YUV_TEMPERATURE_MASK                 (1  << 21)
+#       define RADEON_WRAPEN_S                             (1  << 22)
+#       define RADEON_CLAMP_S_WRAP                         (0  << 23)
+#       define RADEON_CLAMP_S_MIRROR                       (1  << 23)
+#       define RADEON_CLAMP_S_CLAMP_LAST                   (2  << 23)
+#       define RADEON_CLAMP_S_MIRROR_CLAMP_LAST            (3  << 23)
+#       define RADEON_CLAMP_S_CLAMP_BORDER                 (4  << 23)
+#       define RADEON_CLAMP_S_MIRROR_CLAMP_BORDER          (5  << 23)
+#       define RADEON_CLAMP_S_CLAMP_GL                     (6  << 23)
+#       define RADEON_CLAMP_S_MIRROR_CLAMP_GL              (7  << 23)
+#       define RADEON_CLAMP_S_MASK                         (7  << 23)
+#       define RADEON_WRAPEN_T                             (1  << 26)
+#       define RADEON_CLAMP_T_WRAP                         (0  << 27)
+#       define RADEON_CLAMP_T_MIRROR                       (1  << 27)
+#       define RADEON_CLAMP_T_CLAMP_LAST                   (2  << 27)
+#       define RADEON_CLAMP_T_MIRROR_CLAMP_LAST            (3  << 27)
+#       define RADEON_CLAMP_T_CLAMP_BORDER                 (4  << 27)
+#       define RADEON_CLAMP_T_MIRROR_CLAMP_BORDER          (5  << 27)
+#       define RADEON_CLAMP_T_CLAMP_GL                     (6  << 27)
+#       define RADEON_CLAMP_T_MIRROR_CLAMP_GL              (7  << 27)
+#       define RADEON_CLAMP_T_MASK                         (7  << 27)
+#       define RADEON_BORDER_MODE_OGL                      (0  << 31)
+#       define RADEON_BORDER_MODE_D3D                      (1  << 31)
+#define RADEON_PP_TXFORMAT_0                0x1c58
+#define RADEON_PP_TXFORMAT_1                0x1c70
+#define RADEON_PP_TXFORMAT_2                0x1c88
+#       define RADEON_TXFORMAT_I8                 (0  <<  0)
+#       define RADEON_TXFORMAT_AI88               (1  <<  0)
+#       define RADEON_TXFORMAT_RGB332             (2  <<  0)
+#       define RADEON_TXFORMAT_ARGB1555           (3  <<  0)
+#       define RADEON_TXFORMAT_RGB565             (4  <<  0)
+#       define RADEON_TXFORMAT_ARGB4444           (5  <<  0)
+#       define RADEON_TXFORMAT_ARGB8888           (6  <<  0)
+#       define RADEON_TXFORMAT_RGBA8888           (7  <<  0)
+#       define RADEON_TXFORMAT_Y8                 (8  <<  0)
+#       define RADEON_TXFORMAT_VYUY422            (10 <<  0)
+#       define RADEON_TXFORMAT_YVYU422            (11 <<  0)
+#       define RADEON_TXFORMAT_DXT1               (12 <<  0)
+#       define RADEON_TXFORMAT_DXT23              (14 <<  0)
+#       define RADEON_TXFORMAT_DXT45              (15 <<  0)
+#	define RADEON_TXFORMAT_SHADOW16           (16 <<  0)
+#	define RADEON_TXFORMAT_SHADOW32           (17 <<  0)
+#       define RADEON_TXFORMAT_DUDV88             (18 <<  0)
+#       define RADEON_TXFORMAT_LDUDV655           (19 <<  0)
+#       define RADEON_TXFORMAT_LDUDUV8888         (20 <<  0)
+#       define RADEON_TXFORMAT_FORMAT_MASK        (31 <<  0)
+#       define RADEON_TXFORMAT_FORMAT_SHIFT       0
+#       define RADEON_TXFORMAT_APPLE_YUV_MODE     (1  <<  5)
+#       define RADEON_TXFORMAT_ALPHA_IN_MAP       (1  <<  6)
+#       define RADEON_TXFORMAT_NON_POWER2         (1  <<  7)
+#       define RADEON_TXFORMAT_WIDTH_MASK         (15 <<  8)
+#       define RADEON_TXFORMAT_WIDTH_SHIFT        8
+#       define RADEON_TXFORMAT_HEIGHT_MASK        (15 << 12)
+#       define RADEON_TXFORMAT_HEIGHT_SHIFT       12
+#       define RADEON_TXFORMAT_F5_WIDTH_MASK      (15 << 16)
+#       define RADEON_TXFORMAT_F5_WIDTH_SHIFT     16
+#       define RADEON_TXFORMAT_F5_HEIGHT_MASK     (15 << 20)
+#       define RADEON_TXFORMAT_F5_HEIGHT_SHIFT    20
+#       define RADEON_TXFORMAT_ST_ROUTE_STQ0      (0  << 24)
+#       define RADEON_TXFORMAT_ST_ROUTE_MASK      (3  << 24)
+#       define RADEON_TXFORMAT_ST_ROUTE_STQ1      (1  << 24)
+#       define RADEON_TXFORMAT_ST_ROUTE_STQ2      (2  << 24)
+#       define RADEON_TXFORMAT_ENDIAN_NO_SWAP     (0  << 26)
+#       define RADEON_TXFORMAT_ENDIAN_16BPP_SWAP  (1  << 26)
+#       define RADEON_TXFORMAT_ENDIAN_32BPP_SWAP  (2  << 26)
+#       define RADEON_TXFORMAT_ENDIAN_HALFDW_SWAP (3  << 26)
+#       define RADEON_TXFORMAT_ALPHA_MASK_ENABLE  (1  << 28)
+#       define RADEON_TXFORMAT_CHROMA_KEY_ENABLE  (1  << 29)
+#       define RADEON_TXFORMAT_CUBIC_MAP_ENABLE   (1  << 30)
+#       define RADEON_TXFORMAT_PERSPECTIVE_ENABLE (1  << 31)
+#define RADEON_PP_CUBIC_FACES_0             0x1d24
+#define RADEON_PP_CUBIC_FACES_1             0x1d28
+#define RADEON_PP_CUBIC_FACES_2             0x1d2c
+#       define RADEON_FACE_WIDTH_1_SHIFT          0
+#       define RADEON_FACE_HEIGHT_1_SHIFT         4
+#       define RADEON_FACE_WIDTH_1_MASK           (0xf << 0)
+#       define RADEON_FACE_HEIGHT_1_MASK          (0xf << 4)
+#       define RADEON_FACE_WIDTH_2_SHIFT          8
+#       define RADEON_FACE_HEIGHT_2_SHIFT         12
+#       define RADEON_FACE_WIDTH_2_MASK           (0xf << 8)
+#       define RADEON_FACE_HEIGHT_2_MASK          (0xf << 12)
+#       define RADEON_FACE_WIDTH_3_SHIFT          16
+#       define RADEON_FACE_HEIGHT_3_SHIFT         20
+#       define RADEON_FACE_WIDTH_3_MASK           (0xf << 16)
+#       define RADEON_FACE_HEIGHT_3_MASK          (0xf << 20)
+#       define RADEON_FACE_WIDTH_4_SHIFT          24
+#       define RADEON_FACE_HEIGHT_4_SHIFT         28
+#       define RADEON_FACE_WIDTH_4_MASK           (0xf << 24)
+#       define RADEON_FACE_HEIGHT_4_MASK          (0xf << 28)
+
+#define RADEON_PP_TXOFFSET_0                0x1c5c
+#define RADEON_PP_TXOFFSET_1                0x1c74
+#define RADEON_PP_TXOFFSET_2                0x1c8c
+#       define RADEON_TXO_ENDIAN_NO_SWAP     (0 << 0)
+#       define RADEON_TXO_ENDIAN_BYTE_SWAP   (1 << 0)
+#       define RADEON_TXO_ENDIAN_WORD_SWAP   (2 << 0)
+#       define RADEON_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
+#       define RADEON_TXO_MACRO_LINEAR       (0 << 2)
+#       define RADEON_TXO_MACRO_TILE         (1 << 2)
+#       define RADEON_TXO_MICRO_LINEAR       (0 << 3)
+#       define RADEON_TXO_MICRO_TILE_X2      (1 << 3)
+#       define RADEON_TXO_MICRO_TILE_OPT     (2 << 3)
+#       define RADEON_TXO_OFFSET_MASK        0xffffffe0
+#       define RADEON_TXO_OFFSET_SHIFT       5
+
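+/*
+ * Illustrative sketch (made-up helper name): texture offsets must be
+ * 32-byte aligned, so bits [4:0] of PP_TXOFFSET are free to carry the
+ * endian-swap and tiling flags defined above.
+ */
+static inline u32 radeon_pack_txoffset(u32 gpu_addr, u32 flags)
+{
+        return (gpu_addr & RADEON_TXO_OFFSET_MASK) | flags;
+}
+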
+#define RADEON_PP_CUBIC_OFFSET_T0_0         0x1dd0  /* bits [31:5] */
+#define RADEON_PP_CUBIC_OFFSET_T0_1         0x1dd4
+#define RADEON_PP_CUBIC_OFFSET_T0_2         0x1dd8
+#define RADEON_PP_CUBIC_OFFSET_T0_3         0x1ddc
+#define RADEON_PP_CUBIC_OFFSET_T0_4         0x1de0
+#define RADEON_PP_CUBIC_OFFSET_T1_0         0x1e00
+#define RADEON_PP_CUBIC_OFFSET_T1_1         0x1e04
+#define RADEON_PP_CUBIC_OFFSET_T1_2         0x1e08
+#define RADEON_PP_CUBIC_OFFSET_T1_3         0x1e0c
+#define RADEON_PP_CUBIC_OFFSET_T1_4         0x1e10
+#define RADEON_PP_CUBIC_OFFSET_T2_0         0x1e14
+#define RADEON_PP_CUBIC_OFFSET_T2_1         0x1e18
+#define RADEON_PP_CUBIC_OFFSET_T2_2         0x1e1c
+#define RADEON_PP_CUBIC_OFFSET_T2_3         0x1e20
+#define RADEON_PP_CUBIC_OFFSET_T2_4         0x1e24
+
+#define RADEON_PP_TEX_SIZE_0                0x1d04  /* NPOT */
+#define RADEON_PP_TEX_SIZE_1                0x1d0c
+#define RADEON_PP_TEX_SIZE_2                0x1d14
+#       define RADEON_TEX_USIZE_MASK        (0x7ff << 0)
+#       define RADEON_TEX_USIZE_SHIFT       0
+#       define RADEON_TEX_VSIZE_MASK        (0x7ff << 16)
+#       define RADEON_TEX_VSIZE_SHIFT       16
+#       define RADEON_SIGNED_RGB_MASK       (1 << 30)
+#       define RADEON_SIGNED_RGB_SHIFT      30
+#       define RADEON_SIGNED_ALPHA_MASK     (1 << 31)
+#       define RADEON_SIGNED_ALPHA_SHIFT    31
+#define RADEON_PP_TEX_PITCH_0               0x1d08  /* NPOT */
+#define RADEON_PP_TEX_PITCH_1               0x1d10  /* NPOT */
+#define RADEON_PP_TEX_PITCH_2               0x1d18  /* NPOT */
+/* note: bits [13:5] hold the 32-byte-aligned byte stride of the texture map */
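+
+/*
+ * Illustrative sketch (made-up helper name): PP_TEX_SIZE packs the NPOT
+ * dimensions into two 11-bit fields, as width-1 and height-1 in the way
+ * the classic user-space drivers typically program it (an assumption
+ * here, not stated by this header).
+ */
+static inline u32 radeon_pack_tex_size(u32 width, u32 height)
+{
+        return (((width - 1) << RADEON_TEX_USIZE_SHIFT) & RADEON_TEX_USIZE_MASK) |
+               (((height - 1) << RADEON_TEX_VSIZE_SHIFT) & RADEON_TEX_VSIZE_MASK);
+}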
+
+#define RADEON_PP_TXCBLEND_0                0x1c60
+#define RADEON_PP_TXCBLEND_1                0x1c78
+#define RADEON_PP_TXCBLEND_2                0x1c90
+#       define RADEON_COLOR_ARG_A_SHIFT          0
+#       define RADEON_COLOR_ARG_A_MASK           (0x1f << 0)
+#       define RADEON_COLOR_ARG_A_ZERO           (0    << 0)
+#       define RADEON_COLOR_ARG_A_CURRENT_COLOR  (2    << 0)
+#       define RADEON_COLOR_ARG_A_CURRENT_ALPHA  (3    << 0)
+#       define RADEON_COLOR_ARG_A_DIFFUSE_COLOR  (4    << 0)
+#       define RADEON_COLOR_ARG_A_DIFFUSE_ALPHA  (5    << 0)
+#       define RADEON_COLOR_ARG_A_SPECULAR_COLOR (6    << 0)
+#       define RADEON_COLOR_ARG_A_SPECULAR_ALPHA (7    << 0)
+#       define RADEON_COLOR_ARG_A_TFACTOR_COLOR  (8    << 0)
+#       define RADEON_COLOR_ARG_A_TFACTOR_ALPHA  (9    << 0)
+#       define RADEON_COLOR_ARG_A_T0_COLOR       (10   << 0)
+#       define RADEON_COLOR_ARG_A_T0_ALPHA       (11   << 0)
+#       define RADEON_COLOR_ARG_A_T1_COLOR       (12   << 0)
+#       define RADEON_COLOR_ARG_A_T1_ALPHA       (13   << 0)
+#       define RADEON_COLOR_ARG_A_T2_COLOR       (14   << 0)
+#       define RADEON_COLOR_ARG_A_T2_ALPHA       (15   << 0)
+#       define RADEON_COLOR_ARG_A_T3_COLOR       (16   << 0)
+#       define RADEON_COLOR_ARG_A_T3_ALPHA       (17   << 0)
+#       define RADEON_COLOR_ARG_B_SHIFT          5
+#       define RADEON_COLOR_ARG_B_MASK           (0x1f << 5)
+#       define RADEON_COLOR_ARG_B_ZERO           (0    << 5)
+#       define RADEON_COLOR_ARG_B_CURRENT_COLOR  (2    << 5)
+#       define RADEON_COLOR_ARG_B_CURRENT_ALPHA  (3    << 5)
+#       define RADEON_COLOR_ARG_B_DIFFUSE_COLOR  (4    << 5)
+#       define RADEON_COLOR_ARG_B_DIFFUSE_ALPHA  (5    << 5)
+#       define RADEON_COLOR_ARG_B_SPECULAR_COLOR (6    << 5)
+#       define RADEON_COLOR_ARG_B_SPECULAR_ALPHA (7    << 5)
+#       define RADEON_COLOR_ARG_B_TFACTOR_COLOR  (8    << 5)
+#       define RADEON_COLOR_ARG_B_TFACTOR_ALPHA  (9    << 5)
+#       define RADEON_COLOR_ARG_B_T0_COLOR       (10   << 5)
+#       define RADEON_COLOR_ARG_B_T0_ALPHA       (11   << 5)
+#       define RADEON_COLOR_ARG_B_T1_COLOR       (12   << 5)
+#       define RADEON_COLOR_ARG_B_T1_ALPHA       (13   << 5)
+#       define RADEON_COLOR_ARG_B_T2_COLOR       (14   << 5)
+#       define RADEON_COLOR_ARG_B_T2_ALPHA       (15   << 5)
+#       define RADEON_COLOR_ARG_B_T3_COLOR       (16   << 5)
+#       define RADEON_COLOR_ARG_B_T3_ALPHA       (17   << 5)
+#       define RADEON_COLOR_ARG_C_SHIFT          10
+#       define RADEON_COLOR_ARG_C_MASK           (0x1f << 10)
+#       define RADEON_COLOR_ARG_C_ZERO           (0    << 10)
+#       define RADEON_COLOR_ARG_C_CURRENT_COLOR  (2    << 10)
+#       define RADEON_COLOR_ARG_C_CURRENT_ALPHA  (3    << 10)
+#       define RADEON_COLOR_ARG_C_DIFFUSE_COLOR  (4    << 10)
+#       define RADEON_COLOR_ARG_C_DIFFUSE_ALPHA  (5    << 10)
+#       define RADEON_COLOR_ARG_C_SPECULAR_COLOR (6    << 10)
+#       define RADEON_COLOR_ARG_C_SPECULAR_ALPHA (7    << 10)
+#       define RADEON_COLOR_ARG_C_TFACTOR_COLOR  (8    << 10)
+#       define RADEON_COLOR_ARG_C_TFACTOR_ALPHA  (9    << 10)
+#       define RADEON_COLOR_ARG_C_T0_COLOR       (10   << 10)
+#       define RADEON_COLOR_ARG_C_T0_ALPHA       (11   << 10)
+#       define RADEON_COLOR_ARG_C_T1_COLOR       (12   << 10)
+#       define RADEON_COLOR_ARG_C_T1_ALPHA       (13   << 10)
+#       define RADEON_COLOR_ARG_C_T2_COLOR       (14   << 10)
+#       define RADEON_COLOR_ARG_C_T2_ALPHA       (15   << 10)
+#       define RADEON_COLOR_ARG_C_T3_COLOR       (16   << 10)
+#       define RADEON_COLOR_ARG_C_T3_ALPHA       (17   << 10)
+#       define RADEON_COMP_ARG_A                 (1 << 15)
+#       define RADEON_COMP_ARG_A_SHIFT           15
+#       define RADEON_COMP_ARG_B                 (1 << 16)
+#       define RADEON_COMP_ARG_B_SHIFT           16
+#       define RADEON_COMP_ARG_C                 (1 << 17)
+#       define RADEON_COMP_ARG_C_SHIFT           17
+#       define RADEON_BLEND_CTL_MASK             (7 << 18)
+#       define RADEON_BLEND_CTL_ADD              (0 << 18)
+#       define RADEON_BLEND_CTL_SUBTRACT         (1 << 18)
+#       define RADEON_BLEND_CTL_ADDSIGNED        (2 << 18)
+#       define RADEON_BLEND_CTL_BLEND            (3 << 18)
+#       define RADEON_BLEND_CTL_DOT3             (4 << 18)
+#       define RADEON_SCALE_SHIFT                21
+#       define RADEON_SCALE_MASK                 (3 << 21)
+#       define RADEON_SCALE_1X                   (0 << 21)
+#       define RADEON_SCALE_2X                   (1 << 21)
+#       define RADEON_SCALE_4X                   (2 << 21)
+#       define RADEON_CLAMP_TX                   (1 << 23)
+#       define RADEON_T0_EQ_TCUR                 (1 << 24)
+#       define RADEON_T1_EQ_TCUR                 (1 << 25)
+#       define RADEON_T2_EQ_TCUR                 (1 << 26)
+#       define RADEON_T3_EQ_TCUR                 (1 << 27)
+#       define RADEON_COLOR_ARG_MASK             0x1f
+#       define RADEON_COMP_ARG_SHIFT             15
+#define RADEON_PP_TXABLEND_0                0x1c64
+#define RADEON_PP_TXABLEND_1                0x1c7c
+#define RADEON_PP_TXABLEND_2                0x1c94
+#       define RADEON_ALPHA_ARG_A_SHIFT          0
+#       define RADEON_ALPHA_ARG_A_MASK           (0xf << 0)
+#       define RADEON_ALPHA_ARG_A_ZERO           (0   << 0)
+#       define RADEON_ALPHA_ARG_A_CURRENT_ALPHA  (1   << 0)
+#       define RADEON_ALPHA_ARG_A_DIFFUSE_ALPHA  (2   << 0)
+#       define RADEON_ALPHA_ARG_A_SPECULAR_ALPHA (3   << 0)
+#       define RADEON_ALPHA_ARG_A_TFACTOR_ALPHA  (4   << 0)
+#       define RADEON_ALPHA_ARG_A_T0_ALPHA       (5   << 0)
+#       define RADEON_ALPHA_ARG_A_T1_ALPHA       (6   << 0)
+#       define RADEON_ALPHA_ARG_A_T2_ALPHA       (7   << 0)
+#       define RADEON_ALPHA_ARG_A_T3_ALPHA       (8   << 0)
+#       define RADEON_ALPHA_ARG_B_SHIFT          4
+#       define RADEON_ALPHA_ARG_B_MASK           (0xf << 4)
+#       define RADEON_ALPHA_ARG_B_ZERO           (0   << 4)
+#       define RADEON_ALPHA_ARG_B_CURRENT_ALPHA  (1   << 4)
+#       define RADEON_ALPHA_ARG_B_DIFFUSE_ALPHA  (2   << 4)
+#       define RADEON_ALPHA_ARG_B_SPECULAR_ALPHA (3   << 4)
+#       define RADEON_ALPHA_ARG_B_TFACTOR_ALPHA  (4   << 4)
+#       define RADEON_ALPHA_ARG_B_T0_ALPHA       (5   << 4)
+#       define RADEON_ALPHA_ARG_B_T1_ALPHA       (6   << 4)
+#       define RADEON_ALPHA_ARG_B_T2_ALPHA       (7   << 4)
+#       define RADEON_ALPHA_ARG_B_T3_ALPHA       (8   << 4)
+#       define RADEON_ALPHA_ARG_C_SHIFT          8
+#       define RADEON_ALPHA_ARG_C_MASK           (0xf << 8)
+#       define RADEON_ALPHA_ARG_C_ZERO           (0   << 8)
+#       define RADEON_ALPHA_ARG_C_CURRENT_ALPHA  (1   << 8)
+#       define RADEON_ALPHA_ARG_C_DIFFUSE_ALPHA  (2   << 8)
+#       define RADEON_ALPHA_ARG_C_SPECULAR_ALPHA (3   << 8)
+#       define RADEON_ALPHA_ARG_C_TFACTOR_ALPHA  (4   << 8)
+#       define RADEON_ALPHA_ARG_C_T0_ALPHA       (5   << 8)
+#       define RADEON_ALPHA_ARG_C_T1_ALPHA       (6   << 8)
+#       define RADEON_ALPHA_ARG_C_T2_ALPHA       (7   << 8)
+#       define RADEON_ALPHA_ARG_C_T3_ALPHA       (8   << 8)
+#       define RADEON_DOT_ALPHA_DONT_REPLICATE   (1   << 9)
+#       define RADEON_ALPHA_ARG_MASK             0xf
+
+#define RADEON_PP_TFACTOR_0                 0x1c68
+#define RADEON_PP_TFACTOR_1                 0x1c80
+#define RADEON_PP_TFACTOR_2                 0x1c98
+
+#define RADEON_RB3D_BLENDCNTL               0x1c20
+#       define RADEON_COMB_FCN_MASK                    (3  << 12)
+#       define RADEON_COMB_FCN_ADD_CLAMP               (0  << 12)
+#       define RADEON_COMB_FCN_ADD_NOCLAMP             (1  << 12)
+#       define RADEON_COMB_FCN_SUB_CLAMP               (2  << 12)
+#       define RADEON_COMB_FCN_SUB_NOCLAMP             (3  << 12)
+#       define RADEON_SRC_BLEND_GL_ZERO                (32 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE                 (33 << 16)
+#       define RADEON_SRC_BLEND_GL_SRC_COLOR           (34 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
+#       define RADEON_SRC_BLEND_GL_DST_COLOR           (36 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
+#       define RADEON_SRC_BLEND_GL_SRC_ALPHA           (38 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
+#       define RADEON_SRC_BLEND_GL_DST_ALPHA           (40 << 16)
+#       define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
+#       define RADEON_SRC_BLEND_GL_SRC_ALPHA_SATURATE  (42 << 16)
+#       define RADEON_SRC_BLEND_MASK                   (63 << 16)
+#       define RADEON_DST_BLEND_GL_ZERO                (32 << 24)
+#       define RADEON_DST_BLEND_GL_ONE                 (33 << 24)
+#       define RADEON_DST_BLEND_GL_SRC_COLOR           (34 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
+#       define RADEON_DST_BLEND_GL_DST_COLOR           (36 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
+#       define RADEON_DST_BLEND_GL_SRC_ALPHA           (38 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
+#       define RADEON_DST_BLEND_GL_DST_ALPHA           (40 << 24)
+#       define RADEON_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
+#       define RADEON_DST_BLEND_MASK                   (63 << 24)
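+
+/*
+ * Illustrative sketch (made-up helper name): the source and destination
+ * factors occupy bits [21:16] and [29:24] respectively, encoded as
+ * 32..42 and named after their GL counterparts, so a standard
+ * GL_SRC_ALPHA / GL_ONE_MINUS_SRC_ALPHA setup composes as below.
+ */
+static inline u32 radeon_blendcntl_std_alpha(void)
+{
+        return RADEON_COMB_FCN_ADD_CLAMP |
+               RADEON_SRC_BLEND_GL_SRC_ALPHA |
+               RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA;
+}
+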
+#define RADEON_RB3D_CNTL                    0x1c3c
+#       define RADEON_ALPHA_BLEND_ENABLE       (1  <<  0)
+#       define RADEON_PLANE_MASK_ENABLE        (1  <<  1)
+#       define RADEON_DITHER_ENABLE            (1  <<  2)
+#       define RADEON_ROUND_ENABLE             (1  <<  3)
+#       define RADEON_SCALE_DITHER_ENABLE      (1  <<  4)
+#       define RADEON_DITHER_INIT              (1  <<  5)
+#       define RADEON_ROP_ENABLE               (1  <<  6)
+#       define RADEON_STENCIL_ENABLE           (1  <<  7)
+#       define RADEON_Z_ENABLE                 (1  <<  8)
+#       define RADEON_DEPTHXY_OFFSET_ENABLE    (1  <<  9)
+#       define RADEON_RB3D_COLOR_FORMAT_SHIFT  10
+
+#       define RADEON_COLOR_FORMAT_ARGB1555    3
+#       define RADEON_COLOR_FORMAT_RGB565      4
+#       define RADEON_COLOR_FORMAT_ARGB8888    6
+#       define RADEON_COLOR_FORMAT_RGB332      7
+#       define RADEON_COLOR_FORMAT_Y8          8
+#       define RADEON_COLOR_FORMAT_RGB8        9
+#       define RADEON_COLOR_FORMAT_YUV422_VYUY 11
+#       define RADEON_COLOR_FORMAT_YUV422_YVYU 12
+#       define RADEON_COLOR_FORMAT_aYUV444     14
+#       define RADEON_COLOR_FORMAT_ARGB4444    15
+
+#       define RADEON_CLRCMP_FLIP_ENABLE       (1  << 14)
+#define RADEON_RB3D_COLOROFFSET             0x1c40
+#       define RADEON_COLOROFFSET_MASK      0xfffffff0
+#define RADEON_RB3D_COLORPITCH              0x1c48
+#       define RADEON_COLORPITCH_MASK         0x00001ff8
+#       define RADEON_COLOR_TILE_ENABLE       (1 << 16)
+#       define RADEON_COLOR_MICROTILE_ENABLE  (1 << 17)
+#       define RADEON_COLOR_ENDIAN_NO_SWAP    (0 << 18)
+#       define RADEON_COLOR_ENDIAN_WORD_SWAP  (1 << 18)
+#       define RADEON_COLOR_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_DEPTHOFFSET             0x1c24
+#define RADEON_RB3D_DEPTHPITCH              0x1c28
+#       define RADEON_DEPTHPITCH_MASK         0x00001ff8
+#       define RADEON_DEPTH_ENDIAN_NO_SWAP    (0 << 18)
+#       define RADEON_DEPTH_ENDIAN_WORD_SWAP  (1 << 18)
+#       define RADEON_DEPTH_ENDIAN_DWORD_SWAP (2 << 18)
+#define RADEON_RB3D_PLANEMASK               0x1d84
+#define RADEON_RB3D_ROPCNTL                 0x1d80
+#       define RADEON_ROP_MASK              (15 << 8)
+#       define RADEON_ROP_CLEAR             (0  << 8)
+#       define RADEON_ROP_NOR               (1  << 8)
+#       define RADEON_ROP_AND_INVERTED      (2  << 8)
+#       define RADEON_ROP_COPY_INVERTED     (3  << 8)
+#       define RADEON_ROP_AND_REVERSE       (4  << 8)
+#       define RADEON_ROP_INVERT            (5  << 8)
+#       define RADEON_ROP_XOR               (6  << 8)
+#       define RADEON_ROP_NAND              (7  << 8)
+#       define RADEON_ROP_AND               (8  << 8)
+#       define RADEON_ROP_EQUIV             (9  << 8)
+#       define RADEON_ROP_NOOP              (10 << 8)
+#       define RADEON_ROP_OR_INVERTED       (11 << 8)
+#       define RADEON_ROP_COPY              (12 << 8)
+#       define RADEON_ROP_OR_REVERSE        (13 << 8)
+#       define RADEON_ROP_OR                (14 << 8)
+#       define RADEON_ROP_SET               (15 << 8)
+#define RADEON_RB3D_STENCILREFMASK          0x1d7c
+#       define RADEON_STENCIL_REF_SHIFT       0
+#       define RADEON_STENCIL_REF_MASK        (0xff << 0)
+#       define RADEON_STENCIL_MASK_SHIFT      16
+#       define RADEON_STENCIL_VALUE_MASK      (0xff << 16)
+#       define RADEON_STENCIL_WRITEMASK_SHIFT 24
+#       define RADEON_STENCIL_WRITE_MASK      (0xff << 24)
+#define RADEON_RB3D_ZSTENCILCNTL            0x1c2c
+#       define RADEON_DEPTH_FORMAT_MASK          (0xf << 0)
+#       define RADEON_DEPTH_FORMAT_16BIT_INT_Z   (0  <<  0)
+#       define RADEON_DEPTH_FORMAT_24BIT_INT_Z   (2  <<  0)
+#       define RADEON_DEPTH_FORMAT_24BIT_FLOAT_Z (3  <<  0)
+#       define RADEON_DEPTH_FORMAT_32BIT_INT_Z   (4  <<  0)
+#       define RADEON_DEPTH_FORMAT_32BIT_FLOAT_Z (5  <<  0)
+#       define RADEON_DEPTH_FORMAT_16BIT_FLOAT_W (7  <<  0)
+#       define RADEON_DEPTH_FORMAT_24BIT_FLOAT_W (9  <<  0)
+#       define RADEON_DEPTH_FORMAT_32BIT_FLOAT_W (11 <<  0)
+#       define RADEON_Z_TEST_NEVER               (0  <<  4)
+#       define RADEON_Z_TEST_LESS                (1  <<  4)
+#       define RADEON_Z_TEST_LEQUAL              (2  <<  4)
+#       define RADEON_Z_TEST_EQUAL               (3  <<  4)
+#       define RADEON_Z_TEST_GEQUAL              (4  <<  4)
+#       define RADEON_Z_TEST_GREATER             (5  <<  4)
+#       define RADEON_Z_TEST_NEQUAL              (6  <<  4)
+#       define RADEON_Z_TEST_ALWAYS              (7  <<  4)
+#       define RADEON_Z_TEST_MASK                (7  <<  4)
+#       define RADEON_STENCIL_TEST_NEVER         (0  << 12)
+#       define RADEON_STENCIL_TEST_LESS          (1  << 12)
+#       define RADEON_STENCIL_TEST_LEQUAL        (2  << 12)
+#       define RADEON_STENCIL_TEST_EQUAL         (3  << 12)
+#       define RADEON_STENCIL_TEST_GEQUAL        (4  << 12)
+#       define RADEON_STENCIL_TEST_GREATER       (5  << 12)
+#       define RADEON_STENCIL_TEST_NEQUAL        (6  << 12)
+#       define RADEON_STENCIL_TEST_ALWAYS        (7  << 12)
+#       define RADEON_STENCIL_TEST_MASK          (0x7 << 12)
+#       define RADEON_STENCIL_FAIL_KEEP          (0  << 16)
+#       define RADEON_STENCIL_FAIL_ZERO          (1  << 16)
+#       define RADEON_STENCIL_FAIL_REPLACE       (2  << 16)
+#       define RADEON_STENCIL_FAIL_INC           (3  << 16)
+#       define RADEON_STENCIL_FAIL_DEC           (4  << 16)
+#       define RADEON_STENCIL_FAIL_INVERT        (5  << 16)
+#       define RADEON_STENCIL_FAIL_MASK          (0x7 << 16)
+#       define RADEON_STENCIL_ZPASS_KEEP         (0  << 20)
+#       define RADEON_STENCIL_ZPASS_ZERO         (1  << 20)
+#       define RADEON_STENCIL_ZPASS_REPLACE      (2  << 20)
+#       define RADEON_STENCIL_ZPASS_INC          (3  << 20)
+#       define RADEON_STENCIL_ZPASS_DEC          (4  << 20)
+#       define RADEON_STENCIL_ZPASS_INVERT       (5  << 20)
+#       define RADEON_STENCIL_ZPASS_MASK         (0x7 << 20)
+#       define RADEON_STENCIL_ZFAIL_KEEP         (0  << 24)
+#       define RADEON_STENCIL_ZFAIL_ZERO         (1  << 24)
+#       define RADEON_STENCIL_ZFAIL_REPLACE      (2  << 24)
+#       define RADEON_STENCIL_ZFAIL_INC          (3  << 24)
+#       define RADEON_STENCIL_ZFAIL_DEC          (4  << 24)
+#       define RADEON_STENCIL_ZFAIL_INVERT       (5  << 24)
+#       define RADEON_STENCIL_ZFAIL_MASK         (0x7 << 24)
+#       define RADEON_Z_COMPRESSION_ENABLE       (1  << 28)
+#       define RADEON_FORCE_Z_DIRTY              (1  << 29)
+#       define RADEON_Z_WRITE_ENABLE             (1  << 30)
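+
+/*
+ * Illustrative sketch (made-up helper name): a common depth-only setup
+ * packs a 24-bit integer Z buffer, a LESS compare, and Z writes into
+ * ZSTENCILCNTL; the stencil fields stay at their zero (NEVER/KEEP)
+ * encodings, which is harmless while RADEON_STENCIL_ENABLE is clear in
+ * RB3D_CNTL.
+ */
+static inline u32 radeon_zstencilcntl_depth_only(void)
+{
+        return RADEON_DEPTH_FORMAT_24BIT_INT_Z |
+               RADEON_Z_TEST_LESS |
+               RADEON_Z_WRITE_ENABLE;
+}
+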
+#define RADEON_RE_LINE_PATTERN              0x1cd0
+#       define RADEON_LINE_PATTERN_MASK             0x0000ffff
+#       define RADEON_LINE_REPEAT_COUNT_SHIFT       16
+#       define RADEON_LINE_PATTERN_START_SHIFT      24
+#       define RADEON_LINE_PATTERN_LITTLE_BIT_ORDER (0 << 28)
+#       define RADEON_LINE_PATTERN_BIG_BIT_ORDER    (1 << 28)
+#       define RADEON_LINE_PATTERN_AUTO_RESET       (1 << 29)
+#define RADEON_RE_LINE_STATE                0x1cd4
+#       define RADEON_LINE_CURRENT_PTR_SHIFT   0
+#       define RADEON_LINE_CURRENT_COUNT_SHIFT 8
+#define RADEON_RE_MISC                      0x26c4
+#       define RADEON_STIPPLE_COORD_MASK       0x1f
+#       define RADEON_STIPPLE_X_OFFSET_SHIFT   0
+#       define RADEON_STIPPLE_X_OFFSET_MASK    (0x1f << 0)
+#       define RADEON_STIPPLE_Y_OFFSET_SHIFT   8
+#       define RADEON_STIPPLE_Y_OFFSET_MASK    (0x1f << 8)
+#       define RADEON_STIPPLE_LITTLE_BIT_ORDER (0 << 16)
+#       define RADEON_STIPPLE_BIG_BIT_ORDER    (1 << 16)
+#define RADEON_RE_SOLID_COLOR               0x1c1c
+#define RADEON_RE_TOP_LEFT                  0x26c0
+#       define RADEON_RE_LEFT_SHIFT         0
+#       define RADEON_RE_TOP_SHIFT          16
+#define RADEON_RE_WIDTH_HEIGHT              0x1c44
+#       define RADEON_RE_WIDTH_SHIFT        0
+#       define RADEON_RE_HEIGHT_SHIFT       16
+
+#define RADEON_RB3D_ZPASS_DATA 0x3290
+#define RADEON_RB3D_ZPASS_ADDR 0x3294
+
+#define RADEON_SE_CNTL                      0x1c4c
+#       define RADEON_FFACE_CULL_CW          (0 <<  0)
+#       define RADEON_FFACE_CULL_CCW         (1 <<  0)
+#       define RADEON_FFACE_CULL_DIR_MASK    (1 <<  0)
+#       define RADEON_BFACE_CULL             (0 <<  1)
+#       define RADEON_BFACE_SOLID            (3 <<  1)
+#       define RADEON_FFACE_CULL             (0 <<  3)
+#       define RADEON_FFACE_SOLID            (3 <<  3)
+#       define RADEON_FFACE_CULL_MASK        (3 <<  3)
+#       define RADEON_BADVTX_CULL_DISABLE    (1 <<  5)
+#       define RADEON_FLAT_SHADE_VTX_0       (0 <<  6)
+#       define RADEON_FLAT_SHADE_VTX_1       (1 <<  6)
+#       define RADEON_FLAT_SHADE_VTX_2       (2 <<  6)
+#       define RADEON_FLAT_SHADE_VTX_LAST    (3 <<  6)
+#       define RADEON_DIFFUSE_SHADE_SOLID    (0 <<  8)
+#       define RADEON_DIFFUSE_SHADE_FLAT     (1 <<  8)
+#       define RADEON_DIFFUSE_SHADE_GOURAUD  (2 <<  8)
+#       define RADEON_DIFFUSE_SHADE_MASK     (3 <<  8)
+#       define RADEON_ALPHA_SHADE_SOLID      (0 << 10)
+#       define RADEON_ALPHA_SHADE_FLAT       (1 << 10)
+#       define RADEON_ALPHA_SHADE_GOURAUD    (2 << 10)
+#       define RADEON_ALPHA_SHADE_MASK       (3 << 10)
+#       define RADEON_SPECULAR_SHADE_SOLID   (0 << 12)
+#       define RADEON_SPECULAR_SHADE_FLAT    (1 << 12)
+#       define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12)
+#       define RADEON_SPECULAR_SHADE_MASK    (3 << 12)
+#       define RADEON_FOG_SHADE_SOLID        (0 << 14)
+#       define RADEON_FOG_SHADE_FLAT         (1 << 14)
+#       define RADEON_FOG_SHADE_GOURAUD      (2 << 14)
+#       define RADEON_FOG_SHADE_MASK         (3 << 14)
+#       define RADEON_ZBIAS_ENABLE_POINT     (1 << 16)
+#       define RADEON_ZBIAS_ENABLE_LINE      (1 << 17)
+#       define RADEON_ZBIAS_ENABLE_TRI       (1 << 18)
+#       define RADEON_WIDELINE_ENABLE        (1 << 20)
+#       define RADEON_VPORT_XY_XFORM_ENABLE  (1 << 24)
+#       define RADEON_VPORT_Z_XFORM_ENABLE   (1 << 25)
+#       define RADEON_VTX_PIX_CENTER_D3D     (0 << 27)
+#       define RADEON_VTX_PIX_CENTER_OGL     (1 << 27)
+#       define RADEON_ROUND_MODE_TRUNC       (0 << 28)
+#       define RADEON_ROUND_MODE_ROUND       (1 << 28)
+#       define RADEON_ROUND_MODE_ROUND_EVEN  (2 << 28)
+#       define RADEON_ROUND_MODE_ROUND_ODD   (3 << 28)
+#       define RADEON_ROUND_PREC_16TH_PIX    (0 << 30)
+#       define RADEON_ROUND_PREC_8TH_PIX     (1 << 30)
+#       define RADEON_ROUND_PREC_4TH_PIX     (2 << 30)
+#       define RADEON_ROUND_PREC_HALF_PIX    (3 << 30)
+#define R200_RE_CNTL				0x1c50
+#       define R200_STIPPLE_ENABLE		0x1
+#       define R200_SCISSOR_ENABLE		0x2
+#       define R200_PATTERN_ENABLE		0x4
+#       define R200_PERSPECTIVE_ENABLE		0x8
+#       define R200_POINT_SMOOTH		0x20
+#       define R200_VTX_STQ0_D3D		0x00010000
+#       define R200_VTX_STQ1_D3D		0x00040000
+#       define R200_VTX_STQ2_D3D		0x00100000
+#       define R200_VTX_STQ3_D3D		0x00400000
+#       define R200_VTX_STQ4_D3D		0x01000000
+#       define R200_VTX_STQ5_D3D		0x04000000
+#define RADEON_SE_CNTL_STATUS               0x2140
+#       define RADEON_VC_NO_SWAP            (0 << 0)
+#       define RADEON_VC_16BIT_SWAP         (1 << 0)
+#       define RADEON_VC_32BIT_SWAP         (2 << 0)
+#       define RADEON_VC_HALF_DWORD_SWAP    (3 << 0)
+#       define RADEON_TCL_BYPASS            (1 << 8)
+#define RADEON_SE_COORD_FMT                 0x1c50
+#       define RADEON_VTX_XY_PRE_MULT_1_OVER_W0  (1 <<  0)
+#       define RADEON_VTX_Z_PRE_MULT_1_OVER_W0   (1 <<  1)
+#       define RADEON_VTX_ST0_NONPARAMETRIC      (1 <<  8)
+#       define RADEON_VTX_ST1_NONPARAMETRIC      (1 <<  9)
+#       define RADEON_VTX_ST2_NONPARAMETRIC      (1 << 10)
+#       define RADEON_VTX_ST3_NONPARAMETRIC      (1 << 11)
+#       define RADEON_VTX_W0_NORMALIZE           (1 << 12)
+#       define RADEON_VTX_W0_IS_NOT_1_OVER_W0    (1 << 16)
+#       define RADEON_VTX_ST0_PRE_MULT_1_OVER_W0 (1 << 17)
+#       define RADEON_VTX_ST1_PRE_MULT_1_OVER_W0 (1 << 19)
+#       define RADEON_VTX_ST2_PRE_MULT_1_OVER_W0 (1 << 21)
+#       define RADEON_VTX_ST3_PRE_MULT_1_OVER_W0 (1 << 23)
+#       define RADEON_TEX1_W_ROUTING_USE_W0      (0 << 26)
+#       define RADEON_TEX1_W_ROUTING_USE_Q1      (1 << 26)
+#define RADEON_SE_LINE_WIDTH                0x1db8
+#define RADEON_SE_TCL_LIGHT_MODEL_CTL       0x226c
+#       define RADEON_LIGHTING_ENABLE              (1 << 0)
+#       define RADEON_LIGHT_IN_MODELSPACE          (1 << 1)
+#       define RADEON_LOCAL_VIEWER                 (1 << 2)
+#       define RADEON_NORMALIZE_NORMALS            (1 << 3)
+#       define RADEON_RESCALE_NORMALS              (1 << 4)
+#       define RADEON_SPECULAR_LIGHTS              (1 << 5)
+#       define RADEON_DIFFUSE_SPECULAR_COMBINE     (1 << 6)
+#       define RADEON_LIGHT_ALPHA                  (1 << 7)
+#       define RADEON_LOCAL_LIGHT_VEC_GL           (1 << 8)
+#       define RADEON_LIGHT_NO_NORMAL_AMBIENT_ONLY (1 << 9)
+#       define RADEON_LM_SOURCE_STATE_PREMULT      0
+#       define RADEON_LM_SOURCE_STATE_MULT         1
+#       define RADEON_LM_SOURCE_VERTEX_DIFFUSE     2
+#       define RADEON_LM_SOURCE_VERTEX_SPECULAR    3
+#       define RADEON_EMISSIVE_SOURCE_SHIFT        16
+#       define RADEON_AMBIENT_SOURCE_SHIFT         18
+#       define RADEON_DIFFUSE_SOURCE_SHIFT         20
+#       define RADEON_SPECULAR_SOURCE_SHIFT        22
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_RED     0x2220
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_GREEN   0x2224
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_BLUE    0x2228
+#define RADEON_SE_TCL_MATERIAL_AMBIENT_ALPHA   0x222c
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_RED     0x2230
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_GREEN   0x2234
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_BLUE    0x2238
+#define RADEON_SE_TCL_MATERIAL_DIFFUSE_ALPHA   0x223c
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED   0x2210
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_GREEN 0x2214
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_BLUE  0x2218
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_ALPHA 0x221c
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_RED    0x2240
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_GREEN  0x2244
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_BLUE   0x2248
+#define RADEON_SE_TCL_MATERIAL_SPECULAR_ALPHA  0x224c
+#define RADEON_SE_TCL_MATRIX_SELECT_0       0x225c
+#       define RADEON_MODELVIEW_0_SHIFT        0
+#       define RADEON_MODELVIEW_1_SHIFT        4
+#       define RADEON_MODELVIEW_2_SHIFT        8
+#       define RADEON_MODELVIEW_3_SHIFT        12
+#       define RADEON_IT_MODELVIEW_0_SHIFT     16
+#       define RADEON_IT_MODELVIEW_1_SHIFT     20
+#       define RADEON_IT_MODELVIEW_2_SHIFT     24
+#       define RADEON_IT_MODELVIEW_3_SHIFT     28
+#define RADEON_SE_TCL_MATRIX_SELECT_1       0x2260
+#       define RADEON_MODELPROJECT_0_SHIFT     0
+#       define RADEON_MODELPROJECT_1_SHIFT     4
+#       define RADEON_MODELPROJECT_2_SHIFT     8
+#       define RADEON_MODELPROJECT_3_SHIFT     12
+#       define RADEON_TEXMAT_0_SHIFT           16
+#       define RADEON_TEXMAT_1_SHIFT           20
+#       define RADEON_TEXMAT_2_SHIFT           24
+#       define RADEON_TEXMAT_3_SHIFT           28
+
+
+#define RADEON_SE_TCL_OUTPUT_VTX_FMT        0x2254
+#       define RADEON_TCL_VTX_W0                 (1 <<  0)
+#       define RADEON_TCL_VTX_FP_DIFFUSE         (1 <<  1)
+#       define RADEON_TCL_VTX_FP_ALPHA           (1 <<  2)
+#       define RADEON_TCL_VTX_PK_DIFFUSE         (1 <<  3)
+#       define RADEON_TCL_VTX_FP_SPEC            (1 <<  4)
+#       define RADEON_TCL_VTX_FP_FOG             (1 <<  5)
+#       define RADEON_TCL_VTX_PK_SPEC            (1 <<  6)
+#       define RADEON_TCL_VTX_ST0                (1 <<  7)
+#       define RADEON_TCL_VTX_ST1                (1 <<  8)
+#       define RADEON_TCL_VTX_Q1                 (1 <<  9)
+#       define RADEON_TCL_VTX_ST2                (1 << 10)
+#       define RADEON_TCL_VTX_Q2                 (1 << 11)
+#       define RADEON_TCL_VTX_ST3                (1 << 12)
+#       define RADEON_TCL_VTX_Q3                 (1 << 13)
+#       define RADEON_TCL_VTX_Q0                 (1 << 14)
+#       define RADEON_TCL_VTX_WEIGHT_COUNT_SHIFT 15
+#       define RADEON_TCL_VTX_NORM0              (1 << 18)
+#       define RADEON_TCL_VTX_XY1                (1 << 27)
+#       define RADEON_TCL_VTX_Z1                 (1 << 28)
+#       define RADEON_TCL_VTX_W1                 (1 << 29)
+#       define RADEON_TCL_VTX_NORM1              (1 << 30)
+#       define RADEON_TCL_VTX_Z0                 (1 << 31)
+
+#define RADEON_SE_TCL_OUTPUT_VTX_SEL        0x2258
+#       define RADEON_TCL_COMPUTE_XYZW           (1 << 0)
+#       define RADEON_TCL_COMPUTE_DIFFUSE        (1 << 1)
+#       define RADEON_TCL_COMPUTE_SPECULAR       (1 << 2)
+#       define RADEON_TCL_FORCE_NAN_IF_COLOR_NAN (1 << 3)
+#       define RADEON_TCL_FORCE_INORDER_PROC     (1 << 4)
+#       define RADEON_TCL_TEX_INPUT_TEX_0        0
+#       define RADEON_TCL_TEX_INPUT_TEX_1        1
+#       define RADEON_TCL_TEX_INPUT_TEX_2        2
+#       define RADEON_TCL_TEX_INPUT_TEX_3        3
+#       define RADEON_TCL_TEX_COMPUTED_TEX_0     8
+#       define RADEON_TCL_TEX_COMPUTED_TEX_1     9
+#       define RADEON_TCL_TEX_COMPUTED_TEX_2     10
+#       define RADEON_TCL_TEX_COMPUTED_TEX_3     11
+#       define RADEON_TCL_TEX_0_OUTPUT_SHIFT     16
+#       define RADEON_TCL_TEX_1_OUTPUT_SHIFT     20
+#       define RADEON_TCL_TEX_2_OUTPUT_SHIFT     24
+#       define RADEON_TCL_TEX_3_OUTPUT_SHIFT     28
+
+#define RADEON_SE_TCL_PER_LIGHT_CTL_0       0x2270
+#       define RADEON_LIGHT_0_ENABLE               (1 <<  0)
+#       define RADEON_LIGHT_0_ENABLE_AMBIENT       (1 <<  1)
+#       define RADEON_LIGHT_0_ENABLE_SPECULAR      (1 <<  2)
+#       define RADEON_LIGHT_0_IS_LOCAL             (1 <<  3)
+#       define RADEON_LIGHT_0_IS_SPOT              (1 <<  4)
+#       define RADEON_LIGHT_0_DUAL_CONE            (1 <<  5)
+#       define RADEON_LIGHT_0_ENABLE_RANGE_ATTEN   (1 <<  6)
+#       define RADEON_LIGHT_0_CONSTANT_RANGE_ATTEN (1 <<  7)
+#       define RADEON_LIGHT_0_SHIFT                0
+#       define RADEON_LIGHT_1_ENABLE               (1 << 16)
+#       define RADEON_LIGHT_1_ENABLE_AMBIENT       (1 << 17)
+#       define RADEON_LIGHT_1_ENABLE_SPECULAR      (1 << 18)
+#       define RADEON_LIGHT_1_IS_LOCAL             (1 << 19)
+#       define RADEON_LIGHT_1_IS_SPOT              (1 << 20)
+#       define RADEON_LIGHT_1_DUAL_CONE            (1 << 21)
+#       define RADEON_LIGHT_1_ENABLE_RANGE_ATTEN   (1 << 22)
+#       define RADEON_LIGHT_1_CONSTANT_RANGE_ATTEN (1 << 23)
+#       define RADEON_LIGHT_1_SHIFT                16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_1       0x2274
+#       define RADEON_LIGHT_2_SHIFT            0
+#       define RADEON_LIGHT_3_SHIFT            16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_2       0x2278
+#       define RADEON_LIGHT_4_SHIFT            0
+#       define RADEON_LIGHT_5_SHIFT            16
+#define RADEON_SE_TCL_PER_LIGHT_CTL_3       0x227c
+#       define RADEON_LIGHT_6_SHIFT            0
+#       define RADEON_LIGHT_7_SHIFT            16
+
+#define RADEON_SE_TCL_SHININESS             0x2250
+
+#define RADEON_SE_TCL_TEXTURE_PROC_CTL      0x2268
+#       define RADEON_TEXGEN_TEXMAT_0_ENABLE      (1 << 0)
+#       define RADEON_TEXGEN_TEXMAT_1_ENABLE      (1 << 1)
+#       define RADEON_TEXGEN_TEXMAT_2_ENABLE      (1 << 2)
+#       define RADEON_TEXGEN_TEXMAT_3_ENABLE      (1 << 3)
+#       define RADEON_TEXMAT_0_ENABLE             (1 << 4)
+#       define RADEON_TEXMAT_1_ENABLE             (1 << 5)
+#       define RADEON_TEXMAT_2_ENABLE             (1 << 6)
+#       define RADEON_TEXMAT_3_ENABLE             (1 << 7)
+#       define RADEON_TEXGEN_INPUT_MASK           0xf
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_0     0
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_1     1
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_2     2
+#       define RADEON_TEXGEN_INPUT_TEXCOORD_3     3
+#       define RADEON_TEXGEN_INPUT_OBJ            4
+#       define RADEON_TEXGEN_INPUT_EYE            5
+#       define RADEON_TEXGEN_INPUT_EYE_NORMAL     6
+#       define RADEON_TEXGEN_INPUT_EYE_REFLECT    7
+#       define RADEON_TEXGEN_INPUT_EYE_NORMALIZED 8
+#       define RADEON_TEXGEN_0_INPUT_SHIFT        16
+#       define RADEON_TEXGEN_1_INPUT_SHIFT        20
+#       define RADEON_TEXGEN_2_INPUT_SHIFT        24
+#       define RADEON_TEXGEN_3_INPUT_SHIFT        28
+
+#define RADEON_SE_TCL_UCP_VERT_BLEND_CTL    0x2264
+#       define RADEON_UCP_IN_CLIP_SPACE            (1 <<  0)
+#       define RADEON_UCP_IN_MODEL_SPACE           (1 <<  1)
+#       define RADEON_UCP_ENABLE_0                 (1 <<  2)
+#       define RADEON_UCP_ENABLE_1                 (1 <<  3)
+#       define RADEON_UCP_ENABLE_2                 (1 <<  4)
+#       define RADEON_UCP_ENABLE_3                 (1 <<  5)
+#       define RADEON_UCP_ENABLE_4                 (1 <<  6)
+#       define RADEON_UCP_ENABLE_5                 (1 <<  7)
+#       define RADEON_TCL_FOG_MASK                 (3 <<  8)
+#       define RADEON_TCL_FOG_DISABLE              (0 <<  8)
+#       define RADEON_TCL_FOG_EXP                  (1 <<  8)
+#       define RADEON_TCL_FOG_EXP2                 (2 <<  8)
+#       define RADEON_TCL_FOG_LINEAR               (3 <<  8)
+#       define RADEON_RNG_BASED_FOG                (1 << 10)
+#       define RADEON_LIGHT_TWOSIDE                (1 << 11)
+#       define RADEON_BLEND_OP_COUNT_MASK          (7 << 12)
+#       define RADEON_BLEND_OP_COUNT_SHIFT         12
+#       define RADEON_POSITION_BLEND_OP_ENABLE     (1 << 16)
+#       define RADEON_NORMAL_BLEND_OP_ENABLE       (1 << 17)
+#       define RADEON_VERTEX_BLEND_SRC_0_PRIMARY   (1 << 18)
+#       define RADEON_VERTEX_BLEND_SRC_0_SECONDARY (1 << 18)
+#       define RADEON_VERTEX_BLEND_SRC_1_PRIMARY   (1 << 19)
+#       define RADEON_VERTEX_BLEND_SRC_1_SECONDARY (1 << 19)
+#       define RADEON_VERTEX_BLEND_SRC_2_PRIMARY   (1 << 20)
+#       define RADEON_VERTEX_BLEND_SRC_2_SECONDARY (1 << 20)
+#       define RADEON_VERTEX_BLEND_SRC_3_PRIMARY   (1 << 21)
+#       define RADEON_VERTEX_BLEND_SRC_3_SECONDARY (1 << 21)
+#       define RADEON_VERTEX_BLEND_WGT_MINUS_ONE   (1 << 22)
+#       define RADEON_CULL_FRONT_IS_CW             (0 << 28)
+#       define RADEON_CULL_FRONT_IS_CCW            (1 << 28)
+#       define RADEON_CULL_FRONT                   (1 << 29)
+#       define RADEON_CULL_BACK                    (1 << 30)
+#       define RADEON_FORCE_W_TO_ONE               (1 << 31)
+
+#define RADEON_SE_VPORT_XSCALE              0x1d98
+#define RADEON_SE_VPORT_XOFFSET             0x1d9c
+#define RADEON_SE_VPORT_YSCALE              0x1da0
+#define RADEON_SE_VPORT_YOFFSET             0x1da4
+#define RADEON_SE_VPORT_ZSCALE              0x1da8
+#define RADEON_SE_VPORT_ZOFFSET             0x1dac
+#define RADEON_SE_ZBIAS_FACTOR              0x1db0
+#define RADEON_SE_ZBIAS_CONSTANT            0x1db4
+
+#define RADEON_SE_VTX_FMT                   0x2080
+#       define RADEON_SE_VTX_FMT_XY         0x00000000
+#       define RADEON_SE_VTX_FMT_W0         0x00000001
+#       define RADEON_SE_VTX_FMT_FPCOLOR    0x00000002
+#       define RADEON_SE_VTX_FMT_FPALPHA    0x00000004
+#       define RADEON_SE_VTX_FMT_PKCOLOR    0x00000008
+#       define RADEON_SE_VTX_FMT_FPSPEC     0x00000010
+#       define RADEON_SE_VTX_FMT_FPFOG      0x00000020
+#       define RADEON_SE_VTX_FMT_PKSPEC     0x00000040
+#       define RADEON_SE_VTX_FMT_ST0        0x00000080
+#       define RADEON_SE_VTX_FMT_ST1        0x00000100
+#       define RADEON_SE_VTX_FMT_Q1         0x00000200
+#       define RADEON_SE_VTX_FMT_ST2        0x00000400
+#       define RADEON_SE_VTX_FMT_Q2         0x00000800
+#       define RADEON_SE_VTX_FMT_ST3        0x00001000
+#       define RADEON_SE_VTX_FMT_Q3         0x00002000
+#       define RADEON_SE_VTX_FMT_Q0         0x00004000
+#       define RADEON_SE_VTX_FMT_BLND_WEIGHT_CNT_MASK  0x00038000
+#       define RADEON_SE_VTX_FMT_N0         0x00040000
+#       define RADEON_SE_VTX_FMT_XY1        0x08000000
+#       define RADEON_SE_VTX_FMT_Z1         0x10000000
+#       define RADEON_SE_VTX_FMT_W1         0x20000000
+#       define RADEON_SE_VTX_FMT_N1         0x40000000
+#       define RADEON_SE_VTX_FMT_Z          0x80000000
+
+#define RADEON_SE_VF_CNTL                             0x2084
+#       define RADEON_VF_PRIM_TYPE_POINT_LIST         1
+#       define RADEON_VF_PRIM_TYPE_LINE_LIST          2
+#       define RADEON_VF_PRIM_TYPE_LINE_STRIP         3
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_LIST      4
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_FAN       5
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_STRIP     6
+#       define RADEON_VF_PRIM_TYPE_TRIANGLE_FLAG      7
+#       define RADEON_VF_PRIM_TYPE_RECTANGLE_LIST     8
+#       define RADEON_VF_PRIM_TYPE_POINT_LIST_3       9
+#       define RADEON_VF_PRIM_TYPE_LINE_LIST_3        10
+#       define RADEON_VF_PRIM_TYPE_SPIRIT_LIST        11
+#       define RADEON_VF_PRIM_TYPE_LINE_LOOP          12
+#       define RADEON_VF_PRIM_TYPE_QUAD_LIST          13
+#       define RADEON_VF_PRIM_TYPE_QUAD_STRIP         14
+#       define RADEON_VF_PRIM_TYPE_POLYGON            15
+#       define RADEON_VF_PRIM_WALK_STATE              (0<<4)
+#       define RADEON_VF_PRIM_WALK_INDEX              (1<<4)
+#       define RADEON_VF_PRIM_WALK_LIST               (2<<4)
+#       define RADEON_VF_PRIM_WALK_DATA               (3<<4)
+#       define RADEON_VF_COLOR_ORDER_RGBA             (1<<6)
+#       define RADEON_VF_RADEON_MODE                  (1<<8)
+#       define RADEON_VF_TCL_OUTPUT_CTL_ENA           (1<<9)
+#       define RADEON_VF_PROG_STREAM_ENA              (1<<10)
+#       define RADEON_VF_INDEX_SIZE_SHIFT             11
+#       define RADEON_VF_NUM_VERTICES_SHIFT           16
+
+#define RADEON_SE_PORT_DATA0			0x2000
+
+#define R200_SE_VAP_CNTL			0x2080
+#       define R200_VAP_TCL_ENABLE		0x00000001
+#       define R200_VAP_SINGLE_BUF_STATE_ENABLE	0x00000010
+#       define R200_VAP_FORCE_W_TO_ONE		0x00010000
+#       define R200_VAP_D3D_TEX_DEFAULT		0x00020000
+#       define R200_VAP_VF_MAX_VTX_NUM__SHIFT	18
+#       define R200_VAP_VF_MAX_VTX_NUM		(9 << 18)
+#       define R200_VAP_DX_CLIP_SPACE_DEF	0x00400000
+#define R200_VF_MAX_VTX_INDX			0x210c
+#define R200_VF_MIN_VTX_INDX			0x2110
+#define R200_SE_VTE_CNTL			0x20b0
+#       define R200_VPORT_X_SCALE_ENA			0x00000001
+#       define R200_VPORT_X_OFFSET_ENA			0x00000002
+#       define R200_VPORT_Y_SCALE_ENA			0x00000004
+#       define R200_VPORT_Y_OFFSET_ENA			0x00000008
+#       define R200_VPORT_Z_SCALE_ENA			0x00000010
+#       define R200_VPORT_Z_OFFSET_ENA			0x00000020
+#       define R200_VTX_XY_FMT				0x00000100
+#       define R200_VTX_Z_FMT				0x00000200
+#       define R200_VTX_W0_FMT				0x00000400
+#       define R200_VTX_W0_NORMALIZE			0x00000800
+#       define R200_VTX_ST_DENORMALIZED		0x00001000
+#define R200_SE_VAP_CNTL_STATUS			0x2140
+#       define R200_VC_NO_SWAP			(0 << 0)
+#       define R200_VC_16BIT_SWAP		(1 << 0)
+#       define R200_VC_32BIT_SWAP		(2 << 0)
+#define R200_PP_TXFILTER_0			0x2c00
+#define R200_PP_TXFILTER_1			0x2c20
+#define R200_PP_TXFILTER_2			0x2c40
+#define R200_PP_TXFILTER_3			0x2c60
+#define R200_PP_TXFILTER_4			0x2c80
+#define R200_PP_TXFILTER_5			0x2ca0
+#       define R200_MAG_FILTER_NEAREST		(0  <<  0)
+#       define R200_MAG_FILTER_LINEAR		(1  <<  0)
+#       define R200_MAG_FILTER_MASK		(1  <<  0)
+#       define R200_MIN_FILTER_NEAREST		(0  <<  1)
+#       define R200_MIN_FILTER_LINEAR		(1  <<  1)
+#       define R200_MIN_FILTER_NEAREST_MIP_NEAREST (2  <<  1)
+#       define R200_MIN_FILTER_NEAREST_MIP_LINEAR (3  <<  1)
+#       define R200_MIN_FILTER_LINEAR_MIP_NEAREST (6  <<  1)
+#       define R200_MIN_FILTER_LINEAR_MIP_LINEAR (7  <<  1)
+#       define R200_MIN_FILTER_ANISO_NEAREST	(8  <<  1)
+#       define R200_MIN_FILTER_ANISO_LINEAR	(9  <<  1)
+#       define R200_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 <<  1)
+#       define R200_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 <<  1)
+#       define R200_MIN_FILTER_MASK		(15 <<  1)
+#       define R200_MAX_ANISO_1_TO_1		(0  <<  5)
+#       define R200_MAX_ANISO_2_TO_1		(1  <<  5)
+#       define R200_MAX_ANISO_4_TO_1		(2  <<  5)
+#       define R200_MAX_ANISO_8_TO_1		(3  <<  5)
+#       define R200_MAX_ANISO_16_TO_1		(4  <<  5)
+#       define R200_MAX_ANISO_MASK		(7  <<  5)
+#       define R200_MAX_MIP_LEVEL_MASK		(0x0f << 16)
+#       define R200_MAX_MIP_LEVEL_SHIFT		16
+#       define R200_YUV_TO_RGB			(1  << 20)
+#       define R200_YUV_TEMPERATURE_COOL	(0  << 21)
+#       define R200_YUV_TEMPERATURE_HOT		(1  << 21)
+#       define R200_YUV_TEMPERATURE_MASK	(1  << 21)
+#       define R200_WRAPEN_S			(1  << 22)
+#       define R200_CLAMP_S_WRAP		(0  << 23)
+#       define R200_CLAMP_S_MIRROR		(1  << 23)
+#       define R200_CLAMP_S_CLAMP_LAST		(2  << 23)
+#       define R200_CLAMP_S_MIRROR_CLAMP_LAST	(3  << 23)
+#       define R200_CLAMP_S_CLAMP_BORDER	(4  << 23)
+#       define R200_CLAMP_S_MIRROR_CLAMP_BORDER	(5  << 23)
+#       define R200_CLAMP_S_CLAMP_GL		(6  << 23)
+#       define R200_CLAMP_S_MIRROR_CLAMP_GL	(7  << 23)
+#       define R200_CLAMP_S_MASK		(7  << 23)
+#       define R200_WRAPEN_T			(1  << 26)
+#       define R200_CLAMP_T_WRAP		(0  << 27)
+#       define R200_CLAMP_T_MIRROR		(1  << 27)
+#       define R200_CLAMP_T_CLAMP_LAST		(2  << 27)
+#       define R200_CLAMP_T_MIRROR_CLAMP_LAST	(3  << 27)
+#       define R200_CLAMP_T_CLAMP_BORDER	(4  << 27)
+#       define R200_CLAMP_T_MIRROR_CLAMP_BORDER	(5  << 27)
+#       define R200_CLAMP_T_CLAMP_GL		(6  << 27)
+#       define R200_CLAMP_T_MIRROR_CLAMP_GL	(7  << 27)
+#       define R200_CLAMP_T_MASK		(7  << 27)
+#       define R200_KILL_LT_ZERO		(1  << 30)
+#       define R200_BORDER_MODE_OGL		(0  << 31)
+#       define R200_BORDER_MODE_D3D		(1  << 31)
+#define R200_PP_TXFORMAT_0			0x2c04
+#define R200_PP_TXFORMAT_1			0x2c24
+#define R200_PP_TXFORMAT_2			0x2c44
+#define R200_PP_TXFORMAT_3			0x2c64
+#define R200_PP_TXFORMAT_4			0x2c84
+#define R200_PP_TXFORMAT_5			0x2ca4
+#       define R200_TXFORMAT_I8			(0 << 0)
+#       define R200_TXFORMAT_AI88		(1 << 0)
+#       define R200_TXFORMAT_RGB332		(2 << 0)
+#       define R200_TXFORMAT_ARGB1555		(3 << 0)
+#       define R200_TXFORMAT_RGB565		(4 << 0)
+#       define R200_TXFORMAT_ARGB4444		(5 << 0)
+#       define R200_TXFORMAT_ARGB8888		(6 << 0)
+#       define R200_TXFORMAT_RGBA8888		(7 << 0)
+#       define R200_TXFORMAT_Y8			(8 << 0)
+#       define R200_TXFORMAT_AVYU4444		(9 << 0)
+#       define R200_TXFORMAT_VYUY422		(10 << 0)
+#       define R200_TXFORMAT_YVYU422		(11 << 0)
+#       define R200_TXFORMAT_DXT1		(12 << 0)
+#       define R200_TXFORMAT_DXT23		(14 << 0)
+#       define R200_TXFORMAT_DXT45		(15 << 0)
+#       define R200_TXFORMAT_DVDU88		(18 << 0)
+#       define R200_TXFORMAT_LDVDU655		(19 << 0)
+#       define R200_TXFORMAT_LDVDU8888		(20 << 0)
+#       define R200_TXFORMAT_GR1616		(21 << 0)
+#       define R200_TXFORMAT_ABGR8888		(22 << 0)
+#       define R200_TXFORMAT_BGR111110		(23 << 0)
+#       define R200_TXFORMAT_FORMAT_MASK	(31 <<	0)
+#       define R200_TXFORMAT_FORMAT_SHIFT	0
+#       define R200_TXFORMAT_ALPHA_IN_MAP	(1 << 6)
+#       define R200_TXFORMAT_NON_POWER2		(1 << 7)
+#       define R200_TXFORMAT_WIDTH_MASK		(15 <<	8)
+#       define R200_TXFORMAT_WIDTH_SHIFT	8
+#       define R200_TXFORMAT_HEIGHT_MASK	(15 << 12)
+#       define R200_TXFORMAT_HEIGHT_SHIFT	12
+#       define R200_TXFORMAT_F5_WIDTH_MASK	(15 << 16)	/* cube face 5 */
+#       define R200_TXFORMAT_F5_WIDTH_SHIFT	16
+#       define R200_TXFORMAT_F5_HEIGHT_MASK	(15 << 20)
+#       define R200_TXFORMAT_F5_HEIGHT_SHIFT	20
+#       define R200_TXFORMAT_ST_ROUTE_STQ0	(0 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ1	(1 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ2	(2 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ3	(3 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ4	(4 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_STQ5	(5 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_MASK	(7 << 24)
+#       define R200_TXFORMAT_ST_ROUTE_SHIFT	24
+#       define R200_TXFORMAT_LOOKUP_DISABLE	(1 << 27)
+#       define R200_TXFORMAT_ALPHA_MASK_ENABLE	(1 << 28)
+#       define R200_TXFORMAT_CHROMA_KEY_ENABLE	(1 << 29)
+#       define R200_TXFORMAT_CUBIC_MAP_ENABLE		(1 << 30)
+#define R200_PP_TXFORMAT_X_0                    0x2c08
+#define R200_PP_TXFORMAT_X_1                    0x2c28
+#define R200_PP_TXFORMAT_X_2                    0x2c48
+#define R200_PP_TXFORMAT_X_3                    0x2c68
+#define R200_PP_TXFORMAT_X_4                    0x2c88
+#define R200_PP_TXFORMAT_X_5                    0x2ca8
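+
+/*
+ * Illustrative sketch (not part of the original file): packing a TXFORMAT
+ * value for a 256x256 ARGB8888 texture.  The WIDTH/HEIGHT fields are
+ * log2-encoded (as the userspace drivers program them), which is why
+ * non-power-of-two textures need R200_TXFORMAT_NON_POWER2 plus the
+ * separate TXSIZE/TXPITCH registers below.
+ */
+#if 0
+static u32 r200_example_txformat(void)
+{
+	return R200_TXFORMAT_ARGB8888 |
+	       R200_TXFORMAT_ALPHA_IN_MAP |
+	       (8 << R200_TXFORMAT_WIDTH_SHIFT) |	/* log2(256) == 8 */
+	       (8 << R200_TXFORMAT_HEIGHT_SHIFT) |
+	       R200_TXFORMAT_ST_ROUTE_STQ0;		/* sample coords STQ0 */
+}
+#endif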
+
+#define R200_PP_TXSIZE_0			0x2c0c /* NPOT only */
+#define R200_PP_TXSIZE_1			0x2c2c /* NPOT only */
+#define R200_PP_TXSIZE_2			0x2c4c /* NPOT only */
+#define R200_PP_TXSIZE_3			0x2c6c /* NPOT only */
+#define R200_PP_TXSIZE_4			0x2c8c /* NPOT only */
+#define R200_PP_TXSIZE_5			0x2cac /* NPOT only */
+
+#define R200_PP_TXPITCH_0                       0x2c10 /* NPOT only */
+#define R200_PP_TXPITCH_1			0x2c30 /* NPOT only */
+#define R200_PP_TXPITCH_2			0x2c50 /* NPOT only */
+#define R200_PP_TXPITCH_3			0x2c70 /* NPOT only */
+#define R200_PP_TXPITCH_4			0x2c90 /* NPOT only */
+#define R200_PP_TXPITCH_5			0x2cb0 /* NPOT only */
+
+#define R200_PP_CUBIC_FACES_0			0x2c18
+#define R200_PP_CUBIC_FACES_1			0x2c38
+#define R200_PP_CUBIC_FACES_2			0x2c58
+#define R200_PP_CUBIC_FACES_3			0x2c78
+#define R200_PP_CUBIC_FACES_4			0x2c98
+#define R200_PP_CUBIC_FACES_5			0x2cb8
+
+#define R200_PP_TXOFFSET_0			0x2d00
+#       define R200_TXO_ENDIAN_NO_SWAP		(0 << 0)
+#       define R200_TXO_ENDIAN_BYTE_SWAP	(1 << 0)
+#       define R200_TXO_ENDIAN_WORD_SWAP	(2 << 0)
+#       define R200_TXO_ENDIAN_HALFDW_SWAP	(3 << 0)
+#       define R200_TXO_MACRO_LINEAR		(0 << 2)
+#       define R200_TXO_MACRO_TILE		(1 << 2)
+#       define R200_TXO_MICRO_LINEAR		(0 << 3)
+#       define R200_TXO_MICRO_TILE		(1 << 3)
+#       define R200_TXO_OFFSET_MASK		0xffffffe0
+#       define R200_TXO_OFFSET_SHIFT		5
+#define R200_PP_CUBIC_OFFSET_F1_0         0x2d04
+#define R200_PP_CUBIC_OFFSET_F2_0         0x2d08
+#define R200_PP_CUBIC_OFFSET_F3_0         0x2d0c
+#define R200_PP_CUBIC_OFFSET_F4_0         0x2d10
+#define R200_PP_CUBIC_OFFSET_F5_0         0x2d14
+
+#define R200_PP_TXOFFSET_1			0x2d18
+#define R200_PP_CUBIC_OFFSET_F1_1         0x2d1c
+#define R200_PP_CUBIC_OFFSET_F2_1         0x2d20
+#define R200_PP_CUBIC_OFFSET_F3_1         0x2d24
+#define R200_PP_CUBIC_OFFSET_F4_1         0x2d28
+#define R200_PP_CUBIC_OFFSET_F5_1         0x2d2c
+
+#define R200_PP_TXOFFSET_2			0x2d30
+#define R200_PP_CUBIC_OFFSET_F1_2         0x2d34
+#define R200_PP_CUBIC_OFFSET_F2_2         0x2d38
+#define R200_PP_CUBIC_OFFSET_F3_2         0x2d3c
+#define R200_PP_CUBIC_OFFSET_F4_2         0x2d40
+#define R200_PP_CUBIC_OFFSET_F5_2         0x2d44
+
+#define R200_PP_TXOFFSET_3			0x2d48
+#define R200_PP_CUBIC_OFFSET_F1_3         0x2d4c
+#define R200_PP_CUBIC_OFFSET_F2_3         0x2d50
+#define R200_PP_CUBIC_OFFSET_F3_3         0x2d54
+#define R200_PP_CUBIC_OFFSET_F4_3         0x2d58
+#define R200_PP_CUBIC_OFFSET_F5_3         0x2d5c
+#define R200_PP_TXOFFSET_4			0x2d60
+#define R200_PP_CUBIC_OFFSET_F1_4         0x2d64
+#define R200_PP_CUBIC_OFFSET_F2_4         0x2d68
+#define R200_PP_CUBIC_OFFSET_F3_4         0x2d6c
+#define R200_PP_CUBIC_OFFSET_F4_4         0x2d70
+#define R200_PP_CUBIC_OFFSET_F5_4         0x2d74
+#define R200_PP_TXOFFSET_5			0x2d78
+#define R200_PP_CUBIC_OFFSET_F1_5         0x2d7c
+#define R200_PP_CUBIC_OFFSET_F2_5         0x2d80
+#define R200_PP_CUBIC_OFFSET_F3_5         0x2d84
+#define R200_PP_CUBIC_OFFSET_F4_5         0x2d88
+#define R200_PP_CUBIC_OFFSET_F5_5         0x2d8c
+
+#define R200_PP_TFACTOR_0			0x2ee0
+#define R200_PP_TFACTOR_1			0x2ee4
+#define R200_PP_TFACTOR_2			0x2ee8
+#define R200_PP_TFACTOR_3			0x2eec
+#define R200_PP_TFACTOR_4			0x2ef0
+#define R200_PP_TFACTOR_5			0x2ef4
+
+#define R200_PP_TXCBLEND_0			0x2f00
+#       define R200_TXC_ARG_A_ZERO		(0)
+#       define R200_TXC_ARG_A_CURRENT_COLOR	(2)
+#       define R200_TXC_ARG_A_CURRENT_ALPHA	(3)
+#       define R200_TXC_ARG_A_DIFFUSE_COLOR	(4)
+#       define R200_TXC_ARG_A_DIFFUSE_ALPHA	(5)
+#       define R200_TXC_ARG_A_SPECULAR_COLOR	(6)
+#       define R200_TXC_ARG_A_SPECULAR_ALPHA	(7)
+#       define R200_TXC_ARG_A_TFACTOR_COLOR	(8)
+#       define R200_TXC_ARG_A_TFACTOR_ALPHA	(9)
+#       define R200_TXC_ARG_A_R0_COLOR		(10)
+#       define R200_TXC_ARG_A_R0_ALPHA		(11)
+#       define R200_TXC_ARG_A_R1_COLOR		(12)
+#       define R200_TXC_ARG_A_R1_ALPHA		(13)
+#       define R200_TXC_ARG_A_R2_COLOR		(14)
+#       define R200_TXC_ARG_A_R2_ALPHA		(15)
+#       define R200_TXC_ARG_A_R3_COLOR		(16)
+#       define R200_TXC_ARG_A_R3_ALPHA		(17)
+#       define R200_TXC_ARG_A_R4_COLOR		(18)
+#       define R200_TXC_ARG_A_R4_ALPHA		(19)
+#       define R200_TXC_ARG_A_R5_COLOR		(20)
+#       define R200_TXC_ARG_A_R5_ALPHA		(21)
+#       define R200_TXC_ARG_A_TFACTOR1_COLOR	(26)
+#       define R200_TXC_ARG_A_TFACTOR1_ALPHA	(27)
+#       define R200_TXC_ARG_A_MASK		(31 << 0)
+#       define R200_TXC_ARG_A_SHIFT		0
+#       define R200_TXC_ARG_B_ZERO		(0 << 5)
+#       define R200_TXC_ARG_B_CURRENT_COLOR	(2 << 5)
+#       define R200_TXC_ARG_B_CURRENT_ALPHA	(3 << 5)
+#       define R200_TXC_ARG_B_DIFFUSE_COLOR	(4 << 5)
+#       define R200_TXC_ARG_B_DIFFUSE_ALPHA	(5 << 5)
+#       define R200_TXC_ARG_B_SPECULAR_COLOR	(6 << 5)
+#       define R200_TXC_ARG_B_SPECULAR_ALPHA	(7 << 5)
+#       define R200_TXC_ARG_B_TFACTOR_COLOR	(8 << 5)
+#       define R200_TXC_ARG_B_TFACTOR_ALPHA	(9 << 5)
+#       define R200_TXC_ARG_B_R0_COLOR		(10 << 5)
+#       define R200_TXC_ARG_B_R0_ALPHA		(11 << 5)
+#       define R200_TXC_ARG_B_R1_COLOR		(12 << 5)
+#       define R200_TXC_ARG_B_R1_ALPHA		(13 << 5)
+#       define R200_TXC_ARG_B_R2_COLOR		(14 << 5)
+#       define R200_TXC_ARG_B_R2_ALPHA		(15 << 5)
+#       define R200_TXC_ARG_B_R3_COLOR		(16 << 5)
+#       define R200_TXC_ARG_B_R3_ALPHA		(17 << 5)
+#       define R200_TXC_ARG_B_R4_COLOR		(18 << 5)
+#       define R200_TXC_ARG_B_R4_ALPHA		(19 << 5)
+#       define R200_TXC_ARG_B_R5_COLOR		(20 << 5)
+#       define R200_TXC_ARG_B_R5_ALPHA		(21 << 5)
+#       define R200_TXC_ARG_B_TFACTOR1_COLOR	(26 << 5)
+#       define R200_TXC_ARG_B_TFACTOR1_ALPHA	(27 << 5)
+#       define R200_TXC_ARG_B_MASK		(31 << 5)
+#       define R200_TXC_ARG_B_SHIFT		5
+#       define R200_TXC_ARG_C_ZERO		(0 << 10)
+#       define R200_TXC_ARG_C_CURRENT_COLOR	(2 << 10)
+#       define R200_TXC_ARG_C_CURRENT_ALPHA	(3 << 10)
+#       define R200_TXC_ARG_C_DIFFUSE_COLOR	(4 << 10)
+#       define R200_TXC_ARG_C_DIFFUSE_ALPHA	(5 << 10)
+#       define R200_TXC_ARG_C_SPECULAR_COLOR	(6 << 10)
+#       define R200_TXC_ARG_C_SPECULAR_ALPHA	(7 << 10)
+#       define R200_TXC_ARG_C_TFACTOR_COLOR	(8 << 10)
+#       define R200_TXC_ARG_C_TFACTOR_ALPHA	(9 << 10)
+#       define R200_TXC_ARG_C_R0_COLOR		(10 << 10)
+#       define R200_TXC_ARG_C_R0_ALPHA		(11 << 10)
+#       define R200_TXC_ARG_C_R1_COLOR		(12 << 10)
+#       define R200_TXC_ARG_C_R1_ALPHA		(13 << 10)
+#       define R200_TXC_ARG_C_R2_COLOR		(14 << 10)
+#       define R200_TXC_ARG_C_R2_ALPHA		(15 << 10)
+#       define R200_TXC_ARG_C_R3_COLOR		(16 << 10)
+#       define R200_TXC_ARG_C_R3_ALPHA		(17 << 10)
+#       define R200_TXC_ARG_C_R4_COLOR		(18 << 10)
+#       define R200_TXC_ARG_C_R4_ALPHA		(19 << 10)
+#       define R200_TXC_ARG_C_R5_COLOR		(20 << 10)
+#       define R200_TXC_ARG_C_R5_ALPHA		(21 << 10)
+#       define R200_TXC_ARG_C_TFACTOR1_COLOR	(26 << 10)
+#       define R200_TXC_ARG_C_TFACTOR1_ALPHA	(27 << 10)
+#       define R200_TXC_ARG_C_MASK		(31 << 10)
+#       define R200_TXC_ARG_C_SHIFT		10
+#       define R200_TXC_COMP_ARG_A		(1 << 16)
+#       define R200_TXC_COMP_ARG_A_SHIFT	(16)
+#       define R200_TXC_BIAS_ARG_A		(1 << 17)
+#       define R200_TXC_SCALE_ARG_A		(1 << 18)
+#       define R200_TXC_NEG_ARG_A		(1 << 19)
+#       define R200_TXC_COMP_ARG_B		(1 << 20)
+#       define R200_TXC_COMP_ARG_B_SHIFT	(20)
+#       define R200_TXC_BIAS_ARG_B		(1 << 21)
+#       define R200_TXC_SCALE_ARG_B		(1 << 22)
+#       define R200_TXC_NEG_ARG_B		(1 << 23)
+#       define R200_TXC_COMP_ARG_C		(1 << 24)
+#       define R200_TXC_COMP_ARG_C_SHIFT	(24)
+#       define R200_TXC_BIAS_ARG_C		(1 << 25)
+#       define R200_TXC_SCALE_ARG_C		(1 << 26)
+#       define R200_TXC_NEG_ARG_C		(1 << 27)
+#       define R200_TXC_OP_MADD			(0 << 28)
+#       define R200_TXC_OP_CND0			(2 << 28)
+#       define R200_TXC_OP_LERP			(3 << 28)
+#       define R200_TXC_OP_DOT3			(4 << 28)
+#       define R200_TXC_OP_DOT4			(5 << 28)
+#       define R200_TXC_OP_CONDITIONAL		(6 << 28)
+#       define R200_TXC_OP_DOT2_ADD		(7 << 28)
+#       define R200_TXC_OP_MASK			(7 << 28)
+#define R200_PP_TXCBLEND2_0		0x2f04
+#       define R200_TXC_TFACTOR_SEL_SHIFT	0
+#       define R200_TXC_TFACTOR_SEL_MASK	0x7
+#       define R200_TXC_TFACTOR1_SEL_SHIFT	4
+#       define R200_TXC_TFACTOR1_SEL_MASK	(0x7 << 4)
+#       define R200_TXC_SCALE_SHIFT		8
+#       define R200_TXC_SCALE_MASK		(7 << 8)
+#       define R200_TXC_SCALE_1X		(0 << 8)
+#       define R200_TXC_SCALE_2X		(1 << 8)
+#       define R200_TXC_SCALE_4X		(2 << 8)
+#       define R200_TXC_SCALE_8X		(3 << 8)
+#       define R200_TXC_SCALE_INV2		(5 << 8)
+#       define R200_TXC_SCALE_INV4		(6 << 8)
+#       define R200_TXC_SCALE_INV8		(7 << 8)
+#       define R200_TXC_CLAMP_SHIFT		12
+#       define R200_TXC_CLAMP_MASK		(3 << 12)
+#       define R200_TXC_CLAMP_WRAP		(0 << 12)
+#       define R200_TXC_CLAMP_0_1		(1 << 12)
+#       define R200_TXC_CLAMP_8_8		(2 << 12)
+#       define R200_TXC_OUTPUT_REG_MASK		(7 << 16)
+#       define R200_TXC_OUTPUT_REG_NONE		(0 << 16)
+#       define R200_TXC_OUTPUT_REG_R0		(1 << 16)
+#       define R200_TXC_OUTPUT_REG_R1		(2 << 16)
+#       define R200_TXC_OUTPUT_REG_R2		(3 << 16)
+#       define R200_TXC_OUTPUT_REG_R3		(4 << 16)
+#       define R200_TXC_OUTPUT_REG_R4		(5 << 16)
+#       define R200_TXC_OUTPUT_REG_R5		(6 << 16)
+#       define R200_TXC_OUTPUT_MASK_MASK	(7 << 20)
+#       define R200_TXC_OUTPUT_MASK_RGB		(0 << 20)
+#       define R200_TXC_OUTPUT_MASK_RG		(1 << 20)
+#       define R200_TXC_OUTPUT_MASK_RB		(2 << 20)
+#       define R200_TXC_OUTPUT_MASK_R		(3 << 20)
+#       define R200_TXC_OUTPUT_MASK_GB		(4 << 20)
+#       define R200_TXC_OUTPUT_MASK_G		(5 << 20)
+#       define R200_TXC_OUTPUT_MASK_B		(6 << 20)
+#       define R200_TXC_OUTPUT_MASK_NONE	(7 << 20)
+#       define R200_TXC_REPL_NORMAL		0
+#       define R200_TXC_REPL_RED		1
+#       define R200_TXC_REPL_GREEN		2
+#       define R200_TXC_REPL_BLUE		3
+#       define R200_TXC_REPL_ARG_A_SHIFT	26
+#       define R200_TXC_REPL_ARG_A_MASK		(3 << 26)
+#       define R200_TXC_REPL_ARG_B_SHIFT	28
+#       define R200_TXC_REPL_ARG_B_MASK		(3 << 28)
+#       define R200_TXC_REPL_ARG_C_SHIFT	30
+#       define R200_TXC_REPL_ARG_C_MASK		(3 << 30)
+#define R200_PP_TXABLEND_0			0x2f08
+#       define R200_TXA_ARG_A_ZERO		(0)
+#       define R200_TXA_ARG_A_CURRENT_ALPHA	(2) /* guess */
+#       define R200_TXA_ARG_A_CURRENT_BLUE	(3) /* guess */
+#       define R200_TXA_ARG_A_DIFFUSE_ALPHA	(4)
+#       define R200_TXA_ARG_A_DIFFUSE_BLUE	(5)
+#       define R200_TXA_ARG_A_SPECULAR_ALPHA	(6)
+#       define R200_TXA_ARG_A_SPECULAR_BLUE	(7)
+#       define R200_TXA_ARG_A_TFACTOR_ALPHA	(8)
+#       define R200_TXA_ARG_A_TFACTOR_BLUE	(9)
+#       define R200_TXA_ARG_A_R0_ALPHA		(10)
+#       define R200_TXA_ARG_A_R0_BLUE		(11)
+#       define R200_TXA_ARG_A_R1_ALPHA		(12)
+#       define R200_TXA_ARG_A_R1_BLUE		(13)
+#       define R200_TXA_ARG_A_R2_ALPHA		(14)
+#       define R200_TXA_ARG_A_R2_BLUE		(15)
+#       define R200_TXA_ARG_A_R3_ALPHA		(16)
+#       define R200_TXA_ARG_A_R3_BLUE		(17)
+#       define R200_TXA_ARG_A_R4_ALPHA		(18)
+#       define R200_TXA_ARG_A_R4_BLUE		(19)
+#       define R200_TXA_ARG_A_R5_ALPHA		(20)
+#       define R200_TXA_ARG_A_R5_BLUE		(21)
+#       define R200_TXA_ARG_A_TFACTOR1_ALPHA	(26)
+#       define R200_TXA_ARG_A_TFACTOR1_BLUE	(27)
+#       define R200_TXA_ARG_A_MASK		(31 << 0)
+#       define R200_TXA_ARG_A_SHIFT		0
+#       define R200_TXA_ARG_B_ZERO		(0 << 5)
+#       define R200_TXA_ARG_B_CURRENT_ALPHA	(2 << 5) /* guess */
+#       define R200_TXA_ARG_B_CURRENT_BLUE	(3 << 5) /* guess */
+#       define R200_TXA_ARG_B_DIFFUSE_ALPHA	(4 << 5)
+#       define R200_TXA_ARG_B_DIFFUSE_BLUE	(5 << 5)
+#       define R200_TXA_ARG_B_SPECULAR_ALPHA	(6 << 5)
+#       define R200_TXA_ARG_B_SPECULAR_BLUE	(7 << 5)
+#       define R200_TXA_ARG_B_TFACTOR_ALPHA	(8 << 5)
+#       define R200_TXA_ARG_B_TFACTOR_BLUE	(9 << 5)
+#       define R200_TXA_ARG_B_R0_ALPHA		(10 << 5)
+#       define R200_TXA_ARG_B_R0_BLUE		(11 << 5)
+#       define R200_TXA_ARG_B_R1_ALPHA		(12 << 5)
+#       define R200_TXA_ARG_B_R1_BLUE		(13 << 5)
+#       define R200_TXA_ARG_B_R2_ALPHA		(14 << 5)
+#       define R200_TXA_ARG_B_R2_BLUE		(15 << 5)
+#       define R200_TXA_ARG_B_R3_ALPHA		(16 << 5)
+#       define R200_TXA_ARG_B_R3_BLUE		(17 << 5)
+#       define R200_TXA_ARG_B_R4_ALPHA		(18 << 5)
+#       define R200_TXA_ARG_B_R4_BLUE		(19 << 5)
+#       define R200_TXA_ARG_B_R5_ALPHA		(20 << 5)
+#       define R200_TXA_ARG_B_R5_BLUE		(21 << 5)
+#       define R200_TXA_ARG_B_TFACTOR1_ALPHA	(26 << 5)
+#       define R200_TXA_ARG_B_TFACTOR1_BLUE	(27 << 5)
+#       define R200_TXA_ARG_B_MASK		(31 << 5)
+#       define R200_TXA_ARG_B_SHIFT			5
+#       define R200_TXA_ARG_C_ZERO		(0 << 10)
+#       define R200_TXA_ARG_C_CURRENT_ALPHA	(2 << 10) /* guess */
+#       define R200_TXA_ARG_C_CURRENT_BLUE	(3 << 10) /* guess */
+#       define R200_TXA_ARG_C_DIFFUSE_ALPHA	(4 << 10)
+#       define R200_TXA_ARG_C_DIFFUSE_BLUE	(5 << 10)
+#       define R200_TXA_ARG_C_SPECULAR_ALPHA	(6 << 10)
+#       define R200_TXA_ARG_C_SPECULAR_BLUE	(7 << 10)
+#       define R200_TXA_ARG_C_TFACTOR_ALPHA	(8 << 10)
+#       define R200_TXA_ARG_C_TFACTOR_BLUE	(9 << 10)
+#       define R200_TXA_ARG_C_R0_ALPHA		(10 << 10)
+#       define R200_TXA_ARG_C_R0_BLUE		(11 << 10)
+#       define R200_TXA_ARG_C_R1_ALPHA		(12 << 10)
+#       define R200_TXA_ARG_C_R1_BLUE		(13 << 10)
+#       define R200_TXA_ARG_C_R2_ALPHA		(14 << 10)
+#       define R200_TXA_ARG_C_R2_BLUE		(15 << 10)
+#       define R200_TXA_ARG_C_R3_ALPHA		(16 << 10)
+#       define R200_TXA_ARG_C_R3_BLUE		(17 << 10)
+#       define R200_TXA_ARG_C_R4_ALPHA		(18 << 10)
+#       define R200_TXA_ARG_C_R4_BLUE		(19 << 10)
+#       define R200_TXA_ARG_C_R5_ALPHA		(20 << 10)
+#       define R200_TXA_ARG_C_R5_BLUE		(21 << 10)
+#       define R200_TXA_ARG_C_TFACTOR1_ALPHA	(26 << 10)
+#       define R200_TXA_ARG_C_TFACTOR1_BLUE	(27 << 10)
+#       define R200_TXA_ARG_C_MASK		(31 << 10)
+#       define R200_TXA_ARG_C_SHIFT		10
+#       define R200_TXA_COMP_ARG_A		(1 << 16)
+#       define R200_TXA_COMP_ARG_A_SHIFT	(16)
+#       define R200_TXA_BIAS_ARG_A		(1 << 17)
+#       define R200_TXA_SCALE_ARG_A		(1 << 18)
+#       define R200_TXA_NEG_ARG_A		(1 << 19)
+#       define R200_TXA_COMP_ARG_B		(1 << 20)
+#       define R200_TXA_COMP_ARG_B_SHIFT	(20)
+#       define R200_TXA_BIAS_ARG_B		(1 << 21)
+#       define R200_TXA_SCALE_ARG_B		(1 << 22)
+#       define R200_TXA_NEG_ARG_B		(1 << 23)
+#       define R200_TXA_COMP_ARG_C		(1 << 24)
+#       define R200_TXA_COMP_ARG_C_SHIFT	(24)
+#       define R200_TXA_BIAS_ARG_C		(1 << 25)
+#       define R200_TXA_SCALE_ARG_C		(1 << 26)
+#       define R200_TXA_NEG_ARG_C		(1 << 27)
+#       define R200_TXA_OP_MADD			(0 << 28)
+#       define R200_TXA_OP_CND0			(2 << 28)
+#       define R200_TXA_OP_LERP			(3 << 28)
+#       define R200_TXA_OP_CONDITIONAL		(6 << 28)
+#       define R200_TXA_OP_MASK			(7 << 28)
+#define R200_PP_TXABLEND2_0			0x2f0c
+#       define R200_TXA_TFACTOR_SEL_SHIFT	0
+#       define R200_TXA_TFACTOR_SEL_MASK	0x7
+#       define R200_TXA_TFACTOR1_SEL_SHIFT	4
+#       define R200_TXA_TFACTOR1_SEL_MASK	(0x7 << 4)
+#       define R200_TXA_SCALE_SHIFT		8
+#       define R200_TXA_SCALE_MASK		(7 << 8)
+#       define R200_TXA_SCALE_1X		(0 << 8)
+#       define R200_TXA_SCALE_2X		(1 << 8)
+#       define R200_TXA_SCALE_4X		(2 << 8)
+#       define R200_TXA_SCALE_8X		(3 << 8)
+#       define R200_TXA_SCALE_INV2		(5 << 8)
+#       define R200_TXA_SCALE_INV4		(6 << 8)
+#       define R200_TXA_SCALE_INV8		(7 << 8)
+#       define R200_TXA_CLAMP_SHIFT		12
+#       define R200_TXA_CLAMP_MASK		(3 << 12)
+#       define R200_TXA_CLAMP_WRAP		(0 << 12)
+#       define R200_TXA_CLAMP_0_1		(1 << 12)
+#       define R200_TXA_CLAMP_8_8		(2 << 12)
+#       define R200_TXA_OUTPUT_REG_MASK		(7 << 16)
+#       define R200_TXA_OUTPUT_REG_NONE		(0 << 16)
+#       define R200_TXA_OUTPUT_REG_R0		(1 << 16)
+#       define R200_TXA_OUTPUT_REG_R1		(2 << 16)
+#       define R200_TXA_OUTPUT_REG_R2		(3 << 16)
+#       define R200_TXA_OUTPUT_REG_R3		(4 << 16)
+#       define R200_TXA_OUTPUT_REG_R4		(5 << 16)
+#       define R200_TXA_OUTPUT_REG_R5		(6 << 16)
+#       define R200_TXA_DOT_ALPHA		(1 << 20)
+#       define R200_TXA_REPL_NORMAL		0
+#       define R200_TXA_REPL_RED		1
+#       define R200_TXA_REPL_GREEN		2
+#       define R200_TXA_REPL_ARG_A_SHIFT	26
+#       define R200_TXA_REPL_ARG_A_MASK		(3 << 26)
+#       define R200_TXA_REPL_ARG_B_SHIFT	28
+#       define R200_TXA_REPL_ARG_B_MASK		(3 << 28)
+#       define R200_TXA_REPL_ARG_C_SHIFT	30
+#       define R200_TXA_REPL_ARG_C_MASK		(3 << 30)
+
+#define R200_SE_VTX_FMT_0			0x2088
+#       define R200_VTX_XY			0 /* always have xy */
+#       define R200_VTX_Z0			(1<<0)
+#       define R200_VTX_W0			(1<<1)
+#       define R200_VTX_WEIGHT_COUNT_SHIFT	(2)
+#       define R200_VTX_PV_MATRIX_SEL		(1<<5)
+#       define R200_VTX_N0			(1<<6)
+#       define R200_VTX_POINT_SIZE		(1<<7)
+#       define R200_VTX_DISCRETE_FOG		(1<<8)
+#       define R200_VTX_SHININESS_0		(1<<9)
+#       define R200_VTX_SHININESS_1		(1<<10)
+#       define   R200_VTX_COLOR_NOT_PRESENT	0
+#       define   R200_VTX_PK_RGBA		1
+#       define   R200_VTX_FP_RGB		2
+#       define   R200_VTX_FP_RGBA		3
+#       define   R200_VTX_COLOR_MASK		3
+#       define R200_VTX_COLOR_0_SHIFT		11
+#       define R200_VTX_COLOR_1_SHIFT		13
+#       define R200_VTX_COLOR_2_SHIFT		15
+#       define R200_VTX_COLOR_3_SHIFT		17
+#       define R200_VTX_COLOR_4_SHIFT		19
+#       define R200_VTX_COLOR_5_SHIFT		21
+#       define R200_VTX_COLOR_6_SHIFT		23
+#       define R200_VTX_COLOR_7_SHIFT		25
+#       define R200_VTX_XY1			(1<<28)
+#       define R200_VTX_Z1			(1<<29)
+#       define R200_VTX_W1			(1<<30)
+#       define R200_VTX_N1			(1<<31)
+#define R200_SE_VTX_FMT_1			0x208c
+#       define R200_VTX_TEX0_COMP_CNT_SHIFT	0
+#       define R200_VTX_TEX1_COMP_CNT_SHIFT	3
+#       define R200_VTX_TEX2_COMP_CNT_SHIFT	6
+#       define R200_VTX_TEX3_COMP_CNT_SHIFT	9
+#       define R200_VTX_TEX4_COMP_CNT_SHIFT	12
+#       define R200_VTX_TEX5_COMP_CNT_SHIFT	15
+
+#define R200_SE_TCL_OUTPUT_VTX_FMT_0		0x2090
+#define R200_SE_TCL_OUTPUT_VTX_FMT_1		0x2094
+#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL		0x2250
+#       define R200_OUTPUT_XYZW			(1<<0)
+#       define R200_OUTPUT_COLOR_0		(1<<8)
+#       define R200_OUTPUT_COLOR_1		(1<<9)
+#       define R200_OUTPUT_TEX_0		(1<<16)
+#       define R200_OUTPUT_TEX_1		(1<<17)
+#       define R200_OUTPUT_TEX_2		(1<<18)
+#       define R200_OUTPUT_TEX_3		(1<<19)
+#       define R200_OUTPUT_TEX_4		(1<<20)
+#       define R200_OUTPUT_TEX_5		(1<<21)
+#       define R200_OUTPUT_TEX_MASK		(0x3f<<16)
+#       define R200_OUTPUT_DISCRETE_FOG		(1<<24)
+#       define R200_OUTPUT_PT_SIZE		(1<<25)
+#       define R200_FORCE_INORDER_PROC		(1<<31)
+#define R200_PP_CNTL_X				0x2cc4
+#define R200_PP_TXMULTI_CTL_0			0x2c1c
+#define R200_PP_TXMULTI_CTL_1			0x2c3c
+#define R200_PP_TXMULTI_CTL_2			0x2c5c
+#define R200_PP_TXMULTI_CTL_3			0x2c7c
+#define R200_PP_TXMULTI_CTL_4			0x2c9c
+#define R200_PP_TXMULTI_CTL_5			0x2cbc
+#define R200_SE_VTX_STATE_CNTL			0x2180
+#       define R200_UPDATE_USER_COLOR_0_ENA_MASK (1<<16)
+
+				/* Registers for CP and Microcode Engine */
+#define RADEON_CP_ME_RAM_ADDR               0x07d4
+#define RADEON_CP_ME_RAM_RADDR              0x07d8
+#define RADEON_CP_ME_RAM_DATAH              0x07dc
+#define RADEON_CP_ME_RAM_DATAL              0x07e0
+
+#define RADEON_CP_RB_BASE                   0x0700
+#define RADEON_CP_RB_CNTL                   0x0704
+#	define RADEON_RB_BUFSZ_SHIFT		0
+#	define RADEON_RB_BUFSZ_MASK		(0x3f << 0)
+#	define RADEON_RB_BLKSZ_SHIFT		8
+#	define RADEON_RB_BLKSZ_MASK		(0x3f << 8)
+#	define RADEON_BUF_SWAP_32BIT		(2 << 16)
+#	define RADEON_MAX_FETCH_SHIFT		18
+#	define RADEON_MAX_FETCH_MASK		(0x3 << 18)
+#	define RADEON_RB_NO_UPDATE		(1 << 27)
+#	define RADEON_RB_RPTR_WR_ENA		(1 << 31)
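+
+/*
+ * Illustrative sketch (not part of the original file): RB_BUFSZ and RB_BLKSZ
+ * are log2-encoded, so a CP_RB_CNTL value for a ring of 'ring_size' bytes
+ * can be composed roughly as below.  order_base_2() is from <linux/log2.h>;
+ * the block size and fetch size here are plausible choices, not mandated
+ * ones.
+ */
+#if 0
+static u32 radeon_example_rb_cntl(u32 ring_size)
+{
+	u32 rb_bufsz = order_base_2(ring_size / 8);	/* log2-encoded size */
+
+	return (rb_bufsz << RADEON_RB_BUFSZ_SHIFT) |
+	       (rb_bufsz << RADEON_RB_BLKSZ_SHIFT) |
+	       (0x3 << RADEON_MAX_FETCH_SHIFT) |
+	       RADEON_RB_NO_UPDATE;	/* no read-pointer writeback yet */
+}
+#endif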
+#define RADEON_CP_RB_RPTR_ADDR              0x070c
+#define RADEON_CP_RB_RPTR                   0x0710
+#define RADEON_CP_RB_WPTR                   0x0714
+#define RADEON_CP_RB_RPTR_WR                0x071c
+
+#define RADEON_SCRATCH_UMSK		    0x0770
+#define RADEON_SCRATCH_ADDR		    0x0774
+
+#define R600_CP_RB_BASE                     0xc100
+#define R600_CP_RB_CNTL                     0xc104
+#       define R600_RB_BUFSZ(x)             ((x) << 0)
+#       define R600_RB_BLKSZ(x)             ((x) << 8)
+#       define R600_RB_NO_UPDATE            (1 << 27)
+#       define R600_RB_RPTR_WR_ENA          (1 << 31)
+#define R600_CP_RB_RPTR_WR                  0xc108
+#define R600_CP_RB_RPTR_ADDR                0xc10c
+#define R600_CP_RB_RPTR_ADDR_HI             0xc110
+#define R600_CP_RB_WPTR                     0xc114
+#define R600_CP_RB_WPTR_ADDR                0xc118
+#define R600_CP_RB_WPTR_ADDR_HI             0xc11c
+#define R600_CP_RB_RPTR                     0x8700
+#define R600_CP_RB_WPTR_DELAY               0x8704
+
+#define RADEON_CP_IB_BASE                   0x0738
+#define RADEON_CP_IB_BUFSZ                  0x073c
+
+#define RADEON_CP_CSQ_CNTL                  0x0740
+#       define RADEON_CSQ_CNT_PRIMARY_MASK     (0xff << 0)
+#       define RADEON_CSQ_PRIDIS_INDDIS        (0    << 28)
+#       define RADEON_CSQ_PRIPIO_INDDIS        (1    << 28)
+#       define RADEON_CSQ_PRIBM_INDDIS         (2    << 28)
+#       define RADEON_CSQ_PRIPIO_INDBM         (3    << 28)
+#       define RADEON_CSQ_PRIBM_INDBM          (4    << 28)
+#       define RADEON_CSQ_PRIPIO_INDPIO        (15   << 28)
+
+#define R300_CP_RESYNC_ADDR                 0x778
+#define R300_CP_RESYNC_DATA                 0x77c
+
+#define RADEON_CP_CSQ_STAT                  0x07f8
+#       define RADEON_CSQ_RPTR_PRIMARY_MASK    (0xff <<  0)
+#       define RADEON_CSQ_WPTR_PRIMARY_MASK    (0xff <<  8)
+#       define RADEON_CSQ_RPTR_INDIRECT_MASK   (0xff << 16)
+#       define RADEON_CSQ_WPTR_INDIRECT_MASK   (0xff << 24)
+#define RADEON_CP_CSQ2_STAT                  0x07fc
+#define RADEON_CP_CSQ_ADDR                  0x07f0
+#define RADEON_CP_CSQ_DATA                  0x07f4
+#define RADEON_CP_CSQ_APER_PRIMARY          0x1000
+#define RADEON_CP_CSQ_APER_INDIRECT         0x1300
+
+#define RADEON_CP_RB_WPTR_DELAY             0x0718
+#       define RADEON_PRE_WRITE_TIMER_SHIFT    0
+#       define RADEON_PRE_WRITE_LIMIT_SHIFT    23
+#define RADEON_CP_CSQ_MODE		0x0744
+#	define RADEON_INDIRECT2_START_SHIFT	0
+#	define RADEON_INDIRECT2_START_MASK	(0x7f << 0)
+#	define RADEON_INDIRECT1_START_SHIFT	8
+#	define RADEON_INDIRECT1_START_MASK	(0x7f << 8)
+
+#define RADEON_AIC_CNTL                     0x01d0
+#       define RADEON_PCIGART_TRANSLATE_EN     (1 << 0)
+#       define RADEON_DIS_OUT_OF_PCI_GART_ACCESS     (1 << 1)
+#	define RS400_MSI_REARM	                (1 << 3) /* rs400/rs480 */
+#define RADEON_AIC_LO_ADDR                  0x01dc
+#define RADEON_AIC_PT_BASE		0x01d8
+#define RADEON_AIC_HI_ADDR		0x01e0
+
+
+
+				/* Constants */
+/* #define RADEON_LAST_FRAME_REG               RADEON_GUI_SCRATCH_REG0 */
+/* #define RADEON_LAST_CLEAR_REG             RADEON_GUI_SCRATCH_REG2 */
+
+
+
+				/* CP packet types */
+#define RADEON_CP_PACKET0                           0x00000000
+#define RADEON_CP_PACKET1                           0x40000000
+#define RADEON_CP_PACKET2                           0x80000000
+#define RADEON_CP_PACKET3                           0xC0000000
+#       define RADEON_CP_PACKET_MASK                0xC0000000
+#       define RADEON_CP_PACKET_COUNT_MASK          0x3fff0000
+#       define RADEON_CP_PACKET_MAX_DWORDS          (1 << 12)
+#       define RADEON_CP_PACKET0_REG_MASK           0x000007ff
+#       define R300_CP_PACKET0_REG_MASK             0x00001fff
+#       define R600_CP_PACKET0_REG_MASK             0x0000ffff
+#       define RADEON_CP_PACKET1_REG0_MASK          0x000007ff
+#       define RADEON_CP_PACKET1_REG1_MASK          0x003ff800
+
+#define RADEON_CP_PACKET0_ONE_REG_WR                0x00008000
+
+#define RADEON_CP_PACKET3_NOP                       0xC0001000
+#define RADEON_CP_PACKET3_NEXT_CHAR                 0xC0001900
+#define RADEON_CP_PACKET3_PLY_NEXTSCAN              0xC0001D00
+#define RADEON_CP_PACKET3_SET_SCISSORS              0xC0001E00
+#define RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM     0xC0002300
+#define RADEON_CP_PACKET3_LOAD_MICROCODE            0xC0002400
+#define RADEON_CP_PACKET3_WAIT_FOR_IDLE             0xC0002600
+#define RADEON_CP_PACKET3_3D_DRAW_VBUF              0xC0002800
+#define RADEON_CP_PACKET3_3D_DRAW_IMMD              0xC0002900
+#define RADEON_CP_PACKET3_3D_DRAW_INDX              0xC0002A00
+#define RADEON_CP_PACKET3_LOAD_PALETTE              0xC0002C00
+#define R200_CP_PACKET3_3D_DRAW_IMMD_2              0xc0003500
+#define RADEON_CP_PACKET3_3D_LOAD_VBPNTR            0xC0002F00
+#define RADEON_CP_PACKET3_CNTL_PAINT                0xC0009100
+#define RADEON_CP_PACKET3_CNTL_BITBLT               0xC0009200
+#define RADEON_CP_PACKET3_CNTL_SMALLTEXT            0xC0009300
+#define RADEON_CP_PACKET3_CNTL_HOSTDATA_BLT         0xC0009400
+#define RADEON_CP_PACKET3_CNTL_POLYLINE             0xC0009500
+#define RADEON_CP_PACKET3_CNTL_POLYSCANLINES        0xC0009800
+#define RADEON_CP_PACKET3_CNTL_PAINT_MULTI          0xC0009A00
+#define RADEON_CP_PACKET3_CNTL_BITBLT_MULTI         0xC0009B00
+#define RADEON_CP_PACKET3_CNTL_TRANS_BITBLT         0xC0009C00
+
+
+#define RADEON_CP_VC_FRMT_XY                        0x00000000
+#define RADEON_CP_VC_FRMT_W0                        0x00000001
+#define RADEON_CP_VC_FRMT_FPCOLOR                   0x00000002
+#define RADEON_CP_VC_FRMT_FPALPHA                   0x00000004
+#define RADEON_CP_VC_FRMT_PKCOLOR                   0x00000008
+#define RADEON_CP_VC_FRMT_FPSPEC                    0x00000010
+#define RADEON_CP_VC_FRMT_FPFOG                     0x00000020
+#define RADEON_CP_VC_FRMT_PKSPEC                    0x00000040
+#define RADEON_CP_VC_FRMT_ST0                       0x00000080
+#define RADEON_CP_VC_FRMT_ST1                       0x00000100
+#define RADEON_CP_VC_FRMT_Q1                        0x00000200
+#define RADEON_CP_VC_FRMT_ST2                       0x00000400
+#define RADEON_CP_VC_FRMT_Q2                        0x00000800
+#define RADEON_CP_VC_FRMT_ST3                       0x00001000
+#define RADEON_CP_VC_FRMT_Q3                        0x00002000
+#define RADEON_CP_VC_FRMT_Q0                        0x00004000
+#define RADEON_CP_VC_FRMT_BLND_WEIGHT_CNT_MASK      0x00038000
+#define RADEON_CP_VC_FRMT_N0                        0x00040000
+#define RADEON_CP_VC_FRMT_XY1                       0x08000000
+#define RADEON_CP_VC_FRMT_Z1                        0x10000000
+#define RADEON_CP_VC_FRMT_W1                        0x20000000
+#define RADEON_CP_VC_FRMT_N1                        0x40000000
+#define RADEON_CP_VC_FRMT_Z                         0x80000000
+
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_NONE            0x00000000
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_POINT           0x00000001
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE            0x00000002
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP      0x00000003
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST        0x00000004
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN         0x00000005
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP       0x00000006
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_TYPE_2      0x00000007
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_RECT_LIST       0x00000008
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_POINT_LIST 0x00000009
+#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_LINE_LIST  0x0000000a
+#define RADEON_CP_VC_CNTL_PRIM_WALK_IND             0x00000010
+#define RADEON_CP_VC_CNTL_PRIM_WALK_LIST            0x00000020
+#define RADEON_CP_VC_CNTL_PRIM_WALK_RING            0x00000030
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_BGRA          0x00000000
+#define RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA          0x00000040
+#define RADEON_CP_VC_CNTL_MAOS_ENABLE               0x00000080
+#define RADEON_CP_VC_CNTL_VTX_FMT_NON_RADEON_MODE   0x00000000
+#define RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE       0x00000100
+#define RADEON_CP_VC_CNTL_TCL_DISABLE               0x00000000
+#define RADEON_CP_VC_CNTL_TCL_ENABLE                0x00000200
+#define RADEON_CP_VC_CNTL_NUM_SHIFT                 16
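+
+/*
+ * Illustrative sketch (not part of the original file): a VC_CNTL dword for
+ * drawing 'n' vertices as an RGBA triangle list through TCL, composed from
+ * the flags above.
+ */
+#if 0
+static u32 radeon_example_vc_cntl(u32 n)
+{
+	return RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST |
+	       RADEON_CP_VC_CNTL_PRIM_WALK_LIST |	/* fetch vertices in order */
+	       RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA |
+	       RADEON_CP_VC_CNTL_TCL_ENABLE |
+	       (n << RADEON_CP_VC_CNTL_NUM_SHIFT);	/* vertex count */
+}
+#endif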
+
+#define RADEON_VS_MATRIX_0_ADDR                   0
+#define RADEON_VS_MATRIX_1_ADDR                   4
+#define RADEON_VS_MATRIX_2_ADDR                   8
+#define RADEON_VS_MATRIX_3_ADDR                  12
+#define RADEON_VS_MATRIX_4_ADDR                  16
+#define RADEON_VS_MATRIX_5_ADDR                  20
+#define RADEON_VS_MATRIX_6_ADDR                  24
+#define RADEON_VS_MATRIX_7_ADDR                  28
+#define RADEON_VS_MATRIX_8_ADDR                  32
+#define RADEON_VS_MATRIX_9_ADDR                  36
+#define RADEON_VS_MATRIX_10_ADDR                 40
+#define RADEON_VS_MATRIX_11_ADDR                 44
+#define RADEON_VS_MATRIX_12_ADDR                 48
+#define RADEON_VS_MATRIX_13_ADDR                 52
+#define RADEON_VS_MATRIX_14_ADDR                 56
+#define RADEON_VS_MATRIX_15_ADDR                 60
+#define RADEON_VS_LIGHT_AMBIENT_ADDR             64
+#define RADEON_VS_LIGHT_DIFFUSE_ADDR             72
+#define RADEON_VS_LIGHT_SPECULAR_ADDR            80
+#define RADEON_VS_LIGHT_DIRPOS_ADDR              88
+#define RADEON_VS_LIGHT_HWVSPOT_ADDR             96
+#define RADEON_VS_LIGHT_ATTENUATION_ADDR        104
+#define RADEON_VS_MATRIX_EYE2CLIP_ADDR          112
+#define RADEON_VS_UCP_ADDR                      116
+#define RADEON_VS_GLOBAL_AMBIENT_ADDR           122
+#define RADEON_VS_FOG_PARAM_ADDR                123
+#define RADEON_VS_EYE_VECTOR_ADDR               124
+
+#define RADEON_SS_LIGHT_DCD_ADDR                  0
+#define RADEON_SS_LIGHT_SPOT_EXPONENT_ADDR        8
+#define RADEON_SS_LIGHT_SPOT_CUTOFF_ADDR         16
+#define RADEON_SS_LIGHT_SPECULAR_THRESH_ADDR     24
+#define RADEON_SS_LIGHT_RANGE_CUTOFF_ADDR        32
+#define RADEON_SS_VERT_GUARD_CLIP_ADJ_ADDR       48
+#define RADEON_SS_VERT_GUARD_DISCARD_ADJ_ADDR    49
+#define RADEON_SS_HORZ_GUARD_CLIP_ADJ_ADDR       50
+#define RADEON_SS_HORZ_GUARD_DISCARD_ADJ_ADDR    51
+#define RADEON_SS_SHININESS                      60
+
+#define RADEON_TV_MASTER_CNTL                    0x0800
+#       define RADEON_TV_ASYNC_RST               (1 <<  0)
+#       define RADEON_CRT_ASYNC_RST              (1 <<  1)
+#       define RADEON_RESTART_PHASE_FIX          (1 <<  3)
+#	define RADEON_TV_FIFO_ASYNC_RST		 (1 <<  4)
+#	define RADEON_VIN_ASYNC_RST		 (1 <<  5)
+#	define RADEON_AUD_ASYNC_RST		 (1 <<  6)
+#	define RADEON_DVS_ASYNC_RST		 (1 <<  7)
+#       define RADEON_CRT_FIFO_CE_EN             (1 <<  9)
+#       define RADEON_TV_FIFO_CE_EN              (1 << 10)
+#       define RADEON_RE_SYNC_NOW_SEL_MASK       (3 << 14)
+#       define RADEON_TVCLK_ALWAYS_ONb           (1 << 30)
+#	define RADEON_TV_ON			 (1 << 31)
+#define RADEON_TV_PRE_DAC_MUX_CNTL               0x0888
+#       define RADEON_Y_RED_EN                   (1 << 0)
+#       define RADEON_C_GRN_EN                   (1 << 1)
+#       define RADEON_CMP_BLU_EN                 (1 << 2)
+#       define RADEON_DAC_DITHER_EN              (1 << 3)
+#       define RADEON_RED_MX_FORCE_DAC_DATA      (6 << 4)
+#       define RADEON_GRN_MX_FORCE_DAC_DATA      (6 << 8)
+#       define RADEON_BLU_MX_FORCE_DAC_DATA      (6 << 12)
+#       define RADEON_TV_FORCE_DAC_DATA_SHIFT    16
+#define RADEON_TV_RGB_CNTL                           0x0804
+#       define RADEON_SWITCH_TO_BLUE		  (1 <<  4)
+#       define RADEON_RGB_DITHER_EN		  (1 <<  5)
+#       define RADEON_RGB_SRC_SEL_MASK		  (3 <<  8)
+#       define RADEON_RGB_SRC_SEL_CRTC1		  (0 <<  8)
+#       define RADEON_RGB_SRC_SEL_RMX		  (1 <<  8)
+#       define RADEON_RGB_SRC_SEL_CRTC2		  (2 <<  8)
+#       define RADEON_RGB_CONVERT_BY_PASS	  (1 << 10)
+#       define RADEON_UVRAM_READ_MARGIN_SHIFT	  16
+#       define RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT	  20
+#       define RADEON_RGB_ATTEN_SEL(x)            ((x) << 24)
+#       define RADEON_TVOUT_SCALE_EN              (1 << 26)
+#       define RADEON_RGB_ATTEN_VAL(x)            ((x) << 28)
+#define RADEON_TV_SYNC_CNTL                          0x0808
+#       define RADEON_SYNC_OE                     (1 <<  0)
+#       define RADEON_SYNC_OUT                    (1 <<  1)
+#       define RADEON_SYNC_IN                     (1 <<  2)
+#       define RADEON_SYNC_PUB                    (1 <<  3)
+#       define RADEON_SYNC_PD                     (1 <<  4)
+#       define RADEON_TV_SYNC_IO_DRIVE            (1 <<  5)
+#define RADEON_TV_HTOTAL                             0x080c
+#define RADEON_TV_HDISP                              0x0810
+#define RADEON_TV_HSTART                             0x0818
+#define RADEON_TV_HCOUNT                             0x081C
+#define RADEON_TV_VTOTAL                             0x0820
+#define RADEON_TV_VDISP                              0x0824
+#define RADEON_TV_VCOUNT                             0x0828
+#define RADEON_TV_FTOTAL                             0x082c
+#define RADEON_TV_FCOUNT                             0x0830
+#define RADEON_TV_FRESTART                           0x0834
+#define RADEON_TV_HRESTART                           0x0838
+#define RADEON_TV_VRESTART                           0x083c
+#define RADEON_TV_HOST_READ_DATA                     0x0840
+#define RADEON_TV_HOST_WRITE_DATA                    0x0844
+#define RADEON_TV_HOST_RD_WT_CNTL                    0x0848
+#	define RADEON_HOST_FIFO_RD		 (1 << 12)
+#	define RADEON_HOST_FIFO_RD_ACK		 (1 << 13)
+#	define RADEON_HOST_FIFO_WT		 (1 << 14)
+#	define RADEON_HOST_FIFO_WT_ACK		 (1 << 15)
+#define RADEON_TV_VSCALER_CNTL1                      0x084c
+#       define RADEON_UV_INC_MASK                0xffff
+#       define RADEON_UV_INC_SHIFT               0
+#       define RADEON_Y_W_EN			 (1 << 24)
+#       define RADEON_RESTART_FIELD              (1 << 29) /* restart on field 0 */
+#       define RADEON_Y_DEL_W_SIG_SHIFT          26
+#define RADEON_TV_TIMING_CNTL                        0x0850
+#       define RADEON_H_INC_MASK                 0xfff
+#       define RADEON_H_INC_SHIFT                0
+#       define RADEON_REQ_Y_FIRST                (1 << 19)
+#       define RADEON_FORCE_BURST_ALWAYS         (1 << 21)
+#       define RADEON_UV_POST_SCALE_BYPASS       (1 << 23)
+#       define RADEON_UV_OUTPUT_POST_SCALE_SHIFT 24
+#define RADEON_TV_VSCALER_CNTL2                      0x0854
+#       define RADEON_DITHER_MODE                (1 <<  0)
+#       define RADEON_Y_OUTPUT_DITHER_EN         (1 <<  1)
+#       define RADEON_UV_OUTPUT_DITHER_EN        (1 <<  2)
+#       define RADEON_UV_TO_BUF_DITHER_EN        (1 <<  3)
+#define RADEON_TV_Y_FALL_CNTL                        0x0858
+#       define RADEON_Y_FALL_PING_PONG           (1 << 16)
+#       define RADEON_Y_COEF_EN                  (1 << 17)
+#define RADEON_TV_Y_RISE_CNTL                        0x085c
+#       define RADEON_Y_RISE_PING_PONG           (1 << 16)
+#define RADEON_TV_Y_SAW_TOOTH_CNTL                   0x0860
+#define RADEON_TV_UPSAMP_AND_GAIN_CNTL               0x0864
+#	define RADEON_YUPSAMP_EN		 (1 <<  0)
+#	define RADEON_UVUPSAMP_EN		 (1 <<  2)
+#define RADEON_TV_GAIN_LIMIT_SETTINGS                0x0868
+#       define RADEON_Y_GAIN_LIMIT_SHIFT         0
+#       define RADEON_UV_GAIN_LIMIT_SHIFT        16
+#define RADEON_TV_LINEAR_GAIN_SETTINGS               0x086c
+#       define RADEON_Y_GAIN_SHIFT               0
+#       define RADEON_UV_GAIN_SHIFT              16
+#define RADEON_TV_MODULATOR_CNTL1                    0x0870
+#	define RADEON_YFLT_EN			 (1 <<  2)
+#	define RADEON_UVFLT_EN			 (1 <<  3)
+#       define RADEON_ALT_PHASE_EN               (1 <<  6)
+#       define RADEON_SYNC_TIP_LEVEL             (1 <<  7)
+#       define RADEON_BLANK_LEVEL_SHIFT          8
+#       define RADEON_SET_UP_LEVEL_SHIFT         16
+#	define RADEON_SLEW_RATE_LIMIT		 (1 << 23)
+#       define RADEON_CY_FILT_BLEND_SHIFT        28
+#define RADEON_TV_MODULATOR_CNTL2                    0x0874
+#       define RADEON_TV_U_BURST_LEVEL_MASK     0x1ff
+#       define RADEON_TV_V_BURST_LEVEL_MASK     0x1ff
+#       define RADEON_TV_V_BURST_LEVEL_SHIFT    16
+#define RADEON_TV_CRC_CNTL                           0x0890
+#define RADEON_TV_UV_ADR                             0x08ac
+#	define RADEON_MAX_UV_ADR_MASK		 0x000000ff
+#	define RADEON_MAX_UV_ADR_SHIFT		 0
+#	define RADEON_TABLE1_BOT_ADR_MASK	 0x0000ff00
+#	define RADEON_TABLE1_BOT_ADR_SHIFT	 8
+#	define RADEON_TABLE3_TOP_ADR_MASK	 0x00ff0000
+#	define RADEON_TABLE3_TOP_ADR_SHIFT	 16
+#	define RADEON_HCODE_TABLE_SEL_MASK	 0x06000000
+#	define RADEON_HCODE_TABLE_SEL_SHIFT	 25
+#	define RADEON_VCODE_TABLE_SEL_MASK	 0x18000000
+#	define RADEON_VCODE_TABLE_SEL_SHIFT	 27
+#	define RADEON_TV_MAX_FIFO_ADDR		 0x1a7
+#	define RADEON_TV_MAX_FIFO_ADDR_INTERNAL	 0x1ff
+#define RADEON_TV_PLL_FINE_CNTL			     0x0020	/* PLL */
+#define RADEON_TV_PLL_CNTL                           0x0021	/* PLL */
+#       define RADEON_TV_M0LO_MASK               0xff
+#       define RADEON_TV_M0HI_MASK               0x7
+#       define RADEON_TV_M0HI_SHIFT              18
+#       define RADEON_TV_N0LO_MASK               0x1ff
+#       define RADEON_TV_N0LO_SHIFT              8
+#       define RADEON_TV_N0HI_MASK               0x3
+#       define RADEON_TV_N0HI_SHIFT              21
+#       define RADEON_TV_P_MASK                  0xf
+#       define RADEON_TV_P_SHIFT                 24
+#       define RADEON_TV_SLIP_EN                 (1 << 23)
+#       define RADEON_TV_DTO_EN                  (1 << 28)
+#define RADEON_TV_PLL_CNTL1                          0x0022	/* PLL */
+#       define RADEON_TVPLL_RESET                (1 <<  1)
+#       define RADEON_TVPLL_SLEEP                (1 <<  3)
+#       define RADEON_TVPLL_REFCLK_SEL           (1 <<  4)
+#       define RADEON_TVPCP_SHIFT                8
+#       define RADEON_TVPCP_MASK                 (7 << 8)
+#       define RADEON_TVPVG_SHIFT                11
+#       define RADEON_TVPVG_MASK                 (7 << 11)
+#       define RADEON_TVPDC_SHIFT                14
+#       define RADEON_TVPDC_MASK                 (3 << 14)
+#       define RADEON_TVPLL_TEST_DIS             (1 << 31)
+#       define RADEON_TVCLK_SRC_SEL_TVPLL        (1 << 30)
+
+#define RS400_DISP2_REQ_CNTL1			0xe30
+#       define RS400_DISP2_START_REQ_LEVEL_SHIFT   0
+#       define RS400_DISP2_START_REQ_LEVEL_MASK    0x3ff
+#       define RS400_DISP2_STOP_REQ_LEVEL_SHIFT    12
+#       define RS400_DISP2_STOP_REQ_LEVEL_MASK     0x3ff
+#       define RS400_DISP2_ALLOW_FID_LEVEL_SHIFT   22
+#       define RS400_DISP2_ALLOW_FID_LEVEL_MASK    0x3ff
+#define RS400_DISP2_REQ_CNTL2			0xe34
+#       define RS400_DISP2_CRITICAL_POINT_START_SHIFT    12
+#       define RS400_DISP2_CRITICAL_POINT_START_MASK     0x3ff
+#       define RS400_DISP2_CRITICAL_POINT_STOP_SHIFT     22
+#       define RS400_DISP2_CRITICAL_POINT_STOP_MASK      0x3ff
+#define RS400_DMIF_MEM_CNTL1			0xe38
+#       define RS400_DISP2_START_ADR_SHIFT      0
+#       define RS400_DISP2_START_ADR_MASK       0x3ff
+#       define RS400_DISP1_CRITICAL_POINT_START_SHIFT    12
+#       define RS400_DISP1_CRITICAL_POINT_START_MASK     0x3ff
+#       define RS400_DISP1_CRITICAL_POINT_STOP_SHIFT     22
+#       define RS400_DISP1_CRITICAL_POINT_STOP_MASK      0x3ff
+#define RS400_DISP1_REQ_CNTL1			0xe3c
+#       define RS400_DISP1_START_REQ_LEVEL_SHIFT   0
+#       define RS400_DISP1_START_REQ_LEVEL_MASK    0x3ff
+#       define RS400_DISP1_STOP_REQ_LEVEL_SHIFT    12
+#       define RS400_DISP1_STOP_REQ_LEVEL_MASK     0x3ff
+#       define RS400_DISP1_ALLOW_FID_LEVEL_SHIFT   22
+#       define RS400_DISP1_ALLOW_FID_LEVEL_MASK    0x3ff
+
+#define RADEON_PCIE_INDEX               0x0030
+#define RADEON_PCIE_DATA                0x0034
+#define RADEON_PCIE_TX_GART_CNTL	0x10
+#	define RADEON_PCIE_TX_GART_EN		(1 << 0)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO  (1 << 1)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD   (3 << 1)
+#	define RADEON_PCIE_TX_GART_MODE_32_128_CACHE	(0 << 3)
+#	define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE	(1 << 3)
+#	define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN      (1 << 5)
+#	define RADEON_PCIE_TX_GART_INVALIDATE_TLB	(1 << 8)
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
+#define RADEON_PCIE_TX_GART_BASE	0x13
+#define RADEON_PCIE_TX_GART_START_LO	0x14
+#define RADEON_PCIE_TX_GART_START_HI	0x15
+#define RADEON_PCIE_TX_GART_END_LO	0x16
+#define RADEON_PCIE_TX_GART_END_HI	0x17
+#define RADEON_PCIE_TX_GART_ERROR	0x18
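+
+/*
+ * Illustrative sketch (not part of the original file): the RADEON_PCIE_TX_*
+ * registers above are not plain MMIO; they sit behind the PCIE_INDEX/
+ * PCIE_DATA window, roughly as the driver's rv370_pcie_wreg() helper does
+ * it (WREG32() is the driver's MMIO write accessor).
+ */
+#if 0
+static void radeon_example_pcie_wreg(struct radeon_device *rdev,
+				     u32 reg, u32 v)
+{
+	WREG32(RADEON_PCIE_INDEX, reg & 0xff);	/* select the PCIE register */
+	WREG32(RADEON_PCIE_DATA, v);		/* write through the window */
+}
+#endif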
+
+#define RADEON_SCRATCH_REG0		0x15e0
+#define RADEON_SCRATCH_REG1		0x15e4
+#define RADEON_SCRATCH_REG2		0x15e8
+#define RADEON_SCRATCH_REG3		0x15ec
+#define RADEON_SCRATCH_REG4		0x15f0
+#define RADEON_SCRATCH_REG5		0x15f4
+
+#define RV530_GB_PIPE_SELECT2           0x4124
+
+#define RADEON_CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define RADEON_CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define RADEON_CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define RADEON_CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define R100_CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define R600_CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define RADEON_PACKET_TYPE0 0
+#define RADEON_PACKET_TYPE1 1
+#define RADEON_PACKET_TYPE2 2
+#define RADEON_PACKET_TYPE3 3
+
+#define RADEON_PACKET3_NOP 0x10
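+
+/*
+ * Illustrative sketch (not part of the original header): composing a type-0
+ * packet header for writing 'ndw' dwords starting at register 'reg', and
+ * decoding it again with the helpers above.  The count field holds one less
+ * than the number of data dwords, per the usual CP convention.
+ */
+#if 0
+static u32 radeon_example_packet0(u32 reg, u32 ndw)
+{
+	/* register field is the dword offset; count field is ndw - 1 */
+	return RADEON_CP_PACKET0 | ((ndw - 1) << 16) | (reg >> 2);
+}
+
+static void radeon_example_decode(u32 header)
+{
+	if (RADEON_CP_PACKET_GET_TYPE(header) == RADEON_PACKET_TYPE0) {
+		u32 reg = R100_CP_PACKET0_GET_REG(header);
+		u32 ndw = RADEON_CP_PACKET_GET_COUNT(header) + 1;
+		/* 'reg' is the first register, 'ndw' the data dword count */
+	}
+}
+#endif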
+
+#define RADEON_VLINE_STAT (1 << 12)
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_ring.c b/linux-imx/drivers/gpu/drm/radeon/radeon_ring.c
new file mode 100644
index 0000000..6e0f480
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_ring.c
@@ -0,0 +1,896 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ *          Christian König
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "atom.h"
+
+/*
+ * IB
+ * IBs (Indirect Buffers) are areas of GPU-accessible memory where
+ * commands are stored.  You can put a pointer to the IB in the
+ * command ring and the hw will fetch the commands from the IB
+ * and execute them.  Generally userspace acceleration drivers
+ * produce command buffers which are sent to the kernel and
+ * put in IBs for execution by the requested ring.
+ */
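+/*
+ * Typical IB lifecycle, as an illustrative sketch (hypothetical caller of
+ * the helpers in this file, error handling elided):
+ *
+ *	struct radeon_ib ib;
+ *
+ *	radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 64);
+ *	ib.ptr[0] = ...;		(fill the buffer with packets)
+ *	ib.length_dw = ...;		(number of dwords actually used)
+ *	radeon_ib_schedule(rdev, &ib, NULL);
+ *	radeon_fence_wait(ib.fence, false);
+ *	radeon_ib_free(rdev, &ib);
+ */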
+static int radeon_debugfs_sa_init(struct radeon_device *rdev);
+
+/**
+ * radeon_ib_get - request an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the IB is associated with
+ * @ib: IB object returned
+ * @vm: requesting vm, or NULL if no vm
+ * @size: requested IB size
+ *
+ * Request an IB (all asics).  IBs are allocated using the
+ * suballocator.
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+		  struct radeon_ib *ib, struct radeon_vm *vm,
+		  unsigned size)
+{
+	int i, r;
+
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
+	if (r) {
+		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
+		return r;
+	}
+
+	r = radeon_semaphore_create(rdev, &ib->semaphore);
+	if (r) {
+		return r;
+	}
+
+	ib->ring = ring;
+	ib->fence = NULL;
+	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+	ib->vm = vm;
+	if (vm) {
+		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
+		 * space and soffset is the offset inside the pool bo
+		 */
+		ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+	} else {
+		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+	}
+	ib->is_const_ib = false;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		ib->sync_to[i] = NULL;
+
+	return 0;
+}
+
+/**
+ * radeon_ib_free - free an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to free
+ *
+ * Free an IB (all asics).
+ */
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
+	radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+	radeon_fence_unref(&ib->fence);
+}
+
+/**
+ * radeon_ib_sync_to - sync to fence before executing the IB
+ *
+ * @ib: IB object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence before executing the IB
+ */
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
+{
+	struct radeon_fence *other;
+
+	if (!fence)
+		return;
+
+	other = ib->sync_to[fence->ring];
+	ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
+}
+
+/**
+ * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ * @const_ib: Const IB to schedule (SI only)
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine).  Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will be already in cache when the draw is
+ * processed.  To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE.  If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB.  Prior
+ * to SI there was just a DE IB.
+ */
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+		       struct radeon_ib *const_ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	bool need_sync = false;
+	int i, r = 0;
+
+	if (!ib->length_dw || !ring->ready) {
+		/* TODO: Nothing in the IB; we should report this. */
+		dev_err(rdev->dev, "couldn't schedule ib\n");
+		return -EINVAL;
+	}
+
+	/* 64 dwords should be enough for fence too */
+	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
+	if (r) {
+		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
+		return r;
+	}
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_fence *fence = ib->sync_to[i];
+		if (radeon_fence_need_sync(fence, ib->ring)) {
+			need_sync = true;
+			radeon_semaphore_sync_rings(rdev, ib->semaphore,
+						    fence->ring, ib->ring);
+			radeon_fence_note_sync(fence, ib->ring);
+		}
+	}
+	/* immediately free semaphore when we don't need to sync */
+	if (!need_sync) {
+		radeon_semaphore_free(rdev, &ib->semaphore, NULL);
+	}
+	/* if we can't remember our last VM flush then flush now! */
+	/* XXX figure out why we have to flush for every IB */
+	if (ib->vm /*&& !ib->vm->last_flush*/) {
+		radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
+	}
+	if (const_ib) {
+		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
+		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+	}
+	radeon_ring_ib_execute(rdev, ib->ring, ib);
+	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+	if (r) {
+		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+	if (const_ib) {
+		const_ib->fence = radeon_fence_ref(ib->fence);
+	}
+	/* we just flushed the VM, remember that */
+	if (ib->vm && !ib->vm->last_flush) {
+		ib->vm->last_flush = radeon_fence_ref(ib->fence);
+	}
+	radeon_ring_unlock_commit(rdev, ring);
+	return 0;
+}
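+/*
+ * Illustrative submission sketch for the SI case described above
+ * (hypothetical caller): the CE IB is passed as @const_ib and is put on
+ * the ring before the DE IB:
+ *
+ *	radeon_ib_schedule(rdev, &de_ib, &ce_ib);
+ *
+ * while pre-SI asics simply pass NULL:
+ *
+ *	radeon_ib_schedule(rdev, &de_ib, NULL);
+ */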
+
+/**
+ * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the suballocator to manage a pool of memory
+ * for use as IBs (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_pool_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->ib_pool_ready) {
+		return 0;
+	}
+	r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+				      RADEON_IB_POOL_SIZE*64*1024,
+				      RADEON_GPU_PAGE_SIZE,
+				      RADEON_GEM_DOMAIN_GTT);
+	if (r) {
+		return r;
+	}
+
+	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
+	if (r) {
+		return r;
+	}
+
+	rdev->ib_pool_ready = true;
+	if (radeon_debugfs_sa_init(rdev)) {
+		dev_err(rdev->dev, "failed to register debugfs file for SA\n");
+	}
+	return 0;
+}
+
+/**
+ * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the suballocator managing the pool of memory
+ * for use as IBs (all asics).
+ */
+void radeon_ib_pool_fini(struct radeon_device *rdev)
+{
+	if (rdev->ib_pool_ready) {
+		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
+		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+		rdev->ib_pool_ready = false;
+	}
+}
+
+/**
+ * radeon_ib_ring_tests - test IBs on the rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Test an IB (Indirect Buffer) on each ring.
+ * If the test fails, disable the ring.
+ * Returns 0 on success, error if the primary GFX ring
+ * IB test fails.
+ */
+int radeon_ib_ring_tests(struct radeon_device *rdev)
+{
+	unsigned i;
+	int r;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_ring *ring = &rdev->ring[i];
+
+		if (!ring->ready)
+			continue;
+
+		r = radeon_ib_test(rdev, i, ring);
+		if (r) {
+			ring->ready = false;
+
+			if (i == RADEON_RING_TYPE_GFX_INDEX) {
+				/* oh, oh, that's really bad */
+				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
+				rdev->accel_working = false;
+				return r;
+
+			} else {
+				/* still not good, but we can live with it */
+				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * Rings
+ * Most engines on the GPU are fed via ring buffers.  Ring
+ * buffers are areas of GPU accessible memory that the host
+ * writes commands into and the GPU reads commands out of.
+ * There is a rptr (read pointer) that determines where the
+ * GPU is currently reading, and a wptr (write pointer)
+ * which determines where the host has written.  When the
+ * pointers are equal, the ring is idle.  When the host
+ * writes commands to the ring buffer, it increments the
+ * wptr.  The GPU then starts fetching commands and executes
+ * them until the pointers are equal again.
+ */
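+/*
+ * Free-space arithmetic for a power-of-two ring, as an illustrative
+ * sketch mirroring radeon_ring_free_size() below (sizes in dwords):
+ *
+ *	free = (rptr + size - wptr) & (size - 1);
+ *	if (free == 0)
+ *		free = size;	(rptr == wptr means idle, not full)
+ *
+ * One slot is kept in reserve (radeon_ring_alloc() compares against
+ * free - 1), so a full ring is never confused with an empty one.
+ */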
+static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+
+/**
+ * radeon_ring_write - write a value to the ring
+ *
+ * @ring: radeon_ring structure holding ring information
+ * @v: dword (dw) value to write
+ *
+ * Write a value to the requested ring buffer (all asics).
+ */
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+	if (ring->count_dw <= 0) {
+		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
+	}
+#endif
+	ring->ring[ring->wptr++] = v;
+	ring->wptr &= ring->ptr_mask;
+	ring->count_dw--;
+	ring->ring_free_dw--;
+}
+
+/**
+ * radeon_ring_supports_scratch_reg - check if the ring supports
+ * writing to scratch registers
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if a specific ring supports writing to scratch registers (all asics).
+ * Returns true if the ring supports writing to scratch regs, false if not.
+ */
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+				      struct radeon_ring *ring)
+{
+	switch (ring->idx) {
+	case RADEON_RING_TYPE_GFX_INDEX:
+	case CAYMAN_RING_TYPE_CP1_INDEX:
+	case CAYMAN_RING_TYPE_CP2_INDEX:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/**
+ * radeon_ring_free_size - update the free size
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the free dw slots in the ring buffer (all asics).
+ */
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 rptr;
+
+	if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
+		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+	else
+		rptr = RREG32(ring->rptr_reg);
+	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+	/* This works because ring_size is a power of 2 */
+	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
+	ring->ring_free_dw -= ring->wptr;
+	ring->ring_free_dw &= ring->ptr_mask;
+	if (!ring->ring_free_dw) {
+		ring->ring_free_dw = ring->ring_size / 4;
+	}
+}
+
+/**
+ * radeon_ring_alloc - allocate space on the ring buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
+{
+	int r;
+
+	/* make sure we aren't trying to allocate more space than there is on the ring */
+	if (ndw > (ring->ring_size / 4))
+		return -ENOMEM;
+	/* Align requested size with padding so unlock_commit can
+	 * pad safely */
+	radeon_ring_free_size(rdev, ring);
+	if (ring->ring_free_dw == (ring->ring_size / 4)) {
+		/* The ring is empty; update the lockup info to avoid
+		 * a false positive.
+		 */
+		radeon_ring_lockup_update(ring);
+	}
+	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+	while (ndw > (ring->ring_free_dw - 1)) {
+		radeon_ring_free_size(rdev, ring);
+		if (ndw < ring->ring_free_dw) {
+			break;
+		}
+		r = radeon_fence_wait_next_locked(rdev, ring->idx);
+		if (r)
+			return r;
+	}
+	ring->count_dw = ndw;
+	ring->wptr_old = ring->wptr;
+	return 0;
+}
+
+/**
+ * radeon_ring_lock - lock the ring and allocate space on it
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Lock the ring and allocate @ndw dwords in the ring buffer
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
+{
+	int r;
+
+	mutex_lock(&rdev->ring_lock);
+	r = radeon_ring_alloc(rdev, ring, ndw);
+	if (r) {
+		mutex_unlock(&rdev->ring_lock);
+		return r;
+	}
+	return 0;
+}
+
+/**
+ * radeon_ring_commit - tell the GPU to execute the new
+ * commands on the ring buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the wptr (write pointer) to tell the GPU to
+ * execute new commands on the ring buffer (all asics).
+ */
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	/* We pad to match fetch size */
+	while (ring->wptr & ring->align_mask) {
+		radeon_ring_write(ring, ring->nop);
+	}
+	DRM_MEMORYBARRIER();
+	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
+	(void)RREG32(ring->wptr_reg);
+}
+
+/**
+ * radeon_ring_unlock_commit - tell the GPU to execute the new
+ * commands on the ring buffer and unlock it
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Call radeon_ring_commit() then unlock the ring (all asics).
+ */
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	radeon_ring_commit(rdev, ring);
+	mutex_unlock(&rdev->ring_lock);
+}
+
+/**
+ * radeon_ring_undo - reset the wptr
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Reset the driver's copy of the wptr (all asics).
+ */
+void radeon_ring_undo(struct radeon_ring *ring)
+{
+	ring->wptr = ring->wptr_old;
+}
+
+/**
+ * radeon_ring_unlock_undo - reset the wptr and unlock the ring
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Call radeon_ring_undo() then unlock the ring (all asics).
+ */
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	radeon_ring_undo(ring);
+	mutex_unlock(&rdev->ring_lock);
+}
+
+/**
+ * radeon_ring_force_activity - add some nop packets to the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Add some nop packets to the ring to force activity (all asics).
+ * Used for lockup detection to see if the rptr is advancing.
+ */
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	int r;
+
+	radeon_ring_free_size(rdev, ring);
+	if (ring->rptr == ring->wptr) {
+		r = radeon_ring_alloc(rdev, ring, 1);
+		if (!r) {
+			radeon_ring_write(ring, ring->nop);
+			radeon_ring_commit(rdev, ring);
+		}
+	}
+}
+
+/**
+ * radeon_ring_lockup_update - update lockup variables
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the last rptr value and timestamp (all asics).
+ */
+void radeon_ring_lockup_update(struct radeon_ring *ring)
+{
+	ring->last_rptr = ring->rptr;
+	ring->last_activity = jiffies;
+}
+
+/**
+ * radeon_ring_test_lockup() - check if the ring is locked up
+ * @rdev:       radeon device structure
+ * @ring:       radeon_ring structure holding ring information
+ *
+ * We don't need to initialize the lockup tracking information here, as
+ * either the CP rptr will have moved to a different value or a jiffies
+ * wraparound will force (re)initialization of the tracking information.
+ *
+ * A possible false positive: we get called after a while and last_rptr
+ * still equals the current CP rptr, however unlikely that is.  To avoid
+ * this, if the time elapsed since the last call exceeds the lockup
+ * timeout, we return false and update the tracking information.  Because
+ * of this, the caller must call radeon_ring_test_lockup() several times
+ * within the timeout window for a lockup to be reported; the fencing
+ * code should be cautious about that.
+ *
+ * The caller should write to the ring to force the CP to do something,
+ * so we don't get a false positive when the CP simply has nothing to do.
+ **/
+bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	unsigned long cjiffies, elapsed;
+	uint32_t rptr;
+
+	cjiffies = jiffies;
+	if (!time_after(cjiffies, ring->last_activity)) {
+		/* likely a wrap around */
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	rptr = RREG32(ring->rptr_reg);
+	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+	if (ring->rptr != ring->last_rptr) {
+		/* CP is still working no lockup */
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
+	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
+		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+		return true;
+	}
+	/* give a chance to the GPU ... */
+	return false;
+}
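+/*
+ * Illustrative polling sketch for the two helpers above (a hypothetical
+ * lockup check in an asic backend):
+ *
+ *	radeon_ring_force_activity(rdev, ring);
+ *	if (radeon_ring_test_lockup(rdev, ring))
+ *		(lockup detected, the caller resets the GPU)
+ */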
+
+/**
+ * radeon_ring_backup - Back up the content of a ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: the ring we want to back up
+ * @data: pointer used to return the saved command stream
+ *
+ * Saves all unprocessed commands from the ring; returns the number of
+ * dwords saved.
+ */
+unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
+			    uint32_t **data)
+{
+	unsigned size, ptr, i;
+
+	/* just in case lock the ring */
+	mutex_lock(&rdev->ring_lock);
+	*data = NULL;
+
+	if (ring->ring_obj == NULL) {
+		mutex_unlock(&rdev->ring_lock);
+		return 0;
+	}
+
+	/* it doesn't make sense to save anything if all fences are signaled */
+	if (!radeon_fence_count_emitted(rdev, ring->idx)) {
+		mutex_unlock(&rdev->ring_lock);
+		return 0;
+	}
+
+	/* calculate the number of dw on the ring */
+	if (ring->rptr_save_reg)
+		ptr = RREG32(ring->rptr_save_reg);
+	else if (rdev->wb.enabled)
+		ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
+	else {
+		/* no way to read back the next rptr */
+		mutex_unlock(&rdev->ring_lock);
+		return 0;
+	}
+
+	size = ring->wptr + (ring->ring_size / 4);
+	size -= ptr;
+	size &= ring->ptr_mask;
+	if (size == 0) {
+		mutex_unlock(&rdev->ring_lock);
+		return 0;
+	}
+
+	/* and then save the content of the ring */
+	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
+	if (!*data) {
+		mutex_unlock(&rdev->ring_lock);
+		return 0;
+	}
+	for (i = 0; i < size; ++i) {
+		(*data)[i] = ring->ring[ptr++];
+		ptr &= ring->ptr_mask;
+	}
+
+	mutex_unlock(&rdev->ring_lock);
+	return size;
+}
+
+/**
+ * radeon_ring_restore - append saved commands to the ring again
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring to append commands to
+ * @size: number of dwords we want to write
+ * @data: saved commands
+ *
+ * Allocates space on the ring and restores the previously saved commands.
+ */
+int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
+			unsigned size, uint32_t *data)
+{
+	int i, r;
+
+	if (!size || !data)
+		return 0;
+
+	/* restore the saved ring content */
+	r = radeon_ring_lock(rdev, ring, size);
+	if (r)
+		return r;
+
+	for (i = 0; i < size; ++i) {
+		radeon_ring_write(ring, data[i]);
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	kfree(data);
+	return 0;
+}
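+/*
+ * Illustrative save/restore sketch around a GPU reset (hypothetical
+ * caller, showing how the two helpers above pair up):
+ *
+ *	unsigned size;
+ *	uint32_t *data;
+ *
+ *	size = radeon_ring_backup(rdev, ring, &data);
+ *	(reset and reinitialize the GPU)
+ *	radeon_ring_restore(rdev, ring, size, data);	(also frees data)
+ */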
+
+/**
+ * radeon_ring_init - init driver ring struct.
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ring_size: size of the ring
+ * @rptr_offs: offset of the rptr writeback location in the WB buffer
+ * @rptr_reg: MMIO offset of the rptr register
+ * @wptr_reg: MMIO offset of the wptr register
+ * @ptr_reg_shift: bit offset of the rptr/wptr values
+ * @ptr_reg_mask: bit mask of the rptr/wptr values
+ * @nop: nop packet for this ring
+ *
+ * Initialize the driver information for the selected ring (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
+		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
+{
+	int r;
+
+	ring->ring_size = ring_size;
+	ring->rptr_offs = rptr_offs;
+	ring->rptr_reg = rptr_reg;
+	ring->wptr_reg = wptr_reg;
+	ring->ptr_reg_shift = ptr_reg_shift;
+	ring->ptr_reg_mask = ptr_reg_mask;
+	ring->nop = nop;
+	/* Allocate ring buffer */
+	if (ring->ring_obj == NULL) {
+		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_GTT,
+				     NULL, &ring->ring_obj);
+		if (r) {
+			dev_err(rdev->dev, "(%d) ring create failed\n", r);
+			return r;
+		}
+		r = radeon_bo_reserve(ring->ring_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
+					&ring->gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(ring->ring_obj);
+			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
+			return r;
+		}
+		r = radeon_bo_kmap(ring->ring_obj,
+				       (void **)&ring->ring);
+		radeon_bo_unreserve(ring->ring_obj);
+		if (r) {
+			dev_err(rdev->dev, "(%d) ring map failed\n", r);
+			return r;
+		}
+	}
+	ring->ptr_mask = (ring->ring_size / 4) - 1;
+	ring->ring_free_dw = ring->ring_size / 4;
+	if (rdev->wb.enabled) {
+		u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
+		ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
+		ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
+	}
+	if (radeon_debugfs_ring_init(rdev, ring)) {
+		DRM_ERROR("Failed to register debugfs file for rings !\n");
+	}
+	radeon_ring_lockup_update(ring);
+	return 0;
+}
+
+/**
+ * radeon_ring_fini - tear down the driver ring struct.
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Tear down the driver information for the selected ring (all asics).
+ */
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	int r;
+	struct radeon_bo *ring_obj;
+
+	mutex_lock(&rdev->ring_lock);
+	ring_obj = ring->ring_obj;
+	ring->ready = false;
+	ring->ring = NULL;
+	ring->ring_obj = NULL;
+	mutex_unlock(&rdev->ring_lock);
+
+	if (ring_obj) {
+		r = radeon_bo_reserve(ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(ring_obj);
+			radeon_bo_unpin(ring_obj);
+			radeon_bo_unreserve(ring_obj);
+		}
+		radeon_bo_unref(&ring_obj);
+	}
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int ridx = *(int*)node->info_ent->data;
+	struct radeon_ring *ring = &rdev->ring[ridx];
+	unsigned count, i, j;
+	u32 tmp;
+
+	radeon_ring_free_size(rdev, ring);
+	count = (ring->ring_size / 4) - ring->ring_free_dw;
+	tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
+	seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
+	tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
+	seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
+	if (ring->rptr_save_reg) {
+		seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
+			   RREG32(ring->rptr_save_reg));
+	}
+	seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
+	seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
+	seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
+	seq_printf(m, "last semaphore wait addr   : 0x%016llx\n", ring->last_semaphore_wait_addr);
+	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+	seq_printf(m, "%u dwords in ring\n", count);
+	/* print 32 dw before the current rptr, as often it's the last
+	 * executed packet that is the root of the issue
+	 */
+	i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+	if (ring->ready) {
+		for (j = 0; j <= (count + 32); j++) {
+			seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
+			i = (i + 1) & ring->ptr_mask;
+		}
+	}
+	return 0;
+}
+
+static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
+static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
+static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
+static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
+static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
+
+static struct drm_info_list radeon_debugfs_ring_info_list[] = {
+	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
+	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
+	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
+	{"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
+	{"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
+	{"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
+};
+
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
+	return 0;
+}
+
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+        {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+};
+
+#endif
+
+static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+#if defined(CONFIG_DEBUG_FS)
+	unsigned i;
+	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
+		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
+		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
+		int r;
+
+		if (&rdev->ring[ridx] != ring)
+			continue;
+
+		r = radeon_debugfs_add_files(rdev, info, 1);
+		if (r)
+			return r;
+	}
+#endif
+	return 0;
+}
+
+static int radeon_debugfs_sa_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
+#else
+	return 0;
+#endif
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_sa.c b/linux-imx/drivers/gpu/drm/radeon/radeon_sa.c
new file mode 100644
index 0000000..f0bac68
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_sa.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ */
+/* Algorithm:
+ *
+ * We store the last allocated bo in "hole"; we always try to allocate
+ * after the last allocated bo.  The principle is that in a linear GPU
+ * ring progression, what comes after "last" is the oldest bo we
+ * allocated and thus the first one that should no longer be in use by
+ * the GPU.
+ *
+ * If that's not the case, we skip over the bo after "last" to the
+ * closest done bo, if one exists.  If none exists and we are not asked
+ * to block, we report failure to allocate.
+ *
+ * If we are asked to block, we wait on the oldest fence of each ring;
+ * we just wait for any of those fences to complete.
+ */
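+/*
+ * Illustrative picture of the manager state: offsets grow to the right,
+ * "hole" trails the most recently allocated bo and new allocations are
+ * placed right after it, wrapping around at the end of the buffer:
+ *
+ *	[ ..free.. | oldest | ... | newest ]<-hole[ ..free.. ]
+ *	             ^ freed first, once its fence signals
+ */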
+#include <drm/drmP.h>
+#include "radeon.h"
+
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
+
+int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+			      struct radeon_sa_manager *sa_manager,
+			      unsigned size, u32 align, u32 domain)
+{
+	int i, r;
+
+	init_waitqueue_head(&sa_manager->wq);
+	sa_manager->bo = NULL;
+	sa_manager->size = size;
+	sa_manager->domain = domain;
+	sa_manager->align = align;
+	sa_manager->hole = &sa_manager->olist;
+	INIT_LIST_HEAD(&sa_manager->olist);
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		INIT_LIST_HEAD(&sa_manager->flist[i]);
+	}
+
+	r = radeon_bo_create(rdev, size, align, true,
+			     domain, NULL, &sa_manager->bo);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
+		return r;
+	}
+
+	return r;
+}
+
+void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+			       struct radeon_sa_manager *sa_manager)
+{
+	struct radeon_sa_bo *sa_bo, *tmp;
+
+	if (!list_empty(&sa_manager->olist)) {
+		sa_manager->hole = &sa_manager->olist,
+		radeon_sa_bo_try_free(sa_manager);
+		if (!list_empty(&sa_manager->olist)) {
+			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+		}
+	}
+	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
+		radeon_sa_bo_remove_locked(sa_bo);
+	}
+	radeon_bo_unref(&sa_manager->bo);
+	sa_manager->size = 0;
+}
+
+int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+			       struct radeon_sa_manager *sa_manager)
+{
+	int r;
+
+	if (sa_manager->bo == NULL) {
+		dev_err(rdev->dev, "no bo for sa manager\n");
+		return -EINVAL;
+	}
+
+	/* map the buffer */
+	r = radeon_bo_reserve(sa_manager->bo, false);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
+		return r;
+	}
+	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(sa_manager->bo);
+		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
+		return r;
+	}
+	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+	radeon_bo_unreserve(sa_manager->bo);
+	return r;
+}
+
+int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+				 struct radeon_sa_manager *sa_manager)
+{
+	int r;
+
+	if (sa_manager->bo == NULL) {
+		dev_err(rdev->dev, "no bo for sa manager\n");
+		return -EINVAL;
+	}
+
+	r = radeon_bo_reserve(sa_manager->bo, false);
+	if (!r) {
+		radeon_bo_kunmap(sa_manager->bo);
+		radeon_bo_unpin(sa_manager->bo);
+		radeon_bo_unreserve(sa_manager->bo);
+	}
+	return r;
+}
+
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+{
+	struct radeon_sa_manager *sa_manager = sa_bo->manager;
+	if (sa_manager->hole == &sa_bo->olist) {
+		sa_manager->hole = sa_bo->olist.prev;
+	}
+	list_del_init(&sa_bo->olist);
+	list_del_init(&sa_bo->flist);
+	radeon_fence_unref(&sa_bo->fence);
+	kfree(sa_bo);
+}
+
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
+{
+	struct radeon_sa_bo *sa_bo, *tmp;
+
+	if (sa_manager->hole->next == &sa_manager->olist)
+		return;
+
+	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
+	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
+		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
+			return;
+		}
+		radeon_sa_bo_remove_locked(sa_bo);
+	}
+}
+
+static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
+{
+	struct list_head *hole = sa_manager->hole;
+
+	if (hole != &sa_manager->olist) {
+		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
+	}
+	return 0;
+}
+
+static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
+{
+	struct list_head *hole = sa_manager->hole;
+
+	if (hole->next != &sa_manager->olist) {
+		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
+	}
+	return sa_manager->size;
+}
+
+static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
+				   struct radeon_sa_bo *sa_bo,
+				   unsigned size, unsigned align)
+{
+	unsigned soffset, eoffset, wasted;
+
+	soffset = radeon_sa_bo_hole_soffset(sa_manager);
+	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+	wasted = (align - (soffset % align)) % align;
+
+	if ((eoffset - soffset) >= (size + wasted)) {
+		soffset += wasted;
+
+		sa_bo->manager = sa_manager;
+		sa_bo->soffset = soffset;
+		sa_bo->eoffset = soffset + size;
+		list_add(&sa_bo->olist, sa_manager->hole);
+		INIT_LIST_HEAD(&sa_bo->flist);
+		sa_manager->hole = &sa_bo->olist;
+		return true;
+	}
+	return false;
+}
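+/*
+ * Worked alignment example for the "wasted" computation above
+ * (illustrative numbers): with soffset = 12 and align = 8,
+ * wasted = (8 - (12 % 8)) % 8 = 4, so the allocation starts at offset 16.
+ */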
+
+/**
+ * radeon_sa_event - Check if we can stop waiting
+ *
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to allocate
+ * @align: alignment we need to match
+ *
+ * Check if either there is a fence we can wait for or
+ * enough free memory to satisfy the allocation directly
+ */
+static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
+			    unsigned size, unsigned align)
+{
+	unsigned soffset, eoffset, wasted;
+	int i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!list_empty(&sa_manager->flist[i])) {
+			return true;
+		}
+	}
+
+	soffset = radeon_sa_bo_hole_soffset(sa_manager);
+	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+	wasted = (align - (soffset % align)) % align;
+
+	if ((eoffset - soffset) >= (size + wasted)) {
+		return true;
+	}
+
+	return false;
+}
+
+static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
+				   struct radeon_fence **fences,
+				   unsigned *tries)
+{
+	struct radeon_sa_bo *best_bo = NULL;
+	unsigned i, soffset, best, tmp;
+
+	/* if hole points to the end of the buffer */
+	if (sa_manager->hole->next == &sa_manager->olist) {
+		/* try again with its beginning */
+		sa_manager->hole = &sa_manager->olist;
+		return true;
+	}
+
+	soffset = radeon_sa_bo_hole_soffset(sa_manager);
+	/* to handle wrap around we add sa_manager->size */
+	best = sa_manager->size * 2;
+	/* go over the fence list of each ring and try to find the
+	 * sa_bo closest after the current hole
+	 */
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_sa_bo *sa_bo;
+
+		if (list_empty(&sa_manager->flist[i])) {
+			continue;
+		}
+
+		sa_bo = list_first_entry(&sa_manager->flist[i],
+					 struct radeon_sa_bo, flist);
+
+		if (!radeon_fence_signaled(sa_bo->fence)) {
+			fences[i] = sa_bo->fence;
+			continue;
+		}
+
+		/* limit the number of tries each ring gets */
+		if (tries[i] > 2) {
+			continue;
+		}
+
+		tmp = sa_bo->soffset;
+		if (tmp < soffset) {
+			/* wrap around, pretend it's after */
+			tmp += sa_manager->size;
+		}
+		tmp -= soffset;
+		if (tmp < best) {
+			/* this sa bo is the closest one */
+			best = tmp;
+			best_bo = sa_bo;
+		}
+	}
+
+	if (best_bo) {
+		++tries[best_bo->fence->ring];
+		sa_manager->hole = best_bo->olist.prev;
+
+		/* we knew that this one is signaled,
+		   so it's safe to remove it */
+		radeon_sa_bo_remove_locked(best_bo);
+		return true;
+	}
+	return false;
+}
+
+int radeon_sa_bo_new(struct radeon_device *rdev,
+		     struct radeon_sa_manager *sa_manager,
+		     struct radeon_sa_bo **sa_bo,
+		     unsigned size, unsigned align, bool block)
+{
+	struct radeon_fence *fences[RADEON_NUM_RINGS];
+	unsigned tries[RADEON_NUM_RINGS];
+	int i, r;
+
+	BUG_ON(align > sa_manager->align);
+	BUG_ON(size > sa_manager->size);
+
+	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
+	if ((*sa_bo) == NULL) {
+		return -ENOMEM;
+	}
+	(*sa_bo)->manager = sa_manager;
+	(*sa_bo)->fence = NULL;
+	INIT_LIST_HEAD(&(*sa_bo)->olist);
+	INIT_LIST_HEAD(&(*sa_bo)->flist);
+
+	spin_lock(&sa_manager->wq.lock);
+	do {
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			fences[i] = NULL;
+			tries[i] = 0;
+		}
+
+		do {
+			radeon_sa_bo_try_free(sa_manager);
+
+			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
+						   size, align)) {
+				spin_unlock(&sa_manager->wq.lock);
+				return 0;
+			}
+
+			/* see if we can skip over some allocations */
+		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+
+		spin_unlock(&sa_manager->wq.lock);
+		r = radeon_fence_wait_any(rdev, fences, false);
+		spin_lock(&sa_manager->wq.lock);
+		/* if we have nothing to wait for, block */
+		if (r == -ENOENT && block) {
+			r = wait_event_interruptible_locked(
+				sa_manager->wq,
+				radeon_sa_event(sa_manager, size, align)
+			);
+
+		} else if (r == -ENOENT) {
+			r = -ENOMEM;
+		}
+
+	} while (!r);
+
+	spin_unlock(&sa_manager->wq.lock);
+	kfree(*sa_bo);
+	*sa_bo = NULL;
+	return r;
+}
+
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+		       struct radeon_fence *fence)
+{
+	struct radeon_sa_manager *sa_manager;
+
+	if (sa_bo == NULL || *sa_bo == NULL) {
+		return;
+	}
+
+	sa_manager = (*sa_bo)->manager;
+	spin_lock(&sa_manager->wq.lock);
+	if (fence && !radeon_fence_signaled(fence)) {
+		(*sa_bo)->fence = radeon_fence_ref(fence);
+		list_add_tail(&(*sa_bo)->flist,
+			      &sa_manager->flist[fence->ring]);
+	} else {
+		radeon_sa_bo_remove_locked(*sa_bo);
+	}
+	wake_up_all_locked(&sa_manager->wq);
+	spin_unlock(&sa_manager->wq.lock);
+	*sa_bo = NULL;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+				  struct seq_file *m)
+{
+	struct radeon_sa_bo *i;
+
+	spin_lock(&sa_manager->wq.lock);
+	list_for_each_entry(i, &sa_manager->olist, olist) {
+		if (&i->olist == sa_manager->hole) {
+			seq_printf(m, ">");
+		} else {
+			seq_printf(m, " ");
+		}
+		seq_printf(m, "[0x%08x 0x%08x] size %8d",
+			   i->soffset, i->eoffset, i->eoffset - i->soffset);
+		if (i->fence) {
+			seq_printf(m, " protected by 0x%016llx on ring %d",
+				   i->fence->seq, i->fence->ring);
+		}
+		seq_printf(m, "\n");
+	}
+	spin_unlock(&sa_manager->wq.lock);
+}
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_semaphore.c b/linux-imx/drivers/gpu/drm/radeon/radeon_semaphore.c
new file mode 100644
index 0000000..8dcc20f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2011 Christian König.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <deathsimple@vodafone.de>
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+
+
+int radeon_semaphore_create(struct radeon_device *rdev,
+			    struct radeon_semaphore **semaphore)
+{
+	int r;
+
+	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
+	if (*semaphore == NULL) {
+		return -ENOMEM;
+	}
+	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+			     &(*semaphore)->sa_bo, 8, 8, true);
+	if (r) {
+		kfree(*semaphore);
+		*semaphore = NULL;
+		return r;
+	}
+	(*semaphore)->waiters = 0;
+	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
+	*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+	return 0;
+}
+
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+			          struct radeon_semaphore *semaphore)
+{
+	--semaphore->waiters;
+	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+}
+
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+			        struct radeon_semaphore *semaphore)
+{
+	++semaphore->waiters;
+	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+}
+
+/* caller must hold ring lock */
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+				struct radeon_semaphore *semaphore,
+				int signaler, int waiter)
+{
+	int r;
+
+	/* no need to signal and wait on the same ring */
+	if (signaler == waiter) {
+		return 0;
+	}
+
+	/* prevent GPU deadlocks */
+	if (!rdev->ring[signaler].ready) {
+		dev_err(rdev->dev, "Trying to sync to a disabled ring!");
+		return -EINVAL;
+	}
+
+	r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
+	if (r) {
+		return r;
+	}
+	radeon_semaphore_emit_signal(rdev, signaler, semaphore);
+	radeon_ring_commit(rdev, &rdev->ring[signaler]);
+
+	/* we assume the caller has already allocated space on the waiter's ring */
+	radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+
+	/* for debugging lockup only, used by sysfs debug files */
+	rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
+	rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+
+	return 0;
+}
+
+void radeon_semaphore_free(struct radeon_device *rdev,
+			   struct radeon_semaphore **semaphore,
+			   struct radeon_fence *fence)
+{
+	if (semaphore == NULL || *semaphore == NULL) {
+		return;
+	}
+	if ((*semaphore)->waiters > 0) {
+		dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
+			" hardware lockup imminent!\n", *semaphore);
+	}
+	radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
+	kfree(*semaphore);
+	*semaphore = NULL;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_state.c b/linux-imx/drivers/gpu/drm/radeon/radeon_state.c
new file mode 100644
index 0000000..4d20910
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_state.c
@@ -0,0 +1,3261 @@
+/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ *    Kevin E. Martin <martin@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_buffer.h>
+#include <drm/radeon_drm.h>
+#include "radeon_drv.h"
+
+/* ================================================================
+ * Helper functions for client state checking and fixup
+ */
+
+static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
+						    dev_priv,
+						    struct drm_file * file_priv,
+						    u32 *offset)
+{
+	u64 off = *offset;
+	u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1;
+	struct drm_radeon_driver_file_fields *radeon_priv;
+
+	/* Hrm ... the story of the offset ... So this function converts
+	 * the various ideas of what userland clients might have for an
+	 * offset in the card address space into an offset into the card
+	 * address space :) So with a sane client, it should just keep
+	 * the value intact and just do some boundary checking. However,
+	 * not all clients are sane. Some older clients pass us 0 based
+	 * offsets relative to the start of the framebuffer and some may
+	 * assume the AGP aperture is appended to the framebuffer, so we
+	 * try to detect those cases and fix them up.
+	 *
+	 * Note: It might be a good idea here to make sure the offset lands
+	 * in some "allowed" area to protect things like the PCIE GART...
+	 */
+
+	/* First, the best case, the offset already lands in either the
+	 * framebuffer or the GART mapped space
+	 */
+	if (radeon_check_offset(dev_priv, off))
+		return 0;
+
+	/* Ok, that didn't happen... now check if we have a zero based
+	 * offset that fits in the framebuffer + gart space, apply the
+	 * magic offset we get from SETPARAM or calculated from fb_location
+	 */
+	if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
+		radeon_priv = file_priv->driver_priv;
+		off += radeon_priv->radeon_fb_delta;
+	}
+
+	/* Finally, assume we aimed at a GART offset if beyond the fb */
+	if (off > fb_end)
+		off = off - fb_end - 1 + dev_priv->gart_vm_start;
+
+	/* Now recheck and fail if out of bounds */
+	if (radeon_check_offset(dev_priv, off)) {
+		DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off);
+		*offset = off;
+		return 0;
+	}
+	return -EINVAL;
+}
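+/*
+ * Worked example with illustrative numbers: with fb_location = 0xc0000000,
+ * fb_size = 0x08000000 and a legacy client passing the zero-based offset
+ * 0x00100000, the first check fails, radeon_fb_delta (0xc0000000 for such
+ * a client) is added, giving 0xc0100000, which then passes the recheck.
+ */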
+
+static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
+						     dev_priv,
+						     struct drm_file *file_priv,
+						     int id, struct drm_buffer *buf)
+{
+	u32 *data;
+	switch (id) {
+
+	case RADEON_EMIT_PP_MISC:
+		data = drm_buffer_pointer_to_dword(buf,
+			(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4);
+
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
+			DRM_ERROR("Invalid depth buffer offset\n");
+			return -EINVAL;
+		}
+		dev_priv->have_z_offset = 1;
+		break;
+
+	case RADEON_EMIT_PP_CNTL:
+		data = drm_buffer_pointer_to_dword(buf,
+			(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4);
+
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
+			DRM_ERROR("Invalid colour buffer offset\n");
+			return -EINVAL;
+		}
+		break;
+
+	case R200_EMIT_PP_TXOFFSET_0:
+	case R200_EMIT_PP_TXOFFSET_1:
+	case R200_EMIT_PP_TXOFFSET_2:
+	case R200_EMIT_PP_TXOFFSET_3:
+	case R200_EMIT_PP_TXOFFSET_4:
+	case R200_EMIT_PP_TXOFFSET_5:
+		data = drm_buffer_pointer_to_dword(buf, 0);
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
+			DRM_ERROR("Invalid R200 texture offset\n");
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_EMIT_PP_TXFILTER_0:
+	case RADEON_EMIT_PP_TXFILTER_1:
+	case RADEON_EMIT_PP_TXFILTER_2:
+		data = drm_buffer_pointer_to_dword(buf,
+			(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4);
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
+			DRM_ERROR("Invalid R100 texture offset\n");
+			return -EINVAL;
+		}
+		break;
+
+	case R200_EMIT_PP_CUBIC_OFFSETS_0:
+	case R200_EMIT_PP_CUBIC_OFFSETS_1:
+	case R200_EMIT_PP_CUBIC_OFFSETS_2:
+	case R200_EMIT_PP_CUBIC_OFFSETS_3:
+	case R200_EMIT_PP_CUBIC_OFFSETS_4:
+	case R200_EMIT_PP_CUBIC_OFFSETS_5:{
+			int i;
+			for (i = 0; i < 5; i++) {
+				data = drm_buffer_pointer_to_dword(buf, i);
+				if (radeon_check_and_fixup_offset(dev_priv,
+								  file_priv,
+								  data)) {
+					DRM_ERROR
+					    ("Invalid R200 cubic texture offset\n");
+					return -EINVAL;
+				}
+			}
+			break;
+		}
+
+	case RADEON_EMIT_PP_CUBIC_OFFSETS_T0:
+	case RADEON_EMIT_PP_CUBIC_OFFSETS_T1:
+	case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
+			int i;
+			for (i = 0; i < 5; i++) {
+				data = drm_buffer_pointer_to_dword(buf, i);
+				if (radeon_check_and_fixup_offset(dev_priv,
+								  file_priv,
+								  data)) {
+					DRM_ERROR
+					    ("Invalid R100 cubic texture offset\n");
+					return -EINVAL;
+				}
+			}
+		}
+		break;
+
+	case R200_EMIT_VAP_CTL:{
+			RING_LOCALS;
+			BEGIN_RING(2);
+			OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+			ADVANCE_RING();
+		}
+		break;
+
+	case RADEON_EMIT_RB3D_COLORPITCH:
+	case RADEON_EMIT_RE_LINE_PATTERN:
+	case RADEON_EMIT_SE_LINE_WIDTH:
+	case RADEON_EMIT_PP_LUM_MATRIX:
+	case RADEON_EMIT_PP_ROT_MATRIX_0:
+	case RADEON_EMIT_RB3D_STENCILREFMASK:
+	case RADEON_EMIT_SE_VPORT_XSCALE:
+	case RADEON_EMIT_SE_CNTL:
+	case RADEON_EMIT_SE_CNTL_STATUS:
+	case RADEON_EMIT_RE_MISC:
+	case RADEON_EMIT_PP_BORDER_COLOR_0:
+	case RADEON_EMIT_PP_BORDER_COLOR_1:
+	case RADEON_EMIT_PP_BORDER_COLOR_2:
+	case RADEON_EMIT_SE_ZBIAS_FACTOR:
+	case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
+	case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
+	case R200_EMIT_PP_TXCBLEND_0:
+	case R200_EMIT_PP_TXCBLEND_1:
+	case R200_EMIT_PP_TXCBLEND_2:
+	case R200_EMIT_PP_TXCBLEND_3:
+	case R200_EMIT_PP_TXCBLEND_4:
+	case R200_EMIT_PP_TXCBLEND_5:
+	case R200_EMIT_PP_TXCBLEND_6:
+	case R200_EMIT_PP_TXCBLEND_7:
+	case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
+	case R200_EMIT_TFACTOR_0:
+	case R200_EMIT_VTX_FMT_0:
+	case R200_EMIT_MATRIX_SELECT_0:
+	case R200_EMIT_TEX_PROC_CTL_2:
+	case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
+	case R200_EMIT_PP_TXFILTER_0:
+	case R200_EMIT_PP_TXFILTER_1:
+	case R200_EMIT_PP_TXFILTER_2:
+	case R200_EMIT_PP_TXFILTER_3:
+	case R200_EMIT_PP_TXFILTER_4:
+	case R200_EMIT_PP_TXFILTER_5:
+	case R200_EMIT_VTE_CNTL:
+	case R200_EMIT_OUTPUT_VTX_COMP_SEL:
+	case R200_EMIT_PP_TAM_DEBUG3:
+	case R200_EMIT_PP_CNTL_X:
+	case R200_EMIT_RB3D_DEPTHXY_OFFSET:
+	case R200_EMIT_RE_AUX_SCISSOR_CNTL:
+	case R200_EMIT_RE_SCISSOR_TL_0:
+	case R200_EMIT_RE_SCISSOR_TL_1:
+	case R200_EMIT_RE_SCISSOR_TL_2:
+	case R200_EMIT_SE_VAP_CNTL_STATUS:
+	case R200_EMIT_SE_VTX_STATE_CNTL:
+	case R200_EMIT_RE_POINTSIZE:
+	case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
+	case R200_EMIT_PP_CUBIC_FACES_0:
+	case R200_EMIT_PP_CUBIC_FACES_1:
+	case R200_EMIT_PP_CUBIC_FACES_2:
+	case R200_EMIT_PP_CUBIC_FACES_3:
+	case R200_EMIT_PP_CUBIC_FACES_4:
+	case R200_EMIT_PP_CUBIC_FACES_5:
+	case RADEON_EMIT_PP_TEX_SIZE_0:
+	case RADEON_EMIT_PP_TEX_SIZE_1:
+	case RADEON_EMIT_PP_TEX_SIZE_2:
+	case R200_EMIT_RB3D_BLENDCOLOR:
+	case R200_EMIT_TCL_POINT_SPRITE_CNTL:
+	case RADEON_EMIT_PP_CUBIC_FACES_0:
+	case RADEON_EMIT_PP_CUBIC_FACES_1:
+	case RADEON_EMIT_PP_CUBIC_FACES_2:
+	case R200_EMIT_PP_TRI_PERF_CNTL:
+	case R200_EMIT_PP_AFS_0:
+	case R200_EMIT_PP_AFS_1:
+	case R200_EMIT_ATF_TFACTOR:
+	case R200_EMIT_PP_TXCTLALL_0:
+	case R200_EMIT_PP_TXCTLALL_1:
+	case R200_EMIT_PP_TXCTLALL_2:
+	case R200_EMIT_PP_TXCTLALL_3:
+	case R200_EMIT_PP_TXCTLALL_4:
+	case R200_EMIT_PP_TXCTLALL_5:
+	case R200_EMIT_VAP_PVS_CNTL:
+		/* These packets don't contain memory offsets */
+		break;
+
+	default:
+		DRM_ERROR("Unknown state packet ID %d\n", id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int radeon_check_and_fixup_packet3(drm_radeon_private_t *
+					  dev_priv,
+					  struct drm_file *file_priv,
+					  drm_radeon_kcmd_buffer_t *
+					  cmdbuf,
+					  unsigned int *cmdsz)
+{
+	u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+	u32 offset, narrays;
+	int count, i, k;
+
+	count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+	*cmdsz = 2 + count;
+
+	if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) {
+		DRM_ERROR("Not a type 3 packet\n");
+		return -EINVAL;
+	}
+
+	if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
+		DRM_ERROR("Packet size larger than size of data provided\n");
+		return -EINVAL;
+	}
+
+	switch (*cmd & 0xff00) {
+	/* XXX Are there old drivers needing other packets? */
+
+	case RADEON_3D_DRAW_IMMD:
+	case RADEON_3D_DRAW_VBUF:
+	case RADEON_3D_DRAW_INDX:
+	case RADEON_WAIT_FOR_IDLE:
+	case RADEON_CP_NOP:
+	case RADEON_3D_CLEAR_ZMASK:
+/*	case RADEON_CP_NEXT_CHAR:
+	case RADEON_CP_PLY_NEXTSCAN:
+	case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? */
+		/* these packets are safe */
+		break;
+
+	case RADEON_CP_3D_DRAW_IMMD_2:
+	case RADEON_CP_3D_DRAW_VBUF_2:
+	case RADEON_CP_3D_DRAW_INDX_2:
+	case RADEON_3D_CLEAR_HIZ:
+		/* safe but r200 only */
+		if (dev_priv->microcode_version != UCODE_R200) {
+			DRM_ERROR("Invalid 3d packet for r100-class chip\n");
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_3D_LOAD_VBPNTR:
+
+		if (count > 18) { /* 12 arrays max */
+			DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
+				  count);
+			return -EINVAL;
+		}
+
+		/* carefully check packet contents */
+		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+
+		narrays = *cmd & ~0xc000;
+		k = 0;
+		i = 2;
+		while ((k < narrays) && (i < (count + 2))) {
+			i++;		/* skip attribute field */
+			cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+			if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+							  cmd)) {
+				DRM_ERROR
+				    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
+				     k, i);
+				return -EINVAL;
+			}
+			k++;
+			i++;
+			if (k == narrays)
+				break;
+			/* have one more to process, they come in pairs */
+			cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+
+			if (radeon_check_and_fixup_offset(dev_priv,
+							  file_priv, cmd))
+			{
+				DRM_ERROR
+				    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
+				     k, i);
+				return -EINVAL;
+			}
+			k++;
+			i++;
+		}
+		/* do the counts match what we expect? */
+		if ((k != narrays) || (i != (count + 2))) {
+			DRM_ERROR
+			    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
+			      k, i, narrays, count + 1);
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_3D_RNDR_GEN_INDX_PRIM:
+		if (dev_priv->microcode_version != UCODE_R100) {
+			DRM_ERROR("Invalid 3d packet for r200-class chip\n");
+			return -EINVAL;
+		}
+
+		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
+				DRM_ERROR("Invalid rndr_gen_indx offset\n");
+				return -EINVAL;
+		}
+		break;
+
+	case RADEON_CP_INDX_BUFFER:
+		if (dev_priv->microcode_version != UCODE_R200) {
+			DRM_ERROR("Invalid 3d packet for r100-class chip\n");
+			return -EINVAL;
+		}
+
+		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+		if ((*cmd & 0x8000ffff) != 0x80000810) {
+			DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);
+			return -EINVAL;
+		}
+		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
+			DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_CNTL_HOSTDATA_BLT:
+	case RADEON_CNTL_PAINT_MULTI:
+	case RADEON_CNTL_BITBLT_MULTI:
+		/* MSB of opcode: next DWORD GUI_CNTL */
+		cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+		if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+			u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+			offset = *cmd2 << 10;
+			if (radeon_check_and_fixup_offset
+			    (dev_priv, file_priv, &offset)) {
+				DRM_ERROR("Invalid first packet offset\n");
+				return -EINVAL;
+			}
+			*cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;
+		}
+
+		if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+		    (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+			u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+			offset = *cmd3 << 10;
+			if (radeon_check_and_fixup_offset
+			    (dev_priv, file_priv, &offset)) {
+				DRM_ERROR("Invalid second packet offset\n");
+				return -EINVAL;
+			}
+			*cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;
+		}
+		break;
+
+	default:
+		DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* ================================================================
+ * CP hardware state programming functions
+ */
+
+static void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
+				  struct drm_clip_rect * box)
+{
+	RING_LOCALS;
+
+	DRM_DEBUG("   box:  x1=%d y1=%d  x2=%d y2=%d\n",
+		  box->x1, box->y1, box->x2, box->y2);
+
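+	/* drm_clip_rect bounds are exclusive, but the hardware apparently
+	 * wants the inclusive bottom-right corner, hence the "- 1" below. */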
+	BEGIN_RING(4);
+	OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
+	OUT_RING((box->y1 << 16) | box->x1);
+	OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
+	OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1));
+	ADVANCE_RING();
+}
+
+/* Emit 1.1 state
+ */
+static int radeon_emit_state(drm_radeon_private_t * dev_priv,
+			     struct drm_file *file_priv,
+			     drm_radeon_context_regs_t * ctx,
+			     drm_radeon_texture_regs_t * tex,
+			     unsigned int dirty)
+{
+	RING_LOCALS;
+	DRM_DEBUG("dirty=0x%08x\n", dirty);
+
+	if (dirty & RADEON_UPLOAD_CONTEXT) {
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &ctx->rb3d_depthoffset)) {
+			DRM_ERROR("Invalid depth buffer offset\n");
+			return -EINVAL;
+		}
+
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &ctx->rb3d_coloroffset)) {
+			DRM_ERROR("Invalid depth buffer offset\n");
+			return -EINVAL;
+		}
+
+		BEGIN_RING(14);
+		OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6));
+		OUT_RING(ctx->pp_misc);
+		OUT_RING(ctx->pp_fog_color);
+		OUT_RING(ctx->re_solid_color);
+		OUT_RING(ctx->rb3d_blendcntl);
+		OUT_RING(ctx->rb3d_depthoffset);
+		OUT_RING(ctx->rb3d_depthpitch);
+		OUT_RING(ctx->rb3d_zstencilcntl);
+		OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2));
+		OUT_RING(ctx->pp_cntl);
+		OUT_RING(ctx->rb3d_cntl);
+		OUT_RING(ctx->rb3d_coloroffset);
+		OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
+		OUT_RING(ctx->rb3d_colorpitch);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_VERTFMT) {
+		BEGIN_RING(2);
+		OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0));
+		OUT_RING(ctx->se_coord_fmt);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_LINE) {
+		BEGIN_RING(5);
+		OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1));
+		OUT_RING(ctx->re_line_pattern);
+		OUT_RING(ctx->re_line_state);
+		OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0));
+		OUT_RING(ctx->se_line_width);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_BUMPMAP) {
+		BEGIN_RING(5);
+		OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0));
+		OUT_RING(ctx->pp_lum_matrix);
+		OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1));
+		OUT_RING(ctx->pp_rot_matrix_0);
+		OUT_RING(ctx->pp_rot_matrix_1);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_MASKS) {
+		BEGIN_RING(4);
+		OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2));
+		OUT_RING(ctx->rb3d_stencilrefmask);
+		OUT_RING(ctx->rb3d_ropcntl);
+		OUT_RING(ctx->rb3d_planemask);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_VIEWPORT) {
+		BEGIN_RING(7);
+		OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5));
+		OUT_RING(ctx->se_vport_xscale);
+		OUT_RING(ctx->se_vport_xoffset);
+		OUT_RING(ctx->se_vport_yscale);
+		OUT_RING(ctx->se_vport_yoffset);
+		OUT_RING(ctx->se_vport_zscale);
+		OUT_RING(ctx->se_vport_zoffset);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_SETUP) {
+		BEGIN_RING(4);
+		OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0));
+		OUT_RING(ctx->se_cntl);
+		OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0));
+		OUT_RING(ctx->se_cntl_status);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_MISC) {
+		BEGIN_RING(2);
+		OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0));
+		OUT_RING(ctx->re_misc);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_TEX0) {
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &tex[0].pp_txoffset)) {
+			DRM_ERROR("Invalid texture offset for unit 0\n");
+			return -EINVAL;
+		}
+
+		BEGIN_RING(9);
+		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5));
+		OUT_RING(tex[0].pp_txfilter);
+		OUT_RING(tex[0].pp_txformat);
+		OUT_RING(tex[0].pp_txoffset);
+		OUT_RING(tex[0].pp_txcblend);
+		OUT_RING(tex[0].pp_txablend);
+		OUT_RING(tex[0].pp_tfactor);
+		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0));
+		OUT_RING(tex[0].pp_border_color);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_TEX1) {
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &tex[1].pp_txoffset)) {
+			DRM_ERROR("Invalid texture offset for unit 1\n");
+			return -EINVAL;
+		}
+
+		BEGIN_RING(9);
+		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5));
+		OUT_RING(tex[1].pp_txfilter);
+		OUT_RING(tex[1].pp_txformat);
+		OUT_RING(tex[1].pp_txoffset);
+		OUT_RING(tex[1].pp_txcblend);
+		OUT_RING(tex[1].pp_txablend);
+		OUT_RING(tex[1].pp_tfactor);
+		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0));
+		OUT_RING(tex[1].pp_border_color);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_TEX2) {
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &tex[2].pp_txoffset)) {
+			DRM_ERROR("Invalid texture offset for unit 2\n");
+			return -EINVAL;
+		}
+
+		BEGIN_RING(9);
+		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5));
+		OUT_RING(tex[2].pp_txfilter);
+		OUT_RING(tex[2].pp_txformat);
+		OUT_RING(tex[2].pp_txoffset);
+		OUT_RING(tex[2].pp_txcblend);
+		OUT_RING(tex[2].pp_txablend);
+		OUT_RING(tex[2].pp_tfactor);
+		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0));
+		OUT_RING(tex[2].pp_border_color);
+		ADVANCE_RING();
+	}
+
+	return 0;
+}
+
+/* Emit 1.2 state
+ */
+static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
+			      struct drm_file *file_priv,
+			      drm_radeon_state_t * state)
+{
+	RING_LOCALS;
+
+	if (state->dirty & RADEON_UPLOAD_ZBIAS) {
+		BEGIN_RING(3);
+		OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1));
+		OUT_RING(state->context2.se_zbias_factor);
+		OUT_RING(state->context2.se_zbias_constant);
+		ADVANCE_RING();
+	}
+
+	return radeon_emit_state(dev_priv, file_priv, &state->context,
+				 state->tex, state->dirty);
+}
+
+/* New (1.3) state mechanism.  3 commands (packet, scalar, vector) in
+ * 1.3 cmdbuffers allow all previous state to be updated as well as
+ * the tcl scalar and vector areas.
+ */
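+/* NOTE: the index into this table is the packet ID taken from the
+ * command stream, so entries must stay in exactly this order.
+ */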
+static struct {
+	int start;
+	int len;
+	const char *name;
+} packet[RADEON_MAX_STATE_PACKETS] = {
+	{RADEON_PP_MISC, 7, "RADEON_PP_MISC"},
+	{RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"},
+	{RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"},
+	{RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"},
+	{RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"},
+	{RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"},
+	{RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"},
+	{RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"},
+	{RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"},
+	{RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"},
+	{RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"},
+	{RADEON_RE_MISC, 1, "RADEON_RE_MISC"},
+	{RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"},
+	{RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"},
+	{RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"},
+	{RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"},
+	{RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"},
+	{RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"},
+	{RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
+	{RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
+	{RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
+		    "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
+	{R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
+	{R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
+	{R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
+	{R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"},
+	{R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"},
+	{R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"},
+	{R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"},
+	{R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"},
+	{R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
+	{R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"},
+	{R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"},
+	{R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"},
+	{R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"},
+	{R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"},
+	{R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
+	{R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"},
+	{R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"},
+	{R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"},
+	{R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"},
+	{R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"},
+	{R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"},
+	{R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"},
+	{R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"},
+	{R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"},
+	{R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"},
+	{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
+	{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
+	{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
+	{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
+	 "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
+	{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
+	{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
+	{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
+	{R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"},
+	{R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"},
+	{R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"},
+	{R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"},
+	{R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"},
+	{R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
+	{R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
+	{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
+		    "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
+	{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"},	/* 61 */
+	{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
+	{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
+	{R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
+	{R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
+	{R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"},
+	{R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"},
+	{R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"},
+	{R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"},
+	{R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"},
+	{R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"},
+	{R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"},
+	{RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"},
+	{RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"},
+	{RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"},
+	{R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},
+	{R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
+	{RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
+	{RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
+	{RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
+	{RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
+	{RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
+	{RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
+	{R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
+	{R200_PP_AFS_0, 32, "R200_PP_AFS_0"},     /* 85 */
+	{R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
+	{R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
+	{R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
+	{R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"},
+	{R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"},
+	{R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"},
+	{R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"},
+	{R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"},
+	{R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"},
+};
+
+/* ================================================================
+ * Performance monitoring functions
+ */
+
+static void radeon_clear_box(drm_radeon_private_t * dev_priv,
+			     struct drm_radeon_master_private *master_priv,
+			     int x, int y, int w, int h, int r, int g, int b)
+{
+	u32 color;
+	RING_LOCALS;
+
+	x += master_priv->sarea_priv->boxes[0].x1;
+	y += master_priv->sarea_priv->boxes[0].y1;
+
+	switch (dev_priv->color_fmt) {
+	case RADEON_COLOR_FORMAT_RGB565:
+		color = (((r & 0xf8) << 8) |
+			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
+		break;
+	case RADEON_COLOR_FORMAT_ARGB8888:
+	default:
+		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
+		break;
+	}
+
+	BEGIN_RING(4);
+	RADEON_WAIT_UNTIL_3D_IDLE();
+	OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
+	OUT_RING(0xffffffff);
+	ADVANCE_RING();
+
+	BEGIN_RING(6);
+
+	OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
+	OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+		 RADEON_GMC_BRUSH_SOLID_COLOR |
+		 (dev_priv->color_fmt << 8) |
+		 RADEON_GMC_SRC_DATATYPE_COLOR |
+		 RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+	if (master_priv->sarea_priv->pfCurrentPage == 1) {
+		OUT_RING(dev_priv->front_pitch_offset);
+	} else {
+		OUT_RING(dev_priv->back_pitch_offset);
+	}
+
+	OUT_RING(color);
+
+	OUT_RING((x << 16) | y);
+	OUT_RING((w << 16) | h);
+
+	ADVANCE_RING();
+}
+
+static void radeon_cp_performance_boxes(drm_radeon_private_t *dev_priv, struct drm_radeon_master_private *master_priv)
+{
+	/* Collapse various things into a wait flag -- trying to
+	 * guess if userspace slept -- better just to have them tell us.
+	 */
+	if (dev_priv->stats.last_frame_reads > 1 ||
+	    dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
+		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+	}
+
+	if (dev_priv->stats.freelist_loops) {
+		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+	}
+
+	/* Purple box for page flipping
+	 */
+	if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
+		radeon_clear_box(dev_priv, master_priv, 4, 4, 8, 8, 255, 0, 255);
+
+	/* Red box if we have to wait for idle at any point
+	 */
+	if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
+		radeon_clear_box(dev_priv, master_priv, 16, 4, 8, 8, 255, 0, 0);
+
+	/* Blue box: lost context?
+	 */
+
+	/* Yellow box for texture swaps
+	 */
+	if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
+		radeon_clear_box(dev_priv, master_priv, 40, 4, 8, 8, 255, 255, 0);
+
+	/* Green box if hardware never idles (as far as we can tell)
+	 */
+	if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
+		radeon_clear_box(dev_priv, master_priv, 64, 4, 8, 8, 0, 255, 0);
+
+	/* Draw bars indicating number of buffers allocated
+	 * (not a great measure, easily confused)
+	 */
+	if (dev_priv->stats.requested_bufs) {
+		if (dev_priv->stats.requested_bufs > 100)
+			dev_priv->stats.requested_bufs = 100;
+
+		radeon_clear_box(dev_priv, master_priv, 4, 16,
+				 dev_priv->stats.requested_bufs, 4,
+				 196, 128, 128);
+	}
+
+	memset(&dev_priv->stats, 0, sizeof(dev_priv->stats));
+
+}
+
+/* ================================================================
+ * CP command dispatch functions
+ */
+
+static void radeon_cp_dispatch_clear(struct drm_device * dev,
+				     struct drm_master *master,
+				     drm_radeon_clear_t * clear,
+				     drm_radeon_clear_rect_t * depth_boxes)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+	drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	unsigned int flags = clear->flags;
+	u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("flags = 0x%x\n", flags);
+
+	dev_priv->stats.clears++;
+
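+	/* With the page flipped, the logical front buffer lives at the
+	 * back buffer offset (and vice versa), so swap the flags. */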
+	if (sarea_priv->pfCurrentPage == 1) {
+		unsigned int tmp = flags;
+
+		flags &= ~(RADEON_FRONT | RADEON_BACK);
+		if (tmp & RADEON_FRONT)
+			flags |= RADEON_BACK;
+		if (tmp & RADEON_BACK)
+			flags |= RADEON_FRONT;
+	}
+	if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
+		if (!dev_priv->have_z_offset) {
+			printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
+			flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+		}
+	}
+
+	if (flags & (RADEON_FRONT | RADEON_BACK)) {
+
+		BEGIN_RING(4);
+
+		/* Ensure the 3D stream is idle before doing a
+		 * 2D fill to clear the front or back buffer.
+		 */
+		RADEON_WAIT_UNTIL_3D_IDLE();
+
+		OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
+		OUT_RING(clear->color_mask);
+
+		ADVANCE_RING();
+
+		/* Make sure we restore the 3D state next time.
+		 */
+		sarea_priv->ctx_owner = 0;
+
+		for (i = 0; i < nbox; i++) {
+			int x = pbox[i].x1;
+			int y = pbox[i].y1;
+			int w = pbox[i].x2 - x;
+			int h = pbox[i].y2 - y;
+
+			DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",
+				  x, y, w, h, flags);
+
+			if (flags & RADEON_FRONT) {
+				BEGIN_RING(6);
+
+				OUT_RING(CP_PACKET3
+					 (RADEON_CNTL_PAINT_MULTI, 4));
+				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+					 RADEON_GMC_BRUSH_SOLID_COLOR |
+					 (dev_priv->color_fmt << 8) |
+					 RADEON_GMC_SRC_DATATYPE_COLOR |
+					 RADEON_ROP3_P |
+					 RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+				OUT_RING(dev_priv->front_pitch_offset);
+				OUT_RING(clear->clear_color);
+
+				OUT_RING((x << 16) | y);
+				OUT_RING((w << 16) | h);
+
+				ADVANCE_RING();
+			}
+
+			if (flags & RADEON_BACK) {
+				BEGIN_RING(6);
+
+				OUT_RING(CP_PACKET3
+					 (RADEON_CNTL_PAINT_MULTI, 4));
+				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+					 RADEON_GMC_BRUSH_SOLID_COLOR |
+					 (dev_priv->color_fmt << 8) |
+					 RADEON_GMC_SRC_DATATYPE_COLOR |
+					 RADEON_ROP3_P |
+					 RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+				OUT_RING(dev_priv->back_pitch_offset);
+				OUT_RING(clear->clear_color);
+
+				OUT_RING((x << 16) | y);
+				OUT_RING((w << 16) | h);
+
+				ADVANCE_RING();
+			}
+		}
+	}
+
+	/* hyper z clear */
+	/* no docs available, based on reverse engineering by Stephane Marchesin */
+	if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
+	    && (flags & RADEON_CLEAR_FASTZ)) {
+
+		int i;
+		int depthpixperline =
+		    (dev_priv->depth_fmt == RADEON_DEPTH_FORMAT_16BIT_INT_Z) ?
+		    (dev_priv->depth_pitch / 2) : (dev_priv->depth_pitch / 4);
+
+		u32 clearmask;
+
+		u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
+		    ((clear->depth_mask & 0xff) << 24);
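+		/* The stencil clear value (carried in the misnamed
+		 * depth_mask field) occupies the byte above the 24-bit
+		 * depth value. */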
+
+		/* Make sure we restore the 3D state next time.
+		 * we haven't touched any "normal" state - still need this?
+		 */
+		sarea_priv->ctx_owner = 0;
+
+		if ((dev_priv->flags & RADEON_HAS_HIERZ)
+		    && (flags & RADEON_USE_HIERZ)) {
+			/* FIXME : reverse engineer that for Rx00 cards */
+			/* FIXME : the mask supposedly contains low-res z values. So can't set
+			   just to the max (0xff? or actually 0x3fff?), need to take z clear
+			   value into account? */
+			/* The pattern seems to work for r100, though we get
+			   slight rendering errors with glxgears. If hierz is not
+			   enabled for r100, only the 4 bits which indicate clear
+			   (15, 16, 31, 32, all zero) matter; the others are
+			   ignored, and the same clear mask can be used. That's
+			   very different behaviour from R200, which needs a
+			   different clear mask and a different number of tiles
+			   to clear depending on whether hierz is enabled !?!
+			 */
+			clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
+		} else {
+			/* clear mask: chooses the clearing pattern.
+			   rv250: could be used to clear only parts of macrotiles
+			   (but that would get really complicated...)?
+			   bits 0 and 1 (either or both of them?!) are used to
+			   not clear tile 0 (or maybe one of the bits indicates
+			   whether the tile is compressed), bits 2 and 3 to not
+			   clear tile 1, and so on.
+			   Pattern is as follows:
+			   | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
+			   bits -------------------------------------------------
+			   | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
+			   rv100: clearmask covers 2x8 4x1 tiles, but one clear still
+			   covers 256 pixels ?!?
+			 */
+			clearmask = 0x0;
+		}
+
+		BEGIN_RING(8);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+		OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
+			     tempRB3D_DEPTHCLEARVALUE);
+		/* what offset is this exactly ? */
+		OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
+		/* need ctlstat, otherwise get some strange black flickering */
+		OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
+			     RADEON_RB3D_ZC_FLUSH_ALL);
+		ADVANCE_RING();
+
+		for (i = 0; i < nbox; i++) {
+			int tileoffset, nrtilesx, nrtilesy, j;
+			/* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
+			if ((dev_priv->flags & RADEON_HAS_HIERZ)
+			    && !(dev_priv->microcode_version == UCODE_R200)) {
+				/* FIXME : figure this out for r200 (when hierz is enabled). Or
+				   maybe r200 actually doesn't need to put the low-res z value into
+				   the tile cache like r100, but just needs to clear the hi-level z-buffer?
+				   Works for R100, both with hierz and without.
+				   R100 seems to operate on 2x1 8x8 tiles, but...
+				   odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
+				   problematic with resolutions which are not 64 pix aligned? */
+				tileoffset =
+				    ((pbox[i].y1 >> 3) * depthpixperline +
+				     pbox[i].x1) >> 6;
+				nrtilesx =
+				    ((pbox[i].x2 & ~63) -
+				     (pbox[i].x1 & ~63)) >> 4;
+				nrtilesy =
+				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
+				for (j = 0; j <= nrtilesy; j++) {
+					BEGIN_RING(4);
+					OUT_RING(CP_PACKET3
+						 (RADEON_3D_CLEAR_ZMASK, 2));
+					/* first tile */
+					OUT_RING(tileoffset * 8);
+					/* the number of tiles to clear */
+					OUT_RING(nrtilesx + 4);
+					/* clear mask : chooses the clearing pattern. */
+					OUT_RING(clearmask);
+					ADVANCE_RING();
+					tileoffset += depthpixperline >> 6;
+				}
+			} else if (dev_priv->microcode_version == UCODE_R200) {
+				/* works for rv250. */
+				/* find first macro tile (8x2 4x4 z-pixels on rv250) */
+				tileoffset =
+				    ((pbox[i].y1 >> 3) * depthpixperline +
+				     pbox[i].x1) >> 5;
+				nrtilesx =
+				    (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
+				nrtilesy =
+				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
+				for (j = 0; j <= nrtilesy; j++) {
+					BEGIN_RING(4);
+					OUT_RING(CP_PACKET3
+						 (RADEON_3D_CLEAR_ZMASK, 2));
+					/* first tile */
+					/* judging by the first tile offset needed, could possibly
+					   directly address/clear 4x4 tiles instead of 8x2 * 4x4
+					   macro tiles, though would still need clear mask for
+					   right/bottom if truly 4x4 granularity is desired ? */
+					OUT_RING(tileoffset * 16);
+					/* the number of tiles to clear */
+					OUT_RING(nrtilesx + 1);
+					/* clear mask : chooses the clearing pattern. */
+					OUT_RING(clearmask);
+					ADVANCE_RING();
+					tileoffset += depthpixperline >> 5;
+				}
+			} else {	/* rv 100 */
+				/* rv100 might not need 64 pix alignment, who knows */
+				/* offsets are, hmm, weird */
+				tileoffset =
+				    ((pbox[i].y1 >> 4) * depthpixperline +
+				     pbox[i].x1) >> 6;
+				nrtilesx =
+				    ((pbox[i].x2 & ~63) -
+				     (pbox[i].x1 & ~63)) >> 4;
+				nrtilesy =
+				    (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
+				for (j = 0; j <= nrtilesy; j++) {
+					BEGIN_RING(4);
+					OUT_RING(CP_PACKET3
+						 (RADEON_3D_CLEAR_ZMASK, 2));
+					OUT_RING(tileoffset * 128);
+					/* the number of tiles to clear */
+					OUT_RING(nrtilesx + 4);
+					/* clear mask : chooses the clearing pattern. */
+					OUT_RING(clearmask);
+					ADVANCE_RING();
+					tileoffset += depthpixperline >> 6;
+				}
+			}
+		}
+
+		/* TODO don't always clear all hi-level z tiles */
+		if ((dev_priv->flags & RADEON_HAS_HIERZ)
+		    && (dev_priv->microcode_version == UCODE_R200)
+		    && (flags & RADEON_USE_HIERZ)) {
+			/* r100 and cards without hierarchical z-buffer have no
+			   high-level z-buffer */
+			/* FIXME : the mask supposedly contains low-res z values.
+			   So can't set just to the max (0xff? or actually
+			   0x3fff?), need to take z clear value into account? */
+			BEGIN_RING(4);
+			OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
+			OUT_RING(0x0);	/* First tile */
+			OUT_RING(0x3cc0);
+			OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
+			ADVANCE_RING();
+		}
+	}
+
+	/* We have to clear the depth and/or stencil buffers by
+	 * rendering a quad into just those buffers.  Thus, we have to
+	 * make sure the 3D engine is configured correctly.
+	 */
+	else if ((dev_priv->microcode_version == UCODE_R200) &&
+		(flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+
+		int tempPP_CNTL;
+		int tempRE_CNTL;
+		int tempRB3D_CNTL;
+		int tempRB3D_ZSTENCILCNTL;
+		int tempRB3D_STENCILREFMASK;
+		int tempRB3D_PLANEMASK;
+		int tempSE_CNTL;
+		int tempSE_VTE_CNTL;
+		int tempSE_VTX_FMT_0;
+		int tempSE_VTX_FMT_1;
+		int tempSE_VAP_CNTL;
+		int tempRE_AUX_SCISSOR_CNTL;
+
+		tempPP_CNTL = 0;
+		tempRE_CNTL = 0;
+
+		tempRB3D_CNTL = depth_clear->rb3d_cntl;
+
+		tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
+		tempRB3D_STENCILREFMASK = 0x0;
+
+		tempSE_CNTL = depth_clear->se_cntl;
+
+		/* Disable TCL */
+
+		tempSE_VAP_CNTL = (	/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */
+				   (0x9 << SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));
+
+		tempRB3D_PLANEMASK = 0x0;
+
+		tempRE_AUX_SCISSOR_CNTL = 0x0;
+
+		tempSE_VTE_CNTL =
+		    SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK;
+
+		/* Vertex format (X, Y, Z, W) */
+		tempSE_VTX_FMT_0 =
+		    SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
+		    SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
+		tempSE_VTX_FMT_1 = 0x0;
+
+		/*
+		 * Depth buffer specific enables
+		 */
+		if (flags & RADEON_DEPTH) {
+			/* Enable depth buffer */
+			tempRB3D_CNTL |= RADEON_Z_ENABLE;
+		} else {
+			/* Disable depth buffer */
+			tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
+		}
+
+		/*
+		 * Stencil buffer specific enables
+		 */
+		if (flags & RADEON_STENCIL) {
+			tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
+			tempRB3D_STENCILREFMASK = clear->depth_mask;
+		} else {
+			tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
+			tempRB3D_STENCILREFMASK = 0x00000000;
+		}
+
+		if (flags & RADEON_USE_COMP_ZBUF) {
+			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
+			    RADEON_Z_DECOMPRESSION_ENABLE;
+		}
+		if (flags & RADEON_USE_HIERZ) {
+			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
+		}
+
+		BEGIN_RING(26);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+
+		OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL);
+		OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL);
+		OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL);
+		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
+		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK,
+			     tempRB3D_STENCILREFMASK);
+		OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK);
+		OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL);
+		OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL);
+		OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0);
+		OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1);
+		OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL);
+		OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL);
+		ADVANCE_RING();
+
+		/* Make sure we restore the 3D state next time.
+		 */
+		sarea_priv->ctx_owner = 0;
+
+		for (i = 0; i < nbox; i++) {
+
+			/* Funny that this should be required --
+			 *  sets top-left?
+			 */
+			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+			BEGIN_RING(14);
+			OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12));
+			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
+				  RADEON_PRIM_WALK_RING |
+				  (3 << RADEON_NUM_VERTICES_SHIFT)));
+			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x3f800000);
+			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x3f800000);
+			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x3f800000);
+			ADVANCE_RING();
+		}
+	} else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+
+		int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
+
+		rb3d_cntl = depth_clear->rb3d_cntl;
+
+		if (flags & RADEON_DEPTH) {
+			rb3d_cntl |= RADEON_Z_ENABLE;
+		} else {
+			rb3d_cntl &= ~RADEON_Z_ENABLE;
+		}
+
+		if (flags & RADEON_STENCIL) {
+			rb3d_cntl |= RADEON_STENCIL_ENABLE;
+			rb3d_stencilrefmask = clear->depth_mask;	/* misnamed field */
+		} else {
+			rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
+			rb3d_stencilrefmask = 0x00000000;
+		}
+
+		if (flags & RADEON_USE_COMP_ZBUF) {
+			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
+			    RADEON_Z_DECOMPRESSION_ENABLE;
+		}
+		if (flags & RADEON_USE_HIERZ) {
+			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
+		}
+
+		BEGIN_RING(13);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+
+		OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1));
+		OUT_RING(0x00000000);
+		OUT_RING(rb3d_cntl);
+
+		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
+		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask);
+		OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000);
+		OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl);
+		ADVANCE_RING();
+
+		/* Make sure we restore the 3D state next time.
+		 */
+		sarea_priv->ctx_owner = 0;
+
+		for (i = 0; i < nbox; i++) {
+
+			/* Funny that this should be required --
+			 *  sets top-left?
+			 */
+			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+			BEGIN_RING(15);
+
+			OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13));
+			OUT_RING(RADEON_VTX_Z_PRESENT |
+				 RADEON_VTX_PKCOLOR_PRESENT);
+			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
+				  RADEON_PRIM_WALK_RING |
+				  RADEON_MAOS_ENABLE |
+				  RADEON_VTX_FMT_RADEON_MODE |
+				  (3 << RADEON_NUM_VERTICES_SHIFT)));
+
+			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x0);
+
+			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x0);
+
+			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x0);
+
+			ADVANCE_RING();
+		}
+	}
+
+	/* Increment the clear counter.  The client-side 3D driver must
+	 * wait on this value before performing the clear ioctl.  We
+	 * need this because the card's so damned fast...
+	 */
+	sarea_priv->last_clear++;
+
+	BEGIN_RING(4);
+
+	RADEON_CLEAR_AGE(sarea_priv->last_clear);
+	RADEON_WAIT_UNTIL_IDLE();
+
+	ADVANCE_RING();
+}
+
+static void radeon_cp_dispatch_swap(struct drm_device *dev, struct drm_master *master)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	/* Do some trivial performance monitoring...
+	 */
+	if (dev_priv->do_boxes)
+		radeon_cp_performance_boxes(dev_priv, master_priv);
+
+	/* Wait for the 3D stream to idle before dispatching the bitblt.
+	 * This will prevent data corruption between the two streams.
+	 */
+	BEGIN_RING(2);
+
+	RADEON_WAIT_UNTIL_3D_IDLE();
+
+	ADVANCE_RING();
+
+	for (i = 0; i < nbox; i++) {
+		int x = pbox[i].x1;
+		int y = pbox[i].y1;
+		int w = pbox[i].x2 - x;
+		int h = pbox[i].y2 - y;
+
+		DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
+
+		BEGIN_RING(9);
+
+		OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0));
+		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_BRUSH_NONE |
+			 (dev_priv->color_fmt << 8) |
+			 RADEON_GMC_SRC_DATATYPE_COLOR |
+			 RADEON_ROP3_S |
+			 RADEON_DP_SRC_SOURCE_MEMORY |
+			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
+
+		/* Make this work even if front & back are flipped:
+		 */
+		OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
+		if (sarea_priv->pfCurrentPage == 0) {
+			OUT_RING(dev_priv->back_pitch_offset);
+			OUT_RING(dev_priv->front_pitch_offset);
+		} else {
+			OUT_RING(dev_priv->front_pitch_offset);
+			OUT_RING(dev_priv->back_pitch_offset);
+		}
+
+		OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2));
+		OUT_RING((x << 16) | y);
+		OUT_RING((x << 16) | y);
+		OUT_RING((w << 16) | h);
+
+		ADVANCE_RING();
+	}
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	sarea_priv->last_frame++;
+
+	BEGIN_RING(4);
+
+	RADEON_FRAME_AGE(sarea_priv->last_frame);
+	RADEON_WAIT_UNTIL_2D_IDLE();
+
+	ADVANCE_RING();
+}
+
+void radeon_cp_dispatch_flip(struct drm_device *dev, struct drm_master *master)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	struct drm_sarea *sarea = (struct drm_sarea *)master_priv->sarea->handle;
+	int offset = (master_priv->sarea_priv->pfCurrentPage == 1)
+	    ? dev_priv->front_offset : dev_priv->back_offset;
+	RING_LOCALS;
+	DRM_DEBUG("pfCurrentPage=%d\n",
+		  master_priv->sarea_priv->pfCurrentPage);
+
+	/* Do some trivial performance monitoring...
+	 */
+	if (dev_priv->do_boxes) {
+		dev_priv->stats.boxes |= RADEON_BOX_FLIP;
+		radeon_cp_performance_boxes(dev_priv, master_priv);
+	}
+
+	/* Update the frame offsets for both CRTCs
+	 */
+	BEGIN_RING(6);
+
+	RADEON_WAIT_UNTIL_3D_IDLE();
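+	/* (color_fmt - 2) happens to equal the bytes per pixel here:
+	 * RGB565 is format 4, ARGB8888 is format 6. */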
+	OUT_RING_REG(RADEON_CRTC_OFFSET,
+		     ((sarea->frame.y * dev_priv->front_pitch +
+		       sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
+		     + offset);
+	OUT_RING_REG(RADEON_CRTC2_OFFSET, master_priv->sarea_priv->crtc2_base
+		     + offset);
+
+	ADVANCE_RING();
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	master_priv->sarea_priv->last_frame++;
+	master_priv->sarea_priv->pfCurrentPage =
+		1 - master_priv->sarea_priv->pfCurrentPage;
+
+	BEGIN_RING(2);
+
+	RADEON_FRAME_AGE(master_priv->sarea_priv->last_frame);
+
+	ADVANCE_RING();
+}
+
+static int bad_prim_vertex_nr(int primitive, int nr)
+{
+	switch (primitive & RADEON_PRIM_TYPE_MASK) {
+	case RADEON_PRIM_TYPE_NONE:
+	case RADEON_PRIM_TYPE_POINT:
+		return nr < 1;
+	case RADEON_PRIM_TYPE_LINE:
+		return (nr & 1) || nr == 0;
+	case RADEON_PRIM_TYPE_LINE_STRIP:
+		return nr < 2;
+	case RADEON_PRIM_TYPE_TRI_LIST:
+	case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
+	case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
+	case RADEON_PRIM_TYPE_RECT_LIST:
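+		/* rect lists take 3 vertices per rectangle; the fourth
+		 * corner is implied */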
+		return nr % 3 || nr == 0;
+	case RADEON_PRIM_TYPE_TRI_FAN:
+	case RADEON_PRIM_TYPE_TRI_STRIP:
+		return nr < 3;
+	default:
+		return 1;
+	}
+}
+
+typedef struct {
+	unsigned int start;
+	unsigned int finish;
+	unsigned int prim;
+	unsigned int numverts;
+	unsigned int offset;
+	unsigned int vc_format;
+} drm_radeon_tcl_prim_t;
+
+static void radeon_cp_dispatch_vertex(struct drm_device * dev,
+				      struct drm_file *file_priv,
+				      struct drm_buf * buf,
+				      drm_radeon_tcl_prim_t * prim)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+	int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
+	int numverts = (int)prim->numverts;
+	int nbox = sarea_priv->nbox;
+	int i = 0;
+	RING_LOCALS;
+
+	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
+		  prim->prim,
+		  prim->vc_format, prim->start, prim->finish, prim->numverts);
+
+	if (bad_prim_vertex_nr(prim->prim, prim->numverts)) {
+		DRM_ERROR("bad prim %x numverts %d\n",
+			  prim->prim, prim->numverts);
+		return;
+	}
+
+	do {
+		/* Emit the next cliprect */
+		if (i < nbox) {
+			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+		}
+
+		/* Emit the vertex buffer rendering commands */
+		BEGIN_RING(5);
+
+		OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3));
+		OUT_RING(offset);
+		OUT_RING(numverts);
+		OUT_RING(prim->vc_format);
+		OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST |
+			 RADEON_COLOR_ORDER_RGBA |
+			 RADEON_VTX_FMT_RADEON_MODE |
+			 (numverts << RADEON_NUM_VERTICES_SHIFT));
+
+		ADVANCE_RING();
+
+		i++;
+	} while (i < nbox);
+}
+
+void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+	RING_LOCALS;
+
+	buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
+
+	/* Emit the vertex buffer age */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
+		BEGIN_RING(3);
+		R600_DISPATCH_AGE(buf_priv->age);
+		ADVANCE_RING();
+	} else {
+		BEGIN_RING(2);
+		RADEON_DISPATCH_AGE(buf_priv->age);
+		ADVANCE_RING();
+	}
+
+	buf->pending = 1;
+	buf->used = 0;
+}
+
+static void radeon_cp_dispatch_indirect(struct drm_device * dev,
+					struct drm_buf * buf, int start, int end)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+	DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
+
+	if (start != end) {
+		int offset = (dev_priv->gart_buffers_offset
+			      + buf->offset + start);
+		int dwords = (end - start + 3) / sizeof(u32);
+
+		/* Indirect buffer data must be an even number of
+		 * dwords, so if we've been given an odd number we must
+		 * pad the data with a Type-2 CP packet.
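+		 * A Type-2 packet is a CP no-op, so the pad is harmless.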
+		 */
+		if (dwords & 1) {
+			u32 *data = (u32 *)
+			    ((char *)dev->agp_buffer_map->handle
+			     + buf->offset + start);
+			data[dwords++] = RADEON_CP_PACKET2;
+		}
+
+		/* Fire off the indirect buffer */
+		BEGIN_RING(3);
+
+		OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
+		OUT_RING(offset);
+		OUT_RING(dwords);
+
+		ADVANCE_RING();
+	}
+}
+
+static void radeon_cp_dispatch_indices(struct drm_device *dev,
+				       struct drm_master *master,
+				       struct drm_buf * elt_buf,
+				       drm_radeon_tcl_prim_t * prim)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+	int offset = dev_priv->gart_buffers_offset + prim->offset;
+	u32 *data;
+	int dwords;
+	int i = 0;
+	int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
+	int count = (prim->finish - start) / sizeof(u16);
+	int nbox = sarea_priv->nbox;
+
+	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
+		  prim->prim,
+		  prim->vc_format,
+		  prim->start, prim->finish, prim->offset, prim->numverts);
+
+	if (bad_prim_vertex_nr(prim->prim, count)) {
+		DRM_ERROR("bad prim %x count %d\n", prim->prim, count);
+		return;
+	}
+
+	if (start >= prim->finish || (prim->start & 0x7)) {
+		DRM_ERROR("buffer prim %d\n", prim->prim);
+		return;
+	}
+
+	dwords = (prim->finish - prim->start + 3) / sizeof(u32);
+
+	data = (u32 *) ((char *)dev->agp_buffer_map->handle +
+			elt_buf->offset + prim->start);
+
+	data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
+	data[1] = offset;
+	data[2] = prim->numverts;
+	data[3] = prim->vc_format;
+	data[4] = (prim->prim |
+		   RADEON_PRIM_WALK_IND |
+		   RADEON_COLOR_ORDER_RGBA |
+		   RADEON_VTX_FMT_RADEON_MODE |
+		   (count << RADEON_NUM_VERTICES_SHIFT));
+
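+	/* Fire the same indirect buffer once per cliprect; only the clip
+	 * rectangle emitted each iteration changes. */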
+	do {
+		if (i < nbox)
+			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+		radeon_cp_dispatch_indirect(dev, elt_buf,
+					    prim->start, prim->finish);
+
+		i++;
+	} while (i < nbox);
+}
+
+#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
+
+static int radeon_cp_dispatch_texture(struct drm_device * dev,
+				      struct drm_file *file_priv,
+				      drm_radeon_texture_t * tex,
+				      drm_radeon_tex_image_t * image)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_buf *buf;
+	u32 format;
+	u32 *buffer;
+	const u8 __user *data;
+	int size, dwords, tex_width, blit_width, spitch;
+	u32 height;
+	int i;
+	u32 texpitch, microtile;
+	u32 offset, byte_offset;
+	RING_LOCALS;
+
+	if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
+		DRM_ERROR("Invalid destination offset\n");
+		return -EINVAL;
+	}
+
+	dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
+
+	/* Flush the pixel cache.  This ensures no pixel data gets mixed
+	 * up with the texture data from the host data blit, otherwise
+	 * part of the texture image may be corrupted.
+	 */
+	BEGIN_RING(4);
+	RADEON_FLUSH_CACHE();
+	RADEON_WAIT_UNTIL_IDLE();
+	ADVANCE_RING();
+
+	/* The compiler won't optimize away a division by a variable,
+	 * even if the only legal values are powers of two.  Thus, we'll
+	 * use a shift instead.
+	 */
+	switch (tex->format) {
+	case RADEON_TXFORMAT_ARGB8888:
+	case RADEON_TXFORMAT_RGBA8888:
+		format = RADEON_COLOR_FORMAT_ARGB8888;
+		tex_width = tex->width * 4;
+		blit_width = image->width * 4;
+		break;
+	case RADEON_TXFORMAT_AI88:
+	case RADEON_TXFORMAT_ARGB1555:
+	case RADEON_TXFORMAT_RGB565:
+	case RADEON_TXFORMAT_ARGB4444:
+	case RADEON_TXFORMAT_VYUY422:
+	case RADEON_TXFORMAT_YVYU422:
+		format = RADEON_COLOR_FORMAT_RGB565;
+		tex_width = tex->width * 2;
+		blit_width = image->width * 2;
+		break;
+	case RADEON_TXFORMAT_I8:
+	case RADEON_TXFORMAT_RGB332:
+		format = RADEON_COLOR_FORMAT_CI8;
+		tex_width = tex->width * 1;
+		blit_width = image->width * 1;
+		break;
+	default:
+		DRM_ERROR("invalid texture format %d\n", tex->format);
+		return -EINVAL;
+	}
+	spitch = blit_width >> 6;
+	if (spitch == 0 && image->height > 1)
+		return -EINVAL;
+
+	texpitch = tex->pitch;
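+	/* The pitch ends up in bits 22+ of the blit pitch/offset dword,
+	 * so shifting it up lets us test the DST_TILE_MICRO flag that
+	 * userspace may have set. */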
+	if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
+		microtile = 1;
+		if (tex_width < 64) {
+			texpitch &= ~(RADEON_DST_TILE_MICRO >> 22);
+			/* we got tiled coordinates, untile them */
+			image->x *= 2;
+		}
+	} else
+		microtile = 0;
+
+	/* this might fail for zero-sized uploads - are those illegal? */
+	if (!radeon_check_offset(dev_priv, tex->offset + image->height *
+				blit_width - 1)) {
+		DRM_ERROR("Invalid final destination offset\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);
+
+	do {
+		DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
+			  tex->offset >> 10, tex->pitch, tex->format,
+			  image->x, image->y, image->width, image->height);
+
+		/* Make a copy of some parameters in case we have to
+		 * update them for a multi-pass texture blit.
+		 */
+		height = image->height;
+		data = (const u8 __user *)image->data;
+
+		size = height * blit_width;
+
+		if (size > RADEON_MAX_TEXTURE_SIZE) {
+			height = RADEON_MAX_TEXTURE_SIZE / blit_width;
+			size = height * blit_width;
+		} else if (size < 4 && size > 0) {
+			size = 4;
+		} else if (size == 0) {
+			return 0;
+		}
+
+		buf = radeon_freelist_get(dev);
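+		/* Note: the idle-and-retry path below is intentionally
+		 * disabled (the "0 &&"); on failure we return EAGAIN and
+		 * let userspace retry. */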
+		if (0 && !buf) {
+			radeon_do_cp_idle(dev_priv);
+			buf = radeon_freelist_get(dev);
+		}
+		if (!buf) {
+			DRM_DEBUG("EAGAIN\n");
+			if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+				return -EFAULT;
+			return -EAGAIN;
+		}
+
+		/* Dispatch the indirect buffer.
+		 */
+		buffer =
+		    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+		dwords = size / 4;
+
+#define RADEON_COPY_MT(_buf, _data, _width) \
+	do { \
+		if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
+			DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
+			return -EFAULT; \
+		} \
+	} while(0)
+
+		if (microtile) {
+			/* Texture micro tiling is in use, so the minimum texture
+			   width is 16 bytes. However, we cannot use the blitter
+			   directly for texture widths < 64 bytes, since the
+			   minimum tex pitch is 64 bytes and it must match the
+			   texture width, otherwise the blitter will tile it
+			   wrong. Thus, tile manually in this case. We also need
+			   to special-case tex height == 1, since the actual
+			   image will have height 2 and we must not read beyond
+			   the texture size from user space. */
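+			/* buffer is a u32 *, so the offsets below are in
+			   dwords (e.g. buffer + 8 is 32 bytes in). */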
+			if (tex->height == 1) {
+				if (tex_width >= 64 || tex_width <= 16) {
+					RADEON_COPY_MT(buffer, data,
+						(int)(tex_width * sizeof(u32)));
+				} else if (tex_width == 32) {
+					RADEON_COPY_MT(buffer, data, 16);
+					RADEON_COPY_MT(buffer + 8,
+						       data + 16, 16);
+				}
+			} else if (tex_width >= 64 || tex_width == 16) {
+				RADEON_COPY_MT(buffer, data,
+					       (int)(dwords * sizeof(u32)));
+			} else if (tex_width < 16) {
+				for (i = 0; i < tex->height; i++) {
+					RADEON_COPY_MT(buffer, data, tex_width);
+					buffer += 4;
+					data += tex_width;
+				}
+			} else if (tex_width == 32) {
+				/* TODO: make sure this works when not fitting in one buffer
+				   (i.e. 32bytes x 2048...) */
+				for (i = 0; i < tex->height; i += 2) {
+					RADEON_COPY_MT(buffer, data, 16);
+					data += 16;
+					RADEON_COPY_MT(buffer + 8, data, 16);
+					data += 16;
+					RADEON_COPY_MT(buffer + 4, data, 16);
+					data += 16;
+					RADEON_COPY_MT(buffer + 12, data, 16);
+					data += 16;
+					buffer += 16;
+				}
+			}
+		} else {
+			if (tex_width >= 32) {
+				/* Texture image width is larger than the minimum, so we
+				 * can upload it directly.
+				 */
+				RADEON_COPY_MT(buffer, data,
+					       (int)(dwords * sizeof(u32)));
+			} else {
+				/* Texture image width is less than the minimum, so we
+				 * need to pad out each image scanline to the minimum
+				 * width.
+				 */
+				for (i = 0; i < tex->height; i++) {
+					RADEON_COPY_MT(buffer, data, tex_width);
+					buffer += 8;
+					data += tex_width;
+				}
+			}
+		}
+
+#undef RADEON_COPY_MT
+		byte_offset = (image->y & ~2047) * blit_width;
+		buf->file_priv = file_priv;
+		buf->used = size;
+		offset = dev_priv->gart_buffers_offset + buf->offset;
+		BEGIN_RING(9);
+		OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
+		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_BRUSH_NONE |
+			 (format << 8) |
+			 RADEON_GMC_SRC_DATATYPE_COLOR |
+			 RADEON_ROP3_S |
+			 RADEON_DP_SRC_SOURCE_MEMORY |
+			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
+		OUT_RING((spitch << 22) | (offset >> 10));
+		OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10)));
+		OUT_RING(0);
+		OUT_RING((image->x << 16) | (image->y % 2048));
+		OUT_RING((image->width << 16) | height);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+		ADVANCE_RING();
+		COMMIT_RING();
+
+		radeon_cp_discard_buffer(dev, file_priv->master, buf);
+
+		/* Update the input parameters for next time */
+		image->y += height;
+		image->height -= height;
+		image->data = (const u8 __user *)image->data + size;
+	} while (image->height > 0);
+
+	/* Flush the pixel cache after the blit completes.  This ensures
+	 * the texture data is written out to memory before rendering
+	 * continues.
+	 */
+	BEGIN_RING(4);
+	RADEON_FLUSH_CACHE();
+	RADEON_WAIT_UNTIL_2D_IDLE();
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	return 0;
+}
+
+static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(35);
+
+	OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
+	OUT_RING(0x00000000);
+
+	OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
+	for (i = 0; i < 32; i++) {
+		OUT_RING(stipple[i]);
+	}
+
+	ADVANCE_RING();
+}
+
+static void radeon_apply_surface_regs(int surf_index,
+				      drm_radeon_private_t *dev_priv)
+{
+	if (!dev_priv->mmio)
+		return;
+
+	radeon_do_cp_idle(dev_priv);
+
+	RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
+		     dev_priv->surfaces[surf_index].flags);
+	RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
+		     dev_priv->surfaces[surf_index].lower);
+	RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
+		     dev_priv->surfaces[surf_index].upper);
+}
+
+/* Allocates a virtual surface.
+ * This doesn't always allocate a real surface; it will stretch an
+ * existing surface when possible.
+ *
+ * Note that refcount can be at most 2: if a surface were shared three
+ * ways, freeing the middle piece would require splitting the real
+ * surface, and a spare surface might not be available.
+ * For example: allocate three contiguous surfaces A, B and C backed by
+ * one real surface; freeing B suddenly requires two real surfaces to
+ * cover A and C.
+ */
+static int alloc_surface(drm_radeon_surface_alloc_t *new,
+			 drm_radeon_private_t *dev_priv,
+			 struct drm_file *file_priv)
+{
+	struct radeon_virt_surface *s;
+	int i;
+	int virt_surface_index;
+	uint32_t new_upper, new_lower;
+
+	new_lower = new->address;
+	new_upper = new_lower + new->size - 1;
+
+	/* sanity check */
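+	/* Both bounds must honour the hardware alignment: lower aligned
+	 * down to a boundary, upper ending one byte short of the next. */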
+	if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
+	    ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
+	     RADEON_SURF_ADDRESS_FIXED_MASK)
+	    || ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
+		return -1;
+
+	/* make sure there is no overlap with existing surfaces */
+	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+		if ((dev_priv->surfaces[i].refcount != 0) &&
+		    (((new_lower >= dev_priv->surfaces[i].lower) &&
+		      (new_lower < dev_priv->surfaces[i].upper)) ||
+		     ((new_lower < dev_priv->surfaces[i].lower) &&
+		      (new_upper > dev_priv->surfaces[i].lower)))) {
+			return -1;
+		}
+	}
+
+	/* find a virtual surface */
+	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
+		if (dev_priv->virt_surfaces[i].file_priv == NULL)
+			break;
+	if (i == 2 * RADEON_MAX_SURFACES) {
+		return -1;
+	}
+	virt_surface_index = i;
+
+	/* try to reuse an existing surface */
+	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+		/* extend before */
+		if ((dev_priv->surfaces[i].refcount == 1) &&
+		    (new->flags == dev_priv->surfaces[i].flags) &&
+		    (new_upper + 1 == dev_priv->surfaces[i].lower)) {
+			s = &(dev_priv->virt_surfaces[virt_surface_index]);
+			s->surface_index = i;
+			s->lower = new_lower;
+			s->upper = new_upper;
+			s->flags = new->flags;
+			s->file_priv = file_priv;
+			dev_priv->surfaces[i].refcount++;
+			dev_priv->surfaces[i].lower = s->lower;
+			radeon_apply_surface_regs(s->surface_index, dev_priv);
+			return virt_surface_index;
+		}
+
+		/* extend after */
+		if ((dev_priv->surfaces[i].refcount == 1) &&
+		    (new->flags == dev_priv->surfaces[i].flags) &&
+		    (new_lower == dev_priv->surfaces[i].upper + 1)) {
+			s = &(dev_priv->virt_surfaces[virt_surface_index]);
+			s->surface_index = i;
+			s->lower = new_lower;
+			s->upper = new_upper;
+			s->flags = new->flags;
+			s->file_priv = file_priv;
+			dev_priv->surfaces[i].refcount++;
+			dev_priv->surfaces[i].upper = s->upper;
+			radeon_apply_surface_regs(s->surface_index, dev_priv);
+			return virt_surface_index;
+		}
+	}
+
+	/* okay, we need a new one */
+	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+		if (dev_priv->surfaces[i].refcount == 0) {
+			s = &(dev_priv->virt_surfaces[virt_surface_index]);
+			s->surface_index = i;
+			s->lower = new_lower;
+			s->upper = new_upper;
+			s->flags = new->flags;
+			s->file_priv = file_priv;
+			dev_priv->surfaces[i].refcount = 1;
+			dev_priv->surfaces[i].lower = s->lower;
+			dev_priv->surfaces[i].upper = s->upper;
+			dev_priv->surfaces[i].flags = s->flags;
+			radeon_apply_surface_regs(s->surface_index, dev_priv);
+			return virt_surface_index;
+		}
+	}
+
+	/* we didn't find anything */
+	return -1;
+}
+
+static int free_surface(struct drm_file *file_priv,
+			drm_radeon_private_t * dev_priv,
+			int lower)
+{
+	struct radeon_virt_surface *s;
+	int i;
+	/* find the virtual surface */
+	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
+		s = &(dev_priv->virt_surfaces[i]);
+		if (s->file_priv && (file_priv == s->file_priv) &&
+		    (lower == s->lower)) {
+			int idx = s->surface_index;
+
+			if (dev_priv->surfaces[idx].lower == s->lower)
+				dev_priv->surfaces[idx].lower = s->upper;
+
+			if (dev_priv->surfaces[idx].upper == s->upper)
+				dev_priv->surfaces[idx].upper = s->lower;
+
+			dev_priv->surfaces[idx].refcount--;
+			if (dev_priv->surfaces[idx].refcount == 0)
+				dev_priv->surfaces[idx].flags = 0;
+
+			s->file_priv = NULL;
+			radeon_apply_surface_regs(idx, dev_priv);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+static void radeon_surfaces_release(struct drm_file *file_priv,
+				    drm_radeon_private_t * dev_priv)
+{
+	int i;
+	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
+		if (dev_priv->virt_surfaces[i].file_priv == file_priv)
+			free_surface(file_priv, dev_priv,
+				     dev_priv->virt_surfaces[i].lower);
+	}
+}
+
+/* ================================================================
+ * IOCTL functions
+ */
+static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_surface_alloc_t *alloc = data;
+
+	if (alloc_surface(alloc, dev_priv, file_priv) == -1)
+		return -EINVAL;
+	else
+		return 0;
+}
+
+static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_surface_free_t *memfree = data;
+
+	if (free_surface(file_priv, dev_priv, memfree->address))
+		return -EINVAL;
+	else
+		return 0;
+}
+
+static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+	drm_radeon_clear_t *clear = data;
+	drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+	if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
+			       sarea_priv->nbox * sizeof(depth_boxes[0])))
+		return -EFAULT;
+
+	radeon_cp_dispatch_clear(dev, file_priv->master, clear, depth_boxes);
+
+	COMMIT_RING();
+	return 0;
+}
+
+/* Not sure why this isn't set all the time:
+ */
+static int radeon_do_init_pageflip(struct drm_device *dev, struct drm_master *master)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = master->driver_priv;
+	RING_LOCALS;
+
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(6);
+	RADEON_WAIT_UNTIL_3D_IDLE();
+	OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
+	OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
+		 RADEON_CRTC_OFFSET_FLIP_CNTL);
+	OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
+	OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
+		 RADEON_CRTC_OFFSET_FLIP_CNTL);
+	ADVANCE_RING();
+
+	dev_priv->page_flipping = 1;
+
+	if (master_priv->sarea_priv->pfCurrentPage != 1)
+		master_priv->sarea_priv->pfCurrentPage = 0;
+
+	return 0;
+}
+
+/* Swapping and flipping are different operations, need different ioctls.
+ * They can & should be intermixed to support multiple 3d windows.
+ */
+static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (!dev_priv->page_flipping)
+		radeon_do_init_pageflip(dev, file_priv->master);
+
+	radeon_cp_dispatch_flip(dev, file_priv->master);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
+
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		r600_cp_dispatch_swap(dev, file_priv);
+	else
+		radeon_cp_dispatch_swap(dev, file_priv->master);
+	sarea_priv->ctx_owner = 0;
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_radeon_vertex_t *vertex = data;
+	drm_radeon_tcl_prim_t prim;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	sarea_priv = master_priv->sarea_priv;
+
+	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
+		  DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
+
+	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  vertex->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+	if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
+		DRM_ERROR("buffer prim %d\n", vertex->prim);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf = dma->buflist[vertex->idx];
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
+		return -EINVAL;
+	}
+
+	/* Build up a prim_t record:
+	 */
+	if (vertex->count) {
+		buf->used = vertex->count;	/* not used? */
+
+		if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
+			if (radeon_emit_state(dev_priv, file_priv,
+					      &sarea_priv->context_state,
+					      sarea_priv->tex_state,
+					      sarea_priv->dirty)) {
+				DRM_ERROR("radeon_emit_state failed\n");
+				return -EINVAL;
+			}
+
+			sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+					       RADEON_UPLOAD_TEX1IMAGES |
+					       RADEON_UPLOAD_TEX2IMAGES |
+					       RADEON_REQUIRE_QUIESCENCE);
+		}
+
+		prim.start = 0;
+		prim.finish = vertex->count;	/* unused */
+		prim.prim = vertex->prim;
+		prim.numverts = vertex->count;
+		prim.vc_format = sarea_priv->vc_format;
+
+		radeon_cp_dispatch_vertex(dev, file_priv, buf, &prim);
+	}
+
+	if (vertex->discard) {
+		radeon_cp_discard_buffer(dev, file_priv->master, buf);
+	}
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_radeon_indices_t *elts = data;
+	drm_radeon_tcl_prim_t prim;
+	int count;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	sarea_priv = master_priv->sarea_priv;
+
+	DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
+		  DRM_CURRENTPID, elts->idx, elts->start, elts->end,
+		  elts->discard);
+
+	if (elts->idx < 0 || elts->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  elts->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+	if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
+		DRM_ERROR("buffer prim %d\n", elts->prim);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf = dma->buflist[elts->idx];
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", elts->idx);
+		return -EINVAL;
+	}
+
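+	/* elts->start and elts->end are byte offsets into the buffer;
+	 * indices are 16-bit values, hence the sizeof(u16) scaling.
+	 */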
+	count = (elts->end - elts->start) / sizeof(u16);
+	elts->start -= RADEON_INDEX_PRIM_OFFSET;
+
+	if (elts->start & 0x7) {
+		DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
+		return -EINVAL;
+	}
+	if (elts->start < buf->used) {
+		DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
+		return -EINVAL;
+	}
+
+	buf->used = elts->end;
+
+	if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
+		if (radeon_emit_state(dev_priv, file_priv,
+				      &sarea_priv->context_state,
+				      sarea_priv->tex_state,
+				      sarea_priv->dirty)) {
+			DRM_ERROR("radeon_emit_state failed\n");
+			return -EINVAL;
+		}
+
+		sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+				       RADEON_UPLOAD_TEX1IMAGES |
+				       RADEON_UPLOAD_TEX2IMAGES |
+				       RADEON_REQUIRE_QUIESCENCE);
+	}
+
+	/* Build up a prim_t record:
+	 */
+	prim.start = elts->start;
+	prim.finish = elts->end;
+	prim.prim = elts->prim;
+	prim.offset = 0;	/* offset from start of dma buffers */
+	prim.numverts = RADEON_MAX_VB_VERTS;	/* duh */
+	prim.vc_format = sarea_priv->vc_format;
+
+	radeon_cp_dispatch_indices(dev, file_priv->master, buf, &prim);
+	if (elts->discard) {
+		radeon_cp_discard_buffer(dev, file_priv->master, buf);
+	}
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_texture_t *tex = data;
+	drm_radeon_tex_image_t image;
+	int ret;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (tex->image == NULL) {
+		DRM_ERROR("null texture image!\n");
+		return -EINVAL;
+	}
+
+	if (DRM_COPY_FROM_USER(&image,
+			       (drm_radeon_tex_image_t __user *) tex->image,
+			       sizeof(image)))
+		return -EFAULT;
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		ret = r600_cp_dispatch_texture(dev, file_priv, tex, &image);
+	else
+		ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
+
+	return ret;
+}
+
+static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_stipple_t *stipple = data;
+	u32 mask[32];
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+		return -EFAULT;
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	radeon_cp_dispatch_stipple(dev, mask);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_radeon_indirect_t *indirect = data;
+	RING_LOCALS;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
+		  indirect->idx, indirect->start, indirect->end,
+		  indirect->discard);
+
+	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  indirect->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+
+	buf = dma->buflist[indirect->idx];
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", indirect->idx);
+		return -EINVAL;
+	}
+
+	if (indirect->start < buf->used) {
+		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
+			  indirect->start, buf->used);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf->used = indirect->end;
+
+	/* Dispatch the indirect buffer full of commands from the
+	 * X server.  This is insecure and is thus only available to
+	 * privileged clients.
+	 */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+		r600_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
+	else {
+		/* Wait for the 3D stream to idle before the indirect buffer
+		 * containing 2D acceleration commands is processed.
+		 */
+		BEGIN_RING(2);
+		RADEON_WAIT_UNTIL_3D_IDLE();
+		ADVANCE_RING();
+		radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
+	}
+
+	if (indirect->discard) {
+		radeon_cp_discard_buffer(dev, file_priv->master, buf);
+	}
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_sarea_t *sarea_priv;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_radeon_vertex2_t *vertex = data;
+	int i;
+	unsigned char laststate;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	sarea_priv = master_priv->sarea_priv;
+
+	DRM_DEBUG("pid=%d index=%d discard=%d\n",
+		  DRM_CURRENTPID, vertex->idx, vertex->discard);
+
+	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  vertex->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf = dma->buflist[vertex->idx];
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
+		return -EINVAL;
+	}
+
+	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+		return -EINVAL;
+
+	for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {
+		drm_radeon_prim_t prim;
+		drm_radeon_tcl_prim_t tclprim;
+
+		if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
+			return -EFAULT;
+
+		if (prim.stateidx != laststate) {
+			drm_radeon_state_t state;
+
+			if (DRM_COPY_FROM_USER(&state,
+					       &vertex->state[prim.stateidx],
+					       sizeof(state)))
+				return -EFAULT;
+
+			if (radeon_emit_state2(dev_priv, file_priv, &state)) {
+				DRM_ERROR("radeon_emit_state2 failed\n");
+				return -EINVAL;
+			}
+
+			laststate = prim.stateidx;
+		}
+
+		tclprim.start = prim.start;
+		tclprim.finish = prim.finish;
+		tclprim.prim = prim.prim;
+		tclprim.vc_format = prim.vc_format;
+
+		if (prim.prim & RADEON_PRIM_WALK_IND) {
+			tclprim.offset = prim.numverts * 64;
+			tclprim.numverts = RADEON_MAX_VB_VERTS;	/* duh */
+
+			radeon_cp_dispatch_indices(dev, file_priv->master, buf, &tclprim);
+		} else {
+			tclprim.numverts = prim.numverts;
+			tclprim.offset = 0;	/* not used */
+
+			radeon_cp_dispatch_vertex(dev, file_priv, buf, &tclprim);
+		}
+
+		if (sarea_priv->nbox == 1)
+			sarea_priv->nbox = 0;
+	}
+
+	if (vertex->discard) {
+		radeon_cp_discard_buffer(dev, file_priv->master, buf);
+	}
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
+			       struct drm_file *file_priv,
+			       drm_radeon_cmd_header_t header,
+			       drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int id = (int)header.packet.packet_id;
+	int sz, reg;
+	RING_LOCALS;
+
+	if (id >= RADEON_MAX_STATE_PACKETS)
+		return -EINVAL;
+
+	sz = packet[id].len;
+	reg = packet[id].start;
+
+	if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
+		DRM_ERROR("Packet size provided larger than data provided\n");
+		return -EINVAL;
+	}
+
+	if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
+				cmdbuf->buffer)) {
+		DRM_ERROR("Packet verification failed\n");
+		return -EINVAL;
+	}
+
+	BEGIN_RING(sz + 1);
+	OUT_RING(CP_PACKET0(reg, (sz - 1)));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
+					  drm_radeon_cmd_header_t header,
+					  drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int sz = header.scalars.count;
+	int start = header.scalars.offset;
+	int stride = header.scalars.stride;
+	RING_LOCALS;
+
+	BEGIN_RING(3 + sz);
+	OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
+	OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
+	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+	return 0;
+}
+
+/* God this is ugly
+ */
+static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
+					   drm_radeon_cmd_header_t header,
+					   drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int sz = header.scalars.count;
+	int start = ((unsigned int)header.scalars.offset) + 0x100;
+	int stride = header.scalars.stride;
+	RING_LOCALS;
+
+	BEGIN_RING(3 + sz);
+	OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
+	OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
+	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+	return 0;
+}
+
+static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
+					  drm_radeon_cmd_header_t header,
+					  drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int sz = header.vectors.count;
+	int start = header.vectors.offset;
+	int stride = header.vectors.stride;
+	RING_LOCALS;
+
+	BEGIN_RING(5 + sz);
+	OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+	OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
+	OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
+	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
+					  drm_radeon_cmd_header_t header,
+					  drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int sz = header.veclinear.count * 4;
+	int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8);
+	RING_LOCALS;
+
+	if (!sz)
+		return 0;
+	if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
+		return -EINVAL;
+
+	BEGIN_RING(5 + sz);
+	OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+	OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
+	OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
+	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static int radeon_emit_packet3(struct drm_device * dev,
+			       struct drm_file *file_priv,
+			       drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	unsigned int cmdsz;
+	int ret;
+	RING_LOCALS;
+
+	DRM_DEBUG("\n");
+
+	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
+						  cmdbuf, &cmdsz))) {
+		DRM_ERROR("Packet verification failed\n");
+		return ret;
+	}
+
+	BEGIN_RING(cmdsz);
+	OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static int radeon_emit_packet3_cliprect(struct drm_device *dev,
+					struct drm_file *file_priv,
+					drm_radeon_kcmd_buffer_t *cmdbuf,
+					int orig_nbox)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_clip_rect box;
+	unsigned int cmdsz;
+	int ret;
+	struct drm_clip_rect __user *boxes = cmdbuf->boxes;
+	int i = 0;
+	RING_LOCALS;
+
+	DRM_DEBUG("\n");
+
+	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
+						  cmdbuf, &cmdsz))) {
+		DRM_ERROR("Packet verification failed\n");
+		return ret;
+	}
+
+	if (!orig_nbox)
+		goto out;
+
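+	/* Replay the same packet once per cliprect, programming the
+	 * clip rectangle before each pass.
+	 */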
+	do {
+		if (i < cmdbuf->nbox) {
+			if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
+				return -EFAULT;
+			/* FIXME The second and subsequent times round
+			 * this loop, send a WAIT_UNTIL_3D_IDLE before
+			 * calling emit_clip_rect(). This fixes a
+			 * lockup on fast machines when sending
+			 * several cliprects with a cmdbuf, as when
+			 * waving a 2D window over a 3D
+			 * window. Something in the commands from user
+			 * space seems to hang the card when they're
+			 * sent several times in a row. That would be
+			 * the correct place to fix it but this works
+			 * around it until I can figure that out - Tim
+			 * Smith */
+			if (i) {
+				BEGIN_RING(2);
+				RADEON_WAIT_UNTIL_3D_IDLE();
+				ADVANCE_RING();
+			}
+			radeon_emit_clip_rect(dev_priv, &box);
+		}
+
+		BEGIN_RING(cmdsz);
+		OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
+		ADVANCE_RING();
+
+	} while (++i < cmdbuf->nbox);
+	if (cmdbuf->nbox == 1)
+		cmdbuf->nbox = 0;
+
+	return 0;
+      out:
+	drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
+	return 0;
+}
+
+static int radeon_emit_wait(struct drm_device * dev, int flags)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	DRM_DEBUG("%x\n", flags);
+	switch (flags) {
+	case RADEON_WAIT_2D:
+		BEGIN_RING(2);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+		ADVANCE_RING();
+		break;
+	case RADEON_WAIT_3D:
+		BEGIN_RING(2);
+		RADEON_WAIT_UNTIL_3D_IDLE();
+		ADVANCE_RING();
+		break;
+	case RADEON_WAIT_2D | RADEON_WAIT_3D:
+		BEGIN_RING(2);
+		RADEON_WAIT_UNTIL_IDLE();
+		ADVANCE_RING();
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf = NULL;
+	drm_radeon_cmd_header_t stack_header;
+	int idx;
+	drm_radeon_kcmd_buffer_t *cmdbuf = data;
+	int orig_nbox;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {
+		return -EINVAL;
+	}
+
+	/* Allocate an in-kernel area and copy in the cmdbuf.  Do this to avoid
+	 * races between checking values and using those values in other code,
+	 * and simply to avoid a lot of function calls to copy in data.
+	 */
+	if (cmdbuf->bufsz != 0) {
+		int rv;
+		void __user *buffer = cmdbuf->buffer;
+		rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
+		if (rv)
+			return rv;
+		rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
+						cmdbuf->bufsz);
+		if (rv) {
+			drm_buffer_free(cmdbuf->buffer);
+			return rv;
+		}
+	} else
+		goto done;
+
+	orig_nbox = cmdbuf->nbox;
+
+	if (dev_priv->microcode_version == UCODE_R300) {
+		int temp;
+		temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
+
+		drm_buffer_free(cmdbuf->buffer);
+
+		return temp;
+	}
+
+	/* microcode_version != r300 */
+	while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {
+
+		drm_radeon_cmd_header_t *header;
+		header = drm_buffer_read_object(cmdbuf->buffer,
+				sizeof(stack_header), &stack_header);
+
+		switch (header->header.cmd_type) {
+		case RADEON_CMD_PACKET:
+			DRM_DEBUG("RADEON_CMD_PACKET\n");
+			if (radeon_emit_packets
+			    (dev_priv, file_priv, *header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_packets failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_SCALARS:
+			DRM_DEBUG("RADEON_CMD_SCALARS\n");
+			if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_scalars failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_VECTORS:
+			DRM_DEBUG("RADEON_CMD_VECTORS\n");
+			if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_vectors failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_DMA_DISCARD:
+			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
+			idx = header->dma.buf_idx;
+			if (idx < 0 || idx >= dma->buf_count) {
+				DRM_ERROR("buffer index %d (of %d max)\n",
+					  idx, dma->buf_count - 1);
+				goto err;
+			}
+
+			buf = dma->buflist[idx];
+			if (buf->file_priv != file_priv || buf->pending) {
+				DRM_ERROR("bad buffer %p %p %d\n",
+					  buf->file_priv, file_priv,
+					  buf->pending);
+				goto err;
+			}
+
+			radeon_cp_discard_buffer(dev, file_priv->master, buf);
+			break;
+
+		case RADEON_CMD_PACKET3:
+			DRM_DEBUG("RADEON_CMD_PACKET3\n");
+			if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
+				DRM_ERROR("radeon_emit_packet3 failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_PACKET3_CLIP:
+			DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
+			if (radeon_emit_packet3_cliprect
+			    (dev, file_priv, cmdbuf, orig_nbox)) {
+				DRM_ERROR("radeon_emit_packet3_clip failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_SCALARS2:
+			DRM_DEBUG("RADEON_CMD_SCALARS2\n");
+			if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_scalars2 failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_WAIT:
+			DRM_DEBUG("RADEON_CMD_WAIT\n");
+			if (radeon_emit_wait(dev, header->wait.flags)) {
+				DRM_ERROR("radeon_emit_wait failed\n");
+				goto err;
+			}
+			break;
+		case RADEON_CMD_VECLINEAR:
+			DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
+			if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_veclinear failed\n");
+				goto err;
+			}
+			break;
+
+		default:
+			DRM_ERROR("bad cmd_type %d at byte %d\n",
+				  header->header.cmd_type,
+				  cmdbuf->buffer->iterator);
+			goto err;
+		}
+	}
+
+	drm_buffer_free(cmdbuf->buffer);
+
+      done:
+	DRM_DEBUG("DONE\n");
+	COMMIT_RING();
+	return 0;
+
+      err:
+	drm_buffer_free(cmdbuf->buffer);
+	return -EINVAL;
+}
+
+static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_getparam_t *param = data;
+	int value;
+
+	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+	switch (param->param) {
+	case RADEON_PARAM_GART_BUFFER_OFFSET:
+		value = dev_priv->gart_buffers_offset;
+		break;
+	case RADEON_PARAM_LAST_FRAME:
+		dev_priv->stats.last_frame_reads++;
+		value = GET_SCRATCH(dev_priv, 0);
+		break;
+	case RADEON_PARAM_LAST_DISPATCH:
+		value = GET_SCRATCH(dev_priv, 1);
+		break;
+	case RADEON_PARAM_LAST_CLEAR:
+		dev_priv->stats.last_clear_reads++;
+		value = GET_SCRATCH(dev_priv, 2);
+		break;
+	case RADEON_PARAM_IRQ_NR:
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			value = 0;
+		else
+			value = drm_dev_to_irq(dev);
+		break;
+	case RADEON_PARAM_GART_BASE:
+		value = dev_priv->gart_vm_start;
+		break;
+	case RADEON_PARAM_REGISTER_HANDLE:
+		value = dev_priv->mmio->offset;
+		break;
+	case RADEON_PARAM_STATUS_HANDLE:
+		value = dev_priv->ring_rptr_offset;
+		break;
+#if BITS_PER_LONG == 32
+		/*
+		 * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
+		 * pointer which can't fit into an int-sized variable.  According to
+		 * Michel Dänzer, the ioctl() is only used on embedded platforms, so
+		 * not supporting it shouldn't be a problem.  If the same functionality
+		 * is needed on 64-bit platforms, a new ioctl() would have to be added,
+		 * so backwards-compatibility for the embedded platforms can be
+		 * maintained.  --davidm 4-Feb-2004.
+		 */
+	case RADEON_PARAM_SAREA_HANDLE:
+		/* The lock is the first dword in the sarea. */
+		/* no users of this parameter */
+		break;
+#endif
+	case RADEON_PARAM_GART_TEX_HANDLE:
+		value = dev_priv->gart_textures_offset;
+		break;
+	case RADEON_PARAM_SCRATCH_OFFSET:
+		if (!dev_priv->writeback_works)
+			return -EINVAL;
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+			value = R600_SCRATCH_REG_OFFSET;
+		else
+			value = RADEON_SCRATCH_REG_OFFSET;
+		break;
+	case RADEON_PARAM_CARD_TYPE:
+		if (dev_priv->flags & RADEON_IS_PCIE)
+			value = RADEON_CARD_PCIE;
+		else if (dev_priv->flags & RADEON_IS_AGP)
+			value = RADEON_CARD_AGP;
+		else
+			value = RADEON_CARD_PCI;
+		break;
+	case RADEON_PARAM_VBLANK_CRTC:
+		value = radeon_vblank_crtc_get(dev);
+		break;
+	case RADEON_PARAM_FB_LOCATION:
+		value = radeon_read_fb_location(dev_priv);
+		break;
+	case RADEON_PARAM_NUM_GB_PIPES:
+		value = dev_priv->num_gb_pipes;
+		break;
+	case RADEON_PARAM_NUM_Z_PIPES:
+		value = dev_priv->num_z_pipes;
+		break;
+	default:
+		DRM_DEBUG("Invalid parameter %d\n", param->param);
+		return -EINVAL;
+	}
+
+	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
+	drm_radeon_setparam_t *sp = data;
+	struct drm_radeon_driver_file_fields *radeon_priv;
+
+	switch (sp->param) {
+	case RADEON_SETPARAM_FB_LOCATION:
+		radeon_priv = file_priv->driver_priv;
+		radeon_priv->radeon_fb_delta = dev_priv->fb_location -
+		    sp->value;
+		break;
+	case RADEON_SETPARAM_SWITCH_TILING:
+		if (sp->value == 0) {
+			DRM_DEBUG("color tiling disabled\n");
+			dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+			dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+			if (master_priv->sarea_priv)
+				master_priv->sarea_priv->tiling_enabled = 0;
+		} else if (sp->value == 1) {
+			DRM_DEBUG("color tiling enabled\n");
+			dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
+			dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
+			if (master_priv->sarea_priv)
+				master_priv->sarea_priv->tiling_enabled = 1;
+		}
+		break;
+	case RADEON_SETPARAM_PCIGART_LOCATION:
+		dev_priv->pcigart_offset = sp->value;
+		dev_priv->pcigart_offset_set = 1;
+		break;
+	case RADEON_SETPARAM_NEW_MEMMAP:
+		dev_priv->new_memmap = sp->value;
+		break;
+	case RADEON_SETPARAM_PCIGART_TABLE_SIZE:
+		dev_priv->gart_info.table_size = sp->value;
+		if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
+			dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
+		break;
+	case RADEON_SETPARAM_VBLANK_CRTC:
+		return radeon_vblank_crtc_set(dev, sp->value);
+	default:
+		DRM_DEBUG("Invalid parameter %d\n", sp->param);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* When a client dies:
+ *    - Check for and clean up flipped page state
+ *    - Free any alloced GART memory.
+ *    - Free any alloced radeon surfaces.
+ *
+ * DRM infrastructure takes care of reclaiming dma buffers.
+ */
+void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+	if (dev->dev_private) {
+		drm_radeon_private_t *dev_priv = dev->dev_private;
+		dev_priv->page_flipping = 0;
+		radeon_mem_release(file_priv, dev_priv->gart_heap);
+		radeon_mem_release(file_priv, dev_priv->fb_heap);
+		radeon_surfaces_release(file_priv, dev_priv);
+	}
+}
+
+void radeon_driver_lastclose(struct drm_device *dev)
+{
+	radeon_surfaces_release(PCIGART_FILE_PRIV, dev->dev_private);
+	radeon_do_release(dev);
+}
+
+int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_driver_file_fields *radeon_priv;
+
+	DRM_DEBUG("\n");
+	radeon_priv = kmalloc(sizeof(*radeon_priv), GFP_KERNEL);
+
+	if (!radeon_priv)
+		return -ENOMEM;
+
+	file_priv->driver_priv = radeon_priv;
+
+	if (dev_priv)
+		radeon_priv->radeon_fb_delta = dev_priv->fb_location;
+	else
+		radeon_priv->radeon_fb_delta = 0;
+	return 0;
+}
+
+void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_radeon_driver_file_fields *radeon_priv =
+	    file_priv->driver_priv;
+
+	kfree(radeon_priv);
+}
+
+struct drm_ioctl_desc radeon_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
+};
+
+int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_test.c b/linux-imx/drivers/gpu/drm/radeon/radeon_test.c
new file mode 100644
index 0000000..f9ebf2b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_test.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright 2009 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Michel Dänzer
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+#define RADEON_TEST_COPY_BLIT 1
+#define RADEON_TEST_COPY_DMA  0
+
+
+/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
+static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
+{
+	struct radeon_bo *vram_obj = NULL;
+	struct radeon_bo **gtt_obj = NULL;
+	struct radeon_fence *fence = NULL;
+	uint64_t gtt_addr, vram_addr;
+	unsigned n, size;
+	int i, r, ring;
+
+	switch (flag) {
+	case RADEON_TEST_COPY_DMA:
+		ring = radeon_copy_dma_ring_index(rdev);
+		break;
+	case RADEON_TEST_COPY_BLIT:
+		ring = radeon_copy_blit_ring_index(rdev);
+		break;
+	default:
+		DRM_ERROR("Unknown copy method\n");
+		return;
+	}
+
+	size = 1024 * 1024;
+
+	/* Number of tests =
+	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
+	 */
+	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		n -= rdev->ring[i].ring_size;
+	if (rdev->wb.wb_obj)
+		n -= RADEON_GPU_PAGE_SIZE;
+	if (rdev->ih.ring_obj)
+		n -= rdev->ih.ring_size;
+	n /= size;
+
+	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
+	if (!gtt_obj) {
+		DRM_ERROR("Failed to allocate %d pointers\n", n);
+		r = 1;
+		goto out_cleanup;
+	}
+
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+			     NULL, &vram_obj);
+	if (r) {
+		DRM_ERROR("Failed to create VRAM object\n");
+		goto out_cleanup;
+	}
+	r = radeon_bo_reserve(vram_obj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
+	if (r) {
+		DRM_ERROR("Failed to pin VRAM object\n");
+		goto out_cleanup;
+	}
+	for (i = 0; i < n; i++) {
+		void *gtt_map, *vram_map;
+		void **gtt_start, **gtt_end;
+		void **vram_start, **vram_end;
+
+		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
+		if (r) {
+			DRM_ERROR("Failed to create GTT object %d\n", i);
+			goto out_cleanup;
+		}
+
+		r = radeon_bo_reserve(gtt_obj[i], false);
+		if (unlikely(r != 0))
+			goto out_cleanup;
+		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
+		if (r) {
+			DRM_ERROR("Failed to pin GTT object %d\n", i);
+			goto out_cleanup;
+		}
+
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
+		if (r) {
+			DRM_ERROR("Failed to map GTT object %d\n", i);
+			goto out_cleanup;
+		}
+
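+		/* Fill the GTT object with its own CPU addresses so each
+		 * pointer-sized slot holds a unique, position-dependent
+		 * pattern.
+		 */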
+		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
+		     gtt_start < gtt_end;
+		     gtt_start++)
+			*gtt_start = gtt_start;
+
+		radeon_bo_kunmap(gtt_obj[i]);
+
+		if (ring == R600_RING_TYPE_DMA_INDEX)
+			r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+		else
+			r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+		if (r) {
+			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
+			goto out_cleanup;
+		}
+
+		r = radeon_fence_wait(fence, false);
+		if (r) {
+			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
+			goto out_cleanup;
+		}
+
+		radeon_fence_unref(&fence);
+
+		r = radeon_bo_kmap(vram_obj, &vram_map);
+		if (r) {
+			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
+			goto out_cleanup;
+		}
+
+		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		     vram_start = vram_map, vram_end = vram_map + size;
+		     vram_start < vram_end;
+		     gtt_start++, vram_start++) {
+			if (*vram_start != gtt_start) {
+				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
+					  "expected 0x%p (GTT/VRAM offset "
+					  "0x%16llx/0x%16llx)\n",
+					  i, *vram_start, gtt_start,
+					  (unsigned long long)
+					  (gtt_addr - rdev->mc.gtt_start +
+					   (void*)gtt_start - gtt_map),
+					  (unsigned long long)
+					  (vram_addr - rdev->mc.vram_start +
+					   (void*)gtt_start - gtt_map));
+				radeon_bo_kunmap(vram_obj);
+				goto out_cleanup;
+			}
+			*vram_start = vram_start;
+		}
+
+		radeon_bo_kunmap(vram_obj);
+
+		if (ring == R600_RING_TYPE_DMA_INDEX)
+			r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+		else
+			r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+		if (r) {
+			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
+			goto out_cleanup;
+		}
+
+		r = radeon_fence_wait(fence, false);
+		if (r) {
+			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
+			goto out_cleanup;
+		}
+
+		radeon_fence_unref(&fence);
+
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
+		if (r) {
+			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
+			goto out_cleanup;
+		}
+
+		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		     vram_start = vram_map, vram_end = vram_map + size;
+		     gtt_start < gtt_end;
+		     gtt_start++, vram_start++) {
+			if (*gtt_start != vram_start) {
+				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
+					  "expected 0x%p (VRAM/GTT offset "
+					  "0x%16llx/0x%16llx)\n",
+					  i, *gtt_start, vram_start,
+					  (unsigned long long)
+					  (vram_addr - rdev->mc.vram_start +
+					   (void*)vram_start - vram_map),
+					  (unsigned long long)
+					  (gtt_addr - rdev->mc.gtt_start +
+					   (void*)vram_start - vram_map));
+				radeon_bo_kunmap(gtt_obj[i]);
+				goto out_cleanup;
+			}
+		}
+
+		radeon_bo_kunmap(gtt_obj[i]);
+
+		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
+			 gtt_addr - rdev->mc.gtt_start);
+	}
+
+out_cleanup:
+	if (vram_obj) {
+		if (radeon_bo_is_reserved(vram_obj)) {
+			radeon_bo_unpin(vram_obj);
+			radeon_bo_unreserve(vram_obj);
+		}
+		radeon_bo_unref(&vram_obj);
+	}
+	if (gtt_obj) {
+		for (i = 0; i < n; i++) {
+			if (gtt_obj[i]) {
+				if (radeon_bo_is_reserved(gtt_obj[i])) {
+					radeon_bo_unpin(gtt_obj[i]);
+					radeon_bo_unreserve(gtt_obj[i]);
+				}
+				radeon_bo_unref(&gtt_obj[i]);
+			}
+		}
+		kfree(gtt_obj);
+	}
+	if (fence) {
+		radeon_fence_unref(&fence);
+	}
+	if (r) {
+		printk(KERN_WARNING "Error while testing BO move.\n");
+	}
+}
+
+void radeon_test_moves(struct radeon_device *rdev)
+{
+	if (rdev->asic->copy.dma)
+		radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
+	if (rdev->asic->copy.blit)
+		radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
+}
+
+static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
+					     struct radeon_ring *ring,
+					     struct radeon_fence **fence)
+{
+	int r;
+
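+	/* UVD fences ride on dummy create/destroy messages rather than
+	 * a plain ring write.
+	 */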
+	if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
+		r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+		if (r) {
+			DRM_ERROR("Failed to get dummy create msg\n");
+			return r;
+		}
+
+		r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, fence);
+		if (r) {
+			DRM_ERROR("Failed to get dummy destroy msg\n");
+			return r;
+		}
+	} else {
+		r = radeon_ring_lock(rdev, ring, 64);
+		if (r) {
+			DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
+			return r;
+		}
+		radeon_fence_emit(rdev, fence, ring->idx);
+		radeon_ring_unlock_commit(rdev, ring);
+	}
+	return 0;
+}
+
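+/* Verify semaphore synchronization between two rings: fences emitted
+ * behind a semaphore wait on ringA must not signal until ringB signals
+ * the semaphore.
+ */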
+void radeon_test_ring_sync(struct radeon_device *rdev,
+			   struct radeon_ring *ringA,
+			   struct radeon_ring *ringB)
+{
+	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
+	struct radeon_semaphore *semaphore = NULL;
+	int r;
+
+	r = radeon_semaphore_create(rdev, &semaphore);
+	if (r) {
+		DRM_ERROR("Failed to create semaphore\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringA, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringA);
+
+	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
+	if (r)
+		goto out_cleanup;
+
+	r = radeon_ring_lock(rdev, ringA, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringA);
+
+	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
+	if (r)
+		goto out_cleanup;
+
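+	/* Give ring A ample time; the fences must still be pending while
+	 * the semaphore is unsignaled.
+	 */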
+	mdelay(1000);
+
+	if (radeon_fence_signaled(fence1)) {
+		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringB, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringB);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringB);
+
+	r = radeon_fence_wait(fence1, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence 1\n");
+		goto out_cleanup;
+	}
+
+	mdelay(1000);
+
+	if (radeon_fence_signaled(fence2)) {
+		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringB, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringB);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringB);
+
+	r = radeon_fence_wait(fence2, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence 1\n");
+		goto out_cleanup;
+	}
+
+out_cleanup:
+	radeon_semaphore_free(rdev, &semaphore, NULL);
+
+	if (fence1)
+		radeon_fence_unref(&fence1);
+
+	if (fence2)
+		radeon_fence_unref(&fence2);
+
+	if (r)
+		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
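+/* Same idea with three rings: two waiters block on one semaphore while
+ * a third ring signals it twice; exactly one waiter should wake up per
+ * signal.
+ */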
+static void radeon_test_ring_sync2(struct radeon_device *rdev,
+			    struct radeon_ring *ringA,
+			    struct radeon_ring *ringB,
+			    struct radeon_ring *ringC)
+{
+	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
+	struct radeon_semaphore *semaphore = NULL;
+	bool sigA, sigB;
+	int i, r;
+
+	r = radeon_semaphore_create(rdev, &semaphore);
+	if (r) {
+		DRM_ERROR("Failed to create semaphore\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringA, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringA);
+
+	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
+	if (r)
+		goto out_cleanup;
+
+	r = radeon_ring_lock(rdev, ringB, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringB);
+	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
+	if (r)
+		goto out_cleanup;
+
+	mdelay(1000);
+
+	if (radeon_fence_signaled(fenceA)) {
+		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+	if (radeon_fence_signaled(fenceB)) {
+		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
+		goto out_cleanup;
+	}
+
+	r = radeon_ring_lock(rdev, ringC, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringC);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringC);
+
+	for (i = 0; i < 30; ++i) {
+		mdelay(100);
+		sigA = radeon_fence_signaled(fenceA);
+		sigB = radeon_fence_signaled(fenceB);
+		if (sigA || sigB)
+			break;
+	}
+
+	if (!sigA && !sigB) {
+		DRM_ERROR("Neither fence A nor B has been signaled\n");
+		goto out_cleanup;
+	} else if (sigA && sigB) {
+		DRM_ERROR("Both fence A and B has been signaled\n");
+		goto out_cleanup;
+	}
+
+	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
+
+	r = radeon_ring_lock(rdev, ringC, 64);
+	if (r) {
+		DRM_ERROR("Failed to lock ring B %p\n", ringC);
+		goto out_cleanup;
+	}
+	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
+	radeon_ring_unlock_commit(rdev, ringC);
+
+	mdelay(1000);
+
+	r = radeon_fence_wait(fenceA, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence A\n");
+		goto out_cleanup;
+	}
+	r = radeon_fence_wait(fenceB, false);
+	if (r) {
+		DRM_ERROR("Failed to wait for sync fence B\n");
+		goto out_cleanup;
+	}
+
+out_cleanup:
+	radeon_semaphore_free(rdev, &semaphore, NULL);
+
+	if (fenceA)
+		radeon_fence_unref(&fenceA);
+
+	if (fenceB)
+		radeon_fence_unref(&fenceB);
+
+	if (r)
+		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
+void radeon_test_syncing(struct radeon_device *rdev)
+{
+	int i, j, k;
+
+	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_ring *ringA = &rdev->ring[i];
+		if (!ringA->ready)
+			continue;
+
+		for (j = 0; j < i; ++j) {
+			struct radeon_ring *ringB = &rdev->ring[j];
+			if (!ringB->ready)
+				continue;
+
+			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
+			radeon_test_ring_sync(rdev, ringA, ringB);
+
+			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
+			radeon_test_ring_sync(rdev, ringB, ringA);
+
+			for (k = 0; k < j; ++k) {
+				struct radeon_ring *ringC = &rdev->ring[k];
+				if (!ringC->ready)
+					continue;
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
+				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
+				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
+				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
+				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
+				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
+
+				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
+				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
+			}
+		}
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_trace.h b/linux-imx/drivers/gpu/drm/radeon/radeon_trace.h
new file mode 100644
index 0000000..eafd816
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_trace.h
@@ -0,0 +1,82 @@
+#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RADEON_TRACE_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM radeon
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE radeon_trace
+
+TRACE_EVENT(radeon_bo_create,
+	    TP_PROTO(struct radeon_bo *bo),
+	    TP_ARGS(bo),
+	    TP_STRUCT__entry(
+			     __field(struct radeon_bo *, bo)
+			     __field(u32, pages)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->bo = bo;
+			   __entry->pages = bo->tbo.num_pages;
+			   ),
+	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+);
+
+DECLARE_EVENT_CLASS(radeon_fence_request,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_trace_points.c b/linux-imx/drivers/gpu/drm/radeon/radeon_trace_points.c
new file mode 100644
index 0000000..e51d357
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_trace_points.c
@@ -0,0 +1,9 @@
+/* Copyright Red Hat Inc 2010.
+ * Author : Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
+
+#define CREATE_TRACE_POINTS
+#include "radeon_trace.h"
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_ttm.c b/linux-imx/drivers/gpu/drm/radeon/radeon_ttm.c
new file mode 100644
index 0000000..6c0ce89
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -0,0 +1,893 @@
+/*
+ * Copyright 2009 Jerome Glisse.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Jerome Glisse <glisse@freedesktop.org>
+ *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ *    Dave Airlie
+ */
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+#include <ttm/ttm_page_alloc.h>
+#include <drm/drmP.h>
+#include <drm/radeon_drm.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/swiotlb.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
+
+static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
+{
+	struct radeon_mman *mman;
+	struct radeon_device *rdev;
+
+	mman = container_of(bdev, struct radeon_mman, bdev);
+	rdev = container_of(mman, struct radeon_device, mman);
+	return rdev;
+}
+
+
+/*
+ * Global memory.
+ */
+static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	return ttm_mem_global_init(ref->object);
+}
+
+static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+static int radeon_ttm_global_init(struct radeon_device *rdev)
+{
+	struct drm_global_reference *global_ref;
+	int r;
+
+	rdev->mman.mem_global_referenced = false;
+	global_ref = &rdev->mman.mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &radeon_ttm_mem_global_init;
+	global_ref->release = &radeon_ttm_mem_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM memory accounting "
+			  "subsystem.\n");
+		return r;
+	}
+
+	rdev->mman.bo_global_ref.mem_glob =
+		rdev->mman.mem_global_ref.object;
+	global_ref = &rdev->mman.bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	r = drm_global_item_ref(global_ref);
+	if (r != 0) {
+		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+		drm_global_item_unref(&rdev->mman.mem_global_ref);
+		return r;
+	}
+
+	rdev->mman.mem_global_referenced = true;
+	return 0;
+}
+
+static void radeon_ttm_global_fini(struct radeon_device *rdev)
+{
+	if (rdev->mman.mem_global_referenced) {
+		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
+		drm_global_item_unref(&rdev->mman.mem_global_ref);
+		rdev->mman.mem_global_referenced = false;
+	}
+}
+
+static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+	return 0;
+}
+
+static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+				struct ttm_mem_type_manager *man)
+{
+	struct radeon_device *rdev;
+
+	rdev = radeon_get_rdev(bdev);
+
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_TT:
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = rdev->mc.gtt_start;
+		man->available_caching = TTM_PL_MASK_CACHING;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
+#if __OS_HAS_AGP
+		if (rdev->flags & RADEON_IS_AGP) {
+			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
+				DRM_ERROR("AGP is not enabled for memory type %u\n",
+					  (unsigned)type);
+				return -EINVAL;
+			}
+			if (!rdev->ddev->agp->cant_use_aperture)
+				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+			man->available_caching = TTM_PL_FLAG_UNCACHED |
+						 TTM_PL_FLAG_WC;
+			man->default_caching = TTM_PL_FLAG_WC;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = rdev->mc.vram_start;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+			     TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+		man->default_caching = TTM_PL_FLAG_WC;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void radeon_evict_flags(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
+{
+	struct radeon_bo *rbo;
+	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
+		placement->fpfn = 0;
+		placement->lpfn = 0;
+		placement->placement = &placements;
+		placement->busy_placement = &placements;
+		placement->num_placement = 1;
+		placement->num_busy_placement = 1;
+		return;
+	}
+	rbo = container_of(bo, struct radeon_bo, tbo);
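+	/* Evict VRAM contents to GTT while the GFX ring can still blit,
+	 * otherwise fall back to CPU-accessible system memory.
+	 */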
+	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
+			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+		else
+			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		break;
+	case TTM_PL_TT:
+	default:
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+	}
+	*placement = rbo->placement;
+}
+
+static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	return 0;
+}
+
+static void radeon_move_null(struct ttm_buffer_object *bo,
+			     struct ttm_mem_reg *new_mem)
+{
+	struct ttm_mem_reg *old_mem = &bo->mem;
+
+	BUG_ON(old_mem->mm_node != NULL);
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+}
+
+static int radeon_move_blit(struct ttm_buffer_object *bo,
+			bool evict, bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem,
+			struct ttm_mem_reg *old_mem)
+{
+	struct radeon_device *rdev;
+	uint64_t old_start, new_start;
+	struct radeon_fence *fence;
+	int r, ridx;
+
+	rdev = radeon_get_rdev(bo->bdev);
+	ridx = radeon_copy_ring_index(rdev);
+	old_start = old_mem->start << PAGE_SHIFT;
+	new_start = new_mem->start << PAGE_SHIFT;
+
+	switch (old_mem->mem_type) {
+	case TTM_PL_VRAM:
+		old_start += rdev->mc.vram_start;
+		break;
+	case TTM_PL_TT:
+		old_start += rdev->mc.gtt_start;
+		break;
+	default:
+		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+		return -EINVAL;
+	}
+	switch (new_mem->mem_type) {
+	case TTM_PL_VRAM:
+		new_start += rdev->mc.vram_start;
+		break;
+	case TTM_PL_TT:
+		new_start += rdev->mc.gtt_start;
+		break;
+	default:
+		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+		return -EINVAL;
+	}
+	if (!rdev->ring[ridx].ready) {
+		DRM_ERROR("Trying to move memory with ring turned off.\n");
+		return -EINVAL;
+	}
+
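+	/* The copy below works in GPU pages; the BUILD_BUG_ON guarantees
+	 * the byte-to-GPU-page conversion is exact.
+	 */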
+	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+	/* sync other rings */
+	fence = bo->sync_obj;
+	r = radeon_copy(rdev, old_start, new_start,
+			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+			&fence);
+	/* FIXME: handle copy error */
+	r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
+				      evict, no_wait_gpu, new_mem);
+	radeon_fence_unref(&fence);
+	return r;
+}
+
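+/* VRAM -> system moves cannot be done in one step: bounce through a
+ * GTT placement so the blit engine can copy out of VRAM, then let TTM
+ * move the now GTT-resident pages to system memory.
+ */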
+static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
+				bool evict, bool interruptible,
+				bool no_wait_gpu,
+				struct ttm_mem_reg *new_mem)
+{
+	struct radeon_device *rdev;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_mem_reg tmp_mem;
+	u32 placements;
+	struct ttm_placement placement;
+	int r;
+
+	rdev = radeon_get_rdev(bo->bdev);
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+			     interruptible, no_wait_gpu);
+	if (unlikely(r)) {
+		return r;
+	}
+
+	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+
+	r = ttm_tt_bind(bo->ttm, &tmp_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+out_cleanup:
+	ttm_bo_mem_put(bo, &tmp_mem);
+	return r;
+}
+
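+/* The mirror case: bind the pages into GTT first, then blit from GTT
+ * into VRAM.
+ */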
+static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
+				bool evict, bool interruptible,
+				bool no_wait_gpu,
+				struct ttm_mem_reg *new_mem)
+{
+	struct radeon_device *rdev;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_mem_reg tmp_mem;
+	struct ttm_placement placement;
+	u32 placements;
+	int r;
+
+	rdev = radeon_get_rdev(bo->bdev);
+	tmp_mem = *new_mem;
+	tmp_mem.mm_node = NULL;
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+			     interruptible, no_wait_gpu);
+	if (unlikely(r)) {
+		return r;
+	}
+	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+	if (unlikely(r)) {
+		goto out_cleanup;
+	}
+out_cleanup:
+	ttm_bo_mem_put(bo, &tmp_mem);
+	return r;
+}
+
+static int radeon_bo_move(struct ttm_buffer_object *bo,
+			bool evict, bool interruptible,
+			bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem)
+{
+	struct radeon_device *rdev;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	int r;
+
+	rdev = radeon_get_rdev(bo->bdev);
+	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+		radeon_move_null(bo, new_mem);
+		return 0;
+	}
+	if ((old_mem->mem_type == TTM_PL_TT &&
+	     new_mem->mem_type == TTM_PL_SYSTEM) ||
+	    (old_mem->mem_type == TTM_PL_SYSTEM &&
+	     new_mem->mem_type == TTM_PL_TT)) {
+		/* bind is enough */
+		radeon_move_null(bo, new_mem);
+		return 0;
+	}
+	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
+	    rdev->asic->copy.copy == NULL) {
+		/* use memcpy */
+		goto memcpy;
+	}
+
+	if (old_mem->mem_type == TTM_PL_VRAM &&
+	    new_mem->mem_type == TTM_PL_SYSTEM) {
+		r = radeon_move_vram_ram(bo, evict, interruptible,
+					no_wait_gpu, new_mem);
+	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
+		   new_mem->mem_type == TTM_PL_VRAM) {
+		r = radeon_move_ram_vram(bo, evict, interruptible,
+					    no_wait_gpu, new_mem);
+	} else {
+		r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
+	}
+
+	if (r) {
+memcpy:
+		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+	}
+	return r;
+}
+
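+/*
+ * Fill in the bus placement for a memory region so TTM can CPU-map it.
+ * System pages (and GTT pages outside an AGP aperture) are not I/O
+ * memory; VRAM is reached through the PCI aperture and must fall within
+ * its CPU-visible part.
+ */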
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_TT:
+#if __OS_HAS_AGP
+		if (rdev->flags & RADEON_IS_AGP) {
+			/* RADEON_IS_AGP is set only if AGP is active */
+			mem->bus.offset = mem->start << PAGE_SHIFT;
+			mem->bus.base = rdev->mc.agp_base;
+			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		/* check if it's visible */
+		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+			return -EINVAL;
+		mem->bus.base = rdev->mc.aper_base;
+		mem->bus.is_iomem = true;
+#ifdef __alpha__
+		/*
+		 * Alpha: use bus.addr to hold the ioremap() return,
+		 * so we can modify bus.base below.
+		 */
+		if (mem->placement & TTM_PL_FLAG_WC)
+			mem->bus.addr =
+				ioremap_wc(mem->bus.base + mem->bus.offset,
+					   mem->bus.size);
+		else
+			mem->bus.addr =
+				ioremap_nocache(mem->bus.base + mem->bus.offset,
+						mem->bus.size);
+
+		/*
+		 * Alpha: Use just the bus offset plus
+		 * the hose/domain memory base for bus.base.
+		 * It then can be used to build PTEs for VRAM
+		 * access, as done in ttm_bo_vm_fault().
+		 */
+		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
+			rdev->ddev->hose->dense_mem_base;
+#endif
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
+{
+	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
+}
+
+static int radeon_sync_obj_flush(void *sync_obj)
+{
+	return 0;
+}
+
+static void radeon_sync_obj_unref(void **sync_obj)
+{
+	radeon_fence_unref((struct radeon_fence **)sync_obj);
+}
+
+static void *radeon_sync_obj_ref(void *sync_obj)
+{
+	return radeon_fence_ref((struct radeon_fence *)sync_obj);
+}
+
+static bool radeon_sync_obj_signaled(void *sync_obj)
+{
+	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
+}
+
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_tt {
+	struct ttm_dma_tt		ttm;
+	struct radeon_device		*rdev;
+	u64				offset;
+};
+
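+/*
+ * Binding programs the GART: the placement's first page number gives
+ * the byte offset into the GTT aperture, and radeon_gart_bind() writes
+ * the per-page DMA addresses into the GART page table at that offset.
+ */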
+static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
+				   struct ttm_mem_reg *bo_mem)
+{
+	struct radeon_ttm_tt *gtt = (void*)ttm;
+	int r;
+
+	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+	if (!ttm->num_pages) {
+		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		     ttm->num_pages, bo_mem, ttm);
+	}
+	r = radeon_gart_bind(gtt->rdev, gtt->offset,
+			     ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+	if (r) {
+		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+			  ttm->num_pages, (unsigned)gtt->offset);
+		return r;
+	}
+	return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+	return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	ttm_dma_tt_fini(&gtt->ttm);
+	kfree(gtt);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+	.bind = &radeon_ttm_backend_bind,
+	.unbind = &radeon_ttm_backend_unbind,
+	.destroy = &radeon_ttm_backend_destroy,
+};
+
+static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
+				    unsigned long size, uint32_t page_flags,
+				    struct page *dummy_read_page)
+{
+	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt;
+
+	rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+	if (rdev->flags & RADEON_IS_AGP) {
+		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
+					 size, page_flags, dummy_read_page);
+	}
+#endif
+
+	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
+	if (gtt == NULL) {
+		return NULL;
+	}
+	gtt->ttm.ttm.func = &radeon_backend_func;
+	gtt->rdev = rdev;
+	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(gtt);
+		return NULL;
+	}
+	return &gtt->ttm.ttm;
+}
+
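+/*
+ * Page population takes one of three paths: AGP systems delegate to the
+ * TTM AGP helpers, SWIOTLB setups use the coherent DMA pool, and
+ * everything else pulls pages from the TTM pool and maps them one by
+ * one with pci_map_page(), unwinding the mappings already made if one
+ * of them fails.
+ */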
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	unsigned i;
+	int r;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	if (slave && ttm->sg) {
+		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+						 gtt->ttm.dma_address, ttm->num_pages);
+		ttm->state = tt_unbound;
+		return 0;
+	}
+
+	rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+	if (rdev->flags & RADEON_IS_AGP) {
+		return ttm_agp_tt_populate(ttm);
+	}
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		return ttm_dma_populate(&gtt->ttm, rdev->dev);
+	}
+#endif
+
+	r = ttm_pool_populate(ttm);
+	if (r) {
+		return r;
+	}
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+						       0, PAGE_SIZE,
+						       PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+			while (i--) {
+				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+				gtt->ttm.dma_address[i] = 0;
+			}
+			ttm_pool_unpopulate(ttm);
+			return -EFAULT;
+		}
+	}
+	return 0;
+}
+
+static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev;
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	unsigned i;
+	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+
+	if (slave)
+		return;
+
+	rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+	if (rdev->flags & RADEON_IS_AGP) {
+		ttm_agp_tt_unpopulate(ttm);
+		return;
+	}
+#endif
+
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
+		return;
+	}
+#endif
+
+	for (i = 0; i < ttm->num_pages; i++) {
+		if (gtt->ttm.dma_address[i]) {
+			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		}
+	}
+
+	ttm_pool_unpopulate(ttm);
+}
+
+static struct ttm_bo_driver radeon_bo_driver = {
+	.ttm_tt_create = &radeon_ttm_tt_create,
+	.ttm_tt_populate = &radeon_ttm_tt_populate,
+	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
+	.invalidate_caches = &radeon_invalidate_caches,
+	.init_mem_type = &radeon_init_mem_type,
+	.evict_flags = &radeon_evict_flags,
+	.move = &radeon_bo_move,
+	.verify_access = &radeon_verify_access,
+	.sync_obj_signaled = &radeon_sync_obj_signaled,
+	.sync_obj_wait = &radeon_sync_obj_wait,
+	.sync_obj_flush = &radeon_sync_obj_flush,
+	.sync_obj_unref = &radeon_sync_obj_unref,
+	.sync_obj_ref = &radeon_sync_obj_ref,
+	.move_notify = &radeon_bo_move_notify,
+	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
+	.io_mem_free = &radeon_ttm_io_mem_free,
+};
+
+int radeon_ttm_init(struct radeon_device *rdev)
+{
+	int r;
+
+	r = radeon_ttm_global_init(rdev);
+	if (r) {
+		return r;
+	}
+	/* No other user of this address space, so set it to 0 */
+	r = ttm_bo_device_init(&rdev->mman.bdev,
+			       rdev->mman.bo_global_ref.ref.object,
+			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
+			       rdev->need_dma32);
+	if (r) {
+		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+		return r;
+	}
+	rdev->mman.initialized = true;
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+				rdev->mc.real_vram_size >> PAGE_SHIFT);
+	if (r) {
+		DRM_ERROR("Failed initializing VRAM heap.\n");
+		return r;
+	}
+	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
+			     RADEON_GEM_DOMAIN_VRAM,
+			     NULL, &rdev->stollen_vga_memory);
+	if (r) {
+		return r;
+	}
+	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+	if (r)
+		return r;
+	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+	radeon_bo_unreserve(rdev->stollen_vga_memory);
+	if (r) {
+		radeon_bo_unref(&rdev->stollen_vga_memory);
+		return r;
+	}
+	DRM_INFO("radeon: %uM of VRAM memory ready\n",
+		 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
+				rdev->mc.gtt_size >> PAGE_SHIFT);
+	if (r) {
+		DRM_ERROR("Failed initializing GTT heap.\n");
+		return r;
+	}
+	DRM_INFO("radeon: %uM of GTT memory ready.\n",
+		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
+	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+
+	r = radeon_ttm_debugfs_init(rdev);
+	if (r) {
+		DRM_ERROR("Failed to init debugfs\n");
+		return r;
+	}
+	return 0;
+}
+
+void radeon_ttm_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (!rdev->mman.initialized)
+		return;
+	if (rdev->stollen_vga_memory) {
+		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+		if (r == 0) {
+			radeon_bo_unpin(rdev->stollen_vga_memory);
+			radeon_bo_unreserve(rdev->stollen_vga_memory);
+		}
+		radeon_bo_unref(&rdev->stollen_vga_memory);
+	}
+	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
+	ttm_bo_device_release(&rdev->mman.bdev);
+	radeon_gart_fini(rdev);
+	radeon_ttm_global_fini(rdev);
+	rdev->mman.initialized = false;
+	DRM_INFO("radeon: ttm finalized\n");
+}
+
+/* this should only be called at bootup or when userspace
+ * isn't running */
+void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
+{
+	struct ttm_mem_type_manager *man;
+
+	if (!rdev->mman.initialized)
+		return;
+
+	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+	/* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
+	man->size = size >> PAGE_SHIFT;
+}
+
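+/*
+ * TTM's vm_ops are copied once at first mmap time and the fault handler
+ * is replaced with a wrapper that holds pm.mclk_lock for reading, so
+ * power management cannot reclock memory while a CPU page fault on a
+ * mapped buffer is being serviced.
+ */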
+static struct vm_operations_struct radeon_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops = NULL;
+
+static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo;
+	struct radeon_device *rdev;
+	int r;
+
+	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	if (bo == NULL) {
+		return VM_FAULT_NOPAGE;
+	}
+	rdev = radeon_get_rdev(bo->bdev);
+	down_read(&rdev->pm.mclk_lock);
+	r = ttm_vm_ops->fault(vma, vmf);
+	up_read(&rdev->pm.mclk_lock);
+	return r;
+}
+
+int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct radeon_device *rdev;
+	int r;
+
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+		return drm_mmap(filp, vma);
+	}
+
+	file_priv = filp->private_data;
+	rdev = file_priv->minor->dev->dev_private;
+	if (rdev == NULL) {
+		return -EINVAL;
+	}
+	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
+	if (unlikely(r != 0)) {
+		return r;
+	}
+	if (unlikely(ttm_vm_ops == NULL)) {
+		ttm_vm_ops = vma->vm_ops;
+		radeon_ttm_vm_ops = *ttm_vm_ops;
+		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
+	}
+	vma->vm_ops = &radeon_ttm_vm_ops;
+	return 0;
+}
+
+
+#define RADEON_DEBUGFS_MEM_TYPES 2
+
+#if defined(CONFIG_DEBUG_FS)
+static int radeon_mm_dump_table(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int ret;
+	struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+
+	spin_lock(&glob->lru_lock);
+	ret = drm_mm_dump_table(m, mm);
+	spin_unlock(&glob->lru_lock);
+	return ret;
+}
+#endif
+
+static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
+	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
+	unsigned i;
+
+	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
+		if (i == 0)
+			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
+		else
+			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
+		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
+		radeon_mem_types_list[i].driver_features = 0;
+		if (i == 0)
+			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+		else
+			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
+
+	}
+	/* Add ttm page pool to debugfs */
+	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+	radeon_mem_types_list[i].driver_features = 0;
+	radeon_mem_types_list[i++].data = NULL;
+#ifdef CONFIG_SWIOTLB
+	if (swiotlb_nr_tbl()) {
+		sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
+		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+		radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
+		radeon_mem_types_list[i].driver_features = 0;
+		radeon_mem_types_list[i++].data = NULL;
+	}
+#endif
+	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
+
+#endif
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/radeon_uvd.c b/linux-imx/drivers/gpu/drm/radeon/radeon_uvd.c
new file mode 100644
index 0000000..5715429
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -0,0 +1,875 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ *    Christian König <deathsimple@vodafone.de>
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "radeon.h"
+#include "r600d.h"
+
+/* 1 second timeout */
+#define UVD_IDLE_TIMEOUT_MS	1000
+
+/* Firmware Names */
+#define FIRMWARE_RV710		"radeon/RV710_uvd.bin"
+#define FIRMWARE_CYPRESS	"radeon/CYPRESS_uvd.bin"
+#define FIRMWARE_SUMO		"radeon/SUMO_uvd.bin"
+#define FIRMWARE_TAHITI		"radeon/TAHITI_uvd.bin"
+
+MODULE_FIRMWARE(FIRMWARE_RV710);
+MODULE_FIRMWARE(FIRMWARE_CYPRESS);
+MODULE_FIRMWARE(FIRMWARE_SUMO);
+MODULE_FIRMWARE(FIRMWARE_TAHITI);
+
+static void radeon_uvd_idle_work_handler(struct work_struct *work);
+
+int radeon_uvd_init(struct radeon_device *rdev)
+{
+	struct platform_device *pdev;
+	unsigned long bo_size;
+	const char *fw_name;
+	int i, r;
+
+	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
+
+	pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
+	r = IS_ERR(pdev);
+	if (r) {
+		dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n");
+		return -EINVAL;
+	}
+
+	switch (rdev->family) {
+	case CHIP_RV710:
+	case CHIP_RV730:
+	case CHIP_RV740:
+		fw_name = FIRMWARE_RV710;
+		break;
+
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+	case CHIP_JUNIPER:
+	case CHIP_REDWOOD:
+	case CHIP_CEDAR:
+		fw_name = FIRMWARE_CYPRESS;
+		break;
+
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+	case CHIP_PALM:
+	case CHIP_CAYMAN:
+	case CHIP_BARTS:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
+		fw_name = FIRMWARE_SUMO;
+		break;
+
+	case CHIP_TAHITI:
+	case CHIP_VERDE:
+	case CHIP_PITCAIRN:
+	case CHIP_ARUBA:
+		fw_name = FIRMWARE_TAHITI;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev);
+	if (r) {
+		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
+			fw_name);
+		platform_device_unregister(pdev);
+		return r;
+	}
+
+	platform_device_unregister(pdev);
+
+	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
+		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
+	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
+			     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
+	if (r) {
+		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
+		return r;
+	}
+
+	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+	if (r) {
+		radeon_bo_unref(&rdev->uvd.vcpu_bo);
+		dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
+		return r;
+	}
+
+	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
+			  &rdev->uvd.gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+		radeon_bo_unref(&rdev->uvd.vcpu_bo);
+		dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
+		return r;
+	}
+
+	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+	if (r) {
+		dev_err(rdev->dev, "(%d) UVD map failed\n", r);
+		return r;
+	}
+
+	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+		atomic_set(&rdev->uvd.handles[i], 0);
+		rdev->uvd.filp[i] = NULL;
+	}
+
+	return 0;
+}
+
+void radeon_uvd_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->uvd.vcpu_bo == NULL)
+		return;
+
+	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+	if (!r) {
+		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+		radeon_bo_unpin(rdev->uvd.vcpu_bo);
+		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+	}
+
+	radeon_bo_unref(&rdev->uvd.vcpu_bo);
+
+	release_firmware(rdev->uvd_fw);
+}
+
+int radeon_uvd_suspend(struct radeon_device *rdev)
+{
+	unsigned size;
+	void *ptr;
+	int i;
+
+	if (rdev->uvd.vcpu_bo == NULL)
+		return 0;
+
+	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+		if (atomic_read(&rdev->uvd.handles[i]))
+			break;
+
+	if (i == RADEON_MAX_UVD_HANDLES)
+		return 0;
+
+	size = radeon_bo_size(rdev->uvd.vcpu_bo);
+	size -= rdev->uvd_fw->size;
+
+	ptr = rdev->uvd.cpu_addr;
+	ptr += rdev->uvd_fw->size;
+
+	rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+	if (!rdev->uvd.saved_bo)
+		return -ENOMEM;
+	memcpy(rdev->uvd.saved_bo, ptr, size);
+
+	return 0;
+}
+
+int radeon_uvd_resume(struct radeon_device *rdev)
+{
+	unsigned size;
+	void *ptr;
+
+	if (rdev->uvd.vcpu_bo == NULL)
+		return -EINVAL;
+
+	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+	size = radeon_bo_size(rdev->uvd.vcpu_bo);
+	size -= rdev->uvd_fw->size;
+
+	ptr = rdev->uvd.cpu_addr;
+	ptr += rdev->uvd_fw->size;
+
+	if (rdev->uvd.saved_bo != NULL) {
+		memcpy(ptr, rdev->uvd.saved_bo, size);
+		kfree(rdev->uvd.saved_bo);
+		rdev->uvd.saved_bo = NULL;
+	} else
+		memset(ptr, 0, size);
+
+	return 0;
+}
+
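+/*
+ * Keep UVD buffers in the first 256MB of VRAM: UVD addressing is
+ * limited to 256MB segments (see the checks in radeon_uvd_cs_reloc()),
+ * so the allowed page range is clamped accordingly.
+ */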
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
+{
+	rbo->placement.fpfn = 0 >> PAGE_SHIFT;
+	rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+}
+
+void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
+{
+	int i, r;
+	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+		if (handle != 0 && rdev->uvd.filp[i] == filp) {
+			struct radeon_fence *fence;
+
+			radeon_uvd_note_usage(rdev);
+
+			r = radeon_uvd_get_destroy_msg(rdev,
+				R600_RING_TYPE_UVD_INDEX, handle, &fence);
+			if (r) {
+				DRM_ERROR("Error destroying UVD (%d)!\n", r);
+				continue;
+			}
+
+			radeon_fence_wait(fence, false);
+			radeon_fence_unref(&fence);
+
+			rdev->uvd.filp[i] = NULL;
+			atomic_set(&rdev->uvd.handles[i], 0);
+		}
+	}
+}
+
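+/*
+ * Validate a UVD decode message: dwords at fixed offsets describe the
+ * stream type, target size and decoded picture buffer (dpb) size, and
+ * the minimum dpb size is recomputed per codec so userspace cannot make
+ * the firmware write past the buffers it supplied.
+ */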
+static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
+{
+	unsigned stream_type = msg[4];
+	unsigned width = msg[6];
+	unsigned height = msg[7];
+	unsigned dpb_size = msg[9];
+	unsigned pitch = msg[28];
+
+	unsigned width_in_mb = width / 16;
+	unsigned height_in_mb = ALIGN(height / 16, 2);
+
+	unsigned image_size, tmp, min_dpb_size;
+
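+	/* luma plane plus half-sized 4:2:0 chroma, aligned to 1024 bytes */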
+	image_size = width * height;
+	image_size += image_size / 2;
+	image_size = ALIGN(image_size, 1024);
+
+	switch (stream_type) {
+	case 0: /* H264 */
+
+		/* reference picture buffer */
+		min_dpb_size = image_size * 17;
+
+		/* macroblock context buffer */
+		min_dpb_size += width_in_mb * height_in_mb * 17 * 192;
+
+		/* IT surface buffer */
+		min_dpb_size += width_in_mb * height_in_mb * 32;
+		break;
+
+	case 1: /* VC1 */
+
+		/* reference picture buffer */
+		min_dpb_size = image_size * 3;
+
+		/* CONTEXT_BUFFER */
+		min_dpb_size += width_in_mb * height_in_mb * 128;
+
+		/* IT surface buffer */
+		min_dpb_size += width_in_mb * 64;
+
+		/* DB surface buffer */
+		min_dpb_size += width_in_mb * 128;
+
+		/* BP */
+		tmp = max(width_in_mb, height_in_mb);
+		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
+		break;
+
+	case 3: /* MPEG2 */
+
+		/* reference picture buffer */
+		min_dpb_size = image_size * 3;
+		break;
+
+	case 4: /* MPEG4 */
+
+		/* reference picture buffer */
+		min_dpb_size = image_size * 3;
+
+		/* CM */
+		min_dpb_size += width_in_mb * height_in_mb * 64;
+
+		/* IT surface buffer */
+		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
+		break;
+
+	default:
+		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
+		return -EINVAL;
+	}
+
+	if (width > pitch) {
+		DRM_ERROR("Invalid UVD decoding target pitch!\n");
+		return -EINVAL;
+	}
+
+	if (dpb_size < min_dpb_size) {
+		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
+			  dpb_size, min_dpb_size);
+		return -EINVAL;
+	}
+
+	buf_sizes[0x1] = dpb_size;
+	buf_sizes[0x2] = image_size;
+	return 0;
+}
+
+static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
+			     unsigned offset, unsigned buf_sizes[])
+{
+	int32_t *msg, msg_type, handle;
+	void *ptr;
+
+	int i, r;
+
+	if (offset & 0x3F) {
+		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
+		return -EINVAL;
+	}
+
+	if (bo->tbo.sync_obj) {
+		r = radeon_fence_wait(bo->tbo.sync_obj, false);
+		if (r) {
+			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+			return r;
+		}
+	}
+
+	r = radeon_bo_kmap(bo, &ptr);
+	if (r)
+		return r;
+
+	msg = ptr + offset;
+
+	msg_type = msg[1];
+	handle = msg[2];
+
+	if (handle == 0) {
+		DRM_ERROR("Invalid UVD handle!\n");
+		radeon_bo_kunmap(bo);
+		return -EINVAL;
+	}
+
+	if (msg_type == 1) {
+		/* it's a decode msg, calc buffer sizes */
+		r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+		radeon_bo_kunmap(bo);
+		if (r)
+			return r;
+
+	} else if (msg_type == 2) {
+		/* it's a destroy msg, free the handle */
+		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+			atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
+		radeon_bo_kunmap(bo);
+		return 0;
+	} else {
+		/* it's a create msg, no special handling needed */
+		radeon_bo_kunmap(bo);
+	}
+
+	/* create or decode, validate the handle */
+	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+		if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
+			return 0;
+	}
+
+	/* handle not found, try to alloc a new one */
+	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+		if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
+			p->rdev->uvd.filp[i] = p->filp;
+			return 0;
+		}
+	}
+
+	DRM_ERROR("No more free UVD handles!\n");
+	return -EINVAL;
+}
+
+static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
+			       int data0, int data1,
+			       unsigned buf_sizes[])
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	struct radeon_cs_reloc *reloc;
+	unsigned idx, cmd, offset;
+	uint64_t start, end;
+	int r;
+
+	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	offset = radeon_get_ib_value(p, data0);
+	idx = radeon_get_ib_value(p, data1);
+	if (idx >= relocs_chunk->length_dw) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, relocs_chunk->length_dw);
+		return -EINVAL;
+	}
+
+	reloc = p->relocs_ptr[(idx / 4)];
+	start = reloc->lobj.gpu_offset;
+	end = start + radeon_bo_size(reloc->robj);
+	start += offset;
+
+	p->ib.ptr[data0] = start & 0xFFFFFFFF;
+	p->ib.ptr[data1] = start >> 32;
+
+	cmd = radeon_get_ib_value(p, p->idx) >> 1;
+
+	if (cmd < 0x4) {
+		if (end <= start) {
+			DRM_ERROR("invalid reloc offset %X!\n", offset);
+			return -EINVAL;
+		}
+		if ((end - start) < buf_sizes[cmd]) {
+			DRM_ERROR("buffer to small (%d / %d)!\n",
+				  (unsigned)(end - start), buf_sizes[cmd]);
+			return -EINVAL;
+		}
+
+	} else if (cmd != 0x100) {
+		DRM_ERROR("invalid UVD command %X!\n", cmd);
+		return -EINVAL;
+	}
+
+	if ((start >> 28) != ((end - 1) >> 28)) {
+		DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
+			  start, end);
+		return -EINVAL;
+	}
+
+	/* TODO: is this still necessary on NI+ ? */
+	if ((cmd == 0 || cmd == 0x3) &&
+	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
+		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
+			  start, end);
+		return -EINVAL;
+	}
+
+	if (cmd == 0) {
+		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
+		if (r)
+			return r;
+	}
+
+	return 0;
+}
+
+static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
+			     struct radeon_cs_packet *pkt,
+			     int *data0, int *data1,
+			     unsigned buf_sizes[])
+{
+	int i, r;
+
+	p->idx++;
+	for (i = 0; i <= pkt->count; ++i) {
+		switch (pkt->reg + i*4) {
+		case UVD_GPCOM_VCPU_DATA0:
+			*data0 = p->idx;
+			break;
+		case UVD_GPCOM_VCPU_DATA1:
+			*data1 = p->idx;
+			break;
+		case UVD_GPCOM_VCPU_CMD:
+			r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
+			if (r)
+				return r;
+			break;
+		case UVD_ENGINE_CNTL:
+			break;
+		default:
+			DRM_ERROR("Invalid reg 0x%X!\n",
+				  pkt->reg + i*4);
+			return -EINVAL;
+		}
+		p->idx++;
+	}
+	return 0;
+}
+
+int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet pkt;
+	int r, data0 = 0, data1 = 0;
+
+	/* minimum buffer sizes */
+	unsigned buf_sizes[] = {
+		[0x00000000]	=	2048,
+		[0x00000001]	=	32 * 1024 * 1024,
+		[0x00000002]	=	2048 * 1152 * 3,
+		[0x00000003]	=	2048,
+	};
+
+	if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
+		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
+			  p->chunks[p->chunk_ib_idx].length_dw);
+		return -EINVAL;
+	}
+
+	if (p->chunk_relocs_idx == -1) {
+		DRM_ERROR("No relocation chunk !\n");
+		return -EINVAL;
+	}
+
+	do {
+		r = radeon_cs_packet_parse(p, &pkt, p->idx);
+		if (r)
+			return r;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			r = radeon_uvd_cs_reg(p, &pkt, &data0,
+					      &data1, buf_sizes);
+			if (r)
+				return r;
+			break;
+		case RADEON_PACKET_TYPE2:
+			p->idx += pkt.count + 2;
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+			return -EINVAL;
+		}
+	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+	return 0;
+}
+
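+/*
+ * Submit a UVD command message: the BO holding the message is validated
+ * into the UVD segment of VRAM, and a small 16-dword IB then hands its
+ * GPU address to the VCPU through the UVD_GPCOM_VCPU_* registers.
+ */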
+static int radeon_uvd_send_msg(struct radeon_device *rdev,
+			       int ring, struct radeon_bo *bo,
+			       struct radeon_fence **fence)
+{
+	struct ttm_validate_buffer tv;
+	struct list_head head;
+	struct radeon_ib ib;
+	uint64_t addr;
+	int i, r;
+
+	memset(&tv, 0, sizeof(tv));
+	tv.bo = &bo->tbo;
+
+	INIT_LIST_HEAD(&head);
+	list_add(&tv.head, &head);
+
+	r = ttm_eu_reserve_buffers(&head);
+	if (r)
+		return r;
+
+	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
+	radeon_uvd_force_into_uvd_segment(bo);
+
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	if (r) {
+		ttm_eu_backoff_reservation(&head);
+		return r;
+	}
+
+	r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
+	if (r) {
+		ttm_eu_backoff_reservation(&head);
+		return r;
+	}
+
+	addr = radeon_bo_gpu_offset(bo);
+	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
+	ib.ptr[1] = addr;
+	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
+	ib.ptr[3] = addr >> 32;
+	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
+	ib.ptr[5] = 0;
+	for (i = 6; i < 16; ++i)
+		ib.ptr[i] = PACKET2(0);
+	ib.length_dw = 16;
+
+	r = radeon_ib_schedule(rdev, &ib, NULL);
+	if (r) {
+		ttm_eu_backoff_reservation(&head);
+		return r;
+	}
+	ttm_eu_fence_buffer_objects(&head, ib.fence);
+
+	if (fence)
+		*fence = radeon_fence_ref(ib.fence);
+
+	radeon_ib_free(rdev, &ib);
+	radeon_bo_unref(&bo);
+	return 0;
+}
+
+/* multiple fence commands without any stream commands in between can
+   crash the vcpu, so just emit a dummy create/destroy msg to
+   avoid this */
+int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
+			      uint32_t handle, struct radeon_fence **fence)
+{
+	struct radeon_bo *bo;
+	uint32_t *msg;
+	int r, i;
+
+	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
+			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+	if (r)
+		return r;
+
+	r = radeon_bo_reserve(bo, false);
+	if (r) {
+		radeon_bo_unref(&bo);
+		return r;
+	}
+
+	r = radeon_bo_kmap(bo, (void **)&msg);
+	if (r) {
+		radeon_bo_unreserve(bo);
+		radeon_bo_unref(&bo);
+		return r;
+	}
+
+	/* stitch together a UVD create msg */
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000000);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
+	msg[4] = cpu_to_le32(0x00000000);
+	msg[5] = cpu_to_le32(0x00000000);
+	msg[6] = cpu_to_le32(0x00000000);
+	msg[7] = cpu_to_le32(0x00000780);
+	msg[8] = cpu_to_le32(0x00000440);
+	msg[9] = cpu_to_le32(0x00000000);
+	msg[10] = cpu_to_le32(0x01b37000);
+	for (i = 11; i < 1024; ++i)
+		msg[i] = cpu_to_le32(0x0);
+
+	radeon_bo_kunmap(bo);
+	radeon_bo_unreserve(bo);
+
+	return radeon_uvd_send_msg(rdev, ring, bo, fence);
+}
+
+int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
+			       uint32_t handle, struct radeon_fence **fence)
+{
+	struct radeon_bo *bo;
+	uint32_t *msg;
+	int r, i;
+
+	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
+			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+	if (r)
+		return r;
+
+	r = radeon_bo_reserve(bo, false);
+	if (r) {
+		radeon_bo_unref(&bo);
+		return r;
+	}
+
+	r = radeon_bo_kmap(bo, (void **)&msg);
+	if (r) {
+		radeon_bo_unreserve(bo);
+		radeon_bo_unref(&bo);
+		return r;
+	}
+
+	/* stitch together a UVD destroy msg */
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000002);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
+	for (i = 4; i < 1024; ++i)
+		msg[i] = cpu_to_le32(0x0);
+
+	radeon_bo_kunmap(bo);
+	radeon_bo_unreserve(bo);
+
+	return radeon_uvd_send_msg(rdev, ring, bo, fence);
+}
+
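+/*
+ * UVD clocks are gated when the block goes idle: every submission kicks
+ * the delayed work via radeon_uvd_note_usage(), and once no UVD fences
+ * remain outstanding the clocks are dropped to zero until the next use.
+ */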
+static void radeon_uvd_idle_work_handler(struct work_struct *work)
+{
+	struct radeon_device *rdev =
+		container_of(work, struct radeon_device, uvd.idle_work.work);
+
+	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
+		radeon_set_uvd_clocks(rdev, 0, 0);
+	else
+		schedule_delayed_work(&rdev->uvd.idle_work,
+				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+}
+
+void radeon_uvd_note_usage(struct radeon_device *rdev)
+{
+	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
+	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
+					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+	if (set_clocks)
+		radeon_set_uvd_clocks(rdev, 53300, 40000);
+}
+
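+/*
+ * Pick the smallest post divider that brings vco_freq/post_div down to
+ * at most target_freq.  For example, vco_freq=160000 and
+ * target_freq=53300 start at 160000/53300 = 3, but 160000/3 = 53333
+ * still exceeds the target, so the divider is bumped to 4.  Dividers
+ * below pd_min are raised, and dividers above pd_even are forced even.
+ */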
+static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
+					      unsigned target_freq,
+					      unsigned pd_min,
+					      unsigned pd_even)
+{
+	unsigned post_div = vco_freq / target_freq;
+
+	/* adjust to post divider minimum value */
+	if (post_div < pd_min)
+		post_div = pd_min;
+
+	/* we always need a frequency less than or equal to the target */
+	if ((vco_freq / post_div) > target_freq)
+		post_div += 1;
+
+	/* post dividers above a certain value must be even */
+	if (post_div > pd_even && post_div % 2)
+		post_div += 1;
+
+	return post_div;
+}
+
+/**
+ * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
+ *
+ * @rdev: radeon_device pointer
+ * @vclk: wanted VCLK
+ * @dclk: wanted DCLK
+ * @vco_min: minimum VCO frequency
+ * @vco_max: maximum VCO frequency
+ * @fb_factor: factor to multiply vco freq with
+ * @fb_mask: limit and bitmask for feedback divider
+ * @pd_min: post divider minimum
+ * @pd_max: post divider maximum
+ * @pd_even: post divider must be even above this value
+ * @optimal_fb_div: resulting feedback divider
+ * @optimal_vclk_div: resulting vclk post divider
+ * @optimal_dclk_div: resulting dclk post divider
+ *
+ * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
+ * Returns zero on success, -EINVAL on error.
+ */
+int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+				  unsigned vclk, unsigned dclk,
+				  unsigned vco_min, unsigned vco_max,
+				  unsigned fb_factor, unsigned fb_mask,
+				  unsigned pd_min, unsigned pd_max,
+				  unsigned pd_even,
+				  unsigned *optimal_fb_div,
+				  unsigned *optimal_vclk_div,
+				  unsigned *optimal_dclk_div)
+{
+	unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;
+
+	/* start off with something large */
+	unsigned optimal_score = ~0;
+
+	/* loop through vco from low to high */
+	vco_min = max(max(vco_min, vclk), dclk);
+	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
+
+		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
+		unsigned vclk_div, dclk_div, score;
+
+		do_div(fb_div, ref_freq);
+
+		/* fb div out of range? */
+		if (fb_div > fb_mask)
+			break; /* it can only get worse */
+
+		fb_div &= fb_mask;
+
+		/* calc vclk divider with current vco freq */
+		vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
+							 pd_min, pd_even);
+		if (vclk_div > pd_max)
+			break; /* vco is too big, it has to stop */
+
+		/* calc dclk divider with current vco freq */
+		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
+							 pd_min, pd_even);
+		if (dclk_div > pd_max)
+			break; /* vco is too big, it has to stop */
+
+		/* calc score with current vco freq */
+		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
+
+		/* determine if this vco setting is better than current optimal settings */
+		if (score < optimal_score) {
+			*optimal_fb_div = fb_div;
+			*optimal_vclk_div = vclk_div;
+			*optimal_dclk_div = dclk_div;
+			optimal_score = score;
+			if (optimal_score == 0)
+				break; /* it can't get better than this */
+		}
+	}
+
+	/* did we find a valid setup? */
+	if (optimal_score == ~0)
+		return -EINVAL;
+
+	return 0;
+}
+
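+/*
+ * Hand a new divider setup to the UPLL: pulse UPLL_CTLREQ and poll for
+ * up to a second until both acknowledge bits are set, then deassert the
+ * request again.
+ */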
+int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
+				unsigned cg_upll_func_cntl)
+{
+	unsigned i;
+
+	/* make sure UPLL_CTLREQ is deasserted */
+	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+	mdelay(10);
+
+	/* assert UPLL_CTLREQ */
+	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+
+	/* wait for CTLACK and CTLACK2 to get asserted */
+	for (i = 0; i < 100; ++i) {
+		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
+			break;
+		mdelay(10);
+	}
+
+	/* deassert UPLL_CTLREQ */
+	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
+
+	if (i == 100) {
+		DRM_ERROR("Timeout setting UVD clocks!\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/reg_srcs/cayman b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/cayman
new file mode 100644
index 0000000..a072fa8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -0,0 +1,642 @@
+cayman 0x9400
+0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
+0x000084FC CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089A8 VGT_COMPUTE_INDEX
+0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
+0x000089B0 VGT_HS_OFFCHIP_PARAM
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009508 TA_CNTL_AUX
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028354 SX_SURFACE_SYNC
+0x0002835C SX_SCATTER_EXPORT_SIZE
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x000286F8 SPI_GPR_MGMT
+0x000286FC SPI_LDS_MGMT
+0x00028700 SPI_STACK_MGMT
+0x00028704 SPI_WAVE_MGMT_1
+0x00028708 SPI_WAVE_MGMT_2
+0x00028720 GDS_ADDR_BASE
+0x00028724 GDS_ADDR_SIZE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x00028860 SQ_PGM_RESOURCES_VS
+0x00028864 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D4 SQ_PGM_RESOURCES_LS
+0x000288D8 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A70 IA_ENHANCE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AA8 IA_MULTI_VGT_PARAM
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028BD4 PA_SC_CENTROID_PRIORITY_0
+0x00028BD8 PA_SC_CENTROID_PRIORITY_1
+0x00028BDC PA_SC_LINE_CNTL
+0x00028BE4 PA_SU_VTX_CNTL
+0x00028BE8 PA_CL_GB_VERT_CLIP_ADJ
+0x00028BEC PA_CL_GB_VERT_DISC_ADJ
+0x00028BF0 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028BF4 PA_CL_GB_HORZ_DISC_ADJ
+0x00028BF8 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_0
+0x00028BFC PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_1
+0x00028C00 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_2
+0x00028C04 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_3
+0x00028C08 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_0
+0x00028C0C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_1
+0x00028C10 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_2
+0x00028C14 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_3
+0x00028C18 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_0
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_2
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_3
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_0
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_1
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_2
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3
+0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0
+0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1
+0x00028C78 CB_COLOR0_DIM
+0x00028CB4 CB_COLOR1_DIM
+0x00028CF0 CB_COLOR2_DIM
+0x00028D2C CB_COLOR3_DIM
+0x00028D68 CB_COLOR4_DIM
+0x00028DA4 CB_COLOR5_DIM
+0x00028DE0 CB_COLOR6_DIM
+0x00028E1C CB_COLOR7_DIM
+0x00028E58 CB_COLOR8_DIM
+0x00028E74 CB_COLOR9_DIM
+0x00028E90 CB_COLOR10_DIM
+0x00028EAC CB_COLOR11_DIM
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR
diff --git a/linux-imx/drivers/gpu/drm/radeon/reg_srcs/evergreen b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/evergreen
new file mode 100644
index 0000000..b912a37
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -0,0 +1,644 @@
+evergreen 0x9400
+0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
+0x00008044 WAIT_UNTIL_POLL_CNTL
+0x00008048 WAIT_UNTIL_POLL_MASK
+0x0000804c WAIT_UNTIL_POLL_REFDATA
+0x000084FC CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089AC VGT_COMPUTE_THREAD_GROUP_SIZE
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D90 SQ_DYN_GPR_OPTIMIZATION
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008D98 SQ_DYN_GPR_THREAD_LIMIT
+0x00008D9C SQ_DYN_GPR_LDS_LIMIT
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C0C SQ_GPR_RESOURCE_MGMT_3
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008C18 SQ_THREAD_RESOURCE_MGMT
+0x00008C1C SQ_THREAD_RESOURCE_MGMT_2
+0x00008C20 SQ_STACK_RESOURCE_MGMT_1
+0x00008C24 SQ_STACK_RESOURCE_MGMT_2
+0x00008C28 SQ_STACK_RESOURCE_MGMT_3
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E2C SQ_LDS_RESOURCE_MGMT
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009014 SX_MEMORY_EXPORT_SIZE
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009508 TA_CNTL_AUX
+0x00009700 VC_CNTL
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028354 SX_SURFACE_SYNC
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x00028720 GDS_ADDR_BASE
+0x00028724 GDS_ADDR_SIZE
+0x00028728 GDS_ORDERED_WAVE_PER_SE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x00028860 SQ_PGM_RESOURCES_VS
+0x00028864 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D4 SQ_PGM_RESOURCES_LS
+0x000288D8 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B5C VGT_LS_SIZE
+0x00028B60 VGT_HS_SIZE
+0x00028B64 VGT_LS_HS_ALLOC
+0x00028B68 VGT_HS_PATCH_CONST
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028C00 PA_SC_LINE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_0
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_1
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_2
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_3
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_4
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_5
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_6
+0x00028C38 PA_SC_AA_SAMPLE_LOCS_7
+0x00028C3C PA_SC_AA_MASK
+0x00028C78 CB_COLOR0_DIM
+0x00028CB4 CB_COLOR1_DIM
+0x00028CF0 CB_COLOR2_DIM
+0x00028D2C CB_COLOR3_DIM
+0x00028D68 CB_COLOR4_DIM
+0x00028DA4 CB_COLOR5_DIM
+0x00028DE0 CB_COLOR6_DIM
+0x00028E1C CB_COLOR7_DIM
+0x00028E58 CB_COLOR8_DIM
+0x00028E74 CB_COLOR9_DIM
+0x00028E90 CB_COLOR10_DIM
+0x00028EAC CB_COLOR11_DIM
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR
diff --git a/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r100 b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r100
new file mode 100644
index 0000000..f7ee062
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r100
@@ -0,0 +1,105 @@
+r100 0x3294
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1810 FOG_3D_TABLE_START
+0x1814 FOG_3D_TABLE_END
+0x1a14 FOG_TABLE_INDEX
+0x1a18 FOG_TABLE_DATA
+0x1c14 PP_MISC
+0x1c18 PP_FOG_COLOR
+0x1c1c RE_SOLID_COLOR
+0x1c20 RB3D_BLENDCNTL
+0x1c4c SE_CNTL
+0x1c50 SE_COORD_FMT
+0x1c60 PP_TXCBLEND_0
+0x1c64 PP_TXABLEND_0
+0x1c68 PP_TFACTOR_0
+0x1c78 PP_TXCBLEND_1
+0x1c7c PP_TXABLEND_1
+0x1c80 PP_TFACTOR_1
+0x1c90 PP_TXCBLEND_2
+0x1c94 PP_TXABLEND_2
+0x1c98 PP_TFACTOR_2
+0x1cc8 RE_STIPPLE_ADDR
+0x1ccc RE_STIPPLE_DATA
+0x1cd0 RE_LINE_PATTERN
+0x1cd4 RE_LINE_STATE
+0x1d40 PP_BORDER_COLOR0
+0x1d44 PP_BORDER_COLOR1
+0x1d48 PP_BORDER_COLOR2
+0x1d7c RB3D_STENCILREFMASK
+0x1d80 RB3D_ROPCNTL
+0x1d84 RB3D_PLANEMASK
+0x1d98 VAP_VPORT_XSCALE
+0x1d9c VAP_VPORT_XOFFSET
+0x1da0 VAP_VPORT_YSCALE
+0x1da4 VAP_VPORT_YOFFSET
+0x1da8 VAP_VPORT_ZSCALE
+0x1dac VAP_VPORT_ZOFFSET
+0x1db0 SE_ZBIAS_FACTOR
+0x1db4 SE_ZBIAS_CONSTANT
+0x1db8 SE_LINE_WIDTH
+0x2140 SE_CNTL_STATUS
+0x2200 SE_TCL_VECTOR_INDX_REG
+0x2204 SE_TCL_VECTOR_DATA_REG
+0x2208 SE_TCL_SCALAR_INDX_REG
+0x220c SE_TCL_SCALAR_DATA_REG
+0x2210 SE_TCL_MATERIAL_EMISSIVE_RED
+0x2214 SE_TCL_MATERIAL_EMISSIVE_GREEN
+0x2218 SE_TCL_MATERIAL_EMISSIVE_BLUE
+0x221c SE_TCL_MATERIAL_EMISSIVE_ALPHA
+0x2220 SE_TCL_MATERIAL_AMBIENT_RED
+0x2224 SE_TCL_MATERIAL_AMBIENT_GREEN
+0x2228 SE_TCL_MATERIAL_AMBIENT_BLUE
+0x222c SE_TCL_MATERIAL_AMBIENT_ALPHA
+0x2230 SE_TCL_MATERIAL_DIFFUSE_RED
+0x2234 SE_TCL_MATERIAL_DIFFUSE_GREEN
+0x2238 SE_TCL_MATERIAL_DIFFUSE_BLUE
+0x223c SE_TCL_MATERIAL_DIFFUSE_ALPHA
+0x2240 SE_TCL_MATERIAL_SPECULAR_RED
+0x2244 SE_TCL_MATERIAL_SPECULAR_GREEN
+0x2248 SE_TCL_MATERIAL_SPECULAR_BLUE
+0x224c SE_TCL_MATERIAL_SPECULAR_ALPHA
+0x2250 SE_TCL_SHININESS
+0x2254 SE_TCL_OUTPUT_VTX_FMT
+0x2258 SE_TCL_OUTPUT_VTX_SEL
+0x225c SE_TCL_MATRIX_SELECT_0
+0x2260 SE_TCL_MATRIX_SELECT_1
+0x2264 SE_TCL_UCP_VERT_BLEND_CNTL
+0x2268 SE_TCL_TEXTURE_PROC_CTL
+0x226c SE_TCL_LIGHT_MODEL_CTL
+0x2270 SE_TCL_PER_LIGHT_CTL_0
+0x2274 SE_TCL_PER_LIGHT_CTL_1
+0x2278 SE_TCL_PER_LIGHT_CTL_2
+0x227c SE_TCL_PER_LIGHT_CTL_3
+0x2284 SE_TCL_STATE_FLUSH
+0x26c0 RE_TOP_LEFT
+0x26c4 RE_MISC
+0x3290 RB3D_ZPASS_DATA
diff --git a/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r200 b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r200
new file mode 100644
index 0000000..c29ac43
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r200
@@ -0,0 +1,186 @@
+r200 0x3294
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1c14 PP_MISC
+0x1c18 PP_FOG_COLOR
+0x1c1c RE_SOLID_COLOR
+0x1c20 RB3D_BLENDCNTL
+0x1c4c SE_CNTL
+0x1c50 RE_CNTL
+0x1cc8 RE_STIPPLE_ADDR
+0x1ccc RE_STIPPLE_DATA
+0x1cd0 RE_LINE_PATTERN
+0x1cd4 RE_LINE_STATE
+0x1cd8 RE_SCISSOR_TL_0
+0x1cdc RE_SCISSOR_BR_0
+0x1ce0 RE_SCISSOR_TL_1
+0x1ce4 RE_SCISSOR_BR_1
+0x1ce8 RE_SCISSOR_TL_2
+0x1cec RE_SCISSOR_BR_2
+0x1d60 RB3D_DEPTHXY_OFFSET
+0x1d7c RB3D_STENCILREFMASK
+0x1d80 RB3D_ROPCNTL
+0x1d84 RB3D_PLANEMASK
+0x1d98 VAP_VPORT_XSCALE
+0x1d9c VAP_VPORT_XOFFSET
+0x1da0 VAP_VPORT_YSCALE
+0x1da4 VAP_VPORT_YOFFSET
+0x1da8 VAP_VPORT_ZSCALE
+0x1dac VAP_VPORT_ZOFFSET
+0x1db0 SE_ZBIAS_FACTOR
+0x1db4 SE_ZBIAS_CONSTANT
+0x1db8 SE_LINE_WIDTH
+0x2080 SE_VAP_CNTL
+0x2090 SE_TCL_OUTPUT_VTX_FMT_0
+0x2094 SE_TCL_OUTPUT_VTX_FMT_1
+0x20b0 SE_VTE_CNTL
+0x2140 SE_CNTL_STATUS
+0x2180 SE_VTX_STATE_CNTL
+0x2200 SE_TCL_VECTOR_INDX_REG
+0x2204 SE_TCL_VECTOR_DATA_REG
+0x2208 SE_TCL_SCALAR_INDX_REG
+0x220c SE_TCL_SCALAR_DATA_REG
+0x2230 SE_TCL_MATRIX_SEL_0
+0x2234 SE_TCL_MATRIX_SEL_1
+0x2238 SE_TCL_MATRIX_SEL_2
+0x223c SE_TCL_MATRIX_SEL_3
+0x2240 SE_TCL_MATRIX_SEL_4
+0x2250 SE_TCL_OUTPUT_VTX_COMP_SEL
+0x2254 SE_TCL_INPUT_VTX_VECTOR_ADDR_0
+0x2258 SE_TCL_INPUT_VTX_VECTOR_ADDR_1
+0x225c SE_TCL_INPUT_VTX_VECTOR_ADDR_2
+0x2260 SE_TCL_INPUT_VTX_VECTOR_ADDR_3
+0x2268 SE_TCL_LIGHT_MODEL_CTL_0
+0x226c SE_TCL_LIGHT_MODEL_CTL_1
+0x2270 SE_TCL_PER_LIGHT_CTL_0
+0x2274 SE_TCL_PER_LIGHT_CTL_1
+0x2278 SE_TCL_PER_LIGHT_CTL_2
+0x227c SE_TCL_PER_LIGHT_CTL_3
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x22a8 SE_TCL_TEX_PROC_CTL_2
+0x22ac SE_TCL_TEX_PROC_CTL_3
+0x22b0 SE_TCL_TEX_PROC_CTL_0
+0x22b4 SE_TCL_TEX_PROC_CTL_1
+0x22b8 SE_TCL_TEX_CYL_WRAP_CTL
+0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
+0x22c4 SE_TCL_POINT_SPRITE_CNTL
+0x22d0 SE_PVS_CNTL
+0x22d4 SE_PVS_CONST_CNTL
+0x2648 RE_POINTSIZE
+0x26c0 RE_TOP_LEFT
+0x26c4 RE_MISC
+0x26f0 RE_AUX_SCISSOR_CNTL
+0x2c14 PP_BORDER_COLOR_0
+0x2c34 PP_BORDER_COLOR_1
+0x2c54 PP_BORDER_COLOR_2
+0x2c74 PP_BORDER_COLOR_3
+0x2c94 PP_BORDER_COLOR_4
+0x2cb4 PP_BORDER_COLOR_5
+0x2cc4 PP_CNTL_X
+0x2cf8 PP_TRI_PERF
+0x2cfc PP_PERF_CNTL
+0x2d9c PP_TAM_DEBUG3
+0x2ee0 PP_TFACTOR_0
+0x2ee4 PP_TFACTOR_1
+0x2ee8 PP_TFACTOR_2
+0x2eec PP_TFACTOR_3
+0x2ef0 PP_TFACTOR_4
+0x2ef4 PP_TFACTOR_5
+0x2ef8 PP_TFACTOR_6
+0x2efc PP_TFACTOR_7
+0x2f00 PP_TXCBLEND_0
+0x2f04 PP_TXCBLEND2_0
+0x2f08 PP_TXABLEND_0
+0x2f0c PP_TXABLEND2_0
+0x2f10 PP_TXCBLEND_1
+0x2f14 PP_TXCBLEND2_1
+0x2f18 PP_TXABLEND_1
+0x2f1c PP_TXABLEND2_1
+0x2f20 PP_TXCBLEND_2
+0x2f24 PP_TXCBLEND2_2
+0x2f28 PP_TXABLEND_2
+0x2f2c PP_TXABLEND2_2
+0x2f30 PP_TXCBLEND_3
+0x2f34 PP_TXCBLEND2_3
+0x2f38 PP_TXABLEND_3
+0x2f3c PP_TXABLEND2_3
+0x2f40 PP_TXCBLEND_4
+0x2f44 PP_TXCBLEND2_4
+0x2f48 PP_TXABLEND_4
+0x2f4c PP_TXABLEND2_4
+0x2f50 PP_TXCBLEND_5
+0x2f54 PP_TXCBLEND2_5
+0x2f58 PP_TXABLEND_5
+0x2f5c PP_TXABLEND2_5
+0x2f60 PP_TXCBLEND_6
+0x2f64 PP_TXCBLEND2_6
+0x2f68 PP_TXABLEND_6
+0x2f6c PP_TXABLEND2_6
+0x2f70 PP_TXCBLEND_7
+0x2f74 PP_TXCBLEND2_7
+0x2f78 PP_TXABLEND_7
+0x2f7c PP_TXABLEND2_7
+0x2f80 PP_TXCBLEND_8
+0x2f84 PP_TXCBLEND2_8
+0x2f88 PP_TXABLEND_8
+0x2f8c PP_TXABLEND2_8
+0x2f90 PP_TXCBLEND_9
+0x2f94 PP_TXCBLEND2_9
+0x2f98 PP_TXABLEND_9
+0x2f9c PP_TXABLEND2_9
+0x2fa0 PP_TXCBLEND_10
+0x2fa4 PP_TXCBLEND2_10
+0x2fa8 PP_TXABLEND_10
+0x2fac PP_TXABLEND2_10
+0x2fb0 PP_TXCBLEND_11
+0x2fb4 PP_TXCBLEND2_11
+0x2fb8 PP_TXABLEND_11
+0x2fbc PP_TXABLEND2_11
+0x2fc0 PP_TXCBLEND_12
+0x2fc4 PP_TXCBLEND2_12
+0x2fc8 PP_TXABLEND_12
+0x2fcc PP_TXABLEND2_12
+0x2fd0 PP_TXCBLEND_13
+0x2fd4 PP_TXCBLEND2_13
+0x2fd8 PP_TXABLEND_13
+0x2fdc PP_TXABLEND2_13
+0x2fe0 PP_TXCBLEND_14
+0x2fe4 PP_TXCBLEND2_14
+0x2fe8 PP_TXABLEND_14
+0x2fec PP_TXABLEND2_14
+0x2ff0 PP_TXCBLEND_15
+0x2ff4 PP_TXCBLEND2_15
+0x2ff8 PP_TXABLEND_15
+0x2ffc PP_TXABLEND2_15
+0x3218 RB3D_BLENDCOLOR
+0x321c RB3D_ABLENDCNTL
+0x3220 RB3D_CBLENDCNTL
+0x3290 RB3D_ZPASS_DATA
+
diff --git a/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r300 b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r300
new file mode 100644
index 0000000..e8a1786
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -0,0 +1,714 @@
+r300 0x4f60
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F58 ZB_ZPASS_DATA
diff --git a/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r420 b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r420
new file mode 100644
index 0000000..722074e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -0,0 +1,780 @@
+r420 0x4f60
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F58 ZB_ZPASS_DATA
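The lists under drivers/gpu/drm/radeon/reg_srcs/ are plain-text register whitelists, one file per ASIC family: the first line carries the ASIC prefix plus a register offset, and every following line is a "0xOFFSET NAME" pair. At build time the in-tree mkregtable tool turns each list into a generated <asic>_reg_safe_bm[] bitmap that the command-stream (CS) checker consults before letting userspace write a register. The sketch below shows how such a pair can be parsed into a bitmap; the buffer sizes, bit polarity, and bound check are illustrative assumptions, not mkregtable's exact output.

/* Minimal sketch, assuming the "0xOFFSET NAME" line format above; the
 * bitmap layout and sizing are illustrative, not mkregtable's output. */
#include <stdio.h>

#define REG_SPACE 0x10000                        /* assumed register-space bound */
static unsigned int safe_bm[REG_SPACE / 4 / 32]; /* one bit per dword register */

static void mark_listed(unsigned int offset)
{
	unsigned int idx = offset >> 2;          /* registers are dword-aligned */
	safe_bm[idx / 32] |= 1u << (idx % 32);
}

int main(void)
{
	char line[256], name[128];
	unsigned int offset;

	/* Reads lines such as "0x4BC0 FG_FOG_BLEND" from stdin; the
	 * "<asic> 0x<offset>" header line fails the match and is skipped. */
	while (fgets(line, sizeof(line), stdin)) {
		if (sscanf(line, "0x%x %127s", &offset, name) != 2)
			continue;
		if (offset < REG_SPACE)
			mark_listed(offset);
	}
	printf("bitmap words: %zu\n", sizeof(safe_bm) / sizeof(safe_bm[0]));
	return 0;
}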
diff --git a/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r600 b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r600
new file mode 100644
index 0000000..20bfbda
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -0,0 +1,755 @@
+r600 0x9400
+0x000287A0 R7xx_CB_SHADER_CONTROL
+0x00028230 R7xx_PA_SC_EDGERULE
+0x000286C8 R7xx_SPI_THREAD_GROUPING
+0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008490 CP_STRMOUT_CNTL
+0x000085F0 CP_COHER_CNTL
+0x000085F4 CP_COHER_SIZE
+0x000088C4 VGT_CACHE_INVALIDATION
+0x00028A50 VGT_ENHANCE
+0x000088CC VGT_ES_PER_GS
+0x00028A2C VGT_GROUP_DECR
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x000088C8 VGT_GS_PER_ES
+0x000088E8 VGT_GS_PER_VS
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x0000895C VGT_INDEX_TYPE
+0x00028408 VGT_INDX_OFFSET
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00008958 VGT_PRIMITIVE_TYPE
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
+0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
+0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
+0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
+0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+0x00028810 PA_CL_CLIP_CNTL
+0x00008A14 PA_CL_ENHANCE
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028820 PA_CL_NANINF_CNTL
+0x00028E1C PA_CL_POINT_CULL_RAD
+0x00028E18 PA_CL_POINT_SIZE
+0x00028E10 PA_CL_POINT_X_RAD
+0x00028E14 PA_CL_POINT_Y_RAD
+0x00028E2C PA_CL_UCP_0_W
+0x00028E3C PA_CL_UCP_1_W
+0x00028E4C PA_CL_UCP_2_W
+0x00028E5C PA_CL_UCP_3_W
+0x00028E6C PA_CL_UCP_4_W
+0x00028E7C PA_CL_UCP_5_W
+0x00028E20 PA_CL_UCP_0_X
+0x00028E30 PA_CL_UCP_1_X
+0x00028E40 PA_CL_UCP_2_X
+0x00028E50 PA_CL_UCP_3_X
+0x00028E60 PA_CL_UCP_4_X
+0x00028E70 PA_CL_UCP_5_X
+0x00028E24 PA_CL_UCP_0_Y
+0x00028E34 PA_CL_UCP_1_Y
+0x00028E44 PA_CL_UCP_2_Y
+0x00028E54 PA_CL_UCP_3_Y
+0x00028E64 PA_CL_UCP_4_Y
+0x00028E74 PA_CL_UCP_5_Y
+0x00028E28 PA_CL_UCP_0_Z
+0x00028E38 PA_CL_UCP_1_Z
+0x00028E48 PA_CL_UCP_2_Z
+0x00028E58 PA_CL_UCP_3_Z
+0x00028E68 PA_CL_UCP_4_Z
+0x00028E78 PA_CL_UCP_5_Z
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x00028C48 PA_SC_AA_MASK
+0x00008B40 PA_SC_AA_SAMPLE_LOCS_2S
+0x00008B44 PA_SC_AA_SAMPLE_LOCS_4S
+0x00008B48 PA_SC_AA_SAMPLE_LOCS_8S_WD0
+0x00008B4C PA_SC_AA_SAMPLE_LOCS_8S_WD1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_MCTX
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00008BF0 PA_SC_ENHANCE
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028C00 PA_SC_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00028A4C PA_SC_MODE_CNTL
+0x00028A48 PA_SC_MPASS_PS_CNTL
+0x00008B20 PA_SC_MULTI_CHIP_CNTL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A00 PA_SU_POINT_SIZE
+0x00028E0C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028E08 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028DFC PA_SU_POLY_OFFSET_CLAMP
+0x00028DF8 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028E04 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C10 SQ_STACK_RESOURCE_MGMT_1
+0x00008C14 SQ_STACK_RESOURCE_MGMT_2
+0x00008C0C SQ_THREAD_RESOURCE_MGMT
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x000288E0 SQ_VTX_SEMANTIC_CLEAR
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000288D8 SQ_PGM_CF_OFFSET_ES
+0x000288DC SQ_PGM_CF_OFFSET_FS
+0x000288D4 SQ_PGM_CF_OFFSET_GS
+0x000288CC SQ_PGM_CF_OFFSET_PS
+0x000288D0 SQ_PGM_CF_OFFSET_VS
+0x00028854 SQ_PGM_EXPORTS_PS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x000288A4 SQ_PGM_RESOURCES_FS
+0x0002887C SQ_PGM_RESOURCES_GS
+0x00028850 SQ_PGM_RESOURCES_PS
+0x00028868 SQ_PGM_RESOURCES_VS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x000286DC SPI_FOG_CNTL
+0x000286E4 SPI_FOG_FUNC_BIAS
+0x000286E0 SPI_FOG_FUNC_SCALE
+0x000286D8 SPI_INPUT_Z
+0x000286D4 SPI_INTERP_CONTROL_0
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286C4 SPI_VS_OUT_CONFIG
+0x00028614 SPI_VS_OUT_ID_0
+0x00028618 SPI_VS_OUT_ID_1
+0x0002861C SPI_VS_OUT_ID_2
+0x00028620 SPI_VS_OUT_ID_3
+0x00028624 SPI_VS_OUT_ID_4
+0x00028628 SPI_VS_OUT_ID_5
+0x0002862C SPI_VS_OUT_ID_6
+0x00028630 SPI_VS_OUT_ID_7
+0x00028634 SPI_VS_OUT_ID_8
+0x00028638 SPI_VS_OUT_ID_9
+0x00028438 SX_ALPHA_REF
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028354 SX_SURFACE_SYNC
+0x00009014 SX_MEMORY_EXPORT_SIZE
+0x00009604 TC_INVALIDATE
+0x00009400 TD_FILTER4
+0x00009404 TD_FILTER4_1
+0x00009408 TD_FILTER4_2
+0x0000940C TD_FILTER4_3
+0x00009410 TD_FILTER4_4
+0x00009414 TD_FILTER4_5
+0x00009418 TD_FILTER4_6
+0x0000941C TD_FILTER4_7
+0x00009420 TD_FILTER4_8
+0x00009424 TD_FILTER4_9
+0x00009428 TD_FILTER4_10
+0x0000942C TD_FILTER4_11
+0x00009430 TD_FILTER4_12
+0x00009434 TD_FILTER4_13
+0x00009438 TD_FILTER4_14
+0x0000943C TD_FILTER4_15
+0x00009440 TD_FILTER4_16
+0x00009444 TD_FILTER4_17
+0x00009448 TD_FILTER4_18
+0x0000944C TD_FILTER4_19
+0x00009450 TD_FILTER4_20
+0x00009454 TD_FILTER4_21
+0x00009458 TD_FILTER4_22
+0x0000945C TD_FILTER4_23
+0x00009460 TD_FILTER4_24
+0x00009464 TD_FILTER4_25
+0x00009468 TD_FILTER4_26
+0x0000946C TD_FILTER4_27
+0x00009470 TD_FILTER4_28
+0x00009474 TD_FILTER4_29
+0x00009478 TD_FILTER4_30
+0x0000947C TD_FILTER4_31
+0x00009480 TD_FILTER4_32
+0x00009484 TD_FILTER4_33
+0x00009488 TD_FILTER4_34
+0x0000948C TD_FILTER4_35
+0x0000A80C TD_GS_SAMPLER0_BORDER_ALPHA
+0x0000A81C TD_GS_SAMPLER1_BORDER_ALPHA
+0x0000A82C TD_GS_SAMPLER2_BORDER_ALPHA
+0x0000A83C TD_GS_SAMPLER3_BORDER_ALPHA
+0x0000A84C TD_GS_SAMPLER4_BORDER_ALPHA
+0x0000A85C TD_GS_SAMPLER5_BORDER_ALPHA
+0x0000A86C TD_GS_SAMPLER6_BORDER_ALPHA
+0x0000A87C TD_GS_SAMPLER7_BORDER_ALPHA
+0x0000A88C TD_GS_SAMPLER8_BORDER_ALPHA
+0x0000A89C TD_GS_SAMPLER9_BORDER_ALPHA
+0x0000A8AC TD_GS_SAMPLER10_BORDER_ALPHA
+0x0000A8BC TD_GS_SAMPLER11_BORDER_ALPHA
+0x0000A8CC TD_GS_SAMPLER12_BORDER_ALPHA
+0x0000A8DC TD_GS_SAMPLER13_BORDER_ALPHA
+0x0000A8EC TD_GS_SAMPLER14_BORDER_ALPHA
+0x0000A8FC TD_GS_SAMPLER15_BORDER_ALPHA
+0x0000A90C TD_GS_SAMPLER16_BORDER_ALPHA
+0x0000A91C TD_GS_SAMPLER17_BORDER_ALPHA
+0x0000A808 TD_GS_SAMPLER0_BORDER_BLUE
+0x0000A818 TD_GS_SAMPLER1_BORDER_BLUE
+0x0000A828 TD_GS_SAMPLER2_BORDER_BLUE
+0x0000A838 TD_GS_SAMPLER3_BORDER_BLUE
+0x0000A848 TD_GS_SAMPLER4_BORDER_BLUE
+0x0000A858 TD_GS_SAMPLER5_BORDER_BLUE
+0x0000A868 TD_GS_SAMPLER6_BORDER_BLUE
+0x0000A878 TD_GS_SAMPLER7_BORDER_BLUE
+0x0000A888 TD_GS_SAMPLER8_BORDER_BLUE
+0x0000A898 TD_GS_SAMPLER9_BORDER_BLUE
+0x0000A8A8 TD_GS_SAMPLER10_BORDER_BLUE
+0x0000A8B8 TD_GS_SAMPLER11_BORDER_BLUE
+0x0000A8C8 TD_GS_SAMPLER12_BORDER_BLUE
+0x0000A8D8 TD_GS_SAMPLER13_BORDER_BLUE
+0x0000A8E8 TD_GS_SAMPLER14_BORDER_BLUE
+0x0000A8F8 TD_GS_SAMPLER15_BORDER_BLUE
+0x0000A908 TD_GS_SAMPLER16_BORDER_BLUE
+0x0000A918 TD_GS_SAMPLER17_BORDER_BLUE
+0x0000A804 TD_GS_SAMPLER0_BORDER_GREEN
+0x0000A814 TD_GS_SAMPLER1_BORDER_GREEN
+0x0000A824 TD_GS_SAMPLER2_BORDER_GREEN
+0x0000A834 TD_GS_SAMPLER3_BORDER_GREEN
+0x0000A844 TD_GS_SAMPLER4_BORDER_GREEN
+0x0000A854 TD_GS_SAMPLER5_BORDER_GREEN
+0x0000A864 TD_GS_SAMPLER6_BORDER_GREEN
+0x0000A874 TD_GS_SAMPLER7_BORDER_GREEN
+0x0000A884 TD_GS_SAMPLER8_BORDER_GREEN
+0x0000A894 TD_GS_SAMPLER9_BORDER_GREEN
+0x0000A8A4 TD_GS_SAMPLER10_BORDER_GREEN
+0x0000A8B4 TD_GS_SAMPLER11_BORDER_GREEN
+0x0000A8C4 TD_GS_SAMPLER12_BORDER_GREEN
+0x0000A8D4 TD_GS_SAMPLER13_BORDER_GREEN
+0x0000A8E4 TD_GS_SAMPLER14_BORDER_GREEN
+0x0000A8F4 TD_GS_SAMPLER15_BORDER_GREEN
+0x0000A904 TD_GS_SAMPLER16_BORDER_GREEN
+0x0000A914 TD_GS_SAMPLER17_BORDER_GREEN
+0x0000A800 TD_GS_SAMPLER0_BORDER_RED
+0x0000A810 TD_GS_SAMPLER1_BORDER_RED
+0x0000A820 TD_GS_SAMPLER2_BORDER_RED
+0x0000A830 TD_GS_SAMPLER3_BORDER_RED
+0x0000A840 TD_GS_SAMPLER4_BORDER_RED
+0x0000A850 TD_GS_SAMPLER5_BORDER_RED
+0x0000A860 TD_GS_SAMPLER6_BORDER_RED
+0x0000A870 TD_GS_SAMPLER7_BORDER_RED
+0x0000A880 TD_GS_SAMPLER8_BORDER_RED
+0x0000A890 TD_GS_SAMPLER9_BORDER_RED
+0x0000A8A0 TD_GS_SAMPLER10_BORDER_RED
+0x0000A8B0 TD_GS_SAMPLER11_BORDER_RED
+0x0000A8C0 TD_GS_SAMPLER12_BORDER_RED
+0x0000A8D0 TD_GS_SAMPLER13_BORDER_RED
+0x0000A8E0 TD_GS_SAMPLER14_BORDER_RED
+0x0000A8F0 TD_GS_SAMPLER15_BORDER_RED
+0x0000A900 TD_GS_SAMPLER16_BORDER_RED
+0x0000A910 TD_GS_SAMPLER17_BORDER_RED
+0x0000A40C TD_PS_SAMPLER0_BORDER_ALPHA
+0x0000A41C TD_PS_SAMPLER1_BORDER_ALPHA
+0x0000A42C TD_PS_SAMPLER2_BORDER_ALPHA
+0x0000A43C TD_PS_SAMPLER3_BORDER_ALPHA
+0x0000A44C TD_PS_SAMPLER4_BORDER_ALPHA
+0x0000A45C TD_PS_SAMPLER5_BORDER_ALPHA
+0x0000A46C TD_PS_SAMPLER6_BORDER_ALPHA
+0x0000A47C TD_PS_SAMPLER7_BORDER_ALPHA
+0x0000A48C TD_PS_SAMPLER8_BORDER_ALPHA
+0x0000A49C TD_PS_SAMPLER9_BORDER_ALPHA
+0x0000A4AC TD_PS_SAMPLER10_BORDER_ALPHA
+0x0000A4BC TD_PS_SAMPLER11_BORDER_ALPHA
+0x0000A4CC TD_PS_SAMPLER12_BORDER_ALPHA
+0x0000A4DC TD_PS_SAMPLER13_BORDER_ALPHA
+0x0000A4EC TD_PS_SAMPLER14_BORDER_ALPHA
+0x0000A4FC TD_PS_SAMPLER15_BORDER_ALPHA
+0x0000A50C TD_PS_SAMPLER16_BORDER_ALPHA
+0x0000A51C TD_PS_SAMPLER17_BORDER_ALPHA
+0x0000A408 TD_PS_SAMPLER0_BORDER_BLUE
+0x0000A418 TD_PS_SAMPLER1_BORDER_BLUE
+0x0000A428 TD_PS_SAMPLER2_BORDER_BLUE
+0x0000A438 TD_PS_SAMPLER3_BORDER_BLUE
+0x0000A448 TD_PS_SAMPLER4_BORDER_BLUE
+0x0000A458 TD_PS_SAMPLER5_BORDER_BLUE
+0x0000A468 TD_PS_SAMPLER6_BORDER_BLUE
+0x0000A478 TD_PS_SAMPLER7_BORDER_BLUE
+0x0000A488 TD_PS_SAMPLER8_BORDER_BLUE
+0x0000A498 TD_PS_SAMPLER9_BORDER_BLUE
+0x0000A4A8 TD_PS_SAMPLER10_BORDER_BLUE
+0x0000A4B8 TD_PS_SAMPLER11_BORDER_BLUE
+0x0000A4C8 TD_PS_SAMPLER12_BORDER_BLUE
+0x0000A4D8 TD_PS_SAMPLER13_BORDER_BLUE
+0x0000A4E8 TD_PS_SAMPLER14_BORDER_BLUE
+0x0000A4F8 TD_PS_SAMPLER15_BORDER_BLUE
+0x0000A508 TD_PS_SAMPLER16_BORDER_BLUE
+0x0000A518 TD_PS_SAMPLER17_BORDER_BLUE
+0x0000A404 TD_PS_SAMPLER0_BORDER_GREEN
+0x0000A414 TD_PS_SAMPLER1_BORDER_GREEN
+0x0000A424 TD_PS_SAMPLER2_BORDER_GREEN
+0x0000A434 TD_PS_SAMPLER3_BORDER_GREEN
+0x0000A444 TD_PS_SAMPLER4_BORDER_GREEN
+0x0000A454 TD_PS_SAMPLER5_BORDER_GREEN
+0x0000A464 TD_PS_SAMPLER6_BORDER_GREEN
+0x0000A474 TD_PS_SAMPLER7_BORDER_GREEN
+0x0000A484 TD_PS_SAMPLER8_BORDER_GREEN
+0x0000A494 TD_PS_SAMPLER9_BORDER_GREEN
+0x0000A4A4 TD_PS_SAMPLER10_BORDER_GREEN
+0x0000A4B4 TD_PS_SAMPLER11_BORDER_GREEN
+0x0000A4C4 TD_PS_SAMPLER12_BORDER_GREEN
+0x0000A4D4 TD_PS_SAMPLER13_BORDER_GREEN
+0x0000A4E4 TD_PS_SAMPLER14_BORDER_GREEN
+0x0000A4F4 TD_PS_SAMPLER15_BORDER_GREEN
+0x0000A504 TD_PS_SAMPLER16_BORDER_GREEN
+0x0000A514 TD_PS_SAMPLER17_BORDER_GREEN
+0x0000A400 TD_PS_SAMPLER0_BORDER_RED
+0x0000A410 TD_PS_SAMPLER1_BORDER_RED
+0x0000A420 TD_PS_SAMPLER2_BORDER_RED
+0x0000A430 TD_PS_SAMPLER3_BORDER_RED
+0x0000A440 TD_PS_SAMPLER4_BORDER_RED
+0x0000A450 TD_PS_SAMPLER5_BORDER_RED
+0x0000A460 TD_PS_SAMPLER6_BORDER_RED
+0x0000A470 TD_PS_SAMPLER7_BORDER_RED
+0x0000A480 TD_PS_SAMPLER8_BORDER_RED
+0x0000A490 TD_PS_SAMPLER9_BORDER_RED
+0x0000A4A0 TD_PS_SAMPLER10_BORDER_RED
+0x0000A4B0 TD_PS_SAMPLER11_BORDER_RED
+0x0000A4C0 TD_PS_SAMPLER12_BORDER_RED
+0x0000A4D0 TD_PS_SAMPLER13_BORDER_RED
+0x0000A4E0 TD_PS_SAMPLER14_BORDER_RED
+0x0000A4F0 TD_PS_SAMPLER15_BORDER_RED
+0x0000A500 TD_PS_SAMPLER16_BORDER_RED
+0x0000A510 TD_PS_SAMPLER17_BORDER_RED
+0x0000AA00 TD_PS_SAMPLER0_CLEARTYPE_KERNEL
+0x0000AA04 TD_PS_SAMPLER1_CLEARTYPE_KERNEL
+0x0000AA08 TD_PS_SAMPLER2_CLEARTYPE_KERNEL
+0x0000AA0C TD_PS_SAMPLER3_CLEARTYPE_KERNEL
+0x0000AA10 TD_PS_SAMPLER4_CLEARTYPE_KERNEL
+0x0000AA14 TD_PS_SAMPLER5_CLEARTYPE_KERNEL
+0x0000AA18 TD_PS_SAMPLER6_CLEARTYPE_KERNEL
+0x0000AA1C TD_PS_SAMPLER7_CLEARTYPE_KERNEL
+0x0000AA20 TD_PS_SAMPLER8_CLEARTYPE_KERNEL
+0x0000AA24 TD_PS_SAMPLER9_CLEARTYPE_KERNEL
+0x0000AA28 TD_PS_SAMPLER10_CLEARTYPE_KERNEL
+0x0000AA2C TD_PS_SAMPLER11_CLEARTYPE_KERNEL
+0x0000AA30 TD_PS_SAMPLER12_CLEARTYPE_KERNEL
+0x0000AA34 TD_PS_SAMPLER13_CLEARTYPE_KERNEL
+0x0000AA38 TD_PS_SAMPLER14_CLEARTYPE_KERNEL
+0x0000AA3C TD_PS_SAMPLER15_CLEARTYPE_KERNEL
+0x0000AA40 TD_PS_SAMPLER16_CLEARTYPE_KERNEL
+0x0000AA44 TD_PS_SAMPLER17_CLEARTYPE_KERNEL
+0x0000A60C TD_VS_SAMPLER0_BORDER_ALPHA
+0x0000A61C TD_VS_SAMPLER1_BORDER_ALPHA
+0x0000A62C TD_VS_SAMPLER2_BORDER_ALPHA
+0x0000A63C TD_VS_SAMPLER3_BORDER_ALPHA
+0x0000A64C TD_VS_SAMPLER4_BORDER_ALPHA
+0x0000A65C TD_VS_SAMPLER5_BORDER_ALPHA
+0x0000A66C TD_VS_SAMPLER6_BORDER_ALPHA
+0x0000A67C TD_VS_SAMPLER7_BORDER_ALPHA
+0x0000A68C TD_VS_SAMPLER8_BORDER_ALPHA
+0x0000A69C TD_VS_SAMPLER9_BORDER_ALPHA
+0x0000A6AC TD_VS_SAMPLER10_BORDER_ALPHA
+0x0000A6BC TD_VS_SAMPLER11_BORDER_ALPHA
+0x0000A6CC TD_VS_SAMPLER12_BORDER_ALPHA
+0x0000A6DC TD_VS_SAMPLER13_BORDER_ALPHA
+0x0000A6EC TD_VS_SAMPLER14_BORDER_ALPHA
+0x0000A6FC TD_VS_SAMPLER15_BORDER_ALPHA
+0x0000A70C TD_VS_SAMPLER16_BORDER_ALPHA
+0x0000A71C TD_VS_SAMPLER17_BORDER_ALPHA
+0x0000A608 TD_VS_SAMPLER0_BORDER_BLUE
+0x0000A618 TD_VS_SAMPLER1_BORDER_BLUE
+0x0000A628 TD_VS_SAMPLER2_BORDER_BLUE
+0x0000A638 TD_VS_SAMPLER3_BORDER_BLUE
+0x0000A648 TD_VS_SAMPLER4_BORDER_BLUE
+0x0000A658 TD_VS_SAMPLER5_BORDER_BLUE
+0x0000A668 TD_VS_SAMPLER6_BORDER_BLUE
+0x0000A678 TD_VS_SAMPLER7_BORDER_BLUE
+0x0000A688 TD_VS_SAMPLER8_BORDER_BLUE
+0x0000A698 TD_VS_SAMPLER9_BORDER_BLUE
+0x0000A6A8 TD_VS_SAMPLER10_BORDER_BLUE
+0x0000A6B8 TD_VS_SAMPLER11_BORDER_BLUE
+0x0000A6C8 TD_VS_SAMPLER12_BORDER_BLUE
+0x0000A6D8 TD_VS_SAMPLER13_BORDER_BLUE
+0x0000A6E8 TD_VS_SAMPLER14_BORDER_BLUE
+0x0000A6F8 TD_VS_SAMPLER15_BORDER_BLUE
+0x0000A708 TD_VS_SAMPLER16_BORDER_BLUE
+0x0000A718 TD_VS_SAMPLER17_BORDER_BLUE
+0x0000A604 TD_VS_SAMPLER0_BORDER_GREEN
+0x0000A614 TD_VS_SAMPLER1_BORDER_GREEN
+0x0000A624 TD_VS_SAMPLER2_BORDER_GREEN
+0x0000A634 TD_VS_SAMPLER3_BORDER_GREEN
+0x0000A644 TD_VS_SAMPLER4_BORDER_GREEN
+0x0000A654 TD_VS_SAMPLER5_BORDER_GREEN
+0x0000A664 TD_VS_SAMPLER6_BORDER_GREEN
+0x0000A674 TD_VS_SAMPLER7_BORDER_GREEN
+0x0000A684 TD_VS_SAMPLER8_BORDER_GREEN
+0x0000A694 TD_VS_SAMPLER9_BORDER_GREEN
+0x0000A6A4 TD_VS_SAMPLER10_BORDER_GREEN
+0x0000A6B4 TD_VS_SAMPLER11_BORDER_GREEN
+0x0000A6C4 TD_VS_SAMPLER12_BORDER_GREEN
+0x0000A6D4 TD_VS_SAMPLER13_BORDER_GREEN
+0x0000A6E4 TD_VS_SAMPLER14_BORDER_GREEN
+0x0000A6F4 TD_VS_SAMPLER15_BORDER_GREEN
+0x0000A704 TD_VS_SAMPLER16_BORDER_GREEN
+0x0000A714 TD_VS_SAMPLER17_BORDER_GREEN
+0x0000A600 TD_VS_SAMPLER0_BORDER_RED
+0x0000A610 TD_VS_SAMPLER1_BORDER_RED
+0x0000A620 TD_VS_SAMPLER2_BORDER_RED
+0x0000A630 TD_VS_SAMPLER3_BORDER_RED
+0x0000A640 TD_VS_SAMPLER4_BORDER_RED
+0x0000A650 TD_VS_SAMPLER5_BORDER_RED
+0x0000A660 TD_VS_SAMPLER6_BORDER_RED
+0x0000A670 TD_VS_SAMPLER7_BORDER_RED
+0x0000A680 TD_VS_SAMPLER8_BORDER_RED
+0x0000A690 TD_VS_SAMPLER9_BORDER_RED
+0x0000A6A0 TD_VS_SAMPLER10_BORDER_RED
+0x0000A6B0 TD_VS_SAMPLER11_BORDER_RED
+0x0000A6C0 TD_VS_SAMPLER12_BORDER_RED
+0x0000A6D0 TD_VS_SAMPLER13_BORDER_RED
+0x0000A6E0 TD_VS_SAMPLER14_BORDER_RED
+0x0000A6F0 TD_VS_SAMPLER15_BORDER_RED
+0x0000A700 TD_VS_SAMPLER16_BORDER_RED
+0x0000A710 TD_VS_SAMPLER17_BORDER_RED
+0x00009508 TA_CNTL_AUX
+0x0002802C DB_DEPTH_CLEAR
+0x00028D34 DB_PREFETCH_LIMIT
+0x00028D30 DB_PRELOAD_CONTROL
+0x00028D0C DB_RENDER_CONTROL
+0x00028D10 DB_RENDER_OVERRIDE
+0x0002880C DB_SHADER_CONTROL
+0x00028D28 DB_SRESULTS_COMPARE_STATE0
+0x00028D2C DB_SRESULTS_COMPARE_STATE1
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028028 DB_STENCIL_CLEAR
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x00028804 CB_BLEND_CONTROL
+0x00028420 CB_BLEND_ALPHA
+0x0002841C CB_BLEND_BLUE
+0x00028418 CB_BLEND_GREEN
+0x00028414 CB_BLEND_RED
+0x0002812C CB_CLEAR_ALPHA
+0x00028128 CB_CLEAR_BLUE
+0x00028124 CB_CLEAR_GREEN
+0x00028120 CB_CLEAR_RED
+0x00028C30 CB_CLRCMP_CONTROL
+0x00028C38 CB_CLRCMP_DST
+0x00028C3C CB_CLRCMP_MSK
+0x00028C34 CB_CLRCMP_SRC
+0x0002842C CB_FOG_BLUE
+0x00028428 CB_FOG_GREEN
+0x00028424 CB_FOG_RED
+0x00008040 WAIT_UNTIL
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009838 DB_WATERMARKS
+0x00028D44 DB_ALPHA_TO_MASK
+0x00009700 VC_CNTL
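For context (not part of the imported file): the r100/r300/r600-style CS checkers index the generated bitmap by dword, with reg >> 7 selecting the 32-bit word and (reg >> 2) & 31 the bit inside it; a clear bit lets the write through directly, while a set bit routes the register to the checker's special-case handling. The helper below restates that test as a sketch; the table contents are placeholders and the clear-bit-means-safe polarity is taken from those checkers rather than guaranteed for every ASIC path.

/* Hedged restatement of the safe-bitmap test; demo_reg_safe_bm is a
 * placeholder and clear-bit == writable is an assumption carried over
 * from the r100/r300/r600 checkers. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool reg_is_safe(const unsigned int *bm, size_t nwords, unsigned int reg)
{
	size_t i = reg >> 7;                     /* 32 dword registers per word */
	unsigned int m = 1u << ((reg >> 2) & 31);

	if (i >= nwords)
		return false;                    /* beyond the table: reject */
	return (bm[i] & m) == 0;                 /* clear bit: safe to write */
}

static const unsigned int demo_reg_safe_bm[] = { 0xFFFFFFF0u }; /* demo table */

int main(void)
{
	/* bits 0-3 are clear above, so dword registers 0x0-0xC test as safe */
	printf("0x0008: %s\n",
	       reg_is_safe(demo_reg_safe_bm, 1, 0x0008) ? "safe" : "checked");
	printf("0x0080: %s\n",
	       reg_is_safe(demo_reg_safe_bm, 1, 0x0080) ? "safe" : "checked");
	return 0;
}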
diff --git a/linux-imx/drivers/gpu/drm/radeon/reg_srcs/rn50 b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/rn50
new file mode 100644
index 0000000..2687b63
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/rn50
@@ -0,0 +1,30 @@
+rn50 0x3294
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
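The rn50 list above is small by design: it covers only the 2D blitter block (SRC_Y_X through RBBM_GUICNTL), which is consistent with RN50/ES1000-class server parts being driven as 2D-only devices, while the rs600 list that follows combines that same 2D block with the r300-family 3D register set. As a worked example of where one of these offsets lands under the reg >> 7 / (reg >> 2) & 31 indexing sketched earlier:

/* Worked example for one register from the rn50 list (WAIT_UNTIL, 0x1720);
 * pure arithmetic, no kernel interfaces involved. */
#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x1720;      /* WAIT_UNTIL */
	unsigned int dword = reg >> 2;  /* 0x5C8 == 1480: dword register index */
	unsigned int word = dword >> 5; /* 1480 / 32 == 46: bitmap word */
	unsigned int bit = dword & 31;  /* 1480 % 32 == 8: bit within the word */

	printf("reg 0x%04X -> word %u, bit %u\n", reg, word, bit);
	return 0;
}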
diff --git a/linux-imx/drivers/gpu/drm/radeon/reg_srcs/rs600 b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/rs600
new file mode 100644
index 0000000..d9f6286
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -0,0 +1,780 @@
+rs600 0x6d40
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F58 ZB_ZPASS_DATA
diff --git a/linux-imx/drivers/gpu/drm/radeon/reg_srcs/rv515 b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/rv515
new file mode 100644
index 0000000..78d5e99
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -0,0 +1,496 @@
+rv515 0x6d40
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x208C VAP_INDEX_OFFSET
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x2218 VAP_TEX_TO_COLOR_CNTL
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x2500 VAP_PVS_FLOW_CNTL_ADDRS_LW_0
+0x2504 VAP_PVS_FLOW_CNTL_ADDRS_UW_0
+0x2508 VAP_PVS_FLOW_CNTL_ADDRS_LW_1
+0x250C VAP_PVS_FLOW_CNTL_ADDRS_UW_1
+0x2510 VAP_PVS_FLOW_CNTL_ADDRS_LW_2
+0x2514 VAP_PVS_FLOW_CNTL_ADDRS_UW_2
+0x2518 VAP_PVS_FLOW_CNTL_ADDRS_LW_3
+0x251C VAP_PVS_FLOW_CNTL_ADDRS_UW_3
+0x2520 VAP_PVS_FLOW_CNTL_ADDRS_LW_4
+0x2524 VAP_PVS_FLOW_CNTL_ADDRS_UW_4
+0x2528 VAP_PVS_FLOW_CNTL_ADDRS_LW_5
+0x252C VAP_PVS_FLOW_CNTL_ADDRS_UW_5
+0x2530 VAP_PVS_FLOW_CNTL_ADDRS_LW_6
+0x2534 VAP_PVS_FLOW_CNTL_ADDRS_UW_6
+0x2538 VAP_PVS_FLOW_CNTL_ADDRS_LW_7
+0x253C VAP_PVS_FLOW_CNTL_ADDRS_UW_7
+0x2540 VAP_PVS_FLOW_CNTL_ADDRS_LW_8
+0x2544 VAP_PVS_FLOW_CNTL_ADDRS_UW_8
+0x2548 VAP_PVS_FLOW_CNTL_ADDRS_LW_9
+0x254C VAP_PVS_FLOW_CNTL_ADDRS_UW_9
+0x2550 VAP_PVS_FLOW_CNTL_ADDRS_LW_10
+0x2554 VAP_PVS_FLOW_CNTL_ADDRS_UW_10
+0x2558 VAP_PVS_FLOW_CNTL_ADDRS_LW_11
+0x255C VAP_PVS_FLOW_CNTL_ADDRS_UW_11
+0x2560 VAP_PVS_FLOW_CNTL_ADDRS_LW_12
+0x2564 VAP_PVS_FLOW_CNTL_ADDRS_UW_12
+0x2568 VAP_PVS_FLOW_CNTL_ADDRS_LW_13
+0x256C VAP_PVS_FLOW_CNTL_ADDRS_UW_13
+0x2570 VAP_PVS_FLOW_CNTL_ADDRS_LW_14
+0x2574 VAP_PVS_FLOW_CNTL_ADDRS_UW_14
+0x2578 VAP_PVS_FLOW_CNTL_ADDRS_LW_15
+0x257C VAP_PVS_FLOW_CNTL_ADDRS_UW_15
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x4010 GB_MSPOS0
+0x4014 GB_MSPOS1
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4114 SU_TEX_WRAP_PS3
+0x4118 PS3_ENABLE
+0x411C PS3_VTX_FMT
+0x4120 PS3_TEX_SOURCE
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4258 GA_COLOR_CONTROL_PS3
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4074 RS_IP_0
+0x4078 RS_IP_1
+0x407C RS_IP_2
+0x4080 RS_IP_3
+0x4084 RS_IP_4
+0x4088 RS_IP_5
+0x408C RS_IP_6
+0x4090 RS_IP_7
+0x4094 RS_IP_8
+0x4098 RS_IP_9
+0x409C RS_IP_10
+0x40A0 RS_IP_11
+0x40A4 RS_IP_12
+0x40A8 RS_IP_13
+0x40AC RS_IP_14
+0x40B0 RS_IP_15
+0x4320 RS_INST_0
+0x4324 RS_INST_1
+0x4328 RS_INST_2
+0x432C RS_INST_3
+0x4330 RS_INST_4
+0x4334 RS_INST_5
+0x4338 RS_INST_6
+0x433C RS_INST_7
+0x4340 RS_INST_8
+0x4344 RS_INST_9
+0x4348 RS_INST_10
+0x434C RS_INST_11
+0x4350 RS_INST_12
+0x4354 RS_INST_13
+0x4358 RS_INST_14
+0x435C RS_INST_15
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4250 GA_US_VECTOR_INDEX
+0x4254 GA_US_VECTOR_DATA
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4620 US_FC_BOOL_CONST
+0x4624 US_FC_CTRL
+0x4630 US_CODE_ADDR
+0x4634 US_CODE_RANGE
+0x4638 US_CODE_OFFSET
+0x4640 US_FORMAT0_0
+0x4644 US_FORMAT0_1
+0x4648 US_FORMAT0_2
+0x464C US_FORMAT0_3
+0x4650 US_FORMAT0_4
+0x4654 US_FORMAT0_5
+0x4658 US_FORMAT0_6
+0x465C US_FORMAT0_7
+0x4660 US_FORMAT0_8
+0x4664 US_FORMAT0_9
+0x4668 US_FORMAT0_10
+0x466C US_FORMAT0_11
+0x4670 US_FORMAT0_12
+0x4674 US_FORMAT0_13
+0x4678 US_FORMAT0_14
+0x467C US_FORMAT0_15
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46C0 RB3D_COLOR_CLEAR_VALUE_AR
+0x46C4 RB3D_COLOR_CLEAR_VALUE_GB
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4BE0 FG_ALPHA_VALUE
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4EF8 RB3D_CONSTANT_COLOR_AR
+0x4EFC RB3D_CONSTANT_COLOR_GB
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F58 ZB_ZPASS_DATA
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4FD4 ZB_STENCILREFMASK_BF
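The reg_srcs lists above follow a simple format: the first line names the ASIC and the upper bound of its register space, and every following `offset NAME` pair declares a register that userspace command streams may write. At build time each list is converted into a generated safe-register bitmap header (note the rs600_reg_safe.h include in rs600.c further below) that the command-stream checker consults. A minimal lookup over such a bitmap might look like this sketch (names are illustrative):

    /* One bit per 32-bit register, 32 registers per bitmap word. */
    static bool reg_is_safe(const u32 *safe_bm, unsigned bm_words, u32 reg)
    {
            unsigned word = reg >> 7;          /* (reg / 4) / 32 */
            unsigned bit  = (reg >> 2) & 31;

            if (word >= bm_words)
                    return false;
            return (safe_bm[word] & (1u << bit)) != 0;
    }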
diff --git a/linux-imx/drivers/gpu/drm/radeon/rs100d.h b/linux-imx/drivers/gpu/drm/radeon/rs100d.h
new file mode 100644
index 0000000..48a913a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rs100d.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS100D_H__
+#define __RS100D_H__
+
+/* Registers */
+#define R_00015C_NB_TOM                              0x00015C
+#define   S_00015C_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_00015C_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_00015C_MC_FB_START                         0xFFFF0000
+#define   S_00015C_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_00015C_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_00015C_MC_FB_TOP                           0x0000FFFF
+
+#endif
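The S_/G_/C_ triplets above follow the convention used throughout these register headers: S_* shifts a field value into position, G_* extracts it from a register word, and C_* is the complement mask that clears the field for read-modify-write updates. A hedged sketch of their use (RREG32/WREG32 are the driver's MMIO helpers, and `new_top` is an illustrative value):

    u32 tom = RREG32(R_00015C_NB_TOM);
    u32 fb_start = G_00015C_MC_FB_START(tom);          /* bits 15:0 */

    /* clear MC_FB_TOP (bits 31:16), then install a new top-of-FB */
    tom = (tom & C_00015C_MC_FB_TOP) | S_00015C_MC_FB_TOP(new_top);
    WREG32(R_00015C_NB_TOM, tom);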
diff --git a/linux-imx/drivers/gpu/drm/radeon/rs400.c b/linux-imx/drivers/gpu/drm/radeon/rs400.c
new file mode 100644
index 0000000..b8074a8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rs400.c
@@ -0,0 +1,569 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rs400d.h"
+
+/* This file gathers functions specific to the rs400 and rs480 */
+static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
+
+void rs400_gart_adjust_size(struct radeon_device *rdev)
+{
+	/* Check gart size */
+	switch (rdev->mc.gtt_size/(1024*1024)) {
+	case 32:
+	case 64:
+	case 128:
+	case 256:
+	case 512:
+	case 1024:
+	case 2048:
+		break;
+	default:
+		DRM_ERROR("Unable to use IGP GART size %uM\n",
+			  (unsigned)(rdev->mc.gtt_size >> 20));
+		DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
+		DRM_ERROR("Forcing to 32M GART size\n");
+		rdev->mc.gtt_size = 32 * 1024 * 1024;
+		return;
+	}
+}
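+
+/* The switch above accepts exactly the powers of two from 32 MiB to
+ * 2 GiB. An equivalent, more compact check (a sketch only, using
+ * is_power_of_2() from <linux/log2.h>) would be:
+ *
+ *	u64 mb = rdev->mc.gtt_size >> 20;
+ *	bool ok = mb >= 32 && mb <= 2048 && is_power_of_2(mb);
+ */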
+
+void rs400_gart_tlb_flush(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	unsigned int timeout = rdev->usec_timeout;
+
+	WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
+	do {
+		tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
+		if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
+			break;
+		DRM_UDELAY(1);
+		timeout--;
+	} while (timeout > 0);
+	WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
+}
+
+int rs400_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.ptr) {
+		WARN(1, "RS400 GART already initialized\n");
+		return 0;
+	}
+	/* Check gart size */
+	switch(rdev->mc.gtt_size / (1024 * 1024)) {
+	case 32:
+	case 64:
+	case 128:
+	case 256:
+	case 512:
+	case 1024:
+	case 2048:
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r)
+		return r;
+	if (rs400_debugfs_pcie_gart_info_init(rdev))
+		DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+	return radeon_gart_table_ram_alloc(rdev);
+}
+
+int rs400_gart_enable(struct radeon_device *rdev)
+{
+	uint32_t size_reg;
+	uint32_t tmp;
+
+	radeon_gart_restore(rdev);
+	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
+	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
+	/* Check gart size */
+	switch(rdev->mc.gtt_size / (1024 * 1024)) {
+	case 32:
+		size_reg = RS480_VA_SIZE_32MB;
+		break;
+	case 64:
+		size_reg = RS480_VA_SIZE_64MB;
+		break;
+	case 128:
+		size_reg = RS480_VA_SIZE_128MB;
+		break;
+	case 256:
+		size_reg = RS480_VA_SIZE_256MB;
+		break;
+	case 512:
+		size_reg = RS480_VA_SIZE_512MB;
+		break;
+	case 1024:
+		size_reg = RS480_VA_SIZE_1GB;
+		break;
+	case 2048:
+		size_reg = RS480_VA_SIZE_2GB;
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* It should be fine to program it to max value */
+	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
+		WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
+		WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
+	} else {
+		WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
+		WREG32(RS480_AGP_BASE_2, 0);
+	}
+	tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
+	tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
+	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+		WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
+		tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+		WREG32(RADEON_BUS_CNTL, tmp);
+	} else {
+		WREG32(RADEON_MC_AGP_LOCATION, tmp);
+		tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+		WREG32(RADEON_BUS_CNTL, tmp);
+	}
+	/* Program the table address: low 32 bits (page aligned) plus address bits 39:32 in bits 7:4. */
+	tmp = (u32)rdev->gart.table_addr & 0xfffff000;
+	tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
+
+	WREG32_MC(RS480_GART_BASE, tmp);
+	/* TODO: more tweaking here */
+	WREG32_MC(RS480_GART_FEATURE_ID,
+		  (RS480_TLB_ENABLE |
+		   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
+	/* Disable snooping */
+	WREG32_MC(RS480_AGP_MODE_CNTL,
+		  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
+	/* Disable AGP mode */
+	/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
+	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
+	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+		tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
+		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
+	} else {
+		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+		tmp |= RS480_GART_INDEX_REG_EN;
+		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
+	}
+	/* Enable gart */
+	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
+	rs400_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+void rs400_gart_disable(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
+	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
+	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
+}
+
+void rs400_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	rs400_gart_disable(rdev);
+	radeon_gart_table_ram_free(rdev);
+}
+
+#define RS400_PTE_WRITEABLE (1 << 2)
+#define RS400_PTE_READABLE  (1 << 3)
+
+int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+	uint32_t entry;
+	u32 *gtt = rdev->gart.ptr;
+
+	if (i < 0 || i > rdev->gart.num_gpu_pages) {
+		return -EINVAL;
+	}
+
+	entry = (lower_32_bits(addr) & PAGE_MASK) |
+		((upper_32_bits(addr) & 0xff) << 4) |
+		RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
+	entry = cpu_to_le32(entry);
+	gtt[i] = entry;
+	return 0;
+}
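+
+/* The PTE packs a 40-bit DMA address and the permission bits into one
+ * 32-bit word. Worked example, assuming 4 KiB pages and
+ * addr = 0x1_2345_6000:
+ *
+ *	  0x23456000	lower_32_bits(addr) & PAGE_MASK
+ *	| 0x00000010	(upper_32_bits(addr) & 0xff) << 4, addr bits 39:32
+ *	| 0x0000000C	RS400_PTE_WRITEABLE | RS400_PTE_READABLE
+ *	= 0x2345601C	stored little-endian via cpu_to_le32()
+ */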
+
+int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & RADEON_MC_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+static void rs400_gpu_init(struct radeon_device *rdev)
+{
+	/* FIXME: is this correct ? */
+	r420_pipes_init(rdev);
+	if (rs400_mc_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "rs400: Failed to wait for MC idle while "
+		       "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
+	}
+}
+
+static void rs400_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+
+	rs400_gart_adjust_size(rdev);
+	rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
+	/* DDR for all cards after R300, including IGPs */
+	rdev->mc.vram_is_ddr = true;
+	rdev->mc.vram_width = 128;
+	r100_vram_init_sizes(rdev);
+	base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
+	radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	uint32_t r;
+
+	WREG32(RS480_NB_MC_INDEX, reg & 0xff);
+	r = RREG32(RS480_NB_MC_DATA);
+	WREG32(RS480_NB_MC_INDEX, 0xff);
+	return r;
+}
+
+void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
+	WREG32(RS480_NB_MC_DATA, (v));
+	WREG32(RS480_NB_MC_INDEX, 0xff);
+}
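+
+/* rs400_mc_rreg/rs400_mc_wreg implement the classic index/data
+ * indirection: write the register number to RS480_NB_MC_INDEX (with
+ * the write-enable bit for stores), move the payload through
+ * RS480_NB_MC_DATA, then park the index. A read-modify-write built on
+ * top of them could look like this (hypothetical helper, not used
+ * below):
+ *
+ *	uint32_t tmp = rs400_mc_rreg(rdev, reg);
+ *	rs400_mc_wreg(rdev, reg, (tmp & ~clr) | set);
+ */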
+
+#if defined(CONFIG_DEBUG_FS)
+static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(RADEON_HOST_PATH_CNTL);
+	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
+	tmp = RREG32(RADEON_BUS_CNTL);
+	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
+	seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
+	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
+		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
+		seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
+		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
+		seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
+		tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
+		seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
+		tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
+		seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
+		tmp = RREG32(RS690_HDP_FB_LOCATION);
+		seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
+	} else {
+		tmp = RREG32(RADEON_AGP_BASE);
+		seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
+		tmp = RREG32(RS480_AGP_BASE_2);
+		seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
+		tmp = RREG32(RADEON_MC_AGP_LOCATION);
+		seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
+	}
+	tmp = RREG32_MC(RS480_GART_BASE);
+	seq_printf(m, "GART_BASE 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_GART_FEATURE_ID);
+	seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
+	seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+	seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x5F);
+	seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
+	seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
+	tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
+	seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x3B);
+	seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x3C);
+	seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x30);
+	seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x31);
+	seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x32);
+	seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x33);
+	seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x34);
+	seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x35);
+	seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x36);
+	seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
+	tmp = RREG32_MC(0x37);
+	seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list rs400_gart_info_list[] = {
+	{"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
+};
+#endif
+
+static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
+static void rs400_mc_program(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+
+	/* Stops all mc clients */
+	r100_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rs400_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
+	WREG32(R_000148_MC_FB_LOCATION,
+		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+
+	r100_mc_resume(rdev, &save);
+}
+
+static int rs400_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	r100_set_common_regs(rdev);
+
+	rs400_mc_program(rdev);
+	/* Resume clock */
+	r300_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rs400_gpu_init(rdev);
+	r100_enable_bm(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r = rs400_gart_enable(rdev);
+	if (r)
+		return r;
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int rs400_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is disabled */
+	rs400_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	r300_clock_startup(rdev);
+	/* setup MC before calling post tables */
+	rs400_mc_program(rdev);
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	radeon_combios_asic_init(rdev->ddev);
+	/* Resume clock after posting */
+	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = rs400_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int rs400_suspend(struct radeon_device *rdev)
+{
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	rs400_gart_disable(rdev);
+	return 0;
+}
+
+void rs400_fini(struct radeon_device *rdev)
+{
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	rs400_gart_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+int rs400_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable VGA */
+	r100_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: disabling VGA needs to use the VGA request */
+	/* restore some registers to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
+		return -EINVAL;
+	} else {
+		r = radeon_combios_init(rdev);
+		if (r)
+			return r;
+	}
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if the card is posted */
+	if (!radeon_boot_test_post_card(rdev))
+		return -EINVAL;
+
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize memory controller */
+	rs400_mc_init(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rs400_gart_init(rdev);
+	if (r)
+		return r;
+	r300_set_reg_safe(rdev);
+
+	rdev->accel_working = true;
+	r = rs400_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init; stop acceleration */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		rs400_gart_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/rs400d.h b/linux-imx/drivers/gpu/drm/radeon/rs400d.h
new file mode 100644
index 0000000..6d8bac5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rs400d.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS400D_H__
+#define __RS400D_H__
+
+/* Registers */
+#define R_000148_MC_FB_LOCATION                      0x000148
+#define   S_000148_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000148_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000148_MC_FB_START                         0xFFFF0000
+#define   S_000148_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000148_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000148_MC_FB_TOP                           0x0000FFFF
+#define R_00015C_NB_TOM                              0x00015C
+#define   S_00015C_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_00015C_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_00015C_MC_FB_START                         0xFFFF0000
+#define   S_00015C_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_00015C_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_00015C_MC_FB_TOP                           0x0000FFFF
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+
+#endif
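The same S_/G_/C_ pattern applies here; the RBBM_STATUS accessors are convenient for decoding the raw status words that rs400.c prints when a GPU reset fails. Illustrative sketch only:

    u32 status = RREG32(R_000E40_RBBM_STATUS);

    if (G_000E40_GUI_ACTIVE(status))
            DRM_INFO("GUI busy: VAP=%u GA=%u CP=%u fifo_avail=%u\n",
                     G_000E40_VAP_BUSY(status), G_000E40_GA_BUSY(status),
                     G_000E40_CP_CMDSTRM_BUSY(status),
                     G_000E40_CMDFIFO_AVAIL(status));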
diff --git a/linux-imx/drivers/gpu/drm/radeon/rs600.c b/linux-imx/drivers/gpu/drm/radeon/rs600.c
new file mode 100644
index 0000000..ae813fe
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rs600.c
@@ -0,0 +1,1080 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+/* RS600 / Radeon X1250/X1270 integrated GPU
+ *
+ * This file gathers functions specific to the RS600, the IGP of the
+ * X1250/X1270 family that pairs with Intel CPUs (RS690/RS740 is the
+ * X1250/X1270 variant for AMD CPUs). The display engines are the
+ * Avivo ones, the BIOS is an atombios, and the 3D blocks are those of
+ * the R4XX family. The GART differs from the RS400 one and is very
+ * close to that of the R600 family (R600 likely being an evolution of
+ * the RS600 GART block).
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "rs600d.h"
+
+#include "rs600_reg_safe.h"
+
+static void rs600_gpu_init(struct radeon_device *rdev);
+int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+
+static const u32 crtc_offsets[2] =
+{
+	0,
+	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
+static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+	return (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) &
+		AVIVO_D1CRTC_V_BLANK) != 0;
+}
+
+static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+	u32 pos1, pos2;
+
+	pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+	pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+	return pos1 != pos2;
+}
+
+/**
+ * avivo_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r5xx-r7xx).
+ */
+void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
+{
+	unsigned i = 0;
+
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
+		return;
+
+	/* depending on when we hit vblank, we may be close to active; if so,
+	 * wait for another frame.
+	 */
+	while (avivo_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!avivo_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+
+	while (!avivo_is_in_vblank(rdev, crtc)) {
+		if (i++ % 100 == 0) {
+			if (!avivo_is_counter_moving(rdev, crtc))
+				break;
+		}
+	}
+}
+
+void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+	int i;
+
+	/* Lock the graphics update lock */
+	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+			break;
+		udelay(1);
+	}
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
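+
+/* Together with the pre/post hooks above, the flip sequence this
+ * callback participates in is roughly the following (a sketch of the
+ * calling convention; the actual sequencing lives in the shared
+ * radeon page-flip code):
+ *
+ *	rs600_pre_page_flip(rdev, crtc);	arm the pflip interrupt
+ *	pending = rs600_page_flip(rdev, crtc, new_crtc_base);
+ *	... hardware latches the new base at the next vblank ...
+ *	rs600_post_page_flip(rdev, crtc);	disarm the pflip interrupt
+ */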
+
+void rs600_pm_misc(struct radeon_device *rdev)
+{
+	int requested_index = rdev->pm.requested_power_state_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
+	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
+
+	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp |= voltage->gpio.mask;
+			else
+				tmp &= ~(voltage->gpio.mask);
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		} else {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp &= ~voltage->gpio.mask;
+			else
+				tmp |= voltage->gpio.mask;
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		}
+	} else if (voltage->type == VOLTAGE_VDDC)
+		radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
+
+	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
+	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
+	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
+		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
+		}
+	} else {
+		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
+		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
+	}
+	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
+
+	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
+		if (voltage->delay) {
+			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
+			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
+		} else
+			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
+	} else
+		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
+	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
+
+	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+		hdp_dyn_cntl &= ~HDP_FORCEON;
+	else
+		hdp_dyn_cntl |= HDP_FORCEON;
+	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
+#if 0
+	/* mc_host_dyn seems to cause hangs from time to time */
+	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
+		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
+	else
+		mc_host_dyn_cntl |= MC_HOST_FORCEON;
+	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
+#endif
+	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
+		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
+	else
+		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
+	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
+
+	/* set pcie lanes */
+	if ((rdev->flags & RADEON_IS_PCIE) &&
+	    !(rdev->flags & RADEON_IS_IGP) &&
+	    rdev->asic->pm.set_pcie_lanes &&
+	    (ps->pcie_lanes !=
+	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+		radeon_set_pcie_lanes(rdev,
+				      ps->pcie_lanes);
+		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+	}
+}
+
+void rs600_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+void rs600_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+/* HPD (hot plug detect) handling for digital panel connect/disconnect */
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = false;
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
+		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
+		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
+			connected = true;
+		break;
+	default:
+		break;
+	}
+	return connected;
+}
+
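+/* Arm the HPD interrupt for the opposite of the currently sensed
+ * state, so the next interrupt fires on the next connect or
+ * disconnect transition.
+ */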
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+			    enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = rs600_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+		if (connected)
+			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+		else
+			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+		if (connected)
+			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+		else
+			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+void rs600_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned enable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
+			break;
+		case RADEON_HPD_2:
+			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
+			break;
+		default:
+			break;
+		}
+		enable |= 1 << radeon_connector->hpd.hpd;
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+	}
+	radeon_irq_kms_enable_hpd(rdev, enable);
+}
+
+void rs600_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+	unsigned disable = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
+			break;
+		case RADEON_HPD_2:
+			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
+			break;
+		default:
+			break;
+		}
+		disable |= 1 << radeon_connector->hpd.hpd;
+	}
+	radeon_irq_kms_disable_hpd(rdev, disable);
+}
+
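+/* Soft-reset sequence: stop the MC clients and the CP, then pulse
+ * RBBM_SOFT_RESET for GA+VAP, CP and MC in turn, logging RBBM_STATUS
+ * after each step.  Bus mastering is disabled across the reset and
+ * the PCI config space is saved/restored around it.
+ */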
+int rs600_asic_reset(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+	u32 status, tmp;
+	int ret = 0;
+
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
+	}
+	/* Stop all MC clients */
+	rv515_mc_stop(rdev, &save);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* stop CP */
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	WREG32(RADEON_CP_RB_WPTR, 0);
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	pci_save_state(rdev->pdev);
+	/* disable bus mastering */
+	pci_clear_master(rdev->pdev);
+	mdelay(1);
+	/* reset GA+VAP */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+					S_0000F0_SOFT_RESET_GA(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* reset CP */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* reset MC */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* restore PCI & busmastering */
+	pci_restore_state(rdev->pdev);
+	/* Check if GPU is idle */
+	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+		dev_err(rdev->dev, "failed to reset GPU\n");
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeed\n");
+	rv515_mc_resume(rdev, &save);
+	return ret;
+}
+
+/*
+ * GART.
+ */
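+/* The INVALIDATE bits in MC_PT0_CNTL are pulsed: cleared, set, then
+ * cleared again, and the final read-back makes sure the last write
+ * has actually reached the MC before we return.
+ */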
+void rs600_gart_tlb_flush(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
+	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+}
+
+static int rs600_gart_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->gart.robj) {
+		WARN(1, "RS600 GART already initialized\n");
+		return 0;
+	}
+	/* Initialize common gart structure */
+	r = radeon_gart_init(rdev);
+	if (r) {
+		return r;
+	}
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
+	return radeon_gart_table_vram_alloc(rdev);
+}
+
+static int rs600_gart_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int r, i;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+	/* Enable bus master */
+	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+	WREG32(RADEON_BUS_CNTL, tmp);
+	/* FIXME: setup default page */
+	WREG32_MC(R_000100_MC_PT0_CNTL,
+		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
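+	/* Configure the 19 MC client interfaces: force translation on,
+	 * treat accesses outside the aperture as not-in-system, and pass
+	 * unmapped system-aperture accesses straight through.
+	 */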
+	for (i = 0; i < 19; i++) {
+		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
+			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
+				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
+			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
+			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
+			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
+	}
+	/* enable first context */
+	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
+		  S_000102_ENABLE_PAGE_TABLE(1) |
+		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+
+	/* disable all other contexts */
+	for (i = 1; i < 8; i++)
+		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
+
+	/* setup the page table */
+	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
+		  rdev->gart.table_addr);
+	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
+	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+
+	/* System context maps to VRAM space */
+	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
+	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
+
+	/* enable page tables */
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
+	tmp = RREG32_MC(R_000009_MC_CNTL1);
+	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
+	rs600_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void rs600_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* FIXME: disable out of gart access */
+	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
+	tmp = RREG32_MC(R_000009_MC_CNTL1);
+	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void rs600_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	rs600_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+}
+
+#define R600_PTE_VALID     (1 << 0)
+#define R600_PTE_SYSTEM    (1 << 1)
+#define R600_PTE_SNOOPED   (1 << 2)
+#define R600_PTE_READABLE  (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
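+/* Each GART entry is a 64-bit PTE: the page-aligned system address
+ * OR'ed with the flag bits above.  For a (hypothetical) snooped page
+ * at 0x12345000 that would be
+ *   0x12345000 | VALID | SYSTEM | SNOOPED | READABLE | WRITEABLE
+ *   = 0x0000000012345067.
+ * Entries pointing at the dummy page are left without VALID (and the
+ * R/W bits), presumably so stray accesses are rejected rather than
+ * landing on real memory.
+ */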
+int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+	void __iomem *ptr = (void __iomem *)rdev->gart.ptr;
+
+	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+		return -EINVAL;
+	}
+	addr = addr & 0xFFFFFFFFFFFFF000ULL;
+	if (addr != rdev->dummy_page.addr)
+		addr |= R600_PTE_VALID | R600_PTE_READABLE |
+			R600_PTE_WRITEABLE;
+	addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+	writeq(addr, ptr + (i * 8));
+	return 0;
+}
+
+int rs600_irq_set(struct radeon_device *rdev)
+{
+	uint32_t tmp = 0;
+	uint32_t mode_int = 0;
+	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
+		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
+		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+	u32 hdmi0;
+	if (ASIC_IS_DCE2(rdev))
+		hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+			~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+	else
+		hdmi0 = 0;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+		WREG32(R_000040_GEN_INT_CNTL, 0);
+		return -EINVAL;
+	}
+	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+		tmp |= S_000040_SW_INT_EN(1);
+	}
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
+	}
+	if (rdev->irq.hpd[0]) {
+		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+	}
+	if (rdev->irq.hpd[1]) {
+		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+	}
+	if (rdev->irq.afmt[0]) {
+		hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+	}
+	WREG32(R_000040_GEN_INT_CNTL, tmp);
+	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
+	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+	if (ASIC_IS_DCE2(rdev))
+		WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+	return 0;
+}
+
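+/* Read the interrupt status registers, write the ack bits back, and
+ * cache the display/HDMI status in rdev->irq.stat_regs.r500 for
+ * rs600_irq_process().  Returns the masked GEN_INT_STATUS bits (only
+ * SW_INT is significant here).
+ */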
+static inline u32 rs600_irq_ack(struct radeon_device *rdev)
+{
+	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
+	uint32_t irq_mask = S_000044_SW_INT(1);
+	u32 tmp;
+
+	if (G_000044_DISPLAY_INT_STAT(irqs)) {
+		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			WREG32(R_006534_D1MODE_VBLANK_STATUS,
+				S_006534_D1MODE_VBLANK_ACK(1));
+		}
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
+				S_006D34_D2MODE_VBLANK_ACK(1));
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
+			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
+			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		}
+	} else {
+		rdev->irq.stat_regs.r500.disp_int = 0;
+	}
+
+	if (ASIC_IS_DCE2(rdev)) {
+		rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
+			S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
+		if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+			tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
+			tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
+			WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
+		}
+	} else
+		rdev->irq.stat_regs.r500.hdmi0_status = 0;
+
+	if (irqs) {
+		WREG32(R_000044_GEN_INT_STATUS, irqs);
+	}
+	return irqs & irq_mask;
+}
+
+void rs600_irq_disable(struct radeon_device *rdev)
+{
+	u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
+		~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
+	WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
+	WREG32(R_000040_GEN_INT_CNTL, 0);
+	WREG32(R_006540_DxMODE_INT_MASK, 0);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	rs600_irq_ack(rdev);
+}
+
+int rs600_irq_process(struct radeon_device *rdev)
+{
+	u32 status, msi_rearm;
+	bool queue_hotplug = false;
+	bool queue_hdmi = false;
+
+	status = rs600_irq_ack(rdev);
+	if (!status &&
+	    !rdev->irq.stat_regs.r500.disp_int &&
+	    !rdev->irq.stat_regs.r500.hdmi0_status) {
+		return IRQ_NONE;
+	}
+	while (status ||
+	       rdev->irq.stat_regs.r500.disp_int ||
+	       rdev->irq.stat_regs.r500.hdmi0_status) {
+		/* SW interrupt */
+		if (G_000044_SW_INT(status)) {
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+		}
+		/* Vertical blank interrupts */
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (atomic_read(&rdev->irq.pflip[0]))
+				radeon_crtc_handle_flip(rdev, 0);
+		}
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (atomic_read(&rdev->irq.pflip[1]))
+				radeon_crtc_handle_flip(rdev, 1);
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD1\n");
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD2\n");
+		}
+		if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
+			queue_hdmi = true;
+			DRM_DEBUG("HDMI0\n");
+		}
+		status = rs600_irq_ack(rdev);
+	}
+	if (queue_hotplug)
+		schedule_work(&rdev->hotplug_work);
+	if (queue_hdmi)
+		schedule_work(&rdev->audio_work);
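+	/* On RS600/RS690/RS740 the MSI must be re-armed manually by
+	 * toggling the rearm bit in BUS_CNTL; other families use the
+	 * dedicated MSI_REARM_EN register instead.
+	 */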
+	if (rdev->msi_enabled) {
+		switch (rdev->family) {
+		case CHIP_RS600:
+		case CHIP_RS690:
+		case CHIP_RS740:
+			msi_rearm = RREG32(RADEON_BUS_CNTL) & ~RS600_MSI_REARM;
+			WREG32(RADEON_BUS_CNTL, msi_rearm);
+			WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
+			break;
+		default:
+			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
+			break;
+		}
+	}
+	return IRQ_HANDLED;
+}
+
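+/* The frame count is a free-running 24-bit counter per CRTC (see the
+ * DxCRTC_STATUS_FRAME_COUNT fields in rs600d.h). */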
+u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+	if (crtc == 0)
+		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
+	else
+		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
+}
+
+int rs600_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
+			return 0;
+		udelay(1);
+	}
+	return -1;
+}
+
+static void rs600_gpu_init(struct radeon_device *rdev)
+{
+	r420_pipes_init(rdev);
+	/* Wait for mc idle */
+	if (rs600_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+}
+
+static void rs600_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	rdev->mc.vram_is_ddr = true;
+	rdev->mc.vram_width = 128;
+	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	base = RREG32_MC(R_000004_MC_FB_LOCATION);
+	base = G_000004_MC_FB_START(base) << 16;
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = 0;
+	radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+void rs600_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
+	/* FIXME: implement full support */
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+
+	rs690_line_buffer_adjust(rdev, mode0, mode1);
+
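+	/* Display priority 2 pins both CRTCs' memory requests to
+	 * always-on (highest) arbitration priority; the A and B
+	 * watermark registers get the same value.
+	 */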
+	if (rdev->disp_priority == 2) {
+		d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
+		d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
+		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+		d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+		WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+		WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+		WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+	}
+}
+
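+/* MC registers are not directly mapped; they are reached through an
+ * index/data pair.  MC_IND_INDEX selects the MC register (plus
+ * MC_IND_WR_EN for writes) and MC_IND_DATA carries the payload, e.g.
+ *
+ *   v = rs600_mc_rreg(rdev, R_000004_MC_FB_LOCATION);
+ *   rs600_mc_wreg(rdev, R_000004_MC_FB_LOCATION, v);
+ */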
+uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+		S_000070_MC_IND_CITF_ARB0(1));
+	return RREG32(R_000074_MC_IND_DATA);
+}
+
+void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+		S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
+	WREG32(R_000074_MC_IND_DATA, v);
+}
+
+static void rs600_debugfs(struct radeon_device *rdev)
+{
+	if (r100_debugfs_rbbm_init(rdev))
+		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+}
+
+void rs600_set_safe_registers(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
+}
+
+static void rs600_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stop all MC clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rs600_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+
+	/* FIXME: what does AGP mean for this chipset? */
+	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
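+	/* (start 0xFFFF above top 0x0FFF: an empty, i.e. disabled, aperture) */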
+	WREG32_MC(R_000006_AGP_BASE, 0);
+	WREG32_MC(R_000007_AGP_BASE_2, 0);
+	/* Program MC */
+	WREG32_MC(R_000004_MC_FB_LOCATION,
+			S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
+			S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+	rv515_mc_resume(rdev, &save);
+}
+
+static int rs600_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	rs600_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rs600_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r = rs600_gart_enable(rdev);
+	if (r)
+		return r;
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing audio\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int rs600_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is disabled */
+	rs600_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	rv515_clock_startup(rdev);
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	atom_asic_init(rdev->mode_info.atom_context);
+	/* Resume clock after posting */
+	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = rs600_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int rs600_suspend(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	rs600_irq_disable(rdev);
+	rs600_gart_disable(rdev);
+	return 0;
+}
+
+void rs600_fini(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	rs600_gart_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+int rs600_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable VGA */
+	rv515_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* restore some register to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check whether the card is posted */
+	if (!radeon_boot_test_post_card(rdev))
+		return -EINVAL;
+
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize memory controller */
+	rs600_mc_init(rdev);
+	rs600_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rs600_gart_init(rdev);
+	if (r)
+		return r;
+	rs600_set_safe_registers(rdev);
+
+	rdev->accel_working = true;
+	r = rs600_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init; disable acceleration */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		rs600_gart_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/rs600d.h b/linux-imx/drivers/gpu/drm/radeon/rs600d.h
new file mode 100644
index 0000000..f1f8941
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rs600d.h
@@ -0,0 +1,685 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS600D_H__
+#define __RS600D_H__
+
+/* Registers */
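+/* Field helpers follow a fixed pattern: S_<reg>_<field>(x) shifts a
+ * value into the field, G_<reg>_<field>(x) extracts it, and
+ * C_<reg>_<field> is the AND mask that clears the field.
+ */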
+#define R_000040_GEN_INT_CNTL                        0x000040
+#define   S_000040_SCRATCH_INT_MASK(x)                 (((x) & 0x1) << 18)
+#define   G_000040_SCRATCH_INT_MASK(x)                 (((x) >> 18) & 0x1)
+#define   C_000040_SCRATCH_INT_MASK                    0xFFFBFFFF
+#define   S_000040_GUI_IDLE_MASK(x)                    (((x) & 0x1) << 19)
+#define   G_000040_GUI_IDLE_MASK(x)                    (((x) >> 19) & 0x1)
+#define   C_000040_GUI_IDLE_MASK                       0xFFF7FFFF
+#define   S_000040_DMA_VIPH1_INT_EN(x)                 (((x) & 0x1) << 13)
+#define   G_000040_DMA_VIPH1_INT_EN(x)                 (((x) >> 13) & 0x1)
+#define   C_000040_DMA_VIPH1_INT_EN                    0xFFFFDFFF
+#define   S_000040_DMA_VIPH2_INT_EN(x)                 (((x) & 0x1) << 14)
+#define   G_000040_DMA_VIPH2_INT_EN(x)                 (((x) >> 14) & 0x1)
+#define   C_000040_DMA_VIPH2_INT_EN                    0xFFFFBFFF
+#define   S_000040_DMA_VIPH3_INT_EN(x)                 (((x) & 0x1) << 15)
+#define   G_000040_DMA_VIPH3_INT_EN(x)                 (((x) >> 15) & 0x1)
+#define   C_000040_DMA_VIPH3_INT_EN                    0xFFFF7FFF
+#define   S_000040_I2C_INT_EN(x)                       (((x) & 0x1) << 17)
+#define   G_000040_I2C_INT_EN(x)                       (((x) >> 17) & 0x1)
+#define   C_000040_I2C_INT_EN                          0xFFFDFFFF
+#define   S_000040_GUI_IDLE(x)                         (((x) & 0x1) << 19)
+#define   G_000040_GUI_IDLE(x)                         (((x) >> 19) & 0x1)
+#define   C_000040_GUI_IDLE                            0xFFF7FFFF
+#define   S_000040_VIPH_INT_EN(x)                      (((x) & 0x1) << 24)
+#define   G_000040_VIPH_INT_EN(x)                      (((x) >> 24) & 0x1)
+#define   C_000040_VIPH_INT_EN                         0xFEFFFFFF
+#define   S_000040_SW_INT_EN(x)                        (((x) & 0x1) << 25)
+#define   G_000040_SW_INT_EN(x)                        (((x) >> 25) & 0x1)
+#define   C_000040_SW_INT_EN                           0xFDFFFFFF
+#define   S_000040_GEYSERVILLE(x)                      (((x) & 0x1) << 27)
+#define   G_000040_GEYSERVILLE(x)                      (((x) >> 27) & 0x1)
+#define   C_000040_GEYSERVILLE                         0xF7FFFFFF
+#define   S_000040_HDCP_AUTHORIZED_INT(x)              (((x) & 0x1) << 28)
+#define   G_000040_HDCP_AUTHORIZED_INT(x)              (((x) >> 28) & 0x1)
+#define   C_000040_HDCP_AUTHORIZED_INT                 0xEFFFFFFF
+#define   S_000040_DVI_I2C_INT(x)                      (((x) & 0x1) << 29)
+#define   G_000040_DVI_I2C_INT(x)                      (((x) >> 29) & 0x1)
+#define   C_000040_DVI_I2C_INT                         0xDFFFFFFF
+#define   S_000040_GUIDMA(x)                           (((x) & 0x1) << 30)
+#define   G_000040_GUIDMA(x)                           (((x) >> 30) & 0x1)
+#define   C_000040_GUIDMA                              0xBFFFFFFF
+#define   S_000040_VIDDMA(x)                           (((x) & 0x1) << 31)
+#define   G_000040_VIDDMA(x)                           (((x) >> 31) & 0x1)
+#define   C_000040_VIDDMA                              0x7FFFFFFF
+#define R_000044_GEN_INT_STATUS                      0x000044
+#define   S_000044_DISPLAY_INT_STAT(x)                 (((x) & 0x1) << 0)
+#define   G_000044_DISPLAY_INT_STAT(x)                 (((x) >> 0) & 0x1)
+#define   C_000044_DISPLAY_INT_STAT                    0xFFFFFFFE
+#define   S_000044_VGA_INT_STAT(x)                     (((x) & 0x1) << 1)
+#define   G_000044_VGA_INT_STAT(x)                     (((x) >> 1) & 0x1)
+#define   C_000044_VGA_INT_STAT                        0xFFFFFFFD
+#define   S_000044_CAP0_INT_ACTIVE(x)                  (((x) & 0x1) << 8)
+#define   G_000044_CAP0_INT_ACTIVE(x)                  (((x) >> 8) & 0x1)
+#define   C_000044_CAP0_INT_ACTIVE                     0xFFFFFEFF
+#define   S_000044_DMA_VIPH0_INT(x)                    (((x) & 0x1) << 12)
+#define   G_000044_DMA_VIPH0_INT(x)                    (((x) >> 12) & 0x1)
+#define   C_000044_DMA_VIPH0_INT                       0xFFFFEFFF
+#define   S_000044_DMA_VIPH1_INT(x)                    (((x) & 0x1) << 13)
+#define   G_000044_DMA_VIPH1_INT(x)                    (((x) >> 13) & 0x1)
+#define   C_000044_DMA_VIPH1_INT                       0xFFFFDFFF
+#define   S_000044_DMA_VIPH2_INT(x)                    (((x) & 0x1) << 14)
+#define   G_000044_DMA_VIPH2_INT(x)                    (((x) >> 14) & 0x1)
+#define   C_000044_DMA_VIPH2_INT                       0xFFFFBFFF
+#define   S_000044_DMA_VIPH3_INT(x)                    (((x) & 0x1) << 15)
+#define   G_000044_DMA_VIPH3_INT(x)                    (((x) >> 15) & 0x1)
+#define   C_000044_DMA_VIPH3_INT                       0xFFFF7FFF
+#define   S_000044_MC_PROBE_FAULT_STAT(x)              (((x) & 0x1) << 16)
+#define   G_000044_MC_PROBE_FAULT_STAT(x)              (((x) >> 16) & 0x1)
+#define   C_000044_MC_PROBE_FAULT_STAT                 0xFFFEFFFF
+#define   S_000044_I2C_INT(x)                          (((x) & 0x1) << 17)
+#define   G_000044_I2C_INT(x)                          (((x) >> 17) & 0x1)
+#define   C_000044_I2C_INT                             0xFFFDFFFF
+#define   S_000044_SCRATCH_INT_STAT(x)                 (((x) & 0x1) << 18)
+#define   G_000044_SCRATCH_INT_STAT(x)                 (((x) >> 18) & 0x1)
+#define   C_000044_SCRATCH_INT_STAT                    0xFFFBFFFF
+#define   S_000044_GUI_IDLE_STAT(x)                    (((x) & 0x1) << 19)
+#define   G_000044_GUI_IDLE_STAT(x)                    (((x) >> 19) & 0x1)
+#define   C_000044_GUI_IDLE_STAT                       0xFFF7FFFF
+#define   S_000044_ATI_OVERDRIVE_INT_STAT(x)           (((x) & 0x1) << 20)
+#define   G_000044_ATI_OVERDRIVE_INT_STAT(x)           (((x) >> 20) & 0x1)
+#define   C_000044_ATI_OVERDRIVE_INT_STAT              0xFFEFFFFF
+#define   S_000044_MC_PROTECTION_FAULT_STAT(x)         (((x) & 0x1) << 21)
+#define   G_000044_MC_PROTECTION_FAULT_STAT(x)         (((x) >> 21) & 0x1)
+#define   C_000044_MC_PROTECTION_FAULT_STAT            0xFFDFFFFF
+#define   S_000044_RBBM_READ_INT_STAT(x)               (((x) & 0x1) << 22)
+#define   G_000044_RBBM_READ_INT_STAT(x)               (((x) >> 22) & 0x1)
+#define   C_000044_RBBM_READ_INT_STAT                  0xFFBFFFFF
+#define   S_000044_CB_CONTEXT_SWITCH_STAT(x)           (((x) & 0x1) << 23)
+#define   G_000044_CB_CONTEXT_SWITCH_STAT(x)           (((x) >> 23) & 0x1)
+#define   C_000044_CB_CONTEXT_SWITCH_STAT              0xFF7FFFFF
+#define   S_000044_VIPH_INT(x)                         (((x) & 0x1) << 24)
+#define   G_000044_VIPH_INT(x)                         (((x) >> 24) & 0x1)
+#define   C_000044_VIPH_INT                            0xFEFFFFFF
+#define   S_000044_SW_INT(x)                           (((x) & 0x1) << 25)
+#define   G_000044_SW_INT(x)                           (((x) >> 25) & 0x1)
+#define   C_000044_SW_INT                              0xFDFFFFFF
+#define   S_000044_SW_INT_SET(x)                       (((x) & 0x1) << 26)
+#define   G_000044_SW_INT_SET(x)                       (((x) >> 26) & 0x1)
+#define   C_000044_SW_INT_SET                          0xFBFFFFFF
+#define   S_000044_IDCT_INT_STAT(x)                    (((x) & 0x1) << 27)
+#define   G_000044_IDCT_INT_STAT(x)                    (((x) >> 27) & 0x1)
+#define   C_000044_IDCT_INT_STAT                       0xF7FFFFFF
+#define   S_000044_GUIDMA_STAT(x)                      (((x) & 0x1) << 30)
+#define   G_000044_GUIDMA_STAT(x)                      (((x) >> 30) & 0x1)
+#define   C_000044_GUIDMA_STAT                         0xBFFFFFFF
+#define   S_000044_VIDDMA_STAT(x)                      (((x) & 0x1) << 31)
+#define   G_000044_VIDDMA_STAT(x)                      (((x) >> 31) & 0x1)
+#define   C_000044_VIDDMA_STAT                         0x7FFFFFFF
+#define R_00004C_BUS_CNTL                            0x00004C
+#define   S_00004C_BUS_MASTER_DIS(x)                   (((x) & 0x1) << 14)
+#define   G_00004C_BUS_MASTER_DIS(x)                   (((x) >> 14) & 0x1)
+#define   C_00004C_BUS_MASTER_DIS                      0xFFFFBFFF
+#define   S_00004C_BUS_MSI_REARM(x)                    (((x) & 0x1) << 20)
+#define   G_00004C_BUS_MSI_REARM(x)                    (((x) >> 20) & 0x1)
+#define   C_00004C_BUS_MSI_REARM                       0xFFEFFFFF
+#define R_000070_MC_IND_INDEX                        0x000070
+#define   S_000070_MC_IND_ADDR(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000070_MC_IND_ADDR(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000070_MC_IND_ADDR                         0xFFFF0000
+#define   S_000070_MC_IND_SEQ_RBS_0(x)                 (((x) & 0x1) << 16)
+#define   G_000070_MC_IND_SEQ_RBS_0(x)                 (((x) >> 16) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_0                    0xFFFEFFFF
+#define   S_000070_MC_IND_SEQ_RBS_1(x)                 (((x) & 0x1) << 17)
+#define   G_000070_MC_IND_SEQ_RBS_1(x)                 (((x) >> 17) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_1                    0xFFFDFFFF
+#define   S_000070_MC_IND_SEQ_RBS_2(x)                 (((x) & 0x1) << 18)
+#define   G_000070_MC_IND_SEQ_RBS_2(x)                 (((x) >> 18) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_2                    0xFFFBFFFF
+#define   S_000070_MC_IND_SEQ_RBS_3(x)                 (((x) & 0x1) << 19)
+#define   G_000070_MC_IND_SEQ_RBS_3(x)                 (((x) >> 19) & 0x1)
+#define   C_000070_MC_IND_SEQ_RBS_3                    0xFFF7FFFF
+#define   S_000070_MC_IND_AIC_RBS(x)                   (((x) & 0x1) << 20)
+#define   G_000070_MC_IND_AIC_RBS(x)                   (((x) >> 20) & 0x1)
+#define   C_000070_MC_IND_AIC_RBS                      0xFFEFFFFF
+#define   S_000070_MC_IND_CITF_ARB0(x)                 (((x) & 0x1) << 21)
+#define   G_000070_MC_IND_CITF_ARB0(x)                 (((x) >> 21) & 0x1)
+#define   C_000070_MC_IND_CITF_ARB0                    0xFFDFFFFF
+#define   S_000070_MC_IND_CITF_ARB1(x)                 (((x) & 0x1) << 22)
+#define   G_000070_MC_IND_CITF_ARB1(x)                 (((x) >> 22) & 0x1)
+#define   C_000070_MC_IND_CITF_ARB1                    0xFFBFFFFF
+#define   S_000070_MC_IND_WR_EN(x)                     (((x) & 0x1) << 23)
+#define   G_000070_MC_IND_WR_EN(x)                     (((x) >> 23) & 0x1)
+#define   C_000070_MC_IND_WR_EN                        0xFF7FFFFF
+#define   S_000070_MC_IND_RD_INV(x)                    (((x) & 0x1) << 24)
+#define   G_000070_MC_IND_RD_INV(x)                    (((x) >> 24) & 0x1)
+#define   C_000070_MC_IND_RD_INV                       0xFEFFFFFF
+#define R_000074_MC_IND_DATA                         0x000074
+#define   S_000074_MC_IND_DATA(x)                      (((x) & 0xFFFFFFFF) << 0)
+#define   G_000074_MC_IND_DATA(x)                      (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000074_MC_IND_DATA                         0x00000000
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_0060A4_D1CRTC_STATUS_FRAME_COUNT           0x0060A4
+#define   S_0060A4_D1CRTC_FRAME_COUNT(x)               (((x) & 0xFFFFFF) << 0)
+#define   G_0060A4_D1CRTC_FRAME_COUNT(x)               (((x) >> 0) & 0xFFFFFF)
+#define   C_0060A4_D1CRTC_FRAME_COUNT                  0xFF000000
+#define R_006534_D1MODE_VBLANK_STATUS                0x006534
+#define   S_006534_D1MODE_VBLANK_OCCURRED(x)           (((x) & 0x1) << 0)
+#define   G_006534_D1MODE_VBLANK_OCCURRED(x)           (((x) >> 0) & 0x1)
+#define   C_006534_D1MODE_VBLANK_OCCURRED              0xFFFFFFFE
+#define   S_006534_D1MODE_VBLANK_ACK(x)                (((x) & 0x1) << 4)
+#define   G_006534_D1MODE_VBLANK_ACK(x)                (((x) >> 4) & 0x1)
+#define   C_006534_D1MODE_VBLANK_ACK                   0xFFFFFFEF
+#define   S_006534_D1MODE_VBLANK_STAT(x)               (((x) & 0x1) << 12)
+#define   G_006534_D1MODE_VBLANK_STAT(x)               (((x) >> 12) & 0x1)
+#define   C_006534_D1MODE_VBLANK_STAT                  0xFFFFEFFF
+#define   S_006534_D1MODE_VBLANK_INTERRUPT(x)          (((x) & 0x1) << 16)
+#define   G_006534_D1MODE_VBLANK_INTERRUPT(x)          (((x) >> 16) & 0x1)
+#define   C_006534_D1MODE_VBLANK_INTERRUPT             0xFFFEFFFF
+#define R_006540_DxMODE_INT_MASK                     0x006540
+#define   S_006540_D1MODE_VBLANK_INT_MASK(x)           (((x) & 0x1) << 0)
+#define   G_006540_D1MODE_VBLANK_INT_MASK(x)           (((x) >> 0) & 0x1)
+#define   C_006540_D1MODE_VBLANK_INT_MASK              0xFFFFFFFE
+#define   S_006540_D1MODE_VLINE_INT_MASK(x)            (((x) & 0x1) << 4)
+#define   G_006540_D1MODE_VLINE_INT_MASK(x)            (((x) >> 4) & 0x1)
+#define   C_006540_D1MODE_VLINE_INT_MASK               0xFFFFFFEF
+#define   S_006540_D2MODE_VBLANK_INT_MASK(x)           (((x) & 0x1) << 8)
+#define   G_006540_D2MODE_VBLANK_INT_MASK(x)           (((x) >> 8) & 0x1)
+#define   C_006540_D2MODE_VBLANK_INT_MASK              0xFFFFFEFF
+#define   S_006540_D2MODE_VLINE_INT_MASK(x)            (((x) & 0x1) << 12)
+#define   G_006540_D2MODE_VLINE_INT_MASK(x)            (((x) >> 12) & 0x1)
+#define   C_006540_D2MODE_VLINE_INT_MASK               0xFFFFEFFF
+#define   S_006540_D1MODE_VBLANK_CP_SEL(x)             (((x) & 0x1) << 30)
+#define   G_006540_D1MODE_VBLANK_CP_SEL(x)             (((x) >> 30) & 0x1)
+#define   C_006540_D1MODE_VBLANK_CP_SEL                0xBFFFFFFF
+#define   S_006540_D2MODE_VBLANK_CP_SEL(x)             (((x) & 0x1) << 31)
+#define   G_006540_D2MODE_VBLANK_CP_SEL(x)             (((x) >> 31) & 0x1)
+#define   C_006540_D2MODE_VBLANK_CP_SEL                0x7FFFFFFF
+#define R_0068A4_D2CRTC_STATUS_FRAME_COUNT           0x0068A4
+#define   S_0068A4_D2CRTC_FRAME_COUNT(x)               (((x) & 0xFFFFFF) << 0)
+#define   G_0068A4_D2CRTC_FRAME_COUNT(x)               (((x) >> 0) & 0xFFFFFF)
+#define   C_0068A4_D2CRTC_FRAME_COUNT                  0xFF000000
+#define R_006D34_D2MODE_VBLANK_STATUS                0x006D34
+#define   S_006D34_D2MODE_VBLANK_OCCURRED(x)           (((x) & 0x1) << 0)
+#define   G_006D34_D2MODE_VBLANK_OCCURRED(x)           (((x) >> 0) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_OCCURRED              0xFFFFFFFE
+#define   S_006D34_D2MODE_VBLANK_ACK(x)                (((x) & 0x1) << 4)
+#define   G_006D34_D2MODE_VBLANK_ACK(x)                (((x) >> 4) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_ACK                   0xFFFFFFEF
+#define   S_006D34_D2MODE_VBLANK_STAT(x)               (((x) & 0x1) << 12)
+#define   G_006D34_D2MODE_VBLANK_STAT(x)               (((x) >> 12) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_STAT                  0xFFFFEFFF
+#define   S_006D34_D2MODE_VBLANK_INTERRUPT(x)          (((x) & 0x1) << 16)
+#define   G_006D34_D2MODE_VBLANK_INTERRUPT(x)          (((x) >> 16) & 0x1)
+#define   C_006D34_D2MODE_VBLANK_INTERRUPT             0xFFFEFFFF
+#define R_007EDC_DISP_INTERRUPT_STATUS               0x007EDC
+#define   S_007EDC_LB_D1_VBLANK_INTERRUPT(x)           (((x) & 0x1) << 4)
+#define   G_007EDC_LB_D1_VBLANK_INTERRUPT(x)           (((x) >> 4) & 0x1)
+#define   C_007EDC_LB_D1_VBLANK_INTERRUPT              0xFFFFFFEF
+#define   S_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) & 0x1) << 5)
+#define   G_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) >> 5) & 0x1)
+#define   C_007EDC_LB_D2_VBLANK_INTERRUPT              0xFFFFFFDF
+#define   S_007EDC_DACA_AUTODETECT_INTERRUPT(x)        (((x) & 0x1) << 16)
+#define   G_007EDC_DACA_AUTODETECT_INTERRUPT(x)        (((x) >> 16) & 0x1)
+#define   C_007EDC_DACA_AUTODETECT_INTERRUPT           0xFFFEFFFF
+#define   S_007EDC_DACB_AUTODETECT_INTERRUPT(x)        (((x) & 0x1) << 17)
+#define   G_007EDC_DACB_AUTODETECT_INTERRUPT(x)        (((x) >> 17) & 0x1)
+#define   C_007EDC_DACB_AUTODETECT_INTERRUPT           0xFFFDFFFF
+#define   S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x)    (((x) & 0x1) << 18)
+#define   G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x)    (((x) >> 18) & 0x1)
+#define   C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT       0xFFFBFFFF
+#define   S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x)    (((x) & 0x1) << 19)
+#define   G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x)    (((x) >> 19) & 0x1)
+#define   C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT       0xFFF7FFFF
+#define R_007828_DACA_AUTODETECT_CONTROL               0x007828
+#define   S_007828_DACA_AUTODETECT_MODE(x)             (((x) & 0x3) << 0)
+#define   G_007828_DACA_AUTODETECT_MODE(x)             (((x) >> 0) & 0x3)
+#define   C_007828_DACA_AUTODETECT_MODE                0xFFFFFFFC
+#define   S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define   G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define   C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER  0xFFFF00FF
+#define   S_007828_DACA_AUTODETECT_CHECK_MASK(x)       (((x) & 0x3) << 16)
+#define   G_007828_DACA_AUTODETECT_CHECK_MASK(x)       (((x) >> 16) & 0x3)
+#define   C_007828_DACA_AUTODETECT_CHECK_MASK          0xFFFCFFFF
+#define R_007838_DACA_AUTODETECT_INT_CONTROL           0x007838
+#define   S_007838_DACA_AUTODETECT_ACK(x)              (((x) & 0x1) << 0)
+#define   C_007838_DACA_AUTODETECT_ACK                 0xFFFFFFFE
+#define   S_007838_DACA_AUTODETECT_INT_ENABLE(x)       (((x) & 0x1) << 16)
+#define   G_007838_DACA_AUTODETECT_INT_ENABLE(x)       (((x) >> 16) & 0x1)
+#define   C_007838_DACA_AUTODETECT_INT_ENABLE          0xFFFCFFFF
+#define R_007A28_DACB_AUTODETECT_CONTROL               0x007A28
+#define   S_007A28_DACB_AUTODETECT_MODE(x)             (((x) & 0x3) << 0)
+#define   G_007A28_DACB_AUTODETECT_MODE(x)             (((x) >> 0) & 0x3)
+#define   C_007A28_DACB_AUTODETECT_MODE                0xFFFFFFFC
+#define   S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define   G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define   C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER  0xFFFF00FF
+#define   S_007A28_DACB_AUTODETECT_CHECK_MASK(x)       (((x) & 0x3) << 16)
+#define   G_007A28_DACB_AUTODETECT_CHECK_MASK(x)       (((x) >> 16) & 0x3)
+#define   C_007A28_DACB_AUTODETECT_CHECK_MASK          0xFFFCFFFF
+#define R_007A38_DACB_AUTODETECT_INT_CONTROL           0x007A38
+#define   S_007A38_DACB_AUTODETECT_ACK(x)              (((x) & 0x1) << 0)
+#define   C_007A38_DACB_AUTODETECT_ACK                 0xFFFFFFFE
+#define   S_007A38_DACB_AUTODETECT_INT_ENABLE(x)       (((x) & 0x1) << 16)
+#define   G_007A38_DACB_AUTODETECT_INT_ENABLE(x)       (((x) >> 16) & 0x1)
+#define   C_007A38_DACB_AUTODETECT_INT_ENABLE          0xFFFCFFFF
+#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL           0x007D00
+#define   S_007D00_DC_HOT_PLUG_DETECT1_EN(x)           (((x) & 0x1) << 0)
+#define   G_007D00_DC_HOT_PLUG_DETECT1_EN(x)           (((x) >> 0) & 0x1)
+#define   C_007D00_DC_HOT_PLUG_DETECT1_EN              0xFFFFFFFE
+#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS        0x007D04
+#define   S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x)   (((x) & 0x1) << 0)
+#define   G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x)   (((x) >> 0) & 0x1)
+#define   C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS      0xFFFFFFFE
+#define   S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x)        (((x) & 0x1) << 1)
+#define   G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x)        (((x) >> 1) & 0x1)
+#define   C_007D04_DC_HOT_PLUG_DETECT1_SENSE           0xFFFFFFFD
+#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL       0x007D08
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x)      (((x) & 0x1) << 0)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK         0xFFFFFFFE
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define   G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY    0xFFFFFEFF
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x)       (((x) & 0x1) << 16)
+#define   G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x)       (((x) >> 16) & 0x1)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_EN          0xFFFEFFFF
+#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL           0x007D10
+#define   S_007D10_DC_HOT_PLUG_DETECT2_EN(x)           (((x) & 0x1) << 0)
+#define   G_007D10_DC_HOT_PLUG_DETECT2_EN(x)           (((x) >> 0) & 0x1)
+#define   C_007D10_DC_HOT_PLUG_DETECT2_EN              0xFFFFFFFE
+#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS        0x007D14
+#define   S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x)   (((x) & 0x1) << 0)
+#define   G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x)   (((x) >> 0) & 0x1)
+#define   C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS      0xFFFFFFFE
+#define   S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x)        (((x) & 0x1) << 1)
+#define   G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x)        (((x) >> 1) & 0x1)
+#define   C_007D14_DC_HOT_PLUG_DETECT2_SENSE           0xFFFFFFFD
+#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL       0x007D18
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x)      (((x) & 0x1) << 0)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK         0xFFFFFFFE
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define   G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY    0xFFFFFEFF
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x)       (((x) & 0x1) << 16)
+#define   G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x)       (((x) >> 16) & 0x1)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_EN          0xFFFEFFFF
+#define R_007404_HDMI0_STATUS                          0x007404
+#define   S_007404_HDMI0_AZ_FORMAT_WTRIG(x)            (((x) & 0x1) << 28)
+#define   G_007404_HDMI0_AZ_FORMAT_WTRIG(x)            (((x) >> 28) & 0x1)
+#define   C_007404_HDMI0_AZ_FORMAT_WTRIG               0xEFFFFFFF
+#define   S_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x)        (((x) & 0x1) << 29)
+#define   G_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x)        (((x) >> 29) & 0x1)
+#define   C_007404_HDMI0_AZ_FORMAT_WTRIG_INT           0xDFFFFFFF
+#define R_007408_HDMI0_AUDIO_PACKET_CONTROL            0x007408
+#define   S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x)       (((x) & 0x1) << 28)
+#define   G_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x)       (((x) >> 28) & 0x1)
+#define   C_007408_HDMI0_AZ_FORMAT_WTRIG_MASK          0xEFFFFFFF
+#define   S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x)        (((x) & 0x1) << 29)
+#define   G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x)        (((x) >> 29) & 0x1)
+#define   C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK           0xDFFFFFFF
+
+/* MC registers */
+#define R_000000_MC_STATUS                           0x000000
+#define   S_000000_MC_IDLE(x)                          (((x) & 0x1) << 0)
+#define   G_000000_MC_IDLE(x)                          (((x) >> 0) & 0x1)
+#define   C_000000_MC_IDLE                             0xFFFFFFFE
+#define R_000004_MC_FB_LOCATION                      0x000004
+#define   S_000004_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000004_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000004_MC_FB_START                         0xFFFF0000
+#define   S_000004_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000004_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000004_MC_FB_TOP                           0x0000FFFF
+#define R_000005_MC_AGP_LOCATION                     0x000005
+#define   S_000005_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000005_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000005_MC_AGP_START                        0xFFFF0000
+#define   S_000005_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_000005_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_000005_MC_AGP_TOP                          0x0000FFFF
+#define R_000006_AGP_BASE                            0x000006
+#define   S_000006_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000006_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000006_AGP_BASE_ADDR                       0x00000000
+#define R_000007_AGP_BASE_2                          0x000007
+#define   S_000007_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_000007_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_000007_AGP_BASE_ADDR_2                     0xFFFFFFF0
+#define R_000009_MC_CNTL1                            0x000009
+#define   S_000009_ENABLE_PAGE_TABLES(x)               (((x) & 0x1) << 26)
+#define   G_000009_ENABLE_PAGE_TABLES(x)               (((x) >> 26) & 0x1)
+#define   C_000009_ENABLE_PAGE_TABLES                  0xFBFFFFFF
+/* FIXME: don't know the various field sizes, need feedback from AMD */
+#define R_000100_MC_PT0_CNTL                         0x000100
+#define   S_000100_ENABLE_PT(x)                        (((x) & 0x1) << 0)
+#define   G_000100_ENABLE_PT(x)                        (((x) >> 0) & 0x1)
+#define   C_000100_ENABLE_PT                           0xFFFFFFFE
+#define   S_000100_EFFECTIVE_L2_CACHE_SIZE(x)          (((x) & 0x7) << 15)
+#define   G_000100_EFFECTIVE_L2_CACHE_SIZE(x)          (((x) >> 15) & 0x7)
+#define   C_000100_EFFECTIVE_L2_CACHE_SIZE             0xFFFC7FFF
+#define   S_000100_EFFECTIVE_L2_QUEUE_SIZE(x)          (((x) & 0x7) << 21)
+#define   G_000100_EFFECTIVE_L2_QUEUE_SIZE(x)          (((x) >> 21) & 0x7)
+#define   C_000100_EFFECTIVE_L2_QUEUE_SIZE             0xFF1FFFFF
+#define   S_000100_INVALIDATE_ALL_L1_TLBS(x)           (((x) & 0x1) << 28)
+#define   G_000100_INVALIDATE_ALL_L1_TLBS(x)           (((x) >> 28) & 0x1)
+#define   C_000100_INVALIDATE_ALL_L1_TLBS              0xEFFFFFFF
+#define   S_000100_INVALIDATE_L2_CACHE(x)              (((x) & 0x1) << 29)
+#define   G_000100_INVALIDATE_L2_CACHE(x)              (((x) >> 29) & 0x1)
+#define   C_000100_INVALIDATE_L2_CACHE                 0xDFFFFFFF
+#define R_000102_MC_PT0_CONTEXT0_CNTL                0x000102
+#define   S_000102_ENABLE_PAGE_TABLE(x)                (((x) & 0x1) << 0)
+#define   G_000102_ENABLE_PAGE_TABLE(x)                (((x) >> 0) & 0x1)
+#define   C_000102_ENABLE_PAGE_TABLE                   0xFFFFFFFE
+#define   S_000102_PAGE_TABLE_DEPTH(x)                 (((x) & 0x3) << 1)
+#define   G_000102_PAGE_TABLE_DEPTH(x)                 (((x) >> 1) & 0x3)
+#define   C_000102_PAGE_TABLE_DEPTH                    0xFFFFFFF9
+#define   V_000102_PAGE_TABLE_FLAT                     0
+/* R600 documentation suggests that this should be a number of pages */
+#define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR     0x000112
+#define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR    0x000114
+#define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR   0x00011C
+#define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR      0x00012C
+#define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR     0x00013C
+#define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR       0x00014C
+#define R_00016C_MC_PT0_CLIENT0_CNTL                 0x00016C
+#define   S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0)
+#define   G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1)
+#define   C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE    0xFFFFFFFE
+#define   S_00016C_TRANSLATION_MODE_OVERRIDE(x)        (((x) & 0x1) << 1)
+#define   G_00016C_TRANSLATION_MODE_OVERRIDE(x)        (((x) >> 1) & 0x1)
+#define   C_00016C_TRANSLATION_MODE_OVERRIDE           0xFFFFFFFD
+#define   S_00016C_SYSTEM_ACCESS_MODE_MASK(x)          (((x) & 0x3) << 8)
+#define   G_00016C_SYSTEM_ACCESS_MODE_MASK(x)          (((x) >> 8) & 0x3)
+#define   C_00016C_SYSTEM_ACCESS_MODE_MASK             0xFFFFFCFF
+#define   V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY          0
+#define   V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP      1
+#define   V_00016C_SYSTEM_ACCESS_MODE_IN_SYS           2
+#define   V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS       3
+#define   S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x)  (((x) & 0x1) << 10)
+#define   G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x)  (((x) >> 10) & 0x1)
+#define   C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS     0xFFFFFBFF
+#define   V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH  0
+#define   V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1
+#define   S_00016C_EFFECTIVE_L1_CACHE_SIZE(x)          (((x) & 0x7) << 11)
+#define   G_00016C_EFFECTIVE_L1_CACHE_SIZE(x)          (((x) >> 11) & 0x7)
+#define   C_00016C_EFFECTIVE_L1_CACHE_SIZE             0xFFFFC7FF
+#define   S_00016C_ENABLE_FRAGMENT_PROCESSING(x)       (((x) & 0x1) << 14)
+#define   G_00016C_ENABLE_FRAGMENT_PROCESSING(x)       (((x) >> 14) & 0x1)
+#define   C_00016C_ENABLE_FRAGMENT_PROCESSING          0xFFFFBFFF
+#define   S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x)          (((x) & 0x7) << 15)
+#define   G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x)          (((x) >> 15) & 0x7)
+#define   C_00016C_EFFECTIVE_L1_QUEUE_SIZE             0xFFFC7FFF
+#define   S_00016C_INVALIDATE_L1_TLB(x)                (((x) & 0x1) << 20)
+#define   G_00016C_INVALIDATE_L1_TLB(x)                (((x) >> 20) & 0x1)
+#define   C_00016C_INVALIDATE_L1_TLB                   0xFFEFFFFF
+
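+/*
+ * Note on the accessor naming used in these headers: for each register
+ * field, S_ shifts a value into the field, G_ extracts it from a register
+ * word, and C_ is the AND-mask that clears it.  A minimal read-modify-write
+ * sketch for the MC_FB_START field defined above (tmp and new_start are
+ * placeholder names):
+ *
+ *	tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
+ *	tmp &= C_000004_MC_FB_START;
+ *	tmp |= S_000004_MC_FB_START(new_start);
+ *	WREG32_MC(R_000004_MC_FB_LOCATION, tmp);
+ */
+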
+#define R_006548_D1MODE_PRIORITY_A_CNT               0x006548
+#define   S_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006548_D1MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_00654C_D1MODE_PRIORITY_B_CNT               0x00654C
+#define   S_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_00654C_D1MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+#define R_006D48_D2MODE_PRIORITY_A_CNT               0x006D48
+#define   S_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D48_D2MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_006D4C_D2MODE_PRIORITY_B_CNT               0x006D4C
+#define   S_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D4C_D2MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+
+/* PLL regs */
+#define GENERAL_PWRMGT                                 0x8
+#define   GLOBAL_PWRMGT_EN                             (1 << 0)
+#define   MOBILE_SU                                    (1 << 2)
+#define DYN_PWRMGT_SCLK_LENGTH                         0xc
+#define   NORMAL_POWER_SCLK_HILEN(x)                   ((x) << 0)
+#define   NORMAL_POWER_SCLK_LOLEN(x)                   ((x) << 4)
+#define   REDUCED_POWER_SCLK_HILEN(x)                  ((x) << 8)
+#define   REDUCED_POWER_SCLK_LOLEN(x)                  ((x) << 12)
+#define   POWER_D1_SCLK_HILEN(x)                       ((x) << 16)
+#define   POWER_D1_SCLK_LOLEN(x)                       ((x) << 20)
+#define   STATIC_SCREEN_HILEN(x)                       ((x) << 24)
+#define   STATIC_SCREEN_LOLEN(x)                       ((x) << 28)
+#define DYN_SCLK_VOL_CNTL                              0xe
+#define   IO_CG_VOLTAGE_DROP                           (1 << 0)
+#define   VOLTAGE_DROP_SYNC                            (1 << 2)
+#define   VOLTAGE_DELAY_SEL(x)                         ((x) << 3)
+#define HDP_DYN_CNTL                                   0x10
+#define   HDP_FORCEON                                  (1 << 0)
+#define MC_HOST_DYN_CNTL                               0x1e
+#define   MC_HOST_FORCEON                              (1 << 0)
+#define DYN_BACKBIAS_CNTL                              0x29
+#define   IO_CG_BACKBIAS_EN                            (1 << 0)
+
+/* mmreg */
+#define DOUT_POWER_MANAGEMENT_CNTL                     0x7ee0
+#define   PWRDN_WAIT_BUSY_OFF                          (1 << 0)
+#define   PWRDN_WAIT_PWRSEQ_OFF                        (1 << 4)
+#define   PWRDN_WAIT_PPLL_OFF                          (1 << 8)
+#define   PWRUP_WAIT_PPLL_ON                           (1 << 12)
+#define   PWRUP_WAIT_MEM_INIT_DONE                     (1 << 16)
+#define   PM_ASSERT_RESET                              (1 << 20)
+#define   PM_PWRDN_PPLL                                (1 << 24)
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/rs690.c b/linux-imx/drivers/gpu/drm/radeon/rs690.c
new file mode 100644
index 0000000..ea28ecb
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rs690.c
@@ -0,0 +1,817 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "rs690d.h"
+
+int rs690_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
+		if (G_000090_MC_SYSTEM_IDLE(tmp))
+			return 0;
+		udelay(1);
+	}
+	return -1;
+}
+
+static void rs690_gpu_init(struct radeon_device *rdev)
+{
+	/* FIXME: is this correct? */
+	r420_pipes_init(rdev);
+	if (rs690_mc_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait for MC idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+}
+
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
+};
+
+void rs690_pm_info(struct radeon_device *rdev)
+{
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	union igp_info *info;
+	uint16_t data_offset;
+	uint8_t frev, crev;
+	fixed20_12 tmp;
+
+	if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);
+
+		/* Get various system information from the BIOS */
+		switch (crev) {
+		case 1:
+			tmp.full = dfixed_const(100);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
+			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+			if (le16_to_cpu(info->info.usK8MemoryClock))
+				rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+			else if (rdev->clock.default_mclk) {
+				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
+				rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+			} else
+				rdev->pm.igp_system_mclk.full = dfixed_const(400);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
+			rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
+			break;
+		case 2:
+			tmp.full = dfixed_const(100);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
+			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+			if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
+				rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
+			else if (rdev->clock.default_mclk)
+				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
+			else
+				rdev->pm.igp_system_mclk.full = dfixed_const(66700);
+			rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
+			rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
+			rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
+			break;
+		default:
+			/* We assume the slowest possible clock, i.e. worst case */
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
+			rdev->pm.igp_system_mclk.full = dfixed_const(200);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
+			rdev->pm.igp_ht_link_width.full = dfixed_const(8);
+			DRM_ERROR("No integrated system info for your GPU, using safe default\n");
+			break;
+		}
+	} else {
+		/* We assume the slowest possible clock, i.e. worst case */
+		rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
+		rdev->pm.igp_system_mclk.full = dfixed_const(200);
+		rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
+		rdev->pm.igp_ht_link_width.full = dfixed_const(8);
+		DRM_ERROR("No integrated system info for your GPU, using safe default\n");
+	}
+	/* Compute various bandwidths */
+	/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4  */
+	tmp.full = dfixed_const(4);
+	rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
+	/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
+	 *              = ht_clk * ht_width / 5
+	 */
+	tmp.full = dfixed_const(5);
+	rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
+						rdev->pm.igp_ht_link_width);
+	rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
+	if (tmp.full < rdev->pm.max_bandwidth.full) {
+		/* HT link is a limiting factor */
+		rdev->pm.max_bandwidth.full = tmp.full;
+	}
+	/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
+	 *                    = (sideport_clk * 14) / 10
+	 */
+	tmp.full = dfixed_const(14);
+	rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
+	tmp.full = dfixed_const(10);
+	rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
+}
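+
+/* Worked example of the bandwidth formulas above, plugging in the safe
+ * defaults used by rs690_pm_info() (system_mclk = 200, ht_clk = 1000,
+ * ht_width = 8, sideport_mclk = 200; the figures are purely illustrative):
+ *
+ *	k8_bandwidth       = 200 * 4       = 800
+ *	ht_bandwidth       = 1000 * 8 / 5  = 1600
+ *	sideport_bandwidth = 200 * 14 / 10 = 280
+ */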
+
+static void rs690_mc_init(struct radeon_device *rdev)
+{
+	u64 base;
+	uint32_t h_addr, l_addr;
+	unsigned long long k8_addr;
+
+	rs400_gart_adjust_size(rdev);
+	rdev->mc.vram_is_ddr = true;
+	rdev->mc.vram_width = 128;
+	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+	base = G_000100_MC_FB_START(base) << 16;
+	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+	/* Some boards seem to be configured for 128MB of sideport memory,
+	 * but really only have 64MB.  Just skip the sideport and use
+	 * UMA memory.
+	 */
+	if (rdev->mc.igp_sideport_enabled &&
+	    (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
+		base += 128 * 1024 * 1024;
+		rdev->mc.real_vram_size -= 128 * 1024 * 1024;
+		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+	}
+
+	/* Use K8 direct mapping for fast fb access. */
+	rdev->fastfb_working = false;
+	h_addr = G_00005F_K8_ADDR_EXT(RREG32_MC(R_00005F_MC_MISC_UMA_CNTL));
+	l_addr = RREG32_MC(R_00001E_K8_FB_LOCATION);
+	k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+	if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+	{
+		/* FastFB shall be used with UMA memory. Here it is simply
+		 * disabled when sideport memory is present.
+		 */
+		if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+			DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
+					(unsigned long long)rdev->mc.aper_base, k8_addr);
+			rdev->mc.aper_base = (resource_size_t)k8_addr;
+			rdev->fastfb_working = true;
+		}
+	}
+
+	rs690_pm_info(rdev);
+	radeon_vram_location(rdev, &rdev->mc, base);
+	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
+	radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
+void rs690_line_buffer_adjust(struct radeon_device *rdev,
+			      struct drm_display_mode *mode1,
+			      struct drm_display_mode *mode2)
+{
+	u32 tmp;
+
+	/*
+	 * Line Buffer Setup
+	 * There is a single line buffer shared by both display controllers.
+	 * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers.  The partitioning can either be done
+	 * manually or via one of four preset allocations specified in bits 1:0:
+	 *  0 - line buffer is divided in half and shared between crtc
+	 *  1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
+	 *  2 - D1 gets the whole buffer
+	 *  3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
+	 * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual
+	 * allocation mode. In manual allocation mode, D1 always starts at 0,
+	 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
+	 */
+	tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
+	tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
+	/* auto */
+	if (mode1 && mode2) {
+		if (mode1->hdisplay > mode2->hdisplay) {
+			if (mode1->hdisplay > 2560)
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
+			else
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+		} else if (mode2->hdisplay > mode1->hdisplay) {
+			if (mode2->hdisplay > 2560)
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+			else
+				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+		} else
+			tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
+	} else if (mode1) {
+		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
+	} else if (mode2) {
+		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
+	}
+	WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
+}
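+
+/* For example, two active heads with equal hdisplay land in the
+ * "divided in half" case above (V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF),
+ * while a single head on D1 gets the whole line buffer
+ * (V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY).
+ */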
+
+struct rs690_watermark {
+	u32        lb_request_fifo_depth;
+	fixed20_12 num_line_pair;
+	fixed20_12 estimated_width;
+	fixed20_12 worst_case_latency;
+	fixed20_12 consumption_rate;
+	fixed20_12 active_time;
+	fixed20_12 dbpp;
+	fixed20_12 priority_mark_max;
+	fixed20_12 priority_mark;
+	fixed20_12 sclk;
+};
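+
+/* Note: fixed20_12 is DRM's 20.12 fixed-point type (see drm_fixed.h):
+ * 20 integer bits plus 12 fractional bits in a u32 'full' member.  For
+ * example, dfixed_const(2) is 0x2000, dividing a fixed 1.0 (0x1000) by it
+ * with dfixed_div() gives 0x800 (i.e. 0.5), and dfixed_trunc() shifts the
+ * fractional bits away again.
+ */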
+
+static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
+				  struct radeon_crtc *crtc,
+				  struct rs690_watermark *wm)
+{
+	struct drm_display_mode *mode = &crtc->base.mode;
+	fixed20_12 a, b, c;
+	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
+	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
+
+	if (!crtc->base.enabled) {
+		/* FIXME: wouldn't it be better to set the priority mark to maximum? */
+		wm->lb_request_fifo_depth = 4;
+		return;
+	}
+
+	if (crtc->vsc.full > dfixed_const(2))
+		wm->num_line_pair.full = dfixed_const(2);
+	else
+		wm->num_line_pair.full = dfixed_const(1);
+
+	b.full = dfixed_const(mode->crtc_hdisplay);
+	c.full = dfixed_const(256);
+	a.full = dfixed_div(b, c);
+	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+	if (a.full < dfixed_const(4)) {
+		wm->lb_request_fifo_depth = 4;
+	} else {
+		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
+	}
+
+	/* Determine consumption rate
+	 *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
+	 *  vtaps = number of vertical taps,
+	 *  vsc = vertical scaling ratio, defined as source/destination
+	 *  hsc = horizontal scaling ratio, defined as source/destination
+	 */
+	a.full = dfixed_const(mode->clock);
+	b.full = dfixed_const(1000);
+	a.full = dfixed_div(a, b);
+	pclk.full = dfixed_div(b, a);
+	if (crtc->rmx_type != RMX_OFF) {
+		b.full = dfixed_const(2);
+		if (crtc->vsc.full > b.full)
+			b.full = crtc->vsc.full;
+		b.full = dfixed_mul(b, crtc->hsc);
+		c.full = dfixed_const(2);
+		b.full = dfixed_div(b, c);
+		consumption_time.full = dfixed_div(pclk, b);
+	} else {
+		consumption_time.full = pclk.full;
+	}
+	a.full = dfixed_const(1);
+	wm->consumption_rate.full = dfixed_div(a, consumption_time);
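+	/* Worked example (illustrative numbers): a 108000 kHz (108 MHz) dot
+	 * clock gives pclk = 1000 / 108 ~= 9.26ns, so with scaling off the
+	 * consumption rate comes out to roughly 0.108 pixels per ns.
+	 */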
+
+
+	/* Determine line time
+	 *  LineTime = total time for one line of display
+	 *  htotal = total number of horizontal pixels
+	 *  pclk = pixel clock period(ns)
+	 */
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	line_time.full = dfixed_mul(a, pclk);
+
+	/* Determine active time
+	 *  ActiveTime = time of active region of display within one line,
+	 *  hactive = total number of horizontal active pixels
+	 *  htotal = total number of horizontal pixels
+	 */
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->active_time.full = dfixed_mul(line_time, b);
+	wm->active_time.full = dfixed_div(wm->active_time, a);
+
+	/* Maximum bandwidth is the minimum bandwidth of all components */
+	rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
+	if (rdev->mc.igp_sideport_enabled) {
+		if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
+			rdev->pm.sideport_bandwidth.full)
+			rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
+		read_delay_latency.full = dfixed_const(370 * 800 * 1000);
+		read_delay_latency.full = dfixed_div(read_delay_latency,
+			rdev->pm.igp_sideport_mclk);
+	} else {
+		if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
+			rdev->pm.k8_bandwidth.full)
+			rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth;
+		if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
+			rdev->pm.ht_bandwidth.full)
+			rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
+		read_delay_latency.full = dfixed_const(5000);
+	}
+
+	/* sclk = system clock period(ns) = 1000 / (max_bandwidth * 16) */
+	a.full = dfixed_const(16);
+	rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
+	a.full = dfixed_const(1000);
+	rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
+	/* Determine chunk time
+	 * ChunkTime = the time it takes the DCP to send one chunk of data
+	 * to the LB which consists of pipeline delay and inter chunk gap
+	 * sclk = system clock(ns)
+	 */
+	a.full = dfixed_const(256 * 13);
+	chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
+	a.full = dfixed_const(10);
+	chunk_time.full = dfixed_div(chunk_time, a);
+
+	/* Determine the worst case latency
+	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
+	 * WorstCaseLatency = worst case time from urgent to when the MC starts
+	 *                    to return data
+	 * READ_DELAY_IDLE_MAX = constant of 1us
+	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
+	 *             which consists of pipeline delay and inter chunk gap
+	 */
+	if (dfixed_trunc(wm->num_line_pair) > 1) {
+		a.full = dfixed_const(3);
+		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
+		wm->worst_case_latency.full += read_delay_latency.full;
+	} else {
+		a.full = dfixed_const(2);
+		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
+		wm->worst_case_latency.full += read_delay_latency.full;
+	}
+
+	/* Determine the tolerable latency
+	 * TolerableLatency = Any given request has only 1 line time
+	 *                    for the data to be returned
+	 * LBRequestFifoDepth = Number of chunk requests the LB can
+	 *                      put into the request FIFO for a display
+	 *  LineTime = total time for one line of display
+	 *  ChunkTime = the time it takes the DCP to send one chunk
+	 *              of data to the LB which consists of
+	 *  pipeline delay and inter chunk gap
+	 */
+	if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
+		tolerable_latency.full = line_time.full;
+	} else {
+		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
+		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
+		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
+		tolerable_latency.full = line_time.full - tolerable_latency.full;
+	}
+	/* We assume worst case 32 bits per pixel (4 bytes) */
+	wm->dbpp.full = dfixed_const(4 * 8);
+
+	/* Determine the maximum priority mark
+	 *  width = viewport width in pixels
+	 */
+	a.full = dfixed_const(16);
+	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
+
+	/* Determine estimated width */
+	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
+	estimated_width.full = dfixed_div(estimated_width, consumption_time);
+	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+		wm->priority_mark.full = dfixed_const(10);
+	} else {
+		a.full = dfixed_const(16);
+		wm->priority_mark.full = dfixed_div(estimated_width, a);
+		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
+		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
+	}
+}
+
+void rs690_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	struct rs690_watermark wm0;
+	struct rs690_watermark wm1;
+	u32 tmp;
+	u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
+	u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
+	fixed20_12 priority_mark02, priority_mark12, fill_rate;
+	fixed20_12 a, b;
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+	/*
+	 * Set display0/1 priority up in the memory controller for
+	 * modes if the user specifies HIGH for displaypriority
+	 * option.
+	 */
+	if ((rdev->disp_priority == 2) &&
+	    ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
+		tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
+		tmp &= C_000104_MC_DISP0R_INIT_LAT;
+		tmp &= C_000104_MC_DISP1R_INIT_LAT;
+		if (mode0)
+			tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
+		if (mode1)
+			tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
+		WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
+	}
+	rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
+		WREG32(R_006C9C_DCP_CONTROL, 0);
+	if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+		WREG32(R_006C9C_DCP_CONTROL, 2);
+
+	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
+	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
+
+	tmp = (wm0.lb_request_fifo_depth - 1);
+	tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
+	WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
+
+	if (mode0 && mode1) {
+		if (dfixed_trunc(wm0.dbpp) > 64)
+			a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
+		else
+			a.full = wm0.num_line_pair.full;
+		if (dfixed_trunc(wm1.dbpp) > 64)
+			b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
+		else
+			b.full = wm1.num_line_pair.full;
+		a.full += b.full;
+		fill_rate.full = dfixed_div(wm0.sclk, a);
+		if (wm0.consumption_rate.full > fill_rate.full) {
+			b.full = wm0.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			a.full = a.full + b.full;
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		} else {
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		}
+		if (wm1.consumption_rate.full > fill_rate.full) {
+			b.full = wm1.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			a.full = a.full + b.full;
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		} else {
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		}
+		if (wm0.priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark.full;
+		if (dfixed_trunc(priority_mark02) < 0)
+			priority_mark02.full = 0;
+		if (wm0.priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark_max.full;
+		if (wm1.priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark.full;
+		if (dfixed_trunc(priority_mark12) < 0)
+			priority_mark12.full = 0;
+		if (wm1.priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark_max.full;
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2) {
+			d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+		}
+	} else if (mode0) {
+		if (dfixed_trunc(wm0.dbpp) > 64)
+			a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
+		else
+			a.full = wm0.num_line_pair.full;
+		fill_rate.full = dfixed_div(wm0.sclk, a);
+		if (wm0.consumption_rate.full > fill_rate.full) {
+			b.full = wm0.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			a.full = a.full + b.full;
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		} else {
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		}
+		if (wm0.priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark.full;
+		if (dfixed_trunc(priority_mark02) < 0)
+			priority_mark02.full = 0;
+		if (wm0.priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark_max.full;
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		if (rdev->disp_priority == 2)
+			d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+	} else if (mode1) {
+		if (dfixed_trunc(wm1.dbpp) > 64)
+			a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
+		else
+			a.full = wm1.num_line_pair.full;
+		fill_rate.full = dfixed_div(wm1.sclk, a);
+		if (wm1.consumption_rate.full > fill_rate.full) {
+			b.full = wm1.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			a.full = a.full + b.full;
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		} else {
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		}
+		if (wm1.priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark.full;
+		if (dfixed_trunc(priority_mark12) < 0)
+			priority_mark12.full = 0;
+		if (wm1.priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark_max.full;
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2)
+			d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+	}
+
+	WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+	WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+	WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+	WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+}
+
+uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	uint32_t r;
+
+	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
+	r = RREG32(R_00007C_MC_DATA);
+	WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
+	return r;
+}
+
+void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
+		S_000078_MC_IND_WR_EN(1));
+	WREG32(R_00007C_MC_DATA, v);
+	WREG32(R_000078_MC_INDEX, 0x7F);
+}
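+
+/* Illustrative sketch: the two accessors above implement the usual
+ * index/data pattern for the indirect MC register space, so updating a
+ * single field is a read-modify-write through them.  The helper below is
+ * hypothetical (kept under #if 0) and assumes the MCCFG_FB_LOCATION field
+ * macros from rs690d.h:
+ */
+#if 0
+static void rs690_mc_set_fb_start(struct radeon_device *rdev, u32 fb_start)
+{
+	u32 tmp = rs690_mc_rreg(rdev, R_000100_MCCFG_FB_LOCATION);
+
+	tmp &= C_000100_MC_FB_START;		/* clear the old start */
+	tmp |= S_000100_MC_FB_START(fb_start);	/* insert the new one */
+	rs690_mc_wreg(rdev, R_000100_MCCFG_FB_LOCATION, tmp);
+}
+#endif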
+
+static void rs690_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rs690_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+	/* Program MC, should be a 32-bit limited address space */
+	WREG32_MC(R_000100_MCCFG_FB_LOCATION,
+			S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
+			S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+	rv515_mc_resume(rdev, &save);
+}
+
+static int rs690_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	rs690_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rs690_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r = rs400_gart_enable(rdev);
+	if (r)
+		return r;
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing audio\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int rs690_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure GART is disabled */
+	rs400_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	rv515_clock_startup(rdev);
+	/* Reset gpu before posting, otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	atom_asic_init(rdev->mode_info.atom_context);
+	/* Resume clock after posting */
+	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = rs690_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int rs690_suspend(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	rs600_irq_disable(rdev);
+	rs400_gart_disable(rdev);
+	return 0;
+}
+
+void rs690_fini(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	rs400_gart_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+int rs690_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Disable VGA */
+	rv515_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* restore some registers to sane defaults */
+	r100_restore_sanity(rdev);
+	/* TODO: disable VGA, need to use VGA requests */
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset gpu before posting, otherwise ATOM will enter infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if card is posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize memory controller */
+	rs690_mc_init(rdev);
+	rv515_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rs400_gart_init(rdev);
+	if (r)
+		return r;
+	rs600_set_safe_registers(rdev);
+
+	rdev->accel_working = true;
+	r = rs690_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init, stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		rs400_gart_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/rs690d.h b/linux-imx/drivers/gpu/drm/radeon/rs690d.h
new file mode 100644
index 0000000..8af3ccf
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rs690d.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RS690D_H__
+#define __RS690D_H__
+
+/* Registers */
+#define R_00001E_K8_FB_LOCATION                      0x00001E
+#define R_00005F_MC_MISC_UMA_CNTL                    0x00005F
+#define   G_00005F_K8_ADDR_EXT(x)                      (((x) >> 0) & 0xFF)
+#define R_000078_MC_INDEX                            0x000078
+#define   S_000078_MC_IND_ADDR(x)                      (((x) & 0x1FF) << 0)
+#define   G_000078_MC_IND_ADDR(x)                      (((x) >> 0) & 0x1FF)
+#define   C_000078_MC_IND_ADDR                         0xFFFFFE00
+#define   S_000078_MC_IND_WR_EN(x)                     (((x) & 0x1) << 9)
+#define   G_000078_MC_IND_WR_EN(x)                     (((x) >> 9) & 0x1)
+#define   C_000078_MC_IND_WR_EN                        0xFFFFFDFF
+#define R_00007C_MC_DATA                             0x00007C
+#define   S_00007C_MC_DATA(x)                          (((x) & 0xFFFFFFFF) << 0)
+#define   G_00007C_MC_DATA(x)                          (((x) >> 0) & 0xFFFFFFFF)
+#define   C_00007C_MC_DATA                             0x00000000
+#define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
+#define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
+#define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0000F8_CONFIG_MEMSIZE                      0x00000000
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_006520_DC_LB_MEMORY_SPLIT                  0x006520
+#define   S_006520_DC_LB_MEMORY_SPLIT(x)               (((x) & 0x3) << 0)
+#define   G_006520_DC_LB_MEMORY_SPLIT(x)               (((x) >> 0) & 0x3)
+#define   C_006520_DC_LB_MEMORY_SPLIT                  0xFFFFFFFC
+#define   S_006520_DC_LB_MEMORY_SPLIT_MODE(x)          (((x) & 0x1) << 2)
+#define   G_006520_DC_LB_MEMORY_SPLIT_MODE(x)          (((x) >> 2) & 0x1)
+#define   C_006520_DC_LB_MEMORY_SPLIT_MODE             0xFFFFFFFB
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF    0
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q      1
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY          2
+#define   V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q      3
+#define   S_006520_DC_LB_DISP1_END_ADR(x)              (((x) & 0x7FF) << 4)
+#define   G_006520_DC_LB_DISP1_END_ADR(x)              (((x) >> 4) & 0x7FF)
+#define   C_006520_DC_LB_DISP1_END_ADR                 0xFFFF800F
+#define R_006548_D1MODE_PRIORITY_A_CNT               0x006548
+#define   S_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006548_D1MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006548_D1MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006548_D1MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006548_D1MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_00654C_D1MODE_PRIORITY_B_CNT               0x00654C
+#define   S_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_00654C_D1MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_00654C_D1MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_00654C_D1MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_00654C_D1MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+#define R_006C9C_DCP_CONTROL                         0x006C9C
+#define R_006D48_D2MODE_PRIORITY_A_CNT               0x006D48
+#define   S_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D48_D2MODE_PRIORITY_MARK_A(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D48_D2MODE_PRIORITY_MARK_A              0xFFFF8000
+#define   S_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D48_D2MODE_PRIORITY_A_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_OFF               0xFFFEFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D48_D2MODE_PRIORITY_A_FORCE_MASK        0xFEFFFFFF
+#define R_006D4C_D2MODE_PRIORITY_B_CNT               0x006D4C
+#define   S_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) & 0x7FFF) << 0)
+#define   G_006D4C_D2MODE_PRIORITY_MARK_B(x)           (((x) >> 0) & 0x7FFF)
+#define   C_006D4C_D2MODE_PRIORITY_MARK_B              0xFFFF8000
+#define   S_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) & 0x1) << 16)
+#define   G_006D4C_D2MODE_PRIORITY_B_OFF(x)            (((x) >> 16) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_OFF               0xFFFEFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) & 0x1) << 20)
+#define   G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x)      (((x) >> 20) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON         0xFFEFFFFF
+#define   S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) & 0x1) << 24)
+#define   G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x)     (((x) >> 24) & 0x1)
+#define   C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK        0xFEFFFFFF
+#define R_006D58_LB_MAX_REQ_OUTSTANDING              0x006D58
+#define   S_006D58_LB_D1_MAX_REQ_OUTSTANDING(x)        (((x) & 0xF) << 0)
+#define   G_006D58_LB_D1_MAX_REQ_OUTSTANDING(x)        (((x) >> 0) & 0xF)
+#define   C_006D58_LB_D1_MAX_REQ_OUTSTANDING           0xFFFFFFF0
+#define   S_006D58_LB_D2_MAX_REQ_OUTSTANDING(x)        (((x) & 0xF) << 16)
+#define   G_006D58_LB_D2_MAX_REQ_OUTSTANDING(x)        (((x) >> 16) & 0xF)
+#define   C_006D58_LB_D2_MAX_REQ_OUTSTANDING           0xFFF0FFFF
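+
+/*
+ * The S_/G_/C_ macros above follow a common pattern: S_<reg>_<field>(x)
+ * shifts a value into its field position, G_<reg>_<field>(x) extracts the
+ * field from a register value, and C_<reg>_<field> is the AND-mask that
+ * clears the field. A typical read-modify-write (an illustrative sketch)
+ * would look like:
+ *
+ *	tmp = RREG32(R_006D58_LB_MAX_REQ_OUTSTANDING);
+ *	tmp &= C_006D58_LB_D1_MAX_REQ_OUTSTANDING;
+ *	tmp |= S_006D58_LB_D1_MAX_REQ_OUTSTANDING(4);
+ *	WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
+ */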
+
+
+#define R_000090_MC_SYSTEM_STATUS                    0x000090
+#define   S_000090_MC_SYSTEM_IDLE(x)                   (((x) & 0x1) << 0)
+#define   G_000090_MC_SYSTEM_IDLE(x)                   (((x) >> 0) & 0x1)
+#define   C_000090_MC_SYSTEM_IDLE                      0xFFFFFFFE
+#define   S_000090_MC_SEQUENCER_IDLE(x)                (((x) & 0x1) << 1)
+#define   G_000090_MC_SEQUENCER_IDLE(x)                (((x) >> 1) & 0x1)
+#define   C_000090_MC_SEQUENCER_IDLE                   0xFFFFFFFD
+#define   S_000090_MC_ARBITER_IDLE(x)                  (((x) & 0x1) << 2)
+#define   G_000090_MC_ARBITER_IDLE(x)                  (((x) >> 2) & 0x1)
+#define   C_000090_MC_ARBITER_IDLE                     0xFFFFFFFB
+#define   S_000090_MC_SELECT_PM(x)                     (((x) & 0x1) << 3)
+#define   G_000090_MC_SELECT_PM(x)                     (((x) >> 3) & 0x1)
+#define   C_000090_MC_SELECT_PM                        0xFFFFFFF7
+#define   S_000090_RESERVED4(x)                        (((x) & 0xF) << 4)
+#define   G_000090_RESERVED4(x)                        (((x) >> 4) & 0xF)
+#define   C_000090_RESERVED4                           0xFFFFFF0F
+#define   S_000090_RESERVED8(x)                        (((x) & 0xF) << 8)
+#define   G_000090_RESERVED8(x)                        (((x) >> 8) & 0xF)
+#define   C_000090_RESERVED8                           0xFFFFF0FF
+#define   S_000090_RESERVED12(x)                       (((x) & 0xF) << 12)
+#define   G_000090_RESERVED12(x)                       (((x) >> 12) & 0xF)
+#define   C_000090_RESERVED12                          0xFFFF0FFF
+#define   S_000090_MCA_INIT_EXECUTED(x)                (((x) & 0x1) << 16)
+#define   G_000090_MCA_INIT_EXECUTED(x)                (((x) >> 16) & 0x1)
+#define   C_000090_MCA_INIT_EXECUTED                   0xFFFEFFFF
+#define   S_000090_MCA_IDLE(x)                         (((x) & 0x1) << 17)
+#define   G_000090_MCA_IDLE(x)                         (((x) >> 17) & 0x1)
+#define   C_000090_MCA_IDLE                            0xFFFDFFFF
+#define   S_000090_MCA_SEQ_IDLE(x)                     (((x) & 0x1) << 18)
+#define   G_000090_MCA_SEQ_IDLE(x)                     (((x) >> 18) & 0x1)
+#define   C_000090_MCA_SEQ_IDLE                        0xFFFBFFFF
+#define   S_000090_MCA_ARB_IDLE(x)                     (((x) & 0x1) << 19)
+#define   G_000090_MCA_ARB_IDLE(x)                     (((x) >> 19) & 0x1)
+#define   C_000090_MCA_ARB_IDLE                        0xFFF7FFFF
+#define   S_000090_RESERVED20(x)                       (((x) & 0xFFF) << 20)
+#define   G_000090_RESERVED20(x)                       (((x) >> 20) & 0xFFF)
+#define   C_000090_RESERVED20                          0x000FFFFF
+#define R_000100_MCCFG_FB_LOCATION                   0x000100
+#define   S_000100_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000100_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000100_MC_FB_START                         0xFFFF0000
+#define   S_000100_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000100_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000100_MC_FB_TOP                           0x0000FFFF
+#define R_000104_MC_INIT_MISC_LAT_TIMER              0x000104
+#define   S_000104_MC_CPR_INIT_LAT(x)                  (((x) & 0xF) << 0)
+#define   G_000104_MC_CPR_INIT_LAT(x)                  (((x) >> 0) & 0xF)
+#define   C_000104_MC_CPR_INIT_LAT                     0xFFFFFFF0
+#define   S_000104_MC_VF_INIT_LAT(x)                   (((x) & 0xF) << 4)
+#define   G_000104_MC_VF_INIT_LAT(x)                   (((x) >> 4) & 0xF)
+#define   C_000104_MC_VF_INIT_LAT                      0xFFFFFF0F
+#define   S_000104_MC_DISP0R_INIT_LAT(x)               (((x) & 0xF) << 8)
+#define   G_000104_MC_DISP0R_INIT_LAT(x)               (((x) >> 8) & 0xF)
+#define   C_000104_MC_DISP0R_INIT_LAT                  0xFFFFF0FF
+#define   S_000104_MC_DISP1R_INIT_LAT(x)               (((x) & 0xF) << 12)
+#define   G_000104_MC_DISP1R_INIT_LAT(x)               (((x) >> 12) & 0xF)
+#define   C_000104_MC_DISP1R_INIT_LAT                  0xFFFF0FFF
+#define   S_000104_MC_FIXED_INIT_LAT(x)                (((x) & 0xF) << 16)
+#define   G_000104_MC_FIXED_INIT_LAT(x)                (((x) >> 16) & 0xF)
+#define   C_000104_MC_FIXED_INIT_LAT                   0xFFF0FFFF
+#define   S_000104_MC_E2R_INIT_LAT(x)                  (((x) & 0xF) << 20)
+#define   G_000104_MC_E2R_INIT_LAT(x)                  (((x) >> 20) & 0xF)
+#define   C_000104_MC_E2R_INIT_LAT                     0xFF0FFFFF
+#define   S_000104_SAME_PAGE_PRIO(x)                   (((x) & 0xF) << 24)
+#define   G_000104_SAME_PAGE_PRIO(x)                   (((x) >> 24) & 0xF)
+#define   C_000104_SAME_PAGE_PRIO                      0xF0FFFFFF
+#define   S_000104_MC_GLOBW_INIT_LAT(x)                (((x) & 0xF) << 28)
+#define   G_000104_MC_GLOBW_INIT_LAT(x)                (((x) >> 28) & 0xF)
+#define   C_000104_MC_GLOBW_INIT_LAT                   0x0FFFFFFF
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/rv200d.h b/linux-imx/drivers/gpu/drm/radeon/rv200d.h
new file mode 100644
index 0000000..c5b3983
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rv200d.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV200D_H__
+#define __RV200D_H__
+
+#define R_00015C_AGP_BASE_2                          0x00015C
+#define   S_00015C_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_00015C_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_00015C_AGP_BASE_ADDR_2                     0xFFFFFFF0
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/rv250d.h b/linux-imx/drivers/gpu/drm/radeon/rv250d.h
new file mode 100644
index 0000000..e5a70b0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rv250d.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV250D_H__
+#define __RV250D_H__
+
+#define R_00000D_SCLK_CNTL_M6                        0x00000D
+#define   S_00000D_SCLK_SRC_SEL(x)                     (((x) & 0x7) << 0)
+#define   G_00000D_SCLK_SRC_SEL(x)                     (((x) >> 0) & 0x7)
+#define   C_00000D_SCLK_SRC_SEL                        0xFFFFFFF8
+#define   S_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 3)
+#define   G_00000D_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 3) & 0x1)
+#define   C_00000D_CP_MAX_DYN_STOP_LAT                 0xFFFFFFF7
+#define   S_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 4)
+#define   G_00000D_HDP_MAX_DYN_STOP_LAT(x)             (((x) >> 4) & 0x1)
+#define   C_00000D_HDP_MAX_DYN_STOP_LAT                0xFFFFFFEF
+#define   S_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 5)
+#define   G_00000D_TV_MAX_DYN_STOP_LAT(x)              (((x) >> 5) & 0x1)
+#define   C_00000D_TV_MAX_DYN_STOP_LAT                 0xFFFFFFDF
+#define   S_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 6)
+#define   G_00000D_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 6) & 0x1)
+#define   C_00000D_E2_MAX_DYN_STOP_LAT                 0xFFFFFFBF
+#define   S_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 7)
+#define   G_00000D_SE_MAX_DYN_STOP_LAT(x)              (((x) >> 7) & 0x1)
+#define   C_00000D_SE_MAX_DYN_STOP_LAT                 0xFFFFFF7F
+#define   S_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 8)
+#define   G_00000D_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 8) & 0x1)
+#define   C_00000D_IDCT_MAX_DYN_STOP_LAT               0xFFFFFEFF
+#define   S_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 9)
+#define   G_00000D_VIP_MAX_DYN_STOP_LAT(x)             (((x) >> 9) & 0x1)
+#define   C_00000D_VIP_MAX_DYN_STOP_LAT                0xFFFFFDFF
+#define   S_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 10)
+#define   G_00000D_RE_MAX_DYN_STOP_LAT(x)              (((x) >> 10) & 0x1)
+#define   C_00000D_RE_MAX_DYN_STOP_LAT                 0xFFFFFBFF
+#define   S_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 11)
+#define   G_00000D_PB_MAX_DYN_STOP_LAT(x)              (((x) >> 11) & 0x1)
+#define   C_00000D_PB_MAX_DYN_STOP_LAT                 0xFFFFF7FF
+#define   S_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 12)
+#define   G_00000D_TAM_MAX_DYN_STOP_LAT(x)             (((x) >> 12) & 0x1)
+#define   C_00000D_TAM_MAX_DYN_STOP_LAT                0xFFFFEFFF
+#define   S_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) & 0x1) << 13)
+#define   G_00000D_TDM_MAX_DYN_STOP_LAT(x)             (((x) >> 13) & 0x1)
+#define   C_00000D_TDM_MAX_DYN_STOP_LAT                0xFFFFDFFF
+#define   S_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 14)
+#define   G_00000D_RB_MAX_DYN_STOP_LAT(x)              (((x) >> 14) & 0x1)
+#define   C_00000D_RB_MAX_DYN_STOP_LAT                 0xFFFFBFFF
+#define   S_00000D_FORCE_DISP2(x)                      (((x) & 0x1) << 15)
+#define   G_00000D_FORCE_DISP2(x)                      (((x) >> 15) & 0x1)
+#define   C_00000D_FORCE_DISP2                         0xFFFF7FFF
+#define   S_00000D_FORCE_CP(x)                         (((x) & 0x1) << 16)
+#define   G_00000D_FORCE_CP(x)                         (((x) >> 16) & 0x1)
+#define   C_00000D_FORCE_CP                            0xFFFEFFFF
+#define   S_00000D_FORCE_HDP(x)                        (((x) & 0x1) << 17)
+#define   G_00000D_FORCE_HDP(x)                        (((x) >> 17) & 0x1)
+#define   C_00000D_FORCE_HDP                           0xFFFDFFFF
+#define   S_00000D_FORCE_DISP1(x)                      (((x) & 0x1) << 18)
+#define   G_00000D_FORCE_DISP1(x)                      (((x) >> 18) & 0x1)
+#define   C_00000D_FORCE_DISP1                         0xFFFBFFFF
+#define   S_00000D_FORCE_TOP(x)                        (((x) & 0x1) << 19)
+#define   G_00000D_FORCE_TOP(x)                        (((x) >> 19) & 0x1)
+#define   C_00000D_FORCE_TOP                           0xFFF7FFFF
+#define   S_00000D_FORCE_E2(x)                         (((x) & 0x1) << 20)
+#define   G_00000D_FORCE_E2(x)                         (((x) >> 20) & 0x1)
+#define   C_00000D_FORCE_E2                            0xFFEFFFFF
+#define   S_00000D_FORCE_SE(x)                         (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_SE(x)                         (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_SE                            0xFFDFFFFF
+#define   S_00000D_FORCE_IDCT(x)                       (((x) & 0x1) << 22)
+#define   G_00000D_FORCE_IDCT(x)                       (((x) >> 22) & 0x1)
+#define   C_00000D_FORCE_IDCT                          0xFFBFFFFF
+#define   S_00000D_FORCE_VIP(x)                        (((x) & 0x1) << 23)
+#define   G_00000D_FORCE_VIP(x)                        (((x) >> 23) & 0x1)
+#define   C_00000D_FORCE_VIP                           0xFF7FFFFF
+#define   S_00000D_FORCE_RE(x)                         (((x) & 0x1) << 24)
+#define   G_00000D_FORCE_RE(x)                         (((x) >> 24) & 0x1)
+#define   C_00000D_FORCE_RE                            0xFEFFFFFF
+#define   S_00000D_FORCE_PB(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_PB(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_PB                            0xFDFFFFFF
+#define   S_00000D_FORCE_TAM(x)                        (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_TAM(x)                        (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_TAM                           0xFBFFFFFF
+#define   S_00000D_FORCE_TDM(x)                        (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TDM(x)                        (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TDM                           0xF7FFFFFF
+#define   S_00000D_FORCE_RB(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_RB(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_RB                            0xEFFFFFFF
+#define   S_00000D_FORCE_TV_SCLK(x)                    (((x) & 0x1) << 29)
+#define   G_00000D_FORCE_TV_SCLK(x)                    (((x) >> 29) & 0x1)
+#define   C_00000D_FORCE_TV_SCLK                       0xDFFFFFFF
+#define   S_00000D_FORCE_SUBPIC(x)                     (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SUBPIC(x)                     (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SUBPIC                        0xBFFFFFFF
+#define   S_00000D_FORCE_OV0(x)                        (((x) & 0x1) << 31)
+#define   G_00000D_FORCE_OV0(x)                        (((x) >> 31) & 0x1)
+#define   C_00000D_FORCE_OV0                           0x7FFFFFFF
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/rv350d.h b/linux-imx/drivers/gpu/drm/radeon/rv350d.h
new file mode 100644
index 0000000..c75c5ed
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rv350d.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV350D_H__
+#define __RV350D_H__
+
+/* RV350, RV380 registers */
+/* #define R_00000D_SCLK_CNTL                           0x00000D */
+#define   S_00000D_FORCE_VAP(x)                        (((x) & 0x1) << 21)
+#define   G_00000D_FORCE_VAP(x)                        (((x) >> 21) & 0x1)
+#define   C_00000D_FORCE_VAP                           0xFFDFFFFF
+#define   S_00000D_FORCE_SR(x)                         (((x) & 0x1) << 25)
+#define   G_00000D_FORCE_SR(x)                         (((x) >> 25) & 0x1)
+#define   C_00000D_FORCE_SR                            0xFDFFFFFF
+#define   S_00000D_FORCE_PX(x)                         (((x) & 0x1) << 26)
+#define   G_00000D_FORCE_PX(x)                         (((x) >> 26) & 0x1)
+#define   C_00000D_FORCE_PX                            0xFBFFFFFF
+#define   S_00000D_FORCE_TX(x)                         (((x) & 0x1) << 27)
+#define   G_00000D_FORCE_TX(x)                         (((x) >> 27) & 0x1)
+#define   C_00000D_FORCE_TX                            0xF7FFFFFF
+#define   S_00000D_FORCE_US(x)                         (((x) & 0x1) << 28)
+#define   G_00000D_FORCE_US(x)                         (((x) >> 28) & 0x1)
+#define   C_00000D_FORCE_US                            0xEFFFFFFF
+#define   S_00000D_FORCE_SU(x)                         (((x) & 0x1) << 30)
+#define   G_00000D_FORCE_SU(x)                         (((x) >> 30) & 0x1)
+#define   C_00000D_FORCE_SU                            0xBFFFFFFF
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/rv515.c b/linux-imx/drivers/gpu/drm/radeon/rv515.c
new file mode 100644
index 0000000..21c7d7b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rv515.c
@@ -0,0 +1,1257 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "rv515d.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+#include "rv515_reg_safe.h"
+
+/* This file gathers functions specific to rv515 */
+static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
+static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
+static void rv515_gpu_init(struct radeon_device *rdev);
+int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+
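+/*
+ * Offsets into the display register banks: entry 0 addresses the D1
+ * (first CRTC) registers directly, entry 1 is the stride from a D1
+ * register to its D2 counterpart, so REG + crtc_offsets[i] addresses
+ * the same register on CRTC i.
+ */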
+static const u32 crtc_offsets[2] =
+{
+	0,
+	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
+void rv515_debugfs(struct radeon_device *rdev)
+{
+	if (r100_debugfs_rbbm_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+	}
+	if (rv515_debugfs_pipes_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for pipes !\n");
+	}
+	if (rv515_debugfs_ga_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for GA !\n");
+	}
+}
+
+void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	int r;
+
+	r = radeon_ring_lock(rdev, ring, 64);
+	if (r) {
+		return;
+	}
+	radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
+	radeon_ring_write(ring,
+			  ISYNC_ANY2D_IDLE3D |
+			  ISYNC_ANY3D_IDLE2D |
+			  ISYNC_WAIT_IDLEGUI |
+			  ISYNC_CPSCRATCH_IDLEGUI);
+	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+	radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
+	radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
+	radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+	radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+	radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
+	radeon_ring_write(ring,
+			  ((6 << MS_X0_SHIFT) |
+			   (6 << MS_Y0_SHIFT) |
+			   (6 << MS_X1_SHIFT) |
+			   (6 << MS_Y1_SHIFT) |
+			   (6 << MS_X2_SHIFT) |
+			   (6 << MS_Y2_SHIFT) |
+			   (6 << MSBD0_Y_SHIFT) |
+			   (6 << MSBD0_X_SHIFT)));
+	radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
+	radeon_ring_write(ring,
+			  ((6 << MS_X3_SHIFT) |
+			   (6 << MS_Y3_SHIFT) |
+			   (6 << MS_X4_SHIFT) |
+			   (6 << MS_Y4_SHIFT) |
+			   (6 << MS_X5_SHIFT) |
+			   (6 << MS_Y5_SHIFT) |
+			   (6 << MSBD1_SHIFT)));
+	radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
+	radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
+	radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
+	radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
+	radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
+	radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
+	radeon_ring_write(ring, PACKET0(0x20C8, 0));
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
+}
+
+int rv515_mc_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	uint32_t tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32_MC(MC_STATUS);
+		if (tmp & MC_STATUS_IDLE) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+	return -1;
+}
+
+void rv515_vga_render_disable(struct radeon_device *rdev)
+{
+	WREG32(R_000300_VGA_RENDER_CONTROL,
+		RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+}
+
+static void rv515_gpu_init(struct radeon_device *rdev)
+{
+	unsigned pipe_select_current, gb_pipe_select, tmp;
+
+	if (r100_gui_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait GUI idle while "
+		       "resetting GPU. Bad things might happen.\n");
+	}
+	rv515_vga_render_disable(rdev);
+	r420_pipes_init(rdev);
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	pipe_select_current = (tmp >> 2) & 3;
+	tmp = (1 << pipe_select_current) |
+	      (((gb_pipe_select >> 8) & 0xF) << 4);
+	WREG32_PLL(0x000D, tmp);
+	if (r100_gui_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait GUI idle while "
+		       "resetting GPU. Bad things might happen.\n");
+	}
+	if (rv515_mc_wait_for_idle(rdev)) {
+		printk(KERN_WARNING "Failed to wait MC idle while "
+		       "programming pipes. Bad things might happen.\n");
+	}
+}
+
+static void rv515_vram_get_type(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
+	rdev->mc.vram_width = 128;
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK;
+	switch (tmp) {
+	case 0:
+		rdev->mc.vram_width = 64;
+		break;
+	case 1:
+		rdev->mc.vram_width = 128;
+		break;
+	default:
+		rdev->mc.vram_width = 128;
+		break;
+	}
+}
+
+static void rv515_mc_init(struct radeon_device *rdev)
+{
+	rv515_vram_get_type(rdev);
+	r100_vram_init_sizes(rdev);
+	radeon_vram_location(rdev, &rdev->mc, 0);
+	rdev->mc.gtt_base_align = 0;
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+}
+
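+/*
+ * MC registers are reached through an index/data pair rather than being
+ * directly mapped: the register number is written to MC_IND_INDEX (with
+ * 0x7f0000 set for a read, 0xff0000 for a write) and the value is then
+ * transferred through MC_IND_DATA. Assuming the RREG32_MC()/WREG32_MC()
+ * wrappers used elsewhere in this file dispatch to these helpers, a
+ * read-modify-write of an MC register goes through this pair twice.
+ */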
+uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+	uint32_t r;
+
+	WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
+	r = RREG32(MC_IND_DATA);
+	WREG32(MC_IND_INDEX, 0);
+	return r;
+}
+
+void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
+	WREG32(MC_IND_DATA, (v));
+	WREG32(MC_IND_INDEX, 0);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(GB_PIPE_SELECT);
+	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
+	tmp = RREG32(SU_REG_DEST);
+	seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
+	tmp = RREG32(GB_TILE_CONFIG);
+	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
+	tmp = RREG32(DST_PIPE_CONFIG);
+	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
+	return 0;
+}
+
+static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	uint32_t tmp;
+
+	tmp = RREG32(0x2140);
+	seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
+	radeon_asic_reset(rdev);
+	tmp = RREG32(0x425C);
+	seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
+	return 0;
+}
+
+static struct drm_info_list rv515_pipes_info_list[] = {
+	{"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
+};
+
+static struct drm_info_list rv515_ga_info_list[] = {
+	{"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
+};
+#endif
+
+static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
+static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
+#else
+	return 0;
+#endif
+}
+
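+/*
+ * Quiesce all MC clients before the memory controller is reprogrammed:
+ * VGA rendering is turned off, every enabled CRTC is blanked (display
+ * read requests disabled, then the CRTC itself), and on R600 and newer
+ * parts the MC is additionally put into blackout with CPU framebuffer
+ * access blocked. rv515_mc_resume() undoes this in reverse order.
+ */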
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
+{
+	u32 crtc_enabled, tmp, frame_count, blackout;
+	int i, j;
+
+	save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
+	save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
+
+	/* disable VGA render */
+	WREG32(R_000300_VGA_RENDER_CONTROL, 0);
+	/* blank the display controllers */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
+		if (crtc_enabled) {
+			save->crtc_enabled[i] = true;
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+			if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
+				radeon_wait_for_vblank(rdev, i);
+				WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+				WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+
+			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+			WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+			tmp &= ~AVIVO_CRTC_EN;
+			WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+			WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			save->crtc_enabled[i] = false;
+		} else {
+			save->crtc_enabled[i] = false;
+		}
+	}
+
+	radeon_mc_wait_for_idle(rdev);
+
+	if (rdev->family >= CHIP_R600) {
+		if (rdev->family >= CHIP_RV770)
+			blackout = RREG32(R700_MC_CITF_CNTL);
+		else
+			blackout = RREG32(R600_CITF_CNTL);
+		if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
+			/* Block CPU access */
+			WREG32(R600_BIF_FB_EN, 0);
+			/* blackout the MC */
+			blackout |= R600_BLACKOUT_MASK;
+			if (rdev->family >= CHIP_RV770)
+				WREG32(R700_MC_CITF_CNTL, blackout);
+			else
+				WREG32(R600_CITF_CNTL, blackout);
+		}
+	}
+	/* wait for the MC to settle */
+	udelay(100);
+
+	/* lock double buffered regs */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+			if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
+				tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+				WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (!(tmp & 1)) {
+				tmp |= 1;
+				WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+		}
+	}
+}
+
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
+{
+	u32 tmp, frame_count;
+	int i, j;
+
+	/* update crtc base addresses */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->family >= CHIP_RV770) {
+			if (i == 0) {
+				WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+				WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+			} else {
+				WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+				WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(rdev->mc.vram_start));
+			}
+		}
+		WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+		WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+	}
+	WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+	/* unlock regs and wait for update */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
+			if ((tmp & 0x3) != 0) {
+				tmp &= ~0x3;
+				WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+			if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
+				tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+				WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
+			}
+			tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			if (tmp & 1) {
+				tmp &= ~1;
+				WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+			}
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
+				if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
+					break;
+				udelay(1);
+			}
+		}
+	}
+
+	if (rdev->family >= CHIP_R600) {
+		/* unblackout the MC */
+		if (rdev->family >= CHIP_RV770)
+			tmp = RREG32(R700_MC_CITF_CNTL);
+		else
+			tmp = RREG32(R600_CITF_CNTL);
+		tmp &= ~R600_BLACKOUT_MASK;
+		if (rdev->family >= CHIP_RV770)
+			WREG32(R700_MC_CITF_CNTL, tmp);
+		else
+			WREG32(R600_CITF_CNTL, tmp);
+		/* allow CPU access */
+		WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
+	}
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+		}
+	}
+	/* Unlock vga access */
+	WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
+	mdelay(1);
+	WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void rv515_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rv515_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+	/* Write VRAM size in case we are limiting it */
+	WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+	/* Program MC, should be a 32-bit limited address space */
+	WREG32_MC(R_000001_MC_FB_LOCATION,
+			S_000001_MC_FB_START(rdev->mc.vram_start >> 16) |
+			S_000001_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32_MC(R_000002_MC_AGP_LOCATION,
+			S_000002_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+			S_000002_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+		WREG32_MC(R_000003_MC_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+		WREG32_MC(R_000004_MC_AGP_BASE_2,
+			S_000004_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
+	} else {
+		WREG32_MC(R_000002_MC_AGP_LOCATION, 0xFFFFFFFF);
+		WREG32_MC(R_000003_MC_AGP_BASE, 0);
+		WREG32_MC(R_000004_MC_AGP_BASE_2, 0);
+	}
+
+	rv515_mc_resume(rdev, &save);
+}
+
+void rv515_clock_startup(struct radeon_device *rdev)
+{
+	if (radeon_dynclks != -1 && radeon_dynclks)
+		radeon_atom_set_clock_gating(rdev, 1);
+	/* We need to force some of the blocks on */
+	WREG32_PLL(R_00000F_CP_DYN_CNTL,
+		RREG32_PLL(R_00000F_CP_DYN_CNTL) | S_00000F_CP_FORCEON(1));
+	WREG32_PLL(R_000011_E2_DYN_CNTL,
+		RREG32_PLL(R_000011_E2_DYN_CNTL) | S_000011_E2_FORCEON(1));
+	WREG32_PLL(R_000013_IDCT_DYN_CNTL,
+		RREG32_PLL(R_000013_IDCT_DYN_CNTL) | S_000013_IDCT_FORCEON(1));
+}
+
+static int rv515_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	rv515_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rv515_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	if (rdev->flags & RADEON_IS_PCIE) {
+		r = rv370_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int rv515_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Make sure the GART is disabled */
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	rv515_clock_startup(rdev);
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	atom_asic_init(rdev->mode_info.atom_context);
+	/* Resume clock after posting */
+	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+
+	rdev->accel_working = true;
+	r = rv515_startup(rdev);
+	if (r) {
+		rdev->accel_working = false;
+	}
+	return r;
+}
+
+int rv515_suspend(struct radeon_device *rdev)
+{
+	r100_cp_disable(rdev);
+	radeon_wb_disable(rdev);
+	rs600_irq_disable(rdev);
+	if (rdev->flags & RADEON_IS_PCIE)
+		rv370_pcie_gart_disable(rdev);
+	return 0;
+}
+
+void rv515_set_safe_registers(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm);
+}
+
+void rv515_fini(struct radeon_device *rdev)
+{
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	rv370_pcie_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+int rv515_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: to disable VGA we need to use a VGA request */
+	/* restore some registers to sane defaults */
+	r100_restore_sanity(rdev);
+	/* BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
+	if (radeon_asic_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if the card is posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	rv515_mc_init(rdev);
+	rv515_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+	r = rv370_pcie_gart_init(rdev);
+	if (r)
+		return r;
+	rv515_set_safe_registers(rdev);
+
+	rdev->accel_working = true;
+	r = rv515_startup(rdev);
+	if (r) {
+		/* Something went wrong with the accel init, so stop acceleration */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rv370_pcie_gart_fini(rdev);
+		radeon_agp_fini(rdev);
+		rdev->accel_working = false;
+	}
+	return 0;
+}
+
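+/*
+ * The TV scaler is programmed through an index/data register pair at
+ * 0x6578/0x657c (offset per CRTC); the long sequence of writes below
+ * appears to load a fixed table of scaler filter coefficients, one
+ * indexed entry at a time.
+ */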
+void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *crtc)
+{
+	int index_reg = 0x6578 + crtc->crtc_offset;
+	int data_reg = 0x657c + crtc->crtc_offset;
+
+	WREG32(0x659C + crtc->crtc_offset, 0x0);
+	WREG32(0x6594 + crtc->crtc_offset, 0x705);
+	WREG32(0x65A4 + crtc->crtc_offset, 0x10001);
+	WREG32(0x65D8 + crtc->crtc_offset, 0x0);
+	WREG32(0x65B0 + crtc->crtc_offset, 0x0);
+	WREG32(0x65C0 + crtc->crtc_offset, 0x0);
+	WREG32(0x65D4 + crtc->crtc_offset, 0x0);
+	WREG32(index_reg, 0x0);
+	WREG32(data_reg, 0x841880A8);
+	WREG32(index_reg, 0x1);
+	WREG32(data_reg, 0x84208680);
+	WREG32(index_reg, 0x2);
+	WREG32(data_reg, 0xBFF880B0);
+	WREG32(index_reg, 0x100);
+	WREG32(data_reg, 0x83D88088);
+	WREG32(index_reg, 0x101);
+	WREG32(data_reg, 0x84608680);
+	WREG32(index_reg, 0x102);
+	WREG32(data_reg, 0xBFF080D0);
+	WREG32(index_reg, 0x200);
+	WREG32(data_reg, 0x83988068);
+	WREG32(index_reg, 0x201);
+	WREG32(data_reg, 0x84A08680);
+	WREG32(index_reg, 0x202);
+	WREG32(data_reg, 0xBFF080F8);
+	WREG32(index_reg, 0x300);
+	WREG32(data_reg, 0x83588058);
+	WREG32(index_reg, 0x301);
+	WREG32(data_reg, 0x84E08660);
+	WREG32(index_reg, 0x302);
+	WREG32(data_reg, 0xBFF88120);
+	WREG32(index_reg, 0x400);
+	WREG32(data_reg, 0x83188040);
+	WREG32(index_reg, 0x401);
+	WREG32(data_reg, 0x85008660);
+	WREG32(index_reg, 0x402);
+	WREG32(data_reg, 0xBFF88150);
+	WREG32(index_reg, 0x500);
+	WREG32(data_reg, 0x82D88030);
+	WREG32(index_reg, 0x501);
+	WREG32(data_reg, 0x85408640);
+	WREG32(index_reg, 0x502);
+	WREG32(data_reg, 0xBFF88180);
+	WREG32(index_reg, 0x600);
+	WREG32(data_reg, 0x82A08018);
+	WREG32(index_reg, 0x601);
+	WREG32(data_reg, 0x85808620);
+	WREG32(index_reg, 0x602);
+	WREG32(data_reg, 0xBFF081B8);
+	WREG32(index_reg, 0x700);
+	WREG32(data_reg, 0x82608010);
+	WREG32(index_reg, 0x701);
+	WREG32(data_reg, 0x85A08600);
+	WREG32(index_reg, 0x702);
+	WREG32(data_reg, 0x800081F0);
+	WREG32(index_reg, 0x800);
+	WREG32(data_reg, 0x8228BFF8);
+	WREG32(index_reg, 0x801);
+	WREG32(data_reg, 0x85E085E0);
+	WREG32(index_reg, 0x802);
+	WREG32(data_reg, 0xBFF88228);
+	WREG32(index_reg, 0x10000);
+	WREG32(data_reg, 0x82A8BF00);
+	WREG32(index_reg, 0x10001);
+	WREG32(data_reg, 0x82A08CC0);
+	WREG32(index_reg, 0x10002);
+	WREG32(data_reg, 0x8008BEF8);
+	WREG32(index_reg, 0x10100);
+	WREG32(data_reg, 0x81F0BF28);
+	WREG32(index_reg, 0x10101);
+	WREG32(data_reg, 0x83608CA0);
+	WREG32(index_reg, 0x10102);
+	WREG32(data_reg, 0x8018BED0);
+	WREG32(index_reg, 0x10200);
+	WREG32(data_reg, 0x8148BF38);
+	WREG32(index_reg, 0x10201);
+	WREG32(data_reg, 0x84408C80);
+	WREG32(index_reg, 0x10202);
+	WREG32(data_reg, 0x8008BEB8);
+	WREG32(index_reg, 0x10300);
+	WREG32(data_reg, 0x80B0BF78);
+	WREG32(index_reg, 0x10301);
+	WREG32(data_reg, 0x85008C20);
+	WREG32(index_reg, 0x10302);
+	WREG32(data_reg, 0x8020BEA0);
+	WREG32(index_reg, 0x10400);
+	WREG32(data_reg, 0x8028BF90);
+	WREG32(index_reg, 0x10401);
+	WREG32(data_reg, 0x85E08BC0);
+	WREG32(index_reg, 0x10402);
+	WREG32(data_reg, 0x8018BE90);
+	WREG32(index_reg, 0x10500);
+	WREG32(data_reg, 0xBFB8BFB0);
+	WREG32(index_reg, 0x10501);
+	WREG32(data_reg, 0x86C08B40);
+	WREG32(index_reg, 0x10502);
+	WREG32(data_reg, 0x8010BE90);
+	WREG32(index_reg, 0x10600);
+	WREG32(data_reg, 0xBF58BFC8);
+	WREG32(index_reg, 0x10601);
+	WREG32(data_reg, 0x87A08AA0);
+	WREG32(index_reg, 0x10602);
+	WREG32(data_reg, 0x8010BE98);
+	WREG32(index_reg, 0x10700);
+	WREG32(data_reg, 0xBF10BFF0);
+	WREG32(index_reg, 0x10701);
+	WREG32(data_reg, 0x886089E0);
+	WREG32(index_reg, 0x10702);
+	WREG32(data_reg, 0x8018BEB0);
+	WREG32(index_reg, 0x10800);
+	WREG32(data_reg, 0xBED8BFE8);
+	WREG32(index_reg, 0x10801);
+	WREG32(data_reg, 0x89408940);
+	WREG32(index_reg, 0x10802);
+	WREG32(data_reg, 0xBFE8BED8);
+	WREG32(index_reg, 0x20000);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20001);
+	WREG32(data_reg, 0x90008000);
+	WREG32(index_reg, 0x20002);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20003);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20100);
+	WREG32(data_reg, 0x80108000);
+	WREG32(index_reg, 0x20101);
+	WREG32(data_reg, 0x8FE0BF70);
+	WREG32(index_reg, 0x20102);
+	WREG32(data_reg, 0xBFE880C0);
+	WREG32(index_reg, 0x20103);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20200);
+	WREG32(data_reg, 0x8018BFF8);
+	WREG32(index_reg, 0x20201);
+	WREG32(data_reg, 0x8F80BF08);
+	WREG32(index_reg, 0x20202);
+	WREG32(data_reg, 0xBFD081A0);
+	WREG32(index_reg, 0x20203);
+	WREG32(data_reg, 0xBFF88000);
+	WREG32(index_reg, 0x20300);
+	WREG32(data_reg, 0x80188000);
+	WREG32(index_reg, 0x20301);
+	WREG32(data_reg, 0x8EE0BEC0);
+	WREG32(index_reg, 0x20302);
+	WREG32(data_reg, 0xBFB082A0);
+	WREG32(index_reg, 0x20303);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20400);
+	WREG32(data_reg, 0x80188000);
+	WREG32(index_reg, 0x20401);
+	WREG32(data_reg, 0x8E00BEA0);
+	WREG32(index_reg, 0x20402);
+	WREG32(data_reg, 0xBF8883C0);
+	WREG32(index_reg, 0x20403);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x20500);
+	WREG32(data_reg, 0x80188000);
+	WREG32(index_reg, 0x20501);
+	WREG32(data_reg, 0x8D00BE90);
+	WREG32(index_reg, 0x20502);
+	WREG32(data_reg, 0xBF588500);
+	WREG32(index_reg, 0x20503);
+	WREG32(data_reg, 0x80008008);
+	WREG32(index_reg, 0x20600);
+	WREG32(data_reg, 0x80188000);
+	WREG32(index_reg, 0x20601);
+	WREG32(data_reg, 0x8BC0BE98);
+	WREG32(index_reg, 0x20602);
+	WREG32(data_reg, 0xBF308660);
+	WREG32(index_reg, 0x20603);
+	WREG32(data_reg, 0x80008008);
+	WREG32(index_reg, 0x20700);
+	WREG32(data_reg, 0x80108000);
+	WREG32(index_reg, 0x20701);
+	WREG32(data_reg, 0x8A80BEB0);
+	WREG32(index_reg, 0x20702);
+	WREG32(data_reg, 0xBF0087C0);
+	WREG32(index_reg, 0x20703);
+	WREG32(data_reg, 0x80008008);
+	WREG32(index_reg, 0x20800);
+	WREG32(data_reg, 0x80108000);
+	WREG32(index_reg, 0x20801);
+	WREG32(data_reg, 0x8920BED0);
+	WREG32(index_reg, 0x20802);
+	WREG32(data_reg, 0xBED08920);
+	WREG32(index_reg, 0x20803);
+	WREG32(data_reg, 0x80008010);
+	WREG32(index_reg, 0x30000);
+	WREG32(data_reg, 0x90008000);
+	WREG32(index_reg, 0x30001);
+	WREG32(data_reg, 0x80008000);
+	WREG32(index_reg, 0x30100);
+	WREG32(data_reg, 0x8FE0BF90);
+	WREG32(index_reg, 0x30101);
+	WREG32(data_reg, 0xBFF880A0);
+	WREG32(index_reg, 0x30200);
+	WREG32(data_reg, 0x8F60BF40);
+	WREG32(index_reg, 0x30201);
+	WREG32(data_reg, 0xBFE88180);
+	WREG32(index_reg, 0x30300);
+	WREG32(data_reg, 0x8EC0BF00);
+	WREG32(index_reg, 0x30301);
+	WREG32(data_reg, 0xBFC88280);
+	WREG32(index_reg, 0x30400);
+	WREG32(data_reg, 0x8DE0BEE0);
+	WREG32(index_reg, 0x30401);
+	WREG32(data_reg, 0xBFA083A0);
+	WREG32(index_reg, 0x30500);
+	WREG32(data_reg, 0x8CE0BED0);
+	WREG32(index_reg, 0x30501);
+	WREG32(data_reg, 0xBF7884E0);
+	WREG32(index_reg, 0x30600);
+	WREG32(data_reg, 0x8BA0BED8);
+	WREG32(index_reg, 0x30601);
+	WREG32(data_reg, 0xBF508640);
+	WREG32(index_reg, 0x30700);
+	WREG32(data_reg, 0x8A60BEE8);
+	WREG32(index_reg, 0x30701);
+	WREG32(data_reg, 0xBF2087A0);
+	WREG32(index_reg, 0x30800);
+	WREG32(data_reg, 0x8900BF00);
+	WREG32(index_reg, 0x30801);
+	WREG32(data_reg, 0xBF008900);
+}
+
+struct rv515_watermark {
+	u32        lb_request_fifo_depth;
+	fixed20_12 num_line_pair;
+	fixed20_12 estimated_width;
+	fixed20_12 worst_case_latency;
+	fixed20_12 consumption_rate;
+	fixed20_12 active_time;
+	fixed20_12 dbpp;
+	fixed20_12 priority_mark_max;
+	fixed20_12 priority_mark;
+	fixed20_12 sclk;
+};
+
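+/*
+ * The watermark math below is carried out in fixed20_12 fixed point
+ * (20 integer bits, 12 fractional bits) via the drm dfixed helpers:
+ * dfixed_const(2), for instance, stores 2 << 12 in .full, and
+ * dfixed_mul()/dfixed_div() preserve the fractional precision across
+ * the rate and latency calculations.
+ */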
+static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
+				  struct radeon_crtc *crtc,
+				  struct rv515_watermark *wm)
+{
+	struct drm_display_mode *mode = &crtc->base.mode;
+	fixed20_12 a, b, c;
+	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
+	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
+
+	if (!crtc->base.enabled) {
+		/* FIXME: wouldn't it be better to set the priority mark to maximum? */
+		wm->lb_request_fifo_depth = 4;
+		return;
+	}
+
+	if (crtc->vsc.full > dfixed_const(2))
+		wm->num_line_pair.full = dfixed_const(2);
+	else
+		wm->num_line_pair.full = dfixed_const(1);
+
+	b.full = dfixed_const(mode->crtc_hdisplay);
+	c.full = dfixed_const(256);
+	a.full = dfixed_div(b, c);
+	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
+	if (a.full < dfixed_const(4)) {
+		wm->lb_request_fifo_depth = 4;
+	} else {
+		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
+	}
+
+	/* Determine consumption rate
+	 *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
+	 *  vtaps = number of vertical taps,
+	 *  vsc = vertical scaling ratio, defined as source/destination
+	 *  hsc = horizontal scaling ratio, defined as source/destination
+	 */
+	a.full = dfixed_const(mode->clock);
+	b.full = dfixed_const(1000);
+	a.full = dfixed_div(a, b);
+	pclk.full = dfixed_div(b, a);
+	if (crtc->rmx_type != RMX_OFF) {
+		b.full = dfixed_const(2);
+		if (crtc->vsc.full > b.full)
+			b.full = crtc->vsc.full;
+		b.full = dfixed_mul(b, crtc->hsc);
+		c.full = dfixed_const(2);
+		b.full = dfixed_div(b, c);
+		consumption_time.full = dfixed_div(pclk, b);
+	} else {
+		consumption_time.full = pclk.full;
+	}
+	a.full = dfixed_const(1);
+	wm->consumption_rate.full = dfixed_div(a, consumption_time);
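+	/*
+	 * Worked example (illustrative): a 1080p mode with
+	 * mode->clock = 148500 kHz and no scaling gives
+	 * pclk = 1000 / 148.5, about 6.73 ns per pixel, so
+	 * consumption_rate comes out near 0.149 pixels/ns.
+	 */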
+
+	/* Determine line time
+	 *  LineTime = total time for one line of display
+	 *  htotal = total number of horizontal pixels
+	 *  pclk = pixel clock period(ns)
+	 */
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	line_time.full = dfixed_mul(a, pclk);
+
+	/* Determine active time
+	 *  ActiveTime = time of active region of display within one line,
+	 *  hactive = total number of horizontal active pixels
+	 *  htotal = total number of horizontal pixels
+	 */
+	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
+	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->active_time.full = dfixed_mul(line_time, b);
+	wm->active_time.full = dfixed_div(wm->active_time, a);
+
+	/* Determine chunk time
+	 * ChunkTime = the time it takes the DCP to send one chunk of data
+	 * to the LB which consists of pipeline delay and inter chunk gap
+	 * sclk = system clock (MHz)
+	 */
+	a.full = dfixed_const(600 * 1000);
+	chunk_time.full = dfixed_div(a, rdev->pm.sclk);
+	read_delay_latency.full = dfixed_const(1000);
+
+	/* Determine the worst case latency
+	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
+	 * WorstCaseLatency = worst case time from urgent to when the MC starts
+	 *                    to return data
+	 * READ_DELAY_IDLE_MAX = constant of 1us
+	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
+	 *             which consists of pipeline delay and inter chunk gap
+	 */
+	if (dfixed_trunc(wm->num_line_pair) > 1) {
+		a.full = dfixed_const(3);
+		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
+		wm->worst_case_latency.full += read_delay_latency.full;
+	} else {
+		wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
+	}
+
+	/* Determine the tolerable latency
+	 * TolerableLatency = Any given request has only 1 line time
+	 *                    for the data to be returned
+	 * LBRequestFifoDepth = Number of chunk requests the LB can
+	 *                      put into the request FIFO for a display
+	 *  LineTime = total time for one line of display
+	 *  ChunkTime = the time it takes the DCP to send one chunk
+	 *              of data to the LB which consists of
+	 *  pipeline delay and inter chunk gap
+	 */
+	if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
+		tolerable_latency.full = line_time.full;
+	} else {
+		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
+		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
+		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
+		tolerable_latency.full = line_time.full - tolerable_latency.full;
+	}
+	/* We assume worst case 32bits (4 bytes) */
+	wm->dbpp.full = dfixed_const(2 * 16);
+
+	/* Determine the maximum priority mark
+	 *  width = viewport width in pixels
+	 */
+	a.full = dfixed_const(16);
+	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
+	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
+
+	/* Determine estimated width */
+	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
+	estimated_width.full = dfixed_div(estimated_width, consumption_time);
+	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+		wm->priority_mark.full = wm->priority_mark_max.full;
+	} else {
+		a.full = dfixed_const(16);
+		wm->priority_mark.full = dfixed_div(estimated_width, a);
+		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
+		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
+	}
+}
+
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	struct rv515_watermark wm0;
+	struct rv515_watermark wm1;
+	u32 tmp;
+	u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
+	u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
+	fixed20_12 priority_mark02, priority_mark12, fill_rate;
+	fixed20_12 a, b;
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+	rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
+	rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
+
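+	/*
+	 * Both FIFO depths share one register: D1 occupies bits [3:0]
+	 * and D2 bits [19:16], matching the
+	 * S_006D58_LB_D1/D2_MAX_REQ_OUTSTANDING field definitions.
+	 */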
+	tmp = wm0.lb_request_fifo_depth;
+	tmp |= wm1.lb_request_fifo_depth << 16;
+	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
+
+	if (mode0 && mode1) {
+		if (dfixed_trunc(wm0.dbpp) > 64)
+			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
+		else
+			a.full = wm0.num_line_pair.full;
+		if (dfixed_trunc(wm1.dbpp) > 64)
+			b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
+		else
+			b.full = wm1.num_line_pair.full;
+		a.full += b.full;
+		fill_rate.full = dfixed_div(wm0.sclk, a);
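+		/* fill_rate estimates how quickly sclk can service the line
+		 * buffers: the engine clock divided by the combined factor
+		 * of both heads (line pairs, or dbpp per line pair above
+		 * 64 bpp). */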
+		if (wm0.consumption_rate.full > fill_rate.full) {
+			b.full = wm0.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			priority_mark02.full = a.full + b.full;
+		} else {
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark02.full = dfixed_div(a, b);
+		}
+		if (wm1.consumption_rate.full > fill_rate.full) {
+			b.full = wm1.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			priority_mark12.full = a.full + b.full;
+		} else {
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		}
+		if (wm0.priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark.full;
+		if (dfixed_trunc(priority_mark02) < 0)
+			priority_mark02.full = 0;
+		if (wm0.priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark_max.full;
+		if (wm1.priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark.full;
+		if (dfixed_trunc(priority_mark12) < 0)
+			priority_mark12.full = 0;
+		if (wm1.priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark_max.full;
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2) {
+			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+		}
+	} else if (mode0) {
+		if (dfixed_trunc(wm0.dbpp) > 64)
+			a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
+		else
+			a.full = wm0.num_line_pair.full;
+		fill_rate.full = dfixed_div(wm0.sclk, a);
+		if (wm0.consumption_rate.full > fill_rate.full) {
+			b.full = wm0.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm0.active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			priority_mark02.full = a.full + b.full;
+		} else {
+			a.full = dfixed_mul(wm0.worst_case_latency,
+						wm0.consumption_rate);
+			b.full = dfixed_const(16);
+			priority_mark02.full = dfixed_div(a, b);
+		}
+		if (wm0.priority_mark.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark.full;
+		if (dfixed_trunc(priority_mark02) < 0)
+			priority_mark02.full = 0;
+		if (wm0.priority_mark_max.full > priority_mark02.full)
+			priority_mark02.full = wm0.priority_mark_max.full;
+		d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+		if (rdev->disp_priority == 2)
+			d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+	} else if (mode1) {
+		if (dfixed_trunc(wm1.dbpp) > 64)
+			a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
+		else
+			a.full = wm1.num_line_pair.full;
+		fill_rate.full = dfixed_div(wm1.sclk, a);
+		if (wm1.consumption_rate.full > fill_rate.full) {
+			b.full = wm1.consumption_rate.full - fill_rate.full;
+			b.full = dfixed_mul(b, wm1.active_time);
+			a.full = dfixed_const(16);
+			b.full = dfixed_div(b, a);
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			priority_mark12.full = a.full + b.full;
+		} else {
+			a.full = dfixed_mul(wm1.worst_case_latency,
+						wm1.consumption_rate);
+			b.full = dfixed_const(16 * 1000);
+			priority_mark12.full = dfixed_div(a, b);
+		}
+		if (wm1.priority_mark.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark.full;
+		if (dfixed_trunc(priority_mark12) < 0)
+			priority_mark12.full = 0;
+		if (wm1.priority_mark_max.full > priority_mark12.full)
+			priority_mark12.full = wm1.priority_mark_max.full;
+		d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+		if (rdev->disp_priority == 2)
+			d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+	}
+
+	WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+	WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+	WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+	WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+}
+
+void rv515_bandwidth_update(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+
+	radeon_update_display_priority(rdev);
+
+	if (rdev->mode_info.crtcs[0]->base.enabled)
+		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+	if (rdev->mode_info.crtcs[1]->base.enabled)
+		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+	/*
+	 * Raise the display0/1 read priority in the memory controller
+	 * when the user sets the display priority option to HIGH.
+	 */
+	if ((rdev->disp_priority == 2) &&
+	    (rdev->family == CHIP_RV515)) {
+		tmp = RREG32_MC(MC_MISC_LAT_TIMER);
+		tmp &= ~MC_DISP1R_INIT_LAT_MASK;
+		tmp &= ~MC_DISP0R_INIT_LAT_MASK;
+		if (mode1)
+			tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
+		if (mode0)
+			tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
+		WREG32_MC(MC_MISC_LAT_TIMER, tmp);
+	}
+	rv515_bandwidth_avivo_update(rdev);
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/rv515d.h b/linux-imx/drivers/gpu/drm/radeon/rv515d.h
new file mode 100644
index 0000000..6927a20
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rv515d.h
@@ -0,0 +1,638 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef __RV515D_H__
+#define __RV515D_H__
+
+/*
+ * RV515 registers
+ */
+#define PCIE_INDEX			0x0030
+#define PCIE_DATA			0x0034
+#define	MC_IND_INDEX			0x0070
+#define		MC_IND_WR_EN				(1 << 24)
+#define	MC_IND_DATA			0x0074
+#define	RBBM_SOFT_RESET			0x00F0
+#define	CONFIG_MEMSIZE			0x00F8
+#define HDP_FB_LOCATION			0x0134
+#define	CP_CSQ_CNTL			0x0740
+#define	CP_CSQ_MODE			0x0744
+#define	CP_CSQ_ADDR			0x07F0
+#define	CP_CSQ_DATA			0x07F4
+#define	CP_CSQ_STAT			0x07F8
+#define	CP_CSQ2_STAT			0x07FC
+#define	RBBM_STATUS			0x0E40
+#define	DST_PIPE_CONFIG			0x170C
+#define	WAIT_UNTIL			0x1720
+#define		WAIT_2D_IDLE				(1 << 14)
+#define		WAIT_3D_IDLE				(1 << 15)
+#define		WAIT_2D_IDLECLEAN			(1 << 16)
+#define		WAIT_3D_IDLECLEAN			(1 << 17)
+#define	ISYNC_CNTL			0x1724
+#define		ISYNC_ANY2D_IDLE3D			(1 << 0)
+#define		ISYNC_ANY3D_IDLE2D			(1 << 1)
+#define		ISYNC_TRIG2D_IDLE3D			(1 << 2)
+#define		ISYNC_TRIG3D_IDLE2D			(1 << 3)
+#define		ISYNC_WAIT_IDLEGUI			(1 << 4)
+#define		ISYNC_CPSCRATCH_IDLEGUI			(1 << 5)
+#define	VAP_INDEX_OFFSET		0x208C
+#define	VAP_PVS_STATE_FLUSH_REG		0x2284
+#define	GB_ENABLE			0x4008
+#define	GB_MSPOS0			0x4010
+#define		MS_X0_SHIFT				0
+#define		MS_Y0_SHIFT				4
+#define		MS_X1_SHIFT				8
+#define		MS_Y1_SHIFT				12
+#define		MS_X2_SHIFT				16
+#define		MS_Y2_SHIFT				20
+#define		MSBD0_Y_SHIFT				24
+#define		MSBD0_X_SHIFT				28
+#define	GB_MSPOS1			0x4014
+#define		MS_X3_SHIFT				0
+#define		MS_Y3_SHIFT				4
+#define		MS_X4_SHIFT				8
+#define		MS_Y4_SHIFT				12
+#define		MS_X5_SHIFT				16
+#define		MS_Y5_SHIFT				20
+#define		MSBD1_SHIFT				24
+#define GB_TILE_CONFIG			0x4018
+#define		ENABLE_TILING				(1 << 0)
+#define		PIPE_COUNT_MASK				0x0000000E
+#define		PIPE_COUNT_SHIFT			1
+#define		TILE_SIZE_8				(0 << 4)
+#define		TILE_SIZE_16				(1 << 4)
+#define		TILE_SIZE_32				(2 << 4)
+#define		SUBPIXEL_1_12				(0 << 16)
+#define		SUBPIXEL_1_16				(1 << 16)
+#define	GB_SELECT			0x401C
+#define	GB_AA_CONFIG			0x4020
+#define	GB_PIPE_SELECT			0x402C
+#define	GA_ENHANCE			0x4274
+#define		GA_DEADLOCK_CNTL			(1 << 0)
+#define		GA_FASTSYNC_CNTL			(1 << 1)
+#define	GA_POLY_MODE			0x4288
+#define		FRONT_PTYPE_POINT			(0 << 4)
+#define		FRONT_PTYPE_LINE			(1 << 4)
+#define		FRONT_PTYPE_TRIANGE			(2 << 4)
+#define		BACK_PTYPE_POINT			(0 << 7)
+#define		BACK_PTYPE_LINE				(1 << 7)
+#define		BACK_PTYPE_TRIANGE			(2 << 7)
+#define	GA_ROUND_MODE			0x428C
+#define		GEOMETRY_ROUND_TRUNC			(0 << 0)
+#define		GEOMETRY_ROUND_NEAREST			(1 << 0)
+#define		COLOR_ROUND_TRUNC			(0 << 2)
+#define		COLOR_ROUND_NEAREST			(1 << 2)
+#define	SU_REG_DEST			0x42C8
+#define	RB3D_DSTCACHE_CTLSTAT		0x4E4C
+#define		RB3D_DC_FLUSH				(2 << 0)
+#define		RB3D_DC_FREE				(2 << 2)
+#define		RB3D_DC_FINISH				(1 << 4)
+#define ZB_ZCACHE_CTLSTAT		0x4F18
+#define		ZC_FLUSH				(1 << 0)
+#define		ZC_FREE					(1 << 1)
+#define DC_LB_MEMORY_SPLIT		0x6520
+#define		DC_LB_MEMORY_SPLIT_MASK			0x00000003
+#define		DC_LB_MEMORY_SPLIT_SHIFT		0
+#define		DC_LB_MEMORY_SPLIT_D1HALF_D2HALF	0
+#define		DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q		1
+#define		DC_LB_MEMORY_SPLIT_D1_ONLY		2
+#define		DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q		3
+#define		DC_LB_MEMORY_SPLIT_SHIFT_MODE		(1 << 2)
+#define		DC_LB_DISP1_END_ADR_SHIFT		4
+#define		DC_LB_DISP1_END_ADR_MASK		0x00007FF0
+#define D1MODE_PRIORITY_A_CNT		0x6548
+#define		MODE_PRIORITY_MARK_MASK			0x00007FFF
+#define		MODE_PRIORITY_OFF			(1 << 16)
+#define		MODE_PRIORITY_ALWAYS_ON			(1 << 20)
+#define		MODE_PRIORITY_FORCE_MASK		(1 << 24)
+#define D1MODE_PRIORITY_B_CNT		0x654C
+#define LB_MAX_REQ_OUTSTANDING		0x6D58
+#define		LB_D1_MAX_REQ_OUTSTANDING_MASK		0x0000000F
+#define		LB_D1_MAX_REQ_OUTSTANDING_SHIFT		0
+#define		LB_D2_MAX_REQ_OUTSTANDING_MASK		0x000F0000
+#define		LB_D2_MAX_REQ_OUTSTANDING_SHIFT		16
+#define D2MODE_PRIORITY_A_CNT		0x6D48
+#define D2MODE_PRIORITY_B_CNT		0x6D4C
+
+/* ix[MC] registers */
+#define MC_FB_LOCATION			0x01
+#define		MC_FB_START_MASK			0x0000FFFF
+#define		MC_FB_START_SHIFT			0
+#define		MC_FB_TOP_MASK				0xFFFF0000
+#define		MC_FB_TOP_SHIFT				16
+#define MC_AGP_LOCATION			0x02
+#define		MC_AGP_START_MASK			0x0000FFFF
+#define		MC_AGP_START_SHIFT			0
+#define		MC_AGP_TOP_MASK				0xFFFF0000
+#define		MC_AGP_TOP_SHIFT			16
+#define MC_AGP_BASE			0x03
+#define MC_AGP_BASE_2			0x04
+#define	MC_CNTL				0x5
+#define		MEM_NUM_CHANNELS_MASK			0x00000003
+#define	MC_STATUS			0x08
+#define		MC_STATUS_IDLE				(1 << 4)
+#define	MC_MISC_LAT_TIMER		0x09
+#define		MC_CPR_INIT_LAT_MASK			0x0000000F
+#define		MC_VF_INIT_LAT_MASK			0x000000F0
+#define		MC_DISP0R_INIT_LAT_MASK			0x00000F00
+#define		MC_DISP0R_INIT_LAT_SHIFT		8
+#define		MC_DISP1R_INIT_LAT_MASK			0x0000F000
+#define		MC_DISP1R_INIT_LAT_SHIFT		12
+#define		MC_FIXED_INIT_LAT_MASK			0x000F0000
+#define		MC_E2R_INIT_LAT_MASK			0x00F00000
+#define		SAME_PAGE_PRIO_MASK			0x0F000000
+#define		MC_GLOBW_INIT_LAT_MASK			0xF0000000
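+
+/* The ix[MC] registers above are not directly memory mapped; they are
+ * reached through the MC_IND_INDEX/MC_IND_DATA pair. A minimal sketch of
+ * the accessor pattern (the driver's RREG32_MC/WREG32_MC helpers follow
+ * this shape; the exact masking and locking may differ):
+ *
+ *	u32 mc_read(struct radeon_device *rdev, u32 reg)
+ *	{
+ *		WREG32(MC_IND_INDEX, reg);
+ *		return RREG32(MC_IND_DATA);
+ *	}
+ *
+ *	void mc_write(struct radeon_device *rdev, u32 reg, u32 v)
+ *	{
+ *		WREG32(MC_IND_INDEX, MC_IND_WR_EN | reg);
+ *		WREG32(MC_IND_DATA, v);
+ *		WREG32(MC_IND_INDEX, 0);
+ *	}
+ */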
+
+
+/*
+ * PM4 packet
+ */
+#define CP_PACKET0			0x00000000
+#define		PACKET0_BASE_INDEX_SHIFT	0
+#define		PACKET0_BASE_INDEX_MASK		(0x1ffff << 0)
+#define		PACKET0_COUNT_SHIFT		16
+#define		PACKET0_COUNT_MASK		(0x3fff << 16)
+#define CP_PACKET1			0x40000000
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+#define CP_PACKET3			0xC0000000
+#define		PACKET3_IT_OPCODE_SHIFT		8
+#define		PACKET3_IT_OPCODE_MASK		(0xff << 8)
+#define		PACKET3_COUNT_SHIFT		16
+#define		PACKET3_COUNT_MASK		(0x3fff << 16)
+/* PACKET3 op code */
+#define		PACKET3_NOP			0x10
+#define		PACKET3_3D_DRAW_VBUF		0x28
+#define		PACKET3_3D_DRAW_IMMD		0x29
+#define		PACKET3_3D_DRAW_INDX		0x2A
+#define		PACKET3_3D_LOAD_VBPNTR		0x2F
+#define		PACKET3_INDX_BUFFER		0x33
+#define		PACKET3_3D_DRAW_VBUF_2		0x34
+#define		PACKET3_3D_DRAW_IMMD_2		0x35
+#define		PACKET3_3D_DRAW_INDX_2		0x36
+#define		PACKET3_BITBLT_MULTI		0x9B
+
+#define PACKET0(reg, n)	(CP_PACKET0 |					\
+			 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) |	\
+			 REG_SET(PACKET0_COUNT, (n)))
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+#define PACKET3(op, n)	(CP_PACKET3 |					\
+			 REG_SET(PACKET3_IT_OPCODE, (op)) |		\
+			 REG_SET(PACKET3_COUNT, (n)))
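+
+/* Illustrative use of the header macros above: PACKET0(WAIT_UNTIL, 1)
+ * builds a type-0 header that writes two consecutive registers starting
+ * at WAIT_UNTIL (0x1720), i.e.
+ *
+ *	CP_PACKET0 | ((0x1720 >> 2) << 0) | (1 << 16)
+ *
+ * By PM4 convention the count field holds one less than the number of
+ * data dwords that follow. REG_SET() is assumed here to be the generic
+ * field-insertion helper used elsewhere in the driver.
+ */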
+
+/* Registers */
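+/* Field accessor convention used below: for a field F of register R,
+ * S_R_F(x) shifts a value into place, G_R_F(x) extracts it, and C_R_F is
+ * the AND mask that clears it. A typical read-modify-write is:
+ *
+ *	tmp = RREG32(R_0000F0_RBBM_SOFT_RESET);
+ *	tmp &= C_0000F0_SOFT_RESET_CP;
+ *	tmp |= S_0000F0_SOFT_RESET_CP(1);
+ *	WREG32(R_0000F0_RBBM_SOFT_RESET, tmp);
+ */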
+#define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
+#define   S_0000F0_SOFT_RESET_CP(x)                    (((x) & 0x1) << 0)
+#define   G_0000F0_SOFT_RESET_CP(x)                    (((x) >> 0) & 0x1)
+#define   C_0000F0_SOFT_RESET_CP                       0xFFFFFFFE
+#define   S_0000F0_SOFT_RESET_HI(x)                    (((x) & 0x1) << 1)
+#define   G_0000F0_SOFT_RESET_HI(x)                    (((x) >> 1) & 0x1)
+#define   C_0000F0_SOFT_RESET_HI                       0xFFFFFFFD
+#define   S_0000F0_SOFT_RESET_VAP(x)                   (((x) & 0x1) << 2)
+#define   G_0000F0_SOFT_RESET_VAP(x)                   (((x) >> 2) & 0x1)
+#define   C_0000F0_SOFT_RESET_VAP                      0xFFFFFFFB
+#define   S_0000F0_SOFT_RESET_RE(x)                    (((x) & 0x1) << 3)
+#define   G_0000F0_SOFT_RESET_RE(x)                    (((x) >> 3) & 0x1)
+#define   C_0000F0_SOFT_RESET_RE                       0xFFFFFFF7
+#define   S_0000F0_SOFT_RESET_PP(x)                    (((x) & 0x1) << 4)
+#define   G_0000F0_SOFT_RESET_PP(x)                    (((x) >> 4) & 0x1)
+#define   C_0000F0_SOFT_RESET_PP                       0xFFFFFFEF
+#define   S_0000F0_SOFT_RESET_E2(x)                    (((x) & 0x1) << 5)
+#define   G_0000F0_SOFT_RESET_E2(x)                    (((x) >> 5) & 0x1)
+#define   C_0000F0_SOFT_RESET_E2                       0xFFFFFFDF
+#define   S_0000F0_SOFT_RESET_RB(x)                    (((x) & 0x1) << 6)
+#define   G_0000F0_SOFT_RESET_RB(x)                    (((x) >> 6) & 0x1)
+#define   C_0000F0_SOFT_RESET_RB                       0xFFFFFFBF
+#define   S_0000F0_SOFT_RESET_HDP(x)                   (((x) & 0x1) << 7)
+#define   G_0000F0_SOFT_RESET_HDP(x)                   (((x) >> 7) & 0x1)
+#define   C_0000F0_SOFT_RESET_HDP                      0xFFFFFF7F
+#define   S_0000F0_SOFT_RESET_MC(x)                    (((x) & 0x1) << 8)
+#define   G_0000F0_SOFT_RESET_MC(x)                    (((x) >> 8) & 0x1)
+#define   C_0000F0_SOFT_RESET_MC                       0xFFFFFEFF
+#define   S_0000F0_SOFT_RESET_AIC(x)                   (((x) & 0x1) << 9)
+#define   G_0000F0_SOFT_RESET_AIC(x)                   (((x) >> 9) & 0x1)
+#define   C_0000F0_SOFT_RESET_AIC                      0xFFFFFDFF
+#define   S_0000F0_SOFT_RESET_VIP(x)                   (((x) & 0x1) << 10)
+#define   G_0000F0_SOFT_RESET_VIP(x)                   (((x) >> 10) & 0x1)
+#define   C_0000F0_SOFT_RESET_VIP                      0xFFFFFBFF
+#define   S_0000F0_SOFT_RESET_DISP(x)                  (((x) & 0x1) << 11)
+#define   G_0000F0_SOFT_RESET_DISP(x)                  (((x) >> 11) & 0x1)
+#define   C_0000F0_SOFT_RESET_DISP                     0xFFFFF7FF
+#define   S_0000F0_SOFT_RESET_CG(x)                    (((x) & 0x1) << 12)
+#define   G_0000F0_SOFT_RESET_CG(x)                    (((x) >> 12) & 0x1)
+#define   C_0000F0_SOFT_RESET_CG                       0xFFFFEFFF
+#define   S_0000F0_SOFT_RESET_GA(x)                    (((x) & 0x1) << 13)
+#define   G_0000F0_SOFT_RESET_GA(x)                    (((x) >> 13) & 0x1)
+#define   C_0000F0_SOFT_RESET_GA                       0xFFFFDFFF
+#define   S_0000F0_SOFT_RESET_IDCT(x)                  (((x) & 0x1) << 14)
+#define   G_0000F0_SOFT_RESET_IDCT(x)                  (((x) >> 14) & 0x1)
+#define   C_0000F0_SOFT_RESET_IDCT                     0xFFFFBFFF
+#define R_0000F8_CONFIG_MEMSIZE                      0x0000F8
+#define   S_0000F8_CONFIG_MEMSIZE(x)                   (((x) & 0xFFFFFFFF) << 0)
+#define   G_0000F8_CONFIG_MEMSIZE(x)                   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_0000F8_CONFIG_MEMSIZE                      0x00000000
+#define R_000134_HDP_FB_LOCATION                     0x000134
+#define   S_000134_HDP_FB_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000134_HDP_FB_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000134_HDP_FB_START                        0xFFFF0000
+#define R_000300_VGA_RENDER_CONTROL                  0x000300
+#define   S_000300_VGA_BLINK_RATE(x)                   (((x) & 0x1F) << 0)
+#define   G_000300_VGA_BLINK_RATE(x)                   (((x) >> 0) & 0x1F)
+#define   C_000300_VGA_BLINK_RATE                      0xFFFFFFE0
+#define   S_000300_VGA_BLINK_MODE(x)                   (((x) & 0x3) << 5)
+#define   G_000300_VGA_BLINK_MODE(x)                   (((x) >> 5) & 0x3)
+#define   C_000300_VGA_BLINK_MODE                      0xFFFFFF9F
+#define   S_000300_VGA_CURSOR_BLINK_INVERT(x)          (((x) & 0x1) << 7)
+#define   G_000300_VGA_CURSOR_BLINK_INVERT(x)          (((x) >> 7) & 0x1)
+#define   C_000300_VGA_CURSOR_BLINK_INVERT             0xFFFFFF7F
+#define   S_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x)       (((x) & 0x1) << 8)
+#define   G_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x)       (((x) >> 8) & 0x1)
+#define   C_000300_VGA_EXTD_ADDR_COUNT_ENABLE          0xFFFFFEFF
+#define   S_000300_VGA_VSTATUS_CNTL(x)                 (((x) & 0x3) << 16)
+#define   G_000300_VGA_VSTATUS_CNTL(x)                 (((x) >> 16) & 0x3)
+#define   C_000300_VGA_VSTATUS_CNTL                    0xFFFCFFFF
+#define   S_000300_VGA_LOCK_8DOT(x)                    (((x) & 0x1) << 24)
+#define   G_000300_VGA_LOCK_8DOT(x)                    (((x) >> 24) & 0x1)
+#define   C_000300_VGA_LOCK_8DOT                       0xFEFFFFFF
+#define   S_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) & 0x1) << 25)
+#define   G_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) >> 25) & 0x1)
+#define   C_000300_VGAREG_LINECMP_COMPATIBILITY_SEL    0xFDFFFFFF
+#define R_000310_VGA_MEMORY_BASE_ADDRESS             0x000310
+#define   S_000310_VGA_MEMORY_BASE_ADDRESS(x)          (((x) & 0xFFFFFFFF) << 0)
+#define   G_000310_VGA_MEMORY_BASE_ADDRESS(x)          (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000310_VGA_MEMORY_BASE_ADDRESS             0x00000000
+#define R_000328_VGA_HDP_CONTROL                     0x000328
+#define   S_000328_VGA_MEM_PAGE_SELECT_EN(x)           (((x) & 0x1) << 0)
+#define   G_000328_VGA_MEM_PAGE_SELECT_EN(x)           (((x) >> 0) & 0x1)
+#define   C_000328_VGA_MEM_PAGE_SELECT_EN              0xFFFFFFFE
+#define   S_000328_VGA_RBBM_LOCK_DISABLE(x)            (((x) & 0x1) << 8)
+#define   G_000328_VGA_RBBM_LOCK_DISABLE(x)            (((x) >> 8) & 0x1)
+#define   C_000328_VGA_RBBM_LOCK_DISABLE               0xFFFFFEFF
+#define   S_000328_VGA_SOFT_RESET(x)                   (((x) & 0x1) << 16)
+#define   G_000328_VGA_SOFT_RESET(x)                   (((x) >> 16) & 0x1)
+#define   C_000328_VGA_SOFT_RESET                      0xFFFEFFFF
+#define   S_000328_VGA_TEST_RESET_CONTROL(x)           (((x) & 0x1) << 24)
+#define   G_000328_VGA_TEST_RESET_CONTROL(x)           (((x) >> 24) & 0x1)
+#define   C_000328_VGA_TEST_RESET_CONTROL              0xFEFFFFFF
+#define R_000330_D1VGA_CONTROL                       0x000330
+#define   S_000330_D1VGA_MODE_ENABLE(x)                (((x) & 0x1) << 0)
+#define   G_000330_D1VGA_MODE_ENABLE(x)                (((x) >> 0) & 0x1)
+#define   C_000330_D1VGA_MODE_ENABLE                   0xFFFFFFFE
+#define   S_000330_D1VGA_TIMING_SELECT(x)              (((x) & 0x1) << 8)
+#define   G_000330_D1VGA_TIMING_SELECT(x)              (((x) >> 8) & 0x1)
+#define   C_000330_D1VGA_TIMING_SELECT                 0xFFFFFEFF
+#define   S_000330_D1VGA_SYNC_POLARITY_SELECT(x)       (((x) & 0x1) << 9)
+#define   G_000330_D1VGA_SYNC_POLARITY_SELECT(x)       (((x) >> 9) & 0x1)
+#define   C_000330_D1VGA_SYNC_POLARITY_SELECT          0xFFFFFDFF
+#define   S_000330_D1VGA_OVERSCAN_TIMING_SELECT(x)     (((x) & 0x1) << 10)
+#define   G_000330_D1VGA_OVERSCAN_TIMING_SELECT(x)     (((x) >> 10) & 0x1)
+#define   C_000330_D1VGA_OVERSCAN_TIMING_SELECT        0xFFFFFBFF
+#define   S_000330_D1VGA_OVERSCAN_COLOR_EN(x)          (((x) & 0x1) << 16)
+#define   G_000330_D1VGA_OVERSCAN_COLOR_EN(x)          (((x) >> 16) & 0x1)
+#define   C_000330_D1VGA_OVERSCAN_COLOR_EN             0xFFFEFFFF
+#define   S_000330_D1VGA_ROTATE(x)                     (((x) & 0x3) << 24)
+#define   G_000330_D1VGA_ROTATE(x)                     (((x) >> 24) & 0x3)
+#define   C_000330_D1VGA_ROTATE                        0xFCFFFFFF
+#define R_000338_D2VGA_CONTROL                       0x000338
+#define   S_000338_D2VGA_MODE_ENABLE(x)                (((x) & 0x1) << 0)
+#define   G_000338_D2VGA_MODE_ENABLE(x)                (((x) >> 0) & 0x1)
+#define   C_000338_D2VGA_MODE_ENABLE                   0xFFFFFFFE
+#define   S_000338_D2VGA_TIMING_SELECT(x)              (((x) & 0x1) << 8)
+#define   G_000338_D2VGA_TIMING_SELECT(x)              (((x) >> 8) & 0x1)
+#define   C_000338_D2VGA_TIMING_SELECT                 0xFFFFFEFF
+#define   S_000338_D2VGA_SYNC_POLARITY_SELECT(x)       (((x) & 0x1) << 9)
+#define   G_000338_D2VGA_SYNC_POLARITY_SELECT(x)       (((x) >> 9) & 0x1)
+#define   C_000338_D2VGA_SYNC_POLARITY_SELECT          0xFFFFFDFF
+#define   S_000338_D2VGA_OVERSCAN_TIMING_SELECT(x)     (((x) & 0x1) << 10)
+#define   G_000338_D2VGA_OVERSCAN_TIMING_SELECT(x)     (((x) >> 10) & 0x1)
+#define   C_000338_D2VGA_OVERSCAN_TIMING_SELECT        0xFFFFFBFF
+#define   S_000338_D2VGA_OVERSCAN_COLOR_EN(x)          (((x) & 0x1) << 16)
+#define   G_000338_D2VGA_OVERSCAN_COLOR_EN(x)          (((x) >> 16) & 0x1)
+#define   C_000338_D2VGA_OVERSCAN_COLOR_EN             0xFFFEFFFF
+#define   S_000338_D2VGA_ROTATE(x)                     (((x) & 0x3) << 24)
+#define   G_000338_D2VGA_ROTATE(x)                     (((x) >> 24) & 0x3)
+#define   C_000338_D2VGA_ROTATE                        0xFCFFFFFF
+#define R_0007C0_CP_STAT                             0x0007C0
+#define   S_0007C0_MRU_BUSY(x)                         (((x) & 0x1) << 0)
+#define   G_0007C0_MRU_BUSY(x)                         (((x) >> 0) & 0x1)
+#define   C_0007C0_MRU_BUSY                            0xFFFFFFFE
+#define   S_0007C0_MWU_BUSY(x)                         (((x) & 0x1) << 1)
+#define   G_0007C0_MWU_BUSY(x)                         (((x) >> 1) & 0x1)
+#define   C_0007C0_MWU_BUSY                            0xFFFFFFFD
+#define   S_0007C0_RSIU_BUSY(x)                        (((x) & 0x1) << 2)
+#define   G_0007C0_RSIU_BUSY(x)                        (((x) >> 2) & 0x1)
+#define   C_0007C0_RSIU_BUSY                           0xFFFFFFFB
+#define   S_0007C0_RCIU_BUSY(x)                        (((x) & 0x1) << 3)
+#define   G_0007C0_RCIU_BUSY(x)                        (((x) >> 3) & 0x1)
+#define   C_0007C0_RCIU_BUSY                           0xFFFFFFF7
+#define   S_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) & 0x1) << 9)
+#define   G_0007C0_CSF_PRIMARY_BUSY(x)                 (((x) >> 9) & 0x1)
+#define   C_0007C0_CSF_PRIMARY_BUSY                    0xFFFFFDFF
+#define   S_0007C0_CSF_INDIRECT_BUSY(x)                (((x) & 0x1) << 10)
+#define   G_0007C0_CSF_INDIRECT_BUSY(x)                (((x) >> 10) & 0x1)
+#define   C_0007C0_CSF_INDIRECT_BUSY                   0xFFFFFBFF
+#define   S_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) & 0x1) << 11)
+#define   G_0007C0_CSQ_PRIMARY_BUSY(x)                 (((x) >> 11) & 0x1)
+#define   C_0007C0_CSQ_PRIMARY_BUSY                    0xFFFFF7FF
+#define   S_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) & 0x1) << 12)
+#define   G_0007C0_CSQ_INDIRECT_BUSY(x)                (((x) >> 12) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT_BUSY                   0xFFFFEFFF
+#define   S_0007C0_CSI_BUSY(x)                         (((x) & 0x1) << 13)
+#define   G_0007C0_CSI_BUSY(x)                         (((x) >> 13) & 0x1)
+#define   C_0007C0_CSI_BUSY                            0xFFFFDFFF
+#define   S_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) & 0x1) << 14)
+#define   G_0007C0_CSF_INDIRECT2_BUSY(x)               (((x) >> 14) & 0x1)
+#define   C_0007C0_CSF_INDIRECT2_BUSY                  0xFFFFBFFF
+#define   S_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) & 0x1) << 15)
+#define   G_0007C0_CSQ_INDIRECT2_BUSY(x)               (((x) >> 15) & 0x1)
+#define   C_0007C0_CSQ_INDIRECT2_BUSY                  0xFFFF7FFF
+#define   S_0007C0_GUIDMA_BUSY(x)                      (((x) & 0x1) << 28)
+#define   G_0007C0_GUIDMA_BUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_0007C0_GUIDMA_BUSY                         0xEFFFFFFF
+#define   S_0007C0_VIDDMA_BUSY(x)                      (((x) & 0x1) << 29)
+#define   G_0007C0_VIDDMA_BUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_0007C0_VIDDMA_BUSY                         0xDFFFFFFF
+#define   S_0007C0_CMDSTRM_BUSY(x)                     (((x) & 0x1) << 30)
+#define   G_0007C0_CMDSTRM_BUSY(x)                     (((x) >> 30) & 0x1)
+#define   C_0007C0_CMDSTRM_BUSY                        0xBFFFFFFF
+#define   S_0007C0_CP_BUSY(x)                          (((x) & 0x1) << 31)
+#define   G_0007C0_CP_BUSY(x)                          (((x) >> 31) & 0x1)
+#define   C_0007C0_CP_BUSY                             0x7FFFFFFF
+#define R_000E40_RBBM_STATUS                         0x000E40
+#define   S_000E40_CMDFIFO_AVAIL(x)                    (((x) & 0x7F) << 0)
+#define   G_000E40_CMDFIFO_AVAIL(x)                    (((x) >> 0) & 0x7F)
+#define   C_000E40_CMDFIFO_AVAIL                       0xFFFFFF80
+#define   S_000E40_HIRQ_ON_RBB(x)                      (((x) & 0x1) << 8)
+#define   G_000E40_HIRQ_ON_RBB(x)                      (((x) >> 8) & 0x1)
+#define   C_000E40_HIRQ_ON_RBB                         0xFFFFFEFF
+#define   S_000E40_CPRQ_ON_RBB(x)                      (((x) & 0x1) << 9)
+#define   G_000E40_CPRQ_ON_RBB(x)                      (((x) >> 9) & 0x1)
+#define   C_000E40_CPRQ_ON_RBB                         0xFFFFFDFF
+#define   S_000E40_CFRQ_ON_RBB(x)                      (((x) & 0x1) << 10)
+#define   G_000E40_CFRQ_ON_RBB(x)                      (((x) >> 10) & 0x1)
+#define   C_000E40_CFRQ_ON_RBB                         0xFFFFFBFF
+#define   S_000E40_HIRQ_IN_RTBUF(x)                    (((x) & 0x1) << 11)
+#define   G_000E40_HIRQ_IN_RTBUF(x)                    (((x) >> 11) & 0x1)
+#define   C_000E40_HIRQ_IN_RTBUF                       0xFFFFF7FF
+#define   S_000E40_CPRQ_IN_RTBUF(x)                    (((x) & 0x1) << 12)
+#define   G_000E40_CPRQ_IN_RTBUF(x)                    (((x) >> 12) & 0x1)
+#define   C_000E40_CPRQ_IN_RTBUF                       0xFFFFEFFF
+#define   S_000E40_CFRQ_IN_RTBUF(x)                    (((x) & 0x1) << 13)
+#define   G_000E40_CFRQ_IN_RTBUF(x)                    (((x) >> 13) & 0x1)
+#define   C_000E40_CFRQ_IN_RTBUF                       0xFFFFDFFF
+#define   S_000E40_CF_PIPE_BUSY(x)                     (((x) & 0x1) << 14)
+#define   G_000E40_CF_PIPE_BUSY(x)                     (((x) >> 14) & 0x1)
+#define   C_000E40_CF_PIPE_BUSY                        0xFFFFBFFF
+#define   S_000E40_ENG_EV_BUSY(x)                      (((x) & 0x1) << 15)
+#define   G_000E40_ENG_EV_BUSY(x)                      (((x) >> 15) & 0x1)
+#define   C_000E40_ENG_EV_BUSY                         0xFFFF7FFF
+#define   S_000E40_CP_CMDSTRM_BUSY(x)                  (((x) & 0x1) << 16)
+#define   G_000E40_CP_CMDSTRM_BUSY(x)                  (((x) >> 16) & 0x1)
+#define   C_000E40_CP_CMDSTRM_BUSY                     0xFFFEFFFF
+#define   S_000E40_E2_BUSY(x)                          (((x) & 0x1) << 17)
+#define   G_000E40_E2_BUSY(x)                          (((x) >> 17) & 0x1)
+#define   C_000E40_E2_BUSY                             0xFFFDFFFF
+#define   S_000E40_RB2D_BUSY(x)                        (((x) & 0x1) << 18)
+#define   G_000E40_RB2D_BUSY(x)                        (((x) >> 18) & 0x1)
+#define   C_000E40_RB2D_BUSY                           0xFFFBFFFF
+#define   S_000E40_RB3D_BUSY(x)                        (((x) & 0x1) << 19)
+#define   G_000E40_RB3D_BUSY(x)                        (((x) >> 19) & 0x1)
+#define   C_000E40_RB3D_BUSY                           0xFFF7FFFF
+#define   S_000E40_VAP_BUSY(x)                         (((x) & 0x1) << 20)
+#define   G_000E40_VAP_BUSY(x)                         (((x) >> 20) & 0x1)
+#define   C_000E40_VAP_BUSY                            0xFFEFFFFF
+#define   S_000E40_RE_BUSY(x)                          (((x) & 0x1) << 21)
+#define   G_000E40_RE_BUSY(x)                          (((x) >> 21) & 0x1)
+#define   C_000E40_RE_BUSY                             0xFFDFFFFF
+#define   S_000E40_TAM_BUSY(x)                         (((x) & 0x1) << 22)
+#define   G_000E40_TAM_BUSY(x)                         (((x) >> 22) & 0x1)
+#define   C_000E40_TAM_BUSY                            0xFFBFFFFF
+#define   S_000E40_TDM_BUSY(x)                         (((x) & 0x1) << 23)
+#define   G_000E40_TDM_BUSY(x)                         (((x) >> 23) & 0x1)
+#define   C_000E40_TDM_BUSY                            0xFF7FFFFF
+#define   S_000E40_PB_BUSY(x)                          (((x) & 0x1) << 24)
+#define   G_000E40_PB_BUSY(x)                          (((x) >> 24) & 0x1)
+#define   C_000E40_PB_BUSY                             0xFEFFFFFF
+#define   S_000E40_TIM_BUSY(x)                         (((x) & 0x1) << 25)
+#define   G_000E40_TIM_BUSY(x)                         (((x) >> 25) & 0x1)
+#define   C_000E40_TIM_BUSY                            0xFDFFFFFF
+#define   S_000E40_GA_BUSY(x)                          (((x) & 0x1) << 26)
+#define   G_000E40_GA_BUSY(x)                          (((x) >> 26) & 0x1)
+#define   C_000E40_GA_BUSY                             0xFBFFFFFF
+#define   S_000E40_CBA2D_BUSY(x)                       (((x) & 0x1) << 27)
+#define   G_000E40_CBA2D_BUSY(x)                       (((x) >> 27) & 0x1)
+#define   C_000E40_CBA2D_BUSY                          0xF7FFFFFF
+#define   S_000E40_RBBM_HIBUSY(x)                      (((x) & 0x1) << 28)
+#define   G_000E40_RBBM_HIBUSY(x)                      (((x) >> 28) & 0x1)
+#define   C_000E40_RBBM_HIBUSY                         0xEFFFFFFF
+#define   S_000E40_SKID_CFBUSY(x)                      (((x) & 0x1) << 29)
+#define   G_000E40_SKID_CFBUSY(x)                      (((x) >> 29) & 0x1)
+#define   C_000E40_SKID_CFBUSY                         0xDFFFFFFF
+#define   S_000E40_VAP_VF_BUSY(x)                      (((x) & 0x1) << 30)
+#define   G_000E40_VAP_VF_BUSY(x)                      (((x) >> 30) & 0x1)
+#define   C_000E40_VAP_VF_BUSY                         0xBFFFFFFF
+#define   S_000E40_GUI_ACTIVE(x)                       (((x) & 0x1) << 31)
+#define   G_000E40_GUI_ACTIVE(x)                       (((x) >> 31) & 0x1)
+#define   C_000E40_GUI_ACTIVE                          0x7FFFFFFF
+#define R_006080_D1CRTC_CONTROL                      0x006080
+#define   S_006080_D1CRTC_MASTER_EN(x)                 (((x) & 0x1) << 0)
+#define   G_006080_D1CRTC_MASTER_EN(x)                 (((x) >> 0) & 0x1)
+#define   C_006080_D1CRTC_MASTER_EN                    0xFFFFFFFE
+#define   S_006080_D1CRTC_SYNC_RESET_SEL(x)            (((x) & 0x1) << 4)
+#define   G_006080_D1CRTC_SYNC_RESET_SEL(x)            (((x) >> 4) & 0x1)
+#define   C_006080_D1CRTC_SYNC_RESET_SEL               0xFFFFFFEF
+#define   S_006080_D1CRTC_DISABLE_POINT_CNTL(x)        (((x) & 0x3) << 8)
+#define   G_006080_D1CRTC_DISABLE_POINT_CNTL(x)        (((x) >> 8) & 0x3)
+#define   C_006080_D1CRTC_DISABLE_POINT_CNTL           0xFFFFFCFF
+#define   S_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) & 0x1) << 16)
+#define   G_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) >> 16) & 0x1)
+#define   C_006080_D1CRTC_CURRENT_MASTER_EN_STATE      0xFFFEFFFF
+#define   S_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
+#define   G_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
+#define   C_006080_D1CRTC_DISP_READ_REQUEST_DISABLE    0xFEFFFFFF
+#define R_0060E8_D1CRTC_UPDATE_LOCK                  0x0060E8
+#define   S_0060E8_D1CRTC_UPDATE_LOCK(x)               (((x) & 0x1) << 0)
+#define   G_0060E8_D1CRTC_UPDATE_LOCK(x)               (((x) >> 0) & 0x1)
+#define   C_0060E8_D1CRTC_UPDATE_LOCK                  0xFFFFFFFE
+#define R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS      0x006110
+#define   S_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) & 0xFFFFFFFF) << 0)
+#define   G_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS      0x00000000
+#define R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS    0x006118
+#define   S_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define   G_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS    0x00000000
+#define R_006880_D2CRTC_CONTROL                      0x006880
+#define   S_006880_D2CRTC_MASTER_EN(x)                 (((x) & 0x1) << 0)
+#define   G_006880_D2CRTC_MASTER_EN(x)                 (((x) >> 0) & 0x1)
+#define   C_006880_D2CRTC_MASTER_EN                    0xFFFFFFFE
+#define   S_006880_D2CRTC_SYNC_RESET_SEL(x)            (((x) & 0x1) << 4)
+#define   G_006880_D2CRTC_SYNC_RESET_SEL(x)            (((x) >> 4) & 0x1)
+#define   C_006880_D2CRTC_SYNC_RESET_SEL               0xFFFFFFEF
+#define   S_006880_D2CRTC_DISABLE_POINT_CNTL(x)        (((x) & 0x3) << 8)
+#define   G_006880_D2CRTC_DISABLE_POINT_CNTL(x)        (((x) >> 8) & 0x3)
+#define   C_006880_D2CRTC_DISABLE_POINT_CNTL           0xFFFFFCFF
+#define   S_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) & 0x1) << 16)
+#define   G_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x)   (((x) >> 16) & 0x1)
+#define   C_006880_D2CRTC_CURRENT_MASTER_EN_STATE      0xFFFEFFFF
+#define   S_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
+#define   G_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
+#define   C_006880_D2CRTC_DISP_READ_REQUEST_DISABLE    0xFEFFFFFF
+#define R_0068E8_D2CRTC_UPDATE_LOCK                  0x0068E8
+#define   S_0068E8_D2CRTC_UPDATE_LOCK(x)               (((x) & 0x1) << 0)
+#define   G_0068E8_D2CRTC_UPDATE_LOCK(x)               (((x) >> 0) & 0x1)
+#define   C_0068E8_D2CRTC_UPDATE_LOCK                  0xFFFFFFFE
+#define R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS      0x006910
+#define   S_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) & 0xFFFFFFFF) << 0)
+#define   G_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x)   (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS      0x00000000
+#define R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS    0x006918
+#define   S_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
+#define   G_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
+#define   C_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS    0x00000000
+
+
+#define R_000001_MC_FB_LOCATION                      0x000001
+#define   S_000001_MC_FB_START(x)                      (((x) & 0xFFFF) << 0)
+#define   G_000001_MC_FB_START(x)                      (((x) >> 0) & 0xFFFF)
+#define   C_000001_MC_FB_START                         0xFFFF0000
+#define   S_000001_MC_FB_TOP(x)                        (((x) & 0xFFFF) << 16)
+#define   G_000001_MC_FB_TOP(x)                        (((x) >> 16) & 0xFFFF)
+#define   C_000001_MC_FB_TOP                           0x0000FFFF
+#define R_000002_MC_AGP_LOCATION                     0x000002
+#define   S_000002_MC_AGP_START(x)                     (((x) & 0xFFFF) << 0)
+#define   G_000002_MC_AGP_START(x)                     (((x) >> 0) & 0xFFFF)
+#define   C_000002_MC_AGP_START                        0xFFFF0000
+#define   S_000002_MC_AGP_TOP(x)                       (((x) & 0xFFFF) << 16)
+#define   G_000002_MC_AGP_TOP(x)                       (((x) >> 16) & 0xFFFF)
+#define   C_000002_MC_AGP_TOP                          0x0000FFFF
+#define R_000003_MC_AGP_BASE                         0x000003
+#define   S_000003_AGP_BASE_ADDR(x)                    (((x) & 0xFFFFFFFF) << 0)
+#define   G_000003_AGP_BASE_ADDR(x)                    (((x) >> 0) & 0xFFFFFFFF)
+#define   C_000003_AGP_BASE_ADDR                       0x00000000
+#define R_000004_MC_AGP_BASE_2                       0x000004
+#define   S_000004_AGP_BASE_ADDR_2(x)                  (((x) & 0xF) << 0)
+#define   G_000004_AGP_BASE_ADDR_2(x)                  (((x) >> 0) & 0xF)
+#define   C_000004_AGP_BASE_ADDR_2                     0xFFFFFFF0
+
+
+#define R_00000F_CP_DYN_CNTL                         0x00000F
+#define   S_00000F_CP_FORCEON(x)                       (((x) & 0x1) << 0)
+#define   G_00000F_CP_FORCEON(x)                       (((x) >> 0) & 0x1)
+#define   C_00000F_CP_FORCEON                          0xFFFFFFFE
+#define   S_00000F_CP_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 1)
+#define   G_00000F_CP_MAX_DYN_STOP_LAT(x)              (((x) >> 1) & 0x1)
+#define   C_00000F_CP_MAX_DYN_STOP_LAT                 0xFFFFFFFD
+#define   S_00000F_CP_CLOCK_STATUS(x)                  (((x) & 0x1) << 2)
+#define   G_00000F_CP_CLOCK_STATUS(x)                  (((x) >> 2) & 0x1)
+#define   C_00000F_CP_CLOCK_STATUS                     0xFFFFFFFB
+#define   S_00000F_CP_PROG_SHUTOFF(x)                  (((x) & 0x1) << 3)
+#define   G_00000F_CP_PROG_SHUTOFF(x)                  (((x) >> 3) & 0x1)
+#define   C_00000F_CP_PROG_SHUTOFF                     0xFFFFFFF7
+#define   S_00000F_CP_PROG_DELAY_VALUE(x)              (((x) & 0xFF) << 4)
+#define   G_00000F_CP_PROG_DELAY_VALUE(x)              (((x) >> 4) & 0xFF)
+#define   C_00000F_CP_PROG_DELAY_VALUE                 0xFFFFF00F
+#define   S_00000F_CP_LOWER_POWER_IDLE(x)              (((x) & 0xFF) << 12)
+#define   G_00000F_CP_LOWER_POWER_IDLE(x)              (((x) >> 12) & 0xFF)
+#define   C_00000F_CP_LOWER_POWER_IDLE                 0xFFF00FFF
+#define   S_00000F_CP_LOWER_POWER_IGNORE(x)            (((x) & 0x1) << 20)
+#define   G_00000F_CP_LOWER_POWER_IGNORE(x)            (((x) >> 20) & 0x1)
+#define   C_00000F_CP_LOWER_POWER_IGNORE               0xFFEFFFFF
+#define   S_00000F_CP_NORMAL_POWER_IGNORE(x)           (((x) & 0x1) << 21)
+#define   G_00000F_CP_NORMAL_POWER_IGNORE(x)           (((x) >> 21) & 0x1)
+#define   C_00000F_CP_NORMAL_POWER_IGNORE              0xFFDFFFFF
+#define   S_00000F_SPARE(x)                            (((x) & 0x3) << 22)
+#define   G_00000F_SPARE(x)                            (((x) >> 22) & 0x3)
+#define   C_00000F_SPARE                               0xFF3FFFFF
+#define   S_00000F_CP_NORMAL_POWER_BUSY(x)             (((x) & 0xFF) << 24)
+#define   G_00000F_CP_NORMAL_POWER_BUSY(x)             (((x) >> 24) & 0xFF)
+#define   C_00000F_CP_NORMAL_POWER_BUSY                0x00FFFFFF
+#define R_000011_E2_DYN_CNTL                         0x000011
+#define   S_000011_E2_FORCEON(x)                       (((x) & 0x1) << 0)
+#define   G_000011_E2_FORCEON(x)                       (((x) >> 0) & 0x1)
+#define   C_000011_E2_FORCEON                          0xFFFFFFFE
+#define   S_000011_E2_MAX_DYN_STOP_LAT(x)              (((x) & 0x1) << 1)
+#define   G_000011_E2_MAX_DYN_STOP_LAT(x)              (((x) >> 1) & 0x1)
+#define   C_000011_E2_MAX_DYN_STOP_LAT                 0xFFFFFFFD
+#define   S_000011_E2_CLOCK_STATUS(x)                  (((x) & 0x1) << 2)
+#define   G_000011_E2_CLOCK_STATUS(x)                  (((x) >> 2) & 0x1)
+#define   C_000011_E2_CLOCK_STATUS                     0xFFFFFFFB
+#define   S_000011_E2_PROG_SHUTOFF(x)                  (((x) & 0x1) << 3)
+#define   G_000011_E2_PROG_SHUTOFF(x)                  (((x) >> 3) & 0x1)
+#define   C_000011_E2_PROG_SHUTOFF                     0xFFFFFFF7
+#define   S_000011_E2_PROG_DELAY_VALUE(x)              (((x) & 0xFF) << 4)
+#define   G_000011_E2_PROG_DELAY_VALUE(x)              (((x) >> 4) & 0xFF)
+#define   C_000011_E2_PROG_DELAY_VALUE                 0xFFFFF00F
+#define   S_000011_E2_LOWER_POWER_IDLE(x)              (((x) & 0xFF) << 12)
+#define   G_000011_E2_LOWER_POWER_IDLE(x)              (((x) >> 12) & 0xFF)
+#define   C_000011_E2_LOWER_POWER_IDLE                 0xFFF00FFF
+#define   S_000011_E2_LOWER_POWER_IGNORE(x)            (((x) & 0x1) << 20)
+#define   G_000011_E2_LOWER_POWER_IGNORE(x)            (((x) >> 20) & 0x1)
+#define   C_000011_E2_LOWER_POWER_IGNORE               0xFFEFFFFF
+#define   S_000011_E2_NORMAL_POWER_IGNORE(x)           (((x) & 0x1) << 21)
+#define   G_000011_E2_NORMAL_POWER_IGNORE(x)           (((x) >> 21) & 0x1)
+#define   C_000011_E2_NORMAL_POWER_IGNORE              0xFFDFFFFF
+#define   S_000011_SPARE(x)                            (((x) & 0x3) << 22)
+#define   G_000011_SPARE(x)                            (((x) >> 22) & 0x3)
+#define   C_000011_SPARE                               0xFF3FFFFF
+#define   S_000011_E2_NORMAL_POWER_BUSY(x)             (((x) & 0xFF) << 24)
+#define   G_000011_E2_NORMAL_POWER_BUSY(x)             (((x) >> 24) & 0xFF)
+#define   C_000011_E2_NORMAL_POWER_BUSY                0x00FFFFFF
+#define R_000013_IDCT_DYN_CNTL                       0x000013
+#define   S_000013_IDCT_FORCEON(x)                     (((x) & 0x1) << 0)
+#define   G_000013_IDCT_FORCEON(x)                     (((x) >> 0) & 0x1)
+#define   C_000013_IDCT_FORCEON                        0xFFFFFFFE
+#define   S_000013_IDCT_MAX_DYN_STOP_LAT(x)            (((x) & 0x1) << 1)
+#define   G_000013_IDCT_MAX_DYN_STOP_LAT(x)            (((x) >> 1) & 0x1)
+#define   C_000013_IDCT_MAX_DYN_STOP_LAT               0xFFFFFFFD
+#define   S_000013_IDCT_CLOCK_STATUS(x)                (((x) & 0x1) << 2)
+#define   G_000013_IDCT_CLOCK_STATUS(x)                (((x) >> 2) & 0x1)
+#define   C_000013_IDCT_CLOCK_STATUS                   0xFFFFFFFB
+#define   S_000013_IDCT_PROG_SHUTOFF(x)                (((x) & 0x1) << 3)
+#define   G_000013_IDCT_PROG_SHUTOFF(x)                (((x) >> 3) & 0x1)
+#define   C_000013_IDCT_PROG_SHUTOFF                   0xFFFFFFF7
+#define   S_000013_IDCT_PROG_DELAY_VALUE(x)            (((x) & 0xFF) << 4)
+#define   G_000013_IDCT_PROG_DELAY_VALUE(x)            (((x) >> 4) & 0xFF)
+#define   C_000013_IDCT_PROG_DELAY_VALUE               0xFFFFF00F
+#define   S_000013_IDCT_LOWER_POWER_IDLE(x)            (((x) & 0xFF) << 12)
+#define   G_000013_IDCT_LOWER_POWER_IDLE(x)            (((x) >> 12) & 0xFF)
+#define   C_000013_IDCT_LOWER_POWER_IDLE               0xFFF00FFF
+#define   S_000013_IDCT_LOWER_POWER_IGNORE(x)          (((x) & 0x1) << 20)
+#define   G_000013_IDCT_LOWER_POWER_IGNORE(x)          (((x) >> 20) & 0x1)
+#define   C_000013_IDCT_LOWER_POWER_IGNORE             0xFFEFFFFF
+#define   S_000013_IDCT_NORMAL_POWER_IGNORE(x)         (((x) & 0x1) << 21)
+#define   G_000013_IDCT_NORMAL_POWER_IGNORE(x)         (((x) >> 21) & 0x1)
+#define   C_000013_IDCT_NORMAL_POWER_IGNORE            0xFFDFFFFF
+#define   S_000013_SPARE(x)                            (((x) & 0x3) << 22)
+#define   G_000013_SPARE(x)                            (((x) >> 22) & 0x3)
+#define   C_000013_SPARE                               0xFF3FFFFF
+#define   S_000013_IDCT_NORMAL_POWER_BUSY(x)           (((x) & 0xFF) << 24)
+#define   G_000013_IDCT_NORMAL_POWER_BUSY(x)           (((x) >> 24) & 0xFF)
+#define   C_000013_IDCT_NORMAL_POWER_BUSY              0x00FFFFFF
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/rv770.c b/linux-imx/drivers/gpu/drm/radeon/rv770.c
new file mode 100644
index 0000000..f5e92cf
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rv770.c
@@ -0,0 +1,2194 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <drm/radeon_drm.h>
+#include "rv770d.h"
+#include "atom.h"
+#include "avivod.h"
+
+#define R700_PFP_UCODE_SIZE 848
+#define R700_PM4_UCODE_SIZE 1360
+
+static void rv770_gpu_init(struct radeon_device *rdev);
+void rv770_fini(struct radeon_device *rdev);
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
+
+int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+	int r;
+
+	/* RV740 uses the Evergreen UVD clock programming sequence */
+	if (rdev->family == CHIP_RV740)
+		return evergreen_set_uvd_clocks(rdev, vclk, dclk);
+
+	/* bypass vclk and dclk with bclk */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	if (!vclk || !dclk) {
+		/* keep the Bypass mode, put PLL to sleep */
+		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+		return 0;
+	}
+
+	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
+					  43663, 0x03FFFFFE, 1, 30, ~0,
+					  &fb_div, &vclk_div, &dclk_div);
+	if (r)
+		return r;
+
+	fb_div |= 1;
+	vclk_div -= 1;
+	dclk_div -= 1;
+
+	/* set UPLL_FB_DIV to 0x50000 */
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(0x50000), ~UPLL_FB_DIV_MASK);
+
+	/* deassert UPLL_RESET and UPLL_SLEEP */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~(UPLL_RESET_MASK | UPLL_SLEEP_MASK));
+
+	/* assert BYPASS_EN and FB_DIV[0] (required by the sequence; reason undocumented) */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1));
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* assert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+	/* set the required FB_DIV, REF_DIV and post divider values */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REF_DIV(1), ~UPLL_REF_DIV_MASK);
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 UPLL_SW_HILEN(vclk_div >> 1) |
+		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
+		 UPLL_SW_HILEN2(dclk_div >> 1) |
+		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)),
+		 ~UPLL_SW_MASK);
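+	/* Each post divider is split into high and low phase lengths; the
+	 * (v >> 1) / ((v >> 1) + (v & 1)) pair above puts the extra cycle
+	 * of an odd value into the low phase, e.g. v = 5 gives HILEN = 2
+	 * and LOLEN = 3. */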
+
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div),
+		 ~UPLL_FB_DIV_MASK);
+
+	/* give the PLL some time to settle */
+	mdelay(15);
+
+	/* deassert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* deassert BYPASS_EN and FB_DIV[0] again (see note above) */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1));
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* switch VCLK and DCLK selection */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
+	return 0;
+}
+
+static const u32 r7xx_golden_registers[] =
+{
+	0x8d00, 0xffffffff, 0x0e0e0074,
+	0x8d04, 0xffffffff, 0x013a2b34,
+	0x9508, 0xffffffff, 0x00000002,
+	0x8b20, 0xffffffff, 0,
+	0x88c4, 0xffffffff, 0x000000c2,
+	0x28350, 0xffffffff, 0,
+	0x9058, 0xffffffff, 0x0fffc40f,
+	0x240c, 0xffffffff, 0x00000380,
+	0x733c, 0xffffffff, 0x00000002,
+	0x2650, 0x00040000, 0,
+	0x20bc, 0x00040000, 0,
+	0x7300, 0xffffffff, 0x001000f0
+};
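+
+/* The golden register tables are (offset, and_mask, or_value) triplets.
+ * A minimal sketch of how such a table is applied (the driver's
+ * radeon_program_register_sequence() plays this role; this is an
+ * illustration, not its exact implementation):
+ *
+ *	static void program_golden(struct radeon_device *rdev,
+ *				   const u32 *regs, u32 count)
+ *	{
+ *		u32 i, tmp;
+ *
+ *		for (i = 0; i < count; i += 3) {
+ *			tmp = RREG32(regs[i]);
+ *			tmp &= ~regs[i + 1];
+ *			tmp |= regs[i + 2];
+ *			WREG32(regs[i], tmp);
+ *		}
+ *	}
+ */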
+
+static const u32 r7xx_golden_dyn_gpr_registers[] =
+{
+	0x8db0, 0xffffffff, 0x98989898,
+	0x8db4, 0xffffffff, 0x98989898,
+	0x8db8, 0xffffffff, 0x98989898,
+	0x8dbc, 0xffffffff, 0x98989898,
+	0x8dc0, 0xffffffff, 0x98989898,
+	0x8dc4, 0xffffffff, 0x98989898,
+	0x8dc8, 0xffffffff, 0x98989898,
+	0x8dcc, 0xffffffff, 0x98989898,
+	0x88c4, 0xffffffff, 0x00000082
+};
+
+static const u32 rv770_golden_registers[] =
+{
+	0x562c, 0xffffffff, 0,
+	0x3f90, 0xffffffff, 0,
+	0x9148, 0xffffffff, 0,
+	0x3f94, 0xffffffff, 0,
+	0x914c, 0xffffffff, 0,
+	0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv770ce_golden_registers[] =
+{
+	0x562c, 0xffffffff, 0,
+	0x3f90, 0xffffffff, 0x00cc0000,
+	0x9148, 0xffffffff, 0x00cc0000,
+	0x3f94, 0xffffffff, 0x00cc0000,
+	0x914c, 0xffffffff, 0x00cc0000,
+	0x9b7c, 0xffffffff, 0x00fa0000,
+	0x3f8c, 0xffffffff, 0x00fa0000,
+	0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv770_mgcg_init[] =
+{
+	0x8bcc, 0xffffffff, 0x130300f9,
+	0x5448, 0xffffffff, 0x100,
+	0x55e4, 0xffffffff, 0x100,
+	0x160c, 0xffffffff, 0x100,
+	0x5644, 0xffffffff, 0x100,
+	0xc164, 0xffffffff, 0x100,
+	0x8a18, 0xffffffff, 0x100,
+	0x897c, 0xffffffff, 0x8000100,
+	0x8b28, 0xffffffff, 0x3c000100,
+	0x9144, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10000,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10001,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10002,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10003,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x0,
+	0x9870, 0xffffffff, 0x100,
+	0x8d58, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x0,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x1,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x2,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x3,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x4,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x5,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x6,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x7,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x8,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x9,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x8000,
+	0x9490, 0xffffffff, 0x0,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x1,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x2,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x3,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x4,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x5,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x6,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x7,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x8,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x9,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x8000,
+	0x9604, 0xffffffff, 0x0,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x1,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x2,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x3,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x4,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x5,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x6,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x7,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x8,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x9,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x80000000,
+	0x9030, 0xffffffff, 0x100,
+	0x9034, 0xffffffff, 0x100,
+	0x9038, 0xffffffff, 0x100,
+	0x903c, 0xffffffff, 0x100,
+	0x9040, 0xffffffff, 0x100,
+	0xa200, 0xffffffff, 0x100,
+	0xa204, 0xffffffff, 0x100,
+	0xa208, 0xffffffff, 0x100,
+	0xa20c, 0xffffffff, 0x100,
+	0x971c, 0xffffffff, 0x100,
+	0x915c, 0xffffffff, 0x00020001,
+	0x9160, 0xffffffff, 0x00040003,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00080007,
+	0x9174, 0xffffffff, 0x000a0009,
+	0x9178, 0xffffffff, 0x000c000b,
+	0x917c, 0xffffffff, 0x000e000d,
+	0x9180, 0xffffffff, 0x0010000f,
+	0x918c, 0xffffffff, 0x00120011,
+	0x9190, 0xffffffff, 0x00140013,
+	0x9194, 0xffffffff, 0x00020001,
+	0x9198, 0xffffffff, 0x00040003,
+	0x919c, 0xffffffff, 0x00060005,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000a0009,
+	0x91b0, 0xffffffff, 0x000c000b,
+	0x91b4, 0xffffffff, 0x000e000d,
+	0x91b8, 0xffffffff, 0x0010000f,
+	0x91c4, 0xffffffff, 0x00120011,
+	0x91c8, 0xffffffff, 0x00140013,
+	0x91cc, 0xffffffff, 0x00020001,
+	0x91d0, 0xffffffff, 0x00040003,
+	0x91d4, 0xffffffff, 0x00060005,
+	0x91e0, 0xffffffff, 0x00080007,
+	0x91e4, 0xffffffff, 0x000a0009,
+	0x91e8, 0xffffffff, 0x000c000b,
+	0x91ec, 0xffffffff, 0x00020001,
+	0x91f0, 0xffffffff, 0x00040003,
+	0x91f4, 0xffffffff, 0x00060005,
+	0x9200, 0xffffffff, 0x00080007,
+	0x9204, 0xffffffff, 0x000a0009,
+	0x9208, 0xffffffff, 0x000c000b,
+	0x920c, 0xffffffff, 0x000e000d,
+	0x9210, 0xffffffff, 0x0010000f,
+	0x921c, 0xffffffff, 0x00120011,
+	0x9220, 0xffffffff, 0x00140013,
+	0x9224, 0xffffffff, 0x00020001,
+	0x9228, 0xffffffff, 0x00040003,
+	0x922c, 0xffffffff, 0x00060005,
+	0x9238, 0xffffffff, 0x00080007,
+	0x923c, 0xffffffff, 0x000a0009,
+	0x9240, 0xffffffff, 0x000c000b,
+	0x9244, 0xffffffff, 0x000e000d,
+	0x9248, 0xffffffff, 0x0010000f,
+	0x9254, 0xffffffff, 0x00120011,
+	0x9258, 0xffffffff, 0x00140013,
+	0x925c, 0xffffffff, 0x00020001,
+	0x9260, 0xffffffff, 0x00040003,
+	0x9264, 0xffffffff, 0x00060005,
+	0x9270, 0xffffffff, 0x00080007,
+	0x9274, 0xffffffff, 0x000a0009,
+	0x9278, 0xffffffff, 0x000c000b,
+	0x927c, 0xffffffff, 0x000e000d,
+	0x9280, 0xffffffff, 0x0010000f,
+	0x928c, 0xffffffff, 0x00120011,
+	0x9290, 0xffffffff, 0x00140013,
+	0x9294, 0xffffffff, 0x00020001,
+	0x929c, 0xffffffff, 0x00040003,
+	0x92a0, 0xffffffff, 0x00060005,
+	0x92a4, 0xffffffff, 0x00080007
+};
+
+static const u32 rv710_golden_registers[] =
+{
+	0x3f90, 0x00ff0000, 0x00fc0000,
+	0x9148, 0x00ff0000, 0x00fc0000,
+	0x3f94, 0x00ff0000, 0x00fc0000,
+	0x914c, 0x00ff0000, 0x00fc0000,
+	0xb4c, 0x00000020, 0x00000020,
+	0xa180, 0xffffffff, 0x00003f3f
+};
+
+static const u32 rv710_mgcg_init[] =
+{
+	0x8bcc, 0xffffffff, 0x13030040,
+	0x5448, 0xffffffff, 0x100,
+	0x55e4, 0xffffffff, 0x100,
+	0x160c, 0xffffffff, 0x100,
+	0x5644, 0xffffffff, 0x100,
+	0xc164, 0xffffffff, 0x100,
+	0x8a18, 0xffffffff, 0x100,
+	0x897c, 0xffffffff, 0x8000100,
+	0x8b28, 0xffffffff, 0x3c000100,
+	0x9144, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10000,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x0,
+	0x9870, 0xffffffff, 0x100,
+	0x8d58, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x0,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x1,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x8000,
+	0x9490, 0xffffffff, 0x0,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x1,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x8000,
+	0x9604, 0xffffffff, 0x0,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x1,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x80000000,
+	0x9030, 0xffffffff, 0x100,
+	0x9034, 0xffffffff, 0x100,
+	0x9038, 0xffffffff, 0x100,
+	0x903c, 0xffffffff, 0x100,
+	0x9040, 0xffffffff, 0x100,
+	0xa200, 0xffffffff, 0x100,
+	0xa204, 0xffffffff, 0x100,
+	0xa208, 0xffffffff, 0x100,
+	0xa20c, 0xffffffff, 0x100,
+	0x971c, 0xffffffff, 0x100,
+	0x915c, 0xffffffff, 0x00020001,
+	0x9174, 0xffffffff, 0x00000003,
+	0x9178, 0xffffffff, 0x00050001,
+	0x917c, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00000004,
+	0x9190, 0xffffffff, 0x00070006,
+	0x9194, 0xffffffff, 0x00050001,
+	0x9198, 0xffffffff, 0x00030002,
+	0x91a8, 0xffffffff, 0x00000004,
+	0x91ac, 0xffffffff, 0x00070006,
+	0x91e8, 0xffffffff, 0x00000001,
+	0x9294, 0xffffffff, 0x00000001,
+	0x929c, 0xffffffff, 0x00000002,
+	0x92a0, 0xffffffff, 0x00040003,
+	0x9150, 0xffffffff, 0x4d940000
+};
+
+static const u32 rv730_golden_registers[] =
+{
+	0x3f90, 0x00ff0000, 0x00f00000,
+	0x9148, 0x00ff0000, 0x00f00000,
+	0x3f94, 0x00ff0000, 0x00f00000,
+	0x914c, 0x00ff0000, 0x00f00000,
+	0x900c, 0xffffffff, 0x003b033f,
+	0xb4c, 0x00000020, 0x00000020,
+	0xa180, 0xffffffff, 0x00003f3f
+};
+
+static const u32 rv730_mgcg_init[] =
+{
+	0x8bcc, 0xffffffff, 0x130300f9,
+	0x5448, 0xffffffff, 0x100,
+	0x55e4, 0xffffffff, 0x100,
+	0x160c, 0xffffffff, 0x100,
+	0x5644, 0xffffffff, 0x100,
+	0xc164, 0xffffffff, 0x100,
+	0x8a18, 0xffffffff, 0x100,
+	0x897c, 0xffffffff, 0x8000100,
+	0x8b28, 0xffffffff, 0x3c000100,
+	0x9144, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10000,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10001,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x0,
+	0x9870, 0xffffffff, 0x100,
+	0x8d58, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x0,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x1,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x2,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x3,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x4,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x5,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x6,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x7,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x8000,
+	0x9490, 0xffffffff, 0x0,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x1,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x2,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x3,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x4,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x5,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x6,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x7,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x8000,
+	0x9604, 0xffffffff, 0x0,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x1,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x2,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x3,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x4,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x5,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x6,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x7,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x80000000,
+	0x9030, 0xffffffff, 0x100,
+	0x9034, 0xffffffff, 0x100,
+	0x9038, 0xffffffff, 0x100,
+	0x903c, 0xffffffff, 0x100,
+	0x9040, 0xffffffff, 0x100,
+	0xa200, 0xffffffff, 0x100,
+	0xa204, 0xffffffff, 0x100,
+	0xa208, 0xffffffff, 0x100,
+	0xa20c, 0xffffffff, 0x100,
+	0x971c, 0xffffffff, 0x100,
+	0x915c, 0xffffffff, 0x00020001,
+	0x916c, 0xffffffff, 0x00040003,
+	0x9170, 0xffffffff, 0x00000005,
+	0x9178, 0xffffffff, 0x00050001,
+	0x917c, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00000004,
+	0x9190, 0xffffffff, 0x00070006,
+	0x9194, 0xffffffff, 0x00050001,
+	0x9198, 0xffffffff, 0x00030002,
+	0x91a8, 0xffffffff, 0x00000004,
+	0x91ac, 0xffffffff, 0x00070006,
+	0x91b0, 0xffffffff, 0x00050001,
+	0x91b4, 0xffffffff, 0x00030002,
+	0x91c4, 0xffffffff, 0x00000004,
+	0x91c8, 0xffffffff, 0x00070006,
+	0x91cc, 0xffffffff, 0x00050001,
+	0x91d0, 0xffffffff, 0x00030002,
+	0x91e0, 0xffffffff, 0x00000004,
+	0x91e4, 0xffffffff, 0x00070006,
+	0x91e8, 0xffffffff, 0x00000001,
+	0x91ec, 0xffffffff, 0x00050001,
+	0x91f0, 0xffffffff, 0x00030002,
+	0x9200, 0xffffffff, 0x00000004,
+	0x9204, 0xffffffff, 0x00070006,
+	0x9208, 0xffffffff, 0x00050001,
+	0x920c, 0xffffffff, 0x00030002,
+	0x921c, 0xffffffff, 0x00000004,
+	0x9220, 0xffffffff, 0x00070006,
+	0x9224, 0xffffffff, 0x00050001,
+	0x9228, 0xffffffff, 0x00030002,
+	0x9238, 0xffffffff, 0x00000004,
+	0x923c, 0xffffffff, 0x00070006,
+	0x9240, 0xffffffff, 0x00050001,
+	0x9244, 0xffffffff, 0x00030002,
+	0x9254, 0xffffffff, 0x00000004,
+	0x9258, 0xffffffff, 0x00070006,
+	0x9294, 0xffffffff, 0x00000001,
+	0x929c, 0xffffffff, 0x00000002,
+	0x92a0, 0xffffffff, 0x00040003,
+	0x92a4, 0xffffffff, 0x00000005
+};
+
+static const u32 rv740_golden_registers[] =
+{
+	0x88c4, 0xffffffff, 0x00000082,
+	0x28a50, 0xfffffffc, 0x00000004,
+	0x2650, 0x00040000, 0,
+	0x20bc, 0x00040000, 0,
+	0x733c, 0xffffffff, 0x00000002,
+	0x7300, 0xffffffff, 0x001000f0,
+	0x3f90, 0x00ff0000, 0,
+	0x9148, 0x00ff0000, 0,
+	0x3f94, 0x00ff0000, 0,
+	0x914c, 0x00ff0000, 0,
+	0x240c, 0xffffffff, 0x00000380,
+	0x8a14, 0x00000007, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ff0fff,
+	0x28a4c, 0xffffffff, 0x00004000,
+	0xa180, 0xffffffff, 0x00003f3f,
+	0x8d00, 0xffffffff, 0x0e0e003a,
+	0x8d04, 0xffffffff, 0x013a0e2a,
+	0x8c00, 0xffffffff, 0xe400000f,
+	0x8db0, 0xffffffff, 0x98989898,
+	0x8db4, 0xffffffff, 0x98989898,
+	0x8db8, 0xffffffff, 0x98989898,
+	0x8dbc, 0xffffffff, 0x98989898,
+	0x8dc0, 0xffffffff, 0x98989898,
+	0x8dc4, 0xffffffff, 0x98989898,
+	0x8dc8, 0xffffffff, 0x98989898,
+	0x8dcc, 0xffffffff, 0x98989898,
+	0x9058, 0xffffffff, 0x0fffc40f,
+	0x900c, 0xffffffff, 0x003b033f,
+	0x28350, 0xffffffff, 0,
+	0x8cf0, 0x1fffffff, 0x08e00420,
+	0x9508, 0xffffffff, 0x00000002,
+	0x88c4, 0xffffffff, 0x000000c2,
+	0x9698, 0x18000000, 0x18000000
+};
+
+static const u32 rv740_mgcg_init[] =
+{
+	0x8bcc, 0xffffffff, 0x13030100,
+	0x5448, 0xffffffff, 0x100,
+	0x55e4, 0xffffffff, 0x100,
+	0x160c, 0xffffffff, 0x100,
+	0x5644, 0xffffffff, 0x100,
+	0xc164, 0xffffffff, 0x100,
+	0x8a18, 0xffffffff, 0x100,
+	0x897c, 0xffffffff, 0x100,
+	0x8b28, 0xffffffff, 0x100,
+	0x9144, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10000,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10001,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10002,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x10003,
+	0x9a50, 0xffffffff, 0x100,
+	0x9a1c, 0xffffffff, 0x0,
+	0x9870, 0xffffffff, 0x100,
+	0x8d58, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x0,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x1,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x2,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x3,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x4,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x5,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x6,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x7,
+	0x9510, 0xffffffff, 0x100,
+	0x9500, 0xffffffff, 0x8000,
+	0x9490, 0xffffffff, 0x0,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x1,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x2,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x3,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x4,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x5,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x6,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x7,
+	0x949c, 0xffffffff, 0x100,
+	0x9490, 0xffffffff, 0x8000,
+	0x9604, 0xffffffff, 0x0,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x1,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x2,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x3,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x4,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x5,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x6,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x7,
+	0x9654, 0xffffffff, 0x100,
+	0x9604, 0xffffffff, 0x80000000,
+	0x9030, 0xffffffff, 0x100,
+	0x9034, 0xffffffff, 0x100,
+	0x9038, 0xffffffff, 0x100,
+	0x903c, 0xffffffff, 0x100,
+	0x9040, 0xffffffff, 0x100,
+	0xa200, 0xffffffff, 0x100,
+	0xa204, 0xffffffff, 0x100,
+	0xa208, 0xffffffff, 0x100,
+	0xa20c, 0xffffffff, 0x100,
+	0x971c, 0xffffffff, 0x100,
+	0x915c, 0xffffffff, 0x00020001,
+	0x9160, 0xffffffff, 0x00040003,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00080007,
+	0x9174, 0xffffffff, 0x000a0009,
+	0x9178, 0xffffffff, 0x000c000b,
+	0x917c, 0xffffffff, 0x000e000d,
+	0x9180, 0xffffffff, 0x0010000f,
+	0x918c, 0xffffffff, 0x00120011,
+	0x9190, 0xffffffff, 0x00140013,
+	0x9194, 0xffffffff, 0x00020001,
+	0x9198, 0xffffffff, 0x00040003,
+	0x919c, 0xffffffff, 0x00060005,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000a0009,
+	0x91b0, 0xffffffff, 0x000c000b,
+	0x91b4, 0xffffffff, 0x000e000d,
+	0x91b8, 0xffffffff, 0x0010000f,
+	0x91c4, 0xffffffff, 0x00120011,
+	0x91c8, 0xffffffff, 0x00140013,
+	0x91cc, 0xffffffff, 0x00020001,
+	0x91d0, 0xffffffff, 0x00040003,
+	0x91d4, 0xffffffff, 0x00060005,
+	0x91e0, 0xffffffff, 0x00080007,
+	0x91e4, 0xffffffff, 0x000a0009,
+	0x91e8, 0xffffffff, 0x000c000b,
+	0x91ec, 0xffffffff, 0x00020001,
+	0x91f0, 0xffffffff, 0x00040003,
+	0x91f4, 0xffffffff, 0x00060005,
+	0x9200, 0xffffffff, 0x00080007,
+	0x9204, 0xffffffff, 0x000a0009,
+	0x9208, 0xffffffff, 0x000c000b,
+	0x920c, 0xffffffff, 0x000e000d,
+	0x9210, 0xffffffff, 0x0010000f,
+	0x921c, 0xffffffff, 0x00120011,
+	0x9220, 0xffffffff, 0x00140013,
+	0x9224, 0xffffffff, 0x00020001,
+	0x9228, 0xffffffff, 0x00040003,
+	0x922c, 0xffffffff, 0x00060005,
+	0x9238, 0xffffffff, 0x00080007,
+	0x923c, 0xffffffff, 0x000a0009,
+	0x9240, 0xffffffff, 0x000c000b,
+	0x9244, 0xffffffff, 0x000e000d,
+	0x9248, 0xffffffff, 0x0010000f,
+	0x9254, 0xffffffff, 0x00120011,
+	0x9258, 0xffffffff, 0x00140013,
+	0x9294, 0xffffffff, 0x00020001,
+	0x929c, 0xffffffff, 0x00040003,
+	0x92a0, 0xffffffff, 0x00060005,
+	0x92a4, 0xffffffff, 0x00080007
+};
+
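+/* Each table above and below is a flat array of {register offset, AND mask,
+ * OR value} triples. radeon_program_register_sequence() applies them in
+ * order: a 0xffffffff mask writes the value directly, anything else does a
+ * read-modify-write of the masked bits. */
+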
+static void rv770_init_golden_registers(struct radeon_device *rdev)
+{
+	switch (rdev->family) {
+	case CHIP_RV770:
+		radeon_program_register_sequence(rdev,
+						 r7xx_golden_registers,
+						 (const u32)ARRAY_SIZE(r7xx_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 r7xx_golden_dyn_gpr_registers,
+						 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+		if (rdev->pdev->device == 0x994e)
+			radeon_program_register_sequence(rdev,
+							 rv770ce_golden_registers,
+							 (const u32)ARRAY_SIZE(rv770ce_golden_registers));
+		else
+			radeon_program_register_sequence(rdev,
+							 rv770_golden_registers,
+							 (const u32)ARRAY_SIZE(rv770_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 rv770_mgcg_init,
+						 (const u32)ARRAY_SIZE(rv770_mgcg_init));
+		break;
+	case CHIP_RV730:
+		radeon_program_register_sequence(rdev,
+						 r7xx_golden_registers,
+						 (const u32)ARRAY_SIZE(r7xx_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 r7xx_golden_dyn_gpr_registers,
+						 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+		radeon_program_register_sequence(rdev,
+						 rv730_golden_registers,
+						 (const u32)ARRAY_SIZE(rv730_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 rv730_mgcg_init,
+						 (const u32)ARRAY_SIZE(rv730_mgcg_init));
+		break;
+	case CHIP_RV710:
+		radeon_program_register_sequence(rdev,
+						 r7xx_golden_registers,
+						 (const u32)ARRAY_SIZE(r7xx_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 r7xx_golden_dyn_gpr_registers,
+						 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+		radeon_program_register_sequence(rdev,
+						 rv710_golden_registers,
+						 (const u32)ARRAY_SIZE(rv710_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 rv710_mgcg_init,
+						 (const u32)ARRAY_SIZE(rv710_mgcg_init));
+		break;
+	case CHIP_RV740:
+		radeon_program_register_sequence(rdev,
+						 rv740_golden_registers,
+						 (const u32)ARRAY_SIZE(rv740_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 rv740_mgcg_init,
+						 (const u32)ARRAY_SIZE(rv740_mgcg_init));
+		break;
+	default:
+		break;
+	}
+}
+
+#define PCIE_BUS_CLK                10000
+#define TCLK                        (PCIE_BUS_CLK / 10)
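+/* Assumed: both constants are in the 10 kHz units used by
+ * rdev->clock.spll.reference_freq, i.e. PCIE_BUS_CLK is 100 MHz and
+ * TCLK 10 MHz. */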
+
+/**
+ * rv770_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (r7xx-cayman).
+ */
+u32 rv770_get_xclk(struct radeon_device *rdev)
+{
+	u32 reference_clock = rdev->clock.spll.reference_freq;
+	u32 tmp = RREG32(CG_CLKPIN_CNTL);
+
+	if (tmp & MUX_TCLK_TO_XCLK)
+		return TCLK;
+
+	if (tmp & XTALIN_DIVIDE)
+		return reference_clock / 4;
+
+	return reference_clock;
+}
+
+int rv770_uvd_resume(struct radeon_device *rdev)
+{
+	uint64_t addr;
+	uint32_t chip_id, size;
+	int r;
+
+	r = radeon_uvd_resume(rdev);
+	if (r)
+		return r;
+
+	/* program the VCPU memory controller bits 0-27 */
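+	/* offsets and sizes here are in 8-byte units (hence the >> 3); the
+	 * firmware image is mapped first, followed by the stack and heap */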
+	addr = rdev->uvd.gpu_addr >> 3;
+	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+	WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+	WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+	addr += size;
+	size = RADEON_UVD_STACK_SIZE >> 3;
+	WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+	WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+	addr += size;
+	size = RADEON_UVD_HEAP_SIZE >> 3;
+	WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+	WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+	/* bits 28-31 */
+	addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+	WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+	/* bits 32-39 */
+	addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+	WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+	/* tell firmware which hardware it is running on */
+	switch (rdev->family) {
+	default:
+		return -EINVAL;
+	case CHIP_RV710:
+		chip_id = 0x01000005;
+		break;
+	case CHIP_RV730:
+		chip_id = 0x01000006;
+		break;
+	case CHIP_RV740:
+		chip_id = 0x01000007;
+		break;
+	case CHIP_CYPRESS:
+	case CHIP_HEMLOCK:
+		chip_id = 0x01000008;
+		break;
+	case CHIP_JUNIPER:
+		chip_id = 0x01000009;
+		break;
+	case CHIP_REDWOOD:
+		chip_id = 0x0100000a;
+		break;
+	case CHIP_CEDAR:
+		chip_id = 0x0100000b;
+		break;
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+		chip_id = 0x0100000c;
+		break;
+	case CHIP_PALM:
+		chip_id = 0x0100000e;
+		break;
+	case CHIP_CAYMAN:
+		chip_id = 0x0100000f;
+		break;
+	case CHIP_BARTS:
+		chip_id = 0x01000010;
+		break;
+	case CHIP_TURKS:
+		chip_id = 0x01000011;
+		break;
+	case CHIP_CAICOS:
+		chip_id = 0x01000012;
+		break;
+	case CHIP_TAHITI:
+		chip_id = 0x01000014;
+		break;
+	case CHIP_VERDE:
+		chip_id = 0x01000015;
+		break;
+	case CHIP_PITCAIRN:
+		chip_id = 0x01000016;
+		break;
+	case CHIP_ARUBA:
+		chip_id = 0x01000017;
+		break;
+	}
+	WREG32(UVD_VCPU_CHIP_ID, chip_id);
+
+	return 0;
+}
+
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+	int i;
+
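+	/* Program the new scanout base with the double-buffered update lock
+	 * held so that all of the address registers latch atomically. */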
+	/* Lock the graphics update lock */
+	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	if (radeon_crtc->crtc_id) {
+		WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+		WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+	} else {
+		WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+		WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+	}
+	WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+			break;
+		udelay(1);
+	}
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
+
+/* get temperature in millidegrees */
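+/* The raw field is clamped at both extremes (0x400 low, 0x200 high), bit
+ * 0x100 sign-extends a negative 9-bit reading, and each LSB is 0.5 C;
+ * e.g. a raw value of 0x50 (80) yields 80 * 1000 / 2 = 40000 mC (40 C). */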
+int rv770_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+		ASIC_T_SHIFT;
+	int actual_temp;
+
+	if (temp & 0x400)
+		actual_temp = -256;
+	else if (temp & 0x200)
+		actual_temp = 255;
+	else if (temp & 0x100) {
+		actual_temp = temp & 0x1ff;
+		actual_temp |= ~0x1ff;
+	} else
+		actual_temp = temp & 0xff;
+
+	return (actual_temp * 1000) / 2;
+}
+
+void rv770_pm_misc(struct radeon_device *rdev)
+{
+	int req_ps_idx = rdev->pm.requested_power_state_index;
+	int req_cm_idx = rdev->pm.requested_clock_mode_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
+	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
+
+	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
+		/* 0xff01 is a flag rather than an actual voltage */
+		if (voltage->voltage == 0xff01)
+			return;
+		if (voltage->voltage != rdev->pm.current_vddc) {
+			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
+			rdev->pm.current_vddc = voltage->voltage;
+			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+		}
+	}
+}
+
+/*
+ * GART
+ */
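+/* VM context 0 backs the GTT aperture; accesses outside the mapped range
+ * fault to the dummy page programmed below. */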
+static int rv770_pcie_gart_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int r, i;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+	/* Set up the L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Set up TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	if (rdev->family == CHIP_RV740)
+		WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	for (i = 1; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+	r600_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void rv770_pcie_gart_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	/* Disable all tables */
+	for (i = 0; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+	/* Set up the L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Set up TLB control */
+	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void rv770_pcie_gart_fini(struct radeon_device *rdev)
+{
+	radeon_gart_fini(rdev);
+	rv770_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+}
+
+static void rv770_agp_enable(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int i;
+
+	/* Set up the L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+				EFFECTIVE_L2_QUEUE_SIZE(7));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+	/* Set up TLB control */
+	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+	for (i = 0; i < 7; i++)
+		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
+static void rv770_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+	/* r7xx hw bug.  Read from HDP_DEBUG1 rather
+	 * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
+	 */
+	tmp = RREG32(HDP_DEBUG1);
+
+	rv515_mc_stop(rdev, &save);
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	/* Lock out access through the VGA aperture */
+	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	if (rdev->flags & RADEON_IS_AGP) {
+		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+			/* VRAM before AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.vram_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.gtt_end >> 12);
+		} else {
+			/* VRAM after AGP */
+			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+				rdev->mc.gtt_start >> 12);
+			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+				rdev->mc.vram_end >> 12);
+		}
+	} else {
+		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+			rdev->mc.vram_start >> 12);
+		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+			rdev->mc.vram_end >> 12);
+	}
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
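+	/* FB_LOCATION packs the VRAM top (bits 31:16) and base (bits 15:0),
+	 * both in 16 MB units (address >> 24) */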
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
+		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+	} else {
+		WREG32(MC_VM_AGP_BASE, 0);
+		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	}
+	if (r600_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	rv515_mc_resume(rdev, &save);
+	/* we need to own VRAM, so turn off the VGA renderer here
+	 * to stop it from overwriting our objects */
+	rv515_vga_render_disable(rdev);
+}
+
+/*
+ * CP.
+ */
+void r700_cp_stop(struct radeon_device *rdev)
+{
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+	WREG32(SCRATCH_UMSK, 0);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+}
+
+static int rv770_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw)
+		return -EINVAL;
+
+	r700_cp_stop(rdev);
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+
+	/* Reset cp */
+	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+
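+	/* the firmware images are stored big-endian; be32_to_cpup() converts
+	 * each dword to CPU order before it is written to the ucode RAMs */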
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
+		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
+void r700_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r700_cp_stop(rdev);
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
+/*
+ * Core functions
+ */
+static void rv770_gpu_init(struct radeon_device *rdev)
+{
+	int i, j, num_qd_pipes;
+	u32 ta_aux_cntl;
+	u32 sx_debug_1;
+	u32 smx_dc_ctl0;
+	u32 db_debug3;
+	u32 num_gs_verts_per_thread;
+	u32 vgt_gs_per_es;
+	u32 gs_prim_buffer_depth = 0;
+	u32 sq_ms_fifo_sizes;
+	u32 sq_config;
+	u32 sq_thread_resource_mgmt;
+	u32 hdp_host_path_cntl;
+	u32 sq_dyn_gpr_size_simd_ab_0;
+	u32 gb_tiling_config = 0;
+	u32 cc_rb_backend_disable = 0;
+	u32 cc_gc_shader_pipe_config = 0;
+	u32 mc_arb_ramcfg;
+	u32 db_debug4, tmp;
+	u32 inactive_pipes, shader_pipe_config;
+	u32 disabled_rb_mask;
+	unsigned active_number;
+
+	/* setup chip specs */
+	rdev->config.rv770.tiling_group_size = 256;
+	switch (rdev->family) {
+	case CHIP_RV770:
+		rdev->config.rv770.max_pipes = 4;
+		rdev->config.rv770.max_tile_pipes = 8;
+		rdev->config.rv770.max_simds = 10;
+		rdev->config.rv770.max_backends = 4;
+		rdev->config.rv770.max_gprs = 256;
+		rdev->config.rv770.max_threads = 248;
+		rdev->config.rv770.max_stack_entries = 512;
+		rdev->config.rv770.max_hw_contexts = 8;
+		rdev->config.rv770.max_gs_threads = 16 * 2;
+		rdev->config.rv770.sx_max_export_size = 128;
+		rdev->config.rv770.sx_max_export_pos_size = 16;
+		rdev->config.rv770.sx_max_export_smx_size = 112;
+		rdev->config.rv770.sq_num_cf_insts = 2;
+
+		rdev->config.rv770.sx_num_of_sets = 7;
+		rdev->config.rv770.sc_prim_fifo_size = 0xF9;
+		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+		break;
+	case CHIP_RV730:
+		rdev->config.rv770.max_pipes = 2;
+		rdev->config.rv770.max_tile_pipes = 4;
+		rdev->config.rv770.max_simds = 8;
+		rdev->config.rv770.max_backends = 2;
+		rdev->config.rv770.max_gprs = 128;
+		rdev->config.rv770.max_threads = 248;
+		rdev->config.rv770.max_stack_entries = 256;
+		rdev->config.rv770.max_hw_contexts = 8;
+		rdev->config.rv770.max_gs_threads = 16 * 2;
+		rdev->config.rv770.sx_max_export_size = 256;
+		rdev->config.rv770.sx_max_export_pos_size = 32;
+		rdev->config.rv770.sx_max_export_smx_size = 224;
+		rdev->config.rv770.sq_num_cf_insts = 2;
+
+		rdev->config.rv770.sx_num_of_sets = 7;
+		rdev->config.rv770.sc_prim_fifo_size = 0xf9;
+		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
+			rdev->config.rv770.sx_max_export_pos_size -= 16;
+			rdev->config.rv770.sx_max_export_smx_size += 16;
+		}
+		break;
+	case CHIP_RV710:
+		rdev->config.rv770.max_pipes = 2;
+		rdev->config.rv770.max_tile_pipes = 2;
+		rdev->config.rv770.max_simds = 2;
+		rdev->config.rv770.max_backends = 1;
+		rdev->config.rv770.max_gprs = 256;
+		rdev->config.rv770.max_threads = 192;
+		rdev->config.rv770.max_stack_entries = 256;
+		rdev->config.rv770.max_hw_contexts = 4;
+		rdev->config.rv770.max_gs_threads = 8 * 2;
+		rdev->config.rv770.sx_max_export_size = 128;
+		rdev->config.rv770.sx_max_export_pos_size = 16;
+		rdev->config.rv770.sx_max_export_smx_size = 112;
+		rdev->config.rv770.sq_num_cf_insts = 1;
+
+		rdev->config.rv770.sx_num_of_sets = 7;
+		rdev->config.rv770.sc_prim_fifo_size = 0x40;
+		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+		break;
+	case CHIP_RV740:
+		rdev->config.rv770.max_pipes = 4;
+		rdev->config.rv770.max_tile_pipes = 4;
+		rdev->config.rv770.max_simds = 8;
+		rdev->config.rv770.max_backends = 4;
+		rdev->config.rv770.max_gprs = 256;
+		rdev->config.rv770.max_threads = 248;
+		rdev->config.rv770.max_stack_entries = 512;
+		rdev->config.rv770.max_hw_contexts = 8;
+		rdev->config.rv770.max_gs_threads = 16 * 2;
+		rdev->config.rv770.sx_max_export_size = 256;
+		rdev->config.rv770.sx_max_export_pos_size = 32;
+		rdev->config.rv770.sx_max_export_smx_size = 224;
+		rdev->config.rv770.sq_num_cf_insts = 2;
+
+		rdev->config.rv770.sx_num_of_sets = 7;
+		rdev->config.rv770.sc_prim_fifo_size = 0x100;
+		rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
+
+		if (rdev->config.rv770.sx_max_export_pos_size > 16) {
+			rdev->config.rv770.sx_max_export_pos_size -= 16;
+			rdev->config.rv770.sx_max_export_smx_size += 16;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Initialize HDP */
+	j = 0;
+	for (i = 0; i < 32; i++) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+		j += 0x18;
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	/* setup tiling, simd, pipe config */
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+	inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
+	for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
+		if (!(inactive_pipes & tmp)) {
+			active_number++;
+		}
+		tmp <<= 1;
+	}
+	if (active_number == 1) {
+		WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
+	} else {
+		WREG32(SPI_CONFIG_CNTL, 0);
+	}
+
+	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+	tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
+	if (tmp < rdev->config.rv770.max_backends) {
+		rdev->config.rv770.max_backends = tmp;
+	}
+
+	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+	tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
+	if (tmp < rdev->config.rv770.max_pipes) {
+		rdev->config.rv770.max_pipes = tmp;
+	}
+	tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
+	if (tmp < rdev->config.rv770.max_simds) {
+		rdev->config.rv770.max_simds = tmp;
+	}
+
+	switch (rdev->config.rv770.max_tile_pipes) {
+	case 1:
+	default:
+		gb_tiling_config = PIPE_TILING(0);
+		break;
+	case 2:
+		gb_tiling_config = PIPE_TILING(1);
+		break;
+	case 4:
+		gb_tiling_config = PIPE_TILING(2);
+		break;
+	case 8:
+		gb_tiling_config = PIPE_TILING(3);
+		break;
+	}
+	rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
+
+	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+	tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
+					R7XX_MAX_BACKENDS, disabled_rb_mask);
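+	/* the remapped backend mask lands in the BACKEND_MAP field
+	 * (bits 16 and up) of GB_TILING_CONFIG */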
+	gb_tiling_config |= tmp << 16;
+	rdev->config.rv770.backend_map = tmp;
+
+	if (rdev->family == CHIP_RV770)
+		gb_tiling_config |= BANK_TILING(1);
+	else {
+		if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+			gb_tiling_config |= BANK_TILING(1);
+		else
+			gb_tiling_config |= BANK_TILING(0);
+	}
+	rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
+	gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+	if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
+		gb_tiling_config |= ROW_TILING(3);
+		gb_tiling_config |= SAMPLE_SPLIT(3);
+	} else {
+		gb_tiling_config |=
+			ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
+		gb_tiling_config |=
+			SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
+	}
+
+	gb_tiling_config |= BANK_SWAPS(1);
+	rdev->config.rv770.tile_config = gb_tiling_config;
+
+	WREG32(GB_TILING_CONFIG, gb_tiling_config);
+	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+	WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
+	WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
+	if (rdev->family == CHIP_RV730) {
+		WREG32(UVD_UDEC_DB_TILING_CONFIG, (gb_tiling_config & 0xffff));
+		WREG32(UVD_UDEC_DBW_TILING_CONFIG, (gb_tiling_config & 0xffff));
+		WREG32(UVD_UDEC_TILING_CONFIG, (gb_tiling_config & 0xffff));
+	}
+
+	WREG32(CGTS_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+	WREG32(CGTS_USER_TCC_DISABLE, 0);
+
+	num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+	WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
+	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+				     ROQ_IB2_START(0x2b)));
+
+	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
+
+	ta_aux_cntl = RREG32(TA_CNTL_AUX);
+	WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+	smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
+	smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
+	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+	if (rdev->family != CHIP_RV740)
+		WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
+				       GS_FLUSH_CTL(4) |
+				       ACK_FLUSH_CTL(3) |
+				       SYNC_FLUSH_CTL));
+
+	if (rdev->family != CHIP_RV770)
+		WREG32(SMX_SAR_CTL0, 0x00003f3f);
+
+	db_debug3 = RREG32(DB_DEBUG3);
+	db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
+	switch (rdev->family) {
+	case CHIP_RV770:
+	case CHIP_RV740:
+		db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
+		break;
+	case CHIP_RV710:
+	case CHIP_RV730:
+	default:
+		db_debug3 |= DB_CLK_OFF_DELAY(2);
+		break;
+	}
+	WREG32(DB_DEBUG3, db_debug3);
+
+	if (rdev->family != CHIP_RV770) {
+		db_debug4 = RREG32(DB_DEBUG4);
+		db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
+		WREG32(DB_DEBUG4, db_debug4);
+	}
+
+	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
+					POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
+					SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
+
+	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
+			    DONE_FIFO_HIWATER(0xe0) |
+			    ALU_UPDATE_FIFO_HIWATER(0x8));
+	switch (rdev->family) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV710:
+		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
+		break;
+	case CHIP_RV740:
+	default:
+		sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
+		break;
+	}
+	WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
+
+	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
+	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
+	 */
+	sq_config = RREG32(SQ_CONFIG);
+	sq_config &= ~(PS_PRIO(3) |
+		       VS_PRIO(3) |
+		       GS_PRIO(3) |
+		       ES_PRIO(3));
+	sq_config |= (DX9_CONSTS |
+		      VC_ENABLE |
+		      EXPORT_SRC_C |
+		      PS_PRIO(0) |
+		      VS_PRIO(1) |
+		      GS_PRIO(2) |
+		      ES_PRIO(3));
+	if (rdev->family == CHIP_RV710)
+		/* no vertex cache */
+		sq_config &= ~VC_ENABLE;
+
+	WREG32(SQ_CONFIG, sq_config);
+
+	WREG32(SQ_GPR_RESOURCE_MGMT_1,  (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
+					 NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
+					 NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));
+
+	WREG32(SQ_GPR_RESOURCE_MGMT_2,  (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
+					 NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));
+
+	sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
+				   NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
+				   NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
+	if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
+		sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
+	else
+		sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
+	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
+
+	WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
+						     NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
+
+	WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
+						     NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
+
+	sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
+				     SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
+				     SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
+				     SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));
+
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
+	WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	if (rdev->family == CHIP_RV710)
+		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
+						AUTO_INVLD_EN(ES_AND_GS_AUTO)));
+	else
+		WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
+						AUTO_INVLD_EN(ES_AND_GS_AUTO)));
+
+	switch (rdev->family) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+	case CHIP_RV740:
+		gs_prim_buffer_depth = 384;
+		break;
+	case CHIP_RV710:
+		gs_prim_buffer_depth = 128;
+		break;
+	default:
+		break;
+	}
+
+	num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
+	vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
+	/* Max value for this is 256 */
+	if (vgt_gs_per_es > 256)
+		vgt_gs_per_es = 256;
+
+	WREG32(VGT_ES_PER_GS, 128);
+	WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
+	WREG32(VGT_GS_PER_VS, 2);
+
+	/* more default values; the 2D/3D driver should adjust as needed */
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+	WREG32(VGT_STRMOUT_EN, 0);
+	WREG32(SX_MISC, 0);
+	WREG32(PA_SC_MODE_CNTL, 0);
+	WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
+	WREG32(PA_SC_AA_CONFIG, 0);
+	WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
+	WREG32(PA_SC_LINE_STIPPLE, 0);
+	WREG32(SPI_INPUT_Z, 0);
+	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
+	WREG32(CB_COLOR7_FRAG, 0);
+
+	/* clear render buffer base addresses */
+	WREG32(CB_COLOR0_BASE, 0);
+	WREG32(CB_COLOR1_BASE, 0);
+	WREG32(CB_COLOR2_BASE, 0);
+	WREG32(CB_COLOR3_BASE, 0);
+	WREG32(CB_COLOR4_BASE, 0);
+	WREG32(CB_COLOR5_BASE, 0);
+	WREG32(CB_COLOR6_BASE, 0);
+	WREG32(CB_COLOR7_BASE, 0);
+
+	WREG32(TCP_CNTL, 0);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
+
+	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
+					  NUM_CLIP_SEQ(3)));
+	WREG32(VC_ENHANCE, 0);
+}
+
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_bf, size_af;
+
+	if (mc->mc_vram_size > 0xE0000000) {
+		/* leave room for at least 512M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xE0000000;
+		mc->mc_vram_size = 0xE0000000;
+	}
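+	/* With AGP the GTT aperture is fixed; put VRAM in whichever gap
+	 * (below gtt_start or above gtt_end) is larger, shrinking VRAM
+	 * further if it still does not fit. */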
+	if (rdev->flags & RADEON_IS_AGP) {
+		size_bf = mc->gtt_start;
+		size_af = mc->mc_mask - mc->gtt_end;
+		if (size_bf > size_af) {
+			if (mc->mc_vram_size > size_bf) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_bf;
+				mc->mc_vram_size = size_bf;
+			}
+			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+		} else {
+			if (mc->mc_vram_size > size_af) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_af;
+				mc->mc_vram_size = size_af;
+			}
+			mc->vram_start = mc->gtt_end + 1;
+		}
+		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+				mc->mc_vram_size >> 20, mc->vram_start,
+				mc->vram_end, mc->real_vram_size >> 20);
+	} else {
+		radeon_vram_location(rdev, &rdev->mc, 0);
+		rdev->mc.gtt_base_align = 0;
+		radeon_gtt_location(rdev, mc);
+	}
+}
+
+static int rv770_mc_init(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int chansize, numchan;
+
+	/* Get VRAM information */
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32(MC_ARB_RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE) {
+		chansize = 16;
+	} else if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	}
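+	/* e.g. 4 channels of 64 bits each give a 256-bit memory bus */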
+	rdev->mc.vram_width = numchan * chansize;
+	/* Could the aperture size report 0? */
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	/* Setup GPU memory space */
+	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	r700_vram_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+
+	return 0;
+}
+
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+		  uint64_t src_offset, uint64_t dst_offset,
+		  unsigned num_gpu_pages,
+		  struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
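+	/* each COPY packet moves at most 0xFFFF dwords and takes 5 ring
+	 * dwords; the extra 8 leave room for the semaphore sync and fence */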
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFF)
+			cur_size_in_dw = 0xFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
+static int rv770_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	/* enable pcie gen2 link */
+	rv770_pcie_gen2_enable(rdev);
+
+	rv770_mc_program(rdev);
+
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	if (rdev->flags & RADEON_IS_AGP) {
+		rv770_agp_enable(rdev);
+	} else {
+		r = rv770_pcie_gart_enable(rdev);
+		if (r)
+			return r;
+	}
+
+	rv770_gpu_init(rdev);
+	r = r600_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy.copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	r = rv770_uvd_resume(rdev);
+	if (!r) {
+		r = radeon_fence_driver_start_ring(rdev,
+						   R600_RING_TYPE_UVD_INDEX);
+		if (r)
+			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+	}
+
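+	/* a zero ring size leaves UVD disabled for the rest of startup */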
+	if (r)
+		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	r600_irq_set(rdev);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR, DMA_RB_WPTR,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
+	r = rv770_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = r600_cp_resume(rdev);
+	if (r)
+		return r;
+
+	r = r600_dma_resume(rdev);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+	if (ring->ring_size) {
+		r = radeon_ring_init(rdev, ring, ring->ring_size,
+				     R600_WB_UVD_RPTR_OFFSET,
+				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+				     0, 0xfffff, RADEON_CP_PACKET2);
+		if (!r)
+			r = r600_uvd_init(rdev);
+
+		if (r)
+			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio init failed\n");
+		return r;
+	}
+
+	return 0;
+}
+
+int rv770_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Do not reset the GPU before posting; on rv770 hardware, unlike
+	 * r500, posting performs the tasks needed to bring the GPU back
+	 * into good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	/* init golden registers */
+	rv770_init_golden_registers(rdev);
+
+	rdev->accel_working = true;
+	r = rv770_startup(rdev);
+	if (r) {
+		DRM_ERROR("r600 startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+
+	return r;
+}
+
+int rv770_suspend(struct radeon_device *rdev)
+{
+	r600_audio_fini(rdev);
+	r600_uvd_stop(rdev);
+	radeon_uvd_suspend(rdev);
+	r700_cp_stop(rdev);
+	r600_dma_stop(rdev);
+	r600_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	rv770_pcie_gart_disable(rdev);
+
+	return 0;
+}
+
+/* The plan is to move initialization into this function and use helper
+ * functions so that radeon_device_init does little more than call
+ * ASIC-specific functions. This should also allow removing a number of
+ * callbacks such as vram_info.
+ */
+int rv770_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for R700 GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* init golden registers */
+	rv770_init_golden_registers(rdev);
+	/* Initialize scratch registers */
+	r600_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			radeon_agp_disable(rdev);
+	}
+	r = rv770_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
+	r = radeon_uvd_init(rdev);
+	if (!r) {
+		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+		r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
+			       4096);
+	}
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = rv770_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r700_cp_fini(rdev);
+		r600_dma_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rv770_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	return 0;
+}
+
+void rv770_fini(struct radeon_device *rdev)
+{
+	r600_blit_fini(rdev);
+	r700_cp_fini(rdev);
+	r600_dma_fini(rdev);
+	r600_irq_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	rv770_pcie_gart_fini(rdev);
+	r600_uvd_stop(rdev);
+	radeon_uvd_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, lanes, speed_cntl, tmp;
+	u16 link_cntl2;
+
+	if (radeon_pcie_gen2 == 0)
+		return;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
+		(rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
+		return;
+
+	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
+	/* advertise upconfig capability */
+	link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+	link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+	WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+	if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+		lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+		link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+				     LC_RECONFIG_ARC_MISSING_ESCAPE);
+		link_width_cntl |= lanes | LC_RECONFIG_NOW |
+			LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	} else {
+		link_width_cntl |= LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+
+	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		tmp = RREG32(0x541c);
+		WREG32(0x541c, tmp | 0x8);
+		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+		link_cntl2 = RREG16(0x4088);
+		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+		link_cntl2 |= 0x2;
+		WREG16(0x4088, link_cntl2);
+		WREG32(MM_CFGREGS_CNTL, 0);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/rv770d.h b/linux-imx/drivers/gpu/drm/radeon/rv770d.h
new file mode 100644
index 0000000..85b1626
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/rv770d.h
@@ -0,0 +1,717 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#ifndef RV770_H
+#define RV770_H
+
+#define R7XX_MAX_SH_GPRS           256
+#define R7XX_MAX_TEMP_GPRS         16
+#define R7XX_MAX_SH_THREADS        256
+#define R7XX_MAX_SH_STACK_ENTRIES  4096
+#define R7XX_MAX_BACKENDS          8
+#define R7XX_MAX_BACKENDS_MASK     0xff
+#define R7XX_MAX_SIMDS             16
+#define R7XX_MAX_SIMDS_MASK        0xffff
+#define R7XX_MAX_PIPES             8
+#define R7XX_MAX_PIPES_MASK        0xff
+
+/* discrete uvd clocks */
+#define CG_UPLL_FUNC_CNTL				0x718
+#	define UPLL_RESET_MASK				0x00000001
+#	define UPLL_SLEEP_MASK				0x00000002
+#	define UPLL_BYPASS_EN_MASK			0x00000004
+#	define UPLL_CTLREQ_MASK				0x00000008
+#	define UPLL_REF_DIV(x)				((x) << 16)
+#	define UPLL_REF_DIV_MASK			0x003F0000
+#	define UPLL_CTLACK_MASK				0x40000000
+#	define UPLL_CTLACK2_MASK			0x80000000
+#define CG_UPLL_FUNC_CNTL_2				0x71c
+#	define UPLL_SW_HILEN(x)				((x) << 0)
+#	define UPLL_SW_LOLEN(x)				((x) << 4)
+#	define UPLL_SW_HILEN2(x)			((x) << 8)
+#	define UPLL_SW_LOLEN2(x)			((x) << 12)
+#	define UPLL_SW_MASK				0x0000FFFF
+#	define VCLK_SRC_SEL(x)				((x) << 20)
+#	define VCLK_SRC_SEL_MASK			0x01F00000
+#	define DCLK_SRC_SEL(x)				((x) << 25)
+#	define DCLK_SRC_SEL_MASK			0x3E000000
+#define CG_UPLL_FUNC_CNTL_3				0x720
+#	define UPLL_FB_DIV(x)				((x) << 0)
+#	define UPLL_FB_DIV_MASK				0x01FFFFFF
+
+/* Registers */
+#define	CB_COLOR0_BASE					0x28040
+#define	CB_COLOR1_BASE					0x28044
+#define	CB_COLOR2_BASE					0x28048
+#define	CB_COLOR3_BASE					0x2804C
+#define	CB_COLOR4_BASE					0x28050
+#define	CB_COLOR5_BASE					0x28054
+#define	CB_COLOR6_BASE					0x28058
+#define	CB_COLOR7_BASE					0x2805C
+#define	CB_COLOR7_FRAG					0x280FC
+
+#define	CC_GC_SHADER_PIPE_CONFIG			0x8950
+#define	CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)				((x) << 16)
+#define	CC_SYS_RB_BACKEND_DISABLE			0x3F88
+
+#define	CGTS_SYS_TCC_DISABLE				0x3F90
+#define	CGTS_TCC_DISABLE				0x9148
+#define	CGTS_USER_SYS_TCC_DISABLE			0x3F94
+#define	CGTS_USER_TCC_DISABLE				0x914C
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define	CP_ME_CNTL					0x86D8
+#define		CP_ME_HALT					(1<<28)
+#define		CP_PFP_HALT					(1<<26)
+#define	CP_ME_RAM_DATA					0xC160
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		STQ_SPLIT(x)					((x) << 0)
+#define	CP_PERFMON_CNTL					0x87FC
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_QUEUE_THRESHOLDS				0x8760
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define	CP_RB_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define	CP_RB_RPTR					0x8700
+#define	CP_RB_RPTR_ADDR					0xC10C
+#define	CP_RB_RPTR_ADDR_HI				0xC110
+#define	CP_RB_RPTR_WR					0xC108
+#define	CP_RB_WPTR					0xC114
+#define	CP_RB_WPTR_ADDR					0xC118
+#define	CP_RB_WPTR_ADDR_HI				0xC11C
+#define	CP_RB_WPTR_DELAY				0x8704
+#define	CP_SEM_WAIT_TIMER				0x85BC
+
+#define	DB_DEBUG3					0x98B0
+#define		DB_CLK_OFF_DELAY(x)				((x) << 11)
+#define DB_DEBUG4					0x9B8C
+#define		DISABLE_TILE_COVERED_FOR_PS_ITER		(1 << 6)
+
+#define	DCP_TILING_CONFIG				0x6CA0
+#define		PIPE_TILING(x)					((x) << 1)
+#define		BANK_TILING(x)					((x) << 4)
+#define		GROUP_SIZE(x)					((x) << 6)
+#define		ROW_TILING(x)					((x) << 8)
+#define		BANK_SWAPS(x)					((x) << 11)
+#define		SAMPLE_SPLIT(x)					((x) << 14)
+#define		BACKEND_MAP(x)					((x) << 16)
+
+#define GB_TILING_CONFIG				0x98F0
+#define     PIPE_TILING__SHIFT              1
+#define     PIPE_TILING__MASK               0x0000000e
+
+#define DMA_TILING_CONFIG                               0x3ec8
+#define DMA_TILING_CONFIG2                              0xd0b8
+
+/* RV730 only */
+#define UVD_UDEC_TILING_CONFIG                          0xef40
+#define UVD_UDEC_DB_TILING_CONFIG                       0xef44
+#define UVD_UDEC_DBW_TILING_CONFIG                      0xef48
+
+#define	GC_USER_SHADER_PIPE_CONFIG			0x8954
+#define		INACTIVE_QD_PIPES(x)				((x) << 8)
+#define		INACTIVE_QD_PIPES_MASK				0x0000FF00
+#define		INACTIVE_QD_PIPES_SHIFT			    8
+#define		INACTIVE_SIMDS(x)				((x) << 16)
+#define		INACTIVE_SIMDS_MASK				0x00FF0000
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1<<0)
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		GUI_ACTIVE					(1<<31)
+#define	GRBM_STATUS2					0x8014
+
+#define CG_CLKPIN_CNTL                                    0x660
+#       define MUX_TCLK_TO_XCLK                           (1 << 8)
+#       define XTALIN_DIVIDE                              (1 << 9)
+
+#define	CG_MULT_THERMAL_STATUS				0x740
+#define		ASIC_T(x)			        ((x) << 16)
+#define		ASIC_T_MASK			        0x3FF0000
+#define		ASIC_T_SHIFT			        16
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+#define	HDP_TILING_CONFIG				0x2F3C
+#define HDP_DEBUG1                                      0x2F34
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x00003000
+#define MC_SHARED_CHREMAP					0x2008
+
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		BURSTLENGTH_SHIFT				9
+#define		BURSTLENGTH_MASK				0x00000200
+#define		CHANSIZE_OVERRIDE				(1 << 11)
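+/*
+ * Usage sketch (illustrative): MC_ARB_RAMCFG fields are extracted with
+ * the matching mask/shift pairs, e.g. the raw bank-count field:
+ *
+ *	banks_field = (RREG32(MC_ARB_RAMCFG) & NOOFBANK_MASK) >> NOOFBANK_SHIFT;
+ */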
+#define	MC_VM_AGP_TOP					0x2028
+#define	MC_VM_AGP_BOT					0x202C
+#define	MC_VM_AGP_BASE					0x2030
+#define	MC_VM_FB_LOCATION				0x2024
+#define	MC_VM_MB_L1_TLB0_CNTL				0x2234
+#define	MC_VM_MB_L1_TLB1_CNTL				0x2238
+#define	MC_VM_MB_L1_TLB2_CNTL				0x223C
+#define	MC_VM_MB_L1_TLB3_CNTL				0x2240
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		EFFECTIVE_L1_TLB_SIZE(x)			((x)<<15)
+#define		EFFECTIVE_L1_QUEUE_SIZE(x)			((x)<<18)
+#define	MC_VM_MD_L1_TLB0_CNTL				0x2654
+#define	MC_VM_MD_L1_TLB1_CNTL				0x2658
+#define	MC_VM_MD_L1_TLB2_CNTL				0x265C
+#define	MC_VM_MD_L1_TLB3_CNTL				0x2698
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+#define PA_SC_AA_CONFIG					0x28C04
+#define PA_SC_CLIPRECT_RULE				0x2820C
+#define	PA_SC_EDGERULE					0x28230
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_PRIM_FIFO_SIZE(x)				((x) << 0)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 12)
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x)<<0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x)<<16)
+#define PA_SC_LINE_STIPPLE				0x28A0C
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+#define PA_SC_MODE_CNTL					0x28A4C
+#define	PA_SC_MULTI_CHIP_CNTL				0x8B20
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 20)
+
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+
+#define	SMX_SAR_CTL0					0xA008
+#define	SMX_DC_CTL0					0xA020
+#define		USE_HASH_FUNCTION				(1 << 0)
+#define		CACHE_DEPTH(x)					((x) << 1)
+#define		FLUSH_ALL_ON_EVENT				(1 << 10)
+#define		STALL_ON_EVENT					(1 << 11)
+#define	SMX_EVENT_CTL					0xA02C
+#define		ES_FLUSH_CTL(x)					((x) << 0)
+#define		GS_FLUSH_CTL(x)					((x) << 3)
+#define		ACK_FLUSH_CTL(x)				((x) << 6)
+#define		SYNC_FLUSH_CTL					(1 << 8)
+
+#define	SPI_CONFIG_CNTL					0x9100
+#define		GPR_WRITE_PRIORITY(x)				((x) << 0)
+#define		DISABLE_INTERP_1				(1 << 5)
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+#define	SPI_INPUT_Z					0x286D8
+#define	SPI_PS_IN_CONTROL_0				0x286CC
+#define		NUM_INTERP(x)					((x)<<0)
+#define		POSITION_ENA					(1<<8)
+#define		POSITION_CENTROID				(1<<9)
+#define		POSITION_ADDR(x)				((x)<<10)
+#define		PARAM_GEN(x)					((x)<<15)
+#define		PARAM_GEN_ADDR(x)				((x)<<19)
+#define		BARYC_SAMPLE_CNTL(x)				((x)<<26)
+#define		PERSP_GRADIENT_ENA				(1<<28)
+#define		LINEAR_GRADIENT_ENA				(1<<29)
+#define		POSITION_SAMPLE					(1<<30)
+#define		BARYC_AT_SAMPLE_ENA				(1<<31)
+
+#define	SQ_CONFIG					0x8C00
+#define		VC_ENABLE					(1 << 0)
+#define		EXPORT_SRC_C					(1 << 1)
+#define		DX9_CONSTS					(1 << 2)
+#define		ALU_INST_PREFER_VECTOR				(1 << 3)
+#define		DX10_CLAMP					(1 << 4)
+#define		CLAUSE_SEQ_PRIO(x)				((x) << 8)
+#define		PS_PRIO(x)					((x) << 24)
+#define		VS_PRIO(x)					((x) << 26)
+#define		GS_PRIO(x)					((x) << 28)
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_0			0x8DB0
+#define		SIMDA_RING0(x)					((x)<<0)
+#define		SIMDA_RING1(x)					((x)<<8)
+#define		SIMDB_RING0(x)					((x)<<16)
+#define		SIMDB_RING1(x)					((x)<<24)
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_1			0x8DB4
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_2			0x8DB8
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_3			0x8DBC
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_4			0x8DC0
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_5			0x8DC4
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_6			0x8DC8
+#define	SQ_DYN_GPR_SIZE_SIMD_AB_7			0x8DCC
+#define		ES_PRIO(x)					((x) << 30)
+#define	SQ_GPR_RESOURCE_MGMT_1				0x8C04
+#define		NUM_PS_GPRS(x)					((x) << 0)
+#define		NUM_VS_GPRS(x)					((x) << 16)
+#define		DYN_GPR_ENABLE					(1 << 27)
+#define		NUM_CLAUSE_TEMP_GPRS(x)				((x) << 28)
+#define	SQ_GPR_RESOURCE_MGMT_2				0x8C08
+#define		NUM_GS_GPRS(x)					((x) << 0)
+#define		NUM_ES_GPRS(x)					((x) << 16)
+#define	SQ_MS_FIFO_SIZES				0x8CF0
+#define		CACHE_FIFO_SIZE(x)				((x) << 0)
+#define		FETCH_FIFO_HIWATER(x)				((x) << 8)
+#define		DONE_FIFO_HIWATER(x)				((x) << 16)
+#define		ALU_UPDATE_FIFO_HIWATER(x)			((x) << 24)
+#define	SQ_STACK_RESOURCE_MGMT_1			0x8C10
+#define		NUM_PS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_VS_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_STACK_RESOURCE_MGMT_2			0x8C14
+#define		NUM_GS_STACK_ENTRIES(x)				((x) << 0)
+#define		NUM_ES_STACK_ENTRIES(x)				((x) << 16)
+#define	SQ_THREAD_RESOURCE_MGMT				0x8C0C
+#define		NUM_PS_THREADS(x)				((x) << 0)
+#define		NUM_VS_THREADS(x)				((x) << 8)
+#define		NUM_GS_THREADS(x)				((x) << 16)
+#define		NUM_ES_THREADS(x)				((x) << 24)
+
+#define	SX_DEBUG_1					0x9058
+#define		ENABLE_NEW_SMX_ADDRESS				(1 << 16)
+#define	SX_EXPORT_BUFFER_SIZES				0x900C
+#define		COLOR_BUFFER_SIZE(x)				((x) << 0)
+#define		POSITION_BUFFER_SIZE(x)				((x) << 8)
+#define		SMX_BUFFER_SIZE(x)				((x) << 16)
+#define	SX_MISC						0x28350
+
+#define	TA_CNTL_AUX					0x9508
+#define		DISABLE_CUBE_WRAP				(1 << 0)
+#define		DISABLE_CUBE_ANISO				(1 << 1)
+#define		SYNC_GRADIENT					(1 << 24)
+#define		SYNC_WALKER					(1 << 25)
+#define		SYNC_ALIGNER					(1 << 26)
+#define		BILINEAR_PRECISION_6_BIT			(0 << 31)
+#define		BILINEAR_PRECISION_8_BIT			(1 << 31)
+
+#define	TCP_CNTL					0x9610
+#define	TCP_CHAN_STEER					0x9614
+
+#define	VC_ENHANCE					0x9714
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x)<<0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_ES_PER_GS					0x88CC
+#define	VGT_GS_PER_ES					0x88C8
+#define	VGT_GS_PER_VS					0x88E8
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+#define	VGT_NUM_INSTANCES				0x8974
+#define	VGT_OUT_DEALLOC_CNTL				0x28C5C
+#define		DEALLOC_DIST_MASK				0x0000007F
+#define	VGT_STRMOUT_EN					0x28AB0
+#define	VGT_VERTEX_REUSE_BLOCK_CNTL			0x28C58
+#define		VTX_REUSE_DEPTH_MASK				0x000000FF
+
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153C
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155C
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 14)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		CACHE_UPDATE_MODE(x)				((x) << 6)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+
+#define	WAIT_UNTIL					0x8040
+
+/* async DMA */
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_NOP					  0xf
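+/*
+ * Usage sketch (illustrative): a DMA packet header combines the packet
+ * type with its flag bits and a dword count, e.g. a plain 16-dword write:
+ *
+ *	header = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 16);
+ *
+ * t and s are per-packet mode flags (e.g. tiled addressing for copies);
+ * n is the count field in the low 16 bits.
+ */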
+
+
+#define	SRBM_STATUS				        0x0E50
+
+/* DCE 3.2 HDMI */
+#define HDMI_CONTROL                         0x7400
+#       define HDMI_KEEPOUT_MODE             (1 << 0)
+#       define HDMI_PACKET_GEN_VERSION       (1 << 4) /* 0 = r6xx compat */
+#       define HDMI_ERROR_ACK                (1 << 8)
+#       define HDMI_ERROR_MASK               (1 << 9)
+#define HDMI_STATUS                          0x7404
+#       define HDMI_ACTIVE_AVMUTE            (1 << 0)
+#       define HDMI_AUDIO_PACKET_ERROR       (1 << 16)
+#       define HDMI_VBI_PACKET_ERROR         (1 << 20)
+#define HDMI_AUDIO_PACKET_CONTROL            0x7408
+#       define HDMI_AUDIO_DELAY_EN(x)        (((x) & 3) << 4)
+#       define HDMI_AUDIO_PACKETS_PER_LINE(x)  (((x) & 0x1f) << 16)
+#define HDMI_ACR_PACKET_CONTROL              0x740c
+#       define HDMI_ACR_SEND                 (1 << 0)
+#       define HDMI_ACR_CONT                 (1 << 1)
+#       define HDMI_ACR_SELECT(x)            (((x) & 3) << 4)
+#       define HDMI_ACR_HW                   0
+#       define HDMI_ACR_32                   1
+#       define HDMI_ACR_44                   2
+#       define HDMI_ACR_48                   3
+#       define HDMI_ACR_SOURCE               (1 << 8) /* 0 - hw; 1 - cts value */
+#       define HDMI_ACR_AUTO_SEND            (1 << 12)
+#define HDMI_VBI_PACKET_CONTROL              0x7410
+#       define HDMI_NULL_SEND                (1 << 0)
+#       define HDMI_GC_SEND                  (1 << 4)
+#       define HDMI_GC_CONT                  (1 << 5) /* 0 - once; 1 - every frame */
+#define HDMI_INFOFRAME_CONTROL0              0x7414
+#       define HDMI_AVI_INFO_SEND            (1 << 0)
+#       define HDMI_AVI_INFO_CONT            (1 << 1)
+#       define HDMI_AUDIO_INFO_SEND          (1 << 4)
+#       define HDMI_AUDIO_INFO_CONT          (1 << 5)
+#       define HDMI_MPEG_INFO_SEND           (1 << 8)
+#       define HDMI_MPEG_INFO_CONT           (1 << 9)
+#define HDMI_INFOFRAME_CONTROL1              0x7418
+#       define HDMI_AVI_INFO_LINE(x)         (((x) & 0x3f) << 0)
+#       define HDMI_AUDIO_INFO_LINE(x)       (((x) & 0x3f) << 8)
+#       define HDMI_MPEG_INFO_LINE(x)        (((x) & 0x3f) << 16)
+#define HDMI_GENERIC_PACKET_CONTROL          0x741c
+#       define HDMI_GENERIC0_SEND            (1 << 0)
+#       define HDMI_GENERIC0_CONT            (1 << 1)
+#       define HDMI_GENERIC1_SEND            (1 << 4)
+#       define HDMI_GENERIC1_CONT            (1 << 5)
+#       define HDMI_GENERIC0_LINE(x)         (((x) & 0x3f) << 16)
+#       define HDMI_GENERIC1_LINE(x)         (((x) & 0x3f) << 24)
+#define HDMI_GC                              0x7428
+#       define HDMI_GC_AVMUTE                (1 << 0)
+#define AFMT_AUDIO_PACKET_CONTROL2           0x742c
+#       define AFMT_AUDIO_LAYOUT_OVRD        (1 << 0)
+#       define AFMT_AUDIO_LAYOUT_SELECT      (1 << 1)
+#       define AFMT_60958_CS_SOURCE          (1 << 4)
+#       define AFMT_AUDIO_CHANNEL_ENABLE(x)  (((x) & 0xff) << 8)
+#       define AFMT_DP_AUDIO_STREAM_ID(x)    (((x) & 0xff) << 16)
+#define AFMT_AVI_INFO0                       0x7454
+#       define AFMT_AVI_INFO_CHECKSUM(x)     (((x) & 0xff) << 0)
+#       define AFMT_AVI_INFO_S(x)            (((x) & 3) << 8)
+#       define AFMT_AVI_INFO_B(x)            (((x) & 3) << 10)
+#       define AFMT_AVI_INFO_A(x)            (((x) & 1) << 12)
+#       define AFMT_AVI_INFO_Y(x)            (((x) & 3) << 13)
+#       define AFMT_AVI_INFO_Y_RGB           0
+#       define AFMT_AVI_INFO_Y_YCBCR422      1
+#       define AFMT_AVI_INFO_Y_YCBCR444      2
+#       define AFMT_AVI_INFO_Y_A_B_S(x)      (((x) & 0xff) << 8)
+#       define AFMT_AVI_INFO_R(x)            (((x) & 0xf) << 16)
+#       define AFMT_AVI_INFO_M(x)            (((x) & 0x3) << 20)
+#       define AFMT_AVI_INFO_C(x)            (((x) & 0x3) << 22)
+#       define AFMT_AVI_INFO_C_M_R(x)        (((x) & 0xff) << 16)
+#       define AFMT_AVI_INFO_SC(x)           (((x) & 0x3) << 24)
+#       define AFMT_AVI_INFO_Q(x)            (((x) & 0x3) << 26)
+#       define AFMT_AVI_INFO_EC(x)           (((x) & 0x3) << 28)
+#       define AFMT_AVI_INFO_ITC(x)          (((x) & 0x1) << 31)
+#       define AFMT_AVI_INFO_ITC_EC_Q_SC(x)  (((x) & 0xff) << 24)
+#define AFMT_AVI_INFO1                       0x7458
+#       define AFMT_AVI_INFO_VIC(x)          (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_PR(x)           (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
+#       define AFMT_AVI_INFO_TOP(x)          (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO2                       0x745c
+#       define AFMT_AVI_INFO_BOTTOM(x)       (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_LEFT(x)         (((x) & 0xffff) << 16)
+#define AFMT_AVI_INFO3                       0x7460
+#       define AFMT_AVI_INFO_RIGHT(x)        (((x) & 0xffff) << 0)
+#       define AFMT_AVI_INFO_VERSION(x)      (((x) & 3) << 24)
+#define AFMT_MPEG_INFO0                      0x7464
+#       define AFMT_MPEG_INFO_CHECKSUM(x)    (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MB0(x)         (((x) & 0xff) << 8)
+#       define AFMT_MPEG_INFO_MB1(x)         (((x) & 0xff) << 16)
+#       define AFMT_MPEG_INFO_MB2(x)         (((x) & 0xff) << 24)
+#define AFMT_MPEG_INFO1                      0x7468
+#       define AFMT_MPEG_INFO_MB3(x)         (((x) & 0xff) << 0)
+#       define AFMT_MPEG_INFO_MF(x)          (((x) & 3) << 8)
+#       define AFMT_MPEG_INFO_FR(x)          (((x) & 1) << 12)
+#define AFMT_GENERIC0_HDR                    0x746c
+#define AFMT_GENERIC0_0                      0x7470
+#define AFMT_GENERIC0_1                      0x7474
+#define AFMT_GENERIC0_2                      0x7478
+#define AFMT_GENERIC0_3                      0x747c
+#define AFMT_GENERIC0_4                      0x7480
+#define AFMT_GENERIC0_5                      0x7484
+#define AFMT_GENERIC0_6                      0x7488
+#define AFMT_GENERIC1_HDR                    0x748c
+#define AFMT_GENERIC1_0                      0x7490
+#define AFMT_GENERIC1_1                      0x7494
+#define AFMT_GENERIC1_2                      0x7498
+#define AFMT_GENERIC1_3                      0x749c
+#define AFMT_GENERIC1_4                      0x74a0
+#define AFMT_GENERIC1_5                      0x74a4
+#define AFMT_GENERIC1_6                      0x74a8
+#define HDMI_ACR_32_0                        0x74ac
+#       define HDMI_ACR_CTS_32(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_32_1                        0x74b0
+#       define HDMI_ACR_N_32(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_44_0                        0x74b4
+#       define HDMI_ACR_CTS_44(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_44_1                        0x74b8
+#       define HDMI_ACR_N_44(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_48_0                        0x74bc
+#       define HDMI_ACR_CTS_48(x)            (((x) & 0xfffff) << 12)
+#define HDMI_ACR_48_1                        0x74c0
+#       define HDMI_ACR_N_48(x)              (((x) & 0xfffff) << 0)
+#define HDMI_ACR_STATUS_0                    0x74c4
+#define HDMI_ACR_STATUS_1                    0x74c8
+#define AFMT_AUDIO_INFO0                     0x74cc
+#       define AFMT_AUDIO_INFO_CHECKSUM(x)   (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_CC(x)         (((x) & 7) << 8)
+#       define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x)   (((x) & 0xff) << 16)
+#define AFMT_AUDIO_INFO1                     0x74d0
+#       define AFMT_AUDIO_INFO_CA(x)         (((x) & 0xff) << 0)
+#       define AFMT_AUDIO_INFO_LSV(x)        (((x) & 0xf) << 11)
+#       define AFMT_AUDIO_INFO_DM_INH(x)     (((x) & 1) << 15)
+#       define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
+#define AFMT_60958_0                         0x74d4
+#       define AFMT_60958_CS_A(x)            (((x) & 1) << 0)
+#       define AFMT_60958_CS_B(x)            (((x) & 1) << 1)
+#       define AFMT_60958_CS_C(x)            (((x) & 1) << 2)
+#       define AFMT_60958_CS_D(x)            (((x) & 3) << 3)
+#       define AFMT_60958_CS_MODE(x)         (((x) & 3) << 6)
+#       define AFMT_60958_CS_CATEGORY_CODE(x)      (((x) & 0xff) << 8)
+#       define AFMT_60958_CS_SOURCE_NUMBER(x)      (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_L(x)   (((x) & 0xf) << 20)
+#       define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
+#       define AFMT_60958_CS_CLOCK_ACCURACY(x)     (((x) & 3) << 28)
+#define AFMT_60958_1                         0x74d8
+#       define AFMT_60958_CS_WORD_LENGTH(x)  (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x)   (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_VALID_L(x)      (((x) & 1) << 16)
+#       define AFMT_60958_CS_VALID_R(x)      (((x) & 1) << 18)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_R(x)   (((x) & 0xf) << 20)
+#define AFMT_AUDIO_CRC_CONTROL               0x74dc
+#       define AFMT_AUDIO_CRC_EN             (1 << 0)
+#define AFMT_RAMP_CONTROL0                   0x74e0
+#       define AFMT_RAMP_MAX_COUNT(x)        (((x) & 0xffffff) << 0)
+#       define AFMT_RAMP_DATA_SIGN           (1 << 31)
+#define AFMT_RAMP_CONTROL1                   0x74e4
+#       define AFMT_RAMP_MIN_COUNT(x)        (((x) & 0xffffff) << 0)
+#       define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
+#define AFMT_RAMP_CONTROL2                   0x74e8
+#       define AFMT_RAMP_INC_COUNT(x)        (((x) & 0xffffff) << 0)
+#define AFMT_RAMP_CONTROL3                   0x74ec
+#       define AFMT_RAMP_DEC_COUNT(x)        (((x) & 0xffffff) << 0)
+#define AFMT_60958_2                         0x74f0
+#       define AFMT_60958_CS_CHANNEL_NUMBER_2(x)   (((x) & 0xf) << 0)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_3(x)   (((x) & 0xf) << 4)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_4(x)   (((x) & 0xf) << 8)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_5(x)   (((x) & 0xf) << 12)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_6(x)   (((x) & 0xf) << 16)
+#       define AFMT_60958_CS_CHANNEL_NUMBER_7(x)   (((x) & 0xf) << 20)
+#define AFMT_STATUS                          0x7600
+#       define AFMT_AUDIO_ENABLE             (1 << 4)
+#       define AFMT_AZ_FORMAT_WTRIG          (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_INT      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG      (1 << 30)
+#define AFMT_AUDIO_PACKET_CONTROL            0x7604
+#       define AFMT_AUDIO_SAMPLE_SEND        (1 << 0)
+#       define AFMT_AUDIO_TEST_EN            (1 << 12)
+#       define AFMT_AUDIO_CHANNEL_SWAP       (1 << 24)
+#       define AFMT_60958_CS_UPDATE          (1 << 26)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
+#       define AFMT_AZ_FORMAT_WTRIG_MASK     (1 << 28)
+#       define AFMT_AZ_FORMAT_WTRIG_ACK      (1 << 29)
+#       define AFMT_AZ_AUDIO_ENABLE_CHG_ACK  (1 << 30)
+#define AFMT_VBI_PACKET_CONTROL              0x7608
+#       define AFMT_GENERIC0_UPDATE          (1 << 2)
+#define AFMT_INFOFRAME_CONTROL0              0x760c
+#       define AFMT_AUDIO_INFO_SOURCE        (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+#       define AFMT_AUDIO_INFO_UPDATE        (1 << 7)
+#       define AFMT_MPEG_INFO_UPDATE         (1 << 10)
+#define AFMT_GENERIC0_7                      0x7610
+/* second instance starts at 0x7800 */
+#define HDMI_OFFSET0                      (0x7400 - 0x7400)
+#define HDMI_OFFSET1                      (0x7800 - 0x7400)
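+/*
+ * Usage sketch (illustrative): the two HDMI blocks are register
+ * compatible, so a per-instance register is addressed by adding the
+ * instance offset, e.g. for the second encoder:
+ *
+ *	WREG32(HDMI_CONTROL + HDMI_OFFSET1, val);
+ */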
+
+/* DCE 3.2 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0        0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1        0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2        0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3        0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4        0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5        0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6        0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7        0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8        0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9        0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10       0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11       0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12       0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13       0x71fc /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
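+/*
+ * Usage sketch (illustrative): an 8-channel LPCM descriptor advertising
+ * 32/44.1/48 kHz support could be encoded as
+ *
+ *	MAX_CHANNELS(7) | SUPPORTED_FREQUENCIES(0x07);
+ */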
+
+#define AZ_HOT_PLUG_CONTROL                               0x7300
+#       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
+#       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
+#       define PIN1_JACK_DETECTION_ENABLE                 (1 << 5)
+#       define PIN2_JACK_DETECTION_ENABLE                 (1 << 6)
+#       define PIN3_JACK_DETECTION_ENABLE                 (1 << 7)
+#       define PIN0_UNSOLICITED_RESPONSE_ENABLE           (1 << 8)
+#       define PIN1_UNSOLICITED_RESPONSE_ENABLE           (1 << 9)
+#       define PIN2_UNSOLICITED_RESPONSE_ENABLE           (1 << 10)
+#       define PIN3_UNSOLICITED_RESPONSE_ENABLE           (1 << 11)
+#       define CODEC_HOT_PLUG_ENABLE                      (1 << 12)
+#       define PIN0_AUDIO_ENABLED                         (1 << 24)
+#       define PIN1_AUDIO_ENABLED                         (1 << 25)
+#       define PIN2_AUDIO_ENABLED                         (1 << 26)
+#       define PIN3_AUDIO_ENABLED                         (1 << 27)
+#       define AUDIO_ENABLED                              (1 << 31)
+
+
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6914
+#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6114
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS                  0x6118
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH             0x691c
+#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH             0x611c
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
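+/*
+ * Usage sketch (illustrative, assuming the RREG32_PCIE_P() port-register
+ * accessor used elsewhere in radeon): read back the negotiated link width:
+ *
+ *	tmp = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+ *	width = (tmp & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+ */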
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     3
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
+/* UVD */
+#define UVD_LMI_EXT40_ADDR				0xf498
+#define UVD_VCPU_CHIP_ID				0xf4d4
+#define UVD_VCPU_CACHE_OFFSET0				0xf4d8
+#define UVD_VCPU_CACHE_SIZE0				0xf4dc
+#define UVD_VCPU_CACHE_OFFSET1				0xf4e0
+#define UVD_VCPU_CACHE_SIZE1				0xf4e4
+#define UVD_VCPU_CACHE_OFFSET2				0xf4e8
+#define UVD_VCPU_CACHE_SIZE2				0xf4ec
+#define UVD_LMI_ADDR_EXT				0xf594
+
+#define UVD_RBC_RB_RPTR					0xf690
+#define UVD_RBC_RB_WPTR					0xf694
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/si.c b/linux-imx/drivers/gpu/drm/radeon/si.c
new file mode 100644
index 0000000..03add5d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/si.c
@@ -0,0 +1,5782 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include <drm/radeon_drm.h>
+#include "sid.h"
+#include "atom.h"
+#include "si_blit_shaders.h"
+
+#define SI_PFP_UCODE_SIZE 2144
+#define SI_PM4_UCODE_SIZE 2144
+#define SI_CE_UCODE_SIZE 2144
+#define SI_RLC_UCODE_SIZE 2048
+#define SI_MC_UCODE_SIZE 7769
+#define OLAND_MC_UCODE_SIZE 7863
+
+MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
+MODULE_FIRMWARE("radeon/TAHITI_me.bin");
+MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
+MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
+MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
+MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
+MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
+MODULE_FIRMWARE("radeon/VERDE_me.bin");
+MODULE_FIRMWARE("radeon/VERDE_ce.bin");
+MODULE_FIRMWARE("radeon/VERDE_mc.bin");
+MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
+MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
+MODULE_FIRMWARE("radeon/OLAND_me.bin");
+MODULE_FIRMWARE("radeon/OLAND_ce.bin");
+MODULE_FIRMWARE("radeon/OLAND_mc.bin");
+MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
+MODULE_FIRMWARE("radeon/HAINAN_me.bin");
+MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
+MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
+MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
+
+extern int r600_ih_ring_alloc(struct radeon_device *rdev);
+extern void r600_ih_ring_fini(struct radeon_device *rdev);
+extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
+extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+
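+/*
+ * The golden register tables below are (offset, and_mask, or_value)
+ * triples consumed by radeon_program_register_sequence(): when the mask
+ * is 0xffffffff the value is written directly; otherwise the register is
+ * read, the masked bits are cleared, and the new value is OR'd in.
+ */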
+static const u32 tahiti_golden_rlc_registers[] =
+{
+	0xc424, 0xffffffff, 0x00601005,
+	0xc47c, 0xffffffff, 0x10104040,
+	0xc488, 0xffffffff, 0x0100000a,
+	0xc314, 0xffffffff, 0x00000800,
+	0xc30c, 0xffffffff, 0x800000f4,
+	0xf4a8, 0xffffffff, 0x00000000
+};
+
+static const u32 tahiti_golden_registers[] =
+{
+	0x9a10, 0x00010000, 0x00018208,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0x0002021c, 0x00020200,
+	0xc78, 0x00000080, 0x00000000,
+	0xd030, 0x000300c0, 0x00800040,
+	0xd830, 0x000300c0, 0x00800040,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x7030, 0x31000311, 0x00000011,
+	0x277c, 0x00000003, 0x000007ff,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x4e000000,
+	0x28350, 0x3f3f3fff, 0x2a00126a,
+	0x30, 0x000000ff, 0x0040,
+	0x34, 0x00000040, 0x00004040,
+	0x9100, 0x07ffffff, 0x03000000,
+	0x8e88, 0x01ff1f3f, 0x00000000,
+	0x8e84, 0x01ff1f3f, 0x00000000,
+	0x9060, 0x0000007f, 0x00000020,
+	0x9508, 0x00010000, 0x00010000,
+	0xac14, 0x00000200, 0x000002fb,
+	0xac10, 0xffffffff, 0x0000543b,
+	0xac0c, 0xffffffff, 0xa9210876,
+	0x88d0, 0xffffffff, 0x000fff40,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x1410, 0x20000000, 0x20fffed8,
+	0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 tahiti_golden_registers2[] =
+{
+	0xc64, 0x00000001, 0x00000001
+};
+
+static const u32 pitcairn_golden_rlc_registers[] =
+{
+	0xc424, 0xffffffff, 0x00601004,
+	0xc47c, 0xffffffff, 0x10102020,
+	0xc488, 0xffffffff, 0x01000020,
+	0xc314, 0xffffffff, 0x00000800,
+	0xc30c, 0xffffffff, 0x800000a4
+};
+
+static const u32 pitcairn_golden_registers[] =
+{
+	0x9a10, 0x00010000, 0x00018208,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0x0002021c, 0x00020200,
+	0xc78, 0x00000080, 0x00000000,
+	0xd030, 0x000300c0, 0x00800040,
+	0xd830, 0x000300c0, 0x00800040,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x7030, 0x31000311, 0x00000011,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x4e000000,
+	0x28350, 0x3f3f3fff, 0x2a00126a,
+	0x30, 0x000000ff, 0x0040,
+	0x34, 0x00000040, 0x00004040,
+	0x9100, 0x07ffffff, 0x03000000,
+	0x9060, 0x0000007f, 0x00000020,
+	0x9508, 0x00010000, 0x00010000,
+	0xac14, 0x000003ff, 0x000000f7,
+	0xac10, 0xffffffff, 0x00000000,
+	0xac0c, 0xffffffff, 0x32761054,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 verde_golden_rlc_registers[] =
+{
+	0xc424, 0xffffffff, 0x033f1005,
+	0xc47c, 0xffffffff, 0x10808020,
+	0xc488, 0xffffffff, 0x00800008,
+	0xc314, 0xffffffff, 0x00001000,
+	0xc30c, 0xffffffff, 0x80010014
+};
+
+static const u32 verde_golden_registers[] =
+{
+	0x9a10, 0x00010000, 0x00018208,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0x0002021c, 0x00020200,
+	0xc78, 0x00000080, 0x00000000,
+	0xd030, 0x000300c0, 0x00800040,
+	0xd030, 0x000300c0, 0x00800040,
+	0xd830, 0x000300c0, 0x00800040,
+	0xd830, 0x000300c0, 0x00800040,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x7030, 0x31000311, 0x00000011,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x240c, 0x000007ff, 0x00000000,
+	0x240c, 0x000007ff, 0x00000000,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x4e000000,
+	0x28350, 0x3f3f3fff, 0x0000124a,
+	0x28350, 0x3f3f3fff, 0x0000124a,
+	0x28350, 0x3f3f3fff, 0x0000124a,
+	0x30, 0x000000ff, 0x0040,
+	0x34, 0x00000040, 0x00004040,
+	0x9100, 0x07ffffff, 0x03000000,
+	0x9100, 0x07ffffff, 0x03000000,
+	0x8e88, 0x01ff1f3f, 0x00000000,
+	0x8e88, 0x01ff1f3f, 0x00000000,
+	0x8e88, 0x01ff1f3f, 0x00000000,
+	0x8e84, 0x01ff1f3f, 0x00000000,
+	0x8e84, 0x01ff1f3f, 0x00000000,
+	0x8e84, 0x01ff1f3f, 0x00000000,
+	0x9060, 0x0000007f, 0x00000020,
+	0x9508, 0x00010000, 0x00010000,
+	0xac14, 0x000003ff, 0x00000003,
+	0xac14, 0x000003ff, 0x00000003,
+	0xac14, 0x000003ff, 0x00000003,
+	0xac10, 0xffffffff, 0x00000000,
+	0xac10, 0xffffffff, 0x00000000,
+	0xac10, 0xffffffff, 0x00000000,
+	0xac0c, 0xffffffff, 0x00001032,
+	0xac0c, 0xffffffff, 0x00001032,
+	0xac0c, 0xffffffff, 0x00001032,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 oland_golden_rlc_registers[] =
+{
+	0xc424, 0xffffffff, 0x00601005,
+	0xc47c, 0xffffffff, 0x10104040,
+	0xc488, 0xffffffff, 0x0100000a,
+	0xc314, 0xffffffff, 0x00000800,
+	0xc30c, 0xffffffff, 0x800000f4
+};
+
+static const u32 oland_golden_registers[] =
+{
+	0x9a10, 0x00010000, 0x00018208,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0x0002021c, 0x00020200,
+	0xc78, 0x00000080, 0x00000000,
+	0xd030, 0x000300c0, 0x00800040,
+	0xd830, 0x000300c0, 0x00800040,
+	0x5bb0, 0x000000f0, 0x00000070,
+	0x5bc0, 0x00200000, 0x50100000,
+	0x7030, 0x31000311, 0x00000011,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x4e000000,
+	0x28350, 0x3f3f3fff, 0x00000082,
+	0x30, 0x000000ff, 0x0040,
+	0x34, 0x00000040, 0x00004040,
+	0x9100, 0x07ffffff, 0x03000000,
+	0x9060, 0x0000007f, 0x00000020,
+	0x9508, 0x00010000, 0x00010000,
+	0xac14, 0x000003ff, 0x000000f3,
+	0xac10, 0xffffffff, 0x00000000,
+	0xac0c, 0xffffffff, 0x00003210,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers[] =
+{
+	0x9a10, 0x00010000, 0x00018208,
+	0x9830, 0xffffffff, 0x00000000,
+	0x9834, 0xf00fffff, 0x00000400,
+	0x9838, 0x0002021c, 0x00020200,
+	0xd0c0, 0xff000fff, 0x00000100,
+	0xd030, 0x000300c0, 0x00800040,
+	0xd8c0, 0xff000fff, 0x00000100,
+	0xd830, 0x000300c0, 0x00800040,
+	0x2ae4, 0x00073ffe, 0x000022a2,
+	0x240c, 0x000007ff, 0x00000000,
+	0x8a14, 0xf000001f, 0x00000007,
+	0x8b24, 0xffffffff, 0x00ffffff,
+	0x8b10, 0x0000ff0f, 0x00000000,
+	0x28a4c, 0x07ffffff, 0x4e000000,
+	0x28350, 0x3f3f3fff, 0x00000000,
+	0x30, 0x000000ff, 0x0040,
+	0x34, 0x00000040, 0x00004040,
+	0x9100, 0x03e00000, 0x03600000,
+	0x9060, 0x0000007f, 0x00000020,
+	0x9508, 0x00010000, 0x00010000,
+	0xac14, 0x000003ff, 0x000000f1,
+	0xac10, 0xffffffff, 0x00000000,
+	0xac0c, 0xffffffff, 0x00003210,
+	0x88d4, 0x0000001f, 0x00000010,
+	0x15c0, 0x000c0fc0, 0x000c0400
+};
+
+static const u32 hainan_golden_registers2[] =
+{
+	0x98f8, 0xffffffff, 0x02010001
+};
+
+static const u32 tahiti_mgcg_cgcg_init[] =
+{
+	0xc400, 0xffffffff, 0xfffffffc,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x92a4, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x9774, 0xffffffff, 0x00000100,
+	0x8984, 0xffffffff, 0x06000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x92a0, 0xffffffff, 0x00000100,
+	0xc380, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x8d88, 0xffffffff, 0x00000100,
+	0x8d8c, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0xad80, 0xffffffff, 0x00000100,
+	0xac54, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0xaf04, 0xffffffff, 0x00000100,
+	0xae04, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9160, 0xffffffff, 0x00010000,
+	0x9164, 0xffffffff, 0x00030002,
+	0x9168, 0xffffffff, 0x00040007,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00090008,
+	0x9174, 0xffffffff, 0x00020001,
+	0x9178, 0xffffffff, 0x00040003,
+	0x917c, 0xffffffff, 0x00000007,
+	0x9180, 0xffffffff, 0x00060005,
+	0x9184, 0xffffffff, 0x00090008,
+	0x9188, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00050004,
+	0x9190, 0xffffffff, 0x00000008,
+	0x9194, 0xffffffff, 0x00070006,
+	0x9198, 0xffffffff, 0x000a0009,
+	0x919c, 0xffffffff, 0x00040003,
+	0x91a0, 0xffffffff, 0x00060005,
+	0x91a4, 0xffffffff, 0x00000009,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000b000a,
+	0x91b0, 0xffffffff, 0x00050004,
+	0x91b4, 0xffffffff, 0x00070006,
+	0x91b8, 0xffffffff, 0x0008000b,
+	0x91bc, 0xffffffff, 0x000a0009,
+	0x91c0, 0xffffffff, 0x000d000c,
+	0x91c4, 0xffffffff, 0x00060005,
+	0x91c8, 0xffffffff, 0x00080007,
+	0x91cc, 0xffffffff, 0x0000000b,
+	0x91d0, 0xffffffff, 0x000a0009,
+	0x91d4, 0xffffffff, 0x000d000c,
+	0x91d8, 0xffffffff, 0x00070006,
+	0x91dc, 0xffffffff, 0x00090008,
+	0x91e0, 0xffffffff, 0x0000000c,
+	0x91e4, 0xffffffff, 0x000b000a,
+	0x91e8, 0xffffffff, 0x000e000d,
+	0x91ec, 0xffffffff, 0x00080007,
+	0x91f0, 0xffffffff, 0x000a0009,
+	0x91f4, 0xffffffff, 0x0000000d,
+	0x91f8, 0xffffffff, 0x000c000b,
+	0x91fc, 0xffffffff, 0x000f000e,
+	0x9200, 0xffffffff, 0x00090008,
+	0x9204, 0xffffffff, 0x000b000a,
+	0x9208, 0xffffffff, 0x000c000f,
+	0x920c, 0xffffffff, 0x000e000d,
+	0x9210, 0xffffffff, 0x00110010,
+	0x9214, 0xffffffff, 0x000a0009,
+	0x9218, 0xffffffff, 0x000c000b,
+	0x921c, 0xffffffff, 0x0000000f,
+	0x9220, 0xffffffff, 0x000e000d,
+	0x9224, 0xffffffff, 0x00110010,
+	0x9228, 0xffffffff, 0x000b000a,
+	0x922c, 0xffffffff, 0x000d000c,
+	0x9230, 0xffffffff, 0x00000010,
+	0x9234, 0xffffffff, 0x000f000e,
+	0x9238, 0xffffffff, 0x00120011,
+	0x923c, 0xffffffff, 0x000c000b,
+	0x9240, 0xffffffff, 0x000e000d,
+	0x9244, 0xffffffff, 0x00000011,
+	0x9248, 0xffffffff, 0x0010000f,
+	0x924c, 0xffffffff, 0x00130012,
+	0x9250, 0xffffffff, 0x000d000c,
+	0x9254, 0xffffffff, 0x000f000e,
+	0x9258, 0xffffffff, 0x00100013,
+	0x925c, 0xffffffff, 0x00120011,
+	0x9260, 0xffffffff, 0x00150014,
+	0x9264, 0xffffffff, 0x000e000d,
+	0x9268, 0xffffffff, 0x0010000f,
+	0x926c, 0xffffffff, 0x00000013,
+	0x9270, 0xffffffff, 0x00120011,
+	0x9274, 0xffffffff, 0x00150014,
+	0x9278, 0xffffffff, 0x000f000e,
+	0x927c, 0xffffffff, 0x00110010,
+	0x9280, 0xffffffff, 0x00000014,
+	0x9284, 0xffffffff, 0x00130012,
+	0x9288, 0xffffffff, 0x00160015,
+	0x928c, 0xffffffff, 0x0010000f,
+	0x9290, 0xffffffff, 0x00120011,
+	0x9294, 0xffffffff, 0x00000015,
+	0x9298, 0xffffffff, 0x00140013,
+	0x929c, 0xffffffff, 0x00170016,
+	0x9150, 0xffffffff, 0x96940200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc478, 0xffffffff, 0x00000080,
+	0xc404, 0xffffffff, 0x0020003f,
+	0x30, 0xffffffff, 0x0000001c,
+	0x34, 0x000f0000, 0x000f0000,
+	0x160c, 0xffffffff, 0x00000100,
+	0x1024, 0xffffffff, 0x00000100,
+	0x102c, 0x00000101, 0x00000000,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x264c, 0x000c0000, 0x000c0000,
+	0x2648, 0x000c0000, 0x000c0000,
+	0x55e4, 0xff000fff, 0x00000100,
+	0x55e8, 0x00000001, 0x00000001,
+	0x2f50, 0x00000001, 0x00000001,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd0c0, 0xfffffff0, 0x00000100,
+	0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 pitcairn_mgcg_cgcg_init[] =
+{
+	0xc400, 0xffffffff, 0xfffffffc,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x92a4, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x9774, 0xffffffff, 0x00000100,
+	0x8984, 0xffffffff, 0x06000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x92a0, 0xffffffff, 0x00000100,
+	0xc380, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x8d88, 0xffffffff, 0x00000100,
+	0x8d8c, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0xad80, 0xffffffff, 0x00000100,
+	0xac54, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0xaf04, 0xffffffff, 0x00000100,
+	0xae04, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9160, 0xffffffff, 0x00010000,
+	0x9164, 0xffffffff, 0x00030002,
+	0x9168, 0xffffffff, 0x00040007,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00090008,
+	0x9174, 0xffffffff, 0x00020001,
+	0x9178, 0xffffffff, 0x00040003,
+	0x917c, 0xffffffff, 0x00000007,
+	0x9180, 0xffffffff, 0x00060005,
+	0x9184, 0xffffffff, 0x00090008,
+	0x9188, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00050004,
+	0x9190, 0xffffffff, 0x00000008,
+	0x9194, 0xffffffff, 0x00070006,
+	0x9198, 0xffffffff, 0x000a0009,
+	0x919c, 0xffffffff, 0x00040003,
+	0x91a0, 0xffffffff, 0x00060005,
+	0x91a4, 0xffffffff, 0x00000009,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000b000a,
+	0x91b0, 0xffffffff, 0x00050004,
+	0x91b4, 0xffffffff, 0x00070006,
+	0x91b8, 0xffffffff, 0x0008000b,
+	0x91bc, 0xffffffff, 0x000a0009,
+	0x91c0, 0xffffffff, 0x000d000c,
+	0x9200, 0xffffffff, 0x00090008,
+	0x9204, 0xffffffff, 0x000b000a,
+	0x9208, 0xffffffff, 0x000c000f,
+	0x920c, 0xffffffff, 0x000e000d,
+	0x9210, 0xffffffff, 0x00110010,
+	0x9214, 0xffffffff, 0x000a0009,
+	0x9218, 0xffffffff, 0x000c000b,
+	0x921c, 0xffffffff, 0x0000000f,
+	0x9220, 0xffffffff, 0x000e000d,
+	0x9224, 0xffffffff, 0x00110010,
+	0x9228, 0xffffffff, 0x000b000a,
+	0x922c, 0xffffffff, 0x000d000c,
+	0x9230, 0xffffffff, 0x00000010,
+	0x9234, 0xffffffff, 0x000f000e,
+	0x9238, 0xffffffff, 0x00120011,
+	0x923c, 0xffffffff, 0x000c000b,
+	0x9240, 0xffffffff, 0x000e000d,
+	0x9244, 0xffffffff, 0x00000011,
+	0x9248, 0xffffffff, 0x0010000f,
+	0x924c, 0xffffffff, 0x00130012,
+	0x9250, 0xffffffff, 0x000d000c,
+	0x9254, 0xffffffff, 0x000f000e,
+	0x9258, 0xffffffff, 0x00100013,
+	0x925c, 0xffffffff, 0x00120011,
+	0x9260, 0xffffffff, 0x00150014,
+	0x9150, 0xffffffff, 0x96940200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc478, 0xffffffff, 0x00000080,
+	0xc404, 0xffffffff, 0x0020003f,
+	0x30, 0xffffffff, 0x0000001c,
+	0x34, 0x000f0000, 0x000f0000,
+	0x160c, 0xffffffff, 0x00000100,
+	0x1024, 0xffffffff, 0x00000100,
+	0x102c, 0x00000101, 0x00000000,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x55e4, 0xff000fff, 0x00000100,
+	0x55e8, 0x00000001, 0x00000001,
+	0x2f50, 0x00000001, 0x00000001,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd0c0, 0xfffffff0, 0x00000100,
+	0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_mgcg_cgcg_init[] =
+{
+	0xc400, 0xffffffff, 0xfffffffc,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x92a4, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x9774, 0xffffffff, 0x00000100,
+	0x8984, 0xffffffff, 0x06000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x92a0, 0xffffffff, 0x00000100,
+	0xc380, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x8d88, 0xffffffff, 0x00000100,
+	0x8d8c, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0xad80, 0xffffffff, 0x00000100,
+	0xac54, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0xaf04, 0xffffffff, 0x00000100,
+	0xae04, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9160, 0xffffffff, 0x00010000,
+	0x9164, 0xffffffff, 0x00030002,
+	0x9168, 0xffffffff, 0x00040007,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00090008,
+	0x9174, 0xffffffff, 0x00020001,
+	0x9178, 0xffffffff, 0x00040003,
+	0x917c, 0xffffffff, 0x00000007,
+	0x9180, 0xffffffff, 0x00060005,
+	0x9184, 0xffffffff, 0x00090008,
+	0x9188, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00050004,
+	0x9190, 0xffffffff, 0x00000008,
+	0x9194, 0xffffffff, 0x00070006,
+	0x9198, 0xffffffff, 0x000a0009,
+	0x919c, 0xffffffff, 0x00040003,
+	0x91a0, 0xffffffff, 0x00060005,
+	0x91a4, 0xffffffff, 0x00000009,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000b000a,
+	0x91b0, 0xffffffff, 0x00050004,
+	0x91b4, 0xffffffff, 0x00070006,
+	0x91b8, 0xffffffff, 0x0008000b,
+	0x91bc, 0xffffffff, 0x000a0009,
+	0x91c0, 0xffffffff, 0x000d000c,
+	0x9200, 0xffffffff, 0x00090008,
+	0x9204, 0xffffffff, 0x000b000a,
+	0x9208, 0xffffffff, 0x000c000f,
+	0x920c, 0xffffffff, 0x000e000d,
+	0x9210, 0xffffffff, 0x00110010,
+	0x9214, 0xffffffff, 0x000a0009,
+	0x9218, 0xffffffff, 0x000c000b,
+	0x921c, 0xffffffff, 0x0000000f,
+	0x9220, 0xffffffff, 0x000e000d,
+	0x9224, 0xffffffff, 0x00110010,
+	0x9228, 0xffffffff, 0x000b000a,
+	0x922c, 0xffffffff, 0x000d000c,
+	0x9230, 0xffffffff, 0x00000010,
+	0x9234, 0xffffffff, 0x000f000e,
+	0x9238, 0xffffffff, 0x00120011,
+	0x923c, 0xffffffff, 0x000c000b,
+	0x9240, 0xffffffff, 0x000e000d,
+	0x9244, 0xffffffff, 0x00000011,
+	0x9248, 0xffffffff, 0x0010000f,
+	0x924c, 0xffffffff, 0x00130012,
+	0x9250, 0xffffffff, 0x000d000c,
+	0x9254, 0xffffffff, 0x000f000e,
+	0x9258, 0xffffffff, 0x00100013,
+	0x925c, 0xffffffff, 0x00120011,
+	0x9260, 0xffffffff, 0x00150014,
+	0x9150, 0xffffffff, 0x96940200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc478, 0xffffffff, 0x00000080,
+	0xc404, 0xffffffff, 0x0020003f,
+	0x30, 0xffffffff, 0x0000001c,
+	0x34, 0x000f0000, 0x000f0000,
+	0x160c, 0xffffffff, 0x00000100,
+	0x1024, 0xffffffff, 0x00000100,
+	0x102c, 0x00000101, 0x00000000,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x264c, 0x000c0000, 0x000c0000,
+	0x2648, 0x000c0000, 0x000c0000,
+	0x55e4, 0xff000fff, 0x00000100,
+	0x55e8, 0x00000001, 0x00000001,
+	0x2f50, 0x00000001, 0x00000001,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd0c0, 0xfffffff0, 0x00000100,
+	0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 oland_mgcg_cgcg_init[] =
+{
+	0xc400, 0xffffffff, 0xfffffffc,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x92a4, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x9774, 0xffffffff, 0x00000100,
+	0x8984, 0xffffffff, 0x06000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x92a0, 0xffffffff, 0x00000100,
+	0xc380, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x8d88, 0xffffffff, 0x00000100,
+	0x8d8c, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0xad80, 0xffffffff, 0x00000100,
+	0xac54, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0xaf04, 0xffffffff, 0x00000100,
+	0xae04, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9160, 0xffffffff, 0x00010000,
+	0x9164, 0xffffffff, 0x00030002,
+	0x9168, 0xffffffff, 0x00040007,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00090008,
+	0x9174, 0xffffffff, 0x00020001,
+	0x9178, 0xffffffff, 0x00040003,
+	0x917c, 0xffffffff, 0x00000007,
+	0x9180, 0xffffffff, 0x00060005,
+	0x9184, 0xffffffff, 0x00090008,
+	0x9188, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00050004,
+	0x9190, 0xffffffff, 0x00000008,
+	0x9194, 0xffffffff, 0x00070006,
+	0x9198, 0xffffffff, 0x000a0009,
+	0x919c, 0xffffffff, 0x00040003,
+	0x91a0, 0xffffffff, 0x00060005,
+	0x91a4, 0xffffffff, 0x00000009,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000b000a,
+	0x91b0, 0xffffffff, 0x00050004,
+	0x91b4, 0xffffffff, 0x00070006,
+	0x91b8, 0xffffffff, 0x0008000b,
+	0x91bc, 0xffffffff, 0x000a0009,
+	0x91c0, 0xffffffff, 0x000d000c,
+	0x91c4, 0xffffffff, 0x00060005,
+	0x91c8, 0xffffffff, 0x00080007,
+	0x91cc, 0xffffffff, 0x0000000b,
+	0x91d0, 0xffffffff, 0x000a0009,
+	0x91d4, 0xffffffff, 0x000d000c,
+	0x9150, 0xffffffff, 0x96940200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc478, 0xffffffff, 0x00000080,
+	0xc404, 0xffffffff, 0x0020003f,
+	0x30, 0xffffffff, 0x0000001c,
+	0x34, 0x000f0000, 0x000f0000,
+	0x160c, 0xffffffff, 0x00000100,
+	0x1024, 0xffffffff, 0x00000100,
+	0x102c, 0x00000101, 0x00000000,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x264c, 0x000c0000, 0x000c0000,
+	0x2648, 0x000c0000, 0x000c0000,
+	0x55e4, 0xff000fff, 0x00000100,
+	0x55e8, 0x00000001, 0x00000001,
+	0x2f50, 0x00000001, 0x00000001,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd0c0, 0xfffffff0, 0x00000100,
+	0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 hainan_mgcg_cgcg_init[] =
+{
+	0xc400, 0xffffffff, 0xfffffffc,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9a60, 0xffffffff, 0x00000100,
+	0x92a4, 0xffffffff, 0x00000100,
+	0xc164, 0xffffffff, 0x00000100,
+	0x9774, 0xffffffff, 0x00000100,
+	0x8984, 0xffffffff, 0x06000100,
+	0x8a18, 0xffffffff, 0x00000100,
+	0x92a0, 0xffffffff, 0x00000100,
+	0xc380, 0xffffffff, 0x00000100,
+	0x8b28, 0xffffffff, 0x00000100,
+	0x9144, 0xffffffff, 0x00000100,
+	0x8d88, 0xffffffff, 0x00000100,
+	0x8d8c, 0xffffffff, 0x00000100,
+	0x9030, 0xffffffff, 0x00000100,
+	0x9034, 0xffffffff, 0x00000100,
+	0x9038, 0xffffffff, 0x00000100,
+	0x903c, 0xffffffff, 0x00000100,
+	0xad80, 0xffffffff, 0x00000100,
+	0xac54, 0xffffffff, 0x00000100,
+	0x897c, 0xffffffff, 0x06000100,
+	0x9868, 0xffffffff, 0x00000100,
+	0x9510, 0xffffffff, 0x00000100,
+	0xaf04, 0xffffffff, 0x00000100,
+	0xae04, 0xffffffff, 0x00000100,
+	0x949c, 0xffffffff, 0x00000100,
+	0x802c, 0xffffffff, 0xe0000000,
+	0x9160, 0xffffffff, 0x00010000,
+	0x9164, 0xffffffff, 0x00030002,
+	0x9168, 0xffffffff, 0x00040007,
+	0x916c, 0xffffffff, 0x00060005,
+	0x9170, 0xffffffff, 0x00090008,
+	0x9174, 0xffffffff, 0x00020001,
+	0x9178, 0xffffffff, 0x00040003,
+	0x917c, 0xffffffff, 0x00000007,
+	0x9180, 0xffffffff, 0x00060005,
+	0x9184, 0xffffffff, 0x00090008,
+	0x9188, 0xffffffff, 0x00030002,
+	0x918c, 0xffffffff, 0x00050004,
+	0x9190, 0xffffffff, 0x00000008,
+	0x9194, 0xffffffff, 0x00070006,
+	0x9198, 0xffffffff, 0x000a0009,
+	0x919c, 0xffffffff, 0x00040003,
+	0x91a0, 0xffffffff, 0x00060005,
+	0x91a4, 0xffffffff, 0x00000009,
+	0x91a8, 0xffffffff, 0x00080007,
+	0x91ac, 0xffffffff, 0x000b000a,
+	0x91b0, 0xffffffff, 0x00050004,
+	0x91b4, 0xffffffff, 0x00070006,
+	0x91b8, 0xffffffff, 0x0008000b,
+	0x91bc, 0xffffffff, 0x000a0009,
+	0x91c0, 0xffffffff, 0x000d000c,
+	0x91c4, 0xffffffff, 0x00060005,
+	0x91c8, 0xffffffff, 0x00080007,
+	0x91cc, 0xffffffff, 0x0000000b,
+	0x91d0, 0xffffffff, 0x000a0009,
+	0x91d4, 0xffffffff, 0x000d000c,
+	0x9150, 0xffffffff, 0x96940200,
+	0x8708, 0xffffffff, 0x00900100,
+	0xc478, 0xffffffff, 0x00000080,
+	0xc404, 0xffffffff, 0x0020003f,
+	0x30, 0xffffffff, 0x0000001c,
+	0x34, 0x000f0000, 0x000f0000,
+	0x160c, 0xffffffff, 0x00000100,
+	0x1024, 0xffffffff, 0x00000100,
+	0x20a8, 0xffffffff, 0x00000104,
+	0x264c, 0x000c0000, 0x000c0000,
+	0x2648, 0x000c0000, 0x000c0000,
+	0x2f50, 0x00000001, 0x00000001,
+	0x30cc, 0xc0000fff, 0x00000104,
+	0xc1e4, 0x00000001, 0x00000001,
+	0xd0c0, 0xfffffff0, 0x00000100,
+	0xd8c0, 0xfffffff0, 0x00000100
+};
+
+static const u32 verde_pg_init[] =
+{
+	0x353c, 0xffffffff, 0x40000,
+	0x3538, 0xffffffff, 0x200010ff,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x7007,
+	0x3538, 0xffffffff, 0x300010ff,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x400000,
+	0x3538, 0xffffffff, 0x100010ff,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x120200,
+	0x3538, 0xffffffff, 0x500010ff,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x1e1e16,
+	0x3538, 0xffffffff, 0x600010ff,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x171f1e,
+	0x3538, 0xffffffff, 0x700010ff,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x353c, 0xffffffff, 0x0,
+	0x3538, 0xffffffff, 0x9ff,
+	0x3500, 0xffffffff, 0x0,
+	0x3504, 0xffffffff, 0x10000800,
+	0x3504, 0xffffffff, 0xf,
+	0x3504, 0xffffffff, 0xf,
+	0x3500, 0xffffffff, 0x4,
+	0x3504, 0xffffffff, 0x1000051e,
+	0x3504, 0xffffffff, 0xffff,
+	0x3504, 0xffffffff, 0xffff,
+	0x3500, 0xffffffff, 0x8,
+	0x3504, 0xffffffff, 0x80500,
+	0x3500, 0xffffffff, 0x12,
+	0x3504, 0xffffffff, 0x9050c,
+	0x3500, 0xffffffff, 0x1d,
+	0x3504, 0xffffffff, 0xb052c,
+	0x3500, 0xffffffff, 0x2a,
+	0x3504, 0xffffffff, 0x1053e,
+	0x3500, 0xffffffff, 0x2d,
+	0x3504, 0xffffffff, 0x10546,
+	0x3500, 0xffffffff, 0x30,
+	0x3504, 0xffffffff, 0xa054e,
+	0x3500, 0xffffffff, 0x3c,
+	0x3504, 0xffffffff, 0x1055f,
+	0x3500, 0xffffffff, 0x3f,
+	0x3504, 0xffffffff, 0x10567,
+	0x3500, 0xffffffff, 0x42,
+	0x3504, 0xffffffff, 0x1056f,
+	0x3500, 0xffffffff, 0x45,
+	0x3504, 0xffffffff, 0x10572,
+	0x3500, 0xffffffff, 0x48,
+	0x3504, 0xffffffff, 0x20575,
+	0x3500, 0xffffffff, 0x4c,
+	0x3504, 0xffffffff, 0x190801,
+	0x3500, 0xffffffff, 0x67,
+	0x3504, 0xffffffff, 0x1082a,
+	0x3500, 0xffffffff, 0x6a,
+	0x3504, 0xffffffff, 0x1b082d,
+	0x3500, 0xffffffff, 0x87,
+	0x3504, 0xffffffff, 0x310851,
+	0x3500, 0xffffffff, 0xba,
+	0x3504, 0xffffffff, 0x891,
+	0x3500, 0xffffffff, 0xbc,
+	0x3504, 0xffffffff, 0x893,
+	0x3500, 0xffffffff, 0xbe,
+	0x3504, 0xffffffff, 0x20895,
+	0x3500, 0xffffffff, 0xc2,
+	0x3504, 0xffffffff, 0x20899,
+	0x3500, 0xffffffff, 0xc6,
+	0x3504, 0xffffffff, 0x2089d,
+	0x3500, 0xffffffff, 0xca,
+	0x3504, 0xffffffff, 0x8a1,
+	0x3500, 0xffffffff, 0xcc,
+	0x3504, 0xffffffff, 0x8a3,
+	0x3500, 0xffffffff, 0xce,
+	0x3504, 0xffffffff, 0x308a5,
+	0x3500, 0xffffffff, 0xd3,
+	0x3504, 0xffffffff, 0x6d08cd,
+	0x3500, 0xffffffff, 0x142,
+	0x3504, 0xffffffff, 0x2000095a,
+	0x3504, 0xffffffff, 0x1,
+	0x3500, 0xffffffff, 0x144,
+	0x3504, 0xffffffff, 0x301f095b,
+	0x3500, 0xffffffff, 0x165,
+	0x3504, 0xffffffff, 0xc094d,
+	0x3500, 0xffffffff, 0x173,
+	0x3504, 0xffffffff, 0xf096d,
+	0x3500, 0xffffffff, 0x184,
+	0x3504, 0xffffffff, 0x15097f,
+	0x3500, 0xffffffff, 0x19b,
+	0x3504, 0xffffffff, 0xc0998,
+	0x3500, 0xffffffff, 0x1a9,
+	0x3504, 0xffffffff, 0x409a7,
+	0x3500, 0xffffffff, 0x1af,
+	0x3504, 0xffffffff, 0xcdc,
+	0x3500, 0xffffffff, 0x1b1,
+	0x3504, 0xffffffff, 0x800,
+	0x3508, 0xffffffff, 0x6c9b2000,
+	0x3510, 0xfc00, 0x2000,
+	0x3544, 0xffffffff, 0xfc0,
+	0x28d4, 0x00000100, 0x100
+};
+
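+/* The tables above are consumed as {offset, and_mask, or_value} triplets:
+ * radeon_program_register_sequence() read-modify-writes each register as
+ * (reg & ~and_mask) | or_value, treating an and_mask of 0xffffffff as a
+ * straight write of or_value. */
+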
+static void si_init_golden_registers(struct radeon_device *rdev)
+{
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+		radeon_program_register_sequence(rdev,
+						 tahiti_golden_registers,
+						 (const u32)ARRAY_SIZE(tahiti_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 tahiti_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
+		radeon_program_register_sequence(rdev,
+						 tahiti_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+		radeon_program_register_sequence(rdev,
+						 tahiti_golden_registers2,
+						 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
+		break;
+	case CHIP_PITCAIRN:
+		radeon_program_register_sequence(rdev,
+						 pitcairn_golden_registers,
+						 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 pitcairn_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
+		radeon_program_register_sequence(rdev,
+						 pitcairn_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+		break;
+	case CHIP_VERDE:
+		radeon_program_register_sequence(rdev,
+						 verde_golden_registers,
+						 (const u32)ARRAY_SIZE(verde_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 verde_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
+		radeon_program_register_sequence(rdev,
+						 verde_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
+		radeon_program_register_sequence(rdev,
+						 verde_pg_init,
+						 (const u32)ARRAY_SIZE(verde_pg_init));
+		break;
+	case CHIP_OLAND:
+		radeon_program_register_sequence(rdev,
+						 oland_golden_registers,
+						 (const u32)ARRAY_SIZE(oland_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 oland_golden_rlc_registers,
+						 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
+		radeon_program_register_sequence(rdev,
+						 oland_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+		break;
+	case CHIP_HAINAN:
+		radeon_program_register_sequence(rdev,
+						 hainan_golden_registers,
+						 (const u32)ARRAY_SIZE(hainan_golden_registers));
+		radeon_program_register_sequence(rdev,
+						 hainan_golden_registers2,
+						 (const u32)ARRAY_SIZE(hainan_golden_registers2));
+		radeon_program_register_sequence(rdev,
+						 hainan_mgcg_cgcg_init,
+						 (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
+		break;
+	default:
+		break;
+	}
+}
+
+#define PCIE_BUS_CLK                10000
+#define TCLK                        (PCIE_BUS_CLK / 10)
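+/* both values are in the driver's 10 kHz clock units: a 100 MHz PCIE bus
+ * clock and a 10 MHz TCLK */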
+
+/**
+ * si_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (SI).
+ */
+u32 si_get_xclk(struct radeon_device *rdev)
+{
+	u32 reference_clock = rdev->clock.spll.reference_freq;
+	u32 tmp;
+
+	tmp = RREG32(CG_CLKPIN_CNTL_2);
+	if (tmp & MUX_TCLK_TO_XCLK)
+		return TCLK;
+
+	tmp = RREG32(CG_CLKPIN_CNTL);
+	if (tmp & XTALIN_DIVIDE)
+		return reference_clock / 4;
+
+	return reference_clock;
+}
+
+/* get temperature in millidegrees */
+int si_get_temp(struct radeon_device *rdev)
+{
+	u32 temp;
+	int actual_temp = 0;
+
+	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+		CTF_TEMP_SHIFT;
+
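+	/* bit 9 flags an out-of-range reading: clamp at the 255 C ceiling,
+	 * otherwise the low 9 bits are the temperature in degrees C */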
+	if (temp & 0x200)
+		actual_temp = 255;
+	else
+		actual_temp = temp & 0x1ff;
+
+	actual_temp = (actual_temp * 1000);
+
+	return actual_temp;
+}
+
+#define TAHITI_IO_MC_REGS_SIZE 36
+
+static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+	{0x0000006f, 0x03044000},
+	{0x00000070, 0x0480c018},
+	{0x00000071, 0x00000040},
+	{0x00000072, 0x01000000},
+	{0x00000074, 0x000000ff},
+	{0x00000075, 0x00143400},
+	{0x00000076, 0x08ec0800},
+	{0x00000077, 0x040000cc},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x21000409},
+	{0x0000007c, 0x00000000},
+	{0x0000007d, 0xe8000000},
+	{0x0000007e, 0x044408a8},
+	{0x0000007f, 0x00000003},
+	{0x00000080, 0x00000000},
+	{0x00000081, 0x01000000},
+	{0x00000082, 0x02000000},
+	{0x00000083, 0x00000000},
+	{0x00000084, 0xe3f3e4f4},
+	{0x00000085, 0x00052024},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x66036603},
+	{0x00000089, 0x01000000},
+	{0x0000008b, 0x1c0a0000},
+	{0x0000008c, 0xff010000},
+	{0x0000008e, 0xffffefff},
+	{0x0000008f, 0xfff3efff},
+	{0x00000090, 0xfff3efbf},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00a77400}
+};
+
+static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+	{0x0000006f, 0x03044000},
+	{0x00000070, 0x0480c018},
+	{0x00000071, 0x00000040},
+	{0x00000072, 0x01000000},
+	{0x00000074, 0x000000ff},
+	{0x00000075, 0x00143400},
+	{0x00000076, 0x08ec0800},
+	{0x00000077, 0x040000cc},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x21000409},
+	{0x0000007c, 0x00000000},
+	{0x0000007d, 0xe8000000},
+	{0x0000007e, 0x044408a8},
+	{0x0000007f, 0x00000003},
+	{0x00000080, 0x00000000},
+	{0x00000081, 0x01000000},
+	{0x00000082, 0x02000000},
+	{0x00000083, 0x00000000},
+	{0x00000084, 0xe3f3e4f4},
+	{0x00000085, 0x00052024},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x66036603},
+	{0x00000089, 0x01000000},
+	{0x0000008b, 0x1c0a0000},
+	{0x0000008c, 0xff010000},
+	{0x0000008e, 0xffffefff},
+	{0x0000008f, 0xfff3efff},
+	{0x00000090, 0xfff3efbf},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00a47400}
+};
+
+static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+	{0x0000006f, 0x03044000},
+	{0x00000070, 0x0480c018},
+	{0x00000071, 0x00000040},
+	{0x00000072, 0x01000000},
+	{0x00000074, 0x000000ff},
+	{0x00000075, 0x00143400},
+	{0x00000076, 0x08ec0800},
+	{0x00000077, 0x040000cc},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x21000409},
+	{0x0000007c, 0x00000000},
+	{0x0000007d, 0xe8000000},
+	{0x0000007e, 0x044408a8},
+	{0x0000007f, 0x00000003},
+	{0x00000080, 0x00000000},
+	{0x00000081, 0x01000000},
+	{0x00000082, 0x02000000},
+	{0x00000083, 0x00000000},
+	{0x00000084, 0xe3f3e4f4},
+	{0x00000085, 0x00052024},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x66036603},
+	{0x00000089, 0x01000000},
+	{0x0000008b, 0x1c0a0000},
+	{0x0000008c, 0xff010000},
+	{0x0000008e, 0xffffefff},
+	{0x0000008f, 0xfff3efff},
+	{0x00000090, 0xfff3efbf},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00a37400}
+};
+
+static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+	{0x0000006f, 0x03044000},
+	{0x00000070, 0x0480c018},
+	{0x00000071, 0x00000040},
+	{0x00000072, 0x01000000},
+	{0x00000074, 0x000000ff},
+	{0x00000075, 0x00143400},
+	{0x00000076, 0x08ec0800},
+	{0x00000077, 0x040000cc},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x21000409},
+	{0x0000007c, 0x00000000},
+	{0x0000007d, 0xe8000000},
+	{0x0000007e, 0x044408a8},
+	{0x0000007f, 0x00000003},
+	{0x00000080, 0x00000000},
+	{0x00000081, 0x01000000},
+	{0x00000082, 0x02000000},
+	{0x00000083, 0x00000000},
+	{0x00000084, 0xe3f3e4f4},
+	{0x00000085, 0x00052024},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x66036603},
+	{0x00000089, 0x01000000},
+	{0x0000008b, 0x1c0a0000},
+	{0x0000008c, 0xff010000},
+	{0x0000008e, 0xffffefff},
+	{0x0000008f, 0xfff3efff},
+	{0x00000090, 0xfff3efbf},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00a17730}
+};
+
+static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+	{0x0000006f, 0x03044000},
+	{0x00000070, 0x0480c018},
+	{0x00000071, 0x00000040},
+	{0x00000072, 0x01000000},
+	{0x00000074, 0x000000ff},
+	{0x00000075, 0x00143400},
+	{0x00000076, 0x08ec0800},
+	{0x00000077, 0x040000cc},
+	{0x00000079, 0x00000000},
+	{0x0000007a, 0x21000409},
+	{0x0000007c, 0x00000000},
+	{0x0000007d, 0xe8000000},
+	{0x0000007e, 0x044408a8},
+	{0x0000007f, 0x00000003},
+	{0x00000080, 0x00000000},
+	{0x00000081, 0x01000000},
+	{0x00000082, 0x02000000},
+	{0x00000083, 0x00000000},
+	{0x00000084, 0xe3f3e4f4},
+	{0x00000085, 0x00052024},
+	{0x00000087, 0x00000000},
+	{0x00000088, 0x66036603},
+	{0x00000089, 0x01000000},
+	{0x0000008b, 0x1c0a0000},
+	{0x0000008c, 0xff010000},
+	{0x0000008e, 0xffffefff},
+	{0x0000008f, 0xfff3efff},
+	{0x00000090, 0xfff3efbf},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00a07730}
+};
+
+/* ucode loading */
+static int si_mc_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	u32 running, blackout = 0;
+	u32 *io_mc_regs;
+	int i, ucode_size, regs_size;
+
+	if (!rdev->mc_fw)
+		return -EINVAL;
+
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+		io_mc_regs = (u32 *)&tahiti_io_mc_regs;
+		ucode_size = SI_MC_UCODE_SIZE;
+		regs_size = TAHITI_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_PITCAIRN:
+		io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
+		ucode_size = SI_MC_UCODE_SIZE;
+		regs_size = TAHITI_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_VERDE:
+	default:
+		io_mc_regs = (u32 *)&verde_io_mc_regs;
+		ucode_size = SI_MC_UCODE_SIZE;
+		regs_size = TAHITI_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_OLAND:
+		io_mc_regs = (u32 *)&oland_io_mc_regs;
+		ucode_size = OLAND_MC_UCODE_SIZE;
+		regs_size = TAHITI_IO_MC_REGS_SIZE;
+		break;
+	case CHIP_HAINAN:
+		io_mc_regs = (u32 *)&hainan_io_mc_regs;
+		ucode_size = OLAND_MC_UCODE_SIZE;
+		regs_size = TAHITI_IO_MC_REGS_SIZE;
+		break;
+	}
+
+	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+	if (running == 0) {
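+		/* note: 'running' is zero inside this block, so the blackout
+		 * save below and the matching restore at the end never execute */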
+		if (running) {
+			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+		}
+
+		/* reset the engine and set to writable */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+		/* load mc io regs */
+		for (i = 0; i < regs_size; i++) {
+			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+		}
+		/* load the MC ucode */
+		fw_data = (const __be32 *)rdev->mc_fw->data;
+		for (i = 0; i < ucode_size; i++)
+			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+
+		/* put the engine back into the active state */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+		/* wait for training to complete */
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+				break;
+			udelay(1);
+		}
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+				break;
+			udelay(1);
+		}
+
+		if (running)
+			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
+	}
+
+	return 0;
+}
+
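+/* Fetch the PFP, ME, CE, RLC and MC microcode images with request_firmware().
+ * The throwaway "radeon_cp" platform device exists only to give the firmware
+ * loader a struct device to attach the requests to. */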
+static int si_init_microcode(struct radeon_device *rdev)
+{
+	struct platform_device *pdev;
+	const char *chip_name;
+	const char *rlc_chip_name;
+	size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+
+	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+	err = IS_ERR(pdev);
+	if (err) {
+		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+		return -EINVAL;
+	}
+
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+		chip_name = "TAHITI";
+		rlc_chip_name = "TAHITI";
+		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+		me_req_size = SI_PM4_UCODE_SIZE * 4;
+		ce_req_size = SI_CE_UCODE_SIZE * 4;
+		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+		mc_req_size = SI_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_PITCAIRN:
+		chip_name = "PITCAIRN";
+		rlc_chip_name = "PITCAIRN";
+		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+		me_req_size = SI_PM4_UCODE_SIZE * 4;
+		ce_req_size = SI_CE_UCODE_SIZE * 4;
+		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+		mc_req_size = SI_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_VERDE:
+		chip_name = "VERDE";
+		rlc_chip_name = "VERDE";
+		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+		me_req_size = SI_PM4_UCODE_SIZE * 4;
+		ce_req_size = SI_CE_UCODE_SIZE * 4;
+		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+		mc_req_size = SI_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_OLAND:
+		chip_name = "OLAND";
+		rlc_chip_name = "OLAND";
+		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+		me_req_size = SI_PM4_UCODE_SIZE * 4;
+		ce_req_size = SI_CE_UCODE_SIZE * 4;
+		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+		mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+		break;
+	case CHIP_HAINAN:
+		chip_name = "HAINAN";
+		rlc_chip_name = "HAINAN";
+		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+		me_req_size = SI_PM4_UCODE_SIZE * 4;
+		ce_req_size = SI_CE_UCODE_SIZE * 4;
+		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+		mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+		break;
+	default: BUG();
+	}
+
+	DRM_INFO("Loading %s Microcode\n", chip_name);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->pfp_fw->size != pfp_req_size) {
+		printk(KERN_ERR
+		       "si_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->pfp_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->me_fw->size != me_req_size) {
+		printk(KERN_ERR
+		       "si_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->me_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+	err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->ce_fw->size != ce_req_size) {
+		printk(KERN_ERR
+		       "si_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->ce_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->rlc_fw->size != rlc_req_size) {
+		printk(KERN_ERR
+		       "si_rlc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->rlc_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+	err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->mc_fw->size != mc_req_size) {
+		printk(KERN_ERR
+		       "si_mc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->mc_fw->size, fw_name);
+		err = -EINVAL;
+	}
+
+out:
+	platform_device_unregister(pdev);
+
+	if (err) {
+		if (err != -EINVAL)
+			printk(KERN_ERR
+			       "si_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		release_firmware(rdev->pfp_fw);
+		rdev->pfp_fw = NULL;
+		release_firmware(rdev->me_fw);
+		rdev->me_fw = NULL;
+		release_firmware(rdev->ce_fw);
+		rdev->ce_fw = NULL;
+		release_firmware(rdev->rlc_fw);
+		rdev->rlc_fw = NULL;
+		release_firmware(rdev->mc_fw);
+		rdev->mc_fw = NULL;
+	}
+	return err;
+}
+
+/* watermark setup */
+static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
+				   struct radeon_crtc *radeon_crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *other_mode)
+{
+	u32 tmp, buffer_alloc, i;
+	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
+	/*
+	 * Line Buffer Setup
+	 * There are 3 line buffers, each one shared by 2 display controllers.
+	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers.  The partitioning is done via one of four
+	 * preset allocations specified in bits 21:20:
+	 *  0 - half lb
+	 *  2 - whole lb, other crtc must be disabled
+	 */
+	/* this can get tricky if we have two large displays on a paired group
+	 * of crtcs.  Ideally for multiple large displays we'd assign them to
+	 * non-linked crtcs for maximum line buffer allocation.
+	 */
+	if (radeon_crtc->base.enabled && mode) {
+		if (other_mode) {
+			tmp = 0; /* 1/2 */
+			buffer_alloc = 1;
+		} else {
+			tmp = 2; /* whole */
+			buffer_alloc = 2;
+		}
+	} else {
+		tmp = 0;
+		buffer_alloc = 0;
+	}
+
+	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
+	       DC_LB_MEMORY_CONFIG(tmp));
+
+	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
+			break;
+		udelay(1);
+	}
+
+	if (radeon_crtc->base.enabled && mode) {
+		switch (tmp) {
+		case 0:
+		default:
+			return 4096 * 2;
+		case 2:
+			return 8192 * 2;
+		}
+	}
+
+	/* controller not enabled, so no lb used */
+	return 0;
+}
+
+static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	case 3:
+		return 8;
+	case 4:
+		return 3;
+	case 5:
+		return 6;
+	case 6:
+		return 10;
+	case 7:
+		return 12;
+	case 8:
+		return 16;
+	}
+}
+
+struct dce6_wm_params {
+	u32 dram_channels; /* number of dram channels */
+	u32 yclk;          /* bandwidth per dram data pin in kHz */
+	u32 sclk;          /* engine clock in kHz */
+	u32 disp_clk;      /* display clock in kHz */
+	u32 src_width;     /* viewport width */
+	u32 active_time;   /* active display time in ns */
+	u32 blank_time;    /* blank time in ns */
+	bool interlaced;    /* mode is interlaced */
+	fixed20_12 vsc;    /* vertical scale ratio */
+	u32 num_heads;     /* number of active crtcs */
+	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+	u32 lb_size;       /* line buffer allocated to pipe */
+	u32 vtaps;         /* vertical scaler taps */
+};
+
+static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate raw DRAM Bandwidth */
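+	/* yclk arrives in kHz; e.g. a 1000 MHz effective memory clock on two
+	 * 32-bit (4 byte) channels yields 1000 * 8 * 0.7 = 5600 MB/s */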
+	fixed20_12 dram_efficiency; /* 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	dram_efficiency.full = dfixed_const(7);
+	dram_efficiency.full = dfixed_div(dram_efficiency, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
+	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the display data return bandwidth */
+	fixed20_12 return_efficiency; /* 0.8 */
+	fixed20_12 sclk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(10);
+	return_efficiency.full = dfixed_const(8);
+	return_efficiency.full = dfixed_div(return_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, sclk);
+	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
+{
+	return 32;
+}
+
+static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the DMIF Request Bandwidth */
+	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+	fixed20_12 disp_clk, sclk, bandwidth;
+	fixed20_12 a, b1, b2;
+	u32 min_bandwidth;
+
+	a.full = dfixed_const(1000);
+	disp_clk.full = dfixed_const(wm->disp_clk);
+	disp_clk.full = dfixed_div(disp_clk, a);
+	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
+	b1.full = dfixed_mul(a, disp_clk);
+
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
+	b2.full = dfixed_mul(a, sclk);
+
+	a.full = dfixed_const(10);
+	disp_clk_request_efficiency.full = dfixed_const(8);
+	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+
+	min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));
+
+	a.full = dfixed_const(min_bandwidth);
+	bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the available bandwidth. The display can use this temporarily but not on average. */
+	u32 dram_bandwidth = dce6_dram_bandwidth(wm);
+	u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
+	u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
+
+	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
+{
+	/* Calculate the display mode Average Bandwidth
+	 * DisplayMode should contain the source and destination dimensions,
+	 * timing, etc.
+	 */
+	fixed20_12 bpp;
+	fixed20_12 line_time;
+	fixed20_12 src_width;
+	fixed20_12 bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+	line_time.full = dfixed_div(line_time, a);
+	bpp.full = dfixed_const(wm->bytes_per_pixel);
+	src_width.full = dfixed_const(wm->src_width);
+	bandwidth.full = dfixed_mul(src_width, bpp);
+	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+	bandwidth.full = dfixed_div(bandwidth, line_time);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
+{
+	/* First calculate the latency in ns */
+	u32 mc_latency = 2000; /* 2000 ns. */
+	u32 available_bandwidth = dce6_available_bandwidth(wm);
+	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+		(wm->num_heads * cursor_line_pair_return_time);
+	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+	u32 tmp, dmif_size = 12288;
+	fixed20_12 a, b, c;
+
+	if (wm->num_heads == 0)
+		return 0;
+
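+	/* heavier vertical scaling (or interlace) means more source lines must
+	 * be resident per destination line */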
+	a.full = dfixed_const(2);
+	b.full = dfixed_const(1);
+	if ((wm->vsc.full > a.full) ||
+	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+	    (wm->vtaps >= 5) ||
+	    ((wm->vsc.full >= a.full) && wm->interlaced))
+		max_src_lines_per_dst_line = 4;
+	else
+		max_src_lines_per_dst_line = 2;
+
+	a.full = dfixed_const(available_bandwidth);
+	b.full = dfixed_const(wm->num_heads);
+	a.full = dfixed_div(a, b);
+
+	b.full = dfixed_const(mc_latency + 512);
+	c.full = dfixed_const(wm->disp_clk);
+	b.full = dfixed_div(b, c);
+
+	c.full = dfixed_const(dmif_size);
+	b.full = dfixed_div(c, b);
+
+	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
+
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(wm->disp_clk);
+	b.full = dfixed_div(c, b);
+	c.full = dfixed_const(wm->bytes_per_pixel);
+	b.full = dfixed_mul(b, c);
+
+	lb_fill_bw = min(tmp, dfixed_trunc(b));
+
+	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(lb_fill_bw);
+	b.full = dfixed_div(c, b);
+	a.full = dfixed_div(a, b);
+	line_fill_time = dfixed_trunc(a);
+
+	if (line_fill_time < wm->active_time)
+		return latency;
+	else
+		return latency + (line_fill_time - wm->active_time);
+
+}
+
+static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
+{
+	if (dce6_average_bandwidth(wm) <=
+	    (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
+static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
+{
+	if (dce6_average_bandwidth(wm) <=
+	    (dce6_available_bandwidth(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+}
+
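+/* Latency can be hidden when the computed watermark fits inside the time the
+ * line buffer contents plus the blanking interval can bridge. */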
+static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
+{
+	u32 lb_partitions = wm->lb_size / wm->src_width;
+	u32 line_time = wm->active_time + wm->blank_time;
+	u32 latency_tolerant_lines;
+	u32 latency_hiding;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1);
+	if (wm->vsc.full > a.full)
+		latency_tolerant_lines = 1;
+	else {
+		if (lb_partitions <= (wm->vtaps + 1))
+			latency_tolerant_lines = 1;
+		else
+			latency_tolerant_lines = 2;
+	}
+
+	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+	if (dce6_latency_watermark(wm) <= latency_hiding)
+		return true;
+	else
+		return false;
+}
+
+static void dce6_program_watermarks(struct radeon_device *rdev,
+					 struct radeon_crtc *radeon_crtc,
+					 u32 lb_size, u32 num_heads)
+{
+	struct drm_display_mode *mode = &radeon_crtc->base.mode;
+	struct dce6_wm_params wm;
+	u32 pixel_period;
+	u32 line_time = 0;
+	u32 latency_watermark_a = 0, latency_watermark_b = 0;
+	u32 priority_a_mark = 0, priority_b_mark = 0;
+	u32 priority_a_cnt = PRIORITY_OFF;
+	u32 priority_b_cnt = PRIORITY_OFF;
+	u32 tmp, arb_control3;
+	fixed20_12 a, b, c;
+
+	if (radeon_crtc->base.enabled && num_heads && mode) {
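+		/* mode->clock is in kHz, so pixel_period is in ns */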
+		pixel_period = 1000000 / (u32)mode->clock;
+		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		priority_a_cnt = 0;
+		priority_b_cnt = 0;
+
+		wm.yclk = rdev->pm.current_mclk * 10;
+		wm.sclk = rdev->pm.current_sclk * 10;
+		wm.disp_clk = mode->clock;
+		wm.src_width = mode->crtc_hdisplay;
+		wm.active_time = mode->crtc_hdisplay * pixel_period;
+		wm.blank_time = line_time - wm.active_time;
+		wm.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm.interlaced = true;
+		wm.vsc = radeon_crtc->vsc;
+		wm.vtaps = 1;
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			wm.vtaps = 2;
+		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm.lb_size = lb_size;
+		if (rdev->family == CHIP_ARUBA)
+			wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
+		else
+			wm.dram_channels = si_get_number_of_dram_channels(rdev);
+		wm.num_heads = num_heads;
+
+		/* set for high clocks */
+		latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535);
+		/* set for low clocks */
+		/* wm.yclk = low clk; wm.sclk = low clk */
+		latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);
+
+		/* possibly force display priority to high */
+		/* should really do this at mode validation time... */
+		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
+		    !dce6_average_bandwidth_vs_available_bandwidth(&wm) ||
+		    !dce6_check_latency_hiding(&wm) ||
+		    (rdev->disp_priority == 2)) {
+			DRM_DEBUG_KMS("force priority to high\n");
+			priority_a_cnt |= PRIORITY_ALWAYS_ON;
+			priority_b_cnt |= PRIORITY_ALWAYS_ON;
+		}
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_a);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_a_mark = dfixed_trunc(c);
+		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_b);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_b_mark = dfixed_trunc(c);
+		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+	}
+
+	/* select wm A */
+	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
+	tmp = arb_control3;
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(1);
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
+	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* select wm B */
+	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(2);
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
+	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* restore original selection */
+	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);
+
+	/* write the priority marks */
+	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
+	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
+
+}
+
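+/* Recompute the line buffer split and watermarks for each CRTC pair whenever
+ * modes or display priority change. */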
+void dce6_bandwidth_update(struct radeon_device *rdev)
+{
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 num_heads = 0, lb_size;
+	int i;
+
+	radeon_update_display_priority(rdev);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->mode_info.crtcs[i]->base.enabled)
+			num_heads++;
+	}
+	for (i = 0; i < rdev->num_crtc; i += 2) {
+		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
+		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
+		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
+		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
+		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
+	}
+}
+
+/*
+ * Core functions
+ */
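+/* Program the 32 GB_TILE_MODE registers with per-ASIC golden values and cache
+ * them in rdev->config.si.tile_mode_array so userspace can query them. */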
+static void si_tiling_mode_table_init(struct radeon_device *rdev)
+{
+	const u32 num_tile_mode_states = 32;
+	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+
+	switch (rdev->config.si.mem_row_size_in_kb) {
+	case 1:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
+		break;
+	case 2:
+	default:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
+		break;
+	case 4:
+		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
+		break;
+	}
+
+	if ((rdev->family == CHIP_TAHITI) ||
+	    (rdev->family == CHIP_PITCAIRN)) {
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:  /* non-AA compressed depth or any compressed stencil */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 1:  /* 2xAA/4xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 2:  /* 8xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 8:  /* 1D and 1D Array Surfaces */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 9:  /* Displayable maps. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 10:  /* Display 8bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 11:  /* Display 16bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 12:  /* Display 32bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 13:  /* Thin. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 14:  /* Thin 8 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 15:  /* Thin 16 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 16:  /* Thin 32 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 17:  /* Thin 64 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			case 21:  /* 8 bpp PRT. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 22:  /* 16 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 23:  /* 32 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 24:  /* 64 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 25:  /* 128 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+						 NUM_BANKS(ADDR_SURF_8_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+		}
+	} else if ((rdev->family == CHIP_VERDE) ||
+		   (rdev->family == CHIP_OLAND) ||
+		   (rdev->family == CHIP_HAINAN)) {
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:  /* non-AA compressed depth or any compressed stencil */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 1:  /* 2xAA/4xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 2:  /* 8xAA compressed depth only */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 8:  /* 1D and 1D Array Surfaces */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 9:  /* Displayable maps. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 10:  /* Display 8bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 11:  /* Display 16bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 12:  /* Display 32bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 13:  /* Thin. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 14:  /* Thin 8 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 15:  /* Thin 16 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 16:  /* Thin 32 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 17:  /* Thin 64 bpp. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+						 TILE_SPLIT(split_equal_to_row_size) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 21:  /* 8 bpp PRT. */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 22:  /* 16 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+				break;
+			case 23:  /* 32 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 24:  /* 64 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						 NUM_BANKS(ADDR_SURF_16_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+				break;
+			case 25:  /* 128 bpp PRT */
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+						 NUM_BANKS(ADDR_SURF_8_BANK) |
+						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+				break;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
+			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+		}
+	} else
+		DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
+}
+
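+/* Route GRBM indexed register access to a single shader engine / shader
+ * array; an index of 0xffffffff selects broadcast writes to all instances. */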
+static void si_select_se_sh(struct radeon_device *rdev,
+			    u32 se_num, u32 sh_num)
+{
+	u32 data = INSTANCE_BROADCAST_WRITES;
+
+	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+		data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+	else if (se_num == 0xffffffff)
+		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+	else if (sh_num == 0xffffffff)
+		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+	else
+		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+	WREG32(GRBM_GFX_INDEX, data);
+}
+
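+/* Build a mask with the low 'bit_width' bits set, i.e. ((1 << bit_width) - 1)
+ * without the undefined shift when bit_width is 32. */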
+static u32 si_create_bitmask(u32 bit_width)
+{
+	u32 i, mask = 0;
+
+	for (i = 0; i < bit_width; i++) {
+		mask <<= 1;
+		mask |= 1;
+	}
+	return mask;
+}
+
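+/* Return the bitmask of usable CUs in the currently selected shader array,
+ * masking out both fused-off and user-disabled units. */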
+static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
+{
+	u32 data, mask;
+
+	data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+	if (data & 1)
+		data &= INACTIVE_CUS_MASK;
+	else
+		data = 0;
+	data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+	data >>= INACTIVE_CUS_SHIFT;
+
+	mask = si_create_bitmask(cu_per_sh);
+
+	return ~data & mask;
+}
+
+static void si_setup_spi(struct radeon_device *rdev,
+			 u32 se_num, u32 sh_per_se,
+			 u32 cu_per_sh)
+{
+	int i, j, k;
+	u32 data, mask, active_cu;
+
+	for (i = 0; i < se_num; i++) {
+		for (j = 0; j < sh_per_se; j++) {
+			si_select_se_sh(rdev, i, j);
+			data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+			active_cu = si_get_cu_enabled(rdev, cu_per_sh);
+
+			/* find the first active CU and clear its enable bit */
+			for (k = 0; k < 16; k++) {
+				mask = 1 << k;
+				if (active_cu & mask) {
+					data &= ~mask;
+					WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+					break;
+				}
+			}
+		}
+	}
+	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
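+/* Return the bitmask of disabled render backends for the currently selected
+ * shader array, combining fused-off and user-disabled RBs. */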
+static u32 si_get_rb_disabled(struct radeon_device *rdev,
+			      u32 max_rb_num, u32 se_num,
+			      u32 sh_per_se)
+{
+	u32 data, mask;
+
+	data = RREG32(CC_RB_BACKEND_DISABLE);
+	if (data & 1)
+		data &= BACKEND_DISABLE_MASK;
+	else
+		data = 0;
+	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+	data >>= BACKEND_DISABLE_SHIFT;
+
+	mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+
+	return data & mask;
+}
+
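+/**
+ * si_setup_rb - set up the render backends
+ *
+ * @rdev: radeon_device pointer
+ * @se_num: number of shader engines
+ * @sh_per_se: number of shader arrays per shader engine
+ * @max_rb_num: maximum number of render backends
+ *
+ * Gather the disabled-RB mask from each shader array and program
+ * PA_SC_RASTER_CONFIG on every shader engine so that rasterization
+ * only maps to the enabled backends (SI).
+ */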
+static void si_setup_rb(struct radeon_device *rdev,
+			u32 se_num, u32 sh_per_se,
+			u32 max_rb_num)
+{
+	int i, j;
+	u32 data, mask;
+	u32 disabled_rbs = 0;
+	u32 enabled_rbs = 0;
+
+	for (i = 0; i < se_num; i++) {
+		for (j = 0; j < sh_per_se; j++) {
+			si_select_se_sh(rdev, i, j);
+			data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+		}
+	}
+	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+	mask = 1;
+	for (i = 0; i < max_rb_num; i++) {
+		if (!(disabled_rbs & mask))
+			enabled_rbs |= mask;
+		mask <<= 1;
+	}
+
+	for (i = 0; i < se_num; i++) {
+		si_select_se_sh(rdev, i, 0xffffffff);
+		data = 0;
+		for (j = 0; j < sh_per_se; j++) {
+			switch (enabled_rbs & 3) {
+			case 1:
+				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+				break;
+			case 2:
+				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+				break;
+			case 3:
+			default:
+				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+				break;
+			}
+			enabled_rbs >>= 2;
+		}
+		WREG32(PA_SC_RASTER_CONFIG, data);
+	}
+	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
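+/**
+ * si_gpu_init - set up the GFX/compute hardware
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Look up the per-ASIC configuration (shader engines, tile pipes,
+ * CUs, FIFO sizes), program the tiling and addressing registers,
+ * set up the render backends and SPI, and apply the HW defaults
+ * for the 3D engine (SI).
+ */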
+static void si_gpu_init(struct radeon_device *rdev)
+{
+	u32 gb_addr_config = 0;
+	u32 mc_shared_chmap, mc_arb_ramcfg;
+	u32 sx_debug_1;
+	u32 hdp_host_path_cntl;
+	u32 tmp;
+	int i, j;
+
+	switch (rdev->family) {
+	case CHIP_TAHITI:
+		rdev->config.si.max_shader_engines = 2;
+		rdev->config.si.max_tile_pipes = 12;
+		rdev->config.si.max_cu_per_sh = 8;
+		rdev->config.si.max_sh_per_se = 2;
+		rdev->config.si.max_backends_per_se = 4;
+		rdev->config.si.max_texture_channel_caches = 12;
+		rdev->config.si.max_gprs = 256;
+		rdev->config.si.max_gs_threads = 32;
+		rdev->config.si.max_hw_contexts = 8;
+
+		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
+		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_PITCAIRN:
+		rdev->config.si.max_shader_engines = 2;
+		rdev->config.si.max_tile_pipes = 8;
+		rdev->config.si.max_cu_per_sh = 5;
+		rdev->config.si.max_sh_per_se = 2;
+		rdev->config.si.max_backends_per_se = 4;
+		rdev->config.si.max_texture_channel_caches = 8;
+		rdev->config.si.max_gprs = 256;
+		rdev->config.si.max_gs_threads = 32;
+		rdev->config.si.max_hw_contexts = 8;
+
+		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
+		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_VERDE:
+	default:
+		rdev->config.si.max_shader_engines = 1;
+		rdev->config.si.max_tile_pipes = 4;
+		rdev->config.si.max_cu_per_sh = 5;
+		rdev->config.si.max_sh_per_se = 2;
+		rdev->config.si.max_backends_per_se = 4;
+		rdev->config.si.max_texture_channel_caches = 4;
+		rdev->config.si.max_gprs = 256;
+		rdev->config.si.max_gs_threads = 32;
+		rdev->config.si.max_hw_contexts = 8;
+
+		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_OLAND:
+		rdev->config.si.max_shader_engines = 1;
+		rdev->config.si.max_tile_pipes = 4;
+		rdev->config.si.max_cu_per_sh = 6;
+		rdev->config.si.max_sh_per_se = 1;
+		rdev->config.si.max_backends_per_se = 2;
+		rdev->config.si.max_texture_channel_caches = 4;
+		rdev->config.si.max_gprs = 256;
+		rdev->config.si.max_gs_threads = 16;
+		rdev->config.si.max_hw_contexts = 8;
+
+		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	case CHIP_HAINAN:
+		rdev->config.si.max_shader_engines = 1;
+		rdev->config.si.max_tile_pipes = 4;
+		rdev->config.si.max_cu_per_sh = 5;
+		rdev->config.si.max_sh_per_se = 1;
+		rdev->config.si.max_backends_per_se = 1;
+		rdev->config.si.max_texture_channel_caches = 2;
+		rdev->config.si.max_gprs = 256;
+		rdev->config.si.max_gs_threads = 16;
+		rdev->config.si.max_hw_contexts = 8;
+
+		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+		gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
+		break;
+	}
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+
+	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+	evergreen_fix_pci_max_read_req_size(rdev);
+
+	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+	rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
+	rdev->config.si.mem_max_burst_length_bytes = 256;
+	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+	rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+	if (rdev->config.si.mem_row_size_in_kb > 4)
+		rdev->config.si.mem_row_size_in_kb = 4;
+	/* XXX use MC settings? */
+	rdev->config.si.shader_engine_tile_size = 32;
+	rdev->config.si.num_gpus = 1;
+	rdev->config.si.multi_gpu_tile_size = 64;
+
+	/* fix up row size */
+	gb_addr_config &= ~ROW_SIZE_MASK;
+	switch (rdev->config.si.mem_row_size_in_kb) {
+	case 1:
+	default:
+		gb_addr_config |= ROW_SIZE(0);
+		break;
+	case 2:
+		gb_addr_config |= ROW_SIZE(1);
+		break;
+	case 4:
+		gb_addr_config |= ROW_SIZE(2);
+		break;
+	}
+
+	/* setup tiling info dword.  gb_addr_config is not adequate since it does
+	 * not have bank info, so create a custom tiling dword.
+	 * bits 3:0   num_pipes
+	 * bits 7:4   num_banks
+	 * bits 11:8  group_size
+	 * bits 15:12 row_size
+	 */
+	rdev->config.si.tile_config = 0;
+	switch (rdev->config.si.num_tile_pipes) {
+	case 1:
+		rdev->config.si.tile_config |= (0 << 0);
+		break;
+	case 2:
+		rdev->config.si.tile_config |= (1 << 0);
+		break;
+	case 4:
+		rdev->config.si.tile_config |= (2 << 0);
+		break;
+	case 8:
+	default:
+		/* XXX what about 12? */
+		rdev->config.si.tile_config |= (3 << 0);
+		break;
+	}
+	switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+	case 0: /* four banks */
+		rdev->config.si.tile_config |= 0 << 4;
+		break;
+	case 1: /* eight banks */
+		rdev->config.si.tile_config |= 1 << 4;
+		break;
+	case 2: /* sixteen banks */
+	default:
+		rdev->config.si.tile_config |= 2 << 4;
+		break;
+	}
+	rdev->config.si.tile_config |=
+		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+	rdev->config.si.tile_config |=
+		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
+
+	WREG32(GB_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMIF_ADDR_CALC, gb_addr_config);
+	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
+	if (rdev->has_uvd) {
+		WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+		WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+		WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+	}
+
+	si_tiling_mode_table_init(rdev);
+
+	si_setup_rb(rdev, rdev->config.si.max_shader_engines,
+		    rdev->config.si.max_sh_per_se,
+		    rdev->config.si.max_backends_per_se);
+
+	si_setup_spi(rdev, rdev->config.si.max_shader_engines,
+		     rdev->config.si.max_sh_per_se,
+		     rdev->config.si.max_cu_per_sh);
+
+	/* set HW defaults for 3D engine */
+	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
+				     ROQ_IB2_START(0x2b)));
+	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+	sx_debug_1 = RREG32(SX_DEBUG_1);
+	WREG32(SX_DEBUG_1, sx_debug_1);
+
+	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
+				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));
+
+	WREG32(VGT_NUM_INSTANCES, 1);
+
+	WREG32(CP_PERFMON_CNTL, 0);
+
+	WREG32(SQ_CONFIG, 0);
+
+	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+					  FORCE_EOV_MAX_REZ_CNT(255)));
+
+	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+	WREG32(CB_PERFCOUNTER0_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER0_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER1_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER1_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER2_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER2_SELECT1, 0);
+	WREG32(CB_PERFCOUNTER3_SELECT0, 0);
+	WREG32(CB_PERFCOUNTER3_SELECT1, 0);
+
+	tmp = RREG32(HDP_MISC_CNTL);
+	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
+	WREG32(HDP_MISC_CNTL, tmp);
+
+	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+	udelay(50);
+}
+
+/*
+ * GPU scratch registers helpers function.
+ */
+static void si_scratch_init(struct radeon_device *rdev)
+{
+	int i;
+
+	rdev->scratch.num_reg = 7;
+	rdev->scratch.reg_base = SCRATCH_REG0;
+	for (i = 0; i < rdev->scratch.num_reg; i++) {
+		rdev->scratch.free[i] = true;
+		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
+	}
+}
+
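+/**
+ * si_fence_ring_emit - emit a fence on the gfx ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: fence to emit
+ *
+ * Flush the GPU caches over the GART, then write the fence sequence
+ * number with an EVENT_WRITE_EOP packet and trigger an interrupt (SI).
+ */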
+void si_fence_ring_emit(struct radeon_device *rdev,
+			struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* flush read cache over gart */
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+			  PACKET3_TC_ACTION_ENA |
+			  PACKET3_SH_KCACHE_ACTION_ENA |
+			  PACKET3_SH_ICACHE_ACTION_ENA);
+	radeon_ring_write(ring, 0xFFFFFFFF);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 10); /* poll interval */
+	/* EVENT_WRITE_EOP - flush caches, send int */
+	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, 0);
+}
+
+/*
+ * IB stuff
+ */
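+/**
+ * si_ring_ib_execute - schedule an IB on the gfx ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to schedule
+ *
+ * Emit an INDIRECT_BUFFER (or, after a SWITCH_BUFFER packet, an
+ * INDIRECT_BUFFER_CONST) packet referencing the IB, updating the
+ * saved read pointer and flushing the read caches for the IB's
+ * VM id as needed (SI).
+ */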
+void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	u32 header;
+
+	if (ib->is_const_ib) {
+		/* set switch buffer packet before const IB */
+		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		radeon_ring_write(ring, 0);
+
+		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
+	} else {
+		u32 next_rptr;
+		if (ring->rptr_save_reg) {
+			next_rptr = ring->wptr + 3 + 4 + 8;
+			radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+			radeon_ring_write(ring, ((ring->rptr_save_reg -
+						  PACKET3_SET_CONFIG_REG_START) >> 2));
+			radeon_ring_write(ring, next_rptr);
+		} else if (rdev->wb.enabled) {
+			next_rptr = ring->wptr + 5 + 4 + 8;
+			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+			radeon_ring_write(ring, (1 << 8));
+			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+			radeon_ring_write(ring, next_rptr);
+		}
+
+		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+	}
+
+	radeon_ring_write(ring, header);
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+	radeon_ring_write(ring, ib->length_dw |
+			  (ib->vm ? (ib->vm->id << 24) : 0));
+
+	if (!ib->is_const_ib) {
+		/* flush read cache over gart for this vmid */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
+		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+				  PACKET3_TC_ACTION_ENA |
+				  PACKET3_SH_KCACHE_ACTION_ENA |
+				  PACKET3_SH_ICACHE_ACTION_ENA);
+		radeon_ring_write(ring, 0xFFFFFFFF);
+		radeon_ring_write(ring, 0);
+		radeon_ring_write(ring, 10); /* poll interval */
+	}
+}
+
+/*
+ * CP.
+ */
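+/**
+ * si_cp_enable - enable/disable the command processor
+ *
+ * @rdev: radeon_device pointer
+ * @enable: enable or disable the MEs
+ *
+ * Halt or unhalt the CP PFP, ME and CE. When disabling, the gfx and
+ * the two compute rings are also marked as not ready (SI).
+ */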
+static void si_cp_enable(struct radeon_device *rdev, bool enable)
+{
+	if (enable) {
+		WREG32(CP_ME_CNTL, 0);
+	} else {
+		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
+		WREG32(SCRATCH_UMSK, 0);
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	}
+	udelay(50);
+}
+
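+/**
+ * si_cp_load_microcode - load the CP microcode
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Halt the CP and write the PFP, CE and ME ucode images into the
+ * corresponding ucode RAMs. Returns 0 on success, -EINVAL if the
+ * firmware has not been loaded (SI).
+ */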
+static int si_cp_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	int i;
+
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
+		return -EINVAL;
+
+	si_cp_enable(rdev, false);
+
+	/* PFP */
+	fw_data = (const __be32 *)rdev->pfp_fw->data;
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
+		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+
+	/* CE */
+	fw_data = (const __be32 *)rdev->ce_fw->data;
+	WREG32(CP_CE_UCODE_ADDR, 0);
+	for (i = 0; i < SI_CE_UCODE_SIZE; i++)
+		WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_CE_UCODE_ADDR, 0);
+
+	/* ME */
+	fw_data = (const __be32 *)rdev->me_fw->data;
+	WREG32(CP_ME_RAM_WADDR, 0);
+	for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
+		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+	WREG32(CP_ME_RAM_WADDR, 0);
+
+	WREG32(CP_PFP_UCODE_ADDR, 0);
+	WREG32(CP_CE_UCODE_ADDR, 0);
+	WREG32(CP_ME_RAM_WADDR, 0);
+	WREG32(CP_ME_RAM_RADDR, 0);
+	return 0;
+}
+
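+/**
+ * si_cp_start - initialize the CP rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Send ME_INITIALIZE, set up the CE partitions, emit the default
+ * context state on the gfx ring and clear the compute context state
+ * on the two compute rings (SI).
+ * Returns 0 on success, error otherwise.
+ */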
+static int si_cp_start(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r, i;
+
+	r = radeon_ring_lock(rdev, ring, 7 + 4);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	/* init the CP */
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+
+	/* init the CE partitions */
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
+	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
+	radeon_ring_write(ring, 0xc000);
+	radeon_ring_write(ring, 0xe000);
+	radeon_ring_unlock_commit(rdev, ring);
+
+	si_cp_enable(rdev, true);
+
+	r = radeon_ring_lock(rdev, ring, si_default_size + 10);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+
+	/* setup clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	for (i = 0; i < si_default_size; i++)
+		radeon_ring_write(ring, si_default_state[i]);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	/* set clear context state */
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
+
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+	radeon_ring_unlock_commit(rdev, ring);
+
+	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
+		ring = &rdev->ring[i];
+		r = radeon_ring_lock(rdev, ring, 2);
+		if (r) {
+			DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+			return r;
+		}
+
+		/* clear the compute context state */
+		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
+		radeon_ring_write(ring, 0);
+
+		radeon_ring_unlock_commit(rdev, ring);
+	}
+
+	return 0;
+}
+
+static void si_cp_fini(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	si_cp_enable(rdev, false);
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	radeon_ring_fini(rdev, ring);
+	radeon_scratch_free(rdev, ring->rptr_save_reg);
+}
+
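+/**
+ * si_cp_resume - start up the CP rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Soft-reset the CP, program the ring buffer base, size and
+ * read/write pointers for all three rings, then start and test them.
+ * Returns 0 on success, error if the gfx ring test fails (SI).
+ */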
+static int si_cp_resume(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	u32 tmp;
+	u32 rb_bufsz;
+	int r;
+
+	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+				 SOFT_RESET_PA |
+				 SOFT_RESET_VGT |
+				 SOFT_RESET_SPI |
+				 SOFT_RESET_SX));
+	RREG32(GRBM_SOFT_RESET);
+	mdelay(15);
+	WREG32(GRBM_SOFT_RESET, 0);
+	RREG32(GRBM_SOFT_RESET);
+
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+
+	/* Set the write pointer delay */
+	WREG32(CP_RB_WPTR_DELAY, 0);
+
+	WREG32(CP_DEBUG, 0);
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+	/* ring 0 - compute and gfx */
+	/* Set ring buffer size */
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
+	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB0_CNTL, tmp);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB0_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+
+	if (rdev->wb.enabled) {
+		WREG32(SCRATCH_UMSK, 0xff);
+	} else {
+		tmp |= RB_NO_UPDATE;
+		WREG32(SCRATCH_UMSK, 0);
+	}
+
+	mdelay(1);
+	WREG32(CP_RB0_CNTL, tmp);
+
+	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+
+	ring->rptr = RREG32(CP_RB0_RPTR);
+
+	/* ring1  - compute only */
+	/* Set ring buffer size */
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
+	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB1_CNTL, tmp);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB1_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
+
+	mdelay(1);
+	WREG32(CP_RB1_CNTL, tmp);
+
+	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+
+	ring->rptr = RREG32(CP_RB1_RPTR);
+
+	/* ring2 - compute only */
+	/* Set ring buffer size */
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
+	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+	tmp |= BUF_SWAP_32BIT;
+#endif
+	WREG32(CP_RB2_CNTL, tmp);
+
+	/* Initialize the ring buffer's read and write pointers */
+	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+	ring->wptr = 0;
+	WREG32(CP_RB2_WPTR, ring->wptr);
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
+
+	mdelay(1);
+	WREG32(CP_RB2_CNTL, tmp);
+
+	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+
+	ring->rptr = RREG32(CP_RB2_RPTR);
+
+	/* start the rings */
+	si_cp_start(rdev);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
+	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
+	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+	if (r) {
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+		return r;
+	}
+	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
+	if (r) {
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+	}
+	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+	if (r) {
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	}
+
+	return 0;
+}
+
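+/**
+ * si_gpu_check_soft_reset - determine which blocks are hung
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Inspect the GRBM, SRBM, DMA and VM status registers and build a
+ * mask of RADEON_RESET_* flags for the blocks that appear busy (SI).
+ */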
+static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
+{
+	u32 reset_mask = 0;
+	u32 tmp;
+
+	/* GRBM_STATUS */
+	tmp = RREG32(GRBM_STATUS);
+	if (tmp & (PA_BUSY | SC_BUSY |
+		   BCI_BUSY | SX_BUSY |
+		   TA_BUSY | VGT_BUSY |
+		   DB_BUSY | CB_BUSY |
+		   GDS_BUSY | SPI_BUSY |
+		   IA_BUSY | IA_BUSY_NO_DMA))
+		reset_mask |= RADEON_RESET_GFX;
+
+	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+		   CP_BUSY | CP_COHERENCY_BUSY))
+		reset_mask |= RADEON_RESET_CP;
+
+	if (tmp & GRBM_EE_BUSY)
+		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+	/* GRBM_STATUS2 */
+	tmp = RREG32(GRBM_STATUS2);
+	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+		reset_mask |= RADEON_RESET_RLC;
+
+	/* DMA_STATUS_REG 0 */
+	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA;
+
+	/* DMA_STATUS_REG 1 */
+	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA1;
+
+	/* SRBM_STATUS2 */
+	tmp = RREG32(SRBM_STATUS2);
+	if (tmp & DMA_BUSY)
+		reset_mask |= RADEON_RESET_DMA;
+
+	if (tmp & DMA1_BUSY)
+		reset_mask |= RADEON_RESET_DMA1;
+
+	/* SRBM_STATUS */
+	tmp = RREG32(SRBM_STATUS);
+
+	if (tmp & IH_BUSY)
+		reset_mask |= RADEON_RESET_IH;
+
+	if (tmp & SEM_BUSY)
+		reset_mask |= RADEON_RESET_SEM;
+
+	if (tmp & GRBM_RQ_PENDING)
+		reset_mask |= RADEON_RESET_GRBM;
+
+	if (tmp & VMC_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+		   MCC_BUSY | MCD_BUSY))
+		reset_mask |= RADEON_RESET_MC;
+
+	if (evergreen_is_display_hung(rdev))
+		reset_mask |= RADEON_RESET_DISPLAY;
+
+	/* VM_L2_STATUS */
+	tmp = RREG32(VM_L2_STATUS);
+	if (tmp & L2_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
+	return reset_mask;
+}
+
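+/**
+ * si_gpu_soft_reset - soft reset the requested blocks
+ *
+ * @rdev: radeon_device pointer
+ * @reset_mask: mask of RADEON_RESET_* flags for the blocks to reset
+ *
+ * Halt the CP and DMA engines, stop the memory controller, then
+ * pulse the matching GRBM/SRBM soft reset bits and resume the MC (SI).
+ */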
+static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct evergreen_mc_save save;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
+
+	if (reset_mask == 0)
+		return;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	evergreen_print_gpu_status_regs(rdev);
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		/* dma0 */
+		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+	}
+	if (reset_mask & RADEON_RESET_DMA1) {
+		/* dma1 */
+		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+	}
+
+	udelay(50);
+
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
+		grbm_soft_reset = SOFT_RESET_CB |
+			SOFT_RESET_DB |
+			SOFT_RESET_GDS |
+			SOFT_RESET_PA |
+			SOFT_RESET_SC |
+			SOFT_RESET_BCI |
+			SOFT_RESET_SPI |
+			SOFT_RESET_SX |
+			SOFT_RESET_TC |
+			SOFT_RESET_TA |
+			SOFT_RESET_VGT |
+			SOFT_RESET_IA;
+	}
+
+	if (reset_mask & RADEON_RESET_CP) {
+		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+	}
+
+	if (reset_mask & RADEON_RESET_DMA)
+		srbm_soft_reset |= SOFT_RESET_DMA;
+
+	if (reset_mask & RADEON_RESET_DMA1)
+		srbm_soft_reset |= SOFT_RESET_DMA1;
+
+	if (reset_mask & RADEON_RESET_DISPLAY)
+		srbm_soft_reset |= SOFT_RESET_DC;
+
+	if (reset_mask & RADEON_RESET_RLC)
+		grbm_soft_reset |= SOFT_RESET_RLC;
+
+	if (reset_mask & RADEON_RESET_SEM)
+		srbm_soft_reset |= SOFT_RESET_SEM;
+
+	if (reset_mask & RADEON_RESET_IH)
+		srbm_soft_reset |= SOFT_RESET_IH;
+
+	if (reset_mask & RADEON_RESET_GRBM)
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+
+	if (reset_mask & RADEON_RESET_VMC)
+		srbm_soft_reset |= SOFT_RESET_VMC;
+
+	if (reset_mask & RADEON_RESET_MC)
+		srbm_soft_reset |= SOFT_RESET_MC;
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(GRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+	}
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
+	evergreen_mc_resume(rdev, &save);
+	udelay(50);
+
+	evergreen_print_gpu_status_regs(rdev);
+}
+
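+/**
+ * si_asic_reset - soft reset the GPU if it is hung
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check which blocks are hung, soft reset them and re-check; the
+ * BIOS scratch "engine hung" flag is updated accordingly (SI).
+ * Returns 0.
+ */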
+int si_asic_reset(struct radeon_device *rdev)
+{
+	u32 reset_mask;
+
+	reset_mask = si_gpu_check_soft_reset(rdev);
+
+	if (reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, true);
+
+	si_gpu_soft_reset(rdev, reset_mask);
+
+	reset_mask = si_gpu_check_soft_reset(rdev);
+
+	if (!reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, false);
+
+	return 0;
+}
+
+/**
+ * si_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = si_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & (RADEON_RESET_GFX |
+			    RADEON_RESET_COMPUTE |
+			    RADEON_RESET_CP))) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force CP activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * si_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = si_gpu_check_soft_reset(rdev);
+	u32 mask;
+
+	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+		mask = RADEON_RESET_DMA;
+	else
+		mask = RADEON_RESET_DMA1;
+
+	if (!(reset_mask & mask)) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force ring activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
+}
+
+/* MC */
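+/**
+ * si_mc_program - program the memory controller
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the VRAM system aperture and FB location while the MC is
+ * stopped and, on parts with display hardware, disable the VGA
+ * renderer so it cannot scribble on our objects (SI).
+ */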
+static void si_mc_program(struct radeon_device *rdev)
+{
+	struct evergreen_mc_save save;
+	u32 tmp;
+	int i, j;
+
+	/* Initialize HDP */
+	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+		WREG32((0x2c14 + j), 0x00000000);
+		WREG32((0x2c18 + j), 0x00000000);
+		WREG32((0x2c1c + j), 0x00000000);
+		WREG32((0x2c20 + j), 0x00000000);
+		WREG32((0x2c24 + j), 0x00000000);
+	}
+	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+	evergreen_mc_stop(rdev, &save);
+	if (radeon_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	if (!ASIC_IS_NODCE(rdev))
+		/* Lockout access through VGA aperture */
+		WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Update configuration */
+	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+	       rdev->mc.vram_start >> 12);
+	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+	       rdev->mc.vram_end >> 12);
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+	       rdev->vram_scratch.gpu_addr >> 12);
+	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+	WREG32(MC_VM_FB_LOCATION, tmp);
+	/* XXX double check these! */
+	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	WREG32(MC_VM_AGP_BASE, 0);
+	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	if (radeon_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+	evergreen_mc_resume(rdev, &save);
+	if (!ASIC_IS_NODCE(rdev)) {
+		/* we need to own VRAM, so turn off the VGA renderer here
+		 * to stop it from overwriting our objects */
+		rv515_vga_render_disable(rdev);
+	}
+}
+
+static void si_vram_gtt_location(struct radeon_device *rdev,
+				 struct radeon_mc *mc)
+{
+	if (mc->mc_vram_size > 0xFFC0000000ULL) {
+		/* leave room for at least 1024M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xFFC0000000ULL;
+		mc->mc_vram_size = 0xFFC0000000ULL;
+	}
+	radeon_vram_location(rdev, &rdev->mc, 0);
+	rdev->mc.gtt_base_align = 0;
+	radeon_gtt_location(rdev, mc);
+}
+
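+/**
+ * si_mc_init - initialize the memory controller parameters
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Query the memory channel size and count to derive the VRAM width,
+ * read the VRAM size from CONFIG_MEMSIZE and place the VRAM and GTT
+ * apertures (SI).
+ * Returns 0.
+ */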
+static int si_mc_init(struct radeon_device *rdev)
+{
+	u32 tmp;
+	int chansize, numchan;
+
+	/* Get VRAM information */
+	rdev->mc.vram_is_ddr = true;
+	tmp = RREG32(MC_ARB_RAMCFG);
+	if (tmp & CHANSIZE_OVERRIDE) {
+		chansize = 16;
+	} else if (tmp & CHANSIZE_MASK) {
+		chansize = 64;
+	} else {
+		chansize = 32;
+	}
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		numchan = 1;
+		break;
+	case 1:
+		numchan = 2;
+		break;
+	case 2:
+		numchan = 4;
+		break;
+	case 3:
+		numchan = 8;
+		break;
+	case 4:
+		numchan = 3;
+		break;
+	case 5:
+		numchan = 6;
+		break;
+	case 6:
+		numchan = 10;
+		break;
+	case 7:
+		numchan = 12;
+		break;
+	case 8:
+		numchan = 16;
+		break;
+	}
+	rdev->mc.vram_width = numchan * chansize;
+	/* Could aper size report 0? */
+	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+	/* size in MB on si */
+	tmp = RREG32(CONFIG_MEMSIZE);
+	/* some boards may have garbage in the upper 16 bits */
+	if (tmp & 0xffff0000) {
+		DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
+		if (tmp & 0xffff)
+			tmp &= 0xffff;
+	}
+	rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
+	rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
+	rdev->mc.visible_vram_size = rdev->mc.aper_size;
+	si_vram_gtt_location(rdev, &rdev->mc);
+	radeon_update_bandwidth_info(rdev);
+
+	return 0;
+}
+
+/*
+ * GART
+ */
+void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	/* flush hdp cache */
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+	/* bits 0-15 are the VM contexts0-15 */
+	WREG32(VM_INVALIDATE_REQUEST, 1);
+}
+
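+/**
+ * si_pcie_gart_enable - set up the PCIE GART
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Pin the GART page table in VRAM, program the TLB and L2 cache
+ * controls, set up VM context 0 for the GTT range and contexts 1-15
+ * for per-process VMs, then flush the TLB (SI).
+ * Returns 0 on success, error otherwise.
+ */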
+static int si_pcie_gart_enable(struct radeon_device *rdev)
+{
+	int r, i;
+
+	if (rdev->gart.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
+	radeon_gart_restore(rdev);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL,
+	       (0xA << 7) |
+	       ENABLE_L1_TLB |
+	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       ENABLE_ADVANCED_DRIVER_MODEL |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+	/* setup context0 */
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+			(u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT0_CNTL2, 0);
+	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+
+	WREG32(0x15D4, 0);
+	WREG32(0x15D8, 0);
+	WREG32(0x15DC, 0);
+
+	/* empty context1-15 */
+	/* set vm size, must be a multiple of 4 */
+	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
+	/* Assign the pt base to something valid for now; the pts used for
+	 * the VMs are determined by the application and set up and assigned
+	 * on the fly in the vm part of radeon_gart.c
+	 */
+	for (i = 1; i < 16; i++) {
+		if (i < 8)
+			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+			       rdev->gart.table_addr >> 12);
+		else
+			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
+			       rdev->gart.table_addr >> 12);
+	}
+
+	/* enable context1-15 */
+	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+	       (u32)(rdev->dummy_page.addr >> 12));
+	WREG32(VM_CONTEXT1_CNTL2, 4);
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
+				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+
+	si_pcie_gart_tlb_flush(rdev);
+	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+		 (unsigned)(rdev->mc.gtt_size >> 20),
+		 (unsigned long long)rdev->gart.table_addr);
+	rdev->gart.ready = true;
+	return 0;
+}
+
+static void si_pcie_gart_disable(struct radeon_device *rdev)
+{
+	/* Disable all tables */
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
+	/* Setup TLB control */
+	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	/* Setup L2 cache */
+	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+	       EFFECTIVE_L2_QUEUE_SIZE(7) |
+	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
+	WREG32(VM_L2_CNTL2, 0);
+	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+	radeon_gart_table_vram_unpin(rdev);
+}
+
+static void si_pcie_gart_fini(struct radeon_device *rdev)
+{
+	si_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
+
+/* vm parser */
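+/**
+ * si_vm_reg_valid - check if a register may be written from a VM IB
+ *
+ * @reg: register offset to check
+ *
+ * Context registers (0x28000 and up) are always allowed; of the
+ * config registers only a small whitelist may be written from a
+ * VM IB (SI).
+ * Returns true if the register is allowed, false otherwise.
+ */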
+static bool si_vm_reg_valid(u32 reg)
+{
+	/* context regs are fine */
+	if (reg >= 0x28000)
+		return true;
+
+	/* check config regs */
+	switch (reg) {
+	case GRBM_GFX_INDEX:
+	case CP_STRMOUT_CNTL:
+	case VGT_VTX_VECT_EJECT_REG:
+	case VGT_CACHE_INVALIDATION:
+	case VGT_ESGS_RING_SIZE:
+	case VGT_GSVS_RING_SIZE:
+	case VGT_GS_VERTEX_REUSE:
+	case VGT_PRIMITIVE_TYPE:
+	case VGT_INDEX_TYPE:
+	case VGT_NUM_INDICES:
+	case VGT_NUM_INSTANCES:
+	case VGT_TF_RING_SIZE:
+	case VGT_HS_OFFCHIP_PARAM:
+	case VGT_TF_MEMORY_BASE:
+	case PA_CL_ENHANCE:
+	case PA_SU_LINE_STIPPLE_VALUE:
+	case PA_SC_LINE_STIPPLE_STATE:
+	case PA_SC_ENHANCE:
+	case SQC_CACHES:
+	case SPI_STATIC_THREAD_MGMT_1:
+	case SPI_STATIC_THREAD_MGMT_2:
+	case SPI_STATIC_THREAD_MGMT_3:
+	case SPI_PS_MAX_WAVE_ID:
+	case SPI_CONFIG_CNTL:
+	case SPI_CONFIG_CNTL_1:
+	case TA_CNTL_AUX:
+		return true;
+	default:
+		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
+		return false;
+	}
+}
+
+static int si_vm_packet3_ce_check(struct radeon_device *rdev,
+				  u32 *ib, struct radeon_cs_packet *pkt)
+{
+	switch (pkt->opcode) {
+	case PACKET3_NOP:
+	case PACKET3_SET_BASE:
+	case PACKET3_SET_CE_DE_COUNTERS:
+	case PACKET3_LOAD_CONST_RAM:
+	case PACKET3_WRITE_CONST_RAM:
+	case PACKET3_WRITE_CONST_RAM_OFFSET:
+	case PACKET3_DUMP_CONST_RAM:
+	case PACKET3_INCREMENT_CE_COUNTER:
+	case PACKET3_WAIT_ON_DE_COUNTER:
+	case PACKET3_CE_WRITE:
+		break;
+	default:
+		DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
+{
+	u32 start_reg, reg, i;
+	u32 command = ib[idx + 4];
+	u32 info = ib[idx + 1];
+	u32 idx_value = ib[idx];
+	if (command & PACKET3_CP_DMA_CMD_SAS) {
+		/* src address space is register */
+		if (((info & 0x60000000) >> 29) == 0) {
+			start_reg = idx_value << 2;
+			if (command & PACKET3_CP_DMA_CMD_SAIC) {
+				reg = start_reg;
+				if (!si_vm_reg_valid(reg)) {
+					DRM_ERROR("CP DMA Bad SRC register\n");
+					return -EINVAL;
+				}
+			} else {
+				for (i = 0; i < (command & 0x1fffff); i++) {
+					reg = start_reg + (4 * i);
+					if (!si_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad SRC register\n");
+						return -EINVAL;
+					}
+				}
+			}
+		}
+	}
+	if (command & PACKET3_CP_DMA_CMD_DAS) {
+		/* dst address space is register */
+		if (((info & 0x00300000) >> 20) == 0) {
+			start_reg = ib[idx + 2];
+			if (command & PACKET3_CP_DMA_CMD_DAIC) {
+				reg = start_reg;
+				if (!si_vm_reg_valid(reg)) {
+					DRM_ERROR("CP DMA Bad DST register\n");
+					return -EINVAL;
+				}
+			} else {
+				for (i = 0; i < (command & 0x1fffff); i++) {
+					reg = start_reg + (4 * i);
+					if (!si_vm_reg_valid(reg)) {
+						DRM_ERROR("CP DMA Bad DST register\n");
+						return -EINVAL;
+					}
+				}
+			}
+		}
+	}
+	return 0;
+}
+
+static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
+				   u32 *ib, struct radeon_cs_packet *pkt)
+{
+	int r;
+	u32 idx = pkt->idx + 1;
+	u32 idx_value = ib[idx];
+	u32 start_reg, end_reg, reg, i;
+
+	switch (pkt->opcode) {
+	case PACKET3_NOP:
+	case PACKET3_SET_BASE:
+	case PACKET3_CLEAR_STATE:
+	case PACKET3_INDEX_BUFFER_SIZE:
+	case PACKET3_DISPATCH_DIRECT:
+	case PACKET3_DISPATCH_INDIRECT:
+	case PACKET3_ALLOC_GDS:
+	case PACKET3_WRITE_GDS_RAM:
+	case PACKET3_ATOMIC_GDS:
+	case PACKET3_ATOMIC:
+	case PACKET3_OCCLUSION_QUERY:
+	case PACKET3_SET_PREDICATION:
+	case PACKET3_COND_EXEC:
+	case PACKET3_PRED_EXEC:
+	case PACKET3_DRAW_INDIRECT:
+	case PACKET3_DRAW_INDEX_INDIRECT:
+	case PACKET3_INDEX_BASE:
+	case PACKET3_DRAW_INDEX_2:
+	case PACKET3_CONTEXT_CONTROL:
+	case PACKET3_INDEX_TYPE:
+	case PACKET3_DRAW_INDIRECT_MULTI:
+	case PACKET3_DRAW_INDEX_AUTO:
+	case PACKET3_DRAW_INDEX_IMMD:
+	case PACKET3_NUM_INSTANCES:
+	case PACKET3_DRAW_INDEX_MULTI_AUTO:
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+	case PACKET3_DRAW_INDEX_OFFSET_2:
+	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+	case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
+	case PACKET3_MPEG_INDEX:
+	case PACKET3_WAIT_REG_MEM:
+	case PACKET3_MEM_WRITE:
+	case PACKET3_PFP_SYNC_ME:
+	case PACKET3_SURFACE_SYNC:
+	case PACKET3_EVENT_WRITE:
+	case PACKET3_EVENT_WRITE_EOP:
+	case PACKET3_EVENT_WRITE_EOS:
+	case PACKET3_SET_CONTEXT_REG:
+	case PACKET3_SET_CONTEXT_REG_INDIRECT:
+	case PACKET3_SET_SH_REG:
+	case PACKET3_SET_SH_REG_OFFSET:
+	case PACKET3_INCREMENT_DE_COUNTER:
+	case PACKET3_WAIT_ON_CE_COUNTER:
+	case PACKET3_WAIT_ON_AVAIL_BUFFER:
+	case PACKET3_ME_WRITE:
+		break;
+	case PACKET3_COPY_DATA:
+		if ((idx_value & 0xf00) == 0) {
+			reg = ib[idx + 3] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_WRITE_DATA:
+		if ((idx_value & 0xf00) == 0) {
+			start_reg = ib[idx + 1] * 4;
+			if (idx_value & 0x10000) {
+				if (!si_vm_reg_valid(start_reg))
+					return -EINVAL;
+			} else {
+				for (i = 0; i < (pkt->count - 2); i++) {
+					reg = start_reg + (4 * i);
+					if (!si_vm_reg_valid(reg))
+						return -EINVAL;
+				}
+			}
+		}
+		break;
+	case PACKET3_COND_WRITE:
+		if (idx_value & 0x100) {
+			reg = ib[idx + 5] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_COPY_DW:
+		if (idx_value & 0x2) {
+			reg = ib[idx + 3] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_SET_CONFIG_REG:
+		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+		end_reg = 4 * pkt->count + start_reg - 4;
+		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+			return -EINVAL;
+		}
+		for (i = 0; i < pkt->count; i++) {
+			reg = start_reg + (4 * i);
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_CP_DMA:
+		r = si_vm_packet3_cp_dma_check(ib, idx);
+		if (r)
+			return r;
+		break;
+	default:
+		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int si_vm_packet3_compute_check(struct radeon_device *rdev,
+				       u32 *ib, struct radeon_cs_packet *pkt)
+{
+	int r;
+	u32 idx = pkt->idx + 1;
+	u32 idx_value = ib[idx];
+	u32 start_reg, reg, i;
+
+	switch (pkt->opcode) {
+	case PACKET3_NOP:
+	case PACKET3_SET_BASE:
+	case PACKET3_CLEAR_STATE:
+	case PACKET3_DISPATCH_DIRECT:
+	case PACKET3_DISPATCH_INDIRECT:
+	case PACKET3_ALLOC_GDS:
+	case PACKET3_WRITE_GDS_RAM:
+	case PACKET3_ATOMIC_GDS:
+	case PACKET3_ATOMIC:
+	case PACKET3_OCCLUSION_QUERY:
+	case PACKET3_SET_PREDICATION:
+	case PACKET3_COND_EXEC:
+	case PACKET3_PRED_EXEC:
+	case PACKET3_CONTEXT_CONTROL:
+	case PACKET3_STRMOUT_BUFFER_UPDATE:
+	case PACKET3_WAIT_REG_MEM:
+	case PACKET3_MEM_WRITE:
+	case PACKET3_PFP_SYNC_ME:
+	case PACKET3_SURFACE_SYNC:
+	case PACKET3_EVENT_WRITE:
+	case PACKET3_EVENT_WRITE_EOP:
+	case PACKET3_EVENT_WRITE_EOS:
+	case PACKET3_SET_CONTEXT_REG:
+	case PACKET3_SET_CONTEXT_REG_INDIRECT:
+	case PACKET3_SET_SH_REG:
+	case PACKET3_SET_SH_REG_OFFSET:
+	case PACKET3_INCREMENT_DE_COUNTER:
+	case PACKET3_WAIT_ON_CE_COUNTER:
+	case PACKET3_WAIT_ON_AVAIL_BUFFER:
+	case PACKET3_ME_WRITE:
+		break;
+	case PACKET3_COPY_DATA:
+		if ((idx_value & 0xf00) == 0) {
+			reg = ib[idx + 3] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_WRITE_DATA:
+		if ((idx_value & 0xf00) == 0) {
+			start_reg = ib[idx + 1] * 4;
+			if (idx_value & 0x10000) {
+				if (!si_vm_reg_valid(start_reg))
+					return -EINVAL;
+			} else {
+				for (i = 0; i < (pkt->count - 2); i++) {
+					reg = start_reg + (4 * i);
+					if (!si_vm_reg_valid(reg))
+						return -EINVAL;
+				}
+			}
+		}
+		break;
+	case PACKET3_COND_WRITE:
+		if (idx_value & 0x100) {
+			reg = ib[idx + 5] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_COPY_DW:
+		if (idx_value & 0x2) {
+			reg = ib[idx + 3] * 4;
+			if (!si_vm_reg_valid(reg))
+				return -EINVAL;
+		}
+		break;
+	case PACKET3_CP_DMA:
+		r = si_vm_packet3_cp_dma_check(ib, idx);
+		if (r)
+			return r;
+		break;
+	default:
+		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
+		return -EINVAL;
+	}
+	return 0;
+}
+
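+/**
+ * si_ib_parse - validate a VM IB
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to validate
+ *
+ * Walk the packets in the IB, rejecting packet0s and any packet3
+ * opcodes that are not allowed on the CE, gfx or compute rings (SI).
+ * Returns 0 if the IB is valid, -EINVAL otherwise.
+ */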
+int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	int ret = 0;
+	u32 idx = 0;
+	struct radeon_cs_packet pkt;
+
+	do {
+		pkt.idx = idx;
+		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
+		pkt.one_reg_wr = 0;
+		switch (pkt.type) {
+		case RADEON_PACKET_TYPE0:
+			dev_err(rdev->dev, "Packet0 not allowed!\n");
+			ret = -EINVAL;
+			break;
+		case RADEON_PACKET_TYPE2:
+			idx += 1;
+			break;
+		case RADEON_PACKET_TYPE3:
+			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+			if (ib->is_const_ib)
+				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
+			else {
+				switch (ib->ring) {
+				case RADEON_RING_TYPE_GFX_INDEX:
+					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
+					break;
+				case CAYMAN_RING_TYPE_CP1_INDEX:
+				case CAYMAN_RING_TYPE_CP2_INDEX:
+					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
+					break;
+				default:
+					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
+					ret = -EINVAL;
+					break;
+				}
+			}
+			idx += pkt.count + 2;
+			break;
+		default:
+			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+			ret = -EINVAL;
+			break;
+		}
+		if (ret)
+			break;
+	} while (idx < ib->length_dw);
+
+	return ret;
+}
+
+/*
+ * vm
+ */
+int si_vm_init(struct radeon_device *rdev)
+{
+	/* number of VMs */
+	rdev->vm_manager.nvm = 16;
+	/* base offset of vram pages */
+	rdev->vm_manager.vram_base_offset = 0;
+
+	return 0;
+}
+
+void si_vm_fini(struct radeon_device *rdev)
+{
+}
+
+/**
+ * si_vm_set_page - update the page tables using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the CP (SI).
+ */
+void si_vm_set_page(struct radeon_device *rdev,
+		    struct radeon_ib *ib,
+		    uint64_t pe,
+		    uint64_t addr, unsigned count,
+		    uint32_t incr, uint32_t flags)
+{
+	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+	uint64_t value;
+	unsigned ndw;
+
+	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+		while (count) {
+			ndw = 2 + count * 2;
+			if (ndw > 0x3FFE)
+				ndw = 0x3FFE;
+
+			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
+			ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
+					WRITE_DATA_DST_SEL(1));
+			ib->ptr[ib->length_dw++] = pe;
+			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+			for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+				if (flags & RADEON_VM_PAGE_SYSTEM) {
+					value = radeon_vm_map_gart(rdev, addr);
+					value &= 0xFFFFFFFFFFFFF000ULL;
+				} else if (flags & RADEON_VM_PAGE_VALID) {
+					value = addr;
+				} else {
+					value = 0;
+				}
+				addr += incr;
+				value |= r600_flags;
+				ib->ptr[ib->length_dw++] = value;
+				ib->ptr[ib->length_dw++] = upper_32_bits(value);
+			}
+		}
+	} else {
+		/* DMA */
+		if (flags & RADEON_VM_PAGE_SYSTEM) {
+			while (count) {
+				ndw = count * 2;
+				if (ndw > 0xFFFFE)
+					ndw = 0xFFFFE;
+
+				/* for non-physically contiguous pages (system) */
+				ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+				ib->ptr[ib->length_dw++] = pe;
+				ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+				for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+					if (flags & RADEON_VM_PAGE_SYSTEM) {
+						value = radeon_vm_map_gart(rdev, addr);
+						value &= 0xFFFFFFFFFFFFF000ULL;
+					} else if (flags & RADEON_VM_PAGE_VALID) {
+						value = addr;
+					} else {
+						value = 0;
+					}
+					addr += incr;
+					value |= r600_flags;
+					ib->ptr[ib->length_dw++] = value;
+					ib->ptr[ib->length_dw++] = upper_32_bits(value);
+				}
+			}
+		} else {
+			while (count) {
+				ndw = count * 2;
+				if (ndw > 0xFFFFE)
+					ndw = 0xFFFFE;
+
+				if (flags & RADEON_VM_PAGE_VALID)
+					value = addr;
+				else
+					value = 0;
+				/* for physically contiguous pages (vram) */
+				ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+				ib->ptr[ib->length_dw++] = pe; /* dst addr */
+				ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+				ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+				ib->ptr[ib->length_dw++] = 0;
+				ib->ptr[ib->length_dw++] = value; /* value */
+				ib->ptr[ib->length_dw++] = upper_32_bits(value);
+				ib->ptr[ib->length_dw++] = incr; /* increment size */
+				ib->ptr[ib->length_dw++] = 0;
+				pe += ndw * 4;
+				addr += (ndw / 2) * incr;
+				count -= ndw / 2;
+			}
+		}
+		while (ib->length_dw & 0x7)
+			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+	}
+}
+
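+/**
+ * si_vm_flush - flush the TLB for a VM via the gfx ring
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: ring index
+ * @vm: vm to flush
+ *
+ * Write the VM's page directory base address, flush the HDP cache
+ * and request a TLB invalidate for the VM's context, then sync the
+ * PFP to the ME (SI).
+ */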
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	if (vm == NULL)
+		return;
+
+	/* write new base address */
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0)));
+
+	if (vm->id < 8) {
+		radeon_ring_write(ring,
+				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+	} else {
+		radeon_ring_write(ring,
+				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+	}
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0x1);
+
+	/* bits 0-15 are the VM contexts0-15 */
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 1 << vm->id);
+
+	/* sync PFP to ME, otherwise we might get invalid PFP reads */
+	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+	radeon_ring_write(ring, 0x0);
+}
+
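+/**
+ * si_dma_vm_flush - flush the TLB for a VM via the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: ring index
+ * @vm: vm to flush
+ *
+ * Use SRBM write packets on the async DMA ring to update the VM's
+ * page directory base, flush the HDP cache and invalidate the VM's
+ * TLB context (SI).
+ */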
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	if (vm == NULL)
+		return;
+
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	if (vm->id < 8) {
+		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+	} else {
+		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+	}
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+	/* flush hdp cache */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+	radeon_ring_write(ring, 1);
+
+	/* bits 0-7 are the VM contexts0-7 */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+	radeon_ring_write(ring, 1 << vm->id);
+}
+
+/*
+ * RLC
+ */
+void si_rlc_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	/* save restore block */
+	if (rdev->rlc.save_restore_obj) {
+		r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
+		radeon_bo_unpin(rdev->rlc.save_restore_obj);
+		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+
+		radeon_bo_unref(&rdev->rlc.save_restore_obj);
+		rdev->rlc.save_restore_obj = NULL;
+	}
+
+	/* clear state block */
+	if (rdev->rlc.clear_state_obj) {
+		r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
+		radeon_bo_unpin(rdev->rlc.clear_state_obj);
+		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+
+		radeon_bo_unref(&rdev->rlc.clear_state_obj);
+		rdev->rlc.clear_state_obj = NULL;
+	}
+}
+
+int si_rlc_init(struct radeon_device *rdev)
+{
+	int r;
+
+	/* save restore block */
+	if (rdev->rlc.save_restore_obj == NULL) {
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_VRAM, NULL,
+				     &rdev->rlc.save_restore_obj);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
+			return r;
+		}
+	}
+
+	r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+	if (unlikely(r != 0)) {
+		si_rlc_fini(rdev);
+		return r;
+	}
+	r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
+			  &rdev->rlc.save_restore_gpu_addr);
+	radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+	if (r) {
+		dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
+		si_rlc_fini(rdev);
+		return r;
+	}
+
+	/* clear state block */
+	if (rdev->rlc.clear_state_obj == NULL) {
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+				     RADEON_GEM_DOMAIN_VRAM, NULL,
+				     &rdev->rlc.clear_state_obj);
+		if (r) {
+			dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
+			si_rlc_fini(rdev);
+			return r;
+		}
+	}
+	r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+	if (unlikely(r != 0)) {
+		si_rlc_fini(rdev);
+		return r;
+	}
+	r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
+			  &rdev->rlc.clear_state_gpu_addr);
+	radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+	if (r) {
+		dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
+		si_rlc_fini(rdev);
+		return r;
+	}
+
+	return 0;
+}
+
+static void si_rlc_stop(struct radeon_device *rdev)
+{
+	WREG32(RLC_CNTL, 0);
+}
+
+static void si_rlc_start(struct radeon_device *rdev)
+{
+	WREG32(RLC_CNTL, RLC_ENABLE);
+}
+
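+/**
+ * si_rlc_resume - load the RLC microcode and start the RLC
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the RLC, program the save/restore and clear state base
+ * addresses, upload the RLC ucode and re-enable the RLC (SI).
+ * Returns 0 on success, -EINVAL if no RLC firmware is loaded.
+ */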
+static int si_rlc_resume(struct radeon_device *rdev)
+{
+	u32 i;
+	const __be32 *fw_data;
+
+	if (!rdev->rlc_fw)
+		return -EINVAL;
+
+	si_rlc_stop(rdev);
+
+	WREG32(RLC_RL_BASE, 0);
+	WREG32(RLC_RL_SIZE, 0);
+	WREG32(RLC_LB_CNTL, 0);
+	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
+	WREG32(RLC_LB_CNTR_INIT, 0);
+
+	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+
+	WREG32(RLC_MC_CNTL, 0);
+	WREG32(RLC_UCODE_CNTL, 0);
+
+	fw_data = (const __be32 *)rdev->rlc_fw->data;
+	for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
+		WREG32(RLC_UCODE_ADDR, i);
+		WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+	}
+	WREG32(RLC_UCODE_ADDR, 0);
+
+	si_rlc_start(rdev);
+
+	return 0;
+}
+
+static void si_enable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_cntl = RREG32(IH_CNTL);
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+	ih_cntl |= ENABLE_INTR;
+	ih_rb_cntl |= IH_RB_ENABLE;
+	WREG32(IH_CNTL, ih_cntl);
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	rdev->ih.enabled = true;
+}
+
+static void si_disable_interrupts(struct radeon_device *rdev)
+{
+	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+	u32 ih_cntl = RREG32(IH_CNTL);
+
+	ih_rb_cntl &= ~IH_RB_ENABLE;
+	ih_cntl &= ~ENABLE_INTR;
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+	WREG32(IH_CNTL, ih_cntl);
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+	rdev->ih.enabled = false;
+	rdev->ih.rptr = 0;
+}
+
+static void si_disable_interrupt_state(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	WREG32(CP_INT_CNTL_RING1, 0);
+	WREG32(CP_INT_CNTL_RING2, 0);
+	tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
+	tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
+	WREG32(GRBM_INT_CNTL, 0);
+	if (rdev->num_crtc >= 2) {
+		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 4) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
+
+	if (rdev->num_crtc >= 2) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
+
+	if (!ASIC_IS_NODCE(rdev)) {
+		WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
+
+		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+		tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+	}
+}
+
+static int si_irq_init(struct radeon_device *rdev)
+{
+	int ret = 0;
+	int rb_bufsz;
+	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+	/* allocate ring */
+	ret = r600_ih_ring_alloc(rdev);
+	if (ret)
+		return ret;
+
+	/* disable irqs */
+	si_disable_interrupts(rdev);
+
+	/* init rlc */
+	ret = si_rlc_resume(rdev);
+	if (ret) {
+		r600_ih_ring_fini(rdev);
+		return ret;
+	}
+
+	/* setup interrupt control */
+	/* set dummy read address to ring address */
+	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+	interrupt_cntl = RREG32(INTERRUPT_CNTL);
+	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+	 */
+	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+	WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
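+	/* the IH ring size field is programmed as log2 of the dword count */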
+	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+
+	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+		      IH_WPTR_OVERFLOW_CLEAR |
+		      (rb_bufsz << 1));
+
+	if (rdev->wb.enabled)
+		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+	/* set the writeback address whether it's enabled or not */
+	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
+
+	WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+	/* set rptr, wptr to 0 */
+	WREG32(IH_RB_RPTR, 0);
+	WREG32(IH_RB_WPTR, 0);
+
+	/* Default settings for IH_CNTL (disabled at first) */
+	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
+	/* RPTR_REARM only works if msi's are enabled */
+	if (rdev->msi_enabled)
+		ih_cntl |= RPTR_REARM;
+	WREG32(IH_CNTL, ih_cntl);
+
+	/* force the active interrupt state to all disabled */
+	si_disable_interrupt_state(rdev);
+
+	pci_set_master(rdev->pdev);
+
+	/* enable irqs */
+	si_enable_interrupts(rdev);
+
+	return ret;
+}
+
+int si_irq_set(struct radeon_device *rdev)
+{
+	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
+	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+	u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+	u32 grbm_int_cntl = 0;
+	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+	u32 dma_cntl, dma_cntl1;
+
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
+		return -EINVAL;
+	}
+	/* don't enable anything if the ih is disabled */
+	if (!rdev->ih.enabled) {
+		si_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		si_disable_interrupt_state(rdev);
+		return 0;
+	}
+
+	if (!ASIC_IS_NODCE(rdev)) {
+		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+	}
+
+	dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+	dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+
+	/* enable CP interrupts on all rings */
+	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int gfx\n");
+		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+	}
+	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int cp1\n");
+		cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+	}
+	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int cp2\n");
+		cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+	}
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
+	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+		DRM_DEBUG("si_irq_set: sw int dma1\n");
+		dma_cntl1 |= TRAP_ENABLE;
+	}
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    atomic_read(&rdev->irq.pflip[0])) {
+		DRM_DEBUG("si_irq_set: vblank 0\n");
+		crtc1 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    atomic_read(&rdev->irq.pflip[1])) {
+		DRM_DEBUG("si_irq_set: vblank 1\n");
+		crtc2 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[2] ||
+	    atomic_read(&rdev->irq.pflip[2])) {
+		DRM_DEBUG("si_irq_set: vblank 2\n");
+		crtc3 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[3] ||
+	    atomic_read(&rdev->irq.pflip[3])) {
+		DRM_DEBUG("si_irq_set: vblank 3\n");
+		crtc4 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[4] ||
+	    atomic_read(&rdev->irq.pflip[4])) {
+		DRM_DEBUG("si_irq_set: vblank 4\n");
+		crtc5 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.crtc_vblank_int[5] ||
+	    atomic_read(&rdev->irq.pflip[5])) {
+		DRM_DEBUG("si_irq_set: vblank 5\n");
+		crtc6 |= VBLANK_INT_MASK;
+	}
+	if (rdev->irq.hpd[0]) {
+		DRM_DEBUG("si_irq_set: hpd 1\n");
+		hpd1 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[1]) {
+		DRM_DEBUG("si_irq_set: hpd 2\n");
+		hpd2 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[2]) {
+		DRM_DEBUG("si_irq_set: hpd 3\n");
+		hpd3 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[3]) {
+		DRM_DEBUG("si_irq_set: hpd 4\n");
+		hpd4 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[4]) {
+		DRM_DEBUG("si_irq_set: hpd 5\n");
+		hpd5 |= DC_HPDx_INT_EN;
+	}
+	if (rdev->irq.hpd[5]) {
+		DRM_DEBUG("si_irq_set: hpd 6\n");
+		hpd6 |= DC_HPDx_INT_EN;
+	}
+
+	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
+	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
+	WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
+
+	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
+	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
+
+	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+
+	if (rdev->num_crtc >= 2) {
+		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
+	}
+	if (rdev->num_crtc >= 4) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+	}
+
+	if (rdev->num_crtc >= 2) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+	}
+	if (rdev->num_crtc >= 4) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+	}
+	if (rdev->num_crtc >= 6) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+	}
+
+	if (!ASIC_IS_NODCE(rdev)) {
+		WREG32(DC_HPD1_INT_CONTROL, hpd1);
+		WREG32(DC_HPD2_INT_CONTROL, hpd2);
+		WREG32(DC_HPD3_INT_CONTROL, hpd3);
+		WREG32(DC_HPD4_INT_CONTROL, hpd4);
+		WREG32(DC_HPD5_INT_CONTROL, hpd5);
+		WREG32(DC_HPD6_INT_CONTROL, hpd6);
+	}
+
+	return 0;
+}
+
+static inline void si_irq_ack(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (ASIC_IS_NODCE(rdev))
+		return;
+
+	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	if (rdev->num_crtc >= 4) {
+		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+	}
+	if (rdev->num_crtc >= 6) {
+		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+	}
+
+	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
+		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
+
+	if (rdev->num_crtc >= 4) {
+		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+	}
+
+	if (rdev->num_crtc >= 6) {
+		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+	}
+
+	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
+		tmp |= DC_HPDx_INT_ACK;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+	}
+}
+
+static void si_irq_disable(struct radeon_device *rdev)
+{
+	si_disable_interrupts(rdev);
+	/* Wait and acknowledge irq */
+	mdelay(1);
+	si_irq_ack(rdev);
+	si_disable_interrupt_state(rdev);
+}
+
+static void si_irq_suspend(struct radeon_device *rdev)
+{
+	si_irq_disable(rdev);
+	si_rlc_stop(rdev);
+}
+
+static void si_irq_fini(struct radeon_device *rdev)
+{
+	si_irq_suspend(rdev);
+	r600_ih_ring_fini(rdev);
+}
+
+static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
+{
+	u32 wptr, tmp;
+
+	if (rdev->wb.enabled)
+		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
+	else
+		wptr = RREG32(IH_RB_WPTR);
+
+	if (wptr & RB_OVERFLOW) {
+		/* When a ring buffer overflow happens, start parsing interrupts
+		 * from the last vector that was not overwritten (wptr + 16).
+		 * Hopefully this should allow us to catch up.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
+		tmp = RREG32(IH_RB_CNTL);
+		tmp |= IH_WPTR_OVERFLOW_CLEAR;
+		WREG32(IH_RB_CNTL, tmp);
+	}
+	return (wptr & rdev->ih.ptr_mask);
+}
+
+/* SI IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0]    - interrupt source id
+ * [31:8]   - reserved
+ * [59:32]  - interrupt source data
+ * [63:60]  - reserved
+ * [71:64]  - RINGID
+ * [79:72]  - VMID
+ * [127:80] - reserved
+ */
+int si_irq_process(struct radeon_device *rdev)
+{
+	u32 wptr;
+	u32 rptr;
+	u32 src_id, src_data, ring_id;
+	u32 ring_index;
+	bool queue_hotplug = false;
+
+	if (!rdev->ih.enabled || rdev->shutdown)
+		return IRQ_NONE;
+
+	wptr = si_get_ih_wptr(rdev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&rdev->ih.lock, 1))
+		return IRQ_NONE;
+
+	rptr = rdev->ih.rptr;
+	DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+	/* Order reading of wptr vs. reading of IH ring data */
+	rmb();
+
+	/* display interrupts */
+	si_irq_ack(rdev);
+
+	while (rptr != wptr) {
+		/* wptr/rptr are in bytes! */
+		ring_index = rptr / 4;
+		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
+		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
+
+		switch (src_id) {
+		case 1: /* D1 vblank/vline */
+			switch (src_data) {
+			case 0: /* D1 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[0]) {
+						drm_handle_vblank(rdev->ddev, 0);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[0]))
+						radeon_crtc_handle_flip(rdev, 0);
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D1 vblank\n");
+				}
+				break;
+			case 1: /* D1 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D1 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 2: /* D2 vblank/vline */
+			switch (src_data) {
+			case 0: /* D2 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[1]) {
+						drm_handle_vblank(rdev->ddev, 1);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[1]))
+						radeon_crtc_handle_flip(rdev, 1);
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D2 vblank\n");
+				}
+				break;
+			case 1: /* D2 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D2 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 3: /* D3 vblank/vline */
+			switch (src_data) {
+			case 0: /* D3 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[2]) {
+						drm_handle_vblank(rdev->ddev, 2);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[2]))
+						radeon_crtc_handle_flip(rdev, 2);
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D3 vblank\n");
+				}
+				break;
+			case 1: /* D3 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D3 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 4: /* D4 vblank/vline */
+			switch (src_data) {
+			case 0: /* D4 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[3]) {
+						drm_handle_vblank(rdev->ddev, 3);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[3]))
+						radeon_crtc_handle_flip(rdev, 3);
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D4 vblank\n");
+				}
+				break;
+			case 1: /* D4 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D4 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 5: /* D5 vblank/vline */
+			switch (src_data) {
+			case 0: /* D5 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[4]) {
+						drm_handle_vblank(rdev->ddev, 4);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[4]))
+						radeon_crtc_handle_flip(rdev, 4);
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D5 vblank\n");
+				}
+				break;
+			case 1: /* D5 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D5 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 6: /* D6 vblank/vline */
+			switch (src_data) {
+			case 0: /* D6 vblank */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[5]) {
+						drm_handle_vblank(rdev->ddev, 5);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (atomic_read(&rdev->irq.pflip[5]))
+						radeon_crtc_handle_flip(rdev, 5);
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+					DRM_DEBUG("IH: D6 vblank\n");
+				}
+				break;
+			case 1: /* D6 vline */
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+					DRM_DEBUG("IH: D6 vline\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 42: /* HPD hotplug */
+			switch (src_data) {
+			case 0:
+				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD1\n");
+				}
+				break;
+			case 1:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD2\n");
+				}
+				break;
+			case 2:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD3\n");
+				}
+				break;
+			case 3:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD4\n");
+				}
+				break;
+			case 4:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD5\n");
+				}
+				break;
+			case 5:
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+					queue_hotplug = true;
+					DRM_DEBUG("IH: HPD6\n");
+				}
+				break;
+			default:
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+				break;
+			}
+			break;
+		case 124: /* UVD */
+			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+			break;
+		case 146:
+		case 147:
+			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+				RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+				RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+			/* reset addr and status */
+			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+			break;
+		case 176: /* RINGID0 CP_INT */
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+			break;
+		case 177: /* RINGID1 CP_INT */
+			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+			break;
+		case 178: /* RINGID2 CP_INT */
+			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+			break;
+		case 181: /* CP EOP event */
+			DRM_DEBUG("IH: CP EOP\n");
+			switch (ring_id) {
+			case 0:
+				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+				break;
+			case 1:
+				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+				break;
+			case 2:
+				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+				break;
+			}
+			break;
+		case 224: /* DMA trap event */
+			DRM_DEBUG("IH: DMA trap\n");
+			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+			break;
+		case 233: /* GUI IDLE */
+			DRM_DEBUG("IH: GUI idle\n");
+			break;
+		case 244: /* DMA1 trap event */
+			DRM_DEBUG("IH: DMA1 trap\n");
+			radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+			break;
+		default:
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+			break;
+		}
+
+		/* wptr/rptr are in bytes! */
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
+	}
+	if (queue_hotplug)
+		schedule_work(&rdev->hotplug_work);
+	rdev->ih.rptr = rptr;
+	WREG32(IH_RB_RPTR, rdev->ih.rptr);
+	atomic_set(&rdev->ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = si_get_ih_wptr(rdev);
+	if (wptr != rptr)
+		goto restart_ih;
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+		uint64_t src_offset, uint64_t dst_offset,
+		unsigned num_gpu_pages,
+		struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_bytes, cur_size_in_bytes;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
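+	/* a single DMA COPY packet can move at most 0xfffff bytes, so
+	 * split the transfer into chunks of that size.
+	 */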
+	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_bytes = size_in_bytes;
+		if (cur_size_in_bytes > 0xFFFFF)
+			cur_size_in_bytes = 0xFFFFF;
+		size_in_bytes -= cur_size_in_bytes;
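+		/* COPY packet: header with byte count, then the dst/src
+		 * address low dwords and the high 8 address bits of each.
+		 */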
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+		radeon_ring_write(ring, dst_offset & 0xffffffff);
+		radeon_ring_write(ring, src_offset & 0xffffffff);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_bytes;
+		dst_offset += cur_size_in_bytes;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
+/*
+ * startup/shutdown callbacks
+ */
+static int si_startup(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring;
+	int r;
+
+	si_mc_program(rdev);
+
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+	    !rdev->rlc_fw || !rdev->mc_fw) {
+		r = si_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
+	r = si_mc_load_microcode(rdev);
+	if (r) {
+		DRM_ERROR("Failed to load MC firmware!\n");
+		return r;
+	}
+
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
+	r = si_pcie_gart_enable(rdev);
+	if (r)
+		return r;
+	si_gpu_init(rdev);
+
+	/* allocate rlc buffers */
+	r = si_rlc_init(rdev);
+	if (r) {
+		DRM_ERROR("Failed to init rlc BOs!\n");
+		return r;
+	}
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
+	if (rdev->has_uvd) {
+		r = rv770_uvd_resume(rdev);
+		if (!r) {
+			r = radeon_fence_driver_start_ring(rdev,
+							   R600_RING_TYPE_UVD_INDEX);
+			if (r)
+				dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+		}
+		if (r)
+			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+	}
+
+	/* Enable IRQ */
+	if (!rdev->irq.installed) {
+		r = radeon_irq_kms_init(rdev);
+		if (r)
+			return r;
+	}
+
+	r = si_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	si_irq_set(rdev);
+
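+	/* each ring is initialized with its writeback rptr offset, its
+	 * rptr/wptr registers, a ptr shift/mask and a NOP packet.
+	 */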
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     CP_RB0_RPTR, CP_RB0_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
+			     CP_RB1_RPTR, CP_RB1_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
+			     CP_RB2_RPTR, CP_RB2_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+			     DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+	if (r)
+		return r;
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+			     DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+			     DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+	if (r)
+		return r;
+
+	r = si_cp_load_microcode(rdev);
+	if (r)
+		return r;
+	r = si_cp_resume(rdev);
+	if (r)
+		return r;
+
+	r = cayman_dma_resume(rdev);
+	if (r)
+		return r;
+
+	if (rdev->has_uvd) {
+		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+		if (ring->ring_size) {
+			r = radeon_ring_init(rdev, ring, ring->ring_size,
+					     R600_WB_UVD_RPTR_OFFSET,
+					     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+					     0, 0xfffff, RADEON_CP_PACKET2);
+			if (!r)
+				r = r600_uvd_init(rdev);
+			if (r)
+				DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+		}
+	}
+
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_vm_manager_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+		return r;
+	}
+
+	return 0;
+}
+
+int si_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Do not reset the GPU before posting; unlike r500 hw, posting
+	 * on rv770+ hw performs the tasks necessary to bring the GPU
+	 * back into good shape.
+	 */
+	/* post card */
+	atom_asic_init(rdev->mode_info.atom_context);
+
+	/* init golden registers */
+	si_init_golden_registers(rdev);
+
+	rdev->accel_working = true;
+	r = si_startup(rdev);
+	if (r) {
+		DRM_ERROR("si startup failed on resume\n");
+		rdev->accel_working = false;
+		return r;
+	}
+
+	return r;
+}
+
+int si_suspend(struct radeon_device *rdev)
+{
+	radeon_vm_manager_fini(rdev);
+	si_cp_enable(rdev, false);
+	cayman_dma_stop(rdev);
+	if (rdev->has_uvd) {
+		r600_uvd_stop(rdev);
+		radeon_uvd_suspend(rdev);
+	}
+	si_irq_suspend(rdev);
+	radeon_wb_disable(rdev);
+	si_pcie_gart_disable(rdev);
+	return 0;
+}
+
+/* The plan is to move initialization into this function and to use
+ * helper functions so that radeon_device_init does little more than
+ * call asic-specific functions. This should also allow us to remove
+ * a bunch of callback functions like vram_info.
+ */
+int si_init(struct radeon_device *rdev)
+{
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	int r;
+
+	/* Read BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	/* Must be an ATOMBIOS */
+	if (!rdev->is_atom_bios) {
+		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
+		return -EINVAL;
+	}
+	r = radeon_atombios_init(rdev);
+	if (r)
+		return r;
+
+	/* Post card if necessary */
+	if (!radeon_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* init golden registers */
+	si_init_golden_registers(rdev);
+	/* Initialize scratch registers */
+	si_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+
+	/* initialize memory controller */
+	r = si_mc_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 64 * 1024);
+
+	if (rdev->has_uvd) {
+		r = radeon_uvd_init(rdev);
+		if (!r) {
+			ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+			ring->ring_obj = NULL;
+			r600_ring_init(rdev, ring, 4096);
+		}
+	}
+
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
+
+	r = r600_pcie_gart_init(rdev);
+	if (r)
+		return r;
+
+	rdev->accel_working = true;
+	r = si_startup(rdev);
+	if (r) {
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		si_cp_fini(rdev);
+		cayman_dma_fini(rdev);
+		si_irq_fini(rdev);
+		si_rlc_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_vm_manager_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		si_pcie_gart_fini(rdev);
+		rdev->accel_working = false;
+	}
+
+	/* Don't start up if the MC ucode is missing.
+	 * The default clocks and voltages before the MC ucode
+	 * is loaded are not sufficient for advanced operations.
+	 */
+	if (!rdev->mc_fw) {
+		DRM_ERROR("radeon: MC ucode required for NI+.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void si_fini(struct radeon_device *rdev)
+{
+	si_cp_fini(rdev);
+	cayman_dma_fini(rdev);
+	si_irq_fini(rdev);
+	si_rlc_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	if (rdev->has_uvd) {
+		r600_uvd_stop(rdev);
+		radeon_uvd_fini(rdev);
+	}
+	si_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
+/**
+ * si_get_gpu_clock_counter - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (SI).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
+{
+	uint64_t clock;
+
+	mutex_lock(&rdev->gpu_clock_mutex);
+	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+	mutex_unlock(&rdev->gpu_clock_mutex);
+	return clock;
+}
+
+int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
+{
+	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
+	int r;
+
+	/* bypass vclk and dclk with bclk */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	/* put PLL in bypass mode */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
+
+	if (!vclk || !dclk) {
+		/* keep bypass mode and put the PLL to sleep */
+		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+		return 0;
+	}
+
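+	/* compute the feedback and post dividers for the requested clocks */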
+	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
+					  16384, 0x03FFFFFF, 0, 128, 5,
+					  &fb_div, &vclk_div, &dclk_div);
+	if (r)
+		return r;
+
+	/* set RESET_ANTI_MUX to 0 */
+	WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
+
+	/* set VCO_MODE to 1 */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
+
+	/* toggle UPLL_SLEEP to 1 then back to 0 */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
+
+	/* deassert UPLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(1);
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* assert UPLL_RESET again */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+	/* disable spread spectrum. */
+	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
+
+	/* set feedback divider */
+	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
+
+	/* set ref divider to 0 */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
+
+	if (fb_div < 307200)
+		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
+	else
+		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
+
+	/* set PDIV_A and PDIV_B */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
+		~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
+
+	/* give the PLL some time to settle */
+	mdelay(15);
+
+	/* deassert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* switch from bypass mode to normal mode */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* switch VCLK and DCLK selection */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/radeon/si_blit_shaders.c b/linux-imx/drivers/gpu/drm/radeon/si_blit_shaders.c
new file mode 100644
index 0000000..ec415e7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/si_blit_shaders.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+const u32 si_default_state[] =
+{
+	0xc0066900,
+	0x00000000,
+	0x00000060, /* DB_RENDER_CONTROL */
+	0x00000000, /* DB_COUNT_CONTROL */
+	0x00000000, /* DB_DEPTH_VIEW */
+	0x0000002a, /* DB_RENDER_OVERRIDE */
+	0x00000000, /* DB_RENDER_OVERRIDE2 */
+	0x00000000, /* DB_HTILE_DATA_BASE */
+
+	0xc0046900,
+	0x00000008,
+	0x00000000, /* DB_DEPTH_BOUNDS_MIN */
+	0x00000000, /* DB_DEPTH_BOUNDS_MAX */
+	0x00000000, /* DB_STENCIL_CLEAR */
+	0x00000000, /* DB_DEPTH_CLEAR */
+
+	0xc0036900,
+	0x0000000f,
+	0x00000000, /* DB_DEPTH_INFO */
+	0x00000000, /* DB_Z_INFO */
+	0x00000000, /* DB_STENCIL_INFO */
+
+	0xc0016900,
+	0x00000080,
+	0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+	0xc00d6900,
+	0x00000083,
+	0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+	0x00000000, /* PA_SC_CLIPRECT_0_TL */
+	0x20002000, /* PA_SC_CLIPRECT_0_BR */
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0x00000000,
+	0x20002000,
+	0xaaaaaaaa, /* PA_SC_EDGERULE */
+	0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+	0x0000000f, /* CB_TARGET_MASK */
+	0x0000000f, /* CB_SHADER_MASK */
+
+	0xc0226900,
+	0x00000094,
+	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x80000000,
+	0x20002000,
+	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+	0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+	0xc0026900,
+	0x000000d9,
+	0x00000000, /* CP_RINGID */
+	0x00000000, /* CP_VMID */
+
+	0xc0046900,
+	0x00000100,
+	0xffffffff, /* VGT_MAX_VTX_INDX */
+	0x00000000, /* VGT_MIN_VTX_INDX */
+	0x00000000, /* VGT_INDX_OFFSET */
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+
+	0xc0046900,
+	0x00000105,
+	0x00000000, /* CB_BLEND_RED */
+	0x00000000, /* CB_BLEND_GREEN */
+	0x00000000, /* CB_BLEND_BLUE */
+	0x00000000, /* CB_BLEND_ALPHA */
+
+	0xc0016900,
+	0x000001e0,
+	0x00000000, /* CB_BLEND0_CONTROL */
+
+	0xc00e6900,
+	0x00000200,
+	0x00000000, /* DB_DEPTH_CONTROL */
+	0x00000000, /* DB_EQAA */
+	0x00cc0010, /* CB_COLOR_CONTROL */
+	0x00000210, /* DB_SHADER_CONTROL */
+	0x00010000, /* PA_CL_CLIP_CNTL */
+	0x00000004, /* PA_SU_SC_MODE_CNTL */
+	0x00000100, /* PA_CL_VTE_CNTL */
+	0x00000000, /* PA_CL_VS_OUT_CNTL */
+	0x00000000, /* PA_CL_NANINF_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+	0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+	0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+	0x00000000, /*  */
+	0x00000000, /*  */
+
+	0xc0116900,
+	0x00000280,
+	0x00000000, /* PA_SU_POINT_SIZE */
+	0x00000000, /* PA_SU_POINT_MINMAX */
+	0x00000008, /* PA_SU_LINE_CNTL */
+	0x00000000, /* PA_SC_LINE_STIPPLE */
+	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+	0x00000000, /* VGT_HOS_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000, /* VGT_GS_MODE */
+
+	0xc0026900,
+	0x00000292,
+	0x00000000, /* PA_SC_MODE_CNTL_0 */
+	0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+	0xc0016900,
+	0x000002a1,
+	0x00000000, /* VGT_PRIMITIVEID_EN */
+
+	0xc0016900,
+	0x000002a5,
+	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+	0xc0026900,
+	0x000002a8,
+	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+	0x00000000,
+
+	0xc0026900,
+	0x000002ad,
+	0x00000000, /* VGT_REUSE_OFF */
+	0x00000000,
+
+	0xc0016900,
+	0x000002d5,
+	0x00000000, /* VGT_SHADER_STAGES_EN */
+
+	0xc0016900,
+	0x000002dc,
+	0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+	0xc0066900,
+	0x000002de,
+	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+
+	0xc0026900,
+	0x000002e5,
+	0x00000000, /* VGT_STRMOUT_CONFIG */
+	0x00000000,
+
+	0xc01b6900,
+	0x000002f5,
+	0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
+	0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
+	0x00000000, /* PA_SC_LINE_CNTL */
+	0x00000000, /* PA_SC_AA_CONFIG */
+	0x00000005, /* PA_SU_VTX_CNTL */
+	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+	0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
+	0xffffffff,
+
+	0xc0026900,
+	0x00000316,
+	0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	0x00000010, /*  */
+};
+
+const u32 si_default_size = ARRAY_SIZE(si_default_state);
diff --git a/linux-imx/drivers/gpu/drm/radeon/si_blit_shaders.h b/linux-imx/drivers/gpu/drm/radeon/si_blit_shaders.h
new file mode 100644
index 0000000..c739e51
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/si_blit_shaders.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SI_BLIT_SHADERS_H
+#define SI_BLIT_SHADERS_H
+
+extern const u32 si_default_state[];
+
+extern const u32 si_default_size;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/si_reg.h b/linux-imx/drivers/gpu/drm/radeon/si_reg.h
new file mode 100644
index 0000000..501f9d4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/si_reg.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __SI_REG_H__
+#define __SI_REG_H__
+
+/* SI */
+#define SI_DC_GPIO_HPD_MASK                      0x65b0
+#define SI_DC_GPIO_HPD_A                         0x65b4
+#define SI_DC_GPIO_HPD_EN                        0x65b8
+#define SI_DC_GPIO_HPD_Y                         0x65bc
+
+#define SI_GRPH_CONTROL                          0x6804
+#       define SI_GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#       define SI_GRPH_DEPTH_8BPP                0
+#       define SI_GRPH_DEPTH_16BPP               1
+#       define SI_GRPH_DEPTH_32BPP               2
+#       define SI_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define SI_ADDR_SURF_2_BANK               0
+#       define SI_ADDR_SURF_4_BANK               1
+#       define SI_ADDR_SURF_8_BANK               2
+#       define SI_ADDR_SURF_16_BANK              3
+#       define SI_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define SI_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define SI_ADDR_SURF_BANK_WIDTH_1         0
+#       define SI_ADDR_SURF_BANK_WIDTH_2         1
+#       define SI_ADDR_SURF_BANK_WIDTH_4         2
+#       define SI_ADDR_SURF_BANK_WIDTH_8         3
+#       define SI_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+/* 8 BPP */
+#       define SI_GRPH_FORMAT_INDEXED            0
+/* 16 BPP */
+#       define SI_GRPH_FORMAT_ARGB1555           0
+#       define SI_GRPH_FORMAT_ARGB565            1
+#       define SI_GRPH_FORMAT_ARGB4444           2
+#       define SI_GRPH_FORMAT_AI88               3
+#       define SI_GRPH_FORMAT_MONO16             4
+#       define SI_GRPH_FORMAT_BGRA5551           5
+/* 32 BPP */
+#       define SI_GRPH_FORMAT_ARGB8888           0
+#       define SI_GRPH_FORMAT_ARGB2101010        1
+#       define SI_GRPH_FORMAT_32BPP_DIG          2
+#       define SI_GRPH_FORMAT_8B_ARGB2101010     3
+#       define SI_GRPH_FORMAT_BGRA1010102        4
+#       define SI_GRPH_FORMAT_8B_BGRA1010102     5
+#       define SI_GRPH_FORMAT_RGB111110          6
+#       define SI_GRPH_FORMAT_BGR101111          7
+#       define SI_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define SI_ADDR_SURF_BANK_HEIGHT_1        0
+#       define SI_ADDR_SURF_BANK_HEIGHT_2        1
+#       define SI_ADDR_SURF_BANK_HEIGHT_4        2
+#       define SI_ADDR_SURF_BANK_HEIGHT_8        3
+#       define SI_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define SI_ADDR_SURF_TILE_SPLIT_64B       0
+#       define SI_ADDR_SURF_TILE_SPLIT_128B      1
+#       define SI_ADDR_SURF_TILE_SPLIT_256B      2
+#       define SI_ADDR_SURF_TILE_SPLIT_512B      3
+#       define SI_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define SI_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define SI_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define SI_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define SI_ADDR_SURF_MACRO_TILE_ASPECT_8  3
+#       define SI_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#       define SI_GRPH_ARRAY_LINEAR_GENERAL      0
+#       define SI_GRPH_ARRAY_LINEAR_ALIGNED      1
+#       define SI_GRPH_ARRAY_1D_TILED_THIN1      2
+#       define SI_GRPH_ARRAY_2D_TILED_THIN1      4
+#       define SI_GRPH_PIPE_CONFIG(x)		 (((x) & 0x1f) << 24)
+#       define SI_ADDR_SURF_P2			 0
+#       define SI_ADDR_SURF_P4_8x16		 4
+#       define SI_ADDR_SURF_P4_16x16		 5
+#       define SI_ADDR_SURF_P4_16x32		 6
+#       define SI_ADDR_SURF_P4_32x32		 7
+#       define SI_ADDR_SURF_P8_16x16_8x16	 8
+#       define SI_ADDR_SURF_P8_16x32_8x16	 9
+#       define SI_ADDR_SURF_P8_32x32_8x16	 10
+#       define SI_ADDR_SURF_P8_16x32_16x16	 11
+#       define SI_ADDR_SURF_P8_32x32_16x16	 12
+#       define SI_ADDR_SURF_P8_32x32_16x32	 13
+#       define SI_ADDR_SURF_P8_32x64_32x32	 14
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/radeon/sid.h b/linux-imx/drivers/gpu/drm/radeon/sid.h
new file mode 100644
index 0000000..9652ed9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/radeon/sid.h
@@ -0,0 +1,1115 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef SI_H
+#define SI_H
+
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH  2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN        0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN         0x12010002
+#define HAINAN_GB_ADDR_CONFIG_GOLDEN        0x02010001
+
+/* discrete uvd clocks */
+#define	CG_UPLL_FUNC_CNTL				0x634
+#	define UPLL_RESET_MASK				0x00000001
+#	define UPLL_SLEEP_MASK				0x00000002
+#	define UPLL_BYPASS_EN_MASK			0x00000004
+#	define UPLL_CTLREQ_MASK				0x00000008
+#	define UPLL_VCO_MODE_MASK			0x00000600
+#	define UPLL_REF_DIV_MASK			0x003F0000
+#	define UPLL_CTLACK_MASK				0x40000000
+#	define UPLL_CTLACK2_MASK			0x80000000
+#define	CG_UPLL_FUNC_CNTL_2				0x638
+#	define UPLL_PDIV_A(x)				((x) << 0)
+#	define UPLL_PDIV_A_MASK				0x0000007F
+#	define UPLL_PDIV_B(x)				((x) << 8)
+#	define UPLL_PDIV_B_MASK				0x00007F00
+#	define VCLK_SRC_SEL(x)				((x) << 20)
+#	define VCLK_SRC_SEL_MASK			0x01F00000
+#	define DCLK_SRC_SEL(x)				((x) << 25)
+#	define DCLK_SRC_SEL_MASK			0x3E000000
+#define	CG_UPLL_FUNC_CNTL_3				0x63C
+#	define UPLL_FB_DIV(x)				((x) << 0)
+#	define UPLL_FB_DIV_MASK				0x01FFFFFF
+#define	CG_UPLL_FUNC_CNTL_4                             0x644
+#	define UPLL_SPARE_ISPARE9			0x00020000
+#define	CG_UPLL_FUNC_CNTL_5				0x648
+#	define RESET_ANTI_MUX_MASK			0x00000200
+#define	CG_UPLL_SPREAD_SPECTRUM				0x650
+#	define SSEN_MASK				0x00000001
+
+#define	CG_MULT_THERMAL_STATUS					0x714
+#define		ASIC_MAX_TEMP(x)				((x) << 0)
+#define		ASIC_MAX_TEMP_MASK				0x000001ff
+#define		ASIC_MAX_TEMP_SHIFT				0
+#define		CTF_TEMP(x)					((x) << 9)
+#define		CTF_TEMP_MASK					0x0003fe00
+#define		CTF_TEMP_SHIFT					9
+
+#define SI_MAX_SH_GPRS           256
+#define SI_MAX_TEMP_GPRS         16
+#define SI_MAX_SH_THREADS        256
+#define SI_MAX_SH_STACK_ENTRIES  4096
+#define SI_MAX_FRC_EOV_CNT       16384
+#define SI_MAX_BACKENDS          8
+#define SI_MAX_BACKENDS_MASK     0xFF
+#define SI_MAX_BACKENDS_PER_SE_MASK     0x0F
+#define SI_MAX_SIMDS             12
+#define SI_MAX_SIMDS_MASK        0x0FFF
+#define SI_MAX_SIMDS_PER_SE_MASK        0x00FF
+#define SI_MAX_PIPES             8
+#define SI_MAX_PIPES_MASK        0xFF
+#define SI_MAX_PIPES_PER_SIMD_MASK      0x3F
+#define SI_MAX_LDS_NUM           0xFFFF
+#define SI_MAX_TCC               16
+#define SI_MAX_TCC_MASK          0xFFFF
+
+#define VGA_HDP_CONTROL  				0x328
+#define		VGA_MEMORY_DISABLE				(1 << 4)
+
+#define CG_CLKPIN_CNTL                                    0x660
+#       define XTALIN_DIVIDE                              (1 << 1)
+#define CG_CLKPIN_CNTL_2                                  0x664
+#       define MUX_TCLK_TO_XCLK                           (1 << 8)
+
+#define DMIF_ADDR_CONFIG  				0xBD4
+
+#define DMIF_ADDR_CALC  				0xC00
+
+#define	PIPE0_DMIF_BUFFER_CONTROL			  0x0ca0
+#       define DMIF_BUFFERS_ALLOCATED(x)                  ((x) << 0)
+#       define DMIF_BUFFERS_ALLOCATED_COMPLETED           (1 << 4)
+
+#define	SRBM_STATUS				        0xE50
+#define		GRBM_RQ_PENDING 			(1 << 5)
+#define		VMC_BUSY 				(1 << 8)
+#define		MCB_BUSY 				(1 << 9)
+#define		MCB_NON_DISPLAY_BUSY 			(1 << 10)
+#define		MCC_BUSY 				(1 << 11)
+#define		MCD_BUSY 				(1 << 12)
+#define		SEM_BUSY 				(1 << 14)
+#define		IH_BUSY 				(1 << 17)
+
+#define	SRBM_SOFT_RESET				        0x0E60
+#define		SOFT_RESET_BIF				(1 << 1)
+#define		SOFT_RESET_DC				(1 << 5)
+#define		SOFT_RESET_DMA1				(1 << 6)
+#define		SOFT_RESET_GRBM				(1 << 8)
+#define		SOFT_RESET_HDP				(1 << 9)
+#define		SOFT_RESET_IH				(1 << 10)
+#define		SOFT_RESET_MC				(1 << 11)
+#define		SOFT_RESET_ROM				(1 << 14)
+#define		SOFT_RESET_SEM				(1 << 15)
+#define		SOFT_RESET_VMC				(1 << 17)
+#define		SOFT_RESET_DMA				(1 << 20)
+#define		SOFT_RESET_TST				(1 << 21)
+#define		SOFT_RESET_REGBB			(1 << 22)
+#define		SOFT_RESET_ORB				(1 << 23)
+
+#define	CC_SYS_RB_BACKEND_DISABLE			0xe80
+#define	GC_USER_SYS_RB_BACKEND_DISABLE			0xe84
+
+#define	SRBM_STATUS2				        0x0EC4
+#define		DMA_BUSY 				(1 << 5)
+#define		DMA1_BUSY 				(1 << 6)
+
+#define VM_L2_CNTL					0x1400
+#define		ENABLE_L2_CACHE					(1 << 0)
+#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
+#define		L2_CACHE_PTE_ENDIAN_SWAP_MODE(x)		((x) << 2)
+#define		L2_CACHE_PDE_ENDIAN_SWAP_MODE(x)		((x) << 4)
+#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
+#define		ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE	(1 << 10)
+#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 15)
+#define		CONTEXT1_IDENTITY_ACCESS_MODE(x)		(((x) & 3) << 19)
+#define VM_L2_CNTL2					0x1404
+#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
+#define		INVALIDATE_L2_CACHE				(1 << 1)
+#define		INVALIDATE_CACHE_MODE(x)			((x) << 26)
+#define			INVALIDATE_PTE_AND_PDE_CACHES		0
+#define			INVALIDATE_ONLY_PTE_CACHES		1
+#define			INVALIDATE_ONLY_PDE_CACHES		2
+#define VM_L2_CNTL3					0x1408
+#define		BANK_SELECT(x)					((x) << 0)
+#define		L2_CACHE_UPDATE_MODE(x)				((x) << 6)
+#define		L2_CACHE_BIGK_FRAGMENT_SIZE(x)			((x) << 15)
+#define		L2_CACHE_BIGK_ASSOCIATIVITY			(1 << 20)
+#define	VM_L2_STATUS					0x140C
+#define		L2_BUSY						(1 << 0)
+#define VM_CONTEXT0_CNTL				0x1410
+#define		ENABLE_CONTEXT					(1 << 0)
+#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
+#define		RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 3)
+#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT	(1 << 6)
+#define		DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT	(1 << 7)
+#define		PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 9)
+#define		PDE0_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 10)
+#define		VALID_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 12)
+#define		VALID_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 13)
+#define		READ_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 15)
+#define		READ_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 16)
+#define		WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT		(1 << 18)
+#define		WRITE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 19)
+#define VM_CONTEXT1_CNTL				0x1414
+#define VM_CONTEXT0_CNTL2				0x1430
+#define VM_CONTEXT1_CNTL2				0x1434
+#define	VM_CONTEXT8_PAGE_TABLE_BASE_ADDR		0x1438
+#define	VM_CONTEXT9_PAGE_TABLE_BASE_ADDR		0x143c
+#define	VM_CONTEXT10_PAGE_TABLE_BASE_ADDR		0x1440
+#define	VM_CONTEXT11_PAGE_TABLE_BASE_ADDR		0x1444
+#define	VM_CONTEXT12_PAGE_TABLE_BASE_ADDR		0x1448
+#define	VM_CONTEXT13_PAGE_TABLE_BASE_ADDR		0x144c
+#define	VM_CONTEXT14_PAGE_TABLE_BASE_ADDR		0x1450
+#define	VM_CONTEXT15_PAGE_TABLE_BASE_ADDR		0x1454
+
+#define	VM_CONTEXT1_PROTECTION_FAULT_ADDR		0x14FC
+#define	VM_CONTEXT1_PROTECTION_FAULT_STATUS		0x14DC
+
+#define VM_INVALIDATE_REQUEST				0x1478
+#define VM_INVALIDATE_RESPONSE				0x147c
+
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR	0x151c
+
+#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153c
+#define	VM_CONTEXT1_PAGE_TABLE_BASE_ADDR		0x1540
+#define	VM_CONTEXT2_PAGE_TABLE_BASE_ADDR		0x1544
+#define	VM_CONTEXT3_PAGE_TABLE_BASE_ADDR		0x1548
+#define	VM_CONTEXT4_PAGE_TABLE_BASE_ADDR		0x154c
+#define	VM_CONTEXT5_PAGE_TABLE_BASE_ADDR		0x1550
+#define	VM_CONTEXT6_PAGE_TABLE_BASE_ADDR		0x1554
+#define	VM_CONTEXT7_PAGE_TABLE_BASE_ADDR		0x1558
+#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155c
+#define	VM_CONTEXT1_PAGE_TABLE_START_ADDR		0x1560
+
+#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
+#define	VM_CONTEXT1_PAGE_TABLE_END_ADDR			0x1580
+
+#define MC_SHARED_CHMAP						0x2004
+#define		NOOFCHAN_SHIFT					12
+#define		NOOFCHAN_MASK					0x0000f000
+#define MC_SHARED_CHREMAP					0x2008
+
+#define	MC_VM_FB_LOCATION				0x2024
+#define	MC_VM_AGP_TOP					0x2028
+#define	MC_VM_AGP_BOT					0x202C
+#define	MC_VM_AGP_BASE					0x2030
+#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
+#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
+#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
+
+#define	MC_VM_MX_L1_TLB_CNTL				0x2064
+#define		ENABLE_L1_TLB					(1 << 0)
+#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
+#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
+#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
+#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
+#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+#define		ENABLE_ADVANCED_DRIVER_MODEL			(1 << 6)
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+
+#define	MC_ARB_RAMCFG					0x2760
+#define		NOOFBANK_SHIFT					0
+#define		NOOFBANK_MASK					0x00000003
+#define		NOOFRANK_SHIFT					2
+#define		NOOFRANK_MASK					0x00000004
+#define		NOOFROWS_SHIFT					3
+#define		NOOFROWS_MASK					0x00000038
+#define		NOOFCOLS_SHIFT					6
+#define		NOOFCOLS_MASK					0x000000C0
+#define		CHANSIZE_SHIFT					8
+#define		CHANSIZE_MASK					0x00000100
+#define		CHANSIZE_OVERRIDE				(1 << 11)
+#define		NOOFGROUPS_SHIFT				12
+#define		NOOFGROUPS_MASK					0x00001000
+
+#define	MC_SEQ_TRAIN_WAKEUP_CNTL			0x28e8
+#define		TRAIN_DONE_D0      			(1 << 30)
+#define		TRAIN_DONE_D1      			(1 << 31)
+
+#define MC_SEQ_SUP_CNTL           			0x28c8
+#define		RUN_MASK      				(1 << 0)
+#define MC_SEQ_SUP_PGM           			0x28cc
+
+#define MC_IO_PAD_CNTL_D0           			0x29d0
+#define		MEM_FALL_OUT_CMD      			(1 << 8)
+
+#define MC_SEQ_IO_DEBUG_INDEX           		0x2a44
+#define MC_SEQ_IO_DEBUG_DATA           			0x2a48
+
+#define	HDP_HOST_PATH_CNTL				0x2C00
+#define	HDP_NONSURFACE_BASE				0x2C04
+#define	HDP_NONSURFACE_INFO				0x2C08
+#define	HDP_NONSURFACE_SIZE				0x2C0C
+
+#define HDP_ADDR_CONFIG  				0x2F48
+#define HDP_MISC_CNTL					0x2F4C
+#define 	HDP_FLUSH_INVALIDATE_CACHE			(1 << 0)
+
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 1)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
+#       define MC_VMID(x)                                 ((x) << 25)
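+
+/*
+ * Illustrative sketch, not part of the imported header: IH_IB_SIZE takes
+ * log2 of the ring size in dwords, so programming the control register
+ * for a ring of 'ring_size' bytes could look like this (order_base_2()
+ * is from <linux/log2.h>; the exact flag mix is an assumption).
+ */
+static inline u32 si_ih_rb_cntl_for(unsigned int ring_size)
+{
+	u32 rb_bufsz = order_base_2(ring_size / 4);	/* log2 of dwords */
+
+	return IH_WPTR_OVERFLOW_ENABLE |
+	       IH_WPTR_OVERFLOW_CLEAR |
+	       IH_IB_SIZE(rb_bufsz);
+}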
+
+#define	CONFIG_MEMSIZE					0x5428
+
+#define INTERRUPT_CNTL                                    0x5468
+#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
+#       define IH_DUMMY_RD_EN                             (1 << 1)
+#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
+#       define GEN_IH_INT_EN                              (1 << 8)
+#define INTERRUPT_CNTL2                                   0x546c
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL			0x5480
+
+#define	BIF_FB_EN						0x5490
+#define		FB_READ_EN					(1 << 0)
+#define		FB_WRITE_EN					(1 << 1)
+
+#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
+
+#define	DC_LB_MEMORY_SPLIT					0x6b0c
+#define		DC_LB_MEMORY_CONFIG(x)				((x) << 20)
+
+#define	PRIORITY_A_CNT						0x6b18
+#define		PRIORITY_MARK_MASK				0x7fff
+#define		PRIORITY_OFF					(1 << 16)
+#define		PRIORITY_ALWAYS_ON				(1 << 20)
+#define	PRIORITY_B_CNT						0x6b1c
+
+#define	DPG_PIPE_ARBITRATION_CONTROL3				0x6cc8
+#       define LATENCY_WATERMARK_MASK(x)			((x) << 16)
+#define	DPG_PIPE_LATENCY_CONTROL				0x6ccc
+#       define LATENCY_LOW_WATERMARK(x)				((x) << 0)
+#       define LATENCY_HIGH_WATERMARK(x)			((x) << 16)
+
+/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
+#define VLINE_STATUS                                    0x6bb8
+#       define VLINE_OCCURRED                           (1 << 0)
+#       define VLINE_ACK                                (1 << 4)
+#       define VLINE_STAT                               (1 << 12)
+#       define VLINE_INTERRUPT                          (1 << 16)
+#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
+/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
+#define VBLANK_STATUS                                   0x6bbc
+#       define VBLANK_OCCURRED                          (1 << 0)
+#       define VBLANK_ACK                               (1 << 4)
+#       define VBLANK_STAT                              (1 << 12)
+#       define VBLANK_INTERRUPT                         (1 << 16)
+#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
+
+/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
+#define INT_MASK                                        0x6b40
+#       define VBLANK_INT_MASK                          (1 << 0)
+#       define VLINE_INT_MASK                           (1 << 4)
+
+#define DISP_INTERRUPT_STATUS                           0x60f4
+#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD1_INTERRUPT                        (1 << 17)
+#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
+#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
+#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
+#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
+#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
+#define DISP_INTERRUPT_STATUS_CONTINUE                  0x60f8
+#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD2_INTERRUPT                        (1 << 17)
+#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
+#       define DISP_TIMER_INTERRUPT                     (1 << 24)
+#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x60fc
+#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD3_INTERRUPT                        (1 << 17)
+#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x6100
+#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD4_INTERRUPT                        (1 << 17)
+#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x614c
+#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD5_INTERRUPT                        (1 << 17)
+#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
+#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x6150
+#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
+#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
+#       define DC_HPD6_INTERRUPT                        (1 << 17)
+#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
+
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS                                 0x6858
+#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define	GRPH_INT_CONTROL			        0x685c
+#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+#define	DAC_AUTODETECT_INT_CONTROL			0x67c8
+
+#define DC_HPD1_INT_STATUS                              0x601c
+#define DC_HPD2_INT_STATUS                              0x6028
+#define DC_HPD3_INT_STATUS                              0x6034
+#define DC_HPD4_INT_STATUS                              0x6040
+#define DC_HPD5_INT_STATUS                              0x604c
+#define DC_HPD6_INT_STATUS                              0x6058
+#       define DC_HPDx_INT_STATUS                       (1 << 0)
+#       define DC_HPDx_SENSE                            (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
+
+#define DC_HPD1_INT_CONTROL                             0x6020
+#define DC_HPD2_INT_CONTROL                             0x602c
+#define DC_HPD3_INT_CONTROL                             0x6038
+#define DC_HPD4_INT_CONTROL                             0x6044
+#define DC_HPD5_INT_CONTROL                             0x6050
+#define DC_HPD6_INT_CONTROL                             0x605c
+#       define DC_HPDx_INT_ACK                          (1 << 0)
+#       define DC_HPDx_INT_POLARITY                     (1 << 8)
+#       define DC_HPDx_INT_EN                           (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
+#       define DC_HPDx_RX_INT_EN                        (1 << 24)
+
+#define DC_HPD1_CONTROL                                   0x6024
+#define DC_HPD2_CONTROL                                   0x6030
+#define DC_HPD3_CONTROL                                   0x603c
+#define DC_HPD4_CONTROL                                   0x6048
+#define DC_HPD5_CONTROL                                   0x6054
+#define DC_HPD6_CONTROL                                   0x6060
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+#       define DC_HPDx_EN                                 (1 << 28)
+
+/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
+#define CRTC_STATUS_FRAME_COUNT                         0x6e98
+
+#define	GRBM_CNTL					0x8000
+#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
+
+#define	GRBM_STATUS2					0x8008
+#define		RLC_RQ_PENDING 					(1 << 0)
+#define		RLC_BUSY 					(1 << 8)
+#define		TC_BUSY 					(1 << 9)
+
+#define	GRBM_STATUS					0x8010
+#define		CMDFIFO_AVAIL_MASK				0x0000000F
+#define		RING2_RQ_PENDING				(1 << 4)
+#define		SRBM_RQ_PENDING					(1 << 5)
+#define		RING1_RQ_PENDING				(1 << 6)
+#define		CF_RQ_PENDING					(1 << 7)
+#define		PF_RQ_PENDING					(1 << 8)
+#define		GDS_DMA_RQ_PENDING				(1 << 9)
+#define		GRBM_EE_BUSY					(1 << 10)
+#define		DB_CLEAN					(1 << 12)
+#define		CB_CLEAN					(1 << 13)
+#define		TA_BUSY 					(1 << 14)
+#define		GDS_BUSY 					(1 << 15)
+#define		VGT_BUSY					(1 << 17)
+#define		IA_BUSY_NO_DMA					(1 << 18)
+#define		IA_BUSY						(1 << 19)
+#define		SX_BUSY 					(1 << 20)
+#define		SPI_BUSY					(1 << 22)
+#define		BCI_BUSY					(1 << 23)
+#define		SC_BUSY 					(1 << 24)
+#define		PA_BUSY 					(1 << 25)
+#define		DB_BUSY 					(1 << 26)
+#define		CP_COHERENCY_BUSY      				(1 << 28)
+#define		CP_BUSY 					(1 << 29)
+#define		CB_BUSY 					(1 << 30)
+#define		GUI_ACTIVE					(1 << 31)
+#define	GRBM_STATUS_SE0					0x8014
+#define	GRBM_STATUS_SE1					0x8018
+#define		SE_DB_CLEAN					(1 << 1)
+#define		SE_CB_CLEAN					(1 << 2)
+#define		SE_BCI_BUSY					(1 << 22)
+#define		SE_VGT_BUSY					(1 << 23)
+#define		SE_PA_BUSY					(1 << 24)
+#define		SE_TA_BUSY					(1 << 25)
+#define		SE_SX_BUSY					(1 << 26)
+#define		SE_SPI_BUSY					(1 << 27)
+#define		SE_SC_BUSY					(1 << 29)
+#define		SE_DB_BUSY					(1 << 30)
+#define		SE_CB_BUSY					(1 << 31)
+
+#define	GRBM_SOFT_RESET					0x8020
+#define		SOFT_RESET_CP					(1 << 0)
+#define		SOFT_RESET_CB					(1 << 1)
+#define		SOFT_RESET_RLC					(1 << 2)
+#define		SOFT_RESET_DB					(1 << 3)
+#define		SOFT_RESET_GDS					(1 << 4)
+#define		SOFT_RESET_PA					(1 << 5)
+#define		SOFT_RESET_SC					(1 << 6)
+#define		SOFT_RESET_BCI					(1 << 7)
+#define		SOFT_RESET_SPI					(1 << 8)
+#define		SOFT_RESET_SX					(1 << 10)
+#define		SOFT_RESET_TC					(1 << 11)
+#define		SOFT_RESET_TA					(1 << 12)
+#define		SOFT_RESET_VGT					(1 << 14)
+#define		SOFT_RESET_IA					(1 << 15)
+
+#define GRBM_GFX_INDEX          			0x802C
+#define		INSTANCE_INDEX(x)			((x) << 0)
+#define		SH_INDEX(x)     			((x) << 8)
+#define		SE_INDEX(x)     			((x) << 16)
+#define		SH_BROADCAST_WRITES      		(1 << 29)
+#define		INSTANCE_BROADCAST_WRITES      		(1 << 30)
+#define		SE_BROADCAST_WRITES      		(1 << 31)
+
+#define GRBM_INT_CNTL                                   0x8060
+#       define RDERR_INT_ENABLE                         (1 << 0)
+#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
+
+#define	CP_STRMOUT_CNTL					0x84FC
+#define	SCRATCH_REG0					0x8500
+#define	SCRATCH_REG1					0x8504
+#define	SCRATCH_REG2					0x8508
+#define	SCRATCH_REG3					0x850C
+#define	SCRATCH_REG4					0x8510
+#define	SCRATCH_REG5					0x8514
+#define	SCRATCH_REG6					0x8518
+#define	SCRATCH_REG7					0x851C
+
+#define	SCRATCH_UMSK					0x8540
+#define	SCRATCH_ADDR					0x8544
+
+#define	CP_SEM_WAIT_TIMER				0x85BC
+
+#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x85C8
+
+#define CP_ME_CNTL					0x86D8
+#define		CP_CE_HALT					(1 << 24)
+#define		CP_PFP_HALT					(1 << 26)
+#define		CP_ME_HALT					(1 << 28)
+
+#define	CP_COHER_CNTL2					0x85E8
+
+#define	CP_RB2_RPTR					0x86f8
+#define	CP_RB1_RPTR					0x86fc
+#define	CP_RB0_RPTR					0x8700
+#define	CP_RB_WPTR_DELAY				0x8704
+
+#define	CP_QUEUE_THRESHOLDS				0x8760
+#define		ROQ_IB1_START(x)				((x) << 0)
+#define		ROQ_IB2_START(x)				((x) << 8)
+#define CP_MEQ_THRESHOLDS				0x8764
+#define		MEQ1_START(x)				((x) << 0)
+#define		MEQ2_START(x)				((x) << 8)
+
+#define	CP_PERFMON_CNTL					0x87FC
+
+#define	VGT_VTX_VECT_EJECT_REG				0x88B0
+
+#define	VGT_CACHE_INVALIDATION				0x88C4
+#define		CACHE_INVALIDATION(x)				((x) << 0)
+#define			VC_ONLY						0
+#define			TC_ONLY						1
+#define			VC_AND_TC					2
+#define		AUTO_INVLD_EN(x)				((x) << 6)
+#define			NO_AUTO						0
+#define			ES_AUTO						1
+#define			GS_AUTO						2
+#define			ES_AND_GS_AUTO					3
+#define	VGT_ESGS_RING_SIZE				0x88C8
+#define	VGT_GSVS_RING_SIZE				0x88CC
+
+#define	VGT_GS_VERTEX_REUSE				0x88D4
+
+#define	VGT_PRIMITIVE_TYPE				0x8958
+#define	VGT_INDEX_TYPE					0x895C
+
+#define	VGT_NUM_INDICES					0x8970
+#define	VGT_NUM_INSTANCES				0x8974
+
+#define	VGT_TF_RING_SIZE				0x8988
+
+#define	VGT_HS_OFFCHIP_PARAM				0x89B0
+
+#define	VGT_TF_MEMORY_BASE				0x89B8
+
+#define CC_GC_SHADER_ARRAY_CONFIG			0x89bc
+#define		INACTIVE_CUS_MASK			0xFFFF0000
+#define		INACTIVE_CUS_SHIFT			16
+#define GC_USER_SHADER_ARRAY_CONFIG			0x89c0
+
+#define	PA_CL_ENHANCE					0x8A14
+#define		CLIP_VTX_REORDER_ENA				(1 << 0)
+#define		NUM_CLIP_SEQ(x)					((x) << 1)
+
+#define	PA_SU_LINE_STIPPLE_VALUE			0x8A60
+
+#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
+
+#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
+#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
+#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
+
+#define	PA_SC_FIFO_SIZE					0x8BCC
+#define		SC_FRONTEND_PRIM_FIFO_SIZE(x)			((x) << 0)
+#define		SC_BACKEND_PRIM_FIFO_SIZE(x)			((x) << 6)
+#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 15)
+#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 23)
+
+#define	PA_SC_ENHANCE					0x8BF0
+
+#define	SQ_CONFIG					0x8C00
+
+#define	SQC_CACHES					0x8C08
+
+#define	SX_DEBUG_1					0x9060
+
+#define	SPI_STATIC_THREAD_MGMT_1			0x90E0
+#define	SPI_STATIC_THREAD_MGMT_2			0x90E4
+#define	SPI_STATIC_THREAD_MGMT_3			0x90E8
+#define	SPI_PS_MAX_WAVE_ID				0x90EC
+
+#define	SPI_CONFIG_CNTL					0x9100
+
+#define	SPI_CONFIG_CNTL_1				0x913C
+#define		VTX_DONE_DELAY(x)				((x) << 0)
+#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
+
+#define	CGTS_TCC_DISABLE				0x9148
+#define	CGTS_USER_TCC_DISABLE				0x914C
+#define		TCC_DISABLE_MASK				0xFFFF0000
+#define		TCC_DISABLE_SHIFT				16
+
+#define	TA_CNTL_AUX					0x9508
+
+#define CC_RB_BACKEND_DISABLE				0x98F4
+#define		BACKEND_DISABLE(x)     			((x) << 16)
+#define GB_ADDR_CONFIG  				0x98F8
+#define		NUM_PIPES(x)				((x) << 0)
+#define		NUM_PIPES_MASK				0x00000007
+#define		NUM_PIPES_SHIFT				0
+#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
+#define		PIPE_INTERLEAVE_SIZE_MASK		0x00000070
+#define		PIPE_INTERLEAVE_SIZE_SHIFT		4
+#define		NUM_SHADER_ENGINES(x)			((x) << 12)
+#define		NUM_SHADER_ENGINES_MASK			0x00003000
+#define		NUM_SHADER_ENGINES_SHIFT		12
+#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
+#define		SHADER_ENGINE_TILE_SIZE_MASK		0x00070000
+#define		SHADER_ENGINE_TILE_SIZE_SHIFT		16
+#define		NUM_GPUS(x)     			((x) << 20)
+#define		NUM_GPUS_MASK				0x00700000
+#define		NUM_GPUS_SHIFT				20
+#define		MULTI_GPU_TILE_SIZE(x)     		((x) << 24)
+#define		MULTI_GPU_TILE_SIZE_MASK		0x03000000
+#define		MULTI_GPU_TILE_SIZE_SHIFT		24
+#define		ROW_SIZE(x)             		((x) << 28)
+#define		ROW_SIZE_MASK				0x30000000
+#define		ROW_SIZE_SHIFT				28
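+
+/*
+ * Illustrative sketch, not part of the imported header: NUM_PIPES looks
+ * log2-encoded -- TAHITI_GB_ADDR_CONFIG_GOLDEN (0x12011003) above
+ * carries NUM_PIPES = 3, i.e. 8 pipes, and VERDE's 0x12010002 gives 4.
+ * A decode helper under that assumption:
+ */
+static inline unsigned int si_gb_num_pipes(u32 gb_addr_config)
+{
+	return 1u << ((gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT);
+}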
+
+#define	GB_TILE_MODE0					0x9910
+#       define MICRO_TILE_MODE(x)				((x) << 0)
+#              define	ADDR_SURF_DISPLAY_MICRO_TILING		0
+#              define	ADDR_SURF_THIN_MICRO_TILING		1
+#              define	ADDR_SURF_DEPTH_MICRO_TILING		2
+#       define ARRAY_MODE(x)					((x) << 2)
+#              define	ARRAY_LINEAR_GENERAL			0
+#              define	ARRAY_LINEAR_ALIGNED			1
+#              define	ARRAY_1D_TILED_THIN1			2
+#              define	ARRAY_2D_TILED_THIN1			4
+#       define PIPE_CONFIG(x)					((x) << 6)
+#              define	ADDR_SURF_P2				0
+#              define	ADDR_SURF_P4_8x16			4
+#              define	ADDR_SURF_P4_16x16			5
+#              define	ADDR_SURF_P4_16x32			6
+#              define	ADDR_SURF_P4_32x32			7
+#              define	ADDR_SURF_P8_16x16_8x16			8
+#              define	ADDR_SURF_P8_16x32_8x16			9
+#              define	ADDR_SURF_P8_32x32_8x16			10
+#              define	ADDR_SURF_P8_16x32_16x16		11
+#              define	ADDR_SURF_P8_32x32_16x16		12
+#              define	ADDR_SURF_P8_32x32_16x32		13
+#              define	ADDR_SURF_P8_32x64_32x32		14
+#       define TILE_SPLIT(x)					((x) << 11)
+#              define	ADDR_SURF_TILE_SPLIT_64B		0
+#              define	ADDR_SURF_TILE_SPLIT_128B		1
+#              define	ADDR_SURF_TILE_SPLIT_256B		2
+#              define	ADDR_SURF_TILE_SPLIT_512B		3
+#              define	ADDR_SURF_TILE_SPLIT_1KB		4
+#              define	ADDR_SURF_TILE_SPLIT_2KB		5
+#              define	ADDR_SURF_TILE_SPLIT_4KB		6
+#       define BANK_WIDTH(x)					((x) << 14)
+#              define	ADDR_SURF_BANK_WIDTH_1			0
+#              define	ADDR_SURF_BANK_WIDTH_2			1
+#              define	ADDR_SURF_BANK_WIDTH_4			2
+#              define	ADDR_SURF_BANK_WIDTH_8			3
+#       define BANK_HEIGHT(x)					((x) << 16)
+#              define	ADDR_SURF_BANK_HEIGHT_1			0
+#              define	ADDR_SURF_BANK_HEIGHT_2			1
+#              define	ADDR_SURF_BANK_HEIGHT_4			2
+#              define	ADDR_SURF_BANK_HEIGHT_8			3
+#       define MACRO_TILE_ASPECT(x)				((x) << 18)
+#              define	ADDR_SURF_MACRO_ASPECT_1		0
+#              define	ADDR_SURF_MACRO_ASPECT_2		1
+#              define	ADDR_SURF_MACRO_ASPECT_4		2
+#              define	ADDR_SURF_MACRO_ASPECT_8		3
+#       define NUM_BANKS(x)					((x) << 20)
+#              define	ADDR_SURF_2_BANK			0
+#              define	ADDR_SURF_4_BANK			1
+#              define	ADDR_SURF_8_BANK			2
+#              define	ADDR_SURF_16_BANK			3
+
+#define	CB_PERFCOUNTER0_SELECT0				0x9a20
+#define	CB_PERFCOUNTER0_SELECT1				0x9a24
+#define	CB_PERFCOUNTER1_SELECT0				0x9a28
+#define	CB_PERFCOUNTER1_SELECT1				0x9a2c
+#define	CB_PERFCOUNTER2_SELECT0				0x9a30
+#define	CB_PERFCOUNTER2_SELECT1				0x9a34
+#define	CB_PERFCOUNTER3_SELECT0				0x9a38
+#define	CB_PERFCOUNTER3_SELECT1				0x9a3c
+
+#define	GC_USER_RB_BACKEND_DISABLE			0x9B7C
+#define		BACKEND_DISABLE_MASK			0x00FF0000
+#define		BACKEND_DISABLE_SHIFT			16
+
+#define	TCP_CHAN_STEER_LO				0xac0c
+#define	TCP_CHAN_STEER_HI				0xac10
+
+#define	CP_RB0_BASE					0xC100
+#define	CP_RB0_CNTL					0xC104
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		BUF_SWAP_32BIT					(2 << 16)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
+
+#define	CP_RB0_RPTR_ADDR				0xC10C
+#define	CP_RB0_RPTR_ADDR_HI				0xC110
+#define	CP_RB0_WPTR					0xC114
+
+#define	CP_PFP_UCODE_ADDR				0xC150
+#define	CP_PFP_UCODE_DATA				0xC154
+#define	CP_ME_RAM_RADDR					0xC158
+#define	CP_ME_RAM_WADDR					0xC15C
+#define	CP_ME_RAM_DATA					0xC160
+
+#define	CP_CE_UCODE_ADDR				0xC168
+#define	CP_CE_UCODE_DATA				0xC16C
+
+#define	CP_RB1_BASE					0xC180
+#define	CP_RB1_CNTL					0xC184
+#define	CP_RB1_RPTR_ADDR				0xC188
+#define	CP_RB1_RPTR_ADDR_HI				0xC18C
+#define	CP_RB1_WPTR					0xC190
+#define	CP_RB2_BASE					0xC194
+#define	CP_RB2_CNTL					0xC198
+#define	CP_RB2_RPTR_ADDR				0xC19C
+#define	CP_RB2_RPTR_ADDR_HI				0xC1A0
+#define	CP_RB2_WPTR					0xC1A4
+#define CP_INT_CNTL_RING0                               0xC1A8
+#define CP_INT_CNTL_RING1                               0xC1AC
+#define CP_INT_CNTL_RING2                               0xC1B0
+#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
+#       define WAIT_MEM_SEM_INT_ENABLE                  (1 << 21)
+#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
+#       define CP_RINGID2_INT_ENABLE                    (1 << 29)
+#       define CP_RINGID1_INT_ENABLE                    (1 << 30)
+#       define CP_RINGID0_INT_ENABLE                    (1 << 31)
+#define CP_INT_STATUS_RING0                             0xC1B4
+#define CP_INT_STATUS_RING1                             0xC1B8
+#define CP_INT_STATUS_RING2                             0xC1BC
+#       define WAIT_MEM_SEM_INT_STAT                    (1 << 21)
+#       define TIME_STAMP_INT_STAT                      (1 << 26)
+#       define CP_RINGID2_INT_STAT                      (1 << 29)
+#       define CP_RINGID1_INT_STAT                      (1 << 30)
+#       define CP_RINGID0_INT_STAT                      (1 << 31)
+
+#define	CP_DEBUG					0xC1FC
+
+#define RLC_CNTL                                          0xC300
+#       define RLC_ENABLE                                 (1 << 0)
+#define RLC_RL_BASE                                       0xC304
+#define RLC_RL_SIZE                                       0xC308
+#define RLC_LB_CNTL                                       0xC30C
+#define RLC_SAVE_AND_RESTORE_BASE                         0xC310
+#define RLC_LB_CNTR_MAX                                   0xC314
+#define RLC_LB_CNTR_INIT                                  0xC318
+
+#define RLC_CLEAR_STATE_RESTORE_BASE                      0xC320
+
+#define RLC_UCODE_ADDR                                    0xC32C
+#define RLC_UCODE_DATA                                    0xC330
+
+#define RLC_GPU_CLOCK_COUNT_LSB                           0xC338
+#define RLC_GPU_CLOCK_COUNT_MSB                           0xC33C
+#define RLC_CAPTURE_GPU_CLOCK_COUNT                       0xC340
+#define RLC_MC_CNTL                                       0xC344
+#define RLC_UCODE_CNTL                                    0xC348
+
+#define PA_SC_RASTER_CONFIG                             0x28350
+#       define RASTER_CONFIG_RB_MAP_0                   0
+#       define RASTER_CONFIG_RB_MAP_1                   1
+#       define RASTER_CONFIG_RB_MAP_2                   2
+#       define RASTER_CONFIG_RB_MAP_3                   3
+
+#define VGT_EVENT_INITIATOR                             0x28a90
+#       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
+#       define SAMPLE_STREAMOUTSTATS2                   (2 << 0)
+#       define SAMPLE_STREAMOUTSTATS3                   (3 << 0)
+#       define CACHE_FLUSH_TS                           (4 << 0)
+#       define CACHE_FLUSH                              (6 << 0)
+#       define CS_PARTIAL_FLUSH                         (7 << 0)
+#       define VGT_STREAMOUT_RESET                      (10 << 0)
+#       define END_OF_PIPE_INCR_DE                      (11 << 0)
+#       define END_OF_PIPE_IB_END                       (12 << 0)
+#       define RST_PIX_CNT                              (13 << 0)
+#       define VS_PARTIAL_FLUSH                         (15 << 0)
+#       define PS_PARTIAL_FLUSH                         (16 << 0)
+#       define CACHE_FLUSH_AND_INV_TS_EVENT             (20 << 0)
+#       define ZPASS_DONE                               (21 << 0)
+#       define CACHE_FLUSH_AND_INV_EVENT                (22 << 0)
+#       define PERFCOUNTER_START                        (23 << 0)
+#       define PERFCOUNTER_STOP                         (24 << 0)
+#       define PIPELINESTAT_START                       (25 << 0)
+#       define PIPELINESTAT_STOP                        (26 << 0)
+#       define PERFCOUNTER_SAMPLE                       (27 << 0)
+#       define SAMPLE_PIPELINESTAT                      (30 << 0)
+#       define SAMPLE_STREAMOUTSTATS                    (32 << 0)
+#       define RESET_VTX_CNT                            (33 << 0)
+#       define VGT_FLUSH                                (36 << 0)
+#       define BOTTOM_OF_PIPE_TS                        (40 << 0)
+#       define DB_CACHE_FLUSH_AND_INV                   (42 << 0)
+#       define FLUSH_AND_INV_DB_DATA_TS                 (43 << 0)
+#       define FLUSH_AND_INV_DB_META                    (44 << 0)
+#       define FLUSH_AND_INV_CB_DATA_TS                 (45 << 0)
+#       define FLUSH_AND_INV_CB_META                    (46 << 0)
+#       define CS_DONE                                  (47 << 0)
+#       define PS_DONE                                  (48 << 0)
+#       define FLUSH_AND_INV_CB_PIXEL_DATA              (49 << 0)
+#       define THREAD_TRACE_START                       (51 << 0)
+#       define THREAD_TRACE_STOP                        (52 << 0)
+#       define THREAD_TRACE_FLUSH                       (54 << 0)
+#       define THREAD_TRACE_FINISH                      (55 << 0)
+
+/*
+ * UVD
+ */
+#define UVD_UDEC_ADDR_CONFIG				0xEF4C
+#define UVD_UDEC_DB_ADDR_CONFIG				0xEF50
+#define UVD_UDEC_DBW_ADDR_CONFIG			0xEF54
+#define UVD_RBC_RB_RPTR					0xF690
+#define UVD_RBC_RB_WPTR					0xF694
+
+/*
+ * PM4
+ */
+#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |			\
+			 (((reg) >> 2) & 0xFFFF) |			\
+			 ((n) & 0x3FFF) << 16)
+#define CP_PACKET2			0x80000000
+#define		PACKET2_PAD_SHIFT		0
+#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
+
+#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |			\
+			 (((op) & 0xFF) << 8) |				\
+			 ((n) & 0x3FFF) << 16)
+
+#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
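+
+/*
+ * Illustrative sketch, not part of the imported header: a type-3 header
+ * packs the packet type in bits 31:30, the payload count in 29:16 and
+ * the opcode in 15:8. The count field conventionally holds one less
+ * than the number of payload dwords, so a NOP (opcode 0x10, defined
+ * below) carrying one dword is PACKET3(0x10, 0) == 0xC0001000.
+ * RADEON_PACKET_TYPE0/TYPE3 are assumed to be defined elsewhere in the
+ * driver.
+ */
+static inline u32 si_pm4_type3_header(unsigned int op, unsigned int payload_dw)
+{
+	return PACKET3(op, payload_dw - 1);	/* hypothetical helper */
+}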
+
+/* Packet 3 types */
+#define	PACKET3_NOP					0x10
+#define	PACKET3_SET_BASE				0x11
+#define		PACKET3_BASE_INDEX(x)                  ((x) << 0)
+#define			GDS_PARTITION_BASE		2
+#define			CE_PARTITION_BASE		3
+#define	PACKET3_CLEAR_STATE				0x12
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
+#define	PACKET3_DISPATCH_DIRECT				0x15
+#define	PACKET3_DISPATCH_INDIRECT			0x16
+#define	PACKET3_ALLOC_GDS				0x1B
+#define	PACKET3_WRITE_GDS_RAM				0x1C
+#define	PACKET3_ATOMIC_GDS				0x1D
+#define	PACKET3_ATOMIC					0x1E
+#define	PACKET3_OCCLUSION_QUERY				0x1F
+#define	PACKET3_SET_PREDICATION				0x20
+#define	PACKET3_REG_RMW					0x21
+#define	PACKET3_COND_EXEC				0x22
+#define	PACKET3_PRED_EXEC				0x23
+#define	PACKET3_DRAW_INDIRECT				0x24
+#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
+#define	PACKET3_INDEX_BASE				0x26
+#define	PACKET3_DRAW_INDEX_2				0x27
+#define	PACKET3_CONTEXT_CONTROL				0x28
+#define	PACKET3_INDEX_TYPE				0x2A
+#define	PACKET3_DRAW_INDIRECT_MULTI			0x2C
+#define	PACKET3_DRAW_INDEX_AUTO				0x2D
+#define	PACKET3_DRAW_INDEX_IMMD				0x2E
+#define	PACKET3_NUM_INSTANCES				0x2F
+#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
+#define	PACKET3_INDIRECT_BUFFER_CONST			0x31
+#define	PACKET3_INDIRECT_BUFFER				0x32
+#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
+#define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
+#define	PACKET3_WRITE_DATA				0x37
+#define		WRITE_DATA_DST_SEL(x)                   ((x) << 8)
+                /* 0 - register
+		 * 1 - memory (sync - via GRBM)
+		 * 2 - tc/l2
+		 * 3 - gds
+		 * 4 - reserved
+		 * 5 - memory (async - direct)
+		 */
+#define		WR_ONE_ADDR                             (1 << 16)
+#define		WR_CONFIRM                              (1 << 20)
+#define		WRITE_DATA_ENGINE_SEL(x)                ((x) << 30)
+                /* 0 - me
+		 * 1 - pfp
+		 * 2 - ce
+		 */
+#define	PACKET3_DRAW_INDEX_INDIRECT_MULTI		0x38
+#define	PACKET3_MEM_SEMAPHORE				0x39
+#define	PACKET3_MPEG_INDEX				0x3A
+#define	PACKET3_COPY_DW					0x3B
+#define	PACKET3_WAIT_REG_MEM				0x3C
+#define	PACKET3_MEM_WRITE				0x3D
+#define	PACKET3_COPY_DATA				0x40
+#define	PACKET3_CP_DMA					0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - DST_ADDR
+		 * 1 - GDS
+		 */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+		 * 1 - PFP
+		 */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+		 * 1 - GDS
+		 * 2 - DATA
+		 */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+		 * 1 - 8 in 16
+		 * 2 - 8 in 32
+		 * 3 - 8 in 64
+		 */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+		 * 1 - register
+		 */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#              define PACKET3_CP_DMA_CMD_RAW_WAIT  (1 << 30)
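+
+/*
+ * Illustrative sketch, not part of the imported header: dword 3 of a
+ * CP_DMA packet for a plain memory-to-memory copy per the layout
+ * comment above -- source and destination selects 0 (addresses), ME
+ * engine, and CP_SYNC so the CP stalls until the copy retires
+ * (upper_32_bits() is from <linux/kernel.h>).
+ */
+static inline u32 si_cp_dma_word3(u64 src_addr)
+{
+	return PACKET3_CP_DMA_CP_SYNC |
+	       PACKET3_CP_DMA_SRC_SEL(0) |		/* SRC_ADDR */
+	       PACKET3_CP_DMA_DST_SEL(0) |		/* DST_ADDR */
+	       PACKET3_CP_DMA_ENGINE(0) |		/* ME */
+	       (upper_32_bits(src_addr) & 0xff);	/* SRC_ADDR_HI [7:0] */
+}
+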
+#define	PACKET3_PFP_SYNC_ME				0x42
+#define	PACKET3_SURFACE_SYNC				0x43
+#              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
+#              define PACKET3_DEST_BASE_1_ENA      (1 << 1)
+#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
+#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
+#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
+#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
+#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
+#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
+#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
+#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
+#              define PACKET3_DEST_BASE_2_ENA      (1 << 19)
+#              define PACKET3_DEST_BASE_3_ENA      (1 << 21)
+#              define PACKET3_TCL1_ACTION_ENA      (1 << 22)
+#              define PACKET3_TC_ACTION_ENA        (1 << 23)
+#              define PACKET3_CB_ACTION_ENA        (1 << 25)
+#              define PACKET3_DB_ACTION_ENA        (1 << 26)
+#              define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
+#              define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
+#define	PACKET3_ME_INITIALIZE				0x44
+#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define	PACKET3_COND_WRITE				0x45
+#define	PACKET3_EVENT_WRITE				0x46
+#define		EVENT_TYPE(x)                           ((x) << 0)
+#define		EVENT_INDEX(x)                          ((x) << 8)
+                /* 0 - any non-TS event
+		 * 1 - ZPASS_DONE
+		 * 2 - SAMPLE_PIPELINESTAT
+		 * 3 - SAMPLE_STREAMOUTSTAT*
+		 * 4 - *S_PARTIAL_FLUSH
+		 * 5 - EOP events
+		 * 6 - EOS events
+		 * 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT
+		 */
+#define		INV_L2                                  (1 << 20)
+                /* INV TC L2 cache when EVENT_INDEX = 7 */
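+
+/*
+ * Illustrative sketch, not part of the imported header: an EVENT_WRITE
+ * payload pairs a VGT_EVENT_INITIATOR event type with the matching
+ * event index; per the table above, index 7 covers the cache-flush
+ * events.
+ */
+static inline u32 si_cache_flush_inv_event(void)
+{
+	return EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(7);
+}
+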
+#define	PACKET3_EVENT_WRITE_EOP				0x47
+#define		DATA_SEL(x)                             ((x) << 29)
+                /* 0 - discard
+		 * 1 - send low 32bit data
+		 * 2 - send 64bit data
+		 * 3 - send 64bit counter value
+		 */
+#define		INT_SEL(x)                              ((x) << 24)
+                /* 0 - none
+		 * 1 - interrupt only (DATA_SEL = 0)
+		 * 2 - interrupt when data write is confirmed
+		 */
+#define	PACKET3_EVENT_WRITE_EOS				0x48
+#define	PACKET3_PREAMBLE_CNTL				0x4A
+#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
+#define	PACKET3_ONE_REG_WRITE				0x57
+#define	PACKET3_LOAD_CONFIG_REG				0x5F
+#define	PACKET3_LOAD_CONTEXT_REG			0x60
+#define	PACKET3_LOAD_SH_REG				0x61
+#define	PACKET3_SET_CONFIG_REG				0x68
+#define		PACKET3_SET_CONFIG_REG_START			0x00008000
+#define		PACKET3_SET_CONFIG_REG_END			0x0000b000
+#define	PACKET3_SET_CONTEXT_REG				0x69
+#define		PACKET3_SET_CONTEXT_REG_START			0x00028000
+#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
+#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
+#define	PACKET3_SET_RESOURCE_INDIRECT			0x74
+#define	PACKET3_SET_SH_REG				0x76
+#define		PACKET3_SET_SH_REG_START			0x0000b000
+#define		PACKET3_SET_SH_REG_END				0x0000c000
+#define	PACKET3_SET_SH_REG_OFFSET			0x77
+#define	PACKET3_ME_WRITE				0x7A
+#define	PACKET3_SCRATCH_RAM_WRITE			0x7D
+#define	PACKET3_SCRATCH_RAM_READ			0x7E
+#define	PACKET3_CE_WRITE				0x7F
+#define	PACKET3_LOAD_CONST_RAM				0x80
+#define	PACKET3_WRITE_CONST_RAM				0x81
+#define	PACKET3_WRITE_CONST_RAM_OFFSET			0x82
+#define	PACKET3_DUMP_CONST_RAM				0x83
+#define	PACKET3_INCREMENT_CE_COUNTER			0x84
+#define	PACKET3_INCREMENT_DE_COUNTER			0x85
+#define	PACKET3_WAIT_ON_CE_COUNTER			0x86
+#define	PACKET3_WAIT_ON_DE_COUNTER			0x87
+#define	PACKET3_WAIT_ON_DE_COUNTER_DIFF			0x88
+#define	PACKET3_SET_CE_DE_COUNTERS			0x89
+#define	PACKET3_WAIT_ON_AVAIL_BUFFER			0x8A
+#define	PACKET3_SWITCH_BUFFER				0x8B
+
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x800 /* not a register */
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_TILING_CONFIG  				  0xd0b8
+
+#define DMA_PACKET(cmd, b, t, s, n)	((((cmd) & 0xF) << 28) |	\
+					 (((b) & 0x1) << 26) |		\
+					 (((t) & 0x1) << 23) |		\
+					 (((s) & 0x1) << 22) |		\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)	((((cmd) & 0xF) << 28) |	\
+					 (((vmid) & 0xF) << 20) |	\
+					 (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n)		((2 << 28) |			\
+					 (1 << 26) |			\
+					 (1 << 21) |			\
+					 (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define	DMA_PACKET_WRITE				  0x2
+#define	DMA_PACKET_COPY					  0x3
+#define	DMA_PACKET_INDIRECT_BUFFER			  0x4
+#define	DMA_PACKET_SEMAPHORE				  0x5
+#define	DMA_PACKET_FENCE				  0x6
+#define	DMA_PACKET_TRAP					  0x7
+#define	DMA_PACKET_SRBM_WRITE				  0x9
+#define	DMA_PACKET_CONSTANT_FILL			  0xd
+#define	DMA_PACKET_NOP					  0xf
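+
+/*
+ * Illustrative sketch, not part of the imported header: the header dword
+ * for an async-DMA write of 'ndw' dwords, with the b/t/s mode bits left
+ * clear.
+ */
+static inline u32 si_dma_write_header(unsigned int ndw)
+{
+	return DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+}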
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/savage/Makefile b/linux-imx/drivers/gpu/drm/savage/Makefile
new file mode 100644
index 0000000..d8f84ac
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/savage/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y = -Iinclude/drm
+savage-y := savage_drv.o savage_bci.o savage_state.o
+
+obj-$(CONFIG_DRM_SAVAGE) += savage.o
+
diff --git a/linux-imx/drivers/gpu/drm/savage/savage_bci.c b/linux-imx/drivers/gpu/drm/savage/savage_bci.c
new file mode 100644
index 0000000..b55c1d6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/savage/savage_bci.c
@@ -0,0 +1,1095 @@
+/* savage_bci.c -- BCI support for Savage
+ *
+ * Copyright 2004  Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <drm/drmP.h>
+#include <drm/savage_drm.h>
+#include "savage_drv.h"
+
+/* Need a long timeout: shadow status updates can take a while,
+ * and so can waiting for events when the queue is full. */
+#define SAVAGE_DEFAULT_USEC_TIMEOUT	1000000	/* 1s */
+#define SAVAGE_EVENT_USEC_TIMEOUT	5000000	/* 5s */
+#define SAVAGE_FREELIST_DEBUG		0
+
+static int savage_do_cleanup_bci(struct drm_device *dev);
+
+static int
+savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
+{
+	uint32_t mask = dev_priv->status_used_mask;
+	uint32_t threshold = dev_priv->bci_threshold_hi;
+	uint32_t status;
+	int i;
+
+#if SAVAGE_BCI_DEBUG
+	if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
+		DRM_ERROR("Trying to emit %d words "
+			  "(more than guaranteed space in COB)\n", n);
+#endif
+
+	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
+		DRM_MEMORYBARRIER();
+		status = dev_priv->status_ptr[0];
+		if ((status & mask) < threshold)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
+#endif
+	return -EBUSY;
+}
+
+static int
+savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
+{
+	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
+	uint32_t status;
+	int i;
+
+	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
+		status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
+		if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x\n", status);
+#endif
+	return -EBUSY;
+}
+
+static int
+savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
+{
+	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
+	uint32_t status;
+	int i;
+
+	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
+		status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
+		if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x\n", status);
+#endif
+	return -EBUSY;
+}
+
+/*
+ * Waiting for events.
+ *
+ * The BIOS resets the event tag to 0 on mode changes. Therefore we
+ * never emit 0 to the event tag. If we find a 0 event tag we know the
+ * BIOS stomped on it and return success assuming that the BIOS waited
+ * for engine idle.
+ *
+ * Note: if the Xserver uses the event tag it has to follow the same
+ * rule. Otherwise there may be glitches every 2^16 events.
+ */
+static int
+savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
+{
+	uint32_t status;
+	int i;
+
+	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
+		DRM_MEMORYBARRIER();
+		status = dev_priv->status_ptr[1];
+		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
+		    (status & 0xffff) == 0)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
+#endif
+
+	return -EBUSY;
+}
+
+static int
+savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
+{
+	uint32_t status;
+	int i;
+
+	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
+		status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
+		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
+		    (status & 0xffff) == 0)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
+#endif
+
+	return -EBUSY;
+}
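+
+/*
+ * Illustrative sketch, not part of the original file: the modular test
+ * used by both wait functions above, pulled out for clarity. Event tags
+ * live on a 16-bit circle; e counts as passed when the current tag is
+ * at most 0x7fff steps ahead of it modulo 2^16.
+ */
+static inline int savage_event_passed(uint16_t tag, uint16_t e)
+{
+	return (uint16_t)(tag - e) <= 0x7fff;
+}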
+
+uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
+			       unsigned int flags)
+{
+	uint16_t count;
+	BCI_LOCALS;
+
+	if (dev_priv->status_ptr) {
+		/* coordinate with Xserver */
+		count = dev_priv->status_ptr[1023];
+		if (count < dev_priv->event_counter)
+			dev_priv->event_wrap++;
+	} else {
+		count = dev_priv->event_counter;
+	}
+	count = (count + 1) & 0xffff;
+	if (count == 0) {
+		count++;	/* See the comment above savage_wait_event_*. */
+		dev_priv->event_wrap++;
+	}
+	dev_priv->event_counter = count;
+	if (dev_priv->status_ptr)
+		dev_priv->status_ptr[1023] = (uint32_t) count;
+
+	if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
+		unsigned int wait_cmd = BCI_CMD_WAIT;
+		if ((flags & SAVAGE_WAIT_2D))
+			wait_cmd |= BCI_CMD_WAIT_2D;
+		if ((flags & SAVAGE_WAIT_3D))
+			wait_cmd |= BCI_CMD_WAIT_3D;
+		BEGIN_BCI(2);
+		BCI_WRITE(wait_cmd);
+	} else {
+		BEGIN_BCI(1);
+	}
+	BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count);
+
+	return count;
+}
+
+/*
+ * Freelist management
+ */
+static int savage_freelist_init(struct drm_device * dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_savage_buf_priv_t *entry;
+	int i;
+	DRM_DEBUG("count=%d\n", dma->buf_count);
+
+	dev_priv->head.next = &dev_priv->tail;
+	dev_priv->head.prev = NULL;
+	dev_priv->head.buf = NULL;
+
+	dev_priv->tail.next = NULL;
+	dev_priv->tail.prev = &dev_priv->head;
+	dev_priv->tail.buf = NULL;
+
+	for (i = 0; i < dma->buf_count; i++) {
+		buf = dma->buflist[i];
+		entry = buf->dev_private;
+
+		SET_AGE(&entry->age, 0, 0);
+		entry->buf = buf;
+
+		entry->next = dev_priv->head.next;
+		entry->prev = &dev_priv->head;
+		dev_priv->head.next->prev = entry;
+		dev_priv->head.next = entry;
+	}
+
+	return 0;
+}
+
+static struct drm_buf *savage_freelist_get(struct drm_device * dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
+	uint16_t event;
+	unsigned int wrap;
+	DRM_DEBUG("\n");
+
+	UPDATE_EVENT_COUNTER();
+	if (dev_priv->status_ptr)
+		event = dev_priv->status_ptr[1] & 0xffff;
+	else
+		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
+	wrap = dev_priv->event_wrap;
+	if (event > dev_priv->event_counter)
+		wrap--;		/* hardware hasn't passed the last wrap yet */
+
+	DRM_DEBUG("   tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
+	DRM_DEBUG("   head=0x%04x %d\n", event, wrap);
+
+	if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
+		drm_savage_buf_priv_t *next = tail->next;
+		drm_savage_buf_priv_t *prev = tail->prev;
+		prev->next = next;
+		next->prev = prev;
+		tail->next = tail->prev = NULL;
+		return tail->buf;
+	}
+
+	DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
+	return NULL;
+}
+
+void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
+
+	DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);
+
+	if (entry->next != NULL || entry->prev != NULL) {
+		DRM_ERROR("entry already on freelist.\n");
+		return;
+	}
+
+	prev = &dev_priv->head;
+	next = prev->next;
+	prev->next = entry;
+	next->prev = entry;
+	entry->prev = prev;
+	entry->next = next;
+}
+
+/*
+ * Command DMA
+ */
+static int savage_dma_init(drm_savage_private_t * dev_priv)
+{
+	unsigned int i;
+
+	dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
+	    (SAVAGE_DMA_PAGE_SIZE * 4);
+	dev_priv->dma_pages = kmalloc(sizeof(drm_savage_dma_page_t) *
+				      dev_priv->nr_dma_pages, GFP_KERNEL);
+	if (dev_priv->dma_pages == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
+		SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
+		dev_priv->dma_pages[i].used = 0;
+		dev_priv->dma_pages[i].flushed = 0;
+	}
+	SET_AGE(&dev_priv->last_dma_age, 0, 0);
+
+	dev_priv->first_dma_page = 0;
+	dev_priv->current_dma_page = 0;
+
+	return 0;
+}
+
+void savage_dma_reset(drm_savage_private_t * dev_priv)
+{
+	uint16_t event;
+	unsigned int wrap, i;
+	event = savage_bci_emit_event(dev_priv, 0);
+	wrap = dev_priv->event_wrap;
+	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
+		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
+		dev_priv->dma_pages[i].used = 0;
+		dev_priv->dma_pages[i].flushed = 0;
+	}
+	SET_AGE(&dev_priv->last_dma_age, event, wrap);
+	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
+}
+
+void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page)
+{
+	uint16_t event;
+	unsigned int wrap;
+
+	/* Faked DMA buffer pages don't age. */
+	if (dev_priv->cmd_dma == &dev_priv->fake_dma)
+		return;
+
+	UPDATE_EVENT_COUNTER();
+	if (dev_priv->status_ptr)
+		event = dev_priv->status_ptr[1] & 0xffff;
+	else
+		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
+	wrap = dev_priv->event_wrap;
+	if (event > dev_priv->event_counter)
+		wrap--;		/* hardware hasn't passed the last wrap yet */
+
+	if (dev_priv->dma_pages[page].age.wrap > wrap ||
+	    (dev_priv->dma_pages[page].age.wrap == wrap &&
+	     dev_priv->dma_pages[page].age.event > event)) {
+		if (dev_priv->wait_evnt(dev_priv,
+					dev_priv->dma_pages[page].age.event)
+		    < 0)
+			DRM_ERROR("wait_evnt failed!\n");
+	}
+}
+
+uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n)
+{
+	unsigned int cur = dev_priv->current_dma_page;
+	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
+	    dev_priv->dma_pages[cur].used;
+	unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
+	    SAVAGE_DMA_PAGE_SIZE;
+	uint32_t *dma_ptr;
+	unsigned int i;
+
+	DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
+		  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
+
+	if (cur + nr_pages < dev_priv->nr_dma_pages) {
+		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
+		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
+		if (n < rest)
+			rest = n;
+		dev_priv->dma_pages[cur].used += rest;
+		n -= rest;
+		cur++;
+	} else {
+		dev_priv->dma_flush(dev_priv);
+		nr_pages =
+		    (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
+		for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
+			dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
+			dev_priv->dma_pages[i].used = 0;
+			dev_priv->dma_pages[i].flushed = 0;
+		}
+		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
+		dev_priv->first_dma_page = cur = 0;
+	}
+	for (i = cur; nr_pages > 0; ++i, --nr_pages) {
+#if SAVAGE_DMA_DEBUG
+		if (dev_priv->dma_pages[i].used) {
+			DRM_ERROR("unflushed page %u: used=%u\n",
+				  i, dev_priv->dma_pages[i].used);
+		}
+#endif
+		if (n > SAVAGE_DMA_PAGE_SIZE)
+			dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
+		else
+			dev_priv->dma_pages[i].used = n;
+		n -= SAVAGE_DMA_PAGE_SIZE;
+	}
+	dev_priv->current_dma_page = --i;
+
+	DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
+		  i, dev_priv->dma_pages[i].used, n);
+
+	savage_dma_wait(dev_priv, dev_priv->current_dma_page);
+
+	return dma_ptr;
+}
+
+static void savage_dma_flush(drm_savage_private_t * dev_priv)
+{
+	unsigned int first = dev_priv->first_dma_page;
+	unsigned int cur = dev_priv->current_dma_page;
+	uint16_t event;
+	unsigned int wrap, pad, align, len, i;
+	unsigned long phys_addr;
+	BCI_LOCALS;
+
+	if (first == cur &&
+	    dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
+		return;
+
+	/* pad length to multiples of 2 entries
+	 * align the start of the next DMA block to multiples of 8 entries */
+	pad = -dev_priv->dma_pages[cur].used & 1;
+	align = -(dev_priv->dma_pages[cur].used + pad) & 7;
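+	/* Worked example with hypothetical numbers: used = 13 gives
+	 * pad = -13 & 1 = 1, growing the block to 14 entries; then
+	 * align = -14 & 7 = 2, so the next block starts 16 entries in.
+	 * In two's complement, -x & (2^k - 1) is the distance from x up
+	 * to the next multiple of 2^k. */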
+
+	DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
+		  "pad=%u, align=%u\n",
+		  first, cur, dev_priv->dma_pages[first].flushed,
+		  dev_priv->dma_pages[cur].used, pad, align);
+
+	/* pad with noops */
+	if (pad) {
+		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
+		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
+		dev_priv->dma_pages[cur].used += pad;
+		while (pad != 0) {
+			*dma_ptr++ = BCI_CMD_WAIT;
+			pad--;
+		}
+	}
+
+	DRM_MEMORYBARRIER();
+
+	/* do flush ... */
+	phys_addr = dev_priv->cmd_dma->offset +
+	    (first * SAVAGE_DMA_PAGE_SIZE +
+	     dev_priv->dma_pages[first].flushed) * 4;
+	len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
+	    dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;
+
+	DRM_DEBUG("phys_addr=%lx, len=%u\n",
+		  phys_addr | dev_priv->dma_type, len);
+
+	BEGIN_BCI(3);
+	BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
+	BCI_WRITE(phys_addr | dev_priv->dma_type);
+	BCI_DMA(len);
+
+	/* fix alignment of the start of the next block */
+	dev_priv->dma_pages[cur].used += align;
+
+	/* age DMA pages */
+	event = savage_bci_emit_event(dev_priv, 0);
+	wrap = dev_priv->event_wrap;
+	for (i = first; i < cur; ++i) {
+		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
+		dev_priv->dma_pages[i].used = 0;
+		dev_priv->dma_pages[i].flushed = 0;
+	}
+	/* age the current page only when it's full */
+	if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
+		SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
+		dev_priv->dma_pages[cur].used = 0;
+		dev_priv->dma_pages[cur].flushed = 0;
+		/* advance to next page */
+		cur++;
+		if (cur == dev_priv->nr_dma_pages)
+			cur = 0;
+		dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
+	} else {
+		dev_priv->first_dma_page = cur;
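+		/* note: the aging loop above leaves i == cur here */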
+		dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
+	}
+	SET_AGE(&dev_priv->last_dma_age, event, wrap);
+
+	DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
+		  dev_priv->dma_pages[cur].used,
+		  dev_priv->dma_pages[cur].flushed);
+}
+
+static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
+{
+	unsigned int i, j;
+	BCI_LOCALS;
+
+	if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
+	    dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
+		return;
+
+	DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
+		  dev_priv->first_dma_page, dev_priv->current_dma_page,
+		  dev_priv->dma_pages[dev_priv->current_dma_page].used);
+
+	for (i = dev_priv->first_dma_page;
+	     i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
+	     ++i) {
+		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
+		    i * SAVAGE_DMA_PAGE_SIZE;
+#if SAVAGE_DMA_DEBUG
+		/* Sanity check: all pages except the last one must be full. */
+		if (i < dev_priv->current_dma_page &&
+		    dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
+			DRM_ERROR("partial DMA page %u: used=%u",
+				  i, dev_priv->dma_pages[i].used);
+		}
+#endif
+		BEGIN_BCI(dev_priv->dma_pages[i].used);
+		for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
+			BCI_WRITE(dma_ptr[j]);
+		}
+		dev_priv->dma_pages[i].used = 0;
+	}
+
+	/* reset to first page */
+	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
+}
+
+int savage_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+	drm_savage_private_t *dev_priv;
+
+	dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	dev->dev_private = (void *)dev_priv;
+
+	dev_priv->chipset = (enum savage_family)chipset;
+
+	pci_set_master(dev->pdev);
+
+	return 0;
+}
+
+
+/*
+ * Initialize mappings. On Savage4 and SavageIX the alignment
+ * and size of the aperture are not suitable for automatic MTRR setup
+ * in drm_addmap. Therefore we add the MTRRs manually before the maps are
+ * initialized, and tear them down on last close.
+ */
+int savage_driver_firstopen(struct drm_device *dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	unsigned long mmio_base, fb_base, fb_size, aperture_base;
+	/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
+	 * in case we decide we need information on the BAR for BSD in the
+	 * future.
+	 */
+	unsigned int fb_rsrc, aper_rsrc;
+	int ret = 0;
+
+	dev_priv->mtrr[0].handle = -1;
+	dev_priv->mtrr[1].handle = -1;
+	dev_priv->mtrr[2].handle = -1;
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		fb_rsrc = 0;
+		fb_base = pci_resource_start(dev->pdev, 0);
+		fb_size = SAVAGE_FB_SIZE_S3;
+		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
+		aper_rsrc = 0;
+		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
+		/* this should always be true */
+		if (pci_resource_len(dev->pdev, 0) == 0x08000000) {
+			/* Don't make MMIO write-combining! We need 3
+			 * MTRRs. */
+			dev_priv->mtrr[0].base = fb_base;
+			dev_priv->mtrr[0].size = 0x01000000;
+			dev_priv->mtrr[0].handle =
+			    drm_mtrr_add(dev_priv->mtrr[0].base,
+				         dev_priv->mtrr[0].size, DRM_MTRR_WC);
+			dev_priv->mtrr[1].base = fb_base + 0x02000000;
+			dev_priv->mtrr[1].size = 0x02000000;
+			dev_priv->mtrr[1].handle =
+			    drm_mtrr_add(dev_priv->mtrr[1].base,
+					 dev_priv->mtrr[1].size, DRM_MTRR_WC);
+			dev_priv->mtrr[2].base = fb_base + 0x04000000;
+			dev_priv->mtrr[2].size = 0x04000000;
+			dev_priv->mtrr[2].handle =
+			    drm_mtrr_add(dev_priv->mtrr[2].base,
+					 dev_priv->mtrr[2].size, DRM_MTRR_WC);
+		} else {
+			DRM_ERROR("strange pci_resource_len %08llx\n",
+				  (unsigned long long)
+				  pci_resource_len(dev->pdev, 0));
+		}
+	} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
+		   dev_priv->chipset != S3_SAVAGE2000) {
+		mmio_base = pci_resource_start(dev->pdev, 0);
+		fb_rsrc = 1;
+		fb_base = pci_resource_start(dev->pdev, 1);
+		fb_size = SAVAGE_FB_SIZE_S4;
+		aper_rsrc = 1;
+		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
+		/* this should always be true */
+		if (pci_resource_len(dev->pdev, 1) == 0x08000000) {
+			/* Can use one MTRR to cover both fb and
+			 * aperture. */
+			dev_priv->mtrr[0].base = fb_base;
+			dev_priv->mtrr[0].size = 0x08000000;
+			dev_priv->mtrr[0].handle =
+			    drm_mtrr_add(dev_priv->mtrr[0].base,
+					 dev_priv->mtrr[0].size, DRM_MTRR_WC);
+		} else {
+			DRM_ERROR("strange pci_resource_len %08llx\n",
+				  (unsigned long long)
+				  pci_resource_len(dev->pdev, 1));
+		}
+	} else {
+		mmio_base = pci_resource_start(dev->pdev, 0);
+		fb_rsrc = 1;
+		fb_base = pci_resource_start(dev->pdev, 1);
+		fb_size = pci_resource_len(dev->pdev, 1);
+		aper_rsrc = 2;
+		aperture_base = pci_resource_start(dev->pdev, 2);
+		/* Automatic MTRR setup will do the right thing. */
+	}
+
+	ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
+			 _DRM_READ_ONLY, &dev_priv->mmio);
+	if (ret)
+		return ret;
+
+	ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
+			 _DRM_WRITE_COMBINING, &dev_priv->fb);
+	if (ret)
+		return ret;
+
+	ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
+			 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
+			 &dev_priv->aperture);
+	return ret;
+}
+
+/*
+ * Delete MTRRs and free device-private data.
+ */
+void savage_driver_lastclose(struct drm_device *dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < 3; ++i)
+		if (dev_priv->mtrr[i].handle >= 0)
+			drm_mtrr_del(dev_priv->mtrr[i].handle,
+				 dev_priv->mtrr[i].base,
+				 dev_priv->mtrr[i].size, DRM_MTRR_WC);
+}
+
+int savage_driver_unload(struct drm_device *dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+
+	kfree(dev_priv);
+
+	return 0;
+}
+
+static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+
+	if (init->fb_bpp != 16 && init->fb_bpp != 32) {
+		DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
+		return -EINVAL;
+	}
+	if (init->depth_bpp != 16 && init->depth_bpp != 32) {
+		DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
+		return -EINVAL;
+	}
+	if (init->dma_type != SAVAGE_DMA_AGP &&
+	    init->dma_type != SAVAGE_DMA_PCI) {
+		DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
+		return -EINVAL;
+	}
+
+	dev_priv->cob_size = init->cob_size;
+	dev_priv->bci_threshold_lo = init->bci_threshold_lo;
+	dev_priv->bci_threshold_hi = init->bci_threshold_hi;
+	dev_priv->dma_type = init->dma_type;
+
+	dev_priv->fb_bpp = init->fb_bpp;
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->front_pitch = init->front_pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->back_pitch = init->back_pitch;
+	dev_priv->depth_bpp = init->depth_bpp;
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->depth_pitch = init->depth_pitch;
+
+	dev_priv->texture_offset = init->texture_offset;
+	dev_priv->texture_size = init->texture_size;
+
+	dev_priv->sarea = drm_getsarea(dev);
+	if (!dev_priv->sarea) {
+		DRM_ERROR("could not find sarea!\n");
+		savage_do_cleanup_bci(dev);
+		return -EINVAL;
+	}
+	if (init->status_offset != 0) {
+		dev_priv->status = drm_core_findmap(dev, init->status_offset);
+		if (!dev_priv->status) {
+			DRM_ERROR("could not find shadow status region!\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+	} else {
+		dev_priv->status = NULL;
+	}
+	if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
+		dev->agp_buffer_token = init->buffers_offset;
+		dev->agp_buffer_map = drm_core_findmap(dev,
+						       init->buffers_offset);
+		if (!dev->agp_buffer_map) {
+			DRM_ERROR("could not find DMA buffer region!\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+		drm_core_ioremap(dev->agp_buffer_map, dev);
+		if (!dev->agp_buffer_map->handle) {
+			DRM_ERROR("failed to ioremap DMA buffer region!\n");
+			savage_do_cleanup_bci(dev);
+			return -ENOMEM;
+		}
+	}
+	if (init->agp_textures_offset) {
+		dev_priv->agp_textures =
+		    drm_core_findmap(dev, init->agp_textures_offset);
+		if (!dev_priv->agp_textures) {
+			DRM_ERROR("could not find agp texture region!\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+	} else {
+		dev_priv->agp_textures = NULL;
+	}
+
+	if (init->cmd_dma_offset) {
+		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+			DRM_ERROR("command DMA not supported on "
+				  "Savage3D/MX/IX.\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+		if (dev->dma && dev->dma->buflist) {
+			DRM_ERROR("command and vertex DMA not supported "
+				  "at the same time.\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+		dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
+		if (!dev_priv->cmd_dma) {
+			DRM_ERROR("could not find command DMA region!\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+		if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
+			if (dev_priv->cmd_dma->type != _DRM_AGP) {
+				DRM_ERROR("AGP command DMA region is not a "
+					  "_DRM_AGP map!\n");
+				savage_do_cleanup_bci(dev);
+				return -EINVAL;
+			}
+			drm_core_ioremap(dev_priv->cmd_dma, dev);
+			if (!dev_priv->cmd_dma->handle) {
+				DRM_ERROR("failed to ioremap command "
+					  "DMA region!\n");
+				savage_do_cleanup_bci(dev);
+				return -ENOMEM;
+			}
+		} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
+			DRM_ERROR("PCI command DMA region is not a "
+				  "_DRM_CONSISTENT map!\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+	} else {
+		dev_priv->cmd_dma = NULL;
+	}
+
+	dev_priv->dma_flush = savage_dma_flush;
+	if (!dev_priv->cmd_dma) {
+		DRM_DEBUG("falling back to faked command DMA.\n");
+		dev_priv->fake_dma.offset = 0;
+		dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
+		dev_priv->fake_dma.type = _DRM_SHM;
+		dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE,
+						    GFP_KERNEL);
+		if (!dev_priv->fake_dma.handle) {
+			DRM_ERROR("could not allocate faked DMA buffer!\n");
+			savage_do_cleanup_bci(dev);
+			return -ENOMEM;
+		}
+		dev_priv->cmd_dma = &dev_priv->fake_dma;
+		dev_priv->dma_flush = savage_fake_dma_flush;
+	}
+
+	dev_priv->sarea_priv =
+	    (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle +
+				    init->sarea_priv_offset);
+
+	/* setup bitmap descriptors */
+	{
+		unsigned int color_tile_format;
+		unsigned int depth_tile_format;
+		unsigned int front_stride, back_stride, depth_stride;
+		if (dev_priv->chipset <= S3_SAVAGE4) {
+			color_tile_format = dev_priv->fb_bpp == 16 ?
+			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
+			depth_tile_format = dev_priv->depth_bpp == 16 ?
+			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
+		} else {
+			color_tile_format = SAVAGE_BD_TILE_DEST;
+			depth_tile_format = SAVAGE_BD_TILE_DEST;
+		}
+		front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
+		back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
+		depth_stride =
+		    dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
+
+		dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
+		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
+		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);
+
+		dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
+		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
+		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);
+
+		dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
+		    (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
+		    (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
+	}
+
+	/* setup status and bci ptr */
+	dev_priv->event_counter = 0;
+	dev_priv->event_wrap = 0;
+	dev_priv->bci_ptr = (volatile uint32_t *)
+	    ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
+	} else {
+		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
+	}
+	if (dev_priv->status != NULL) {
+		dev_priv->status_ptr =
+		    (volatile uint32_t *)dev_priv->status->handle;
+		dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
+		dev_priv->wait_evnt = savage_bci_wait_event_shadow;
+		dev_priv->status_ptr[1023] = dev_priv->event_counter;
+	} else {
+		dev_priv->status_ptr = NULL;
+		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+			dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
+		} else {
+			dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
+		}
+		dev_priv->wait_evnt = savage_bci_wait_event_reg;
+	}
+
+	/* cliprect functions */
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
+		dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
+	else
+		dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
+
+	if (savage_freelist_init(dev) < 0) {
+		DRM_ERROR("could not initialize freelist\n");
+		savage_do_cleanup_bci(dev);
+		return -ENOMEM;
+	}
+
+	if (savage_dma_init(dev_priv) < 0) {
+		DRM_ERROR("could not initialize command DMA\n");
+		savage_do_cleanup_bci(dev);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int savage_do_cleanup_bci(struct drm_device * dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+
+	if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
+		kfree(dev_priv->fake_dma.handle);
+	} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
+		   dev_priv->cmd_dma->type == _DRM_AGP &&
+		   dev_priv->dma_type == SAVAGE_DMA_AGP)
+		drm_core_ioremapfree(dev_priv->cmd_dma, dev);
+
+	if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
+	    dev->agp_buffer_map && dev->agp_buffer_map->handle) {
+		drm_core_ioremapfree(dev->agp_buffer_map, dev);
+		/* make sure the next instance (which may be running
+		 * in PCI mode) doesn't try to use an old
+		 * agp_buffer_map. */
+		dev->agp_buffer_map = NULL;
+	}
+
+	kfree(dev_priv->dma_pages);
+
+	return 0;
+}
+
+static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_savage_init_t *init = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	switch (init->func) {
+	case SAVAGE_INIT_BCI:
+		return savage_do_init_bci(dev, init);
+	case SAVAGE_CLEANUP_BCI:
+		return savage_do_cleanup_bci(dev);
+	}
+
+	return -EINVAL;
+}
+
+static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_savage_event_emit_t *event = data;
+
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	event->count = savage_bci_emit_event(dev_priv, event->flags);
+	event->count |= dev_priv->event_wrap << 16;
+
+	return 0;
+}
+
+static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_savage_event_wait_t *event = data;
+	unsigned int event_e, hw_e;
+	unsigned int event_w, hw_w;
+
+	DRM_DEBUG("\n");
+
+	UPDATE_EVENT_COUNTER();
+	if (dev_priv->status_ptr)
+		hw_e = dev_priv->status_ptr[1] & 0xffff;
+	else
+		hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
+	hw_w = dev_priv->event_wrap;
+	if (hw_e > dev_priv->event_counter)
+		hw_w--;		/* hardware hasn't passed the last wrap yet */
+
+	event_e = event->count & 0xffff;
+	event_w = event->count >> 16;
+
+	/* Don't need to wait if
+	 * - event counter wrapped since the event was emitted or
+	 * - the hardware has advanced up to or over the event to wait for.
+	 */
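+	/* Illustrative case: an event emitted as count 0x0003fff0
+	 * (event_e=0xfff0, event_w=3) has completed once the wrap count
+	 * reaches 4, even though hw_e may then be a small value like 0x0005. */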
+	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
+		return 0;
+	else
+		return dev_priv->wait_evnt(dev_priv, event_e);
+}
+
+/*
+ * DMA buffer management
+ */
+
+static int savage_bci_get_buffers(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  struct drm_dma *d)
+{
+	struct drm_buf *buf;
+	int i;
+
+	for (i = d->granted_count; i < d->request_count; i++) {
+		buf = savage_freelist_get(dev);
+		if (!buf)
+			return -EAGAIN;
+
+		buf->file_priv = file_priv;
+
+		if (DRM_COPY_TO_USER(&d->request_indices[i],
+				     &buf->idx, sizeof(buf->idx)))
+			return -EFAULT;
+		if (DRM_COPY_TO_USER(&d->request_sizes[i],
+				     &buf->total, sizeof(buf->total)))
+			return -EFAULT;
+
+		d->granted_count++;
+	}
+	return 0;
+}
+
+int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_dma *d = data;
+	int ret = 0;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* Please don't send us buffers.
+	 */
+	if (d->send_count != 0) {
+		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
+			  DRM_CURRENTPID, d->send_count);
+		return -EINVAL;
+	}
+
+	/* We'll send you buffers.
+	 */
+	if (d->request_count < 0 || d->request_count > dma->buf_count) {
+		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+			  DRM_CURRENTPID, d->request_count, dma->buf_count);
+		return -EINVAL;
+	}
+
+	d->granted_count = 0;
+
+	if (d->request_count) {
+		ret = savage_bci_get_buffers(dev, file_priv, d);
+	}
+
+	return ret;
+}
+
+void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	int release_idlelock = 0;
+	int i;
+
+	if (!dma)
+		return;
+	if (!dev_priv)
+		return;
+	if (!dma->buflist)
+		return;
+
+	if (file_priv->master && file_priv->master->lock.hw_lock) {
+		drm_idlelock_take(&file_priv->master->lock);
+		release_idlelock = 1;
+	}
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_savage_buf_priv_t *buf_priv = buf->dev_private;
+
+		if (buf->file_priv == file_priv && buf_priv &&
+		    buf_priv->next == NULL && buf_priv->prev == NULL) {
+			uint16_t event;
+			DRM_DEBUG("reclaimed from client\n");
+			event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
+			SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
+			savage_freelist_put(dev, buf);
+		}
+	}
+
+	if (release_idlelock)
+		drm_idlelock_release(&file_priv->master->lock);
+}
+
+struct drm_ioctl_desc savage_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
+};
+
+int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/linux-imx/drivers/gpu/drm/savage/savage_drv.c b/linux-imx/drivers/gpu/drm/savage/savage_drv.c
new file mode 100644
index 0000000..71b2081
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/savage/savage_drv.c
@@ -0,0 +1,93 @@
+/* savage_drv.c -- Savage driver for Linux
+ *
+ * Copyright 2004  Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/savage_drm.h>
+#include "savage_drv.h"
+
+#include <drm/drm_pciids.h>
+
+static struct pci_device_id pciidlist[] = {
+	savage_PCI_IDS
+};
+
+static const struct file_operations savage_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
+	.dev_priv_size = sizeof(drm_savage_buf_priv_t),
+	.load = savage_driver_load,
+	.firstopen = savage_driver_firstopen,
+	.preclose = savage_reclaim_buffers,
+	.lastclose = savage_driver_lastclose,
+	.unload = savage_driver_unload,
+	.ioctls = savage_ioctls,
+	.dma_ioctl = savage_bci_buffers,
+	.fops = &savage_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct pci_driver savage_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+};
+
+static int __init savage_init(void)
+{
+	driver.num_ioctls = savage_max_ioctl;
+	return drm_pci_init(&driver, &savage_pci_driver);
+}
+
+static void __exit savage_exit(void)
+{
+	drm_pci_exit(&driver, &savage_pci_driver);
+}
+
+module_init(savage_init);
+module_exit(savage_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/linux-imx/drivers/gpu/drm/savage/savage_drv.h b/linux-imx/drivers/gpu/drm/savage/savage_drv.h
new file mode 100644
index 0000000..df2aac6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/savage/savage_drv.h
@@ -0,0 +1,575 @@
+/* savage_drv.h -- Private header for the savage driver */
+/*
+ * Copyright 2004  Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SAVAGE_DRV_H__
+#define __SAVAGE_DRV_H__
+
+#define DRIVER_AUTHOR	"Felix Kuehling"
+
+#define DRIVER_NAME	"savage"
+#define DRIVER_DESC	"Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]"
+#define DRIVER_DATE	"20050313"
+
+#define DRIVER_MAJOR		2
+#define DRIVER_MINOR		4
+#define DRIVER_PATCHLEVEL	1
+/* Interface history:
+ *
+ * 1.x   The DRM driver from the VIA/S3 code drop, basically a dummy
+ * 2.0   The first real DRM
+ * 2.1   Scissors registers managed by the DRM, 3D operations clipped by
+ *       cliprects of the cmdbuf ioctl
+ * 2.2   Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX
+ * 2.3   Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits
+ *       wide and thus very long lived (unlikely to ever wrap). The size
+ *       in the struct was 32 bits before, but only 16 bits were used
+ * 2.4   Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is
+ *       actually used
+ */
+
+typedef struct drm_savage_age {
+	uint16_t event;
+	unsigned int wrap;
+} drm_savage_age_t;
+
+typedef struct drm_savage_buf_priv {
+	struct drm_savage_buf_priv *next;
+	struct drm_savage_buf_priv *prev;
+	drm_savage_age_t age;
+	struct drm_buf *buf;
+} drm_savage_buf_priv_t;
+
+typedef struct drm_savage_dma_page {
+	drm_savage_age_t age;
+	unsigned int used, flushed;
+} drm_savage_dma_page_t;
+#define SAVAGE_DMA_PAGE_SIZE 1024	/* in dwords */
+/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command
+ * size of 16kbytes or 4k entries. Minimum requirement would be
+ * 10kbytes for 255 40-byte vertices in one drawing command. */
+#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4)
+
+/* interesting bits of hardware state that are saved in dev_priv */
+typedef union {
+	struct drm_savage_common_state {
+		uint32_t vbaddr;
+	} common;
+	struct {
+		unsigned char pad[sizeof(struct drm_savage_common_state)];
+		uint32_t texctrl, texaddr;
+		uint32_t scstart, new_scstart;
+		uint32_t scend, new_scend;
+	} s3d;
+	struct {
+		unsigned char pad[sizeof(struct drm_savage_common_state)];
+		uint32_t texdescr, texaddr0, texaddr1;
+		uint32_t drawctrl0, new_drawctrl0;
+		uint32_t drawctrl1, new_drawctrl1;
+	} s4;
+} drm_savage_state_t;
+
+/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
+enum savage_family {
+	S3_UNKNOWN = 0,
+	S3_SAVAGE3D,
+	S3_SAVAGE_MX,
+	S3_SAVAGE4,
+	S3_PROSAVAGE,
+	S3_TWISTER,
+	S3_PROSAVAGEDDR,
+	S3_SUPERSAVAGE,
+	S3_SAVAGE2000,
+	S3_LAST
+};
+
+extern struct drm_ioctl_desc savage_ioctls[];
+extern int savage_max_ioctl;
+
+#define S3_SAVAGE3D_SERIES(chip)  ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
+
+#define S3_SAVAGE4_SERIES(chip)  ((chip==S3_SAVAGE4)            \
+                                  || (chip==S3_PROSAVAGE)       \
+                                  || (chip==S3_TWISTER)         \
+                                  || (chip==S3_PROSAVAGEDDR))
+
+#define	S3_SAVAGE_MOBILE_SERIES(chip)	((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
+
+#define S3_SAVAGE_SERIES(chip)    ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
+
+#define S3_MOBILE_TWISTER_SERIES(chip)   ((chip==S3_TWISTER)    \
+                                          ||(chip==S3_PROSAVAGEDDR))
+
+/* flags */
+#define SAVAGE_IS_AGP 1
+
+typedef struct drm_savage_private {
+	drm_savage_sarea_t *sarea_priv;
+
+	drm_savage_buf_priv_t head, tail;
+
+	/* who am I? */
+	enum savage_family chipset;
+
+	unsigned int cob_size;
+	unsigned int bci_threshold_lo, bci_threshold_hi;
+	unsigned int dma_type;
+
+	/* frame buffer layout */
+	unsigned int fb_bpp;
+	unsigned int front_offset, front_pitch;
+	unsigned int back_offset, back_pitch;
+	unsigned int depth_bpp;
+	unsigned int depth_offset, depth_pitch;
+
+	/* bitmap descriptors for swap and clear */
+	unsigned int front_bd, back_bd, depth_bd;
+
+	/* local textures */
+	unsigned int texture_offset;
+	unsigned int texture_size;
+
+	/* memory regions in physical memory */
+	drm_local_map_t *sarea;
+	drm_local_map_t *mmio;
+	drm_local_map_t *fb;
+	drm_local_map_t *aperture;
+	drm_local_map_t *status;
+	drm_local_map_t *agp_textures;
+	drm_local_map_t *cmd_dma;
+	drm_local_map_t fake_dma;
+
+	struct {
+		int handle;
+		unsigned long base, size;
+	} mtrr[3];
+
+	/* BCI and status-related stuff */
+	volatile uint32_t *status_ptr, *bci_ptr;
+	uint32_t status_used_mask;
+	uint16_t event_counter;
+	unsigned int event_wrap;
+
+	/* Savage4 command DMA */
+	drm_savage_dma_page_t *dma_pages;
+	unsigned int nr_dma_pages, first_dma_page, current_dma_page;
+	drm_savage_age_t last_dma_age;
+
+	/* saved hw state for global/local check on S3D */
+	uint32_t hw_draw_ctrl, hw_zbuf_ctrl;
+	/* and for scissors (global, so don't emit if not changed) */
+	uint32_t hw_scissors_start, hw_scissors_end;
+
+	drm_savage_state_t state;
+
+	/* after emitting a wait cmd Savage3D needs 63 nops before next DMA */
+	unsigned int waiting;
+
+	/* config/hardware-dependent function pointers */
+	int (*wait_fifo) (struct drm_savage_private * dev_priv, unsigned int n);
+	int (*wait_evnt) (struct drm_savage_private * dev_priv, uint16_t e);
+	/* Note: there is a macro wait_event in include/linux/wait.h.
+	 * This member is named wait_evnt to avoid unwanted macro expansion. */
+	void (*emit_clip_rect) (struct drm_savage_private * dev_priv,
+				const struct drm_clip_rect * pbox);
+	void (*dma_flush) (struct drm_savage_private * dev_priv);
+} drm_savage_private_t;
+
+/* ioctls */
+extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
+/* BCI functions */
+extern uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
+				      unsigned int flags);
+extern void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf);
+extern void savage_dma_reset(drm_savage_private_t * dev_priv);
+extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page);
+extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv,
+				  unsigned int n);
+extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
+extern int savage_driver_firstopen(struct drm_device *dev);
+extern void savage_driver_lastclose(struct drm_device *dev);
+extern int savage_driver_unload(struct drm_device *dev);
+extern void savage_reclaim_buffers(struct drm_device *dev,
+				   struct drm_file *file_priv);
+
+/* state functions */
+extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
+				      const struct drm_clip_rect * pbox);
+extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
+				     const struct drm_clip_rect * pbox);
+
+#define SAVAGE_FB_SIZE_S3	0x01000000	/*  16MB */
+#define SAVAGE_FB_SIZE_S4	0x02000000	/*  32MB */
+#define SAVAGE_MMIO_SIZE        0x00080000	/* 512kB */
+#define SAVAGE_APERTURE_OFFSET  0x02000000	/*  32MB */
+#define SAVAGE_APERTURE_SIZE    0x05000000	/* 5 tiled surfaces, 16MB each */
+
+#define SAVAGE_BCI_OFFSET       0x00010000	/* offset of the BCI region
+						 * inside the MMIO region */
+#define SAVAGE_BCI_FIFO_SIZE	32	/* number of entries in on-chip
+					 * BCI FIFO */
+
+/*
+ * MMIO registers
+ */
+#define SAVAGE_STATUS_WORD0		0x48C00
+#define SAVAGE_STATUS_WORD1		0x48C04
+#define SAVAGE_ALT_STATUS_WORD0	0x48C60
+
+#define SAVAGE_FIFO_USED_MASK_S3D	0x0001ffff
+#define SAVAGE_FIFO_USED_MASK_S4	0x001fffff
+
+/* Copied from savage_bci.h in the 2D driver with some renaming. */
+
+/* Bitmap descriptors */
+#define SAVAGE_BD_STRIDE_SHIFT 0
+#define SAVAGE_BD_BPP_SHIFT   16
+#define SAVAGE_BD_TILE_SHIFT  24
+#define SAVAGE_BD_BW_DISABLE  (1<<28)
+/* common: */
+#define	SAVAGE_BD_TILE_LINEAR		0
+/* savage4, MX, IX, 3D */
+#define	SAVAGE_BD_TILE_16BPP		2
+#define	SAVAGE_BD_TILE_32BPP		3
+/* twister, prosavage, DDR, supersavage, 2000 */
+#define	SAVAGE_BD_TILE_DEST		1
+#define	SAVAGE_BD_TILE_TEXTURE		2
+/* GBD - BCI enable */
+/* savage4, MX, IX, 3D */
+#define SAVAGE_GBD_BCI_ENABLE                    8
+/* twister, prosavage, DDR, supersavage, 2000 */
+#define SAVAGE_GBD_BCI_ENABLE_TWISTER            0
+
+#define SAVAGE_GBD_BIG_ENDIAN                    4
+#define SAVAGE_GBD_LITTLE_ENDIAN                 0
+#define SAVAGE_GBD_64                            1
+
+/*  Global Bitmap Descriptor */
+#define SAVAGE_BCI_GLB_BD_LOW             0x8168
+#define SAVAGE_BCI_GLB_BD_HIGH            0x816C
+
+/*
+ * BCI registers
+ */
+/* Savage4/Twister/ProSavage 3D registers */
+#define SAVAGE_DRAWLOCALCTRL_S4		0x1e
+#define SAVAGE_TEXPALADDR_S4		0x1f
+#define SAVAGE_TEXCTRL0_S4		0x20
+#define SAVAGE_TEXCTRL1_S4		0x21
+#define SAVAGE_TEXADDR0_S4		0x22
+#define SAVAGE_TEXADDR1_S4		0x23
+#define SAVAGE_TEXBLEND0_S4		0x24
+#define SAVAGE_TEXBLEND1_S4		0x25
+#define SAVAGE_TEXXPRCLR_S4		0x26	/* never used */
+#define SAVAGE_TEXDESCR_S4		0x27
+#define SAVAGE_FOGTABLE_S4		0x28
+#define SAVAGE_FOGCTRL_S4		0x30
+#define SAVAGE_STENCILCTRL_S4		0x31
+#define SAVAGE_ZBUFCTRL_S4		0x32
+#define SAVAGE_ZBUFOFF_S4		0x33
+#define SAVAGE_DESTCTRL_S4		0x34
+#define SAVAGE_DRAWCTRL0_S4		0x35
+#define SAVAGE_DRAWCTRL1_S4		0x36
+#define SAVAGE_ZWATERMARK_S4		0x37
+#define SAVAGE_DESTTEXRWWATERMARK_S4	0x38
+#define SAVAGE_TEXBLENDCOLOR_S4		0x39
+/* Savage3D/MX/IX 3D registers */
+#define SAVAGE_TEXPALADDR_S3D		0x18
+#define SAVAGE_TEXXPRCLR_S3D		0x19	/* never used */
+#define SAVAGE_TEXADDR_S3D		0x1A
+#define SAVAGE_TEXDESCR_S3D		0x1B
+#define SAVAGE_TEXCTRL_S3D		0x1C
+#define SAVAGE_FOGTABLE_S3D		0x20
+#define SAVAGE_FOGCTRL_S3D		0x30
+#define SAVAGE_DRAWCTRL_S3D		0x31
+#define SAVAGE_ZBUFCTRL_S3D		0x32
+#define SAVAGE_ZBUFOFF_S3D		0x33
+#define SAVAGE_DESTCTRL_S3D		0x34
+#define SAVAGE_SCSTART_S3D		0x35
+#define SAVAGE_SCEND_S3D		0x36
+#define SAVAGE_ZWATERMARK_S3D		0x37
+#define SAVAGE_DESTTEXRWWATERMARK_S3D	0x38
+/* common stuff */
+#define SAVAGE_VERTBUFADDR		0x3e
+#define SAVAGE_BITPLANEWTMASK		0xd7
+#define SAVAGE_DMABUFADDR		0x51
+
+/* texture enable bits (needed for tex addr checking) */
+#define SAVAGE_TEXCTRL_TEXEN_MASK	0x00010000	/* S3D */
+#define SAVAGE_TEXDESCR_TEX0EN_MASK	0x02000000	/* S4 */
+#define SAVAGE_TEXDESCR_TEX1EN_MASK	0x04000000	/* S4 */
+
+/* Global fields in Savage4/Twister/ProSavage 3D registers:
+ *
+ * All texture registers and DrawLocalCtrl are local. All other
+ * registers are global. */
+
+/* Global fields in Savage3D/MX/IX 3D registers:
+ *
+ * All texture registers are local. DrawCtrl and ZBufCtrl are
+ * partially local. All other registers are global.
+ *
+ * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal
+ * ZBufCtrl global fields: zCmpFunc, zBufEn
+ */
+#define SAVAGE_DRAWCTRL_S3D_GLOBAL	0x03f3c00c
+#define SAVAGE_ZBUFCTRL_S3D_GLOBAL	0x00000027
+
+/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d)
+ */
+#define SAVAGE_SCISSOR_MASK_S4		0x00fff7ff
+#define SAVAGE_SCISSOR_MASK_S3D		0x07ff07ff
+
+/*
+ * BCI commands
+ */
+#define BCI_CMD_NOP                  0x40000000
+#define BCI_CMD_RECT                 0x48000000
+#define BCI_CMD_RECT_XP              0x01000000
+#define BCI_CMD_RECT_YP              0x02000000
+#define BCI_CMD_SCANLINE             0x50000000
+#define BCI_CMD_LINE                 0x5C000000
+#define BCI_CMD_LINE_LAST_PIXEL      0x58000000
+#define BCI_CMD_BYTE_TEXT            0x63000000
+#define BCI_CMD_NT_BYTE_TEXT         0x67000000
+#define BCI_CMD_BIT_TEXT             0x6C000000
+#define BCI_CMD_GET_ROP(cmd)         (((cmd) >> 16) & 0xFF)
+#define BCI_CMD_SET_ROP(cmd, rop)    ((cmd) |= ((rop & 0xFF) << 16))
+#define BCI_CMD_SEND_COLOR           0x00008000
+
+#define BCI_CMD_CLIP_NONE            0x00000000
+#define BCI_CMD_CLIP_CURRENT         0x00002000
+#define BCI_CMD_CLIP_LR              0x00004000
+#define BCI_CMD_CLIP_NEW             0x00006000
+
+#define BCI_CMD_DEST_GBD             0x00000000
+#define BCI_CMD_DEST_PBD             0x00000800
+#define BCI_CMD_DEST_PBD_NEW         0x00000C00
+#define BCI_CMD_DEST_SBD             0x00001000
+#define BCI_CMD_DEST_SBD_NEW         0x00001400
+
+#define BCI_CMD_SRC_TRANSPARENT      0x00000200
+#define BCI_CMD_SRC_SOLID            0x00000000
+#define BCI_CMD_SRC_GBD              0x00000020
+#define BCI_CMD_SRC_COLOR            0x00000040
+#define BCI_CMD_SRC_MONO             0x00000060
+#define BCI_CMD_SRC_PBD_COLOR        0x00000080
+#define BCI_CMD_SRC_PBD_MONO         0x000000A0
+#define BCI_CMD_SRC_PBD_COLOR_NEW    0x000000C0
+#define BCI_CMD_SRC_PBD_MONO_NEW     0x000000E0
+#define BCI_CMD_SRC_SBD_COLOR        0x00000100
+#define BCI_CMD_SRC_SBD_MONO         0x00000120
+#define BCI_CMD_SRC_SBD_COLOR_NEW    0x00000140
+#define BCI_CMD_SRC_SBD_MONO_NEW     0x00000160
+
+#define BCI_CMD_PAT_TRANSPARENT      0x00000010
+#define BCI_CMD_PAT_NONE             0x00000000
+#define BCI_CMD_PAT_COLOR            0x00000002
+#define BCI_CMD_PAT_MONO             0x00000003
+#define BCI_CMD_PAT_PBD_COLOR        0x00000004
+#define BCI_CMD_PAT_PBD_MONO         0x00000005
+#define BCI_CMD_PAT_PBD_COLOR_NEW    0x00000006
+#define BCI_CMD_PAT_PBD_MONO_NEW     0x00000007
+#define BCI_CMD_PAT_SBD_COLOR        0x00000008
+#define BCI_CMD_PAT_SBD_MONO         0x00000009
+#define BCI_CMD_PAT_SBD_COLOR_NEW    0x0000000A
+#define BCI_CMD_PAT_SBD_MONO_NEW     0x0000000B
+
+#define BCI_BD_BW_DISABLE            0x10000000
+#define BCI_BD_TILE_MASK             0x03000000
+#define BCI_BD_TILE_NONE             0x00000000
+#define BCI_BD_TILE_16               0x02000000
+#define BCI_BD_TILE_32               0x03000000
+#define BCI_BD_GET_BPP(bd)           (((bd) >> 16) & 0xFF)
+#define BCI_BD_SET_BPP(bd, bpp)      ((bd) |= (((bpp) & 0xFF) << 16))
+#define BCI_BD_GET_STRIDE(bd)        ((bd) & 0xFFFF)
+#define BCI_BD_SET_STRIDE(bd, st)    ((bd) |= ((st) & 0xFFFF))
+
+#define BCI_CMD_SET_REGISTER            0x96000000
+
+#define BCI_CMD_WAIT                    0xC0000000
+#define BCI_CMD_WAIT_3D                 0x00010000
+#define BCI_CMD_WAIT_2D                 0x00020000
+
+#define BCI_CMD_UPDATE_EVENT_TAG        0x98000000
+
+#define BCI_CMD_DRAW_PRIM               0x80000000
+#define BCI_CMD_DRAW_INDEXED_PRIM       0x88000000
+#define BCI_CMD_DRAW_CONT               0x01000000
+#define BCI_CMD_DRAW_TRILIST            0x00000000
+#define BCI_CMD_DRAW_TRISTRIP           0x02000000
+#define BCI_CMD_DRAW_TRIFAN             0x04000000
+#define BCI_CMD_DRAW_SKIPFLAGS          0x000000ff
+#define BCI_CMD_DRAW_NO_Z		0x00000001
+#define BCI_CMD_DRAW_NO_W		0x00000002
+#define BCI_CMD_DRAW_NO_CD		0x00000004
+#define BCI_CMD_DRAW_NO_CS		0x00000008
+#define BCI_CMD_DRAW_NO_U0		0x00000010
+#define BCI_CMD_DRAW_NO_V0		0x00000020
+#define BCI_CMD_DRAW_NO_UV0		0x00000030
+#define BCI_CMD_DRAW_NO_U1		0x00000040
+#define BCI_CMD_DRAW_NO_V1		0x00000080
+#define BCI_CMD_DRAW_NO_UV1		0x000000c0
+
+#define BCI_CMD_DMA			0xa8000000
+
+#define BCI_W_H(w, h)                ((((h) << 16) | (w)) & 0x0FFF0FFF)
+#define BCI_X_Y(x, y)                ((((y) << 16) | (x)) & 0x0FFF0FFF)
+#define BCI_X_W(x, w)                ((((w) << 16) | (x)) & 0x0FFF0FFF)
+#define BCI_CLIP_LR(l, r)            ((((r) << 16) | (l)) & 0x0FFF0FFF)
+#define BCI_CLIP_TL(t, l)            ((((t) << 16) | (l)) & 0x0FFF0FFF)
+#define BCI_CLIP_BR(b, r)            ((((b) << 16) | (r)) & 0x0FFF0FFF)
+
+#define BCI_LINE_X_Y(x, y)           (((y) << 16) | ((x) & 0xFFFF))
+#define BCI_LINE_STEPS(diag, axi)    (((axi) << 16) | ((diag) & 0xFFFF))
+#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
+	(((maj) & 0x1FFF) | \
+	((ym) ? 1<<13 : 0) | \
+	((xp) ? 1<<14 : 0) | \
+	((yp) ? 1<<15 : 0) | \
+	((err) << 16))
+
+/*
+ * common commands
+ */
+#define BCI_SET_REGISTERS( first, n )			\
+	BCI_WRITE(BCI_CMD_SET_REGISTER |		\
+		  ((uint32_t)(n) & 0xff) << 16 |	\
+		  ((uint32_t)(first) & 0xffff))
+#define DMA_SET_REGISTERS( first, n )			\
+	DMA_WRITE(BCI_CMD_SET_REGISTER |		\
+		  ((uint32_t)(n) & 0xff) << 16 |	\
+		  ((uint32_t)(first) & 0xffff))
+
+#define BCI_DRAW_PRIMITIVE(n, type, skip)         \
+        BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
+		  ((n) << 16))
+#define DMA_DRAW_PRIMITIVE(n, type, skip)         \
+        DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
+		  ((n) << 16))
+
+#define BCI_DRAW_INDICES_S3D(n, type, i0)         \
+        BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) |  \
+		  ((n) << 16) | (i0))
+
+#define BCI_DRAW_INDICES_S4(n, type, skip)        \
+        BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) |  \
+                  (skip) | ((n) << 16))
+
+#define BCI_DMA(n)	\
+	BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
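+/* The DMA length field counts 64-bit units minus one, which is why
+ * savage_dma_flush() pads the command stream to an even number of
+ * dwords before emitting BCI_DMA(len). */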
+
+/*
+ * access to MMIO
+ */
+#define SAVAGE_READ(reg)	DRM_READ32(  dev_priv->mmio, (reg) )
+#define SAVAGE_WRITE(reg)	DRM_WRITE32( dev_priv->mmio, (reg) )
+
+/*
+ * access to the burst command interface (BCI)
+ */
+#define SAVAGE_BCI_DEBUG 1
+
+#define BCI_LOCALS    volatile uint32_t *bci_ptr;
+
+#define BEGIN_BCI( n ) do {			\
+	dev_priv->wait_fifo(dev_priv, (n));	\
+	bci_ptr = dev_priv->bci_ptr;		\
+} while(0)
+
+#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
+
+/*
+ * command DMA support
+ */
+#define SAVAGE_DMA_DEBUG 1
+
+#define DMA_LOCALS   uint32_t *dma_ptr;
+
+#define BEGIN_DMA( n ) do {						\
+	unsigned int cur = dev_priv->current_dma_page;			\
+	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -			\
+		dev_priv->dma_pages[cur].used;				\
+	if ((n) > rest) {						\
+		dma_ptr = savage_dma_alloc(dev_priv, (n));		\
+	} else { /* fast path for small allocations */			\
+		dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +	\
+			cur * SAVAGE_DMA_PAGE_SIZE +			\
+			dev_priv->dma_pages[cur].used;			\
+		if (dev_priv->dma_pages[cur].used == 0)			\
+			savage_dma_wait(dev_priv, cur);			\
+		dev_priv->dma_pages[cur].used += (n);			\
+	}								\
+} while(0)
+
+#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
+
+#define DMA_COPY(src, n) do {					\
+	memcpy(dma_ptr, (src), (n)*4);				\
+	dma_ptr += n;						\
+} while(0)
+
+#if SAVAGE_DMA_DEBUG
+#define DMA_COMMIT() do {						\
+	unsigned int cur = dev_priv->current_dma_page;			\
+	uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle +	\
+			cur * SAVAGE_DMA_PAGE_SIZE +			\
+			dev_priv->dma_pages[cur].used;			\
+	if (dma_ptr != expected) {					\
+		DRM_ERROR("DMA allocation and use don't match: "	\
+			  "%p != %p\n", expected, dma_ptr);		\
+		savage_dma_reset(dev_priv);				\
+	}								\
+} while(0)
+#else
+#define DMA_COMMIT() do {/* nothing */} while(0)
+#endif
+
+#define DMA_FLUSH() dev_priv->dma_flush(dev_priv)
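+/* Typical usage, as in savage_emit_clip_rect_s3d() in savage_state.c:
+ *	DMA_LOCALS;
+ *	BEGIN_DMA(4);
+ *	DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
+ *	DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
+ *	DMA_WRITE(scstart);
+ *	DMA_WRITE(scend);
+ *	DMA_COMMIT();
+ */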
+
+/* Buffer aging via event tag
+ */
+
+#define UPDATE_EVENT_COUNTER( ) do {			\
+	if (dev_priv->status_ptr) {			\
+		uint16_t count;				\
+		/* coordinate with Xserver */		\
+		count = dev_priv->status_ptr[1023];	\
+		if (count < dev_priv->event_counter)	\
+			dev_priv->event_wrap++;		\
+		dev_priv->event_counter = count;	\
+	}						\
+} while(0)
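+/* The 16-bit counter in the status page only moves forward, so reading
+ * a smaller value than last time means it wrapped, e.g. 0xfffd -> 0x0002
+ * increments event_wrap. */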
+
+#define SET_AGE( age, e, w ) do {	\
+	(age)->event = e;		\
+	(age)->wrap = w;		\
+} while(0)
+
+#define TEST_AGE( age, e, w )				\
+	( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
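+/* TEST_AGE evaluates to true once the recorded (wrap, event) pair is at
+ * or behind the completed event (e, w), e.g. age {event=10, wrap=2}
+ * tests true against e=10, w=2. */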
+
+#endif				/* __SAVAGE_DRV_H__ */
diff --git a/linux-imx/drivers/gpu/drm/savage/savage_state.c b/linux-imx/drivers/gpu/drm/savage/savage_state.c
new file mode 100644
index 0000000..b35e75e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/savage/savage_state.c
@@ -0,0 +1,1163 @@
+/* savage_state.c -- State and drawing support for Savage
+ *
+ * Copyright 2004  Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <drm/drmP.h>
+#include <drm/savage_drm.h>
+#include "savage_drv.h"
+
+void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
+			       const struct drm_clip_rect * pbox)
+{
+	uint32_t scstart = dev_priv->state.s3d.new_scstart;
+	uint32_t scend = dev_priv->state.s3d.new_scend;
+	scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
+	    ((uint32_t) pbox->x1 & 0x000007ff) |
+	    (((uint32_t) pbox->y1 << 16) & 0x07ff0000);
+	scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
+	    (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
+	    ((((uint32_t) pbox->y2 - 1) << 16) & 0x07ff0000);
+	if (scstart != dev_priv->state.s3d.scstart ||
+	    scend != dev_priv->state.s3d.scend) {
+		DMA_LOCALS;
+		BEGIN_DMA(4);
+		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
+		DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
+		DMA_WRITE(scstart);
+		DMA_WRITE(scend);
+		dev_priv->state.s3d.scstart = scstart;
+		dev_priv->state.s3d.scend = scend;
+		dev_priv->waiting = 1;
+		DMA_COMMIT();
+	}
+}
+
+void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
+			      const struct drm_clip_rect * pbox)
+{
+	uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
+	uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
+	drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
+	    ((uint32_t) pbox->x1 & 0x000007ff) |
+	    (((uint32_t) pbox->y1 << 12) & 0x00fff000);
+	drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
+	    (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
+	    ((((uint32_t) pbox->y2 - 1) << 12) & 0x00fff000);
+	if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
+	    drawctrl1 != dev_priv->state.s4.drawctrl1) {
+		DMA_LOCALS;
+		BEGIN_DMA(4);
+		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
+		DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
+		DMA_WRITE(drawctrl0);
+		DMA_WRITE(drawctrl1);
+		dev_priv->state.s4.drawctrl0 = drawctrl0;
+		dev_priv->state.s4.drawctrl1 = drawctrl1;
+		dev_priv->waiting = 1;
+		DMA_COMMIT();
+	}
+}
+
+static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
+				 uint32_t addr)
+{
+	if ((addr & 6) != 2) {	/* reserved bits */
+		DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
+		return -EINVAL;
+	}
+	if (!(addr & 1)) {	/* local */
+		addr &= ~7;
+		if (addr < dev_priv->texture_offset ||
+		    addr >= dev_priv->texture_offset + dev_priv->texture_size) {
+			DRM_ERROR
+			    ("bad texAddr%d %08x (local addr out of range)\n",
+			     unit, addr);
+			return -EINVAL;
+		}
+	} else {		/* AGP */
+		if (!dev_priv->agp_textures) {
+			DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
+				  unit, addr);
+			return -EINVAL;
+		}
+		addr &= ~7;
+		if (addr < dev_priv->agp_textures->offset ||
+		    addr >= (dev_priv->agp_textures->offset +
+			     dev_priv->agp_textures->size)) {
+			DRM_ERROR
+			    ("bad texAddr%d %08x (AGP addr out of range)\n",
+			     unit, addr);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+#define SAVE_STATE(reg,where)			\
+	if(start <= reg && start+count > reg)	\
+		dev_priv->state.where = regs[reg - start]
+#define SAVE_STATE_MASK(reg,where,mask) do {			\
+	if(start <= reg && start+count > reg) {			\
+		uint32_t tmp;					\
+		tmp = regs[reg - start];			\
+		dev_priv->state.where = (tmp & (mask)) |	\
+			(dev_priv->state.where & ~(mask));	\
+	}							\
+} while (0)
+
+static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
+				   unsigned int start, unsigned int count,
+				   const uint32_t *regs)
+{
+	if (start < SAVAGE_TEXPALADDR_S3D ||
+	    start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
+		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
+			  start, start + count - 1);
+		return -EINVAL;
+	}
+
+	SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
+			~SAVAGE_SCISSOR_MASK_S3D);
+	SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
+			~SAVAGE_SCISSOR_MASK_S3D);
+
+	/* if any texture regs were changed ... */
+	if (start <= SAVAGE_TEXCTRL_S3D &&
+	    start + count > SAVAGE_TEXPALADDR_S3D) {
+		/* ... check texture state */
+		SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
+		SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
+		if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
+			return savage_verify_texaddr(dev_priv, 0,
+						dev_priv->state.s3d.texaddr);
+	}
+
+	return 0;
+}
+
+static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
+				  unsigned int start, unsigned int count,
+				  const uint32_t *regs)
+{
+	int ret = 0;
+
+	if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
+	    start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
+		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
+			  start, start + count - 1);
+		return -EINVAL;
+	}
+
+	SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
+			~SAVAGE_SCISSOR_MASK_S4);
+	SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
+			~SAVAGE_SCISSOR_MASK_S4);
+
+	/* if any texture regs were changed ... */
+	if (start <= SAVAGE_TEXDESCR_S4 &&
+	    start + count > SAVAGE_TEXPALADDR_S4) {
+		/* ... check texture state */
+		SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
+		SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
+		SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
+		if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
+			ret |= savage_verify_texaddr(dev_priv, 0,
+						dev_priv->state.s4.texaddr0);
+		if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
+			ret |= savage_verify_texaddr(dev_priv, 1,
+						dev_priv->state.s4.texaddr1);
+	}
+
+	return ret;
+}
+
+#undef SAVE_STATE
+#undef SAVE_STATE_MASK
+
+static int savage_dispatch_state(drm_savage_private_t * dev_priv,
+				 const drm_savage_cmd_header_t * cmd_header,
+				 const uint32_t *regs)
+{
+	unsigned int count = cmd_header->state.count;
+	unsigned int start = cmd_header->state.start;
+	unsigned int count2 = 0;
+	unsigned int bci_size;
+	int ret;
+	DMA_LOCALS;
+
+	if (!count)
+		return 0;
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		ret = savage_verify_state_s3d(dev_priv, start, count, regs);
+		if (ret != 0)
+			return ret;
+		/* scissor regs are emitted in savage_dispatch_draw */
+		if (start < SAVAGE_SCSTART_S3D) {
+			if (start + count > SAVAGE_SCEND_S3D + 1)
+				count2 = count - (SAVAGE_SCEND_S3D + 1 - start);
+			if (start + count > SAVAGE_SCSTART_S3D)
+				count = SAVAGE_SCSTART_S3D - start;
+		} else if (start <= SAVAGE_SCEND_S3D) {
+			if (start + count > SAVAGE_SCEND_S3D + 1) {
+				count -= SAVAGE_SCEND_S3D + 1 - start;
+				start = SAVAGE_SCEND_S3D + 1;
+			} else
+				return 0;
+		}
+	} else {
+		ret = savage_verify_state_s4(dev_priv, start, count, regs);
+		if (ret != 0)
+			return ret;
+		/* scissor regs are emitted in savage_dispatch_draw */
+		if (start < SAVAGE_DRAWCTRL0_S4) {
+			if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
+				count2 = count -
+					 (SAVAGE_DRAWCTRL1_S4 + 1 - start);
+			if (start + count > SAVAGE_DRAWCTRL0_S4)
+				count = SAVAGE_DRAWCTRL0_S4 - start;
+		} else if (start <= SAVAGE_DRAWCTRL1_S4) {
+			if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
+				count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
+				start = SAVAGE_DRAWCTRL1_S4 + 1;
+			} else
+				return 0;
+		}
+	}
+
+	bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;
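+	/* Each run of up to 255 registers costs one SET_REGISTERS header
+	 * dword; (count + 254) / 255 is the number of headers needed. */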
+
+	if (cmd_header->state.global) {
+		BEGIN_DMA(bci_size + 1);
+		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
+		dev_priv->waiting = 1;
+	} else {
+		BEGIN_DMA(bci_size);
+	}
+
+	do {
+		while (count > 0) {
+			unsigned int n = count < 255 ? count : 255;
+			DMA_SET_REGISTERS(start, n);
+			DMA_COPY(regs, n);
+			count -= n;
+			start += n;
+			regs += n;
+		}
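+		/* skip the two scissor registers separating the two runs */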
+		start += 2;
+		regs += 2;
+		count = count2;
+		count2 = 0;
+	} while (count);
+
+	DMA_COMMIT();
+
+	return 0;
+}
+
+static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
+				    const drm_savage_cmd_header_t * cmd_header,
+				    const struct drm_buf * dmabuf)
+{
+	unsigned char reorder = 0;
+	unsigned int prim = cmd_header->prim.prim;
+	unsigned int skip = cmd_header->prim.skip;
+	unsigned int n = cmd_header->prim.count;
+	unsigned int start = cmd_header->prim.start;
+	unsigned int i;
+	BCI_LOCALS;
+
+	if (!dmabuf) {
+		DRM_ERROR("called without dma buffers!\n");
+		return -EINVAL;
+	}
+
+	if (!n)
+		return 0;
+
+	switch (prim) {
+	case SAVAGE_PRIM_TRILIST_201:
+		reorder = 1;
+		prim = SAVAGE_PRIM_TRILIST;
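+		/* fall through */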
+	case SAVAGE_PRIM_TRILIST:
+		if (n % 3 != 0) {
+			DRM_ERROR("wrong number of vertices %u in TRILIST\n",
+				  n);
+			return -EINVAL;
+		}
+		break;
+	case SAVAGE_PRIM_TRISTRIP:
+	case SAVAGE_PRIM_TRIFAN:
+		if (n < 3) {
+			DRM_ERROR
+			    ("wrong number of vertices %u in TRIFAN/STRIP\n",
+			     n);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("invalid primitive type %u\n", prim);
+		return -EINVAL;
+	}
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		if (skip != 0) {
+			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
+			return -EINVAL;
+		}
+	} else {
+		unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
+		    (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
+		    (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
+		if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
+			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
+			return -EINVAL;
+		}
+		if (reorder) {
+			DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
+			return -EINVAL;
+		}
+	}
+
+	if (start + n > dmabuf->total / 32) {
+		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
+			  start, start + n - 1, dmabuf->total / 32);
+		return -EINVAL;
+	}
+
+	/* Vertex DMA doesn't work with command DMA at the same time,
+	 * so we use BCI_... to submit commands here. Flush buffered
+	 * faked DMA first. */
+	DMA_FLUSH();
+
+	if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
+		BEGIN_BCI(2);
+		BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
+		BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
+		dev_priv->state.common.vbaddr = dmabuf->bus_address;
+	}
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
+		/* Workaround for what looks like a hardware bug. If a
+		 * WAIT_3D_IDLE was emitted some time before the
+		 * indexed drawing command then the engine will lock
+		 * up. There are two known workarounds:
+		 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
+		BEGIN_BCI(63);
+		for (i = 0; i < 63; ++i)
+			BCI_WRITE(BCI_CMD_WAIT);
+		dev_priv->waiting = 0;
+	}
+
+	prim <<= 25;
+	while (n != 0) {
+		/* Can emit up to 255 indices (85 triangles) at once. */
+		unsigned int count = n > 255 ? 255 : n;
+		if (reorder) {
+			/* Need to reorder indices for correct flat
+			 * shading while preserving the clock sense
+			 * for correct culling. Only on Savage3D. */
+			int reorder[3] = { -1, -1, -1 };
+			reorder[start % 3] = 2;
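+			/* E.g. start=0, count=3: the first index emitted is
+			 * start+2 = 2, then 1-1=0 and 2-1=1, so triangle
+			 * (0,1,2) goes out as (2,0,1) -- same winding, with
+			 * the provoking vertex rotated. (The local array
+			 * intentionally shadows the reorder flag.) */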
+
+			BEGIN_BCI((count + 1 + 1) / 2);
+			BCI_DRAW_INDICES_S3D(count, prim, start + 2);
+
+			for (i = start + 1; i + 1 < start + count; i += 2)
+				BCI_WRITE((i + reorder[i % 3]) |
+					  ((i + 1 +
+					    reorder[(i + 1) % 3]) << 16));
+			if (i < start + count)
+				BCI_WRITE(i + reorder[i % 3]);
+		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+			BEGIN_BCI((count + 1 + 1) / 2);
+			BCI_DRAW_INDICES_S3D(count, prim, start);
+
+			for (i = start + 1; i + 1 < start + count; i += 2)
+				BCI_WRITE(i | ((i + 1) << 16));
+			if (i < start + count)
+				BCI_WRITE(i);
+		} else {
+			BEGIN_BCI((count + 2 + 1) / 2);
+			BCI_DRAW_INDICES_S4(count, prim, skip);
+
+			for (i = start; i + 1 < start + count; i += 2)
+				BCI_WRITE(i | ((i + 1) << 16));
+			if (i < start + count)
+				BCI_WRITE(i);
+		}
+
+		start += count;
+		n -= count;
+
+		prim |= BCI_CMD_DRAW_CONT;
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
+				   const drm_savage_cmd_header_t * cmd_header,
+				   const uint32_t *vtxbuf, unsigned int vb_size,
+				   unsigned int vb_stride)
+{
+	unsigned char reorder = 0;
+	unsigned int prim = cmd_header->prim.prim;
+	unsigned int skip = cmd_header->prim.skip;
+	unsigned int n = cmd_header->prim.count;
+	unsigned int start = cmd_header->prim.start;
+	unsigned int vtx_size;
+	unsigned int i;
+	DMA_LOCALS;
+
+	if (!n)
+		return 0;
+
+	switch (prim) {
+	case SAVAGE_PRIM_TRILIST_201:
+		reorder = 1;
+		prim = SAVAGE_PRIM_TRILIST;
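+		/* fall through */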
+	case SAVAGE_PRIM_TRILIST:
+		if (n % 3 != 0) {
+			DRM_ERROR("wrong number of vertices %u in TRILIST\n",
+				  n);
+			return -EINVAL;
+		}
+		break;
+	case SAVAGE_PRIM_TRISTRIP:
+	case SAVAGE_PRIM_TRIFAN:
+		if (n < 3) {
+			DRM_ERROR
+			    ("wrong number of vertices %u in TRIFAN/STRIP\n",
+			     n);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("invalid primitive type %u\n", prim);
+		return -EINVAL;
+	}
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		if (skip > SAVAGE_SKIP_ALL_S3D) {
+			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
+			return -EINVAL;
+		}
+		vtx_size = 8;	/* full vertex */
+	} else {
+		if (skip > SAVAGE_SKIP_ALL_S4) {
+			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
+			return -EINVAL;
+		}
+		vtx_size = 10;	/* full vertex */
+	}
+
+	vtx_size -= (skip & 1) + (skip >> 1 & 1) +
+	    (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
+	    (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
+
+	if (vtx_size > vb_stride) {
+		DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
+			  vtx_size, vb_stride);
+		return -EINVAL;
+	}
+
+	if (start + n > vb_size / (vb_stride * 4)) {
+		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
+			  start, start + n - 1, vb_size / (vb_stride * 4));
+		return -EINVAL;
+	}
+
+	prim <<= 25;
+	while (n != 0) {
+		/* Can emit up to 255 vertices (85 triangles) at once. */
+		unsigned int count = n > 255 ? 255 : n;
+		if (reorder) {
+			/* Need to reorder vertices for correct flat
+			 * shading while preserving the clock sense
+			 * for correct culling. Only on Savage3D. */
+			int reorder[3] = { -1, -1, -1 };
+			reorder[start % 3] = 2;
+
+			BEGIN_DMA(count * vtx_size + 1);
+			DMA_DRAW_PRIMITIVE(count, prim, skip);
+
+			for (i = start; i < start + count; ++i) {
+				unsigned int j = i + reorder[i % 3];
+				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
+			}
+
+			DMA_COMMIT();
+		} else {
+			BEGIN_DMA(count * vtx_size + 1);
+			DMA_DRAW_PRIMITIVE(count, prim, skip);
+
+			if (vb_stride == vtx_size) {
+				DMA_COPY(&vtxbuf[vb_stride * start],
+					 vtx_size * count);
+			} else {
+				for (i = start; i < start + count; ++i) {
+					DMA_COPY(&vtxbuf [vb_stride * i],
+						 vtx_size);
+				}
+			}
+
+			DMA_COMMIT();
+		}
+
+		start += count;
+		n -= count;
+
+		prim |= BCI_CMD_DRAW_CONT;
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
+				   const drm_savage_cmd_header_t * cmd_header,
+				   const uint16_t *idx,
+				   const struct drm_buf * dmabuf)
+{
+	unsigned char reorder = 0;
+	unsigned int prim = cmd_header->idx.prim;
+	unsigned int skip = cmd_header->idx.skip;
+	unsigned int n = cmd_header->idx.count;
+	unsigned int i;
+	BCI_LOCALS;
+
+	if (!dmabuf) {
+		DRM_ERROR("called without dma buffers!\n");
+		return -EINVAL;
+	}
+
+	if (!n)
+		return 0;
+
+	switch (prim) {
+	case SAVAGE_PRIM_TRILIST_201:
+		reorder = 1;
+		prim = SAVAGE_PRIM_TRILIST;
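+		/* fall through */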
+	case SAVAGE_PRIM_TRILIST:
+		if (n % 3 != 0) {
+			DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
+			return -EINVAL;
+		}
+		break;
+	case SAVAGE_PRIM_TRISTRIP:
+	case SAVAGE_PRIM_TRIFAN:
+		if (n < 3) {
+			DRM_ERROR
+			    ("wrong number of indices %u in TRIFAN/STRIP\n", n);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("invalid primitive type %u\n", prim);
+		return -EINVAL;
+	}
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		if (skip != 0) {
+			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
+			return -EINVAL;
+		}
+	} else {
+		unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
+		    (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
+		    (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
+		if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
+			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
+			return -EINVAL;
+		}
+		if (reorder) {
+			DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Vertex DMA cannot be used at the same time as command DMA,
+	 * so commands are submitted through the BCI here. Flush any
+	 * buffered fake DMA first. */
+	DMA_FLUSH();
+
+	if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
+		BEGIN_BCI(2);
+		BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
+		BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
+		dev_priv->state.common.vbaddr = dmabuf->bus_address;
+	}
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
+		/* Workaround for what looks like a hardware bug. If a
+		 * WAIT_3D_IDLE was emitted some time before the
+		 * indexed drawing command then the engine will lock
+		 * up. There are two known workarounds:
+		 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
+		BEGIN_BCI(63);
+		for (i = 0; i < 63; ++i)
+			BCI_WRITE(BCI_CMD_WAIT);
+		dev_priv->waiting = 0;
+	}
+
+	prim <<= 25;
+	while (n != 0) {
+		/* Can emit up to 255 indices (85 triangles) at once. */
+		unsigned int count = n > 255 ? 255 : n;
+
+		/* check indices */
+		for (i = 0; i < count; ++i) {
+			if (idx[i] > dmabuf->total / 32) {
+				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
+					  i, idx[i], dmabuf->total / 32);
+				return -EINVAL;
+			}
+		}
+
+		if (reorder) {
+			/* Need to reorder indices for correct flat
+			 * shading while preserving the clock sense
+			 * for correct culling. Only on Savage3D. */
+			int reorder[3] = { 2, -1, -1 };
+
+			BEGIN_BCI((count + 1 + 1) / 2);
+			BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
+
+			for (i = 1; i + 1 < count; i += 2)
+				BCI_WRITE(idx[i + reorder[i % 3]] |
+					  (idx[i + 1 +
+					   reorder[(i + 1) % 3]] << 16));
+			if (i < count)
+				BCI_WRITE(idx[i + reorder[i % 3]]);
+		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+			BEGIN_BCI((count + 1 + 1) / 2);
+			BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
+
+			for (i = 1; i + 1 < count; i += 2)
+				BCI_WRITE(idx[i] | (idx[i + 1] << 16));
+			if (i < count)
+				BCI_WRITE(idx[i]);
+		} else {
+			BEGIN_BCI((count + 2 + 1) / 2);
+			BCI_DRAW_INDICES_S4(count, prim, skip);
+
+			for (i = 0; i + 1 < count; i += 2)
+				BCI_WRITE(idx[i] | (idx[i + 1] << 16));
+			if (i < count)
+				BCI_WRITE(idx[i]);
+		}
+
+		idx += count;
+		n -= count;
+
+		prim |= BCI_CMD_DRAW_CONT;
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
+				  const drm_savage_cmd_header_t * cmd_header,
+				  const uint16_t *idx,
+				  const uint32_t *vtxbuf,
+				  unsigned int vb_size, unsigned int vb_stride)
+{
+	unsigned char reorder = 0;
+	unsigned int prim = cmd_header->idx.prim;
+	unsigned int skip = cmd_header->idx.skip;
+	unsigned int n = cmd_header->idx.count;
+	unsigned int vtx_size;
+	unsigned int i;
+	DMA_LOCALS;
+
+	if (!n)
+		return 0;
+
+	switch (prim) {
+	case SAVAGE_PRIM_TRILIST_201:
+		reorder = 1;
+		prim = SAVAGE_PRIM_TRILIST;
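+		/* fall through */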
+	case SAVAGE_PRIM_TRILIST:
+		if (n % 3 != 0) {
+			DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
+			return -EINVAL;
+		}
+		break;
+	case SAVAGE_PRIM_TRISTRIP:
+	case SAVAGE_PRIM_TRIFAN:
+		if (n < 3) {
+			DRM_ERROR
+			    ("wrong number of indices %u in TRIFAN/STRIP\n", n);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("invalid primitive type %u\n", prim);
+		return -EINVAL;
+	}
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		if (skip > SAVAGE_SKIP_ALL_S3D) {
+			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
+			return -EINVAL;
+		}
+		vtx_size = 8;	/* full vertex */
+	} else {
+		if (skip > SAVAGE_SKIP_ALL_S4) {
+			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
+			return -EINVAL;
+		}
+		vtx_size = 10;	/* full vertex */
+	}
+
+	vtx_size -= (skip & 1) + (skip >> 1 & 1) +
+	    (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
+	    (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
+
+	if (vtx_size > vb_stride) {
+		DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
+			  vtx_size, vb_stride);
+		return -EINVAL;
+	}
+
+	prim <<= 25;
+	while (n != 0) {
+		/* Can emit up to 255 vertices (85 triangles) at once. */
+		unsigned int count = n > 255 ? 255 : n;
+
+		/* Check indices */
+		for (i = 0; i < count; ++i) {
+			if (idx[i] > vb_size / (vb_stride * 4)) {
+				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
+					  i, idx[i], vb_size / (vb_stride * 4));
+				return -EINVAL;
+			}
+		}
+
+		if (reorder) {
+			/* Need to reorder vertices for correct flat
+			 * shading while preserving the clock sense
+			 * for correct culling. Only on Savage3D. */
+			int reorder[3] = { 2, -1, -1 };
+
+			BEGIN_DMA(count * vtx_size + 1);
+			DMA_DRAW_PRIMITIVE(count, prim, skip);
+
+			for (i = 0; i < count; ++i) {
+				unsigned int j = idx[i + reorder[i % 3]];
+				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
+			}
+
+			DMA_COMMIT();
+		} else {
+			BEGIN_DMA(count * vtx_size + 1);
+			DMA_DRAW_PRIMITIVE(count, prim, skip);
+
+			for (i = 0; i < count; ++i) {
+				unsigned int j = idx[i];
+				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
+			}
+
+			DMA_COMMIT();
+		}
+
+		idx += count;
+		n -= count;
+
+		prim |= BCI_CMD_DRAW_CONT;
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
+				 const drm_savage_cmd_header_t * cmd_header,
+				 const drm_savage_cmd_header_t *data,
+				 unsigned int nbox,
+				 const struct drm_clip_rect *boxes)
+{
+	unsigned int flags = cmd_header->clear0.flags;
+	unsigned int clear_cmd;
+	unsigned int i, nbufs;
+	DMA_LOCALS;
+
+	if (nbox == 0)
+		return 0;
+
+	clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
+	    BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
+	BCI_CMD_SET_ROP(clear_cmd, 0xCC);
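+	/* ROP 0xCC is SRCCOPY; the clear color is written unmodified. */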
+
+	nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
+	    ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0);
+	if (nbufs == 0)
+		return 0;
+
+	if (data->clear1.mask != 0xffffffff) {
+		/* set mask */
+		BEGIN_DMA(2);
+		DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
+		DMA_WRITE(data->clear1.mask);
+		DMA_COMMIT();
+	}
+	for (i = 0; i < nbox; ++i) {
+		unsigned int x, y, w, h;
+		unsigned int buf;
+		x = boxes[i].x1, y = boxes[i].y1;
+		w = boxes[i].x2 - boxes[i].x1;
+		h = boxes[i].y2 - boxes[i].y1;
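+		/* Six dwords per buffer: command, destination offset,
+		 * buffer descriptor, clear value, position and size. */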
+		BEGIN_DMA(nbufs * 6);
+		for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
+			if (!(flags & buf))
+				continue;
+			DMA_WRITE(clear_cmd);
+			switch (buf) {
+			case SAVAGE_FRONT:
+				DMA_WRITE(dev_priv->front_offset);
+				DMA_WRITE(dev_priv->front_bd);
+				break;
+			case SAVAGE_BACK:
+				DMA_WRITE(dev_priv->back_offset);
+				DMA_WRITE(dev_priv->back_bd);
+				break;
+			case SAVAGE_DEPTH:
+				DMA_WRITE(dev_priv->depth_offset);
+				DMA_WRITE(dev_priv->depth_bd);
+				break;
+			}
+			DMA_WRITE(data->clear1.value);
+			DMA_WRITE(BCI_X_Y(x, y));
+			DMA_WRITE(BCI_W_H(w, h));
+		}
+		DMA_COMMIT();
+	}
+	if (data->clear1.mask != 0xffffffff) {
+		/* reset mask */
+		BEGIN_DMA(2);
+		DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
+		DMA_WRITE(0xffffffff);
+		DMA_COMMIT();
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
+				unsigned int nbox, const struct drm_clip_rect *boxes)
+{
+	unsigned int swap_cmd;
+	unsigned int i;
+	DMA_LOCALS;
+
+	if (nbox == 0)
+		return 0;
+
+	swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
+	    BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
+	BCI_CMD_SET_ROP(swap_cmd, 0xCC);
+
+	for (i = 0; i < nbox; ++i) {
+		BEGIN_DMA(6);
+		DMA_WRITE(swap_cmd);
+		DMA_WRITE(dev_priv->back_offset);
+		DMA_WRITE(dev_priv->back_bd);
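+		/* Destination and source coordinates are identical for
+		 * the swap blit, hence the same X/Y value twice. */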
+		DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
+		DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
+		DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
+				  boxes[i].y2 - boxes[i].y1));
+		DMA_COMMIT();
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
+				const drm_savage_cmd_header_t *start,
+				const drm_savage_cmd_header_t *end,
+				const struct drm_buf * dmabuf,
+				const unsigned int *vtxbuf,
+				unsigned int vb_size, unsigned int vb_stride,
+				unsigned int nbox,
+				const struct drm_clip_rect *boxes)
+{
+	unsigned int i, j;
+	int ret;
+
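+	/* Replay the whole command range once per clip rectangle,
+	 * programming the clip rectangle before each pass. */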
+	for (i = 0; i < nbox; ++i) {
+		const drm_savage_cmd_header_t *cmdbuf;
+		dev_priv->emit_clip_rect(dev_priv, &boxes[i]);
+
+		cmdbuf = start;
+		while (cmdbuf < end) {
+			drm_savage_cmd_header_t cmd_header;
+			cmd_header = *cmdbuf;
+			cmdbuf++;
+			switch (cmd_header.cmd.cmd) {
+			case SAVAGE_CMD_DMA_PRIM:
+				ret = savage_dispatch_dma_prim(
+					dev_priv, &cmd_header, dmabuf);
+				break;
+			case SAVAGE_CMD_VB_PRIM:
+				ret = savage_dispatch_vb_prim(
+					dev_priv, &cmd_header,
+					vtxbuf, vb_size, vb_stride);
+				break;
+			case SAVAGE_CMD_DMA_IDX:
+				j = (cmd_header.idx.count + 3) / 4;
+				/* j was checked in savage_bci_cmdbuf */
+				ret = savage_dispatch_dma_idx(dev_priv,
+					&cmd_header, (const uint16_t *)cmdbuf,
+					dmabuf);
+				cmdbuf += j;
+				break;
+			case SAVAGE_CMD_VB_IDX:
+				j = (cmd_header.idx.count + 3) / 4;
+				/* j was checked in savage_bci_cmdbuf */
+				ret = savage_dispatch_vb_idx(dev_priv,
+					&cmd_header, (const uint16_t *)cmdbuf,
+					(const uint32_t *)vtxbuf, vb_size,
+					vb_stride);
+				cmdbuf += j;
+				break;
+			default:
+				/* What's the best return code? EFAULT? */
+				DRM_ERROR("IMPLEMENTATION ERROR: "
+					  "non-drawing-command %d\n",
+					  cmd_header.cmd.cmd);
+				return -EINVAL;
+			}
+
+			if (ret != 0)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *dmabuf;
+	drm_savage_cmdbuf_t *cmdbuf = data;
+	drm_savage_cmd_header_t *kcmd_addr = NULL;
+	drm_savage_cmd_header_t *first_draw_cmd;
+	unsigned int *kvb_addr = NULL;
+	struct drm_clip_rect *kbox_addr = NULL;
+	unsigned int i, j;
+	int ret = 0;
+
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (dma && dma->buflist) {
+		if (cmdbuf->dma_idx >= dma->buf_count) {
+			DRM_ERROR
+			    ("vertex buffer index %u out of range (0-%u)\n",
+			     cmdbuf->dma_idx, dma->buf_count - 1);
+			return -EINVAL;
+		}
+		dmabuf = dma->buflist[cmdbuf->dma_idx];
+	} else {
+		dmabuf = NULL;
+	}
+
+	/* Copy the user buffers into kernel temporary areas.  This hasn't been
+	 * a performance loss compared to VERIFYAREA_READ/
+	 * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
+	 * for locking on FreeBSD.
+	 */
+	if (cmdbuf->size) {
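+		/* Each command header is eight bytes. */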
+		kcmd_addr = kmalloc_array(cmdbuf->size, 8, GFP_KERNEL);
+		if (kcmd_addr == NULL)
+			return -ENOMEM;
+
+		if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
+				       cmdbuf->size * 8)) {
+			kfree(kcmd_addr);
+			return -EFAULT;
+		}
+		cmdbuf->cmd_addr = kcmd_addr;
+	}
+	if (cmdbuf->vb_size) {
+		kvb_addr = kmalloc(cmdbuf->vb_size, GFP_KERNEL);
+		if (kvb_addr == NULL) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
+				       cmdbuf->vb_size)) {
+			ret = -EFAULT;
+			goto done;
+		}
+		cmdbuf->vb_addr = kvb_addr;
+	}
+	if (cmdbuf->nbox) {
+		kbox_addr = kmalloc_array(cmdbuf->nbox, sizeof(struct drm_clip_rect),
+					  GFP_KERNEL);
+		if (kbox_addr == NULL) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
+				       cmdbuf->nbox * sizeof(struct drm_clip_rect))) {
+			ret = -EFAULT;
+			goto done;
+		}
+		cmdbuf->box_addr = kbox_addr;
+	}
+
+	/* Make sure writes to DMA buffers are finished before sending
+	 * DMA commands to the graphics hardware. */
+	DRM_MEMORYBARRIER();
+
+	/* Coming from user space. Don't know if the Xserver has
+	 * emitted wait commands. Assuming the worst. */
+	dev_priv->waiting = 1;
+
+	i = 0;
+	first_draw_cmd = NULL;
+	while (i < cmdbuf->size) {
+		drm_savage_cmd_header_t cmd_header;
+		cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr;
+		cmdbuf->cmd_addr++;
+		i++;
+
+		/* Group drawing commands with same state to minimize
+		 * iterations over clip rects. */
+		j = 0;
+		switch (cmd_header.cmd.cmd) {
+		case SAVAGE_CMD_DMA_IDX:
+		case SAVAGE_CMD_VB_IDX:
+			j = (cmd_header.idx.count + 3) / 4;
+			if (i + j > cmdbuf->size) {
+				DRM_ERROR("indexed drawing command extends "
+					  "beyond end of command buffer\n");
+				DMA_FLUSH();
+				ret = -EINVAL;
+				goto done;
+			}
+			/* fall through */
+		case SAVAGE_CMD_DMA_PRIM:
+		case SAVAGE_CMD_VB_PRIM:
+			if (!first_draw_cmd)
+				first_draw_cmd = cmdbuf->cmd_addr - 1;
+			cmdbuf->cmd_addr += j;
+			i += j;
+			break;
+		default:
+			if (first_draw_cmd) {
+				ret = savage_dispatch_draw(
+				      dev_priv, first_draw_cmd,
+				      cmdbuf->cmd_addr - 1,
+				      dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size,
+				      cmdbuf->vb_stride,
+				      cmdbuf->nbox, cmdbuf->box_addr);
+				if (ret != 0)
+					goto done;
+				first_draw_cmd = NULL;
+			}
+		}
+		if (first_draw_cmd)
+			continue;
+
+		switch (cmd_header.cmd.cmd) {
+		case SAVAGE_CMD_STATE:
+			j = (cmd_header.state.count + 1) / 2;
+			if (i + j > cmdbuf->size) {
+				DRM_ERROR("command SAVAGE_CMD_STATE extends "
+					  "beyond end of command buffer\n");
+				DMA_FLUSH();
+				ret = -EINVAL;
+				goto done;
+			}
+			ret = savage_dispatch_state(dev_priv, &cmd_header,
+				(const uint32_t *)cmdbuf->cmd_addr);
+			cmdbuf->cmd_addr += j;
+			i += j;
+			break;
+		case SAVAGE_CMD_CLEAR:
+			if (i + 1 > cmdbuf->size) {
+				DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
+					  "beyond end of command buffer\n");
+				DMA_FLUSH();
+				ret = -EINVAL;
+				goto done;
+			}
+			ret = savage_dispatch_clear(dev_priv, &cmd_header,
+						    cmdbuf->cmd_addr,
+						    cmdbuf->nbox,
+						    cmdbuf->box_addr);
+			cmdbuf->cmd_addr++;
+			i++;
+			break;
+		case SAVAGE_CMD_SWAP:
+			ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox,
+						   cmdbuf->box_addr);
+			break;
+		default:
+			DRM_ERROR("invalid command 0x%x\n",
+				  cmd_header.cmd.cmd);
+			DMA_FLUSH();
+			ret = -EINVAL;
+			goto done;
+		}
+
+		if (ret != 0) {
+			DMA_FLUSH();
+			goto done;
+		}
+	}
+
+	if (first_draw_cmd) {
+		ret = savage_dispatch_draw(
+			dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
+			cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
+			cmdbuf->nbox, cmdbuf->box_addr);
+		if (ret != 0) {
+			DMA_FLUSH();
+			goto done;
+		}
+	}
+
+	DMA_FLUSH();
+
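+	/* Tag the discarded buffer with a fresh event so the freelist
+	 * reuses it only once the hardware has caught up. */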
+	if (dmabuf && cmdbuf->discard) {
+		drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
+		uint16_t event;
+		event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
+		SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
+		savage_freelist_put(dev, dmabuf);
+	}
+
+done:
+	/* If we didn't need to allocate them, these will be NULL. */
+	kfree(kcmd_addr);
+	kfree(kvb_addr);
+	kfree(kbox_addr);
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/shmobile/Kconfig b/linux-imx/drivers/gpu/drm/shmobile/Kconfig
new file mode 100644
index 0000000..7e7d52b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/shmobile/Kconfig
@@ -0,0 +1,10 @@
+config DRM_SHMOBILE
+	tristate "DRM Support for SH Mobile"
+	depends on DRM && (SUPERH || ARCH_SHMOBILE)
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_GEM_CMA_HELPER
+	help
+	  Choose this option if you have an SH Mobile chipset.
+	  If M is selected the module will be called shmob-drm.
+
diff --git a/linux-imx/drivers/gpu/drm/shmobile/Makefile b/linux-imx/drivers/gpu/drm/shmobile/Makefile
new file mode 100644
index 0000000..4c3eeb3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/shmobile/Makefile
@@ -0,0 +1,7 @@
+shmob-drm-y := shmob_drm_backlight.o \
+	       shmob_drm_crtc.o \
+	       shmob_drm_drv.o \
+	       shmob_drm_kms.o \
+	       shmob_drm_plane.o
+
+obj-$(CONFIG_DRM_SHMOBILE)	+= shmob-drm.o
diff --git a/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_backlight.c b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
new file mode 100644
index 0000000..463aee1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
@@ -0,0 +1,90 @@
+/*
+ * shmob_drm_backlight.c  --  SH Mobile DRM Backlight
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+
+#include "shmob_drm_backlight.h"
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+
+static int shmob_drm_backlight_update(struct backlight_device *bdev)
+{
+	struct shmob_drm_connector *scon = bl_get_data(bdev);
+	struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
+	const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
+	int brightness = bdev->props.brightness;
+
+	if (bdev->props.power != FB_BLANK_UNBLANK ||
+	    bdev->props.state & BL_CORE_SUSPENDED)
+		brightness = 0;
+
+	return bdata->set_brightness(brightness);
+}
+
+static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev)
+{
+	struct shmob_drm_connector *scon = bl_get_data(bdev);
+	struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
+	const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
+
+	return bdata->get_brightness();
+}
+
+static const struct backlight_ops shmob_drm_backlight_ops = {
+	.options	= BL_CORE_SUSPENDRESUME,
+	.update_status	= shmob_drm_backlight_update,
+	.get_brightness	= shmob_drm_backlight_get_brightness,
+};
+
+void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode)
+{
+	if (scon->backlight == NULL)
+		return;
+
+	scon->backlight->props.power = mode == DRM_MODE_DPMS_ON
+				     ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+	backlight_update_status(scon->backlight);
+}
+
+int shmob_drm_backlight_init(struct shmob_drm_connector *scon)
+{
+	struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
+	const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
+	struct drm_connector *connector = &scon->connector;
+	struct drm_device *dev = connector->dev;
+	struct backlight_device *backlight;
+
+	if (!bdata->max_brightness)
+		return 0;
+
+	backlight = backlight_device_register(bdata->name, dev->dev, scon,
+					      &shmob_drm_backlight_ops, NULL);
+	if (IS_ERR(backlight)) {
+		dev_err(dev->dev, "unable to register backlight device: %ld\n",
+			PTR_ERR(backlight));
+		return PTR_ERR(backlight);
+	}
+
+	backlight->props.max_brightness = bdata->max_brightness;
+	backlight->props.brightness = bdata->max_brightness;
+	backlight->props.power = FB_BLANK_POWERDOWN;
+	backlight_update_status(backlight);
+
+	scon->backlight = backlight;
+	return 0;
+}
+
+void shmob_drm_backlight_exit(struct shmob_drm_connector *scon)
+{
+	backlight_device_unregister(scon->backlight);
+}
diff --git a/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_backlight.h b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
new file mode 100644
index 0000000..9477595
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
@@ -0,0 +1,23 @@
+/*
+ * shmob_drm_backlight.h  --  SH Mobile DRM Backlight
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_BACKLIGHT_H__
+#define __SHMOB_DRM_BACKLIGHT_H__
+
+struct shmob_drm_connector;
+
+void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode);
+int shmob_drm_backlight_init(struct shmob_drm_connector *scon);
+void shmob_drm_backlight_exit(struct shmob_drm_connector *scon);
+
+#endif /* __SHMOB_DRM_BACKLIGHT_H__ */
diff --git a/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
new file mode 100644
index 0000000..99e2034
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -0,0 +1,752 @@
+/*
+ * shmob_drm_crtc.c  --  SH Mobile DRM CRTCs
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <video/sh_mobile_meram.h>
+
+#include "shmob_drm_backlight.h"
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
+#include "shmob_drm_regs.h"
+
+/*
+ * TODO: panel support
+ */
+
+/* -----------------------------------------------------------------------------
+ * Clock management
+ */
+
+static void shmob_drm_clk_on(struct shmob_drm_device *sdev)
+{
+	if (sdev->clock)
+		clk_enable(sdev->clock);
+#if 0
+	if (sdev->meram_dev && sdev->meram_dev->pdev)
+		pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
+#endif
+}
+
+static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
+{
+#if 0
+	if (sdev->meram_dev && sdev->meram_dev->pdev)
+		pm_runtime_put_sync(&sdev->meram_dev->pdev->dev);
+#endif
+	if (sdev->clock)
+		clk_disable(sdev->clock);
+}
+
+/* -----------------------------------------------------------------------------
+ * CRTC
+ */
+
+static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+	const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
+	const struct drm_display_mode *mode = &crtc->mode;
+	u32 value;
+
+	value = sdev->ldmt1r
+	      | ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : LDMT1R_VPOL)
+	      | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : LDMT1R_HPOL)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_DWPOL) ? LDMT1R_DWPOL : 0)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_DIPOL) ? LDMT1R_DIPOL : 0)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_DAPOL) ? LDMT1R_DAPOL : 0)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_HSCNT) ? LDMT1R_HSCNT : 0)
+	      | ((idata->flags & SHMOB_DRM_IFACE_FL_DWCNT) ? LDMT1R_DWCNT : 0);
+	lcdc_write(sdev, LDMT1R, value);
+
+	if (idata->interface >= SHMOB_DRM_IFACE_SYS8A &&
+	    idata->interface <= SHMOB_DRM_IFACE_SYS24) {
+		/* Setup SYS bus. */
+		value = (idata->sys.cs_setup << LDMT2R_CSUP_SHIFT)
+		      | (idata->sys.vsync_active_high ? LDMT2R_RSV : 0)
+		      | (idata->sys.vsync_dir_input ? LDMT2R_VSEL : 0)
+		      | (idata->sys.write_setup << LDMT2R_WCSC_SHIFT)
+		      | (idata->sys.write_cycle << LDMT2R_WCEC_SHIFT)
+		      | (idata->sys.write_strobe << LDMT2R_WCLW_SHIFT);
+		lcdc_write(sdev, LDMT2R, value);
+
+		value = (idata->sys.read_latch << LDMT3R_RDLC_SHIFT)
+		      | (idata->sys.read_setup << LDMT3R_RCSC_SHIFT)
+		      | (idata->sys.read_cycle << LDMT3R_RCEC_SHIFT)
+		      | (idata->sys.read_strobe << LDMT3R_RCLW_SHIFT);
+		lcdc_write(sdev, LDMT3R, value);
+	}
+
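+	/* Horizontal timings are programmed in units of eight pixels;
+	 * the pixel remainders go into the adjust register below. */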
+	value = ((mode->hdisplay / 8) << 16)			/* HDCN */
+	      | (mode->htotal / 8);				/* HTCN */
+	lcdc_write(sdev, LDHCNR, value);
+
+	value = (((mode->hsync_end - mode->hsync_start) / 8) << 16) /* HSYNW */
+	      | (mode->hsync_start / 8);			/* HSYNP */
+	lcdc_write(sdev, LDHSYNR, value);
+
+	value = ((mode->hdisplay & 7) << 24) | ((mode->htotal & 7) << 16)
+	      | (((mode->hsync_end - mode->hsync_start) & 7) << 8)
+	      | (mode->hsync_start & 7);
+	lcdc_write(sdev, LDHAJR, value);
+
+	value = ((mode->vdisplay) << 16)			/* VDLN */
+	      | mode->vtotal;					/* VTLN */
+	lcdc_write(sdev, LDVLNR, value);
+
+	value = ((mode->vsync_end - mode->vsync_start) << 16)	/* VSYNW */
+	      | mode->vsync_start;				/* VSYNP */
+	lcdc_write(sdev, LDVSYNR, value);
+}
+
+static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start)
+{
+	struct shmob_drm_device *sdev = scrtc->crtc.dev->dev_private;
+	u32 value;
+
+	value = lcdc_read(sdev, LDCNT2R);
+	if (start)
+		lcdc_write(sdev, LDCNT2R, value | LDCNT2R_DO);
+	else
+		lcdc_write(sdev, LDCNT2R, value & ~LDCNT2R_DO);
+
+	/* Wait until power is applied/stopped. */
+	while (1) {
+		value = lcdc_read(sdev, LDPMR) & LDPMR_LPS;
+		if ((start && value) || (!start && !value))
+			break;
+
+		cpu_relax();
+	}
+
+	if (!start) {
+		/* Stop the dot clock. */
+		lcdc_write(sdev, LDDCKSTPR, LDDCKSTPR_DCKSTP);
+	}
+}
+
+/*
+ * shmob_drm_crtc_start - Configure and start the LCDC
+ * @scrtc: the SH Mobile CRTC
+ *
+ * Configure and start the LCDC device. External devices (clocks, MERAM, panels,
+ * ...) are not touched by this function.
+ */
+static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+	const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
+	const struct shmob_drm_format_info *format;
+	struct drm_device *dev = sdev->ddev;
+	struct drm_plane *plane;
+	u32 value;
+
+	if (scrtc->started)
+		return;
+
+	format = shmob_drm_format_info(crtc->fb->pixel_format);
+	if (WARN_ON(format == NULL))
+		return;
+
+	/* Enable clocks before accessing the hardware. */
+	shmob_drm_clk_on(sdev);
+
+	/* Reset and enable the LCDC. */
+	lcdc_write(sdev, LDCNT2R, lcdc_read(sdev, LDCNT2R) | LDCNT2R_BR);
+	lcdc_wait_bit(sdev, LDCNT2R, LDCNT2R_BR, 0);
+	lcdc_write(sdev, LDCNT2R, LDCNT2R_ME);
+
+	/* Stop the LCDC first and disable all interrupts. */
+	shmob_drm_crtc_start_stop(scrtc, false);
+	lcdc_write(sdev, LDINTR, 0);
+
+	/* Configure power supply, dot clocks and start them. */
+	lcdc_write(sdev, LDPMR, 0);
+
+	value = sdev->lddckr;
+	if (idata->clk_div) {
+		/* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider
+		 * denominator.
+		 */
+		lcdc_write(sdev, LDDCKPAT1R, 0);
+		lcdc_write(sdev, LDDCKPAT2R, (1 << (idata->clk_div / 2)) - 1);
+
+		if (idata->clk_div == 1)
+			value |= LDDCKR_MOSEL;
+		else
+			value |= idata->clk_div;
+	}
+
+	lcdc_write(sdev, LDDCKR, value);
+	lcdc_write(sdev, LDDCKSTPR, 0);
+	lcdc_wait_bit(sdev, LDDCKSTPR, ~0, 0);
+
+	/* TODO: Setup SYS panel */
+
+	/* Setup geometry, format, frame buffer memory and operation mode. */
+	shmob_drm_crtc_setup_geometry(scrtc);
+
+	/* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */
+	lcdc_write(sdev, LDDFR, format->lddfr | LDDFR_CF1);
+	lcdc_write(sdev, LDMLSR, scrtc->line_size);
+	lcdc_write(sdev, LDSA1R, scrtc->dma[0]);
+	if (format->yuv)
+		lcdc_write(sdev, LDSA2R, scrtc->dma[1]);
+	lcdc_write(sdev, LDSM1R, 0);
+
+	/* Word and long word swap. */
+	switch (format->fourcc) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV42:
+		value = LDDDSR_LS | LDDDSR_WS;
+		break;
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV24:
+		value = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
+		break;
+	case DRM_FORMAT_ARGB8888:
+	default:
+		value = LDDDSR_LS;
+		break;
+	}
+	lcdc_write(sdev, LDDDSR, value);
+
+	/* Setup planes. */
+	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+		if (plane->crtc == crtc)
+			shmob_drm_plane_setup(plane);
+	}
+
+	/* Enable the display output. */
+	lcdc_write(sdev, LDCNT1R, LDCNT1R_DE);
+
+	shmob_drm_crtc_start_stop(scrtc, true);
+
+	scrtc->started = true;
+}
+
+static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+
+	if (!scrtc->started)
+		return;
+
+	/* Disable the MERAM cache. */
+	if (scrtc->cache) {
+		sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
+		scrtc->cache = NULL;
+	}
+
+	/* Stop the LCDC. */
+	shmob_drm_crtc_start_stop(scrtc, false);
+
+	/* Disable the display output. */
+	lcdc_write(sdev, LDCNT1R, 0);
+
+	/* Stop clocks. */
+	shmob_drm_clk_off(sdev);
+
+	scrtc->started = false;
+}
+
+void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc)
+{
+	shmob_drm_crtc_stop(scrtc);
+}
+
+void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc)
+{
+	if (scrtc->dpms != DRM_MODE_DPMS_ON)
+		return;
+
+	shmob_drm_crtc_start(scrtc);
+}
+
+static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
+					int x, int y)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct drm_framebuffer *fb = crtc->fb;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+	struct drm_gem_cma_object *gem;
+	unsigned int bpp;
+
+	bpp = scrtc->format->yuv ? 8 : scrtc->format->bpp;
+	gem = drm_fb_cma_get_gem_obj(fb, 0);
+	scrtc->dma[0] = gem->paddr + fb->offsets[0]
+		      + y * fb->pitches[0] + x * bpp / 8;
+
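+	/* Plane 0 carries 8bpp luma for YUV formats; the remaining bits
+	 * per pixel belong to the chroma plane: 4 (4:2:0, line-halved),
+	 * 8 (4:2:2) or 16 (4:4:4, two bytes per pixel). */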
+	if (scrtc->format->yuv) {
+		bpp = scrtc->format->bpp - 8;
+		gem = drm_fb_cma_get_gem_obj(fb, 1);
+		scrtc->dma[1] = gem->paddr + fb->offsets[1]
+			      + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+			      + x * (bpp == 16 ? 2 : 1);
+	}
+
+	if (scrtc->cache)
+		sh_mobile_meram_cache_update(sdev->meram, scrtc->cache,
+					     scrtc->dma[0], scrtc->dma[1],
+					     &scrtc->dma[0], &scrtc->dma[1]);
+}
+
+static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_crtc *crtc = &scrtc->crtc;
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+
+	shmob_drm_crtc_compute_base(scrtc, crtc->x, crtc->y);
+
+	lcdc_write_mirror(sdev, LDSA1R, scrtc->dma[0]);
+	if (scrtc->format->yuv)
+		lcdc_write_mirror(sdev, LDSA2R, scrtc->dma[1]);
+
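+	/* Toggle the mirror register select bit so the LCDC picks up
+	 * the base addresses just written to the mirror registers. */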
+	lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS);
+}
+
+#define to_shmob_crtc(c)	container_of(c, struct shmob_drm_crtc, crtc)
+
+static void shmob_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+
+	if (scrtc->dpms == mode)
+		return;
+
+	if (mode == DRM_MODE_DPMS_ON)
+		shmob_drm_crtc_start(scrtc);
+	else
+		shmob_drm_crtc_stop(scrtc);
+
+	scrtc->dpms = mode;
+}
+
+static bool shmob_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+				      const struct drm_display_mode *mode,
+				      struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc)
+{
+	shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode,
+				   int x, int y,
+				   struct drm_framebuffer *old_fb)
+{
+	struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+	struct shmob_drm_device *sdev = crtc->dev->dev_private;
+	const struct sh_mobile_meram_cfg *mdata = sdev->pdata->meram;
+	const struct shmob_drm_format_info *format;
+	void *cache;
+
+	format = shmob_drm_format_info(crtc->fb->pixel_format);
+	if (format == NULL) {
+		dev_dbg(sdev->dev, "mode_set: unsupported format %08x\n",
+			crtc->fb->pixel_format);
+		return -EINVAL;
+	}
+
+	scrtc->format = format;
+	scrtc->line_size = crtc->fb->pitches[0];
+
+	if (sdev->meram) {
+		/* Enable MERAM cache if configured. We need to de-init
+		 * configured ICBs before we can re-initialize them.
+		 */
+		if (scrtc->cache) {
+			sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
+			scrtc->cache = NULL;
+		}
+
+		cache = sh_mobile_meram_cache_alloc(sdev->meram, mdata,
+						    crtc->fb->pitches[0],
+						    adjusted_mode->vdisplay,
+						    format->meram,
+						    &scrtc->line_size);
+		if (!IS_ERR(cache))
+			scrtc->cache = cache;
+	}
+
+	shmob_drm_crtc_compute_base(scrtc, x, y);
+
+	return 0;
+}
+
+static void shmob_drm_crtc_mode_commit(struct drm_crtc *crtc)
+{
+	shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int shmob_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+					struct drm_framebuffer *old_fb)
+{
+	shmob_drm_crtc_update_base(to_shmob_crtc(crtc));
+
+	return 0;
+}
+
+static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
+	.dpms = shmob_drm_crtc_dpms,
+	.mode_fixup = shmob_drm_crtc_mode_fixup,
+	.prepare = shmob_drm_crtc_mode_prepare,
+	.commit = shmob_drm_crtc_mode_commit,
+	.mode_set = shmob_drm_crtc_mode_set,
+	.mode_set_base = shmob_drm_crtc_mode_set_base,
+};
+
+void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
+				     struct drm_file *file)
+{
+	struct drm_pending_vblank_event *event;
+	struct drm_device *dev = scrtc->crtc.dev;
+	unsigned long flags;
+
+	/* Destroy the pending vertical blanking event associated with the
+	 * pending page flip, if any, and disable vertical blanking interrupts.
+	 */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = scrtc->event;
+	if (event && event->base.file_priv == file) {
+		scrtc->event = NULL;
+		event->base.destroy(&event->base);
+		drm_vblank_put(dev, 0);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
+{
+	struct drm_pending_vblank_event *event;
+	struct drm_device *dev = scrtc->crtc.dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = scrtc->event;
+	scrtc->event = NULL;
+	if (event) {
+		drm_send_vblank_event(dev, 0, event);
+		drm_vblank_put(dev, 0);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
+				    struct drm_framebuffer *fb,
+				    struct drm_pending_vblank_event *event)
+{
+	struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+	struct drm_device *dev = scrtc->crtc.dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (scrtc->event != NULL) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return -EBUSY;
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	crtc->fb = fb;
+	shmob_drm_crtc_update_base(scrtc);
+
+	if (event) {
+		event->pipe = 0;
+		drm_vblank_get(dev, 0);
+		spin_lock_irqsave(&dev->event_lock, flags);
+		scrtc->event = event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	return 0;
+}
+
+static const struct drm_crtc_funcs crtc_funcs = {
+	.destroy = drm_crtc_cleanup,
+	.set_config = drm_crtc_helper_set_config,
+	.page_flip = shmob_drm_crtc_page_flip,
+};
+
+int shmob_drm_crtc_create(struct shmob_drm_device *sdev)
+{
+	struct drm_crtc *crtc = &sdev->crtc.crtc;
+	int ret;
+
+	sdev->crtc.dpms = DRM_MODE_DPMS_OFF;
+
+	ret = drm_crtc_init(sdev->ddev, crtc, &crtc_funcs);
+	if (ret < 0)
+		return ret;
+
+	drm_crtc_helper_add(crtc, &crtc_helper_funcs);
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
+#define to_shmob_encoder(e) \
+	container_of(e, struct shmob_drm_encoder, encoder)
+
+static void shmob_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct shmob_drm_encoder *senc = to_shmob_encoder(encoder);
+	struct shmob_drm_device *sdev = encoder->dev->dev_private;
+	struct shmob_drm_connector *scon = &sdev->connector;
+
+	if (senc->dpms == mode)
+		return;
+
+	shmob_drm_backlight_dpms(scon, mode);
+
+	senc->dpms = mode;
+}
+
+static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+					 const struct drm_display_mode *mode,
+					 struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct shmob_drm_device *sdev = dev->dev_private;
+	struct drm_connector *connector = &sdev->connector.connector;
+	const struct drm_display_mode *panel_mode;
+
+	if (list_empty(&connector->modes)) {
+		dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
+		return false;
+	}
+
+	/* The flat panel mode is fixed, just copy it to the adjusted mode. */
+	panel_mode = list_first_entry(&connector->modes,
+				      struct drm_display_mode, head);
+	drm_mode_copy(adjusted_mode, panel_mode);
+
+	return true;
+}
+
+static void shmob_drm_encoder_mode_prepare(struct drm_encoder *encoder)
+{
+	/* No-op, everything is handled in the CRTC code. */
+}
+
+static void shmob_drm_encoder_mode_set(struct drm_encoder *encoder,
+				       struct drm_display_mode *mode,
+				       struct drm_display_mode *adjusted_mode)
+{
+	/* No-op, everything is handled in the CRTC code. */
+}
+
+static void shmob_drm_encoder_mode_commit(struct drm_encoder *encoder)
+{
+	/* No-op, everything is handled in the CRTC code. */
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+	.dpms = shmob_drm_encoder_dpms,
+	.mode_fixup = shmob_drm_encoder_mode_fixup,
+	.prepare = shmob_drm_encoder_mode_prepare,
+	.commit = shmob_drm_encoder_mode_commit,
+	.mode_set = shmob_drm_encoder_mode_set,
+};
+
+static void shmob_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+	.destroy = shmob_drm_encoder_destroy,
+};
+
+int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
+{
+	struct drm_encoder *encoder = &sdev->encoder.encoder;
+	int ret;
+
+	sdev->encoder.dpms = DRM_MODE_DPMS_OFF;
+
+	encoder->possible_crtcs = 1;
+
+	ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs,
+			       DRM_MODE_ENCODER_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+
+	return 0;
+}
+
+void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable)
+{
+	unsigned long flags;
+	u32 ldintr;
+
+	/* Be careful not to acknowledge any pending interrupt. */
+	spin_lock_irqsave(&sdev->irq_lock, flags);
+	ldintr = lcdc_read(sdev, LDINTR) | LDINTR_STATUS_MASK;
+	if (enable)
+		ldintr |= LDINTR_VEE;
+	else
+		ldintr &= ~LDINTR_VEE;
+	lcdc_write(sdev, LDINTR, ldintr);
+	spin_unlock_irqrestore(&sdev->irq_lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * Connector
+ */
+
+#define to_shmob_connector(c) \
+	container_of(c, struct shmob_drm_connector, connector)
+
+static int shmob_drm_connector_get_modes(struct drm_connector *connector)
+{
+	struct shmob_drm_device *sdev = connector->dev->dev_private;
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_create(connector->dev);
+	if (mode == NULL)
+		return 0;
+
+	mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+	mode->clock = sdev->pdata->panel.mode.clock;
+	mode->hdisplay = sdev->pdata->panel.mode.hdisplay;
+	mode->hsync_start = sdev->pdata->panel.mode.hsync_start;
+	mode->hsync_end = sdev->pdata->panel.mode.hsync_end;
+	mode->htotal = sdev->pdata->panel.mode.htotal;
+	mode->vdisplay = sdev->pdata->panel.mode.vdisplay;
+	mode->vsync_start = sdev->pdata->panel.mode.vsync_start;
+	mode->vsync_end = sdev->pdata->panel.mode.vsync_end;
+	mode->vtotal = sdev->pdata->panel.mode.vtotal;
+	mode->flags = sdev->pdata->panel.mode.flags;
+
+	drm_mode_set_name(mode);
+	drm_mode_probed_add(connector, mode);
+
+	connector->display_info.width_mm = sdev->pdata->panel.width_mm;
+	connector->display_info.height_mm = sdev->pdata->panel.height_mm;
+
+	return 1;
+}
+
+static int shmob_drm_connector_mode_valid(struct drm_connector *connector,
+					  struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+shmob_drm_connector_best_encoder(struct drm_connector *connector)
+{
+	struct shmob_drm_connector *scon = to_shmob_connector(connector);
+
+	return scon->encoder;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+	.get_modes = shmob_drm_connector_get_modes,
+	.mode_valid = shmob_drm_connector_mode_valid,
+	.best_encoder = shmob_drm_connector_best_encoder,
+};
+
+static void shmob_drm_connector_destroy(struct drm_connector *connector)
+{
+	struct shmob_drm_connector *scon = to_shmob_connector(connector);
+
+	shmob_drm_backlight_exit(scon);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+}
+
+static enum drm_connector_status
+shmob_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+	return connector_status_connected;
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = shmob_drm_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = shmob_drm_connector_destroy,
+};
+
+int shmob_drm_connector_create(struct shmob_drm_device *sdev,
+			       struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = &sdev->connector.connector;
+	int ret;
+
+	sdev->connector.encoder = encoder;
+
+	connector->display_info.width_mm = sdev->pdata->panel.width_mm;
+	connector->display_info.height_mm = sdev->pdata->panel.height_mm;
+
+	ret = drm_connector_init(sdev->ddev, connector, &connector_funcs,
+				 DRM_MODE_CONNECTOR_LVDS);
+	if (ret < 0)
+		return ret;
+
+	drm_connector_helper_add(connector, &connector_helper_funcs);
+	ret = drm_sysfs_connector_add(connector);
+	if (ret < 0)
+		goto err_cleanup;
+
+	ret = shmob_drm_backlight_init(&sdev->connector);
+	if (ret < 0)
+		goto err_sysfs;
+
+	ret = drm_mode_connector_attach_encoder(connector, encoder);
+	if (ret < 0)
+		goto err_backlight;
+
+	connector->encoder = encoder;
+
+	drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+	drm_object_property_set_value(&connector->base,
+		sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
+
+	return 0;
+
+err_backlight:
+	shmob_drm_backlight_exit(&sdev->connector);
+err_sysfs:
+	drm_sysfs_connector_remove(connector);
+err_cleanup:
+	drm_connector_cleanup(connector);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
new file mode 100644
index 0000000..e5bd109
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
@@ -0,0 +1,60 @@
+/*
+ * shmob_drm_crtc.h  --  SH Mobile DRM CRTCs
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_CRTC_H__
+#define __SHMOB_DRM_CRTC_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+struct backlight_device;
+struct shmob_drm_device;
+
+struct shmob_drm_crtc {
+	struct drm_crtc crtc;
+
+	struct drm_pending_vblank_event *event;
+	int dpms;
+
+	const struct shmob_drm_format_info *format;
+	void *cache;
+	unsigned long dma[2];
+	unsigned int line_size;
+	bool started;
+};
+
+struct shmob_drm_encoder {
+	struct drm_encoder encoder;
+	int dpms;
+};
+
+struct shmob_drm_connector {
+	struct drm_connector connector;
+	struct drm_encoder *encoder;
+
+	struct backlight_device *backlight;
+};
+
+int shmob_drm_crtc_create(struct shmob_drm_device *sdev);
+void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable);
+void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
+				     struct drm_file *file);
+void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc);
+void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc);
+void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc);
+
+int shmob_drm_encoder_create(struct shmob_drm_device *sdev);
+int shmob_drm_connector_create(struct shmob_drm_device *sdev,
+			       struct drm_encoder *encoder);
+
+#endif /* __SHMOB_DRM_CRTC_H__ */
diff --git a/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_drv.c
new file mode 100644
index 0000000..f6e0b53
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -0,0 +1,359 @@
+/*
+ * shmob_drm_drv.c  --  SH Mobile DRM driver
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
+#include "shmob_drm_regs.h"
+
+/* -----------------------------------------------------------------------------
+ * Hardware initialization
+ */
+
+static int shmob_drm_init_interface(struct shmob_drm_device *sdev)
+{
+	static const u32 ldmt1r[] = {
+		[SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
+		[SHMOB_DRM_IFACE_RGB9] = LDMT1R_MIFTYP_RGB9,
+		[SHMOB_DRM_IFACE_RGB12A] = LDMT1R_MIFTYP_RGB12A,
+		[SHMOB_DRM_IFACE_RGB12B] = LDMT1R_MIFTYP_RGB12B,
+		[SHMOB_DRM_IFACE_RGB16] = LDMT1R_MIFTYP_RGB16,
+		[SHMOB_DRM_IFACE_RGB18] = LDMT1R_MIFTYP_RGB18,
+		[SHMOB_DRM_IFACE_RGB24] = LDMT1R_MIFTYP_RGB24,
+		[SHMOB_DRM_IFACE_YUV422] = LDMT1R_MIFTYP_YCBCR,
+		[SHMOB_DRM_IFACE_SYS8A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8A,
+		[SHMOB_DRM_IFACE_SYS8B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8B,
+		[SHMOB_DRM_IFACE_SYS8C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8C,
+		[SHMOB_DRM_IFACE_SYS8D] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8D,
+		[SHMOB_DRM_IFACE_SYS9] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS9,
+		[SHMOB_DRM_IFACE_SYS12] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS12,
+		[SHMOB_DRM_IFACE_SYS16A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16A,
+		[SHMOB_DRM_IFACE_SYS16B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16B,
+		[SHMOB_DRM_IFACE_SYS16C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16C,
+		[SHMOB_DRM_IFACE_SYS18] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS18,
+		[SHMOB_DRM_IFACE_SYS24] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS24,
+	};
+
+	if (sdev->pdata->iface.interface >= ARRAY_SIZE(ldmt1r)) {
+		dev_err(sdev->dev, "invalid interface type %u\n",
+			sdev->pdata->iface.interface);
+		return -EINVAL;
+	}
+
+	sdev->ldmt1r = ldmt1r[sdev->pdata->iface.interface];
+	return 0;
+}
+
+static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
+				  enum shmob_drm_clk_source clksrc)
+{
+	struct clk *clk;
+	char *clkname;
+
+	switch (clksrc) {
+	case SHMOB_DRM_CLK_BUS:
+		clkname = "bus_clk";
+		sdev->lddckr = LDDCKR_ICKSEL_BUS;
+		break;
+	case SHMOB_DRM_CLK_PERIPHERAL:
+		clkname = "peripheral_clk";
+		sdev->lddckr = LDDCKR_ICKSEL_MIPI;
+		break;
+	case SHMOB_DRM_CLK_EXTERNAL:
+		clkname = NULL;
+		sdev->lddckr = LDDCKR_ICKSEL_HDMI;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	clk = clk_get(sdev->dev, clkname);
+	if (IS_ERR(clk)) {
+		dev_err(sdev->dev, "cannot get dot clock %s\n", clkname);
+		return PTR_ERR(clk);
+	}
+
+	sdev->clock = clk;
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM operations
+ */
+
+static int shmob_drm_unload(struct drm_device *dev)
+{
+	struct shmob_drm_device *sdev = dev->dev_private;
+
+	drm_kms_helper_poll_fini(dev);
+	drm_mode_config_cleanup(dev);
+	drm_vblank_cleanup(dev);
+	drm_irq_uninstall(dev);
+
+	if (sdev->clock)
+		clk_put(sdev->clock);
+
+	if (sdev->mmio)
+		iounmap(sdev->mmio);
+
+	dev->dev_private = NULL;
+	kfree(sdev);
+
+	return 0;
+}
+
+static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
+{
+	struct shmob_drm_platform_data *pdata = dev->dev->platform_data;
+	struct platform_device *pdev = dev->platformdev;
+	struct shmob_drm_device *sdev;
+	struct resource *res;
+	unsigned int i;
+	int ret;
+
+	if (pdata == NULL) {
+		dev_err(dev->dev, "no platform data\n");
+		return -EINVAL;
+	}
+
+	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+	if (sdev == NULL) {
+		dev_err(dev->dev, "failed to allocate private data\n");
+		return -ENOMEM;
+	}
+
+	sdev->dev = &pdev->dev;
+	sdev->pdata = pdata;
+	spin_lock_init(&sdev->irq_lock);
+
+	sdev->ddev = dev;
+	dev->dev_private = sdev;
+
+	/* I/O resources and clocks */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "failed to get memory resource\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	sdev->mmio = ioremap_nocache(res->start, resource_size(res));
+	if (sdev->mmio == NULL) {
+		dev_err(&pdev->dev, "failed to remap memory resource\n");
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
+	if (ret < 0)
+		goto done;
+
+	ret = shmob_drm_init_interface(sdev);
+	if (ret < 0)
+		goto done;
+
+	ret = shmob_drm_modeset_init(sdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to initialize mode setting\n");
+		goto done;
+	}
+
+	for (i = 0; i < 4; ++i) {
+		ret = shmob_drm_plane_create(sdev, i);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "failed to create plane %u\n", i);
+			goto done;
+		}
+	}
+
+	ret = drm_vblank_init(dev, 1);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to initialize vblank\n");
+		goto done;
+	}
+
+	ret = drm_irq_install(dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to install IRQ handler\n");
+		goto done;
+	}
+
+	platform_set_drvdata(pdev, sdev);
+
+done:
+	if (ret)
+		shmob_drm_unload(dev);
+
+	return ret;
+}
+
+static void shmob_drm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct shmob_drm_device *sdev = dev->dev_private;
+
+	shmob_drm_crtc_cancel_page_flip(&sdev->crtc, file);
+}
+
+static irqreturn_t shmob_drm_irq(int irq, void *arg)
+{
+	struct drm_device *dev = arg;
+	struct shmob_drm_device *sdev = dev->dev_private;
+	unsigned long flags;
+	u32 status;
+
+	/* Acknowledge interrupts. Putting interrupt enable and interrupt flag
+	 * bits in the same register is really brain-dead design and requires
+	 * taking a spinlock.
+	 */
+	spin_lock_irqsave(&sdev->irq_lock, flags);
+	status = lcdc_read(sdev, LDINTR);
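+	/* Status flags are cleared by writing 0; XOR-ing the read value
+	 * with the status mask clears exactly the flags that were set
+	 * while leaving the enable bits untouched. */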
+	lcdc_write(sdev, LDINTR, status ^ LDINTR_STATUS_MASK);
+	spin_unlock_irqrestore(&sdev->irq_lock, flags);
+
+	if (status & LDINTR_VES) {
+		drm_handle_vblank(dev, 0);
+		shmob_drm_crtc_finish_page_flip(&sdev->crtc);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int shmob_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct shmob_drm_device *sdev = dev->dev_private;
+
+	shmob_drm_crtc_enable_vblank(sdev, true);
+
+	return 0;
+}
+
+static void shmob_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+	struct shmob_drm_device *sdev = dev->dev_private;
+
+	shmob_drm_crtc_enable_vblank(sdev, false);
+}
+
+static const struct file_operations shmob_drm_fops = {
+	.owner		= THIS_MODULE,
+	.open		= drm_open,
+	.release	= drm_release,
+	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= drm_compat_ioctl,
+#endif
+	.poll		= drm_poll,
+	.read		= drm_read,
+	.fasync		= drm_fasync,
+	.llseek		= no_llseek,
+	.mmap		= drm_gem_cma_mmap,
+};
+
+static struct drm_driver shmob_drm_driver = {
+	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+	.load			= shmob_drm_load,
+	.unload			= shmob_drm_unload,
+	.preclose		= shmob_drm_preclose,
+	.irq_handler		= shmob_drm_irq,
+	.get_vblank_counter	= drm_vblank_count,
+	.enable_vblank		= shmob_drm_enable_vblank,
+	.disable_vblank		= shmob_drm_disable_vblank,
+	.gem_free_object	= drm_gem_cma_free_object,
+	.gem_vm_ops		= &drm_gem_cma_vm_ops,
+	.dumb_create		= drm_gem_cma_dumb_create,
+	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
+	.dumb_destroy		= drm_gem_cma_dumb_destroy,
+	.fops			= &shmob_drm_fops,
+	.name			= "shmob-drm",
+	.desc			= "Renesas SH Mobile DRM",
+	.date			= "20120424",
+	.major			= 1,
+	.minor			= 0,
+};
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+#ifdef CONFIG_PM_SLEEP
+static int shmob_drm_pm_suspend(struct device *dev)
+{
+	struct shmob_drm_device *sdev = dev_get_drvdata(dev);
+
+	drm_kms_helper_poll_disable(sdev->ddev);
+	shmob_drm_crtc_suspend(&sdev->crtc);
+
+	return 0;
+}
+
+static int shmob_drm_pm_resume(struct device *dev)
+{
+	struct shmob_drm_device *sdev = dev_get_drvdata(dev);
+
+	drm_modeset_lock_all(sdev->ddev);
+	shmob_drm_crtc_resume(&sdev->crtc);
+	drm_modeset_unlock_all(sdev->ddev);
+
+	drm_kms_helper_poll_enable(sdev->ddev);
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops shmob_drm_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(shmob_drm_pm_suspend, shmob_drm_pm_resume)
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform driver
+ */
+
+static int shmob_drm_probe(struct platform_device *pdev)
+{
+	return drm_platform_init(&shmob_drm_driver, pdev);
+}
+
+static int shmob_drm_remove(struct platform_device *pdev)
+{
+	drm_platform_exit(&shmob_drm_driver, pdev);
+
+	return 0;
+}
+
+static struct platform_driver shmob_drm_platform_driver = {
+	.probe		= shmob_drm_probe,
+	.remove		= shmob_drm_remove,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "shmob-drm",
+		.pm	= &shmob_drm_pm_ops,
+	},
+};
+
+module_platform_driver(shmob_drm_platform_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas SH Mobile DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_drv.h b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_drv.h
new file mode 100644
index 0000000..4d46b81
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_drv.h
@@ -0,0 +1,47 @@
+/*
+ * shmob_drm.h  --  SH Mobile DRM driver
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_DRV_H__
+#define __SHMOB_DRM_DRV_H__
+
+#include <linux/kernel.h>
+#include <linux/platform_data/shmob_drm.h>
+#include <linux/spinlock.h>
+
+#include "shmob_drm_crtc.h"
+
+struct clk;
+struct device;
+struct drm_device;
+struct sh_mobile_meram_info;
+
+struct shmob_drm_device {
+	struct device *dev;
+	const struct shmob_drm_platform_data *pdata;
+
+	void __iomem *mmio;
+	struct clk *clock;
+	struct sh_mobile_meram_info *meram;
+	u32 lddckr;
+	u32 ldmt1r;
+
+	spinlock_t irq_lock;		/* Protects hardware LDINTR register */
+
+	struct drm_device *ddev;
+
+	struct shmob_drm_crtc crtc;
+	struct shmob_drm_encoder encoder;
+	struct shmob_drm_connector connector;
+};
+
+#endif /* __SHMOB_DRM_DRV_H__ */
diff --git a/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_kms.c
new file mode 100644
index 0000000..c291ee3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -0,0 +1,160 @@
+/*
+ * shmob_drm_kms.c  --  SH Mobile DRM Mode Setting
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <video/sh_mobile_meram.h>
+
+#include "shmob_drm_crtc.h"
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_regs.h"
+
+/* -----------------------------------------------------------------------------
+ * Format helpers
+ */
+
+static const struct shmob_drm_format_info shmob_drm_format_infos[] = {
+	{
+		.fourcc = DRM_FORMAT_RGB565,
+		.bpp = 16,
+		.yuv = false,
+		.lddfr = LDDFR_PKF_RGB16,
+		.meram = SH_MOBILE_MERAM_PF_RGB,
+	}, {
+		.fourcc = DRM_FORMAT_RGB888,
+		.bpp = 24,
+		.yuv = false,
+		.lddfr = LDDFR_PKF_RGB24,
+		.meram = SH_MOBILE_MERAM_PF_RGB,
+	}, {
+		.fourcc = DRM_FORMAT_ARGB8888,
+		.bpp = 32,
+		.yuv = false,
+		.lddfr = LDDFR_PKF_ARGB32,
+		.meram = SH_MOBILE_MERAM_PF_RGB,
+	}, {
+		.fourcc = DRM_FORMAT_NV12,
+		.bpp = 12,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_420,
+		.meram = SH_MOBILE_MERAM_PF_NV,
+	}, {
+		.fourcc = DRM_FORMAT_NV21,
+		.bpp = 12,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_420,
+		.meram = SH_MOBILE_MERAM_PF_NV,
+	}, {
+		.fourcc = DRM_FORMAT_NV16,
+		.bpp = 16,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_422,
+		.meram = SH_MOBILE_MERAM_PF_NV,
+	}, {
+		.fourcc = DRM_FORMAT_NV61,
+		.bpp = 16,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_422,
+		.meram = SH_MOBILE_MERAM_PF_NV,
+	}, {
+		.fourcc = DRM_FORMAT_NV24,
+		.bpp = 24,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_444,
+		.meram = SH_MOBILE_MERAM_PF_NV24,
+	}, {
+		.fourcc = DRM_FORMAT_NV42,
+		.bpp = 24,
+		.yuv = true,
+		.lddfr = LDDFR_CC | LDDFR_YF_444,
+		.meram = SH_MOBILE_MERAM_PF_NV24,
+	},
+};
+
+const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(shmob_drm_format_infos); ++i) {
+		if (shmob_drm_format_infos[i].fourcc == fourcc)
+			return &shmob_drm_format_infos[i];
+	}
+
+	return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame buffer
+ */
+
+static struct drm_framebuffer *
+shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+		    struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	const struct shmob_drm_format_info *format;
+
+	format = shmob_drm_format_info(mode_cmd->pixel_format);
+	if (format == NULL) {
+		dev_dbg(dev->dev, "unsupported pixel format %08x\n",
+			mode_cmd->pixel_format);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (mode_cmd->pitches[0] & 7 || mode_cmd->pitches[0] >= 65536) {
+		dev_dbg(dev->dev, "invalid pitch value %u\n",
+			mode_cmd->pitches[0]);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (format->yuv) {
+		unsigned int chroma_cpp = format->bpp == 24 ? 2 : 1;
+
+		if (mode_cmd->pitches[1] != mode_cmd->pitches[0] * chroma_cpp) {
+			dev_dbg(dev->dev,
+				"luma and chroma pitches do not match\n");
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	return drm_fb_cma_create(dev, file_priv, mode_cmd);
+}
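+
+/*
+ * Editor's note (worked example, not in the original driver): for an
+ * 800-pixel-wide NV24 framebuffer (bpp == 24, so chroma_cpp == 2) the
+ * checks above require pitches[0] == 800 (8-byte aligned, < 65536) and
+ * pitches[1] == 1600, since full-resolution interleaved CbCr costs two
+ * bytes per luma pixel. For NV12/NV16 (chroma_cpp == 1) both pitches
+ * must be equal instead.
+ */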
+
+static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
+	.fb_create = shmob_drm_fb_create,
+};
+
+int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
+{
+	drm_mode_config_init(sdev->ddev);
+
+	shmob_drm_crtc_create(sdev);
+	shmob_drm_encoder_create(sdev);
+	shmob_drm_connector_create(sdev, &sdev->encoder.encoder);
+
+	drm_kms_helper_poll_init(sdev->ddev);
+
+	sdev->ddev->mode_config.min_width = 0;
+	sdev->ddev->mode_config.min_height = 0;
+	sdev->ddev->mode_config.max_width = 4095;
+	sdev->ddev->mode_config.max_height = 4095;
+	sdev->ddev->mode_config.funcs = &shmob_drm_mode_config_funcs;
+
+	drm_helper_disable_unused_functions(sdev->ddev);
+
+	return 0;
+}
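+
+/*
+ * Editor's note: the 4095x4095 mode limit set above matches the LCDC's
+ * 12-bit plane size fields (see the 0xfff LDBBSSZR_BVSS/BHSS masks in
+ * shmob_drm_regs.h), i.e. the largest geometry the hardware registers
+ * can encode.
+ */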
diff --git a/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_kms.h b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_kms.h
new file mode 100644
index 0000000..9495c91
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_kms.h
@@ -0,0 +1,34 @@
+/*
+ * shmob_drm_kms.h  --  SH Mobile DRM Mode Setting
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_KMS_H__
+#define __SHMOB_DRM_KMS_H__
+
+#include <linux/types.h>
+
+struct drm_gem_cma_object;
+struct shmob_drm_device;
+
+struct shmob_drm_format_info {
+	u32 fourcc;
+	unsigned int bpp;
+	bool yuv;
+	u32 lddfr;
+	unsigned int meram;
+};
+
+const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc);
+
+int shmob_drm_modeset_init(struct shmob_drm_device *sdev);
+
+#endif /* __SHMOB_DRM_KMS_H__ */
diff --git a/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_plane.c
new file mode 100644
index 0000000..e1eb899
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -0,0 +1,268 @@
+/*
+ * shmob_drm_plane.c  --  SH Mobile DRM Planes
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include <video/sh_mobile_meram.h>
+
+#include "shmob_drm_drv.h"
+#include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
+#include "shmob_drm_regs.h"
+
+struct shmob_drm_plane {
+	struct drm_plane plane;
+	unsigned int index;
+	unsigned int alpha;
+
+	const struct shmob_drm_format_info *format;
+	unsigned long dma[2];
+
+	unsigned int src_x;
+	unsigned int src_y;
+	unsigned int crtc_x;
+	unsigned int crtc_y;
+	unsigned int crtc_w;
+	unsigned int crtc_h;
+};
+
+#define to_shmob_plane(p)	container_of(p, struct shmob_drm_plane, plane)
+
+static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane,
+					 struct drm_framebuffer *fb,
+					 int x, int y)
+{
+	struct drm_gem_cma_object *gem;
+	unsigned int bpp;
+
+	bpp = splane->format->yuv ? 8 : splane->format->bpp;
+	gem = drm_fb_cma_get_gem_obj(fb, 0);
+	splane->dma[0] = gem->paddr + fb->offsets[0]
+		       + y * fb->pitches[0] + x * bpp / 8;
+
+	if (splane->format->yuv) {
+		bpp = splane->format->bpp - 8;
+		gem = drm_fb_cma_get_gem_obj(fb, 1);
+		splane->dma[1] = gem->paddr + fb->offsets[1]
+			       + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+			       + x * (bpp == 16 ? 2 : 1);
+	}
+}
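+
+/*
+ * Editor's note (worked example, not in the original driver): for an
+ * NV12 plane (bpp == 12) at x == 8, y == 10 with
+ * pitches[0] == pitches[1] == 800:
+ *
+ *	luma:   dma[0] = paddr + offsets[0] + 10 * 800 + 8 * 8 / 8
+ *	chroma: bpp - 8 == 4, so y is halved (4:2:0 vertical subsampling)
+ *	        and x is used as-is (one CbCr byte per luma pixel):
+ *	        dma[1] = paddr + offsets[1] + (10 / 2) * 800 + 8
+ *
+ * For NV16 (bpp - 8 == 8) neither adjustment applies; for NV24
+ * (bpp - 8 == 16) only x is doubled.
+ */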
+
+static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
+				    struct drm_framebuffer *fb)
+{
+	struct shmob_drm_device *sdev = splane->plane.dev->dev_private;
+	u32 format;
+
+	/* TODO: Support ROP3 mode */
+	format = LDBBSIFR_EN | (splane->alpha << LDBBSIFR_LAY_SHIFT);
+
+	switch (splane->format->fourcc) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV42:
+		format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW;
+		break;
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV24:
+		format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB;
+		break;
+	case DRM_FORMAT_ARGB8888:
+	default:
+		format |= LDBBSIFR_SWPL;
+		break;
+	}
+
+	switch (splane->format->fourcc) {
+	case DRM_FORMAT_RGB565:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16;
+		break;
+	case DRM_FORMAT_RGB888:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDBBSIFR_RPKF_ARGB32;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420;
+		break;
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422;
+		break;
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
+		format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444;
+		break;
+	}
+
+#define plane_reg_dump(sdev, splane, reg) \
+	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \
+		splane->index, #reg, \
+		lcdc_read(sdev, reg(splane->index)), \
+		lcdc_read(sdev, reg(splane->index) + LCDC_SIDE_B_OFFSET))
+
+	plane_reg_dump(sdev, splane, LDBnBSIFR);
+	plane_reg_dump(sdev, splane, LDBnBSSZR);
+	plane_reg_dump(sdev, splane, LDBnBLOCR);
+	plane_reg_dump(sdev, splane, LDBnBSMWR);
+	plane_reg_dump(sdev, splane, LDBnBSAYR);
+	plane_reg_dump(sdev, splane, LDBnBSACR);
+
+	lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index));
+	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+		"LDBCR", lcdc_read(sdev, LDBCR));
+
+	lcdc_write(sdev, LDBnBSIFR(splane->index), format);
+
+	lcdc_write(sdev, LDBnBSSZR(splane->index),
+		   (splane->crtc_h << LDBBSSZR_BVSS_SHIFT) |
+		   (splane->crtc_w << LDBBSSZR_BHSS_SHIFT));
+	lcdc_write(sdev, LDBnBLOCR(splane->index),
+		   (splane->crtc_y << LDBBLOCR_CVLC_SHIFT) |
+		   (splane->crtc_x << LDBBLOCR_CHLC_SHIFT));
+	lcdc_write(sdev, LDBnBSMWR(splane->index),
+		   fb->pitches[0] << LDBBSMWR_BSMW_SHIFT);
+
+	shmob_drm_plane_compute_base(splane, fb, splane->src_x, splane->src_y);
+
+	lcdc_write(sdev, LDBnBSAYR(splane->index), splane->dma[0]);
+	if (splane->format->yuv)
+		lcdc_write(sdev, LDBnBSACR(splane->index), splane->dma[1]);
+
+	lcdc_write(sdev, LDBCR,
+		   LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index));
+	dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+		"LDBCR", lcdc_read(sdev, LDBCR));
+
+	plane_reg_dump(sdev, splane, LDBnBSIFR);
+	plane_reg_dump(sdev, splane, LDBnBSSZR);
+	plane_reg_dump(sdev, splane, LDBnBLOCR);
+	plane_reg_dump(sdev, splane, LDBnBSMWR);
+	plane_reg_dump(sdev, splane, LDBnBSAYR);
+	plane_reg_dump(sdev, splane, LDBnBSACR);
+}
+
+void shmob_drm_plane_setup(struct drm_plane *plane)
+{
+	struct shmob_drm_plane *splane = to_shmob_plane(plane);
+
+	if (plane->fb == NULL || !plane->enabled)
+		return;
+
+	__shmob_drm_plane_setup(splane, plane->fb);
+}
+
+static int
+shmob_drm_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+		       struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+		       unsigned int crtc_w, unsigned int crtc_h,
+		       uint32_t src_x, uint32_t src_y,
+		       uint32_t src_w, uint32_t src_h)
+{
+	struct shmob_drm_plane *splane = to_shmob_plane(plane);
+	struct shmob_drm_device *sdev = plane->dev->dev_private;
+	const struct shmob_drm_format_info *format;
+
+	format = shmob_drm_format_info(fb->pixel_format);
+	if (format == NULL) {
+		dev_dbg(sdev->dev, "update_plane: unsupported format %08x\n",
+			fb->pixel_format);
+		return -EINVAL;
+	}
+
+	if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
+		dev_dbg(sdev->dev, "%s: scaling not supported\n", __func__);
+		return -EINVAL;
+	}
+
+	splane->format = format;
+
+	splane->src_x = src_x >> 16;
+	splane->src_y = src_y >> 16;
+	splane->crtc_x = crtc_x;
+	splane->crtc_y = crtc_y;
+	splane->crtc_w = crtc_w;
+	splane->crtc_h = crtc_h;
+
+	__shmob_drm_plane_setup(splane, fb);
+	return 0;
+}
+
+static int shmob_drm_plane_disable(struct drm_plane *plane)
+{
+	struct shmob_drm_plane *splane = to_shmob_plane(plane);
+	struct shmob_drm_device *sdev = plane->dev->dev_private;
+
+	splane->format = NULL;
+
+	lcdc_write(sdev, LDBnBSIFR(splane->index), 0);
+	return 0;
+}
+
+static void shmob_drm_plane_destroy(struct drm_plane *plane)
+{
+	struct shmob_drm_plane *splane = to_shmob_plane(plane);
+
+	shmob_drm_plane_disable(plane);
+	drm_plane_cleanup(plane);
+	kfree(splane);
+}
+
+static const struct drm_plane_funcs shmob_drm_plane_funcs = {
+	.update_plane = shmob_drm_plane_update,
+	.disable_plane = shmob_drm_plane_disable,
+	.destroy = shmob_drm_plane_destroy,
+};
+
+static const uint32_t formats[] = {
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_RGB888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_NV12,
+	DRM_FORMAT_NV21,
+	DRM_FORMAT_NV16,
+	DRM_FORMAT_NV61,
+	DRM_FORMAT_NV24,
+	DRM_FORMAT_NV42,
+};
+
+int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
+{
+	struct shmob_drm_plane *splane;
+	int ret;
+
+	splane = kzalloc(sizeof(*splane), GFP_KERNEL);
+	if (splane == NULL)
+		return -ENOMEM;
+
+	splane->index = index;
+	splane->alpha = 255;
+
+	ret = drm_plane_init(sdev->ddev, &splane->plane, 1,
+			     &shmob_drm_plane_funcs, formats,
+			     ARRAY_SIZE(formats), false);
+	if (ret < 0)
+		kfree(splane);
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_plane.h b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_plane.h
new file mode 100644
index 0000000..99623d0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_plane.h
@@ -0,0 +1,22 @@
+/*
+ * shmob_drm_plane.h  --  SH Mobile DRM Planes
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_PLANE_H__
+#define __SHMOB_DRM_PLANE_H__
+
+struct shmob_drm_device;
+
+int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index);
+void shmob_drm_plane_setup(struct drm_plane *plane);
+
+#endif /* __SHMOB_DRM_PLANE_H__ */
diff --git a/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_regs.h b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_regs.h
new file mode 100644
index 0000000..7923cdd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/shmobile/shmob_drm_regs.h
@@ -0,0 +1,311 @@
+/*
+ * shmob_drm_regs.h  --  SH Mobile DRM registers
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_REGS_H__
+#define __SHMOB_DRM_REGS_H__
+
+#include <linux/io.h>
+
+/* Register definitions */
+#define LDDCKPAT1R		0x400
+#define LDDCKPAT2R		0x404
+#define LDDCKR			0x410
+#define LDDCKR_ICKSEL_BUS	(0 << 16)
+#define LDDCKR_ICKSEL_MIPI	(1 << 16)
+#define LDDCKR_ICKSEL_HDMI	(2 << 16)
+#define LDDCKR_ICKSEL_EXT	(3 << 16)
+#define LDDCKR_ICKSEL_MASK	(7 << 16)
+#define LDDCKR_MOSEL		(1 << 6)
+#define LDDCKSTPR		0x414
+#define LDDCKSTPR_DCKSTS	(1 << 16)
+#define LDDCKSTPR_DCKSTP	(1 << 0)
+#define LDMT1R			0x418
+#define LDMT1R_VPOL		(1 << 28)
+#define LDMT1R_HPOL		(1 << 27)
+#define LDMT1R_DWPOL		(1 << 26)
+#define LDMT1R_DIPOL		(1 << 25)
+#define LDMT1R_DAPOL		(1 << 24)
+#define LDMT1R_HSCNT		(1 << 17)
+#define LDMT1R_DWCNT		(1 << 16)
+#define LDMT1R_IFM		(1 << 12)
+#define LDMT1R_MIFTYP_RGB8	(0x0 << 0)
+#define LDMT1R_MIFTYP_RGB9	(0x4 << 0)
+#define LDMT1R_MIFTYP_RGB12A	(0x5 << 0)
+#define LDMT1R_MIFTYP_RGB12B	(0x6 << 0)
+#define LDMT1R_MIFTYP_RGB16	(0x7 << 0)
+#define LDMT1R_MIFTYP_RGB18	(0xa << 0)
+#define LDMT1R_MIFTYP_RGB24	(0xb << 0)
+#define LDMT1R_MIFTYP_YCBCR	(0xf << 0)
+#define LDMT1R_MIFTYP_SYS8A	(0x0 << 0)
+#define LDMT1R_MIFTYP_SYS8B	(0x1 << 0)
+#define LDMT1R_MIFTYP_SYS8C	(0x2 << 0)
+#define LDMT1R_MIFTYP_SYS8D	(0x3 << 0)
+#define LDMT1R_MIFTYP_SYS9	(0x4 << 0)
+#define LDMT1R_MIFTYP_SYS12	(0x5 << 0)
+#define LDMT1R_MIFTYP_SYS16A	(0x7 << 0)
+#define LDMT1R_MIFTYP_SYS16B	(0x8 << 0)
+#define LDMT1R_MIFTYP_SYS16C	(0x9 << 0)
+#define LDMT1R_MIFTYP_SYS18	(0xa << 0)
+#define LDMT1R_MIFTYP_SYS24	(0xb << 0)
+#define LDMT1R_MIFTYP_MASK	(0xf << 0)
+#define LDMT2R			0x41c
+#define LDMT2R_CSUP_MASK	(7 << 26)
+#define LDMT2R_CSUP_SHIFT	26
+#define LDMT2R_RSV		(1 << 25)
+#define LDMT2R_VSEL		(1 << 24)
+#define LDMT2R_WCSC_MASK	(0xff << 16)
+#define LDMT2R_WCSC_SHIFT	16
+#define LDMT2R_WCEC_MASK	(0xff << 8)
+#define LDMT2R_WCEC_SHIFT	8
+#define LDMT2R_WCLW_MASK	(0xff << 0)
+#define LDMT2R_WCLW_SHIFT	0
+#define LDMT3R			0x420
+#define LDMT3R_RDLC_MASK	(0x3f << 24)
+#define LDMT3R_RDLC_SHIFT	24
+#define LDMT3R_RCSC_MASK	(0xff << 16)
+#define LDMT3R_RCSC_SHIFT	16
+#define LDMT3R_RCEC_MASK	(0xff << 8)
+#define LDMT3R_RCEC_SHIFT	8
+#define LDMT3R_RCLW_MASK	(0xff << 0)
+#define LDMT3R_RCLW_SHIFT	0
+#define LDDFR			0x424
+#define LDDFR_CF1		(1 << 18)
+#define LDDFR_CF0		(1 << 17)
+#define LDDFR_CC		(1 << 16)
+#define LDDFR_YF_420		(0 << 8)
+#define LDDFR_YF_422		(1 << 8)
+#define LDDFR_YF_444		(2 << 8)
+#define LDDFR_YF_MASK		(3 << 8)
+#define LDDFR_PKF_ARGB32	(0x00 << 0)
+#define LDDFR_PKF_RGB16		(0x03 << 0)
+#define LDDFR_PKF_RGB24		(0x0b << 0)
+#define LDDFR_PKF_MASK		(0x1f << 0)
+#define LDSM1R			0x428
+#define LDSM1R_OS		(1 << 0)
+#define LDSM2R			0x42c
+#define LDSM2R_OSTRG		(1 << 0)
+#define LDSA1R			0x430
+#define LDSA2R			0x434
+#define LDMLSR			0x438
+#define LDWBFR			0x43c
+#define LDWBCNTR		0x440
+#define LDWBAR			0x444
+#define LDHCNR			0x448
+#define LDHSYNR			0x44c
+#define LDVLNR			0x450
+#define LDVSYNR			0x454
+#define LDHPDR			0x458
+#define LDVPDR			0x45c
+#define LDPMR			0x460
+#define LDPMR_LPS		(3 << 0)
+#define LDINTR			0x468
+#define LDINTR_FE		(1 << 10)
+#define LDINTR_VSE		(1 << 9)
+#define LDINTR_VEE		(1 << 8)
+#define LDINTR_FS		(1 << 2)
+#define LDINTR_VSS		(1 << 1)
+#define LDINTR_VES		(1 << 0)
+#define LDINTR_STATUS_MASK	(0xff << 0)
+#define LDSR			0x46c
+#define LDSR_MSS		(1 << 10)
+#define LDSR_MRS		(1 << 8)
+#define LDSR_AS			(1 << 1)
+#define LDCNT1R			0x470
+#define LDCNT1R_DE		(1 << 0)
+#define LDCNT2R			0x474
+#define LDCNT2R_BR		(1 << 8)
+#define LDCNT2R_MD		(1 << 3)
+#define LDCNT2R_SE		(1 << 2)
+#define LDCNT2R_ME		(1 << 1)
+#define LDCNT2R_DO		(1 << 0)
+#define LDRCNTR			0x478
+#define LDRCNTR_SRS		(1 << 17)
+#define LDRCNTR_SRC		(1 << 16)
+#define LDRCNTR_MRS		(1 << 1)
+#define LDRCNTR_MRC		(1 << 0)
+#define LDDDSR			0x47c
+#define LDDDSR_LS		(1 << 2)
+#define LDDDSR_WS		(1 << 1)
+#define LDDDSR_BS		(1 << 0)
+#define LDHAJR			0x4a0
+
+#define LDDWD0R			0x800
+#define LDDWDxR_WDACT		(1 << 28)
+#define LDDWDxR_RSW		(1 << 24)
+#define LDDRDR			0x840
+#define LDDRDR_RSR		(1 << 24)
+#define LDDRDR_DRD_MASK		(0x3ffff << 0)
+#define LDDWAR			0x900
+#define LDDWAR_WA		(1 << 0)
+#define LDDRAR			0x904
+#define LDDRAR_RA		(1 << 0)
+
+#define LDBCR			0xb00
+#define LDBCR_UPC(n)		(1 << ((n) + 16))
+#define LDBCR_UPF(n)		(1 << ((n) + 8))
+#define LDBCR_UPD(n)		(1 << ((n) + 0))
+#define LDBnBSIFR(n)		(0xb20 + (n) * 0x20 + 0x00)
+#define LDBBSIFR_EN		(1 << 31)
+#define LDBBSIFR_VS		(1 << 29)
+#define LDBBSIFR_BRSEL		(1 << 28)
+#define LDBBSIFR_MX		(1 << 27)
+#define LDBBSIFR_MY		(1 << 26)
+#define LDBBSIFR_CV3		(3 << 24)
+#define LDBBSIFR_CV2		(2 << 24)
+#define LDBBSIFR_CV1		(1 << 24)
+#define LDBBSIFR_CV0		(0 << 24)
+#define LDBBSIFR_CV_MASK	(3 << 24)
+#define LDBBSIFR_LAY_MASK	(0xff << 16)
+#define LDBBSIFR_LAY_SHIFT	16
+#define LDBBSIFR_ROP3_MASK	(0xff << 16)
+#define LDBBSIFR_ROP3_SHIFT	16
+#define LDBBSIFR_AL_PL8		(3 << 14)
+#define LDBBSIFR_AL_PL1		(2 << 14)
+#define LDBBSIFR_AL_PK		(1 << 14)
+#define LDBBSIFR_AL_1		(0 << 14)
+#define LDBBSIFR_AL_MASK	(3 << 14)
+#define LDBBSIFR_SWPL		(1 << 10)
+#define LDBBSIFR_SWPW		(1 << 9)
+#define LDBBSIFR_SWPB		(1 << 8)
+#define LDBBSIFR_RY		(1 << 7)
+#define LDBBSIFR_CHRR_420	(2 << 0)
+#define LDBBSIFR_CHRR_422	(1 << 0)
+#define LDBBSIFR_CHRR_444	(0 << 0)
+#define LDBBSIFR_RPKF_ARGB32	(0x00 << 0)
+#define LDBBSIFR_RPKF_RGB16	(0x03 << 0)
+#define LDBBSIFR_RPKF_RGB24	(0x0b << 0)
+#define LDBBSIFR_RPKF_MASK	(0x1f << 0)
+#define LDBnBSSZR(n)		(0xb20 + (n) * 0x20 + 0x04)
+#define LDBBSSZR_BVSS_MASK	(0xfff << 16)
+#define LDBBSSZR_BVSS_SHIFT	16
+#define LDBBSSZR_BHSS_MASK	(0xfff << 0)
+#define LDBBSSZR_BHSS_SHIFT	0
+#define LDBnBLOCR(n)		(0xb20 + (n) * 0x20 + 0x08)
+#define LDBBLOCR_CVLC_MASK	(0xfff << 16)
+#define LDBBLOCR_CVLC_SHIFT	16
+#define LDBBLOCR_CHLC_MASK	(0xfff << 0)
+#define LDBBLOCR_CHLC_SHIFT	0
+#define LDBnBSMWR(n)		(0xb20 + (n) * 0x20 + 0x0c)
+#define LDBBSMWR_BSMWA_MASK	(0xffff << 16)
+#define LDBBSMWR_BSMWA_SHIFT	16
+#define LDBBSMWR_BSMW_MASK	(0xffff << 0)
+#define LDBBSMWR_BSMW_SHIFT	0
+#define LDBnBSAYR(n)		(0xb20 + (n) * 0x20 + 0x10)
+#define LDBBSAYR_FG1A_MASK	(0xff << 24)
+#define LDBBSAYR_FG1A_SHIFT	24
+#define LDBBSAYR_FG1R_MASK	(0xff << 16)
+#define LDBBSAYR_FG1R_SHIFT	16
+#define LDBBSAYR_FG1G_MASK	(0xff << 8)
+#define LDBBSAYR_FG1G_SHIFT	8
+#define LDBBSAYR_FG1B_MASK	(0xff << 0)
+#define LDBBSAYR_FG1B_SHIFT	0
+#define LDBnBSACR(n)		(0xb20 + (n) * 0x20 + 0x14)
+#define LDBBSACR_FG2A_MASK	(0xff << 24)
+#define LDBBSACR_FG2A_SHIFT	24
+#define LDBBSACR_FG2R_MASK	(0xff << 16)
+#define LDBBSACR_FG2R_SHIFT	16
+#define LDBBSACR_FG2G_MASK	(0xff << 8)
+#define LDBBSACR_FG2G_SHIFT	8
+#define LDBBSACR_FG2B_MASK	(0xff << 0)
+#define LDBBSACR_FG2B_SHIFT	0
+#define LDBnBSAAR(n)		(0xb20 + (n) * 0x20 + 0x18)
+#define LDBBSAAR_AP_MASK	(0xff << 24)
+#define LDBBSAAR_AP_SHIFT	24
+#define LDBBSAAR_R_MASK		(0xff << 16)
+#define LDBBSAAR_R_SHIFT	16
+#define LDBBSAAR_GY_MASK	(0xff << 8)
+#define LDBBSAAR_GY_SHIFT	8
+#define LDBBSAAR_B_MASK		(0xff << 0)
+#define LDBBSAAR_B_SHIFT	0
+#define LDBnBPPCR(n)		(0xb20 + (n) * 0x20 + 0x1c)
+#define LDBBPPCR_AP_MASK	(0xff << 24)
+#define LDBBPPCR_AP_SHIFT	24
+#define LDBBPPCR_R_MASK		(0xff << 16)
+#define LDBBPPCR_R_SHIFT	16
+#define LDBBPPCR_GY_MASK	(0xff << 8)
+#define LDBBPPCR_GY_SHIFT	8
+#define LDBBPPCR_B_MASK		(0xff << 0)
+#define LDBBPPCR_B_SHIFT	0
+#define LDBnBBGCL(n)		(0xb10 + (n) * 0x04)
+#define LDBBBGCL_BGA_MASK	(0xff << 24)
+#define LDBBBGCL_BGA_SHIFT	24
+#define LDBBBGCL_BGR_MASK	(0xff << 16)
+#define LDBBBGCL_BGR_SHIFT	16
+#define LDBBBGCL_BGG_MASK	(0xff << 8)
+#define LDBBBGCL_BGG_SHIFT	8
+#define LDBBBGCL_BGB_MASK	(0xff << 0)
+#define LDBBBGCL_BGB_SHIFT	0
+
+#define LCDC_SIDE_B_OFFSET	0x1000
+#define LCDC_MIRROR_OFFSET	0x2000
+
+static inline bool lcdc_is_banked(u32 reg)
+{
+	switch (reg) {
+	case LDMT1R:
+	case LDMT2R:
+	case LDMT3R:
+	case LDDFR:
+	case LDSM1R:
+	case LDSA1R:
+	case LDSA2R:
+	case LDMLSR:
+	case LDWBFR:
+	case LDWBCNTR:
+	case LDWBAR:
+	case LDHCNR:
+	case LDHSYNR:
+	case LDVLNR:
+	case LDVSYNR:
+	case LDHPDR:
+	case LDVPDR:
+	case LDHAJR:
+		return true;
+	default:
+		return reg >= LDBnBBGCL(0) && reg <= LDBnBPPCR(3);
+	}
+}
+
+static inline void lcdc_write_mirror(struct shmob_drm_device *sdev, u32 reg,
+				     u32 data)
+{
+	iowrite32(data, sdev->mmio + reg + LCDC_MIRROR_OFFSET);
+}
+
+static inline void lcdc_write(struct shmob_drm_device *sdev, u32 reg, u32 data)
+{
+	iowrite32(data, sdev->mmio + reg);
+	if (lcdc_is_banked(reg))
+		iowrite32(data, sdev->mmio + reg + LCDC_SIDE_B_OFFSET);
+}
+
+static inline u32 lcdc_read(struct shmob_drm_device *sdev, u32 reg)
+{
+	return ioread32(sdev->mmio + reg);
+}
+
+static inline int lcdc_wait_bit(struct shmob_drm_device *sdev, u32 reg,
+				u32 mask, u32 until)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(5);
+
+	while ((lcdc_read(sdev, reg) & mask) != until) {
+		if (time_after(jiffies, timeout))
+			return -ETIMEDOUT;
+		cpu_relax();
+	}
+
+	return 0;
+}
+
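+/*
+ * Editor's note (usage sketch under assumed LDDCKSTPR semantics, not in
+ * the original file): lcdc_wait_bit() gives callers a 5 ms bounded poll,
+ * e.g. to wait for the dot clock to acknowledge a stop request:
+ *
+ *	lcdc_write(sdev, LDDCKSTPR, LDDCKSTPR_DCKSTP);
+ *	if (lcdc_wait_bit(sdev, LDDCKSTPR, LDDCKSTPR_DCKSTS,
+ *			  LDDCKSTPR_DCKSTS) < 0)
+ *		dev_warn(sdev->dev, "dot clock failed to stop\n");
+ */
+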
+#endif /* __SHMOB_DRM_REGS_H__ */
diff --git a/linux-imx/drivers/gpu/drm/sis/Makefile b/linux-imx/drivers/gpu/drm/sis/Makefile
new file mode 100644
index 0000000..441c061
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/sis/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y = -Iinclude/drm
+sis-y := sis_drv.o sis_mm.o
+
+obj-$(CONFIG_DRM_SIS)   += sis.o
diff --git a/linux-imx/drivers/gpu/drm/sis/sis_drv.c b/linux-imx/drivers/gpu/drm/sis/sis_drv.c
new file mode 100644
index 0000000..5a5325e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/sis/sis_drv.c
@@ -0,0 +1,145 @@
+/* sis.c -- sis driver -*- linux-c -*-
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/sis_drm.h>
+#include "sis_drv.h"
+
+#include <drm/drm_pciids.h>
+
+static struct pci_device_id pciidlist[] = {
+	sisdrv_PCI_IDS
+};
+
+static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+	drm_sis_private_t *dev_priv;
+
+	pci_set_master(dev->pdev);
+
+	dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	idr_init(&dev_priv->object_idr);
+	dev->dev_private = (void *)dev_priv;
+	dev_priv->chipset = chipset;
+
+	return 0;
+}
+
+static int sis_driver_unload(struct drm_device *dev)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+
+	idr_destroy(&dev_priv->object_idr);
+
+	kfree(dev_priv);
+
+	return 0;
+}
+
+static const struct file_operations sis_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct sis_file_private *file_priv;
+
+	DRM_DEBUG_DRIVER("\n");
+	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+
+	INIT_LIST_HEAD(&file_priv->obj_list);
+
+	return 0;
+}
+
+void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct sis_file_private *file_priv = file->driver_priv;
+
+	kfree(file_priv);
+}
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
+	.load = sis_driver_load,
+	.unload = sis_driver_unload,
+	.open = sis_driver_open,
+	.preclose = sis_reclaim_buffers_locked,
+	.postclose = sis_driver_postclose,
+	.dma_quiescent = sis_idle,
+	.lastclose = sis_lastclose,
+	.ioctls = sis_ioctls,
+	.fops = &sis_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct pci_driver sis_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+};
+
+static int __init sis_init(void)
+{
+	driver.num_ioctls = sis_max_ioctl;
+	return drm_pci_init(&driver, &sis_pci_driver);
+}
+
+static void __exit sis_exit(void)
+{
+	drm_pci_exit(&driver, &sis_pci_driver);
+}
+
+module_init(sis_init);
+module_exit(sis_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/linux-imx/drivers/gpu/drm/sis/sis_drv.h b/linux-imx/drivers/gpu/drm/sis/sis_drv.h
new file mode 100644
index 0000000..13b527b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/sis/sis_drv.h
@@ -0,0 +1,76 @@
+/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SIS_DRV_H_
+#define _SIS_DRV_H_
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"SIS, Tungsten Graphics"
+#define DRIVER_NAME		"sis"
+#define DRIVER_DESC		"SIS 300/630/540 and XGI V3XE/V5/V8"
+#define DRIVER_DATE		"20070626"
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		3
+#define DRIVER_PATCHLEVEL	0
+
+enum sis_family {
+	SIS_OTHER = 0,
+	SIS_CHIP_315 = 1,
+};
+
+#include <drm/drm_mm.h>
+
+
+#define SIS_BASE (dev_priv->mmio)
+#define SIS_READ(reg)         DRM_READ32(SIS_BASE, reg)
+#define SIS_WRITE(reg, val)   DRM_WRITE32(SIS_BASE, reg, val)
+
+typedef struct drm_sis_private {
+	drm_local_map_t *mmio;
+	unsigned int idle_fault;
+	unsigned int chipset;
+	int vram_initialized;
+	int agp_initialized;
+	unsigned long vram_offset;
+	unsigned long agp_offset;
+	struct drm_mm vram_mm;
+	struct drm_mm agp_mm;
+	/** Mapping of userspace keys to mm objects */
+	struct idr object_idr;
+} drm_sis_private_t;
+
+extern int sis_idle(struct drm_device *dev);
+extern void sis_reclaim_buffers_locked(struct drm_device *dev,
+				       struct drm_file *file_priv);
+extern void sis_lastclose(struct drm_device *dev);
+
+extern struct drm_ioctl_desc sis_ioctls[];
+extern int sis_max_ioctl;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/sis/sis_mm.c b/linux-imx/drivers/gpu/drm/sis/sis_mm.c
new file mode 100644
index 0000000..9a43d98
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/sis/sis_mm.c
@@ -0,0 +1,360 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Authors:
+ *    Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/sis_drm.h>
+#include "sis_drv.h"
+
+#include <video/sisfb.h>
+
+#define VIDEO_TYPE 0
+#define AGP_TYPE 1
+
+
+struct sis_memblock {
+	struct drm_mm_node mm_node;
+	struct sis_memreq req;
+	struct list_head owner_list;
+};
+
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+/* fb management via fb device */
+
+#define SIS_MM_ALIGN_SHIFT 0
+#define SIS_MM_ALIGN_MASK 0
+
+#else /* CONFIG_FB_SIS[_MODULE] */
+
+#define SIS_MM_ALIGN_SHIFT 4
+#define SIS_MM_ALIGN_MASK ((1 << SIS_MM_ALIGN_SHIFT) - 1)
+
+#endif /* CONFIG_FB_SIS[_MODULE] */
+
+static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+	drm_sis_fb_t *fb = data;
+
+	mutex_lock(&dev->struct_mutex);
+	/* Unconditionally init the drm_mm, even though we don't use it when
+	 * the sisfb driver is available, to make cleanup easier. */
+	drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> SIS_MM_ALIGN_SHIFT);
+
+	dev_priv->vram_initialized = 1;
+	dev_priv->vram_offset = fb->offset;
+
+	mutex_unlock(&dev->struct_mutex);
+	DRM_DEBUG("offset = %lu, size = %lu\n", fb->offset, fb->size);
+
+	return 0;
+}
+
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
+			 void *data, int pool)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+	drm_sis_mem_t *mem = data;
+	int retval = 0, user_key;
+	struct sis_memblock *item;
+	struct sis_file_private *file_priv = file->driver_priv;
+	unsigned long offset;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (!((pool == AGP_TYPE) ? dev_priv->agp_initialized :
+	      dev_priv->vram_initialized)) {
+		DRM_ERROR("Attempt to allocate from an uninitialized memory manager.\n");
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	item = kzalloc(sizeof(*item), GFP_KERNEL);
+	if (!item) {
+		retval = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
+	if (pool == AGP_TYPE) {
+		retval = drm_mm_insert_node(&dev_priv->agp_mm,
+					    &item->mm_node,
+					    mem->size, 0);
+		offset = item->mm_node.start;
+	} else {
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+		item->req.size = mem->size;
+		sis_malloc(&item->req);
+		if (item->req.size == 0)
+			retval = -ENOMEM;
+		offset = item->req.offset;
+#else
+		retval = drm_mm_insert_node(&dev_priv->vram_mm,
+					    &item->mm_node,
+					    mem->size, 0);
+		offset = item->mm_node.start;
+#endif
+	}
+	if (retval)
+		goto fail_alloc;
+
+	retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
+	if (retval < 0)
+		goto fail_idr;
+	user_key = retval;
+
+	list_add(&item->owner_list, &file_priv->obj_list);
+	mutex_unlock(&dev->struct_mutex);
+
+	mem->offset = ((pool == 0) ?
+		      dev_priv->vram_offset : dev_priv->agp_offset) +
+	    (offset << SIS_MM_ALIGN_SHIFT);
+	mem->free = user_key;
+	mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+
+	return 0;
+
+fail_idr:
+	drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+	kfree(item);
+	mutex_unlock(&dev->struct_mutex);
+
+	mem->offset = 0;
+	mem->size = 0;
+	mem->free = 0;
+
+	DRM_DEBUG("alloc %d, size = %ld, offset = %ld\n", pool, mem->size,
+		  mem->offset);
+
+	return retval;
+}
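+
+/*
+ * Editor's note (worked example): with the fb driver absent,
+ * SIS_MM_ALIGN_SHIFT is 4, so the allocator works in 16-byte units. A
+ * 100-byte request becomes (100 + 15) >> 4 == 7 units; on success the
+ * unit offset is shifted back (<< 4) and rebased on vram_offset or
+ * agp_offset, and mem->size is returned as 7 << 4 == 112 bytes.
+ */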
+
+static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+	drm_sis_mem_t *mem = data;
+	struct sis_memblock *obj;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = idr_find(&dev_priv->object_idr, mem->free);
+	if (obj == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	idr_remove(&dev_priv->object_idr, mem->free);
+	list_del(&obj->owner_list);
+	if (drm_mm_node_allocated(&obj->mm_node))
+		drm_mm_remove_node(&obj->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+	else
+		sis_free(obj->req.offset);
+#endif
+	kfree(obj);
+	mutex_unlock(&dev->struct_mutex);
+	DRM_DEBUG("free = 0x%lx\n", mem->free);
+
+	return 0;
+}
+
+static int sis_fb_alloc(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE);
+}
+
+static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+	drm_sis_agp_t *agp = data;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> SIS_MM_ALIGN_SHIFT);
+
+	dev_priv->agp_initialized = 1;
+	dev_priv->agp_offset = agp->offset;
+	mutex_unlock(&dev->struct_mutex);
+
+	DRM_DEBUG("offset = %lu, size = %lu\n", agp->offset, agp->size);
+	return 0;
+}
+
+static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv)
+{
+	return sis_drm_alloc(dev, file_priv, data, AGP_TYPE);
+}
+
+static drm_local_map_t *sis_reg_init(struct drm_device *dev)
+{
+	struct drm_map_list *entry;
+	drm_local_map_t *map;
+
+	list_for_each_entry(entry, &dev->maplist, head) {
+		map = entry->map;
+		if (!map)
+			continue;
+		if (map->type == _DRM_REGISTERS)
+			return map;
+	}
+	return NULL;
+}
+
+int sis_idle(struct drm_device *dev)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+	uint32_t idle_reg;
+	unsigned long end;
+	int i;
+
+	if (dev_priv->idle_fault)
+		return 0;
+
+	if (dev_priv->mmio == NULL) {
+		dev_priv->mmio = sis_reg_init(dev);
+		if (dev_priv->mmio == NULL) {
+			DRM_ERROR("Could not find register map.\n");
+			return 0;
+		}
+	}
+
+	/*
+	 * Implement a device switch here if needed
+	 */
+
+	if (dev_priv->chipset != SIS_CHIP_315)
+		return 0;
+
+	/*
+	 * Timeout after 3 seconds. We cannot use DRM_WAIT_ON here
+	 * because its polling frequency is too low.
+	 */
+
+	end = jiffies + (DRM_HZ * 3);
+
+	for (i = 0; i < 4; ++i) {
+		do {
+			idle_reg = SIS_READ(0x85cc);
+		} while (!time_after_eq(jiffies, end) &&
+			  ((idle_reg & 0x80000000) != 0x80000000));
+	}
+
+	if (time_after_eq(jiffies, end)) {
+		DRM_ERROR("Graphics engine idle timeout. "
+			  "Disabling idle check\n");
+		dev_priv->idle_fault = 1;
+	}
+
+	/*
+	 * The caller never sees an error code. It gets trapped
+	 * in libdrm.
+	 */
+
+	return 0;
+}
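+
+/*
+ * Editor's note: the loop above is an open-coded deadline poll on
+ * jiffies; reading 0x85cc back-to-back keeps the sampling rate far
+ * higher than DRM_WAIT_ON's scheduler-driven polling. Once the 3 s
+ * deadline passes, idle_fault permanently disables the check so a
+ * wedged engine cannot stall every later ioctl.
+ */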
+
+void sis_lastclose(struct drm_device *dev)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+
+	if (!dev_priv)
+		return;
+
+	mutex_lock(&dev->struct_mutex);
+	if (dev_priv->vram_initialized) {
+		drm_mm_takedown(&dev_priv->vram_mm);
+		dev_priv->vram_initialized = 0;
+	}
+	if (dev_priv->agp_initialized) {
+		drm_mm_takedown(&dev_priv->agp_mm);
+		dev_priv->agp_initialized = 0;
+	}
+	dev_priv->mmio = NULL;
+	mutex_unlock(&dev->struct_mutex);
+}
+
+void sis_reclaim_buffers_locked(struct drm_device *dev,
+				struct drm_file *file)
+{
+	struct sis_file_private *file_priv = file->driver_priv;
+	struct sis_memblock *entry, *next;
+
+	if (!(file->minor->master && file->master->lock.hw_lock))
+		return;
+
+	drm_idlelock_take(&file->master->lock);
+
+	mutex_lock(&dev->struct_mutex);
+	if (list_empty(&file_priv->obj_list)) {
+		mutex_unlock(&dev->struct_mutex);
+		drm_idlelock_release(&file->master->lock);
+
+		return;
+	}
+
+	sis_idle(dev);
+
+	list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+				 owner_list) {
+		list_del(&entry->owner_list);
+		if (drm_mm_node_allocated(&entry->mm_node))
+			drm_mm_remove_node(&entry->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+		else
+			sis_free(entry->req.offset);
+#endif
+		kfree(entry);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_idlelock_release(&file->master->lock);
+}
+
+struct drm_ioctl_desc sis_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+};
+
+int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
diff --git a/linux-imx/drivers/gpu/drm/tdfx/Makefile b/linux-imx/drivers/gpu/drm/tdfx/Makefile
new file mode 100644
index 0000000..0379f29
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tdfx/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+tdfx-y := tdfx_drv.o
+
+obj-$(CONFIG_DRM_TDFX)	+= tdfx.o
diff --git a/linux-imx/drivers/gpu/drm/tdfx/tdfx_drv.c b/linux-imx/drivers/gpu/drm/tdfx/tdfx_drv.c
new file mode 100644
index 0000000..ddfa743
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -0,0 +1,89 @@
+/* tdfx_drv.c -- tdfx driver -*- linux-c -*-
+ * Created: Thu Oct  7 10:38:32 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rickard E. (Rik) Faith <faith@valinux.com>
+ *    Daryll Strauss <daryll@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include "tdfx_drv.h"
+
+#include <drm/drm_pciids.h>
+
+static struct pci_device_id pciidlist[] = {
+	tdfx_PCI_IDS
+};
+
+static const struct file_operations tdfx_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_USE_MTRR,
+	.fops = &tdfx_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct pci_driver tdfx_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+};
+
+static int __init tdfx_init(void)
+{
+	return drm_pci_init(&driver, &tdfx_pci_driver);
+}
+
+static void __exit tdfx_exit(void)
+{
+	drm_pci_exit(&driver, &tdfx_pci_driver);
+}
+
+module_init(tdfx_init);
+module_exit(tdfx_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/linux-imx/drivers/gpu/drm/tdfx/tdfx_drv.h b/linux-imx/drivers/gpu/drm/tdfx/tdfx_drv.h
new file mode 100644
index 0000000..84204ec
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tdfx/tdfx_drv.h
@@ -0,0 +1,47 @@
+/* tdfx.h -- 3dfx DRM template customization -*- linux-c -*-
+ * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
+ */
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __TDFX_H__
+#define __TDFX_H__
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"VA Linux Systems Inc."
+
+#define DRIVER_NAME		"tdfx"
+#define DRIVER_DESC		"3dfx Banshee/Voodoo3+"
+#define DRIVER_DATE		"20010216"
+
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		0
+#define DRIVER_PATCHLEVEL	0
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/tilcdc/Kconfig b/linux-imx/drivers/gpu/drm/tilcdc/Kconfig
new file mode 100644
index 0000000..7a4d101
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tilcdc/Kconfig
@@ -0,0 +1,13 @@
+config DRM_TILCDC
+	tristate "DRM Support for TI LCDC Display Controller"
+	depends on DRM && OF && ARM
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_GEM_CMA_HELPER
+	select VIDEOMODE_HELPERS
+	select BACKLIGHT_CLASS_DEVICE
+	select BACKLIGHT_LCD_SUPPORT
+	help
+	  Choose this option if you have a TI SoC with an LCDC display
+	  controller, for example the AM33xx found in the BeagleBone, DA8xx,
+	  or OMAP-L1xx.  This driver replaces the FB_DA8XX fbdev driver.
diff --git a/linux-imx/drivers/gpu/drm/tilcdc/Makefile b/linux-imx/drivers/gpu/drm/tilcdc/Makefile
new file mode 100644
index 0000000..7d2eefe
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tilcdc/Makefile
@@ -0,0 +1,13 @@
+ccflags-y := -Iinclude/drm
+ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
+	ccflags-y += -Werror
+endif
+
+tilcdc-y := \
+	tilcdc_crtc.o \
+	tilcdc_tfp410.o \
+	tilcdc_slave.o \
+	tilcdc_panel.o \
+	tilcdc_drv.o
+
+obj-$(CONFIG_DRM_TILCDC)	+= tilcdc.o
diff --git a/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
new file mode 100644
index 0000000..5dd3c7d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -0,0 +1,602 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kfifo.h>
+
+#include "tilcdc_drv.h"
+#include "tilcdc_regs.h"
+
+struct tilcdc_crtc {
+	struct drm_crtc base;
+
+	const struct tilcdc_panel_info *info;
+	uint32_t dirty;
+	dma_addr_t start, end;
+	struct drm_pending_vblank_event *event;
+	int dpms;
+	wait_queue_head_t frame_done_wq;
+	bool frame_done;
+
+	/* fb currently set to scanout 0/1: */
+	struct drm_framebuffer *scanout[2];
+
+	/* for deferred fb unref's: */
+	DECLARE_KFIFO_PTR(unref_fifo, struct drm_framebuffer *);
+	struct work_struct work;
+};
+#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
+
+static void unref_worker(struct work_struct *work)
+{
+	struct tilcdc_crtc *tilcdc_crtc = container_of(work, struct tilcdc_crtc, work);
+	struct drm_device *dev = tilcdc_crtc->base.dev;
+	struct drm_framebuffer *fb;
+
+	mutex_lock(&dev->mode_config.mutex);
+	while (kfifo_get(&tilcdc_crtc->unref_fifo, &fb))
+		drm_framebuffer_unreference(fb);
+	mutex_unlock(&dev->mode_config.mutex);
+}
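+
+/*
+ * Editor's note: the unref is deferred to this worker because
+ * set_scanout() also runs from the vblank interrupt path, where
+ * mode_config.mutex (needed to drop the framebuffer reference in this
+ * kernel version) cannot be taken.
+ */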
+
+static void set_scanout(struct drm_crtc *crtc, int n)
+{
+	static const uint32_t base_reg[] = {
+			LCDC_DMA_FB_BASE_ADDR_0_REG, LCDC_DMA_FB_BASE_ADDR_1_REG,
+	};
+	static const uint32_t ceil_reg[] = {
+			LCDC_DMA_FB_CEILING_ADDR_0_REG, LCDC_DMA_FB_CEILING_ADDR_1_REG,
+	};
+	static const uint32_t stat[] = {
+			LCDC_END_OF_FRAME0, LCDC_END_OF_FRAME1,
+	};
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+
+	pm_runtime_get_sync(dev->dev);
+	tilcdc_write(dev, base_reg[n], tilcdc_crtc->start);
+	tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end);
+	if (tilcdc_crtc->scanout[n]) {
+		if (kfifo_put(&tilcdc_crtc->unref_fifo,
+				(const struct drm_framebuffer **)&tilcdc_crtc->scanout[n])) {
+			struct tilcdc_drm_private *priv = dev->dev_private;
+			queue_work(priv->wq, &tilcdc_crtc->work);
+		} else {
+			dev_err(dev->dev, "unref fifo full!\n");
+			drm_framebuffer_unreference(tilcdc_crtc->scanout[n]);
+		}
+	}
+	tilcdc_crtc->scanout[n] = crtc->fb;
+	drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
+	tilcdc_crtc->dirty &= ~stat[n];
+	pm_runtime_put_sync(dev->dev);
+}
+
+static void update_scanout(struct drm_crtc *crtc)
+{
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_framebuffer *fb = crtc->fb;
+	struct drm_gem_cma_object *gem;
+	unsigned int depth, bpp;
+
+	drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
+	gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+	tilcdc_crtc->start = gem->paddr + fb->offsets[0] +
+			(crtc->y * fb->pitches[0]) + (crtc->x * bpp/8);
+
+	tilcdc_crtc->end = tilcdc_crtc->start +
+			(crtc->mode.vdisplay * fb->pitches[0]);
+
+	if (tilcdc_crtc->dpms == DRM_MODE_DPMS_ON) {
+		/* already enabled, so just mark the frames that need
+		 * updating and they will be updated on vblank:
+		 */
+		tilcdc_crtc->dirty |= LCDC_END_OF_FRAME0 | LCDC_END_OF_FRAME1;
+		drm_vblank_get(dev, 0);
+	} else {
+		/* not enabled yet, so update registers immediately: */
+		set_scanout(crtc, 0);
+		set_scanout(crtc, 1);
+	}
+}
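+
+/*
+ * Editor's note (worked example, hypothetical numbers): for a 32 bpp
+ * 800x480 mode with crtc->x == crtc->y == 0, offsets[0] == 0 and
+ * pitches[0] == 3200, the DMA window computed above is
+ *
+ *	start = paddr
+ *	end   = paddr + 480 * 3200
+ *
+ * i.e. exactly one frame; panning down one row adds 3200 to both.
+ */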
+
+static void start(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct tilcdc_drm_private *priv = dev->dev_private;
+
+	if (priv->rev == 2) {
+		tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
+		msleep(1);
+		tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
+		msleep(1);
+	}
+
+	tilcdc_set(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
+	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
+	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+}
+
+static void stop(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+
+	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+}
+
+static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+
+	WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON);
+
+	drm_crtc_cleanup(crtc);
+	WARN_ON(!kfifo_is_empty(&tilcdc_crtc->unref_fifo));
+	kfifo_free(&tilcdc_crtc->unref_fifo);
+	kfree(tilcdc_crtc);
+}
+
+static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
+		struct drm_framebuffer *fb,
+		struct drm_pending_vblank_event *event)
+{
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+
+	if (tilcdc_crtc->event) {
+		dev_err(dev->dev, "already pending page flip!\n");
+		return -EBUSY;
+	}
+
+	crtc->fb = fb;
+	tilcdc_crtc->event = event;
+	update_scanout(crtc);
+
+	return 0;
+}
+
+static void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct tilcdc_drm_private *priv = dev->dev_private;
+
+	/* we really only care about on or off: */
+	if (mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
+
+	if (tilcdc_crtc->dpms == mode)
+		return;
+
+	tilcdc_crtc->dpms = mode;
+
+	pm_runtime_get_sync(dev->dev);
+
+	if (mode == DRM_MODE_DPMS_ON) {
+		pm_runtime_forbid(dev->dev);
+		start(crtc);
+	} else {
+		tilcdc_crtc->frame_done = false;
+		stop(crtc);
+
+		/* If necessary, wait for the framedone irq (which will still
+		 * come) before putting things to sleep.
+		 */
+		if (priv->rev == 2) {
+			int ret = wait_event_timeout(
+					tilcdc_crtc->frame_done_wq,
+					tilcdc_crtc->frame_done,
+					msecs_to_jiffies(50));
+			if (ret == 0)
+				dev_err(dev->dev, "timeout waiting for framedone\n");
+		}
+		pm_runtime_allow(dev->dev);
+	}
+
+	pm_runtime_put_sync(dev->dev);
+}
+
+static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void tilcdc_crtc_prepare(struct drm_crtc *crtc)
+{
+	tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void tilcdc_crtc_commit(struct drm_crtc *crtc)
+{
+	tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode,
+		int x, int y,
+		struct drm_framebuffer *old_fb)
+{
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	const struct tilcdc_panel_info *info = tilcdc_crtc->info;
+	uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
+	int ret;
+
+	ret = tilcdc_crtc_mode_valid(crtc, mode);
+	if (WARN_ON(ret))
+		return ret;
+
+	if (WARN_ON(!info))
+		return -EINVAL;
+
+	pm_runtime_get_sync(dev->dev);
+
+	/* Configure the Burst Size and fifo threshold of DMA: */
+	reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
+	switch (info->dma_burst_sz) {
+	case 1:
+		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
+		break;
+	case 2:
+		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
+		break;
+	case 4:
+		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
+		break;
+	case 8:
+		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
+		break;
+	case 16:
+		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
+		break;
+	default:
+		return -EINVAL;
+	}
+	reg |= (info->fifo_th << 8);
+	tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);
+
+	/* Configure timings: */
+	hbp = mode->htotal - mode->hsync_end;
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsw = mode->hsync_end - mode->hsync_start;
+	vbp = mode->vtotal - mode->vsync_end;
+	vfp = mode->vsync_start - mode->vdisplay;
+	vsw = mode->vsync_end - mode->vsync_start;
+
+	DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
+			mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
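+
+	/*
+	 * Editor's note (worked example, hypothetical mode): for an
+	 * 800x480 panel with hsync_start = 839, hsync_end = 888 and
+	 * htotal = 928, the arithmetic above yields hfp = 39, hsw = 49
+	 * and hbp = 40 -- the usual front-porch/sync/back-porch split.
+	 */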
+
+	/* Configure the AC Bias Period and Number of Transitions per Interrupt: */
+	reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
+	reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
+		LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
+	if (priv->rev == 2) {
+		reg |= (hfp & 0x300) >> 8;
+		reg |= (hbp & 0x300) >> 4;
+		reg |= (hsw & 0x3c0) << 21;
+	}
+	tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);
+
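+	/* Timing 0: the pixels-per-line field is programmed as (hdisplay/16)-1: */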
+	reg = (((mode->hdisplay >> 4) - 1) << 4) |
+		((hbp & 0xff) << 24) |
+		((hfp & 0xff) << 16) |
+		((hsw & 0x3f) << 10);
+	if (priv->rev == 2)
+		reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
+	tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);
+
+	reg = ((mode->vdisplay - 1) & 0x3ff) |
+		((vbp & 0xff) << 24) |
+		((vfp & 0xff) << 16) |
+		((vsw & 0x3f) << 10);
+	tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);
+
+	/* Configure display type: */
+	reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
+		~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
+			LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK | 0x000ff000);
+	reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
+	if (info->tft_alt_mode)
+		reg |= LCDC_TFT_ALT_ENABLE;
+	if (priv->rev == 2) {
+		unsigned int depth, bpp;
+
+		drm_fb_get_bpp_depth(crtc->fb->pixel_format, &depth, &bpp);
+		switch (bpp) {
+		case 16:
+			break;
+		case 32:
+			reg |= LCDC_V2_TFT_24BPP_UNPACK;
+			/* fallthrough */
+		case 24:
+			reg |= LCDC_V2_TFT_24BPP_MODE;
+			break;
+		default:
+			dev_err(dev->dev, "invalid pixel format\n");
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+	reg |= info->fdd << 12; /* FDD field occupies bits 19:12 */
+	tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);
+
+	if (info->invert_pxl_clk)
+		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
+	else
+		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
+
+	if (info->sync_ctrl)
+		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
+	else
+		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
+
+	if (info->sync_edge)
+		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
+	else
+		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
+
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
+	else
+		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
+	else
+		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
+
+	if (info->raster_order)
+		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
+	else
+		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
+
+	update_scanout(crtc);
+	tilcdc_crtc_update_clk(crtc);
+
+	ret = 0;
+out:
+	/* balance the pm_runtime_get_sync() above on error paths too: */
+	pm_runtime_put_sync(dev->dev);
+
+	return ret;
+}
+
+static int tilcdc_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+		struct drm_framebuffer *old_fb)
+{
+	update_scanout(crtc);
+	return 0;
+}
+
+static void tilcdc_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
+		.destroy        = tilcdc_crtc_destroy,
+		.set_config     = drm_crtc_helper_set_config,
+		.page_flip      = tilcdc_crtc_page_flip,
+};
+
+static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
+		.dpms           = tilcdc_crtc_dpms,
+		.mode_fixup     = tilcdc_crtc_mode_fixup,
+		.prepare        = tilcdc_crtc_prepare,
+		.commit         = tilcdc_crtc_commit,
+		.mode_set       = tilcdc_crtc_mode_set,
+		.mode_set_base  = tilcdc_crtc_mode_set_base,
+		.load_lut       = tilcdc_crtc_load_lut,
+};
+
+int tilcdc_crtc_max_width(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	int max_width = 0;
+
+	if (priv->rev == 1)
+		max_width = 1024;
+	else if (priv->rev == 2)
+		max_width = 2048;
+
+	return max_width;
+}
+
+int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+	struct tilcdc_drm_private *priv = crtc->dev->dev_private;
+	unsigned int bandwidth;
+
+	if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
+		return MODE_VIRTUAL_X;
+
+	/* width must be multiple of 16 */
+	if (mode->hdisplay & 0xf)
+		return MODE_VIRTUAL_X;
+
+	if (mode->vdisplay > 2048)
+		return MODE_VIRTUAL_Y;
+
+	/* filter out modes that would require too much memory bandwidth: */
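+	/* (e.g. with the default 1280*1024*60 cap of ~78.6 Mpix/s, a
+	 * hypothetical 1920x1080@60 mode at ~124.4 Mpix/s would be rejected,
+	 * while 1280x720@60 at ~55.3 Mpix/s would pass)
+	 */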
+	bandwidth = mode->hdisplay * mode->vdisplay * drm_mode_vrefresh(mode);
+	if (bandwidth > priv->max_bandwidth)
+		return MODE_BAD;
+
+	return MODE_OK;
+}
+
+void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
+		const struct tilcdc_panel_info *info)
+{
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+	tilcdc_crtc->info = info;
+}
+
+void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
+{
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	int dpms = tilcdc_crtc->dpms;
+	unsigned int lcd_clk, div;
+	int ret;
+
+	pm_runtime_get_sync(dev->dev);
+
+	if (dpms == DRM_MODE_DPMS_ON)
+		tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+	/* in raster mode, minimum divisor is 2: */
+	ret = clk_set_rate(priv->disp_clk, crtc->mode.clock * 1000 * 2);
+	if (ret) {
+		dev_err(dev->dev, "failed to set display clock rate to: %d\n",
+				crtc->mode.clock);
+		goto out;
+	}
+
+	lcd_clk = clk_get_rate(priv->clk);
+	div = lcd_clk / (crtc->mode.clock * 1000);
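+	/* (e.g. a hypothetical 32000 kHz mode clock asks the dpll for 64 MHz;
+	 * if the functional clock then runs at 64 MHz, div = 2)
+	 */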
+
+	DBG("lcd_clk=%u, mode clock=%d, div=%u", lcd_clk, crtc->mode.clock, div);
+	DBG("fck=%lu, dpll_disp_ck=%lu", clk_get_rate(priv->clk), clk_get_rate(priv->disp_clk));
+
+	/* Configure the LCD clock divisor. */
+	tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(div) |
+			LCDC_RASTER_MODE);
+
+	if (priv->rev == 2)
+		tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
+				LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
+				LCDC_V2_CORE_CLK_EN);
+
+	if (dpms == DRM_MODE_DPMS_ON)
+		tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+
+out:
+	pm_runtime_put_sync(dev->dev);
+}
+
+irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
+{
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	uint32_t stat = tilcdc_read_irqstatus(dev);
+
+	if ((stat & LCDC_SYNC_LOST) && (stat & LCDC_FIFO_UNDERFLOW)) {
+		stop(crtc);
+		dev_err(dev->dev, "error: %08x\n", stat);
+		tilcdc_clear_irqstatus(dev, stat);
+		start(crtc);
+	} else if (stat & LCDC_PL_LOAD_DONE) {
+		tilcdc_clear_irqstatus(dev, stat);
+	} else {
+		struct drm_pending_vblank_event *event;
+		unsigned long flags;
+		uint32_t dirty = tilcdc_crtc->dirty & stat;
+
+		tilcdc_clear_irqstatus(dev, stat);
+
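+		/* the DMA engine ping-pongs between two frame buffers;
+		 * EOF0/EOF1 indicate which one just finished scanning out:
+		 */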
+		if (dirty & LCDC_END_OF_FRAME0)
+			set_scanout(crtc, 0);
+
+		if (dirty & LCDC_END_OF_FRAME1)
+			set_scanout(crtc, 1);
+
+		drm_handle_vblank(dev, 0);
+
+		spin_lock_irqsave(&dev->event_lock, flags);
+		event = tilcdc_crtc->event;
+		tilcdc_crtc->event = NULL;
+		if (event)
+			drm_send_vblank_event(dev, 0, event);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+
+		if (dirty && !tilcdc_crtc->dirty)
+			drm_vblank_put(dev, 0);
+	}
+
+	if (priv->rev == 2) {
+		if (stat & LCDC_FRAME_DONE) {
+			tilcdc_crtc->frame_done = true;
+			wake_up(&tilcdc_crtc->frame_done_wq);
+		}
+		tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+	struct drm_pending_vblank_event *event;
+	struct drm_device *dev = crtc->dev;
+	unsigned long flags;
+
+	/* Destroy the pending vertical blanking event associated with the
+	 * pending page flip, if any, and disable vertical blanking interrupts.
+	 */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = tilcdc_crtc->event;
+	if (event && event->base.file_priv == file) {
+		tilcdc_crtc->event = NULL;
+		event->base.destroy(&event->base);
+		drm_vblank_put(dev, 0);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
+{
+	struct tilcdc_crtc *tilcdc_crtc;
+	struct drm_crtc *crtc;
+	int ret;
+
+	tilcdc_crtc = kzalloc(sizeof(*tilcdc_crtc), GFP_KERNEL);
+	if (!tilcdc_crtc) {
+		dev_err(dev->dev, "allocation failed\n");
+		return NULL;
+	}
+
+	crtc = &tilcdc_crtc->base;
+
+	tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
+	init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
+
+	ret = kfifo_alloc(&tilcdc_crtc->unref_fifo, 16, GFP_KERNEL);
+	if (ret) {
+		dev_err(dev->dev, "could not allocate unref FIFO\n");
+		goto fail;
+	}
+
+	INIT_WORK(&tilcdc_crtc->work, unref_worker);
+
+	ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
+	if (ret < 0)
+		goto fail;
+
+	drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);
+
+	return crtc;
+
+fail:
+	tilcdc_crtc_destroy(crtc);
+	return NULL;
+}
diff --git a/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_drv.c
new file mode 100644
index 0000000..2b5461b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -0,0 +1,610 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* LCDC DRM driver, based on da8xx-fb */
+
+#include "tilcdc_drv.h"
+#include "tilcdc_regs.h"
+#include "tilcdc_tfp410.h"
+#include "tilcdc_slave.h"
+#include "tilcdc_panel.h"
+
+#include "drm_fb_helper.h"
+
+static LIST_HEAD(module_list);
+
+void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
+		const struct tilcdc_module_ops *funcs)
+{
+	mod->name = name;
+	mod->funcs = funcs;
+	INIT_LIST_HEAD(&mod->list);
+	list_add(&mod->list, &module_list);
+}
+
+void tilcdc_module_cleanup(struct tilcdc_module *mod)
+{
+	list_del(&mod->list);
+}
+
+static struct of_device_id tilcdc_of_match[];
+
+static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
+		struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	return drm_fb_cma_create(dev, file_priv, mode_cmd);
+}
+
+static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	if (priv->fbdev)
+		drm_fbdev_cma_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+	.fb_create = tilcdc_fb_create,
+	.output_poll_changed = tilcdc_fb_output_poll_changed,
+};
+
+static int modeset_init(struct drm_device *dev)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	struct tilcdc_module *mod;
+
+	drm_mode_config_init(dev);
+
+	priv->crtc = tilcdc_crtc_create(dev);
+
+	list_for_each_entry(mod, &module_list, list) {
+		DBG("loading module: %s", mod->name);
+		mod->funcs->modeset_init(mod, dev);
+	}
+
+	if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
+		/* oh nos! */
+		dev_err(dev->dev, "no encoders/connectors found\n");
+		return -ENXIO;
+	}
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = tilcdc_crtc_max_width(priv->crtc);
+	dev->mode_config.max_height = 2048;
+	dev->mode_config.funcs = &mode_config_funcs;
+
+	return 0;
+}
+
+#ifdef CONFIG_CPU_FREQ
+static int cpufreq_transition(struct notifier_block *nb,
+				     unsigned long val, void *data)
+{
+	struct tilcdc_drm_private *priv = container_of(nb,
+			struct tilcdc_drm_private, freq_transition);
+	if (val == CPUFREQ_POSTCHANGE) {
+		if (priv->lcd_fck_rate != clk_get_rate(priv->clk)) {
+			priv->lcd_fck_rate = clk_get_rate(priv->clk);
+			tilcdc_crtc_update_clk(priv->crtc);
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * DRM operations:
+ */
+
+static int tilcdc_unload(struct drm_device *dev)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	struct tilcdc_module *mod, *cur;
+
+	drm_kms_helper_poll_fini(dev);
+	drm_mode_config_cleanup(dev);
+	drm_vblank_cleanup(dev);
+
+	pm_runtime_get_sync(dev->dev);
+	drm_irq_uninstall(dev);
+	pm_runtime_put_sync(dev->dev);
+
+#ifdef CONFIG_CPU_FREQ
+	cpufreq_unregister_notifier(&priv->freq_transition,
+			CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+
+	if (priv->clk)
+		clk_put(priv->clk);
+
+	if (priv->mmio)
+		iounmap(priv->mmio);
+
+	flush_workqueue(priv->wq);
+	destroy_workqueue(priv->wq);
+
+	dev->dev_private = NULL;
+
+	pm_runtime_disable(dev->dev);
+
+	list_for_each_entry_safe(mod, cur, &module_list, list) {
+		DBG("destroying module: %s", mod->name);
+		mod->funcs->destroy(mod);
+	}
+
+	kfree(priv);
+
+	return 0;
+}
+
+static int tilcdc_load(struct drm_device *dev, unsigned long flags)
+{
+	struct platform_device *pdev = dev->platformdev;
+	struct device_node *node = pdev->dev.of_node;
+	struct tilcdc_drm_private *priv;
+	struct resource *res;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(dev->dev, "failed to allocate private data\n");
+		return -ENOMEM;
+	}
+
+	dev->dev_private = priv;
+
+	priv->wq = alloc_ordered_workqueue("tilcdc", 0);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev->dev, "failed to get memory resource\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	priv->mmio = ioremap_nocache(res->start, resource_size(res));
+	if (!priv->mmio) {
+		dev_err(dev->dev, "failed to ioremap\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	priv->clk = clk_get(dev->dev, "fck");
+	if (IS_ERR(priv->clk)) {
+		dev_err(dev->dev, "failed to get functional clock\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
+	if (IS_ERR(priv->disp_clk)) {
+		dev_err(dev->dev, "failed to get display clock\n");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+#ifdef CONFIG_CPU_FREQ
+	priv->lcd_fck_rate = clk_get_rate(priv->clk);
+	priv->freq_transition.notifier_call = cpufreq_transition;
+	ret = cpufreq_register_notifier(&priv->freq_transition,
+			CPUFREQ_TRANSITION_NOTIFIER);
+	if (ret) {
+		dev_err(dev->dev, "failed to register cpufreq notifier\n");
+		goto fail;
+	}
+#endif
+
+	if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
+		priv->max_bandwidth = 1280 * 1024 * 60;
+
+	pm_runtime_enable(dev->dev);
+
+	/* Determine LCD IP Version */
+	pm_runtime_get_sync(dev->dev);
+	switch (tilcdc_read(dev, LCDC_PID_REG)) {
+	case 0x4c100102:
+		priv->rev = 1;
+		break;
+	case 0x4f200800:
+	case 0x4f201000:
+		priv->rev = 2;
+		break;
+	default:
+		dev_warn(dev->dev, "Unknown PID Reg value 0x%08x, "
+				"defaulting to LCD revision 1\n",
+				tilcdc_read(dev, LCDC_PID_REG));
+		priv->rev = 1;
+		break;
+	}
+
+	pm_runtime_put_sync(dev->dev);
+
+	ret = modeset_init(dev);
+	if (ret < 0) {
+		dev_err(dev->dev, "failed to initialize mode setting\n");
+		goto fail;
+	}
+
+	ret = drm_vblank_init(dev, 1);
+	if (ret < 0) {
+		dev_err(dev->dev, "failed to initialize vblank\n");
+		goto fail;
+	}
+
+	pm_runtime_get_sync(dev->dev);
+	ret = drm_irq_install(dev);
+	pm_runtime_put_sync(dev->dev);
+	if (ret < 0) {
+		dev_err(dev->dev, "failed to install IRQ handler\n");
+		goto fail;
+	}
+
+	platform_set_drvdata(pdev, dev);
+
+	priv->fbdev = drm_fbdev_cma_init(dev, 16,
+			dev->mode_config.num_crtc,
+			dev->mode_config.num_connector);
+
+	drm_kms_helper_poll_init(dev);
+
+	return 0;
+
+fail:
+	tilcdc_unload(dev);
+	return ret;
+}
+
+static void tilcdc_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+
+	tilcdc_crtc_cancel_page_flip(priv->crtc, file);
+}
+
+static void tilcdc_lastclose(struct drm_device *dev)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	drm_fbdev_cma_restore_mode(priv->fbdev);
+}
+
+static irqreturn_t tilcdc_irq(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = arg;
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	return tilcdc_crtc_irq(priv->crtc);
+}
+
+static void tilcdc_irq_preinstall(struct drm_device *dev)
+{
+	tilcdc_clear_irqstatus(dev, 0xffffffff);
+}
+
+static int tilcdc_irq_postinstall(struct drm_device *dev)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+
+	/* enable FIFO underflow irq: */
+	if (priv->rev == 1)
+		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
+	else
+		tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA);
+
+	return 0;
+}
+
+static void tilcdc_irq_uninstall(struct drm_device *dev)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+
+	/* disable irqs that we might have enabled: */
+	if (priv->rev == 1) {
+		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
+				LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
+		tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_V1_END_OF_FRAME_INT_ENA);
+	} else {
+		tilcdc_clear(dev, LCDC_INT_ENABLE_SET_REG,
+			LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
+			LCDC_V2_END_OF_FRAME0_INT_ENA | LCDC_V2_END_OF_FRAME1_INT_ENA |
+			LCDC_FRAME_DONE);
+	}
+}
+
+static void enable_vblank(struct drm_device *dev, bool enable)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	u32 reg, mask;
+
+	if (priv->rev == 1) {
+		reg = LCDC_DMA_CTRL_REG;
+		mask = LCDC_V1_END_OF_FRAME_INT_ENA;
+	} else {
+		reg = LCDC_INT_ENABLE_SET_REG;
+		mask = LCDC_V2_END_OF_FRAME0_INT_ENA |
+			LCDC_V2_END_OF_FRAME1_INT_ENA | LCDC_FRAME_DONE;
+	}
+
+	if (enable)
+		tilcdc_set(dev, reg, mask);
+	else
+		tilcdc_clear(dev, reg, mask);
+}
+
+static int tilcdc_enable_vblank(struct drm_device *dev, int crtc)
+{
+	enable_vblank(dev, true);
+	return 0;
+}
+
+static void tilcdc_disable_vblank(struct drm_device *dev, int crtc)
+{
+	enable_vblank(dev, false);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_PM_SLEEP)
+static const struct {
+	const char *name;
+	uint8_t  rev;
+	uint8_t  save;
+	uint32_t reg;
+} registers[] =		{
+#define REG(rev, save, reg) { #reg, rev, save, reg }
+		/* exists in revision 1: */
+		REG(1, false, LCDC_PID_REG),
+		REG(1, true,  LCDC_CTRL_REG),
+		REG(1, false, LCDC_STAT_REG),
+		REG(1, true,  LCDC_RASTER_CTRL_REG),
+		REG(1, true,  LCDC_RASTER_TIMING_0_REG),
+		REG(1, true,  LCDC_RASTER_TIMING_1_REG),
+		REG(1, true,  LCDC_RASTER_TIMING_2_REG),
+		REG(1, true,  LCDC_DMA_CTRL_REG),
+		REG(1, true,  LCDC_DMA_FB_BASE_ADDR_0_REG),
+		REG(1, true,  LCDC_DMA_FB_CEILING_ADDR_0_REG),
+		REG(1, true,  LCDC_DMA_FB_BASE_ADDR_1_REG),
+		REG(1, true,  LCDC_DMA_FB_CEILING_ADDR_1_REG),
+		/* new in revision 2: */
+		REG(2, false, LCDC_RAW_STAT_REG),
+		REG(2, false, LCDC_MASKED_STAT_REG),
+		REG(2, false, LCDC_INT_ENABLE_SET_REG),
+		REG(2, false, LCDC_INT_ENABLE_CLR_REG),
+		REG(2, false, LCDC_END_OF_INT_IND_REG),
+		REG(2, true,  LCDC_CLK_ENABLE_REG),
+		REG(2, true,  LCDC_INT_ENABLE_SET_REG),
+#undef REG
+};
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static int tilcdc_regs_show(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	unsigned i;
+
+	pm_runtime_get_sync(dev->dev);
+
+	seq_printf(m, "revision: %d\n", priv->rev);
+
+	for (i = 0; i < ARRAY_SIZE(registers); i++)
+		if (priv->rev >= registers[i].rev)
+			seq_printf(m, "%s:\t %08x\n", registers[i].name,
+					tilcdc_read(dev, registers[i].reg));
+
+	pm_runtime_put_sync(dev->dev);
+
+	return 0;
+}
+
+static int tilcdc_mm_show(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	return drm_mm_dump_table(m, dev->mm_private);
+}
+
+static struct drm_info_list tilcdc_debugfs_list[] = {
+		{ "regs", tilcdc_regs_show, 0 },
+		{ "mm",   tilcdc_mm_show,   0 },
+		{ "fb",   drm_fb_cma_debugfs_show, 0 },
+};
+
+static int tilcdc_debugfs_init(struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct tilcdc_module *mod;
+	int ret;
+
+	ret = drm_debugfs_create_files(tilcdc_debugfs_list,
+			ARRAY_SIZE(tilcdc_debugfs_list),
+			minor->debugfs_root, minor);
+
+	list_for_each_entry(mod, &module_list, list)
+		if (mod->funcs->debugfs_init)
+			mod->funcs->debugfs_init(mod, minor);
+
+	if (ret) {
+		dev_err(dev->dev, "could not install tilcdc_debugfs_list\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static void tilcdc_debugfs_cleanup(struct drm_minor *minor)
+{
+	struct tilcdc_module *mod;
+	drm_debugfs_remove_files(tilcdc_debugfs_list,
+			ARRAY_SIZE(tilcdc_debugfs_list), minor);
+
+	list_for_each_entry(mod, &module_list, list)
+		if (mod->funcs->debugfs_cleanup)
+			mod->funcs->debugfs_cleanup(mod, minor);
+}
+#endif
+
+static const struct file_operations fops = {
+	.owner              = THIS_MODULE,
+	.open               = drm_open,
+	.release            = drm_release,
+	.unlocked_ioctl     = drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl       = drm_compat_ioctl,
+#endif
+	.poll               = drm_poll,
+	.read               = drm_read,
+	.fasync             = drm_fasync,
+	.llseek             = no_llseek,
+	.mmap               = drm_gem_cma_mmap,
+};
+
+static struct drm_driver tilcdc_driver = {
+	.driver_features    = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+	.load               = tilcdc_load,
+	.unload             = tilcdc_unload,
+	.preclose           = tilcdc_preclose,
+	.lastclose          = tilcdc_lastclose,
+	.irq_handler        = tilcdc_irq,
+	.irq_preinstall     = tilcdc_irq_preinstall,
+	.irq_postinstall    = tilcdc_irq_postinstall,
+	.irq_uninstall      = tilcdc_irq_uninstall,
+	.get_vblank_counter = drm_vblank_count,
+	.enable_vblank      = tilcdc_enable_vblank,
+	.disable_vblank     = tilcdc_disable_vblank,
+	.gem_free_object    = drm_gem_cma_free_object,
+	.gem_vm_ops         = &drm_gem_cma_vm_ops,
+	.dumb_create        = drm_gem_cma_dumb_create,
+	.dumb_map_offset    = drm_gem_cma_dumb_map_offset,
+	.dumb_destroy       = drm_gem_cma_dumb_destroy,
+#ifdef CONFIG_DEBUG_FS
+	.debugfs_init       = tilcdc_debugfs_init,
+	.debugfs_cleanup    = tilcdc_debugfs_cleanup,
+#endif
+	.fops               = &fops,
+	.name               = "tilcdc",
+	.desc               = "TI LCD Controller DRM",
+	.date               = "20121205",
+	.major              = 1,
+	.minor              = 0,
+};
+
+/*
+ * Power management:
+ */
+
+#ifdef CONFIG_PM_SLEEP
+static int tilcdc_pm_suspend(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct tilcdc_drm_private *priv = ddev->dev_private;
+	unsigned i, n = 0;
+
+	drm_kms_helper_poll_disable(ddev);
+
+	/* Save register state: */
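+	/* (resume below walks the same list in the same order, so the
+	 * saved_register[] indices match up)
+	 */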
+	for (i = 0; i < ARRAY_SIZE(registers); i++)
+		if (registers[i].save && (priv->rev >= registers[i].rev))
+			priv->saved_register[n++] = tilcdc_read(ddev, registers[i].reg);
+
+	return 0;
+}
+
+static int tilcdc_pm_resume(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct tilcdc_drm_private *priv = ddev->dev_private;
+	unsigned i, n = 0;
+
+	/* Restore register state: */
+	for (i = 0; i < ARRAY_SIZE(registers); i++)
+		if (registers[i].save && (priv->rev >= registers[i].rev))
+			tilcdc_write(ddev, registers[i].reg, priv->saved_register[n++]);
+
+	drm_kms_helper_poll_enable(ddev);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops tilcdc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(tilcdc_pm_suspend, tilcdc_pm_resume)
+};
+
+/*
+ * Platform driver:
+ */
+
+static int tilcdc_pdev_probe(struct platform_device *pdev)
+{
+	/* bail out early if no DT data: */
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev, "device-tree data is missing\n");
+		return -ENXIO;
+	}
+
+	return drm_platform_init(&tilcdc_driver, pdev);
+}
+
+static int tilcdc_pdev_remove(struct platform_device *pdev)
+{
+	drm_platform_exit(&tilcdc_driver, pdev);
+
+	return 0;
+}
+
+static struct of_device_id tilcdc_of_match[] = {
+		{ .compatible = "ti,am33xx-tilcdc", },
+		{ },
+};
+MODULE_DEVICE_TABLE(of, tilcdc_of_match);
+
+static struct platform_driver tilcdc_platform_driver = {
+	.probe      = tilcdc_pdev_probe,
+	.remove     = tilcdc_pdev_remove,
+	.driver     = {
+		.owner  = THIS_MODULE,
+		.name   = "tilcdc",
+		.pm     = &tilcdc_pm_ops,
+		.of_match_table = tilcdc_of_match,
+	},
+};
+
+static int __init tilcdc_drm_init(void)
+{
+	DBG("init");
+	tilcdc_tfp410_init();
+	tilcdc_slave_init();
+	tilcdc_panel_init();
+	return platform_driver_register(&tilcdc_platform_driver);
+}
+
+static void __exit tilcdc_drm_fini(void)
+{
+	DBG("fini");
+	tilcdc_tfp410_fini();
+	tilcdc_slave_fini();
+	tilcdc_panel_fini();
+	platform_driver_unregister(&tilcdc_platform_driver);
+}
+
+late_initcall(tilcdc_drm_init);
+module_exit(tilcdc_drm_fini);
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
+MODULE_DESCRIPTION("TI LCD Controller DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_drv.h
new file mode 100644
index 0000000..8242b5a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_DRV_H__
+#define __TILCDC_DRV_H__
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/list.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+struct tilcdc_drm_private {
+	void __iomem *mmio;
+
+	struct clk *disp_clk;    /* display dpll */
+	struct clk *clk;         /* functional clock */
+	int rev;                 /* IP revision */
+
+	/* don't attempt resolutions w/ higher W * H * Hz: */
+	uint32_t max_bandwidth;
+
+	/* register contents saved across suspend/resume: */
+	u32 saved_register[12];
+
+#ifdef CONFIG_CPU_FREQ
+	struct notifier_block freq_transition;
+	unsigned int lcd_fck_rate;
+#endif
+
+	struct workqueue_struct *wq;
+
+	struct drm_fbdev_cma *fbdev;
+
+	struct drm_crtc *crtc;
+
+	unsigned int num_encoders;
+	struct drm_encoder *encoders[8];
+
+	unsigned int num_connectors;
+	struct drm_connector *connectors[8];
+};
+
+/* Sub-module for display.  Since we don't know at compile time what panels
+ * or display adapter(s) might be present (for ex, off chip dvi/tfp410,
+ * hdmi encoder, various lcd panels), the connector/encoder(s) are split into
+ * separate drivers.  If they are probed and found to be present, they
+ * register themselves with tilcdc_module_init().
+ */
+struct tilcdc_module;
+
+struct tilcdc_module_ops {
+	/* create appropriate encoders/connectors: */
+	int (*modeset_init)(struct tilcdc_module *mod, struct drm_device *dev);
+	void (*destroy)(struct tilcdc_module *mod);
+#ifdef CONFIG_DEBUG_FS
+	/* create debugfs nodes (can be NULL): */
+	int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor);
+	/* cleanup debugfs nodes (can be NULL): */
+	void (*debugfs_cleanup)(struct tilcdc_module *mod, struct drm_minor *minor);
+#endif
+};
+
+struct tilcdc_module {
+	const char *name;
+	struct list_head list;
+	const struct tilcdc_module_ops *funcs;
+};
+
+void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
+		const struct tilcdc_module_ops *funcs);
+void tilcdc_module_cleanup(struct tilcdc_module *mod);
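+
+/* A sub-module typically embeds a struct tilcdc_module and registers it from
+ * its probe function, e.g. (sketch; "my_mod" and "my_module_ops" are
+ * illustrative names):
+ *
+ *	tilcdc_module_init(&my_mod->base, "panel", &my_module_ops);
+ */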
+
+/* Panel config that needs to be set in the crtc, but is not coming from
+ * the mode timings.  The display module is expected to call
+ * tilcdc_crtc_set_panel_info() to set this during modeset.
+ */
+struct tilcdc_panel_info {
+
+	/* AC Bias Pin Frequency */
+	uint32_t ac_bias;
+
+	/* AC Bias Pin Transitions per Interrupt */
+	uint32_t ac_bias_intrpt;
+
+	/* DMA burst size */
+	uint32_t dma_burst_sz;
+
+	/* Bits per pixel */
+	uint32_t bpp;
+
+	/* FIFO DMA Request Delay */
+	uint32_t fdd;
+
+	/* TFT Alternative Signal Mapping (Only for active) */
+	bool tft_alt_mode;
+
+	/* Invert pixel clock */
+	bool invert_pxl_clk;
+
+	/* Horizontal and Vertical Sync Edge: 0=rising 1=falling */
+	uint32_t sync_edge;
+
+	/* Horizontal and Vertical Sync Control: 0=ignore */
+	uint32_t sync_ctrl;
+
+	/* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */
+	uint32_t raster_order;
+
+	/* DMA FIFO threshold */
+	uint32_t fifo_th;
+};
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev);
+void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
+irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc);
+void tilcdc_crtc_update_clk(struct drm_crtc *crtc);
+void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
+		const struct tilcdc_panel_info *info);
+int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
+int tilcdc_crtc_max_width(struct drm_crtc *crtc);
+
+#endif /* __TILCDC_DRV_H__ */
diff --git a/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_panel.c
new file mode 100644
index 0000000..0917665
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -0,0 +1,435 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/backlight.h>
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
+
+#include "tilcdc_drv.h"
+
+struct panel_module {
+	struct tilcdc_module base;
+	struct tilcdc_panel_info *info;
+	struct display_timings *timings;
+	struct backlight_device *backlight;
+};
+#define to_panel_module(x) container_of(x, struct panel_module, base)
+
+
+ * Encoder:
+ */
+
+struct panel_encoder {
+	struct drm_encoder base;
+	struct panel_module *mod;
+};
+#define to_panel_encoder(x) container_of(x, struct panel_encoder, base)
+
+static void panel_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
+	drm_encoder_cleanup(encoder);
+	kfree(panel_encoder);
+}
+
+static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
+	struct backlight_device *backlight = panel_encoder->mod->backlight;
+
+	if (!backlight)
+		return;
+
+	backlight->props.power = mode == DRM_MODE_DPMS_ON
+				     ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+	backlight_update_status(backlight);
+}
+
+static bool panel_encoder_mode_fixup(struct drm_encoder *encoder,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	/* nothing needed */
+	return true;
+}
+
+static void panel_encoder_prepare(struct drm_encoder *encoder)
+{
+	struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
+	panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+	tilcdc_crtc_set_panel_info(encoder->crtc, panel_encoder->mod->info);
+}
+
+static void panel_encoder_commit(struct drm_encoder *encoder)
+{
+	panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void panel_encoder_mode_set(struct drm_encoder *encoder,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	/* nothing needed */
+}
+
+static const struct drm_encoder_funcs panel_encoder_funcs = {
+		.destroy        = panel_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
+		.dpms           = panel_encoder_dpms,
+		.mode_fixup     = panel_encoder_mode_fixup,
+		.prepare        = panel_encoder_prepare,
+		.commit         = panel_encoder_commit,
+		.mode_set       = panel_encoder_mode_set,
+};
+
+static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
+		struct panel_module *mod)
+{
+	struct panel_encoder *panel_encoder;
+	struct drm_encoder *encoder;
+	int ret;
+
+	panel_encoder = kzalloc(sizeof(*panel_encoder), GFP_KERNEL);
+	if (!panel_encoder) {
+		dev_err(dev->dev, "allocation failed\n");
+		return NULL;
+	}
+
+	panel_encoder->mod = mod;
+
+	encoder = &panel_encoder->base;
+	encoder->possible_crtcs = 1;
+
+	ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
+			DRM_MODE_ENCODER_LVDS);
+	if (ret < 0)
+		goto fail;
+
+	drm_encoder_helper_add(encoder, &panel_encoder_helper_funcs);
+
+	return encoder;
+
+fail:
+	panel_encoder_destroy(encoder);
+	return NULL;
+}
+
+/*
+ * Connector:
+ */
+
+struct panel_connector {
+	struct drm_connector base;
+
+	struct drm_encoder *encoder;  /* our connected encoder */
+	struct panel_module *mod;
+};
+#define to_panel_connector(x) container_of(x, struct panel_connector, base)
+
+static void panel_connector_destroy(struct drm_connector *connector)
+{
+	struct panel_connector *panel_connector = to_panel_connector(connector);
+	drm_connector_cleanup(connector);
+	kfree(panel_connector);
+}
+
+static enum drm_connector_status panel_connector_detect(
+		struct drm_connector *connector,
+		bool force)
+{
+	return connector_status_connected;
+}
+
+static int panel_connector_get_modes(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct panel_connector *panel_connector = to_panel_connector(connector);
+	struct display_timings *timings = panel_connector->mod->timings;
+	int i;
+
+	for (i = 0; i < timings->num_timings; i++) {
+		struct drm_display_mode *mode = drm_mode_create(dev);
+		struct videomode vm;
+
+		if (videomode_from_timings(timings, &vm, i))
+			break;
+
+		drm_display_mode_from_videomode(&vm, mode);
+
+		mode->type = DRM_MODE_TYPE_DRIVER;
+
+		if (timings->native_mode == i)
+			mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+		drm_mode_set_name(mode);
+		drm_mode_probed_add(connector, mode);
+	}
+
+	return i;
+}
+
+static int panel_connector_mode_valid(struct drm_connector *connector,
+		  struct drm_display_mode *mode)
+{
+	struct tilcdc_drm_private *priv = connector->dev->dev_private;
+	/* our only constraints are what the crtc can generate: */
+	return tilcdc_crtc_mode_valid(priv->crtc, mode);
+}
+
+static struct drm_encoder *panel_connector_best_encoder(
+		struct drm_connector *connector)
+{
+	struct panel_connector *panel_connector = to_panel_connector(connector);
+	return panel_connector->encoder;
+}
+
+static const struct drm_connector_funcs panel_connector_funcs = {
+	.destroy            = panel_connector_destroy,
+	.dpms               = drm_helper_connector_dpms,
+	.detect             = panel_connector_detect,
+	.fill_modes         = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs panel_connector_helper_funcs = {
+	.get_modes          = panel_connector_get_modes,
+	.mode_valid         = panel_connector_mode_valid,
+	.best_encoder       = panel_connector_best_encoder,
+};
+
+static struct drm_connector *panel_connector_create(struct drm_device *dev,
+		struct panel_module *mod, struct drm_encoder *encoder)
+{
+	struct panel_connector *panel_connector;
+	struct drm_connector *connector;
+	int ret;
+
+	panel_connector = kzalloc(sizeof(*panel_connector), GFP_KERNEL);
+	if (!panel_connector) {
+		dev_err(dev->dev, "allocation failed\n");
+		return NULL;
+	}
+
+	panel_connector->encoder = encoder;
+	panel_connector->mod = mod;
+
+	connector = &panel_connector->base;
+
+	drm_connector_init(dev, connector, &panel_connector_funcs,
+			DRM_MODE_CONNECTOR_LVDS);
+	drm_connector_helper_add(connector, &panel_connector_helper_funcs);
+
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	ret = drm_mode_connector_attach_encoder(connector, encoder);
+	if (ret)
+		goto fail;
+
+	drm_sysfs_connector_add(connector);
+
+	return connector;
+
+fail:
+	panel_connector_destroy(connector);
+	return NULL;
+}
+
+/*
+ * Module:
+ */
+
+static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
+{
+	struct panel_module *panel_mod = to_panel_module(mod);
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+
+	encoder = panel_encoder_create(dev, panel_mod);
+	if (!encoder)
+		return -ENOMEM;
+
+	connector = panel_connector_create(dev, panel_mod, encoder);
+	if (!connector)
+		return -ENOMEM;
+
+	priv->encoders[priv->num_encoders++] = encoder;
+	priv->connectors[priv->num_connectors++] = connector;
+
+	return 0;
+}
+
+static void panel_destroy(struct tilcdc_module *mod)
+{
+	struct panel_module *panel_mod = to_panel_module(mod);
+
+	if (panel_mod->timings) {
+		display_timings_release(panel_mod->timings);
+		kfree(panel_mod->timings);
+	}
+
+	tilcdc_module_cleanup(mod);
+	kfree(panel_mod->info);
+	kfree(panel_mod);
+}
+
+static const struct tilcdc_module_ops panel_module_ops = {
+		.modeset_init = panel_modeset_init,
+		.destroy = panel_destroy,
+};
+
+/*
+ * Device:
+ */
+
+/* maybe move this somewhere common if it is needed by other outputs? */
+static struct tilcdc_panel_info *of_get_panel_info(struct device_node *np)
+{
+	struct device_node *info_np;
+	struct tilcdc_panel_info *info;
+	int ret = 0;
+
+	if (!np) {
+		pr_err("%s: no device node given\n", __func__);
+		return NULL;
+	}
+
+	info_np = of_get_child_by_name(np, "panel-info");
+	if (!info_np) {
+		pr_err("%s: could not find panel-info node\n", __func__);
+		return NULL;
+	}
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		pr_err("%s: allocation failed\n", __func__);
+		return NULL;
+	}
+
+	ret |= of_property_read_u32(info_np, "ac-bias", &info->ac_bias);
+	ret |= of_property_read_u32(info_np, "ac-bias-intrpt", &info->ac_bias_intrpt);
+	ret |= of_property_read_u32(info_np, "dma-burst-sz", &info->dma_burst_sz);
+	ret |= of_property_read_u32(info_np, "bpp", &info->bpp);
+	ret |= of_property_read_u32(info_np, "fdd", &info->fdd);
+	ret |= of_property_read_u32(info_np, "sync-edge", &info->sync_edge);
+	ret |= of_property_read_u32(info_np, "sync-ctrl", &info->sync_ctrl);
+	ret |= of_property_read_u32(info_np, "raster-order", &info->raster_order);
+	ret |= of_property_read_u32(info_np, "fifo-th", &info->fifo_th);
+
+	/* optional: */
+	info->tft_alt_mode      = of_property_read_bool(info_np, "tft-alt-mode");
+	info->invert_pxl_clk    = of_property_read_bool(info_np, "invert-pxl-clk");
+
+	if (ret) {
+		pr_err("%s: error reading panel-info properties\n", __func__);
+		kfree(info);
+		return NULL;
+	}
+
+	return info;
+}
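+
+/* An example panel-info node as parsed above (values are purely
+ * illustrative, not a recommendation for any particular panel):
+ *
+ *	panel-info {
+ *		ac-bias         = <255>;
+ *		ac-bias-intrpt  = <0>;
+ *		dma-burst-sz    = <16>;
+ *		bpp             = <16>;
+ *		fdd             = <0x80>;
+ *		sync-edge       = <0>;
+ *		sync-ctrl       = <1>;
+ *		raster-order    = <0>;
+ *		fifo-th         = <0>;
+ *	};
+ */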
+
+static struct of_device_id panel_of_match[];
+
+static int panel_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct panel_module *panel_mod;
+	struct tilcdc_module *mod;
+	struct pinctrl *pinctrl;
+	int ret = -EINVAL;
+
+	/* bail out early if no DT data: */
+	if (!node) {
+		dev_err(&pdev->dev, "device-tree data is missing\n");
+		return -ENXIO;
+	}
+
+	panel_mod = kzalloc(sizeof(*panel_mod), GFP_KERNEL);
+	if (!panel_mod)
+		return -ENOMEM;
+
+	mod = &panel_mod->base;
+
+	tilcdc_module_init(mod, "panel", &panel_module_ops);
+
+	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(pinctrl))
+		dev_warn(&pdev->dev, "pins are not configured\n");
+
+	panel_mod->timings = of_get_display_timings(node);
+	if (!panel_mod->timings) {
+		dev_err(&pdev->dev, "could not get panel timings\n");
+		goto fail;
+	}
+
+	panel_mod->info = of_get_panel_info(node);
+	if (!panel_mod->info) {
+		dev_err(&pdev->dev, "could not get panel info\n");
+		goto fail;
+	}
+
+	panel_mod->backlight = of_find_backlight_by_node(node);
+	if (panel_mod->backlight)
+		dev_info(&pdev->dev, "found backlight\n");
+
+	return 0;
+
+fail:
+	panel_destroy(mod);
+	return ret;
+}
+
+static int panel_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static struct of_device_id panel_of_match[] = {
+		{ .compatible = "ti,tilcdc,panel", },
+		{ },
+};
+
+struct platform_driver panel_driver = {
+	.probe = panel_probe,
+	.remove = panel_remove,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "panel",
+		.of_match_table = panel_of_match,
+	},
+};
+
+int __init tilcdc_panel_init(void)
+{
+	return platform_driver_register(&panel_driver);
+}
+
+void __exit tilcdc_panel_fini(void)
+{
+	platform_driver_unregister(&panel_driver);
+}
diff --git a/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_panel.h b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_panel.h
new file mode 100644
index 0000000..7db40aa
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_panel.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_PANEL_H__
+#define __TILCDC_PANEL_H__
+
+/* sub-module for generic lcd panel output */
+
+int tilcdc_panel_init(void);
+void tilcdc_panel_fini(void);
+
+#endif /* __TILCDC_PANEL_H__ */
diff --git a/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_regs.h
new file mode 100644
index 0000000..17fd1b4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_REGS_H__
+#define __TILCDC_REGS_H__
+
+/* LCDC register definitions, based on da8xx-fb */
+
+#include <linux/bitops.h>
+
+#include "tilcdc_drv.h"
+
+/* LCDC Status Register */
+#define LCDC_END_OF_FRAME1                       BIT(9)
+#define LCDC_END_OF_FRAME0                       BIT(8)
+#define LCDC_PL_LOAD_DONE                        BIT(6)
+#define LCDC_FIFO_UNDERFLOW                      BIT(5)
+#define LCDC_SYNC_LOST                           BIT(2)
+#define LCDC_FRAME_DONE                          BIT(0)
+
+/* LCDC DMA Control Register */
+#define LCDC_DMA_BURST_SIZE(x)                   ((x) << 4)
+#define LCDC_DMA_BURST_1                         0x0
+#define LCDC_DMA_BURST_2                         0x1
+#define LCDC_DMA_BURST_4                         0x2
+#define LCDC_DMA_BURST_8                         0x3
+#define LCDC_DMA_BURST_16                        0x4
+#define LCDC_V1_END_OF_FRAME_INT_ENA             BIT(2)
+#define LCDC_V2_END_OF_FRAME0_INT_ENA            BIT(8)
+#define LCDC_V2_END_OF_FRAME1_INT_ENA            BIT(9)
+#define LCDC_DUAL_FRAME_BUFFER_ENABLE            BIT(0)
+
+/* LCDC Control Register */
+#define LCDC_CLK_DIVISOR(x)                      ((x) << 8)
+#define LCDC_RASTER_MODE                         0x01
+
+/* LCDC Raster Control Register */
+#define LCDC_PALETTE_LOAD_MODE(x)                ((x) << 20)
+#define PALETTE_AND_DATA                         0x00
+#define PALETTE_ONLY                             0x01
+#define DATA_ONLY                                0x02
+
+#define LCDC_MONO_8BIT_MODE                      BIT(9)
+#define LCDC_RASTER_ORDER                        BIT(8)
+#define LCDC_TFT_MODE                            BIT(7)
+#define LCDC_V1_UNDERFLOW_INT_ENA                BIT(6)
+#define LCDC_V2_UNDERFLOW_INT_ENA                BIT(5)
+#define LCDC_V1_PL_INT_ENA                       BIT(4)
+#define LCDC_V2_PL_INT_ENA                       BIT(6)
+#define LCDC_MONOCHROME_MODE                     BIT(1)
+#define LCDC_RASTER_ENABLE                       BIT(0)
+#define LCDC_TFT_ALT_ENABLE                      BIT(23)
+#define LCDC_STN_565_ENABLE                      BIT(24)
+#define LCDC_V2_DMA_CLK_EN                       BIT(2)
+#define LCDC_V2_LIDD_CLK_EN                      BIT(1)
+#define LCDC_V2_CORE_CLK_EN                      BIT(0)
+#define LCDC_V2_LPP_B10                          26
+#define LCDC_V2_TFT_24BPP_MODE                   BIT(25)
+#define LCDC_V2_TFT_24BPP_UNPACK                 BIT(26)
+
+/* LCDC Raster Timing 2 Register */
+#define LCDC_AC_BIAS_TRANSITIONS_PER_INT(x)      ((x) << 16)
+#define LCDC_AC_BIAS_FREQUENCY(x)                ((x) << 8)
+#define LCDC_SYNC_CTRL                           BIT(25)
+#define LCDC_SYNC_EDGE                           BIT(24)
+#define LCDC_INVERT_PIXEL_CLOCK                  BIT(22)
+#define LCDC_INVERT_HSYNC                        BIT(21)
+#define LCDC_INVERT_VSYNC                        BIT(20)
+
+/* LCDC Block */
+#define LCDC_PID_REG                             0x0
+#define LCDC_CTRL_REG                            0x4
+#define LCDC_STAT_REG                            0x8
+#define LCDC_RASTER_CTRL_REG                     0x28
+#define LCDC_RASTER_TIMING_0_REG                 0x2c
+#define LCDC_RASTER_TIMING_1_REG                 0x30
+#define LCDC_RASTER_TIMING_2_REG                 0x34
+#define LCDC_DMA_CTRL_REG                        0x40
+#define LCDC_DMA_FB_BASE_ADDR_0_REG              0x44
+#define LCDC_DMA_FB_CEILING_ADDR_0_REG           0x48
+#define LCDC_DMA_FB_BASE_ADDR_1_REG              0x4c
+#define LCDC_DMA_FB_CEILING_ADDR_1_REG           0x50
+
+/* Interrupt Registers available only in Version 2 */
+#define LCDC_RAW_STAT_REG                        0x58
+#define LCDC_MASKED_STAT_REG                     0x5c
+#define LCDC_INT_ENABLE_SET_REG                  0x60
+#define LCDC_INT_ENABLE_CLR_REG                  0x64
+#define LCDC_END_OF_INT_IND_REG                  0x68
+
+/* Clock registers available only on Version 2 */
+#define LCDC_CLK_ENABLE_REG                      0x6c
+#define LCDC_CLK_RESET_REG                       0x70
+#define LCDC_CLK_MAIN_RESET                      BIT(3)
+
+/*
+ * Helpers:
+ */
+
+static inline void tilcdc_write(struct drm_device *dev, u32 reg, u32 data)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	iowrite32(data, priv->mmio + reg);
+}
+
+static inline u32 tilcdc_read(struct drm_device *dev, u32 reg)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	return ioread32(priv->mmio + reg);
+}
+
+static inline void tilcdc_set(struct drm_device *dev, u32 reg, u32 mask)
+{
+	tilcdc_write(dev, reg, tilcdc_read(dev, reg) | mask);
+}
+
+static inline void tilcdc_clear(struct drm_device *dev, u32 reg, u32 mask)
+{
+	tilcdc_write(dev, reg, tilcdc_read(dev, reg) & ~mask);
+}
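+
+/* e.g. tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE) performs a
+ * read-modify-write that sets the raster-enable bit.
+ */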
+
+/* the register to read/clear irqstatus differs between v1 and v2 of the IP */
+static inline u32 tilcdc_irqstatus_reg(struct drm_device *dev)
+{
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	return (priv->rev == 2) ? LCDC_MASKED_STAT_REG : LCDC_STAT_REG;
+}
+
+static inline u32 tilcdc_read_irqstatus(struct drm_device *dev)
+{
+	return tilcdc_read(dev, tilcdc_irqstatus_reg(dev));
+}
+
+static inline void tilcdc_clear_irqstatus(struct drm_device *dev, u32 mask)
+{
+	tilcdc_write(dev, tilcdc_irqstatus_reg(dev), mask);
+}
+
+#endif /* __TILCDC_REGS_H__ */
diff --git a/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_slave.c
new file mode 100644
index 0000000..db1d2fc
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/of_i2c.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+#include <drm/drm_encoder_slave.h>
+
+#include "tilcdc_drv.h"
+
+struct slave_module {
+	struct tilcdc_module base;
+	struct i2c_adapter *i2c;
+};
+#define to_slave_module(x) container_of(x, struct slave_module, base)
+
+static const struct tilcdc_panel_info slave_info = {
+		.bpp                    = 16,
+		.ac_bias                = 255,
+		.ac_bias_intrpt         = 0,
+		.dma_burst_sz           = 16,
+		.fdd                    = 0x80,
+		.tft_alt_mode           = 0,
+		.sync_edge              = 0,
+		.sync_ctrl              = 1,
+		.raster_order           = 0,
+};
+
+/*
+ * Encoder:
+ */
+
+struct slave_encoder {
+	struct drm_encoder_slave base;
+	struct slave_module *mod;
+};
+#define to_slave_encoder(x) container_of(to_encoder_slave(x), struct slave_encoder, base)
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct drm_encoder *enc)
+{
+	return to_encoder_slave(enc)->slave_funcs;
+}
+
+static void slave_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct slave_encoder *slave_encoder = to_slave_encoder(encoder);
+	if (get_slave_funcs(encoder))
+		get_slave_funcs(encoder)->destroy(encoder);
+	drm_encoder_cleanup(encoder);
+	kfree(slave_encoder);
+}
+
+static void slave_encoder_prepare(struct drm_encoder *encoder)
+{
+	drm_i2c_encoder_prepare(encoder);
+	tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info);
+}
+
+static const struct drm_encoder_funcs slave_encoder_funcs = {
+		.destroy        = slave_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = {
+		.dpms           = drm_i2c_encoder_dpms,
+		.mode_fixup     = drm_i2c_encoder_mode_fixup,
+		.prepare        = slave_encoder_prepare,
+		.commit         = drm_i2c_encoder_commit,
+		.mode_set       = drm_i2c_encoder_mode_set,
+		.save           = drm_i2c_encoder_save,
+		.restore        = drm_i2c_encoder_restore,
+};
+
+static const struct i2c_board_info info = {
+		I2C_BOARD_INFO("tda998x", 0x70)
+};
+
+static struct drm_encoder *slave_encoder_create(struct drm_device *dev,
+		struct slave_module *mod)
+{
+	struct slave_encoder *slave_encoder;
+	struct drm_encoder *encoder;
+	int ret;
+
+	slave_encoder = kzalloc(sizeof(*slave_encoder), GFP_KERNEL);
+	if (!slave_encoder) {
+		dev_err(dev->dev, "allocation failed\n");
+		return NULL;
+	}
+
+	slave_encoder->mod = mod;
+
+	encoder = &slave_encoder->base.base;
+	encoder->possible_crtcs = 1;
+
+	ret = drm_encoder_init(dev, encoder, &slave_encoder_funcs,
+			DRM_MODE_ENCODER_TMDS);
+	if (ret)
+		goto fail;
+
+	drm_encoder_helper_add(encoder, &slave_encoder_helper_funcs);
+
+	ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), mod->i2c, &info);
+	if (ret)
+		goto fail;
+
+	return encoder;
+
+fail:
+	slave_encoder_destroy(encoder);
+	return NULL;
+}
+
+/*
+ * Connector:
+ */
+
+struct slave_connector {
+	struct drm_connector base;
+
+	struct drm_encoder *encoder;  /* our connected encoder */
+	struct slave_module *mod;
+};
+#define to_slave_connector(x) container_of(x, struct slave_connector, base)
+
+static void slave_connector_destroy(struct drm_connector *connector)
+{
+	struct slave_connector *slave_connector = to_slave_connector(connector);
+	drm_connector_cleanup(connector);
+	kfree(slave_connector);
+}
+
+static enum drm_connector_status slave_connector_detect(
+		struct drm_connector *connector,
+		bool force)
+{
+	struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+	return get_slave_funcs(encoder)->detect(encoder, connector);
+}
+
+static int slave_connector_get_modes(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+	return get_slave_funcs(encoder)->get_modes(encoder, connector);
+}
+
+static int slave_connector_mode_valid(struct drm_connector *connector,
+		  struct drm_display_mode *mode)
+{
+	struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+	struct tilcdc_drm_private *priv = connector->dev->dev_private;
+	int ret;
+
+	ret = tilcdc_crtc_mode_valid(priv->crtc, mode);
+	if (ret != MODE_OK)
+		return ret;
+
+	return get_slave_funcs(encoder)->mode_valid(encoder, mode);
+}
+
+static struct drm_encoder *slave_connector_best_encoder(
+		struct drm_connector *connector)
+{
+	struct slave_connector *slave_connector = to_slave_connector(connector);
+	return slave_connector->encoder;
+}
+
+static int slave_connector_set_property(struct drm_connector *connector,
+		struct drm_property *property, uint64_t value)
+{
+	struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
+	return get_slave_funcs(encoder)->set_property(encoder,
+			connector, property, value);
+}
+
+static const struct drm_connector_funcs slave_connector_funcs = {
+	.destroy            = slave_connector_destroy,
+	.dpms               = drm_helper_connector_dpms,
+	.detect             = slave_connector_detect,
+	.fill_modes         = drm_helper_probe_single_connector_modes,
+	.set_property       = slave_connector_set_property,
+};
+
+static const struct drm_connector_helper_funcs slave_connector_helper_funcs = {
+	.get_modes          = slave_connector_get_modes,
+	.mode_valid         = slave_connector_mode_valid,
+	.best_encoder       = slave_connector_best_encoder,
+};
+
+static struct drm_connector *slave_connector_create(struct drm_device *dev,
+		struct slave_module *mod, struct drm_encoder *encoder)
+{
+	struct slave_connector *slave_connector;
+	struct drm_connector *connector;
+	int ret;
+
+	slave_connector = kzalloc(sizeof(*slave_connector), GFP_KERNEL);
+	if (!slave_connector) {
+		dev_err(dev->dev, "allocation failed\n");
+		return NULL;
+	}
+
+	slave_connector->encoder = encoder;
+	slave_connector->mod = mod;
+
+	connector = &slave_connector->base;
+
+	drm_connector_init(dev, connector, &slave_connector_funcs,
+			DRM_MODE_CONNECTOR_HDMIA);
+	drm_connector_helper_add(connector, &slave_connector_helper_funcs);
+
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+			DRM_CONNECTOR_POLL_DISCONNECT;
+
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	get_slave_funcs(encoder)->create_resources(encoder, connector);
+
+	ret = drm_mode_connector_attach_encoder(connector, encoder);
+	if (ret)
+		goto fail;
+
+	drm_sysfs_connector_add(connector);
+
+	return connector;
+
+fail:
+	slave_connector_destroy(connector);
+	return NULL;
+}
+
+/*
+ * Module:
+ */
+
+static int slave_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
+{
+	struct slave_module *slave_mod = to_slave_module(mod);
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+
+	encoder = slave_encoder_create(dev, slave_mod);
+	if (!encoder)
+		return -ENOMEM;
+
+	connector = slave_connector_create(dev, slave_mod, encoder);
+	if (!connector)
+		return -ENOMEM;
+
+	priv->encoders[priv->num_encoders++] = encoder;
+	priv->connectors[priv->num_connectors++] = connector;
+
+	return 0;
+}
+
+static void slave_destroy(struct tilcdc_module *mod)
+{
+	struct slave_module *slave_mod = to_slave_module(mod);
+
+	tilcdc_module_cleanup(mod);
+	kfree(slave_mod);
+}
+
+static const struct tilcdc_module_ops slave_module_ops = {
+		.modeset_init = slave_modeset_init,
+		.destroy = slave_destroy,
+};
+
+/*
+ * Device:
+ */
+
+static struct of_device_id slave_of_match[];
+
+static int slave_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *i2c_node;
+	struct slave_module *slave_mod;
+	struct tilcdc_module *mod;
+	struct pinctrl *pinctrl;
+	uint32_t i2c_phandle;
+	int ret = -EINVAL;
+
+	/* bail out early if no DT data: */
+	if (!node) {
+		dev_err(&pdev->dev, "device-tree data is missing\n");
+		return -ENXIO;
+	}
+
+	slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL);
+	if (!slave_mod)
+		return -ENOMEM;
+
+	mod = &slave_mod->base;
+
+	tilcdc_module_init(mod, "slave", &slave_module_ops);
+
+	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(pinctrl))
+		dev_warn(&pdev->dev, "pins are not configured\n");
+
+	if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
+		dev_err(&pdev->dev, "could not get i2c bus phandle\n");
+		goto fail;
+	}
+
+	i2c_node = of_find_node_by_phandle(i2c_phandle);
+	if (!i2c_node) {
+		dev_err(&pdev->dev, "could not get i2c bus node\n");
+		goto fail;
+	}
+
+	slave_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
+	of_node_put(i2c_node);	/* done with the DT node either way */
+	if (!slave_mod->i2c) {
+		dev_err(&pdev->dev, "could not get i2c adapter\n");
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	slave_destroy(mod);
+	return ret;
+}
+
+static int slave_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static struct of_device_id slave_of_match[] = {
+		{ .compatible = "ti,tilcdc,slave", },
+		{ },
+};
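+
+/*
+ * Editor's sketch (not part of the original patch): a board device tree
+ * fragment that binds against the compatible string above, carrying the
+ * "i2c" phandle property that slave_probe() above reads, might look like:
+ *
+ *	lcdc_slave: slave {
+ *		compatible = "ti,tilcdc,slave";
+ *		i2c = <&i2c1>;
+ *	};
+ *
+ * The node and label names are illustrative only.
+ */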
+
+struct platform_driver slave_driver = {
+	.probe = slave_probe,
+	.remove = slave_remove,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "slave",
+		.of_match_table = slave_of_match,
+	},
+};
+
+int __init tilcdc_slave_init(void)
+{
+	return platform_driver_register(&slave_driver);
+}
+
+void __exit tilcdc_slave_fini(void)
+{
+	platform_driver_unregister(&slave_driver);
+}
diff --git a/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_slave.h b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_slave.h
new file mode 100644
index 0000000..2f85048
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_slave.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_SLAVE_H__
+#define __TILCDC_SLAVE_H__
+
+/* sub-module for i2c slave encoder output */
+
+int tilcdc_slave_init(void);
+void tilcdc_slave_fini(void);
+
+#endif /* __TILCDC_SLAVE_H__ */
diff --git a/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
new file mode 100644
index 0000000..a36788f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/of_i2c.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "tilcdc_drv.h"
+
+struct tfp410_module {
+	struct tilcdc_module base;
+	struct i2c_adapter *i2c;
+	int gpio;
+};
+#define to_tfp410_module(x) container_of(x, struct tfp410_module, base)
+
+
+static const struct tilcdc_panel_info dvi_info = {
+		.ac_bias                = 255,
+		.ac_bias_intrpt         = 0,
+		.dma_burst_sz           = 16,
+		.bpp                    = 16,
+		.fdd                    = 0x80,
+		.tft_alt_mode           = 0,
+		.sync_edge              = 0,
+		.sync_ctrl              = 1,
+		.raster_order           = 0,
+};
+
+/*
+ * Encoder:
+ */
+
+struct tfp410_encoder {
+	struct drm_encoder base;
+	struct tfp410_module *mod;
+	int dpms;
+};
+#define to_tfp410_encoder(x) container_of(x, struct tfp410_encoder, base)
+
+
+static void tfp410_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
+	drm_encoder_cleanup(encoder);
+	kfree(tfp410_encoder);
+}
+
+static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
+
+	if (tfp410_encoder->dpms == mode)
+		return;
+
+	if (mode == DRM_MODE_DPMS_ON) {
+		DBG("Power on");
+		gpio_direction_output(tfp410_encoder->mod->gpio, 1);
+	} else {
+		DBG("Power off");
+		gpio_direction_output(tfp410_encoder->mod->gpio, 0);
+	}
+
+	tfp410_encoder->dpms = mode;
+}
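+
+/*
+ * Editor's note: the power-down GPIO (requested as "DVI_PDn" in
+ * tfp410_probe() below) is driven high to power the encoder up and low
+ * to power it down; a board that inverts the TFP410 PD# line would need
+ * the opposite polarity.
+ */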
+
+static bool tfp410_encoder_mode_fixup(struct drm_encoder *encoder,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	/* nothing needed */
+	return true;
+}
+
+static void tfp410_encoder_prepare(struct drm_encoder *encoder)
+{
+	tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+	tilcdc_crtc_set_panel_info(encoder->crtc, &dvi_info);
+}
+
+static void tfp410_encoder_commit(struct drm_encoder *encoder)
+{
+	tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void tfp410_encoder_mode_set(struct drm_encoder *encoder,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	/* nothing needed */
+}
+
+static const struct drm_encoder_funcs tfp410_encoder_funcs = {
+		.destroy        = tfp410_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs tfp410_encoder_helper_funcs = {
+		.dpms           = tfp410_encoder_dpms,
+		.mode_fixup     = tfp410_encoder_mode_fixup,
+		.prepare        = tfp410_encoder_prepare,
+		.commit         = tfp410_encoder_commit,
+		.mode_set       = tfp410_encoder_mode_set,
+};
+
+static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
+		struct tfp410_module *mod)
+{
+	struct tfp410_encoder *tfp410_encoder;
+	struct drm_encoder *encoder;
+	int ret;
+
+	tfp410_encoder = kzalloc(sizeof(*tfp410_encoder), GFP_KERNEL);
+	if (!tfp410_encoder) {
+		dev_err(dev->dev, "allocation failed\n");
+		return NULL;
+	}
+
+	tfp410_encoder->dpms = DRM_MODE_DPMS_OFF;
+	tfp410_encoder->mod = mod;
+
+	encoder = &tfp410_encoder->base;
+	encoder->possible_crtcs = 1;
+
+	ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs,
+			DRM_MODE_ENCODER_TMDS);
+	if (ret < 0)
+		goto fail;
+
+	drm_encoder_helper_add(encoder, &tfp410_encoder_helper_funcs);
+
+	return encoder;
+
+fail:
+	tfp410_encoder_destroy(encoder);
+	return NULL;
+}
+
+/*
+ * Connector:
+ */
+
+struct tfp410_connector {
+	struct drm_connector base;
+
+	struct drm_encoder *encoder;  /* our connected encoder */
+	struct tfp410_module *mod;
+};
+#define to_tfp410_connector(x) container_of(x, struct tfp410_connector, base)
+
+
+static void tfp410_connector_destroy(struct drm_connector *connector)
+{
+	struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+	drm_connector_cleanup(connector);
+	kfree(tfp410_connector);
+}
+
+static enum drm_connector_status tfp410_connector_detect(
+		struct drm_connector *connector,
+		bool force)
+{
+	struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+
+	if (drm_probe_ddc(tfp410_connector->mod->i2c))
+		return connector_status_connected;
+
+	return connector_status_unknown;
+}
+
+static int tfp410_connector_get_modes(struct drm_connector *connector)
+{
+	struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+	struct edid *edid;
+	int ret = 0;
+
+	edid = drm_get_edid(connector, tfp410_connector->mod->i2c);
+
+	drm_mode_connector_update_edid_property(connector, edid);
+
+	if (edid) {
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+
+	return ret;
+}
+
+static int tfp410_connector_mode_valid(struct drm_connector *connector,
+		  struct drm_display_mode *mode)
+{
+	struct tilcdc_drm_private *priv = connector->dev->dev_private;
+	/* our only constraints are what the crtc can generate: */
+	return tilcdc_crtc_mode_valid(priv->crtc, mode);
+}
+
+static struct drm_encoder *tfp410_connector_best_encoder(
+		struct drm_connector *connector)
+{
+	struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+	return tfp410_connector->encoder;
+}
+
+static const struct drm_connector_funcs tfp410_connector_funcs = {
+	.destroy            = tfp410_connector_destroy,
+	.dpms               = drm_helper_connector_dpms,
+	.detect             = tfp410_connector_detect,
+	.fill_modes         = drm_helper_probe_single_connector_modes,
+};
+
+static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = {
+	.get_modes          = tfp410_connector_get_modes,
+	.mode_valid         = tfp410_connector_mode_valid,
+	.best_encoder       = tfp410_connector_best_encoder,
+};
+
+static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
+		struct tfp410_module *mod, struct drm_encoder *encoder)
+{
+	struct tfp410_connector *tfp410_connector;
+	struct drm_connector *connector;
+	int ret;
+
+	tfp410_connector = kzalloc(sizeof(*tfp410_connector), GFP_KERNEL);
+	if (!tfp410_connector) {
+		dev_err(dev->dev, "allocation failed\n");
+		return NULL;
+	}
+
+	tfp410_connector->encoder = encoder;
+	tfp410_connector->mod = mod;
+
+	connector = &tfp410_connector->base;
+
+	drm_connector_init(dev, connector, &tfp410_connector_funcs,
+			DRM_MODE_CONNECTOR_DVID);
+	drm_connector_helper_add(connector, &tfp410_connector_helper_funcs);
+
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+			DRM_CONNECTOR_POLL_DISCONNECT;
+
+	connector->interlace_allowed = 0;
+	connector->doublescan_allowed = 0;
+
+	ret = drm_mode_connector_attach_encoder(connector, encoder);
+	if (ret)
+		goto fail;
+
+	drm_sysfs_connector_add(connector);
+
+	return connector;
+
+fail:
+	tfp410_connector_destroy(connector);
+	return NULL;
+}
+
+/*
+ * Module:
+ */
+
+static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
+{
+	struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
+	struct tilcdc_drm_private *priv = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+
+	encoder = tfp410_encoder_create(dev, tfp410_mod);
+	if (!encoder)
+		return -ENOMEM;
+
+	connector = tfp410_connector_create(dev, tfp410_mod, encoder);
+	if (!connector)
+		return -ENOMEM;
+
+	priv->encoders[priv->num_encoders++] = encoder;
+	priv->connectors[priv->num_connectors++] = connector;
+
+	return 0;
+}
+
+static void tfp410_destroy(struct tilcdc_module *mod)
+{
+	struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
+
+	if (tfp410_mod->i2c)
+		i2c_put_adapter(tfp410_mod->i2c);
+
+	if (!IS_ERR_VALUE(tfp410_mod->gpio))
+		gpio_free(tfp410_mod->gpio);
+
+	tilcdc_module_cleanup(mod);
+	kfree(tfp410_mod);
+}
+
+static const struct tilcdc_module_ops tfp410_module_ops = {
+		.modeset_init = tfp410_modeset_init,
+		.destroy = tfp410_destroy,
+};
+
+/*
+ * Device:
+ */
+
+static struct of_device_id tfp410_of_match[];
+
+static int tfp410_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *i2c_node;
+	struct tfp410_module *tfp410_mod;
+	struct tilcdc_module *mod;
+	struct pinctrl *pinctrl;
+	uint32_t i2c_phandle;
+	int ret = -EINVAL;
+
+	/* bail out early if no DT data: */
+	if (!node) {
+		dev_err(&pdev->dev, "device-tree data is missing\n");
+		return -ENXIO;
+	}
+
+	tfp410_mod = kzalloc(sizeof(*tfp410_mod), GFP_KERNEL);
+	if (!tfp410_mod)
+		return -ENOMEM;
+
+	mod = &tfp410_mod->base;
+
+	tilcdc_module_init(mod, "tfp410", &tfp410_module_ops);
+
+	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(pinctrl))
+		dev_warn(&pdev->dev, "pins are not configured\n");
+
+	if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
+		dev_err(&pdev->dev, "could not get i2c bus phandle\n");
+		goto fail;
+	}
+
+	i2c_node = of_find_node_by_phandle(i2c_phandle);
+	if (!i2c_node) {
+		dev_err(&pdev->dev, "could not get i2c bus node\n");
+		goto fail;
+	}
+
+	tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
+	of_node_put(i2c_node);	/* done with the DT node either way */
+	if (!tfp410_mod->i2c) {
+		dev_err(&pdev->dev, "could not get i2c adapter\n");
+		goto fail;
+	}
+
+	tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio",
+			0, NULL);
+	if (IS_ERR_VALUE(tfp410_mod->gpio)) {
+		dev_warn(&pdev->dev, "No power down GPIO\n");
+	} else {
+		ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
+		if (ret) {
+			dev_err(&pdev->dev, "could not get DVI_PDn gpio\n");
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	tfp410_destroy(mod);
+	return ret;
+}
+
+static int tfp410_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static struct of_device_id tfp410_of_match[] = {
+		{ .compatible = "ti,tilcdc,tfp410", },
+		{ },
+};
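+
+/*
+ * Editor's sketch (not part of the original patch): a matching device
+ * tree node would carry the "i2c" phandle and the optional
+ * "powerdn-gpio" property parsed by tfp410_probe() above, e.g.:
+ *
+ *	dvi: tfp410 {
+ *		compatible = "ti,tilcdc,tfp410";
+ *		i2c = <&i2c1>;
+ *		powerdn-gpio = <&gpio1 31 0>;
+ *	};
+ *
+ * The node name, label and GPIO specifier are illustrative only.
+ */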
+
+struct platform_driver tfp410_driver = {
+	.probe = tfp410_probe,
+	.remove = tfp410_remove,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "tfp410",
+		.of_match_table = tfp410_of_match,
+	},
+};
+
+int __init tilcdc_tfp410_init(void)
+{
+	return platform_driver_register(&tfp410_driver);
+}
+
+void __exit tilcdc_tfp410_fini(void)
+{
+	platform_driver_unregister(&tfp410_driver);
+}
diff --git a/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
new file mode 100644
index 0000000..5b800f1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TILCDC_TFP410_H__
+#define __TILCDC_TFP410_H__
+
+/* sub-module for the TFP410 DVI adapter */
+
+int tilcdc_tfp410_init(void);
+void tilcdc_tfp410_fini(void);
+
+#endif /* __TILCDC_TFP410_H__ */
diff --git a/linux-imx/drivers/gpu/drm/ttm/Makefile b/linux-imx/drivers/gpu/drm/ttm/Makefile
new file mode 100644
index 0000000..b2b33dd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the TTM graphics memory manager, used by drm device drivers.
+
+ccflags-y := -Iinclude/drm
+ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
+	ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
+	ttm_bo_manager.o
+
+ifeq ($(CONFIG_SWIOTLB),y)
+ttm-y += ttm_page_alloc_dma.o
+endif
+
+obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/linux-imx/drivers/gpu/drm/ttm/ttm_agp_backend.c b/linux-imx/drivers/gpu/drm/ttm/ttm_agp_backend.c
new file mode 100644
index 0000000..3302f99
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -0,0 +1,151 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *          Keith Packard.
+ */
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#ifdef TTM_HAS_AGP
+#include <drm/ttm/ttm_placement.h>
+#include <linux/agp_backend.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <asm/agp.h>
+
+struct ttm_agp_backend {
+	struct ttm_tt ttm;
+	struct agp_memory *mem;
+	struct agp_bridge_data *bridge;
+};
+
+static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+	struct drm_mm_node *node = bo_mem->mm_node;
+	struct agp_memory *mem;
+	int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+	unsigned i;
+
+	mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
+	if (unlikely(mem == NULL))
+		return -ENOMEM;
+
+	mem->page_count = 0;
+	for (i = 0; i < ttm->num_pages; i++) {
+		struct page *page = ttm->pages[i];
+
+		if (!page)
+			page = ttm->dummy_read_page;
+
+		mem->pages[mem->page_count++] = page;
+	}
+	agp_be->mem = mem;
+
+	mem->is_flushed = 1;
+	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
+
+	ret = agp_bind_memory(mem, node->start);
+	if (ret)
+		pr_err("AGP Bind memory failed\n");
+
+	return ret;
+}
+
+static int ttm_agp_unbind(struct ttm_tt *ttm)
+{
+	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+
+	if (agp_be->mem) {
+		if (agp_be->mem->is_bound)
+			return agp_unbind_memory(agp_be->mem);
+		agp_free_memory(agp_be->mem);
+		agp_be->mem = NULL;
+	}
+	return 0;
+}
+
+static void ttm_agp_destroy(struct ttm_tt *ttm)
+{
+	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+
+	if (agp_be->mem)
+		ttm_agp_unbind(ttm);
+	ttm_tt_fini(ttm);
+	kfree(agp_be);
+}
+
+static struct ttm_backend_func ttm_agp_func = {
+	.bind = ttm_agp_bind,
+	.unbind = ttm_agp_unbind,
+	.destroy = ttm_agp_destroy,
+};
+
+struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+				 struct agp_bridge_data *bridge,
+				 unsigned long size, uint32_t page_flags,
+				 struct page *dummy_read_page)
+{
+	struct ttm_agp_backend *agp_be;
+
+	agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
+	if (!agp_be)
+		return NULL;
+
+	agp_be->mem = NULL;
+	agp_be->bridge = bridge;
+	agp_be->ttm.func = &ttm_agp_func;
+
+	if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(agp_be);	/* don't leak the backend on init failure */
+		return NULL;
+	}
+
+	return &agp_be->ttm;
+}
+EXPORT_SYMBOL(ttm_agp_tt_create);
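+
+/*
+ * Editor's sketch: an AGP-backed driver would typically wrap the helper
+ * above in its ttm_bo_driver::ttm_tt_create hook, supplying its own
+ * bridge ("foo_bridge" below is a hypothetical per-driver pointer):
+ *
+ *	static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
+ *						unsigned long size,
+ *						uint32_t page_flags,
+ *						struct page *dummy_read_page)
+ *	{
+ *		return ttm_agp_tt_create(bdev, foo_bridge, size,
+ *					 page_flags, dummy_read_page);
+ *	}
+ */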
+
+int ttm_agp_tt_populate(struct ttm_tt *ttm)
+{
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	return ttm_pool_populate(ttm);
+}
+EXPORT_SYMBOL(ttm_agp_tt_populate);
+
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
+{
+	ttm_pool_unpopulate(ttm);
+}
+EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/ttm/ttm_bo.c b/linux-imx/drivers/gpu/drm/ttm/ttm_bo.c
new file mode 100644
index 0000000..0ac0a88
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/ttm_bo.c
@@ -0,0 +1,1951 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/atomic.h>
+
+#define TTM_ASSERT_LOCKED(param)
+#define TTM_DEBUG(fmt, arg...)
+#define TTM_BO_HASH_ORDER 13
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
+static void ttm_bo_global_kobj_release(struct kobject *kobj);
+
+static struct attribute ttm_bo_count = {
+	.name = "bo_count",
+	.mode = S_IRUGO
+};
+
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+	int i;
+
+	for (i = 0; i <= TTM_PL_PRIV5; i++)
+		if (flags & (1 << i)) {
+			*mem_type = i;
+			return 0;
+		}
+	return -EINVAL;
+}
+
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
+	pr_err("    has_type: %d\n", man->has_type);
+	pr_err("    use_type: %d\n", man->use_type);
+	pr_err("    flags: 0x%08X\n", man->flags);
+	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
+	pr_err("    size: %llu\n", man->size);
+	pr_err("    available_caching: 0x%08X\n", man->available_caching);
+	pr_err("    default_caching: 0x%08X\n", man->default_caching);
+	if (mem_type != TTM_PL_SYSTEM)
+		(*man->func->debug)(man, TTM_PFX);
+}
+
+static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
+					struct ttm_placement *placement)
+{
+	int i, ret, mem_type;
+
+	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
+	       bo, bo->mem.num_pages, bo->mem.size >> 10,
+	       bo->mem.size >> 20);
+	for (i = 0; i < placement->num_placement; i++) {
+		ret = ttm_mem_type_from_flags(placement->placement[i],
+						&mem_type);
+		if (ret)
+			return;
+		pr_err("  placement[%d]=0x%08X (%d)\n",
+		       i, placement->placement[i], mem_type);
+		ttm_mem_type_debug(bo->bdev, mem_type);
+	}
+}
+
+static ssize_t ttm_bo_global_show(struct kobject *kobj,
+				  struct attribute *attr,
+				  char *buffer)
+{
+	struct ttm_bo_global *glob =
+		container_of(kobj, struct ttm_bo_global, kobj);
+
+	return snprintf(buffer, PAGE_SIZE, "%lu\n",
+			(unsigned long) atomic_read(&glob->bo_count));
+}
+
+static struct attribute *ttm_bo_global_attrs[] = {
+	&ttm_bo_count,
+	NULL
+};
+
+static const struct sysfs_ops ttm_bo_global_ops = {
+	.show = &ttm_bo_global_show
+};
+
+static struct kobj_type ttm_bo_glob_kobj_type  = {
+	.release = &ttm_bo_global_kobj_release,
+	.sysfs_ops = &ttm_bo_global_ops,
+	.default_attrs = ttm_bo_global_attrs
+};
+
+
+static inline uint32_t ttm_bo_type_flags(unsigned type)
+{
+	return 1 << (type);
+}
+
+static void ttm_bo_release_list(struct kref *list_kref)
+{
+	struct ttm_buffer_object *bo =
+	    container_of(list_kref, struct ttm_buffer_object, list_kref);
+	struct ttm_bo_device *bdev = bo->bdev;
+	size_t acc_size = bo->acc_size;
+
+	BUG_ON(atomic_read(&bo->list_kref.refcount));
+	BUG_ON(atomic_read(&bo->kref.refcount));
+	BUG_ON(atomic_read(&bo->cpu_writers));
+	BUG_ON(bo->sync_obj != NULL);
+	BUG_ON(bo->mem.mm_node != NULL);
+	BUG_ON(!list_empty(&bo->lru));
+	BUG_ON(!list_empty(&bo->ddestroy));
+
+	if (bo->ttm)
+		ttm_tt_destroy(bo->ttm);
+	atomic_dec(&bo->glob->bo_count);
+	if (bo->destroy)
+		bo->destroy(bo);
+	else
+		kfree(bo);
+	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+}
+
+static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+				  bool interruptible)
+{
+	if (interruptible) {
+		return wait_event_interruptible(bo->event_queue,
+					       !ttm_bo_is_reserved(bo));
+	} else {
+		wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
+		return 0;
+	}
+}
+
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man;
+
+	BUG_ON(!ttm_bo_is_reserved(bo));
+
+	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+
+		BUG_ON(!list_empty(&bo->lru));
+
+		man = &bdev->man[bo->mem.mem_type];
+		list_add_tail(&bo->lru, &man->lru);
+		kref_get(&bo->list_kref);
+
+		if (bo->ttm != NULL) {
+			list_add_tail(&bo->swap, &bo->glob->swap_lru);
+			kref_get(&bo->list_kref);
+		}
+	}
+}
+
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+{
+	int put_count = 0;
+
+	if (!list_empty(&bo->swap)) {
+		list_del_init(&bo->swap);
+		++put_count;
+	}
+	if (!list_empty(&bo->lru)) {
+		list_del_init(&bo->lru);
+		++put_count;
+	}
+
+	/*
+	 * TODO: Add a driver hook to delete from
+	 * driver-specific LRU's here.
+	 */
+
+	return put_count;
+}
+
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
+			  bool interruptible,
+			  bool no_wait, bool use_sequence, uint32_t sequence)
+{
+	int ret;
+
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+		/**
+		 * Deadlock avoidance for multi-bo reserving.
+		 */
+		if (use_sequence && bo->seq_valid) {
+			/**
+			 * We've already reserved this one.
+			 */
+			if (unlikely(sequence == bo->val_seq))
+				return -EDEADLK;
+			/**
+			 * Already reserved by a thread that will not back
+			 * off for us. We need to back off.
+			 */
+			if (unlikely(sequence - bo->val_seq < (1 << 31)))
+				return -EAGAIN;
+		}
+
+		if (no_wait)
+			return -EBUSY;
+
+		ret = ttm_bo_wait_unreserved(bo, interruptible);
+
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (use_sequence) {
+		bool wake_up = false;
+		/**
+		 * Wake up waiters that may need to recheck for deadlock,
+		 * if we decreased the sequence number.
+		 */
+		if (unlikely((bo->val_seq - sequence < (1 << 31))
+			     || !bo->seq_valid))
+			wake_up = true;
+
+		/*
+		 * In the worst case with memory ordering these values can be
+		 * seen in the wrong order. However since we call wake_up_all
+		 * in that case, this will hopefully not pose a problem,
+		 * and the worst case would only cause someone to accidentally
+		 * hit -EAGAIN in ttm_bo_reserve when they see an old value of
+		 * val_seq. However, this would only happen if seq_valid was
+		 * written before val_seq was, and just means some slightly
+		 * increased CPU usage.
+		 */
+		bo->val_seq = sequence;
+		bo->seq_valid = true;
+		if (wake_up)
+			wake_up_all(&bo->event_queue);
+	} else {
+		bo->seq_valid = false;
+	}
+
+	return 0;
+}
+
+static void ttm_bo_ref_bug(struct kref *list_kref)
+{
+	BUG();
+}
+
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+			 bool never_free)
+{
+	kref_sub(&bo->list_kref, count,
+		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
+}
+
+int ttm_bo_reserve(struct ttm_buffer_object *bo,
+		   bool interruptible,
+		   bool no_wait, bool use_sequence, uint32_t sequence)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count = 0;
+	int ret;
+
+	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+				   sequence);
+	if (likely(ret == 0)) {
+		spin_lock(&glob->lru_lock);
+		put_count = ttm_bo_del_from_lru(bo);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_reserve);
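+
+/*
+ * Editor's sketch: the use_sequence/sequence pair above implements
+ * deadlock avoidance when reserving several buffers at once.  A caller
+ * reserves a whole list under one sequence number and restarts on
+ * -EAGAIN:
+ *
+ *	retry:
+ *	list_for_each_entry(entry, &list, head) {
+ *		ret = ttm_bo_reserve(entry->bo, true, false, true, seq);
+ *		if (ret == -EAGAIN) {
+ *			unreserve_reserved(&list);	(a hypothetical helper)
+ *			goto retry;
+ *		}
+ *	}
+ *
+ * "entry", "seq" and unreserve_reserved() are illustrative only; the
+ * in-tree version of this loop lives in ttm_execbuf_util.c.
+ */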
+
+int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+				  bool interruptible, uint32_t sequence)
+{
+	bool wake_up = false;
+	int ret;
+
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+		WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+
+		ret = ttm_bo_wait_unreserved(bo, interruptible);
+
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+		wake_up = true;
+
+	/**
+	 * Wake up waiters that may need to recheck for deadlock,
+	 * if we decreased the sequence number.
+	 */
+	bo->val_seq = sequence;
+	bo->seq_valid = true;
+	if (wake_up)
+		wake_up_all(&bo->event_queue);
+
+	return 0;
+}
+
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+			    bool interruptible, uint32_t sequence)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count, ret;
+
+	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+	if (likely(!ret)) {
+		spin_lock(&glob->lru_lock);
+		put_count = ttm_bo_del_from_lru(bo);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
+
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+	ttm_bo_add_to_lru(bo);
+	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
+}
+
+void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_global *glob = bo->glob;
+
+	spin_lock(&glob->lru_lock);
+	ttm_bo_unreserve_locked(bo);
+	spin_unlock(&glob->lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_unreserve);
+
+/*
+ * Call with bo->mutex held.
+ */
+static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
+	int ret = 0;
+	uint32_t page_flags = 0;
+
+	TTM_ASSERT_LOCKED(&bo->mutex);
+	bo->ttm = NULL;
+
+	if (bdev->need_dma32)
+		page_flags |= TTM_PAGE_FLAG_DMA32;
+
+	switch (bo->type) {
+	case ttm_bo_type_device:
+		if (zero_alloc)
+			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
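+		/* fall through */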
+	case ttm_bo_type_kernel:
+		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+						      page_flags, glob->dummy_read_page);
+		if (unlikely(bo->ttm == NULL))
+			ret = -ENOMEM;
+		break;
+	case ttm_bo_type_sg:
+		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+						      page_flags | TTM_PAGE_FLAG_SG,
+						      glob->dummy_read_page);
+		if (unlikely(bo->ttm == NULL)) {
+			ret = -ENOMEM;
+			break;
+		}
+		bo->ttm->sg = bo->sg;
+		break;
+	default:
+		pr_err("Illegal buffer object type\n");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+				  struct ttm_mem_reg *mem,
+				  bool evict, bool interruptible,
+				  bool no_wait_gpu)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
+	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
+	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
+	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
+	int ret = 0;
+
+	if (old_is_pci || new_is_pci ||
+	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+		ret = ttm_mem_io_lock(old_man, true);
+		if (unlikely(ret != 0))
+			goto out_err;
+		ttm_bo_unmap_virtual_locked(bo);
+		ttm_mem_io_unlock(old_man);
+	}
+
+	/*
+	 * Create and bind a ttm if required.
+	 */
+
+	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
+		if (bo->ttm == NULL) {
+			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+			ret = ttm_bo_add_ttm(bo, zero);
+			if (ret)
+				goto out_err;
+		}
+
+		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
+		if (ret)
+			goto out_err;
+
+		if (mem->mem_type != TTM_PL_SYSTEM) {
+			ret = ttm_tt_bind(bo->ttm, mem);
+			if (ret)
+				goto out_err;
+		}
+
+		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+			if (bdev->driver->move_notify)
+				bdev->driver->move_notify(bo, mem);
+			bo->mem = *mem;
+			mem->mm_node = NULL;
+			goto moved;
+		}
+	}
+
+	if (bdev->driver->move_notify)
+		bdev->driver->move_notify(bo, mem);
+
+	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
+		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
+	else if (bdev->driver->move)
+		ret = bdev->driver->move(bo, evict, interruptible,
+					 no_wait_gpu, mem);
+	else
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+
+	if (ret) {
+		if (bdev->driver->move_notify) {
+			struct ttm_mem_reg tmp_mem = *mem;
+			*mem = bo->mem;
+			bo->mem = tmp_mem;
+			bdev->driver->move_notify(bo, mem);
+			bo->mem = *mem;
+			*mem = tmp_mem;
+		}
+
+		goto out_err;
+	}
+
+moved:
+	if (bo->evicted) {
+		if (bdev->driver->invalidate_caches) {
+			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+			if (ret)
+				pr_err("Can not flush read caches\n");
+		}
+		bo->evicted = false;
+	}
+
+	if (bo->mem.mm_node) {
+		bo->offset = (bo->mem.start << PAGE_SHIFT) +
+		    bdev->man[bo->mem.mem_type].gpu_offset;
+		bo->cur_placement = bo->mem.placement;
+	} else
+		bo->offset = 0;
+
+	return 0;
+
+out_err:
+	new_man = &bdev->man[bo->mem.mem_type];
+	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
+		ttm_tt_unbind(bo->ttm);
+		ttm_tt_destroy(bo->ttm);
+		bo->ttm = NULL;
+	}
+
+	return ret;
+}
+
+/**
+ * Call with bo::reserved held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver-specific hooks to release
+ * driver-private resources.
+ * Will release the bo::reserved lock.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+	if (bo->bdev->driver->move_notify)
+		bo->bdev->driver->move_notify(bo, NULL);
+
+	if (bo->ttm) {
+		ttm_tt_unbind(bo->ttm);
+		ttm_tt_destroy(bo->ttm);
+		bo->ttm = NULL;
+	}
+	ttm_bo_mem_put(bo, &bo->mem);
+
+	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
+
+	/*
+	 * Since the final reference to this bo may not be dropped by
+	 * the current task we have to put a memory barrier here to make
+	 * sure the changes done in this function are always visible.
+	 *
+	 * This function only needs protection against the final kref_put.
+	 */
+	smp_mb__before_atomic_dec();
+}
+
+static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
+	struct ttm_bo_driver *driver = bdev->driver;
+	void *sync_obj = NULL;
+	int put_count;
+	int ret;
+
+	spin_lock(&glob->lru_lock);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+
+	spin_lock(&bdev->fence_lock);
+	(void) ttm_bo_wait(bo, false, false, true);
+	if (!ret && !bo->sync_obj) {
+		spin_unlock(&bdev->fence_lock);
+		put_count = ttm_bo_del_from_lru(bo);
+
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_cleanup_memtype_use(bo);
+
+		ttm_bo_list_ref_sub(bo, put_count, true);
+
+		return;
+	}
+	if (bo->sync_obj)
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	spin_unlock(&bdev->fence_lock);
+
+	if (!ret) {
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
+	}
+
+	kref_get(&bo->list_kref);
+	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+	spin_unlock(&glob->lru_lock);
+
+	if (sync_obj) {
+		driver->sync_obj_flush(sync_obj);
+		driver->sync_obj_unref(&sync_obj);
+	}
+	schedule_delayed_work(&bdev->wq,
+			      ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+
+/**
+ * ttm_bo_cleanup_refs_and_unlock
+ * If the bo is idle, remove it from the delayed and lru lists, and unref it.
+ * If not idle, do nothing.
+ *
+ * Must be called with lru_lock and reservation held; this function
+ * will drop both before returning.
+ *
+ * @interruptible         Any sleeps should occur interruptibly.
+ * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
+ */
+
+static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
+					  bool interruptible,
+					  bool no_wait_gpu)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_driver *driver = bdev->driver;
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count;
+	int ret;
+
+	spin_lock(&bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, false, true);
+
+	if (ret && !no_wait_gpu) {
+		void *sync_obj;
+
+		/*
+		 * Take a reference to the fence and unreserve;
+		 * at this point the buffer should be dead, so
+		 * no new sync objects can be attached.
+		 */
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+		spin_unlock(&bdev->fence_lock);
+
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
+		spin_unlock(&glob->lru_lock);
+
+		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
+		driver->sync_obj_unref(&sync_obj);
+		if (ret)
+			return ret;
+
+		/*
+		 * Remove sync_obj with ttm_bo_wait; the wait should be
+		 * finished, and no new wait object should have been added.
+		 */
+		spin_lock(&bdev->fence_lock);
+		ret = ttm_bo_wait(bo, false, false, true);
+		WARN_ON(ret);
+		spin_unlock(&bdev->fence_lock);
+		if (ret)
+			return ret;
+
+		spin_lock(&glob->lru_lock);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+
+		/*
+		 * We raced, and lost, someone else holds the reservation now,
+		 * and is probably busy in ttm_bo_cleanup_memtype_use.
+		 *
+		 * Even if it's not the case, because we finished waiting any
+		 * delayed destruction would succeed, so just return success
+		 * here.
+		 */
+		if (ret) {
+			spin_unlock(&glob->lru_lock);
+			return 0;
+		}
+	} else
+		spin_unlock(&bdev->fence_lock);
+
+	if (ret || unlikely(list_empty(&bo->ddestroy))) {
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
+		spin_unlock(&glob->lru_lock);
+		return ret;
+	}
+
+	put_count = ttm_bo_del_from_lru(bo);
+	list_del_init(&bo->ddestroy);
+	++put_count;
+
+	spin_unlock(&glob->lru_lock);
+	ttm_bo_cleanup_memtype_use(bo);
+
+	ttm_bo_list_ref_sub(bo, put_count, true);
+
+	return 0;
+}
+
+/**
+ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
+ * encountered buffers.
+ */
+
+static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+{
+	struct ttm_bo_global *glob = bdev->glob;
+	struct ttm_buffer_object *entry = NULL;
+	int ret = 0;
+
+	spin_lock(&glob->lru_lock);
+	if (list_empty(&bdev->ddestroy))
+		goto out_unlock;
+
+	entry = list_first_entry(&bdev->ddestroy,
+		struct ttm_buffer_object, ddestroy);
+	kref_get(&entry->list_kref);
+
+	for (;;) {
+		struct ttm_buffer_object *nentry = NULL;
+
+		if (entry->ddestroy.next != &bdev->ddestroy) {
+			nentry = list_first_entry(&entry->ddestroy,
+				struct ttm_buffer_object, ddestroy);
+			kref_get(&nentry->list_kref);
+		}
+
+		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+		if (remove_all && ret) {
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(entry, false, false,
+						   false, 0);
+			spin_lock(&glob->lru_lock);
+		}
+
+		if (!ret)
+			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
+							     !remove_all);
+		else
+			spin_unlock(&glob->lru_lock);
+
+		kref_put(&entry->list_kref, ttm_bo_release_list);
+		entry = nentry;
+
+		if (ret || !entry)
+			goto out;
+
+		spin_lock(&glob->lru_lock);
+		if (list_empty(&entry->ddestroy))
+			break;
+	}
+
+out_unlock:
+	spin_unlock(&glob->lru_lock);
+out:
+	if (entry)
+		kref_put(&entry->list_kref, ttm_bo_release_list);
+	return ret;
+}
+
+static void ttm_bo_delayed_workqueue(struct work_struct *work)
+{
+	struct ttm_bo_device *bdev =
+	    container_of(work, struct ttm_bo_device, wq.work);
+
+	if (ttm_bo_delayed_delete(bdev, false)) {
+		schedule_delayed_work(&bdev->wq,
+				      ((HZ / 100) < 1) ? 1 : HZ / 100);
+	}
+}
+
+static void ttm_bo_release(struct kref *kref)
+{
+	struct ttm_buffer_object *bo =
+	    container_of(kref, struct ttm_buffer_object, kref);
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+	write_lock(&bdev->vm_lock);
+	if (likely(bo->vm_node != NULL)) {
+		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
+		drm_mm_put_block(bo->vm_node);
+		bo->vm_node = NULL;
+	}
+	write_unlock(&bdev->vm_lock);
+	ttm_mem_io_lock(man, false);
+	ttm_mem_io_free_vm(bo);
+	ttm_mem_io_unlock(man);
+	ttm_bo_cleanup_refs_or_queue(bo);
+	kref_put(&bo->list_kref, ttm_bo_release_list);
+}
+
+void ttm_bo_unref(struct ttm_buffer_object **p_bo)
+{
+	struct ttm_buffer_object *bo = *p_bo;
+
+	*p_bo = NULL;
+	kref_put(&bo->kref, ttm_bo_release);
+}
+EXPORT_SYMBOL(ttm_bo_unref);
+
+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+{
+	return cancel_delayed_work_sync(&bdev->wq);
+}
+EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
+
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+{
+	if (resched)
+		schedule_delayed_work(&bdev->wq,
+				      ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
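+
+/*
+ * Editor's sketch: drivers bracket operations that must not race with
+ * delayed buffer destruction (suspend, GPU reset) with the pair above:
+ *
+ *	int resched = ttm_bo_lock_delayed_workqueue(bdev);
+ *	... quiesce the device ...
+ *	ttm_bo_unlock_delayed_workqueue(bdev, resched);
+ */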
+
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+			bool no_wait_gpu)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_reg evict_mem;
+	struct ttm_placement placement;
+	int ret = 0;
+
+	spin_lock(&bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+	spin_unlock(&bdev->fence_lock);
+
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS) {
+			pr_err("Failed to expire sync object before buffer eviction\n");
+		}
+		goto out;
+	}
+
+	BUG_ON(!ttm_bo_is_reserved(bo));
+
+	evict_mem = bo->mem;
+	evict_mem.mm_node = NULL;
+	evict_mem.bus.io_reserved_vm = false;
+	evict_mem.bus.io_reserved_count = 0;
+
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 0;
+	placement.num_busy_placement = 0;
+	bdev->driver->evict_flags(bo, &placement);
+	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+				no_wait_gpu);
+	if (ret) {
+		if (ret != -ERESTARTSYS) {
+			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
+			       bo);
+			ttm_bo_mem_space_debug(bo, &placement);
+		}
+		goto out;
+	}
+
+	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
+				     no_wait_gpu);
+	if (ret) {
+		if (ret != -ERESTARTSYS)
+			pr_err("Buffer eviction failed\n");
+		ttm_bo_mem_put(bo, &evict_mem);
+		goto out;
+	}
+	bo->evicted = true;
+out:
+	return ret;
+}
+
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+				uint32_t mem_type,
+				bool interruptible,
+				bool no_wait_gpu)
+{
+	struct ttm_bo_global *glob = bdev->glob;
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	struct ttm_buffer_object *bo;
+	int ret = -EBUSY, put_count;
+
+	spin_lock(&glob->lru_lock);
+	list_for_each_entry(bo, &man->lru, lru) {
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+		if (!ret)
+			break;
+	}
+
+	if (ret) {
+		spin_unlock(&glob->lru_lock);
+		return ret;
+	}
+
+	kref_get(&bo->list_kref);
+
+	if (!list_empty(&bo->ddestroy)) {
+		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+						     no_wait_gpu);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
+		return ret;
+	}
+
+	put_count = ttm_bo_del_from_lru(bo);
+	spin_unlock(&glob->lru_lock);
+
+	BUG_ON(ret != 0);
+
+	ttm_bo_list_ref_sub(bo, put_count, true);
+
+	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
+	ttm_bo_unreserve(bo);
+
+	kref_put(&bo->list_kref, ttm_bo_release_list);
+	return ret;
+}
+
+void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
+
+	if (mem->mm_node)
+		(*man->func->put_node)(man, mem);
+}
+EXPORT_SYMBOL(ttm_bo_mem_put);
+
+/**
+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
+ * space, or we've evicted everything and there isn't enough space.
+ */
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+					uint32_t mem_type,
+					struct ttm_placement *placement,
+					struct ttm_mem_reg *mem,
+					bool interruptible,
+					bool no_wait_gpu)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	int ret;
+
+	do {
+		ret = (*man->func->get_node)(man, bo, placement, mem);
+		if (unlikely(ret != 0))
+			return ret;
+		if (mem->mm_node)
+			break;
+		ret = ttm_mem_evict_first(bdev, mem_type,
+					  interruptible, no_wait_gpu);
+		if (unlikely(ret != 0))
+			return ret;
+	} while (1);
+	if (mem->mm_node == NULL)
+		return -ENOMEM;
+	mem->mem_type = mem_type;
+	return 0;
+}
+
+static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+				      uint32_t cur_placement,
+				      uint32_t proposed_placement)
+{
+	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
+	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
+
+	/**
+	 * Keep current caching if possible.
+	 */
+
+	if ((cur_placement & caching) != 0)
+		result |= (cur_placement & caching);
+	else if ((man->default_caching & caching) != 0)
+		result |= man->default_caching;
+	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
+		result |= TTM_PL_FLAG_CACHED;
+	else if ((TTM_PL_FLAG_WC & caching) != 0)
+		result |= TTM_PL_FLAG_WC;
+	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
+		result |= TTM_PL_FLAG_UNCACHED;
+
+	return result;
+}
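+
+/*
+ * Worked example (editor's note): if the buffer is currently CACHED and
+ * the proposed placement allows (CACHED | WC), the first test keeps
+ * TTM_PL_FLAG_CACHED, so revalidation does not bounce the buffer between
+ * caching modes; the later fallbacks only apply once the current mode is
+ * no longer permitted.
+ */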
+
+static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+				 uint32_t mem_type,
+				 uint32_t proposed_placement,
+				 uint32_t *masked_placement)
+{
+	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
+
+	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
+		return false;
+
+	if ((proposed_placement & man->available_caching) == 0)
+		return false;
+
+	cur_flags |= (proposed_placement & man->available_caching);
+
+	*masked_placement = cur_flags;
+	return true;
+}
+
+/**
+ * Creates space for memory region @mem according to its type.
+ *
+ * This function first searches for free space in compatible memory types in
+ * the priority order defined by the driver.  If free space isn't found, then
+ * ttm_bo_mem_force_space is attempted in priority order to evict and find
+ * space.
+ */
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+			struct ttm_placement *placement,
+			struct ttm_mem_reg *mem,
+			bool interruptible,
+			bool no_wait_gpu)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man;
+	uint32_t mem_type = TTM_PL_SYSTEM;
+	uint32_t cur_flags = 0;
+	bool type_found = false;
+	bool type_ok = false;
+	bool has_erestartsys = false;
+	int i, ret;
+
+	mem->mm_node = NULL;
+	for (i = 0; i < placement->num_placement; ++i) {
+		ret = ttm_mem_type_from_flags(placement->placement[i],
+						&mem_type);
+		if (ret)
+			return ret;
+		man = &bdev->man[mem_type];
+
+		type_ok = ttm_bo_mt_compatible(man,
+						mem_type,
+						placement->placement[i],
+						&cur_flags);
+
+		if (!type_ok)
+			continue;
+
+		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+						  cur_flags);
+		/*
+		 * Copy the access and other non-mapping-related flag bits
+		 * from the memory placement flags into the current flags.
+		 */
+		ttm_flag_masked(&cur_flags, placement->placement[i],
+				~TTM_PL_MASK_MEMTYPE);
+
+		if (mem_type == TTM_PL_SYSTEM)
+			break;
+
+		if (man->has_type && man->use_type) {
+			type_found = true;
+			ret = (*man->func->get_node)(man, bo, placement, mem);
+			if (unlikely(ret))
+				return ret;
+		}
+		if (mem->mm_node)
+			break;
+	}
+
+	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
+		mem->mem_type = mem_type;
+		mem->placement = cur_flags;
+		return 0;
+	}
+
+	if (!type_found)
+		return -EINVAL;
+
+	for (i = 0; i < placement->num_busy_placement; ++i) {
+		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+						&mem_type);
+		if (ret)
+			return ret;
+		man = &bdev->man[mem_type];
+		if (!man->has_type)
+			continue;
+		if (!ttm_bo_mt_compatible(man,
+						mem_type,
+						placement->busy_placement[i],
+						&cur_flags))
+			continue;
+
+		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+						  cur_flags);
+		/*
+		 * Copy the access and other non-mapping-related flag bits
+		 * from the memory placement flags into the current flags.
+		 */
+		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+				~TTM_PL_MASK_MEMTYPE);
+
+		if (mem_type == TTM_PL_SYSTEM) {
+			mem->mem_type = mem_type;
+			mem->placement = cur_flags;
+			mem->mm_node = NULL;
+			return 0;
+		}
+
+		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+						interruptible, no_wait_gpu);
+		if (ret == 0 && mem->mm_node) {
+			mem->placement = cur_flags;
+			return 0;
+		}
+		if (ret == -ERESTARTSYS)
+			has_erestartsys = true;
+	}
+	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_mem_space);
+
+int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+			struct ttm_placement *placement,
+			bool interruptible,
+			bool no_wait_gpu)
+{
+	int ret = 0;
+	struct ttm_mem_reg mem;
+	struct ttm_bo_device *bdev = bo->bdev;
+
+	BUG_ON(!ttm_bo_is_reserved(bo));
+
+	/*
+	 * FIXME: It's possible to pipeline buffer moves.
+	 * Have the driver move function wait for idle when necessary,
+	 * instead of doing it here.
+	 */
+	spin_lock(&bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+	spin_unlock(&bdev->fence_lock);
+	if (ret)
+		return ret;
+	mem.num_pages = bo->num_pages;
+	mem.size = mem.num_pages << PAGE_SHIFT;
+	mem.page_alignment = bo->mem.page_alignment;
+	mem.bus.io_reserved_vm = false;
+	mem.bus.io_reserved_count = 0;
+	/*
+	 * Determine where to move the buffer.
+	 */
+	ret = ttm_bo_mem_space(bo, placement, &mem,
+			       interruptible, no_wait_gpu);
+	if (ret)
+		goto out_unlock;
+	ret = ttm_bo_handle_move_mem(bo, &mem, false,
+				     interruptible, no_wait_gpu);
+out_unlock:
+	if (ret && mem.mm_node)
+		ttm_bo_mem_put(bo, &mem);
+	return ret;
+}
+
+static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+			      struct ttm_mem_reg *mem,
+			      uint32_t *new_flags)
+{
+	int i;
+
+	if (mem->mm_node && placement->lpfn != 0 &&
+	    (mem->start < placement->fpfn ||
+	     mem->start + mem->num_pages > placement->lpfn))
+		return false;
+
+	for (i = 0; i < placement->num_placement; i++) {
+		*new_flags = placement->placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
+	}
+
+	for (i = 0; i < placement->num_busy_placement; i++) {
+		*new_flags = placement->busy_placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
+	}
+
+	return false;
+}
+
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+			struct ttm_placement *placement,
+			bool interruptible,
+			bool no_wait_gpu)
+{
+	int ret;
+	uint32_t new_flags;
+
+	BUG_ON(!ttm_bo_is_reserved(bo));
+	/* Check that range is valid */
+	if (placement->lpfn || placement->fpfn)
+		if (placement->fpfn > placement->lpfn ||
+			(placement->lpfn - placement->fpfn) < bo->num_pages)
+			return -EINVAL;
+	/*
+	 * Check whether we need to move buffer.
+	 */
+	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
+		ret = ttm_bo_move_buffer(bo, placement, interruptible,
+					 no_wait_gpu);
+		if (ret)
+			return ret;
+	} else {
+		/*
+		 * Copy the access and other non-mapping-related flag bits
+		 * from the compatible memory placement flags into the active flags.
+		 */
+		ttm_flag_masked(&bo->mem.placement, new_flags,
+				~TTM_PL_MASK_MEMTYPE);
+	}
+	/*
+	 * We might need to add a TTM.
+	 */
+	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+		ret = ttm_bo_add_ttm(bo, true);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_bo_validate);
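+
+/*
+ * Editor's sketch: a typical caller builds a placement and validates
+ * under reservation.  TTM_PL_FLAG_VRAM stands in for whatever memory
+ * type the driver registered:
+ *
+ *	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;
+ *	struct ttm_placement placement = {
+ *		.num_placement = 1, .placement = &flags,
+ *		.num_busy_placement = 1, .busy_placement = &flags,
+ *	};
+ *	int ret = ttm_bo_reserve(bo, true, false, false, 0);
+ *	if (ret == 0) {
+ *		ret = ttm_bo_validate(bo, &placement, true, false);
+ *		ttm_bo_unreserve(bo);
+ *	}
+ */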
+
+int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
+{
+	BUG_ON((placement->fpfn || placement->lpfn) &&
+	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
+
+	return 0;
+}
+
+int ttm_bo_init(struct ttm_bo_device *bdev,
+		struct ttm_buffer_object *bo,
+		unsigned long size,
+		enum ttm_bo_type type,
+		struct ttm_placement *placement,
+		uint32_t page_alignment,
+		bool interruptible,
+		struct file *persistent_swap_storage,
+		size_t acc_size,
+		struct sg_table *sg,
+		void (*destroy) (struct ttm_buffer_object *))
+{
+	int ret = 0;
+	unsigned long num_pages;
+	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+
+	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+	if (ret) {
+		pr_err("Out of kernel memory\n");
+		if (destroy)
+			(*destroy)(bo);
+		else
+			kfree(bo);
+		return -ENOMEM;
+	}
+
+	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (num_pages == 0) {
+		pr_err("Illegal buffer object size\n");
+		if (destroy)
+			(*destroy)(bo);
+		else
+			kfree(bo);
+		ttm_mem_global_free(mem_glob, acc_size);
+		return -EINVAL;
+	}
+	bo->destroy = destroy;
+
+	kref_init(&bo->kref);
+	kref_init(&bo->list_kref);
+	atomic_set(&bo->cpu_writers, 0);
+	atomic_set(&bo->reserved, 1);
+	init_waitqueue_head(&bo->event_queue);
+	INIT_LIST_HEAD(&bo->lru);
+	INIT_LIST_HEAD(&bo->ddestroy);
+	INIT_LIST_HEAD(&bo->swap);
+	INIT_LIST_HEAD(&bo->io_reserve_lru);
+	bo->bdev = bdev;
+	bo->glob = bdev->glob;
+	bo->type = type;
+	bo->num_pages = num_pages;
+	bo->mem.size = num_pages << PAGE_SHIFT;
+	bo->mem.mem_type = TTM_PL_SYSTEM;
+	bo->mem.num_pages = bo->num_pages;
+	bo->mem.mm_node = NULL;
+	bo->mem.page_alignment = page_alignment;
+	bo->mem.bus.io_reserved_vm = false;
+	bo->mem.bus.io_reserved_count = 0;
+	bo->priv_flags = 0;
+	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
+	bo->seq_valid = false;
+	bo->persistent_swap_storage = persistent_swap_storage;
+	bo->acc_size = acc_size;
+	bo->sg = sg;
+	atomic_inc(&bo->glob->bo_count);
+
+	ret = ttm_bo_check_placement(bo, placement);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	/*
+	 * For ttm_bo_type_device buffers, allocate
+	 * address space from the device.
+	 */
+	if (bo->type == ttm_bo_type_device ||
+	    bo->type == ttm_bo_type_sg) {
+		ret = ttm_bo_setup_vm(bo);
+		if (ret)
+			goto out_err;
+	}
+
+	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	if (ret)
+		goto out_err;
+
+	ttm_bo_unreserve(bo);
+	return 0;
+
+out_err:
+	ttm_bo_unreserve(bo);
+	ttm_bo_unref(&bo);
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_init);
+
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+		       unsigned long bo_size,
+		       unsigned struct_size)
+{
+	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+	size_t size = 0;
+
+	size += ttm_round_pot(struct_size);
+	size += PAGE_ALIGN(npages * sizeof(void *));
+	size += ttm_round_pot(sizeof(struct ttm_tt));
+	return size;
+}
+EXPORT_SYMBOL(ttm_bo_acc_size);
+
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+			   unsigned long bo_size,
+			   unsigned struct_size)
+{
+	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+	size_t size = 0;
+
+	size += ttm_round_pot(struct_size);
+	size += PAGE_ALIGN(npages * sizeof(void *));
+	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
+	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+	return size;
+}
+EXPORT_SYMBOL(ttm_bo_dma_acc_size);
+
+int ttm_bo_create(struct ttm_bo_device *bdev,
+			unsigned long size,
+			enum ttm_bo_type type,
+			struct ttm_placement *placement,
+			uint32_t page_alignment,
+			bool interruptible,
+			struct file *persistent_swap_storage,
+			struct ttm_buffer_object **p_bo)
+{
+	struct ttm_buffer_object *bo;
+	size_t acc_size;
+	int ret;
+
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (unlikely(bo == NULL))
+		return -ENOMEM;
+
+	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
+	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+			  interruptible, persistent_swap_storage, acc_size,
+			  NULL, NULL);
+	if (likely(ret == 0))
+		*p_bo = bo;
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_create);
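+/*
+ * A minimal usage sketch for ttm_bo_create(), assuming a driver-defined
+ * placement "my_placement" and device "my_bdev" (both hypothetical, not
+ * defined in this file):
+ *
+ *	struct ttm_buffer_object *bo;
+ *	int ret;
+ *
+ *	ret = ttm_bo_create(my_bdev, PAGE_SIZE, ttm_bo_type_device,
+ *			    &my_placement, 0, true, NULL, &bo);
+ *	if (ret == 0) {
+ *		use bo, e.g. via ttm_bo_kmap() or ttm_bo_mmap(), then
+ *		ttm_bo_unref(&bo) to drop the reference when done
+ *	}
+ *
+ * On success the object is returned validated into "my_placement" and
+ * unreserved, since ttm_bo_init() unreserves before returning.
+ */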
+
+static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
+					unsigned mem_type, bool allow_errors)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	struct ttm_bo_global *glob = bdev->glob;
+	int ret;
+
+	/*
+	 * Can't use standard list traversal since we're unlocking.
+	 */
+
+	spin_lock(&glob->lru_lock);
+	while (!list_empty(&man->lru)) {
+		spin_unlock(&glob->lru_lock);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		if (ret) {
+			if (allow_errors) {
+				return ret;
+			} else {
+				pr_err("Cleanup eviction failed\n");
+			}
+		}
+		spin_lock(&glob->lru_lock);
+	}
+	spin_unlock(&glob->lru_lock);
+	return 0;
+}
+
+int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+{
+	struct ttm_mem_type_manager *man;
+	int ret = -EINVAL;
+
+	if (mem_type >= TTM_NUM_MEM_TYPES) {
+		pr_err("Illegal memory type %d\n", mem_type);
+		return ret;
+	}
+	man = &bdev->man[mem_type];
+
+	if (!man->has_type) {
+		pr_err("Trying to take down uninitialized memory manager type %u\n",
+		       mem_type);
+		return ret;
+	}
+
+	man->use_type = false;
+	man->has_type = false;
+
+	ret = 0;
+	if (mem_type > 0) {
+		ttm_bo_force_list_clean(bdev, mem_type, false);
+
+		ret = (*man->func->takedown)(man);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_clean_mm);
+
+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
+	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
+		pr_err("Illegal memory manager memory type %u\n", mem_type);
+		return -EINVAL;
+	}
+
+	if (!man->has_type) {
+		pr_err("Memory type %u has not been initialized\n", mem_type);
+		return 0;
+	}
+
+	return ttm_bo_force_list_clean(bdev, mem_type, true);
+}
+EXPORT_SYMBOL(ttm_bo_evict_mm);
+
+int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+			unsigned long p_size)
+{
+	int ret = -EINVAL;
+	struct ttm_mem_type_manager *man;
+
+	BUG_ON(type >= TTM_NUM_MEM_TYPES);
+	man = &bdev->man[type];
+	BUG_ON(man->has_type);
+	man->io_reserve_fastpath = true;
+	man->use_io_reserve_lru = false;
+	mutex_init(&man->io_reserve_mutex);
+	INIT_LIST_HEAD(&man->io_reserve_lru);
+
+	ret = bdev->driver->init_mem_type(bdev, type, man);
+	if (ret)
+		return ret;
+	man->bdev = bdev;
+
+	ret = 0;
+	if (type != TTM_PL_SYSTEM) {
+		ret = (*man->func->init)(man, p_size);
+		if (ret)
+			return ret;
+	}
+	man->has_type = true;
+	man->use_type = true;
+	man->size = p_size;
+
+	INIT_LIST_HEAD(&man->lru);
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_bo_init_mm);
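+/*
+ * A minimal sketch of bringing up a driver-managed memory type with
+ * ttm_bo_init_mm(); "my_bdev" and "vram_size" are hypothetical. Note
+ * that p_size is given in pages, not bytes:
+ *
+ *	ret = ttm_bo_init_mm(my_bdev, TTM_PL_VRAM,
+ *			     vram_size >> PAGE_SHIFT);
+ *	if (ret)
+ *		goto out_err;
+ */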
+
+static void ttm_bo_global_kobj_release(struct kobject *kobj)
+{
+	struct ttm_bo_global *glob =
+		container_of(kobj, struct ttm_bo_global, kobj);
+
+	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
+	__free_page(glob->dummy_read_page);
+	kfree(glob);
+}
+
+void ttm_bo_global_release(struct drm_global_reference *ref)
+{
+	struct ttm_bo_global *glob = ref->object;
+
+	kobject_del(&glob->kobj);
+	kobject_put(&glob->kobj);
+}
+EXPORT_SYMBOL(ttm_bo_global_release);
+
+int ttm_bo_global_init(struct drm_global_reference *ref)
+{
+	struct ttm_bo_global_ref *bo_ref =
+		container_of(ref, struct ttm_bo_global_ref, ref);
+	struct ttm_bo_global *glob = ref->object;
+	int ret;
+
+	mutex_init(&glob->device_list_mutex);
+	spin_lock_init(&glob->lru_lock);
+	glob->mem_glob = bo_ref->mem_glob;
+	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+
+	if (unlikely(glob->dummy_read_page == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_drp;
+	}
+
+	INIT_LIST_HEAD(&glob->swap_lru);
+	INIT_LIST_HEAD(&glob->device_list);
+
+	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
+	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
+	if (unlikely(ret != 0)) {
+		pr_err("Could not register buffer object swapout\n");
+		goto out_no_shrink;
+	}
+
+	atomic_set(&glob->bo_count, 0);
+
+	ret = kobject_init_and_add(
+		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
+	if (unlikely(ret != 0))
+		kobject_put(&glob->kobj);
+	return ret;
+out_no_shrink:
+	__free_page(glob->dummy_read_page);
+out_no_drp:
+	kfree(glob);
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_global_init);
+
+
+int ttm_bo_device_release(struct ttm_bo_device *bdev)
+{
+	int ret = 0;
+	unsigned i = TTM_NUM_MEM_TYPES;
+	struct ttm_mem_type_manager *man;
+	struct ttm_bo_global *glob = bdev->glob;
+
+	while (i--) {
+		man = &bdev->man[i];
+		if (man->has_type) {
+			man->use_type = false;
+			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
+				ret = -EBUSY;
+				pr_err("DRM memory manager type %d is not clean\n",
+				       i);
+			}
+			man->has_type = false;
+		}
+	}
+
+	mutex_lock(&glob->device_list_mutex);
+	list_del(&bdev->device_list);
+	mutex_unlock(&glob->device_list_mutex);
+
+	cancel_delayed_work_sync(&bdev->wq);
+
+	while (ttm_bo_delayed_delete(bdev, true))
+		;
+
+	spin_lock(&glob->lru_lock);
+	if (list_empty(&bdev->ddestroy))
+		TTM_DEBUG("Delayed destroy list was clean\n");
+
+	if (list_empty(&bdev->man[0].lru))
+		TTM_DEBUG("Swap list was clean\n");
+	spin_unlock(&glob->lru_lock);
+
+	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
+	write_lock(&bdev->vm_lock);
+	drm_mm_takedown(&bdev->addr_space_mm);
+	write_unlock(&bdev->vm_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_device_release);
+
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
+		       struct ttm_bo_global *glob,
+		       struct ttm_bo_driver *driver,
+		       uint64_t file_page_offset,
+		       bool need_dma32)
+{
+	int ret = -EINVAL;
+
+	rwlock_init(&bdev->vm_lock);
+	bdev->driver = driver;
+
+	memset(bdev->man, 0, sizeof(bdev->man));
+
+	/*
+	 * Initialize the system memory buffer type.
+	 * Other types need to be driver / IOCTL initialized.
+	 */
+	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
+	if (unlikely(ret != 0))
+		goto out_no_sys;
+
+	bdev->addr_space_rb = RB_ROOT;
+	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
+	if (unlikely(ret != 0))
+		goto out_no_addr_mm;
+
+	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
+	INIT_LIST_HEAD(&bdev->ddestroy);
+	bdev->dev_mapping = NULL;
+	bdev->glob = glob;
+	bdev->need_dma32 = need_dma32;
+	bdev->val_seq = 0;
+	spin_lock_init(&bdev->fence_lock);
+	mutex_lock(&glob->device_list_mutex);
+	list_add_tail(&bdev->device_list, &glob->device_list);
+	mutex_unlock(&glob->device_list_mutex);
+
+	return 0;
+out_no_addr_mm:
+	ttm_bo_clean_mm(bdev, 0);
+out_no_sys:
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_device_init);
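+/*
+ * A rough bring-up order as used by typical drivers (a sketch; the
+ * "my_"-prefixed names and MY_FILE_PAGE_OFFSET are hypothetical):
+ * acquire the global TTM references, initialize the device, then any
+ * driver-managed memory types.
+ *
+ *	ret = ttm_bo_device_init(&my_dev->bdev, my_glob, &my_bo_driver,
+ *				 MY_FILE_PAGE_OFFSET, my_need_dma32);
+ *	if (ret)
+ *		return ret;
+ *	ret = ttm_bo_init_mm(&my_dev->bdev, TTM_PL_VRAM, vram_pages);
+ *
+ * The TTM_PL_SYSTEM type is already set up internally by
+ * ttm_bo_device_init() above.
+ */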
+
+/*
+ * buffer object vm functions.
+ */
+
+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
+		if (mem->mem_type == TTM_PL_SYSTEM)
+			return false;
+
+		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
+			return false;
+
+		if (mem->placement & TTM_PL_FLAG_CACHED)
+			return false;
+	}
+	return true;
+}
+
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	loff_t offset = (loff_t) bo->addr_space_offset;
+	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+
+	if (!bdev->dev_mapping)
+		return;
+	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+	ttm_mem_io_free_vm(bo);
+}
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+	ttm_mem_io_lock(man, false);
+	ttm_bo_unmap_virtual_locked(bo);
+	ttm_mem_io_unlock(man);
+}
+EXPORT_SYMBOL(ttm_bo_unmap_virtual);
+
+static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
+	struct rb_node *parent = NULL;
+	struct ttm_buffer_object *cur_bo;
+	unsigned long offset = bo->vm_node->start;
+	unsigned long cur_offset;
+
+	while (*cur) {
+		parent = *cur;
+		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
+		cur_offset = cur_bo->vm_node->start;
+		if (offset < cur_offset)
+			cur = &parent->rb_left;
+		else if (offset > cur_offset)
+			cur = &parent->rb_right;
+		else
+			BUG();
+	}
+
+	rb_link_node(&bo->vm_rb, parent, cur);
+	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
+}
+
+/**
+ * ttm_bo_setup_vm:
+ *
+ * @bo: the buffer to allocate address space for
+ *
+ * Allocate address space in the drm device so that applications
+ * can mmap the buffer and access the contents. This only
+ * applies to ttm_bo_type_device and ttm_bo_type_sg objects, as
+ * other types are not placed in the drm device address space.
+ */
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	int ret;
+
+retry_pre_get:
+	ret = drm_mm_pre_get(&bdev->addr_space_mm);
+	if (unlikely(ret != 0))
+		return ret;
+
+	write_lock(&bdev->vm_lock);
+	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
+					 bo->mem.num_pages, 0, 0);
+
+	if (unlikely(bo->vm_node == NULL)) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
+					      bo->mem.num_pages, 0);
+
+	if (unlikely(bo->vm_node == NULL)) {
+		write_unlock(&bdev->vm_lock);
+		goto retry_pre_get;
+	}
+
+	ttm_bo_vm_insert_rb(bo);
+	write_unlock(&bdev->vm_lock);
+	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
+
+	return 0;
+out_unlock:
+	write_unlock(&bdev->vm_lock);
+	return ret;
+}
+
+int ttm_bo_wait(struct ttm_buffer_object *bo,
+		bool lazy, bool interruptible, bool no_wait)
+{
+	struct ttm_bo_driver *driver = bo->bdev->driver;
+	struct ttm_bo_device *bdev = bo->bdev;
+	void *sync_obj;
+	int ret = 0;
+
+	if (likely(bo->sync_obj == NULL))
+		return 0;
+
+	while (bo->sync_obj) {
+
+		if (driver->sync_obj_signaled(bo->sync_obj)) {
+			void *tmp_obj = bo->sync_obj;
+			bo->sync_obj = NULL;
+			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+			spin_unlock(&bdev->fence_lock);
+			driver->sync_obj_unref(&tmp_obj);
+			spin_lock(&bdev->fence_lock);
+			continue;
+		}
+
+		if (no_wait)
+			return -EBUSY;
+
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+		spin_unlock(&bdev->fence_lock);
+		ret = driver->sync_obj_wait(sync_obj,
+					    lazy, interruptible);
+		if (unlikely(ret != 0)) {
+			driver->sync_obj_unref(&sync_obj);
+			spin_lock(&bdev->fence_lock);
+			return ret;
+		}
+		spin_lock(&bdev->fence_lock);
+		if (likely(bo->sync_obj == sync_obj)) {
+			void *tmp_obj = bo->sync_obj;
+			bo->sync_obj = NULL;
+			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
+				  &bo->priv_flags);
+			spin_unlock(&bdev->fence_lock);
+			driver->sync_obj_unref(&sync_obj);
+			driver->sync_obj_unref(&tmp_obj);
+			spin_lock(&bdev->fence_lock);
+		} else {
+			spin_unlock(&bdev->fence_lock);
+			driver->sync_obj_unref(&sync_obj);
+			spin_lock(&bdev->fence_lock);
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_bo_wait);
+
+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	int ret = 0;
+
+	/*
+	 * Using ttm_bo_reserve makes sure the lru lists are updated.
+	 */
+
+	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+	if (unlikely(ret != 0))
+		return ret;
+	spin_lock(&bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, true, no_wait);
+	spin_unlock(&bdev->fence_lock);
+	if (likely(ret == 0))
+		atomic_inc(&bo->cpu_writers);
+	ttm_bo_unreserve(bo);
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
+
+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
+{
+	atomic_dec(&bo->cpu_writers);
+}
+EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
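+/*
+ * A minimal sketch of the grab/release pair around CPU writes; the
+ * buffer pointer "bo" is assumed to be held by the caller:
+ *
+ *	ret = ttm_bo_synccpu_write_grab(bo, false);
+ *	if (ret == 0) {
+ *		write to the buffer contents from the CPU
+ *		ttm_bo_synccpu_write_release(bo);
+ *	}
+ *
+ * While the grab is held, cpu_writers stays elevated and the execbuf
+ * reservation path backs off with -EBUSY (see ttm_eu_reserve_buffers()).
+ */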
+
+/**
+ * A buffer object shrink method that tries to swap out the first
+ * buffer object on the ttm_bo_global::swap_lru list.
+ */
+
+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+{
+	struct ttm_bo_global *glob =
+	    container_of(shrink, struct ttm_bo_global, shrink);
+	struct ttm_buffer_object *bo;
+	int ret = -EBUSY;
+	int put_count;
+	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
+
+	spin_lock(&glob->lru_lock);
+	list_for_each_entry(bo, &glob->swap_lru, swap) {
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+		if (!ret)
+			break;
+	}
+
+	if (ret) {
+		spin_unlock(&glob->lru_lock);
+		return ret;
+	}
+
+	kref_get(&bo->list_kref);
+
+	if (!list_empty(&bo->ddestroy)) {
+		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
+		return ret;
+	}
+
+	put_count = ttm_bo_del_from_lru(bo);
+	spin_unlock(&glob->lru_lock);
+
+	ttm_bo_list_ref_sub(bo, put_count, true);
+
+	/**
+	 * Wait for GPU, then move to system cached.
+	 */
+
+	spin_lock(&bo->bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, false, false);
+	spin_unlock(&bo->bdev->fence_lock);
+
+	if (unlikely(ret != 0))
+		goto out;
+
+	if ((bo->mem.placement & swap_placement) != swap_placement) {
+		struct ttm_mem_reg evict_mem;
+
+		evict_mem = bo->mem;
+		evict_mem.mm_node = NULL;
+		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+		evict_mem.mem_type = TTM_PL_SYSTEM;
+
+		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
+					     false, false);
+		if (unlikely(ret != 0))
+			goto out;
+	}
+
+	ttm_bo_unmap_virtual(bo);
+
+	/**
+	 * Swap out. Buffer will be swapped in again as soon as
+	 * anyone tries to access a ttm page.
+	 */
+
+	if (bo->bdev->driver->swap_notify)
+		bo->bdev->driver->swap_notify(bo);
+
+	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
+out:
+
+	/**
+	 * Unreserve without putting on LRU to avoid swapping out an
+	 * already swapped buffer.
+	 */
+
+	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
+	kref_put(&bo->list_kref, ttm_bo_release_list);
+	return ret;
+}
+
+void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
+{
+	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
+		;
+}
+EXPORT_SYMBOL(ttm_bo_swapout_all);
diff --git a/linux-imx/drivers/gpu/drm/ttm/ttm_bo_manager.c b/linux-imx/drivers/gpu/drm/ttm/ttm_bo_manager.c
new file mode 100644
index 0000000..9212494
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -0,0 +1,157 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/drm_mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+/**
+ * Currently we use a spinlock for the lock, but a mutex *may* be
+ * more appropriate to reduce scheduling latency if the range manager
+ * ends up with very fragmented allocation patterns.
+ */
+
+struct ttm_range_manager {
+	struct drm_mm mm;
+	spinlock_t lock;
+};
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+			       struct ttm_buffer_object *bo,
+			       struct ttm_placement *placement,
+			       struct ttm_mem_reg *mem)
+{
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
+	struct drm_mm_node *node = NULL;
+	unsigned long lpfn;
+	int ret;
+
+	lpfn = placement->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+	do {
+		ret = drm_mm_pre_get(mm);
+		if (unlikely(ret))
+			return ret;
+
+		spin_lock(&rman->lock);
+		node = drm_mm_search_free_in_range(mm,
+					mem->num_pages, mem->page_alignment,
+					placement->fpfn, lpfn, 1);
+		if (unlikely(node == NULL)) {
+			spin_unlock(&rman->lock);
+			return 0;
+		}
+		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+						     mem->page_alignment,
+						     placement->fpfn,
+						     lpfn);
+		spin_unlock(&rman->lock);
+	} while (node == NULL);
+
+	mem->mm_node = node;
+	mem->start = node->start;
+	return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+				struct ttm_mem_reg *mem)
+{
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+
+	if (mem->mm_node) {
+		spin_lock(&rman->lock);
+		drm_mm_put_block(mem->mm_node);
+		spin_unlock(&rman->lock);
+		mem->mm_node = NULL;
+	}
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+			   unsigned long p_size)
+{
+	struct ttm_range_manager *rman;
+	int ret;
+
+	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+	if (!rman)
+		return -ENOMEM;
+
+	ret = drm_mm_init(&rman->mm, 0, p_size);
+	if (ret) {
+		kfree(rman);
+		return ret;
+	}
+
+	spin_lock_init(&rman->lock);
+	man->priv = rman;
+	return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
+
+	spin_lock(&rman->lock);
+	if (drm_mm_clean(mm)) {
+		drm_mm_takedown(mm);
+		spin_unlock(&rman->lock);
+		kfree(rman);
+		man->priv = NULL;
+		return 0;
+	}
+	spin_unlock(&rman->lock);
+	return -EBUSY;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+			     const char *prefix)
+{
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+
+	spin_lock(&rman->lock);
+	drm_mm_debug_table(&rman->mm, prefix);
+	spin_unlock(&rman->lock);
+}
+
+const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+	.init = ttm_bo_man_init,
+	.takedown = ttm_bo_man_takedown,
+	.get_node = ttm_bo_man_get_node,
+	.put_node = ttm_bo_man_put_node,
+	.debug = ttm_bo_man_debug
+};
+EXPORT_SYMBOL(ttm_bo_manager_func);
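+/*
+ * A sketch of wiring the range manager into a driver's init_mem_type()
+ * hook; the flag and caching choices below are illustrative driver
+ * policy, not requirements:
+ *
+ *	case TTM_PL_VRAM:
+ *		man->func = &ttm_bo_manager_func;
+ *		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ *			     TTM_MEMTYPE_FLAG_MAPPABLE;
+ *		man->available_caching = TTM_PL_FLAG_UNCACHED |
+ *					 TTM_PL_FLAG_WC;
+ *		man->default_caching = TTM_PL_FLAG_WC;
+ *		break;
+ *
+ * ttm_bo_init_mm() then calls ttm_bo_man_init() through man->func->init.
+ */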
diff --git a/linux-imx/drivers/gpu/drm/ttm/ttm_bo_util.c b/linux-imx/drivers/gpu/drm/ttm/ttm_bo_util.c
new file mode 100644
index 0000000..b7f7571
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -0,0 +1,710 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <linux/io.h>
+#include <linux/highmem.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
+{
+	ttm_bo_mem_put(bo, &bo->mem);
+}
+
+int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+		    bool evict,
+		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+{
+	struct ttm_tt *ttm = bo->ttm;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	int ret;
+
+	if (old_mem->mem_type != TTM_PL_SYSTEM) {
+		ttm_tt_unbind(ttm);
+		ttm_bo_free_old_node(bo);
+		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
+				TTM_PL_MASK_MEM);
+		old_mem->mem_type = TTM_PL_SYSTEM;
+	}
+
+	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (new_mem->mem_type != TTM_PL_SYSTEM) {
+		ret = ttm_tt_bind(ttm, new_mem);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_bo_move_ttm);
+
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
+{
+	if (likely(man->io_reserve_fastpath))
+		return 0;
+
+	if (interruptible)
+		return mutex_lock_interruptible(&man->io_reserve_mutex);
+
+	mutex_lock(&man->io_reserve_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(ttm_mem_io_lock);
+
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	mutex_unlock(&man->io_reserve_mutex);
+}
+EXPORT_SYMBOL(ttm_mem_io_unlock);
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+	struct ttm_buffer_object *bo;
+
+	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+		return -EAGAIN;
+
+	bo = list_first_entry(&man->io_reserve_lru,
+			      struct ttm_buffer_object,
+			      io_reserve_lru);
+	list_del_init(&bo->io_reserve_lru);
+	ttm_bo_unmap_virtual_locked(bo);
+
+	return 0;
+}
+
+
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+		       struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	int ret = 0;
+
+	if (!bdev->driver->io_mem_reserve)
+		return 0;
+	if (likely(man->io_reserve_fastpath))
+		return bdev->driver->io_mem_reserve(bdev, mem);
+
+	if (bdev->driver->io_mem_reserve &&
+	    mem->bus.io_reserved_count++ == 0) {
+retry:
+		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (ret == -EAGAIN) {
+			ret = ttm_mem_io_evict(man);
+			if (ret == 0)
+				goto retry;
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ttm_mem_io_reserve);
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev,
+		     struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	if (bdev->driver->io_mem_reserve &&
+	    --mem->bus.io_reserved_count == 0 &&
+	    bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
+
+}
+EXPORT_SYMBOL(ttm_mem_io_free);
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+	int ret;
+
+	if (!mem->bus.io_reserved_vm) {
+		struct ttm_mem_type_manager *man =
+			&bo->bdev->man[mem->mem_type];
+
+		ret = ttm_mem_io_reserve(bo->bdev, mem);
+		if (unlikely(ret != 0))
+			return ret;
+		mem->bus.io_reserved_vm = true;
+		if (man->use_io_reserve_lru)
+			list_add_tail(&bo->io_reserve_lru,
+				      &man->io_reserve_lru);
+	}
+	return 0;
+}
+
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+
+	if (mem->bus.io_reserved_vm) {
+		mem->bus.io_reserved_vm = false;
+		list_del_init(&bo->io_reserve_lru);
+		ttm_mem_io_free(bo->bdev, mem);
+	}
+}
+
+int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+			void **virtual)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	int ret;
+	void *addr;
+
+	*virtual = NULL;
+	(void) ttm_mem_io_lock(man, false);
+	ret = ttm_mem_io_reserve(bdev, mem);
+	ttm_mem_io_unlock(man);
+	if (ret || !mem->bus.is_iomem)
+		return ret;
+
+	if (mem->bus.addr) {
+		addr = mem->bus.addr;
+	} else {
+		if (mem->placement & TTM_PL_FLAG_WC)
+			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
+		else
+			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+		if (!addr) {
+			(void) ttm_mem_io_lock(man, false);
+			ttm_mem_io_free(bdev, mem);
+			ttm_mem_io_unlock(man);
+			return -ENOMEM;
+		}
+	}
+	*virtual = addr;
+	return 0;
+}
+
+void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+			 void *virtual)
+{
+	struct ttm_mem_type_manager *man;
+
+	man = &bdev->man[mem->mem_type];
+
+	if (virtual && mem->bus.addr == NULL)
+		iounmap(virtual);
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(bdev, mem);
+	ttm_mem_io_unlock(man);
+}
+
+static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
+{
+	uint32_t *dstP =
+	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
+	uint32_t *srcP =
+	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
+
+	int i;
+	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
+		iowrite32(ioread32(srcP++), dstP++);
+	return 0;
+}
+
+static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
+				unsigned long page,
+				pgprot_t prot)
+{
+	struct page *d = ttm->pages[page];
+	void *dst;
+
+	if (!d)
+		return -ENOMEM;
+
+	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+
+#ifdef CONFIG_X86
+	dst = kmap_atomic_prot(d, prot);
+#else
+	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+		dst = vmap(&d, 1, 0, prot);
+	else
+		dst = kmap(d);
+#endif
+	if (!dst)
+		return -ENOMEM;
+
+	memcpy_fromio(dst, src, PAGE_SIZE);
+
+#ifdef CONFIG_X86
+	kunmap_atomic(dst);
+#else
+	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+		vunmap(dst);
+	else
+		kunmap(d);
+#endif
+
+	return 0;
+}
+
+static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
+				unsigned long page,
+				pgprot_t prot)
+{
+	struct page *s = ttm->pages[page];
+	void *src;
+
+	if (!s)
+		return -ENOMEM;
+
+	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
+#ifdef CONFIG_X86
+	src = kmap_atomic_prot(s, prot);
+#else
+	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+		src = vmap(&s, 1, 0, prot);
+	else
+		src = kmap(s);
+#endif
+	if (!src)
+		return -ENOMEM;
+
+	memcpy_toio(dst, src, PAGE_SIZE);
+
+#ifdef CONFIG_X86
+	kunmap_atomic(src);
+#else
+	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+		vunmap(src);
+	else
+		kunmap(s);
+#endif
+
+	return 0;
+}
+
+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+		       bool evict, bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+	struct ttm_tt *ttm = bo->ttm;
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct ttm_mem_reg old_copy = *old_mem;
+	void *old_iomap;
+	void *new_iomap;
+	int ret;
+	unsigned long i;
+	unsigned long page;
+	unsigned long add = 0;
+	int dir;
+
+	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
+	if (ret)
+		return ret;
+	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
+	if (ret)
+		goto out;
+
+	/*
+	 * Single TTM move. NOP.
+	 */
+	if (old_iomap == NULL && new_iomap == NULL)
+		goto out2;
+
+	/*
+	 * Move nonexistent data. NOP.
+	 */
+	if (old_iomap == NULL && ttm == NULL)
+		goto out2;
+
+	/*
+	 * TTM might be null for moves within the same region.
+	 */
+	if (ttm && ttm->state == tt_unpopulated) {
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		if (ret)
+			goto out1;
+	}
+
+	add = 0;
+	dir = 1;
+
+	if ((old_mem->mem_type == new_mem->mem_type) &&
+	    (new_mem->start < old_mem->start + old_mem->size)) {
+		dir = -1;
+		add = new_mem->num_pages - 1;
+	}
+
+	for (i = 0; i < new_mem->num_pages; ++i) {
+		page = i * dir + add;
+		if (old_iomap == NULL) {
+			pgprot_t prot = ttm_io_prot(old_mem->placement,
+						    PAGE_KERNEL);
+			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+						   prot);
+		} else if (new_iomap == NULL) {
+			pgprot_t prot = ttm_io_prot(new_mem->placement,
+						    PAGE_KERNEL);
+			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+						   prot);
+		} else
+			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
+		if (ret)
+			goto out1;
+	}
+	mb();
+out2:
+	old_copy = *old_mem;
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+
+	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
+		ttm_tt_unbind(ttm);
+		ttm_tt_destroy(ttm);
+		bo->ttm = NULL;
+	}
+
+out1:
+	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
+out:
+	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+
+	/*
+	 * On error, keep the mm node!
+	 */
+	if (!ret)
+		ttm_bo_mem_put(bo, &old_copy);
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_move_memcpy);
+
+static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
+{
+	kfree(bo);
+}
+
+/**
+ * ttm_buffer_object_transfer
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
+ * holding the data of @bo with the old placement.
+ *
+ * This is a utility function that may be called after an accelerated move
+ * has been scheduled. A new buffer object is created as a placeholder for
+ * the old data while it's being copied. When that buffer object is idle,
+ * it can be destroyed, releasing the space of the old placement.
+ * Returns:
+ * Zero on success, or a negative error code on failure.
+ */
+
+static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
+				      struct ttm_buffer_object **new_obj)
+{
+	struct ttm_buffer_object *fbo;
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_driver *driver = bdev->driver;
+
+	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
+	if (!fbo)
+		return -ENOMEM;
+
+	*fbo = *bo;
+
+	/**
+	 * Fix up members that we shouldn't copy directly:
+	 * TODO: Explicit member copy would probably be better here.
+	 */
+
+	init_waitqueue_head(&fbo->event_queue);
+	INIT_LIST_HEAD(&fbo->ddestroy);
+	INIT_LIST_HEAD(&fbo->lru);
+	INIT_LIST_HEAD(&fbo->swap);
+	INIT_LIST_HEAD(&fbo->io_reserve_lru);
+	fbo->vm_node = NULL;
+	atomic_set(&fbo->cpu_writers, 0);
+
+	spin_lock(&bdev->fence_lock);
+	if (bo->sync_obj)
+		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	else
+		fbo->sync_obj = NULL;
+	spin_unlock(&bdev->fence_lock);
+	kref_init(&fbo->list_kref);
+	kref_init(&fbo->kref);
+	fbo->destroy = &ttm_transfered_destroy;
+	fbo->acc_size = 0;
+
+	*new_obj = fbo;
+	return 0;
+}
+
+pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+{
+#if defined(__i386__) || defined(__x86_64__)
+	if (caching_flags & TTM_PL_FLAG_WC)
+		tmp = pgprot_writecombine(tmp);
+	else if (boot_cpu_data.x86 > 3)
+		tmp = pgprot_noncached(tmp);
+
+#elif defined(__powerpc__)
+	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
+		pgprot_val(tmp) |= _PAGE_NO_CACHE;
+		if (caching_flags & TTM_PL_FLAG_UNCACHED)
+			pgprot_val(tmp) |= _PAGE_GUARDED;
+	}
+#endif
+#if defined(__ia64__)
+	if (caching_flags & TTM_PL_FLAG_WC)
+		tmp = pgprot_writecombine(tmp);
+	else
+		tmp = pgprot_noncached(tmp);
+#endif
+#if defined(__sparc__) || defined(__mips__)
+	if (!(caching_flags & TTM_PL_FLAG_CACHED))
+		tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
+EXPORT_SYMBOL(ttm_io_prot);
+
+static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
+			  unsigned long offset,
+			  unsigned long size,
+			  struct ttm_bo_kmap_obj *map)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+
+	if (bo->mem.bus.addr) {
+		map->bo_kmap_type = ttm_bo_map_premapped;
+		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
+	} else {
+		map->bo_kmap_type = ttm_bo_map_iomap;
+		if (mem->placement & TTM_PL_FLAG_WC)
+			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						  size);
+		else
+			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						       size);
+	}
+	return (!map->virtual) ? -ENOMEM : 0;
+}
+
+static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
+			   unsigned long start_page,
+			   unsigned long num_pages,
+			   struct ttm_bo_kmap_obj *map)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+	pgprot_t prot;
+	struct ttm_tt *ttm = bo->ttm;
+	int ret;
+
+	BUG_ON(!ttm);
+
+	if (ttm->state == tt_unpopulated) {
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		if (ret)
+			return ret;
+	}
+
+	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+		/*
+		 * We're mapping a single page, and the desired
+		 * page protection is consistent with the bo.
+		 */
+
+		map->bo_kmap_type = ttm_bo_map_kmap;
+		map->page = ttm->pages[start_page];
+		map->virtual = kmap(map->page);
+	} else {
+		/*
+		 * We need to use vmap to get the desired page protection
+		 * or to make the buffer object look contiguous.
+		 */
+		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
+			PAGE_KERNEL :
+			ttm_io_prot(mem->placement, PAGE_KERNEL);
+		map->bo_kmap_type = ttm_bo_map_vmap;
+		map->virtual = vmap(ttm->pages + start_page, num_pages,
+				    0, prot);
+	}
+	return (!map->virtual) ? -ENOMEM : 0;
+}
+
+int ttm_bo_kmap(struct ttm_buffer_object *bo,
+		unsigned long start_page, unsigned long num_pages,
+		struct ttm_bo_kmap_obj *map)
+{
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
+	unsigned long offset, size;
+	int ret;
+
+	BUG_ON(!list_empty(&bo->swap));
+	map->virtual = NULL;
+	map->bo = bo;
+	if (num_pages > bo->num_pages)
+		return -EINVAL;
+	if (start_page > bo->num_pages)
+		return -EINVAL;
+#if 0
+	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+		return -EPERM;
+#endif
+	(void) ttm_mem_io_lock(man, false);
+	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+	ttm_mem_io_unlock(man);
+	if (ret)
+		return ret;
+	if (!bo->mem.bus.is_iomem) {
+		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
+	} else {
+		offset = start_page << PAGE_SHIFT;
+		size = num_pages << PAGE_SHIFT;
+		return ttm_bo_ioremap(bo, offset, size, map);
+	}
+}
+EXPORT_SYMBOL(ttm_bo_kmap);
+
+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
+{
+	struct ttm_buffer_object *bo = map->bo;
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
+
+	if (!map->virtual)
+		return;
+	switch (map->bo_kmap_type) {
+	case ttm_bo_map_iomap:
+		iounmap(map->virtual);
+		break;
+	case ttm_bo_map_vmap:
+		vunmap(map->virtual);
+		break;
+	case ttm_bo_map_kmap:
+		kunmap(map->page);
+		break;
+	case ttm_bo_map_premapped:
+		break;
+	default:
+		BUG();
+	}
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+	ttm_mem_io_unlock(man);
+	map->virtual = NULL;
+	map->page = NULL;
+}
+EXPORT_SYMBOL(ttm_bo_kunmap);
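+/*
+ * A minimal kmap/kunmap usage sketch; "bo" is assumed to be reserved by
+ * the caller and not swapped out:
+ *
+ *	struct ttm_bo_kmap_obj map;
+ *	bool is_iomem;
+ *	void *ptr;
+ *
+ *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
+ *	if (ret == 0) {
+ *		ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
+ *		access the buffer through ptr
+ *		ttm_bo_kunmap(&map);
+ *	}
+ */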
+
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+			      void *sync_obj,
+			      bool evict,
+			      bool no_wait_gpu,
+			      struct ttm_mem_reg *new_mem)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_driver *driver = bdev->driver;
+	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	int ret;
+	struct ttm_buffer_object *ghost_obj;
+	void *tmp_obj = NULL;
+
+	spin_lock(&bdev->fence_lock);
+	if (bo->sync_obj) {
+		tmp_obj = bo->sync_obj;
+		bo->sync_obj = NULL;
+	}
+	bo->sync_obj = driver->sync_obj_ref(sync_obj);
+	if (evict) {
+		ret = ttm_bo_wait(bo, false, false, false);
+		spin_unlock(&bdev->fence_lock);
+		if (tmp_obj)
+			driver->sync_obj_unref(&tmp_obj);
+		if (ret)
+			return ret;
+
+		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+		    (bo->ttm != NULL)) {
+			ttm_tt_unbind(bo->ttm);
+			ttm_tt_destroy(bo->ttm);
+			bo->ttm = NULL;
+		}
+		ttm_bo_free_old_node(bo);
+	} else {
+		/**
+		 * This should help pipeline ordinary buffer moves.
+		 *
+		 * Hang old buffer memory on a new buffer object,
+		 * and leave it to be released when the GPU
+		 * operation has completed.
+		 */
+
+		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+		spin_unlock(&bdev->fence_lock);
+		if (tmp_obj)
+			driver->sync_obj_unref(&tmp_obj);
+
+		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+		if (ret)
+			return ret;
+
+		/**
+		 * If we're not moving to fixed memory, the TTM object
+		 * needs to stay alive. Otherwise hang it on the ghost
+		 * bo to be unbound and destroyed.
+		 */
+
+		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
+			ghost_obj->ttm = NULL;
+		else
+			bo->ttm = NULL;
+
+		ttm_bo_unreserve(ghost_obj);
+		ttm_bo_unref(&ghost_obj);
+	}
+
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
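+/*
+ * A sketch of a driver "move" hook finishing an accelerated blit with
+ * ttm_bo_move_accel_cleanup(); my_copy_schedule() is a hypothetical
+ * stand-in for driver code that emits the copy and returns a driver
+ * fence (sync object):
+ *
+ *	fence = my_copy_schedule(bo, old_mem, new_mem);
+ *	if (IS_ERR(fence))
+ *		return PTR_ERR(fence);
+ *	return ttm_bo_move_accel_cleanup(bo, fence, evict,
+ *					 no_wait_gpu, new_mem);
+ */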
diff --git a/linux-imx/drivers/gpu/drm/ttm/ttm_bo_vm.c b/linux-imx/drivers/gpu/drm/ttm/ttm_bo_vm.c
new file mode 100644
index 0000000..3df9f16
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -0,0 +1,465 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <ttm/ttm_module.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#define TTM_BO_VM_NUM_PREFAULT 16
+
+static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
+						     unsigned long page_start,
+						     unsigned long num_pages)
+{
+	struct rb_node *cur = bdev->addr_space_rb.rb_node;
+	unsigned long cur_offset;
+	struct ttm_buffer_object *bo;
+	struct ttm_buffer_object *best_bo = NULL;
+
+	while (likely(cur != NULL)) {
+		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
+		cur_offset = bo->vm_node->start;
+		if (page_start >= cur_offset) {
+			cur = cur->rb_right;
+			best_bo = bo;
+			if (page_start == cur_offset)
+				break;
+		} else
+			cur = cur->rb_left;
+	}
+
+	if (unlikely(best_bo == NULL))
+		return NULL;
+
+	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
+		     (page_start + num_pages)))
+		return NULL;
+
+	return best_bo;
+}
+
+static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+	    vma->vm_private_data;
+	struct ttm_bo_device *bdev = bo->bdev;
+	unsigned long page_offset;
+	unsigned long page_last;
+	unsigned long pfn;
+	struct ttm_tt *ttm = NULL;
+	struct page *page;
+	int ret;
+	int i;
+	unsigned long address = (unsigned long)vmf->virtual_address;
+	int retval = VM_FAULT_NOPAGE;
+	struct ttm_mem_type_manager *man =
+		&bdev->man[bo->mem.mem_type];
+
+	/*
+	 * Work around locking order reversal in fault / nopfn
+	 * between mmap_sem and bo_reserve: Perform a trylock operation
+	 * for reserve, and if it fails, retry the fault after scheduling.
+	 */
+
+	ret = ttm_bo_reserve(bo, true, true, false, 0);
+	if (unlikely(ret != 0)) {
+		if (ret == -EBUSY)
+			set_need_resched();
+		return VM_FAULT_NOPAGE;
+	}
+
+	if (bdev->driver->fault_reserve_notify) {
+		ret = bdev->driver->fault_reserve_notify(bo);
+		switch (ret) {
+		case 0:
+			break;
+		case -EBUSY:
+			set_need_resched();
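+			/* fall through */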
+		case -ERESTARTSYS:
+			retval = VM_FAULT_NOPAGE;
+			goto out_unlock;
+		default:
+			retval = VM_FAULT_SIGBUS;
+			goto out_unlock;
+		}
+	}
+
+	/*
+	 * Wait for buffer data in transit, due to a pipelined
+	 * move.
+	 */
+
+	spin_lock(&bdev->fence_lock);
+	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
+		ret = ttm_bo_wait(bo, false, true, false);
+		spin_unlock(&bdev->fence_lock);
+		if (unlikely(ret != 0)) {
+			retval = (ret != -ERESTARTSYS) ?
+			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
+			goto out_unlock;
+		}
+	} else
+		spin_unlock(&bdev->fence_lock);
+
+	ret = ttm_mem_io_lock(man, true);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_NOPAGE;
+		goto out_unlock;
+	}
+	ret = ttm_mem_io_reserve_vm(bo);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_SIGBUS;
+		goto out_io_unlock;
+	}
+
+	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
+	    bo->vm_node->start - vma->vm_pgoff;
+	page_last = vma_pages(vma) +
+	    bo->vm_node->start - vma->vm_pgoff;
+
+	if (unlikely(page_offset >= bo->num_pages)) {
+		retval = VM_FAULT_SIGBUS;
+		goto out_io_unlock;
+	}
+
+	/*
+	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
+	 * since the mmap_sem is only held in read mode. However, we
+	 * modify only the caching bits of vma->vm_page_prot and
+	 * consider those bits protected by
+	 * the bo->mutex, as we should be the only writers.
+	 * There shouldn't really be any readers of these bits except
+	 * within vm_insert_mixed()? fork?
+	 *
+	 * TODO: Add a list of vmas to the bo, and change the
+	 * vma->vm_page_prot when the object changes caching policy, with
+	 * the correct locks held.
+	 */
+	if (bo->mem.bus.is_iomem) {
+		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
+						vma->vm_page_prot);
+	} else {
+		ttm = bo->ttm;
+		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
+		    vm_get_page_prot(vma->vm_flags) :
+		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+
+		/* Allocate all pages at once, the most common usage */
+		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+			retval = VM_FAULT_OOM;
+			goto out_io_unlock;
+		}
+	}
+
+	/*
+	 * Speculatively prefault a number of pages. Only error on
+	 * first page.
+	 */
+	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
+		if (bo->mem.bus.is_iomem)
+			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
+		else {
+			page = ttm->pages[page_offset];
+			if (unlikely(!page && i == 0)) {
+				retval = VM_FAULT_OOM;
+				goto out_io_unlock;
+			} else if (unlikely(!page)) {
+				break;
+			}
+			pfn = page_to_pfn(page);
+		}
+
+		ret = vm_insert_mixed(vma, address, pfn);
+		/*
+		 * Somebody beat us to this PTE, we prefaulted an
+		 * already populated PTE, or a prefaulting error occurred.
+		 */
+
+		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+			break;
+		else if (unlikely(ret != 0)) {
+			retval =
+			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+			goto out_io_unlock;
+		}
+
+		address += PAGE_SIZE;
+		if (unlikely(++page_offset >= page_last))
+			break;
+	}
+out_io_unlock:
+	ttm_mem_io_unlock(man);
+out_unlock:
+	ttm_bo_unreserve(bo);
+	return retval;
+}
+
+static void ttm_bo_vm_open(struct vm_area_struct *vma)
+{
+	struct ttm_buffer_object *bo =
+	    (struct ttm_buffer_object *)vma->vm_private_data;
+
+	(void)ttm_bo_reference(bo);
+}
+
+static void ttm_bo_vm_close(struct vm_area_struct *vma)
+{
+	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
+
+	ttm_bo_unref(&bo);
+	vma->vm_private_data = NULL;
+}
+
+static const struct vm_operations_struct ttm_bo_vm_ops = {
+	.fault = ttm_bo_vm_fault,
+	.open = ttm_bo_vm_open,
+	.close = ttm_bo_vm_close
+};
+
+int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+		struct ttm_bo_device *bdev)
+{
+	struct ttm_bo_driver *driver;
+	struct ttm_buffer_object *bo;
+	int ret;
+
+	read_lock(&bdev->vm_lock);
+	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
+				 vma_pages(vma));
+	if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
+		bo = NULL;
+	read_unlock(&bdev->vm_lock);
+
+	if (unlikely(bo == NULL)) {
+		pr_err("Could not find buffer object to map\n");
+		return -EINVAL;
+	}
+
+	driver = bo->bdev->driver;
+	if (unlikely(!driver->verify_access)) {
+		ret = -EPERM;
+		goto out_unref;
+	}
+	ret = driver->verify_access(bo, filp);
+	if (unlikely(ret != 0))
+		goto out_unref;
+
+	vma->vm_ops = &ttm_bo_vm_ops;
+
+	/*
+	 * Note: We're transferring the bo reference to
+	 * vma->vm_private_data here.
+	 */
+
+	vma->vm_private_data = bo;
+	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	return 0;
+out_unref:
+	ttm_bo_unref(&bo);
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_mmap);
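+/*
+ * A minimal sketch of a driver file_operations mmap handler built on
+ * ttm_bo_mmap(); "my_device" and how it is recovered from the file are
+ * hypothetical:
+ *
+ *	static int my_mmap(struct file *filp, struct vm_area_struct *vma)
+ *	{
+ *		struct my_device *mdev = my_device_from_file(filp);
+ *
+ *		return ttm_bo_mmap(filp, vma, &mdev->bdev);
+ *	}
+ */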
+
+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+{
+	if (vma->vm_pgoff != 0)
+		return -EACCES;
+
+	vma->vm_ops = &ttm_bo_vm_ops;
+	vma->vm_private_data = ttm_bo_reference(bo);
+	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+	return 0;
+}
+EXPORT_SYMBOL(ttm_fbdev_mmap);
+
+
+ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+		  const char __user *wbuf, char __user *rbuf, size_t count,
+		  loff_t *f_pos, bool write)
+{
+	struct ttm_buffer_object *bo;
+	struct ttm_bo_driver *driver;
+	struct ttm_bo_kmap_obj map;
+	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
+	unsigned long kmap_offset;
+	unsigned long kmap_end;
+	unsigned long kmap_num;
+	size_t io_size;
+	unsigned int page_offset;
+	char *virtual;
+	int ret;
+	bool no_wait = false;
+	bool dummy;
+
+	read_lock(&bdev->vm_lock);
+	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
+	if (likely(bo != NULL))
+		ttm_bo_reference(bo);
+	read_unlock(&bdev->vm_lock);
+
+	if (unlikely(bo == NULL))
+		return -EFAULT;
+
+	driver = bo->bdev->driver;
+	if (unlikely(!driver->verify_access)) {
+		ret = -EPERM;
+		goto out_unref;
+	}
+
+	ret = driver->verify_access(bo, filp);
+	if (unlikely(ret != 0))
+		goto out_unref;
+
+	kmap_offset = dev_offset - bo->vm_node->start;
+	if (unlikely(kmap_offset >= bo->num_pages)) {
+		ret = -EFBIG;
+		goto out_unref;
+	}
+
+	page_offset = *f_pos & ~PAGE_MASK;
+	io_size = bo->num_pages - kmap_offset;
+	io_size = (io_size << PAGE_SHIFT) - page_offset;
+	if (count < io_size)
+		io_size = count;
+
+	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+	kmap_num = kmap_end - kmap_offset + 1;
+
+	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -EBUSY:
+		ret = -EAGAIN;
+		goto out_unref;
+	default:
+		goto out_unref;
+	}
+
+	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unreserve(bo);
+		goto out_unref;
+	}
+
+	virtual = ttm_kmap_obj_virtual(&map, &dummy);
+	virtual += page_offset;
+
+	if (write)
+		ret = copy_from_user(virtual, wbuf, io_size);
+	else
+		ret = copy_to_user(rbuf, virtual, io_size);
+
+	ttm_bo_kunmap(&map);
+	ttm_bo_unreserve(bo);
+	ttm_bo_unref(&bo);
+
+	if (unlikely(ret != 0))
+		return -EFBIG;
+
+	*f_pos += io_size;
+
+	return io_size;
+out_unref:
+	ttm_bo_unref(&bo);
+	return ret;
+}
+
+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
+			char __user *rbuf, size_t count, loff_t *f_pos,
+			bool write)
+{
+	struct ttm_bo_kmap_obj map;
+	unsigned long kmap_offset;
+	unsigned long kmap_end;
+	unsigned long kmap_num;
+	size_t io_size;
+	unsigned int page_offset;
+	char *virtual;
+	int ret;
+	bool no_wait = false;
+	bool dummy;
+
+	kmap_offset = (*f_pos >> PAGE_SHIFT);
+	if (unlikely(kmap_offset >= bo->num_pages))
+		return -EFBIG;
+
+	page_offset = *f_pos & ~PAGE_MASK;
+	io_size = bo->num_pages - kmap_offset;
+	io_size = (io_size << PAGE_SHIFT) - page_offset;
+	if (count < io_size)
+		io_size = count;
+
+	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+	kmap_num = kmap_end - kmap_offset + 1;
+
+	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -EBUSY:
+		return -EAGAIN;
+	default:
+		return ret;
+	}
+
+	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unreserve(bo);
+		return ret;
+	}
+
+	virtual = ttm_kmap_obj_virtual(&map, &dummy);
+	virtual += page_offset;
+
+	if (write)
+		ret = copy_from_user(virtual, wbuf, io_size);
+	else
+		ret = copy_to_user(rbuf, virtual, io_size);
+
+	ttm_bo_kunmap(&map);
+	ttm_bo_unreserve(bo);
+	ttm_bo_unref(&bo);
+
+	if (unlikely(ret != 0))
+		return ret;
+
+	*f_pos += io_size;
+
+	return io_size;
+}
diff --git a/linux-imx/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/linux-imx/drivers/gpu/drm/ttm/ttm_execbuf_util.c
new file mode 100644
index 0000000..7b90def
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -0,0 +1,242 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		if (entry->removed) {
+			ttm_bo_add_to_lru(bo);
+			entry->removed = false;
+
+		}
+		entry->reserved = false;
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
+	}
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		if (!entry->removed) {
+			entry->put_count = ttm_bo_del_from_lru(bo);
+			entry->removed = true;
+		}
+	}
+}
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		if (entry->put_count) {
+			ttm_bo_list_ref_sub(bo, entry->put_count, true);
+			entry->put_count = 0;
+		}
+	}
+}
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+	struct ttm_bo_global *glob;
+
+	if (list_empty(list))
+		return;
+
+	entry = list_first_entry(list, struct ttm_validate_buffer, head);
+	glob = entry->bo->glob;
+	spin_lock(&glob->lru_lock);
+	ttm_eu_backoff_reservation_locked(list);
+	spin_unlock(&glob->lru_lock);
+}
+EXPORT_SYMBOL(ttm_eu_backoff_reservation);
+
+/*
+ * Reserve buffers for validation.
+ *
+ * If a buffer in the list is marked for CPU access, we back off and
+ * wait for that buffer to become free for GPU access.
+ *
+ * If a buffer is reserved for another validation, the validator with
+ * the highest validation sequence backs off and waits for that buffer
+ * to become unreserved. This prevents deadlocks when validating multiple
+ * buffers in different orders.
+ */
+
+int ttm_eu_reserve_buffers(struct list_head *list)
+{
+	struct ttm_bo_global *glob;
+	struct ttm_validate_buffer *entry;
+	int ret;
+	uint32_t val_seq;
+
+	if (list_empty(list))
+		return 0;
+
+	list_for_each_entry(entry, list, head) {
+		entry->reserved = false;
+		entry->put_count = 0;
+		entry->removed = false;
+	}
+
+	entry = list_first_entry(list, struct ttm_validate_buffer, head);
+	glob = entry->bo->glob;
+
+	spin_lock(&glob->lru_lock);
+	val_seq = entry->bo->bdev->val_seq++;
+
+retry:
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		/* already slowpath reserved? */
+		if (entry->reserved)
+			continue;
+
+		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
+		switch (ret) {
+		case 0:
+			break;
+		case -EBUSY:
+			ttm_eu_del_from_lru_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(bo, true, false,
+						   true, val_seq);
+			spin_lock(&glob->lru_lock);
+			if (!ret)
+				break;
+
+			if (unlikely(ret != -EAGAIN))
+				goto err;
+
+			/* fallthrough */
+		case -EAGAIN:
+			ttm_eu_backoff_reservation_locked(list);
+
+			/*
+			 * Temporarily increase the sequence number on every
+			 * retry to prevent us from seeing our old reservation
+			 * sequence when someone else has reserved the buffer
+			 * but hasn't updated the seq_valid/seqno members yet.
+			 */
+			val_seq = entry->bo->bdev->val_seq++;
+
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
+			if (unlikely(ret != 0))
+				return ret;
+			spin_lock(&glob->lru_lock);
+			entry->reserved = true;
+			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+				ret = -EBUSY;
+				goto err;
+			}
+			goto retry;
+		default:
+			goto err;
+		}
+
+		entry->reserved = true;
+		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+			ret = -EBUSY;
+			goto err;
+		}
+	}
+
+	ttm_eu_del_from_lru_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+
+	return 0;
+
+err:
+	ttm_eu_backoff_reservation_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+	return ret;
+}
+EXPORT_SYMBOL(ttm_eu_reserve_buffers);
+
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+{
+	struct ttm_validate_buffer *entry;
+	struct ttm_buffer_object *bo;
+	struct ttm_bo_global *glob;
+	struct ttm_bo_device *bdev;
+	struct ttm_bo_driver *driver;
+
+	if (list_empty(list))
+		return;
+
+	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
+	bdev = bo->bdev;
+	driver = bdev->driver;
+	glob = bo->glob;
+
+	spin_lock(&glob->lru_lock);
+	spin_lock(&bdev->fence_lock);
+
+	list_for_each_entry(entry, list, head) {
+		bo = entry->bo;
+		entry->old_sync_obj = bo->sync_obj;
+		bo->sync_obj = driver->sync_obj_ref(sync_obj);
+		ttm_bo_unreserve_locked(bo);
+		entry->reserved = false;
+	}
+	spin_unlock(&bdev->fence_lock);
+	spin_unlock(&glob->lru_lock);
+
+	list_for_each_entry(entry, list, head) {
+		if (entry->old_sync_obj)
+			driver->sync_obj_unref(&entry->old_sync_obj);
+	}
+}
+EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
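+/*
+ * A sketch of the usual execbuf flow built from these helpers; the
+ * validation list entries and "my_fence" are driver-supplied:
+ *
+ *	struct list_head list;
+ *
+ *	INIT_LIST_HEAD(&list);
+ *	add one struct ttm_validate_buffer per referenced bo
+ *
+ *	ret = ttm_eu_reserve_buffers(&list);
+ *	if (ret)
+ *		return ret;
+ *	validate placements and submit the command stream;
+ *	on error, call ttm_eu_backoff_reservation(&list)
+ *	ttm_eu_fence_buffer_objects(&list, my_fence);
+ */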
diff --git a/linux-imx/drivers/gpu/drm/ttm/ttm_lock.c b/linux-imx/drivers/gpu/drm/ttm/ttm_lock.c
new file mode 100644
index 0000000..3daa9a3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/ttm_lock.c
@@ -0,0 +1,310 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <drm/ttm/ttm_lock.h>
+#include <drm/ttm/ttm_module.h>
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#define TTM_WRITE_LOCK_PENDING    (1 << 0)
+#define TTM_VT_LOCK_PENDING       (1 << 1)
+#define TTM_SUSPEND_LOCK_PENDING  (1 << 2)
+#define TTM_VT_LOCK               (1 << 3)
+#define TTM_SUSPEND_LOCK          (1 << 4)
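+
+/*
+ * lock->rw encodes the reader/writer state: 0 means unlocked, a
+ * positive value is the number of active readers, and -1 means
+ * write-locked. The flag bits above keep new readers out while a
+ * writer, a VT switch or a suspend is pending or holds the lock.
+ */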
+
+void ttm_lock_init(struct ttm_lock *lock)
+{
+	spin_lock_init(&lock->lock);
+	init_waitqueue_head(&lock->queue);
+	lock->rw = 0;
+	lock->flags = 0;
+	lock->kill_takers = false;
+	lock->signal = SIGKILL;
+}
+EXPORT_SYMBOL(ttm_lock_init);
+
+void ttm_read_unlock(struct ttm_lock *lock)
+{
+	spin_lock(&lock->lock);
+	if (--lock->rw == 0)
+		wake_up_all(&lock->queue);
+	spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_read_unlock);
+
+static bool __ttm_read_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	spin_lock(&lock->lock);
+	if (unlikely(lock->kill_takers)) {
+		send_sig(lock->signal, current, 0);
+		spin_unlock(&lock->lock);
+		return false;
+	}
+	if (lock->rw >= 0 && lock->flags == 0) {
+		++lock->rw;
+		locked = true;
+	}
+	spin_unlock(&lock->lock);
+	return locked;
+}
+
+int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
+{
+	int ret = 0;
+
+	if (interruptible)
+		ret = wait_event_interruptible(lock->queue,
+					       __ttm_read_lock(lock));
+	else
+		wait_event(lock->queue, __ttm_read_lock(lock));
+	return ret;
+}
+EXPORT_SYMBOL(ttm_read_lock);
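+
+/*
+ * Example (sketch; the dev_priv name is hypothetical): guarding a
+ * command-submission path with the read side of the lock.
+ *
+ *	ret = ttm_read_lock(&dev_priv->ttm_lock, true);
+ *	if (unlikely(ret != 0))
+ *		return ret;		(-ERESTARTSYS on a signal)
+ *	... issue commands ...
+ *	ttm_read_unlock(&dev_priv->ttm_lock);
+ */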
+
+static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
+{
+	bool block = true;
+
+	*locked = false;
+
+	spin_lock(&lock->lock);
+	if (unlikely(lock->kill_takers)) {
+		send_sig(lock->signal, current, 0);
+		spin_unlock(&lock->lock);
+		return false;
+	}
+	if (lock->rw >= 0 && lock->flags == 0) {
+		++lock->rw;
+		block = false;
+		*locked = true;
+	} else if (lock->flags == 0) {
+		block = false;
+	}
+	spin_unlock(&lock->lock);
+
+	return !block;
+}
+
+int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
+{
+	int ret = 0;
+	bool locked;
+
+	if (interruptible)
+		ret = wait_event_interruptible
+			(lock->queue, __ttm_read_trylock(lock, &locked));
+	else
+		wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
+
+	if (unlikely(ret != 0)) {
+		BUG_ON(locked);
+		return ret;
+	}
+
+	return (locked) ? 0 : -EBUSY;
+}
+
+void ttm_write_unlock(struct ttm_lock *lock)
+{
+	spin_lock(&lock->lock);
+	lock->rw = 0;
+	wake_up_all(&lock->queue);
+	spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_write_unlock);
+
+static bool __ttm_write_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	spin_lock(&lock->lock);
+	if (unlikely(lock->kill_takers)) {
+		send_sig(lock->signal, current, 0);
+		spin_unlock(&lock->lock);
+		return false;
+	}
+	if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
+		lock->rw = -1;
+		lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+		locked = true;
+	} else {
+		lock->flags |= TTM_WRITE_LOCK_PENDING;
+	}
+	spin_unlock(&lock->lock);
+	return locked;
+}
+
+int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
+{
+	int ret = 0;
+
+	if (interruptible) {
+		ret = wait_event_interruptible(lock->queue,
+					       __ttm_write_lock(lock));
+		if (unlikely(ret != 0)) {
+			spin_lock(&lock->lock);
+			lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+			wake_up_all(&lock->queue);
+			spin_unlock(&lock->lock);
+		}
+	} else
+		wait_event(lock->queue, __ttm_write_lock(lock));
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_write_lock);
+
+void ttm_write_lock_downgrade(struct ttm_lock *lock)
+{
+	spin_lock(&lock->lock);
+	lock->rw = 1;
+	wake_up_all(&lock->queue);
+	spin_unlock(&lock->lock);
+}
+
+static int __ttm_vt_unlock(struct ttm_lock *lock)
+{
+	int ret = 0;
+
+	spin_lock(&lock->lock);
+	if (unlikely(!(lock->flags & TTM_VT_LOCK)))
+		ret = -EINVAL;
+	lock->flags &= ~TTM_VT_LOCK;
+	wake_up_all(&lock->queue);
+	spin_unlock(&lock->lock);
+
+	return ret;
+}
+
+static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
+	int ret;
+
+	*p_base = NULL;
+	ret = __ttm_vt_unlock(lock);
+	BUG_ON(ret != 0);
+}
+
+static bool __ttm_vt_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	spin_lock(&lock->lock);
+	if (lock->rw == 0) {
+		lock->flags &= ~TTM_VT_LOCK_PENDING;
+		lock->flags |= TTM_VT_LOCK;
+		locked = true;
+	} else {
+		lock->flags |= TTM_VT_LOCK_PENDING;
+	}
+	spin_unlock(&lock->lock);
+	return locked;
+}
+
+int ttm_vt_lock(struct ttm_lock *lock,
+		bool interruptible,
+		struct ttm_object_file *tfile)
+{
+	int ret = 0;
+
+	if (interruptible) {
+		ret = wait_event_interruptible(lock->queue,
+					       __ttm_vt_lock(lock));
+		if (unlikely(ret != 0)) {
+			spin_lock(&lock->lock);
+			lock->flags &= ~TTM_VT_LOCK_PENDING;
+			wake_up_all(&lock->queue);
+			spin_unlock(&lock->lock);
+			return ret;
+		}
+	} else
+		wait_event(lock->queue, __ttm_vt_lock(lock));
+
+	/*
+	 * Add a base-object, the destructor of which will
+	 * make sure the lock is released if the client dies
+	 * while holding it.
+	 */
+
+	ret = ttm_base_object_init(tfile, &lock->base, false,
+				   ttm_lock_type, &ttm_vt_lock_remove, NULL);
+	if (ret)
+		(void)__ttm_vt_unlock(lock);
+	else
+		lock->vt_holder = tfile;
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_vt_lock);
+
+int ttm_vt_unlock(struct ttm_lock *lock)
+{
+	return ttm_ref_object_base_unref(lock->vt_holder,
+					 lock->base.hash.key, TTM_REF_USAGE);
+}
+EXPORT_SYMBOL(ttm_vt_unlock);
+
+void ttm_suspend_unlock(struct ttm_lock *lock)
+{
+	spin_lock(&lock->lock);
+	lock->flags &= ~TTM_SUSPEND_LOCK;
+	wake_up_all(&lock->queue);
+	spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_suspend_unlock);
+
+static bool __ttm_suspend_lock(struct ttm_lock *lock)
+{
+	bool locked = false;
+
+	spin_lock(&lock->lock);
+	if (lock->rw == 0) {
+		lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
+		lock->flags |= TTM_SUSPEND_LOCK;
+		locked = true;
+	} else {
+		lock->flags |= TTM_SUSPEND_LOCK_PENDING;
+	}
+	spin_unlock(&lock->lock);
+	return locked;
+}
+
+void ttm_suspend_lock(struct ttm_lock *lock)
+{
+	wait_event(lock->queue, __ttm_suspend_lock(lock));
+}
+EXPORT_SYMBOL(ttm_suspend_lock);
diff --git a/linux-imx/drivers/gpu/drm/ttm/ttm_memory.c b/linux-imx/drivers/gpu/drm/ttm/ttm_memory.c
new file mode 100644
index 0000000..dbc2def
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/ttm_memory.c
@@ -0,0 +1,601 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define TTM_MEMORY_ALLOC_RETRIES 4
+
+struct ttm_mem_zone {
+	struct kobject kobj;
+	struct ttm_mem_global *glob;
+	const char *name;
+	uint64_t zone_mem;
+	uint64_t emer_mem;
+	uint64_t max_mem;
+	uint64_t swap_limit;
+	uint64_t used_mem;
+};
+
+static struct attribute ttm_mem_sys = {
+	.name = "zone_memory",
+	.mode = S_IRUGO
+};
+static struct attribute ttm_mem_emer = {
+	.name = "emergency_memory",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_mem_max = {
+	.name = "available_memory",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_mem_swap = {
+	.name = "swap_limit",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_mem_used = {
+	.name = "used_memory",
+	.mode = S_IRUGO
+};
+
+static void ttm_mem_zone_kobj_release(struct kobject *kobj)
+{
+	struct ttm_mem_zone *zone =
+		container_of(kobj, struct ttm_mem_zone, kobj);
+
+	pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
+		zone->name, (unsigned long long)zone->used_mem >> 10);
+	kfree(zone);
+}
+
+static ssize_t ttm_mem_zone_show(struct kobject *kobj,
+				 struct attribute *attr,
+				 char *buffer)
+{
+	struct ttm_mem_zone *zone =
+		container_of(kobj, struct ttm_mem_zone, kobj);
+	uint64_t val = 0;
+
+	spin_lock(&zone->glob->lock);
+	if (attr == &ttm_mem_sys)
+		val = zone->zone_mem;
+	else if (attr == &ttm_mem_emer)
+		val = zone->emer_mem;
+	else if (attr == &ttm_mem_max)
+		val = zone->max_mem;
+	else if (attr == &ttm_mem_swap)
+		val = zone->swap_limit;
+	else if (attr == &ttm_mem_used)
+		val = zone->used_mem;
+	spin_unlock(&zone->glob->lock);
+
+	return snprintf(buffer, PAGE_SIZE, "%llu\n",
+			(unsigned long long) val >> 10);
+}
+
+static void ttm_check_swapping(struct ttm_mem_global *glob);
+
+static ssize_t ttm_mem_zone_store(struct kobject *kobj,
+				  struct attribute *attr,
+				  const char *buffer,
+				  size_t size)
+{
+	struct ttm_mem_zone *zone =
+		container_of(kobj, struct ttm_mem_zone, kobj);
+	int chars;
+	unsigned long val;
+	uint64_t val64;
+
+	chars = sscanf(buffer, "%lu", &val);
+	if (chars == 0)
+		return size;
+
+	val64 = val;
+	val64 <<= 10;
+
+	spin_lock(&zone->glob->lock);
+	if (val64 > zone->zone_mem)
+		val64 = zone->zone_mem;
+	if (attr == &ttm_mem_emer) {
+		zone->emer_mem = val64;
+		if (zone->max_mem > val64)
+			zone->max_mem = val64;
+	} else if (attr == &ttm_mem_max) {
+		zone->max_mem = val64;
+		if (zone->emer_mem < val64)
+			zone->emer_mem = val64;
+	} else if (attr == &ttm_mem_swap)
+		zone->swap_limit = val64;
+	spin_unlock(&zone->glob->lock);
+
+	ttm_check_swapping(zone->glob);
+
+	return size;
+}
+
+static struct attribute *ttm_mem_zone_attrs[] = {
+	&ttm_mem_sys,
+	&ttm_mem_emer,
+	&ttm_mem_max,
+	&ttm_mem_swap,
+	&ttm_mem_used,
+	NULL
+};
+
+static const struct sysfs_ops ttm_mem_zone_ops = {
+	.show = &ttm_mem_zone_show,
+	.store = &ttm_mem_zone_store
+};
+
+static struct kobj_type ttm_mem_zone_kobj_type = {
+	.release = &ttm_mem_zone_kobj_release,
+	.sysfs_ops = &ttm_mem_zone_ops,
+	.default_attrs = ttm_mem_zone_attrs,
+};
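+
+/*
+ * The attributes above surface each zone under the memory_accounting
+ * kobject, with all values in KiB; the exact path depends on where the
+ * TTM class device is registered, e.g. (indicative):
+ *
+ *	$ cat /sys/class/drm/ttm/memory_accounting/kernel/used_memory
+ *	# echo 262144 > /sys/class/drm/ttm/memory_accounting/kernel/swap_limit
+ */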
+
+static void ttm_mem_global_kobj_release(struct kobject *kobj)
+{
+	struct ttm_mem_global *glob =
+		container_of(kobj, struct ttm_mem_global, kobj);
+
+	kfree(glob);
+}
+
+static struct kobj_type ttm_mem_glob_kobj_type = {
+	.release = &ttm_mem_global_kobj_release,
+};
+
+static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
+					bool from_wq, uint64_t extra)
+{
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+	uint64_t target;
+
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+
+		if (from_wq)
+			target = zone->swap_limit;
+		else if (capable(CAP_SYS_ADMIN))
+			target = zone->emer_mem;
+		else
+			target = zone->max_mem;
+
+		target = (extra > target) ? 0ULL : target;
+
+		if (zone->used_mem > target)
+			return true;
+	}
+	return false;
+}
+
+/**
+ * At this point we only support a single shrink callback.
+ * Extend this if needed, perhaps using a linked list of callbacks.
+ * Note that this function is reentrant:
+ * many threads may try to swap out at any given time.
+ */
+
+static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
+		       uint64_t extra)
+{
+	int ret;
+	struct ttm_mem_shrink *shrink;
+
+	spin_lock(&glob->lock);
+	if (glob->shrink == NULL)
+		goto out;
+
+	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
+		shrink = glob->shrink;
+		spin_unlock(&glob->lock);
+		ret = shrink->do_shrink(shrink);
+		spin_lock(&glob->lock);
+		if (unlikely(ret != 0))
+			goto out;
+	}
+out:
+	spin_unlock(&glob->lock);
+}
+
+static void ttm_shrink_work(struct work_struct *work)
+{
+	struct ttm_mem_global *glob =
+	    container_of(work, struct ttm_mem_global, work);
+
+	ttm_shrink(glob, true, 0ULL);
+}
+
+static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
+				    const struct sysinfo *si)
+{
+	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	uint64_t mem;
+	int ret;
+
+	if (unlikely(!zone))
+		return -ENOMEM;
+
+	mem = si->totalram - si->totalhigh;
+	mem *= si->mem_unit;
+
+	zone->name = "kernel";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_kernel = zone;
+	ret = kobject_init_and_add(
+		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
+	if (unlikely(ret != 0)) {
+		kobject_put(&zone->kobj);
+		return ret;
+	}
+	glob->zones[glob->num_zones++] = zone;
+	return 0;
+}
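+
+/*
+ * Worked example of the sizing above: with 1 GiB of lowmem the kernel
+ * zone gets max_mem = 512 MiB, emer_mem = 768 MiB and swap_limit =
+ * 384 MiB (max_mem minus one eighth of the zone).
+ */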
+
+#ifdef CONFIG_HIGHMEM
+static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
+				     const struct sysinfo *si)
+{
+	struct ttm_mem_zone *zone;
+	uint64_t mem;
+	int ret;
+
+	if (si->totalhigh == 0)
+		return 0;
+
+	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	if (unlikely(!zone))
+		return -ENOMEM;
+
+	mem = si->totalram;
+	mem *= si->mem_unit;
+
+	zone->name = "highmem";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_highmem = zone;
+	ret = kobject_init_and_add(
+		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
+	if (unlikely(ret != 0)) {
+		kobject_put(&zone->kobj);
+		return ret;
+	}
+	glob->zones[glob->num_zones++] = zone;
+	return 0;
+}
+#else
+static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
+				   const struct sysinfo *si)
+{
+	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	uint64_t mem;
+	int ret;
+
+	if (unlikely(!zone))
+		return -ENOMEM;
+
+	mem = si->totalram;
+	mem *= si->mem_unit;
+
+	/**
+	 * No special dma32 zone needed.
+	 */
+
+	if (mem <= ((uint64_t) 1ULL << 32)) {
+		kfree(zone);
+		return 0;
+	}
+
+	/*
+	 * Limit max dma32 memory to 4GB for now
+	 * until we can figure out how big this
+	 * zone really is.
+	 */
+
+	mem = ((uint64_t) 1ULL << 32);
+	zone->name = "dma32";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_dma32 = zone;
+	ret = kobject_init_and_add(
+		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
+	if (unlikely(ret != 0)) {
+		kobject_put(&zone->kobj);
+		return ret;
+	}
+	glob->zones[glob->num_zones++] = zone;
+	return 0;
+}
+#endif
+
+int ttm_mem_global_init(struct ttm_mem_global *glob)
+{
+	struct sysinfo si;
+	int ret;
+	int i;
+	struct ttm_mem_zone *zone;
+
+	spin_lock_init(&glob->lock);
+	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
+	INIT_WORK(&glob->work, ttm_shrink_work);
+	ret = kobject_init_and_add(
+		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
+	if (unlikely(ret != 0)) {
+		kobject_put(&glob->kobj);
+		return ret;
+	}
+
+	si_meminfo(&si);
+
+	ret = ttm_mem_init_kernel_zone(glob, &si);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+#ifdef CONFIG_HIGHMEM
+	ret = ttm_mem_init_highmem_zone(glob, &si);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+#else
+	ret = ttm_mem_init_dma32_zone(glob, &si);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+#endif
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
+			zone->name, (unsigned long long)zone->max_mem >> 10);
+	}
+	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+	return 0;
+out_no_zone:
+	ttm_mem_global_release(glob);
+	return ret;
+}
+EXPORT_SYMBOL(ttm_mem_global_init);
+
+void ttm_mem_global_release(struct ttm_mem_global *glob)
+{
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
+	/* let the page allocator first stop the shrink work. */
+	ttm_page_alloc_fini();
+	ttm_dma_page_alloc_fini();
+
+	flush_workqueue(glob->swap_queue);
+	destroy_workqueue(glob->swap_queue);
+	glob->swap_queue = NULL;
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		kobject_del(&zone->kobj);
+		kobject_put(&zone->kobj);
+	}
+	kobject_del(&glob->kobj);
+	kobject_put(&glob->kobj);
+}
+EXPORT_SYMBOL(ttm_mem_global_release);
+
+static void ttm_check_swapping(struct ttm_mem_global *glob)
+{
+	bool needs_swapping = false;
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
+	spin_lock(&glob->lock);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (zone->used_mem > zone->swap_limit) {
+			needs_swapping = true;
+			break;
+		}
+	}
+
+	spin_unlock(&glob->lock);
+
+	if (unlikely(needs_swapping))
+		(void)queue_work(glob->swap_queue, &glob->work);
+
+}
+
+static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
+				     struct ttm_mem_zone *single_zone,
+				     uint64_t amount)
+{
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
+	spin_lock(&glob->lock);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (single_zone && zone != single_zone)
+			continue;
+		zone->used_mem -= amount;
+	}
+	spin_unlock(&glob->lock);
+}
+
+void ttm_mem_global_free(struct ttm_mem_global *glob,
+			 uint64_t amount)
+{
+	return ttm_mem_global_free_zone(glob, NULL, amount);
+}
+EXPORT_SYMBOL(ttm_mem_global_free);
+
+static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
+				  struct ttm_mem_zone *single_zone,
+				  uint64_t amount, bool reserve)
+{
+	uint64_t limit;
+	int ret = -ENOMEM;
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
+	spin_lock(&glob->lock);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (single_zone && zone != single_zone)
+			continue;
+
+		limit = (capable(CAP_SYS_ADMIN)) ?
+			zone->emer_mem : zone->max_mem;
+
+		if (zone->used_mem > limit)
+			goto out_unlock;
+	}
+
+	if (reserve) {
+		for (i = 0; i < glob->num_zones; ++i) {
+			zone = glob->zones[i];
+			if (single_zone && zone != single_zone)
+				continue;
+			zone->used_mem += amount;
+		}
+	}
+
+	ret = 0;
+out_unlock:
+	spin_unlock(&glob->lock);
+	ttm_check_swapping(glob);
+
+	return ret;
+}
+
+static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
+				     struct ttm_mem_zone *single_zone,
+				     uint64_t memory,
+				     bool no_wait, bool interruptible)
+{
+	int count = TTM_MEMORY_ALLOC_RETRIES;
+
+	while (unlikely(ttm_mem_global_reserve(glob,
+					       single_zone,
+					       memory, true)
+			!= 0)) {
+		if (no_wait)
+			return -ENOMEM;
+		if (unlikely(count-- == 0))
+			return -ENOMEM;
+		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
+	}
+
+	return 0;
+}
+
+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+			 bool no_wait, bool interruptible)
+{
+	/**
+	 * Normal allocations of kernel memory are registered in
+	 * all zones.
+	 */
+
+	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
+					 interruptible);
+}
+EXPORT_SYMBOL(ttm_mem_global_alloc);
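+
+/*
+ * Example (sketch; struct my_obj is hypothetical): accounting a
+ * driver-private allocation. Every successful alloc must later be
+ * balanced by a free of the same size.
+ *
+ *	ret = ttm_mem_global_alloc(glob, sizeof(struct my_obj),
+ *				   false, true);
+ *	if (unlikely(ret != 0))
+ *		return ret;		(-ENOMEM after shrink retries)
+ *	...
+ *	ttm_mem_global_free(glob, sizeof(struct my_obj));
+ */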
+
+int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+			      struct page *page,
+			      bool no_wait, bool interruptible)
+{
+	struct ttm_mem_zone *zone = NULL;
+
+	/**
+	 * Page allocations may be registered in a single zone
+	 * only: the highmem zone for highmem pages, or the kernel
+	 * zone alone for pages outside the dma32 range.
+	 */
+
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page) && glob->zone_highmem != NULL)
+		zone = glob->zone_highmem;
+#else
+	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+		zone = glob->zone_kernel;
+#endif
+	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
+					 interruptible);
+}
+
+void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
+{
+	struct ttm_mem_zone *zone = NULL;
+
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page) && glob->zone_highmem != NULL)
+		zone = glob->zone_highmem;
+#else
+	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+		zone = glob->zone_kernel;
+#endif
+	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+}
+
+size_t ttm_round_pot(size_t size)
+{
+	if ((size & (size - 1)) == 0)
+		return size;
+	else if (size > PAGE_SIZE)
+		return PAGE_ALIGN(size);
+	else {
+		size_t tmp_size = 4;
+
+		while (tmp_size < size)
+			tmp_size <<= 1;
+
+		return tmp_size;
+	}
+}
+EXPORT_SYMBOL(ttm_round_pot);
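+
+/*
+ * Examples: ttm_round_pot(24) == 32 and ttm_round_pot(4096) == 4096;
+ * for sizes above PAGE_SIZE the result is page aligned, e.g.
+ * ttm_round_pot(PAGE_SIZE + 1) == 2 * PAGE_SIZE.
+ */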
diff --git a/linux-imx/drivers/gpu/drm/ttm/ttm_module.c b/linux-imx/drivers/gpu/drm/ttm/ttm_module.c
new file mode 100644
index 0000000..d7f92fe
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/ttm_module.c
@@ -0,0 +1,102 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ * 	    Jerome Glisse
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/drm_sysfs.h>
+
+static DECLARE_WAIT_QUEUE_HEAD(exit_q);
+static atomic_t device_released;
+
+static struct device_type ttm_drm_class_type = {
+	.name = "ttm",
+	/**
+	 * Add pm ops here.
+	 */
+};
+
+static void ttm_drm_class_device_release(struct device *dev)
+{
+	atomic_set(&device_released, 1);
+	wake_up_all(&exit_q);
+}
+
+static struct device ttm_drm_class_device = {
+	.type = &ttm_drm_class_type,
+	.release = &ttm_drm_class_device_release
+};
+
+struct kobject *ttm_get_kobj(void)
+{
+	struct kobject *kobj = &ttm_drm_class_device.kobj;
+	BUG_ON(kobj == NULL);
+	return kobj;
+}
+
+static int __init ttm_init(void)
+{
+	int ret;
+
+	ret = dev_set_name(&ttm_drm_class_device, "ttm");
+	if (unlikely(ret != 0))
+		return ret;
+
+	atomic_set(&device_released, 0);
+	ret = drm_class_device_register(&ttm_drm_class_device);
+	if (unlikely(ret != 0))
+		goto out_no_dev_reg;
+
+	return 0;
+out_no_dev_reg:
+	atomic_set(&device_released, 1);
+	wake_up_all(&exit_q);
+	return ret;
+}
+
+static void __exit ttm_exit(void)
+{
+	drm_class_device_unregister(&ttm_drm_class_device);
+
+	/**
+	 * Refuse to unload until the TTM device is released.
+	 * Not sure this is 100% needed.
+	 */
+
+	wait_event(exit_q, atomic_read(&device_released) == 1);
+}
+
+module_init(ttm_init);
+module_exit(ttm_exit);
+
+MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
+MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/linux-imx/drivers/gpu/drm/ttm/ttm_object.c b/linux-imx/drivers/gpu/drm/ttm/ttm_object.c
new file mode 100644
index 0000000..58a5f32
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/ttm_object.c
@@ -0,0 +1,454 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.c
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+/**
+ * struct ttm_object_file
+ *
+ * @tdev: Pointer to the ttm_object_device.
+ *
+ * @lock: Lock that protects the ref_list list and the
+ * ref_hash hash tables.
+ *
+ * @ref_list: List of ttm_ref_objects to be destroyed at
+ * file release.
+ *
+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
+ * for fast lookup of ref objects given a base object.
+ */
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <drm/ttm/ttm_object.h>
+#include <drm/ttm/ttm_module.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/atomic.h>
+
+struct ttm_object_file {
+	struct ttm_object_device *tdev;
+	rwlock_t lock;
+	struct list_head ref_list;
+	struct drm_open_hash ref_hash[TTM_REF_NUM];
+	struct kref refcount;
+};
+
+/**
+ * struct ttm_object_device
+ *
+ * @object_lock: lock that protects the object_hash hash table.
+ *
+ * @object_hash: hash table for fast lookup of object global names.
+ *
+ * @object_count: Per device object count.
+ *
+ * This is the per-device data structure needed for ttm object management.
+ */
+
+struct ttm_object_device {
+	spinlock_t object_lock;
+	struct drm_open_hash object_hash;
+	atomic_t object_count;
+	struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_ref_object
+ *
+ * @hash: Hash entry for the per-file object reference hash.
+ *
+ * @head: List entry for the per-file list of ref-objects.
+ *
+ * @kref: Ref count.
+ *
+ * @obj: Base object this ref object is referencing.
+ *
+ * @ref_type: Type of ref object.
+ *
+ * This is similar to an idr object, but it also has a hash table entry
+ * that allows lookup with a pointer to the referenced object as a key. In
+ * that way, one can easily detect whether a base object is referenced by
+ * a particular ttm_object_file. It also carries a ref count to avoid creating
+ * multiple ref objects if a ttm_object_file references the same base
+ * object more than once.
+ */
+
+struct ttm_ref_object {
+	struct drm_hash_item hash;
+	struct list_head head;
+	struct kref kref;
+	enum ttm_ref_type ref_type;
+	struct ttm_base_object *obj;
+	struct ttm_object_file *tfile;
+};
+
+static inline struct ttm_object_file *
+ttm_object_file_ref(struct ttm_object_file *tfile)
+{
+	kref_get(&tfile->refcount);
+	return tfile;
+}
+
+static void ttm_object_file_destroy(struct kref *kref)
+{
+	struct ttm_object_file *tfile =
+		container_of(kref, struct ttm_object_file, refcount);
+
+	kfree(tfile);
+}
+
+
+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
+{
+	struct ttm_object_file *tfile = *p_tfile;
+
+	*p_tfile = NULL;
+	kref_put(&tfile->refcount, ttm_object_file_destroy);
+}
+
+
+int ttm_base_object_init(struct ttm_object_file *tfile,
+			 struct ttm_base_object *base,
+			 bool shareable,
+			 enum ttm_object_type object_type,
+			 void (*refcount_release) (struct ttm_base_object **),
+			 void (*ref_obj_release) (struct ttm_base_object *,
+						  enum ttm_ref_type ref_type))
+{
+	struct ttm_object_device *tdev = tfile->tdev;
+	int ret;
+
+	base->shareable = shareable;
+	base->tfile = ttm_object_file_ref(tfile);
+	base->refcount_release = refcount_release;
+	base->ref_obj_release = ref_obj_release;
+	base->object_type = object_type;
+	kref_init(&base->refcount);
+	spin_lock(&tdev->object_lock);
+	ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
+					    &base->hash,
+					    (unsigned long)base, 31, 0, 0);
+	spin_unlock(&tdev->object_lock);
+	if (unlikely(ret != 0))
+		goto out_err0;
+
+	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+	if (unlikely(ret != 0))
+		goto out_err1;
+
+	ttm_base_object_unref(&base);
+
+	return 0;
+out_err1:
+	spin_lock(&tdev->object_lock);
+	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+	spin_unlock(&tdev->object_lock);
+out_err0:
+	return ret;
+}
+EXPORT_SYMBOL(ttm_base_object_init);
+
+static void ttm_release_base(struct kref *kref)
+{
+	struct ttm_base_object *base =
+	    container_of(kref, struct ttm_base_object, refcount);
+	struct ttm_object_device *tdev = base->tfile->tdev;
+
+	spin_lock(&tdev->object_lock);
+	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+	spin_unlock(&tdev->object_lock);
+
+	/*
+	 * Note: We don't use synchronize_rcu() here because it's far
+	 * too slow. It's up to the user to free the object using
+	 * call_rcu() or ttm_base_object_kfree().
+	 */
+
+	if (base->refcount_release) {
+		ttm_object_file_unref(&base->tfile);
+		base->refcount_release(&base);
+	}
+}
+
+void ttm_base_object_unref(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+
+	*p_base = NULL;
+
+	kref_put(&base->refcount, ttm_release_base);
+}
+EXPORT_SYMBOL(ttm_base_object_unref);
+
+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
+					       uint32_t key)
+{
+	struct ttm_object_device *tdev = tfile->tdev;
+	struct ttm_base_object *base;
+	struct drm_hash_item *hash;
+	int ret;
+
+	rcu_read_lock();
+	ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
+
+	if (likely(ret == 0)) {
+		base = drm_hash_entry(hash, struct ttm_base_object, hash);
+		ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
+	}
+	rcu_read_unlock();
+
+	if (unlikely(ret != 0))
+		return NULL;
+
+	if (tfile != base->tfile && !base->shareable) {
+		pr_err("Attempted access of non-shareable object\n");
+		ttm_base_object_unref(&base);
+		return NULL;
+	}
+
+	return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
+
+int ttm_ref_object_add(struct ttm_object_file *tfile,
+		       struct ttm_base_object *base,
+		       enum ttm_ref_type ref_type, bool *existed)
+{
+	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+	struct ttm_ref_object *ref;
+	struct drm_hash_item *hash;
+	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+	int ret = -EINVAL;
+
+	if (existed != NULL)
+		*existed = true;
+
+	while (ret == -EINVAL) {
+		read_lock(&tfile->lock);
+		ret = drm_ht_find_item(ht, base->hash.key, &hash);
+
+		if (ret == 0) {
+			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+			kref_get(&ref->kref);
+			read_unlock(&tfile->lock);
+			break;
+		}
+
+		read_unlock(&tfile->lock);
+		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+					   false, false);
+		if (unlikely(ret != 0))
+			return ret;
+		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
+		if (unlikely(ref == NULL)) {
+			ttm_mem_global_free(mem_glob, sizeof(*ref));
+			return -ENOMEM;
+		}
+
+		ref->hash.key = base->hash.key;
+		ref->obj = base;
+		ref->tfile = tfile;
+		ref->ref_type = ref_type;
+		kref_init(&ref->kref);
+
+		write_lock(&tfile->lock);
+		ret = drm_ht_insert_item(ht, &ref->hash);
+
+		if (likely(ret == 0)) {
+			list_add_tail(&ref->head, &tfile->ref_list);
+			kref_get(&base->refcount);
+			write_unlock(&tfile->lock);
+			if (existed != NULL)
+				*existed = false;
+			break;
+		}
+
+		write_unlock(&tfile->lock);
+		BUG_ON(ret != -EINVAL);
+
+		ttm_mem_global_free(mem_glob, sizeof(*ref));
+		kfree(ref);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_ref_object_add);
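+
+/*
+ * Example (sketch; the handle variable is hypothetical): taking a
+ * usage reference on a looked-up base object, then dropping the
+ * lookup reference.
+ *
+ *	bool existed;
+ *
+ *	base = ttm_base_object_lookup(tfile, handle);
+ *	if (unlikely(base == NULL))
+ *		return -EINVAL;
+ *	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
+ *	ttm_base_object_unref(&base);
+ */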
+
+static void ttm_ref_object_release(struct kref *kref)
+{
+	struct ttm_ref_object *ref =
+	    container_of(kref, struct ttm_ref_object, kref);
+	struct ttm_base_object *base = ref->obj;
+	struct ttm_object_file *tfile = ref->tfile;
+	struct drm_open_hash *ht;
+	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+
+	ht = &tfile->ref_hash[ref->ref_type];
+	(void)drm_ht_remove_item(ht, &ref->hash);
+	list_del(&ref->head);
+	write_unlock(&tfile->lock);
+
+	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
+		base->ref_obj_release(base, ref->ref_type);
+
+	ttm_base_object_unref(&ref->obj);
+	ttm_mem_global_free(mem_glob, sizeof(*ref));
+	kfree(ref);
+	write_lock(&tfile->lock);
+}
+
+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+			      unsigned long key, enum ttm_ref_type ref_type)
+{
+	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+	struct ttm_ref_object *ref;
+	struct drm_hash_item *hash;
+	int ret;
+
+	write_lock(&tfile->lock);
+	ret = drm_ht_find_item(ht, key, &hash);
+	if (unlikely(ret != 0)) {
+		write_unlock(&tfile->lock);
+		return -EINVAL;
+	}
+	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+	kref_put(&ref->kref, ttm_ref_object_release);
+	write_unlock(&tfile->lock);
+	return 0;
+}
+EXPORT_SYMBOL(ttm_ref_object_base_unref);
+
+void ttm_object_file_release(struct ttm_object_file **p_tfile)
+{
+	struct ttm_ref_object *ref;
+	struct list_head *list;
+	unsigned int i;
+	struct ttm_object_file *tfile = *p_tfile;
+
+	*p_tfile = NULL;
+	write_lock(&tfile->lock);
+
+	/*
+	 * Since we release the lock within the loop, we have to
+	 * restart it from the beginning each time.
+	 */
+
+	while (!list_empty(&tfile->ref_list)) {
+		list = tfile->ref_list.next;
+		ref = list_entry(list, struct ttm_ref_object, head);
+		ttm_ref_object_release(&ref->kref);
+	}
+
+	for (i = 0; i < TTM_REF_NUM; ++i)
+		drm_ht_remove(&tfile->ref_hash[i]);
+
+	write_unlock(&tfile->lock);
+	ttm_object_file_unref(&tfile);
+}
+EXPORT_SYMBOL(ttm_object_file_release);
+
+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
+					     unsigned int hash_order)
+{
+	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+	unsigned int i;
+	unsigned int j = 0;
+	int ret;
+
+	if (unlikely(tfile == NULL))
+		return NULL;
+
+	rwlock_init(&tfile->lock);
+	tfile->tdev = tdev;
+	kref_init(&tfile->refcount);
+	INIT_LIST_HEAD(&tfile->ref_list);
+
+	for (i = 0; i < TTM_REF_NUM; ++i) {
+		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
+		if (ret) {
+			j = i;
+			goto out_err;
+		}
+	}
+
+	return tfile;
+out_err:
+	for (i = 0; i < j; ++i)
+		drm_ht_remove(&tfile->ref_hash[i]);
+
+	kfree(tfile);
+
+	return NULL;
+}
+EXPORT_SYMBOL(ttm_object_file_init);
+
+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
+						 *mem_glob,
+						 unsigned int hash_order)
+{
+	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
+	int ret;
+
+	if (unlikely(tdev == NULL))
+		return NULL;
+
+	tdev->mem_glob = mem_glob;
+	spin_lock_init(&tdev->object_lock);
+	atomic_set(&tdev->object_count, 0);
+	ret = drm_ht_create(&tdev->object_hash, hash_order);
+
+	if (likely(ret == 0))
+		return tdev;
+
+	kfree(tdev);
+	return NULL;
+}
+EXPORT_SYMBOL(ttm_object_device_init);
+
+void ttm_object_device_release(struct ttm_object_device **p_tdev)
+{
+	struct ttm_object_device *tdev = *p_tdev;
+
+	*p_tdev = NULL;
+
+	spin_lock(&tdev->object_lock);
+	drm_ht_remove(&tdev->object_hash);
+	spin_unlock(&tdev->object_lock);
+
+	kfree(tdev);
+}
+EXPORT_SYMBOL(ttm_object_device_release);
diff --git a/linux-imx/drivers/gpu/drm/ttm/ttm_page_alloc.c b/linux-imx/drivers/gpu/drm/ttm/ttm_page_alloc.c
new file mode 100644
index 0000000..bd2a3b4
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -0,0 +1,919 @@
+/*
+ * Copyright (c) Red Hat Inc.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Jerome Glisse <jglisse@redhat.com>
+ *          Pauli Nieminen <suokkos@gmail.com>
+ */
+
+/* simple list based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Uses page->lru to keep a free list
+ * - Doesn't track pages currently in use
+ */
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/atomic.h>
+
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_page_alloc.h>
+
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
+
+#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION		16
+#define FREE_ALL_PAGES			(~0U)
+/* times are in msecs */
+#define PAGE_FREE_INTERVAL		1000
+
+/**
+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
+ *
+ * @lock: Protects the shared pool from concurrent access. Must be used with
+ * the irqsave/irqrestore variants because the pool allocator may be called
+ * from delayed work.
+ * @fill_lock: Prevent concurrent calls to fill.
+ * @list: Pool of free uc/wc pages for fast reuse.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @npages: Number of pages in pool.
+ */
+struct ttm_page_pool {
+	spinlock_t		lock;
+	bool			fill_lock;
+	struct list_head	list;
+	gfp_t			gfp_flags;
+	unsigned		npages;
+	char			*name;
+	unsigned long		nfrees;
+	unsigned long		nrefills;
+};
+
+/**
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is the sysfs store. Changes won't have an immediate
+ * effect anyway, so forcing serialized access to them is pointless.
+ */
+
+struct ttm_pool_opts {
+	unsigned	alloc_size;
+	unsigned	max_size;
+	unsigned	small;
+};
+
+#define NUM_POOLS 4
+
+/**
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * The manager is a read-only object for the pool code, so it doesn't need
+ * locking.
+ *
+ * @free_interval: minimum number of jiffies between freeing pages from a pool.
+ * @page_alloc_inited: reference counting for pool allocation.
+ * @work: Work that is used to shrink the pool. The work is only run when
+ * there are pages to free.
+ * @small_allocation: Limit, in pages, below which an allocation counts as
+ * small.
+ *
+ * @pools: All pool objects in use.
+ **/
+struct ttm_pool_manager {
+	struct kobject		kobj;
+	struct shrinker		mm_shrink;
+	struct ttm_pool_opts	options;
+
+	union {
+		struct ttm_page_pool	pools[NUM_POOLS];
+		struct {
+			struct ttm_page_pool	wc_pool;
+			struct ttm_page_pool	uc_pool;
+			struct ttm_page_pool	wc_pool_dma32;
+			struct ttm_page_pool	uc_pool_dma32;
+		};
+	};
+};
+
+static struct attribute ttm_page_pool_max = {
+	.name = "pool_max_size",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+	.name = "pool_small_allocation",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+	.name = "pool_allocation_size",
+	.mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+	&ttm_page_pool_max,
+	&ttm_page_pool_small,
+	&ttm_page_pool_alloc_size,
+	NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	kfree(m);
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj,
+		struct attribute *attr, const char *buffer, size_t size)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	int chars;
+	unsigned val;
+	chars = sscanf(buffer, "%u", &val);
+	if (chars == 0)
+		return size;
+
+	/* Convert kb to number of pages */
+	val = val / (PAGE_SIZE >> 10);
+
+	if (attr == &ttm_page_pool_max)
+		m->options.max_size = val;
+	else if (attr == &ttm_page_pool_small)
+		m->options.small = val;
+	else if (attr == &ttm_page_pool_alloc_size) {
+		if (val > NUM_PAGES_TO_ALLOC*8) {
+			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			return size;
+		} else if (val > NUM_PAGES_TO_ALLOC) {
+			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
+				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+		}
+		m->options.alloc_size = val;
+	}
+
+	return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj,
+		struct attribute *attr, char *buffer)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	unsigned val = 0;
+
+	if (attr == &ttm_page_pool_max)
+		val = m->options.max_size;
+	else if (attr == &ttm_page_pool_small)
+		val = m->options.small;
+	else if (attr == &ttm_page_pool_alloc_size)
+		val = m->options.alloc_size;
+
+	val = val * (PAGE_SIZE >> 10);
+
+	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+	.show = &ttm_pool_show,
+	.store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+	.release = &ttm_pool_kobj_release,
+	.sysfs_ops = &ttm_pool_sysfs_ops,
+	.default_attrs = ttm_pool_attrs,
+};
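+
+/*
+ * The pool options above are exposed in sysfs below the
+ * memory_accounting kobject, with values in KiB, e.g. (indicative
+ * path):
+ *
+ *	$ cat /sys/class/drm/ttm/memory_accounting/pool/pool_max_size
+ */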
+
+static struct ttm_pool_manager *_manager;
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		unmap_page_from_agp(pages[i]);
+#endif
+	return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+#endif
+	return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+#endif
+	return 0;
+}
+#endif
+
+/**
+ * Select the right pool for the requested caching state and ttm flags.
+ */
+static struct ttm_page_pool *ttm_get_pool(int flags,
+		enum ttm_caching_state cstate)
+{
+	int pool_index;
+
+	if (cstate == tt_cached)
+		return NULL;
+
+	if (cstate == tt_wc)
+		pool_index = 0x0;
+	else
+		pool_index = 0x1;
+
+	if (flags & TTM_PAGE_FLAG_DMA32)
+		pool_index |= 0x2;
+
+	return &_manager->pools[pool_index];
+}
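+
+/*
+ * The resulting mapping: pools[0] = wc, pools[1] = uc,
+ * pools[2] = wc dma32, pools[3] = uc dma32. Cached pages bypass the
+ * pools entirely (NULL is returned).
+ */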
+
+/* set memory back to wb and free the pages. */
+static void ttm_pages_put(struct page *pages[], unsigned npages)
+{
+	unsigned i;
+	if (set_pages_array_wb(pages, npages))
+		pr_err("Failed to set %d pages to wb!\n", npages);
+	for (i = 0; i < npages; ++i)
+		__free_page(pages[i]);
+}
+
+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+		unsigned freed_pages)
+{
+	pool->npages -= freed_pages;
+	pool->nfrees += freed_pages;
+}
+
+/**
+ * Free pages from the pool.
+ *
+ * To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
+ * pages in one go.
+ *
+ * @pool: the pool to free pages from
+ * @nr_free: number of pages to free; FREE_ALL_PAGES frees them all
+ **/
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+{
+	unsigned long irq_flags;
+	struct page *p;
+	struct page **pages_to_free;
+	unsigned freed_pages = 0,
+		 npages_to_free = nr_free;
+
+	if (NUM_PAGES_TO_ALLOC < nr_free)
+		npages_to_free = NUM_PAGES_TO_ALLOC;
+
+	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+			GFP_KERNEL);
+	if (!pages_to_free) {
+		pr_err("Failed to allocate memory for pool free operation\n");
+		return 0;
+	}
+
+restart:
+	spin_lock_irqsave(&pool->lock, irq_flags);
+
+	list_for_each_entry_reverse(p, &pool->list, lru) {
+		if (freed_pages >= npages_to_free)
+			break;
+
+		pages_to_free[freed_pages++] = p;
+		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+			/* remove range of pages from the pool */
+			__list_del(p->lru.prev, &pool->list);
+
+			ttm_pool_update_free_locked(pool, freed_pages);
+			/**
+			 * Because changing page caching is costly
+			 * we unlock the pool to prevent stalling.
+			 */
+			spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+			ttm_pages_put(pages_to_free, freed_pages);
+			if (likely(nr_free != FREE_ALL_PAGES))
+				nr_free -= freed_pages;
+
+			if (NUM_PAGES_TO_ALLOC >= nr_free)
+				npages_to_free = nr_free;
+			else
+				npages_to_free = NUM_PAGES_TO_ALLOC;
+
+			freed_pages = 0;
+
+			/* free all so restart the processing */
+			if (nr_free)
+				goto restart;
+
+			/* Not allowed to fall through or break because
+			 * following context is inside spinlock while we are
+			 * outside here.
+			 */
+			goto out;
+
+		}
+	}
+
+	/* remove range of pages from the pool */
+	if (freed_pages) {
+		__list_del(&p->lru, &pool->list);
+
+		ttm_pool_update_free_locked(pool, freed_pages);
+		nr_free -= freed_pages;
+	}
+
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+	if (freed_pages)
+		ttm_pages_put(pages_to_free, freed_pages);
+out:
+	kfree(pages_to_free);
+	return nr_free;
+}
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_pool_get_num_unused_pages(void)
+{
+	unsigned i;
+	int total = 0;
+	for (i = 0; i < NUM_POOLS; ++i)
+		total += _manager->pools[i].npages;
+
+	return total;
+}
+
+/**
+ * Callback for the mm shrinker to request that the pools reduce the number
+ * of pages held.
+ */
+static int ttm_pool_mm_shrink(struct shrinker *shrink,
+			      struct shrink_control *sc)
+{
+	static atomic_t start_pool = ATOMIC_INIT(0);
+	unsigned i;
+	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	struct ttm_page_pool *pool;
+	int shrink_pages = sc->nr_to_scan;
+
+	pool_offset = pool_offset % NUM_POOLS;
+	/* select start pool in round robin fashion */
+	for (i = 0; i < NUM_POOLS; ++i) {
+		unsigned nr_free = shrink_pages;
+		if (shrink_pages == 0)
+			break;
+		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+		shrink_pages = ttm_page_pool_free(pool, nr_free);
+	}
+	/* return estimated number of unused pages in pool */
+	return ttm_pool_get_num_unused_pages();
+}
+
+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+	manager->mm_shrink.seeks = 1;
+	register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+	unregister_shrinker(&manager->mm_shrink);
+}
+
+static int ttm_set_pages_caching(struct page **pages,
+		enum ttm_caching_state cstate, unsigned cpages)
+{
+	int r = 0;
+	/* Set page caching */
+	switch (cstate) {
+	case tt_uncached:
+		r = set_pages_array_uc(pages, cpages);
+		if (r)
+			pr_err("Failed to set %d pages to uc!\n", cpages);
+		break;
+	case tt_wc:
+		r = set_pages_array_wc(pages, cpages);
+		if (r)
+			pr_err("Failed to set %d pages to wc!\n", cpages);
+		break;
+	default:
+		break;
+	}
+	return r;
+}
+
+/**
+ * Free the pages whose caching-state change failed: they are removed from
+ * the pages list and released. Pages that already changed their caching
+ * state successfully remain on the list.
+ */
+static void ttm_handle_caching_state_failure(struct list_head *pages,
+		int ttm_flags, enum ttm_caching_state cstate,
+		struct page **failed_pages, unsigned cpages)
+{
+	unsigned i;
+	/* Failed pages have to be freed */
+	for (i = 0; i < cpages; ++i) {
+		list_del(&failed_pages[i]->lru);
+		__free_page(failed_pages[i]);
+	}
+}
+
+/**
+ * Allocate new pages with correct caching.
+ *
+ * This function is reentrant as long as the caller updates count based on
+ * the number of pages returned in the pages list.
+ */
+static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
+		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+	struct page **caching_array;
+	struct page *p;
+	int r = 0;
+	unsigned i, cpages;
+	unsigned max_cpages = min(count,
+			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+	/* allocate array for page caching change */
+	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+	if (!caching_array) {
+		pr_err("Unable to allocate table for new pages\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0, cpages = 0; i < count; ++i) {
+		p = alloc_page(gfp_flags);
+
+		if (!p) {
+			pr_err("Unable to get page %u\n", i);
+
+			/* store already allocated pages in the pool after
+			 * setting the caching state */
+			if (cpages) {
+				r = ttm_set_pages_caching(caching_array,
+							  cstate, cpages);
+				if (r)
+					ttm_handle_caching_state_failure(pages,
+						ttm_flags, cstate,
+						caching_array, cpages);
+			}
+			r = -ENOMEM;
+			goto out;
+		}
+
+#ifdef CONFIG_HIGHMEM
+		/* gfp flags of a highmem page should never include dma32,
+		 * so we should be fine in that case
+		 */
+		if (!PageHighMem(p))
+#endif
+		{
+			caching_array[cpages++] = p;
+			if (cpages == max_cpages) {
+
+				r = ttm_set_pages_caching(caching_array,
+						cstate, cpages);
+				if (r) {
+					ttm_handle_caching_state_failure(pages,
+						ttm_flags, cstate,
+						caching_array, cpages);
+					goto out;
+				}
+				cpages = 0;
+			}
+		}
+
+		list_add(&p->lru, pages);
+	}
+
+	if (cpages) {
+		r = ttm_set_pages_caching(caching_array, cstate, cpages);
+		if (r)
+			ttm_handle_caching_state_failure(pages,
+					ttm_flags, cstate,
+					caching_array, cpages);
+	}
+out:
+	kfree(caching_array);
+
+	return r;
+}
+
+/**
+ * Fill the given pool if there aren't enough pages and the requested number of
+ * pages is small.
+ */
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
+		unsigned long *irq_flags)
+{
+	struct page *p;
+	int r;
+	unsigned cpages = 0;
+	/**
+	 * Only allow one pool fill operation at a time.
+	 * If the pool doesn't have enough pages for the allocation, new
+	 * pages are allocated outside of the pool.
+	 */
+	if (pool->fill_lock)
+		return;
+
+	pool->fill_lock = true;
+
+	/* If the allocation request is small and there are not enough
+	 * pages in the pool, we fill the pool up first. */
+	if (count < _manager->options.small
+		&& count > pool->npages) {
+		struct list_head new_pages;
+		unsigned alloc_size = _manager->options.alloc_size;
+
+		/**
+		 * Can't change page caching if in irqsave context. We have to
+		 * drop the pool->lock.
+		 */
+		spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+		INIT_LIST_HEAD(&new_pages);
+		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
+				cstate,	alloc_size);
+		spin_lock_irqsave(&pool->lock, *irq_flags);
+
+		if (!r) {
+			list_splice(&new_pages, &pool->list);
+			++pool->nrefills;
+			pool->npages += alloc_size;
+		} else {
+			pr_err("Failed to fill pool (%p)\n", pool);
+			/* If we have any pages left put them to the pool. */
+			list_for_each_entry(p, &pool->list, lru) {
+				++cpages;
+			}
+			list_splice(&new_pages, &pool->list);
+			pool->npages += cpages;
+		}
+
+	}
+	pool->fill_lock = false;
+}
+
+/**
+ * Cut 'count' number of pages from the pool and put them on the return list.
+ *
+ * @return count of pages still required to fulfill the request.
+ */
+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+					struct list_head *pages,
+					int ttm_flags,
+					enum ttm_caching_state cstate,
+					unsigned count)
+{
+	unsigned long irq_flags;
+	struct list_head *p;
+	unsigned i;
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+
+	if (count >= pool->npages) {
+		/* take all pages from the pool */
+		list_splice_init(&pool->list, pages);
+		count -= pool->npages;
+		pool->npages = 0;
+		goto out;
+	}
+	/* Find the last page to include for the requested number of pages.
+	 * Walk from whichever end of the list is closer to halve the
+	 * search space. */
+	if (count <= pool->npages/2) {
+		i = 0;
+		list_for_each(p, &pool->list) {
+			if (++i == count)
+				break;
+		}
+	} else {
+		i = pool->npages + 1;
+		list_for_each_prev(p, &pool->list) {
+			if (--i == count)
+				break;
+		}
+	}
+	/* Cut 'count' number of pages from the pool */
+	list_cut_position(pages, &pool->list, p);
+	pool->npages -= count;
+	count = 0;
+out:
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+	return count;
+}
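+
+/*
+ * Example of the split above: with 100 pages in the pool, a request
+ * for 10 pages walks forward from the head, while a request for 90
+ * pages walks backward from the tail, so at most half the list is
+ * traversed before list_cut_position() moves the pages.
+ */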
+
+/* Put all pages in pages list to correct pool to wait for reuse */
+static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+			  enum ttm_caching_state cstate)
+{
+	unsigned long irq_flags;
+	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	unsigned i;
+
+	if (pool == NULL) {
+		/* No pool for this memory type so free the pages */
+		for (i = 0; i < npages; i++) {
+			if (pages[i]) {
+				if (page_count(pages[i]) != 1)
+					pr_err("Erroneous page count. Leaking pages.\n");
+				__free_page(pages[i]);
+				pages[i] = NULL;
+			}
+		}
+		return;
+	}
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	for (i = 0; i < npages; i++) {
+		if (pages[i]) {
+			if (page_count(pages[i]) != 1)
+				pr_err("Erroneous page count. Leaking pages.\n");
+			list_add_tail(&pages[i]->lru, &pool->list);
+			pages[i] = NULL;
+			pool->npages++;
+		}
+	}
+	/* Check that we don't go over the pool limit */
+	npages = 0;
+	if (pool->npages > _manager->options.max_size) {
+		npages = pool->npages - _manager->options.max_size;
+		/* free at least NUM_PAGES_TO_ALLOC number of pages
+		 * to reduce calls to set_memory_wb */
+		if (npages < NUM_PAGES_TO_ALLOC)
+			npages = NUM_PAGES_TO_ALLOC;
+	}
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+	if (npages)
+		ttm_page_pool_free(pool, npages);
+}
+
+/*
+ * On success the pages array holds npages correctly cached pages.
+ */
+static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+			 enum ttm_caching_state cstate)
+{
+	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	struct list_head plist;
+	struct page *p = NULL;
+	gfp_t gfp_flags = GFP_USER;
+	unsigned count;
+	int r;
+
+	/* set zero flag for page allocation if required */
+	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+		gfp_flags |= __GFP_ZERO;
+
+	/* No pool for cached pages */
+	if (pool == NULL) {
+		if (flags & TTM_PAGE_FLAG_DMA32)
+			gfp_flags |= GFP_DMA32;
+		else
+			gfp_flags |= GFP_HIGHUSER;
+
+		for (r = 0; r < npages; ++r) {
+			p = alloc_page(gfp_flags);
+			if (!p) {
+				pr_err("Unable to allocate page\n");
+				return -ENOMEM;
+			}
+
+			pages[r] = p;
+		}
+		return 0;
+	}
+
+	/* combine zero flag to pool flags */
+	gfp_flags |= pool->gfp_flags;
+
+	/* First we take pages from the pool */
+	INIT_LIST_HEAD(&plist);
+	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+	count = 0;
+	list_for_each_entry(p, &plist, lru) {
+		pages[count++] = p;
+	}
+
+	/* clear the pages coming from the pool if requested */
+	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+		list_for_each_entry(p, &plist, lru) {
+			if (PageHighMem(p))
+				clear_highpage(p);
+			else
+				clear_page(page_address(p));
+		}
+	}
+
+	/* If pool didn't have enough pages allocate new one. */
+	if (npages > 0) {
+		/* ttm_alloc_new_pages doesn't reference the pool, so we can
+		 * run multiple requests in parallel.
+		 */
+		INIT_LIST_HEAD(&plist);
+		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
+		list_for_each_entry(p, &plist, lru) {
+			pages[count++] = p;
+		}
+		if (r) {
+			/* If there are any pages in the list, put them back
+			 * in the pool. */
+			pr_err("Failed to allocate extra pages for large request\n");
+			ttm_put_pages(pages, count, flags, cstate);
+			return r;
+		}
+	}
+
+	return 0;
+}
+
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+		char *name)
+{
+	spin_lock_init(&pool->lock);
+	pool->fill_lock = false;
+	INIT_LIST_HEAD(&pool->list);
+	pool->npages = pool->nfrees = 0;
+	pool->gfp_flags = flags;
+	pool->name = name;
+}
+
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+	int ret;
+
+	WARN_ON(_manager);
+
+	pr_info("Initializing pool allocator\n");
+
+	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+	if (!_manager)
+		return -ENOMEM;
+
+	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
+
+	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
+
+	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
+				  GFP_USER | GFP_DMA32, "wc dma");
+
+	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
+				  GFP_USER | GFP_DMA32, "uc dma");
+
+	_manager->options.max_size = max_pages;
+	_manager->options.small = SMALL_ALLOCATION;
+	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+				   &glob->kobj, "pool");
+	if (unlikely(ret != 0)) {
+		kobject_put(&_manager->kobj);
+		_manager = NULL;
+		return ret;
+	}
+
+	ttm_pool_mm_shrink_init(_manager);
+
+	return 0;
+}
+
+void ttm_page_alloc_fini(void)
+{
+	int i;
+
+	pr_info("Finalizing pool allocator\n");
+	ttm_pool_mm_shrink_fini(_manager);
+
+	for (i = 0; i < NUM_POOLS; ++i)
+		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+
+	kobject_put(&_manager->kobj);
+	_manager = NULL;
+}
+
+int ttm_pool_populate(struct ttm_tt *ttm)
+{
+	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+	unsigned i;
+	int ret;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		ret = ttm_get_pages(&ttm->pages[i], 1,
+				    ttm->page_flags,
+				    ttm->caching_state);
+		if (ret != 0) {
+			ttm_pool_unpopulate(ttm);
+			return -ENOMEM;
+		}
+
+		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+						false, false);
+		if (unlikely(ret != 0)) {
+			ttm_pool_unpopulate(ttm);
+			return -ENOMEM;
+		}
+	}
+
+	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+		ret = ttm_tt_swapin(ttm);
+		if (unlikely(ret != 0)) {
+			ttm_pool_unpopulate(ttm);
+			return ret;
+		}
+	}
+
+	ttm->state = tt_unbound;
+	return 0;
+}
+EXPORT_SYMBOL(ttm_pool_populate);
+
+void ttm_pool_unpopulate(struct ttm_tt *ttm)
+{
+	unsigned i;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		if (ttm->pages[i]) {
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 ttm->pages[i]);
+			ttm_put_pages(&ttm->pages[i], 1,
+				      ttm->page_flags,
+				      ttm->caching_state);
+		}
+	}
+	ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL(ttm_pool_unpopulate);
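+
+/*
+ * A driver that uses this pool can wire the two helpers above straight
+ * into its ttm_bo_driver, e.g. (sketch; 'my_bo_driver' is illustrative):
+ *
+ *	static struct ttm_bo_driver my_bo_driver = {
+ *		.ttm_tt_populate   = ttm_pool_populate,
+ *		.ttm_tt_unpopulate = ttm_pool_unpopulate,
+ *		...
+ *	};
+ */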
+
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+	struct ttm_page_pool *p;
+	unsigned i;
+	char *h[] = {"pool", "refills", "pages freed", "size"};
+	if (!_manager) {
+		seq_printf(m, "No pool allocator running.\n");
+		return 0;
+	}
+	seq_printf(m, "%6s %12s %13s %8s\n",
+			h[0], h[1], h[2], h[3]);
+	for (i = 0; i < NUM_POOLS; ++i) {
+		p = &_manager->pools[i];
+
+		seq_printf(m, "%6s %12ld %13ld %8d\n",
+				p->name, p->nrefills,
+				p->nfrees, p->npages);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_page_alloc_debugfs);
diff --git a/linux-imx/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/linux-imx/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
new file mode 100644
index 0000000..b8b3943
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -0,0 +1,1131 @@
+/*
+ * Copyright 2011 (c) Oracle Corp.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ */
+
+/*
+ * A simple DMA pool loosely based on dmapool.c. It has certain advantages
+ * over the DMA pools:
+ * - Pool collects recently freed pages for reuse (and hooks up to
+ *   the shrinker).
+ * - Tracks currently in use pages
+ * - Tracks whether the page is UC, WB or cached (and reverts to WB
+ *   when freed).
+ */
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
+
+#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION		4
+#define FREE_ALL_PAGES			(~0U)
+#define IS_UNDEFINED			(0)
+#define IS_WC				(1<<1)
+#define IS_UC				(1<<2)
+#define IS_CACHED			(1<<3)
+#define IS_DMA32			(1<<4)
+
+enum pool_type {
+	POOL_IS_UNDEFINED,
+	POOL_IS_WC = IS_WC,
+	POOL_IS_UC = IS_UC,
+	POOL_IS_CACHED = IS_CACHED,
+	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
+	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
+	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
+};
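+/*
+ * Note that the pool types are bitwise compositions of the IS_* flags
+ * above (e.g. POOL_IS_UC_DMA32 == IS_UC | IS_DMA32); ttm_to_type()
+ * below builds a pool_type the same way, by OR-ing flags together.
+ */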
+/*
+ * The pool structure. There are usually six pools:
+ *  - generic (not restricted to DMA32):
+ *      - write combined, uncached, cached.
+ *  - dma32 (addresses up to 2^32, so up to 4GB):
+ *      - write combined, uncached, cached.
+ * for each 'struct device'. The 'cached' pool is for pages that are actively
+ * used. The other ones can be shrunk by the shrinker API if necessary.
+ * @pools: The 'struct device->dma_pools' link.
+ * @type: Type of the pool
+ * @lock: Protects the inuse_list and free_list from concurrent access. Must
+ * be used with irqsave/irqrestore variants because the pool allocator may be
+ * called from delayed work.
+ * @inuse_list: Pool of pages that are in use. The order is very important and
+ *   it is in the order that the TTM pages that are put back are in.
+ * @free_list: Pool of pages that are free to be used. No order requirements.
+ * @dev: The device that is associated with these pools.
+ * @size: Size used during DMA allocation.
+ * @npages_free: Count of available pages for re-use.
+ * @npages_in_use: Count of pages that are in use.
+ * @nfrees: Stats when pool is shrinking.
+ * @nrefills: Stats when the pool is grown.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @name: Name of the pool.
+ * @dev_name: Name derived from dev - similar to how dev_info works.
+ *   Used during shutdown as the dev_info during release is unavailable.
+ */
+struct dma_pool {
+	struct list_head pools; /* The 'struct device->dma_pools link */
+	enum pool_type type;
+	spinlock_t lock;
+	struct list_head inuse_list;
+	struct list_head free_list;
+	struct device *dev;
+	unsigned size;
+	unsigned npages_free;
+	unsigned npages_in_use;
+	unsigned long nfrees; /* Stats when shrunk. */
+	unsigned long nrefills; /* Stats when grown. */
+	gfp_t gfp_flags;
+	char name[13]; /* "cached dma32" */
+	char dev_name[64]; /* Constructed from dev */
+};
+
+/*
+ * The accounting page keeping track of the allocated page along with
+ * the DMA address.
+ * @page_list: The link to the 'page_list' in 'struct dma_pool'.
+ * @vaddr: The virtual address of the page
+ * @dma: The bus address of the page. If the page is not allocated
+ *   via the DMA API, it will be -1.
+ */
+struct dma_page {
+	struct list_head page_list;
+	void *vaddr;
+	struct page *p;
+	dma_addr_t dma;
+};
+
+/*
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is the sysfs store. They won't have an immediate
+ * effect anyway, so forcing serialized access to them is pointless.
+ */
+
+struct ttm_pool_opts {
+	unsigned	alloc_size;
+	unsigned	max_size;
+	unsigned	small;
+};
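+
+/*
+ * Defaults (set in ttm_dma_page_alloc_init below): alloc_size is
+ * NUM_PAGES_TO_ALLOC, small is SMALL_ALLOCATION, and max_size is the
+ * max_pages argument passed by the caller.
+ */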
+
+/*
+ * Contains the list of all of the 'struct device' and their corresponding
+ * DMA pools. Guarded by _manager->lock.
+ * @pools: The link to 'struct ttm_pool_manager->pools'
+ * @dev: The 'struct device' associated with the 'pool'
+ * @pool: The 'struct dma_pool' associated with the 'dev'
+ */
+struct device_pools {
+	struct list_head pools;
+	struct device *dev;
+	struct dma_pool *pool;
+};
+
+/*
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * @lock: Lock used when adding/removing from pools
+ * @pools: List of 'struct device' and 'struct dma_pool' tuples.
+ * @options: Limits for the pool.
+ * @npools: Total number of pools in existence.
+ * @mm_shrink: The structure used by [un]register_shrinker
+ */
+struct ttm_pool_manager {
+	struct mutex		lock;
+	struct list_head	pools;
+	struct ttm_pool_opts	options;
+	unsigned		npools;
+	struct shrinker		mm_shrink;
+	struct kobject		kobj;
+};
+
+static struct ttm_pool_manager *_manager;
+
+static struct attribute ttm_page_pool_max = {
+	.name = "pool_max_size",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+	.name = "pool_small_allocation",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+	.name = "pool_allocation_size",
+	.mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+	&ttm_page_pool_max,
+	&ttm_page_pool_small,
+	&ttm_page_pool_alloc_size,
+	NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	kfree(m);
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
+			      const char *buffer, size_t size)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	int chars;
+	unsigned val;
+	chars = sscanf(buffer, "%u", &val);
+	if (chars == 0)
+		return size;
+
+	/* Convert kb to number of pages */
+	val = val / (PAGE_SIZE >> 10);
+
+	if (attr == &ttm_page_pool_max)
+		m->options.max_size = val;
+	else if (attr == &ttm_page_pool_small)
+		m->options.small = val;
+	else if (attr == &ttm_page_pool_alloc_size) {
+		if (val > NUM_PAGES_TO_ALLOC*8) {
+			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			return size;
+		} else if (val > NUM_PAGES_TO_ALLOC) {
+			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
+				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+		}
+		m->options.alloc_size = val;
+	}
+
+	return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
+			     char *buffer)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	unsigned val = 0;
+
+	if (attr == &ttm_page_pool_max)
+		val = m->options.max_size;
+	else if (attr == &ttm_page_pool_small)
+		val = m->options.small;
+	else if (attr == &ttm_page_pool_alloc_size)
+		val = m->options.alloc_size;
+
+	val = val * (PAGE_SIZE >> 10);
+
+	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+	.show = &ttm_pool_show,
+	.store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+	.release = &ttm_pool_kobj_release,
+	.sysfs_ops = &ttm_pool_sysfs_ops,
+	.default_attrs = ttm_pool_attrs,
+};
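+
+/*
+ * Example (sketch): with the kobject registered under the ttm_mem_global
+ * kobject as "dma_pool", the attributes above surface as sysfs files such
+ * as .../dma_pool/pool_max_size; values are exchanged in kilobytes and
+ * converted to page counts in ttm_pool_store()/ttm_pool_show().
+ */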
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		unmap_page_from_agp(pages[i]);
+#endif
+	return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+#endif
+	return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+#endif
+	return 0;
+}
+#endif /* for !CONFIG_X86 */
+
+static int ttm_set_pages_caching(struct dma_pool *pool,
+				 struct page **pages, unsigned cpages)
+{
+	int r = 0;
+	/* Set page caching */
+	if (pool->type & IS_UC) {
+		r = set_pages_array_uc(pages, cpages);
+		if (r)
+			pr_err("%s: Failed to set %d pages to uc!\n",
+			       pool->dev_name, cpages);
+	}
+	if (pool->type & IS_WC) {
+		r = set_pages_array_wc(pages, cpages);
+		if (r)
+			pr_err("%s: Failed to set %d pages to wc!\n",
+			       pool->dev_name, cpages);
+	}
+	return r;
+}
+
+static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
+{
+	dma_addr_t dma = d_page->dma;
+	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
+
+	kfree(d_page);
+	d_page = NULL;
+}
+static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
+{
+	struct dma_page *d_page;
+
+	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
+	if (!d_page)
+		return NULL;
+
+	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
+					   &d_page->dma,
+					   pool->gfp_flags);
+	if (d_page->vaddr)
+		d_page->p = virt_to_page(d_page->vaddr);
+	else {
+		kfree(d_page);
+		d_page = NULL;
+	}
+	return d_page;
+}
+static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
+{
+	enum pool_type type = IS_UNDEFINED;
+
+	if (flags & TTM_PAGE_FLAG_DMA32)
+		type |= IS_DMA32;
+	if (cstate == tt_cached)
+		type |= IS_CACHED;
+	else if (cstate == tt_uncached)
+		type |= IS_UC;
+	else
+		type |= IS_WC;
+
+	return type;
+}
+
+static void ttm_pool_update_free_locked(struct dma_pool *pool,
+					unsigned freed_pages)
+{
+	pool->npages_free -= freed_pages;
+	pool->nfrees += freed_pages;
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
+			      struct page *pages[], unsigned npages)
+{
+	struct dma_page *d_page, *tmp;
+
+	/* Don't set WB on WB page pool. */
+	if (npages && !(pool->type & IS_CACHED) &&
+	    set_pages_array_wb(pages, npages))
+		pr_err("%s: Failed to set %d pages to wb!\n",
+		       pool->dev_name, npages);
+
+	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+		list_del(&d_page->page_list);
+		__ttm_dma_free_page(pool, d_page);
+	}
+}
+
+static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+{
+	/* Don't set WB on WB page pool. */
+	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
+		pr_err("%s: Failed to set %d pages to wb!\n",
+		       pool->dev_name, 1);
+
+	list_del(&d_page->page_list);
+	__ttm_dma_free_page(pool, d_page);
+}
+
+/*
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * number of pages in one go.
+ *
+ * @pool: to free the pages from
+ * @nr_free: Number of pages to free; FREE_ALL_PAGES frees the whole pool.
+ **/
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+{
+	unsigned long irq_flags;
+	struct dma_page *dma_p, *tmp;
+	struct page **pages_to_free;
+	struct list_head d_pages;
+	unsigned freed_pages = 0,
+		 npages_to_free = nr_free;
+
+	if (NUM_PAGES_TO_ALLOC < nr_free)
+		npages_to_free = NUM_PAGES_TO_ALLOC;
+#if 0
+	if (nr_free > 1) {
+		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
+			 pool->dev_name, pool->name, current->pid,
+			 npages_to_free, nr_free);
+	}
+#endif
+	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+			GFP_KERNEL);
+
+	if (!pages_to_free) {
+		pr_err("%s: Failed to allocate memory for pool free operation\n",
+		       pool->dev_name);
+		return 0;
+	}
+	INIT_LIST_HEAD(&d_pages);
+restart:
+	spin_lock_irqsave(&pool->lock, irq_flags);
+
+	/* We pick the oldest ones off the list */
+	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
+					 page_list) {
+		if (freed_pages >= npages_to_free)
+			break;
+
+		/* Move the dma_page from one list to another. */
+		list_move(&dma_p->page_list, &d_pages);
+
+		pages_to_free[freed_pages++] = dma_p->p;
+		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+
+			ttm_pool_update_free_locked(pool, freed_pages);
+			/**
+			 * Because changing page caching is costly
+			 * we unlock the pool to prevent stalling.
+			 */
+			spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
+					  freed_pages);
+
+			INIT_LIST_HEAD(&d_pages);
+
+			if (likely(nr_free != FREE_ALL_PAGES))
+				nr_free -= freed_pages;
+
+			if (NUM_PAGES_TO_ALLOC >= nr_free)
+				npages_to_free = nr_free;
+			else
+				npages_to_free = NUM_PAGES_TO_ALLOC;
+
+			freed_pages = 0;
+
+			/* free all so restart the processing */
+			if (nr_free)
+				goto restart;
+
+			/* Not allowed to fall through or break, because
+			 * the code after the loop expects the spinlock to
+			 * be held, and we dropped it above.
+			 */
+			goto out;
+
+		}
+	}
+
+	/* remove range of pages from the pool */
+	if (freed_pages) {
+		ttm_pool_update_free_locked(pool, freed_pages);
+		nr_free -= freed_pages;
+	}
+
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+	if (freed_pages)
+		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
+out:
+	kfree(pages_to_free);
+	return nr_free;
+}
+
+static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
+{
+	struct device_pools *p;
+	struct dma_pool *pool;
+
+	if (!dev)
+		return;
+
+	mutex_lock(&_manager->lock);
+	list_for_each_entry_reverse(p, &_manager->pools, pools) {
+		if (p->dev != dev)
+			continue;
+		pool = p->pool;
+		if (pool->type != type)
+			continue;
+
+		list_del(&p->pools);
+		kfree(p);
+		_manager->npools--;
+		break;
+	}
+	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
+		if (pool->type != type)
+			continue;
+		/* Takes a spinlock.. */
+		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+		/* This code path is called after _all_ references to the
+		 * struct device have been dropped - so nobody should be
+		 * touching it. In case somebody is trying to _add_, we are
+		 * guarded by the mutex. */
+		list_del(&pool->pools);
+		kfree(pool);
+		break;
+	}
+	mutex_unlock(&_manager->lock);
+}
+
+/*
+ * This destructor runs when the 'struct device' is freed, although the
+ * pool might have already been freed earlier.
+ */
+static void ttm_dma_pool_release(struct device *dev, void *res)
+{
+	struct dma_pool *pool = *(struct dma_pool **)res;
+
+	if (pool)
+		ttm_dma_free_pool(dev, pool->type);
+}
+
+static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
+{
+	return *(struct dma_pool **)res == match_data;
+}
+
+static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
+					  enum pool_type type)
+{
+	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
+	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
+	struct device_pools *sec_pool = NULL;
+	struct dma_pool *pool = NULL, **ptr;
+	unsigned i;
+	int ret = -ENODEV;
+	char *p;
+
+	if (!dev)
+		return NULL;
+
+	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	ret = -ENOMEM;
+
+	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
+			    dev_to_node(dev));
+	if (!pool)
+		goto err_mem;
+
+	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
+				dev_to_node(dev));
+	if (!sec_pool)
+		goto err_mem;
+
+	INIT_LIST_HEAD(&sec_pool->pools);
+	sec_pool->dev = dev;
+	sec_pool->pool =  pool;
+
+	INIT_LIST_HEAD(&pool->free_list);
+	INIT_LIST_HEAD(&pool->inuse_list);
+	INIT_LIST_HEAD(&pool->pools);
+	spin_lock_init(&pool->lock);
+	pool->dev = dev;
+	pool->npages_free = pool->npages_in_use = 0;
+	pool->nfrees = 0;
+	pool->gfp_flags = flags;
+	pool->size = PAGE_SIZE;
+	pool->type = type;
+	pool->nrefills = 0;
+	p = pool->name;
+	for (i = 0; i < 5; i++) {
+		if (type & t[i]) {
+			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+				      "%s", n[i]);
+		}
+	}
+	*p = 0;
+	/* We copy the name for pr_ calls b/c when dma_pool_destroy is called
+	 * - the kobj->name has already been deallocated.*/
+	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
+		 dev_driver_string(dev), dev_name(dev));
+	mutex_lock(&_manager->lock);
+	/* You can get the dma_pool from either the global: */
+	list_add(&sec_pool->pools, &_manager->pools);
+	_manager->npools++;
+	/* or from 'struct device': */
+	list_add(&pool->pools, &dev->dma_pools);
+	mutex_unlock(&_manager->lock);
+
+	*ptr = pool;
+	devres_add(dev, ptr);
+
+	return pool;
+err_mem:
+	devres_free(ptr);
+	kfree(sec_pool);
+	kfree(pool);
+	return ERR_PTR(ret);
+}
+
+static struct dma_pool *ttm_dma_find_pool(struct device *dev,
+					  enum pool_type type)
+{
+	struct dma_pool *pool, *tmp, *found = NULL;
+
+	if (type == IS_UNDEFINED)
+		return found;
+
+	/* NB: We iterate on the 'struct dev' which has no spinlock, but
+	 * it does have a kref which we have taken. The kref is taken during
+	 * graphic driver loading - in the drm_pci_init it calls either
+	 * pci_dev_get or pci_register_driver which both end up taking a kref
+	 * on 'struct device'.
+	 *
+	 * On teardown, the graphics drivers end up quiescing the TTM
+	 * (put_pages) and calling the devres destructor, ttm_dma_pool_release.
+	 * The nice thing is that at that point there are no pages associated
+	 * with the driver, so this function will not be called.
+	 */
+	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
+		if (pool->type != type)
+			continue;
+		found = pool;
+		break;
+	}
+	return found;
+}
+
+/*
+ * Free the pages that failed to change caching state. Pages whose caching
+ * state did change stay on the list and are put back in the pool later.
+ */
+static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
+						 struct list_head *d_pages,
+						 struct page **failed_pages,
+						 unsigned cpages)
+{
+	struct dma_page *d_page, *tmp;
+	struct page *p;
+	unsigned i = 0;
+
+	p = failed_pages[0];
+	if (!p)
+		return;
+	/* Find the failed page. */
+	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+		if (d_page->p != p)
+			continue;
+		/* .. and then progress over the full list. */
+		list_del(&d_page->page_list);
+		__ttm_dma_free_page(pool, d_page);
+		if (++i < cpages)
+			p = failed_pages[i];
+		else
+			break;
+	}
+
+}
+
+/*
+ * Allocate 'count' pages with the pool's caching state and link them onto
+ * the 'd_pages' list.
+ * We return zero for success, and negative numbers as errors.
+ */
+static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
+					struct list_head *d_pages,
+					unsigned count)
+{
+	struct page **caching_array;
+	struct dma_page *dma_p;
+	struct page *p;
+	int r = 0;
+	unsigned i, cpages;
+	unsigned max_cpages = min(count,
+			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+	/* allocate array for page caching change */
+	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+	if (!caching_array) {
+		pr_err("%s: Unable to allocate table for new pages\n",
+		       pool->dev_name);
+		return -ENOMEM;
+	}
+
+	if (count > 1) {
+		pr_debug("%s: (%s:%d) Getting %d pages\n",
+			 pool->dev_name, pool->name, current->pid, count);
+	}
+
+	for (i = 0, cpages = 0; i < count; ++i) {
+		dma_p = __ttm_dma_alloc_page(pool);
+		if (!dma_p) {
+			pr_err("%s: Unable to get page %u\n",
+			       pool->dev_name, i);
+
+			/* store already allocated pages in the pool after
+			 * setting the caching state */
+			if (cpages) {
+				r = ttm_set_pages_caching(pool, caching_array,
+							  cpages);
+				if (r)
+					ttm_dma_handle_caching_state_failure(
+						pool, d_pages, caching_array,
+						cpages);
+			}
+			r = -ENOMEM;
+			goto out;
+		}
+		p = dma_p->p;
+#ifdef CONFIG_HIGHMEM
+		/* gfp flags of a highmem page should never be dma32, so
+		 * we should be fine in that case
+		 */
+		if (!PageHighMem(p))
+#endif
+		{
+			caching_array[cpages++] = p;
+			if (cpages == max_cpages) {
+				/* Note: Cannot hold the spinlock */
+				r = ttm_set_pages_caching(pool, caching_array,
+						 cpages);
+				if (r) {
+					ttm_dma_handle_caching_state_failure(
+						pool, d_pages, caching_array,
+						cpages);
+					goto out;
+				}
+				cpages = 0;
+			}
+		}
+		list_add(&dma_p->page_list, d_pages);
+	}
+
+	if (cpages) {
+		r = ttm_set_pages_caching(pool, caching_array, cpages);
+		if (r)
+			ttm_dma_handle_caching_state_failure(pool, d_pages,
+					caching_array, cpages);
+	}
+out:
+	kfree(caching_array);
+	return r;
+}
+
+/*
+ * @return number of pages available in the free_list for this request;
+ * zero means no page could be obtained.
+ */
+static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
+					 unsigned long *irq_flags)
+{
+	unsigned count = _manager->options.small;
+	int r = pool->npages_free;
+
+	if (count > pool->npages_free) {
+		struct list_head d_pages;
+
+		INIT_LIST_HEAD(&d_pages);
+
+		spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+		/* Returns zero on success, and a negative number on
+		 * error. */
+		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
+
+		spin_lock_irqsave(&pool->lock, *irq_flags);
+		if (!r) {
+			/* Add the fresh to the end.. */
+			list_splice(&d_pages, &pool->free_list);
+			++pool->nrefills;
+			pool->npages_free += count;
+			r = count;
+		} else {
+			struct dma_page *d_page;
+			unsigned cpages = 0;
+
+			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
+			       pool->dev_name, pool->name, r);
+
+			list_for_each_entry(d_page, &d_pages, page_list) {
+				cpages++;
+			}
+			list_splice_tail(&d_pages, &pool->free_list);
+			pool->npages_free += cpages;
+			r = cpages;
+		}
+	}
+	return r;
+}
+
+/*
+ * @return zero on success, or -ENOMEM if no page could be obtained.
+ * The populate list is actually a stack (not that it matters, as TTM
+ * allocates one page at a time).
+ */
+static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+				  struct ttm_dma_tt *ttm_dma,
+				  unsigned index)
+{
+	struct dma_page *d_page;
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+	unsigned long irq_flags;
+	int count, r = -ENOMEM;
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
+	if (count) {
+		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
+		ttm->pages[index] = d_page->p;
+		ttm_dma->dma_address[index] = d_page->dma;
+		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+		r = 0;
+		pool->npages_in_use += 1;
+		pool->npages_free -= 1;
+	}
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+	return r;
+}
+
+/*
+ * On success the ttm's pages array holds correctly cached pages for all
+ * num_pages entries. On failure a negative value (-ENOMEM, etc.) is
+ * returned.
+ */
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+	struct dma_pool *pool;
+	enum pool_type type;
+	unsigned i;
+	gfp_t gfp_flags;
+	int ret;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
+		gfp_flags = GFP_USER | GFP_DMA32;
+	else
+		gfp_flags = GFP_HIGHUSER;
+	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+		gfp_flags |= __GFP_ZERO;
+
+	pool = ttm_dma_find_pool(dev, type);
+	if (!pool) {
+		pool = ttm_dma_pool_init(dev, gfp_flags, type);
+		if (IS_ERR_OR_NULL(pool))
+			return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	for (i = 0; i < ttm->num_pages; ++i) {
+		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+		if (ret != 0) {
+			ttm_dma_unpopulate(ttm_dma, dev);
+			return -ENOMEM;
+		}
+
+		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+						false, false);
+		if (unlikely(ret != 0)) {
+			ttm_dma_unpopulate(ttm_dma, dev);
+			return -ENOMEM;
+		}
+	}
+
+	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+		ret = ttm_tt_swapin(ttm);
+		if (unlikely(ret != 0)) {
+			ttm_dma_unpopulate(ttm_dma, dev);
+			return ret;
+		}
+	}
+
+	ttm->state = tt_unbound;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_populate);
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_dma_pool_get_num_unused_pages(void)
+{
+	struct device_pools *p;
+	unsigned total = 0;
+
+	mutex_lock(&_manager->lock);
+	list_for_each_entry(p, &_manager->pools, pools)
+		total += p->pool->npages_free;
+	mutex_unlock(&_manager->lock);
+	return total;
+}
+
+/* Put all pages in pages list to correct pool to wait for reuse */
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+	struct dma_pool *pool;
+	struct dma_page *d_page, *next;
+	enum pool_type type;
+	bool is_cached = false;
+	unsigned count = 0, i, npages = 0;
+	unsigned long irq_flags;
+
+	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+	pool = ttm_dma_find_pool(dev, type);
+	if (!pool)
+		return;
+
+	is_cached = (ttm_dma_find_pool(pool->dev,
+		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);
+
+	/* make sure pages array match list and count number of pages */
+	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+		ttm->pages[count] = d_page->p;
+		count++;
+	}
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	pool->npages_in_use -= count;
+	if (is_cached) {
+		pool->nfrees += count;
+	} else {
+		pool->npages_free += count;
+		list_splice(&ttm_dma->pages_list, &pool->free_list);
+		npages = count;
+		if (pool->npages_free > _manager->options.max_size) {
+			npages = pool->npages_free - _manager->options.max_size;
+			/* free at least NUM_PAGES_TO_ALLOC number of pages
+			 * to reduce calls to set_memory_wb */
+			if (npages < NUM_PAGES_TO_ALLOC)
+				npages = NUM_PAGES_TO_ALLOC;
+		}
+	}
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+	if (is_cached) {
+		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 d_page->p);
+			ttm_dma_page_put(pool, d_page);
+		}
+	} else {
+		for (i = 0; i < count; i++) {
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 ttm->pages[i]);
+		}
+	}
+
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	for (i = 0; i < ttm->num_pages; i++) {
+		ttm->pages[i] = NULL;
+		ttm_dma->dma_address[i] = 0;
+	}
+
+	/* shrink pool if necessary (only on !is_cached pools)*/
+	if (npages)
+		ttm_dma_page_pool_free(pool, npages);
+	ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
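+
+/*
+ * Unlike the plain page pool, these helpers need the 'struct device' so
+ * the pages can come from dma_alloc_coherent(). A driver's populate hook
+ * therefore forwards with its own device pointer, e.g. (sketch; 'my_dev()'
+ * is a hypothetical accessor for the driver's struct device):
+ *
+ *	static int my_ttm_populate(struct ttm_tt *ttm)
+ *	{
+ *		struct ttm_dma_tt *dma_tt =
+ *			container_of(ttm, struct ttm_dma_tt, ttm);
+ *
+ *		return ttm_dma_populate(dma_tt, my_dev(ttm));
+ *	}
+ */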
+
+/**
+ * Callback for mm to request that the pools reduce the number of pages held.
+ */
+static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
+				  struct shrink_control *sc)
+{
+	static atomic_t start_pool = ATOMIC_INIT(0);
+	unsigned idx = 0;
+	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	unsigned shrink_pages = sc->nr_to_scan;
+	struct device_pools *p;
+
+	if (list_empty(&_manager->pools))
+		return 0;
+
+	mutex_lock(&_manager->lock);
+	pool_offset = pool_offset % _manager->npools;
+	list_for_each_entry(p, &_manager->pools, pools) {
+		unsigned nr_free;
+
+		if (!p->dev)
+			continue;
+		if (shrink_pages == 0)
+			break;
+		/* Do it in round-robin fashion. */
+		if (++idx < pool_offset)
+			continue;
+		nr_free = shrink_pages;
+		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+			 p->pool->dev_name, p->pool->name, current->pid,
+			 nr_free, shrink_pages);
+	}
+	mutex_unlock(&_manager->lock);
+	/* return estimated number of unused pages in pool */
+	return ttm_dma_pool_get_num_unused_pages();
+}
+
+static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+	manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+	manager->mm_shrink.seeks = 1;
+	register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+	unregister_shrinker(&manager->mm_shrink);
+}
+
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+	int ret = -ENOMEM;
+
+	WARN_ON(_manager);
+
+	pr_info("Initializing DMA pool allocator\n");
+
+	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+	if (!_manager)
+		goto err;
+
+	mutex_init(&_manager->lock);
+	INIT_LIST_HEAD(&_manager->pools);
+
+	_manager->options.max_size = max_pages;
+	_manager->options.small = SMALL_ALLOCATION;
+	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+	/* This takes care of auto-freeing the _manager */
+	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+				   &glob->kobj, "dma_pool");
+	if (unlikely(ret != 0)) {
+		kobject_put(&_manager->kobj);
+		goto err;
+	}
+	ttm_dma_pool_mm_shrink_init(_manager);
+	return 0;
+err:
+	return ret;
+}
+
+void ttm_dma_page_alloc_fini(void)
+{
+	struct device_pools *p, *t;
+
+	pr_info("Finalizing DMA pool allocator\n");
+	ttm_dma_pool_mm_shrink_fini(_manager);
+
+	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
+		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
+			current->pid);
+		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
+			ttm_dma_pool_match, p->pool));
+		ttm_dma_free_pool(p->dev, p->pool->type);
+	}
+	kobject_put(&_manager->kobj);
+	_manager = NULL;
+}
+
+int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+	struct device_pools *p;
+	struct dma_pool *pool = NULL;
+	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
+		     "name", "virt", "busaddr"};
+
+	if (!_manager) {
+		seq_printf(m, "No pool allocator running.\n");
+		return 0;
+	}
+	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
+		   h[0], h[1], h[2], h[3], h[4], h[5]);
+	mutex_lock(&_manager->lock);
+	list_for_each_entry(p, &_manager->pools, pools) {
+		struct device *dev = p->dev;
+		if (!dev)
+			continue;
+		pool = p->pool;
+		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
+				pool->name, pool->nrefills,
+				pool->nfrees, pool->npages_in_use,
+				pool->npages_free,
+				pool->dev_name);
+	}
+	mutex_unlock(&_manager->lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
diff --git a/linux-imx/drivers/gpu/drm/ttm/ttm_tt.c b/linux-imx/drivers/gpu/drm/ttm/ttm_tt.c
new file mode 100644
index 0000000..210d503
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/ttm/ttm_tt.c
@@ -0,0 +1,377 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+#include <linux/file.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <drm/drm_cache.h>
+#include <drm/drm_mem_util.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_page_alloc.h>
+
+/**
+ * Allocates storage for pointers to the pages that back the ttm.
+ */
+static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
+{
+	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
+}
+
+static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+{
+	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
+	ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
+					    sizeof(*ttm->dma_address));
+}
+
+#ifdef CONFIG_X86
+static inline int ttm_tt_set_page_caching(struct page *p,
+					  enum ttm_caching_state c_old,
+					  enum ttm_caching_state c_new)
+{
+	int ret = 0;
+
+	if (PageHighMem(p))
+		return 0;
+
+	if (c_old != tt_cached) {
+		/* p isn't in the default caching state, set it to
+		 * writeback first to free its current memtype. */
+
+		ret = set_pages_wb(p, 1);
+		if (ret)
+			return ret;
+	}
+
+	if (c_new == tt_wc)
+		ret = set_memory_wc((unsigned long) page_address(p), 1);
+	else if (c_new == tt_uncached)
+		ret = set_pages_uc(p, 1);
+
+	return ret;
+}
+#else /* CONFIG_X86 */
+static inline int ttm_tt_set_page_caching(struct page *p,
+					  enum ttm_caching_state c_old,
+					  enum ttm_caching_state c_new)
+{
+	return 0;
+}
+#endif /* CONFIG_X86 */
+
+/*
+ * Change caching policy for the linear kernel map
+ * for range of pages in a ttm.
+ */
+
+static int ttm_tt_set_caching(struct ttm_tt *ttm,
+			      enum ttm_caching_state c_state)
+{
+	int i, j;
+	struct page *cur_page;
+	int ret;
+
+	if (ttm->caching_state == c_state)
+		return 0;
+
+	if (ttm->state == tt_unpopulated) {
+		/* Change caching but don't populate */
+		ttm->caching_state = c_state;
+		return 0;
+	}
+
+	if (ttm->caching_state == tt_cached)
+		drm_clflush_pages(ttm->pages, ttm->num_pages);
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		cur_page = ttm->pages[i];
+		if (likely(cur_page != NULL)) {
+			ret = ttm_tt_set_page_caching(cur_page,
+						      ttm->caching_state,
+						      c_state);
+			if (unlikely(ret != 0))
+				goto out_err;
+		}
+	}
+
+	ttm->caching_state = c_state;
+
+	return 0;
+
+out_err:
+	for (j = 0; j < i; ++j) {
+		cur_page = ttm->pages[j];
+		if (likely(cur_page != NULL)) {
+			(void)ttm_tt_set_page_caching(cur_page, c_state,
+						      ttm->caching_state);
+		}
+	}
+
+	return ret;
+}
+
+int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
+{
+	enum ttm_caching_state state;
+
+	if (placement & TTM_PL_FLAG_WC)
+		state = tt_wc;
+	else if (placement & TTM_PL_FLAG_UNCACHED)
+		state = tt_uncached;
+	else
+		state = tt_cached;
+
+	return ttm_tt_set_caching(ttm, state);
+}
+EXPORT_SYMBOL(ttm_tt_set_placement_caching);
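+
+/*
+ * For example, a placement of TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC resolves
+ * to tt_wc above, while a placement with neither WC nor UNCACHED set
+ * falls back to tt_cached.
+ */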
+
+void ttm_tt_destroy(struct ttm_tt *ttm)
+{
+	if (unlikely(ttm == NULL))
+		return;
+
+	if (ttm->state == tt_bound) {
+		ttm_tt_unbind(ttm);
+	}
+
+	if (ttm->state == tt_unbound) {
+		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+	}
+
+	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
+	    ttm->swap_storage)
+		fput(ttm->swap_storage);
+
+	ttm->swap_storage = NULL;
+	ttm->func->destroy(ttm);
+}
+
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+		unsigned long size, uint32_t page_flags,
+		struct page *dummy_read_page)
+{
+	ttm->bdev = bdev;
+	ttm->glob = bdev->glob;
+	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	ttm->caching_state = tt_cached;
+	ttm->page_flags = page_flags;
+	ttm->dummy_read_page = dummy_read_page;
+	ttm->state = tt_unpopulated;
+	ttm->swap_storage = NULL;
+
+	ttm_tt_alloc_page_directory(ttm);
+	if (!ttm->pages) {
+		ttm_tt_destroy(ttm);
+		pr_err("Failed allocating page table\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_tt_init);
+
+void ttm_tt_fini(struct ttm_tt *ttm)
+{
+	drm_free_large(ttm->pages);
+	ttm->pages = NULL;
+}
+EXPORT_SYMBOL(ttm_tt_fini);
+
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+		unsigned long size, uint32_t page_flags,
+		struct page *dummy_read_page)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+
+	ttm->bdev = bdev;
+	ttm->glob = bdev->glob;
+	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	ttm->caching_state = tt_cached;
+	ttm->page_flags = page_flags;
+	ttm->dummy_read_page = dummy_read_page;
+	ttm->state = tt_unpopulated;
+	ttm->swap_storage = NULL;
+
+	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	ttm_dma_tt_alloc_page_directory(ttm_dma);
+	if (!ttm->pages || !ttm_dma->dma_address) {
+		ttm_tt_destroy(ttm);
+		pr_err("Failed allocating page table\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_dma_tt_init);
+
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
+{
+	struct ttm_tt *ttm = &ttm_dma->ttm;
+
+	drm_free_large(ttm->pages);
+	ttm->pages = NULL;
+	drm_free_large(ttm_dma->dma_address);
+	ttm_dma->dma_address = NULL;
+}
+EXPORT_SYMBOL(ttm_dma_tt_fini);
+
+void ttm_tt_unbind(struct ttm_tt *ttm)
+{
+	int ret;
+
+	if (ttm->state == tt_bound) {
+		ret = ttm->func->unbind(ttm);
+		BUG_ON(ret);
+		ttm->state = tt_unbound;
+	}
+}
+
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+	int ret = 0;
+
+	if (!ttm)
+		return -EINVAL;
+
+	if (ttm->state == tt_bound)
+		return 0;
+
+	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+	if (ret)
+		return ret;
+
+	ret = ttm->func->bind(ttm, bo_mem);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ttm->state = tt_bound;
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_tt_bind);
+
+int ttm_tt_swapin(struct ttm_tt *ttm)
+{
+	struct address_space *swap_space;
+	struct file *swap_storage;
+	struct page *from_page;
+	struct page *to_page;
+	int i;
+	int ret = -ENOMEM;
+
+	swap_storage = ttm->swap_storage;
+	BUG_ON(swap_storage == NULL);
+
+	swap_space = file_inode(swap_storage)->i_mapping;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		from_page = shmem_read_mapping_page(swap_space, i);
+		if (IS_ERR(from_page)) {
+			ret = PTR_ERR(from_page);
+			goto out_err;
+		}
+		to_page = ttm->pages[i];
+		if (unlikely(to_page == NULL))
+			goto out_err;
+
+		copy_highpage(to_page, from_page);
+		page_cache_release(from_page);
+	}
+
+	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
+		fput(swap_storage);
+	ttm->swap_storage = NULL;
+	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+
+	return 0;
+out_err:
+	return ret;
+}
+
+int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
+{
+	struct address_space *swap_space;
+	struct file *swap_storage;
+	struct page *from_page;
+	struct page *to_page;
+	int i;
+	int ret = -ENOMEM;
+
+	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
+	BUG_ON(ttm->caching_state != tt_cached);
+
+	if (!persistent_swap_storage) {
+		swap_storage = shmem_file_setup("ttm swap",
+						ttm->num_pages << PAGE_SHIFT,
+						0);
+		if (unlikely(IS_ERR(swap_storage))) {
+			pr_err("Failed allocating swap storage\n");
+			return PTR_ERR(swap_storage);
+		}
+	} else
+		swap_storage = persistent_swap_storage;
+
+	swap_space = file_inode(swap_storage)->i_mapping;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		from_page = ttm->pages[i];
+		if (unlikely(from_page == NULL))
+			continue;
+		to_page = shmem_read_mapping_page(swap_space, i);
+		if (unlikely(IS_ERR(to_page))) {
+			ret = PTR_ERR(to_page);
+			goto out_err;
+		}
+		copy_highpage(to_page, from_page);
+		set_page_dirty(to_page);
+		mark_page_accessed(to_page);
+		page_cache_release(to_page);
+	}
+
+	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+	ttm->swap_storage = swap_storage;
+	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+	if (persistent_swap_storage)
+		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
+
+	return 0;
+out_err:
+	if (!persistent_swap_storage)
+		fput(swap_storage);
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/udl/Kconfig b/linux-imx/drivers/gpu/drm/udl/Kconfig
new file mode 100644
index 0000000..6222af1
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/udl/Kconfig
@@ -0,0 +1,13 @@
+config DRM_UDL
+	tristate "DisplayLink"
+	depends on DRM
+	depends on USB_ARCH_HAS_HCD
+	select DRM_USB
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_DEFERRED_IO
+	select DRM_KMS_HELPER
+	help
+	  This is a KMS driver for the USB DisplayLink video adapters.
+	  Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/linux-imx/drivers/gpu/drm/udl/Makefile b/linux-imx/drivers/gpu/drm/udl/Makefile
new file mode 100644
index 0000000..05c7481
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/udl/Makefile
@@ -0,0 +1,6 @@
+
+ccflags-y := -Iinclude/drm
+
+udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o
+
+obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/linux-imx/drivers/gpu/drm/udl/udl_connector.c b/linux-imx/drivers/gpu/drm/udl/udl_connector.c
new file mode 100644
index 0000000..b44d548
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/udl/udl_connector.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_crtc_helper.h>
+#include "udl_drv.h"
+
+/* dummy connector just to get EDID;
+   all UDL devices appear to have a DVI-D connector */
+
+static u8 *udl_get_edid(struct udl_device *udl)
+{
+	u8 *block;
+	char *rbuf;
+	int ret, i;
+
+	block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+	if (block == NULL)
+		return NULL;
+
+	rbuf = kmalloc(2, GFP_KERNEL);
+	if (rbuf == NULL)
+		goto error;
+
+	for (i = 0; i < EDID_LENGTH; i++) {
+		ret = usb_control_msg(udl->ddev->usbdev,
+				      usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
+				      (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
+				      HZ);
+		if (ret < 1) {
+			DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
+			goto error;
+		}
+		block[i] = rbuf[1];
+	}
+
+	kfree(rbuf);
+	return block;
+
+error:
+	kfree(block);
+	kfree(rbuf);
+	return NULL;
+}
+
+static int udl_get_modes(struct drm_connector *connector)
+{
+	struct udl_device *udl = connector->dev->dev_private;
+	struct edid *edid;
+	int ret;
+
+	edid = (struct edid *)udl_get_edid(udl);
+	if (!edid) {
+		drm_mode_connector_update_edid_property(connector, NULL);
+		return 0;
+	}
+
+	/*
+	 * We only read the main block, but if the monitor reports extension
+	 * blocks then the drm edid code expects them to be present, so patch
+	 * the extension count to 0. The EDID block must sum to 0 mod 256,
+	 * so zeroing the extension byte is balanced by adding its old value
+	 * to the checksum.
+	 */
+	edid->checksum += edid->extensions;
+	edid->extensions = 0;
+
+	drm_mode_connector_update_edid_property(connector, edid);
+	ret = drm_add_edid_modes(connector, edid);
+	kfree(edid);
+	return ret;
+}
+
+static int udl_mode_valid(struct drm_connector *connector,
+			  struct drm_display_mode *mode)
+{
+	struct udl_device *udl = connector->dev->dev_private;
+	if (!udl->sku_pixel_limit)
+		return 0;
+
+	if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
+		return MODE_VIRTUAL_Y;
+
+	return 0;
+}
+
+static enum drm_connector_status
+udl_detect(struct drm_connector *connector, bool force)
+{
+	if (drm_device_is_unplugged(connector->dev))
+		return connector_status_disconnected;
+	return connector_status_connected;
+}
+
+static struct drm_encoder*
+udl_best_single_encoder(struct drm_connector *connector)
+{
+	int enc_id = connector->encoder_ids[0];
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
+	obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+	if (!obj)
+		return NULL;
+	encoder = obj_to_encoder(obj);
+	return encoder;
+}
+
+static int udl_connector_set_property(struct drm_connector *connector,
+				      struct drm_property *property,
+				      uint64_t val)
+{
+	return 0;
+}
+
+static void udl_connector_destroy(struct drm_connector *connector)
+{
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static struct drm_connector_helper_funcs udl_connector_helper_funcs = {
+	.get_modes = udl_get_modes,
+	.mode_valid = udl_mode_valid,
+	.best_encoder = udl_best_single_encoder,
+};
+
+static struct drm_connector_funcs udl_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = udl_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = udl_connector_destroy,
+	.set_property = udl_connector_set_property,
+};
+
+int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
+{
+	struct drm_connector *connector;
+
+	connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
+	if (!connector)
+		return -ENOMEM;
+
+	drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII);
+	drm_connector_helper_add(connector, &udl_connector_helper_funcs);
+
+	drm_sysfs_connector_add(connector);
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	drm_object_attach_property(&connector->base,
+				      dev->mode_config.dirty_info_property,
+				      1);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/udl/udl_drv.c b/linux-imx/drivers/gpu/drm/udl/udl_drv.c
new file mode 100644
index 0000000..c0770db
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/udl/udl_drv.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <drm/drm_usb.h>
+#include <drm/drm_crtc_helper.h>
+#include "udl_drv.h"
+
+static struct drm_driver driver;
+
+/*
+ * There are many DisplayLink-based graphics products, all with unique PIDs.
+ * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
+ * We also require a match on SubClass (0x00) and Protocol (0x00),
+ * which is compatible with all known USB 2.0 era graphics chips and firmware,
+ * but allows DisplayLink to increment those for any future incompatible chips
+ */
+static struct usb_device_id id_table[] = {
+	{.idVendor = 0x17e9, .bInterfaceClass = 0xff,
+	 .bInterfaceSubClass = 0x00,
+	 .bInterfaceProtocol = 0x00,
+	 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+			USB_DEVICE_ID_MATCH_INT_CLASS |
+			USB_DEVICE_ID_MATCH_INT_SUBCLASS |
+			USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
+	{},
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+MODULE_LICENSE("GPL");
+
+static int udl_usb_probe(struct usb_interface *interface,
+			 const struct usb_device_id *id)
+{
+	return drm_get_usb_dev(interface, id, &driver);
+}
+
+static void udl_usb_disconnect(struct usb_interface *interface)
+{
+	struct drm_device *dev = usb_get_intfdata(interface);
+
+	drm_kms_helper_poll_disable(dev);
+	drm_connector_unplug_all(dev);
+	udl_fbdev_unplug(dev);
+	udl_drop_usb(dev);
+	drm_unplug_dev(dev);
+}
+
+static const struct vm_operations_struct udl_gem_vm_ops = {
+	.fault = udl_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static const struct file_operations udl_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.mmap = udl_drm_gem_mmap,
+	.poll = drm_poll,
+	.read = drm_read,
+	.unlocked_ioctl	= drm_ioctl,
+	.release = drm_release,
+	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+	.load = udl_driver_load,
+	.unload = udl_driver_unload,
+
+	/* gem hooks */
+	.gem_init_object = udl_gem_init_object,
+	.gem_free_object = udl_gem_free_object,
+	.gem_vm_ops = &udl_gem_vm_ops,
+
+	.dumb_create = udl_dumb_create,
+	.dumb_map_offset = udl_gem_mmap,
+	.dumb_destroy = udl_dumb_destroy,
+	.fops = &udl_driver_fops,
+
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_import = udl_gem_prime_import,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct usb_driver udl_driver = {
+	.name = "udl",
+	.probe = udl_usb_probe,
+	.disconnect = udl_usb_disconnect,
+	.id_table = id_table,
+};
+
+static int __init udl_init(void)
+{
+	return drm_usb_init(&driver, &udl_driver);
+}
+
+static void __exit udl_exit(void)
+{
+	drm_usb_exit(&driver, &udl_driver);
+}
+
+module_init(udl_init);
+module_exit(udl_exit);
diff --git a/linux-imx/drivers/gpu/drm/udl/udl_drv.h b/linux-imx/drivers/gpu/drm/udl/udl_drv.h
new file mode 100644
index 0000000..cc6d90f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/udl/udl_drv.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#ifndef UDL_DRV_H
+#define UDL_DRV_H
+
+#include <linux/usb.h>
+
+#define DRIVER_NAME		"udl"
+#define DRIVER_DESC		"DisplayLink"
+#define DRIVER_DATE		"20120220"
+
+#define DRIVER_MAJOR		0
+#define DRIVER_MINOR		0
+#define DRIVER_PATCHLEVEL	1
+
+struct udl_device;
+
+struct urb_node {
+	struct list_head entry;
+	struct udl_device *dev;
+	struct delayed_work release_urb_work;
+	struct urb *urb;
+};
+
+struct urb_list {
+	struct list_head list;
+	spinlock_t lock;
+	struct semaphore limit_sem;
+	int available;
+	int count;
+	size_t size;
+};
+
+struct udl_fbdev;
+
+struct udl_device {
+	struct device *dev;
+	struct drm_device *ddev;
+
+	int sku_pixel_limit;
+
+	struct urb_list urbs;
+	atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
+
+	struct udl_fbdev *fbdev;
+	char mode_buf[1024];
+	uint32_t mode_buf_len;
+	atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
+	atomic_t bytes_identical; /* saved effort with backbuffer comparison */
+	atomic_t bytes_sent; /* to usb, after compression including overhead */
+	atomic_t cpu_kcycles_used; /* transpired during pixel processing */
+};
+
+struct udl_gem_object {
+	struct drm_gem_object base;
+	struct page **pages;
+	void *vmapping;
+	struct sg_table *sg;
+};
+
+#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
+
+struct udl_framebuffer {
+	struct drm_framebuffer base;
+	struct udl_gem_object *obj;
+	bool active_16; /* active on the 16-bit channel */
+	int x1, y1, x2, y2; /* dirty rect */
+	spinlock_t dirty_lock;
+};
+
+#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
+
+/* modeset */
+int udl_modeset_init(struct drm_device *dev);
+void udl_modeset_cleanup(struct drm_device *dev);
+int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder);
+
+struct drm_encoder *udl_encoder_init(struct drm_device *dev);
+
+struct urb *udl_get_urb(struct drm_device *dev);
+
+int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
+void udl_urb_completion(struct urb *urb);
+
+int udl_driver_load(struct drm_device *dev, unsigned long flags);
+int udl_driver_unload(struct drm_device *dev);
+
+int udl_fbdev_init(struct drm_device *dev);
+void udl_fbdev_cleanup(struct drm_device *dev);
+void udl_fbdev_unplug(struct drm_device *dev);
+struct drm_framebuffer *
+udl_fb_user_fb_create(struct drm_device *dev,
+		      struct drm_file *file,
+		      struct drm_mode_fb_cmd2 *mode_cmd);
+
+int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+		     const char *front, char **urb_buf_ptr,
+		     u32 byte_offset, u32 device_byte_offset, u32 byte_width,
+		     int *ident_ptr, int *sent_ptr);
+
+int udl_dumb_create(struct drm_file *file_priv,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args);
+int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
+		 uint32_t handle, uint64_t *offset);
+int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+		     uint32_t handle);
+
+int udl_gem_init_object(struct drm_gem_object *obj);
+void udl_gem_free_object(struct drm_gem_object *gem_obj);
+struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
+					    size_t size);
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+				struct dma_buf *dma_buf);
+
+int udl_gem_vmap(struct udl_gem_object *obj);
+void udl_gem_vunmap(struct udl_gem_object *obj);
+int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
+		      int width, int height);
+
+int udl_drop_usb(struct drm_device *dev);
+
+#define CMD_WRITE_RAW8   "\xAF\x60" /**< 8 bit raw write command. */
+#define CMD_WRITE_RL8    "\xAF\x61" /**< 8 bit run length command. */
+#define CMD_WRITE_COPY8  "\xAF\x62" /**< 8 bit copy command. */
+#define CMD_WRITE_RLX8   "\xAF\x63" /**< 8 bit extended run length command. */
+
+#define CMD_WRITE_RAW16  "\xAF\x68" /**< 16 bit raw write command. */
+#define CMD_WRITE_RL16   "\xAF\x69" /**< 16 bit run length command. */
+#define CMD_WRITE_COPY16 "\xAF\x6A" /**< 16 bit copy command. */
+#define CMD_WRITE_RLX16  "\xAF\x6B" /**< 16 bit extended run length command. */
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/udl/udl_encoder.c b/linux-imx/drivers/gpu/drm/udl/udl_encoder.c
new file mode 100644
index 0000000..4052c46
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/udl/udl_encoder.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "udl_drv.h"
+
+/* dummy encoder */
+static void udl_enc_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static void udl_encoder_disable(struct drm_encoder *encoder)
+{
+}
+
+static bool udl_mode_fixup(struct drm_encoder *encoder,
+			   const struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void udl_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void udl_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void udl_encoder_mode_set(struct drm_encoder *encoder,
+				 struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void
+udl_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static const struct drm_encoder_helper_funcs udl_helper_funcs = {
+	.dpms = udl_encoder_dpms,
+	.mode_fixup = udl_mode_fixup,
+	.prepare = udl_encoder_prepare,
+	.mode_set = udl_encoder_mode_set,
+	.commit = udl_encoder_commit,
+	.disable = udl_encoder_disable,
+};
+
+static const struct drm_encoder_funcs udl_enc_funcs = {
+	.destroy = udl_enc_destroy,
+};
+
+struct drm_encoder *udl_encoder_init(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+
+	encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
+	if (!encoder)
+		return NULL;
+
+	drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS);
+	drm_encoder_helper_add(encoder, &udl_helper_funcs);
+	encoder->possible_crtcs = 1;
+	return encoder;
+}
diff --git a/linux-imx/drivers/gpu/drm/udl/udl_fb.c b/linux-imx/drivers/gpu/drm/udl/udl_fb.c
new file mode 100644
index 0000000..dc0c065
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/udl/udl_fb.c
@@ -0,0 +1,672 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <linux/dma-buf.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "udl_drv.h"
+
+#include <drm/drm_fb_helper.h>
+
+#define DL_DEFIO_WRITE_DELAY    (HZ/20) /* fb_deferred_io.delay in jiffies */
+
+static int fb_defio = 0;  /* Optionally enable experimental fb_defio mmap support */
+static int fb_bpp = 16;
+
+module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
+module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
+
+struct udl_fbdev {
+	struct drm_fb_helper helper;
+	struct udl_framebuffer ufb;
+	struct list_head fbdev_list;
+	int fb_count;
+};
+
+#define DL_ALIGN_UP(x, a) ALIGN(x, a)
+#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)
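+/* e.g. with 8-byte longs, DL_ALIGN_DOWN(13, 8) == 8 and DL_ALIGN_UP(13, 8) == 16 */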
+
+/** Read the red component (0..255) of a 32 bpp colour. */
+#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)
+
+/** Read the green component (0..255) of a 32 bpp colour. */
+#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)
+
+/** Read the blue component (0..255) of a 32 bpp colour. */
+#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)
+
+/** Return red/green component of a 16 bpp colour number. */
+#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)
+
+/** Return green/blue component of a 16 bpp colour number. */
+#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)
+
+/** Return 8 bpp colour number from red, green and blue components. */
+#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)
+
+#if 0
+static uint8_t rgb8(uint32_t col)
+{
+	uint8_t red = DLO_RGB_GETRED(col);
+	uint8_t grn = DLO_RGB_GETGRN(col);
+	uint8_t blu = DLO_RGB_GETBLU(col);
+
+	return DLO_RGB8(red, grn, blu);
+}
+
+static uint16_t rgb16(uint32_t col)
+{
+	uint8_t red = DLO_RGB_GETRED(col);
+	uint8_t grn = DLO_RGB_GETGRN(col);
+	uint8_t blu = DLO_RGB_GETBLU(col);
+
+	return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
+}
+#endif
+
+/*
+ * NOTE: fb_defio.c is holding info->fbdefio.mutex
+ *   Touching ANY framebuffer memory that triggers a page fault
+ *   in fb_defio will cause a deadlock, when it also tries to
+ *   grab the same mutex.
+ */
+static void udlfb_dpy_deferred_io(struct fb_info *info,
+				  struct list_head *pagelist)
+{
+	struct page *cur;
+	struct fb_deferred_io *fbdefio = info->fbdefio;
+	struct udl_fbdev *ufbdev = info->par;
+	struct drm_device *dev = ufbdev->ufb.base.dev;
+	struct udl_device *udl = dev->dev_private;
+	struct urb *urb;
+	char *cmd;
+	cycles_t start_cycles, end_cycles;
+	int bytes_sent = 0;
+	int bytes_identical = 0;
+	int bytes_rendered = 0;
+
+	if (!fb_defio)
+		return;
+
+	start_cycles = get_cycles();
+
+	urb = udl_get_urb(dev);
+	if (!urb)
+		return;
+
+	cmd = urb->transfer_buffer;
+
+	/* walk the written page list and render each to device */
+	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+
+		if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
+				     &urb, (char *) info->fix.smem_start,
+				     &cmd, cur->index << PAGE_SHIFT,
+				     cur->index << PAGE_SHIFT,
+				     PAGE_SIZE, &bytes_identical, &bytes_sent))
+			goto error;
+		bytes_rendered += PAGE_SIZE;
+	}
+
+	if (cmd > (char *) urb->transfer_buffer) {
+		/* Send partial buffer remaining before exiting */
+		int len = cmd - (char *) urb->transfer_buffer;
+		udl_submit_urb(dev, urb, len);
+		bytes_sent += len;
+	} else
+		udl_urb_completion(urb);
+
+error:
+	atomic_add(bytes_sent, &udl->bytes_sent);
+	atomic_add(bytes_identical, &udl->bytes_identical);
+	atomic_add(bytes_rendered, &udl->bytes_rendered);
+	end_cycles = get_cycles();
+	atomic_add(((unsigned int) ((end_cycles - start_cycles)
+		    >> 10)), /* Kcycles */
+		   &udl->cpu_kcycles_used);
+}
+
+int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
+		      int width, int height)
+{
+	struct drm_device *dev = fb->base.dev;
+	struct udl_device *udl = dev->dev_private;
+	int i, ret;
+	char *cmd;
+	cycles_t start_cycles, end_cycles;
+	int bytes_sent = 0;
+	int bytes_identical = 0;
+	struct urb *urb;
+	int aligned_x;
+	int bpp = (fb->base.bits_per_pixel / 8);
+	int x2, y2;
+	bool store_for_later = false;
+	unsigned long flags;
+
+	if (!fb->active_16)
+		return 0;
+
+	if (!fb->obj->vmapping) {
+		ret = udl_gem_vmap(fb->obj);
+		if (ret == -ENOMEM) {
+			DRM_ERROR("failed to vmap fb\n");
+			return 0;
+		}
+		if (!fb->obj->vmapping) {
+			DRM_ERROR("vmapping is NULL after vmap\n");
+			return 0;
+		}
+	}
+
+	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
+	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
+	x = aligned_x;
+
+	if ((width <= 0) ||
+	    (x + width > fb->base.width) ||
+	    (y + height > fb->base.height))
+		return -EINVAL;
+
+	/* If we are in an atomic context, just record the damage for a
+	   later flush; in_atomic() can't be tested once we hold the
+	   spinlock. */
+	if (in_atomic())
+		store_for_later = true;
+
+	x2 = x + width - 1;
+	y2 = y + height - 1;
+
+	spin_lock_irqsave(&fb->dirty_lock, flags);
+
+	if (fb->y1 < y)
+		y = fb->y1;
+	if (fb->y2 > y2)
+		y2 = fb->y2;
+	if (fb->x1 < x)
+		x = fb->x1;
+	if (fb->x2 > x2)
+		x2 = fb->x2;
+
+	if (store_for_later) {
+		fb->x1 = x;
+		fb->x2 = x2;
+		fb->y1 = y;
+		fb->y2 = y2;
+		spin_unlock_irqrestore(&fb->dirty_lock, flags);
+		return 0;
+	}
+
+	fb->x1 = fb->y1 = INT_MAX;
+	fb->x2 = fb->y2 = 0;
+
+	spin_unlock_irqrestore(&fb->dirty_lock, flags);
+	start_cycles = get_cycles();
+
+	urb = udl_get_urb(dev);
+	if (!urb)
+		return 0;
+	cmd = urb->transfer_buffer;
+
+	for (i = y; i <= y2 ; i++) {
+		const int line_offset = fb->base.pitches[0] * i;
+		const int byte_offset = line_offset + (x * bpp);
+		const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
+		if (udl_render_hline(dev, bpp, &urb,
+				     (char *) fb->obj->vmapping,
+				     &cmd, byte_offset, dev_byte_offset,
+				     (x2 - x + 1) * bpp,
+				     &bytes_identical, &bytes_sent))
+			goto error;
+	}
+
+	if (cmd > (char *) urb->transfer_buffer) {
+		/* Send partial buffer remaining before exiting */
+		int len = cmd - (char *) urb->transfer_buffer;
+		ret = udl_submit_urb(dev, urb, len);
+		bytes_sent += len;
+	} else
+		udl_urb_completion(urb);
+
+error:
+	atomic_add(bytes_sent, &udl->bytes_sent);
+	atomic_add(bytes_identical, &udl->bytes_identical);
+	atomic_add(width*height*bpp, &udl->bytes_rendered);
+	end_cycles = get_cycles();
+	atomic_add(((unsigned int) ((end_cycles - start_cycles)
+		    >> 10)), /* Kcycles */
+		   &udl->cpu_kcycles_used);
+
+	return 0;
+}
+
+static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	unsigned long start = vma->vm_start;
+	unsigned long size = vma->vm_end - vma->vm_start;
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long page, pos;
+
+	if (offset + size > info->fix.smem_len)
+		return -EINVAL;
+
+	pos = (unsigned long)info->fix.smem_start + offset;
+
+	pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
+		  pos, size);
+
+	while (size > 0) {
+		page = vmalloc_to_pfn((void *)pos);
+		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
+			return -EAGAIN;
+
+		start += PAGE_SIZE;
+		pos += PAGE_SIZE;
+		if (size > PAGE_SIZE)
+			size -= PAGE_SIZE;
+		else
+			size = 0;
+	}
+
+	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
+	return 0;
+}
+
+static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct udl_fbdev *ufbdev = info->par;
+
+	sys_fillrect(info, rect);
+
+	udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
+			  rect->height);
+}
+
+static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+	struct udl_fbdev *ufbdev = info->par;
+
+	sys_copyarea(info, region);
+
+	udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
+			  region->height);
+}
+
+static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct udl_fbdev *ufbdev = info->par;
+
+	sys_imageblit(info, image);
+
+	udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
+			  image->height);
+}
+
+/*
+ * It's common for several clients to have framebuffer open simultaneously.
+ * e.g. both fbcon and X. Makes things interesting.
+ * Assumes caller is holding info->lock (for open and release at least)
+ */
+static int udl_fb_open(struct fb_info *info, int user)
+{
+	struct udl_fbdev *ufbdev = info->par;
+	struct drm_device *dev = ufbdev->ufb.base.dev;
+	struct udl_device *udl = dev->dev_private;
+
+	/* If the USB device is gone, we don't accept new opens */
+	if (drm_device_is_unplugged(udl->ddev))
+		return -ENODEV;
+
+	ufbdev->fb_count++;
+
+	if (fb_defio && (info->fbdefio == NULL)) {
+		/* enable defio at last moment if not disabled by client */
+
+		struct fb_deferred_io *fbdefio;
+
+		fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+
+		if (fbdefio) {
+			fbdefio->delay = DL_DEFIO_WRITE_DELAY;
+			fbdefio->deferred_io = udlfb_dpy_deferred_io;
+			info->fbdefio = fbdefio;
+			fb_deferred_io_init(info);
+		}
+	}
+
+	pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
+		  info->node, user, info, ufbdev->fb_count);
+
+	return 0;
+}
+
+
+/*
+ * Assumes caller is holding info->lock mutex (for open and release at least)
+ */
+static int udl_fb_release(struct fb_info *info, int user)
+{
+	struct udl_fbdev *ufbdev = info->par;
+
+	ufbdev->fb_count--;
+
+	if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
+		fb_deferred_io_cleanup(info);
+		kfree(info->fbdefio);
+		info->fbdefio = NULL;
+		info->fbops->fb_mmap = udl_fb_mmap;
+	}
+
+	pr_warn("released /dev/fb%d user=%d count=%d\n",
+		info->node, user, ufbdev->fb_count);
+
+	return 0;
+}
+
+static struct fb_ops udlfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = udl_fb_fillrect,
+	.fb_copyarea = udl_fb_copyarea,
+	.fb_imageblit = udl_fb_imageblit,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+	.fb_mmap = udl_fb_mmap,
+	.fb_open = udl_fb_open,
+	.fb_release = udl_fb_release,
+};
+
+static void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+			   u16 blue, int regno)
+{
+}
+
+static void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+			     u16 *blue, int regno)
+{
+	*red = 0;
+	*green = 0;
+	*blue = 0;
+}
+
+static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
+				      struct drm_file *file,
+				      unsigned flags, unsigned color,
+				      struct drm_clip_rect *clips,
+				      unsigned num_clips)
+{
+	struct udl_framebuffer *ufb = to_udl_fb(fb);
+	int i;
+	int ret = 0;
+
+	if (!ufb->active_16)
+		return 0;
+
+	if (ufb->obj->base.import_attach) {
+		ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
+					       0, ufb->obj->base.size,
+					       DMA_FROM_DEVICE);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < num_clips; i++) {
+		ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
+				  clips[i].x2 - clips[i].x1,
+				  clips[i].y2 - clips[i].y1);
+		if (ret)
+			break;
+	}
+
+	if (ufb->obj->base.import_attach) {
+		dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
+				       0, ufb->obj->base.size,
+				       DMA_FROM_DEVICE);
+	}
+	return ret;
+}
+
+static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct udl_framebuffer *ufb = to_udl_fb(fb);
+
+	if (ufb->obj)
+		drm_gem_object_unreference_unlocked(&ufb->obj->base);
+
+	drm_framebuffer_cleanup(fb);
+	kfree(ufb);
+}
+
+static const struct drm_framebuffer_funcs udlfb_funcs = {
+	.destroy = udl_user_framebuffer_destroy,
+	.dirty = udl_user_framebuffer_dirty,
+};
+
+
+static int
+udl_framebuffer_init(struct drm_device *dev,
+		     struct udl_framebuffer *ufb,
+		     struct drm_mode_fb_cmd2 *mode_cmd,
+		     struct udl_gem_object *obj)
+{
+	int ret;
+
+	spin_lock_init(&ufb->dirty_lock);
+	ufb->obj = obj;
+	drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
+	ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
+	return ret;
+}
+
+
+static int udlfb_create(struct drm_fb_helper *helper,
+			struct drm_fb_helper_surface_size *sizes)
+{
+	struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
+	struct drm_device *dev = ufbdev->helper.dev;
+	struct fb_info *info;
+	struct device *device = dev->dev;
+	struct drm_framebuffer *fb;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	struct udl_gem_object *obj;
+	uint32_t size;
+	int ret = 0;
+
+	if (sizes->surface_bpp == 24)
+		sizes->surface_bpp = 32;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+							  sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	size = ALIGN(size, PAGE_SIZE);
+
+	obj = udl_gem_alloc_object(dev, size);
+	if (!obj) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = udl_gem_vmap(obj);
+	if (ret) {
+		DRM_ERROR("failed to vmap fb\n");
+		goto out_gfree;
+	}
+
+	info = framebuffer_alloc(0, device);
+	if (!info) {
+		ret = -ENOMEM;
+		goto out_gfree;
+	}
+	info->par = ufbdev;
+
+	ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
+	if (ret)
+		goto out_gfree;
+
+	fb = &ufbdev->ufb.base;
+
+	ufbdev->helper.fb = fb;
+	ufbdev->helper.fbdev = info;
+
+	strcpy(info->fix.id, "udldrmfb");
+
+	info->screen_base = ufbdev->ufb.obj->vmapping;
+	info->fix.smem_len = size;
+	info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
+
+	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+	info->fbops = &udlfb_ops;
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
+
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_gfree;
+	}
+
+
+	DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
+		      fb->width, fb->height,
+		      ufbdev->ufb.obj->vmapping);
+
+	return ret;
+out_gfree:
+	drm_gem_object_unreference(&ufbdev->ufb.obj->base);
+out:
+	return ret;
+}
+
+static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
+	.gamma_set = udl_crtc_fb_gamma_set,
+	.gamma_get = udl_crtc_fb_gamma_get,
+	.fb_probe = udlfb_create,
+};
+
+static void udl_fbdev_destroy(struct drm_device *dev,
+			      struct udl_fbdev *ufbdev)
+{
+	struct fb_info *info;
+	if (ufbdev->helper.fbdev) {
+		info = ufbdev->helper.fbdev;
+		unregister_framebuffer(info);
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+		framebuffer_release(info);
+	}
+	drm_fb_helper_fini(&ufbdev->helper);
+	drm_framebuffer_unregister_private(&ufbdev->ufb.base);
+	drm_framebuffer_cleanup(&ufbdev->ufb.base);
+	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
+}
+
+int udl_fbdev_init(struct drm_device *dev)
+{
+	struct udl_device *udl = dev->dev_private;
+	int bpp_sel = fb_bpp;
+	struct udl_fbdev *ufbdev;
+	int ret;
+
+	ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL);
+	if (!ufbdev)
+		return -ENOMEM;
+
+	udl->fbdev = ufbdev;
+	ufbdev->helper.funcs = &udl_fb_helper_funcs;
+
+	ret = drm_fb_helper_init(dev, &ufbdev->helper,
+				 1, 1);
+	if (ret) {
+		kfree(ufbdev);
+		return ret;
+
+	}
+
+	drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
+	drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
+	return 0;
+}
+
+void udl_fbdev_cleanup(struct drm_device *dev)
+{
+	struct udl_device *udl = dev->dev_private;
+	if (!udl->fbdev)
+		return;
+
+	udl_fbdev_destroy(dev, udl->fbdev);
+	kfree(udl->fbdev);
+	udl->fbdev = NULL;
+}
+
+void udl_fbdev_unplug(struct drm_device *dev)
+{
+	struct udl_device *udl = dev->dev_private;
+	struct udl_fbdev *ufbdev;
+	if (!udl->fbdev)
+		return;
+
+	ufbdev = udl->fbdev;
+	if (ufbdev->helper.fbdev) {
+		struct fb_info *info;
+		info = ufbdev->helper.fbdev;
+		unlink_framebuffer(info);
+	}
+}
+
+struct drm_framebuffer *
+udl_fb_user_fb_create(struct drm_device *dev,
+		   struct drm_file *file,
+		   struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *obj;
+	struct udl_framebuffer *ufb;
+	int ret;
+	uint32_t size;
+
+	obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
+	if (obj == NULL)
+		return ERR_PTR(-ENOENT);
+
+	size = mode_cmd->pitches[0] * mode_cmd->height;
+	size = ALIGN(size, PAGE_SIZE);
+
+	if (size > obj->size) {
+		DRM_ERROR("object size %u too small for fb (obj %zu, pitch %u, height %u)\n",
+			  size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
+	if (ufb == NULL) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
+	if (ret) {
+		kfree(ufb);
+		return ERR_PTR(-EINVAL);
+	}
+	return &ufb->base;
+}
diff --git a/linux-imx/drivers/gpu/drm/udl/udl_gem.c b/linux-imx/drivers/gpu/drm/udl/udl_gem.c
new file mode 100644
index 0000000..ef034fa
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/udl/udl_gem.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <drm/drmP.h>
+#include "udl_drv.h"
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+
+struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
+					    size_t size)
+{
+	struct udl_gem_object *obj;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (obj == NULL)
+		return NULL;
+
+	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
+		kfree(obj);
+		return NULL;
+	}
+
+	return obj;
+}
+
+static int
+udl_gem_create(struct drm_file *file,
+	       struct drm_device *dev,
+	       uint64_t size,
+	       uint32_t *handle_p)
+{
+	struct udl_gem_object *obj;
+	int ret;
+	u32 handle;
+
+	size = roundup(size, PAGE_SIZE);
+
+	obj = udl_gem_alloc_object(dev, size);
+	if (obj == NULL)
+		return -ENOMEM;
+
+	ret = drm_gem_handle_create(file, &obj->base, &handle);
+	if (ret) {
+		drm_gem_object_release(&obj->base);
+		kfree(obj);
+		return ret;
+	}
+
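+	/* drop the allocation reference; the handle created above holds its own */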
+	drm_gem_object_unreference(&obj->base);
+	*handle_p = handle;
+	return 0;
+}
+
+int udl_dumb_create(struct drm_file *file,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args)
+{
+	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+	args->size = args->pitch * args->height;
+	return udl_gem_create(file, dev,
+			      args->size, &args->handle);
+}
+
+int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+		     uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+
+int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+
+	return ret;
+}
+
+int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
+	struct page *page;
+	unsigned int page_offset;
+	int ret = 0;
+
+	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+		PAGE_SHIFT;
+
+	if (!obj->pages)
+		return VM_FAULT_SIGBUS;
+
+	page = obj->pages[page_offset];
+	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+	switch (ret) {
+	case -EAGAIN:
+		set_need_resched();
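+		/* fall through - return VM_FAULT_NOPAGE so the access is retried */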
+	case 0:
+	case -ERESTARTSYS:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
+}
+
+int udl_gem_init_object(struct drm_gem_object *obj)
+{
+	BUG();
+
+	return 0;
+}
+
+static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
+{
+	int page_count, i;
+	struct page *page;
+	struct inode *inode;
+	struct address_space *mapping;
+
+	if (obj->pages)
+		return 0;
+
+	page_count = obj->base.size / PAGE_SIZE;
+	BUG_ON(obj->pages != NULL);
+	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
+	if (obj->pages == NULL)
+		return -ENOMEM;
+
+	inode = file_inode(obj->base.filp);
+	mapping = inode->i_mapping;
+	gfpmask |= mapping_gfp_mask(mapping);
+
+	for (i = 0; i < page_count; i++) {
+		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		if (IS_ERR(page))
+			goto err_pages;
+		obj->pages[i] = page;
+	}
+
+	return 0;
+err_pages:
+	while (i--)
+		page_cache_release(obj->pages[i]);
+	drm_free_large(obj->pages);
+	obj->pages = NULL;
+	return PTR_ERR(page);
+}
+
+static void udl_gem_put_pages(struct udl_gem_object *obj)
+{
+	int page_count = obj->base.size / PAGE_SIZE;
+	int i;
+
+	if (obj->base.import_attach) {
+		drm_free_large(obj->pages);
+		obj->pages = NULL;
+		return;
+	}
+
+	for (i = 0; i < page_count; i++)
+		page_cache_release(obj->pages[i]);
+
+	drm_free_large(obj->pages);
+	obj->pages = NULL;
+}
+
+int udl_gem_vmap(struct udl_gem_object *obj)
+{
+	int page_count = obj->base.size / PAGE_SIZE;
+	int ret;
+
+	if (obj->base.import_attach) {
+		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
+		if (!obj->vmapping)
+			return -ENOMEM;
+		return 0;
+	}
+
+	ret = udl_gem_get_pages(obj, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
+	if (!obj->vmapping)
+		return -ENOMEM;
+	return 0;
+}
+
+void udl_gem_vunmap(struct udl_gem_object *obj)
+{
+	if (obj->base.import_attach) {
+		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
+		return;
+	}
+
+	if (obj->vmapping)
+		vunmap(obj->vmapping);
+
+	udl_gem_put_pages(obj);
+}
+
+void udl_gem_free_object(struct drm_gem_object *gem_obj)
+{
+	struct udl_gem_object *obj = to_udl_bo(gem_obj);
+
+	if (obj->vmapping)
+		udl_gem_vunmap(obj);
+
+	if (gem_obj->import_attach)
+		drm_prime_gem_destroy(gem_obj, obj->sg);
+
+	if (obj->pages)
+		udl_gem_put_pages(obj);
+
+	if (gem_obj->map_list.map)
+		drm_gem_free_mmap_offset(gem_obj);
+}
+
+/* The dumb interface doesn't use GEM's straight mmap path; it expects
+   to mmap through the DRM fd at a fake offset, as usual. */
+int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
+		 uint32_t handle, uint64_t *offset)
+{
+	struct udl_gem_object *gobj;
+	struct drm_gem_object *obj;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file, handle);
+	if (obj == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+	gobj = to_udl_bo(obj);
+
+	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
+	if (ret)
+		goto out;
+	if (!gobj->base.map_list.map) {
+		ret = drm_gem_create_mmap_offset(obj);
+		if (ret)
+			goto out;
+	}
+
+	*offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;
+
+out:
+	drm_gem_object_unreference(&gobj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+static int udl_prime_create(struct drm_device *dev,
+			    size_t size,
+			    struct sg_table *sg,
+			    struct udl_gem_object **obj_p)
+{
+	struct udl_gem_object *obj;
+	int npages;
+
+	npages = size / PAGE_SIZE;
+
+	*obj_p = NULL;
+	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
+	if (!obj)
+		return -ENOMEM;
+
+	obj->sg = sg;
+	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (obj->pages == NULL) {
+		DRM_ERROR("failed to allocate %d page pointers\n", npages);
+		return -ENOMEM;
+	}
+
+	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+
+	*obj_p = obj;
+	return 0;
+}
+
+struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
+				struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sg;
+	struct udl_gem_object *uobj;
+	int ret;
+
+	/* need to attach */
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	get_dma_buf(dma_buf);
+
+	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg)) {
+		ret = PTR_ERR(sg);
+		goto fail_detach;
+	}
+
+	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
+	if (ret) {
+		goto fail_unmap;
+	}
+
+	uobj->base.import_attach = attach;
+
+	return &uobj->base;
+
+fail_unmap:
+	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	dma_buf_put(dma_buf);
+
+	return ERR_PTR(ret);
+}
diff --git a/linux-imx/drivers/gpu/drm/udl/udl_main.c b/linux-imx/drivers/gpu/drm/udl/udl_main.c
new file mode 100644
index 0000000..0ce2d71
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/udl/udl_main.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+#include <drm/drmP.h>
+#include "udl_drv.h"
+
+/* The "- BULK_SIZE" follows usb-skeleton; could we use full pages and avoid the overhead? */
+#define BULK_SIZE 512
+
+#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
+#define WRITES_IN_FLIGHT (4)
+#define MAX_VENDOR_DESCRIPTOR_SIZE 256
+
+#define GET_URB_TIMEOUT	HZ
+#define FREE_URB_TIMEOUT (HZ*2)
+
+static int udl_parse_vendor_descriptor(struct drm_device *dev,
+				       struct usb_device *usbdev)
+{
+	struct udl_device *udl = dev->dev_private;
+	char *desc;
+	char *buf;
+	char *desc_end;
+
+	int total_len = 0;
+
+	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
+	if (!buf)
+		return false;
+	desc = buf;
+
+	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
+				    0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
+	if (total_len > 5) {
+		DRM_INFO("vendor descriptor length:%x data:%*ph\n",
+			total_len, 11, desc);
+
+		if ((desc[0] != total_len) || /* descriptor length */
+		    (desc[1] != 0x5f) ||   /* vendor descriptor type */
+		    (desc[2] != 0x01) ||   /* version (2 bytes) */
+		    (desc[3] != 0x00) ||
+		    (desc[4] != total_len - 2)) /* length after type */
+			goto unrecognized;
+
+		desc_end = desc + total_len;
+		desc += 5; /* the fixed header we've already parsed */
+
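+		/*
+		 * The remainder is a sequence of key/length/value records:
+		 * a little-endian u16 key, a one-byte payload length, then
+		 * 'length' payload bytes (e.g. key 0x0200 carries a le32
+		 * max_area pixel limit).
+		 */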
+		while (desc < desc_end) {
+			u8 length;
+			u16 key;
+
+			key = le16_to_cpu(*((u16 *) desc));
+			desc += sizeof(u16);
+			length = *desc;
+			desc++;
+
+			switch (key) {
+			case 0x0200: { /* max_area */
+				u32 max_area;
+				max_area = le32_to_cpu(*((u32 *)desc));
+				DRM_DEBUG("DL chip limited to %d pixel modes\n",
+					max_area);
+				udl->sku_pixel_limit = max_area;
+				break;
+			}
+			default:
+				break;
+			}
+			desc += length;
+		}
+	}
+
+	goto success;
+
+unrecognized:
+	/* allow udlfb to load for now even if firmware unrecognized */
+	DRM_ERROR("Unrecognized vendor firmware descriptor\n");
+
+success:
+	kfree(buf);
+	return true;
+}
+
+static void udl_release_urb_work(struct work_struct *work)
+{
+	struct urb_node *unode = container_of(work, struct urb_node,
+					      release_urb_work.work);
+
+	up(&unode->dev->urbs.limit_sem);
+}
+
+void udl_urb_completion(struct urb *urb)
+{
+	struct urb_node *unode = urb->context;
+	struct udl_device *udl = unode->dev;
+	unsigned long flags;
+
+	/* sync/async unlink faults aren't errors */
+	if (urb->status) {
+		if (!(urb->status == -ENOENT ||
+		    urb->status == -ECONNRESET ||
+		    urb->status == -ESHUTDOWN)) {
+			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
+				__func__, urb->status);
+			atomic_set(&udl->lost_pixels, 1);
+		}
+	}
+
+	urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */
+
+	spin_lock_irqsave(&udl->urbs.lock, flags);
+	list_add_tail(&unode->entry, &udl->urbs.list);
+	udl->urbs.available++;
+	spin_unlock_irqrestore(&udl->urbs.lock, flags);
+
+#if 0
+	/*
+	 * When using fb_defio, we deadlock if up() is called
+	 * while another is waiting. So queue to another process.
+	 */
+	if (fb_defio)
+		schedule_delayed_work(&unode->release_urb_work, 0);
+	else
+#endif
+		up(&udl->urbs.limit_sem);
+}
+
+static void udl_free_urb_list(struct drm_device *dev)
+{
+	struct udl_device *udl = dev->dev_private;
+	int count = udl->urbs.count;
+	struct list_head *node;
+	struct urb_node *unode;
+	struct urb *urb;
+	int ret;
+	unsigned long flags;
+
+	DRM_DEBUG("Waiting for completions and freeing all render URBs\n");
+
+	/* keep waiting and freeing, until we've got 'em all */
+	while (count--) {
+
+		/* Getting interrupted means a leak, but that's OK at shutdown */
+		ret = down_interruptible(&udl->urbs.limit_sem);
+		if (ret)
+			break;
+
+		spin_lock_irqsave(&udl->urbs.lock, flags);
+
+		node = udl->urbs.list.next; /* have reserved one with sem */
+		list_del_init(node);
+
+		spin_unlock_irqrestore(&udl->urbs.lock, flags);
+
+		unode = list_entry(node, struct urb_node, entry);
+		urb = unode->urb;
+
+		/* Free each separately allocated piece */
+		usb_free_coherent(urb->dev, udl->urbs.size,
+				  urb->transfer_buffer, urb->transfer_dma);
+		usb_free_urb(urb);
+		kfree(node);
+	}
+	udl->urbs.count = 0;
+}
+
+static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
+{
+	struct udl_device *udl = dev->dev_private;
+	int i = 0;
+	struct urb *urb;
+	struct urb_node *unode;
+	char *buf;
+
+	spin_lock_init(&udl->urbs.lock);
+
+	udl->urbs.size = size;
+	INIT_LIST_HEAD(&udl->urbs.list);
+
+	while (i < count) {
+		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
+		if (!unode)
+			break;
+		unode->dev = udl;
+
+		INIT_DELAYED_WORK(&unode->release_urb_work,
+			  udl_release_urb_work);
+
+		urb = usb_alloc_urb(0, GFP_KERNEL);
+		if (!urb) {
+			kfree(unode);
+			break;
+		}
+		unode->urb = urb;
+
+		buf = usb_alloc_coherent(udl->ddev->usbdev, MAX_TRANSFER, GFP_KERNEL,
+					 &urb->transfer_dma);
+		if (!buf) {
+			kfree(unode);
+			usb_free_urb(urb);
+			break;
+		}
+
+		/* urb->transfer_buffer_length set to actual before submit */
+		usb_fill_bulk_urb(urb, udl->ddev->usbdev, usb_sndbulkpipe(udl->ddev->usbdev, 1),
+			buf, size, udl_urb_completion, unode);
+		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+		list_add_tail(&unode->entry, &udl->urbs.list);
+
+		i++;
+	}
+
+	sema_init(&udl->urbs.limit_sem, i);
+	udl->urbs.count = i;
+	udl->urbs.available = i;
+
+	DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
+
+	return i;
+}
+
+struct urb *udl_get_urb(struct drm_device *dev)
+{
+	struct udl_device *udl = dev->dev_private;
+	int ret = 0;
+	struct list_head *entry;
+	struct urb_node *unode;
+	struct urb *urb = NULL;
+	unsigned long flags;
+
+	/* Wait for an in-flight buffer to complete and get re-queued */
+	ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
+	if (ret) {
+		atomic_set(&udl->lost_pixels, 1);
+		DRM_INFO("wait for urb interrupted: %d available: %d\n",
+		       ret, udl->urbs.available);
+		goto error;
+	}
+
+	spin_lock_irqsave(&udl->urbs.lock, flags);
+
+	BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
+	entry = udl->urbs.list.next;
+	list_del_init(entry);
+	udl->urbs.available--;
+
+	spin_unlock_irqrestore(&udl->urbs.lock, flags);
+
+	unode = list_entry(entry, struct urb_node, entry);
+	urb = unode->urb;
+
+error:
+	return urb;
+}
+
+int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
+{
+	struct udl_device *udl = dev->dev_private;
+	int ret;
+
+	BUG_ON(len > udl->urbs.size);
+
+	urb->transfer_buffer_length = len; /* set to actual payload len */
+	ret = usb_submit_urb(urb, GFP_ATOMIC);
+	if (ret) {
+		udl_urb_completion(urb); /* because no one else will */
+		atomic_set(&udl->lost_pixels, 1);
+		DRM_ERROR("usb_submit_urb error %x\n", ret);
+	}
+	return ret;
+}
+
+int udl_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct udl_device *udl;
+	int ret;
+
+	DRM_DEBUG("\n");
+	udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
+	if (!udl)
+		return -ENOMEM;
+
+	udl->ddev = dev;
+	dev->dev_private = udl;
+
+	if (!udl_parse_vendor_descriptor(dev, dev->usbdev)) {
+		ret = -ENODEV;
+		DRM_ERROR("firmware not recognized. Assume incompatible device\n");
+		goto err;
+	}
+
+	if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
+		ret = -ENOMEM;
+		DRM_ERROR("udl_alloc_urb_list failed\n");
+		goto err;
+	}
+
+	DRM_DEBUG("\n");
+	ret = udl_modeset_init(dev);
+
+	ret = udl_fbdev_init(dev);
+	return 0;
+err:
+	kfree(udl);
+	DRM_ERROR("%d\n", ret);
+	return ret;
+}
+
+int udl_drop_usb(struct drm_device *dev)
+{
+	udl_free_urb_list(dev);
+	return 0;
+}
+
+int udl_driver_unload(struct drm_device *dev)
+{
+	struct udl_device *udl = dev->dev_private;
+
+	if (udl->urbs.count)
+		udl_free_urb_list(dev);
+
+	udl_fbdev_cleanup(dev);
+	udl_modeset_cleanup(dev);
+	kfree(udl);
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/udl/udl_modeset.c b/linux-imx/drivers/gpu/drm/udl/udl_modeset.c
new file mode 100644
index 0000000..e96d234
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/udl/udl_modeset.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ *
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "udl_drv.h"
+
+/*
+ * All DisplayLink bulk operations start with 0xAF, followed by a specific
+ * command code. All operations are written to buffers which are later sent
+ * to the device.
+ */
+static char *udl_set_register(char *buf, u8 reg, u8 val)
+{
+	*buf++ = 0xAF;
+	*buf++ = 0x20;
+	*buf++ = reg;
+	*buf++ = val;
+	return buf;
+}
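+
+/* e.g. udl_set_register(buf, 0x1F, 0x00) appends the bytes AF 20 1F 00 */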
+
+static char *udl_vidreg_lock(char *buf)
+{
+	return udl_set_register(buf, 0xFF, 0x00);
+}
+
+static char *udl_vidreg_unlock(char *buf)
+{
+	return udl_set_register(buf, 0xFF, 0xFF);
+}
+
+/*
+ * On/Off for driving the DisplayLink framebuffer to the display
+ *  0x00 H and V sync on
+ *  0x01 H and V sync off (screen blank but powered)
+ *  0x07 DPMS powerdown (requires modeset to come back)
+ */
+static char *udl_set_blank(char *buf, int dpms_mode)
+{
+	u8 reg;
+	switch (dpms_mode) {
+	case DRM_MODE_DPMS_OFF:
+		reg = 0x07;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		reg = 0x05;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		reg = 0x01;
+		break;
+	case DRM_MODE_DPMS_ON:
+		reg = 0x00;
+		break;
+	}
+
+	return udl_set_register(buf, 0x1f, reg);
+}
+
+static char *udl_set_color_depth(char *buf, u8 selection)
+{
+	return udl_set_register(buf, 0x00, selection);
+}
+
+static char *udl_set_base16bpp(char *wrptr, u32 base)
+{
+	/* the base pointer is 24 bits wide; 0x20 is the high byte. */
+	wrptr = udl_set_register(wrptr, 0x20, base >> 16);
+	wrptr = udl_set_register(wrptr, 0x21, base >> 8);
+	return udl_set_register(wrptr, 0x22, base);
+}
+
+/*
+ * DisplayLink HW has separate 16bpp and 8bpp framebuffers.
+ * In 24bpp modes, the low 3:2:3 RGB bits go in the 8bpp framebuffer
+ */
+static char *udl_set_base8bpp(char *wrptr, u32 base)
+{
+	wrptr = udl_set_register(wrptr, 0x26, base >> 16);
+	wrptr = udl_set_register(wrptr, 0x27, base >> 8);
+	return udl_set_register(wrptr, 0x28, base);
+}
+
+static char *udl_set_register_16(char *wrptr, u8 reg, u16 value)
+{
+	wrptr = udl_set_register(wrptr, reg, value >> 8);
+	return udl_set_register(wrptr, reg+1, value);
+}
+
+/*
+ * This is kind of weird because the controller takes some
+ * register values in a different byte order than other registers.
+ */
+static char *udl_set_register_16be(char *wrptr, u8 reg, u16 value)
+{
+	wrptr = udl_set_register(wrptr, reg, value);
+	return udl_set_register(wrptr, reg+1, value >> 8);
+}
+
+/*
+ * LFSR is linear feedback shift register. The reason we have this is
+ * because the display controller needs to minimize the clock depth of
+ * various counters used in the display path. So this code reverses the
+ * provided value into the lfsr16 value by counting backwards to get
+ * the value that needs to be set in the hardware comparator to get the
+ * same actual count. This makes sense once you read above a couple of
+ * times and think about it from a hardware perspective.
+ */
+static u16 udl_lfsr16(u16 actual_count)
+{
+	u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */
+
+	while (actual_count--) {
+		lv =	 ((lv << 1) |
+			(((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1))
+			& 0xFFFF;
+	}
+
+	return (u16) lv;
+}
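+
+/*
+ * Worked example: every tap bit of the 0xFFFF seed is 1, so the first
+ * feedback bit is 1^1^1^1 = 0 and udl_lfsr16(1) == 0xFFFE; the register
+ * value counts back from the seed by 'actual_count' steps.
+ */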
+
+/*
+ * This does LFSR conversion on the value that is to be written.
+ * See LFSR explanation above for more detail.
+ */
+static char *udl_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
+{
+	return udl_set_register_16(wrptr, reg, udl_lfsr16(value));
+}
+
+/*
+ * This takes a standard fbdev screeninfo struct and all of its monitor mode
+ * details and converts them into the DisplayLink equivalent register commands.
+  ERR(vreg(dev,               0x00, (color_depth == 16) ? 0 : 1));
+  ERR(vreg_lfsr16(dev,        0x01, xDisplayStart));
+  ERR(vreg_lfsr16(dev,        0x03, xDisplayEnd));
+  ERR(vreg_lfsr16(dev,        0x05, yDisplayStart));
+  ERR(vreg_lfsr16(dev,        0x07, yDisplayEnd));
+  ERR(vreg_lfsr16(dev,        0x09, xEndCount));
+  ERR(vreg_lfsr16(dev,        0x0B, hSyncStart));
+  ERR(vreg_lfsr16(dev,        0x0D, hSyncEnd));
+  ERR(vreg_big_endian(dev,    0x0F, hPixels));
+  ERR(vreg_lfsr16(dev,        0x11, yEndCount));
+  ERR(vreg_lfsr16(dev,        0x13, vSyncStart));
+  ERR(vreg_lfsr16(dev,        0x15, vSyncEnd));
+  ERR(vreg_big_endian(dev,    0x17, vPixels));
+  ERR(vreg_little_endian(dev, 0x1B, pixelClock5KHz));
+
+  ERR(vreg(dev,               0x1F, 0));
+
+  ERR(vbuf(dev, WRITE_VIDREG_UNLOCK, DSIZEOF(WRITE_VIDREG_UNLOCK)));
+ */
+static char *udl_set_vid_cmds(char *wrptr, struct drm_display_mode *mode)
+{
+	u16 xds, yds;
+	u16 xde, yde;
+	u16 yec;
+
+	/* x display start */
+	xds = mode->crtc_htotal - mode->crtc_hsync_start;
+	wrptr = udl_set_register_lfsr16(wrptr, 0x01, xds);
+	/* x display end */
+	xde = xds + mode->crtc_hdisplay;
+	wrptr = udl_set_register_lfsr16(wrptr, 0x03, xde);
+
+	/* y display start */
+	yds = mode->crtc_vtotal - mode->crtc_vsync_start;
+	wrptr = udl_set_register_lfsr16(wrptr, 0x05, yds);
+	/* y display end */
+	yde = yds + mode->crtc_vdisplay;
+	wrptr = udl_set_register_lfsr16(wrptr, 0x07, yde);
+
+	/* x end count is active + blanking - 1 */
+	wrptr = udl_set_register_lfsr16(wrptr, 0x09,
+					mode->crtc_htotal - 1);
+
+	/* libdlo hardcodes hsync start to 1 */
+	wrptr = udl_set_register_lfsr16(wrptr, 0x0B, 1);
+
+	/* hsync end is width of sync pulse + 1 */
+	wrptr = udl_set_register_lfsr16(wrptr, 0x0D,
+					mode->crtc_hsync_end - mode->crtc_hsync_start + 1);
+
+	/* hpixels is active pixels */
+	wrptr = udl_set_register_16(wrptr, 0x0F, mode->hdisplay);
+
+	/* yendcount is vertical active + vertical blanking */
+	yec = mode->crtc_vtotal;
+	wrptr = udl_set_register_lfsr16(wrptr, 0x11, yec);
+
+	/* libdlo hardcodes vsync start to 0 */
+	wrptr = udl_set_register_lfsr16(wrptr, 0x13, 0);
+
+	/* vsync end is width of vsync pulse */
+	wrptr = udl_set_register_lfsr16(wrptr, 0x15, mode->crtc_vsync_end - mode->crtc_vsync_start);
+
+	/* vpixels is active pixels */
+	wrptr = udl_set_register_16(wrptr, 0x17, mode->crtc_vdisplay);
+
+	wrptr = udl_set_register_16be(wrptr, 0x1B,
+				      mode->clock / 5);
+
+	return wrptr;
+}
+
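+/*
+ * Queue a harmless one-pixel 0xAF 0x6A copy after register writes; this
+ * appears to be needed to make the controller latch the new state
+ * (a no-op render, hence the name).
+ */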
+static char *udl_dummy_render(char *wrptr)
+{
+	*wrptr++ = 0xAF;
+	*wrptr++ = 0x6A; /* copy */
+	*wrptr++ = 0x00; /* from addr */
+	*wrptr++ = 0x00;
+	*wrptr++ = 0x00;
+	*wrptr++ = 0x01; /* one pixel */
+	*wrptr++ = 0x00; /* to address */
+	*wrptr++ = 0x00;
+	*wrptr++ = 0x00;
+	return wrptr;
+}
+
+static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct udl_device *udl = dev->dev_private;
+	struct urb *urb;
+	char *buf;
+	int retval;
+
+	urb = udl_get_urb(dev);
+	if (!urb)
+		return -ENOMEM;
+
+	buf = (char *)urb->transfer_buffer;
+
+	memcpy(buf, udl->mode_buf, udl->mode_buf_len);
+	retval = udl_submit_urb(dev, urb, udl->mode_buf_len);
+	DRM_INFO("write mode info %d\n", udl->mode_buf_len);
+	return retval;
+}
+
+
+static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct udl_device *udl = dev->dev_private;
+	int retval;
+
+	if (mode == DRM_MODE_DPMS_OFF) {
+		char *buf;
+		struct urb *urb;
+		urb = udl_get_urb(dev);
+		if (!urb)
+			return;
+
+		buf = (char *)urb->transfer_buffer;
+		buf = udl_vidreg_lock(buf);
+		buf = udl_set_blank(buf, mode);
+		buf = udl_vidreg_unlock(buf);
+
+		buf = udl_dummy_render(buf);
+		retval = udl_submit_urb(dev, urb, buf - (char *)
+					urb->transfer_buffer);
+	} else {
+		if (udl->mode_buf_len == 0) {
+			DRM_ERROR("Trying to enable DPMS with no mode\n");
+			return;
+		}
+		udl_crtc_write_mode_to_hw(crtc);
+	}
+
+}
+
+static bool udl_crtc_mode_fixup(struct drm_crtc *crtc,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+
+{
+	return true;
+}
+
+#if 0
+static int
+udl_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			   int x, int y, enum mode_set_atomic state)
+{
+	return 0;
+}
+
+static int
+udl_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+		    struct drm_framebuffer *old_fb)
+{
+	return 0;
+}
+#endif
+
+static int udl_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode,
+			       int x, int y,
+			       struct drm_framebuffer *old_fb)
+
+{
+	struct drm_device *dev = crtc->dev;
+	struct udl_framebuffer *ufb = to_udl_fb(crtc->fb);
+	struct udl_device *udl = dev->dev_private;
+	char *buf;
+	char *wrptr;
+	int color_depth = 0;
+
+	buf = (char *)udl->mode_buf;
+
+	/* for now we just clip 24 -> 16 - if we fix that fix this */
+	/*if  (crtc->fb->bits_per_pixel != 16)
+	  color_depth = 1; */
+
+	/* This first section has to do with setting the base address on
+	 * the controller associated with the display. There are 2 base
+	 * pointers; currently, we only use the 16 bpp segment.
+	 */
+	wrptr = udl_vidreg_lock(buf);
+	wrptr = udl_set_color_depth(wrptr, color_depth);
+	/* set base for 16bpp segment to 0 */
+	wrptr = udl_set_base16bpp(wrptr, 0);
+	/* set base for 8bpp segment to end of fb */
+	wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay);
+
+	wrptr = udl_set_vid_cmds(wrptr, adjusted_mode);
+	wrptr = udl_set_blank(wrptr, DRM_MODE_DPMS_ON);
+	wrptr = udl_vidreg_unlock(wrptr);
+
+	wrptr = udl_dummy_render(wrptr);
+
+	ufb->active_16 = true;
+	if (old_fb) {
+		struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
+		uold_fb->active_16 = false;
+	}
+	udl->mode_buf_len = wrptr - buf;
+
+	/* damage all of it */
+	udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
+	return 0;
+}
+
+
+static void udl_crtc_disable(struct drm_crtc *crtc)
+{
+	udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void udl_crtc_destroy(struct drm_crtc *crtc)
+{
+	drm_crtc_cleanup(crtc);
+	kfree(crtc);
+}
+
+static void udl_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void udl_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+static void udl_crtc_commit(struct drm_crtc *crtc)
+{
+	udl_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static struct drm_crtc_helper_funcs udl_helper_funcs = {
+	.dpms = udl_crtc_dpms,
+	.mode_fixup = udl_crtc_mode_fixup,
+	.mode_set = udl_crtc_mode_set,
+	.prepare = udl_crtc_prepare,
+	.commit = udl_crtc_commit,
+	.disable = udl_crtc_disable,
+	.load_lut = udl_load_lut,
+};
+
+static const struct drm_crtc_funcs udl_crtc_funcs = {
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = udl_crtc_destroy,
+};
+
+static int udl_crtc_init(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+
+	crtc = kzalloc(sizeof(struct drm_crtc) + sizeof(struct drm_connector *), GFP_KERNEL);
+	if (crtc == NULL)
+		return -ENOMEM;
+
+	drm_crtc_init(dev, crtc, &udl_crtc_funcs);
+	drm_crtc_helper_add(crtc, &udl_helper_funcs);
+
+	return 0;
+}
+
+static const struct drm_mode_config_funcs udl_mode_funcs = {
+	.fb_create = udl_fb_user_fb_create,
+	.output_poll_changed = NULL,
+};
+
+int udl_modeset_init(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+	drm_mode_config_init(dev);
+
+	dev->mode_config.min_width = 640;
+	dev->mode_config.min_height = 480;
+
+	dev->mode_config.max_width = 2048;
+	dev->mode_config.max_height = 2048;
+
+	dev->mode_config.prefer_shadow = 0;
+	dev->mode_config.preferred_depth = 24;
+
+	dev->mode_config.funcs = &udl_mode_funcs;
+
+	drm_mode_create_dirty_info_property(dev);
+
+	udl_crtc_init(dev);
+
+	encoder = udl_encoder_init(dev);
+
+	udl_connector_init(dev, encoder);
+
+	return 0;
+}
+
+void udl_modeset_cleanup(struct drm_device *dev)
+{
+	drm_mode_config_cleanup(dev);
+}
diff --git a/linux-imx/drivers/gpu/drm/udl/udl_transfer.c b/linux-imx/drivers/gpu/drm/udl/udl_transfer.c
new file mode 100644
index 0000000..f343db7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/udl/udl_transfer.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2012 Red Hat
+ * based in parts on udlfb.c:
+ * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
+ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
+ * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License v2. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <linux/prefetch.h>
+
+#include <drm/drmP.h>
+#include "udl_drv.h"
+
+#define MAX_CMD_PIXELS		255
+
+#define RLX_HEADER_BYTES	7
+#define MIN_RLX_PIX_BYTES       4
+#define MIN_RLX_CMD_BYTES	(RLX_HEADER_BYTES + MIN_RLX_PIX_BYTES)
+
+#define RLE_HEADER_BYTES	6
+#define MIN_RLE_PIX_BYTES	3
+#define MIN_RLE_CMD_BYTES	(RLE_HEADER_BYTES + MIN_RLE_PIX_BYTES)
+
+#define RAW_HEADER_BYTES	6
+#define MIN_RAW_PIX_BYTES	2
+#define MIN_RAW_CMD_BYTES	(RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
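+
+/*
+ * Layout note: the RLX header emitted below is 7 bytes (RLX_HEADER_BYTES):
+ * 0xAF 0x6B, a 24-bit big-endian device address, a total-pixel count byte
+ * and a first raw-span count byte; raw pixel data and run-length repeat
+ * counts then alternate within the command.
+ */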
+
+/*
+ * Trims identical data from front and back of line
+ * Sets new front buffer address and width
+ * And returns byte count of identical pixels
+ * Assumes CPU natural alignment (unsigned long)
+ * for back and front buffer ptrs and width
+ */
+#if 0
+static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
+{
+	int j, k;
+	const unsigned long *back = (const unsigned long *) bback;
+	const unsigned long *front = (const unsigned long *) *bfront;
+	const int width = *width_bytes / sizeof(unsigned long);
+	int identical = width;
+	int start = width;
+	int end = width;
+
+	prefetch((void *) front);
+	prefetch((void *) back);
+
+	for (j = 0; j < width; j++) {
+		if (back[j] != front[j]) {
+			start = j;
+			break;
+		}
+	}
+
+
+		if (back[k] != front[k]) {
+			end = k+1;
+			break;
+		}
+	}
+
+	identical = start + (width - end);
+	*bfront = (u8 *) &front[start];
+	*width_bytes = (end - start) * sizeof(unsigned long);
+
+	return identical * sizeof(unsigned long);
+}
+#endif
+
+static inline u16 pixel32_to_be16(const uint32_t pixel)
+{
+	return (((pixel >> 3) & 0x001f) |
+		((pixel >> 5) & 0x07e0) |
+		((pixel >> 8) & 0xf800));
+}
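+
+/*
+ * Worked example (assuming little-endian XRGB8888): 0x00FF8040 has
+ * R=0xFF, G=0x80, B=0x40; keeping the top 5/6/5 bits of each channel
+ * gives 0xf800 | 0x0400 | 0x0008 = 0xfc08 in RGB565.
+ */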
+
+static bool pixel_repeats(const void *pixel, const uint32_t repeat, int bpp)
+{
+	if (bpp == 2)
+		return *(const uint16_t *)pixel == repeat;
+	else
+		return *(const uint32_t *)pixel == repeat;
+}
+
+/*
+ * Render a command stream for an encoded horizontal line segment of pixels.
+ *
+ * A command buffer holds several commands.
+ * It always begins with a fresh command header
+ * (the protocol doesn't require this, but we enforce it to allow
+ * multiple buffers to be potentially encoded and sent in parallel).
+ * A single command encodes one contiguous horizontal line of pixels
+ *
+ * The function relies on the client to do all allocation, so that
+ * rendering can be done directly to output buffers (e.g. USB URBs).
+ * The function fills the supplied command buffer, providing information
+ * on where it left off, so the client may call in again with additional
+ * buffers if the line will take several buffers to complete.
+ *
+ * A single command can transmit a maximum of 256 pixels,
+ * regardless of the compression ratio (protocol design limit).
+ * To the hardware, 0 for a size byte means 256
+ *
+ * Rather than 256 pixel commands which are either rl or raw encoded,
+ * the rlx command simply assumes alternating raw and rl spans within one cmd.
+ * This has a slightly larger header overhead, but produces more even results.
+ * It also processes all data (read and write) in a single pass.
+ * Performance benchmarks of common cases show it having just slightly better
+ * compression than 256 pixel raw or RLE commands, with similar CPU consumption.
+ * But for very RL-friendly data, it will not compress quite as well.
+ */
+static void udl_compress_hline16(
+	const u8 **pixel_start_ptr,
+	const u8 *const pixel_end,
+	uint32_t *device_address_ptr,
+	uint8_t **command_buffer_ptr,
+	const uint8_t *const cmd_buffer_end, int bpp)
+{
+	const u8 *pixel = *pixel_start_ptr;
+	uint32_t dev_addr  = *device_address_ptr;
+	uint8_t *cmd = *command_buffer_ptr;
+
+	while ((pixel_end > pixel) &&
+	       (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
+		uint8_t *raw_pixels_count_byte = NULL;
+		uint8_t *cmd_pixels_count_byte = NULL;
+		const u8 *raw_pixel_start = NULL;
+		const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
+
+		prefetchw((void *) cmd); /* pull in one cache line at least */
+
+		*cmd++ = 0xaf;
+		*cmd++ = 0x6b;
+		*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
+		*cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF);
+		*cmd++ = (uint8_t) ((dev_addr) & 0xFF);
+
+		cmd_pixels_count_byte = cmd++; /*  we'll know this later */
+		cmd_pixel_start = pixel;
+
+		raw_pixels_count_byte = cmd++; /*  we'll know this later */
+		raw_pixel_start = pixel;
+
+		cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
+			min((int)(pixel_end - pixel) / bpp,
+			    (int)(cmd_buffer_end - cmd) / 2))) * bpp;
+
+		prefetch_range((void *) pixel, cmd_pixel_end - pixel);
+
+		while (pixel < cmd_pixel_end) {
+			const u8 *const start = pixel;
+			u32 repeating_pixel;
+
+			if (bpp == 2) {
+				repeating_pixel = *(uint16_t *)pixel;
+				*(uint16_t *)cmd = cpu_to_be16(repeating_pixel);
+			} else {
+				repeating_pixel = *(uint32_t *)pixel;
+				*(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16(repeating_pixel));
+			}
+
+			cmd += 2;
+			pixel += bpp;
+
+			if (unlikely((pixel < cmd_pixel_end) &&
+				     (pixel_repeats(pixel, repeating_pixel, bpp)))) {
+				/* go back and fill in raw pixel count */
+				*raw_pixels_count_byte = (((start -
+						raw_pixel_start) / bpp) + 1) & 0xFF;
+
+				while ((pixel < cmd_pixel_end) &&
+				       (pixel_repeats(pixel, repeating_pixel, bpp))) {
+					pixel += bpp;
+				}
+
+				/* immediately after raw data is repeat byte */
+				*cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
+
+				/* Then start another raw pixel span */
+				raw_pixel_start = pixel;
+				raw_pixels_count_byte = cmd++;
+			}
+		}
+
+		if (pixel > raw_pixel_start) {
+			/* finalize last RAW span */
+			*raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
+		}
+
+		*cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
+		dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2;
+	}
+
+	if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
+		/* Fill leftover bytes with no-ops */
+		if (cmd_buffer_end > cmd)
+			memset(cmd, 0xAF, cmd_buffer_end - cmd);
+		cmd = (uint8_t *) cmd_buffer_end;
+	}
+
+	*command_buffer_ptr = cmd;
+	*pixel_start_ptr = pixel;
+	*device_address_ptr = dev_addr;
+}
+
+/*
+ * There are 3 copies of every pixel: The front buffer that the fbdev
+ * client renders to, the actual framebuffer across the USB bus in hardware
+ * (that we can only write to, slowly, and can never read), and (optionally)
+ * our shadow copy that tracks what's been sent to that hardware buffer.
+ */
+int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+		     const char *front, char **urb_buf_ptr,
+		     u32 byte_offset, u32 device_byte_offset,
+		     u32 byte_width,
+		     int *ident_ptr, int *sent_ptr)
+{
+	const u8 *line_start, *line_end, *next_pixel;
+	u32 base16 = 0 + (device_byte_offset / bpp) * 2;
+	struct urb *urb = *urb_ptr;
+	u8 *cmd = *urb_buf_ptr;
+	u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
+
+	BUG_ON(!(bpp == 2 || bpp == 4));
+
+	line_start = (u8 *) (front + byte_offset);
+	next_pixel = line_start;
+	line_end = next_pixel + byte_width;
+
+	while (next_pixel < line_end) {
+
+		udl_compress_hline16(&next_pixel,
+			     line_end, &base16,
+			     (u8 **) &cmd, (u8 *) cmd_end, bpp);
+
+		if (cmd >= cmd_end) {
+			int len = cmd - (u8 *) urb->transfer_buffer;
+			if (udl_submit_urb(dev, urb, len))
+				return 1; /* lost_pixels is set */
+			*sent_ptr += len;
+			urb = udl_get_urb(dev);
+			if (!urb)
+				return 1; /* lost_pixels is set */
+			*urb_ptr = urb;
+			cmd = urb->transfer_buffer;
+			cmd_end = &cmd[urb->transfer_buffer_length];
+		}
+	}
+
+	*urb_buf_ptr = cmd;
+
+	return 0;
+}
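+
+/*
+ * Illustrative caller sketch (not part of this file): a damage handler
+ * would walk the dirty rectangle one scanline at a time, something like
+ *
+ *	for (i = y; i < y + height; i++) {
+ *		const int line = i * fb->base.pitches[0];
+ *		if (udl_render_hline(dev, bpp, &urb, vaddr, &cmd,
+ *				     line + x * bpp, line + x * bpp,
+ *				     width * bpp, &identical, &sent))
+ *			break;	/* lost_pixels is set; give up on frame */
+ *	}
+ *
+ * (names such as fb->base.pitches[0] and vaddr are assumptions here,
+ * shown only to make the pointer-in/pointer-out contract concrete).
+ */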
+
diff --git a/linux-imx/drivers/gpu/drm/via/Makefile b/linux-imx/drivers/gpu/drm/via/Makefile
new file mode 100644
index 0000000..d59e258
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/via/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+via-y    := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
+
+obj-$(CONFIG_DRM_VIA)	+= via.o
diff --git a/linux-imx/drivers/gpu/drm/via/via_3d_reg.h b/linux-imx/drivers/gpu/drm/via/via_3d_reg.h
new file mode 100644
index 0000000..462375d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/via/via_3d_reg.h
@@ -0,0 +1,1650 @@
+/*
+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef VIA_3D_REG_H
+#define VIA_3D_REG_H
+#define HC_REG_BASE             0x0400
+
+#define HC_REG_TRANS_SPACE      0x0040
+
+#define HC_ParaN_MASK           0xffffffff
+#define HC_Para_MASK            0x00ffffff
+#define HC_SubA_MASK            0xff000000
+#define HC_SubA_SHIFT           24
+/* Transmission Setting
+ */
+#define HC_REG_TRANS_SET        0x003c
+#define HC_ParaSubType_MASK     0xff000000
+#define HC_ParaType_MASK        0x00ff0000
+#define HC_ParaOS_MASK          0x0000ff00
+#define HC_ParaAdr_MASK         0x000000ff
+#define HC_ParaSubType_SHIFT    24
+#define HC_ParaType_SHIFT       16
+#define HC_ParaOS_SHIFT         8
+#define HC_ParaAdr_SHIFT        0
+
+#define HC_ParaType_CmdVdata    0x0000
+#define HC_ParaType_NotTex      0x0001
+#define HC_ParaType_Tex         0x0002
+#define HC_ParaType_Palette     0x0003
+#define HC_ParaType_PreCR       0x0010
+#define HC_ParaType_Auto        0x00fe
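+
+/*
+ * Illustrative use of the fields above (an assumed composition pattern,
+ * not code from this driver): a HC_REG_TRANS_SET word selecting texture-0
+ * parameter transfers might be built as
+ *
+ *	val = ((HC_SubType_Tex0 << HC_ParaSubType_SHIFT) & HC_ParaSubType_MASK) |
+ *	      ((HC_ParaType_Tex << HC_ParaType_SHIFT) & HC_ParaType_MASK);
+ */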
+
+/* Transmission Space
+ */
+#define HC_REG_Hpara0           0x0040
+#define HC_REG_HpataAF          0x02fc
+
+/* Read
+ */
+#define HC_REG_HREngSt          0x0000
+#define HC_REG_HRFIFOempty      0x0004
+#define HC_REG_HRFIFOfull       0x0008
+#define HC_REG_HRErr            0x000c
+#define HC_REG_FIFOstatus       0x0010
+/* HC_REG_HREngSt          0x0000
+ */
+#define HC_HDASZC_MASK          0x00010000
+#define HC_HSGEMI_MASK          0x0000f000
+#define HC_HLGEMISt_MASK        0x00000f00
+#define HC_HCRSt_MASK           0x00000080
+#define HC_HSE0St_MASK          0x00000040
+#define HC_HSE1St_MASK          0x00000020
+#define HC_HPESt_MASK           0x00000010
+#define HC_HXESt_MASK           0x00000008
+#define HC_HBESt_MASK           0x00000004
+#define HC_HE2St_MASK           0x00000002
+#define HC_HE3St_MASK           0x00000001
+/* HC_REG_HRFIFOempty      0x0004
+ */
+#define HC_HRZDempty_MASK       0x00000010
+#define HC_HRTXAempty_MASK      0x00000008
+#define HC_HRTXDempty_MASK      0x00000004
+#define HC_HWZDempty_MASK       0x00000002
+#define HC_HWCDempty_MASK       0x00000001
+/* HC_REG_HRFIFOfull       0x0008
+ */
+#define HC_HRZDfull_MASK        0x00000010
+#define HC_HRTXAfull_MASK       0x00000008
+#define HC_HRTXDfull_MASK       0x00000004
+#define HC_HWZDfull_MASK        0x00000002
+#define HC_HWCDfull_MASK        0x00000001
+/* HC_REG_HRErr            0x000c
+ */
+#define HC_HAGPCMErr_MASK       0x80000000
+#define HC_HAGPCMErrC_MASK      0x70000000
+/* HC_REG_FIFOstatus       0x0010
+ */
+#define HC_HRFIFOATall_MASK     0x80000000
+#define HC_HRFIFOATbusy_MASK    0x40000000
+#define HC_HRATFGMDo_MASK       0x00000100
+#define HC_HRATFGMDi_MASK       0x00000080
+#define HC_HRATFRZD_MASK        0x00000040
+#define HC_HRATFRTXA_MASK       0x00000020
+#define HC_HRATFRTXD_MASK       0x00000010
+#define HC_HRATFWZD_MASK        0x00000008
+#define HC_HRATFWCD_MASK        0x00000004
+#define HC_HRATTXTAG_MASK       0x00000002
+#define HC_HRATTXCH_MASK        0x00000001
+
+/* AGP Command Setting
+ */
+#define HC_SubA_HAGPBstL        0x0060
+#define HC_SubA_HAGPBendL       0x0061
+#define HC_SubA_HAGPCMNT        0x0062
+#define HC_SubA_HAGPBpL         0x0063
+#define HC_SubA_HAGPBpH         0x0064
+/* HC_SubA_HAGPCMNT        0x0062
+ */
+#define HC_HAGPCMNT_MASK        0x00800000
+#define HC_HCmdErrClr_MASK      0x00400000
+#define HC_HAGPBendH_MASK       0x0000ff00
+#define HC_HAGPBstH_MASK        0x000000ff
+#define HC_HAGPBendH_SHIFT      8
+#define HC_HAGPBstH_SHIFT       0
+/* HC_SubA_HAGPBpL         0x0063
+ */
+#define HC_HAGPBpL_MASK         0x00fffffc
+#define HC_HAGPBpID_MASK        0x00000003
+#define HC_HAGPBpID_PAUSE       0x00000000
+#define HC_HAGPBpID_JUMP        0x00000001
+#define HC_HAGPBpID_STOP        0x00000002
+/* HC_SubA_HAGPBpH         0x0064
+ */
+#define HC_HAGPBpH_MASK         0x00ffffff
+
+/* Miscellaneous Settings
+ */
+#define HC_SubA_HClipTB         0x0070
+#define HC_SubA_HClipLR         0x0071
+#define HC_SubA_HFPClipTL       0x0072
+#define HC_SubA_HFPClipBL       0x0073
+#define HC_SubA_HFPClipLL       0x0074
+#define HC_SubA_HFPClipRL       0x0075
+#define HC_SubA_HFPClipTBH      0x0076
+#define HC_SubA_HFPClipLRH      0x0077
+#define HC_SubA_HLP             0x0078
+#define HC_SubA_HLPRF           0x0079
+#define HC_SubA_HSolidCL        0x007a
+#define HC_SubA_HPixGC          0x007b
+#define HC_SubA_HSPXYOS         0x007c
+#define HC_SubA_HVertexCNT      0x007d
+
+#define HC_HClipT_MASK          0x00fff000
+#define HC_HClipT_SHIFT         12
+#define HC_HClipB_MASK          0x00000fff
+#define HC_HClipB_SHIFT         0
+#define HC_HClipL_MASK          0x00fff000
+#define HC_HClipL_SHIFT         12
+#define HC_HClipR_MASK          0x00000fff
+#define HC_HClipR_SHIFT         0
+#define HC_HFPClipBH_MASK       0x0000ff00
+#define HC_HFPClipBH_SHIFT      8
+#define HC_HFPClipTH_MASK       0x000000ff
+#define HC_HFPClipTH_SHIFT      0
+#define HC_HFPClipRH_MASK       0x0000ff00
+#define HC_HFPClipRH_SHIFT      8
+#define HC_HFPClipLH_MASK       0x000000ff
+#define HC_HFPClipLH_SHIFT      0
+#define HC_HSolidCH_MASK        0x000000ff
+#define HC_HPixGC_MASK          0x00800000
+#define HC_HSPXOS_MASK          0x00fff000
+#define HC_HSPXOS_SHIFT         12
+#define HC_HSPYOS_MASK          0x00000fff
+
+/* Command
+ * Command A
+ */
+#define HC_HCmdHeader_MASK      0xfe000000	/*0xffe00000 */
+#define HC_HE3Fire_MASK         0x00100000
+#define HC_HPMType_MASK         0x000f0000
+#define HC_HEFlag_MASK          0x0000e000
+#define HC_HShading_MASK        0x00001c00
+#define HC_HPMValidN_MASK       0x00000200
+#define HC_HPLEND_MASK          0x00000100
+#define HC_HVCycle_MASK         0x000000ff
+#define HC_HVCycle_Style_MASK   0x000000c0
+#define HC_HVCycle_ChgA_MASK    0x00000030
+#define HC_HVCycle_ChgB_MASK    0x0000000c
+#define HC_HVCycle_ChgC_MASK    0x00000003
+#define HC_HPMType_Point        0x00000000
+#define HC_HPMType_Line         0x00010000
+#define HC_HPMType_Tri          0x00020000
+#define HC_HPMType_TriWF        0x00040000
+#define HC_HEFlag_NoAA          0x00000000
+#define HC_HEFlag_ab            0x00008000
+#define HC_HEFlag_bc            0x00004000
+#define HC_HEFlag_ca            0x00002000
+#define HC_HShading_Solid       0x00000000
+#define HC_HShading_FlatA       0x00000400
+#define HC_HShading_FlatB       0x00000800
+#define HC_HShading_FlatC       0x00000c00
+#define HC_HShading_Gouraud     0x00001000
+#define HC_HVCycle_Full         0x00000000
+#define HC_HVCycle_AFP          0x00000040
+#define HC_HVCycle_One          0x000000c0
+#define HC_HVCycle_NewA         0x00000000
+#define HC_HVCycle_AA           0x00000010
+#define HC_HVCycle_AB           0x00000020
+#define HC_HVCycle_AC           0x00000030
+#define HC_HVCycle_NewB         0x00000000
+#define HC_HVCycle_BA           0x00000004
+#define HC_HVCycle_BB           0x00000008
+#define HC_HVCycle_BC           0x0000000c
+#define HC_HVCycle_NewC         0x00000000
+#define HC_HVCycle_CA           0x00000001
+#define HC_HVCycle_CB           0x00000002
+#define HC_HVCycle_CC           0x00000003
+
+/* Command B
+ */
+#define HC_HLPrst_MASK          0x00010000
+#define HC_HLLastP_MASK         0x00008000
+#define HC_HVPMSK_MASK          0x00007f80
+#define HC_HBFace_MASK          0x00000040
+#define HC_H2nd1VT_MASK         0x0000003f
+#define HC_HVPMSK_X             0x00004000
+#define HC_HVPMSK_Y             0x00002000
+#define HC_HVPMSK_Z             0x00001000
+#define HC_HVPMSK_W             0x00000800
+#define HC_HVPMSK_Cd            0x00000400
+#define HC_HVPMSK_Cs            0x00000200
+#define HC_HVPMSK_S             0x00000100
+#define HC_HVPMSK_T             0x00000080
+
+/* Enable Setting
+ */
+#define HC_SubA_HEnable         0x0000
+#define HC_HenTXEnvMap_MASK     0x00200000
+#define HC_HenVertexCNT_MASK    0x00100000
+#define HC_HenCPUDAZ_MASK       0x00080000
+#define HC_HenDASZWC_MASK       0x00040000
+#define HC_HenFBCull_MASK       0x00020000
+#define HC_HenCW_MASK           0x00010000
+#define HC_HenAA_MASK           0x00008000
+#define HC_HenST_MASK           0x00004000
+#define HC_HenZT_MASK           0x00002000
+#define HC_HenZW_MASK           0x00001000
+#define HC_HenAT_MASK           0x00000800
+#define HC_HenAW_MASK           0x00000400
+#define HC_HenSP_MASK           0x00000200
+#define HC_HenLP_MASK           0x00000100
+#define HC_HenTXCH_MASK         0x00000080
+#define HC_HenTXMP_MASK         0x00000040
+#define HC_HenTXPP_MASK         0x00000020
+#define HC_HenTXTR_MASK         0x00000010
+#define HC_HenCS_MASK           0x00000008
+#define HC_HenFOG_MASK          0x00000004
+#define HC_HenABL_MASK          0x00000002
+#define HC_HenDT_MASK           0x00000001
+
+/* Z Setting
+ */
+#define HC_SubA_HZWBBasL        0x0010
+#define HC_SubA_HZWBBasH        0x0011
+#define HC_SubA_HZWBType        0x0012
+#define HC_SubA_HZBiasL         0x0013
+#define HC_SubA_HZWBend         0x0014
+#define HC_SubA_HZWTMD          0x0015
+#define HC_SubA_HZWCDL          0x0016
+#define HC_SubA_HZWCTAGnum      0x0017
+#define HC_SubA_HZCYNum         0x0018
+#define HC_SubA_HZWCFire        0x0019
+/* HC_SubA_HZWBType
+ */
+#define HC_HZWBType_MASK        0x00800000
+#define HC_HZBiasedWB_MASK      0x00400000
+#define HC_HZONEasFF_MASK       0x00200000
+#define HC_HZOONEasFF_MASK      0x00100000
+#define HC_HZWBFM_MASK          0x00030000
+#define HC_HZWBLoc_MASK         0x0000c000
+#define HC_HZWBPit_MASK         0x00003fff
+#define HC_HZWBFM_16            0x00000000
+#define HC_HZWBFM_32            0x00020000
+#define HC_HZWBFM_24            0x00030000
+#define HC_HZWBLoc_Local        0x00000000
+#define HC_HZWBLoc_SyS          0x00004000
+/* HC_SubA_HZWBend
+ */
+#define HC_HZWBend_MASK         0x00ffe000
+#define HC_HZBiasH_MASK         0x000000ff
+#define HC_HZWBend_SHIFT        10
+/* HC_SubA_HZWTMD
+ */
+#define HC_HZWTMD_MASK          0x00070000
+#define HC_HEBEBias_MASK        0x00007f00
+#define HC_HZNF_MASK            0x000000ff
+#define HC_HZWTMD_NeverPass     0x00000000
+#define HC_HZWTMD_LT            0x00010000
+#define HC_HZWTMD_EQ            0x00020000
+#define HC_HZWTMD_LE            0x00030000
+#define HC_HZWTMD_GT            0x00040000
+#define HC_HZWTMD_NE            0x00050000
+#define HC_HZWTMD_GE            0x00060000
+#define HC_HZWTMD_AllPass       0x00070000
+#define HC_HEBEBias_SHIFT       8
+/* HC_SubA_HZWCDL          0x0016
+ */
+#define HC_HZWCDL_MASK          0x00ffffff
+/* HC_SubA_HZWCTAGnum      0x0017
+ */
+#define HC_HZWCTAGnum_MASK      0x00ff0000
+#define HC_HZWCTAGnum_SHIFT     16
+#define HC_HZWCDH_MASK          0x000000ff
+#define HC_HZWCDH_SHIFT         0
+/* HC_SubA_HZCYNum         0x0018
+ */
+#define HC_HZCYNum_MASK         0x00030000
+#define HC_HZCYNum_SHIFT        16
+#define HC_HZWCQWnum_MASK       0x00003fff
+#define HC_HZWCQWnum_SHIFT      0
+/* HC_SubA_HZWCFire        0x0019
+ */
+#define HC_ZWCFire_MASK         0x00010000
+#define HC_HZWCQWnumLast_MASK   0x00003fff
+#define HC_HZWCQWnumLast_SHIFT  0
+
+/* Stencil Setting
+ */
+#define HC_SubA_HSTREF          0x0023
+#define HC_SubA_HSTMD           0x0024
+/* HC_SubA_HSBFM
+ */
+#define HC_HSBFM_MASK           0x00030000
+#define HC_HSBLoc_MASK          0x0000c000
+#define HC_HSBPit_MASK          0x00003fff
+/* HC_SubA_HSTREF
+ */
+#define HC_HSTREF_MASK          0x00ff0000
+#define HC_HSTOPMSK_MASK        0x0000ff00
+#define HC_HSTBMSK_MASK         0x000000ff
+#define HC_HSTREF_SHIFT         16
+#define HC_HSTOPMSK_SHIFT       8
+/* HC_SubA_HSTMD
+ */
+#define HC_HSTMD_MASK           0x00070000
+#define HC_HSTOPSF_MASK         0x000001c0
+#define HC_HSTOPSPZF_MASK       0x00000038
+#define HC_HSTOPSPZP_MASK       0x00000007
+#define HC_HSTMD_NeverPass      0x00000000
+#define HC_HSTMD_LT             0x00010000
+#define HC_HSTMD_EQ             0x00020000
+#define HC_HSTMD_LE             0x00030000
+#define HC_HSTMD_GT             0x00040000
+#define HC_HSTMD_NE             0x00050000
+#define HC_HSTMD_GE             0x00060000
+#define HC_HSTMD_AllPass        0x00070000
+#define HC_HSTOPSF_KEEP         0x00000000
+#define HC_HSTOPSF_ZERO         0x00000040
+#define HC_HSTOPSF_REPLACE      0x00000080
+#define HC_HSTOPSF_INCRSAT      0x000000c0
+#define HC_HSTOPSF_DECRSAT      0x00000100
+#define HC_HSTOPSF_INVERT       0x00000140
+#define HC_HSTOPSF_INCR         0x00000180
+#define HC_HSTOPSF_DECR         0x000001c0
+#define HC_HSTOPSPZF_KEEP       0x00000000
+#define HC_HSTOPSPZF_ZERO       0x00000008
+#define HC_HSTOPSPZF_REPLACE    0x00000010
+#define HC_HSTOPSPZF_INCRSAT    0x00000018
+#define HC_HSTOPSPZF_DECRSAT    0x00000020
+#define HC_HSTOPSPZF_INVERT     0x00000028
+#define HC_HSTOPSPZF_INCR       0x00000030
+#define HC_HSTOPSPZF_DECR       0x00000038
+#define HC_HSTOPSPZP_KEEP       0x00000000
+#define HC_HSTOPSPZP_ZERO       0x00000001
+#define HC_HSTOPSPZP_REPLACE    0x00000002
+#define HC_HSTOPSPZP_INCRSAT    0x00000003
+#define HC_HSTOPSPZP_DECRSAT    0x00000004
+#define HC_HSTOPSPZP_INVERT     0x00000005
+#define HC_HSTOPSPZP_INCR       0x00000006
+#define HC_HSTOPSPZP_DECR       0x00000007
+
+/* Alpha Setting
+ */
+#define HC_SubA_HABBasL         0x0030
+#define HC_SubA_HABBasH         0x0031
+#define HC_SubA_HABFM           0x0032
+#define HC_SubA_HATMD           0x0033
+#define HC_SubA_HABLCsat        0x0034
+#define HC_SubA_HABLCop         0x0035
+#define HC_SubA_HABLAsat        0x0036
+#define HC_SubA_HABLAop         0x0037
+#define HC_SubA_HABLRCa         0x0038
+#define HC_SubA_HABLRFCa        0x0039
+#define HC_SubA_HABLRCbias      0x003a
+#define HC_SubA_HABLRCb         0x003b
+#define HC_SubA_HABLRFCb        0x003c
+#define HC_SubA_HABLRAa         0x003d
+#define HC_SubA_HABLRAb         0x003e
+/* HC_SubA_HABFM
+ */
+#define HC_HABFM_MASK           0x00030000
+#define HC_HABLoc_MASK          0x0000c000
+#define HC_HABPit_MASK          0x000007ff
+/* HC_SubA_HATMD
+ */
+#define HC_HATMD_MASK           0x00000700
+#define HC_HATREF_MASK          0x000000ff
+#define HC_HATMD_NeverPass      0x00000000
+#define HC_HATMD_LT             0x00000100
+#define HC_HATMD_EQ             0x00000200
+#define HC_HATMD_LE             0x00000300
+#define HC_HATMD_GT             0x00000400
+#define HC_HATMD_NE             0x00000500
+#define HC_HATMD_GE             0x00000600
+#define HC_HATMD_AllPass        0x00000700
+/* HC_SubA_HABLCsat
+ */
+#define HC_HABLCsat_MASK        0x00010000
+#define HC_HABLCa_MASK          0x0000fc00
+#define HC_HABLCa_C_MASK        0x0000c000
+#define HC_HABLCa_OPC_MASK      0x00003c00
+#define HC_HABLFCa_MASK         0x000003f0
+#define HC_HABLFCa_C_MASK       0x00000300
+#define HC_HABLFCa_OPC_MASK     0x000000f0
+#define HC_HABLCbias_MASK       0x0000000f
+#define HC_HABLCbias_C_MASK     0x00000008
+#define HC_HABLCbias_OPC_MASK   0x00000007
+/*-- Define the input color.
+ */
+#define HC_XC_Csrc              0x00000000
+#define HC_XC_Cdst              0x00000001
+#define HC_XC_Asrc              0x00000002
+#define HC_XC_Adst              0x00000003
+#define HC_XC_Fog               0x00000004
+#define HC_XC_HABLRC            0x00000005
+#define HC_XC_minSrcDst         0x00000006
+#define HC_XC_maxSrcDst         0x00000007
+#define HC_XC_mimAsrcInvAdst    0x00000008
+#define HC_XC_OPC               0x00000000
+#define HC_XC_InvOPC            0x00000010
+#define HC_XC_OPCp5             0x00000020
+/*-- Define the input Alpha
+ */
+#define HC_XA_OPA               0x00000000
+#define HC_XA_InvOPA            0x00000010
+#define HC_XA_OPAp5             0x00000020
+#define HC_XA_0                 0x00000000
+#define HC_XA_Asrc              0x00000001
+#define HC_XA_Adst              0x00000002
+#define HC_XA_Fog               0x00000003
+#define HC_XA_minAsrcFog        0x00000004
+#define HC_XA_minAsrcAdst       0x00000005
+#define HC_XA_maxAsrcFog        0x00000006
+#define HC_XA_maxAsrcAdst       0x00000007
+#define HC_XA_HABLRA            0x00000008
+#define HC_XA_minAsrcInvAdst    0x00000008
+#define HC_XA_HABLFRA           0x00000009
+/*--
+ */
+#define HC_HABLCa_OPC           (HC_XC_OPC << 10)
+#define HC_HABLCa_InvOPC        (HC_XC_InvOPC << 10)
+#define HC_HABLCa_OPCp5         (HC_XC_OPCp5 << 10)
+#define HC_HABLCa_Csrc          (HC_XC_Csrc << 10)
+#define HC_HABLCa_Cdst          (HC_XC_Cdst << 10)
+#define HC_HABLCa_Asrc          (HC_XC_Asrc << 10)
+#define HC_HABLCa_Adst          (HC_XC_Adst << 10)
+#define HC_HABLCa_Fog           (HC_XC_Fog << 10)
+#define HC_HABLCa_HABLRCa       (HC_XC_HABLRC << 10)
+#define HC_HABLCa_minSrcDst     (HC_XC_minSrcDst << 10)
+#define HC_HABLCa_maxSrcDst     (HC_XC_maxSrcDst << 10)
+#define HC_HABLFCa_OPC              (HC_XC_OPC << 4)
+#define HC_HABLFCa_InvOPC           (HC_XC_InvOPC << 4)
+#define HC_HABLFCa_OPCp5            (HC_XC_OPCp5 << 4)
+#define HC_HABLFCa_Csrc             (HC_XC_Csrc << 4)
+#define HC_HABLFCa_Cdst             (HC_XC_Cdst << 4)
+#define HC_HABLFCa_Asrc             (HC_XC_Asrc << 4)
+#define HC_HABLFCa_Adst             (HC_XC_Adst << 4)
+#define HC_HABLFCa_Fog              (HC_XC_Fog << 4)
+#define HC_HABLFCa_HABLRCa          (HC_XC_HABLRC << 4)
+#define HC_HABLFCa_minSrcDst        (HC_XC_minSrcDst << 4)
+#define HC_HABLFCa_maxSrcDst        (HC_XC_maxSrcDst << 4)
+#define HC_HABLFCa_mimAsrcInvAdst   (HC_XC_mimAsrcInvAdst << 4)
+#define HC_HABLCbias_HABLRCbias 0x00000000
+#define HC_HABLCbias_Asrc       0x00000001
+#define HC_HABLCbias_Adst       0x00000002
+#define HC_HABLCbias_Fog        0x00000003
+#define HC_HABLCbias_Cin        0x00000004
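+
+/*
+ * Illustrative composition (an assumption about the intended use of the
+ * Ca/FCa fields, not code from this driver): a source-alpha blend would
+ * select Csrc scaled by Asrc for the first blend term, e.g.
+ *
+ *	csat = HC_HABLCa_Csrc | HC_HABLFCa_Asrc | HC_HABLCbias_HABLRCbias;
+ */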
+/* HC_SubA_HABLCop         0x0035
+ */
+#define HC_HABLdot_MASK         0x00010000
+#define HC_HABLCop_MASK         0x00004000
+#define HC_HABLCb_MASK          0x00003f00
+#define HC_HABLCb_C_MASK        0x00003000
+#define HC_HABLCb_OPC_MASK      0x00000f00
+#define HC_HABLFCb_MASK         0x000000fc
+#define HC_HABLFCb_C_MASK       0x000000c0
+#define HC_HABLFCb_OPC_MASK     0x0000003c
+#define HC_HABLCshift_MASK      0x00000003
+#define HC_HABLCb_OPC           (HC_XC_OPC << 8)
+#define HC_HABLCb_InvOPC        (HC_XC_InvOPC << 8)
+#define HC_HABLCb_OPCp5         (HC_XC_OPCp5 << 8)
+#define HC_HABLCb_Csrc          (HC_XC_Csrc << 8)
+#define HC_HABLCb_Cdst          (HC_XC_Cdst << 8)
+#define HC_HABLCb_Asrc          (HC_XC_Asrc << 8)
+#define HC_HABLCb_Adst          (HC_XC_Adst << 8)
+#define HC_HABLCb_Fog           (HC_XC_Fog << 8)
+#define HC_HABLCb_HABLRCa       (HC_XC_HABLRC << 8)
+#define HC_HABLCb_minSrcDst     (HC_XC_minSrcDst << 8)
+#define HC_HABLCb_maxSrcDst     (HC_XC_maxSrcDst << 8)
+#define HC_HABLFCb_OPC              (HC_XC_OPC << 2)
+#define HC_HABLFCb_InvOPC           (HC_XC_InvOPC << 2)
+#define HC_HABLFCb_OPCp5            (HC_XC_OPCp5 << 2)
+#define HC_HABLFCb_Csrc             (HC_XC_Csrc << 2)
+#define HC_HABLFCb_Cdst             (HC_XC_Cdst << 2)
+#define HC_HABLFCb_Asrc             (HC_XC_Asrc << 2)
+#define HC_HABLFCb_Adst             (HC_XC_Adst << 2)
+#define HC_HABLFCb_Fog              (HC_XC_Fog << 2)
+#define HC_HABLFCb_HABLRCb          (HC_XC_HABLRC << 2)
+#define HC_HABLFCb_minSrcDst        (HC_XC_minSrcDst << 2)
+#define HC_HABLFCb_maxSrcDst        (HC_XC_maxSrcDst << 2)
+#define HC_HABLFCb_mimAsrcInvAdst   (HC_XC_mimAsrcInvAdst << 2)
+/* HC_SubA_HABLAsat        0x0036
+ */
+#define HC_HABLAsat_MASK        0x00010000
+#define HC_HABLAa_MASK          0x0000fc00
+#define HC_HABLAa_A_MASK        0x0000c000
+#define HC_HABLAa_OPA_MASK      0x00003c00
+#define HC_HABLFAa_MASK         0x000003f0
+#define HC_HABLFAa_A_MASK       0x00000300
+#define HC_HABLFAa_OPA_MASK     0x000000f0
+#define HC_HABLAbias_MASK       0x0000000f
+#define HC_HABLAbias_A_MASK     0x00000008
+#define HC_HABLAbias_OPA_MASK   0x00000007
+#define HC_HABLAa_OPA           (HC_XA_OPA << 10)
+#define HC_HABLAa_InvOPA        (HC_XA_InvOPA << 10)
+#define HC_HABLAa_OPAp5         (HC_XA_OPAp5 << 10)
+#define HC_HABLAa_0             (HC_XA_0 << 10)
+#define HC_HABLAa_Asrc          (HC_XA_Asrc << 10)
+#define HC_HABLAa_Adst          (HC_XA_Adst << 10)
+#define HC_HABLAa_Fog           (HC_XA_Fog << 10)
+#define HC_HABLAa_minAsrcFog    (HC_XA_minAsrcFog << 10)
+#define HC_HABLAa_minAsrcAdst   (HC_XA_minAsrcAdst << 10)
+#define HC_HABLAa_maxAsrcFog    (HC_XA_maxAsrcFog << 10)
+#define HC_HABLAa_maxAsrcAdst   (HC_XA_maxAsrcAdst << 10)
+#define HC_HABLAa_HABLRA        (HC_XA_HABLRA << 10)
+#define HC_HABLFAa_OPA          (HC_XA_OPA << 4)
+#define HC_HABLFAa_InvOPA       (HC_XA_InvOPA << 4)
+#define HC_HABLFAa_OPAp5        (HC_XA_OPAp5 << 4)
+#define HC_HABLFAa_0            (HC_XA_0 << 4)
+#define HC_HABLFAa_Asrc         (HC_XA_Asrc << 4)
+#define HC_HABLFAa_Adst         (HC_XA_Adst << 4)
+#define HC_HABLFAa_Fog          (HC_XA_Fog << 4)
+#define HC_HABLFAa_minAsrcFog   (HC_XA_minAsrcFog << 4)
+#define HC_HABLFAa_minAsrcAdst  (HC_XA_minAsrcAdst << 4)
+#define HC_HABLFAa_maxAsrcFog   (HC_XA_maxAsrcFog << 4)
+#define HC_HABLFAa_maxAsrcAdst  (HC_XA_maxAsrcAdst << 4)
+#define HC_HABLFAa_minAsrcInvAdst   (HC_XA_minAsrcInvAdst << 4)
+#define HC_HABLFAa_HABLFRA          (HC_XA_HABLFRA << 4)
+#define HC_HABLAbias_HABLRAbias 0x00000000
+#define HC_HABLAbias_Asrc       0x00000001
+#define HC_HABLAbias_Adst       0x00000002
+#define HC_HABLAbias_Fog        0x00000003
+#define HC_HABLAbias_Aaa        0x00000004
+/* HC_SubA_HABLAop         0x0037
+ */
+#define HC_HABLAop_MASK         0x00004000
+#define HC_HABLAb_MASK          0x00003f00
+#define HC_HABLAb_OPA_MASK      0x00000f00
+#define HC_HABLFAb_MASK         0x000000fc
+#define HC_HABLFAb_OPA_MASK     0x0000003c
+#define HC_HABLAshift_MASK      0x00000003
+#define HC_HABLAb_OPA           (HC_XA_OPA << 8)
+#define HC_HABLAb_InvOPA        (HC_XA_InvOPA << 8)
+#define HC_HABLAb_OPAp5         (HC_XA_OPAp5 << 8)
+#define HC_HABLAb_0             (HC_XA_0 << 8)
+#define HC_HABLAb_Asrc          (HC_XA_Asrc << 8)
+#define HC_HABLAb_Adst          (HC_XA_Adst << 8)
+#define HC_HABLAb_Fog           (HC_XA_Fog << 8)
+#define HC_HABLAb_minAsrcFog    (HC_XA_minAsrcFog << 8)
+#define HC_HABLAb_minAsrcAdst   (HC_XA_minAsrcAdst << 8)
+#define HC_HABLAb_maxAsrcFog    (HC_XA_maxAsrcFog << 8)
+#define HC_HABLAb_maxAsrcAdst   (HC_XA_maxAsrcAdst << 8)
+#define HC_HABLAb_HABLRA        (HC_XA_HABLRA << 8)
+#define HC_HABLFAb_OPA          (HC_XA_OPA << 2)
+#define HC_HABLFAb_InvOPA       (HC_XA_InvOPA << 2)
+#define HC_HABLFAb_OPAp5        (HC_XA_OPAp5 << 2)
+#define HC_HABLFAb_0            (HC_XA_0 << 2)
+#define HC_HABLFAb_Asrc         (HC_XA_Asrc << 2)
+#define HC_HABLFAb_Adst         (HC_XA_Adst << 2)
+#define HC_HABLFAb_Fog          (HC_XA_Fog << 2)
+#define HC_HABLFAb_minAsrcFog   (HC_XA_minAsrcFog << 2)
+#define HC_HABLFAb_minAsrcAdst  (HC_XA_minAsrcAdst << 2)
+#define HC_HABLFAb_maxAsrcFog   (HC_XA_maxAsrcFog << 2)
+#define HC_HABLFAb_maxAsrcAdst  (HC_XA_maxAsrcAdst << 2)
+#define HC_HABLFAb_minAsrcInvAdst   (HC_XA_minAsrcInvAdst << 2)
+#define HC_HABLFAb_HABLFRA          (HC_XA_HABLFRA << 2)
+/* HC_SubA_HABLRAa         0x003d
+ */
+#define HC_HABLRAa_MASK         0x00ff0000
+#define HC_HABLRFAa_MASK        0x0000ff00
+#define HC_HABLRAbias_MASK      0x000000ff
+#define HC_HABLRAa_SHIFT        16
+#define HC_HABLRFAa_SHIFT       8
+/* HC_SubA_HABLRAb         0x003e
+ */
+#define HC_HABLRAb_MASK         0x0000ff00
+#define HC_HABLRFAb_MASK        0x000000ff
+#define HC_HABLRAb_SHIFT        8
+
+/* Destination Setting
+ */
+#define HC_SubA_HDBBasL         0x0040
+#define HC_SubA_HDBBasH         0x0041
+#define HC_SubA_HDBFM           0x0042
+#define HC_SubA_HFBBMSKL        0x0043
+#define HC_SubA_HROP            0x0044
+/* HC_SubA_HDBFM           0x0042
+ */
+#define HC_HDBFM_MASK           0x001f0000
+#define HC_HDBLoc_MASK          0x0000c000
+#define HC_HDBPit_MASK          0x00003fff
+#define HC_HDBFM_RGB555         0x00000000
+#define HC_HDBFM_RGB565         0x00010000
+#define HC_HDBFM_ARGB4444       0x00020000
+#define HC_HDBFM_ARGB1555       0x00030000
+#define HC_HDBFM_BGR555         0x00040000
+#define HC_HDBFM_BGR565         0x00050000
+#define HC_HDBFM_ABGR4444       0x00060000
+#define HC_HDBFM_ABGR1555       0x00070000
+#define HC_HDBFM_ARGB0888       0x00080000
+#define HC_HDBFM_ARGB8888       0x00090000
+#define HC_HDBFM_ABGR0888       0x000a0000
+#define HC_HDBFM_ABGR8888       0x000b0000
+#define HC_HDBLoc_Local         0x00000000
+#define HC_HDBLoc_Sys           0x00004000
+/* HC_SubA_HROP            0x0044
+ */
+#define HC_HROP_MASK            0x00000f00
+#define HC_HFBBMSKH_MASK        0x000000ff
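+/*
+ * The ROP names below follow the usual reverse-Polish raster-op
+ * convention: D = destination, P = pattern/source, and the trailing
+ * letters a/o/x/n mean AND, OR, XOR and NOT. So HC_HROP_DPx is D ^ P,
+ * HC_HROP_DPa is D & P, HC_HROP_P is a plain copy, and so on; the
+ * values match the standard 16 ROP2 codes.
+ */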
+#define HC_HROP_BLACK           0x00000000
+#define HC_HROP_DPon            0x00000100
+#define HC_HROP_DPna            0x00000200
+#define HC_HROP_Pn              0x00000300
+#define HC_HROP_PDna            0x00000400
+#define HC_HROP_Dn              0x00000500
+#define HC_HROP_DPx             0x00000600
+#define HC_HROP_DPan            0x00000700
+#define HC_HROP_DPa             0x00000800
+#define HC_HROP_DPxn            0x00000900
+#define HC_HROP_D               0x00000a00
+#define HC_HROP_DPno            0x00000b00
+#define HC_HROP_P               0x00000c00
+#define HC_HROP_PDno            0x00000d00
+#define HC_HROP_DPo             0x00000e00
+#define HC_HROP_WHITE           0x00000f00
+
+/* Fog Setting
+ */
+#define HC_SubA_HFogLF          0x0050
+#define HC_SubA_HFogCL          0x0051
+#define HC_SubA_HFogCH          0x0052
+#define HC_SubA_HFogStL         0x0053
+#define HC_SubA_HFogStH         0x0054
+#define HC_SubA_HFogOOdMF       0x0055
+#define HC_SubA_HFogOOdEF       0x0056
+#define HC_SubA_HFogEndL        0x0057
+#define HC_SubA_HFogDenst       0x0058
+/* HC_SubA_HFogLF          0x0050
+ */
+#define HC_FogLF_MASK           0x00000010
+#define HC_FogEq_MASK           0x00000008
+#define HC_FogMD_MASK           0x00000007
+#define HC_FogMD_LocalFog        0x00000000
+#define HC_FogMD_LinearFog       0x00000002
+#define HC_FogMD_ExponentialFog  0x00000004
+#define HC_FogMD_Exponential2Fog 0x00000005
+/* #define HC_FogMD_FogTable       0x00000003 */
+
+/* HC_SubA_HFogDenst        0x0058
+ */
+#define HC_FogDenst_MASK        0x001fff00
+#define HC_FogEndL_MASK         0x000000ff
+
+/* Texture subtype definitions
+ */
+#define HC_SubType_Tex0         0x00000000
+#define HC_SubType_Tex1         0x00000001
+#define HC_SubType_TexGeneral   0x000000fe
+
+/* Attribute of texture n
+ */
+#define HC_SubA_HTXnL0BasL      0x0000
+#define HC_SubA_HTXnL1BasL      0x0001
+#define HC_SubA_HTXnL2BasL      0x0002
+#define HC_SubA_HTXnL3BasL      0x0003
+#define HC_SubA_HTXnL4BasL      0x0004
+#define HC_SubA_HTXnL5BasL      0x0005
+#define HC_SubA_HTXnL6BasL      0x0006
+#define HC_SubA_HTXnL7BasL      0x0007
+#define HC_SubA_HTXnL8BasL      0x0008
+#define HC_SubA_HTXnL9BasL      0x0009
+#define HC_SubA_HTXnLaBasL      0x000a
+#define HC_SubA_HTXnLbBasL      0x000b
+#define HC_SubA_HTXnLcBasL      0x000c
+#define HC_SubA_HTXnLdBasL      0x000d
+#define HC_SubA_HTXnLeBasL      0x000e
+#define HC_SubA_HTXnLfBasL      0x000f
+#define HC_SubA_HTXnL10BasL     0x0010
+#define HC_SubA_HTXnL11BasL     0x0011
+#define HC_SubA_HTXnL012BasH    0x0020
+#define HC_SubA_HTXnL345BasH    0x0021
+#define HC_SubA_HTXnL678BasH    0x0022
+#define HC_SubA_HTXnL9abBasH    0x0023
+#define HC_SubA_HTXnLcdeBasH    0x0024
+#define HC_SubA_HTXnLf1011BasH  0x0025
+#define HC_SubA_HTXnL0Pit       0x002b
+#define HC_SubA_HTXnL1Pit       0x002c
+#define HC_SubA_HTXnL2Pit       0x002d
+#define HC_SubA_HTXnL3Pit       0x002e
+#define HC_SubA_HTXnL4Pit       0x002f
+#define HC_SubA_HTXnL5Pit       0x0030
+#define HC_SubA_HTXnL6Pit       0x0031
+#define HC_SubA_HTXnL7Pit       0x0032
+#define HC_SubA_HTXnL8Pit       0x0033
+#define HC_SubA_HTXnL9Pit       0x0034
+#define HC_SubA_HTXnLaPit       0x0035
+#define HC_SubA_HTXnLbPit       0x0036
+#define HC_SubA_HTXnLcPit       0x0037
+#define HC_SubA_HTXnLdPit       0x0038
+#define HC_SubA_HTXnLePit       0x0039
+#define HC_SubA_HTXnLfPit       0x003a
+#define HC_SubA_HTXnL10Pit      0x003b
+#define HC_SubA_HTXnL11Pit      0x003c
+#define HC_SubA_HTXnL0_5WE      0x004b
+#define HC_SubA_HTXnL6_bWE      0x004c
+#define HC_SubA_HTXnLc_11WE     0x004d
+#define HC_SubA_HTXnL0_5HE      0x0051
+#define HC_SubA_HTXnL6_bHE      0x0052
+#define HC_SubA_HTXnLc_11HE     0x0053
+#define HC_SubA_HTXnL0OS        0x0077
+#define HC_SubA_HTXnTB          0x0078
+#define HC_SubA_HTXnMPMD        0x0079
+#define HC_SubA_HTXnCLODu       0x007a
+#define HC_SubA_HTXnFM          0x007b
+#define HC_SubA_HTXnTRCH        0x007c
+#define HC_SubA_HTXnTRCL        0x007d
+#define HC_SubA_HTXnTBC         0x007e
+#define HC_SubA_HTXnTRAH        0x007f
+#define HC_SubA_HTXnTBLCsat     0x0080
+#define HC_SubA_HTXnTBLCop      0x0081
+#define HC_SubA_HTXnTBLMPfog    0x0082
+#define HC_SubA_HTXnTBLAsat     0x0083
+#define HC_SubA_HTXnTBLRCa      0x0085
+#define HC_SubA_HTXnTBLRCb      0x0086
+#define HC_SubA_HTXnTBLRCc      0x0087
+#define HC_SubA_HTXnTBLRCbias   0x0088
+#define HC_SubA_HTXnTBLRAa      0x0089
+#define HC_SubA_HTXnTBLRFog     0x008a
+#define HC_SubA_HTXnBumpM00     0x0090
+#define HC_SubA_HTXnBumpM01     0x0091
+#define HC_SubA_HTXnBumpM10     0x0092
+#define HC_SubA_HTXnBumpM11     0x0093
+#define HC_SubA_HTXnLScale      0x0094
+#define HC_SubA_HTXSMD          0x0000
+/* HC_SubA_HTXnL012BasH    0x0020
+ */
+#define HC_HTXnL0BasH_MASK      0x000000ff
+#define HC_HTXnL1BasH_MASK      0x0000ff00
+#define HC_HTXnL2BasH_MASK      0x00ff0000
+#define HC_HTXnL1BasH_SHIFT     8
+#define HC_HTXnL2BasH_SHIFT     16
+/* HC_SubA_HTXnL345BasH    0x0021
+ */
+#define HC_HTXnL3BasH_MASK      0x000000ff
+#define HC_HTXnL4BasH_MASK      0x0000ff00
+#define HC_HTXnL5BasH_MASK      0x00ff0000
+#define HC_HTXnL4BasH_SHIFT     8
+#define HC_HTXnL5BasH_SHIFT     16
+/* HC_SubA_HTXnL678BasH    0x0022
+ */
+#define HC_HTXnL6BasH_MASK      0x000000ff
+#define HC_HTXnL7BasH_MASK      0x0000ff00
+#define HC_HTXnL8BasH_MASK      0x00ff0000
+#define HC_HTXnL7BasH_SHIFT     8
+#define HC_HTXnL8BasH_SHIFT     16
+/* HC_SubA_HTXnL9abBasH    0x0023
+ */
+#define HC_HTXnL9BasH_MASK      0x000000ff
+#define HC_HTXnLaBasH_MASK      0x0000ff00
+#define HC_HTXnLbBasH_MASK      0x00ff0000
+#define HC_HTXnLaBasH_SHIFT     8
+#define HC_HTXnLbBasH_SHIFT     16
+/* HC_SubA_HTXnLcdeBasH    0x0024
+ */
+#define HC_HTXnLcBasH_MASK      0x000000ff
+#define HC_HTXnLdBasH_MASK      0x0000ff00
+#define HC_HTXnLeBasH_MASK      0x00ff0000
+#define HC_HTXnLdBasH_SHIFT     8
+#define HC_HTXnLeBasH_SHIFT     16
+/* HC_SubA_HTXnLf1011BasH  0x0025
+ */
+#define HC_HTXnLfBasH_MASK      0x000000ff
+#define HC_HTXnL10BasH_MASK     0x0000ff00
+#define HC_HTXnL11BasH_MASK     0x00ff0000
+#define HC_HTXnL10BasH_SHIFT    8
+#define HC_HTXnL11BasH_SHIFT    16
+/* HC_SubA_HTXnL0Pit       0x002b
+ */
+#define HC_HTXnLnPit_MASK       0x00003fff
+#define HC_HTXnEnPit_MASK       0x00080000
+#define HC_HTXnLnPitE_MASK      0x00f00000
+#define HC_HTXnLnPitE_SHIFT     20
+/* HC_SubA_HTXnL0_5WE      0x004b
+ */
+#define HC_HTXnL0WE_MASK        0x0000000f
+#define HC_HTXnL1WE_MASK        0x000000f0
+#define HC_HTXnL2WE_MASK        0x00000f00
+#define HC_HTXnL3WE_MASK        0x0000f000
+#define HC_HTXnL4WE_MASK        0x000f0000
+#define HC_HTXnL5WE_MASK        0x00f00000
+#define HC_HTXnL1WE_SHIFT       4
+#define HC_HTXnL2WE_SHIFT       8
+#define HC_HTXnL3WE_SHIFT       12
+#define HC_HTXnL4WE_SHIFT       16
+#define HC_HTXnL5WE_SHIFT       20
+/* HC_SubA_HTXnL6_bWE      0x004c
+ */
+#define HC_HTXnL6WE_MASK        0x0000000f
+#define HC_HTXnL7WE_MASK        0x000000f0
+#define HC_HTXnL8WE_MASK        0x00000f00
+#define HC_HTXnL9WE_MASK        0x0000f000
+#define HC_HTXnLaWE_MASK        0x000f0000
+#define HC_HTXnLbWE_MASK        0x00f00000
+#define HC_HTXnL7WE_SHIFT       4
+#define HC_HTXnL8WE_SHIFT       8
+#define HC_HTXnL9WE_SHIFT       12
+#define HC_HTXnLaWE_SHIFT       16
+#define HC_HTXnLbWE_SHIFT       20
+/* HC_SubA_HTXnLc_11WE      0x004d
+ */
+#define HC_HTXnLcWE_MASK        0x0000000f
+#define HC_HTXnLdWE_MASK        0x000000f0
+#define HC_HTXnLeWE_MASK        0x00000f00
+#define HC_HTXnLfWE_MASK        0x0000f000
+#define HC_HTXnL10WE_MASK       0x000f0000
+#define HC_HTXnL11WE_MASK       0x00f00000
+#define HC_HTXnLdWE_SHIFT       4
+#define HC_HTXnLeWE_SHIFT       8
+#define HC_HTXnLfWE_SHIFT       12
+#define HC_HTXnL10WE_SHIFT      16
+#define HC_HTXnL11WE_SHIFT      20
+/* HC_SubA_HTXnL0_5HE      0x0051
+ */
+#define HC_HTXnL0HE_MASK        0x0000000f
+#define HC_HTXnL1HE_MASK        0x000000f0
+#define HC_HTXnL2HE_MASK        0x00000f00
+#define HC_HTXnL3HE_MASK        0x0000f000
+#define HC_HTXnL4HE_MASK        0x000f0000
+#define HC_HTXnL5HE_MASK        0x00f00000
+#define HC_HTXnL1HE_SHIFT       4
+#define HC_HTXnL2HE_SHIFT       8
+#define HC_HTXnL3HE_SHIFT       12
+#define HC_HTXnL4HE_SHIFT       16
+#define HC_HTXnL5HE_SHIFT       20
+/* HC_SubA_HTXnL6_bHE      0x0052
+ */
+#define HC_HTXnL6HE_MASK        0x0000000f
+#define HC_HTXnL7HE_MASK        0x000000f0
+#define HC_HTXnL8HE_MASK        0x00000f00
+#define HC_HTXnL9HE_MASK        0x0000f000
+#define HC_HTXnLaHE_MASK        0x000f0000
+#define HC_HTXnLbHE_MASK        0x00f00000
+#define HC_HTXnL7HE_SHIFT       4
+#define HC_HTXnL8HE_SHIFT       8
+#define HC_HTXnL9HE_SHIFT       12
+#define HC_HTXnLaHE_SHIFT       16
+#define HC_HTXnLbHE_SHIFT       20
+/* HC_SubA_HTXnLc_11HE      0x0053
+ */
+#define HC_HTXnLcHE_MASK        0x0000000f
+#define HC_HTXnLdHE_MASK        0x000000f0
+#define HC_HTXnLeHE_MASK        0x00000f00
+#define HC_HTXnLfHE_MASK        0x0000f000
+#define HC_HTXnL10HE_MASK       0x000f0000
+#define HC_HTXnL11HE_MASK       0x00f00000
+#define HC_HTXnLdHE_SHIFT       4
+#define HC_HTXnLeHE_SHIFT       8
+#define HC_HTXnLfHE_SHIFT       12
+#define HC_HTXnL10HE_SHIFT      16
+#define HC_HTXnL11HE_SHIFT      20
+/* HC_SubA_HTXnL0OS        0x0077
+ */
+#define HC_HTXnL0OS_MASK        0x003ff000
+#define HC_HTXnLVmax_MASK       0x00000fc0
+#define HC_HTXnLVmin_MASK       0x0000003f
+#define HC_HTXnL0OS_SHIFT       12
+#define HC_HTXnLVmax_SHIFT      6
+/* HC_SubA_HTXnTB          0x0078
+ */
+#define HC_HTXnTB_MASK          0x00f00000
+#define HC_HTXnFLSe_MASK        0x0000e000
+#define HC_HTXnFLSs_MASK        0x00001c00
+#define HC_HTXnFLTe_MASK        0x00000380
+#define HC_HTXnFLTs_MASK        0x00000070
+#define HC_HTXnFLDs_MASK        0x0000000f
+#define HC_HTXnTB_NoTB          0x00000000
+#define HC_HTXnTB_TBC_S         0x00100000
+#define HC_HTXnTB_TBC_T         0x00200000
+#define HC_HTXnTB_TB_S          0x00400000
+#define HC_HTXnTB_TB_T          0x00800000
+#define HC_HTXnFLSe_Nearest     0x00000000
+#define HC_HTXnFLSe_Linear      0x00002000
+#define HC_HTXnFLSe_NonLinear   0x00004000
+#define HC_HTXnFLSe_Sharp       0x00008000
+#define HC_HTXnFLSe_Flat_Gaussian_Cubic 0x0000c000
+#define HC_HTXnFLSs_Nearest     0x00000000
+#define HC_HTXnFLSs_Linear      0x00000400
+#define HC_HTXnFLSs_NonLinear   0x00000800
+#define HC_HTXnFLSs_Flat_Gaussian_Cubic 0x00001800
+#define HC_HTXnFLTe_Nearest     0x00000000
+#define HC_HTXnFLTe_Linear      0x00000080
+#define HC_HTXnFLTe_NonLinear   0x00000100
+#define HC_HTXnFLTe_Sharp       0x00000180
+#define HC_HTXnFLTe_Flat_Gaussian_Cubic 0x00000300
+#define HC_HTXnFLTs_Nearest     0x00000000
+#define HC_HTXnFLTs_Linear      0x00000010
+#define HC_HTXnFLTs_NonLinear   0x00000020
+#define HC_HTXnFLTs_Flat_Gaussian_Cubic 0x00000060
+#define HC_HTXnFLDs_Tex0        0x00000000
+#define HC_HTXnFLDs_Nearest     0x00000001
+#define HC_HTXnFLDs_Linear      0x00000002
+#define HC_HTXnFLDs_NonLinear   0x00000003
+#define HC_HTXnFLDs_Dither      0x00000004
+#define HC_HTXnFLDs_ConstLOD    0x00000005
+#define HC_HTXnFLDs_Ani         0x00000006
+#define HC_HTXnFLDs_AniDither   0x00000007
+/* HC_SubA_HTXnMPMD        0x0079
+ */
+#define HC_HTXnMPMD_SMASK       0x00070000
+#define HC_HTXnMPMD_TMASK       0x00380000
+#define HC_HTXnLODDTf_MASK      0x00000007
+#define HC_HTXnXY2ST_MASK       0x00000008
+#define HC_HTXnMPMD_Tsingle     0x00000000
+#define HC_HTXnMPMD_Tclamp      0x00080000
+#define HC_HTXnMPMD_Trepeat     0x00100000
+#define HC_HTXnMPMD_Tmirror     0x00180000
+#define HC_HTXnMPMD_Twrap       0x00200000
+#define HC_HTXnMPMD_Ssingle     0x00000000
+#define HC_HTXnMPMD_Sclamp      0x00010000
+#define HC_HTXnMPMD_Srepeat     0x00020000
+#define HC_HTXnMPMD_Smirror     0x00030000
+#define HC_HTXnMPMD_Swrap       0x00040000
+/* HC_SubA_HTXnCLODu       0x007a
+ */
+#define HC_HTXnCLODu_MASK       0x000ffc00
+#define HC_HTXnCLODd_MASK       0x000003ff
+#define HC_HTXnCLODu_SHIFT      10
+/* HC_SubA_HTXnFM          0x007b
+ */
+#define HC_HTXnFM_MASK          0x00ff0000
+#define HC_HTXnLoc_MASK         0x00000003
+#define HC_HTXnFM_INDEX         0x00000000
+#define HC_HTXnFM_Intensity     0x00080000
+#define HC_HTXnFM_Lum           0x00100000
+#define HC_HTXnFM_Alpha         0x00180000
+#define HC_HTXnFM_DX            0x00280000
+#define HC_HTXnFM_ARGB16        0x00880000
+#define HC_HTXnFM_ARGB32        0x00980000
+#define HC_HTXnFM_ABGR16        0x00a80000
+#define HC_HTXnFM_ABGR32        0x00b80000
+#define HC_HTXnFM_RGBA16        0x00c80000
+#define HC_HTXnFM_RGBA32        0x00d80000
+#define HC_HTXnFM_BGRA16        0x00e80000
+#define HC_HTXnFM_BGRA32        0x00f80000
+#define HC_HTXnFM_BUMPMAP       0x00380000
+#define HC_HTXnFM_Index1        (HC_HTXnFM_INDEX     | 0x00000000)
+#define HC_HTXnFM_Index2        (HC_HTXnFM_INDEX     | 0x00010000)
+#define HC_HTXnFM_Index4        (HC_HTXnFM_INDEX     | 0x00020000)
+#define HC_HTXnFM_Index8        (HC_HTXnFM_INDEX     | 0x00030000)
+#define HC_HTXnFM_T1            (HC_HTXnFM_Intensity | 0x00000000)
+#define HC_HTXnFM_T2            (HC_HTXnFM_Intensity | 0x00010000)
+#define HC_HTXnFM_T4            (HC_HTXnFM_Intensity | 0x00020000)
+#define HC_HTXnFM_T8            (HC_HTXnFM_Intensity | 0x00030000)
+#define HC_HTXnFM_L1            (HC_HTXnFM_Lum       | 0x00000000)
+#define HC_HTXnFM_L2            (HC_HTXnFM_Lum       | 0x00010000)
+#define HC_HTXnFM_L4            (HC_HTXnFM_Lum       | 0x00020000)
+#define HC_HTXnFM_L8            (HC_HTXnFM_Lum       | 0x00030000)
+#define HC_HTXnFM_AL44          (HC_HTXnFM_Lum       | 0x00040000)
+#define HC_HTXnFM_AL88          (HC_HTXnFM_Lum       | 0x00050000)
+#define HC_HTXnFM_A1            (HC_HTXnFM_Alpha     | 0x00000000)
+#define HC_HTXnFM_A2            (HC_HTXnFM_Alpha     | 0x00010000)
+#define HC_HTXnFM_A4            (HC_HTXnFM_Alpha     | 0x00020000)
+#define HC_HTXnFM_A8            (HC_HTXnFM_Alpha     | 0x00030000)
+#define HC_HTXnFM_DX1           (HC_HTXnFM_DX        | 0x00010000)
+#define HC_HTXnFM_DX23          (HC_HTXnFM_DX        | 0x00020000)
+#define HC_HTXnFM_DX45          (HC_HTXnFM_DX        | 0x00030000)
+#define HC_HTXnFM_RGB555        (HC_HTXnFM_ARGB16    | 0x00000000)
+#define HC_HTXnFM_RGB565        (HC_HTXnFM_ARGB16    | 0x00010000)
+#define HC_HTXnFM_ARGB1555      (HC_HTXnFM_ARGB16    | 0x00020000)
+#define HC_HTXnFM_ARGB4444      (HC_HTXnFM_ARGB16    | 0x00030000)
+#define HC_HTXnFM_ARGB0888      (HC_HTXnFM_ARGB32    | 0x00000000)
+#define HC_HTXnFM_ARGB8888      (HC_HTXnFM_ARGB32    | 0x00010000)
+#define HC_HTXnFM_BGR555        (HC_HTXnFM_ABGR16    | 0x00000000)
+#define HC_HTXnFM_BGR565        (HC_HTXnFM_ABGR16    | 0x00010000)
+#define HC_HTXnFM_ABGR1555      (HC_HTXnFM_ABGR16    | 0x00020000)
+#define HC_HTXnFM_ABGR4444      (HC_HTXnFM_ABGR16    | 0x00030000)
+#define HC_HTXnFM_ABGR0888      (HC_HTXnFM_ABGR32    | 0x00000000)
+#define HC_HTXnFM_ABGR8888      (HC_HTXnFM_ABGR32    | 0x00010000)
+#define HC_HTXnFM_RGBA5550      (HC_HTXnFM_RGBA16    | 0x00000000)
+#define HC_HTXnFM_RGBA5551      (HC_HTXnFM_RGBA16    | 0x00020000)
+#define HC_HTXnFM_RGBA4444      (HC_HTXnFM_RGBA16    | 0x00030000)
+#define HC_HTXnFM_RGBA8880      (HC_HTXnFM_RGBA32    | 0x00000000)
+#define HC_HTXnFM_RGBA8888      (HC_HTXnFM_RGBA32    | 0x00010000)
+#define HC_HTXnFM_BGRA5550      (HC_HTXnFM_BGRA16    | 0x00000000)
+#define HC_HTXnFM_BGRA5551      (HC_HTXnFM_BGRA16    | 0x00020000)
+#define HC_HTXnFM_BGRA4444      (HC_HTXnFM_BGRA16    | 0x00030000)
+#define HC_HTXnFM_BGRA8880      (HC_HTXnFM_BGRA32    | 0x00000000)
+#define HC_HTXnFM_BGRA8888      (HC_HTXnFM_BGRA32    | 0x00010000)
+#define HC_HTXnFM_VU88          (HC_HTXnFM_BUMPMAP   | 0x00000000)
+#define HC_HTXnFM_LVU655        (HC_HTXnFM_BUMPMAP   | 0x00010000)
+#define HC_HTXnFM_LVU888        (HC_HTXnFM_BUMPMAP   | 0x00020000)
+#define HC_HTXnLoc_Local        0x00000000
+#define HC_HTXnLoc_Sys          0x00000002
+#define HC_HTXnLoc_AGP          0x00000003
+/* HC_SubA_HTXnTRAH        0x007f
+ */
+#define HC_HTXnTRAH_MASK        0x00ff0000
+#define HC_HTXnTRAL_MASK        0x0000ff00
+#define HC_HTXnTBA_MASK         0x000000ff
+#define HC_HTXnTRAH_SHIFT       16
+#define HC_HTXnTRAL_SHIFT       8
+/* HC_SubA_HTXnTBLCsat     0x0080
+ *-- Define the input texture.
+ */
+#define HC_XTC_TOPC             0x00000000
+#define HC_XTC_InvTOPC          0x00000010
+#define HC_XTC_TOPCp5           0x00000020
+#define HC_XTC_Cbias            0x00000000
+#define HC_XTC_InvCbias         0x00000010
+#define HC_XTC_0                0x00000000
+#define HC_XTC_Dif              0x00000001
+#define HC_XTC_Spec             0x00000002
+#define HC_XTC_Tex              0x00000003
+#define HC_XTC_Cur              0x00000004
+#define HC_XTC_Adif             0x00000005
+#define HC_XTC_Fog              0x00000006
+#define HC_XTC_Atex             0x00000007
+#define HC_XTC_Acur             0x00000008
+#define HC_XTC_HTXnTBLRC        0x00000009
+#define HC_XTC_Ctexnext         0x0000000a
+/*--
+ */
+#define HC_HTXnTBLCsat_MASK     0x00800000
+#define HC_HTXnTBLCa_MASK       0x000fc000
+#define HC_HTXnTBLCb_MASK       0x00001f80
+#define HC_HTXnTBLCc_MASK       0x0000003f
+#define HC_HTXnTBLCa_TOPC       (HC_XTC_TOPC << 14)
+#define HC_HTXnTBLCa_InvTOPC    (HC_XTC_InvTOPC << 14)
+#define HC_HTXnTBLCa_TOPCp5     (HC_XTC_TOPCp5 << 14)
+#define HC_HTXnTBLCa_0          (HC_XTC_0 << 14)
+#define HC_HTXnTBLCa_Dif        (HC_XTC_Dif << 14)
+#define HC_HTXnTBLCa_Spec       (HC_XTC_Spec << 14)
+#define HC_HTXnTBLCa_Tex        (HC_XTC_Tex << 14)
+#define HC_HTXnTBLCa_Cur        (HC_XTC_Cur << 14)
+#define HC_HTXnTBLCa_Adif       (HC_XTC_Adif << 14)
+#define HC_HTXnTBLCa_Fog        (HC_XTC_Fog << 14)
+#define HC_HTXnTBLCa_Atex       (HC_XTC_Atex << 14)
+#define HC_HTXnTBLCa_Acur       (HC_XTC_Acur << 14)
+#define HC_HTXnTBLCa_HTXnTBLRC  (HC_XTC_HTXnTBLRC << 14)
+#define HC_HTXnTBLCa_Ctexnext   (HC_XTC_Ctexnext << 14)
+#define HC_HTXnTBLCb_TOPC       (HC_XTC_TOPC << 7)
+#define HC_HTXnTBLCb_InvTOPC    (HC_XTC_InvTOPC << 7)
+#define HC_HTXnTBLCb_TOPCp5     (HC_XTC_TOPCp5 << 7)
+#define HC_HTXnTBLCb_0          (HC_XTC_0 << 7)
+#define HC_HTXnTBLCb_Dif        (HC_XTC_Dif << 7)
+#define HC_HTXnTBLCb_Spec       (HC_XTC_Spec << 7)
+#define HC_HTXnTBLCb_Tex        (HC_XTC_Tex << 7)
+#define HC_HTXnTBLCb_Cur        (HC_XTC_Cur << 7)
+#define HC_HTXnTBLCb_Adif       (HC_XTC_Adif << 7)
+#define HC_HTXnTBLCb_Fog        (HC_XTC_Fog << 7)
+#define HC_HTXnTBLCb_Atex       (HC_XTC_Atex << 7)
+#define HC_HTXnTBLCb_Acur       (HC_XTC_Acur << 7)
+#define HC_HTXnTBLCb_HTXnTBLRC  (HC_XTC_HTXnTBLRC << 7)
+#define HC_HTXnTBLCb_Ctexnext   (HC_XTC_Ctexnext << 7)
+#define HC_HTXnTBLCc_TOPC       (HC_XTC_TOPC << 0)
+#define HC_HTXnTBLCc_InvTOPC    (HC_XTC_InvTOPC << 0)
+#define HC_HTXnTBLCc_TOPCp5     (HC_XTC_TOPCp5 << 0)
+#define HC_HTXnTBLCc_0          (HC_XTC_0 << 0)
+#define HC_HTXnTBLCc_Dif        (HC_XTC_Dif << 0)
+#define HC_HTXnTBLCc_Spec       (HC_XTC_Spec << 0)
+#define HC_HTXnTBLCc_Tex        (HC_XTC_Tex << 0)
+#define HC_HTXnTBLCc_Cur        (HC_XTC_Cur << 0)
+#define HC_HTXnTBLCc_Adif       (HC_XTC_Adif << 0)
+#define HC_HTXnTBLCc_Fog        (HC_XTC_Fog << 0)
+#define HC_HTXnTBLCc_Atex       (HC_XTC_Atex << 0)
+#define HC_HTXnTBLCc_Acur       (HC_XTC_Acur << 0)
+#define HC_HTXnTBLCc_HTXnTBLRC  (HC_XTC_HTXnTBLRC << 0)
+#define HC_HTXnTBLCc_Ctexnext   (HC_XTC_Ctexnext << 0)
+/* HC_SubA_HTXnTBLCop      0x0081
+ */
+#define HC_HTXnTBLdot_MASK      0x00c00000
+#define HC_HTXnTBLCop_MASK      0x00380000
+#define HC_HTXnTBLCbias_MASK    0x0007c000
+#define HC_HTXnTBLCshift_MASK   0x00001800
+#define HC_HTXnTBLAop_MASK      0x00000380
+#define HC_HTXnTBLAbias_MASK    0x00000078
+#define HC_HTXnTBLAshift_MASK   0x00000003
+#define HC_HTXnTBLCop_Add       0x00000000
+#define HC_HTXnTBLCop_Sub       0x00080000
+#define HC_HTXnTBLCop_Min       0x00100000
+#define HC_HTXnTBLCop_Max       0x00180000
+#define HC_HTXnTBLCop_Mask      0x00200000
+#define HC_HTXnTBLCbias_Cbias           (HC_XTC_Cbias << 14)
+#define HC_HTXnTBLCbias_InvCbias        (HC_XTC_InvCbias << 14)
+#define HC_HTXnTBLCbias_0               (HC_XTC_0 << 14)
+#define HC_HTXnTBLCbias_Dif             (HC_XTC_Dif << 14)
+#define HC_HTXnTBLCbias_Spec            (HC_XTC_Spec << 14)
+#define HC_HTXnTBLCbias_Tex             (HC_XTC_Tex << 14)
+#define HC_HTXnTBLCbias_Cur             (HC_XTC_Cur << 14)
+#define HC_HTXnTBLCbias_Adif            (HC_XTC_Adif << 14)
+#define HC_HTXnTBLCbias_Fog             (HC_XTC_Fog << 14)
+#define HC_HTXnTBLCbias_Atex            (HC_XTC_Atex << 14)
+#define HC_HTXnTBLCbias_Acur            (HC_XTC_Acur << 14)
+#define HC_HTXnTBLCbias_HTXnTBLRC       (HC_XTC_HTXnTBLRC << 14)
+#define HC_HTXnTBLCshift_1      0x00000000
+#define HC_HTXnTBLCshift_2      0x00000800
+#define HC_HTXnTBLCshift_No     0x00001000
+#define HC_HTXnTBLCshift_DotP   0x00001800
+/*=* John Sheng [2003.7.18] texture combine *=*/
+#define HC_HTXnTBLDOT3   0x00080000
+#define HC_HTXnTBLDOT4   0x000C0000
+
+#define HC_HTXnTBLAop_Add       0x00000000
+#define HC_HTXnTBLAop_Sub       0x00000080
+#define HC_HTXnTBLAop_Min       0x00000100
+#define HC_HTXnTBLAop_Max       0x00000180
+#define HC_HTXnTBLAop_Mask      0x00000200
+#define HC_HTXnTBLAbias_Inv             0x00000040
+#define HC_HTXnTBLAbias_Adif            0x00000000
+#define HC_HTXnTBLAbias_Fog             0x00000008
+#define HC_HTXnTBLAbias_Acur            0x00000010
+#define HC_HTXnTBLAbias_HTXnTBLRAbias   0x00000018
+#define HC_HTXnTBLAbias_Atex            0x00000020
+#define HC_HTXnTBLAshift_1      0x00000000
+#define HC_HTXnTBLAshift_2      0x00000001
+#define HC_HTXnTBLAshift_No     0x00000002
+/* #define HC_HTXnTBLAshift_DotP   0x00000003 */
+/* HC_SubA_HTXnTBLMPFog    0x0082
+ */
+#define HC_HTXnTBLMPfog_MASK    0x00e00000
+#define HC_HTXnTBLMPfog_0       0x00000000
+#define HC_HTXnTBLMPfog_Adif    0x00200000
+#define HC_HTXnTBLMPfog_Fog     0x00400000
+#define HC_HTXnTBLMPfog_Atex    0x00600000
+#define HC_HTXnTBLMPfog_Acur    0x00800000
+#define HC_HTXnTBLMPfog_GHTXnTBLRFog    0x00a00000
+/* HC_SubA_HTXnTBLAsat     0x0083
+ *-- Define the texture alpha input.
+ */
+#define HC_XTA_TOPA             0x00000000
+#define HC_XTA_InvTOPA          0x00000008
+#define HC_XTA_TOPAp5           0x00000010
+#define HC_XTA_Adif             0x00000000
+#define HC_XTA_Fog              0x00000001
+#define HC_XTA_Acur             0x00000002
+#define HC_XTA_HTXnTBLRA        0x00000003
+#define HC_XTA_Atex             0x00000004
+#define HC_XTA_Atexnext         0x00000005
+/*--
+ */
+#define HC_HTXnTBLAsat_MASK     0x00800000
+#define HC_HTXnTBLAMB_MASK      0x00700000
+#define HC_HTXnTBLAa_MASK       0x0007c000
+#define HC_HTXnTBLAb_MASK       0x00000f80
+#define HC_HTXnTBLAc_MASK       0x0000001f
+#define HC_HTXnTBLAMB_SHIFT     20
+#define HC_HTXnTBLAa_TOPA       (HC_XTA_TOPA << 14)
+#define HC_HTXnTBLAa_InvTOPA    (HC_XTA_InvTOPA << 14)
+#define HC_HTXnTBLAa_TOPAp5     (HC_XTA_TOPAp5 << 14)
+#define HC_HTXnTBLAa_Adif       (HC_XTA_Adif << 14)
+#define HC_HTXnTBLAa_Fog        (HC_XTA_Fog << 14)
+#define HC_HTXnTBLAa_Acur       (HC_XTA_Acur << 14)
+#define HC_HTXnTBLAa_HTXnTBLRA  (HC_XTA_HTXnTBLRA << 14)
+#define HC_HTXnTBLAa_Atex       (HC_XTA_Atex << 14)
+#define HC_HTXnTBLAa_Atexnext   (HC_XTA_Atexnext << 14)
+#define HC_HTXnTBLAb_TOPA       (HC_XTA_TOPA << 7)
+#define HC_HTXnTBLAb_InvTOPA    (HC_XTA_InvTOPA << 7)
+#define HC_HTXnTBLAb_TOPAp5     (HC_XTA_TOPAp5 << 7)
+#define HC_HTXnTBLAb_Adif       (HC_XTA_Adif << 7)
+#define HC_HTXnTBLAb_Fog        (HC_XTA_Fog << 7)
+#define HC_HTXnTBLAb_Acur       (HC_XTA_Acur << 7)
+#define HC_HTXnTBLAb_HTXnTBLRA  (HC_XTA_HTXnTBLRA << 7)
+#define HC_HTXnTBLAb_Atex       (HC_XTA_Atex << 7)
+#define HC_HTXnTBLAb_Atexnext   (HC_XTA_Atexnext << 7)
+#define HC_HTXnTBLAc_TOPA       (HC_XTA_TOPA << 0)
+#define HC_HTXnTBLAc_InvTOPA    (HC_XTA_InvTOPA << 0)
+#define HC_HTXnTBLAc_TOPAp5     (HC_XTA_TOPAp5 << 0)
+#define HC_HTXnTBLAc_Adif       (HC_XTA_Adif << 0)
+#define HC_HTXnTBLAc_Fog        (HC_XTA_Fog << 0)
+#define HC_HTXnTBLAc_Acur       (HC_XTA_Acur << 0)
+#define HC_HTXnTBLAc_HTXnTBLRA  (HC_XTA_HTXnTBLRA << 0)
+#define HC_HTXnTBLAc_Atex       (HC_XTA_Atex << 0)
+#define HC_HTXnTBLAc_Atexnext   (HC_XTA_Atexnext << 0)
+/* HC_SubA_HTXnTBLRAa      0x0089
+ */
+#define HC_HTXnTBLRAa_MASK      0x00ff0000
+#define HC_HTXnTBLRAb_MASK      0x0000ff00
+#define HC_HTXnTBLRAc_MASK      0x000000ff
+#define HC_HTXnTBLRAa_SHIFT     16
+#define HC_HTXnTBLRAb_SHIFT     8
+#define HC_HTXnTBLRAc_SHIFT     0
+/* HC_SubA_HTXnTBLRFog     0x008a
+ */
+#define HC_HTXnTBLRFog_MASK     0x0000ff00
+#define HC_HTXnTBLRAbias_MASK   0x000000ff
+#define HC_HTXnTBLRFog_SHIFT    8
+#define HC_HTXnTBLRAbias_SHIFT  0
+/* HC_SubA_HTXnLScale      0x0094
+ */
+#define HC_HTXnLScale_MASK      0x0007fc00
+#define HC_HTXnLOff_MASK        0x000001ff
+#define HC_HTXnLScale_SHIFT     10
+/* HC_SubA_HTXSMD          0x0000
+ */
+#define HC_HTXSMD_MASK          0x00000080
+#define HC_HTXTMD_MASK          0x00000040
+#define HC_HTXNum_MASK          0x00000038
+#define HC_HTXTRMD_MASK         0x00000006
+#define HC_HTXCHCLR_MASK        0x00000001
+#define HC_HTXNum_SHIFT         3
+
+/* Texture Palette n
+ */
+#define HC_SubType_TexPalette0  0x00000000
+#define HC_SubType_TexPalette1  0x00000001
+#define HC_SubType_FogTable     0x00000010
+#define HC_SubType_Stipple      0x00000014
+/* HC_SubA_TexPalette0     0x0000
+ */
+#define HC_HTPnA_MASK           0xff000000
+#define HC_HTPnR_MASK           0x00ff0000
+#define HC_HTPnG_MASK           0x0000ff00
+#define HC_HTPnB_MASK           0x000000ff
+/* HC_SubA_FogTable        0x0010
+ */
+#define HC_HFPn3_MASK           0xff000000
+#define HC_HFPn2_MASK           0x00ff0000
+#define HC_HFPn1_MASK           0x0000ff00
+#define HC_HFPn_MASK            0x000000ff
+#define HC_HFPn3_SHIFT          24
+#define HC_HFPn2_SHIFT          16
+#define HC_HFPn1_SHIFT          8
+
+/* Auto Testing & Security
+ */
+#define HC_SubA_HenFIFOAT       0x0000
+#define HC_SubA_HFBDrawFirst    0x0004
+#define HC_SubA_HFBBasL         0x0005
+#define HC_SubA_HFBDst          0x0006
+/* HC_SubA_HenFIFOAT       0x0000
+ */
+#define HC_HenFIFOAT_MASK       0x00000020
+#define HC_HenGEMILock_MASK     0x00000010
+#define HC_HenFBASwap_MASK      0x00000008
+#define HC_HenOT_MASK           0x00000004
+#define HC_HenCMDQ_MASK         0x00000002
+#define HC_HenTXCTSU_MASK       0x00000001
+/* HC_SubA_HFBDrawFirst    0x0004
+ */
+#define HC_HFBDrawFirst_MASK    0x00000800
+#define HC_HFBQueue_MASK        0x00000400
+#define HC_HFBLock_MASK         0x00000200
+#define HC_HEOF_MASK            0x00000100
+#define HC_HFBBasH_MASK         0x000000ff
+
+/* GEMI Setting
+ */
+#define HC_SubA_HTArbRCM        0x0008
+#define HC_SubA_HTArbRZ         0x000a
+#define HC_SubA_HTArbWZ         0x000b
+#define HC_SubA_HTArbRTX        0x000c
+#define HC_SubA_HTArbRCW        0x000d
+#define HC_SubA_HTArbE2         0x000e
+#define HC_SubA_HArbRQCM        0x0010
+#define HC_SubA_HArbWQCM        0x0011
+#define HC_SubA_HGEMITout       0x0020
+#define HC_SubA_HFthRTXD        0x0040
+#define HC_SubA_HFthRTXA        0x0044
+#define HC_SubA_HCMDQstL        0x0050
+#define HC_SubA_HCMDQendL       0x0051
+#define HC_SubA_HCMDQLen        0x0052
+/* HC_SubA_HTArbRCM        0x0008
+ */
+#define HC_HTArbRCM_MASK        0x0000ffff
+/* HC_SubA_HTArbRZ         0x000a
+ */
+#define HC_HTArbRZ_MASK         0x0000ffff
+/* HC_SubA_HTArbWZ         0x000b
+ */
+#define HC_HTArbWZ_MASK         0x0000ffff
+/* HC_SubA_HTArbRTX        0x000c
+ */
+#define HC_HTArbRTX_MASK        0x0000ffff
+/* HC_SubA_HTArbRCW        0x000d
+ */
+#define HC_HTArbRCW_MASK        0x0000ffff
+/* HC_SubA_HTArbE2         0x000e
+ */
+#define HC_HTArbE2_MASK         0x0000ffff
+/* HC_SubA_HArbRQCM        0x0010
+ */
+#define HC_HTArbRQCM_MASK       0x0000ffff
+/* HC_SubA_HArbWQCM        0x0011
+ */
+#define HC_HArbWQCM_MASK        0x0000ffff
+/* HC_SubA_HGEMITout       0x0020
+ */
+#define HC_HGEMITout_MASK       0x000f0000
+#define HC_HNPArbZC_MASK        0x0000ffff
+#define HC_HGEMITout_SHIFT      16
+/* HC_SubA_HFthRTXD        0x0040
+ */
+#define HC_HFthRTXD_MASK        0x00ff0000
+#define HC_HFthRZD_MASK         0x0000ff00
+#define HC_HFthWZD_MASK         0x000000ff
+#define HC_HFthRTXD_SHIFT       16
+#define HC_HFthRZD_SHIFT        8
+/* HC_SubA_HFthRTXA        0x0044
+ */
+#define HC_HFthRTXA_MASK        0x000000ff
+
+/******************************************************************************
+** Define the Halcyon Internal register access constants. For simulator only.
+******************************************************************************/
+#define HC_SIMA_HAGPBstL        0x0000
+#define HC_SIMA_HAGPBendL       0x0001
+#define HC_SIMA_HAGPCMNT        0x0002
+#define HC_SIMA_HAGPBpL         0x0003
+#define HC_SIMA_HAGPBpH         0x0004
+#define HC_SIMA_HClipTB         0x0005
+#define HC_SIMA_HClipLR         0x0006
+#define HC_SIMA_HFPClipTL       0x0007
+#define HC_SIMA_HFPClipBL       0x0008
+#define HC_SIMA_HFPClipLL       0x0009
+#define HC_SIMA_HFPClipRL       0x000a
+#define HC_SIMA_HFPClipTBH      0x000b
+#define HC_SIMA_HFPClipLRH      0x000c
+#define HC_SIMA_HLP             0x000d
+#define HC_SIMA_HLPRF           0x000e
+#define HC_SIMA_HSolidCL        0x000f
+#define HC_SIMA_HPixGC          0x0010
+#define HC_SIMA_HSPXYOS         0x0011
+#define HC_SIMA_HCmdA           0x0012
+#define HC_SIMA_HCmdB           0x0013
+#define HC_SIMA_HEnable         0x0014
+#define HC_SIMA_HZWBBasL        0x0015
+#define HC_SIMA_HZWBBasH        0x0016
+#define HC_SIMA_HZWBType        0x0017
+#define HC_SIMA_HZBiasL         0x0018
+#define HC_SIMA_HZWBend         0x0019
+#define HC_SIMA_HZWTMD          0x001a
+#define HC_SIMA_HZWCDL          0x001b
+#define HC_SIMA_HZWCTAGnum      0x001c
+#define HC_SIMA_HZCYNum         0x001d
+#define HC_SIMA_HZWCFire        0x001e
+/* #define HC_SIMA_HSBBasL         0x001d */
+/* #define HC_SIMA_HSBBasH         0x001e */
+/* #define HC_SIMA_HSBFM           0x001f */
+#define HC_SIMA_HSTREF          0x0020
+#define HC_SIMA_HSTMD           0x0021
+#define HC_SIMA_HABBasL         0x0022
+#define HC_SIMA_HABBasH         0x0023
+#define HC_SIMA_HABFM           0x0024
+#define HC_SIMA_HATMD           0x0025
+#define HC_SIMA_HABLCsat        0x0026
+#define HC_SIMA_HABLCop         0x0027
+#define HC_SIMA_HABLAsat        0x0028
+#define HC_SIMA_HABLAop         0x0029
+#define HC_SIMA_HABLRCa         0x002a
+#define HC_SIMA_HABLRFCa        0x002b
+#define HC_SIMA_HABLRCbias      0x002c
+#define HC_SIMA_HABLRCb         0x002d
+#define HC_SIMA_HABLRFCb        0x002e
+#define HC_SIMA_HABLRAa         0x002f
+#define HC_SIMA_HABLRAb         0x0030
+#define HC_SIMA_HDBBasL         0x0031
+#define HC_SIMA_HDBBasH         0x0032
+#define HC_SIMA_HDBFM           0x0033
+#define HC_SIMA_HFBBMSKL        0x0034
+#define HC_SIMA_HROP            0x0035
+#define HC_SIMA_HFogLF          0x0036
+#define HC_SIMA_HFogCL          0x0037
+#define HC_SIMA_HFogCH          0x0038
+#define HC_SIMA_HFogStL         0x0039
+#define HC_SIMA_HFogStH         0x003a
+#define HC_SIMA_HFogOOdMF       0x003b
+#define HC_SIMA_HFogOOdEF       0x003c
+#define HC_SIMA_HFogEndL        0x003d
+#define HC_SIMA_HFogDenst       0x003e
+/*---- start of texture 0 setting ----
+ */
+#define HC_SIMA_HTX0L0BasL      0x0040
+#define HC_SIMA_HTX0L1BasL      0x0041
+#define HC_SIMA_HTX0L2BasL      0x0042
+#define HC_SIMA_HTX0L3BasL      0x0043
+#define HC_SIMA_HTX0L4BasL      0x0044
+#define HC_SIMA_HTX0L5BasL      0x0045
+#define HC_SIMA_HTX0L6BasL      0x0046
+#define HC_SIMA_HTX0L7BasL      0x0047
+#define HC_SIMA_HTX0L8BasL      0x0048
+#define HC_SIMA_HTX0L9BasL      0x0049
+#define HC_SIMA_HTX0LaBasL      0x004a
+#define HC_SIMA_HTX0LbBasL      0x004b
+#define HC_SIMA_HTX0LcBasL      0x004c
+#define HC_SIMA_HTX0LdBasL      0x004d
+#define HC_SIMA_HTX0LeBasL      0x004e
+#define HC_SIMA_HTX0LfBasL      0x004f
+#define HC_SIMA_HTX0L10BasL     0x0050
+#define HC_SIMA_HTX0L11BasL     0x0051
+#define HC_SIMA_HTX0L012BasH    0x0052
+#define HC_SIMA_HTX0L345BasH    0x0053
+#define HC_SIMA_HTX0L678BasH    0x0054
+#define HC_SIMA_HTX0L9abBasH    0x0055
+#define HC_SIMA_HTX0LcdeBasH    0x0056
+#define HC_SIMA_HTX0Lf1011BasH  0x0057
+#define HC_SIMA_HTX0L0Pit       0x0058
+#define HC_SIMA_HTX0L1Pit       0x0059
+#define HC_SIMA_HTX0L2Pit       0x005a
+#define HC_SIMA_HTX0L3Pit       0x005b
+#define HC_SIMA_HTX0L4Pit       0x005c
+#define HC_SIMA_HTX0L5Pit       0x005d
+#define HC_SIMA_HTX0L6Pit       0x005e
+#define HC_SIMA_HTX0L7Pit       0x005f
+#define HC_SIMA_HTX0L8Pit       0x0060
+#define HC_SIMA_HTX0L9Pit       0x0061
+#define HC_SIMA_HTX0LaPit       0x0062
+#define HC_SIMA_HTX0LbPit       0x0063
+#define HC_SIMA_HTX0LcPit       0x0064
+#define HC_SIMA_HTX0LdPit       0x0065
+#define HC_SIMA_HTX0LePit       0x0066
+#define HC_SIMA_HTX0LfPit       0x0067
+#define HC_SIMA_HTX0L10Pit      0x0068
+#define HC_SIMA_HTX0L11Pit      0x0069
+#define HC_SIMA_HTX0L0_5WE      0x006a
+#define HC_SIMA_HTX0L6_bWE      0x006b
+#define HC_SIMA_HTX0Lc_11WE     0x006c
+#define HC_SIMA_HTX0L0_5HE      0x006d
+#define HC_SIMA_HTX0L6_bHE      0x006e
+#define HC_SIMA_HTX0Lc_11HE     0x006f
+#define HC_SIMA_HTX0L0OS        0x0070
+#define HC_SIMA_HTX0TB          0x0071
+#define HC_SIMA_HTX0MPMD        0x0072
+#define HC_SIMA_HTX0CLODu       0x0073
+#define HC_SIMA_HTX0FM          0x0074
+#define HC_SIMA_HTX0TRCH        0x0075
+#define HC_SIMA_HTX0TRCL        0x0076
+#define HC_SIMA_HTX0TBC         0x0077
+#define HC_SIMA_HTX0TRAH        0x0078
+#define HC_SIMA_HTX0TBLCsat     0x0079
+#define HC_SIMA_HTX0TBLCop      0x007a
+#define HC_SIMA_HTX0TBLMPfog    0x007b
+#define HC_SIMA_HTX0TBLAsat     0x007c
+#define HC_SIMA_HTX0TBLRCa      0x007d
+#define HC_SIMA_HTX0TBLRCb      0x007e
+#define HC_SIMA_HTX0TBLRCc      0x007f
+#define HC_SIMA_HTX0TBLRCbias   0x0080
+#define HC_SIMA_HTX0TBLRAa      0x0081
+#define HC_SIMA_HTX0TBLRFog     0x0082
+#define HC_SIMA_HTX0BumpM00     0x0083
+#define HC_SIMA_HTX0BumpM01     0x0084
+#define HC_SIMA_HTX0BumpM10     0x0085
+#define HC_SIMA_HTX0BumpM11     0x0086
+#define HC_SIMA_HTX0LScale      0x0087
+/*---- end of texture 0 setting ----      0x008f
+ */
+#define HC_SIMA_TX0TX1_OFF      0x0050
+/*---- start of texture 1 setting ----
+ */
+#define HC_SIMA_HTX1L0BasL      (HC_SIMA_HTX0L0BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L1BasL      (HC_SIMA_HTX0L1BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L2BasL      (HC_SIMA_HTX0L2BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L3BasL      (HC_SIMA_HTX0L3BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L4BasL      (HC_SIMA_HTX0L4BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L5BasL      (HC_SIMA_HTX0L5BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6BasL      (HC_SIMA_HTX0L6BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L7BasL      (HC_SIMA_HTX0L7BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L8BasL      (HC_SIMA_HTX0L8BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L9BasL      (HC_SIMA_HTX0L9BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LaBasL      (HC_SIMA_HTX0LaBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LbBasL      (HC_SIMA_HTX0LbBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LcBasL      (HC_SIMA_HTX0LcBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LdBasL      (HC_SIMA_HTX0LdBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LeBasL      (HC_SIMA_HTX0LeBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LfBasL      (HC_SIMA_HTX0LfBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L10BasL     (HC_SIMA_HTX0L10BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L11BasL     (HC_SIMA_HTX0L11BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L012BasH    (HC_SIMA_HTX0L012BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L345BasH    (HC_SIMA_HTX0L345BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L678BasH    (HC_SIMA_HTX0L678BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L9abBasH    (HC_SIMA_HTX0L9abBasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LcdeBasH    (HC_SIMA_HTX0LcdeBasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1Lf1011BasH  (HC_SIMA_HTX0Lf1011BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0Pit       (HC_SIMA_HTX0L0Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L1Pit       (HC_SIMA_HTX0L1Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L2Pit       (HC_SIMA_HTX0L2Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L3Pit       (HC_SIMA_HTX0L3Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L4Pit       (HC_SIMA_HTX0L4Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L5Pit       (HC_SIMA_HTX0L5Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6Pit       (HC_SIMA_HTX0L6Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L7Pit       (HC_SIMA_HTX0L7Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L8Pit       (HC_SIMA_HTX0L8Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L9Pit       (HC_SIMA_HTX0L9Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LaPit       (HC_SIMA_HTX0LaPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LbPit       (HC_SIMA_HTX0LbPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LcPit       (HC_SIMA_HTX0LcPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LdPit       (HC_SIMA_HTX0LdPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LePit       (HC_SIMA_HTX0LePit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LfPit       (HC_SIMA_HTX0LfPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L10Pit      (HC_SIMA_HTX0L10Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L11Pit      (HC_SIMA_HTX0L11Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0_5WE      (HC_SIMA_HTX0L0_5WE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6_bWE      (HC_SIMA_HTX0L6_bWE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1Lc_11WE     (HC_SIMA_HTX0Lc_11WE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0_5HE      (HC_SIMA_HTX0L0_5HE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6_bHE      (HC_SIMA_HTX0L6_bHE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1Lc_11HE     (HC_SIMA_HTX0Lc_11HE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0OS        (HC_SIMA_HTX0L0OS + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TB          (HC_SIMA_HTX0TB + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1MPMD        (HC_SIMA_HTX0MPMD + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1CLODu       (HC_SIMA_HTX0CLODu + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1FM          (HC_SIMA_HTX0FM + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TRCH        (HC_SIMA_HTX0TRCH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TRCL        (HC_SIMA_HTX0TRCL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBC         (HC_SIMA_HTX0TBC + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TRAH        (HC_SIMA_HTX0TRAH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LTC         (HC_SIMA_HTX0LTC + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LTA         (HC_SIMA_HTX0LTA + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLCsat     (HC_SIMA_HTX0TBLCsat + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLCop      (HC_SIMA_HTX0TBLCop + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLMPfog    (HC_SIMA_HTX0TBLMPfog + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLAsat     (HC_SIMA_HTX0TBLAsat + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCa      (HC_SIMA_HTX0TBLRCa + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCb      (HC_SIMA_HTX0TBLRCb + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCc      (HC_SIMA_HTX0TBLRCc + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCbias   (HC_SIMA_HTX0TBLRCbias + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRAa      (HC_SIMA_HTX0TBLRAa + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRFog     (HC_SIMA_HTX0TBLRFog + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM00     (HC_SIMA_HTX0BumpM00 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM01     (HC_SIMA_HTX0BumpM01 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM10     (HC_SIMA_HTX0BumpM10 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM11     (HC_SIMA_HTX0BumpM11 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LScale      (HC_SIMA_HTX0LScale + HC_SIMA_TX0TX1_OFF)
+/*---- end of texture 1 setting ---- 0xaf
+ */
+#define HC_SIMA_HTXSMD          0x00b0
+#define HC_SIMA_HenFIFOAT       0x00b1
+#define HC_SIMA_HFBDrawFirst    0x00b2
+#define HC_SIMA_HFBBasL         0x00b3
+#define HC_SIMA_HTArbRCM        0x00b4
+#define HC_SIMA_HTArbRZ         0x00b5
+#define HC_SIMA_HTArbWZ         0x00b6
+#define HC_SIMA_HTArbRTX        0x00b7
+#define HC_SIMA_HTArbRCW        0x00b8
+#define HC_SIMA_HTArbE2         0x00b9
+#define HC_SIMA_HGEMITout       0x00ba
+#define HC_SIMA_HFthRTXD        0x00bb
+#define HC_SIMA_HFthRTXA        0x00bc
+/* Define the texture palette 0
+ */
+#define HC_SIMA_HTP0            0x0100
+#define HC_SIMA_HTP1            0x0200
+#define HC_SIMA_FOGTABLE        0x0300
+#define HC_SIMA_STIPPLE         0x0400
+#define HC_SIMA_HE3Fire         0x0440
+#define HC_SIMA_TRANS_SET       0x0441
+#define HC_SIMA_HREngSt         0x0442
+#define HC_SIMA_HRFIFOempty     0x0443
+#define HC_SIMA_HRFIFOfull      0x0444
+#define HC_SIMA_HRErr           0x0445
+#define HC_SIMA_FIFOstatus      0x0446
+
+/******************************************************************************
+** Define the AGP command header.
+******************************************************************************/
+#define HC_ACMD_MASK            0xfe000000
+#define HC_ACMD_SUB_MASK        0x0c000000
+#define HC_ACMD_HCmdA           0xee000000
+#define HC_ACMD_HCmdB           0xec000000
+#define HC_ACMD_HCmdC           0xea000000
+#define HC_ACMD_H1              0xf0000000
+#define HC_ACMD_H2              0xf2000000
+#define HC_ACMD_H3              0xf4000000
+#define HC_ACMD_H4              0xf6000000
+
+#define HC_ACMD_H1IO_MASK       0x000001ff
+#define HC_ACMD_H2IO1_MASK      0x001ff000
+#define HC_ACMD_H2IO2_MASK      0x000001ff
+#define HC_ACMD_H2IO1_SHIFT     12
+#define HC_ACMD_H2IO2_SHIFT     0
+#define HC_ACMD_H3IO_MASK       0x000001ff
+#define HC_ACMD_H3COUNT_MASK    0x01fff000
+#define HC_ACMD_H3COUNT_SHIFT   12
+#define HC_ACMD_H4ID_MASK       0x000001ff
+#define HC_ACMD_H4COUNT_MASK    0x01fffe00
+#define HC_ACMD_H4COUNT_SHIFT   9
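+
+/* Field layout notes (derived from the masks above): an H3 header carries
+ * a 9-bit register index in bits 8:0 and a 13-bit dword count in bits
+ * 24:12; an H4 header carries a 9-bit ID in bits 8:0 and a 16-bit count
+ * in bits 24:9.
+ */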
+
+/********************************************************************************
+** Define Header
+********************************************************************************/
+#define HC_HEADER2		0xF210F110
+
+/********************************************************************************
+** Define Dummy Value
+********************************************************************************/
+#define HC_DUMMY		0xCCCCCCCC
+/********************************************************************************
+** Define for DMA use
+********************************************************************************/
+#define HALCYON_HEADER2     0xF210F110
+#define HALCYON_FIRECMD     0xEE100000
+#define HALCYON_FIREMASK    0xFFF00000
+#define HALCYON_CMDB        0xEC000000
+#define HALCYON_CMDBMASK    0xFFFE0000
+#define HALCYON_SUB_ADDR0   0x00000000
+#define HALCYON_HEADER1MASK 0xFFFFFC00
+#define HALCYON_HEADER1     0xF0000000
+#define HC_SubA_HAGPBstL        0x0060
+#define HC_SubA_HAGPBendL       0x0061
+#define HC_SubA_HAGPCMNT        0x0062
+#define HC_SubA_HAGPBpL         0x0063
+#define HC_SubA_HAGPBpH         0x0064
+#define HC_HAGPCMNT_MASK        0x00800000
+#define HC_HCmdErrClr_MASK      0x00400000
+#define HC_HAGPBendH_MASK       0x0000ff00
+#define HC_HAGPBstH_MASK        0x000000ff
+#define HC_HAGPBendH_SHIFT      8
+#define HC_HAGPBstH_SHIFT       0
+#define HC_HAGPBpL_MASK         0x00fffffc
+#define HC_HAGPBpID_MASK        0x00000003
+#define HC_HAGPBpID_PAUSE       0x00000000
+#define HC_HAGPBpID_JUMP        0x00000001
+#define HC_HAGPBpID_STOP        0x00000002
+#define HC_HAGPBpH_MASK         0x00ffffff
+
+#define VIA_VIDEO_HEADER5       0xFE040000
+#define VIA_VIDEO_HEADER6       0xFE050000
+#define VIA_VIDEO_HEADER7       0xFE060000
+#define VIA_VIDEOMASK           0xFFFF0000
+#endif
diff --git a/linux-imx/drivers/gpu/drm/via/via_dma.c b/linux-imx/drivers/gpu/drm/via/via_dma.c
new file mode 100644
index 0000000..13558f5
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/via/via_dma.c
@@ -0,0 +1,740 @@
+/* via_dma.c -- DMA support for the VIA Unichrome/Pro
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
+ * All Rights Reserved.
+ *
+ * Copyright 2004 The Unichrome project.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Tungsten Graphics,
+ *    Erdi Chen,
+ *    Thomas Hellstrom.
+ */
+
+#include <drm/drmP.h>
+#include <drm/via_drm.h>
+#include "via_drv.h"
+#include "via_3d_reg.h"
+
+#define CMDBUF_ALIGNMENT_SIZE   (0x100)
+#define CMDBUF_ALIGNMENT_MASK   (0x0ff)
+
+/* defines for VIA 3D registers */
+#define VIA_REG_STATUS          0x400
+#define VIA_REG_TRANSET         0x43C
+#define VIA_REG_TRANSPACE       0x440
+
+/* VIA_REG_STATUS(0x400): Engine Status */
+#define VIA_CMD_RGTR_BUSY       0x00000080	/* Command Regulator is busy */
+#define VIA_2D_ENG_BUSY         0x00000001	/* 2D Engine is busy */
+#define VIA_3D_ENG_BUSY         0x00000002	/* 3D Engine is busy */
+#define VIA_VR_QUEUE_BUSY       0x00020000	/* Virtual Queue is busy */
+
+#define SetReg2DAGP(nReg, nData) {				\
+	*((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;	\
+	*((uint32_t *)(vb) + 1) = (nData);			\
+	vb = ((uint32_t *)vb) + 2;				\
+	dev_priv->dma_low += 8;					\
+}
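+
+/*
+ * A worked example of the header-1 encoding above (illustrative values):
+ * SetReg2DAGP(0x0C, 0) emits the quadword (0x0C >> 2) | HALCYON_HEADER1 =
+ * 0xF0000003 followed by the data word 0x00000000, i.e. "write 0 to 2D
+ * register 0x0C", and advances dma_low by 8 bytes.
+ */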
+
+#define via_flush_write_combine() DRM_MEMORYBARRIER()
+
+#define VIA_OUT_RING_QW(w1, w2)	do {		\
+	*vb++ = (w1);				\
+	*vb++ = (w2);				\
+	dev_priv->dma_low += 8;			\
+} while (0)
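+
+/*
+ * Note: like SetReg2DAGP above, this macro assumes a local uint32_t *vb
+ * cursor and a dev_priv pointer in the calling scope.
+ */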
+
+static void via_cmdbuf_start(drm_via_private_t *dev_priv);
+static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
+static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
+static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
+static int via_wait_idle(drm_via_private_t *dev_priv);
+static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
+
+/*
+ * Free space in command buffer.
+ */
+
+static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
+{
+	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+	uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
+
+	return ((hw_addr <= dev_priv->dma_low) ?
+		(dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
+		(hw_addr - dev_priv->dma_low));
+}
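+
+/*
+ * Example (illustrative values): with dma_high = 0x10000, dma_low = 0xc000
+ * and the hardware reader at hw_addr = 0x4000, the reader trails the CPU
+ * write pointer, so free space is 0x10000 + 0x4000 - 0xc000 = 0x8000 bytes.
+ */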
+
+/*
+ * How much does the command regulator lag behind?
+ */
+
+static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
+{
+	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+	uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
+
+	return ((hw_addr <= dev_priv->dma_low) ?
+		(dev_priv->dma_low - hw_addr) :
+		(dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
+}
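+
+/*
+ * With the same illustrative values (dma_low = 0xc000, reader at
+ * hw_addr = 0x4000), the regulator lags 0xc000 - 0x4000 = 0x8000 bytes
+ * behind the CPU write pointer.
+ */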
+
+/*
+ * Check that the given size fits in the buffer, otherwise wait.
+ */
+
+static inline int
+via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size)
+{
+	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+	uint32_t cur_addr, hw_addr, next_addr;
+	volatile uint32_t *hw_addr_ptr;
+	uint32_t count;
+	hw_addr_ptr = dev_priv->hw_addr_ptr;
+	cur_addr = dev_priv->dma_low;
+	next_addr = cur_addr + size + 512 * 1024;
+	count = 1000000;
+	do {
+		hw_addr = *hw_addr_ptr - agp_base;
+		if (count-- == 0) {
+			DRM_ERROR
+			    ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
+			     hw_addr, cur_addr, next_addr);
+			return -1;
+		}
+		if  ((cur_addr < hw_addr) && (next_addr >= hw_addr))
+			msleep(1);
+	} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
+	return 0;
+}
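+
+/*
+ * Note the 512 KiB slack added to next_addr above: the loop sleeps in 1 ms
+ * steps while the hardware reader sits inside the window (cur_addr,
+ * next_addr] that the pending write plus slack would otherwise overrun,
+ * and gives up after a million iterations.
+ */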
+
+/*
+ * Checks whether the buffer head has reached the end, and rewinds the
+ * ring buffer when necessary.
+ *
+ * Returns a virtual pointer into the ring buffer.
+ */
+
+static inline uint32_t *via_check_dma(drm_via_private_t *dev_priv,
+				      unsigned int size)
+{
+	if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
+	    dev_priv->dma_high) {
+		via_cmdbuf_rewind(dev_priv);
+	}
+	if (via_cmdbuf_wait(dev_priv, size) != 0)
+		return NULL;
+
+	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
+}
+
+int via_dma_cleanup(struct drm_device *dev)
+{
+	if (dev->dev_private) {
+		drm_via_private_t *dev_priv =
+		    (drm_via_private_t *) dev->dev_private;
+
+		if (dev_priv->ring.virtual_start) {
+			via_cmdbuf_reset(dev_priv);
+
+			drm_core_ioremapfree(&dev_priv->ring.map, dev);
+			dev_priv->ring.virtual_start = NULL;
+		}
+
+	}
+
+	return 0;
+}
+
+static int via_initialize(struct drm_device *dev,
+			  drm_via_private_t *dev_priv,
+			  drm_via_dma_init_t *init)
+{
+	if (!dev_priv || !dev_priv->mmio) {
+		DRM_ERROR("via_dma_init called before via_map_init\n");
+		return -EFAULT;
+	}
+
+	if (dev_priv->ring.virtual_start != NULL) {
+		DRM_ERROR("called again without calling cleanup\n");
+		return -EFAULT;
+	}
+
+	if (!dev->agp || !dev->agp->base) {
+		DRM_ERROR("called with no agp memory available\n");
+		return -EFAULT;
+	}
+
+	if (dev_priv->chipset == VIA_DX9_0) {
+		DRM_ERROR("AGP DMA is not supported on this chip\n");
+		return -EINVAL;
+	}
+
+	dev_priv->ring.map.offset = dev->agp->base + init->offset;
+	dev_priv->ring.map.size = init->size;
+	dev_priv->ring.map.type = 0;
+	dev_priv->ring.map.flags = 0;
+	dev_priv->ring.map.mtrr = 0;
+
+	drm_core_ioremap(&dev_priv->ring.map, dev);
+
+	if (dev_priv->ring.map.handle == NULL) {
+		via_dma_cleanup(dev);
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+	dev_priv->dma_ptr = dev_priv->ring.virtual_start;
+	dev_priv->dma_low = 0;
+	dev_priv->dma_high = init->size;
+	dev_priv->dma_wrap = init->size;
+	dev_priv->dma_offset = init->offset;
+	dev_priv->last_pause_ptr = NULL;
+	dev_priv->hw_addr_ptr =
+		(volatile uint32_t *)((char *)dev_priv->mmio->handle +
+		init->reg_pause_addr);
+
+	via_cmdbuf_start(dev_priv);
+
+	return 0;
+}
+
+static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	drm_via_dma_init_t *init = data;
+	int retcode = 0;
+
+	switch (init->func) {
+	case VIA_INIT_DMA:
+		if (!DRM_SUSER(DRM_CURPROC))
+			retcode = -EPERM;
+		else
+			retcode = via_initialize(dev, dev_priv, init);
+		break;
+	case VIA_CLEANUP_DMA:
+		if (!DRM_SUSER(DRM_CURPROC))
+			retcode = -EPERM;
+		else
+			retcode = via_dma_cleanup(dev);
+		break;
+	case VIA_DMA_INITIALIZED:
+		retcode = (dev_priv->ring.virtual_start != NULL) ?
+			0 : -EFAULT;
+		break;
+	default:
+		retcode = -EINVAL;
+		break;
+	}
+
+	return retcode;
+}
+
+static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
+{
+	drm_via_private_t *dev_priv;
+	uint32_t *vb;
+	int ret;
+
+	dev_priv = (drm_via_private_t *) dev->dev_private;
+
+	if (dev_priv->ring.virtual_start == NULL) {
+		DRM_ERROR("called without initializing AGP ring buffer.\n");
+		return -EFAULT;
+	}
+
+	if (cmd->size > VIA_PCI_BUF_SIZE)
+		return -ENOMEM;
+
+	if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
+		return -EFAULT;
+
+	/*
+	 * Verifying the command stream directly in AGP memory is dead slow.
+	 * Therefore we verify it in a temporary cacheable system memory
+	 * buffer and copy it to AGP memory when ready.
+	 */
+
+	if ((ret =
+	     via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
+				       cmd->size, dev, 1))) {
+		return ret;
+	}
+
+	vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
+	if (vb == NULL)
+		return -EAGAIN;
+
+	memcpy(vb, dev_priv->pci_buf, cmd->size);
+
+	dev_priv->dma_low += cmd->size;
+
+	/*
+	 * Small submissions somehow stall the CPU (AGP cache effects?),
+	 * so pad them to a greater size.
+	 */
+
+	if (cmd->size < 0x100)
+		via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
+	via_cmdbuf_pause(dev_priv);
+
+	return 0;
+}
+
+int via_driver_dma_quiescent(struct drm_device *dev)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+
+	if (!via_wait_idle(dev_priv))
+		return -EBUSY;
+	return 0;
+}
+
+static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	return via_driver_dma_quiescent(dev);
+}
+
+static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_cmdbuffer_t *cmdbuf = data;
+	int ret;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
+
+	ret = via_dispatch_cmdbuffer(dev, cmdbuf);
+	return ret;
+}
+
+static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
+				      drm_via_cmdbuffer_t *cmd)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	if (cmd->size > VIA_PCI_BUF_SIZE)
+		return -ENOMEM;
+	if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
+		return -EFAULT;
+
+	if ((ret =
+	     via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
+				       cmd->size, dev, 0))) {
+		return ret;
+	}
+
+	ret =
+	    via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
+				     cmd->size);
+	return ret;
+}
+
+static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_cmdbuffer_t *cmdbuf = data;
+	int ret;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
+
+	ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
+	return ret;
+}
+
+static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv,
+					 uint32_t *vb, int qw_count)
+{
+	for (; qw_count > 0; --qw_count)
+		VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
+	return vb;
+}
+
+/*
+ * This function is used internally by ring buffer management code.
+ *
+ * Returns virtual pointer to ring buffer.
+ */
+static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv)
+{
+	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
+}
+
+/*
+ * Hooks a segment of data into the tail of the ring-buffer by
+ * modifying the pause address stored in the buffer itself. If
+ * the regulator has already paused, restart it.
+ */
+static int via_hook_segment(drm_via_private_t *dev_priv,
+			    uint32_t pause_addr_hi, uint32_t pause_addr_lo,
+			    int no_pci_fire)
+{
+	int paused, count;
+	volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
+	uint32_t reader, ptr;
+	uint32_t diff;
+
+	paused = 0;
+	via_flush_write_combine();
+	(void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
+
+	*paused_at = pause_addr_lo;
+	via_flush_write_combine();
+	(void) *paused_at;
+
+	reader = *(dev_priv->hw_addr_ptr);
+	ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
+		dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
+
+	dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
+
+	/*
+	 * There is a possibility that the command reader will
+	 * miss the new pause address and pause on the old one.
+	 * In that case we need to program the new start address
+	 * using PCI.
+	 */
+
+	diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+	count = 10000000;
+	while (diff == 0 && count--) {
+		paused = (VIA_READ(0x41c) & 0x80000000);
+		if (paused)
+			break;
+		reader = *(dev_priv->hw_addr_ptr);
+		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+	}
+
+	paused = VIA_READ(0x41c) & 0x80000000;
+
+	if (paused && !no_pci_fire) {
+		reader = *(dev_priv->hw_addr_ptr);
+		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+		diff &= (dev_priv->dma_high - 1);
+		if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
+			DRM_ERROR("Paused at incorrect address. "
+				  "0x%08x, 0x%08x 0x%08x\n",
+				  ptr, reader, dev_priv->dma_diff);
+		} else if (diff == 0) {
+			/*
+			 * There is a concern that these writes may stall the PCI bus
+			 * if the GPU is not idle. However, idling the GPU first
+			 * doesn't make a difference.
+			 */
+
+			VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
+			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
+			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
+			VIA_READ(VIA_REG_TRANSPACE);
+		}
+	}
+	return paused;
+}
+
+static int via_wait_idle(drm_via_private_t *dev_priv)
+{
+	int count = 10000000;
+
+	while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
+		;
+
+	while (count && (VIA_READ(VIA_REG_STATUS) &
+			   (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
+			    VIA_3D_ENG_BUSY)))
+		--count;
+	return count;
+}
+
+static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
+			       uint32_t addr, uint32_t *cmd_addr_hi,
+			       uint32_t *cmd_addr_lo, int skip_wait)
+{
+	uint32_t agp_base;
+	uint32_t cmd_addr, addr_lo, addr_hi;
+	uint32_t *vb;
+	uint32_t qw_pad_count;
+
+	if (!skip_wait)
+		via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
+
+	vb = via_get_dma(dev_priv);
+	VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
+			(VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
+	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+	qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
+	    ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
+
+	cmd_addr = (addr) ? addr :
+	    agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
+	addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
+		   (cmd_addr & HC_HAGPBpL_MASK));
+	addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
+
+	vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
+	VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
+	return vb;
+}
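+
+/*
+ * Padding example (illustrative values): if dma_low is 0x1e8 when
+ * qw_pad_count is computed, qw_pad_count = (0x100 >> 3) -
+ * ((0x1e8 & 0xff) >> 3) = 32 - 29 = 3, so after the dummy quadwords and
+ * the final command quadword dma_low ends up at 0x200, the next
+ * CMDBUF_ALIGNMENT_SIZE boundary.
+ */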
+
+static void via_cmdbuf_start(drm_via_private_t *dev_priv)
+{
+	uint32_t pause_addr_lo, pause_addr_hi;
+	uint32_t start_addr, start_addr_lo;
+	uint32_t end_addr, end_addr_lo;
+	uint32_t command;
+	uint32_t agp_base;
+	uint32_t ptr;
+	uint32_t reader;
+	int count;
+
+	dev_priv->dma_low = 0;
+
+	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+	start_addr = agp_base;
+	end_addr = agp_base + dev_priv->dma_high;
+
+	start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
+	end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
+	command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
+		   ((end_addr & 0xff000000) >> 16));
+
+	dev_priv->last_pause_ptr =
+	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
+			  &pause_addr_hi, &pause_addr_lo, 1) - 1;
+
+	via_flush_write_combine();
+	(void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
+
+	VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
+	VIA_WRITE(VIA_REG_TRANSPACE, command);
+	VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
+	VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);
+
+	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
+	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
+	DRM_WRITEMEMORYBARRIER();
+	VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
+	VIA_READ(VIA_REG_TRANSPACE);
+
+	dev_priv->dma_diff = 0;
+
+	count = 10000000;
+	while (!(VIA_READ(0x41c) & 0x80000000) && count--)
+		;
+
+	reader = *(dev_priv->hw_addr_ptr);
+	ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
+	    dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
+
+	/*
+	 * This is the difference between where we tell the
+	 * command reader to pause and where it actually pauses.
+	 * This differs between hardware implementations, so we
+	 * need to detect it.
+	 */
+
+	dev_priv->dma_diff = ptr - reader;
+}
+
+static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
+{
+	uint32_t *vb;
+
+	via_cmdbuf_wait(dev_priv, qwords + 2);
+	vb = via_get_dma(dev_priv);
+	VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
+	via_align_buffer(dev_priv, vb, qwords);
+}
+
+static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
+{
+	uint32_t *vb = via_get_dma(dev_priv);
+	SetReg2DAGP(0x0C, (0 | (0 << 16)));
+	SetReg2DAGP(0x10, 0 | (0 << 16));
+	SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
+}
+
+static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
+{
+	uint32_t agp_base;
+	uint32_t pause_addr_lo, pause_addr_hi;
+	uint32_t jump_addr_lo, jump_addr_hi;
+	volatile uint32_t *last_pause_ptr;
+	uint32_t dma_low_save1, dma_low_save2;
+
+	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+	via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
+		      &jump_addr_lo, 0);
+
+	dev_priv->dma_wrap = dev_priv->dma_low;
+
+	/*
+	 * Wrap command buffer to the beginning.
+	 */
+
+	dev_priv->dma_low = 0;
+	if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
+		DRM_ERROR("via_cmdbuf_jump failed\n");
+
+	via_dummy_bitblt(dev_priv);
+	via_dummy_bitblt(dev_priv);
+
+	last_pause_ptr =
+	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+			  &pause_addr_lo, 0) - 1;
+	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+		      &pause_addr_lo, 0);
+
+	*last_pause_ptr = pause_addr_lo;
+	dma_low_save1 = dev_priv->dma_low;
+
+	/*
+	 * Now, set a trap that will pause the regulator if it tries to rerun the old
+	 * command buffer. (This may happen if via_hook_segment detects a command regulator pause
+	 * and reissues the jump command over PCI, while the regulator has already taken the jump
+	 * and actually paused at the current buffer end).
+	 * There appears to be no other way to detect this condition, since the hw_addr_pointer
+	 * does not seem to get updated immediately when a jump occurs.
+	 */
+
+	last_pause_ptr =
+		via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+			      &pause_addr_lo, 0) - 1;
+	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+		      &pause_addr_lo, 0);
+	*last_pause_ptr = pause_addr_lo;
+
+	dma_low_save2 = dev_priv->dma_low;
+	dev_priv->dma_low = dma_low_save1;
+	via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
+	dev_priv->dma_low = dma_low_save2;
+	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
+}
+
+
+static void via_cmdbuf_rewind(drm_via_private_t *dev_priv)
+{
+	via_cmdbuf_jump(dev_priv);
+}
+
+static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type)
+{
+	uint32_t pause_addr_lo, pause_addr_hi;
+
+	via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
+	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
+}
+
+static void via_cmdbuf_pause(drm_via_private_t *dev_priv)
+{
+	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
+}
+
+static void via_cmdbuf_reset(drm_via_private_t *dev_priv)
+{
+	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
+	via_wait_idle(dev_priv);
+}
+
+/*
+ * User interface to the space and lag functions.
+ */
+
+static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_cmdbuf_size_t *d_siz = data;
+	int ret = 0;
+	uint32_t tmp_size, count;
+	drm_via_private_t *dev_priv;
+
+	DRM_DEBUG("\n");
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	dev_priv = (drm_via_private_t *) dev->dev_private;
+
+	if (dev_priv->ring.virtual_start == NULL) {
+		DRM_ERROR("called without initializing AGP ring buffer.\n");
+		return -EFAULT;
+	}
+
+	count = 1000000;
+	tmp_size = d_siz->size;
+	switch (d_siz->func) {
+	case VIA_CMDBUF_SPACE:
+		while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
+		       && --count) {
+			if (!d_siz->wait)
+				break;
+		}
+		if (!count) {
+			DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
+			ret = -EAGAIN;
+		}
+		break;
+	case VIA_CMDBUF_LAG:
+		while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
+		       && --count) {
+			if (!d_siz->wait)
+				break;
+		}
+		if (!count) {
+			DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
+			ret = -EAGAIN;
+		}
+		break;
+	default:
+		ret = -EFAULT;
+	}
+	d_siz->size = tmp_size;
+
+	return ret;
+}
+
+struct drm_ioctl_desc via_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
+	DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
+	DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
+	DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
+};
+
+int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
diff --git a/linux-imx/drivers/gpu/drm/via/via_dmablit.c b/linux-imx/drivers/gpu/drm/via/via_dmablit.c
new file mode 100644
index 0000000..8b0f259
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/via/via_dmablit.c
@@ -0,0 +1,808 @@
+/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
+ *
+ * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Thomas Hellstrom.
+ *    Partially based on code obtained from Digeo Inc.
+ */
+
+
+/*
+ * Unmaps the DMA mappings.
+ * FIXME: Is this a NoOp on x86? Also
+ * FIXME: What happens if this one is called and a pending blit has previously done
+ * the same DMA mappings?
+ */
+
+#include <drm/drmP.h>
+#include <drm/via_drm.h>
+#include "via_drv.h"
+#include "via_dmablit.h"
+
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+
+#define VIA_PGDN(x)	     (((unsigned long)(x)) & PAGE_MASK)
+#define VIA_PGOFF(x)	    (((unsigned long)(x)) & ~PAGE_MASK)
+#define VIA_PFN(x)	      ((unsigned long)(x) >> PAGE_SHIFT)
+
+typedef struct _drm_via_descriptor {
+	uint32_t mem_addr;
+	uint32_t dev_addr;
+	uint32_t size;
+	uint32_t next;
+} drm_via_descriptor_t;
+
+
+/*
+ * Unmap a DMA mapping.
+ */
+
+
+
+static void
+via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+{
+	int num_desc = vsg->num_desc;
+	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
+	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
+	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+		descriptor_this_page;
+	dma_addr_t next = vsg->chain_start;
+
+	while (num_desc--) {
+		if (descriptor_this_page-- == 0) {
+			cur_descriptor_page--;
+			descriptor_this_page = vsg->descriptors_per_page - 1;
+			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+				descriptor_this_page;
+		}
+		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
+		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
+		next = (dma_addr_t) desc_ptr->next;
+		desc_ptr--;
+	}
+}
+
+/*
+ * If mode = 0, count how many descriptors are needed.
+ * If mode = 1, map the DMA pages for the device and also build and map the descriptors.
+ * Descriptors are run in reverse order by the hardware, because we are not allowed to
+ * update the 'next' field without syncing calls when the descriptor is already mapped.
+ */
+
+static void
+via_map_blit_for_device(struct pci_dev *pdev,
+		   const drm_via_dmablit_t *xfer,
+		   drm_via_sg_info_t *vsg,
+		   int mode)
+{
+	unsigned cur_descriptor_page = 0;
+	unsigned num_descriptors_this_page = 0;
+	unsigned char *mem_addr = xfer->mem_addr;
+	unsigned char *cur_mem;
+	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
+	uint32_t fb_addr = xfer->fb_addr;
+	uint32_t cur_fb;
+	unsigned long line_len;
+	unsigned remaining_len;
+	int num_desc = 0;
+	int cur_line;
+	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
+	drm_via_descriptor_t *desc_ptr = NULL;
+
+	if (mode == 1)
+		desc_ptr = vsg->desc_pages[cur_descriptor_page];
+
+	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
+
+		line_len = xfer->line_length;
+		cur_fb = fb_addr;
+		cur_mem = mem_addr;
+
+		while (line_len > 0) {
+
+			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
+			line_len -= remaining_len;
+
+			if (mode == 1) {
+				desc_ptr->mem_addr =
+					dma_map_page(&pdev->dev,
+						     vsg->pages[VIA_PFN(cur_mem) -
+								VIA_PFN(first_addr)],
+						     VIA_PGOFF(cur_mem), remaining_len,
+						     vsg->direction);
+				desc_ptr->dev_addr = cur_fb;
+
+				desc_ptr->size = remaining_len;
+				desc_ptr->next = (uint32_t) next;
+				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
+						      DMA_TO_DEVICE);
+				desc_ptr++;
+				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
+					num_descriptors_this_page = 0;
+					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
+				}
+			}
+
+			num_desc++;
+			cur_mem += remaining_len;
+			cur_fb += remaining_len;
+		}
+
+		mem_addr += xfer->mem_stride;
+		fb_addr += xfer->fb_stride;
+	}
+
+	if (mode == 1) {
+		vsg->chain_start = next;
+		vsg->state = dr_via_device_mapped;
+	}
+	vsg->num_desc = num_desc;
+}
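+
+/*
+ * Usage sketch (mirroring via_build_sg_info below): the function is called
+ * twice, first with mode = 0 so that vsg->num_desc is known and descriptor
+ * pages can be allocated, then with mode = 1 to fill in and map the chain:
+ *
+ *	via_map_blit_for_device(pdev, xfer, vsg, 0);
+ *	via_alloc_desc_pages(vsg);
+ *	via_map_blit_for_device(pdev, xfer, vsg, 1);
+ */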
+
+/*
+ * Function that frees up all resources for a blit. It is usable even if the
+ * blit info has only been partially built, as long as the status enum is
+ * consistent with the actual status of the used resources.
+ */
+
+
+static void
+via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+{
+	struct page *page;
+	int i;
+
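+	/*
+	 * The cases below intentionally fall through: teardown cascades
+	 * from the most advanced state down to the initial one.
+	 */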
+	switch (vsg->state) {
+	case dr_via_device_mapped:
+		via_unmap_blit_from_device(pdev, vsg);
+	case dr_via_desc_pages_alloc:
+		for (i = 0; i < vsg->num_desc_pages; ++i) {
+			if (vsg->desc_pages[i] != NULL)
+				free_page((unsigned long)vsg->desc_pages[i]);
+		}
+		kfree(vsg->desc_pages);
+	case dr_via_pages_locked:
+		for (i = 0; i < vsg->num_pages; ++i) {
+			if (NULL != (page = vsg->pages[i])) {
+				if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
+					SetPageDirty(page);
+				page_cache_release(page);
+			}
+		}
+	case dr_via_pages_alloc:
+		vfree(vsg->pages);
+	default:
+		vsg->state = dr_via_sg_init;
+	}
+	vfree(vsg->bounce_buffer);
+	vsg->bounce_buffer = NULL;
+	vsg->free_on_sequence = 0;
+}
+
+/*
+ * Fire a blit engine.
+ */
+
+static void
+via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
+	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
+	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
+		  VIA_DMA_CSR_DE);
+	VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
+	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
+	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
+	DRM_WRITEMEMORYBARRIER();
+	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
+	VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
+}
+
+/*
+ * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
+ * occur here if the calling user does not have access to the submitted address.
+ */
+
+static int
+via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
+{
+	int ret;
+	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
+	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
+		first_pfn + 1;
+
+	vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
+	if (NULL == vsg->pages)
+		return -ENOMEM;
+	down_read(&current->mm->mmap_sem);
+	ret = get_user_pages(current, current->mm,
+			     (unsigned long)xfer->mem_addr,
+			     vsg->num_pages,
+			     (vsg->direction == DMA_FROM_DEVICE),
+			     0, vsg->pages, NULL);
+
+	up_read(&current->mm->mmap_sem);
+	if (ret != vsg->num_pages) {
+		if (ret < 0)
+			return ret;
+		vsg->state = dr_via_pages_locked;
+		return -EINVAL;
+	}
+	vsg->state = dr_via_pages_locked;
+	DRM_DEBUG("DMA pages locked\n");
+	return 0;
+}
+
+/*
+ * Allocate DMA-capable memory for the blit descriptor chain, and an array that keeps track of
+ * the pages we allocate. We don't want to use kmalloc for the descriptor chain because it may
+ * be quite large for some blits, and pages don't need to be contiguous.
+ */
+
+static int
+via_alloc_desc_pages(drm_via_sg_info_t *vsg)
+{
+	int i;
+
+	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
+	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
+		vsg->descriptors_per_page;
+
+	if (NULL ==  (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
+		return -ENOMEM;
+
+	vsg->state = dr_via_desc_pages_alloc;
+	for (i = 0; i < vsg->num_desc_pages; ++i) {
+		if (NULL == (vsg->desc_pages[i] =
+			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
+			return -ENOMEM;
+	}
+	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
+		  vsg->num_desc);
+	return 0;
+}
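+
+/*
+ * For example, with 4 KiB pages and the 16-byte drm_via_descriptor_t
+ * defined above, this gives 256 descriptors per page; a blit needing 300
+ * descriptors would therefore allocate two descriptor pages.
+ */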
+
+static void
+via_abort_dmablit(struct drm_device *dev, int engine)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
+}
+
+static void
+via_dmablit_engine_off(struct drm_device *dev, int engine)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
+}
+
+
+
+/*
+ * The dmablit part of the IRQ handler. We try to do only reasonably fast things here.
+ * The rest, like unmapping and freeing memory for done blits, is done in a separate workqueue
+ * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
+ * the workqueue task takes care of processing associated with the old blit.
+ */
+
+void
+via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
+	int cur;
+	int done_transfer;
+	unsigned long irqsave = 0;
+	uint32_t status = 0;
+
+	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
+		  engine, from_irq, (unsigned long) blitq);
+
+	if (from_irq)
+		spin_lock(&blitq->blit_lock);
+	else
+		spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+	done_transfer = blitq->is_active &&
+	  ((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
+	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
+
+	cur = blitq->cur;
+	if (done_transfer) {
+
+		blitq->blits[cur]->aborted = blitq->aborting;
+		blitq->done_blit_handle++;
+		DRM_WAKEUP(blitq->blit_queue + cur);
+
+		cur++;
+		if (cur >= VIA_NUM_BLIT_SLOTS)
+			cur = 0;
+		blitq->cur = cur;
+
+		/*
+		 * Clear transfer done flag.
+		 */
+
+		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04,  VIA_DMA_CSR_TD);
+
+		blitq->is_active = 0;
+		blitq->aborting = 0;
+		schedule_work(&blitq->wq);
+
+	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
+
+		/*
+		 * Abort transfer after one second.
+		 */
+
+		via_abort_dmablit(dev, engine);
+		blitq->aborting = 1;
+		blitq->end = jiffies + DRM_HZ;
+	}
+
+	if (!blitq->is_active) {
+		if (blitq->num_outstanding) {
+			via_fire_dmablit(dev, blitq->blits[cur], engine);
+			blitq->is_active = 1;
+			blitq->cur = cur;
+			blitq->num_outstanding--;
+			blitq->end = jiffies + DRM_HZ;
+			if (!timer_pending(&blitq->poll_timer))
+				mod_timer(&blitq->poll_timer, jiffies + 1);
+		} else {
+			if (timer_pending(&blitq->poll_timer))
+				del_timer(&blitq->poll_timer);
+			via_dmablit_engine_off(dev, engine);
+		}
+	}
+
+	if (from_irq)
+		spin_unlock(&blitq->blit_lock);
+	else
+		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+}
+
+
+
+/*
+ * Check whether this blit is still active, performing necessary locking.
+ */
+
+static int
+via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
+{
+	unsigned long irqsave;
+	uint32_t slot;
+	int active;
+
+	spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+	/*
+	 * Allow for handle wraparounds.
+	 */
+
+	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
+		((blitq->cur_blit_handle - handle) <= (1 << 23));
+
+	if (queue && active) {
+		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
+		if (slot >= VIA_NUM_BLIT_SLOTS)
+			slot -= VIA_NUM_BLIT_SLOTS;
+		*queue = blitq->blit_queue + slot;
+	}
+
+	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+	return active;
+}
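+
+/*
+ * The unsigned arithmetic above implements "done_blit_handle < handle <=
+ * cur_blit_handle" in a wrap-safe way: for a blit that is not yet done,
+ * done_blit_handle - handle underflows to a value above 1 << 23, while
+ * cur_blit_handle - handle stays at or below 1 << 23 for any handle that
+ * has actually been issued.
+ */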
+
+/*
+ * Sync. Wait up to three seconds for the blit to be performed.
+ */
+
+static int
+via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
+{
+
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
+	wait_queue_head_t *queue;
+	int ret = 0;
+
+	if (via_dmablit_active(blitq, engine, handle, &queue)) {
+		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
+			    !via_dmablit_active(blitq, engine, handle, NULL));
+	}
+	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
+		  handle, engine, ret);
+
+	return ret;
+}
+
+
+/*
+ * A timer that regularly polls the blit engine in cases where we don't have interrupts:
+ * a) Broken hardware (typically those that don't have any video capture facility).
+ * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
+ * The timer and hardware IRQs can and do work in parallel. If the hardware has
+ * IRQs, they will shorten the latency somewhat.
+ */
+
+
+
+static void
+via_dmablit_timer(unsigned long data)
+{
+	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+	struct drm_device *dev = blitq->dev;
+	int engine = (int)
+		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
+
+	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
+		  (unsigned long) jiffies);
+
+	via_dmablit_handler(dev, engine, 0);
+
+	if (!timer_pending(&blitq->poll_timer)) {
+		mod_timer(&blitq->poll_timer, jiffies + 1);
+
+	       /*
+		* Rerun handler to delete timer if engines are off, and
+		* to shorten abort latency. This is a little nasty.
+		*/
+
+	       via_dmablit_handler(dev, engine, 0);
+
+	}
+}
+
+
+
+
+/*
+ * Workqueue task that frees data and mappings associated with a blit.
+ * Also wakes up waiting processes. Each of these tasks handles one
+ * blit engine only, and need not be called on each interrupt.
+ */
+
+
+static void
+via_dmablit_workqueue(struct work_struct *work)
+{
+	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
+	struct drm_device *dev = blitq->dev;
+	unsigned long irqsave;
+	drm_via_sg_info_t *cur_sg;
+	int cur_released;
+
+
+	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
+		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
+
+	spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+	while (blitq->serviced != blitq->cur) {
+
+		cur_released = blitq->serviced++;
+
+		DRM_DEBUG("Releasing blit slot %d\n", cur_released);
+
+		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
+			blitq->serviced = 0;
+
+		cur_sg = blitq->blits[cur_released];
+		blitq->num_free++;
+
+		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+		DRM_WAKEUP(&blitq->busy_queue);
+
+		via_free_sg_info(dev->pdev, cur_sg);
+		kfree(cur_sg);
+
+		spin_lock_irqsave(&blitq->blit_lock, irqsave);
+	}
+
+	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+}
+
+
+/*
+ * Init all blit engines. Currently we use two, but some hardware has four.
+ */
+
+
+void
+via_init_dmablit(struct drm_device *dev)
+{
+	int i, j;
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+	drm_via_blitq_t *blitq;
+
+	pci_set_master(dev->pdev);
+
+	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
+		blitq = dev_priv->blit_queues + i;
+		blitq->dev = dev;
+		blitq->cur_blit_handle = 0;
+		blitq->done_blit_handle = 0;
+		blitq->head = 0;
+		blitq->cur = 0;
+		blitq->serviced = 0;
+		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
+		blitq->num_outstanding = 0;
+		blitq->is_active = 0;
+		blitq->aborting = 0;
+		spin_lock_init(&blitq->blit_lock);
+		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
+			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
+		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
+		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
+		setup_timer(&blitq->poll_timer, via_dmablit_timer,
+				(unsigned long)blitq);
+	}
+}
+
+/*
+ * Build all info and do all mappings required for a blit.
+ */
+
+
+static int
+via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
+{
+	int draw = xfer->to_fb;
+	int ret = 0;
+
+	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+	vsg->bounce_buffer = NULL;
+
+	vsg->state = dr_via_sg_init;
+
+	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
+		DRM_ERROR("Zero size bitblt.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * The check below is a driver limitation, not a hardware one. We
+	 * don't want to lock unused pages, and don't want to incorporate
+	 * the extra logic of avoiding them. Make sure there are none.
+	 * (Not a big limitation anyway.)
+	 */
+
+	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
+		DRM_ERROR("Too large system memory stride. Stride: %d, "
+			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
+		return -EINVAL;
+	}
+
+	if ((xfer->mem_stride == xfer->line_length) &&
+	   (xfer->fb_stride == xfer->line_length)) {
+		xfer->mem_stride *= xfer->num_lines;
+		xfer->line_length = xfer->mem_stride;
+		xfer->fb_stride = xfer->mem_stride;
+		xfer->num_lines = 1;
+	}
+
+	/*
+	 * Don't lock an arbitrarily large number of pages, since that opens
+	 * a denial-of-service hole.
+	 */
+
+	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
+		DRM_ERROR("Too large PCI DMA bitblt.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * We allow a negative fb stride to allow flipping of images in
+	 * the transfer.
+	 */
+
+	if (xfer->mem_stride < xfer->line_length ||
+		abs(xfer->fb_stride) < xfer->line_length) {
+		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * A hardware bug seems to be worked around if system memory addresses start on
+	 * 16-byte boundaries. This seems a bit restrictive, however. VIA has been
+	 * contacted about this. Meanwhile, impose the following restrictions:
+	 */
+
+#ifdef VIA_BUGFREE
+	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
+	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
+		DRM_ERROR("Invalid DRM bitblt alignment.\n");
+		return -EINVAL;
+	}
+#else
+	if ((((unsigned long)xfer->mem_addr & 15) ||
+	      ((unsigned long)xfer->fb_addr & 3)) ||
+	   ((xfer->num_lines > 1) &&
+	   ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
+		DRM_ERROR("Invalid DRM bitblt alignment.\n");
+		return -EINVAL;
+	}
+#endif
+
+	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
+		DRM_ERROR("Could not lock DMA pages.\n");
+		via_free_sg_info(dev->pdev, vsg);
+		return ret;
+	}
+
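+	/*
+	 * Two-pass mapping: the first call (mode 0) sizes the descriptor
+	 * chain so that the descriptor pages can be allocated; the second
+	 * call (mode 1) writes the actual descriptors.
+	 */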
+	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
+	if (0 != (ret = via_alloc_desc_pages(vsg))) {
+		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
+		via_free_sg_info(dev->pdev, vsg);
+		return ret;
+	}
+	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
+
+	return 0;
+}
+
+
+/*
+ * Reserve one free slot in the blit queue. Will wait for up to one second
+ * for one to become available. Otherwise -EBUSY is returned.
+ */
+
+static int
+via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
+{
+	int ret = 0;
+	unsigned long irqsave;
+
+	DRM_DEBUG("Num free is %d\n", blitq->num_free);
+	spin_lock_irqsave(&blitq->blit_lock, irqsave);
+	while (blitq->num_free == 0) {
+		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
+		if (ret)
+			return (-EINTR == ret) ? -EAGAIN : ret;
+
+		spin_lock_irqsave(&blitq->blit_lock, irqsave);
+	}
+
+	blitq->num_free--;
+	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+	return 0;
+}
+
+/*
+ * Hand back a free slot if we changed our mind.
+ */
+
+static void
+via_dmablit_release_slot(drm_via_blitq_t *blitq)
+{
+	unsigned long irqsave;
+
+	spin_lock_irqsave(&blitq->blit_lock, irqsave);
+	blitq->num_free++;
+	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+	DRM_WAKEUP(&blitq->busy_queue);
+}
+
+/*
+ * Grab a free slot. Build blit info and queue a blit.
+ */
+
+
+static int
+via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+	drm_via_sg_info_t *vsg;
+	drm_via_blitq_t *blitq;
+	int ret;
+	int engine;
+	unsigned long irqsave;
+
+	if (dev_priv == NULL) {
+		DRM_ERROR("Called without initialization.\n");
+		return -EINVAL;
+	}
+
+	engine = (xfer->to_fb) ? 0 : 1;
+	blitq = dev_priv->blit_queues + engine;
+	if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
+		return ret;
+	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
+		via_dmablit_release_slot(blitq);
+		return -ENOMEM;
+	}
+	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
+		via_dmablit_release_slot(blitq);
+		kfree(vsg);
+		return ret;
+	}
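+	/*
+	 * Queue the blit in the slot ring and hand back a monotonically
+	 * increasing handle; via_dmablit_sync() compares handles to tell
+	 * when this blit has completed.
+	 */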
+	spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+	blitq->blits[blitq->head++] = vsg;
+	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
+		blitq->head = 0;
+	blitq->num_outstanding++;
+	xfer->sync.sync_handle = ++blitq->cur_blit_handle;
+
+	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+	xfer->sync.engine = engine;
+
+	via_dmablit_handler(dev, engine, 0);
+
+	return 0;
+}
+
+/*
+ * Sync on a previously submitted blit. Note that the X server uses signals
+ * extensively, and that there is a very high probability that this IOCTL will
+ * be interrupted by a signal. In that case it returns with -EAGAIN so that the
+ * signal can be delivered.
+ * The caller should then reissue the IOCTL. This is similar to what is being
+ * done for drmGetLock().
+ */
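+/*
+ * Illustrative (hypothetical) userspace retry loop for this IOCTL, assuming
+ * libdrm and an already queued blit whose sync info is in xfer.sync:
+ *
+ *	drm_via_blitsync_t sync = xfer.sync;
+ *	int ret;
+ *	do {
+ *		ret = drmCommandWrite(fd, DRM_VIA_BLIT_SYNC,
+ *				      &sync, sizeof(sync));
+ *	} while (ret == -EAGAIN);
+ */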
+
+int
+via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_blitsync_t *sync = data;
+	int err;
+
+	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
+		return -EINVAL;
+
+	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
+
+	if (-EINTR == err)
+		err = -EAGAIN;
+
+	return err;
+}
+
+
+/*
+ * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
+ * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
+ * be reissued. See the above IOCTL code.
+ */
+
+int
+via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_dmablit_t *xfer = data;
+	int err;
+
+	err = via_dmablit(dev, xfer);
+
+	return err;
+}
diff --git a/linux-imx/drivers/gpu/drm/via/via_dmablit.h b/linux-imx/drivers/gpu/drm/via/via_dmablit.h
new file mode 100644
index 0000000..9b662a3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/via/via_dmablit.h
@@ -0,0 +1,140 @@
+/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
+ *
+ * Copyright 2005 Thomas Hellstrom.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Thomas Hellstrom.
+ *    Register info from Digeo Inc.
+ */
+
+#ifndef _VIA_DMABLIT_H
+#define _VIA_DMABLIT_H
+
+#include <linux/dma-mapping.h>
+
+#define VIA_NUM_BLIT_ENGINES 2
+#define VIA_NUM_BLIT_SLOTS 8
+
+struct _drm_via_descriptor;
+
+typedef struct _drm_via_sg_info {
+	struct page **pages;
+	unsigned long num_pages;
+	struct _drm_via_descriptor **desc_pages;
+	int num_desc_pages;
+	int num_desc;
+	enum dma_data_direction direction;
+	unsigned char *bounce_buffer;
+	dma_addr_t chain_start;
+	uint32_t free_on_sequence;
+	unsigned int descriptors_per_page;
+	int aborted;
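+	/* Setup progress; via_free_sg_info() uses it to unwind. */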
+	enum {
+		dr_via_device_mapped,
+		dr_via_desc_pages_alloc,
+		dr_via_pages_locked,
+		dr_via_pages_alloc,
+		dr_via_sg_init
+	} state;
+} drm_via_sg_info_t;
+
+typedef struct _drm_via_blitq {
+	struct drm_device *dev;
+	uint32_t cur_blit_handle;
+	uint32_t done_blit_handle;
+	unsigned serviced;
+	unsigned head;
+	unsigned cur;
+	unsigned num_free;
+	unsigned num_outstanding;
+	unsigned long end;
+	int aborting;
+	int is_active;
+	drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
+	spinlock_t blit_lock;
+	wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
+	wait_queue_head_t busy_queue;
+	struct work_struct wq;
+	struct timer_list poll_timer;
+} drm_via_blitq_t;
+
+
+/*
+ *  PCI DMA Registers
+ *  Channels 2 & 3 don't seem to be implemented in hardware.
+ */
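+/*
+ * In chained mode (VIA_DMA_MR_CM set in MRn), a transfer is typically kicked
+ * off by pointing DPRn at the physical address of the first descriptor and
+ * then setting VIA_DMA_CSR_DE | VIA_DMA_CSR_TS in CSRn.
+ */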
+
+#define VIA_PCI_DMA_MAR0            0xE40   /* Memory Address Register of Channel 0 */
+#define VIA_PCI_DMA_DAR0            0xE44   /* Device Address Register of Channel 0 */
+#define VIA_PCI_DMA_BCR0            0xE48   /* Byte Count Register of Channel 0 */
+#define VIA_PCI_DMA_DPR0            0xE4C   /* Descriptor Pointer Register of Channel 0 */
+
+#define VIA_PCI_DMA_MAR1            0xE50   /* Memory Address Register of Channel 1 */
+#define VIA_PCI_DMA_DAR1            0xE54   /* Device Address Register of Channel 1 */
+#define VIA_PCI_DMA_BCR1            0xE58   /* Byte Count Register of Channel 1 */
+#define VIA_PCI_DMA_DPR1            0xE5C   /* Descriptor Pointer Register of Channel 1 */
+
+#define VIA_PCI_DMA_MAR2            0xE60   /* Memory Address Register of Channel 2 */
+#define VIA_PCI_DMA_DAR2            0xE64   /* Device Address Register of Channel 2 */
+#define VIA_PCI_DMA_BCR2            0xE68   /* Byte Count Register of Channel 2 */
+#define VIA_PCI_DMA_DPR2            0xE6C   /* Descriptor Pointer Register of Channel 2 */
+
+#define VIA_PCI_DMA_MAR3            0xE70   /* Memory Address Register of Channel 3 */
+#define VIA_PCI_DMA_DAR3            0xE74   /* Device Address Register of Channel 3 */
+#define VIA_PCI_DMA_BCR3            0xE78   /* Byte Count Register of Channel 3 */
+#define VIA_PCI_DMA_DPR3            0xE7C   /* Descriptor Pointer Register of Channel 3 */
+
+#define VIA_PCI_DMA_MR0             0xE80   /* Mode Register of Channel 0 */
+#define VIA_PCI_DMA_MR1             0xE84   /* Mode Register of Channel 1 */
+#define VIA_PCI_DMA_MR2             0xE88   /* Mode Register of Channel 2 */
+#define VIA_PCI_DMA_MR3             0xE8C   /* Mode Register of Channel 3 */
+
+#define VIA_PCI_DMA_CSR0            0xE90   /* Command/Status Register of Channel 0 */
+#define VIA_PCI_DMA_CSR1            0xE94   /* Command/Status Register of Channel 1 */
+#define VIA_PCI_DMA_CSR2            0xE98   /* Command/Status Register of Channel 2 */
+#define VIA_PCI_DMA_CSR3            0xE9C   /* Command/Status Register of Channel 3 */
+
+#define VIA_PCI_DMA_PTR             0xEA0   /* Priority Type Register */
+
+/* Define for DMA engine */
+/* DPR */
+#define VIA_DMA_DPR_EC		(1<<1)	/* end of chain */
+#define VIA_DMA_DPR_DDIE	(1<<2)	/* descriptor done interrupt enable */
+#define VIA_DMA_DPR_DT		(1<<3)	/* direction of transfer (RO) */
+
+/* MR */
+#define VIA_DMA_MR_CM		(1<<0)	/* chaining mode */
+#define VIA_DMA_MR_TDIE		(1<<1)	/* transfer done interrupt enable */
+#define VIA_DMA_MR_HENDMACMD		(1<<7) /* ? */
+
+/* CSR */
+#define VIA_DMA_CSR_DE		(1<<0)	/* DMA enable */
+#define VIA_DMA_CSR_TS		(1<<1)	/* transfer start */
+#define VIA_DMA_CSR_TA		(1<<2)	/* transfer abort */
+#define VIA_DMA_CSR_TD		(1<<3)	/* transfer done */
+#define VIA_DMA_CSR_DD		(1<<4)	/* descriptor done */
+
+
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/via/via_drv.c b/linux-imx/drivers/gpu/drm/via/via_drv.c
new file mode 100644
index 0000000..f4ae203
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/via/via_drv.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/via_drm.h>
+#include "via_drv.h"
+
+#include <drm/drm_pciids.h>
+
+static int via_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct via_file_private *file_priv;
+
+	DRM_DEBUG_DRIVER("\n");
+	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	file->driver_priv = file_priv;
+
+	INIT_LIST_HEAD(&file_priv->obj_list);
+
+	return 0;
+}
+
+static void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct via_file_private *file_priv = file->driver_priv;
+
+	kfree(file_priv);
+}
+
+static struct pci_device_id pciidlist[] = {
+	viadrv_PCI_IDS
+};
+
+static const struct file_operations via_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
+	    DRIVER_IRQ_SHARED,
+	.load = via_driver_load,
+	.unload = via_driver_unload,
+	.open = via_driver_open,
+	.preclose = via_reclaim_buffers_locked,
+	.postclose = via_driver_postclose,
+	.context_dtor = via_final_context,
+	.get_vblank_counter = via_get_vblank_counter,
+	.enable_vblank = via_enable_vblank,
+	.disable_vblank = via_disable_vblank,
+	.irq_preinstall = via_driver_irq_preinstall,
+	.irq_postinstall = via_driver_irq_postinstall,
+	.irq_uninstall = via_driver_irq_uninstall,
+	.irq_handler = via_driver_irq_handler,
+	.dma_quiescent = via_driver_dma_quiescent,
+	.lastclose = via_lastclose,
+	.ioctls = via_ioctls,
+	.fops = &via_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct pci_driver via_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+};
+
+static int __init via_init(void)
+{
+	driver.num_ioctls = via_max_ioctl;
+	via_init_command_verifier();
+	return drm_pci_init(&driver, &via_pci_driver);
+}
+
+static void __exit via_exit(void)
+{
+	drm_pci_exit(&driver, &via_pci_driver);
+}
+
+module_init(via_init);
+module_exit(via_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/linux-imx/drivers/gpu/drm/via/via_drv.h b/linux-imx/drivers/gpu/drm/via/via_drv.h
new file mode 100644
index 0000000..893a650
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/via/via_drv.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _VIA_DRV_H_
+#define _VIA_DRV_H_
+
+#include <drm/drm_mm.h>
+#define DRIVER_AUTHOR	"Various"
+
+#define DRIVER_NAME		"via"
+#define DRIVER_DESC		"VIA Unichrome / Pro"
+#define DRIVER_DATE		"20070202"
+
+#define DRIVER_MAJOR		2
+#define DRIVER_MINOR		11
+#define DRIVER_PATCHLEVEL	1
+
+#include "via_verifier.h"
+
+#include "via_dmablit.h"
+
+#define VIA_PCI_BUF_SIZE 60000
+#define VIA_FIRE_BUF_SIZE  1024
+#define VIA_NUM_IRQS 4
+
+typedef struct drm_via_ring_buffer {
+	drm_local_map_t map;
+	char *virtual_start;
+} drm_via_ring_buffer_t;
+
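+/*
+ * One entry per device-specific interrupt: { IRQ enable mask, IRQ pending
+ * mask, status register offset (0 if none), status mask, value the masked
+ * status must equal once the interrupt has completed }; see via_irq.c.
+ */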
+typedef uint32_t maskarray_t[5];
+
+typedef struct drm_via_irq {
+	atomic_t irq_received;
+	uint32_t pending_mask;
+	uint32_t enable_mask;
+	wait_queue_head_t irq_queue;
+} drm_via_irq_t;
+
+typedef struct drm_via_private {
+	drm_via_sarea_t *sarea_priv;
+	drm_local_map_t *sarea;
+	drm_local_map_t *fb;
+	drm_local_map_t *mmio;
+	unsigned long agpAddr;
+	wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
+	char *dma_ptr;
+	unsigned int dma_low;
+	unsigned int dma_high;
+	unsigned int dma_offset;
+	uint32_t dma_wrap;
+	volatile uint32_t *last_pause_ptr;
+	volatile uint32_t *hw_addr_ptr;
+	drm_via_ring_buffer_t ring;
+	struct timeval last_vblank;
+	int last_vblank_valid;
+	unsigned usec_per_vblank;
+	atomic_t vbl_received;
+	drm_via_state_t hc_state;
+	char pci_buf[VIA_PCI_BUF_SIZE];
+	const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
+	uint32_t num_fire_offsets;
+	int chipset;
+	drm_via_irq_t via_irqs[VIA_NUM_IRQS];
+	unsigned num_irqs;
+	maskarray_t *irq_masks;
+	uint32_t irq_enable_mask;
+	uint32_t irq_pending_mask;
+	int *irq_map;
+	unsigned int idle_fault;
+	int vram_initialized;
+	struct drm_mm vram_mm;
+	int agp_initialized;
+	struct drm_mm agp_mm;
+	/** Mapping of userspace keys to mm objects */
+	struct idr object_idr;
+	unsigned long vram_offset;
+	unsigned long agp_offset;
+	drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
+	uint32_t dma_diff;
+} drm_via_private_t;
+
+enum via_family {
+  VIA_OTHER = 0,     /* Baseline */
+  VIA_PRO_GROUP_A,   /* Another video engine and DMA commands */
+  VIA_DX9_0          /* Same video as pro_group_a, but 3D is unsupported */
+};
+
+/* VIA MMIO register access */
+#define VIA_BASE ((dev_priv->mmio))
+
+#define VIA_READ(reg)		DRM_READ32(VIA_BASE, reg)
+#define VIA_WRITE(reg, val)	DRM_WRITE32(VIA_BASE, reg, val)
+#define VIA_READ8(reg)		DRM_READ8(VIA_BASE, reg)
+#define VIA_WRITE8(reg, val)	DRM_WRITE8(VIA_BASE, reg, val)
+
+extern struct drm_ioctl_desc via_ioctls[];
+extern int via_max_ioctl;
+
+extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
+extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
+extern int via_driver_unload(struct drm_device *dev);
+
+extern int via_init_context(struct drm_device *dev, int context);
+extern int via_final_context(struct drm_device *dev, int context);
+
+extern int via_do_cleanup_map(struct drm_device *dev);
+extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int via_enable_vblank(struct drm_device *dev, int crtc);
+extern void via_disable_vblank(struct drm_device *dev, int crtc);
+
+extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
+extern void via_driver_irq_preinstall(struct drm_device *dev);
+extern int via_driver_irq_postinstall(struct drm_device *dev);
+extern void via_driver_irq_uninstall(struct drm_device *dev);
+
+extern int via_dma_cleanup(struct drm_device *dev);
+extern void via_init_command_verifier(void);
+extern int via_driver_dma_quiescent(struct drm_device *dev);
+extern void via_init_futex(drm_via_private_t *dev_priv);
+extern void via_cleanup_futex(drm_via_private_t *dev_priv);
+extern void via_release_futex(drm_via_private_t *dev_priv, int context);
+
+extern void via_reclaim_buffers_locked(struct drm_device *dev,
+				       struct drm_file *file_priv);
+extern void via_lastclose(struct drm_device *dev);
+
+extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
+extern void via_init_dmablit(struct drm_device *dev);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/via/via_irq.c b/linux-imx/drivers/gpu/drm/via/via_irq.c
new file mode 100644
index 0000000..ac98964
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/via/via_irq.c
@@ -0,0 +1,391 @@
+/* via_irq.c
+ *
+ * Copyright 2004 BEAM Ltd.
+ * Copyright 2002 Tungsten Graphics, Inc.
+ * Copyright 2005 Thomas Hellstrom.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * BEAM LTD, TUNGSTEN GRAPHICS  AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Terry Barnaby <terry1@beam.ltd.uk>
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ *    Thomas Hellstrom <unichrome@shipmail.org>
+ *
+ * This code provides standard DRM access to the Via Unichrome / Pro vertical blank
+ * interrupt, as well as an infrastructure to handle other interrupts of the chip.
+ * The refresh rate is also calculated for video playback sync purposes.
+ */
+
+#include <drm/drmP.h>
+#include <drm/via_drm.h>
+#include "via_drv.h"
+
+#define VIA_REG_INTERRUPT       0x200
+
+/* VIA_REG_INTERRUPT */
+#define VIA_IRQ_GLOBAL	  (1 << 31)
+#define VIA_IRQ_VBLANK_ENABLE   (1 << 19)
+#define VIA_IRQ_VBLANK_PENDING  (1 << 3)
+#define VIA_IRQ_HQV0_ENABLE     (1 << 11)
+#define VIA_IRQ_HQV1_ENABLE     (1 << 25)
+#define VIA_IRQ_HQV0_PENDING    (1 << 9)
+#define VIA_IRQ_HQV1_PENDING    (1 << 10)
+#define VIA_IRQ_DMA0_DD_ENABLE  (1 << 20)
+#define VIA_IRQ_DMA0_TD_ENABLE  (1 << 21)
+#define VIA_IRQ_DMA1_DD_ENABLE  (1 << 22)
+#define VIA_IRQ_DMA1_TD_ENABLE  (1 << 23)
+#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
+#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
+#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
+#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
+
+
+/*
+ * Device-specific IRQs go here. This type might need to be extended with
+ * the register if there are multiple IRQ control registers.
+ * Currently we activate the HQV interrupts of Unichrome Pro group A.
+ */
+
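+/*
+ * The via_irqmap_* arrays below map the drm_via_irq_* numbers exposed to
+ * userspace onto indices into these mask arrays; -1 marks an interrupt
+ * that the hardware variant does not provide.
+ */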
+static maskarray_t via_pro_group_a_irqs[] = {
+	{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
+	 0x00000000 },
+	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
+	 0x00000000 },
+	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+};
+static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
+static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
+
+static maskarray_t via_unichrome_irqs[] = {
+	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
+};
+static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
+static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
+
+
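+/*
+ * Microsecond part of the difference between two timevals; assumes the two
+ * samples (here: consecutive vblanks) are less than one second apart.
+ */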
+static unsigned time_diff(struct timeval *now, struct timeval *then)
+{
+	return (now->tv_usec >= then->tv_usec) ?
+		now->tv_usec - then->tv_usec :
+		1000000 - (then->tv_usec - now->tv_usec);
+}
+
+u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+	if (crtc != 0)
+		return 0;
+
+	return atomic_read(&dev_priv->vbl_received);
+}
+
+irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	u32 status;
+	int handled = 0;
+	struct timeval cur_vblank;
+	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+	int i;
+
+	status = VIA_READ(VIA_REG_INTERRUPT);
+	if (status & VIA_IRQ_VBLANK_PENDING) {
+		atomic_inc(&dev_priv->vbl_received);
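+		/* Sample the timestamp every 16th vblank; >> 4 averages over those 16. */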
+		if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
+			do_gettimeofday(&cur_vblank);
+			if (dev_priv->last_vblank_valid) {
+				dev_priv->usec_per_vblank =
+					time_diff(&cur_vblank,
+						  &dev_priv->last_vblank) >> 4;
+			}
+			dev_priv->last_vblank = cur_vblank;
+			dev_priv->last_vblank_valid = 1;
+		}
+		if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
+			DRM_DEBUG("US per vblank is: %u\n",
+				  dev_priv->usec_per_vblank);
+		}
+		drm_handle_vblank(dev, 0);
+		handled = 1;
+	}
+
+	for (i = 0; i < dev_priv->num_irqs; ++i) {
+		if (status & cur_irq->pending_mask) {
+			atomic_inc(&cur_irq->irq_received);
+			DRM_WAKEUP(&cur_irq->irq_queue);
+			handled = 1;
+			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
+				via_dmablit_handler(dev, 0, 1);
+			else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
+				via_dmablit_handler(dev, 1, 1);
+		}
+		cur_irq++;
+	}
+
+	/* Acknowledge interrupts */
+	VIA_WRITE(VIA_REG_INTERRUPT, status);
+
+
+	if (handled)
+		return IRQ_HANDLED;
+	else
+		return IRQ_NONE;
+}
+
+static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
+{
+	u32 status;
+
+	if (dev_priv) {
+		/* Acknowledge interrupts */
+		status = VIA_READ(VIA_REG_INTERRUPT);
+		VIA_WRITE(VIA_REG_INTERRUPT, status |
+			  dev_priv->irq_pending_mask);
+	}
+}
+
+int via_enable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+	u32 status;
+
+	if (crtc != 0) {
+		DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
+		return -EINVAL;
+	}
+
+	status = VIA_READ(VIA_REG_INTERRUPT);
+	VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
+
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
+
+	return 0;
+}
+
+void via_disable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+	u32 status;
+
+	status = VIA_READ(VIA_REG_INTERRUPT);
+	VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
+
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
+
+	if (crtc != 0)
+		DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
+}
+
+static int
+via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
+		    unsigned int *sequence)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	unsigned int cur_irq_sequence;
+	drm_via_irq_t *cur_irq;
+	int ret = 0;
+	maskarray_t *masks;
+	int real_irq;
+
+	DRM_DEBUG("\n");
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	if (irq >= drm_via_irq_num) {
+		DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
+		return -EINVAL;
+	}
+
+	real_irq = dev_priv->irq_map[irq];
+
+	if (real_irq < 0) {
+		DRM_ERROR("Video IRQ %d not available on this hardware.\n",
+			  irq);
+		return -EINVAL;
+	}
+
+	masks = dev_priv->irq_masks;
+	cur_irq = dev_priv->via_irqs + real_irq;
+
+	if (masks[real_irq][2] && !force_sequence) {
+		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+			    ((VIA_READ(masks[real_irq][2]) & masks[real_irq][3]) ==
+			     masks[real_irq][4]));
+		cur_irq_sequence = atomic_read(&cur_irq->irq_received);
+	} else {
+		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+			    (((cur_irq_sequence =
+			       atomic_read(&cur_irq->irq_received)) -
+			      *sequence) <= (1 << 23)));
+	}
+	*sequence = cur_irq_sequence;
+	return ret;
+}
+
+
+/*
+ * drm_dma.h hooks
+ */
+
+void via_driver_irq_preinstall(struct drm_device *dev)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	u32 status;
+	drm_via_irq_t *cur_irq;
+	int i;
+
+	DRM_DEBUG("dev_priv: %p\n", dev_priv);
+	if (dev_priv) {
+		cur_irq = dev_priv->via_irqs;
+
+		dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
+		dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
+
+		if (dev_priv->chipset == VIA_PRO_GROUP_A ||
+		    dev_priv->chipset == VIA_DX9_0) {
+			dev_priv->irq_masks = via_pro_group_a_irqs;
+			dev_priv->num_irqs = via_num_pro_group_a;
+			dev_priv->irq_map = via_irqmap_pro_group_a;
+		} else {
+			dev_priv->irq_masks = via_unichrome_irqs;
+			dev_priv->num_irqs = via_num_unichrome;
+			dev_priv->irq_map = via_irqmap_unichrome;
+		}
+
+		for (i = 0; i < dev_priv->num_irqs; ++i) {
+			atomic_set(&cur_irq->irq_received, 0);
+			cur_irq->enable_mask = dev_priv->irq_masks[i][0];
+			cur_irq->pending_mask = dev_priv->irq_masks[i][1];
+			DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
+			dev_priv->irq_enable_mask |= cur_irq->enable_mask;
+			dev_priv->irq_pending_mask |= cur_irq->pending_mask;
+			cur_irq++;
+
+			DRM_DEBUG("Initializing IRQ %d\n", i);
+		}
+
+		dev_priv->last_vblank_valid = 0;
+
+		/* Clear VSync interrupt regs */
+		status = VIA_READ(VIA_REG_INTERRUPT);
+		VIA_WRITE(VIA_REG_INTERRUPT, status &
+			  ~(dev_priv->irq_enable_mask));
+
+		/* Clear bits if they're already high */
+		viadrv_acknowledge_irqs(dev_priv);
+	}
+}
+
+int via_driver_irq_postinstall(struct drm_device *dev)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	u32 status;
+
+	DRM_DEBUG("via_driver_irq_postinstall\n");
+	if (!dev_priv)
+		return -EINVAL;
+
+	status = VIA_READ(VIA_REG_INTERRUPT);
+	VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
+		  | dev_priv->irq_enable_mask);
+
+	/* Some magic, oh for some data sheets! */
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
+
+	return 0;
+}
+
+void via_driver_irq_uninstall(struct drm_device *dev)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	u32 status;
+
+	DRM_DEBUG("\n");
+	if (dev_priv) {
+
+		/* Some more magic, oh for some data sheets! */
+
+		VIA_WRITE8(0x83d4, 0x11);
+		VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
+
+		status = VIA_READ(VIA_REG_INTERRUPT);
+		VIA_WRITE(VIA_REG_INTERRUPT, status &
+			  ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
+	}
+}
+
+int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_irqwait_t *irqwait = data;
+	struct timeval now;
+	int ret = 0;
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+	int force_sequence;
+
+	if (irqwait->request.irq >= dev_priv->num_irqs) {
+		DRM_ERROR("Trying to wait on unknown irq %d\n",
+			  irqwait->request.irq);
+		return -EINVAL;
+	}
+
+	cur_irq += irqwait->request.irq;
+
+	switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
+	case VIA_IRQ_RELATIVE:
+		irqwait->request.sequence +=
+			atomic_read(&cur_irq->irq_received);
+		irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
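+		/* fall through */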
+	case VIA_IRQ_ABSOLUTE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (irqwait->request.type & VIA_IRQ_SIGNAL) {
+		DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
+		return -EINVAL;
+	}
+
+	force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
+
+	ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
+				  &irqwait->request.sequence);
+	do_gettimeofday(&now);
+	irqwait->reply.tval_sec = now.tv_sec;
+	irqwait->reply.tval_usec = now.tv_usec;
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/via/via_map.c b/linux-imx/drivers/gpu/drm/via/via_map.c
new file mode 100644
index 0000000..d0ab3fb
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/via/via_map.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <drm/drmP.h>
+#include <drm/via_drm.h>
+#include "via_drv.h"
+
+static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+
+	dev_priv->sarea = drm_getsarea(dev);
+	if (!dev_priv->sarea) {
+		DRM_ERROR("could not find sarea!\n");
+		dev->dev_private = (void *)dev_priv;
+		via_do_cleanup_map(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
+	if (!dev_priv->fb) {
+		DRM_ERROR("could not find framebuffer!\n");
+		dev->dev_private = (void *)dev_priv;
+		via_do_cleanup_map(dev);
+		return -EINVAL;
+	}
+	dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
+	if (!dev_priv->mmio) {
+		DRM_ERROR("could not find mmio region!\n");
+		dev->dev_private = (void *)dev_priv;
+		via_do_cleanup_map(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->sarea_priv =
+	    (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+				 init->sarea_priv_offset);
+
+	dev_priv->agpAddr = init->agpAddr;
+
+	via_init_futex(dev_priv);
+
+	via_init_dmablit(dev);
+
+	dev->dev_private = (void *)dev_priv;
+	return 0;
+}
+
+int via_do_cleanup_map(struct drm_device *dev)
+{
+	via_dma_cleanup(dev);
+
+	return 0;
+}
+
+int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_init_t *init = data;
+
+	DRM_DEBUG("\n");
+
+	switch (init->func) {
+	case VIA_INIT_MAP:
+		return via_do_init_map(dev, init);
+	case VIA_CLEANUP_MAP:
+		return via_do_cleanup_map(dev);
+	}
+
+	return -EINVAL;
+}
+
+int via_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+	drm_via_private_t *dev_priv;
+	int ret = 0;
+
+	dev_priv = kzalloc(sizeof(drm_via_private_t), GFP_KERNEL);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	idr_init(&dev_priv->object_idr);
+	dev->dev_private = (void *)dev_priv;
+
+	dev_priv->chipset = chipset;
+
+	pci_set_master(dev->pdev);
+
+	ret = drm_vblank_init(dev, 1);
+	if (ret) {
+		kfree(dev_priv);
+		return ret;
+	}
+
+	return 0;
+}
+
+int via_driver_unload(struct drm_device *dev)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+
+	idr_destroy(&dev_priv->object_idr);
+
+	kfree(dev_priv);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/via/via_mm.c b/linux-imx/drivers/gpu/drm/via/via_mm.c
new file mode 100644
index 0000000..0ab93ff
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/via/via_mm.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/via_drm.h>
+#include "via_drv.h"
+
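+/* Both memory managers below do their bookkeeping in 16-byte granules. */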
+#define VIA_MM_ALIGN_SHIFT 4
+#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
+
+struct via_memblock {
+	struct drm_mm_node mm_node;
+	struct list_head owner_list;
+};
+
+int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_agp_t *agp = data;
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
+
+	dev_priv->agp_initialized = 1;
+	dev_priv->agp_offset = agp->offset;
+	mutex_unlock(&dev->struct_mutex);
+
+	DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
+	return 0;
+}
+
+int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_fb_t *fb = data;
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
+
+	dev_priv->vram_initialized = 1;
+	dev_priv->vram_offset = fb->offset;
+
+	mutex_unlock(&dev->struct_mutex);
+	DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
+
+	return 0;
+
+}
+
+int via_final_context(struct drm_device *dev, int context)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+	via_release_futex(dev_priv, context);
+
+	/* Linux specific until context tracking code gets ported to BSD */
+	/* Last context, perform cleanup */
+	if (dev->ctx_count == 1 && dev->dev_private) {
+		DRM_DEBUG("Last Context\n");
+		drm_irq_uninstall(dev);
+		via_cleanup_futex(dev_priv);
+		via_do_cleanup_map(dev);
+	}
+	return 1;
+}
+
+void via_lastclose(struct drm_device *dev)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+	if (!dev_priv)
+		return;
+
+	mutex_lock(&dev->struct_mutex);
+	if (dev_priv->vram_initialized) {
+		drm_mm_takedown(&dev_priv->vram_mm);
+		dev_priv->vram_initialized = 0;
+	}
+	if (dev_priv->agp_initialized) {
+		drm_mm_takedown(&dev_priv->agp_mm);
+		dev_priv->agp_initialized = 0;
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
+int via_mem_alloc(struct drm_device *dev, void *data,
+		  struct drm_file *file)
+{
+	drm_via_mem_t *mem = data;
+	int retval = 0, user_key;
+	struct via_memblock *item;
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	struct via_file_private *file_priv = file->driver_priv;
+	unsigned long tmpSize;
+
+	if (mem->type > VIA_MEM_AGP) {
+		DRM_ERROR("Unknown memory type allocation\n");
+		return -EINVAL;
+	}
+	mutex_lock(&dev->struct_mutex);
+	if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
+		      dev_priv->agp_initialized)) {
+		DRM_ERROR
+		    ("Attempt to allocate from uninitialized memory manager.\n");
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	item = kzalloc(sizeof(*item), GFP_KERNEL);
+	if (!item) {
+		retval = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
+	if (mem->type == VIA_MEM_AGP)
+		retval = drm_mm_insert_node(&dev_priv->agp_mm,
+					    &item->mm_node,
+					    tmpSize, 0);
+	else
+		retval = drm_mm_insert_node(&dev_priv->vram_mm,
+					    &item->mm_node,
+					    tmpSize, 0);
+	if (retval)
+		goto fail_alloc;
+
+	retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
+	if (retval < 0)
+		goto fail_idr;
+	user_key = retval;
+
+	list_add(&item->owner_list, &file_priv->obj_list);
+	mutex_unlock(&dev->struct_mutex);
+
+	mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
+		      dev_priv->vram_offset : dev_priv->agp_offset) +
+	    ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
+	mem->index = user_key;
+
+	return 0;
+
+fail_idr:
+	drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+	kfree(item);
+	mutex_unlock(&dev->struct_mutex);
+
+	mem->offset = 0;
+	mem->size = 0;
+	mem->index = 0;
+	DRM_DEBUG("Video memory allocation failed\n");
+
+	return retval;
+}
+
+int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+	drm_via_mem_t *mem = data;
+	struct via_memblock *obj;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = idr_find(&dev_priv->object_idr, mem->index);
+	if (obj == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	idr_remove(&dev_priv->object_idr, mem->index);
+	list_del(&obj->owner_list);
+	drm_mm_remove_node(&obj->mm_node);
+	kfree(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	DRM_DEBUG("free = 0x%lx\n", mem->index);
+
+	return 0;
+}
+
+
+void via_reclaim_buffers_locked(struct drm_device *dev,
+				struct drm_file *file)
+{
+	struct via_file_private *file_priv = file->driver_priv;
+	struct via_memblock *entry, *next;
+
+	if (!(file->minor->master && file->master->lock.hw_lock))
+		return;
+
+	drm_idlelock_take(&file->master->lock);
+
+	mutex_lock(&dev->struct_mutex);
+	if (list_empty(&file_priv->obj_list)) {
+		mutex_unlock(&dev->struct_mutex);
+		drm_idlelock_release(&file->master->lock);
+
+		return;
+	}
+
+	via_driver_dma_quiescent(dev);
+
+	list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+				 owner_list) {
+		list_del(&entry->owner_list);
+		drm_mm_remove_node(&entry->mm_node);
+		kfree(entry);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_idlelock_release(&file->master->lock);
+}
diff --git a/linux-imx/drivers/gpu/drm/via/via_verifier.c b/linux-imx/drivers/gpu/drm/via/via_verifier.c
new file mode 100644
index 0000000..9dbc92b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/via/via_verifier.c
@@ -0,0 +1,1110 @@
+/*
+ * Copyright 2004 The Unichrome Project. All Rights Reserved.
+ * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Thomas Hellstrom 2004, 2005.
+ * This code was written using docs obtained under NDA from VIA Inc.
+ *
+ * Don't run this code directly on an AGP buffer. Due to cache problems it will
+ * be very slow.
+ */
+
+#include "via_3d_reg.h"
+#include <drm/drmP.h>
+#include <drm/via_drm.h>
+#include "via_verifier.h"
+#include "via_drv.h"
+
+typedef enum {
+	state_command,
+	state_header2,
+	state_header1,
+	state_vheader5,
+	state_vheader6,
+	state_error
+} verifier_state_t;
+
+typedef enum {
+	no_check = 0,
+	check_for_header2,
+	check_for_header1,
+	check_for_header2_err,
+	check_for_header1_err,
+	check_for_fire,
+	check_z_buffer_addr0,
+	check_z_buffer_addr1,
+	check_z_buffer_addr_mode,
+	check_destination_addr0,
+	check_destination_addr1,
+	check_destination_addr_mode,
+	check_for_dummy,
+	check_for_dd,
+	check_texture_addr0,
+	check_texture_addr1,
+	check_texture_addr2,
+	check_texture_addr3,
+	check_texture_addr4,
+	check_texture_addr5,
+	check_texture_addr6,
+	check_texture_addr7,
+	check_texture_addr8,
+	check_texture_addr_mode,
+	check_for_vertex_count,
+	check_number_texunits,
+	forbidden_command
+} hazard_t;
+
+/*
+ * Associates each hazard above with a possible multi-command
+ * sequence. For example an address that is split over multiple
+ * commands and that needs to be checked at the first command
+ * that does not include any part of the address.
+ */
+
+static drm_via_sequence_t seqs[] = {
+	no_sequence,
+	no_sequence,
+	no_sequence,
+	no_sequence,
+	no_sequence,
+	no_sequence,
+	z_address,
+	z_address,
+	z_address,
+	dest_address,
+	dest_address,
+	dest_address,
+	no_sequence,
+	no_sequence,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	no_sequence
+};
+
+typedef struct {
+	unsigned int code;
+	hazard_t hz;
+} hz_init_t;
+
+static hz_init_t init_table1[] = {
+	{0xf2, check_for_header2_err},
+	{0xf0, check_for_header1_err},
+	{0xee, check_for_fire},
+	{0xcc, check_for_dummy},
+	{0xdd, check_for_dd},
+	{0x00, no_check},
+	{0x10, check_z_buffer_addr0},
+	{0x11, check_z_buffer_addr1},
+	{0x12, check_z_buffer_addr_mode},
+	{0x13, no_check},
+	{0x14, no_check},
+	{0x15, no_check},
+	{0x23, no_check},
+	{0x24, no_check},
+	{0x33, no_check},
+	{0x34, no_check},
+	{0x35, no_check},
+	{0x36, no_check},
+	{0x37, no_check},
+	{0x38, no_check},
+	{0x39, no_check},
+	{0x3A, no_check},
+	{0x3B, no_check},
+	{0x3C, no_check},
+	{0x3D, no_check},
+	{0x3E, no_check},
+	{0x40, check_destination_addr0},
+	{0x41, check_destination_addr1},
+	{0x42, check_destination_addr_mode},
+	{0x43, no_check},
+	{0x44, no_check},
+	{0x50, no_check},
+	{0x51, no_check},
+	{0x52, no_check},
+	{0x53, no_check},
+	{0x54, no_check},
+	{0x55, no_check},
+	{0x56, no_check},
+	{0x57, no_check},
+	{0x58, no_check},
+	{0x70, no_check},
+	{0x71, no_check},
+	{0x78, no_check},
+	{0x79, no_check},
+	{0x7A, no_check},
+	{0x7B, no_check},
+	{0x7C, no_check},
+	{0x7D, check_for_vertex_count}
+};
+
+static hz_init_t init_table2[] = {
+	{0xf2, check_for_header2_err},
+	{0xf0, check_for_header1_err},
+	{0xee, check_for_fire},
+	{0xcc, check_for_dummy},
+	{0x00, check_texture_addr0},
+	{0x01, check_texture_addr0},
+	{0x02, check_texture_addr0},
+	{0x03, check_texture_addr0},
+	{0x04, check_texture_addr0},
+	{0x05, check_texture_addr0},
+	{0x06, check_texture_addr0},
+	{0x07, check_texture_addr0},
+	{0x08, check_texture_addr0},
+	{0x09, check_texture_addr0},
+	{0x20, check_texture_addr1},
+	{0x21, check_texture_addr1},
+	{0x22, check_texture_addr1},
+	{0x23, check_texture_addr4},
+	{0x2B, check_texture_addr3},
+	{0x2C, check_texture_addr3},
+	{0x2D, check_texture_addr3},
+	{0x2E, check_texture_addr3},
+	{0x2F, check_texture_addr3},
+	{0x30, check_texture_addr3},
+	{0x31, check_texture_addr3},
+	{0x32, check_texture_addr3},
+	{0x33, check_texture_addr3},
+	{0x34, check_texture_addr3},
+	{0x4B, check_texture_addr5},
+	{0x4C, check_texture_addr6},
+	{0x51, check_texture_addr7},
+	{0x52, check_texture_addr8},
+	{0x77, check_texture_addr2},
+	{0x78, no_check},
+	{0x79, no_check},
+	{0x7A, no_check},
+	{0x7B, check_texture_addr_mode},
+	{0x7C, no_check},
+	{0x7D, no_check},
+	{0x7E, no_check},
+	{0x7F, no_check},
+	{0x80, no_check},
+	{0x81, no_check},
+	{0x82, no_check},
+	{0x83, no_check},
+	{0x85, no_check},
+	{0x86, no_check},
+	{0x87, no_check},
+	{0x88, no_check},
+	{0x89, no_check},
+	{0x8A, no_check},
+	{0x90, no_check},
+	{0x91, no_check},
+	{0x92, no_check},
+	{0x93, no_check}
+};
+
+static hz_init_t init_table3[] = {
+	{0xf2, check_for_header2_err},
+	{0xf0, check_for_header1_err},
+	{0xcc, check_for_dummy},
+	{0x00, check_number_texunits}
+};
+
+static hazard_t table1[256];
+static hazard_t table2[256];
+static hazard_t table3[256];
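+/*
+ * The per-opcode hazard tables above are presumably filled in from the
+ * init_table* arrays by via_init_command_verifier() at module load time
+ * (see via_drv.c).
+ */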
+
+static __inline__ int
+eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words)
+{
+	if ((buf_end - *buf) >= num_words) {
+		*buf += num_words;
+		return 0;
+	}
+	DRM_ERROR("Illegal termination of DMA command buffer\n");
+	return 1;
+}
+
+/*
+ * Partially stolen from drm_memory.h
+ */
+
+static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
+						    unsigned long offset,
+						    unsigned long size,
+						    struct drm_device *dev)
+{
+	struct drm_map_list *r_list;
+	drm_local_map_t *map = seq->map_cache;
+
+	if (map && map->offset <= offset
+	    && (offset + size) <= (map->offset + map->size)) {
+		return map;
+	}
+
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		map = r_list->map;
+		if (!map)
+			continue;
+		if (map->offset <= offset
+		    && (offset + size) <= (map->offset + map->size)
+		    && !(map->flags & _DRM_RESTRICTED)
+		    && (map->type == _DRM_AGP)) {
+			seq->map_cache = map;
+			return map;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Require that all AGP texture levels reside in the same AGP map, which should
+ * be mappable by the client. This is not a big restriction.
+ * FIXME: To actually enforce this security policy strictly, drm_rmmap
+ * would have to wait for dma quiescent before removing an AGP map.
+ * The via_drm_lookup_agp_map call in reality seems to take
+ * very little CPU time.
+ */
+
+static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
+{
+	switch (cur_seq->unfinished) {
+	case z_address:
+		DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
+		break;
+	case dest_address:
+		DRM_DEBUG("Destination start address is 0x%x\n",
+			  cur_seq->d_addr);
+		break;
+	case tex_address:
+		if (cur_seq->agp_texture) {
+			unsigned start =
+			    cur_seq->tex_level_lo[cur_seq->texture];
+			unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
+			unsigned long lo = ~0, hi = 0, tmp;
+			uint32_t *addr, *pitch, *height, tex;
+			unsigned i;
+			int npot;
+
+			if (end > 9)
+				end = 9;
+			if (start > 9)
+				start = 9;
+
+			addr =
+			    &(cur_seq->t_addr[tex = cur_seq->texture][start]);
+			pitch = &(cur_seq->pitch[tex][start]);
+			height = &(cur_seq->height[tex][start]);
+			npot = cur_seq->tex_npot[tex];
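+			/*
+			 * Compute the [lo, hi) byte range spanned by all mip
+			 * levels, then require it to fit within a single
+			 * client-mappable AGP map.
+			 */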
+			for (i = start; i <= end; ++i) {
+				tmp = *addr++;
+				if (tmp < lo)
+					lo = tmp;
+				if (i == 0 && npot)
+					tmp += (*height++ * *pitch++);
+				else
+					tmp += (*height++ << *pitch++);
+				if (tmp > hi)
+					hi = tmp;
+			}
+
+			if (!via_drm_lookup_agp_map
+			    (cur_seq, lo, hi - lo, cur_seq->dev)) {
+				DRM_ERROR
+				    ("AGP texture is not in allowed map\n");
+				return 2;
+			}
+		}
+		break;
+	default:
+		break;
+	}
+	cur_seq->unfinished = no_sequence;
+	return 0;
+}
+
+static __inline__ int
+investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq)
+{
+	register uint32_t tmp, *tmp_addr;
+
+	if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
+		int ret;
+		if ((ret = finish_current_sequence(cur_seq)))
+			return ret;
+	}
+
+	switch (hz) {
+	case check_for_header2:
+		if (cmd == HALCYON_HEADER2)
+			return 1;
+		return 0;
+	case check_for_header1:
+		if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+			return 1;
+		return 0;
+	case check_for_header2_err:
+		if (cmd == HALCYON_HEADER2)
+			return 1;
+		DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
+		break;
+	case check_for_header1_err:
+		if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+			return 1;
+		DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
+		break;
+	case check_for_fire:
+		if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
+			return 1;
+		DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
+		break;
+	case check_for_dummy:
+		if (HC_DUMMY == cmd)
+			return 0;
+		DRM_ERROR("Illegal DMA HC_DUMMY command\n");
+		break;
+	case check_for_dd:
+		if (0xdddddddd == cmd)
+			return 0;
+		DRM_ERROR("Illegal DMA 0xdddddddd command\n");
+		break;
+	case check_z_buffer_addr0:
+		cur_seq->unfinished = z_address;
+		cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
+		    (cmd & 0x00FFFFFF);
+		return 0;
+	case check_z_buffer_addr1:
+		cur_seq->unfinished = z_address;
+		cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
+		    ((cmd & 0xFF) << 24);
+		return 0;
+	case check_z_buffer_addr_mode:
+		cur_seq->unfinished = z_address;
+		if ((cmd & 0x0000C000) == 0)
+			return 0;
+		DRM_ERROR("Attempt to place Z buffer in system memory\n");
+		return 2;
+	case check_destination_addr0:
+		cur_seq->unfinished = dest_address;
+		cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
+		    (cmd & 0x00FFFFFF);
+		return 0;
+	case check_destination_addr1:
+		cur_seq->unfinished = dest_address;
+		cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
+		    ((cmd & 0xFF) << 24);
+		return 0;
+	case check_destination_addr_mode:
+		cur_seq->unfinished = dest_address;
+		if ((cmd & 0x0000C000) == 0)
+			return 0;
+		DRM_ERROR
+		    ("Attempt to place 3D drawing buffer in system memory\n");
+		return 2;
+	case check_texture_addr0:
+		cur_seq->unfinished = tex_address;
+		tmp = (cmd >> 24);
+		tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
+		*tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
+		return 0;
+	case check_texture_addr1:
+		cur_seq->unfinished = tex_address;
+		tmp = ((cmd >> 24) - 0x20);
+		tmp += tmp << 1;
+		tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
+		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
+		tmp_addr++;
+		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
+		tmp_addr++;
+		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
+		return 0;
+	case check_texture_addr2:
+		cur_seq->unfinished = tex_address;
+		cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
+		cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
+		return 0;
+	case check_texture_addr3:
+		cur_seq->unfinished = tex_address;
+		tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
+		if (tmp == 0 &&
+		    (cmd & HC_HTXnEnPit_MASK)) {
+			cur_seq->pitch[cur_seq->texture][tmp] =
+				(cmd & HC_HTXnLnPit_MASK);
+			cur_seq->tex_npot[cur_seq->texture] = 1;
+		} else {
+			cur_seq->pitch[cur_seq->texture][tmp] =
+				(cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
+			cur_seq->tex_npot[cur_seq->texture] = 0;
+			if (cmd & 0x000FFFFF) {
+				DRM_ERROR
+					("Unimplemented texture level 0 pitch mode.\n");
+				return 2;
+			}
+		}
+		return 0;
+	case check_texture_addr4:
+		cur_seq->unfinished = tex_address;
+		tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
+		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
+		return 0;
+	case check_texture_addr5:
+	case check_texture_addr6:
+		cur_seq->unfinished = tex_address;
+		/*
+		 * Texture width. We don't care since we have the pitch.
+		 */
+		return 0;
+	case check_texture_addr7:
+		cur_seq->unfinished = tex_address;
+		tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
+		tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
+		tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
+		tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
+		tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
+		tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
+		tmp_addr[0] = 1 << (cmd & 0x0000000F);
+		return 0;
+	case check_texture_addr8:
+		cur_seq->unfinished = tex_address;
+		tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
+		tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
+		tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
+		tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
+		tmp_addr[6] = 1 << (cmd & 0x0000000F);
+		return 0;
+	case check_texture_addr_mode:
+		cur_seq->unfinished = tex_address;
+		if (2 == (tmp = cmd & 0x00000003)) {
+			DRM_ERROR
+			    ("Attempt to fetch texture from system memory.\n");
+			return 2;
+		}
+		cur_seq->agp_texture = (tmp == 3);
+		cur_seq->tex_palette_size[cur_seq->texture] =
+		    (cmd >> 16) & 0x00000007;
+		return 0;
+	case check_for_vertex_count:
+		cur_seq->vertex_count = cmd & 0x0000FFFF;
+		return 0;
+	case check_number_texunits:
+		cur_seq->multitex = (cmd >> 3) & 1;
+		return 0;
+	default:
+		DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
+		return 2;
+	}
+	return 2;
+}
+
+static __inline__ int
+via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
+		    drm_via_state_t *cur_seq)
+{
+	drm_via_private_t *dev_priv =
+	    (drm_via_private_t *) cur_seq->dev->dev_private;
+	uint32_t a_fire, bcmd, dw_count;
+	int ret = 0;
+	int have_fire;
+	const uint32_t *buf = *buffer;
+
+	while (buf < buf_end) {
+		have_fire = 0;
+		if ((buf_end - buf) < 2) {
+			DRM_ERROR
+			    ("Unexpected termination of primitive list.\n");
+			ret = 1;
+			break;
+		}
+		if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
+			break;
+		bcmd = *buf++;
+		if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
+			DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
+				  *buf);
+			ret = 1;
+			break;
+		}
+		a_fire =
+		    *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
+		    HC_HE3Fire_MASK;
+
+		/*
+		 * How many dwords per vertex ?
+		 */
+
+		if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
+			DRM_ERROR("Illegal B command vertex data for AGP.\n");
+			ret = 1;
+			break;
+		}
+
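+		/*
+		 * Editor's note: bits 7..14 of the B command select which
+		 * per-vertex components are present in the stream.  Bits 7
+		 * and 8 (apparently texture coordinates, given the multitex
+		 * dependence) take two dwords each when multitexturing is
+		 * enabled; e.g. bcmd = (1 << 7) | (1 << 9) with multitex set
+		 * gives dw_count = 2 + 1 = 3 dwords per vertex.
+		 */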
+		dw_count = 0;
+		if (bcmd & (1 << 7))
+			dw_count += (cur_seq->multitex) ? 2 : 1;
+		if (bcmd & (1 << 8))
+			dw_count += (cur_seq->multitex) ? 2 : 1;
+		if (bcmd & (1 << 9))
+			dw_count++;
+		if (bcmd & (1 << 10))
+			dw_count++;
+		if (bcmd & (1 << 11))
+			dw_count++;
+		if (bcmd & (1 << 12))
+			dw_count++;
+		if (bcmd & (1 << 13))
+			dw_count++;
+		if (bcmd & (1 << 14))
+			dw_count++;
+
+		while (buf < buf_end) {
+			if (*buf == a_fire) {
+				if (dev_priv->num_fire_offsets >=
+				    VIA_FIRE_BUF_SIZE) {
+					DRM_ERROR("Fire offset buffer full.\n");
+					ret = 1;
+					break;
+				}
+				dev_priv->fire_offsets[dev_priv->
+						       num_fire_offsets++] =
+				    buf;
+				have_fire = 1;
+				buf++;
+				if (buf < buf_end && *buf == a_fire)
+					buf++;
+				break;
+			}
+			if ((*buf == HALCYON_HEADER2) ||
+			    ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
+				DRM_ERROR("Missing Vertex Fire command, "
+			  "Stray Vertex Fire command or verifier "
+					  "lost sync.\n");
+				ret = 1;
+				break;
+			}
+			if ((ret = eat_words(&buf, buf_end, dw_count)))
+				break;
+		}
+		if (buf >= buf_end && !have_fire) {
+			DRM_ERROR("Missing Vertex Fire command or verifier "
+				  "lost sync.\n");
+			ret = 1;
+			break;
+		}
+		if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
+			DRM_ERROR("AGP Primitive list end misaligned.\n");
+			ret = 1;
+			break;
+		}
+	}
+	*buffer = buf;
+	return ret;
+}
+
+static __inline__ verifier_state_t
+via_check_header2(uint32_t const **buffer, const uint32_t *buf_end,
+		  drm_via_state_t *hc_state)
+{
+	uint32_t cmd;
+	int hz_mode;
+	hazard_t hz;
+	const uint32_t *buf = *buffer;
+	const hazard_t *hz_table;
+
+	if ((buf_end - buf) < 2) {
+		DRM_ERROR
+		    ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
+		return state_error;
+	}
+	buf++;
+	cmd = (*buf++ & 0xFFFF0000) >> 16;
+
+	switch (cmd) {
+	case HC_ParaType_CmdVdata:
+		if (via_check_prim_list(&buf, buf_end, hc_state))
+			return state_error;
+		*buffer = buf;
+		return state_command;
+	case HC_ParaType_NotTex:
+		hz_table = table1;
+		break;
+	case HC_ParaType_Tex:
+		hc_state->texture = 0;
+		hz_table = table2;
+		break;
+	case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
+		hc_state->texture = 1;
+		hz_table = table2;
+		break;
+	case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
+		hz_table = table3;
+		break;
+	case HC_ParaType_Auto:
+		if (eat_words(&buf, buf_end, 2))
+			return state_error;
+		*buffer = buf;
+		return state_command;
+	case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
+		if (eat_words(&buf, buf_end, 32))
+			return state_error;
+		*buffer = buf;
+		return state_command;
+	case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
+	case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
+		DRM_ERROR("Texture palettes are rejected because of "
+			  "lack of info on how to determine their size.\n");
+		return state_error;
+	case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
+		DRM_ERROR("Fog factor palettes are rejected because of "
+			  "lack of info on how to determine their size.\n");
+		return state_error;
+	default:
+
+		/*
+		 * There are some unimplemented HC_ParaTypes here, that
+		 * need to be implemented if the Mesa driver is extended.
+		 */
+
+		DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
+			  "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
+			  cmd, *(buf - 2));
+		*buffer = buf;
+		return state_error;
+	}
+
+	while (buf < buf_end) {
+		cmd = *buf++;
+		if ((hz = hz_table[cmd >> 24])) {
+			if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
+				if (hz_mode == 1) {
+					buf--;
+					break;
+				}
+				return state_error;
+			}
+		} else if (hc_state->unfinished &&
+			   finish_current_sequence(hc_state)) {
+			return state_error;
+		}
+	}
+	if (hc_state->unfinished && finish_current_sequence(hc_state))
+		return state_error;
+	*buffer = buf;
+	return state_command;
+}
+
+static __inline__ verifier_state_t
+via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
+		  const uint32_t *buf_end, int *fire_count)
+{
+	uint32_t cmd;
+	const uint32_t *buf = *buffer;
+	const uint32_t *next_fire;
+	int burst = 0;
+
+	next_fire = dev_priv->fire_offsets[*fire_count];
+	buf++;
+	cmd = (*buf & 0xFFFF0000) >> 16;
+	VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
+	switch (cmd) {
+	case HC_ParaType_CmdVdata:
+		while ((buf < buf_end) &&
+		       (*fire_count < dev_priv->num_fire_offsets) &&
+		       (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
+			while (buf <= next_fire) {
+				VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
+					  (burst & 63), *buf++);
+				burst += 4;
+			}
+			if ((buf < buf_end)
+			    && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
+				buf++;
+
+			if (++(*fire_count) < dev_priv->num_fire_offsets)
+				next_fire = dev_priv->fire_offsets[*fire_count];
+		}
+		break;
+	default:
+		while (buf < buf_end) {
+
+			if (*buf == HC_HEADER2 ||
+			    (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
+			    (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
+			    (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
+				break;
+
+			VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
+				  (burst & 63), *buf++);
+			burst += 4;
+		}
+	}
+	*buffer = buf;
+	return state_command;
+}
+
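+/*
+ * Editor's note, derived from the checks below rather than from VIA
+ * documentation: MMIO offsets 0x000-0x3FF, 0xC00-0xCFF and 0x1300-0x13FF
+ * are accepted, while 0x400-0xBFF (3D / command burst), 0xD00-0x12FF
+ * (PCI DMA) and everything above 0x13FF (VGA) are rejected.
+ */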
+static __inline__ int verify_mmio_address(uint32_t address)
+{
+	if ((address > 0x3FF) && (address < 0xC00)) {
+		DRM_ERROR("Invalid VIDEO DMA command. "
+			  "Attempt to access 3D- or command burst area.\n");
+		return 1;
+	} else if ((address > 0xCFF) && (address < 0x1300)) {
+		DRM_ERROR("Invalid VIDEO DMA command. "
+			  "Attempt to access PCI DMA area.\n");
+		return 1;
+	} else if (address > 0x13FF) {
+		DRM_ERROR("Invalid VIDEO DMA command. "
+			  "Attempt to access VGA registers.\n");
+		return 1;
+	}
+	return 0;
+}
+
+static __inline__ int
+verify_video_tail(uint32_t const **buffer, const uint32_t *buf_end,
+		  uint32_t dwords)
+{
+	const uint32_t *buf = *buffer;
+
+	if (buf_end - buf < dwords) {
+		DRM_ERROR("Illegal termination of video command.\n");
+		return 1;
+	}
+	while (dwords--) {
+		if (*buf++) {
+			DRM_ERROR("Illegal video command tail.\n");
+			return 1;
+		}
+	}
+	*buffer = buf;
+	return 0;
+}
+
+static __inline__ verifier_state_t
+via_check_header1(uint32_t const **buffer, const uint32_t *buf_end)
+{
+	uint32_t cmd;
+	const uint32_t *buf = *buffer;
+	verifier_state_t ret = state_command;
+
+	while (buf < buf_end) {
+		cmd = *buf;
+		if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
+		    (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
+			if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
+				break;
+			DRM_ERROR("Invalid HALCYON_HEADER1 command. "
+				  "Attempt to access 3D- or command burst area.\n");
+			ret = state_error;
+			break;
+		} else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
+			if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
+				break;
+			DRM_ERROR("Invalid HALCYON_HEADER1 command. "
+				  "Attempt to access VGA registers.\n");
+			ret = state_error;
+			break;
+		} else {
+			buf += 2;
+		}
+	}
+	*buffer = buf;
+	return ret;
+}
+
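+/*
+ * Editor's note: a HEADER1 command is a dword pair.  The first dword
+ * carries HALCYON_HEADER1 in its top bits and the register offset,
+ * shifted right by two, in its low bits; the second dword is the value
+ * to write.  The parser below undoes the encoding with
+ * (cmd & ~HALCYON_HEADER1MASK) << 2.
+ */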
+static __inline__ verifier_state_t
+via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer,
+		  const uint32_t *buf_end)
+{
+	register uint32_t cmd;
+	const uint32_t *buf = *buffer;
+
+	while (buf < buf_end) {
+		cmd = *buf;
+		if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
+			break;
+		VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
+		buf++;
+	}
+	*buffer = buf;
+	return state_command;
+}
+
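+/*
+ * Editor's sketch of the video header5 layout, inferred from the checks
+ * below: dword 0 holds the MMIO address under VIA_VIDEOMASK, dword 1 a
+ * data dword count, dword 2 must be 0x00F50000 and dword 3 zero; the
+ * count data dwords follow, zero-padded to a multiple of four.
+ */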
+static __inline__ verifier_state_t
+via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
+{
+	uint32_t data;
+	const uint32_t *buf = *buffer;
+
+	if (buf_end - buf < 4) {
+		DRM_ERROR("Illegal termination of video header5 command\n");
+		return state_error;
+	}
+
+	data = *buf++ & ~VIA_VIDEOMASK;
+	if (verify_mmio_address(data))
+		return state_error;
+
+	data = *buf++;
+	if (*buf++ != 0x00F50000) {
+		DRM_ERROR("Illegal header5 header data\n");
+		return state_error;
+	}
+	if (*buf++ != 0x00000000) {
+		DRM_ERROR("Illegal header5 header data\n");
+		return state_error;
+	}
+	if (eat_words(&buf, buf_end, data))
+		return state_error;
+	if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
+		return state_error;
+	*buffer = buf;
+	return state_command;
+}
+
+static __inline__ verifier_state_t
+via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer,
+		   const uint32_t *buf_end)
+{
+	uint32_t addr, count, i;
+	const uint32_t *buf = *buffer;
+
+	addr = *buf++ & ~VIA_VIDEOMASK;
+	i = count = *buf;
+	buf += 3;
+	while (i--)
+		VIA_WRITE(addr, *buf++);
+	if (count & 3)
+		buf += 4 - (count & 3);
+	*buffer = buf;
+	return state_command;
+}
+
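+/*
+ * Editor's sketch of the video header6 layout, inferred from the checks
+ * below: dword 1 is a pair count, dword 2 must be 0x00F60000 and dword 3
+ * zero; 2 * count dwords of (address, value) pairs follow, zero-padded
+ * to a multiple of four.
+ */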
+static __inline__ verifier_state_t
+via_check_vheader6(uint32_t const **buffer, const uint32_t *buf_end)
+{
+	uint32_t data;
+	const uint32_t *buf = *buffer;
+	uint32_t i;
+
+	if (buf_end - buf < 4) {
+		DRM_ERROR("Illegal termination of video header6 command\n");
+		return state_error;
+	}
+	buf++;
+	data = *buf++;
+	if (*buf++ != 0x00F60000) {
+		DRM_ERROR("Illegal header6 header data\n");
+		return state_error;
+	}
+	if (*buf++ != 0x00000000) {
+		DRM_ERROR("Illegal header6 header data\n");
+		return state_error;
+	}
+	if ((buf_end - buf) < (data << 1)) {
+		DRM_ERROR("Illegal termination of video header6 command\n");
+		return state_error;
+	}
+	for (i = 0; i < data; ++i) {
+		if (verify_mmio_address(*buf++))
+			return state_error;
+		buf++;
+	}
+	data <<= 1;
+	if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
+		return state_error;
+	*buffer = buf;
+	return state_command;
+}
+
+static __inline__ verifier_state_t
+via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer,
+		   const uint32_t *buf_end)
+{
+	uint32_t addr, count, i;
+	const uint32_t *buf = *buffer;
+
+	i = count = *++buf;
+	buf += 3;
+	while (i--) {
+		addr = *buf++;
+		VIA_WRITE(addr, *buf++);
+	}
+	count <<= 1;
+	if (count & 3)
+		buf += 4 - (count & 3);
+	*buffer = buf;
+	return state_command;
+}
+
+int
+via_verify_command_stream(const uint32_t *buf, unsigned int size,
+			  struct drm_device *dev, int agp)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	drm_via_state_t *hc_state = &dev_priv->hc_state;
+	drm_via_state_t saved_state = *hc_state;
+	uint32_t cmd;
+	const uint32_t *buf_end = buf + (size >> 2);
+	verifier_state_t state = state_command;
+	int cme_video;
+	int supported_3d;
+
+	cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
+		     dev_priv->chipset == VIA_DX9_0);
+
+	supported_3d = dev_priv->chipset != VIA_DX9_0;
+
+	hc_state->dev = dev;
+	hc_state->unfinished = no_sequence;
+	hc_state->map_cache = NULL;
+	hc_state->agp = agp;
+	hc_state->buf_start = buf;
+	dev_priv->num_fire_offsets = 0;
+
+	while (buf < buf_end) {
+
+		switch (state) {
+		case state_header2:
+			state = via_check_header2(&buf, buf_end, hc_state);
+			break;
+		case state_header1:
+			state = via_check_header1(&buf, buf_end);
+			break;
+		case state_vheader5:
+			state = via_check_vheader5(&buf, buf_end);
+			break;
+		case state_vheader6:
+			state = via_check_vheader6(&buf, buf_end);
+			break;
+		case state_command:
+			if ((HALCYON_HEADER2 == (cmd = *buf)) &&
+			    supported_3d)
+				state = state_header2;
+			else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+				state = state_header1;
+			else if (cme_video
+				 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
+				state = state_vheader5;
+			else if (cme_video
+				 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
+				state = state_vheader6;
+			else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
+				DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
+				state = state_error;
+			} else {
+				DRM_ERROR
+				    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
+				     cmd);
+				state = state_error;
+			}
+			break;
+		case state_error:
+		default:
+			*hc_state = saved_state;
+			return -EINVAL;
+		}
+	}
+	if (state == state_error) {
+		*hc_state = saved_state;
+		return -EINVAL;
+	}
+	return 0;
+}
+
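+/*
+ * Editor's note: a stream is replayed here only after
+ * via_verify_command_stream() has validated it in full; the fire offsets
+ * recorded during verification drive the vertex-data bursts in
+ * via_parse_header2().
+ */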
+int
+via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
+			 unsigned int size)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	uint32_t cmd;
+	const uint32_t *buf_end = buf + (size >> 2);
+	verifier_state_t state = state_command;
+	int fire_count = 0;
+
+	while (buf < buf_end) {
+
+		switch (state) {
+		case state_header2:
+			state =
+			    via_parse_header2(dev_priv, &buf, buf_end,
+					      &fire_count);
+			break;
+		case state_header1:
+			state = via_parse_header1(dev_priv, &buf, buf_end);
+			break;
+		case state_vheader5:
+			state = via_parse_vheader5(dev_priv, &buf, buf_end);
+			break;
+		case state_vheader6:
+			state = via_parse_vheader6(dev_priv, &buf, buf_end);
+			break;
+		case state_command:
+			if (HALCYON_HEADER2 == (cmd = *buf))
+				state = state_header2;
+			else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+				state = state_header1;
+			else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
+				state = state_vheader5;
+			else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
+				state = state_vheader6;
+			else {
+				DRM_ERROR
+				    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
+				     cmd);
+				state = state_error;
+			}
+			break;
+		case state_error:
+		default:
+			return -EINVAL;
+		}
+	}
+	if (state == state_error)
+		return -EINVAL;
+	return 0;
+}
+
+static void
+setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
+{
+	int i;
+
+	for (i = 0; i < 256; ++i)
+		table[i] = forbidden_command;
+
+	for (i = 0; i < size; ++i)
+		table[init_table[i].code] = init_table[i].hz;
+}
+
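+/*
+ * Editor's note: each table maps the top byte of a HEADER2 state dword
+ * to a hazard check; opcodes not listed in the init tables default to
+ * forbidden_command, so the verifier whitelists known-safe state.
+ */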
+void via_init_command_verifier(void)
+{
+	setup_hazard_table(init_table1, table1,
+			   sizeof(init_table1) / sizeof(hz_init_t));
+	setup_hazard_table(init_table2, table2,
+			   sizeof(init_table2) / sizeof(hz_init_t));
+	setup_hazard_table(init_table3, table3,
+			   sizeof(init_table3) / sizeof(hz_init_t));
+}
diff --git a/linux-imx/drivers/gpu/drm/via/via_verifier.h b/linux-imx/drivers/gpu/drm/via/via_verifier.h
new file mode 100644
index 0000000..26b6d36
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/via/via_verifier.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2004 The Unichrome Project. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Thomas Hellström 2004.
+ */
+
+#ifndef _VIA_VERIFIER_H_
+#define _VIA_VERIFIER_H_
+
+typedef enum {
+	no_sequence = 0,
+	z_address,
+	dest_address,
+	tex_address
+} drm_via_sequence_t;
+
+typedef struct {
+	unsigned texture;
+	uint32_t z_addr;
+	uint32_t d_addr;
+	uint32_t t_addr[2][10];
+	uint32_t pitch[2][10];
+	uint32_t height[2][10];
+	uint32_t tex_level_lo[2];
+	uint32_t tex_level_hi[2];
+	uint32_t tex_palette_size[2];
+	uint32_t tex_npot[2];
+	drm_via_sequence_t unfinished;
+	int agp_texture;
+	int multitex;
+	struct drm_device *dev;
+	drm_local_map_t *map_cache;
+	uint32_t vertex_count;
+	int agp;
+	const uint32_t *buf_start;
+} drm_via_state_t;
+
+extern int via_verify_command_stream(const uint32_t *buf, unsigned int size,
+				     struct drm_device *dev, int agp);
+extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
+				    unsigned int size);
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/via/via_video.c b/linux-imx/drivers/gpu/drm/via/via_video.c
new file mode 100644
index 0000000..6569efa
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/via/via_video.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Thomas Hellstrom 2005.
+ *
+ * Video and XvMC related functions.
+ */
+
+#include <drm/drmP.h>
+#include <drm/via_drm.h>
+#include "via_drv.h"
+
+void via_init_futex(drm_via_private_t *dev_priv)
+{
+	unsigned int i;
+
+	DRM_DEBUG("\n");
+
+	for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
+		DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
+		XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
+	}
+}
+
+void via_cleanup_futex(drm_via_private_t *dev_priv)
+{
+}
+
+void via_release_futex(drm_via_private_t *dev_priv, int context)
+{
+	unsigned int i;
+	volatile int *lock;
+
+	if (!dev_priv->sarea_priv)
+		return;
+
+	for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
+		lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
+		if (_DRM_LOCKING_CONTEXT(*lock) == context) {
+			if (_DRM_LOCK_IS_HELD(*lock)
+			    && (*lock & _DRM_LOCK_CONT)) {
+				DRM_WAKEUP(&(dev_priv->decoder_queue[i]));
+			}
+			*lock = 0;
+		}
+	}
+}
+
+int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_futex_t *fx = data;
+	volatile int *lock;
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
+	int ret = 0;
+
+	DRM_DEBUG("\n");
+
+	if (fx->lock >= VIA_NR_XVMC_LOCKS)
+		return -EFAULT;
+
+	lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
+
+	switch (fx->func) {
+	case VIA_FUTEX_WAIT:
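+		/*
+		 * (fx->ms / 10) * (DRM_HZ / 100) converts the millisecond
+		 * timeout into jiffies: centiseconds multiplied by jiffies
+		 * per centisecond (editor's note).
+		 */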
+		DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
+			    (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
+		return ret;
+	case VIA_FUTEX_WAKE:
+		DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
+		return 0;
+	}
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/vivante/Makefile b/linux-imx/drivers/gpu/drm/vivante/Makefile
new file mode 100644
index 0000000..d87c8e8
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vivante/Makefile
@@ -0,0 +1,29 @@
+##############################################################################
+#
+#    Copyright (C) 2005 - 2013 by Vivante Corp.
+#
+#    This program is free software; you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation; either version 2 of the License, or
+#    (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program; if not, write to the Free Software
+#    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+#
+##############################################################################
+
+
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+vivante-y := vivante_drv.o
+
+obj-$(CONFIG_DRM_VIVANTE)	+= vivante.o
diff --git a/linux-imx/drivers/gpu/drm/vivante/vivante_drv.c b/linux-imx/drivers/gpu/drm/vivante/vivante_drv.c
new file mode 100644
index 0000000..4caf46f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vivante/vivante_drv.c
@@ -0,0 +1,111 @@
+/****************************************************************************
+*
+*    Copyright (C) 2005 - 2013 by Vivante Corp.
+*
+*    This program is free software; you can redistribute it and/or modify
+*    it under the terms of the GNU General Public License as published by
+*    the Free Software Foundation; either version 2 of the License, or
+*    (at your option) any later version.
+*
+*    This program is distributed in the hope that it will be useful,
+*    but WITHOUT ANY WARRANTY; without even the implied warranty of
+*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+*    GNU General Public License for more details.
+*
+*    You should have received a copy of the GNU General Public License
+*    along with this program; if not, write to the Free Software
+*    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+
+
+/* vivante_drv.c -- vivante driver -*- linux-c -*-
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rickard E. (Rik) Faith <faith@valinux.com>
+ *    Daryll Strauss <daryll@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+
+#include "drmP.h"
+#include "vivante_drv.h"
+
+#include "drm_pciids.h"
+
+static char platformdevicename[] = "Vivante GCCore";
+static struct platform_device *pplatformdev;
+
+static const struct file_operations viv_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_USE_MTRR,
+	.fops = &viv_driver_fops,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
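+/*
+ * Editor's note: the GPU core is not discovered by bus probing here; the
+ * module registers its own "Vivante GCCore" platform device and then
+ * binds the DRM driver to it.
+ */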
+static int __init vivante_init(void)
+{
+	int retcode;
+
+	pplatformdev = platform_device_register_simple(platformdevicename,
+			-1, NULL, 0);
+	if (pplatformdev == NULL) {
+		printk(KERN_ERR "Platform device is null\n");
+		return -ENOMEM;
+	}
+
+	retcode = drm_platform_init(&driver, pplatformdev);
+
+	return retcode;
+}
+
+static void __exit vivante_exit(void)
+{
+	if (pplatformdev) {
+		drm_platform_exit(&driver, pplatformdev);
+		platform_device_unregister(pplatformdev);
+		pplatformdev = NULL;
+	}
+}
+
+module_init(vivante_init);
+module_exit(vivante_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/linux-imx/drivers/gpu/drm/vivante/vivante_drv.h b/linux-imx/drivers/gpu/drm/vivante/vivante_drv.h
new file mode 100644
index 0000000..03f5884
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vivante/vivante_drv.h
@@ -0,0 +1,66 @@
+/****************************************************************************
+*
+*    Copyright (C) 2005 - 2013 by Vivante Corp.
+*
+*    This program is free software; you can redistribute it and/or modify
+*    it under the terms of the GNU General Public License as published by
+*    the Free Software Foundation; either version 2 of the License, or
+*    (at your option) any later version.
+*
+*    This program is distributed in the hope that it will be useful,
+*    but WITHOUT ANY WARRANTY; without even the implied warranty of
+*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+*    GNU General Public License for more details.
+*
+*    You should have received a copy of the GNU General Public License
+*    along with this program; if not, write to the Free Software
+*    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+*****************************************************************************/
+
+
+/* vivante_drv.h -- Vivante DRM template customization -*- linux-c -*-
+ * Created: Wed Feb 14 12:32:32 2012 by John Zhao
+ */
+/*
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __VIVANTE_DRV_H__
+#define __VIVANTE_DRV_H__
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"Vivante Inc."
+
+#define DRIVER_NAME		"vivante"
+#define DRIVER_DESC		"Vivante GCCore"
+#define DRIVER_DATE		"20120216"
+
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		0
+#define DRIVER_PATCHLEVEL	0
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/Kconfig b/linux-imx/drivers/gpu/drm/vmwgfx/Kconfig
new file mode 100644
index 0000000..b71bcd0
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/Kconfig
@@ -0,0 +1,22 @@
+config DRM_VMWGFX
+	tristate "DRM driver for VMware Virtual GPU"
+	depends on DRM && PCI && FB
+	select FB_DEFERRED_IO
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select DRM_TTM
+	help
+	  Choose this option if you would like to run 3D acceleration
+	  in a VMware virtual machine.
+	  This is a KMS enabled DRM driver for the VMware SVGA2
+	  virtual hardware.
+	  The compiled module will be called "vmwgfx.ko".
+
+config DRM_VMWGFX_FBCON
+	depends on DRM_VMWGFX
+	bool "Enable framebuffer console under vmwgfx by default"
+	help
+	   Choose this option if you are shipping a new vmwgfx
+	   userspace driver that supports using the kernel driver.
+
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/Makefile b/linux-imx/drivers/gpu/drm/vmwgfx/Makefile
new file mode 100644
index 0000000..2cc6cd9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/Makefile
@@ -0,0 +1,11 @@
+
+ccflags-y := -Iinclude/drm
+
+vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
+	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
+	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
+	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+	    vmwgfx_surface.o
+
+obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/linux-imx/drivers/gpu/drm/vmwgfx/svga3d_reg.h
new file mode 100644
index 0000000..d0e085e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -0,0 +1,1896 @@
+/**********************************************************
+ * Copyright 1998-2009 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_reg.h --
+ *
+ *       SVGA 3D hardware definitions
+ */
+
+#ifndef _SVGA3D_REG_H_
+#define _SVGA3D_REG_H_
+
+#include "svga_reg.h"
+
+
+/*
+ * 3D Hardware Version
+ *
+ *   The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
+ *   register.  It is set by the host and read by the guest.  This lets
+ *   us make new guest drivers which are backwards-compatible with old
+ *   SVGA hardware revisions.  It does not let us support old guest
+ *   drivers.  Good enough for now.
+ *
+ */
+
+#define SVGA3D_MAKE_HWVERSION(major, minor)      (((major) << 16) | ((minor) & 0xFF))
+#define SVGA3D_MAJOR_HWVERSION(version)          ((version) >> 16)
+#define SVGA3D_MINOR_HWVERSION(version)          ((version) & 0xFF)
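+
+/*
+ * Editor's example: SVGA3D_MAKE_HWVERSION(2, 1) evaluates to 0x00020001,
+ * from which SVGA3D_MAJOR_HWVERSION() recovers 2 and
+ * SVGA3D_MINOR_HWVERSION() recovers 1.
+ */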
+
+typedef enum {
+   SVGA3D_HWVERSION_WS5_RC1   = SVGA3D_MAKE_HWVERSION(0, 1),
+   SVGA3D_HWVERSION_WS5_RC2   = SVGA3D_MAKE_HWVERSION(0, 2),
+   SVGA3D_HWVERSION_WS51_RC1  = SVGA3D_MAKE_HWVERSION(0, 3),
+   SVGA3D_HWVERSION_WS6_B1    = SVGA3D_MAKE_HWVERSION(1, 1),
+   SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
+   SVGA3D_HWVERSION_WS65_B1   = SVGA3D_MAKE_HWVERSION(2, 0),
+   SVGA3D_HWVERSION_WS8_B1    = SVGA3D_MAKE_HWVERSION(2, 1),
+   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS8_B1,
+} SVGA3dHardwareVersion;
+
+/*
+ * Generic Types
+ */
+
+typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
+#define SVGA3D_NUM_CLIPPLANES                   6
+#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS  8
+#define SVGA3D_MAX_CONTEXT_IDS                  256
+#define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
+
+/*
+ * Surface formats.
+ *
+ * If you modify this list, be sure to keep GLUtil.c in sync. It
+ * includes the internal format definition of each surface in
+ * GLUtil_ConvertSurfaceFormat, and it contains a table of
+ * human-readable names in GLUtil_GetFormatName.
+ */
+
+typedef enum SVGA3dSurfaceFormat {
+   SVGA3D_FORMAT_INVALID               = 0,
+
+   SVGA3D_X8R8G8B8                     = 1,
+   SVGA3D_A8R8G8B8                     = 2,
+
+   SVGA3D_R5G6B5                       = 3,
+   SVGA3D_X1R5G5B5                     = 4,
+   SVGA3D_A1R5G5B5                     = 5,
+   SVGA3D_A4R4G4B4                     = 6,
+
+   SVGA3D_Z_D32                        = 7,
+   SVGA3D_Z_D16                        = 8,
+   SVGA3D_Z_D24S8                      = 9,
+   SVGA3D_Z_D15S1                      = 10,
+
+   SVGA3D_LUMINANCE8                   = 11,
+   SVGA3D_LUMINANCE4_ALPHA4            = 12,
+   SVGA3D_LUMINANCE16                  = 13,
+   SVGA3D_LUMINANCE8_ALPHA8            = 14,
+
+   SVGA3D_DXT1                         = 15,
+   SVGA3D_DXT2                         = 16,
+   SVGA3D_DXT3                         = 17,
+   SVGA3D_DXT4                         = 18,
+   SVGA3D_DXT5                         = 19,
+
+   SVGA3D_BUMPU8V8                     = 20,
+   SVGA3D_BUMPL6V5U5                   = 21,
+   SVGA3D_BUMPX8L8V8U8                 = 22,
+   SVGA3D_BUMPL8V8U8                   = 23,
+
+   SVGA3D_ARGB_S10E5                   = 24,   /* 16-bit floating-point ARGB */
+   SVGA3D_ARGB_S23E8                   = 25,   /* 32-bit floating-point ARGB */
+
+   SVGA3D_A2R10G10B10                  = 26,
+
+   /* signed formats */
+   SVGA3D_V8U8                         = 27,
+   SVGA3D_Q8W8V8U8                     = 28,
+   SVGA3D_CxV8U8                       = 29,
+
+   /* mixed formats */
+   SVGA3D_X8L8V8U8                     = 30,
+   SVGA3D_A2W10V10U10                  = 31,
+
+   SVGA3D_ALPHA8                       = 32,
+
+   /* Single- and dual-component floating point formats */
+   SVGA3D_R_S10E5                      = 33,
+   SVGA3D_R_S23E8                      = 34,
+   SVGA3D_RG_S10E5                     = 35,
+   SVGA3D_RG_S23E8                     = 36,
+
+   /*
+    * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
+    * the most efficient format to use when creating new surfaces
+    * expressly for index or vertex data.
+    */
+
+   SVGA3D_BUFFER                       = 37,
+
+   SVGA3D_Z_D24X8                      = 38,
+
+   SVGA3D_V16U16                       = 39,
+
+   SVGA3D_G16R16                       = 40,
+   SVGA3D_A16B16G16R16                 = 41,
+
+   /* Packed Video formats */
+   SVGA3D_UYVY                         = 42,
+   SVGA3D_YUY2                         = 43,
+
+   /* Planar video formats */
+   SVGA3D_NV12                         = 44,
+
+   /* Video format with alpha */
+   SVGA3D_AYUV                         = 45,
+
+   SVGA3D_BC4_UNORM                    = 108,
+   SVGA3D_BC5_UNORM                    = 111,
+
+   /* Advanced D3D9 depth formats. */
+   SVGA3D_Z_DF16                       = 118,
+   SVGA3D_Z_DF24                       = 119,
+   SVGA3D_Z_D24S8_INT                  = 120,
+
+   SVGA3D_FORMAT_MAX
+} SVGA3dSurfaceFormat;
+
+typedef uint32 SVGA3dColor; /* a, r, g, b */
+
+/*
+ * These match the D3DFORMAT_OP definitions used by Direct3D. We need
+ * them so that we can query the host for what the supported surface
+ * operations are (when we're using the D3D backend, in particular),
+ * and so we can send those operations to the guest.
+ */
+typedef enum {
+   SVGA3DFORMAT_OP_TEXTURE                               = 0x00000001,
+   SVGA3DFORMAT_OP_VOLUMETEXTURE                         = 0x00000002,
+   SVGA3DFORMAT_OP_CUBETEXTURE                           = 0x00000004,
+   SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET                = 0x00000008,
+   SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET              = 0x00000010,
+   SVGA3DFORMAT_OP_ZSTENCIL                              = 0x00000040,
+   SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH   = 0x00000080,
+
+/*
+ * This format can be used as a render target if the current display mode
+ * has the same depth, ignoring the alpha channel. E.g. if the device
+ * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
+ * format op list entry for A8R8G8B8 should have this cap.
+ */
+   SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET  = 0x00000100,
+
+/*
+ * This format contains DirectDraw support (including Flip).  This flag
+ * should not be set on alpha formats.
+ */
+   SVGA3DFORMAT_OP_DISPLAYMODE                           = 0x00000400,
+
+/*
+ * The rasterizer can provide some level of Direct3D support in this format,
+ * which implies that the driver can create a Context in this mode (for some
+ * render target format).  When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
+ * flag must also be set.
+ */
+   SVGA3DFORMAT_OP_3DACCELERATION                        = 0x00000800,
+
+/*
+ * This is set for a private format when the driver has put the bpp in
+ * the structure.
+ */
+   SVGA3DFORMAT_OP_PIXELSIZE                             = 0x00001000,
+
+/*
+ * Indicates that this format can be converted to any RGB format for which
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ */
+   SVGA3DFORMAT_OP_CONVERT_TO_ARGB                       = 0x00002000,
+
+/*
+ * Indicates that this format can be used to create offscreen plain surfaces.
+ */
+   SVGA3DFORMAT_OP_OFFSCREENPLAIN                        = 0x00004000,
+
+/*
+ * Indicates that this format can be read as an SRGB texture (meaning that the
+ * sampler will linearize the looked-up data)
+ */
+   SVGA3DFORMAT_OP_SRGBREAD                              = 0x00008000,
+
+/*
+ * Indicates that this format can be used in the bumpmap instructions
+ */
+   SVGA3DFORMAT_OP_BUMPMAP                               = 0x00010000,
+
+/*
+ * Indicates that this format can be sampled by the displacement map sampler
+ */
+   SVGA3DFORMAT_OP_DMAP                                  = 0x00020000,
+
+/*
+ * Indicates that this format cannot be used with texture filtering
+ */
+   SVGA3DFORMAT_OP_NOFILTER                              = 0x00040000,
+
+/*
+ * Indicates that format conversions are supported to this RGB format if
+ * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
+ */
+   SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                    = 0x00080000,
+
+/*
+ * Indicates that this format can be written as an SRGB target (meaning that the
+ * pixel pipe will de-linearize data on output to the format)
+ */
+   SVGA3DFORMAT_OP_SRGBWRITE                             = 0x00100000,
+
+/*
+ * Indicates that this format cannot be used with alpha blending
+ */
+   SVGA3DFORMAT_OP_NOALPHABLEND                          = 0x00200000,
+
+/*
+ * Indicates that the device can auto-generate sublevels for resources
+ * of this format
+ */
+   SVGA3DFORMAT_OP_AUTOGENMIPMAP                         = 0x00400000,
+
+/*
+ * Indicates that this format can be used by vertex texture sampler
+ */
+   SVGA3DFORMAT_OP_VERTEXTEXTURE                         = 0x00800000,
+
+/*
+ * Indicates that this format supports neither texture coordinate wrap
+ * modes nor mipmapping
+ */
+   SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP                  = 0x01000000
+} SVGA3dFormatOp;
+
+/*
+ * This structure is a conversion of SVGA3DFORMAT_OP_*.
+ * Entries must be located at the same position.
+ */
+typedef union {
+   uint32 value;
+   struct {
+      uint32 texture : 1;
+      uint32 volumeTexture : 1;
+      uint32 cubeTexture : 1;
+      uint32 offscreenRenderTarget : 1;
+      uint32 sameFormatRenderTarget : 1;
+      uint32 unknown1 : 1;
+      uint32 zStencil : 1;
+      uint32 zStencilArbitraryDepth : 1;
+      uint32 sameFormatUpToAlpha : 1;
+      uint32 unknown2 : 1;
+      uint32 displayMode : 1;
+      uint32 acceleration3d : 1;
+      uint32 pixelSize : 1;
+      uint32 convertToARGB : 1;
+      uint32 offscreenPlain : 1;
+      uint32 sRGBRead : 1;
+      uint32 bumpMap : 1;
+      uint32 dmap : 1;
+      uint32 noFilter : 1;
+      uint32 memberOfGroupARGB : 1;
+      uint32 sRGBWrite : 1;
+      uint32 noAlphaBlend : 1;
+      uint32 autoGenMipMap : 1;
+      uint32 vertexTexture : 1;
+      uint32 noTexCoordWrapNorMip : 1;
+   };
+} SVGA3dSurfaceFormatCaps;
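+
+/*
+ * Editor's example: assigning SVGA3DFORMAT_OP_TEXTURE |
+ * SVGA3DFORMAT_OP_CUBETEXTURE (0x5) to .value sets the .texture and
+ * .cubeTexture bits, matching their positions in SVGA3dFormatOp
+ * (assuming, as the comment above requires, LSB-first bitfield packing).
+ */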
+
+/*
+ * SVGA_3D_CMD_SETRENDERSTATE Types.  All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+   SVGA3D_RS_INVALID                   = 0,
+   SVGA3D_RS_ZENABLE                   = 1,     /* SVGA3dBool */
+   SVGA3D_RS_ZWRITEENABLE              = 2,     /* SVGA3dBool */
+   SVGA3D_RS_ALPHATESTENABLE           = 3,     /* SVGA3dBool */
+   SVGA3D_RS_DITHERENABLE              = 4,     /* SVGA3dBool */
+   SVGA3D_RS_BLENDENABLE               = 5,     /* SVGA3dBool */
+   SVGA3D_RS_FOGENABLE                 = 6,     /* SVGA3dBool */
+   SVGA3D_RS_SPECULARENABLE            = 7,     /* SVGA3dBool */
+   SVGA3D_RS_STENCILENABLE             = 8,     /* SVGA3dBool */
+   SVGA3D_RS_LIGHTINGENABLE            = 9,     /* SVGA3dBool */
+   SVGA3D_RS_NORMALIZENORMALS          = 10,    /* SVGA3dBool */
+   SVGA3D_RS_POINTSPRITEENABLE         = 11,    /* SVGA3dBool */
+   SVGA3D_RS_POINTSCALEENABLE          = 12,    /* SVGA3dBool */
+   SVGA3D_RS_STENCILREF                = 13,    /* uint32 */
+   SVGA3D_RS_STENCILMASK               = 14,    /* uint32 */
+   SVGA3D_RS_STENCILWRITEMASK          = 15,    /* uint32 */
+   SVGA3D_RS_FOGSTART                  = 16,    /* float */
+   SVGA3D_RS_FOGEND                    = 17,    /* float */
+   SVGA3D_RS_FOGDENSITY                = 18,    /* float */
+   SVGA3D_RS_POINTSIZE                 = 19,    /* float */
+   SVGA3D_RS_POINTSIZEMIN              = 20,    /* float */
+   SVGA3D_RS_POINTSIZEMAX              = 21,    /* float */
+   SVGA3D_RS_POINTSCALE_A              = 22,    /* float */
+   SVGA3D_RS_POINTSCALE_B              = 23,    /* float */
+   SVGA3D_RS_POINTSCALE_C              = 24,    /* float */
+   SVGA3D_RS_FOGCOLOR                  = 25,    /* SVGA3dColor */
+   SVGA3D_RS_AMBIENT                   = 26,    /* SVGA3dColor */
+   SVGA3D_RS_CLIPPLANEENABLE           = 27,    /* SVGA3dClipPlanes */
+   SVGA3D_RS_FOGMODE                   = 28,    /* SVGA3dFogMode */
+   SVGA3D_RS_FILLMODE                  = 29,    /* SVGA3dFillMode */
+   SVGA3D_RS_SHADEMODE                 = 30,    /* SVGA3dShadeMode */
+   SVGA3D_RS_LINEPATTERN               = 31,    /* SVGA3dLinePattern */
+   SVGA3D_RS_SRCBLEND                  = 32,    /* SVGA3dBlendOp */
+   SVGA3D_RS_DSTBLEND                  = 33,    /* SVGA3dBlendOp */
+   SVGA3D_RS_BLENDEQUATION             = 34,    /* SVGA3dBlendEquation */
+   SVGA3D_RS_CULLMODE                  = 35,    /* SVGA3dFace */
+   SVGA3D_RS_ZFUNC                     = 36,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_ALPHAFUNC                 = 37,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_STENCILFUNC               = 38,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_STENCILFAIL               = 39,    /* SVGA3dStencilOp */
+   SVGA3D_RS_STENCILZFAIL              = 40,    /* SVGA3dStencilOp */
+   SVGA3D_RS_STENCILPASS               = 41,    /* SVGA3dStencilOp */
+   SVGA3D_RS_ALPHAREF                  = 42,    /* float (0.0 .. 1.0) */
+   SVGA3D_RS_FRONTWINDING              = 43,    /* SVGA3dFrontWinding */
+   SVGA3D_RS_COORDINATETYPE            = 44,    /* SVGA3dCoordinateType */
+   SVGA3D_RS_ZBIAS                     = 45,    /* float */
+   SVGA3D_RS_RANGEFOGENABLE            = 46,    /* SVGA3dBool */
+   SVGA3D_RS_COLORWRITEENABLE          = 47,    /* SVGA3dColorMask */
+   SVGA3D_RS_VERTEXMATERIALENABLE      = 48,    /* SVGA3dBool */
+   SVGA3D_RS_DIFFUSEMATERIALSOURCE     = 49,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_SPECULARMATERIALSOURCE    = 50,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_AMBIENTMATERIALSOURCE     = 51,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_EMISSIVEMATERIALSOURCE    = 52,    /* SVGA3dVertexMaterial */
+   SVGA3D_RS_TEXTUREFACTOR             = 53,    /* SVGA3dColor */
+   SVGA3D_RS_LOCALVIEWER               = 54,    /* SVGA3dBool */
+   SVGA3D_RS_SCISSORTESTENABLE         = 55,    /* SVGA3dBool */
+   SVGA3D_RS_BLENDCOLOR                = 56,    /* SVGA3dColor */
+   SVGA3D_RS_STENCILENABLE2SIDED       = 57,    /* SVGA3dBool */
+   SVGA3D_RS_CCWSTENCILFUNC            = 58,    /* SVGA3dCmpFunc */
+   SVGA3D_RS_CCWSTENCILFAIL            = 59,    /* SVGA3dStencilOp */
+   SVGA3D_RS_CCWSTENCILZFAIL           = 60,    /* SVGA3dStencilOp */
+   SVGA3D_RS_CCWSTENCILPASS            = 61,    /* SVGA3dStencilOp */
+   SVGA3D_RS_VERTEXBLEND               = 62,    /* SVGA3dVertexBlendFlags */
+   SVGA3D_RS_SLOPESCALEDEPTHBIAS       = 63,    /* float */
+   SVGA3D_RS_DEPTHBIAS                 = 64,    /* float */
+
+
+   /*
+    * Output Gamma Level
+    *
+    * Output gamma affects the gamma curve of colors that are output from the
+    * rendering pipeline.  A value of 1.0 specifies a linear color space. If the
+    * value is <= 0.0, gamma correction is ignored and linear color space is
+    * used.
+    */
+
+   SVGA3D_RS_OUTPUTGAMMA               = 65,    /* float */
+   SVGA3D_RS_ZVISIBLE                  = 66,    /* SVGA3dBool */
+   SVGA3D_RS_LASTPIXEL                 = 67,    /* SVGA3dBool */
+   SVGA3D_RS_CLIPPING                  = 68,    /* SVGA3dBool */
+   SVGA3D_RS_WRAP0                     = 69,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP1                     = 70,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP2                     = 71,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP3                     = 72,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP4                     = 73,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP5                     = 74,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP6                     = 75,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP7                     = 76,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP8                     = 77,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP9                     = 78,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP10                    = 79,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP11                    = 80,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP12                    = 81,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP13                    = 82,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP14                    = 83,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_WRAP15                    = 84,    /* SVGA3dWrapFlags */
+   SVGA3D_RS_MULTISAMPLEANTIALIAS      = 85,    /* SVGA3dBool */
+   SVGA3D_RS_MULTISAMPLEMASK           = 86,    /* uint32 */
+   SVGA3D_RS_INDEXEDVERTEXBLENDENABLE  = 87,    /* SVGA3dBool */
+   SVGA3D_RS_TWEENFACTOR               = 88,    /* float */
+   SVGA3D_RS_ANTIALIASEDLINEENABLE     = 89,    /* SVGA3dBool */
+   SVGA3D_RS_COLORWRITEENABLE1         = 90,    /* SVGA3dColorMask */
+   SVGA3D_RS_COLORWRITEENABLE2         = 91,    /* SVGA3dColorMask */
+   SVGA3D_RS_COLORWRITEENABLE3         = 92,    /* SVGA3dColorMask */
+   SVGA3D_RS_SEPARATEALPHABLENDENABLE  = 93,    /* SVGA3dBool */
+   SVGA3D_RS_SRCBLENDALPHA             = 94,    /* SVGA3dBlendOp */
+   SVGA3D_RS_DSTBLENDALPHA             = 95,    /* SVGA3dBlendOp */
+   SVGA3D_RS_BLENDEQUATIONALPHA        = 96,    /* SVGA3dBlendEquation */
+   SVGA3D_RS_TRANSPARENCYANTIALIAS     = 97,    /* SVGA3dTransparencyAntialiasType */
+   SVGA3D_RS_LINEAA                    = 98,    /* SVGA3dBool */
+   SVGA3D_RS_LINEWIDTH                 = 99,    /* float */
+   SVGA3D_RS_MAX
+} SVGA3dRenderStateName;
+
+typedef enum {
+   SVGA3D_TRANSPARENCYANTIALIAS_NORMAL            = 0,
+   SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE   = 1,
+   SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE       = 2,
+   SVGA3D_TRANSPARENCYANTIALIAS_MAX
+} SVGA3dTransparencyAntialiasType;
+
+typedef enum {
+   SVGA3D_VERTEXMATERIAL_NONE     = 0,    /* Use the value in the current material */
+   SVGA3D_VERTEXMATERIAL_DIFFUSE  = 1,    /* Use the value in the diffuse component */
+   SVGA3D_VERTEXMATERIAL_SPECULAR = 2,    /* Use the value in the specular component */
+} SVGA3dVertexMaterial;
+
+typedef enum {
+   SVGA3D_FILLMODE_INVALID = 0,
+   SVGA3D_FILLMODE_POINT   = 1,
+   SVGA3D_FILLMODE_LINE    = 2,
+   SVGA3D_FILLMODE_FILL    = 3,
+   SVGA3D_FILLMODE_MAX
+} SVGA3dFillModeType;
+
+
+typedef
+union {
+   struct {
+      uint16   mode;       /* SVGA3dFillModeType */
+      uint16   face;       /* SVGA3dFace */
+   };
+   uint32 uintValue;
+} SVGA3dFillMode;
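+
+/*
+ * Editor's example, assuming a little-endian guest: uintValue 0x00020003
+ * decodes to mode = SVGA3D_FILLMODE_FILL (3) and face = SVGA3D_FACE_FRONT (2).
+ */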
+
+typedef enum {
+   SVGA3D_SHADEMODE_INVALID = 0,
+   SVGA3D_SHADEMODE_FLAT    = 1,
+   SVGA3D_SHADEMODE_SMOOTH  = 2,
+   SVGA3D_SHADEMODE_PHONG   = 3,     /* Not supported */
+   SVGA3D_SHADEMODE_MAX
+} SVGA3dShadeMode;
+
+typedef
+union {
+   struct {
+      uint16 repeat;
+      uint16 pattern;
+   };
+   uint32 uintValue;
+} SVGA3dLinePattern;
+
+typedef enum {
+   SVGA3D_BLENDOP_INVALID            = 0,
+   SVGA3D_BLENDOP_ZERO               = 1,
+   SVGA3D_BLENDOP_ONE                = 2,
+   SVGA3D_BLENDOP_SRCCOLOR           = 3,
+   SVGA3D_BLENDOP_INVSRCCOLOR        = 4,
+   SVGA3D_BLENDOP_SRCALPHA           = 5,
+   SVGA3D_BLENDOP_INVSRCALPHA        = 6,
+   SVGA3D_BLENDOP_DESTALPHA          = 7,
+   SVGA3D_BLENDOP_INVDESTALPHA       = 8,
+   SVGA3D_BLENDOP_DESTCOLOR          = 9,
+   SVGA3D_BLENDOP_INVDESTCOLOR       = 10,
+   SVGA3D_BLENDOP_SRCALPHASAT        = 11,
+   SVGA3D_BLENDOP_BLENDFACTOR        = 12,
+   SVGA3D_BLENDOP_INVBLENDFACTOR     = 13,
+   SVGA3D_BLENDOP_MAX
+} SVGA3dBlendOp;
+
+typedef enum {
+   SVGA3D_BLENDEQ_INVALID            = 0,
+   SVGA3D_BLENDEQ_ADD                = 1,
+   SVGA3D_BLENDEQ_SUBTRACT           = 2,
+   SVGA3D_BLENDEQ_REVSUBTRACT        = 3,
+   SVGA3D_BLENDEQ_MINIMUM            = 4,
+   SVGA3D_BLENDEQ_MAXIMUM            = 5,
+   SVGA3D_BLENDEQ_MAX
+} SVGA3dBlendEquation;
+
+typedef enum {
+   SVGA3D_FRONTWINDING_INVALID = 0,
+   SVGA3D_FRONTWINDING_CW      = 1,
+   SVGA3D_FRONTWINDING_CCW     = 2,
+   SVGA3D_FRONTWINDING_MAX
+} SVGA3dFrontWinding;
+
+typedef enum {
+   SVGA3D_FACE_INVALID  = 0,
+   SVGA3D_FACE_NONE     = 1,
+   SVGA3D_FACE_FRONT    = 2,
+   SVGA3D_FACE_BACK     = 3,
+   SVGA3D_FACE_FRONT_BACK = 4,
+   SVGA3D_FACE_MAX
+} SVGA3dFace;
+
+/*
+ * The order and the values should not be changed
+ */
+
+typedef enum {
+   SVGA3D_CMP_INVALID              = 0,
+   SVGA3D_CMP_NEVER                = 1,
+   SVGA3D_CMP_LESS                 = 2,
+   SVGA3D_CMP_EQUAL                = 3,
+   SVGA3D_CMP_LESSEQUAL            = 4,
+   SVGA3D_CMP_GREATER              = 5,
+   SVGA3D_CMP_NOTEQUAL             = 6,
+   SVGA3D_CMP_GREATEREQUAL         = 7,
+   SVGA3D_CMP_ALWAYS               = 8,
+   SVGA3D_CMP_MAX
+} SVGA3dCmpFunc;
+
+/*
+ * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
+ * the fog factor to be specified in the alpha component of the specular
+ * (a.k.a. secondary) vertex color.
+ */
+typedef enum {
+   SVGA3D_FOGFUNC_INVALID          = 0,
+   SVGA3D_FOGFUNC_EXP              = 1,
+   SVGA3D_FOGFUNC_EXP2             = 2,
+   SVGA3D_FOGFUNC_LINEAR           = 3,
+   SVGA3D_FOGFUNC_PER_VERTEX       = 4
+} SVGA3dFogFunction;
+
+/*
+ * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
+ * or per-pixel basis.
+ */
+typedef enum {
+   SVGA3D_FOGTYPE_INVALID          = 0,
+   SVGA3D_FOGTYPE_VERTEX           = 1,
+   SVGA3D_FOGTYPE_PIXEL            = 2,
+   SVGA3D_FOGTYPE_MAX              = 3
+} SVGA3dFogType;
+
+/*
+ * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
+ * computed using the eye Z value of each pixel (or vertex), whereas range-
+ * based fog is computed using the actual distance (range) to the eye.
+ */
+typedef enum {
+   SVGA3D_FOGBASE_INVALID          = 0,
+   SVGA3D_FOGBASE_DEPTHBASED       = 1,
+   SVGA3D_FOGBASE_RANGEBASED       = 2,
+   SVGA3D_FOGBASE_MAX              = 3
+} SVGA3dFogBase;
+
+typedef enum {
+   SVGA3D_STENCILOP_INVALID        = 0,
+   SVGA3D_STENCILOP_KEEP           = 1,
+   SVGA3D_STENCILOP_ZERO           = 2,
+   SVGA3D_STENCILOP_REPLACE        = 3,
+   SVGA3D_STENCILOP_INCRSAT        = 4,
+   SVGA3D_STENCILOP_DECRSAT        = 5,
+   SVGA3D_STENCILOP_INVERT         = 6,
+   SVGA3D_STENCILOP_INCR           = 7,
+   SVGA3D_STENCILOP_DECR           = 8,
+   SVGA3D_STENCILOP_MAX
+} SVGA3dStencilOp;
+
+typedef enum {
+   SVGA3D_CLIPPLANE_0              = (1 << 0),
+   SVGA3D_CLIPPLANE_1              = (1 << 1),
+   SVGA3D_CLIPPLANE_2              = (1 << 2),
+   SVGA3D_CLIPPLANE_3              = (1 << 3),
+   SVGA3D_CLIPPLANE_4              = (1 << 4),
+   SVGA3D_CLIPPLANE_5              = (1 << 5),
+} SVGA3dClipPlanes;
+
+typedef enum {
+   SVGA3D_CLEAR_COLOR              = 0x1,
+   SVGA3D_CLEAR_DEPTH              = 0x2,
+   SVGA3D_CLEAR_STENCIL            = 0x4
+} SVGA3dClearFlag;
+
+typedef enum {
+   SVGA3D_RT_DEPTH                 = 0,
+   SVGA3D_RT_STENCIL               = 1,
+   SVGA3D_RT_COLOR0                = 2,
+   SVGA3D_RT_COLOR1                = 3,
+   SVGA3D_RT_COLOR2                = 4,
+   SVGA3D_RT_COLOR3                = 5,
+   SVGA3D_RT_COLOR4                = 6,
+   SVGA3D_RT_COLOR5                = 7,
+   SVGA3D_RT_COLOR6                = 8,
+   SVGA3D_RT_COLOR7                = 9,
+   SVGA3D_RT_MAX,
+   SVGA3D_RT_INVALID               = ((uint32)-1),
+} SVGA3dRenderTargetType;
+
+#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
+
+typedef
+union {
+   struct {
+      uint32  red   : 1;
+      uint32  green : 1;
+      uint32  blue  : 1;
+      uint32  alpha : 1;
+   };
+   uint32 uintValue;
+} SVGA3dColorMask;
+
+typedef enum {
+   SVGA3D_VBLEND_DISABLE            = 0,
+   SVGA3D_VBLEND_1WEIGHT            = 1,
+   SVGA3D_VBLEND_2WEIGHT            = 2,
+   SVGA3D_VBLEND_3WEIGHT            = 3,
+} SVGA3dVertexBlendFlags;
+
+typedef enum {
+   SVGA3D_WRAPCOORD_0   = 1 << 0,
+   SVGA3D_WRAPCOORD_1   = 1 << 1,
+   SVGA3D_WRAPCOORD_2   = 1 << 2,
+   SVGA3D_WRAPCOORD_3   = 1 << 3,
+   SVGA3D_WRAPCOORD_ALL = 0xF,
+} SVGA3dWrapFlags;
+
+/*
+ * SVGA_3D_CMD_TEXTURESTATE Types.  All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+   SVGA3D_TS_INVALID                    = 0,
+   SVGA3D_TS_BIND_TEXTURE               = 1,    /* SVGA3dSurfaceId */
+   SVGA3D_TS_COLOROP                    = 2,    /* SVGA3dTextureCombiner */
+   SVGA3D_TS_COLORARG1                  = 3,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_COLORARG2                  = 4,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAOP                    = 5,    /* SVGA3dTextureCombiner */
+   SVGA3D_TS_ALPHAARG1                  = 6,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAARG2                  = 7,    /* SVGA3dTextureArgData */
+   SVGA3D_TS_ADDRESSU                   = 8,    /* SVGA3dTextureAddress */
+   SVGA3D_TS_ADDRESSV                   = 9,    /* SVGA3dTextureAddress */
+   SVGA3D_TS_MIPFILTER                  = 10,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_MAGFILTER                  = 11,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_MINFILTER                  = 12,   /* SVGA3dTextureFilter */
+   SVGA3D_TS_BORDERCOLOR                = 13,   /* SVGA3dColor */
+   SVGA3D_TS_TEXCOORDINDEX              = 14,   /* uint32 */
+   SVGA3D_TS_TEXTURETRANSFORMFLAGS      = 15,   /* SVGA3dTexTransformFlags */
+   SVGA3D_TS_TEXCOORDGEN                = 16,   /* SVGA3dTextureCoordGen */
+   SVGA3D_TS_BUMPENVMAT00               = 17,   /* float */
+   SVGA3D_TS_BUMPENVMAT01               = 18,   /* float */
+   SVGA3D_TS_BUMPENVMAT10               = 19,   /* float */
+   SVGA3D_TS_BUMPENVMAT11               = 20,   /* float */
+   SVGA3D_TS_TEXTURE_MIPMAP_LEVEL       = 21,   /* uint32 */
+   SVGA3D_TS_TEXTURE_LOD_BIAS           = 22,   /* float */
+   SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL  = 23,   /* uint32 */
+   SVGA3D_TS_ADDRESSW                   = 24,   /* SVGA3dTextureAddress */
+
+
+   /*
+    * Sampler Gamma Level
+    *
+    * Sampler gamma affects the color of samples taken from the sampler.  A
+    * value of 1.0 will produce linear samples.  If the value is <= 0.0, the
+    * gamma value is ignored and a linear space is used.
+    */
+
+   SVGA3D_TS_GAMMA                      = 25,   /* float */
+   SVGA3D_TS_BUMPENVLSCALE              = 26,   /* float */
+   SVGA3D_TS_BUMPENVLOFFSET             = 27,   /* float */
+   SVGA3D_TS_COLORARG0                  = 28,   /* SVGA3dTextureArgData */
+   SVGA3D_TS_ALPHAARG0                  = 29,   /* SVGA3dTextureArgData */
+   SVGA3D_TS_MAX
+} SVGA3dTextureStateName;
+
+typedef enum {
+   SVGA3D_TC_INVALID                   = 0,
+   SVGA3D_TC_DISABLE                   = 1,
+   SVGA3D_TC_SELECTARG1                = 2,
+   SVGA3D_TC_SELECTARG2                = 3,
+   SVGA3D_TC_MODULATE                  = 4,
+   SVGA3D_TC_ADD                       = 5,
+   SVGA3D_TC_ADDSIGNED                 = 6,
+   SVGA3D_TC_SUBTRACT                  = 7,
+   SVGA3D_TC_BLENDTEXTUREALPHA         = 8,
+   SVGA3D_TC_BLENDDIFFUSEALPHA         = 9,
+   SVGA3D_TC_BLENDCURRENTALPHA         = 10,
+   SVGA3D_TC_BLENDFACTORALPHA          = 11,
+   SVGA3D_TC_MODULATE2X                = 12,
+   SVGA3D_TC_MODULATE4X                = 13,
+   SVGA3D_TC_DSDT                      = 14,
+   SVGA3D_TC_DOTPRODUCT3               = 15,
+   SVGA3D_TC_BLENDTEXTUREALPHAPM       = 16,
+   SVGA3D_TC_ADDSIGNED2X               = 17,
+   SVGA3D_TC_ADDSMOOTH                 = 18,
+   SVGA3D_TC_PREMODULATE               = 19,
+   SVGA3D_TC_MODULATEALPHA_ADDCOLOR    = 20,
+   SVGA3D_TC_MODULATECOLOR_ADDALPHA    = 21,
+   SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
+   SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
+   SVGA3D_TC_BUMPENVMAPLUMINANCE       = 24,
+   SVGA3D_TC_MULTIPLYADD               = 25,
+   SVGA3D_TC_LERP                      = 26,
+   SVGA3D_TC_MAX
+} SVGA3dTextureCombiner;
+
+#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
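+
+/*
+ * Example (illustrative): SVGA3D_TC_CAP_BIT maps a combiner op to its
+ * bit in the SVGA3D_DEVCAP_TEXTURE_OPS capability mask, e.g.
+ * SVGA3D_TC_MODULATE4X (13) maps to 1 << 12.  A sketch, where
+ * query_devcap() is a hypothetical capability-query helper:
+ *
+ *    uint32 texOps = query_devcap(SVGA3D_DEVCAP_TEXTURE_OPS);
+ *    if (texOps & SVGA3D_TC_CAP_BIT(SVGA3D_TC_MODULATE4X))
+ *       use_modulate4x();
+ */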
+
+typedef enum {
+   SVGA3D_TEX_ADDRESS_INVALID    = 0,
+   SVGA3D_TEX_ADDRESS_WRAP       = 1,
+   SVGA3D_TEX_ADDRESS_MIRROR     = 2,
+   SVGA3D_TEX_ADDRESS_CLAMP      = 3,
+   SVGA3D_TEX_ADDRESS_BORDER     = 4,
+   SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
+   SVGA3D_TEX_ADDRESS_EDGE       = 6,
+   SVGA3D_TEX_ADDRESS_MAX
+} SVGA3dTextureAddress;
+
+/*
+ * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
+ * disabled, and the rasterizer should use the magnification filter instead.
+ */
+typedef enum {
+   SVGA3D_TEX_FILTER_NONE           = 0,
+   SVGA3D_TEX_FILTER_NEAREST        = 1,
+   SVGA3D_TEX_FILTER_LINEAR         = 2,
+   SVGA3D_TEX_FILTER_ANISOTROPIC    = 3,
+   SVGA3D_TEX_FILTER_FLATCUBIC      = 4, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANCUBIC  = 5, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_PYRAMIDALQUAD  = 6, /* Not currently implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANQUAD   = 7, /* Not currently implemented */
+   SVGA3D_TEX_FILTER_MAX
+} SVGA3dTextureFilter;
+
+typedef enum {
+   SVGA3D_TEX_TRANSFORM_OFF    = 0,
+   SVGA3D_TEX_TRANSFORM_S      = (1 << 0),
+   SVGA3D_TEX_TRANSFORM_T      = (1 << 1),
+   SVGA3D_TEX_TRANSFORM_R      = (1 << 2),
+   SVGA3D_TEX_TRANSFORM_Q      = (1 << 3),
+   SVGA3D_TEX_PROJECTED        = (1 << 15),
+} SVGA3dTexTransformFlags;
+
+typedef enum {
+   SVGA3D_TEXCOORD_GEN_OFF              = 0,
+   SVGA3D_TEXCOORD_GEN_EYE_POSITION     = 1,
+   SVGA3D_TEXCOORD_GEN_EYE_NORMAL       = 2,
+   SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
+   SVGA3D_TEXCOORD_GEN_SPHERE           = 4,
+   SVGA3D_TEXCOORD_GEN_MAX
+} SVGA3dTextureCoordGen;
+
+/*
+ * Texture argument constants for texture combiner
+ */
+typedef enum {
+   SVGA3D_TA_INVALID    = 0,
+   SVGA3D_TA_CONSTANT   = 1,
+   SVGA3D_TA_PREVIOUS   = 2,
+   SVGA3D_TA_DIFFUSE    = 3,
+   SVGA3D_TA_TEXTURE    = 4,
+   SVGA3D_TA_SPECULAR   = 5,
+   SVGA3D_TA_MAX
+} SVGA3dTextureArgData;
+
+#define SVGA3D_TM_MASK_LEN 4
+
+/* Modifiers for texture argument constants defined above. */
+typedef enum {
+   SVGA3D_TM_NONE       = 0,
+   SVGA3D_TM_ALPHA      = (1 << SVGA3D_TM_MASK_LEN),
+   SVGA3D_TM_ONE_MINUS  = (2 << SVGA3D_TM_MASK_LEN),
+} SVGA3dTextureArgModifier;
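+
+/*
+ * An argument constant and a modifier pack into one texture-state
+ * value: the low SVGA3D_TM_MASK_LEN bits carry the SVGA3dTextureArgData
+ * constant, and the bits above them carry the modifier.  A sketch
+ * (set_texture_state() is a hypothetical helper that emits one
+ * SVGA3dTextureState entry):
+ *
+ *    uint32 arg = SVGA3D_TA_TEXTURE | SVGA3D_TM_ALPHA;
+ *    set_texture_state(stage, SVGA3D_TS_COLORARG1, arg);
+ */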
+
+#define SVGA3D_INVALID_ID         ((uint32)-1)
+#define SVGA3D_MAX_CLIP_PLANES    6
+
+/*
+ * This is the limit to the number of fixed-function texture
+ * transforms and texture coordinates we can support. It does *not*
+ * correspond to the number of texture image units (samplers) we
+ * support!
+ */
+#define SVGA3D_MAX_TEXTURE_COORDS 8
+
+/*
+ * Vertex declarations
+ *
+ * Notes:
+ *
+ * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
+ * draw with any POSITIONT vertex arrays, the programmable vertex
+ * pipeline will be implicitly disabled. Drawing will take place as if
+ * no vertex shader was bound.
+ */
+
+typedef enum {
+   SVGA3D_DECLUSAGE_POSITION     = 0,
+   SVGA3D_DECLUSAGE_BLENDWEIGHT,       /*  1 */
+   SVGA3D_DECLUSAGE_BLENDINDICES,      /*  2 */
+   SVGA3D_DECLUSAGE_NORMAL,            /*  3 */
+   SVGA3D_DECLUSAGE_PSIZE,             /*  4 */
+   SVGA3D_DECLUSAGE_TEXCOORD,          /*  5 */
+   SVGA3D_DECLUSAGE_TANGENT,           /*  6 */
+   SVGA3D_DECLUSAGE_BINORMAL,          /*  7 */
+   SVGA3D_DECLUSAGE_TESSFACTOR,        /*  8 */
+   SVGA3D_DECLUSAGE_POSITIONT,         /*  9 */
+   SVGA3D_DECLUSAGE_COLOR,             /* 10 */
+   SVGA3D_DECLUSAGE_FOG,               /* 11 */
+   SVGA3D_DECLUSAGE_DEPTH,             /* 12 */
+   SVGA3D_DECLUSAGE_SAMPLE,            /* 13 */
+   SVGA3D_DECLUSAGE_MAX
+} SVGA3dDeclUsage;
+
+typedef enum {
+   SVGA3D_DECLMETHOD_DEFAULT     = 0,
+   SVGA3D_DECLMETHOD_PARTIALU,
+   SVGA3D_DECLMETHOD_PARTIALV,
+   SVGA3D_DECLMETHOD_CROSSUV,          /* Normal */
+   SVGA3D_DECLMETHOD_UV,
+   SVGA3D_DECLMETHOD_LOOKUP,           /* Lookup a displacement map */
+   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
+} SVGA3dDeclMethod;
+
+typedef enum {
+   SVGA3D_DECLTYPE_FLOAT1        =  0,
+   SVGA3D_DECLTYPE_FLOAT2        =  1,
+   SVGA3D_DECLTYPE_FLOAT3        =  2,
+   SVGA3D_DECLTYPE_FLOAT4        =  3,
+   SVGA3D_DECLTYPE_D3DCOLOR      =  4,
+   SVGA3D_DECLTYPE_UBYTE4        =  5,
+   SVGA3D_DECLTYPE_SHORT2        =  6,
+   SVGA3D_DECLTYPE_SHORT4        =  7,
+   SVGA3D_DECLTYPE_UBYTE4N       =  8,
+   SVGA3D_DECLTYPE_SHORT2N       =  9,
+   SVGA3D_DECLTYPE_SHORT4N       = 10,
+   SVGA3D_DECLTYPE_USHORT2N      = 11,
+   SVGA3D_DECLTYPE_USHORT4N      = 12,
+   SVGA3D_DECLTYPE_UDEC3         = 13,
+   SVGA3D_DECLTYPE_DEC3N         = 14,
+   SVGA3D_DECLTYPE_FLOAT16_2     = 15,
+   SVGA3D_DECLTYPE_FLOAT16_4     = 16,
+   SVGA3D_DECLTYPE_MAX,
+} SVGA3dDeclType;
+
+/*
+ * This structure is used for the divisor for geometry instancing;
+ * it's a direct translation of the Direct3D equivalent.
+ */
+typedef union {
+   struct {
+      /*
+       * For index data, this number represents the number of instances to draw.
+       * For instance data, this number represents the number of
+       * instances/vertex in this stream
+       */
+      uint32 count : 30;
+
+      /*
+       * This is 1 if this is supposed to be the data that is repeated for
+       * every instance.
+       */
+      uint32 indexedData : 1;
+
+      /*
+       * This is 1 if this is supposed to be the per-instance data.
+       */
+      uint32 instanceData : 1;
+   };
+
+   uint32 value;
+} SVGA3dVertexDivisor;
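+
+/*
+ * Example (an illustrative sketch): for instanced drawing, the
+ * geometry stream typically carries the instance count while
+ * per-instance streams advance once per instance:
+ *
+ *    SVGA3dVertexDivisor geom = { .value = 0 };
+ *    geom.indexedData = 1;
+ *    geom.count = numInstances;     -- instances to draw
+ *
+ *    SVGA3dVertexDivisor inst = { .value = 0 };
+ *    inst.instanceData = 1;
+ *    inst.count = 1;                -- advance once per instance
+ */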
+
+typedef enum {
+   SVGA3D_PRIMITIVE_INVALID                     = 0,
+   SVGA3D_PRIMITIVE_TRIANGLELIST                = 1,
+   SVGA3D_PRIMITIVE_POINTLIST                   = 2,
+   SVGA3D_PRIMITIVE_LINELIST                    = 3,
+   SVGA3D_PRIMITIVE_LINESTRIP                   = 4,
+   SVGA3D_PRIMITIVE_TRIANGLESTRIP               = 5,
+   SVGA3D_PRIMITIVE_TRIANGLEFAN                 = 6,
+   SVGA3D_PRIMITIVE_MAX
+} SVGA3dPrimitiveType;
+
+typedef enum {
+   SVGA3D_COORDINATE_INVALID                   = 0,
+   SVGA3D_COORDINATE_LEFTHANDED                = 1,
+   SVGA3D_COORDINATE_RIGHTHANDED               = 2,
+   SVGA3D_COORDINATE_MAX
+} SVGA3dCoordinateType;
+
+typedef enum {
+   SVGA3D_TRANSFORM_INVALID                     = 0,
+   SVGA3D_TRANSFORM_WORLD                       = 1,
+   SVGA3D_TRANSFORM_VIEW                        = 2,
+   SVGA3D_TRANSFORM_PROJECTION                  = 3,
+   SVGA3D_TRANSFORM_TEXTURE0                    = 4,
+   SVGA3D_TRANSFORM_TEXTURE1                    = 5,
+   SVGA3D_TRANSFORM_TEXTURE2                    = 6,
+   SVGA3D_TRANSFORM_TEXTURE3                    = 7,
+   SVGA3D_TRANSFORM_TEXTURE4                    = 8,
+   SVGA3D_TRANSFORM_TEXTURE5                    = 9,
+   SVGA3D_TRANSFORM_TEXTURE6                    = 10,
+   SVGA3D_TRANSFORM_TEXTURE7                    = 11,
+   SVGA3D_TRANSFORM_WORLD1                      = 12,
+   SVGA3D_TRANSFORM_WORLD2                      = 13,
+   SVGA3D_TRANSFORM_WORLD3                      = 14,
+   SVGA3D_TRANSFORM_MAX
+} SVGA3dTransformType;
+
+typedef enum {
+   SVGA3D_LIGHTTYPE_INVALID                     = 0,
+   SVGA3D_LIGHTTYPE_POINT                       = 1,
+   SVGA3D_LIGHTTYPE_SPOT1                       = 2, /* 1-cone, in degrees */
+   SVGA3D_LIGHTTYPE_SPOT2                       = 3, /* 2-cone, in radians */
+   SVGA3D_LIGHTTYPE_DIRECTIONAL                 = 4,
+   SVGA3D_LIGHTTYPE_MAX
+} SVGA3dLightType;
+
+typedef enum {
+   SVGA3D_CUBEFACE_POSX                         = 0,
+   SVGA3D_CUBEFACE_NEGX                         = 1,
+   SVGA3D_CUBEFACE_POSY                         = 2,
+   SVGA3D_CUBEFACE_NEGY                         = 3,
+   SVGA3D_CUBEFACE_POSZ                         = 4,
+   SVGA3D_CUBEFACE_NEGZ                         = 5,
+} SVGA3dCubeFace;
+
+typedef enum {
+   SVGA3D_SHADERTYPE_VS                         = 1,
+   SVGA3D_SHADERTYPE_PS                         = 2,
+   SVGA3D_SHADERTYPE_MAX
+} SVGA3dShaderType;
+
+typedef enum {
+   SVGA3D_CONST_TYPE_FLOAT                      = 0,
+   SVGA3D_CONST_TYPE_INT                        = 1,
+   SVGA3D_CONST_TYPE_BOOL                       = 2,
+} SVGA3dShaderConstType;
+
+#define SVGA3D_MAX_SURFACE_FACES                6
+
+typedef enum {
+   SVGA3D_STRETCH_BLT_POINT                     = 0,
+   SVGA3D_STRETCH_BLT_LINEAR                    = 1,
+   SVGA3D_STRETCH_BLT_MAX
+} SVGA3dStretchBltMode;
+
+typedef enum {
+   SVGA3D_QUERYTYPE_OCCLUSION                   = 0,
+   SVGA3D_QUERYTYPE_MAX
+} SVGA3dQueryType;
+
+typedef enum {
+   SVGA3D_QUERYSTATE_PENDING     = 0,      /* Waiting on the host (set by guest) */
+   SVGA3D_QUERYSTATE_SUCCEEDED   = 1,      /* Completed successfully (set by host) */
+   SVGA3D_QUERYSTATE_FAILED      = 2,      /* Completed unsuccessfully (set by host) */
+   SVGA3D_QUERYSTATE_NEW         = 3,      /* Never submitted (For guest use only) */
+} SVGA3dQueryState;
+
+typedef enum {
+   SVGA3D_WRITE_HOST_VRAM        = 1,
+   SVGA3D_READ_HOST_VRAM         = 2,
+} SVGA3dTransferType;
+
+/*
+ * The maximum number of vertex arrays we're guaranteed to support in
+ * SVGA_3D_CMD_DRAW_PRIMITIVES.
+ */
+#define SVGA3D_MAX_VERTEX_ARRAYS   32
+
+/*
+ * The maximum number of primitive ranges we're guaranteed to support
+ * in SVGA_3D_CMD_DRAW_PRIMITIVES.
+ */
+#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+
+/*
+ * Identifiers for commands in the command FIFO.
+ *
+ * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
+ * the SVGA3D protocol and remain reserved; they should not be used in the
+ * future.
+ *
+ * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * current SVGA3D protocol.
+ *
+ * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * and up.
+ */
+
+#define SVGA_3D_CMD_LEGACY_BASE            1000
+#define SVGA_3D_CMD_BASE                   1040
+
+#define SVGA_3D_CMD_SURFACE_DEFINE         SVGA_3D_CMD_BASE + 0     /* Deprecated */
+#define SVGA_3D_CMD_SURFACE_DESTROY        SVGA_3D_CMD_BASE + 1
+#define SVGA_3D_CMD_SURFACE_COPY           SVGA_3D_CMD_BASE + 2
+#define SVGA_3D_CMD_SURFACE_STRETCHBLT     SVGA_3D_CMD_BASE + 3
+#define SVGA_3D_CMD_SURFACE_DMA            SVGA_3D_CMD_BASE + 4
+#define SVGA_3D_CMD_CONTEXT_DEFINE         SVGA_3D_CMD_BASE + 5
+#define SVGA_3D_CMD_CONTEXT_DESTROY        SVGA_3D_CMD_BASE + 6
+#define SVGA_3D_CMD_SETTRANSFORM           SVGA_3D_CMD_BASE + 7
+#define SVGA_3D_CMD_SETZRANGE              SVGA_3D_CMD_BASE + 8
+#define SVGA_3D_CMD_SETRENDERSTATE         SVGA_3D_CMD_BASE + 9
+#define SVGA_3D_CMD_SETRENDERTARGET        SVGA_3D_CMD_BASE + 10
+#define SVGA_3D_CMD_SETTEXTURESTATE        SVGA_3D_CMD_BASE + 11
+#define SVGA_3D_CMD_SETMATERIAL            SVGA_3D_CMD_BASE + 12
+#define SVGA_3D_CMD_SETLIGHTDATA           SVGA_3D_CMD_BASE + 13
+#define SVGA_3D_CMD_SETLIGHTENABLED        SVGA_3D_CMD_BASE + 14
+#define SVGA_3D_CMD_SETVIEWPORT            SVGA_3D_CMD_BASE + 15
+#define SVGA_3D_CMD_SETCLIPPLANE           SVGA_3D_CMD_BASE + 16
+#define SVGA_3D_CMD_CLEAR                  SVGA_3D_CMD_BASE + 17
+#define SVGA_3D_CMD_PRESENT                SVGA_3D_CMD_BASE + 18    /* Deprecated */
+#define SVGA_3D_CMD_SHADER_DEFINE          SVGA_3D_CMD_BASE + 19
+#define SVGA_3D_CMD_SHADER_DESTROY         SVGA_3D_CMD_BASE + 20
+#define SVGA_3D_CMD_SET_SHADER             SVGA_3D_CMD_BASE + 21
+#define SVGA_3D_CMD_SET_SHADER_CONST       SVGA_3D_CMD_BASE + 22
+#define SVGA_3D_CMD_DRAW_PRIMITIVES        SVGA_3D_CMD_BASE + 23
+#define SVGA_3D_CMD_SETSCISSORRECT         SVGA_3D_CMD_BASE + 24
+#define SVGA_3D_CMD_BEGIN_QUERY            SVGA_3D_CMD_BASE + 25
+#define SVGA_3D_CMD_END_QUERY              SVGA_3D_CMD_BASE + 26
+#define SVGA_3D_CMD_WAIT_FOR_QUERY         SVGA_3D_CMD_BASE + 27
+#define SVGA_3D_CMD_PRESENT_READBACK       SVGA_3D_CMD_BASE + 28    /* Deprecated */
+#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
+#define SVGA_3D_CMD_SURFACE_DEFINE_V2      SVGA_3D_CMD_BASE + 30
+#define SVGA_3D_CMD_GENERATE_MIPMAPS       SVGA_3D_CMD_BASE + 31
+#define SVGA_3D_CMD_ACTIVATE_SURFACE       SVGA_3D_CMD_BASE + 40
+#define SVGA_3D_CMD_DEACTIVATE_SURFACE     SVGA_3D_CMD_BASE + 41
+#define SVGA_3D_CMD_MAX                    SVGA_3D_CMD_BASE + 42
+
+#define SVGA_3D_CMD_FUTURE_MAX             2000
+
+/*
+ * Common substructures used in multiple FIFO commands:
+ */
+
+typedef struct {
+   union {
+      struct {
+         uint16  function;       /* SVGA3dFogFunction */
+         uint8   type;           /* SVGA3dFogType */
+         uint8   base;           /* SVGA3dFogBase */
+      };
+      uint32     uintValue;
+   };
+} SVGA3dFogMode;
+
+/*
+ * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
+ * is a surface ID as well as face/mipmap indices.
+ */
+
+typedef
+struct SVGA3dSurfaceImageId {
+   uint32               sid;
+   uint32               face;
+   uint32               mipmap;
+} SVGA3dSurfaceImageId;
+
+typedef
+struct SVGA3dGuestImage {
+   SVGAGuestPtr         ptr;
+
+   /*
+    * A note on interpretation of pitch: This value of pitch is the
+    * number of bytes between vertically adjacent image
+    * blocks. Normally this is the number of bytes between the first
+    * pixel of two adjacent scanlines. With compressed textures,
+    * however, this may represent the number of bytes between
+    * compression blocks rather than between rows of pixels.
+    *
+    * XXX: Compressed textures currently must be tightly packed in guest memory.
+    *
+    * If the image is 1-dimensional, pitch is ignored.
+    *
+    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
+    * assuming each row of blocks is tightly packed.
+    */
+   uint32 pitch;
+} SVGA3dGuestImage;
+
+
+/*
+ * FIFO command format definitions:
+ */
+
+/*
+ * The data size header following cmdNum for every 3d command
+ */
+typedef
+struct {
+   uint32               id;
+   uint32               size;
+} SVGA3dCmdHeader;
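+
+/*
+ * Example (an illustrative sketch): each FIFO command is this header
+ * followed by 'size' bytes of body.  fifo_reserve()/fifo_commit() are
+ * hypothetical FIFO helpers:
+ *
+ *    const uint32 cmdSize = sizeof(SVGA3dCmdHeader) +
+ *                           sizeof(SVGA3dCmdSetViewport);
+ *    SVGA3dCmdHeader *header = fifo_reserve(cmdSize);
+ *    SVGA3dCmdSetViewport *body = (SVGA3dCmdSetViewport *)(header + 1);
+ *
+ *    header->id   = SVGA_3D_CMD_SETVIEWPORT;
+ *    header->size = sizeof(SVGA3dCmdSetViewport);
+ *    body->cid    = cid;
+ *    body->rect   = viewport;
+ *    fifo_commit(cmdSize);
+ */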
+
+/*
+ * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
+ * optional mipmaps and cube faces.
+ */
+
+typedef
+struct {
+   uint32               width;
+   uint32               height;
+   uint32               depth;
+} SVGA3dSize;
+
+typedef enum {
+   SVGA3D_SURFACE_CUBEMAP              = (1 << 0),
+   SVGA3D_SURFACE_HINT_STATIC          = (1 << 1),
+   SVGA3D_SURFACE_HINT_DYNAMIC         = (1 << 2),
+   SVGA3D_SURFACE_HINT_INDEXBUFFER     = (1 << 3),
+   SVGA3D_SURFACE_HINT_VERTEXBUFFER    = (1 << 4),
+   SVGA3D_SURFACE_HINT_TEXTURE         = (1 << 5),
+   SVGA3D_SURFACE_HINT_RENDERTARGET    = (1 << 6),
+   SVGA3D_SURFACE_HINT_DEPTHSTENCIL    = (1 << 7),
+   SVGA3D_SURFACE_HINT_WRITEONLY       = (1 << 8),
+   SVGA3D_SURFACE_MASKABLE_ANTIALIAS   = (1 << 9),
+   SVGA3D_SURFACE_AUTOGENMIPMAPS       = (1 << 10),
+} SVGA3dSurfaceFlags;
+
+typedef
+struct {
+   uint32               numMipLevels;
+} SVGA3dSurfaceFace;
+
+typedef
+struct {
+   uint32                      sid;
+   SVGA3dSurfaceFlags          surfaceFlags;
+   SVGA3dSurfaceFormat         format;
+   /*
+    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+    * structures must have the same value in the numMipLevels field.
+    * Otherwise, all but the first SVGA3dSurfaceFace structure must have
+    * numMipLevels set to 0.
+    */
+   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
+   /*
+    * Followed by an SVGA3dSize structure for each mip level in each face.
+    *
+    * A note on surface sizes: Sizes are always specified in pixels,
+    * even if the true surface size is not a multiple of the minimum
+    * block size of the surface's format. For example, a 3x3x1 DXT1
+    * compressed texture would actually be stored as a 4x4x1 image in
+    * memory.
+    */
+} SVGA3dCmdDefineSurface;       /* SVGA_3D_CMD_SURFACE_DEFINE */
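+
+/*
+ * Example (illustrative): the number of SVGA3dSize entries that follow
+ * the body is the sum of numMipLevels over all faces:
+ *
+ *    uint32 i, numSizes = 0;
+ *    for (i = 0; i < SVGA3D_MAX_SURFACE_FACES; i++)
+ *       numSizes += cmd->face[i].numMipLevels;
+ *
+ * For a non-cubemap surface only face[0] is populated, so numSizes is
+ * simply face[0].numMipLevels.  The same layout applies to the _v2
+ * variant below.
+ */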
+
+typedef
+struct {
+   uint32                      sid;
+   SVGA3dSurfaceFlags          surfaceFlags;
+   SVGA3dSurfaceFormat         format;
+   /*
+    * If surfaceFlags has the SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+    * structures must have the same value in the numMipLevels field.
+    * Otherwise, all but the first SVGA3dSurfaceFace structure must have
+    * numMipLevels set to 0.
+    */
+   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
+   uint32                      multisampleCount;
+   SVGA3dTextureFilter         autogenFilter;
+   /*
+    * Followed by an SVGA3dSize structure for each mip level in each face.
+    *
+    * A note on surface sizes: Sizes are always specified in pixels,
+    * even if the true surface size is not a multiple of the minimum
+    * block size of the surface's format. For example, a 3x3x1 DXT1
+    * compressed texture would actually be stored as a 4x4x1 image in
+    * memory.
+    */
+} SVGA3dCmdDefineSurface_v2;     /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
+
+typedef
+struct {
+   uint32               sid;
+} SVGA3dCmdDestroySurface;      /* SVGA_3D_CMD_SURFACE_DESTROY */
+
+typedef
+struct {
+   uint32               cid;
+} SVGA3dCmdDefineContext;       /* SVGA_3D_CMD_CONTEXT_DEFINE */
+
+typedef
+struct {
+   uint32               cid;
+} SVGA3dCmdDestroyContext;      /* SVGA_3D_CMD_CONTEXT_DESTROY */
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dClearFlag      clearFlag;
+   uint32               color;
+   float                depth;
+   uint32               stencil;
+   /* Followed by variable number of SVGA3dRect structures */
+} SVGA3dCmdClear;               /* SVGA_3D_CMD_CLEAR */
+
+typedef
+struct SVGA3dCopyRect {
+   uint32               x;
+   uint32               y;
+   uint32               w;
+   uint32               h;
+   uint32               srcx;
+   uint32               srcy;
+} SVGA3dCopyRect;
+
+typedef
+struct SVGA3dCopyBox {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+   uint32               w;
+   uint32               h;
+   uint32               d;
+   uint32               srcx;
+   uint32               srcy;
+   uint32               srcz;
+} SVGA3dCopyBox;
+
+typedef
+struct {
+   uint32               x;
+   uint32               y;
+   uint32               w;
+   uint32               h;
+} SVGA3dRect;
+
+typedef
+struct {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+   uint32               w;
+   uint32               h;
+   uint32               d;
+} SVGA3dBox;
+
+typedef
+struct {
+   uint32               x;
+   uint32               y;
+   uint32               z;
+} SVGA3dPoint;
+
+typedef
+struct {
+   SVGA3dLightType      type;
+   SVGA3dBool           inWorldSpace;
+   float                diffuse[4];
+   float                specular[4];
+   float                ambient[4];
+   float                position[4];
+   float                direction[4];
+   float                range;
+   float                falloff;
+   float                attenuation0;
+   float                attenuation1;
+   float                attenuation2;
+   float                theta;
+   float                phi;
+} SVGA3dLightData;
+
+typedef
+struct {
+   uint32               sid;
+   /* Followed by variable number of SVGA3dCopyRect structures */
+} SVGA3dCmdPresent;             /* SVGA_3D_CMD_PRESENT */
+
+typedef
+struct {
+   SVGA3dRenderStateName   state;
+   union {
+      uint32               uintValue;
+      float                floatValue;
+   };
+} SVGA3dRenderState;
+
+typedef
+struct {
+   uint32               cid;
+   /* Followed by variable number of SVGA3dRenderState structures */
+} SVGA3dCmdSetRenderState;      /* SVGA_3D_CMD_SETRENDERSTATE */
+
+typedef
+struct {
+   uint32                 cid;
+   SVGA3dRenderTargetType type;
+   SVGA3dSurfaceImageId   target;
+} SVGA3dCmdSetRenderTarget;     /* SVGA_3D_CMD_SETRENDERTARGET */
+
+typedef
+struct {
+   SVGA3dSurfaceImageId  src;
+   SVGA3dSurfaceImageId  dest;
+   /* Followed by variable number of SVGA3dCopyBox structures */
+} SVGA3dCmdSurfaceCopy;               /* SVGA_3D_CMD_SURFACE_COPY */
+
+typedef
+struct {
+   SVGA3dSurfaceImageId  src;
+   SVGA3dSurfaceImageId  dest;
+   SVGA3dBox             boxSrc;
+   SVGA3dBox             boxDest;
+   SVGA3dStretchBltMode  mode;
+} SVGA3dCmdSurfaceStretchBlt;         /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
+
+typedef
+struct {
+   /*
+    * If the discard flag is present in a surface DMA operation, the host may
+    * discard the contents of the current mipmap level and face of the target
+    * surface before applying the surface DMA contents.
+    */
+   uint32 discard : 1;
+
+   /*
+    * If the unsynchronized flag is present, the host may perform this upload
+    * without syncing to pending reads on this surface.
+    */
+   uint32 unsynchronized : 1;
+
+   /*
+    * Guests *MUST* set the reserved bits to 0 before submitting the command
+    * suffix as future flags may occupy these bits.
+    */
+   uint32 reserved : 30;
+} SVGA3dSurfaceDMAFlags;
+
+typedef
+struct {
+   SVGA3dGuestImage      guest;
+   SVGA3dSurfaceImageId  host;
+   SVGA3dTransferType    transfer;
+   /*
+    * Followed by variable number of SVGA3dCopyBox structures. For consistency
+    * in all clipping logic and coordinate translation, we define the
+    * "source" in each copyBox as the guest image and the
+    * "destination" as the host image, regardless of transfer
+    * direction.
+    *
+    * For efficiency, the SVGA3D device is free to copy more data than
+    * specified. For example, it may round copy boxes outwards such
+    * that they lie on particular alignment boundaries.
+    */
+} SVGA3dCmdSurfaceDMA;                /* SVGA_3D_CMD_SURFACE_DMA */
+
+/*
+ * SVGA3dCmdSurfaceDMASuffix --
+ *
+ *    This is a command suffix that will appear after a SurfaceDMA command in
+ *    the FIFO.  It contains some extra information that hosts may use to
+ *    optimize performance or protect the guest.  This suffix exists to preserve
+ *    backwards compatibility while also allowing for new functionality to be
+ *    implemented.
+ */
+
+typedef
+struct {
+   uint32 suffixSize;
+
+   /*
+    * The maximum offset is used to determine the maximum offset from the
+    * guestPtr base address that will be accessed or written to during this
+    * surfaceDMA.  If the suffix is supported, the host will respect this
+    * boundary while performing surface DMAs.
+    *
+    * Defaults to MAX_UINT32
+    */
+   uint32 maximumOffset;
+
+   /*
+    * A set of flags that describes optimizations that the host may perform
+    * while performing this surface DMA operation.  For correctness, the guest
+    * must never rely on behaviour that differs when these flags are set.
+    *
+    * Defaults to 0
+    */
+   SVGA3dSurfaceDMAFlags flags;
+} SVGA3dCmdSurfaceDMASuffix;
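+
+/*
+ * Example (an illustrative sketch): a suffix using the documented
+ * defaults.  Zero-initialization also clears the reserved flag bits,
+ * as required by SVGA3dSurfaceDMAFlags:
+ *
+ *    SVGA3dCmdSurfaceDMASuffix suffix = {0};
+ *    suffix.suffixSize    = sizeof suffix;
+ *    suffix.maximumOffset = (uint32)-1;   -- MAX_UINT32: no bound
+ *    suffix.flags.discard = 1;            -- optional: allow discard
+ */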
+
+/*
+ * SVGA_3D_CMD_DRAW_PRIMITIVES --
+ *
+ *   This command is the SVGA3D device's generic drawing entry point.
+ *   It can draw multiple ranges of primitives, optionally using an
+ *   index buffer, using an arbitrary collection of vertex buffers.
+ *
+ *   Each SVGA3dVertexDecl defines a distinct vertex array to bind
+ *   during this draw call. The declarations specify which surface
+ *   the vertex data lives in, what that vertex data is used for,
+ *   and how to interpret it.
+ *
+ *   Each SVGA3dPrimitiveRange defines a collection of primitives
+ *   to render using the same vertex arrays. An index buffer is
+ *   optional.
+ */
+
+typedef
+struct {
+   /*
+    * A range hint is an optional specification for the range of indices
+    * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
+    * that the entire array will be used.
+    *
+    * These are only hints. The SVGA3D device may use them for
+    * performance optimization if possible, but it's also allowed to
+    * ignore these values.
+    */
+   uint32               first;
+   uint32               last;
+} SVGA3dArrayRangeHint;
+
+typedef
+struct {
+   /*
+    * Define the origin and shape of a vertex or index array. Both
+    * 'offset' and 'stride' are in bytes. The provided surface will be
+    * reinterpreted as a flat array of bytes in the same format used
+    * by surface DMA operations. To avoid unnecessary conversions, the
+    * surface should be created with the SVGA3D_BUFFER format.
+    *
+    * Index 0 in the array starts 'offset' bytes into the surface.
+    * Index 1 begins at byte 'offset + stride', etc. Array indices may
+    * not be negative.
+    */
+   uint32               surfaceId;
+   uint32               offset;
+   uint32               stride;
+} SVGA3dArray;
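+
+/*
+ * Example (illustrative): element i of the array starts at byte
+ *
+ *    array.offset + i * array.stride
+ *
+ * within the backing surface, so a tightly packed array of 16-byte
+ * vertices at the start of a surface uses offset = 0, stride = 16.
+ */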
+
+typedef
+struct {
+   /*
+    * Describe a vertex array's data type, and define how it is to be
+    * used by the fixed function pipeline or the vertex shader. It
+    * isn't useful to have two VertexDecls with the same
+    * VertexArrayIdentity in one draw call.
+    */
+   SVGA3dDeclType       type;
+   SVGA3dDeclMethod     method;
+   SVGA3dDeclUsage      usage;
+   uint32               usageIndex;
+} SVGA3dVertexArrayIdentity;
+
+typedef
+struct {
+   SVGA3dVertexArrayIdentity  identity;
+   SVGA3dArray                array;
+   SVGA3dArrayRangeHint       rangeHint;
+} SVGA3dVertexDecl;
+
+typedef
+struct {
+   /*
+    * Define a group of primitives to render, from sequential indices.
+    *
+    * The values of 'primType' and 'primitiveCount' imply the
+    * total number of vertices that will be rendered.
+    */
+   SVGA3dPrimitiveType  primType;
+   uint32               primitiveCount;
+
+   /*
+    * Optional index buffer. If indexArray.surfaceId is
+    * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
+    * without an index buffer is identical to rendering with an index
+    * buffer containing the sequence [0, 1, 2, 3, ...].
+    *
+    * If an index buffer is in use, indexWidth specifies the width in
+    * bytes of each index value. It must be less than or equal to
+    * indexArray.stride.
+    *
+    * (Currently, the SVGA3D device requires index buffers to be tightly
+    * packed. In other words, indexWidth == indexArray.stride)
+    */
+   SVGA3dArray          indexArray;
+   uint32               indexWidth;
+
+   /*
+    * Optional index bias. This number is added to all indices from
+    * indexArray before they are used as vertex array indices. This
+    * can be used in multiple ways:
+    *
+    *  - When not using an indexArray, this bias can be used to
+    *    specify where in the vertex arrays to begin rendering.
+    *
+    *  - A positive number here is equivalent to increasing the
+    *    offset in each vertex array.
+    *
+    *  - A negative number can be used to render using a small
+    *    vertex array and an index buffer that contains large
+    *    values. This may be used by some applications that
+    *    crop a vertex buffer without modifying their index
+    *    buffer.
+    *
+    * Note that rendering with a negative bias value may be slower and
+    * use more memory than rendering with a positive or zero bias.
+    */
+   int32                indexBias;
+} SVGA3dPrimitiveRange;
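+
+/*
+ * Example (illustrative): the index count consumed by a range follows
+ * from the primitive type, along these lines (a sketch):
+ *
+ *    switch (range->primType) {
+ *    case SVGA3D_PRIMITIVE_POINTLIST:     n = range->primitiveCount;     break;
+ *    case SVGA3D_PRIMITIVE_LINELIST:      n = range->primitiveCount * 2; break;
+ *    case SVGA3D_PRIMITIVE_LINESTRIP:     n = range->primitiveCount + 1; break;
+ *    case SVGA3D_PRIMITIVE_TRIANGLELIST:  n = range->primitiveCount * 3; break;
+ *    case SVGA3D_PRIMITIVE_TRIANGLESTRIP:
+ *    case SVGA3D_PRIMITIVE_TRIANGLEFAN:   n = range->primitiveCount + 2; break;
+ *    }
+ */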
+
+typedef
+struct {
+   uint32               cid;
+   uint32               numVertexDecls;
+   uint32               numRanges;
+
+   /*
+    * Up to three variable size arrays follow the
+    * SVGA3dCmdDrawPrimitives structure. In order,
+    * they are:
+    *
+    * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
+    *    SVGA3D_MAX_VERTEX_ARRAYS;
+    * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
+    *    SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
+    * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
+    *    the frequency divisor for the corresponding vertex decl).
+    */
+} SVGA3dCmdDrawPrimitives;      /* SVGA_3D_CMD_DRAW_PRIMITIVES */
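+
+/*
+ * Example (illustrative): the body size implied by the counts above,
+ * without the optional divisor array:
+ *
+ *    size = sizeof(SVGA3dCmdDrawPrimitives) +
+ *           numVertexDecls * sizeof(SVGA3dVertexDecl) +
+ *           numRanges * sizeof(SVGA3dPrimitiveRange);
+ *
+ * When vertex divisors are present, add
+ * numVertexDecls * sizeof(SVGA3dVertexDivisor).
+ */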
+
+typedef
+struct {
+   uint32                   stage;
+   SVGA3dTextureStateName   name;
+   union {
+      uint32                value;
+      float                 floatValue;
+   };
+} SVGA3dTextureState;
+
+typedef
+struct {
+   uint32               cid;
+   /* Followed by variable number of SVGA3dTextureState structures */
+} SVGA3dCmdSetTextureState;      /* SVGA_3D_CMD_SETTEXTURESTATE */
+
+typedef
+struct {
+   uint32                   cid;
+   SVGA3dTransformType      type;
+   float                    matrix[16];
+} SVGA3dCmdSetTransform;          /* SVGA_3D_CMD_SETTRANSFORM */
+
+typedef
+struct {
+   float                min;
+   float                max;
+} SVGA3dZRange;
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dZRange         zRange;
+} SVGA3dCmdSetZRange;             /* SVGA_3D_CMD_SETZRANGE */
+
+typedef
+struct {
+   float                diffuse[4];
+   float                ambient[4];
+   float                specular[4];
+   float                emissive[4];
+   float                shininess;
+} SVGA3dMaterial;
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dFace           face;
+   SVGA3dMaterial       material;
+} SVGA3dCmdSetMaterial;           /* SVGA_3D_CMD_SETMATERIAL */
+
+typedef
+struct {
+   uint32               cid;
+   uint32               index;
+   SVGA3dLightData      data;
+} SVGA3dCmdSetLightData;           /* SVGA_3D_CMD_SETLIGHTDATA */
+
+typedef
+struct {
+   uint32               cid;
+   uint32               index;
+   uint32               enabled;
+} SVGA3dCmdSetLightEnabled;      /* SVGA_3D_CMD_SETLIGHTENABLED */
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dRect           rect;
+} SVGA3dCmdSetViewport;           /* SVGA_3D_CMD_SETVIEWPORT */
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dRect           rect;
+} SVGA3dCmdSetScissorRect;         /* SVGA_3D_CMD_SETSCISSORRECT */
+
+typedef
+struct {
+   uint32               cid;
+   uint32               index;
+   float                plane[4];
+} SVGA3dCmdSetClipPlane;           /* SVGA_3D_CMD_SETCLIPPLANE */
+
+typedef
+struct {
+   uint32               cid;
+   uint32               shid;
+   SVGA3dShaderType     type;
+   /* Followed by variable number of DWORDs for shader bytecode */
+} SVGA3dCmdDefineShader;           /* SVGA_3D_CMD_SHADER_DEFINE */
+
+typedef
+struct {
+   uint32               cid;
+   uint32               shid;
+   SVGA3dShaderType     type;
+} SVGA3dCmdDestroyShader;         /* SVGA_3D_CMD_SHADER_DESTROY */
+
+typedef
+struct {
+   uint32                  cid;
+   uint32                  reg;     /* register number */
+   SVGA3dShaderType        type;
+   SVGA3dShaderConstType   ctype;
+   uint32                  values[4];
+} SVGA3dCmdSetShaderConst;        /* SVGA_3D_CMD_SET_SHADER_CONST */
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dShaderType     type;
+   uint32               shid;
+} SVGA3dCmdSetShader;             /* SVGA_3D_CMD_SET_SHADER */
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+} SVGA3dCmdBeginQuery;           /* SVGA_3D_CMD_BEGIN_QUERY */
+
+typedef
+struct {
+   uint32               cid;
+   SVGA3dQueryType      type;
+   SVGAGuestPtr         guestResult;  /* Points to an SVGA3dQueryResult structure */
+} SVGA3dCmdEndQuery;                  /* SVGA_3D_CMD_END_QUERY */
+
+typedef
+struct {
+   uint32               cid;          /* Same parameters passed to END_QUERY */
+   SVGA3dQueryType      type;
+   SVGAGuestPtr         guestResult;
+} SVGA3dCmdWaitForQuery;              /* SVGA_3D_CMD_WAIT_FOR_QUERY */
+
+typedef
+struct {
+   uint32               totalSize;    /* Set by guest before query is ended. */
+   SVGA3dQueryState     state;        /* Set by host or guest. See SVGA3dQueryState. */
+   union {                            /* Set by host on exit from PENDING state */
+      uint32            result32;
+   };
+} SVGA3dQueryResult;
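+
+/*
+ * Example (an illustrative sketch): the guest initializes the result
+ * to PENDING before SVGA_3D_CMD_END_QUERY, then waits for the host to
+ * move it out of that state:
+ *
+ *    result->totalSize = sizeof *result;   -- set by guest
+ *    result->state = SVGA3D_QUERYSTATE_PENDING;
+ *    ... submit SVGA_3D_CMD_END_QUERY referencing 'result' ...
+ *    ... submit SVGA_3D_CMD_WAIT_FOR_QUERY and sync the FIFO ...
+ *    if (result->state == SVGA3D_QUERYSTATE_SUCCEEDED)
+ *       passed_samples = result->result32;   -- occlusion query result
+ */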
+
+/*
+ * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
+ *
+ *    This is a blit from an SVGA3D surface to a Screen Object. Just
+ *    like GMR-to-screen blits, this blit may be directed at a
+ *    specific screen or to the virtual coordinate space.
+ *
+ *    The blit copies from a rectangular region of an SVGA3D surface
+ *    image to a rectangular region of a screen or screens.
+ *
+ *    This command takes an optional variable-length list of clipping
+ *    rectangles after the body of the command. If no rectangles are
+ *    specified, there is no clipping region. The entire destRect is
+ *    drawn to. If one or more rectangles are included, they describe
+ *    a clipping region. The clip rectangle coordinates are measured
+ *    relative to the top-left corner of destRect.
+ *
+ *    This clipping region serves multiple purposes:
+ *
+ *      - It can be used to perform an irregularly shaped blit more
+ *        efficiently than by issuing many separate blit commands.
+ *
+ *      - It is equivalent to allowing blits with non-integer
+ *        source coordinates. You could blit just one half-pixel
+ *        of a source, for example, by specifying a larger
+ *        destination rectangle than you need, then removing
+ *        part of it using a clip rectangle.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *
+ * Limitations:
+ *
+ *    - Currently, no backend supports blits from a mipmap or face
+ *      other than the first one.
+ */
+
+typedef
+struct {
+   SVGA3dSurfaceImageId srcImage;
+   SVGASignedRect       srcRect;
+   uint32               destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
+   SVGASignedRect       destRect;     /* Supports scaling if src/dest are different sizes */
+   /* Clipping: zero or more SVGASignedRects follow */
+} SVGA3dCmdBlitSurfaceToScreen;         /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
+
+typedef
+struct {
+   uint32               sid;
+   SVGA3dTextureFilter  filter;
+} SVGA3dCmdGenerateMipmaps;             /* SVGA_3D_CMD_GENERATE_MIPMAPS */
+
+
+/*
+ * Capability query index.
+ *
+ * Notes:
+ *
+ *   1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+ *      fixed-function texture units available. Each of these units
+ *      works in both FFP and Shader modes, and supports texture
+ *      transforms and texture coordinates. The host may have additional
+ *      texture image units that are only usable with shaders.
+ *
+ *   2. The BUFFER_FORMAT capabilities are deprecated, and they always
+ *      return TRUE. Even on physical hardware that does not support
+ *      these formats natively, the SVGA3D device will provide an emulation
+ *      which should be invisible to the guest OS.
+ *
+ *      In general, the SVGA3D device should support any operation on
+ *      any surface format; it just may perform some of these
+ *      operations in software depending on the capabilities of the
+ *      available physical hardware.
+ *
+ *      XXX: In the future, we will add capabilities that describe in
+ *      detail what formats are supported in hardware for what kinds
+ *      of operations.
+ */
+
+typedef enum {
+   SVGA3D_DEVCAP_3D                                = 0,
+   SVGA3D_DEVCAP_MAX_LIGHTS                        = 1,
+   SVGA3D_DEVCAP_MAX_TEXTURES                      = 2,  /* See note (1) */
+   SVGA3D_DEVCAP_MAX_CLIP_PLANES                   = 3,
+   SVGA3D_DEVCAP_VERTEX_SHADER_VERSION             = 4,
+   SVGA3D_DEVCAP_VERTEX_SHADER                     = 5,
+   SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION           = 6,
+   SVGA3D_DEVCAP_FRAGMENT_SHADER                   = 7,
+   SVGA3D_DEVCAP_MAX_RENDER_TARGETS                = 8,
+   SVGA3D_DEVCAP_S23E8_TEXTURES                    = 9,
+   SVGA3D_DEVCAP_S10E5_TEXTURES                    = 10,
+   SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND             = 11,
+   SVGA3D_DEVCAP_D16_BUFFER_FORMAT                 = 12, /* See note (2) */
+   SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT               = 13, /* See note (2) */
+   SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT               = 14, /* See note (2) */
+   SVGA3D_DEVCAP_QUERY_TYPES                       = 15,
+   SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING         = 16,
+   SVGA3D_DEVCAP_MAX_POINT_SIZE                    = 17,
+   SVGA3D_DEVCAP_MAX_SHADER_TEXTURES               = 18,
+   SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH                 = 19,
+   SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT                = 20,
+   SVGA3D_DEVCAP_MAX_VOLUME_EXTENT                 = 21,
+   SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT                = 22,
+   SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO          = 23,
+   SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY            = 24,
+   SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT               = 25,
+   SVGA3D_DEVCAP_MAX_VERTEX_INDEX                  = 26,
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS    = 27,
+   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS  = 28,
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS           = 29,
+   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS         = 30,
+   SVGA3D_DEVCAP_TEXTURE_OPS                       = 31,
+   SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8               = 32,
+   SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8               = 33,
+   SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10            = 34,
+   SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5               = 35,
+   SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5               = 36,
+   SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4               = 37,
+   SVGA3D_DEVCAP_SURFACEFMT_R5G6B5                 = 38,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16            = 39,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8      = 40,
+   SVGA3D_DEVCAP_SURFACEFMT_ALPHA8                 = 41,
+   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8             = 42,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D16                  = 43,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8                = 44,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8                = 45,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT1                   = 46,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT2                   = 47,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT3                   = 48,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT4                   = 49,
+   SVGA3D_DEVCAP_SURFACEFMT_DXT5                   = 50,
+   SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8           = 51,
+   SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10            = 52,
+   SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8               = 53,
+   SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8               = 54,
+   SVGA3D_DEVCAP_SURFACEFMT_CxV8U8                 = 55,
+   SVGA3D_DEVCAP_SURFACEFMT_R_S10E5                = 56,
+   SVGA3D_DEVCAP_SURFACEFMT_R_S23E8                = 57,
+   SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5               = 58,
+   SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8               = 59,
+   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5             = 60,
+   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8             = 61,
+   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES        = 63,
+
+   /*
+    * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+    * render targets.  This does not include the depth or stencil targets.
+    */
+   SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS   = 64,
+
+   SVGA3D_DEVCAP_SURFACEFMT_V16U16                 = 65,
+   SVGA3D_DEVCAP_SURFACEFMT_G16R16                 = 66,
+   SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16           = 67,
+   SVGA3D_DEVCAP_SURFACEFMT_UYVY                   = 68,
+   SVGA3D_DEVCAP_SURFACEFMT_YUY2                   = 69,
+   SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES    = 70,
+   SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES       = 71,
+   SVGA3D_DEVCAP_ALPHATOCOVERAGE                   = 72,
+   SVGA3D_DEVCAP_SUPERSAMPLE                       = 73,
+   SVGA3D_DEVCAP_AUTOGENMIPMAPS                    = 74,
+   SVGA3D_DEVCAP_SURFACEFMT_NV12                   = 75,
+   SVGA3D_DEVCAP_SURFACEFMT_AYUV                   = 76,
+
+   /*
+    * This is the maximum number of SVGA context IDs that the guest
+    * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+    */
+   SVGA3D_DEVCAP_MAX_CONTEXT_IDS                   = 77,
+
+   /*
+    * This is the maximum number of SVGA surface IDs that the guest
+    * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+    */
+   SVGA3D_DEVCAP_MAX_SURFACE_IDS                   = 78,
+
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF16                 = 79,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF24                 = 80,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT            = 81,
+
+   SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM              = 82,
+   SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM              = 83,
+
+   /*
+    * Don't add new caps into the previous section; the values in this
+    * enumeration must not change. You can put new values right before
+    * SVGA3D_DEVCAP_MAX.
+    */
+   SVGA3D_DEVCAP_MAX                                  /* This must be the last index. */
+} SVGA3dDevCapIndex;
+
+typedef union {
+   Bool   b;
+   uint32 u;
+   int32  i;
+   float  f;
+} SVGA3dDevCapResult;
+
+#endif /* _SVGA3D_REG_H_ */
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/linux-imx/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
new file mode 100644
index 0000000..8369c3b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -0,0 +1,909 @@
+/**************************************************************************
+ *
+ * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))
+#define max_t(type, x, y)  ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at most four active channels in a block:
+ *    1. Blue and bump U are stored in the first channel.
+ *    2. Green, bump V and stencil are stored in the second channel.
+ *    3. Red, bump W, luminance and depth are stored in the third channel.
+ *    4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ *    1. For compressed formats, only the data channel is used and its size
+ *       is equal to that of a singular block in the compression scheme.
+ *    2. For buffer formats, only the data channel is used and its size is
+ *       exactly one byte in length.
+ *    3. In each case the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use the bitMask structure.
+ */
+
+enum svga3d_block_desc {
+	SVGA3DBLOCKDESC_NONE        = 0,         /* No channels are active */
+	SVGA3DBLOCKDESC_BLUE        = 1 << 0,    /* Block with blue channel
+						    data */
+	SVGA3DBLOCKDESC_U           = 1 << 0,    /* Block with bump U channel
+						    data */
+	SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,    /* Block with alternating video
+						    U and V */
+	SVGA3DBLOCKDESC_GREEN       = 1 << 1,    /* Block with green channel
+						    data */
+	SVGA3DBLOCKDESC_V           = 1 << 1,    /* Block with bump V channel
+						    data */
+	SVGA3DBLOCKDESC_STENCIL     = 1 << 1,    /* Block with a stencil
+						    channel */
+	SVGA3DBLOCKDESC_RED         = 1 << 2,    /* Block with red channel
+						    data */
+	SVGA3DBLOCKDESC_W           = 1 << 2,    /* Block with bump W channel
+						    data */
+	SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,    /* Block with luminance channel
+						    data */
+	SVGA3DBLOCKDESC_Y           = 1 << 2,    /* Block with video luminance
+						    data */
+	SVGA3DBLOCKDESC_DEPTH       = 1 << 2,    /* Block with depth channel */
+	SVGA3DBLOCKDESC_ALPHA       = 1 << 3,    /* Block with an alpha
+						    channel */
+	SVGA3DBLOCKDESC_Q           = 1 << 3,    /* Block with bump Q channel
+						    data */
+	SVGA3DBLOCKDESC_BUFFER      = 1 << 4,    /* Block stores 1 byte of
+						    data */
+	SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,    /* Block stores n bytes of
+						    data depending on the
+						    compression method used */
+	SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,    /* Block stores data in an IEEE
+						    floating point
+						    representation in
+						    all channels */
+	SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,    /* Three separate blocks store
+						    data. */
+	SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,    /* Block with U video data */
+	SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,   /* Block with V video data */
+	SVGA3DBLOCKDESC_EXP         = 1 << 11,   /* Shared exponent */
+	SVGA3DBLOCKDESC_SRGB        = 1 << 12,   /* Data is in sRGB format */
+	SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,   /* 2 planes of Y, UV,
+						    e.g., NV12. */
+	SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,   /* 3 planes of separate
+						    Y, U, V, e.g., YV12. */
+
+	SVGA3DBLOCKDESC_RG         = SVGA3DBLOCKDESC_RED |
+	SVGA3DBLOCKDESC_GREEN,
+	SVGA3DBLOCKDESC_RGB        = SVGA3DBLOCKDESC_RG |
+	SVGA3DBLOCKDESC_BLUE,
+	SVGA3DBLOCKDESC_RGB_SRGB   = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_RGBA       = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_RGBA_SRGB  = SVGA3DBLOCKDESC_RGBA |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_UV         = SVGA3DBLOCKDESC_U |
+	SVGA3DBLOCKDESC_V,
+	SVGA3DBLOCKDESC_UVL        = SVGA3DBLOCKDESC_UV |
+	SVGA3DBLOCKDESC_LUMINANCE,
+	SVGA3DBLOCKDESC_UVW        = SVGA3DBLOCKDESC_UV |
+	SVGA3DBLOCKDESC_W,
+	SVGA3DBLOCKDESC_UVWA       = SVGA3DBLOCKDESC_UVW |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_UVWQ       = SVGA3DBLOCKDESC_U |
+	SVGA3DBLOCKDESC_V |
+	SVGA3DBLOCKDESC_W |
+	SVGA3DBLOCKDESC_Q,
+	SVGA3DBLOCKDESC_LA         = SVGA3DBLOCKDESC_LUMINANCE |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_R_FP       = SVGA3DBLOCKDESC_RED |
+	SVGA3DBLOCKDESC_IEEE_FP,
+	SVGA3DBLOCKDESC_RG_FP      = SVGA3DBLOCKDESC_R_FP |
+	SVGA3DBLOCKDESC_GREEN,
+	SVGA3DBLOCKDESC_RGB_FP     = SVGA3DBLOCKDESC_RG_FP |
+	SVGA3DBLOCKDESC_BLUE,
+	SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RGB_FP |
+	SVGA3DBLOCKDESC_ALPHA,
+	SVGA3DBLOCKDESC_DS         = SVGA3DBLOCKDESC_DEPTH |
+	SVGA3DBLOCKDESC_STENCIL,
+	SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_UV_VIDEO |
+	SVGA3DBLOCKDESC_Y,
+	SVGA3DBLOCKDESC_AYUV       = SVGA3DBLOCKDESC_ALPHA |
+	SVGA3DBLOCKDESC_Y |
+	SVGA3DBLOCKDESC_U_VIDEO |
+	SVGA3DBLOCKDESC_V_VIDEO,
+	SVGA3DBLOCKDESC_RGBE       = SVGA3DBLOCKDESC_RGB |
+	SVGA3DBLOCKDESC_EXP,
+	SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
+	SVGA3DBLOCKDESC_SRGB,
+	SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+	SVGA3DBLOCKDESC_2PLANAR_YUV,
+	SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+	SVGA3DBLOCKDESC_3PLANAR_YUV,
+};
+
+/*
+ * SVGA3dSurfaceDesc describes the actual pixel data.
+ *
+ * This structure provides the following information:
+ *    1. Block description.
+ *    2. Dimensions of a block in the surface.
+ *    3. Size of block in bytes.
+ *    4. Bit depth of the pixel data.
+ *    5. Channel bit depths and masks (if applicable).
+ */
+#define SVGA3D_CHANNEL_DEF(type)		\
+	struct {				\
+		union {				\
+			type blue;              \
+			type u;                 \
+			type uv_video;          \
+			type u_video;           \
+		};				\
+		union {				\
+			type green;             \
+			type v;                 \
+			type stencil;           \
+			type v_video;           \
+		};				\
+		union {				\
+			type red;               \
+			type w;                 \
+			type luminance;         \
+			type y;                 \
+			type depth;             \
+			type data;              \
+		};				\
+		union {				\
+			type alpha;             \
+			type q;                 \
+			type exp;               \
+		};				\
+	}
+
+struct svga3d_surface_desc {
+	enum svga3d_block_desc block_desc;
+	surf_size_struct block_size;
+	u32 bytes_per_block;
+	u32 pitch_bytes_per_block;
+
+	struct {
+		u32 total;
+		SVGA3D_CHANNEL_DEF(uint8);
+	} bit_depth;
+
+	struct {
+		SVGA3D_CHANNEL_DEF(uint8);
+	} bit_offset;
+};
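+
+/*
+ * Example (an illustrative sketch, not a definitive helper): deriving
+ * the pitch and per-slice byte size of a mip level from a descriptor,
+ * using the DIV_ROUND_UP definition above:
+ *
+ *    u32 blocks_x = DIV_ROUND_UP(width,  desc->block_size.width);
+ *    u32 blocks_y = DIV_ROUND_UP(height, desc->block_size.height);
+ *    u32 pitch      = blocks_x * desc->pitch_bytes_per_block;
+ *    u32 slice_size = blocks_x * blocks_y * desc->bytes_per_block;
+ */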
+
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+	{SVGA3DBLOCKDESC_NONE,
+	 {1, 1, 1},  0, 0, {0, {{0}, {0}, {0}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_FORMAT_INVALID */
+
+	{SVGA3DBLOCKDESC_RGB,
+	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_X8R8G8B8 */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_A8R8G8B8 */
+
+	{SVGA3DBLOCKDESC_RGB,
+	 {1, 1, 1},  2, 2, {16, {{5}, {6}, {5}, {0} } },
+	 {{{0}, {5}, {11}, {0} } } },    /* SVGA3D_R5G6B5 */
+
+	{SVGA3DBLOCKDESC_RGB,
+	 {1, 1, 1},  2, 2, {15, {{5}, {5}, {5}, {0} } },
+	 {{{0}, {5}, {10}, {0} } } },    /* SVGA3D_X1R5G5B5 */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  2, 2, {16, {{5}, {5}, {5}, {1} } },
+	 {{{0}, {5}, {10}, {15} } } },   /* SVGA3D_A1R5G5B5 */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  2, 2, {16, {{4}, {4}, {4}, {4} } },
+	 {{{0}, {4}, {8}, {12} } } },    /* SVGA3D_A4R4G4B4 */
+
+	{SVGA3DBLOCKDESC_DEPTH,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D32 */
+
+	{SVGA3DBLOCKDESC_DEPTH,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D16 */
+
+	{SVGA3DBLOCKDESC_DS,
+	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8 */
+
+	{SVGA3DBLOCKDESC_DS,
+	 {1, 1, 1},  2, 2, {16, {{0}, {1}, {15}, {0} } },
+	 {{{0}, {15}, {0}, {0} } } },    /* SVGA3D_Z_D15S1 */
+
+	{SVGA3DBLOCKDESC_LUMINANCE,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE8 */
+
+	{SVGA3DBLOCKDESC_LA,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {4}, {4} } },
+	 {{{0}, {0}, {0}, {4} } } },     /* SVGA3D_LUMINANCE4_ALPHA4 */
+
+	{SVGA3DBLOCKDESC_LUMINANCE,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE16 */
+
+	{SVGA3DBLOCKDESC_LA,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
+	 {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_LUMINANCE8_ALPHA8 */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT1 */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT2 */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT3 */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT4 */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT5 */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
+	 {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_BUMPU8V8 */
+
+	{SVGA3DBLOCKDESC_UVL,
+	 {1, 1, 1},  2, 2, {16, {{5}, {5}, {6}, {0} } },
+	 {{{11}, {6}, {0}, {0} } } },    /* SVGA3D_BUMPL6V5U5 */
+
+	{SVGA3DBLOCKDESC_UVL,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {0} } },
+	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPX8L8V8U8 */
+
+	{SVGA3DBLOCKDESC_UVL,
+	 {1, 1, 1},  3, 3, {24, {{8}, {8}, {8}, {0} } },
+	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPL8V8U8 */
+
+	{SVGA3DBLOCKDESC_RGBA_FP,
+	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_ARGB_S10E5 */
+
+	{SVGA3DBLOCKDESC_RGBA_FP,
+	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_ARGB_S23E8 */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2R10G10B10 */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
+	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_V8U8 */
+
+	{SVGA3DBLOCKDESC_UVWQ,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{24}, {16}, {8}, {0} } } },   /* SVGA3D_Q8W8V8U8 */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
+	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_CxV8U8 */
+
+	{SVGA3DBLOCKDESC_UVL,
+	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+	 {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_X8L8V8U8 */
+
+	{SVGA3DBLOCKDESC_UVWA,
+	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2W10V10U10 */
+
+	{SVGA3DBLOCKDESC_ALPHA,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {0}, {8} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_ALPHA8 */
+
+	{SVGA3DBLOCKDESC_R_FP,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S10E5 */
+
+	{SVGA3DBLOCKDESC_R_FP,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S23E8 */
+
+	{SVGA3DBLOCKDESC_RG_FP,
+	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_RG_S10E5 */
+
+	{SVGA3DBLOCKDESC_RG_FP,
+	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_RG_S23E8 */
+
+	{SVGA3DBLOCKDESC_BUFFER,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BUFFER */
+
+	{SVGA3DBLOCKDESC_DEPTH,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24X8 */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  4, 4, {32, {{16}, {16}, {0}, {0} } },
+	 {{{16}, {0}, {0}, {0} } } },    /* SVGA3D_V16U16 */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+	 {{{0}, {0}, {16}, {0} } } },    /* SVGA3D_G16R16 */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_A16B16G16R16 */
+
+	{SVGA3DBLOCKDESC_YUV,
+	 {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {8}, {0} } } },     /* SVGA3D_UYVY */
+
+	{SVGA3DBLOCKDESC_YUV,
+	 {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
+	 {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_YUY2 */
+
+	{SVGA3DBLOCKDESC_NV12,
+	 {2, 2, 1},  6, 2, {48, {{0}, {0}, {48}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_NV12 */
+
+	{SVGA3DBLOCKDESC_AYUV,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_AYUV */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_UINT */
+
+	{SVGA3DBLOCKDESC_UVWQ,
+	 {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+	 {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_SINT */
+
+	{SVGA3DBLOCKDESC_RGB,
+	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGB_FP,
+	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_FLOAT */
+
+	{SVGA3DBLOCKDESC_RGB,
+	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_UINT */
+
+	{SVGA3DBLOCKDESC_UVW,
+	 {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+	 {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_SINT */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_UINT */
+
+	{SVGA3DBLOCKDESC_UVWQ,
+	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SNORM */
+
+	{SVGA3DBLOCKDESC_UVWQ,
+	 {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+	 {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SINT */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_UINT */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_SINT */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G8X24_TYPELESS */
+
+	{SVGA3DBLOCKDESC_DS,
+	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_D32_FLOAT_S8X24_UINT */
+
+	{SVGA3DBLOCKDESC_R_FP,
+	 {1, 1, 1},  8, 8, {64, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },    /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
+
+	{SVGA3DBLOCKDESC_GREEN,
+	 {1, 1, 1},  8, 8, {64, {{0}, {8}, {0}, {0} } },
+	 {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_X32_TYPELESS_G8X24_UINT */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_UINT */
+
+	{SVGA3DBLOCKDESC_RGB_FP,
+	 {1, 1, 1},  4, 4, {32, {{10}, {11}, {11}, {0} } },
+	 {{{0}, {10}, {21}, {0} } } },  /* SVGA3D_R11G11B10_FLOAT */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM */
+
+	{SVGA3DBLOCKDESC_RGBA_SRGB,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM_SRGB */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UINT */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_SINT */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RG_FP,
+	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_UINT */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+	 {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_SINT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_TYPELESS */
+
+	{SVGA3DBLOCKDESC_DEPTH,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_D32_FLOAT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_UINT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_SINT */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_R24G8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_DS,
+	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_D24_UNORM_S8_UINT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R24_UNORM_X8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_GREEN,
+	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {0}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_X24_TYPELESS_G8_UINT */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UNORM */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UINT */
+
+	{SVGA3DBLOCKDESC_UV,
+	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_SINT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UNORM */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UINT */
+
+	{SVGA3DBLOCKDESC_U,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SNORM */
+
+	{SVGA3DBLOCKDESC_U,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SINT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UNORM */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UINT */
+
+	{SVGA3DBLOCKDESC_U,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SNORM */
+
+	{SVGA3DBLOCKDESC_U,
+	 {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SINT */
+
+	{SVGA3DBLOCKDESC_RED,
+	 {8, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R1_UNORM */
+
+	{SVGA3DBLOCKDESC_RGBE,
+	 {1, 1, 1},  4, 4, {32, {{9}, {9}, {9}, {5} } },
+	 {{{18}, {9}, {0}, {27} } } },   /* SVGA3D_R9G9B9E5_SHAREDEXP */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_B8G8_UNORM */
+
+	{SVGA3DBLOCKDESC_RG,
+	 {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_G8R8_G8B8_UNORM */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_TYPELESS */
+
+	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_UNORM_SRGB */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_TYPELESS */
+
+	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_UNORM_SRGB */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_TYPELESS */
+
+	{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_UNORM_SRGB */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_TYPELESS */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_UNORM */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_SNORM */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_TYPELESS */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_UNORM */
+
+	{SVGA3DBLOCKDESC_COMPRESSED,
+	 {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_SNORM */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+	 {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
+
+	{SVGA3DBLOCKDESC_RGBA,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGBA_SRGB,
+	 {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_UNORM_SRGB */
+
+	{SVGA3DBLOCKDESC_RGB,
+	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_TYPELESS */
+
+	{SVGA3DBLOCKDESC_RGB_SRGB,
+	 {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+	 {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_UNORM_SRGB */
+
+	{SVGA3DBLOCKDESC_DEPTH,
+	 {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+	 {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_DF16 */
+
+	{SVGA3DBLOCKDESC_DS,
+	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_DF24 */
+
+	{SVGA3DBLOCKDESC_DS,
+	 {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+	 {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8_INT */
+};
+
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+	uint64_t tmp = (uint64_t) a*b;
+	return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
+
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+	if (format < ARRAY_SIZE(svga3d_surface_descs))
+		return &svga3d_surface_descs[format];
+
+	return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * svga3dsurface_get_mip_size --
+ *
+ *      Given a base level size and the mip level, compute the size of
+ *      the mip level.
+ *
+ * Results:
+ *      See above.
+ *
+ * Side effects:
+ *      None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+	surf_size_struct size;
+
+	size.width = max_t(u32, base_level.width >> mip_level, 1);
+	size.height = max_t(u32, base_level.height >> mip_level, 1);
+	size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+	return size;
+}
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+				 const surf_size_struct *pixel_size,
+				 surf_size_struct *block_size)
+{
+	block_size->width = DIV_ROUND_UP(pixel_size->width,
+					 desc->block_size.width);
+	block_size->height = DIV_ROUND_UP(pixel_size->height,
+					  desc->block_size.height);
+	block_size->depth = DIV_ROUND_UP(pixel_size->depth,
+					 desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+	return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+			      const surf_size_struct *size)
+{
+	u32 pitch;
+	surf_size_struct blocks;
+
+	svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+	pitch = blocks.width * desc->pitch_bytes_per_block;
+
+	return pitch;
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * svga3dsurface_get_image_buffer_size --
+ *
+ *      Return the number of bytes of buffer space required to store
+ *      one image of a surface, optionally using the specified pitch.
+ *
+ *      If pitch is zero, it is assumed that rows are tightly packed.
+ *
+ *      This function is overflow-safe. If the result would have
+ *      overflowed, instead we return MAX_UINT32.
+ *
+ * Results:
+ *      Byte count.
+ *
+ * Side effects:
+ *      None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+				    const surf_size_struct *size,
+				    u32 pitch)
+{
+	surf_size_struct image_blocks;
+	u32 slice_size, total_size;
+
+	svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+	if (svga3dsurface_is_planar_surface(desc)) {
+		total_size = clamped_umul32(image_blocks.width,
+					    image_blocks.height);
+		total_size = clamped_umul32(total_size, image_blocks.depth);
+		total_size = clamped_umul32(total_size, desc->bytes_per_block);
+		return total_size;
+	}
+
+	if (pitch == 0)
+		pitch = svga3dsurface_calculate_pitch(desc, size);
+
+	slice_size = clamped_umul32(image_blocks.height, pitch);
+	total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+	return total_size;
+}
+
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+				  surf_size_struct base_level_size,
+				  u32 num_mip_levels,
+				  bool cubemap)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	u32 total_size = 0;
+	u32 mip;
+
+	for (mip = 0; mip < num_mip_levels; mip++) {
+		surf_size_struct size =
+			svga3dsurface_get_mip_size(base_level_size, mip);
+		total_size += svga3dsurface_get_image_buffer_size(desc,
+								  &size, 0);
+	}
+
+	if (cubemap)
+		total_size *= SVGA3D_MAX_SURFACE_FACES;
+
+	return total_size;
+}
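+
+/*
+ * Illustrative sketch (not part of the original header): one way a
+ * driver might use svga3dsurface_get_serialized_size() to size the
+ * backing store for a mipmapped cube map. The format, base size and
+ * mip count below are arbitrary example values.
+ */
+static inline u32
+example_cubemap_backing_size(void)
+{
+	surf_size_struct base;
+
+	base.width = 256;
+	base.height = 256;
+	base.depth = 1;
+
+	/* 256x256 SVGA3D_R8G8B8A8_UNORM cube map with a full mip chain. */
+	return svga3dsurface_get_serialized_size(SVGA3D_R8G8B8A8_UNORM,
+						 base, 9, true);
+}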
+
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @format: The surface format.
+ * @width: The image width in pixels.
+ * @height: The image height in pixels.
+ * @x: The x coordinate of the pixel.
+ * @y: The y coordinate of the pixel.
+ * @z: The z coordinate (slice) of the pixel.
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+			       u32 width, u32 height,
+			       u32 x, u32 y, u32 z)
+{
+	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+	const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+	const u32 bd = desc->block_size.depth;
+	const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+	const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+	const u32 offset = (z / bd * imgstride +
+			    y / bh * rowstride +
+			    x / bw * desc->bytes_per_block);
+	return offset;
+}
+
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+			       surf_size_struct baseLevelSize,
+			       u32 numMipLevels,
+			       u32 face,
+			       u32 mip)
+
+{
+	u32 offset;
+	u32 mipChainBytes;
+	u32 mipChainBytesToLevel;
+	u32 i;
+	const struct svga3d_surface_desc *desc;
+	surf_size_struct mipSize;
+	u32 bytes;
+
+	desc = svga3dsurface_get_desc(format);
+
+	mipChainBytes = 0;
+	mipChainBytesToLevel = 0;
+	for (i = 0; i < numMipLevels; i++) {
+		mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+		bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+		mipChainBytes += bytes;
+		if (i < mip)
+			mipChainBytesToLevel += bytes;
+	}
+
+	offset = mipChainBytes * face + mipChainBytesToLevel;
+
+	return offset;
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/svga_escape.h b/linux-imx/drivers/gpu/drm/vmwgfx/svga_escape.h
new file mode 100644
index 0000000..8e8d968
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/svga_escape.h
@@ -0,0 +1,89 @@
+/**********************************************************
+ * Copyright 2007-2009 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_escape.h --
+ *
+ *    Definitions for our own (vendor-specific) SVGA Escape commands.
+ */
+
+#ifndef _SVGA_ESCAPE_H_
+#define _SVGA_ESCAPE_H_
+
+
+/*
+ * Namespace IDs for the escape command
+ */
+
+#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
+#define SVGA_ESCAPE_NSID_DEVEL  0xFFFFFFFF
+
+
+/*
+ * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
+ * the first DWORD of escape data (after the nsID and size). As a
+ * guideline we're using the high word and low word as a major and
+ * minor command number, respectively.
+ *
+ * Major command number allocation:
+ *
+ *   0000: Reserved
+ *   0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
+ *   0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
+ *   0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
+ */
+
+#define SVGA_ESCAPE_VMWARE_MAJOR_MASK  0xFFFF0000
+
+
+/*
+ * SVGA Hint commands.
+ *
+ * These escapes let the SVGA driver provide optional information to
+ * the host about the state of the guest or guest applications. The
+ * host can use these hints to make user interface or performance
+ * decisions.
+ *
+ * Notes:
+ *
+ *   - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
+ *     that use the SVGA Screen Object extension. Instead of sending
+ *     this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
+ *     Screen Object.
+ */
+
+#define SVGA_ESCAPE_VMWARE_HINT               0x00030000
+#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN    0x00030001  /* Deprecated */
+
+typedef
+struct {
+   uint32 command;
+   uint32 fullscreen;
+   struct {
+      int32 x, y;
+   } monitorPosition;
+} SVGAEscapeHintFullscreen;
+
+#endif /* _SVGA_ESCAPE_H_ */
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/svga_overlay.h b/linux-imx/drivers/gpu/drm/vmwgfx/svga_overlay.h
new file mode 100644
index 0000000..f38416f
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/svga_overlay.h
@@ -0,0 +1,201 @@
+/**********************************************************
+ * Copyright 2007-2009 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_overlay.h --
+ *
+ *    Definitions for video-overlay support.
+ */
+
+#ifndef _SVGA_OVERLAY_H_
+#define _SVGA_OVERLAY_H_
+
+#include "svga_reg.h"
+
+/*
+ * Video formats we support
+ */
+
+#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */
+#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */
+#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */
+
+typedef enum {
+   SVGA_OVERLAY_FORMAT_INVALID = 0,
+   SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
+   SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
+   SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
+} SVGAOverlayFormat;
+
+#define SVGA_VIDEO_COLORKEY_MASK             0x00ffffff
+
+#define SVGA_ESCAPE_VMWARE_VIDEO             0x00020000
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS    0x00020001
+        /* FIFO escape layout:
+         * Type, Stream Id, (Register Id, Value) pairs */
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH       0x00020002
+        /* FIFO escape layout:
+         * Type, Stream Id */
+
+typedef
+struct SVGAEscapeVideoSetRegs {
+   struct {
+      uint32 cmdType;
+      uint32 streamId;
+   } header;
+
+   /* May include zero or more items. */
+   struct {
+      uint32 registerId;
+      uint32 value;
+   } items[1];
+} SVGAEscapeVideoSetRegs;
+
+typedef
+struct SVGAEscapeVideoFlush {
+   uint32 cmdType;
+   uint32 streamId;
+} SVGAEscapeVideoFlush;
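+
+/*
+ * Illustrative sketch (not part of the original header): populating a
+ * flush command for stream 0, matching the FIFO escape layout noted
+ * above (Type, Stream Id).
+ */
+static inline void
+exampleVideoFlushInit(SVGAEscapeVideoFlush *flush)
+{
+   flush->cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
+   flush->streamId = 0;
+}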
+
+
+/*
+ * Struct definitions for the video overlay commands built on
+ * SVGAFifoCmdEscape.
+ */
+typedef
+struct {
+   uint32 command;
+   uint32 overlay;
+} SVGAFifoEscapeCmdVideoBase;
+
+typedef
+struct {
+   SVGAFifoEscapeCmdVideoBase videoCmd;
+} SVGAFifoEscapeCmdVideoFlush;
+
+typedef
+struct {
+   SVGAFifoEscapeCmdVideoBase videoCmd;
+   struct {
+      uint32 regId;
+      uint32 value;
+   } items[1];
+} SVGAFifoEscapeCmdVideoSetRegs;
+
+typedef
+struct {
+   SVGAFifoEscapeCmdVideoBase videoCmd;
+   struct {
+      uint32 regId;
+      uint32 value;
+   } items[SVGA_VIDEO_NUM_REGS];
+} SVGAFifoEscapeCmdVideoSetAllRegs;
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * VMwareVideoGetAttributes --
+ *
+ *      Computes the size, pitches and offsets for YUV frames.
+ *
+ * Results:
+ *      TRUE on success; FALSE on failure.
+ *
+ * Side effects:
+ *      Pitches and offsets for the given YUV frame are put in 'pitches'
+ *      and 'offsets' respectively. They are both optional though.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline bool
+VMwareVideoGetAttributes(const SVGAOverlayFormat format,    /* IN */
+                         uint32 *width,                     /* IN / OUT */
+                         uint32 *height,                    /* IN / OUT */
+                         uint32 *size,                      /* OUT */
+                         uint32 *pitches,                   /* OUT (optional) */
+                         uint32 *offsets)                   /* OUT (optional) */
+{
+    int tmp;
+
+    *width = (*width + 1) & ~1;
+
+    if (offsets) {
+        offsets[0] = 0;
+    }
+
+    switch (format) {
+    case VMWARE_FOURCC_YV12:
+       *height = (*height + 1) & ~1;
+       *size = (*width + 3) & ~3;
+
+       if (pitches) {
+          pitches[0] = *size;
+       }
+
+       *size *= *height;
+
+       if (offsets) {
+          offsets[1] = *size;
+       }
+
+       tmp = ((*width >> 1) + 3) & ~3;
+
+       if (pitches) {
+          pitches[1] = pitches[2] = tmp;
+       }
+
+       tmp *= (*height >> 1);
+       *size += tmp;
+
+       if (offsets) {
+          offsets[2] = *size;
+       }
+
+       *size += tmp;
+       break;
+
+    case VMWARE_FOURCC_YUY2:
+    case VMWARE_FOURCC_UYVY:
+       *size = *width * 2;
+
+       if (pitches) {
+          pitches[0] = *size;
+       }
+
+       *size *= *height;
+       break;
+
+    default:
+       return false;
+    }
+
+    return true;
+}
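+
+/*
+ * Illustrative sketch (not part of the original header): querying the
+ * layout of a 320x240 YV12 frame. On success, 'size' holds the total
+ * frame size in bytes, and pitches[]/offsets[] describe the three
+ * planes.
+ */
+static inline bool
+exampleYV12Layout(uint32 *size, uint32 pitches[3], uint32 offsets[3])
+{
+    uint32 width = 320, height = 240;
+
+    return VMwareVideoGetAttributes(SVGA_OVERLAY_FORMAT_YV12, &width,
+                                    &height, size, pitches, offsets);
+}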
+
+#endif /* _SVGA_OVERLAY_H_ */
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/svga_reg.h b/linux-imx/drivers/gpu/drm/vmwgfx/svga_reg.h
new file mode 100644
index 0000000..01f63cb
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -0,0 +1,1552 @@
+/**********************************************************
+ * Copyright 1998-2009 VMware, Inc.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_reg.h --
+ *
+ *    Virtual hardware definitions for the VMware SVGA II device.
+ */
+
+#ifndef _SVGA_REG_H_
+#define _SVGA_REG_H_
+
+/*
+ * PCI device IDs.
+ */
+#define PCI_VENDOR_ID_VMWARE            0x15AD
+#define PCI_DEVICE_ID_VMWARE_SVGA2      0x0405
+
+/*
+ * SVGA_REG_ENABLE bit definitions.
+ */
+#define SVGA_REG_ENABLE_DISABLE     0
+#define SVGA_REG_ENABLE_ENABLE      1
+#define SVGA_REG_ENABLE_HIDE        2
+#define SVGA_REG_ENABLE_ENABLE_HIDE (SVGA_REG_ENABLE_ENABLE |\
+				     SVGA_REG_ENABLE_HIDE)
+
+/*
+ * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
+ * cursor bypass mode. This is still supported, but no new guest
+ * drivers should use it.
+ */
+#define SVGA_CURSOR_ON_HIDE            0x0   /* Must be 0 to maintain backward compatibility */
+#define SVGA_CURSOR_ON_SHOW            0x1   /* Must be 1 to maintain backward compatibility */
+#define SVGA_CURSOR_ON_REMOVE_FROM_FB  0x2   /* Remove the cursor from the framebuffer because we need to see what's under it */
+#define SVGA_CURSOR_ON_RESTORE_TO_FB   0x3   /* Put the cursor back in the framebuffer so the user can see it */
+
+/*
+ * The maximum framebuffer size that can be traced, e.g. for guests in VESA
+ * mode.
+ * The changeMap in the monitor is proportional to this number. Therefore, we'd
+ * like to keep it as small as possible to reduce monitor overhead (using
+ * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
+ * 4k!).
+ *
+ * NB: For compatibility reasons, this value must be greater than 0xff0000.
+ *     See bug 335072.
+ */
+#define SVGA_FB_MAX_TRACEABLE_SIZE      0x1000000
+
+#define SVGA_MAX_PSEUDOCOLOR_DEPTH      8
+#define SVGA_MAX_PSEUDOCOLORS           (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
+#define SVGA_NUM_PALETTE_REGS           (3 * SVGA_MAX_PSEUDOCOLORS)
+
+#define SVGA_MAGIC         0x900000UL
+#define SVGA_MAKE_ID(ver)  (SVGA_MAGIC << 8 | (ver))
+
+/* Version 2 let the address of the frame buffer be unsigned on Win32 */
+#define SVGA_VERSION_2     2
+#define SVGA_ID_2          SVGA_MAKE_ID(SVGA_VERSION_2)
+
+/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
+   PALETTE_BASE has moved */
+#define SVGA_VERSION_1     1
+#define SVGA_ID_1          SVGA_MAKE_ID(SVGA_VERSION_1)
+
+/* Version 0 is the initial version */
+#define SVGA_VERSION_0     0
+#define SVGA_ID_0          SVGA_MAKE_ID(SVGA_VERSION_0)
+
+/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
+#define SVGA_ID_INVALID    0xFFFFFFFF
+
+/* Port offsets, relative to BAR0 */
+#define SVGA_INDEX_PORT         0x0
+#define SVGA_VALUE_PORT         0x1
+#define SVGA_BIOS_PORT          0x2
+#define SVGA_IRQSTATUS_PORT     0x8
+
+/*
+ * Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
+ *
+ * Interrupts are only supported when the
+ * SVGA_CAP_IRQMASK capability is present.
+ */
+#define SVGA_IRQFLAG_ANY_FENCE            0x1    /* Any fence was passed */
+#define SVGA_IRQFLAG_FIFO_PROGRESS        0x2    /* Made forward progress in the FIFO */
+#define SVGA_IRQFLAG_FENCE_GOAL           0x4    /* SVGA_FIFO_FENCE_GOAL reached */
+
+/*
+ * Registers
+ */
+
+enum {
+   SVGA_REG_ID = 0,
+   SVGA_REG_ENABLE = 1,
+   SVGA_REG_WIDTH = 2,
+   SVGA_REG_HEIGHT = 3,
+   SVGA_REG_MAX_WIDTH = 4,
+   SVGA_REG_MAX_HEIGHT = 5,
+   SVGA_REG_DEPTH = 6,
+   SVGA_REG_BITS_PER_PIXEL = 7,       /* Current bpp in the guest */
+   SVGA_REG_PSEUDOCOLOR = 8,
+   SVGA_REG_RED_MASK = 9,
+   SVGA_REG_GREEN_MASK = 10,
+   SVGA_REG_BLUE_MASK = 11,
+   SVGA_REG_BYTES_PER_LINE = 12,
+   SVGA_REG_FB_START = 13,            /* (Deprecated) */
+   SVGA_REG_FB_OFFSET = 14,
+   SVGA_REG_VRAM_SIZE = 15,
+   SVGA_REG_FB_SIZE = 16,
+
+   /* ID 0 implementation only had the above registers, then the palette */
+
+   SVGA_REG_CAPABILITIES = 17,
+   SVGA_REG_MEM_START = 18,           /* (Deprecated) */
+   SVGA_REG_MEM_SIZE = 19,
+   SVGA_REG_CONFIG_DONE = 20,         /* Set when memory area configured */
+   SVGA_REG_SYNC = 21,                /* See "FIFO Synchronization Registers" */
+   SVGA_REG_BUSY = 22,                /* See "FIFO Synchronization Registers" */
+   SVGA_REG_GUEST_ID = 23,            /* Set guest OS identifier */
+   SVGA_REG_CURSOR_ID = 24,           /* (Deprecated) */
+   SVGA_REG_CURSOR_X = 25,            /* (Deprecated) */
+   SVGA_REG_CURSOR_Y = 26,            /* (Deprecated) */
+   SVGA_REG_CURSOR_ON = 27,           /* (Deprecated) */
+   SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
+   SVGA_REG_SCRATCH_SIZE = 29,        /* Number of scratch registers */
+   SVGA_REG_MEM_REGS = 30,            /* Number of FIFO registers */
+   SVGA_REG_NUM_DISPLAYS = 31,        /* (Deprecated) */
+   SVGA_REG_PITCHLOCK = 32,           /* Fixed pitch for all modes */
+   SVGA_REG_IRQMASK = 33,             /* Interrupt mask */
+
+   /* Legacy multi-monitor support */
+   SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
+   SVGA_REG_DISPLAY_ID = 35,        /* Display ID for the following display attributes */
+   SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
+   SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
+   SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
+   SVGA_REG_DISPLAY_WIDTH = 39,     /* The display's width */
+   SVGA_REG_DISPLAY_HEIGHT = 40,    /* The display's height */
+
+   /* See "Guest memory regions" below. */
+   SVGA_REG_GMR_ID = 41,
+   SVGA_REG_GMR_DESCRIPTOR = 42,
+   SVGA_REG_GMR_MAX_IDS = 43,
+   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
+
+   SVGA_REG_TRACES = 45,            /* Enable trace-based updates even when FIFO is on */
+   SVGA_REG_GMRS_MAX_PAGES = 46,    /* Maximum number of 4KB pages for all GMRs */
+   SVGA_REG_MEMORY_SIZE = 47,       /* Total dedicated device memory excluding FIFO */
+   SVGA_REG_TOP = 48,               /* Must be 1 more than the last register */
+
+   SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
+   /* Next 768 (== 256*3) registers exist for colormap */
+
+   SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
+                                    /* Base of scratch registers */
+   /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
+      First 4 are reserved for VESA BIOS Extension; any remaining are for
+      the use of the current SVGA driver. */
+};
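+
+/*
+ * Illustrative sketch (not part of the original header): version
+ * negotiation via SVGA_REG_ID. The guest writes the highest SVGA_ID_*
+ * it supports and reads the register back; if the device does not
+ * echo the value, the guest retries with the next lower version. The
+ * svga_write_reg()/svga_read_reg() accessors are hypothetical wrappers
+ * around the index/value port pair above.
+ */
+#if 0
+static uint32 svga_negotiate_version(void)
+{
+   uint32 id;
+
+   for (id = SVGA_ID_2; id >= SVGA_ID_0; id--) {
+      svga_write_reg(SVGA_REG_ID, id);
+      if (svga_read_reg(SVGA_REG_ID) == id)
+         return id;            /* Device accepted this version. */
+   }
+   return SVGA_ID_INVALID;     /* No version in common with the device. */
+}
+#endif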
+
+
+/*
+ * Guest memory regions (GMRs):
+ *
+ * This is a new memory mapping feature available in SVGA devices
+ * which have the SVGA_CAP_GMR bit set. Previously, there were two
+ * fixed memory regions available with which to share data between the
+ * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
+ * are our name for an extensible way of providing arbitrary DMA
+ * buffers for use between the driver and the SVGA device. They are a
+ * new alternative to framebuffer memory, usable for both 2D and 3D
+ * graphics operations.
+ *
+ * Since GMR mapping must be done synchronously with guest CPU
+ * execution, we use a new pair of SVGA registers:
+ *
+ *   SVGA_REG_GMR_ID --
+ *
+ *     Read/write.
+ *     This register holds the 32-bit ID (a small positive integer)
+ *     of a GMR to create, delete, or redefine. Writing this register
+ *     has no side-effects.
+ *
+ *   SVGA_REG_GMR_DESCRIPTOR --
+ *
+ *     Write-only.
+ *     Writing this register will create, delete, or redefine the GMR
+ *     specified by the above ID register. If this register is zero,
+ *     the GMR is deleted. Any pointers into this GMR (including those
+ *     currently being processed by FIFO commands) will be
+ *     synchronously invalidated.
+ *
+ *     If this register is nonzero, it must be the physical page
+ *     number (PPN) of a data structure which describes the physical
+ *     layout of the memory region this GMR should describe. The
+ *     descriptor structure will be read synchronously by the SVGA
+ *     device when this register is written. The descriptor need not
+ *     remain allocated for the lifetime of the GMR.
+ *
+ *     The guest driver should write SVGA_REG_GMR_ID first, then
+ *     SVGA_REG_GMR_DESCRIPTOR.
+ *
+ *   SVGA_REG_GMR_MAX_IDS --
+ *
+ *     Read-only.
+ *     The SVGA device may choose to support a maximum number of
+ *     user-defined GMR IDs. This register holds the number of supported
+ *     IDs. (The maximum supported ID plus 1)
+ *
+ *   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
+ *
+ *     Read-only.
+ *     The SVGA device may choose to put a limit on the total number
+ *     of SVGAGuestMemDescriptor structures it will read when defining
+ *     a single GMR.
+ *
+ * The descriptor structure is an array of SVGAGuestMemDescriptor
+ * structures. Each structure may do one of three things:
+ *
+ *   - Terminate the GMR descriptor list.
+ *     (ppn==0, numPages==0)
+ *
+ *   - Add a PPN or range of PPNs to the GMR's virtual address space.
+ *     (ppn != 0, numPages != 0)
+ *
+ *   - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
+ *     support multi-page GMR descriptor tables without forcing the
+ *     driver to allocate physically contiguous memory.
+ *     (ppn != 0, numPages == 0)
+ *
+ * Note that each physical page of SVGAGuestMemDescriptor structures
+ * can describe at least 2MB of guest memory. If the driver needs to
+ * use more than one page of descriptor structures, it must use one of
+ * its SVGAGuestMemDescriptors to point to an additional page.  The
+ * device will never automatically cross a page boundary.
+ *
+ * Once the driver has described a GMR, it is immediately available
+ * for use via any FIFO command that uses an SVGAGuestPtr structure.
+ * These pointers include a GMR identifier plus an offset into that
+ * GMR.
+ *
+ * The driver must check the SVGA_CAP_GMR bit before using the GMR
+ * registers.
+ */
+
+/*
+ * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
+ * memory as well.  In the future, these IDs could even be used to
+ * allow legacy memory regions to be redefined by the guest as GMRs.
+ *
+ * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
+ * is being phased out. Please try to use user-defined GMRs whenever
+ * possible.
+ */
+#define SVGA_GMR_NULL         ((uint32) -1)
+#define SVGA_GMR_FRAMEBUFFER  ((uint32) -2)  /* Guest Framebuffer (GFB) */
+
+typedef
+struct SVGAGuestMemDescriptor {
+   uint32 ppn;
+   uint32 numPages;
+} SVGAGuestMemDescriptor;
+
+typedef
+struct SVGAGuestPtr {
+   uint32 gmrId;
+   uint32 offset;
+} SVGAGuestPtr;
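+
+/*
+ * Illustrative sketch (not part of the original header): building a
+ * minimal descriptor list of the kind described above. The PPNs are
+ * arbitrary example values; a real driver would select the GMR with
+ * SVGA_REG_GMR_ID and then write the PPN of the page holding this
+ * array to SVGA_REG_GMR_DESCRIPTOR.
+ */
+static inline void
+exampleBuildGmrDescriptors(SVGAGuestMemDescriptor desc[3])
+{
+   desc[0].ppn = 0x1234;   desc[0].numPages = 16;  /* 16 pages at PPN 0x1234 */
+   desc[1].ppn = 0x5678;   desc[1].numPages = 4;   /*  4 pages at PPN 0x5678 */
+   desc[2].ppn = 0;        desc[2].numPages = 0;   /* Terminator */
+}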
+
+
+/*
+ * SVGAGMRImageFormat --
+ *
+ *    This is a packed representation of the source 2D image format
+ *    for a GMR-to-screen blit. Currently it is defined as an encoding
+ *    of the screen's color depth and bits-per-pixel, however, 16 bits
+ *    are reserved for future use to identify other encodings (such as
+ *    RGBA or higher-precision images).
+ *
+ *    Currently supported formats:
+ *
+ *       bpp depth  Format Name
+ *       --- -----  -----------
+ *        32    24  32-bit BGRX
+ *        24    24  24-bit BGR
+ *        16    16  RGB 5-6-5
+ *        16    15  RGB 5-5-5
+ *
+ */
+
+typedef
+struct SVGAGMRImageFormat {
+   union {
+      struct {
+         uint32 bitsPerPixel : 8;
+         uint32 colorDepth   : 8;
+         uint32 reserved     : 16;  /* Must be zero */
+      };
+
+      uint32 value;
+   };
+} SVGAGMRImageFormat;
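+
+/*
+ * Illustrative sketch (not part of the original header): encoding the
+ * 32-bit BGRX entry from the format table above.
+ */
+static inline SVGAGMRImageFormat
+exampleBgrxImageFormat(void)
+{
+   SVGAGMRImageFormat fmt;
+
+   fmt.value = 0;            /* Clears 'reserved', which must be zero. */
+   fmt.bitsPerPixel = 32;
+   fmt.colorDepth = 24;
+   return fmt;
+}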
+
+typedef
+struct SVGAGuestImage {
+   SVGAGuestPtr         ptr;
+
+   /*
+    * A note on interpretation of pitch: This value of pitch is the
+    * number of bytes between vertically adjacent image
+    * blocks. Normally this is the number of bytes between the first
+    * pixel of two adjacent scanlines. With compressed textures,
+    * however, this may represent the number of bytes between
+    * compression blocks rather than between rows of pixels.
+    *
+    * XXX: Compressed textures currently must be tightly packed in guest memory.
+    *
+    * If the image is 1-dimensional, pitch is ignored.
+    *
+    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
+    * assuming each row of blocks is tightly packed.
+    */
+   uint32 pitch;
+} SVGAGuestImage;
+
+/*
+ * SVGAColorBGRX --
+ *
+ *    A 24-bit color format (BGRX), which does not depend on the
+ *    format of the legacy guest framebuffer (GFB) or the current
+ *    GMRFB state.
+ */
+
+typedef
+struct SVGAColorBGRX {
+   union {
+      struct {
+         uint32 b : 8;
+         uint32 g : 8;
+         uint32 r : 8;
+         uint32 x : 8;  /* Unused */
+      };
+
+      uint32 value;
+   };
+} SVGAColorBGRX;
+
+
+/*
+ * SVGASignedRect --
+ * SVGASignedPoint --
+ *
+ *    Signed rectangle and point primitives. These are used by the new
+ *    2D primitives for drawing to Screen Objects, which can occupy a
+ *    signed virtual coordinate space.
+ *
+ *    SVGASignedRect specifies a half-open interval: the (left, top)
+ *    pixel is part of the rectangle, but the (right, bottom) pixel is
+ *    not.
+ */
+
+typedef
+struct SVGASignedRect {
+   int32  left;
+   int32  top;
+   int32  right;
+   int32  bottom;
+} SVGASignedRect;
+
+typedef
+struct SVGASignedPoint {
+   int32  x;
+   int32  y;
+} SVGASignedPoint;
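+
+/*
+ * Illustrative sketch (not part of the original header): since
+ * SVGASignedRect is half-open, width and height follow directly.
+ */
+static inline int32
+exampleRectWidth(const SVGASignedRect *rect)
+{
+   return rect->right - rect->left;
+}
+
+static inline int32
+exampleRectHeight(const SVGASignedRect *rect)
+{
+   return rect->bottom - rect->top;
+}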
+
+
+/*
+ *  Capabilities
+ *
+ *  Note the holes in the bitfield. Missing bits have been deprecated,
+ *  and must not be reused. Those capabilities will never be reported
+ *  by new versions of the SVGA device.
+ *
+ * SVGA_CAP_GMR2 --
+ *    Provides asynchronous commands to define and remap guest memory
+ *    regions.  Adds device registers SVGA_REG_GMRS_MAX_PAGES and
+ *    SVGA_REG_MEMORY_SIZE.
+ *
+ * SVGA_CAP_SCREEN_OBJECT_2 --
+ *    Allow screen object support, and require backing stores from the
+ *    guest for each screen object.
+ */
+
+#define SVGA_CAP_NONE               0x00000000
+#define SVGA_CAP_RECT_COPY          0x00000002
+#define SVGA_CAP_CURSOR             0x00000020
+#define SVGA_CAP_CURSOR_BYPASS      0x00000040   /* Legacy (Use Cursor Bypass 3 instead) */
+#define SVGA_CAP_CURSOR_BYPASS_2    0x00000080   /* Legacy (Use Cursor Bypass 3 instead) */
+#define SVGA_CAP_8BIT_EMULATION     0x00000100
+#define SVGA_CAP_ALPHA_CURSOR       0x00000200
+#define SVGA_CAP_3D                 0x00004000
+#define SVGA_CAP_EXTENDED_FIFO      0x00008000
+#define SVGA_CAP_MULTIMON           0x00010000   /* Legacy multi-monitor support */
+#define SVGA_CAP_PITCHLOCK          0x00020000
+#define SVGA_CAP_IRQMASK            0x00040000
+#define SVGA_CAP_DISPLAY_TOPOLOGY   0x00080000   /* Legacy multi-monitor support */
+#define SVGA_CAP_GMR                0x00100000
+#define SVGA_CAP_TRACES             0x00200000
+#define SVGA_CAP_GMR2               0x00400000
+#define SVGA_CAP_SCREEN_OBJECT_2    0x00800000
+
+
+/*
+ * FIFO register indices.
+ *
+ * The FIFO is a chunk of device memory mapped into guest physmem.  It
+ * is always treated as 32-bit words.
+ *
+ * The guest driver gets to decide how to partition it between
+ * - FIFO registers (there are always at least 4, specifying where the
+ *   following data area is and how much data it contains; there may be
+ *   more registers following these, depending on the FIFO protocol
+ *   version in use)
+ * - FIFO data, written by the guest and slurped out by the VMX.
+ * These indices are 32-bit word offsets into the FIFO.
+ */
+
+enum {
+   /*
+    * Block 1 (basic registers): The originally defined FIFO registers.
+    * These exist and are valid for all versions of the FIFO protocol.
+    */
+
+   SVGA_FIFO_MIN = 0,
+   SVGA_FIFO_MAX,       /* The distance from MIN to MAX must be at least 10K */
+   SVGA_FIFO_NEXT_CMD,
+   SVGA_FIFO_STOP,
+
+   /*
+    * Block 2 (extended registers): Mandatory registers for the extended
+    * FIFO.  These exist if the SVGA caps register includes
+    * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
+    * associated capability bit is enabled.
+    *
+    * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
+    * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
+    * This means that the guest has to test individually (in most cases
+    * using FIFO caps) for the presence of registers after this; the VMX
+    * can define "extended FIFO" to mean whatever it wants, and currently
+    * won't enable it unless there's room for that set and much more.
+    */
+
+   SVGA_FIFO_CAPABILITIES = 4,
+   SVGA_FIFO_FLAGS,
+   /* Valid with SVGA_FIFO_CAP_FENCE: */
+   SVGA_FIFO_FENCE,
+
+   /*
+    * Block 3a (optional extended registers): Additional registers for the
+    * extended FIFO, whose presence isn't actually implied by
+    * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
+    * leave room for them.
+    *
+    * The VMX currently considers these block 3a registers mandatory for
+    * the extended FIFO.
+    */
+
+   /* Valid if exists (i.e. if extended FIFO enabled): */
+   SVGA_FIFO_3D_HWVERSION,       /* See SVGA3dHardwareVersion in svga3d_reg.h */
+   /* Valid with SVGA_FIFO_CAP_PITCHLOCK: */
+   SVGA_FIFO_PITCHLOCK,
+
+   /* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */
+   SVGA_FIFO_CURSOR_ON,          /* Cursor bypass 3 show/hide register */
+   SVGA_FIFO_CURSOR_X,           /* Cursor bypass 3 x register */
+   SVGA_FIFO_CURSOR_Y,           /* Cursor bypass 3 y register */
+   SVGA_FIFO_CURSOR_COUNT,       /* Incremented when any of the other 3 change */
+   SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
+
+   /* Valid with SVGA_FIFO_CAP_RESERVE: */
+   SVGA_FIFO_RESERVED,           /* Bytes past NEXT_CMD with real contents */
+
+   /*
+    * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2:
+    *
+    * By default this is SVGA_ID_INVALID, to indicate that the cursor
+    * coordinates are specified relative to the virtual root. If this
+    * is set to a specific screen ID, cursor position is reinterpreted
+    * as a signed offset relative to that screen's origin.
+    */
+   SVGA_FIFO_CURSOR_SCREEN_ID,
+
+   /*
+    * Valid with SVGA_FIFO_CAP_DEAD
+    *
+    * An arbitrary value written by the host; drivers should not use it.
+    */
+   SVGA_FIFO_DEAD,
+
+   /*
+    * Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED:
+    *
+    * Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h)
+    * on platforms that can enforce graphics resource limits.
+    */
+   SVGA_FIFO_3D_HWVERSION_REVISED,
+
+   /*
+    * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
+    * registers, but this must be done carefully and with judicious use of
+    * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
+    * enough to tell you whether the register exists: we've shipped drivers
+    * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
+    * the earlier ones.  The actual order of introduction was:
+    * - PITCHLOCK
+    * - 3D_CAPS
+    * - CURSOR_* (cursor bypass 3)
+    * - RESERVED
+    * So, code that wants to know whether it can use any of the
+    * aforementioned registers, or anything else added after PITCHLOCK and
+    * before 3D_CAPS, needs to reason about something other than
+    * SVGA_FIFO_MIN.
+    */
+
+   /*
+    * 3D caps block space; valid with 3D hardware version >=
+    * SVGA3D_HWVERSION_WS6_B1.
+    */
+   SVGA_FIFO_3D_CAPS      = 32,
+   SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
+
+   /*
+    * End of VMX's current definition of "extended-FIFO registers".
+    * Registers before here are always enabled/disabled as a block; either
+    * the extended FIFO is enabled and includes all preceding registers, or
+    * it's disabled entirely.
+    *
+    * Block 3b (truly optional extended registers): Additional registers for
+    * the extended FIFO, which the VMX already knows how to enable and
+    * disable with correct granularity.
+    *
+    * Registers after here exist if and only if the guest SVGA driver
+    * sets SVGA_FIFO_MIN high enough to leave room for them.
+    */
+
+   /* Valid if register exists: */
+   SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
+   SVGA_FIFO_FENCE_GOAL,         /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
+   SVGA_FIFO_BUSY,               /* See "FIFO Synchronization Registers" */
+
+   /*
+    * Always keep this last.  This defines the maximum number of
+    * registers we know about.  At power-on, this value is placed in
+    * the SVGA_REG_MEM_REGS register, and we expect the guest driver
+    * to allocate this much space in FIFO memory for registers.
+    */
+    SVGA_FIFO_NUM_REGS
+};
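+
+/*
+ * Illustrative sketch (not part of the original header): partitioning
+ * the FIFO as described above. The guest reserves room for every
+ * register it knows about and points NEXT_CMD/STOP at the start of
+ * the data area. 'fifo' is assumed to be the mapped FIFO memory and
+ * 'fifo_size' its total size in bytes.
+ */
+static inline void
+exampleFifoPartition(volatile uint32 *fifo, uint32 fifo_size)
+{
+   fifo[SVGA_FIFO_MIN] = SVGA_FIFO_NUM_REGS * sizeof(uint32);
+   fifo[SVGA_FIFO_MAX] = fifo_size;
+   fifo[SVGA_FIFO_NEXT_CMD] = fifo[SVGA_FIFO_MIN];
+   fifo[SVGA_FIFO_STOP] = fifo[SVGA_FIFO_MIN];
+}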
+
+
+/*
+ * Definition of registers included in extended FIFO support.
+ *
+ * The guest SVGA driver gets to allocate the FIFO between registers
+ * and data.  It must always allocate at least 4 registers, but old
+ * drivers stopped there.
+ *
+ * The VMX will enable extended FIFO support if and only if the guest
+ * left enough room for all registers defined as part of the mandatory
+ * set for the extended FIFO.
+ *
+ * Note that the guest drivers typically allocate the FIFO only at
+ * initialization time, not at mode switches, so it's likely that the
+ * number of FIFO registers won't change without a reboot.
+ *
+ * All registers less than this value are guaranteed to be present if
+ * svgaUser->fifo.extended is set. Any later registers must be tested
+ * individually for compatibility at each use (in the VMX).
+ *
+ * This value is used only by the VMX, so it can change without
+ * affecting driver compatibility; keep it that way.
+ */
+#define SVGA_FIFO_EXTENDED_MANDATORY_REGS  (SVGA_FIFO_3D_CAPS_LAST + 1)
+
+
+/*
+ * FIFO Synchronization Registers
+ *
+ *  This explains the relationship between the various FIFO
+ *  sync-related registers in IOSpace and in FIFO space.
+ *
+ *  SVGA_REG_SYNC --
+ *
+ *       The SYNC register can be used in two different ways by the guest:
+ *
+ *         1. If the guest wishes to fully sync (drain) the FIFO,
+ *            it will write once to SYNC then poll on the BUSY
+ *            register. The FIFO is sync'ed once BUSY is zero.
+ *
+ *         2. If the guest wants to asynchronously wake up the host,
+ *            it will write once to SYNC without polling on BUSY.
+ *            Ideally it will do this after some new commands have
+ *            been placed in the FIFO, and after reading a zero
+ *            from SVGA_FIFO_BUSY.
+ *
+ *       (1) is the original behaviour that SYNC was designed to
+ *       support.  Originally, a write to SYNC would implicitly
+ *       trigger a read from BUSY. This causes us to synchronously
+ *       process the FIFO.
+ *
+ *       This behaviour has since been changed so that writing SYNC
+ *       will *not* implicitly cause a read from BUSY. Instead, it
+ *       makes a channel call which asynchronously wakes up the MKS
+ *       thread.
+ *
+ *       New guests can use this new behaviour to implement (2)
+ *       efficiently. This lets guests get the host's attention
+ *       without waiting for the MKS to poll, which gives us much
+ *       better CPU utilization on SMP hosts and on UP hosts while
+ *       we're blocked on the host GPU.
+ *
+ *       Old guests shouldn't notice the behaviour change. SYNC was
+ *       never guaranteed to process the entire FIFO, since it was
+ *       bounded to a particular number of CPU cycles. Old guests will
+ *       still loop on the BUSY register until the FIFO is empty.
+ *
+ *       Writing to SYNC currently has the following side-effects:
+ *
+ *         - Sets SVGA_REG_BUSY to TRUE (in the monitor)
+ *         - Asynchronously wakes up the MKS thread for FIFO processing
+ *         - The value written to SYNC is recorded as a "reason", for
+ *           stats purposes.
+ *
+ *       If SVGA_FIFO_BUSY is available, drivers are advised to only
+ *       write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
+ *       SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
+ *       eventually set SVGA_FIFO_BUSY on its own, but this approach
+ *       lets the driver avoid sending multiple asynchronous wakeup
+ *       messages to the MKS thread.
+ *
+ *  SVGA_REG_BUSY --
+ *
+ *       This register is set to TRUE when SVGA_REG_SYNC is written,
+ *       and it reads as FALSE when the FIFO has been completely
+ *       drained.
+ *
+ *       Every read from this register causes us to synchronously
+ *       process FIFO commands. There is no guarantee as to how many
+ *       commands each read will process.
+ *
+ *       CPU time spent processing FIFO commands will be billed to
+ *       the guest.
+ *
+ *       New drivers should avoid using this register unless they
+ *       need to guarantee that the FIFO is completely drained. It
+ *       is overkill for performing a sync-to-fence. Older drivers
+ *       will use this register for any type of synchronization.
+ *
+ *  SVGA_FIFO_BUSY --
+ *
+ *       This register is a fast way for the guest driver to check
+ *       whether the FIFO is already being processed. It reads and
+ *       writes at normal RAM speeds, with no monitor intervention.
+ *
+ *       If this register reads as TRUE, the host is guaranteeing that
+ *       any new commands written into the FIFO will be noticed before
+ *       the MKS goes back to sleep.
+ *
+ *       If this register reads as FALSE, no such guarantee can be
+ *       made.
+ *
+ *       The guest should use this register to quickly determine
+ *       whether or not it needs to wake up the host. If the guest
+ *       just wrote a command or group of commands that it would like
+ *       the host to begin processing, it should:
+ *
+ *         1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
+ *            action is necessary.
+ *
+ *         2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
+ *            code that we've already sent a SYNC to the host and we
+ *            don't need to send a duplicate.
+ *
+ *         3. Write a reason to SVGA_REG_SYNC. This will send an
+ *            asynchronous wakeup to the MKS thread.
+ */
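+
+/*
+ * Illustrative sketch (not part of the original header): the doorbell
+ * sequence from steps 1-3 above. svga_write_reg() is a hypothetical
+ * accessor for the index/value register pair; 'fifo' is the mapped
+ * FIFO memory.
+ */
+#if 0
+static void svga_fifo_wake_host(volatile uint32 *fifo, uint32 reason)
+{
+   if (fifo[SVGA_FIFO_BUSY])
+      return;                      /* 1. The host is already awake. */
+
+   fifo[SVGA_FIFO_BUSY] = 1;       /* 2. Record that a SYNC was sent. */
+   svga_write_reg(SVGA_REG_SYNC, reason);  /* 3. Asynchronous wakeup. */
+}
+#endif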
+
+
+/*
+ * FIFO Capabilities
+ *
+ *      Fence -- Fence register and command are supported
+ *      Accel Front -- Front buffer only commands are supported
+ *      Pitch Lock -- Pitch lock register is supported
+ *      Video -- SVGA Video overlay units are supported
+ *      Escape -- Escape command is supported
+ *
+ * XXX: Add longer descriptions for each capability, including a list
+ *      of the new features that each capability provides.
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT --
+ *
+ *    Provides dynamic multi-screen rendering, for improved Unity and
+ *    multi-monitor modes. With Screen Object, the guest can
+ *    dynamically create and destroy 'screens', which can represent
+ *    Unity windows or virtual monitors. Screen Object also provides
+ *    strong guarantees that DMA operations happen only when
+ *    guest-initiated. Screen Object deprecates the BAR1 guest
+ *    framebuffer (GFB) and all commands that work only with the GFB.
+ *
+ *    New registers:
+ *       FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
+ *
+ *    New 2D commands:
+ *       DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
+ *       BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
+ *
+ *    New 3D commands:
+ *       BLIT_SURFACE_TO_SCREEN
+ *
+ *    New guarantees:
+ *
+ *       - The host will not read or write guest memory, including the GFB,
+ *         except when explicitly initiated by a DMA command.
+ *
+ *       - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
+ *         is guaranteed to complete before any subsequent FENCEs.
+ *
+ *       - All legacy commands which affect a Screen (UPDATE, PRESENT,
+ *         PRESENT_READBACK) as well as new Screen blit commands will
+ *         all behave consistently as blits, and memory will be read
+ *         or written in FIFO order.
+ *
+ *         For example, if you PRESENT from one SVGA3D surface to multiple
+ *         places on the screen, the data copied will always be from the
+ *         SVGA3D surface at the time the PRESENT was issued in the FIFO.
+ *         This was not necessarily true on devices without Screen Object.
+ *
+ *         This means that on devices that support Screen Object, the
+ *         PRESENT_READBACK command should not be necessary unless you
+ *         actually want to read back the results of 3D rendering into
+ *         system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
+ *         command provides a strict superset of functionality.)
+ *
+ *       - When a screen is resized, either using Screen Object commands or
+ *         legacy multimon registers, its contents are preserved.
+ *
+ * SVGA_FIFO_CAP_GMR2 --
+ *
+ *    Provides new commands to define and remap guest memory regions (GMR).
+ *
+ *    New 2D commands:
+ *       DEFINE_GMR2, REMAP_GMR2.
+ *
+ * SVGA_FIFO_CAP_3D_HWVERSION_REVISED --
+ *
+ *    Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists.
+ *    This register may replace SVGA_FIFO_3D_HWVERSION on platforms
+ *    that enforce graphics resource limits.  This allows the platform
+ *    to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest
+ *    drivers that do not limit their resources.
+ *
+ *    Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators
+ *    are codependent (and thus we use a single capability bit).
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT_2 --
+ *
+ *    Modifies the DEFINE_SCREEN command to include a guest-provided
+ *    backing store in GMR memory and the bytesPerLine for the backing
+ *    store.  This capability requires the use of a backing store when
+ *    creating screen objects.  However, if SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    is present, then backing stores are optional.
+ *
+ * SVGA_FIFO_CAP_DEAD --
+ *
+ *    Drivers should not use this cap bit.  This cap bit cannot be
+ *    reused since some hosts already expose it.
+ */
+
+#define SVGA_FIFO_CAP_NONE                  0
+#define SVGA_FIFO_CAP_FENCE             (1<<0)
+#define SVGA_FIFO_CAP_ACCELFRONT        (1<<1)
+#define SVGA_FIFO_CAP_PITCHLOCK         (1<<2)
+#define SVGA_FIFO_CAP_VIDEO             (1<<3)
+#define SVGA_FIFO_CAP_CURSOR_BYPASS_3   (1<<4)
+#define SVGA_FIFO_CAP_ESCAPE            (1<<5)
+#define SVGA_FIFO_CAP_RESERVE           (1<<6)
+#define SVGA_FIFO_CAP_SCREEN_OBJECT     (1<<7)
+#define SVGA_FIFO_CAP_GMR2              (1<<8)
+#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED  SVGA_FIFO_CAP_GMR2
+#define SVGA_FIFO_CAP_SCREEN_OBJECT_2   (1<<9)
+#define SVGA_FIFO_CAP_DEAD              (1<<10)
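+
+/*
+ * A minimal sketch (illustrative only, not part of the device ABI) of
+ * testing these bits: drivers typically read SVGA_FIFO_CAPABILITIES
+ * once from the memory-mapped FIFO register file.  'fifo' is assumed
+ * to be the driver's mapping of the FIFO registers, indexed by the
+ * SVGA_FIFO_* offsets defined earlier in this header.
+ */
+static inline uint32
+svgaFifoHasCap(const volatile uint32 *fifo, uint32 cap)
+{
+   return fifo[SVGA_FIFO_CAPABILITIES] & cap;
+}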
+
+
+/*
+ * FIFO Flags
+ *
+ *      Accel Front -- Driver should use front buffer only commands
+ */
+
+#define SVGA_FIFO_FLAG_NONE                 0
+#define SVGA_FIFO_FLAG_ACCELFRONT       (1<<0)
+#define SVGA_FIFO_FLAG_RESERVED        (1<<31) /* Internal use only */
+
+/*
+ * FIFO reservation sentinel value
+ */
+
+#define SVGA_FIFO_RESERVED_UNKNOWN      0xffffffff
+
+
+/*
+ * Video overlay support
+ */
+
+#define SVGA_NUM_OVERLAY_UNITS 32
+
+
+/*
+ * Video capabilities that the guest is currently using
+ */
+
+#define SVGA_VIDEO_FLAG_COLORKEY        0x0001
+
+
+/*
+ * Offsets for the video overlay registers
+ */
+
+enum {
+   SVGA_VIDEO_ENABLED = 0,
+   SVGA_VIDEO_FLAGS,
+   SVGA_VIDEO_DATA_OFFSET,
+   SVGA_VIDEO_FORMAT,
+   SVGA_VIDEO_COLORKEY,
+   SVGA_VIDEO_SIZE,          /* Deprecated */
+   SVGA_VIDEO_WIDTH,
+   SVGA_VIDEO_HEIGHT,
+   SVGA_VIDEO_SRC_X,
+   SVGA_VIDEO_SRC_Y,
+   SVGA_VIDEO_SRC_WIDTH,
+   SVGA_VIDEO_SRC_HEIGHT,
+   SVGA_VIDEO_DST_X,         /* Signed int32 */
+   SVGA_VIDEO_DST_Y,         /* Signed int32 */
+   SVGA_VIDEO_DST_WIDTH,
+   SVGA_VIDEO_DST_HEIGHT,
+   SVGA_VIDEO_PITCH_1,
+   SVGA_VIDEO_PITCH_2,
+   SVGA_VIDEO_PITCH_3,
+   SVGA_VIDEO_DATA_GMRID,    /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
+   SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */
+   SVGA_VIDEO_NUM_REGS
+};
+
+
+/*
+ * SVGA Overlay Units
+ *
+ *      width and height relate to the entire source video frame.
+ *      srcX, srcY, srcWidth and srcHeight represent the subset of the
+ *      source video frame to be displayed.
+ */
+
+typedef struct SVGAOverlayUnit {
+   uint32 enabled;
+   uint32 flags;
+   uint32 dataOffset;
+   uint32 format;
+   uint32 colorKey;
+   uint32 size;
+   uint32 width;
+   uint32 height;
+   uint32 srcX;
+   uint32 srcY;
+   uint32 srcWidth;
+   uint32 srcHeight;
+   int32  dstX;
+   int32  dstY;
+   uint32 dstWidth;
+   uint32 dstHeight;
+   uint32 pitches[3];
+   uint32 dataGMRId;
+   uint32 dstScreenId;
+} SVGAOverlayUnit;
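+
+/*
+ * A minimal sketch (illustrative only) of filling in an overlay unit
+ * for a packed single-plane format that displays the whole source
+ * frame.  svgaOverlayUnitInitRect is a hypothetical helper; the field
+ * names and the dataGMRId/dstScreenId defaults come from the
+ * definitions above.  How the struct reaches the device (e.g. via an
+ * escape command) is outside the scope of this header.
+ */
+static inline void
+svgaOverlayUnitInitRect(SVGAOverlayUnit *unit, uint32 format,
+                        uint32 width, uint32 height,
+                        int32 dstX, int32 dstY, uint32 pitch)
+{
+   unit->enabled     = 1;
+   unit->flags       = 0;       /* Not using SVGA_VIDEO_FLAG_COLORKEY */
+   unit->dataOffset  = 0;       /* Frame starts at the region's base */
+   unit->format      = format;
+   unit->colorKey    = 0;       /* Ignored without the colorkey flag */
+   unit->size        = 0;       /* Deprecated field */
+   unit->width       = width;   /* Entire source video frame */
+   unit->height      = height;
+   unit->srcX        = 0;       /* Display the whole frame */
+   unit->srcY        = 0;
+   unit->srcWidth    = width;
+   unit->srcHeight   = height;
+   unit->dstX        = dstX;    /* Signed: may extend off-screen */
+   unit->dstY        = dstY;
+   unit->dstWidth    = width;
+   unit->dstHeight   = height;
+   unit->pitches[0]  = pitch;   /* Single plane: only one pitch used */
+   unit->pitches[1]  = 0;
+   unit->pitches[2]  = 0;
+   unit->dataGMRId   = SVGA_GMR_FRAMEBUFFER; /* The documented default */
+   unit->dstScreenId = SVGA_ID_INVALID;      /* Virtual coordinates */
+}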
+
+
+/*
+ * SVGAScreenObject --
+ *
+ *    This is a new way to represent a guest's multi-monitor screen or
+ *    Unity window. Screen objects are only supported if the
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
+ *
+ *    If Screen Objects are supported, they can be used to fully
+ *    replace the functionality provided by the framebuffer registers
+ *    (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
+ *
+ *    The screen object is a struct with guaranteed binary
+ *    compatibility. New flags can be added, and the struct may grow,
+ *    but existing fields must retain their meaning.
+ *
+ *    Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2 are required fields of
+ *    a SVGAGuestPtr that is used to back the screen contents.  This
+ *    memory must come from the GFB.  The guest is not allowed to
+ *    access the memory and doing so will have undefined results.  The
+ *    backing store is required to be page aligned and the size is
+ *    padded to the next page boundary.  The number of pages is:
+ *       (bytesPerLine * size.width * 4 + PAGE_SIZE - 1) / PAGE_SIZE
+ *
+ *    The pitch in the backingStore is required to be at least large
+ *    enough to hold a 32bpp scanline.  It is recommended that the
+ *    driver pad bytesPerLine for a potential performance win.
+ *
+ *    The cloneCount field is treated as a hint from the guest that
+ *    the user wants this display to be cloned, cloneCount times.  A
+ *    value of zero means no cloning should happen.
+ */
+
+#define SVGA_SCREEN_MUST_BE_SET     (1 << 0) /* Must be set or results undefined */
+#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
+#define SVGA_SCREEN_IS_PRIMARY      (1 << 1) /* Guest considers this screen to be 'primary' */
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When a screen is
+ * deactivated, its base layer is defined to lose all contents and
+ * become black, and its backing store becomes optional.  When this
+ * flag is set, backingPtr and bytesPerLine are ignored.
+ */
+#define SVGA_SCREEN_DEACTIVATE  (1 << 3)
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When this flag is set,
+ * the screen contents are output to the user as all black, though the
+ * base layer contents are preserved.  The screen base layer can still
+ * be read and written as normal, though no visible effect will be
+ * seen by the user.  When the flag is changed, the screen will be
+ * blanked or redrawn to its current contents as needed, without any
+ * extra commands from the driver.  This flag only has an effect when
+ * the screen is not deactivated.
+ */
+#define SVGA_SCREEN_BLANKING (1 << 4)
+
+typedef
+struct SVGAScreenObject {
+   uint32 structSize;   /* sizeof(SVGAScreenObject) */
+   uint32 id;
+   uint32 flags;
+   struct {
+      uint32 width;
+      uint32 height;
+   } size;
+   struct {
+      int32 x;
+      int32 y;
+   } root;
+
+   /*
+    * Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional
+    * with SVGA_FIFO_CAP_SCREEN_OBJECT.
+    */
+   SVGAGuestImage backingStore;
+   uint32 cloneCount;
+} SVGAScreenObject;
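+
+/*
+ * A minimal sketch (illustrative only) of the backing store size
+ * computation, using exactly the page-count formula documented above.
+ * svgaScreenBackingPages is a hypothetical helper; pageSize would be
+ * the platform's PAGE_SIZE (e.g. 4096).
+ */
+static inline uint32
+svgaScreenBackingPages(const SVGAScreenObject *screen,
+                       uint32 bytesPerLine, uint32 pageSize)
+{
+   /* The backing store itself must be page aligned; its size is
+    * padded up to the next page boundary. */
+   return (bytesPerLine * screen->size.width * 4 + pageSize - 1) / pageSize;
+}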
+
+
+/*
+ *  Commands in the command FIFO:
+ *
+ *  Command IDs defined below are used for the traditional 2D FIFO
+ *  communication (not all commands are available for all versions of the
+ *  SVGA FIFO protocol).
+ *
+ *  Note the holes in the command ID numbers: These commands have been
+ *  deprecated, and the old IDs must not be reused.
+ *
+ *  Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
+ *  protocol.
+ *
+ *  Each command's parameters are described by the comments and
+ *  structs below.
+ */
+
+typedef enum {
+   SVGA_CMD_INVALID_CMD           = 0,
+   SVGA_CMD_UPDATE                = 1,
+   SVGA_CMD_RECT_COPY             = 3,
+   SVGA_CMD_DEFINE_CURSOR         = 19,
+   SVGA_CMD_DEFINE_ALPHA_CURSOR   = 22,
+   SVGA_CMD_UPDATE_VERBOSE        = 25,
+   SVGA_CMD_FRONT_ROP_FILL        = 29,
+   SVGA_CMD_FENCE                 = 30,
+   SVGA_CMD_ESCAPE                = 33,
+   SVGA_CMD_DEFINE_SCREEN         = 34,
+   SVGA_CMD_DESTROY_SCREEN        = 35,
+   SVGA_CMD_DEFINE_GMRFB          = 36,
+   SVGA_CMD_BLIT_GMRFB_TO_SCREEN  = 37,
+   SVGA_CMD_BLIT_SCREEN_TO_GMRFB  = 38,
+   SVGA_CMD_ANNOTATION_FILL       = 39,
+   SVGA_CMD_ANNOTATION_COPY       = 40,
+   SVGA_CMD_DEFINE_GMR2           = 41,
+   SVGA_CMD_REMAP_GMR2            = 42,
+   SVGA_CMD_MAX
+} SVGAFifoCmdId;
+
+#define SVGA_CMD_MAX_ARGS           64
+
+
+/*
+ * SVGA_CMD_UPDATE --
+ *
+ *    This is a DMA transfer which copies from the Guest Framebuffer
+ *    (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
+ *    intersect with the provided virtual rectangle.
+ *
+ *    This command does not support using arbitrary guest memory as a
+ *    data source; it only works with the pre-defined GFB memory.
+ *    This command also does not support signed virtual coordinates.
+ *    If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
+ *    negative root x/y coordinates, the negative portion of those
+ *    screens will not be reachable by this command.
+ *
+ *    This command is not necessary when using framebuffer
+ *    traces. Traces are automatically enabled if the SVGA FIFO is
+ *    disabled, and you may explicitly enable/disable traces using
+ *    SVGA_REG_TRACES. With traces enabled, any write to the GFB will
+ *    automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
+ *
+ *    Traces and SVGA_CMD_UPDATE are the only supported ways to render
+ *    pseudocolor screen updates. The newer Screen Object commands
+ *    only support true color formats.
+ *
+ * Availability:
+ *    Always available.
+ */
+
+typedef
+struct SVGAFifoCmdUpdate {
+   uint32 x;
+   uint32 y;
+   uint32 width;
+   uint32 height;
+} SVGAFifoCmdUpdate;
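+
+/*
+ * A minimal sketch (illustrative only) of emitting this command: the
+ * wire format is the 32-bit SVGA_CMD_UPDATE id followed by the struct
+ * above.  fifoReserve() and fifoCommit() are hypothetical driver
+ * helpers, not part of this header.
+ */
+#if 0 /* Illustrative pseudocode only. */
+void exampleUpdate(uint32 x, uint32 y, uint32 w, uint32 h)
+{
+   SVGAFifoCmdUpdate *cmd = fifoReserve(SVGA_CMD_UPDATE, sizeof *cmd);
+
+   cmd->x = x;
+   cmd->y = y;
+   cmd->width = w;
+   cmd->height = h;
+   fifoCommit(sizeof *cmd);
+}
+#endif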
+
+
+/*
+ * SVGA_CMD_RECT_COPY --
+ *
+ *    Perform a rectangular DMA transfer from one area of the GFB to
+ *    another, and copy the result to any screens which intersect it.
+ *
+ * Availability:
+ *    SVGA_CAP_RECT_COPY
+ */
+
+typedef
+struct SVGAFifoCmdRectCopy {
+   uint32 srcX;
+   uint32 srcY;
+   uint32 destX;
+   uint32 destY;
+   uint32 width;
+   uint32 height;
+} SVGAFifoCmdRectCopy;
+
+
+/*
+ * SVGA_CMD_DEFINE_CURSOR --
+ *
+ *    Provide a new cursor image, as an AND/XOR mask.
+ *
+ *    The recommended way to position the cursor overlay is by using
+ *    the SVGA_FIFO_CURSOR_* registers, supported by the
+ *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ *    SVGA_CAP_CURSOR
+ */
+
+typedef
+struct SVGAFifoCmdDefineCursor {
+   uint32 id;             /* Reserved, must be zero. */
+   uint32 hotspotX;
+   uint32 hotspotY;
+   uint32 width;
+   uint32 height;
+   uint32 andMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
+   uint32 xorMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
+   /*
+    * Followed by scanline data for AND mask, then XOR mask.
+    * Each scanline is padded to a 32-bit boundary.
+    */
+} SVGAFifoCmdDefineCursor;
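+
+/*
+ * A minimal sketch (illustrative only) of the mask size computation
+ * implied by the comment above: each scanline is padded to a 32-bit
+ * boundary, and 'depth' is 1 or the current BITS_PER_PIXEL.
+ * svgaCursorMaskBytes is a hypothetical helper.
+ */
+static inline uint32
+svgaCursorMaskBytes(uint32 width, uint32 height, uint32 depth)
+{
+   uint32 bitsPerLine   = width * depth;
+   uint32 dwordsPerLine = (bitsPerLine + 31) / 32; /* Pad to 32 bits */
+
+   return dwordsPerLine * 4 * height;
+}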
+
+
+/*
+ * SVGA_CMD_DEFINE_ALPHA_CURSOR --
+ *
+ *    Provide a new cursor image, in 32-bit BGRA format.
+ *
+ *    The recommended way to position the cursor overlay is by using
+ *    the SVGA_FIFO_CURSOR_* registers, supported by the
+ *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ *    SVGA_CAP_ALPHA_CURSOR
+ */
+
+typedef
+struct SVGAFifoCmdDefineAlphaCursor {
+   uint32 id;             /* Reserved, must be zero. */
+   uint32 hotspotX;
+   uint32 hotspotY;
+   uint32 width;
+   uint32 height;
+   /* Followed by scanline data */
+} SVGAFifoCmdDefineAlphaCursor;
+
+
+/*
+ * SVGA_CMD_UPDATE_VERBOSE --
+ *
+ *    Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
+ *    'reason' value, an opaque cookie which is used by internal
+ *    debugging tools. Third party drivers should not use this
+ *    command.
+ *
+ * Availability:
+ *    SVGA_CAP_EXTENDED_FIFO
+ */
+
+typedef
+struct SVGAFifoCmdUpdateVerbose {
+   uint32 x;
+   uint32 y;
+   uint32 width;
+   uint32 height;
+   uint32 reason;
+} SVGAFifoCmdUpdateVerbose;
+
+
+/*
+ * SVGA_CMD_FRONT_ROP_FILL --
+ *
+ *    This is a hint which tells the SVGA device that the driver has
+ *    just filled a rectangular region of the GFB with a solid
+ *    color. Instead of reading these pixels from the GFB, the device
+ *    can assume that they all equal 'color'. This is primarily used
+ *    for remote desktop protocols.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_ACCELFRONT
+ */
+
+#define  SVGA_ROP_COPY                    0x03
+
+typedef
+struct SVGAFifoCmdFrontRopFill {
+   uint32 color;     /* In the same format as the GFB */
+   uint32 x;
+   uint32 y;
+   uint32 width;
+   uint32 height;
+   uint32 rop;       /* Must be SVGA_ROP_COPY */
+} SVGAFifoCmdFrontRopFill;
+
+
+/*
+ * SVGA_CMD_FENCE --
+ *
+ *    Insert a synchronization fence.  When the SVGA device reaches
+ *    this command, it will copy the 'fence' value into the
+ *    SVGA_FIFO_FENCE register. It will also compare the fence against
+ *    SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
+ *    SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
+ *    raise this interrupt.
+ *
+ * Availability:
+ *    SVGA_FIFO_FENCE for this command,
+ *    SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
+ */
+
+typedef
+struct {
+   uint32 fence;
+} SVGAFifoCmdFence;
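+
+/*
+ * A minimal sketch (illustrative only) of the usual fence pattern:
+ * insert a fence after a batch of commands, then test SVGA_FIFO_FENCE
+ * against it.  The signed subtraction makes the comparison robust
+ * against 32-bit fence counter wrap-around.  fifoReserve() and
+ * fifoCommit() are hypothetical driver helpers.
+ */
+static inline uint32
+svgaFenceSignaled(const volatile uint32 *fifo, uint32 fence)
+{
+   return (int32)(fifo[SVGA_FIFO_FENCE] - fence) >= 0;
+}
+
+#if 0 /* Illustrative pseudocode only. */
+uint32 exampleInsertFence(uint32 nextFence)
+{
+   SVGAFifoCmdFence *cmd = fifoReserve(SVGA_CMD_FENCE, sizeof *cmd);
+
+   cmd->fence = nextFence;
+   fifoCommit(sizeof *cmd);
+   return nextFence;
+}
+#endif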
+
+
+/*
+ * SVGA_CMD_ESCAPE --
+ *
+ *    Send an extended or vendor-specific variable length command.
+ *    This is used for video overlay, third party plugins, and
+ *    internal debugging tools. See svga_escape.h
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_ESCAPE
+ */
+
+typedef
+struct SVGAFifoCmdEscape {
+   uint32 nsid;
+   uint32 size;
+   /* followed by 'size' bytes of data */
+} SVGAFifoCmdEscape;
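+
+/*
+ * A minimal sketch (illustrative only) of sizing an escape: since the
+ * FIFO is addressed in 32-bit words, the reservation for an ESCAPE is
+ * typically the header plus 'size' rounded up to a 32-bit boundary.
+ * svgaEscapeTotalSize is a hypothetical helper.
+ */
+static inline uint32
+svgaEscapeTotalSize(uint32 payloadBytes)
+{
+   return (uint32)sizeof(SVGAFifoCmdEscape) + ((payloadBytes + 3) & ~3u);
+}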
+
+
+/*
+ * SVGA_CMD_DEFINE_SCREEN --
+ *
+ *    Define or redefine an SVGAScreenObject. See the description of
+ *    SVGAScreenObject above.  The video driver is responsible for
+ *    generating new screen IDs. They should be small non-negative
+ *    integers. The virtual device will have an implementation
+ *    specific upper limit on the number of screen IDs
+ *    supported. Drivers are responsible for recycling IDs. The first
+ *    valid ID is zero.
+ *
+ *    - Interaction with other registers:
+ *
+ *    For backwards compatibility, when the GFB mode registers (WIDTH,
+ *    HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
+ *    deletes all screens other than screen #0, and redefines screen
+ *    #0 according to the specified mode. Drivers that use
+ *    SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
+ *
+ *    If you use screen objects, do not use the legacy multi-mon
+ *    registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+struct {
+   SVGAScreenObject screen;   /* Variable-length according to version */
+} SVGAFifoCmdDefineScreen;
+
+
+/*
+ * SVGA_CMD_DESTROY_SCREEN --
+ *
+ *    Destroy an SVGAScreenObject. Its ID is immediately available for
+ *    re-use.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+struct {
+   uint32 screenId;
+} SVGAFifoCmdDestroyScreen;
+
+
+/*
+ * SVGA_CMD_DEFINE_GMRFB --
+ *
+ *    This command sets a piece of SVGA device state called the
+ *    Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
+ *    piece of light-weight state which identifies the location and
+ *    format of an image in guest memory or in BAR1. The GMRFB has
+ *    an arbitrary size, and it doesn't need to match the geometry
+ *    of the GFB or any screen object.
+ *
+ *    The GMRFB can be redefined as often as you like. You could
+ *    always use the same GMRFB, you could redefine it before
+ *    rendering from a different guest screen, or you could even
+ *    redefine it before every blit.
+ *
+ *    There are multiple ways to use this command. The simplest way is
+ *    to use it to move the framebuffer either to elsewhere in the GFB
+ *    (BAR1) memory region, or to a user-defined GMR. This lets a
+ *    driver use a framebuffer allocated entirely out of normal system
+ *    memory, which we encourage.
+ *
+ *    Another way to use this command is to set up a ring buffer of
+ *    updates in GFB memory. If a driver wants to ensure that no
+ *    frames are skipped by the SVGA device, it is important that the
+ *    driver not modify the source data for a blit until the device is
+ *    done processing the command. One efficient way to accomplish
+ *    this is to use a ring of small DMA buffers. Each buffer is used
+ *    for one blit, then we move on to the next buffer in the
+ *    ring. The FENCE mechanism is used to protect each buffer from
+ *    re-use until the device is finished with that buffer's
+ *    corresponding blit.
+ *
+ *    This command does not affect the meaning of SVGA_CMD_UPDATE.
+ *    UPDATEs always occur from the legacy GFB memory area. This
+ *    command has no support for pseudocolor GMRFBs. Currently only
+ *    true-color 15, 16, and 24-bit depths are supported. Future
+ *    devices may expose capabilities for additional framebuffer
+ *    formats.
+ *
+ *    The default GMRFB value is undefined. Drivers must always send
+ *    this command at least once before performing any blit from the
+ *    GMRFB.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+struct {
+   SVGAGuestPtr        ptr;
+   uint32              bytesPerLine;
+   SVGAGMRImageFormat  format;
+} SVGAFifoCmdDefineGMRFB;
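+
+/*
+ * A minimal sketch (illustrative only) of the fence-protected DMA
+ * ring described above.  Every helper below (syncToFence,
+ * fillWithNewFrame, defineGMRFB, blitToScreen, insertFence, and
+ * struct slot) is hypothetical driver plumbing; only the command
+ * ordering comes from this header.
+ */
+#if 0 /* Illustrative pseudocode only. */
+void exampleRingBlit(struct slot *ring, int nSlots, int *next)
+{
+   struct slot *s = &ring[*next];
+
+   syncToFence(s->fence);     /* Wait until the device released it */
+   fillWithNewFrame(s);       /* Now safe to overwrite this slot */
+   defineGMRFB(s->ptr, s->bytesPerLine, s->format);
+   blitToScreen(s);           /* SVGA_CMD_BLIT_GMRFB_TO_SCREEN */
+   s->fence = insertFence();  /* Protect the slot until consumed */
+   *next = (*next + 1) % nSlots;
+}
+#endif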
+
+
+/*
+ * SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
+ *
+ *    This is a guest-to-host blit. It performs a DMA operation to
+ *    copy a rectangular region of pixels from the current GMRFB to
+ *    one or more Screen Objects.
+ *
+ *    The destination coordinate may be specified relative to a
+ *    screen's origin (if a screen ID is specified) or relative to the
+ *    virtual coordinate system's origin (if the screen ID is
+ *    SVGA_ID_INVALID). The actual destination may span zero or more
+ *    screens, in the case of a virtual destination rect or a rect
+ *    which extends off the edge of the specified screen.
+ *
+ *    This command writes to the screen's "base layer": the underlying
+ *    framebuffer which exists below any cursor or video overlays. No
+ *    action is necessary to explicitly hide or update any overlays
+ *    which exist on top of the updated region.
+ *
+ *    The SVGA device is guaranteed to finish reading from the GMRFB
+ *    by the time any subsequent FENCE commands are reached.
+ *
+ *    This command consumes an annotation. See the
+ *    SVGA_CMD_ANNOTATION_* commands for details.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+struct {
+   SVGASignedPoint  srcOrigin;
+   SVGASignedRect   destRect;
+   uint32           destScreenId;
+} SVGAFifoCmdBlitGMRFBToScreen;
+
+
+/*
+ * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
+ *
+ *    This is a host-to-guest blit. It performs a DMA operation to
+ *    copy a rectangular region of pixels from a single Screen Object
+ *    back to the current GMRFB.
+ *
+ *    Usage note: This command should be used rarely. It will
+ *    typically be inefficient, but it is necessary for some types of
+ *    synchronization between 3D (GPU) and 2D (CPU) rendering into
+ *    overlapping areas of a screen.
+ *
+ *    The source coordinate is specified relative to a screen's
+ *    origin. The provided screen ID must be valid. If any parameters
+ *    are invalid, the resulting pixel values are undefined.
+ *
+ *    This command reads the screen's "base layer". Overlays like
+ *    video and cursor are not included, but any data which was sent
+ *    using a blit-to-screen primitive will be available, no matter
+ *    whether the data's original source was the GMRFB or the 3D
+ *    acceleration hardware.
+ *
+ *    Note that our guest-to-host blits and host-to-guest blits aren't
+ *    symmetric in their current implementation. While the parameters
+ *    are identical, host-to-guest blits are a lot less featureful.
+ *    They do not support clipping: If the source parameters don't
+ *    fully fit within a screen, the blit fails. They must originate
+ *    from exactly one screen. Virtual coordinates are not directly
+ *    supported.
+ *
+ *    Host-to-guest blits do support the same set of GMRFB formats
+ *    offered by guest-to-host blits.
+ *
+ *    The SVGA device is guaranteed to finish writing to the GMRFB by
+ *    the time any subsequent FENCE commands are reached.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+struct {
+   SVGASignedPoint  destOrigin;
+   SVGASignedRect   srcRect;
+   uint32           srcScreenId;
+} SVGAFifoCmdBlitScreenToGMRFB;
+
+
+/*
+ * SVGA_CMD_ANNOTATION_FILL --
+ *
+ *    This is a blit annotation. This command stores a small piece of
+ *    device state which is consumed by the next blit-to-screen
+ *    command. The state is only cleared by commands which are
+ *    specifically documented as consuming an annotation. Other
+ *    commands (such as ESCAPEs for debugging) may intervene between
+ *    the annotation and its associated blit.
+ *
+ *    This annotation is a promise about the contents of the next
+ *    blit: The video driver is guaranteeing that all pixels in that
+ *    blit will have the same value, specified here as a color in
+ *    SVGAColorBGRX format.
+ *
+ *    The SVGA device can still render the blit correctly even if it
+ *    ignores this annotation, but the annotation may allow it to
+ *    perform the blit more efficiently, for example by ignoring the
+ *    source data and performing a fill in hardware.
+ *
+ *    This annotation is most important for performance when the
+ *    user's display is being remoted over a network connection.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+struct {
+   SVGAColorBGRX  color;
+} SVGAFifoCmdAnnotationFill;
+
+
+/*
+ * SVGA_CMD_ANNOTATION_COPY --
+ *
+ *    This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
+ *    information about annotations.
+ *
+ *    This annotation is a promise about the contents of the next
+ *    blit: The video driver is guaranteeing that all pixels in that
+ *    blit will have the same value as those which already exist at an
+ *    identically-sized region on the same or a different screen.
+ *
+ *    Note that the source pixels for the COPY in this annotation are
+ *    sampled before applying the annotation's associated blit. They
+ *    are allowed to overlap with the blit's destination pixels.
+ *
+ *    The copy source rectangle is specified the same way as the blit
+ *    destination: it can be a rectangle which spans zero or more
+ *    screens, specified relative to either a screen or to the virtual
+ *    coordinate system's origin. If the source rectangle includes
+ *    pixels which are not from exactly one screen, the results are
+ *    undefined.
+ *
+ * Availability:
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
+ */
+
+typedef
+struct {
+   SVGASignedPoint  srcOrigin;
+   uint32           srcScreenId;
+} SVGAFifoCmdAnnotationCopy;
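+
+/*
+ * A minimal sketch (illustrative only) of how an annotation pairs
+ * with the blit that consumes it.  fifoReserve(), fifoCommit() and
+ * blitGMRFBToScreen() are hypothetical driver helpers; the command
+ * ordering is the point.
+ */
+#if 0 /* Illustrative pseudocode only. */
+void exampleAnnotatedFill(SVGAColorBGRX color,
+                          const SVGASignedRect *destRect, uint32 screenId)
+{
+   SVGAFifoCmdAnnotationFill *ann =
+      fifoReserve(SVGA_CMD_ANNOTATION_FILL, sizeof *ann);
+
+   ann->color = color;
+   fifoCommit(sizeof *ann);
+
+   /* The very next blit-to-screen command consumes the annotation. */
+   blitGMRFBToScreen(destRect, screenId);
+}
+#endif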
+
+
+/*
+ * SVGA_CMD_DEFINE_GMR2 --
+ *
+ *    Define guest memory region v2.  See the description of GMRs above.
+ *
+ * Availability:
+ *    SVGA_CAP_GMR2
+ */
+
+typedef
+struct {
+   uint32 gmrId;
+   uint32 numPages;
+} SVGAFifoCmdDefineGMR2;
+
+
+/*
+ * SVGA_CMD_REMAP_GMR2 --
+ *
+ *    Remap guest memory region v2.  See the description of GMRs above.
+ *
+ *    This command allows the guest to modify a portion of an existing GMR by
+ *    invalidating it or reassigning it to different guest physical pages.
+ *    The pages are identified by physical page number (PPN).  The pages
+ *    are assumed to be pinned and valid for DMA operations.
+ *
+ *    Description of command flags:
+ *
+ *    SVGA_REMAP_GMR2_VIA_GMR: If enabled, references a PPN list in a GMR.
+ *       The PPN list must not overlap with the remap region (this can be
+ *       handled trivially by referencing a separate GMR).  If flag is
+ *       disabled, PPN list is appended to SVGARemapGMR command.
+ *
+ *    SVGA_REMAP_GMR2_PPN64: If set, PPN list is in PPN64 format, otherwise
+ *       it is in PPN32 format.
+ *
+ *    SVGA_REMAP_GMR2_SINGLE_PPN: If set, PPN list contains a single entry.
+ *       A single PPN can be used to invalidate a portion of a GMR or
+ *       map it to a single guest scratch page.
+ *
+ * Availability:
+ *    SVGA_CAP_GMR2
+ */
+
+typedef enum {
+   SVGA_REMAP_GMR2_PPN32         = 0,
+   SVGA_REMAP_GMR2_VIA_GMR       = (1 << 0),
+   SVGA_REMAP_GMR2_PPN64         = (1 << 1),
+   SVGA_REMAP_GMR2_SINGLE_PPN    = (1 << 2),
+} SVGARemapGMR2Flags;
+
+typedef
+struct {
+   uint32 gmrId;
+   SVGARemapGMR2Flags flags;
+   uint32 offsetPages; /* offset in pages to begin remap */
+   uint32 numPages; /* number of pages to remap */
+   /*
+    * Followed by additional data depending on SVGARemapGMR2Flags.
+    *
+    * If flag SVGA_REMAP_GMR2_VIA_GMR is set, single SVGAGuestPtr follows.
+    * Otherwise an array of page descriptors in PPN32 or PPN64 format
+    * (according to flag SVGA_REMAP_GMR2_PPN64) follows.  If flag
+    * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
+    */
+} SVGAFifoCmdRemapGMR2;
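+
+/*
+ * A minimal sketch (illustrative only) of sizing the data that
+ * follows SVGAFifoCmdRemapGMR2, derived from the flag descriptions
+ * above.  svgaRemapGMR2DataSize is a hypothetical helper.
+ */
+static inline uint32
+svgaRemapGMR2DataSize(SVGARemapGMR2Flags flags, uint32 numPages)
+{
+   uint32 entries;
+   uint32 entrySize;
+
+   if (flags & SVGA_REMAP_GMR2_VIA_GMR)
+      return (uint32)sizeof(SVGAGuestPtr);  /* PPN list lives in a GMR */
+
+   entries   = (flags & SVGA_REMAP_GMR2_SINGLE_PPN) ? 1 : numPages;
+   entrySize = (flags & SVGA_REMAP_GMR2_PPN64) ? 8 : 4;
+   return entries * entrySize;              /* Inline PPN32/PPN64 array */
+}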
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/svga_types.h b/linux-imx/drivers/gpu/drm/vmwgfx/svga_types.h
new file mode 100644
index 0000000..55836de
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/svga_types.h
@@ -0,0 +1,45 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Silly typedefs for the svga headers. Currently the headers are shared
+ * between all components that talk to svga. And as such the headers
+ * are in a completely different style and use weird defines.
+ *
+ * This file lets all the ugly be prefixed with svga*.
+ */
+
+#ifndef _SVGA_TYPES_H_
+#define _SVGA_TYPES_H_
+
+typedef uint16_t uint16;
+typedef uint32_t uint32;
+typedef uint8_t uint8;
+typedef int32_t int32;
+typedef bool Bool;
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
new file mode 100644
index 0000000..96dc84d
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -0,0 +1,352 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_page_alloc.h>
+
+static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
+	TTM_PL_FLAG_CACHED;
+
+static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
+	TTM_PL_FLAG_CACHED |
+	TTM_PL_FLAG_NO_EVICT;
+
+static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
+	TTM_PL_FLAG_CACHED;
+
+static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
+	TTM_PL_FLAG_CACHED;
+
+static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
+	TTM_PL_FLAG_CACHED |
+	TTM_PL_FLAG_NO_EVICT;
+
+struct ttm_placement vmw_vram_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.placement = &vram_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &vram_placement_flags
+};
+
+static uint32_t vram_gmr_placement_flags[] = {
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+static uint32_t gmr_vram_placement_flags[] = {
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_vram_gmr_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 2,
+	.placement = vram_gmr_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &gmr_placement_flags
+};
+
+static uint32_t vram_gmr_ne_placement_flags[] = {
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
+struct ttm_placement vmw_vram_gmr_ne_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 2,
+	.placement = vram_gmr_ne_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &gmr_ne_placement_flags
+};
+
+struct ttm_placement vmw_vram_sys_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.placement = &vram_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_vram_ne_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.placement = &vram_ne_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &vram_ne_placement_flags
+};
+
+struct ttm_placement vmw_sys_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.placement = &sys_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_placement_flags
+};
+
+static uint32_t evictable_placement_flags[] = {
+	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_evictable_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 3,
+	.placement = evictable_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_srf_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.num_busy_placement = 2,
+	.placement = &gmr_placement_flags,
+	.busy_placement = gmr_vram_placement_flags
+};
+
+struct vmw_ttm_tt {
+	struct ttm_tt ttm;
+	struct vmw_private *dev_priv;
+	int gmr_id;
+};
+
+static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+
+	vmw_be->gmr_id = bo_mem->start;
+
+	return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
+			    ttm->num_pages, vmw_be->gmr_id);
+}
+
+static int vmw_ttm_unbind(struct ttm_tt *ttm)
+{
+	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+
+	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+	return 0;
+}
+
+static void vmw_ttm_destroy(struct ttm_tt *ttm)
+{
+	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+
+	ttm_tt_fini(ttm);
+	kfree(vmw_be);
+}
+
+static struct ttm_backend_func vmw_ttm_func = {
+	.bind = vmw_ttm_bind,
+	.unbind = vmw_ttm_unbind,
+	.destroy = vmw_ttm_destroy,
+};
+
+struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+				 unsigned long size, uint32_t page_flags,
+				 struct page *dummy_read_page)
+{
+	struct vmw_ttm_tt *vmw_be;
+
+	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
+	if (!vmw_be)
+		return NULL;
+
+	vmw_be->ttm.func = &vmw_ttm_func;
+	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+	if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+		kfree(vmw_be);
+		return NULL;
+	}
+
+	return &vmw_be->ttm;
+}
+
+int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+	return 0;
+}
+
+int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+		      struct ttm_mem_type_manager *man)
+{
+	switch (type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+
+		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case TTM_PL_VRAM:
+		/* "On-card" video ram */
+		man->func = &ttm_bo_manager_func;
+		man->gpu_offset = 0;
+		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	case VMW_PL_GMR:
+		/*
+		 * "Guest Memory Regions" is an aperture like feature with
+		 *  one slot per bo. There is an upper limit of the number of
+		 *  slots as well as the bo size.
+		 */
+		man->func = &vmw_gmrid_manager_func;
+		man->gpu_offset = 0;
+		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
+		man->available_caching = TTM_PL_FLAG_CACHED;
+		man->default_caching = TTM_PL_FLAG_CACHED;
+		break;
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void vmw_evict_flags(struct ttm_buffer_object *bo,
+		     struct ttm_placement *placement)
+{
+	*placement = vmw_sys_placement;
+}
+
+static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+	struct ttm_object_file *tfile =
+		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
+
+	return vmw_user_dmabuf_verify_access(bo, tfile);
+}
+
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.is_iomem = false;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+	case VMW_PL_GMR:
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->start << PAGE_SHIFT;
+		mem->bus.base = dev_priv->vram_start;
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	return 0;
+}
+
+/**
+ * FIXME: We're using the old vmware polling method to sync.
+ * Do this with fences instead.
+ */
+
+static void *vmw_sync_obj_ref(void *sync_obj)
+{
+
+	return (void *)
+		vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
+}
+
+static void vmw_sync_obj_unref(void **sync_obj)
+{
+	vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
+}
+
+static int vmw_sync_obj_flush(void *sync_obj)
+{
+	vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
+	return 0;
+}
+
+static bool vmw_sync_obj_signaled(void *sync_obj)
+{
+	return	vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
+				       DRM_VMW_FENCE_FLAG_EXEC);
+
+}
+
+static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
+{
+	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
+				  DRM_VMW_FENCE_FLAG_EXEC,
+				  lazy, interruptible,
+				  VMW_FENCE_WAIT_TIMEOUT);
+}
+
+struct ttm_bo_driver vmw_bo_driver = {
+	.ttm_tt_create = &vmw_ttm_tt_create,
+	.ttm_tt_populate = &ttm_pool_populate,
+	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
+	.invalidate_caches = vmw_invalidate_caches,
+	.init_mem_type = vmw_init_mem_type,
+	.evict_flags = vmw_evict_flags,
+	.move = NULL,
+	.verify_access = vmw_verify_access,
+	.sync_obj_signaled = vmw_sync_obj_signaled,
+	.sync_obj_wait = vmw_sync_obj_wait,
+	.sync_obj_flush = vmw_sync_obj_flush,
+	.sync_obj_unref = vmw_sync_obj_unref,
+	.sync_obj_ref = vmw_sync_obj_ref,
+	.move_notify = NULL,
+	.swap_notify = NULL,
+	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
+	.io_mem_free = &vmw_ttm_io_mem_free,
+};
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
new file mode 100644
index 0000000..00ae092
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -0,0 +1,274 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+struct vmw_user_context {
+	struct ttm_base_object base;
+	struct vmw_resource res;
+};
+
+static void vmw_user_context_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base);
+
+static uint64_t vmw_user_context_size;
+
+static const struct vmw_user_resource_conv user_context_conv = {
+	.object_type = VMW_RES_CONTEXT,
+	.base_obj_to_res = vmw_user_context_base_to_res,
+	.res_free = vmw_user_context_free
+};
+
+const struct vmw_user_resource_conv *user_context_converter =
+	&user_context_conv;
+
+
+static const struct vmw_res_func vmw_legacy_context_func = {
+	.res_type = vmw_res_context,
+	.needs_backup = false,
+	.may_evict = false,
+	.type_name = "legacy contexts",
+	.backup_placement = NULL,
+	.create = NULL,
+	.destroy = NULL,
+	.bind = NULL,
+	.unbind = NULL
+};
+
+/**
+ * Context management:
+ */
+
+static void vmw_hw_context_destroy(struct vmw_resource *res)
+{
+
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyContext body;
+	} *cmd;
+
+
+	vmw_execbuf_release_pinned_bo(dev_priv);
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "destruction.\n");
+		return;
+	}
+
+	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
+	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+	cmd->body.cid = cpu_to_le32(res->id);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_3d_resource_dec(dev_priv, false);
+}
+
+static int vmw_context_init(struct vmw_private *dev_priv,
+			    struct vmw_resource *res,
+			    void (*res_free) (struct vmw_resource *res))
+{
+	int ret;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineContext body;
+	} *cmd;
+
+	ret = vmw_resource_init(dev_priv, res, false,
+				res_free, &vmw_legacy_context_func);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a resource id.\n");
+		goto out_early;
+	}
+
+	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
+		DRM_ERROR("Out of hw context ids.\n");
+		vmw_resource_unreference(&res);
+		return -ENOMEM;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Fifo reserve failed.\n");
+		vmw_resource_unreference(&res);
+		return -ENOMEM;
+	}
+
+	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
+	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+	cmd->body.cid = cpu_to_le32(res->id);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	(void) vmw_3d_resource_inc(dev_priv, false);
+	vmw_resource_activate(res, vmw_hw_context_destroy);
+	return 0;
+
+out_early:
+	if (res_free == NULL)
+		kfree(res);
+	else
+		res_free(res);
+	return ret;
+}
+
+struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+{
+	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+	int ret;
+
+	if (unlikely(res == NULL))
+		return NULL;
+
+	ret = vmw_context_init(dev_priv, res, NULL);
+
+	return (ret == 0) ? res : NULL;
+}
+
+/**
+ * User-space context management:
+ */
+
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base)
+{
+	return &(container_of(base, struct vmw_user_context, base)->res);
+}
+
+static void vmw_user_context_free(struct vmw_resource *res)
+{
+	struct vmw_user_context *ctx =
+	    container_of(res, struct vmw_user_context, res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	ttm_base_object_kfree(ctx, base);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv),
+			    vmw_user_context_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_user_context *ctx =
+	    container_of(base, struct vmw_user_context, base);
+	struct vmw_resource *res = &ctx->res;
+
+	*p_base = NULL;
+	vmw_resource_unreference(&res);
+}
+
+int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+}
+
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_context *ctx;
+	struct vmw_resource *res;
+	struct vmw_resource *tmp;
+	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	int ret;
+
+
+	/*
+	 * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by the maximum number of contexts anyway.
+	 */
+
+	if (unlikely(vmw_user_context_size == 0))
+		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_user_context_size,
+				   false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for context"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (unlikely(ctx == NULL)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_user_context_size);
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	res = &ctx->res;
+	ctx->base.shareable = false;
+	ctx->base.tfile = NULL;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+
+	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	tmp = vmw_resource_reference(&ctx->res);
+	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
+				   &vmw_user_context_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		goto out_err;
+	}
+
+	arg->cid = ctx->base.hash.key;
+out_err:
+	vmw_resource_unreference(&res);
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
new file mode 100644
index 0000000..5fae06a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -0,0 +1,320 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
+
+
+/**
+ * vmw_dmabuf_to_placement - Validate a buffer to placement.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @placement:  The placement to validate the buffer to.
+ * @interruptible:  Use interruptible wait.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * Returns
+ *  -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
+			    struct vmw_dma_buffer *buf,
+			    struct ttm_placement *placement,
+			    bool interruptible)
+{
+	struct vmw_master *vmaster = dev_priv->active_master;
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+
+	ret = ttm_write_lock(&vmaster->lock, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_execbuf_release_pinned_bo(dev_priv);
+
+	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+	if (unlikely(ret != 0))
+		goto err;
+
+	ret = ttm_bo_validate(bo, placement, interruptible, false);
+
+	ttm_bo_unreserve(bo);
+
+err:
+	ttm_write_unlock(&vmaster->lock);
+	return ret;
+}
+
+/**
+ * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo if @pin == true to avoid failures.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @pin:  Pin buffer if true.
+ * @interruptible:  Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
+			      struct vmw_dma_buffer *buf,
+			      bool pin, bool interruptible)
+{
+	struct vmw_master *vmaster = dev_priv->active_master;
+	struct ttm_buffer_object *bo = &buf->base;
+	struct ttm_placement *placement;
+	int ret;
+
+	ret = ttm_write_lock(&vmaster->lock, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (pin)
+		vmw_execbuf_release_pinned_bo(dev_priv);
+
+	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+	if (unlikely(ret != 0))
+		goto err;
+
+	/**
+	 * Put BO in VRAM if there is space, otherwise as a GMR.
+	 * If there is no space in VRAM and GMR ids are all used up,
+	 * start evicting GMRs to make room. If the DMA buffer can't be
+	 * used as a GMR, this will return -ENOMEM.
+	 */
+
+	if (pin)
+		placement = &vmw_vram_gmr_ne_placement;
+	else
+		placement = &vmw_vram_gmr_placement;
+
+	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	if (likely(ret == 0) || ret == -ERESTARTSYS)
+		goto err_unreserve;
+
+
+	/**
+	 * If that failed, try VRAM again, this time evicting
+	 * previous contents.
+	 */
+
+	if (pin)
+		placement = &vmw_vram_ne_placement;
+	else
+		placement = &vmw_vram_placement;
+
+	ret = ttm_bo_validate(bo, placement, interruptible, false);
+
+err_unreserve:
+	ttm_bo_unreserve(bo);
+err:
+	ttm_write_unlock(&vmaster->lock);
+	return ret;
+}
+
+/**
+ * vmw_dmabuf_to_vram - Move a buffer to vram.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @pin:  Pin buffer in vram if true.
+ * @interruptible:  Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
+		       struct vmw_dma_buffer *buf,
+		       bool pin, bool interruptible)
+{
+	struct ttm_placement *placement;
+
+	if (pin)
+		placement = &vmw_vram_ne_placement;
+	else
+		placement = &vmw_vram_placement;
+
+	return vmw_dmabuf_to_placement(dev_priv, buf,
+				       placement,
+				       interruptible);
+}
+
+/**
+ * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo if @pin == true to avoid failures.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @pin:  Pin buffer in vram if true.
+ * @interruptible:  Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
+				struct vmw_dma_buffer *buf,
+				bool pin, bool interruptible)
+{
+	struct vmw_master *vmaster = dev_priv->active_master;
+	struct ttm_buffer_object *bo = &buf->base;
+	struct ttm_placement placement;
+	int ret = 0;
+
+	if (pin)
+		placement = vmw_vram_ne_placement;
+	else
+		placement = vmw_vram_placement;
+	placement.lpfn = bo->num_pages;
+
+	ret = ttm_write_lock(&vmaster->lock, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (pin)
+		vmw_execbuf_release_pinned_bo(dev_priv);
+	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+	if (unlikely(ret != 0))
+		goto err_unlock;
+
+	/* Is this buffer already in vram but not at the start of it? */
+	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	    bo->mem.start < bo->num_pages &&
+	    bo->mem.start > 0)
+		(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+
+	ret = ttm_bo_validate(bo, &placement, interruptible, false);
+
+	/* For some reason we didn't end up at the start of vram */
+	WARN_ON(ret == 0 && bo->offset != 0);
+
+	ttm_bo_unreserve(bo);
+err_unlock:
+	ttm_write_unlock(&vmaster->lock);
+
+	return ret;
+}
+
+
+/**
+ * vmw_dmabuf_unpin - Unpin the given buffer; does not move the buffer.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to unpin.
+ * @interruptible:  Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
+		     struct vmw_dma_buffer *buf,
+		     bool interruptible)
+{
+	/*
+	 * We could in theory early out if the buffer is
+	 * unpinned but we need to lock and reserve the buffer
+	 * anyway, so we don't gain much by that.
+	 */
+	return vmw_dmabuf_to_placement(dev_priv, buf,
+				       &vmw_evictable_placement,
+				       interruptible);
+}
+
+
+/**
+ * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
+ * of a buffer.
+ *
+ * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
+ * @ptr: SVGAGuestPtr returning the result.
+ */
+void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
+			  SVGAGuestPtr *ptr)
+{
+	if (bo->mem.mem_type == TTM_PL_VRAM) {
+		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
+		ptr->offset = bo->offset;
+	} else {
+		ptr->gmrId = bo->mem.start;
+		ptr->offset = 0;
+	}
+}
+
+
+/**
+ * vmw_bo_pin - Pin or unpin a buffer object without moving it.
+ *
+ * @bo: The buffer object. Must be reserved, and present either in VRAM
+ * or GMR memory.
+ * @pin: Whether to pin or unpin.
+ *
+ */
+void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
+{
+	uint32_t pl_flags;
+	struct ttm_placement placement;
+	uint32_t old_mem_type = bo->mem.mem_type;
+	int ret;
+
+	BUG_ON(!ttm_bo_is_reserved(bo));
+	BUG_ON(old_mem_type != TTM_PL_VRAM &&
+	       old_mem_type != VMW_PL_GMR);
+
+	pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
+	if (pin)
+		pl_flags |= TTM_PL_FLAG_NO_EVICT;
+
+	memset(&placement, 0, sizeof(placement));
+	placement.num_placement = 1;
+	placement.placement = &pl_flags;
+
+	ret = ttm_bo_validate(bo, &placement, false, true);
+
+	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
new file mode 100644
index 0000000..6c44c69
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -0,0 +1,1215 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_object.h>
+#include <drm/ttm/ttm_module.h>
+
+#define VMWGFX_DRIVER_NAME "vmwgfx"
+#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
+#define VMWGFX_CHIP_SVGAII 0
+#define VMW_FB_RESERVATION 0
+
+#define VMW_MIN_INITIAL_WIDTH 800
+#define VMW_MIN_INITIAL_HEIGHT 600
+
+
+/**
+ * Fully encoded drm commands. Might move to vmw_drm.h
+ */
+
+#define DRM_IOCTL_VMW_GET_PARAM					\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
+		 struct drm_vmw_getparam_arg)
+#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
+		union drm_vmw_alloc_dmabuf_arg)
+#define DRM_IOCTL_VMW_UNREF_DMABUF				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
+		struct drm_vmw_unref_dmabuf_arg)
+#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
+		 struct drm_vmw_cursor_bypass_arg)
+
+#define DRM_IOCTL_VMW_CONTROL_STREAM				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
+		 struct drm_vmw_control_stream_arg)
+#define DRM_IOCTL_VMW_CLAIM_STREAM				\
+	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
+		 struct drm_vmw_stream_arg)
+#define DRM_IOCTL_VMW_UNREF_STREAM				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
+		 struct drm_vmw_stream_arg)
+
+#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
+	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
+		struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
+		struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_CREATE_SURFACE				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
+		 union drm_vmw_surface_create_arg)
+#define DRM_IOCTL_VMW_UNREF_SURFACE				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
+		 struct drm_vmw_surface_arg)
+#define DRM_IOCTL_VMW_REF_SURFACE				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
+		 union drm_vmw_surface_reference_arg)
+#define DRM_IOCTL_VMW_EXECBUF					\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
+		struct drm_vmw_execbuf_arg)
+#define DRM_IOCTL_VMW_GET_3D_CAP				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
+		 struct drm_vmw_get_3d_cap_arg)
+#define DRM_IOCTL_VMW_FENCE_WAIT				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
+		 struct drm_vmw_fence_wait_arg)
+#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
+		 struct drm_vmw_fence_signaled_arg)
+#define DRM_IOCTL_VMW_FENCE_UNREF				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
+		 struct drm_vmw_fence_arg)
+#define DRM_IOCTL_VMW_FENCE_EVENT				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
+		 struct drm_vmw_fence_event_arg)
+#define DRM_IOCTL_VMW_PRESENT					\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
+		 struct drm_vmw_present_arg)
+#define DRM_IOCTL_VMW_PRESENT_READBACK				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
+		 struct drm_vmw_present_readback_arg)
+#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
+		 struct drm_vmw_update_layout_arg)
+
+/**
+ * The core DRM version of this macro doesn't account for
+ * DRM_COMMAND_BASE.
+ */
+
+#define VMW_IOCTL_DEF(ioctl, func, flags) \
+  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
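+
+/*
+ * As an illustration (not part of the original source), the entry
+ *
+ *	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
+ *		      DRM_AUTH | DRM_UNLOCKED)
+ *
+ * expands to an initializer at array index
+ * DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE, recording the
+ * command number, flags, handler and the fully encoded ioctl, the last of
+ * which is checked against the incoming cmd in vmw_unlocked_ioctl().
+ */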
+
+/**
+ * Ioctl definitions.
+ */
+
+static struct drm_ioctl_desc vmw_ioctls[] = {
+	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
+		      vmw_kms_cursor_bypass_ioctl,
+		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+
+	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
+		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
+		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
+		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+
+	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
+		      vmw_fence_obj_signaled_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_FENCE_EVENT,
+		      vmw_fence_event_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+
+	/* These ioctls give direct framebuffer access; master-only. */
+	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
+		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
+		      vmw_present_readback_ioctl,
+		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
+		      vmw_kms_update_layout_ioctl,
+		      DRM_MASTER | DRM_UNLOCKED),
+};
+
+static struct pci_device_id vmw_pci_id_list[] = {
+	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
+	{0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
+
+static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
+
+static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
+static void vmw_master_init(struct vmw_master *);
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+			      void *ptr);
+
+MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
+module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+
+static void vmw_print_capabilities(uint32_t capabilities)
+{
+	DRM_INFO("Capabilities:\n");
+	if (capabilities & SVGA_CAP_RECT_COPY)
+		DRM_INFO("  Rect copy.\n");
+	if (capabilities & SVGA_CAP_CURSOR)
+		DRM_INFO("  Cursor.\n");
+	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
+		DRM_INFO("  Cursor bypass.\n");
+	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
+		DRM_INFO("  Cursor bypass 2.\n");
+	if (capabilities & SVGA_CAP_8BIT_EMULATION)
+		DRM_INFO("  8bit emulation.\n");
+	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
+		DRM_INFO("  Alpha cursor.\n");
+	if (capabilities & SVGA_CAP_3D)
+		DRM_INFO("  3D.\n");
+	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
+		DRM_INFO("  Extended Fifo.\n");
+	if (capabilities & SVGA_CAP_MULTIMON)
+		DRM_INFO("  Multimon.\n");
+	if (capabilities & SVGA_CAP_PITCHLOCK)
+		DRM_INFO("  Pitchlock.\n");
+	if (capabilities & SVGA_CAP_IRQMASK)
+		DRM_INFO("  Irq mask.\n");
+	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
+		DRM_INFO("  Display Topology.\n");
+	if (capabilities & SVGA_CAP_GMR)
+		DRM_INFO("  GMR.\n");
+	if (capabilities & SVGA_CAP_TRACES)
+		DRM_INFO("  Traces.\n");
+	if (capabilities & SVGA_CAP_GMR2)
+		DRM_INFO("  GMR2.\n");
+	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
+		DRM_INFO("  Screen Object 2.\n");
+}
+
+
+/**
+ * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at
+ * the start of a buffer object.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function will idle the buffer using an uninterruptible wait, then
+ * map the first page and initialize a pending occlusion query result
+ * structure. Finally, it will unmap the buffer.
+ *
+ * TODO: Since we're only mapping a single page, we should optimize the map
+ * to use kmap_atomic / iomap_atomic.
+ */
+static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
+{
+	struct ttm_bo_kmap_obj map;
+	volatile SVGA3dQueryResult *result;
+	bool dummy;
+	int ret;
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+
+	ttm_bo_reserve(bo, false, false, false, 0);
+	spin_lock(&bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, false, false);
+	spin_unlock(&bdev->fence_lock);
+	if (unlikely(ret != 0))
+		(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
+					 10*HZ);
+
+	ret = ttm_bo_kmap(bo, 0, 1, &map);
+	if (likely(ret == 0)) {
+		result = ttm_kmap_obj_virtual(&map, &dummy);
+		result->totalSize = sizeof(*result);
+		result->state = SVGA3D_QUERYSTATE_PENDING;
+		result->result32 = 0xff;
+		ttm_bo_kunmap(&map);
+	} else
+		DRM_ERROR("Dummy query buffer map failed.\n");
+	ttm_bo_unreserve(bo);
+}
+
+
+/**
+ * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
+ *
+ * @dev_priv: A device private structure.
+ *
+ * This function creates a small buffer object that holds the query
+ * result for dummy queries emitted as query barriers.
+ * No interruptible waits are done within this function.
+ *
+ * Returns an error if bo creation fails.
+ */
+static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
+{
+	return ttm_bo_create(&dev_priv->bdev,
+			     PAGE_SIZE,
+			     ttm_bo_type_device,
+			     &vmw_vram_sys_placement,
+			     0, false, NULL,
+			     &dev_priv->dummy_query_bo);
+}
+
+
+static int vmw_request_device(struct vmw_private *dev_priv)
+{
+	int ret;
+
+	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Unable to initialize FIFO.\n");
+		return ret;
+	}
+	vmw_fence_fifo_up(dev_priv->fman);
+	ret = vmw_dummy_query_bo_create(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_no_query_bo;
+	vmw_dummy_query_bo_prepare(dev_priv);
+
+	return 0;
+
+out_no_query_bo:
+	vmw_fence_fifo_down(dev_priv->fman);
+	vmw_fifo_release(dev_priv, &dev_priv->fifo);
+	return ret;
+}
+
+static void vmw_release_device(struct vmw_private *dev_priv)
+{
+	/*
+	 * Previous destructions should've released
+	 * the pinned bo.
+	 */
+
+	BUG_ON(dev_priv->pinned_bo != NULL);
+
+	ttm_bo_unref(&dev_priv->dummy_query_bo);
+	vmw_fence_fifo_down(dev_priv->fman);
+	vmw_fifo_release(dev_priv, &dev_priv->fifo);
+}
+
+/**
+ * Increase the 3d resource refcount.
+ * If the count was previously zero, initialize the fifo, switching to svga
+ * mode. Note that the master holds a ref as well, and may request an
+ * explicit switch to svga mode if fb is not running, using @unhide_svga.
+ */
+int vmw_3d_resource_inc(struct vmw_private *dev_priv,
+			bool unhide_svga)
+{
+	int ret = 0;
+
+	mutex_lock(&dev_priv->release_mutex);
+	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
+		ret = vmw_request_device(dev_priv);
+		if (unlikely(ret != 0))
+			--dev_priv->num_3d_resources;
+	} else if (unhide_svga) {
+		mutex_lock(&dev_priv->hw_mutex);
+		vmw_write(dev_priv, SVGA_REG_ENABLE,
+			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
+			  ~SVGA_REG_ENABLE_HIDE);
+		mutex_unlock(&dev_priv->hw_mutex);
+	}
+
+	mutex_unlock(&dev_priv->release_mutex);
+	return ret;
+}
+
+/**
+ * Decrease the 3d resource refcount.
+ * If the count reaches zero, disable the fifo, switching to vga mode.
+ * Note that the master holds a refcount as well, and may request an
+ * explicit switch to vga mode when it releases its refcount to account
+ * for the situation of an X server vt switch to VGA with 3d resources
+ * active.
+ */
+void vmw_3d_resource_dec(struct vmw_private *dev_priv,
+			 bool hide_svga)
+{
+	int32_t n3d;
+
+	mutex_lock(&dev_priv->release_mutex);
+	if (unlikely(--dev_priv->num_3d_resources == 0))
+		vmw_release_device(dev_priv);
+	else if (hide_svga) {
+		mutex_lock(&dev_priv->hw_mutex);
+		vmw_write(dev_priv, SVGA_REG_ENABLE,
+			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
+			  SVGA_REG_ENABLE_HIDE);
+		mutex_unlock(&dev_priv->hw_mutex);
+	}
+
+	n3d = (int32_t) dev_priv->num_3d_resources;
+	mutex_unlock(&dev_priv->release_mutex);
+
+	BUG_ON(n3d < 0);
+}
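+
+/*
+ * A minimal sketch of the intended pairing (illustrative, not part of the
+ * original driver): every successful vmw_3d_resource_inc() must eventually
+ * be balanced by a vmw_3d_resource_dec():
+ *
+ *	ret = vmw_3d_resource_inc(dev_priv, false);
+ *	if (unlikely(ret != 0))
+ *		return ret;
+ *	// ... use the fifo / 3D resources ...
+ *	vmw_3d_resource_dec(dev_priv, false);
+ *
+ * The first increment brings up the fifo via vmw_request_device(); the
+ * last decrement tears it down via vmw_release_device().
+ */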
+
+/**
+ * Sets the initial_[width|height] fields on the given vmw_private.
+ *
+ * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
+ * clamping the values up to at least VMW_MIN_INITIAL_[WIDTH|HEIGHT].
+ * If the resulting values exceed fb_max_[width|height], which indicates
+ * a host error, both are reset to VMW_MIN_INITIAL_[WIDTH|HEIGHT].
+ */
+static void vmw_get_initial_size(struct vmw_private *dev_priv)
+{
+	uint32_t width;
+	uint32_t height;
+
+	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
+	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
+
+	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
+	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
+
+	if (width > dev_priv->fb_max_width ||
+	    height > dev_priv->fb_max_height) {
+
+		/*
+		 * This is a host error and shouldn't occur.
+		 */
+
+		width = VMW_MIN_INITIAL_WIDTH;
+		height = VMW_MIN_INITIAL_HEIGHT;
+	}
+
+	dev_priv->initial_width = width;
+	dev_priv->initial_height = height;
+}
+
+static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+	struct vmw_private *dev_priv;
+	int ret;
+	uint32_t svga_id;
+	enum vmw_res_type i;
+
+	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+	if (unlikely(dev_priv == NULL)) {
+		DRM_ERROR("Failed allocating a device private struct.\n");
+		return -ENOMEM;
+	}
+
+	pci_set_master(dev->pdev);
+
+	dev_priv->dev = dev;
+	dev_priv->vmw_chipset = chipset;
+	dev_priv->last_read_seqno = (uint32_t) -100;
+	mutex_init(&dev_priv->hw_mutex);
+	mutex_init(&dev_priv->cmdbuf_mutex);
+	mutex_init(&dev_priv->release_mutex);
+	rwlock_init(&dev_priv->resource_lock);
+
+	for (i = vmw_res_context; i < vmw_res_max; ++i) {
+		idr_init(&dev_priv->res_idr[i]);
+		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
+	}
+
+	mutex_init(&dev_priv->init_mutex);
+	init_waitqueue_head(&dev_priv->fence_queue);
+	init_waitqueue_head(&dev_priv->fifo_queue);
+	dev_priv->fence_queue_waiters = 0;
+	atomic_set(&dev_priv->fifo_queue_waiters, 0);
+
+	dev_priv->used_memory_size = 0;
+
+	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
+	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
+	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+
+	dev_priv->enable_fb = enable_fbdev;
+
+	mutex_lock(&dev_priv->hw_mutex);
+
+	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
+	if (svga_id != SVGA_ID_2) {
+		ret = -ENOSYS;
+		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
+		mutex_unlock(&dev_priv->hw_mutex);
+		goto out_err0;
+	}
+
+	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+
+	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
+	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
+	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
+
+	vmw_get_initial_size(dev_priv);
+
+	if (dev_priv->capabilities & SVGA_CAP_GMR) {
+		dev_priv->max_gmr_descriptors =
+			vmw_read(dev_priv,
+				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
+		dev_priv->max_gmr_ids =
+			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
+	}
+	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+		dev_priv->max_gmr_pages =
+			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
+		dev_priv->memory_size =
+			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
+		dev_priv->memory_size -= dev_priv->vram_size;
+	} else {
+		/*
+		 * An arbitrary limit of 512MiB on surface
+		 * memory. But all HWV8 hardware supports GMR2.
+		 */
+		dev_priv->memory_size = 512*1024*1024;
+	}
+
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	vmw_print_capabilities(dev_priv->capabilities);
+
+	if (dev_priv->capabilities & SVGA_CAP_GMR) {
+		DRM_INFO("Max GMR ids is %u\n",
+			 (unsigned)dev_priv->max_gmr_ids);
+		DRM_INFO("Max GMR descriptors is %u\n",
+			 (unsigned)dev_priv->max_gmr_descriptors);
+	}
+	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+		DRM_INFO("Max number of GMR pages is %u\n",
+			 (unsigned)dev_priv->max_gmr_pages);
+		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
+			 (unsigned)dev_priv->memory_size / 1024);
+	}
+	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
+		 dev_priv->vram_start, dev_priv->vram_size / 1024);
+	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
+		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
+
+	ret = vmw_ttm_global_init(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_err0;
+
+
+	vmw_master_init(&dev_priv->fbdev_master);
+	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
+	dev_priv->active_master = &dev_priv->fbdev_master;
+
+
+	ret = ttm_bo_device_init(&dev_priv->bdev,
+				 dev_priv->bo_global_ref.ref.object,
+				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
+				 false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
+		goto out_err1;
+	}
+
+	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+			     (dev_priv->vram_size >> PAGE_SHIFT));
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+		goto out_err2;
+	}
+
+	dev_priv->has_gmr = true;
+	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+			   dev_priv->max_gmr_ids) != 0) {
+		DRM_INFO("No GMR memory available. "
+			 "Graphics memory resources are very limited.\n");
+		dev_priv->has_gmr = false;
+	}
+
+	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
+					   dev_priv->mmio_size, DRM_MTRR_WC);
+
+	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
+					 dev_priv->mmio_size);
+
+	if (unlikely(dev_priv->mmio_virt == NULL)) {
+		ret = -ENOMEM;
+		DRM_ERROR("Failed mapping MMIO.\n");
+		goto out_err3;
+	}
+
+	/* Need mmio memory to check for fifo pitchlock cap. */
+	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
+	    !vmw_fifo_have_pitchlock(dev_priv)) {
+		ret = -ENOSYS;
+		DRM_ERROR("Hardware has no pitchlock\n");
+		goto out_err4;
+	}
+
+	dev_priv->tdev = ttm_object_device_init
+	    (dev_priv->mem_global_ref.object, 12);
+
+	if (unlikely(dev_priv->tdev == NULL)) {
+		DRM_ERROR("Unable to initialize TTM object management.\n");
+		ret = -ENOMEM;
+		goto out_err4;
+	}
+
+	dev->dev_private = dev_priv;
+
+	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
+	dev_priv->stealth = (ret != 0);
+	if (dev_priv->stealth) {
+		/**
+		 * Request at least the mmio PCI resource.
+		 */
+
+		DRM_INFO("It appears like vesafb is loaded. "
+			 "Ignore above error if any.\n");
+		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
+			goto out_no_device;
+		}
+	}
+
+	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+		ret = drm_irq_install(dev);
+		if (ret != 0) {
+			DRM_ERROR("Failed installing irq: %d\n", ret);
+			goto out_no_irq;
+		}
+	}
+
+	dev_priv->fman = vmw_fence_manager_init(dev_priv);
+	if (unlikely(dev_priv->fman == NULL))
+		goto out_no_fman;
+
+	vmw_kms_save_vga(dev_priv);
+
+	/* Start kms and overlay systems, needs fifo. */
+	ret = vmw_kms_init(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_no_kms;
+	vmw_overlay_init(dev_priv);
+
+	if (dev_priv->enable_fb) {
+		ret = vmw_3d_resource_inc(dev_priv, true);
+		if (unlikely(ret != 0))
+			goto out_no_fifo;
+		vmw_fb_init(dev_priv);
+	}
+
+	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
+	register_pm_notifier(&dev_priv->pm_nb);
+
+	return 0;
+
+out_no_fifo:
+	vmw_overlay_close(dev_priv);
+	vmw_kms_close(dev_priv);
+out_no_kms:
+	vmw_kms_restore_vga(dev_priv);
+	vmw_fence_manager_takedown(dev_priv->fman);
+out_no_fman:
+	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+		drm_irq_uninstall(dev_priv->dev);
+out_no_irq:
+	if (dev_priv->stealth)
+		pci_release_region(dev->pdev, 2);
+	else
+		pci_release_regions(dev->pdev);
+out_no_device:
+	ttm_object_device_release(&dev_priv->tdev);
+out_err4:
+	iounmap(dev_priv->mmio_virt);
+out_err3:
+	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
+		     dev_priv->mmio_size, DRM_MTRR_WC);
+	if (dev_priv->has_gmr)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+out_err2:
+	(void)ttm_bo_device_release(&dev_priv->bdev);
+out_err1:
+	vmw_ttm_global_release(dev_priv);
+out_err0:
+	for (i = vmw_res_context; i < vmw_res_max; ++i)
+		idr_destroy(&dev_priv->res_idr[i]);
+
+	kfree(dev_priv);
+	return ret;
+}
+
+static int vmw_driver_unload(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	enum vmw_res_type i;
+
+	unregister_pm_notifier(&dev_priv->pm_nb);
+
+	if (dev_priv->ctx.res_ht_initialized)
+		drm_ht_remove(&dev_priv->ctx.res_ht);
+	if (dev_priv->ctx.cmd_bounce)
+		vfree(dev_priv->ctx.cmd_bounce);
+	if (dev_priv->enable_fb) {
+		vmw_fb_close(dev_priv);
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv, false);
+	}
+	vmw_kms_close(dev_priv);
+	vmw_overlay_close(dev_priv);
+	vmw_fence_manager_takedown(dev_priv->fman);
+	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+		drm_irq_uninstall(dev_priv->dev);
+	if (dev_priv->stealth)
+		pci_release_region(dev->pdev, 2);
+	else
+		pci_release_regions(dev->pdev);
+
+	ttm_object_device_release(&dev_priv->tdev);
+	iounmap(dev_priv->mmio_virt);
+	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
+		     dev_priv->mmio_size, DRM_MTRR_WC);
+	if (dev_priv->has_gmr)
+		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+	(void)ttm_bo_device_release(&dev_priv->bdev);
+	vmw_ttm_global_release(dev_priv);
+
+	for (i = vmw_res_context; i < vmw_res_max; ++i)
+		idr_destroy(&dev_priv->res_idr[i]);
+
+	kfree(dev_priv);
+
+	return 0;
+}
+
+static void vmw_preclose(struct drm_device *dev,
+			 struct drm_file *file_priv)
+{
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
+}
+
+static void vmw_postclose(struct drm_device *dev,
+			 struct drm_file *file_priv)
+{
+	struct vmw_fpriv *vmw_fp;
+
+	vmw_fp = vmw_fpriv(file_priv);
+
+	if (vmw_fp->locked_master) {
+		struct vmw_master *vmaster =
+			vmw_master(vmw_fp->locked_master);
+
+		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+		ttm_vt_unlock(&vmaster->lock);
+		drm_master_put(&vmw_fp->locked_master);
+	}
+
+	ttm_object_file_release(&vmw_fp->tfile);
+	kfree(vmw_fp);
+}
+
+static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_fpriv *vmw_fp;
+	int ret = -ENOMEM;
+
+	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
+	if (unlikely(vmw_fp == NULL))
+		return ret;
+
+	INIT_LIST_HEAD(&vmw_fp->fence_events);
+	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
+	if (unlikely(vmw_fp->tfile == NULL))
+		goto out_no_tfile;
+
+	file_priv->driver_priv = vmw_fp;
+	dev_priv->bdev.dev_mapping = dev->dev_mapping;
+
+	return 0;
+
+out_no_tfile:
+	kfree(vmw_fp);
+	return ret;
+}
+
+static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
+			       unsigned long arg)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev = file_priv->minor->dev;
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+
+	/*
+	 * Do extra checking on driver private ioctls.
+	 */
+
+	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
+	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+		struct drm_ioctl_desc *ioctl =
+		    &vmw_ioctls[nr - DRM_COMMAND_BASE];
+
+		if (unlikely(ioctl->cmd_drv != cmd)) {
+			DRM_ERROR("Invalid command format, ioctl %d\n",
+				  nr - DRM_COMMAND_BASE);
+			return -EINVAL;
+		}
+	}
+
+	return drm_ioctl(filp, cmd, arg);
+}
+
+static int vmw_firstopen(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	dev_priv->is_opened = true;
+
+	return 0;
+}
+
+static void vmw_lastclose(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_crtc *crtc;
+	struct drm_mode_set set;
+	int ret;
+
+	/**
+	 * Do nothing on the lastclose call from drm_unload.
+	 */
+
+	if (!dev_priv->is_opened)
+		return;
+
+	dev_priv->is_opened = false;
+	set.x = 0;
+	set.y = 0;
+	set.fb = NULL;
+	set.mode = NULL;
+	set.connectors = NULL;
+	set.num_connectors = 0;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		set.crtc = crtc;
+		ret = drm_mode_set_config_internal(&set);
+		WARN_ON(ret != 0);
+	}
+}
+
+static void vmw_master_init(struct vmw_master *vmaster)
+{
+	ttm_lock_init(&vmaster->lock);
+	INIT_LIST_HEAD(&vmaster->fb_surf);
+	mutex_init(&vmaster->fb_surf_mutex);
+}
+
+static int vmw_master_create(struct drm_device *dev,
+			     struct drm_master *master)
+{
+	struct vmw_master *vmaster;
+
+	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
+	if (unlikely(vmaster == NULL))
+		return -ENOMEM;
+
+	vmw_master_init(vmaster);
+	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+	master->driver_priv = vmaster;
+
+	return 0;
+}
+
+static void vmw_master_destroy(struct drm_device *dev,
+			       struct drm_master *master)
+{
+	struct vmw_master *vmaster = vmw_master(master);
+
+	master->driver_priv = NULL;
+	kfree(vmaster);
+}
+
+
+static int vmw_master_set(struct drm_device *dev,
+			  struct drm_file *file_priv,
+			  bool from_open)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+	struct vmw_master *active = dev_priv->active_master;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	int ret = 0;
+
+	if (!dev_priv->enable_fb) {
+		ret = vmw_3d_resource_inc(dev_priv, true);
+		if (unlikely(ret != 0))
+			return ret;
+		vmw_kms_save_vga(dev_priv);
+		mutex_lock(&dev_priv->hw_mutex);
+		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
+		mutex_unlock(&dev_priv->hw_mutex);
+	}
+
+	if (active) {
+		BUG_ON(active != &dev_priv->fbdev_master);
+		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
+		if (unlikely(ret != 0))
+			goto out_no_active_lock;
+
+		ttm_lock_set_kill(&active->lock, true, SIGTERM);
+		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Unable to clean VRAM on "
+				  "master drop.\n");
+		}
+
+		dev_priv->active_master = NULL;
+	}
+
+	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
+	if (!from_open) {
+		ttm_vt_unlock(&vmaster->lock);
+		BUG_ON(vmw_fp->locked_master != file_priv->master);
+		drm_master_put(&vmw_fp->locked_master);
+	}
+
+	dev_priv->active_master = vmaster;
+
+	return 0;
+
+out_no_active_lock:
+	if (!dev_priv->enable_fb) {
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv, true);
+		mutex_lock(&dev_priv->hw_mutex);
+		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+		mutex_unlock(&dev_priv->hw_mutex);
+	}
+	return ret;
+}
+
+static void vmw_master_drop(struct drm_device *dev,
+			    struct drm_file *file_priv,
+			    bool from_release)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	int ret;
+
+	/**
+	 * Make sure the master doesn't disappear while we have
+	 * it locked.
+	 */
+
+	vmw_fp->locked_master = drm_master_get(file_priv->master);
+	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Unable to lock TTM at VT switch.\n");
+		drm_master_put(&vmw_fp->locked_master);
+	}
+
+	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
+	vmw_execbuf_release_pinned_bo(dev_priv);
+
+	if (!dev_priv->enable_fb) {
+		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+		if (unlikely(ret != 0))
+			DRM_ERROR("Unable to clean VRAM on master drop.\n");
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv, true);
+		mutex_lock(&dev_priv->hw_mutex);
+		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+		mutex_unlock(&dev_priv->hw_mutex);
+	}
+
+	dev_priv->active_master = &dev_priv->fbdev_master;
+	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
+	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
+
+	if (dev_priv->enable_fb)
+		vmw_fb_on(dev_priv);
+}
+
+
+static void vmw_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	drm_put_dev(dev);
+}
+
+static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
+			      void *ptr)
+{
+	struct vmw_private *dev_priv =
+		container_of(nb, struct vmw_private, pm_nb);
+	struct vmw_master *vmaster = dev_priv->active_master;
+
+	switch (val) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		ttm_suspend_lock(&vmaster->lock);
+
+		/**
+		 * This empties VRAM and unbinds all GMR bindings.
+		 * Buffer contents are moved to swappable memory.
+		 */
+		vmw_execbuf_release_pinned_bo(dev_priv);
+		vmw_resource_evict_all(dev_priv);
+		ttm_bo_swapout_all(&dev_priv->bdev);
+
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+	case PM_POST_RESTORE:
+		ttm_suspend_unlock(&vmaster->lock);
+
+		break;
+	case PM_RESTORE_PREPARE:
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/**
+ * These might not be needed with the virtual SVGA device.
+ */
+
+static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	if (dev_priv->num_3d_resources != 0) {
+		DRM_INFO("Can't suspend or hibernate "
+			 "while 3D resources are active.\n");
+		return -EBUSY;
+	}
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+	return 0;
+}
+
+static int vmw_pci_resume(struct pci_dev *pdev)
+{
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	return pci_enable_device(pdev);
+}
+
+static int vmw_pm_suspend(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct pm_message dummy;
+
+	dummy.event = 0;
+
+	return vmw_pci_suspend(pdev, dummy);
+}
+
+static int vmw_pm_resume(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+
+	return vmw_pci_resume(pdev);
+}
+
+static int vmw_pm_prepare(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	/**
+	 * Release 3d reference held by fbdev and potentially
+	 * stop fifo.
+	 */
+	dev_priv->suspended = true;
+	if (dev_priv->enable_fb)
+		vmw_3d_resource_dec(dev_priv, true);
+
+	if (dev_priv->num_3d_resources != 0) {
+
+		DRM_INFO("Can't suspend or hibernate "
+			 "while 3D resources are active.\n");
+
+		if (dev_priv->enable_fb)
+			vmw_3d_resource_inc(dev_priv, true);
+		dev_priv->suspended = false;
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void vmw_pm_complete(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	mutex_lock(&dev_priv->hw_mutex);
+	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+	(void) vmw_read(dev_priv, SVGA_REG_ID);
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	/**
+	 * Reclaim 3d reference held by fbdev and potentially
+	 * start fifo.
+	 */
+	if (dev_priv->enable_fb)
+		vmw_3d_resource_inc(dev_priv, false);
+
+	dev_priv->suspended = false;
+}
+
+static const struct dev_pm_ops vmw_pm_ops = {
+	.prepare = vmw_pm_prepare,
+	.complete = vmw_pm_complete,
+	.suspend = vmw_pm_suspend,
+	.resume = vmw_pm_resume,
+};
+
+static const struct file_operations vmwgfx_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = vmw_unlocked_ioctl,
+	.mmap = vmw_mmap,
+	.poll = vmw_fops_poll,
+	.read = vmw_fops_read,
+	.fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+	DRIVER_MODESET,
+	.load = vmw_driver_load,
+	.unload = vmw_driver_unload,
+	.firstopen = vmw_firstopen,
+	.lastclose = vmw_lastclose,
+	.irq_preinstall = vmw_irq_preinstall,
+	.irq_postinstall = vmw_irq_postinstall,
+	.irq_uninstall = vmw_irq_uninstall,
+	.irq_handler = vmw_irq_handler,
+	.get_vblank_counter = vmw_get_vblank_counter,
+	.enable_vblank = vmw_enable_vblank,
+	.disable_vblank = vmw_disable_vblank,
+	.ioctls = vmw_ioctls,
+	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
+	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
+	.master_create = vmw_master_create,
+	.master_destroy = vmw_master_destroy,
+	.master_set = vmw_master_set,
+	.master_drop = vmw_master_drop,
+	.open = vmw_driver_open,
+	.preclose = vmw_preclose,
+	.postclose = vmw_postclose,
+
+	.dumb_create = vmw_dumb_create,
+	.dumb_map_offset = vmw_dumb_map_offset,
+	.dumb_destroy = vmw_dumb_destroy,
+
+	.fops = &vmwgfx_driver_fops,
+	.name = VMWGFX_DRIVER_NAME,
+	.desc = VMWGFX_DRIVER_DESC,
+	.date = VMWGFX_DRIVER_DATE,
+	.major = VMWGFX_DRIVER_MAJOR,
+	.minor = VMWGFX_DRIVER_MINOR,
+	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
+};
+
+static struct pci_driver vmw_pci_driver = {
+	.name = VMWGFX_DRIVER_NAME,
+	.id_table = vmw_pci_id_list,
+	.probe = vmw_probe,
+	.remove = vmw_remove,
+	.driver = {
+		.pm = &vmw_pm_ops
+	}
+};
+
+static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static int __init vmwgfx_init(void)
+{
+	int ret;
+
+	ret = drm_pci_init(&driver, &vmw_pci_driver);
+	if (ret)
+		DRM_ERROR("Failed initializing DRM.\n");
+	return ret;
+}
+
+static void __exit vmwgfx_exit(void)
+{
+	drm_pci_exit(&driver, &vmw_pci_driver);
+}
+
+module_init(vmwgfx_init);
+module_exit(vmwgfx_exit);
+
+MODULE_AUTHOR("VMware Inc. and others");
+MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
+MODULE_LICENSE("GPL and additional rights");
+MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
+	       __stringify(VMWGFX_DRIVER_MINOR) "."
+	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
+	       "0");
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
new file mode 100644
index 0000000..13aeda7
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -0,0 +1,768 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_DRV_H_
+#define _VMWGFX_DRV_H_
+
+#include "vmwgfx_reg.h"
+#include <drm/drmP.h>
+#include <drm/vmwgfx_drm.h>
+#include <drm/drm_hashtab.h>
+#include <linux/suspend.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_object.h>
+#include <drm/ttm/ttm_lock.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_module.h>
+#include "vmwgfx_fence.h"
+
+#define VMWGFX_DRIVER_DATE "20120209"
+#define VMWGFX_DRIVER_MAJOR 2
+#define VMWGFX_DRIVER_MINOR 4
+#define VMWGFX_DRIVER_PATCHLEVEL 0
+#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
+#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
+#define VMWGFX_MAX_RELOCATIONS 2048
+#define VMWGFX_MAX_VALIDATIONS 2048
+#define VMWGFX_MAX_DISPLAYS 16
+#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
+
+#define VMW_PL_GMR TTM_PL_PRIV0
+#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
+
+#define VMW_RES_CONTEXT ttm_driver_type0
+#define VMW_RES_SURFACE ttm_driver_type1
+#define VMW_RES_STREAM ttm_driver_type2
+#define VMW_RES_FENCE ttm_driver_type3
+
+struct vmw_fpriv {
+	struct drm_master *locked_master;
+	struct ttm_object_file *tfile;
+	struct list_head fence_events;
+};
+
+struct vmw_dma_buffer {
+	struct ttm_buffer_object base;
+	struct list_head res_list;
+};
+
+/**
+ * struct vmw_validate_buffer - Carries validation info about buffers.
+ *
+ * @base: Validation info for TTM.
+ * @hash: Hash entry for quick lookup of the TTM buffer object.
+ *
+ * This structure also contains driver-private validation info
+ * on top of the info needed by TTM.
+ */
+struct vmw_validate_buffer {
+	struct ttm_validate_buffer base;
+	struct drm_hash_item hash;
+};
+
+struct vmw_res_func;
+struct vmw_resource {
+	struct kref kref;
+	struct vmw_private *dev_priv;
+	int id;
+	bool avail;
+	unsigned long backup_size;
+	bool res_dirty; /* Protected by backup buffer reserved */
+	bool backup_dirty; /* Protected by backup buffer reserved */
+	struct vmw_dma_buffer *backup;
+	unsigned long backup_offset;
+	const struct vmw_res_func *func;
+	struct list_head lru_head; /* Protected by the resource lock */
+	struct list_head mob_head; /* Protected by @backup reserved */
+	void (*res_free) (struct vmw_resource *res);
+	void (*hw_destroy) (struct vmw_resource *res);
+};
+
+enum vmw_res_type {
+	vmw_res_context,
+	vmw_res_surface,
+	vmw_res_stream,
+	vmw_res_max
+};
+
+struct vmw_cursor_snooper {
+	struct drm_crtc *crtc;
+	size_t age;
+	uint32_t *image;
+};
+
+struct vmw_framebuffer;
+struct vmw_surface_offset;
+
+struct vmw_surface {
+	struct vmw_resource res;
+	uint32_t flags;
+	uint32_t format;
+	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+	struct drm_vmw_size base_size;
+	struct drm_vmw_size *sizes;
+	uint32_t num_sizes;
+	bool scanout;
+	/* TODO: so far just an extra pointer */
+	struct vmw_cursor_snooper snooper;
+	struct vmw_surface_offset *offsets;
+	SVGA3dTextureFilter autogen_filter;
+	uint32_t multisample_count;
+};
+
+struct vmw_marker_queue {
+	struct list_head head;
+	struct timespec lag;
+	struct timespec lag_time;
+	spinlock_t lock;
+};
+
+struct vmw_fifo_state {
+	unsigned long reserved_size;
+	__le32 *dynamic_buffer;
+	__le32 *static_buffer;
+	unsigned long static_buffer_size;
+	bool using_bounce_buffer;
+	uint32_t capabilities;
+	struct mutex fifo_mutex;
+	struct rw_semaphore rwsem;
+	struct vmw_marker_queue marker_queue;
+};
+
+struct vmw_relocation {
+	SVGAGuestPtr *location;
+	uint32_t index;
+};
+
+/**
+ * struct vmw_res_cache_entry - resource information cache entry
+ *
+ * @valid: Whether the entry is valid, which also implies that the execbuf
+ * code holds a reference to the resource, and it's placed on the
+ * validation list.
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ *
+ * Used to avoid frequent repeated user-space handle lookups of the
+ * same resource.
+ */
+struct vmw_res_cache_entry {
+	bool valid;
+	uint32_t handle;
+	struct vmw_resource *res;
+	struct vmw_resource_val_node *node;
+};
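+
+/*
+ * Illustrative lookup pattern (an assumption about intended use, not code
+ * quoted from this driver): the cache is consulted before a full
+ * user-space handle lookup:
+ *
+ *	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[type];
+ *
+ *	if (rcache->valid && rcache->handle == handle)
+ *		res = rcache->res;	// fast path, no handle lookup
+ *	else
+ *		// slow path: vmw_user_resource_lookup_handle(), then
+ *		// refresh the cache entry
+ */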
+
+struct vmw_sw_context{
+	struct drm_open_hash res_ht;
+	bool res_ht_initialized;
+	bool kernel; /**< is the call made from the kernel */
+	struct ttm_object_file *tfile;
+	struct list_head validate_nodes;
+	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
+	uint32_t cur_reloc;
+	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
+	uint32_t cur_val_buf;
+	uint32_t *cmd_bounce;
+	uint32_t cmd_bounce_size;
+	struct list_head resource_list;
+	uint32_t fence_flags;
+	struct ttm_buffer_object *cur_query_bo;
+	struct list_head res_relocations;
+	uint32_t *buf_start;
+	struct vmw_res_cache_entry res_cache[vmw_res_max];
+	struct vmw_resource *last_query_ctx;
+	bool needs_post_query_barrier;
+	struct vmw_resource *error_resource;
+};
+
+struct vmw_legacy_display;
+struct vmw_overlay;
+
+struct vmw_master {
+	struct ttm_lock lock;
+	struct mutex fb_surf_mutex;
+	struct list_head fb_surf;
+};
+
+struct vmw_vga_topology_state {
+	uint32_t width;
+	uint32_t height;
+	uint32_t primary;
+	uint32_t pos_x;
+	uint32_t pos_y;
+};
+
+struct vmw_private {
+	struct ttm_bo_device bdev;
+	struct ttm_bo_global_ref bo_global_ref;
+	struct drm_global_reference mem_global_ref;
+
+	struct vmw_fifo_state fifo;
+
+	struct drm_device *dev;
+	unsigned long vmw_chipset;
+	unsigned int io_start;
+	uint32_t vram_start;
+	uint32_t vram_size;
+	uint32_t mmio_start;
+	uint32_t mmio_size;
+	uint32_t fb_max_width;
+	uint32_t fb_max_height;
+	uint32_t initial_width;
+	uint32_t initial_height;
+	__le32 __iomem *mmio_virt;
+	int mmio_mtrr;
+	uint32_t capabilities;
+	uint32_t max_gmr_descriptors;
+	uint32_t max_gmr_ids;
+	uint32_t max_gmr_pages;
+	uint32_t memory_size;
+	bool has_gmr;
+	struct mutex hw_mutex;
+
+	/*
+	 * VGA registers.
+	 */
+
+	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
+	uint32_t vga_width;
+	uint32_t vga_height;
+	uint32_t vga_bpp;
+	uint32_t vga_bpl;
+	uint32_t vga_pitchlock;
+
+	uint32_t num_displays;
+
+	/*
+	 * Framebuffer info.
+	 */
+
+	void *fb_info;
+	struct vmw_legacy_display *ldu_priv;
+	struct vmw_screen_object_display *sou_priv;
+	struct vmw_overlay *overlay_priv;
+
+	/*
+	 * Context and surface management.
+	 */
+
+	rwlock_t resource_lock;
+	struct idr res_idr[vmw_res_max];
+	/*
+	 * Block lastclose from racing with firstopen.
+	 */
+
+	struct mutex init_mutex;
+
+	/*
+	 * A resource manager for kernel-only surfaces and
+	 * contexts.
+	 */
+
+	struct ttm_object_device *tdev;
+
+	/*
+	 * Fencing and IRQs.
+	 */
+
+	atomic_t marker_seq;
+	wait_queue_head_t fence_queue;
+	wait_queue_head_t fifo_queue;
+	int fence_queue_waiters; /* Protected by hw_mutex */
+	int goal_queue_waiters; /* Protected by hw_mutex */
+	atomic_t fifo_queue_waiters;
+	uint32_t last_read_seqno;
+	spinlock_t irq_lock;
+	struct vmw_fence_manager *fman;
+	uint32_t irq_mask;
+
+	/*
+	 * Device state
+	 */
+
+	uint32_t traces_state;
+	uint32_t enable_state;
+	uint32_t config_done_state;
+
+	/**
+	 * Execbuf
+	 */
+	/**
+	 * Protected by the cmdbuf mutex.
+	 */
+
+	struct vmw_sw_context ctx;
+	struct mutex cmdbuf_mutex;
+
+	/**
+	 * Operating mode.
+	 */
+
+	bool stealth;
+	bool is_opened;
+	bool enable_fb;
+
+	/**
+	 * Master management.
+	 */
+
+	struct vmw_master *active_master;
+	struct vmw_master fbdev_master;
+	struct notifier_block pm_nb;
+	bool suspended;
+
+	struct mutex release_mutex;
+	uint32_t num_3d_resources;
+
+	/*
+	 * Query processing. These members
+	 * are protected by the cmdbuf mutex.
+	 */
+
+	struct ttm_buffer_object *dummy_query_bo;
+	struct ttm_buffer_object *pinned_bo;
+	uint32_t query_cid;
+	uint32_t query_cid_valid;
+	bool dummy_query_bo_pinned;
+
+	/*
+	 * Surface swapping. The "surface_lru" list is protected by the
+	 * resource lock in order to be able to destroy a surface and take
+	 * it off the lru atomically. "used_memory_size" is currently
+	 * protected by the cmdbuf mutex for simplicity.
+	 */
+
+	struct list_head res_lru[vmw_res_max];
+	uint32_t used_memory_size;
+};
+
+static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_surface, res);
+}
+
+static inline struct vmw_private *vmw_priv(struct drm_device *dev)
+{
+	return (struct vmw_private *)dev->dev_private;
+}
+
+static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
+{
+	return (struct vmw_fpriv *)file_priv->driver_priv;
+}
+
+static inline struct vmw_master *vmw_master(struct drm_master *master)
+{
+	return (struct vmw_master *) master->driver_priv;
+}
+
+static inline void vmw_write(struct vmw_private *dev_priv,
+			     unsigned int offset, uint32_t value)
+{
+	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
+	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+}
+
+static inline uint32_t vmw_read(struct vmw_private *dev_priv,
+				unsigned int offset)
+{
+	uint32_t val;
+
+	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
+	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+	return val;
+}
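+
+/*
+ * The two helpers above implement the SVGA index/value register protocol:
+ * the register index is written to VMWGFX_INDEX_PORT and the value is then
+ * written to, or read from, VMWGFX_VALUE_PORT. For example (illustrative
+ * only):
+ *
+ *	uint32_t id = vmw_read(dev_priv, SVGA_REG_ID);
+ *
+ * Callers typically hold hw_mutex so that the two port accesses stay
+ * atomic with respect to each other.
+ */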
+
+int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
+void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
+
+/**
+ * GMR utilities - vmwgfx_gmr.c
+ */
+
+extern int vmw_gmr_bind(struct vmw_private *dev_priv,
+			struct page *pages[],
+			unsigned long num_pages,
+			int gmr_id);
+extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
+
+/**
+ * Resource utilities - vmwgfx_resource.c
+ */
+struct vmw_user_resource_conv;
+extern const struct vmw_user_resource_conv *user_surface_converter;
+extern const struct vmw_user_resource_conv *user_context_converter;
+
+extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
+extern void vmw_resource_unreference(struct vmw_resource **p_res);
+extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
+extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+extern int vmw_context_check(struct vmw_private *dev_priv,
+			     struct ttm_object_file *tfile,
+			     int id,
+			     struct vmw_resource **p_res);
+extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+				  struct ttm_object_file *tfile,
+				  uint32_t handle,
+				  struct vmw_surface **out_surf,
+				  struct vmw_dma_buffer **out_buf);
+extern int vmw_user_resource_lookup_handle(
+	struct vmw_private *dev_priv,
+	struct ttm_object_file *tfile,
+	uint32_t handle,
+	const struct vmw_user_resource_conv *converter,
+	struct vmw_resource **p_res);
+extern void vmw_surface_res_free(struct vmw_resource *res);
+extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file_priv);
+extern int vmw_surface_check(struct vmw_private *dev_priv,
+			     struct ttm_object_file *tfile,
+			     uint32_t handle, int *id);
+extern int vmw_surface_validate(struct vmw_private *dev_priv,
+				struct vmw_surface *srf);
+extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
+			   struct vmw_dma_buffer *vmw_bo,
+			   size_t size, struct ttm_placement *placement,
+			   bool interruptible,
+			   void (*bo_free) (struct ttm_buffer_object *bo));
+extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+				  struct ttm_object_file *tfile);
+extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
+					 uint32_t cur_validate_node);
+extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
+extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
+				  uint32_t id, struct vmw_dma_buffer **out);
+extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv);
+extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
+				  struct ttm_object_file *tfile,
+				  uint32_t *inout_id,
+				  struct vmw_resource **out);
+extern void vmw_resource_unreserve(struct vmw_resource *res,
+				   struct vmw_dma_buffer *new_backup,
+				   unsigned long new_backup_offset);
+extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+				     struct ttm_mem_reg *mem);
+extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+				struct vmw_fence_obj *fence);
+extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
+
+/**
+ * DMA buffer helper routines - vmwgfx_dmabuf.c
+ */
+extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
+				   struct vmw_dma_buffer *bo,
+				   struct ttm_placement *placement,
+				   bool interruptible);
+extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
+			      struct vmw_dma_buffer *buf,
+			      bool pin, bool interruptible);
+extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
+				     struct vmw_dma_buffer *buf,
+				     bool pin, bool interruptible);
+extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+				       struct vmw_dma_buffer *bo,
+				       bool pin, bool interruptible);
+extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
+			    struct vmw_dma_buffer *bo,
+			    bool interruptible);
+extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
+				 SVGAGuestPtr *ptr);
+extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
+
+/**
+ * Misc Ioctl functionality - vmwgfx_ioctl.c
+ */
+
+extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+extern int vmw_present_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv);
+extern unsigned int vmw_fops_poll(struct file *filp,
+				  struct poll_table_struct *wait);
+extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
+			     size_t count, loff_t *offset);
+
+/**
+ * Fifo utilities - vmwgfx_fifo.c
+ */
+
+extern int vmw_fifo_init(struct vmw_private *dev_priv,
+			 struct vmw_fifo_state *fifo);
+extern void vmw_fifo_release(struct vmw_private *dev_priv,
+			     struct vmw_fifo_state *fifo);
+extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
+			       uint32_t *seqno);
+extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
+extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
+extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
+extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+				     uint32_t cid);
+
+/**
+ * TTM glue - vmwgfx_ttm_glue.c
+ */
+
+extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
+extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
+extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/**
+ * TTM buffer object driver - vmwgfx_buffer.c
+ */
+
+extern struct ttm_placement vmw_vram_placement;
+extern struct ttm_placement vmw_vram_ne_placement;
+extern struct ttm_placement vmw_vram_sys_placement;
+extern struct ttm_placement vmw_vram_gmr_placement;
+extern struct ttm_placement vmw_vram_gmr_ne_placement;
+extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_placement vmw_evictable_placement;
+extern struct ttm_placement vmw_srf_placement;
+extern struct ttm_bo_driver vmw_bo_driver;
+extern int vmw_dma_quiescent(struct drm_device *dev);
+
+/**
+ * Command submission - vmwgfx_execbuf.c
+ */
+
+extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+extern int vmw_execbuf_process(struct drm_file *file_priv,
+			       struct vmw_private *dev_priv,
+			       void __user *user_commands,
+			       void *kernel_commands,
+			       uint32_t command_size,
+			       uint64_t throttle_us,
+			       struct drm_vmw_fence_rep __user
+			       *user_fence_rep,
+			       struct vmw_fence_obj **out_fence);
+extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+					    struct vmw_fence_obj *fence);
+extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
+
+extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+				      struct vmw_private *dev_priv,
+				      struct vmw_fence_obj **p_fence,
+				      uint32_t *p_handle);
+extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+					struct vmw_fpriv *vmw_fp,
+					int ret,
+					struct drm_vmw_fence_rep __user
+					*user_fence_rep,
+					struct vmw_fence_obj *fence,
+					uint32_t fence_handle);
+
+/**
+ * IRQs and waiting - vmwgfx_irq.c
+ */
+
+extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
+extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
+			     uint32_t seqno, bool interruptible,
+			     unsigned long timeout);
+extern void vmw_irq_preinstall(struct drm_device *dev);
+extern int vmw_irq_postinstall(struct drm_device *dev);
+extern void vmw_irq_uninstall(struct drm_device *dev);
+extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
+				uint32_t seqno);
+extern int vmw_fallback_wait(struct vmw_private *dev_priv,
+			     bool lazy,
+			     bool fifo_idle,
+			     uint32_t seqno,
+			     bool interruptible,
+			     unsigned long timeout);
+extern void vmw_update_seqno(struct vmw_private *dev_priv,
+				struct vmw_fifo_state *fifo_state);
+extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
+extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
+extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
+extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+
+/**
+ * Rudimentary fence-like objects currently used only for throttling -
+ * vmwgfx_marker.c
+ */
+
+extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
+extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
+extern int vmw_marker_push(struct vmw_marker_queue *queue,
+			  uint32_t seqno);
+extern int vmw_marker_pull(struct vmw_marker_queue *queue,
+			  uint32_t signaled_seqno);
+extern int vmw_wait_lag(struct vmw_private *dev_priv,
+			struct vmw_marker_queue *queue, uint32_t us);
+
+/**
+ * Kernel framebuffer - vmwgfx_fb.c
+ */
+
+int vmw_fb_init(struct vmw_private *vmw_priv);
+int vmw_fb_close(struct vmw_private *dev_priv);
+int vmw_fb_off(struct vmw_private *vmw_priv);
+int vmw_fb_on(struct vmw_private *vmw_priv);
+
+/**
+ * Kernel modesetting - vmwgfx_kms.c
+ */
+
+int vmw_kms_init(struct vmw_private *dev_priv);
+int vmw_kms_close(struct vmw_private *dev_priv);
+int vmw_kms_save_vga(struct vmw_private *vmw_priv);
+int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+			  struct ttm_object_file *tfile,
+			  struct ttm_buffer_object *bo,
+			  SVGA3dCmdHeader *header);
+int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+		       unsigned width, unsigned height, unsigned pitch,
+		       unsigned bpp, unsigned depth);
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+				uint32_t pitch,
+				uint32_t height);
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
+int vmw_enable_vblank(struct drm_device *dev, int crtc);
+void vmw_disable_vblank(struct drm_device *dev, int crtc);
+int vmw_kms_present(struct vmw_private *dev_priv,
+		    struct drm_file *file_priv,
+		    struct vmw_framebuffer *vfb,
+		    struct vmw_surface *surface,
+		    uint32_t sid, int32_t destX, int32_t destY,
+		    struct drm_vmw_rect *clips,
+		    uint32_t num_clips);
+int vmw_kms_readback(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_vmw_rect *clips,
+		     uint32_t num_clips);
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+
+int vmw_dumb_create(struct drm_file *file_priv,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args);
+
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+			struct drm_device *dev, uint32_t handle,
+			uint64_t *offset);
+int vmw_dumb_destroy(struct drm_file *file_priv,
+		     struct drm_device *dev,
+		     uint32_t handle);
+/**
+ * Overlay control - vmwgfx_overlay.c
+ */
+
+int vmw_overlay_init(struct vmw_private *dev_priv);
+int vmw_overlay_close(struct vmw_private *dev_priv);
+int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
+int vmw_overlay_stop_all(struct vmw_private *dev_priv);
+int vmw_overlay_resume_all(struct vmw_private *dev_priv);
+int vmw_overlay_pause_all(struct vmw_private *dev_priv);
+int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
+int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
+int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
+int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
+
+/**
+ * GMR Id manager
+ */
+
+extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
+
+/**
+ * Inline helper functions
+ */
+
+static inline void vmw_surface_unreference(struct vmw_surface **srf)
+{
+	struct vmw_surface *tmp_srf = *srf;
+	struct vmw_resource *res = &tmp_srf->res;
+	*srf = NULL;
+
+	vmw_resource_unreference(&res);
+}
+
+static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
+{
+	(void) vmw_resource_reference(&srf->res);
+	return srf;
+}
+
+static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
+{
+	struct vmw_dma_buffer *tmp_buf = *buf;
+
+	*buf = NULL;
+	if (tmp_buf != NULL) {
+		struct ttm_buffer_object *bo = &tmp_buf->base;
+
+		ttm_bo_unref(&bo);
+	}
+}
+
+static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
+{
+	if (ttm_bo_reference(&buf->base))
+		return buf;
+	return NULL;
+}
+
+static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
+{
+	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
+}
+#endif
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
new file mode 100644
index 0000000..da068bd
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -0,0 +1,1795 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_reg.h"
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_placement.h>
+
+#define VMW_RES_HT_ORDER 12
+
+/**
+ * struct vmw_resource_relocation - Relocation info for resources
+ *
+ * @head: List head for the software context's relocation list.
+ * @res: Non-ref-counted pointer to the resource.
+ * @offset: Offset into the command buffer, in units of 4-byte entries,
+ * where the id that needs fixup is located.
+ */
+struct vmw_resource_relocation {
+	struct list_head head;
+	const struct vmw_resource *res;
+	unsigned long offset;
+};
+
+/**
+ * struct vmw_resource_val_node - Validation info for resources
+ *
+ * @head: List head for the software context's resource list.
+ * @hash: Hash entry for quick resource to val_node lookup.
+ * @res: Ref-counted pointer to the resource.
+ * @new_backup: Refcounted pointer to the new backup buffer.
+ * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
+ * @first_usage: Set to true the first time the resource is referenced in
+ * the command stream.
+ * @no_buffer_needed: Resources do not need to allocate buffer backup on
+ * reservation. The command stream will provide one.
+ */
+struct vmw_resource_val_node {
+	struct list_head head;
+	struct drm_hash_item hash;
+	struct vmw_resource *res;
+	struct vmw_dma_buffer *new_backup;
+	unsigned long new_backup_offset;
+	bool first_usage;
+	bool no_buffer_needed;
+};
+
+/**
+ * vmw_resource_list_unreserve - Unreserve resources previously reserved for
+ * command submission.
+ *
+ * @list: List of resources to unreserve.
+ * @backoff: Whether command submission failed.
+ */
+static void vmw_resource_list_unreserve(struct list_head *list,
+					bool backoff)
+{
+	struct vmw_resource_val_node *val;
+
+	list_for_each_entry(val, list, head) {
+		struct vmw_resource *res = val->res;
+		struct vmw_dma_buffer *new_backup =
+			backoff ? NULL : val->new_backup;
+
+		vmw_resource_unreserve(res, new_backup,
+			val->new_backup_offset);
+		vmw_dmabuf_unreference(&val->new_backup);
+	}
+}
+
+
+/**
+ * vmw_resource_val_add - Add a resource to the software context's
+ * resource list if it's not already on it.
+ *
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ * @p_node: On successful return, points to a valid pointer to a
+ * struct vmw_resource_val_node, if non-NULL on entry.
+ */
+static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
+				struct vmw_resource *res,
+				struct vmw_resource_val_node **p_node)
+{
+	struct vmw_resource_val_node *node;
+	struct drm_hash_item *hash;
+	int ret;
+
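+	/*
+	 * The resource pointer itself serves as the hash key, so a hit
+	 * means this resource is already on the validation list.
+	 */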
+	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
+				    &hash) == 0)) {
+		node = container_of(hash, struct vmw_resource_val_node, hash);
+		node->first_usage = false;
+		if (unlikely(p_node != NULL))
+			*p_node = node;
+		return 0;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (unlikely(node == NULL)) {
+		DRM_ERROR("Failed to allocate a resource validation "
+			  "entry.\n");
+		return -ENOMEM;
+	}
+
+	node->hash.key = (unsigned long) res;
+	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to initialize a resource validation "
+			  "entry.\n");
+		kfree(node);
+		return ret;
+	}
+	list_add_tail(&node->head, &sw_context->resource_list);
+	node->res = vmw_resource_reference(res);
+	node->first_usage = true;
+
+	if (unlikely(p_node != NULL))
+		*p_node = node;
+
+	return 0;
+}
+
+/**
+ * vmw_resource_relocation_add - Add a relocation to the relocation list
+ *
+ * @list: Pointer to head of relocation list.
+ * @res: The resource.
+ * @offset: Offset into the command buffer currently being parsed where the
+ * id that needs fixup is located. Granularity is 4 bytes.
+ */
+static int vmw_resource_relocation_add(struct list_head *list,
+				       const struct vmw_resource *res,
+				       unsigned long offset)
+{
+	struct vmw_resource_relocation *rel;
+
+	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+	if (unlikely(rel == NULL)) {
+		DRM_ERROR("Failed to allocate a resource relocation.\n");
+		return -ENOMEM;
+	}
+
+	rel->res = res;
+	rel->offset = offset;
+	list_add_tail(&rel->head, list);
+
+	return 0;
+}
+
+/**
+ * vmw_resource_relocations_free - Free all relocations on a list
+ *
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_free(struct list_head *list)
+{
+	struct vmw_resource_relocation *rel, *n;
+
+	list_for_each_entry_safe(rel, n, list, head) {
+		list_del(&rel->head);
+		kfree(rel);
+	}
+}
+
+/**
+ * vmw_resource_relocations_apply - Apply all relocations on a list
+ *
+ * @cb: Pointer to the start of the command buffer being patched. This need
+ * not be the same buffer as the one being parsed when the relocation
+ * list was built, but the contents must be the same modulo the
+ * resource ids.
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_apply(uint32_t *cb,
+					   struct list_head *list)
+{
+	struct vmw_resource_relocation *rel;
+
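+	/*
+	 * rel->offset counts 4-byte entries, so it indexes the u32
+	 * command buffer directly.
+	 */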
+	list_for_each_entry(rel, list, head)
+		cb[rel->offset] = rel->res->id;
+}
+
+static int vmw_cmd_invalid(struct vmw_private *dev_priv,
+			   struct vmw_sw_context *sw_context,
+			   SVGA3dCmdHeader *header)
+{
+	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
+}
+
+static int vmw_cmd_ok(struct vmw_private *dev_priv,
+		      struct vmw_sw_context *sw_context,
+		      SVGA3dCmdHeader *header)
+{
+	return 0;
+}
+
+/**
+ * vmw_bo_to_validate_list - add a bo to a validate list
+ *
+ * @sw_context: The software context used for this command submission batch.
+ * @bo: The buffer object to add.
+ * @p_val_node: If non-NULL, will be updated with the validate node number
+ * on return.
+ *
+ * Returns -EINVAL if the limit of number of buffer objects per command
+ * submission is reached.
+ */
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+				   struct ttm_buffer_object *bo,
+				   uint32_t *p_val_node)
+{
+	uint32_t val_node;
+	struct vmw_validate_buffer *vval_buf;
+	struct ttm_validate_buffer *val_buf;
+	struct drm_hash_item *hash;
+	int ret;
+
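+	/*
+	 * Buffer objects share the resource hash table. Since the keys
+	 * are object pointers, resource and buffer entries cannot collide.
+	 */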
+	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+				    &hash) == 0)) {
+		vval_buf = container_of(hash, struct vmw_validate_buffer,
+					hash);
+		val_buf = &vval_buf->base;
+		val_node = vval_buf - sw_context->val_bufs;
+	} else {
+		val_node = sw_context->cur_val_buf;
+		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+			DRM_ERROR("Max number of DMA buffers per submission "
+				  "exceeded.\n");
+			return -EINVAL;
+		}
+		vval_buf = &sw_context->val_bufs[val_node];
+		vval_buf->hash.key = (unsigned long) bo;
+		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed to initialize a buffer validation "
+				  "entry.\n");
+			return ret;
+		}
+		++sw_context->cur_val_buf;
+		val_buf = &vval_buf->base;
+		val_buf->bo = ttm_bo_reference(bo);
+		val_buf->reserved = false;
+		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+	}
+
+	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
+
+	if (p_val_node)
+		*p_val_node = val_node;
+
+	return 0;
+}
+
+/**
+ * vmw_resources_reserve - Reserve all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Note that since VMware's command submission currently is protected by
+ * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
+ * since only a single thread at a time will attempt this.
+ */
+static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
+{
+	struct vmw_resource_val_node *val;
+	int ret;
+
+	list_for_each_entry(val, &sw_context->resource_list, head) {
+		struct vmw_resource *res = val->res;
+
+		ret = vmw_resource_reserve(res, val->no_buffer_needed);
+		if (unlikely(ret != 0))
+			return ret;
+
+		if (res->backup) {
+			struct ttm_buffer_object *bo = &res->backup->base;
+
+			ret = vmw_bo_to_validate_list
+				(sw_context, bo, NULL);
+
+			if (unlikely(ret != 0))
+				return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * vmw_resources_validate - Validate all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ */
+static int vmw_resources_validate(struct vmw_sw_context *sw_context)
+{
+	struct vmw_resource_val_node *val;
+	int ret;
+
+	list_for_each_entry(val, &sw_context->resource_list, head) {
+		struct vmw_resource *res = val->res;
+
+		ret = vmw_resource_validate(res);
+		if (unlikely(ret != 0)) {
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to validate resource.\n");
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ */
+static int vmw_cmd_res_check(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     enum vmw_res_type res_type,
+			     const struct vmw_user_resource_conv *converter,
+			     uint32_t *id,
+			     struct vmw_resource_val_node **p_val)
+{
+	struct vmw_res_cache_entry *rcache =
+		&sw_context->res_cache[res_type];
+	struct vmw_resource *res;
+	struct vmw_resource_val_node *node;
+	int ret;
+
+	if (*id == SVGA3D_INVALID_ID)
+		return 0;
+
+	/*
+	 * Fastpath in case of repeated commands referencing the same
+	 * resource
+	 */
+
+	if (likely(rcache->valid && *id == rcache->handle)) {
+		const struct vmw_resource *res = rcache->res;
+
+		rcache->node->first_usage = false;
+		if (p_val)
+			*p_val = rcache->node;
+
+		return vmw_resource_relocation_add
+			(&sw_context->res_relocations, res,
+			 id - sw_context->buf_start);
+	}
+
+	ret = vmw_user_resource_lookup_handle(dev_priv,
+					      sw_context->tfile,
+					      *id,
+					      converter,
+					      &res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use resource 0x%08x.\n",
+			  (unsigned) *id);
+		dump_stack();
+		return ret;
+	}
+
+	rcache->valid = true;
+	rcache->res = res;
+	rcache->handle = *id;
+
+	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+					  res,
+					  id - sw_context->buf_start);
+	if (unlikely(ret != 0))
+		goto out_no_reloc;
+
+	ret = vmw_resource_val_add(sw_context, res, &node);
+	if (unlikely(ret != 0))
+		goto out_no_reloc;
+
+	rcache->node = node;
+	if (p_val)
+		*p_val = node;
+	vmw_resource_unreference(&res);
+	return 0;
+
+out_no_reloc:
+	BUG_ON(sw_context->error_resource != NULL);
+	sw_context->error_resource = res;
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_cid_check - Check a command header for valid context information.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @header: A command header with an embedded user-space context handle.
+ *
+ * Convenience function: Call vmw_cmd_res_check with the user-space context
+ * handle embedded in @header.
+ */
+static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	struct vmw_cid_cmd {
+		SVGA3dCmdHeader header;
+		__le32 cid;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_cid_cmd, header);
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				 user_context_converter, &cmd->cid, NULL);
+}
+
+static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
+					   struct vmw_sw_context *sw_context,
+					   SVGA3dCmdHeader *header)
+{
+	struct vmw_sid_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetRenderTarget body;
+	} *cmd;
+	int ret;
+
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	cmd = container_of(header, struct vmw_sid_cmd, header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.target.sid, NULL);
+	return ret;
+}
+
+static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      SVGA3dCmdHeader *header)
+{
+	struct vmw_sid_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSurfaceCopy body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_sid_cmd, header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.src.sid, NULL);
+	if (unlikely(ret != 0))
+		return ret;
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dest.sid, NULL);
+}
+
+static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context,
+				     SVGA3dCmdHeader *header)
+{
+	struct vmw_sid_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSurfaceStretchBlt body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_sid_cmd, header);
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.src.sid, NULL);
+	if (unlikely(ret != 0))
+		return ret;
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.dest.sid, NULL);
+}
+
+static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
+					 struct vmw_sw_context *sw_context,
+					 SVGA3dCmdHeader *header)
+{
+	struct vmw_sid_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBlitSurfaceToScreen body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_sid_cmd, header);
+
+	if (unlikely(!sw_context->kernel)) {
+		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
+		return -EPERM;
+	}
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.srcImage.sid, NULL);
+}
+
+static int vmw_cmd_present_check(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_sid_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdPresent body;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_sid_cmd, header);
+
+	if (unlikely(!sw_context->kernel)) {
+		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
+		return -EPERM;
+	}
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter, &cmd->body.sid,
+				 NULL);
+}
+
+/**
+ * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
+ *
+ * @dev_priv: The device private structure.
+ * @new_query_bo: The new buffer holding query results.
+ * @sw_context: The software context used for this command submission.
+ *
+ * This function checks whether @new_query_bo is suitable for holding
+ * query results, and if another buffer currently is pinned for query
+ * results. If so, the function prepares the state of @sw_context for
+ * switching pinned buffers after successful submission of the current
+ * command batch.
+ */
+static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
+				       struct ttm_buffer_object *new_query_bo,
+				       struct vmw_sw_context *sw_context)
+{
+	struct vmw_res_cache_entry *ctx_entry =
+		&sw_context->res_cache[vmw_res_context];
+	int ret;
+
+	BUG_ON(!ctx_entry->valid);
+	sw_context->last_query_ctx = ctx_entry->res;
+
+	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
+
+		if (unlikely(new_query_bo->num_pages > 4)) {
+			DRM_ERROR("Query buffer too large.\n");
+			return -EINVAL;
+		}
+
+		if (unlikely(sw_context->cur_query_bo != NULL)) {
+			sw_context->needs_post_query_barrier = true;
+			ret = vmw_bo_to_validate_list(sw_context,
+						      sw_context->cur_query_bo,
+						      NULL);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+		sw_context->cur_query_bo = new_query_bo;
+
+		ret = vmw_bo_to_validate_list(sw_context,
+					      dev_priv->dummy_query_bo,
+					      NULL);
+		if (unlikely(ret != 0))
+			return ret;
+
+	}
+
+	return 0;
+}
+
+
+/**
+ * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
+ *
+ * @dev_priv: The device private structure.
+ * @sw_context: The software context used for this command submission batch.
+ *
+ * This function will check if we're switching query buffers, and will then,
+ * issue a dummy occlusion query wait used as a query barrier. When the fence
+ * object following that query wait has signaled, we are sure that all
+ * preceding queries have finished, and the old query buffer can be unpinned.
+ * However, since both the new query buffer and the old one are fenced with
+ * that fence, we can do an asynchronous unpin now, and be sure that the
+ * old query buffer won't be moved until the fence has signaled.
+ *
+ * As mentioned above, both the new and old query buffers need to be fenced
+ * using a sequence emitted *after* calling this function.
+ */
+static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context)
+{
+	/*
+	 * The validate list should still hold references to all
+	 * contexts here.
+	 */
+
+	if (sw_context->needs_post_query_barrier) {
+		struct vmw_res_cache_entry *ctx_entry =
+			&sw_context->res_cache[vmw_res_context];
+		struct vmw_resource *ctx;
+		int ret;
+
+		BUG_ON(!ctx_entry->valid);
+		ctx = ctx_entry->res;
+
+		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
+
+		if (unlikely(ret != 0))
+			DRM_ERROR("Out of fifo space for dummy query.\n");
+	}
+
+	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
+		if (dev_priv->pinned_bo) {
+			vmw_bo_pin(dev_priv->pinned_bo, false);
+			ttm_bo_unref(&dev_priv->pinned_bo);
+		}
+
+		if (!sw_context->needs_post_query_barrier) {
+			vmw_bo_pin(sw_context->cur_query_bo, true);
+
+			/*
+			 * We pin also the dummy_query_bo buffer so that we
+			 * don't need to validate it when emitting
+			 * dummy queries in context destroy paths.
+			 */
+
+			vmw_bo_pin(dev_priv->dummy_query_bo, true);
+			dev_priv->dummy_query_bo_pinned = true;
+
+			BUG_ON(sw_context->last_query_ctx == NULL);
+			dev_priv->query_cid = sw_context->last_query_ctx->id;
+			dev_priv->query_cid_valid = true;
+			dev_priv->pinned_bo =
+				ttm_bo_reference(sw_context->cur_query_bo);
+		}
+	}
+}
+
+/**
+ * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @ptr: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @ptr.
+ *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr. The translation does not take place
+ * immediately, but during a call to vmw_apply_relocations().
+ * This function builds a relocation list and a list of buffers to validate.
+ * The former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations().
+ */
+static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGAGuestPtr *ptr,
+				   struct vmw_dma_buffer **vmw_bo_p)
+{
+	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct ttm_buffer_object *bo;
+	uint32_t handle = ptr->gmrId;
+	struct vmw_relocation *reloc;
+	int ret;
+
+	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use GMR region.\n");
+		return -EINVAL;
+	}
+	bo = &vmw_bo->base;
+
+	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
+		DRM_ERROR("Max number relocations per submission"
+			  " exceeded\n");
+		ret = -EINVAL;
+		goto out_no_reloc;
+	}
+
+	reloc = &sw_context->relocs[sw_context->cur_reloc++];
+	reloc->location = ptr;
+
+	ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
+	if (unlikely(ret != 0))
+		goto out_no_reloc;
+
+	*vmw_bo_p = vmw_bo;
+	return 0;
+
+out_no_reloc:
+	vmw_dmabuf_unreference(&vmw_bo);
+	*vmw_bo_p = NULL;
+	return ret;
+}
+
+/**
+ * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
+			       struct vmw_sw_context *sw_context,
+			       SVGA3dCmdHeader *header)
+{
+	struct vmw_begin_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBeginQuery q;
+	} *cmd;
+
+	cmd = container_of(header, struct vmw_begin_query_cmd,
+			   header);
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				 user_context_converter, &cmd->q.cid,
+				 NULL);
+}
+
+/**
+ * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_end_query(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdEndQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->q.guestResult,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return ret;
+}
+
+/**
+ * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->q.guestResult,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
+
+static int vmw_cmd_dma(struct vmw_private *dev_priv,
+		       struct vmw_sw_context *sw_context,
+		       SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_surface *srf = NULL;
+	struct vmw_dma_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSurfaceDMA dma;
+	} *cmd;
+	int ret;
+	SVGA3dCmdSurfaceDMASuffix *suffix;
+	uint32_t bo_size;
+
+	cmd = container_of(header, struct vmw_dma_cmd, header);
+	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+					       header->size - sizeof(*suffix));
+
+	/* Make sure device and verifier stay in sync. */
+	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
+		DRM_ERROR("Invalid DMA suffix size.\n");
+		return -EINVAL;
+	}
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->dma.guest.ptr,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	/* Make sure DMA doesn't cross BO boundaries. */
+	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
+		DRM_ERROR("Invalid DMA offset.\n");
+		ret = -EINVAL;
+		goto out_no_surface;
+	}
+
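+	/* Clamp the suffix so the device cannot DMA past the end of the BO. */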
+	bo_size -= cmd->dma.guest.ptr.offset;
+	if (unlikely(suffix->maximumOffset > bo_size))
+		suffix->maximumOffset = bo_size;
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter, &cmd->dma.host.sid,
+				NULL);
+	if (unlikely(ret != 0)) {
+		if (unlikely(ret != -ERESTARTSYS))
+			DRM_ERROR("could not find surface for DMA.\n");
+		goto out_no_surface;
+	}
+
+	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
+
+	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+
+out_no_surface:
+	vmw_dmabuf_unreference(&vmw_bo);
+	return ret;
+}
+
+static int vmw_cmd_draw(struct vmw_private *dev_priv,
+			struct vmw_sw_context *sw_context,
+			SVGA3dCmdHeader *header)
+{
+	struct vmw_draw_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDrawPrimitives body;
+	} *cmd;
+	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
+		(unsigned long)header + sizeof(*cmd));
+	SVGA3dPrimitiveRange *range;
+	uint32_t i;
+	uint32_t maxnum;
+	int ret;
+
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
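+	/*
+	 * The command body is followed by numVertexDecls vertex
+	 * declarations and then numRanges primitive ranges; check both
+	 * counts against the size given in the command header.
+	 */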
+	cmd = container_of(header, struct vmw_draw_cmd, header);
+	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
+
+	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
+		DRM_ERROR("Illegal number of vertex declarations.\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&decl->array.surfaceId, NULL);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	maxnum = (header->size - sizeof(cmd->body) -
+		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
+	if (unlikely(cmd->body.numRanges > maxnum)) {
+		DRM_ERROR("Illegal number of index ranges.\n");
+		return -EINVAL;
+	}
+
+	range = (SVGA3dPrimitiveRange *) decl;
+	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&range->indexArray.surfaceId, NULL);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+	return 0;
+}
+
+
+static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	struct vmw_tex_state_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetTextureState state;
+	};
+
+	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
+	  ((unsigned long) header + header->size + sizeof(header));
+	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
+		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+	int ret;
+
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
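+	/*
+	 * Walk the variable-length texture state array; only bind-texture
+	 * entries carry a surface id that needs validation.
+	 */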
+	for (; cur_state < last_state; ++cur_state) {
+		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
+			continue;
+
+		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+					user_surface_converter,
+					&cur_state->value, NULL);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	return 0;
+}
+
+static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      void *buf)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	int ret;
+
+	struct {
+		uint32_t header;
+		SVGAFifoCmdDefineGMRFB body;
+	} *cmd = buf;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->body.ptr,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+
+	return ret;
+}
+
+/**
+ * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	struct vmw_set_shader_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShader body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_set_shader_cmd,
+			   header);
+
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return 0;
+}
+
+static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				void *buf, uint32_t *size)
+{
+	uint32_t size_remaining = *size;
+	uint32_t cmd_id;
+
+	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+	switch (cmd_id) {
+	case SVGA_CMD_UPDATE:
+		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
+		break;
+	case SVGA_CMD_DEFINE_GMRFB:
+		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
+		break;
+	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+		break;
+	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
+		break;
+	default:
+		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
+		return -EINVAL;
+	}
+
+	if (*size > size_remaining) {
+		DRM_ERROR("Invalid SVGA command (size mismatch):"
+			  " %u.\n", cmd_id);
+		return -EINVAL;
+	}
+
+	if (unlikely(!sw_context->kernel)) {
+		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
+		return -EPERM;
+	}
+
+	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
+		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
+
+	return 0;
+}
+
+typedef int (*vmw_cmd_func) (struct vmw_private *,
+			     struct vmw_sw_context *,
+			     SVGA3dCmdHeader *);
+
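+/*
+ * Dispatch table indexed by (command id - SVGA_3D_CMD_BASE), built with
+ * designated initializers.
+ */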
+#define VMW_CMD_DEF(cmd, func) \
+	[cmd - SVGA_3D_CMD_BASE] = func
+
+static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
+	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
+		    &vmw_cmd_set_render_target_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
+	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
+	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
+	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
+		    &vmw_cmd_blt_surf_screen_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
+	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
+};
+
+static int vmw_cmd_check(struct vmw_private *dev_priv,
+			 struct vmw_sw_context *sw_context,
+			 void *buf, uint32_t *size)
+{
+	uint32_t cmd_id;
+	uint32_t size_remaining = *size;
+	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
+	int ret;
+
+	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+	/* Handle any non-3D commands. */
+	if (unlikely(cmd_id < SVGA_CMD_MAX))
+		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
+
+	cmd_id = le32_to_cpu(header->id);
+	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
+
+	cmd_id -= SVGA_3D_CMD_BASE;
+	if (unlikely(*size > size_remaining))
+		goto out_err;
+
+	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
+		goto out_err;
+
+	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	return 0;
+out_err:
+	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
+		  cmd_id + SVGA_3D_CMD_BASE);
+	return -EINVAL;
+}
+
+static int vmw_cmd_check_all(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     void *buf,
+			     uint32_t size)
+{
+	int32_t cur_size = size;
+	int ret;
+
+	sw_context->buf_start = buf;
+
+	while (cur_size > 0) {
+		size = cur_size;
+		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
+		if (unlikely(ret != 0))
+			return ret;
+		buf = (void *)((unsigned long) buf + size);
+		cur_size -= size;
+	}
+
+	if (unlikely(cur_size != 0)) {
+		DRM_ERROR("Command verifier out of sync.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void vmw_free_relocations(struct vmw_sw_context *sw_context)
+{
+	sw_context->cur_reloc = 0;
+}
+
+static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
+{
+	uint32_t i;
+	struct vmw_relocation *reloc;
+	struct ttm_validate_buffer *validate;
+	struct ttm_buffer_object *bo;
+
+	for (i = 0; i < sw_context->cur_reloc; ++i) {
+		reloc = &sw_context->relocs[i];
+		validate = &sw_context->val_bufs[reloc->index].base;
+		bo = validate->bo;
+		switch (bo->mem.mem_type) {
+		case TTM_PL_VRAM:
+			reloc->location->offset += bo->offset;
+			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
+			break;
+		case VMW_PL_GMR:
+			reloc->location->gmrId = bo->mem.start;
+			break;
+		default:
+			BUG();
+		}
+	}
+	vmw_free_relocations(sw_context);
+}
+
+/**
+ * vmw_resource_list_unreference - Free up a resource list and unreference
+ * all resources referenced by it.
+ *
+ * @list: The resource list.
+ */
+static void vmw_resource_list_unreference(struct list_head *list)
+{
+	struct vmw_resource_val_node *val, *val_next;
+
+	/*
+	 * Drop references to resources held during command submission.
+	 */
+
+	list_for_each_entry_safe(val, val_next, list, head) {
+		list_del_init(&val->head);
+		vmw_resource_unreference(&val->res);
+		kfree(val);
+	}
+}
+
+static void vmw_clear_validations(struct vmw_sw_context *sw_context)
+{
+	struct vmw_validate_buffer *entry, *next;
+	struct vmw_resource_val_node *val;
+
+	/*
+	 * Drop references to DMA buffers held during command submission.
+	 */
+	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
+				 base.head) {
+		list_del(&entry->base.head);
+		ttm_bo_unref(&entry->base.bo);
+		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
+		sw_context->cur_val_buf--;
+	}
+	BUG_ON(sw_context->cur_val_buf != 0);
+
+	list_for_each_entry(val, &sw_context->resource_list, head)
+		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
+}
+
+static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+				      struct ttm_buffer_object *bo)
+{
+	int ret;
+
+	/*
+	 * Don't validate pinned buffers.
+	 */
+
+	if (bo == dev_priv->pinned_bo ||
+	    (bo == dev_priv->dummy_query_bo &&
+	     dev_priv->dummy_query_bo_pinned))
+		return 0;
+
+	/**
+	 * Put BO in VRAM if there is space, otherwise as a GMR.
+	 * If there is no space in VRAM and GMR ids are all used up,
+	 * start evicting GMRs to make room. If the DMA buffer can't be
+	 * used as a GMR, this will return -ENOMEM.
+	 */
+
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
+	if (likely(ret == 0 || ret == -ERESTARTSYS))
+		return ret;
+
+	/**
+	 * If that failed, try VRAM again, this time evicting
+	 * previous contents.
+	 */
+
+	DRM_INFO("Falling through to VRAM.\n");
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+	return ret;
+}
+
+
+static int vmw_validate_buffers(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context)
+{
+	struct vmw_validate_buffer *entry;
+	int ret;
+
+	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
+		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+	return 0;
+}
+
+static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
+				 uint32_t size)
+{
+	if (likely(sw_context->cmd_bounce_size >= size))
+		return 0;
+
+	if (sw_context->cmd_bounce_size == 0)
+		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
+
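+	/* Grow by roughly 1.5x, page-aligned, until the request fits. */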
+	while (sw_context->cmd_bounce_size < size) {
+		sw_context->cmd_bounce_size =
+			PAGE_ALIGN(sw_context->cmd_bounce_size +
+				   (sw_context->cmd_bounce_size >> 1));
+	}
+
+	if (sw_context->cmd_bounce != NULL)
+		vfree(sw_context->cmd_bounce);
+
+	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
+
+	if (sw_context->cmd_bounce == NULL) {
+		DRM_ERROR("Failed to allocate command bounce buffer.\n");
+		sw_context->cmd_bounce_size = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_execbuf_fence_commands - create and submit a command stream fence
+ *
+ * Creates a fence object and submits a command stream marker.
+ * If this fails for some reason, we sync the fifo and set *@p_fence to NULL.
+ * It is then safe to fence buffers with a NULL pointer.
+ *
+ * If @p_handle is not NULL @file_priv must also not be NULL. Creates
+ * a userspace handle if @p_handle is not NULL, otherwise not.
+ */
+
+int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+			       struct vmw_private *dev_priv,
+			       struct vmw_fence_obj **p_fence,
+			       uint32_t *p_handle)
+{
+	uint32_t sequence;
+	int ret;
+	bool synced = false;
+
+	/* p_handle implies file_priv. */
+	BUG_ON(p_handle != NULL && file_priv == NULL);
+
+	ret = vmw_fifo_send_fence(dev_priv, &sequence);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Fence submission error. Syncing.\n");
+		synced = true;
+	}
+
+	if (p_handle != NULL)
+		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
+					    sequence,
+					    DRM_VMW_FENCE_FLAG_EXEC,
+					    p_fence, p_handle);
+	else
+		ret = vmw_fence_create(dev_priv->fman, sequence,
+				       DRM_VMW_FENCE_FLAG_EXEC,
+				       p_fence);
+
+	if (unlikely(ret != 0 && !synced)) {
+		(void) vmw_fallback_wait(dev_priv, false, false,
+					 sequence, false,
+					 VMW_FENCE_WAIT_TIMEOUT);
+		*p_fence = NULL;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_execbuf_copy_fence_user - copy fence object information to
+ * user-space.
+ *
+ * @dev_priv: Pointer to a vmw_private struct.
+ * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
+ * @ret: Return value from fence object creation.
+ * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
+ * which the information should be copied.
+ * @fence: Pointer to the fence object.
+ * @fence_handle: User-space fence handle.
+ *
+ * This function copies fence information to user-space. If copying fails,
+ * the user-space struct drm_vmw_fence_rep::error member is hopefully
+ * left untouched, and if it's preloaded with an -EFAULT by user-space,
+ * the error will hopefully be detected.
+ * Also if copying fails, user-space will be unable to signal the fence
+ * object so we wait for it immediately, and then unreference the
+ * user-space reference.
+ */
+void
+vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+			    struct vmw_fpriv *vmw_fp,
+			    int ret,
+			    struct drm_vmw_fence_rep __user *user_fence_rep,
+			    struct vmw_fence_obj *fence,
+			    uint32_t fence_handle)
+{
+	struct drm_vmw_fence_rep fence_rep;
+
+	if (user_fence_rep == NULL)
+		return;
+
+	memset(&fence_rep, 0, sizeof(fence_rep));
+
+	fence_rep.error = ret;
+	if (ret == 0) {
+		BUG_ON(fence == NULL);
+
+		fence_rep.handle = fence_handle;
+		fence_rep.seqno = fence->seqno;
+		vmw_update_seqno(dev_priv, &dev_priv->fifo);
+		fence_rep.passed_seqno = dev_priv->last_read_seqno;
+	}
+
+	/*
+	 * copy_to_user errors will be detected by user space not
+	 * seeing fence_rep::error filled in. Typically
+	 * user-space would have pre-set that member to -EFAULT.
+	 */
+	ret = copy_to_user(user_fence_rep, &fence_rep,
+			   sizeof(fence_rep));
+
+	/*
+	 * User-space lost the fence object. We need to sync
+	 * and unreference the handle.
+	 */
+	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+		ttm_ref_object_base_unref(vmw_fp->tfile,
+					  fence_handle, TTM_REF_USAGE);
+		DRM_ERROR("Fence copy error. Syncing.\n");
+		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
+					  false, false,
+					  VMW_FENCE_WAIT_TIMEOUT);
+	}
+}
+
+int vmw_execbuf_process(struct drm_file *file_priv,
+			struct vmw_private *dev_priv,
+			void __user *user_commands,
+			void *kernel_commands,
+			uint32_t command_size,
+			uint64_t throttle_us,
+			struct drm_vmw_fence_rep __user *user_fence_rep,
+			struct vmw_fence_obj **out_fence)
+{
+	struct vmw_sw_context *sw_context = &dev_priv->ctx;
+	struct vmw_fence_obj *fence = NULL;
+	struct vmw_resource *error_resource;
+	struct list_head resource_list;
+	uint32_t handle;
+	void *cmd;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+	if (unlikely(ret != 0))
+		return -ERESTARTSYS;
+
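+	/*
+	 * User-space batches are copied to a kernel bounce buffer before
+	 * validation, so the verifier and the device see the same data.
+	 */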
+	if (kernel_commands == NULL) {
+		sw_context->kernel = false;
+
+		ret = vmw_resize_cmd_bounce(sw_context, command_size);
+		if (unlikely(ret != 0))
+			goto out_unlock;
+
+		ret = copy_from_user(sw_context->cmd_bounce,
+				     user_commands, command_size);
+
+		if (unlikely(ret != 0)) {
+			ret = -EFAULT;
+			DRM_ERROR("Failed copying commands.\n");
+			goto out_unlock;
+		}
+		kernel_commands = sw_context->cmd_bounce;
+	} else
+		sw_context->kernel = true;
+
+	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+	sw_context->cur_reloc = 0;
+	sw_context->cur_val_buf = 0;
+	sw_context->fence_flags = 0;
+	INIT_LIST_HEAD(&sw_context->resource_list);
+	sw_context->cur_query_bo = dev_priv->pinned_bo;
+	sw_context->last_query_ctx = NULL;
+	sw_context->needs_post_query_barrier = false;
+	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
+	INIT_LIST_HEAD(&sw_context->validate_nodes);
+	INIT_LIST_HEAD(&sw_context->res_relocations);
+	if (!sw_context->res_ht_initialized) {
+		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+		if (unlikely(ret != 0))
+			goto out_unlock;
+		sw_context->res_ht_initialized = true;
+	}
+
+	INIT_LIST_HEAD(&resource_list);
+	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
+				command_size);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	ret = vmw_resources_reserve(sw_context);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	ret = vmw_validate_buffers(dev_priv, sw_context);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	ret = vmw_resources_validate(sw_context);
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	if (throttle_us) {
+		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
+				   throttle_us);
+
+		if (unlikely(ret != 0))
+			goto out_err;
+	}
+
+	cmd = vmw_fifo_reserve(dev_priv, command_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving fifo space for commands.\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	vmw_apply_relocations(sw_context);
+	memcpy(cmd, kernel_commands, command_size);
+
+	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+
+	vmw_fifo_commit(dev_priv, command_size);
+
+	vmw_query_bo_switch_commit(dev_priv, sw_context);
+	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+					 &fence,
+					 (user_fence_rep) ? &handle : NULL);
+	/*
+	 * This error is harmless, because if fence submission fails,
+	 * vmw_fifo_send_fence will sync. The error will be propagated to
+	 * user-space in @fence_rep
+	 */
+
+	if (ret != 0)
+		DRM_ERROR("Fence submission error. Syncing.\n");
+
+	vmw_resource_list_unreserve(&sw_context->resource_list, false);
+	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
+				    (void *) fence);
+
+	if (unlikely(dev_priv->pinned_bo != NULL &&
+		     !dev_priv->query_cid_valid))
+		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
+
+	vmw_clear_validations(sw_context);
+	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+				    user_fence_rep, fence, handle);
+
+	/* Don't unreference when handing fence out */
+	if (unlikely(out_fence != NULL)) {
+		*out_fence = fence;
+		fence = NULL;
+	} else if (likely(fence != NULL)) {
+		vmw_fence_obj_unreference(&fence);
+	}
+
+	list_splice_init(&sw_context->resource_list, &resource_list);
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+	/*
+	 * Unreference resources outside of the cmdbuf_mutex to
+	 * avoid deadlocks in resource destruction paths.
+	 */
+	vmw_resource_list_unreference(&resource_list);
+
+	return 0;
+
+out_err:
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_free_relocations(sw_context);
+	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+	vmw_resource_list_unreserve(&sw_context->resource_list, true);
+	vmw_clear_validations(sw_context);
+	if (unlikely(dev_priv->pinned_bo != NULL &&
+		     !dev_priv->query_cid_valid))
+		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+out_unlock:
+	list_splice_init(&sw_context->resource_list, &resource_list);
+	error_resource = sw_context->error_resource;
+	sw_context->error_resource = NULL;
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+	/*
+	 * Unreference resources outside of the cmdbuf_mutex to
+	 * avoid deadlocks in resource destruction paths.
+	 */
+	vmw_resource_list_unreference(&resource_list);
+	if (unlikely(error_resource != NULL))
+		vmw_resource_unreference(&error_resource);
+
+	return ret;
+}
+
+/**
+ * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function is called to idle the fifo and unpin the query buffer
+ * if the normal way to do this hits an error, which should typically be
+ * extremely rare.
+ */
+static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
+{
+	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
+
+	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
+	vmw_bo_pin(dev_priv->pinned_bo, false);
+	vmw_bo_pin(dev_priv->dummy_query_bo, false);
+	dev_priv->dummy_query_bo_pinned = false;
+}
+
+
+/**
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
+ * _after_ a query barrier that flushes all queries touching the current
+ * buffer pointed to by @dev_priv->pinned_bo.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ *
+ * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
+ * before calling this function.
+ */
+void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+				     struct vmw_fence_obj *fence)
+{
+	int ret = 0;
+	struct list_head validate_list;
+	struct ttm_validate_buffer pinned_val, query_val;
+	struct vmw_fence_obj *lfence = NULL;
+
+	if (dev_priv->pinned_bo == NULL)
+		goto out_unlock;
+
+	INIT_LIST_HEAD(&validate_list);
+
+	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+	list_add_tail(&pinned_val.head, &validate_list);
+
+	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+	list_add_tail(&query_val.head, &validate_list);
+
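+	/* Reservation must not fail here; retry if interrupted by a signal. */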
+	do {
+		ret = ttm_eu_reserve_buffers(&validate_list);
+	} while (ret == -ERESTARTSYS);
+
+	if (unlikely(ret != 0)) {
+		vmw_execbuf_unpin_panic(dev_priv);
+		goto out_no_reserve;
+	}
+
+	if (dev_priv->query_cid_valid) {
+		BUG_ON(fence != NULL);
+		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+		if (unlikely(ret != 0)) {
+			vmw_execbuf_unpin_panic(dev_priv);
+			goto out_no_emit;
+		}
+		dev_priv->query_cid_valid = false;
+	}
+
+	vmw_bo_pin(dev_priv->pinned_bo, false);
+	vmw_bo_pin(dev_priv->dummy_query_bo, false);
+	dev_priv->dummy_query_bo_pinned = false;
+
+	if (fence == NULL) {
+		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
+						  NULL);
+		fence = lfence;
+	}
+	ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+	if (lfence != NULL)
+		vmw_fence_obj_unreference(&lfence);
+
+	ttm_bo_unref(&query_val.bo);
+	ttm_bo_unref(&pinned_val.bo);
+	ttm_bo_unref(&dev_priv->pinned_bo);
+
+out_unlock:
+	return;
+
+out_no_emit:
+	ttm_eu_backoff_reservation(&validate_list);
+out_no_reserve:
+	ttm_bo_unref(&query_val.bo);
+	ttm_bo_unref(&pinned_val.bo);
+	ttm_bo_unref(&dev_priv->pinned_bo);
+}
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
+{
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+	if (dev_priv->query_cid_valid)
+		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
+
+
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	int ret;
+
+	/*
+	 * This will allow us to extend the ioctl argument while
+	 * maintaining backwards compatibility:
+	 * We take different code paths depending on the value of
+	 * arg->version.
+	 */
+
+	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
+		DRM_ERROR("Incorrect execbuf version.\n");
+		DRM_ERROR("You're running outdated experimental "
+			  "vmwgfx user-space drivers.");
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_execbuf_process(file_priv, dev_priv,
+				  (void __user *)(unsigned long)arg->commands,
+				  NULL, arg->command_size, arg->throttle_us,
+				  (void __user *)(unsigned long)arg->fence_rep,
+				  NULL);
+
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	vmw_kms_cursor_post_execbuf(dev_priv);
+
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
new file mode 100644
index 0000000..1b0f34b
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -0,0 +1,657 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 David Airlie
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <linux/export.h>
+
+#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
+
+#include <drm/ttm/ttm_placement.h>
+
+#define VMW_DIRTY_DELAY (HZ / 30)
+
+struct vmw_fb_par {
+	struct vmw_private *vmw_priv;
+
+	void *vmalloc;
+
+	struct vmw_dma_buffer *vmw_bo;
+	struct ttm_bo_kmap_obj map;
+
+	u32 pseudo_palette[17];
+
+	unsigned depth;
+	unsigned bpp;
+
+	unsigned max_width;
+	unsigned max_height;
+
+	void *bo_ptr;
+	unsigned bo_size;
+	bool bo_iowrite;
+
+	struct {
+		spinlock_t lock;
+		bool active;
+		unsigned x1;
+		unsigned y1;
+		unsigned x2;
+		unsigned y2;
+	} dirty;
+};
+
+static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+			    unsigned blue, unsigned transp,
+			    struct fb_info *info)
+{
+	struct vmw_fb_par *par = info->par;
+	u32 *pal = par->pseudo_palette;
+
+	if (regno > 15) {
+		DRM_ERROR("Bad regno %u.\n", regno);
+		return 1;
+	}
+
+	switch (par->depth) {
+	case 24:
+	case 32:
+		pal[regno] = ((red & 0xff00) << 8) |
+			      (green & 0xff00) |
+			     ((blue  & 0xff00) >> 8);
+		break;
+	default:
+		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int vmw_fb_check_var(struct fb_var_screeninfo *var,
+			    struct fb_info *info)
+{
+	int depth = var->bits_per_pixel;
+	struct vmw_fb_par *par = info->par;
+	struct vmw_private *vmw_priv = par->vmw_priv;
+
+	switch (var->bits_per_pixel) {
+	case 32:
+		depth = (var->transp.length > 0) ? 32 : 24;
+		break;
+	default:
+		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
+		return -EINVAL;
+	}
+
+	switch (depth) {
+	case 24:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.length = 0;
+		var->transp.offset = 0;
+		break;
+	case 32:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.length = 8;
+		var->transp.offset = 24;
+		break;
+	default:
+		DRM_ERROR("Bad depth %u.\n", depth);
+		return -EINVAL;
+	}
+
+	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+	    (var->xoffset != 0 || var->yoffset != 0)) {
+		DRM_ERROR("Can not handle panning without display topology\n");
+		return -EINVAL;
+	}
+
+	if ((var->xoffset + var->xres) > par->max_width ||
+	    (var->yoffset + var->yres) > par->max_height) {
+		DRM_ERROR("Requested geom can not fit in framebuffer\n");
+		return -EINVAL;
+	}
+
+	if (!vmw_kms_validate_mode_vram(vmw_priv,
+					var->xres * var->bits_per_pixel/8,
+					var->yoffset + var->yres)) {
+		DRM_ERROR("Requested geom can not fit in framebuffer\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
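+
+/*
+ * With the channel layout fixed up above, both the depth-24 and depth-32
+ * variants store pixels as 32-bit XRGB/ARGB words. Composing one pixel
+ * (a sketch, not driver code):
+ *
+ *	u32 pixel = (a << 24) | (r << 16) | (g << 8) | b;
+ */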
+
+static int vmw_fb_set_par(struct fb_info *info)
+{
+	struct vmw_fb_par *par = info->par;
+	struct vmw_private *vmw_priv = par->vmw_priv;
+	int ret;
+
+	info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
+
+	ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+				 info->fix.line_length,
+				 par->bpp, par->depth);
+	if (ret)
+		return ret;
+
+	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
+		/* TODO check if pitch and offset changes */
+		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+	}
+
+	/* This check is really helpful: if it fails, the user
+	 * probably cannot see anything on the screen.
+	 */
+	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
+
+	return 0;
+}
+
+static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
+			      struct fb_info *info)
+{
+	return 0;
+}
+
+static int vmw_fb_blank(int blank, struct fb_info *info)
+{
+	return 0;
+}
+
+/*
+ * Dirty code
+ */
+
+static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
+{
+	struct vmw_private *vmw_priv = par->vmw_priv;
+	struct fb_info *info = vmw_priv->fb_info;
+	int stride = (info->fix.line_length / 4);
+	int *src = (int *)info->screen_base;
+	__le32 __iomem *vram_mem = par->bo_ptr;
+	unsigned long flags;
+	unsigned x, y, w, h;
+	int i, k;
+	struct {
+		uint32_t header;
+		SVGAFifoCmdUpdate body;
+	} *cmd;
+
+	if (vmw_priv->suspended)
+		return;
+
+	spin_lock_irqsave(&par->dirty.lock, flags);
+	if (!par->dirty.active) {
+		spin_unlock_irqrestore(&par->dirty.lock, flags);
+		return;
+	}
+	x = par->dirty.x1;
+	y = par->dirty.y1;
+	w = min(par->dirty.x2, info->var.xres) - x;
+	h = min(par->dirty.y2, info->var.yres) - y;
+	par->dirty.x1 = par->dirty.x2 = 0;
+	par->dirty.y1 = par->dirty.y2 = 0;
+	spin_unlock_irqrestore(&par->dirty.lock, flags);
+
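+	/* Note: this copies every row from y to the end of the shadow
+	 * buffer, not just h rows; only columns x..x+w of each row are
+	 * written to VRAM. */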
+	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
+		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
+			iowrite32(src[k], vram_mem + k);
+	}
+
+#if 0
+	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
+#endif
+
+	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Fifo reserve failed.\n");
+		return;
+	}
+
+	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
+	cmd->body.x = cpu_to_le32(x);
+	cmd->body.y = cpu_to_le32(y);
+	cmd->body.width = cpu_to_le32(w);
+	cmd->body.height = cpu_to_le32(h);
+	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
+}
+
+static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
+			      unsigned x1, unsigned y1,
+			      unsigned width, unsigned height)
+{
+	struct fb_info *info = par->vmw_priv->fb_info;
+	unsigned long flags;
+	unsigned x2 = x1 + width;
+	unsigned y2 = y1 + height;
+
+	spin_lock_irqsave(&par->dirty.lock, flags);
+	if (par->dirty.x1 == par->dirty.x2) {
+		par->dirty.x1 = x1;
+		par->dirty.y1 = y1;
+		par->dirty.x2 = x2;
+		par->dirty.y2 = y2;
+		/* If we are active, start the dirty work;
+		 * we share the work with the defio system. */
+		if (par->dirty.active)
+			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
+	} else {
+		if (x1 < par->dirty.x1)
+			par->dirty.x1 = x1;
+		if (y1 < par->dirty.y1)
+			par->dirty.y1 = y1;
+		if (x2 > par->dirty.x2)
+			par->dirty.x2 = x2;
+		if (y2 > par->dirty.y2)
+			par->dirty.y2 = y2;
+	}
+	spin_unlock_irqrestore(&par->dirty.lock, flags);
+}
+
+static void vmw_deferred_io(struct fb_info *info,
+			    struct list_head *pagelist)
+{
+	struct vmw_fb_par *par = info->par;
+	unsigned long start, end, min, max;
+	unsigned long flags;
+	struct page *page;
+	int y1, y2;
+
+	min = ULONG_MAX;
+	max = 0;
+	list_for_each_entry(page, pagelist, lru) {
+		start = page->index << PAGE_SHIFT;
+		end = start + PAGE_SIZE - 1;
+		min = min(min, start);
+		max = max(max, end);
+	}
+
+	if (min < max) {
+		y1 = min / info->fix.line_length;
+		y2 = (max / info->fix.line_length) + 1;
+
+		spin_lock_irqsave(&par->dirty.lock, flags);
+		par->dirty.x1 = 0;
+		par->dirty.y1 = y1;
+		par->dirty.x2 = info->var.xres;
+		par->dirty.y2 = y2;
+		spin_unlock_irqrestore(&par->dirty.lock, flags);
+	}
+
+	vmw_fb_dirty_flush(par);
+}
+
+struct fb_deferred_io vmw_defio = {
+	.delay		= VMW_DIRTY_DELAY,
+	.deferred_io	= vmw_deferred_io,
+};
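+
+/*
+ * How the pieces above fit together (a sketch of the flow, not extra
+ * driver code): a CPU write to the mmap'ed shadow framebuffer faults,
+ * the defio core collects the touched pages and, after VMW_DIRTY_DELAY,
+ * calls vmw_deferred_io(), which turns page spans into a dirty rectangle
+ * and flushes it:
+ *
+ *	write to info->screen_base          (page fault, page collected)
+ *	  -> vmw_deferred_io(info, pagelist)
+ *	     -> par->dirty.{x1,y1,x2,y2} updated under dirty.lock
+ *	     -> vmw_fb_dirty_flush(par)     (copy to VRAM + SVGA_CMD_UPDATE)
+ */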
+
+/*
+ * Draw code
+ */
+
+static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	cfb_fillrect(info, rect);
+	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
+			  rect->width, rect->height);
+}
+
+static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+	cfb_copyarea(info, region);
+	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
+			  region->width, region->height);
+}
+
+static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	cfb_imageblit(info, image);
+	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
+			  image->width, image->height);
+}
+
+/*
+ * Bring up code
+ */
+
+static struct fb_ops vmw_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = vmw_fb_check_var,
+	.fb_set_par = vmw_fb_set_par,
+	.fb_setcolreg = vmw_fb_setcolreg,
+	.fb_fillrect = vmw_fb_fillrect,
+	.fb_copyarea = vmw_fb_copyarea,
+	.fb_imageblit = vmw_fb_imageblit,
+	.fb_pan_display = vmw_fb_pan_display,
+	.fb_blank = vmw_fb_blank,
+};
+
+static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
+			    size_t size, struct vmw_dma_buffer **out)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct ttm_placement ne_placement = vmw_vram_ne_placement;
+	int ret;
+
+	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	/* XXX: Should this lock be taken interruptibly? */
+	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
+	if (!vmw_bo) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
+			      &ne_placement,
+			      false,
+			      &vmw_dmabuf_bo_free);
+	if (unlikely(ret != 0))
+		goto err_unlock; /* init frees the buffer on failure */
+
+	*out = vmw_bo;
+
+	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+
+	return 0;
+
+err_unlock:
+	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+	return ret;
+}
+
+int vmw_fb_init(struct vmw_private *vmw_priv)
+{
+	struct device *device = &vmw_priv->dev->pdev->dev;
+	struct vmw_fb_par *par;
+	struct fb_info *info;
+	unsigned initial_width, initial_height;
+	unsigned fb_width, fb_height;
+	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+	int ret;
+
+	fb_bpp = 32;
+	fb_depth = 24;
+
+	/* XXX: Should these limits be taken from the device as well? */
+	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
+
+	initial_width = min(vmw_priv->initial_width, fb_width);
+	initial_height = min(vmw_priv->initial_height, fb_height);
+
+	fb_pitch = fb_width * fb_bpp / 8;
+	fb_size = fb_pitch * fb_height;
+	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
+
+	info = framebuffer_alloc(sizeof(*par), device);
+	if (!info)
+		return -ENOMEM;
+
+	/*
+	 * Par
+	 */
+	vmw_priv->fb_info = info;
+	par = info->par;
+	par->vmw_priv = vmw_priv;
+	par->depth = fb_depth;
+	par->bpp = fb_bpp;
+	par->vmalloc = NULL;
+	par->max_width = fb_width;
+	par->max_height = fb_height;
+
+	/*
+	 * Create buffers and alloc memory
+	 */
+	par->vmalloc = vmalloc(fb_size);
+	if (unlikely(par->vmalloc == NULL)) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
+
+	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
+	if (unlikely(ret != 0))
+		goto err_free;
+
+	ret = ttm_bo_kmap(&par->vmw_bo->base,
+			  0,
+			  par->vmw_bo->base.num_pages,
+			  &par->map);
+	if (unlikely(ret != 0))
+		goto err_unref;
+	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+	par->bo_size = fb_size;
+
+	/*
+	 * Fixed and var
+	 */
+	strcpy(info->fix.id, "svgadrmfb");
+	info->fix.type = FB_TYPE_PACKED_PIXELS;
+	info->fix.visual = FB_VISUAL_TRUECOLOR;
+	info->fix.type_aux = 0;
+	info->fix.xpanstep = 1; /* doing it in hw */
+	info->fix.ypanstep = 1; /* doing it in hw */
+	info->fix.ywrapstep = 0;
+	info->fix.accel = FB_ACCEL_NONE;
+	info->fix.line_length = fb_pitch;
+
+	info->fix.smem_start = 0;
+	info->fix.smem_len = fb_size;
+
+	info->pseudo_palette = par->pseudo_palette;
+	info->screen_base = par->vmalloc;
+	info->screen_size = fb_size;
+
+	info->flags = FBINFO_DEFAULT;
+	info->fbops = &vmw_fb_ops;
+
+	/* Depth 24 by default */
+	info->var.red.offset = 16;
+	info->var.green.offset = 8;
+	info->var.blue.offset = 0;
+	info->var.red.length = 8;
+	info->var.green.length = 8;
+	info->var.blue.length = 8;
+	info->var.transp.offset = 0;
+	info->var.transp.length = 0;
+
+	info->var.xres_virtual = fb_width;
+	info->var.yres_virtual = fb_height;
+	info->var.bits_per_pixel = par->bpp;
+	info->var.xoffset = 0;
+	info->var.yoffset = 0;
+	info->var.activate = FB_ACTIVATE_NOW;
+	info->var.height = -1;
+	info->var.width = -1;
+
+	info->var.xres = initial_width;
+	info->var.yres = initial_height;
+
+	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+
+	info->apertures = alloc_apertures(1);
+	if (!info->apertures) {
+		ret = -ENOMEM;
+		goto err_aper;
+	}
+	info->apertures->ranges[0].base = vmw_priv->vram_start;
+	info->apertures->ranges[0].size = vmw_priv->vram_size;
+
+	/*
+	 * Dirty & Deferred IO
+	 */
+	par->dirty.x1 = par->dirty.x2 = 0;
+	par->dirty.y1 = par->dirty.y2 = 0;
+	par->dirty.active = true;
+	spin_lock_init(&par->dirty.lock);
+	info->fbdefio = &vmw_defio;
+	fb_deferred_io_init(info);
+
+	ret = register_framebuffer(info);
+	if (unlikely(ret != 0))
+		goto err_defio;
+
+	return 0;
+
+err_defio:
+	fb_deferred_io_cleanup(info);
+err_aper:
+	ttm_bo_kunmap(&par->map);
+err_unref:
+	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
+err_free:
+	vfree(par->vmalloc);
+	framebuffer_release(info);
+	vmw_priv->fb_info = NULL;
+
+	return ret;
+}
+
+int vmw_fb_close(struct vmw_private *vmw_priv)
+{
+	struct fb_info *info;
+	struct vmw_fb_par *par;
+	struct ttm_buffer_object *bo;
+
+	if (!vmw_priv->fb_info)
+		return 0;
+
+	info = vmw_priv->fb_info;
+	par = info->par;
+	bo = &par->vmw_bo->base;
+	par->vmw_bo = NULL;
+
+	/* XXX: Is this the correct teardown order? */
+	fb_deferred_io_cleanup(info);
+	unregister_framebuffer(info);
+
+	ttm_bo_kunmap(&par->map);
+	ttm_bo_unref(&bo);
+
+	vfree(par->vmalloc);
+	framebuffer_release(info);
+
+	return 0;
+}
+
+int vmw_fb_off(struct vmw_private *vmw_priv)
+{
+	struct fb_info *info;
+	struct vmw_fb_par *par;
+	unsigned long flags;
+
+	if (!vmw_priv->fb_info)
+		return -EINVAL;
+
+	info = vmw_priv->fb_info;
+	par = info->par;
+
+	spin_lock_irqsave(&par->dirty.lock, flags);
+	par->dirty.active = false;
+	spin_unlock_irqrestore(&par->dirty.lock, flags);
+
+	flush_delayed_work(&info->deferred_work);
+
+	par->bo_ptr = NULL;
+	ttm_bo_kunmap(&par->map);
+
+	vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
+
+	return 0;
+}
+
+int vmw_fb_on(struct vmw_private *vmw_priv)
+{
+	struct fb_info *info;
+	struct vmw_fb_par *par;
+	unsigned long flags;
+	bool dummy;
+	int ret;
+
+	if (!vmw_priv->fb_info)
+		return -EINVAL;
+
+	info = vmw_priv->fb_info;
+	par = info->par;
+
+	/* we are already active */
+	if (par->bo_ptr != NULL)
+		return 0;
+
+	/* Make sure that all overlays are stopped when we take over */
+	vmw_overlay_stop_all(vmw_priv);
+
+	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("could not move buffer to start of VRAM\n");
+		goto err_no_buffer;
+	}
+
+	ret = ttm_bo_kmap(&par->vmw_bo->base,
+			  0,
+			  par->vmw_bo->base.num_pages,
+			  &par->map);
+	BUG_ON(ret != 0);
+	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
+
+	spin_lock_irqsave(&par->dirty.lock, flags);
+	par->dirty.active = true;
+	spin_unlock_irqrestore(&par->dirty.lock, flags);
+
+err_no_buffer:
+	vmw_fb_set_par(info);
+
+	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
+
+	/* If there already was stuff dirty we won't
+	 * schedule a new work, so let's do it now */
+	schedule_delayed_work(&info->deferred_work, 0);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
new file mode 100644
index 0000000..c62d20e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -0,0 +1,1154 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
+
+#define VMW_FENCE_WRAP (1 << 31)
+
+struct vmw_fence_manager {
+	int num_fence_objects;
+	struct vmw_private *dev_priv;
+	spinlock_t lock;
+	struct list_head fence_list;
+	struct work_struct work;
+	u32 user_fence_size;
+	u32 fence_size;
+	u32 event_fence_action_size;
+	bool fifo_down;
+	struct list_head cleanup_list;
+	uint32_t pending_actions[VMW_ACTION_MAX];
+	struct mutex goal_irq_mutex;
+	bool goal_irq_on; /* Protected by @goal_irq_mutex */
+	bool seqno_valid; /* Protected by @lock, and may not be set to true
+			     without the @goal_irq_mutex held. */
+};
+
+struct vmw_user_fence {
+	struct ttm_base_object base;
+	struct vmw_fence_obj fence;
+};
+
+/**
+ * struct vmw_event_fence_action - fence action that delivers a drm event.
+ *
+ * @action: A struct vmw_fence_action to hook up to a fence.
+ * @fpriv_head: List head used to link this action into the submitter's
+ * per-file list of pending events.
+ * @event: A struct drm_pending_event that controls the event delivery.
+ * @fence: A referenced pointer to the fence to keep it alive while @action
+ * hangs on it.
+ * @dev: Pointer to a struct drm_device so we can access the event machinery.
+ * @tv_sec: If non-NULL, the variable pointed to will be assigned the
+ * current time tv_sec value when the fence signals.
+ * @tv_usec: Must be set if @tv_sec is set; the variable pointed to will
+ * be assigned the current time tv_usec value when the fence signals.
+ */
+struct vmw_event_fence_action {
+	struct vmw_fence_action action;
+	struct list_head fpriv_head;
+
+	struct drm_pending_event *event;
+	struct vmw_fence_obj *fence;
+	struct drm_device *dev;
+
+	uint32_t *tv_sec;
+	uint32_t *tv_usec;
+};
+
+/**
+ * Note on fencing subsystem usage of irqs:
+ * Typically the vmw_fences_update function is called
+ *
+ * a) When a new fence seqno has been submitted by the fifo code.
+ * b) On-demand when we have waiters. Sleeping waiters will switch on the
+ * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
+ * irq is received. When the last fence waiter is gone, that IRQ is masked
+ * away.
+ *
+ * In situations where there are no waiters and we don't submit any new fences,
+ * fence objects may not be signaled. This is perfectly OK, since there are
+ * no consumers of the signaled data, but that is NOT ok when there are fence
+ * actions attached to a fence. The fencing subsystem then makes use of the
+ * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
+ * which has an action attached, and each time vmw_fences_update is called,
+ * the subsystem makes sure the fence goal seqno is updated.
+ *
+ * The fence goal seqno irq is on as long as there are unsignaled fence
+ * objects with actions attached to them.
+ */
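+
+/*
+ * A sketch of the sleeping-waiter sequence described above (compare
+ * vmw_fence_obj_wait() below; this is illustration, not extra code):
+ *
+ *	vmw_seqno_waiter_add(dev_priv);        unmasks the ANY_FENCE irq
+ *	wait_event_timeout(fence->queue,
+ *			   vmw_fence_obj_signaled(fence, flags), timeout);
+ *	vmw_seqno_waiter_remove(dev_priv);     remasks once the last
+ *	                                       waiter is gone
+ */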
+
+static void vmw_fence_obj_destroy_locked(struct kref *kref)
+{
+	struct vmw_fence_obj *fence =
+		container_of(kref, struct vmw_fence_obj, kref);
+
+	struct vmw_fence_manager *fman = fence->fman;
+	unsigned int num_fences;
+
+	list_del_init(&fence->head);
+	num_fences = --fman->num_fence_objects;
+	spin_unlock_irq(&fman->lock);
+	if (fence->destroy)
+		fence->destroy(fence);
+	else
+		kfree(fence);
+
+	spin_lock_irq(&fman->lock);
+}
+
+
+/**
+ * Execute signal actions on fences recently signaled.
+ * This is done from a workqueue so we don't have to execute
+ * signal actions from atomic context.
+ */
+
+static void vmw_fence_work_func(struct work_struct *work)
+{
+	struct vmw_fence_manager *fman =
+		container_of(work, struct vmw_fence_manager, work);
+	struct list_head list;
+	struct vmw_fence_action *action, *next_action;
+	bool seqno_valid;
+
+	do {
+		INIT_LIST_HEAD(&list);
+		mutex_lock(&fman->goal_irq_mutex);
+
+		spin_lock_irq(&fman->lock);
+		list_splice_init(&fman->cleanup_list, &list);
+		seqno_valid = fman->seqno_valid;
+		spin_unlock_irq(&fman->lock);
+
+		if (!seqno_valid && fman->goal_irq_on) {
+			fman->goal_irq_on = false;
+			vmw_goal_waiter_remove(fman->dev_priv);
+		}
+		mutex_unlock(&fman->goal_irq_mutex);
+
+		if (list_empty(&list))
+			return;
+
+		/*
+		 * At this point, only we should be able to manipulate the
+		 * list heads of the actions we have on the private list,
+		 * hence fman::lock need not be held.
+		 */
+
+		list_for_each_entry_safe(action, next_action, &list, head) {
+			list_del_init(&action->head);
+			if (action->cleanup)
+				action->cleanup(action);
+		}
+	} while (1);
+}
+
+struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
+{
+	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
+
+	if (unlikely(fman == NULL))
+		return NULL;
+
+	fman->dev_priv = dev_priv;
+	spin_lock_init(&fman->lock);
+	INIT_LIST_HEAD(&fman->fence_list);
+	INIT_LIST_HEAD(&fman->cleanup_list);
+	INIT_WORK(&fman->work, &vmw_fence_work_func);
+	fman->fifo_down = true;
+	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
+	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
+	fman->event_fence_action_size =
+		ttm_round_pot(sizeof(struct vmw_event_fence_action));
+	mutex_init(&fman->goal_irq_mutex);
+
+	return fman;
+}
+
+void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
+{
+	unsigned long irq_flags;
+	bool lists_empty;
+
+	(void) cancel_work_sync(&fman->work);
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	lists_empty = list_empty(&fman->fence_list) &&
+		list_empty(&fman->cleanup_list);
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+	BUG_ON(!lists_empty);
+	kfree(fman);
+}
+
+static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
+			      struct vmw_fence_obj *fence,
+			      u32 seqno,
+			      uint32_t mask,
+			      void (*destroy) (struct vmw_fence_obj *fence))
+{
+	unsigned long irq_flags;
+	unsigned int num_fences;
+	int ret = 0;
+
+	fence->seqno = seqno;
+	INIT_LIST_HEAD(&fence->seq_passed_actions);
+	fence->fman = fman;
+	fence->signaled = 0;
+	fence->signal_mask = mask;
+	kref_init(&fence->kref);
+	fence->destroy = destroy;
+	init_waitqueue_head(&fence->queue);
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	if (unlikely(fman->fifo_down)) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+	list_add_tail(&fence->head, &fman->fence_list);
+	num_fences = ++fman->num_fence_objects;
+
+out_unlock:
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+	return ret;
+
+}
+
+struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
+{
+	if (unlikely(fence == NULL))
+		return NULL;
+
+	kref_get(&fence->kref);
+	return fence;
+}
+
+/**
+ * vmw_fence_obj_unreference
+ *
+ * Note that this function may not be entered with disabled irqs since
+ * it may re-enable them in the destroy function.
+ *
+ */
+void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
+{
+	struct vmw_fence_obj *fence = *fence_p;
+	struct vmw_fence_manager *fman;
+
+	if (unlikely(fence == NULL))
+		return;
+
+	fman = fence->fman;
+	*fence_p = NULL;
+	spin_lock_irq(&fman->lock);
+	BUG_ON(atomic_read(&fence->kref.refcount) == 0);
+	kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
+	spin_unlock_irq(&fman->lock);
+}
+
+void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
+				struct list_head *list)
+{
+	struct vmw_fence_action *action, *next_action;
+
+	list_for_each_entry_safe(action, next_action, list, head) {
+		list_del_init(&action->head);
+		fman->pending_actions[action->type]--;
+		if (action->seq_passed != NULL)
+			action->seq_passed(action);
+
+		/*
+		 * Add the cleanup action to the cleanup list so that
+		 * it will be performed by a worker task.
+		 */
+
+		list_add_tail(&action->head, &fman->cleanup_list);
+	}
+}
+
+/**
+ * vmw_fence_goal_new_locked - Figure out a new device fence goal
+ * seqno if needed.
+ *
+ * @fman: Pointer to a fence manager.
+ * @passed_seqno: The seqno the device currently signals as passed.
+ *
+ * This function should be called with the fence manager lock held.
+ * It is typically called when we have a new passed_seqno, and
+ * we might need to update the fence goal. It checks to see whether
+ * the current fence goal has already passed, and, in that case,
+ * scans through all unsignaled fences to get the next fence object with an
+ * action attached, and sets the seqno of that fence as a new fence goal.
+ *
+ * Returns true if the device goal seqno was updated, false otherwise.
+ */
+static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
+				      u32 passed_seqno)
+{
+	u32 goal_seqno;
+	__le32 __iomem *fifo_mem;
+	struct vmw_fence_obj *fence;
+
+	if (likely(!fman->seqno_valid))
+		return false;
+
+	fifo_mem = fman->dev_priv->mmio_virt;
+	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
+		return false;
+
+	fman->seqno_valid = false;
+	list_for_each_entry(fence, &fman->fence_list, head) {
+		if (!list_empty(&fence->seq_passed_actions)) {
+			fman->seqno_valid = true;
+			iowrite32(fence->seqno,
+				  fifo_mem + SVGA_FIFO_FENCE_GOAL);
+			break;
+		}
+	}
+
+	return true;
+}
+
+
+/**
+ * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
+ * needed.
+ *
+ * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
+ * considered as a device fence goal.
+ *
+ * This function should be called with the fence manager lock held.
+ * It is typically called when an action has been attached to a fence to
+ * check whether the seqno of that fence should be used for a fence
+ * goal interrupt. This is typically needed if the current fence goal is
+ * invalid, or has a higher seqno than that of the current fence object.
+ *
+ * Returns true if the device goal seqno was updated, false otherwise.
+ */
+static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
+{
+	u32 goal_seqno;
+	__le32 __iomem *fifo_mem;
+
+	if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC)
+		return false;
+
+	fifo_mem = fence->fman->dev_priv->mmio_virt;
+	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	if (likely(fence->fman->seqno_valid &&
+		   goal_seqno - fence->seqno < VMW_FENCE_WRAP))
+		return false;
+
+	iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	fence->fman->seqno_valid = true;
+
+	return true;
+}
+
+void vmw_fences_update(struct vmw_fence_manager *fman)
+{
+	unsigned long flags;
+	struct vmw_fence_obj *fence, *next_fence;
+	struct list_head action_list;
+	bool needs_rerun;
+	uint32_t seqno, new_seqno;
+	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+
+	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+rerun:
+	spin_lock_irqsave(&fman->lock, flags);
+	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
+		if (seqno - fence->seqno < VMW_FENCE_WRAP) {
+			list_del_init(&fence->head);
+			fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
+			INIT_LIST_HEAD(&action_list);
+			list_splice_init(&fence->seq_passed_actions,
+					 &action_list);
+			vmw_fences_perform_actions(fman, &action_list);
+			wake_up_all(&fence->queue);
+		} else
+			break;
+	}
+
+	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
+
+	if (!list_empty(&fman->cleanup_list))
+		(void) schedule_work(&fman->work);
+	spin_unlock_irqrestore(&fman->lock, flags);
+
+	/*
+	 * Rerun if the fence goal seqno was updated, and the
+	 * hardware might have raced with that update, so that
+	 * we missed a fence_goal irq.
+	 */
+
+	if (unlikely(needs_rerun)) {
+		new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+		if (new_seqno != seqno) {
+			seqno = new_seqno;
+			goto rerun;
+		}
+	}
+}
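+
+/*
+ * The "seqno - fence->seqno < VMW_FENCE_WRAP" test above relies on
+ * unsigned 32-bit wrap-around. Worked example: with fence->seqno ==
+ * 0xfffffff0 and a device seqno that has wrapped to 0x00000010, the
+ * difference 0x10 - 0xfffffff0 == 0x20 < (1 << 31), so the fence
+ * correctly counts as passed even though the naive comparison
+ * 0x10 >= 0xfffffff0 would fail.
+ */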
+
+bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
+			    uint32_t flags)
+{
+	struct vmw_fence_manager *fman = fence->fman;
+	unsigned long irq_flags;
+	uint32_t signaled;
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	signaled = fence->signaled;
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+	flags &= fence->signal_mask;
+	if ((signaled & flags) == flags)
+		return 1;
+
+	if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
+		vmw_fences_update(fman);
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	signaled = fence->signaled;
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+	return ((signaled & flags) == flags);
+}
+
+int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
+		       uint32_t flags, bool lazy,
+		       bool interruptible, unsigned long timeout)
+{
+	struct vmw_private *dev_priv = fence->fman->dev_priv;
+	long ret;
+
+	if (likely(vmw_fence_obj_signaled(fence, flags)))
+		return 0;
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	vmw_seqno_waiter_add(dev_priv);
+
+	if (interruptible)
+		ret = wait_event_interruptible_timeout
+			(fence->queue,
+			 vmw_fence_obj_signaled(fence, flags),
+			 timeout);
+	else
+		ret = wait_event_timeout
+			(fence->queue,
+			 vmw_fence_obj_signaled(fence, flags),
+			 timeout);
+
+	vmw_seqno_waiter_remove(dev_priv);
+
+	if (unlikely(ret == 0))
+		ret = -EBUSY;
+	else if (likely(ret > 0))
+		ret = 0;
+
+	return ret;
+}
+
+void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
+{
+	struct vmw_private *dev_priv = fence->fman->dev_priv;
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+}
+
+static void vmw_fence_destroy(struct vmw_fence_obj *fence)
+{
+	struct vmw_fence_manager *fman = fence->fman;
+
+	kfree(fence);
+	/*
+	 * Free kernel space accounting.
+	 */
+	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
+			    fman->fence_size);
+}
+
+int vmw_fence_create(struct vmw_fence_manager *fman,
+		     uint32_t seqno,
+		     uint32_t mask,
+		     struct vmw_fence_obj **p_fence)
+{
+	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
+	struct vmw_fence_obj *fence;
+	int ret;
+
+	ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
+				   false, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (unlikely(fence == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_object;
+	}
+
+	ret = vmw_fence_obj_init(fman, fence, seqno, mask,
+				 vmw_fence_destroy);
+	if (unlikely(ret != 0))
+		goto out_err_init;
+
+	*p_fence = fence;
+	return 0;
+
+out_err_init:
+	kfree(fence);
+out_no_object:
+	ttm_mem_global_free(mem_glob, fman->fence_size);
+	return ret;
+}
+
+
+static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
+{
+	struct vmw_user_fence *ufence =
+		container_of(fence, struct vmw_user_fence, fence);
+	struct vmw_fence_manager *fman = fence->fman;
+
+	ttm_base_object_kfree(ufence, base);
+	/*
+	 * Free kernel space accounting.
+	 */
+	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
+			    fman->user_fence_size);
+}
+
+static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_user_fence *ufence =
+		container_of(base, struct vmw_user_fence, base);
+	struct vmw_fence_obj *fence = &ufence->fence;
+
+	*p_base = NULL;
+	vmw_fence_obj_unreference(&fence);
+}
+
+int vmw_user_fence_create(struct drm_file *file_priv,
+			  struct vmw_fence_manager *fman,
+			  uint32_t seqno,
+			  uint32_t mask,
+			  struct vmw_fence_obj **p_fence,
+			  uint32_t *p_handle)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_user_fence *ufence;
+	struct vmw_fence_obj *tmp;
+	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
+	int ret;
+
+	/*
+	 * Kernel memory space accounting, since this object may
+	 * be created by a user-space request.
+	 */
+
+	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
+				   false, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
+	if (unlikely(ufence == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_object;
+	}
+
+	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
+				 mask, vmw_user_fence_destroy);
+	if (unlikely(ret != 0)) {
+		kfree(ufence);
+		goto out_no_object;
+	}
+
+	/*
+	 * The base object holds a reference which is freed in
+	 * vmw_user_fence_base_release.
+	 */
+	tmp = vmw_fence_obj_reference(&ufence->fence);
+	ret = ttm_base_object_init(tfile, &ufence->base, false,
+				   VMW_RES_FENCE,
+				   &vmw_user_fence_base_release, NULL);
+
+
+	if (unlikely(ret != 0)) {
+		/*
+		 * Free the base object's reference
+		 */
+		vmw_fence_obj_unreference(&tmp);
+		goto out_err;
+	}
+
+	*p_fence = &ufence->fence;
+	*p_handle = ufence->base.hash.key;
+
+	return 0;
+out_err:
+	tmp = &ufence->fence;
+	vmw_fence_obj_unreference(&tmp);
+out_no_object:
+	ttm_mem_global_free(mem_glob, fman->user_fence_size);
+	return ret;
+}
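+
+/*
+ * Lifetime summary for the object created above (a sketch; the variable
+ * name f is illustrative): the fence starts out with two references, one
+ * returned through @p_fence and one held by the ttm base object:
+ *
+ *	vmw_user_fence_create(..., &f, &handle);   kref == 2
+ *	vmw_fence_obj_unreference(&f);             kref == 1, caller done
+ *	ttm_ref_object_base_unref(...);            base release drops the
+ *	                                           last ref, fence freed
+ */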
+
+
+/**
+ * vmw_fence_fifo_down - signal all unsignaled fence objects.
+ */
+
+void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
+{
+	unsigned long irq_flags;
+	struct list_head action_list;
+	int ret;
+
+	/*
+	 * The list may be altered while we traverse it, so always
+	 * restart when we've released the fman->lock.
+	 */
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	fman->fifo_down = true;
+	while (!list_empty(&fman->fence_list)) {
+		struct vmw_fence_obj *fence =
+			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
+				   head);
+		kref_get(&fence->kref);
+		spin_unlock_irq(&fman->lock);
+
+		ret = vmw_fence_obj_wait(fence, fence->signal_mask,
+					 false, false,
+					 VMW_FENCE_WAIT_TIMEOUT);
+
+		if (unlikely(ret != 0)) {
+			list_del_init(&fence->head);
+			fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
+			INIT_LIST_HEAD(&action_list);
+			list_splice_init(&fence->seq_passed_actions,
+					 &action_list);
+			vmw_fences_perform_actions(fman, &action_list);
+			wake_up_all(&fence->queue);
+		}
+
+		spin_lock_irq(&fman->lock);
+
+		BUG_ON(!list_empty(&fence->head));
+		kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
+	}
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+}
+
+void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	fman->fifo_down = false;
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+}
+
+
+int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct drm_vmw_fence_wait_arg *arg =
+	    (struct drm_vmw_fence_wait_arg *)data;
+	unsigned long timeout;
+	struct ttm_base_object *base;
+	struct vmw_fence_obj *fence;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret;
+	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
+
+	/*
+	 * 64-bit division is not available on all 32-bit systems, so
+	 * approximate the division by 1000000 (us -> jiffies scale) with
+	 * shifts: 1/2^20 + 1/2^24 - 1/2^26 ~= 1/1000000 (about 0.2% low).
+	 */
+
+	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
+	  (wait_timeout >> 26);
+
+	if (!arg->cookie_valid) {
+		arg->cookie_valid = 1;
+		arg->kernel_cookie = jiffies + wait_timeout;
+	}
+
+	base = ttm_base_object_lookup(tfile, arg->handle);
+	if (unlikely(base == NULL)) {
+		printk(KERN_ERR "Wait invalid fence object handle "
+		       "0x%08lx.\n",
+		       (unsigned long)arg->handle);
+		return -EINVAL;
+	}
+
+	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+
+	timeout = jiffies;
+	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
+		ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
+		       0 : -EBUSY);
+		goto out;
+	}
+
+	timeout = (unsigned long)arg->kernel_cookie - timeout;
+
+	ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);
+
+out:
+	ttm_base_object_unref(&base);
+
+	/*
+	 * Optionally unref the fence object.
+	 */
+
+	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
+		return ttm_ref_object_base_unref(tfile, arg->handle,
+						 TTM_REF_USAGE);
+	return ret;
+}
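+
+/*
+ * The kernel_cookie handling above lets a wait that is restarted (for
+ * instance after -ERESTARTSYS) resume against the original deadline
+ * instead of a fresh timeout. Effective behaviour, sketched:
+ *
+ *	first call:   cookie = jiffies + timeout;  wait(timeout)
+ *	restarted:    wait(cookie - jiffies)       remaining time only
+ *	past cookie:  return signaled ? 0 : -EBUSY
+ */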
+
+int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_vmw_fence_signaled_arg *arg =
+		(struct drm_vmw_fence_signaled_arg *) data;
+	struct ttm_base_object *base;
+	struct vmw_fence_obj *fence;
+	struct vmw_fence_manager *fman;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	base = ttm_base_object_lookup(tfile, arg->handle);
+	if (unlikely(base == NULL)) {
+		printk(KERN_ERR "Fence signaled invalid fence object handle "
+		       "0x%08lx.\n",
+		       (unsigned long)arg->handle);
+		return -EINVAL;
+	}
+
+	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+	fman = fence->fman;
+
+	arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
+	spin_lock_irq(&fman->lock);
+
+	arg->signaled_flags = fence->signaled;
+	arg->passed_seqno = dev_priv->last_read_seqno;
+	spin_unlock_irq(&fman->lock);
+
+	ttm_base_object_unref(&base);
+
+	return 0;
+}
+
+
+int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_fence_arg *arg =
+		(struct drm_vmw_fence_arg *) data;
+
+	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					 arg->handle,
+					 TTM_REF_USAGE);
+}
+
+/**
+ * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
+ *
+ * @fman: Pointer to a struct vmw_fence_manager
+ * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
+ * with pointers to a struct drm_file object about to be closed.
+ *
+ * This function removes all pending fence events with references to a
+ * specific struct drm_file object about to be closed. The caller is required
+ * to pass a list of all struct vmw_event_fence_action objects with such
+ * events attached. This function is typically called before the
+ * struct drm_file object's event management is taken down.
+ */
+void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
+				struct list_head *event_list)
+{
+	struct vmw_event_fence_action *eaction;
+	struct drm_pending_event *event;
+	unsigned long irq_flags;
+
+	while (1) {
+		spin_lock_irqsave(&fman->lock, irq_flags);
+		if (list_empty(event_list))
+			goto out_unlock;
+		eaction = list_first_entry(event_list,
+					   struct vmw_event_fence_action,
+					   fpriv_head);
+		list_del_init(&eaction->fpriv_head);
+		event = eaction->event;
+		eaction->event = NULL;
+		spin_unlock_irqrestore(&fman->lock, irq_flags);
+		event->destroy(event);
+	}
+out_unlock:
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+}
+
+
+/**
+ * vmw_event_fence_action_seq_passed
+ *
+ * @action: The struct vmw_fence_action embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * This function is called when the seqno of the fence where @action is
+ * attached has passed. It queues the event on the submitter's event list.
+ * This function is always called from atomic context, and may be called
+ * from irq context.
+ */
+static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
+{
+	struct vmw_event_fence_action *eaction =
+		container_of(action, struct vmw_event_fence_action, action);
+	struct drm_device *dev = eaction->dev;
+	struct drm_pending_event *event = eaction->event;
+	struct drm_file *file_priv;
+	unsigned long irq_flags;
+
+	if (unlikely(event == NULL))
+		return;
+
+	file_priv = event->file_priv;
+	spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+	if (likely(eaction->tv_sec != NULL)) {
+		struct timeval tv;
+
+		do_gettimeofday(&tv);
+		*eaction->tv_sec = tv.tv_sec;
+		*eaction->tv_usec = tv.tv_usec;
+	}
+
+	list_del_init(&eaction->fpriv_head);
+	list_add_tail(&eaction->event->link, &file_priv->event_list);
+	eaction->event = NULL;
+	wake_up_all(&file_priv->event_wait);
+	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+}
+
+/**
+ * vmw_event_fence_action_cleanup
+ *
+ * @action: The struct vmw_fence_action embedded in a struct
+ * vmw_event_fence_action.
+ *
+ * This function is the struct vmw_fence_action destructor. It's typically
+ * called from a workqueue.
+ */
+static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
+{
+	struct vmw_event_fence_action *eaction =
+		container_of(action, struct vmw_event_fence_action, action);
+	struct vmw_fence_manager *fman = eaction->fence->fman;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	list_del(&eaction->fpriv_head);
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+	vmw_fence_obj_unreference(&eaction->fence);
+	kfree(eaction);
+}
+
+
+/**
+ * vmw_fence_obj_add_action - Add an action to a fence object.
+ *
+ * @fence - The fence object.
+ * @action - The action to add.
+ *
+ * Note that the action callbacks may be executed before this function
+ * returns.
+ */
+void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
+			      struct vmw_fence_action *action)
+{
+	struct vmw_fence_manager *fman = fence->fman;
+	unsigned long irq_flags;
+	bool run_update = false;
+
+	mutex_lock(&fman->goal_irq_mutex);
+	spin_lock_irqsave(&fman->lock, irq_flags);
+
+	fman->pending_actions[action->type]++;
+	if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
+		struct list_head action_list;
+
+		INIT_LIST_HEAD(&action_list);
+		list_add_tail(&action->head, &action_list);
+		vmw_fences_perform_actions(fman, &action_list);
+	} else {
+		list_add_tail(&action->head, &fence->seq_passed_actions);
+
+		/*
+		 * This function may set fman::seqno_valid, so it must
+		 * be run with the goal_irq_mutex held.
+		 */
+		run_update = vmw_fence_goal_check_locked(fence);
+	}
+
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+	if (run_update) {
+		if (!fman->goal_irq_on) {
+			fman->goal_irq_on = true;
+			vmw_goal_waiter_add(fman->dev_priv);
+		}
+		vmw_fences_update(fman);
+	}
+	mutex_unlock(&fman->goal_irq_mutex);
+
+}
+
+/**
+ * vmw_event_fence_action_queue - Queue an event for sending when a fence
+ * object seqno has passed.
+ *
+ * @file_priv: The file connection on which the event should be posted.
+ * @fence: The fence object on which to post the event.
+ * @event: Event to be posted. This event should've been allocated
+ * using k[mz]alloc, and should've been completely initialized.
+ * @tv_sec: If non-NULL, the variable pointed to will be assigned the
+ * current time tv_sec value when the fence signals.
+ * @tv_usec: Must be set if @tv_sec is set; the variable pointed to will
+ * be assigned the current time tv_usec value when the fence signals.
+ * @interruptible: Interruptible waits if possible.
+ *
+ * As a side effect, the object pointed to by @event may have been
+ * freed when this function returns. If this function returns with
+ * an error code, the caller needs to free that object.
+ */
+
+int vmw_event_fence_action_queue(struct drm_file *file_priv,
+				 struct vmw_fence_obj *fence,
+				 struct drm_pending_event *event,
+				 uint32_t *tv_sec,
+				 uint32_t *tv_usec,
+				 bool interruptible)
+{
+	struct vmw_event_fence_action *eaction;
+	struct vmw_fence_manager *fman = fence->fman;
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+	unsigned long irq_flags;
+
+	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
+	if (unlikely(eaction == NULL))
+		return -ENOMEM;
+
+	eaction->event = event;
+
+	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
+	eaction->action.cleanup = vmw_event_fence_action_cleanup;
+	eaction->action.type = VMW_ACTION_EVENT;
+
+	eaction->fence = vmw_fence_obj_reference(fence);
+	eaction->dev = fman->dev_priv->dev;
+	eaction->tv_sec = tv_sec;
+	eaction->tv_usec = tv_usec;
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+	vmw_fence_obj_add_action(fence, &eaction->action);
+
+	return 0;
+}
+
+struct vmw_event_fence_pending {
+	struct drm_pending_event base;
+	struct drm_vmw_event_fence event;
+};
+
+int vmw_event_fence_action_create(struct drm_file *file_priv,
+				  struct vmw_fence_obj *fence,
+				  uint32_t flags,
+				  uint64_t user_data,
+				  bool interruptible)
+{
+	struct vmw_event_fence_pending *event;
+	struct drm_device *dev = fence->fman->dev_priv->dev;
+	unsigned long irq_flags;
+	int ret;
+
+	spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+	ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
+	if (likely(ret == 0))
+		file_priv->event_space -= sizeof(event->event);
+
+	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate event space for this file.\n");
+		goto out_no_space;
+	}
+
+
+	event = kzalloc(sizeof(*event), GFP_KERNEL);
+	if (unlikely(event == NULL)) {
+		DRM_ERROR("Failed to allocate an event.\n");
+		ret = -ENOMEM;
+		goto out_no_event;
+	}
+
+	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
+	event->event.base.length = sizeof(*event);
+	event->event.user_data = user_data;
+
+	event->base.event = &event->event.base;
+	event->base.file_priv = file_priv;
+	event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+
+	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   &event->event.tv_sec,
+						   &event->event.tv_usec,
+						   interruptible);
+	else
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   NULL,
+						   NULL,
+						   interruptible);
+	if (ret != 0)
+		goto out_no_queue;
+
+	return 0;
+
+out_no_queue:
+	event->base.destroy(&event->base);
+out_no_event:
+	spin_lock_irqsave(&dev->event_lock, irq_flags);
+	file_priv->event_space += sizeof(event->event);
+	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+out_no_space:
+	return ret;
+}
+
+int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_fence_event_arg *arg =
+		(struct drm_vmw_fence_event_arg *) data;
+	struct vmw_fence_obj *fence = NULL;
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+	struct drm_vmw_fence_rep __user *user_fence_rep =
+		(struct drm_vmw_fence_rep __user *)(unsigned long)
+		arg->fence_rep;
+	uint32_t handle;
+	int ret;
+
+	/*
+	 * Look up an existing fence object,
+	 * and if user-space wants a new reference,
+	 * add one.
+	 */
+	if (arg->handle) {
+		struct ttm_base_object *base =
+			ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+
+		if (unlikely(base == NULL)) {
+			DRM_ERROR("Fence event invalid fence object handle "
+				  "0x%08lx.\n",
+				  (unsigned long)arg->handle);
+			return -EINVAL;
+		}
+		fence = &(container_of(base, struct vmw_user_fence,
+				       base)->fence);
+		(void) vmw_fence_obj_reference(fence);
+
+		if (user_fence_rep != NULL) {
+			bool existed;
+
+			ret = ttm_ref_object_add(vmw_fp->tfile, base,
+						 TTM_REF_USAGE, &existed);
+			if (unlikely(ret != 0)) {
+				DRM_ERROR("Failed to reference a fence "
+					  "object.\n");
+				goto out_no_ref_obj;
+			}
+			handle = base->hash.key;
+		}
+		ttm_base_object_unref(&base);
+	}
+
+	/*
+	 * Create a new fence object.
+	 */
+	if (!fence) {
+		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+						 &fence,
+						 (user_fence_rep) ?
+						 &handle : NULL);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Fence event failed to create fence.\n");
+			return ret;
+		}
+	}
+
+	BUG_ON(fence == NULL);
+
+	ret = vmw_event_fence_action_create(file_priv, fence,
+					    arg->flags,
+					    arg->user_data,
+					    true);
+
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Failed to attach event to fence.\n");
+		goto out_no_create;
+	}
+
+	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
+				    handle);
+	vmw_fence_obj_unreference(&fence);
+	return 0;
+out_no_create:
+	if (user_fence_rep != NULL)
+		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					  handle, TTM_REF_USAGE);
+out_no_ref_obj:
+	vmw_fence_obj_unreference(&fence);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
new file mode 100644
index 0000000..faf2e78
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -0,0 +1,120 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_FENCE_H_
+#define _VMWGFX_FENCE_H_
+
+#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
+
+struct vmw_private;
+
+struct vmw_fence_manager;
+
+/**
+ * enum vmw_action_type - Types of actions that can be attached to a
+ * fence object; used to index the manager's pending-action counters.
+ */
+enum vmw_action_type {
+	VMW_ACTION_EVENT = 0,
+	VMW_ACTION_MAX
+};
+
+struct vmw_fence_action {
+	struct list_head head;
+	enum vmw_action_type type;
+	void (*seq_passed) (struct vmw_fence_action *action);
+	void (*cleanup) (struct vmw_fence_action *action);
+};
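+
+/*
+ * A minimal sketch of embedding a custom action (the type and callback
+ * names are hypothetical; the event action in vmwgfx_fence.c is the only
+ * in-tree user, attached via vmw_fence_obj_add_action()):
+ *
+ *	struct my_action {
+ *		struct vmw_fence_action action;
+ *		...
+ *	};
+ *
+ *	my->action.type = VMW_ACTION_EVENT;
+ *	my->action.seq_passed = my_seq_passed;	(runs in atomic context)
+ *	my->action.cleanup = my_cleanup;	(runs from a workqueue)
+ */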
+
+struct vmw_fence_obj {
+	struct kref kref;
+	u32 seqno;
+
+	struct vmw_fence_manager *fman;
+	struct list_head head;
+	uint32_t signaled;
+	uint32_t signal_mask;
+	struct list_head seq_passed_actions;
+	void (*destroy)(struct vmw_fence_obj *fence);
+	wait_queue_head_t queue;
+};
+
+extern struct vmw_fence_manager *
+vmw_fence_manager_init(struct vmw_private *dev_priv);
+
+extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman);
+
+extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p);
+
+extern struct vmw_fence_obj *
+vmw_fence_obj_reference(struct vmw_fence_obj *fence);
+
+extern void vmw_fences_update(struct vmw_fence_manager *fman);
+
+extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
+				   uint32_t flags);
+
+extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags,
+			      bool lazy,
+			      bool interruptible, unsigned long timeout);
+
+extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
+
+extern int vmw_fence_create(struct vmw_fence_manager *fman,
+			    uint32_t seqno,
+			    uint32_t mask,
+			    struct vmw_fence_obj **p_fence);
+
+extern int vmw_user_fence_create(struct drm_file *file_priv,
+				 struct vmw_fence_manager *fman,
+				 uint32_t sequence,
+				 uint32_t mask,
+				 struct vmw_fence_obj **p_fence,
+				 uint32_t *p_handle);
+
+extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
+
+extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
+
+extern int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+
+extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file_priv);
+
+extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
+				       struct list_head *event_list);
+extern int vmw_event_fence_action_queue(struct drm_file *file_priv,
+					struct vmw_fence_obj *fence,
+					struct drm_pending_event *event,
+					uint32_t *tv_sec,
+					uint32_t *tv_usec,
+					bool interruptible);
+#endif /* _VMWGFX_FENCE_H_ */
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
new file mode 100644
index 0000000..3eb1486
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -0,0 +1,568 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include <drm/drmP.h>
+#include <drm/ttm/ttm_placement.h>
+
+bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	uint32_t fifo_min, hwversion;
+	const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+		return false;
+
+	fifo_min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
+	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
+		return false;
+
+	hwversion = ioread32(fifo_mem +
+			     ((fifo->capabilities &
+			       SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+			      SVGA_FIFO_3D_HWVERSION_REVISED :
+			      SVGA_FIFO_3D_HWVERSION));
+
+	if (hwversion == 0)
+		return false;
+
+	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
+		return false;
+
+	/* Non-Screen Object path does not support surfaces */
+	if (!dev_priv->sou_priv)
+		return false;
+
+	return true;
+}
+
+bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	uint32_t caps;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+		return false;
+
+	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
+		return true;
+
+	return false;
+}
+
+int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	uint32_t max;
+	uint32_t min;
+	uint32_t dummy;
+
+	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
+	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
+	if (unlikely(fifo->static_buffer == NULL))
+		return -ENOMEM;
+
+	fifo->dynamic_buffer = NULL;
+	fifo->reserved_size = 0;
+	fifo->using_bounce_buffer = false;
+
+	mutex_init(&fifo->fifo_mutex);
+	init_rwsem(&fifo->rwsem);
+
+	/*
+	 * Allow mapping the first page read-only to user-space.
+	 */
+
+	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
+	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
+	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
+
+	mutex_lock(&dev_priv->hw_mutex);
+	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
+	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
+	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+
+	min = 4;
+	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
+		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
+	min <<= 2;
+
+	if (min < PAGE_SIZE)
+		min = PAGE_SIZE;
+
+	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
+	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
+	wmb();
+	iowrite32(min,  fifo_mem + SVGA_FIFO_NEXT_CMD);
+	iowrite32(min,  fifo_mem + SVGA_FIFO_STOP);
+	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
+	mb();
+
+	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+	min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
+	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+
+	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
+		 (unsigned int) max,
+		 (unsigned int) min,
+		 (unsigned int) fifo->capabilities);
+
+	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+	vmw_marker_queue_init(&fifo->marker_queue);
+	return vmw_fifo_send_fence(dev_priv, &dummy);
+}
+
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+	mutex_lock(&dev_priv->hw_mutex);
+
+	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
+		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
+		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
+	}
+
+	mutex_unlock(&dev_priv->hw_mutex);
+}
+
+void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+	mutex_lock(&dev_priv->hw_mutex);
+
+	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
+		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+
+	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
+		  dev_priv->config_done_state);
+	vmw_write(dev_priv, SVGA_REG_ENABLE,
+		  dev_priv->enable_state);
+	vmw_write(dev_priv, SVGA_REG_TRACES,
+		  dev_priv->traces_state);
+
+	mutex_unlock(&dev_priv->hw_mutex);
+	vmw_marker_queue_takedown(&fifo->marker_queue);
+
+	if (likely(fifo->static_buffer != NULL)) {
+		vfree(fifo->static_buffer);
+		fifo->static_buffer = NULL;
+	}
+
+	if (likely(fifo->dynamic_buffer != NULL)) {
+		vfree(fifo->dynamic_buffer);
+		fifo->dynamic_buffer = NULL;
+	}
+}
+
+static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+
+	return ((max - next_cmd) + (stop - min) <= bytes);
+}
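+
+/*
+ * Worked example (illustrative numbers only, for the common case where
+ * next_cmd >= stop): with min == 4096, max == 65536, next_cmd == 30000
+ * and stop == 10000, the free space is
+ * (65536 - 30000) + (10000 - 4096) == 41440 bytes, so the fifo counts
+ * as full for any request of 41440 bytes or more.
+ */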
+
+static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
+			       uint32_t bytes, bool interruptible,
+			       unsigned long timeout)
+{
+	int ret = 0;
+	unsigned long end_jiffies = jiffies + timeout;
+	DEFINE_WAIT(__wait);
+
+	DRM_INFO("Fifo wait noirq.\n");
+
+	for (;;) {
+		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
+				(interruptible) ?
+				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+		if (!vmw_fifo_is_full(dev_priv, bytes))
+			break;
+		if (time_after_eq(jiffies, end_jiffies)) {
+			ret = -EBUSY;
+			DRM_ERROR("SVGA device lockup.\n");
+			break;
+		}
+		schedule_timeout(1);
+		if (interruptible && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	finish_wait(&dev_priv->fifo_queue, &__wait);
+	wake_up_all(&dev_priv->fifo_queue);
+	DRM_INFO("Fifo noirq exit.\n");
+	return ret;
+}
+
+static int vmw_fifo_wait(struct vmw_private *dev_priv,
+			 uint32_t bytes, bool interruptible,
+			 unsigned long timeout)
+{
+	long ret = 1L;
+	unsigned long irq_flags;
+
+	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
+		return 0;
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
+	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+		return vmw_fifo_wait_noirq(dev_priv, bytes,
+					   interruptible, timeout);
+
+	mutex_lock(&dev_priv->hw_mutex);
+	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
+		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+		dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+	}
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	if (interruptible)
+		ret = wait_event_interruptible_timeout
+		    (dev_priv->fifo_queue,
+		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
+	else
+		ret = wait_event_timeout
+		    (dev_priv->fifo_queue,
+		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
+
+	if (unlikely(ret == 0))
+		ret = -EBUSY;
+	else if (likely(ret > 0))
+		ret = 0;
+
+	mutex_lock(&dev_priv->hw_mutex);
+	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+	}
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	return ret;
+}
+
+/**
+ * vmw_fifo_reserve - Reserve @bytes number of bytes in the fifo.
+ *
+ * This function will return NULL (error) on two conditions:
+ * If it times out waiting for fifo space, or if @bytes is larger than the
+ * available fifo space.
+ *
+ * Returns:
+ *   Pointer to the fifo, or NULL on error (possible hardware hang).
+ */
+void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	uint32_t max;
+	uint32_t min;
+	uint32_t next_cmd;
+	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
+	int ret;
+
+	mutex_lock(&fifo_state->fifo_mutex);
+	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+
+	if (unlikely(bytes >= (max - min)))
+		goto out_err;
+
+	BUG_ON(fifo_state->reserved_size != 0);
+	BUG_ON(fifo_state->dynamic_buffer != NULL);
+
+	fifo_state->reserved_size = bytes;
+
+	while (1) {
+		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+		bool need_bounce = false;
+		bool reserve_in_place = false;
+
+		if (next_cmd >= stop) {
+			if (likely((next_cmd + bytes < max ||
+				    (next_cmd + bytes == max && stop > min))))
+				reserve_in_place = true;
+
+			else if (vmw_fifo_is_full(dev_priv, bytes)) {
+				ret = vmw_fifo_wait(dev_priv, bytes,
+						    false, 3 * HZ);
+				if (unlikely(ret != 0))
+					goto out_err;
+			} else
+				need_bounce = true;
+
+		} else {
+
+			if (likely((next_cmd + bytes < stop)))
+				reserve_in_place = true;
+			else {
+				ret = vmw_fifo_wait(dev_priv, bytes,
+						    false, 3 * HZ);
+				if (unlikely(ret != 0))
+					goto out_err;
+			}
+		}
+
+		if (reserve_in_place) {
+			if (reserveable || bytes <= sizeof(uint32_t)) {
+				fifo_state->using_bounce_buffer = false;
+
+				if (reserveable)
+					iowrite32(bytes, fifo_mem +
+						  SVGA_FIFO_RESERVED);
+				return fifo_mem + (next_cmd >> 2);
+			} else {
+				need_bounce = true;
+			}
+		}
+
+		if (need_bounce) {
+			fifo_state->using_bounce_buffer = true;
+			if (bytes < fifo_state->static_buffer_size)
+				return fifo_state->static_buffer;
+			else {
+				fifo_state->dynamic_buffer = vmalloc(bytes);
+				if (unlikely(fifo_state->dynamic_buffer == NULL))
+					goto out_err;
+				return fifo_state->dynamic_buffer;
+			}
+		}
+	}
+out_err:
+	fifo_state->reserved_size = 0;
+	mutex_unlock(&fifo_state->fifo_mutex);
+	return NULL;
+}
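+
+/*
+ * Illustrative sketch only (not part of the driver): the
+ * vmw_fifo_reserve() / vmw_fifo_commit() pairing that callers are
+ * expected to follow; see vmw_fifo_send_fence() below for a real
+ * instance. The command layout shown here is hypothetical.
+ *
+ *	struct { uint32_t id; uint32_t value; } *cmd;
+ *
+ *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ *	if (unlikely(cmd == NULL))
+ *		return -ENOMEM;
+ *	cmd->id = HYPOTHETICAL_CMD_ID;
+ *	cmd->value = 0;
+ *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ */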
+
+static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
+			      __le32 __iomem *fifo_mem,
+			      uint32_t next_cmd,
+			      uint32_t max, uint32_t min, uint32_t bytes)
+{
+	uint32_t chunk_size = max - next_cmd;
+	uint32_t rest;
+	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
+	    fifo_state->dynamic_buffer : fifo_state->static_buffer;
+
+	if (bytes < chunk_size)
+		chunk_size = bytes;
+
+	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
+	mb();
+	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
+	rest = bytes - chunk_size;
+	if (rest)
+		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
+			    rest);
+}
+
+static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
+			       __le32 __iomem *fifo_mem,
+			       uint32_t next_cmd,
+			       uint32_t max, uint32_t min, uint32_t bytes)
+{
+	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
+	    fifo_state->dynamic_buffer : fifo_state->static_buffer;
+
+	while (bytes > 0) {
+		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
+		next_cmd += sizeof(uint32_t);
+		if (unlikely(next_cmd == max))
+			next_cmd = min;
+		mb();
+		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+		mb();
+		bytes -= sizeof(uint32_t);
+	}
+}
+
+void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+{
+	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
+
+	BUG_ON((bytes & 3) != 0);
+	BUG_ON(bytes > fifo_state->reserved_size);
+
+	fifo_state->reserved_size = 0;
+
+	if (fifo_state->using_bounce_buffer) {
+		if (reserveable)
+			vmw_fifo_res_copy(fifo_state, fifo_mem,
+					  next_cmd, max, min, bytes);
+		else
+			vmw_fifo_slow_copy(fifo_state, fifo_mem,
+					   next_cmd, max, min, bytes);
+
+		if (fifo_state->dynamic_buffer) {
+			vfree(fifo_state->dynamic_buffer);
+			fifo_state->dynamic_buffer = NULL;
+		}
+
+	}
+
+	down_write(&fifo_state->rwsem);
+	if (fifo_state->using_bounce_buffer || reserveable) {
+		next_cmd += bytes;
+		if (next_cmd >= max)
+			next_cmd -= max - min;
+		mb();
+		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+	}
+
+	if (reserveable)
+		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
+	mb();
+	up_write(&fifo_state->rwsem);
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	mutex_unlock(&fifo_state->fifo_mutex);
+}
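+
+/*
+ * Worked example (illustrative numbers only): with min == 4096 and
+ * max == 65536, committing 8 bytes at next_cmd == 65532 gives
+ * 65540 >= max, so next_cmd wraps to 65540 - (65536 - 4096) == 4100,
+ * four bytes past the start of the command area.
+ */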
+
+int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+{
+	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+	struct svga_fifo_cmd_fence *cmd_fence;
+	void *fm;
+	int ret = 0;
+	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+
+	fm = vmw_fifo_reserve(dev_priv, bytes);
+	if (unlikely(fm == NULL)) {
+		*seqno = atomic_read(&dev_priv->marker_seq);
+		ret = -ENOMEM;
+		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
+					false, 3*HZ);
+		goto out_err;
+	}
+
+	do {
+		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
+	} while (*seqno == 0);
+
+	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
+
+		/*
+		 * Don't request hardware to send a fence. The
+		 * waiting code in vmwgfx_irq.c will emulate this.
+		 */
+
+		vmw_fifo_commit(dev_priv, 0);
+		return 0;
+	}
+
+	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
+	cmd_fence = (struct svga_fifo_cmd_fence *)
+	    ((unsigned long)fm + sizeof(__le32));
+
+	iowrite32(*seqno, &cmd_fence->fence);
+	vmw_fifo_commit(dev_priv, bytes);
+	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
+	vmw_update_seqno(dev_priv, fifo_state);
+
+out_err:
+	return ret;
+}
+
+/**
+ * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A query result structure should have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object, and that buffer object
+ * must be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
+ */
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+			      uint32_t cid)
+{
+	/*
+	 * A query wait without a preceding query end will
+	 * actually finish all queries for this cid
+	 * without writing to the query result structure.
+	 */
+
+	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForQuery body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of fifo space for dummy query.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = cid;
+	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+
+	if (bo->mem.mem_type == TTM_PL_VRAM) {
+		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
+		cmd->body.guestResult.offset = bo->offset;
+	} else {
+		cmd->body.guestResult.gmrId = bo->mem.start;
+		cmd->body.guestResult.offset = 0;
+	}
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
new file mode 100644
index 0000000..1a0bf07
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -0,0 +1,300 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include <drm/drmP.h>
+#include <drm/ttm/ttm_bo_driver.h>
+
+#define VMW_PPN_SIZE (sizeof(unsigned long))
+/* A future safe maximum remap size. */
+#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
+
+static int vmw_gmr2_bind(struct vmw_private *dev_priv,
+			 struct page *pages[],
+			 unsigned long num_pages,
+			 int gmr_id)
+{
+	SVGAFifoCmdDefineGMR2 define_cmd;
+	SVGAFifoCmdRemapGMR2 remap_cmd;
+	uint32_t *cmd;
+	uint32_t *cmd_orig;
+	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
+	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP +
+		((num_pages % VMW_PPN_PER_REMAP) > 0);
+	uint32_t remap_size = VMW_PPN_SIZE * num_pages +
+		(sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
+	uint32_t remap_pos = 0;
+	uint32_t cmd_size = define_size + remap_size;
+	uint32_t i;
+
+	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
+	if (unlikely(cmd == NULL))
+		return -ENOMEM;
+
+	define_cmd.gmrId = gmr_id;
+	define_cmd.numPages = num_pages;
+
+	*cmd++ = SVGA_CMD_DEFINE_GMR2;
+	memcpy(cmd, &define_cmd, sizeof(define_cmd));
+	cmd += sizeof(define_cmd) / sizeof(*cmd);
+
+	/*
+	 * Need to split the command if there are too many
+	 * pages that go into the GMR.
+	 */
+
+	remap_cmd.gmrId = gmr_id;
+	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
+		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
+
+	while (num_pages > 0) {
+		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
+
+		remap_cmd.offsetPages = remap_pos;
+		remap_cmd.numPages = nr;
+
+		*cmd++ = SVGA_CMD_REMAP_GMR2;
+		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+		cmd += sizeof(remap_cmd) / sizeof(*cmd);
+
+		for (i = 0; i < nr; ++i) {
+			if (VMW_PPN_SIZE <= 4)
+				*cmd = page_to_pfn(*pages++);
+			else
+				*((uint64_t *)cmd) = page_to_pfn(*pages++);
+
+			cmd += VMW_PPN_SIZE / sizeof(*cmd);
+		}
+
+		num_pages -= nr;
+		remap_pos += nr;
+	}
+
+	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
+
+	vmw_fifo_commit(dev_priv, cmd_size);
+
+	return 0;
+}
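+
+/*
+ * Sizing sketch (illustrative numbers only): on a 64-bit build
+ * VMW_PPN_SIZE == 8, so VMW_PPN_PER_REMAP == (31 * 1024) / 8 == 3968.
+ * Binding num_pages == 10000 therefore needs
+ * remap_num == 10000 / 3968 + 1 == 3 remap commands, and
+ * remap_size == 8 * 10000 + (sizeof(remap_cmd) + 4) * 3 bytes of fifo
+ * space on top of the define command.
+ */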
+
+static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
+			    int gmr_id)
+{
+	SVGAFifoCmdDefineGMR2 define_cmd;
+	uint32_t define_size = sizeof(define_cmd) + 4;
+	uint32_t *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, define_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("GMR2 unbind failed.\n");
+		return;
+	}
+	define_cmd.gmrId = gmr_id;
+	define_cmd.numPages = 0;
+
+	*cmd++ = SVGA_CMD_DEFINE_GMR2;
+	memcpy(cmd, &define_cmd, sizeof(define_cmd));
+
+	vmw_fifo_commit(dev_priv, define_size);
+}
+
+/**
+ * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
+ * the number of used descriptors.
+ */
+
+static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
+				     struct page *pages[],
+				     unsigned long num_pages)
+{
+	struct page *page, *next;
+	struct svga_guest_mem_descriptor *page_virtual = NULL;
+	struct svga_guest_mem_descriptor *desc_virtual = NULL;
+	unsigned int desc_per_page;
+	unsigned long prev_pfn;
+	unsigned long pfn;
+	int ret;
+
+	desc_per_page = PAGE_SIZE /
+	    sizeof(struct svga_guest_mem_descriptor) - 1;
+
+	while (likely(num_pages != 0)) {
+		page = alloc_page(__GFP_HIGHMEM);
+		if (unlikely(page == NULL)) {
+			ret = -ENOMEM;
+			goto out_err;
+		}
+
+		list_add_tail(&page->lru, desc_pages);
+
+		/*
+		 * Point the previous page's terminating descriptor at this
+		 * page before unmapping it.
+		 */
+
+		if (likely(page_virtual != NULL)) {
+			desc_virtual->ppn = page_to_pfn(page);
+			kunmap_atomic(page_virtual);
+		}
+
+		page_virtual = kmap_atomic(page);
+		desc_virtual = page_virtual - 1;
+		prev_pfn = ~(0UL);
+
+		while (likely(num_pages != 0)) {
+			pfn = page_to_pfn(*pages);
+
+			if (pfn != prev_pfn + 1) {
+
+				if (desc_virtual - page_virtual ==
+				    desc_per_page - 1)
+					break;
+
+				(++desc_virtual)->ppn = cpu_to_le32(pfn);
+				desc_virtual->num_pages = cpu_to_le32(1);
+			} else {
+				uint32_t tmp =
+				    le32_to_cpu(desc_virtual->num_pages);
+				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
+			}
+			prev_pfn = pfn;
+			--num_pages;
+			++pages;
+		}
+
+		(++desc_virtual)->ppn = cpu_to_le32(0);
+		desc_virtual->num_pages = cpu_to_le32(0);
+	}
+
+	if (likely(page_virtual != NULL))
+		kunmap_atomic(page_virtual);
+
+	return 0;
+out_err:
+	list_for_each_entry_safe(page, next, desc_pages, lru) {
+		list_del_init(&page->lru);
+		__free_page(page);
+	}
+	return ret;
+}
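+
+/*
+ * Coalescing sketch (hypothetical pfns): for pages with pfns
+ * 5, 6, 7 and 20, the loop above emits two descriptors,
+ * { ppn = 5, num_pages = 3 } and { ppn = 20, num_pages = 1 },
+ * followed by the terminating { ppn = 0, num_pages = 0 } entry.
+ */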
+
+static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
+{
+	struct page *page, *next;
+
+	list_for_each_entry_safe(page, next, desc_pages, lru) {
+		list_del_init(&page->lru);
+		__free_page(page);
+	}
+}
+
+static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
+				     int gmr_id, struct list_head *desc_pages)
+{
+	struct page *page;
+
+	if (unlikely(list_empty(desc_pages)))
+		return;
+
+	page = list_entry(desc_pages->next, struct page, lru);
+
+	mutex_lock(&dev_priv->hw_mutex);
+
+	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
+	wmb();
+	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
+	mb();
+
+	mutex_unlock(&dev_priv->hw_mutex);
+
+}
+
+/**
+ * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
+ * the number of used descriptors.
+ */
+
+static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
+					unsigned long num_pages)
+{
+	unsigned long prev_pfn = ~(0UL);
+	unsigned long pfn;
+	unsigned long descriptors = 0;
+
+	while (num_pages--) {
+		pfn = page_to_pfn(*pages++);
+		if (prev_pfn + 1 != pfn)
+			++descriptors;
+		prev_pfn = pfn;
+	}
+
+	return descriptors;
+}
+
+int vmw_gmr_bind(struct vmw_private *dev_priv,
+		 struct page *pages[],
+		 unsigned long num_pages,
+		 int gmr_id)
+{
+	struct list_head desc_pages;
+	int ret;
+
+	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
+		return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);
+
+	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
+		return -EINVAL;
+
+	if (vmw_gmr_count_descriptors(pages, num_pages) >
+	    dev_priv->max_gmr_descriptors)
+		return -EINVAL;
+
+	INIT_LIST_HEAD(&desc_pages);
+
+	ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
+	vmw_gmr_free_descriptors(&desc_pages);
+
+	return 0;
+}
+
+
+void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
+{
+	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
+		vmw_gmr2_unbind(dev_priv, gmr_id);
+		return;
+	}
+
+	mutex_lock(&dev_priv->hw_mutex);
+	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
+	wmb();
+	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
+	mb();
+	mutex_unlock(&dev_priv->hw_mutex);
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
new file mode 100644
index 0000000..c5c054a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -0,0 +1,161 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "vmwgfx_drv.h"
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+
+struct vmwgfx_gmrid_man {
+	spinlock_t lock;
+	struct ida gmr_ida;
+	uint32_t max_gmr_ids;
+	uint32_t max_gmr_pages;
+	uint32_t used_gmr_pages;
+};
+
+static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
+				  struct ttm_buffer_object *bo,
+				  struct ttm_placement *placement,
+				  struct ttm_mem_reg *mem)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+	int ret = 0;
+	int id;
+
+	mem->mm_node = NULL;
+
+	spin_lock(&gman->lock);
+
+	if (gman->max_gmr_pages > 0) {
+		gman->used_gmr_pages += bo->num_pages;
+		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
+			goto out_err_locked;
+	}
+
+	do {
+		spin_unlock(&gman->lock);
+		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
+			ret = -ENOMEM;
+			goto out_err;
+		}
+		spin_lock(&gman->lock);
+
+		ret = ida_get_new(&gman->gmr_ida, &id);
+		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
+			ida_remove(&gman->gmr_ida, id);
+			ret = 0;
+			goto out_err_locked;
+		}
+	} while (ret == -EAGAIN);
+
+	if (likely(ret == 0)) {
+		mem->mm_node = gman;
+		mem->start = id;
+		mem->num_pages = bo->num_pages;
+	} else
+		goto out_err_locked;
+
+	spin_unlock(&gman->lock);
+	return 0;
+
+out_err:
+	spin_lock(&gman->lock);
+out_err_locked:
+	gman->used_gmr_pages -= bo->num_pages;
+	spin_unlock(&gman->lock);
+	return ret;
+}
+
+static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
+				   struct ttm_mem_reg *mem)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+
+	if (mem->mm_node) {
+		spin_lock(&gman->lock);
+		ida_remove(&gman->gmr_ida, mem->start);
+		gman->used_gmr_pages -= mem->num_pages;
+		spin_unlock(&gman->lock);
+		mem->mm_node = NULL;
+	}
+}
+
+static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
+			      unsigned long p_size)
+{
+	struct vmw_private *dev_priv =
+		container_of(man->bdev, struct vmw_private, bdev);
+	struct vmwgfx_gmrid_man *gman =
+		kzalloc(sizeof(*gman), GFP_KERNEL);
+
+	if (unlikely(gman == NULL))
+		return -ENOMEM;
+
+	spin_lock_init(&gman->lock);
+	gman->max_gmr_pages = dev_priv->max_gmr_pages;
+	gman->used_gmr_pages = 0;
+	ida_init(&gman->gmr_ida);
+	gman->max_gmr_ids = p_size;
+	man->priv = (void *) gman;
+	return 0;
+}
+
+static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
+{
+	struct vmwgfx_gmrid_man *gman =
+		(struct vmwgfx_gmrid_man *)man->priv;
+
+	if (gman) {
+		ida_destroy(&gman->gmr_ida);
+		kfree(gman);
+	}
+	return 0;
+}
+
+static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
+				const char *prefix)
+{
+	printk(KERN_INFO "%s: No debug info available for the GMR "
+	       "id manager.\n", prefix);
+}
+
+const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
+	.init = vmw_gmrid_man_init,
+	.takedown = vmw_gmrid_man_takedown,
+	.get_node = vmw_gmrid_man_get_node,
+	.put_node = vmw_gmrid_man_put_node,
+	.debug = vmw_gmrid_man_debug
+};
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
new file mode 100644
index 0000000..c509d40
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -0,0 +1,326 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include <drm/vmwgfx_drm.h>
+#include "vmwgfx_kms.h"
+
+int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_getparam_arg *param =
+	    (struct drm_vmw_getparam_arg *)data;
+
+	switch (param->param) {
+	case DRM_VMW_PARAM_NUM_STREAMS:
+		param->value = vmw_overlay_num_overlays(dev_priv);
+		break;
+	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
+		param->value = vmw_overlay_num_free_overlays(dev_priv);
+		break;
+	case DRM_VMW_PARAM_3D:
+		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
+		break;
+	case DRM_VMW_PARAM_HW_CAPS:
+		param->value = dev_priv->capabilities;
+		break;
+	case DRM_VMW_PARAM_FIFO_CAPS:
+		param->value = dev_priv->fifo.capabilities;
+		break;
+	case DRM_VMW_PARAM_MAX_FB_SIZE:
+		param->value = dev_priv->vram_size;
+		break;
+	case DRM_VMW_PARAM_FIFO_HW_VERSION:
+	{
+		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+		param->value =
+			ioread32(fifo_mem +
+				 ((fifo->capabilities &
+				   SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+				  SVGA_FIFO_3D_HWVERSION_REVISED :
+				  SVGA_FIFO_3D_HWVERSION));
+		break;
+	}
+	default:
+		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
+			  param->param);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_vmw_get_3d_cap_arg *arg =
+		(struct drm_vmw_get_3d_cap_arg *) data;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	uint32_t size;
+	__le32 __iomem *fifo_mem;
+	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
+	void *bounce;
+	int ret;
+
+	if (unlikely(arg->pad64 != 0)) {
+		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
+		return -EINVAL;
+	}
+
+	size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2;
+
+	if (arg->max_size < size)
+		size = arg->max_size;
+
+	bounce = vmalloc(size);
+	if (unlikely(bounce == NULL)) {
+		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
+		return -ENOMEM;
+	}
+
+	fifo_mem = dev_priv->mmio_virt;
+	memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
+
+	ret = copy_to_user(buffer, bounce, size);
+	if (ret)
+		ret = -EFAULT;
+	vfree(bounce);
+
+	if (unlikely(ret != 0))
+		DRM_ERROR("Failed to report 3D caps info.\n");
+
+	return ret;
+}
+
+int vmw_present_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_present_arg *arg =
+		(struct drm_vmw_present_arg *)data;
+	struct vmw_surface *surface;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	struct drm_vmw_rect __user *clips_ptr;
+	struct drm_vmw_rect *clips = NULL;
+	struct drm_framebuffer *fb;
+	struct vmw_framebuffer *vfb;
+	struct vmw_resource *res;
+	uint32_t num_clips;
+	int ret;
+
+	num_clips = arg->num_clips;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
+
+	if (unlikely(num_clips == 0))
+		return 0;
+
+	if (clips_ptr == NULL) {
+		DRM_ERROR("Variable clips_ptr must be specified.\n");
+		ret = -EINVAL;
+		goto out_clips;
+	}
+
+	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
+	if (clips == NULL) {
+		DRM_ERROR("Failed to allocate clip rect list.\n");
+		ret = -ENOMEM;
+		goto out_clips;
+	}
+
+	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
+	if (ret) {
+		DRM_ERROR("Failed to copy clip rects from userspace.\n");
+		ret = -EFAULT;
+		goto out_no_copy;
+	}
+
+	drm_modeset_lock_all(dev);
+
+	fb = drm_framebuffer_lookup(dev, arg->fb_id);
+	if (!fb) {
+		DRM_ERROR("Invalid framebuffer id.\n");
+		ret = -EINVAL;
+		goto out_no_fb;
+	}
+	vfb = vmw_framebuffer_to_vfb(fb);
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		goto out_no_ttm_lock;
+
+	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
+					      user_surface_converter,
+					      &res);
+	if (ret)
+		goto out_no_surface;
+
+	surface = vmw_res_to_srf(res);
+	ret = vmw_kms_present(dev_priv, file_priv,
+			      vfb, surface, arg->sid,
+			      arg->dest_x, arg->dest_y,
+			      clips, num_clips);
+
+	/* vmw_user_resource_lookup_handle takes one reference, dropped here */
+	vmw_surface_unreference(&surface);
+
+out_no_surface:
+	ttm_read_unlock(&vmaster->lock);
+out_no_ttm_lock:
+	drm_framebuffer_unreference(fb);
+out_no_fb:
+	drm_modeset_unlock_all(dev);
+out_no_copy:
+	kfree(clips);
+out_clips:
+	return ret;
+}
+
+int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_present_readback_arg *arg =
+		(struct drm_vmw_present_readback_arg *)data;
+	struct drm_vmw_fence_rep __user *user_fence_rep =
+		(struct drm_vmw_fence_rep __user *)
+		(unsigned long)arg->fence_rep;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	struct drm_vmw_rect __user *clips_ptr;
+	struct drm_vmw_rect *clips = NULL;
+	struct drm_framebuffer *fb;
+	struct vmw_framebuffer *vfb;
+	uint32_t num_clips;
+	int ret;
+
+	num_clips = arg->num_clips;
+	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
+
+	if (unlikely(num_clips == 0))
+		return 0;
+
+	if (clips_ptr == NULL) {
+		DRM_ERROR("Argument clips_ptr must be specified.\n");
+		ret = -EINVAL;
+		goto out_clips;
+	}
+
+	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
+	if (clips == NULL) {
+		DRM_ERROR("Failed to allocate clip rect list.\n");
+		ret = -ENOMEM;
+		goto out_clips;
+	}
+
+	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
+	if (ret) {
+		DRM_ERROR("Failed to copy clip rects from userspace.\n");
+		ret = -EFAULT;
+		goto out_no_copy;
+	}
+
+	drm_modeset_lock_all(dev);
+
+	fb = drm_framebuffer_lookup(dev, arg->fb_id);
+	if (!fb) {
+		DRM_ERROR("Invalid framebuffer id.\n");
+		ret = -EINVAL;
+		goto out_no_fb;
+	}
+
+	vfb = vmw_framebuffer_to_vfb(fb);
+	if (!vfb->dmabuf) {
+		DRM_ERROR("Framebuffer not dmabuf backed.\n");
+		ret = -EINVAL;
+		goto out_no_ttm_lock;
+	}
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		goto out_no_ttm_lock;
+
+	ret = vmw_kms_readback(dev_priv, file_priv,
+			       vfb, user_fence_rep,
+			       clips, num_clips);
+
+	ttm_read_unlock(&vmaster->lock);
+out_no_ttm_lock:
+	drm_framebuffer_unreference(fb);
+out_no_fb:
+	drm_modeset_unlock_all(dev);
+out_no_copy:
+	kfree(clips);
+out_clips:
+	return ret;
+}
+
+
+/**
+ * vmw_fops_poll - wrapper around the drm_poll function
+ *
+ * @filp: See the linux fops poll documentation.
+ * @wait: See the linux fops poll documentation.
+ *
+ * Wrapper around the drm_poll function that makes sure the device is
+ * processing the fifo if drm_poll decides to wait.
+ */
+unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct vmw_private *dev_priv =
+		vmw_priv(file_priv->minor->dev);
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	return drm_poll(filp, wait);
+}
+
+
+/**
+ * vmw_fops_read - wrapper around the drm_read function
+ *
+ * @filp: See the linux fops read documentation.
+ * @buffer: See the linux fops read documentation.
+ * @count: See the linux fops read documentation.
+ * @offset: See the linux fops read documentation.
+ *
+ * Wrapper around the drm_read function that makes sure the device is
+ * processing the fifo if drm_read decides to wait.
+ */
+ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
+		      size_t count, loff_t *offset)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct vmw_private *dev_priv =
+		vmw_priv(file_priv->minor->dev);
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	return drm_read(filp, buffer, count, offset);
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
new file mode 100644
index 0000000..4640adb
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -0,0 +1,324 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
+
+#define VMW_FENCE_WRAP (1 << 24)
+
+irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	uint32_t status, masked_status;
+
+	spin_lock(&dev_priv->irq_lock);
+	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+	masked_status = status & dev_priv->irq_mask;
+	spin_unlock(&dev_priv->irq_lock);
+
+	if (likely(status))
+		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+
+	if (!masked_status)
+		return IRQ_NONE;
+
+	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
+			     SVGA_IRQFLAG_FENCE_GOAL)) {
+		vmw_fences_update(dev_priv->fman);
+		wake_up_all(&dev_priv->fence_queue);
+	}
+
+	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
+		wake_up_all(&dev_priv->fifo_queue);
+
+
+	return IRQ_HANDLED;
+}
+
+static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
+{
+	uint32_t busy;
+
+	mutex_lock(&dev_priv->hw_mutex);
+	busy = vmw_read(dev_priv, SVGA_REG_BUSY);
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	return (busy == 0);
+}
+
+void vmw_update_seqno(struct vmw_private *dev_priv,
+			 struct vmw_fifo_state *fifo_state)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+	if (dev_priv->last_read_seqno != seqno) {
+		dev_priv->last_read_seqno = seqno;
+		vmw_marker_pull(&fifo_state->marker_queue, seqno);
+		vmw_fences_update(dev_priv->fman);
+	}
+}
+
+bool vmw_seqno_passed(struct vmw_private *dev_priv,
+			 uint32_t seqno)
+{
+	struct vmw_fifo_state *fifo_state;
+	bool ret;
+
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+		return true;
+
+	fifo_state = &dev_priv->fifo;
+	vmw_update_seqno(dev_priv, fifo_state);
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+		return true;
+
+	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
+	    vmw_fifo_idle(dev_priv, seqno))
+		return true;
+
+	/*
+	 * Then check whether the seqno is ahead of what we've actually
+	 * emitted; in that case the fence is stale and signaled.
+	 */
+
+	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
+	       > VMW_FENCE_WRAP);
+
+	return ret;
+}
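+
+/*
+ * Wrap-around sketch (illustrative values): the comparisons above are
+ * done in modular 32-bit arithmetic, so with last_read_seqno == 5 and
+ * seqno == 0xfffffffe, 5 - 0xfffffffe == 7 < VMW_FENCE_WRAP and the
+ * seqno counts as passed even though it is numerically larger.
+ */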
+
+int vmw_fallback_wait(struct vmw_private *dev_priv,
+		      bool lazy,
+		      bool fifo_idle,
+		      uint32_t seqno,
+		      bool interruptible,
+		      unsigned long timeout)
+{
+	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+
+	uint32_t count = 0;
+	uint32_t signal_seq;
+	int ret;
+	unsigned long end_jiffies = jiffies + timeout;
+	bool (*wait_condition)(struct vmw_private *, uint32_t);
+	DEFINE_WAIT(__wait);
+
+	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
+		&vmw_seqno_passed;
+
+	/*
+	 * Block command submission while waiting for idle.
+	 */
+
+	if (fifo_idle)
+		down_read(&fifo_state->rwsem);
+	signal_seq = atomic_read(&dev_priv->marker_seq);
+	ret = 0;
+
+	for (;;) {
+		prepare_to_wait(&dev_priv->fence_queue, &__wait,
+				(interruptible) ?
+				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+		if (wait_condition(dev_priv, seqno))
+			break;
+		if (time_after_eq(jiffies, end_jiffies)) {
+			DRM_ERROR("SVGA device lockup.\n");
+			break;
+		}
+		if (lazy)
+			schedule_timeout(1);
+		else if ((++count & 0x0F) == 0) {
+			/*
+			 * FIXME: Use schedule_hrtimeout here for
+			 * newer kernels and lower CPU utilization.
+			 */
+
+			__set_current_state(TASK_RUNNING);
+			schedule();
+			__set_current_state((interruptible) ?
+					    TASK_INTERRUPTIBLE :
+					    TASK_UNINTERRUPTIBLE);
+		}
+		if (interruptible && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	finish_wait(&dev_priv->fence_queue, &__wait);
+	if (ret == 0 && fifo_idle) {
+		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
+	}
+	wake_up_all(&dev_priv->fence_queue);
+	if (fifo_idle)
+		up_read(&fifo_state->rwsem);
+
+	return ret;
+}
+
+void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+{
+	mutex_lock(&dev_priv->hw_mutex);
+	if (dev_priv->fence_queue_waiters++ == 0) {
+		unsigned long irq_flags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+		outl(SVGA_IRQFLAG_ANY_FENCE,
+		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+		dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+	}
+	mutex_unlock(&dev_priv->hw_mutex);
+}
+
+void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+{
+	mutex_lock(&dev_priv->hw_mutex);
+	if (--dev_priv->fence_queue_waiters == 0) {
+		unsigned long irq_flags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+		dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+	}
+	mutex_unlock(&dev_priv->hw_mutex);
+}
+
+
+void vmw_goal_waiter_add(struct vmw_private *dev_priv)
+{
+	mutex_lock(&dev_priv->hw_mutex);
+	if (dev_priv->goal_queue_waiters++ == 0) {
+		unsigned long irq_flags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+		outl(SVGA_IRQFLAG_FENCE_GOAL,
+		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+		dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+	}
+	mutex_unlock(&dev_priv->hw_mutex);
+}
+
+void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
+{
+	mutex_lock(&dev_priv->hw_mutex);
+	if (--dev_priv->goal_queue_waiters == 0) {
+		unsigned long irq_flags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
+		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+	}
+	mutex_unlock(&dev_priv->hw_mutex);
+}
+
+int vmw_wait_seqno(struct vmw_private *dev_priv,
+		      bool lazy, uint32_t seqno,
+		      bool interruptible, unsigned long timeout)
+{
+	long ret;
+	struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+		return 0;
+
+	if (likely(vmw_seqno_passed(dev_priv, seqno)))
+		return 0;
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+
+	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
+		return vmw_fallback_wait(dev_priv, lazy, true, seqno,
+					 interruptible, timeout);
+
+	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
+					 interruptible, timeout);
+
+	vmw_seqno_waiter_add(dev_priv);
+
+	if (interruptible)
+		ret = wait_event_interruptible_timeout
+		    (dev_priv->fence_queue,
+		     vmw_seqno_passed(dev_priv, seqno),
+		     timeout);
+	else
+		ret = wait_event_timeout
+		    (dev_priv->fence_queue,
+		     vmw_seqno_passed(dev_priv, seqno),
+		     timeout);
+
+	vmw_seqno_waiter_remove(dev_priv);
+
+	if (unlikely(ret == 0))
+		ret = -EBUSY;
+	else if (likely(ret > 0))
+		ret = 0;
+
+	return ret;
+}
+
+void vmw_irq_preinstall(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	uint32_t status;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+		return;
+
+	spin_lock_init(&dev_priv->irq_lock);
+	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+}
+
+int vmw_irq_postinstall(struct drm_device *dev)
+{
+	return 0;
+}
+
+void vmw_irq_uninstall(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	uint32_t status;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+		return;
+
+	mutex_lock(&dev_priv->hw_mutex);
+	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
new file mode 100644
index 0000000..3e3c7ab
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -0,0 +1,2060 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_kms.h"
+
+
+/* Might need an hrtimer here? */
+#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+
+
+struct vmw_clip_rect {
+	int x1, x2, y1, y2;
+};
+
+/**
+ * vmw_clip_cliprects - Clip @num_rects number of @rects against @clip,
+ * storing the results in @out_rects and the number of passed rects
+ * in @out_num.
+ */
+void vmw_clip_cliprects(struct drm_clip_rect *rects,
+			int num_rects,
+			struct vmw_clip_rect clip,
+			SVGASignedRect *out_rects,
+			int *out_num)
+{
+	int i, k;
+
+	for (i = 0, k = 0; i < num_rects; i++) {
+		int x1 = max_t(int, clip.x1, rects[i].x1);
+		int y1 = max_t(int, clip.y1, rects[i].y1);
+		int x2 = min_t(int, clip.x2, rects[i].x2);
+		int y2 = min_t(int, clip.y2, rects[i].y2);
+
+		if (x1 >= x2)
+			continue;
+		if (y1 >= y2)
+			continue;
+
+		out_rects[k].left   = x1;
+		out_rects[k].top    = y1;
+		out_rects[k].right  = x2;
+		out_rects[k].bottom = y2;
+		k++;
+	}
+
+	*out_num = k;
+}
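+
+/*
+ * Clipping sketch (hypothetical rectangles): with
+ * clip == { .x1 = 0, .x2 = 100, .y1 = 0, .y2 = 100 } and a single
+ * input rect (50, -20)-(150, 30), the function above emits one output
+ * rect (50, 0)-(100, 30); rects that become empty after clamping are
+ * dropped and do not count towards *out_num.
+ */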
+
+void vmw_display_unit_cleanup(struct vmw_display_unit *du)
+{
+	if (du->cursor_surface)
+		vmw_surface_unreference(&du->cursor_surface);
+	if (du->cursor_dmabuf)
+		vmw_dmabuf_unreference(&du->cursor_dmabuf);
+	drm_crtc_cleanup(&du->crtc);
+	drm_encoder_cleanup(&du->encoder);
+	drm_connector_cleanup(&du->connector);
+}
+
+/*
+ * Display Unit Cursor functions
+ */
+
+int vmw_cursor_update_image(struct vmw_private *dev_priv,
+			    u32 *image, u32 width, u32 height,
+			    u32 hotspotX, u32 hotspotY)
+{
+	struct {
+		u32 cmd;
+		SVGAFifoCmdDefineAlphaCursor cursor;
+	} *cmd;
+	u32 image_size = width * height * 4;
+	u32 cmd_size = sizeof(*cmd) + image_size;
+
+	if (!image)
+		return -EINVAL;
+
+	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Fifo reserve failed.\n");
+		return -ENOMEM;
+	}
+
+	memset(cmd, 0, sizeof(*cmd));
+
+	memcpy(&cmd[1], image, image_size);
+
+	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
+	cmd->cursor.id = cpu_to_le32(0);
+	cmd->cursor.width = cpu_to_le32(width);
+	cmd->cursor.height = cpu_to_le32(height);
+	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
+	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
+
+	vmw_fifo_commit(dev_priv, cmd_size);
+
+	return 0;
+}
+
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+			     struct vmw_dma_buffer *dmabuf,
+			     u32 width, u32 height,
+			     u32 hotspotX, u32 hotspotY)
+{
+	struct ttm_bo_kmap_obj map;
+	unsigned long kmap_offset;
+	unsigned long kmap_num;
+	void *virtual;
+	bool dummy;
+	int ret;
+
+	kmap_offset = 0;
+	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("reserve failed\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+	if (unlikely(ret != 0))
+		goto err_unreserve;
+
+	virtual = ttm_kmap_obj_virtual(&map, &dummy);
+	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
+				      hotspotX, hotspotY);
+
+	ttm_bo_kunmap(&map);
+err_unreserve:
+	ttm_bo_unreserve(&dmabuf->base);
+
+	return ret;
+}
+
+
+void vmw_cursor_update_position(struct vmw_private *dev_priv,
+				bool show, int x, int y)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	uint32_t count;
+
+	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
+	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
+	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
+	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+}
+
+int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+			   uint32_t handle, uint32_t width, uint32_t height)
+{
+	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+	struct vmw_surface *surface = NULL;
+	struct vmw_dma_buffer *dmabuf = NULL;
+	int ret;
+
+	/*
+	 * FIXME: Unclear whether there's any global state touched by the
+	 * cursor_set function, especially vmw_cursor_update_position looks
+	 * suspicious. For now take the easy route and reacquire all locks. We
+	 * can do this since the caller in the drm core doesn't check anything
+	 * which is protected by any locks.
+	 */
+	mutex_unlock(&crtc->mutex);
+	drm_modeset_lock_all(dev_priv->dev);
+
+	/* A lot of the code assumes this */
+	if (handle && (width != 64 || height != 64)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (handle) {
+		ret = vmw_user_lookup_handle(dev_priv, tfile,
+					     handle, &surface, &dmabuf);
+		if (ret) {
+			DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	/* need to do this before taking down old image */
+	if (surface && !surface->snooper.image) {
+		DRM_ERROR("surface not suitable for cursor\n");
+		vmw_surface_unreference(&surface);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* take down the old cursor */
+	if (du->cursor_surface) {
+		du->cursor_surface->snooper.crtc = NULL;
+		vmw_surface_unreference(&du->cursor_surface);
+	}
+	if (du->cursor_dmabuf)
+		vmw_dmabuf_unreference(&du->cursor_dmabuf);
+
+	/* set up the new image */
+	if (surface) {
+		/* vmw_user_lookup_handle takes one reference */
+		du->cursor_surface = surface;
+
+		du->cursor_surface->snooper.crtc = crtc;
+		du->cursor_age = du->cursor_surface->snooper.age;
+		vmw_cursor_update_image(dev_priv, surface->snooper.image,
+					64, 64, du->hotspot_x, du->hotspot_y);
+	} else if (dmabuf) {
+		/* vmw_user_lookup_handle takes one reference */
+		du->cursor_dmabuf = dmabuf;
+
+		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
+					       du->hotspot_x, du->hotspot_y);
+	} else {
+		vmw_cursor_update_position(dev_priv, false, 0, 0);
+		ret = 0;
+		goto out;
+	}
+
+	vmw_cursor_update_position(dev_priv, true,
+				   du->cursor_x + du->hotspot_x,
+				   du->cursor_y + du->hotspot_y);
+
+	ret = 0;
+out:
+	drm_modeset_unlock_all(dev_priv->dev);
+	mutex_lock(&crtc->mutex);
+
+	return ret;
+}
+
+int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+	bool shown = du->cursor_surface || du->cursor_dmabuf;
+
+	du->cursor_x = x + crtc->x;
+	du->cursor_y = y + crtc->y;
+
+	/*
+	 * FIXME: Unclear whether there's any global state touched by the
+	 * cursor_set function, especially vmw_cursor_update_position looks
+	 * suspicious. For now take the easy route and reacquire all locks. We
+	 * can do this since the caller in the drm core doesn't check anything
+	 * which is protected by any locks.
+	 */
+	mutex_unlock(&crtc->mutex);
+	drm_modeset_lock_all(dev_priv->dev);
+
+	vmw_cursor_update_position(dev_priv, shown,
+				   du->cursor_x + du->hotspot_x,
+				   du->cursor_y + du->hotspot_y);
+
+	drm_modeset_unlock_all(dev_priv->dev);
+	mutex_lock(&crtc->mutex);
+
+	return 0;
+}
+
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+			  struct ttm_object_file *tfile,
+			  struct ttm_buffer_object *bo,
+			  SVGA3dCmdHeader *header)
+{
+	struct ttm_bo_kmap_obj map;
+	unsigned long kmap_offset;
+	unsigned long kmap_num;
+	SVGA3dCopyBox *box;
+	unsigned box_count;
+	void *virtual;
+	bool dummy;
+	struct vmw_dma_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSurfaceDMA dma;
+	} *cmd;
+	int i, ret;
+
+	cmd = container_of(header, struct vmw_dma_cmd, header);
+
+	/* No snooper installed */
+	if (!srf->snooper.image)
+		return;
+
+	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
+		DRM_ERROR("face and mipmap for cursors should never != 0\n");
+		return;
+	}
+
+	if (cmd->header.size < 64) {
+		DRM_ERROR("at least one full copy box must be given\n");
+		return;
+	}
+
+	box = (SVGA3dCopyBox *)&cmd[1];
+	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
+			sizeof(SVGA3dCopyBox);
+
+	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+	    box->x != 0    || box->y != 0    || box->z != 0    ||
+	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+	    box->d != 1    || box_count != 1) {
+		/* TODO handle non-page-aligned offsets */
+		/* TODO handle dst & src != 0 */
+		/* TODO handle more than one copy */
+		DRM_ERROR("Can't snoop dma request for cursor!\n");
+		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
+			  box->srcx, box->srcy, box->srcz,
+			  box->x, box->y, box->z,
+			  box->w, box->h, box->d, box_count,
+			  cmd->dma.guest.ptr.offset);
+		return;
+	}
+
+	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
+	kmap_num = (64*64*4) >> PAGE_SHIFT;
+
+	ret = ttm_bo_reserve(bo, true, false, false, 0);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("reserve failed\n");
+		return;
+	}
+
+	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+	if (unlikely(ret != 0))
+		goto err_unreserve;
+
+	virtual = ttm_kmap_obj_virtual(&map, &dummy);
+
+	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
+		memcpy(srf->snooper.image, virtual, 64*64*4);
+	} else {
+		/* Image is unsigned pointer. */
+		for (i = 0; i < box->h; i++)
+			memcpy(srf->snooper.image + i * 64,
+			       virtual + i * cmd->dma.guest.pitch,
+			       box->w * 4);
+	}
+
+	srf->snooper.age++;
+
+	/* We can't update the cursor image from here, since execbuf has
+	 * already reserved fifo space.
+	 *
+	 * if (srf->snooper.crtc)
+	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
+	 *					 srf->snooper.image, 64, 64,
+	 *					 du->hotspot_x, du->hotspot_y);
+	 */
+
+	ttm_bo_kunmap(&map);
+err_unreserve:
+	ttm_bo_unreserve(bo);
+}
+
+void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct vmw_display_unit *du;
+	struct drm_crtc *crtc;
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		du = vmw_crtc_to_du(crtc);
+		if (!du->cursor_surface ||
+		    du->cursor_age == du->cursor_surface->snooper.age)
+			continue;
+
+		du->cursor_age = du->cursor_surface->snooper.age;
+		vmw_cursor_update_image(dev_priv,
+					du->cursor_surface->snooper.image,
+					64, 64, du->hotspot_x, du->hotspot_y);
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+/*
+ * Generic framebuffer code
+ */
+
+/*
+ * Surface framebuffer code
+ */
+
+#define vmw_framebuffer_to_vfbs(x) \
+	container_of(x, struct vmw_framebuffer_surface, base.base)
+
+struct vmw_framebuffer_surface {
+	struct vmw_framebuffer base;
+	struct vmw_surface *surface;
+	struct vmw_dma_buffer *buffer;
+	struct list_head head;
+	struct drm_master *master;
+};
+
+void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
+{
+	struct vmw_framebuffer_surface *vfbs =
+		vmw_framebuffer_to_vfbs(framebuffer);
+	struct vmw_master *vmaster = vmw_master(vfbs->master);
+
+
+	mutex_lock(&vmaster->fb_surf_mutex);
+	list_del(&vfbs->head);
+	mutex_unlock(&vmaster->fb_surf_mutex);
+
+	drm_master_put(&vfbs->master);
+	drm_framebuffer_cleanup(framebuffer);
+	vmw_surface_unreference(&vfbs->surface);
+	ttm_base_object_unref(&vfbs->base.user_obj);
+
+	kfree(vfbs);
+}
+
+static int do_surface_dirty_sou(struct vmw_private *dev_priv,
+				struct drm_file *file_priv,
+				struct vmw_framebuffer *framebuffer,
+				unsigned flags, unsigned color,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int inc,
+				struct vmw_fence_obj **out_fence)
+{
+	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+	struct drm_clip_rect *clips_ptr;
+	struct drm_clip_rect *tmp;
+	struct drm_crtc *crtc;
+	size_t fifo_size;
+	int i, num_units;
+	int ret = 0; /* silence warning */
+	int left, right, top, bottom;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBlitSurfaceToScreen body;
+	} *cmd;
+	SVGASignedRect *blits;
+
+	num_units = 0;
+	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
+			    head) {
+		if (crtc->fb != &framebuffer->base)
+			continue;
+		units[num_units++] = vmw_crtc_to_du(crtc);
+	}
+
+	BUG_ON(!clips || !num_clips);
+
+	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+	if (unlikely(tmp == NULL)) {
+		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+		return -ENOMEM;
+	}
+
+	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
+	cmd = kzalloc(fifo_size, GFP_KERNEL);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Temporary fifo memory alloc failed.\n");
+		ret = -ENOMEM;
+		goto out_free_tmp;
+	}
+
+	/* setup blits pointer */
+	blits = (SVGASignedRect *)&cmd[1];
+
+	/* initial clip region */
+	left = clips->x1;
+	right = clips->x2;
+	top = clips->y1;
+	bottom = clips->y2;
+
+	/* skip the first clip rect */
+	for (i = 1, clips_ptr = clips + inc;
+	     i < num_clips; i++, clips_ptr += inc) {
+		left = min_t(int, left, (int)clips_ptr->x1);
+		right = max_t(int, right, (int)clips_ptr->x2);
+		top = min_t(int, top, (int)clips_ptr->y1);
+		bottom = max_t(int, bottom, (int)clips_ptr->y2);
+	}
+
+	/* only need to do this once */
+	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
+	cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
+
+	cmd->body.srcRect.left = left;
+	cmd->body.srcRect.right = right;
+	cmd->body.srcRect.top = top;
+	cmd->body.srcRect.bottom = bottom;
+
+	clips_ptr = clips;
+	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
+		tmp[i].x1 = clips_ptr->x1 - left;
+		tmp[i].x2 = clips_ptr->x2 - left;
+		tmp[i].y1 = clips_ptr->y1 - top;
+		tmp[i].y2 = clips_ptr->y2 - top;
+	}
+
+	/* do per unit writing, reuse fifo for each */
+	for (i = 0; i < num_units; i++) {
+		struct vmw_display_unit *unit = units[i];
+		struct vmw_clip_rect clip;
+		int num;
+
+		clip.x1 = left - unit->crtc.x;
+		clip.y1 = top - unit->crtc.y;
+		clip.x2 = right - unit->crtc.x;
+		clip.y2 = bottom - unit->crtc.y;
+
+		/* skip any crtc that misses the clip region */
+		if (clip.x1 >= unit->crtc.mode.hdisplay ||
+		    clip.y1 >= unit->crtc.mode.vdisplay ||
+		    clip.x2 <= 0 || clip.y2 <= 0)
+			continue;
+
+		/*
+		 * In order for the clip rects to be correctly scaled
+		 * the src and dest rects need to be the same size.
+		 */
+		cmd->body.destRect.left = clip.x1;
+		cmd->body.destRect.right = clip.x2;
+		cmd->body.destRect.top = clip.y1;
+		cmd->body.destRect.bottom = clip.y2;
+
+		/* create a clip rect of the crtc in dest coords */
+		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+		clip.x1 = 0 - clip.x1;
+		clip.y1 = 0 - clip.y1;
+
+		/* need to reset sid as it is changed by execbuf */
+		cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
+		cmd->body.destScreenId = unit->unit;
+
+		/* clip and write blits to cmd stream */
+		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
+
+		/* if no cliprects hit skip this */
+		if (num == 0)
+			continue;
+
+		/* only return the last fence */
+		if (out_fence && *out_fence)
+			vmw_fence_obj_unreference(out_fence);
+
+		/* recalculate package length */
+		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
+		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+					  fifo_size, 0, NULL, out_fence);
+
+		if (unlikely(ret != 0))
+			break;
+	}
+
+
+	kfree(cmd);
+out_free_tmp:
+	kfree(tmp);
+
+	return ret;
+}
+
+int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+				  struct drm_file *file_priv,
+				  unsigned flags, unsigned color,
+				  struct drm_clip_rect *clips,
+				  unsigned num_clips)
+{
+	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	struct vmw_framebuffer_surface *vfbs =
+		vmw_framebuffer_to_vfbs(framebuffer);
+	struct drm_clip_rect norect;
+	int ret, inc = 1;
+
+	if (unlikely(vfbs->master != file_priv->master))
+		return -EINVAL;
+
+	/* Require ScreenObject support for 3D */
+	if (!dev_priv->sou_priv)
+		return -EINVAL;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (!num_clips) {
+		num_clips = 1;
+		clips = &norect;
+		norect.x1 = norect.y1 = 0;
+		norect.x2 = framebuffer->width;
+		norect.y2 = framebuffer->height;
+	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+		num_clips /= 2;
+		inc = 2; /* skip source rects */
+	}
+
+	ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
+				   flags, color,
+				   clips, num_clips, inc, NULL);
+
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+}
+
+static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
+	.destroy = vmw_framebuffer_surface_destroy,
+	.dirty = vmw_framebuffer_surface_dirty,
+};
+
+static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+					   struct drm_file *file_priv,
+					   struct vmw_surface *surface,
+					   struct vmw_framebuffer **out,
+					   const struct drm_mode_fb_cmd
+					   *mode_cmd)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct vmw_framebuffer_surface *vfbs;
+	enum SVGA3dSurfaceFormat format;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	int ret;
+
+	/* 3D is only supported on HWv8 hosts, which support screen objects */
+	if (!dev_priv->sou_priv)
+		return -ENOSYS;
+
+	/*
+	 * Sanity checks.
+	 */
+
+	/* Surface must be marked as a scanout. */
+	if (unlikely(!surface->scanout))
+		return -EINVAL;
+
+	if (unlikely(surface->mip_levels[0] != 1 ||
+		     surface->num_sizes != 1 ||
+		     surface->sizes[0].width < mode_cmd->width ||
+		     surface->sizes[0].height < mode_cmd->height ||
+		     surface->sizes[0].depth != 1)) {
+		DRM_ERROR("Incompatible surface dimensions "
+			  "for requested mode.\n");
+		return -EINVAL;
+	}
+
+	switch (mode_cmd->depth) {
+	case 32:
+		format = SVGA3D_A8R8G8B8;
+		break;
+	case 24:
+		format = SVGA3D_X8R8G8B8;
+		break;
+	case 16:
+		format = SVGA3D_R5G6B5;
+		break;
+	case 15:
+		format = SVGA3D_A1R5G5B5;
+		break;
+	case 8:
+		format = SVGA3D_LUMINANCE8;
+		break;
+	default:
+		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+		return -EINVAL;
+	}
+
+	if (unlikely(format != surface->format)) {
+		DRM_ERROR("Invalid surface format for requested mode.\n");
+		return -EINVAL;
+	}
+
+	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
+	if (!vfbs) {
+		ret = -ENOMEM;
+		goto out_err1;
+	}
+
+	if (!vmw_surface_reference(surface)) {
+		DRM_ERROR("failed to reference surface %p\n", surface);
+		ret = -EINVAL;
+		goto out_err2;
+	}
+
+	/* XXX get the first 3 from the surface info */
+	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
+	vfbs->base.base.pitches[0] = mode_cmd->pitch;
+	vfbs->base.base.depth = mode_cmd->depth;
+	vfbs->base.base.width = mode_cmd->width;
+	vfbs->base.base.height = mode_cmd->height;
+	vfbs->surface = surface;
+	vfbs->base.user_handle = mode_cmd->handle;
+	vfbs->master = drm_master_get(file_priv->master);
+
+	mutex_lock(&vmaster->fb_surf_mutex);
+	list_add_tail(&vfbs->head, &vmaster->fb_surf);
+	mutex_unlock(&vmaster->fb_surf_mutex);
+
+	*out = &vfbs->base;
+
+	ret = drm_framebuffer_init(dev, &vfbs->base.base,
+				   &vmw_framebuffer_surface_funcs);
+	if (ret)
+		goto out_err3;
+
+	return 0;
+
+out_err3:
+	vmw_surface_unreference(&surface);
+out_err2:
+	kfree(vfbs);
+out_err1:
+	return ret;
+}
+
+/*
+ * Dmabuf framebuffer code
+ */
+
+#define vmw_framebuffer_to_vfbd(x) \
+	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
+
+struct vmw_framebuffer_dmabuf {
+	struct vmw_framebuffer base;
+	struct vmw_dma_buffer *buffer;
+};
+
+void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+{
+	struct vmw_framebuffer_dmabuf *vfbd =
+		vmw_framebuffer_to_vfbd(framebuffer);
+
+	drm_framebuffer_cleanup(framebuffer);
+	vmw_dmabuf_unreference(&vfbd->buffer);
+	ttm_base_object_unref(&vfbd->base.user_obj);
+
+	kfree(vfbd);
+}
+
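+/*
+ * do_dmabuf_dirty_ldu - Flush dirty regions on the legacy display
+ * path by writing one SVGA_CMD_UPDATE command per clip rect directly
+ * to the fifo.
+ */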
+static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
+			       struct vmw_framebuffer *framebuffer,
+			       unsigned flags, unsigned color,
+			       struct drm_clip_rect *clips,
+			       unsigned num_clips, int increment)
+{
+	size_t fifo_size;
+	int i;
+
+	struct {
+		uint32_t header;
+		SVGAFifoCmdUpdate body;
+	} *cmd;
+
+	fifo_size = sizeof(*cmd) * num_clips;
+	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Fifo reserve failed.\n");
+		return -ENOMEM;
+	}
+
+	memset(cmd, 0, fifo_size);
+	for (i = 0; i < num_clips; i++, clips += increment) {
+		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
+		cmd[i].body.x = cpu_to_le32(clips->x1);
+		cmd[i].body.y = cpu_to_le32(clips->y1);
+		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
+		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
+	}
+
+	vmw_fifo_commit(dev_priv, fifo_size);
+	return 0;
+}
+
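+/*
+ * do_dmabuf_define_gmrfb - Point the device's GMRFB at the dma buffer
+ * backing a framebuffer, by submitting an SVGA_CMD_DEFINE_GMRFB
+ * command describing its format, pitch and gmr id.
+ */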
+static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
+				  struct vmw_private *dev_priv,
+				  struct vmw_framebuffer *framebuffer)
+{
+	int depth = framebuffer->base.depth;
+	size_t fifo_size;
+	int ret;
+
+	struct {
+		uint32_t header;
+		SVGAFifoCmdDefineGMRFB body;
+	} *cmd;
+
+	/* Emulate RGBA support: contrary to what svga_reg.h suggests,
+	 * 32-bit depth is not supported by hosts. This is only a problem
+	 * if we later read the value back and expect what we uploaded.
+	 */
+	if (depth == 32)
+		depth = 24;
+
+	fifo_size = sizeof(*cmd);
+	cmd = kmalloc(fifo_size, GFP_KERNEL);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
+		return -ENOMEM;
+	}
+
+	memset(cmd, 0, fifo_size);
+	cmd->header = SVGA_CMD_DEFINE_GMRFB;
+	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
+	cmd->body.format.colorDepth = depth;
+	cmd->body.format.reserved = 0;
+	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
+	cmd->body.ptr.gmrId = framebuffer->user_handle;
+	cmd->body.ptr.offset = 0;
+
+	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+				  fifo_size, 0, NULL, NULL);
+
+	kfree(cmd);
+
+	return ret;
+}
+
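+/*
+ * do_dmabuf_dirty_sou - Flush dirty regions of a dma-buffer backed
+ * framebuffer on the screen-object path.
+ *
+ * First (re)defines the GMRFB, then, for each display unit showing
+ * the framebuffer, clips the rects to the crtc and emits one
+ * SVGA_CMD_BLIT_GMRFB_TO_SCREEN blit per surviving rect, keeping only
+ * the fence of the last submission.
+ */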
+static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
+			       struct vmw_private *dev_priv,
+			       struct vmw_framebuffer *framebuffer,
+			       unsigned flags, unsigned color,
+			       struct drm_clip_rect *clips,
+			       unsigned num_clips, int increment,
+			       struct vmw_fence_obj **out_fence)
+{
+	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+	struct drm_clip_rect *clips_ptr;
+	int i, k, num_units, ret;
+	struct drm_crtc *crtc;
+	size_t fifo_size;
+
+	struct {
+		uint32_t header;
+		SVGAFifoCmdBlitGMRFBToScreen body;
+	} *blits;
+
+	ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
+	if (unlikely(ret != 0))
+		return ret; /* define_gmrfb prints warnings */
+
+	fifo_size = sizeof(*blits) * num_clips;
+	blits = kmalloc(fifo_size, GFP_KERNEL);
+	if (unlikely(blits == NULL)) {
+		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
+		return -ENOMEM;
+	}
+
+	num_units = 0;
+	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+		if (crtc->fb != &framebuffer->base)
+			continue;
+		units[num_units++] = vmw_crtc_to_du(crtc);
+	}
+
+	for (k = 0; k < num_units; k++) {
+		struct vmw_display_unit *unit = units[k];
+		int hit_num = 0;
+
+		clips_ptr = clips;
+		for (i = 0; i < num_clips; i++, clips_ptr += increment) {
+			int clip_x1 = clips_ptr->x1 - unit->crtc.x;
+			int clip_y1 = clips_ptr->y1 - unit->crtc.y;
+			int clip_x2 = clips_ptr->x2 - unit->crtc.x;
+			int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+			int move_x, move_y;
+
+			/* skip any crtc that misses the clip region */
+			if (clip_x1 >= unit->crtc.mode.hdisplay ||
+			    clip_y1 >= unit->crtc.mode.vdisplay ||
+			    clip_x2 <= 0 || clip_y2 <= 0)
+				continue;
+
+			/* clip size to crtc size */
+			clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
+			clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
+
+			/* translate both src and dest to bring clip into screen */
+			move_x = min_t(int, clip_x1, 0);
+			move_y = min_t(int, clip_y1, 0);
+
+			/* actual translate done here */
+			blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+			blits[hit_num].body.destScreenId = unit->unit;
+			blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
+			blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
+			blits[hit_num].body.destRect.left = clip_x1 - move_x;
+			blits[hit_num].body.destRect.top = clip_y1 - move_y;
+			blits[hit_num].body.destRect.right = clip_x2;
+			blits[hit_num].body.destRect.bottom = clip_y2;
+			hit_num++;
+		}
+
+		/* no clips hit the crtc */
+		if (hit_num == 0)
+			continue;
+
+		/* only return the last fence */
+		if (out_fence && *out_fence)
+			vmw_fence_obj_unreference(out_fence);
+
+		fifo_size = sizeof(*blits) * hit_num;
+		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
+					  fifo_size, 0, NULL, out_fence);
+
+		if (unlikely(ret != 0))
+			break;
+	}
+
+	kfree(blits);
+
+	return ret;
+}
+
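+/*
+ * vmw_framebuffer_dmabuf_dirty - DRM dirty callback for dma-buffer
+ * backed framebuffers. Falls back to one full-framebuffer rect when
+ * no clips are given and dispatches to the legacy or screen-object
+ * implementation depending on which display system is active.
+ */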
+int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+				 struct drm_file *file_priv,
+				 unsigned flags, unsigned color,
+				 struct drm_clip_rect *clips,
+				 unsigned num_clips)
+{
+	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	struct vmw_framebuffer_dmabuf *vfbd =
+		vmw_framebuffer_to_vfbd(framebuffer);
+	struct drm_clip_rect norect;
+	int ret, increment = 1;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (!num_clips) {
+		num_clips = 1;
+		clips = &norect;
+		norect.x1 = norect.y1 = 0;
+		norect.x2 = framebuffer->width;
+		norect.y2 = framebuffer->height;
+	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+		num_clips /= 2;
+		increment = 2;
+	}
+
+	if (dev_priv->ldu_priv) {
+		ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
+					  flags, color,
+					  clips, num_clips, increment);
+	} else {
+		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
+					  flags, color,
+					  clips, num_clips, increment, NULL);
+	}
+
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+}
+
+static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
+	.destroy = vmw_framebuffer_dmabuf_destroy,
+	.dirty = vmw_framebuffer_dmabuf_dirty,
+};
+
+/**
+ * Pin the dmabuffer to the start of vram.
+ */
+static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
+{
+	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+	struct vmw_framebuffer_dmabuf *vfbd =
+		vmw_framebuffer_to_vfbd(&vfb->base);
+	int ret;
+
+	/* This code should not be used with screen objects */
+	BUG_ON(dev_priv->sou_priv);
+
+	vmw_overlay_pause_all(dev_priv);
+
+	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
+
+	vmw_overlay_resume_all(dev_priv);
+
+	WARN_ON(ret != 0);
+
+	return 0;
+}
+
+static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
+{
+	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+	struct vmw_framebuffer_dmabuf *vfbd =
+		vmw_framebuffer_to_vfbd(&vfb->base);
+
+	if (!vfbd->buffer) {
+		WARN_ON(!vfbd->buffer);
+		return 0;
+	}
+
+	return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
+}
+
+static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
+					  struct vmw_dma_buffer *dmabuf,
+					  struct vmw_framebuffer **out,
+					  const struct drm_mode_fb_cmd
+					  *mode_cmd)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct vmw_framebuffer_dmabuf *vfbd;
+	unsigned int requested_size;
+	int ret;
+
+	requested_size = mode_cmd->height * mode_cmd->pitch;
+	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
+		DRM_ERROR("Screen buffer object size is too small "
+			  "for requested mode.\n");
+		return -EINVAL;
+	}
+
+	/* Limited framebuffer color depth support for screen objects */
+	if (dev_priv->sou_priv) {
+		switch (mode_cmd->depth) {
+		case 32:
+		case 24:
+			/* Only support 32 bpp for 32 and 24 depth fbs */
+			if (mode_cmd->bpp == 32)
+				break;
+
+			DRM_ERROR("Invalid color depth/bbp: %d %d\n",
+				  mode_cmd->depth, mode_cmd->bpp);
+			return -EINVAL;
+		case 16:
+		case 15:
+			/* Only support 16 bpp for 16 and 15 depth fbs */
+			if (mode_cmd->bpp == 16)
+				break;
+
+			DRM_ERROR("Invalid color depth/bbp: %d %d\n",
+				  mode_cmd->depth, mode_cmd->bpp);
+			return -EINVAL;
+		default:
+			DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+			return -EINVAL;
+		}
+	}
+
+	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
+	if (!vfbd) {
+		ret = -ENOMEM;
+		goto out_err1;
+	}
+
+	if (!vmw_dmabuf_reference(dmabuf)) {
+		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
+		ret = -EINVAL;
+		goto out_err2;
+	}
+
+	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
+	vfbd->base.base.pitches[0] = mode_cmd->pitch;
+	vfbd->base.base.depth = mode_cmd->depth;
+	vfbd->base.base.width = mode_cmd->width;
+	vfbd->base.base.height = mode_cmd->height;
+	if (!dev_priv->sou_priv) {
+		vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
+		vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
+	}
+	vfbd->base.dmabuf = true;
+	vfbd->buffer = dmabuf;
+	vfbd->base.user_handle = mode_cmd->handle;
+	*out = &vfbd->base;
+
+	ret = drm_framebuffer_init(dev, &vfbd->base.base,
+				   &vmw_framebuffer_dmabuf_funcs);
+	if (ret)
+		goto out_err3;
+
+	return 0;
+
+out_err3:
+	vmw_dmabuf_unreference(&dmabuf);
+out_err2:
+	kfree(vfbd);
+out_err1:
+	return ret;
+}
+
+/*
+ * Generic Kernel modesetting functions
+ */
+
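+/*
+ * vmw_kms_fb_create - Entry point for framebuffer creation.
+ *
+ * Translates the drm_mode_fb_cmd2 into the legacy drm_mode_fb_cmd
+ * layout used internally, validates the mode against available VRAM,
+ * takes a reference on the backing user object, and then creates
+ * either a surface or a dma-buffer framebuffer depending on what the
+ * handle resolves to.
+ */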
+static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+						 struct drm_file *file_priv,
+						 struct drm_mode_fb_cmd2 *mode_cmd2)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_framebuffer *vfb = NULL;
+	struct vmw_surface *surface = NULL;
+	struct vmw_dma_buffer *bo = NULL;
+	struct ttm_base_object *user_obj;
+	struct drm_mode_fb_cmd mode_cmd;
+	int ret;
+
+	mode_cmd.width = mode_cmd2->width;
+	mode_cmd.height = mode_cmd2->height;
+	mode_cmd.pitch = mode_cmd2->pitches[0];
+	mode_cmd.handle = mode_cmd2->handles[0];
+	drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
+				    &mode_cmd.bpp);
+
+	/**
+	 * This code should be conditioned on Screen Objects not being used.
+	 * If screen objects are used, we can allocate a GMR to hold the
+	 * requested framebuffer.
+	 */
+
+	if (!vmw_kms_validate_mode_vram(dev_priv,
+					mode_cmd.pitch,
+					mode_cmd.height)) {
+		DRM_ERROR("VRAM size is too small for requested mode.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/*
+	 * Take a reference on the user object of the resource
+	 * backing the kms fb. This ensures that user-space handle
+	 * lookups on that resource will always work as long as
+	 * it's registered with a kms framebuffer. This is important,
+	 * since vmw_execbuf_process identifies resources in the
+	 * command stream using user-space handles.
+	 */
+
+	user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
+	if (unlikely(user_obj == NULL)) {
+		DRM_ERROR("Could not locate requested kms frame buffer.\n");
+		return ERR_PTR(-ENOENT);
+	}
+
+	/**
+	 * End conditioned code.
+	 */
+
+	/* returns either a dmabuf or surface */
+	ret = vmw_user_lookup_handle(dev_priv, tfile,
+				     mode_cmd.handle,
+				     &surface, &bo);
+	if (ret)
+		goto err_out;
+
+	/* Create the new framebuffer depending on what we got back */
+	if (bo)
+		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+						     &mode_cmd);
+	else if (surface)
+		ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
+						      surface, &vfb, &mode_cmd);
+	else
+		BUG();
+
+err_out:
+	/* vmw_user_lookup_handle takes one ref, and so does new_fb */
+	if (bo)
+		vmw_dmabuf_unreference(&bo);
+	if (surface)
+		vmw_surface_unreference(&surface);
+
+	if (ret) {
+		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+		ttm_base_object_unref(&user_obj);
+		return ERR_PTR(ret);
+	} else
+		vfb->user_obj = user_obj;
+
+	return &vfb->base;
+}
+
+static const struct drm_mode_config_funcs vmw_kms_funcs = {
+	.fb_create = vmw_kms_fb_create,
+};
+
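+/*
+ * vmw_kms_present - Present a surface on screen at a given position.
+ *
+ * Works much like do_surface_dirty_sou(): the clip rects are reduced
+ * to a bounding box, translated, clipped per display unit and emitted
+ * as SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN commands, except that the
+ * destination is offset by (destX, destY) and the surface id is
+ * supplied by the caller.
+ */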
+int vmw_kms_present(struct vmw_private *dev_priv,
+		    struct drm_file *file_priv,
+		    struct vmw_framebuffer *vfb,
+		    struct vmw_surface *surface,
+		    uint32_t sid,
+		    int32_t destX, int32_t destY,
+		    struct drm_vmw_rect *clips,
+		    uint32_t num_clips)
+{
+	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+	struct drm_clip_rect *tmp;
+	struct drm_crtc *crtc;
+	size_t fifo_size;
+	int i, k, num_units;
+	int ret = 0; /* silence warning */
+	int left, right, top, bottom;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBlitSurfaceToScreen body;
+	} *cmd;
+	SVGASignedRect *blits;
+
+	num_units = 0;
+	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+		if (crtc->fb != &vfb->base)
+			continue;
+		units[num_units++] = vmw_crtc_to_du(crtc);
+	}
+
+	BUG_ON(surface == NULL);
+	BUG_ON(!clips || !num_clips);
+
+	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+	if (unlikely(tmp == NULL)) {
+		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+		return -ENOMEM;
+	}
+
+	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
+	cmd = kmalloc(fifo_size, GFP_KERNEL);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
+		ret = -ENOMEM;
+		goto out_free_tmp;
+	}
+
+	left = clips->x;
+	right = clips->x + clips->w;
+	top = clips->y;
+	bottom = clips->y + clips->h;
+
+	for (i = 1; i < num_clips; i++) {
+		left = min_t(int, left, (int)clips[i].x);
+		right = max_t(int, right, (int)clips[i].x + clips[i].w);
+		top = min_t(int, top, (int)clips[i].y);
+		bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
+	}
+
+	/* only need to do this once */
+	memset(cmd, 0, fifo_size);
+	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
+
+	blits = (SVGASignedRect *)&cmd[1];
+
+	cmd->body.srcRect.left = left;
+	cmd->body.srcRect.right = right;
+	cmd->body.srcRect.top = top;
+	cmd->body.srcRect.bottom = bottom;
+
+	for (i = 0; i < num_clips; i++) {
+		tmp[i].x1 = clips[i].x - left;
+		tmp[i].x2 = clips[i].x + clips[i].w - left;
+		tmp[i].y1 = clips[i].y - top;
+		tmp[i].y2 = clips[i].y + clips[i].h - top;
+	}
+
+	for (k = 0; k < num_units; k++) {
+		struct vmw_display_unit *unit = units[k];
+		struct vmw_clip_rect clip;
+		int num;
+
+		clip.x1 = left + destX - unit->crtc.x;
+		clip.y1 = top + destY - unit->crtc.y;
+		clip.x2 = right + destX - unit->crtc.x;
+		clip.y2 = bottom + destY - unit->crtc.y;
+
+		/* skip any crtc that misses the clip region */
+		if (clip.x1 >= unit->crtc.mode.hdisplay ||
+		    clip.y1 >= unit->crtc.mode.vdisplay ||
+		    clip.x2 <= 0 || clip.y2 <= 0)
+			continue;
+
+		/*
+		 * In order for the clip rects to be correctly scaled
+		 * the src and dest rects need to be the same size.
+		 */
+		cmd->body.destRect.left = clip.x1;
+		cmd->body.destRect.right = clip.x2;
+		cmd->body.destRect.top = clip.y1;
+		cmd->body.destRect.bottom = clip.y2;
+
+		/* create a clip rect of the crtc in dest coords */
+		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+		clip.x1 = 0 - clip.x1;
+		clip.y1 = 0 - clip.y1;
+
+		/* need to reset sid as it is changed by execbuf */
+		cmd->body.srcImage.sid = sid;
+		cmd->body.destScreenId = unit->unit;
+
+		/* clip and write blits to cmd stream */
+		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
+
+		/* if no cliprects hit skip this */
+		if (num == 0)
+			continue;
+
+		/* recalculate package length */
+		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
+		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+					  fifo_size, 0, NULL, NULL);
+
+		if (unlikely(ret != 0))
+			break;
+	}
+
+	kfree(cmd);
+out_free_tmp:
+	kfree(tmp);
+
+	return ret;
+}
+
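+/*
+ * vmw_kms_readback - Read screen contents back into the dma buffer
+ * backing a framebuffer.
+ *
+ * Defines the GMRFB once and then emits one
+ * SVGA_CMD_BLIT_SCREEN_TO_GMRFB blit per clip rect and display unit,
+ * after clipping each rect to the crtc it intersects.
+ */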
+int vmw_kms_readback(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_vmw_rect *clips,
+		     uint32_t num_clips)
+{
+	struct vmw_framebuffer_dmabuf *vfbd =
+		vmw_framebuffer_to_vfbd(&vfb->base);
+	struct vmw_dma_buffer *dmabuf = vfbd->buffer;
+	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+	struct drm_crtc *crtc;
+	size_t fifo_size;
+	int i, k, ret, num_units, blits_pos;
+
+	struct {
+		uint32_t header;
+		SVGAFifoCmdDefineGMRFB body;
+	} *cmd;
+	struct {
+		uint32_t header;
+		SVGAFifoCmdBlitScreenToGMRFB body;
+	} *blits;
+
+	num_units = 0;
+	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+		if (crtc->fb != &vfb->base)
+			continue;
+		units[num_units++] = vmw_crtc_to_du(crtc);
+	}
+
+	BUG_ON(dmabuf == NULL);
+	BUG_ON(!clips || !num_clips);
+
+	/* take a safe guess at fifo size */
+	fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
+	cmd = kmalloc(fifo_size, GFP_KERNEL);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
+		return -ENOMEM;
+	}
+
+	memset(cmd, 0, fifo_size);
+	cmd->header = SVGA_CMD_DEFINE_GMRFB;
+	cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
+	cmd->body.format.colorDepth = vfb->base.depth;
+	cmd->body.format.reserved = 0;
+	cmd->body.bytesPerLine = vfb->base.pitches[0];
+	cmd->body.ptr.gmrId = vfb->user_handle;
+	cmd->body.ptr.offset = 0;
+
+	blits = (void *)&cmd[1];
+	blits_pos = 0;
+	for (i = 0; i < num_units; i++) {
+		struct drm_vmw_rect *c = clips;
+		for (k = 0; k < num_clips; k++, c++) {
+			/* transform clip coords to crtc origin based coords */
+			int clip_x1 = c->x - units[i]->crtc.x;
+			int clip_x2 = c->x - units[i]->crtc.x + c->w;
+			int clip_y1 = c->y - units[i]->crtc.y;
+			int clip_y2 = c->y - units[i]->crtc.y + c->h;
+			int dest_x = c->x;
+			int dest_y = c->y;
+
+			/* compensate for clipping: negate the
+			 * (negative) clip offset and add it.
+			 */
+			if (clip_x1 < 0)
+				dest_x += -clip_x1;
+			if (clip_y1 < 0)
+				dest_y += -clip_y1;
+
+			/* clip */
+			clip_x1 = max(clip_x1, 0);
+			clip_y1 = max(clip_y1, 0);
+			clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
+			clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
+
+			/* and cull any rects that miss the crtc */
+			if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
+			    clip_y1 >= units[i]->crtc.mode.vdisplay ||
+			    clip_x2 <= 0 || clip_y2 <= 0)
+				continue;
+
+			blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
+			blits[blits_pos].body.srcScreenId = units[i]->unit;
+			blits[blits_pos].body.destOrigin.x = dest_x;
+			blits[blits_pos].body.destOrigin.y = dest_y;
+
+			blits[blits_pos].body.srcRect.left = clip_x1;
+			blits[blits_pos].body.srcRect.top = clip_y1;
+			blits[blits_pos].body.srcRect.right = clip_x2;
+			blits[blits_pos].body.srcRect.bottom = clip_y2;
+			blits_pos++;
+		}
+	}
+	/* reset size here to the exact size calculated by the loops above */
+	fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
+
+	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
+				  0, user_fence_rep, NULL);
+
+	kfree(cmd);
+
+	return ret;
+}
+
+int vmw_kms_init(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int ret;
+
+	drm_mode_config_init(dev);
+	dev->mode_config.funcs = &vmw_kms_funcs;
+	dev->mode_config.min_width = 1;
+	dev->mode_config.min_height = 1;
+	/* assumed largest fb size */
+	dev->mode_config.max_width = 8192;
+	dev->mode_config.max_height = 8192;
+
+	ret = vmw_kms_init_screen_object_display(dev_priv);
+	if (ret) /* Fallback */
+		(void)vmw_kms_init_legacy_display_system(dev_priv);
+
+	return 0;
+}
+
+int vmw_kms_close(struct vmw_private *dev_priv)
+{
+	/*
+	 * The docs say we should take the lock before calling this
+	 * function, but since it destroys encoders and our destructor
+	 * calls drm_encoder_cleanup(), which takes the lock, we would
+	 * deadlock.
+	 */
+	drm_mode_config_cleanup(dev_priv->dev);
+	if (dev_priv->sou_priv)
+		vmw_kms_close_screen_object_display(dev_priv);
+	else
+		vmw_kms_close_legacy_display_system(dev_priv);
+	return 0;
+}
+
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct drm_vmw_cursor_bypass_arg *arg = data;
+	struct vmw_display_unit *du;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	int ret = 0;
+
+
+	mutex_lock(&dev->mode_config.mutex);
+	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
+
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			du = vmw_crtc_to_du(crtc);
+			du->hotspot_x = arg->xhot;
+			du->hotspot_y = arg->yhot;
+		}
+
+		mutex_unlock(&dev->mode_config.mutex);
+		return 0;
+	}
+
+	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	crtc = obj_to_crtc(obj);
+	du = vmw_crtc_to_du(crtc);
+
+	du->hotspot_x = arg->xhot;
+	du->hotspot_y = arg->yhot;
+
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
+
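+/*
+ * vmw_kms_write_svga - Program a display mode through the legacy SVGA
+ * registers, using the pitchlock register or fifo field where
+ * available, and verify that the host accepted the requested depth.
+ */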
+int vmw_kms_write_svga(struct vmw_private *vmw_priv,
+			unsigned width, unsigned height, unsigned pitch,
+			unsigned bpp, unsigned depth)
+{
+	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
+	else if (vmw_fifo_have_pitchlock(vmw_priv))
+		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
+	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
+	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
+
+	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
+		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
+			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
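+/*
+ * vmw_kms_save_vga - Save the current VGA state (dimensions, bpp,
+ * pitchlock and, if the host supports display topology, the
+ * per-display layout) so that vmw_kms_restore_vga() can restore it.
+ */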
+int vmw_kms_save_vga(struct vmw_private *vmw_priv)
+{
+	struct vmw_vga_topology_state *save;
+	uint32_t i;
+
+	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
+	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
+	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
+	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+		vmw_priv->vga_pitchlock =
+		  vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
+	else if (vmw_fifo_have_pitchlock(vmw_priv))
+		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
+						       SVGA_FIFO_PITCHLOCK);
+
+	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
+		return 0;
+
+	vmw_priv->num_displays = vmw_read(vmw_priv,
+					  SVGA_REG_NUM_GUEST_DISPLAYS);
+
+	if (vmw_priv->num_displays == 0)
+		vmw_priv->num_displays = 1;
+
+	for (i = 0; i < vmw_priv->num_displays; ++i) {
+		save = &vmw_priv->vga_save[i];
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
+		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
+		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
+		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
+		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
+		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+		if (i == 0 && vmw_priv->num_displays == 1 &&
+		    save->width == 0 && save->height == 0) {
+
+			/*
+			 * It should be fairly safe to assume that these
+			 * values are uninitialized.
+			 */
+
+			save->width = vmw_priv->vga_width - save->pos_x;
+			save->height = vmw_priv->vga_height - save->pos_y;
+		}
+	}
+
+	return 0;
+}
+
+int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
+{
+	struct vmw_vga_topology_state *save;
+	uint32_t i;
+
+	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
+	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
+	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
+	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
+			  vmw_priv->vga_pitchlock);
+	else if (vmw_fifo_have_pitchlock(vmw_priv))
+		iowrite32(vmw_priv->vga_pitchlock,
+			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+
+	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
+		return 0;
+
+	for (i = 0; i < vmw_priv->num_displays; ++i) {
+		save = &vmw_priv->vga_save[i];
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
+		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+	}
+
+	return 0;
+}
+
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+				uint32_t pitch,
+				uint32_t height)
+{
+	return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+}
+
+
+/**
+ * Function called by DRM code with vbl_lock held.
+ */
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	return 0;
+}
+
+/**
+ * Function called by DRM code with vbl_lock held.
+ */
+int vmw_enable_vblank(struct drm_device *dev, int crtc)
+{
+	return -ENOSYS;
+}
+
+/**
+ * Function called by DRM code with vbl_lock held.
+ */
+void vmw_disable_vblank(struct drm_device *dev, int crtc)
+{
+}
+
+
+/*
+ * Small shared kms functions.
+ */
+
+int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
+			 struct drm_vmw_rect *rects)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct vmw_display_unit *du;
+	struct drm_connector *con;
+
+	mutex_lock(&dev->mode_config.mutex);
+
+#if 0
+	{
+		unsigned int i;
+
+		DRM_INFO("%s: new layout ", __func__);
+		for (i = 0; i < num; i++)
+			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
+				 rects[i].w, rects[i].h);
+		DRM_INFO("\n");
+	}
+#endif
+
+	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
+		du = vmw_connector_to_du(con);
+		if (num > du->unit) {
+			du->pref_width = rects[du->unit].w;
+			du->pref_height = rects[du->unit].h;
+			du->pref_active = true;
+			du->gui_x = rects[du->unit].x;
+			du->gui_y = rects[du->unit].y;
+		} else {
+			du->pref_width = 800;
+			du->pref_height = 600;
+			du->pref_active = false;
+		}
+		con->status = vmw_du_connector_detect(con, true);
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+
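+/*
+ * vmw_du_page_flip - Page-flip implementation for screen objects.
+ *
+ * Swaps in the new framebuffer, performs a full-screen dirty update
+ * to make it visible, and queues the flip completion event on the
+ * fence returned by the dirty call, so that userspace is signalled
+ * once the blit has actually executed.
+ */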
+int vmw_du_page_flip(struct drm_crtc *crtc,
+		     struct drm_framebuffer *fb,
+		     struct drm_pending_vblank_event *event)
+{
+	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+	struct drm_framebuffer *old_fb = crtc->fb;
+	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
+	struct drm_file *file_priv;
+	struct vmw_fence_obj *fence = NULL;
+	struct drm_clip_rect clips;
+	int ret;
+
+	if (event == NULL)
+		return -EINVAL;
+
+	/* require ScreenObject support for page flipping */
+	if (!dev_priv->sou_priv)
+		return -ENOSYS;
+
+	file_priv = event->base.file_priv;
+	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
+		return -EINVAL;
+
+	crtc->fb = fb;
+
+	/* do a full screen dirty update */
+	clips.x1 = clips.y1 = 0;
+	clips.x2 = fb->width;
+	clips.y2 = fb->height;
+
+	if (vfb->dmabuf)
+		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
+					  0, 0, &clips, 1, 1, &fence);
+	else
+		ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
+					   0, 0, &clips, 1, 1, &fence);
+
+
+	if (ret != 0)
+		goto out_no_fence;
+	if (!fence) {
+		ret = -EINVAL;
+		goto out_no_fence;
+	}
+
+	ret = vmw_event_fence_action_queue(file_priv, fence,
+					   &event->base,
+					   &event->event.tv_sec,
+					   &event->event.tv_usec,
+					   true);
+
+	/*
+	 * No need to hold on to this now. The only cleanup
+	 * we need to do if we fail is unref the fence.
+	 */
+	vmw_fence_obj_unreference(&fence);
+
+	if (vmw_crtc_to_du(crtc)->is_implicit)
+		vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
+
+	return ret;
+
+out_no_fence:
+	crtc->fb = old_fb;
+	return ret;
+}
+
+
+void vmw_du_crtc_save(struct drm_crtc *crtc)
+{
+}
+
+void vmw_du_crtc_restore(struct drm_crtc *crtc)
+{
+}
+
+void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+			   u16 *r, u16 *g, u16 *b,
+			   uint32_t start, uint32_t size)
+{
+	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+	int i;
+
+	for (i = 0; i < size; i++) {
+		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
+			  r[i], g[i], b[i]);
+		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
+		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
+		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
+	}
+}
+
+void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
+{
+}
+
+void vmw_du_connector_save(struct drm_connector *connector)
+{
+}
+
+void vmw_du_connector_restore(struct drm_connector *connector)
+{
+}
+
+enum drm_connector_status
+vmw_du_connector_detect(struct drm_connector *connector, bool force)
+{
+	uint32_t num_displays;
+	struct drm_device *dev = connector->dev;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_display_unit *du = vmw_connector_to_du(connector);
+
+	mutex_lock(&dev_priv->hw_mutex);
+	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	return ((vmw_connector_to_du(connector)->unit < num_displays &&
+		 du->pref_active) ?
+		connector_status_connected : connector_status_disconnected);
+}
+
+static struct drm_display_mode vmw_kms_connector_builtin[] = {
+	/* 640x480@60Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+		   752, 800, 0, 480, 489, 492, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 800x600@60Hz */
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+		   968, 1056, 0, 600, 601, 605, 628, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1024x768@60Hz */
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+		   1184, 1344, 0, 768, 771, 777, 806, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1152x864@75Hz */
+	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+		   1344, 1600, 0, 864, 865, 868, 900, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x768@60Hz */
+	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+		   1472, 1664, 0, 768, 771, 778, 798, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x800@60Hz */
+	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+		   1480, 1680, 0, 800, 803, 809, 831, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1280x960@60Hz */
+	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+		   1488, 1800, 0, 960, 961, 964, 1000, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x1024@60Hz */
+	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1360x768@60Hz */
+	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+		   1536, 1792, 0, 768, 771, 777, 795, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1400x1050@60Hz */
+	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1440x900@60Hz */
+	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+		   1672, 1904, 0, 900, 903, 909, 934, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1600x1200@60Hz */
+	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1680x1050@60Hz */
+	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1792x1344@60Hz */
+	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1856x1392@60Hz */
+	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1200@60Hz */
+	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1440@60Hz */
+	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 2560x1600@60Hz */
+	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* Terminate */
+	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
+};
+
+/**
+ * vmw_guess_mode_timing - Provide fake timings for a
+ * 60Hz vrefresh mode.
+ *
+ * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
+ * members filled in.
+ */
+static void vmw_guess_mode_timing(struct drm_display_mode *mode)
+{
+	mode->hsync_start = mode->hdisplay + 50;
+	mode->hsync_end = mode->hsync_start + 50;
+	mode->htotal = mode->hsync_end + 50;
+
+	mode->vsync_start = mode->vdisplay + 50;
+	mode->vsync_end = mode->vsync_start + 50;
+	mode->vtotal = mode->vsync_end + 50;
+
+	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
+	mode->vrefresh = drm_mode_vrefresh(mode);
+}
+
+
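+/*
+ * vmw_du_connector_fill_modes - Build the connector's mode list.
+ *
+ * Adds a preferred mode matching the size requested via the layout
+ * ioctl, then all built-in modes that fit within the given maximum
+ * dimensions and available VRAM. Modes are validated assuming a
+ * worst-case pitch of hdisplay * 2 bytes.
+ */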
+int vmw_du_connector_fill_modes(struct drm_connector *connector,
+				uint32_t max_width, uint32_t max_height)
+{
+	struct vmw_display_unit *du = vmw_connector_to_du(connector);
+	struct drm_device *dev = connector->dev;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *bmode;
+	struct drm_display_mode prefmode = { DRM_MODE("preferred",
+		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+	};
+	int i;
+
+	/* Add preferred mode */
+	{
+		mode = drm_mode_duplicate(dev, &prefmode);
+		if (!mode)
+			return 0;
+		mode->hdisplay = du->pref_width;
+		mode->vdisplay = du->pref_height;
+		vmw_guess_mode_timing(mode);
+
+		if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
+					       mode->vdisplay)) {
+			drm_mode_probed_add(connector, mode);
+		} else {
+			drm_mode_destroy(dev, mode);
+			mode = NULL;
+		}
+
+		if (du->pref_mode) {
+			list_del_init(&du->pref_mode->head);
+			drm_mode_destroy(dev, du->pref_mode);
+		}
+
+		/* mode might be NULL here; this is intended */
+		du->pref_mode = mode;
+	}
+
+	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
+		bmode = &vmw_kms_connector_builtin[i];
+		if (bmode->hdisplay > max_width ||
+		    bmode->vdisplay > max_height)
+			continue;
+
+		if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
+						bmode->vdisplay))
+			continue;
+
+		mode = drm_mode_duplicate(dev, bmode);
+		if (!mode)
+			return 0;
+		mode->vrefresh = drm_mode_vrefresh(mode);
+
+		drm_mode_probed_add(connector, mode);
+	}
+
+	/* Move the preferred mode first, to help apps pick the right mode. */
+	if (du->pref_mode)
+		list_move(&du->pref_mode->head, &connector->probed_modes);
+
+	drm_mode_connector_list_update(connector);
+
+	return 1;
+}
+
+int vmw_du_connector_set_property(struct drm_connector *connector,
+				  struct drm_property *property,
+				  uint64_t val)
+{
+	return 0;
+}
+
+
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_update_layout_arg *arg =
+		(struct drm_vmw_update_layout_arg *)data;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	void __user *user_rects;
+	struct drm_vmw_rect *rects;
+	unsigned rects_size;
+	int ret;
+	int i;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (!arg->num_outputs) {
+		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
+		vmw_du_update_layout(dev_priv, 1, &def_rect);
+		goto out_unlock;
+	}
+
+	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
+	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+			GFP_KERNEL);
+	if (unlikely(!rects)) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	user_rects = (void __user *)(unsigned long)arg->rects;
+	ret = copy_from_user(rects, user_rects, rects_size);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to get rects.\n");
+		ret = -EFAULT;
+		goto out_free;
+	}
+
+	for (i = 0; i < arg->num_outputs; ++i) {
+		if (rects[i].x < 0 ||
+		    rects[i].y < 0 ||
+		    rects[i].x + rects[i].w > mode_config->max_width ||
+		    rects[i].y + rects[i].h > mode_config->max_height) {
+			DRM_ERROR("Invalid GUI layout.\n");
+			ret = -EINVAL;
+			goto out_free;
+		}
+	}
+
+	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
+
+out_free:
+	kfree(rects);
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
new file mode 100644
index 0000000..6fa89c9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -0,0 +1,166 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_KMS_H_
+#define VMWGFX_KMS_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "vmwgfx_drv.h"
+
+#define VMWGFX_NUM_DISPLAY_UNITS 8
+
+
+#define vmw_framebuffer_to_vfb(x) \
+	container_of(x, struct vmw_framebuffer, base)
+
+/**
+ * Base class for framebuffers
+ *
+ * @pin is called whenever a crtc uses this framebuffer
+ * @unpin is called when the framebuffer is no longer used by any crtc
+ */
+struct vmw_framebuffer {
+	struct drm_framebuffer base;
+	int (*pin)(struct vmw_framebuffer *fb);
+	int (*unpin)(struct vmw_framebuffer *fb);
+	bool dmabuf;
+	struct ttm_base_object *user_obj;
+	uint32_t user_handle;
+};
+
+/*
+ * Basic cursor manipulation
+ */
+int vmw_cursor_update_image(struct vmw_private *dev_priv,
+			    u32 *image, u32 width, u32 height,
+			    u32 hotspotX, u32 hotspotY);
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+			     struct vmw_dma_buffer *dmabuf,
+			     u32 width, u32 height,
+			     u32 hotspotX, u32 hotspotY);
+void vmw_cursor_update_position(struct vmw_private *dev_priv,
+				bool show, int x, int y);
+
+
+/**
+ * Base class display unit.
+ *
+ * Since the SVGA hw doesn't have a concept of a crtc, encoder or connector,
+ * the display unit is all of them at the same time. This is true for both
+ * legacy multimon and screen objects.
+ */
+struct vmw_display_unit {
+	struct drm_crtc crtc;
+	struct drm_encoder encoder;
+	struct drm_connector connector;
+
+	struct vmw_surface *cursor_surface;
+	struct vmw_dma_buffer *cursor_dmabuf;
+	size_t cursor_age;
+
+	int cursor_x;
+	int cursor_y;
+
+	int hotspot_x;
+	int hotspot_y;
+
+	unsigned unit;
+
+	/*
+	 * Preferred mode tracking.
+	 */
+	unsigned pref_width;
+	unsigned pref_height;
+	bool pref_active;
+	struct drm_display_mode *pref_mode;
+
+	/*
+	 * Gui positioning
+	 */
+	int gui_x;
+	int gui_y;
+	bool is_implicit;
+};
+
+#define vmw_crtc_to_du(x) \
+	container_of(x, struct vmw_display_unit, crtc)
+#define vmw_connector_to_du(x) \
+	container_of(x, struct vmw_display_unit, connector)
+
+
+/*
+ * Shared display unit functions - vmwgfx_kms.c
+ */
+void vmw_display_unit_cleanup(struct vmw_display_unit *du);
+int vmw_du_page_flip(struct drm_crtc *crtc,
+		     struct drm_framebuffer *fb,
+		     struct drm_pending_vblank_event *event);
+void vmw_du_crtc_save(struct drm_crtc *crtc);
+void vmw_du_crtc_restore(struct drm_crtc *crtc);
+void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+			   u16 *r, u16 *g, u16 *b,
+			   uint32_t start, uint32_t size);
+int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+			   uint32_t handle, uint32_t width, uint32_t height);
+int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
+void vmw_du_connector_save(struct drm_connector *connector);
+void vmw_du_connector_restore(struct drm_connector *connector);
+enum drm_connector_status
+vmw_du_connector_detect(struct drm_connector *connector, bool force);
+int vmw_du_connector_fill_modes(struct drm_connector *connector,
+				uint32_t max_width, uint32_t max_height);
+int vmw_du_connector_set_property(struct drm_connector *connector,
+				  struct drm_property *property,
+				  uint64_t val);
+
+
+/*
+ * Legacy display unit functions - vmwgfx_ldu.c
+ */
+int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
+int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
+
+/*
+ * Screen Objects display functions - vmwgfx_scrn.c
+ */
+int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
+int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
+			      struct drm_vmw_rect *rects);
+bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
+				     struct drm_crtc *crtc);
+void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
+					      struct drm_crtc *crtc);
+
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
new file mode 100644
index 0000000..79f7e8e
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -0,0 +1,444 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_kms.h"
+
+
+#define vmw_crtc_to_ldu(x) \
+	container_of(x, struct vmw_legacy_display_unit, base.crtc)
+#define vmw_encoder_to_ldu(x) \
+	container_of(x, struct vmw_legacy_display_unit, base.encoder)
+#define vmw_connector_to_ldu(x) \
+	container_of(x, struct vmw_legacy_display_unit, base.connector)
+
+struct vmw_legacy_display {
+	struct list_head active;
+
+	unsigned num_active;
+	unsigned last_num_active;
+
+	struct vmw_framebuffer *fb;
+};
+
+/**
+ * Display unit using the legacy register interface.
+ */
+struct vmw_legacy_display_unit {
+	struct vmw_display_unit base;
+
+	struct list_head active;
+};
+
+static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
+{
+	list_del_init(&ldu->active);
+	vmw_display_unit_cleanup(&ldu->base);
+	kfree(ldu);
+}
+
+
+/*
+ * Legacy Display Unit CRTC functions
+ */
+
+static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
+{
+	vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
+}
+
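+/*
+ * vmw_ldu_commit_list - Program the host with the current list of
+ * active legacy display units.
+ *
+ * Without SVGA_CAP_DISPLAY_TOPOLOGY only a single bounding mode is
+ * written through the SVGA registers; otherwise each active unit is
+ * written out as a guest display, and the first unit with a cursor
+ * gets its cursor image re-sent.
+ */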
+static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
+{
+	struct vmw_legacy_display *lds = dev_priv->ldu_priv;
+	struct vmw_legacy_display_unit *entry;
+	struct vmw_display_unit *du = NULL;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_crtc *crtc = NULL;
+	int i = 0, ret;
+
+	/* If there is no display topology the host just assumes
+	 * that the guest will set the same layout as the host.
+	 */
+	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
+		int w = 0, h = 0;
+		list_for_each_entry(entry, &lds->active, active) {
+			crtc = &entry->base.crtc;
+			w = max(w, crtc->x + crtc->mode.hdisplay);
+			h = max(h, crtc->y + crtc->mode.vdisplay);
+			i++;
+		}
+
+		if (crtc == NULL)
+			return 0;
+		fb = entry->base.crtc.fb;
+
+		return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
+					  fb->bits_per_pixel, fb->depth);
+	}
+
+	if (!list_empty(&lds->active)) {
+		entry = list_entry(lds->active.next, typeof(*entry), active);
+		fb = entry->base.crtc.fb;
+
+		vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
+				   fb->bits_per_pixel, fb->depth);
+	}
+
+	/* Make sure we always show something. */
+	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
+		  lds->num_active ? lds->num_active : 1);
+
+	i = 0;
+	list_for_each_entry(entry, &lds->active, active) {
+		crtc = &entry->base.crtc;
+
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
+		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+
+		i++;
+	}
+
+	BUG_ON(i != lds->num_active);
+
+	lds->last_num_active = lds->num_active;
+
+
+	/* Find the first du with a cursor. */
+	list_for_each_entry(entry, &lds->active, active) {
+		du = &entry->base;
+
+		if (!du->cursor_dmabuf)
+			continue;
+
+		ret = vmw_cursor_update_dmabuf(dev_priv,
+					       du->cursor_dmabuf,
+					       64, 64,
+					       du->hotspot_x,
+					       du->hotspot_y);
+		if (ret == 0)
+			break;
+
+		DRM_ERROR("Could not update cursor image\n");
+	}
+
+	return 0;
+}
+
+static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
+			      struct vmw_legacy_display_unit *ldu)
+{
+	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
+	if (list_empty(&ldu->active))
+		return 0;
+
+	/* Must use list_del_init(); otherwise list_empty(&ldu->active) will not work. */
+	list_del_init(&ldu->active);
+	if (--(ld->num_active) == 0) {
+		BUG_ON(!ld->fb);
+		if (ld->fb->unpin)
+			ld->fb->unpin(ld->fb);
+		ld->fb = NULL;
+	}
+
+	return 0;
+}
+
+static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
+			      struct vmw_legacy_display_unit *ldu,
+			      struct vmw_framebuffer *vfb)
+{
+	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
+	struct vmw_legacy_display_unit *entry;
+	struct list_head *at;
+
+	BUG_ON(!ld->num_active && ld->fb);
+	if (vfb != ld->fb) {
+		if (ld->fb && ld->fb->unpin)
+			ld->fb->unpin(ld->fb);
+		if (vfb->pin)
+			vfb->pin(vfb);
+		ld->fb = vfb;
+	}
+
+	if (!list_empty(&ldu->active))
+		return 0;
+
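+	/* Keep the active list sorted by ascending unit number. */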
+	at = &ld->active;
+	list_for_each_entry(entry, &ld->active, active) {
+		if (entry->base.unit > ldu->base.unit)
+			break;
+
+		at = &entry->active;
+	}
+
+	list_add(&ldu->active, at);
+
+	ld->num_active++;
+
+	return 0;
+}
+
+static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
+{
+	struct vmw_private *dev_priv;
+	struct vmw_legacy_display_unit *ldu;
+	struct drm_connector *connector;
+	struct drm_display_mode *mode;
+	struct drm_encoder *encoder;
+	struct vmw_framebuffer *vfb;
+	struct drm_framebuffer *fb;
+	struct drm_crtc *crtc;
+
+	if (!set)
+		return -EINVAL;
+
+	if (!set->crtc)
+		return -EINVAL;
+
+	/* get the ldu */
+	crtc = set->crtc;
+	ldu = vmw_crtc_to_ldu(crtc);
+	vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
+	dev_priv = vmw_priv(crtc->dev);
+
+	if (set->num_connectors > 1) {
+		DRM_ERROR("too many connectors\n");
+		return -EINVAL;
+	}
+
+	if (set->num_connectors == 1 &&
+	    set->connectors[0] != &ldu->base.connector) {
+		DRM_ERROR("connector doesn't match %p %p\n",
+			set->connectors[0], &ldu->base.connector);
+		return -EINVAL;
+	}
+
+	/* The legacy display unit only supports one fb active at a time. */
+	if (dev_priv->ldu_priv->fb && vfb &&
+	    !(dev_priv->ldu_priv->num_active == 1 &&
+	      !list_empty(&ldu->active)) &&
+	    dev_priv->ldu_priv->fb != vfb) {
+		DRM_ERROR("Multiple framebuffers not supported\n");
+		return -EINVAL;
+	}
+
+	/* Since connector, encoder and crtc always map one-to-one, these are safe. */
+	connector = &ldu->base.connector;
+	encoder = &ldu->base.encoder;
+
+	/* should we turn the crtc off? */
+	if (set->num_connectors == 0 || !set->mode || !set->fb) {
+
+		connector->encoder = NULL;
+		encoder->crtc = NULL;
+		crtc->fb = NULL;
+
+		vmw_ldu_del_active(dev_priv, ldu);
+
+		return vmw_ldu_commit_list(dev_priv);
+	}
+
+
+	/* we now know we want to set a mode */
+	mode = set->mode;
+	fb = set->fb;
+
+	if (set->x + mode->hdisplay > fb->width ||
+	    set->y + mode->vdisplay > fb->height) {
+		DRM_ERROR("set outside of framebuffer\n");
+		return -EINVAL;
+	}
+
+	vmw_fb_off(dev_priv);
+
+	crtc->fb = fb;
+	encoder->crtc = crtc;
+	connector->encoder = encoder;
+	crtc->x = set->x;
+	crtc->y = set->y;
+	crtc->mode = *mode;
+
+	vmw_ldu_add_active(dev_priv, ldu, vfb);
+
+	return vmw_ldu_commit_list(dev_priv);
+}
+
+static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
+	.save = vmw_du_crtc_save,
+	.restore = vmw_du_crtc_restore,
+	.cursor_set = vmw_du_crtc_cursor_set,
+	.cursor_move = vmw_du_crtc_cursor_move,
+	.gamma_set = vmw_du_crtc_gamma_set,
+	.destroy = vmw_ldu_crtc_destroy,
+	.set_config = vmw_ldu_crtc_set_config,
+};
+
+
+/*
+ * Legacy Display Unit encoder functions
+ */
+
+static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
+{
+	vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
+}
+
+static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
+	.destroy = vmw_ldu_encoder_destroy,
+};
+
+/*
+ * Legacy Display Unit connector functions
+ */
+
+static void vmw_ldu_connector_destroy(struct drm_connector *connector)
+{
+	vmw_ldu_destroy(vmw_connector_to_ldu(connector));
+}
+
+static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+	.dpms = vmw_du_connector_dpms,
+	.save = vmw_du_connector_save,
+	.restore = vmw_du_connector_restore,
+	.detect = vmw_du_connector_detect,
+	.fill_modes = vmw_du_connector_fill_modes,
+	.set_property = vmw_du_connector_set_property,
+	.destroy = vmw_ldu_connector_destroy,
+};
+
+static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
+{
+	struct vmw_legacy_display_unit *ldu;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+
+	ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
+	if (!ldu)
+		return -ENOMEM;
+
+	ldu->base.unit = unit;
+	crtc = &ldu->base.crtc;
+	encoder = &ldu->base.encoder;
+	connector = &ldu->base.connector;
+
+	INIT_LIST_HEAD(&ldu->active);
+
+	ldu->base.pref_active = (unit == 0);
+	ldu->base.pref_width = dev_priv->initial_width;
+	ldu->base.pref_height = dev_priv->initial_height;
+	ldu->base.pref_mode = NULL;
+	ldu->base.is_implicit = true;
+
+	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
+			   DRM_MODE_CONNECTOR_VIRTUAL);
+	connector->status = vmw_du_connector_detect(connector, true);
+
+	drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
+			 DRM_MODE_ENCODER_VIRTUAL);
+	drm_mode_connector_attach_encoder(connector, encoder);
+	encoder->possible_crtcs = (1 << unit);
+	encoder->possible_clones = 0;
+
+	drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
+	drm_object_attach_property(&connector->base,
+				      dev->mode_config.dirty_info_property,
+				      1);
+
+	return 0;
+}
+
+int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int i, ret;
+
+	if (dev_priv->ldu_priv) {
+		DRM_INFO("ldu system already on\n");
+		return -EINVAL;
+	}
+
+	dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
+	if (!dev_priv->ldu_priv)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
+	dev_priv->ldu_priv->num_active = 0;
+	dev_priv->ldu_priv->last_num_active = 0;
+	dev_priv->ldu_priv->fb = NULL;
+
+	/* For old hardware without multimon, only enable one display. */
+	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
+		ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+	else
+		ret = drm_vblank_init(dev, 1);
+	if (ret != 0)
+		goto err_free;
+
+	ret = drm_mode_create_dirty_info_property(dev);
+	if (ret != 0)
+		goto err_vblank_cleanup;
+
+	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
+		for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
+			vmw_ldu_init(dev_priv, i);
+	else
+		vmw_ldu_init(dev_priv, 0);
+
+	return 0;
+
+err_vblank_cleanup:
+	drm_vblank_cleanup(dev);
+err_free:
+	kfree(dev_priv->ldu_priv);
+	dev_priv->ldu_priv = NULL;
+	return ret;
+}
+
+int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	if (!dev_priv->ldu_priv)
+		return -ENOSYS;
+
+	drm_vblank_cleanup(dev);
+
+	BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
+
+	kfree(dev_priv->ldu_priv);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
new file mode 100644
index 0000000..8a8725c
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
@@ -0,0 +1,171 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "vmwgfx_drv.h"
+
+struct vmw_marker {
+	struct list_head head;
+	uint32_t seqno;
+	struct timespec submitted;
+};
+
+void vmw_marker_queue_init(struct vmw_marker_queue *queue)
+{
+	INIT_LIST_HEAD(&queue->head);
+	queue->lag = ns_to_timespec(0);
+	getrawmonotonic(&queue->lag_time);
+	spin_lock_init(&queue->lock);
+}
+
+void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
+{
+	struct vmw_marker *marker, *next;
+
+	spin_lock(&queue->lock);
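+	/* The whole queue is going away, so there is no need to unlink
+	 * the markers from the list before freeing them.
+	 */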
+	list_for_each_entry_safe(marker, next, &queue->head, head) {
+		kfree(marker);
+	}
+	spin_unlock(&queue->lock);
+}
+
+int vmw_marker_push(struct vmw_marker_queue *queue,
+		   uint32_t seqno)
+{
+	struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
+
+	if (unlikely(!marker))
+		return -ENOMEM;
+
+	marker->seqno = seqno;
+	getrawmonotonic(&marker->submitted);
+	spin_lock(&queue->lock);
+	list_add_tail(&marker->head, &queue->head);
+	spin_unlock(&queue->lock);
+
+	return 0;
+}
+
+int vmw_marker_pull(struct vmw_marker_queue *queue,
+		   uint32_t signaled_seqno)
+{
+	struct vmw_marker *marker, *next;
+	struct timespec now;
+	bool updated = false;
+
+	spin_lock(&queue->lock);
+	getrawmonotonic(&now);
+
+	if (list_empty(&queue->head)) {
+		queue->lag = ns_to_timespec(0);
+		queue->lag_time = now;
+		updated = true;
+		goto out_unlock;
+	}
+
+	list_for_each_entry_safe(marker, next, &queue->head, head) {
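+		/*
+		 * Unsigned wrap-around safe comparison: a difference
+		 * larger than 2^30 means this marker's seqno lies
+		 * ahead of the signaled one, so keep waiting on it.
+		 */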
+		if (signaled_seqno - marker->seqno > (1 << 30))
+			continue;
+
+		queue->lag = timespec_sub(now, marker->submitted);
+		queue->lag_time = now;
+		updated = true;
+		list_del(&marker->head);
+		kfree(marker);
+	}
+
+out_unlock:
+	spin_unlock(&queue->lock);
+
+	return (updated) ? 0 : -EBUSY;
+}
+
+static struct timespec vmw_timespec_add(struct timespec t1,
+					struct timespec t2)
+{
+	t1.tv_sec += t2.tv_sec;
+	t1.tv_nsec += t2.tv_nsec;
+	if (t1.tv_nsec >= 1000000000L) {
+		t1.tv_sec += 1;
+		t1.tv_nsec -= 1000000000L;
+	}
+
+	return t1;
+}
+
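+/*
+ * Add the time elapsed since the last lag update to the accumulated
+ * lag, so the lag keeps growing while markers remain unsignaled.
+ */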
+static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
+{
+	struct timespec now;
+
+	spin_lock(&queue->lock);
+	getrawmonotonic(&now);
+	queue->lag = vmw_timespec_add(queue->lag,
+				      timespec_sub(now, queue->lag_time));
+	queue->lag_time = now;
+	spin_unlock(&queue->lock);
+	return queue->lag;
+}
+
+
+static bool vmw_lag_lt(struct vmw_marker_queue *queue,
+		       uint32_t us)
+{
+	struct timespec lag, cond;
+
+	cond = ns_to_timespec((s64) us * 1000);
+	lag = vmw_fifo_lag(queue);
+	return (timespec_compare(&lag, &cond) < 1);
+}
+
+int vmw_wait_lag(struct vmw_private *dev_priv,
+		 struct vmw_marker_queue *queue, uint32_t us)
+{
+	struct vmw_marker *marker;
+	uint32_t seqno;
+	int ret;
+
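+	/*
+	 * Wait on the oldest outstanding marker, or on the most
+	 * recently emitted seqno if the queue is empty, and pull
+	 * completed markers until the lag drops below @us.
+	 */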
+	while (!vmw_lag_lt(queue, us)) {
+		spin_lock(&queue->lock);
+		if (list_empty(&queue->head))
+			seqno = atomic_read(&dev_priv->marker_seq);
+		else {
+			marker = list_first_entry(&queue->head,
+						 struct vmw_marker, head);
+			seqno = marker->seqno;
+		}
+		spin_unlock(&queue->lock);
+
+		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
+					3*HZ);
+
+		if (unlikely(ret != 0))
+			return ret;
+
+		(void) vmw_marker_pull(queue, seqno);
+	}
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
new file mode 100644
index 0000000..87e39f6
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -0,0 +1,619 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
+
+#include <drm/ttm/ttm_placement.h>
+
+#include "svga_overlay.h"
+#include "svga_escape.h"
+
+#define VMW_MAX_NUM_STREAMS 1
+#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
+
+struct vmw_stream {
+	struct vmw_dma_buffer *buf;
+	bool claimed;
+	bool paused;
+	struct drm_vmw_control_stream_arg saved;
+};
+
+/**
+ * Overlay control
+ */
+struct vmw_overlay {
+	/*
+	 * Each stream is a single overlay. In Xv these are called ports.
+	 */
+	struct mutex mutex;
+	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
+};
+
+static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	return dev_priv ? dev_priv->overlay_priv : NULL;
+}
+
+struct vmw_escape_header {
+	uint32_t cmd;
+	SVGAFifoCmdEscape body;
+};
+
+struct vmw_escape_video_flush {
+	struct vmw_escape_header escape;
+	SVGAEscapeVideoFlush flush;
+};
+
+static inline void fill_escape(struct vmw_escape_header *header,
+			       uint32_t size)
+{
+	header->cmd = SVGA_CMD_ESCAPE;
+	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
+	header->body.size = size;
+}
+
+static inline void fill_flush(struct vmw_escape_video_flush *cmd,
+			      uint32_t stream_id)
+{
+	fill_escape(&cmd->escape, sizeof(cmd->flush));
+	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
+	cmd->flush.streamId = stream_id;
+}
+
+/**
+ * Send put command to hw.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+static int vmw_overlay_send_put(struct vmw_private *dev_priv,
+				struct vmw_dma_buffer *buf,
+				struct drm_vmw_control_stream_arg *arg,
+				bool interruptible)
+{
+	struct vmw_escape_video_flush *flush;
+	size_t fifo_size;
+	bool have_so = dev_priv->sou_priv ? true : false;
+	int i, num_items;
+	SVGAGuestPtr ptr;
+
+	struct {
+		struct vmw_escape_header escape;
+		struct {
+			uint32_t cmdType;
+			uint32_t streamId;
+		} header;
+	} *cmds;
+	struct {
+		uint32_t registerId;
+		uint32_t value;
+	} *items;
+
+	/* The register defines are indices, so the count is the highest index + 1. */
+	if (have_so)
+		num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
+	else
+		num_items = SVGA_VIDEO_PITCH_3 + 1;
+
+	fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
+
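+	/*
+	 * The reserved FIFO space holds the SET_REGS escape command,
+	 * num_items register/value pairs and a trailing escape-wrapped
+	 * video flush command.
+	 */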
+	cmds = vmw_fifo_reserve(dev_priv, fifo_size);
+	/* hardware has hung, can't do anything here */
+	if (!cmds)
+		return -ENOMEM;
+
+	items = (typeof(items))&cmds[1];
+	flush = (struct vmw_escape_video_flush *)&items[num_items];
+
+	/* The escape body size is the header (one item-sized entry) + num_items items. */
+	fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));
+
+	cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+	cmds->header.streamId = arg->stream_id;
+
+	/* the IDs are neatly numbered */
+	for (i = 0; i < num_items; i++)
+		items[i].registerId = i;
+
+	vmw_bo_get_guest_ptr(&buf->base, &ptr);
+	ptr.offset += arg->offset;
+
+	items[SVGA_VIDEO_ENABLED].value     = true;
+	items[SVGA_VIDEO_FLAGS].value       = arg->flags;
+	items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
+	items[SVGA_VIDEO_FORMAT].value      = arg->format;
+	items[SVGA_VIDEO_COLORKEY].value    = arg->color_key;
+	items[SVGA_VIDEO_SIZE].value        = arg->size;
+	items[SVGA_VIDEO_WIDTH].value       = arg->width;
+	items[SVGA_VIDEO_HEIGHT].value      = arg->height;
+	items[SVGA_VIDEO_SRC_X].value       = arg->src.x;
+	items[SVGA_VIDEO_SRC_Y].value       = arg->src.y;
+	items[SVGA_VIDEO_SRC_WIDTH].value   = arg->src.w;
+	items[SVGA_VIDEO_SRC_HEIGHT].value  = arg->src.h;
+	items[SVGA_VIDEO_DST_X].value       = arg->dst.x;
+	items[SVGA_VIDEO_DST_Y].value       = arg->dst.y;
+	items[SVGA_VIDEO_DST_WIDTH].value   = arg->dst.w;
+	items[SVGA_VIDEO_DST_HEIGHT].value  = arg->dst.h;
+	items[SVGA_VIDEO_PITCH_1].value     = arg->pitch[0];
+	items[SVGA_VIDEO_PITCH_2].value     = arg->pitch[1];
+	items[SVGA_VIDEO_PITCH_3].value     = arg->pitch[2];
+	if (have_so) {
+		items[SVGA_VIDEO_DATA_GMRID].value    = ptr.gmrId;
+		items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
+	}
+
+	fill_flush(flush, arg->stream_id);
+
+	vmw_fifo_commit(dev_priv, fifo_size);
+
+	return 0;
+}
+
+/**
+ * Send stop command to hw.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
+				 uint32_t stream_id,
+				 bool interruptible)
+{
+	struct {
+		struct vmw_escape_header escape;
+		SVGAEscapeVideoSetRegs body;
+		struct vmw_escape_video_flush flush;
+	} *cmds;
+	int ret;
+
+	for (;;) {
+		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
+		if (cmds)
+			break;
+
+		ret = vmw_fallback_wait(dev_priv, false, true, 0,
+					interruptible, 3*HZ);
+		if (interruptible && ret == -ERESTARTSYS)
+			return ret;
+		else
+			BUG_ON(ret != 0);
+	}
+
+	fill_escape(&cmds->escape, sizeof(cmds->body));
+	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+	cmds->body.header.streamId = stream_id;
+	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
+	cmds->body.items[0].value = false;
+	fill_flush(&cmds->flush, stream_id);
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmds));
+
+	return 0;
+}
+
+/**
+ * Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
+ *
+ * With the introduction of screen objects, buffers can now be
+ * placed in GMRs instead of being locked to vram.
+ */
+static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
+				   struct vmw_dma_buffer *buf,
+				   bool pin, bool inter)
+{
+	if (!pin)
+		return vmw_dmabuf_unpin(dev_priv, buf, inter);
+
+	if (!dev_priv->sou_priv)
+		return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);
+
+	return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
+}
+
+/**
+ * Stop or pause a stream.
+ *
+ * If the stream is paused, the no-evict flag is removed from the buffer,
+ * but the buffer is left in vram. This allows, for instance, mode_set to
+ * evict it should it need to.
+ *
+ * The caller must hold the overlay lock.
+ *
+ * @stream_id which stream to stop/pause.
+ * @pause true to pause, false to stop completely.
+ */
+static int vmw_overlay_stop(struct vmw_private *dev_priv,
+			    uint32_t stream_id, bool pause,
+			    bool interruptible)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	struct vmw_stream *stream = &overlay->stream[stream_id];
+	int ret;
+
+	/* With no buffer attached, the stream is already completely stopped. */
+	if (!stream->buf)
+		return 0;
+
+	/* If the stream is paused this is already done */
+	if (!stream->paused) {
+		ret = vmw_overlay_send_stop(dev_priv, stream_id,
+					    interruptible);
+		if (ret)
+			return ret;
+
+		/* Unpinning only removes the NO_EVICT flag, so this cannot return -ENOMEM. */
+		ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
+					      interruptible);
+		if (interruptible && ret == -ERESTARTSYS)
+			return ret;
+		else
+			BUG_ON(ret != 0);
+	}
+
+	if (!pause) {
+		vmw_dmabuf_unreference(&stream->buf);
+		stream->paused = false;
+	} else {
+		stream->paused = true;
+	}
+
+	return 0;
+}
+
+/**
+ * Update a stream and send any put or stop fifo commands needed.
+ *
+ * The caller must hold the overlay lock.
+ *
+ * Returns
+ * -ENOMEM if buffer doesn't fit in vram.
+ * -ERESTARTSYS if interrupted.
+ */
+static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
+				     struct vmw_dma_buffer *buf,
+				     struct drm_vmw_control_stream_arg *arg,
+				     bool interruptible)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
+	int ret = 0;
+
+	if (!buf)
+		return -EINVAL;
+
+	DRM_DEBUG("   %s: old %p, new %p, %spaused\n", __func__,
+		  stream->buf, buf, stream->paused ? "" : "not ");
+
+	if (stream->buf != buf) {
+		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
+				       false, interruptible);
+		if (ret)
+			return ret;
+	} else if (!stream->paused) {
+		/* If the buffers match and not paused then just send
+		 * the put command, no need to do anything else.
+		 */
+		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
+		if (ret == 0)
+			stream->saved = *arg;
+		else
+			BUG_ON(!interruptible);
+
+		return ret;
+	}
+
+	/* We don't start the old stream if we are interrupted.
+	 * Might return -ENOMEM if it can't fit the buffer in vram.
+	 */
+	ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
+	if (ret)
+		return ret;
+
+	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
+	if (ret) {
+		/* This one needs to happen no matter what. We only remove
+		 * the NO_EVICT flag so this is safe from -ENOMEM.
+		 */
+		BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
+		       != 0);
+		return ret;
+	}
+
+	if (stream->buf != buf)
+		stream->buf = vmw_dmabuf_reference(buf);
+	stream->saved = *arg;
+	/* stream is no longer stopped/paused */
+	stream->paused = false;
+
+	return 0;
+}
+
+/**
+ * Stop all streams.
+ *
+ * Used by the fb code when starting.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_stop_all(struct vmw_private *dev_priv)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	int i, ret;
+
+	if (!overlay)
+		return 0;
+
+	mutex_lock(&overlay->mutex);
+
+	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+		struct vmw_stream *stream = &overlay->stream[i];
+		if (!stream->buf)
+			continue;
+
+		ret = vmw_overlay_stop(dev_priv, i, false, false);
+		WARN_ON(ret != 0);
+	}
+
+	mutex_unlock(&overlay->mutex);
+
+	return 0;
+}
+
+/**
+ * Try to resume all paused streams.
+ *
+ * Used by the kms code after moving a new scanout buffer to vram.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_resume_all(struct vmw_private *dev_priv)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	int i, ret;
+
+	if (!overlay)
+		return 0;
+
+	mutex_lock(&overlay->mutex);
+
+	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+		struct vmw_stream *stream = &overlay->stream[i];
+		if (!stream->paused)
+			continue;
+
+		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
+						&stream->saved, false);
+		if (ret != 0)
+			DRM_INFO("%s: *warning* failed to resume stream %i\n",
+				 __func__, i);
+	}
+
+	mutex_unlock(&overlay->mutex);
+
+	return 0;
+}
+
+/**
+ * Pauses all active streams.
+ *
+ * Used by the kms code when moving a new scanout buffer to vram.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_pause_all(struct vmw_private *dev_priv)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	int i, ret;
+
+	if (!overlay)
+		return 0;
+
+	mutex_lock(&overlay->mutex);
+
+	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+		if (overlay->stream[i].paused)
+			DRM_INFO("%s: *warning* stream %i already paused\n",
+				 __func__, i);
+		ret = vmw_overlay_stop(dev_priv, i, true, false);
+		WARN_ON(ret != 0);
+	}
+
+	mutex_unlock(&overlay->mutex);
+
+	return 0;
+}
+
+
+static bool vmw_overlay_available(const struct vmw_private *dev_priv)
+{
+	return (dev_priv->overlay_priv != NULL &&
+		((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
+		 VMW_OVERLAY_CAP_MASK));
+}
+
+int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	struct drm_vmw_control_stream_arg *arg =
+	    (struct drm_vmw_control_stream_arg *)data;
+	struct vmw_dma_buffer *buf;
+	struct vmw_resource *res;
+	int ret;
+
+	if (!vmw_overlay_available(dev_priv))
+		return -ENOSYS;
+
+	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
+	if (ret)
+		return ret;
+
+	mutex_lock(&overlay->mutex);
+
+	if (!arg->enabled) {
+		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
+		goto out_unlock;
+	}
+
+	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
+	if (ret)
+		goto out_unlock;
+
+	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
+
+	vmw_dmabuf_unreference(&buf);
+
+out_unlock:
+	mutex_unlock(&overlay->mutex);
+	vmw_resource_unreference(&res);
+
+	return ret;
+}
+
+int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
+{
+	if (!vmw_overlay_available(dev_priv))
+		return 0;
+
+	return VMW_MAX_NUM_STREAMS;
+}
+
+int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	int i, k;
+
+	if (!vmw_overlay_available(dev_priv))
+		return 0;
+
+	mutex_lock(&overlay->mutex);
+
+	for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
+		if (!overlay->stream[i].claimed)
+			k++;
+
+	mutex_unlock(&overlay->mutex);
+
+	return k;
+}
+
+int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	int i;
+
+	if (!overlay)
+		return -ENOSYS;
+
+	mutex_lock(&overlay->mutex);
+
+	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+
+		if (overlay->stream[i].claimed)
+			continue;
+
+		overlay->stream[i].claimed = true;
+		*out = i;
+		mutex_unlock(&overlay->mutex);
+		return 0;
+	}
+
+	mutex_unlock(&overlay->mutex);
+	return -ESRCH;
+}
+
+int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+
+	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);
+
+	if (!overlay)
+		return -ENOSYS;
+
+	mutex_lock(&overlay->mutex);
+
+	WARN_ON(!overlay->stream[stream_id].claimed);
+	vmw_overlay_stop(dev_priv, stream_id, false, false);
+	overlay->stream[stream_id].claimed = false;
+
+	mutex_unlock(&overlay->mutex);
+	return 0;
+}
+
+int vmw_overlay_init(struct vmw_private *dev_priv)
+{
+	struct vmw_overlay *overlay;
+	int i;
+
+	if (dev_priv->overlay_priv)
+		return -EINVAL;
+
+	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+	if (!overlay)
+		return -ENOMEM;
+
+	mutex_init(&overlay->mutex);
+	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+		overlay->stream[i].buf = NULL;
+		overlay->stream[i].paused = false;
+		overlay->stream[i].claimed = false;
+	}
+
+	dev_priv->overlay_priv = overlay;
+
+	return 0;
+}
+
+int vmw_overlay_close(struct vmw_private *dev_priv)
+{
+	struct vmw_overlay *overlay = dev_priv->overlay_priv;
+	bool forgotten_buffer = false;
+	int i;
+
+	if (!overlay)
+		return -ENOSYS;
+
+	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+		if (overlay->stream[i].buf) {
+			forgotten_buffer = true;
+			vmw_overlay_stop(dev_priv, i, false, false);
+		}
+	}
+
+	WARN_ON(forgotten_buffer);
+
+	dev_priv->overlay_priv = NULL;
+	kfree(overlay);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
new file mode 100644
index 0000000..9d0dd3a
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -0,0 +1,57 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * This file contains virtual hardware defines for kernel space.
+ */
+
+#ifndef _VMWGFX_REG_H_
+#define _VMWGFX_REG_H_
+
+#include <linux/types.h>
+
+#define VMWGFX_INDEX_PORT     0x0
+#define VMWGFX_VALUE_PORT     0x1
+#define VMWGFX_IRQSTATUS_PORT 0x8
+
+struct svga_guest_mem_descriptor {
+	__le32 ppn;
+	__le32 num_pages;
+};
+
+struct svga_fifo_cmd_fence {
+	__le32 fence;
+};
+
+#define SVGA_SYNC_GENERIC         1
+#define SVGA_SYNC_FIFOFULL        2
+
+#include "svga_types.h"
+
+#include "svga3d_reg.h"
+
+#endif
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
new file mode 100644
index 0000000..407d7f9
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -0,0 +1,1298 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include <drm/vmwgfx_drm.h>
+#include <drm/ttm/ttm_object.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/drmP.h>
+#include "vmwgfx_resource_priv.h"
+
+struct vmw_user_dma_buffer {
+	struct ttm_base_object base;
+	struct vmw_dma_buffer dma;
+};
+
+struct vmw_bo_user_rep {
+	uint32_t handle;
+	uint64_t map_handle;
+};
+
+struct vmw_stream {
+	struct vmw_resource res;
+	uint32_t stream_id;
+};
+
+struct vmw_user_stream {
+	struct ttm_base_object base;
+	struct vmw_stream stream;
+};
+
+
+static uint64_t vmw_user_stream_size;
+
+static const struct vmw_res_func vmw_stream_func = {
+	.res_type = vmw_res_stream,
+	.needs_backup = false,
+	.may_evict = false,
+	.type_name = "video streams",
+	.backup_placement = NULL,
+	.create = NULL,
+	.destroy = NULL,
+	.bind = NULL,
+	.unbind = NULL
+};
+
+static inline struct vmw_dma_buffer *
+vmw_dma_buffer(struct ttm_buffer_object *bo)
+{
+	return container_of(bo, struct vmw_dma_buffer, base);
+}
+
+static inline struct vmw_user_dma_buffer *
+vmw_user_dma_buffer(struct ttm_buffer_object *bo)
+{
+	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
+}
+
+struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
+{
+	kref_get(&res->kref);
+	return res;
+}
+
+
+/**
+ * vmw_resource_release_id - release a resource id to the id manager.
+ *
+ * @res: Pointer to the resource.
+ *
+ * Release the resource id to the resource id manager and set it to -1
+ */
+void vmw_resource_release_id(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
+
+	write_lock(&dev_priv->resource_lock);
+	if (res->id != -1)
+		idr_remove(idr, res->id);
+	res->id = -1;
+	write_unlock(&dev_priv->resource_lock);
+}
+
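+/*
+ * Called via kref_put() with the resource lock write-held. The lock is
+ * dropped around the potentially sleeping teardown work and re-taken
+ * before the id is removed, since the caller unlocks after kref_put()
+ * returns.
+ */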
+static void vmw_resource_release(struct kref *kref)
+{
+	struct vmw_resource *res =
+	    container_of(kref, struct vmw_resource, kref);
+	struct vmw_private *dev_priv = res->dev_priv;
+	int id;
+	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
+
+	res->avail = false;
+	list_del_init(&res->lru_head);
+	write_unlock(&dev_priv->resource_lock);
+	if (res->backup) {
+		struct ttm_buffer_object *bo = &res->backup->base;
+
+		ttm_bo_reserve(bo, false, false, false, 0);
+		if (!list_empty(&res->mob_head) &&
+		    res->func->unbind != NULL) {
+			struct ttm_validate_buffer val_buf;
+
+			val_buf.bo = bo;
+			res->func->unbind(res, false, &val_buf);
+		}
+		res->backup_dirty = false;
+		list_del_init(&res->mob_head);
+		ttm_bo_unreserve(bo);
+		vmw_dmabuf_unreference(&res->backup);
+	}
+
+	if (likely(res->hw_destroy != NULL))
+		res->hw_destroy(res);
+
+	id = res->id;
+	if (res->res_free != NULL)
+		res->res_free(res);
+	else
+		kfree(res);
+
+	write_lock(&dev_priv->resource_lock);
+
+	if (id != -1)
+		idr_remove(idr, id);
+}
+
+void vmw_resource_unreference(struct vmw_resource **p_res)
+{
+	struct vmw_resource *res = *p_res;
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	*p_res = NULL;
+	write_lock(&dev_priv->resource_lock);
+	kref_put(&res->kref, vmw_resource_release);
+	write_unlock(&dev_priv->resource_lock);
+}
+
+
+/**
+ * vmw_resource_alloc_id - allocate a resource id from the id manager.
+ *
+ * @res: Pointer to the resource.
+ *
+ * Allocate the lowest free resource id from the id manager, and set
+ * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
+ */
+int vmw_resource_alloc_id(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+	struct idr *idr = &dev_priv->res_idr[res->func->res_type];
+
+	BUG_ON(res->id != -1);
+
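+	/*
+	 * Preload idr memory outside the resource lock, then allocate
+	 * the lowest free id (starting at 1) under the lock using
+	 * GFP_NOWAIT so the allocation itself never sleeps.
+	 */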
+	idr_preload(GFP_KERNEL);
+	write_lock(&dev_priv->resource_lock);
+
+	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
+	if (ret >= 0)
+		res->id = ret;
+
+	write_unlock(&dev_priv->resource_lock);
+	idr_preload_end();
+	return ret < 0 ? ret : 0;
+}
+
+/**
+ * vmw_resource_init - initialize a struct vmw_resource
+ *
+ * @dev_priv:       Pointer to a device private struct.
+ * @res:            The struct vmw_resource to initialize.
+ * @obj_type:       Resource object type.
+ * @delay_id:       Boolean whether to defer device id allocation until
+ *                  the first validation.
+ * @res_free:       Resource destructor.
+ * @func:           Resource function table.
+ */
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+		      bool delay_id,
+		      void (*res_free) (struct vmw_resource *res),
+		      const struct vmw_res_func *func)
+{
+	kref_init(&res->kref);
+	res->hw_destroy = NULL;
+	res->res_free = res_free;
+	res->avail = false;
+	res->dev_priv = dev_priv;
+	res->func = func;
+	INIT_LIST_HEAD(&res->lru_head);
+	INIT_LIST_HEAD(&res->mob_head);
+	res->id = -1;
+	res->backup = NULL;
+	res->backup_offset = 0;
+	res->backup_dirty = false;
+	res->res_dirty = false;
+	if (delay_id)
+		return 0;
+	else
+		return vmw_resource_alloc_id(res);
+}
+
+/**
+ * vmw_resource_activate
+ *
+ * @res:        Pointer to the newly created resource
+ * @hw_destroy: Destroy function. NULL if none.
+ *
+ * Activate a resource after the hardware has been made aware of it.
+ * Set the destroy function to @hw_destroy. Typically this frees the
+ * resource and destroys the hardware resources associated with it.
+ * Activate basically means that the function vmw_resource_lookup will
+ * find it.
+ */
+void vmw_resource_activate(struct vmw_resource *res,
+			   void (*hw_destroy) (struct vmw_resource *))
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	write_lock(&dev_priv->resource_lock);
+	res->avail = true;
+	res->hw_destroy = hw_destroy;
+	write_unlock(&dev_priv->resource_lock);
+}
+
+struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
+					 struct idr *idr, int id)
+{
+	struct vmw_resource *res;
+
+	read_lock(&dev_priv->resource_lock);
+	res = idr_find(idr, id);
+	if (res && res->avail)
+		kref_get(&res->kref);
+	else
+		res = NULL;
+	read_unlock(&dev_priv->resource_lock);
+
+	if (unlikely(res == NULL))
+		return NULL;
+
+	return res;
+}
+
+/**
+ * vmw_user_resource_lookup_handle - lookup a struct resource from a
+ * TTM user-space handle and perform basic type checks
+ *
+ * @dev_priv:     Pointer to a device private struct
+ * @tfile:        Pointer to a struct ttm_object_file identifying the caller
+ * @handle:       The TTM user-space handle
+ * @converter:    Pointer to an object describing the resource type
+ * @p_res:        On successful return the location pointed to will contain
+ *                a pointer to a refcounted struct vmw_resource.
+ *
+ * If the handle can't be found or is associated with an incorrect resource
+ * type, -EINVAL will be returned.
+ */
+int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
+				    struct ttm_object_file *tfile,
+				    uint32_t handle,
+				    const struct vmw_user_resource_conv
+				    *converter,
+				    struct vmw_resource **p_res)
+{
+	struct ttm_base_object *base;
+	struct vmw_resource *res;
+	int ret = -EINVAL;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL))
+		return -EINVAL;
+
+	if (unlikely(base->object_type != converter->object_type))
+		goto out_bad_resource;
+
+	res = converter->base_obj_to_res(base);
+
+	read_lock(&dev_priv->resource_lock);
+	if (!res->avail || res->res_free != converter->res_free) {
+		read_unlock(&dev_priv->resource_lock);
+		goto out_bad_resource;
+	}
+
+	kref_get(&res->kref);
+	read_unlock(&dev_priv->resource_lock);
+
+	*p_res = res;
+	ret = 0;
+
+out_bad_resource:
+	ttm_base_object_unref(&base);
+
+	return ret;
+}
+
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed to by @out_surf and @out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+			   struct ttm_object_file *tfile,
+			   uint32_t handle,
+			   struct vmw_surface **out_surf,
+			   struct vmw_dma_buffer **out_buf)
+{
+	struct vmw_resource *res;
+	int ret;
+
+	BUG_ON(*out_surf || *out_buf);
+
+	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
+					      user_surface_converter,
+					      &res);
+	if (!ret) {
+		*out_surf = vmw_res_to_srf(res);
+		return 0;
+	}
+
+	*out_surf = NULL;
+	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+	return ret;
+}
+
+/**
+ * Buffer management.
+ */
+void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+{
+	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+
+	kfree(vmw_bo);
+}
+
+int vmw_dmabuf_init(struct vmw_private *dev_priv,
+		    struct vmw_dma_buffer *vmw_bo,
+		    size_t size, struct ttm_placement *placement,
+		    bool interruptible,
+		    void (*bo_free) (struct ttm_buffer_object *bo))
+{
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	size_t acc_size;
+	int ret;
+
+	BUG_ON(!bo_free);
+
+	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
+	memset(vmw_bo, 0, sizeof(*vmw_bo));
+
+	INIT_LIST_HEAD(&vmw_bo->res_list);
+
+	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+			  ttm_bo_type_device, placement,
+			  0, interruptible,
+			  NULL, acc_size, NULL, bo_free);
+	return ret;
+}
+
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+{
+	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+
+	ttm_base_object_kfree(vmw_user_bo, base);
+}
+
+static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
+{
+	struct vmw_user_dma_buffer *vmw_user_bo;
+	struct ttm_base_object *base = *p_base;
+	struct ttm_buffer_object *bo;
+
+	*p_base = NULL;
+
+	if (unlikely(base == NULL))
+		return;
+
+	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+	bo = &vmw_user_bo->dma.base;
+	ttm_bo_unref(&bo);
+}
+
+/**
+ * vmw_user_dmabuf_alloc - Allocate a user dma buffer
+ *
+ * @dev_priv: Pointer to a struct device private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the dma buffer.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
+ * should be assigned.
+ */
+int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+			  struct ttm_object_file *tfile,
+			  uint32_t size,
+			  bool shareable,
+			  uint32_t *handle,
+			  struct vmw_dma_buffer **p_dma_buf)
+{
+	struct vmw_user_dma_buffer *user_bo;
+	struct ttm_buffer_object *tmp;
+	int ret;
+
+	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+	if (unlikely(user_bo == NULL)) {
+		DRM_ERROR("Failed to allocate a buffer.\n");
+		return -ENOMEM;
+	}
+
+	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+			      &vmw_vram_sys_placement, true,
+			      &vmw_user_dmabuf_destroy);
+	if (unlikely(ret != 0))
+		return ret;
+
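+	/*
+	 * The base object takes its own reference on the buffer, which
+	 * is dropped on failure below and otherwise released through
+	 * vmw_user_dmabuf_release().
+	 */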
+	tmp = ttm_bo_reference(&user_bo->dma.base);
+	ret = ttm_base_object_init(tfile,
+				   &user_bo->base,
+				   shareable,
+				   ttm_buffer_type,
+				   &vmw_user_dmabuf_release, NULL);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unref(&tmp);
+		goto out_no_base_object;
+	}
+
+	*p_dma_buf = &user_bo->dma;
+	*handle = user_bo->base.hash.key;
+
+out_no_base_object:
+	return ret;
+}
+
+/**
+ * vmw_user_dmabuf_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
+ */
+int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+				  struct ttm_object_file *tfile)
+{
+	struct vmw_user_dma_buffer *vmw_user_bo;
+
+	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
+		return -EPERM;
+
+	vmw_user_bo = vmw_user_dma_buffer(bo);
+	return (vmw_user_bo->base.tfile == tfile ||
+		vmw_user_bo->base.shareable) ? 0 : -EPERM;
+}
+
+int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	union drm_vmw_alloc_dmabuf_arg *arg =
+	    (union drm_vmw_alloc_dmabuf_arg *)data;
+	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+	struct vmw_dma_buffer *dma_buf;
+	uint32_t handle;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	int ret;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				    req->size, false, &handle, &dma_buf);
+	if (unlikely(ret != 0))
+		goto out_no_dmabuf;
+
+	rep->handle = handle;
+	rep->map_handle = dma_buf->base.addr_space_offset;
+	rep->cur_gmr_id = handle;
+	rep->cur_gmr_offset = 0;
+
+	vmw_dmabuf_unreference(&dma_buf);
+
+out_no_dmabuf:
+	ttm_read_unlock(&vmaster->lock);
+
+	return ret;
+}
+
+int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_vmw_unref_dmabuf_arg *arg =
+	    (struct drm_vmw_unref_dmabuf_arg *)data;
+
+	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					 arg->handle,
+					 TTM_REF_USAGE);
+}
+
+int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
+			   uint32_t handle, struct vmw_dma_buffer **out)
+{
+	struct vmw_user_dma_buffer *vmw_user_bo;
+	struct ttm_base_object *base;
+
+	base = ttm_base_object_lookup(tfile, handle);
+	if (unlikely(base == NULL)) {
+		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
+		       (unsigned long)handle);
+		return -ESRCH;
+	}
+
+	if (unlikely(base->object_type != ttm_buffer_type)) {
+		ttm_base_object_unref(&base);
+		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
+		       (unsigned long)handle);
+		return -EINVAL;
+	}
+
+	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
+	ttm_base_object_unref(&base);
+	*out = &vmw_user_bo->dma;
+
+	return 0;
+}
+
+int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+			      struct vmw_dma_buffer *dma_buf)
+{
+	struct vmw_user_dma_buffer *user_bo;
+
+	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
+		return -EINVAL;
+
+	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+	return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+}
+
+/*
+ * Stream management
+ */
+
+static void vmw_stream_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_stream *stream;
+	int ret;
+
+	DRM_INFO("%s: unref\n", __func__);
+	stream = container_of(res, struct vmw_stream, res);
+
+	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
+	WARN_ON(ret != 0);
+}
+
+static int vmw_stream_init(struct vmw_private *dev_priv,
+			   struct vmw_stream *stream,
+			   void (*res_free) (struct vmw_resource *res))
+{
+	struct vmw_resource *res = &stream->res;
+	int ret;
+
+	ret = vmw_resource_init(dev_priv, res, false, res_free,
+				&vmw_stream_func);
+
+	if (unlikely(ret != 0)) {
+		if (res_free == NULL)
+			kfree(stream);
+		else
+			res_free(&stream->res);
+		return ret;
+	}
+
+	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
+	if (ret) {
+		vmw_resource_unreference(&res);
+		return ret;
+	}
+
+	DRM_INFO("%s: claimed\n", __func__);
+
+	vmw_resource_activate(&stream->res, vmw_stream_destroy);
+	return 0;
+}
+
+static void vmw_user_stream_free(struct vmw_resource *res)
+{
+	struct vmw_user_stream *stream =
+	    container_of(res, struct vmw_user_stream, stream.res);
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	ttm_base_object_kfree(stream, base);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv),
+			    vmw_user_stream_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_user_stream *stream =
+	    container_of(base, struct vmw_user_stream, base);
+	struct vmw_resource *res = &stream->stream.res;
+
+	*p_base = NULL;
+	vmw_resource_unreference(&res);
+}
+
+int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_resource *res;
+	struct vmw_user_stream *stream;
+	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
+	int ret = 0;
+
+
+	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
+	if (unlikely(res == NULL))
+		return -EINVAL;
+
+	if (res->res_free != &vmw_user_stream_free) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	stream = container_of(res, struct vmw_user_stream, stream.res);
+	if (stream->base.tfile != tfile) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
+out:
+	vmw_resource_unreference(&res);
+	return ret;
+}
+
+int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_stream *stream;
+	struct vmw_resource *res;
+	struct vmw_resource *tmp;
+	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	int ret;
+
+	/*
+	 * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by the maximum number of streams anyway.
+	 */
+
+	if (unlikely(vmw_user_stream_size == 0))
+		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_user_stream_size,
+				   false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for stream"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+
+	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+	if (unlikely(stream == NULL)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_user_stream_size);
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	res = &stream->stream.res;
+	stream->base.shareable = false;
+	stream->base.tfile = NULL;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+
+	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	tmp = vmw_resource_reference(res);
+	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
+				   &vmw_user_stream_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		goto out_err;
+	}
+
+	arg->stream_id = res->id;
+out_err:
+	vmw_resource_unreference(&res);
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+}
+
+int vmw_user_stream_lookup(struct vmw_private *dev_priv,
+			   struct ttm_object_file *tfile,
+			   uint32_t *inout_id, struct vmw_resource **out)
+{
+	struct vmw_user_stream *stream;
+	struct vmw_resource *res;
+	int ret;
+
+	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
+				  *inout_id);
+	if (unlikely(res == NULL))
+		return -EINVAL;
+
+	if (res->res_free != &vmw_user_stream_free) {
+		ret = -EINVAL;
+		goto err_ref;
+	}
+
+	stream = container_of(res, struct vmw_user_stream, stream.res);
+	if (stream->base.tfile != tfile) {
+		ret = -EPERM;
+		goto err_ref;
+	}
+
+	*inout_id = stream->stream.stream_id;
+	*out = res;
+	return 0;
+err_ref:
+	vmw_resource_unreference(&res);
+	return ret;
+}
+
+
+int vmw_dumb_create(struct drm_file *file_priv,
+		    struct drm_device *dev,
+		    struct drm_mode_create_dumb *args)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	struct vmw_user_dma_buffer *vmw_user_bo;
+	struct ttm_buffer_object *tmp;
+	int ret;
+
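+	/* Round bpp up to whole bytes when computing the pitch. */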
+	args->pitch = args->width * ((args->bpp + 7) / 8);
+	args->size = args->pitch * args->height;
+
+	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
+	if (vmw_user_bo == NULL)
+		return -ENOMEM;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (ret != 0) {
+		kfree(vmw_user_bo);
+		return ret;
+	}
+
+	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
+			      &vmw_vram_sys_placement, true,
+			      &vmw_user_dmabuf_destroy);
+	if (ret != 0)
+		goto out_no_dmabuf;
+
+	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
+	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
+				   &vmw_user_bo->base,
+				   false,
+				   ttm_buffer_type,
+				   &vmw_user_dmabuf_release, NULL);
+	if (unlikely(ret != 0))
+		goto out_no_base_object;
+
+	args->handle = vmw_user_bo->base.hash.key;
+
+out_no_base_object:
+	ttm_bo_unref(&tmp);
+out_no_dmabuf:
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+}
+
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+			struct drm_device *dev, uint32_t handle,
+			uint64_t *offset)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_dma_buffer *out_buf;
+	int ret;
+
+	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+	if (ret != 0)
+		return -EINVAL;
+
+	*offset = out_buf->base.addr_space_offset;
+	vmw_dmabuf_unreference(&out_buf);
+	return 0;
+}
+
+int vmw_dumb_destroy(struct drm_file *file_priv,
+		     struct drm_device *dev,
+		     uint32_t handle)
+{
+	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					 handle, TTM_REF_USAGE);
+}
+
+/**
+ * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ *
+ * @res:            The resource for which to allocate a backup buffer.
+ * @interruptible:  Whether any sleeps during allocation should be
+ *                  performed while interruptible.
+ */
+static int vmw_resource_buf_alloc(struct vmw_resource *res,
+				  bool interruptible)
+{
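+	/* Round the backup size up to a whole number of pages. */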
+	unsigned long size =
+		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+	struct vmw_dma_buffer *backup;
+	int ret;
+
+	if (likely(res->backup)) {
+		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+		return 0;
+	}
+
+	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
+	if (unlikely(backup == NULL))
+		return -ENOMEM;
+
+	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+			      res->func->backup_placement,
+			      interruptible,
+			      &vmw_dmabuf_bo_free);
+	if (unlikely(ret != 0))
+		goto out_no_dmabuf;
+
+	res->backup = backup;
+
+out_no_dmabuf:
+	return ret;
+}
+
+/**
+ * vmw_resource_do_validate - Make a resource up-to-date and visible
+ *                            to the device.
+ *
+ * @res:            The resource to make visible to the device.
+ * @val_buf:        Information about a buffer possibly
+ *                  containing backup data if a bind operation is needed.
+ *
+ * On hardware resource shortage, this function returns -EBUSY and
+ * should be retried once resources have been freed up.
+ */
+static int vmw_resource_do_validate(struct vmw_resource *res,
+				    struct ttm_validate_buffer *val_buf)
+{
+	int ret = 0;
+	const struct vmw_res_func *func = res->func;
+
+	if (unlikely(res->id == -1)) {
+		ret = func->create(res);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
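+	/*
+	 * Bind the backup buffer if one was supplied: either the
+	 * resource type needs a backup and none is bound yet, or the
+	 * type binds unconditionally.
+	 */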
+	if (func->bind &&
+	    ((func->needs_backup && list_empty(&res->mob_head) &&
+	      val_buf->bo != NULL) ||
+	     (!func->needs_backup && val_buf->bo != NULL))) {
+		ret = func->bind(res, val_buf);
+		if (unlikely(ret != 0))
+			goto out_bind_failed;
+		if (func->needs_backup)
+			list_add_tail(&res->mob_head, &res->backup->res_list);
+	}
+
+	/*
+	 * Only do this on write operations, and move to
+	 * vmw_resource_unreserve if it can be called after
+	 * backup buffers have been unreserved. Otherwise
+	 * sort out locking.
+	 */
+	res->res_dirty = true;
+
+	return 0;
+
+out_bind_failed:
+	func->destroy(res);
+
+	return ret;
+}
+
+/**
+ * vmw_resource_unreserve - Unreserve a resource previously reserved for
+ * command submission.
+ *
+ * @res:               Pointer to the struct vmw_resource to unreserve.
+ * @new_backup:        Pointer to new backup buffer if command submission
+ *                     switched.
+ * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *
+ * Currently unreserving a resource means putting it back on the device's
+ * resource lru list, so that it can be evicted if necessary.
+ */
+void vmw_resource_unreserve(struct vmw_resource *res,
+			    struct vmw_dma_buffer *new_backup,
+			    unsigned long new_backup_offset)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	if (!list_empty(&res->lru_head))
+		return;
+
+	if (new_backup && new_backup != res->backup) {
+
+		if (res->backup) {
+			BUG_ON(!ttm_bo_is_reserved(&res->backup->base));
+			list_del_init(&res->mob_head);
+			vmw_dmabuf_unreference(&res->backup);
+		}
+
+		res->backup = vmw_dmabuf_reference(new_backup);
+		BUG_ON(!ttm_bo_is_reserved(&new_backup->base));
+		list_add_tail(&res->mob_head, &new_backup->res_list);
+	}
+	if (new_backup)
+		res->backup_offset = new_backup_offset;
+
+	if (!res->func->may_evict || res->id == -1)
+		return;
+
+	write_lock(&dev_priv->resource_lock);
+	list_add_tail(&res->lru_head,
+		      &res->dev_priv->res_lru[res->func->res_type]);
+	write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_check_buffer - Check whether a backup buffer is needed
+ *                             for a resource and in that case, allocate
+ *                             one, reserve and validate it.
+ *
+ * @res:            The resource for which to allocate a backup buffer.
+ * @interruptible:  Whether any sleeps during allocation should be
+ *                  performed while interruptible.
+ * @val_buf:        On successful return contains data about the
+ *                  reserved and validated backup buffer.
+ */
+int vmw_resource_check_buffer(struct vmw_resource *res,
+			      bool interruptible,
+			      struct ttm_validate_buffer *val_buf)
+{
+	struct list_head val_list;
+	bool backup_dirty = false;
+	int ret;
+
+	if (unlikely(res->backup == NULL)) {
+		ret = vmw_resource_buf_alloc(res, interruptible);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	INIT_LIST_HEAD(&val_list);
+	val_buf->bo = ttm_bo_reference(&res->backup->base);
+	list_add_tail(&val_buf->head, &val_list);
+	ret = ttm_eu_reserve_buffers(&val_list);
+	if (unlikely(ret != 0))
+		goto out_no_reserve;
+
+	if (res->func->needs_backup && list_empty(&res->mob_head))
+		return 0;
+
+	backup_dirty = res->backup_dirty;
+	ret = ttm_bo_validate(&res->backup->base,
+			      res->func->backup_placement,
+			      true, false);
+
+	if (unlikely(ret != 0))
+		goto out_no_validate;
+
+	return 0;
+
+out_no_validate:
+	ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+	ttm_bo_unref(&val_buf->bo);
+	if (backup_dirty)
+		vmw_dmabuf_unreference(&res->backup);
+
+	return ret;
+}
+
+/**
+ * vmw_resource_reserve - Reserve a resource for command submission
+ *
+ * @res:            The resource to reserve.
+ *
+ * This function takes the resource off the LRU list and makes sure
+ * a backup buffer is present for guest-backed resources. However,
+ * the buffer may not be bound to the resource at this point.
+ *
+ */
+int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	int ret;
+
+	write_lock(&dev_priv->resource_lock);
+	list_del_init(&res->lru_head);
+	write_unlock(&dev_priv->resource_lock);
+
+	if (res->func->needs_backup && res->backup == NULL &&
+	    !no_backup) {
+		ret = vmw_resource_buf_alloc(res, true);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_resource_backoff_reservation - Unreserve and unreference a
+ *                                    backup buffer
+ *
+ * @val_buf:        Backup buffer information.
+ */
+void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+{
+	struct list_head val_list;
+
+	if (likely(val_buf->bo == NULL))
+		return;
+
+	INIT_LIST_HEAD(&val_list);
+	list_add_tail(&val_buf->head, &val_list);
+	ttm_eu_backoff_reservation(&val_list);
+	ttm_bo_unref(&val_buf->bo);
+}
+
+/**
+ * vmw_resource_do_evict - Evict a resource, and transfer its data
+ *                         to a backup buffer.
+ *
+ * @res:            The resource to evict.
+ */
+int vmw_resource_do_evict(struct vmw_resource *res)
+{
+	struct ttm_validate_buffer val_buf;
+	const struct vmw_res_func *func = res->func;
+	int ret;
+
+	BUG_ON(!func->may_evict);
+
+	val_buf.bo = NULL;
+	ret = vmw_resource_check_buffer(res, true, &val_buf);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(func->unbind != NULL &&
+		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
+		ret = func->unbind(res, res->res_dirty, &val_buf);
+		if (unlikely(ret != 0))
+			goto out_no_unbind;
+		list_del_init(&res->mob_head);
+	}
+	ret = func->destroy(res);
+	res->backup_dirty = true;
+	res->res_dirty = false;
+out_no_unbind:
+	vmw_resource_backoff_reservation(&val_buf);
+
+	return ret;
+}
+
+
+/**
+ * vmw_resource_validate - Make a resource up-to-date and visible
+ *                         to the device.
+ *
+ * @res:            The resource to make visible to the device.
+ *
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
+ * be reserved and validated.
+ * On hardware resource shortage, this function will repeatedly evict
+ * resources of the same type until the validation succeeds.
+ */
+int vmw_resource_validate(struct vmw_resource *res)
+{
+	int ret;
+	struct vmw_resource *evict_res;
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
+	struct ttm_validate_buffer val_buf;
+
+	if (likely(!res->func->may_evict))
+		return 0;
+
+	val_buf.bo = NULL;
+	if (res->backup)
+		val_buf.bo = &res->backup->base;
+	do {
+		ret = vmw_resource_do_validate(res, &val_buf);
+		if (likely(ret != -EBUSY))
+			break;
+
+		write_lock(&dev_priv->resource_lock);
+		if (list_empty(lru_list) || !res->func->may_evict) {
+			DRM_ERROR("Out of device id entries "
+				  "for %s.\n", res->func->type_name);
+			ret = -EBUSY;
+			write_unlock(&dev_priv->resource_lock);
+			break;
+		}
+
+		evict_res = vmw_resource_reference
+			(list_first_entry(lru_list, struct vmw_resource,
+					  lru_head));
+		list_del_init(&evict_res->lru_head);
+
+		write_unlock(&dev_priv->resource_lock);
+		vmw_resource_do_evict(evict_res);
+		vmw_resource_unreference(&evict_res);
+	} while (1);
+
+	if (unlikely(ret != 0))
+		goto out_no_validate;
+	else if (!res->func->needs_backup && res->backup) {
+		list_del_init(&res->mob_head);
+		vmw_dmabuf_unreference(&res->backup);
+	}
+
+	return 0;
+
+out_no_validate:
+	return ret;
+}
+
+/**
+ * vmw_fence_single_bo - Utility function to fence a single TTM buffer
+ *                       object without unreserving it.
+ *
+ * @bo:             Pointer to the struct ttm_buffer_object to fence.
+ * @fence:          Pointer to the fence. If NULL, this function will
+ *                  insert a fence into the command stream.
+ *
+ * In contrast to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+			 struct vmw_fence_obj *fence)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_driver *driver = bdev->driver;
+	struct vmw_fence_obj *old_fence_obj;
+	struct vmw_private *dev_priv =
+		container_of(bdev, struct vmw_private, bdev);
+
+	if (fence == NULL)
+		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	else
+		driver->sync_obj_ref(fence);
+
+	spin_lock(&bdev->fence_lock);
+
+	old_fence_obj = bo->sync_obj;
+	bo->sync_obj = fence;
+
+	spin_unlock(&bdev->fence_lock);
+
+	if (old_fence_obj)
+		vmw_fence_obj_unreference(&old_fence_obj);
+}
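+
+/*
+ * Illustrative sketch (not part of the driver): since this function
+ * does not unreserve the buffer object, a hypothetical caller drops
+ * the reservation separately once fencing is done:
+ *
+ *	vmw_fence_single_bo(bo, fence);
+ *	ttm_bo_unreserve(bo);
+ */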
+
+/**
+ * vmw_resource_move_notify - TTM move_notify_callback
+ *
+ * @bo:             The TTM buffer object about to move.
+ * @mem:            The struct ttm_mem_reg indicating to what memory
+ *                  region the move is taking place.
+ *
+ * For now does nothing.
+ */
+void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+			      struct ttm_mem_reg *mem)
+{
+}
+
+/**
+ * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
+ *
+ * @res:            The resource being queried.
+ */
+bool vmw_resource_needs_backup(const struct vmw_resource *res)
+{
+	return res->func->needs_backup;
+}
+
+/**
+ * vmw_resource_evict_type - Evict all resources of a specific type
+ *
+ * @dev_priv:       Pointer to a device private struct
+ * @type:           The resource type to evict
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources of a specific type.
+ */
+static void vmw_resource_evict_type(struct vmw_private *dev_priv,
+				    enum vmw_res_type type)
+{
+	struct list_head *lru_list = &dev_priv->res_lru[type];
+	struct vmw_resource *evict_res;
+
+	do {
+		write_lock(&dev_priv->resource_lock);
+
+		if (list_empty(lru_list))
+			goto out_unlock;
+
+		evict_res = vmw_resource_reference(
+			list_first_entry(lru_list, struct vmw_resource,
+					 lru_head));
+		list_del_init(&evict_res->lru_head);
+		write_unlock(&dev_priv->resource_lock);
+		vmw_resource_do_evict(evict_res);
+		vmw_resource_unreference(&evict_res);
+	} while (1);
+
+out_unlock:
+	write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_evict_all - Evict all evictable resources
+ *
+ * @dev_priv:       Pointer to a device private struct
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources. In particular this means that all
+ * guest-backed resources that are registered with the device are
+ * evicted and the OTable becomes clean.
+ */
+void vmw_resource_evict_all(struct vmw_private *dev_priv)
+{
+	enum vmw_res_type type;
+
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+
+	for (type = 0; type < vmw_res_max; ++type)
+		vmw_resource_evict_type(dev_priv, type);
+
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
new file mode 100644
index 0000000..f3adeed
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -0,0 +1,84 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_RESOURCE_PRIV_H_
+#define _VMWGFX_RESOURCE_PRIV_H_
+
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_user_resource_conv - Identify a derived user-exported resource
+ * type and provide a function to convert its ttm_base_object pointer to
+ * a struct vmw_resource
+ */
+struct vmw_user_resource_conv {
+	enum ttm_object_type object_type;
+	struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
+	void (*res_free) (struct vmw_resource *res);
+};
+
+/**
+ * struct vmw_res_func - members and functions common for a resource type
+ *
+ * @res_type:          Enum that identifies the lru list to use for eviction.
+ * @needs_backup:      Whether the resource is guest-backed and needs
+ *                     persistent buffer storage.
+ * @type_name:         String that identifies the resource type.
+ * @backup_placement:  TTM placement for backup buffers.
+ * @may_evict:         Whether the resource may be evicted.
+ * @create:            Create a hardware resource.
+ * @destroy:           Destroy a hardware resource.
+ * @bind:              Bind a hardware resource to persistent buffer storage.
+ * @unbind:            Unbind a hardware resource from persistent
+ *                     buffer storage.
+ */
+
+struct vmw_res_func {
+	enum vmw_res_type res_type;
+	bool needs_backup;
+	const char *type_name;
+	struct ttm_placement *backup_placement;
+	bool may_evict;
+
+	int (*create) (struct vmw_resource *res);
+	int (*destroy) (struct vmw_resource *res);
+	int (*bind) (struct vmw_resource *res,
+		     struct ttm_validate_buffer *val_buf);
+	int (*unbind) (struct vmw_resource *res,
+		       bool readback,
+		       struct ttm_validate_buffer *val_buf);
+};
+
+int vmw_resource_alloc_id(struct vmw_resource *res);
+void vmw_resource_release_id(struct vmw_resource *res);
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+		      bool delay_id,
+		      void (*res_free) (struct vmw_resource *res),
+		      const struct vmw_res_func *func);
+void vmw_resource_activate(struct vmw_resource *res,
+			   void (*hw_destroy) (struct vmw_resource *));
+#endif
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
new file mode 100644
index 0000000..26387c3
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -0,0 +1,571 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_kms.h"
+
+
+#define vmw_crtc_to_sou(x) \
+	container_of(x, struct vmw_screen_object_unit, base.crtc)
+#define vmw_encoder_to_sou(x) \
+	container_of(x, struct vmw_screen_object_unit, base.encoder)
+#define vmw_connector_to_sou(x) \
+	container_of(x, struct vmw_screen_object_unit, base.connector)
+
+struct vmw_screen_object_display {
+	unsigned num_implicit;
+
+	struct vmw_framebuffer *implicit_fb;
+};
+
+/**
+ * Display unit using screen objects.
+ */
+struct vmw_screen_object_unit {
+	struct vmw_display_unit base;
+
+	unsigned long buffer_size; /**< Size of allocated buffer */
+	struct vmw_dma_buffer *buffer; /**< Backing store buffer */
+
+	bool defined;
+	bool active_implicit;
+};
+
+static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
+{
+	vmw_display_unit_cleanup(&sou->base);
+	kfree(sou);
+}
+
+
+/*
+ * Screen Object Display Unit CRTC functions
+ */
+
+static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
+{
+	vmw_sou_destroy(vmw_crtc_to_sou(crtc));
+}
+
+static void vmw_sou_del_active(struct vmw_private *vmw_priv,
+			      struct vmw_screen_object_unit *sou)
+{
+	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
+
+	if (sou->active_implicit) {
+		if (--(ld->num_implicit) == 0)
+			ld->implicit_fb = NULL;
+		sou->active_implicit = false;
+	}
+}
+
+static void vmw_sou_add_active(struct vmw_private *vmw_priv,
+			      struct vmw_screen_object_unit *sou,
+			      struct vmw_framebuffer *vfb)
+{
+	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
+
+	BUG_ON(!ld->num_implicit && ld->implicit_fb);
+
+	if (!sou->active_implicit && sou->base.is_implicit) {
+		ld->implicit_fb = vfb;
+		sou->active_implicit = true;
+		ld->num_implicit++;
+	}
+}
+
+/**
+ * Send the fifo command to create a screen.
+ */
+static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
+			       struct vmw_screen_object_unit *sou,
+			       uint32_t x, uint32_t y,
+			       struct drm_display_mode *mode)
+{
+	size_t fifo_size;
+
+	struct {
+		struct {
+			uint32_t cmdType;
+		} header;
+		SVGAScreenObject obj;
+	} *cmd;
+
+	BUG_ON(!sou->buffer);
+
+	fifo_size = sizeof(*cmd);
+	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+	/* The hardware has hung, nothing we can do about it here. */
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Fifo reserve failed.\n");
+		return -ENOMEM;
+	}
+
+	memset(cmd, 0, fifo_size);
+	cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
+	cmd->obj.structSize = sizeof(SVGAScreenObject);
+	cmd->obj.id = sou->base.unit;
+	cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
+		(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
+	cmd->obj.size.width = mode->hdisplay;
+	cmd->obj.size.height = mode->vdisplay;
+	if (sou->base.is_implicit) {
+		cmd->obj.root.x = x;
+		cmd->obj.root.y = y;
+	} else {
+		cmd->obj.root.x = sou->base.gui_x;
+		cmd->obj.root.y = sou->base.gui_y;
+	}
+
+	/* Ok to assume that buffer is pinned in vram */
+	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
+	cmd->obj.backingStore.pitch = mode->hdisplay * 4;
+
+	vmw_fifo_commit(dev_priv, fifo_size);
+
+	sou->defined = true;
+
+	return 0;
+}
+
+/**
+ * Send the fifo command to destroy a screen.
+ */
+static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
+				struct vmw_screen_object_unit *sou)
+{
+	size_t fifo_size;
+	int ret;
+
+	struct {
+		struct {
+			uint32_t cmdType;
+		} header;
+		SVGAFifoCmdDestroyScreen body;
+	} *cmd;
+
+	/* no need to do anything */
+	if (unlikely(!sou->defined))
+		return 0;
+
+	fifo_size = sizeof(*cmd);
+	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+	/* the hardware has hung, nothing we can do about it here */
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Fifo reserve failed.\n");
+		return -ENOMEM;
+	}
+
+	memset(cmd, 0, fifo_size);
+	cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
+	cmd->body.screenId = sou->base.unit;
+
+	vmw_fifo_commit(dev_priv, fifo_size);
+
+	/* Force sync */
+	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
+	if (unlikely(ret != 0))
+		DRM_ERROR("Failed to sync with HW.\n");
+	else
+		sou->defined = false;
+
+	return ret;
+}
+
+/**
+ * Free the backing store.
+ */
+static void vmw_sou_backing_free(struct vmw_private *dev_priv,
+				 struct vmw_screen_object_unit *sou)
+{
+	struct ttm_buffer_object *bo;
+
+	if (unlikely(sou->buffer == NULL))
+		return;
+
+	bo = &sou->buffer->base;
+	ttm_bo_unref(&bo);
+	sou->buffer = NULL;
+	sou->buffer_size = 0;
+}
+
+/**
+ * Allocate the backing store for the buffer.
+ */
+static int vmw_sou_backing_alloc(struct vmw_private *dev_priv,
+				 struct vmw_screen_object_unit *sou,
+				 unsigned long size)
+{
+	int ret;
+
+	if (sou->buffer_size == size)
+		return 0;
+
+	if (sou->buffer)
+		vmw_sou_backing_free(dev_priv, sou);
+
+	sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
+	if (unlikely(sou->buffer == NULL))
+		return -ENOMEM;
+
+	/* After we have allocated the backing store, we might not be
+	 * able to resume the overlays; this is preferred to failing
+	 * the allocation.
+	 */
+	vmw_overlay_pause_all(dev_priv);
+	ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
+			      &vmw_vram_ne_placement,
+			      false, &vmw_dmabuf_bo_free);
+	vmw_overlay_resume_all(dev_priv);
+
+	if (unlikely(ret != 0))
+		sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
+	else
+		sou->buffer_size = size;
+
+	return ret;
+}
+
+static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
+{
+	struct vmw_private *dev_priv;
+	struct vmw_screen_object_unit *sou;
+	struct drm_connector *connector;
+	struct drm_display_mode *mode;
+	struct drm_encoder *encoder;
+	struct vmw_framebuffer *vfb;
+	struct drm_framebuffer *fb;
+	struct drm_crtc *crtc;
+	int ret = 0;
+
+	if (!set)
+		return -EINVAL;
+
+	if (!set->crtc)
+		return -EINVAL;
+
+	/* get the sou */
+	crtc = set->crtc;
+	sou = vmw_crtc_to_sou(crtc);
+	vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
+	dev_priv = vmw_priv(crtc->dev);
+
+	if (set->num_connectors > 1) {
+		DRM_ERROR("too many connectors\n");
+		return -EINVAL;
+	}
+
+	if (set->num_connectors == 1 &&
+	    set->connectors[0] != &sou->base.connector) {
+		DRM_ERROR("connector doesn't match %p %p\n",
+			set->connectors[0], &sou->base.connector);
+		return -EINVAL;
+	}
+
+	/* sou only supports one fb active at a time */
+	if (sou->base.is_implicit &&
+	    dev_priv->sou_priv->implicit_fb && vfb &&
+	    !(dev_priv->sou_priv->num_implicit == 1 &&
+	      sou->active_implicit) &&
+	    dev_priv->sou_priv->implicit_fb != vfb) {
+		DRM_ERROR("Multiple framebuffers not supported\n");
+		return -EINVAL;
+	}
+
+	/* since they always map one to one these are safe */
+	connector = &sou->base.connector;
+	encoder = &sou->base.encoder;
+
+	/* should we turn the crtc off */
+	if (set->num_connectors == 0 || !set->mode || !set->fb) {
+		ret = vmw_sou_fifo_destroy(dev_priv, sou);
+		/* the hardware has hung, don't do anything more */
+		if (unlikely(ret != 0))
+			return ret;
+
+		connector->encoder = NULL;
+		encoder->crtc = NULL;
+		crtc->fb = NULL;
+		crtc->x = 0;
+		crtc->y = 0;
+
+		vmw_sou_del_active(dev_priv, sou);
+
+		vmw_sou_backing_free(dev_priv, sou);
+
+		return 0;
+	}
+
+
+	/* we now know we want to set a mode */
+	mode = set->mode;
+	fb = set->fb;
+
+	if (set->x + mode->hdisplay > fb->width ||
+	    set->y + mode->vdisplay > fb->height) {
+		DRM_ERROR("set outside of framebuffer\n");
+		return -EINVAL;
+	}
+
+	vmw_fb_off(dev_priv);
+
+	if (mode->hdisplay != crtc->mode.hdisplay ||
+	    mode->vdisplay != crtc->mode.vdisplay) {
+		/* no need to check if depth is different, because backing
+		 * store depth is forced to 4 by the device.
+		 */
+
+		ret = vmw_sou_fifo_destroy(dev_priv, sou);
+		/* the hardware has hung, don't do anything more */
+		if (unlikely(ret != 0))
+			return ret;
+
+		vmw_sou_backing_free(dev_priv, sou);
+	}
+
+	if (!sou->buffer) {
+		/* forced to depth 4 by the device */
+		size_t size = mode->hdisplay * mode->vdisplay * 4;
+		ret = vmw_sou_backing_alloc(dev_priv, sou, size);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
+	if (unlikely(ret != 0)) {
+		/*
+		 * We are in a difficult spot here: the hardware has
+		 * hung, and we may or may not have a buffer hanging off
+		 * the screen object. The best thing to do is nothing if
+		 * the screen was already defined; if not, just turn the
+		 * crtc off. Not what userspace wants, but there is
+		 * little else we can do.
+		 */
+		if (sou->defined)
+			return ret;
+
+		connector->encoder = NULL;
+		encoder->crtc = NULL;
+		crtc->fb = NULL;
+		crtc->x = 0;
+		crtc->y = 0;
+
+		return ret;
+	}
+
+	vmw_sou_add_active(dev_priv, sou, vfb);
+
+	connector->encoder = encoder;
+	encoder->crtc = crtc;
+	crtc->mode = *mode;
+	crtc->fb = fb;
+	crtc->x = set->x;
+	crtc->y = set->y;
+
+	return 0;
+}
+
+static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
+	.save = vmw_du_crtc_save,
+	.restore = vmw_du_crtc_restore,
+	.cursor_set = vmw_du_crtc_cursor_set,
+	.cursor_move = vmw_du_crtc_cursor_move,
+	.gamma_set = vmw_du_crtc_gamma_set,
+	.destroy = vmw_sou_crtc_destroy,
+	.set_config = vmw_sou_crtc_set_config,
+	.page_flip = vmw_du_page_flip,
+};
+
+/*
+ * Screen Object Display Unit encoder functions
+ */
+
+static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
+{
+	vmw_sou_destroy(vmw_encoder_to_sou(encoder));
+}
+
+static struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
+	.destroy = vmw_sou_encoder_destroy,
+};
+
+/*
+ * Screen Object Display Unit connector functions
+ */
+
+static void vmw_sou_connector_destroy(struct drm_connector *connector)
+{
+	vmw_sou_destroy(vmw_connector_to_sou(connector));
+}
+
+static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+	.dpms = vmw_du_connector_dpms,
+	.save = vmw_du_connector_save,
+	.restore = vmw_du_connector_restore,
+	.detect = vmw_du_connector_detect,
+	.fill_modes = vmw_du_connector_fill_modes,
+	.set_property = vmw_du_connector_set_property,
+	.destroy = vmw_sou_connector_destroy,
+};
+
+static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
+{
+	struct vmw_screen_object_unit *sou;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+
+	sou = kzalloc(sizeof(*sou), GFP_KERNEL);
+	if (!sou)
+		return -ENOMEM;
+
+	sou->base.unit = unit;
+	crtc = &sou->base.crtc;
+	encoder = &sou->base.encoder;
+	connector = &sou->base.connector;
+
+	sou->active_implicit = false;
+
+	sou->base.pref_active = (unit == 0);
+	sou->base.pref_width = dev_priv->initial_width;
+	sou->base.pref_height = dev_priv->initial_height;
+	sou->base.pref_mode = NULL;
+	sou->base.is_implicit = true;
+
+	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
+			   DRM_MODE_CONNECTOR_VIRTUAL);
+	connector->status = vmw_du_connector_detect(connector, true);
+
+	drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
+			 DRM_MODE_ENCODER_VIRTUAL);
+	drm_mode_connector_attach_encoder(connector, encoder);
+	encoder->possible_crtcs = (1 << unit);
+	encoder->possible_clones = 0;
+
+	drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
+	drm_object_attach_property(&connector->base,
+				      dev->mode_config.dirty_info_property,
+				      1);
+
+	return 0;
+}
+
+int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int i, ret;
+
+	if (dev_priv->sou_priv) {
+		DRM_INFO("sou system already on\n");
+		return -EINVAL;
+	}
+
+	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
+		DRM_INFO("Not using screen objects,"
+			 " missing cap SCREEN_OBJECT_2\n");
+		return -ENOSYS;
+	}
+
+	ret = -ENOMEM;
+	dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL);
+	if (unlikely(!dev_priv->sou_priv))
+		goto err_no_mem;
+
+	dev_priv->sou_priv->num_implicit = 0;
+	dev_priv->sou_priv->implicit_fb = NULL;
+
+	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+	if (unlikely(ret != 0))
+		goto err_free;
+
+	ret = drm_mode_create_dirty_info_property(dev);
+	if (unlikely(ret != 0))
+		goto err_vblank_cleanup;
+
+	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
+		vmw_sou_init(dev_priv, i);
+
+	DRM_INFO("Screen objects system initialized\n");
+
+	return 0;
+
+err_vblank_cleanup:
+	drm_vblank_cleanup(dev);
+err_free:
+	kfree(dev_priv->sou_priv);
+	dev_priv->sou_priv = NULL;
+err_no_mem:
+	return ret;
+}
+
+int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	if (!dev_priv->sou_priv)
+		return -ENOSYS;
+
+	drm_vblank_cleanup(dev);
+
+	kfree(dev_priv->sou_priv);
+
+	return 0;
+}
+
+/**
+ * Returns true if this unit can be page flipped.
+ * Must be called with the mode_config mutex held.
+ */
+bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
+				     struct drm_crtc *crtc)
+{
+	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+	if (!sou->base.is_implicit)
+		return true;
+
+	if (dev_priv->sou_priv->num_implicit != 1)
+		return false;
+
+	return true;
+}
+
+/**
+ * Update the implicit fb to the current fb of this crtc.
+ * Must be called with the mode_config mutex held.
+ */
+void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
+					      struct drm_crtc *crtc)
+{
+	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
+
+	BUG_ON(!sou->base.is_implicit);
+
+	dev_priv->sou_priv->implicit_fb =
+		vmw_framebuffer_to_vfb(sou->base.crtc.fb);
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
new file mode 100644
index 0000000..5828143
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -0,0 +1,893 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "svga3d_surfacedefs.h"
+
+/**
+ * struct vmw_user_surface - User-space visible surface resource
+ *
+ * @base:           The TTM base object handling user-space visibility.
+ * @srf:            The surface metadata.
+ * @size:           TTM accounting size for the surface.
+ */
+struct vmw_user_surface {
+	struct ttm_base_object base;
+	struct vmw_surface srf;
+	uint32_t size;
+	uint32_t backup_handle;
+};
+
+/**
+ * struct vmw_surface_offset - Backing store mip level offset info
+ *
+ * @face:           Surface face.
+ * @mip:            Mip level.
+ * @bo_offset:      Offset into backing store of this mip level.
+ *
+ */
+struct vmw_surface_offset {
+	uint32_t face;
+	uint32_t mip;
+	uint32_t bo_offset;
+};
+
+static void vmw_user_surface_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base);
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_create(struct vmw_resource *res);
+static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+
+static const struct vmw_user_resource_conv user_surface_conv = {
+	.object_type = VMW_RES_SURFACE,
+	.base_obj_to_res = vmw_user_surface_base_to_res,
+	.res_free = vmw_user_surface_free
+};
+
+const struct vmw_user_resource_conv *user_surface_converter =
+	&user_surface_conv;
+
+
+static uint64_t vmw_user_surface_size;
+
+static const struct vmw_res_func vmw_legacy_surface_func = {
+	.res_type = vmw_res_surface,
+	.needs_backup = false,
+	.may_evict = true,
+	.type_name = "legacy surfaces",
+	.backup_placement = &vmw_srf_placement,
+	.create = &vmw_legacy_srf_create,
+	.destroy = &vmw_legacy_srf_destroy,
+	.bind = &vmw_legacy_srf_bind,
+	.unbind = &vmw_legacy_srf_unbind
+};
+
+/**
+ * struct vmw_surface_dma - SVGA3D DMA command
+ */
+struct vmw_surface_dma {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdSurfaceDMA body;
+	SVGA3dCopyBox cb;
+	SVGA3dCmdSurfaceDMASuffix suffix;
+};
+
+/**
+ * struct vmw_surface_define - SVGA3D Surface Define command
+ */
+struct vmw_surface_define {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdDefineSurface body;
+};
+
+/**
+ * struct vmw_surface_destroy - SVGA3D Surface Destroy command
+ */
+struct vmw_surface_destroy {
+	SVGA3dCmdHeader header;
+	SVGA3dCmdDestroySurface body;
+};
+
+
+/**
+ * vmw_surface_dma_size - Compute fifo size for a dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface dma command for backup or
+ * restoration of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
+{
+	return srf->num_sizes * sizeof(struct vmw_surface_dma);
+}
+
+
+/**
+ * vmw_surface_define_size - Compute fifo size for a surface define command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface define command for the definition
+ * of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
+{
+	return sizeof(struct vmw_surface_define) + srf->num_sizes *
+		sizeof(SVGA3dSize);
+}
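+
+/*
+ * Worked example (illustrative numbers only): a cube-map surface with
+ * six faces and one mip level per face has num_sizes == 6, so the
+ * define command occupies
+ * sizeof(struct vmw_surface_define) + 6 * sizeof(SVGA3dSize)
+ * bytes of FIFO space.
+ */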
+
+
+/**
+ * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ *
+ * Computes the required size for a surface destroy command for the destruction
+ * of a hw surface.
+ */
+static inline uint32_t vmw_surface_destroy_size(void)
+{
+	return sizeof(struct vmw_surface_destroy);
+}
+
+/**
+ * vmw_surface_destroy_encode - Encode a surface_destroy command.
+ *
+ * @id: The surface id
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_destroy_encode(uint32_t id,
+				       void *cmd_space)
+{
+	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
+		cmd_space;
+
+	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.sid = id;
+}
+
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_define_encode(const struct vmw_surface *srf,
+				      void *cmd_space)
+{
+	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
+		cmd_space;
+	struct drm_vmw_size *src_size;
+	SVGA3dSize *cmd_size;
+	uint32_t cmd_len;
+	int i;
+
+	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
+	cmd->header.size = cmd_len;
+	cmd->body.sid = srf->res.id;
+	cmd->body.surfaceFlags = srf->flags;
+	cmd->body.format = cpu_to_le32(srf->format);
+	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+
+	cmd += 1;
+	cmd_size = (SVGA3dSize *) cmd;
+	src_size = srf->sizes;
+
+	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+		cmd_size->width = src_size->width;
+		cmd_size->height = src_size->height;
+		cmd_size->depth = src_size->depth;
+	}
+}
+
+/**
+ * vmw_surface_dma_encode - Encode a surface_dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
+ * should be placed or read from.
+ * @to_surface: Boolean whether to DMA to the surface or from the surface.
+ */
+static void vmw_surface_dma_encode(struct vmw_surface *srf,
+				   void *cmd_space,
+				   const SVGAGuestPtr *ptr,
+				   bool to_surface)
+{
+	uint32_t i;
+	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
+	const struct svga3d_surface_desc *desc =
+		svga3dsurface_get_desc(srf->format);
+
+	for (i = 0; i < srf->num_sizes; ++i) {
+		SVGA3dCmdHeader *header = &cmd->header;
+		SVGA3dCmdSurfaceDMA *body = &cmd->body;
+		SVGA3dCopyBox *cb = &cmd->cb;
+		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
+		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
+		const struct drm_vmw_size *cur_size = &srf->sizes[i];
+
+		header->id = SVGA_3D_CMD_SURFACE_DMA;
+		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
+
+		body->guest.ptr = *ptr;
+		body->guest.ptr.offset += cur_offset->bo_offset;
+		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
+								  cur_size);
+		body->host.sid = srf->res.id;
+		body->host.face = cur_offset->face;
+		body->host.mipmap = cur_offset->mip;
+		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
+				  SVGA3D_READ_HOST_VRAM);
+		cb->x = 0;
+		cb->y = 0;
+		cb->z = 0;
+		cb->srcx = 0;
+		cb->srcy = 0;
+		cb->srcz = 0;
+		cb->w = cur_size->width;
+		cb->h = cur_size->height;
+		cb->d = cur_size->depth;
+
+		suffix->suffixSize = sizeof(*suffix);
+		suffix->maximumOffset =
+			svga3dsurface_get_image_buffer_size(desc, cur_size,
+							    body->guest.pitch);
+		suffix->flags.discard = 0;
+		suffix->flags.unsynchronized = 0;
+		suffix->flags.reserved = 0;
+		++cmd;
+	}
+}
+
+
+/**
+ * vmw_hw_surface_destroy - destroy a Device surface
+ *
+ * @res:        Pointer to a struct vmw_resource embedded in a struct
+ *              vmw_surface.
+ *
+ * Destroys the device surface associated with a struct vmw_surface, if
+ * any, and adjusts accounting and resource count accordingly.
+ */
+static void vmw_hw_surface_destroy(struct vmw_resource *res)
+{
+
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf;
+	void *cmd;
+
+	if (res->id != -1) {
+
+		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
+		if (unlikely(cmd == NULL)) {
+			DRM_ERROR("Failed reserving FIFO space for surface "
+				  "destruction.\n");
+			return;
+		}
+
+		vmw_surface_destroy_encode(res->id, cmd);
+		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+
+		/*
+		 * used_memory_size_atomic, or separate lock
+		 * to avoid taking dev_priv::cmdbuf_mutex in
+		 * the destroy path.
+		 */
+
+		mutex_lock(&dev_priv->cmdbuf_mutex);
+		srf = vmw_res_to_srf(res);
+		dev_priv->used_memory_size -= res->backup_size;
+		mutex_unlock(&dev_priv->cmdbuf_mutex);
+	}
+	vmw_3d_resource_dec(dev_priv, false);
+}
+
+/**
+ * vmw_legacy_srf_create - Create a device surface as part of the
+ * resource validation process.
+ *
+ * @res: Pointer to a struct vmw_surface.
+ *
+ * If the surface doesn't have a hw id, one is allocated and the
+ * surface is defined to the device.
+ *
+ * Returns -EBUSY if there weren't sufficient device resources to
+ * complete the validation. Retry after freeing up resources.
+ *
+ * May return other errors if the kernel is out of guest resources.
+ */
+static int vmw_legacy_srf_create(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	struct vmw_surface *srf;
+	uint32_t submit_size;
+	uint8_t *cmd;
+	int ret;
+
+	if (likely(res->id != -1))
+		return 0;
+
+	srf = vmw_res_to_srf(res);
+	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
+		     dev_priv->memory_size))
+		return -EBUSY;
+
+	/*
+	 * Alloc id for the resource.
+	 */
+
+	ret = vmw_resource_alloc_id(res);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate a surface id.\n");
+		goto out_no_id;
+	}
+
+	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
+		ret = -EBUSY;
+		goto out_no_fifo;
+	}
+
+	/*
+	 * Encode the surface define commands.
+	 */
+
+	submit_size = vmw_surface_define_size(srf);
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "creation.\n");
+		ret = -ENOMEM;
+		goto out_no_fifo;
+	}
+
+	vmw_surface_define_encode(srf, cmd);
+	vmw_fifo_commit(dev_priv, submit_size);
+	/*
+	 * Surface memory usage accounting.
+	 */
+
+	dev_priv->used_memory_size += res->backup_size;
+	return 0;
+
+out_no_fifo:
+	vmw_resource_release_id(res);
+out_no_id:
+	return ret;
+}
+
+/**
+ * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
+ *
+ * @res:            Pointer to a struct vmw_res embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ * @bind:           Boolean whether to DMA to the surface.
+ *
+ * Transfer backup data to or from a legacy surface as part of the
+ * validation process.
+ * May return other errors if the kernel is out of guest resources.
+ * The backup buffer will be fenced or idle upon successful completion,
+ * and if the surface needs persistent backup storage, the backup buffer
+ * will also be returned reserved iff @bind is true.
+ */
+static int vmw_legacy_srf_dma(struct vmw_resource *res,
+			      struct ttm_validate_buffer *val_buf,
+			      bool bind)
+{
+	SVGAGuestPtr ptr;
+	struct vmw_fence_obj *fence;
+	uint32_t submit_size;
+	struct vmw_surface *srf = vmw_res_to_srf(res);
+	uint8_t *cmd;
+	struct vmw_private *dev_priv = res->dev_priv;
+
+	BUG_ON(val_buf->bo == NULL);
+
+	submit_size = vmw_surface_dma_size(srf);
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "DMA.\n");
+		return -ENOMEM;
+	}
+	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
+	vmw_surface_dma_encode(srf, cmd, &ptr, bind);
+
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	/*
+	 * Create a fence object and fence the backup buffer.
+	 */
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
+					  &fence, NULL);
+
+	vmw_fence_single_bo(val_buf->bo, fence);
+
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
+
+	return 0;
+}
+
+/**
+ * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
+ *                       surface validation process.
+ *
+ * @res:            Pointer to a struct vmw_res embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ *
+ * This function will copy backup data to the surface if the
+ * backup buffer is dirty.
+ */
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+			       struct ttm_validate_buffer *val_buf)
+{
+	if (!res->backup_dirty)
+		return 0;
+
+	return vmw_legacy_srf_dma(res, val_buf, true);
+}
+
+
+/**
+ * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
+ *                         surface eviction process.
+ *
+ * @res:            Pointer to a struct vmw_res embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ *
+ * This function will copy backup data from the surface.
+ */
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+				 bool readback,
+				 struct ttm_validate_buffer *val_buf)
+{
+	if (unlikely(readback))
+		return vmw_legacy_srf_dma(res, val_buf, false);
+	return 0;
+}
+
+/**
+ * vmw_legacy_srf_destroy - Destroy a device surface as part of a
+ *                          resource eviction process.
+ *
+ * @res:            Pointer to a struct vmw_res embedded in a struct
+ *                  vmw_surface.
+ */
+static int vmw_legacy_srf_destroy(struct vmw_resource *res)
+{
+	struct vmw_private *dev_priv = res->dev_priv;
+	uint32_t submit_size;
+	uint8_t *cmd;
+
+	BUG_ON(res->id == -1);
+
+	/*
+	 * Encode the dma- and surface destroy commands.
+	 */
+
+	submit_size = vmw_surface_destroy_size();
+	cmd = vmw_fifo_reserve(dev_priv, submit_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving FIFO space for surface "
+			  "eviction.\n");
+		return -ENOMEM;
+	}
+
+	vmw_surface_destroy_encode(res->id, cmd);
+	vmw_fifo_commit(dev_priv, submit_size);
+
+	/*
+	 * Surface memory usage accounting.
+	 */
+
+	dev_priv->used_memory_size -= res->backup_size;
+
+	/*
+	 * Release the surface ID.
+	 */
+
+	vmw_resource_release_id(res);
+
+	return 0;
+}
+
+
+/**
+ * vmw_surface_init - initialize a struct vmw_surface
+ *
+ * @dev_priv:       Pointer to a device private struct.
+ * @srf:            Pointer to the struct vmw_surface to initialize.
+ * @res_free:       Pointer to a resource destructor used to free
+ *                  the object.
+ */
+static int vmw_surface_init(struct vmw_private *dev_priv,
+			    struct vmw_surface *srf,
+			    void (*res_free) (struct vmw_resource *res))
+{
+	int ret;
+	struct vmw_resource *res = &srf->res;
+
+	BUG_ON(res_free == NULL);
+	(void) vmw_3d_resource_inc(dev_priv, false);
+	ret = vmw_resource_init(dev_priv, res, true, res_free,
+				&vmw_legacy_surface_func);
+
+	if (unlikely(ret != 0)) {
+		vmw_3d_resource_dec(dev_priv, false);
+		res_free(res);
+		return ret;
+	}
+
+	/*
+	 * The surface won't be visible to hardware until a
+	 * surface validate.
+	 */
+
+	vmw_resource_activate(res, vmw_hw_surface_destroy);
+	return ret;
+}
+
+/**
+ * vmw_user_surface_base_to_res - TTM base object to resource converter for
+ *                                user visible surfaces
+ *
+ * @base:           Pointer to a TTM base object
+ *
+ * Returns the struct vmw_resource embedded in a struct vmw_surface
+ * for the user-visible object identified by the TTM base object @base.
+ */
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base)
+{
+	return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+}
+
+/**
+ * vmw_user_surface_free - User visible surface resource destructor
+ *
+ * @res:            A struct vmw_resource embedded in a struct vmw_surface.
+ */
+static void vmw_user_surface_free(struct vmw_resource *res)
+{
+	struct vmw_surface *srf = vmw_res_to_srf(res);
+	struct vmw_user_surface *user_srf =
+	    container_of(srf, struct vmw_user_surface, srf);
+	struct vmw_private *dev_priv = srf->res.dev_priv;
+	uint32_t size = user_srf->size;
+
+	kfree(srf->offsets);
+	kfree(srf->sizes);
+	kfree(srf->snooper.image);
+	ttm_base_object_kfree(user_srf, base);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_user_surface_base_release - User visible surface TTM base object destructor
+ *
+ * @p_base:         Pointer to a pointer to a TTM base object
+ *                  embedded in a struct vmw_user_surface.
+ *
+ * Drops the base object's reference on its resource, and the
+ * pointer pointed to by *p_base is set to NULL.
+ */
+static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_user_surface *user_srf =
+	    container_of(base, struct vmw_user_surface, base);
+	struct vmw_resource *res = &user_srf->srf.res;
+
+	*p_base = NULL;
+	vmw_resource_unreference(&res);
+}
+
+/**
+ * vmw_surface_destroy_ioctl - Ioctl function implementing
+ *                                  the user surface destroy functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+}
+
+/**
+ * vmw_surface_define_ioctl - Ioctl function implementing
+ *                                  the user surface define functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_user_surface *user_srf;
+	struct vmw_surface *srf;
+	struct vmw_resource *res;
+	struct vmw_resource *tmp;
+	union drm_vmw_surface_create_arg *arg =
+	    (union drm_vmw_surface_create_arg *)data;
+	struct drm_vmw_surface_create_req *req = &arg->req;
+	struct drm_vmw_surface_arg *rep = &arg->rep;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct drm_vmw_size __user *user_sizes;
+	int ret;
+	int i, j;
+	uint32_t cur_bo_offset;
+	struct drm_vmw_size *cur_size;
+	struct vmw_surface_offset *cur_offset;
+	uint32_t num_sizes;
+	uint32_t size;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	const struct svga3d_surface_desc *desc;
+
+	if (unlikely(vmw_user_surface_size == 0))
+		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+			128;
+
+	num_sizes = 0;
+	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+		num_sizes += req->mip_levels[i];
+
+	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+	    DRM_VMW_MAX_MIP_LEVELS)
+		return -EINVAL;
+
+	size = vmw_user_surface_size + 128 +
+		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
+		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
+
+
+	desc = svga3dsurface_get_desc(req->format);
+	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+		DRM_ERROR("Invalid surface format for surface creation.\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   size, false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(user_srf == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
+
+	srf = &user_srf->srf;
+	res = &srf->res;
+
+	srf->flags = req->flags;
+	srf->format = req->format;
+	srf->scanout = req->scanout;
+
+	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+	srf->num_sizes = num_sizes;
+	user_srf->size = size;
+
+	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
+	if (unlikely(srf->sizes == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_sizes;
+	}
+	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
+			       GFP_KERNEL);
+	if (unlikely(srf->offsets == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_offsets;
+	}
+
+	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+	    req->size_addr;
+
+	ret = copy_from_user(srf->sizes, user_sizes,
+			     srf->num_sizes * sizeof(*srf->sizes));
+	if (unlikely(ret != 0)) {
+		ret = -EFAULT;
+		goto out_no_copy;
+	}
+
+	srf->base_size = *srf->sizes;
+	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+	srf->multisample_count = 1;
+
+	cur_bo_offset = 0;
+	cur_offset = srf->offsets;
+	cur_size = srf->sizes;
+
+	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+		for (j = 0; j < srf->mip_levels[i]; ++j) {
+			uint32_t stride = svga3dsurface_calculate_pitch
+				(desc, cur_size);
+
+			cur_offset->face = i;
+			cur_offset->mip = j;
+			cur_offset->bo_offset = cur_bo_offset;
+			cur_bo_offset += svga3dsurface_get_image_buffer_size
+				(desc, cur_size, stride);
+			++cur_offset;
+			++cur_size;
+		}
+	}
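+	/*
+	 * Illustrative example (hypothetical sizes): for a two-mip
+	 * 64x64 SVGA3D_A8R8G8B8 surface (4 bytes per pixel), mip 0
+	 * takes 64 * 64 * 4 = 16384 bytes at bo_offset 0, mip 1
+	 * (32x32) starts at bo_offset 16384, and res->backup_size
+	 * ends up as 16384 + 32 * 32 * 4 = 20480 bytes.
+	 */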
+	res->backup_size = cur_bo_offset;
+	if (srf->scanout &&
+	    srf->num_sizes == 1 &&
+	    srf->sizes[0].width == 64 &&
+	    srf->sizes[0].height == 64 &&
+	    srf->format == SVGA3D_A8R8G8B8) {
+
+		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
+		/* clear the image */
+		if (srf->snooper.image) {
+			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
+		} else {
+			DRM_ERROR("Failed to allocate cursor_image\n");
+			ret = -ENOMEM;
+			goto out_no_copy;
+		}
+	} else {
+		srf->snooper.image = NULL;
+	}
+	srf->snooper.crtc = NULL;
+
+	user_srf->base.shareable = false;
+	user_srf->base.tfile = NULL;
+
+	/*
+	 * From this point, the generic resource management functions
+	 * destroy the object on failure.
+	 */
+
+	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	tmp = vmw_resource_reference(&srf->res);
+	ret = ttm_base_object_init(tfile, &user_srf->base,
+				   req->shareable, VMW_RES_SURFACE,
+				   &vmw_user_surface_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		vmw_resource_unreference(&res);
+		goto out_unlock;
+	}
+
+	rep->sid = user_srf->base.hash.key;
+	vmw_resource_unreference(&res);
+
+	ttm_read_unlock(&vmaster->lock);
+	return 0;
+out_no_copy:
+	kfree(srf->offsets);
+out_no_offsets:
+	kfree(srf->sizes);
+out_no_sizes:
+	ttm_base_object_kfree(user_srf, base);
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+}
+
+/**
+ * vmw_surface_reference_ioctl - Ioctl function implementing
+ *                                  the user surface reference functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	union drm_vmw_surface_reference_arg *arg =
+	    (union drm_vmw_surface_reference_arg *)data;
+	struct drm_vmw_surface_arg *req = &arg->req;
+	struct drm_vmw_surface_create_req *rep = &arg->rep;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_surface *srf;
+	struct vmw_user_surface *user_srf;
+	struct drm_vmw_size __user *user_sizes;
+	struct ttm_base_object *base;
+	int ret = -EINVAL;
+
+	base = ttm_base_object_lookup(tfile, req->sid);
+	if (unlikely(base == NULL)) {
+		DRM_ERROR("Could not find surface to reference.\n");
+		return -EINVAL;
+	}
+
+	if (unlikely(base->object_type != VMW_RES_SURFACE))
+		goto out_bad_resource;
+
+	user_srf = container_of(base, struct vmw_user_surface, base);
+	srf = &user_srf->srf;
+
+	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not add a reference to a surface.\n");
+		goto out_no_reference;
+	}
+
+	rep->flags = srf->flags;
+	rep->format = srf->format;
+	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+	    rep->size_addr;
+
+	if (user_sizes)
+		ret = copy_to_user(user_sizes, srf->sizes,
+				   srf->num_sizes * sizeof(*srf->sizes));
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("copy_to_user failed %p %u\n",
+			  user_sizes, srf->num_sizes);
+		ret = -EFAULT;
+	}
+out_bad_resource:
+out_no_reference:
+	ttm_base_object_unref(&base);
+
+	return ret;
+}
diff --git a/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
new file mode 100644
index 0000000..98d6bfb
--- /dev/null
+++ b/linux-imx/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -0,0 +1,98 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
+
+int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *file_priv;
+	struct vmw_private *dev_priv;
+
+	if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
+		DRM_ERROR("Illegal attempt to mmap old fifo space.\n");
+		return -EINVAL;
+	}
+
+	file_priv = filp->private_data;
+	dev_priv = vmw_priv(file_priv->minor->dev);
+	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+}
+
+static int vmw_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+	DRM_INFO("global init.\n");
+	return ttm_mem_global_init(ref->object);
+}
+
+static void vmw_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+	ttm_mem_global_release(ref->object);
+}
+
+int vmw_ttm_global_init(struct vmw_private *dev_priv)
+{
+	struct drm_global_reference *global_ref;
+	int ret;
+
+	global_ref = &dev_priv->mem_global_ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+	global_ref->size = sizeof(struct ttm_mem_global);
+	global_ref->init = &vmw_ttm_mem_global_init;
+	global_ref->release = &vmw_ttm_mem_global_release;
+
+	ret = drm_global_item_ref(global_ref);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed setting up TTM memory accounting.\n");
+		return ret;
+	}
+
+	dev_priv->bo_global_ref.mem_glob =
+		dev_priv->mem_global_ref.object;
+	global_ref = &dev_priv->bo_global_ref.ref;
+	global_ref->global_type = DRM_GLOBAL_TTM_BO;
+	global_ref->size = sizeof(struct ttm_bo_global);
+	global_ref->init = &ttm_bo_global_init;
+	global_ref->release = &ttm_bo_global_release;
+	ret = drm_global_item_ref(global_ref);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed setting up TTM buffer objects.\n");
+		goto out_no_bo;
+	}
+
+	return 0;
+out_no_bo:
+	drm_global_item_unref(&dev_priv->mem_global_ref);
+	return ret;
+}
+
+void vmw_ttm_global_release(struct vmw_private *dev_priv)
+{
+	drm_global_item_unref(&dev_priv->bo_global_ref.ref);
+	drm_global_item_unref(&dev_priv->mem_global_ref);
+}
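
(The two TTM global references above must be taken in order, memory accounting first and then buffer objects, and dropped in reverse. A minimal sketch of how a driver load/unload path would pair these calls; the entry-point names are hypothetical, only vmw_ttm_global_init/release come from the file above.)

    /* hedged sketch: pairing the global refs around the driver lifetime */
    static int vmw_load_sketch(struct vmw_private *dev_priv)
    {
            int ret = vmw_ttm_global_init(dev_priv);  /* mem ref, then bo ref */
            if (unlikely(ret != 0))
                    return ret;
            /* ... ttm_bo_device_init(&dev_priv->bdev, ...) and the rest ... */
            return 0;
    }

    static void vmw_unload_sketch(struct vmw_private *dev_priv)
    {
            /* ... tear down the bo device first ... */
            vmw_ttm_global_release(dev_priv);         /* bo ref, then mem ref */
    }
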
diff --git a/linux-imx/drivers/gpu/host1x/Kconfig b/linux-imx/drivers/gpu/host1x/Kconfig
new file mode 100644
index 0000000..ccfd42b
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/Kconfig
@@ -0,0 +1,24 @@
+config TEGRA_HOST1X
+	tristate "NVIDIA Tegra host1x driver"
+	depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
+	help
+	  Driver for the NVIDIA Tegra host1x hardware.
+
+	  The Tegra host1x module is the DMA engine for register access to
+	  Tegra's graphics- and multimedia-related modules. The modules served
+	  by host1x are referred to as clients. host1x includes some other
+	  functionality, such as synchronization.
+
+if TEGRA_HOST1X
+
+config TEGRA_HOST1X_FIREWALL
+	bool "Enable HOST1X security firewall"
+	default y
+	help
+	  Say yes if the kernel should protect command streams from tampering.
+
+	  If unsure, choose Y.
+
+source "drivers/gpu/host1x/drm/Kconfig"
+
+endif
diff --git a/linux-imx/drivers/gpu/host1x/Makefile b/linux-imx/drivers/gpu/host1x/Makefile
new file mode 100644
index 0000000..3b037b6
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/Makefile
@@ -0,0 +1,20 @@
+ccflags-y = -Idrivers/gpu/host1x
+
+host1x-y = \
+	syncpt.o \
+	dev.o \
+	intr.o \
+	cdma.o \
+	channel.o \
+	job.o \
+	debug.o \
+	hw/host1x01.o
+
+ccflags-y += -Iinclude/drm
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+host1x-$(CONFIG_DRM_TEGRA) += drm/drm.o drm/fb.o drm/dc.o
+host1x-$(CONFIG_DRM_TEGRA) += drm/output.o drm/rgb.o drm/hdmi.o
+host1x-$(CONFIG_DRM_TEGRA) += drm/gem.o
+host1x-$(CONFIG_DRM_TEGRA) += drm/gr2d.o
+obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/linux-imx/drivers/gpu/host1x/cdma.c b/linux-imx/drivers/gpu/host1x/cdma.c
new file mode 100644
index 0000000..de72172
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/cdma.c
@@ -0,0 +1,491 @@
+/*
+ * Tegra host1x Command DMA
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include <asm/cacheflush.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+#include <trace/events/host1x.h>
+
+#include "cdma.h"
+#include "channel.h"
+#include "dev.h"
+#include "debug.h"
+#include "host1x_bo.h"
+#include "job.h"
+
+/*
+ * push_buffer
+ *
+ * The push buffer is a circular array of words to be fetched by command DMA.
+ * Note that it works slightly differently from the sync queue; fence == pos
+ * means that the push buffer is full, not empty.
+ */
+
+#define HOST1X_PUSHBUFFER_SLOTS	512
+
+/*
+ * Clean up push buffer resources
+ */
+static void host1x_pushbuffer_destroy(struct push_buffer *pb)
+{
+	struct host1x_cdma *cdma = pb_to_cdma(pb);
+	struct host1x *host1x = cdma_to_host1x(cdma);
+
+	if (pb->phys != 0)
+		dma_free_writecombine(host1x->dev, pb->size_bytes + 4,
+				      pb->mapped, pb->phys);
+
+	pb->mapped = NULL;
+	pb->phys = 0;
+}
+
+/*
+ * Init push buffer resources
+ */
+static int host1x_pushbuffer_init(struct push_buffer *pb)
+{
+	struct host1x_cdma *cdma = pb_to_cdma(pb);
+	struct host1x *host1x = cdma_to_host1x(cdma);
+
+	pb->mapped = NULL;
+	pb->phys = 0;
+	pb->size_bytes = HOST1X_PUSHBUFFER_SLOTS * 8;
+
+	/* initialize buffer pointers */
+	pb->fence = pb->size_bytes - 8;
+	pb->pos = 0;
+
+	/* allocate and map pushbuffer memory */
+	pb->mapped = dma_alloc_writecombine(host1x->dev, pb->size_bytes + 4,
+					    &pb->phys, GFP_KERNEL);
+	if (!pb->mapped)
+		goto fail;
+
+	host1x_hw_pushbuffer_init(host1x, pb);
+
+	return 0;
+
+fail:
+	host1x_pushbuffer_destroy(pb);
+	return -ENOMEM;
+}
+
+/*
+ * Push two words to the push buffer
+ * Caller must ensure push buffer is not full
+ */
+static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
+{
+	u32 pos = pb->pos;
+	/* do the pointer arithmetic via unsigned long to stay 64-bit clean */
+	u32 *p = (u32 *)((unsigned long)pb->mapped + pos);
+	WARN_ON(pos == pb->fence);
+	*(p++) = op1;
+	*(p++) = op2;
+	pb->pos = (pos + 8) & (pb->size_bytes - 1);
+}
+
+/*
+ * Pop a number of two word slots from the push buffer
+ * Caller must ensure push buffer is not empty
+ */
+static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
+{
+	/* Advance the next write position */
+	pb->fence = (pb->fence + slots * 8) & (pb->size_bytes - 1);
+}
+
+/*
+ * Return the number of two word slots free in the push buffer
+ */
+static u32 host1x_pushbuffer_space(struct push_buffer *pb)
+{
+	return ((pb->fence - pb->pos) & (pb->size_bytes - 1)) / 8;
+}
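
(A quick standalone check of the ring arithmetic above: with 512 two-word slots, size_bytes is 4096, a power of two, so the `& (size_bytes - 1)` masks implement the wrap-around, and initializing fence to size_bytes - 8 keeps one slot in reserve so that pos == fence means full rather than empty. The values below are assumed for illustration, matching HOST1X_PUSHBUFFER_SLOTS.)

    #include <assert.h>

    /* userspace sketch of the push/pop/space math, not driver code */
    int main(void)
    {
            unsigned int size = 4096, pos = 0, fence = size - 8, i;

            for (i = 0; i < 511; i++)                       /* 511 pushes */
                    pos = (pos + 8) & (size - 1);
            assert(((fence - pos) & (size - 1)) / 8 == 0);  /* full */
            fence = (fence + 8) & (size - 1);               /* pop one slot */
            assert(((fence - pos) & (size - 1)) / 8 == 1);
            return 0;
    }
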
+
+/*
+ * Sleep (if necessary) until the requested event happens
+ *   - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
+ *     - Returns 1
+ *   - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
+ *     - Returns the amount of space (> 0)
+ * Must be called with the cdma lock held.
+ */
+unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
+				     enum cdma_event event)
+{
+	for (;;) {
+		unsigned int space;
+
+		if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+			space = list_empty(&cdma->sync_queue) ? 1 : 0;
+		else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) {
+			struct push_buffer *pb = &cdma->push_buffer;
+			space = host1x_pushbuffer_space(pb);
+		} else {
+			WARN_ON(1);
+			return -EINVAL;
+		}
+
+		if (space)
+			return space;
+
+		trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
+				       event);
+
+		/* If somebody has managed to already start waiting, yield */
+		if (cdma->event != CDMA_EVENT_NONE) {
+			mutex_unlock(&cdma->lock);
+			schedule();
+			mutex_lock(&cdma->lock);
+			continue;
+		}
+		cdma->event = event;
+
+		mutex_unlock(&cdma->lock);
+		down(&cdma->sem);
+		mutex_lock(&cdma->lock);
+	}
+	return 0;
+}
+
+/*
+ * Start timer that tracks the time spent by the job.
+ * Must be called with the cdma lock held.
+ */
+static void cdma_start_timer_locked(struct host1x_cdma *cdma,
+				    struct host1x_job *job)
+{
+	struct host1x *host = cdma_to_host1x(cdma);
+
+	if (cdma->timeout.client) {
+		/* timer already started */
+		return;
+	}
+
+	cdma->timeout.client = job->client;
+	cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
+	cdma->timeout.syncpt_val = job->syncpt_end;
+	cdma->timeout.start_ktime = ktime_get();
+
+	schedule_delayed_work(&cdma->timeout.wq,
+			      msecs_to_jiffies(job->timeout));
+}
+
+/*
+ * Stop timer when a buffer submission completes.
+ * Must be called with the cdma lock held.
+ */
+static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
+{
+	cancel_delayed_work(&cdma->timeout.wq);
+	cdma->timeout.client = 0;
+}
+
+/*
+ * For all sync queue entries that have already finished according to the
+ * current sync point registers:
+ *  - unpin & unref their mems
+ *  - pop their push buffer slots
+ *  - remove them from the sync queue
+ * This is normally called from the host code's worker thread, but can be
+ * called manually if necessary.
+ * Must be called with the cdma lock held.
+ */
+static void update_cdma_locked(struct host1x_cdma *cdma)
+{
+	bool signal = false;
+	struct host1x *host1x = cdma_to_host1x(cdma);
+	struct host1x_job *job, *n;
+
+	/* If CDMA is stopped, queue is cleared and we can return */
+	if (!cdma->running)
+		return;
+
+	/*
+	 * Walk the sync queue, reading the sync point registers as necessary,
+	 * to consume as many sync queue entries as possible without blocking
+	 */
+	list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
+		struct host1x_syncpt *sp =
+			host1x_syncpt_get(host1x, job->syncpt_id);
+
+		/* Check whether this syncpt has completed, and bail if not */
+		if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
+			/* Start timer on next pending syncpt */
+			if (job->timeout)
+				cdma_start_timer_locked(cdma, job);
+			break;
+		}
+
+		/* Cancel timeout, when a buffer completes */
+		if (cdma->timeout.client)
+			stop_cdma_timer_locked(cdma);
+
+		/* Unpin the memory */
+		host1x_job_unpin(job);
+
+		/* Pop push buffer slots */
+		if (job->num_slots) {
+			struct push_buffer *pb = &cdma->push_buffer;
+			host1x_pushbuffer_pop(pb, job->num_slots);
+			if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
+				signal = true;
+		}
+
+		list_del(&job->list);
+		host1x_job_put(job);
+	}
+
+	if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY &&
+	    list_empty(&cdma->sync_queue))
+		signal = true;
+
+	if (signal) {
+		cdma->event = CDMA_EVENT_NONE;
+		up(&cdma->sem);
+	}
+}
+
+void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
+				   struct device *dev)
+{
+	u32 restart_addr;
+	u32 syncpt_incrs;
+	struct host1x_job *job = NULL;
+	u32 syncpt_val;
+	struct host1x *host1x = cdma_to_host1x(cdma);
+
+	syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
+
+	dev_dbg(dev, "%s: starting cleanup (thresh %d)\n",
+		__func__, syncpt_val);
+
+	/*
+	 * Move the sync_queue read pointer to the first entry that hasn't
+	 * completed based on the current HW syncpt value. It's likely there
+	 * won't be any (i.e. we're still at the head), but this covers the
+	 * case where a syncpt incr happens just before or during the
+	 * teardown.
+	 */
+
+	dev_dbg(dev, "%s: skip completed buffers still in sync_queue\n",
+		__func__);
+
+	list_for_each_entry(job, &cdma->sync_queue, list) {
+		if (syncpt_val < job->syncpt_end)
+			break;
+
+		host1x_job_dump(dev, job);
+	}
+
+	/*
+	 * Walk the sync_queue, first incrementing with the CPU syncpts that
+	 * are partially executed (the first buffer) or fully skipped while
+	 * still in the current context (slots are also NOP-ed).
+	 *
+	 * At the point contexts are interleaved, syncpt increments must be
+	 * done inline with the pushbuffer from a GATHER buffer to maintain
+	 * the order (slots are modified to be a GATHER of syncpt incrs).
+	 *
+	 * Note: save in restart_addr the location where the timed-out buffer
+	 * started in the PB, so we can start the refetch from there (with the
+	 * modified NOP-ed PB slots). This makes the buffer appear to have
+	 * completed properly, so its resources can be freed.
+	 */
+
+	dev_dbg(dev, "%s: perform CPU incr on pending same ctx buffers\n",
+		__func__);
+
+	if (!list_empty(&cdma->sync_queue))
+		restart_addr = job->first_get;
+	else
+		restart_addr = cdma->last_pos;
+
+	/* do CPU increments as long as this context continues */
+	list_for_each_entry_from(job, &cdma->sync_queue, list) {
+		/* different context, gets us out of this loop */
+		if (job->client != cdma->timeout.client)
+			break;
+
+		/* won't need a timeout when replayed */
+		job->timeout = 0;
+
+		syncpt_incrs = job->syncpt_end - syncpt_val;
+		dev_dbg(dev, "%s: CPU incr (%d)\n", __func__, syncpt_incrs);
+
+		host1x_job_dump(dev, job);
+
+		/* safe to use CPU to incr syncpts */
+		host1x_hw_cdma_timeout_cpu_incr(host1x, cdma, job->first_get,
+						syncpt_incrs, job->syncpt_end,
+						job->num_slots);
+
+		syncpt_val += syncpt_incrs;
+	}
+
+	/*
+	 * The following submits from the same client may depend on the
+	 * failed submit and therefore may also fail. Force a small timeout
+	 * to make the queue cleanup faster.
+	 */
+
+	list_for_each_entry_from(job, &cdma->sync_queue, list)
+		if (job->client == cdma->timeout.client)
+			job->timeout = min_t(unsigned int, job->timeout, 500);
+
+	dev_dbg(dev, "%s: finished sync_queue modification\n", __func__);
+
+	/* roll back DMAGET and start up channel again */
+	host1x_hw_cdma_resume(host1x, cdma, restart_addr);
+}
+
+/*
+ * Create a cdma
+ */
+int host1x_cdma_init(struct host1x_cdma *cdma)
+{
+	int err;
+
+	mutex_init(&cdma->lock);
+	sema_init(&cdma->sem, 0);
+
+	INIT_LIST_HEAD(&cdma->sync_queue);
+
+	cdma->event = CDMA_EVENT_NONE;
+	cdma->running = false;
+	cdma->torndown = false;
+
+	err = host1x_pushbuffer_init(&cdma->push_buffer);
+	if (err)
+		return err;
+	return 0;
+}
+
+/*
+ * Destroy a cdma
+ */
+int host1x_cdma_deinit(struct host1x_cdma *cdma)
+{
+	struct push_buffer *pb = &cdma->push_buffer;
+	struct host1x *host1x = cdma_to_host1x(cdma);
+
+	if (cdma->running) {
+		pr_warn("%s: CDMA still running\n", __func__);
+		return -EBUSY;
+	}
+
+	host1x_pushbuffer_destroy(pb);
+	host1x_hw_cdma_timeout_destroy(host1x, cdma);
+
+	return 0;
+}
+
+/*
+ * Begin a cdma submit
+ */
+int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
+{
+	struct host1x *host1x = cdma_to_host1x(cdma);
+
+	mutex_lock(&cdma->lock);
+
+	if (job->timeout) {
+		/* init state on first submit with timeout value */
+		if (!cdma->timeout.initialized) {
+			int err;
+			err = host1x_hw_cdma_timeout_init(host1x, cdma,
+							  job->syncpt_id);
+			if (err) {
+				mutex_unlock(&cdma->lock);
+				return err;
+			}
+		}
+	}
+	if (!cdma->running)
+		host1x_hw_cdma_start(host1x, cdma);
+
+	cdma->slots_free = 0;
+	cdma->slots_used = 0;
+	cdma->first_get = cdma->push_buffer.pos;
+
+	trace_host1x_cdma_begin(dev_name(job->channel->dev));
+	return 0;
+}
+
+/*
+ * Push two words into a push buffer slot
+ * Blocks as necessary if the push buffer is full.
+ */
+void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
+{
+	struct host1x *host1x = cdma_to_host1x(cdma);
+	struct push_buffer *pb = &cdma->push_buffer;
+	u32 slots_free = cdma->slots_free;
+
+	if (host1x_debug_trace_cmdbuf)
+		trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
+				       op1, op2);
+
+	if (slots_free == 0) {
+		host1x_hw_cdma_flush(host1x, cdma);
+		slots_free = host1x_cdma_wait_locked(cdma,
+						CDMA_EVENT_PUSH_BUFFER_SPACE);
+	}
+	cdma->slots_free = slots_free - 1;
+	cdma->slots_used++;
+	host1x_pushbuffer_push(pb, op1, op2);
+}
+
+/*
+ * End a cdma submit
+ * Kick off DMA, add the job to the sync queue, and record the number of
+ * slots to be freed from the pushbuffer. The handles for a submit must all
+ * be pinned at the same time, but they can be unpinned in smaller chunks.
+ */
+void host1x_cdma_end(struct host1x_cdma *cdma,
+		     struct host1x_job *job)
+{
+	struct host1x *host1x = cdma_to_host1x(cdma);
+	bool idle = list_empty(&cdma->sync_queue);
+
+	host1x_hw_cdma_flush(host1x, cdma);
+
+	job->first_get = cdma->first_get;
+	job->num_slots = cdma->slots_used;
+	host1x_job_get(job);
+	list_add_tail(&job->list, &cdma->sync_queue);
+
+	/* start timer on idle -> active transitions */
+	if (job->timeout && idle)
+		cdma_start_timer_locked(cdma, job);
+
+	trace_host1x_cdma_end(dev_name(job->channel->dev));
+	mutex_unlock(&cdma->lock);
+}
+
+/*
+ * Update cdma state according to current sync point values
+ */
+void host1x_cdma_update(struct host1x_cdma *cdma)
+{
+	mutex_lock(&cdma->lock);
+	update_cdma_locked(cdma);
+	mutex_unlock(&cdma->lock);
+}
diff --git a/linux-imx/drivers/gpu/host1x/cdma.h b/linux-imx/drivers/gpu/host1x/cdma.h
new file mode 100644
index 0000000..313c4b7
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/cdma.h
@@ -0,0 +1,100 @@
+/*
+ * Tegra host1x Command DMA
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_CDMA_H
+#define __HOST1X_CDMA_H
+
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/list.h>
+
+struct host1x_syncpt;
+struct host1x_userctx_timeout;
+struct host1x_job;
+
+/*
+ * cdma
+ *
+ * This is in charge of a host command DMA channel.
+ * It sends ops to a push buffer and takes responsibility for unpinning
+ * (and possibly freeing) memory after those ops have completed.
+ * Producer:
+ *	begin
+ *		push - send ops to the push buffer
+ *	end - start command DMA and enqueue handles to be unpinned
+ * Consumer:
+ *	update - call to update sync queue and push buffer, unpin memory
+ */
+
+struct push_buffer {
+	u32 *mapped;			/* mapped pushbuffer memory */
+	dma_addr_t phys;		/* physical address of pushbuffer */
+	u32 fence;			/* index we've written */
+	u32 pos;			/* index to write to */
+	u32 size_bytes;
+};
+
+struct buffer_timeout {
+	struct delayed_work wq;		/* work queue */
+	bool initialized;		/* timer one-time setup flag */
+	struct host1x_syncpt *syncpt;	/* buffer completion syncpt */
+	u32 syncpt_val;			/* syncpt value when completed */
+	ktime_t start_ktime;		/* starting time */
+	/* context timeout information */
+	int client;
+};
+
+enum cdma_event {
+	CDMA_EVENT_NONE,		/* not waiting for any event */
+	CDMA_EVENT_SYNC_QUEUE_EMPTY,	/* wait for empty sync queue */
+	CDMA_EVENT_PUSH_BUFFER_SPACE	/* wait for space in push buffer */
+};
+
+struct host1x_cdma {
+	struct mutex lock;		/* controls access to shared state */
+	struct semaphore sem;		/* signalled when event occurs */
+	enum cdma_event event;		/* event that sem is waiting for */
+	unsigned int slots_used;	/* pb slots used in current submit */
+	unsigned int slots_free;	/* pb slots free in current submit */
+	unsigned int first_get;		/* DMAGET value, where submit begins */
+	unsigned int last_pos;		/* last value written to DMAPUT */
+	struct push_buffer push_buffer;	/* channel's push buffer */
+	struct list_head sync_queue;	/* job queue */
+	struct buffer_timeout timeout;	/* channel's timeout state/wq */
+	bool running;
+	bool torndown;
+};
+
+#define cdma_to_channel(cdma) container_of(cdma, struct host1x_channel, cdma)
+#define cdma_to_host1x(cdma) dev_get_drvdata(cdma_to_channel(cdma)->dev->parent)
+#define pb_to_cdma(pb) container_of(pb, struct host1x_cdma, push_buffer)
+
+int host1x_cdma_init(struct host1x_cdma *cdma);
+int host1x_cdma_deinit(struct host1x_cdma *cdma);
+void host1x_cdma_stop(struct host1x_cdma *cdma);
+int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job);
+void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2);
+void host1x_cdma_end(struct host1x_cdma *cdma, struct host1x_job *job);
+void host1x_cdma_update(struct host1x_cdma *cdma);
+void host1x_cdma_peek(struct host1x_cdma *cdma, u32 dmaget, int slot,
+		      u32 *out);
+unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
+				     enum cdma_event event);
+void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
+				   struct device *dev);
+#endif
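
(The producer/consumer comment above maps onto a short call sequence. A hedged sketch of a submit path using only the prototypes declared in this header; the real submit lives in the hw backend, and op1/op2 stand in for any opcode pair.)

    /* producer side; host1x_cdma_begin() takes cdma->lock */
    host1x_cdma_begin(cdma, job);
    host1x_cdma_push(cdma, op1, op2);   /* repeated per opcode pair */
    host1x_cdma_end(cdma, job);         /* kicks DMA, drops the lock */

    /* consumer side, typically from the syncpt interrupt worker */
    host1x_cdma_update(cdma);           /* retires finished jobs */
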
diff --git a/linux-imx/drivers/gpu/host1x/channel.c b/linux-imx/drivers/gpu/host1x/channel.c
new file mode 100644
index 0000000..83ea51b
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/channel.c
@@ -0,0 +1,126 @@
+/*
+ * Tegra host1x Channel
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "channel.h"
+#include "dev.h"
+#include "job.h"
+
+/* Constructor for the host1x device list */
+int host1x_channel_list_init(struct host1x *host)
+{
+	INIT_LIST_HEAD(&host->chlist.list);
+	mutex_init(&host->chlist_mutex);
+
+	if (host->info->nb_channels > BITS_PER_LONG) {
+		WARN(1, "host1x hardware has more channels than supported by the driver\n");
+		return -ENOSYS;
+	}
+
+	return 0;
+}
+
+int host1x_job_submit(struct host1x_job *job)
+{
+	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
+
+	return host1x_hw_channel_submit(host, job);
+}
+
+struct host1x_channel *host1x_channel_get(struct host1x_channel *channel)
+{
+	int err = 0;
+
+	mutex_lock(&channel->reflock);
+
+	if (channel->refcount == 0)
+		err = host1x_cdma_init(&channel->cdma);
+
+	if (!err)
+		channel->refcount++;
+
+	mutex_unlock(&channel->reflock);
+
+	return err ? NULL : channel;
+}
+
+void host1x_channel_put(struct host1x_channel *channel)
+{
+	mutex_lock(&channel->reflock);
+
+	if (channel->refcount == 1) {
+		struct host1x *host = dev_get_drvdata(channel->dev->parent);
+
+		host1x_hw_cdma_stop(host, &channel->cdma);
+		host1x_cdma_deinit(&channel->cdma);
+	}
+
+	channel->refcount--;
+
+	mutex_unlock(&channel->reflock);
+}
+
+struct host1x_channel *host1x_channel_request(struct device *dev)
+{
+	struct host1x *host = dev_get_drvdata(dev->parent);
+	int max_channels = host->info->nb_channels;
+	struct host1x_channel *channel = NULL;
+	int index, err;
+
+	mutex_lock(&host->chlist_mutex);
+
+	index = find_first_zero_bit(&host->allocated_channels, max_channels);
+	if (index >= max_channels)
+		goto fail;
+
+	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+	if (!channel)
+		goto fail;
+
+	err = host1x_hw_channel_init(host, channel, index);
+	if (err < 0)
+		goto fail;
+
+	/* Link device to host1x_channel */
+	channel->dev = dev;
+
+	/* Add to channel list */
+	list_add_tail(&channel->list, &host->chlist.list);
+
+	host->allocated_channels |= BIT(index);
+
+	mutex_unlock(&host->chlist_mutex);
+	return channel;
+
+fail:
+	dev_err(dev, "failed to init channel\n");
+	kfree(channel);
+	mutex_unlock(&host->chlist_mutex);
+	return NULL;
+}
+
+void host1x_channel_free(struct host1x_channel *channel)
+{
+	struct host1x *host = dev_get_drvdata(channel->dev->parent);
+
+	host->allocated_channels &= ~BIT(channel->id);
+	list_del(&channel->list);
+	kfree(channel);
+}
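
(Channel lifetime combines one-time allocation (request/free) with per-use refcounting (get/put): the first get initializes the CDMA and the last put stops it. A hedged usage sketch for a client driver; client_dev and the wrapper function are assumptions, only the host1x_channel_* calls come from this file.)

    static int client_use_channel_sketch(struct device *client_dev)
    {
            struct host1x_channel *ch = host1x_channel_request(client_dev);

            if (!ch)
                    return -EBUSY;

            if (!host1x_channel_get(ch)) {      /* first get inits CDMA */
                    host1x_channel_free(ch);
                    return -ENOMEM;
            }
            /* ... build a job and call host1x_job_submit() ... */
            host1x_channel_put(ch);             /* last put stops CDMA */
            host1x_channel_free(ch);
            return 0;
    }
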
diff --git a/linux-imx/drivers/gpu/host1x/channel.h b/linux-imx/drivers/gpu/host1x/channel.h
new file mode 100644
index 0000000..48723b8
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/channel.h
@@ -0,0 +1,52 @@
+/*
+ * Tegra host1x Channel
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_CHANNEL_H
+#define __HOST1X_CHANNEL_H
+
+#include <linux/io.h>
+
+#include "cdma.h"
+
+struct host1x;
+
+struct host1x_channel {
+	struct list_head list;
+
+	unsigned int refcount;
+	unsigned int id;
+	struct mutex reflock;
+	struct mutex submitlock;
+	void __iomem *regs;
+	struct device *dev;
+	struct host1x_cdma cdma;
+};
+
+/* channel list operations */
+int host1x_channel_list_init(struct host1x *host);
+
+struct host1x_channel *host1x_channel_request(struct device *dev);
+void host1x_channel_free(struct host1x_channel *channel);
+struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_put(struct host1x_channel *channel);
+int host1x_job_submit(struct host1x_job *job);
+
+#define host1x_for_each_channel(host, channel)				\
+	list_for_each_entry(channel, &host->chlist.list, list)
+
+#endif
diff --git a/linux-imx/drivers/gpu/host1x/debug.c b/linux-imx/drivers/gpu/host1x/debug.c
new file mode 100644
index 0000000..3ec7d77
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/debug.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011-2013 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include <linux/io.h>
+
+#include "dev.h"
+#include "debug.h"
+#include "channel.h"
+
+unsigned int host1x_debug_trace_cmdbuf;
+
+static pid_t host1x_debug_force_timeout_pid;
+static u32 host1x_debug_force_timeout_val;
+static u32 host1x_debug_force_timeout_channel;
+
+void host1x_debug_output(struct output *o, const char *fmt, ...)
+{
+	va_list args;
+	int len;
+
+	va_start(args, fmt);
+	len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
+	va_end(args);
+
+	/* vsnprintf() returns the would-be length; clamp to the buffer */
+	if (len >= (int)sizeof(o->buf))
+		len = sizeof(o->buf) - 1;
+
+	o->fn(o->ctx, o->buf, len);
+}
+
+static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
+{
+	struct host1x *m = dev_get_drvdata(ch->dev->parent);
+	struct output *o = data;
+
+	mutex_lock(&ch->reflock);
+	if (ch->refcount) {
+		mutex_lock(&ch->cdma.lock);
+		if (show_fifo)
+			host1x_hw_show_channel_fifo(m, ch, o);
+		host1x_hw_show_channel_cdma(m, ch, o);
+		mutex_unlock(&ch->cdma.lock);
+	}
+	mutex_unlock(&ch->reflock);
+
+	return 0;
+}
+
+static void show_syncpts(struct host1x *m, struct output *o)
+{
+	int i;
+	host1x_debug_output(o, "---- syncpts ----\n");
+	for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
+		u32 max = host1x_syncpt_read_max(m->syncpt + i);
+		u32 min = host1x_syncpt_load(m->syncpt + i);
+		if (!min && !max)
+			continue;
+		host1x_debug_output(o, "id %d (%s) min %d max %d\n",
+				    i, m->syncpt[i].name, min, max);
+	}
+
+	for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
+		u32 base_val;
+		base_val = host1x_syncpt_load_wait_base(m->syncpt + i);
+		if (base_val)
+			host1x_debug_output(o, "waitbase id %d val %d\n", i,
+					    base_val);
+	}
+
+	host1x_debug_output(o, "\n");
+}
+
+static void show_all(struct host1x *m, struct output *o)
+{
+	struct host1x_channel *ch;
+
+	host1x_hw_show_mlocks(m, o);
+	show_syncpts(m, o);
+	host1x_debug_output(o, "---- channels ----\n");
+
+	host1x_for_each_channel(m, ch)
+		show_channels(ch, o, true);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void show_all_no_fifo(struct host1x *host1x, struct output *o)
+{
+	struct host1x_channel *ch;
+
+	host1x_hw_show_mlocks(host1x, o);
+	show_syncpts(host1x, o);
+	host1x_debug_output(o, "---- channels ----\n");
+
+	host1x_for_each_channel(host1x, ch)
+		show_channels(ch, o, false);
+}
+
+static int host1x_debug_show_all(struct seq_file *s, void *unused)
+{
+	struct output o = {
+		.fn = write_to_seqfile,
+		.ctx = s
+	};
+	show_all(s->private, &o);
+	return 0;
+}
+
+static int host1x_debug_show(struct seq_file *s, void *unused)
+{
+	struct output o = {
+		.fn = write_to_seqfile,
+		.ctx = s
+	};
+	show_all_no_fifo(s->private, &o);
+	return 0;
+}
+
+static int host1x_debug_open_all(struct inode *inode, struct file *file)
+{
+	return single_open(file, host1x_debug_show_all, inode->i_private);
+}
+
+static const struct file_operations host1x_debug_all_fops = {
+	.open		= host1x_debug_open_all,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int host1x_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, host1x_debug_show, inode->i_private);
+}
+
+static const struct file_operations host1x_debug_fops = {
+	.open		= host1x_debug_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+void host1x_debug_init(struct host1x *host1x)
+{
+	struct dentry *de = debugfs_create_dir("tegra-host1x", NULL);
+
+	if (!de)
+		return;
+
+	/* Store the created entry */
+	host1x->debugfs = de;
+
+	debugfs_create_file("status", S_IRUGO, de, host1x, &host1x_debug_fops);
+	debugfs_create_file("status_all", S_IRUGO, de, host1x,
+			    &host1x_debug_all_fops);
+
+	debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR, de,
+			   &host1x_debug_trace_cmdbuf);
+
+	host1x_hw_debug_init(host1x, de);
+
+	debugfs_create_u32("force_timeout_pid", S_IRUGO|S_IWUSR, de,
+			   &host1x_debug_force_timeout_pid);
+	debugfs_create_u32("force_timeout_val", S_IRUGO|S_IWUSR, de,
+			   &host1x_debug_force_timeout_val);
+	debugfs_create_u32("force_timeout_channel", S_IRUGO|S_IWUSR, de,
+			   &host1x_debug_force_timeout_channel);
+}
+
+void host1x_debug_deinit(struct host1x *host1x)
+{
+	debugfs_remove_recursive(host1x->debugfs);
+}
+#else
+void host1x_debug_init(struct host1x *host1x)
+{
+}
+void host1x_debug_deinit(struct host1x *host1x)
+{
+}
+#endif
+
+void host1x_debug_dump(struct host1x *host1x)
+{
+	struct output o = {
+		.fn = write_to_printk
+	};
+	show_all(host1x, &o);
+}
+
+void host1x_debug_dump_syncpts(struct host1x *host1x)
+{
+	struct output o = {
+		.fn = write_to_printk
+	};
+	show_syncpts(host1x, &o);
+}
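
(The struct output indirection above lets the same dump code target either a seq_file or printk; adding another sink only requires a write callback. A hedged sketch of a buffer-capturing sink, all names hypothetical.)

    /* hypothetical sink appending to a caller-provided buffer */
    struct dump_ctx {
            char *buf;
            size_t len, cap;
    };

    static void write_to_buffer(void *ctx, const char *str, size_t len)
    {
            struct dump_ctx *d = ctx;
            size_t n = min(len, d->cap - d->len);

            memcpy(d->buf + d->len, str, n);
            d->len += n;
    }

    /* usage: struct output o = { .fn = write_to_buffer, .ctx = &d }; */
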
diff --git a/linux-imx/drivers/gpu/host1x/debug.h b/linux-imx/drivers/gpu/host1x/debug.h
new file mode 100644
index 0000000..4595b2e
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/debug.h
@@ -0,0 +1,51 @@
+/*
+ * Tegra host1x Debug
+ *
+ * Copyright (c) 2011-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __HOST1X_DEBUG_H
+#define __HOST1X_DEBUG_H
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+struct host1x;
+
+struct output {
+	void (*fn)(void *ctx, const char *str, size_t len);
+	void *ctx;
+	char buf[256];
+};
+
+static inline void write_to_seqfile(void *ctx, const char *str, size_t len)
+{
+	seq_write((struct seq_file *)ctx, str, len);
+}
+
+static inline void write_to_printk(void *ctx, const char *str, size_t len)
+{
+	pr_info("%s", str);
+}
+
+void __printf(2, 3) host1x_debug_output(struct output *o, const char *fmt, ...);
+
+extern unsigned int host1x_debug_trace_cmdbuf;
+
+void host1x_debug_init(struct host1x *host1x);
+void host1x_debug_deinit(struct host1x *host1x);
+void host1x_debug_dump(struct host1x *host1x);
+void host1x_debug_dump_syncpts(struct host1x *host1x);
+
+#endif
diff --git a/linux-imx/drivers/gpu/host1x/dev.c b/linux-imx/drivers/gpu/host1x/dev.c
new file mode 100644
index 0000000..28e28a2
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/dev.c
@@ -0,0 +1,246 @@
+/*
+ * Tegra host1x driver
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/host1x.h>
+
+#include "dev.h"
+#include "intr.h"
+#include "channel.h"
+#include "debug.h"
+#include "hw/host1x01.h"
+#include "host1x_client.h"
+
+void host1x_set_drm_data(struct device *dev, void *data)
+{
+	struct host1x *host1x = dev_get_drvdata(dev);
+	host1x->drm_data = data;
+}
+
+void *host1x_get_drm_data(struct device *dev)
+{
+	struct host1x *host1x = dev_get_drvdata(dev);
+	return host1x->drm_data;
+}
+
+void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
+{
+	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
+
+	writel(v, sync_regs + r);
+}
+
+u32 host1x_sync_readl(struct host1x *host1x, u32 r)
+{
+	void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
+
+	return readl(sync_regs + r);
+}
+
+void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
+{
+	writel(v, ch->regs + r);
+}
+
+u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
+{
+	return readl(ch->regs + r);
+}
+
+static const struct host1x_info host1x01_info = {
+	.nb_channels	= 8,
+	.nb_pts		= 32,
+	.nb_mlocks	= 16,
+	.nb_bases	= 8,
+	.init		= host1x01_init,
+	.sync_offset	= 0x3000,
+};
+
+static struct of_device_id host1x_of_match[] = {
+	{ .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
+	{ .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, host1x_of_match);
+
+static int host1x_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *id;
+	struct host1x *host;
+	struct resource *regs;
+	int syncpt_irq;
+	int err;
+
+	id = of_match_device(host1x_of_match, &pdev->dev);
+	if (!id)
+		return -EINVAL;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs) {
+		dev_err(&pdev->dev, "failed to get registers\n");
+		return -ENXIO;
+	}
+
+	syncpt_irq = platform_get_irq(pdev, 0);
+	if (syncpt_irq < 0) {
+		dev_err(&pdev->dev, "failed to get IRQ\n");
+		return -ENXIO;
+	}
+
+	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+
+	host->dev = &pdev->dev;
+	host->info = id->data;
+
+	/* set common host1x device data */
+	platform_set_drvdata(pdev, host);
+
+	host->regs = devm_ioremap_resource(&pdev->dev, regs);
+	if (IS_ERR(host->regs))
+		return PTR_ERR(host->regs);
+
+	if (host->info->init) {
+		err = host->info->init(host);
+		if (err)
+			return err;
+	}
+
+	host->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(host->clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		err = PTR_ERR(host->clk);
+		return err;
+	}
+
+	err = host1x_channel_list_init(host);
+	if (err) {
+		dev_err(&pdev->dev, "failed to initialize channel list\n");
+		return err;
+	}
+
+	err = clk_prepare_enable(host->clk);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to enable clock\n");
+		return err;
+	}
+
+	err = host1x_syncpt_init(host);
+	if (err) {
+		dev_err(&pdev->dev, "failed to initialize syncpts\n");
+		return err;
+	}
+
+	err = host1x_intr_init(host, syncpt_irq);
+	if (err) {
+		dev_err(&pdev->dev, "failed to initialize interrupts\n");
+		goto fail_deinit_syncpt;
+	}
+
+	host1x_debug_init(host);
+
+	host1x_drm_alloc(pdev);
+
+	return 0;
+
+fail_deinit_syncpt:
+	host1x_syncpt_deinit(host);
+	return err;
+}
+
+static int __exit host1x_remove(struct platform_device *pdev)
+{
+	struct host1x *host = platform_get_drvdata(pdev);
+
+	host1x_intr_deinit(host);
+	host1x_syncpt_deinit(host);
+	clk_disable_unprepare(host->clk);
+
+	return 0;
+}
+
+static struct platform_driver tegra_host1x_driver = {
+	.probe = host1x_probe,
+	.remove = __exit_p(host1x_remove),
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "tegra-host1x",
+		.of_match_table = host1x_of_match,
+	},
+};
+
+static int __init tegra_host1x_init(void)
+{
+	int err;
+
+	err = platform_driver_register(&tegra_host1x_driver);
+	if (err < 0)
+		return err;
+
+#ifdef CONFIG_DRM_TEGRA
+	err = platform_driver_register(&tegra_dc_driver);
+	if (err < 0)
+		goto unregister_host1x;
+
+	err = platform_driver_register(&tegra_hdmi_driver);
+	if (err < 0)
+		goto unregister_dc;
+
+	err = platform_driver_register(&tegra_gr2d_driver);
+	if (err < 0)
+		goto unregister_hdmi;
+#endif
+
+	return 0;
+
+#ifdef CONFIG_DRM_TEGRA
+unregister_hdmi:
+	platform_driver_unregister(&tegra_hdmi_driver);
+unregister_dc:
+	platform_driver_unregister(&tegra_dc_driver);
+unregister_host1x:
+	platform_driver_unregister(&tegra_host1x_driver);
+	return err;
+#endif
+}
+module_init(tegra_host1x_init);
+
+static void __exit tegra_host1x_exit(void)
+{
+#ifdef CONFIG_DRM_TEGRA
+	platform_driver_unregister(&tegra_gr2d_driver);
+	platform_driver_unregister(&tegra_hdmi_driver);
+	platform_driver_unregister(&tegra_dc_driver);
+#endif
+	platform_driver_unregister(&tegra_host1x_driver);
+}
+module_exit(tegra_host1x_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
+MODULE_DESCRIPTION("Host1x driver for Tegra products");
+MODULE_LICENSE("GPL");
diff --git a/linux-imx/drivers/gpu/host1x/dev.h b/linux-imx/drivers/gpu/host1x/dev.h
new file mode 100644
index 0000000..a1607d6
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/dev.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_DEV_H
+#define HOST1X_DEV_H
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+
+#include "channel.h"
+#include "syncpt.h"
+#include "intr.h"
+#include "cdma.h"
+#include "job.h"
+
+struct host1x_syncpt;
+struct host1x_channel;
+struct host1x_cdma;
+struct host1x_job;
+struct push_buffer;
+struct output;
+struct dentry;
+
+struct host1x_channel_ops {
+	int (*init)(struct host1x_channel *channel, struct host1x *host,
+		    unsigned int id);
+	int (*submit)(struct host1x_job *job);
+};
+
+struct host1x_cdma_ops {
+	void (*start)(struct host1x_cdma *cdma);
+	void (*stop)(struct host1x_cdma *cdma);
+	void (*flush)(struct  host1x_cdma *cdma);
+	int (*timeout_init)(struct host1x_cdma *cdma, u32 syncpt_id);
+	void (*timeout_destroy)(struct host1x_cdma *cdma);
+	void (*freeze)(struct host1x_cdma *cdma);
+	void (*resume)(struct host1x_cdma *cdma, u32 getptr);
+	void (*timeout_cpu_incr)(struct host1x_cdma *cdma, u32 getptr,
+				 u32 syncpt_incrs, u32 syncval, u32 nr_slots);
+};
+
+struct host1x_pushbuffer_ops {
+	void (*init)(struct push_buffer *pb);
+};
+
+struct host1x_debug_ops {
+	void (*debug_init)(struct dentry *de);
+	void (*show_channel_cdma)(struct host1x *host,
+				  struct host1x_channel *ch,
+				  struct output *o);
+	void (*show_channel_fifo)(struct host1x *host,
+				  struct host1x_channel *ch,
+				  struct output *o);
+	void (*show_mlocks)(struct host1x *host, struct output *output);
+
+};
+
+struct host1x_syncpt_ops {
+	void (*restore)(struct host1x_syncpt *syncpt);
+	void (*restore_wait_base)(struct host1x_syncpt *syncpt);
+	void (*load_wait_base)(struct host1x_syncpt *syncpt);
+	u32 (*load)(struct host1x_syncpt *syncpt);
+	void (*cpu_incr)(struct host1x_syncpt *syncpt);
+	int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr);
+};
+
+struct host1x_intr_ops {
+	int (*init_host_sync)(struct host1x *host, u32 cpm,
+		void (*syncpt_thresh_work)(struct work_struct *work));
+	void (*set_syncpt_threshold)(
+		struct host1x *host, u32 id, u32 thresh);
+	void (*enable_syncpt_intr)(struct host1x *host, u32 id);
+	void (*disable_syncpt_intr)(struct host1x *host, u32 id);
+	void (*disable_all_syncpt_intrs)(struct host1x *host);
+	int (*free_syncpt_irq)(struct host1x *host);
+};
+
+struct host1x_info {
+	int	nb_channels;		/* host1x: num channels supported */
+	int	nb_pts;			/* host1x: num syncpoints supported */
+	int	nb_bases;		/* host1x: num wait bases supported */
+	int	nb_mlocks;		/* host1x: number of mlocks */
+	int	(*init)(struct host1x *); /* initialize per SoC ops */
+	int	sync_offset;
+};
+
+struct host1x {
+	const struct host1x_info *info;
+
+	void __iomem *regs;
+	struct host1x_syncpt *syncpt;
+	struct device *dev;
+	struct clk *clk;
+
+	struct mutex intr_mutex;
+	struct workqueue_struct *intr_wq;
+	int intr_syncpt_irq;
+
+	const struct host1x_syncpt_ops *syncpt_op;
+	const struct host1x_intr_ops *intr_op;
+	const struct host1x_channel_ops *channel_op;
+	const struct host1x_cdma_ops *cdma_op;
+	const struct host1x_pushbuffer_ops *cdma_pb_op;
+	const struct host1x_debug_ops *debug_op;
+
+	struct host1x_syncpt *nop_sp;
+
+	struct mutex chlist_mutex;
+	struct host1x_channel chlist;
+	unsigned long allocated_channels;
+	unsigned int num_allocated_channels;
+
+	struct dentry *debugfs;
+
+	void *drm_data;
+};
+
+void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r);
+u32 host1x_sync_readl(struct host1x *host1x, u32 r);
+void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r);
+u32 host1x_ch_readl(struct host1x_channel *ch, u32 r);
+
+static inline void host1x_hw_syncpt_restore(struct host1x *host,
+					    struct host1x_syncpt *sp)
+{
+	host->syncpt_op->restore(sp);
+}
+
+static inline void host1x_hw_syncpt_restore_wait_base(struct host1x *host,
+						      struct host1x_syncpt *sp)
+{
+	host->syncpt_op->restore_wait_base(sp);
+}
+
+static inline void host1x_hw_syncpt_load_wait_base(struct host1x *host,
+						   struct host1x_syncpt *sp)
+{
+	host->syncpt_op->load_wait_base(sp);
+}
+
+static inline u32 host1x_hw_syncpt_load(struct host1x *host,
+					struct host1x_syncpt *sp)
+{
+	return host->syncpt_op->load(sp);
+}
+
+static inline void host1x_hw_syncpt_cpu_incr(struct host1x *host,
+					     struct host1x_syncpt *sp)
+{
+	host->syncpt_op->cpu_incr(sp);
+}
+
+static inline int host1x_hw_syncpt_patch_wait(struct host1x *host,
+					      struct host1x_syncpt *sp,
+					      void *patch_addr)
+{
+	return host->syncpt_op->patch_wait(sp, patch_addr);
+}
+
+static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
+			void (*syncpt_thresh_work)(struct work_struct *))
+{
+	return host->intr_op->init_host_sync(host, cpm, syncpt_thresh_work);
+}
+
+static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
+						       u32 id, u32 thresh)
+{
+	host->intr_op->set_syncpt_threshold(host, id, thresh);
+}
+
+static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
+						     u32 id)
+{
+	host->intr_op->enable_syncpt_intr(host, id);
+}
+
+static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
+						      u32 id)
+{
+	host->intr_op->disable_syncpt_intr(host, id);
+}
+
+static inline void host1x_hw_intr_disable_all_syncpt_intrs(struct host1x *host)
+{
+	host->intr_op->disable_all_syncpt_intrs(host);
+}
+
+static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
+{
+	return host->intr_op->free_syncpt_irq(host);
+}
+
+static inline int host1x_hw_channel_init(struct host1x *host,
+					 struct host1x_channel *channel,
+					 int chid)
+{
+	return host->channel_op->init(channel, host, chid);
+}
+
+static inline int host1x_hw_channel_submit(struct host1x *host,
+					   struct host1x_job *job)
+{
+	return host->channel_op->submit(job);
+}
+
+static inline void host1x_hw_cdma_start(struct host1x *host,
+					struct host1x_cdma *cdma)
+{
+	host->cdma_op->start(cdma);
+}
+
+static inline void host1x_hw_cdma_stop(struct host1x *host,
+				       struct host1x_cdma *cdma)
+{
+	host->cdma_op->stop(cdma);
+}
+
+static inline void host1x_hw_cdma_flush(struct host1x *host,
+					struct host1x_cdma *cdma)
+{
+	host->cdma_op->flush(cdma);
+}
+
+static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
+					      struct host1x_cdma *cdma,
+					      u32 syncpt_id)
+{
+	return host->cdma_op->timeout_init(cdma, syncpt_id);
+}
+
+static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
+						  struct host1x_cdma *cdma)
+{
+	host->cdma_op->timeout_destroy(cdma);
+}
+
+static inline void host1x_hw_cdma_freeze(struct host1x *host,
+					 struct host1x_cdma *cdma)
+{
+	host->cdma_op->freeze(cdma);
+}
+
+static inline void host1x_hw_cdma_resume(struct host1x *host,
+					 struct host1x_cdma *cdma, u32 getptr)
+{
+	host->cdma_op->resume(cdma, getptr);
+}
+
+static inline void host1x_hw_cdma_timeout_cpu_incr(struct host1x *host,
+						   struct host1x_cdma *cdma,
+						   u32 getptr,
+						   u32 syncpt_incrs,
+						   u32 syncval, u32 nr_slots)
+{
+	host->cdma_op->timeout_cpu_incr(cdma, getptr, syncpt_incrs, syncval,
+					nr_slots);
+}
+
+static inline void host1x_hw_pushbuffer_init(struct host1x *host,
+					     struct push_buffer *pb)
+{
+	host->cdma_pb_op->init(pb);
+}
+
+static inline void host1x_hw_debug_init(struct host1x *host, struct dentry *de)
+{
+	if (host->debug_op && host->debug_op->debug_init)
+		host->debug_op->debug_init(de);
+}
+
+static inline void host1x_hw_show_channel_cdma(struct host1x *host,
+					       struct host1x_channel *channel,
+					       struct output *o)
+{
+	host->debug_op->show_channel_cdma(host, channel, o);
+}
+
+static inline void host1x_hw_show_channel_fifo(struct host1x *host,
+					       struct host1x_channel *channel,
+					       struct output *o)
+{
+	host->debug_op->show_channel_fifo(host, channel, o);
+}
+
+static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
+{
+	host->debug_op->show_mlocks(host, o);
+}
+
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_gr2d_driver;
+
+#endif
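
(Every hardware access above is funneled through the per-SoC op tables, which the .init callback in host1x_info is expected to populate. A hedged sketch of such an init; the table symbols are assumptions modeled on the hw/ backend, not copied from it.)

    /* hypothetical per-SoC init wiring the op tables */
    int host1x01_init_sketch(struct host1x *host)
    {
            host->channel_op = &host1x_channel_ops;
            host->cdma_op = &host1x_cdma_ops;
            host->cdma_pb_op = &host1x_pushbuffer_ops;
            host->syncpt_op = &host1x_syncpt_ops;
            host->intr_op = &host1x_intr_ops;
            host->debug_op = &host1x_debug_ops;
            return 0;
    }
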
diff --git a/linux-imx/drivers/gpu/host1x/drm/Kconfig b/linux-imx/drivers/gpu/host1x/drm/Kconfig
new file mode 100644
index 0000000..69853a4
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/drm/Kconfig
@@ -0,0 +1,29 @@
+config DRM_TEGRA
+	bool "NVIDIA Tegra DRM"
+	depends on DRM
+	select DRM_KMS_HELPER
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	help
+	  Choose this option if you have an NVIDIA Tegra SoC.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called tegra-drm.
+
+if DRM_TEGRA
+
+config DRM_TEGRA_STAGING
+	bool "Enable HOST1X interface"
+	depends on STAGING
+	help
+	  Say yes if HOST1X should be available for userspace DRM users.
+
+	  If unsure, choose N.
+
+config DRM_TEGRA_DEBUG
+	bool "NVIDIA Tegra DRM debug support"
+	help
+	  Say yes here to enable debugging support.
+
+endif
diff --git a/linux-imx/drivers/gpu/host1x/drm/dc.c b/linux-imx/drivers/gpu/host1x/drm/dc.c
new file mode 100644
index 0000000..8c04943
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/drm/dc.c
@@ -0,0 +1,1195 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/clk/tegra.h>
+
+#include "host1x_client.h"
+#include "dc.h"
+#include "drm.h"
+#include "gem.h"
+
+struct tegra_plane {
+	struct drm_plane base;
+	unsigned int index;
+};
+
+static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
+{
+	return container_of(plane, struct tegra_plane, base);
+}
+
+static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+			      struct drm_framebuffer *fb, int crtc_x,
+			      int crtc_y, unsigned int crtc_w,
+			      unsigned int crtc_h, uint32_t src_x,
+			      uint32_t src_y, uint32_t src_w, uint32_t src_h)
+{
+	struct tegra_plane *p = to_tegra_plane(plane);
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	struct tegra_dc_window window;
+	unsigned int i;
+
+	memset(&window, 0, sizeof(window));
+	window.src.x = src_x >> 16;
+	window.src.y = src_y >> 16;
+	window.src.w = src_w >> 16;
+	window.src.h = src_h >> 16;
+	window.dst.x = crtc_x;
+	window.dst.y = crtc_y;
+	window.dst.w = crtc_w;
+	window.dst.h = crtc_h;
+	window.format = tegra_dc_format(fb->pixel_format);
+	window.bits_per_pixel = fb->bits_per_pixel;
+
+	for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
+		struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
+
+		window.base[i] = bo->paddr + fb->offsets[i];
+
+		/*
+		 * Tegra doesn't support different strides for U and V planes
+		 * so we print an error if the user tries to display a
+		 * framebuffer with such a configuration.
+		 */
+		if (i >= 2) {
+			if (fb->pitches[i] != window.stride[1])
+				DRM_ERROR("unsupported UV-plane configuration\n");
+		} else {
+			window.stride[i] = fb->pitches[i];
+		}
+	}
+
+	return tegra_dc_setup_window(dc, p->index, &window);
+}
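
(DRM hands tegra_plane_update() its src_* coordinates in 16.16 fixed point, hence the >> 16 before the window is programmed. A tiny worked example with assumed values:)

    /* 16.16 fixed point: 720.5 truncates to 720 source pixels */
    u32 src_w = (720 << 16) | 0x8000;   /* 720.5 in 16.16 */
    unsigned int w = src_w >> 16;       /* -> 720, fraction dropped */
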
+
+static int tegra_plane_disable(struct drm_plane *plane)
+{
+	struct tegra_dc *dc = to_tegra_dc(plane->crtc);
+	struct tegra_plane *p = to_tegra_plane(plane);
+	unsigned long value;
+
+	value = WINDOW_A_SELECT << p->index;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+	value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+	value &= ~WIN_ENABLE;
+	tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+	tegra_dc_writel(dc, WIN_A_UPDATE << p->index, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, WIN_A_ACT_REQ << p->index, DC_CMD_STATE_CONTROL);
+
+	return 0;
+}
+
+static void tegra_plane_destroy(struct drm_plane *plane)
+{
+	tegra_plane_disable(plane);
+	drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs tegra_plane_funcs = {
+	.update_plane = tegra_plane_update,
+	.disable_plane = tegra_plane_disable,
+	.destroy = tegra_plane_destroy,
+};
+
+static const uint32_t plane_formats[] = {
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_RGB565,
+	DRM_FORMAT_UYVY,
+	DRM_FORMAT_YUV420,
+	DRM_FORMAT_YUV422,
+};
+
+static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
+{
+	unsigned int i;
+	int err = 0;
+
+	for (i = 0; i < 2; i++) {
+		struct tegra_plane *plane;
+
+		plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+		if (!plane)
+			return -ENOMEM;
+
+		plane->index = 1 + i;
+
+		err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
+				     &tegra_plane_funcs, plane_formats,
+				     ARRAY_SIZE(plane_formats), false);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
+static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
+			     struct drm_framebuffer *fb)
+{
+	struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
+	unsigned long value;
+
+	tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+	value = fb->offsets[0] + y * fb->pitches[0] +
+		x * fb->bits_per_pixel / 8;
+
+	tegra_dc_writel(dc, bo->paddr + value, DC_WINBUF_START_ADDR);
+	tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
+
+	value = GENERAL_UPDATE | WIN_A_UPDATE;
+	tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+
+	value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+	tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+
+	return 0;
+}
+
+void tegra_dc_enable_vblank(struct tegra_dc *dc)
+{
+	unsigned long value, flags;
+
+	spin_lock_irqsave(&dc->lock, flags);
+
+	value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+	value |= VBLANK_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+	spin_unlock_irqrestore(&dc->lock, flags);
+}
+
+void tegra_dc_disable_vblank(struct tegra_dc *dc)
+{
+	unsigned long value, flags;
+
+	spin_lock_irqsave(&dc->lock, flags);
+
+	value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+	value &= ~VBLANK_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+	spin_unlock_irqrestore(&dc->lock, flags);
+}
+
+static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
+{
+	struct drm_device *drm = dc->base.dev;
+	struct drm_crtc *crtc = &dc->base;
+	unsigned long flags, base;
+	struct tegra_bo *bo;
+
+	if (!dc->event)
+		return;
+
+	bo = tegra_fb_get_plane(crtc->fb, 0);
+
+	/* check if new start address has been latched */
+	tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
+	base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
+	tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
+
+	if (base == bo->paddr + crtc->fb->offsets[0]) {
+		spin_lock_irqsave(&drm->event_lock, flags);
+		drm_send_vblank_event(drm, dc->pipe, dc->event);
+		drm_vblank_put(drm, dc->pipe);
+		dc->event = NULL;
+		spin_unlock_irqrestore(&drm->event_lock, flags);
+	}
+}
+
+void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	struct drm_device *drm = crtc->dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drm->event_lock, flags);
+
+	if (dc->event && dc->event->base.file_priv == file) {
+		dc->event->base.destroy(&dc->event->base);
+		drm_vblank_put(drm, dc->pipe);
+		dc->event = NULL;
+	}
+
+	spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			      struct drm_pending_vblank_event *event)
+{
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	struct drm_device *drm = crtc->dev;
+
+	if (dc->event)
+		return -EBUSY;
+
+	if (event) {
+		event->pipe = dc->pipe;
+		dc->event = event;
+		drm_vblank_get(drm, dc->pipe);
+	}
+
+	tegra_dc_set_base(dc, 0, 0, fb);
+	crtc->fb = fb;
+
+	return 0;
+}
+
+static const struct drm_crtc_funcs tegra_crtc_funcs = {
+	.page_flip = tegra_dc_page_flip,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = drm_crtc_cleanup,
+};
+
+static void tegra_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *drm = crtc->dev;
+	struct drm_plane *plane;
+
+	list_for_each_entry(plane, &drm->mode_config.plane_list, head) {
+		if (plane->crtc == crtc) {
+			tegra_plane_disable(plane);
+			plane->crtc = NULL;
+
+			if (plane->fb) {
+				drm_framebuffer_unreference(plane->fb);
+				plane->fb = NULL;
+			}
+		}
+	}
+}
+
+static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted)
+{
+	return true;
+}
+
+static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v,
+				  unsigned int bpp)
+{
+	fixed20_12 outf = dfixed_init(out);
+	fixed20_12 inf = dfixed_init(in);
+	u32 dda_inc;
+	int max;
+
+	if (v)
+		max = 15;
+	else {
+		switch (bpp) {
+		case 2:
+			max = 8;
+			break;
+
+		default:
+			WARN_ON_ONCE(1);
+			/* fallthrough */
+		case 4:
+			max = 4;
+			break;
+		}
+	}
+
+	outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
+	inf.full -= dfixed_const(1);
+
+	dda_inc = dfixed_div(inf, outf);
+	dda_inc = min_t(u32, dda_inc, dfixed_const(max));
+
+	return dda_inc;
+}
+
+static inline u32 compute_initial_dda(unsigned int in)
+{
+	fixed20_12 inf = dfixed_init(in);
+	return dfixed_frac(inf);
+}
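
(compute_dda_inc() is essentially (in - 1) / (out - 1) in 20.12 fixed point, clamped to a per-mode maximum, while compute_initial_dda() keeps only the fractional part of the source offset. A worked example with assumed values:)

    /* downscaling 1920 -> 960 pixels, 20.12 fixed point:
     *   dda_inc = (1919 << 12) / 959 = 7860224 / 959 ~= 8196 = 0x2004
     * i.e. a step of about 2.001 source pixels per output pixel; an
     * integral src.x gives compute_initial_dda() == 0.
     */
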
+
+static int tegra_dc_set_timings(struct tegra_dc *dc,
+				struct drm_display_mode *mode)
+{
+	/* TODO: For HDMI compliance, h & v ref_to_sync should be set to 1 */
+	unsigned int h_ref_to_sync = 0;
+	unsigned int v_ref_to_sync = 0;
+	unsigned long value;
+
+	tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
+
+	value = (v_ref_to_sync << 16) | h_ref_to_sync;
+	tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC);
+
+	value = ((mode->vsync_end - mode->vsync_start) << 16) |
+		((mode->hsync_end - mode->hsync_start) <<  0);
+	tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH);
+
+	value = ((mode->vtotal - mode->vsync_end) << 16) |
+		((mode->htotal - mode->hsync_end) <<  0);
+	tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
+
+	value = ((mode->vsync_start - mode->vdisplay) << 16) |
+		((mode->hsync_start - mode->hdisplay) <<  0);
+	tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH);
+
+	value = (mode->vdisplay << 16) | mode->hdisplay;
+	tegra_dc_writel(dc, value, DC_DISP_ACTIVE);
+
+	return 0;
+}
+
+static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
+				struct drm_display_mode *mode,
+				unsigned long *div)
+{
+	unsigned long pclk = mode->clock * 1000, rate;
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	struct tegra_output *output = NULL;
+	struct drm_encoder *encoder;
+	long err;
+
+	list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head)
+		if (encoder->crtc == crtc) {
+			output = encoder_to_output(encoder);
+			break;
+		}
+
+	if (!output)
+		return -ENODEV;
+
+	/*
+	 * This assumes that the display controller will divide its parent
+	 * clock by 2 to generate the pixel clock.
+	 */
+	err = tegra_output_setup_clock(output, dc->clk, pclk * 2);
+	if (err < 0) {
+		dev_err(dc->dev, "failed to setup clock: %ld\n", err);
+		return err;
+	}
+
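+	/*
+	 * Invert the divider formula: pclk = rate * 2 / (div + 2). With the
+	 * parent running at exactly twice the pixel clock this yields
+	 * div = 2.
+	 */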
+	rate = clk_get_rate(dc->clk);
+	*div = (rate * 2 / pclk) - 2;
+
+	DRM_DEBUG_KMS("rate: %lu, div: %lu\n", rate, *div);
+
+	return 0;
+}
+
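+/*
+ * Check whether a window format is YUV and, if so, report through @planar
+ * whether it stores luma and chroma in separate planes.
+ */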
+static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar)
+{
+	switch (format) {
+	case WIN_COLOR_DEPTH_YCbCr422:
+	case WIN_COLOR_DEPTH_YUV422:
+		if (planar)
+			*planar = false;
+
+		return true;
+
+	case WIN_COLOR_DEPTH_YCbCr420P:
+	case WIN_COLOR_DEPTH_YUV420P:
+	case WIN_COLOR_DEPTH_YCbCr422P:
+	case WIN_COLOR_DEPTH_YUV422P:
+	case WIN_COLOR_DEPTH_YCbCr422R:
+	case WIN_COLOR_DEPTH_YUV422R:
+	case WIN_COLOR_DEPTH_YCbCr422RA:
+	case WIN_COLOR_DEPTH_YUV422RA:
+		if (planar)
+			*planar = true;
+
+		return true;
+	}
+
+	return false;
+}
+
+int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
+			  const struct tegra_dc_window *window)
+{
+	unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
+	unsigned long value;
+	bool yuv, planar;
+
+	/*
+	 * For YUV planar modes, the number of bytes per pixel takes into
+	 * account only the luma component and therefore is 1.
+	 */
+	yuv = tegra_dc_format_is_yuv(window->format, &planar);
+	if (!yuv)
+		bpp = window->bits_per_pixel / 8;
+	else
+		bpp = planar ? 1 : 2;
+
+	value = WINDOW_A_SELECT << index;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+	tegra_dc_writel(dc, window->format, DC_WIN_COLOR_DEPTH);
+	tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
+
+	value = V_POSITION(window->dst.y) | H_POSITION(window->dst.x);
+	tegra_dc_writel(dc, value, DC_WIN_POSITION);
+
+	value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w);
+	tegra_dc_writel(dc, value, DC_WIN_SIZE);
+
+	h_offset = window->src.x * bpp;
+	v_offset = window->src.y;
+	h_size = window->src.w * bpp;
+	v_size = window->src.h;
+
+	value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
+	tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
+
+	/*
+	 * For DDA computations the number of bytes per pixel for YUV planar
+	 * modes needs to take into account all Y, U and V components.
+	 */
+	if (yuv && planar)
+		bpp = 2;
+
+	h_dda = compute_dda_inc(window->src.w, window->dst.w, false, bpp);
+	v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp);
+
+	value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
+	tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
+
+	h_dda = compute_initial_dda(window->src.x);
+	v_dda = compute_initial_dda(window->src.y);
+
+	tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
+	tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+
+	tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+	tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+
+	tegra_dc_writel(dc, window->base[0], DC_WINBUF_START_ADDR);
+
+	if (yuv && planar) {
+		tegra_dc_writel(dc, window->base[1], DC_WINBUF_START_ADDR_U);
+		tegra_dc_writel(dc, window->base[2], DC_WINBUF_START_ADDR_V);
+		value = window->stride[1] << 16 | window->stride[0];
+		tegra_dc_writel(dc, value, DC_WIN_LINE_STRIDE);
+	} else {
+		tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
+	}
+
+	tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+	tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+
+	value = WIN_ENABLE;
+
+	if (yuv) {
+		/* setup default colorspace conversion coefficients */
+		tegra_dc_writel(dc, 0x00f0, DC_WIN_CSC_YOF);
+		tegra_dc_writel(dc, 0x012a, DC_WIN_CSC_KYRGB);
+		tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KUR);
+		tegra_dc_writel(dc, 0x0198, DC_WIN_CSC_KVR);
+		tegra_dc_writel(dc, 0x039b, DC_WIN_CSC_KUG);
+		tegra_dc_writel(dc, 0x032f, DC_WIN_CSC_KVG);
+		tegra_dc_writel(dc, 0x0204, DC_WIN_CSC_KUB);
+		tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KVB);
+
+		value |= CSC_ENABLE;
+	} else if (window->bits_per_pixel < 24) {
+		value |= COLOR_EXPAND;
+	}
+
+	tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+	/*
+	 * Disable blending and assume Window A is the bottom-most window,
+	 * Window C is the top-most window and Window B is in the middle.
+	 */
+	tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_NOKEY);
+	tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_1WIN);
+
+	switch (index) {
+	case 0:
+		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_X);
+		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
+		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
+		break;
+
+	case 1:
+		tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
+		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
+		tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
+		break;
+
+	case 2:
+		tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
+		tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_Y);
+		tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_3WIN_XY);
+		break;
+	}
+
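+	/* latch the updated window state, then request its activation */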
+	tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL);
+
+	return 0;
+}
+
+unsigned int tegra_dc_format(uint32_t format)
+{
+	switch (format) {
+	case DRM_FORMAT_XBGR8888:
+		return WIN_COLOR_DEPTH_R8G8B8A8;
+
+	case DRM_FORMAT_XRGB8888:
+		return WIN_COLOR_DEPTH_B8G8R8A8;
+
+	case DRM_FORMAT_RGB565:
+		return WIN_COLOR_DEPTH_B5G6R5;
+
+	case DRM_FORMAT_UYVY:
+		return WIN_COLOR_DEPTH_YCbCr422;
+
+	case DRM_FORMAT_YUV420:
+		return WIN_COLOR_DEPTH_YCbCr420P;
+
+	case DRM_FORMAT_YUV422:
+		return WIN_COLOR_DEPTH_YCbCr422P;
+
+	default:
+		break;
+	}
+
+	WARN(1, "unsupported pixel format %u, using default\n", format);
+	return WIN_COLOR_DEPTH_B8G8R8A8;
+}
+
+static int tegra_crtc_mode_set(struct drm_crtc *crtc,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted,
+			       int x, int y, struct drm_framebuffer *old_fb)
+{
+	struct tegra_bo *bo = tegra_fb_get_plane(crtc->fb, 0);
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	struct tegra_dc_window window;
+	unsigned long div, value;
+	int err;
+
+	drm_vblank_pre_modeset(crtc->dev, dc->pipe);
+
+	err = tegra_crtc_setup_clk(crtc, mode, &div);
+	if (err) {
+		dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
+		drm_vblank_post_modeset(crtc->dev, dc->pipe);
+		return err;
+	}
+
+	/* program display mode */
+	tegra_dc_set_timings(dc, mode);
+
+	value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
+	tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
+
+	value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1));
+	value &= ~LVS_OUTPUT_POLARITY_LOW;
+	value &= ~LHS_OUTPUT_POLARITY_LOW;
+	tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
+
+	value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
+		DISP_ORDER_RED_BLUE;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
+
+	tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS);
+
+	value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
+	tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+
+	/* setup window parameters */
+	memset(&window, 0, sizeof(window));
+	window.src.x = 0;
+	window.src.y = 0;
+	window.src.w = mode->hdisplay;
+	window.src.h = mode->vdisplay;
+	window.dst.x = 0;
+	window.dst.y = 0;
+	window.dst.w = mode->hdisplay;
+	window.dst.h = mode->vdisplay;
+	window.format = tegra_dc_format(crtc->fb->pixel_format);
+	window.bits_per_pixel = crtc->fb->bits_per_pixel;
+	window.stride[0] = crtc->fb->pitches[0];
+	window.base[0] = bo->paddr;
+
+	err = tegra_dc_setup_window(dc, 0, &window);
+	if (err < 0)
+		dev_err(dc->dev, "failed to enable root plane\n");
+
+	return 0;
+}
+
+static int tegra_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+				    struct drm_framebuffer *old_fb)
+{
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+
+	return tegra_dc_set_base(dc, x, y, crtc->fb);
+}
+
+static void tegra_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	unsigned int syncpt;
+	unsigned long value;
+
+	/* hardware initialization */
+	tegra_periph_reset_deassert(dc->clk);
+	usleep_range(10000, 20000);
+
+	if (dc->pipe)
+		syncpt = SYNCPT_VBLANK1;
+	else
+		syncpt = SYNCPT_VBLANK0;
+
+	/* initialize display controller */
+	tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
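+	/* have the VBLANK syncpoint increment continuously on vertical blank */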
+	tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+	value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+		PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+	value |= DISP_CTRL_MODE_C_DISPLAY;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+	/* initialize memory client priority thresholds and timer */
+	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+		WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+		WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+}
+
+static void tegra_crtc_commit(struct drm_crtc *crtc)
+{
+	struct tegra_dc *dc = to_tegra_dc(crtc);
+	unsigned long value;
+
+	value = GENERAL_UPDATE | WIN_A_UPDATE;
+	tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+
+	value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+	tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+
+	drm_vblank_post_modeset(crtc->dev, dc->pipe);
+}
+
+static void tegra_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
+	.disable = tegra_crtc_disable,
+	.mode_fixup = tegra_crtc_mode_fixup,
+	.mode_set = tegra_crtc_mode_set,
+	.mode_set_base = tegra_crtc_mode_set_base,
+	.prepare = tegra_crtc_prepare,
+	.commit = tegra_crtc_commit,
+	.load_lut = tegra_crtc_load_lut,
+};
+
+static irqreturn_t tegra_dc_irq(int irq, void *data)
+{
+	struct tegra_dc *dc = data;
+	unsigned long status;
+
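+	/* read pending interrupts and acknowledge them by writing them back */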
+	status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
+	tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
+
+	if (status & FRAME_END_INT) {
+		/*
+		dev_dbg(dc->dev, "%s(): frame end\n", __func__);
+		*/
+	}
+
+	if (status & VBLANK_INT) {
+		/*
+		dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
+		*/
+		drm_handle_vblank(dc->base.dev, dc->pipe);
+		tegra_dc_finish_page_flip(dc);
+	}
+
+	if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
+		/*
+		dev_dbg(dc->dev, "%s(): underflow\n", __func__);
+		*/
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int tegra_dc_show_regs(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct tegra_dc *dc = node->info_ent->data;
+
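+/* print each register's name, word offset and current value */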
+#define DUMP_REG(name)						\
+	seq_printf(s, "%-40s %#05x %08lx\n", #name, name,	\
+		   tegra_dc_readl(dc, name))
+
+	DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT);
+	DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+	DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_ERROR);
+	DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT);
+	DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL);
+	DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_ERROR);
+	DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT);
+	DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL);
+	DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_ERROR);
+	DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT);
+	DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL);
+	DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_ERROR);
+	DUMP_REG(DC_CMD_CONT_SYNCPT_VSYNC);
+	DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
+	DUMP_REG(DC_CMD_DISPLAY_COMMAND);
+	DUMP_REG(DC_CMD_SIGNAL_RAISE);
+	DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL);
+	DUMP_REG(DC_CMD_INT_STATUS);
+	DUMP_REG(DC_CMD_INT_MASK);
+	DUMP_REG(DC_CMD_INT_ENABLE);
+	DUMP_REG(DC_CMD_INT_TYPE);
+	DUMP_REG(DC_CMD_INT_POLARITY);
+	DUMP_REG(DC_CMD_SIGNAL_RAISE1);
+	DUMP_REG(DC_CMD_SIGNAL_RAISE2);
+	DUMP_REG(DC_CMD_SIGNAL_RAISE3);
+	DUMP_REG(DC_CMD_STATE_ACCESS);
+	DUMP_REG(DC_CMD_STATE_CONTROL);
+	DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+	DUMP_REG(DC_CMD_REG_ACT_CONTROL);
+	DUMP_REG(DC_COM_CRC_CONTROL);
+	DUMP_REG(DC_COM_CRC_CHECKSUM);
+	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(0));
+	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(1));
+	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(2));
+	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(3));
+	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(0));
+	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(1));
+	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(2));
+	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(3));
+	DUMP_REG(DC_COM_PIN_OUTPUT_DATA(0));
+	DUMP_REG(DC_COM_PIN_OUTPUT_DATA(1));
+	DUMP_REG(DC_COM_PIN_OUTPUT_DATA(2));
+	DUMP_REG(DC_COM_PIN_OUTPUT_DATA(3));
+	DUMP_REG(DC_COM_PIN_INPUT_ENABLE(0));
+	DUMP_REG(DC_COM_PIN_INPUT_ENABLE(1));
+	DUMP_REG(DC_COM_PIN_INPUT_ENABLE(2));
+	DUMP_REG(DC_COM_PIN_INPUT_ENABLE(3));
+	DUMP_REG(DC_COM_PIN_INPUT_DATA(0));
+	DUMP_REG(DC_COM_PIN_INPUT_DATA(1));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(0));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(1));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(2));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(3));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(4));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(5));
+	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(6));
+	DUMP_REG(DC_COM_PIN_MISC_CONTROL);
+	DUMP_REG(DC_COM_PIN_PM0_CONTROL);
+	DUMP_REG(DC_COM_PIN_PM0_DUTY_CYCLE);
+	DUMP_REG(DC_COM_PIN_PM1_CONTROL);
+	DUMP_REG(DC_COM_PIN_PM1_DUTY_CYCLE);
+	DUMP_REG(DC_COM_SPI_CONTROL);
+	DUMP_REG(DC_COM_SPI_START_BYTE);
+	DUMP_REG(DC_COM_HSPI_WRITE_DATA_AB);
+	DUMP_REG(DC_COM_HSPI_WRITE_DATA_CD);
+	DUMP_REG(DC_COM_HSPI_CS_DC);
+	DUMP_REG(DC_COM_SCRATCH_REGISTER_A);
+	DUMP_REG(DC_COM_SCRATCH_REGISTER_B);
+	DUMP_REG(DC_COM_GPIO_CTRL);
+	DUMP_REG(DC_COM_GPIO_DEBOUNCE_COUNTER);
+	DUMP_REG(DC_COM_CRC_CHECKSUM_LATCHED);
+	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
+	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
+	DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
+	DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY);
+	DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+	DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
+	DUMP_REG(DC_DISP_REF_TO_SYNC);
+	DUMP_REG(DC_DISP_SYNC_WIDTH);
+	DUMP_REG(DC_DISP_BACK_PORCH);
+	DUMP_REG(DC_DISP_ACTIVE);
+	DUMP_REG(DC_DISP_FRONT_PORCH);
+	DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
+	DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
+	DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
+	DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
+	DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
+	DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
+	DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
+	DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
+	DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
+	DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
+	DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
+	DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
+	DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
+	DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
+	DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
+	DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
+	DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
+	DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
+	DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
+	DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
+	DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
+	DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
+	DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
+	DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
+	DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
+	DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
+	DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
+	DUMP_REG(DC_DISP_M0_CONTROL);
+	DUMP_REG(DC_DISP_M1_CONTROL);
+	DUMP_REG(DC_DISP_DI_CONTROL);
+	DUMP_REG(DC_DISP_PP_CONTROL);
+	DUMP_REG(DC_DISP_PP_SELECT_A);
+	DUMP_REG(DC_DISP_PP_SELECT_B);
+	DUMP_REG(DC_DISP_PP_SELECT_C);
+	DUMP_REG(DC_DISP_PP_SELECT_D);
+	DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
+	DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
+	DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
+	DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
+	DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
+	DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
+	DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
+	DUMP_REG(DC_DISP_BORDER_COLOR);
+	DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
+	DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
+	DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
+	DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
+	DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
+	DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
+	DUMP_REG(DC_DISP_CURSOR_START_ADDR);
+	DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
+	DUMP_REG(DC_DISP_CURSOR_POSITION);
+	DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
+	DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
+	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
+	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
+	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
+	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
+	DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
+	DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
+	DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
+	DUMP_REG(DC_DISP_MCCIF_DISPLAY1A_HYST);
+	DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
+	DUMP_REG(DC_DISP_DAC_CRT_CTRL);
+	DUMP_REG(DC_DISP_DISP_MISC_CONTROL);
+	DUMP_REG(DC_DISP_SD_CONTROL);
+	DUMP_REG(DC_DISP_SD_CSC_COEFF);
+	DUMP_REG(DC_DISP_SD_LUT(0));
+	DUMP_REG(DC_DISP_SD_LUT(1));
+	DUMP_REG(DC_DISP_SD_LUT(2));
+	DUMP_REG(DC_DISP_SD_LUT(3));
+	DUMP_REG(DC_DISP_SD_LUT(4));
+	DUMP_REG(DC_DISP_SD_LUT(5));
+	DUMP_REG(DC_DISP_SD_LUT(6));
+	DUMP_REG(DC_DISP_SD_LUT(7));
+	DUMP_REG(DC_DISP_SD_LUT(8));
+	DUMP_REG(DC_DISP_SD_FLICKER_CONTROL);
+	DUMP_REG(DC_DISP_DC_PIXEL_COUNT);
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(0));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(1));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(2));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(3));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(4));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(5));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(6));
+	DUMP_REG(DC_DISP_SD_HISTOGRAM(7));
+	DUMP_REG(DC_DISP_SD_BL_TF(0));
+	DUMP_REG(DC_DISP_SD_BL_TF(1));
+	DUMP_REG(DC_DISP_SD_BL_TF(2));
+	DUMP_REG(DC_DISP_SD_BL_TF(3));
+	DUMP_REG(DC_DISP_SD_BL_CONTROL);
+	DUMP_REG(DC_DISP_SD_HW_K_VALUES);
+	DUMP_REG(DC_DISP_SD_MAN_K_VALUES);
+	DUMP_REG(DC_WIN_WIN_OPTIONS);
+	DUMP_REG(DC_WIN_BYTE_SWAP);
+	DUMP_REG(DC_WIN_BUFFER_CONTROL);
+	DUMP_REG(DC_WIN_COLOR_DEPTH);
+	DUMP_REG(DC_WIN_POSITION);
+	DUMP_REG(DC_WIN_SIZE);
+	DUMP_REG(DC_WIN_PRESCALED_SIZE);
+	DUMP_REG(DC_WIN_H_INITIAL_DDA);
+	DUMP_REG(DC_WIN_V_INITIAL_DDA);
+	DUMP_REG(DC_WIN_DDA_INC);
+	DUMP_REG(DC_WIN_LINE_STRIDE);
+	DUMP_REG(DC_WIN_BUF_STRIDE);
+	DUMP_REG(DC_WIN_UV_BUF_STRIDE);
+	DUMP_REG(DC_WIN_BUFFER_ADDR_MODE);
+	DUMP_REG(DC_WIN_DV_CONTROL);
+	DUMP_REG(DC_WIN_BLEND_NOKEY);
+	DUMP_REG(DC_WIN_BLEND_1WIN);
+	DUMP_REG(DC_WIN_BLEND_2WIN_X);
+	DUMP_REG(DC_WIN_BLEND_2WIN_Y);
+	DUMP_REG(DC_WIN_BLEND_3WIN_XY);
+	DUMP_REG(DC_WIN_HP_FETCH_CONTROL);
+	DUMP_REG(DC_WINBUF_START_ADDR);
+	DUMP_REG(DC_WINBUF_START_ADDR_NS);
+	DUMP_REG(DC_WINBUF_START_ADDR_U);
+	DUMP_REG(DC_WINBUF_START_ADDR_U_NS);
+	DUMP_REG(DC_WINBUF_START_ADDR_V);
+	DUMP_REG(DC_WINBUF_START_ADDR_V_NS);
+	DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
+	DUMP_REG(DC_WINBUF_ADDR_H_OFFSET_NS);
+	DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
+	DUMP_REG(DC_WINBUF_ADDR_V_OFFSET_NS);
+	DUMP_REG(DC_WINBUF_UFLOW_STATUS);
+	DUMP_REG(DC_WINBUF_AD_UFLOW_STATUS);
+	DUMP_REG(DC_WINBUF_BD_UFLOW_STATUS);
+	DUMP_REG(DC_WINBUF_CD_UFLOW_STATUS);
+
+#undef DUMP_REG
+
+	return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+	{ "regs", tegra_dc_show_regs, 0, NULL },
+};
+
+static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
+{
+	unsigned int i;
+	char *name;
+	int err;
+
+	name = kasprintf(GFP_KERNEL, "dc.%d", dc->pipe);
+	if (!name)
+		return -ENOMEM;
+
+	dc->debugfs = debugfs_create_dir(name, minor->debugfs_root);
+	kfree(name);
+
+	if (!dc->debugfs)
+		return -ENOMEM;
+
+	dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+				    GFP_KERNEL);
+	if (!dc->debugfs_files) {
+		err = -ENOMEM;
+		goto remove;
+	}
+
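+	/* use a private copy of the table so that .data can point at this DC */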
+	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+		dc->debugfs_files[i].data = dc;
+
+	err = drm_debugfs_create_files(dc->debugfs_files,
+				       ARRAY_SIZE(debugfs_files),
+				       dc->debugfs, minor);
+	if (err < 0)
+		goto free;
+
+	dc->minor = minor;
+
+	return 0;
+
+free:
+	kfree(dc->debugfs_files);
+	dc->debugfs_files = NULL;
+remove:
+	debugfs_remove(dc->debugfs);
+	dc->debugfs = NULL;
+
+	return err;
+}
+
+static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
+{
+	drm_debugfs_remove_files(dc->debugfs_files, ARRAY_SIZE(debugfs_files),
+				 dc->minor);
+	dc->minor = NULL;
+
+	kfree(dc->debugfs_files);
+	dc->debugfs_files = NULL;
+
+	debugfs_remove(dc->debugfs);
+	dc->debugfs = NULL;
+
+	return 0;
+}
+
+static int tegra_dc_drm_init(struct host1x_client *client,
+			     struct drm_device *drm)
+{
+	struct tegra_dc *dc = host1x_client_to_dc(client);
+	int err;
+
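+	/*
+	 * CRTCs are registered in probe order, so the current CRTC count
+	 * gives the pipe index for this display controller.
+	 */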
+	dc->pipe = drm->mode_config.num_crtc;
+
+	drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+	drm_mode_crtc_set_gamma_size(&dc->base, 256);
+	drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
+
+	err = tegra_dc_rgb_init(drm, dc);
+	if (err < 0 && err != -ENODEV) {
+		dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
+		return err;
+	}
+
+	err = tegra_dc_add_planes(drm, dc);
+	if (err < 0)
+		return err;
+
+	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+		err = tegra_dc_debugfs_init(dc, drm->primary);
+		if (err < 0)
+			dev_err(dc->dev, "debugfs setup failed: %d\n", err);
+	}
+
+	err = devm_request_irq(dc->dev, dc->irq, tegra_dc_irq, 0,
+			       dev_name(dc->dev), dc);
+	if (err < 0) {
+		dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
+			err);
+		return err;
+	}
+
+	return 0;
+}
+
+static int tegra_dc_drm_exit(struct host1x_client *client)
+{
+	struct tegra_dc *dc = host1x_client_to_dc(client);
+	int err;
+
+	devm_free_irq(dc->dev, dc->irq, dc);
+
+	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+		err = tegra_dc_debugfs_exit(dc);
+		if (err < 0)
+			dev_err(dc->dev, "debugfs cleanup failed: %d\n", err);
+	}
+
+	err = tegra_dc_rgb_exit(dc);
+	if (err) {
+		dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct host1x_client_ops dc_client_ops = {
+	.drm_init = tegra_dc_drm_init,
+	.drm_exit = tegra_dc_drm_exit,
+};
+
+static int tegra_dc_probe(struct platform_device *pdev)
+{
+	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
+	struct resource *regs;
+	struct tegra_dc *dc;
+	int err;
+
+	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
+	if (!dc)
+		return -ENOMEM;
+
+	spin_lock_init(&dc->lock);
+	INIT_LIST_HEAD(&dc->list);
+	dc->dev = &pdev->dev;
+
+	dc->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(dc->clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		return PTR_ERR(dc->clk);
+	}
+
+	err = clk_prepare_enable(dc->clk);
+	if (err < 0)
+		return err;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dc->regs = devm_ioremap_resource(&pdev->dev, regs);
+	if (IS_ERR(dc->regs)) {
+		err = PTR_ERR(dc->regs);
+		goto disable_clk;
+	}
+
+	dc->irq = platform_get_irq(pdev, 0);
+	if (dc->irq < 0) {
+		dev_err(&pdev->dev, "failed to get IRQ\n");
+		err = -ENXIO;
+		goto disable_clk;
+	}
+
+	INIT_LIST_HEAD(&dc->client.list);
+	dc->client.ops = &dc_client_ops;
+	dc->client.dev = &pdev->dev;
+
+	err = tegra_dc_rgb_probe(dc);
+	if (err < 0 && err != -ENODEV) {
+		dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
+		goto disable_clk;
+	}
+
+	err = host1x_register_client(host1x, &dc->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+			err);
+		goto disable_clk;
+	}
+
+	platform_set_drvdata(pdev, dc);
+
+	return 0;
+
+disable_clk:
+	clk_disable_unprepare(dc->clk);
+
+	return err;
+}
+
+static int tegra_dc_remove(struct platform_device *pdev)
+{
+	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
+	struct tegra_dc *dc = platform_get_drvdata(pdev);
+	int err;
+
+	err = host1x_unregister_client(host1x, &dc->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	clk_disable_unprepare(dc->clk);
+
+	return 0;
+}
+
+static const struct of_device_id tegra_dc_of_match[] = {
+	{ .compatible = "nvidia,tegra30-dc", },
+	{ .compatible = "nvidia,tegra20-dc", },
+	{ },
+};
+
+struct platform_driver tegra_dc_driver = {
+	.driver = {
+		.name = "tegra-dc",
+		.owner = THIS_MODULE,
+		.of_match_table = tegra_dc_of_match,
+	},
+	.probe = tegra_dc_probe,
+	.remove = tegra_dc_remove,
+};
diff --git a/linux-imx/drivers/gpu/host1x/drm/dc.h b/linux-imx/drivers/gpu/host1x/drm/dc.h
new file mode 100644
index 0000000..79eaec9
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/drm/dc.h
@@ -0,0 +1,400 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_DC_H
+#define TEGRA_DC_H 1
+
+#define DC_CMD_GENERAL_INCR_SYNCPT		0x000
+#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL	0x001
+#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR	0x002
+#define DC_CMD_WIN_A_INCR_SYNCPT		0x008
+#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL		0x009
+#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR		0x00a
+#define DC_CMD_WIN_B_INCR_SYNCPT		0x010
+#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL		0x011
+#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR		0x012
+#define DC_CMD_WIN_C_INCR_SYNCPT		0x018
+#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL		0x019
+#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR		0x01a
+#define DC_CMD_CONT_SYNCPT_VSYNC		0x028
+#define DC_CMD_DISPLAY_COMMAND_OPTION0		0x031
+#define DC_CMD_DISPLAY_COMMAND			0x032
+#define DISP_CTRL_MODE_STOP (0 << 5)
+#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
+#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DC_CMD_SIGNAL_RAISE			0x033
+#define DC_CMD_DISPLAY_POWER_CONTROL		0x036
+#define PW0_ENABLE (1 <<  0)
+#define PW1_ENABLE (1 <<  2)
+#define PW2_ENABLE (1 <<  4)
+#define PW3_ENABLE (1 <<  6)
+#define PW4_ENABLE (1 <<  8)
+#define PM0_ENABLE (1 << 16)
+#define PM1_ENABLE (1 << 18)
+
+#define DC_CMD_INT_STATUS			0x037
+#define DC_CMD_INT_MASK				0x038
+#define DC_CMD_INT_ENABLE			0x039
+#define DC_CMD_INT_TYPE				0x03a
+#define DC_CMD_INT_POLARITY			0x03b
+#define CTXSW_INT     (1 << 0)
+#define FRAME_END_INT (1 << 1)
+#define VBLANK_INT    (1 << 2)
+#define WIN_A_UF_INT  (1 << 8)
+#define WIN_B_UF_INT  (1 << 9)
+#define WIN_C_UF_INT  (1 << 10)
+#define WIN_A_OF_INT  (1 << 14)
+#define WIN_B_OF_INT  (1 << 15)
+#define WIN_C_OF_INT  (1 << 16)
+
+#define DC_CMD_SIGNAL_RAISE1			0x03c
+#define DC_CMD_SIGNAL_RAISE2			0x03d
+#define DC_CMD_SIGNAL_RAISE3			0x03e
+
+#define DC_CMD_STATE_ACCESS			0x040
+#define READ_MUX  (1 << 0)
+#define WRITE_MUX (1 << 2)
+
+#define DC_CMD_STATE_CONTROL			0x041
+#define GENERAL_ACT_REQ (1 <<  0)
+#define WIN_A_ACT_REQ   (1 <<  1)
+#define WIN_B_ACT_REQ   (1 <<  2)
+#define WIN_C_ACT_REQ   (1 <<  3)
+#define GENERAL_UPDATE  (1 <<  8)
+#define WIN_A_UPDATE    (1 <<  9)
+#define WIN_B_UPDATE    (1 << 10)
+#define WIN_C_UPDATE    (1 << 11)
+#define NC_HOST_TRIG    (1 << 24)
+
+#define DC_CMD_DISPLAY_WINDOW_HEADER		0x042
+#define WINDOW_A_SELECT (1 << 4)
+#define WINDOW_B_SELECT (1 << 5)
+#define WINDOW_C_SELECT (1 << 6)
+
+#define DC_CMD_REG_ACT_CONTROL			0x043
+
+#define DC_COM_CRC_CONTROL			0x300
+#define DC_COM_CRC_CHECKSUM			0x301
+#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
+#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
+#define LVS_OUTPUT_POLARITY_LOW (1 << 28)
+#define LHS_OUTPUT_POLARITY_LOW (1 << 30)
+#define DC_COM_PIN_OUTPUT_DATA(x) (0x30a + (x))
+#define DC_COM_PIN_INPUT_ENABLE(x) (0x30e + (x))
+#define DC_COM_PIN_INPUT_DATA(x) (0x312 + (x))
+#define DC_COM_PIN_OUTPUT_SELECT(x) (0x314 + (x))
+
+#define DC_COM_PIN_MISC_CONTROL			0x31b
+#define DC_COM_PIN_PM0_CONTROL			0x31c
+#define DC_COM_PIN_PM0_DUTY_CYCLE		0x31d
+#define DC_COM_PIN_PM1_CONTROL			0x31e
+#define DC_COM_PIN_PM1_DUTY_CYCLE		0x31f
+
+#define DC_COM_SPI_CONTROL			0x320
+#define DC_COM_SPI_START_BYTE			0x321
+#define DC_COM_HSPI_WRITE_DATA_AB		0x322
+#define DC_COM_HSPI_WRITE_DATA_CD		0x323
+#define DC_COM_HSPI_CS_DC			0x324
+#define DC_COM_SCRATCH_REGISTER_A		0x325
+#define DC_COM_SCRATCH_REGISTER_B		0x326
+#define DC_COM_GPIO_CTRL			0x327
+#define DC_COM_GPIO_DEBOUNCE_COUNTER		0x328
+#define DC_COM_CRC_CHECKSUM_LATCHED		0x329
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS0		0x400
+#define H_PULSE_0_ENABLE (1 <<  8)
+#define H_PULSE_1_ENABLE (1 << 10)
+#define H_PULSE_2_ENABLE (1 << 12)
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS1		0x401
+
+#define DC_DISP_DISP_WIN_OPTIONS		0x402
+#define HDMI_ENABLE (1 << 30)
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY		0x403
+#define CURSOR_THRESHOLD(x)   (((x) & 0x03) << 24)
+#define WINDOW_A_THRESHOLD(x) (((x) & 0x7f) << 16)
+#define WINDOW_B_THRESHOLD(x) (((x) & 0x7f) <<  8)
+#define WINDOW_C_THRESHOLD(x) (((x) & 0xff) <<  0)
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER	0x404
+#define CURSOR_DELAY(x)   (((x) & 0x3f) << 24)
+#define WINDOW_A_DELAY(x) (((x) & 0x3f) << 16)
+#define WINDOW_B_DELAY(x) (((x) & 0x3f) <<  8)
+#define WINDOW_C_DELAY(x) (((x) & 0x3f) <<  0)
+
+#define DC_DISP_DISP_TIMING_OPTIONS		0x405
+#define VSYNC_H_POSITION(x) ((x) & 0xfff)
+
+#define DC_DISP_REF_TO_SYNC			0x406
+#define DC_DISP_SYNC_WIDTH			0x407
+#define DC_DISP_BACK_PORCH			0x408
+#define DC_DISP_ACTIVE				0x409
+#define DC_DISP_FRONT_PORCH			0x40a
+#define DC_DISP_H_PULSE0_CONTROL		0x40b
+#define DC_DISP_H_PULSE0_POSITION_A		0x40c
+#define DC_DISP_H_PULSE0_POSITION_B		0x40d
+#define DC_DISP_H_PULSE0_POSITION_C		0x40e
+#define DC_DISP_H_PULSE0_POSITION_D		0x40f
+#define DC_DISP_H_PULSE1_CONTROL		0x410
+#define DC_DISP_H_PULSE1_POSITION_A		0x411
+#define DC_DISP_H_PULSE1_POSITION_B		0x412
+#define DC_DISP_H_PULSE1_POSITION_C		0x413
+#define DC_DISP_H_PULSE1_POSITION_D		0x414
+#define DC_DISP_H_PULSE2_CONTROL		0x415
+#define DC_DISP_H_PULSE2_POSITION_A		0x416
+#define DC_DISP_H_PULSE2_POSITION_B		0x417
+#define DC_DISP_H_PULSE2_POSITION_C		0x418
+#define DC_DISP_H_PULSE2_POSITION_D		0x419
+#define DC_DISP_V_PULSE0_CONTROL		0x41a
+#define DC_DISP_V_PULSE0_POSITION_A		0x41b
+#define DC_DISP_V_PULSE0_POSITION_B		0x41c
+#define DC_DISP_V_PULSE0_POSITION_C		0x41d
+#define DC_DISP_V_PULSE1_CONTROL		0x41e
+#define DC_DISP_V_PULSE1_POSITION_A		0x41f
+#define DC_DISP_V_PULSE1_POSITION_B		0x420
+#define DC_DISP_V_PULSE1_POSITION_C		0x421
+#define DC_DISP_V_PULSE2_CONTROL		0x422
+#define DC_DISP_V_PULSE2_POSITION_A		0x423
+#define DC_DISP_V_PULSE3_CONTROL		0x424
+#define DC_DISP_V_PULSE3_POSITION_A		0x425
+#define DC_DISP_M0_CONTROL			0x426
+#define DC_DISP_M1_CONTROL			0x427
+#define DC_DISP_DI_CONTROL			0x428
+#define DC_DISP_PP_CONTROL			0x429
+#define DC_DISP_PP_SELECT_A			0x42a
+#define DC_DISP_PP_SELECT_B			0x42b
+#define DC_DISP_PP_SELECT_C			0x42c
+#define DC_DISP_PP_SELECT_D			0x42d
+
+#define PULSE_MODE_NORMAL    (0 << 3)
+#define PULSE_MODE_ONE_CLOCK (1 << 3)
+#define PULSE_POLARITY_HIGH  (0 << 4)
+#define PULSE_POLARITY_LOW   (1 << 4)
+#define PULSE_QUAL_ALWAYS    (0 << 6)
+#define PULSE_QUAL_VACTIVE   (2 << 6)
+#define PULSE_QUAL_VACTIVE1  (3 << 6)
+#define PULSE_LAST_START_A   (0 << 8)
+#define PULSE_LAST_END_A     (1 << 8)
+#define PULSE_LAST_START_B   (2 << 8)
+#define PULSE_LAST_END_B     (3 << 8)
+#define PULSE_LAST_START_C   (4 << 8)
+#define PULSE_LAST_END_C     (5 << 8)
+#define PULSE_LAST_START_D   (6 << 8)
+#define PULSE_LAST_END_D     (7 << 8)
+
+#define PULSE_START(x) (((x) & 0xfff) <<  0)
+#define PULSE_END(x)   (((x) & 0xfff) << 16)
+
+#define DC_DISP_DISP_CLOCK_CONTROL		0x42e
+#define PIXEL_CLK_DIVIDER_PCD1  (0 << 8)
+#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8)
+#define PIXEL_CLK_DIVIDER_PCD2  (2 << 8)
+#define PIXEL_CLK_DIVIDER_PCD3  (3 << 8)
+#define PIXEL_CLK_DIVIDER_PCD4  (4 << 8)
+#define PIXEL_CLK_DIVIDER_PCD6  (5 << 8)
+#define PIXEL_CLK_DIVIDER_PCD8  (6 << 8)
+#define PIXEL_CLK_DIVIDER_PCD9  (7 << 8)
+#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8)
+#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8)
+#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8)
+#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8)
+#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8)
+#define SHIFT_CLK_DIVIDER(x)    ((x) & 0xff)
+
+#define DC_DISP_DISP_INTERFACE_CONTROL		0x42f
+#define DISP_DATA_FORMAT_DF1P1C    (0 << 0)
+#define DISP_DATA_FORMAT_DF1P2C24B (1 << 0)
+#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0)
+#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0)
+#define DISP_DATA_FORMAT_DF2S      (4 << 0)
+#define DISP_DATA_FORMAT_DF3S      (5 << 0)
+#define DISP_DATA_FORMAT_DFSPI     (6 << 0)
+#define DISP_DATA_FORMAT_DF1P3C24B (7 << 0)
+#define DISP_DATA_FORMAT_DF1P3C18B (8 << 0)
+#define DISP_ALIGNMENT_MSB         (0 << 8)
+#define DISP_ALIGNMENT_LSB         (1 << 8)
+#define DISP_ORDER_RED_BLUE        (0 << 9)
+#define DISP_ORDER_BLUE_RED        (1 << 9)
+
+#define DC_DISP_DISP_COLOR_CONTROL		0x430
+#define BASE_COLOR_SIZE666     (0 << 0)
+#define BASE_COLOR_SIZE111     (1 << 0)
+#define BASE_COLOR_SIZE222     (2 << 0)
+#define BASE_COLOR_SIZE333     (3 << 0)
+#define BASE_COLOR_SIZE444     (4 << 0)
+#define BASE_COLOR_SIZE555     (5 << 0)
+#define BASE_COLOR_SIZE565     (6 << 0)
+#define BASE_COLOR_SIZE332     (7 << 0)
+#define BASE_COLOR_SIZE888     (8 << 0)
+#define DITHER_CONTROL_DISABLE (0 << 8)
+#define DITHER_CONTROL_ORDERED (2 << 8)
+#define DITHER_CONTROL_ERRDIFF (3 << 8)
+
+#define DC_DISP_SHIFT_CLOCK_OPTIONS		0x431
+
+#define DC_DISP_DATA_ENABLE_OPTIONS		0x432
+#define DE_SELECT_ACTIVE_BLANK  (0 << 0)
+#define DE_SELECT_ACTIVE        (1 << 0)
+#define DE_SELECT_ACTIVE_IS     (2 << 0)
+#define DE_CONTROL_ONECLK       (0 << 2)
+#define DE_CONTROL_NORMAL       (1 << 2)
+#define DE_CONTROL_EARLY_EXT    (2 << 2)
+#define DE_CONTROL_EARLY        (3 << 2)
+#define DE_CONTROL_ACTIVE_BLANK (4 << 2)
+
+#define DC_DISP_SERIAL_INTERFACE_OPTIONS	0x433
+#define DC_DISP_LCD_SPI_OPTIONS			0x434
+#define DC_DISP_BORDER_COLOR			0x435
+#define DC_DISP_COLOR_KEY0_LOWER		0x436
+#define DC_DISP_COLOR_KEY0_UPPER		0x437
+#define DC_DISP_COLOR_KEY1_LOWER		0x438
+#define DC_DISP_COLOR_KEY1_UPPER		0x439
+
+#define DC_DISP_CURSOR_FOREGROUND		0x43c
+#define DC_DISP_CURSOR_BACKGROUND		0x43d
+
+#define DC_DISP_CURSOR_START_ADDR		0x43e
+#define DC_DISP_CURSOR_START_ADDR_NS		0x43f
+
+#define DC_DISP_CURSOR_POSITION			0x440
+#define DC_DISP_CURSOR_POSITION_NS		0x441
+
+#define DC_DISP_INIT_SEQ_CONTROL		0x442
+#define DC_DISP_SPI_INIT_SEQ_DATA_A		0x443
+#define DC_DISP_SPI_INIT_SEQ_DATA_B		0x444
+#define DC_DISP_SPI_INIT_SEQ_DATA_C		0x445
+#define DC_DISP_SPI_INIT_SEQ_DATA_D		0x446
+
+#define DC_DISP_DC_MCCIF_FIFOCTRL		0x480
+#define DC_DISP_MCCIF_DISPLAY0A_HYST		0x481
+#define DC_DISP_MCCIF_DISPLAY0B_HYST		0x482
+#define DC_DISP_MCCIF_DISPLAY1A_HYST		0x483
+#define DC_DISP_MCCIF_DISPLAY1B_HYST		0x484
+
+#define DC_DISP_DAC_CRT_CTRL			0x4c0
+#define DC_DISP_DISP_MISC_CONTROL		0x4c1
+#define DC_DISP_SD_CONTROL			0x4c2
+#define DC_DISP_SD_CSC_COEFF			0x4c3
+#define DC_DISP_SD_LUT(x)			(0x4c4 + (x))
+#define DC_DISP_SD_FLICKER_CONTROL		0x4cd
+#define DC_DISP_DC_PIXEL_COUNT			0x4ce
+#define DC_DISP_SD_HISTOGRAM(x)			(0x4cf + (x))
+#define DC_DISP_SD_BL_PARAMETERS		0x4d7
+#define DC_DISP_SD_BL_TF(x)			(0x4d8 + (x))
+#define DC_DISP_SD_BL_CONTROL			0x4dc
+#define DC_DISP_SD_HW_K_VALUES			0x4dd
+#define DC_DISP_SD_MAN_K_VALUES			0x4de
+
+#define DC_WIN_CSC_YOF				0x611
+#define DC_WIN_CSC_KYRGB			0x612
+#define DC_WIN_CSC_KUR				0x613
+#define DC_WIN_CSC_KVR				0x614
+#define DC_WIN_CSC_KUG				0x615
+#define DC_WIN_CSC_KVG				0x616
+#define DC_WIN_CSC_KUB				0x617
+#define DC_WIN_CSC_KVB				0x618
+
+#define DC_WIN_WIN_OPTIONS			0x700
+#define COLOR_EXPAND (1 <<  6)
+#define CSC_ENABLE   (1 << 18)
+#define WIN_ENABLE   (1 << 30)
+
+#define DC_WIN_BYTE_SWAP			0x701
+#define BYTE_SWAP_NOSWAP  (0 << 0)
+#define BYTE_SWAP_SWAP2   (1 << 0)
+#define BYTE_SWAP_SWAP4   (2 << 0)
+#define BYTE_SWAP_SWAP4HW (3 << 0)
+
+#define DC_WIN_BUFFER_CONTROL			0x702
+#define BUFFER_CONTROL_HOST  (0 << 0)
+#define BUFFER_CONTROL_VI    (1 << 0)
+#define BUFFER_CONTROL_EPP   (2 << 0)
+#define BUFFER_CONTROL_MPEGE (3 << 0)
+#define BUFFER_CONTROL_SB2D  (4 << 0)
+
+#define DC_WIN_COLOR_DEPTH			0x703
+#define WIN_COLOR_DEPTH_P1              0
+#define WIN_COLOR_DEPTH_P2              1
+#define WIN_COLOR_DEPTH_P4              2
+#define WIN_COLOR_DEPTH_P8              3
+#define WIN_COLOR_DEPTH_B4G4R4A4        4
+#define WIN_COLOR_DEPTH_B5G5R5A         5
+#define WIN_COLOR_DEPTH_B5G6R5          6
+#define WIN_COLOR_DEPTH_AB5G5R5         7
+#define WIN_COLOR_DEPTH_B8G8R8A8       12
+#define WIN_COLOR_DEPTH_R8G8B8A8       13
+#define WIN_COLOR_DEPTH_B6x2G6x2R6x2A8 14
+#define WIN_COLOR_DEPTH_R6x2G6x2B6x2A8 15
+#define WIN_COLOR_DEPTH_YCbCr422       16
+#define WIN_COLOR_DEPTH_YUV422         17
+#define WIN_COLOR_DEPTH_YCbCr420P      18
+#define WIN_COLOR_DEPTH_YUV420P        19
+#define WIN_COLOR_DEPTH_YCbCr422P      20
+#define WIN_COLOR_DEPTH_YUV422P        21
+#define WIN_COLOR_DEPTH_YCbCr422R      22
+#define WIN_COLOR_DEPTH_YUV422R        23
+#define WIN_COLOR_DEPTH_YCbCr422RA     24
+#define WIN_COLOR_DEPTH_YUV422RA       25
+
+#define DC_WIN_POSITION				0x704
+#define H_POSITION(x) (((x) & 0x1fff) <<  0)
+#define V_POSITION(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_SIZE				0x705
+#define H_SIZE(x) (((x) & 0x1fff) <<  0)
+#define V_SIZE(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_PRESCALED_SIZE			0x706
+#define H_PRESCALED_SIZE(x) (((x) & 0x7fff) <<  0)
+#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_H_INITIAL_DDA			0x707
+#define DC_WIN_V_INITIAL_DDA			0x708
+#define DC_WIN_DDA_INC				0x709
+#define H_DDA_INC(x) (((x) & 0xffff) <<  0)
+#define V_DDA_INC(x) (((x) & 0xffff) << 16)
+
+#define DC_WIN_LINE_STRIDE			0x70a
+#define DC_WIN_BUF_STRIDE			0x70b
+#define DC_WIN_UV_BUF_STRIDE			0x70c
+#define DC_WIN_BUFFER_ADDR_MODE			0x70d
+#define DC_WIN_DV_CONTROL			0x70e
+
+#define DC_WIN_BLEND_NOKEY			0x70f
+#define DC_WIN_BLEND_1WIN			0x710
+#define DC_WIN_BLEND_2WIN_X			0x711
+#define DC_WIN_BLEND_2WIN_Y			0x712
+#define DC_WIN_BLEND_3WIN_XY			0x713
+
+#define DC_WIN_HP_FETCH_CONTROL			0x714
+
+#define DC_WINBUF_START_ADDR			0x800
+#define DC_WINBUF_START_ADDR_NS			0x801
+#define DC_WINBUF_START_ADDR_U			0x802
+#define DC_WINBUF_START_ADDR_U_NS		0x803
+#define DC_WINBUF_START_ADDR_V			0x804
+#define DC_WINBUF_START_ADDR_V_NS		0x805
+
+#define DC_WINBUF_ADDR_H_OFFSET			0x806
+#define DC_WINBUF_ADDR_H_OFFSET_NS		0x807
+#define DC_WINBUF_ADDR_V_OFFSET			0x808
+#define DC_WINBUF_ADDR_V_OFFSET_NS		0x809
+
+#define DC_WINBUF_UFLOW_STATUS			0x80a
+
+#define DC_WINBUF_AD_UFLOW_STATUS		0xbca
+#define DC_WINBUF_BD_UFLOW_STATUS		0xdca
+#define DC_WINBUF_CD_UFLOW_STATUS		0xfca
+
+/* synchronization points */
+#define SYNCPT_VBLANK0 26
+#define SYNCPT_VBLANK1 27
+
+#endif /* TEGRA_DC_H */
diff --git a/linux-imx/drivers/gpu/host1x/drm/drm.c b/linux-imx/drivers/gpu/host1x/drm/drm.c
new file mode 100644
index 0000000..2b561c9
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/drm/drm.c
@@ -0,0 +1,640 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <linux/dma-mapping.h>
+#include <asm/dma-iommu.h>
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+
+#include "host1x_client.h"
+#include "dev.h"
+#include "drm.h"
+#include "gem.h"
+#include "syncpt.h"
+
+#define DRIVER_NAME "tegra"
+#define DRIVER_DESC "NVIDIA Tegra graphics"
+#define DRIVER_DATE "20120330"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+struct host1x_drm_client {
+	struct host1x_client *client;
+	struct device_node *np;
+	struct list_head list;
+};
+
+static int host1x_add_drm_client(struct host1x_drm *host1x,
+				 struct device_node *np)
+{
+	struct host1x_drm_client *client;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&client->list);
+	client->np = of_node_get(np);
+
+	list_add_tail(&client->list, &host1x->drm_clients);
+
+	return 0;
+}
+
+static int host1x_activate_drm_client(struct host1x_drm *host1x,
+				      struct host1x_drm_client *drm,
+				      struct host1x_client *client)
+{
+	mutex_lock(&host1x->drm_clients_lock);
+	list_del_init(&drm->list);
+	list_add_tail(&drm->list, &host1x->drm_active);
+	drm->client = client;
+	mutex_unlock(&host1x->drm_clients_lock);
+
+	return 0;
+}
+
+static int host1x_remove_drm_client(struct host1x_drm *host1x,
+				    struct host1x_drm_client *client)
+{
+	mutex_lock(&host1x->drm_clients_lock);
+	list_del_init(&client->list);
+	mutex_unlock(&host1x->drm_clients_lock);
+
+	of_node_put(client->np);
+	kfree(client);
+
+	return 0;
+}
+
+static int host1x_parse_dt(struct host1x_drm *host1x)
+{
+	static const char * const compat[] = {
+		"nvidia,tegra20-dc",
+		"nvidia,tegra20-hdmi",
+		"nvidia,tegra20-gr2d",
+		"nvidia,tegra30-dc",
+		"nvidia,tegra30-hdmi",
+		"nvidia,tegra30-gr2d",
+	};
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < ARRAY_SIZE(compat); i++) {
+		struct device_node *np;
+
+		for_each_child_of_node(host1x->dev->of_node, np) {
+			if (of_device_is_compatible(np, compat[i]) &&
+			    of_device_is_available(np)) {
+				err = host1x_add_drm_client(host1x, np);
+				if (err < 0)
+					return err;
+			}
+		}
+	}
+
+	return 0;
+}
+
+int host1x_drm_alloc(struct platform_device *pdev)
+{
+	struct host1x_drm *host1x;
+	int err;
+
+	host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
+	if (!host1x)
+		return -ENOMEM;
+
+	mutex_init(&host1x->drm_clients_lock);
+	INIT_LIST_HEAD(&host1x->drm_clients);
+	INIT_LIST_HEAD(&host1x->drm_active);
+	mutex_init(&host1x->clients_lock);
+	INIT_LIST_HEAD(&host1x->clients);
+	host1x->dev = &pdev->dev;
+
+	err = host1x_parse_dt(host1x);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
+		return err;
+	}
+
+	host1x_set_drm_data(&pdev->dev, host1x);
+
+	return 0;
+}
+
+int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm)
+{
+	struct host1x_client *client;
+
+	mutex_lock(&host1x->clients_lock);
+
+	list_for_each_entry(client, &host1x->clients, list) {
+		if (client->ops && client->ops->drm_init) {
+			int err = client->ops->drm_init(client, drm);
+			if (err < 0) {
+				dev_err(host1x->dev,
+					"DRM setup failed for %s: %d\n",
+					dev_name(client->dev), err);
+				mutex_unlock(&host1x->clients_lock);
+				return err;
+			}
+		}
+	}
+
+	mutex_unlock(&host1x->clients_lock);
+
+	return 0;
+}
+
+int host1x_drm_exit(struct host1x_drm *host1x)
+{
+	struct platform_device *pdev = to_platform_device(host1x->dev);
+	struct host1x_client *client;
+
+	if (!host1x->drm)
+		return 0;
+
+	mutex_lock(&host1x->clients_lock);
+
+	list_for_each_entry_reverse(client, &host1x->clients, list) {
+		if (client->ops && client->ops->drm_exit) {
+			int err = client->ops->drm_exit(client);
+			if (err < 0) {
+				dev_err(host1x->dev,
+					"DRM cleanup failed for %s: %d\n",
+					dev_name(client->dev), err);
+				mutex_unlock(&host1x->clients_lock);
+				return err;
+			}
+		}
+	}
+
+	mutex_unlock(&host1x->clients_lock);
+
+	drm_platform_exit(&tegra_drm_driver, pdev);
+	host1x->drm = NULL;
+
+	return 0;
+}
+
+int host1x_register_client(struct host1x_drm *host1x,
+			   struct host1x_client *client)
+{
+	struct host1x_drm_client *drm, *tmp;
+	int err;
+
+	mutex_lock(&host1x->clients_lock);
+	list_add_tail(&client->list, &host1x->clients);
+	mutex_unlock(&host1x->clients_lock);
+
+	list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
+		if (drm->np == client->dev->of_node)
+			host1x_activate_drm_client(host1x, drm, client);
+
+	if (list_empty(&host1x->drm_clients)) {
+		struct platform_device *pdev = to_platform_device(host1x->dev);
+
+		err = drm_platform_init(&tegra_drm_driver, pdev);
+		if (err < 0) {
+			dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+int host1x_unregister_client(struct host1x_drm *host1x,
+			     struct host1x_client *client)
+{
+	struct host1x_drm_client *drm, *tmp;
+	int err;
+
+	list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
+		if (drm->client == client) {
+			err = host1x_drm_exit(host1x);
+			if (err < 0) {
+				dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
+					err);
+				return err;
+			}
+
+			host1x_remove_drm_client(host1x, drm);
+			break;
+		}
+	}
+
+	mutex_lock(&host1x->clients_lock);
+	list_del_init(&client->list);
+	mutex_unlock(&host1x->clients_lock);
+
+	return 0;
+}
+
+static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
+{
+	struct host1x_drm *host1x;
+	int err;
+
+	host1x = host1x_get_drm_data(drm->dev);
+	drm->dev_private = host1x;
+	host1x->drm = drm;
+
+	drm_mode_config_init(drm);
+
+	err = host1x_drm_init(host1x, drm);
+	if (err < 0)
+		return err;
+
+	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+	if (err < 0)
+		return err;
+
+	err = tegra_drm_fb_init(drm);
+	if (err < 0)
+		return err;
+
+	drm_kms_helper_poll_init(drm);
+
+	return 0;
+}
+
+static int tegra_drm_unload(struct drm_device *drm)
+{
+	drm_kms_helper_poll_fini(drm);
+	tegra_drm_fb_exit(drm);
+
+	drm_mode_config_cleanup(drm);
+
+	return 0;
+}
+
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+{
+	struct host1x_drm_file *fpriv;
+
+	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+	if (!fpriv)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&fpriv->contexts);
+	filp->driver_priv = fpriv;
+
+	return 0;
+}
+
+static void host1x_drm_context_free(struct host1x_drm_context *context)
+{
+	context->client->ops->close_channel(context);
+	kfree(context);
+}
+
+static void tegra_drm_lastclose(struct drm_device *drm)
+{
+	struct host1x_drm *host1x = drm->dev_private;
+
+	tegra_fbdev_restore_mode(host1x->fbdev);
+}
+
+#ifdef CONFIG_DRM_TEGRA_STAGING
+static bool host1x_drm_file_owns_context(struct host1x_drm_file *file,
+					 struct host1x_drm_context *context)
+{
+	struct host1x_drm_context *ctx;
+
+	list_for_each_entry(ctx, &file->contexts, list)
+		if (ctx == context)
+			return true;
+
+	return false;
+}
+
+static int tegra_gem_create(struct drm_device *drm, void *data,
+			    struct drm_file *file)
+{
+	struct drm_tegra_gem_create *args = data;
+	struct tegra_bo *bo;
+
+	bo = tegra_bo_create_with_handle(file, drm, args->size,
+					 &args->handle);
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
+
+	return 0;
+}
+
+static int tegra_gem_mmap(struct drm_device *drm, void *data,
+			  struct drm_file *file)
+{
+	struct drm_tegra_gem_mmap *args = data;
+	struct drm_gem_object *gem;
+	struct tegra_bo *bo;
+
+	gem = drm_gem_object_lookup(drm, file, args->handle);
+	if (!gem)
+		return -EINVAL;
+
+	bo = to_tegra_bo(gem);
+
+	args->offset = tegra_bo_get_mmap_offset(bo);
+
+	drm_gem_object_unreference(gem);
+
+	return 0;
+}
+
+static int tegra_syncpt_read(struct drm_device *drm, void *data,
+			     struct drm_file *file)
+{
+	struct drm_tegra_syncpt_read *args = data;
+	struct host1x *host = dev_get_drvdata(drm->dev);
+	struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
+
+	if (!sp)
+		return -EINVAL;
+
+	args->value = host1x_syncpt_read_min(sp);
+	return 0;
+}
+
+static int tegra_syncpt_incr(struct drm_device *drm, void *data,
+			     struct drm_file *file)
+{
+	struct drm_tegra_syncpt_incr *args = data;
+	struct host1x *host = dev_get_drvdata(drm->dev);
+	struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
+
+	if (!sp)
+		return -EINVAL;
+
+	host1x_syncpt_incr(sp);
+	return 0;
+}
+
+static int tegra_syncpt_wait(struct drm_device *drm, void *data,
+			     struct drm_file *file)
+{
+	struct drm_tegra_syncpt_wait *args = data;
+	struct host1x *host = dev_get_drvdata(drm->dev);
+	struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
+
+	if (!sp)
+		return -EINVAL;
+
+	return host1x_syncpt_wait(sp, args->thresh, args->timeout,
+				  &args->value);
+}
+
+static int tegra_open_channel(struct drm_device *drm, void *data,
+			      struct drm_file *file)
+{
+	struct drm_tegra_open_channel *args = data;
+	struct host1x_client *client;
+	struct host1x_drm_context *context;
+	struct host1x_drm_file *fpriv = file->driver_priv;
+	struct host1x_drm *host1x = drm->dev_private;
+	int err = -ENODEV;
+
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return -ENOMEM;
+
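+	/* find a client of the requested class and open a channel to it */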
+	list_for_each_entry(client, &host1x->clients, list)
+		if (client->class == args->client) {
+			err = client->ops->open_channel(client, context);
+			if (err)
+				break;
+
+			context->client = client;
+			list_add(&context->list, &fpriv->contexts);
+			args->context = (uintptr_t)context;
+			return 0;
+		}
+
+	kfree(context);
+	return err;
+}
+
+static int tegra_close_channel(struct drm_device *drm, void *data,
+			       struct drm_file *file)
+{
+	struct drm_tegra_close_channel *args = data;
+	struct host1x_drm_file *fpriv = file->driver_priv;
+	struct host1x_drm_context *context =
+		(struct host1x_drm_context *)(uintptr_t)args->context;
+
+	if (!host1x_drm_file_owns_context(fpriv, context))
+		return -EINVAL;
+
+	list_del(&context->list);
+	host1x_drm_context_free(context);
+
+	return 0;
+}
+
+static int tegra_get_syncpt(struct drm_device *drm, void *data,
+			    struct drm_file *file)
+{
+	struct drm_tegra_get_syncpt *args = data;
+	struct host1x_drm_file *fpriv = file->driver_priv;
+	struct host1x_drm_context *context =
+		(struct host1x_drm_context *)(uintptr_t)args->context;
+	struct host1x_syncpt *syncpt;
+
+	if (!host1x_drm_file_owns_context(fpriv, context))
+		return -ENODEV;
+
+	if (args->index >= context->client->num_syncpts)
+		return -EINVAL;
+
+	syncpt = context->client->syncpts[args->index];
+	args->id = host1x_syncpt_id(syncpt);
+
+	return 0;
+}
+
+static int tegra_submit(struct drm_device *drm, void *data,
+			struct drm_file *file)
+{
+	struct drm_tegra_submit *args = data;
+	struct host1x_drm_file *fpriv = file->driver_priv;
+	struct host1x_drm_context *context =
+		(struct host1x_drm_context *)(uintptr_t)args->context;
+
+	if (!host1x_drm_file_owns_context(fpriv, context))
+		return -ENODEV;
+
+	return context->client->ops->submit(context, args, drm, file);
+}
+#endif
+
+static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+#ifdef CONFIG_DRM_TEGRA_STAGING
+	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
+#endif
+};
+
+static const struct file_operations tegra_drm_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = tegra_drm_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+	.read = drm_read,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
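+/* look up the CRTC whose display controller drives the given pipe */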
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+{
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
+		struct tegra_dc *dc = to_tegra_dc(crtc);
+
+		if (dc->pipe == pipe)
+			return crtc;
+	}
+
+	return NULL;
+}
+
+static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	/* TODO: implement real hardware counter using syncpoints */
+	return drm_vblank_count(dev, crtc);
+}
+
+static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
+{
+	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+
+	if (!crtc)
+		return -ENODEV;
+
+	tegra_dc_enable_vblank(to_tegra_dc(crtc));
+
+	return 0;
+}
+
+static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
+{
+	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+
+	if (crtc)
+		tegra_dc_disable_vblank(to_tegra_dc(crtc));
+}
+
+static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
+{
+	struct host1x_drm_file *fpriv = file->driver_priv;
+	struct host1x_drm_context *context, *tmp;
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+		tegra_dc_cancel_page_flip(crtc, file);
+
+	list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
+		host1x_drm_context_free(context);
+
+	kfree(fpriv);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)s->private;
+	struct drm_device *drm = node->minor->dev;
+	struct drm_framebuffer *fb;
+
+	mutex_lock(&drm->mode_config.fb_lock);
+
+	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
+		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
+			   fb->base.id, fb->width, fb->height, fb->depth,
+			   fb->bits_per_pixel,
+			   atomic_read(&fb->refcount.refcount));
+	}
+
+	mutex_unlock(&drm->mode_config.fb_lock);
+
+	return 0;
+}
+
+static struct drm_info_list tegra_debugfs_list[] = {
+	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
+};
+
+static int tegra_debugfs_init(struct drm_minor *minor)
+{
+	return drm_debugfs_create_files(tegra_debugfs_list,
+					ARRAY_SIZE(tegra_debugfs_list),
+					minor->debugfs_root, minor);
+}
+
+static void tegra_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(tegra_debugfs_list,
+				 ARRAY_SIZE(tegra_debugfs_list), minor);
+}
+#endif
+
+struct drm_driver tegra_drm_driver = {
+	.driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
+	.load = tegra_drm_load,
+	.unload = tegra_drm_unload,
+	.open = tegra_drm_open,
+	.preclose = tegra_drm_preclose,
+	.lastclose = tegra_drm_lastclose,
+
+	.get_vblank_counter = tegra_drm_get_vblank_counter,
+	.enable_vblank = tegra_drm_enable_vblank,
+	.disable_vblank = tegra_drm_disable_vblank,
+
+#if defined(CONFIG_DEBUG_FS)
+	.debugfs_init = tegra_debugfs_init,
+	.debugfs_cleanup = tegra_debugfs_cleanup,
+#endif
+
+	.gem_free_object = tegra_bo_free_object,
+	.gem_vm_ops = &tegra_bo_vm_ops,
+	.dumb_create = tegra_bo_dumb_create,
+	.dumb_map_offset = tegra_bo_dumb_map_offset,
+	.dumb_destroy = tegra_bo_dumb_destroy,
+
+	.ioctls = tegra_drm_ioctls,
+	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
+	.fops = &tegra_drm_fops,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
diff --git a/linux-imx/drivers/gpu/host1x/drm/drm.h b/linux-imx/drivers/gpu/host1x/drm/drm.h
new file mode 100644
index 0000000..02ce020
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/drm/drm.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef HOST1X_DRM_H
+#define HOST1X_DRM_H 1
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fixed.h>
+#include <uapi/drm/tegra_drm.h>
+
+#include "host1x.h"
+
+struct tegra_fb {
+	struct drm_framebuffer base;
+	struct tegra_bo **planes;
+	unsigned int num_planes;
+};
+
+struct tegra_fbdev {
+	struct drm_fb_helper base;
+	struct tegra_fb *fb;
+};
+
+struct host1x_drm {
+	struct drm_device *drm;
+	struct device *dev;
+	void __iomem *regs;
+	struct clk *clk;
+	int syncpt;
+	int irq;
+
+	struct mutex drm_clients_lock;
+	struct list_head drm_clients;
+	struct list_head drm_active;
+
+	struct mutex clients_lock;
+	struct list_head clients;
+
+	struct tegra_fbdev *fbdev;
+};
+
+struct host1x_client;
+
+struct host1x_drm_context {
+	struct host1x_client *client;
+	struct host1x_channel *channel;
+	struct list_head list;
+};
+
+struct host1x_client_ops {
+	int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
+	int (*drm_exit)(struct host1x_client *client);
+	int (*open_channel)(struct host1x_client *client,
+			    struct host1x_drm_context *context);
+	void (*close_channel)(struct host1x_drm_context *context);
+	int (*submit)(struct host1x_drm_context *context,
+		      struct drm_tegra_submit *args, struct drm_device *drm,
+		      struct drm_file *file);
+};
+
+struct host1x_drm_file {
+	struct list_head contexts;
+};
+
+struct host1x_client {
+	struct host1x_drm *host1x;
+	struct device *dev;
+
+	const struct host1x_client_ops *ops;
+
+	enum host1x_class class;
+	struct host1x_channel *channel;
+
+	struct host1x_syncpt **syncpts;
+	unsigned int num_syncpts;
+
+	struct list_head list;
+};
+
+extern int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm);
+extern int host1x_drm_exit(struct host1x_drm *host1x);
+
+extern int host1x_register_client(struct host1x_drm *host1x,
+				  struct host1x_client *client);
+extern int host1x_unregister_client(struct host1x_drm *host1x,
+				    struct host1x_client *client);
+
+struct tegra_output;
+
+struct tegra_dc {
+	struct host1x_client client;
+	spinlock_t lock;
+
+	struct host1x_drm *host1x;
+	struct device *dev;
+
+	struct drm_crtc base;
+	int pipe;
+
+	struct clk *clk;
+
+	void __iomem *regs;
+	int irq;
+
+	struct tegra_output *rgb;
+
+	struct list_head list;
+
+	struct drm_info_list *debugfs_files;
+	struct drm_minor *minor;
+	struct dentry *debugfs;
+
+	/* page-flip handling */
+	struct drm_pending_vblank_event *event;
+};
+
+static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
+{
+	return container_of(client, struct tegra_dc, client);
+}
+
+static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
+{
+	return container_of(crtc, struct tegra_dc, base);
+}
+
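+/*
+ * Display controller registers are specified as 32-bit word offsets, so
+ * these accessors shift the register index left by two to form the byte
+ * offset into the register space.
+ */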
+static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
+				   unsigned long reg)
+{
+	writel(value, dc->regs + (reg << 2));
+}
+
+static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
+					   unsigned long reg)
+{
+	return readl(dc->regs + (reg << 2));
+}
+
+struct tegra_dc_window {
+	struct {
+		unsigned int x;
+		unsigned int y;
+		unsigned int w;
+		unsigned int h;
+	} src;
+	struct {
+		unsigned int x;
+		unsigned int y;
+		unsigned int w;
+		unsigned int h;
+	} dst;
+	unsigned int bits_per_pixel;
+	unsigned int format;
+	unsigned int stride[2];
+	unsigned long base[3];
+};
+
+/* from dc.c */
+extern unsigned int tegra_dc_format(uint32_t format);
+extern int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
+				 const struct tegra_dc_window *window);
+extern void tegra_dc_enable_vblank(struct tegra_dc *dc);
+extern void tegra_dc_disable_vblank(struct tegra_dc *dc);
+extern void tegra_dc_cancel_page_flip(struct drm_crtc *crtc,
+				      struct drm_file *file);
+
+struct tegra_output_ops {
+	int (*enable)(struct tegra_output *output);
+	int (*disable)(struct tegra_output *output);
+	int (*setup_clock)(struct tegra_output *output, struct clk *clk,
+			   unsigned long pclk);
+	int (*check_mode)(struct tegra_output *output,
+			  struct drm_display_mode *mode,
+			  enum drm_mode_status *status);
+};
+
+enum tegra_output_type {
+	TEGRA_OUTPUT_RGB,
+	TEGRA_OUTPUT_HDMI,
+};
+
+struct tegra_output {
+	struct device_node *of_node;
+	struct device *dev;
+
+	const struct tegra_output_ops *ops;
+	enum tegra_output_type type;
+
+	struct i2c_adapter *ddc;
+	const struct edid *edid;
+	unsigned int hpd_irq;
+	int hpd_gpio;
+
+	struct drm_encoder encoder;
+	struct drm_connector connector;
+};
+
+static inline struct tegra_output *encoder_to_output(struct drm_encoder *e)
+{
+	return container_of(e, struct tegra_output, encoder);
+}
+
+static inline struct tegra_output *connector_to_output(struct drm_connector *c)
+{
+	return container_of(c, struct tegra_output, connector);
+}
+
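+/*
+ * The wrappers below return -EINVAL when no output is given and -ENOSYS
+ * when the output does not implement the requested operation.
+ */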
+static inline int tegra_output_enable(struct tegra_output *output)
+{
+	if (output && output->ops && output->ops->enable)
+		return output->ops->enable(output);
+
+	return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_disable(struct tegra_output *output)
+{
+	if (output && output->ops && output->ops->disable)
+		return output->ops->disable(output);
+
+	return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_setup_clock(struct tegra_output *output,
+					   struct clk *clk, unsigned long pclk)
+{
+	if (output && output->ops && output->ops->setup_clock)
+		return output->ops->setup_clock(output, clk, pclk);
+
+	return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_check_mode(struct tegra_output *output,
+					  struct drm_display_mode *mode,
+					  enum drm_mode_status *status)
+{
+	if (output && output->ops && output->ops->check_mode)
+		return output->ops->check_mode(output, mode, status);
+
+	return output ? -ENOSYS : -EINVAL;
+}
+
+/* from rgb.c */
+extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
+extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
+extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
+
+/* from output.c */
+extern int tegra_output_parse_dt(struct tegra_output *output);
+extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
+extern int tegra_output_exit(struct tegra_output *output);
+
+/* from fb.c */
+struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
+				    unsigned int index);
+extern int tegra_drm_fb_init(struct drm_device *drm);
+extern void tegra_drm_fb_exit(struct drm_device *drm);
+extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
+
+extern struct drm_driver tegra_drm_driver;
+
+#endif /* HOST1X_DRM_H */
diff --git a/linux-imx/drivers/gpu/host1x/drm/fb.c b/linux-imx/drivers/gpu/host1x/drm/fb.c
new file mode 100644
index 0000000..979a3e3
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/drm/fb.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright (C) 2012-2013 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Based on the KMS/FB CMA helpers
+ *   Copyright (C) 2012 Analog Device Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include "drm.h"
+#include "gem.h"
+
+static inline struct tegra_fb *to_tegra_fb(struct drm_framebuffer *fb)
+{
+	return container_of(fb, struct tegra_fb, base);
+}
+
+static inline struct tegra_fbdev *to_tegra_fbdev(struct drm_fb_helper *helper)
+{
+	return container_of(helper, struct tegra_fbdev, base);
+}
+
+struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
+				    unsigned int index)
+{
+	struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+	if (index >= drm_format_num_planes(framebuffer->pixel_format))
+		return NULL;
+
+	return fb->planes[index];
+}
+
+static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
+{
+	struct tegra_fb *fb = to_tegra_fb(framebuffer);
+	unsigned int i;
+
+	for (i = 0; i < fb->num_planes; i++) {
+		struct tegra_bo *bo = fb->planes[i];
+
+		if (bo)
+			drm_gem_object_unreference_unlocked(&bo->gem);
+	}
+
+	drm_framebuffer_cleanup(framebuffer);
+	kfree(fb->planes);
+	kfree(fb);
+}
+
+static int tegra_fb_create_handle(struct drm_framebuffer *framebuffer,
+				  struct drm_file *file, unsigned int *handle)
+{
+	struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+	return drm_gem_handle_create(file, &fb->planes[0]->gem, handle);
+}
+
+static struct drm_framebuffer_funcs tegra_fb_funcs = {
+	.destroy = tegra_fb_destroy,
+	.create_handle = tegra_fb_create_handle,
+};
+
+static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm,
+				       struct drm_mode_fb_cmd2 *mode_cmd,
+				       struct tegra_bo **planes,
+				       unsigned int num_planes)
+{
+	struct tegra_fb *fb;
+	unsigned int i;
+	int err;
+
+	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+	if (!fb)
+		return ERR_PTR(-ENOMEM);
+
+	fb->planes = kcalloc(num_planes, sizeof(*planes), GFP_KERNEL);
+	if (!fb->planes) {
+		kfree(fb);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fb->num_planes = num_planes;
+
+	drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+
+	for (i = 0; i < fb->num_planes; i++)
+		fb->planes[i] = planes[i];
+
+	err = drm_framebuffer_init(drm, &fb->base, &tegra_fb_funcs);
+	if (err < 0) {
+		dev_err(drm->dev, "failed to initialize framebuffer: %d\n",
+			err);
+		kfree(fb->planes);
+		kfree(fb);
+		return ERR_PTR(err);
+	}
+
+	return fb;
+}
+
+static struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
+					       struct drm_file *file,
+					       struct drm_mode_fb_cmd2 *cmd)
+{
+	unsigned int hsub, vsub, i;
+	struct tegra_bo *planes[4];
+	struct drm_gem_object *gem;
+	struct tegra_fb *fb;
+	int err;
+
+	hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format);
+	vsub = drm_format_vert_chroma_subsampling(cmd->pixel_format);
+
+	for (i = 0; i < drm_format_num_planes(cmd->pixel_format); i++) {
+		unsigned int width = cmd->width / (i ? hsub : 1);
+		unsigned int height = cmd->height / (i ? vsub : 1);
+		unsigned int size, bpp;
+
+		gem = drm_gem_object_lookup(drm, file, cmd->handles[i]);
+		if (!gem) {
+			err = -ENXIO;
+			goto unreference;
+		}
+
+		bpp = drm_format_plane_cpp(cmd->pixel_format, i);
+
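+		/*
+		 * The smallest buffer that can hold this plane: height - 1
+		 * full pitches, plus one line of pixels, plus the plane's
+		 * offset into the buffer.
+		 */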
+		size = (height - 1) * cmd->pitches[i] +
+		       width * bpp + cmd->offsets[i];
+
+		if (gem->size < size) {
+			/* not yet stored in planes[], so drop it here */
+			drm_gem_object_unreference_unlocked(gem);
+			err = -EINVAL;
+			goto unreference;
+		}
+
+		planes[i] = to_tegra_bo(gem);
+	}
+
+	fb = tegra_fb_alloc(drm, cmd, planes, i);
+	if (IS_ERR(fb)) {
+		err = PTR_ERR(fb);
+		goto unreference;
+	}
+
+	return &fb->base;
+
+unreference:
+	while (i--)
+		drm_gem_object_unreference_unlocked(&planes[i]->gem);
+
+	return ERR_PTR(err);
+}
+
+static struct fb_ops tegra_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_fillrect = sys_fillrect,
+	.fb_copyarea = sys_copyarea,
+	.fb_imageblit = sys_imageblit,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int tegra_fbdev_probe(struct drm_fb_helper *helper,
+			     struct drm_fb_helper_surface_size *sizes)
+{
+	struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
+	struct drm_device *drm = helper->dev;
+	struct drm_mode_fb_cmd2 cmd = { 0 };
+	unsigned int bytes_per_pixel;
+	struct drm_framebuffer *fb;
+	unsigned long offset;
+	struct fb_info *info;
+	struct tegra_bo *bo;
+	size_t size;
+	int err;
+
+	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+	cmd.width = sizes->surface_width;
+	cmd.height = sizes->surface_height;
+	cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+	cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+						     sizes->surface_depth);
+
+	size = cmd.pitches[0] * cmd.height;
+
+	bo = tegra_bo_create(drm, size);
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
+
+	info = framebuffer_alloc(0, drm->dev);
+	if (!info) {
+		dev_err(drm->dev, "failed to allocate framebuffer info\n");
+		tegra_bo_free_object(&bo->gem);
+		return -ENOMEM;
+	}
+
+	fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
+	if (IS_ERR(fbdev->fb)) {
+		dev_err(drm->dev, "failed to allocate DRM framebuffer\n");
+		err = PTR_ERR(fbdev->fb);
+		tegra_bo_free_object(&bo->gem);
+		goto release;
+	}
+
+	fb = &fbdev->fb->base;
+	helper->fb = fb;
+	helper->fbdev = info;
+
+	info->par = helper;
+	info->flags = FBINFO_FLAG_DEFAULT;
+	info->fbops = &tegra_fb_ops;
+
+	err = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (err < 0) {
+		dev_err(drm->dev, "failed to allocate color map: %d\n", err);
+		goto destroy;
+	}
+
+	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
+
+	offset = info->var.xoffset * bytes_per_pixel +
+		 info->var.yoffset * fb->pitches[0];
+
+	drm->mode_config.fb_base = (resource_size_t)bo->paddr;
+	info->screen_base = bo->vaddr + offset;
+	info->screen_size = size;
+	info->fix.smem_start = (unsigned long)(bo->paddr + offset);
+	info->fix.smem_len = size;
+
+	return 0;
+
+destroy:
+	drm_framebuffer_unregister_private(fb);
+	tegra_fb_destroy(fb);
+release:
+	framebuffer_release(info);
+	return err;
+}
+
+static struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
+	.fb_probe = tegra_fbdev_probe,
+};
+
+static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
+					      unsigned int preferred_bpp,
+					      unsigned int num_crtc,
+					      unsigned int max_connectors)
+{
+	struct drm_fb_helper *helper;
+	struct tegra_fbdev *fbdev;
+	int err;
+
+	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+	if (!fbdev) {
+		dev_err(drm->dev, "failed to allocate DRM fbdev\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fbdev->base.funcs = &tegra_fb_helper_funcs;
+	helper = &fbdev->base;
+
+	err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
+	if (err < 0) {
+		dev_err(drm->dev, "failed to initialize DRM FB helper\n");
+		goto free;
+	}
+
+	err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
+	if (err < 0) {
+		dev_err(drm->dev, "failed to add connectors\n");
+		goto fini;
+	}
+
+	drm_helper_disable_unused_functions(drm);
+
+	err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
+	if (err < 0) {
+		dev_err(drm->dev, "failed to set initial configuration\n");
+		goto fini;
+	}
+
+	return fbdev;
+
+fini:
+	drm_fb_helper_fini(&fbdev->base);
+free:
+	kfree(fbdev);
+	return ERR_PTR(err);
+}
+
+static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
+{
+	struct fb_info *info = fbdev->base.fbdev;
+
+	if (info) {
+		int err;
+
+		err = unregister_framebuffer(info);
+		if (err < 0)
+			DRM_DEBUG_KMS("failed to unregister framebuffer\n");
+
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+
+		framebuffer_release(info);
+	}
+
+	if (fbdev->fb) {
+		drm_framebuffer_unregister_private(&fbdev->fb->base);
+		tegra_fb_destroy(&fbdev->fb->base);
+	}
+
+	drm_fb_helper_fini(&fbdev->base);
+	kfree(fbdev);
+}
+
+static void tegra_fb_output_poll_changed(struct drm_device *drm)
+{
+	struct host1x_drm *host1x = drm->dev_private;
+
+	if (host1x->fbdev)
+		drm_fb_helper_hotplug_event(&host1x->fbdev->base);
+}
+
+static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
+	.fb_create = tegra_fb_create,
+	.output_poll_changed = tegra_fb_output_poll_changed,
+};
+
+int tegra_drm_fb_init(struct drm_device *drm)
+{
+	struct host1x_drm *host1x = drm->dev_private;
+	struct tegra_fbdev *fbdev;
+
+	drm->mode_config.min_width = 0;
+	drm->mode_config.min_height = 0;
+
+	drm->mode_config.max_width = 4096;
+	drm->mode_config.max_height = 4096;
+
+	drm->mode_config.funcs = &tegra_drm_mode_funcs;
+
+	fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
+				   drm->mode_config.num_connector);
+	if (IS_ERR(fbdev))
+		return PTR_ERR(fbdev);
+
+	host1x->fbdev = fbdev;
+
+	return 0;
+}
+
+void tegra_drm_fb_exit(struct drm_device *drm)
+{
+	struct host1x_drm *host1x = drm->dev_private;
+
+	tegra_fbdev_free(host1x->fbdev);
+}
+
+void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
+{
+	if (fbdev) {
+		drm_modeset_lock_all(fbdev->base.dev);
+		drm_fb_helper_restore_fbdev_mode(&fbdev->base);
+		drm_modeset_unlock_all(fbdev->base.dev);
+	}
+}
diff --git a/linux-imx/drivers/gpu/host1x/drm/gem.c b/linux-imx/drivers/gpu/host1x/drm/gem.c
new file mode 100644
index 0000000..c5e9a9b
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/drm/gem.c
@@ -0,0 +1,270 @@
+/*
+ * NVIDIA Tegra DRM GEM helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
+ *
+ * Based on the GEM/CMA helpers
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+#include "gem.h"
+
+static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo)
+{
+	return container_of(bo, struct tegra_bo, base);
+}
+
+static void tegra_bo_put(struct host1x_bo *bo)
+{
+	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+	struct drm_device *drm = obj->gem.dev;
+
+	mutex_lock(&drm->struct_mutex);
+	drm_gem_object_unreference(&obj->gem);
+	mutex_unlock(&drm->struct_mutex);
+}
+
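+/*
+ * Buffers are backed by physically contiguous memory, so pinning can simply
+ * hand back the buffer's physical address without building a scatter-gather
+ * table.
+ */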
+static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
+{
+	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+
+	return obj->paddr;
+}
+
+static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+{
+}
+
+static void *tegra_bo_mmap(struct host1x_bo *bo)
+{
+	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+
+	return obj->vaddr;
+}
+
+static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
+{
+}
+
+static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
+{
+	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+
+	return obj->vaddr + page * PAGE_SIZE;
+}
+
+static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
+			    void *addr)
+{
+}
+
+static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
+{
+	struct tegra_bo *obj = host1x_to_drm_bo(bo);
+	struct drm_device *drm = obj->gem.dev;
+
+	mutex_lock(&drm->struct_mutex);
+	drm_gem_object_reference(&obj->gem);
+	mutex_unlock(&drm->struct_mutex);
+
+	return bo;
+}
+
+const struct host1x_bo_ops tegra_bo_ops = {
+	.get = tegra_bo_get,
+	.put = tegra_bo_put,
+	.pin = tegra_bo_pin,
+	.unpin = tegra_bo_unpin,
+	.mmap = tegra_bo_mmap,
+	.munmap = tegra_bo_munmap,
+	.kmap = tegra_bo_kmap,
+	.kunmap = tegra_bo_kunmap,
+};
+
+static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
+{
+	dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+}
+
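+/*
+ * The fake mmap offset handed to userspace comes from the GEM map-list hash
+ * key, which stores a page index, so it is converted to a byte offset here.
+ */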
+unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
+{
+	return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
+}
+
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
+{
+	struct tegra_bo *bo;
+	int err;
+
+	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+	if (!bo)
+		return ERR_PTR(-ENOMEM);
+
+	host1x_bo_init(&bo->base, &tegra_bo_ops);
+	size = round_up(size, PAGE_SIZE);
+
+	bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
+					   GFP_KERNEL | __GFP_NOWARN);
+	if (!bo->vaddr) {
+		dev_err(drm->dev, "failed to allocate buffer with size %u\n",
+			size);
+		err = -ENOMEM;
+		goto err_dma;
+	}
+
+	err = drm_gem_object_init(drm, &bo->gem, size);
+	if (err)
+		goto err_init;
+
+	err = drm_gem_create_mmap_offset(&bo->gem);
+	if (err)
+		goto err_mmap;
+
+	return bo;
+
+err_mmap:
+	drm_gem_object_release(&bo->gem);
+err_init:
+	tegra_bo_destroy(drm, bo);
+err_dma:
+	kfree(bo);
+
+	return ERR_PTR(err);
+}
+
+struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
+					     struct drm_device *drm,
+					     unsigned int size,
+					     unsigned int *handle)
+{
+	struct tegra_bo *bo;
+	int ret;
+
+	bo = tegra_bo_create(drm, size);
+	if (IS_ERR(bo))
+		return bo;
+
+	ret = drm_gem_handle_create(file, &bo->gem, handle);
+	if (ret)
+		goto err;
+
+	drm_gem_object_unreference_unlocked(&bo->gem);
+
+	return bo;
+
+err:
+	tegra_bo_free_object(&bo->gem);
+	return ERR_PTR(ret);
+}
+
+void tegra_bo_free_object(struct drm_gem_object *gem)
+{
+	struct tegra_bo *bo = to_tegra_bo(gem);
+
+	if (gem->map_list.map)
+		drm_gem_free_mmap_offset(gem);
+
+	drm_gem_object_release(gem);
+	tegra_bo_destroy(gem->dev, bo);
+
+	kfree(bo);
+}
+
+int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
+			 struct drm_mode_create_dumb *args)
+{
+	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+	struct tegra_bo *bo;
+
+	if (args->pitch < min_pitch)
+		args->pitch = min_pitch;
+
+	if (args->size < args->pitch * args->height)
+		args->size = args->pitch * args->height;
+
+	bo = tegra_bo_create_with_handle(file, drm, args->size,
+					 &args->handle);
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
+
+	return 0;
+}
+
+int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
+			     uint32_t handle, uint64_t *offset)
+{
+	struct drm_gem_object *gem;
+	struct tegra_bo *bo;
+
+	mutex_lock(&drm->struct_mutex);
+
+	gem = drm_gem_object_lookup(drm, file, handle);
+	if (!gem) {
+		dev_err(drm->dev, "failed to lookup GEM object\n");
+		mutex_unlock(&drm->struct_mutex);
+		return -EINVAL;
+	}
+
+	bo = to_tegra_bo(gem);
+
+	*offset = tegra_bo_get_mmap_offset(bo);
+
+	drm_gem_object_unreference(gem);
+
+	mutex_unlock(&drm->struct_mutex);
+
+	return 0;
+}
+
+const struct vm_operations_struct tegra_bo_vm_ops = {
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct drm_gem_object *gem;
+	struct tegra_bo *bo;
+	int ret;
+
+	ret = drm_gem_mmap(file, vma);
+	if (ret)
+		return ret;
+
+	gem = vma->vm_private_data;
+	bo = to_tegra_bo(gem);
+
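+	/*
+	 * The buffer is physically contiguous, so the whole VMA can be
+	 * mapped with a single remap_pfn_range() call rather than faulting
+	 * pages in on demand.
+	 */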
+	ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
+			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
+	if (ret)
+		drm_gem_vm_close(vma);
+
+	return ret;
+}
+
+int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
+			  unsigned int handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
diff --git a/linux-imx/drivers/gpu/host1x/drm/gem.h b/linux-imx/drivers/gpu/host1x/drm/gem.h
new file mode 100644
index 0000000..34de2b4
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/drm/gem.h
@@ -0,0 +1,59 @@
+/*
+ * Tegra host1x GEM implementation
+ *
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_GEM_H
+#define __HOST1X_GEM_H
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+
+#include "host1x_bo.h"
+
+struct tegra_bo {
+	struct drm_gem_object gem;
+	struct host1x_bo base;
+	dma_addr_t paddr;
+	void *vaddr;
+};
+
+static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
+{
+	return container_of(gem, struct tegra_bo, gem);
+}
+
+extern const struct host1x_bo_ops tegra_bo_ops;
+
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size);
+struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
+					     struct drm_device *drm,
+					     unsigned int size,
+					     unsigned int *handle);
+void tegra_bo_free_object(struct drm_gem_object *gem);
+unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo);
+int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
+			 struct drm_mode_create_dumb *args);
+int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
+			     uint32_t handle, uint64_t *offset);
+int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
+			  unsigned int handle);
+
+int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
+
+extern const struct vm_operations_struct tegra_bo_vm_ops;
+
+#endif
diff --git a/linux-imx/drivers/gpu/host1x/drm/gr2d.c b/linux-imx/drivers/gpu/host1x/drm/gr2d.c
new file mode 100644
index 0000000..6a45ae0
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/drm/gr2d.c
@@ -0,0 +1,339 @@
+/*
+ * drivers/video/tegra/host/gr2d/gr2d.c
+ *
+ * Tegra Graphics 2D
+ *
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+
+#include "channel.h"
+#include "drm.h"
+#include "gem.h"
+#include "job.h"
+#include "host1x.h"
+#include "host1x_bo.h"
+#include "host1x_client.h"
+#include "syncpt.h"
+
+struct gr2d {
+	struct host1x_client client;
+	struct clk *clk;
+	struct host1x_channel *channel;
+	unsigned long *addr_regs;
+};
+
+static inline struct gr2d *to_gr2d(struct host1x_client *client)
+{
+	return container_of(client, struct gr2d, client);
+}
+
+static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg);
+
+static int gr2d_client_init(struct host1x_client *client,
+			    struct drm_device *drm)
+{
+	return 0;
+}
+
+static int gr2d_client_exit(struct host1x_client *client)
+{
+	return 0;
+}
+
+static int gr2d_open_channel(struct host1x_client *client,
+			     struct host1x_drm_context *context)
+{
+	struct gr2d *gr2d = to_gr2d(client);
+
+	context->channel = host1x_channel_get(gr2d->channel);
+
+	if (!context->channel)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void gr2d_close_channel(struct host1x_drm_context *context)
+{
+	host1x_channel_put(context->channel);
+}
+
+static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
+					  struct drm_file *file,
+					  u32 handle)
+{
+	struct drm_gem_object *gem;
+	struct tegra_bo *bo;
+
+	gem = drm_gem_object_lookup(drm, file, handle);
+	if (!gem)
+		return NULL;
+
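+	/*
+	 * Drop the reference taken by the lookup right away; the submit path
+	 * is assumed to rely on the file's handle keeping the object alive.
+	 */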
+	mutex_lock(&drm->struct_mutex);
+	drm_gem_object_unreference(gem);
+	mutex_unlock(&drm->struct_mutex);
+
+	bo = to_tegra_bo(gem);
+	return &bo->base;
+}
+
+static int gr2d_submit(struct host1x_drm_context *context,
+		       struct drm_tegra_submit *args, struct drm_device *drm,
+		       struct drm_file *file)
+{
+	struct host1x_job *job;
+	unsigned int num_cmdbufs = args->num_cmdbufs;
+	unsigned int num_relocs = args->num_relocs;
+	unsigned int num_waitchks = args->num_waitchks;
+	struct drm_tegra_cmdbuf __user *cmdbufs =
+		(void __user *)(uintptr_t)args->cmdbufs;
+	struct drm_tegra_reloc __user *relocs =
+		(void __user *)(uintptr_t)args->relocs;
+	struct drm_tegra_waitchk __user *waitchks =
+		(void __user *)(uintptr_t)args->waitchks;
+	struct drm_tegra_syncpt syncpt;
+	int err;
+
+	/* We don't yet support more than one syncpt_incr struct per submit. */
+	if (args->num_syncpts != 1)
+		return -EINVAL;
+
+	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
+			       args->num_relocs, args->num_waitchks);
+	if (!job)
+		return -ENOMEM;
+
+	job->num_relocs = args->num_relocs;
+	job->num_waitchk = args->num_waitchks;
+	job->client = (u32)args->context;
+	job->class = context->client->class;
+	job->serialize = true;
+
+	while (num_cmdbufs) {
+		struct drm_tegra_cmdbuf cmdbuf;
+		struct host1x_bo *bo;
+
+		if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
+			err = -EFAULT;
+			goto fail;
+		}
+
+		bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
+		if (!bo) {
+			err = -ENOENT;
+			goto fail;
+		}
+
+		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
+		num_cmdbufs--;
+		cmdbufs++;
+	}
+
+	if (copy_from_user(job->relocarray, relocs,
+			   sizeof(*relocs) * num_relocs)) {
+		err = -EFAULT;
+		goto fail;
+	}
+
+	while (num_relocs--) {
+		struct host1x_reloc *reloc = &job->relocarray[num_relocs];
+		struct host1x_bo *cmdbuf, *target;
+
+		cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
+		target = host1x_bo_lookup(drm, file, (u32)reloc->target);
+
+		reloc->cmdbuf = cmdbuf;
+		reloc->target = target;
+
+		if (!reloc->target || !reloc->cmdbuf) {
+			err = -ENOENT;
+			goto fail;
+		}
+	}
+
+	if (copy_from_user(job->waitchk, waitchks,
+			   sizeof(*waitchks) * num_waitchks)) {
+		err = -EFAULT;
+		goto fail;
+	}
+
+	if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
+			   sizeof(syncpt))) {
+		err = -EFAULT;
+		goto fail;
+	}
+
+	job->syncpt_id = syncpt.id;
+	job->syncpt_incrs = syncpt.incrs;
+	job->timeout = 10000;
+	job->is_addr_reg = gr2d_is_addr_reg;
+
+	if (args->timeout && args->timeout < 10000)
+		job->timeout = args->timeout;
+
+	err = host1x_job_pin(job, context->client->dev);
+	if (err)
+		goto fail;
+
+	err = host1x_job_submit(job);
+	if (err)
+		goto fail_submit;
+
+	args->fence = job->syncpt_end;
+
+	host1x_job_put(job);
+	return 0;
+
+fail_submit:
+	host1x_job_unpin(job);
+fail:
+	host1x_job_put(job);
+	return err;
+}
+
+static const struct host1x_client_ops gr2d_client_ops = {
+	.drm_init = gr2d_client_init,
+	.drm_exit = gr2d_client_exit,
+	.open_channel = gr2d_open_channel,
+	.close_channel = gr2d_close_channel,
+	.submit = gr2d_submit,
+};
+
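+/*
+ * Build a 256-bit bitmap, indexed by register offset, of the gr2d registers
+ * that take buffer addresses. gr2d_is_addr_reg() consults this table so that
+ * the host1x firewall treats writes to these registers as relocations.
+ */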
+static int gr2d_init_addr_reg_map(struct device *dev, struct gr2d *gr2d)
+{
+	const u32 gr2d_addr_regs[] = {0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d, 0x31,
+				      0x32, 0x48, 0x49, 0x4a, 0x4b, 0x4c};
+	unsigned long *bitmap;
+	int i;
+
+	bitmap = devm_kzalloc(dev, DIV_ROUND_UP(256, BITS_PER_BYTE),
+			      GFP_KERNEL);
+	if (!bitmap)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); ++i) {
+		u32 reg = gr2d_addr_regs[i];
+		bitmap[BIT_WORD(reg)] |= BIT_MASK(reg);
+	}
+
+	gr2d->addr_regs = bitmap;
+
+	return 0;
+}
+
+static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg)
+{
+	struct gr2d *gr2d = dev_get_drvdata(dev);
+
+	switch (class) {
+	case HOST1X_CLASS_HOST1X:
+		return reg == 0x2b;
+	case HOST1X_CLASS_GR2D:
+	case HOST1X_CLASS_GR2D_SB:
+		reg &= 0xff;
+		if (gr2d->addr_regs[BIT_WORD(reg)] & BIT_MASK(reg))
+			return 1;
+		/* fall through */
+	default:
+		return 0;
+	}
+}
+
+static const struct of_device_id gr2d_match[] = {
+	{ .compatible = "nvidia,tegra30-gr2d" },
+	{ .compatible = "nvidia,tegra20-gr2d" },
+	{ },
+};
+
+static int gr2d_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct host1x_drm *host1x = host1x_get_drm_data(dev->parent);
+	struct host1x_syncpt **syncpts;
+	struct gr2d *gr2d;
+	int err;
+
+	gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
+	if (!gr2d)
+		return -ENOMEM;
+
+	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+	if (!syncpts)
+		return -ENOMEM;
+
+	gr2d->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(gr2d->clk)) {
+		dev_err(dev, "cannot get clock\n");
+		return PTR_ERR(gr2d->clk);
+	}
+
+	err = clk_prepare_enable(gr2d->clk);
+	if (err) {
+		dev_err(dev, "cannot turn on clock\n");
+		return err;
+	}
+
+	gr2d->channel = host1x_channel_request(dev);
+	if (!gr2d->channel) {
+		err = -ENOMEM;
+		goto disable_clk;
+	}
+
+	*syncpts = host1x_syncpt_request(dev, 0);
+	if (!(*syncpts)) {
+		err = -ENOMEM;
+		goto free_channel;
+	}
+
+	err = gr2d_init_addr_reg_map(dev, gr2d);
+	if (err < 0)
+		goto free_syncpt;
+
+	gr2d->client.ops = &gr2d_client_ops;
+	gr2d->client.dev = dev;
+	gr2d->client.class = HOST1X_CLASS_GR2D;
+	gr2d->client.syncpts = syncpts;
+	gr2d->client.num_syncpts = 1;
+
+	err = host1x_register_client(host1x, &gr2d->client);
+	if (err < 0) {
+		dev_err(dev, "failed to register host1x client: %d\n", err);
+		goto free_syncpt;
+	}
+
+	platform_set_drvdata(pdev, gr2d);
+
+	return 0;
+
+free_syncpt:
+	host1x_syncpt_free(*syncpts);
+free_channel:
+	host1x_channel_free(gr2d->channel);
+disable_clk:
+	clk_disable_unprepare(gr2d->clk);
+	return err;
+}
+
+static int __exit gr2d_remove(struct platform_device *pdev)
+{
+	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
+	struct gr2d *gr2d = platform_get_drvdata(pdev);
+	unsigned int i;
+	int err;
+
+	err = host1x_unregister_client(host1x, &gr2d->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to unregister client: %d\n", err);
+		return err;
+	}
+
+	for (i = 0; i < gr2d->client.num_syncpts; i++)
+		host1x_syncpt_free(gr2d->client.syncpts[i]);
+
+	host1x_channel_free(gr2d->channel);
+	clk_disable_unprepare(gr2d->clk);
+
+	return 0;
+}
+
+struct platform_driver tegra_gr2d_driver = {
+	.probe = gr2d_probe,
+	.remove = __exit_p(gr2d_remove),
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "gr2d",
+		.of_match_table = gr2d_match,
+	}
+};
diff --git a/linux-imx/drivers/gpu/host1x/drm/hdmi.c b/linux-imx/drivers/gpu/host1x/drm/hdmi.c
new file mode 100644
index 0000000..01097da
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/drm/hdmi.c
@@ -0,0 +1,1313 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/hdmi.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk/tegra.h>
+
+#include <drm/drm_edid.h>
+
+#include "hdmi.h"
+#include "drm.h"
+#include "dc.h"
+#include "host1x_client.h"
+
+struct tegra_hdmi {
+	struct host1x_client client;
+	struct tegra_output output;
+	struct device *dev;
+
+	struct regulator *vdd;
+	struct regulator *pll;
+
+	void __iomem *regs;
+	unsigned int irq;
+
+	struct clk *clk_parent;
+	struct clk *clk;
+
+	unsigned int audio_source;
+	unsigned int audio_freq;
+	bool stereo;
+	bool dvi;
+
+	struct drm_info_list *debugfs_files;
+	struct drm_minor *minor;
+	struct dentry *debugfs;
+};
+
+static inline struct tegra_hdmi *
+host1x_client_to_hdmi(struct host1x_client *client)
+{
+	return container_of(client, struct tegra_hdmi, client);
+}
+
+static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output)
+{
+	return container_of(output, struct tegra_hdmi, output);
+}
+
+#define HDMI_AUDIOCLK_FREQ 216000000
+#define HDMI_REKEY_DEFAULT 56
+
+enum {
+	AUTO = 0,
+	SPDIF,
+	HDA,
+};
+
+static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi,
+					     unsigned long reg)
+{
+	return readl(hdmi->regs + (reg << 2));
+}
+
+static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val,
+				     unsigned long reg)
+{
+	writel(val, hdmi->regs + (reg << 2));
+}
+
+struct tegra_hdmi_audio_config {
+	unsigned int pclk;
+	unsigned int n;
+	unsigned int cts;
+	unsigned int aval;
+};
+
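+/*
+ * Audio clock regeneration parameters. The sink reconstructs the audio clock
+ * from N and CTS such that 128 * fs = pclk * N / CTS; for example, the
+ * 48 kHz entry for a 25.2 MHz pixel clock gives
+ * 25200000 * 6144 / 25200 = 6144000 = 128 * 48000.
+ */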
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
+	{  25200000, 4096,  25200, 24000 },
+	{  27000000, 4096,  27000, 24000 },
+	{  74250000, 4096,  74250, 24000 },
+	{ 148500000, 4096, 148500, 24000 },
+	{         0,    0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
+	{  25200000, 5880,  26250, 25000 },
+	{  27000000, 5880,  28125, 25000 },
+	{  74250000, 4704,  61875, 20000 },
+	{ 148500000, 4704, 123750, 20000 },
+	{         0,    0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
+	{  25200000, 6144,  25200, 24000 },
+	{  27000000, 6144,  27000, 24000 },
+	{  74250000, 6144,  74250, 24000 },
+	{ 148500000, 6144, 148500, 24000 },
+	{         0,    0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = {
+	{  25200000, 11760,  26250, 25000 },
+	{  27000000, 11760,  28125, 25000 },
+	{  74250000,  9408,  61875, 20000 },
+	{ 148500000,  9408, 123750, 20000 },
+	{         0,     0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = {
+	{  25200000, 12288,  25200, 24000 },
+	{  27000000, 12288,  27000, 24000 },
+	{  74250000, 12288,  74250, 24000 },
+	{ 148500000, 12288, 148500, 24000 },
+	{         0,     0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = {
+	{  25200000, 23520,  26250, 25000 },
+	{  27000000, 23520,  28125, 25000 },
+	{  74250000, 18816,  61875, 20000 },
+	{ 148500000, 18816, 123750, 20000 },
+	{         0,     0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
+	{  25200000, 24576,  25200, 24000 },
+	{  27000000, 24576,  27000, 24000 },
+	{  74250000, 24576,  74250, 24000 },
+	{ 148500000, 24576, 148500, 24000 },
+	{         0,     0,      0,     0 },
+};
+
+struct tmds_config {
+	unsigned int pclk;
+	u32 pll0;
+	u32 pll1;
+	u32 pe_current;
+	u32 drive_current;
+};
+
+static const struct tmds_config tegra2_tmds_config[] = {
+	{ /* slow pixel clock modes */
+		.pclk = 27000000,
+		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+			SOR_PLL_TX_REG_LOAD(3),
+		.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+		.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+			PE_CURRENT1(PE_CURRENT_0_0_mA) |
+			PE_CURRENT2(PE_CURRENT_0_0_mA) |
+			PE_CURRENT3(PE_CURRENT_0_0_mA),
+		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+	},
+	{ /* high pixel clock modes */
+		.pclk = UINT_MAX,
+		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+			SOR_PLL_TX_REG_LOAD(3),
+		.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+		.pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
+			PE_CURRENT1(PE_CURRENT_6_0_mA) |
+			PE_CURRENT2(PE_CURRENT_6_0_mA) |
+			PE_CURRENT3(PE_CURRENT_6_0_mA),
+		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+	},
+};
+
+static const struct tmds_config tegra3_tmds_config[] = {
+	{ /* 480p modes */
+		.pclk = 27000000,
+		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+			SOR_PLL_TX_REG_LOAD(0),
+		.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+		.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+			PE_CURRENT1(PE_CURRENT_0_0_mA) |
+			PE_CURRENT2(PE_CURRENT_0_0_mA) |
+			PE_CURRENT3(PE_CURRENT_0_0_mA),
+		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+	}, { /* 720p modes */
+		.pclk = 74250000,
+		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+			SOR_PLL_TX_REG_LOAD(0),
+		.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+		.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+			PE_CURRENT1(PE_CURRENT_5_0_mA) |
+			PE_CURRENT2(PE_CURRENT_5_0_mA) |
+			PE_CURRENT3(PE_CURRENT_5_0_mA),
+		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+	}, { /* 1080p modes */
+		.pclk = UINT_MAX,
+		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) |
+			SOR_PLL_TX_REG_LOAD(0),
+		.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+		.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+			PE_CURRENT1(PE_CURRENT_5_0_mA) |
+			PE_CURRENT2(PE_CURRENT_5_0_mA) |
+			PE_CURRENT3(PE_CURRENT_5_0_mA),
+		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+	},
+};
+
+static const struct tegra_hdmi_audio_config *
+tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
+{
+	const struct tegra_hdmi_audio_config *table;
+
+	switch (audio_freq) {
+	case 32000:
+		table = tegra_hdmi_audio_32k;
+		break;
+
+	case 44100:
+		table = tegra_hdmi_audio_44_1k;
+		break;
+
+	case 48000:
+		table = tegra_hdmi_audio_48k;
+		break;
+
+	case 88200:
+		table = tegra_hdmi_audio_88_2k;
+		break;
+
+	case 96000:
+		table = tegra_hdmi_audio_96k;
+		break;
+
+	case 176400:
+		table = tegra_hdmi_audio_176_4k;
+		break;
+
+	case 192000:
+		table = tegra_hdmi_audio_192k;
+		break;
+
+	default:
+		return NULL;
+	}
+
+	while (table->pclk) {
+		if (table->pclk == pclk)
+			return table;
+
+		table++;
+	}
+
+	return NULL;
+}
+
+static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
+{
+	const unsigned int freqs[] = {
+		32000, 44100, 48000, 88200, 96000, 176400, 192000
+	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+		unsigned int f = freqs[i];
+		unsigned int eight_half;
+		unsigned long value;
+		unsigned int delta;
+
+		if (f > 96000)
+			delta = 2;
+		else if (f > 48000)
+			delta = 6;
+		else
+			delta = 9;
+
+		eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
+		value = AUDIO_FS_LOW(eight_half - delta) |
+			AUDIO_FS_HIGH(eight_half + delta);
+		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i));
+	}
+}
+
+static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
+{
+	struct device_node *node = hdmi->dev->of_node;
+	const struct tegra_hdmi_audio_config *config;
+	unsigned int offset = 0;
+	unsigned long value;
+
+	switch (hdmi->audio_source) {
+	case HDA:
+		value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+		break;
+
+	case SPDIF:
+		value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+		break;
+
+	default:
+		value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+		break;
+	}
+
+	if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+		value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+			 AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
+		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+	} else {
+		value |= AUDIO_CNTRL0_INJECT_NULLSMPL;
+		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+
+		value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+			AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
+		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+	}
+
+	config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk);
+	if (!config) {
+		dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n",
+			hdmi->audio_freq, pclk);
+		return -EINVAL;
+	}
+
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
+
+	value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE |
+		AUDIO_N_VALUE(config->n - 1);
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
+	tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
+			  HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+
+	value = ACR_SUBPACK_CTS(config->cts);
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+
+	value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
+
+	value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N);
+	value &= ~AUDIO_N_RESETF;
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
+	if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+		switch (hdmi->audio_freq) {
+		case 32000:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
+			break;
+
+		case 44100:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
+			break;
+
+		case 48000:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480;
+			break;
+
+		case 88200:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882;
+			break;
+
+		case 96000:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960;
+			break;
+
+		case 176400:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764;
+			break;
+
+		case 192000:
+			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920;
+			break;
+		}
+
+		tegra_hdmi_writel(hdmi, config->aval, offset);
+	}
+
+	tegra_hdmi_setup_audio_fs_tables(hdmi);
+
+	return 0;
+}
+
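+/*
+ * Pack up to four payload bytes into one subpack register, least-significant
+ * byte first (ptr[0] ends up in the low byte of the result).
+ */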
+static inline unsigned long tegra_hdmi_subpack(const u8 *ptr, size_t size)
+{
+	unsigned long value = 0;
+	size_t i;
+
+	for (i = size; i > 0; i--)
+		value = (value << 8) | ptr[i - 1];
+
+	return value;
+}
+
+static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
+				      size_t size)
+{
+	const u8 *ptr = data;
+	unsigned long offset;
+	unsigned long value;
+	size_t i, j;
+
+	switch (ptr[0]) {
+	case HDMI_INFOFRAME_TYPE_AVI:
+		offset = HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER;
+		break;
+
+	case HDMI_INFOFRAME_TYPE_AUDIO:
+		offset = HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER;
+		break;
+
+	case HDMI_INFOFRAME_TYPE_VENDOR:
+		offset = HDMI_NV_PDISP_HDMI_GENERIC_HEADER;
+		break;
+
+	default:
+		dev_err(hdmi->dev, "unsupported infoframe type: %02x\n",
+			ptr[0]);
+		return;
+	}
+
+	value = INFOFRAME_HEADER_TYPE(ptr[0]) |
+		INFOFRAME_HEADER_VERSION(ptr[1]) |
+		INFOFRAME_HEADER_LEN(ptr[2]);
+	tegra_hdmi_writel(hdmi, value, offset);
+	offset++;
+
+	/*
+	 * Each subpack contains 7 bytes, divided into:
+	 * - subpack_low: bytes 0 - 3
+	 * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
+	 */
+	for (i = 3, j = 0; i < size; i += 7, j += 8) {
+		size_t rem = size - i, num = min_t(size_t, rem, 4);
+
+		value = tegra_hdmi_subpack(&ptr[i], num);
+		tegra_hdmi_writel(hdmi, value, offset++);
+
+		num = min_t(size_t, rem - num, 3);
+
+		value = tegra_hdmi_subpack(&ptr[i + 4], num);
+		tegra_hdmi_writel(hdmi, value, offset++);
+	}
+}
+
+static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
+					   struct drm_display_mode *mode)
+{
+	struct hdmi_avi_infoframe frame;
+	u8 buffer[17];
+	ssize_t err;
+
+	if (hdmi->dvi) {
+		tegra_hdmi_writel(hdmi, 0,
+				  HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+		return;
+	}
+
+	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
+		return;
+	}
+
+	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to pack AVI infoframe: %zd\n", err);
+		return;
+	}
+
+	tegra_hdmi_write_infopack(hdmi, buffer, err);
+
+	tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+			  HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+	struct hdmi_audio_infoframe frame;
+	u8 buffer[14];
+	ssize_t err;
+
+	if (hdmi->dvi) {
+		tegra_hdmi_writel(hdmi, 0,
+				  HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+		return;
+	}
+
+	err = hdmi_audio_infoframe_init(&frame);
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to initialize audio infoframe: %d\n",
+			err);
+		return;
+	}
+
+	frame.channels = 2;
+
+	err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to pack audio infoframe: %zd\n",
+			err);
+		return;
+	}
+
+	/*
+	 * The audio infoframe has only one set of subpack registers, so the
+	 * infoframe needs to be truncated. One set of subpack registers can
+	 * contain 7 bytes. Including the 3 byte header only the first 10
+	 * bytes can be programmed.
+	 */
+	tegra_hdmi_write_infopack(hdmi, buffer, min(10, err));
+
+	tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+			  HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+	struct hdmi_vendor_infoframe frame;
+	unsigned long value;
+	u8 buffer[10];
+	ssize_t err;
+
+	if (!hdmi->stereo) {
+		value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+		value &= ~GENERIC_CTRL_ENABLE;
+		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+		return;
+	}
+
+	memset(&frame, 0, sizeof(frame));
+
+	frame.type = HDMI_INFOFRAME_TYPE_VENDOR;
+	frame.version = 0x01;
+	frame.length = 6;
+
+	frame.data[0] = 0x03; /* regid0 */
+	frame.data[1] = 0x0c; /* regid1 */
+	frame.data[2] = 0x00; /* regid2 */
+	frame.data[3] = 0x02 << 5; /* video format */
+
+	/* TODO: 74 MHz limit? */
+	if (1) {
+		frame.data[4] = 0x00 << 4; /* 3D structure */
+	} else {
+		frame.data[4] = 0x08 << 4; /* 3D structure */
+		frame.data[5] = 0x00 << 4; /* 3D ext. data */
+	}
+
+	err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to pack vendor infoframe: %zd\n",
+			err);
+		return;
+	}
+
+	tegra_hdmi_write_infopack(hdmi, buffer, err);
+
+	value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+	value |= GENERIC_CTRL_ENABLE;
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
+				  const struct tmds_config *tmds)
+{
+	unsigned long value;
+
+	tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0);
+	tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
+	tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
+
+	value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE;
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+}
+
+static int tegra_output_hdmi_enable(struct tegra_output *output)
+{
+	unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
+	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+	struct drm_display_mode *mode = &dc->base.mode;
+	struct tegra_hdmi *hdmi = to_hdmi(output);
+	struct device_node *node = hdmi->dev->of_node;
+	unsigned int pulse_start, div82, pclk;
+	const struct tmds_config *tmds;
+	unsigned int num_tmds;
+	unsigned long value;
+	int retries = 1000;
+	int err;
+
+	pclk = mode->clock * 1000;
+	h_sync_width = mode->hsync_end - mode->hsync_start;
+	h_back_porch = mode->htotal - mode->hsync_end;
+	h_front_porch = mode->hsync_start - mode->hdisplay;
+
+	err = regulator_enable(hdmi->vdd);
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
+		return err;
+	}
+
+	err = regulator_enable(hdmi->pll);
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
+		return err;
+	}
+
+	/*
+	 * This assumes that the display controller will divide its parent
+	 * clock by 2 to generate the pixel clock.
+	 */
+	err = tegra_output_setup_clock(output, hdmi->clk, pclk * 2);
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to setup clock: %d\n", err);
+		return err;
+	}
+
+	err = clk_set_rate(hdmi->clk, pclk);
+	if (err < 0)
+		return err;
+
+	err = clk_enable(hdmi->clk);
+	if (err < 0) {
+		dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
+		return err;
+	}
+
+	tegra_periph_reset_assert(hdmi->clk);
+	usleep_range(1000, 2000);
+	tegra_periph_reset_deassert(hdmi->clk);
+
+	tegra_dc_writel(dc, VSYNC_H_POSITION(1),
+			DC_DISP_DISP_TIMING_OPTIONS);
+	tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+			DC_DISP_DISP_COLOR_CONTROL);
+
+	/* video_preamble uses h_pulse2 */
+	pulse_start = 1 + h_sync_width + h_back_porch - 10;
+
+	tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+
+	value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
+		PULSE_LAST_END_A;
+	tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+
+	value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8);
+	tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+
+	value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) |
+		VSYNC_WINDOW_ENABLE;
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+
+	if (dc->pipe)
+		value = HDMI_SRC_DISPLAYB;
+	else
+		value = HDMI_SRC_DISPLAYA;
+
+	if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) ||
+					(mode->vdisplay == 576)))
+		tegra_hdmi_writel(hdmi,
+				  value | ARM_VIDEO_RANGE_FULL,
+				  HDMI_NV_PDISP_INPUT_CONTROL);
+	else
+		tegra_hdmi_writel(hdmi,
+				  value | ARM_VIDEO_RANGE_LIMITED,
+				  HDMI_NV_PDISP_INPUT_CONTROL);
+
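+	/*
+	 * The SOR reference clock divider takes a fixed-point value with two
+	 * fractional bits: div82 is four times the clock rate in MHz, so the
+	 * integer part is div82 >> 2 and the fraction the low two bits.
+	 */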
+	div82 = clk_get_rate(hdmi->clk) / 1000000 * 4;
+	value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
+
+	if (!hdmi->dvi) {
+		err = tegra_hdmi_setup_audio(hdmi, pclk);
+		if (err < 0)
+			hdmi->dvi = true;
+	}
+
+	if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) {
+		/*
+		 * TODO: add ELD support
+		 */
+	}
+
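+	/*
+	 * The rekey value presumably sets the HDCP re-key interval in
+	 * pixels; whatever horizontal blanking remains after rekeying and an
+	 * 18-pixel control period bounds the number of 32-pixel audio/
+	 * auxiliary packets that fit per line.
+	 */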
+	rekey = HDMI_REKEY_DEFAULT;
+	value = HDMI_CTRL_REKEY(rekey);
+	value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch +
+					  h_front_porch - rekey - 18) / 32);
+
+	if (!hdmi->dvi)
+		value |= HDMI_CTRL_ENABLE;
+
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
+
+	if (hdmi->dvi)
+		tegra_hdmi_writel(hdmi, 0x0,
+				  HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+	else
+		tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
+				  HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+
+	tegra_hdmi_setup_avi_infoframe(hdmi, mode);
+	tegra_hdmi_setup_audio_infoframe(hdmi);
+	tegra_hdmi_setup_stereo_infoframe(hdmi);
+
+	/* TMDS CONFIG */
+	if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+		num_tmds = ARRAY_SIZE(tegra3_tmds_config);
+		tmds = tegra3_tmds_config;
+	} else {
+		num_tmds = ARRAY_SIZE(tegra2_tmds_config);
+		tmds = tegra2_tmds_config;
+	}
+
+	for (i = 0; i < num_tmds; i++) {
+		if (pclk <= tmds[i].pclk) {
+			tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
+			break;
+		}
+	}
+
+	tegra_hdmi_writel(hdmi,
+			  SOR_SEQ_CTL_PU_PC(0) |
+			  SOR_SEQ_PU_PC_ALT(0) |
+			  SOR_SEQ_PD_PC(8) |
+			  SOR_SEQ_PD_PC_ALT(8),
+			  HDMI_NV_PDISP_SOR_SEQ_CTL);
+
+	value = SOR_SEQ_INST_WAIT_TIME(1) |
+		SOR_SEQ_INST_WAIT_UNITS_VSYNC |
+		SOR_SEQ_INST_HALT |
+		SOR_SEQ_INST_PIN_A_LOW |
+		SOR_SEQ_INST_PIN_B_LOW |
+		SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
+
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0));
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8));
+
+	value = 0x1c800;
+	value &= ~SOR_CSTM_ROTCLK(~0);
+	value |= SOR_CSTM_ROTCLK(2);
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
+
+	tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	/* start SOR */
+	tegra_hdmi_writel(hdmi,
+			  SOR_PWR_NORMAL_STATE_PU |
+			  SOR_PWR_NORMAL_START_NORMAL |
+			  SOR_PWR_SAFE_STATE_PD |
+			  SOR_PWR_SETTING_NEW_TRIGGER,
+			  HDMI_NV_PDISP_SOR_PWR);
+	tegra_hdmi_writel(hdmi,
+			  SOR_PWR_NORMAL_STATE_PU |
+			  SOR_PWR_NORMAL_START_NORMAL |
+			  SOR_PWR_SAFE_STATE_PD |
+			  SOR_PWR_SETTING_NEW_DONE,
+			  HDMI_NV_PDISP_SOR_PWR);
+
+	do {
+		BUG_ON(--retries < 0);
+		value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
+	} while (value & SOR_PWR_SETTING_NEW_PENDING);
+
+	value = SOR_STATE_ASY_CRCMODE_COMPLETE |
+		SOR_STATE_ASY_OWNER_HEAD0 |
+		SOR_STATE_ASY_SUBOWNER_BOTH |
+		SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
+		SOR_STATE_ASY_DEPOL_POS;
+
+	/* setup sync polarities */
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		value |= SOR_STATE_ASY_HSYNCPOL_POS;
+
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		value |= SOR_STATE_ASY_HSYNCPOL_NEG;
+
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		value |= SOR_STATE_ASY_VSYNCPOL_POS;
+
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		value |= SOR_STATE_ASY_VSYNCPOL_NEG;
+
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2);
+
+	value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
+	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1);
+
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+	tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
+	tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED,
+			  HDMI_NV_PDISP_SOR_STATE1);
+	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+
+	tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+
+	value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+		PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+	value = DISP_CTRL_MODE_C_DISPLAY;
+	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+	tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+	/* TODO: add HDCP support */
+
+	return 0;
+}
+
+static int tegra_output_hdmi_disable(struct tegra_output *output)
+{
+	struct tegra_hdmi *hdmi = to_hdmi(output);
+
+	tegra_periph_reset_assert(hdmi->clk);
+	clk_disable(hdmi->clk);
+	regulator_disable(hdmi->pll);
+	regulator_disable(hdmi->vdd);
+
+	return 0;
+}
+
+static int tegra_output_hdmi_setup_clock(struct tegra_output *output,
+					 struct clk *clk, unsigned long pclk)
+{
+	struct tegra_hdmi *hdmi = to_hdmi(output);
+	struct clk *base;
+	int err;
+
+	err = clk_set_parent(clk, hdmi->clk_parent);
+	if (err < 0) {
+		dev_err(output->dev, "failed to set parent: %d\n", err);
+		return err;
+	}
+
+	base = clk_get_parent(hdmi->clk_parent);
+
+	/*
+	 * This assumes that the parent clock is pll_d_out0 or pll_d2_out
+	 * respectively, each of which divides the base pll_d by 2.
+	 */
+	err = clk_set_rate(base, pclk * 2);
+	if (err < 0)
+		dev_err(output->dev,
+			"failed to set base clock rate to %lu Hz\n",
+			pclk * 2);
+
+	return 0;
+}
+
+static int tegra_output_hdmi_check_mode(struct tegra_output *output,
+					struct drm_display_mode *mode,
+					enum drm_mode_status *status)
+{
+	struct tegra_hdmi *hdmi = to_hdmi(output);
+	unsigned long pclk = mode->clock * 1000;
+	struct clk *parent;
+	long err;
+
+	parent = clk_get_parent(hdmi->clk_parent);
+
+	err = clk_round_rate(parent, pclk * 4);
+	if (err < 0)
+		*status = MODE_NOCLOCK;
+	else
+		*status = MODE_OK;
+
+	return 0;
+}
+
+static const struct tegra_output_ops hdmi_ops = {
+	.enable = tegra_output_hdmi_enable,
+	.disable = tegra_output_hdmi_disable,
+	.setup_clock = tegra_output_hdmi_setup_clock,
+	.check_mode = tegra_output_hdmi_check_mode,
+};
+
+static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
+{
+	struct drm_info_node *node = s->private;
+	struct tegra_hdmi *hdmi = node->info_ent->data;
+
+#define DUMP_REG(name)						\
+	seq_printf(s, "%-56s %#05x %08lx\n", #name, name,	\
+		tegra_hdmi_readl(hdmi, name))
+
+	DUMP_REG(HDMI_CTXSW);
+	DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
+	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
+	DUMP_REG(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
+	DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
+	DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
+	DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
+	DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
+	DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(0));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(1));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(2));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(3));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(4));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(5));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(6));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(7));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(8));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(9));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(10));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(11));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(12));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(13));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(14));
+	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(15));
+	DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
+	DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
+	DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
+	DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
+	DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
+	DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
+	DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
+	DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
+	DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
+	DUMP_REG(HDMI_NV_PDISP_SCRATCH);
+	DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
+	DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
+	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
+	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
+	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
+	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
+	DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
+	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
+	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+
+#undef DUMP_REG
+
+	return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+	{ "regs", tegra_hdmi_show_regs, 0, NULL },
+};
+
+static int tegra_hdmi_debugfs_init(struct tegra_hdmi *hdmi,
+				   struct drm_minor *minor)
+{
+	unsigned int i;
+	int err;
+
+	hdmi->debugfs = debugfs_create_dir("hdmi", minor->debugfs_root);
+	if (!hdmi->debugfs)
+		return -ENOMEM;
+
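+	/*
+	 * Copy the static table so that the .data pointers can be set to
+	 * this instance without affecting other outputs.
+	 */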
+	hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+				      GFP_KERNEL);
+	if (!hdmi->debugfs_files) {
+		err = -ENOMEM;
+		goto remove;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+		hdmi->debugfs_files[i].data = hdmi;
+
+	err = drm_debugfs_create_files(hdmi->debugfs_files,
+				       ARRAY_SIZE(debugfs_files),
+				       hdmi->debugfs, minor);
+	if (err < 0)
+		goto free;
+
+	hdmi->minor = minor;
+
+	return 0;
+
+free:
+	kfree(hdmi->debugfs_files);
+	hdmi->debugfs_files = NULL;
+remove:
+	debugfs_remove(hdmi->debugfs);
+	hdmi->debugfs = NULL;
+
+	return err;
+}
+
+static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
+{
+	drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files),
+				 hdmi->minor);
+	hdmi->minor = NULL;
+
+	kfree(hdmi->debugfs_files);
+	hdmi->debugfs_files = NULL;
+
+	debugfs_remove(hdmi->debugfs);
+	hdmi->debugfs = NULL;
+
+	return 0;
+}
+
+static int tegra_hdmi_drm_init(struct host1x_client *client,
+			       struct drm_device *drm)
+{
+	struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+	int err;
+
+	hdmi->output.type = TEGRA_OUTPUT_HDMI;
+	hdmi->output.dev = client->dev;
+	hdmi->output.ops = &hdmi_ops;
+
+	err = tegra_output_init(drm, &hdmi->output);
+	if (err < 0) {
+		dev_err(client->dev, "output setup failed: %d\n", err);
+		return err;
+	}
+
+	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+		err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
+		if (err < 0)
+			dev_err(client->dev, "debugfs setup failed: %d\n", err);
+	}
+
+	return 0;
+}
+
+static int tegra_hdmi_drm_exit(struct host1x_client *client)
+{
+	struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+	int err;
+
+	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+		err = tegra_hdmi_debugfs_exit(hdmi);
+		if (err < 0)
+			dev_err(client->dev, "debugfs cleanup failed: %d\n",
+				err);
+	}
+
+	err = tegra_output_disable(&hdmi->output);
+	if (err < 0) {
+		dev_err(client->dev, "output failed to disable: %d\n", err);
+		return err;
+	}
+
+	err = tegra_output_exit(&hdmi->output);
+	if (err < 0) {
+		dev_err(client->dev, "output cleanup failed: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct host1x_client_ops hdmi_client_ops = {
+	.drm_init = tegra_hdmi_drm_init,
+	.drm_exit = tegra_hdmi_drm_exit,
+};
+
+static int tegra_hdmi_probe(struct platform_device *pdev)
+{
+	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
+	struct tegra_hdmi *hdmi;
+	struct resource *regs;
+	int err;
+
+	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+	if (!hdmi)
+		return -ENOMEM;
+
+	hdmi->dev = &pdev->dev;
+	hdmi->audio_source = AUTO;
+	hdmi->audio_freq = 44100;
+	hdmi->stereo = false;
+	hdmi->dvi = false;
+
+	hdmi->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(hdmi->clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		return PTR_ERR(hdmi->clk);
+	}
+
+	err = clk_prepare(hdmi->clk);
+	if (err < 0)
+		return err;
+
+	hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+	if (IS_ERR(hdmi->clk_parent))
+		return PTR_ERR(hdmi->clk_parent);
+
+	err = clk_prepare(hdmi->clk_parent);
+	if (err < 0)
+		return err;
+
+	err = clk_set_parent(hdmi->clk, hdmi->clk_parent);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to setup clocks: %d\n", err);
+		return err;
+	}
+
+	hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(hdmi->vdd)) {
+		dev_err(&pdev->dev, "failed to get VDD regulator\n");
+		return PTR_ERR(hdmi->vdd);
+	}
+
+	hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
+	if (IS_ERR(hdmi->pll)) {
+		dev_err(&pdev->dev, "failed to get PLL regulator\n");
+		return PTR_ERR(hdmi->pll);
+	}
+
+	hdmi->output.dev = &pdev->dev;
+
+	err = tegra_output_parse_dt(&hdmi->output);
+	if (err < 0)
+		return err;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs)
+		return -ENXIO;
+
+	hdmi->regs = devm_ioremap_resource(&pdev->dev, regs);
+	if (IS_ERR(hdmi->regs))
+		return PTR_ERR(hdmi->regs);
+
+	err = platform_get_irq(pdev, 0);
+	if (err < 0)
+		return err;
+
+	hdmi->irq = err;
+
+	hdmi->client.ops = &hdmi_client_ops;
+	INIT_LIST_HEAD(&hdmi->client.list);
+	hdmi->client.dev = &pdev->dev;
+
+	err = host1x_register_client(host1x, &hdmi->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, hdmi);
+
+	return 0;
+}
+
+static int tegra_hdmi_remove(struct platform_device *pdev)
+{
+	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
+	struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
+	int err;
+
+	err = host1x_unregister_client(host1x, &hdmi->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+			err);
+		return err;
+	}
+
+	clk_unprepare(hdmi->clk_parent);
+	clk_unprepare(hdmi->clk);
+
+	return 0;
+}
+
+static const struct of_device_id tegra_hdmi_of_match[] = {
+	{ .compatible = "nvidia,tegra30-hdmi", },
+	{ .compatible = "nvidia,tegra20-hdmi", },
+	{ },
+};
+
+struct platform_driver tegra_hdmi_driver = {
+	.driver = {
+		.name = "tegra-hdmi",
+		.owner = THIS_MODULE,
+		.of_match_table = tegra_hdmi_of_match,
+	},
+	.probe = tegra_hdmi_probe,
+	.remove = tegra_hdmi_remove,
+};
diff --git a/linux-imx/drivers/gpu/host1x/drm/hdmi.h b/linux-imx/drivers/gpu/host1x/drm/hdmi.h
new file mode 100644
index 0000000..52ac36e
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/drm/hdmi.h
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_HDMI_H
+#define TEGRA_HDMI_H 1
+
+/* register definitions */
+#define HDMI_CTXSW						0x00
+
+#define HDMI_NV_PDISP_SOR_STATE0				0x01
+#define SOR_STATE_UPDATE (1 << 0)
+
+#define HDMI_NV_PDISP_SOR_STATE1				0x02
+#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0)
+#define SOR_STATE_ASY_ORMODE_NORMAL     (1 << 2)
+#define SOR_STATE_ATTACHED              (1 << 3)
+
+#define HDMI_NV_PDISP_SOR_STATE2				0x03
+#define SOR_STATE_ASY_OWNER_NONE         (0 <<  0)
+#define SOR_STATE_ASY_OWNER_HEAD0        (1 <<  0)
+#define SOR_STATE_ASY_SUBOWNER_NONE      (0 <<  4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0  (1 <<  4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1  (2 <<  4)
+#define SOR_STATE_ASY_SUBOWNER_BOTH      (3 <<  4)
+#define SOR_STATE_ASY_CRCMODE_ACTIVE     (0 <<  6)
+#define SOR_STATE_ASY_CRCMODE_COMPLETE   (1 <<  6)
+#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 <<  6)
+#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8)
+#define SOR_STATE_ASY_PROTOCOL_CUSTOM        (15 << 8)
+#define SOR_STATE_ASY_HSYNCPOL_POS       (0 << 12)
+#define SOR_STATE_ASY_HSYNCPOL_NEG       (1 << 12)
+#define SOR_STATE_ASY_VSYNCPOL_POS       (0 << 13)
+#define SOR_STATE_ASY_VSYNCPOL_NEG       (1 << 13)
+#define SOR_STATE_ASY_DEPOL_POS          (0 << 14)
+#define SOR_STATE_ASY_DEPOL_NEG          (1 << 14)
+
+#define HDMI_NV_PDISP_RG_HDCP_AN_MSB				0x04
+#define HDMI_NV_PDISP_RG_HDCP_AN_LSB				0x05
+#define HDMI_NV_PDISP_RG_HDCP_CN_MSB				0x06
+#define HDMI_NV_PDISP_RG_HDCP_CN_LSB				0x07
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB				0x08
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB				0x09
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB				0x0a
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB				0x0b
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB				0x0c
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB				0x0d
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB				0x0e
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB				0x0f
+#define HDMI_NV_PDISP_RG_HDCP_CTRL				0x10
+#define HDMI_NV_PDISP_RG_HDCP_CMODE				0x11
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB			0x12
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB			0x13
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB			0x14
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2			0x15
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1			0x16
+#define HDMI_NV_PDISP_RG_HDCP_RI				0x17
+#define HDMI_NV_PDISP_RG_HDCP_CS_MSB				0x18
+#define HDMI_NV_PDISP_RG_HDCP_CS_LSB				0x19
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0				0x1a
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0			0x1b
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1				0x1c
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2				0x1d
+
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL			0x1e
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS		0x1f
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER		0x20
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW		0x21
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH	0x22
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL			0x23
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS			0x24
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER			0x25
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW		0x26
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH		0x27
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW		0x28
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH		0x29
+
+#define INFOFRAME_CTRL_ENABLE (1 << 0)
+
+#define INFOFRAME_HEADER_TYPE(x)    (((x) & 0xff) <<  0)
+#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) <<  8)
+#define INFOFRAME_HEADER_LEN(x)     (((x) & 0x0f) << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL				0x2a
+#define GENERIC_CTRL_ENABLE (1 <<  0)
+#define GENERIC_CTRL_OTHER  (1 <<  4)
+#define GENERIC_CTRL_SINGLE (1 <<  8)
+#define GENERIC_CTRL_HBLANK (1 << 12)
+#define GENERIC_CTRL_AUDIO  (1 << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS			0x2b
+#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER			0x2c
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW			0x2d
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH		0x2e
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW			0x2f
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH		0x30
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW			0x31
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH		0x32
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW			0x33
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH		0x34
+
+#define HDMI_NV_PDISP_HDMI_ACR_CTRL				0x35
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW			0x36
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH		0x37
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW			0x38
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH		0x39
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW			0x3a
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH		0x3b
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW			0x3c
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH		0x3d
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW			0x3e
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH		0x3f
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW			0x40
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH		0x41
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW			0x42
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH		0x43
+
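+/*
+ * Audio clock regeneration: each subpack carries a CTS/N pair from which
+ * the sink recovers the audio clock (128 * fs = f_TMDS * N / CTS).
+ */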
+#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8)
+#define ACR_SUBPACK_N(x)   (((x) & 0xffffff) << 0)
+#define ACR_ENABLE         (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_CTRL					0x44
+#define HDMI_CTRL_REKEY(x)         (((x) & 0x7f) <<  0)
+#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define HDMI_CTRL_ENABLE           (1 << 30)
+
+#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT			0x45
+#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW				0x46
+#define VSYNC_WINDOW_END(x)   (((x) & 0x3ff) <<  0)
+#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16)
+#define VSYNC_WINDOW_ENABLE   (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_GCP_CTRL				0x47
+#define HDMI_NV_PDISP_HDMI_GCP_STATUS				0x48
+#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK				0x49
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1			0x4a
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2			0x4b
+#define HDMI_NV_PDISP_HDMI_EMU0					0x4c
+#define HDMI_NV_PDISP_HDMI_EMU1					0x4d
+#define HDMI_NV_PDISP_HDMI_EMU1_RDATA				0x4e
+
+#define HDMI_NV_PDISP_HDMI_SPARE				0x4f
+#define SPARE_HW_CTS           (1 << 0)
+#define SPARE_FORCE_SW_CTS     (1 << 1)
+#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16)
+
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1			0x50
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2			0x51
+#define HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL			0x53
+#define HDMI_NV_PDISP_SOR_CAP					0x54
+#define HDMI_NV_PDISP_SOR_PWR					0x55
+#define SOR_PWR_NORMAL_STATE_PD     (0 <<  0)
+#define SOR_PWR_NORMAL_STATE_PU     (1 <<  0)
+#define SOR_PWR_NORMAL_START_NORMAL (0 <<  1)
+#define SOR_PWR_NORMAL_START_ALT    (1 <<  1)
+#define SOR_PWR_SAFE_STATE_PD       (0 << 16)
+#define SOR_PWR_SAFE_STATE_PU       (1 << 16)
+#define SOR_PWR_SETTING_NEW_DONE    (0 << 31)
+#define SOR_PWR_SETTING_NEW_PENDING (1 << 31)
+#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31)
+
+#define HDMI_NV_PDISP_SOR_TEST					0x56
+#define HDMI_NV_PDISP_SOR_PLL0					0x57
+#define SOR_PLL_PWR            (1 << 0)
+#define SOR_PLL_PDBG           (1 << 1)
+#define SOR_PLL_VCAPD          (1 << 2)
+#define SOR_PLL_PDPORT         (1 << 3)
+#define SOR_PLL_RESISTORSEL    (1 << 4)
+#define SOR_PLL_PULLDOWN       (1 << 5)
+#define SOR_PLL_VCOCAP(x)      (((x) & 0xf) <<  8)
+#define SOR_PLL_BG_V17_S(x)    (((x) & 0xf) << 12)
+#define SOR_PLL_FILTER(x)      (((x) & 0xf) << 16)
+#define SOR_PLL_ICHPMP(x)      (((x) & 0xf) << 24)
+#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0xf) << 28)
+
+#define HDMI_NV_PDISP_SOR_PLL1					0x58
+#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8)
+#define SOR_PLL_TMDS_TERMADJ(x)  (((x) & 0xf) <<  9)
+#define SOR_PLL_LOADADJ(x)       (((x) & 0xf) << 20)
+#define SOR_PLL_PE_EN            (1 << 28)
+#define SOR_PLL_HALF_FULL_PE     (1 << 29)
+#define SOR_PLL_S_D_PIN_PE       (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_PLL2					0x59
+
+#define HDMI_NV_PDISP_SOR_CSTM					0x5a
+#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
+
+#define HDMI_NV_PDISP_SOR_LVDS					0x5b
+#define HDMI_NV_PDISP_SOR_CRCA					0x5c
+#define HDMI_NV_PDISP_SOR_CRCB					0x5d
+#define HDMI_NV_PDISP_SOR_BLANK					0x5e
+#define HDMI_NV_PDISP_SOR_SEQ_CTL				0x5f
+#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) <<  0)
+#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) <<  4)
+#define SOR_SEQ_PD_PC(x)     (((x) & 0xf) <<  8)
+#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
+#define SOR_SEQ_PC(x)        (((x) & 0xf) << 16)
+#define SOR_SEQ_STATUS       (1 << 28)
+#define SOR_SEQ_SWITCH       (1 << 30)
+
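+/* 16 sequencer instruction slots, located directly after SOR_SEQ_CTL */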
+#define HDMI_NV_PDISP_SOR_SEQ_INST(x)				(0x60 + (x))
+
+#define SOR_SEQ_INST_WAIT_TIME(x)     (((x) & 0x3ff) << 0)
+#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12)
+#define SOR_SEQ_INST_HALT             (1 << 15)
+#define SOR_SEQ_INST_PIN_A_LOW        (0 << 21)
+#define SOR_SEQ_INST_PIN_A_HIGH       (1 << 21)
+#define SOR_SEQ_INST_PIN_B_LOW        (0 << 22)
+#define SOR_SEQ_INST_PIN_B_HIGH       (1 << 22)
+#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+
+#define HDMI_NV_PDISP_SOR_VCRCA0				0x72
+#define HDMI_NV_PDISP_SOR_VCRCA1				0x73
+#define HDMI_NV_PDISP_SOR_CCRCA0				0x74
+#define HDMI_NV_PDISP_SOR_CCRCA1				0x75
+#define HDMI_NV_PDISP_SOR_EDATAA0				0x76
+#define HDMI_NV_PDISP_SOR_EDATAA1				0x77
+#define HDMI_NV_PDISP_SOR_COUNTA0				0x78
+#define HDMI_NV_PDISP_SOR_COUNTA1				0x79
+#define HDMI_NV_PDISP_SOR_DEBUGA0				0x7a
+#define HDMI_NV_PDISP_SOR_DEBUGA1				0x7b
+#define HDMI_NV_PDISP_SOR_TRIG					0x7c
+#define HDMI_NV_PDISP_SOR_MSCHECK				0x7d
+
+#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT			0x7e
+#define DRIVE_CURRENT_LANE0(x)      (((x) & 0x3f) <<  0)
+#define DRIVE_CURRENT_LANE1(x)      (((x) & 0x3f) <<  8)
+#define DRIVE_CURRENT_LANE2(x)      (((x) & 0x3f) << 16)
+#define DRIVE_CURRENT_LANE3(x)      (((x) & 0x3f) << 24)
+#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+
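+/* lane drive current is encoded in 0.375 mA steps, starting at 1.5 mA */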
+#define DRIVE_CURRENT_1_500_mA  0x00
+#define DRIVE_CURRENT_1_875_mA  0x01
+#define DRIVE_CURRENT_2_250_mA  0x02
+#define DRIVE_CURRENT_2_625_mA  0x03
+#define DRIVE_CURRENT_3_000_mA  0x04
+#define DRIVE_CURRENT_3_375_mA  0x05
+#define DRIVE_CURRENT_3_750_mA  0x06
+#define DRIVE_CURRENT_4_125_mA  0x07
+#define DRIVE_CURRENT_4_500_mA  0x08
+#define DRIVE_CURRENT_4_875_mA  0x09
+#define DRIVE_CURRENT_5_250_mA  0x0a
+#define DRIVE_CURRENT_5_625_mA  0x0b
+#define DRIVE_CURRENT_6_000_mA  0x0c
+#define DRIVE_CURRENT_6_375_mA  0x0d
+#define DRIVE_CURRENT_6_750_mA  0x0e
+#define DRIVE_CURRENT_7_125_mA  0x0f
+#define DRIVE_CURRENT_7_500_mA  0x10
+#define DRIVE_CURRENT_7_875_mA  0x11
+#define DRIVE_CURRENT_8_250_mA  0x12
+#define DRIVE_CURRENT_8_625_mA  0x13
+#define DRIVE_CURRENT_9_000_mA  0x14
+#define DRIVE_CURRENT_9_375_mA  0x15
+#define DRIVE_CURRENT_9_750_mA  0x16
+#define DRIVE_CURRENT_10_125_mA 0x17
+#define DRIVE_CURRENT_10_500_mA 0x18
+#define DRIVE_CURRENT_10_875_mA 0x19
+#define DRIVE_CURRENT_11_250_mA 0x1a
+#define DRIVE_CURRENT_11_625_mA 0x1b
+#define DRIVE_CURRENT_12_000_mA 0x1c
+#define DRIVE_CURRENT_12_375_mA 0x1d
+#define DRIVE_CURRENT_12_750_mA 0x1e
+#define DRIVE_CURRENT_13_125_mA 0x1f
+#define DRIVE_CURRENT_13_500_mA 0x20
+#define DRIVE_CURRENT_13_875_mA 0x21
+#define DRIVE_CURRENT_14_250_mA 0x22
+#define DRIVE_CURRENT_14_625_mA 0x23
+#define DRIVE_CURRENT_15_000_mA 0x24
+#define DRIVE_CURRENT_15_375_mA 0x25
+#define DRIVE_CURRENT_15_750_mA 0x26
+#define DRIVE_CURRENT_16_125_mA 0x27
+#define DRIVE_CURRENT_16_500_mA 0x28
+#define DRIVE_CURRENT_16_875_mA 0x29
+#define DRIVE_CURRENT_17_250_mA 0x2a
+#define DRIVE_CURRENT_17_625_mA 0x2b
+#define DRIVE_CURRENT_18_000_mA 0x2c
+#define DRIVE_CURRENT_18_375_mA 0x2d
+#define DRIVE_CURRENT_18_750_mA 0x2e
+#define DRIVE_CURRENT_19_125_mA 0x2f
+#define DRIVE_CURRENT_19_500_mA 0x30
+#define DRIVE_CURRENT_19_875_mA 0x31
+#define DRIVE_CURRENT_20_250_mA 0x32
+#define DRIVE_CURRENT_20_625_mA 0x33
+#define DRIVE_CURRENT_21_000_mA 0x34
+#define DRIVE_CURRENT_21_375_mA 0x35
+#define DRIVE_CURRENT_21_750_mA 0x36
+#define DRIVE_CURRENT_22_125_mA 0x37
+#define DRIVE_CURRENT_22_500_mA 0x38
+#define DRIVE_CURRENT_22_875_mA 0x39
+#define DRIVE_CURRENT_23_250_mA 0x3a
+#define DRIVE_CURRENT_23_625_mA 0x3b
+#define DRIVE_CURRENT_24_000_mA 0x3c
+#define DRIVE_CURRENT_24_375_mA 0x3d
+#define DRIVE_CURRENT_24_750_mA 0x3e
+
+#define HDMI_NV_PDISP_AUDIO_DEBUG0				0x7f
+#define HDMI_NV_PDISP_AUDIO_DEBUG1				0x80
+#define HDMI_NV_PDISP_AUDIO_DEBUG2				0x81
+
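+/* seven AUDIO_FS registers, one per supported audio sample rate */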
+#define HDMI_NV_PDISP_AUDIO_FS(x)				(0x82 + (x))
+#define AUDIO_FS_LOW(x)  (((x) & 0xfff) <<  0)
+#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16)
+
+#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH				0x89
+#define HDMI_NV_PDISP_AUDIO_THRESHOLD				0x8a
+#define HDMI_NV_PDISP_AUDIO_CNTRL0				0x8b
+#define AUDIO_CNTRL0_ERROR_TOLERANCE(x)  (((x) & 0xff) << 0)
+#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO  (0 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL  (2 << 20)
+#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24)
+
+#define HDMI_NV_PDISP_AUDIO_N					0x8c
+#define AUDIO_N_VALUE(x)           (((x) & 0xfffff) << 0)
+#define AUDIO_N_RESETF             (1 << 20)
+#define AUDIO_N_GENERATE_NORMAL    (0 << 24)
+#define AUDIO_N_GENERATE_ALTERNATE (1 << 24)
+
+#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING			0x94
+#define HDMI_NV_PDISP_SOR_REFCLK				0x95
+#define SOR_REFCLK_DIV_INT(x)  (((x) & 0xff) << 8)
+#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x03) << 6)
+
+#define HDMI_NV_PDISP_CRC_CONTROL				0x96
+#define HDMI_NV_PDISP_INPUT_CONTROL				0x97
+#define HDMI_SRC_DISPLAYA       (0 << 0)
+#define HDMI_SRC_DISPLAYB       (1 << 0)
+#define ARM_VIDEO_RANGE_FULL    (0 << 1)
+#define ARM_VIDEO_RANGE_LIMITED (1 << 1)
+
+#define HDMI_NV_PDISP_SCRATCH					0x98
+#define HDMI_NV_PDISP_PE_CURRENT				0x99
+#define PE_CURRENT0(x) (((x) & 0xf) << 0)
+#define PE_CURRENT1(x) (((x) & 0xf) << 8)
+#define PE_CURRENT2(x) (((x) & 0xf) << 16)
+#define PE_CURRENT3(x) (((x) & 0xf) << 24)
+
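+/* pre-emphasis current is encoded in 0.5 mA steps, starting at 0.0 mA */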
+#define PE_CURRENT_0_0_mA 0x0
+#define PE_CURRENT_0_5_mA 0x1
+#define PE_CURRENT_1_0_mA 0x2
+#define PE_CURRENT_1_5_mA 0x3
+#define PE_CURRENT_2_0_mA 0x4
+#define PE_CURRENT_2_5_mA 0x5
+#define PE_CURRENT_3_0_mA 0x6
+#define PE_CURRENT_3_5_mA 0x7
+#define PE_CURRENT_4_0_mA 0x8
+#define PE_CURRENT_4_5_mA 0x9
+#define PE_CURRENT_5_0_mA 0xa
+#define PE_CURRENT_5_5_mA 0xb
+#define PE_CURRENT_6_0_mA 0xc
+#define PE_CURRENT_6_5_mA 0xd
+#define PE_CURRENT_7_0_mA 0xe
+#define PE_CURRENT_7_5_mA 0xf
+
+#define HDMI_NV_PDISP_KEY_CTRL					0x9a
+#define HDMI_NV_PDISP_KEY_DEBUG0				0x9b
+#define HDMI_NV_PDISP_KEY_DEBUG1				0x9c
+#define HDMI_NV_PDISP_KEY_DEBUG2				0x9d
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_0				0x9e
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_1				0x9f
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_2				0xa0
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_3				0xa1
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG				0xa2
+#define HDMI_NV_PDISP_KEY_SKEY_INDEX				0xa3
+
+#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0				0xac
+#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR			0xbc
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE			0xbd
+
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320			0xbf
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441			0xc0
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882			0xc1
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764			0xc2
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480			0xc3
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960			0xc4
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920			0xc5
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT			0xc5
+
+#endif /* TEGRA_HDMI_H */
diff --git a/linux-imx/drivers/gpu/host1x/drm/output.c b/linux-imx/drivers/gpu/host1x/drm/output.c
new file mode 100644
index 0000000..8140fc6
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/drm/output.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_i2c.h>
+
+#include "drm.h"
+
+static int tegra_connector_get_modes(struct drm_connector *connector)
+{
+	struct tegra_output *output = connector_to_output(connector);
+	struct edid *edid = NULL;
+	int err = 0;
+
+	if (output->edid)
+		edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
+	else if (output->ddc)
+		edid = drm_get_edid(connector, output->ddc);
+
+	drm_mode_connector_update_edid_property(connector, edid);
+
+	if (edid) {
+		err = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+
+	return err;
+}
+
+static int tegra_connector_mode_valid(struct drm_connector *connector,
+				      struct drm_display_mode *mode)
+{
+	struct tegra_output *output = connector_to_output(connector);
+	enum drm_mode_status status = MODE_OK;
+	int err;
+
+	err = tegra_output_check_mode(output, mode, &status);
+	if (err < 0)
+		return MODE_ERROR;
+
+	return status;
+}
+
+static struct drm_encoder *
+tegra_connector_best_encoder(struct drm_connector *connector)
+{
+	struct tegra_output *output = connector_to_output(connector);
+
+	return &output->encoder;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+	.get_modes = tegra_connector_get_modes,
+	.mode_valid = tegra_connector_mode_valid,
+	.best_encoder = tegra_connector_best_encoder,
+};
+
+static enum drm_connector_status
+tegra_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct tegra_output *output = connector_to_output(connector);
+	enum drm_connector_status status = connector_status_unknown;
+
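+	/*
+	 * Use the hotplug detect GPIO if one is available. Without it, only
+	 * LVDS panels can be assumed to be always connected.
+	 */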
+	if (gpio_is_valid(output->hpd_gpio)) {
+		if (gpio_get_value(output->hpd_gpio) == 0)
+			status = connector_status_disconnected;
+		else
+			status = connector_status_connected;
+	} else {
+		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+			status = connector_status_connected;
+	}
+
+	return status;
+}
+
+static void tegra_connector_destroy(struct drm_connector *connector)
+{
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = tegra_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = tegra_connector_destroy,
+};
+
+static void tegra_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+	.destroy = tegra_encoder_destroy,
+};
+
+static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
+				     const struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted)
+{
+	return true;
+}
+
+static void tegra_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_encoder_mode_set(struct drm_encoder *encoder,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted)
+{
+	struct tegra_output *output = encoder_to_output(encoder);
+	int err;
+
+	err = tegra_output_enable(output);
+	if (err < 0)
+		dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err);
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+	.dpms = tegra_encoder_dpms,
+	.mode_fixup = tegra_encoder_mode_fixup,
+	.prepare = tegra_encoder_prepare,
+	.commit = tegra_encoder_commit,
+	.mode_set = tegra_encoder_mode_set,
+};
+
+static irqreturn_t hpd_irq(int irq, void *data)
+{
+	struct tegra_output *output = data;
+
+	drm_helper_hpd_irq_event(output->connector.dev);
+
+	return IRQ_HANDLED;
+}
+
+int tegra_output_parse_dt(struct tegra_output *output)
+{
+	enum of_gpio_flags flags;
+	struct device_node *ddc;
+	size_t size;
+	int err;
+
+	if (!output->of_node)
+		output->of_node = output->dev->of_node;
+
+	output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
+
+	ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
+	if (ddc) {
+		output->ddc = of_find_i2c_adapter_by_node(ddc);
+		if (!output->ddc) {
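+			/* the DDC/I2C adapter may not have been probed yet */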
+			err = -EPROBE_DEFER;
+			of_node_put(ddc);
+			return err;
+		}
+
+		of_node_put(ddc);
+	}
+
+	if (!output->edid && !output->ddc)
+		return -ENODEV;
+
+	output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
+						   "nvidia,hpd-gpio", 0,
+						   &flags);
+
+	return 0;
+}
+
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
+{
+	int connector, encoder, err;
+
+	if (gpio_is_valid(output->hpd_gpio)) {
+		unsigned long flags;
+
+		err = gpio_request_one(output->hpd_gpio, GPIOF_DIR_IN,
+				       "HDMI hotplug detect");
+		if (err < 0) {
+			dev_err(output->dev, "gpio_request_one(): %d\n", err);
+			return err;
+		}
+
+		err = gpio_to_irq(output->hpd_gpio);
+		if (err < 0) {
+			dev_err(output->dev, "gpio_to_irq(): %d\n", err);
+			goto free_hpd;
+		}
+
+		output->hpd_irq = err;
+
+		flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+			IRQF_ONESHOT;
+
+		err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq,
+					   flags, "hpd", output);
+		if (err < 0) {
+			dev_err(output->dev, "failed to request IRQ#%u: %d\n",
+				output->hpd_irq, err);
+			goto free_hpd;
+		}
+
+		output->connector.polled = DRM_CONNECTOR_POLL_HPD;
+	}
+
+	switch (output->type) {
+	case TEGRA_OUTPUT_RGB:
+		connector = DRM_MODE_CONNECTOR_LVDS;
+		encoder = DRM_MODE_ENCODER_LVDS;
+		break;
+
+	case TEGRA_OUTPUT_HDMI:
+		connector = DRM_MODE_CONNECTOR_HDMIA;
+		encoder = DRM_MODE_ENCODER_TMDS;
+		break;
+
+	default:
+		connector = DRM_MODE_CONNECTOR_Unknown;
+		encoder = DRM_MODE_ENCODER_NONE;
+		break;
+	}
+
+	drm_connector_init(drm, &output->connector, &connector_funcs,
+			   connector);
+	drm_connector_helper_add(&output->connector, &connector_helper_funcs);
+
+	drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
+	drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
+
+	drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
+	drm_sysfs_connector_add(&output->connector);
+
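+	/* outputs can be attached to either of the two display controllers */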
+	output->encoder.possible_crtcs = 0x3;
+
+	return 0;
+
+free_hpd:
+	gpio_free(output->hpd_gpio);
+
+	return err;
+}
+
+int tegra_output_exit(struct tegra_output *output)
+{
+	if (gpio_is_valid(output->hpd_gpio)) {
+		free_irq(output->hpd_irq, output);
+		gpio_free(output->hpd_gpio);
+	}
+
+	if (output->ddc)
+		put_device(&output->ddc->dev);
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/host1x/drm/rgb.c b/linux-imx/drivers/gpu/host1x/drm/rgb.c
new file mode 100644
index 0000000..ed4416f
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/drm/rgb.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_rgb {
+	struct tegra_output output;
+	struct clk *clk_parent;
+	struct clk *clk;
+};
+
+static inline struct tegra_rgb *to_rgb(struct tegra_output *output)
+{
+	return container_of(output, struct tegra_rgb, output);
+}
+
+struct reg_entry {
+	unsigned long offset;
+	unsigned long value;
+};
+
+static const struct reg_entry rgb_enable[] = {
+	{ DC_COM_PIN_OUTPUT_ENABLE(0),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(1),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(2),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(3),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(1), 0x01000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_DATA(0),     0x00000000 },
+	{ DC_COM_PIN_OUTPUT_DATA(1),     0x00000000 },
+	{ DC_COM_PIN_OUTPUT_DATA(2),     0x00000000 },
+	{ DC_COM_PIN_OUTPUT_DATA(3),     0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(0),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(1),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(2),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(3),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(4),   0x00210222 },
+	{ DC_COM_PIN_OUTPUT_SELECT(5),   0x00002200 },
+	{ DC_COM_PIN_OUTPUT_SELECT(6),   0x00020000 },
+};
+
+static const struct reg_entry rgb_disable[] = {
+	{ DC_COM_PIN_OUTPUT_SELECT(6),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(5),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(4),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(3),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(2),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(1),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_SELECT(0),   0x00000000 },
+	{ DC_COM_PIN_OUTPUT_DATA(3),     0xaaaaaaaa },
+	{ DC_COM_PIN_OUTPUT_DATA(2),     0xaaaaaaaa },
+	{ DC_COM_PIN_OUTPUT_DATA(1),     0xaaaaaaaa },
+	{ DC_COM_PIN_OUTPUT_DATA(0),     0xaaaaaaaa },
+	{ DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(1), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(3),   0x55555555 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(2),   0x55555555 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(1),   0x55150005 },
+	{ DC_COM_PIN_OUTPUT_ENABLE(0),   0x55555555 },
+};
+
+static void tegra_dc_write_regs(struct tegra_dc *dc,
+				const struct reg_entry *table,
+				unsigned int num)
+{
+	unsigned int i;
+
+	for (i = 0; i < num; i++)
+		tegra_dc_writel(dc, table[i].value, table[i].offset);
+}
+
+static int tegra_output_rgb_enable(struct tegra_output *output)
+{
+	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+
+	tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable));
+
+	return 0;
+}
+
+static int tegra_output_rgb_disable(struct tegra_output *output)
+{
+	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+
+	tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+
+	return 0;
+}
+
+static int tegra_output_rgb_setup_clock(struct tegra_output *output,
+					struct clk *clk, unsigned long pclk)
+{
+	struct tegra_rgb *rgb = to_rgb(output);
+
+	return clk_set_parent(clk, rgb->clk_parent);
+}
+
+static int tegra_output_rgb_check_mode(struct tegra_output *output,
+				       struct drm_display_mode *mode,
+				       enum drm_mode_status *status)
+{
+	/*
+	 * FIXME: For now, always assume that the mode is okay. There are
+	 * unresolved issues with clk_round_rate(), which doesn't always
+	 * reliably report whether a frequency can be set or not.
+	 */
+
+	*status = MODE_OK;
+
+	return 0;
+}
+
+static const struct tegra_output_ops rgb_ops = {
+	.enable = tegra_output_rgb_enable,
+	.disable = tegra_output_rgb_disable,
+	.setup_clock = tegra_output_rgb_setup_clock,
+	.check_mode = tegra_output_rgb_check_mode,
+};
+
+int tegra_dc_rgb_probe(struct tegra_dc *dc)
+{
+	struct device_node *np;
+	struct tegra_rgb *rgb;
+	int err;
+
+	np = of_get_child_by_name(dc->dev->of_node, "rgb");
+	if (!np || !of_device_is_available(np))
+		return -ENODEV;
+
+	rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
+	if (!rgb)
+		return -ENOMEM;
+
+	rgb->clk = devm_clk_get(dc->dev, NULL);
+	if (IS_ERR(rgb->clk)) {
+		dev_err(dc->dev, "failed to get clock\n");
+		return PTR_ERR(rgb->clk);
+	}
+
+	rgb->clk_parent = devm_clk_get(dc->dev, "parent");
+	if (IS_ERR(rgb->clk_parent)) {
+		dev_err(dc->dev, "failed to get parent clock\n");
+		return PTR_ERR(rgb->clk_parent);
+	}
+
+	err = clk_set_parent(rgb->clk, rgb->clk_parent);
+	if (err < 0) {
+		dev_err(dc->dev, "failed to set parent clock: %d\n", err);
+		return err;
+	}
+
+	rgb->output.dev = dc->dev;
+	rgb->output.of_node = np;
+
+	err = tegra_output_parse_dt(&rgb->output);
+	if (err < 0)
+		return err;
+
+	dc->rgb = &rgb->output;
+
+	return 0;
+}
+
+int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
+{
+	struct tegra_rgb *rgb = to_rgb(dc->rgb);
+	int err;
+
+	if (!dc->rgb)
+		return -ENODEV;
+
+	rgb->output.type = TEGRA_OUTPUT_RGB;
+	rgb->output.ops = &rgb_ops;
+
+	err = tegra_output_init(dc->base.dev, &rgb->output);
+	if (err < 0) {
+		dev_err(dc->dev, "output setup failed: %d\n", err);
+		return err;
+	}
+
+	/*
+	 * By default, outputs can be attached to either display controller.
+	 * RGB outputs are an exception, so make sure they can be attached
+	 * only to their parent display controller.
+	 */
+	rgb->output.encoder.possible_crtcs = 1 << dc->pipe;
+
+	return 0;
+}
+
+int tegra_dc_rgb_exit(struct tegra_dc *dc)
+{
+	if (dc->rgb) {
+		int err;
+
+		err = tegra_output_disable(dc->rgb);
+		if (err < 0) {
+			dev_err(dc->dev, "output failed to disable: %d\n", err);
+			return err;
+		}
+
+		err = tegra_output_exit(dc->rgb);
+		if (err < 0) {
+			dev_err(dc->dev, "output cleanup failed: %d\n", err);
+			return err;
+		}
+
+		dc->rgb = NULL;
+	}
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/host1x/host1x.h b/linux-imx/drivers/gpu/host1x/host1x.h
new file mode 100644
index 0000000..a2bc1e6
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/host1x.h
@@ -0,0 +1,30 @@
+/*
+ * Tegra host1x driver
+ *
+ * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __LINUX_HOST1X_H
+#define __LINUX_HOST1X_H
+
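+/* module class IDs, as used by the SETCLASS opcode to address engines */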
+enum host1x_class {
+	HOST1X_CLASS_HOST1X	= 0x1,
+	HOST1X_CLASS_GR2D	= 0x51,
+	HOST1X_CLASS_GR2D_SB    = 0x52
+};
+
+#endif
diff --git a/linux-imx/drivers/gpu/host1x/host1x_bo.h b/linux-imx/drivers/gpu/host1x/host1x_bo.h
new file mode 100644
index 0000000..4c1f10b
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/host1x_bo.h
@@ -0,0 +1,87 @@
+/*
+ * Tegra host1x Memory Management Abstraction header
+ *
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HOST1X_BO_H
+#define _HOST1X_BO_H
+
+struct host1x_bo;
+
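+/*
+ * Backend operations for buffer objects: reference counting, pinning for
+ * device DMA and CPU mappings.
+ */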
+struct host1x_bo_ops {
+	struct host1x_bo *(*get)(struct host1x_bo *bo);
+	void (*put)(struct host1x_bo *bo);
+	dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
+	void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+	void *(*mmap)(struct host1x_bo *bo);
+	void (*munmap)(struct host1x_bo *bo, void *addr);
+	void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
+	void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
+};
+
+struct host1x_bo {
+	const struct host1x_bo_ops *ops;
+};
+
+static inline void host1x_bo_init(struct host1x_bo *bo,
+				  const struct host1x_bo_ops *ops)
+{
+	bo->ops = ops;
+}
+
+static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
+{
+	return bo->ops->get(bo);
+}
+
+static inline void host1x_bo_put(struct host1x_bo *bo)
+{
+	bo->ops->put(bo);
+}
+
+static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
+				       struct sg_table **sgt)
+{
+	return bo->ops->pin(bo, sgt);
+}
+
+static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+{
+	bo->ops->unpin(bo, sgt);
+}
+
+static inline void *host1x_bo_mmap(struct host1x_bo *bo)
+{
+	return bo->ops->mmap(bo);
+}
+
+static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
+{
+	bo->ops->munmap(bo, addr);
+}
+
+static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
+{
+	return bo->ops->kmap(bo, pagenum);
+}
+
+static inline void host1x_bo_kunmap(struct host1x_bo *bo,
+				    unsigned int pagenum, void *addr)
+{
+	bo->ops->kunmap(bo, pagenum, addr);
+}
+
+#endif
diff --git a/linux-imx/drivers/gpu/host1x/host1x_client.h b/linux-imx/drivers/gpu/host1x/host1x_client.h
new file mode 100644
index 0000000..9b85f10
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/host1x_client.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_CLIENT_H
+#define HOST1X_CLIENT_H
+
+struct device;
+struct platform_device;
+
+#ifdef CONFIG_DRM_TEGRA
+int host1x_drm_alloc(struct platform_device *pdev);
+#else
+static inline int host1x_drm_alloc(struct platform_device *pdev)
+{
+	return 0;
+}
+#endif
+
+void host1x_set_drm_data(struct device *dev, void *data);
+void *host1x_get_drm_data(struct device *dev);
+
+#endif
diff --git a/linux-imx/drivers/gpu/host1x/hw/Makefile b/linux-imx/drivers/gpu/host1x/hw/Makefile
new file mode 100644
index 0000000..9b50863
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/hw/Makefile
@@ -0,0 +1,6 @@
+ccflags-y = -Idrivers/gpu/host1x
+
+host1x-hw-objs  = \
+	host1x01.o
+
+obj-$(CONFIG_TEGRA_HOST1X) += host1x-hw.o
diff --git a/linux-imx/drivers/gpu/host1x/hw/cdma_hw.c b/linux-imx/drivers/gpu/host1x/hw/cdma_hw.c
new file mode 100644
index 0000000..590b69d
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/hw/cdma_hw.c
@@ -0,0 +1,326 @@
+/*
+ * Tegra host1x Command DMA
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+
+#include "cdma.h"
+#include "channel.h"
+#include "dev.h"
+#include "debug.h"
+
+/*
+ * Put the restart opcode at the end of pushbuffer memory so that command
+ * fetches wrap back to the start of the buffer.
+ */
+static void push_buffer_init(struct push_buffer *pb)
+{
+	*(pb->mapped + (pb->size_bytes >> 2)) = host1x_opcode_restart(0);
+}
+
+/*
+ * Increment a timed-out buffer's syncpoint via the CPU.
+ */
+static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
+				u32 syncpt_incrs, u32 syncval, u32 nr_slots)
+{
+	struct host1x *host1x = cdma_to_host1x(cdma);
+	struct push_buffer *pb = &cdma->push_buffer;
+	u32 i;
+
+	for (i = 0; i < syncpt_incrs; i++)
+		host1x_syncpt_cpu_incr(cdma->timeout.syncpt);
+
+	/* after CPU incr, ensure shadow is up to date */
+	host1x_syncpt_load(cdma->timeout.syncpt);
+
+	/* NOP all the PB slots */
+	while (nr_slots--) {
+		u32 *p = (u32 *)((u32)pb->mapped + getptr);
+		*(p++) = HOST1X_OPCODE_NOP;
+		*(p++) = HOST1X_OPCODE_NOP;
+		dev_dbg(host1x->dev, "%s: NOP at 0x%x\n", __func__,
+			pb->phys + getptr);
+		getptr = (getptr + 8) & (pb->size_bytes - 1);
+	}
+	wmb();
+}
+
+/*
+ * Start channel DMA
+ */
+static void cdma_start(struct host1x_cdma *cdma)
+{
+	struct host1x_channel *ch = cdma_to_channel(cdma);
+
+	if (cdma->running)
+		return;
+
+	cdma->last_pos = cdma->push_buffer.pos;
+
+	host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+			 HOST1X_CHANNEL_DMACTRL);
+
+	/* set base, put and end pointer */
+	host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
+	host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
+	host1x_ch_writel(ch, cdma->push_buffer.phys +
+			 cdma->push_buffer.size_bytes + 4,
+			 HOST1X_CHANNEL_DMAEND);
+
+	/* reset GET */
+	host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
+			 HOST1X_CHANNEL_DMACTRL_DMAGETRST |
+			 HOST1X_CHANNEL_DMACTRL_DMAINITGET,
+			 HOST1X_CHANNEL_DMACTRL);
+
+	/* start the command DMA */
+	host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
+
+	cdma->running = true;
+}
+
+/*
+ * Similar to cdma_start(), but rather than starting from an idle
+ * state (where DMA GET is set to DMA PUT), on a timeout we restore
+ * DMA GET from an explicit value (so DMA may again be pending).
+ */
+static void cdma_timeout_restart(struct host1x_cdma *cdma, u32 getptr)
+{
+	struct host1x *host1x = cdma_to_host1x(cdma);
+	struct host1x_channel *ch = cdma_to_channel(cdma);
+
+	if (cdma->running)
+		return;
+
+	cdma->last_pos = cdma->push_buffer.pos;
+
+	host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+			 HOST1X_CHANNEL_DMACTRL);
+
+	/* set base, end pointer (all of memory) */
+	host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART);
+	host1x_ch_writel(ch, cdma->push_buffer.phys +
+			 cdma->push_buffer.size_bytes,
+			 HOST1X_CHANNEL_DMAEND);
+
+	/* set GET, by loading the value in PUT (then reset GET) */
+	host1x_ch_writel(ch, getptr, HOST1X_CHANNEL_DMAPUT);
+	host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP |
+			 HOST1X_CHANNEL_DMACTRL_DMAGETRST |
+			 HOST1X_CHANNEL_DMACTRL_DMAINITGET,
+			 HOST1X_CHANNEL_DMACTRL);
+
+	dev_dbg(host1x->dev,
+		"%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n", __func__,
+		host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
+		host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
+		cdma->last_pos);
+
+	/* deassert GET reset and set PUT */
+	host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+			 HOST1X_CHANNEL_DMACTRL);
+	host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT);
+
+	/* start the command DMA */
+	host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL);
+
+	cdma->running = true;
+}
+
+/*
+ * Kick channel DMA into action by writing its PUT offset (if it has changed)
+ */
+static void cdma_flush(struct host1x_cdma *cdma)
+{
+	struct host1x_channel *ch = cdma_to_channel(cdma);
+
+	if (cdma->push_buffer.pos != cdma->last_pos) {
+		host1x_ch_writel(ch, cdma->push_buffer.pos,
+				 HOST1X_CHANNEL_DMAPUT);
+		cdma->last_pos = cdma->push_buffer.pos;
+	}
+}
+
+static void cdma_stop(struct host1x_cdma *cdma)
+{
+	struct host1x_channel *ch = cdma_to_channel(cdma);
+
+	mutex_lock(&cdma->lock);
+	if (cdma->running) {
+		host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
+		host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+				 HOST1X_CHANNEL_DMACTRL);
+		cdma->running = false;
+	}
+	mutex_unlock(&cdma->lock);
+}
+
+/*
+ * Stops both channel's command processor and CDMA immediately.
+ * Also, tears down the channel and resets corresponding module.
+ */
+static void cdma_freeze(struct host1x_cdma *cdma)
+{
+	struct host1x *host = cdma_to_host1x(cdma);
+	struct host1x_channel *ch = cdma_to_channel(cdma);
+	u32 cmdproc_stop;
+
+	if (cdma->torndown && !cdma->running) {
+		dev_warn(host->dev, "Already torn down\n");
+		return;
+	}
+
+	dev_dbg(host->dev, "freezing channel (id %d)\n", ch->id);
+
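+	/* stop the channel's command processor before tearing it down */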
+	cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP);
+	cmdproc_stop |= BIT(ch->id);
+	host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+
+	dev_dbg(host->dev, "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
+		__func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
+		host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT),
+		cdma->last_pos);
+
+	host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
+			 HOST1X_CHANNEL_DMACTRL);
+
+	host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN);
+
+	cdma->running = false;
+	cdma->torndown = true;
+}
+
+static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
+{
+	struct host1x *host1x = cdma_to_host1x(cdma);
+	struct host1x_channel *ch = cdma_to_channel(cdma);
+	u32 cmdproc_stop;
+
+	dev_dbg(host1x->dev,
+		"resuming channel (id %d, DMAGET restart = 0x%x)\n",
+		ch->id, getptr);
+
+	cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
+	cmdproc_stop &= ~(BIT(ch->id));
+	host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+
+	cdma->torndown = false;
+	cdma_timeout_restart(cdma, getptr);
+}
+
+/*
+ * If this timeout fires, the current sync_queue entry has exceeded its TTL:
+ * time out the user context and clean up any submits that have already been
+ * issued (future submits will return an error).
+ */
+static void cdma_timeout_handler(struct work_struct *work)
+{
+	struct host1x_cdma *cdma;
+	struct host1x *host1x;
+	struct host1x_channel *ch;
+	u32 syncpt_val;
+	u32 prev_cmdproc, cmdproc_stop;
+
+	cdma = container_of(to_delayed_work(work), struct host1x_cdma,
+			    timeout.wq);
+	host1x = cdma_to_host1x(cdma);
+	ch = cdma_to_channel(cdma);
+
+	host1x_debug_dump(host1x);
+
+	mutex_lock(&cdma->lock);
+
+	if (!cdma->timeout.client) {
+		dev_dbg(host1x->dev,
+			"cdma_timeout: expired, but has no clientid\n");
+		mutex_unlock(&cdma->lock);
+		return;
+	}
+
+	/* stop processing to get a clean snapshot */
+	prev_cmdproc = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
+	cmdproc_stop = prev_cmdproc | BIT(ch->id);
+	host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+
+	dev_dbg(host1x->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
+		prev_cmdproc, cmdproc_stop);
+
+	syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
+
+	/* has buffer actually completed? */
+	if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) {
+		dev_dbg(host1x->dev,
+			"cdma_timeout: expired, but buffer had completed\n");
+		/* restore */
+		cmdproc_stop = prev_cmdproc & ~(BIT(ch->id));
+		host1x_sync_writel(host1x, cmdproc_stop,
+				   HOST1X_SYNC_CMDPROC_STOP);
+		mutex_unlock(&cdma->lock);
+		return;
+	}
+
+	dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n",
+		__func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
+		syncpt_val, cdma->timeout.syncpt_val);
+
+	/* stop HW, resetting channel/module */
+	host1x_hw_cdma_freeze(host1x, cdma);
+
+	host1x_cdma_update_sync_queue(cdma, ch->dev);
+	mutex_unlock(&cdma->lock);
+}
+
+/*
+ * Init timeout resources
+ */
+static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id)
+{
+	INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
+	cdma->timeout.initialized = true;
+
+	return 0;
+}
+
+/*
+ * Clean up timeout resources
+ */
+static void cdma_timeout_destroy(struct host1x_cdma *cdma)
+{
+	if (cdma->timeout.initialized)
+		cancel_delayed_work(&cdma->timeout.wq);
+	cdma->timeout.initialized = false;
+}
+
+static const struct host1x_cdma_ops host1x_cdma_ops = {
+	.start = cdma_start,
+	.stop = cdma_stop,
+	.flush = cdma_flush,
+
+	.timeout_init = cdma_timeout_init,
+	.timeout_destroy = cdma_timeout_destroy,
+	.freeze = cdma_freeze,
+	.resume = cdma_resume,
+	.timeout_cpu_incr = cdma_timeout_cpu_incr,
+};
+
+static const struct host1x_pushbuffer_ops host1x_pushbuffer_ops = {
+	.init = push_buffer_init,
+};
diff --git a/linux-imx/drivers/gpu/host1x/hw/channel_hw.c b/linux-imx/drivers/gpu/host1x/hw/channel_hw.c
new file mode 100644
index 0000000..ee19962
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/hw/channel_hw.c
@@ -0,0 +1,168 @@
+/*
+ * Tegra host1x Channel
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/slab.h>
+#include <trace/events/host1x.h>
+
+#include "host1x.h"
+#include "host1x_bo.h"
+#include "channel.h"
+#include "dev.h"
+#include "intr.h"
+#include "job.h"
+
+#define HOST1X_CHANNEL_SIZE 16384
+#define TRACE_MAX_LENGTH 128U
+
+static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
+			       u32 offset, u32 words)
+{
+	void *mem = NULL;
+
+	if (host1x_debug_trace_cmdbuf)
+		mem = host1x_bo_mmap(bo);
+
+	if (mem) {
+		u32 i;
+		/*
+		 * Write in batches of 128 words, as there seems to be a
+		 * limit on how much can be output to ftrace at once.
+		 */
+		for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
+			trace_host1x_cdma_push_gather(
+				dev_name(cdma_to_channel(cdma)->dev),
+				(u32)bo, min(words - i, TRACE_MAX_LENGTH),
+				offset + i * sizeof(u32), mem);
+		}
+		host1x_bo_munmap(bo, mem);
+	}
+}
+
+static void submit_gathers(struct host1x_job *job)
+{
+	struct host1x_cdma *cdma = &job->channel->cdma;
+	unsigned int i;
+
+	for (i = 0; i < job->num_gathers; i++) {
+		struct host1x_job_gather *g = &job->gathers[i];
+		u32 op1 = host1x_opcode_gather(g->words);
+		u32 op2 = g->base + g->offset;
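+		/*
+		 * op1 is a GATHER opcode whose low bits carry the word
+		 * count; op2 is the DMA address of the gather buffer, so
+		 * the command FIFO fetches the gathered commands from
+		 * memory rather than from the push buffer itself.
+		 */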
+		trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
+		host1x_cdma_push(cdma, op1, op2);
+	}
+}
+
+static int channel_submit(struct host1x_job *job)
+{
+	struct host1x_channel *ch = job->channel;
+	struct host1x_syncpt *sp;
+	u32 user_syncpt_incrs = job->syncpt_incrs;
+	u32 prev_max = 0;
+	u32 syncval;
+	int err;
+	struct host1x_waitlist *completed_waiter = NULL;
+	struct host1x *host = dev_get_drvdata(ch->dev->parent);
+
+	sp = host->syncpt + job->syncpt_id;
+	trace_host1x_channel_submit(dev_name(ch->dev),
+				    job->num_gathers, job->num_relocs,
+				    job->num_waitchk, job->syncpt_id,
+				    job->syncpt_incrs);
+
+	/* before error checks, return current max */
+	prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);
+
+	/* get submit lock */
+	err = mutex_lock_interruptible(&ch->submitlock);
+	if (err)
+		goto error;
+
+	completed_waiter = kzalloc(sizeof(*completed_waiter), GFP_KERNEL);
+	if (!completed_waiter) {
+		mutex_unlock(&ch->submitlock);
+		err = -ENOMEM;
+		goto error;
+	}
+
+	/* begin a CDMA submit */
+	err = host1x_cdma_begin(&ch->cdma, job);
+	if (err) {
+		mutex_unlock(&ch->submitlock);
+		goto error;
+	}
+
+	if (job->serialize) {
+		/*
+		 * Force serialization by inserting a host wait for the
+		 * previous job to finish before this one can commence.
+		 */
+		host1x_cdma_push(&ch->cdma,
+				 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
+					host1x_uclass_wait_syncpt_r(), 1),
+				 host1x_class_host_wait_syncpt(job->syncpt_id,
+					host1x_syncpt_read_max(sp)));
+	}
+
+	syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
+
+	job->syncpt_end = syncval;
+
+	/* add a setclass for modules that require it */
+	if (job->class)
+		host1x_cdma_push(&ch->cdma,
+				 host1x_opcode_setclass(job->class, 0, 0),
+				 HOST1X_OPCODE_NOP);
+
+	submit_gathers(job);
+
+	/* end CDMA submit & stash pinned buffers into the sync queue */
+	host1x_cdma_end(&ch->cdma, job);
+
+	trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval);
+
+	/* schedule a submit complete interrupt */
+	err = host1x_intr_add_action(host, job->syncpt_id, syncval,
+				     HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch,
+				     completed_waiter, NULL);
+	completed_waiter = NULL;
+	WARN(err, "Failed to set submit complete interrupt");
+
+	mutex_unlock(&ch->submitlock);
+
+	return 0;
+
+error:
+	kfree(completed_waiter);
+	return err;
+}
+
+static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
+			       unsigned int index)
+{
+	ch->id = index;
+	mutex_init(&ch->reflock);
+	mutex_init(&ch->submitlock);
+
+	ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE;
+	return 0;
+}
+
+static const struct host1x_channel_ops host1x_channel_ops = {
+	.init = host1x_channel_init,
+	.submit = channel_submit,
+};
diff --git a/linux-imx/drivers/gpu/host1x/hw/debug_hw.c b/linux-imx/drivers/gpu/host1x/hw/debug_hw.c
new file mode 100644
index 0000000..334c038
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/hw/debug_hw.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011-2013 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+
+#include <linux/io.h>
+
+#include "dev.h"
+#include "debug.h"
+#include "cdma.h"
+#include "channel.h"
+#include "host1x_bo.h"
+
+#define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
+
+enum {
+	HOST1X_OPCODE_SETCLASS	= 0x00,
+	HOST1X_OPCODE_INCR	= 0x01,
+	HOST1X_OPCODE_NONINCR	= 0x02,
+	HOST1X_OPCODE_MASK	= 0x03,
+	HOST1X_OPCODE_IMM	= 0x04,
+	HOST1X_OPCODE_RESTART	= 0x05,
+	HOST1X_OPCODE_GATHER	= 0x06,
+	HOST1X_OPCODE_EXTEND	= 0x0e,
+};
+
+enum {
+	HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK	= 0x00,
+	HOST1X_OPCODE_EXTEND_RELEASE_MLOCK	= 0x01,
+};
+
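+/*
+ * Decode a single command word and print its mnemonic; the return value
+ * is the number of payload words that follow the opcode, which callers
+ * use to keep printing data words until the next opcode boundary.
+ */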
+static unsigned int show_channel_command(struct output *o, u32 val)
+{
+	unsigned mask;
+	unsigned subop;
+
+	switch (val >> 28) {
+	case HOST1X_OPCODE_SETCLASS:
+		mask = val & 0x3f;
+		if (mask) {
+			host1x_debug_output(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
+					    val >> 6 & 0x3ff,
+					    val >> 16 & 0xfff, mask);
+			return hweight8(mask);
+		} else {
+			host1x_debug_output(o, "SETCL(class=%03x)\n",
+					    val >> 6 & 0x3ff);
+			return 0;
+		}
+
+	case HOST1X_OPCODE_INCR:
+		host1x_debug_output(o, "INCR(offset=%03x, [",
+				    val >> 16 & 0xfff);
+		return val & 0xffff;
+
+	case HOST1X_OPCODE_NONINCR:
+		host1x_debug_output(o, "NONINCR(offset=%03x, [",
+				    val >> 16 & 0xfff);
+		return val & 0xffff;
+
+	case HOST1X_OPCODE_MASK:
+		mask = val & 0xffff;
+		host1x_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
+				    val >> 16 & 0xfff, mask);
+		return hweight16(mask);
+
+	case HOST1X_OPCODE_IMM:
+		host1x_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
+				    val >> 16 & 0xfff, val & 0xffff);
+		return 0;
+
+	case HOST1X_OPCODE_RESTART:
+		host1x_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
+		return 0;
+
+	case HOST1X_OPCODE_GATHER:
+		host1x_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
+				    val >> 16 & 0xfff, val >> 15 & 0x1,
+				    val >> 14 & 0x1, val & 0x3fff);
+		return 1;
+
+	case HOST1X_OPCODE_EXTEND:
+		subop = val >> 24 & 0xf;
+		if (subop == HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK)
+			host1x_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
+					    val & 0xff);
+		else if (subop == HOST1X_OPCODE_EXTEND_RELEASE_MLOCK)
+			host1x_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
+					    val & 0xff);
+		else
+			host1x_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
+		return 0;
+
+	default:
+		return 0;
+	}
+}
+
+static void show_gather(struct output *o, phys_addr_t phys_addr,
+			unsigned int words, struct host1x_cdma *cdma,
+			phys_addr_t pin_addr, u32 *map_addr)
+{
+	/* Map dmaget cursor to corresponding mem handle */
+	u32 offset = phys_addr - pin_addr;
+	unsigned int data_count = 0, i;
+
+	/*
+	 * Sometimes we're given a different hardware address for the same
+	 * page - in these cases the computed offset is invalid and we just
+	 * have to bail out.
+	 */
+	if (offset > HOST1X_DEBUG_MAX_PAGE_OFFSET) {
+		host1x_debug_output(o, "[address mismatch]\n");
+		return;
+	}
+
+	for (i = 0; i < words; i++) {
+		u32 addr = phys_addr + i * 4;
+		u32 val = *(map_addr + offset / 4 + i);
+
+		if (!data_count) {
+			host1x_debug_output(o, "%08x: %08x:", addr, val);
+			data_count = show_channel_command(o, val);
+		} else {
+			host1x_debug_output(o, "%08x%s", val,
+					    data_count > 0 ? ", " : "])\n");
+			data_count--;
+		}
+	}
+}
+
+static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
+{
+	struct host1x_job *job;
+
+	list_for_each_entry(job, &cdma->sync_queue, list) {
+		int i;
+		host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
+				    job, job->syncpt_id, job->syncpt_end,
+				    job->first_get, job->timeout,
+				    job->num_slots, job->num_unpins);
+
+		for (i = 0; i < job->num_gathers; i++) {
+			struct host1x_job_gather *g = &job->gathers[i];
+			u32 *mapped;
+
+			if (job->gather_copy_mapped)
+				mapped = (u32 *)job->gather_copy_mapped;
+			else
+				mapped = host1x_bo_mmap(g->bo);
+
+			if (!mapped) {
+				host1x_debug_output(o, "[could not mmap]\n");
+				continue;
+			}
+
+			host1x_debug_output(o, "    GATHER at %08x+%04x, %d words\n",
+					    g->base, g->offset, g->words);
+
+			show_gather(o, g->base + g->offset, g->words, cdma,
+				    g->base, mapped);
+
+			if (!job->gather_copy_mapped)
+				host1x_bo_munmap(g->bo, mapped);
+		}
+	}
+}
+
+static void host1x_debug_show_channel_cdma(struct host1x *host,
+					   struct host1x_channel *ch,
+					   struct output *o)
+{
+	struct host1x_cdma *cdma = &ch->cdma;
+	u32 dmaput, dmaget, dmactrl;
+	u32 cbstat, cbread;
+	u32 val, base, baseval;
+
+	dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
+	dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
+	dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
+	cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
+	cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
+
+	host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev));
+
+	if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
+	    !ch->cdma.push_buffer.mapped) {
+		host1x_debug_output(o, "inactive\n\n");
+		return;
+	}
+
+	if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
+	    HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+	    HOST1X_UCLASS_WAIT_SYNCPT)
+		host1x_debug_output(o, "waiting on syncpt %d val %d\n",
+				    cbread >> 24, cbread & 0xffffff);
+	else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
+	   HOST1X_CLASS_HOST1X &&
+	   HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+	   HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
+
+		base = (cbread >> 16) & 0xff;
+		baseval =
+			host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
+		val = cbread & 0xffff;
+		host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n",
+				    cbread >> 24, baseval + val, base,
+				    baseval, val);
+	} else
+		host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n",
+				    HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat),
+				    HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat),
+				    cbread);
+
+	host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+			    dmaput, dmaget, dmactrl);
+	host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
+
+	show_channel_gathers(o, cdma);
+	host1x_debug_output(o, "\n");
+}
+
+static void host1x_debug_show_channel_fifo(struct host1x *host,
+					   struct host1x_channel *ch,
+					   struct output *o)
+{
+	u32 val, rd_ptr, wr_ptr, start, end;
+	unsigned int data_count = 0;
+
+	host1x_debug_output(o, "%d: fifo:\n", ch->id);
+
+	val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
+	host1x_debug_output(o, "FIFOSTAT %08x\n", val);
+	if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) {
+		host1x_debug_output(o, "[empty]\n");
+		return;
+	}
+
+	host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+	host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
+			   HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id),
+			   HOST1X_SYNC_CFPEEK_CTRL);
+
+	val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS);
+	rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val);
+	wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val);
+
+	val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id));
+	start = HOST1X_SYNC_CF_SETUP_BASE_V(val);
+	end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val);
+
+	do {
+		host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+		host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
+				   HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) |
+				   HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr),
+				   HOST1X_SYNC_CFPEEK_CTRL);
+		val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ);
+
+		if (!data_count) {
+			host1x_debug_output(o, "%08x:", val);
+			data_count = show_channel_command(o, val);
+		} else {
+			host1x_debug_output(o, "%08x%s", val,
+					    data_count > 0 ? ", " : "])\n");
+			data_count--;
+		}
+
+		if (rd_ptr == end)
+			rd_ptr = start;
+		else
+			rd_ptr++;
+	} while (rd_ptr != wr_ptr);
+
+	if (data_count)
+		host1x_debug_output(o, ", ...])\n");
+	host1x_debug_output(o, "\n");
+
+	host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+}
+
+static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
+{
+	int i;
+
+	host1x_debug_output(o, "---- mlocks ----\n");
+	for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
+		u32 owner =
+			host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
+		if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
+			host1x_debug_output(o, "%d: locked by channel %d\n",
+				i, HOST1X_SYNC_MLOCK_OWNER_CHID_F(owner));
+		else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
+			host1x_debug_output(o, "%d: locked by cpu\n", i);
+		else
+			host1x_debug_output(o, "%d: unlocked\n", i);
+	}
+	host1x_debug_output(o, "\n");
+}
+
+static const struct host1x_debug_ops host1x_debug_ops = {
+	.show_channel_cdma = host1x_debug_show_channel_cdma,
+	.show_channel_fifo = host1x_debug_show_channel_fifo,
+	.show_mlocks = host1x_debug_show_mlocks,
+};
diff --git a/linux-imx/drivers/gpu/host1x/hw/host1x01.c b/linux-imx/drivers/gpu/host1x/hw/host1x01.c
new file mode 100644
index 0000000..a14e91c
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/hw/host1x01.c
@@ -0,0 +1,42 @@
+/*
+ * Host1x init for T20 and T30 Architecture Chips
+ *
+ * Copyright (c) 2011-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "hw/host1x01.h"
+#include "hw/host1x01_hardware.h"
+
+/* include code */
+#include "hw/cdma_hw.c"
+#include "hw/channel_hw.c"
+#include "hw/debug_hw.c"
+#include "hw/intr_hw.c"
+#include "hw/syncpt_hw.c"
+
+#include "dev.h"
+
+int host1x01_init(struct host1x *host)
+{
+	host->channel_op = &host1x_channel_ops;
+	host->cdma_op = &host1x_cdma_ops;
+	host->cdma_pb_op = &host1x_pushbuffer_ops;
+	host->syncpt_op = &host1x_syncpt_ops;
+	host->intr_op = &host1x_intr_ops;
+	host->debug_op = &host1x_debug_ops;
+
+	return 0;
+}
diff --git a/linux-imx/drivers/gpu/host1x/hw/host1x01.h b/linux-imx/drivers/gpu/host1x/hw/host1x01.h
new file mode 100644
index 0000000..2706b67
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/hw/host1x01.h
@@ -0,0 +1,25 @@
+/*
+ * Host1x init for T20 and T30 Architecture Chips
+ *
+ * Copyright (c) 2011-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HOST1X_HOST1X01_H
+#define HOST1X_HOST1X01_H
+
+struct host1x;
+
+int host1x01_init(struct host1x *host);
+
+#endif /* HOST1X_HOST1X01_H */
diff --git a/linux-imx/drivers/gpu/host1x/hw/host1x01_hardware.h b/linux-imx/drivers/gpu/host1x/hw/host1x01_hardware.h
new file mode 100644
index 0000000..5f0fb86
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/hw/host1x01_hardware.h
@@ -0,0 +1,143 @@
+/*
+ * Tegra host1x Register Offsets for Tegra20 and Tegra30
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X01_HARDWARE_H
+#define __HOST1X_HOST1X01_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x01_channel.h"
+#include "hw_host1x01_sync.h"
+#include "hw_host1x01_uclass.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+	unsigned indx, unsigned threshold)
+{
+	return host1x_uclass_wait_syncpt_indx_f(indx)
+		| host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+	unsigned indx, unsigned threshold)
+{
+	return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+		| host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+	unsigned indx, unsigned base_indx, unsigned offset)
+{
+	return host1x_uclass_wait_syncpt_base_indx_f(indx)
+		| host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+		| host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+	unsigned base_indx, unsigned offset)
+{
+	return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+		| host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+	unsigned cond, unsigned indx)
+{
+	return host1x_uclass_incr_syncpt_cond_f(cond)
+		| host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = host1x_uclass_indoff_indbe_f(0xf)
+		| host1x_uclass_indoff_indmodid_f(mod_id)
+		| host1x_uclass_indoff_indroffset_f(offset);
+	if (auto_inc)
+		v |= host1x_uclass_indoff_autoinc_f(1);
+	return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+	unsigned mod_id, unsigned offset, bool auto_inc)
+{
+	u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+		| host1x_uclass_indoff_indroffset_f(offset)
+		| host1x_uclass_indoff_rwn_read_v();
+	if (auto_inc)
+		v |= host1x_uclass_indoff_autoinc_f(1);
+	return v;
+}
+
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+	unsigned class_id, unsigned offset, unsigned mask)
+{
+	return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+	return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+	return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+	return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+	return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+	return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+		host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+	return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+	return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+	return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
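+
+/*
+ * Worked examples, derived from the encoders above:
+ *
+ *   host1x_opcode_setclass(0x01, 0, 0) -> 0x00000040 (select class 0x01)
+ *   host1x_opcode_incr(0x100, 2)       -> 0x11000002 (2 words at 0x100)
+ *   HOST1X_OPCODE_NOP                  -> 0x20000000 (NONINCR, 0 words)
+ *
+ * Each opcode word is followed in the stream by its payload words, if
+ * any.
+ */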
+
+#endif
diff --git a/linux-imx/drivers/gpu/host1x/hw/hw_host1x01_channel.h b/linux-imx/drivers/gpu/host1x/hw/hw_host1x01_channel.h
new file mode 100644
index 0000000..b4bc7ca
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/hw/hw_host1x01_channel.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
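+
+ /*
+  * For example, with the definitions below:
+  *
+  *     HOST1X_CHANNEL_DMACTRL is the 0x24 register offset, while
+  *     HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) extracts bit 0 (the DMASTOP
+  *     field) from a DMACTRL register value 'r'.
+  */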
+
+#ifndef __hw_host1x_channel_host1x_h__
+#define __hw_host1x_channel_host1x_h__
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+	return 0x0;
+}
+#define HOST1X_CHANNEL_FIFOSTAT \
+	host1x_channel_fifostat_r()
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+	return (r >> 10) & 0x1;
+}
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
+	host1x_channel_fifostat_cfempty_v(r)
+static inline u32 host1x_channel_dmastart_r(void)
+{
+	return 0x14;
+}
+#define HOST1X_CHANNEL_DMASTART \
+	host1x_channel_dmastart_r()
+static inline u32 host1x_channel_dmaput_r(void)
+{
+	return 0x18;
+}
+#define HOST1X_CHANNEL_DMAPUT \
+	host1x_channel_dmaput_r()
+static inline u32 host1x_channel_dmaget_r(void)
+{
+	return 0x1c;
+}
+#define HOST1X_CHANNEL_DMAGET \
+	host1x_channel_dmaget_r()
+static inline u32 host1x_channel_dmaend_r(void)
+{
+	return 0x20;
+}
+#define HOST1X_CHANNEL_DMAEND \
+	host1x_channel_dmaend_r()
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+	return 0x24;
+}
+#define HOST1X_CHANNEL_DMACTRL \
+	host1x_channel_dmactrl_r()
+static inline u32 host1x_channel_dmactrl_dmastop(void)
+{
+	return 1 << 0;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
+	host1x_channel_dmactrl_dmastop()
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
+	host1x_channel_dmactrl_dmastop_v(r)
+static inline u32 host1x_channel_dmactrl_dmagetrst(void)
+{
+	return 1 << 1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
+	host1x_channel_dmactrl_dmagetrst()
+static inline u32 host1x_channel_dmactrl_dmainitget(void)
+{
+	return 1 << 2;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
+	host1x_channel_dmactrl_dmainitget()
+#endif
diff --git a/linux-imx/drivers/gpu/host1x/hw/hw_host1x01_sync.h b/linux-imx/drivers/gpu/host1x/hw/hw_host1x01_sync.h
new file mode 100644
index 0000000..ac704e5
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/hw/hw_host1x01_sync.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef __hw_host1x01_sync_h__
+#define __hw_host1x01_sync_h__
+
+#define REGISTER_STRIDE	4
+
+static inline u32 host1x_sync_syncpt_r(unsigned int id)
+{
+	return 0x400 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT(id) \
+	host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+	return 0x40 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+	host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+	return 0x60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+	host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+	return 0x68 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+	host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
+{
+	return 0x80 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CF_SETUP(channel) \
+	host1x_sync_cf_setup_r(channel)
+static inline u32 host1x_sync_cf_setup_base_v(u32 r)
+{
+	return (r >> 0) & 0x1ff;
+}
+#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
+	host1x_sync_cf_setup_base_v(r)
+static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
+{
+	return (r >> 16) & 0x1ff;
+}
+#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
+	host1x_sync_cf_setup_limit_v(r)
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+	return 0xac;
+}
+#define HOST1X_SYNC_CMDPROC_STOP \
+	host1x_sync_cmdproc_stop_r()
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+	return 0xb0;
+}
+#define HOST1X_SYNC_CH_TEARDOWN \
+	host1x_sync_ch_teardown_r()
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+	return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+	host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+	return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+	host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+	return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+	host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
+{
+	return 0x340 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_MLOCK_OWNER(id) \
+	host1x_sync_mlock_owner_r(id)
+static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
+{
+	return (v & 0xf) << 8;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
+	host1x_sync_mlock_owner_chid_f(v)
+static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
+{
+	return (r >> 1) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
+	host1x_sync_mlock_owner_cpu_owns_v(r)
+static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
+	host1x_sync_mlock_owner_ch_owns_v(r)
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+	return 0x500 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+	host1x_sync_syncpt_int_thresh_r(id)
+static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
+{
+	return 0x600 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_BASE(id) \
+	host1x_sync_syncpt_base_r(id)
+static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
+{
+	return 0x700 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
+	host1x_sync_syncpt_cpu_incr_r(id)
+static inline u32 host1x_sync_cbread_r(unsigned int channel)
+{
+	return 0x720 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBREAD(channel) \
+	host1x_sync_cbread_r(channel)
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+	return 0x74c;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL \
+	host1x_sync_cfpeek_ctrl_r()
+static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
+{
+	return (v & 0x1ff) << 0;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
+	host1x_sync_cfpeek_ctrl_addr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
+{
+	return (v & 0x7) << 16;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
+	host1x_sync_cfpeek_ctrl_channr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
+{
+	return (v & 0x1) << 31;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
+	host1x_sync_cfpeek_ctrl_ena_f(v)
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+	return 0x750;
+}
+#define HOST1X_SYNC_CFPEEK_READ \
+	host1x_sync_cfpeek_read_r()
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+	return 0x754;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS \
+	host1x_sync_cfpeek_ptrs_r()
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+	return (r >> 0) & 0x1ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
+	host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+	return (r >> 16) & 0x1ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
+	host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
+static inline u32 host1x_sync_cbstat_r(unsigned int channel)
+{
+	return 0x758 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBSTAT(channel) \
+	host1x_sync_cbstat_r(channel)
+static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
+{
+	return (r >> 0) & 0xffff;
+}
+#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
+	host1x_sync_cbstat_cboffset_v(r)
+static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
+{
+	return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
+	host1x_sync_cbstat_cbclass_v(r)
+
+#endif /* __hw_host1x01_sync_h__ */
diff --git a/linux-imx/drivers/gpu/host1x/hw/hw_host1x01_uclass.h b/linux-imx/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
new file mode 100644
index 0000000..42f3ce1
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/hw/hw_host1x01_uclass.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef __hw_host1x_uclass_host1x_h__
+#define __hw_host1x_uclass_host1x_h__
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+	return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+	host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+	return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+	host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+	host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+	return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+	host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+	host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+	host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+	return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+	host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+	host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+	host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+	host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+	return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+	host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+	return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+	host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+	return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+	host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+	return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+	host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+	return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+	host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+	return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+	host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+	return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+	host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+	return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+	host1x_uclass_indoff_rwn_read_v()
+#endif
diff --git a/linux-imx/drivers/gpu/host1x/hw/intr_hw.c b/linux-imx/drivers/gpu/host1x/hw/intr_hw.c
new file mode 100644
index 0000000..b083509
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/hw/intr_hw.c
@@ -0,0 +1,143 @@
+/*
+ * Tegra host1x Interrupt Management
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/mach/irq.h>
+
+#include "intr.h"
+#include "dev.h"
+
+/*
+ * Sync point threshold interrupt service function
+ * Handles sync point threshold triggers, in interrupt context
+ */
+static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
+{
+	unsigned int id = syncpt->id;
+	struct host1x *host = syncpt->host;
+
+	host1x_sync_writel(host, BIT_MASK(id),
+		HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
+	host1x_sync_writel(host, BIT_MASK(id),
+		HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
+
+	queue_work(host->intr_wq, &syncpt->intr.work);
+}
+
+static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
+{
+	struct host1x *host = dev_id;
+	unsigned long reg;
+	int i, id;
+
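+	/* each CPU0 status register reports 32 sync points; scan them all */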
+	for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
+		reg = host1x_sync_readl(host,
+			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
+		for_each_set_bit(id, &reg, BITS_PER_LONG) {
+			struct host1x_syncpt *syncpt =
+				host->syncpt + (i * BITS_PER_LONG + id);
+			host1x_intr_syncpt_handle(syncpt);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
+{
+	u32 i;
+
+	for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
+		host1x_sync_writel(host, 0xffffffffu,
+			HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
+		host1x_sync_writel(host, 0xffffffffu,
+			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
+	}
+}
+
+static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
+	void (*syncpt_thresh_work)(struct work_struct *))
+{
+	int i, err;
+
+	host1x_hw_intr_disable_all_syncpt_intrs(host);
+
+	for (i = 0; i < host->info->nb_pts; i++)
+		INIT_WORK(&host->syncpt[i].intr.work, syncpt_thresh_work);
+
+	err = devm_request_irq(host->dev, host->intr_syncpt_irq,
+			       syncpt_thresh_isr, IRQF_SHARED,
+			       "host1x_syncpt", host);
+	if (IS_ERR_VALUE(err)) {
+		WARN_ON(1);
+		return err;
+	}
+
+	/* disable the ip_busy_timeout. this prevents write drops */
+	host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);
+
+	/*
+	 * increase the auto-ack timeout to the maximum value. 2D will hang
+	 * otherwise on Tegra2.
+	 */
+	host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
+
+	/* update host clocks per usec */
+	host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
+
+	return 0;
+}
+
+static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
+	u32 id, u32 thresh)
+{
+	host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
+}
+
+static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id)
+{
+	host1x_sync_writel(host, BIT_MASK(id),
+		HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
+}
+
+static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
+{
+	host1x_sync_writel(host, BIT_MASK(id),
+		HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
+	host1x_sync_writel(host, BIT_MASK(id),
+		HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
+}
+
+static int _host1x_free_syncpt_irq(struct host1x *host)
+{
+	devm_free_irq(host->dev, host->intr_syncpt_irq, host);
+	flush_workqueue(host->intr_wq);
+	return 0;
+}
+
+static const struct host1x_intr_ops host1x_intr_ops = {
+	.init_host_sync = _host1x_intr_init_host_sync,
+	.set_syncpt_threshold = _host1x_intr_set_syncpt_threshold,
+	.enable_syncpt_intr = _host1x_intr_enable_syncpt_intr,
+	.disable_syncpt_intr = _host1x_intr_disable_syncpt_intr,
+	.disable_all_syncpt_intrs = _host1x_intr_disable_all_syncpt_intrs,
+	.free_syncpt_irq = _host1x_free_syncpt_irq,
+};
diff --git a/linux-imx/drivers/gpu/host1x/hw/syncpt_hw.c b/linux-imx/drivers/gpu/host1x/hw/syncpt_hw.c
new file mode 100644
index 0000000..6117499
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -0,0 +1,114 @@
+/*
+ * Tegra host1x Syncpoints
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+
+#include "dev.h"
+#include "syncpt.h"
+
+/*
+ * Write the current syncpoint value back to hw.
+ */
+static void syncpt_restore(struct host1x_syncpt *sp)
+{
+	struct host1x *host = sp->host;
+	int min = host1x_syncpt_read_min(sp);
+	host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
+}
+
+/*
+ * Write the current waitbase value back to hw.
+ */
+static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
+{
+	struct host1x *host = sp->host;
+	host1x_sync_writel(host, sp->base_val,
+			   HOST1X_SYNC_SYNCPT_BASE(sp->id));
+}
+
+/*
+ * Read waitbase value from hw.
+ */
+static void syncpt_read_wait_base(struct host1x_syncpt *sp)
+{
+	struct host1x *host = sp->host;
+	sp->base_val =
+		host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
+}
+
+/*
+ * Updates the last value read from hardware.
+ */
+static u32 syncpt_load(struct host1x_syncpt *sp)
+{
+	struct host1x *host = sp->host;
+	u32 old, live;
+
+	/* Loop in case there's a race writing to min_val */
+	do {
+		old = host1x_syncpt_read_min(sp);
+		live = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(sp->id));
+	} while ((u32)atomic_cmpxchg(&sp->min_val, old, live) != old);
+
+	if (!host1x_syncpt_check_max(sp, live))
+		dev_err(host->dev, "%s failed: id=%u, min=%d, max=%d\n",
+			__func__, sp->id, host1x_syncpt_read_min(sp),
+			host1x_syncpt_read_max(sp));
+
+	return live;
+}
+
+/*
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache.
+ */
+static void syncpt_cpu_incr(struct host1x_syncpt *sp)
+{
+	struct host1x *host = sp->host;
+	u32 reg_offset = sp->id / 32;
+
+	if (!host1x_syncpt_client_managed(sp) &&
+	    host1x_syncpt_idle(sp)) {
+		dev_err(host->dev, "Trying to increment syncpoint id %d beyond max\n",
+			sp->id);
+		host1x_debug_dump(sp->host);
+		return;
+	}
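+	/* each SYNCPT_CPU_INCR register covers 32 sync points, one bit each */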
+	host1x_sync_writel(host, BIT_MASK(sp->id),
+			   HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
+	wmb();
+}
+
+/* remove a wait pointed to by patch_addr */
+static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
+{
+	u32 override = host1x_class_host_wait_syncpt(
+		HOST1X_SYNCPT_RESERVED, 0);
+
+	*((u32 *)patch_addr) = override;
+	return 0;
+}
+
+static const struct host1x_syncpt_ops host1x_syncpt_ops = {
+	.restore = syncpt_restore,
+	.restore_wait_base = syncpt_restore_wait_base,
+	.load_wait_base = syncpt_read_wait_base,
+	.load = syncpt_load,
+	.cpu_incr = syncpt_cpu_incr,
+	.patch_wait = syncpt_patch_wait,
+};
diff --git a/linux-imx/drivers/gpu/host1x/intr.c b/linux-imx/drivers/gpu/host1x/intr.c
new file mode 100644
index 0000000..2491bf8
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/intr.c
@@ -0,0 +1,354 @@
+/*
+ * Tegra host1x Interrupt Management
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include <trace/events/host1x.h>
+#include "channel.h"
+#include "dev.h"
+#include "intr.h"
+
+/* Wait list management */
+
+enum waitlist_state {
+	WLS_PENDING,
+	WLS_REMOVED,
+	WLS_CANCELLED,
+	WLS_HANDLED
+};
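+
+/*
+ * A waiter normally advances PENDING -> REMOVED -> HANDLED; cancellation
+ * takes it PENDING -> CANCELLED -> HANDLED instead.  States are advanced
+ * with atomic increments and compare-exchanges, so the order of the enum
+ * values above is significant.
+ */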
+
+static void waiter_release(struct kref *kref)
+{
+	kfree(container_of(kref, struct host1x_waitlist, refcount));
+}
+
+/*
+ * add a waiter to a waiter queue, sorted by threshold
+ * returns true if it was added at the head of the queue
+ */
+static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
+				struct list_head *queue)
+{
+	struct host1x_waitlist *pos;
+	u32 thresh = waiter->thresh;
+
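+	/*
+	 * Compare thresholds with a signed 32-bit subtraction so that the
+	 * ordering stays correct across sync point value wraparound.
+	 */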
+	list_for_each_entry_reverse(pos, queue, list)
+		if ((s32)(pos->thresh - thresh) <= 0) {
+			list_add(&waiter->list, &pos->list);
+			return false;
+		}
+
+	list_add(&waiter->list, queue);
+	return true;
+}
+
+/*
+ * run through a waiter queue for a single sync point ID
+ * and gather all completed waiters into per-action lists
+ */
+static void remove_completed_waiters(struct list_head *head, u32 sync,
+			struct list_head completed[HOST1X_INTR_ACTION_COUNT])
+{
+	struct list_head *dest;
+	struct host1x_waitlist *waiter, *next, *prev;
+
+	list_for_each_entry_safe(waiter, next, head, list) {
+		if ((s32)(waiter->thresh - sync) > 0)
+			break;
+
+		dest = completed + waiter->action;
+
+		/* consolidate submit cleanups */
+		if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
+		    !list_empty(dest)) {
+			prev = list_entry(dest->prev,
+					  struct host1x_waitlist, list);
+			if (prev->data == waiter->data) {
+				prev->count++;
+				dest = NULL;
+			}
+		}
+
+		/* PENDING->REMOVED or CANCELLED->HANDLED */
+		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
+			list_del(&waiter->list);
+			kref_put(&waiter->refcount, waiter_release);
+		} else
+			list_move_tail(&waiter->list, dest);
+	}
+}
+
+static void reset_threshold_interrupt(struct host1x *host,
+				      struct list_head *head,
+				      unsigned int id)
+{
+	u32 thresh =
+		list_first_entry(head, struct host1x_waitlist, list)->thresh;
+
+	host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
+	host1x_hw_intr_enable_syncpt_intr(host, id);
+}
+
+static void action_submit_complete(struct host1x_waitlist *waiter)
+{
+	struct host1x_channel *channel = waiter->data;
+
+	host1x_cdma_update(&channel->cdma);
+
+	/* add nr_completed to the trace */
+	trace_host1x_channel_submit_complete(dev_name(channel->dev),
+					     waiter->count, waiter->thresh);
+}
+
+static void action_wakeup(struct host1x_waitlist *waiter)
+{
+	wait_queue_head_t *wq = waiter->data;
+	wake_up(wq);
+}
+
+static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
+{
+	wait_queue_head_t *wq = waiter->data;
+	wake_up_interruptible(wq);
+}
+
+typedef void (*action_handler)(struct host1x_waitlist *waiter);
+
+static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
+	action_submit_complete,
+	action_wakeup,
+	action_wakeup_interruptible,
+};
+
+static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
+{
+	struct list_head *head = completed;
+	int i;
+
+	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
+		action_handler handler = action_handlers[i];
+		struct host1x_waitlist *waiter, *next;
+
+		list_for_each_entry_safe(waiter, next, head, list) {
+			list_del(&waiter->list);
+			handler(waiter);
+			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
+				WLS_REMOVED);
+			kref_put(&waiter->refcount, waiter_release);
+		}
+	}
+}
+
+/*
+ * Remove & handle all waiters that have completed for the given syncpt
+ */
+static int process_wait_list(struct host1x *host,
+			     struct host1x_syncpt *syncpt,
+			     u32 threshold)
+{
+	struct list_head completed[HOST1X_INTR_ACTION_COUNT];
+	unsigned int i;
+	int empty;
+
+	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
+		INIT_LIST_HEAD(completed + i);
+
+	spin_lock(&syncpt->intr.lock);
+
+	remove_completed_waiters(&syncpt->intr.wait_head, threshold,
+				 completed);
+
+	empty = list_empty(&syncpt->intr.wait_head);
+	if (empty)
+		host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
+	else
+		reset_threshold_interrupt(host, &syncpt->intr.wait_head,
+					  syncpt->id);
+
+	spin_unlock(&syncpt->intr.lock);
+
+	run_handlers(completed);
+
+	return empty;
+}
+
+/*
+ * Sync point threshold interrupt service function, run from a workqueue.
+ * Handles sync point threshold triggers in process context.
+ */
+
+static void syncpt_thresh_work(struct work_struct *work)
+{
+	struct host1x_syncpt_intr *syncpt_intr =
+		container_of(work, struct host1x_syncpt_intr, work);
+	struct host1x_syncpt *syncpt =
+		container_of(syncpt_intr, struct host1x_syncpt, intr);
+	unsigned int id = syncpt->id;
+	struct host1x *host = syncpt->host;
+
+	(void)process_wait_list(host, syncpt,
+				host1x_syncpt_load(host->syncpt + id));
+}
+
+int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+			   enum host1x_intr_action action, void *data,
+			   struct host1x_waitlist *waiter, void **ref)
+{
+	struct host1x_syncpt *syncpt;
+	int queue_was_empty;
+
+	if (waiter == NULL) {
+		pr_warn("%s: NULL waiter\n", __func__);
+		return -EINVAL;
+	}
+
+	/* initialize a new waiter */
+	INIT_LIST_HEAD(&waiter->list);
+	kref_init(&waiter->refcount);
+	if (ref)
+		kref_get(&waiter->refcount);
+	waiter->thresh = thresh;
+	waiter->action = action;
+	atomic_set(&waiter->state, WLS_PENDING);
+	waiter->data = data;
+	waiter->count = 1;
+
+	syncpt = host->syncpt + id;
+
+	spin_lock(&syncpt->intr.lock);
+
+	queue_was_empty = list_empty(&syncpt->intr.wait_head);
+
+	if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
+		/* added at head of list - new threshold value */
+		host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
+
+		/* added as first waiter - enable interrupt */
+		if (queue_was_empty)
+			host1x_hw_intr_enable_syncpt_intr(host, id);
+	}
+
+	spin_unlock(&syncpt->intr.lock);
+
+	if (ref)
+		*ref = waiter;
+	return 0;
+}
+
+void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
+{
+	struct host1x_waitlist *waiter = ref;
+	struct host1x_syncpt *syncpt;
+
+	while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
+	       WLS_REMOVED)
+		schedule();
+
+	syncpt = host->syncpt + id;
+	(void)process_wait_list(host, syncpt,
+				host1x_syncpt_load(host->syncpt + id));
+
+	kref_put(&waiter->refcount, waiter_release);
+}
+
+int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
+{
+	unsigned int id;
+	u32 nb_pts = host1x_syncpt_nb_pts(host);
+
+	mutex_init(&host->intr_mutex);
+	host->intr_syncpt_irq = irq_sync;
+	host->intr_wq = create_workqueue("host_syncpt");
+	if (!host->intr_wq)
+		return -ENOMEM;
+
+	for (id = 0; id < nb_pts; ++id) {
+		struct host1x_syncpt *syncpt = host->syncpt + id;
+
+		spin_lock_init(&syncpt->intr.lock);
+		INIT_LIST_HEAD(&syncpt->intr.wait_head);
+		snprintf(syncpt->intr.thresh_irq_name,
+			 sizeof(syncpt->intr.thresh_irq_name),
+			 "host1x_sp_%02d", id);
+	}
+
+	host1x_intr_start(host);
+
+	return 0;
+}
+
+void host1x_intr_deinit(struct host1x *host)
+{
+	host1x_intr_stop(host);
+	destroy_workqueue(host->intr_wq);
+}
+
+void host1x_intr_start(struct host1x *host)
+{
+	u32 hz = clk_get_rate(host->clk);
+	int err;
+
+	mutex_lock(&host->intr_mutex);
+	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
+					    syncpt_thresh_work);
+	if (err) {
+		mutex_unlock(&host->intr_mutex);
+		return;
+	}
+	mutex_unlock(&host->intr_mutex);
+}
+
+void host1x_intr_stop(struct host1x *host)
+{
+	unsigned int id;
+	struct host1x_syncpt *syncpt = host->syncpt;
+	u32 nb_pts = host1x_syncpt_nb_pts(host);
+
+	mutex_lock(&host->intr_mutex);
+
+	host1x_hw_intr_disable_all_syncpt_intrs(host);
+
+	for (id = 0; id < nb_pts; ++id) {
+		struct host1x_waitlist *waiter, *next;
+
+		list_for_each_entry_safe(waiter, next,
+			&syncpt[id].intr.wait_head, list) {
+			if (atomic_cmpxchg(&waiter->state,
+			    WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
+				list_del(&waiter->list);
+				kref_put(&waiter->refcount, waiter_release);
+			}
+		}
+
+		if (!list_empty(&syncpt[id].intr.wait_head)) {
+			/* output diagnostics */
+			mutex_unlock(&host->intr_mutex);
+			pr_warn("%s cannot stop syncpt intr id=%d\n",
+				__func__, id);
+			return;
+		}
+	}
+
+	host1x_hw_intr_free_syncpt_irq(host);
+
+	mutex_unlock(&host->intr_mutex);
+}
diff --git a/linux-imx/drivers/gpu/host1x/intr.h b/linux-imx/drivers/gpu/host1x/intr.h
new file mode 100644
index 0000000..2b8adf0
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/intr.h
@@ -0,0 +1,102 @@
+/*
+ * Tegra host1x Interrupt Management
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_INTR_H
+#define __HOST1X_INTR_H
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+struct host1x;
+
+enum host1x_intr_action {
+	/*
+	 * Perform cleanup after a submit has completed.
+	 * 'data' points to a channel
+	 */
+	HOST1X_INTR_ACTION_SUBMIT_COMPLETE = 0,
+
+	/*
+	 * Wake up a task.
+	 * 'data' points to a wait_queue_head_t
+	 */
+	HOST1X_INTR_ACTION_WAKEUP,
+
+	/*
+	 * Wake up an interruptible task.
+	 * 'data' points to a wait_queue_head_t
+	 */
+	HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+
+	HOST1X_INTR_ACTION_COUNT
+};
+
+struct host1x_syncpt_intr {
+	spinlock_t lock;
+	struct list_head wait_head;
+	char thresh_irq_name[12];
+	struct work_struct work;
+};
+
+struct host1x_waitlist {
+	struct list_head list;
+	struct kref refcount;
+	u32 thresh;
+	enum host1x_intr_action action;
+	atomic_t state;
+	void *data;
+	int count;
+};
+
+/*
+ * Schedule an action to be taken when a sync point reaches the given threshold.
+ *
+ * @id the sync point
+ * @thresh the threshold
+ * @action the action to take
+ * @data a pointer to extra data depending on action, see above
+ * @waiter waiter structure - assumes ownership
+ * @ref must be passed if cancellation is possible, else NULL
+ *
+ * This is a non-blocking api.
+ */
+int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+	enum host1x_intr_action action, void *data,
+	struct host1x_waitlist *waiter, void **ref);
+
+/*
+ * Unreference an action submitted to host1x_intr_add_action().
+ * You must call this if you passed non-NULL as ref.
+ * @ref the ref returned from host1x_intr_add_action()
+ */
+void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref);
+
+/* Initialize host1x sync point interrupt */
+int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
+
+/* Deinitialize host1x sync point interrupt */
+void host1x_intr_deinit(struct host1x *host);
+
+/* Enable host1x sync point interrupt */
+void host1x_intr_start(struct host1x *host);
+
+/* Disable host1x sync point interrupt */
+void host1x_intr_stop(struct host1x *host);
+
+irqreturn_t host1x_syncpt_thresh_fn(void *dev_id);
+#endif
diff --git a/linux-imx/drivers/gpu/host1x/job.c b/linux-imx/drivers/gpu/host1x/job.c
new file mode 100644
index 0000000..f665d67
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/job.c
@@ -0,0 +1,603 @@
+/*
+ * Tegra host1x Job
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <trace/events/host1x.h>
+
+#include "channel.h"
+#include "dev.h"
+#include "host1x_bo.h"
+#include "job.h"
+#include "syncpt.h"
+
+struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
+				    u32 num_cmdbufs, u32 num_relocs,
+				    u32 num_waitchks)
+{
+	struct host1x_job *job = NULL;
+	unsigned int num_unpins = num_cmdbufs + num_relocs;
+	u64 total;
+	void *mem;
+
+	/* Check that we're not going to overflow */
+	total = sizeof(struct host1x_job) +
+		num_relocs * sizeof(struct host1x_reloc) +
+		num_unpins * sizeof(struct host1x_job_unpin_data) +
+		num_waitchks * sizeof(struct host1x_waitchk) +
+		num_cmdbufs * sizeof(struct host1x_job_gather) +
+		num_unpins * sizeof(dma_addr_t) +
+		num_unpins * sizeof(u32 *);
+	if (total > ULONG_MAX)
+		return NULL;
+
+	mem = job = kzalloc(total, GFP_KERNEL);
+	if (!job)
+		return NULL;
+
+	kref_init(&job->ref);
+	job->channel = ch;
+
+	/* Redistribute memory to the structs  */
+	mem += sizeof(struct host1x_job);
+	job->relocarray = num_relocs ? mem : NULL;
+	mem += num_relocs * sizeof(struct host1x_reloc);
+	job->unpins = num_unpins ? mem : NULL;
+	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
+	job->waitchk = num_waitchks ? mem : NULL;
+	mem += num_waitchks * sizeof(struct host1x_waitchk);
+	job->gathers = num_cmdbufs ? mem : NULL;
+	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
+	job->addr_phys = num_unpins ? mem : NULL;
+
+	job->reloc_addr_phys = job->addr_phys;
+	job->gather_addr_phys = &job->addr_phys[num_relocs];
+
+	return job;
+}
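
host1x_job_alloc() above makes a single allocation and carves it into the job
struct plus its variable-length arrays by bumping a cursor; the u64 total
guards the size arithmetic against overflowing on 32-bit. The same pattern in
isolation (an illustrative sketch, not code from this patch):

	struct example {
		unsigned int count;
		int *values;	/* points into the same allocation */
	};

	static struct example *example_alloc(unsigned int count)
	{
		u64 total = sizeof(struct example) + (u64)count * sizeof(int);
		void *mem;
		struct example *e;

		if (total > ULONG_MAX)	/* only reachable on 32-bit */
			return NULL;

		mem = e = kzalloc(total, GFP_KERNEL);
		if (!e)
			return NULL;

		mem += sizeof(struct example);	/* cursor past the header */
		e->count = count;
		e->values = count ? mem : NULL;
		return e;
	}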
+
+struct host1x_job *host1x_job_get(struct host1x_job *job)
+{
+	kref_get(&job->ref);
+	return job;
+}
+
+static void job_free(struct kref *ref)
+{
+	struct host1x_job *job = container_of(ref, struct host1x_job, ref);
+
+	kfree(job);
+}
+
+void host1x_job_put(struct host1x_job *job)
+{
+	kref_put(&job->ref, job_free);
+}
+
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
+			   u32 words, u32 offset)
+{
+	struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers];
+
+	cur_gather->words = words;
+	cur_gather->bo = bo;
+	cur_gather->offset = offset;
+	job->num_gathers++;
+}
+
+/*
+ * NULL an already satisfied WAIT_SYNCPT host method by patching its
+ * arguments in the command stream. The method data is changed to reference
+ * the reserved (never handed out or incremented) HOST1X_SYNCPT_RESERVED
+ * syncpt with a matching threshold value of 0, so the wait is guaranteed
+ * to be popped by the host HW.
+ */
+static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp,
+				       struct host1x_bo *h, u32 offset)
+{
+	void *patch_addr = NULL;
+
+	/* patch the wait */
+	patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT);
+	if (patch_addr) {
+		host1x_syncpt_patch_wait(sp,
+					 patch_addr + (offset & ~PAGE_MASK));
+		host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr);
+	} else
+		pr_err("Could not map cmdbuf for wait check\n");
+}
+
+/*
+ * Check driver supplied waitchk structs for syncpt thresholds
+ * that have already been satisfied and NULL the comparison (to
+ * avoid a wrap condition in the HW).
+ */
+static int do_waitchks(struct host1x_job *job, struct host1x *host,
+		       struct host1x_bo *patch)
+{
+	int i;
+
+	/* compare syncpt vs wait threshold */
+	for (i = 0; i < job->num_waitchk; i++) {
+		struct host1x_waitchk *wait = &job->waitchk[i];
+		struct host1x_syncpt *sp =
+			host1x_syncpt_get(host, wait->syncpt_id);
+
+		/* validate syncpt id */
+		if (wait->syncpt_id > host1x_syncpt_nb_pts(host))
+			continue;
+
+		/* skip all other gathers */
+		if (patch != wait->bo)
+			continue;
+
+		trace_host1x_syncpt_wait_check(wait->bo, wait->offset,
+					       wait->syncpt_id, wait->thresh,
+					       host1x_syncpt_read_min(sp));
+
+		if (host1x_syncpt_is_expired(sp, wait->thresh)) {
+			dev_dbg(host->dev,
+				"drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
+				wait->syncpt_id, sp->name, wait->thresh,
+				host1x_syncpt_read_min(sp));
+
+			host1x_syncpt_patch_offset(sp, patch, wait->offset);
+		}
+
+		wait->bo = NULL;
+	}
+
+	return 0;
+}
+
+static unsigned int pin_job(struct host1x_job *job)
+{
+	unsigned int i;
+
+	job->num_unpins = 0;
+
+	for (i = 0; i < job->num_relocs; i++) {
+		struct host1x_reloc *reloc = &job->relocarray[i];
+		struct sg_table *sgt;
+		dma_addr_t phys_addr;
+
+		reloc->target = host1x_bo_get(reloc->target);
+		if (!reloc->target)
+			goto unpin;
+
+		phys_addr = host1x_bo_pin(reloc->target, &sgt);
+		if (!phys_addr)
+			goto unpin;
+
+		job->addr_phys[job->num_unpins] = phys_addr;
+		job->unpins[job->num_unpins].bo = reloc->target;
+		job->unpins[job->num_unpins].sgt = sgt;
+		job->num_unpins++;
+	}
+
+	for (i = 0; i < job->num_gathers; i++) {
+		struct host1x_job_gather *g = &job->gathers[i];
+		struct sg_table *sgt;
+		dma_addr_t phys_addr;
+
+		g->bo = host1x_bo_get(g->bo);
+		if (!g->bo)
+			goto unpin;
+
+		phys_addr = host1x_bo_pin(g->bo, &sgt);
+		if (!phys_addr)
+			goto unpin;
+
+		job->addr_phys[job->num_unpins] = phys_addr;
+		job->unpins[job->num_unpins].bo = g->bo;
+		job->unpins[job->num_unpins].sgt = sgt;
+		job->num_unpins++;
+	}
+
+	return job->num_unpins;
+
+unpin:
+	host1x_job_unpin(job);
+	return 0;
+}
+
+static int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
+{
+	int i = 0;
+	u32 last_page = ~0;
+	void *cmdbuf_page_addr = NULL;
+
+	/* pin & patch the relocs for one gather */
+	while (i < job->num_relocs) {
+		struct host1x_reloc *reloc = &job->relocarray[i];
+		u32 reloc_addr = (job->reloc_addr_phys[i] +
+			reloc->target_offset) >> reloc->shift;
+		u32 *target;
+
+		/* skip all other gathers */
+		if (!(reloc->cmdbuf && cmdbuf == reloc->cmdbuf)) {
+			i++;
+			continue;
+		}
+
+		if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) {
+			if (cmdbuf_page_addr)
+				host1x_bo_kunmap(cmdbuf, last_page,
+						 cmdbuf_page_addr);
+
+			cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
+					reloc->cmdbuf_offset >> PAGE_SHIFT);
+			last_page = reloc->cmdbuf_offset >> PAGE_SHIFT;
+
+			if (unlikely(!cmdbuf_page_addr)) {
+				pr_err("Could not map cmdbuf for relocation\n");
+				return -ENOMEM;
+			}
+		}
+
+		target = cmdbuf_page_addr + (reloc->cmdbuf_offset & ~PAGE_MASK);
+		*target = reloc_addr;
+
+		/* mark this reloc as handled */
+		reloc->cmdbuf = NULL;
+	}
+
+	if (cmdbuf_page_addr)
+		host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);
+
+	return 0;
+}
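
do_relocs() maps at most one page of the command buffer at a time, splitting
each reloc's byte offset into a page index (offset >> PAGE_SHIFT) and an
in-page offset (offset & ~PAGE_MASK). A condensed sketch of the patch step,
with the kmap bookkeeping factored out (page_vaddr stands for the mapping of
the page that contains the reloc):

	static void patch_one_reloc(void *page_vaddr, u32 cmdbuf_offset,
				    dma_addr_t target_phys, u32 target_offset,
				    u32 shift)
	{
		/* locate the word inside the already-mapped page */
		u32 *target = page_vaddr + (cmdbuf_offset & ~PAGE_MASK);

		/* store the device address, scaled as the engine expects */
		*target = (target_phys + target_offset) >> shift;
	}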
+
+static int check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
+		       unsigned int offset)
+{
+	offset *= sizeof(u32);
+
+	if (reloc->cmdbuf != cmdbuf || reloc->cmdbuf_offset != offset)
+		return -EINVAL;
+
+	return 0;
+}
+
+struct host1x_firewall {
+	struct host1x_job *job;
+	struct device *dev;
+
+	unsigned int num_relocs;
+	struct host1x_reloc *reloc;
+
+	struct host1x_bo *cmdbuf_id;
+	unsigned int offset;
+
+	u32 words;
+	u32 class;
+	u32 reg;
+	u32 mask;
+	u32 count;
+};
+
+static int check_mask(struct host1x_firewall *fw)
+{
+	u32 mask = fw->mask;
+	u32 reg = fw->reg;
+
+	while (mask) {
+		if (fw->words == 0)
+			return -EINVAL;
+
+		if (mask & 1) {
+			if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
+				bool bad_reloc = check_reloc(fw->reloc,
+							     fw->cmdbuf_id,
+							     fw->offset);
+				if (!fw->num_relocs || bad_reloc)
+					return -EINVAL;
+				fw->reloc++;
+				fw->num_relocs--;
+			}
+			fw->words--;
+			fw->offset++;
+		}
+		mask >>= 1;
+		reg++;
+	}
+
+	return 0;
+}
+
+static int check_incr(struct host1x_firewall *fw)
+{
+	u32 count = fw->count;
+	u32 reg = fw->reg;
+
+	while (count) {
+		if (fw->words == 0)
+			return -EINVAL;
+
+		if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
+			bool bad_reloc = check_reloc(fw->reloc, fw->cmdbuf_id,
+						     fw->offset);
+			if (!fw->num_relocs || bad_reloc)
+				return -EINVAL;
+			fw->reloc++;
+			fw->num_relocs--;
+		}
+		reg++;
+		fw->words--;
+		fw->offset++;
+		count--;
+	}
+
+	return 0;
+}
+
+static int check_nonincr(struct host1x_firewall *fw)
+{
+	int is_addr_reg = fw->job->is_addr_reg(fw->dev, fw->class, fw->reg);
+	u32 count = fw->count;
+
+	while (count) {
+		if (fw->words == 0)
+			return -EINVAL;
+
+		if (is_addr_reg) {
+			bool bad_reloc = check_reloc(fw->reloc, fw->cmdbuf_id,
+						     fw->offset);
+			if (!fw->num_relocs || bad_reloc)
+				return -EINVAL;
+			fw->reloc++;
+			fw->num_relocs--;
+		}
+		fw->words--;
+		fw->offset++;
+		count--;
+	}
+
+	return 0;
+}
+
+static int validate(struct host1x_job *job, struct device *dev,
+		    struct host1x_job_gather *g)
+{
+	u32 *cmdbuf_base;
+	int err = 0;
+	struct host1x_firewall fw;
+
+	fw.job = job;
+	fw.dev = dev;
+	fw.reloc = job->relocarray;
+	fw.num_relocs = job->num_relocs;
+	fw.cmdbuf_id = g->bo;
+
+	fw.offset = 0;
+	fw.class = 0;
+
+	if (!job->is_addr_reg)
+		return 0;
+
+	cmdbuf_base = host1x_bo_mmap(g->bo);
+	if (!cmdbuf_base)
+		return -ENOMEM;
+
+	fw.words = g->words;
+	while (fw.words && !err) {
+		u32 word = cmdbuf_base[fw.offset];
+		u32 opcode = (word & 0xf0000000) >> 28;
+
+		fw.mask = 0;
+		fw.reg = 0;
+		fw.count = 0;
+		fw.words--;
+		fw.offset++;
+
+		switch (opcode) {
+		case 0:
+			fw.class = word >> 6 & 0x3ff;
+			fw.mask = word & 0x3f;
+			fw.reg = word >> 16 & 0xfff;
+			err = check_mask(&fw);
+			if (err)
+				goto out;
+			break;
+		case 1:
+			fw.reg = word >> 16 & 0xfff;
+			fw.count = word & 0xffff;
+			err = check_incr(&fw);
+			if (err)
+				goto out;
+			break;
+
+		case 2:
+			fw.reg = word >> 16 & 0xfff;
+			fw.count = word & 0xffff;
+			err = check_nonincr(&fw);
+			if (err)
+				goto out;
+			break;
+
+		case 3:
+			fw.mask = word & 0xffff;
+			fw.reg = word >> 16 & 0xfff;
+			err = check_mask(&fw);
+			if (err)
+				goto out;
+			break;
+		case 4:
+		case 5:
+		case 14:
+			break;
+		default:
+			err = -EINVAL;
+			break;
+		}
+	}
+
+	/* No relocs should remain at this point */
+	if (fw.num_relocs)
+		err = -EINVAL;
+
+out:
+	host1x_bo_munmap(g->bo, cmdbuf_base);
+
+	return err;
+}
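
The firewall walks raw host1x command words; the field layout it relies on can
be read back from the masks in the switch above. A decoding sketch (the opcode
names in the comments are inferred from the masks, not taken from this file):

	static void host1x_decode_word(u32 word)
	{
		switch (word >> 28) {	/* opcode lives in bits 31:28 */
		case 0:			/* set class */
			pr_info("setclass offset=0x%03x class=0x%03x mask=0x%02x\n",
				word >> 16 & 0xfff, word >> 6 & 0x3ff, word & 0x3f);
			break;
		case 1:			/* incrementing register writes */
		case 2:			/* non-incrementing register writes */
			pr_info("%s offset=0x%03x count=%u\n",
				word >> 28 == 1 ? "incr" : "nonincr",
				word >> 16 & 0xfff, word & 0xffff);
			break;
		case 3:			/* masked register writes */
			pr_info("mask offset=0x%03x mask=0x%04x\n",
				word >> 16 & 0xfff, word & 0xffff);
			break;
		default:		/* 4, 5 and 14 pass unchecked above */
			pr_info("opcode %u\n", word >> 28);
		}
	}

For example, 0x20010005 decodes as a non-incrementing write of five values to
register offset 0x001.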
+
+static inline int copy_gathers(struct host1x_job *job, struct device *dev)
+{
+	size_t size = 0;
+	size_t offset = 0;
+	int i;
+
+	for (i = 0; i < job->num_gathers; i++) {
+		struct host1x_job_gather *g = &job->gathers[i];
+		size += g->words * sizeof(u32);
+	}
+
+	job->gather_copy_mapped = dma_alloc_writecombine(dev, size,
+							 &job->gather_copy,
+							 GFP_KERNEL);
+	if (!job->gather_copy_mapped) {
+		int err = PTR_ERR(job->gather_copy_mapped);
+		job->gather_copy_mapped = NULL;
+		return err;
+	}
+
+	job->gather_copy_size = size;
+
+	for (i = 0; i < job->num_gathers; i++) {
+		struct host1x_job_gather *g = &job->gathers[i];
+		void *gather;
+
+		gather = host1x_bo_mmap(g->bo);
+		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
+		       g->words * sizeof(u32));
+		host1x_bo_munmap(g->bo, gather);
+
+		g->base = job->gather_copy;
+		g->offset = offset;
+		g->bo = NULL;
+
+		offset += g->words * sizeof(u32);
+	}
+
+	return 0;
+}
+
+int host1x_job_pin(struct host1x_job *job, struct device *dev)
+{
+	int err;
+	unsigned int i, j;
+	struct host1x *host = dev_get_drvdata(dev->parent);
+	DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));
+
+	bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
+	for (i = 0; i < job->num_waitchk; i++) {
+		u32 syncpt_id = job->waitchk[i].syncpt_id;
+		if (syncpt_id < host1x_syncpt_nb_pts(host))
+			set_bit(syncpt_id, waitchk_mask);
+	}
+
+	/* get current syncpt values for waitchk */
+	for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
+		host1x_syncpt_load(host->syncpt + i);
+
+	/* pin memory (pin_job() returns the number of pinned handles, 0 on failure) */
+	if (!pin_job(job)) {
+		err = -ENOMEM;
+		goto out;
+	}
+	err = 0;
+
+	/* patch gathers */
+	for (i = 0; i < job->num_gathers; i++) {
+		struct host1x_job_gather *g = &job->gathers[i];
+
+		/* process each gather mem only once */
+		if (g->handled)
+			continue;
+
+		g->base = job->gather_addr_phys[i];
+
+		for (j = 0; j < job->num_gathers; j++)
+			if (job->gathers[j].bo == g->bo)
+				job->gathers[j].handled = true;
+
+		err = 0;
+
+		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+			err = validate(job, dev, g);
+
+		if (err)
+			dev_err(dev, "Job invalid (err=%d)\n", err);
+
+		if (!err)
+			err = do_relocs(job, g->bo);
+
+		if (!err)
+			err = do_waitchks(job, host, g->bo);
+
+		if (err)
+			break;
+	}
+
+	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !err) {
+		err = copy_gathers(job, dev);
+		if (err) {
+			host1x_job_unpin(job);
+			return err;
+		}
+	}
+
+out:
+	wmb();
+
+	return err;
+}
+
+void host1x_job_unpin(struct host1x_job *job)
+{
+	unsigned int i;
+
+	for (i = 0; i < job->num_unpins; i++) {
+		struct host1x_job_unpin_data *unpin = &job->unpins[i];
+		host1x_bo_unpin(unpin->bo, unpin->sgt);
+		host1x_bo_put(unpin->bo);
+	}
+	job->num_unpins = 0;
+
+	if (job->gather_copy_size)
+		dma_free_writecombine(job->channel->dev, job->gather_copy_size,
+				      job->gather_copy_mapped,
+				      job->gather_copy);
+}
+
+/*
+ * Debug routine used to dump job entries
+ */
+void host1x_job_dump(struct device *dev, struct host1x_job *job)
+{
+	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
+	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
+	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
+	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
+	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
+	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
+}
diff --git a/linux-imx/drivers/gpu/host1x/job.h b/linux-imx/drivers/gpu/host1x/job.h
new file mode 100644
index 0000000..fba45f2
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/job.h
@@ -0,0 +1,162 @@
+/*
+ * Tegra host1x Job
+ *
+ * Copyright (c) 2011-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_JOB_H
+#define __HOST1X_JOB_H
+
+struct host1x_job_gather {
+	u32 words;
+	dma_addr_t base;
+	struct host1x_bo *bo;
+	int offset;
+	bool handled;
+};
+
+struct host1x_cmdbuf {
+	u32 handle;
+	u32 offset;
+	u32 words;
+	u32 pad;
+};
+
+struct host1x_reloc {
+	struct host1x_bo *cmdbuf;
+	u32 cmdbuf_offset;
+	struct host1x_bo *target;
+	u32 target_offset;
+	u32 shift;
+	u32 pad;
+};
+
+struct host1x_waitchk {
+	struct host1x_bo *bo;
+	u32 offset;
+	u32 syncpt_id;
+	u32 thresh;
+};
+
+struct host1x_job_unpin_data {
+	struct host1x_bo *bo;
+	struct sg_table *sgt;
+};
+
+/*
+ * Each submit is tracked as a host1x_job.
+ */
+struct host1x_job {
+	/* When refcount goes to zero, job can be freed */
+	struct kref ref;
+
+	/* List entry */
+	struct list_head list;
+
+	/* Channel where job is submitted to */
+	struct host1x_channel *channel;
+
+	u32 client;
+
+	/* Gathers and their memory */
+	struct host1x_job_gather *gathers;
+	unsigned int num_gathers;
+
+	/* Wait checks to be processed at submit time */
+	struct host1x_waitchk *waitchk;
+	unsigned int num_waitchk;
+	u32 waitchk_mask;
+
+	/* Array of handles to be pinned & unpinned */
+	struct host1x_reloc *relocarray;
+	unsigned int num_relocs;
+	struct host1x_job_unpin_data *unpins;
+	unsigned int num_unpins;
+
+	dma_addr_t *addr_phys;
+	dma_addr_t *gather_addr_phys;
+	dma_addr_t *reloc_addr_phys;
+
+	/* Sync point id, number of increments and end related to the submit */
+	u32 syncpt_id;
+	u32 syncpt_incrs;
+	u32 syncpt_end;
+
+	/* Maximum time to wait for this job */
+	unsigned int timeout;
+
+	/* Index and number of slots used in the push buffer */
+	unsigned int first_get;
+	unsigned int num_slots;
+
+	/* Copy of gathers */
+	size_t gather_copy_size;
+	dma_addr_t gather_copy;
+	u8 *gather_copy_mapped;
+
+	/* Check if register is marked as an address reg */
+	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);
+
+	/* Request a SETCLASS to this class */
+	u32 class;
+
+	/* Add a channel wait for previous ops to complete */
+	bool serialize;
+};
+
+/*
+ * Allocate memory for a job. Just enough memory will be allocated to
+ * accommodate the submit.
+ */
+struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
+				    u32 num_cmdbufs, u32 num_relocs,
+				    u32 num_waitchks);
+
+/*
+ * Add a gather to a job.
+ */
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
+			   u32 words, u32 offset);
+
+/*
+ * Increment reference going to host1x_job.
+ */
+struct host1x_job *host1x_job_get(struct host1x_job *job);
+
+/*
+ * Decrement reference job, free if goes to zero.
+ */
+void host1x_job_put(struct host1x_job *job);
+
+/*
+ * Pin memory related to job. This handles relocation of addresses to the
+ * host1x address space. Handles both the gather memory and any other memory
+ * referred to from the gather buffers.
+ *
+ * Handles also patching out host waits that would wait for an expired sync
+ * point value.
+ */
+int host1x_job_pin(struct host1x_job *job, struct device *dev);
+
+/*
+ * Unpin memory related to job.
+ */
+void host1x_job_unpin(struct host1x_job *job);
+
+/*
+ * Dump contents of job to debug output.
+ */
+void host1x_job_dump(struct device *dev, struct host1x_job *job);
+
+#endif
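
Taken together, a client driver is expected to drive a job roughly as follows.
This is a hedged sketch: the channel and BO setup live outside this file, and
host1x_channel_submit() is assumed from channel.h:

	static int submit_one_gather(struct host1x_channel *channel,
				     struct host1x_syncpt *syncpt,
				     struct host1x_bo *bo, u32 num_words,
				     struct device *dev)
	{
		struct host1x_job *job;
		int err;

		/* one cmdbuf, no relocs, no waitchks */
		job = host1x_job_alloc(channel, 1, 0, 0);
		if (!job)
			return -ENOMEM;

		job->syncpt_id = host1x_syncpt_id(syncpt);
		job->syncpt_incrs = 1;		/* the job increments it once */
		host1x_job_add_gather(job, bo, num_words, 0);

		err = host1x_job_pin(job, dev);	/* pin, relocate, waitchk */
		if (!err)
			err = host1x_channel_submit(job);	/* assumed API */

		host1x_job_put(job);		/* drop our reference */
		return err;
	}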
diff --git a/linux-imx/drivers/gpu/host1x/syncpt.c b/linux-imx/drivers/gpu/host1x/syncpt.c
new file mode 100644
index 0000000..4b49345
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/syncpt.c
@@ -0,0 +1,387 @@
+/*
+ * Tegra host1x Syncpoints
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <trace/events/host1x.h>
+
+#include "syncpt.h"
+#include "dev.h"
+#include "intr.h"
+#include "debug.h"
+
+#define SYNCPT_CHECK_PERIOD (2 * HZ)
+#define MAX_STUCK_CHECK_COUNT 15
+
+static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
+						  struct device *dev,
+						  int client_managed)
+{
+	int i;
+	struct host1x_syncpt *sp = host->syncpt;
+	char *name;
+
+	for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
+		;
+	if (sp->dev)
+		return NULL;
+
+	name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
+			dev ? dev_name(dev) : NULL);
+	if (!name)
+		return NULL;
+
+	sp->dev = dev;
+	sp->name = name;
+	sp->client_managed = client_managed;
+
+	return sp;
+}
+
+u32 host1x_syncpt_id(struct host1x_syncpt *sp)
+{
+	return sp->id;
+}
+
+/*
+ * Update the cached maximum value, i.e. the value the sync point will reach
+ * once all queued increments have completed.
+ */
+u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
+{
+	return (u32)atomic_add_return(incrs, &sp->max_val);
+}
+
+/*
+ * Write cached syncpoint and waitbase values to hardware.
+ */
+void host1x_syncpt_restore(struct host1x *host)
+{
+	struct host1x_syncpt *sp_base = host->syncpt;
+	u32 i;
+
+	for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
+		host1x_hw_syncpt_restore(host, sp_base + i);
+	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
+		host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
+	wmb();
+}
+
+/*
+ * Update the cached syncpoint and waitbase values by reading them
+ * from the registers.
+ */
+void host1x_syncpt_save(struct host1x *host)
+{
+	struct host1x_syncpt *sp_base = host->syncpt;
+	u32 i;
+
+	for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
+		if (host1x_syncpt_client_managed(sp_base + i))
+			host1x_hw_syncpt_load(host, sp_base + i);
+		else
+			WARN_ON(!host1x_syncpt_idle(sp_base + i));
+	}
+
+	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
+		host1x_hw_syncpt_load_wait_base(host, sp_base + i);
+}
+
+/*
+ * Updates the cached syncpoint value by reading a new value from the hardware
+ * register
+ */
+u32 host1x_syncpt_load(struct host1x_syncpt *sp)
+{
+	u32 val;
+	val = host1x_hw_syncpt_load(sp->host, sp);
+	trace_host1x_syncpt_load_min(sp->id, val);
+
+	return val;
+}
+
+/*
+ * Get the current syncpoint base
+ */
+u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
+{
+	u32 val;
+	host1x_hw_syncpt_load_wait_base(sp->host, sp);
+	val = sp->base_val;
+	return val;
+}
+
+/*
+ * Write a cpu syncpoint increment to the hardware, without touching
+ * the cache. Caller is responsible for host being powered.
+ */
+void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp)
+{
+	host1x_hw_syncpt_cpu_incr(sp->host, sp);
+}
+
+/*
+ * Increment syncpoint value from cpu, updating cache
+ */
+void host1x_syncpt_incr(struct host1x_syncpt *sp)
+{
+	if (host1x_syncpt_client_managed(sp))
+		host1x_syncpt_incr_max(sp, 1);
+	host1x_syncpt_cpu_incr(sp);
+}
+
+/*
+ * Update the sync point from hardware, and return true if the syncpoint has
+ * expired, false if we may need to wait.
+ */
+static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
+{
+	host1x_hw_syncpt_load(sp->host, sp);
+	return host1x_syncpt_is_expired(sp, thresh);
+}
+
+/*
+ * Main entrypoint for syncpoint value waits.
+ */
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
+			u32 *value)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+	void *ref;
+	struct host1x_waitlist *waiter;
+	int err = 0, check_count = 0;
+	u32 val;
+
+	if (value)
+		*value = 0;
+
+	/* first check cache */
+	if (host1x_syncpt_is_expired(sp, thresh)) {
+		if (value)
+			*value = host1x_syncpt_load(sp);
+		return 0;
+	}
+
+	/* try to read from register */
+	val = host1x_hw_syncpt_load(sp->host, sp);
+	if (host1x_syncpt_is_expired(sp, thresh)) {
+		if (value)
+			*value = val;
+		goto done;
+	}
+
+	if (!timeout) {
+		err = -EAGAIN;
+		goto done;
+	}
+
+	/* allocate a waiter */
+	waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
+	if (!waiter) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	/* schedule a wakeup when the syncpoint value is reached */
+	err = host1x_intr_add_action(sp->host, sp->id, thresh,
+				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
+				     &wq, waiter, &ref);
+	if (err)
+		goto done;
+
+	err = -EAGAIN;
+	/* Caller-specified timeout may be impractically low */
+	if (timeout < 0)
+		timeout = LONG_MAX;
+
+	/* wait for the syncpoint, or timeout, or signal */
+	while (timeout) {
+		long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
+		int remain = wait_event_interruptible_timeout(wq,
+				syncpt_load_min_is_expired(sp, thresh),
+				check);
+		if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
+			if (value)
+				*value = host1x_syncpt_load(sp);
+			err = 0;
+			break;
+		}
+		if (remain < 0) {
+			err = remain;
+			break;
+		}
+		timeout -= check;
+		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
+			dev_warn(sp->host->dev,
+				"%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
+				 current->comm, sp->id, sp->name,
+				 thresh, timeout);
+
+			host1x_debug_dump_syncpts(sp->host);
+			if (check_count == MAX_STUCK_CHECK_COUNT)
+				host1x_debug_dump(sp->host);
+			check_count++;
+		}
+	}
+	host1x_intr_put_ref(sp->host, sp->id, ref);
+
+done:
+	return err;
+}
+EXPORT_SYMBOL(host1x_syncpt_wait);
+
+/*
+ * Returns true if syncpoint is expired, false if we may need to wait
+ */
+bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
+{
+	u32 current_val;
+	u32 future_val;
+	smp_rmb();
+	current_val = (u32)atomic_read(&sp->min_val);
+	future_val = (u32)atomic_read(&sp->max_val);
+
+	/* Note the use of unsigned arithmetic here (mod 1<<32).
+	 *
+	 * c = current_val = min_val	= the current value of the syncpoint.
+	 * t = thresh			= the value we are checking
+	 * f = future_val  = max_val	= the value c will reach when all
+	 *				  outstanding increments have completed.
+	 *
+	 * Note that c always chases f until it reaches f.
+	 *
+	 * Dtf = (f - t)
+	 * Dtc = (c - t)
+	 *
+	 *  Consider all cases:
+	 *
+	 *	A) .....c..t..f.....	Dtf < Dtc	need to wait
+	 *	B) .....c.....f..t..	Dtf > Dtc	expired
+	 *	C) ..t..c.....f.....	Dtf > Dtc	expired	   (Dct very large)
+	 *
+	 *  Any case where f==c: always expired (for any t).	Dtf == Dcf
+	 *  Any case where t==c: always expired (for any f).	Dtf >= Dtc (because Dtc==0)
+	 *  Any case where t==f!=c: always wait.		Dtf <  Dtc (because Dtf==0,
+	 *							Dtc!=0)
+	 *
+	 *  Other cases:
+	 *
+	 *	A) .....t..f..c.....	Dtf < Dtc	need to wait
+	 *	A) .....f..c..t.....	Dtf < Dtc	need to wait
+	 *	A) .....f..t..c.....	Dtf > Dtc	expired
+	 *
+	 *   So:
+	 *	   Dtf >= Dtc implies EXPIRED	(return true)
+	 *	   Dtf <  Dtc implies WAIT	(return false)
+	 *
+	 * Note: If t is expired then we *cannot* wait on it. We would wait
+	 * forever (hang the system).
+	 *
+	 * Note: do NOT get clever and remove the -thresh from both sides. It
+	 * is NOT the same.
+	 *
+	 * If the future value is zero, we have a client managed sync point. In that
+	 * case we do a direct comparison.
+	 */
+	if (!host1x_syncpt_client_managed(sp))
+		return future_val - thresh >= current_val - thresh;
+	else
+		return (s32)(current_val - thresh) >= 0;
+}
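
The modular arithmetic matters exactly when the 32-bit counter wraps. A worked
example with concrete numbers (plain userspace C, illustration only):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* counter about to wrap: current c, threshold t, future f */
		uint32_t c = 0xfffffffeu, t = 1, f = 3;

		/* Dtf = f - t = 2, Dtc = c - t = 0xfffffffd */
		/* Dtf < Dtc, so the threshold has not expired: wait */
		assert(!(f - t >= c - t));

		/* a naive direct comparison would wrongly claim "expired" */
		assert(c >= t);
		return 0;
	}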
+
+/* remove a wait pointed to by patch_addr */
+int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
+{
+	return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
+}
+
+int host1x_syncpt_init(struct host1x *host)
+{
+	struct host1x_syncpt *syncpt;
+	int i;
+
+	syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
+		GFP_KERNEL);
+	if (!syncpt)
+		return -ENOMEM;
+
+	for (i = 0; i < host->info->nb_pts; ++i) {
+		syncpt[i].id = i;
+		syncpt[i].host = host;
+	}
+
+	host->syncpt = syncpt;
+
+	host1x_syncpt_restore(host);
+
+	/* Allocate sync point to use for clearing waits for expired fences */
+	host->nop_sp = _host1x_syncpt_alloc(host, NULL, 0);
+	if (!host->nop_sp)
+		return -ENOMEM;
+
+	return 0;
+}
+
+struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+					    int client_managed)
+{
+	struct host1x *host = dev_get_drvdata(dev->parent);
+	return _host1x_syncpt_alloc(host, dev, client_managed);
+}
+
+void host1x_syncpt_free(struct host1x_syncpt *sp)
+{
+	if (!sp)
+		return;
+
+	kfree(sp->name);
+	sp->dev = NULL;
+	sp->name = NULL;
+	sp->client_managed = 0;
+}
+
+void host1x_syncpt_deinit(struct host1x *host)
+{
+	int i;
+	struct host1x_syncpt *sp = host->syncpt;
+	for (i = 0; i < host->info->nb_pts; i++, sp++)
+		kfree(sp->name);
+}
+
+int host1x_syncpt_nb_pts(struct host1x *host)
+{
+	return host->info->nb_pts;
+}
+
+int host1x_syncpt_nb_bases(struct host1x *host)
+{
+	return host->info->nb_bases;
+}
+
+int host1x_syncpt_nb_mlocks(struct host1x *host)
+{
+	return host->info->nb_mlocks;
+}
+
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
+{
+	if (host->info->nb_pts < id)
+		return NULL;
+	return host->syncpt + id;
+}
diff --git a/linux-imx/drivers/gpu/host1x/syncpt.h b/linux-imx/drivers/gpu/host1x/syncpt.h
new file mode 100644
index 0000000..c998061
--- /dev/null
+++ b/linux-imx/drivers/gpu/host1x/syncpt.h
@@ -0,0 +1,165 @@
+/*
+ * Tegra host1x Syncpoints
+ *
+ * Copyright (c) 2010-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_SYNCPT_H
+#define __HOST1X_SYNCPT_H
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include "intr.h"
+
+struct host1x;
+
+/* Reserved for replacing an expired wait with a NOP */
+#define HOST1X_SYNCPT_RESERVED			0
+
+struct host1x_syncpt {
+	int id;
+	atomic_t min_val;
+	atomic_t max_val;
+	u32 base_val;
+	const char *name;
+	int client_managed;
+	struct host1x *host;
+	struct device *dev;
+
+	/* interrupt data */
+	struct host1x_syncpt_intr intr;
+};
+
+/* Initialize sync point array  */
+int host1x_syncpt_init(struct host1x *host);
+
+/*  Free sync point array */
+void host1x_syncpt_deinit(struct host1x *host);
+
+/*
+ * Read max. It indicates how many operations there are in the queue, either
+ * in the channel or in a software thread.
+ */
+static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
+{
+	smp_rmb();
+	return (u32)atomic_read(&sp->max_val);
+}
+
+/*
+ * Read min, which is a shadow of the current sync point value in hardware.
+ */
+static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
+{
+	smp_rmb();
+	return (u32)atomic_read(&sp->min_val);
+}
+
+/* Return the number of sync points supported. */
+int host1x_syncpt_nb_pts(struct host1x *host);
+
+/* Return number of wait bases supported. */
+int host1x_syncpt_nb_bases(struct host1x *host);
+
+/* Return number of mlocks supported. */
+int host1x_syncpt_nb_mlocks(struct host1x *host);
+
+/*
+ * Check sync point sanity. If the real (hardware) value is larger than max,
+ * more sync point increments have occurred than were reserved.
+ *
+ * Client managed sync points are not tracked.
+ */
+static inline bool host1x_syncpt_check_max(struct host1x_syncpt *sp, u32 real)
+{
+	u32 max;
+	if (sp->client_managed)
+		return true;
+	max = host1x_syncpt_read_max(sp);
+	return (s32)(max - real) >= 0;
+}
+
+/* Return true if sync point is client managed. */
+static inline int host1x_syncpt_client_managed(struct host1x_syncpt *sp)
+{
+	return sp->client_managed;
+}
+
+/*
+ * Returns true if syncpoint min == max, which means that there are no
+ * outstanding operations.
+ */
+static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
+{
+	int min, max;
+	smp_rmb();
+	min = atomic_read(&sp->min_val);
+	max = atomic_read(&sp->max_val);
+	return (min == max);
+}
+
+/* Return pointer to struct denoting sync point id. */
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+
+/* Request incrementing a sync point. */
+void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp);
+
+/* Load current value from hardware to the shadow register. */
+u32 host1x_syncpt_load(struct host1x_syncpt *sp);
+
+/* Check if the given syncpoint value has already passed */
+bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh);
+
+/* Save host1x sync point state into shadow registers. */
+void host1x_syncpt_save(struct host1x *host);
+
+/* Reset host1x sync point state from shadow registers. */
+void host1x_syncpt_restore(struct host1x *host);
+
+/* Read current wait base value into shadow register and return it. */
+u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
+
+/* Increment sync point and its max. */
+void host1x_syncpt_incr(struct host1x_syncpt *sp);
+
+/* Indicate future operations by incrementing the sync point max. */
+u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
+
+/* Wait until sync point reaches a threshold value, or a timeout. */
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
+			long timeout, u32 *value);
+
+/* Check if sync point id is valid. */
+static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
+{
+	return sp->id < host1x_syncpt_nb_pts(sp->host);
+}
+
+/* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
+int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
+
+/* Return id of the sync point */
+u32 host1x_syncpt_id(struct host1x_syncpt *sp);
+
+/* Allocate a sync point for a device. */
+struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+		int client_managed);
+
+/* Free a sync point. */
+void host1x_syncpt_free(struct host1x_syncpt *sp);
+
+#endif
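
A typical client-side sequence against this header is to reserve the
increments up front and then wait on the resulting fence value. A hedged
sketch (client_dev and the submitted work are placeholders):

	static int fence_and_wait(struct device *client_dev)
	{
		struct host1x_syncpt *sp;
		u32 fence, value;
		int err;

		sp = host1x_syncpt_request(client_dev, 0 /* host managed */);
		if (!sp)
			return -ENOMEM;

		fence = host1x_syncpt_incr_max(sp, 1);	/* value the work will reach */
		/* ... submit work that increments the syncpoint once ... */

		err = host1x_syncpt_wait(sp, fence, 2 * HZ /* jiffies */, &value);
		host1x_syncpt_free(sp);
		return err;
	}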
diff --git a/linux-imx/drivers/gpu/vga/Kconfig b/linux-imx/drivers/gpu/vga/Kconfig
new file mode 100644
index 0000000..29437ea
--- /dev/null
+++ b/linux-imx/drivers/gpu/vga/Kconfig
@@ -0,0 +1,30 @@
+config VGA_ARB
+	bool "VGA Arbitration" if EXPERT
+	default y
+	depends on (PCI && !S390)
+	help
+	  Some "legacy" VGA devices implemented on PCI typically have the same
+	  hard-decoded addresses as they did on ISA. When multiple PCI devices
+	  are accessed at same time they need some kind of coordination. Please
+	  see Documentation/vgaarbiter.txt for more details. Select this to
+	  enable VGA arbiter.
+
+config VGA_ARB_MAX_GPUS
+	int "Maximum number of GPUs"
+	default 16
+	depends on VGA_ARB
+	help
+	  Reserves space in the kernel to maintain resource locking for
+	  multiple GPUS.  The overhead for each GPU is very small.
+
+config VGA_SWITCHEROO
+	bool "Laptop Hybrid Graphics - GPU switching support"
+	depends on X86
+	depends on ACPI
+	select VGA_ARB
+	help
+	  Many laptops released in 2008/9/10 have two GPUs with a multiplexer
+	  to switch between them. This adds support for dynamic switching when
+	  X isn't running and delayed switching until the next logoff. This
+	  feature is called hybrid graphics, ATI PowerXpress, and Nvidia
+	  HybridPower.
diff --git a/linux-imx/drivers/gpu/vga/Makefile b/linux-imx/drivers/gpu/vga/Makefile
new file mode 100644
index 0000000..14ca30b
--- /dev/null
+++ b/linux-imx/drivers/gpu/vga/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_VGA_ARB)  += vgaarb.o
+obj-$(CONFIG_VGA_SWITCHEROO) += vga_switcheroo.o
diff --git a/linux-imx/drivers/gpu/vga/vga_switcheroo.c b/linux-imx/drivers/gpu/vga/vga_switcheroo.c
new file mode 100644
index 0000000..cf787e1
--- /dev/null
+++ b/linux-imx/drivers/gpu/vga/vga_switcheroo.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2010 Red Hat Inc.
+ * Author : Dave Airlie <airlied@redhat.com>
+ *
+ *
+ * Licensed under GPLv2
+ *
+ * vga_switcheroo.c - Support for laptops with dual GPUs using one set of outputs
+
+ Switcher interface - methods required for ATPX and DCM
+ - switchto - this throws the output MUX switch
+ - discrete_set_power - sets the power state for the discrete card
+
+ GPU driver interface
+ - set_gpu_state - this should do the equivalent of suspend/resume for the card
+		  - this should *not* set the discrete power state
+ - switch_check  - check if the device is in a position to switch now
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/fb.h>
+
+#include <linux/pci.h>
+#include <linux/console.h>
+#include <linux/vga_switcheroo.h>
+
+#include <linux/vgaarb.h>
+
+struct vga_switcheroo_client {
+	struct pci_dev *pdev;
+	struct fb_info *fb_info;
+	int pwr_state;
+	const struct vga_switcheroo_client_ops *ops;
+	int id;
+	bool active;
+	struct list_head list;
+};
+
+static DEFINE_MUTEX(vgasr_mutex);
+
+struct vgasr_priv {
+
+	bool active;
+	bool delayed_switch_active;
+	enum vga_switcheroo_client_id delayed_client_id;
+
+	struct dentry *debugfs_root;
+	struct dentry *switch_file;
+
+	int registered_clients;
+	struct list_head clients;
+
+	struct vga_switcheroo_handler *handler;
+};
+
+#define ID_BIT_AUDIO		0x100
+#define client_is_audio(c)	((c)->id & ID_BIT_AUDIO)
+#define client_is_vga(c)	((c)->id == -1 || !client_is_audio(c))
+#define client_id(c)		((c)->id & ~ID_BIT_AUDIO)
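
The audio bit lets a GPU's HDMI audio function share a numeric id with its VGA
client. An illustration of the encoding (VGA_SWITCHEROO_DIS comes from
<linux/vga_switcheroo.h>):

	static void id_encoding_example(void)
	{
		struct vga_switcheroo_client audio = {
			/* what register_audio_client() stores for the discrete GPU */
			.id = VGA_SWITCHEROO_DIS | ID_BIT_AUDIO,
		};

		WARN_ON(!client_is_audio(&audio));		  /* audio bit set */
		WARN_ON(client_id(&audio) != VGA_SWITCHEROO_DIS); /* bit stripped */
	}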
+
+static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
+static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
+
+/* only one switcheroo per system */
+static struct vgasr_priv vgasr_priv = {
+	.clients = LIST_HEAD_INIT(vgasr_priv.clients),
+};
+
+static bool vga_switcheroo_ready(void)
+{
+	/* we're ready if we get two clients + handler */
+	return !vgasr_priv.active &&
+	       vgasr_priv.registered_clients == 2 && vgasr_priv.handler;
+}
+
+static void vga_switcheroo_enable(void)
+{
+	int ret;
+	struct vga_switcheroo_client *client;
+
+	/* call the handler to init */
+	if (vgasr_priv.handler->init)
+		vgasr_priv.handler->init();
+
+	list_for_each_entry(client, &vgasr_priv.clients, list) {
+		if (client->id != -1)
+			continue;
+		ret = vgasr_priv.handler->get_client_id(client->pdev);
+		if (ret < 0)
+			return;
+
+		client->id = ret;
+	}
+	vga_switcheroo_debugfs_init(&vgasr_priv);
+	vgasr_priv.active = true;
+}
+
+int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
+{
+	mutex_lock(&vgasr_mutex);
+	if (vgasr_priv.handler) {
+		mutex_unlock(&vgasr_mutex);
+		return -EINVAL;
+	}
+
+	vgasr_priv.handler = handler;
+	if (vga_switcheroo_ready()) {
+		printk(KERN_INFO "vga_switcheroo: enabled\n");
+		vga_switcheroo_enable();
+	}
+	mutex_unlock(&vgasr_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(vga_switcheroo_register_handler);
+
+void vga_switcheroo_unregister_handler(void)
+{
+	mutex_lock(&vgasr_mutex);
+	vgasr_priv.handler = NULL;
+	if (vgasr_priv.active) {
+		pr_info("vga_switcheroo: disabled\n");
+		vga_switcheroo_debugfs_fini(&vgasr_priv);
+		vgasr_priv.active = false;
+	}
+	mutex_unlock(&vgasr_mutex);
+}
+EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
+
+static int register_client(struct pci_dev *pdev,
+			   const struct vga_switcheroo_client_ops *ops,
+			   int id, bool active)
+{
+	struct vga_switcheroo_client *client;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+
+	client->pwr_state = VGA_SWITCHEROO_ON;
+	client->pdev = pdev;
+	client->ops = ops;
+	client->id = id;
+	client->active = active;
+
+	mutex_lock(&vgasr_mutex);
+	list_add_tail(&client->list, &vgasr_priv.clients);
+	if (client_is_vga(client))
+		vgasr_priv.registered_clients++;
+
+	if (vga_switcheroo_ready()) {
+		printk(KERN_INFO "vga_switcheroo: enabled\n");
+		vga_switcheroo_enable();
+	}
+	mutex_unlock(&vgasr_mutex);
+	return 0;
+}
+
+int vga_switcheroo_register_client(struct pci_dev *pdev,
+				   const struct vga_switcheroo_client_ops *ops)
+{
+	return register_client(pdev, ops, -1,
+			       pdev == vga_default_device());
+}
+EXPORT_SYMBOL(vga_switcheroo_register_client);
+
+int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
+					 const struct vga_switcheroo_client_ops *ops,
+					 int id, bool active)
+{
+	return register_client(pdev, ops, id | ID_BIT_AUDIO, active);
+}
+EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
+
+static struct vga_switcheroo_client *
+find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
+{
+	struct vga_switcheroo_client *client;
+	list_for_each_entry(client, head, list)
+		if (client->pdev == pdev)
+			return client;
+	return NULL;
+}
+
+static struct vga_switcheroo_client *
+find_client_from_id(struct list_head *head, int client_id)
+{
+	struct vga_switcheroo_client *client;
+	list_for_each_entry(client, head, list)
+		if (client->id == client_id)
+			return client;
+	return NULL;
+}
+
+static struct vga_switcheroo_client *
+find_active_client(struct list_head *head)
+{
+	struct vga_switcheroo_client *client;
+	list_for_each_entry(client, head, list)
+		if (client->active && client_is_vga(client))
+			return client;
+	return NULL;
+}
+
+int vga_switcheroo_get_client_state(struct pci_dev *pdev)
+{
+	struct vga_switcheroo_client *client;
+
+	client = find_client_from_pci(&vgasr_priv.clients, pdev);
+	if (!client)
+		return VGA_SWITCHEROO_NOT_FOUND;
+	if (!vgasr_priv.active)
+		return VGA_SWITCHEROO_INIT;
+	return client->pwr_state;
+}
+EXPORT_SYMBOL(vga_switcheroo_get_client_state);
+
+void vga_switcheroo_unregister_client(struct pci_dev *pdev)
+{
+	struct vga_switcheroo_client *client;
+
+	mutex_lock(&vgasr_mutex);
+	client = find_client_from_pci(&vgasr_priv.clients, pdev);
+	if (client) {
+		if (client_is_vga(client))
+			vgasr_priv.registered_clients--;
+		list_del(&client->list);
+		kfree(client);
+	}
+	if (vgasr_priv.active && vgasr_priv.registered_clients < 2) {
+		printk(KERN_INFO "vga_switcheroo: disabled\n");
+		vga_switcheroo_debugfs_fini(&vgasr_priv);
+		vgasr_priv.active = false;
+	}
+	mutex_unlock(&vgasr_mutex);
+}
+EXPORT_SYMBOL(vga_switcheroo_unregister_client);
+
+void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
+				 struct fb_info *info)
+{
+	struct vga_switcheroo_client *client;
+
+	mutex_lock(&vgasr_mutex);
+	client = find_client_from_pci(&vgasr_priv.clients, pdev);
+	if (client)
+		client->fb_info = info;
+	mutex_unlock(&vgasr_mutex);
+}
+EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
+
+static int vga_switcheroo_show(struct seq_file *m, void *v)
+{
+	struct vga_switcheroo_client *client;
+	int i = 0;
+	mutex_lock(&vgasr_mutex);
+	list_for_each_entry(client, &vgasr_priv.clients, list) {
+		seq_printf(m, "%d:%s%s:%c:%s:%s\n", i,
+			   client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
+			   client_is_vga(client) ? "" : "-Audio",
+			   client->active ? '+' : ' ',
+			   client->pwr_state ? "Pwr" : "Off",
+			   pci_name(client->pdev));
+		i++;
+	}
+	mutex_unlock(&vgasr_mutex);
+	return 0;
+}
+
+static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, vga_switcheroo_show, NULL);
+}
+
+static int vga_switchon(struct vga_switcheroo_client *client)
+{
+	if (vgasr_priv.handler->power_state)
+		vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
+	/* call the driver callback to turn on device */
+	client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
+	client->pwr_state = VGA_SWITCHEROO_ON;
+	return 0;
+}
+
+static int vga_switchoff(struct vga_switcheroo_client *client)
+{
+	/* call the driver callback to turn off device */
+	client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
+	if (vgasr_priv.handler->power_state)
+		vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
+	client->pwr_state = VGA_SWITCHEROO_OFF;
+	return 0;
+}
+
+static void set_audio_state(int id, int state)
+{
+	struct vga_switcheroo_client *client;
+
+	client = find_client_from_id(&vgasr_priv.clients, id | ID_BIT_AUDIO);
+	if (client && client->pwr_state != state) {
+		client->ops->set_gpu_state(client->pdev, state);
+		client->pwr_state = state;
+	}
+}
+
+/* stage one happens before delay */
+static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
+{
+	struct vga_switcheroo_client *active;
+
+	active = find_active_client(&vgasr_priv.clients);
+	if (!active)
+		return 0;
+
+	if (new_client->pwr_state == VGA_SWITCHEROO_OFF)
+		vga_switchon(new_client);
+
+	vga_set_default_device(new_client->pdev);
+	return 0;
+}
+
+/* post delay */
+static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
+{
+	int ret;
+	struct vga_switcheroo_client *active;
+
+	active = find_active_client(&vgasr_priv.clients);
+	if (!active)
+		return 0;
+
+	active->active = false;
+
+	set_audio_state(active->id, VGA_SWITCHEROO_OFF);
+
+	if (new_client->fb_info) {
+		struct fb_event event;
+		console_lock();
+		event.info = new_client->fb_info;
+		fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
+		console_unlock();
+	}
+
+	ret = vgasr_priv.handler->switchto(new_client->id);
+	if (ret)
+		return ret;
+
+	if (new_client->ops->reprobe)
+		new_client->ops->reprobe(new_client->pdev);
+
+	if (active->pwr_state == VGA_SWITCHEROO_ON)
+		vga_switchoff(active);
+
+	set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
+
+	new_client->active = true;
+	return 0;
+}
+
+static bool check_can_switch(void)
+{
+	struct vga_switcheroo_client *client;
+
+	list_for_each_entry(client, &vgasr_priv.clients, list) {
+		if (!client->ops->can_switch(client->pdev)) {
+			printk(KERN_ERR "vga_switcheroo: client %x refused switch\n", client->id);
+			return false;
+		}
+	}
+	return true;
+}
+
+static ssize_t
+vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
+			     size_t cnt, loff_t *ppos)
+{
+	char usercmd[64];
+	int ret;
+	bool delay = false, can_switch;
+	bool just_mux = false;
+	int client_id = -1;
+	struct vga_switcheroo_client *client = NULL;
+
+	if (cnt > 63)
+		cnt = 63;
+
+	if (copy_from_user(usercmd, ubuf, cnt))
+		return -EFAULT;
+
+	mutex_lock(&vgasr_mutex);
+
+	if (!vgasr_priv.active) {
+		cnt = -EINVAL;
+		goto out;
+	}
+
+	/* pwr off the device not in use */
+	if (strncmp(usercmd, "OFF", 3) == 0) {
+		list_for_each_entry(client, &vgasr_priv.clients, list) {
+			if (client->active || client_is_audio(client))
+				continue;
+			set_audio_state(client->id, VGA_SWITCHEROO_OFF);
+			if (client->pwr_state == VGA_SWITCHEROO_ON)
+				vga_switchoff(client);
+		}
+		goto out;
+	}
+	/* pwr on the device not in use */
+	if (strncmp(usercmd, "ON", 2) == 0) {
+		list_for_each_entry(client, &vgasr_priv.clients, list) {
+			if (client->active || client_is_audio(client))
+				continue;
+			if (client->pwr_state == VGA_SWITCHEROO_OFF)
+				vga_switchon(client);
+			set_audio_state(client->id, VGA_SWITCHEROO_ON);
+		}
+		goto out;
+	}
+
+	/* request a delayed switch - test can we switch now */
+	if (strncmp(usercmd, "DIGD", 4) == 0) {
+		client_id = VGA_SWITCHEROO_IGD;
+		delay = true;
+	}
+
+	if (strncmp(usercmd, "DDIS", 4) == 0) {
+		client_id = VGA_SWITCHEROO_DIS;
+		delay = true;
+	}
+
+	if (strncmp(usercmd, "IGD", 3) == 0)
+		client_id = VGA_SWITCHEROO_IGD;
+
+	if (strncmp(usercmd, "DIS", 3) == 0)
+		client_id = VGA_SWITCHEROO_DIS;
+
+	if (strncmp(usercmd, "MIGD", 4) == 0) {
+		just_mux = true;
+		client_id = VGA_SWITCHEROO_IGD;
+	}
+	if (strncmp(usercmd, "MDIS", 4) == 0) {
+		just_mux = true;
+		client_id = VGA_SWITCHEROO_DIS;
+	}
+
+	if (client_id == -1)
+		goto out;
+	client = find_client_from_id(&vgasr_priv.clients, client_id);
+	if (!client)
+		goto out;
+
+	vgasr_priv.delayed_switch_active = false;
+
+	if (just_mux) {
+		ret = vgasr_priv.handler->switchto(client_id);
+		goto out;
+	}
+
+	if (client->active)
+		goto out;
+
+	/* okay we want a switch - test if devices are willing to switch */
+	can_switch = check_can_switch();
+
+	if (!can_switch && !delay)
+		goto out;
+
+	if (can_switch) {
+		ret = vga_switchto_stage1(client);
+		if (ret)
+			printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
+
+		ret = vga_switchto_stage2(client);
+		if (ret)
+			printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret);
+
+	} else {
+		printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
+		vgasr_priv.delayed_switch_active = true;
+		vgasr_priv.delayed_client_id = client_id;
+
+		ret = vga_switchto_stage1(client);
+		if (ret)
+			printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret);
+	}
+
+out:
+	mutex_unlock(&vgasr_mutex);
+	return cnt;
+}
+
+static const struct file_operations vga_switcheroo_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = vga_switcheroo_debugfs_open,
+	.write = vga_switcheroo_debugfs_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
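
From userspace, the parser above accepts OFF, ON, IGD, DIS, DIGD, DDIS, MIGD
and MDIS written to /sys/kernel/debug/vgaswitcheroo/switch (the path follows
from the debugfs setup below). A minimal caller sketch:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/debug/vgaswitcheroo/switch", O_WRONLY);

		if (fd < 0)
			return 1;
		/* request a delayed switch to the discrete GPU */
		write(fd, "DDIS", 4);
		close(fd);
		return 0;
	}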
+
+static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
+{
+	if (priv->switch_file) {
+		debugfs_remove(priv->switch_file);
+		priv->switch_file = NULL;
+	}
+	if (priv->debugfs_root) {
+		debugfs_remove(priv->debugfs_root);
+		priv->debugfs_root = NULL;
+	}
+}
+
+static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
+{
+	/* already initialised */
+	if (priv->debugfs_root)
+		return 0;
+	priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
+
+	if (!priv->debugfs_root) {
+		printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n");
+		goto fail;
+	}
+
+	priv->switch_file = debugfs_create_file("switch", 0644,
+						priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops);
+	if (!priv->switch_file) {
+		printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n");
+		goto fail;
+	}
+	return 0;
+fail:
+	vga_switcheroo_debugfs_fini(priv);
+	return -1;
+}
+
+int vga_switcheroo_process_delayed_switch(void)
+{
+	struct vga_switcheroo_client *client;
+	int ret;
+	int err = -EINVAL;
+
+	mutex_lock(&vgasr_mutex);
+	if (!vgasr_priv.delayed_switch_active)
+		goto err;
+
+	printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
+
+	client = find_client_from_id(&vgasr_priv.clients,
+				     vgasr_priv.delayed_client_id);
+	if (!client || !check_can_switch())
+		goto err;
+
+	ret = vga_switchto_stage2(client);
+	if (ret)
+		printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
+
+	vgasr_priv.delayed_switch_active = false;
+	err = 0;
+err:
+	mutex_unlock(&vgasr_mutex);
+	return err;
+}
+EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
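
On the driver side, a GPU driver registers with a small ops table. The shape
below is inferred from the callbacks this file invokes (set_gpu_state,
reprobe, can_switch); treat it as a sketch rather than the authoritative
header definition:

	static void mydrv_set_gpu_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
	{
		/* suspend/resume the device; must not touch discrete power */
	}

	static void mydrv_reprobe(struct pci_dev *pdev)
	{
		/* re-probe outputs after the MUX has moved */
	}

	static bool mydrv_can_switch(struct pci_dev *pdev)
	{
		return true;	/* e.g. refuse while device files are open */
	}

	static const struct vga_switcheroo_client_ops mydrv_switcheroo_ops = {
		.set_gpu_state	= mydrv_set_gpu_state,
		.reprobe	= mydrv_reprobe,
		.can_switch	= mydrv_can_switch,
	};

	/* at probe time: vga_switcheroo_register_client(pdev, &mydrv_switcheroo_ops); */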
diff --git a/linux-imx/drivers/gpu/vga/vgaarb.c b/linux-imx/drivers/gpu/vga/vgaarb.c
new file mode 100644
index 0000000..e893f6e
--- /dev/null
+++ b/linux-imx/drivers/gpu/vga/vgaarb.c
@@ -0,0 +1,1333 @@
+/*
+ * vgaarb.c: Implements the VGA arbitration. For details refer to
+ * Documentation/vgaarbiter.txt
+ *
+ *
+ * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
+ * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+
+#include <linux/uaccess.h>
+
+#include <linux/vgaarb.h>
+
+static void vga_arbiter_notify_clients(void);
+/*
+ * We keep a list of all vga devices in the system to speed
+ * up the various operations of the arbiter
+ */
+struct vga_device {
+	struct list_head list;
+	struct pci_dev *pdev;
+	unsigned int decodes;	/* what it decodes */
+	unsigned int owns;	/* what it owns */
+	unsigned int locks;	/* what it locks */
+	unsigned int io_lock_cnt;	/* legacy IO lock count */
+	unsigned int mem_lock_cnt;	/* legacy MEM lock count */
+	unsigned int io_norm_cnt;	/* normal IO count */
+	unsigned int mem_norm_cnt;	/* normal MEM count */
+	bool bridge_has_one_vga;
+	/* allow IRQ enable/disable hook */
+	void *cookie;
+	void (*irq_set_state)(void *cookie, bool enable);
+	unsigned int (*set_vga_decode)(void *cookie, bool decode);
+};
+
+static LIST_HEAD(vga_list);
+static int vga_count, vga_decode_count;
+static bool vga_arbiter_used;
+static DEFINE_SPINLOCK(vga_lock);
+static DECLARE_WAIT_QUEUE_HEAD(vga_wait_queue);
+
+
+static const char *vga_iostate_to_str(unsigned int iostate)
+{
+	/* Ignore VGA_RSRC_IO and VGA_RSRC_MEM */
+	iostate &= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+	switch (iostate) {
+	case VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM:
+		return "io+mem";
+	case VGA_RSRC_LEGACY_IO:
+		return "io";
+	case VGA_RSRC_LEGACY_MEM:
+		return "mem";
+	}
+	return "none";
+}
+
+static int vga_str_to_iostate(char *buf, int str_size, int *io_state)
+{
+	/* we could in theory hand out locks on IO and mem
+	 * separately to userspace but it can cause deadlocks */
+	if (strncmp(buf, "none", 4) == 0) {
+		*io_state = VGA_RSRC_NONE;
+		return 1;
+	}
+
+	/* XXX We're not checking the str_size! */
+	if (strncmp(buf, "io+mem", 6) == 0)
+		goto both;
+	else if (strncmp(buf, "io", 2) == 0)
+		goto both;
+	else if (strncmp(buf, "mem", 3) == 0)
+		goto both;
+	return 0;
+both:
+	*io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+	return 1;
+}
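+
+/* Illustrative example (not part of the driver): since IO and mem locks
+ * are deliberately tied together above, every keyword except "none" maps
+ * to the combined legacy mask:
+ *
+ *	int state;
+ *
+ *	vga_str_to_iostate("io", 2, &state);
+ *	// state == (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
+ *	vga_str_to_iostate("none", 4, &state);
+ *	// state == VGA_RSRC_NONE
+ */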
+
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
+/* this is only used as a cookie - it should not be dereferenced */
+static struct pci_dev *vga_default;
+#endif
+
+static void vga_arb_device_card_gone(struct pci_dev *pdev);
+
+/* Find somebody in our list */
+static struct vga_device *vgadev_find(struct pci_dev *pdev)
+{
+	struct vga_device *vgadev;
+
+	list_for_each_entry(vgadev, &vga_list, list)
+		if (pdev == vgadev->pdev)
+			return vgadev;
+	return NULL;
+}
+
+/* Returns the default VGA device (the one vgacon drives) */
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
+struct pci_dev *vga_default_device(void)
+{
+	return vga_default;
+}
+EXPORT_SYMBOL_GPL(vga_default_device);
+
+void vga_set_default_device(struct pci_dev *pdev)
+{
+	if (vga_default == pdev)
+		return;
+
+	pci_dev_put(vga_default);
+	vga_default = pci_dev_get(pdev);
+}
+#endif
+
+static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
+{
+	if (vgadev->irq_set_state)
+		vgadev->irq_set_state(vgadev->cookie, state);
+}
+
+/* If the VGA arbiter is never used we should avoid
+   turning off anything anywhere, since old X servers get
+   confused about the boot device not being VGA */
+static void vga_check_first_use(void)
+{
+	/* we should inform all GPUs in the system that
+	 * VGA arb has occurred and to try and disable resources
+	 * if they can */
+	if (!vga_arbiter_used) {
+		vga_arbiter_used = true;
+		vga_arbiter_notify_clients();
+	}
+}
+
+static struct vga_device *__vga_tryget(struct vga_device *vgadev,
+				       unsigned int rsrc)
+{
+	unsigned int wants, legacy_wants, match;
+	struct vga_device *conflict;
+	unsigned int pci_bits;
+	u32 flags = 0;
+
+	/* Account for "normal" resources to lock. If we decode the legacy
+	 * counterpart, we need to request it as well.
+	 */
+	if ((rsrc & VGA_RSRC_NORMAL_IO) &&
+	    (vgadev->decodes & VGA_RSRC_LEGACY_IO))
+		rsrc |= VGA_RSRC_LEGACY_IO;
+	if ((rsrc & VGA_RSRC_NORMAL_MEM) &&
+	    (vgadev->decodes & VGA_RSRC_LEGACY_MEM))
+		rsrc |= VGA_RSRC_LEGACY_MEM;
+
+	pr_debug("%s: %d\n", __func__, rsrc);
+	pr_debug("%s: owns: %d\n", __func__, vgadev->owns);
+
+	/* Check what resources we need to acquire */
+	wants = rsrc & ~vgadev->owns;
+
+	/* We already own everything, just mark locked & bye bye */
+	if (wants == 0)
+		goto lock_them;
+
+	/* We don't need to request a legacy resource, we just enable
+	 * appropriate decoding and go
+	 */
+	legacy_wants = wants & VGA_RSRC_LEGACY_MASK;
+	if (legacy_wants == 0)
+		goto enable_them;
+
+	/* Ok, we do need legacy resources; find out which devices we need to kick off */
+	list_for_each_entry(conflict, &vga_list, list) {
+		unsigned int lwants = legacy_wants;
+		unsigned int change_bridge = 0;
+
+		/* Don't conflict with myself */
+		if (vgadev == conflict)
+			continue;
+
+		/* Check if the architecture allows a conflict between those
+		 * 2 devices or if they are on separate domains
+		 */
+		if (!vga_conflicts(vgadev->pdev, conflict->pdev))
+			continue;
+
+		/* We have a possible conflict. before we go further, we must
+		 * check if we sit on the same bus as the conflicting device.
+		 * if we don't, then we must tie both IO and MEM resources
+		 * together since there is only a single bit controlling
+		 * VGA forwarding on P2P bridges
+		 */
+		if (vgadev->pdev->bus != conflict->pdev->bus) {
+			change_bridge = 1;
+			lwants = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+		}
+
+		/* Check if the conflicting device holds a lock on the
+		 * resource. If it does, return the conflicting entry.
+		 */
+		if (conflict->locks & lwants)
+			return conflict;
+
+		/* Ok, now check if it owns the resource we want. We don't need
+		 * to check "decodes" since it should be impossible to own
+		 * legacy resources you don't decode, unless there is a bug
+		 * in this code...
+		 */
+		WARN_ON(conflict->owns & ~conflict->decodes);
+		match = lwants & conflict->owns;
+		if (!match)
+			continue;
+
+		/* It holds no lock on the resources, so we can take
+		 * them away from it.
+		 */
+
+		flags = 0;
+		pci_bits = 0;
+
+		if (!conflict->bridge_has_one_vga) {
+			vga_irq_set_state(conflict, false);
+			flags |= PCI_VGA_STATE_CHANGE_DECODES;
+			if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+				pci_bits |= PCI_COMMAND_MEMORY;
+			if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+				pci_bits |= PCI_COMMAND_IO;
+		}
+
+		if (change_bridge)
+			flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
+
+		pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
+		conflict->owns &= ~lwants;
+		/* If he also owned non-legacy, that is no longer the case */
+		if (lwants & VGA_RSRC_LEGACY_MEM)
+			conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
+		if (lwants & VGA_RSRC_LEGACY_IO)
+			conflict->owns &= ~VGA_RSRC_NORMAL_IO;
+	}
+
+enable_them:
+	/* Every conflicting device has been disabled; now enable ourselves.
+	 * Make sure we don't mark a bit in "owns" that we don't
+	 * also have in "decodes". We can lock resources we don't decode but
+	 * not own them.
+	 */
+	flags = 0;
+	pci_bits = 0;
+
+	if (!vgadev->bridge_has_one_vga) {
+		flags |= PCI_VGA_STATE_CHANGE_DECODES;
+		if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+			pci_bits |= PCI_COMMAND_MEMORY;
+		if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+			pci_bits |= PCI_COMMAND_IO;
+	}
+	if (wants & VGA_RSRC_LEGACY_MASK)
+		flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
+
+	pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
+
+	if (!vgadev->bridge_has_one_vga)
+		vga_irq_set_state(vgadev, true);
+	vgadev->owns |= (wants & vgadev->decodes);
+lock_them:
+	vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
+	if (rsrc & VGA_RSRC_LEGACY_IO)
+		vgadev->io_lock_cnt++;
+	if (rsrc & VGA_RSRC_LEGACY_MEM)
+		vgadev->mem_lock_cnt++;
+	if (rsrc & VGA_RSRC_NORMAL_IO)
+		vgadev->io_norm_cnt++;
+	if (rsrc & VGA_RSRC_NORMAL_MEM)
+		vgadev->mem_norm_cnt++;
+
+	return NULL;
+}
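+
+/* Worked example (illustrative): suppose card A currently owns legacy io+mem
+ * and card B, sitting on a different bus, asks for VGA_RSRC_LEGACY_IO. B
+ * doesn't own it, so we walk the list and find A conflicting. Because the
+ * two sit on different buses, lwants is widened to io+mem (a P2P bridge has
+ * only one VGA-forwarding bit). A holds no lock, so A is disabled through
+ * pci_set_vga_state() and stripped of its "owns" bits; B is then enabled
+ * and marked as owning whatever it also decodes.
+ */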
+
+static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
+{
+	unsigned int old_locks = vgadev->locks;
+
+	pr_debug("%s\n", __func__);
+
+	/* Update our counters, and account for equivalent legacy resources
+	 * if we decode them
+	 */
+	if ((rsrc & VGA_RSRC_NORMAL_IO) && vgadev->io_norm_cnt > 0) {
+		vgadev->io_norm_cnt--;
+		if (vgadev->decodes & VGA_RSRC_LEGACY_IO)
+			rsrc |= VGA_RSRC_LEGACY_IO;
+	}
+	if ((rsrc & VGA_RSRC_NORMAL_MEM) && vgadev->mem_norm_cnt > 0) {
+		vgadev->mem_norm_cnt--;
+		if (vgadev->decodes & VGA_RSRC_LEGACY_MEM)
+			rsrc |= VGA_RSRC_LEGACY_MEM;
+	}
+	if ((rsrc & VGA_RSRC_LEGACY_IO) && vgadev->io_lock_cnt > 0)
+		vgadev->io_lock_cnt--;
+	if ((rsrc & VGA_RSRC_LEGACY_MEM) && vgadev->mem_lock_cnt > 0)
+		vgadev->mem_lock_cnt--;
+
+	/* Just clear lock bits, we do lazy operations so we don't really
+	 * have to bother about anything else at this point
+	 */
+	if (vgadev->io_lock_cnt == 0)
+		vgadev->locks &= ~VGA_RSRC_LEGACY_IO;
+	if (vgadev->mem_lock_cnt == 0)
+		vgadev->locks &= ~VGA_RSRC_LEGACY_MEM;
+
+	/* Kick the wait queue in case somebody was waiting if we actually
+	 * released something
+	 */
+	if (old_locks != vgadev->locks)
+		wake_up_all(&vga_wait_queue);
+}
+
+int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
+{
+	struct vga_device *vgadev, *conflict;
+	unsigned long flags;
+	wait_queue_t wait;
+	int rc = 0;
+
+	vga_check_first_use();
+	/* Callers should check for this, but let's be sure... */
+	if (pdev == NULL)
+		pdev = vga_default_device();
+	if (pdev == NULL)
+		return 0;
+
+	for (;;) {
+		spin_lock_irqsave(&vga_lock, flags);
+		vgadev = vgadev_find(pdev);
+		if (vgadev == NULL) {
+			spin_unlock_irqrestore(&vga_lock, flags);
+			rc = -ENODEV;
+			break;
+		}
+		conflict = __vga_tryget(vgadev, rsrc);
+		spin_unlock_irqrestore(&vga_lock, flags);
+		if (conflict == NULL)
+			break;
+
+		/* We have a conflict, we wait until somebody kicks the
+		 * work queue. Currently we have one work queue that we
+		 * kick each time some resources are released, but it would
+		 * be fairly easy to have a per device one so that we only
+		 * need to attach to the conflicting device
+		 */
+		init_waitqueue_entry(&wait, current);
+		add_wait_queue(&vga_wait_queue, &wait);
+		set_current_state(interruptible ?
+				  TASK_INTERRUPTIBLE :
+				  TASK_UNINTERRUPTIBLE);
+		if (interruptible && signal_pending(current)) {
+			/* remove the on-stack wait entry before bailing out */
+			__set_current_state(TASK_RUNNING);
+			remove_wait_queue(&vga_wait_queue, &wait);
+			rc = -EINTR;
+			break;
+		}
+		schedule();
+		remove_wait_queue(&vga_wait_queue, &wait);
+		set_current_state(TASK_RUNNING);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(vga_get);
+
+int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
+{
+	struct vga_device *vgadev;
+	unsigned long flags;
+	int rc = 0;
+
+	vga_check_first_use();
+
+	/* Callers should check for this, but let's be sure... */
+	if (pdev == NULL)
+		pdev = vga_default_device();
+	if (pdev == NULL)
+		return 0;
+	spin_lock_irqsave(&vga_lock, flags);
+	vgadev = vgadev_find(pdev);
+	if (vgadev == NULL) {
+		rc = -ENODEV;
+		goto bail;
+	}
+	if (__vga_tryget(vgadev, rsrc))
+		rc = -EBUSY;
+bail:
+	spin_unlock_irqrestore(&vga_lock, flags);
+	return rc;
+}
+EXPORT_SYMBOL(vga_tryget);
+
+void vga_put(struct pci_dev *pdev, unsigned int rsrc)
+{
+	struct vga_device *vgadev;
+	unsigned long flags;
+
+	/* Callers should check for this, but let's be sure... */
+	if (pdev == NULL)
+		pdev = vga_default_device();
+	if (pdev == NULL)
+		return;
+	spin_lock_irqsave(&vga_lock, flags);
+	vgadev = vgadev_find(pdev);
+	if (vgadev == NULL)
+		goto bail;
+	__vga_put(vgadev, rsrc);
+bail:
+	spin_unlock_irqrestore(&vga_lock, flags);
+}
+EXPORT_SYMBOL(vga_put);
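+
+/* Usage sketch (illustrative, not part of this file): a GPU or framebuffer
+ * driver that must touch the legacy VGA ranges would bracket the access
+ * with vga_get()/vga_put(). The function below is hypothetical:
+ *
+ *	static int example_read_vga_regs(struct pci_dev *pdev)
+ *	{
+ *		int rc;
+ *
+ *		rc = vga_get(pdev, VGA_RSRC_LEGACY_IO, 1);   // interruptible
+ *		if (rc)
+ *			return rc;
+ *		// ... safely poke the 0x3c0-0x3df legacy IO ports here ...
+ *		vga_put(pdev, VGA_RSRC_LEGACY_IO);
+ *		return 0;
+ *	}
+ *
+ * vga_get() may block in schedule(), so it must not be called from atomic
+ * context; vga_tryget() is the non-blocking variant.
+ */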
+
+/* Rules for using a bridge to control a VGA descendant's decoding:
+   if a bridge has only one VGA descendant then it can be used
+   to control the VGA routing for that device.
+   It should always use the bridge closest to the device to control it.
+   If a bridge has a direct VGA descendant, but also has a sub-bridge
+   VGA descendant, then we cannot use that bridge to control the direct
+   VGA descendant.
+   So for every device we register, we need to iterate all its parent
+   bridges so that we can invalidate any devices using them properly.
+*/
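+/* Example (illustrative): with the topology
+ *
+ *	root --- bridge A --- GPU0
+ *	     \-- bridge B --- GPU1
+ *
+ * each GPU is the only VGA descendant of its parent bridge, so bridge
+ * control is usable for both and bridge_has_one_vga stays true. If GPU1
+ * instead sat behind a sub-bridge underneath bridge A, bridge A would have
+ * two VGA descendants: GPU0 then gets bridge_has_one_vga = false, while
+ * GPU1 can still be controlled through its own sub-bridge.
+ */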
+static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
+{
+	struct vga_device *same_bridge_vgadev;
+	struct pci_bus *new_bus, *bus;
+	struct pci_dev *new_bridge, *bridge;
+
+	vgadev->bridge_has_one_vga = true;
+
+	if (list_empty(&vga_list))
+		return;
+
+	/* okay, iterate the new device's bridge hierarchy */
+	new_bus = vgadev->pdev->bus;
+	while (new_bus) {
+		new_bridge = new_bus->self;
+
+		/* go through list of devices already registered */
+		list_for_each_entry(same_bridge_vgadev, &vga_list, list) {
+			bus = same_bridge_vgadev->pdev->bus;
+			bridge = bus->self;
+
+			/* see if they share a bridge with this device */
+			if (new_bridge == bridge) {
+				/* if their direct parent bridge is the same
+				   as any bridge of this device then it can't be used
+				   for that device */
+				same_bridge_vgadev->bridge_has_one_vga = false;
+			}
+
+			/* now iterate the previous device's bridge hierarchy:
+			   if the new device's parent bridge is in the other
+			   device's hierarchy then we can't use it to control
+			   this device */
+			while (bus) {
+				bridge = bus->self;
+				if (bridge) {
+					if (bridge == vgadev->pdev->bus->self)
+						vgadev->bridge_has_one_vga = false;
+				}
+				bus = bus->parent;
+			}
+		}
+		new_bus = new_bus->parent;
+	}
+}
+
+/*
+ * Currently, we assume that the "initial" setup of the system is
+ * not sane, that is, we come up with conflicting devices and let
+ * the arbiter's clients decide whether or not each device decodes
+ * the legacy ranges.
+ */
+static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
+{
+	struct vga_device *vgadev;
+	unsigned long flags;
+	struct pci_bus *bus;
+	struct pci_dev *bridge;
+	u16 cmd;
+
+	/* Only deal with VGA class devices */
+	if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+		return false;
+
+	/* Allocate structure */
+	vgadev = kzalloc(sizeof(struct vga_device), GFP_KERNEL);
+	if (vgadev == NULL) {
+		pr_err("vgaarb: failed to allocate VGA arbiter data\n");
+		/* What to do on allocation failure ? For now, let's
+		 * just do nothing, I'm not sure there is anything saner
+		 * to be done
+		 */
+		return false;
+	}
+
+	/* Take lock & check for duplicates */
+	spin_lock_irqsave(&vga_lock, flags);
+	if (vgadev_find(pdev) != NULL) {
+		BUG_ON(1);
+		goto fail;
+	}
+	vgadev->pdev = pdev;
+
+	/* By default, assume we decode everything */
+	vgadev->decodes = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+			  VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+
+	/* by default mark it as decoding */
+	vga_decode_count++;
+	/* Mark that we "own" resources based on our enables, we will
+	 * clear that below if the bridge isn't forwarding
+	 */
+	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+	if (cmd & PCI_COMMAND_IO)
+		vgadev->owns |= VGA_RSRC_LEGACY_IO;
+	if (cmd & PCI_COMMAND_MEMORY)
+		vgadev->owns |= VGA_RSRC_LEGACY_MEM;
+
+	/* Check if VGA cycles can get down to us */
+	bus = pdev->bus;
+	while (bus) {
+		bridge = bus->self;
+		if (bridge) {
+			u16 l;
+			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
+					     &l);
+			if (!(l & PCI_BRIDGE_CTL_VGA)) {
+				vgadev->owns = 0;
+				break;
+			}
+		}
+		bus = bus->parent;
+	}
+
+	/* Deal with VGA default device. Use first enabled one
+	 * by default if arch doesn't have its own hook
+	 */
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
+	if (vga_default == NULL &&
+	    ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK))
+		vga_set_default_device(pdev);
+#endif
+
+	vga_arbiter_check_bridge_sharing(vgadev);
+
+	/* Add to the list */
+	list_add(&vgadev->list, &vga_list);
+	vga_count++;
+	pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
+		pci_name(pdev),
+		vga_iostate_to_str(vgadev->decodes),
+		vga_iostate_to_str(vgadev->owns),
+		vga_iostate_to_str(vgadev->locks));
+
+	spin_unlock_irqrestore(&vga_lock, flags);
+	return true;
+fail:
+	spin_unlock_irqrestore(&vga_lock, flags);
+	kfree(vgadev);
+	return false;
+}
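+
+/* Trace (illustrative) for a typical boot VGA card: PCI_COMMAND has IO and
+ * MEM enabled, so "owns" starts as legacy io+mem; every parent bridge has
+ * PCI_BRIDGE_CTL_VGA set, so "owns" survives the bridge walk; and being the
+ * first such device, it becomes the default VGA device. A second card whose
+ * bridges don't forward VGA cycles ends up with owns=none instead.
+ */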
+
+static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
+{
+	struct vga_device *vgadev;
+	unsigned long flags;
+	bool ret = true;
+
+	spin_lock_irqsave(&vga_lock, flags);
+	vgadev = vgadev_find(pdev);
+	if (vgadev == NULL) {
+		ret = false;
+		goto bail;
+	}
+
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
+	if (vga_default == pdev)
+		vga_set_default_device(NULL);
+#endif
+
+	if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
+		vga_decode_count--;
+
+	/* Remove entry from list */
+	list_del(&vgadev->list);
+	vga_count--;
+	/* Notify userland driver that the device is gone so it discards
+	 * its copies of the pci_dev pointer
+	 */
+	vga_arb_device_card_gone(pdev);
+
+	/* Wake up all possible waiters */
+	wake_up_all(&vga_wait_queue);
+bail:
+	spin_unlock_irqrestore(&vga_lock, flags);
+	kfree(vgadev);
+	return ret;
+}
+
+/* this is called with vga_lock held */
+static inline void vga_update_device_decodes(struct vga_device *vgadev,
+					     int new_decodes)
+{
+	int old_decodes;
+	struct vga_device *new_vgadev, *conflict;
+
+	old_decodes = vgadev->decodes;
+	vgadev->decodes = new_decodes;
+
+	pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
+		pci_name(vgadev->pdev),
+		vga_iostate_to_str(old_decodes),
+		vga_iostate_to_str(vgadev->decodes),
+		vga_iostate_to_str(vgadev->owns));
+
+	/* if we own the decodes we should move them along to
+	   another card */
+	if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
+		/* set us to own nothing */
+		vgadev->owns &= ~old_decodes;
+		list_for_each_entry(new_vgadev, &vga_list, list) {
+			if ((new_vgadev != vgadev) &&
+			    (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
+				pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n",
+					pci_name(vgadev->pdev),
+					pci_name(new_vgadev->pdev));
+				conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
+				if (!conflict)
+					__vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
+				break;
+			}
+		}
+	}
+
+	/* change decodes counter */
+	if (old_decodes != new_decodes) {
+		if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
+			vga_decode_count++;
+		else
+			vga_decode_count--;
+	}
+	pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
+}
+
+static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+{
+	struct vga_device *vgadev;
+	unsigned long flags;
+
+	decodes &= VGA_RSRC_LEGACY_MASK;
+
+	spin_lock_irqsave(&vga_lock, flags);
+	vgadev = vgadev_find(pdev);
+	if (vgadev == NULL)
+		goto bail;
+
+	/* don't let userspace futz with kernel driver decodes */
+	if (userspace && vgadev->set_vga_decode)
+		goto bail;
+
+	/* update the device decodes + counter */
+	vga_update_device_decodes(vgadev, decodes);
+
+	/* XXX if somebody is going from "doesn't decode" to "decodes" state
+	 * here, additional care must be taken as we may have pending
+	 * ownership of non-legacy regions ...
+	 */
+bail:
+	spin_unlock_irqrestore(&vga_lock, flags);
+}
+
+void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes)
+{
+	__vga_set_legacy_decoding(pdev, decodes, false);
+}
+EXPORT_SYMBOL(vga_set_legacy_decoding);
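+
+/* Illustrative sketch: a driver whose hardware can stop responding to the
+ * legacy VGA ranges entirely (a hypothetical capability) would report that
+ * with:
+ *
+ *	vga_set_legacy_decoding(pdev, VGA_RSRC_NONE);
+ *
+ * after which the card no longer conflicts in legacy arbitration. Bits
+ * outside VGA_RSRC_LEGACY_MASK are masked off above, so only the legacy
+ * IO/MEM decode state can be changed this way.
+ */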
+
+/* call with NULL to unregister */
+int vga_client_register(struct pci_dev *pdev, void *cookie,
+			void (*irq_set_state)(void *cookie, bool state),
+			unsigned int (*set_vga_decode)(void *cookie, bool decode))
+{
+	int ret = -ENODEV;
+	struct vga_device *vgadev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vga_lock, flags);
+	vgadev = vgadev_find(pdev);
+	if (!vgadev)
+		goto bail;
+
+	vgadev->irq_set_state = irq_set_state;
+	vgadev->set_vga_decode = set_vga_decode;
+	vgadev->cookie = cookie;
+	ret = 0;
+
+bail:
+	spin_unlock_irqrestore(&vga_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(vga_client_register);
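+
+/* Registration sketch (illustrative; all names below are hypothetical): a
+ * KMS driver would typically register its callbacks once the PCI device is
+ * enabled:
+ *
+ *	static void mydrv_irq_set_state(void *cookie, bool enable)
+ *	{
+ *		// mask/unmask the GPU interrupt while the arbiter flips
+ *		// the device's COMMAND register on and off
+ *	}
+ *
+ *	static unsigned int mydrv_set_vga_decode(void *cookie, bool decode)
+ *	{
+ *		// program the hardware, then report what is still decoded
+ *		if (decode)
+ *			return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ *			       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ *		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ *	}
+ *
+ *	vga_client_register(pdev, mydrv_priv, mydrv_irq_set_state,
+ *			    mydrv_set_vga_decode);
+ *
+ * Calling vga_client_register(pdev, NULL, NULL, NULL) unregisters the
+ * client, as noted above.
+ */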
+
+/*
+ * Char driver implementation
+ *
+ * Semantics is:
+ *
+ *  open       : open a user instance of the arbiter. By default, it is
+ *                attached to the default VGA device of the system.
+ *
+ *  close      : close user instance, release locks
+ *
+ *  read       : return a string indicating the status of the target.
+ *                An IO state string is of the form {io,mem,io+mem,none};
+ *                mc and ic are respectively the mem and io lock counts (for
+ *                debugging/diagnostic only). "decodes" indicates what the
+ *                card currently decodes, "owns" indicates what is currently
+ *                enabled on it, and "locks" indicates what is locked by this
+ *                card. If the card is unplugged, we get "invalid" as the
+ *                card_ID, and an -ENODEV error is returned for any command
+ *                until a new card is targeted.
+ *
+ *   "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)"
+ *
+ * write       : write a command to the arbiter. List of commands is:
+ *
+ *   target <card_ID>   : switch target to card <card_ID> (see below)
+ *   lock <io_state>    : acquires locks on target ("none" is invalid io_state)
+ *   trylock <io_state> : non-blocking acquire locks on target
+ *   unlock <io_state>  : release locks on target
+ *   unlock all         : release all locks on target held by this user
+ *   decodes <io_state> : set the legacy decoding attributes for the card
+ *
+ * poll         : an event is raised if something changes on any card (not just the target)
+ *
+ * card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default"
+ * to go back to the system default card (see the "target" handling below).
+ * Currently, only PCI is supported as a prefix, but the userland API may
+ * support other bus types in the future, even if the current kernel
+ * implementation doesn't.
+ *
+ * Note about locks:
+ *
+ * The driver keeps track of which user has what locks on which card. It
+ * supports stacking, like the kernel one. This complicates the implementation
+ * a bit, but makes the arbiter more tolerant of userspace problems and able
+ * to properly clean up in all cases when a process dies.
+ * Currently, a max of 16 cards simultaneously can have locks issued from
+ * userspace for a given user (file descriptor instance) of the arbiter.
+ *
+ * A hook inside the module is notified when devices are added to or removed
+ * from the system (e.g. hot-unplugged), so they are automatically added to
+ * or removed from the arbiter as well.
+ */
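+
+/* Userspace sketch (illustrative): locking a hypothetical card at
+ * 0000:01:00.0 through the misc device this file registers:
+ *
+ *	int fd = open("/dev/vga_arbiter", O_RDWR);
+ *
+ *	write(fd, "target PCI:0000:01:00.0", 23);
+ *	write(fd, "lock io+mem", 11);
+ *	// ... exclusive access to the legacy VGA ranges here ...
+ *	write(fd, "unlock all", 10);
+ *	close(fd);
+ *
+ * A read() on fd in between would return something like
+ * "count:1,PCI:0000:01:00.0,decodes=io+mem,owns=io+mem,locks=io+mem(1:1)".
+ */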
+
+#define MAX_USER_CARDS         CONFIG_VGA_ARB_MAX_GPUS
+#define PCI_INVALID_CARD       ((struct pci_dev *)-1UL)
+
+/*
+ * Each user has an array of these, tracking which cards have locks
+ */
+struct vga_arb_user_card {
+	struct pci_dev *pdev;
+	unsigned int mem_cnt;
+	unsigned int io_cnt;
+};
+
+struct vga_arb_private {
+	struct list_head list;
+	struct pci_dev *target;
+	struct vga_arb_user_card cards[MAX_USER_CARDS];
+	spinlock_t lock;
+};
+
+static LIST_HEAD(vga_user_list);
+static DEFINE_SPINLOCK(vga_user_lock);
+
+/*
+ * This function parses a string in the format "PCI:domain:bus:dev.fn" and
+ * returns the respective values. If the string is not in this format,
+ * it returns 0.
+ */
+static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain,
+			       unsigned int *bus, unsigned int *devfn)
+{
+	int n;
+	unsigned int slot, func;
+
+	n = sscanf(buf, "PCI:%x:%x:%x.%x", domain, bus, &slot, &func);
+	if (n != 4)
+		return 0;
+
+	*devfn = PCI_DEVFN(slot, func);
+
+	return 1;
+}
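+
+/* Example (illustrative): the input "PCI:0000:01:00.0" yields
+ * *domain == 0x0000, *bus == 0x01, *devfn == PCI_DEVFN(0x00, 0x0) and a
+ * return of 1; anything that doesn't scan as four hex fields returns 0.
+ */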
+
+static ssize_t vga_arb_read(struct file *file, char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	struct vga_arb_private *priv = file->private_data;
+	struct vga_device *vgadev;
+	struct pci_dev *pdev;
+	unsigned long flags;
+	size_t len;
+	int rc;
+	char *lbuf;
+
+	lbuf = kmalloc(1024, GFP_KERNEL);
+	if (lbuf == NULL)
+		return -ENOMEM;
+
+	/* Shields against vga_arb_device_card_gone (pci_dev going
+	 * away), and allows access to vga list
+	 */
+	spin_lock_irqsave(&vga_lock, flags);
+
+	/* If we are targeting the default, use it */
+	pdev = priv->target;
+	if (pdev == NULL || pdev == PCI_INVALID_CARD) {
+		spin_unlock_irqrestore(&vga_lock, flags);
+		len = sprintf(lbuf, "invalid");
+		goto done;
+	}
+
+	/* Find card vgadev structure */
+	vgadev = vgadev_find(pdev);
+	if (vgadev == NULL) {
+		/* Wow, it's not in the list, that shouldn't happen; fix
+		 * ourselves up and report an invalid card
+		 */
+		if (pdev == priv->target)
+			vga_arb_device_card_gone(pdev);
+		spin_unlock_irqrestore(&vga_lock, flags);
+		len = sprintf(lbuf, "invalid");
+		goto done;
+	}
+
+	/* Fill the buffer with info */
+	len = snprintf(lbuf, 1024,
+		       "count:%d,PCI:%s,decodes=%s,owns=%s,locks=%s(%d:%d)\n",
+		       vga_decode_count, pci_name(pdev),
+		       vga_iostate_to_str(vgadev->decodes),
+		       vga_iostate_to_str(vgadev->owns),
+		       vga_iostate_to_str(vgadev->locks),
+		       vgadev->io_lock_cnt, vgadev->mem_lock_cnt);
+
+	spin_unlock_irqrestore(&vga_lock, flags);
+done:
+
+	/* Copy that to user */
+	if (len > count)
+		len = count;
+	rc = copy_to_user(buf, lbuf, len);
+	kfree(lbuf);
+	if (rc)
+		return -EFAULT;
+	return len;
+}
+
+/*
+ * TODO: To avoid parsing inside the kernel and to improve speed, we may
+ * consider using an ioctl here
+ */
+static ssize_t vga_arb_write(struct file *file, const char __user *buf,
+			     size_t count, loff_t *ppos)
+{
+	struct vga_arb_private *priv = file->private_data;
+	struct vga_arb_user_card *uc = NULL;
+	struct pci_dev *pdev;
+
+	unsigned int io_state;
+
+	char *kbuf, *curr_pos;
+	size_t remaining = count;
+
+	int ret_val;
+	int i;
+
+	kbuf = kmalloc(count + 1, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	if (copy_from_user(kbuf, buf, count)) {
+		kfree(kbuf);
+		return -EFAULT;
+	}
+	curr_pos = kbuf;
+	kbuf[count] = '\0';	/* Just to make sure... */
+
+	if (strncmp(curr_pos, "lock ", 5) == 0) {
+		curr_pos += 5;
+		remaining -= 5;
+
+		pr_debug("client 0x%p called 'lock'\n", priv);
+
+		if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
+			ret_val = -EPROTO;
+			goto done;
+		}
+		if (io_state == VGA_RSRC_NONE) {
+			ret_val = -EPROTO;
+			goto done;
+		}
+
+		pdev = priv->target;
+		if (priv->target == NULL) {
+			ret_val = -ENODEV;
+			goto done;
+		}
+
+		vga_get_uninterruptible(pdev, io_state);
+
+		/* Update the client's locks lists... */
+		for (i = 0; i < MAX_USER_CARDS; i++) {
+			if (priv->cards[i].pdev == pdev) {
+				if (io_state & VGA_RSRC_LEGACY_IO)
+					priv->cards[i].io_cnt++;
+				if (io_state & VGA_RSRC_LEGACY_MEM)
+					priv->cards[i].mem_cnt++;
+				break;
+			}
+		}
+
+		ret_val = count;
+		goto done;
+	} else if (strncmp(curr_pos, "unlock ", 7) == 0) {
+		curr_pos += 7;
+		remaining -= 7;
+
+		pr_debug("client 0x%p called 'unlock'\n", priv);
+
+		if (strncmp(curr_pos, "all", 3) == 0)
+			io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+		else {
+			if (!vga_str_to_iostate
+			    (curr_pos, remaining, &io_state)) {
+				ret_val = -EPROTO;
+				goto done;
+			}
+			/* TODO: Add this?
+			   if (io_state == VGA_RSRC_NONE) {
+			   ret_val = -EPROTO;
+			   goto done;
+			   }
+			  */
+		}
+
+		pdev = priv->target;
+		if (priv->target == NULL) {
+			ret_val = -ENODEV;
+			goto done;
+		}
+		for (i = 0; i < MAX_USER_CARDS; i++) {
+			if (priv->cards[i].pdev == pdev) {
+				uc = &priv->cards[i];
+				break;
+			}
+		}
+
+		if (!uc) {
+			ret_val = -EINVAL;
+			goto done;
+		}
+
+		if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
+			ret_val = -EINVAL;
+			goto done;
+		}
+
+		if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
+			ret_val = -EINVAL;
+			goto done;
+		}
+
+		vga_put(pdev, io_state);
+
+		if (io_state & VGA_RSRC_LEGACY_IO)
+			uc->io_cnt--;
+		if (io_state & VGA_RSRC_LEGACY_MEM)
+			uc->mem_cnt--;
+
+		ret_val = count;
+		goto done;
+	} else if (strncmp(curr_pos, "trylock ", 8) == 0) {
+		curr_pos += 8;
+		remaining -= 8;
+
+		pr_debug("client 0x%p called 'trylock'\n", priv);
+
+		if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
+			ret_val = -EPROTO;
+			goto done;
+		}
+		/* TODO: Add this?
+		   if (io_state == VGA_RSRC_NONE) {
+		   ret_val = -EPROTO;
+		   goto done;
+		   }
+		 */
+
+		pdev = priv->target;
+		if (priv->target == NULL) {
+			ret_val = -ENODEV;
+			goto done;
+		}
+
+		if (vga_tryget(pdev, io_state) == 0) {	/* 0 means we got the locks */
+			/* Update the client's locks lists... */
+			for (i = 0; i < MAX_USER_CARDS; i++) {
+				if (priv->cards[i].pdev == pdev) {
+					if (io_state & VGA_RSRC_LEGACY_IO)
+						priv->cards[i].io_cnt++;
+					if (io_state & VGA_RSRC_LEGACY_MEM)
+						priv->cards[i].mem_cnt++;
+					break;
+				}
+			}
+			ret_val = count;
+			goto done;
+		} else {
+			ret_val = -EBUSY;
+			goto done;
+		}
+
+	} else if (strncmp(curr_pos, "target ", 7) == 0) {
+		unsigned int domain, bus, devfn;
+		struct vga_device *vgadev;
+
+		curr_pos += 7;
+		remaining -= 7;
+		pr_debug("client 0x%p called 'target'\n", priv);
+		/* if target is default */
+		if (!strncmp(curr_pos, "default", 7))
+			pdev = pci_dev_get(vga_default_device());
+		else {
+			if (!vga_pci_str_to_vars(curr_pos, remaining,
+						 &domain, &bus, &devfn)) {
+				ret_val = -EPROTO;
+				goto done;
+			}
+			pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
+				domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+			pdev = pci_get_domain_bus_and_slot(domain, bus, devfn);
+			pr_debug("vgaarb: pdev %p\n", pdev);
+			if (!pdev) {
+				pr_err("vgaarb: invalid PCI address %x:%x:%x\n",
+					domain, bus, devfn);
+				ret_val = -ENODEV;
+				goto done;
+			}
+		}
+
+		vgadev = vgadev_find(pdev);
+		pr_debug("vgaarb: vgadev %p\n", vgadev);
+		if (vgadev == NULL) {
+			pr_err("vgaarb: this pci device is not a vga device\n");
+			pci_dev_put(pdev);
+			ret_val = -ENODEV;
+			goto done;
+		}
+
+		priv->target = pdev;
+		for (i = 0; i < MAX_USER_CARDS; i++) {
+			if (priv->cards[i].pdev == pdev)
+				break;
+			if (priv->cards[i].pdev == NULL) {
+				priv->cards[i].pdev = pdev;
+				priv->cards[i].io_cnt = 0;
+				priv->cards[i].mem_cnt = 0;
+				break;
+			}
+		}
+		if (i == MAX_USER_CARDS) {
+			pr_err("vgaarb: maximum user cards (%d) number reached!\n",
+				MAX_USER_CARDS);
+			pci_dev_put(pdev);
+			/* XXX: which value to return? */
+			ret_val = -ENOMEM;
+			goto done;
+		}
+
+		ret_val = count;
+		pci_dev_put(pdev);
+		goto done;
+
+	} else if (strncmp(curr_pos, "decodes ", 8) == 0) {
+		curr_pos += 8;
+		remaining -= 8;
+		pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv);
+
+		if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
+			ret_val = -EPROTO;
+			goto done;
+		}
+		pdev = priv->target;
+		if (priv->target == NULL) {
+			ret_val = -ENODEV;
+			goto done;
+		}
+
+		__vga_set_legacy_decoding(pdev, io_state, true);
+		ret_val = count;
+		goto done;
+	}
+	/* If we got here, the message written is not part of the protocol! */
+	kfree(kbuf);
+	return -EPROTO;
+
+done:
+	kfree(kbuf);
+	return ret_val;
+}
+
+static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait)
+{
+	struct vga_arb_private *priv = file->private_data;
+
+	pr_debug("%s\n", __func__);
+
+	if (priv == NULL)
+		return -ENODEV;
+	poll_wait(file, &vga_wait_queue, wait);
+	return POLLIN;
+}
+
+static int vga_arb_open(struct inode *inode, struct file *file)
+{
+	struct vga_arb_private *priv;
+	unsigned long flags;
+
+	pr_debug("%s\n", __func__);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (priv == NULL)
+		return -ENOMEM;
+	spin_lock_init(&priv->lock);
+	file->private_data = priv;
+
+	spin_lock_irqsave(&vga_user_lock, flags);
+	list_add(&priv->list, &vga_user_list);
+	spin_unlock_irqrestore(&vga_user_lock, flags);
+
+	/* Set the client's list of locks */
+	priv->target = vga_default_device(); /* Maybe this is still null! */
+	priv->cards[0].pdev = priv->target;
+	priv->cards[0].io_cnt = 0;
+	priv->cards[0].mem_cnt = 0;
+
+	return 0;
+}
+
+static int vga_arb_release(struct inode *inode, struct file *file)
+{
+	struct vga_arb_private *priv = file->private_data;
+	struct vga_arb_user_card *uc;
+	unsigned long flags;
+	int i;
+
+	pr_debug("%s\n", __func__);
+
+	if (priv == NULL)
+		return -ENODEV;
+
+	spin_lock_irqsave(&vga_user_lock, flags);
+	list_del(&priv->list);
+	for (i = 0; i < MAX_USER_CARDS; i++) {
+		uc = &priv->cards[i];
+		if (uc->pdev == NULL)
+			continue;
+		pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n",
+			 uc->io_cnt, uc->mem_cnt);
+		while (uc->io_cnt--)
+			vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
+		while (uc->mem_cnt--)
+			vga_put(uc->pdev, VGA_RSRC_LEGACY_MEM);
+	}
+	spin_unlock_irqrestore(&vga_user_lock, flags);
+
+	kfree(priv);
+
+	return 0;
+}
+
+/* Stub: notifying a userspace instance that its target card is gone is
+ * not implemented yet; readers simply see "invalid" (see vga_arb_read)
+ */
+static void vga_arb_device_card_gone(struct pci_dev *pdev)
+{
+}
+
+/*
+ * Call back any registered clients to let them know we have a
+ * change in VGA cards.
+ */
+static void vga_arbiter_notify_clients(void)
+{
+	struct vga_device *vgadev;
+	unsigned long flags;
+	uint32_t new_decodes;
+	bool new_state;
+
+	if (!vga_arbiter_used)
+		return;
+
+	spin_lock_irqsave(&vga_lock, flags);
+	list_for_each_entry(vgadev, &vga_list, list) {
+		new_state = (vga_count <= 1);
+		if (vgadev->set_vga_decode) {
+			new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state);
+			vga_update_device_decodes(vgadev, new_decodes);
+		}
+	}
+	spin_unlock_irqrestore(&vga_lock, flags);
+}
+
+static int pci_notify(struct notifier_block *nb, unsigned long action,
+		      void *data)
+{
+	struct device *dev = data;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	bool notify = false;
+
+	pr_debug("%s\n", __func__);
+
+	/* For now we're only interested in devices being added and removed;
+	 * this path hasn't been tested here, so someone needs to double-check
+	 * the hotpluggable VGA card cases. */
+	if (action == BUS_NOTIFY_ADD_DEVICE)
+		notify = vga_arbiter_add_pci_device(pdev);
+	else if (action == BUS_NOTIFY_DEL_DEVICE)
+		notify = vga_arbiter_del_pci_device(pdev);
+
+	if (notify)
+		vga_arbiter_notify_clients();
+	return 0;
+}
+
+static struct notifier_block pci_notifier = {
+	.notifier_call = pci_notify,
+};
+
+static const struct file_operations vga_arb_device_fops = {
+	.read = vga_arb_read,
+	.write = vga_arb_write,
+	.poll = vga_arb_fpoll,
+	.open = vga_arb_open,
+	.release = vga_arb_release,
+	.llseek = noop_llseek,
+};
+
+static struct miscdevice vga_arb_device = {
+	MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops
+};
+
+static int __init vga_arb_device_init(void)
+{
+	int rc;
+	struct pci_dev *pdev;
+	struct vga_device *vgadev;
+
+	rc = misc_register(&vga_arb_device);
+	if (rc < 0)
+		pr_err("vgaarb: error %d registering device\n", rc);
+
+	bus_register_notifier(&pci_bus_type, &pci_notifier);
+
+	/* We add all pci devices satisfying vga class in the arbiter by
+	 * default */
+	pdev = NULL;
+	while ((pdev =
+		pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+			       PCI_ANY_ID, pdev)) != NULL)
+		vga_arbiter_add_pci_device(pdev);
+
+	pr_info("vgaarb: loaded\n");
+
+	list_for_each_entry(vgadev, &vga_list, list) {
+		if (vgadev->bridge_has_one_vga)
+			pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev));
+		else
+			pr_info("vgaarb: no bridge control possible %s\n", pci_name(vgadev->pdev));
+	}
+	return rc;
+}
+subsys_initcall(vga_arb_device_init);